code stringlengths 101 5.91M |
|---|
def gen_config_yaml(manifest_root: Path, spm_filename: str, yaml_filename: str='config.yaml', specaugment_policy: str='lb', prepend_tgt_lang_tag: bool=False, sampling_alpha: float=1.0, audio_root: str=''):
    """Write an S2T data-config YAML under ``manifest_root``.

    The vocab filename is derived from the SentencePiece model name
    (``.model`` -> ``.txt``); the chosen SpecAugment policy is applied to
    ``_train`` splits only, while utterance CMVN is applied to all splits.
    Unknown ``specaugment_policy`` values are silently ignored.
    """
    root = manifest_root.absolute()
    writer = S2TDataConfigWriter(root / yaml_filename)
    writer.set_vocab_filename(spm_filename.replace('.model', '.txt'))
    writer.set_input_channels(1)
    writer.set_input_feat_per_channel(80)
    # Dispatch table: policy name -> writer method.
    policy_setters = {
        'lb': writer.set_specaugment_lb_policy,
        'ld': writer.set_specaugment_ld_policy,
        'sm': writer.set_specaugment_sm_policy,
        'ss': writer.set_specaugment_ss_policy,
    }
    setter = policy_setters.get(specaugment_policy)
    if setter is not None:
        setter()
    writer.set_bpe_tokenizer({
        'bpe': 'sentencepiece',
        'sentencepiece_model': (root / spm_filename).as_posix(),
    })
    if prepend_tgt_lang_tag:
        writer.set_prepend_tgt_lang_tag(True)
    writer.set_sampling_alpha(sampling_alpha)
    writer.set_feature_transforms('_train', ['utterance_cmvn', 'specaugment'])
    writer.set_feature_transforms('*', ['utterance_cmvn'])
    if audio_root:
        writer.set_audio_root(audio_root)
    writer.flush()
def simple_inference(model, input):
    """Run ``model`` on ``input`` under ``torch.no_grad()``.

    Dispatch rules:
      * dict / UserDict  -> called as keyword arguments (``model(**input)``)
      * list / tuple     -> tried unpacked (``model(*input)``); if the model's
                            signature rejects that, the sequence is passed whole
      * anything else    -> passed as a single positional argument

    Returns whatever the model returns.
    """
    with torch.no_grad():
        if isinstance(input, (dict, UserDict)):
            output = model(**input)
        elif isinstance(input, (list, tuple)):
            try:
                output = model(*input)
            except TypeError:
                # BUGFIX: was a bare `except:`, which swallowed KeyboardInterrupt
                # and masked genuine model failures by silently re-running the
                # model. Only a signature mismatch should trigger the fallback.
                output = model(input)
        else:
            output = model(input)
    return output
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
    """Checks for the correctness of spacing around function calls (cpplint-style).

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines-like object; only ``.elided[linenum]`` is read.
      linenum: The number of the line to check.
      error: Callback invoked as error(filename, linenum, category, confidence, message).
    """
    line = clean_lines.elided[linenum]
    # Function calls often occur inside if/for/while/switch expressions, which
    # have their own, more liberal spacing conventions. If the line is such a
    # statement, restrict the check to the text inside its parentheses.
    fncall = line
    for pattern in ('\\bif\\s*\\((.*)\\)\\s*{', '\\bfor\\s*\\((.*)\\)\\s*{', '\\bwhile\\s*\\((.*)\\)\\s*[{;]', '\\bswitch\\s*\\((.*)\\)\\s*{'):
        match = Search(pattern, line)
        if match:
            fncall = match.group(1)  # look only inside the control-statement parens
            break
    # Skip lines containing keywords/operators that legitimately allow inner
    # spaces, and skip casts to pointer/function types (the two `(...)(`-style
    # exclusions below).
    if ((not Search('\\b(if|for|while|switch|return|new|delete|catch|sizeof)\\b', fncall)) and (not Search(' \\([^)]+\\)\\([^)]*(\\)|,$)', fncall)) and (not Search(' \\([^)]+\\)\\[[^\\]]+\\]', fncall))):
        # Space right after '(' in a call, unless it is a line-continuation '\'.
        if Search('\\w\\s*\\(\\s(?!\\s*\\\\$)', fncall):
            error(filename, linenum, 'whitespace/parens', 4, 'Extra space after ( in function call')
        elif Search('\\(\\s+(?!(\\s*\\\\)|\\()', fncall):
            error(filename, linenum, 'whitespace/parens', 2, 'Extra space after (')
        # Space before '(' in a call; #define/typedef/using-alias, function-pointer
        # declarations and `case (` are excluded.
        if (Search('\\w\\s+\\(', fncall) and (not Search('#\\s*define|typedef|using\\s+\\w+\\s*=', fncall)) and (not Search('\\w\\s+\\((\\w+::)*\\*\\w+\\)\\(', fncall)) and (not Search('\\bcase\\s+\\(', fncall))):
            # `operator` overloads are reported at the lowest confidence only.
            if Search('\\boperator_*\\b', line):
                error(filename, linenum, 'whitespace/parens', 0, 'Extra space before ( in function call')
            else:
                error(filename, linenum, 'whitespace/parens', 4, 'Extra space before ( in function call')
        # Space before ')': distinguish a dangling close-paren on its own line
        # from plain extra whitespace.
        if Search('[^)]\\s+\\)\\s*[^{\\s]', fncall):
            if Search('^\\s+\\)', fncall):
                error(filename, linenum, 'whitespace/parens', 2, 'Closing ) should be moved to the previous line')
            else:
                error(filename, linenum, 'whitespace/parens', 2, 'Extra space before )')
def sparsenet201(**kwargs):
    """Build the 201-layer SparseNet variant; extra kwargs go to get_sparsenet."""
    return get_sparsenet(model_name='sparsenet201', num_layers=201, **kwargs)
def multitask_text_transformer_decoder_arch(args, decoder_layers, decoder_embed_dim=256, decoder_attention_heads=4):
    """Override the decoder hyper-parameters on ``args`` in place, then apply
    the base multitask text-decoder architecture defaults."""
    overrides = (
        ('decoder_layers', decoder_layers),
        ('decoder_embed_dim', decoder_embed_dim),
        ('decoder_attention_heads', decoder_attention_heads),
    )
    for attr, value in overrides:
        setattr(args, attr, value)
    base_multitask_text_transformer_decoder_arch(args)
def unpack_data(dataB, device='cuda'):
    """Move the tensor payload of a dataloader batch onto ``device``.

    Accepted shapes (mirroring the original branch order):
      * multidata whose first item is a tensor: returns the first tensor, or a
        (tensor, tensor) pair when the second item is itself multidata;
      * multidata of multidata: returns the list of first-position tensors;
      * a bare tensor: returned on ``device``.
    Anything else raises RuntimeError.
    """
    if is_multidata(dataB):
        head = dataB[0]
        if torch.is_tensor(head):
            tail = dataB[1]
            if torch.is_tensor(tail):
                return head.to(device)
            if is_multidata(tail):
                return (head.to(device), tail[0].to(device))
            raise RuntimeError('Invalid data format {} -- check your dataloader!'.format(type(tail)))
        if is_multidata(head):
            # Transpose the batch and keep only the first position of each item.
            return [d.to(device) for d in list(zip(*dataB))[0]]
        raise RuntimeError('Invalid data format {} -- check your dataloader!'.format(type(head)))
    if torch.is_tensor(dataB):
        return dataB.to(device)
    raise RuntimeError('Invalid data format {} -- check your dataloader!'.format(type(dataB)))
def distribute_presets(prefixes, scaffolding, config_updates):
    """Route each flattened config update to the scaffold owning its prefix.

    Every (dotted-path, value) pair from ``config_updates`` is matched against
    ``prefixes``; the value is stored under the remaining path suffix in the
    matching scaffold's presets.
    """
    for dotted_path, value in iterate_flattened(config_updates):
        owner, remainder = find_best_match(dotted_path, prefixes)
        set_by_dotted_path(scaffolding[owner].presets, remainder, value)
def config():
    # Config-scope function: the local names below are the experiment's
    # tunable parameters. NOTE(review): none of these locals is used inside
    # the function, so they are presumably captured by introspection (e.g.
    # sacred's @ex.config decorator) — confirm against the call site.
    seed = 0
    test_mode = False
    dataset_name = None
    hyperparameters = None
    evaluation_metric = None
    minimize = None
    total_trials = None
    parameterization = None
def quaddobl_initialize(nbt, dim, wnd, dir, err):
    """Initialize PHCpack's quad-double numerical-tropism container.

    Concatenates winding numbers ``wnd``, the flattened direction vectors
    ``dir`` and the errors ``err`` into one list, and hands its string
    representation to the C interface together with ``nbt`` and ``dim``.
    """
    from phcpy.phcpy2c3 import py2c_numbtrop_quaddobl_initialize as store
    # Flatten the list of direction vectors into a single flat list.
    flattened = []
    for vector in dir:
        flattened.extend(vector)
    payload = wnd + flattened + err
    store(nbt, dim, str(payload))
def fix_cam_drop_frames(seq_path, label_names):
    """Re-align per-frame labels against the camera timestamp file.

    Cameras occasionally drop frames; each timestamp is mapped to the frame
    index it implies (timestamp * frame_rate) and labels are scattered onto
    those indices. Gaps are filled from the nearest labelled frame (ties go
    to the earlier one).

    Args:
        seq_path: sequence directory containing the timestamp file named by
            ``camera_configs['time_stamp_name']``.
        label_names: one label per timestamp line.

    Returns:
        The re-aligned label list, or ``label_names`` unchanged when the
        timestamp file is unreadable or no frames were dropped.

    Raises:
        ValueError: if a gap cannot be filled from either side.
    """
    ts_path = os.path.join(seq_path, camera_configs['time_stamp_name'])
    try:
        with open(ts_path) as ts_f:
            ts = ts_f.readlines()
    except OSError:
        # BUGFIX: was a bare `except:`. Only a missing/unreadable timestamp
        # file should fall back to the unmodified labels.
        return label_names
    n_labels = len(ts)
    # Last timestamp maps exactly onto the last label index -> nothing dropped.
    if int(float(ts[-1].rstrip()) * camera_configs['frame_rate']) == (n_labels - 1):
        return label_names
    # Scatter each label onto the frame index implied by its timestamp.
    label_names_new = [None] * n_labels
    for idx, line in enumerate(ts):
        time = float(line.rstrip())
        real_id = int(time * camera_configs['frame_rate'])
        if real_id < n_labels:
            label_names_new[real_id] = label_names[idx]
    # For every empty slot, record the nearest filled slot before and after it
    # (-1 / n_labels act as "no neighbour" sentinels).
    prev_nearest = -np.ones((n_labels,), dtype=int)
    post_nearest = -np.ones((n_labels,), dtype=int)
    prev_flag = -1
    post_flag = n_labels
    for idx, label in enumerate(label_names_new):
        if label is not None:
            prev_flag = idx
        else:
            prev_nearest[idx] = prev_flag
    for idx, label in reversed(list(enumerate(label_names_new))):
        if label is not None:
            post_flag = idx
        else:
            post_nearest[idx] = post_flag
    for idx in range(n_labels):
        prev_ok = 0 <= prev_nearest[idx] < n_labels
        post_ok = 0 <= post_nearest[idx] < n_labels
        if prev_ok and post_ok:
            # Prefer the closer neighbour; ties go to the earlier frame.
            if (idx - prev_nearest[idx]) <= (post_nearest[idx] - idx):
                sup_idx = prev_nearest[idx]
            else:
                sup_idx = post_nearest[idx]
        elif not prev_ok:
            sup_idx = post_nearest[idx]
        elif not post_ok:
            sup_idx = prev_nearest[idx]
        else:
            # Unreachable given the branches above; BUGFIX: the original would
            # have raised TypeError comparing None >= 0 here — now guarded.
            sup_idx = None
        if label_names_new[idx] is None:
            if sup_idx is not None and 0 <= sup_idx < n_labels:
                label_names_new[idx] = label_names_new[sup_idx]
            else:
                raise ValueError
    return label_names_new
def get_grad_norm(params, scale=1):
    """Return the global L2 norm of all gradients in ``params``.

    Each gradient is divided by ``scale`` before its norm is taken; parameters
    without a gradient are skipped. Returns 0.0 for an empty iterable.
    """
    squared_sum = 0.0
    for param in params:
        if param.grad is None:
            continue
        grad_norm = (param.grad.detach().data / scale).norm(2)
        squared_sum += grad_norm.item() ** 2
    return squared_sum ** 0.5
def add_packages(config, repeat=1):
    """Create ``repeat`` numbered training-package directories with configs.

    New directories are numbered from max(existing index) + 1. Each one gets a
    ``net_config.json`` copy of ``config`` whose 'random_seed' is set to the
    repeat counter (0, 1, ...).

    Args:
        config: JSON-serializable dict; its 'random_seed' key is overwritten.
        repeat: number of packages to create.

    Returns:
        List of the integer directory indexes that were created.
    """
    train_dir = 'train_package'
    # Derive the train_package directory next to this module by rewriting its
    # own path; the four .replace() calls cover .py/.pyc and both path
    # separators. NOTE(review): fragile — breaks if the module moves.
    package_dir = path.realpath(__file__).replace('pgportfolio/autotrain/generate.pyc', train_dir).replace('pgportfolio\\autotrain\\generate.pyc', train_dir).replace('pgportfolio/autotrain/generate.py', train_dir).replace('pgportfolio\\autotrain\\generate.py', train_dir)
    # Existing packages are subdirectories with purely numeric names.
    all_subdir = [int(s) for s in os.listdir(package_dir) if os.path.isdir(((package_dir + '/') + s))]
    if all_subdir:
        max_dir_num = max(all_subdir)
    else:
        max_dir_num = 0
    indexes = []
    for i in range(repeat):
        max_dir_num += 1
        directory = ((package_dir + '/') + str(max_dir_num))
        config['random_seed'] = i  # each package trains with a distinct seed
        os.makedirs(directory)
        indexes.append(max_dir_num)
        with open(((directory + '/') + 'net_config.json'), 'w') as outfile:
            json.dump(config, outfile, indent=4, sort_keys=True)
    logging.info(('create indexes %s' % indexes))
    return indexes
def parse_args():
    """Parse command-line arguments for the pretraining/finetuning script.

    Returns:
        argparse.Namespace with all options; ``args.output_dir`` is created
        (if given) before returning.

    Raises:
        AssertionError: if ``--push_to_hub`` is passed without ``--output_dir``.
    """
    parser = argparse.ArgumentParser(description='Finetune a transformers model on a text classification task')
    # --- dataset / preprocessing options ---
    parser.add_argument('--dataset_name', type=str, default=None, help='The name of the dataset to use (via the datasets library).')
    parser.add_argument('--dataset_config_names', nargs='+', type=str, required=True, help='The configuration names of the dataset to use (via the datasets library).')
    parser.add_argument('--dataset_split_names', nargs='+', type=str, required=True, help='The names of the training data set splits to use (via the datasets library).')
    parser.add_argument('--preprocessing_num_workers', type=int, default=None, help='The number of processes to use for the preprocessing.')
    parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
    parser.add_argument('--preprocessing_only', action='store_true', help='Only run the preprocessing script to be cached for future use')
    parser.add_argument('--cache_dir', type=str, default=None, help='Where do you want to store the pretrained models downloaded from huggingface.co')
    parser.add_argument('--validation_split_percentage', type=int, default=1, help='Percentage of training data that should be used for validation if no validation is present in dataset.')
    parser.add_argument('--logging_steps', type=int, default=500, help='Number of steps between each logging')
    # BUGFIX: help text was copy-pasted from --logging_steps.
    parser.add_argument('--saving_steps', type=int, default=500, help='Number of steps between each checkpoint save')
    parser.add_argument('--audio_column_name', type=str, default='audio', help="Column in the dataset that contains speech file path. Defaults to 'audio'")
    # --- model / cache options ---
    parser.add_argument('--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=True)
    parser.add_argument('--config_name', type=str, default=None, help='Pretrained config name or path if not the same as model_name')
    parser.add_argument('--train_cache_file_name', type=str, default=None, help='Path to the train cached file name')
    parser.add_argument('--validation_cache_file_name', type=str, default=None, help='Path to the validation cached file name')
    # --- optimization options ---
    parser.add_argument('--per_device_train_batch_size', type=int, default=8, help='Batch size (per device) for the training dataloader.')
    parser.add_argument('--per_device_eval_batch_size', type=int, default=8, help='Batch size (per device) for the evaluation dataloader.')
    parser.add_argument('--learning_rate', type=float, default=5e-05, help='Initial learning rate (after the potential warmup period) to use.')
    parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay to use.')
    parser.add_argument('--num_train_epochs', type=int, default=3, help='Total number of training epochs to perform.')
    parser.add_argument('--max_train_steps', type=int, default=None, help='Total number of training steps to perform. If provided, overrides num_train_epochs.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--gradient_checkpointing', action='store_true', help='If True, use gradient checkpointing to save memory at the expense of slower backward pass.')
    parser.add_argument('--lr_scheduler_type', type=SchedulerType, default='linear', help='The scheduler type to use.', choices=['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup'])
    parser.add_argument('--num_warmup_steps', type=int, default=0, help='Number of steps for the warmup in the lr scheduler.')
    parser.add_argument('--output_dir', type=str, default=None, help='Where to store the final model.')
    parser.add_argument('--seed', type=int, default=0, help='A seed for reproducible training.')
    # --- gumbel-softmax / audio filtering options ---
    parser.add_argument('--max_gumbel_temperature', type=float, default=2.0, help='Maximum temperature for gumbel softmax.')
    parser.add_argument('--min_gumbel_temperature', type=float, default=0.5, help='Minimum temperature for gumbel softmax.')
    parser.add_argument('--gumbel_temperature_decay', type=float, default=0.999995, help='Decay of gumbel temperature during training.')
    parser.add_argument('--max_duration_in_seconds', type=float, default=5.0, help='Filter out audio files that are longer than `max_duration_in_seconds` seconds')
    parser.add_argument('--min_duration_in_seconds', type=float, default=3.0, help='Filter out audio files that are shorter than `min_duration_in_seconds` seconds')
    parser.add_argument('--pad_to_multiple_of', type=int, default=None, help='If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).')
    parser.add_argument('--adam_beta1', type=float, default=0.9, help='Beta1 for AdamW optimizer')
    parser.add_argument('--adam_beta2', type=float, default=0.999, help='Beta2 for AdamW optimizer')
    parser.add_argument('--adam_epsilon', type=float, default=1e-08, help='Epsilon for AdamW optimizer')
    # --- hub / masking options ---
    parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the model to the Hub.')
    parser.add_argument('--hub_model_id', type=str, help='The name of the repository to keep in sync with the local `output_dir`.')
    parser.add_argument('--hub_token', type=str, help='The token to use to push to the Model Hub.')
    parser.add_argument('--mask_time_prob', type=float, default=None, help='Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked in the contrastive task. If omitted, will pull value from model config.')
    parser.add_argument('--mask_time_length', type=int, default=None, help='Length of each vector mask span to mask along the time axis in the contrastive task. If omitted, will pull value from model config.')
    args = parser.parse_args()
    if args.push_to_hub:
        assert (args.output_dir is not None), 'Need an `output_dir` to create a repo when `--push_to_hub` is passed.'
    if (args.output_dir is not None):
        os.makedirs(args.output_dir, exist_ok=True)
    return args
def calculate_auc(model, mbs_list, shuffle=True):
    """Evaluate ``model`` over ``mbs_list`` minibatches and compute ROC-AUC.

    The model is put in eval mode; each batch call must return
    (output, label_tensor). Returns (auc, y_real, y_hat) where the last two
    are flat score/label lists. NOTE: ``shuffle=True`` shuffles the caller's
    list in place.
    """
    model.eval()
    y_real = []
    y_hat = []
    if shuffle:
        random.shuffle(mbs_list)
    for batch in mbs_list:
        output, label_tensor = model(batch)
        y_hat.extend(output.cpu().data.view(-1).numpy())
        y_real.extend(label_tensor.cpu().data.view(-1).numpy())
    return (roc_auc_score(y_real, y_hat), y_real, y_hat)
def recursive_merge_dicts(*args):
    """Fold any number of dicts left-to-right via recursive_merge_2dicts.

    With no arguments an empty dict is returned; with one argument that dict
    is returned unchanged.
    """
    if not args:
        return {}
    merged = args[0]
    for other in args[1:]:
        merged = recursive_merge_2dicts(merged, other)
    return merged
class TestChatCache(unittest.TestCase):
    """End-to-end check of the ChatCache put/get round trip."""

    def setUp(self):
        return super().setUp()

    def tearDown(self) -> None:
        # Remove the on-disk cache directory created by the test run.
        cache_dir = './gptcache_data'
        if os.path.exists(cache_dir) and os.path.isdir(cache_dir):
            try:
                shutil.rmtree(cache_dir)
                print(f'The directory gptcache_data has been successfully deleted.')
            except Exception as e:
                print(f'An error occurred while deleting the directory: {e}')
        else:
            print(f'The directory gptcache_data does not exist.')
        return super().tearDown()

    def test_chat_cache(self):
        # Store one real chatbot response, then verify the cache returns it.
        cache_plugin = ChatCache(embedding_model_dir='hkunlp/instructor-large')
        cache_plugin.init_similar_cache_from_config()
        prompt = 'Tell me about Intel Xeon Scalable Processors.'
        config = PipelineConfig(model_name_or_path='facebook/opt-125m')
        chatbot = build_chatbot(config)
        response = chatbot.predict(prompt)
        cache_plugin.put(prompt, response)
        answer = cache_plugin.get(prompt)
        self.assertIn('Intel Xeon Scalable', str(answer['choices'][0]['text']))
class Graph():
    """Directed graph described by parent/child adjacency maps over node ids."""

    def __init__(self, parent_map, children_map, id_list):
        # parent_map: node id -> list of parent ids
        # children_map: node id -> list of child ids
        # id_list: every node id in the graph
        self.parent_map = parent_map
        self.children_map = children_map
        self.id_list = id_list

    def topoligical_sort(self):
        """Return node ids in reverse BFS-from-roots order (leaves first).

        (Method name typo is preserved for caller compatibility.)
        """
        ordered = []
        frontier = []
        # Seed with every parentless node, queueing its children.
        for node in self.id_list:
            if len(self.parent_map[node]) == 0 and node not in ordered:
                ordered.append(node)
                for child in self.children_map[node]:
                    if child not in frontier and child not in ordered:
                        frontier.append(child)
        # Breadth-first expansion of the queued children.
        while frontier:
            node = frontier.pop(0)
            ordered.append(node)
            for child in self.children_map[node]:
                if child not in frontier and child not in ordered:
                    frontier.append(child)
        ordered.reverse()
        return ordered
def ResNet18(winogradArgs: dict=None, quantArgs: dict=None, miscArgs: dict=None, num_classes: int=10, mult: int=1.0):
    """Construct an 18-layer ResNet (BasicBlock, 2-2-2-2 stage layout)."""
    stage_blocks = [2, 2, 2, 2]
    return ResNet(BasicBlock, stage_blocks, num_classes, winogradArgs=winogradArgs, quantization=quantArgs, miscArgs=miscArgs, multiplier=mult)
def apply_rotary_pos_emb_single(x, cos, sin, position_ids):
    """Apply rotary position embedding to ``x`` using precomputed cos/sin tables.

    The cached tables are squeezed of their broadcast singleton dims, indexed
    by ``position_ids`` and re-expanded before the rotation is applied.
    """
    cos = cos.squeeze(1).squeeze(0)[position_ids].unsqueeze(1)
    sin = sin.squeeze(1).squeeze(0)[position_ids].unsqueeze(1)
    return (x * cos) + (rotate_half(x) * sin)
def discriminator(image, options, n_scale=2, reuse=False, name='discriminator'):
    """Multi-scale discriminator: four strided conv stages plus a final conv,
    run on a bicubic image pyramid of ``n_scale`` levels."""
    # Build the input pyramid: level i is downscaled by 2**i.
    pyramid = []
    for i in range(n_scale):
        factor = 2 ** i
        target_size = [get_shape(image)[1] // factor, get_shape(image)[2] // factor]
        pyramid.append(tf.image.resize_bicubic(image, target_size))
    with tf.variable_scope(name):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        else:
            assert (tf.get_variable_scope().reuse is False)
        feats = dis_down(pyramid, 4, 2, n_scale, options.df_dim, 'd_h0_conv_scale_')
        feats = dis_down(feats, 4, 2, n_scale, (options.df_dim * 2), 'd_h1_conv_scale_')
        feats = dis_down(feats, 4, 2, n_scale, (options.df_dim * 4), 'd_h2_conv_scale_')
        feats = dis_down(feats, 4, 2, n_scale, (options.df_dim * 8), 'd_h3_conv_scale_')
        return final_conv(feats, n_scale, 'd_pred_scale_')
def label2onehot(label, length):
    """Return a float vector of ``length`` zeros with position(s) ``label`` set to 1."""
    encoded = np.zeros(length)
    encoded[label] = 1
    return encoded
class PseudoLabel():
    """Accumulates per-pixel softmax confidences/labels over batches and
    derives per-class confidence thresholds for pseudo-labelling."""

    def __init__(self, cfg):
        (h, w) = cfg.INPUT.TARGET_INPUT_SIZE_TRAIN
        # Running buffers of per-image max-probability and argmax-label maps.
        self.prob_tar = np.zeros([1, h, w])
        self.label_tar = np.zeros([1, h, w])
        self.thres = []  # per-class thresholds, filled by get_threshold_const
        self.number_class = cfg.MODEL.NUM_CLASSES
        self.out_dir = cfg.OUTPUT_DIR
        self.iter = 0  # number of batches accumulated so far

    def save_results(self):
        """Persist the computed thresholds to <out_dir>/thres_const.npy."""
        np.save(os.path.join(self.out_dir, 'thres_const.npy'), self.thres)
        print('save done.')

    def update_pseudo_label(self, input):
        """Accumulate softmax confidence and label maps from raw logits."""
        input = F.softmax(input.detach(), dim=1)
        (prob, label) = torch.max(input, dim=1)
        prob_np = prob.cpu().numpy()
        label_np = label.cpu().numpy()
        print(self.iter)  # progress trace
        if (self.iter == 0):
            self.prob_tar = prob_np
            self.label_tar = label_np
        else:
            self.prob_tar = np.append(self.prob_tar, prob_np, axis=0)
            self.label_tar = np.append(self.label_tar, label_np, axis=0)
        self.iter += 1

    def get_threshold_const(self, thred, percent=0.5):
        """Return per-class confidence thresholds, capped at ``thred``.

        The threshold for class i is the ``percent`` quantile of accumulated
        confidences of pixels predicted as i; classes with no pixels get 0.
        """
        for i in range(self.number_class):
            x = self.prob_tar[(self.label_tar == i)]
            if (len(x) == 0):
                self.thres.append(0)
                continue
            x = np.sort(x)
            # BUGFIX: np.int was deprecated and removed in NumPy 1.24 — use
            # the builtin int on the rounded quantile index instead.
            self.thres.append(x[int(np.round(len(x) * percent))])
        self.thres = np.array(self.thres)
        self.thres[(self.thres > thred)] = thred
        return self.thres
def inception_v4(inputs, num_classes=1001, is_training=True, dropout_keep_prob=0.8, reuse=None, scope='InceptionV4', create_aux_logits=True):
    """Builds the Inception-V4 classification head on top of inception_v4_base.

    Args:
      inputs: input image tensor.
      num_classes: size of the final logits layer.
      is_training: toggles batch-norm / dropout training behavior.
      dropout_keep_prob: keep probability for the pre-logits dropout.
      reuse: whether to reuse variables in ``scope``.
      scope: variable-scope name for the whole network.
      create_aux_logits: if True, an auxiliary classifier is attached to the
        'Mixed_6h' endpoint.

    Returns:
      (logits, end_points) where end_points maps endpoint names to tensors,
      including 'AuxLogits' (if created), 'PreLogitsFlatten', 'Logits' and
      softmax 'Predictions'.
    """
    end_points = {}
    with tf.variable_scope(scope, 'InceptionV4', [inputs], reuse=reuse) as scope:
        # Batch-norm and dropout follow the is_training flag everywhere below.
        with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
            (net, end_points) = inception_v4_base(inputs, scope=scope)
            with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'):
                if create_aux_logits:
                    # Auxiliary classifier branch off the Mixed_6h endpoint.
                    with tf.variable_scope('AuxLogits'):
                        aux_logits = end_points['Mixed_6h']
                        aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3, padding='VALID', scope='AvgPool_1a_5x5')
                        aux_logits = slim.conv2d(aux_logits, 128, [1, 1], scope='Conv2d_1b_1x1')
                        aux_logits = slim.conv2d(aux_logits, 768, aux_logits.get_shape()[1:3], padding='VALID', scope='Conv2d_2a')
                        aux_logits = slim.flatten(aux_logits)
                        aux_logits = slim.fully_connected(aux_logits, num_classes, activation_fn=None, scope='Aux_logits')
                        end_points['AuxLogits'] = aux_logits
                # Main head: global average pool -> dropout -> flatten -> FC.
                with tf.variable_scope('Logits'):
                    net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID', scope='AvgPool_1a')
                    net = slim.dropout(net, dropout_keep_prob, scope='Dropout_1b')
                    net = slim.flatten(net, scope='PreLogitsFlatten')
                    end_points['PreLogitsFlatten'] = net
                    logits = slim.fully_connected(net, num_classes, activation_fn=None, scope='Logits')
                    end_points['Logits'] = logits
                    end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')
    return (logits, end_points)
def filter_opt(opt, tag):
    """Select entries of ``opt`` whose dotted key starts with ``tag``, with the
    prefix stripped.

    E.g. filter_opt({'train.lr': 0.1, 'eval.bs': 4}, 'train') -> {'lr': 0.1}.
    A key equal to ``tag`` itself maps to the empty-string key.
    """
    selected = {}
    for key, value in opt.items():
        prefix, _, remainder = key.partition('.')
        if prefix == tag:
            selected[remainder] = value
    return selected
def evaluate_imagenet(gpu, encoder_usage_info, downstream_dataset, encoder, reference_label, trigger, reference, key='clean'):
    """Launch downstream-classifier training/evaluation as a detached shell job.

    Runs training_downstream_classifier.py via nohup in the background,
    redirecting output to ./log/imagenet/evaluation_<key>_<dataset>.txt.
    NOTE(review): arguments are interpolated directly into a shell command and
    executed with os.system — only safe for trusted, internally generated
    values (shell-injection risk otherwise).
    """
    cmd = f'nohup python3 -u training_downstream_classifier.py --encoder_usage_info {encoder_usage_info} --dataset {downstream_dataset} --trigger_file {trigger} --encoder {encoder} --reference_label {reference_label} --reference_file ./reference/imagenet/{reference}.npz --gpu {gpu} >./log/imagenet/evaluation_{key}_{downstream_dataset}.txt &'
    os.system(cmd)
def FunctionCorrelation(tenFirst, tenSecond, intStride):
    """Functional wrapper around the _FunctionCorrelation autograd Function."""
    return _FunctionCorrelation.apply(tenFirst, tenSecond, intStride)
class BasicBlockSig(nn.Module):
    """A single Conv2d followed by a Sigmoid activation.

    NOTE(review): the ``init`` argument is accepted but never used — confirm
    whether weight initialization was intended here.
    """

    def __init__(self, in_channels, out_channels, init='xavier', ksize=3, stride=1, pad=1):
        super(BasicBlockSig, self).__init__()
        conv = nn.Conv2d(in_channels, out_channels, ksize, stride, pad)
        self.body = nn.Sequential(conv, nn.Sigmoid())

    def forward(self, x):
        return self.body(x)
def ten_problems():
    """Return ten benchmark problems as (board, reference_value) pairs.

    Each board is a list of 4-element rows; the table below lists, per
    problem, the distinct rows with their repeat counts and the expected
    reference value.
    """
    specs = [
        ([([3, 6, 7, 8], 7), ([4, 6, 7, 8], 2)], 231),
        ([([2, 5, 6, 8], 1), ([3, 6, 7, 8], 1), ([4, 5, 7, 8], 1), ([4, 6, 7, 8], 7)], 294),
        ([([2, 4, 7, 8], 1), ([3, 6, 7, 8], 2), ([4, 6, 7, 8], 7)], 356),
        ([([2, 5, 7, 8], 2), ([4, 6, 7, 8], 8)], 398),
        ([([3, 5, 7, 8], 1), ([4, 5, 7, 8], 2), ([3, 6, 7, 8], 2), ([4, 6, 7, 8], 5)], 526),
        ([([4, 5, 7, 8], 3), ([3, 6, 7, 8], 2), ([4, 6, 7, 8], 6)], 734),
        ([([3, 5, 7, 8], 1), ([3, 6, 7, 8], 3), ([4, 6, 7, 8], 7)], 1104),
        ([([3, 6, 7, 8], 4), ([4, 6, 7, 8], 8)], 1600),
        ([([3, 5, 7, 8], 1), ([3, 6, 7, 8], 2), ([4, 6, 7, 8], 9)], 2166),
        ([([3, 6, 7, 8], 3), ([4, 6, 7, 8], 10)], 3102),
    ]
    problems = []
    for groups, reference in specs:
        board = []
        for row, count in groups:
            # Fresh list per repetition so rows can be mutated independently.
            board.extend(list(row) for _ in range(count))
        problems.append((board, reference))
    return problems
def calculate_desired_noise_rms(clean_rms, snr):
    """Return the noise RMS that yields the target ``snr`` (in dB) against a
    clean signal of RMS ``clean_rms`` (SNR = 20*log10(clean/noise))."""
    return clean_rms / (10 ** (float(snr) / 20))
def save_dataset(train_, test_, filename):
    """Serialize the train/test split to ``filename`` as a dict via torch.save."""
    payload = {'train': train_, 'test': test_}
    torch.save(payload, filename)
_module
class FastRCNN(TwoStageDetector):
    """Two-stage detector without an RPN: region proposals are supplied by the
    caller rather than generated by the network."""

    def __init__(self, backbone, bbox_roi_extractor, bbox_head, train_cfg, test_cfg, neck=None, shared_head=None, mask_roi_extractor=None, mask_head=None, pretrained=None):
        super(FastRCNN, self).__init__(backbone=backbone, neck=neck, shared_head=shared_head, bbox_roi_extractor=bbox_roi_extractor, bbox_head=bbox_head, train_cfg=train_cfg, test_cfg=test_cfg, mask_roi_extractor=mask_roi_extractor, mask_head=mask_head, pretrained=pretrained)

    def forward_test(self, imgs, img_metas, proposals, **kwargs):
        """Validate the per-augmentation inputs, then dispatch to simple_test
        (single augmentation) or aug_test (multiple augmentations)."""
        # Both imgs and img_metas must be lists with one entry per augmentation.
        for arg, arg_name in ((imgs, 'imgs'), (img_metas, 'img_metas')):
            if not isinstance(arg, list):
                raise TypeError('{} must be a list, but got {}'.format(arg_name, type(arg)))
        num_augs = len(imgs)
        if num_augs != len(img_metas):
            raise ValueError('num of augmentations ({}) != num of image meta ({})'.format(len(imgs), len(img_metas)))
        # Only single-image batches are supported in test mode.
        assert imgs[0].size(0) == 1
        if num_augs == 1:
            return self.simple_test(imgs[0], img_metas[0], proposals[0], **kwargs)
        return self.aug_test(imgs, img_metas, proposals, **kwargs)
def remove_email(text):
    """Return ``text`` with e-mail addresses blanked out, token by token.

    The input is split on single spaces; each token is stripped and any
    embedded e-mail address is removed, so a pure-email token collapses to an
    empty string (leaving a double space in the rejoined result, matching the
    original token positions).
    """
    tokens = text.split(' ')
    cleaned = []
    for token in tokens:
        token = token.strip()
        # BUGFIX: the original pattern had no '@', so it also deleted plain
        # domain-like tokens such as 'example.com'. The address now requires
        # local-part '@' domain '.' tld.
        token = re.sub(r'([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)', '', token, flags=re.MULTILINE)
        cleaned.append(token)
    return ' '.join(cleaned)
def gelu_fast(x):
    """Fast tanh-based GELU approximation.

    gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
    The sqrt(2/pi) constant is computed once and cached on the function object.
    """
    if not hasattr(gelu_fast, '_a'):
        gelu_fast._a = math.sqrt(2 / math.pi)
    inner = gelu_fast._a * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))
def construct_flatindex_from_embeddings(embeddings, ids=None):
    """Build a flat inner-product FAISS index over ``embeddings``.

    When ``ids`` is given, the index is wrapped in an IndexIDMap2 so vectors
    are addressable by the provided (int64-cast) ids; otherwise vectors are
    added with implicit sequential ids.
    """
    dim = embeddings.shape[1]
    print('embedding shape: ' + str(embeddings.shape))
    index = faiss.index_factory(dim, 'Flat', faiss.METRIC_INNER_PRODUCT)
    if ids is None:
        index.add(embeddings)
        return index
    ids = ids.astype(np.int64)
    print(ids.shape, ids.dtype)
    index = faiss.IndexIDMap2(index)
    index.add_with_ids(embeddings, ids)
    return index
class ExploreTaskDefinition(AbstractTaskDefinition):
    """Task definition for open-ended exploration with a mobile robot."""

    # Default arm joint configuration, used both when placing the robot and
    # as the position-control target.
    joint_positions = [0.0, -1.33, -1.8, 0.0, 1.5, 1.6]

    def __init__(self, *args, **kwargs):
        super(ExploreTaskDefinition, self).__init__(*args, **kwargs)
        # Overhead 64x64 camera looking straight down (pitch -90) at the scene.
        self.addCamera(Camera('top', [-0.0, 0.0, 1.0], distance=0.7, roll=0.0, image_width=64, image_height=64, pitch=-90, yaw=0, fov=40))

    def _setup(self):
        # BUGFIX: the method body was missing entirely (a syntax error as
        # written). Exploration needs no extra world setup, so this is an
        # explicit no-op.
        pass

    def _setupRobot(self, handle):
        if not self.robot.mobile():
            # BUGFIX: the original concatenated '...sense' + 'without...'
            # without a separating space in the message.
            raise RuntimeError('Exploration task does not even make sense without a mobile robot.')
        self.robot.place([0, 0, 0], [0, 0, 0, 1], self.joint_positions)
        self.robot.arm(self.joint_positions, pb.POSITION_CONTROL)
        self.robot.gripper(0, pb.POSITION_CONTROL)

    def _makeTask(self):
        task = Task()
        return task

    def getName(self):
        return 'explore'

    def reset(self):
        pass
class PyTorchTensor(BaseTensor):
__slots__ = ()
norms: 'NormsMethods[PyTorchTensor]'
def __init__(self, raw: 'torch.Tensor'):
global torch
if (torch is None):
torch = import_module('torch')
super().__init__(raw)
def raw(self) -> 'torch.Tensor':
return cast(torch.Tensor, super().raw)
def numpy(self: TensorType) -> Any:
a = self.raw.detach().cpu().numpy()
if a.flags.writeable:
a.flags.writeable = False
return a
def item(self) -> Union[(int, float, bool)]:
return self.raw.item()
def shape(self) -> Shape:
return self.raw.shape
def reshape(self: TensorType, shape: Union[(Shape, int)]) -> TensorType:
if isinstance(shape, int):
shape = (shape,)
return type(self)(self.raw.reshape(shape))
def astype(self: TensorType, dtype: Any) -> TensorType:
return type(self)(self.raw.to(dtype))
def clip(self: TensorType, min_: float, max_: float) -> TensorType:
return type(self)(self.raw.clamp(min_, max_))
def square(self: TensorType) -> TensorType:
return type(self)((self.raw ** 2))
def sin(self: TensorType) -> TensorType:
return type(self)(torch.sin(self.raw))
def cos(self: TensorType) -> TensorType:
return type(self)(torch.cos(self.raw))
def tan(self: TensorType) -> TensorType:
return type(self)(torch.tan(self.raw))
def sinh(self: TensorType) -> TensorType:
return type(self)(torch.sinh(self.raw))
def cosh(self: TensorType) -> TensorType:
return type(self)(torch.cosh(self.raw))
def tanh(self: TensorType) -> TensorType:
return type(self)(torch.tanh(self.raw))
def arcsin(self: TensorType) -> TensorType:
return type(self)(torch.asin(self.raw))
def arccos(self: TensorType) -> TensorType:
return type(self)(torch.acos(self.raw))
def arctan(self: TensorType) -> TensorType:
return type(self)(torch.atan(self.raw))
def arcsinh(self: TensorType) -> TensorType:
return type(self)(torch.asinh(self.raw))
def arccosh(self: TensorType) -> TensorType:
return type(self)(torch.acosh(self.raw))
def arctanh(self: TensorType) -> TensorType:
return type(self)(torch.atanh(self.raw))
def sum(self: TensorType, axis: Optional[AxisAxes]=None, keepdims: bool=False) -> TensorType:
if ((axis is None) and (not keepdims)):
return type(self)(self.raw.sum())
if (axis is None):
axis = tuple(range(self.ndim))
return type(self)(self.raw.sum(dim=axis, keepdim=keepdims))
def prod(self: TensorType, axis: Optional[AxisAxes]=None, keepdims: bool=False) -> TensorType:
if ((axis is None) and (not keepdims)):
return type(self)(self.raw.prod())
if (axis is None):
axis = tuple(range(self.ndim))
elif (not isinstance(axis, Iterable)):
axis = (axis,)
x = self.raw
for i in sorted(axis, reverse=True):
x = x.prod(i, keepdim=keepdims)
return type(self)(x)
def mean(self: TensorType, axis: Optional[AxisAxes]=None, keepdims: bool=False) -> TensorType:
if (self.raw.dtype not in [torch.float16, torch.float32, torch.float64]):
raise ValueError(f'Can only calculate the mean of floating types. Got {self.raw.dtype} instead.')
if ((axis is None) and (not keepdims)):
return type(self)(self.raw.mean())
if (axis is None):
axis = tuple(range(self.ndim))
return type(self)(self.raw.mean(dim=axis, keepdim=keepdims))
def min(self: TensorType, axis: Optional[AxisAxes]=None, keepdims: bool=False) -> TensorType:
if ((axis is None) and (not keepdims)):
return type(self)(self.raw.min())
if (axis is None):
axis = tuple(range(self.ndim))
elif (not isinstance(axis, Iterable)):
axis = (axis,)
x = self.raw
for i in sorted(axis, reverse=True):
(x, _) = x.min(i, keepdim=keepdims)
return type(self)(x)
def max(self: TensorType, axis: Optional[AxisAxes]=None, keepdims: bool=False) -> TensorType:
if ((axis is None) and (not keepdims)):
return type(self)(self.raw.max())
if (axis is None):
axis = tuple(range(self.ndim))
elif (not isinstance(axis, Iterable)):
axis = (axis,)
x = self.raw
for i in sorted(axis, reverse=True):
(x, _) = x.max(i, keepdim=keepdims)
return type(self)(x)
def minimum(self: TensorType, other: TensorOrScalar) -> TensorType:
if isinstance(other, Tensor):
other_ = other.raw
elif (isinstance(other, int) or isinstance(other, float)):
other_ = torch.full_like(self.raw, other)
else:
raise TypeError('expected x to be a Tensor, int or float')
return type(self)(torch.min(self.raw, other_))
def maximum(self: TensorType, other: TensorOrScalar) -> TensorType:
if isinstance(other, Tensor):
other_ = other.raw
elif (isinstance(other, int) or isinstance(other, float)):
other_ = torch.full_like(self.raw, other)
else:
raise TypeError('expected x to be a Tensor, int or float')
return type(self)(torch.max(self.raw, other_))
def argmin(self: TensorType, axis: Optional[int]=None) -> TensorType:
return type(self)(self.raw.argmin(dim=axis))
def argmax(self: TensorType, axis: Optional[int]=None) -> TensorType:
return type(self)(self.raw.argmax(dim=axis))
def argsort(self: TensorType, axis: int=(- 1)) -> TensorType:
return type(self)(self.raw.argsort(dim=axis))
def sort(self: TensorType, axis: int=(- 1)) -> TensorType:
return type(self)(self.raw.sort(dim=axis).values)
def topk(self: TensorType, k: int, sorted: bool=True) -> Tuple[(TensorType, TensorType)]:
(values, indices) = self.raw.topk(k, sorted=sorted)
return (type(self)(values), type(self)(indices))
def uniform(self: TensorType, shape: ShapeOrScalar, low: float=0.0, high: float=1.0) -> TensorType:
return type(self)(((torch.rand(shape, dtype=self.raw.dtype, device=self.raw.device) * (high - low)) + low))
def normal(self: TensorType, shape: ShapeOrScalar, mean: float=0.0, stddev: float=1.0) -> TensorType:
return type(self)(((torch.randn(shape, dtype=self.raw.dtype, device=self.raw.device) * stddev) + mean))
def ones(self: TensorType, shape: ShapeOrScalar) -> TensorType:
return type(self)(torch.ones(shape, dtype=self.raw.dtype, device=self.raw.device))
def zeros(self: TensorType, shape: ShapeOrScalar) -> TensorType:
return type(self)(torch.zeros(shape, dtype=self.raw.dtype, device=self.raw.device))
def ones_like(self: TensorType) -> TensorType:
return type(self)(torch.ones_like(self.raw))
def zeros_like(self: TensorType) -> TensorType:
return type(self)(torch.zeros_like(self.raw))
def full_like(self: TensorType, fill_value: float) -> TensorType:
return type(self)(torch.full_like(self.raw, fill_value))
def onehot_like(self: TensorType, indices: TensorType, *, value: float=1) -> TensorType:
if (self.ndim != 2):
raise ValueError('onehot_like only supported for 2D tensors')
if (indices.ndim != 1):
raise ValueError('onehot_like requires 1D indices')
if (len(indices) != len(self)):
raise ValueError('length of indices must match length of tensor')
x = torch.zeros_like(self.raw)
rows = np.arange(x.shape[0])
x[(rows, indices.raw)] = value
return type(self)(x)
def from_numpy(self: TensorType, a: Any) -> TensorType:
return type(self)(torch.as_tensor(a, device=self.raw.device))
def _concatenate(self: TensorType, tensors: Iterable[TensorType], axis: int=0) -> TensorType:
tensors_ = unwrap_(*tensors)
return type(self)(torch.cat(tensors_, dim=axis))
def _stack(self: TensorType, tensors: Iterable[TensorType], axis: int=0) -> TensorType:
tensors_ = unwrap_(*tensors)
return type(self)(torch.stack(tensors_, dim=axis))
def transpose(self: TensorType, axes: Optional[Axes]=None) -> TensorType:
    """Permute dimensions; with no `axes`, reverse them (NumPy-style transpose)."""
    if axes is None:
        axes = tuple(reversed(range(self.ndim)))
    return type(self)(self.raw.permute(*axes))
def all(self: TensorType, axis: Optional[AxisAxes]=None, keepdims: bool=False) -> TensorType:
    """Logical AND reduction over the given axis/axes (self must be bool)."""
    assert_bool(self)
    if axis is None and not keepdims:
        return type(self)(self.raw.all())
    if axis is None:
        reduce_axes = tuple(range(self.ndim))
    elif isinstance(axis, Iterable):
        reduce_axes = axis
    else:
        reduce_axes = (axis,)
    reduced = self.raw
    # reduce from the highest axis down so remaining indices stay valid
    for dim in sorted(reduce_axes, reverse=True):
        reduced = reduced.all(dim, keepdim=keepdims)
    return type(self)(reduced)
def any(self: TensorType, axis: Optional[AxisAxes]=None, keepdims: bool=False) -> TensorType:
    """Logical OR reduction over the given axis/axes (self must be bool)."""
    assert_bool(self)
    if axis is None and not keepdims:
        return type(self)(self.raw.any())
    if axis is None:
        reduce_axes = tuple(range(self.ndim))
    elif isinstance(axis, Iterable):
        reduce_axes = axis
    else:
        reduce_axes = (axis,)
    reduced = self.raw
    # reduce from the highest axis down so remaining indices stay valid
    for dim in sorted(reduce_axes, reverse=True):
        reduced = reduced.any(dim, keepdim=keepdims)
    return type(self)(reduced)
def logical_and(self: TensorType, other: TensorOrScalar) -> TensorType:
    """Element-wise AND of two bool tensors."""
    assert_bool(self)
    assert_bool(other)
    rhs = unwrap1(other)
    return type(self)(self.raw & rhs)
def logical_or(self: TensorType, other: TensorOrScalar) -> TensorType:
    """Element-wise OR of two bool tensors."""
    assert_bool(self)
    assert_bool(other)
    rhs = unwrap1(other)
    return type(self)(self.raw | rhs)
def logical_not(self: TensorType) -> TensorType:
    """Element-wise NOT of a bool tensor."""
    assert_bool(self)
    inverted = ~self.raw
    return type(self)(inverted)
def exp(self: TensorType) -> TensorType:
    """Element-wise exponential."""
    return type(self)(self.raw.exp())
def log(self: TensorType) -> TensorType:
    """Element-wise natural logarithm."""
    return type(self)(self.raw.log())
def log2(self: TensorType) -> TensorType:
    """Element-wise base-2 logarithm."""
    return type(self)(self.raw.log2())
def log10(self: TensorType) -> TensorType:
    """Element-wise base-10 logarithm."""
    return type(self)(self.raw.log10())
def log1p(self: TensorType) -> TensorType:
    """Element-wise log(1 + x), numerically stable near zero."""
    return type(self)(self.raw.log1p())
def tile(self: TensorType, multiples: Axes) -> TensorType:
    """Repeat the tensor `multiples[i]` times along each dimension i."""
    if len(multiples) != self.ndim:
        raise ValueError('multiples requires one entry for each dimension')
    return type(self)(self.raw.repeat(*multiples))
def softmax(self: TensorType, axis: int=(- 1)) -> TensorType:
    """Softmax along `axis` (defaults to the last axis)."""
    return type(self)(self.raw.softmax(dim=axis))
def log_softmax(self: TensorType, axis: int=(- 1)) -> TensorType:
    """Log-softmax along `axis` (defaults to the last axis)."""
    return type(self)(self.raw.log_softmax(dim=axis))
def squeeze(self: TensorType, axis: Optional[AxisAxes]=None) -> TensorType:
    """Drop size-1 dimensions: all of them, or only those listed in `axis`."""
    if axis is None:
        return type(self)(self.raw.squeeze())
    dims = axis if isinstance(axis, Iterable) else (axis,)
    squeezed = self.raw
    # walk axes from highest to lowest so earlier indices stay valid
    for dim in sorted(dims, reverse=True):
        if squeezed.shape[dim] != 1:
            raise ValueError('cannot select an axis to squeeze out which has size not equal to one')
        squeezed = squeezed.squeeze(dim=dim)
    return type(self)(squeezed)
def expand_dims(self: TensorType, axis: int) -> TensorType:
    """Insert a new size-1 dimension at position `axis`."""
    return type(self)(self.raw.unsqueeze(axis))
def full(self: TensorType, shape: ShapeOrScalar, value: float) -> TensorType:
    """Return a tensor of the given shape filled with `value` (self's dtype/device)."""
    size = shape if isinstance(shape, Iterable) else (shape,)
    return type(self)(torch.full(size, value, dtype=self.raw.dtype, device=self.raw.device))
def index_update(self: TensorType, indices: Any, values: TensorOrScalar) -> TensorType:
    """Return a copy of self with `indices` assigned to `values` (out-of-place)."""
    indices, raw_values = unwrap_(indices, values)
    if isinstance(indices, tuple):
        indices = unwrap_(*indices)
    updated = self.raw.clone()
    updated[indices] = raw_values
    return type(self)(updated)
def arange(self: TensorType, start: int, stop: Optional[int]=None, step: Optional[int]=None) -> TensorType:
    """NumPy-style arange: arange(stop) or arange(start, stop[, step])."""
    if stop is None:
        # single-argument form: the first argument is the stop value
        start, stop = 0, start
    if step is None:
        step = 1
    return type(self)(torch.arange(start=start, end=stop, step=step, device=self.raw.device))
def cumsum(self: TensorType, axis: Optional[int]=None) -> TensorType:
    """Cumulative sum along `axis`; flattens first when axis is None (NumPy semantics)."""
    if axis is not None:
        return type(self)(self.raw.cumsum(dim=axis))
    return type(self)(self.raw.reshape(-1).cumsum(dim=0))
def flip(self: TensorType, axis: Optional[AxisAxes]=None) -> TensorType:
    """Reverse element order along the given axis/axes (all axes when None)."""
    if axis is None:
        axis = tuple(range(self.ndim))
    dims = axis if isinstance(axis, Iterable) else (axis,)
    return type(self)(self.raw.flip(dims=dims))
def meshgrid(self: TensorType, *tensors: TensorType, indexing: str='xy') -> Tuple[(TensorType, ...)]:
    """Build coordinate grids from self and `tensors` with 'xy' or 'ij' indexing."""
    raws = unwrap_(*tensors)
    if indexing == 'ij' or len(raws) == 0:
        grids = torch.meshgrid(self.raw, *raws)
    elif indexing == 'xy':
        # 'xy' swaps the first two inputs here and the first two outputs below
        grids = torch.meshgrid(raws[0], self.raw, *raws[1:])
    else:
        raise ValueError(f"Valid values for indexing are 'xy' and 'ij', got {indexing}")
    wrapped = [type(self)(g) for g in grids]
    if indexing == 'xy' and len(wrapped) >= 2:
        wrapped[0], wrapped[1] = wrapped[1], wrapped[0]
    return tuple(wrapped)
def pad(self: TensorType, paddings: Tuple[(Tuple[(int, int)], ...)], mode: str='constant', value: float=0) -> TensorType:
    """Pad each dimension by (before, after) amounts, NumPy-style.

    Only 'constant' and 'reflect' modes are supported; 'reflect' is limited
    to 3D/4D tensors with zero padding on the leading dimensions.
    """
    if len(paddings) != self.ndim:
        raise ValueError('pad requires a tuple for each dimension')
    for pair in paddings:
        if len(pair) != 2:
            raise ValueError('pad requires a tuple for each dimension')
    if mode not in ('constant', 'reflect'):
        raise ValueError("pad requires mode 'constant' or 'reflect'")
    if mode == 'reflect':
        if self.ndim not in (3, 4):
            raise NotImplementedError
        lead = self.ndim - 2
        if paddings[:lead] != ((0, 0),) * lead:
            raise NotImplementedError
        paddings = paddings[lead:]
    # torch.nn.functional.pad expects a flat, last-dimension-first list
    flat = [amount for pair in reversed(paddings) for amount in pair]
    return type(self)(torch.nn.functional.pad(self.raw, flat, mode=mode, value=value))
def isnan(self: TensorType) -> TensorType:
    """Element-wise NaN mask."""
    return type(self)(self.raw.isnan())
def isinf(self: TensorType) -> TensorType:
    """Element-wise infinity mask."""
    return type(self)(self.raw.isinf())
def crossentropy(self: TensorType, labels: TensorType) -> TensorType:
    """Per-sample (unreduced) cross-entropy between 2D logits and 1D labels."""
    if self.ndim != 2:
        raise ValueError('crossentropy only supported for 2D logits tensors')
    if self.shape[:1] != labels.shape:
        raise ValueError('labels must be 1D and must match the length of logits')
    ce = torch.nn.functional.cross_entropy(self.raw, labels.raw, reduction='none')
    return type(self)(ce)
def slogdet(self: TensorType) -> Tuple[(TensorType, TensorType)]:
    """Return (sign, log|det|) for the matrix (or batch of matrices)."""
    sign, logabsdet = torch.slogdet(self.raw)
    return type(self)(sign), type(self)(logabsdet)
def _value_and_grad_fn(self: TensorType, f: Callable[(..., TensorType)]) -> Callable[(..., Tuple[(TensorType, TensorType)])]:
    # Typing overload signature: no has_aux argument -> returns (value, grad).
    # NOTE(review): the @overload decorator appears stripped in this rendering — confirm.
    ...
def _value_and_grad_fn(self: TensorType, f: Callable[(..., TensorType)], has_aux: Literal[False]) -> Callable[(..., Tuple[(TensorType, TensorType)])]:
    # Typing overload signature: has_aux=False -> returns (value, grad).
    # NOTE(review): the @overload decorator appears stripped in this rendering — confirm.
    ...
def _value_and_grad_fn(self: TensorType, f: Callable[(..., Tuple[(TensorType, Any)])], has_aux: Literal[True]) -> Callable[(..., Tuple[(TensorType, Any, TensorType)])]:
    # Typing overload signature: has_aux=True -> returns (value, aux, grad).
    # NOTE(review): the @overload decorator appears stripped in this rendering — confirm.
    ...
def _value_and_grad_fn(self: TensorType, f: Callable, has_aux: bool=False) -> Callable[(..., Tuple)]:
    """Wrap `f` into a function returning (value, [aux,] grad-of-value-w.r.t.-input)."""
    def value_and_grad(x: TensorType, *args: Any, **kwargs: Any) -> Tuple:
        # fresh leaf tensor so autograd tracks only this call
        x = type(self)(x.raw.clone().requires_grad_())
        if has_aux:
            loss, aux = f(x, *args, **kwargs)
        else:
            loss = f(x, *args, **kwargs)
        raw_loss = loss.raw
        raw_grad = torch.autograd.grad(raw_loss, x.raw)[0]
        grad = type(self)(raw_grad)
        assert grad.shape == x.shape
        loss = type(self)(raw_loss.detach())
        if not has_aux:
            return loss, grad
        # detach any wrapped tensors hiding in the auxiliary output
        if isinstance(aux, PyTorchTensor):
            aux = PyTorchTensor(aux.raw.detach())
        elif isinstance(aux, tuple):
            aux = tuple(PyTorchTensor(t.raw.detach()) if isinstance(t, PyTorchTensor) else t for t in aux)
        return loss, aux, grad
    return value_and_grad
def sign(self: TensorType) -> TensorType:
    """Element-wise sign (-1, 0 or 1)."""
    return type(self)(self.raw.sign())
def sqrt(self: TensorType) -> TensorType:
    """Element-wise square root."""
    return type(self)(self.raw.sqrt())
def inv(self: TensorType) -> TensorType:
    """Matrix inverse (batched over leading dimensions)."""
    inverted = torch.linalg.inv(self.raw)
    return type(self)(inverted)
def round(self: TensorType) -> TensorType:
    """Element-wise rounding to the nearest integer."""
    return type(self)(self.raw.round())
def ceil(self: TensorType) -> TensorType:
    """Element-wise ceiling."""
    return type(self)(self.raw.ceil())
def floor(self: TensorType) -> TensorType:
    """Element-wise floor."""
    return type(self)(self.raw.floor())
def float32(self: TensorType) -> TensorType:
    """Cast to torch.float32."""
    target = torch.float32
    return self.astype(target)
def float64(self: TensorType) -> TensorType:
    """Cast to torch.float64."""
    target = torch.float64
    return self.astype(target)
def where(self: TensorType, x: TensorOrScalar, y: TensorOrScalar) -> TensorType:
    """Element-wise select: x where self (a bool mask) is True, else y.

    Scalar operands are broadcast to self's shape; a scalar's dtype follows
    the other operand when that one is a Tensor, otherwise float32.

    Raises:
        TypeError: if x or y is neither a Tensor nor an int/float.
    """
    if isinstance(x, Tensor):
        x_ = x.raw
    elif isinstance(x, (int, float)):
        if isinstance(y, Tensor):
            dtype = y.raw.dtype
        else:
            dtype = torch.float32
        x_ = torch.full_like(self.raw, x, dtype=dtype)
    else:
        raise TypeError('expected x to be a Tensor, int or float')
    if isinstance(y, Tensor):
        y_ = y.raw
    elif isinstance(y, (int, float)):
        if isinstance(x, Tensor):
            dtype = x.raw.dtype
        else:
            dtype = torch.float32
        y_ = torch.full_like(self.raw, y, dtype=dtype)
    else:
        # BUG FIX: the original had no else branch here, so an invalid `y`
        # fell through and raised NameError on y_ instead of a clear TypeError.
        raise TypeError('expected y to be a Tensor, int or float')
    return type(self)(torch.where(self.raw, x_, y_))
def __lt__(self: TensorType, other: TensorOrScalar) -> TensorType:
    """Element-wise less-than."""
    return type(self)(self.raw < unwrap1(other))
def __le__(self: TensorType, other: TensorOrScalar) -> TensorType:
    """Element-wise less-than-or-equal."""
    return type(self)(self.raw <= unwrap1(other))
def __eq__(self: TensorType, other: TensorOrScalar) -> TensorType:
    """Element-wise equality (returns a tensor mask, not a bool)."""
    return type(self)(self.raw == unwrap1(other))
def __ne__(self: TensorType, other: TensorOrScalar) -> TensorType:
    """Element-wise inequality (returns a tensor mask, not a bool)."""
    return type(self)(self.raw != unwrap1(other))
def __gt__(self: TensorType, other: TensorOrScalar) -> TensorType:
    """Element-wise greater-than."""
    return type(self)(self.raw > unwrap1(other))
def __ge__(self: TensorType, other: TensorOrScalar) -> TensorType:
    """Element-wise greater-than-or-equal."""
    return type(self)(self.raw >= unwrap1(other))
def __getitem__(self: TensorType, index: Any) -> TensorType:
    """Index/slice, unwrapping any wrapped-Tensor components of the index first."""
    if isinstance(index, Tensor):
        index = index.raw
    elif isinstance(index, tuple):
        index = tuple(part.raw if isinstance(part, Tensor) else part for part in index)
    return type(self)(self.raw[index])
def take_along_axis(self: TensorType, index: TensorType, axis: int) -> TensorType:
    """Gather values along `axis`; only the last axis is implemented."""
    if axis % self.ndim != self.ndim - 1:
        raise NotImplementedError('take_along_axis is currently only supported for the last axis')
    return type(self)(torch.gather(self.raw, axis, index.raw))
def bool(self: TensorType) -> TensorType:
    """Cast to torch.bool."""
    target = torch.bool
    return self.astype(target)
def init_bias_xavier(model, mode='fan_out', nonlinearity='relu', logger=None):
    """Re-initialize the bias of every nn.Conv2d in `model`.

    Each bias is redrawn from N(0, sqrt(2)/sqrt(1 + 9*len(bias))).
    `mode` and `nonlinearity` are accepted for interface compatibility but
    are unused by this bias-only initializer.

    Args:
        model: module whose submodules are scanned via model.modules().
        logger: optional logger; when None, logging is skipped.
    """
    layers_initialized = 0
    for m in model.modules():
        if isinstance(m, nn.Conv2d) and m.bias is not None:
            layers_initialized += 1
            m.bias.data.normal_(0, math.sqrt(2) / math.sqrt(1 + 9 * m.bias.data.shape[0]))
    # BUG FIX: previously logger.info was called unconditionally, raising
    # AttributeError whenever logger was left at its default of None.
    if logger is not None:
        logger.info((('Initialized ' + str(layers_initialized)) + ' bias conv2d layers using nn.init.xavier.noraml_'))
def build_recognizer(cfg, device):
    """Construct the recognizer from config.

    Steps: instantiate from the registry, optionally convert to sync BN
    (distributed only), insert extra conv blocks, load preloaded weights,
    move to `device` and wrap in DDP when running distributed.
    """
    world_size = du.get_world_size()
    model = registry.RECOGNIZER[cfg.MODEL.RECOGNIZER.NAME](cfg)
    if cfg.MODEL.NORM.SYNC_BN and world_size > 1:
        logger.info('start sync BN on the process group of {}'.format(du.LOCAL_RANK_GROUP))
        convert_sync_bn(model, du.LOCAL_PROCESS_GROUP)
    if cfg.MODEL.CONV.ADD_BLOCKS is not None:
        assert isinstance(cfg.MODEL.CONV.ADD_BLOCKS, tuple)
        logger.info(f'add blocks: {cfg.MODEL.CONV.ADD_BLOCKS}')
        inserters = {
            'RepVGGBlock': insert_repvgg_block,
            'ACBlock': insert_acblock,
            'DiverseBranchBlock': insert_dbblock,
        }
        for add_block in cfg.MODEL.CONV.ADD_BLOCKS:
            if add_block in inserters:
                inserters[add_block](model)
    preloaded = cfg.MODEL.RECOGNIZER.PRELOADED
    if preloaded != '':
        logger.info(f'load preloaded: {preloaded}')
        check_pointer = CheckPointer(model)
        check_pointer.load(preloaded, map_location=device)
        logger.info('finish loading model weights')
    if du.is_master_proc():
        logger.info(f'full model info:')
        logger.info(model)
    model = model.to(device=device)
    if du.get_world_size() > 1:
        model = DDP(model, device_ids=[device], output_device=device, find_unused_parameters=False)
    return model
def train_cpu(data, label, num_class, list_hidden_nodes, initial_learning_rate, momentum, max_steps, decay_steps, decay_factor, batch_size, train_dir, moving_average_decay=0.9999, summary_steps=500, checkpoint_steps=10000, MLP_trainable=True, save_file='model.ckpt', load_file=None, random_seed=None):
    """Train a classifier on CPU with the TF1 graph/session/queue APIs.

    Args:
        data: array whose samples lie along axis 1 (it is transposed before
            batching and num_data is read from data.shape[1]).
        label: per-sample labels aligned with data.
        num_class: number of output classes for inference().
        list_hidden_nodes: hidden-layer sizes for inference().
        initial_learning_rate, momentum, decay_steps, decay_factor: learning
            schedule forwarded to train().
        max_steps: total number of optimization steps.
        batch_size: minibatch size of the shuffle queue.
        train_dir: output directory for summaries and checkpoints.
        moving_average_decay: EMA decay forwarded to train().
        summary_steps: interval for writing summaries.
        checkpoint_steps: interval for saving checkpoints.
        MLP_trainable: forwarded to inference() (controls trainability).
        save_file: checkpoint file name inside train_dir.
        load_file: optional checkpoint to warm-start matching variables from.
        random_seed: seeds numpy and TF when not None.
    """
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        if (random_seed is not None):
            np.random.seed(random_seed)
            tf.set_random_seed(random_seed)
        global_step = tf.Variable(0, trainable=False)
        # queue-based shuffled minibatches; data.T puts samples on axis 0
        (data_holder, label_holder) = tf.train.shuffle_batch([tf.constant(data.T), tf.constant(label)], batch_size=batch_size, capacity=(20 * batch_size), min_after_dequeue=(10 * batch_size), enqueue_many=True)
        data_holder = tf.cast(data_holder, tf.float32)
        label_holder = tf.cast(label_holder, tf.float32)
        (logits, feats) = inference(data_holder, list_hidden_nodes, num_class, MLP_trainable=MLP_trainable)
        (loss, accuracy) = tcl_loss(logits, label_holder)
        (train_op, lr) = train(loss, accuracy, global_step=global_step, initial_learning_rate=initial_learning_rate, momentum=momentum, decay_steps=decay_steps, decay_factor=decay_factor, moving_average_decay=moving_average_decay)
        saver = tf.train.Saver(tf.global_variables())
        summary_op = tf.summary.merge_all()
        init = tf.global_variables_initializer()
        config = tf.ConfigProto(log_device_placement=False)
        sess = tf.Session(config=config)
        sess.run(init)
        if (load_file is not None):
            # warm start: restore only FILTER_COLLECTION variables that also
            # exist (by name) in the given checkpoint
            print('Load trainable parameters from {0:s}...'.format(load_file))
            reader = tf.train.NewCheckpointReader(load_file)
            reader_var_to_shape_map = reader.get_variable_to_shape_map()
            load_vars = tf.get_collection(FILTER_COLLECTION)
            initialized_vars = []
            for lv in load_vars:
                if (lv.name.split(':')[0] in reader_var_to_shape_map):
                    print(' {0:s}'.format(lv.name))
                    initialized_vars.append(lv)
            saver_init = tf.train.Saver(initialized_vars)
            saver_init.restore(sess, load_file)
        tf.train.start_queue_runners(sess=sess)
        summary_writer = tf.summary.FileWriter(train_dir, sess.graph)
        num_data = data.shape[1]
        # NOTE(review): num_steps_in_epoch is computed but never used here
        num_steps_in_epoch = int(np.floor((num_data / batch_size)))
        for step in range(max_steps):
            (_, loss_value, accuracy_value, lr_value) = sess.run([train_op, loss, accuracy, lr])
            assert (not np.isnan(loss_value)), 'Model diverged with loss = NaN'
            if ((step % summary_steps) == 0):
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)
            if ((step % checkpoint_steps) == 0):
                checkpoint_path = os.path.join(train_dir, save_file)
                saver.save(sess, checkpoint_path, global_step=step)
        # final checkpoint after the loop completes
        save_path = os.path.join(train_dir, save_file)
        print('Save model in file: {0:s}'.format(save_path))
        saver.save(sess, save_path)
def eye_like(A: torch.Tensor) -> torch.Tensor:
    """Identity matrix of size A.shape[-1] (A's dtype/device), broadcast as a
    view to A's full shape."""
    n = A.shape[-1]
    identity = torch.eye(n, dtype=A.dtype, device=A.device)
    return identity.expand_as(A)
# NOTE(review): stray `_arg_scope` token below — looks like a mangled
# @slim.add_arg_scope decorator in this rendering; confirm against upstream.
_arg_scope
def stack_blocks_dense(net, blocks, output_stride=None, store_non_strided_activations=False, outputs_collections=None):
    """Stack ResNet-style `blocks`, switching to atrous convolution once the
    requested `output_stride` is reached.

    `current_stride` tracks the cumulative stride applied to `net`; when it
    equals `output_stride`, subsequent units run with stride 1 and a growing
    `rate` (dilation) instead, so spatial resolution stops shrinking.

    When `store_non_strided_activations` is True, the last unit of each block
    is run unstrided (so its full-resolution activations can be collected)
    and the block's stride is applied afterwards via subsampling.

    Raises:
        ValueError: if the accumulated stride overshoots or cannot match
            `output_stride`.
    """
    current_stride = 1
    rate = 1
    for block in blocks:
        with tf.variable_scope(block.scope, 'block', [net]) as sc:
            block_stride = 1
            for (i, unit) in enumerate(block.args):
                if (store_non_strided_activations and (i == (len(block.args) - 1))):
                    # defer the last unit's stride: run it unstrided here and
                    # apply block_stride after collecting outputs
                    block_stride = unit.get('stride', 1)
                    unit = dict(unit, stride=1)
                with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]):
                    if ((output_stride is not None) and (current_stride == output_stride)):
                        # target stride reached: dilate instead of striding
                        net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))
                        rate *= unit.get('stride', 1)
                    else:
                        net = block.unit_fn(net, rate=1, **unit)
                        current_stride *= unit.get('stride', 1)
                        if ((output_stride is not None) and (current_stride > output_stride)):
                            raise ValueError('The target output_stride cannot be reached.')
            net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
            if ((output_stride is not None) and (current_stride == output_stride)):
                rate *= block_stride
            else:
                net = subsample(net, block_stride)
                current_stride *= block_stride
                if ((output_stride is not None) and (current_stride > output_stride)):
                    raise ValueError('The target output_stride cannot be reached.')
    if ((output_stride is not None) and (current_stride != output_stride)):
        raise ValueError('The target output_stride cannot be reached.')
    return net
class RandomForest(IterativeComponentWithSampleWeight, AutotabularClassificationAlgorithm):
    """Random forest classifier component fitted iteratively via warm_start."""

    def __init__(self, criterion, max_features, max_depth, min_samples_split, min_samples_leaf, min_weight_fraction_leaf, bootstrap, max_leaf_nodes, min_impurity_decrease, random_state=None, n_jobs=1, class_weight=None):
        self.n_estimators = self.get_max_iter()
        self.criterion = criterion
        self.max_features = max_features
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.bootstrap = bootstrap
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.class_weight = class_weight
        self.estimator = None

    # BUG FIX: these three methods are defined without `self` but were plain
    # instance methods, so calls like self.get_max_iter() in __init__ raised
    # TypeError. They are static methods (as used throughout the component API).
    @staticmethod
    def get_max_iter():
        """Upper bound on the number of trees grown iteratively."""
        return 512

    def get_current_iter(self):
        """Return the number of trees in the current estimator."""
        return self.estimator.n_estimators

    def iterative_fit(self, X, y, sample_weight=None, n_iter=1, refit=False):
        """Grow the forest by `n_iter` trees (warm start), creating it on first call."""
        from sklearn.ensemble import RandomForestClassifier
        if refit:
            self.estimator = None
        if self.estimator is None:
            # normalize hyperparameters coming in as strings from the config space
            self.n_estimators = int(self.n_estimators)
            if check_none(self.max_depth):
                self.max_depth = None
            else:
                self.max_depth = int(self.max_depth)
            self.min_samples_split = int(self.min_samples_split)
            self.min_samples_leaf = int(self.min_samples_leaf)
            self.min_weight_fraction_leaf = float(self.min_weight_fraction_leaf)
            if self.max_features not in ('sqrt', 'log2', 'auto'):
                # fractional max_features is interpreted as an exponent on n_features
                max_features = int(X.shape[1] ** float(self.max_features))
            else:
                max_features = self.max_features
            self.bootstrap = check_for_bool(self.bootstrap)
            if check_none(self.max_leaf_nodes):
                self.max_leaf_nodes = None
            else:
                self.max_leaf_nodes = int(self.max_leaf_nodes)
            self.min_impurity_decrease = float(self.min_impurity_decrease)
            self.estimator = RandomForestClassifier(n_estimators=n_iter, criterion=self.criterion, max_features=max_features, max_depth=self.max_depth, min_samples_split=self.min_samples_split, min_samples_leaf=self.min_samples_leaf, min_weight_fraction_leaf=self.min_weight_fraction_leaf, bootstrap=self.bootstrap, max_leaf_nodes=self.max_leaf_nodes, min_impurity_decrease=self.min_impurity_decrease, random_state=self.random_state, n_jobs=self.n_jobs, class_weight=self.class_weight, warm_start=True)
        else:
            self.estimator.n_estimators += n_iter
            self.estimator.n_estimators = min(self.estimator.n_estimators, self.n_estimators)
        self.estimator.fit(X, y, sample_weight=sample_weight)
        return self

    def configuration_fully_fitted(self):
        """True once the forest has grown to its maximum number of trees."""
        if self.estimator is None:
            return False
        return not (len(self.estimator.estimators_) < self.n_estimators)

    def predict(self, X):
        if self.estimator is None:
            raise NotImplementedError
        return self.estimator.predict(X)

    def predict_proba(self, X):
        if self.estimator is None:
            raise NotImplementedError()
        probas = self.estimator.predict_proba(X)
        probas = convert_multioutput_multiclass_to_multilabel(probas)
        return probas

    @staticmethod
    def get_properties(dataset_properties=None):
        """Describe this component's capabilities for the framework."""
        return {'shortname': 'RF', 'name': 'Random Forest Classifier', 'handles_regression': False, 'handles_classification': True, 'handles_multiclass': True, 'handles_multilabel': True, 'handles_multioutput': False, 'is_deterministic': True, 'input': (DENSE, SPARSE, UNSIGNED_DATA), 'output': (PREDICTIONS,)}

    @staticmethod
    def get_hyperparameter_search_space(dataset_properties=None):
        """Build the ConfigSpace describing this component's hyperparameters."""
        cs = ConfigurationSpace()
        criterion = CategoricalHyperparameter('criterion', ['gini', 'entropy'], default_value='gini')
        max_features = UniformFloatHyperparameter('max_features', 0.0, 1.0, default_value=0.5)
        max_depth = UnParametrizedHyperparameter('max_depth', 'None')
        min_samples_split = UniformIntegerHyperparameter('min_samples_split', 2, 20, default_value=2)
        min_samples_leaf = UniformIntegerHyperparameter('min_samples_leaf', 1, 20, default_value=1)
        min_weight_fraction_leaf = UnParametrizedHyperparameter('min_weight_fraction_leaf', 0.0)
        max_leaf_nodes = UnParametrizedHyperparameter('max_leaf_nodes', 'None')
        min_impurity_decrease = UnParametrizedHyperparameter('min_impurity_decrease', 0.0)
        bootstrap = CategoricalHyperparameter('bootstrap', ['True', 'False'], default_value='True')
        cs.add_hyperparameters([criterion, max_features, max_depth, min_samples_split, min_samples_leaf, min_weight_fraction_leaf, max_leaf_nodes, bootstrap, min_impurity_decrease])
        return cs
def train(sess_config, input_hooks, model, data_init_op, steps, checkpoint_dir, tf_config=None, server=None):
    """Run the TF1 training loop inside a MonitoredTrainingSession.

    Args:
        sess_config: tf.ConfigProto for the session.
        input_hooks: input-pipeline SessionRunHooks to attach.
        model: object exposing is_training, global_step, loss and train_op.
        data_init_op: initializer op for the input pipeline (grouped with
            local-variable init in the scaffold).
        steps: last global step; also used as the checkpoint interval below.
        checkpoint_dir: directory for checkpoints, summaries and timelines.
        tf_config: optional dict; `is_chief` is read from it when present.
        server: optional server whose target the session connects to.

    NOTE(review): depends on a module-level `args` (keep_checkpoint_max,
    timeline, save_steps) — confirm it is initialized before calling.
    """
    model.is_training = True
    hooks = []
    hooks.extend(input_hooks)
    scaffold = tf.compat.v1.train.Scaffold(local_init_op=tf.group(tf.compat.v1.local_variables_initializer(), data_init_op), saver=tf.compat.v1.train.Saver(max_to_keep=args.keep_checkpoint_max))
    stop_hook = tf.compat.v1.train.StopAtStepHook(last_step=steps)
    log_hook = tf.compat.v1.train.LoggingTensorHook({'steps': model.global_step, 'loss': model.loss}, every_n_iter=100)
    hooks.append(stop_hook)
    hooks.append(log_hook)
    if (args.timeline > 0):
        # profile every args.timeline steps into the checkpoint directory
        hooks.append(tf.compat.v1.train.ProfilerHook(save_steps=args.timeline, output_dir=checkpoint_dir))
    save_steps = steps
    with tf.compat.v1.train.MonitoredTrainingSession(master=(server.target if server else ''), is_chief=(tf_config['is_chief'] if tf_config else True), hooks=hooks, scaffold=scaffold, checkpoint_dir=checkpoint_dir, save_checkpoint_steps=save_steps, summary_dir=checkpoint_dir, save_summaries_steps=args.save_steps, config=sess_config) as sess:
        while (not sess.should_stop()):
            sess.run([model.loss, model.train_op])
    print('Training completed.')
def log_stats(stats, misc_args):
if hasattr(misc_args, 'epoch'):
lines = ('[%s][%s][Epoch %d][Iter %d / %d]\n' % (misc_args.run_name, misc_args.cfg_filename, misc_args.epoch, misc_args.step, misc_args.iters_per_epoch))
else:
lines = ('[%s][%s][Step %d / %d]\n' % (misc_args.run_name, misc_args.cfg_filename, stats['iter'], cfg.SOLVER.MAX_ITER))
lines += ('\t\tloss: %.6f, lr: %.6f time: %.6f, eta: %s\n' % (stats['loss'], stats['lr'], stats['time'], stats['eta']))
if stats['head_losses']:
lines += (('\t\t' + ', '.join((('%s: %.6f' % (k, v)) for (k, v) in stats['head_losses'].items()))) + '\n')
print(lines[:(- 1)]) |
def fbeta(y_true, y_pred, beta=1):
    """F-beta score from the module's precision/recall helpers (Keras backend)."""
    from keras import backend as K
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')
    # no positive ground-truth labels at all -> score defined as 0
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    beta_sq = beta ** 2
    return ((1 + beta_sq) * (p * r)) / ((beta_sq * p) + r + K.epsilon())
def _get_fused_attention(feature1, feature2):
upsample_module = nn.Upsample(size=(224, 224), mode='bilinear')
feat_map1 = feature1.detach().clone()
feat_map2 = feature2.detach().clone()
return ((torch.sigmoid(upsample_module(feat_map1)) + torch.sigmoid(upsample_module(feat_map2))) / 2.0) |
class MobileNetV2Block(nn.Module, ABC):
    """Stack of `repeat` MobileNetV2 inverted-residual units.

    Only the first unit uses the requested stride; subsequent units use
    stride 1, and each unit after the first takes out_channels as input.
    """

    def __init__(self, in_channels, out_channels, expansion_rate=1, repeat=1, stride=1, padding=1, conv_layer=None, norm_layer=None, act_layer=None):
        super(MobileNetV2Block, self).__init__()
        units = []
        for idx in range(repeat):
            unit_stride = stride if idx == 0 else 1
            units.append(MobileNetV2InvertedResidual(in_channels, out_channels, expansion_rate=expansion_rate, stride=unit_stride, padding=padding, conv_layer=conv_layer, norm_layer=norm_layer, act_layer=act_layer))
            in_channels = out_channels
        self.conv = nn.Sequential(*units)

    def forward(self, x):
        return self.conv(x)
def drop_padding(seq: Sequence[Any], pad_id: Any):
    """Strip trailing `pad_id` elements from `seq` and return the result as a
    list; when pad_id is None, return `seq` unchanged."""
    if pad_id is None:
        return seq
    end = len(seq)
    while end > 0 and seq[end - 1] == pad_id:
        end -= 1
    return list(seq[:end])
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None, initial_state_fw=None, initial_state_bw=None, dtype=None, parallel_iterations=None, swap_memory=False, time_major=False, scope=None):
    """Batch-flattening wrapper around _bidirectional_dynamic_rnn (batch-major only)."""
    assert not time_major
    flat_inputs = flatten(inputs, 2)
    if sequence_length is None:
        flat_len = None
    else:
        flat_len = tf.cast(flatten(sequence_length, 0), 'int64')
    (flat_fw, flat_bw), final_state = _bidirectional_dynamic_rnn(cell_fw, cell_bw, flat_inputs, sequence_length=flat_len, initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw, dtype=dtype, parallel_iterations=parallel_iterations, swap_memory=swap_memory, time_major=time_major, scope=scope)
    fw_outputs = reconstruct(flat_fw, inputs, 2)
    bw_outputs = reconstruct(flat_bw, inputs, 2)
    return (fw_outputs, bw_outputs), final_state
def get_split(config, split_name, dataset_dir, file_pattern=None, reader=None):
    """Build a shuffled (images, labels) queue pipeline over sound20 TFRecords.

    Returns (images, labels, num_examples) for the requested split. For
    'train' all shards loop forever; for 'test' a single shard is read once
    with batch_size 1.

    NOTE(review): the `file_pattern` and `reader` parameters are accepted but
    immediately overwritten below; a recognized split other than train/test
    would leave num_epochs/all_file unset — confirm intended call sites.
    """
    all_file = []
    reader = tf.TFRecordReader()
    batch_size = config.batch_size
    data_splitnum = config.data_split_num
    file_pattern = _FILE_PATTERN
    if (split_name == 'train'):
        num_epochs = None
        for i in range(data_splitnum):
            all_file.append(os.path.join(dataset_dir, 'sound20/', (file_pattern % (split_name, i))))
    elif (split_name == 'test'):
        # single pass, one example at a time
        (num_epochs, batch_size) = (1, 1)
        all_file.append(os.path.join(dataset_dir, 'sound20/', (file_pattern % (split_name, 0))))
    elif (split_name not in SPLITS_TO_SIZES):
        raise ValueError(('split name %s was not recognized.' % split_name))
    filename_queue = tf.train.string_input_producer(all_file, num_epochs=num_epochs, shuffle=False)
    (_, serialized_example) = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example, features={'height': tf.FixedLenFeature([], tf.int64), 'width': tf.FixedLenFeature([], tf.int64), 'image_string': tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([], tf.float32)})
    # decode the raw uint8 image and restore its (height, width, 1) shape
    image = tf.decode_raw(features['image_string'], tf.uint8)
    label = tf.cast(features['label'], tf.float32)
    height = tf.cast(features['height'], tf.int32)
    width = tf.cast(features['width'], tf.int32)
    image = tf.reshape(image, [height, width, 1])
    resized_image = tf.image.resize_images(image, size=[IMAGE_HEIGHT, IMAGE_WIDTH])
    min_after_dequeue = 10000
    capacity = (min_after_dequeue + (3 * batch_size))
    (images, labels) = tf.train.shuffle_batch([resized_image, label], batch_size=batch_size, capacity=capacity, num_threads=1, min_after_dequeue=min_after_dequeue, seed=config.random_seed)
    return (images, labels, SPLITS_TO_SIZES[split_name])
def menet160_8x1_g8(**kwargs):
    """MENet-160 (8x1, g=8) factory; extra kwargs are forwarded to get_menet."""
    config = dict(first_stage_channels=160, side_channels=8, groups=8, model_name='menet160_8x1_g8')
    return get_menet(**config, **kwargs)
class RegexpTokenizer(Tokenizer):
    """Rule-based tokenizer built from a single alternation of named patterns.

    Patterns are tried left to right, so more specific classes (digits,
    titles, abbreviations, negations, hyphenated words) win over the generic
    alpha-numeric and catch-all classes. When `substitutions` is enabled,
    quote/dash/ellipsis tokens are normalized to PTB-style forms.
    """
    DIGIT = '\\p{Nd}+([:\\.\\,]\\p{Nd}+)*'
    TITLE = '(dr|esq|hon|jr|mr|mrs|ms|prof|rev|sr|st|rt|messrs|mmes|msgr)\\.(?=\\p{Z})'
    ABBRV = '([\\p{L}]\\.){2,}(?=\\p{Z}|$)'
    ALPHA_NUM = '[\\p{L}\\p{N}\\p{M}]++'
    HYPHEN = '{A}([-\\u058A\\u2010\\u2011]{A})+'.format(A=ALPHA_NUM)
    NEGATION = "((?!n't)[\\p{L}\\p{N}\\p{M}])++(?=n't)|n't"
    CONTRACTION1 = 'can(?=not\\b)'
    CONTRACTION2 = "'([tsdm]|re|ll|ve)\\b"
    START_DQUOTE = '(?<=[\\p{Z}\\(\\[{<]|^)(``|["\\u0093\\u201C\\u00AB])(?!\\p{Z})'
    START_SQUOTE = "(?<=[\\p{Z}\\(\\[{<]|^)[\\'\\u0091\\u2018\\u201B\\u2039](?!\\p{Z})"
    END_DQUOTE = '(?<!\\p{Z})(\\\'\\\'|["\\u0094\\u201D\\u00BB])'
    END_SQUOTE = "(?<!\\p{Z})[\\'\\u0092\\u2019\\u203A]"
    DASH = '--|[\\u0096\\u0097\\u2013\\u2014\\u2015]'
    ELLIPSES = '\\.\\.\\.|\\u2026'
    PUNCT = '\\p{P}'
    NON_WS = '[^\\p{Z}\\p{C}]'

    def __init__(self, **kwargs):
        """Compile the master pattern; this tokenizer supports no annotators.

        Keyword Args:
            substitutions: normalize quotes/dashes/ellipses (default True).
        """
        # BUG FIX: the ellipses group used `(?<ellipses>...)`, inconsistent
        # with every other group's `(?P<name>...)` syntax (and invalid under
        # the stdlib `re`); flags are now combined with `|` instead of `+`.
        self._regexp = regex.compile(('(?P<digit>%s)|(?P<title>%s)|(?P<abbr>%s)|(?P<neg>%s)|(?P<hyph>%s)|(?P<contr1>%s)|(?P<alphanum>%s)|(?P<contr2>%s)|(?P<sdquote>%s)|(?P<edquote>%s)|(?P<ssquote>%s)|(?P<esquote>%s)|(?P<dash>%s)|(?P<ellipses>%s)|(?P<punct>%s)|(?P<nonws>%s)' % (self.DIGIT, self.TITLE, self.ABBRV, self.NEGATION, self.HYPHEN, self.CONTRACTION1, self.ALPHA_NUM, self.CONTRACTION2, self.START_DQUOTE, self.END_DQUOTE, self.START_SQUOTE, self.END_SQUOTE, self.DASH, self.ELLIPSES, self.PUNCT, self.NON_WS)), flags=(regex.IGNORECASE | regex.UNICODE | regex.MULTILINE))
        if (len(kwargs.get('annotators', {})) > 0):
            logger.warning(('%s only tokenizes! Skipping annotators: %s' % (type(self).__name__, kwargs.get('annotators'))))
        self.annotators = set()
        self.substitutions = kwargs.get('substitutions', True)

    def tokenize(self, text):
        """Tokenize `text`, returning Tokens of (token, text-with-ws, span)."""
        data = []
        matches = [m for m in self._regexp.finditer(text)]
        for i in range(len(matches)):
            token = matches[i].group()
            if self.substitutions:
                # normalize quote/dash/ellipsis variants to canonical forms
                groups = matches[i].groupdict()
                if groups['sdquote']:
                    token = '``'
                elif groups['edquote']:
                    token = "''"
                elif groups['ssquote']:
                    token = '`'
                elif groups['esquote']:
                    token = "'"
                elif groups['dash']:
                    token = '--'
                elif groups['ellipses']:
                    token = '...'
            span = matches[i].span()
            start_ws = span[0]
            # extend each token's raw text up to the start of the next match
            # so inter-token whitespace is preserved
            if ((i + 1) < len(matches)):
                end_ws = matches[(i + 1)].span()[0]
            else:
                end_ws = span[1]
            data.append((token, text[start_ws:end_ws], span))
        return Tokens(data, self.annotators)
class iSLReLU(nn.Module):
    """Invertible smooth leaky-ReLU-like activation.

    Forward map: y = (x + alpha * (sqrt(1 + x^2) - 1)) / (1 + alpha) with
    alpha = (1 - slope) / (1 + slope). The last forward input is cached so
    logdet()/reduce_func_singular_values() refer to the most recent call.
    """

    def __init__(self, slope=0.1):
        self.alpha = (1 - slope) / (1 + slope)
        super().__init__()

    def forward(self, x):
        # cache the input for the Jacobian-based methods below
        self._last_x = x
        a = self.alpha
        return (x + a * (torch.sqrt(1 + x * x) - 1)) / (1 + a)

    def inverse(self, y):
        """Analytic inverse of forward()."""
        a = self.alpha
        b = (1 + a) * y + a
        numerator = torch.sqrt(a ** 2 + (a * b) ** 2 - a ** 4) - b
        return numerator / (a ** 2 - 1)

    def logdet(self):
        """Log-determinant of the (diagonal) Jacobian at the last input."""
        x = self._last_x
        a = self.alpha
        per_elem = torch.log((1 + a * x / torch.sqrt(1 + x * x)) / (1 + a))
        if len(x.shape) == 2:
            return per_elem.sum(1)
        return per_elem.sum(3).sum(2).sum(1)

    def reduce_func_singular_values(self, func):
        """Apply `func` to the per-element derivatives and sum like logdet()."""
        x = self._last_x
        a = self.alpha
        values = func((1 + a * x / torch.sqrt(1 + x * x)) / (1 + a))
        if len(x.shape) == 2:
            return values.sum(1)
        return values.sum(3).sum(2).sum(1)
def batch(dataset, batch_size: int, drop_last: bool=False):
    """Return an iterator factory yielding batches of `dataset()` items.

    Dicts are stacked per key (recursively), str/bytes items are collected
    into lists, and tensors are stacked along a new leading dimension.

    Args:
        dataset: zero-arg callable returning an iterable of examples.
        batch_size: number of examples per full batch.
        drop_last: when True, a trailing partial batch is discarded.
            BUG FIX: this flag was previously accepted but ignored — the
            partial batch was always yielded.
    """
    def iter_fn():
        buffer = []
        def _stack(xs):
            if isinstance(xs[0], dict):
                return {k: _stack([x[k] for x in xs]) for k in xs[0].keys()}
            if isinstance(xs[0], (str, bytes)):
                return list(xs)
            return torch.stack(xs, 0)
        for x in dataset():
            buffer.append(x)
            if len(buffer) >= batch_size:
                yield _stack(buffer)
                buffer = []
        if len(buffer) > 0 and not drop_last:
            yield _stack(buffer)
    return iter_fn
def error_rate(predictions, labels):
    """Return (predicted classes, true classes, top-1 error rate in percent)."""
    assert len(predictions) == len(labels)
    preds = np.argmax(predictions, 1)
    orig = np.argmax(labels, 1)
    correct = np.sum(preds == orig)
    rate = 100.0 - 100.0 * correct / predictions.shape[0]
    return preds, orig, rate
def load_langpair_dataset(data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, left_pad_source, left_pad_target, remove_eos_from_source, max_source_positions, max_target_positions, prepend_bos=False, load_alignments=False, truncate_source=False, append_source_id=False, num_buckets=0, shuffle=True, pad_to_multiple=1, prepend_bos_src=None):
    """Load a fairseq-style parallel (src, tgt) dataset as a LanguagePairDataset.

    Shards named `{split}`, `{split}1`, `{split}2`, ... are loaded and, when
    `combine` is True, concatenated (the first shard upsampled by
    `upsample_primary`). Optional transforms: source truncation, BOS
    prepending, language-id token appending, and alignment loading.

    Raises:
        FileNotFoundError: if the first shard for `split` is missing.
    """
    def split_exists(split, src, tgt, lang, data_path):
        # a shard exists if the indexed dataset file for this direction is present
        filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
        return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
    src_datasets = []
    tgt_datasets = []
    for k in itertools.count():
        split_k = (split + (str(k) if (k > 0) else ''))
        # accept either src-tgt or tgt-src file naming for the shard
        if split_exists(split_k, src, tgt, src, data_path):
            prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))
        elif split_exists(split_k, tgt, src, src, data_path):
            prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src))
        elif (k > 0):
            # no more shards
            break
        else:
            raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
        src_dataset = data_utils.load_indexed_dataset((prefix + src), src_dict, dataset_impl)
        if truncate_source:
            # strip EOS, truncate to max_source_positions - 1, then re-append EOS
            src_dataset = AppendTokenDataset(TruncateDataset(StripTokenDataset(src_dataset, src_dict.eos()), (max_source_positions - 1)), src_dict.eos())
        src_datasets.append(src_dataset)
        tgt_dataset = data_utils.load_indexed_dataset((prefix + tgt), tgt_dict, dataset_impl)
        if (tgt_dataset is not None):
            tgt_datasets.append(tgt_dataset)
        logger.info('{} {} {}-{} {} examples'.format(data_path, split_k, src, tgt, len(src_datasets[(- 1)])))
        if (not combine):
            break
    # targets may be absent entirely (monolingual source), but never partially
    assert ((len(src_datasets) == len(tgt_datasets)) or (len(tgt_datasets) == 0))
    if (len(src_datasets) == 1):
        src_dataset = src_datasets[0]
        tgt_dataset = (tgt_datasets[0] if (len(tgt_datasets) > 0) else None)
    else:
        # concatenate shards, upsampling only the primary (first) one
        sample_ratios = ([1] * len(src_datasets))
        sample_ratios[0] = upsample_primary
        src_dataset = ConcatDataset(src_datasets, sample_ratios)
        if (len(tgt_datasets) > 0):
            tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
        else:
            tgt_dataset = None
    if prepend_bos:
        assert (hasattr(src_dict, 'bos_index') and hasattr(tgt_dict, 'bos_index'))
        src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
        if (tgt_dataset is not None):
            tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
    elif (prepend_bos_src is not None):
        logger.info(f'prepending src bos: {prepend_bos_src}')
        src_dataset = PrependTokenDataset(src_dataset, prepend_bos_src)
    eos = None
    if append_source_id:
        # append language-id tokens like [en]/[de]; the target one becomes EOS
        src_dataset = AppendTokenDataset(src_dataset, src_dict.index('[{}]'.format(src)))
        if (tgt_dataset is not None):
            tgt_dataset = AppendTokenDataset(tgt_dataset, tgt_dict.index('[{}]'.format(tgt)))
        eos = tgt_dict.index('[{}]'.format(tgt))
    align_dataset = None
    if load_alignments:
        align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, src, tgt))
        if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
            align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl)
    tgt_dataset_sizes = (tgt_dataset.sizes if (tgt_dataset is not None) else None)
    return LanguagePairDataset(src_dataset, src_dataset.sizes, src_dict, tgt_dataset, tgt_dataset_sizes, tgt_dict, left_pad_source=left_pad_source, left_pad_target=left_pad_target, remove_eos_from_source=remove_eos_from_source, align_dataset=align_dataset, eos=eos, num_buckets=num_buckets, shuffle=shuffle, pad_to_multiple=pad_to_multiple)
def dla169(cfg, pretrained=None, **kwargs):
    """Build a DLA-169 model (Bottleneck blocks, residual root).

    When *pretrained* is given, the matching pretrained weights are loaded.
    """
    # DLA-169 uses Bottleneck blocks with an expansion factor of 2.
    Bottleneck.expansion = 2
    levels = [1, 1, 2, 3, 5, 1]
    channels = [16, 32, 128, 256, 512, 1024]
    model = DLA(cfg, levels, channels, block=Bottleneck, residual_root=True, **kwargs)
    if pretrained is not None:
        model.load_pretrained_model(pretrained, 'dla169')
    return model
def test_observation_decoder(shape=(3, 64, 64)):
    """Check ObservationDecoder output shapes for 2-D and 3-D embeddings.

    A (batch, 1024) embedding must decode to samples of shape
    (batch, c, h, w); a (batch, horizon, 1024) embedding must preserve the
    horizon dimension, giving (batch, horizon, c, h, w).
    """
    decoder = ObservationDecoder()
    batch_size = 2
    (c, h, w) = shape
    # Single-step embedding: samples carry no time dimension.
    embedding = torch.randn(batch_size, 1024)
    with torch.no_grad():
        obs_dist: torch.distributions.Normal = decoder(embedding)
    obs_sample: torch.Tensor = obs_dist.sample()
    assert (obs_sample.size(0) == batch_size)
    assert (obs_sample.size(1) == c)
    assert (obs_sample.size(2) == h)
    assert (obs_sample.size(3) == w)
    # Sequential embedding: the horizon dimension must be preserved.
    # (The original ran this exact check twice verbatim; once suffices.)
    horizon = 4
    embedding = torch.randn(batch_size, horizon, 1024)
    with torch.no_grad():
        obs_dist: torch.distributions.Normal = decoder(embedding)
    obs_sample: torch.Tensor = obs_dist.sample()
    assert (obs_sample.size(0) == batch_size)
    assert (obs_sample.size(1) == horizon)
    assert (obs_sample.size(2) == c)
    assert (obs_sample.size(3) == h)
    assert (obs_sample.size(4) == w)
class ExpLrUpdaterHook(LrUpdaterHook):
    """LR updater with exponential decay: lr = base_lr * gamma ** progress.

    Progress counts epochs when ``by_epoch`` (inherited from LrUpdaterHook)
    is set, otherwise iterations.
    """
    def __init__(self, gamma, **kwargs):
        # gamma: multiplicative decay factor per epoch/iteration.
        self.gamma = gamma
        super(ExpLrUpdaterHook, self).__init__(**kwargs)
    def get_lr(self, runner, base_lr):
        # BUG FIX: the original read `trainer.epoch` / `trainer.iter`, but
        # `trainer` is undefined here — the progress comes from `runner`.
        progress = (runner.epoch if self.by_epoch else runner.iter)
        return (base_lr * (self.gamma ** progress))
class SydneyCaptions(RSICD):
    """Sydney image-captioning dataset.

    Shares the RSICD loading logic; only the on-disk layout differs.
    """
    splits = ['train', 'val', 'test']
    def __init__(self, root: str='.data/sydney_captions', split: str='train', transform: T.Compose=T.Compose([T.ToTensor()])):
        # Only the three canonical splits are recognised.
        assert (split in self.splits)
        self.root = root
        self.transform = transform
        annotation_file = os.path.join(root, 'dataset.json')
        self.captions = self.load_captions(annotation_file, split)
        # Images are stored under `<root>/images`.
        self.image_root = 'images'
def evaluate_metrics(prediction_file: Union[(str, Path, List[Dict[(str, str)]])], reference_file: Union[(str, Path, List[Dict[(str, str)]])], nb_reference_captions: int=5) -> Dict[(str, Dict[(str, Union[(float, Dict[(str, float)])])])]:
    """Score predicted captions against multi-reference ground truth.

    Both inputs may be CSV paths or already-loaded row dicts. Every
    prediction row must have a reference row with the same file_name.
    Returns the combined corpus and per-file metrics with lower-cased keys.
    """
    prediction_file = check_and_read_csv(prediction_file)
    reference_file = check_and_read_csv(reference_file)
    def by_name(row):
        return row['file_name']
    prediction_file.sort(key=by_name)
    reference_file.sort(key=by_name)
    # Index references by file name for O(1) lookup.
    reference_dict = {row['file_name']: row for row in reference_file}
    file_names = [row['file_name'] for row in prediction_file]
    assert all(((name in reference_dict) for name in file_names))
    # Reference captions live in numbered columns (caption_reference_01, ...).
    cap_names = ['caption_reference_{:02d}'.format(i) for i in range(1, (nb_reference_captions + 1))]
    predictions = [row['caption_predicted'] for row in prediction_file]
    ground_truths = [[reference_dict[row['file_name']][cap] for cap in cap_names] for row in prediction_file]
    (metrics, per_file_metrics) = evaluate_metrics_from_lists(predictions, ground_truths)
    total_metrics = combine_single_and_per_file_metrics(metrics, per_file_metrics, file_names)
    return {key.lower(): value for (key, value) in total_metrics.items()}
class IICMeanTeacherTrainer(IICTrainer):
    """IIC trainer augmented with a mean-teacher consistency branch.

    The teacher is a detached EMA copy of the student model; evaluation
    is performed on the teacher.
    """
    def _init(self):
        super()._init()
        # Reuse the base regularization weight as the IIC loss weight.
        self._iic_weight = deepcopy(self._reg_weight)
        # EMA teacher: parameters are detached so only the EMA updater moves them.
        self._teacher_model = deepcopy(self._model)
        for param in self._teacher_model.parameters():
            param.detach_()
        self._teacher_model.train()
        config = deepcopy(self._config['MeanTeacherParameters'])
        # Consistency criterion between student and teacher predictions.
        self._reg_criterion = {'mse': nn.MSELoss(), 'kl': KL_div()}[config['name']]
        self._ema_updater = ema_updater(alpha=float(config['alpha']), weight_decay=float(config['weight_decay']))
        self._mt_weight = float(config['weight'])
    def epocher_class(self) -> Type[TrainEpocher]:
        # NOTE(review): sibling trainers may expose this as a @property —
        # confirm how the base class invokes it.
        return MIMeanTeacherEpocher
    def _run_epoch(self, epocher: MIMeanTeacherEpocher, *args, **kwargs) -> EpochResultDict:
        # Hand every mean-teacher dependency to the epocher before running it.
        epocher.init(mi_estimator_array=self._mi_estimator_array, reg_criterion=self._reg_criterion, teacher_model=self._teacher_model, ema_updater=self._ema_updater, mt_weight=self._mt_weight, iic_weight=self._iic_weight, enforce_matching=self._enforce_matching)
        result = epocher.run()
        return result
    def _eval_epoch(self, *, loader, **kwargs) -> Tuple[(EpochResultDict, float)]:
        # Validation runs on the (smoother) teacher model, not the student.
        evaler = EvalEpocher(self._teacher_model, val_loader=loader, sup_criterion=self._sup_criterion, cur_epoch=self._cur_epoch, device=self._device)
        (result, cur_score) = evaler.run()
        return (result, cur_score)
class PreventStuckPlayer(ProxyPlayer):
    """Proxy that breaks action loops.

    If the same action is issued `nr_repeat` times in a row, the
    configured trigger action is substituted instead.
    """
    def __init__(self, player, nr_repeat, action):
        super(PreventStuckPlayer, self).__init__(player)
        # Rolling window of the most recent actions.
        self.act_que = deque(maxlen=nr_repeat)
        self.trigger_action = action
    def action(self, act):
        self.act_que.append(act)
        # Entire window equals its first element -> the agent is stuck.
        stuck = (self.act_que.count(self.act_que[0]) == self.act_que.maxlen)
        if stuck:
            act = self.trigger_action
        (r, isOver) = self.player.action(act)
        if isOver:
            self.act_que.clear()
        return (r, isOver)
    def restart_episode(self):
        super(PreventStuckPlayer, self).restart_episode()
        self.act_que.clear()
class Config(collections.abc.MutableMapping):
    """Singleton mapping backed by `config.toml`.

    On construction the config file is parsed and its `version` is checked
    against the bundled `config-example.toml` template; a mismatch — or a
    missing `version` key on either side — raises RuntimeError.

    Note: the base class is `collections.abc.MutableMapping`; the old
    `collections.MutableMapping` alias was removed in Python 3.10.
    """
    _instance = None
    # Parsed TOML document; all mapping operations delegate to it.
    _store: t.MutableMapping[(str, t.Any)]
    _file = Path('config.toml')
    _template = Path('config-example.toml')
    @classmethod
    def get_instance(cls):
        """Return the singleton, creating it on first use.

        BUG FIX: this method took `cls` but was not decorated with
        @classmethod, so `Config.get_instance()` raised TypeError.
        """
        if (cls._instance is None):
            cls()
        return cls._instance
    def __init__(self):
        if (Config._instance is not None):
            raise RuntimeError('This class is a singleton!')
        else:
            Config._instance = self
        with self._file.open() as f:
            self._store = toml.parse(f.read())
        with self._template.open() as f:
            template = toml.parse(f.read())
        if ('version' not in self):
            raise RuntimeError(f"The attribute 'version' is missing in '{self._file}'.")
        if ('version' not in template):
            raise RuntimeError(f"The attribute 'version' is missing in '{self._template}'.")
        self_version = version.parse(str(self['version']))
        template_version = version.parse(str(template['version']))
        if (self_version != template_version):
            raise RuntimeError(f"The version of '{self._file}' ({self_version}) is not equal to the version of '{self._template}' ({template_version}). Please update your config!")
    # MutableMapping protocol: delegate everything to the parsed document.
    def __getitem__(self, key):
        return self._store[key]
    def __setitem__(self, key, value):
        self._store[key] = value
    def __delitem__(self, key):
        del self._store[key]
    def __iter__(self):
        return iter(self._store)
    def __len__(self):
        return len(self._store)
    def __repr__(self):
        return repr(self._store)
    def __str__(self):
        return str(self._store)
def repackage_hidden(h):
    """Detach hidden state(s) from the autograd graph.

    Recurses through nested lists and tuples (container types are
    preserved); None passes through unchanged.
    """
    if h is None:
        return None
    if isinstance(h, list):
        return [repackage_hidden(item) for item in h]
    if isinstance(h, tuple):
        return tuple(repackage_hidden(item) for item in h)
    return h.detach()
def load_images(images, curriculum, device):
    """Split a batch of images into per-stage chunks, resized per stage.

    Consumes `images` front to back: each curriculum stage takes the next
    `batch_size` images and bilinearly resizes them to its `img_size`.
    """
    staged = []
    offset = 0
    for stage in curriculum['stages']:
        chunk = images[offset:(offset + stage['batch_size'])]
        chunk = F.interpolate(chunk, size=stage['img_size'], mode='bilinear', align_corners=True)
        staged.append(chunk)
        offset += stage['batch_size']
    return staged
class KMeans(object):
    """ctypes wrapper around the VLFeat k-means implementation.

    Mirrors the scikit-learn fit/transform API: `fit` learns
    `num_centers` centroids, `transform` assigns rows of data to their
    nearest centroid.
    """
    def __init__(self, num_centers, dtype=np.float32, algorithm='lloyd', initialization='plus_plus', distance='l2', max_iter=100, num_rep=1, verbosity=0):
        """Configure the underlying native VlKMeans object.

        num_centers: number of clusters. dtype must map to C float/double.
        algorithm / initialization / distance are VLFeat enum names
        (case-insensitive).
        """
        _check_integer(num_rep, 'num_rep', 1)
        _check_integer(verbosity, 'verbosity', 0)
        _check_integer(max_iter, 'max_iter', 0)
        # Map the string options onto the VLFeat enum values.
        algorithm = KMeansAlgorithm._members[algorithm.upper()]
        initialization = KMeansInitialization._members[initialization.upper()]
        distance = VectorComparisonType._members[('DISTANCE_' + distance.upper())]
        dtype = np.dtype(dtype)
        c_dtype = np_to_c_types.get(dtype, None)
        if (c_dtype not in [c_float, c_double]):
            raise TypeError('data should be float32 or float64')
        self.c_dtype = c_dtype
        self.vl_dtype = c_to_vl_types[c_dtype]
        self.num_centers = num_centers
        # Pointer to the native VlKMeans struct, plus a dereferenced handle
        # for setting its fields.
        self.kmeans_p = vl_kmeans_new(self.vl_dtype, distance)
        self.kmeans = self.kmeans_p[0]
        self.kmeans.verbosity = verbosity
        self.kmeans.numRepetitions = num_rep
        self.kmeans.algorithm = algorithm
        self.kmeans.initialization = initialization
        self.kmeans.maxNumIterations = max_iter
    def _check_data(self, data):
        """Validate dtype and (num_data, dim) shape; return the array."""
        data = np.asarray(data)
        c_dtype = np_to_c_types.get(data.dtype, None)
        if (c_dtype != self.c_dtype):
            raise TypeError('different dtype')
        if (data.ndim != 2):
            raise TypeError('data must be num_data x dim')
        (num_data, dim) = data.shape
        if (dim == 0):
            raise ValueError('data dimension is zero')
        return data
    def fit(self, data):
        """Run clustering; stores `energy` and a copy of `centers`."""
        data = self._check_data(data)
        (num_data, dim) = data.shape
        data_p = data.ctypes.data_as(c_void_p)
        self.energy = vl_kmeans_cluster(self.kmeans_p, data_p, data.shape[1], data.shape[0], self.num_centers)
        # Copy the centers out of native memory so they outlive the handle.
        centers_p = cast(self.kmeans.centers, POINTER(self.c_dtype))
        self.centers = np.ctypeslib.as_array(centers_p, (self.num_centers, self.kmeans.dimension)).copy()
        return self
    def transform(self, data):
        """Return the nearest-centroid index (uint32) for each data row."""
        data = self._check_data(data)
        assignments = np.empty(data.shape[0], dtype=np.uint32)
        data_p = data.ctypes.data_as(c_void_p)
        vl_kmeans_quantize(self.kmeans_p, assignments, None, data_p, data.shape[0])
        return assignments
    def fit_transform(self, data):
        """Fit on `data`, then return its cluster assignments."""
        return self.fit(data).transform(data)
def numpyasarray(np_data):
    """Wrap a C-contiguous numpy array in a TVMArray descriptor.

    Returns (arr, shape); the ctypes shape buffer is returned too so the
    caller can keep it alive for as long as `arr` is used.
    """
    data = np_data
    # TVM expects a contiguous buffer, so strides stay None below.
    assert data.flags['C_CONTIGUOUS']
    shape = c_array(tvm_shape_index_t, data.shape)
    arr = TVMArray()
    arr.data = data.ctypes.data_as(ctypes.c_void_p)
    arr.shape = shape
    arr.strides = None
    arr.dtype = TVMType(np.dtype(data.dtype).name)
    arr.ndim = data.ndim
    # CPU context (device_type=1, device_id=0).
    arr.ctx = context(1, 0)
    return (arr, shape)
class Seq2SeqLMOutput(ModelOutput):
    """Output container for sequence-to-sequence language models.

    Follows the ModelOutput convention: fields that were not requested or
    computed remain None.
    """
    # Language-modeling loss (when labels were provided — TODO confirm caller).
    loss: Optional[torch.FloatTensor] = None
    # LM head prediction scores.
    logits: torch.FloatTensor = None
    # Cached key/value states for fast autoregressive decoding.
    past_key_values: Optional[List[torch.FloatTensor]] = None
    # Decoder hidden states, one tensor per layer.
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Decoder attention weights, one tensor per layer.
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Final encoder hidden state (input to decoder cross-attention).
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    # Encoder hidden states, one tensor per layer.
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Encoder attention weights, one tensor per layer.
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
def horizontal_flow(sublayout_width, sublayout_height, num_row, pref_w_list, pref_h_list, optional_index_weight_dict, fixed_boundary):
    """Greedy row-flow layout over a fixed number of rows.

    Packs `len(pref_w_list)` items into `num_row` rows of width
    `sublayout_width`, dropping optional items (mapped to drop penalties
    in `optional_index_weight_dict`) when space is short and re-admitting
    them when space is plentiful. Remaining slack per row is distributed
    evenly across its items.

    Returns (result_index, row_width, row_height, dropped_penalty), or
    None when not a single item fits in some row or items remain unplaced.
    NOTE(review): `optional_index_weight_dict` is mutated in place —
    callers should pass a copy if they reuse it.
    """
    num = len(pref_w_list)
    result_index = []  # item indices placed, one list per row
    row_width = []  # final item widths, one list per row
    row_height = []  # one averaged height per row
    i = 0  # index of the next unplaced item
    removed_index_weight_dict = {}  # currently dropped items -> penalty
    for r in range(num_row):
        row_width.append([])
        result_index.append([])
        # Width available in this row plus all remaining rows.
        remaining_total_available_width = ((num_row - r) * sublayout_width)
        overall_delta = (remaining_total_available_width - sum(pref_w_list[i:]))
        loss = (abs(overall_delta) + sum(removed_index_weight_dict.values()))
        if (overall_delta <= 0):
            # Over-full: drop optional items while doing so reduces loss.
            while ((overall_delta <= 0) and (optional_index_weight_dict != {})):
                flag = False
                for (index, weight) in optional_index_weight_dict.items():
                    if (index >= i):
                        removed_index_weight_dict[index] = weight
                        flag = True
                        prev_overall_delta = overall_delta
                        overall_delta += pref_w_list[index]
                        break
                # NOTE(review): this comparison uses the signed delta while
                # `loss` is tracked with abs() — confirm this asymmetry is
                # intentional for the overflow case.
                if (loss > (overall_delta + sum(removed_index_weight_dict.values()))):
                    loss = (abs(overall_delta) + sum(removed_index_weight_dict.values()))
                    del optional_index_weight_dict[index]
                else:
                    if flag:
                        # Undo the tentative drop before giving up.
                        del removed_index_weight_dict[index]
                        overall_delta = prev_overall_delta
                    break
        else:
            # Under-full: re-admit previously dropped items while it helps.
            while ((overall_delta > 0) and (removed_index_weight_dict != {})):
                for (index, weight) in removed_index_weight_dict.items():
                    if (index >= i):
                        optional_index_weight_dict[index] = weight
                        # NOTE(review): unlike the overflow branch this sums
                        # the whole pref_w_list rather than pref_w_list[i:] —
                        # verify this is not an off-by-slice bug.
                        overall_delta = (remaining_total_available_width - sum(pref_w_list))
                        break
                if (loss > (overall_delta + sum(removed_index_weight_dict.values()))):
                    loss = (overall_delta + sum(removed_index_weight_dict.values()))
                    del optional_index_weight_dict[index]
                else:
                    del removed_index_weight_dict[index]
                    break
        row_length = 0
        start_index = i
        # Fill the current row greedily with surviving (non-dropped) items.
        while (i < num):
            if (i not in removed_index_weight_dict.keys()):
                # Spread the remaining slack evenly across remaining items.
                delta = (overall_delta / ((num - len(removed_index_weight_dict)) - start_index))
                row_length += (pref_w_list[i] + delta)
                if (row_length <= sublayout_width):
                    result_index[(- 1)].append(i)
                    i += 1
                elif ((row_length - sublayout_width) < (sublayout_width - ((row_length - pref_w_list[i]) - delta))):
                    # Overshoot is smaller than the leftover gap: keep the item.
                    result_index[(- 1)].append(i)
                    i += 1
                else:
                    break
            else:
                i += 1
        end_index = i
        if (end_index == start_index):
            # Could not place a single item in this row: layout fails.
            return None
        sum_row_width = 0
        sum_row_height = 0
        for index in result_index[(- 1)]:
            sum_row_width += pref_w_list[index]
            sum_row_height += pref_h_list[index]
        # Distribute the row's leftover width evenly; average the heights.
        row_delta = ((sublayout_width - sum_row_width) / len(result_index[(- 1)]))
        row_height.append((sum_row_height / len(result_index[(- 1)])))
        for index in result_index[(- 1)]:
            row_width[(- 1)].append((pref_w_list[index] + row_delta))
    if ((sum(row_height) > sublayout_height) or fixed_boundary):
        # Compress all rows uniformly to fit the fixed sublayout height.
        delta = ((sum(row_height) - sublayout_height) / num_row)
        row_height = [(x - delta) for x in row_height]
    if (end_index != len(pref_w_list)):
        # Some items were never placed.
        return None
    return (result_index, row_width, row_height, sum(removed_index_weight_dict.values()))
def test_new_scope_val_depends_on_old():
    """Staleness must be detected for values derived from an attribute
    after the class-level attribute is rebound.
    """
    run_cell('\n class Foo:\n shared = 99\n ')
    run_cell('foo = Foo()')
    run_cell('foo.shared = 11')
    # Snapshot the instance attribute into an alias...
    run_cell('foo_shared_alias = foo.shared')
    # ...then rebind the class attribute, staling downstream users.
    run_cell('Foo.shared = 12')
    run_cell('logging.info(foo_shared_alias)')
    assert_detected()
    # NOTE(review): foo.shared is shadowed by the instance attribute (11);
    # presumably the checker still flags it via the Foo.shared rebind.
    run_cell('logging.info(foo.shared)')
    assert_detected()
    # Using the object itself must also be flagged.
    run_cell('logging.info(foo)')
    assert_detected()
def _cast_to_config(obj):
if isinstance(obj, dict):
return DictConfig(obj, flags={'allow_objects': True})
return obj |
class Block(nn.Module):
    """Transformer block: pre-norm attention then a pre-norm MLP, each in a
    residual branch with optional stochastic depth (DropPath).
    """
    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, cross=False):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, cross=cross)
        # Stochastic depth; identity when the drop probability is zero.
        self.drop_path = nn.Identity() if drop_path <= 0.0 else DropPath(drop_path)
        self.norm = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)
    def forward(self, x, H, W):
        # NOTE(review): the MLP receives the spatial size (H, W) —
        # presumably it contains a depthwise conv; confirm against Mlp.
        x = x + self.drop_path(self.attn(self.norm1(x)))
        x = x + self.drop_path(self.mlp(self.norm(x), H, W))
        return x
def apply_lora(base_model_path, lora_path):
    """Load a base causal LM and merge a LoRA adapter into it.

    Returns (base, merged_model, base_tokenizer). The base model is loaded
    in float16 with low_cpu_mem_usage to limit peak host memory.

    NOTE(review): PeftModel wraps `base`, and merge_and_unload may fold the
    adapter weights into the same underlying modules — confirm the returned
    `base` is still usable as an un-merged model before relying on it.
    """
    print(f'Loading the base model from {base_model_path}')
    base_tokenizer = AutoTokenizer.from_pretrained(base_model_path)
    base = AutoModelForCausalLM.from_pretrained(base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    print(f'Loading the LoRA adapter from {lora_path}')
    lora_model = PeftModel.from_pretrained(base, lora_path)
    print('Applying the LoRA')
    model = lora_model.merge_and_unload()
    return (base, model, base_tokenizer)
def check_equal(first, second, verbose):
    """Assert element-wise closeness of two sequences of tensors.

    Each pair is moved to CPU/numpy and compared with
    np.testing.assert_allclose; `verbose` prints both operands first.
    """
    if verbose:
        print()
    for idx, (lhs, rhs) in enumerate(zip(first, second)):
        lhs = lhs.cpu().detach().numpy()
        rhs = rhs.cpu().detach().numpy()
        if verbose:
            print('x = {}'.format(lhs.flatten()))
            print('y = {}'.format(rhs.flatten()))
            print(('-' * 80))
        np.testing.assert_allclose(lhs, rhs, err_msg='Index: {}'.format(idx))
def omniglot():
    # Chain the download-config checks for both Omniglot splits
    # (background and evaluation).
    # NOTE(review): the lambda closes over the comprehension variable
    # `background` (late binding) — this is only correct if
    # collect_download_configs invokes the lambda before the next loop
    # iteration; verify against that helper.
    return itertools.chain(*[collect_download_configs((lambda : datasets.Omniglot(ROOT, background=background, download=True)), name=f"Omniglot, {('background' if background else 'evaluation')}") for background in (True, False)])
class TestPlotPDF(unittest.TestCase):
    """Checks that Fit.plot_pdf honors a custom bin count."""
    def test_custom_bins(self):
        import numpy as np
        import powerlaw
        import matplotlib.pyplot as plt
        # Heavy-tailed sample so the power-law fit is meaningful.
        data = (1.0 / np.random.power(4.0, 1000))
        fit = powerlaw.Fit(data)
        # The original duplicated this stanza verbatim for bins=2 and
        # bins=10; a loop performs the identical checks once each.
        for bins in (2, 10):
            plt.figure()
            ax = fit.plot_pdf(marker='*', bins=bins)
            # The plotted line must contain exactly `bins` x-points.
            line = ax.lines[0]
            assert (len(line.get_xdata()) == bins)
            plt.close()
class TestCrissCrossAttention(object):
    """Regression test for mmcv's CrissCrossAttention against recorded
    reference activations (forward check + backward smoke test)."""
    def test_cc_attention(self):
        device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
        from mmcv.ops import CrissCrossAttention
        loss_func = Loss()
        # Pre-recorded input/output pair stored as raw float32 blobs of
        # shape (1, 32, 45, 45).
        input = np.fromfile('tests/data/for_ccattention/ccattention_input.bin', dtype=np.float32)
        output = np.fromfile('tests/data/for_ccattention/ccattention_output.bin', dtype=np.float32)
        input = input.reshape((1, 32, 45, 45))
        output = output.reshape((1, 32, 45, 45))
        label = torch.ones((1, 32, 45, 45))
        input = torch.FloatTensor(input)
        output = torch.FloatTensor(output)
        # Gradients enabled so backward() exercises the op's backward pass.
        input.requires_grad = True
        shape = input.shape
        channel = shape[1]
        cca = CrissCrossAttention(channel)
        cca.to(device)
        input = input.to(device)
        label = label.to(device)
        cca.train()
        test_output = cca(input)
        test_loss = loss_func(test_output, label)
        test_loss.backward()
        test_output = test_output.detach().cpu().numpy()
        output = output.numpy()
        # Forward output must match the recorded reference within allclose
        # tolerances, with an unchanged shape.
        assert np.allclose(test_output, output)
        assert (test_output.shape == shape)
def get(config, mode):
    """Build the execution config for `mode`.

    Starts from a deep copy of the `mode` section of `config` and copies
    in every top-level entry that is not itself a mode section
    ('trainor', 'validator', 'ensemblor').
    """
    mode_sections = ('trainor', 'validator', 'ensemblor')
    exec_config = copy.deepcopy(getattr(config, mode))
    for key in list(config.keys()):
        if key in mode_sections:
            continue
        exec_config[key] = config[key]
    return exec_config
class CustomMetric():
    """Accumulates a per-class metric over the batches of an epoch.

    The wrapped `metric(prediction, target, **kwargs)` is expected to
    return a per-class score vector; classes absent from a batch's target
    are masked out when averaging. `epoch()` archives the epoch buffers
    into the `last_*` fields and resets the accumulators.
    """
    def __init__(self, metric, metric_name, **kwargs):
        # metric: callable returning a per-class score tensor.
        self.metric = metric
        self.metric_name = metric_name
        self.kwargs = kwargs
        # Per-batch accumulators for the running epoch.
        self.scores = []
        self.valid_classes = []
        self.valid_matrices = []
        self.names = []
        # Most recent batch results.
        self.score = None
        self.valid_class = None
        self.valid_matrix = None
        self.name = None
        # Snapshot of the previous epoch (filled by `epoch()`).
        self.last_scores = None
        self.last_valid_classes = None
        self.last_valid_matrices = None
        self.last_names = None
    def batch(self, prediction, target, name=None):
        """Score one batch and record which classes were present in it."""
        self.score = self.metric(prediction, target, **self.kwargs).to('cpu')
        self.valid_class = target.unique().to('cpu')
        # Boolean mask over classes: True where the class occurs in target.
        # NOTE(review): assumes the metric returns one entry per class id,
        # indexable by the target's unique values — confirm against the
        # metric implementations used.
        dummy = torch.zeros_like(self.score).to('cpu')
        dummy[self.valid_class] = 1
        self.valid_matrix = dummy.type(torch.bool).to('cpu')
        self.scores.append(self.score)
        self.valid_classes.append(self.valid_class)
        self.valid_matrices.append(self.valid_matrix)
        if name:
            self.name = name
            self.names.append(self.name)
    def get_metrics_batch(self, mean=True):
        """Last batch's score over present classes (mean or raw vector)."""
        if mean:
            return self.score[self.valid_class].mean()
        else:
            return self.score[self.valid_class]
    def get_metrics_epoch(self, last=False, transpose=True):
        """Aggregate scores; transpose=True averages per class, otherwise per batch."""
        if last:
            if transpose:
                scores = torch.stack(self.last_scores).T
                masks = torch.stack(self.last_valid_matrices).T
            else:
                scores = torch.stack(self.last_scores)
                masks = torch.stack(self.last_valid_matrices)
        elif transpose:
            scores = torch.stack(self.scores).T
            masks = torch.stack(self.valid_matrices).T
        else:
            scores = torch.stack(self.scores)
            masks = torch.stack(self.valid_matrices)
        # Average only over entries whose class was present in the batch.
        filtered = [s[m] for (s, m) in zip(scores, masks)]
        return torch.stack([c.mean() for c in filtered])
    def epoch(self):
        """Archive the epoch buffers, reset, and return the mean score."""
        self.last_scores = self.scores
        self.last_valid_classes = self.valid_classes
        self.last_valid_matrices = self.valid_matrices
        self.last_names = self.names
        result = self.get_metrics_epoch()
        self.reset()
        return result.mean()
    def reset(self):
        """Clear the running-epoch accumulators."""
        self.scores = []
        self.valid_classes = []
        self.valid_matrices = []
        self.names = []
    def __repr__(self):
        return self.metric_name
(argument('id', help='id of instance type to change bid', type=int), argument('--price', help='per machine bid price in $/hour', type=float), usage='vast.py change bid id [--price PRICE]', help='Change the bid price for a spot/interruptible instance', epilog=deindent('\n Change the current bid price of instance id to PRICE.\n If PRICE is not specified, then a winning bid price is used as the default.\n '))
def change__bid(args: argparse.Namespace):
    """Change the bid price of spot instance `args.id` to `args.price`.

    Raises requests.HTTPError if the API rejects the request.
    """
    url = apiurl(args, '/instances/bid_price/{id}/'.format(id=args.id))
    print(f'URL: {url}')
    r = requests.put(url, json={'client_id': 'me', 'price': args.price})
    r.raise_for_status()
    # BUG FIX: the original called .format(r.json()) on a string with no
    # placeholder, silently discarding the API response.
    print('Per gpu bid price changed: {}'.format(r.json()))
def write_rttm(fn, turns):
    """Write speaker turns to `fn` in RTTM format (UTF-8, one per line).

    Turns are ordered by file id, then numeric onset, then duration.
    """
    with open(fn, 'wb') as out:
        for turn in sorted(turns, key=lambda t: (t.fid, float(t.onset), float(t.dur))):
            out.write(' '.join(turn).encode('utf-8'))
            out.write(b'\n')
class _DilatedResidualBlock(nn.Module):
    """Dilated 1-D conv block with a residual connection and optional
    FiLM / temporal-FiLM conditioning.

    Layout: [norm] -> pad -> dilated conv -> [FiLM] -> activation ->
    [TFiLM], added to a 1x1-conv projection of the input.
    """
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int, dilation: int, causal: bool=True, norm: Literal[('batch', 'instance', None)]=None, activation: str='GELU', film_conditioning: bool=False, film_embedding_size: Optional[int]=None, film_batch_norm: bool=True, use_temporal_film: bool=False, temporal_film_block_size: Optional[int]=None):
        super().__init__()
        # Validate the conditioning hyper-parameters up front.
        if (film_conditioning and ((film_embedding_size is None) or (not isinstance(film_embedding_size, int)) or (film_embedding_size < 1))):
            raise ValueError('FiLM conditioning requires a valid embedding size (int >= 1).')
        if (use_temporal_film and ((temporal_film_block_size is None) or (not isinstance(temporal_film_block_size, int)) or (temporal_film_block_size < 1))):
            raise ValueError('TFiLM conditioning requires a valid block size (int >= 1).')
        net = []
        # Gated activations consume two channel halves, so the conv must
        # produce twice the output channels.
        pre_activation_channels = ((out_channels * 2) if (activation == 'gated') else out_channels)
        if (norm is not None):
            if (norm not in ('batch', 'instance')):
                raise ValueError('Invalid norm type (must be batch or instance)')
            _Norm = (nn.BatchNorm1d if (norm == 'batch') else nn.InstanceNorm1d)
            net.append(_Norm(in_channels))
        net.extend([Pad(kernel_size, dilation, causal=causal), nn.Conv1d(in_channels, pre_activation_channels, kernel_size, dilation=dilation, padding=0)])
        self.net = nn.Sequential(*net)
        self.film = (FiLM(film_embedding_size, pre_activation_channels, film_batch_norm) if film_conditioning else None)
        self.activation = _get_activation(activation)
        self.tfilm = (TFiLM(out_channels, temporal_film_block_size) if use_temporal_film else None)
        # 1x1 conv matches the channel count for the residual sum.
        self.residual = nn.Conv1d(in_channels, out_channels, 1)
    def forward(self, x: torch.Tensor, film_embedding: Optional[torch.Tensor]=None):
        # film_embedding is required when FiLM conditioning is enabled.
        activations = self.net(x)
        if (self.film is not None):
            activations = self.film(activations, film_embedding)
        y = self.activation(activations)
        if (self.tfilm is not None):
            y = self.tfilm(y)
        return (y + self.residual(x))
def prepare_trainer_collator(model_args, preprocessor: Dict[(str, Any)], collator_kwargs: Dict[(str, Any)]) -> Tuple[(Type[TrainerForMMLLM], Dict[(str, DataCollator)])]:
    """Resolve the trainer class for `model_args.type` and build the
    train/eval data collators (inference mode off/on respectively)."""
    trainer_cls = TYPE2TRAINER[model_args.type]
    make_collator = partial(Seq2Seq2DataCollatorWithImage, preprocessor=preprocessor, **collator_kwargs)
    collators = {
        'train_collator': make_collator(inference_mode=False),
        'eval_collator': make_collator(inference_mode=True),
    }
    return (trainer_cls, collators)
def init_weights_normal(m):
    """Initialize Conv2d weights (and bias, when present) from N(0, 1).

    Intended for `module.apply(init_weights_normal)`; modules other than
    Conv2d are left untouched.
    """
    classname = m.__class__.__name__
    if (classname == 'Conv2d'):
        nn.init.normal_(m.weight.data)
        # BUG FIX: Conv2d built with bias=False has m.bias = None, which
        # crashed the original unconditional bias initialization.
        if m.bias is not None:
            nn.init.normal_(m.bias.data)
def accuracy(model, train_time_data, train_schedule_data, anomaly_data, class_data, model_plotter):
    """Compute anomaly-detection and classification accuracy over a dataset.

    Returns (anomaly_accuracy, class_accuracy) and prints precision,
    recall and F1 derived from the averaged per-sample confusion counts.
    Classification accuracy is only measured on samples that contain at
    least one anomaly.
    """
    (anomaly_correct, class_correct, class_total) = (0, 0, 0)
    # Per-sample confusion counts (true/false positives/negatives).
    (tpl, tnl, fpl, fnl) = ([], [], [], [])
    for (i, d) in enumerate(train_time_data):
        output = model(train_time_data[i], train_schedule_data[i])
        (source_anomaly, source_prototype) = output
        (res, tp, tn, fp, fn) = anomaly_accuracy(source_anomaly, anomaly_data[i], model_plotter)
        anomaly_correct += res
        tpl.append(tp)
        tnl.append(tn)
        fpl.append(fp)
        fnl.append(fn)
        # (The original also mutated tp/fp/tn/fn here with `tp += res`-style
        # updates that were never read — dead code, removed.)
        if (np.sum(anomaly_data[i]) > 0):
            class_total += 1
            class_correct += class_accuracy(source_prototype, anomaly_data[i], class_data[i], model, model_plotter)
    # BUG FIX: the original averaged the scalar `fn` left over from the
    # last loop iteration instead of the accumulated list `fnl`.
    (tp, fp, tn, fn) = (np.mean(tpl), np.mean(fpl), np.mean(tnl), np.mean(fnl))
    (p, r) = ((tp / (tp + fp)), (tp / (tp + fn)))
    tqdm.write(f'P = {p}, R = {r}, F1 = {(((2 * p) * r) / (p + r))}')
    return ((anomaly_correct / len(train_time_data)), (class_correct / class_total))
def get_phcfun_fromlib():
    """Load libPHCpack for the current platform and return _ada_use_c2phc.

    Returns None (after printing a message) on unsupported platforms.
    Note the check order matters: 'darwin' must be tested before 'win',
    because 'darwin' also contains the substring 'win'.
    """
    if 'linux' in sys.platform:
        lib = ctypes.CDLL(LOCATION + '/libPHCpack.so')
        return lib._ada_use_c2phc
    if 'darwin' in sys.platform:
        lib = ctypes.CDLL(LOCATION + '/libPHCpack.dylib')
        return lib._ada_use_c2phc
    if 'win' in sys.platform:
        lib = ctypes.WinDLL(LOCATION + '/libPHCpack.dll', winmode=0)
        return lib._ada_use_c2phc
    print('The platform', sys.platform, 'is not supported.')
    return None
def ResNet34(conv_layer, linear_layer, init_type, **kwargs):
    """Construct a ResNet-34 (BasicBlock, layer layout 3-4-6-3)."""
    # Resnets in this codebase only support the default initialization.
    assert init_type == 'kaiming_normal', 'only supporting default init for Resnets'
    layer_counts = [3, 4, 6, 3]
    return ResNet(conv_layer, linear_layer, BasicBlock, layer_counts, **kwargs)
def inverse_warp_3d(img, disp, padding_mode='zeros', disp_Y=None):
    """Warp `img` horizontally by `disp` (and vertically by `disp_Y`).

    img: (B, C, H, W) or (B, C, D, H, W); a 4-D image is broadcast over
    the D disparity candidates. disp: (B, D, H, W) per-pixel horizontal
    shifts. Returns the warped (B, C, D, H, W) volume via grid_sample.
    """
    device = disp.device
    (B, D, H, W) = disp.shape
    C = img.shape[1]
    if disp_Y is not None:
        assert disp.shape == disp_Y.shape, 'disparity map along x and y axis should have same shape!'
    ndim = img.dim()
    if ndim == 4:
        # Broadcast a single image across all D disparity planes.
        img = img.unsqueeze(2).expand(B, C, D, H, W)
    elif ndim == 5:
        assert D == img.shape[2], 'The disparity number should be same between image and disparity map!'
    else:
        raise ValueError('image is only allowed with 4 or 5 dimensions, but got {} dimensions!'.format(ndim))
    # Base sampling lattice, one coordinate tensor per axis.
    coord_d = torch.linspace(0, D - 1, D).view(1, D, 1, 1).expand(B, D, H, W).to(device)
    coord_h = torch.linspace(0, H - 1, H).view(1, 1, H, 1).expand(B, D, H, W).to(device)
    coord_w = torch.linspace(0, W - 1, W).view(1, 1, 1, W).expand(B, D, H, W).to(device)
    # Shift sampling positions by the disparities.
    coord_w = coord_w + disp
    if disp_Y is not None:
        coord_h = coord_h + disp_Y
    # Normalize each axis into grid_sample's [-1, 1] coordinate range.
    coord_d = (coord_d / (D - 1)) * 2 - 1
    coord_h = (coord_h / (H - 1)) * 2 - 1
    coord_w = (coord_w / (W - 1)) * 2 - 1
    grid = torch.stack((coord_w, coord_h, coord_d), dim=4)
    return F.grid_sample(img, grid, padding_mode=padding_mode)
def frameworkSrcBatch(args: argparse.Namespace, coreFunc: FunctionType) -> None:
    """Run `coreFunc(SEED, args, src)` on every task from args.start onward.

    Exceptions are classified by their first arg: a reason containing
    'Catch' is appended to catches.log; the default sentinel
    'FrameworkCrashCatch' (an unclassified crash) aborts the whole batch
    via exit(-1). With args.singleapi the loop stops at the first task
    whose API differs from the previous one.
    """
    tasks = util.readAllTasksFromDir(args.input)
    lastApi: str = None
    for id in range(args.start, len(tasks)):
        task = tasks[id]
        (api, label, src) = util.parseTask(task)
        if args.singleapi:
            # Stop as soon as a second distinct API shows up.
            if ((lastApi != None) and (lastApi != api)):
                break
            lastApi = api
        try:
            if config.skipApi(api, label):
                raise Exception('Skipped', 'no detail')
            coreFunc(SEED, args, src)
        except Exception as e:
            # Default classification; overridden when the exception carries
            # an explicit (reason, detail) pair in its args.
            reason: str = 'FrameworkCrashCatch'
            detail: str = str(e)
            if (len(e.args) >= 2):
                reason: str = e.args[0]
                detail: str = e.args[1]
            if (len(detail) > OUTPUT_LIMIT):
                detail = 'Detail is too long'
            if (reason == 'FrameworkCrashCatch'):
                # Unclassified framework crash: abort the whole batch.
                print(detail)
                exit((- 1))
            if ('Catch' in reason):
                # Log catchable findings both to file and to stdout.
                with open('catches.log', 'a') as f:
                    f.write('\nTitanFuzzTestcase {} {} {} {} {} {}'.format(id, api, label, reason, SEED, detail))
                print('\nTitanFuzzTestcase', id, api, label, reason, SEED, detail)
def discriminator_loss(loss_func, real, fake):
    """Accumulate the discriminator loss over two scales.

    Supports wgan-family, lsgan, gan/dragan and hinge objectives; `real`
    and `fake` are per-scale discriminator logit lists. Unknown loss
    names contribute zero.
    """
    per_scale = []
    for scale in range(2):
        real_loss = 0
        fake_loss = 0
        # The objectives below are mutually exclusive, so elif is safe.
        if 'wgan' in loss_func:
            real_loss = -tf.reduce_mean(real[scale])
            fake_loss = tf.reduce_mean(fake[scale])
        elif loss_func == 'lsgan':
            real_loss = tf.reduce_mean(tf.squared_difference(real[scale], 1.0))
            fake_loss = tf.reduce_mean(tf.square(fake[scale]))
        elif loss_func in ('gan', 'dragan'):
            real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(real[scale]), logits=real[scale]))
            fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(fake[scale]), logits=fake[scale]))
        elif loss_func == 'hinge':
            real_loss = tf.reduce_mean(relu(1.0 - real[scale]))
            fake_loss = tf.reduce_mean(relu(1.0 + fake[scale]))
        per_scale.append(real_loss + fake_loss)
    return sum(per_scale)
class Net(torch.nn.Module):
    """Minimal network: a single 30 -> 50 fully-connected layer."""
    def __init__(self):
        super(Net, self).__init__()
        self.linear = torch.nn.Linear(30, 50)
    def forward(self, x):
        return self.linear(x)
def parse_args():
    """Build and parse the CLI for StyleGAN2 + ContraD training.

    Returns the parsed argparse.Namespace. Two positional arguments are
    required: the gin config path and the architecture name.
    """
    parser = ArgumentParser(description='Training script: StyleGAN2 + ContraD with DataParallel.')
    parser.add_argument('gin_config', type=str, help='Path to the gin configuration file')
    parser.add_argument('architecture', type=str, help='Architecture')
    parser.add_argument('--mode', default='std', type=str, help='Training mode (default: std)')
    parser.add_argument('--penalty', default='none', type=str, help='Penalty (default: none)')
    # BUG FIX: the help texts for --aug and --workers stated defaults
    # ('hfrt', 0) that contradicted the actual defaults ('none', 8).
    parser.add_argument('--aug', default='none', type=str, help='Augmentation (default: none)')
    parser.add_argument('--use_warmup', action='store_true', help='Use warmup strategy on LR')
    parser.add_argument('--workers', default=8, type=int, metavar='N', help='number of data loading workers (default: 8)')
    parser.add_argument('--temp', default=0.1, type=float, help='Temperature hyperparameter for contrastive losses')
    parser.add_argument('--lbd_a', default=1.0, type=float, help='Relative strength of the fake loss of ContraD')
    parser.add_argument('--no_lazy', action='store_true', help='Do not use lazy regularization')
    parser.add_argument('--d_reg_every', type=int, default=16, help='Interval of applying R1 when lazy regularization is used')
    parser.add_argument('--lbd_r1', type=float, default=10, help='R1 regularization')
    parser.add_argument('--style_mix', default=0.9, type=float, help='Style mixing regularization')
    parser.add_argument('--halflife_k', default=20, type=int, help='Half-life of exponential moving average in thousands of images')
    parser.add_argument('--ema_start_k', default=None, type=int, help='When to start the exponential moving average of G (default: halflife_k)')
    parser.add_argument('--halflife_lr', default=0, type=int, help='Apply LR decay when > 0')
    parser.add_argument('--no_fid', action='store_true', help='Do not track FIDs during training')
    parser.add_argument('--no_gif', action='store_true', help='Do not save GIF of sample generations from a fixed latent periodically during training')
    parser.add_argument('--n_eval_avg', default=3, type=int, help='How many times to average FID and IS')
    parser.add_argument('--print_every', help='', default=50, type=int)
    parser.add_argument('--evaluate_every', help='', default=2000, type=int)
    parser.add_argument('--save_every', help='', default=100000, type=int)
    parser.add_argument('--comment', help='Comment', default='', type=str)
    parser.add_argument('--resume', default=None, type=str, help='Path to logdir to resume the training')
    parser.add_argument('--finetune', default=None, type=str, help='Path to logdir that contains a pre-trained checkpoint of D')
    return parser.parse_args()
class TFXLMForMultipleChoice(metaclass=DummyObject):
    """Import-guard dummy: instantiation fails with an informative error
    unless the 'tf' (TensorFlow) backend is available."""
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        # Raises with installation guidance when TensorFlow is missing.
        requires_backends(self, ['tf'])
def threshold_till_dag(B):
    """Zero out the smallest-magnitude edges of B until it is a DAG.

    Returns (B, dag_thres) where dag_thres is the absolute value of the
    last removed weight, or 0 if B was already acyclic. The input matrix
    is not modified; a copy is thresholded.
    """
    if is_dag(B):
        return (B, 0)
    B = np.copy(B)
    # Collect all edges as (weight, row, col), weakest magnitude first.
    rows, cols = np.where(B != 0)
    edges = sorted(zip(B[(rows, cols)], rows, cols), key=lambda e: abs(e[0]))
    for (weight, j, i) in edges:
        if is_dag(B):
            break
        B[(j, i)] = 0
        dag_thres = abs(weight)
    return (B, dag_thres)
class LTRTrainer(BaseTrainer):
    """Trainer that cycles a set of dataset loaders each epoch, tracks
    per-loader statistics and mirrors them to tensorboard.
    """
    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None):
        """
        args:
            actor - wraps the network and the training objective.
            loaders - dataset loaders (train/val), cycled per epoch.
            optimizer - optimizer applied on training loaders.
            settings - training settings; unset fields get defaults.
            lr_scheduler - optional LR scheduler handled by the base class.
        """
        super().__init__(actor, loaders, optimizer, settings, lr_scheduler)
        self._set_default_settings()
        # One stats dict per loader, created lazily on first update.
        self.stats = OrderedDict({loader.name: None for loader in self.loaders})
        tensorboard_writer_dir = os.path.join('logs')
        self.tensorboard_writer = TensorboardWriter(tensorboard_writer_dir, [l.name for l in loaders])
    def _set_default_settings(self):
        """Fill unset settings fields with their defaults."""
        default = {'print_interval': 10, 'print_stats': None, 'description': ''}
        # BUG FIX: the defaults dict was previously built and then
        # discarded, leaving e.g. settings.print_interval undefined.
        for param, default_value in default.items():
            if getattr(self.settings, param, None) is None:
                setattr(self.settings, param, default_value)
    def cycle_dataset(self, loader):
        """Run one pass over `loader`, stepping the optimizer when training."""
        self.actor.train(loader.training)
        self._init_timing()
        for (i, data) in enumerate(loader, 1):
            data = data.to(self.device)
            data['epoch'] = self.epoch
            data['settings'] = self.settings
            (loss, stats) = self.actor(data)
            if loader.training:
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            batch_size = data['train_images'].shape[loader.stack_dim]
            self._update_stats(stats, batch_size, loader)
            self._print_stats(i, loader, batch_size)
    def train_epoch(self):
        """Run one epoch over every loader due in this epoch."""
        for loader in self.loaders:
            if ((self.epoch % loader.epoch_interval) == 0):
                self.cycle_dataset(loader)
        self._stats_new_epoch()
        self._write_tensorboard()
    def _init_timing(self):
        # Reset the frame counter and timers used for FPS reporting.
        self.num_frames = 0
        self.start_time = time.time()
        self.prev_time = self.start_time
    def _update_stats(self, new_stats: OrderedDict, batch_size, loader):
        """Merge one batch's stats into the loader's running averages."""
        if ((loader.name not in self.stats.keys()) or (self.stats[loader.name] is None)):
            self.stats[loader.name] = OrderedDict({name: AverageMeter() for name in new_stats.keys()})
        for (name, val) in new_stats.items():
            if (name not in self.stats[loader.name].keys()):
                self.stats[loader.name][name] = AverageMeter()
            self.stats[loader.name][name].update(val, batch_size)
    def _print_stats(self, i, loader, batch_size):
        """Periodically print progress, FPS and the averaged statistics."""
        self.num_frames += batch_size
        current_time = time.time()
        batch_fps = (batch_size / (current_time - self.prev_time))
        average_fps = (self.num_frames / (current_time - self.start_time))
        self.prev_time = current_time
        # Honor the configurable interval (defaulted to 10 in
        # _set_default_settings) instead of a hard-coded constant.
        if (((i % self.settings.print_interval) == 0) or (i == loader.__len__())):
            print_str = ('[%s: %d, %d / %d] ' % (loader.name, self.epoch, i, loader.__len__()))
            print_str += ('FPS: %.1f (%.1f) , ' % (average_fps, batch_fps))
            for (name, val) in self.stats[loader.name].items():
                if hasattr(val, 'avg'):
                    print_str += ('%s: %.5f , ' % (name, val.avg))
            print(print_str[:(- 5)])
    def _stats_new_epoch(self):
        """Record current learning rates and roll all stats to a new epoch."""
        for loader in self.loaders:
            if loader.training:
                lr_list = self.lr_scheduler.get_lr()
                for (i, lr) in enumerate(lr_list):
                    var_name = 'LearningRate/group{}'.format(i)
                    if (var_name not in self.stats[loader.name].keys()):
                        self.stats[loader.name][var_name] = StatValue()
                    self.stats[loader.name][var_name].update(lr)
        for loader_stats in self.stats.values():
            if (loader_stats is None):
                continue
            for stat_value in loader_stats.values():
                if hasattr(stat_value, 'new_epoch'):
                    stat_value.new_epoch()
    def _write_tensorboard(self):
        # Write the run description once, then the per-epoch stats.
        if (self.epoch == 1):
            self.tensorboard_writer.write_info('adafree_online', 'adafree', 'Train online for Adafree')
        self.tensorboard_writer.write_epoch(self.stats, self.epoch)
class ChannelSelector(object):
    """Select a single channel from an input array along a given axis.

    In training mode the channel may be drawn uniformly at random
    ('random'); in evaluation mode a fixed channel index is always used.
    Inputs with fewer dimensions than the target axis are padded with
    trailing singleton axes first.
    """

    def __init__(self, train_channel='random', eval_channel=0, axis=1):
        self.train_channel = train_channel
        self.eval_channel = eval_channel
        self.axis = axis

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(train_channel={self.train_channel}, '
                f'eval_channel={self.eval_channel}, '
                f'axis={self.axis})')

    def __call__(self, x, train=True):
        if x.ndim <= self.axis:
            # Append singleton axes until the selection axis exists.
            expand = tuple(slice(None) if dim < x.ndim else None
                           for dim in range(self.axis + 1))
            x = x[expand]
        channel = self.train_channel if train else self.eval_channel
        if channel == 'random':
            channel = numpy.random.randint(0, x.shape[self.axis])
        # Index the chosen channel on self.axis, keep everything else.
        selector = tuple(channel if dim == self.axis else slice(None)
                         for dim in range(x.ndim))
        return x[selector]
def test_can_move_down(board: Board, another_board: Board) -> None:
    """Movable fixture boards allow a down move; a board whose columns are
    strictly decreasing top-to-bottom with no zeros below values does not."""
    for movable in (board, another_board):
        assert can_move_down(movable)
    blocked = jnp.array(
        [[0, 0, 0, 0],
         [1, 0, 0, 0],
         [2, 1, 0, 0],
         [3, 2, 1, 0]]
    )
    assert ~can_move_down(blocked)
def store_model_weights(model, checkpoint_path, checkpoint_key='model', strict=True):
    """Extract model weights from a training checkpoint and re-save them as a
    content-addressed weights file next to the checkpoint.

    Args:
        model: template module; a deep copy is used, so the caller's model
            is left untouched.
        checkpoint_path: path to a checkpoint produced by ``torch.save``.
        checkpoint_key: key under which the state dict is stored inside the
            checkpoint (default ``'model'``).
        strict: forwarded to ``load_state_dict``.

    Returns:
        Path of the written file, named ``weights-<sha256 prefix>.pth``.
    """
    checkpoint_path = os.path.abspath(checkpoint_path)
    output_dir = os.path.dirname(checkpoint_path)
    # Deep-copy so loading the checkpoint does not mutate the caller's model.
    model = copy.deepcopy(model)
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    model.load_state_dict(checkpoint[checkpoint_key], strict=strict)
    # Save under a temporary unique name, hash the bytes, then rename so the
    # final file name embeds a prefix of the content hash.
    tmp_path = os.path.join(output_dir, str(model.__hash__()))
    try:
        torch.save(model.state_dict(), tmp_path)
        sha256_hash = hashlib.sha256()
        with open(tmp_path, 'rb') as f:
            # Stream the file in 4 KiB chunks to keep memory bounded.
            for byte_block in iter(lambda: f.read(4096), b''):
                sha256_hash.update(byte_block)
        hh = sha256_hash.hexdigest()
    except Exception:
        # BUG FIX: do not leave the temporary file behind when saving or
        # hashing fails.
        if os.path.exists(tmp_path):
            os.remove(tmp_path)
        raise
    output_path = os.path.join(output_dir, 'weights-' + str(hh[:8]) + '.pth')
    os.replace(tmp_path, output_path)
    return output_path
class FNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for FNet (adapted from ALBERT's).

    Text is optionally whitespace-normalised, accent-stripped and
    lower-cased before being split into SentencePiece pieces.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # FNet takes no attention mask, only ids and token type ids.
    model_input_names = ['input_ids', 'token_type_ids']

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', sp_model_kwargs: Optional[Dict[(str, Any)]]=None, **kwargs) -> None:
        # The mask token behaves like a normal word: it absorbs the space
        # before it (lstrip=True) but not the one after it.
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token)
        self.sp_model_kwargs = ({} if (sp_model_kwargs is None) else sp_model_kwargs)
        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        # BUG FIX: vocab_size must be a property — get_vocab() below evaluates
        # range(self.vocab_size), which would raise TypeError on a bound
        # method (and callers/the base class treat it as an attribute).
        return len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it here and
        # reload it from self.vocab_file in __setstate__.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Backward compatibility with pickles created before sp_model_kwargs
        # existed.
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalise raw text before SentencePiece tokenization."""
        if self.remove_space:
            # Collapse runs of whitespace to single spaces.
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace("''", '"')
        if not self.keep_accents:
            # Strip combining accent marks via NFKD decomposition.
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if (not unicodedata.combining(c))])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece pieces.

        Pieces ending in a digit followed by a comma (e.g. '9,') are
        re-tokenized without the comma so numbers split consistently, with
        the comma appended as its own piece.
        """
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''))
                # Drop the spurious leading underline the re-tokenization may
                # introduce when the original piece had none.
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an id (int) to a token using the SentencePiece vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join a sequence of pieces back into a single string."""
        return self.sp_model.decode(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Build model inputs: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens,
        matching the layout produced by build_inputs_with_special_tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Return token type ids: 0 for the first segment (incl. [CLS]/[SEP]),
        1 for the second segment (incl. its trailing [SEP])."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return (len(cls + token_ids_0 + sep) * [0]) + (len(token_ids_1 + sep) * [1])

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the SentencePiece model into save_directory.

        Copies the original vocab file when it exists; otherwise serialises
        the in-memory model. Returns a 1-tuple with the output path, or None
        after logging an error when save_directory is not a directory.
        """
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.