code stringlengths 101 5.91M |
|---|
class AzureMLCallback(TrainerCallback):
    """TrainerCallback that mirrors scalar training logs into an AzureML run."""

    def __init__(self, azureml_run=None):
        # Fail fast if the azureml SDK is missing; the run handle may be
        # injected here or lazily resolved in on_init_end.
        assert _has_azureml, 'AzureMLCallback requires azureml to be installed. Run `pip install azureml-sdk`.'
        self.azureml_run = azureml_run

    def on_init_end(self, args, state, control, **kwargs):
        # Only the main process attaches to the ambient AzureML run context.
        if self.azureml_run is None and state.is_world_process_zero:
            self.azureml_run = Run.get_context()

    def on_log(self, args, state, control, logs=None, **kwargs):
        if not self.azureml_run:
            return
        for metric_name, metric_value in logs.items():
            # Forward numeric scalars only; non-numeric log payloads are skipped.
            if isinstance(metric_value, (int, float)):
                self.azureml_run.log(metric_name, metric_value, description=metric_name)
class FeatureDeviation(Layer):
    """Keras layer measuring per-feature deviation from the spatial mean.

    For input of shape (batch, H, W, features), the deviation is computed over
    the spatial axes (1, 2). With ``scaling`` enabled the deviation is
    standardised by a std estimate floored at a trainable ``min_std`` and then
    bounded: hard-clipped at a trainable ``max_dev`` (mode='thresh') or
    squashed by tanh with a trainable gain ``alpha`` (mode='tanh'). The result
    may additionally be passed through abs and/or square.
    """

    def __init__(self, scaling=True, mode='thresh', use_abs=True, use_square=True, min_std=1e-05, cutoff=10, **kwargs):
        self.scaling = scaling
        self.mode = mode
        self.min_std_val = min_std
        self.cutoff = cutoff
        self.use_abs = use_abs
        self.use_square = use_square
        print('INFO: mode =', mode, 'min_std =', min_std, 'cutoff =', cutoff)
        super(FeatureDeviation, self).__init__(**kwargs)

    def build(self, input_shape):
        # One trainable scalar per feature channel, broadcast over batch/H/W.
        nb_feats = input_shape[-1]
        std_shape = (1, 1, 1, nb_feats)
        if self.scaling:
            # Trainable floor on the std estimate; constrained non-negative.
            self.min_std = self.add_weight(shape=std_shape, initializer=initializers.Constant(self.min_std_val), name='min_std', constraint=constraints.non_neg())
            if self.mode == 'thresh':
                self.max_dev = self.add_weight(shape=std_shape, initializer=initializers.Constant(self.cutoff), name='max_dev', constraint=constraints.non_neg())
                print('INFO: mode=thresh, create trainable params: max_dev, min_std')
            elif self.mode == 'tanh':
                self.alpha = self.add_weight(shape=std_shape, initializer=initializers.Constant(self.cutoff), name='scaling_coef', constraint=constraints.non_neg())
                print('INFO: mode=tanh, create trainable params: alpha, min_std')
        self.built = True
        return

    def call(self, x):
        mu = K.mean(x, axis=(1, 2), keepdims=True)
        if self.scaling:
            sigma = K.maximum(K.std(x, axis=(1, 2), keepdims=True), K.epsilon() + self.min_std)
            xn = (x - mu) / sigma
            if self.mode == 'thresh':
                diff = K.minimum(K.maximum(xn, -self.max_dev), self.max_dev)
            elif self.mode == 'tanh':
                diff = K.tanh(xn * self.alpha)
            else:
                # BUGFIX: the original referenced an undefined name `mode` here
                # and left `diff` unbound (NameError / UnboundLocalError).
                # Warn with self.mode and fall back to the unbounded
                # standardised deviation.
                print('WARNING: unknown working mode {}'.format(self.mode))
                diff = xn
        else:
            diff = x - mu
        if self.use_abs:
            diff = K.abs(diff)
        if self.use_square:
            diff = K.square(diff)
        return diff

    def compute_output_shape(self, input_shape):
        # Elementwise transform: output shape mirrors the input shape.
        return input_shape
class ECSSD(BaseImageDataset):
    """ECSSD salient-object dataset (1000 images with binary object masks)."""

    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None):
        if root is None:
            root = env_settings().ecssd_dir
        super().__init__('ECSSD', root, image_loader)
        self.image_list = self._load_dataset(min_area=min_area)
        if data_fraction is not None:
            raise NotImplementedError

    def _load_dataset(self, min_area=None):
        # Keep image ids 0001..1000 whose mask area exceeds min_area (if given).
        kept = []
        for image_id in range(1, 1001):
            mask = imread_indexed(os.path.join(self.root, 'ground_truth_mask', '{:04d}.png'.format(image_id)))
            if min_area is None or (mask > 0).sum() > min_area:
                kept.append(image_id)
        return kept

    def get_name(self):
        return 'ecssd'

    def has_segmentation_info(self):
        return True

    def get_image_info(self, im_id):
        mask = imread_indexed(os.path.join(self.root, 'ground_truth_mask', '{:04d}.png'.format(self.image_list[im_id])))
        mask = torch.Tensor(mask == 255)
        bbox = masks_to_bboxes(mask, fmt='t').view(4)
        # A box is valid only when it has positive width and height.
        valid = (bbox[2] > 0) & (bbox[3] > 0)
        visible = valid.clone().byte()
        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}

    def get_meta_info(self, im_id):
        # ECSSD ships no class/motion metadata, so every field is None.
        return OrderedDict({'object_class_name': None, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None})

    def get_image(self, image_id, anno=None):
        frame = self.image_loader(os.path.join(self.root, 'images', '{:04d}.jpg'.format(self.image_list[image_id])))
        if anno is None:
            anno = self.get_image_info(image_id)
        object_meta = self.get_meta_info(image_id)
        return frame, anno, object_meta
def compute_similarity_transform(S1, S2):
    """Align S1 to S2 with a similarity transform (scaled rotation + translation).

    Solves the orthogonal Procrustes problem: find scale s, rotation R and
    translation t minimising ||s*R*S1 + t - S2||. Inputs are point sets of
    shape (dim, N) with dim in {2, 3}; (N, dim) inputs are transposed on the
    way in and back out. Returns the transformed copy of S1.
    """
    transposed = False
    if S1.shape[0] not in (2, 3):
        S1, S2 = S1.T, S2.T
        transposed = True
    assert S2.shape[1] == S1.shape[1]

    # 1. Centre both point sets.
    mu1 = S1.mean(axis=1, keepdims=True)
    mu2 = S2.mean(axis=1, keepdims=True)
    X1 = S1 - mu1
    X2 = S2 - mu2

    # 2. Total variance of the source set (used for the scale estimate).
    var1 = np.sum(X1 ** 2)

    # 3. Optimal rotation from the SVD of the covariance; the sign flip on the
    #    last singular direction guarantees det(R) = +1 (no reflection).
    K = X1.dot(X2.T)
    U, s, Vh = np.linalg.svd(K)
    V = Vh.T
    Z = np.eye(U.shape[0])
    Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))
    R = V.dot(Z.dot(U.T))

    # 4. Optimal scale and translation.
    scale = np.trace(R.dot(K)) / var1
    t = mu2 - scale * R.dot(mu1)

    # 5. Apply the recovered transform to the source points.
    S1_hat = scale * R.dot(S1) + t
    return S1_hat.T if transposed else S1_hat
def get_model(params):
    """Build the module dict ('rep' encoder plus one decoder per task) for the
    dataset named in params['dataset'].

    Every module is moved to CUDA; when params['parallel'] is truthy it is
    first wrapped in nn.DataParallel. Returns the dict, or None implicitly if
    the dataset name is unrecognised (matching the original behaviour).
    """
    def _wrap(module, parallel):
        # Optionally data-parallelise, then move to GPU; returns the module.
        if parallel:
            module = nn.DataParallel(module)
        module.cuda()
        return module

    data = params['dataset']
    parallel = params['parallel']
    if 'mnist' in data:
        model = {'rep': _wrap(MultiLeNetR(), parallel)}
        if 'L' in params['tasks']:
            model['L'] = _wrap(MultiLeNetO(), parallel)
        if 'R' in params['tasks']:
            model['R'] = _wrap(MultiLeNetO(), parallel)
        return model
    if 'cityscapes' in data:
        model = {'rep': _wrap(get_segmentation_encoder(), parallel)}
        if 'S' in params['tasks']:
            model['S'] = _wrap(SegmentationDecoder(num_class=19, task_type='C'), parallel)
        if 'I' in params['tasks']:
            # BUGFIX: the original wrapped model['R'] here (copy-paste typo),
            # which raised a KeyError when task 'R' was absent and left the
            # 'I' decoder unparallelised.
            model['I'] = _wrap(SegmentationDecoder(num_class=2, task_type='R'), parallel)
        if 'D' in params['tasks']:
            model['D'] = _wrap(SegmentationDecoder(num_class=1, task_type='R'), parallel)
        return model
    if 'celeba' in data:
        model = {'rep': _wrap(ResNet(BasicBlock, [2, 2, 2, 2]), parallel)}
        for t in params['tasks']:
            model[t] = _wrap(FaceAttributeDecoder(), parallel)
        return model
class MobileViTEncoder(nn.Module):
    """Stack of MobileViT stages: two MobileNet stages followed by three
    MobileViT stages, with optional dilation to realise output strides 8/16."""

    def __init__(self, config: MobileViTConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList()
        self.gradient_checkpointing = False

        # output_stride 8 dilates stages 4 and 5; 16 dilates only stage 5;
        # the default keeps every stage strided.
        dilate_layer_4 = config.output_stride == 8
        dilate_layer_5 = config.output_stride in (8, 16)
        dilation = 1

        sizes = config.neck_hidden_sizes
        self.layer.append(MobileViTMobileNetLayer(config, in_channels=sizes[0], out_channels=sizes[1], stride=1, num_stages=1))
        self.layer.append(MobileViTMobileNetLayer(config, in_channels=sizes[1], out_channels=sizes[2], stride=2, num_stages=3))
        self.layer.append(MobileViTLayer(config, in_channels=sizes[2], out_channels=sizes[3], stride=2, hidden_size=config.hidden_sizes[0], num_stages=2))
        if dilate_layer_4:
            dilation *= 2
        self.layer.append(MobileViTLayer(config, in_channels=sizes[3], out_channels=sizes[4], stride=2, hidden_size=config.hidden_sizes[1], num_stages=4, dilation=dilation))
        if dilate_layer_5:
            dilation *= 2
        self.layer.append(MobileViTLayer(config, in_channels=sizes[4], out_channels=sizes[5], stride=2, hidden_size=config.hidden_sizes[2], num_stages=3, dilation=dilation))

    def forward(self, hidden_states: torch.Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> Union[tuple, BaseModelOutputWithNoAttention]:
        all_hidden_states = () if output_hidden_states else None
        for layer_module in self.layer:
            if self.gradient_checkpointing and self.training:
                # Recompute the stage during backward instead of storing its
                # activations; the closure captures the current module.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)
                    return custom_forward
                hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(layer_module), hidden_states)
            else:
                hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
def build_msev2_yaml():
    """Write a minimal fake-model tuning config exercising the mse_v2 strategy
    to ./mse_yaml.yaml (UTF-8)."""
    # The literal is the test fixture content; reproduced verbatim.
    content = '\n model:\n name: fake_yaml\n framework: tensorflow\n inputs: x\n outputs: op2_to_store\n device: cpu\n evaluation:\n accuracy:\n metric:\n topk: 1\n tuning:\n strategy:\n name: mse_v2\n accuracy_criterion:\n relative: 0.01\n exit_policy:\n max_trials: 10\n timeout: 3600\n '
    with open('mse_yaml.yaml', 'w', encoding='utf-8') as handle:
        handle.write(content)
# NOTE(review): the bare `_module()` call below looks like a mangled
# registration decorator (e.g. `@MODELS.register_module()`); confirm against
# the upstream source.
_module()
class GridRCNN(TwoStageDetector):
    """Grid R-CNN detector: a thin configuration wrapper that forwards all
    components (backbone, neck, RPN head, RoI head, train/test configs,
    preprocessor, init config) unchanged to the TwoStageDetector base class.
    """
    def __init__(self, backbone: ConfigType, rpn_head: ConfigType, roi_head: ConfigType, train_cfg: ConfigType, test_cfg: ConfigType, neck: OptConfigType=None, data_preprocessor: OptConfigType=None, init_cfg: OptMultiConfig=None) -> None:
        super().__init__(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, data_preprocessor=data_preprocessor, init_cfg=init_cfg)
def calculate_confs_on_correct(probabilities, class_idx, gt_targets):
    """For samples whose ground truth is class_idx, return the confidences on
    the correctly classified ones and the fraction predicted as that class.

    Returns (confs: np.ndarray, frac_classified_as: float).
    """
    # Rows whose ground-truth label matches the requested class.
    class_rows = torch.nonzero(gt_targets == class_idx, as_tuple=False).squeeze()
    # Column of `probabilities` corresponding to class_idx (project mapping).
    col = map_index(probabilities.shape[1], class_idx)
    _, predictions = torch.max(probabilities[class_rows], dim=1)
    hits = class_rows[predictions == col]
    confs = probabilities[hits, col].numpy()
    frac_classified_as = torch.mean((predictions == col).float()).item()
    return confs, frac_classified_as
def get_marginal_density(layer_config, schema_tail, x_shape):
    """Assemble the MarginalDensity for one latent layer: likelihood p(x|z),
    recursive prior p(z) over the remaining schema, and a diagonal-Gaussian
    approximate posterior q(z|x)."""
    # Construction order preserved (these builders may create parameters).
    likelihood, z_shape = get_likelihood(layer_config, schema_tail, x_shape)
    prior = get_density_recursive(schema_tail, z_shape)
    posterior_coupler = get_coupler(input_shape=x_shape, num_channels_per_output=layer_config['num_z_channels'], config=layer_config['q_coupler'])
    approx_posterior = DiagonalGaussianConditionalDensity(coupler=posterior_coupler)
    return MarginalDensity(prior=prior, likelihood=likelihood, approx_posterior=approx_posterior)
def logging_setup(out_path=None):
    """(Re)initialise root logging: INFO level, timestamped format, always
    echoing to stdout and additionally to `out_path` when one is given.

    Any pre-existing root handlers are removed first so repeated calls do not
    duplicate output.
    """
    if logging.root:
        del logging.root.handlers[:]
    handlers = []
    if out_path is not None:
        # BUGFIX: the original always built a FileHandler, so calling with the
        # default out_path=None created a log file literally named "None".
        handlers.append(logging.FileHandler(str(out_path)))
    handlers.append(logging.StreamHandler(stream=sys.stdout))
    logging.basicConfig(level=logging.INFO, handlers=handlers, format='[%(asctime)s/%(levelname)s/%(module)s] %(message)s', datefmt='%Y-%m-%d/%H:%M')
def format_to_lines_tfds(args):
    """Shard tokenised json corpora into fixed-size json files.

    For every sub-directory of args.raw_path, the *.json files are processed
    in parallel by `_format_to_lines` and written to args.save_path in shards
    of args.shard_size documents named '<dataset>.<split>.<shard>.json' (the
    'validation' split is renamed 'valid' in filenames).
    """
    def _dump_shard(dataset_name, corpus_type, shard_idx, docs):
        # Write one shard; 'validation' is shortened to 'valid' in filenames.
        type_name = 'valid' if corpus_type == 'validation' else corpus_type
        pt_file = '{:s}.{:s}.{:d}.json'.format(dataset_name, type_name, shard_idx)
        with open(pjoin(args.save_path, pt_file), 'w') as save:
            save.write(json.dumps(docs))

    tokenized_sub_dirs = os.listdir(args.raw_path)
    dataset_name = os.path.dirname(args.save_path).split('/')[-1]
    if not os.path.isdir(args.save_path):
        os.makedirs(args.save_path)

    # Collect the json files of each corpus split.
    corpora = {}
    for tokenized_sub_dir in tokenized_sub_dirs:
        path = pjoin(args.raw_path, tokenized_sub_dir)
        corpora[tokenized_sub_dir] = list(glob.glob(pjoin(path, '*.json')))

    for corpus_type in tokenized_sub_dirs:
        a_lst = [(f, args) for f in corpora[corpus_type]]
        pool = Pool(args.n_cpus)
        dataset = []
        p_ct = 0
        for d in pool.imap_unordered(_format_to_lines, a_lst):
            dataset.append(d)
            if len(dataset) >= args.shard_size:
                _dump_shard(dataset_name, corpus_type, p_ct, dataset)
                p_ct += 1
                dataset = []
        pool.close()
        pool.join()
        if dataset:
            # Flush the final, partially filled shard.
            _dump_shard(dataset_name, corpus_type, p_ct, dataset)
            p_ct += 1
class SceneObjectClass(object):
    """A class of scene objects (name, instance count, attribute string) with
    a tree of spatially-related ('proximity') child classes."""

    def __init__(self, class_name: str=None, instances: int=None, attributes: str=None):
        self.class_name = class_name
        self.instances = instances
        self.attributes = attributes
        self.proximity_children = []

    def set_instances(self, instances: int):
        self.instances = instances

    def set_attributes(self, attributes: str):
        self.attributes = attributes

    def add_proximity_children(self, objectclass: 'SceneObjectClass'):
        self.proximity_children.append(objectclass)

    def in_subtree(self, objectclass: 'SceneObjectClass'):
        # True if `objectclass` equals this node or appears anywhere below it.
        if objectclass == self:
            return True
        return any(child.in_subtree(objectclass) for child in self.proximity_children)

    def print_subtree(self):
        """Return (joined_string, list_of_lines); child lines get a '| ' prefix."""
        lines = [str(self)]
        for child in self.proximity_children:
            _, child_lines = child.print_subtree()
            lines.extend('| ' + child_line for child_line in child_lines)
        return '\n'.join(lines), lines

    def __str__(self):
        return '{} x {} : {}'.format(self.instances, self.class_name, self.attributes)

    def to_json(self, recursive: bool=True):
        out = {'class_name': self.class_name, 'instances': self.instances, 'attributes': self.attributes}
        if recursive:
            out['proximity_children'] = [child.to_json() for child in self.proximity_children]
        return out

    def from_json(self, dictionary: dict, recursive: bool=True):
        """Populate self from a dict produced by to_json; returns self."""
        self.class_name = dictionary['class_name']
        self.instances = dictionary['instances']
        self.attributes = dictionary['attributes']
        if recursive:
            self.proximity_children = [SceneObjectClass().from_json(child) for child in dictionary['proximity_children']]
        return self

    def __eq__(self, other):
        if not isinstance(other, SceneObjectClass):
            return False
        if (self.class_name, self.instances, self.attributes) != (other.class_name, other.instances, other.attributes):
            return False
        if len(self.proximity_children) != len(other.proximity_children):
            return False
        return all(a == b for (a, b) in zip(self.proximity_children, other.proximity_children))

    def bfs_enumeration(self):
        # NOTE: despite the name, this is a depth-first pre-order traversal
        # (kept for interface compatibility).
        nodes = [self]
        for child in self.proximity_children:
            nodes.extend(child.bfs_enumeration())
        return nodes
class SuperMobileResnetBlock(nn.Module):
    """Mobile ResNet block: two SuperSeparableConv2d stages with a residual
    connection, supporting searchable channel configurations."""

    def __init__(self, dim, padding_type, norm_layer, dropout_rate, use_bias):
        super(SuperMobileResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, dropout_rate, use_bias)

    @staticmethod
    def _padding(padding_type):
        """Return ([optional explicit padding layer], conv padding amount)."""
        if padding_type == 'reflect':
            return [nn.ReflectionPad2d(1)], 0
        if padding_type == 'replicate':
            return [nn.ReplicationPad2d(1)], 0
        if padding_type == 'zero':
            return [], 1
        raise NotImplementedError('padding [%s] is not implemented' % padding_type)

    def build_conv_block(self, dim, padding_type, norm_layer, dropout_rate, use_bias):
        # Two conv stages; the padding selection the original duplicated
        # inline is factored into _padding. `use_bias` is accepted but unused,
        # matching the original interface.
        pad_layers, p = self._padding(padding_type)
        conv_block = list(pad_layers)
        conv_block += [SuperSeparableConv2d(in_channels=dim, out_channels=dim, kernel_size=3, padding=p, stride=1), norm_layer(dim), nn.ReLU(True)]
        conv_block += [nn.Dropout(dropout_rate)]
        pad_layers, p = self._padding(padding_type)
        conv_block += pad_layers
        conv_block += [SuperSeparableConv2d(in_channels=dim, out_channels=dim, kernel_size=3, padding=p, stride=1), norm_layer(dim)]
        return nn.Sequential(*conv_block)

    def forward(self, input, config):
        x = input
        cnt = 0
        for module in self.conv_block:
            if isinstance(module, SuperSeparableConv2d):
                # The second separable conv must restore the block's input
                # channel count so the residual addition is shape-compatible.
                if cnt == 1:
                    config['channel'] = input.size(1)
                x = module(x, config)
                cnt += 1
            else:
                x = module(x)
        return input + x
def load_and_cache_examples(args, task, tokenizer, evaluate=False):
    """Create (or load from a disk cache) the XNLI TensorDataset for `task`.

    The cache filename encodes split, model name, max sequence length, task
    and language. In distributed runs only rank 0 builds the cache while the
    other ranks wait at a barrier, then everyone loads it.
    """
    # Training in distributed mode: non-primary ranks block here so that only
    # the first process converts the dataset and writes the cache.
    if ((args.local_rank not in [(- 1), 0]) and (not evaluate)):
        torch.distributed.barrier()
    processor = processors[task](language=args.language, train_language=args.train_language)
    output_mode = output_modes[task]
    # Cache key: split / model / max_seq_length / task / language (training
    # uses train_language when it is set).
    cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}_{}'.format(('test' if evaluate else 'train'), list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length), str(task), str((args.train_language if ((not evaluate) and (args.train_language is not None)) else args.language))))
    if (os.path.exists(cached_features_file) and (not args.overwrite_cache)):
        logger.info('Loading features from cached file %s', cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info('Creating features from dataset file at %s', args.data_dir)
        label_list = processor.get_labels()
        examples = (processor.get_test_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir))
        features = convert_examples_to_features(examples, tokenizer, label_list=label_list, max_length=args.max_seq_length, output_mode=output_mode, pad_on_left=False, pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=0)
        # Only rank -1 (single process) or rank 0 persists the cache.
        if (args.local_rank in [(- 1), 0]):
            logger.info('Saving features into cached file %s', cached_features_file)
            torch.save(features, cached_features_file)
    # Rank 0 releases the waiting ranks now that the cache exists.
    if ((args.local_rank == 0) and (not evaluate)):
        torch.distributed.barrier()
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    if (output_mode == 'classification'):
        all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    else:
        raise ValueError('No other `output_mode` for XNLI.')
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
    return dataset
def is_tensorflow_tensor(arg: Any) -> bool:
    """Return True if `arg` is a tf.Tensor; False when TensorFlow is not
    available. The import is deferred so TF is only loaded on demand."""
    if not sf.util.tf_available:
        return False
    import tensorflow as tf
    return isinstance(arg, tf.Tensor)
class MetricHandlerBase():
    """Base class for metric handlers: stores a display name and declares the
    collect() hook that subclasses override."""

    def __init__(self, name, *args, **kwargs):
        # Extra positional/keyword arguments are accepted for subclass
        # signature compatibility and ignored here.
        self.name = name

    def collect(self, collection, time, mode='train'):
        """No-op hook: subclasses record `collection` at `time` for `mode`."""
        pass
def save_results(logits_matrix, targets_list, class_to_idx, args):
    """Pickle [logits_matrix, targets_list, class_to_idx] to
    <args.ckpt>/<args.logname>_test_results.pkl."""
    print('Saving inference results ...')
    destination = os.path.join(args.ckpt, args.logname + '_test_results.pkl')
    with open(destination, 'wb') as handle:
        pickle.dump([logits_matrix, targets_list, class_to_idx], handle)
def ham_mod_batch(x, t, bs, U, Yh, beta, a, b, c):
    """Damped-Hamiltonian time derivative for a mini-batch.

    Builds F = [[0, I], [-I, -beta*I]] over the half-dimension of `x` and
    applies it to the batch gradient of the objective. `t` is unused but kept
    for ODE-solver signature compatibility.
    """
    half = len(x) // 2
    zeros = np.zeros((half, half))
    identity = np.eye(half)
    damping = np.array(beta * np.eye(half))
    F = np.vstack((np.hstack((zeros, identity)), np.hstack((-identity, -damping))))
    grad = gradient_batch(bs, x, U, Yh, a, b, c)
    return F.dot(grad)
def data_processing():
    """Load the nyc_taxi public dataset splits and apply the shared in-place
    preprocessing chain: deduplicate, impute, add datetime features,
    standard-scale, and roll into (lookback, horizon) windows.

    NOTE(review): `lookback` and `horizon` are read from enclosing/module
    scope — confirm they are defined before this is called.
    """
    (tsdata_train, tsdata_val, tsdata_test) = get_public_dataset(name='nyc_taxi')
    scaler = StandardScaler()
    for tsdata in [tsdata_train, tsdata_val, tsdata_test]:
        # fit=... ensures scaler statistics come from the training split only;
        # val/test reuse the fitted scaler.
        tsdata.deduplicate().impute().gen_dt_feature().scale(scaler, fit=(tsdata is tsdata_train)).roll(lookback=lookback, horizon=horizon)
def val(args):
    """Few-shot segmentation validation over the 4 fold groups.

    For each group, restores the group-specific checkpoint (step taken from
    the module-level Restore_Step_list), runs 5-shot inference over that
    group's validation loader and accumulates IoU into `evaluations`.
    Returns (mean IoU over classes, per-class IoU list).
    """
    model = get_model(args)
    model.eval()
    evaluations = NoteEvaluation.Evaluation(args)
    for group in range(4):
        # Each fold group has its own checkpoint step to restore.
        args.restore_step = Restore_Step_list[group]
        print(('GROUP %d' % group))
        args.group = group
        evaluations.group = args.group
        val_dataloader = val_loader(args, k_shot=K_SHOT)
        restore(args, model)
        it = 0
        for data in val_dataloader:
            begin_time = time.time()
            it = (it + 1)
            (query_img, query_mask, support_img, support_mask, idx, size) = data
            (query_img, query_mask, support_img, support_mask, idx) = (query_img.cuda(), query_mask.cuda(), support_img.cuda(), support_mask.cuda(), idx.cuda())
            with torch.no_grad():
                logits = model.forward_5shot(query_img, support_img, support_mask)
            # Resize back to the original image size before scoring (bilinear
            # for the image, nearest for the hard mask).
            # NOTE(review): F.upsample is deprecated in favour of F.interpolate.
            query_img = F.upsample(query_img, size=(size[0], size[1]), mode='bilinear')
            query_mask = F.upsample(query_mask, size=(size[0], size[1]), mode='nearest')
            (values, pred) = model.get_pred(logits, query_img)
            evaluations.update_evl(idx, query_mask, pred, 0)
            end_time = time.time()
            # Per-iteration throughput, printed on one carriage-return line.
            ImgPerSec = (1 / (end_time - begin_time))
            print(('It has tested %d, %.2f images/s' % (it, ImgPerSec)), end='\r')
        print(('Group %d: %.4f ' % (args.group, evaluations.group_mean_iou[args.group])))
    iou = evaluations.iou_list
    print('IOU:', iou)
    mIoU = np.mean(iou)
    print('mIoU: ', mIoU)
    print('group0_iou', evaluations.group_mean_iou[0])
    print('group1_iou', evaluations.group_mean_iou[1])
    print('group2_iou', evaluations.group_mean_iou[2])
    print('group3_iou', evaluations.group_mean_iou[3])
    print(evaluations.group_mean_iou)
    return (mIoU, iou)
def wer(ctm_edit_lines):
    """Word error rate over ctm-edit lines.

    Field 7 of each line is the edit label: 'sil' entries are ignored,
    'ins'/'del'/'sub' count as errors, anything else as correct. Returns
    errors/words, 0 for an empty utterance, and inf for the (theoretical)
    errors-without-words case.
    """
    total_words = 0
    wrong_words = 0
    for line in ctm_edit_lines:
        label = line[7]
        if label == 'sil':
            continue
        total_words += 1
        if label in ('ins', 'del', 'sub'):
            wrong_words += 1
    if total_words == 0:
        return float('inf') if wrong_words > 0 else 0
    return float(wrong_words) / total_words
def load_state_ckpt(model_path, model):
    """Load a checkpoint into `model`, inflating 2D weights to 3D where the
    model expects an extra (temporal) dimension at dim 2.

    Rank-mismatched weights are tiled along dim 2 and divided by the tiling
    factor so the initial summed response matches the 2D filter. Loading is
    non-strict and restricted to keys the model actually has.
    """
    checkpoint = torch.load(model_path)
    model_dict = model.state_dict()
    for (key, v) in checkpoint['state_dict'].items():
        if (key in model_dict):
            v1 = model_dict[key]
            if (len(v.shape) != len(v1.shape)):
                # Shapes must agree everywhere except the inflated dim 2.
                assert (v1.shape[:2] == v.shape[:2]), 'Workspace blob {} with shape {} does not match weights file shape {}'.format(key, v1.shape, v.shape)
                assert (v1.shape[(- 2):] == v.shape[(- 2):]), 'Workspace blob {} with shape {} does not match weights file shape {}'.format(key, v1.shape, v.shape)
                num_inflate = v1.shape[2]
                # Replicate the 2D kernel num_inflate times and rescale.
                checkpoint['state_dict'][key] = (torch.stack(([checkpoint['state_dict'][key]] * num_inflate), dim=2) / float(num_inflate))
                assert (v1.shape == checkpoint['state_dict'][key].shape), 'Workspace blob {} with shape {} does not match weights file shape {}'.format(key, v1.shape, v.shape)
    # Drop checkpoint entries the model does not have, then merge and load.
    checkpoint['state_dict'] = {k: v for (k, v) in checkpoint['state_dict'].items() if (k in model_dict)}
    model_dict.update(checkpoint['state_dict'])
    model.load_state_dict(model_dict, strict=False)
    ckpt_keys = set(checkpoint['state_dict'].keys())
    own_keys = set(model.state_dict().keys())
    # NOTE(review): `missing_keys` is computed but never used or returned —
    # presumably meant to be logged; confirm intent.
    missing_keys = (own_keys - ckpt_keys)
class Classifier(nn.Module):
    """Single linear classification head; with type='wn' the layer is
    weight-normalised."""

    def __init__(self, embed_dim, class_num, type='linear'):
        super(Classifier, self).__init__()
        # NOTE: `type` shadows the builtin; kept for caller compatibility.
        self.type = type
        if type == 'wn':
            fc = nn.utils.weight_norm(nn.Linear(embed_dim, class_num), name='weight')
        else:
            fc = nn.Linear(embed_dim, class_num)
        fc.apply(init_weights)
        self.fc = fc

    def forward(self, x):
        return self.fc(x)
class EarlyStopMonitor():
    """Signals early stopping once the metric has moved in the wrong direction
    more than `patience` consecutive steps ('min' mode penalises increases,
    'max' mode penalises decreases)."""

    def __init__(self, patience, mode='min'):
        assert mode in {'min', 'max'}, "`mode` must be one of 'min' or 'max'"
        self.log = []
        self.mode = mode
        self.count = 0
        self.patience = patience

    def step(self, metric):
        """Record `metric`; return True once patience is exhausted."""
        if not self.log:
            # First observation: nothing to compare against yet.
            self.log.append(metric)
            return False
        went_up = metric > self.log[-1]
        # A move counts against patience when it opposes the monitored
        # direction; any favourable (or flat) move resets the streak.
        if went_up == (self.mode == 'min'):
            self.count += 1
        else:
            self.count = 0
        self.log.append(metric)
        return self.count > self.patience
# NOTE(review): the `field(...)` defaults below require a @dataclass decorator,
# which appears to have been stripped from this snippet — confirm upstream.
class TTSDatasetArguments():
    """Dataclass-style container for TTS dataset directory paths."""
    audio_folder_path: Optional[str] = field(default=None, metadata={'help': 'The path to the directory of audios.'})
    text_folder_path: Optional[str] = field(default=None, metadata={'help': 'The path to the directory of texts.'})
class Task():
    """Plain record describing one optimization task and its execution state.

    `result` and `q_model_path` start empty and are filled in once the task
    has produced output.
    """

    def __init__(self, task_id, arguments, workers, status, script_url, optimized, approach, requirement, result='', q_model_path=''):
        # Identity and invocation details.
        self.task_id = task_id
        self.arguments = arguments
        self.workers = workers
        self.status = status
        self.script_url = script_url
        # Optimization configuration.
        self.optimized = optimized
        self.approach = approach
        self.requirement = requirement
        # Outputs (populated after completion).
        self.result = result
        self.q_model_path = q_model_path
def ibn_densenet121(**kwargs):
    """Construct the IBN-DenseNet-121 model by delegating to the generic
    builder; any extra keyword arguments are forwarded unchanged."""
    return get_ibndensenet(num_layers=121, model_name='ibn_densenet121', **kwargs)
_executable('ffmpeg')
# NOTE(review): the bare call above looks like a stripped decorator
# (e.g. `@requires_executable('ffmpeg')`); confirm against the upstream source.
def concat_video(video_list, out_file, vcodec=None, acodec=None, log_level='info', print_cmd=False):
    """Concatenate videos via ffmpeg's concat demuxer.

    Writes the input paths to a temporary list file, invokes convert_video
    with '-f concat -safe 0', and defaults both codecs to stream copy. The
    temporary file is removed afterwards.
    """
    _, list_path = tempfile.mkstemp(suffix='.txt', text=True)
    with open(list_path, 'w') as list_file:
        for filename in video_list:
            list_file.write(f'file {osp.abspath(filename)}\n')
    options = {'log_level': log_level}
    if vcodec is None:
        options['vcodec'] = 'copy'
    if acodec is None:
        options['acodec'] = 'copy'
    convert_video(list_path, out_file, print_cmd, pre_options='-f concat -safe 0', **options)
    os.remove(list_path)
class PostProcessCocoPt(PostProcessCoco):
    """PyTorch COCO post-processor: thresholds detections by score and emits
    them as [image_id, ymin, xmin, ymax, xmax, score, class] rows while
    accumulating accuracy counters on the base class."""

    def __init__(self, use_inv_map, score_threshold):
        super().__init__()
        self.use_inv_map = use_inv_map
        self.score_threshold = score_threshold

    def __call__(self, results, ids, expected=None, result_dict=None):
        # results = [boxes, classes, scores] batched per image.
        processed_results = []
        batch_size = len(results[0])
        for sample in range(batch_size):
            self.content_ids.append(ids[sample])
            processed_results.append([])
            boxes = results[0][sample]
            classes = results[1][sample]
            expected_classes = expected[sample][0]
            scores = results[2][sample]
            for det, score in enumerate(scores):
                # Assumes scores are sorted descending, so the first
                # sub-threshold score ends this image's detections.
                if score < self.score_threshold:
                    break
                label = int(classes[det])
                if label in expected_classes:
                    self.good += 1
                box = boxes[det]
                # Box coordinates are swapped into [ymin, xmin, ymax, xmax].
                processed_results[sample].append([float(ids[sample]), box[1], box[0], box[3], box[2], score, float(label)])
                self.total += 1
        return processed_results
def stack(inputs, axis=1):
    """Stack a list of Variables along `axis` (default 1) by delegating to the
    JVM-side 'stack' op; returns the wrapping Variable."""
    return Variable.from_jvalue(callZooFunc('float', 'stack', inputs, axis))
def write_files(data, path):
    """Shuffle each dataset/partition split in place and write it to
    OUTPUT_PATH/<dataset>_<partition>.txt as 'task<TAB>story' lines.

    NOTE(review): the `path` parameter is unused — output always goes to the
    module-level OUTPUT_PATH; confirm which is intended.
    """
    for dataset_name in DATASETS:
        for partition in PARTITIONS:
            samples = data[dataset_name][partition]
            random.shuffle(samples)
            out_path = os.path.join(OUTPUT_PATH, '{}_{}.txt'.format(dataset_name, partition))
            # FIX: use a context manager so the handle is closed even if a
            # write fails (the original leaked the handle on error).
            with open(out_path, 'w+') as out_file:
                for sample in samples:
                    story = ' '.join(sample[0])
                    task = sample[1]
                    out_file.write('{}\t{}\n'.format(task, story))
def valence(v: Graph.Vertex) -> int:
    """Total bond order over the edges incident to vertex `v` (its valence)."""
    total = 0
    for edge in v.incidentEdges:
        total += btToOrder[edge.bondType]
    return total
# NOTE(review): the bare `_config` below looks like a stripped decorator
# (e.g. sacred's `@ex.config`); under sacred the locals assigned in the body
# become config entries, which is why `cfg` is assigned but never returned.
# Confirm against the upstream source before modifying.
_config
def il_tiny():
    """Config preset: imitation learning on the 'tiny' expert-trajectory set
    for 3000 epochs with periodic logging and checkpointing."""
    cfg = {'training': {'dataloader_fn_kwargs': {'data_path': '/mnt/data/expert_trajs/tiny'}, 'num_epochs': 3000}, 'saving': {'ticks_per_epoch': 1, 'log_interval': 500, 'save_interval': 100}}
class SegformerLayer(nn.Module):
    """One SegFormer encoder block: pre-norm efficient self-attention followed
    by a pre-norm Mix-FFN, each with a residual connection and stochastic
    depth (DropPath)."""

    def __init__(self, config, hidden_size, num_attention_heads, drop_path, sr_ratio, mlp_ratio):
        super().__init__()
        self.layer_norm_1 = nn.LayerNorm(hidden_size)
        self.attention = SegformerAttention(config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sr_ratio=sr_ratio)
        # Identity when drop_path == 0, so inference costs nothing extra.
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.layer_norm_2 = nn.LayerNorm(hidden_size)
        self.mlp = SegformerMixFFN(config, in_features=hidden_size, hidden_features=int(hidden_size * mlp_ratio))

    def forward(self, hidden_states, height, width, output_attentions=False):
        # Attention sub-block (pre-norm + residual with drop-path).
        attn_outputs = self.attention(self.layer_norm_1(hidden_states), height, width, output_attentions=output_attentions)
        extra_outputs = attn_outputs[1:]
        hidden_states = self.drop_path(attn_outputs[0]) + hidden_states
        # Mix-FFN sub-block (pre-norm + residual with drop-path).
        ffn_output = self.drop_path(self.mlp(self.layer_norm_2(hidden_states), height, width))
        return (ffn_output + hidden_states,) + extra_outputs
class DatasetLoader_pano(data.Dataset):
    """Panoramic semantic-mapping dataset: serves normalised RGB frames, the
    precomputed map-projection indices and outlier masks, and the
    ground-truth semantic map remapped from mpcat40 to eigen13 labels."""

    def __init__(self, cfg, split='train', resize_index=False):
        self.split = split
        self.resize_index = resize_index
        # Split-specific sub-directory; training additionally forces
        # resize_index on.
        if (split == 'train'):
            self.root = (cfg['root'] + '/training')
            self.resize_index = True
        elif (split == 'val'):
            self.root = (cfg['root'] + '/valid')
        elif (split == 'test'):
            self.root = (cfg['root'] + '/testing')
        self.feature_type = cfg['feature_type']
        # `file_folder_name` / `file_folder_gt_name` come from module scope.
        self.files = os.listdir(os.path.join(self.root, file_folder_name))
        # mpcat40 -> eigen13 label mapping table.
        self.df = pd.read_csv('eigen13_mapping_from_mpcat40.csv')
        self.files = np.array(self.files)
        self.envs = np.array([x.split('.')[0] for x in self.files])
        self.files_gt = os.listdir(os.path.join(self.root, file_folder_gt_name))
        assert (len(self.files) == len(self.files_gt))
        assert (len(self.files) > 0)
        self.available_idx = np.array(list(range(len(self.files))))

    def __len__(self):
        return len(self.available_idx)

    def __getitem__(self, index):
        env_index = self.available_idx[index]
        file = self.files[env_index]
        env = self.envs[env_index]
        # Per-frame RGB + depth.
        h5file = h5py.File(os.path.join(self.root, file_folder_name, file), 'r')
        rgb = np.array(h5file['rgb'])
        depth = np.array(h5file['depth'])
        h5file.close()
        # Ground-truth semantic map (mpcat40 label space).
        h5file = h5py.File(os.path.join(self.root, file_folder_gt_name, file), 'r')
        semmap_gt = np.array(h5file['map_semantic'])
        # Remap every mpcat40 label (0..41) to its eigen13 id via the csv.
        for j in range(42):
            # NOTE(review): `labels = j` is dead code in the original.
            labels = j
            itemindex = np.where((semmap_gt == j))
            row = itemindex[0]
            column = itemindex[1]
            new_label = self.df.loc[((self.df['mpcat40index'] == j), ['eigen13id'])]
            new_label = np.array(new_label)[0][0].astype(np.uint8)
            semmap_gt[(row, column)] = new_label
        # NOTE(review): presumably the csv ids are offset by +100 so freshly
        # remapped cells are distinguishable from not-yet-remapped ones, and
        # this subtraction restores the real ids — confirm against the csv.
        semmap_gt = (semmap_gt - 100)
        # NOTE(review): '.format(self.split)' has no placeholder to fill, so
        # this path is identical for every split — possibly a lost '{}'.
        h5file = h5py.File(os.path.join(self.root, 'smnet_training_data_maxHIndices'.format(self.split), file), 'r')
        proj_indices = np.array(h5file['indices'])
        masks_outliers = np.array(h5file['masks_outliers'])
        h5file.close()
        rgb_no_norm = rgb
        # Normalise RGB to [0,1], HWC -> CHW, then the module-level transform.
        rgb_img = rgb.astype(np.float32)
        rgb_img = (rgb_img / 255.0)
        rgb_img = torch.FloatTensor(rgb_img).permute(2, 0, 1)
        rgb_img = normalize(rgb_img)
        depth_img = depth
        depth_img = depth_img.astype(np.float32)
        depth_img = torch.FloatTensor(depth_img).unsqueeze(0)
        depth_img = depth_normalize(depth_img)
        rgb = rgb_img
        depth = depth_img
        proj_indices = torch.from_numpy(proj_indices).long()
        masks_outliers = torch.from_numpy(masks_outliers).bool()
        masks_inliers = (~ masks_outliers)
        return (rgb, rgb_no_norm, masks_inliers, proj_indices, semmap_gt)
def update(i):
    """Matplotlib animation callback: replace the global surface plot with the
    frame-i field Pt[i] drawn over sim.grid (axes converted to nm units)."""
    global surf
    surf.remove()
    # `sim`, `ax`, `Pt` and `nm` come from the enclosing/module scope.
    surf = ax.plot_surface(*(sim.grid / nm), Pt[i], cmap='viridis')
    return [surf]
def parsedoc(path, format=None):
    """Parse a document, dispatching on explicit `format` or file extension.

    When neither matches (or `path` is not a string), each parser is tried in
    turn (pdf, docx, html) and the first success is returned. Returns None
    when every parser fails.
    """
    if isinstance(path, str):
        if format == 'pdf' or path.endswith('.pdf'):
            return parsepdf(path)
        if format == 'docx' or path.endswith('.docx'):
            return parsedocx(path)
        if format == 'html' or path.endswith(('.htm', '.html', '.xhtml')):
            return parsehtml(path)
    for parser in (parsepdf, parsedocx, parsehtml):
        try:
            return parser(path)
        except Exception:
            # FIX: the original used a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Best-effort: try the next parser.
            continue
class PythonStatementGenerator(object):
    """Translates an expression-tree IR into Python statement objects
    (PyAssignment, PyIfThenElseClause, PyReturn, ...)."""

    def __init__(self):
        # Maps (iterable-var name, index-var name) -> the local variable that
        # caches `iterable[index]`, so later GetElement lookups can reuse it.
        self.indexVariables = {}
        # Current emission depth; statements in if/else clauses are tagged
        # with it for later rendering.
        self.indentation = 0
def assignmentStatement(self, var, expr, replacement=None):
    """Generate the Python statement for `var := expr`.

    Special targets: VAR_COND yields a bare expression, VAR_RET a return
    statement, VAR_OUT a print. Otherwise an assignment is generated; if the
    left side turns out not to be an l-value and the expression is an `ite`,
    an explicit if/then/else clause is emitted instead.
    """
    try:
        if (var == VAR_COND):
            return self.pythonExpression(expr, replacement)[0]
        elif (var == VAR_RET):
            return PyReturn(self.pythonExpression(expr, replacement)[0])
        elif (var == VAR_OUT):
            return PyPrint(self.pythonExpression(expr, replacement)[0])
        try:
            # Remember `var` as the cached element for iter#/ind# pairs so
            # later GetElement lookups can be replaced by the variable.
            if ((expr.name == 'GetElement') and expr.args[0].name.startswith('iter#') and expr.args[1].name.startswith('ind#')):
                self.indexVariables[(expr.args[0].name, expr.args[1].name)] = var
        except NameError:
            pass
        except AttributeError:
            # Not every expr node has .name/.args; silently skip the caching.
            pass
        return self.generateAssignments(var, expr, replacement)
    except NotAnLValueException as ex:
        if (expr.name == 'ite'):
            return self.pythonIfThenElseClause(var, expr, replacement)
        else:
            raise
def generateAssignments(self, var, expr, replacement=None):
    """Build a PyAssignment for `var = expr`, collapsing redundant forms.

    When the expression already performs the assignment itself (element
    assignment / delete, possibly behind an `ite`) the left side is dropped;
    an `ite` whose else-branch is just `var` has that branch elided
    (`x = e if c else x` degenerates to a guarded assignment).
    """
    pyexpr = self.pythonExpression(expr, replacement)
    assignment = PyAssignment(PyVariable(var), pyexpr[0])
    try:
        if self.shouldEliminateLeftSide(expr):
            assignment = pyexpr[0]
            if (str(assignment.assigned.elseexpr) == str(var)):
                assignment.assigned.elseexpr = None
    except AttributeError:
        # assignment may not have .assigned.elseexpr; keep it unchanged.
        pass
    return self.extractAssignments(pyexpr[1], assignment)
def shouldEliminateLeftSide(self, expr):
    """True when the assignment's left side is redundant: element assignments
    and deletes, or an `ite` whose then-branch is one of those."""
    if expr.name in ('AssignElement', 'Delete'):
        return True
    return expr.name == 'ite' and self.shouldEliminateLeftSide(expr.args[1])
def areExpressionsEqual(self, expr1, expr2):
    """Structural equality on IR expressions: same name and same args.
    Returns False when the comparison raises TypeError."""
    try:
        return (expr1.name == expr2.name) and (expr1.args == expr2.args)
    except TypeError:
        return False
def getBoundVarName(self, bvarnumber):
    """Map a bound-variable number to a name: x, y, z, x1, y1, z1, x2, ...

    Cycles through 'x'/'y'/'z' (chr(120..122)) and appends the generation
    number (bvarnumber // 3) from the second round onward.
    """
    # BUGFIX: the original used Python-2 `/` semantics; under Python 3
    # `bvarnumber / 3` is a float, so the `== 0` test failed for 1 and 2
    # and the suffix rendered as e.g. '1.3333...'. Use floor division.
    generation = bvarnumber // 3
    letter = chr(120 + (bvarnumber % 3))
    return letter if generation == 0 else letter + str(generation)
def pythonIfThenElseClause(self, var, expr, replacement):
    """Lower `var := ite(cond, a, b)` into an explicit if/else clause.

    Each branch is first tried as a plain assignment; if the branch is not an
    l-value, the original value is assigned first and the branch is retried
    with the offending sub-expression substituted by `var`.

    NOTE(review): the tuples appended on the success paths carry
    (statement, indentation, replacement) while the fallback paths append
    (statement, indentation) — the trailing `replacement` looks like a
    misplaced parenthesis (probably meant as an argument to
    assignmentStatement); confirm against PyIfThenElseClause's expectations.
    """
    ret = []
    if (expr.name == 'ite'):
        # Condition line at the current indentation.
        ret = (ret + [(PyCondition(self.pythonExpression(expr.args[0])[0]), self.indentation)])
        self.indentation = (self.indentation + 1)
        try:
            ret = (ret + [(self.assignmentStatement(var, expr.args[1]), self.indentation, replacement)])
        except NotAnLValueException as ex:
            # Assign the original value, then redo the then-branch with the
            # offending sub-expression replaced by `var`.
            ret = (ret + [(PyAssignment(PyVariable(var), self.pythonExpression(ex.orig_val)[0]), self.indentation)])
            if (replacement == None):
                replacement = []
            ret = (ret + [(self.assignmentStatement(var, expr.args[1], (replacement + [(ex.orig_val, var)])), self.indentation)])
        self.indentation = (self.indentation - 1)
        ret = (ret + [('else', self.indentation)])
        self.indentation = (self.indentation + 1)
        try:
            ret = (ret + [(self.assignmentStatement(var, expr.args[2]), self.indentation, replacement)])
        except NotAnLValueException as ex:
            # Same recovery for the else-branch.
            ret = (ret + [(PyAssignment(PyVariable(var), self.pythonExpression(ex.orig_val)[0]), self.indentation, replacement)])
            if (replacement == None):
                replacement = []
            ret = (ret + [(self.assignmentStatement(var, expr.args[2], (replacement + [(ex.orig_val, var)])), self.indentation)])
        self.indentation = (self.indentation - 1)
    return PyIfThenElseClause(ret)
    def pythonExpression(self, expr, replacement=None):
        """Translate an intermediate expression tree node into a Py* AST node.

        Returns a two-element list [node, assignments] where `assignments` is
        a list of (target_variable, node, id) triples for memoized
        sub-expressions (deduplicated by id before returning).
        `replacement` is an optional list of (expression, variable_name)
        pairs; sub-expressions structurally equal to an entry's expression
        are substituted by the named variable.
        """
        assignments = []
        ret = None
        if replacement:
            # Substitute already-computed sub-expressions by their variables.
            try:
                for repl in replacement:
                    if self.areExpressionsEqual(expr, repl[0]):
                        ret = PyVariable(repl[1])
                    # NOTE(review): this unpacks expr.args as (arg, index)
                    # pairs; enumerate(expr.args) may have been intended --
                    # TODO confirm against the argument representation
                    # (a TypeError here is silently swallowed below).
                    for (arg, i) in expr.args:
                        if self.areExpressionsEqual(arg, repl[0]):
                            expr.args[i] = PyVariable(repl[1])
            except TypeError:
                pass
            except AttributeError:
                pass
        if (ret == None):
            try:
                # Recursively translate the children first; each call yields
                # [node, assignments].
                args_both = [self.pythonExpression(arg, replacement) for arg in expr.args]
                args = []
                for arg in args_both:
                    args.append(arg[0])
                    assignments = (assignments + arg[1])
                # Dispatch on the node name.
                if (expr.name == 'ListInit'):
                    ret = PyListInit(args)
                elif (expr.name == 'SetInit'):
                    ret = PySetInit(args)
                elif (expr.name == 'DictInit'):
                    ret = PyDictInit(args)
                elif (expr.name == 'TupleInit'):
                    ret = PyTupleInit(args)
                elif (expr.name == 'AssignElement'):
                    try:
                        ret = PyAssignment(PyGetElement(args[0], args[1]), args[2])
                    except NotAnLValueException as ex:
                        # remember the original container so callers can
                        # introduce a temporary variable for it
                        ex.orig_val = expr.args[0]
                        raise ex
                elif (expr.name in BINARY_OPS):
                    ret = PyBinaryOperation(args[0], expr.name, args[1])
                elif (expr.name in UNARY_OPS):
                    ret = PyUnaryOperation(expr.name, args[0])
                elif (expr.name == 'StrAppend'):
                    ret = PyStrAppend(args[0], args[1])
                elif (expr.name == 'ite'):
                    ret = PyIfThenElse(args[0], args[1], args[2])
                elif (expr.name == 'GetAttr'):
                    ret = PyGetAttr(args[0], args[1])
                elif (expr.name == 'Slice'):
                    ret = PySlice(args[0], args[1], args[2])
                elif (expr.name == 'GetElement'):
                    try:
                        # a registered index variable replaces the subscript
                        if ((args[0].name, args[1].name) in self.indexVariables):
                            ret = PyVariable(self.indexVariables[(args[0].name, args[1].name)])
                        else:
                            ret = PyGetElement(args[0], args[1])
                    except AttributeError:
                        try:
                            if (expr.args[0].name == 'pop'):
                                if (str(expr.args[1]) == '0'):
                                    ret = PyFuncCall('pop#list', [])
                                elif (str(expr.args[1]) == '1'):
                                    ret = PyFuncCall('pop#element', [])
                                else:
                                    ret = PyGetElement(args[0], args[1])
                        except AttributeError:
                            ret = PyGetElement(args[0], args[1])
                        # NOTE(review): this unconditionally overwrites ret,
                        # including the pop#list / pop#element results built
                        # just above -- looks suspicious, TODO confirm.
                        ret = PyGetElement(args[0], args[1])
                elif (expr.name == 'Delete'):
                    ret = PyDelete(args)
                elif (expr.name == 'FuncCall'):
                    ret = PyFuncCall(args[0], args[1:])
                elif (expr.name == 'ListComp'):
                    ret = PyListComp(args[1], [PyComprehension([PyVariable(self.getBoundVarName(x)) for x in range(int(str(args[0])))], args[(- 2)], [args[(- 1)]])])
                elif (expr.name == 'SetComp'):
                    ret = PySetComp(args[1], [PyComprehension([PyVariable(self.getBoundVarName(x)) for x in range(int(str(args[0])))], args[(- 2)], [args[(- 1)]])])
                elif (expr.name == 'DictComp'):
                    ret = PyDictComp(args[1], args[2], [PyComprehension([PyVariable(self.getBoundVarName(x)) for x in range(int(str(args[0])))], args[(- 2)], [args[(- 1)]])])
                elif (expr.name == 'GeneratorExp'):
                    ret = PyGeneratorExp(args[1], [PyComprehension([PyVariable(self.getBoundVarName(x)) for x in range(int(str(args[0])))], args[(- 2)], [args[(- 1)]])])
                elif (expr.name == 'BoundVar'):
                    ret = PyVariable(self.getBoundVarName(int(expr.args[0].value)))
                elif (expr.args != None):
                    # generic call: a known primitive / builtin becomes a
                    # plain call, anything else a method call on args[0]
                    try:
                        # NOTE(review): eval() is used only to probe whether
                        # expr.name is a callable builtin; unsafe if expr.name
                        # can be attacker-controlled -- TODO confirm inputs.
                        if ((expr.name in PRIMITIVE_FUNC) or callable(eval(expr.name))):
                            ret = PyFuncCall(expr.name, args)
                        else:
                            ret = PyFuncCall(PyGetAttr(self.pythonExpression(expr.args[0])[0], expr.name), args[1:])
                    except NameError:
                        ret = PyFuncCall(PyGetAttr(self.pythonExpression(expr.args[0])[0], expr.name), args[1:])
            except AttributeError as ex:
                # leaf node: a constant (has .value) or a bare variable name
                try:
                    ret = PyConstant(expr.value)
                except AttributeError:
                    ret = PyVariable(expr.name)
        else:
            pass
        if expr.original:
            # memoized expression: emit an assignment and return its variable
            var_ret = PyVariable(expr.original[0])
            assignments.append((var_ret, ret, expr.original[1]))
            ret = var_ret
        # deduplicate collected assignments by their id (third tuple element)
        previous_ids = []
        ret_assignments = []
        for i in range(len(assignments)):
            if (assignments[i][2] not in previous_ids):
                ret_assignments.append(assignments[i])
                previous_ids.append(assignments[i][2])
        return [ret, ret_assignments]
def extractAssignments(self, assignmentlist, assignment):
ret_assignmentlist = []
if (len(assignmentlist) > 0):
for ass in assignmentlist:
if ((str(ass[0]) != str(ass[1]).strip()) and (not str(ass[1]).strip().startswith('iter#'))):
ret_assignmentlist.append(PyAssignment(ass[0], ass[1]))
return PyAssignments((ret_assignmentlist + [assignment]))
return assignment |
def read_vec_int(file_or_fd):
    """Read a Kaldi integer vector from a file name or open descriptor.

    Supports both the binary format (header '\\0B') and the plain-text
    format.  The descriptor is closed only if it was opened here.
    """
    fd = open_or_fd(file_or_fd)
    header = fd.read(2).decode()
    if (header == '\x00B'):
        # binary: one marker byte, then an int32 vector length
        assert (fd.read(1).decode() == '\x04')
        vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0]
        # each entry is a (int8 size marker, int32 value) pair = 5 bytes
        records = np.frombuffer(fd.read(vec_size * 5), dtype=[('size', 'int8'), ('value', 'int32')], count=vec_size)
        assert (records[0]['size'] == 4)
        result = records[:]['value']
    else:
        # ascii: the two header chars belong to the text; read rest of line
        tokens = (header + fd.readline().decode()).strip().split()
        try:
            tokens.remove('[')
            tokens.remove(']')
        except ValueError:
            pass
        result = np.array(tokens, dtype=int)
    if (fd is not file_or_fd):
        fd.close()
    return result
def moment_for_poly(mass, vertices, offset=(0, 0)):
    """Compute a polygon's moment of inertia via Chipmunk's cpMomentForPoly.

    `vertices` is copied into a ctypes Vec2d array before the call.
    """
    vert_count = len(vertices)
    vert_array = (Vec2d * vert_count)(Vec2d(0, 0))
    for idx, vertex in enumerate(vertices):
        vert_array[idx].x = vertex[0]
        vert_array[idx].y = vertex[1]
    return cp.cpMomentForPoly(mass, len(vert_array), vert_array, offset)
# NOTE(review): the bare `_distributed` / `_torch` / `_tf2` / `_inference`
# lines below look like pytest marker decorators with the '@' stripped during
# extraction -- TODO confirm against the upstream file.
_distributed
class TestAutoSeq2Seq(TestCase):
    """Hyper-parameter-search tests for the AutoSeq2Seq estimator
    (torch and keras backends), run on an Orca/Ray context."""
    def setUp(self) -> None:
        # fresh Orca context (with Ray on Spark) per test
        from bigdl.orca import init_orca_context
        init_orca_context(cores=8, init_ray_on_spark=True)
    def tearDown(self) -> None:
        from bigdl.orca import stop_orca_context
        stop_orca_context()
    _torch
    def test_fit_np(self):
        """Fit on numpy data with the torch backend and check the searched config."""
        auto_seq2seq = get_auto_estimator(backend='torch')
        auto_seq2seq.fit(data=get_x_y(size=1000), epochs=1, batch_size=hp.choice([32, 64]), validation_data=get_x_y(size=400), n_sampling=1)
        assert auto_seq2seq.get_best_model()
        best_config = auto_seq2seq.get_best_config()
        # the chosen values must come from the declared search space
        assert (0.1 <= best_config['dropout'] <= 0.3)
        assert (best_config['batch_size'] in (32, 64))
        assert (best_config['lstm_hidden_dim'] in (32, 64, 128))
        assert (best_config['lstm_layer_num'] in (1, 2, 3, 4))
    _tf2
    def test_fit_np_keras(self):
        """Same search as test_fit_np but with the keras backend."""
        keras_auto_s2s = get_auto_estimator(backend='keras')
        keras_auto_s2s.fit(data=get_x_y(size=1000), epochs=1, batch_size=hp.choice([32, 64]), validation_data=get_x_y(size=400), n_sampling=1)
        assert keras_auto_s2s.get_best_model()
        best_config = keras_auto_s2s.get_best_config()
        assert (0.1 <= best_config['dropout'] <= 0.3)
        assert (best_config['batch_size'] in (32, 64))
        assert (best_config['lstm_hidden_dim'] in (32, 64, 128))
        assert (best_config['lstm_layer_num'] in (1, 2, 3, 4))
    _torch
    def test_fit_data_creator(self):
        """Fit from dataloader-creator functions instead of in-memory arrays."""
        auto_seq2seq = get_auto_estimator()
        auto_seq2seq.fit(data=train_dataloader_creator, epochs=1, batch_size=hp.choice([32, 64]), validation_data=valid_dataloader_creator, n_sampling=1)
        assert auto_seq2seq.get_best_model()
        best_config = auto_seq2seq.get_best_config()
        assert (0.1 <= best_config['dropout'] <= 0.3)
        assert (best_config['batch_size'] in (32, 64))
        assert (best_config['lstm_hidden_dim'] in (32, 64, 128))
        assert (best_config['lstm_layer_num'] in (1, 2, 3, 4))
    _torch
    def test_predict_evaluation(self):
        """Smoke-test predict() and evaluate() after a fit."""
        auto_seq2seq = get_auto_estimator()
        auto_seq2seq.fit(data=train_dataloader_creator(config={'batch_size': 64}), epochs=1, validation_data=valid_dataloader_creator(config={'batch_size': 64}), n_sampling=1)
        (test_data_x, test_data_y) = get_x_y(size=100)
        auto_seq2seq.predict(test_data_x)
        auto_seq2seq.evaluate((test_data_x, test_data_y))
    _torch
    _inference
    def test_onnx_methods(self):
        """ONNX-exported predict/evaluate must match the native results."""
        auto_seq2seq = get_auto_estimator()
        auto_seq2seq.fit(data=train_dataloader_creator(config={'batch_size': 64}), epochs=1, validation_data=valid_dataloader_creator(config={'batch_size': 64}), n_sampling=1)
        (test_data_x, test_data_y) = get_x_y(size=100)
        pred = auto_seq2seq.predict(test_data_x)
        eval_res = auto_seq2seq.evaluate((test_data_x, test_data_y))
        try:
            # only compare when onnx/onnxruntime are installed
            import onnx
            import onnxruntime
            pred_onnx = auto_seq2seq.predict_with_onnx(test_data_x)
            eval_res_onnx = auto_seq2seq.evaluate_with_onnx((test_data_x, test_data_y))
            np.testing.assert_almost_equal(pred, pred_onnx, decimal=5)
            np.testing.assert_almost_equal(eval_res, eval_res_onnx, decimal=5)
        except ImportError:
            pass
    _torch
    _inference
    def test_save_load(self):
        """Round-trip save()/load() and re-check predictions (incl. ONNX if available)."""
        auto_seq2seq = get_auto_estimator()
        auto_seq2seq.fit(data=train_dataloader_creator(config={'batch_size': 64}), epochs=1, validation_data=valid_dataloader_creator(config={'batch_size': 64}), n_sampling=1)
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            auto_seq2seq.save(tmp_dir_name)
            auto_seq2seq.load(tmp_dir_name)
        (test_data_x, test_data_y) = get_x_y(size=100)
        pred = auto_seq2seq.predict(test_data_x)
        eval_res = auto_seq2seq.evaluate((test_data_x, test_data_y))
        try:
            import onnx
            import onnxruntime
            pred_onnx = auto_seq2seq.predict_with_onnx(test_data_x)
            eval_res_onnx = auto_seq2seq.evaluate_with_onnx((test_data_x, test_data_y))
            np.testing.assert_almost_equal(pred, pred_onnx, decimal=5)
            np.testing.assert_almost_equal(eval_res, eval_res_onnx, decimal=5)
        except ImportError:
            pass
    _tf2
    def test_save_load_keras(self):
        """Round-trip save()/load() for the keras backend."""
        auto_keras_s2s = get_auto_estimator(backend='keras')
        auto_keras_s2s.fit(data=get_x_y(size=1000), epochs=1, batch_size=hp.choice([32, 64]), validation_data=get_x_y(size=400), n_sampling=1)
        with tempfile.TemporaryDirectory() as tmp_dir_name:
            auto_keras_s2s.save(tmp_dir_name)
            auto_keras_s2s.load(tmp_dir_name)
        (test_data_x, test_data_y) = get_x_y(size=100)
        pred = auto_keras_s2s.predict(test_data_x)
        eval_res = auto_keras_s2s.evaluate((test_data_x, test_data_y))
def printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):
    """Render an in-place console progress bar.

    Args:
        iteration: current iteration count.
        total: total number of iterations (the line is cleared when reached).
        prefix, suffix: strings printed around the bar.
        decimals: decimal places shown in the percentage.
        barLength: character width of the bar.
    """
    formatStr = ('{0:.' + str(decimals)) + 'f}'
    percents = formatStr.format(100 * (iteration / float(total)))
    filledLength = int(round((barLength * iteration) / float(total)))
    # Bug fix: the filled portion used '' (empty string), so the bar never
    # showed any progress and had the wrong total width; use a solid block.
    bar = ('█' * filledLength) + ('-' * (barLength - filledLength))
    sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))
    if iteration == total:
        # erase the bar line once finished
        sys.stdout.write('\x1b[2K\r')
    sys.stdout.flush()
class TestProjects(unittest.TestCase):
    """Smoke-test that the bundled detectron2 project modules import cleanly."""

    def test_import(self):
        from detectron2.projects import point_rend
        import detectron2.projects.deeplab as deeplab
        # touching the config helpers proves the modules loaded fully
        _ = point_rend.add_pointrend_config
        _ = deeplab.add_deeplab_config
def flatten(inputs):
    """Recursively flatten arbitrarily nested lists/tuples into one flat list.

    A non-sequence input yields a single-element list.

    Bug fix: the original wrapped every level (including the recursive
    results) in an extra list, so it *added* nesting instead of flattening:
    flatten([1, 2]) produced [[[1], [2]]].  This version returns [1, 2].
    """
    if isinstance(inputs, (list, tuple)):
        result = []
        for item in inputs:
            result.extend(flatten(item))
        return result
    return [inputs]
class DataTrainingArguments():
    """Arguments controlling the data used for summarization training/eval.

    NOTE(review): the fields use dataclasses.field(), so upstream this class
    is presumably decorated with @dataclass -- TODO confirm; the decorator is
    not visible in this chunk.
    """
    lang: str = field(default=None, metadata={'help': 'Language id for summarization.'})
    # -- dataset selection: either a hub dataset name or explicit files --
    dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    text_column: Optional[str] = field(default=None, metadata={'help': 'The name of the column in the datasets containing the full texts (for summarization).'})
    summary_column: Optional[str] = field(default=None, metadata={'help': 'The name of the column in the datasets containing the summaries (for summarization).'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a jsonlines or csv file).'})
    validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file).'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file).'})
    # -- preprocessing options --
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
    max_source_length: Optional[int] = field(default=1024, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    max_target_length: Optional[int] = field(default=128, metadata={'help': 'The maximum total sequence length for target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    val_max_target_length: Optional[int] = field(default=None, metadata={'help': 'The maximum total sequence length for validation target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`.This argument is also used to override the ``max_length`` param of ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
    pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to model maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More efficient on GPU but very bad for TPU.'})
    # -- debugging / truncation knobs --
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
    max_predict_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of prediction examples to this value if set.'})
    # -- generation options --
    num_beams: Optional[int] = field(default=None, metadata={'help': 'Number of beams to use for evaluation. This argument will be passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
    ignore_pad_token_for_loss: bool = field(default=True, metadata={'help': 'Whether to ignore the tokens corresponding to padded labels in the loss computation or not.'})
    source_prefix: Optional[str] = field(default='', metadata={'help': 'A prefix to add before every source text (useful for T5 models).'})
    forced_bos_token: Optional[str] = field(default=None, metadata={'help': 'The token to force as the first generated token after the decoder_start_token_id.Useful for multilingual models like mBART where the first generated tokenneeds to be the target language token (Usually it is the target language token)'})
    def __post_init__(self):
        """Validate the dataset/file arguments and default val_max_target_length."""
        if ((self.dataset_name is None) and (self.train_file is None) and (self.validation_file is None)):
            raise ValueError('Need either a dataset name or a training/validation file.')
        else:
            # only csv/json files are supported by the loading code
            if (self.train_file is not None):
                extension = self.train_file.split('.')[(- 1)]
                assert (extension in ['csv', 'json']), '`train_file` should be a csv or a json file.'
            if (self.validation_file is not None):
                extension = self.validation_file.split('.')[(- 1)]
                assert (extension in ['csv', 'json']), '`validation_file` should be a csv or a json file.'
        if (self.val_max_target_length is None):
            self.val_max_target_length = self.max_target_length
class InstallHeaders(install_headers):
    """Header-install command that preserves the directory layout below
    include/pybind11 instead of flattening everything."""

    def run(self):
        headers = self.distribution.headers
        if not headers:
            return
        for header in headers:
            # keep the path relative to include/pybind11 so subdirs survive
            subdir = os.path.dirname(os.path.relpath(header, 'include/pybind11'))
            destination = os.path.join(self.install_dir, subdir)
            self.mkpath(destination)
            (installed, _) = self.copy_file(header, destination)
            self.outfiles.append(installed)
def quad_double_estimated_distance(vrblvl=0):
    """Call phc job 887 and return the estimated distance as a float.

    vrblvl > 0 enables diagnostic prints; the distance itself is read back
    through the c_double output parameter.
    """
    if vrblvl > 0:
        print('in quad_double_estimated_distance ...')
    phcfun = get_phcfun()
    # ctypes in/out parameters for the phc call (same values as before)
    par_a = pointer(c_int32(2))
    par_b = pointer(c_int32(0))
    distance_out = pointer(c_double(0.0))
    level = c_int32(vrblvl)
    if vrblvl > 0:
        print('-> quad_double_estimated_distance calls phc', end='')
    retval = phcfun(887, par_a, par_b, distance_out, level)
    if vrblvl > 0:
        print(', return value :', retval)
        print('the estimated distance :', distance_out[0])
    return distance_out[0]
def test_check_parameters_min_values_bool():
    """Boolean tensors against min_value: non-positive minimums pass
    (False compares as 0), minimums of 1 or more must raise ValueError.

    Cleanup: removed the unused local `dtypes` list.
    """
    x = torch.tensor([True, True, False], dtype=torch.bool)
    _check_parameter(x, 'x', min_value=0)
    _check_parameter(x, 'x', min_value=(- 1.0))
    assert_raises(ValueError, _check_parameter, x, 'x', min_value=1)
    assert_raises(ValueError, _check_parameter, x, 'x', min_value=1000.0)
class ModelCheckpoint_Stat(Callback):
    """Checkpoint callback driven by a custom validation score.

    Every `period` epochs the model is saved to `filepath_static`, re-scored
    with validate_files(), and -- depending on save_best_only -- checkpointed
    to `filepath` when the score improves.  Also early-stops after `patience`
    epochs without improvement.

    NOTE(review): the `mode` argument is accepted but ignored -- the score is
    always maximized (np.greater).  The default validation_data=() would fail
    to unpack into five values, so callers presumably always pass it --
    TODO confirm both points upstream.
    """
    def __init__(self, filepath, filepath_static, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='max', period=1, patience=None, validation_data=()):
        super(ModelCheckpoint_Stat, self).__init__()
        self.interval = period
        # validation_data bundles everything validate_files() needs
        (self.cubes, self.answs, self.preprocess_input, self.shape_size, self.batch_size) = validation_data
        self.monitor = monitor
        self.verbose = verbose
        self.filepath = filepath
        self.filepath_static = filepath_static
        self.save_best_only = save_best_only
        self.save_weights_only = save_weights_only
        self.period = period
        self.epochs_since_last_save = 0
        # higher score == better, regardless of `mode`
        self.monitor_op = np.greater
        self.best = (- np.Inf)
        self.epochs_from_best_model = 0
        self.patience = patience
    def on_epoch_end(self, epoch, logs=None):
        """Score the model every `period` epochs, checkpoint, maybe early-stop."""
        logs = (logs or {})
        self.epochs_since_last_save += 1
        if (self.epochs_since_last_save >= self.period):
            self.epochs_since_last_save = 0
            # always refresh the "static" snapshot before re-scoring it
            self.model.save(self.filepath_static, overwrite=True)
            (score, _) = validate_files(self.model, self.preprocess_input, self.cubes, self.answs, self.shape_size, self.batch_size, verbose=False)
            logs['score'] = score
            filepath = self.filepath.format(epoch=(epoch + 1), **logs)
            if (score > self.best):
                self.epochs_from_best_model = 0
            else:
                self.epochs_from_best_model += 1
            if self.save_best_only:
                current = score
                if (current is None):
                    warnings.warn(('Can save best model only with %s available, skipping.' % self.monitor), RuntimeWarning)
                elif self.monitor_op(current, self.best):
                    if (self.verbose > 0):
                        print(('\nEpoch %05d: %s improved from %0.5f to %0.5f, saving model to %s' % ((epoch + 1), self.monitor, self.best, current, filepath)))
                    self.best = current
                    if self.save_weights_only:
                        self.model.save_weights(filepath, overwrite=True)
                    else:
                        self.model.save(filepath, overwrite=True)
                    # keep the static snapshot in sync with the best model
                    shutil.copy(filepath, self.filepath_static)
                elif (self.verbose > 0):
                    print(('\nEpoch %05d: %s did not improve' % ((epoch + 1), self.monitor)))
            else:
                # unconditional checkpoint each period
                if (self.verbose > 0):
                    print(('\nEpoch %05d: saving model to %s' % ((epoch + 1), filepath)))
                if self.save_weights_only:
                    self.model.save_weights(filepath, overwrite=True)
                else:
                    self.model.save(filepath, overwrite=True)
                shutil.copy(filepath, self.filepath_static)
            if (self.patience is not None):
                if (self.epochs_from_best_model > self.patience):
                    print('Early stopping: {}'.format(self.epochs_from_best_model))
                    self.model.stop_training = True
def write_obj_with_colors_texture(obj_name, vertices, colors, triangles, texture, uv_coords):
    """Write a vertex-colored, textured mesh as .obj with companion .mtl and texture image.

    Face indices are emitted 1-based (obj convention) and with reversed
    vertex order relative to the input triangles.  The v-coordinate of the
    UVs is flipped (1 - v).
    """
    if obj_name.split('.')[(- 1)] != 'obj':
        obj_name = obj_name + '.obj'
    mtl_name = obj_name.replace('.obj', '.mtl')
    texture_name = obj_name.replace('.obj', '_texture.png')
    # work on a copy: obj indices start at 1
    faces = triangles.copy()
    faces += 1
    with open(obj_name, 'w') as obj_file:
        obj_file.write('mtllib {}\n'.format(os.path.abspath(mtl_name)))
        # vertex positions with per-vertex colors
        for i in range(vertices.shape[0]):
            obj_file.write('v {} {} {} {} {} {}\n'.format(vertices[(i, 0)], vertices[(i, 1)], vertices[(i, 2)], colors[(i, 0)], colors[(i, 1)], colors[(i, 2)]))
        # texture coordinates (v flipped)
        for i in range(uv_coords.shape[0]):
            obj_file.write('vt {} {}\n'.format(uv_coords[(i, 0)], (1 - uv_coords[(i, 1)])))
        obj_file.write('usemtl FaceTexture\n')
        # faces, reversed winding, vertex index reused as uv index
        for i in range(faces.shape[0]):
            obj_file.write('f {}/{} {}/{} {}/{}\n'.format(faces[(i, 2)], faces[(i, 2)], faces[(i, 1)], faces[(i, 1)], faces[(i, 0)], faces[(i, 0)]))
    with open(mtl_name, 'w') as mtl_file:
        mtl_file.write('newmtl FaceTexture\n')
        mtl_file.write('map_Kd {}\n'.format(os.path.abspath(texture_name)))
    imsave(texture_name, texture)
def indice_maxpool_backward(features, out_features, out_bp, indice_pairs, indice_pair_num):
    """Backward pass of sparse indice max-pooling.

    Only float32 and float16 feature tensors are supported; anything else
    raises NotImplementedError.
    """
    if features.dtype not in (torch.float32, torch.half):
        raise NotImplementedError
    return ext_module.indice_maxpool_backward(features, out_features, out_bp, indice_pairs, indice_pair_num)
class ThreeInterpolate(Function):
    """Autograd wrapper around the open3d three-nearest-neighbor feature
    interpolation kernels; requires a CUDA device."""

    def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        if open3d.core.cuda.device_count() <= 0:
            raise NotImplementedError
        # the kernels require contiguous memory
        for tensor in (features, idx, weight):
            assert tensor.is_contiguous()
        # stash the neighbor indices, their weights, and the source point
        # count m for the backward pass
        ctx.three_interpolate_for_backward = (idx, weight, features.size()[2])
        return three_interpolate(features, idx, weight)

    def backward(ctx, grad_out: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]:
        if open3d.core.cuda.device_count() <= 0:
            raise NotImplementedError
        (idx, weight, m) = ctx.three_interpolate_for_backward
        grad_features = three_interpolate_grad(grad_out.data.contiguous(), idx, weight, m)
        # gradients flow only to the features input
        return (grad_features, None, None)
def print_diagnostics():
    """Dump environment diagnostics (OS, Python, PyTorch, CUDA toolchain,
    C++ compiler) to stdout, shelling out where needed."""
    print('System')
    print(platform.platform())
    os.system('cat /etc/lsb-release')
    print(sys.version)
    print('Python')
    print(sys.version)
    print(sys.version_info)
    print('Pytorch')
    try:
        import torch
        print(torch.__version__)
        print(f'torch.cuda.is_available(): {torch.cuda.is_available()}')
    except ImportError:
        print('torch not installed')
    print('NVIDIA-SMI')
    os.system('which nvidia-smi')
    # only surface the version-related fields from nvidia-smi
    for key, value in parse_nvidia_smi().items():
        if 'version' in key.lower():
            print(key, value)
    print('NVCC')
    os.system('which nvcc')
    os.system('nvcc --version')
    print('CC')
    # resolve the compiler: CXX wins (and is copied into CC), then CC, then c++
    compiler = 'c++'
    if 'CXX' in os.environ:
        os.environ['CC'] = os.environ['CXX']
        compiler = os.environ['CXX']
    elif 'CC' in os.environ:
        compiler = os.environ['CC']
    print(f'CC={compiler}')
    os.system(f'which {compiler}')
    os.system(f'{compiler} --version')
def get_params_for_weight_decay_optimization(module, config):
    """Split a module's trainable parameters into weight-decay / no-decay groups.

    LayerNorm and Embedding parameters, and every parameter named 'bias', are
    exempt from weight decay.  When config.weight_decay == 0.0 a single
    no-decay group is returned; otherwise [decay_group, no_decay_group].
    """
    decay_group = {'params': []}
    no_decay_group = {'params': [], 'weight_decay': 0.0}
    exempt_types = (torch.nn.LayerNorm, torch.nn.Embedding)
    for submodule in module.modules():
        direct_params = submodule._parameters
        if isinstance(submodule, exempt_types) or (config.weight_decay == 0.0):
            no_decay_group['params'].extend(
                p for p in direct_params.values() if (p is not None) and p.requires_grad
            )
        else:
            for name, p in direct_params.items():
                if (p is None) or (not p.requires_grad):
                    continue
                target = no_decay_group if name == 'bias' else decay_group
                target['params'].append(p)
    # sanity check: the two groups must cover every trainable parameter
    trainable = {pn: p for (pn, p) in module.named_parameters() if (p is not None) and p.requires_grad}
    assert (len(no_decay_group['params']) + len(decay_group['params'])) == len(trainable), 'Number of params in both groups != total number of trainable params'
    if config.weight_decay == 0.0:
        return [no_decay_group]
    return [decay_group, no_decay_group]
class TrustedFirstParty():
    """Correlated-randomness provider where party 0 generates all shared
    values and distributes the shares.

    NOTE(review): the methods are written without `self`/`cls`; upstream they
    are presumably decorated as @staticmethod -- TODO confirm.
    """
    NAME = 'TFP'
    def generate_additive_triple(size0, size1, op, device=None, *args, **kwargs):
        """Generate an additive Beaver triple (a, b, c) with c = torch.<op>(a, b)."""
        a = generate_random_ring_element(size0, device=device)
        b = generate_random_ring_element(size1, device=device)
        c = getattr(torch, op)(a, b, *args, **kwargs)
        a = ArithmeticSharedTensor(a, precision=0, src=0)
        b = ArithmeticSharedTensor(b, precision=0, src=0)
        c = ArithmeticSharedTensor(c, precision=0, src=0)
        return (a, b, c)
    def square(size, device=None):
        """Return shares of a random r together with shares of r * r."""
        r = generate_random_ring_element(size, device=device)
        r2 = r.mul(r)
        # share r and r^2 in one stacked tensor so both use the same sharing
        stacked = torch_stack([r, r2])
        stacked = ArithmeticSharedTensor(stacked, precision=0, src=0)
        return (stacked[0], stacked[1])
    def generate_binary_triple(size0, size1, device=None):
        """Generate a binary triple (a, b, c) with c = a & b."""
        a = generate_kbit_random_tensor(size0, device=device)
        b = generate_kbit_random_tensor(size1, device=device)
        c = (a & b)
        a = BinarySharedTensor(a, src=0)
        b = BinarySharedTensor(b, src=0)
        c = BinarySharedTensor(c, src=0)
        return (a, b, c)
    def wrap_rng(size, device=None):
        """Scatter per-party random shares r and share theta_r = count_wraps(r)."""
        num_parties = comm.get().get_world_size()
        r = [generate_random_ring_element(size, device=device) for _ in range(num_parties)]
        theta_r = count_wraps(r)
        # each party receives one of the pre-generated shares
        shares = comm.get().scatter(r, src=0)
        r = ArithmeticSharedTensor.from_shares(shares, precision=0)
        theta_r = ArithmeticSharedTensor(theta_r, precision=0, src=0)
        return (r, theta_r)
    def B2A_rng(size, device=None):
        """Return arithmetic and binary sharings of the same random bit tensor."""
        r = generate_kbit_random_tensor(size, bitlength=1, device=device)
        rA = ArithmeticSharedTensor(r, precision=0, src=0)
        rB = BinarySharedTensor(r, src=0)
        return (rA, rB)
    def rand(*sizes, device=None):
        """Return an arithmetic sharing of uniform samples from torch.rand."""
        samples = torch.rand(*sizes, device=device)
        return ArithmeticSharedTensor(samples, src=0)
def test_statcast_chunking() -> None:
    """A single 15-day statcast pull must match the concatenation of 15
    single-day pulls (same columns, same row count)."""
    result = statcast('2019-05-01', '2019-05-15').reset_index(drop=True)
    assert result is not None
    assert not result.empty
    start_date = date(2019, 5, 1)
    # fetch the same span one day at a time
    daily_frames = [statcast(str(start_date + timedelta(days=offset))) for offset in range(15)]
    combined = pd.concat(daily_frames, axis=0).convert_dtypes(convert_string=False)
    combined = combined.sort_values(['game_date', 'game_pk', 'at_bat_number', 'pitch_number'], ascending=False).reset_index(drop=True)
    assert list(result.columns) == list(combined.columns)
    assert len(result) == len(combined)
class SourceHandler(ScorerHandler):
    """HTTP GET handler that returns source segments from the scorer as JSON."""

    def get(self):
        instance_id = int(self.get_argument('instance_id'))
        # optional segment_size query parameter; an empty value means unset
        segment_size = None
        if 'segment_size' in self.request.arguments:
            raw_value = self.get_argument('segment_size')
            if len(raw_value) > 0:
                segment_size = int(raw_value)
        payload = self.scorer.send_src(int(instance_id), segment_size)
        self.write(json.dumps(payload))
class TakeKey(gym.ObservationWrapper):
    """Observation wrapper that keeps exactly one key of a dict observation.

    The discarded entries of each observation are stashed on
    self._ignored_observations for later inspection.
    """

    def __init__(self, env, take_key):
        super(TakeKey, self).__init__(env)
        self._take_key = take_key
        assert take_key in self.observation_space.spaces
        # narrow the observation space to the selected sub-space
        self.observation_space = self.env.observation_space[take_key]

    def observation(self, observation):
        remaining = copy.copy(observation)
        selected = remaining.pop(self._take_key)
        self._ignored_observations = remaining
        return selected
def integrate_rgb_frames_for_fragment(color_files, depth_files, fragment_id, n_fragments, pose_graph_name, intrinsic, config):
    """Fuse one fragment's RGB-D frames into a TSDF volume and return its mesh.

    Frame poses come from the fragment's pose graph; frame indices into the
    global color/depth file lists are offset by
    fragment_id * config['n_frames_per_fragment'].
    """
    pose_graph = o3d.io.read_pose_graph(pose_graph_name)
    volume = o3d.pipelines.integration.ScalableTSDFVolume(voxel_length=(config['tsdf_cubic_size'] / 512.0), sdf_trunc=0.04, color_type=o3d.pipelines.integration.TSDFVolumeColorType.RGB8)
    for i in range(len(pose_graph.nodes)):
        # absolute frame index within the whole sequence
        i_abs = ((fragment_id * config['n_frames_per_fragment']) + i)
        print(('Fragment %03d / %03d :: integrate rgbd frame %d (%d of %d).' % (fragment_id, (n_fragments - 1), i_abs, (i + 1), len(pose_graph.nodes))))
        rgbd = read_rgbd_image(color_files[i_abs], depth_files[i_abs], False, config)
        pose = pose_graph.nodes[i].pose
        # integrate() expects the extrinsic (world-to-camera), hence inv(pose)
        volume.integrate(rgbd, intrinsic, np.linalg.inv(pose))
    mesh = volume.extract_triangle_mesh()
    mesh.compute_vertex_normals()
    return mesh
class AdaptiveResNet(nn.Module):
    """ResNet whose per-layer channel widths are loaded from a sampled config.

    The channel configuration is read from `<ch_cfg>/sample.npy`, a pickled
    dict with keys 'conv1', 'layer1'..'layer4', and 'fc'; `block` and
    `layers` follow the usual ResNet constructor convention.
    """
    def __init__(self, ch_cfg, block, layers, num_classes=1000, input_size=224):
        super(AdaptiveResNet, self).__init__()
        # per-layer channel counts sampled ahead of time
        channels = np.load(os.path.join(ch_cfg, 'sample.npy'), allow_pickle=True).item()
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, channels['conv1'], kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(channels['conv1'])
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], 1, channels['layer1'])
        self.layer2 = self._make_layer(block, 128, layers[1], 2, channels['layer2'])
        self.layer3 = self._make_layer(block, 256, layers[2], 2, channels['layer3'])
        self.layer4 = self._make_layer(block, 512, layers[3], 2, channels['layer4'])
        # pooling kernel assumes input_size is divisible by 32
        self.avgpool = nn.AvgPool2d((input_size // 32), stride=1)
        self.fc = nn.Linear(channels['fc'], num_classes)
        self._init_weights()
    def forward(self, x):
        """Standard ResNet forward: stem -> 4 stages -> avgpool -> fc logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), (- 1))
        x = self.fc(x)
        return x
    def _init_weights(self):
        """He-style init for convs, constant init for batch norms."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride, bottleneck_settings):
        """Build one stage from per-block channel settings.

        bottleneck_settings maps block index (as a string) to that block's
        conv channel tuples; the downsample path reuses the first block's
        input and output widths.
        """
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            (in_ch, _) = bottleneck_settings['0']['conv1']
            # a bottleneck block ends with conv3, a basic block with conv2
            if ('conv3' in bottleneck_settings['0'].keys()):
                (_, out_ch) = bottleneck_settings['0']['conv3']
            else:
                (_, out_ch) = bottleneck_settings['0']['conv2']
            downsample = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(out_ch))
        layers = [block(bottleneck_settings['0'], stride, downsample)]
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(bottleneck_settings[str(i)]))
        return nn.Sequential(*layers)
_module()
class BFP(nn.Module):
    """Feature pyramid balancing module.

    Gathers all pyramid levels at the `refine_level` resolution, averages
    them, optionally refines the averaged map ('conv' or 'non_local'), and
    adds it back to every input level as a residual.

    Args:
        in_channels: channel count shared by all levels.
        num_levels: number of input feature levels.
        refine_level: index of the level whose resolution is the gather target.
        refine_type: None, 'conv', or 'non_local'.
        conv_cfg, norm_cfg: forwarded to the refine module.
    """

    def __init__(self, in_channels, num_levels, refine_level=2, refine_type=None, conv_cfg=None, norm_cfg=None):
        super(BFP, self).__init__()
        assert refine_type in [None, 'conv', 'non_local']
        self.in_channels = in_channels
        self.num_levels = num_levels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.refine_level = refine_level
        self.refine_type = refine_type
        assert 0 <= self.refine_level < self.num_levels
        if self.refine_type == 'conv':
            self.refine = ConvModule(self.in_channels, self.in_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)
        elif self.refine_type == 'non_local':
            self.refine = NonLocal2D(self.in_channels, reduction=1, use_scale=False, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)

    def init_weights(self):
        """Xavier-initialize every conv layer."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')

    def forward(self, inputs):
        assert len(inputs) == self.num_levels
        # step 1: bring every level to the refine_level resolution
        target_size = inputs[self.refine_level].size()[2:]
        resized = []
        for level in range(self.num_levels):
            if level < self.refine_level:
                # finer levels are pooled down
                resized.append(F.adaptive_max_pool2d(inputs[level], output_size=target_size))
            else:
                # coarser levels are upsampled
                resized.append(F.interpolate(inputs[level], size=target_size, mode='nearest'))
        # step 2: average, then optionally refine the balanced map
        balanced = sum(resized) / len(resized)
        if self.refine_type is not None:
            balanced = self.refine(balanced)
        # step 3: scatter the balanced map back and add it as a residual
        outputs = []
        for level in range(self.num_levels):
            level_size = inputs[level].size()[2:]
            if level < self.refine_level:
                residual = F.interpolate(balanced, size=level_size, mode='nearest')
            else:
                residual = F.adaptive_max_pool2d(balanced, output_size=level_size)
            outputs.append(residual + inputs[level])
        return tuple(outputs)
class AEPNN(Algorithm):
    """Autoencoder + SympNet: learn dynamics in an autoencoded latent space.

    The autoencoder `ae` maps states into a latent space where `sympnet`
    advances one step; `lam` weighs the reconstruction loss against the
    latent prediction loss, and `recurrent` is the number of latent steps
    applied per training pair.
    """
    def __init__(self, ae, sympnet, lam=1, recurrent=1):
        super(AEPNN, self).__init__()
        self.ae = ae
        self.sympnet = sympnet
        self.lam = lam
        self.recurrent = recurrent
        # physical state dimension, taken from the encoder's input layer size
        self.dim = ae.encoder_size[0]
    def criterion(self, X, y):
        """Loss = latent prediction MSE + lam * autoencoder reconstruction MSE."""
        (X_latent, y_latent) = (self.ae.encode(X), self.ae.encode(y))
        # advance X's latent state `recurrent` steps before comparing to y's
        X_latent_step = X_latent
        for i in range(self.recurrent):
            X_latent_step = self.sympnet(X_latent_step)
        symp_loss = torch.nn.MSELoss()(X_latent_step, y_latent)
        ae_loss = (torch.nn.MSELoss()(self.ae.decode(X_latent), X) + torch.nn.MSELoss()(self.ae.decode(y_latent), y))
        return (symp_loss + (self.lam * ae_loss))
    def predict(self, x, steps=1, keepinitx=False, returnnp=False):
        """Roll the latent dynamics `steps` steps ahead and decode the trajectory.

        When keepinitx is True the decoded initial state is kept as the first
        entry; returnnp converts the result to a numpy array.
        """
        x = self._to_tensor(x)
        size = len(x.size())
        pred = [self.ae.encode(x)]
        for _ in range(steps):
            pred.append(self.sympnet(pred[(- 1)]))
        pred = list(map(self.ae.decode, pred))
        if keepinitx:
            steps = (steps + 1)
        else:
            pred = pred[1:]
        res = torch.cat(pred, dim=(- 1))
        if (steps > 1):
            # reshape the concatenated steps into (..., steps, dim), dropping
            # the leading -1 when the input was unbatched
            res = res.view([(- 1), steps, self.dim][(2 - size):])
        return (res.cpu().detach().numpy() if returnnp else res)
class ModelNet40(Dataset):
    """ModelNet40 point-cloud dataset.

    Each item is (first num_points points of the cloud, label); the arrays
    are loaded once for the requested partition.
    """

    def __init__(self, num_points, partition='train'):
        self.data, self.label = load_data(partition)
        self.num_points = num_points
        self.partition = partition

    def __getitem__(self, item):
        # truncate the cloud to the requested number of points
        return (self.data[item][:self.num_points], self.label[item])

    def __len__(self):
        return self.data.shape[0]
def array_from_imgdir(directory: Path, crop_size: int=256, grayscale: bool=False, num_samples: int=None, num_workers: int=1) -> np.ndarray:
    """Load all .png images in `directory` into one array scaled to [-1, 1].

    Args:
        directory: folder scanned (non-recursively) for .png files.
        crop_size: target spatial size; images are center-cropped if needed.
        grayscale: convert each image to single-channel ('L') when True.
        num_samples: stop after this many images; warn if fewer are found.
        num_workers: >1 loads images in parallel via joblib.

    Returns:
        numpy array of the stacked images, rescaled from [0, 255] to [-1, 1].
    """
    paths = []
    for path in directory.iterdir():
        if (path.suffix.lower() == '.png'):
            paths.append(path)
        if ((num_samples is not None) and (len(paths) == num_samples)):
            break
    if (num_samples and (len(paths) < num_samples)):
        warnings.warn(f'Found only {len(paths)} images instead of {num_samples}.')
    # pick the loader once so the parallel path ships a single callable
    if grayscale:
        def loader(path):
            return np.array(Image.open(path).convert('L'))
    else:
        def loader(path):
            return np.array(Image.open(path))
    if (num_workers == 1):
        array = np.array(list(map(loader, paths)))
    else:
        array = np.array(Parallel(n_jobs=num_workers)((delayed(loader)(path) for path in paths)))
    # NOTE(review): for RGB images array.shape[1:] is (H, W, 3), which never
    # equals (crop_size, crop_size), so color input is always re-cropped --
    # presumably center_crop handles the channel axis; TODO confirm.
    if (array.shape[1:] != (crop_size, crop_size)):
        print(f'Cropping from {array.shape[1:]} to {(crop_size, crop_size)}.')
        array = center_crop(array, size=crop_size)
    # rescale [0, 255] -> [-1, 1]
    array = ((array / 127.5) - 1)
    return array
# NOTE(review): `_start_docstrings(...)` below looks like the
# add_start_docstrings decorator with the '@' stripped during extraction --
# TODO confirm against the upstream transformers file.
_start_docstrings('\n    CamemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.\n    for Named-Entity-Recognition (NER) tasks.\n    ', CAMEMBERT_START_DOCSTRING)
class CamembertForTokenClassification(RobertaForTokenClassification):
    """CamemBERT token-classification model: reuses the RoBERTa implementation
    wholesale and only swaps in the CamemBERT config class."""
    config_class = CamembertConfig
def save_cfg_file(file_path, source=__C):
    """Dump the config `source` to a YAML file, omitting machine-specific keys.

    NOTE: the default `source=__C` binds the global config object at import
    time; the copy below keeps the masking from mutating the live config.
    """
    source = source.copy()
    # Machine-specific paths that should not be persisted with the experiment.
    masked_keys = ['DATASET_PATH', 'ROOT_DIR']
    for key in masked_keys:
        if (key in source):
            # NOTE(review): both item-deletion and attribute-deletion are
            # performed — presumably `source` is an attr-dict-style config that
            # stores keys in both places (a plain dict would raise on
            # `delattr`). Confirm against the config class.
            del source[key]
            delattr(source, key)
    with open(file_path, 'w') as f:
        logging.info(('Save YAML config file to %s' % file_path))
        ordered_dump(source, f, yaml.SafeDumper, default_flow_style=None)
class CarsEncodeTransforms(TransformsConfig):
    """Transform configuration for the cars-encoding task (192x256 images)."""

    def __init__(self, opts):
        super(CarsEncodeTransforms, self).__init__(opts)

    def get_transforms(self):
        """Return the train/source/test/inference transform pipelines."""
        def _pipeline(flip=False):
            # Resize -> (optional horizontal flip) -> tensor -> [-1, 1] normalize.
            steps = [transforms.Resize((192, 256))]
            if flip:
                steps.append(transforms.RandomHorizontalFlip(0.5))
            steps.append(transforms.ToTensor())
            steps.append(transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]))
            return transforms.Compose(steps)

        return {
            'transform_gt_train': _pipeline(flip=True),
            'transform_source': None,
            'transform_test': _pipeline(),
            'transform_inference': _pipeline(),
        }
class Ipecac():
    """Invert atomwise embeddings: recover a molecular geometry (and optionally
    element assignments) whose embedding matches a target embedding."""

    def __init__(self, set_, dig_, eles_):
        self.set = set_    # dataset handle (kept for interface compatibility)
        self.dig = dig_    # digester providing Emb(mol, MakeOutputs=False)
        self.eles = eles_  # candidate elements

    def ReverseAtomwiseEmbedding(self, emb_, atoms_, guess_, GdDistMatrix):
        """Optimize coordinates so the molecule's embedding matches `emb_`.

        Args:
            emb_: target embedding, one row per atom.
            atoms_: atomic numbers, or None to assume all carbon.
            guess_: initial coordinates, shape (natoms, 3).
            GdDistMatrix: ground-truth distance matrix (progress reporting only).
        Returns:
            A Mol with the fitted coordinates.
        """
        # fix: the original used `import scipy.optimize`, which binds the name
        # `scipy` only — the `optimize.minimize(...)` call below raised NameError.
        from scipy import optimize
        natoms = emb_.shape[0]
        if atoms_ is None:  # fix: identity check instead of `== None`
            atoms = np.full(natoms, 6)  # default: all carbon
        else:
            atoms = atoms_
        coords = guess_
        objective = (lambda crds: self.EmbAtomwiseErr(Mol(atoms, crds.reshape(natoms, 3)), emb_))

        def callbk(x_):
            # Report the distance-matrix error against ground truth each iteration.
            mn = Mol(atoms, x_.reshape(natoms, 3))
            mn.BuildDistanceMatrix()
            print('Distance error : ', np.sqrt(np.sum(((GdDistMatrix - mn.DistMatrix) * (GdDistMatrix - mn.DistMatrix)))))

        # fix: the original options dict was `{'maxiter': 5000000, 'maxfun': }`,
        # a SyntaxError; give maxfun an explicit matching bound. (Also removed a
        # dead `if 1:` wrapper and an unused `step = 0` local.)
        res = optimize.minimize(objective, coords.reshape((natoms * 3)), method='L-BFGS-B', tol=1e-12, options={'maxiter': 5000000, 'maxfun': 5000000}, callback=callbk)
        mfit = Mol(atoms, res.x.reshape(natoms, 3))
        self.DistanceErr(GdDistMatrix, mfit)
        return mfit

    def BruteForceAtoms(self, mol_, emb_):
        """Exhaustively search H/C/N/O assignments minimizing embedding error."""
        print('Searching for best atom fit')
        bestmol = copy.deepcopy(mol_)
        besterr = 100.0
        for stoich in itertools.product([1, 6, 7, 8], repeat=len(mol_.atoms)):
            tmpmol = Mol(np.array(stoich), mol_.coords)
            tmperr = self.EmbAtomwiseErr(tmpmol, emb_)
            if (tmperr < besterr):
                bestmol = copy.deepcopy(tmpmol)
                besterr = tmperr
                print(besterr)
                print(bestmol.atoms)
        return bestmol.atoms

    def EmbAtomwiseErr(self, mol_, emb_):
        """Return the Frobenius-norm difference between mol_'s embedding and emb_."""
        ins = self.dig.Emb(mol_, MakeOutputs=False)
        err = np.sqrt(np.sum(((ins - emb_) * (ins - emb_))))
        return err

    def DistanceErr(self, GdDistMatrix_, mol_):
        """Print the final distance-matrix error for a fitted molecule."""
        mol_.BuildDistanceMatrix()
        print('Final Distance error : ', np.sqrt(np.sum(((GdDistMatrix_ - mol_.DistMatrix) * (GdDistMatrix_ - mol_.DistMatrix)))))
def check_save_model_path():
    """Ensure the directory that will hold `opt.save_model` exists.

    Reads the global `opt` (parsed CLI options) and creates the parent
    directory of the save path if needed.
    """
    save_model_path = os.path.abspath(opt.save_model)
    model_dirname = os.path.dirname(save_model_path)
    # exist_ok=True avoids a race (and FileExistsError) if another process
    # creates the directory between an existence check and makedirs.
    os.makedirs(model_dirname, exist_ok=True)
class BigBirdForCausalLM(metaclass=DummyObject):
    """Placeholder class used when the torch backend is unavailable;
    instantiating it raises an informative error via `requires_backends`."""
    # Backends required for the real implementation to be importable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def head_conv3x3(in_c, out_c, stride=1, norm=nn.InstanceNorm2d):
    """3x3 conv (no bias) -> normalization -> LeakyReLU(0.1) head block."""
    conv = nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=3, stride=stride, padding=1, bias=False)
    activation = nn.LeakyReLU(0.1, inplace=True)
    return nn.Sequential(conv, norm(out_c), activation)
class TestActorCritic(unittest.TestCase):
    """Tests for A2C.discounted_cumsum with and without terminal states."""

    def _assert_close(self, result, expected):
        # Compare element-wise: assertAlmostEqual on two lists raises TypeError
        # (it computes round(first - second) when the lists are not exactly
        # equal), so the original list-level comparison could never report a
        # meaningful approximate match.
        self.assertEqual(len(result), len(expected))
        for got, want in zip(result, expected):
            self.assertAlmostEqual(got, want, places=5)

    def test_discounted_cumsum(self):
        """No terminals: the bootstrap value is discounted through every step."""
        discount = 0.99
        bootstrap = 5.0
        dones = np.array([0, 0, 0])
        rewards = np.array([1.0, 1.0, 1.0])
        discounts = (discount * (1 - dones))
        rewards = np.append(rewards, bootstrap)
        result = rvr.agents.A2C.discounted_cumsum(rewards, discounts)
        expected = [7.821595, 6.8905, 5.95, 5.0]
        self._assert_close(result.tolist(), expected)

    def test_discounted_cumsum_terminals(self):
        """A terminal at step 1 zeroes the discount and cuts return propagation."""
        discount = 0.99
        bootstrap = 5.0
        dones = np.array([0, 1, 0])
        rewards = np.array([1.0, 1.0, 1.0])
        discounts = (discount * (1 - dones))
        rewards = np.append(rewards, bootstrap)
        result = rvr.agents.A2C.discounted_cumsum(rewards, discounts)
        expected = [1.99, 1.0, 5.95, 5.0]
        self._assert_close(result.tolist(), expected)
def extract_frames_from_video_path(video_path, target_fps=3, num_frames=3, multi_thread_decode=False, sampling_strategy='rand', safeguard_duration=False):
    """Read a video file fully into memory and delegate frame extraction to
    `extract_frames_from_video_binary`."""
    with open(video_path, 'rb') as video_file:
        buffer = io.BytesIO(video_file.read())
    return extract_frames_from_video_binary(buffer, target_fps=target_fps, num_frames=num_frames, multi_thread_decode=multi_thread_decode, sampling_strategy=sampling_strategy, safeguard_duration=safeguard_duration)
def join_model_name(amr):
    """Merge adjacent token pairs like ('BERT', '-3') into one NNP/ENTITY token.

    Repeatedly scans `amr.tokens` for an all-uppercase alphabetic token
    immediately followed by a '-<digits>' token and joins the pair in place;
    stops when a full pass finds no joinable pair.
    """
    while True:
        span = None
        if (len(amr.tokens) < 2):
            break
        for i in range((len(amr.tokens) - 1)):
            (x, y) = amr.tokens[i:(i + 2)]
            if (x.isalpha() and x.isupper() and re.search('^-\\d+$', y)):
                span = list(range(i, (i + 2)))
                joined_tokens = ''.join([x, y])
                # NOTE(review): substring membership — this also skips any join
                # contained in 'K-12' (e.g. 'K-1'); confirm whether `==` or a
                # blocklist membership test was intended.
                if (joined_tokens in 'K-12'):
                    continue
                break
        else:
            # Inner loop completed without break: no joinable pair left.
            break
        amr.replace_span(span, [joined_tokens], ['NNP'], ['ENTITY'])
class G():
    """Class-level logging state container.

    Fields are read and written directly on the class (no instances), so the
    mutable defaults below are deliberately shared, global state — presumably
    a simple tabular-logger pattern; confirm against the callers.
    """
    output_dir = None       # directory where log output is written
    output_file = None      # open handle for the output file
    first_row = True        # True until the header row has been emitted
    log_headers = []        # ordered column names (shared class-level list)
    log_current_row = {}    # values accumulated for the row in progress
def register_extension(cls, fcreate=None):
    """Register `cls` as an extension type, returning `cls` (decorator-friendly).

    NDArray subclasses require a creation function and `_array_type_code`;
    other types require `_tvm_tcode`, and a creation function is only allowed
    for tcodes at or beyond the extension range.
    """
    if not issubclass(cls, _NDArrayBase):
        assert hasattr(cls, '_tvm_tcode')
        # Builtin tcodes cannot carry a user creation function.
        if fcreate and cls._tvm_tcode < TypeCode.EXT_BEGIN:
            raise ValueError('Cannot register create when extension tcode is same as buildin')
        _reg_extension(cls, fcreate)
        return cls
    assert fcreate is not None
    assert hasattr(cls, '_array_type_code')
    _reg_ndarray(cls, fcreate)
    return cls
class Generator(nn.Module):
    """DCGAN-style generator mapping a (N, nz, 1, 1) latent to an nc-channel image."""

    def __init__(self, ngpu, nc=3, ndf=160, ngf=160, nz=100):
        super(Generator, self).__init__()
        self.ngpu = ngpu
        # Transposed-conv stack; channel width halves at each upsampling stage.
        layers = [
            nn.ConvTranspose2d(nz, (ngf * 8), 4, 1, 0, bias=False),
            nn.BatchNorm2d((ngf * 8)),
            nn.ReLU(True),
            nn.ConvTranspose2d((ngf * 8), (ngf * 4), 4, 2, 1, bias=False),
            nn.BatchNorm2d((ngf * 4)),
            nn.ReLU(True),
            nn.ConvTranspose2d((ngf * 4), (ngf * 2), 4, 2, 1, bias=False),
            nn.BatchNorm2d((ngf * 2)),
            nn.ReLU(True),
            nn.ConvTranspose2d((ngf * 2), ngf, 4, 3, 3, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf, nc, 4, 4, 6, bias=False),
            nn.Tanh(),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        return self.main(input)
def lang_pair_dataset(lengths: Sequence[int]) -> LanguagePairDataset:
    """Build a LanguagePairDataset whose i-th example is token id `i` repeated
    `lengths[i]` times, backed by a mock dictionary."""
    tokens = [[idx] * length for idx, length in enumerate(lengths)]
    return LanguagePairDataset(ListDataset(tokens), lengths, mock_dict())
def build_rpn(cfg):
    """Build the region-proposal component: RetinaNet head when enabled in the
    config, otherwise the standard RPN module."""
    if not cfg.MODEL.RETINANET_ON:
        return RPNModule(cfg)
    return build_retinanet(cfg)
def create_selfloop_edges(num_nodes):
    """Return a list of (i, i) self-loop edges for nodes 0..num_nodes-1."""
    return [(node, node) for node in range(num_nodes)]
class VehiclePrediction(PythonMsg):
    """Predicted vehicle trajectory in both global (x, y, psi) and body/track
    frames, with helpers to rotate velocity and acceleration between frames."""
    t: float = field(default=None)                       # prediction start time
    x: array.array = field(default=None)                 # global x positions
    y: array.array = field(default=None)                 # global y positions
    v: array.array = field(default=None)                 # speed magnitude
    v_x: array.array = field(default=None)               # global-frame velocity x
    v_y: array.array = field(default=None)               # global-frame velocity y
    a_x: array.array = field(default=None)               # global-frame accel x
    a_y: array.array = field(default=None)               # global-frame accel y
    psi: array.array = field(default=None)               # heading angle
    psidot: array.array = field(default=None)            # heading rate
    v_long: array.array = field(default=None)            # body-frame longitudinal velocity
    v_tran: array.array = field(default=None)            # body-frame transverse velocity
    a_long: array.array = field(default=None)            # body-frame longitudinal accel
    a_tran: array.array = field(default=None)            # body-frame transverse accel
    e_psi: array.array = field(default=None)             # heading error (track frame)
    s: array.array = field(default=None)                 # path progress
    x_tran: array.array = field(default=None)            # lateral track offset
    u_a: array.array = field(default=None)               # acceleration input
    u_steer: array.array = field(default=None)           # steering input
    lap_num: int = field(default=None)
    local_state_covariance: array.array = field(default=None)
    global_state_covariance: array.array = field(default=None)

    def update_body_velocity_from_global(self):
        """Rotate global-frame velocities/accelerations into the body frame
        using heading `psi` (standard 2-D rotation)."""
        self.v_long = (np.multiply(self.v_x, np.cos(self.psi)) + np.multiply(self.v_y, np.sin(self.psi))).tolist()
        self.v_tran = ((- np.multiply(self.v_x, np.sin(self.psi))) + np.multiply(self.v_y, np.cos(self.psi))).tolist()
        self.a_long = (np.multiply(self.a_x, np.cos(self.psi)) + np.multiply(self.a_y, np.sin(self.psi))).tolist()
        # BUG FIX: the original computed -a_y*sin(psi) + a_y*cos(psi); the
        # rotation's first term must use a_x (mirroring v_tran above).
        self.a_tran = ((- np.multiply(self.a_x, np.sin(self.psi))) + np.multiply(self.a_y, np.cos(self.psi))).tolist()

    def update_global_velocity_from_body(self):
        """Inverse rotation: body-frame components back to the global frame."""
        self.v_x = (np.multiply(self.v_long, np.cos(self.psi)) - np.multiply(self.v_tran, np.sin(self.psi))).tolist()
        self.v_y = (np.multiply(self.v_long, np.sin(self.psi)) + np.multiply(self.v_tran, np.cos(self.psi))).tolist()
        self.a_x = (np.multiply(self.a_long, np.cos(self.psi)) - np.multiply(self.a_tran, np.sin(self.psi))).tolist()
        self.a_y = (np.multiply(self.a_long, np.sin(self.psi)) + np.multiply(self.a_tran, np.cos(self.psi))).tolist()
def init_spark_on_yarn_cluster(hadoop_conf, conda_name, num_executors, executor_cores, executor_memory='2g', driver_cores=4, driver_memory='2g', extra_executor_memory_for_ray=None, extra_python_lib=None, penv_archive=None, additional_archive=None, hadoop_user_name=None, spark_yarn_archive=None, spark_log_level='WARN', redirect_spark_log=True, jars=None, conf=None, py_files=None):
    """Initialize Spark for yarn-cluster mode.

    When running inside the YARN application master (the `OnAppMaster`
    environment variable is 'True') this builds and returns the internal
    context directly. Otherwise it submits the cluster job via SparkRunner and
    then TERMINATES the local client process with the job's exit code.
    """
    if (os.environ.get('OnAppMaster', 'False') == 'True'):
        # Already inside the application master: create the context in-process.
        sc = init_internal_nncontext()
        return sc
    else:
        from bigdl.dllib.utils.spark import SparkRunner
        runner = SparkRunner(spark_log_level=spark_log_level, redirect_spark_log=redirect_spark_log)
        return_value = runner.init_spark_on_yarn_cluster(hadoop_conf=hadoop_conf, conda_name=conda_name, num_executors=num_executors, executor_cores=executor_cores, executor_memory=executor_memory, driver_cores=driver_cores, driver_memory=driver_memory, extra_executor_memory_for_ray=extra_executor_memory_for_ray, extra_python_lib=extra_python_lib, penv_archive=penv_archive, additional_archive=additional_archive, hadoop_user_name=hadoop_user_name, spark_yarn_archive=spark_yarn_archive, jars=jars, conf=conf, py_files=py_files)
        # NOTE: the client process exits here; no code after this call runs.
        sys.exit(return_value)
def load_model_config_from_hf(model_id: str):
    """Download and parse a model's `config.json` from the Hugging Face hub.

    Returns (config_dict, architecture_name); the hub id is recorded in the
    config under 'hf_hub'.
    """
    assert has_hf_hub(True)
    config_path = _download_from_hf(model_id, 'config.json')
    cfg = load_cfg_from_json(config_path)
    cfg['hf_hub'] = model_id
    return cfg, cfg.get('architecture')
class NaiveSyncBatchNorm(BatchNorm2d):
    """BatchNorm2d that synchronizes batch statistics across workers.

    Computes per-worker mean and mean-of-squares, all-reduces both in one
    communication, and normalizes with the resulting global statistics.
    NOTE: averaging the per-worker moments uniformly assumes every worker
    processes the same batch size.
    """

    def forward(self, input):
        if ((comm.get_world_size() == 1) or (not self.training)):
            # Single process or eval mode: plain BatchNorm2d behavior.
            return super().forward(input)
        assert (input.shape[0] > 0), 'SyncBatchNorm does not support empty input'
        C = input.shape[1]
        # Per-worker first and second moments over the (N, H, W) axes.
        mean = torch.mean(input, dim=[0, 2, 3])
        meansqr = torch.mean((input * input), dim=[0, 2, 3])
        # Concatenate so a single all-reduce carries both moments.
        vec = torch.cat([mean, meansqr], dim=0)
        vec = (AllReduce.apply(vec) * (1.0 / dist.get_world_size()))
        (mean, meansqr) = torch.split(vec, C)
        # var = E[x^2] - E[x]^2, now computed from the global averages.
        var = (meansqr - (mean * mean))
        # EMA update of the running statistics (detached from the graph).
        self.running_mean += (self.momentum * (mean.detach() - self.running_mean))
        self.running_var += (self.momentum * (var.detach() - self.running_var))
        invstd = torch.rsqrt((var + self.eps))
        # Fold normalization and the affine transform into one scale/bias pair.
        scale = (self.weight * invstd)
        bias = (self.bias - (mean * scale))
        scale = scale.reshape(1, (- 1), 1, 1)
        bias = bias.reshape(1, (- 1), 1, 1)
        return ((input * scale) + bias)
class ResBlocks(nn.Module):
    """Sequential stack of `num_blocks` ResBlock modules.

    Channel flow: input_nc -> hidden_nc ... hidden_nc -> output_nc (a single
    block maps input_nc -> output_nc directly).
    NOTE(review): the `nonlinearity=nn.LeakyReLU()` default is evaluated once
    at definition time, so the same module instance is shared by every
    ResBlocks construction — harmless for a stateless activation, but worth
    confirming upstream.
    """

    def __init__(self, num_blocks, input_nc, output_nc=None, hidden_nc=None, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), learnable_shortcut=False, use_spect=False, use_coord=False):
        super(ResBlocks, self).__init__()
        if hidden_nc is None:
            hidden_nc = input_nc
        if output_nc is None:
            output_nc = input_nc
        # Options shared by every block in the stack.
        shared = (norm_layer, nonlinearity, learnable_shortcut, use_spect, use_coord)
        blocks = []
        if num_blocks == 1:
            blocks.append(ResBlock(input_nc, output_nc, hidden_nc, *shared))
        else:
            blocks.append(ResBlock(input_nc, hidden_nc, hidden_nc, *shared))
            for _ in range(num_blocks - 2):
                blocks.append(ResBlock(hidden_nc, hidden_nc, hidden_nc, *shared))
            blocks.append(ResBlock(hidden_nc, output_nc, hidden_nc, *shared))
        self.model = nn.Sequential(*blocks)

    def forward(self, inputs):
        return self.model(inputs)
def test_invalid_response_connection():
    """Resolving the network after a Request is sent A->B should raise
    NetworkError (invalid-response scenario exercised by _TestAgent2)."""
    n = Network([_TestAgent2('A'), _TestAgent2('B'), _TestAgent2('C')], BatchResolver())
    n.add_connection('A', 'B')
    n.send('A', 'B', Request(0.0))
    with pytest.raises(NetworkError):
        n.resolve({aid: n.context_for(aid, EnvView(0, 0.0)) for aid in n.agents})
def load_flattened_documents(data_dir: str, docids: Set[str]) -> Dict[str, List[str]]:
    """Load the requested documents and flatten each document's nested token
    lists into a single flat token list."""
    unflattened = load_documents(data_dir, docids)
    return {docid: list(chain.from_iterable(nested)) for docid, nested in unflattened.items()}
class TestYOLOXHead(TestCase):
    """Unit tests for YOLOXHead: weight init, inference decoding, and losses."""

    def test_init_weights(self):
        """Cls/obj conv biases must be initialized to bias_init_with_prob(0.01)."""
        head = YOLOXHead(num_classes=4, in_channels=1, stacked_convs=1, use_depthwise=False)
        head.init_weights()
        bias_init = bias_init_with_prob(0.01)
        for (conv_cls, conv_obj) in zip(head.multi_level_conv_cls, head.multi_level_conv_obj):
            assert_allclose(conv_cls.bias.data, (torch.ones_like(conv_cls.bias.data) * bias_init))
            assert_allclose(conv_obj.bias.data, (torch.ones_like(conv_obj.bias.data) * bias_init))

    def test_predict_by_feat(self):
        """predict_by_feat must run both with and without rescale/NMS."""
        s = 256
        img_metas = [{'img_shape': (s, s, 3), 'scale_factor': (1.0, 1.0)}]
        test_cfg = Config(dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65)))
        head = YOLOXHead(num_classes=4, in_channels=1, stacked_convs=1, use_depthwise=False, test_cfg=test_cfg)
        # Three feature levels with spatial sizes s/4, s/8, s/16.
        feat = [torch.rand(1, 1, (s // feat_size), (s // feat_size)) for feat_size in [4, 8, 16]]
        (cls_scores, bbox_preds, objectnesses) = head.forward(feat)
        head.predict_by_feat(cls_scores, bbox_preds, objectnesses, img_metas, cfg=test_cfg, rescale=True, with_nms=True)
        head.predict_by_feat(cls_scores, bbox_preds, objectnesses, img_metas, cfg=test_cfg, rescale=False, with_nms=False)

    def test_loss_by_feat(self):
        """Loss behavior for empty GT, one GT (with L1 branch), and out-of-bound GT."""
        s = 256
        img_metas = [{'img_shape': (s, s, 3), 'scale_factor': 1}]
        train_cfg = Config(dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5, candidate_topk=10, iou_weight=3.0, cls_weight=1.0)))
        head = YOLOXHead(num_classes=4, in_channels=1, stacked_convs=1, use_depthwise=False, train_cfg=train_cfg)
        assert (not head.use_l1)
        assert isinstance(head.multi_level_cls_convs[0][0], ConvModule)
        feat = [torch.rand(1, 1, (s // feat_size), (s // feat_size)) for feat_size in [4, 8, 16]]
        (cls_scores, bbox_preds, objectnesses) = head.forward(feat)
        # Case 1: no GT boxes -> only the objectness loss should be non-zero.
        gt_instances = InstanceData(bboxes=torch.empty((0, 4)), labels=torch.LongTensor([]))
        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses, [gt_instances], img_metas)
        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
        empty_obj_loss = empty_gt_losses['loss_obj'].sum()
        self.assertEqual(empty_cls_loss.item(), 0, 'there should be no cls loss when there are no true boxes')
        self.assertEqual(empty_box_loss.item(), 0, 'there should be no box loss when there are no true boxes')
        self.assertGreater(empty_obj_loss.item(), 0, 'objectness loss should be non-zero')
        # Case 2: depthwise variant, one GT box, L1 branch forced on.
        head = YOLOXHead(num_classes=4, in_channels=1, stacked_convs=1, use_depthwise=True, train_cfg=train_cfg)
        assert isinstance(head.multi_level_cls_convs[0][0], DepthwiseSeparableConvModule)
        head.use_l1 = True
        gt_instances = InstanceData(bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]), labels=torch.LongTensor([2]))
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses, [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        onegt_obj_loss = one_gt_losses['loss_obj'].sum()
        onegt_l1_loss = one_gt_losses['loss_l1'].sum()
        self.assertGreater(onegt_cls_loss.item(), 0, 'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0, 'box loss should be non-zero')
        self.assertGreater(onegt_obj_loss.item(), 0, 'obj loss should be non-zero')
        self.assertGreater(onegt_l1_loss.item(), 0, 'l1 loss should be non-zero')
        # Case 3: GT box entirely outside the image -> behaves like no GT.
        gt_instances = InstanceData(bboxes=torch.Tensor([[(s * 4), (s * 4), ((s * 4) + 10), ((s * 4) + 10)]]), labels=torch.LongTensor([2]))
        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses, [gt_instances], img_metas)
        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
        empty_obj_loss = empty_gt_losses['loss_obj'].sum()
        self.assertEqual(empty_cls_loss.item(), 0, 'there should be no cls loss when gt_bboxes out of bound')
        self.assertEqual(empty_box_loss.item(), 0, 'there should be no box loss when gt_bboxes out of bound')
        self.assertGreater(empty_obj_loss.item(), 0, 'objectness loss should be non-zero')
def test_digits_sqrt_greedi_ln_sparse():
    """GreeDi (lazy + naive sub-optimizers) on the sparse digits data must
    reproduce the precomputed ranking, gains, and selected subset."""
    model = FeatureBasedSelection(100, 'sqrt', optimizer='greedi', optimizer_kwds={'optimizer1': 'lazy', 'optimizer2': 'naive'}, random_state=0)
    model.fit(X_digits_sparse)
    assert_array_equal(model.ranking, digits_sqrt_greedi_ranking)
    assert_array_almost_equal(model.gains, digits_sqrt_greedi_gains, 4)
    # The stored subset must match the rows selected from the sparse input.
    assert_array_almost_equal(model.subset, X_digits_sparse[model.ranking].toarray())
class Generator3(nn.Module):
    """Conditional generator for 28x28 single-channel images.

    A label embedding is projected to the initial 7x7 grid and broadcast-added
    to the latent projection before the upsampling conv stack.
    """

    def __init__(self):
        super(Generator3, self).__init__()
        image_size = 28
        latent_dim = 100
        output_channels = 1
        # Two 2x upsampling stages follow, so start from image_size / 4.
        self.init_size = (image_size // 4)
        self.l1 = nn.Sequential(nn.Linear(latent_dim, (128 * (self.init_size ** 2))))
        self.label_embedding = nn.Embedding(10, 10)
        # Projects the 10-d label embedding onto one init_size x init_size map.
        self.l2 = nn.Linear(10, (self.init_size * self.init_size))
        self.conv_blocks = nn.Sequential(nn.BatchNorm2d(128), nn.Upsample(scale_factor=2), nn.Conv2d(128, 128, 3, stride=1, padding=1), nn.BatchNorm2d(128, 0.8), nn.LeakyReLU(0.2, inplace=True), nn.Upsample(scale_factor=2), nn.Conv2d(128, 64, 3, stride=1, padding=1), nn.BatchNorm2d(64, 0.8), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(64, output_channels, 3, stride=1, padding=1), nn.Tanh())

    def forward(self, z, labels):
        """Generate images from latent codes `z` (N, 100) and integer `labels`."""
        out = self.l1(z)
        out = out.view(out.shape[0], 128, self.init_size, self.init_size)
        li = self.label_embedding(labels)
        li = self.l2(li)
        li = li.view(labels.shape[0], 1, self.init_size, self.init_size)
        # BUG FIX: the original computed `(out[0] + li)` and discarded the
        # result, so the label conditioning never reached the conv stack.
        # Broadcast-add the label map across all 128 channels instead.
        out = out + li
        img = self.conv_blocks(out)
        return img
def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, gpu_ids=[], use_parallel=True, learn_residual=False):
    """Construct, (optionally) move to GPU, and weight-initialize a generator.

    `which_model_netG` selects a ResNet generator with a fixed block count or
    a U-Net generator with a fixed depth.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    if len(gpu_ids) > 0:
        assert torch.cuda.is_available()

    # Map variant names to their single distinguishing hyperparameter.
    resnet_variants = {'resnet_9blocks': 9, 'resnet_0blocks': 0, 'resnet_1blocks': 1, 'resnet_6blocks': 6, 'resnet_16blocks': 16}
    unet_variants = {'unet_128': 7, 'unet_256': 8}

    if which_model_netG in resnet_variants:
        netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=resnet_variants[which_model_netG], gpu_ids=gpu_ids, use_parallel=use_parallel, learn_residual=learn_residual)
    elif which_model_netG in unet_variants:
        netG = UnetGenerator(input_nc, output_nc, unet_variants[which_model_netG], ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids, use_parallel=use_parallel, learn_residual=learn_residual)
    else:
        raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)

    if len(gpu_ids) > 0:
        netG.cuda(gpu_ids[0])
    netG.apply(weights_init)
    return netG
def apply_augmentations(augmentations: List[Union[(Transform, Augmentation)]], inputs):
    """Apply a list of augmentations to `inputs`.

    Accepts either a raw image ndarray (wrapped in AugInput, and only the
    image is returned) or an AugInput-like object (returned as-is).
    Returns (augmented_inputs, applied_transforms).
    """
    image_only = isinstance(inputs, np.ndarray)
    if image_only:
        # Wrap the bare image so the shared augmentation path can run.
        inputs = AugInput(inputs)
    tfms = inputs.apply_augmentations(augmentations)
    return (inputs.image if image_only else inputs), tfms
class ToyModel2(BaseModel):
    """Teacher/student test fixture built from two independent ToyModel1
    instances; forward calls are delegated to the student."""

    def __init__(self):
        super().__init__()
        self.teacher = ToyModel1()
        self.student = ToyModel1()
        # Semi-supervised test config: predictions are taken from the teacher.
        self.semi_test_cfg = dict(predict_on='teacher')

    def forward(self, *args, **kwargs):
        # Forward is routed through the student branch.
        return self.student(*args, **kwargs)
def single_gpu_test(model, data_loader):
    """Run inference over `data_loader` in eval mode with a progress bar and
    return the collected per-sample results."""
    model.eval()
    results = []
    prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
    for batch in data_loader:
        with torch.no_grad():
            batch_results = model(return_loss=False, **batch)
        results.extend(batch_results)
        # Advance the bar once per sample in the batch.
        for _ in range(len(batch_results)):
            prog_bar.update()
    return results
class CmsPfSingleElectron(tfds.core.GeneratorBasedBuilder):
    """TFDS builder for the CMS particle-flow single-electron dataset."""
    VERSION = tfds.core.Version('1.6.0')
    RELEASE_NOTES = {'1.0.0': 'Initial release.', '1.1.0': 'Initial release.', '1.2.0': '12_1_0_pre3 generation, add corrected energy, cluster flags, 20k events', '1.4.0': 'Add gen jet index information', '1.5.0': 'Without padding', '1.5.1': 'Remove outlier caps', '1.6.0': 'Regenerate with ARRAY_RECORD'}
    MANUAL_DOWNLOAD_INSTRUCTIONS = '\n    rsync -r --progress lxplus.cern.ch:/eos/user/j/jpata/mlpf/tensorflow_datasets/cms/cms_pf_single_electron ~/tensorflow_datasets/\n    '

    def __init__(self, *args, **kwargs):
        # Force the ARRAY_RECORD on-disk format regardless of caller settings.
        kwargs['file_format'] = tfds.core.FileFormat.ARRAY_RECORD
        super(CmsPfSingleElectron, self).__init__(*args, **kwargs)

    def _info(self) -> tfds.core.DatasetInfo:
        """Describe features: per-event X, generator-level ygen, candidate ycand."""
        return tfds.core.DatasetInfo(builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({'X': tfds.features.Tensor(shape=(None, len(X_FEATURES)), dtype=tf.float32), 'ygen': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32), 'ycand': tfds.features.Tensor(shape=(None, len(Y_FEATURES)), dtype=tf.float32)}), supervised_keys=('X', 'ycand'), homepage='', citation=_CITATION, metadata=tfds.core.MetadataDict(x_features=X_FEATURES, y_features=Y_FEATURES))

    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        """Split from the manually downloaded raw sample directory."""
        path = dl_manager.manual_dir
        sample_dir = 'SingleElectronFlatPt1To1000_pythia8_cfi'
        return cms_utils.split_sample(((path / sample_dir) / 'raw'))

    def _generate_examples(self, files):
        # Example generation is delegated to the shared CMS utilities.
        return cms_utils.generate_examples(files)
def preprocess(tokenizer, config, example, max_seq_length):
    """Tokenize a context/target pair into one input sequence ending with EOS.

    Returns {'input_ids': context_ids + target_ids + [eos], 'seq_len': number
    of context (prompt) tokens}.
    """
    prompt_ids = tokenizer.encode(example['context'], max_length=max_seq_length, truncation=True)
    # The target contributes no special tokens of its own.
    target_ids = tokenizer.encode(example['target'], max_length=max_seq_length, truncation=True, add_special_tokens=False)
    return {'input_ids': prompt_ids + target_ids + [config.eos_token_id], 'seq_len': len(prompt_ids)}
class _module():
    """Bundle of a source stage wired to an interconnect stage.

    The wire bundles are created first and shared by both stages; lifecycle
    methods preserve a strict ordering so no in-flight data is dropped.
    """

    def __init__(self, receiver, buffer_size):
        self._source_wires = _create_interface_source()
        self._interconnect_wires = _create_interface_interconnect()
        self._source = _create_source(receiver, self._source_wires, self._interconnect_wires)
        self._interconnect = _create_interconnect(buffer_size, self._source_wires, self._interconnect_wires)

    def start(self):
        # Start downstream (interconnect) before the source begins producing.
        self._interconnect.start()
        self._source.start()

    def stop(self):
        # Stop in reverse order: fully drain and join the source before
        # stopping the interconnect it feeds.
        self._source.stop()
        self._source.join()
        self._interconnect.stop()
        self._interconnect.join()

    def attach_sink(self, sink_wires):
        """Attach an output sink to the interconnect; returns its handle."""
        return self._interconnect.attach_sink(sink_wires)

    def get_interface(self):
        """Expose the interconnect wire bundle for external wiring."""
        return self._interconnect_wires
def run_model_with_conf(flags, args, model_name, model_conf):
    """Run one converted model on the host device with the micro runtime and
    optionally validate its outputs against the reference platform.

    Steps: override declared outputs with check_tensors if present, build the
    CLI arguments, stage input data (downloaded or randomly generated),
    execute micro_run_static, then (with --validate) pull the outputs back and
    compare them to the reference implementation.
    """
    target_abi = 'host'
    dev = device.HostDevice('host', target_abi)
    install_dir = ('/tmp/micro_run/' + model_name)
    if (ModelKeys.check_tensors in model_conf):
        # check_tensors take the place of the declared outputs for validation.
        model_conf[ModelKeys.output_tensors] = model_conf[ModelKeys.check_tensors]
        model_conf[ModelKeys.output_shapes] = model_conf[ModelKeys.check_shapes]
    model_args = {'model_name': model_name, 'input_node': ','.join(model_conf[ModelKeys.input_tensors]), 'input_shape': join_2d_array(model_conf[ModelKeys.input_shapes]), 'output_node': ','.join(model_conf[ModelKeys.output_tensors]), 'output_shape': join_2d_array(model_conf[ModelKeys.output_shapes]), 'input_data_format': ','.join([df.name for df in model_conf[ModelKeys.input_data_formats]]), 'output_data_format': ','.join([df.name for df in model_conf[ModelKeys.output_data_formats]])}
    opts = ([('--%s=%s' % (arg_key, arg_val)) for (arg_key, arg_val) in model_args.items()] + args)
    tmp_dir_name = tempfile.mkdtemp()
    input_file_prefix = ((tmp_dir_name + '/') + model_name)
    if (ModelKeys.validation_inputs_data in model_conf):
        input_tensor = model_conf[ModelKeys.input_tensors]
        input_data = model_conf[ModelKeys.validation_inputs_data]
        # NOTE(review): the message below is missing its closing paren — cosmetic only.
        mace_check((len(input_tensor) == len(input_data)), 'len(input_tensor) != len(validate_data')
        for i in range(len(input_tensor)):
            # Fetch each provided validation input into the staging directory.
            util.download_or_get_file(model_conf[ModelKeys.validation_inputs_data][i], '', util.formatted_file_name(input_file_prefix, input_tensor[i]))
    else:
        # No provided inputs: generate random data matching the declared specs.
        generate_input_data(input_file_prefix, model_conf[ModelKeys.input_tensors], model_conf[ModelKeys.input_shapes], model_conf[ModelKeys.input_ranges], model_conf[ModelKeys.input_data_types])
    dev.install(Target(tmp_dir_name), (install_dir + '/validate_in'))
    target_input_file = ('%s/validate_in/%s' % (install_dir, model_name))
    target_output_dir = ('%s/validate_out' % install_dir)
    dev.mkdir(target_output_dir)
    target_output_file = ((target_output_dir + '/') + model_name)
    opts += [('--input_file=%s' % target_input_file), ('--output_file=%s' % target_output_file)]
    envs = []
    if (flags.vlog_level > 0):
        envs += [('MACE_CPP_MIN_VLOG_LEVEL=%s' % flags.vlog_level)]
    target = Target('build/micro/host/tools/micro_run_static', [], opts=opts, envs=envs)
    run_target.run_target(target_abi, install_dir, target, dev)
    if flags.validate:
        # Download the reference model (and weights if split out separately).
        validate_model_file = util.download_or_get_model(model_conf[ModelKeys.model_file_path], model_conf[ModelKeys.model_sha256_checksum], tmp_dir_name)
        validate_weight_file = ''
        if (ModelKeys.weight_file_path in model_conf):
            validate_weight_file = util.download_or_get_model(model_conf[ModelKeys.weight_file_path], model_conf[ModelKeys.weight_sha256_checksum], tmp_dir_name)
        dev.pull(Target(target_output_dir), (tmp_dir_name + '/validate_out'))
        output_file_prefix = ((tmp_dir_name + '/validate_out/') + model_name)
        validate.validate(model_conf[ModelKeys.platform], validate_model_file, validate_weight_file, input_file_prefix, output_file_prefix, model_conf[ModelKeys.input_shapes], model_conf[ModelKeys.output_shapes], model_conf[ModelKeys.input_data_formats], model_conf[ModelKeys.output_data_formats], model_conf[ModelKeys.input_tensors], model_conf[ModelKeys.output_tensors], flags.validate_threshold, model_conf[ModelKeys.input_data_types], flags.backend, '', '')
    shutil.rmtree(tmp_dir_name)
_task('multilingual_translation')
class MultilingualTranslationTask(FairseqTask):
def add_args(parser):
    """Register multilingual-translation command-line options on `parser`.

    NOTE(review): takes no `self`/`cls` — presumably decorated with
    @staticmethod upstream (decorators are not visible in this chunk).
    """
    parser.add_argument('data', metavar='DIR', help='path to data directory')
    parser.add_argument('--lang-pairs', default=None, metavar='PAIRS', help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr')
    parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language (only needed for inference)')
    parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language (only needed for inference)')
    parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL', help='pad the source on the left (default: True)')
    parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', help='pad the target on the left (default: False)')
    parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence')
    parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence')
    parser.add_argument('--upsample-primary', default=1, type=int, help='amount to upsample primary dataset')
    parser.add_argument('--encoder-langtok', default=None, type=str, choices=['src', 'tgt'], metavar='SRCTGT', help='replace beginning-of-sentence in source sentence with source or target language token. (src/tgt)')
    parser.add_argument('--decoder-langtok', action='store_true', help='replace beginning-of-sentence in target sentence with target language token')
def __init__(self, args, dicts, training):
    """Store per-language dictionaries and derive the language-pair lists."""
    super().__init__(args)
    self.dicts = dicts
    self.training = training
    if not training:
        # Inference mode: a single explicit source-target pair.
        self.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)]
    else:
        self.lang_pairs = args.lang_pairs
    # Evaluation and modeling use the same pair list as training.
    self.eval_lang_pairs = self.lang_pairs
    self.model_lang_pairs = self.lang_pairs
    self.langs = list(dicts.keys())
def setup_task(cls, args, **kwargs):
    """Build the task from parsed CLI args.

    NOTE(review): receives `cls` without a visible @classmethod decorator —
    presumably stripped in this chunk; confirm upstream.
    """
    (dicts, training) = cls.prepare(args, **kwargs)
    return cls(args, dicts, training)
def prepare(cls, args, **kargs):
    """Normalize args and load one Dictionary per language.

    Returns (dicts, training): `training` is False when an explicit
    source/target language was supplied (inference mode).
    NOTE(review): receives `cls` without a visible @classmethod decorator —
    presumably stripped in this chunk.
    """
    args.left_pad_source = options.eval_bool(args.left_pad_source)
    args.left_pad_target = options.eval_bool(args.left_pad_target)
    if (args.lang_pairs is None):
        raise ValueError('--lang-pairs is required. List all the language pairs in the training objective.')
    if isinstance(args.lang_pairs, str):
        args.lang_pairs = args.lang_pairs.split(',')
    # Unique languages appearing on either side of any pair, sorted for
    # deterministic dictionary loading order.
    sorted_langs = sorted(list({x for lang_pair in args.lang_pairs for x in lang_pair.split('-')}))
    if ((args.source_lang is not None) or (args.target_lang is not None)):
        training = False
    else:
        training = True
    dicts = OrderedDict()
    for lang in sorted_langs:
        paths = args.data.split(os.pathsep)
        assert (len(paths) > 0)
        dicts[lang] = Dictionary.load(os.path.join(paths[0], 'dict.{}.txt'.format(lang)))
        if (len(dicts) > 0):
            # All dictionaries must agree on the special-symbol indices.
            assert (dicts[lang].pad() == dicts[sorted_langs[0]].pad())
            assert (dicts[lang].eos() == dicts[sorted_langs[0]].eos())
            assert (dicts[lang].unk() == dicts[sorted_langs[0]].unk())
        if ((args.encoder_langtok is not None) or args.decoder_langtok):
            # Language-token mode: every dictionary carries a token per language.
            for lang_to_add in sorted_langs:
                dicts[lang].add_symbol(_lang_token(lang_to_add))
        logger.info('[{}] dictionary: {} types'.format(lang, len(dicts[lang])))
    return (dicts, training)
def get_encoder_langtok(self, src_lang, tgt_lang):
    """Token id used as the source-side EOS replacement.

    Plain EOS when --encoder-langtok is unset; otherwise the source- or
    target-language token index, per the configured mode.
    """
    mode = self.args.encoder_langtok
    if mode is None:
        return self.dicts[src_lang].eos()
    token_lang = src_lang if mode == 'src' else tgt_lang
    return _lang_token_index(self.dicts[src_lang], token_lang)
def get_decoder_langtok(self, tgt_lang):
    """Target-language token index when --decoder-langtok is set, else EOS."""
    if self.args.decoder_langtok:
        return _lang_token_index(self.dicts[tgt_lang], tgt_lang)
    return self.dicts[tgt_lang].eos()
def alter_dataset_langtok(self, lang_pair_dataset, src_eos=None, src_lang=None, tgt_eos=None, tgt_lang=None):
    """Wrap a dataset so BOS/EOS are replaced by language tokens per
    --encoder-langtok / --decoder-langtok; no-op when neither is configured."""
    if ((self.args.encoder_langtok is None) and (not self.args.decoder_langtok)):
        return lang_pair_dataset
    new_src_eos = None
    if ((self.args.encoder_langtok is not None) and (src_eos is not None) and (src_lang is not None) and (tgt_lang is not None)):
        new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang)
    else:
        # Null out src_eos so the wrapper leaves the source side untouched.
        src_eos = None
    new_tgt_bos = None
    if (self.args.decoder_langtok and (tgt_eos is not None) and (tgt_lang is not None)):
        new_tgt_bos = self.get_decoder_langtok(tgt_lang)
    else:
        # Likewise disable target-side rewriting when not fully specified.
        tgt_eos = None
    # Note: the target-side replacement maps tgt_eos -> new_tgt_bos.
    return TransformEosLangPairDataset(lang_pair_dataset, src_eos=src_eos, new_src_eos=new_src_eos, tgt_bos=tgt_eos, new_tgt_bos=new_tgt_bos)
def load_dataset(self, split, epoch=0, **kwargs):
    """Load `split` as a RoundRobinZipDatasets over all language pairs.

    The data path cycles through the configured shards by epoch; each pair's
    dataset is wrapped with language-token replacement when configured.
    """
    paths = self.args.data.split(os.pathsep)
    assert (len(paths) > 0)
    # Rotate through shard paths across epochs.
    data_path = paths[(epoch % len(paths))]

    def language_pair_dataset(lang_pair):
        # Build and (optionally) langtok-wrap one pair's dataset.
        (src, tgt) = lang_pair.split('-')
        langpair_dataset = load_langpair_dataset(data_path, split, src, self.dicts[src], tgt, self.dicts[tgt], combine=True, dataset_impl=self.args.dataset_impl, upsample_primary=self.args.upsample_primary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions)
        return self.alter_dataset_langtok(langpair_dataset, src_eos=self.dicts[src].eos(), src_lang=src, tgt_eos=self.dicts[tgt].eos(), tgt_lang=tgt)
    # At inference the single source-target pair is exposed via eval_key.
    self.datasets[split] = RoundRobinZipDatasets(OrderedDict([(lang_pair, language_pair_dataset(lang_pair)) for lang_pair in self.lang_pairs]), eval_key=(None if self.training else ('%s-%s' % (self.args.source_lang, self.args.target_lang))))
def build_dataset_for_inference(self, src_tokens, src_lengths):
    """Build a single-pair inference dataset keyed by '<src>-<tgt>'."""
    pair_key = '%s-%s' % (self.args.source_lang, self.args.target_lang)
    dataset = self.alter_dataset_langtok(
        LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary),
        src_eos=self.source_dictionary.eos(),
        src_lang=self.args.source_lang,
        tgt_eos=self.target_dictionary.eos(),
        tgt_lang=self.args.target_lang,
    )
    return RoundRobinZipDatasets(OrderedDict([(pair_key, dataset)]), eval_key=pair_key)
def build_model(self, args):
    """Build the multilingual model, first validating that the model args
    agree with the task configuration.

    Raises:
        ValueError: if lang pairs or langtok settings mismatch, or if the
            built model is not a FairseqMultiModel.
    """
    # Collect every configuration mismatch so the user sees all of them at once.
    problems = []
    if set(self.args.lang_pairs) != set(args.lang_pairs):
        problems.append('--lang-pairs should include all the language pairs {}.'.format(args.lang_pairs))
    if self.args.encoder_langtok != args.encoder_langtok:
        problems.append('--encoder-langtok should be {}.'.format(args.encoder_langtok))
    if self.args.decoder_langtok != args.decoder_langtok:
        problems.append('--decoder-langtok should {} be set.'.format('' if args.decoder_langtok else 'not'))
    if problems:
        raise ValueError(' '.join(problems))

    from fairseq import models
    model = models.build_model(args, self)
    if not isinstance(model, FairseqMultiModel):
        raise ValueError('MultilingualTranslationTask requires a FairseqMultiModel architecture')
    return model
def train_step(self, sample, model, criterion, optimizer, ignore_grad=False):
    """Run forward/backward for every language pair present in `sample`
    and aggregate loss, sample size and logging stats.

    Per-pair stats are also logged under '<lang_pair>:<key>' keys.
    When `ignore_grad` is set the loss is zeroed before backward (dummy batch).
    """
    from collections import defaultdict
    model.train()
    total_loss = 0.0
    total_sample_size = 0.0
    agg_logs = defaultdict(float)
    for pair in self.model_lang_pairs:
        pair_sample = sample[pair]
        if pair_sample is None or len(pair_sample) == 0:
            continue
        loss, sample_size, log_out = criterion(model.models[pair], pair_sample)
        if ignore_grad:
            loss *= 0
        optimizer.backward(loss)
        total_loss += loss.detach().item()
        total_sample_size += sample_size
        for key, val in log_out.items():
            agg_logs[key] += val
            agg_logs[f'{pair}:{key}'] += val
    return total_loss, total_sample_size, agg_logs
def valid_step(self, sample, model, criterion):
    """Evaluate the criterion on every language pair present in `sample`,
    without gradients, aggregating loss / sample size / logging stats.

    Per-pair stats are also logged under '<lang_pair>:<key>' keys.
    """
    from collections import defaultdict
    model.eval()
    with torch.no_grad():
        total_loss = 0.0
        total_sample_size = 0.0
        agg_logs = defaultdict(float)
        for pair in self.eval_lang_pairs:
            # Skip pairs absent from this batch (empty or missing samples).
            if pair not in sample:
                continue
            pair_sample = sample[pair]
            if pair_sample is None or len(pair_sample) == 0:
                continue
            loss, sample_size, log_out = criterion(model.models[pair], pair_sample)
            total_loss += loss.data.item()
            total_sample_size += sample_size
            for key, val in log_out.items():
                agg_logs[key] += val
                agg_logs[f'{pair}:{key}'] += val
    return total_loss, total_sample_size, agg_logs
def inference_step(self, generator, models, sample, prefix_tokens=None):
    """Generate hypotheses, seeding the decoder with the target-language
    token when --decoder-langtok is enabled, otherwise with EOS."""
    with torch.no_grad():
        if self.args.decoder_langtok:
            bos = _lang_token_index(self.target_dictionary, self.args.target_lang)
        else:
            bos = self.target_dictionary.eos()
        return generator.generate(models, sample, prefix_tokens=prefix_tokens, bos_token=bos)
def reduce_metrics(self, logging_outputs, criterion):
    """Aggregate criterion metrics, then log the summed counting stats
    (sample_size / nsentences / ntokens) across all logging outputs."""
    with metrics.aggregate():
        super().reduce_metrics(logging_outputs, criterion)
        for counter in ('sample_size', 'nsentences', 'ntokens'):
            metrics.log_scalar(counter, sum(log[counter] for log in logging_outputs))
@property
def source_dictionary(self):
    """Source-side Dictionary for this task.

    During training any entry of ``self.dicts`` is returned (presumably the
    languages share a joined dictionary — confirm against the task setup);
    at inference time the dictionary of --source-lang is returned.

    NOTE: this must be a @property — the rest of this file accesses
    ``self.source_dictionary`` as an attribute (e.g. in
    build_dataset_for_inference, which calls ``self.source_dictionary.eos()``),
    which would fail on a bound method.
    """
    if self.training:
        return next(iter(self.dicts.values()))
    return self.dicts[self.args.source_lang]
@property
def target_dictionary(self):
    """Target-side Dictionary for this task.

    During training any entry of ``self.dicts`` is returned (presumably the
    languages share a joined dictionary — confirm against the task setup);
    at inference time the dictionary of --target-lang is returned.

    NOTE: this must be a @property — the rest of this file accesses
    ``self.target_dictionary`` as an attribute (e.g. inference_step calls
    ``self.target_dictionary.eos()``), which would fail on a bound method.
    """
    if self.training:
        return next(iter(self.dicts.values()))
    return self.dicts[self.args.target_lang]
def max_positions(self):
    """Return max (source, target) positions per dataset key.

    Before any dataset is loaded, a single '<src>-<tgt>' entry is returned;
    afterwards, one entry per language-pair key across all loaded splits.
    """
    limits = (self.args.max_source_positions, self.args.max_target_positions)
    if not self.datasets.values():
        return {'%s-%s' % (self.args.source_lang, self.args.target_lang): limits}
    return OrderedDict(
        (key, limits)
        for split in self.datasets.keys()
        for key in self.datasets[split].datasets.keys()
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.