code stringlengths 101 5.91M |
|---|
def read_data(filename):
    """Read a TSV dataset of (input, output, tags) triples.

    Each non-blank line must contain three tab-separated fields: an input
    string, an output string, and a ';'-separated tag list.

    Returns a tuple (inputs, outputs, tags) of parallel lists, where each
    input/output is a list of characters and each tags entry is a list of
    tag strings.
    """
    with codecs.open(filename, 'r', 'utf-8') as inp:
        lines = inp.readlines()
    inputs = []
    outputs = []
    tags = []
    for line in lines:
        # BUG FIX: the original tested the *split* result, which is always
        # truthy (''.split('\t') == ['']), so blank/trailing lines crashed
        # with IndexError.  Test the stripped raw line instead.
        if not line.strip():
            continue
        fields = line.strip().split('\t')
        inputs.append(list(fields[0].strip()))
        outputs.append(list(fields[1].strip()))
        tags.append(fields[2].strip().split(';'))
    return (inputs, outputs, tags)
def process_file_karate(file):
    """Load an edge-list graph and return its largest connected component,
    relabelled with consecutive integer node ids."""
    graph = nx.read_edgelist(file)
    components = sorted(nx.connected_components(graph), key=len, reverse=True)
    largest_cc = graph.subgraph(components[0])
    return nx.convert_node_labels_to_integers(largest_cc)
_torch
_vision
class DeiTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    """Tests for DeiTFeatureExtractor: config-attribute presence plus the
    PIL / numpy / torch input paths, single image and batch.

    NOTE(review): the bare `_torch` / `_vision` lines above this class are
    the mangled remains of @require_torch / @require_vision decorators.
    """

    feature_extraction_class = (DeiTFeatureExtractor if is_vision_available() else None)

    def setUp(self):
        self.feature_extract_tester = DeiTFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        # BUG FIX: every test reads `self.feat_extract_dict` without calling
        # it, so this must be a property; without the decorator,
        # `**self.feat_extract_dict` would try to unpack a bound method.
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_properties(self):
        """The extractor exposes all expected config attributes."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, 'do_resize'))
        self.assertTrue(hasattr(feature_extractor, 'size'))
        self.assertTrue(hasattr(feature_extractor, 'do_center_crop'))
        self.assertTrue(hasattr(feature_extractor, 'center_crop'))
        self.assertTrue(hasattr(feature_extractor, 'do_normalize'))
        self.assertTrue(hasattr(feature_extractor, 'image_mean'))
        self.assertTrue(hasattr(feature_extractor, 'image_std'))

    def test_batch_feature(self):
        # Batch behaviour is covered by the per-input-type tests below.
        pass

    def test_call_pil(self):
        """PIL input: single image and batch produce NCHW pixel_values."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Single image -> batch dimension of 1.
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
        # Full batch.
        encoded_images = feature_extractor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))

    def test_call_numpy(self):
        """numpy input: single array and batch produce NCHW pixel_values."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
        encoded_images = feature_extractor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))

    def test_call_pytorch(self):
        """torch input: single tensor and batch produce NCHW pixel_values."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
        encoded_images = feature_extractor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
def get_interesting_values(dataset: LAMLDataset, per_top_categories: float) -> Dict:
    """For every categorical feature, collect its most frequent values.

    Features with at most 10 distinct values keep all of them; otherwise
    round(n_unique * per_top_categories * 0.001) of the top values are kept.
    Returns a mapping {feature_name: [top values]}.
    """
    max_plain_unique = 10
    interesting = dict()
    frame = dataset.data
    for feature in get_columns_by_role(dataset, 'Category'):
        counts = frame[feature].value_counts()
        n_unique = len(counts)
        if n_unique > max_plain_unique:
            top_k = round(n_unique * per_top_categories * 0.001)
        else:
            top_k = n_unique
        interesting[feature] = counts.index[:top_k].tolist()
    return interesting
class model_inference(object):
    """Dispatch wrapper that builds the right inference engine (mlir /
    bmodel / onnx, selected by model-file extension) and merges in the
    right post-processing scorer's CLI options (topx / coco_mAP).
    """

    def __init__(self, parser):
        (args, _) = parser.parse_known_args()
        # Select the scorer's argument parser from --postprocess_type.
        if (args.postprocess_type == 'topx'):
            from eval.postprocess_and_score_calc.topx import score_Parser
        elif (args.postprocess_type == 'coco_mAP'):
            from eval.postprocess_and_score_calc.coco_mAP import score_Parser
        else:
            print('postprocess_type error')
            exit(1)
        # Re-parse argv with the scorer's options merged in.
        new_parser = argparse.ArgumentParser(parents=[parser, score_Parser().parser], conflict_handler='resolve')
        args = new_parser.parse_args()
        # Choose the engine by model-file extension.
        if args.model_file.endswith('.mlir'):
            engine = mlir_inference(args)
        elif (args.model_file.endswith('.bmodel') or args.model_file.endswith('.cvimodel')):
            engine = bmodel_inference(args)
        elif args.model_file.endswith('.onnx'):
            # ONNX additionally needs preprocessing options + input shapes.
            parser = get_preprocess_parser(existed_parser=new_parser)
            parser.add_argument('--input_shapes', type=str, help='input_shapes')
            args = parser.parse_args()
            engine = onnx_inference(args)
        else:
            print('model_file:{}, ext_name error'.format(args.model_file))
            exit(1)
        self.engine = engine

    def run(self, idx, img_path, target=None):
        """Run inference for one sample; delegates to the engine."""
        self.engine.run(idx, img_path, target)

    def get_result(self):
        """Return the engine's accumulated result.

        BUG FIX: the original called engine.get_result() but dropped its
        return value, so callers always received None.
        """
        return self.engine.get_result()
class TFViTAttention(keras.layers.Layer):
    """ViT attention block: self-attention followed by its output
    projection.  Returns a tuple whose first element is the attention
    output; attention probabilities are appended when requested.
    """

    def __init__(self, config: ConfigDict, **kwargs):
        super().__init__(**kwargs)
        self.self_attention = TFViTSelfAttention(config, name='attention')
        self.dense_output = TFViTSelfOutput(config, name='output')

    def call(self, input_tensor: tf.Tensor, head_mask: tf.Tensor=None, output_attentions: bool=False, training: bool=False) -> Tuple[tf.Tensor]:
        # When output_attentions is True, self_attention returns a tuple
        # (context, probs, ...); otherwise it returns the context directly.
        self_outputs = self.self_attention(hidden_states=input_tensor, head_mask=head_mask, output_attentions=output_attentions, training=training)
        attention_output = self.dense_output(hidden_states=(self_outputs[0] if output_attentions else self_outputs), training=training)
        if output_attentions:
            # Forward the attention probabilities alongside the output.
            outputs = ((attention_output,) + self_outputs[1:])
        else:
            # BUG FIX: the original fell off the end of the function here
            # and implicitly returned None when output_attentions was False.
            outputs = (attention_output,)
        return outputs
class DataLoading():
    """Helpers for loading AVSpeech spectrogram/embedding pickles.

    NOTE(review): the loader methods took no `self` in the original and are
    clearly meant to be called at class level (DataLoading.load_ids(...));
    they are marked @staticmethod here, which keeps class-level calls
    working and fixes instance-level calls that would have mis-bound the
    first positional argument as `self`.
    """

    def __init__(self):
        # CSV columns: YouTube id, start time, end time, face x, face y.
        self.train_csv = 'preprocess/avspeech_train.csv'

    @staticmethod
    def load_ids(from_id, to_id, split='train'):
        """Return ids in [from_id, to_id] that have both a spectrogram and
        an embedding pickle on disk for `split`."""
        train_csv = 'preprocess/avspeech_train.csv'
        print('Loading IDs ')
        data = pd.read_csv(train_csv, header=None, names=['id', 'start', 'end', 'x', 'y'])
        ids = set([])
        for i in range(from_id, (to_id + 1)):
            # Keep an id only when both of its feature files exist.
            if (not os.path.isfile((((('preprocess/' + split) + '/spectrograms/') + data.loc[(i, 'id')]) + '.pkl'))):
                continue
            elif (not os.path.isfile((((('preprocess/' + split) + '/embeddings/') + data.loc[(i, 'id')]) + '.pkl'))):
                continue
            else:
                ids.add(data.loc[(i, 'id')])
        print('Total ', len(ids), split, ' IDs Loaded !! ')
        return list(ids)

    @staticmethod
    def split_data(_ids, split=(0.8, 0.1, 0.1)):
        """Partition `_ids` into train/valid/test by the given fractions.

        The default is now an immutable tuple (the original used a mutable
        list default); indexing behaviour is unchanged.
        """
        data = np.array(_ids)
        (valid, test) = (int((split[1] * len(_ids))), int((split[2] * len(_ids))))
        train = (len(_ids) - (valid + test))
        (train_split, valid_split, test_split) = (data[0:train], data[train:(train + valid)], data[(train + valid):])
        print('Total ', 'train:', train, 'valid:', valid, 'test:', test, ' IDs Loaded !! ')
        return (train_split, valid_split, test_split)

    @staticmethod
    def load_data(_ids, split='train'):
        """Load (spectrogram, embedding) arrays for the given ids."""
        x_data = np.zeros((len(_ids), 598, 257, 2))
        y_data = np.zeros((len(_ids), 4096))
        for i in range(len(_ids)):
            with open((((('preprocess/' + split) + '/spectrograms/') + _ids[i]) + '.pkl'), 'rb') as f:
                x_data[i] = pickle.load(f)
            with open((((('preprocess/' + split) + '/embeddings/') + _ids[i]) + '.pkl'), 'rb') as f:
                y_data[i] = pickle.load(f)
        return (x_data, y_data)

    @staticmethod
    def load_Y_data(_ids, split='train'):
        """Load only the embedding targets for the given ids."""
        y_data = np.zeros((len(_ids), 4096))
        for i in range(len(_ids)):
            with open((((('preprocess/' + split) + '/embeddings/') + _ids[i]) + '.pkl'), 'rb') as f:
                y_data[i] = pickle.load(f)
        return y_data

    @staticmethod
    def load_X_data(_ids, split='train'):
        """Load only the spectrogram inputs for the given ids."""
        x_data = np.zeros((len(_ids), 598, 257, 2))
        for i in range(len(_ids)):
            with open((((('preprocess/' + split) + '/spectrograms/') + _ids[i]) + '.pkl'), 'rb') as f:
                x_data[i] = pickle.load(f)
        return x_data
def _worker_init(g, id):
    """Initialise one pool worker: hide GPUs when running in parallel and
    tag the worker object with its id."""
    running_parallel = singleton_pool.n_parallel > 1
    if running_parallel:
        import os
        # Parallel workers must not grab a GPU.
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    g.worker_id = id
def mkdirs_ss(path):
    """Create `path` (including parents) if it does not already exist,
    printing a note when it was created.

    Uses makedirs(exist_ok=True) so a concurrent creation between the
    exists() check and makedirs() no longer raises (TOCTOU race in the
    original).
    """
    if not os.path.exists(path):
        os.makedirs(path, exist_ok=True)
        print('folder created: {}'.format(path))
def r_ifelse(t):
    """Rewrite an if/else parse node into flat guarded form.

    t[2], t[5] and t[9] hold the condition and the two branch emitters;
    each is a callable (k, n) -> token list.  The returned emitter yields
    ['if', <cond>..., <then>..., 'if', <negated cond>..., <else>...],
    collapsing to a single branch when both branches emit identical code.
    Returns [('ifelse_stmt', emitter)].
    """
    cond, then_branch, else_branch = t[2], t[5], t[9]

    def emit(k, n):
        then_out = then_branch(k, n)
        else_out = else_branch(k, n)
        if then_out == else_out:
            # Both arms are identical, so the condition is irrelevant.
            return then_out
        cond_out = cond(k, n)
        # Negate the condition for the else-guard, stripping a leading
        # 'not' instead of stacking a second one.
        if cond_out[0] == 'not':
            negated_guard = ['if'] + cond_out[1:]
        else:
            negated_guard = ['if', 'not'] + cond_out
        return ['if'] + cond_out + then_out + negated_guard + else_out

    return [('ifelse_stmt', emit)]
def dump_entity_linking_for_training(split, keep=10):
    """Run BERT entity linking over a GrailQA split and dump the top-`keep`
    candidates per mention to outputs/grail_<split>_entities.json.

    Side effects: loads a large Freebase surface-form index from disk,
    runs the linker on GPU 'cuda:0', reads outputs/grailqa_v1.0_<split>.json
    and writes the result file.
    """
    # Memory-backed surface-form index over Freebase mention data.
    surface_index = surface_index_memory.EntitySurfaceIndexMemory('entity_linker/data/entity_list_file_freebase_complete_all_mention', 'entity_linker/data/surface_map_file_freebase_complete_all_mention', 'entity_linker/data/freebase_complete_all_mention')
    entity_linker = BertEntityLinker(surface_index, model_path='/BERT_NER/trained_ner_model/', device='cuda:0')
    # Smoke-test the linker on a known utterance before the long run.
    sanity_checking = get_all_entity_candidates(entity_linker, 'the music video stronger was directed by whom')
    print('RUNNING Sanity Checking on untterance')
    print('\t', 'the music video stronger was directed by whom')
    print('Checking result', sanity_checking[0][:2])
    print('Checking result should successfully link stronger to some nodes in Freebase (MIDs)')
    print('If checking result does not look good please check if the linker has been set up successfully')
    datafile = f'outputs/grailqa_v1.0_{split}.json'
    with open(datafile) as f:
        data = json.load(f)
    el_results = {}
    for ex in tqdm(data, total=len(data)):
        query = ex['question']
        qid = str(ex['qid'])
        # Keep only the highest-ranked `keep` candidates per mention.
        all_candidates = get_all_entity_candidates(entity_linker, query)
        all_candidates = [x[:keep] for x in all_candidates]
        el_results[qid] = all_candidates
    with open(f'outputs/grail_{split}_entities.json', 'w') as f:
        json.dump(el_results, f)
def partseg_seq_combined_categories(arch_str='64_128_256_256', batchnorm=True, skip_str=(), bilateral_nbr=1, conv_weight_filler='xavier', bltr_weight_filler='gauss_0.001', dataset='shapenet', dataset_params=None, sample_size=3000, batch_size=32, feat_dims_str='x_y_z', lattice_dims_str=None, renorm_class=False, deploy=False, create_prototxt=True, save_path=None):
    """Build a Caffe NetSpec for point-cloud part segmentation over all
    ShapeNet categories combined.

    arch_str tokens are layer specs: a plain number is a 1x1 convolution
    with that many outputs; a 'b'-prefixed number is a bilateral
    (Permutohedral) layer.  skip_str entries of the form
    '<to>_<from>[_<flags>]' add skip connections ('g' flag: global-pool the
    source, 'a' flag: element-wise add instead of concat).  Returns the
    network proto (written to save_path when create_prototxt is set).
    """
    n = caffe.NetSpec()
    # Parse arch_str into (layer_type, num_output) pairs.
    arch_str = [((v[0], int(v[1:])) if (v[0] in {'b', 'c'}) else ('c', int(v))) for v in arch_str.split('_')]
    num_bltr_layers = sum(((v[0] == 'b') for v in arch_str))
    if (num_bltr_layers > 0):
        # Normalise lattice_dims_str into exactly one entry per bilateral layer.
        if (type(lattice_dims_str) == str):
            lattice_dims_str = ((lattice_dims_str,) * num_bltr_layers)
        elif (len(lattice_dims_str) == 1):
            lattice_dims_str = (lattice_dims_str * num_bltr_layers)
        else:
            assert (len(lattice_dims_str) == num_bltr_layers), '{} lattices should be provided'.format(num_bltr_layers)
        feat_dims = parse_channel_scale(feat_dims_str, channel_str=True)[0]
        lattice_dims = [parse_channel_scale(s, channel_str=True)[0] for s in lattice_dims_str]
        # Deduplicated union of feature + lattice channels feeds the data layer.
        input_dims_w_dup = (feat_dims + reduce((lambda x, y: (x + y)), lattice_dims))
        input_dims = reduce((lambda x, y: (x if (y in x) else (x + [y]))), input_dims_w_dup, [])
        feat_dims_str = map_channel_scale(feat_dims_str, input_dims)
        lattice_dims_str = [map_channel_scale(s, input_dims) for s in lattice_dims_str]
        input_dims_str = '_'.join(input_dims)
    else:
        feat_dims = parse_channel_scale(feat_dims_str, channel_str=True)[0]
        input_dims = feat_dims
        feat_dims_str = map_channel_scale(feat_dims_str, input_dims)
        input_dims_str = '_'.join(input_dims)
    if (dataset == 'shapenet'):
        from splatnet.configs import SN_NUM_PART_CATEGORIES
        nclass = sum(SN_NUM_PART_CATEGORIES)
        # Start from defaults, then layer caller-provided overrides on top.
        dataset_params_new = ({} if (not dataset_params) else dataset_params)
        dataset_params = dict(subset_train='train', subset_test='val')
        dataset_params.update(dataset_params_new)
        dataset_params['feat_dims'] = input_dims_str
        dataset_params['sample_size'] = sample_size
        dataset_params['batch_size'] = batch_size
        dataset_params['output_mask'] = renorm_class
        # Coerce possibly string-valued CLI params to their proper types.
        for v in {'jitter_xyz', 'jitter_rotation', 'jitter_stretch'}:
            if (v in dataset_params):
                dataset_params[v] = float(dataset_params[v])
        for v in {'sample_size', 'batch_size'}:
            if (v in dataset_params):
                dataset_params[v] = int(dataset_params[v])
        for v in {'output_mask'}:
            if (v in dataset_params):
                dataset_params[v] = bool(dataset_params[v])
        # Split into train/test variants; test disables all augmentation.
        dataset_params_train = dataset_params.copy()
        dataset_params_train['subset'] = dataset_params['subset_train']
        del dataset_params_train['subset_train'], dataset_params_train['subset_test']
        dataset_params_test = dataset_params.copy()
        dataset_params_test['subset'] = dataset_params['subset_test']
        dataset_params_test['jitter_xyz'] = 0.0
        dataset_params_test['jitter_stretch'] = 0.0
        dataset_params_test['jitter_rotation'] = 0.0
        del dataset_params_test['subset_train'], dataset_params_test['subset_test']
        datalayer_train = L.Python(name='data', include=dict(phase=caffe.TRAIN), ntop=(3 if renorm_class else 2), python_param=dict(module='dataset_shapenet', layer='InputShapenetAllCategories', param_str=repr(dataset_params_train)))
        datalayer_test = L.Python(name='data', include=dict(phase=caffe.TEST), ntop=0, top=(['data', 'label', 'label_mask'] if renorm_class else ['data', 'label']), python_param=dict(module='dataset_shapenet', layer='InputShapenetAllCategories', param_str=repr(dataset_params_test)))
    else:
        raise ValueError('Dataset {} unknown'.format(dataset))
    if deploy:
        # Deploy nets take explicit Input blobs instead of a data layer.
        n.data = L.Input(shape=dict(dim=[1, len(input_dims), 1, sample_size]))
        if renorm_class:
            n.label_mask = L.Input(shape=dict(dim=[1, nclass, 1, 1]))
    else:
        if renorm_class:
            (n.data, n.label, n.label_mask) = datalayer_train
        else:
            (n.data, n.label) = datalayer_train
        n.test_data = datalayer_test
    # Select/scale the feature channels out of the raw data blob.
    n.data_feat = L.Python(n.data, python_param=dict(module='custom_layers', layer='PickAndScale', param_str=feat_dims_str))
    top_prev = n.data_feat
    # Resolve weight-filler spec strings into Caffe filler dicts.
    if (conv_weight_filler in {'xavier', 'msra'}):
        conv_weight_filler = dict(type=conv_weight_filler)
    elif conv_weight_filler.startswith('gauss_'):
        conv_weight_filler = dict(type='gaussian', std=float(conv_weight_filler.split('_')[1]))
    else:
        # NOTE(review): eval() of a config string — trusted callers only.
        conv_weight_filler = eval(conv_weight_filler)
    assert bltr_weight_filler.startswith('gauss_')
    bltr_weight_filler = dict(type='gaussian', std=float(bltr_weight_filler.split('_')[1]))
    idx = 1
    bltr_idx = 0
    lattices = dict()
    last_in_block = dict()
    for (layer_type, n_out) in arch_str:
        if (layer_type == 'c'):
            # Plain 1x1 convolution layer.
            n[('conv' + str(idx))] = L.Convolution(top_prev, convolution_param=dict(num_output=n_out, kernel_size=1, stride=1, pad=0, weight_filler=conv_weight_filler, bias_filler=dict(type='constant', value=0)), param=[dict(lr_mult=1), dict(lr_mult=0.1)])
        elif (layer_type == 'b'):
            lattice_dims_str_curr = lattice_dims_str[bltr_idx]
            if (lattice_dims_str_curr in lattices):
                # Reuse a lattice already built on the same coordinates.
                (top_data_lattice, top_lattice) = lattices[lattice_dims_str_curr]
                n[('conv' + str(idx))] = L.Permutohedral(top_prev, top_data_lattice, top_data_lattice, top_lattice, permutohedral_param=dict(num_output=n_out, group=1, neighborhood_size=bilateral_nbr, bias_term=True, norm_type=P.Permutohedral.AFTER, offset_type=P.Permutohedral.NONE, filter_filler=bltr_weight_filler, bias_filler=dict(type='constant', value=0)), param=[{'lr_mult': 1, 'decay_mult': 1}, {'lr_mult': 2, 'decay_mult': 0}])
            else:
                top_data_lattice = L.Python(n.data, python_param=dict(module='custom_layers', layer='PickAndScale', param_str=lattice_dims_str_curr))
                n[('data_lattice' + str(len(lattices)))] = top_data_lattice
                if (lattice_dims_str.count(lattice_dims_str_curr) > 1):
                    # Export the lattice blob so later layers can share it.
                    (n[('conv' + str(idx))], top_lattice) = L.Permutohedral(top_prev, top_data_lattice, top_data_lattice, ntop=2, permutohedral_param=dict(num_output=n_out, group=1, neighborhood_size=bilateral_nbr, bias_term=True, norm_type=P.Permutohedral.AFTER, offset_type=P.Permutohedral.NONE, filter_filler=bltr_weight_filler, bias_filler=dict(type='constant', value=0)), param=[{'lr_mult': 1, 'decay_mult': 1}, {'lr_mult': 2, 'decay_mult': 0}])
                    n[('lattice' + str(len(lattices)))] = top_lattice
                else:
                    n[('conv' + str(idx))] = L.Permutohedral(top_prev, top_data_lattice, top_data_lattice, permutohedral_param=dict(num_output=n_out, group=1, neighborhood_size=bilateral_nbr, bias_term=True, norm_type=P.Permutohedral.AFTER, offset_type=P.Permutohedral.NONE, filter_filler=bltr_weight_filler, bias_filler=dict(type='constant', value=0)), param=[{'lr_mult': 1, 'decay_mult': 1}, {'lr_mult': 2, 'decay_mult': 0}])
                    top_lattice = None
                lattices[lattice_dims_str_curr] = (top_data_lattice, top_lattice)
            bltr_idx += 1
        top_prev = n[('conv' + str(idx))]
        if batchnorm:
            n[('bn' + str(idx))] = L.BatchNorm(top_prev)
            top_prev = n[('bn' + str(idx))]
        n[('relu' + str(idx))] = L.ReLU(top_prev, in_place=True)
        top_prev = n[('relu' + str(idx))]
        if (skip_str is None):
            skip_str = ()
        # Wire any skip connections that target the current layer index.
        skip_tos = [v.split('_')[0] for v in skip_str]
        if (str(idx) in skip_tos):
            skip_idxs = list(filter((lambda i: (skip_tos[i] == str(idx))), range(len(skip_tos))))
            skip_params = [skip_str[i].split('_') for i in skip_idxs]
            # All skips targeting one layer must share the same flags.
            if (len(skip_params[0]) == 2):
                assert all(((len(v) == 2) for v in skip_params))
            else:
                assert all(((v[2] == skip_params[0][2]) for v in skip_params))
            if ((len(skip_params[0]) > 2) and ('g' in skip_params[0][2])):
                n[('gpool' + str(idx))] = L.Python(top_prev, python_param=dict(module='custom_layers', layer='GlobalPooling'))
                top_prev = n[('gpool' + str(idx))]
            if ((len(skip_params[0]) > 2) and ('a' in skip_params[0][2])):
                n[('add' + str(idx))] = L.Eltwise(top_prev, *[last_in_block[int(v[1])] for v in skip_params], eltwise_param=dict(operation=P.Eltwise.SUM))
                top_prev = n[('add' + str(idx))]
            else:
                n[('concat' + str(idx))] = L.Concat(top_prev, *[last_in_block[int(v[1])] for v in skip_params])
                top_prev = n[('concat' + str(idx))]
        last_in_block[idx] = top_prev
        idx += 1
    # Final 1x1 convolution producing per-point class logits.
    n[('conv' + str(idx))] = L.Convolution(top_prev, convolution_param=dict(num_output=nclass, kernel_size=1, stride=1, pad=0, weight_filler=conv_weight_filler, bias_filler=dict(type='constant', value=0)), param=[dict(lr_mult=1), dict(lr_mult=0.1)])
    top_prev = n[('conv' + str(idx))]
    if renorm_class:
        if deploy:
            n.prob = L.Softmax(top_prev)
        else:
            # Renormalise softmax over each category's valid part classes.
            n.prob_raw = L.Softmax(top_prev)
            n.prob = L.Python(n.prob_raw, n.label_mask, python_param=dict(module='custom_layers', layer='ProbRenorm'))
            n.loss = L.Python(n.prob, n.label, python_param=dict(module='custom_layers', layer='LogLoss'), loss_weight=1)
            n.accuracy = L.Accuracy(n.prob, n.label)
    elif deploy:
        n.prob = L.Softmax(top_prev)
    else:
        n.loss = L.SoftmaxWithLoss(top_prev, n.label)
        n.accuracy = L.Accuracy(top_prev, n.label)
    net = n.to_proto()
    if create_prototxt:
        net = get_prototxt(net, save_path)
    return net
# NOTE(review): the bare ('/') expression that preceded this function was
# the mangled remains of a routing decorator (e.g. @app.route('/'));
# restore it when wiring this view back up.
def index():
    """Demo view that imports a urllib-family module chosen by the caller.

    SECURITY FIX: the original passed the raw `module_version` query
    parameter straight into exec(), allowing arbitrary code execution via
    e.g. `?module_version=;__import__('os').system(...)`.  The value is now
    restricted to a short digit suffix and imported via importlib.
    """
    import importlib
    import re
    module_version = request.args.get('module_version')
    suffix = module_version or ''
    if not re.fullmatch(r'\d{0,2}', suffix):
        return 'invalid module_version', 400
    importlib.import_module(f'urllib{suffix}')
    return 'Hello World!'
def extract_roi(detections, class_id, img_bbox, min_size, patch_size):
    """Select non-overlapping patch ROIs around detections of `class_id`.

    Each detection row is (class, score, ..., x1, y1, x2, y2).  A candidate
    patch of `patch_size` (h, w) is centred on the object box and kept when
    the object box is at least `min_size` and the patch lies inside
    `img_bbox`.  Candidates are then greedily accepted in descending score
    order, rejecting any patch overlapping an already-accepted one.
    Returns a list of (score, obj_bbox, pat_bbox, detection_index) tuples.
    """
    candidates = []
    for det_idx, det in enumerate(detections):
        if int(det[0]) != class_id:
            continue
        obj_bbox = tuple(map(int, det[-4:]))
        # Centre of the object box.
        cx = obj_bbox[0] + (obj_bbox[2] - obj_bbox[0]) // 2
        cy = obj_bbox[1] + (obj_bbox[3] - obj_bbox[1]) // 2
        pat_bbox = (cx - patch_size[1] // 2,
                    cy - patch_size[0] // 2,
                    cx + patch_size[1] // 2,
                    cy + patch_size[0] // 2)
        if bb_size(obj_bbox) >= min_size and bb_inside(pat_bbox, img_bbox):
            candidates.append((float(det[1]), obj_bbox, pat_bbox, det_idx))
    # Greedy non-overlap selection, highest score first.
    rois = []
    for candidate in sorted(candidates, key=lambda c: -c[0]):
        if not np.any([bb_overlap(candidate[2], kept[2]) for kept in rois]):
            rois.append(candidate)
    return rois
_module()
class CoarseMaskHead(FCNMaskHead):
    """Coarse mask head: inherited convs, an optional strided downsample
    conv, then fully-connected layers predicting a (num_classes, H, W) mask
    logit grid at reduced resolution.

    NOTE(review): the bare `_module()` line above this class and the bare
    `_fp16()` line before forward() are the mangled remains of a registry
    decorator and @auto_fp16() from the original mmdet source; they must be
    restored for this file to run.
    """

    def __init__(self, num_convs=0, num_fcs=2, fc_out_channels=1024, downsample_factor=2, init_cfg=dict(type='Xavier', override=[dict(name='fcs'), dict(type='Constant', val=0.001, name='fc_logits')]), *arg, **kwarg):
        # Parent is configured with no upsampling and no init; this head
        # replaces the conv-logits output with an FC head below.
        super(CoarseMaskHead, self).__init__(*arg, num_convs=num_convs, upsample_cfg=dict(type=None), init_cfg=None, **kwarg)
        self.init_cfg = init_cfg
        self.num_fcs = num_fcs
        assert (self.num_fcs > 0)
        self.fc_out_channels = fc_out_channels
        self.downsample_factor = downsample_factor
        assert (self.downsample_factor >= 1)
        # The FC head fully replaces the parent's conv_logits layer.
        delattr(self, 'conv_logits')
        if (downsample_factor > 1):
            downsample_in_channels = (self.conv_out_channels if (self.num_convs > 0) else self.in_channels)
            # Strided conv shrinks the RoI feature map by downsample_factor.
            self.downsample_conv = ConvModule(downsample_in_channels, self.conv_out_channels, kernel_size=downsample_factor, stride=downsample_factor, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)
        else:
            self.downsample_conv = None
        self.output_size = ((self.roi_feat_size[0] // downsample_factor), (self.roi_feat_size[1] // downsample_factor))
        self.output_area = (self.output_size[0] * self.output_size[1])
        last_layer_dim = (self.conv_out_channels * self.output_area)
        self.fcs = ModuleList()
        for i in range(num_fcs):
            fc_in_channels = (last_layer_dim if (i == 0) else self.fc_out_channels)
            self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        # One logit per class per output pixel, reshaped in forward().
        output_channels = (self.num_classes * self.output_area)
        self.fc_logits = Linear(last_layer_dim, output_channels)

    def init_weights(self):
        # Skip FCNMaskHead's own init; use the grandparent's scheme.
        super(FCNMaskHead, self).init_weights()
    _fp16()
    def forward(self, x):
        """RoI features -> (N, num_classes, out_h, out_w) mask logits."""
        for conv in self.convs:
            x = conv(x)
        if (self.downsample_conv is not None):
            x = self.downsample_conv(x)
        x = x.flatten(1)
        for fc in self.fcs:
            x = self.relu(fc(x))
        mask_pred = self.fc_logits(x).view(x.size(0), self.num_classes, *self.output_size)
        return mask_pred
def information_gain(true, pred):
    """KL-style information gain between observed counts and predictions.

    Computes sum_i (true_i / N) * ln(true_i / pred_i) with N = sum(true).
    Terms with true_i == 0 contribute 0 (the limit of x*ln(x) as x -> 0),
    which the original implementation turned into NaN via log(0).
    """
    N = np.sum(true)
    gain = 0.0
    for true_value, pred_value in zip(true, pred):
        if true_value == 0:
            # 0 * log(0) -> 0 by convention; skip to avoid NaN/-inf.
            continue
        gain += (1.0 * true_value / N) * np.log(true_value / pred_value)
    return gain
class ConvSlotEncoder(nn.Module):
    """Convolutional encoder producing per-pixel encodings plus slot
    parameters via slot attention (or an MLP head when mlp_output is set).

    The pixel positional encoder is chosen by flags: 3D camera/ray encoding
    (use_camera_pos), 2D Fourier features (fourier_pos), or a learned 2D
    encoding applied after the conv trunk (default).
    """

    def __init__(self, c_dim=128, i_dim=3, h_dim=128, downsample=0, att_height=None, att_width=None, num_slots=8, num_layers=4, slot_iters=3, self_attention=False, use_camera_pos=False, explicit_bg=False, resnet=False, coord_mapper=None, pos_encoder=None, pred_coords=False, encode_rays=False, mlp_output=False, fourier_pos=False, in_width=64, in_height=64, deterministic=False):
        super().__init__()
        self.convs = nn.ModuleList()
        self.att_height = att_height
        self.att_width = att_width
        self.h_dim = h_dim
        self.c_dim = c_dim
        self.use_camera_pos = use_camera_pos
        self.coord_mapper = coord_mapper
        self.explicit_bg = explicit_bg
        self.mlp_output = mlp_output
        self.fourier_pos = fourier_pos
        if use_camera_pos:
            self.pixel_encoder = PixelEncoder3d(encode_rays=encode_rays)
            # Extra input channels contributed by the 3D positional encoding.
            # NOTE(review): both increments are applied (99 and 3+96) — this
            # looks redundant; confirm the intended i_dim against callers.
            i_dim += 99
            i_dim += (3 + (96 if encode_rays else 0))
        elif fourier_pos:
            self.pixel_encoder = PixelEncoder2dFourier((in_height, in_width))
            # 8 frequencies x (sin, cos) x 2 axes, plus the raw 2D coords.
            i_dim += (((8 * 2) * 2) + 2)
        else:
            assert ((att_height is not None) and (att_width is not None))
            self.pixel_encoder = PixelEncoder2d(c_dim, (att_height, att_width))
        self.resnet = resnet
        if (not resnet):
            # Plain conv trunk; the first `downsample` layers use stride 2.
            for i in range(num_layers):
                stride = (2 if (downsample > 0) else 1)
                input_dim = (i_dim if (i == 0) else h_dim)
                downsample -= 1
                self.convs.append(nn.Conv2d(input_dim, h_dim, 5, stride=stride, padding=2))
        else:
            print(resnet)
            # `resnet` may be an architecture name or a truthy flag.
            if isinstance(resnet, str):
                arch = resnet
            else:
                arch = 'resnet18'
            self.resnet = make_custom_resnet(arch, i_dim, c_dim=h_dim, h_dim=h_dim, downsample=downsample)
        pixel_enc_dim = (h_dim if mlp_output else c_dim)
        self.norm = nn.LayerNorm(h_dim)
        self.fc1 = nn.Linear(h_dim, h_dim)
        self.fc2 = nn.Linear(h_dim, pixel_enc_dim)
        self.actvn = nn.ReLU()
        if pred_coords:
            # Optional per-pixel 3D coordinate regressor head.
            self.coord_pred = nn.Linear(pixel_enc_dim, 3)
        else:
            self.coord_pred = None
        if mlp_output:
            # Flatten all pixels and map straight to a single slot vector.
            self.slot_att = nn.Sequential(nn.Flatten(1, 2), nn.Linear(((pixel_enc_dim * att_height) * att_width), (c_dim * 4)), self.actvn, nn.Linear((c_dim * 4), (c_dim * 2)), self.actvn, nn.Linear((c_dim * 2), c_dim))
        else:
            self.slot_att = SlotAttention(num_slots, att_height, att_width, c_dim, iters=slot_iters, hidden_dim=c_dim, self_attention=self_attention, explicit_bg=explicit_bg, pos_encoder=pos_encoder, deterministic=deterministic)

    def forward(self, x, camera_pos=None, rays=None):
        """Encode images `x` -> (pixel_encoding, slot parameters)."""
        batch_size = x.size(0)
        # 3D / Fourier position encodings are applied before the trunk.
        if self.fourier_pos:
            x = self.pixel_encoder(x)
        elif self.use_camera_pos:
            x = self.pixel_encoder(x, camera_pos, rays)
        if (not self.resnet):
            for c in self.convs:
                x = self.actvn(c(x))
        else:
            x = self.resnet(x)
        # NCHW -> NHWC for the per-pixel MLP head.
        x = x.permute(0, 2, 3, 1)
        # The learned 2D encoding is applied after the trunk instead.
        if ((not self.use_camera_pos) and (not self.mlp_output) and (not self.fourier_pos)):
            x = self.pixel_encoder(x)
        x = self.norm(x)
        x = self.actvn(self.fc1(x))
        pixel_encoding = self.fc2(x)
        c_params = self.do_slot_att(pixel_encoding)
        return (pixel_encoding, c_params)

    def do_slot_att(self, pixel_encoding):
        """Flatten pixels and run slot attention (or the MLP head)."""
        if self.use_camera_pos:
            # NOTE(review): `camera_pos` is not defined in this method's
            # scope — this lambda raises NameError if ever evaluated with
            # use_camera_pos=True.  It presumably should be threaded through
            # from forward(); confirm against callers before fixing.
            coord_mapper = (lambda x: project_to_image_plane(camera_pos, self.coord_mapper(x)))
        else:
            coord_mapper = self.coord_mapper
        pixel_encoding = pixel_encoding.view(pixel_encoding.shape[0], (self.att_height * self.att_width), pixel_encoding.shape[(- 1)])
        slots = self.slot_att(pixel_encoding)
        if self.mlp_output:
            # Keep a slot axis of size 1 for interface consistency.
            slots = slots.unsqueeze(1)
        return slots
def batchify(data, bsz, device):
    """Trim `data` to a multiple of `bsz` and reshape to (nbatch, bsz).

    Column j holds a contiguous stream of tokens: element [i, j] is the
    i-th token of stream j.  The result is contiguous and on `device`.
    """
    n_batches = data.size(0) // bsz
    trimmed = data.narrow(0, 0, n_batches * bsz)
    batched = trimmed.view(bsz, -1).t().contiguous()
    return batched.to(device)
class MeanRecord():
    """Running mean over the last `window_size` values, or over the whole
    history when no finite window is given.

    window_size: a positive int for a bounded sliding window, or 'inf' /
    None / float('inf') for an unbounded record.  (Generalises the
    original, which accepted only the string 'inf'.)
    """

    def __init__(self, window_size):
        # `empty` tracks whether any value has been recorded yet.
        self.empty = True
        self.window_size = window_size
        if window_size == 'inf' or window_size is None or window_size == float('inf'):
            # Unbounded history.
            self.window = []
        else:
            # Bounded window: deque drops the oldest value automatically.
            self.window = deque(maxlen=window_size)

    def record_value(self, value):
        """Append one observation."""
        self.empty = False
        self.window.append(value)

    def summary(self):
        """Mean of the retained values; raises if nothing was recorded."""
        assert not self.empty, 'empty record'
        return sum(self.window) / len(self.window)
def validate_vanilla_requests_kwargs(data: dict[(str, Any)]) -> None:
    """Reject relative URLs before the kwargs are handed to `requests`."""
    url = data['url']
    parsed = urlparse(url)
    if parsed.netloc:
        return
    raise RuntimeError(f'''The URL should be absolute, so Schemathesis knows where to send the data.
If you use the ASGI integration, please supply your test client as the `session` argument to `call`.
URL: {url}''')
class PackageUpdater(ChecksumUpdater):
    """Bump a package's recorded version and fetch its upstream tarball."""

    def __init__(self, package_name, new_version):
        super(PackageUpdater, self).__init__(package_name)
        # Write the new version immediately on construction.
        self._update_version(new_version)

    def _update_version(self, new_version):
        """Overwrite the package's package-version.txt with `new_version`."""
        old = Package(self.package_name)
        package_version_txt = os.path.join(old.path, 'package-version.txt')
        with open(package_version_txt, 'w') as f:
            # Normalise to exactly one trailing newline.
            f.write((new_version.strip() + '\n'))

    def download_upstream(self, download_url=None):
        """Download the upstream tarball to its expected local path.

        When no explicit `download_url` is given, the package's configured
        upstream_url pattern is used; a ValueError is raised if none exists.
        """
        tarball = self.package.tarball
        if (download_url is None):
            pattern = self.package.tarball_upstream_url_pattern
            # A pattern without VERSION cannot track version bumps.
            if (pattern and ('VERSION' not in pattern)):
                print('Warning: upstream_url pattern does not use the VERSION variable')
            download_url = self.package.tarball_upstream_url
        if (download_url is None):
            raise ValueError('package has no default upstream_url pattern, download_url needed')
        print('Downloading tarball from {0} to {1}'.format(download_url, tarball.upstream_fqn))
        Download(download_url, tarball.upstream_fqn).run()
class MinimalRNNCell(CellBase):
    """Minimal RNN cell: h' = (1 - g) * h + g * phi(W_hx x), where the gate
    g is computed from the concatenation of input and hidden state."""

    name = 'mrnn'
    # Configurable sub-keys: initializer for hx, and whether to use bias.
    valid_keys = ['hx', 'bias']

    def default_initializers(self):
        return {'hx': 'xavier'}

    def default_architecture(self):
        return {'bias': True}

    def __init__(self, d_input, d_model, hidden_activation='tanh', zero_bias_init=False, **kwargs):
        self.hidden_activation = hidden_activation
        self.zero_bias_init = zero_bias_init
        # CellBase.__init__ is expected to call reset_parameters().
        super().__init__(d_input, d_model, **kwargs)

    def reset_parameters(self):
        # Input projection with built-in activation (phi in the docstring).
        self.W_hx = LinearActivation(self.d_input, self.d_model, bias=self.architecture['bias'], zero_bias_init=self.zero_bias_init, initializer=self.initializers['hx'], activation=self.hidden_activation, activate=True)
        preact_ctor = LinearActivation
        # Gate input is [x; h], hence d_input + d_model.
        preact_args = [(self.d_input + self.d_model), self.d_model, self.architecture['bias']]
        self.W_g = Gate(self.d_model, preact_ctor, preact_args, mechanism='G')

    def forward(self, input, h):
        """One step: returns (output, new_state); both are the new hidden."""
        hidden = self.W_hx(input)
        hx = torch.cat((input, h), dim=(- 1))
        g = self.W_g(hx)
        # Convex combination of old state and candidate, gated by g.
        h = (((1.0 - g) * h) + (g * hidden))
        return (h, h)
class MultiOptimizer():
    """Bundle several named optimizers (and matching schedulers) behind a
    single optimizer-like interface.

    step()/zero_grad()/scheduler() accept an optional `key` to act on one
    member; with no key they act on all members.
    """

    def __init__(self, optimizers=None, schedulers=None):
        # BUG FIX: the original used mutable default arguments ({}), so all
        # default-constructed instances shared the same dicts.
        self.optimizers = {} if optimizers is None else optimizers
        self.schedulers = {} if schedulers is None else schedulers
        self.keys = list(self.optimizers.keys())
        # Flattened param_groups across all members, optimizer-style.
        # ROBUSTNESS FIX: supply an initial value so an empty optimizer
        # dict yields [] instead of raising TypeError from reduce().
        self.param_groups = reduce((lambda x, y: (x + y)), [v.param_groups for v in self.optimizers.values()], [])

    def state_dict(self):
        """Return a list of (key, member_state_dict) pairs."""
        return [(key, self.optimizers[key].state_dict()) for key in self.keys]

    def load_state_dict(self, state_dict):
        """Best-effort load of each member's state; skips failures."""
        for (key, val) in state_dict:
            try:
                self.optimizers[key].load_state_dict(val)
            except Exception:
                # BUG FIX: the original bare `except:` also swallowed
                # KeyboardInterrupt/SystemExit; keep the best-effort
                # behaviour but narrow the catch.
                print(('Unloaded %s' % key))

    def step(self, key=None, scaler=None):
        """Step one member (by key) or all members."""
        keys = ([key] if (key is not None) else self.keys)
        for k in keys:
            self._step(k, scaler)

    def _step(self, key, scaler=None):
        # With an AMP GradScaler, step through the scaler and update it.
        if (scaler is not None):
            scaler.step(self.optimizers[key])
            scaler.update()
        else:
            self.optimizers[key].step()

    def zero_grad(self, key=None):
        """Zero gradients of one member (by key) or all members."""
        if (key is not None):
            self.optimizers[key].zero_grad()
        else:
            for k in self.keys:
                self.optimizers[k].zero_grad()

    def scheduler(self, *args, key=None):
        """Step one scheduler (by key) or all schedulers."""
        if (key is not None):
            self.schedulers[key].step(*args)
        else:
            for k in self.keys:
                self.schedulers[k].step(*args)
def check_parameter_updates(model: torch.nn.Module, inputs: Any=None, output: Any=None) -> None:
    """Sanity-check that one optimizer step changes every model parameter.

    Runs a forward pass (or uses the provided `output`), backprops a
    squared-magnitude loss, takes one huge-lr SGD step, and asserts each
    parameter moved.  Raises AssertionError naming the shape of the first
    unchanged tensor.  Mutates the model's parameters as a side effect.
    """
    model.train()
    # Snapshot parameters before the update for later comparison.
    params_before = copy.deepcopy(list(model.parameters()))
    # lr is deliberately enormous so any nonzero gradient visibly moves
    # the parameter.
    optim = SGD(model.parameters(), lr=1000.0)
    if (output is None):
        # Prefer a model-provided loss-like output when available.
        if hasattr(model, 'compute_error'):
            output = model.compute_error(*inputs)
        else:
            output = model(*inputs)
    # Unwrap project-specific output containers into plain tensors.
    if isinstance(output, ActionOutput):
        mu = output.squashed_mu
        logstd = output.logstd
        output = mu
        if (logstd is not None):
            # Include logstd so its producing parameters get gradients too.
            output = (output + logstd)
    elif isinstance(output, QFunctionOutput):
        output = output.q_value
    # Squared-magnitude loss gives a nonzero gradient for any nonzero output.
    if isinstance(output, (list, tuple)):
        loss = 0.0
        for y in output:
            loss += (y ** 2).sum()
    else:
        loss = (output ** 2).sum()
    loss.backward()
    optim.step()
    for (before, after) in zip(params_before, model.parameters()):
        assert (not torch.allclose(before, after)), f'tensor with shape of {after.shape} is not updated.'
class ReviewBucketBatchSampler(Sampler):
    """Batch sampler that buckets examples by review count so each batch
    contains examples with the same number of reviews; within a bucket,
    examples are ordered by their longest review for tighter padding."""

    def __init__(self, dataset, batch_size, shuffle_batches=True, split=None):
        # Choose the id universe: whole dataset or one of its splits.
        if (split is None):
            ids = dataset.ids
        else:
            assert dataset.has_splits, "Dataset doesn't have train/dev/test splits"
            if (split == 'train'):
                ids = dataset.train_ids
            elif (split == 'dev'):
                ids = dataset.dev_ids
            else:
                ids = dataset.test_ids
        lengths = dataset.lengths
        self.batch_size = batch_size
        self.shuffle_batches = shuffle_batches
        self.buckets = defaultdict(list)
        for full_id in tqdm(ids, disable=True):
            if (full_id not in lengths):
                # Indexing the dataset lazily loads the example, which (as a
                # side effect) populates `lengths` for this id.
                dataset[full_id]
            # Bucket key: number of reviews the example has.
            rev_len = len(lengths[full_id])
            self.buckets[rev_len].append(full_id)
        # Precompute (bucket, start offset) pairs, one per batch.
        self.batch_list = []
        for bucket_len in tqdm(sorted(self.buckets), disable=True):
            # Sort within the bucket by the longest review for tight padding.
            self.buckets[bucket_len].sort(key=(lambda full_id: max(lengths[full_id])))
            self.batch_list += [(bucket_len, start_idx) for start_idx in range(0, len(self.buckets[bucket_len]), self.batch_size)]

    def __iter__(self):
        # Shuffle batch order (not batch contents) each epoch if requested.
        if self.shuffle_batches:
            shuffle(self.batch_list)
        for (bucket_len, start_idx) in self.batch_list:
            (yield self.buckets[bucket_len][start_idx:(start_idx + self.batch_size)])

    def __len__(self):
        return len(self.batch_list)
class HDF5DataModule(pl.LightningDataModule):
    """Lightning data module wrapping train/val MixDataset instances built
    from preprocessed HDF5 files; exposes the usual dataloaders."""

    def __init__(self, n_channels: int, batch_size: int, prep_files: dict, stft_length_samples: int, stft_shift_samples: int, snr_range: List[int], meta_frame_length: int, n_workers: int, dry_target: bool=True, target_dir=0, noise_snr: List[int]=None, fs: int=16000):
        super().__init__()
        self.batch_size = batch_size
        self.fs = fs
        self.meta_frame_len = meta_frame_length
        self.snr_range = snr_range
        self.n_channels = n_channels
        self.stft_len = stft_length_samples
        self.stft_shift = stft_shift_samples
        self.target_dir = target_dir
        self.noise_snr = noise_snr
        self.n_workers = n_workers
        # Datasets are built eagerly; only random augmentation differs
        # between train (random) and val (deterministic).
        self.train_dataset = MixDataset(stage='train', prep_files=prep_files, n_channels=self.n_channels, meta_frame_length=self.meta_frame_len, disable_random=False, snr_range=self.snr_range, dry_target=dry_target, target_dir=self.target_dir, noise_snr=self.noise_snr)
        self.val_dataset = MixDataset(stage='val', prep_files=prep_files, n_channels=self.n_channels, meta_frame_length=self.meta_frame_len, disable_random=True, snr_range=self.snr_range, dry_target=dry_target, target_dir=self.target_dir, noise_snr=self.noise_snr)

    def train_dataloader(self):
        return DataLoader(self.train_dataset, batch_size=self.batch_size, num_workers=self.n_workers, shuffle=True)

    def val_dataloader(self):
        return DataLoader(self.val_dataset, batch_size=self.batch_size, num_workers=self.n_workers, shuffle=False)
class TestSave():
    """Tests for the image-saving helpers (``imsave``/``imread``)."""
    # FIX: the decorator had been mangled to a bare `.parametrize(...)`,
    # which is a SyntaxError; restore the `@pytest.mark.` prefix.
    @pytest.mark.parametrize('shape,dtype', [((10, 10), np.uint8), ((10, 10), np.uint16), ((10, 10, 2), np.uint8), ((10, 10, 3), np.uint8), ((10, 10, 4), np.uint8)])
    def test_imsave_roundtrip(self, shape, dtype, tmp_path):
        """Save a gradient image and check it reads back (almost) unchanged."""
        if np.issubdtype(dtype, np.floating):
            # Float images use the conventional [0, 1] range.
            min_ = 0
            max_ = 1
        else:
            # Integer images span the full representable range of the dtype.
            min_ = 0
            max_ = np.iinfo(dtype).max
        expected = np.linspace(min_, max_, endpoint=True, num=np.prod(shape), dtype=dtype)
        expected = expected.reshape(shape)
        file_path = (tmp_path / 'roundtrip.png')
        imsave(file_path, expected)
        actual = imread(file_path)
        np.testing.assert_array_almost_equal(actual, expected)
    def test_bool_array_save(self):
        """Saving a boolean image should emit a UserWarning but still succeed."""
        with NamedTemporaryFile(suffix='.png') as f:
            fname = f.name
            with pytest.warns(UserWarning, match='.* is a boolean image'):
                a = np.zeros((5, 5), bool)
                a[(2, 2)] = True
                imsave(fname, a)
def resample_uv_to_bbox(predictor_output: DensePoseChartPredictorOutput, labels: torch.Tensor, box_xywh_abs: IntTupleBox) -> torch.Tensor:
    """Resample the predictor's U/V chart tensors into the given absolute
    xywh bounding box, delegating to ``resample_uv_tensors_to_bbox``."""
    u_tensor = predictor_output.u
    v_tensor = predictor_output.v
    return resample_uv_tensors_to_bbox(u_tensor, v_tensor, labels, box_xywh_abs)
def register_Ns3TypeId_methods(root_module, cls):
    # Generated pybindgen registration for ns3::TypeId: declares the C++
    # operators, constructors and methods to expose on the Python wrapper.
    # Comparison / stream operators.
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<')
    # Constructors: by name, default, and copy.
    cls.add_constructor([param('char const *', 'name')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    # Attribute/trace-source registration (two AddAttribute overloads differ
    # by the extra uint32_t 'flags' parameter).
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
    cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    # Const accessors.
    cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True)
    cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True)
    cls.add_method('GetGroupName', 'std::string', [], is_const=True)
    cls.add_method('GetHash', 'ns3::TypeId::hash_t', [], is_const=True)
    cls.add_method('GetName', 'std::string', [], is_const=True)
    cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True)
    # Static registry queries.
    cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True)
    cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True)
    cls.add_method('GetSize', 'std::size_t', [], is_const=True)
    cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True)
    cls.add_method('GetUid', 'uint16_t', [], is_const=True)
    cls.add_method('HasConstructor', 'bool', [], is_const=True)
    cls.add_method('HasParent', 'bool', [], is_const=True)
    cls.add_method('HideFromDocumentation', 'ns3::TypeId', [])
    cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True)
    # Lookup helpers (by name or by hash; FailSafe variant returns bool).
    cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True)
    cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True)
    cls.add_method('LookupByHashFailSafe', 'bool', [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], is_static=True)
    cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True)
    cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True)
    cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')], is_const=True)
    cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True)
    # Mutators (fluent: most return ns3::TypeId).
    cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')])
    cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')])
    cls.add_method('SetSize', 'ns3::TypeId', [param('std::size_t', 'size')])
    cls.add_method('SetUid', 'void', [param('uint16_t', 'uid')])
    return
def run_full_eval(model_dir, infer_model, infer_sess, eval_model, eval_sess, hparams, summary_writer, sample_src_data, sample_tgt_data):
    """Run sample decoding plus internal (perplexity) and external (metric)
    evaluation, and return a formatted summary together with the raw scores.

    Returns (result_summary, global_step, dev_scores, test_scores,
    dev_ppl, test_ppl).
    """
    # Decode a few sample sentences for qualitative inspection.
    run_sample_decode(infer_model, infer_sess, model_dir, hparams, summary_writer, sample_src_data, sample_tgt_data)
    dev_ppl, test_ppl = run_internal_eval(eval_model, eval_sess, model_dir, hparams, summary_writer)
    dev_scores, test_scores, global_step = run_external_eval(infer_model, infer_sess, model_dir, hparams, summary_writer)
    # Assemble "dev ..." and, when a test set is configured, ", test ...".
    summary_parts = [_format_results('dev', dev_ppl, dev_scores, hparams.metrics)]
    if hparams.test_prefix:
        summary_parts.append(_format_results('test', test_ppl, test_scores, hparams.metrics))
    result_summary = ', '.join(summary_parts)
    return (result_summary, global_step, dev_scores, test_scores, dev_ppl, test_ppl)
class _BaseFilter(ABC):
    """Base class for interaction filters that dispatches on the dataframe
    backend: Spark frames go to ``_filter_spark``, everything else to
    ``_filter_pandas``."""
    def transform(self, interactions: DataFrameLike) -> DataFrameLike:
        """Apply the filter to *interactions* using the matching backend."""
        if not isinstance(interactions, SparkDataFrame):
            return self._filter_pandas(interactions)
        return self._filter_spark(interactions)
    def _filter_spark(self, interactions: SparkDataFrame):
        # Backend hook: Spark implementation supplied by subclasses.
        pass
    def _filter_pandas(self, interactions: PandasDataFrame):
        # Backend hook: pandas implementation supplied by subclasses.
        pass
class ResNet(object):
    """TensorFlow-1 ResNet graph builder (CIFAR-style).

    Builds the forward pass (logits + softmax predictions), a cross-entropy
    cost with L2 weight decay, and — when ``mode == 'train'`` — a training op
    that also applies the batch-norm moving-average updates.
    """
    def __init__(self, hps, images, labels, mode, reuse_variables=None):
        # hps: hyper-parameter bundle; fields used below include num_classes,
        # num_residual_units, use_bottleneck, weight_decay_rate,
        # relu_leakiness, optimizer and lrn_rate.
        self.hps = hps
        self._images = images
        self.labels = labels
        # 'train' enables the train op and BN moving-average updates.
        self.mode = mode
        self.reuse_variables = reuse_variables
        # Extra ops (BN moving-average assignments) grouped into train_op.
        self._extra_train_ops = []
    def build_graph(self):
        """Assemble model, cost and (for training) the update op + summaries."""
        self.global_step = tf.contrib.framework.get_or_create_global_step()
        self._build_model()
        self._build_cost()
        tf.summary.scalar('cost', self.cost)
        if (self.mode == 'train'):
            self._build_train_op()
        self.summaries = tf.summary.merge_all()
    def _stride_arr(self, stride):
        """Return a 4-D NHWC stride vector for a scalar spatial stride."""
        return [1, stride, stride, 1]
    def _build_model(self):
        """Build init conv, three residual stages, final BN/ReLU/pool, logits."""
        with tf.variable_scope('init', reuse=self.reuse_variables):
            x = self._images
            x = self._conv('init_conv', x, 3, 3, 16, self._stride_arr(1))
        # Per-stage spatial strides and pre-activation placement.
        strides = [1, 2, 2]
        activate_before_residual = [True, False, False]
        if self.hps.use_bottleneck:
            res_func = self._bottleneck_residual
            filters = [16, 64, 128, 256]
        else:
            res_func = self._residual
            filters = [16, 16, 32, 64]
        # Stage 1: first unit may change channel count; the rest are identity-shaped.
        with tf.variable_scope('unit_1_0', reuse=self.reuse_variables):
            x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]), activate_before_residual[0])
        for i in six.moves.range(1, self.hps.num_residual_units):
            with tf.variable_scope(('unit_1_%d' % i), reuse=self.reuse_variables):
                x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
        # Stage 2 (downsamples by 2).
        with tf.variable_scope('unit_2_0', reuse=self.reuse_variables):
            x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]), activate_before_residual[1])
        for i in six.moves.range(1, self.hps.num_residual_units):
            with tf.variable_scope(('unit_2_%d' % i), reuse=self.reuse_variables):
                x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
        # Stage 3 (downsamples by 2).
        with tf.variable_scope('unit_3_0', reuse=self.reuse_variables):
            x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]), activate_before_residual[2])
        for i in six.moves.range(1, self.hps.num_residual_units):
            with tf.variable_scope(('unit_3_%d' % i), reuse=self.reuse_variables):
                x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
        with tf.variable_scope('unit_last', reuse=self.reuse_variables):
            x = self._batch_norm('final_bn', x)
            x = self._relu(x, self.hps.relu_leakiness)
            x = self._global_avg_pool(x)
        with tf.variable_scope('logit', reuse=self.reuse_variables):
            self.logits = self._fully_connected(x, self.hps.num_classes)
            self.predictions = tf.nn.softmax(self.logits)
    def _build_cost(self):
        """Mean softmax cross-entropy plus L2 weight decay."""
        with tf.variable_scope('costs'):
            xent = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.labels)
            self.cost = tf.reduce_mean(xent, name='xent')
            self.cost += self._decay()
    def _build_train_op(self):
        """Create the SGD/momentum train op, grouped with BN update ops."""
        self.lrn_rate = tf.constant(self.hps.lrn_rate, tf.float32)
        tf.summary.scalar('learning_rate', self.lrn_rate)
        trainable_variables = tf.trainable_variables()
        grads = tf.gradients(self.cost, trainable_variables)
        # NOTE(review): an unrecognised hps.optimizer leaves `optimizer`
        # unbound and raises NameError below — presumably only 'sgd'/'mom'
        # are ever configured; confirm against callers.
        if (self.hps.optimizer == 'sgd'):
            optimizer = tf.train.GradientDescentOptimizer(self.lrn_rate)
        elif (self.hps.optimizer == 'mom'):
            optimizer = tf.train.MomentumOptimizer(self.lrn_rate, 0.9)
        apply_op = optimizer.apply_gradients(zip(grads, trainable_variables), global_step=self.global_step, name='train_step')
        train_ops = ([apply_op] + self._extra_train_ops)
        self.train_op = tf.group(*train_ops)
    def _batch_norm(self, name, x):
        """Batch normalization with hand-maintained moving averages.

        Training uses batch moments and queues moving-average updates in
        _extra_train_ops; evaluation uses the stored moving statistics.
        """
        with tf.variable_scope(name):
            # Normalize over the channel (last) dimension.
            params_shape = [x.get_shape()[(- 1)]]
            beta = tf.get_variable('beta', params_shape, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32))
            gamma = tf.get_variable('gamma', params_shape, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32))
            moving_mean = tf.get_variable('moving_mean', params_shape, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32), trainable=False)
            moving_variance = tf.get_variable('moving_variance', params_shape, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32), trainable=False)
            # Batch moments over N, H, W.
            (mean, variance) = tf.nn.moments(x, [0, 1, 2], name='moments')
            assign_mean = moving_averages.assign_moving_average(moving_mean, mean, 0.9)
            assign_variance = moving_averages.assign_moving_average(moving_variance, variance, 0.9)
            if (self.mode == 'train'):
                self._extra_train_ops.append(assign_mean)
                self._extra_train_ops.append(assign_variance)
            else:
                mean = moving_mean
                variance = moving_variance
                tf.summary.histogram(mean.op.name, mean)
                tf.summary.histogram(variance.op.name, variance)
            # epsilon=0.001 guards against division by a tiny variance.
            y = tf.nn.batch_normalization(x, mean, variance, beta, gamma, 0.001)
            y.set_shape(x.get_shape())
            return y
    def _residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
        """Basic pre-activation residual unit (two 3x3 convs + skip)."""
        if activate_before_residual:
            # BN/ReLU shared by both the residual branch and the skip path.
            with tf.variable_scope('shared_activation'):
                x = self._batch_norm('init_bn', x)
                x = self._relu(x, self.hps.relu_leakiness)
                orig_x = x
        else:
            # Skip path taken before BN/ReLU.
            with tf.variable_scope('residual_only_activation'):
                orig_x = x
                x = self._batch_norm('init_bn', x)
                x = self._relu(x, self.hps.relu_leakiness)
        with tf.variable_scope('sub1'):
            x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
        with tf.variable_scope('sub2'):
            x = self._batch_norm('bn2', x)
            x = self._relu(x, self.hps.relu_leakiness)
            x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
        with tf.variable_scope('sub_add'):
            if (in_filter != out_filter):
                # Match shapes on the skip path: avg-pool to downsample,
                # zero-pad the extra channels.
                orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
                orig_x = tf.pad(orig_x, [[0, 0], [0, 0], [0, 0], [((out_filter - in_filter) // 2), ((out_filter - in_filter) // 2)]])
            x += orig_x
        tf.logging.debug('image after unit %s', x.get_shape())
        return x
    def _bottleneck_residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
        """Bottleneck residual unit (1x1 reduce, 3x3, 1x1 expand + skip)."""
        if activate_before_residual:
            with tf.variable_scope('common_bn_relu'):
                x = self._batch_norm('init_bn', x)
                x = self._relu(x, self.hps.relu_leakiness)
                orig_x = x
        else:
            with tf.variable_scope('residual_bn_relu'):
                orig_x = x
                x = self._batch_norm('init_bn', x)
                x = self._relu(x, self.hps.relu_leakiness)
        # NOTE(review): `out_filter / 4` is true division under Python 3 and
        # yields a float channel count — presumably out_filter is always
        # divisible by 4 and TF coerces it; confirm (``//`` would be safer).
        with tf.variable_scope('sub1'):
            x = self._conv('conv1', x, 1, in_filter, (out_filter / 4), stride)
        with tf.variable_scope('sub2'):
            x = self._batch_norm('bn2', x)
            x = self._relu(x, self.hps.relu_leakiness)
            x = self._conv('conv2', x, 3, (out_filter / 4), (out_filter / 4), [1, 1, 1, 1])
        with tf.variable_scope('sub3'):
            x = self._batch_norm('bn3', x)
            x = self._relu(x, self.hps.relu_leakiness)
            x = self._conv('conv3', x, 1, (out_filter / 4), out_filter, [1, 1, 1, 1])
        with tf.variable_scope('sub_add'):
            if (in_filter != out_filter):
                # 1x1 projection to match the skip path's shape.
                orig_x = self._conv('project', orig_x, 1, in_filter, out_filter, stride)
            x += orig_x
        tf.logging.info('image after unit %s', x.get_shape())
        return x
    def _decay(self):
        """L2 weight-decay term over convolution/FC 'DW' kernels."""
        costs = []
        for var in tf.trainable_variables():
            # NOTE(review): `find('DW') > 0` skips a name starting with 'DW'
            # (index 0) — presumably safe because variables live under a
            # scope prefix; confirm.
            if (var.op.name.find('DW') > 0):
                costs.append(tf.nn.l2_loss(var))
        return tf.multiply(self.hps.weight_decay_rate, tf.add_n(costs))
    def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
        """2-D convolution with He-style (fan-out) random-normal init."""
        with tf.variable_scope(name):
            n = ((filter_size * filter_size) * out_filters)
            kernel = tf.get_variable('DW', [filter_size, filter_size, in_filters, out_filters], tf.float32, initializer=tf.random_normal_initializer(stddev=np.sqrt((2.0 / n))))
            return tf.nn.conv2d(x, kernel, strides, padding='SAME')
    def _relu(self, x, leakiness=0.0):
        """Leaky ReLU: x if x >= 0 else leakiness * x."""
        return tf.where(tf.less(x, 0.0), (leakiness * x), x, name='leaky_relu')
    def _fully_connected(self, x, out_dim):
        """Flatten all non-batch dimensions and apply a dense layer."""
        num_non_batch_dimensions = len(x.get_shape())
        prod_non_batch_dimensions = 1
        for ii in range((num_non_batch_dimensions - 1)):
            prod_non_batch_dimensions *= int(x.get_shape()[(ii + 1)])
        x = tf.reshape(x, [tf.shape(x)[0], (- 1)])
        w = tf.get_variable('DW', [prod_non_batch_dimensions, out_dim], initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
        b = tf.get_variable('biases', [out_dim], initializer=tf.constant_initializer())
        return tf.nn.xw_plus_b(x, w, b)
    def _global_avg_pool(self, x):
        """Average over the spatial dimensions of an NHWC tensor."""
        assert (x.get_shape().ndims == 4)
        return tf.reduce_mean(x, [1, 2])
def edge_rating(u: Node, v: Node, edge_weights: Dict[(Tuple[(Node, Node)], float)], node_weights: Dict[(Node, float)]) -> float:
    """Rate the edge (u, v): squared edge weight divided by
    ``1 + node_weights[u] * node_weights[v]`` (heavier endpoints lower the rating)."""
    weight = edge_weights[(u, v)]
    endpoint_penalty = 1 + (node_weights[u] * node_weights[v])
    return (weight ** 2) / endpoint_penalty
class CRFTokenClassificationTask(TokenClassfication):
    """Token-classification task variant for CRF heads.

    NOTE(review): the methods below are declared without ``self`` and look
    intended to be @staticmethod (they are only callable via the class);
    confirm against callers before changing.
    """
    def read_examples_from_file(data_dir, mode: Union[(Split, str)]) -> List[InputExample]:
        # Abstract hook: subclasses load InputExamples for the given split.
        raise NotImplementedError
    def get_labels(path: str) -> List[str]:
        # Abstract hook: subclasses return the label vocabulary.
        raise NotImplementedError
    def convert_examples_to_features(examples: List[InputExample], label_list: List[str], max_seq_length: int, tokenizer: PreTrainedTokenizer, cls_token_at_end=False, cls_token='[CLS]', cls_token_segment_id=1, sep_token='[SEP]', sep_token_extra=False, pad_on_left=False, pad_token='[PAD]', pad_token_segment_id=0, pad_token_label_id='[PAD]', sequence_a_segment_id=0, mask_padding_with_zero=True) -> List[InputFeatures]:
        """Tokenize examples into fixed-length InputFeatures for CRF training.

        Unlike the standard NER conversion, special tokens ([SEP]/[CLS]) get
        their *own* label ids, so '[PAD]', '[SEP]' and '[CLS]' must all be
        present in label_list.
        """
        label_map = {label: i for (i, label) in enumerate(label_list)}
        # NOTE(review): this overwrites the pad_token_label_id parameter, so
        # any caller-supplied value is ignored — confirm this is intentional.
        pad_token_label_id = '[PAD]'
        features = []
        for (ex_index, example) in enumerate(examples):
            if ((ex_index % 10000) == 0):
                logger.info('Writing example %d of %d', ex_index, len(examples))
            tokens = []
            label_ids = []
            for (word, label) in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # Words can be split into multiple sub-tokens: the first
                # sub-token keeps the word's label, the rest get the pad label.
                if (len(word_tokens) > 0):
                    tokens.extend(word_tokens)
                    label_ids.extend(([label_map[label]] + ([label_map[pad_token_label_id]] * (len(word_tokens) - 1))))
            # Append [SEP] (twice for models like RoBERTa that use an extra one).
            tokens += [sep_token]
            label_ids += [label_map[sep_token]]
            if sep_token_extra:
                tokens += [sep_token]
                label_ids += [label_map[sep_token]]
            segment_ids = ([sequence_a_segment_id] * len(tokens))
            # [CLS] goes at the end (XLNet-style) or at the front (BERT-style).
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [label_map[cls_token]]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = ([cls_token] + tokens)
                label_ids = ([label_map[cls_token]] + label_ids)
                segment_ids = ([cls_token_segment_id] + segment_ids)
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # Mask marks real tokens (1 when mask_padding_with_zero).
            input_mask = ([(1 if mask_padding_with_zero else 0)] * len(input_ids))
            # Pad (or left-pad) everything out to max_seq_length.
            # NOTE(review): no truncation path — sequences longer than
            # max_seq_length will fail the asserts below; confirm inputs are
            # pre-truncated upstream.
            padding_length = (max_seq_length - len(input_ids))
            if pad_on_left:
                input_ids = (([pad_token] * padding_length) + input_ids)
                input_mask = (([(0 if mask_padding_with_zero else 1)] * padding_length) + input_mask)
                segment_ids = (([pad_token_segment_id] * padding_length) + segment_ids)
                label_ids = (([label_map[pad_token_label_id]] * padding_length) + label_ids)
            else:
                input_ids += ([pad_token] * padding_length)
                input_mask += ([(0 if mask_padding_with_zero else 1)] * padding_length)
                segment_ids += ([pad_token_segment_id] * padding_length)
                label_ids += ([label_map[pad_token_label_id]] * padding_length)
            assert (len(input_ids) == max_seq_length)
            assert (len(input_mask) == max_seq_length)
            assert (len(segment_ids) == max_seq_length)
            assert (len(label_ids) == max_seq_length)
            # Log the first few examples for debugging.
            if (ex_index < 5):
                logger.info('*** Example ***')
                logger.info('guid: %s', example.guid)
                logger.info('tokens: %s', ' '.join([str(x) for x in tokens]))
                logger.info('input_ids: %s', ' '.join([str(x) for x in input_ids]))
                logger.info('input_mask: %s', ' '.join([str(x) for x in input_mask]))
                logger.info('segment_ids: %s', ' '.join([str(x) for x in segment_ids]))
                logger.info('label_ids: %s', ' '.join([str(x) for x in label_ids]))
            # Models without token_type_ids (e.g. distilled variants) get None.
            if ('token_type_ids' not in tokenizer.model_input_names):
                segment_ids = None
            features.append(InputFeatures(input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids))
        return features
def cut_mlir_output(ast: MlirAST, output_names: List[str]):
    """Rewrite *ast* so the module returns (only) the outputs of the named ops.

    Non-f32 outputs get a new tpu.Cast op inserted before the return (only
    legal while the module is not yet in the 'TPU_ADDRESSED' state).  The
    final func's return/op list and, for multi-func modules, the top-level
    func's call plumbing are updated in place.
    """
    ops = [ast.get_op_by_op_name(name) for name in output_names]
    # Flatten the operand ids / result types of all requested ops.
    opd_ids = list(chain(*[i.opd_ids for i in ops]))
    output_types = list(chain(*[i.output_types for i in ops]))
    # Reuse the existing return's source location.
    loc_label = ast.return_op.loc_label
    module_state = ast.module.attrs['module.state']
    # Casts may only be inserted before addresses are assigned.
    can_new_cast = (module_state != 'TPU_ADDRESSED')
    replace_dict = {}
    for (i, (opd_id, output_type)) in enumerate(zip(opd_ids, output_types)):
        if (output_type.dtype != 'f32'):
            assert can_new_cast
            # Fresh location + tpu.Cast producing an f32 copy of this output.
            new_loc = Location(ast.malloc_loc_id(), f'{ops[i].name}_f32')
            ast.add_location(new_loc)
            new_loc_label = new_loc.to_label()
            new_cast_op = Operation([ast.malloc_opd_id()], OperationType('tpu.Cast', [opd_id]), [output_type], [output_type.create_a_f32()], new_loc_label)
            # Insert just before the terminator of the last func.
            ast.module.funcs[(- 1)].ops.insert((- 1), new_cast_op)
            replace_dict[i] = new_cast_op
    # Route the return through the casts that were created.
    for (i, cast) in replace_dict.items():
        opd_ids[i] = cast.opd_ids[0]
        output_types[i] = cast.output_types[0]
    new_return = Return(opd_ids, output_types, loc_label)
    ast.set_return(new_return)
    ast.module.funcs[(- 1)].ops[(- 1)] = new_return
    ast.module.funcs[(- 1)].output_types = output_types
    if (len(ast.module.funcs) > 1):
        # Multi-func module: mirror the new signature on the entry func and
        # rewrite its terminator's operands ('<prefix>#<i>' naming scheme).
        ast.module.funcs[0].output_types = output_types
        return_op = ast.module.funcs[0].ops[(- 1)]
        return_op.output_types = output_types
        prefix = return_op.op_type.opds[0].split('#')[0]
        new_opds = [f'{prefix}#{i}' for i in range(len(output_types))]
        return_op.op_type.opds = new_opds
        # The call op feeding the return also takes the new result types.
        ast.module.funcs[0].ops[(- 2)].output_types = output_types
        ast.module.funcs[0].ops[(- 2)].update_opd()
class ProbabilisticDAG(nn.Module):
    """Differentiable sampler over DAG adjacency matrices.

    A node ordering is sampled as a permutation matrix (via Gumbel-Sinkhorn
    or a top-k SoftSort of per-node scores) and combined with independently
    sampled edges; masking with the permuted upper-triangular matrix
    guarantees acyclicity.
    """
    def __init__(self, n_nodes, temperature=1.0, hard=True, order_type='sinkhorn', noise_factor=1.0, initial_adj=None, lr=0.001, seed=0):
        super().__init__()
        # Seed all RNGs for reproducible sampling.
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        self.n_nodes = n_nodes
        self.temperature = temperature
        self.hard = hard
        self.order_type = order_type
        # Strict upper-triangular mask; permuting it enforces acyclicity.
        self.mask = torch.triu(torch.ones(self.n_nodes, self.n_nodes, device=device), 1)
        # Permutation parameters: a full matrix for Sinkhorn, a score vector for top-k.
        if (self.order_type == 'sinkhorn'):
            self.noise_factor = noise_factor
            p = torch.zeros(n_nodes, n_nodes, requires_grad=True, device=device)
            self.perm_weights = torch.nn.Parameter(p)
        elif (self.order_type == 'topk'):
            p = torch.zeros(n_nodes, requires_grad=True, device=device)
            self.perm_weights = torch.nn.Parameter(p)
            self.sort = SoftSort_p1(hard=self.hard, tau=self.temperature)
        else:
            raise NotImplementedError
        # Edge logits; entries forced to -300 are effectively "never sampled".
        e = torch.zeros(n_nodes, n_nodes, requires_grad=True, device=device)
        torch.nn.init.uniform_(e)
        if (initial_adj is not None):
            # Restrict the support to the edges present in initial_adj.
            initial_adj = initial_adj.to(device)
            zero_indices = (1 - initial_adj).bool()
            e.requires_grad = False
            e[zero_indices] = (- 300)
            e.requires_grad = True
        # No self-loops.
        torch.diagonal(e).fill_((- 300))
        self.edge_log_params = torch.nn.Parameter(e)
        if (initial_adj is not None):
            # Zero out gradients for forbidden edges so they stay forbidden.
            self.edge_log_params.register_hook((lambda grad: (grad * initial_adj.float())))
        self.lr = lr
        self.optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
    def sample_edges(self):
        """Sample a binary edge matrix via Gumbel-softmax over keep/drop logits."""
        p_log = F.logsigmoid(torch.stack((self.edge_log_params, (- self.edge_log_params))))
        dag = gumbel_softmax(p_log, hard=True, dim=0)[0]
        return dag
    def sample_permutation(self):
        """Sample a (near-)permutation matrix according to order_type."""
        if (self.order_type == 'sinkhorn'):
            log_alpha = F.logsigmoid(self.perm_weights)
            (P, _) = gumbel_sinkhorn(log_alpha, noise_factor=self.noise_factor, temp=self.temperature, hard=self.hard)
            P = P.squeeze().to(device)
        elif (self.order_type == 'topk'):
            logits = F.log_softmax(self.perm_weights, dim=0).view(1, (- 1))
            # Gumbel perturbation of the node scores, then a soft sort.
            gumbels = (- torch.empty_like(logits).exponential_().log())
            gumbels = ((logits + gumbels) / 1)
            P = self.sort(gumbels)
            P = P.squeeze()
        else:
            raise NotImplementedError
        return P
    def sample(self):
        """Sample a DAG adjacency: edges masked by the permuted triangular mask."""
        P = self.sample_permutation()
        P_inv = P.transpose(0, 1)
        dag_adj = self.sample_edges()
        # P_inv @ mask @ P keeps only edges consistent with the sampled order.
        dag_adj = (dag_adj * torch.matmul(torch.matmul(P_inv, self.mask), P))
        return dag_adj
    def log_prob(self, dag_adj):
        # Not implemented for this sampler.
        raise NotImplementedError
    def deterministic_permutation(self, hard=True):
        """Noise-free permutation (MAP ordering) for evaluation."""
        if (self.order_type == 'sinkhorn'):
            log_alpha = F.logsigmoid(self.perm_weights)
            (P, _) = gumbel_sinkhorn(log_alpha, temp=self.temperature, hard=hard, noise_factor=0)
            P = P.squeeze().to(device)
        elif (self.order_type == 'topk'):
            sort = SoftSort_p1(hard=hard, tau=self.temperature)
            P = sort(self.perm_weights.detach().view(1, (- 1)))
            P = P.squeeze()
        return P
    def get_threshold_mask(self, threshold):
        """Deterministic DAG keeping edges with probability above *threshold*."""
        P = self.deterministic_permutation()
        P_inv = P.transpose(0, 1)
        dag = (torch.sigmoid(self.edge_log_params.detach()) > threshold).float()
        dag = (dag * torch.matmul(torch.matmul(P_inv, self.mask), P))
        return dag
    def get_prob_mask(self):
        """Edge probabilities masked to the deterministic ordering."""
        P = self.deterministic_permutation()
        P_inv = P.transpose(0, 1)
        e = torch.sigmoid(self.edge_log_params.detach())
        e = (e * torch.matmul(torch.matmul(P_inv, self.mask), P))
        return e
    def print_parameters(self, prob=True):
        """Print permutation and edge parameters (as probabilities when prob=True)."""
        print('Permutation Weights')
        print((torch.sigmoid(self.perm_weights) if prob else self.perm_weights))
        print('Edge Probs')
        print((torch.sigmoid(self.edge_log_params) if prob else self.edge_log_params))
def EllinghamHorton78Graph():
    """Return the Ellingham-Horton 78-vertex graph with a hand-tuned layout.

    The graph is given by an explicit adjacency dictionary; the rest of the
    function only assigns display coordinates (circles for the four main
    clusters, lines and individual tweaks for the connecting vertices).
    """
    g = Graph({0: [1, 5, 60], 1: [2, 12], 2: [3, 7], 3: [4, 14], 4: [5, 9], 5: [6], 6: [7, 11], 7: [15], 8: [9, 13, 22], 9: [10], 10: [11, 72], 11: [12], 12: [13], 13: [14], 14: [72], 15: [16, 20], 16: [17, 27], 17: [18, 22], 18: [19, 29], 19: [20, 24], 20: [21], 21: [22, 26], 23: [24, 28, 72], 24: [25], 25: [26, 71], 26: [27], 27: [28], 28: [29], 29: [69], 30: [31, 35, 52], 31: [32, 42], 32: [33, 37], 33: [34, 43], 34: [35, 39], 35: [36], 36: [41, 63], 37: [65, 66], 38: [39, 59, 74], 39: [40], 40: [41, 44], 41: [42], 42: [74], 43: [44, 74], 44: [45], 45: [46, 50], 46: [47, 57], 47: [48, 52], 48: [49, 75], 49: [50, 54], 50: [51], 51: [52, 56], 53: [54, 58, 73], 54: [55], 55: [56, 59], 56: [57], 57: [58], 58: [75], 59: [75], 60: [61, 64], 61: [62, 71], 62: [63, 77], 63: [67], 64: [65, 69], 65: [77], 66: [70, 73], 67: [68, 73], 68: [69, 76], 70: [71, 76], 76: [77]}, pos={})
    # Four circular clusters of 15-16 vertices each.
    g._circle_embedding(list(range(15)), center=((- 2.5), 1.5))
    g._circle_embedding(list(range(15, 30)), center=((- 2.5), (- 1.5)))
    g._circle_embedding([30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 74, 43, 44], center=(2.5, 1.5))
    g._circle_embedding([45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 75, 59], center=(2.5, (- 1.5)))
    # Manual positions for the vertices joining the clusters.
    d = g._pos
    d[76] = ((- 0.2), (- 0.1))
    d[77] = (0.2, 0.1)
    d[38] = (2.2, 0.1)
    d[52] = (2.3, (- 0.1))
    d[15] = ((- 2.1), (- 0.1))
    d[72] = ((- 2.1), 0.1)
    # Remaining connector vertices placed along straight lines.
    g._line_embedding([60, 61, 62, 63], first=((- 1), 2), last=(1, 2))
    g._line_embedding([64, 65, 37], first=((- 0.5), 1.5), last=(1.2, 1.5))
    g._line_embedding([66, 73, 67, 68, 69], first=(1.2, (- 2)), last=((- 0.8), (- 2)))
    g._line_embedding([66, 70, 71], first=(0.7, (- 1.5)), last=((- 1), (- 1.5)))
    g.name('Ellingham-Horton 78-graph')
    return g
def aggregate_mode(mode):
    """Aggregate a model's top-1000 prediction run and pickle the result.

    ``mode`` is a 4-tuple: (model name, run variant, aggregation variant,
    dataset directory) used to build the input/output paths.
    """
    model_name = mode[0]
    run_variant = mode[1]
    agg_variant = mode[2]
    dataset_dir = mode[3]
    pred_dir = '/mnt/c/Users/salthamm/Documents/coding/DPR/data/coliee2021_task1/{}/output/{}/{}_{}_top1000.json'.format(dataset_dir, model_name, model_name, run_variant)
    output_dir = '/mnt/c/Users/salthamm/Documents/coding/DPR/data/coliee2021_task1/{}/aggregate/{}'.format(dataset_dir, model_name)
    run = read_run_separate_aggregate(pred_dir, agg_variant)
    out_path = os.path.join(output_dir, 'run_aggregated_{}_{}.pickle'.format(model_name, agg_variant))
    with open(out_path, 'wb') as f:
        pickle.dump(run, f)
class MyImage(data.Dataset):
    """Test-time dataset that serves every decodable image found in
    ``<testpath>/<testset>/x<scale>`` as an LR input with no HR target.
    """
    def __init__(self, args, train=False):
        self.args = args
        # FIX: honour the constructor argument; this was hard-coded to
        # False, silently ignoring train=True.
        self.train = train
        self.name = 'MyImage'
        self.scale = args.scale
        self.idx_scale = 0
        # Directory of inputs for the first configured scale factor.
        apath = ((((args.testpath + '/') + args.testset) + '/x') + str(args.scale[0]))
        self.filelist = []
        self.imnamelist = []
        if (not train):
            for f in os.listdir(apath):
                filename = os.path.join(apath, f)
                try:
                    # Probe-read so only files imread can decode are kept.
                    misc.imread(filename)
                except Exception:
                    # FIX: best-effort skip of unreadable files, but no longer
                    # a bare except that swallowed KeyboardInterrupt/SystemExit.
                    continue
                self.filelist.append(filename)
                self.imnamelist.append(f)
    def __getitem__(self, idx):
        """Return (LR tensor, -1, basename); -1 marks the missing HR target."""
        filename = os.path.split(self.filelist[idx])[(- 1)]
        (filename, _) = os.path.splitext(filename)
        lr = misc.imread(self.filelist[idx])
        lr = common.set_channel([lr], self.args.n_colors)[0]
        return (common.np2Tensor([lr], self.args.rgb_range)[0], (- 1), filename)
    def __len__(self):
        """Number of images found at construction time."""
        return len(self.filelist)
    def set_scale(self, idx_scale):
        """Select the active scale index (kept for dataset API compatibility)."""
        self.idx_scale = idx_scale
def test():
    """Check bipart2simpcochain on a small 4x4 bipartite incidence matrix.

    The expected simplices/cochains were worked out by hand: each row set of
    shared columns forms a simplex, and the cochain value is the sum (the
    supplied ``function=np.sum``) of the citation counts of the shared columns.
    """
    biadjacency = coo_matrix([[1, 1, 1, 0], [1, 1, 0, 0], [1, 0, 0, 1], [0, 0, 1, 1]])
    number_citations = np.array([100, 50, 10, 4])
    # Row indices of the bipartite matrix (currently unused below).
    indices = np.arange(biadjacency.shape[0])
    (simplices, cochains, signals_top) = bipart2simpcochain(biadjacency, number_citations, function=np.sum)
    cochains_true = [{frozenset({0}): ((100 + 50) + 10), frozenset({1}): (100 + 50), frozenset({2}): (100 + 4), frozenset({3}): (10 + 4)}, {frozenset({0, 1}): (100 + 50), frozenset({0, 2}): 100, frozenset({1, 2}): 100, frozenset({0, 3}): 10, frozenset({2, 3}): 4}, {frozenset({0, 1, 2}): 100}]
    simplices_true = [{frozenset({0}): 0, frozenset({1}): 1, frozenset({2}): 2, frozenset({3}): 3}, {frozenset({0, 1}): 0, frozenset({0, 2}): 1, frozenset({0, 3}): 2, frozenset({1, 2}): 3, frozenset({2, 3}): 4}, {frozenset({0, 1, 2}): 0}]
    assert (cochains == cochains_true)
    assert (simplices == simplices_true)
def generate_dicts(encode=True):
    """Build a mapping from GQA program type to the set of answers seen for it.

    Iterates the full trainval GQA split, reads each question's program entry
    from its per-image pickle, and accumulates answers (optionally encoded via
    the answer vocabulary) per final-program-step type.  The dict is
    checkpointed to meta_info/type2cand_dict.pkl every 1000 examples.
    """
    with open('{}/full_vocab.json'.format('meta_info/'), 'r') as f:
        vocab = json.load(f)
    with open('{}/answer_vocab.json'.format('meta_info/'), 'r') as f:
        answer = json.load(f)
    split = 'trainval_all_fully'
    mode = 'train'
    gqa_d = GQA(split=split, mode=mode, contained_weight=0.1, threshold=0.0, folder='gqa_bottom_up_features/', cutoff=0.5, vocab=vocab, answer=answer, forbidden='', object_info='meta_info/gqa_objects_merged_info.json', num_tokens=30, num_regions=48, length=9, max_layer=5, distribution=False, failure_path=None)
    type2cand_dict = {}
    start_t = time.time()
    for (idx, ele) in enumerate(gqa_d.data):
        if ((idx % 1000) == 0):
            # Progress report (+1e-09 avoids division by zero at idx == 0).
            time_per_iter = ((time.time() - start_t) / (idx + 1e-09))
            print(f'{idx} / {len(gqa_d.data)}, finished. Time per iter: {time_per_iter:.3f}.', end='\r')
            # NOTE(review): checkpoints the (still partial) dict every 1000
            # iterations; there is no final dump after the loop, so the last
            # partial batch (< 1000 items) is never persisted — confirm.
            type2cand_dict_p = os.path.join('meta_info/type2cand_dict.pkl')
            pickle.dump(type2cand_dict, open(type2cand_dict_p, 'wb'))
        (image_id, question_id) = (ele[0], ele[1])
        # Each image has a pickle mapping question_id -> program entry.
        cur_p = os.path.join('mmnm_questions/', 'mmnm_{}.pkl'.format(image_id))
        entry = pickle.load(open(cur_p, 'rb'))[question_id]
        # Program type = operation name of the final program step.
        prog_type = entry[3][(- 1)][0]
        answer = entry[(- 1)]
        if encode:
            # Map the raw answer string to its vocabulary id (UNK fallback).
            answer = gqa_d.answer_vocab.get(answer, UNK)
        if (prog_type not in type2cand_dict):
            type2cand_dict[prog_type] = set()
        if (answer not in type2cand_dict[prog_type]):
            type2cand_dict[prog_type].add(answer)
def ResNet50(input_channels=3, imsize=32, output_dim=10):
    """Build a 50-layer ResNet: Bottleneck blocks in stages of [3, 4, 6, 3]."""
    stage_sizes = [3, 4, 6, 3]
    return ResNet(Bottleneck, stage_sizes, input_channels, imsize, output_dim)
def gram_schmidt(B):
    """Gram-Schmidt orthogonalization over the fraction field of B's base ring.

    Returns (Bstar, mu) where Bstar is the list of orthogonalized vectors and
    mu is the lower-triangular matrix of projection coefficients.  Raises
    ValueError on linearly dependent input.
    """
    from sage.modules.free_module_element import vector
    # Degenerate input: no vectors, or vectors of length zero.
    if ((len(B) == 0) or (len(B[0]) == 0)):
        return (B, matrix(ZZ, 0, 0, []))
    n = len(B)
    K = B[0].base_ring().fraction_field()
    zero = vector(K, len(B[0]))
    Bstar = [B[0]]
    if (Bstar[0] == zero):
        raise ValueError('linearly dependent input for module version of Gram-Schmidt')
    mu = matrix(K, n, n)
    for i in range(1, n):
        # Accumulate the projection of B[i] onto the span of Bstar[0..i-1].
        projection = zero
        for j in range(i):
            coeff = (B[i].dot_product(Bstar[j]) / Bstar[j].dot_product(Bstar[j]))
            mu[(i, j)] = coeff
            projection = (projection + (coeff * Bstar[j]))
        orthogonal = (B[i] - projection)
        if (orthogonal == zero):
            raise ValueError('linearly dependent input for module version of Gram-Schmidt')
        Bstar.append(orthogonal)
    return (Bstar, mu)
class GradientPTQLearnRateZeroTest(GradientPTQBaseTest):
    """GPTQ sanity test: with learning rate 0, fine-tuning must be a no-op,
    so the GPTQ-quantized model's weights must match the plain PTQ model's."""
    def get_gptq_config(self):
        # One iteration with zero learning rate for both optimizers — no
        # weight movement is possible.
        return GradientPTQConfig(1, optimizer=tf.keras.optimizers.SGD(learning_rate=0.0), optimizer_rest=tf.keras.optimizers.SGD(learning_rate=0.0), loss=multiple_tensors_mse_loss, train_bias=True, rounding_type=self.rounding_type, gptq_quantizer_params_override=self.override_params)
    def compare(self, quantized_model, quantized_gptq_model, input_x=None, quantization_info=None):
        # Same number of weight tensors, and each pair equal to within 1e-5.
        self.unit_test.assertTrue((len(quantized_model.weights) == len(quantized_gptq_model.weights)), msg=('float model number of weights different from quantized model: ' + f'{len(quantized_gptq_model.weights)} != {len(quantized_model.weights)}'))
        weights_diff = [np.isclose(np.max(np.abs((w_q - w_f))), 0, atol=1e-05) for (w_q, w_f) in zip(quantized_model.weights, quantized_gptq_model.weights)]
        for weights_close in weights_diff:
            self.unit_test.assertTrue(np.all(weights_close))
def get_prefix_no_bpe(sentence, bpe_symbol, prefix_len):
    """Return the first *prefix_len* words of *sentence* with BPE undone.

    Without a BPE symbol this is a plain prefix; otherwise the prefix is
    computed token-wise while merging BPE continuations.
    """
    if bpe_symbol is None:
        return get_prefix(sentence, prefix_len)
    prefix_tokens = get_prefix_from_len(sentence.split(), bpe_symbol, prefix_len)
    return ' '.join(prefix_tokens)
def scope_basename(scope):
    """Return the final component of a '/'-separated scope name.

    When *scope* contains no '/', the whole string is returned unchanged.
    """
    # rsplit with maxsplit=1 yields the whole string when no '/' exists,
    # and the text after the last '/' otherwise.
    return scope.rsplit('/', 1)[-1]
class Problem03(Benchmark):
    """1-D benchmark: minimise -sum_{k=1..5} k*sin((k+1)*x + k) on [-10, 10].

    Known global minimiser ~= -6.7745761 with value ~= -12.03124.
    """
    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search domain: a single variable in [-10, 10].
        self._bounds = [((- 10), 10)]
        # Reference optimum location and objective value.
        self.global_optimum = (- 6.7745761)
        self.fglob = (- 12.03124)
    def fun(self, x, *args):
        """Evaluate the objective at x (x[0] is the single variable)."""
        # Count function evaluations for benchmarking.
        self.nfev += 1
        x = x[0]
        y = 0.0
        for k in range(1, 6):
            # `sin` comes from a module-level star import (numpy or math) —
            # defined elsewhere in the file.
            y += (k * sin((((k + 1) * x) + k)))
        return (- y)
def test_ohem_sampler_empty_pred():
    """OHEM sampler must cope with an empty proposal tensor.

    With zero predicted boxes, assignment and sampling should still run and
    produce consistent pos/neg index and box counts (the gt boxes can still
    be sampled because add_gt_as_proposals=True).
    """
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5, ignore_iof_thr=0.5, ignore_wrt_candidates=False)
    # Zero proposals — the edge case under test.
    bboxes = torch.empty(0, 4)
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42]])
    gt_labels = torch.LongTensor([1, 2, 2, 3])
    gt_bboxes_ignore = torch.Tensor([])
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, gt_labels=gt_labels)
    context = _context_for_ohem()
    sampler = OHEMSampler(num=10, pos_fraction=0.5, context=context, neg_pos_ub=(- 1), add_gt_as_proposals=True)
    # FPN-style feature pyramid (strides 2^6 .. 2^2) required by OHEM's loss-based selection.
    feats = [torch.rand(1, 256, int((2 ** i)), int((2 ** i))) for i in [6, 5, 4, 3, 2]]
    sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
    # Sampled boxes and their indices must stay in lockstep.
    assert (len(sample_result.pos_bboxes) == len(sample_result.pos_inds))
    assert (len(sample_result.neg_bboxes) == len(sample_result.neg_inds))
def register_functions(root_module):
    """Register ns-3 free functions with the pybindgen root module.

    Behaviour-identical restyle of the generated binding code: the
    repetitive Make*Checker / ReadFrom / WriteTo registrations are driven
    from data tables; the registration order is unchanged.
    """
    module = root_module
    module.add_function('CRC32Calculate', 'uint32_t', [param('uint8_t const *', 'data'), param('int', 'length')])
    # Every Make<X>Checker helper shares the same no-argument signature.
    checker_retval = 'ns3::Ptr< ns3::AttributeChecker const >'
    for checker in ('Address', 'DataRate', 'Ipv4Address', 'Ipv4Mask', 'Ipv6Address', 'Ipv6Prefix', 'Mac16Address', 'Mac48Address', 'Mac64Address', 'QueueSize'):
        module.add_function('Make%sChecker' % checker, checker_retval, [])
    # Generic Address overload takes an explicit length argument.
    module.add_function('ReadFrom', 'void', [param('ns3::Buffer::Iterator &', 'i'), param('ns3::Address &', 'ad'), param('uint32_t', 'len')])
    address_types = ('ns3::Ipv4Address', 'ns3::Ipv6Address', 'ns3::Mac16Address', 'ns3::Mac48Address', 'ns3::Mac64Address')
    for addr_type in address_types:
        # ReadFrom fills the address in place, hence the reference type.
        module.add_function('ReadFrom', 'void', [param('ns3::Buffer::Iterator &', 'i'), param(addr_type + ' &', 'ad')])
    module.add_function('WriteTo', 'void', [param('ns3::Buffer::Iterator &', 'i'), param('ns3::Address const &', 'ad')])
    for addr_type in address_types:
        # WriteTo takes the concrete address types by value.
        module.add_function('WriteTo', 'void', [param('ns3::Buffer::Iterator &', 'i'), param(addr_type, 'ad')])
    # Recurse into the nested C++ namespaces, in the original order.
    nested = ((register_functions_ns3_FatalImpl, 'FatalImpl'), (register_functions_ns3_Hash, 'Hash'), (register_functions_ns3_TracedValueCallback, 'TracedValueCallback'), (register_functions_ns3_addressUtils, 'addressUtils'), (register_functions_ns3_internal, 'internal'), (register_functions_ns3_tests, 'tests'))
    for register, namespace in nested:
        register(module.add_cpp_namespace(namespace), root_module)
    return
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow (Python) tokenizer checkpoints to fast tokenizer.json files.

    For each requested tokenizer class / checkpoint pair, downloads the slow
    tokenizer, saves it in the fast (non-legacy) format under ``dump_path``,
    and removes every produced file except the ``tokenizer.json``.

    Args:
        tokenizer_name: key into TOKENIZER_CLASSES, or None to convert all.
        checkpoint_name: a single checkpoint id, or None for every known one.
        dump_path: root output directory.
        force_download: re-download even if the checkpoint is cached.

    Raises:
        ValueError: if ``tokenizer_name`` is given but unknown.
    """
    if ((tokenizer_name is not None) and (tokenizer_name not in TOKENIZER_CLASSES)):
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.')
    if (tokenizer_name is None):
        # Convert every registered tokenizer class.
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, (tokenizer_name + 'Fast'))}
    logger.info(f'Loading tokenizer classes: {tokenizer_names}')
    # Note: this loop variable deliberately shadows the parameter.
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if (checkpoint_name is None):
            # All checkpoints known to this tokenizer class.
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        # NOTE(review): tokenizer_class is a class, so
        # tokenizer_class.__class__.__name__ evaluates to 'type' here — the
        # log lines likely meant tokenizer_class.__name__; confirm upstream.
        logger.info(f'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}')
        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__class__.__name__} {checkpoint}')
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}')
            if ('/' in checkpoint):
                # Organization-scoped checkpoint: use org dir + model prefix.
                (checkpoint_directory, checkpoint_prefix_name) = checkpoint.split('/')
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')
            if (checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]):
                # If the vocab URL nests the checkpoint in a directory, mirror
                # that layout locally instead of using a filename prefix.
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[(- 1)][0]
                if (next_char == '/'):
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')
            file_names = tokenizer.save_pretrained(dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f'=> File names {file_names}')
            # Keep only the fast tokenizer.json artifact.
            for file_name in file_names:
                if (not file_name.endswith('tokenizer.json')):
                    os.remove(file_name)
                    logger.info(f'=> removing {file_name}')
def main():
    """Train a PLUS protein language model (TFM / RNN / P-ELMo) on Pfam.

    Builds configs from CLI arguments, loads train/test datasets with the
    collate function matching the model type, sets up the model, optimizer
    and Trainer, then runs the epoch loop with periodic evaluation (LM
    and/or classification) and checkpointing.

    Fixes vs. the original: the 'end trainin a model' log typo, and the
    inconsistent ''.join (vs. ' '.join) used for "elapsed time:" lines.
    """
    set_seeds(2020)
    args = vars(parser.parse_args())
    alphabet = Protein()
    # Configuration objects for data, model, and run hyper-parameters.
    data_cfg = config.DataConfig(args['data_config'])
    model_cfg = config.ModelConfig(args['model_config'], input_dim=len(alphabet), num_classes=2)
    run_cfg = config.RunConfig(args['run_config'], sanity_check=args['sanity_check'])
    (output, save_prefix) = set_output(args, 'train_pfam_log')
    os.environ['CUDA_VISIBLE_DEVICES'] = (args['device'] if (args['device'] is not None) else '')
    (device, data_parallel) = (torch.device(('cuda' if torch.cuda.is_available() else 'cpu')), (torch.cuda.device_count() > 1))
    config.print_configs(args, [data_cfg, model_cfg, run_cfg], device, output)
    # Mode flags: RNN vs. transformer backbone, bidirectional PLUS-RNN vs.
    # P-ELMo, and paired-sequence vs. single-sequence data.
    flag_rnn = (model_cfg.model_type == 'RNN')
    flag_plus = ((model_cfg.rnn_type == 'B') if flag_rnn else False)
    flag_paired = ('testpairs' in data_cfg.path)
    start = Print(' '.join(['start loading a train dataset:', data_cfg.path['train']]), output)
    dataset_train = pfam.load_pfam(data_cfg, 'train', alphabet, args['sanity_check'])
    dataset_train = dataset.Pfam_dataset(*dataset_train, alphabet, run_cfg, flag_rnn, model_cfg.max_len, random_pairing=flag_paired, augment=flag_plus, sanity_check=args['sanity_check'])
    # Pick the collate function matching the model/data combination.
    if (flag_rnn and flag_paired):
        collate_fn = dataset.collate_paired_sequences
    elif (flag_rnn and flag_plus):
        collate_fn = dataset.collate_sequences
    elif flag_rnn:
        collate_fn = dataset.collate_sequences_pelmo
    else:
        collate_fn = None
    iterator_train = torch.utils.data.DataLoader(dataset_train, run_cfg.batch_size_train, collate_fn=collate_fn, shuffle=True)
    end = Print(' '.join(['loaded', str(len(dataset_train)), 'sequences']), output)
    Print(' '.join(['elapsed time:', str((end - start))]), output, newline=True)
    start = Print(' '.join(['start loading a test dataset:', data_cfg.path[('testpairs' if flag_paired else 'test')]]), output)
    if flag_paired:
        dataset_test = pfam.load_pfam_pairs(data_cfg, 'testpairs', alphabet, args['sanity_check'])
        dataset_test = dataset.PairedPfam_dataset(*dataset_test, alphabet, run_cfg, flag_rnn, model_cfg.max_len)
    else:
        dataset_test = pfam.load_pfam(data_cfg, 'test', alphabet, args['sanity_check'])
        dataset_test = dataset.Pfam_dataset(*dataset_test, alphabet, run_cfg, flag_rnn, model_cfg.max_len, random_pairing=flag_paired, augment=flag_plus, sanity_check=args['sanity_check'])
    iterator_test = torch.utils.data.DataLoader(dataset_test, run_cfg.batch_size_eval, collate_fn=collate_fn)
    end = Print(' '.join(['loaded', str(len(dataset_test)), 'sequence(pair)s']), output)
    Print(' '.join(['elapsed time:', str((end - start))]), output, newline=True)
    start = Print('start initializing a model', output)
    models_list = []
    if (not flag_rnn):
        model = plus_tfm.PLUS_TFM(model_cfg)
        run_cfg.set_total_steps(len(dataset_train))
    elif flag_plus:
        model = plus_rnn.PLUS_RNN(model_cfg)
    else:
        model = p_elmo.P_ELMo_lm(model_cfg)
    models_list.append([model, '', False, flag_rnn, (flag_rnn and flag_paired)])
    # Collect trainable parameters from every non-frozen model.
    params = []
    for (model, _, frz, _, _) in models_list:
        if (not frz):
            params += [p for p in model.parameters() if p.requires_grad]
    load_models(args, models_list, device, data_parallel, output)
    get_loss = (plus_rnn.get_loss if flag_rnn else plus_tfm.get_loss)
    end = Print('end initializing a model', output)
    # Fix: ' '.join so the log reads "elapsed time: X" (was ''.join).
    Print(' '.join(['elapsed time:', str((end - start))]), output, newline=True)
    start = Print('start setting trainer configurations', output)
    if flag_rnn:
        optim = torch.optim.Adam(params, lr=run_cfg.learning_rate)
    else:
        optim = get_BertAdam_optimizer(run_cfg, models_list[0][0])
    # A loss lambda of -1 disables the corresponding task.
    tasks_list = []
    if (run_cfg.lm_loss_lambda != (- 1)):
        tasks_list.append(['lm', [], ['acc']])
    if (run_cfg.cls_loss_lambda != (- 1)):
        tasks_list.append(['cls', [], ['acc']])
    trainer = Trainer(models_list, get_loss, run_cfg, tasks_list, optim)
    trainer_args = {}
    trainer_args['data_parallel'] = data_parallel
    trainer_args['paired'] = flag_paired
    if (flag_paired and flag_rnn):
        trainer_args['evaluate_cls'] = plus_rnn.evaluate_sfp
    elif flag_paired:
        trainer_args['evaluate_cls'] = plus_tfm.evaluate_sfp
    else:
        trainer_args['num_alphabets'] = len(alphabet)
    end = Print('end setting trainer configurations', output)
    Print(' '.join(['elapsed time:', str((end - start))]), output, newline=True)
    start = Print('start training a model', output)
    Print(trainer.get_headline(), output)
    for epoch in range(run_cfg.num_epochs):
        for (B, batch) in enumerate(iterator_train):
            batch = [(t.to(device) if (type(t) is torch.Tensor) else t) for t in batch]
            trainer.train(batch, trainer_args)
            if ((B % 10) == 0):
                print('# epoch [{}/{}] train {:.1%} loss={:.4f}'.format((epoch + 1), run_cfg.num_epochs, (B / len(iterator_train)), trainer.loss_train), end='\r', file=sys.stderr)
            # Evaluate/checkpoint every 20k steps (every step when
            # sanity-checking).
            if (((trainer.global_step % 20000) == 0) or args['sanity_check']):
                print((' ' * 150), end='\r', file=sys.stderr)
                if (run_cfg.lm_loss_lambda != (- 1)):
                    if flag_paired:
                        dataset_test.set_augment(True)
                    trainer.set_exec_flags(['lm', 'cls'], [True, False])
                    for (b, batch) in enumerate(iterator_test):
                        batch = [(t.to(device) if (type(t) is torch.Tensor) else t) for t in batch]
                        trainer.evaluate(batch, trainer_args)
                        if ((b % 10) == 0):
                            print('# lm {:.1%} loss={:.4f}'.format((b / len(iterator_test)), trainer.loss_eval), end='\r', file=sys.stderr)
                    print((' ' * 150), end='\r', file=sys.stderr)
                if (run_cfg.cls_loss_lambda != (- 1)):
                    dataset_test.set_augment(False)
                    trainer.set_exec_flags(['lm', 'cls'], [False, True])
                    for (b, batch) in enumerate(iterator_test):
                        batch = [(t.to(device) if (type(t) is torch.Tensor) else t) for t in batch]
                        trainer.evaluate(batch, trainer_args)
                        if ((b % 10) == 0):
                            print('# cls {:.1%} loss={:.4f}'.format((b / len(iterator_test)), trainer.loss_eval), end='\r', file=sys.stderr)
                    print((' ' * 150), end='\r', file=sys.stderr)
                trainer.save(save_prefix)
                Print(trainer.get_log((epoch + 1), args=trainer_args), output)
                trainer.set_exec_flags(['lm', 'cls'], [True, True])
                trainer.reset()
    # Fix: was 'end trainin a model'.
    end = Print('end training a model', output)
    Print(' '.join(['elapsed time:', str((end - start))]), output, newline=True)
    output.close()
class SimSiam(BYOL):
    """SimSiam model: the BYOL variant without a momentum target network."""

    def __init__(self, backbone: nn.Module, params: Namespace):
        # Reuse the BYOL implementation; disabling momentum yields SimSiam.
        super().__init__(backbone, params, use_momentum=False)
def dict_merge(dct, merge_dct):
    """Recursively merge ``merge_dct`` into ``dct`` in place.

    Values from ``merge_dct`` overwrite those in ``dct``, except when the
    existing value is a dict and the incoming one is a Mapping, in which
    case the two are merged recursively.

    Fix: ``collections.Mapping`` was an alias deprecated since Python 3.3
    and removed in 3.10; use ``collections.abc.Mapping`` instead.
    """
    import collections.abc  # local import keeps the fix self-contained
    for k, v in merge_dct.items():
        if k in dct and isinstance(dct[k], dict) and isinstance(v, collections.abc.Mapping):
            # Both sides are mappings: descend instead of overwriting.
            dict_merge(dct[k], v)
        else:
            dct[k] = v
class Parkinsons(BaseDataset):
    __doc__ = f'''
    This dataset is composed of a range of biomedical voice measurements from 42 people
    with early-stage Parkinson's disease recruited to a six-month trial of a
    telemonitoring device for remote symptom progression monitoring. The recordings
    were automatically captured in the patient's homes.
    Columns in the table contain subject number, subject age, subject gender, time
    interval from baseline recruitment date, motor UPDRS, total UPDRS, and 16
    biomedical voice measures. Each row corresponds to one of 5,875 voice recording
    from these individuals. The main aim of the data is to predict the motor and total
    UPDRS scores ('motor_UPDRS' and 'total_UPDRS') from the 16 voice measures.
    {BASE_DATASET_DESCRIPTION}
    Features:
        subject# (int):
            Integer that uniquely identifies each subject
        age (int):
            Subject age
        sex (int):
            Binary feature. Subject sex, with 0 being male and 1 female
        test_time (float):
            Time since recruitment into the trial. The integer part is the
            number of days since recruitment
        Jitter(%) (float):
            Measure of variation in fundamental frequency
        Jitter(Abs) (float):
            Measure of variation in fundamental frequency
        Jitter:RAP (float):
            Measure of variation in fundamental frequency
        Jitter:PPQ5 (float):
            Measure of variation in fundamental frequency
        Jitter:DDP (float):
            Measure of variation in fundamental frequency
        Shimmer (float):
            Measure of variation in amplitude
        Shimmer(dB) (float):
            Measure of variation in amplitude
        Shimmer:APQ3 (float):
            Measure of variation in amplitude
        Shimmer:APQ5 (float):
            Measure of variation in amplitude
        Shimmer:APQ11 (float):
            Measure of variation in amplitude
        Shimmer:DDA (float):
            Measure of variation in amplitude
        NHR (float):
            Measure of ratio of noise to tonal components in the voice
        HNR (float):
            Measure of ratio of noise to tonal components in the voice
        RPDE (float):
            A nonlinear dynamical complexity measure
        DFA (float):
            Signal fractal scaling exponent
        PPE (float):
            A nonlinear measure of fundamental frequency variation
    Targets:
        motor_UPDRS (float):
            Clinician's motor UPDRS score, linearly interpolated
        total_UPDRS (float):
            Clinician's total UPDRS score, linearly interpolated
    Source:
        UCI Machine Learning Repository, Parkinsons Telemonitoring Data Set
    Examples:
        Load in the data set::
            >>> dataset = Parkinsons()
            >>> dataset.shape
            (5875, 22)
        Split the data set into features and targets, as NumPy arrays::
            >>> X, y = dataset.split()
            >>> X.shape, y.shape
            ((5875, 20), (5875, 2))
        Perform a train/test split, also outputting NumPy arrays::
            >>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
            >>> X_train, X_test, y_train, y_test = train_test_split
            >>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
            ((4659, 20), (4659, 2), (1216, 20), (1216, 2))
        Output the underlying Pandas DataFrame::
            >>> df = dataset.to_pandas()
            >>> type(df)
            <class 'pandas.core.frame.DataFrame'>
    '''

    # NOTE(review): the URL string literal was truncated (unterminated) in
    # the original source; restored to the UCI telemonitoring dataset
    # location -- confirm against the project's data mirror.
    _url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/parkinsons/telemonitoring/parkinsons_updrs.data'
    # Column indices used by BaseDataset: first 20 are features, last 2 the
    # UPDRS targets (after the reordering done in _prep_data).
    _features = range(20)
    _targets = [20, 21]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Decode the raw CSV bytes and move the two UPDRS targets last."""
        csv_file = io.BytesIO(data)
        df = pd.read_csv(csv_file, header=0)
        # Keep every non-target column first, then append the two targets so
        # that _features/_targets index positions are correct.
        feature_cols = [col for col in df.columns if not col.endswith('UPDRS')]
        return df[feature_cols + ['motor_UPDRS', 'total_UPDRS']]
def combined_metric_fpr_tpr(fpr, criterions, model, dataset, title, attacks, lowind, upind, real_dir, adv_dir, n_radius, targeted_lr, t_radius, untargeted_lr, u_radius):
    """Report the accurate FPR on clean data and the TPR per attack for the
    combined three-statistic detector at the thresholds ``criterions[fpr]``.

    A sample is flagged when ANY of the three statistics (L1, targeted,
    untargeted) exceeds its threshold.
    """
    thresholds = criterions[fpr]

    def collect(source):
        # Gather the three detection statistics for 'real' or an attack name.
        return (l1_vals(model, dataset, title, source, lowind, upind, real_dir, adv_dir, n_radius),
                targeted_vals(model, dataset, title, source, lowind, upind, real_dir, adv_dir, targeted_lr, t_radius),
                untargeted_vals(model, dataset, title, source, lowind, upind, real_dir, adv_dir, untargeted_lr, u_radius))

    def detection_rate(vals):
        # Fraction of samples where any statistic exceeds its threshold.
        flagged = np.logical_or(np.logical_or(vals[0] > thresholds[0], vals[1] > thresholds[1]), vals[2] > thresholds[2])
        return (len(vals[0][flagged]) * 1.0) / len(vals[0])

    fpr_accurate = detection_rate(collect('real'))
    print('corresponding accurate fpr of this threshold is ', fpr_accurate)
    for attack in attacks:
        tpr = detection_rate(collect(attack))
        print('corresponding tpr for ' + attack + ' of this threshold is', tpr)
class TestSparseNormalize(hu.HypothesisTestCase):
    """Hypothesis tests for the caffe2 SparseNormalize operator."""

    # NOTE(review): the decorators on ref_normalize and test_sparse_normalize
    # were missing in the original source (only bare argument tuples
    # remained, which is a syntax error); restored as @staticmethod /
    # @hypothesis.settings / @hypothesis.given per the standard caffe2
    # hypothesis-test layout -- confirm against upstream.
    @staticmethod
    def ref_normalize(param_in, use_max_norm, norm):
        """Reference (numpy) implementation: rescale ``param_in`` to L2 norm
        ``norm``; with ``use_max_norm`` only when the current norm exceeds
        it. The epsilon guards against division by zero."""
        param_norm = (np.linalg.norm(param_in) + 1e-12)
        if ((use_max_norm and (param_norm > norm)) or (not use_max_norm)):
            param_in = ((param_in * norm) / param_norm)
        return param_in

    @hypothesis.settings(suppress_health_check=[HealthCheck.filter_too_much])
    @hypothesis.given(inputs=hu.tensors(n=2, min_dim=2, max_dim=2), use_max_norm=st.booleans(), norm=st.floats(min_value=1.0, max_value=4.0), data_strategy=st.data(), **hu.gcs_cpu_only)
    def test_sparse_normalize(self, inputs, use_max_norm, norm, data_strategy, gc, dc):
        (param, grad) = inputs
        # Push parameters away from zero so row norms are well-defined.
        param += (0.02 * np.sign(param))
        param[(param == 0.0)] += 0.02
        # Sample row indices; duplicate indices are rejected below.
        indices = data_strategy.draw(hu.tensor(dtype=np.int64, min_dim=1, max_dim=1, elements=st.sampled_from(np.arange(param.shape[0]))))
        hypothesis.note(('indices.shape: %s' % str(indices.shape)))
        hypothesis.assume(np.array_equal(np.unique(indices.flatten()), np.sort(indices.flatten())))
        # Two operator variants: without and with the (extra) grad input.
        op1 = core.CreateOperator('SparseNormalize', ['param', 'indices'], ['param'], use_max_norm=use_max_norm, norm=norm)
        grad = grad[indices]
        op2 = core.CreateOperator('SparseNormalize', ['param', 'indices', 'grad'], ['param'], use_max_norm=use_max_norm, norm=norm)

        def ref_sparse_normalize(param, indices, grad=None):
            # Normalize only the selected rows; other rows stay untouched.
            param_out = np.copy(param)
            for (_, index) in enumerate(indices):
                param_out[index] = self.ref_normalize(param[index], use_max_norm, norm)
            return (param_out,)
        self.assertReferenceChecks(gc, op1, [param, indices], ref_sparse_normalize)
        self.assertReferenceChecks(gc, op2, [param, indices, grad], ref_sparse_normalize)
class iCIFAR10(iData):
    """CIFAR-10 wrapper for class-incremental learning experiments."""
    # Samples are held in memory as arrays, not as per-image file paths.
    use_path = False
    # Training-time augmentations (crop, flip, brightness jitter).
    train_trsf = [transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(p=0.5), transforms.ColorJitter(brightness=(63 / 255))]
    # No test-time augmentation.
    test_trsf = []
    # Normalization applied to both splits (CIFAR-10 channel statistics).
    common_trsf = [transforms.ToTensor(), transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.201))]
    # Classes are presented in their natural 0..9 order.
    class_order = np.arange(10).tolist()

    def download_data(self):
        """Download CIFAR-10 (if needed) and cache data/targets as arrays."""
        train_dataset = datasets.cifar.CIFAR10('./data', train=True, download=True)
        test_dataset = datasets.cifar.CIFAR10('./data', train=False, download=True)
        (self.train_data, self.train_targets) = (train_dataset.data, np.array(train_dataset.targets))
        (self.test_data, self.test_targets) = (test_dataset.data, np.array(test_dataset.targets))
def strip_blank_lines(l):
    """Remove leading and trailing blank (whitespace-only) lines from ``l``.

    The list is modified in place and also returned.
    """
    while l and not l[0].strip():
        l.pop(0)
    while l and not l[-1].strip():
        l.pop()
    return l
class CategoricalNoiseModel(NoiseModel.NoiseModel):
    """Noise model that corrupts categorical cells.

    A corrupted cell is replaced by a value drawn from its column's category
    distribution (sharpened by ``alpha_prob``), extended with a randomly
    generated typo of the current value (probability ``typo_prob``).
    Corruption hits either exactly one random cell per row
    (``one_cell_flag``) or each cell independently with probability
    ``p_cell``.

    Fixes vs. the original: mutable default arguments replaced by ``None``
    sentinels, and the duplicated one-cell corruption logic now reuses
    ``corrupt_elem`` (identical RNG call sequence).
    """

    def __init__(self, shape, cats_name_lists, probability=0, feature_importance=None, cats_probs_list=None, typo_prob=0.01, alpha_prob=1.0, p_cell=0.01, one_cell_flag=False):
        # Fix: avoid mutable default arguments (shared-list pitfall).
        if feature_importance is None:
            feature_importance = []
        if cats_probs_list is None:
            cats_probs_list = []
        super(CategoricalNoiseModel, self).__init__(shape, probability, feature_importance, one_cell_flag)
        self.cats_name_lists = cats_name_lists
        self.cats_probs_list = cats_probs_list
        self.typo_prob = typo_prob
        self.alpha_prob = alpha_prob
        self.p_cell = p_cell
        # Default to uniform per-column category probabilities.
        if not self.cats_probs_list:
            self.cats_probs_list = [(numpy.ones(len(self.cats_name_lists[i])) / float(len(self.cats_name_lists[i]))) for i in range(len(self.cats_name_lists))]
        # Sharpen/flatten each distribution with the alpha_prob exponent.
        self.cats_probs_list = [((self.cats_probs_list[i] ** alpha_prob) / numpy.sum((self.cats_probs_list[i] ** alpha_prob))) for i in range(len(self.cats_name_lists))]
        # Append the typo probability as the final entry of each column.
        self.cats_probs_list = [numpy.append((cats_prob * (1 - typo_prob)), typo_prob) for cats_prob in self.cats_probs_list]

    def corrupt(self, X):
        """Return a corrupted copy of ``X`` (``X`` itself is untouched)."""
        n_rows = numpy.shape(X)[0]
        n_cols = numpy.shape(X)[1]
        Y = numpy.copy(X)
        # Columns map 1:1 onto category lists here.
        identity_mapping = dict(zip(range(n_cols), range(n_cols)))
        if self.one_cell_flag:
            # Exactly one uniformly chosen cell per row is corrupted.
            for i in range(0, n_rows):
                a = numpy.random.choice(n_cols)
                self.corrupt_elem(Y, [(i, a)], identity_mapping)
        else:
            # Each cell is independently corrupted with probability p_cell.
            idxs = numpy.where(numpy.random.uniform(0.0, 1.0, X.shape) <= self.p_cell)
            self.corrupt_elem(Y, zip(idxs[0], idxs[1]), identity_mapping)
        return Y

    def corrupt_elem(self, Y, idxs, idx_cat_map):
        """Corrupt the cells of ``Y`` at positions ``idxs`` in place.

        ``idx_cat_map`` maps a column index of ``Y`` to the index of its
        category list/distribution.
        """
        for idx_0, idx_1 in idxs:
            idx_cat = idx_cat_map[idx_1]
            # Candidates: the column's categories plus one fresh typo of the
            # current cell value (matching the appended typo probability).
            tmp_cat_name_list = (self.cats_name_lists[idx_cat] + [NoiseModel.generate_typo(str(Y[(idx_0, idx_1)]))])
            tmp_cat_prob_list = self.cats_probs_list[idx_cat]
            # Remove the current value from the candidates (when more than
            # two remain) so the cell actually changes, then renormalize.
            idx_rmv = -1
            for idx, elem in enumerate(tmp_cat_name_list):
                if elem == Y[(idx_0, idx_1)]:
                    idx_rmv = idx
                    break
            if idx_rmv >= 0 and len(tmp_cat_name_list) > 2:
                tmp_cat_name_list.pop(idx_rmv)
                tmp_cat_prob_list = numpy.delete(self.cats_probs_list[idx_cat], idx_rmv)
                tmp_cat_prob_list = tmp_cat_prob_list / tmp_cat_prob_list.sum()
            Y[(idx_0, idx_1)] = numpy.random.choice(tmp_cat_name_list, 1, False, tmp_cat_prob_list)[0]
def hinge_loss(score, label):
    """Mean hinge loss: sum over instances of max(0, 1 - score * label),
    divided by the batch size."""
    batch_size = label.size(0)
    margin = 1 - score * label
    positive_part = margin.masked_select(margin > 0)
    return positive_part.sum() / batch_size
def flat_module(x):
    """Two residual 3x3 conv units followed by a 5-way affine head (NNabla)."""

    def residual_unit(inp, prefix):
        # Conv -> ReLU -> Conv with an additive skip from the first conv.
        h1 = PF.convolution(inp, 4, (3, 3), pad=(1, 1), name=prefix + '-c1')
        h2 = PF.convolution(F.relu(h1), 4, (3, 3), pad=(1, 1), name=prefix + '-c2')
        return F.add2(h2, h1, inplace=True)

    h = residual_unit(x, 'c1')
    h = residual_unit(h, 'c2')
    return PF.affine(h, 5, name='fc')
def map_fn(image, label):
    """Composite a random MNIST digit onto a resized input image (TF pipeline).

    Picks one of the first ``num_mnist_digits_used`` MNIST digits, converts
    it to float RGB, stamps nine padded copies of it at a 3x3 grid of
    positions over the 224x224 image via an elementwise max, and returns
    (image, label, digit_label).
    NOTE(review): assumes ``mnist_images`` is (N, 28, 28) grayscale and
    ``image`` is HxWx3 in [0, 255] -- confirm against the input pipeline.
    """
    i = tf.random.uniform([], maxval=num_mnist_digits_used, dtype=tf.int32)
    digit = tf.squeeze(tf.slice(mnist_images, [i, 0, 0], [1, 28, 28]))
    digit_label = tf.squeeze(tf.slice(mnist_labels, [i], [1]))
    digit = tf.image.grayscale_to_rgb(tf.expand_dims(digit, (- 1)))
    digit = tf.image.convert_image_dtype(digit, dtype=tf.float32)
    image = (tf.image.resize(image, [224, 224]) / 255.0)
    (size_big, size_small) = (224, 28)
    images = []
    # Place the same digit at nine relative positions via padding.
    for x_ratio in [0.15, 0.45, 0.75]:
        for y_ratio in [0.15, 0.45, 0.75]:
            (pad_x, pad_y) = (int((size_big * x_ratio)), int((size_big * y_ratio)))
            (x_max, y_max) = ((size_big - size_small), (size_big - size_small))
            d = tf.pad(digit, [[pad_x, (x_max - pad_x)], [pad_y, (y_max - pad_y)], [0, 0]])
            images.append(d)
    images.append(image)
    # Elementwise max merges the digit copies with the background image.
    image = tf.reduce_max(tf.stack(images, 0), 0)
    return (image, label, digit_label)
class TrainOptions(BaseOptions):
    """Training-time command-line options, extending BaseOptions."""

    def initialize(self):
        """Register training-specific arguments on top of the base options."""
        BaseOptions.initialize(self)
        # Logging / checkpoint frequencies.
        self.parser.add_argument('--display_freq', type=int, default=100, help='frequency of showing training results on screen')
        self.parser.add_argument('--print_freq', type=int, default=5, help='frequency of priting training results')
        self.parser.add_argument('--save_latest_freq', type=int, default=1000, help='frequency of saving the latest results')
        self.parser.add_argument('--save_epoch_freq', type=int, default=1, help='frequency of saving checkpoints')
        # Optimizer (Adam) hyper-parameters.
        self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
        self.parser.add_argument('--beta2', type=float, default=0.999, help='momentum term of adam')
        self.parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate for adam')
        # Contrastive (MoCo-style) loss settings.
        self.parser.add_argument('--q_len', type=int, default=4096, help='size of queue to save logits used in constrastive loss')
        self.parser.add_argument('--l_len', type=int, default=256, help='size of logits in contrastive loss')
        self.parser.add_argument('--moco_m', default=0.999, type=float, help='moco momentum of updating discriminator')
        self.parser.add_argument('--moco_t', default=0.07, type=float, help='softmax temperature')
        self.parser.add_argument('--w_match', default=1.0, type=float, help='the weight for feat match loss')
        # Training schedule and model/data layout.
        self.parser.add_argument('--video_frame_size', type=int, default=128, help='spatial size of video frames for training')
        self.parser.add_argument('--cross_domain', action='store_true', help='in-domain or cross-domain training')
        self.parser.add_argument('--G_step', type=int, default=5, help='number of training iterations for G')
        self.parser.add_argument('--total_epoch', type=int, default=5, help='training epochs')
        self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        self.parser.add_argument('--n_frames_G', type=int, default=16, help='number of input frames forwarded into generator')
        self.parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to use')
        self.parser.add_argument('--norm_D_3d', type=str, default='instance', help='instance norm or batch nom for D_3d')
        self.parser.add_argument('--nc', type=int, default=3, help='# of input channels for D_3d')
        self.parser.add_argument('--dataroot', type=str, default='/path/to/dataset/')
        self.parser.add_argument('--time_step', type=int, default=2, help='the spacing between neighboring frames.')
        # Distributed-training settings.
        self.parser.add_argument('--world_size', default=(- 1), type=int, help='number of nodes for distributed training')
        self.parser.add_argument('--rank', default=(- 1), type=int, help='node rank for distributed training')
        self.parser.add_argument('--dist_url', default='tcp://localhost:10001', type=str, help='url used to set up distributed training')
        self.parser.add_argument('--dist_backend', default='nccl', type=str, help='distributed backend')
        self.parser.add_argument('--multiprocessing_distributed', action='store_true', help='Use multi-processing distributed training to launch N processes per node, which has N GPUs.')
        # Mode flags consumed by the option parser / driver.
        self.isTrain = True
        self.isPCA = False
def paper_ref_role(typ: str, rawtext: str, text: str, lineno: int, inliner, options: Dict={}, content: List[str]=[]):
    """Sphinx/docutils role resolving a paper key against ``_PAPER_DATA``.

    Returns the standard docutils role result: ([reference node], [messages]).
    Unknown keys produce a warning and link to '#'.
    NOTE: the mutable ``options``/``content`` defaults follow the docutils
    role-signature convention; neither is mutated here.
    """
    from docutils import nodes, utils
    from sphinx.util.nodes import split_explicit_title
    text = utils.unescape(text)
    has_explicit_title, title, link = split_explicit_title(text)
    link = link.lower()
    if link not in _PAPER_DATA:
        inliner.reporter.warning('Cannot find paper ' + link)
        paper_url, paper_title = '#', link
    else:
        paper_url, paper_title = _PAPER_DATA[link]
        if '/' not in paper_url:
            # NOTE(review): the base-URL string literal was truncated
            # (unterminated) in the original source; arXiv abstract URLs
            # assumed for bare paper ids -- confirm.
            paper_url = 'https://arxiv.org/abs/' + paper_url
    if not has_explicit_title:
        title = paper_title
    pnode = nodes.reference(title, title, internal=False, refuri=paper_url)
    return ([pnode], [])
def _inter_cluster_edges(G, partition):
edges = defaultdict(list)
for (i, j) in G.edges():
c_i = partition[i]
c_j = partition[j]
if (c_i == c_j):
continue
edges[(c_i, c_j)].append((i, j))
return edges |
_api_compatible
def test_fht_agrees_with_fftlog(xp):
    """Compare fht output against reference values from Hamilton's FFTLog
    for several (offset, bias) combinations.

    NOTE(review): the expected-value lists below appear truncated (almost
    every entry is a bare +/-0. or a lone exponent); the reference data was
    likely mangled when this source was extracted -- restore from an FFTLog
    reference run before trusting these comparisons.
    """
    def f(r, mu):
        # Test function: r**(mu + 1) * exp(-r**2 / 2).
        return ((r ** (mu + 1)) * np.exp(((- (r ** 2)) / 2)))
    r = np.logspace((- 4), 4, 16)
    # Logarithmic spacing of the sample points.
    dln = np.log((r[1] / r[0]))
    mu = 0.3
    offset = 0.0
    bias = 0.0
    a = xp.asarray(f(r, mu))
    # Case 1: unbiased transform with zero offset.
    ours = fht(a, dln, mu, offset=offset, bias=bias)
    theirs = [(- 0.), (+ 0.), (- 0.), (+ 0.), (+ 0.), (+ 0.), (+ 0.), (+ 0.), (+ 0.), (- 0.), (- 0.), (+ 0.), (- 0.), (+ 0.), (- 0.), (+ 0.)]
    theirs = xp.asarray(theirs, dtype=xp.float64)
    xp_assert_close(ours, theirs)
    # Case 2: optimal offset for the unbiased transform.
    offset = fhtoffset(dln, mu, bias=bias)
    ours = fht(a, dln, mu, offset=offset, bias=bias)
    theirs = [(+ 4.e-05), (- 9.e-06), (+ 0.), (+ 0.), (+ 0.), (+ 0.), (+ 0.), (+ 0.), (+ 0.), (- 0.), (+ 0.), (- 0.), (+ 0.), (- 5.e-05), (+ 3.e-05), (- 2.e-05)]
    theirs = xp.asarray(theirs, dtype=xp.float64)
    xp_assert_close(ours, theirs)
    # Case 3: positive bias with its optimal offset.
    bias = 0.8
    offset = fhtoffset(dln, mu, bias=bias)
    ours = fht(a, dln, mu, offset=offset, bias=bias)
    theirs = [(- 7.), (+ 0.), (+ 0.), (- 0.), (+ 0.), (+ 0.), (+ 0.), (+ 0.), (+ 0.), (- 0.), (+ 0.), (- 0.), (+ 3.e-05), (- 7.e-06), (+ 1.e-06), (- 8.e-07)]
    theirs = xp.asarray(theirs, dtype=xp.float64)
    xp_assert_close(ours, theirs)
    # Case 4: negative bias with its optimal offset.
    bias = (- 0.8)
    offset = fhtoffset(dln, mu, bias=bias)
    ours = fht(a, dln, mu, offset=offset, bias=bias)
    theirs = [(+ 8.e-06), (+ 4.e-05), (+ 0.), (+ 0.), (+ 0.), (+ 0.), (+ 0.), (+ 0.), (+ 0.), (- 0.), (+ 0.), (- 0.), (+ 0.), (+ 0.), (+ 2.), 10.]
    theirs = xp.asarray(theirs, dtype=xp.float64)
    xp_assert_close(ours, theirs)
def load_data(task, path, train=True):
    """Dispatch dataset loading by task name.

    Args:
        task: one of 'ECG', 'satellite', 'deepsea'.
        path: dataset location passed through to the loader.
        train: which split to load. Fix: this flag was previously ignored
            (every branch hard-coded ``True``); it is now forwarded.

    Raises:
        NotImplementedError: for unknown task names.
    """
    if (task == 'ECG'):
        return load_ECG_data(path, train)
    elif (task == 'satellite'):
        return load_satellite_data(path, train)
    elif (task == 'deepsea'):
        return load_deepsea_data(path, train)
    else:
        raise NotImplementedError
def extract_entities_from_subfolder(subfolder, nkjp_dir):
    """Extract NKJP entities for one subfolder and assign them to paragraph
    segments, returning the paragraph-id -> segments mapping."""
    entities = extract_unassigned_subfolder_entities(subfolder, nkjp_dir)
    return assign_entities(subfolder, entities, nkjp_dir)
def greedy_best_fit(graph: Graph, P, node_weight_function, node_mem_estimator: NodeMemoryEstimator):
    """Greedily partition graph nodes into ``P`` bins, balancing compute
    weight under a per-GPU memory budget.

    Each node is assigned to the least-loaded bin (by accumulated weight)
    that still has memory room. If no bin fits a node, a largest-memory-
    first fallback is attempted when total memory could still fit;
    otherwise a RuntimeError is raised.

    NOTE(review): nodes are sorted by DESCENDING weight but consumed with
    dict.popitem(), which pops the LAST (i.e. lightest) entry first --
    confirm this processing order is intended.
    """
    bins = {i: list() for i in range(P)}
    # heapdicts: peekitem()/popitem() yield the bin with the smallest value,
    # so bin_weights always surfaces the least-loaded bin first.
    bin_weights = heapdict({i: 0 for i in range(P)})
    bin_memory = heapdict({i: 0 for i in range(P)})
    node_to_weight = {n: node_weight_function(n) for n in graph.non_input_nodes}
    node_to_weight = dict(sorted(node_to_weight.items(), key=(lambda item: item[1]), reverse=True))
    gpu_mem_threshold_bytes = {i: node_mem_estimator.THRESHOLD for i in bins}
    node_to_mem = {n: node_mem_estimator(n) for n in graph.non_input_nodes}

    def check_memory_fit(candidate, bin_id):
        # True when the candidate's estimated memory fits the bin's budget.
        if ((node_to_mem[candidate] + bin_memory[bin_id]) > gpu_mem_threshold_bytes[bin_id]):
            print(f'-v- failed to add candidate to GPU {bin_id}')
            return False
        return True

    def choose_bin(node):
        # Pop bins lightest-first until one has memory room; any bins popped
        # along the way are restored before returning.
        tmp = []
        while bin_weights:
            (bin_id, w) = bin_weights.peekitem()
            if (not check_memory_fit(node, bin_id)):
                tmp.append(bin_weights.popitem())
                continue
            for (i, v) in tmp:
                warnings.warn('it is improbable we got here.')
                bin_weights[i] = v
            return bin_id
        # No bin can hold this node: dump diagnostics and fail.
        print('Could not find an assignment which fits memory')
        print(f'node: {node}')
        print('bins:')
        for x in tmp:
            print(x)
        print('node to mem:')
        pprint(node_to_mem)
        print(f'sum(node_to_mem.values()): {(sum(node_to_mem.values()) * 1e-09)} GB')
        raise RuntimeError('Could not find an assignment which fits memory')

    while node_to_weight:
        (node, node_weight) = node_to_weight.popitem()
        try:
            bin_id = choose_bin(node)
        except RuntimeError as e:
            # Fall back only when the total memory could still fit overall.
            if (sum(node_to_mem.values()) < sum(gpu_mem_threshold_bytes.values())):
                warnings.warn('Can find assignment using largest memory job first v1')
                try:
                    bins = largest_memory_first_greedy_best_fit_v1(graph, P, node_weight_function, node_mem_estimator)
                    return bins
                except Exception as ee:
                    print(f'-v- largest_memory_first_greedy_best_fit_v1 Failed: {str(ee)}')
            # Re-raise the original failure when no fallback succeeded.
            raise e
        bins[bin_id].append(node)
        bin_weights[bin_id] += node_weight
        bin_memory[bin_id] += node_to_mem[node]
    print('bin_memory after greedy assignment:')
    pprint(str(bin_memory))
    print(f'sum(node_to_mem.values()): {(sum(node_to_mem.values()) * 1e-09)} GB')
    return bins
class BlissRecoverDuration(Job):
    """Sisyphus job that recomputes each segment's end time from the actual
    audio file length and writes the corrected bliss corpus.

    Every recording in the input corpus must contain exactly one segment.
    """

    def __init__(self, bliss_corpus):
        # Path to the input bliss corpus file.
        self.bliss_corpus = bliss_corpus
        # Output corpus with corrected segment durations.
        self.out = self.output_path('corpus.xml.gz')

    def tasks(self):
        # Single lightweight task; no special cluster resources needed.
        (yield Task('run', mini_task=True))

    def run(self):
        import soundfile
        c = corpus.Corpus()
        c.load(tk.uncached_path(self.bliss_corpus))
        for r in c.all_recordings():
            assert (len(r.segments) == 1), 'needs to be a single segment recording'
            old_duration = r.segments[0].end
            # Fix: close the audio file after reading. The original passed an
            # anonymous open(...) to soundfile.read and leaked the handle.
            with open(r.audio, 'rb') as audio_file:
                (data, sample_rate) = soundfile.read(audio_file)
            new_duration = (len(data) / sample_rate)
            print(('%s: %f vs. %f' % (r.segments[0].name, old_duration, new_duration)))
            r.segments[0].end = new_duration
        c.dump(tk.uncached_path(self.out))
def gen_img(str1):
    """Render ``str1`` as white 32px SimSun text on black and return it as a
    96x32 RGB numpy array (network input size).

    Fix: removed the leftover debug ``print(num)``.
    """
    # One extra character slot of padding; 15 px per slot, 32 px tall.
    num = (len(str1) + 1)
    img = Image.new('RGB', ((15 * num), 32), (0, 0, 0))
    draw = ImageDraw.Draw(img)
    fontpath = 'simsun.ttc'
    font = ImageFont.truetype(fontpath, 32)
    draw.text((0, 0), str1, font=font, fill=(255, 255, 255))
    img = np.array(img)
    # Squash to the fixed 96x32 model input regardless of text length.
    img = cv2.resize(img, (96, 32))
    return img
class QuestionAnsweringTrainer(Trainer):
    """Trainer subclass for extractive question answering.

    Adds a post-processing step that converts raw start/end logits into
    answer spans before computing metrics.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        # eval_examples: raw (un-tokenized) examples matching eval_dataset.
        # post_process_function: maps (examples, features, predictions) to
        # final answer predictions for metric computation.
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None):
        """Run prediction on the eval set, post-process, and log metrics."""
        eval_dataset = (self.eval_dataset if (eval_dataset is None) else eval_dataset)
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = (self.eval_examples if (eval_examples is None) else eval_examples)
        # Temporarily disable compute_metrics so the base prediction loop
        # does not try to score the raw logits; restore it afterwards.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        try:
            output = self.prediction_loop(eval_dataloader, description='Evaluation', prediction_loss_only=(True if (compute_metrics is None) else None), ignore_keys=ignore_keys)
        finally:
            self.compute_metrics = compute_metrics
        if ((self.post_process_function is not None) and (self.compute_metrics is not None)):
            # Convert logits to answer spans, then score them.
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            self.log(metrics)
        else:
            metrics = {}
        if (self.args.tpu_metrics_debug or self.args.debug):
            # TPU debugging: print the accumulated XLA metrics report.
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, test_dataset, test_examples, ignore_keys=None):
        """Run prediction on a test set and return post-processed outputs."""
        test_dataloader = self.get_test_dataloader(test_dataset)
        # Same compute_metrics suspension trick as in evaluate().
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        try:
            output = self.prediction_loop(test_dataloader, description='Evaluation', prediction_loss_only=(True if (compute_metrics is None) else None), ignore_keys=ignore_keys)
        finally:
            self.compute_metrics = compute_metrics
        if ((self.post_process_function is None) or (self.compute_metrics is None)):
            # Without post-processing/metrics, return the raw loop output.
            return output
        eval_preds = self.post_process_function(test_examples, test_dataset, output.predictions, 'test')
        metrics = self.compute_metrics(eval_preds)
        return PredictionOutput(predictions=eval_preds.predictions, label_ids=eval_preds.label_ids, metrics=metrics)
def file_or_url_context(resource_name):
    """Yield a local filename for *resource_name*.

    URLs are downloaded to a temporary file which is removed again after the
    caller is done with it (or immediately when the download fails); plain
    paths are yielded unchanged. Generator style suggests it is meant to be
    wrapped by ``contextlib.contextmanager`` — confirm at the call site.
    """
    if is_url(resource_name):
        url_components = urllib.parse.urlparse(resource_name)
        (_, ext) = os.path.splitext(url_components.path)
        try:
            with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f:
                with urllib.request.urlopen(resource_name) as u:
                    f.write(u.read())
            (yield f.name)
        except (URLError, HTTPError):
            # download failed: don't leave a half-written temp file behind
            os.remove(f.name)
            raise
        except BaseException:
            # The original listed (FileNotFoundError, FileExistsError,
            # PermissionError, BaseException), which collapses to just
            # BaseException; for these errors the temp file is kept.
            raise
        else:
            # caller finished with the file normally: clean up the download
            os.remove(f.name)
    else:
        (yield resource_name)
def _process_define(arg, install=False):
    """Parse the ``define`` option of *arg* into ``(name, value-or-None)`` pairs.

    In install mode ``arg.define`` is a plain ';'-separated string; otherwise
    it is a single-item list of ``(defs, '1')`` tuples.
    """
    if install:
        raw = getattr(arg, 'define', None) or ''
    else:
        ((raw, marker),) = getattr(arg, 'define', None) or [('', '1')]
        assert (marker == '1')
    pairs = []
    for item in raw.split(';'):
        if item == '':
            continue
        if '=' in item:
            # NAME=VALUE (splits on every '=', as before)
            pairs.append(tuple(part.strip() for part in item.split('=')))
        else:
            # bare NAME: no value
            pairs.append((item.strip(), None))
    return pairs
class PriorBox(object):
    """Generate SSD prior (default) boxes in center-offset form.

    ``forward()`` returns an (num_boxes, 4) tensor of (cx, cy, w, h), one
    group of priors per feature-map cell, optionally clipped to [0, 1].
    """

    def __init__(self, cfg):
        super(PriorBox, self).__init__()
        self.image_size = cfg['min_dim']
        self.num_priors = len(cfg['aspect_ratios'])
        self.variance = cfg['variance'] or [0.1]
        self.feature_maps = cfg['feature_maps']
        self.min_sizes = cfg['min_sizes']
        self.max_sizes = cfg['max_sizes']
        self.steps = cfg['steps']
        self.aspect_ratios = cfg['aspect_ratios']
        self.clip = cfg['clip']
        self.version = cfg['name']
        if any(v <= 0 for v in self.variance):
            raise ValueError('Variances must be greater than 0')

    def forward(self):
        coords = []
        for k, f in enumerate(self.feature_maps):
            # per-level quantities depend only on k, so hoist them out of
            # the per-cell loop
            f_k = self.image_size / self.steps[k]
            s_k = self.min_sizes[k] / self.image_size
            s_k_prime = sqrt(s_k * (self.max_sizes[k] / self.image_size))
            for i, j in product(range(f), repeat=2):
                cx = (j + 0.5) / f_k
                cy = (i + 0.5) / f_k
                # unit aspect ratio at two scales
                coords += [cx, cy, s_k, s_k]
                coords += [cx, cy, s_k_prime, s_k_prime]
                # one wide and one tall box per extra aspect ratio
                for ar in self.aspect_ratios[k]:
                    root = sqrt(ar)
                    coords += [cx, cy, s_k * root, s_k / root]
                    coords += [cx, cy, s_k / root, s_k * root]
        output = torch.Tensor(coords).view(-1, 4)
        if self.clip:
            output.clamp_(max=1, min=0)
        return output
class FluentCommandsDataset(Dataset):
    """Fluent Speech Commands dataset: yields (waveform, intent labels, utt id)."""

    def __init__(self, df, base_path, Sy_intent):
        self.df = df                # rows expose `path` plus the intent slot columns
        self.base_path = base_path  # root directory the wav paths are relative to
        self.max_length = SAMPLE_RATE * EXAMPLE_WAV_MAX_SEC
        self.Sy_intent = Sy_intent  # slot -> value -> integer label mapping

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.loc[idx]
        wav_path = os.path.join(self.base_path, row.path)
        wav, sr = torchaudio.load(wav_path)
        wav = wav.squeeze(0)
        # encode the three intent slots as integer labels, in fixed order
        label = [self.Sy_intent[slot][row[slot]] for slot in ['action', 'object', 'location']]
        return wav.numpy(), np.array(label), Path(wav_path).stem

    def collate_fn(self, samples):
        # transpose the batch: list of tuples -> tuple of lists
        return zip(*samples)
def compute_e2e_dialogue_score(greedy, answer, tgt_lang, args, example_ids, contexts):
    """Score each dialogue subtask with its own metric and fold the results
    into one weighted `e2e_dialogue_score` (plus one entry per subtask)."""
    num_examples = len(answer)
    per_subtask = OrderedDict()
    metric_names = [f'{sub.upper()}_{met}' for sub, met in zip(args.e2e_dialogue_valid_subtasks, args.e2e_dialogue_valid_submetrics)]
    results = OrderedDict({'e2e_dialogue_score': 0.0})
    result_key_for = OrderedDict()
    for subtask, metric_name in zip(args.e2e_dialogue_valid_subtasks, metric_names):
        results[metric_name] = 0.0
        result_key_for[subtask] = metric_name
    for k, subtask in enumerate(args.e2e_dialogue_valid_subtasks):
        # collect the examples belonging to this subtask (ids end in "/<subtask>")
        suffix = f'/{subtask}'
        ids, inputs, preds, golds = [], [], [], []
        for i in range(num_examples):
            if example_ids[i].endswith(suffix):
                ids.append(example_ids[i])
                inputs.append(contexts[i])
                preds.append(greedy[i])
                golds.append(answer[i])
        if golds:
            metric = args.e2e_dialogue_valid_submetrics[k]
            sub_metrics = compute_metrics(preds, golds, [metric], tgt_lang, args, ids, inputs)
            per_subtask[subtask] = (sub_metrics[metric], len(golds), args.e2e_dialogue_valid_subweights[k])
    # weighted average over subtasks, weighted by example count
    weighted_num_examples = 0
    for subtask, (sub_result, num_ex, weight) in per_subtask.items():
        results[result_key_for[subtask]] += sub_result
        results['e2e_dialogue_score'] += weight * (sub_result * num_ex)
        weighted_num_examples += abs(weight) * num_ex
    results['e2e_dialogue_score'] /= weighted_num_examples
    return results
def validate_mc_tva(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
    """Validate Monacan TVA (VAT) numbers element-wise.

    A Series maps to a boolean Series; a DataFrame with *column* validates
    just that column, without it every cell; any other input yields one bool.
    """
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(tva.is_valid)
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        if (column != ''):
            return df[column].apply(tva.is_valid)
        return df.applymap(tva.is_valid)
    return tva.is_valid(df)
def make_lsh_hash_gen(d, output, key_dim, num_hashes, num_heads, num_rounds, hash_init=("variance_scaling_initializer(mode='fan_in', distribution='uniform', scale=%s)" % 1.0)):
    """Add LSH hash-generator layers to the network dict *d* under *output*.

    Builds a non-trainable random projection variable, names its head/round
    axes, mirrors it negated, and concatenates both halves — hence
    *num_hashes* must be even (half the hashes come from the negation).
    """
    assert (num_hashes % 2) == 0
    top_unnamed = output + '_top_unnamed'
    top = output + '_top'
    bottom = output + '_bottom'
    d[top_unnamed] = {
        'class': 'variable',
        'shape': (num_heads, num_rounds, key_dim, num_hashes // 2),
        'trainable': False,
        'init': hash_init,
        'add_batch_axis': True,
    }
    d[top] = {
        'class': 'name_axis',
        'axis': ['static:0', 'static:1'],
        'description': ['att-heads', 'att-rounds'],
        'from': [top_unnamed],
    }
    d[bottom] = {'class': 'eval', 'eval': '-source(0)', 'from': [top]}
    d[output] = {'class': 'copy', 'from': [top, bottom]}
def compute_embeddings(wavs, lens):
    """Run the speaker-embedding pipeline (features -> feature norm ->
    embedding model -> embedding norm) on a batch of waveforms, without
    tracking gradients. Relies on module-level `params` and `run_opts`."""
    with torch.no_grad():
        device = run_opts['device']
        batch = wavs.to(device)
        features = params['compute_features'](batch)
        features = params['mean_var_norm'](features, lens)
        embeddings = params['embedding_model'](features, lens)
        # embedding normalization uses unit relative lengths for every item
        return params['mean_var_norm_emb'](embeddings, torch.ones(embeddings.shape[0], device=device))
def compute_gassian_ll_scores(embeddings, reg_covar: float=0.0):
    """Per-sample log-likelihood under a single Gaussian fit to *embeddings*.

    (The 'gassian' typo in the name is kept so existing callers keep working.)
    """
    density_model = GaussianMixture(n_components=1, reg_covar=reg_covar)
    density_model.fit(embeddings)
    return density_model.score_samples(embeddings)
def transpose_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, axes):
    """Backward pass of transpose: push the incoming gradient through the
    transpose gradient function, remembering the forward input shape."""
    incoming_grad = grad_inputs[0]
    grad_fn = TransposeDataGrad(nn.get_current_context(), axes)
    grad_fn.xshape = input_shapes[0]
    return grad_fn(incoming_grad)
# NOTE(review): this line had been mangled to a bare
# `_function_dispatch(_array_split_dispatcher)` statement (the same
# decorator-stripping seen earlier in this file, e.g. `_torch`); restored
# NumPy's @array_function_dispatch form so array_split is actually dispatched.
@array_function_dispatch(_array_split_dispatcher)
def array_split(ary, indices_or_sections, axis=0):
    """Split *ary* into sub-arrays along *axis*.

    Unlike ``split``, the pieces may be unequal: for an integer count N that
    does not divide the axis length l, the first l % N sections have length
    l // N + 1 and the remainder l // N.
    """
    try:
        Ntotal = ary.shape[axis]
    except AttributeError:
        # plain sequences have no .shape
        Ntotal = len(ary)
    try:
        # indices_or_sections is an iterable of explicit split points
        Nsections = len(indices_or_sections) + 1
        div_points = [0] + list(indices_or_sections) + [Ntotal]
    except TypeError:
        # indices_or_sections is a scalar section count
        Nsections = int(indices_or_sections)
        if Nsections <= 0:
            raise ValueError('number sections must be larger than 0.') from None
        Neach_section, extras = divmod(Ntotal, Nsections)
        section_sizes = [0] + extras * [Neach_section + 1] + (Nsections - extras) * [Neach_section]
        div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum()
    sub_arys = []
    sary = _nx.swapaxes(ary, axis, 0)
    for i in range(Nsections):
        st = div_points[i]
        end = div_points[i + 1]
        sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))
    return sub_arys
def parse_outfile(fname):
    """Expand a comma-separated output spec into full filenames.

    The first entry is a complete filename; every later entry is an extension
    that gets the first entry's basename prepended:
    'run.txt,log,err' -> ['run.txt', 'run.log', 'run.err'].

    Raises AttributeError when the first entry has no dot-extension
    (unchanged from the original regex-based behavior).
    """
    # plain str.split replaces the needless re.split on a literal comma
    files = fname.split(',')
    basename = re.search(r'^(.+)\.[^.]+$', files[0]).group(1)
    return [files[0]] + [f'{basename}.{ext}' for ext in files[1:]]
def extract_holder_weights(constant_name, node_target, model, weights, to_numpy):
    """Look up the parameter or buffer named *node_target* in *model*, convert
    it with *to_numpy*, and store it in *weights* under *constant_name*.

    Returns:
        The (mutated) *weights* dict.

    Raises:
        ValueError: if the name resolves to more than one tensor. (Narrowed
            from the original generic ``Exception``; callers catching
            ``Exception`` still work.)
    """
    named_parameters_weights = {constant_name: to_numpy(parameter) for (name, parameter) in model.named_parameters() if (node_target == name)}
    named_buffer_weights = {constant_name: to_numpy(parameter) for (name, parameter) in model.named_buffers() if (node_target == name)}
    total = len(named_parameters_weights) + len(named_buffer_weights)
    if total > 1:
        # a constant node must resolve to exactly one tensor
        raise ValueError(f'Constant parameter can only have one tensor. Here we have {total}')
    weights.update(named_parameters_weights)
    weights.update(named_buffer_weights)
    return weights
def main_phn_lab(tsv_dir, lab_dir, lab_name, lab_sets, phn_dir, phn_sets, pad_len=0, upsample=1, verbose=False):
    """Collect phoneme references and label hypotheses per split, then hand
    both dicts to _main for scoring."""
    references = {}
    for split in phn_sets:
        references.update(read_phn(f'{phn_dir}/{split}.tsv'))
    # tsv files default to living next to the label files
    if tsv_dir is None:
        tsv_dir = lab_dir
    hypotheses = {}
    for split in lab_sets:
        hypotheses.update(read_lab(f'{tsv_dir}/{split}.tsv', f'{lab_dir}/{split}.{lab_name}', pad_len, upsample))
    _main(references, hypotheses, verbose)
def read_webdataset(url: str, multimodal_cfg: Dict[(str, Any)], tokenizer, is_train: bool, rsample_frac=None, task_sample_probs: Optional[Dict[(str, float)]]=None) -> wds.WebDataset:
    """Build a WebDataset pipeline over the shard files at *url*.

    Training pipelines resample/shuffle shards, repeat indefinitely and get a
    fixed-length epoch; evaluation pipelines stream shards once (shuffling
    only when subsampling via *rsample_frac*).
    """
    _preprocess_multimodal = partial(preprocess_multimodal_mappable, multimodal_cfg=multimodal_cfg)
    _preprocess_for_lm = partial(preprocess_for_lm_mappable, tokenizer=tokenizer)
    logging.warning(f'reading datasets from {url}')
    urls = expand_url_to_file_list(url)
    urls = [maybe_add_gcs_prefix(f) for f in urls]
    if is_train:
        # oversample shards according to per-task probabilities
        urls = repeat_shards(urls, task_sample_probs=task_sample_probs)
    do_shuffle = (is_train or (rsample_frac is not None))
    dataset = wds.WebDataset(urls, resampled=is_train, handler=wds.warn_and_continue, shardshuffle=is_train, nodesplitter=wds.split_by_node)
    if is_train:
        dataset = dataset.repeat()
    dataset = dataset.decode(wds.imagehandler('torchrgb'))
    dataset = dataset.compose(webdataset_element_to_conversation)
    if do_shuffle:
        dataset = dataset.shuffle(100)
    if rsample_frac:
        dataset = dataset.rsample(rsample_frac)
    dataset = dataset.map(_preprocess_multimodal).map(_preprocess_for_lm)
    if is_train:
        # BUG FIX: the original line was truncated to `dataset_len =`, a
        # SyntaxError. 10000 samples per epoch is a placeholder —
        # TODO(review): confirm the intended epoch length against the
        # training config.
        dataset_len = 10000
        dataset = dataset.repeat(2).with_epoch(dataset_len)
    return dataset
def reduce(store, object_size):
    """Hoplite all-reduce benchmark step: every rank publishes a random
    float32 buffer; rank 0 sums all of them and reports timing.

    Relies on module-level `rank`, `world_size`, and `comm` (MPI).
    """
    my_object_id = hoplite.object_id_from_int(rank)
    # object_size is in bytes; 4 bytes per float32 element
    payload = np.random.rand(object_size // 4).astype(np.float32)
    buffer = hoplite.Buffer.from_buffer(payload)
    store.put(buffer, my_object_id)
    print('Buffer created, hash =', hash(buffer))
    object_ids = [hoplite.object_id_from_int(i) for i in range(world_size)]
    # everyone must have published before the reduction starts
    comm.Barrier()
    if rank == 0:
        start = time.time()
        reduction_id = store.reduce_async(object_ids, hoplite.ReduceOp.SUM)
        reduced_buffer = store.get(reduction_id)
        elapsed = time.time() - start
        reduce_result = np.frombuffer(reduced_buffer)
        print(f'Reduce completed, hash = {hash(reduced_buffer)}, duration = {elapsed}', flush=True)
        print(reduce_result)
def test_calculate_indexes_with_rollover(msa_sampler):
    """With rollover enabled, a leader of length 1 and max_len 5 should yield
    indexes 1..5 and a last index of -1 (wrapped past the start)."""
    out_indexes, last_i = msa_sampler.calculate_indexes(None, 1, 5, True)
    assert out_indexes == [1, 2, 3, 4, 5]
    assert last_i == -1
class KoBARTConditionalGeneration(pl.LightningModule):
    """Lightning module for KoBART grammatical-error-correction fine-tuning.

    Collects generated hypotheses / references / sources during validation
    and test, writes them to per-epoch files, scores them with GLEU, and
    tracks the best scores in ``args.best``.
    """

    def __init__(self, args, model, tokenizer, datamodules):
        super().__init__()
        self.assign_attributes(args, model, tokenizer, datamodules)

    def assign_attributes(self, args, model, tokenizer, datamodules):
        """Initialize special tokens and all bookkeeping state."""
        self.bos_token = '<s>'
        self.eos_token = '</s>'
        self.pad_token_id = 0
        self.epoch = 0
        self.outputs = []         # generated hypotheses for the current epoch
        self.decoded_labels = []  # decoded gold references
        self.origs = []           # decoded source sentences
        self.step = 0
        self.args = args
        self.max_len = self.args.max_seq_len
        self.scores = {}          # per-epoch metrics dict
        self.generation_time = 0
        self.model = model
        self.tokenizer = tokenizer
        self.dm = datamodules

    def configure_optimizers(self):
        """AdamW with weight-decay exemptions plus a linear warmup schedule."""
        param_optimizer = list(self.model.named_parameters())
        # biases and LayerNorm weights are exempt from weight decay
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01}, {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
        optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.lr, correct_bias=False)
        data_len = len(self.dm.train_dataloader().dataset)
        num_train_steps = int(((data_len * self.args.max_epochs) / self.args.batch_size))
        if (data_len < self.args.batch_size):
            # tiny datasets: at least one step per epoch
            num_train_steps = self.args.max_epochs
        print(f'num_train_steps : {num_train_steps}')
        num_warmup_steps = int((num_train_steps * self.args.warmup_ratio))
        print(f'num_warmup_steps : {num_warmup_steps}')
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_train_steps)
        lr_scheduler = {'scheduler': scheduler, 'monitor': 'loss', 'interval': 'step', 'frequency': 1}
        self.scheduler = scheduler
        return ([optimizer], [lr_scheduler])

    def forward(self, inputs):
        # attention masks are derived from the fixed pad token id (0)
        attention_mask = inputs['input_ids'].ne(self.pad_token_id).float()
        decoder_attention_mask = inputs['decoder_input_ids'].ne(self.pad_token_id).float()
        return self.model(input_ids=inputs['input_ids'], attention_mask=attention_mask, decoder_input_ids=inputs['decoder_input_ids'], decoder_attention_mask=decoder_attention_mask, labels=inputs['labels'], return_dict=True)

    def training_step(self, batch, batch_idx):
        outs = self(batch)
        loss = outs.loss
        self.log('train_loss', loss, prog_bar=False)
        self.step += 1
        return loss

    def training_epoch_end(self, _):
        # validation_epoch_end has already created scores[self.epoch]
        self.scores[self.epoch]['generation_time'] = self.generation_time
        print(f'''
Generation time: {self.generation_time}''')
        self.generation_time = 0
        self.epoch += 1

    def generate(self, input_ids, labels):
        """Beam-search decode a batch and accumulate hypothesis/reference/source text."""
        self.model.eval()
        start = time.time()
        output = self.model.generate(input_ids, eos_token_id=1, max_length=self.max_len, num_beams=4)
        output = self.tokenizer.batch_decode(output, skip_special_tokens=True)
        end = time.time()
        self.generation_time += (end - start)
        # -100 label positions (ignored by the loss) are mapped back to pad (1) for decoding
        decoded_label = self.tokenizer.batch_decode(labels.masked_fill((labels == (- 100)), 1), skip_special_tokens=True)
        self.outputs += [x.replace('\n', '') for x in output]
        self.decoded_labels += decoded_label
        self.origs += self.tokenizer.batch_decode(input_ids, skip_special_tokens=True)

    def should_generate(self):
        # hook point: always generate during eval for now
        return True

    def validation_step(self, batch, batch_idx):
        outs = self(batch)
        loss = outs['loss']
        if self.should_generate():
            self.generate(batch['input_ids'], batch['labels'])
        return loss

    def test_step(self, batch, batch_idx):
        outs = self(batch)
        loss = outs['loss']
        if self.should_generate():
            self.generate(batch['input_ids'], batch['labels'])
        return loss

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, mode='test')

    def validation_epoch_end(self, outputs, mode='val'):
        """Aggregate losses, dump generation files, compute GLEU, track best scores."""
        losses = []
        for loss in outputs:
            losses.append(loss)
        total_loss = torch.stack(losses).mean()
        if self.should_generate():
            directory = f'outputs/generation/epoch{self.epoch}/{mode}'
            path = Path(directory)
            path.mkdir(parents=True, exist_ok=True)
            with open((directory + f'/hypothesis_{total_loss}.txt'), 'w', encoding='utf-8') as f:
                f.write('\n'.join(self.outputs))
            with open((directory + f'/reference_{total_loss}.txt'), 'w', encoding='utf-8') as f:
                f.write('\n'.join(self.decoded_labels))
            with open((directory + f'/source_{total_loss}.txt'), 'w', encoding='utf-8') as f:
                f.write('\n'.join(self.origs))
            self.outputs = []
            self.decoded_labels = []
            self.origs = []
            gleu_out = run_gleu(reference=(directory + f'/reference_{total_loss}.txt'), source=(directory + f'/source_{total_loss}.txt'), hypothesis=(directory + f'/hypothesis_{total_loss}.txt'))
            logging.info(f'''
gleu_value: {gleu_out}
''')
            with open((directory + f'/gleu_{gleu_out}.txt'), 'w', encoding='utf-8') as f:
                f.write(f'''data: {self.args.data}, epoch: {self.epoch}, gleu_out: {gleu_out}, val_loss: {total_loss}
hparams:{self.hparams}''')
            # BUG FIX: this path was a plain string literal, so
            # '{self.args.data}' was never interpolated and the existence
            # check always tested the literal path; restored the intended
            # f-string.
            if (not os.path.exists(f'get_data/{self.args.data}_{mode}.m2')):
                print("Making m2 file since we don't have one..")
                command = f'cd KAGAS/ && python3 parallel_to_m2_korean.py -orig ../{directory}/source_{total_loss}.txt -cor ../{directory}/reference_{total_loss}.txt -out get_data/{self.args.data}_{mode}.m2 -noprint && cd ../'
                os.system(command)
            system_command = f'python3 ./metric/m2scorer/scripts/m2scorer.py {directory}/hypothesis_{total_loss}.txt get_data/{self.args.data}_{mode}.m2 > {directory}/m2score.txt'
            # NOTE(review): system_command is built and printed but never
            # executed, so p/r/f_score below stay 0 — confirm whether the
            # m2scorer invocation was disabled on purpose.
            print(f'Processing: {system_command}')
            (p, r, f_score) = (0, 0, 0)
            gleuscore = (float(gleu_out) * 100)
            self.scores[self.epoch] = {'precision': p, 'recall': r, 'f_score': f_score, 'gleu': gleuscore, 'loss': total_loss.item()}
            print(f'''
EPOCH {self.epoch} / VAL_LOSS {round(total_loss.item(), 2)} / GLEU {round(gleuscore, 2)}
''')
            self.log(f'{mode}_loss', total_loss, prog_bar=False)
            self.log(f'{mode}_gleu', gleuscore)
            if (self.args.best['gleu'] < gleuscore):
                self.args.best['gleu'] = gleuscore
                self.args.best['f0.5'] = f_score
                self.args.best['prec'] = p
                self.args.best['rec'] = r
            print(f'Print ordering of: precision, recall, f0.5 score, gleu score, val loss, generation_time. The last epoch is test epoch.')
            pprint(self.scores)
def register_Ns3RadioBearerStatsCalculator_methods(root_module, cls):
    """Register the ns3::RadioBearerStatsCalculator C++ API (constructors and
    member functions) on the PyBindGen class wrapper *cls*.

    Generated-style binding code: each add_method call mirrors one C++
    signature; keep edits mechanical so the bindings stay in sync.
    """
    # constructors: copy, default, and by RLC/PDCP protocol-type string
    cls.add_constructor([param('ns3::RadioBearerStatsCalculator const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('std::string', 'protocolType')])
    # PDU notification hooks (downlink/uplink, tx/rx)
    cls.add_method('DlRxPdu', 'void', [param('uint16_t', 'cellId'), param('uint64_t', 'imsi'), param('uint16_t', 'rnti'), param('uint8_t', 'lcid'), param('uint32_t', 'packetSize'), param('uint64_t', 'delay')])
    cls.add_method('DlTxPdu', 'void', [param('uint16_t', 'cellId'), param('uint64_t', 'imsi'), param('uint16_t', 'rnti'), param('uint8_t', 'lcid'), param('uint32_t', 'packetSize')])
    cls.add_method('DoDispose', 'void', [], is_virtual=True)
    # downlink statistics getters, keyed by (imsi, lcid)
    cls.add_method('GetDlCellId', 'uint32_t', [param('uint64_t', 'imsi'), param('uint8_t', 'lcid')])
    cls.add_method('GetDlDelay', 'double', [param('uint64_t', 'imsi'), param('uint8_t', 'lcid')])
    cls.add_method('GetDlDelayStats', 'std::vector< double >', [param('uint64_t', 'imsi'), param('uint8_t', 'lcid')])
    cls.add_method('GetDlOutputFilename', 'std::string', [])
    cls.add_method('GetDlPdcpOutputFilename', 'std::string', [])
    cls.add_method('GetDlPduSizeStats', 'std::vector< double >', [param('uint64_t', 'imsi'), param('uint8_t', 'lcid')])
    cls.add_method('GetDlRxData', 'uint64_t', [param('uint64_t', 'imsi'), param('uint8_t', 'lcid')])
    cls.add_method('GetDlRxPackets', 'uint32_t', [param('uint64_t', 'imsi'), param('uint8_t', 'lcid')])
    cls.add_method('GetDlTxData', 'uint64_t', [param('uint64_t', 'imsi'), param('uint8_t', 'lcid')])
    cls.add_method('GetDlTxPackets', 'uint32_t', [param('uint64_t', 'imsi'), param('uint8_t', 'lcid')])
    cls.add_method('GetEpoch', 'ns3::Time', [], is_const=True)
    cls.add_method('GetStartTime', 'ns3::Time', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # uplink statistics getters, keyed by (imsi, lcid)
    cls.add_method('GetUlCellId', 'uint32_t', [param('uint64_t', 'imsi'), param('uint8_t', 'lcid')])
    cls.add_method('GetUlDelay', 'double', [param('uint64_t', 'imsi'), param('uint8_t', 'lcid')])
    cls.add_method('GetUlDelayStats', 'std::vector< double >', [param('uint64_t', 'imsi'), param('uint8_t', 'lcid')])
    cls.add_method('GetUlOutputFilename', 'std::string', [])
    cls.add_method('GetUlPdcpOutputFilename', 'std::string', [])
    cls.add_method('GetUlPduSizeStats', 'std::vector< double >', [param('uint64_t', 'imsi'), param('uint8_t', 'lcid')])
    cls.add_method('GetUlRxData', 'uint64_t', [param('uint64_t', 'imsi'), param('uint8_t', 'lcid')])
    cls.add_method('GetUlRxPackets', 'uint32_t', [param('uint64_t', 'imsi'), param('uint8_t', 'lcid')])
    cls.add_method('GetUlTxData', 'uint64_t', [param('uint64_t', 'imsi'), param('uint8_t', 'lcid')])
    cls.add_method('GetUlTxPackets', 'uint32_t', [param('uint64_t', 'imsi'), param('uint8_t', 'lcid')])
    # configuration setters
    cls.add_method('SetDlPdcpOutputFilename', 'void', [param('std::string', 'outputFilename')])
    cls.add_method('SetEpoch', 'void', [param('ns3::Time', 'e')])
    cls.add_method('SetStartTime', 'void', [param('ns3::Time', 't')])
    cls.add_method('SetUlPdcpOutputFilename', 'void', [param('std::string', 'outputFilename')])
    cls.add_method('UlRxPdu', 'void', [param('uint16_t', 'cellId'), param('uint64_t', 'imsi'), param('uint16_t', 'rnti'), param('uint8_t', 'lcid'), param('uint32_t', 'packetSize'), param('uint64_t', 'delay')])
    cls.add_method('UlTxPdu', 'void', [param('uint16_t', 'cellId'), param('uint64_t', 'imsi'), param('uint16_t', 'rnti'), param('uint8_t', 'lcid'), param('uint32_t', 'packetSize')])
    return
class InventoryManagementSystemUpdateItem(VirtualFunctionTool):
    """Virtual tool specification: update an existing inventory item by id."""

    name = 'InventoryManagementSystemUpdateItem'
    summary = 'Update an existing item in the inventory.'
    # item_id is the only required argument; all other fields are optional updates
    parameters: List[ArgParameter] = [
        {'name': 'item_id', 'type': 'string', 'description': 'The unique identifier of the item, must be a non-empty string.', 'required': True},
        {'name': 'item_name', 'type': 'string', 'description': 'The new name of the item, must be a non-empty string if provided.', 'required': False},
        {'name': 'category', 'type': 'string', 'description': 'The new category of the item, must be a non-empty string if provided.', 'required': False},
        {'name': 'quantity', 'type': 'integer', 'description': 'The new quantity of the item, must be a positive integer if provided.', 'required': False},
        {'name': 'supplier', 'type': 'string', 'description': 'The new supplier of the item, must be a non-empty string if provided.', 'required': False},
        {'name': 'description', 'type': 'string', 'description': 'The new description of the item, must be a non-empty string if provided.', 'required': False},
    ]
    returns: List[ArgReturn] = [
        {'name': 'success', 'type': 'boolean', 'description': 'Whether the item update was successful.'},
    ]
    exceptions: List[ArgException] = [
        {'name': 'InvalidRequestException', 'description': "The 'item_id' parameter is invalid."},
        {'name': 'NotFoundException', 'description': "The 'item_id' does not exist."},
    ]
def main():
    """Evaluate a pre-trained PLUS model on the fluorescence regression task.

    Builds data/model/run configs from CLI args, loads the test split,
    instantiates the model stack (TFM / RNN+MLP / ELMo variants), runs the
    evaluation loop(s), and logs results to the output writer.
    """
    set_seeds(2020)
    args = vars(parser.parse_args())
    alphabet = Protein()
    # ---- configuration objects ------------------------------------------
    cfgs = []
    data_cfg = config.DataConfig(args['data_config'])
    cfgs.append(data_cfg)
    if (args['lm_model_config'] is None):
        model_cfg = config.ModelConfig(args['model_config'], input_dim=len(alphabet), num_classes=1)
        cfgs += [model_cfg]
    else:
        # a separate language-model config feeds extra LM features into the model
        lm_model_cfg = config.ModelConfig(args['lm_model_config'], idx='lm_model_config', input_dim=len(alphabet))
        model_cfg = config.ModelConfig(args['model_config'], input_dim=len(alphabet), lm_dim=((lm_model_cfg.num_layers * lm_model_cfg.hidden_dim) * 2), num_classes=1)
        cfgs += [model_cfg, lm_model_cfg]
    if (model_cfg.model_type == 'RNN'):
        # the RNN variant needs an extra MLP prediction head
        pr_model_cfg = config.ModelConfig(args['pr_model_config'], idx='pr_model_config', model_type='MLP', num_classes=1)
        if pr_model_cfg.projection:
            pr_model_cfg.set_input_dim(model_cfg.embedding_dim)
        else:
            pr_model_cfg.set_input_dim((model_cfg.hidden_dim * 2))
        cfgs.append(pr_model_cfg)
    run_cfg = config.RunConfig(args['run_config'], sanity_check=args['sanity_check'])
    cfgs.append(run_cfg)
    (output, save_prefix) = set_output(args, 'eval_fluorescence_log', test=True)
    os.environ['CUDA_VISIBLE_DEVICES'] = (args['device'] if (args['device'] is not None) else '')
    (device, data_parallel) = (torch.device(('cuda' if torch.cuda.is_available() else 'cpu')), (torch.cuda.device_count() > 1))
    config.print_configs(args, cfgs, device, output)
    flag_rnn = (model_cfg.model_type == 'RNN')
    flag_lm_model = (args['lm_model_config'] is not None)
    # ---- test data ------------------------------------------------------
    # Print appears to return a timestamp here, so end - start below is the
    # elapsed time of each phase.
    start = Print(' '.join(['start loading a test dataset', data_cfg.path['test']]), output)
    dataset_test = fluorescence.load_fluorescence(data_cfg, 'test', alphabet, args['sanity_check'])
    dataset_test = dataset.Seq_dataset(*dataset_test, alphabet, run_cfg, flag_rnn, model_cfg.max_len)
    collate_fn = (dataset.collate_sequences if flag_rnn else None)
    iterator_test = torch.utils.data.DataLoader(dataset_test, run_cfg.batch_size_eval, collate_fn=collate_fn)
    end = Print(' '.join(['loaded', str(len(dataset_test)), 'sequences']), output)
    Print(' '.join(['elapsed time:', str((end - start))]), output, newline=True)
    # ---- model initialization -------------------------------------------
    start = Print('start initializing a model', output)
    models_list = []
    if (not flag_rnn):
        model = plus_tfm.PLUS_TFM(model_cfg)
    elif (not flag_lm_model):
        model = plus_rnn.PLUS_RNN(model_cfg)
    else:
        model = p_elmo.P_ELMo(model_cfg)
    # models_list entries: [model, idx, frozen, clip_grad, clip_weight]
    models_list.append([model, '', flag_lm_model, flag_rnn, False])
    if flag_lm_model:
        lm_model = p_elmo.P_ELMo_lm(lm_model_cfg)
        models_list.append([lm_model, 'lm', True, False, False])
    if flag_rnn:
        pr_model = mlp.MLP(pr_model_cfg, per_seq=True)
        models_list.append([pr_model, 'pr', False, True, False])
    (params, pr_params) = ([], [])
    for (model, idx, frz, _, _) in models_list:
        if frz:
            continue
        elif (idx != 'pr'):
            params += [p for p in model.parameters() if p.requires_grad]
        else:
            pr_params += [p for p in model.parameters() if p.requires_grad]
    load_models(args, models_list, device, data_parallel, output, tfm_cls=flag_rnn)
    get_loss = (plus_rnn.get_loss if flag_rnn else plus_tfm.get_loss)
    end = Print('end initializing a model', output)
    Print(''.join(['elapsed time:', str((end - start))]), output, newline=True)
    # ---- trainer setup --------------------------------------------------
    start = Print('start setting trainer configurations', output)
    tasks_list = []
    tasks_list.append(['cls', [], ['rho', 'r']])
    if (not flag_lm_model):
        tasks_list.append(['lm', [], ['acc']])
    trainer = Trainer(models_list, get_loss, run_cfg, tasks_list)
    trainer_args = {}
    trainer_args['data_parallel'] = data_parallel
    trainer_args['paired'] = False
    if flag_rnn:
        trainer_args['projection'] = pr_model_cfg.projection
    trainer_args['regression'] = True
    if flag_rnn:
        trainer_args['evaluate_cls'] = plus_rnn.evaluate_cls_protein
    else:
        trainer_args['evaluate_cls'] = plus_tfm.evaluate_cls_protein
    end = Print('end setting trainer configurations', output)
    Print(''.join(['elapsed time:', str((end - start))]), output, newline=True)
    # ---- evaluation -----------------------------------------------------
    start = Print('start evaluating a model', output)
    Print(trainer.get_headline(test=True), output)
    # first pass: regression (cls) task without augmentation
    dataset_test.set_augment(False)
    trainer.set_exec_flags(['cls', 'lm'], [True, False])
    for (b, batch) in enumerate(iterator_test):
        batch = [(t.to(device) if (type(t) is torch.Tensor) else t) for t in batch]
        trainer.evaluate(batch, trainer_args)
        if ((b % 10) == 0):
            print('# cls {:.1%} loss={:.4f}'.format((b / len(iterator_test)), trainer.loss_eval), end='\r', file=sys.stderr)
    print((' ' * 150), end='\r', file=sys.stderr)
    if (not flag_lm_model):
        # second pass: language-model task with augmentation enabled
        dataset_test.set_augment(True)
        trainer.set_exec_flags(['cls', 'lm'], [False, True])
        for (b, batch) in enumerate(iterator_test):
            batch = [(t.to(device) if (type(t) is torch.Tensor) else t) for t in batch]
            trainer.evaluate(batch, trainer_args)
            if ((b % 10) == 0):
                print('# lm {:.1%} loss={:.4f}'.format((b / len(iterator_test)), trainer.loss_eval), end='\r', file=sys.stderr)
        print((' ' * 150), end='\r', file=sys.stderr)
    Print(trainer.get_log(test_idx='Fluorescence', args=trainer_args), output)
    trainer.reset()
    end = Print('end evaluating a model', output)
    Print(''.join(['elapsed time:', str((end - start))]), output, newline=True)
    output.close()
def register_Ns3TvSpectrumTransmitterHelper_methods(root_module, cls):
    """Register the ns3::TvSpectrumTransmitterHelper C++ API on the PyBindGen
    class wrapper *cls*.

    Generated-style binding code: each add_method call mirrors one C++
    signature; keep edits mechanical so the bindings stay in sync.
    """
    # constructors: copy and default
    cls.add_constructor([param('ns3::TvSpectrumTransmitterHelper const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'streamNum')])
    cls.add_method('CreateRegionalTvTransmitters', 'void', [param('ns3::TvSpectrumTransmitterHelper::Region', 'region'), param('ns3::TvSpectrumTransmitterHelper::Density', 'density'), param('double', 'originLatitude'), param('double', 'originLongitude'), param('double', 'maxAltitude'), param('double', 'maxRadius')])
    # Install / InstallAdjacent are overloaded on (nodes) and (nodes, region, channelNumber)
    cls.add_method('Install', 'ns3::NetDeviceContainer', [param('ns3::NodeContainer', 'nodes')])
    cls.add_method('Install', 'ns3::NetDeviceContainer', [param('ns3::NodeContainer', 'nodes'), param('ns3::TvSpectrumTransmitterHelper::Region', 'region'), param('uint16_t', 'channelNumber')])
    cls.add_method('InstallAdjacent', 'ns3::NetDeviceContainer', [param('ns3::NodeContainer', 'nodes')])
    cls.add_method('InstallAdjacent', 'ns3::NetDeviceContainer', [param('ns3::NodeContainer', 'nodes'), param('ns3::TvSpectrumTransmitterHelper::Region', 'region'), param('uint16_t', 'channelNumber')])
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'val')])
    cls.add_method('SetChannel', 'void', [param('ns3::Ptr< ns3::SpectrumChannel >', 'c')])
    return
def test_static_field_field(static_field_mock):
    """StaticFieldReference must expose the wrapped static field via .field."""
    reference = vr.StaticFieldReference(static_field_mock)
    assert reference.field == static_field_mock
class SomeClass():
    """Callable scaler: multiplies its argument by the instance attribute ``q``."""

    # scale factor; must be assigned before use (there is no __init__)
    q: float

    def method(self, a):
        # scale *a* by q (operand order preserved from the original)
        return a * self.q

    def __call__(self, a):
        # calling the instance simply delegates to method()
        return self.method(a)
# NOTE(review): this decorator had been mangled to a bare `.parametrize(...)`
# line (a SyntaxError) — the `@pytest.mark` prefix was stripped, as with other
# decorators in this file; restored it.
@pytest.mark.parametrize('smiles,top_file,name,num_beads', [('N1=C(N)NNC1N', 'valid_GUA.top', 'GUA', 2), ('CCC', 'valid_PRO.top', 'PRO', 1)])
def test_auto_martini_run_smiles(smiles: str, top_file: Path, name: str, num_beads: int):
    """Coarse-graining a molecule built from SMILES yields the expected bead count."""
    (mol, _) = auto_martini.topology.gen_molecule_smi(smiles)
    cg_mol = auto_martini.solver.Cg_molecule(mol, name)
    assert (len(cg_mol.cg_bead_names) == num_beads)
class SubstituteFunction(ExpressionTreeWalker):
    """Expression-tree walker that replaces symbolic functions according to a
    substitution mapping (old function -> new function), including the
    functions wrapped inside derivative operators."""

    def __init__(self, ex, *args):
        """Accept either ``(ex, {old: new, ...})`` or ``(ex, old, new)``."""
        if (len(args) == 2):
            self.substitutions = {args[0]: args[1]}
        elif (len(args) == 1):
            self.substitutions = args[0]
        else:
            raise TypeError('SubstituteFunction takes either one or two arguments.')
        self.ex = ex

    def composition(self, ex, operator):
        # swap the operator when it is in the substitution table; operands
        # are walked recursively either way
        replacement = self.substitutions.get(operator)
        if replacement is None:
            return super().composition(ex, operator)
        return replacement(*[self(operand) for operand in ex.operands()])

    def derivative(self, ex, operator):
        # derivatives wrap an underlying function: substitute that function
        # while keeping the derivative structure intact
        replacement = self.substitutions.get(operator.function())
        if replacement is None:
            return operator(*[self(operand) for operand in ex.operands()])
        return operator.change_function(replacement)(*[self(operand) for operand in ex.operands()])
def concatenate(tensor_list, axis=0):
    """Concatenate symbolic tensors along *axis*.

    Builds a zero tensor of the combined shape and copies each input into its
    slice via set_subtensor (a theano-friendly concatenate)."""
    template = tensor_list[0]
    concat_size = sum(tt.shape[axis] for tt in tensor_list)
    # combined shape: template dims before the axis, total size, dims after
    leading = tuple(template.shape[k] for k in range(axis))
    trailing = tuple(template.shape[k] for k in range(axis + 1, template.ndim))
    out = tensor.zeros(leading + (concat_size,) + trailing)
    offset = 0
    for tt in tensor_list:
        span = (slice(None),) * axis + (slice(offset, offset + tt.shape[axis]),) + (slice(None),) * (template.ndim - axis - 1)
        out = tensor.set_subtensor(out[span], tt)
        offset += tt.shape[axis]
    return out
class ObjMethod():
    """Pickle-based object persistence rooted at a directory path.

    ``dirPath`` is concatenated directly with the object name, so it must
    already end with a path separator. SECURITY: pickle.load can execute
    arbitrary code — only load files this process wrote itself.
    """

    def __init__(self, dirPath):
        self.dir = dirPath

    def save_obj(self, obj, name):
        # highest protocol: most compact and fastest on-disk format
        with open(self.dir + name + '.pkl', 'wb') as handle:
            pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)

    def load_obj(self, name):
        with open(self.dir + name + '.pkl', 'rb') as handle:
            return pickle.load(handle)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.