def read_type_file(type_file):
    all_types = set()
    for line in type_file:
        all_types.add(''.join(line.split()).lower())
    return all_types

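# Hedged usage sketch for read_type_file: any iterable of strings works as the
# "file", so an in-memory buffer is enough to show the normalization (all
# whitespace removed, then lowercased).
import io

types = read_type_file(io.StringIO("Person\nORG ANIZATION\n  Location \n"))
assert types == {'person', 'organization', 'location'}
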
def _cast_to_type(data, dtype):
    if isinstance(data, pd.Series):
        data = data.apply(dtype)
    elif isinstance(data, (np.ndarray, list)):
        data = np.array([dtype(value) for value in data])
    else:
        data = dtype(data)
    return data

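# Hedged usage sketch for _cast_to_type, exercising its three dispatch branches
# (pandas Series, ndarray/list, plain scalar); assumes the usual pd/np aliases.
import numpy as np
import pandas as pd

assert _cast_to_type(pd.Series(['1', '2']), int).tolist() == [1, 2]
assert _cast_to_type([1.7, 2.2], int).dtype == np.array([1, 2]).dtype
assert _cast_to_type('3.5', float) == 3.5
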
def postprocess_args(args):
    if args['retag_method'] is None and 'lang' in args and args['lang'] in RETAG_METHOD:
        args['retag_method'] = RETAG_METHOD[args['lang']]
    if args['retag_method'] is None:
        args['retag_method'] = 'xpos'
    if args['retag_method'] == 'xpos':
        args['retag_xpos'] = True
    elif args['retag_method'] == 'upos':
        args['retag_xpos'] = False
    else:
        raise ValueError('Unknown retag method {}'.format(args['retag_method']))

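# Hedged demo of postprocess_args; RETAG_METHOD here is a stand-in mapping,
# defined only so the sketch runs, not the module's real table.
RETAG_METHOD = {'zh': 'upos'}

args = {'retag_method': None, 'lang': 'zh'}
postprocess_args(args)
assert args == {'retag_method': 'upos', 'lang': 'zh', 'retag_xpos': False}
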
def test_embedding(device):
    from speechbrain.nnet.embedding import Embedding
    # one-hot embedding with a blank index: output dim is num_embeddings - 1 = 39
    blank_id = 39
    size_dict = 40
    emb = Embedding(num_embeddings=size_dict, consider_as_one_hot=True, blank_id=blank_id).to(device)
    inputs = torch.Tensor([10, 5, 2, 0, 39]).to(device).long()
    output = emb(inputs)
    assert output.shape == (5, 39)
    # dense embedding
    embedding_dim = 128
    emb = Embedding(num_embeddings=size_dict, embedding_dim=embedding_dim).to(device)
    inputs = torch.randint(0, 40, (5, 10), device=device)
    output = emb(inputs)
    assert output.shape == (5, 10, 128)
    assert torch.jit.trace(emb, inputs)

def mobilenet_v1(nc, pretrained=False, progress=True, **kwargs):
    net = MobileNetV1ImageNet(nc, **kwargs)
    if pretrained:
        state_dict = torch.load('./models/mobilenet_v1_imagenet.pth', map_location='cpu')
        state_dict = state_dict.get('state_dict')

        def rename(x):
            # map checkpoint parameter names onto the wrapped conv/bn/fc modules
            return (x.replace('module.', '')
                     .replace('conv.', 'conv.conv.')
                     .replace('conv2d.', 'conv2d.conv.')
                     .replace('bn.', 'conv2d.bn.')
                     .replace('bn_0.', 'dw_conv.bn.')
                     .replace('bn_1.', 'conv.bn.')
                     .replace('fc.', 'fc.fc.'))

        state_dict = {rename(k): v for k, v in state_dict.items()}
        net.load_state_dict(state_dict)
    return net

def get_data_loaders(cfg, args):
    tr_dataset, te_dataset = get_datasets(cfg, args)
    train_loader = data.DataLoader(dataset=tr_dataset, batch_size=cfg.train.batch_size,
                                   shuffle=True, num_workers=cfg.num_workers,
                                   drop_last=True, worker_init_fn=init_np_seed)
    test_loader = data.DataLoader(dataset=te_dataset, batch_size=cfg.val.batch_size,
                                  shuffle=False, num_workers=cfg.num_workers,
                                  drop_last=False, worker_init_fn=init_np_seed)
    return {'test_loader': test_loader, 'train_loader': train_loader}

def insert(conn, test_session):
    conn = sqlite3.connect(conn)
    conn.row_factory = sqlite3.Row
    cursor = conn.cursor()
    # both branches issue the same INSERT; only the parameter sources differ
    sql = """INSERT INTO bots
        (name, type, descript, status, stage, dev, eval, end_point, orgId,
         deploymentId, buttonId, created_at, updated_at, version, num_t5_paraphrases,
         num_pegasus_paraphrases, num_intent_utts, num_simulations, max_simulation_rounds,
         has_bot, has_ml) VALUES
        (:name, :type, :descript, :status, :stage, :dev, :eval, :end_point, :orgId,
         :deploymentId, :buttonId, :created_at, :updated_at, :version, :num_t5_paraphrases,
         :num_pegasus_paraphrases, :num_intent_utts, :num_simulations, :max_simulation_rounds, 0, 0)"""
    params = {
        'name': test_session.name, 'type': test_session.type,
        'descript': test_session.descript, 'status': 'new', 'stage': 'config',
        'dev': test_session.dev, 'eval': test_session.eval,
        'created_at': test_session.created_at, 'updated_at': test_session.updated_at,
        'version': test_session.version,
        'num_t5_paraphrases': test_session.num_t5_paraphrases,
        'num_pegasus_paraphrases': test_session.num_pegasus_paraphrases,
        'num_intent_utts': test_session.num_intent_utts,
        'num_simulations': test_session.num_simulations,
        'max_simulation_rounds': test_session.max_simulation_rounds,
    }
    with conn:
        if test_session.type == 'DialogFlow_CX':
            # DialogFlow CX sessions store their credentials in the generic columns
            params.update({'end_point': test_session.cx_credential,
                           'orgId': test_session.project_id,
                           'buttonId': test_session.agent_id,
                           'deploymentId': test_session.location_id})
        else:
            params.update({'end_point': test_session.end_point,
                           'orgId': test_session.orgId,
                           'buttonId': test_session.buttonId,
                           'deploymentId': test_session.deploymentId})
        cursor.execute(sql, params)
    return cursor.lastrowid

class ArkReader(object):
    """Reads Kaldi .ark matrices indexed by a .scp script file."""

    def __init__(self, scp_path):
        self.scp_position = 0
        self.utt_ids = []
        self.scp_data = []
        with open(scp_path, 'r') as fin:
            for line in fin:
                line = line.strip()
                if not line:
                    continue
                utt_id, path_pos = line.split(' ')
                path, pos = path_pos.split(':')
                self.utt_ids.append(utt_id)
                self.scp_data.append((path, pos))

    def shuffle(self):
        # zip() returns an iterator in Python 3, so materialize it before shuffling
        zipped = list(zip(self.utt_ids, self.scp_data))
        random.shuffle(zipped)
        self.utt_ids, self.scp_data = zip(*zipped)
        self.scp_position = 0

    def read_ark(self, ark_file, ark_offset=0):
        ark_read_buffer = open(ark_file, 'rb')
        ark_read_buffer.seek(int(ark_offset), 0)
        # the file is opened in binary mode, so compare against byte literals
        header = struct.unpack('<xcccc', ark_read_buffer.read(5))
        if header[0] != b'B':
            print(ark_file)
            print('Input .ark file is not binary')
            sys.exit(1)
        if header[1] == b'C':
            # compressed matrix ('CM', but not the 'CM2' variant)
            if header[2] == b'M' and header[3] != b'2':
                header_read = struct.unpack('<ffii', ark_read_buffer.read(16))
                if header_read[3] == 0:
                    print('Empty matrix.')
                    sys.exit(1)
                global_header = GlobalHeader(1, header_read)
                return self.read_compress(global_header, ark_read_buffer)
            print('Unsupported format.')
            print('Maybe because of the matrices with 8 or fewer rows.')
            sys.exit(1)
        else:
            _, rows = struct.unpack('<bi', ark_read_buffer.read(5))
            _, cols = struct.unpack('<bi', ark_read_buffer.read(5))
            if header[1] == b'F':
                tmp_mat = np.frombuffer(ark_read_buffer.read(rows * cols * 4), dtype=np.float32)
            elif header[1] == b'D':
                tmp_mat = np.frombuffer(ark_read_buffer.read(rows * cols * 8), dtype=np.float64)
            return np.reshape(tmp_mat, (rows, cols))

    def uint16_to_float(self, global_header, value):
        return global_header.min_value + global_header.range * 1e-05 * value

    def char_to_float(self, p0, p25, p75, p100, value):
        # piecewise-linear dequantization between the stored percentiles
        if value < 64:
            return p0 + (p25 - p0) * value * (1 / 64.0)
        elif value <= 192:
            return p25 + (p75 - p25) * (value - 64) * (1 / 128.0)
        else:
            return p75 + (p100 - p75) * (value - 192) * (1 / 63.0)

    def read_compress(self, header, buf):
        col_header = []
        ark_mat = np.zeros((header.num_rows, header.num_cols))
        for i in range(header.num_cols):
            col_header.append(PerColHeader(struct.unpack('<HHHH', buf.read(8))))
        for i in range(header.num_cols):
            p0 = self.uint16_to_float(header, col_header[i].percentile_0)
            p25 = self.uint16_to_float(header, col_header[i].percentile_25)
            p75 = self.uint16_to_float(header, col_header[i].percentile_75)
            p100 = self.uint16_to_float(header, col_header[i].percentile_100)
            for j in range(header.num_rows):
                value = struct.unpack('<B', buf.read(1))[0]
                ark_mat[j, i] = self.char_to_float(p0, p25, p75, p100, value)
        return ark_mat

    def read_next_utt(self):
        if len(self.scp_data) == 0:
            return None, None, True
        if self.scp_position >= len(self.scp_data):
            looped = True
            self.scp_position = 0
        else:
            looped = False
        self.scp_position += 1
        utt_id = self.utt_ids[self.scp_position - 1]
        utt_data = self.read_utt_data_from_index(self.scp_position - 1)
        return utt_id, utt_data, looped

    def read_next_scp(self):
        if self.scp_position >= len(self.scp_data):
            self.scp_position = 0
        self.scp_position += 1
        return self.utt_ids[self.scp_position - 1]

    def read_previous_scp(self):
        if self.scp_position < 0:
            self.scp_position = len(self.scp_data) - 1
        self.scp_position -= 1
        return self.utt_ids[self.scp_position + 1]

    def read_utt_data_from_id(self, utt_id):
        return self.read_utt_data_from_index(self.utt_ids.index(utt_id))

    def read_utt_data_from_index(self, index):
        return self.read_ark(self.scp_data[index][0], self.scp_data[index][1])

    def split(self):
        # keep the tail of the list from the current position onward
        self.scp_data = self.scp_data[self.scp_position:]
        self.utt_ids = self.utt_ids[self.scp_position:]

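# Hedged usage sketch for ArkReader: 'feats.scp' is a hypothetical Kaldi script
# file mapping utterance ids to "path:offset" entries; the loop walks the
# archive once and stops when the reader wraps around.
reader = ArkReader('feats.scp')
while True:
    utt_id, feats, looped = reader.read_next_utt()
    if looped or utt_id is None:
        break
    print(utt_id, feats.shape)
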
def test_f1_macro_1d_np_array():
    y_true = np.array([1, 2, 3, 4])
    y_pred = np.array([1, 2, 3, 4])
    assert 1 == f1(y_true, y_pred, 'macro')

class SolverCallbackObj(ctypes.c_void_p):
    def __init__(self, solver):
        self._as_parameter_ = solver

    def from_param(obj):
        # ctypes looks this up directly on the class, so no self/cls parameter is used
        return obj

def squeeze(input, downscale_factor=2):
    # space-to-depth: fold each (downscale_factor x downscale_factor) spatial
    # block into the channel dimension
    batch_size, in_channels, in_height, in_width = input.shape
    out_channels = in_channels * downscale_factor ** 2
    out_height = in_height // downscale_factor
    out_width = in_width // downscale_factor
    input_view = input.reshape(batch_size, in_channels, out_height, downscale_factor,
                               out_width, downscale_factor)
    output = input_view.permute(0, 1, 3, 5, 2, 4)
    return output.reshape(batch_size, out_channels, out_height, out_width)

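# Hedged shape check for squeeze: the space-to-depth move trades spatial
# resolution for channels and is a pure rearrangement, so values are preserved.
import torch

x = torch.arange(2 * 3 * 8 * 8, dtype=torch.float32).reshape(2, 3, 8, 8)
y = squeeze(x, downscale_factor=2)
assert y.shape == (2, 12, 4, 4)
assert torch.equal(y.sum(), x.sum())
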
class TestTupleConstraintTag:
    @pytest.fixture
    def instance(self):
        return TupleConstraintTag()

    def test_no_matching_tuples(self, instance):
        target = [['a', 'b'], ['c', 'd']]
        prediction = [['x', 'y'], ['z', 'w']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert result == 0.0
        target = [['a', 'b'], ['c', 'd']]
        prediction = [['a', 'y'], ['c', 'w']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert result == 0.0

    def test_all_matching_tuples(self, instance):
        target = [['a', 'b'], ['c', 'd']]
        prediction = [['a', 'b'], ['c', 'd']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert result == 1.0

    def test_partial_matching_tuples(self, instance):
        target = [['a', 'b'], ['c', 'd']]
        prediction = [['a', 'b'], ['a', 'b'], ['c', 'd']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert result == 0.5

    def test_empty_tables(self, instance):
        target = []
        prediction = []
        result = instance.evaluate_single_test_metric(target, prediction)
        assert result == 1.0

    def test_single_tuple_table(self, instance):
        target = [['a', 'b']]
        prediction = [['a', 'b']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert result == 1.0

    def test_no_tuples_in_prediction(self, instance):
        target = [['a', 'b'], ['c', 'd']]
        prediction = []
        result = instance.evaluate_single_test_metric(target, prediction)
        assert result == 0.0

    def test_no_tuples_in_target(self, instance):
        target = []
        prediction = [['a', 'b'], ['c', 'd']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert result == 0.0

    def test_duplicate_tuples_in_target(self, instance):
        target = [['a', 'b'], ['a', 'b'], ['c', 'd']]
        prediction = [['a', 'b'], ['c', 'd']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert result == 0.5

    def test_all_matching_tuples_diff_order(self, instance):
        target = [['a', 'b'], ['c', 'd']]
        prediction = [['c', 'd'], ['a', 'b']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert result == 1.0
        target = [['a', 'b'], ['c', 'd']]
        prediction = [['d', 'c'], ['b', 'a']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert result == 1.0

    def test_special_case(self, instance):
        target = [[1, 2], ['c', 'd']]
        prediction = [[1, 2], ['c', 'd']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert result == 1.0
        target = [[1, 2], ['c', 'd']]
        prediction = [[1, 2], ['c', 'd'], ['a', 'b']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert result == 1.0
        target = [[1, 2], ['c', 'd']]
        prediction = [[1, 2], ['d', 'c'], ['a', 'b']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert result == 1.0
        target = [[1, 2], [1, 'd']]
        prediction = [[1, 2], ['d', 1], ['a', 'b']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert result == 1.0

    def test_evaluate_single_no_special_case_time(self, instance):
        target = np.random.rand(20, 1000).tolist()
        prediction = np.random.rand(20, 1000).tolist()
        start = time.time()
        _ = instance.evaluate_single_no_special_case(target, prediction)
        end = time.time()
        # elapsed time, not start - end (which is always negative)
        assert end - start < 0.001

def init(variant, ckpt, base='', prefix='', mode=DATASET_MODES.val):
    runner, ckpt_path = make_runner(variant, ckpt, base, prefix)
    return init_by_ckpt(ckpt_path, mode)

class SLinear(nn.Module):
    def __init__(self, in_features, out_features, Q_l, bias=True):
        super(SLinear, self).__init__()
        self.Q_l = Q_l
        self.qlevels = Q_l.size(0)
        self.linear = nn.Linear(in_features, out_features * self.qlevels, bias=bias)

    def ucollapse(self, x):
        # collapse the per-level outputs against the quantization levels Q_l
        x = x.view(x.size(0), -1, self.qlevels)
        return x.matmul(self.Q_l)

    def forward(self, x):
        x = self.linear(x)
        return self.ucollapse(x)

def normal_kl(mean1, logvar1, mean2, logvar2):
    """KL divergence between two diagonal Gaussians given means and log-variances."""
    tensor = None
    for obj in (mean1, logvar1, mean2, logvar2):
        if isinstance(obj, torch.Tensor):
            tensor = obj
            break
    assert tensor is not None, 'at least one argument must be a Tensor'
    logvar1, logvar2 = [x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
                        for x in (logvar1, logvar2)]
    return 0.5 * (-1.0 + logvar2 - logvar1 + torch.exp(logvar1 - logvar2)
                  + (mean1 - mean2) ** 2 * torch.exp(-logvar2))

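# Hedged sanity check for normal_kl: KL(N(m, v) || N(m, v)) is zero, and
# KL(N(0, 1) || N(1, 1)) = 0.5 analytically.
import torch

m = torch.zeros(4)
zero = normal_kl(m, m, m, m)
assert torch.allclose(zero, torch.zeros(4))

half = normal_kl(torch.zeros(1), torch.zeros(1), torch.ones(1), torch.zeros(1))
assert torch.allclose(half, torch.tensor([0.5]))
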
class TFMobileBertForNextSentencePrediction:
    def __init__(self, *args, **kwargs):
        requires_tf(self)

def check_valid_prior(filename):
    from Util import load_txt_vector
    v = load_txt_vector(filename)
    v = numpy.array(v)
    assert v.ndim == 1
    assert all(v < 0.0), 'log space assumed'
    v = numpy.exp(v)
    tot = numpy.sum(v)
    assert numpy.isclose(tot, 1.0, atol=0.0001)

@utils.task_wrapper
def evaluate(cfg: DictConfig) -> Tuple[dict, dict]:
    assert cfg.ckpt_path
    log.info(f'Instantiating datamodule <{cfg.datamodule._target_}>')
    datamodule: LightningDataModule = hydra.utils.instantiate(cfg.datamodule)
    log.info(f'Instantiating model <{cfg.model._target_}>')
    model: LightningModule = hydra.utils.instantiate(cfg.model)
    log.info('Instantiating loggers...')
    logger: List[LightningLoggerBase] = utils.instantiate_loggers(cfg.get('logger'))
    log.info(f'Instantiating trainer <{cfg.trainer._target_}>')
    trainer: Trainer = hydra.utils.instantiate(cfg.trainer, logger=logger)
    object_dict = {'cfg': cfg, 'datamodule': datamodule, 'model': model,
                   'logger': logger, 'trainer': trainer}
    if logger:
        log.info('Logging hyperparameters!')
        utils.log_hyperparameters(object_dict)
    log.info('Starting testing!')
    trainer.test(model=model, datamodule=datamodule, ckpt_path=cfg.ckpt_path)
    metric_dict = trainer.callback_metrics
    return metric_dict, object_dict

def test_error_handling():
    def func(a, b):
        return a + b

    deps = wn.causal_graphs.trace_dependencies(func, (1.0, 0.2))
    assert set(deps[0]) == {0, 1}
    deps = wn.causal_graphs.trace_dependencies(func, (1.0, 2))
    assert set(deps[0]) == {0, 1}
    a = np.random.rand(3, 3).astype(np.float32)
    b = np.random.rand(3, 3).astype(np.int32)
    deps = wn.causal_graphs.trace_dependencies(func, (a, b))
    assert set(deps[0]) == {0, 1}
    a = np.random.rand(3, 3).astype(np.int32)
    b = np.random.rand(3, 3).astype(np.int32)
    deps = wn.causal_graphs.trace_dependencies(func, (a, b))
    assert set(deps[0]) == {0, 1}
    with pytest.raises(ValueError):
        wn.causal_graphs.trace_dependencies(func, (1.0, 'str'))

def my_collate_bert(batch):
    (input_ids, word_indexer, input_aspect_ids, aspect_indexer, input_cat_ids,
     segment_ids, dep_tag_ids, pos_class, text_len, aspect_len, sentiment,
     dep_rel_ids, dep_heads, aspect_positions, dep_dir_ids) = zip(*batch)
    text_len = torch.tensor(text_len)
    aspect_len = torch.tensor(aspect_len)
    sentiment = torch.tensor(sentiment)
    # pad all variable-length fields (the indexers pad with 1, everything else with 0)
    input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
    input_aspect_ids = pad_sequence(input_aspect_ids, batch_first=True, padding_value=0)
    input_cat_ids = pad_sequence(input_cat_ids, batch_first=True, padding_value=0)
    segment_ids = pad_sequence(segment_ids, batch_first=True, padding_value=0)
    word_indexer = pad_sequence(word_indexer, batch_first=True, padding_value=1)
    aspect_indexer = pad_sequence(aspect_indexer, batch_first=True, padding_value=1)
    aspect_positions = pad_sequence(aspect_positions, batch_first=True, padding_value=0)
    dep_tag_ids = pad_sequence(dep_tag_ids, batch_first=True, padding_value=0)
    dep_dir_ids = pad_sequence(dep_dir_ids, batch_first=True, padding_value=0)
    pos_class = pad_sequence(pos_class, batch_first=True, padding_value=0)
    dep_rel_ids = pad_sequence(dep_rel_ids, batch_first=True, padding_value=0)
    dep_heads = pad_sequence(dep_heads, batch_first=True, padding_value=0)
    # reorder the whole batch by descending text length
    _, sorted_idx = text_len.sort(descending=True)
    input_ids = input_ids[sorted_idx]
    input_aspect_ids = input_aspect_ids[sorted_idx]
    word_indexer = word_indexer[sorted_idx]
    aspect_indexer = aspect_indexer[sorted_idx]
    input_cat_ids = input_cat_ids[sorted_idx]
    segment_ids = segment_ids[sorted_idx]
    aspect_positions = aspect_positions[sorted_idx]
    dep_tag_ids = dep_tag_ids[sorted_idx]
    dep_dir_ids = dep_dir_ids[sorted_idx]
    pos_class = pos_class[sorted_idx]
    text_len = text_len[sorted_idx]
    aspect_len = aspect_len[sorted_idx]
    sentiment = sentiment[sorted_idx]
    dep_rel_ids = dep_rel_ids[sorted_idx]
    dep_heads = dep_heads[sorted_idx]
    return (input_ids, word_indexer, input_aspect_ids, aspect_indexer, input_cat_ids,
            segment_ids, dep_tag_ids, pos_class, text_len, aspect_len, sentiment,
            dep_rel_ids, dep_heads, aspect_positions, dep_dir_ids)

def train(train_data, val_data, pro_num, timestamp, timespan, model, optimizer,
          logger, saver, num_epochs, batch_size, grad_clip):
    criterion = nn.BCEWithLogitsLoss()
    step = 0
    metrics = Metrics()
    corr_data = get_corr_data(pro_num)
    for epoch in range(num_epochs):
        train_batches = prepare_batches(train_data, batch_size)
        val_batches = prepare_batches(val_data, batch_size)
        for item_inputs, label_inputs, item_ids, timestamp, labels in train_batches:
            # gather pairwise correlations for the items in this batch
            rel = corr_data[(item_ids - 1).unsqueeze(1).repeat(1, item_ids.shape[-1], 1),
                            (item_inputs - 1).unsqueeze(-1).repeat(1, 1, item_inputs.shape[-1])]
            item_inputs = item_inputs.cuda()
            time = computeRePos(timestamp, timespan)
            label_inputs = label_inputs.cuda()
            item_ids = item_ids.cuda()
            preds, weights = model(item_inputs, label_inputs, item_ids,
                                   torch.Tensor(rel).cuda(), time.cuda())
            loss = compute_loss(preds, labels.cuda(), criterion)
            preds = torch.sigmoid(preds).detach().cpu()
            train_auc, train_acc = compute_auc(preds, labels)
            model.zero_grad()
            loss.backward()
            clip_grad_norm_(model.parameters(), grad_clip)
            optimizer.step()
            step += 1
            metrics.store({'loss/train': loss.item()})
            metrics.store({'auc/train': train_auc})
            if step == len(train_batches) - 1:
                torch.save(weights, 'weight_tensor_rel')
            if step % 1000 == 0:
                logger.log_scalars(metrics.average(), step)
        # validate once per epoch
        model.eval()
        for item_inputs, label_inputs, item_ids, timestamp, labels in val_batches:
            rel = corr_data[(item_ids - 1).unsqueeze(1).repeat(1, item_ids.shape[-1], 1),
                            (item_inputs - 1).unsqueeze(-1).repeat(1, 1, item_inputs.shape[-1])]
            item_inputs = item_inputs.cuda()
            time = computeRePos(timestamp, timespan)
            label_inputs = label_inputs.cuda()
            item_ids = item_ids.cuda()
            with torch.no_grad():
                preds, weights = model(item_inputs, label_inputs, item_ids,
                                       torch.Tensor(rel).cuda(), time.cuda())
                preds = torch.sigmoid(preds).cpu()
            val_auc, val_acc = compute_auc(preds, labels)
            metrics.store({'auc/val': val_auc, 'acc/val': val_acc})
        model.train()
        average_metrics = metrics.average()
        logger.log_scalars(average_metrics, step)
        print(average_metrics)
        stop = saver.save(average_metrics['auc/val'], model)
        if stop:
            break

class Vimeo90KDataset(data.Dataset):
    def __init__(self, opt):
        super(Vimeo90KDataset, self).__init__()
        self.opt = opt
        self.gt_root, self.lq_root = Path(opt['dataroot_gt']), Path(opt['dataroot_lq'])
        with open(opt['meta_info_file'], 'r') as fin:
            self.keys = [line.split(' ')[0] for line in fin]
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        self.is_lmdb = False
        if self.io_backend_opt['type'] == 'lmdb':
            self.is_lmdb = True
            self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root]
            self.io_backend_opt['client_keys'] = ['lq', 'gt']
        # neighboring frame indices, centered inside the 7-frame septuplet
        self.neighbor_list = [i + (9 - opt['num_frame']) // 2 for i in range(opt['num_frame'])]
        self.random_reverse = opt['random_reverse']
        logger = get_root_logger()
        logger.info(f'Random reverse is {self.random_reverse}.')

    def __getitem__(self, index):
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
        if self.random_reverse and random.random() < 0.5:
            self.neighbor_list.reverse()
        scale = self.opt['scale']
        gt_size = self.opt['gt_size']
        key = self.keys[index]
        clip, seq = key.split('/')
        # ground truth: the center frame (im4)
        if self.is_lmdb:
            img_gt_path = f'{key}/im4'
        else:
            img_gt_path = self.gt_root / clip / seq / 'im4.png'
        img_bytes = self.file_client.get(img_gt_path, 'gt')
        img_gt = imfrombytes(img_bytes, float32=True)
        # low-quality neighboring frames
        img_lqs = []
        for neighbor in self.neighbor_list:
            if self.is_lmdb:
                img_lq_path = f'{clip}/{seq}/im{neighbor}'
            else:
                img_lq_path = self.lq_root / clip / seq / f'im{neighbor}.png'
            img_bytes = self.file_client.get(img_lq_path, 'lq')
            img_lq = imfrombytes(img_bytes, float32=True)
            img_lqs.append(img_lq)
        img_gt, img_lqs = paired_random_crop(img_gt, img_lqs, gt_size, scale, img_gt_path)
        # augment LQ frames and GT together, then split them again
        img_lqs.append(img_gt)
        img_results = augment(img_lqs, self.opt['use_flip'], self.opt['use_rot'])
        img_results = img2tensor(img_results)
        img_lqs = torch.stack(img_results[0:-1], dim=0)
        img_gt = img_results[-1]
        return {'lq': img_lqs, 'gt': img_gt, 'key': key}

    def __len__(self):
        return len(self.keys)

def Contrast(img, v):
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Contrast(img).enhance(v)

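# Hedged usage sketch for Contrast on a synthetic image: v=1.0 leaves the image
# unchanged, smaller v flattens it toward grey.
import PIL.Image
import PIL.ImageEnhance

img = PIL.Image.new('RGB', (32, 32), color=(200, 40, 90))
same = Contrast(img, 1.0)
low = Contrast(img, 0.1)
assert same.size == low.size == (32, 32)
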
def restore_source_model(saved_pb_name, grad_dict=None):
    print('restoring', saved_pb_name)
    with open(saved_pb_name + '.pickle', 'rb') as f:
        info = pickle.load(f)
    print(info)
    sess = K.get_session()
    print('restoring frozen graph def')
    with open(saved_pb_name + '.pb', 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')
    # collect the names of all tensors the saved functions need
    tensor_search_name = set()
    for gname, (input_names, output_names) in info.items():
        tensor_search_name = tensor_search_name.union(set(input_names + output_names))
    found_tensors = {}
    ops = tf.get_default_graph().get_operations()
    for op in ops:
        if len(op.outputs) != 1:
            continue
        if op.outputs[0].name in tensor_search_name:
            found_tensors[op.outputs[0].name] = op.outputs[0]
    flag = True
    for t in tensor_search_name:
        if t not in found_tensors:
            print('Tensor not found:', t)
            flag = False
    if not flag:
        return
    print('all nodes found')
    for gname, (input_names, output_names) in info.items():
        input_list = [found_tensors[tname] for tname in input_names]
        output_list = [found_tensors[tname] for tname in output_names]
        print('{0}\n  Input: {1}\n  Output: {2}\n'.format(gname, input_list, output_list))
        grad_dict[gname] = (input_list, output_list, K.function(input_list, output_list))
    print('restore finished')

def line_search_wolfe1(f, fprime, xk, pk, gfk=None, old_fval=None, old_old_fval=None,
                       args=(), c1=0.0001, c2=0.9, amax=50, amin=1e-08, xtol=1e-14):
    if gfk is None:
        gfk = fprime(xk)
    if isinstance(fprime, tuple):
        # fprime given as (f, epsilon): gradients come from finite differences
        eps = fprime[1]
        fprime = fprime[0]
        newargs = (f, eps) + args
        gradient = False
    else:
        newargs = args
        gradient = True
    gval = [gfk]
    gc = [0]
    fc = [0]

    def phi(s):
        fc[0] += 1
        return f(xk + s * pk, *args)

    def derphi(s):
        gval[0] = fprime(xk + s * pk, *newargs)
        if gradient:
            gc[0] += 1
        else:
            fc[0] += len(xk) + 1
        return np.dot(gval[0], pk)

    derphi0 = np.dot(gfk, pk)
    stp, fval, old_fval = scalar_search_wolfe1(phi, derphi, old_fval, old_old_fval,
                                               derphi0, c1=c1, c2=c2, amax=amax,
                                               amin=amin, xtol=xtol)
    return stp, fc[0], gc[0], fval, old_fval, gval[0]

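# Hedged demo of line_search_wolfe1 on a simple quadratic. scalar_search_wolfe1
# is assumed to be scipy's implementation (scipy.optimize._linesearch in recent
# releases, scipy.optimize.linesearch in older ones).
import numpy as np
from scipy.optimize._linesearch import scalar_search_wolfe1

f = lambda x: float(np.dot(x, x))
fprime = lambda x: 2 * x
xk = np.array([3.0, -4.0])
pk = -fprime(xk)  # steepest descent direction
stp, fc, gc, fval, old_fval, gval = line_search_wolfe1(f, fprime, xk, pk)
assert stp is not None and fval < f(xk)
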
def test_no_abstract_class():
    cluster = generate_test_cluster('tests.fixtures.cluster.abstract')
    assert len(cluster.accessible_objects_under_test) == 1
    assert len(cluster.generators) == 3
    assert len(cluster.modifiers) == 1

def _iter_module_files():
    for module in list(sys.modules.values()):
        if module is None:
            continue
        filename = getattr(module, '__file__', None)
        if filename:
            if os.path.isdir(filename) and os.path.exists(os.path.join(filename, '__init__.py')):
                filename = os.path.join(filename, '__init__.py')
            old = None
            # walk up until an actual file is found; the else branch only runs
            # when the while loop finishes without breaking
            while not os.path.isfile(filename):
                old = filename
                filename = os.path.dirname(filename)
                if filename == old:
                    break
            else:
                if filename[-4:] in ('.pyc', '.pyo'):
                    filename = filename[:-1]
                yield filename

class DeterministicMLPPolicy(Policy, LayersPowered, Serializable):
    def __init__(self, name, env_spec, hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.relu, output_nonlinearity=tf.nn.tanh,
                 prob_network=None, bn=False):
        Serializable.quick_init(self, locals())
        with tf.variable_scope(name):
            if prob_network is None:
                prob_network = MLP(input_shape=(env_spec.observation_space.flat_dim,),
                                   output_dim=env_spec.action_space.flat_dim,
                                   hidden_sizes=hidden_sizes,
                                   hidden_nonlinearity=hidden_nonlinearity,
                                   output_nonlinearity=output_nonlinearity,
                                   name='prob_network')
            self._l_prob = prob_network.output_layer
            self._l_obs = prob_network.input_layer
            self._f_prob = tensor_utils.compile_function(
                [prob_network.input_layer.input_var],
                L.get_output(prob_network.output_layer, deterministic=True))
        self.prob_network = prob_network
        super(DeterministicMLPPolicy, self).__init__(env_spec)
        LayersPowered.__init__(self, [prob_network.output_layer])

    @property
    def vectorized(self):
        return True

    def get_action(self, observation):
        flat_obs = self.observation_space.flatten(observation)
        action = self._f_prob([flat_obs])[0]
        return action, dict()

    def get_actions(self, observations):
        flat_obs = self.observation_space.flatten_n(observations)
        actions = self._f_prob(flat_obs)
        return actions, dict()

    def get_action_sym(self, obs_var):
        return L.get_output(self.prob_network.output_layer, obs_var)

def cb_sign2map(a, var, index=None):
    ret = {'varname': a}
    ret['varname_i'] = ret['varname']
    ret['ctype'] = getctype(var)
    if ret['ctype'] in c2capi_map:
        ret['atype'] = c2capi_map[ret['ctype']]
    if ret['ctype'] in cformat_map:
        ret['showvalueformat'] = '%s' % cformat_map[ret['ctype']]
    if isarray(var):
        ret = dictappend(ret, getarrdims(a, var))
    ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var)
    if hasnote(var):
        ret['note'] = var['note']
        var['note'] = ['See elsewhere.']
    return ret

class Subsets_s(Parent):
    element_class = Set_object_enumerated

    def __init__(self, s):
        Parent.__init__(self, category=EnumeratedSets().Finite())
        if s not in EnumeratedSets():
            from sage.sets.finite_enumerated_set import FiniteEnumeratedSet
            L = list(uniq(s))
            s = FiniteEnumeratedSet(L)
        self._s = s

    def _ls(self):
        return self._s.list()

    def underlying_set(self):
        return self.element_class(self._s)

    def __eq__(self, other):
        if self.__class__ != other.__class__:
            return False
        return self._s == other._s

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(self._s)

    def _repr_(self):
        return 'Subsets of {}'.format(self._s)

    def __contains__(self, value):
        if value not in Sets():
            return False
        return all(v in self._s for v in value)

    def cardinality(self):
        return Integer(1) << self._s.cardinality()

    __len__ = cardinality

    def first(self):
        return self.element_class([])

    def last(self):
        return self.element_class(self._s)

    def __iter__(self):
        k = ZZ_0
        while k <= self._s.cardinality():
            for ss in Subsets_sk(self._s, k)._fast_iterator():
                yield self.element_class(ss)
            k += 1

    def random_element(self):
        k = ZZ.random_element(0, self.cardinality())
        return self.unrank(k)

    def rank(self, sub):
        if sub not in Sets():
            ssub = Set(sub)
            if len(sub) != len(ssub):
                raise ValueError('repeated elements in {}'.format(sub))
            sub = ssub
        try:
            index_list = sorted(self._s.rank(x) for x in sub)
        except (ValueError, IndexError):
            raise ValueError('{} is not a subset of {}'.format(Set(sub), self._s))
        n = self._s.cardinality()
        # subsets are ranked by size first, then by combination rank within a size
        r = sum(binomial(n, i) for i in range(len(index_list)))
        return r + combination.rank(index_list, n)

    def unrank(self, r):
        r = Integer(r)
        if r >= self.cardinality() or r < 0:
            raise IndexError('index out of range')
        else:
            k = ZZ_0
            n = self._s.cardinality()
            bin = Integer(1)
            while r >= bin:
                r -= bin
                k += 1
                bin = binomial(n, k)
            return self.element_class([self._s.unrank(i) for i in combination.from_rank(r, n, k)])

    def __call__(self, el):
        if not isinstance(el, Element):
            return self._element_constructor_(el)
        else:
            return Parent.__call__(self, el)

    def _element_constructor_(self, X):
        e = self.element_class(X)
        if e not in self:
            raise ValueError('{} not in {}'.format(e, self))
        return e

    def _an_element_(self):
        return self.unrank(self.cardinality() // 2)

    def lattice(self):
        S = self.underlying_set()
        return S.subsets_lattice()

@app.callback(Output('outlet-gender-heatmap', 'figure'),
              [Input('topic-data', 'data')])
def update_gender_heatmap(data):
    if data is None:
        return {'data': []}
    dff, y_labels = construct_outlet_gender_DF(data)
    width = 35 * len(dff.columns.tolist()) + 467
    zmax = max(dff.max())
    return {
        'data': [{
            'type': 'heatmap',
            'z': dff.values.tolist(),
            'y': y_labels,
            'x': dff.columns.tolist(),
            'xgap': 3, 'ygap': 3,
            'colorscale': [[0, 'rgb(0, 77, 114)'], [0.5, 'rgb(255, 255, 255)'], [1, 'rgb(175, 24, 88)']],
            'zmid': 0, 'zmin': -zmax, 'zmax': zmax,
            'showscale': True,
            'hovertemplate': '%{x}<br>%{y}<br>Gender prominence: %{z:.3f}<extra></extra>',
            'colorbar': {'x': 1.05, 'thickness': 25, 'len': 0.9, 'tickmode': 'array',
                         'tickvals': [-zmax + 0.01, 0, zmax - 0.01],
                         'ticktext': ['Male<br>prominence', 'Neutral', 'Female<br>prominence']},
        }],
        'layout': {
            'font': {'size': 14}, 'height': 600, 'width': width,
            'xaxis': {'side': 'top', 'gridcolor': 'rgba(0, 0, 0, 0)', 'tickangle': -35.0, 'ticks': 'outside'},
            'yaxis': {'side': 'left', 'gridcolor': 'rgba(0, 0, 0, 0)', 'ticks': 'outside'},
            'margin': {'l': 350, 'r': 80, 't': 120, 'b': 30},
        },
    }

def dot(x, y, sparse=False):
    if sparse:
        return tf.sparse_tensor_dense_matmul(x, y)
    else:
        return tf.matmul(x, y)

class FunctionalLinearReluModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = FunctionalLinear()

    def forward(self, x):
        x = self.linear(x)
        x = F.relu(x)
        return x

class LazyFrames(object):
    def __init__(self, frames):
        # store the individual frames and defer concatenation until an array is
        # actually requested, so overlapping observations can share memory
        self._frames = frames
        self._out = None

    def _force(self):
        if self._out is None:
            self._out = np.concatenate(self._frames, axis=-1)
            self._frames = None
        return self._out

    def __array__(self, dtype=None):
        out = self._force()
        if dtype is not None:
            out = out.astype(dtype)
        return out

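# Hedged usage sketch for LazyFrames: the frames are concatenated along the
# last axis only when np.array()/np.asarray() triggers __array__.
import numpy as np

frames = [np.zeros((84, 84, 1), dtype=np.uint8) for _ in range(4)]
lazy = LazyFrames(frames)
stacked = np.array(lazy)
assert stacked.shape == (84, 84, 4)
assert np.asarray(lazy, dtype=np.float32).dtype == np.float32
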
class Attention_Enhanced_TPS(nn.Module):
    def __init__(self, rectified_img_size, point_size):
        super().__init__()
        self.eps = 1e-06
        self.thela = 0.5
        self.point_size = point_size
        self.point_y = point_size[0]
        self.point_x = point_size[1]
        self.num_fiducial = self.point_y * self.point_x
        self.rectified_img_height = rectified_img_size[0]
        self.rectified_img_width = rectified_img_size[1]
        self.C = self._build_C()
        self.P = self._build_P(self.rectified_img_width, self.rectified_img_height)
        self.register_buffer('hat_C', torch.tensor(self._build_hat_C(self.num_fiducial, self.C)).float())
        self.register_buffer('P_hat', torch.tensor(self._build_P_hat(self.num_fiducial, self.C, self.P)).float())

    def _build_C(self):
        # fiducial control points on a regular grid in [0, 1] x [0, 1]
        ctrl_pts_x = np.linspace(0.5, self.point_x - 0.5, num=int(self.point_x)) / self.point_x
        ctrl_pts_y = np.linspace(0.5, self.point_y - 0.5, num=int(self.point_y)) / self.point_y
        C = np.stack(np.meshgrid(ctrl_pts_x, ctrl_pts_y), axis=2).reshape([-1, 2])
        return C

    def _build_hat_C(self, num_fiducial, C):
        # build the TPS system matrix delta_C and return its inverse
        hat_C = np.zeros((num_fiducial, num_fiducial), dtype=float)
        for i in range(0, num_fiducial):
            for j in range(i, num_fiducial):
                r = np.linalg.norm(C[i] - C[j])
                hat_C[i, j] = r
                hat_C[j, i] = r
        np.fill_diagonal(hat_C, 1)
        hat_C = (hat_C ** 2) * np.log(hat_C)
        delta_C = np.concatenate([
            np.concatenate([np.ones((num_fiducial, 1)), C, hat_C], axis=1),
            np.concatenate([np.zeros((2, 3)), np.transpose(C)], axis=1),
            np.concatenate([np.zeros((1, 3)), np.ones((1, num_fiducial))], axis=1),
        ], axis=0)
        inv_delta_C = np.linalg.inv(delta_C)
        return inv_delta_C

    def build_inv_delta_C(self, hat_C, cc_score, device):
        B, num_fiducial, _ = hat_C.size()
        C = torch.tensor(self.C).float().to(device).unsqueeze(0).repeat(B, 1, 1)
        # modulate the radial-basis terms by the attention score
        hat_C = hat_C * (cc_score * 0.1 + 1)
        delta_C = torch.cat([
            torch.cat([torch.ones((B, num_fiducial, 1)).to(device), C, hat_C], dim=2),
            torch.cat([torch.zeros((B, 2, 3)).to(device), C.transpose(2, 1)], dim=2),
            torch.cat([torch.zeros((B, 1, 3)).to(device), torch.ones((B, 1, num_fiducial)).to(device)], dim=2),
        ], dim=1)
        inv_delta_C = torch.inverse(delta_C)
        return inv_delta_C

    def _build_P(self, rectified_img_width, rectified_img_height):
        rectified_img_grid_x = np.linspace(0.5, rectified_img_width - 0.5,
                                           num=int(rectified_img_width)) / rectified_img_width
        rectified_img_grid_y = np.linspace(0.5, rectified_img_height - 0.5,
                                           num=int(rectified_img_height)) / rectified_img_height
        P = np.stack(np.meshgrid(rectified_img_grid_x, rectified_img_grid_y), axis=2)
        return P.reshape([-1, 2])

    def _build_P_hat(self, num_fiducial, C, P):
        n = P.shape[0]
        P_tile = np.tile(np.expand_dims(P, axis=1), (1, num_fiducial, 1))
        C_tile = np.expand_dims(C, axis=0)
        P_diff = P_tile - C_tile
        rbf_norm = np.linalg.norm(P_diff, ord=2, axis=2, keepdims=False)
        P_hat = np.multiply(np.square(rbf_norm), np.log(rbf_norm + self.eps))
        return P_hat

    def P_hat_score_process(self, P_hat, pc_score, device):
        B, n, _ = pc_score.size()
        P = torch.tensor(self.P).float().to(device).unsqueeze(0).repeat(B, 1, 1)
        P_hat = P_hat * (pc_score * self.thela + 1)
        P_hat = torch.cat([torch.ones((B, n, 1)).to(device), P, P_hat], dim=2)
        return P_hat

    def build_P_prime(self, batch_C_prime, pc_score, device='cuda'):
        batch_size = batch_C_prime.size(0)
        batch_inv_delta_C = self.hat_C.to(device).repeat(batch_size, 1, 1)
        batch_P_hat = self.P_hat.repeat(batch_size, 1, 1)
        batch_P_hat = self.P_hat_score_process(batch_P_hat, pc_score, device)
        batch_C_prime_with_zeros = torch.cat((batch_C_prime, torch.zeros(batch_size, 3, 2).float().to(device)), dim=1)
        batch_T = torch.bmm(batch_inv_delta_C, batch_C_prime_with_zeros)
        batch_P_prime = torch.bmm(batch_P_hat, batch_T)
        return batch_P_prime

@test_utils.test(arch=ti.cpu)
def test_init_bad_arg():
    with pytest.raises(KeyError):
        ti.init(_test_mode=True, debug=True, foo_bar=233)

class SeedingConfiguration:
    seed: int = time.time_ns()  # evaluated once, at class-definition time
    constant_seeding: bool = True
    initial_population_seeding: bool = False
    initial_population_data: str = ''
    seeded_testcases_reuse_probability: float = 0.9
    initial_population_mutations: int = 0
    dynamic_constant_seeding: bool = True
    seeded_primitives_reuse_probability: float = 0.2
    seeded_dynamic_values_reuse_probability: float = 0.6
    seed_from_archive: bool = False
    seed_from_archive_probability: float = 0.2
    seed_from_archive_mutations: int = 3
    max_dynamic_length: int = 1000
    max_dynamic_pool_size: int = 50

def make_spline_knot_matrix(n, order, mode='mirror'):
    knot_values = get_spline_knot_values(order)
    matrix = np.zeros((n, n))
    # fill the symmetric band with the knot values
    for diag, knot_value in enumerate(knot_values):
        indices = np.arange(diag, n)
        if diag == 0:
            matrix[indices, indices] = knot_value
        else:
            matrix[indices, indices - diag] = knot_value
            matrix[indices - diag, indices] = knot_value
    knot_values_sum = knot_values[0] + 2 * sum(knot_values[1:])
    if mode == 'mirror':
        start, step = 1, 1
    elif mode == 'reflect':
        start, step = 0, 1
    elif mode == 'wrap':
        start, step = -1, -1
    else:
        raise ValueError('unsupported mode {}'.format(mode))
    # fold the out-of-range band entries back in according to the boundary mode
    for row in range(len(knot_values) - 1):
        for idx, knot_value in enumerate(knot_values[row + 1:]):
            matrix[row, start + step * idx] += knot_value
            matrix[-row - 1, -start - 1 - step * idx] += knot_value
    return matrix / knot_values_sum

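# Hedged check of make_spline_knot_matrix, assuming get_spline_knot_values(3)
# returns the cubic B-spline weights [2/3, 1/6] (centre and first neighbour);
# the stand-in helper below exists only so the sketch runs.
import numpy as np

def get_spline_knot_values(order):  # stand-in for the real helper
    assert order == 3
    return [2.0 / 3.0, 1.0 / 6.0]

m = make_spline_knot_matrix(5, 3, mode='mirror')
assert m.shape == (5, 5)
assert np.allclose(m.sum(axis=1), 1.0)  # rows are normalized interpolation weights
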
def layer_config_kwargs(kwargs):
    # pull layer-config flags out of kwargs before they reach the model constructor
    return set_layer_config(scriptable=kwargs.pop('scriptable', None),
                            exportable=kwargs.pop('exportable', None),
                            no_jit=kwargs.pop('no_jit', None))

def test_write_to_io():
    doc = CoNLL.conll2doc(input_str=ENGLISH_SAMPLE)
    output = io.StringIO()
    CoNLL.write_doc2conll(doc, output)
    output_value = output.getvalue()
    assert output_value.endswith('\n\n')
    assert output_value.strip() == ENGLISH_SAMPLE

def get_random_uniform_cluster(clustering, nclasses, ntargets_per_class):
    views = sorted(list(clustering.keys()))
    view_idx_map = {v: i for i, v in enumerate(views)}
    ncentroids = np.array([clustering[view].ncentroids for view in views])
    argmax_view = ncentroids.argmax()
    ncentroids = clustering[views[argmax_view]].ncentroids
    start_indices = []
    assignments = np.full((len(views), ncentroids), 0)
    for _ in range(ncentroids):
        view = views[argmax_view]
        # first centroid of the largest view that has no assignment yet
        cluster_idx = (assignments[view_idx_map[view]] == 0).argmax(axis=0)
        current_cluster = clustering[view].cen2ind[cluster_idx]
        cluster_size = len(current_cluster)
        shuffle = np.arange(cluster_size)
        np.random.shuffle(shuffle)
        for k in range(len(shuffle)):
            idx = current_cluster[shuffle[k]]
            picks = []
            oks = []
            for j, view in enumerate(views):
                if j == argmax_view:
                    continue
                cluster = clustering[view].get_assignment(idx)
                picks.append((j, cluster))
                # prefer an index whose clusters are unassigned in every other view,
                # unless this is the last candidate
                if assignments[j][cluster] > 0 and k != len(shuffle) - 1:
                    break
                oks.append(1)
            if len(oks) == len(views[1:]):
                break
        start_indices.append(idx)
        assignments[argmax_view][cluster_idx] += 1
        for j, cluster in picks:
            assignments[j][cluster] += 1
    assert len(start_indices) == ncentroids, 'insufficient number of start_indices picked'
    assert assignments.sum() == ncentroids * len(views), 'assignments value mismatch'
    assignment_variance = ((assignments - 1) ** 2).mean(axis=1)
    print('assignment_variance: {}'.format(assignment_variance))
    return start_indices

def register_Ns3MmwaveSpectrumSignalParameters_methods(root_module, cls):
    cls.add_constructor([])
    cls.add_constructor([param('ns3::mmwaveSpectrumSignalParameters const &', 'p')])
    cls.add_method('Copy', 'ns3::Ptr< ns3::SpectrumSignalParameters >', [], is_virtual=True)
    cls.add_instance_attribute('packetBurst', 'ns3::Ptr< ns3::PacketBurst >', is_const=False)
    return

class ImageNet(ImageFolder):
WNIDS = ['n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 
'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n', 'n']
CLASSES = ['tench, Tinca tinca', 'goldfish, Carassius auratus', 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias', 'tiger shark, Galeocerdo cuvieri', 'hammerhead, hammerhead shark', 'electric ray, crampfish, numbfish, torpedo', 'stingray', 'cock', 'hen', 'ostrich, Struthio camelus', 'brambling, Fringilla montifringilla', 'goldfinch, Carduelis carduelis', 'house finch, linnet, Carpodacus mexicanus', 'junco, snowbird', 'indigo bunting, indigo finch, indigo bird, Passerina cyanea', 'robin, American robin, Turdus migratorius', 'bulbul', 'jay', 'magpie', 'chickadee', 'water ouzel, dipper', 'kite', 'bald eagle, American eagle, Haliaeetus leucocephalus', 'vulture', 'great grey owl, great gray owl, Strix nebulosa', 'European fire salamander, Salamandra salamandra', 'common newt, Triturus vulgaris', 'eft', 'spotted salamander, Ambystoma maculatum', 'axolotl, mud puppy, Ambystoma mexicanum', 'bullfrog, Rana catesbeiana', 'tree frog, tree-frog', 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui', 'loggerhead, loggerhead turtle, Caretta caretta', 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea', 'mud turtle', 'terrapin', 'box turtle, box tortoise', 'banded gecko', 'common iguana, iguana, Iguana iguana', 'American chameleon, anole, Anolis carolinensis', 'whiptail, whiptail lizard', 'agama', 'frilled lizard, Chlamydosaurus kingi', 'alligator lizard', 'Gila monster, Heloderma suspectum', 'green lizard, Lacerta viridis', 'African chameleon, Chamaeleo chamaeleon', 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis', 'African crocodile, Nile crocodile, Crocodylus niloticus', 'American alligator, Alligator mississipiensis', 'triceratops', 'thunder snake, worm snake, Carphophis amoenus', 'ringneck snake, ring-necked snake, ring snake', 'hognose snake, puff adder, sand viper', 'green snake, grass snake', 'king snake, kingsnake', 'garter snake, grass snake', 'water snake', 'vine snake', 'night snake, Hypsiglena torquata', 'boa constrictor, Constrictor constrictor', 'rock python, rock snake, Python sebae', 'Indian cobra, Naja naja', 'green mamba', 'sea snake', 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus', 'diamondback, diamondback rattlesnake, Crotalus adamanteus', 'sidewinder, horned rattlesnake, Crotalus cerastes', 'trilobite', 'harvestman, daddy longlegs, Phalangium opilio', 'scorpion', 'black and gold garden spider, Argiope aurantia', 'barn spider, Araneus cavaticus', 'garden spider, Aranea diademata', 'black widow, Latrodectus mactans', 'tarantula', 'wolf spider, hunting spider', 'tick', 'centipede', 'black grouse', 'ptarmigan', 'ruffed grouse, partridge, Bonasa umbellus', 'prairie chicken, prairie grouse, prairie fowl', 'peacock', 'quail', 'partridge', 'African grey, African gray, Psittacus erithacus', 'macaw', 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita', 'lorikeet', 'coucal', 'bee eater', 'hornbill', 'hummingbird', 'jacamar', 'toucan', 'drake', 'red-breasted merganser, Mergus serrator', 'goose', 'black swan, Cygnus atratus', 'tusker', 'echidna, spiny anteater, anteater', 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus', 'wallaby, brush kangaroo', 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus', 'wombat', 'jellyfish', 'sea anemone, anemone', 'brain coral', 'flatworm, platyhelminth', 'nematode, nematode worm, roundworm', 'conch', 'snail', 'slug', 'sea slug, nudibranch', 'chiton, coat-of-mail shell, 
sea cradle, polyplacophore', 'chambered nautilus, pearly nautilus, nautilus', 'Dungeness crab, Cancer magister', 'rock crab, Cancer irroratus', 'fiddler crab', 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica', 'American lobster, Northern lobster, Maine lobster, Homarus americanus', 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish', 'crayfish, crawfish, crawdad, crawdaddy', 'hermit crab', 'isopod', 'white stork, Ciconia ciconia', 'black stork, Ciconia nigra', 'spoonbill', 'flamingo', 'little blue heron, Egretta caerulea', 'American egret, great white heron, Egretta albus', 'bittern', 'crane', 'limpkin, Aramus pictus', 'European gallinule, Porphyrio porphyrio', 'American coot, marsh hen, mud hen, water hen, Fulica americana', 'bustard', 'ruddy turnstone, Arenaria interpres', 'red-backed sandpiper, dunlin, Erolia alpina', 'redshank, Tringa totanus', 'dowitcher', 'oystercatcher, oyster catcher', 'pelican', 'king penguin, Aptenodytes patagonica', 'albatross, mollymawk', 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus', 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca', 'dugong, Dugong dugon', 'sea lion', 'Chihuahua', 'Japanese spaniel', 'Maltese dog, Maltese terrier, Maltese', 'Pekinese, Pekingese, Peke', 'Shih-Tzu', 'Blenheim spaniel', 'papillon', 'toy terrier', 'Rhodesian ridgeback', 'Afghan hound, Afghan', 'basset, basset hound', 'beagle', 'bloodhound, sleuthhound', 'bluetick', 'black-and-tan coonhound', 'Walker hound, Walker foxhound', 'English foxhound', 'redbone', 'borzoi, Russian wolfhound', 'Irish wolfhound', 'Italian greyhound', 'whippet', 'Ibizan hound, Ibizan Podenco', 'Norwegian elkhound, elkhound', 'otterhound, otter hound', 'Saluki, gazelle hound', 'Scottish deerhound, deerhound', 'Weimaraner', 'Staffordshire bullterrier, Staffordshire bull terrier', 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier', 'Bedlington terrier', 'Border terrier', 'Kerry blue terrier', 'Irish terrier', 'Norfolk terrier', 'Norwich terrier', 'Yorkshire terrier', 'wire-haired fox terrier', 'Lakeland terrier', 'Sealyham terrier, Sealyham', 'Airedale, Airedale terrier', 'cairn, cairn terrier', 'Australian terrier', 'Dandie Dinmont, Dandie Dinmont terrier', 'Boston bull, Boston terrier', 'miniature schnauzer', 'giant schnauzer', 'standard schnauzer', 'Scotch terrier, Scottish terrier, Scottie', 'Tibetan terrier, chrysanthemum dog', 'silky terrier, Sydney silky', 'soft-coated wheaten terrier', 'West Highland white terrier', 'Lhasa, Lhasa apso', 'flat-coated retriever', 'curly-coated retriever', 'golden retriever', 'Labrador retriever', 'Chesapeake Bay retriever', 'German short-haired pointer', 'vizsla, Hungarian pointer', 'English setter', 'Irish setter, red setter', 'Gordon setter', 'Brittany spaniel', 'clumber, clumber spaniel', 'English springer, English springer spaniel', 'Welsh springer spaniel', 'cocker spaniel, English cocker spaniel, cocker', 'Sussex spaniel', 'Irish water spaniel', 'kuvasz', 'schipperke', 'groenendael', 'malinois', 'briard', 'kelpie', 'komondor', 'Old English sheepdog, bobtail', 'Shetland sheepdog, Shetland sheep dog, Shetland', 'collie', 'Border collie', 'Bouvier des Flandres, Bouviers des Flandres', 'Rottweiler', 'German shepherd, German shepherd dog, German police dog, alsatian', 'Doberman, Doberman pinscher', 'miniature pinscher', 'Greater Swiss Mountain dog', 'Bernese mountain dog', 'Appenzeller', 'EntleBucher', 'boxer', 
'bull mastiff', 'Tibetan mastiff', 'French bulldog', 'Great Dane', 'Saint Bernard, St Bernard', 'Eskimo dog, husky', 'malamute, malemute, Alaskan malamute', 'Siberian husky', 'dalmatian, coach dog, carriage dog', 'affenpinscher, monkey pinscher, monkey dog', 'basenji', 'pug, pug-dog', 'Leonberg', 'Newfoundland, Newfoundland dog', 'Great Pyrenees', 'Samoyed, Samoyede', 'Pomeranian', 'chow, chow chow', 'keeshond', 'Brabancon griffon', 'Pembroke, Pembroke Welsh corgi', 'Cardigan, Cardigan Welsh corgi', 'toy poodle', 'miniature poodle', 'standard poodle', 'Mexican hairless', 'timber wolf, grey wolf, gray wolf, Canis lupus', 'white wolf, Arctic wolf, Canis lupus tundrarum', 'red wolf, maned wolf, Canis rufus, Canis niger', 'coyote, prairie wolf, brush wolf, Canis latrans', 'dingo, warrigal, warragal, Canis dingo', 'dhole, Cuon alpinus', 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus', 'hyena, hyaena', 'red fox, Vulpes vulpes', 'kit fox, Vulpes macrotis', 'Arctic fox, white fox, Alopex lagopus', 'grey fox, gray fox, Urocyon cinereoargenteus', 'tabby, tabby cat', 'tiger cat', 'Persian cat', 'Siamese cat, Siamese', 'Egyptian cat', 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor', 'lynx, catamount', 'leopard, Panthera pardus', 'snow leopard, ounce, Panthera uncia', 'jaguar, panther, Panthera onca, Felis onca', 'lion, king of beasts, Panthera leo', 'tiger, Panthera tigris', 'cheetah, chetah, Acinonyx jubatus', 'brown bear, bruin, Ursus arctos', 'American black bear, black bear, Ursus americanus, Euarctos americanus', 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus', 'sloth bear, Melursus ursinus, Ursus ursinus', 'mongoose', 'meerkat, mierkat', 'tiger beetle', 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle', 'ground beetle, carabid beetle', 'long-horned beetle, longicorn, longicorn beetle', 'leaf beetle, chrysomelid', 'dung beetle', 'rhinoceros beetle', 'weevil', 'fly', 'bee', 'ant, emmet, pismire', 'grasshopper, hopper', 'cricket', 'walking stick, walkingstick, stick insect', 'cockroach, roach', 'mantis, mantid', 'cicada, cicala', 'leafhopper', 'lacewing, lacewing fly', "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", 'damselfly', 'admiral', 'ringlet, ringlet butterfly', 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus', 'cabbage butterfly', 'sulphur butterfly, sulfur butterfly', 'lycaenid, lycaenid butterfly', 'starfish, sea star', 'sea urchin', 'sea cucumber, holothurian', 'wood rabbit, cottontail, cottontail rabbit', 'hare', 'Angora, Angora rabbit', 'hamster', 'porcupine, hedgehog', 'fox squirrel, eastern fox squirrel, Sciurus niger', 'marmot', 'beaver', 'guinea pig, Cavia cobaya', 'sorrel', 'zebra', 'hog, pig, grunter, squealer, Sus scrofa', 'wild boar, boar, Sus scrofa', 'warthog', 'hippopotamus, hippo, river horse, Hippopotamus amphibius', 'ox', 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis', 'bison', 'ram, tup', 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis', 'ibex, Capra ibex', 'hartebeest', 'impala, Aepyceros melampus', 'gazelle', 'Arabian camel, dromedary, Camelus dromedarius', 'llama', 'weasel', 'mink', 'polecat, fitch, foulmart, foumart, Mustela putorius', 'black-footed ferret, ferret, Mustela nigripes', 'otter', 'skunk, polecat, wood pussy', 'badger', 'armadillo', 'three-toed sloth, ai, Bradypus tridactylus', 'orangutan, orang, orangutang, Pongo pygmaeus', 
'gorilla, Gorilla gorilla', 'chimpanzee, chimp, Pan troglodytes', 'gibbon, Hylobates lar', 'siamang, Hylobates syndactylus, Symphalangus syndactylus', 'guenon, guenon monkey', 'patas, hussar monkey, Erythrocebus patas', 'baboon', 'macaque', 'langur', 'colobus, colobus monkey', 'proboscis monkey, Nasalis larvatus', 'marmoset', 'capuchin, ringtail, Cebus capucinus', 'howler monkey, howler', 'titi, titi monkey', 'spider monkey, Ateles geoffroyi', 'squirrel monkey, Saimiri sciureus', 'Madagascar cat, ring-tailed lemur, Lemur catta', 'indri, indris, Indri indri, Indri brevicaudatus', 'Indian elephant, Elephas maximus', 'African elephant, Loxodonta africana', 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens', 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca', 'barracouta, snoek', 'eel', 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch', 'rock beauty, Holocanthus tricolor', 'anemone fish', 'sturgeon', 'gar, garfish, garpike, billfish, Lepisosteus osseus', 'lionfish', 'puffer, pufferfish, blowfish, globefish', 'abacus', 'abaya', "academic gown, academic robe, judge's robe", 'accordion, piano accordion, squeeze box', 'acoustic guitar', 'aircraft carrier, carrier, flattop, attack aircraft carrier', 'airliner', 'airship, dirigible', 'altar', 'ambulance', 'amphibian, amphibious vehicle', 'analog clock', 'apiary, bee house', 'apron', 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin', 'assault rifle, assault gun', 'backpack, back pack, knapsack, packsack, rucksack, haversack', 'bakery, bakeshop, bakehouse', 'balance beam, beam', 'balloon', 'ballpoint, ballpoint pen, ballpen, Biro', 'Band Aid', 'banjo', 'bannister, banister, balustrade, balusters, handrail', 'barbell', 'barber chair', 'barbershop', 'barn', 'barometer', 'barrel, cask', 'barrow, garden cart, lawn cart, wheelbarrow', 'baseball', 'basketball', 'bassinet', 'bassoon', 'bathing cap, swimming cap', 'bath towel', 'bathtub, bathing tub, bath, tub', 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon', 'beacon, lighthouse, beacon light, pharos', 'beaker', 'bearskin, busby, shako', 'beer bottle', 'beer glass', 'bell cote, bell cot', 'bib', 'bicycle-built-for-two, tandem bicycle, tandem', 'bikini, two-piece', 'binder, ring-binder', 'binoculars, field glasses, opera glasses', 'birdhouse', 'boathouse', 'bobsled, bobsleigh, bob', 'bolo tie, bolo, bola tie, bola', 'bonnet, poke bonnet', 'bookcase', 'bookshop, bookstore, bookstall', 'bottlecap', 'bow', 'bow tie, bow-tie, bowtie', 'brass, memorial tablet, plaque', 'brassiere, bra, bandeau', 'breakwater, groin, groyne, mole, bulwark, seawall, jetty', 'breastplate, aegis, egis', 'broom', 'bucket, pail', 'buckle', 'bulletproof vest', 'bullet train, bullet', 'butcher shop, meat market', 'cab, hack, taxi, taxicab', 'caldron, cauldron', 'candle, taper, wax light', 'cannon', 'canoe', 'can opener, tin opener', 'cardigan', 'car mirror', 'carousel, carrousel, merry-go-round, roundabout, whirligig', "carpenter's kit, tool kit", 'carton', 'car wheel', 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM', 'cassette', 'cassette player', 'castle', 'catamaran', 'CD player', 'cello, violoncello', 'cellular telephone, cellular phone, cellphone, cell, mobile phone', 'chain', 'chainlink fence', 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour', 'chain saw, chainsaw', 'chest', 
'chiffonier, commode', 'chime, bell, gong', 'china cabinet, china closet', 'Christmas stocking', 'church, church building', 'cinema, movie theater, movie theatre, movie house, picture palace', 'cleaver, meat cleaver, chopper', 'cliff dwelling', 'cloak', 'clog, geta, patten, sabot', 'cocktail shaker', 'coffee mug', 'coffeepot', 'coil, spiral, volute, whorl, helix', 'combination lock', 'computer keyboard, keypad', 'confectionery, confectionary, candy store', 'container ship, containership, container vessel', 'convertible', 'corkscrew, bottle screw', 'cornet, horn, trumpet, trump', 'cowboy boot', 'cowboy hat, ten-gallon hat', 'cradle', 'crane', 'crash helmet', 'crate', 'crib, cot', 'Crock Pot', 'croquet ball', 'crutch', 'cuirass', 'dam, dike, dyke', 'desk', 'desktop computer', 'dial telephone, dial phone', 'diaper, nappy, napkin', 'digital clock', 'digital watch', 'dining table, board', 'dishrag, dishcloth', 'dishwasher, dish washer, dishwashing machine', 'disk brake, disc brake', 'dock, dockage, docking facility', 'dogsled, dog sled, dog sleigh', 'dome', 'doormat, welcome mat', 'drilling platform, offshore rig', 'drum, membranophone, tympan', 'drumstick', 'dumbbell', 'Dutch oven', 'electric fan, blower', 'electric guitar', 'electric locomotive', 'entertainment center', 'envelope', 'espresso maker', 'face powder', 'feather boa, boa', 'file, file cabinet, filing cabinet', 'fireboat', 'fire engine, fire truck', 'fire screen, fireguard', 'flagpole, flagstaff', 'flute, transverse flute', 'folding chair', 'football helmet', 'forklift', 'fountain', 'fountain pen', 'four-poster', 'freight car', 'French horn, horn', 'frying pan, frypan, skillet', 'fur coat', 'garbage truck, dustcart', 'gasmask, respirator, gas helmet', 'gas pump, gasoline pump, petrol pump, island dispenser', 'goblet', 'go-kart', 'golf ball', 'golfcart, golf cart', 'gondola', 'gong, tam-tam', 'gown', 'grand piano, grand', 'greenhouse, nursery, glasshouse', 'grille, radiator grille', 'grocery store, grocery, food market, market', 'guillotine', 'hair slide', 'hair spray', 'half track', 'hammer', 'hamper', 'hand blower, blow dryer, blow drier, hair dryer, hair drier', 'hand-held computer, hand-held microcomputer', 'handkerchief, hankie, hanky, hankey', 'hard disc, hard disk, fixed disk', 'harmonica, mouth organ, harp, mouth harp', 'harp', 'harvester, reaper', 'hatchet', 'holster', 'home theater, home theatre', 'honeycomb', 'hook, claw', 'hoopskirt, crinoline', 'horizontal bar, high bar', 'horse cart, horse-cart', 'hourglass', 'iPod', 'iron, smoothing iron', "jack-o'-lantern", 'jean, blue jean, denim', 'jeep, landrover', 'jersey, T-shirt, tee shirt', 'jigsaw puzzle', 'jinrikisha, ricksha, rickshaw', 'joystick', 'kimono', 'knee pad', 'knot', 'lab coat, laboratory coat', 'ladle', 'lampshade, lamp shade', 'laptop, laptop computer', 'lawn mower, mower', 'lens cap, lens cover', 'letter opener, paper knife, paperknife', 'library', 'lifeboat', 'lighter, light, igniter, ignitor', 'limousine, limo', 'liner, ocean liner', 'lipstick, lip rouge', 'Loafer', 'lotion', 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system', "loupe, jeweler's loupe", 'lumbermill, sawmill', 'magnetic compass', 'mailbag, postbag', 'mailbox, letter box', 'maillot', 'maillot, tank suit', 'manhole cover', 'maraca', 'marimba, xylophone', 'mask', 'matchstick', 'maypole', 'maze, labyrinth', 'measuring cup', 'medicine chest, medicine cabinet', 'megalith, megalithic structure', 'microphone, mike', 'microwave, microwave oven', 'military uniform', 'milk can', 
'minibus', 'miniskirt, mini', 'minivan', 'missile', 'mitten', 'mixing bowl', 'mobile home, manufactured home', 'Model T', 'modem', 'monastery', 'monitor', 'moped', 'mortar', 'mortarboard', 'mosque', 'mosquito net', 'motor scooter, scooter', 'mountain bike, all-terrain bike, off-roader', 'mountain tent', 'mouse, computer mouse', 'mousetrap', 'moving van', 'muzzle', 'nail', 'neck brace', 'necklace', 'nipple', 'notebook, notebook computer', 'obelisk', 'oboe, hautboy, hautbois', 'ocarina, sweet potato', 'odometer, hodometer, mileometer, milometer', 'oil filter', 'organ, pipe organ', 'oscilloscope, scope, cathode-ray oscilloscope, CRO', 'overskirt', 'oxcart', 'oxygen mask', 'packet', 'paddle, boat paddle', 'paddlewheel, paddle wheel', 'padlock', 'paintbrush', "pajama, pyjama, pj's, jammies", 'palace', 'panpipe, pandean pipe, syrinx', 'paper towel', 'parachute, chute', 'parallel bars, bars', 'park bench', 'parking meter', 'passenger car, coach, carriage', 'patio, terrace', 'pay-phone, pay-station', 'pedestal, plinth, footstall', 'pencil box, pencil case', 'pencil sharpener', 'perfume, essence', 'Petri dish', 'photocopier', 'pick, plectrum, plectron', 'pickelhaube', 'picket fence, paling', 'pickup, pickup truck', 'pier', 'piggy bank, penny bank', 'pill bottle', 'pillow', 'ping-pong ball', 'pinwheel', 'pirate, pirate ship', 'pitcher, ewer', "plane, carpenter's plane, woodworking plane", 'planetarium', 'plastic bag', 'plate rack', 'plow, plough', "plunger, plumber's helper", 'Polaroid camera, Polaroid Land camera', 'pole', 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria', 'poncho', 'pool table, billiard table, snooker table', 'pop bottle, soda bottle', 'pot, flowerpot', "potter's wheel", 'power drill', 'prayer rug, prayer mat', 'printer', 'prison, prison house', 'projectile, missile', 'projector', 'puck, hockey puck', 'punching bag, punch bag, punching ball, punchball', 'purse', 'quill, quill pen', 'quilt, comforter, comfort, puff', 'racer, race car, racing car', 'racket, racquet', 'radiator', 'radio, wireless', 'radio telescope, radio reflector', 'rain barrel', 'recreational vehicle, RV, R.V.', 'reel', 'reflex camera', 'refrigerator, icebox', 'remote control, remote', 'restaurant, eating house, eating place, eatery', 'revolver, six-gun, six-shooter', 'rifle', 'rocking chair, rocker', 'rotisserie', 'rubber eraser, rubber, pencil eraser', 'rugby ball', 'rule, ruler', 'running shoe', 'safe', 'safety pin', 'saltshaker, salt shaker', 'sandal', 'sarong', 'sax, saxophone', 'scabbard', 'scale, weighing machine', 'school bus', 'schooner', 'scoreboard', 'screen, CRT screen', 'screw', 'screwdriver', 'seat belt, seatbelt', 'sewing machine', 'shield, buckler', 'shoe shop, shoe-shop, shoe store', 'shoji', 'shopping basket', 'shopping cart', 'shovel', 'shower cap', 'shower curtain', 'ski', 'ski mask', 'sleeping bag', 'slide rule, slipstick', 'sliding door', 'slot, one-armed bandit', 'snorkel', 'snowmobile', 'snowplow, snowplough', 'soap dispenser', 'soccer ball', 'sock', 'solar dish, solar collector, solar furnace', 'sombrero', 'soup bowl', 'space bar', 'space heater', 'space shuttle', 'spatula', 'speedboat', "spider web, spider's web", 'spindle', 'sports car, sport car', 'spotlight, spot', 'stage', 'steam locomotive', 'steel arch bridge', 'steel drum', 'stethoscope', 'stole', 'stone wall', 'stopwatch, stop watch', 'stove', 'strainer', 'streetcar, tram, tramcar, trolley, trolley car', 'stretcher', 'studio couch, day bed', 'stupa, tope', 'submarine, pigboat, sub, U-boat', 'suit, suit of clothes',
'sundial', 'sunglass', 'sunglasses, dark glasses, shades', 'sunscreen, sunblock, sun blocker', 'suspension bridge', 'swab, swob, mop', 'sweatshirt', 'swimming trunks, bathing trunks', 'swing', 'switch, electric switch, electrical switch', 'syringe', 'table lamp', 'tank, army tank, armored combat vehicle, armoured combat vehicle', 'tape player', 'teapot', 'teddy, teddy bear', 'television, television system', 'tennis ball', 'thatch, thatched roof', 'theater curtain, theatre curtain', 'thimble', 'thresher, thrasher, threshing machine', 'throne', 'tile roof', 'toaster', 'tobacco shop, tobacconist shop, tobacconist', 'toilet seat', 'torch', 'totem pole', 'tow truck, tow car, wrecker', 'toyshop', 'tractor', 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi', 'tray', 'trench coat', 'tricycle, trike, velocipede', 'trimaran', 'tripod', 'triumphal arch', 'trolleybus, trolley coach, trackless trolley', 'trombone', 'tub, vat', 'turnstile', 'typewriter keyboard', 'umbrella', 'unicycle, monocycle', 'upright, upright piano', 'vacuum, vacuum cleaner', 'vase', 'vault', 'velvet', 'vending machine', 'vestment', 'viaduct', 'violin, fiddle', 'volleyball', 'waffle iron', 'wall clock', 'wallet, billfold, notecase, pocketbook', 'wardrobe, closet, press', 'warplane, military plane', 'washbasin, handbasin, washbowl, lavabo, wash-hand basin', 'washer, automatic washer, washing machine', 'water bottle', 'water jug', 'water tower', 'whiskey jug', 'whistle', 'wig', 'window screen', 'window shade', 'Windsor tie', 'wine bottle', 'wing', 'wok', 'wooden spoon', 'wool, woolen, woollen', 'worm fence, snake fence, snake-rail fence, Virginia fence', 'wreck', 'yawl', 'yurt', 'web site, website, internet site, site', 'comic book', 'crossword puzzle, crossword', 'street sign', 'traffic light, traffic signal, stoplight', 'book jacket, dust cover, dust jacket, dust wrapper', 'menu', 'plate', 'guacamole', 'consomme', 'hot pot, hotpot', 'trifle', 'ice cream, icecream', 'ice lolly, lolly, lollipop, popsicle', 'French loaf', 'bagel, beigel', 'pretzel', 'cheeseburger', 'hotdog, hot dog, red hot', 'mashed potato', 'head cabbage', 'broccoli', 'cauliflower', 'zucchini, courgette', 'spaghetti squash', 'acorn squash', 'butternut squash', 'cucumber, cuke', 'artichoke, globe artichoke', 'bell pepper', 'cardoon', 'mushroom', 'Granny Smith', 'strawberry', 'orange', 'lemon', 'fig', 'pineapple, ananas', 'banana', 'jackfruit, jak, jack', 'custard apple', 'pomegranate', 'hay', 'carbonara', 'chocolate sauce, chocolate syrup', 'dough', 'meat loaf, meatloaf', 'pizza, pizza pie', 'potpie', 'burrito', 'red wine', 'espresso', 'cup', 'eggnog', 'alp', 'bubble', 'cliff, drop, drop-off', 'coral reef', 'geyser', 'lakeside, lakeshore', 'promontory, headland, head, foreland', 'sandbar, sand bar', 'seashore, coast, seacoast, sea-coast', 'valley, vale', 'volcano', 'ballplayer, baseball player', 'groom, bridegroom', 'scuba diver', 'rapeseed', 'daisy', "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum", 'corn', 'acorn', 'hip, rose hip, rosehip', 'buckeye, horse chestnut, conker', 'coral fungus', 'agaric', 'gyromitra', 'stinkhorn, carrion fungus', 'earthstar', 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa', 'bolete', 'ear, spike, capitulum', 'toilet tissue, toilet paper, bathroom tissue']
def __init__(self, root: str, split: str='train', transform: Optional[Callable]=None, target_transform: Optional[Callable]=None):
assert (split in ('train', 'val'))
split_folder = (Path(root) / split)
super().__init__(split_folder, transform=transform, target_transform=target_transform)
self.classes = self.WNIDS
self.class_to_idx = {cls: idx for (idx, cls) in enumerate(self.classes)} |
def decay_nuclides(shell_mass, initial_composition, epoch):
decay_model = Ejecta(shell_mass, initial_composition)
new_fractions = decay_model.decay(epoch)
return new_fractions |
def _make_sdfg_2(succeed: bool=True):
name = ('success' if succeed else 'failure')
sdfg = dace.SDFG(f'redundant_second_array_{name}')
sdfg.add_array('A', [20], dace.int32)
sdfg.add_transient('tmp', [7], dace.int32)
sdfg.add_view('A_0', [8], dace.int32)
sdfg.add_view('A_1', [7], dace.int32)
state = sdfg.add_state()
first_A = state.add_read('A')
second_A = state.add_write('A')
first_A_0 = state.add_access('A_0')
second_A_0 = state.add_access('A_0')
(_, me, mx) = state.add_mapped_tasklet('MyMap', {'i': '0:7'}, {'inp': dace.Memlet('tmp[i]')}, 'out = 2 * inp', {'out': dace.Memlet('A_1[i]')}, external_edges=True)
tmp = state.in_edges(me)[0].src
A_1 = state.out_edges(mx)[0].dst
iset = ('11:19' if succeed else '1:9')
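# Success case: copy A[11:19] into the view, disjoint from the A[1:9] region written back below.
# Failure case: read A[1:9], which overlaps the write, so the redundant-array transformation must not apply.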
state.add_nedge(first_A, first_A_0, dace.Memlet(data='A', subset=iset, other_subset='0:8'))
state.add_nedge(first_A_0, tmp, dace.Memlet(data='A_0', subset='1:8', other_subset='0:7'))
state.add_nedge(A_1, second_A_0, dace.Memlet(data='A_0', subset='0:7', other_subset='0:7'))
state.add_nedge(second_A_0, second_A, dace.Memlet(data='A', subset='1:9', other_subset='0:8'))
return sdfg |
@torch.no_grad()
def extract_feature_from_generator(model, inception, batch_size, n_sample, truncation=1.0):
torch.set_default_tensor_type('torch.cuda.FloatTensor')
n_batch = (n_sample // batch_size)
resid = (n_sample - (n_batch * batch_size))
batch_sizes = ([batch_size] * n_batch)
if (resid > 0):
batch_sizes.append(resid)
features = []
for batch in tqdm(batch_sizes):
code = torch.randn(batch, 512).cuda()
styles = model.generator.style(code)
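# Truncation trick: interpolate the sampled styles toward the mean latent w_mu (truncation=1.0 keeps the raw styles).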
styles = ((truncation * styles) + ((1 - truncation) * model.w_mu))
(canon_depth, canon_albedo, canon_light, view, neutral_style, trans_map, canon_im_raw) = model.estimate(styles)
recon_im = model.render(canon_depth, canon_albedo, canon_light, view, trans_map=trans_map)[0]
img = recon_im.clamp(min=(- 1), max=1)
feat = inception(img)[0].view(img.shape[0], (- 1))
features.append(feat.to('cpu'))
features = torch.cat(features, 0)
return features |
def prepare_images(image_paths):
images = []
labels = []
for image in tqdm(image_paths):
image_pixels = plt.imread(image)
image_pixels = cv2.resize(image_pixels, (224, 224))
image_pixels = (image_pixels / 255.0)
label = image.split('/')[2].split('_')[0]
images.append(image_pixels)
labels.append(label)
images = np.array(images)
labels = np.array(labels)
print(images.shape, labels.shape)
return (images, labels) |
def get_linear_schedule_with_warmup(*args, **kwargs):
requires_backends(get_linear_schedule_with_warmup, ['torch']) |
class MobileDet(tf.keras.Model):
_MODEL_FN = {'mobiledet_cpu': mobiledet_cpu_backbone, 'mobiledet_gpu': mobiledet_gpu_backbone, 'mobiledet_edge_tpu': mobiledet_edgetpu_backbone, 'mobiledet_dsp': mobiledet_dsp_backbone}
def __init__(self, input_shape, model_name=None, multiplier=1.0, checkpoint=None, normalization_op_params=None):
input_layer = tf.keras.Input(shape=input_shape, name='resnet_input')
outputs = MobileDet._MODEL_FN[model_name](input_layer, multiplier=multiplier, normalization_op_params=normalization_op_params)
super(MobileDet, self).__init__(inputs=[input_layer], outputs=outputs, name=model_name)
if checkpoint:
latest_checkpoint = tf.train.latest_checkpoint(checkpoint)
self.load_weights(latest_checkpoint).assert_consumed()
logging.info('Initialized weights from {}'.format(latest_checkpoint))
else:
logging.warning('Proceeding with random initialization!') |
class DiscriminatorMultiToSingleOutputStackedMixin():
def __init__(self, *args, return_feats_only_levels=None, **kwargs):
super().__init__(*args, **kwargs)
self.return_feats_only_levels = return_feats_only_levels
def forward(self, x):
out_feat_tuples = super().forward(x)
outs = [out for (out, _) in out_feat_tuples]
scaled_outs = ([outs[0]] + [F.interpolate(cur_out, size=outs[0].shape[(- 2):], mode='bilinear', align_corners=False) for cur_out in outs[1:]])
out = torch.cat(scaled_outs, dim=1)
if (self.return_feats_only_levels is not None):
feat_lists = [out_feat_tuples[i][1] for i in self.return_feats_only_levels]
else:
feat_lists = [flist for (_, flist) in out_feat_tuples]
feats = [f for flist in feat_lists for f in flist]
return (out, feats) |
@register_model
def ese_vovnet99b_iabn(pretrained=False, **kwargs):
norm_layer = get_norm_act_layer('iabn')
return _vovnet('ese_vovnet99b_iabn', pretrained=pretrained, norm_layer=norm_layer, **kwargs) |
def default_model_params():
params = dict()
params['img_height'] = 128
params['img_width'] = None
params['batch_size'] = 12
params['img_channels'] = 1
params['conv_blocks'] = 4
params['conv_filter_n'] = [32, 64, 128, 256]
params['conv_filter_size'] = [[3, 3], [3, 3], [3, 3], [3, 3]]
params['conv_pooling_size'] = [[2, 2], [2, 2], [2, 2], [2, 2]]
params['rnn_units'] = 512
params['rnn_layers'] = 2
return params |
class Experiment(object):
@classmethod
def compile(cls, name, exp, configurator):
action = exp.get('action')
description = exp.get('description')
desc = exp.get('desc')
if (action == 'profile'):
data_file = (exp.get('data_file') or (configurator.data_file + '.profiles'))
else:
data_file = (exp.get('data_file') or configurator.data_file)
reporting = Reporting.compile(exp.get('reporting', {}), configurator.reporting, configurator.options, configurator.ui)
run_details = ExpRunDetails.compile(exp, configurator.run_details)
variables = ExpVariables.compile(exp, ExpVariables.empty())
executions = exp.get('executions')
suites = exp.get('suites')
env = exp.get('env')
return Experiment(name, (description or desc), action, env, data_file, reporting, run_details, variables, configurator, executions, suites)
def __init__(self, name, description, action, env, data_file, reporting, run_details, variables, configurator, executions, suites):
self.name = name
self._description = description
self._action = action
self._run_details = run_details
self._variables = variables
self._env = env
self._reporting = reporting
self._data_store = configurator.data_store
self._persistence = self._data_store.get(data_file, configurator, action)
self._suites = self._compile_executors_and_benchmark_suites(executions, suites, configurator)
self._benchmarks = self._compile_benchmarks()
self.runs = self._compile_runs(configurator)
def _compile_runs(self, configurator):
runs = set()
for bench in self._benchmarks:
if (not configurator.run_filter.applies_to_bench(bench)):
continue
variables = bench.variables
for cores in variables.cores:
for input_size in variables.input_sizes:
for var_val in variables.variable_values:
for machine in variables.machines:
if (not configurator.run_filter.applies_to_machine(machine)):
continue
run = self._data_store.create_run_id(bench, cores, input_size, var_val, machine)
bench.add_run(run)
runs.add(run)
run.add_reporting(self._reporting)
run.add_persistence(self._persistence)
return runs
def _compile_executors_and_benchmark_suites(self, executions, suites, configurator):
results = []
for executor_cfg in executions:
(executor_name, executor_details) = value_with_optional_details(executor_cfg)
run_details = self._run_details
variables = self._variables
if executor_details:
run_details = ExpRunDetails.compile(executor_details, run_details)
variables = ExpVariables.compile(executor_details, variables)
suites_for_executor = executor_details.get('suites', suites)
else:
suites_for_executor = suites
executor = configurator.get_executor(executor_name, run_details, variables, self._action)
for suite_name in suites_for_executor:
suite = BenchmarkSuite.compile(suite_name, configurator.get_suite(suite_name), executor, configurator.build_commands)
results.append(suite)
return results
def _compile_benchmarks(self):
bench_cfgs = []
for suite in self._suites:
for bench in suite.benchmarks_config:
bench_cfgs.append(Benchmark.compile(bench, suite, self._data_store))
return bench_cfgs |
@pytest.mark.parametrize('inshape, kernel, divisor, outshape', [((2, 4, 10, 10), (3, 2), 1, (2, 4, 12, 11)), ((2, 4, 10, 10), (3, 2), 2, (2, 2, 12, 11))])
def test_parametric_function_2d(inshape, kernel, divisor, outshape):
base_axis = (len(inshape) - 3)
sample_channels = inshape[base_axis]
outmap_channels = (sample_channels // divisor)
x = nn.Variable(inshape)
y = PF.depthwise_deconvolution(x, kernel, divisor=divisor)
p = nn.get_parameters()
assert (y.shape == outshape)
assert (p['depthwise_deconv/W'].shape == ((sample_channels,) + kernel))
assert (p['depthwise_deconv/b'].shape == (outmap_channels,))
nn.clear_parameters() |
def check_loss_raises(Clf):
clf = Clf(loss='hinge', random_state=0)
assert_raises(ValueError, clf.fit, X, y) |
class TransformerLayer(Layer):
def __init__(self, units: int, activation: Activation=None, **kwargs):
self.units = units
self.activation = activation
super(TransformerLayer, self).__init__(**kwargs)
def transform(self, inputs: tf.Tensor) -> tf.Tensor:
raise NotImplementedError('Needs to be overwritten by child class.')
def inverse_transform(self, outputs: tf.Tensor) -> tf.Tensor:
raise NotImplementedError('Needs to be overwritten by child class.')
def call(self, inputs, training=None, mask=None):
outputs = self.transform(inputs)
if self.activation:
outputs = self.activation(outputs)
return outputs
@property
def matrix(self):
identity_matrix = np.eye(self.units, dtype=np.complex64)
return self.transform(identity_matrix).numpy()
@property
def inverse_matrix(self):
identity_matrix = np.eye(self.units, dtype=np.complex64)
return self.inverse_transform(identity_matrix).numpy()
def plot(self, plt):
plot_complex_matrix(plt, self.matrix) |
def dur2str(ons):
if (ons < 62):
return ('r' + int2char(ons))
return ('R' + int2char((ons - 62))) |
class AnalyticTypeElement(LatticePosetElement):
def _repr_(self):
return self.analytic_name()
def _latex_(self):
from sage.misc.latex import latex
return latex(self.analytic_name())
def analytic_space_name(self):
name = ''
if (self.parent()('quasi') <= self):
name += 'Quasi'
if (self.parent()('mero') <= self):
name += 'MeromorphicModular'
elif (self.parent()('weak') <= self):
name += 'WeakModular'
elif (self.parent()('holo') <= self):
name += 'Modular'
elif (self.parent()('cusp') <= self):
name += 'Cusp'
else:
name = 'Zero'
return name
def latex_space_name(self):
name = ''
if (self.parent()('quasi') <= self):
name += 'Q'
if (self.parent()('mero') <= self):
name += '\\tilde{M}'
elif (self.parent()('weak') <= self):
name += 'M^!'
elif (self.parent()('holo') <= self):
name += 'M'
elif (self.parent()('cusp') <= self):
name += 'C'
else:
name = 'Z'
return name
def analytic_name(self):
name = ''
if (self.parent()('quasi') <= self):
name += 'quasi '
if (self.parent()('mero') <= self):
name += 'meromorphic modular'
elif (self.parent()('weak') <= self):
name += 'weakly holomorphic modular'
elif (self.parent()('holo') <= self):
name += 'modular'
elif (self.parent()('cusp') <= self):
name += 'cuspidal'
else:
name = 'zero'
return name
def reduce_to(self, reduce_type):
reduce_type = self.parent()(reduce_type)
return (self * reduce_type)
def extend_by(self, extend_type):
extend_type = self.parent()(extend_type)
return (self + extend_type)
def __iter__(self):
return iter([el.element for el in self.element]) |
def test_named_record_int32():
t = RecordType([NumpyType('int32')], None, {'__record__': 'Name'})
assert (str(parser.parse(str(t))) == str(t)) |
class TensorProductFunctor(CovariantFunctorialConstruction):
_functor_name = 'tensor'
_functor_category = 'TensorProducts'
symbol = ' # '
unicode_symbol = f' {unicode_otimes} ' |
class SNetDS2BN_base_8(Network):
def setup(self):
print('2D SNet with 16 channel output')
base_filter = 8
self.feed('data').conv_bn(3, base_filter, 1, dilation_rate=1, center=True, scale=True, name='sconv0_0').conv_bn(3, (base_filter * 2), 1, dilation_rate=1, center=True, scale=True, name='sconv0_1').conv_bn(3, (base_filter * 2), 1, dilation_rate=2, center=True, scale=True, name='sconv0_2').conv_bn(3, (base_filter * 2), 1, dilation_rate=1, center=True, scale=True, relu=True, name='sconv0_3')
self.feed('sconv0_2').conv_bn(3, (base_filter * 2), 1, dilation_rate=3, center=True, scale=True, name='sconv1_2').conv_bn(3, (base_filter * 2), 1, dilation_rate=1, center=True, scale=True, relu=True, name='sconv1_3')
self.feed('sconv0_2').conv_bn(3, (base_filter * 2), 1, dilation_rate=4, center=True, scale=True, name='sconv2_2').conv_bn(3, (base_filter * 2), 1, dilation_rate=1, center=True, scale=True, relu=True, name='sconv2_3')
self.feed('sconv0_3', 'sconv1_3', 'sconv2_3').concat(axis=(- 1), name='sconcat').conv(3, (base_filter * 2), 1, relu=False, name='sconv3_0') |
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
(fan_in, fan_out) = _calculate_fan_in_and_fan_out(tensor)
if (mode == 'fan_in'):
denom = fan_in
elif (mode == 'fan_out'):
denom = fan_out
elif (mode == 'fan_avg'):
denom = ((fan_in + fan_out) / 2)
variance = (scale / denom)
if (distribution == 'truncated_normal'):
# the constant below is the stddev of a standard normal truncated to (-2, 2)
trunc_normal_(tensor, std=(math.sqrt(variance) / 0.87962566103423978))
elif (distribution == 'normal'):
tensor.normal_(std=math.sqrt(variance))
elif (distribution == 'uniform'):
bound = math.sqrt((3 * variance))
tensor.uniform_((- bound), bound)
else:
raise ValueError(f'invalid distribution {distribution}') |
def test_UnmaskedArray_RecordArray_NumpyArray():
v2a = ak.contents.unmaskedarray.UnmaskedArray(ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3]))], ['nest']))
roundtrip(v2a)
array = ak.highlevel.Array(v2a)
memoryleak(array, swallow)
memoryleak(array, passthrough)
memoryleak(array, passthrough2)
memoryleak(array, digest)
memoryleak(array, digest2) |
def _seg_35():
return [(13274, 'M', u'pr'), (13275, 'M', u'sr'), (13276, 'M', u'sv'), (13277, 'M', u'wb'), (13278, 'M', u'vm'), (13279, 'M', u'am'), (13280, 'M', u'1'), (13281, 'M', u'2'), (13282, 'M', u'3'), (13283, 'M', u'4'), (13284, 'M', u'5'), (13285, 'M', u'6'), (13286, 'M', u'7'), (13287, 'M', u'8'), (13288, 'M', u'9'), (13289, 'M', u'10'), (13290, 'M', u'11'), (13291, 'M', u'12'), (13292, 'M', u'13'), (13293, 'M', u'14'), (13294, 'M', u'15'), (13295, 'M', u'16'), (13296, 'M', u'17'), (13297, 'M', u'18'), (13298, 'M', u'19'), (13299, 'M', u'20'), (13300, 'M', u'21'), (13301, 'M', u'22'), (13302, 'M', u'23'), (13303, 'M', u'24'), (13304, 'M', u'25'), (13305, 'M', u'26'), (13306, 'M', u'27'), (13307, 'M', u'28'), (13308, 'M', u'29'), (13309, 'M', u'30'), (13310, 'M', u'31'), (13311, 'M', u'gal'), (13312, 'V'), (40957, 'X'), (40960, 'V'), (42125, 'X'), (42128, 'V'), (42183, 'X'), (42192, 'V'), (42540, 'X'), (42560, 'M', u''), (42561, 'V'), (42562, 'M', u''), (42563, 'V'), (42564, 'M', u''), (42565, 'V'), (42566, 'M', u''), (42567, 'V'), (42568, 'M', u''), (42569, 'V'), (42570, 'M', u''), (42571, 'V'), (42572, 'M', u''), (42573, 'V'), (42574, 'M', u''), (42575, 'V'), (42576, 'M', u''), (42577, 'V'), (42578, 'M', u''), (42579, 'V'), (42580, 'M', u''), (42581, 'V'), (42582, 'M', u''), (42583, 'V'), (42584, 'M', u''), (42585, 'V'), (42586, 'M', u''), (42587, 'V'), (42588, 'M', u''), (42589, 'V'), (42590, 'M', u''), (42591, 'V'), (42592, 'M', u''), (42593, 'V'), (42594, 'M', u''), (42595, 'V'), (42596, 'M', u''), (42597, 'V'), (42598, 'M', u''), (42599, 'V'), (42600, 'M', u''), (42601, 'V'), (42602, 'M', u''), (42603, 'V'), (42604, 'M', u''), (42605, 'V'), (42624, 'M', u''), (42625, 'V'), (42626, 'M', u''), (42627, 'V'), (42628, 'M', u''), (42629, 'V'), (42630, 'M', u''), (42631, 'V')] |
class LatticePosets(Category):
@cached_method
def super_categories(self):
return [Posets()]
Finite = LazyImport('sage.categories.finite_lattice_posets', 'FiniteLatticePosets')
class ParentMethods():
@abstract_method
def meet(self, x, y):
"""Return the meet of x and y in this lattice."""
@abstract_method
def join(self, x, y):
"""Return the join of x and y in this lattice.""" |
class PyTorchBenchmark(Benchmark):
args: PyTorchBenchmarkArguments
configs: PretrainedConfig
framework: str = 'PyTorch'
@property
def framework_version(self):
return torch.__version__
def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
_inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
return self._measure_speed(_inference)
def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
_inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
return self._measure_memory(_inference)
def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
_train = self._prepare_train_func(model_name, batch_size, sequence_length)
return self._measure_speed(_train)
def _train_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
_train = self._prepare_train_func(model_name, batch_size, sequence_length)
return self._measure_memory(_train)
def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[([], None)]:
config = self.config_dict[model_name]
if self.args.torchscript:
config.torchscript = True
has_model_class_in_config = (hasattr(config, 'architectures') and isinstance(config.architectures, list) and (len(config.architectures) > 0))
if ((not self.args.only_pretrain_model) and has_model_class_in_config):
try:
model_class = config.architectures[0]
transformers_module = __import__('transformers', fromlist=[model_class])
model_cls = getattr(transformers_module, model_class)
model = model_cls(config)
except ImportError:
raise ImportError(f'{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`.')
else:
model = MODEL_MAPPING[config.__class__](config)
model.eval()
model.to(self.args.device)
vocab_size = (config.vocab_size if hasattr(config, 'vocab_size') else config.encoder.vocab_size)
input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device)
if self.args.fp16:
logger.info('Running inference in Mixed Precision...')
assert self.args.is_gpu, 'Mixed precision is possible only for GPU.'
model.half()
if self.args.torchscript:
with torch.no_grad():
inference_model = torch.jit.trace(model, input_ids)
else:
inference_model = model
def encoder_decoder_forward():
with torch.no_grad():
outputs = inference_model(input_ids, decoder_input_ids=input_ids)
return outputs
def encoder_forward():
with torch.no_grad():
outputs = inference_model(input_ids)
return outputs
_forward = (encoder_decoder_forward if config.is_encoder_decoder else encoder_forward)
return _forward
def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[([], None)]:
config = self.config_dict[model_name]
has_model_class_in_config = (hasattr(config, 'architectures') and isinstance(config.architectures, list) and (len(config.architectures) > 0))
if ((not self.args.only_pretrain_model) and has_model_class_in_config):
try:
model_class = config.architectures[0]
transformers_module = __import__('transformers', fromlist=[model_class])
model_cls = getattr(transformers_module, model_class)
model = model_cls(config)
except ImportError:
raise ImportError(f'{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`.')
else:
model = MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
if self.args.torchscript:
raise NotImplementedError('Training for torchscript is currently not implemented')
else:
train_model = model
model.train()
model.to(self.args.device)
vocab_size = (config.vocab_size if hasattr(config, 'vocab_size') else config.encoder.vocab_size)
input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device)
if self.args.fp16:
logger.info('Running training in Mixed Precision...')
assert self.args.is_gpu, 'Mixed precision is possible only for GPU.'
model.half()
def compute_loss_and_backprob_encoder():
loss = train_model(input_ids, labels=input_ids)[0]
loss.backward()
return loss
def compute_loss_and_backprob_encoder_decoder():
loss = train_model(input_ids, decoder_input_ids=input_ids, labels=input_ids)[0]
loss.backward()
return loss
_train = (compute_loss_and_backprob_encoder_decoder if config.is_encoder_decoder else compute_loss_and_backprob_encoder)
return _train
def _measure_speed(self, func) -> float:
try:
if (self.args.is_tpu or self.args.torchscript):
logger.info('Do inference on TPU or torchscript. Running model 5 times to stabilize compilation')
timeit.repeat(func, repeat=1, number=5)
runtimes = timeit.repeat(func, repeat=self.args.repeat, number=10)
if (self.args.is_tpu and self.args.torch_xla_tpu_print_metrics):
import torch_xla.debug.metrics as met
self.print_fn(met.metrics_report())
return (min(runtimes) / 10.0)
except RuntimeError as e:
self.print_fn("Doesn't fit on GPU. {}".format(e))
return 'N/A'
def _measure_memory(self, func: Callable[([], None)]) -> [Memory, MemorySummary]:
try:
if self.args.trace_memory_line_by_line:
trace = start_memory_tracing('transformers')
if self.args.is_tpu:
raise NotImplementedError('Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking with `--no-memory` or `args.memory=False`')
elif self.args.is_gpu:
if (not is_py3nvml_available()):
logger.warning("py3nvml not installed, we won't log GPU memory usage. Install py3nvml (pip install py3nvml) to log information about GPU.")
memory = 'N/A'
else:
logger.info('Measuring total GPU usage on GPU device. Make sure to not have additional processes running on the same GPU.')
nvml.nvmlInit()
func()
handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
max_bytes_in_use = meminfo.used
memory = Memory(max_bytes_in_use)
nvml.nvmlShutdown()
else:
memory_bytes = measure_peak_memory_cpu(func)
memory = (Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes)
if self.args.trace_memory_line_by_line:
summary = stop_memory_tracing(trace)
else:
summary = None
return (memory, summary)
except RuntimeError as e:
self.print_fn("Doesn't fit on GPU. {}".format(e))
return ('N/A', None) |
class ResNet(nn.Module):
def __init__(self, depth, num_classes=1000, death_mode='linear', death_rate=0.5):
super(ResNet, self).__init__()
assert (((depth - 2) % 6) == 0), 'depth should be 6n+2'
n = ((depth - 2) // 6)
block = (Bottleneck if (depth >= 44) else BasicBlock)
nblocks = ((depth - 2) // 2)
if (death_mode == 'uniform'):
death_rates = ([death_rate] * nblocks)
print('Stochastic Depth: uniform mode')
elif (death_mode == 'linear'):
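# linear mode ramps the per-block death rate from death_rate/nblocks at the first block up to death_rate at the last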
death_rates = [((float((i + 1)) * death_rate) / float(nblocks)) for i in range(nblocks)]
print('Stochastic Depth: linear mode')
else:
death_rates = ([0.0] * (3 * n))
print('Stochastic Depth: none mode')
self.inplanes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False)
self.layer1 = self._make_layer(block, 16, n, death_rates[:n])
self.layer2 = self._make_layer(block, 32, n, death_rates[n:(2 * n)], stride=2)
self.layer3 = self._make_layer(block, 64, n, death_rates[(2 * n):], stride=2)
self.bn1 = nn.BatchNorm2d((64 * block.expansion))
self.relu = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(8)
self.fc1 = nn.Linear((64 * block.expansion), (64 * block.expansion))
self.fc = nn.Linear((64 * block.expansion), num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, death_rates, stride=1):
downsample = None
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, death_rate=death_rates[0]))
self.inplanes = (planes * block.expansion)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, death_rate=death_rates[i]))
return nn.Sequential(*layers)
def split2(self, x):
(x1, x2) = torch.split(x, (x.shape[1] // 2), 1)
return (x1, x2)
def reparameterize(self, mu, logvar):
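# VAE-style reparameterization: during training sample eps ~ N(0, 1) and return mu + eps * std; at eval time just return the mean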
if self.training:
std = logvar.mul(0.5).exp_()
eps = torch.autograd.Variable(std.data.new(std.size()).normal_())
return eps.mul(std).add_(mu)
else:
return mu
def forward(self, x):
x = self.conv1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.bn1(x)
x = self.relu(x)
x = self.avgpool(x)
x = x.view(x.size(0), (- 1))
x = self.fc1(x)
output = self.fc(x)
return output |
def convert_citylabelTo16label():
with open('./synthia2cityscapes_info.json', 'r') as f:
paramdic = json.load(f)
class_ind = paramdic['city2common']
city_gt_dir = '/data/ugui0/ksaito/D_A/image_citiscape/www.cityscapes-dataset.com/file-handling/gtFine'
split_list = ['train', 'test', 'val']
original_suffix = 'labelIds'
processed_suffix = 'label16IDs'
for split in tqdm(split_list):
base_dir = os.path.join(city_gt_dir, split)
place_list = os.listdir(base_dir)
for place in tqdm(place_list):
target_dir = os.path.join(base_dir, place)
pngfn_list = os.listdir(target_dir)
original_pngfn_list = [x for x in pngfn_list if (original_suffix in x)]
for pngfn in tqdm(original_pngfn_list):
gt_fn = os.path.join(target_dir, pngfn)
original_gt_im = Image.open(gt_fn)
processed_gt_im = swap_labels(np.array(original_gt_im), class_ind)
outfn = gt_fn.replace(original_suffix, processed_suffix)
processed_gt_im.save(outfn, 'PNG') |
def format_mnist():
dir_path = os.path.dirname(os.path.realpath(__file__))
mnist_path = (Path(dir_path) / 'data/mnist')
try:
return np.load((mnist_path / 'mnist.npz'))
except Exception:
train_loader = torch.utils.data.DataLoader(datasets.MNIST(mnist_path, train=True, download=True, transform=transforms.Compose([transforms.ToTensor()])), batch_size=128, shuffle=True)
test_loader = torch.utils.data.DataLoader(datasets.MNIST(mnist_path, train=False, transform=transforms.Compose([transforms.ToTensor()])), batch_size=128, shuffle=True)
list_X = []
list_Y = []
for (x, y) in train_loader:
list_X.append(x.view((- 1), 784).detach().cpu().numpy())
list_Y.append(y.detach().cpu().numpy())
X_train = np.concatenate(list_X, axis=0)
Y_train = np.concatenate(list_Y, axis=0)
list_X = []
list_Y = []
for (x, y) in test_loader:
list_X.append(x.view((- 1), 784).detach().cpu().numpy())
list_Y.append(y.detach().cpu().numpy())
X_test = np.concatenate(list_X, axis=0)
Y_test = np.concatenate(list_Y, axis=0)
d_data = {'X_train': X_train, 'Y_train': Y_train, 'X_test': X_test, 'Y_test': Y_test}
np.savez((mnist_path / 'mnist.npz'), **d_data)
return d_data |
class RRDBNet(nn.Module):
def __init__(self, in_nc, out_nc, nf, nb, gc=32):
super(RRDBNet, self).__init__()
RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc)
self.conv_first = nn.Conv2d(3, nf, 3, 1, 1, bias=True)
self.RRDB_trunk = mutil.make_layer(RRDB_block_f, nb)
self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.upconv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.upconv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
self.CondNet = nn.Sequential(nn.Conv2d(1, 1, 1, 1), nn.LeakyReLU(0.1, True), nn.Conv2d(1, 32, 1))
def forward(self, x):
cond = self.CondNet(x[1])
fea = self.conv_first(x[0])
fea2 = (fea, cond)
fea3 = self.RRDB_trunk(fea2)
trunk = self.trunk_conv(fea3[0])
fea = (fea + trunk)
fea = self.lrelu(self.upconv1(F.interpolate(fea, scale_factor=2, mode='nearest')))
fea = self.lrelu(self.upconv2(F.interpolate(fea, scale_factor=2, mode='nearest')))
out = self.conv_last(self.lrelu(self.HRconv(fea)))
return out |
class BasicUnit(nn.Module):
def forward(self, x):
raise NotImplementedError
def unit_str(self):
raise NotImplementedError
def config(self):
raise NotImplementedError
@staticmethod
def build_from_config(config):
raise NotImplementedError
def get_flops(self, x):
raise NotImplementedError |
@dataclass
class StudentModelArguments():
student_name_or_path: Optional[str] = field(default='distilbert-base-uncased', metadata={'help': 'The NLI/zero-shot teacher model to be distilled.'}) |
def min_cycles(G, v):
pr = G.neighbors_in(v)
sp = G.shortest_paths(v)
return [sp[i] for i in pr if (i in sp)] |
class CDFCopy(spacepy.datamodel.SpaceData):
def __init__(self, cdf):
super(CDFCopy, self).__init__(((key, var.copy()) for (key, var) in cdf.items()), attrs=cdf.attrs.copy()) |
@pytest.mark.operations('success')
def test_do_not_send_incomplete_report_file(file_report_handler, service, openapi3_schema_url):
context = mock.create_autospec(ExecutionContext)
for event in generate_events(openapi3_schema_url):
if isinstance(event, events.Finished):
file_report_handler.handle_event(context, events.Interrupted())
else:
file_report_handler.handle_event(context, event)
file_report_handler.shutdown()
assert (not os.path.exists(file_report_handler.file_handle.name)) |
def main():
assert (args.num_tasks == len(args.meta_datasets))
(minis, minis_test) = load_meta_dataset(args)
(model, cur_device) = model_builder._construct_model(args)
metalearner = Meta(args, model).to(cur_device)
step_number = min([len(mini) for mini in minis])
test_step_number = len(minis_test)
BEST_LOSS = np.inf
BEST_PROMPTER = metalearner.prompter
BEST_TEST_ACC = (- np.inf)
BEST_TEST_EPOCH = (- 1)
global_step = 0
test_acc_list = []
def save_prompter(prompter, acc, epoch):
file_path = ((((((('./checkpoints_' + str(args.pretrained_model)) + '/lr') + str(args.meta_lr)) + '_step') + str(args.update_step)) + '_eps') + str(args.meta_step_size))
if (not os.path.exists(file_path)):
os.makedirs(file_path)
file_name = (((((args.prompt_method + '_Epoch') + str(epoch)) + '_') + str(acc)) + '.pth')
save_model_path = os.path.join(file_path, file_name)
state = {'state_dict': prompter.state_dict(), 'epoch': epoch}
torch.save(state, save_model_path)
def load_model(model, acc):
model_file_path = os.path.join('./checkpoints/', args.dataset)
file_name = (((str(acc) + '_') + args.target_model) + '.pth')
model_checkpoint_path = os.path.join(model_file_path, file_name)
assert os.path.exists(model_checkpoint_path)
model.net.load_state_dict(torch.load(model_checkpoint_path))
return model
if (not args.wo_da):
metalearner.coarse_clustering(minis, 'meta_training')
metalearner.coarse_clustering(minis_test, 'task_adapting')
for epoch in range(args.epochs):
minis_iter = []
for i in range(len(minis)):
minis_iter.append(iter(minis[i]))
if (epoch == 0):
accs = metalearner.finetuning(minis_test)
test_acc_list.append(accs)
logger.info('[Prompt Finetuning] Testing acc on {}: {}'.format(args.test_dataset, accs))
for step in range(step_number):
try:
batch_data = []
for each in minis_iter:
batch_data.append(next(each))
except StopIteration:
break
global_step += 1
metalearner.lr_scheduler(global_step, (args.epochs * step_number), 0)
accs = metalearner(batch_data)
if (((step + 1) % 1) == 0):
logger.info('[Meta Training] Epoch: [{}/{}], Step: [{}/{}], Training loss: {}'.format(epoch, (args.epochs - 1), step, (step_number - 1), accs[(- 1)]))
if (accs[(- 1)] < BEST_LOSS):
BEST_LOSS = accs[(- 1)]
BEST_PROMPTER = deepcopy(metalearner.prompter)
logger.info('[Meta Training] Epoch: [{}/{}], Step: [{}/{}], Current best loss: {}'.format(epoch, (args.epochs - 1), step, (step_number - 1), BEST_LOSS))
save_prompter(BEST_PROMPTER, BEST_LOSS, epoch)
if (((epoch + 1) % 1) == 0):
accs = metalearner.finetuning(minis_test, BEST_PROMPTER)
test_acc_list.append(accs)
logger.info('[Prompt Finetuning] Testing acc on {}: {}'.format(args.test_dataset, accs))
if (accs > BEST_TEST_ACC):
BEST_TEST_ACC = accs
BEST_TEST_EPOCH = epoch
save_prompter(BEST_PROMPTER, accs, BEST_TEST_EPOCH)
logger.info('[Prompt Finetuning] Current Best Evaluation is Epoch: {}, Acc: {}'.format(BEST_TEST_EPOCH, BEST_TEST_ACC))
logger.info('')
accs = metalearner.finetuning(minis_test)
logger.info('[Prompt Finetuning] Testing acc on {} after the last epoch: {}'.format(args.test_dataset, accs)) |
@app.route('/get_active_clients', methods=['GET'])
def get_active_clients():
combiner_id = request.args.get('combiner', None)
if (combiner_id is None):
return (jsonify({'success': False, 'message': 'Missing combiner id.'}), 400)
return api.get_active_clients(combiner_id) |
def test_hinsage_save_load(tmpdir):
G = example_hin_1({'A': 8, 'B': 4})
gen = HinSAGENodeGenerator(G, 1, [2, 2], 'A')
hs = HinSAGE(layer_sizes=[2, 2], generator=gen, normalize='none', activations=['relu', 'relu'])
test_utils.model_save_load(tmpdir, hs) |
def load_pretrained_component_from_model(component: Union[(FairseqEncoder, FairseqDecoder)], checkpoint: str):
if (not os.path.exists(checkpoint)):
raise IOError('Model file not found: {}'.format(checkpoint))
state = load_checkpoint_to_cpu(checkpoint)
if isinstance(component, FairseqEncoder):
component_type = 'encoder'
elif isinstance(component, FairseqDecoder):
component_type = 'decoder'
else:
raise ValueError('component to load must be either a FairseqEncoder or FairseqDecoder. Loading other component types are not supported.')
component_state_dict = OrderedDict()
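# copy only the weights belonging to this component, stripping the leading 'encoder.' / 'decoder.' prefix from each key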
for key in state['model'].keys():
if key.startswith(component_type):
component_subkey = key[(len(component_type) + 1):]
component_state_dict[component_subkey] = state['model'][key]
component.load_state_dict(component_state_dict, strict=True)
return component |
def _nanmin(X, axis=None):
(xp, _) = get_namespace(X)
if _is_numpy_namespace(xp):
return xp.asarray(numpy.nanmin(X, axis=axis))
else:
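# replace NaNs with +inf so they never win the minimum, then restore NaN for slices that were all-NaN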
mask = xp.isnan(X)
X = xp.min(xp.where(mask, xp.asarray((+ xp.inf), device=device(X)), X), axis=axis)
mask = xp.all(mask, axis=axis)
if xp.any(mask):
X = xp.where(mask, xp.asarray(xp.nan), X)
return X |
def features_sparse():
return csr_matrix([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]) |
def knn_graph(x: torch.Tensor, k: int, batch: Optional[torch.Tensor]=None, loop: bool=False, flow: str='source_to_target', cosine: bool=False, num_workers: int=1, batch_size: Optional[int]=None) -> torch.Tensor:
assert (flow in ['source_to_target', 'target_to_source'])
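# when self-loops are excluded, query k + 1 neighbours so that dropping each point's self-match still leaves k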
edge_index = knn(x, x, (k if loop else (k + 1)), batch, batch, cosine, num_workers, batch_size)
if (flow == 'source_to_target'):
(row, col) = (edge_index[1], edge_index[0])
else:
(row, col) = (edge_index[0], edge_index[1])
if (not loop):
mask = (row != col)
(row, col) = (row[mask], col[mask])
return torch.stack([row, col], dim=0) |
def mwrank_console():
from sage.repl.rich_output.display_manager import get_display_manager
if (not get_display_manager().is_in_terminal()):
raise RuntimeError('Can use the console only in the terminal. Try %%mwrank magics instead.')
os.system('mwrank') |
@require_tf
class TFT5ModelTest(TFModelTesterMixin, unittest.TestCase):
is_encoder_decoder = True
all_model_classes = ((TFT5Model, TFT5WithLMHeadModel) if is_tf_available() else ())
class TFT5ModelTester(object):
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, n_positions=14, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, scope=None):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.n_positions = n_positions
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.d_ff = d_ff
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dropout_rate = dropout_rate
self.initializer_factor = initializer_factor
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_labels = None
if self.use_labels:
token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = T5Config(vocab_size=self.vocab_size, n_positions=self.n_positions, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=(self.hidden_size // self.num_attention_heads), num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor)
return (config, input_ids, input_mask, token_labels)
def create_and_check_t5_model(self, config, input_ids, input_mask, token_labels):
model = TFT5Model(config=config)
inputs = {'encoder_input_ids': input_ids, 'decoder_input_ids': input_ids, 'decoder_attention_mask': input_mask}
(encoder_output, decoder_output) = model(inputs)
(encoder_output, decoder_output) = model(input_ids, decoder_attention_mask=input_mask, encoder_input_ids=input_ids)
result = {'encoder_output': encoder_output.numpy(), 'decoder_output': decoder_output.numpy()}
self.parent.assertListEqual(list(result['encoder_output'].shape), [self.batch_size, self.seq_length, self.hidden_size])
self.parent.assertListEqual(list(result['decoder_output'].shape), [self.batch_size, self.seq_length, self.hidden_size])
def create_and_check_t5_with_lm_head(self, config, input_ids, input_mask, token_labels):
model = TFT5WithLMHeadModel(config=config)
inputs = {'encoder_input_ids': input_ids, 'decoder_input_ids': input_ids, 'decoder_attention_mask': input_mask}
(prediction_scores, decoder_output) = model(inputs)
result = {'prediction_scores': prediction_scores.numpy()}
self.parent.assertListEqual(list(result['prediction_scores'].shape), [self.batch_size, self.seq_length, self.vocab_size])
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, input_mask, token_labels) = config_and_inputs
inputs_dict = {'encoder_input_ids': input_ids, 'decoder_input_ids': input_ids, 'decoder_attention_mask': input_mask}
return (config, inputs_dict)
def setUp(self):
self.model_tester = TFT5ModelTest.TFT5ModelTester(self)
self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_t5_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_t5_model(*config_and_inputs)
def test_with_lm_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_t5_with_lm_head(*config_and_inputs)
def test_model_from_pretrained(self):
for model_name in ['t5-small']:
model = TFT5Model.from_pretrained(model_name, cache_dir=CACHE_DIR)
self.assertIsNotNone(model) |
def make_pass_decorator(object_type, ensure=False):
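# Click-style pass decorator: the wrapped callback receives the context object of the requested type (creating it first when ensure=True) as its leading argument.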
def decorator(f):
def new_func(*args, **kwargs):
ctx = get_current_context()
if ensure:
obj = ctx.ensure_object(object_type)
else:
obj = ctx.find_object(object_type)
if (obj is None):
raise RuntimeError("Managed to invoke callback without a context object of type '{}' existing".format(object_type.__name__))
return ctx.invoke(f, obj, *args, **kwargs)
return update_wrapper(new_func, f)
return decorator |
def get_models_from_hist(hist_idx, hist, input_state, output_state, state_space, model_compile_dict):
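# rebuild each architecture from the layer tokens (columns L1..Ln) stored in the history dataframe, then restore its best weights from the corresponding trial directory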
model_dict = {}
for idx in hist_idx:
model_state_str = [hist.iloc[idx][('L%i' % (i + 1))] for i in range((hist.shape[1] - 5))]
model_dict[idx] = model_fn.build_sequential_model_from_string(model_state_str, input_state, output_state, state_space, model_compile_dict)
model_dict[idx].load_weights(('%s/weights/trial_%i/bestmodel.h5' % (hist.iloc[idx].dir, hist.iloc[idx].ID)))
return model_dict |
@app.task(ignore_result=True)
def crawl_person_infos_not_in_seed_ids(uid):
if (not uid):
return
get_user_profile(uid) |
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = (ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states |
def check_slate_bandit_feedback(bandit_feedback: BanditFeedback, is_factorizable: bool=False):
pscore_columns: List[str] = []
pscore_candidate_columns = ['pscore_cascade', 'pscore', 'pscore_item_position']
for column in pscore_candidate_columns:
if ((column in bandit_feedback) and (bandit_feedback[column] is not None)):
pscore_columns.append(column)
assert (len(pscore_columns) > 0), f'bandit feedback must contain at least one of the following pscore columns: {pscore_candidate_columns}'
bandit_feedback_df = pd.DataFrame()
for column in (['slate_id', 'position', 'action'] + pscore_columns):
bandit_feedback_df[column] = bandit_feedback[column]
bandit_feedback_df = bandit_feedback_df.sort_values(['slate_id', 'position']).reset_index(drop=True).copy()
assert (bandit_feedback_df.duplicated(['slate_id', 'position']).sum() == 0), '`position` must not be duplicated in each slate'
assert ((bandit_feedback_df.duplicated(['slate_id', 'action']).sum() == 0) if (not is_factorizable) else True), 'action must not be duplicated in each slate'
for column in pscore_columns:
invalid_pscore_flgs = ((bandit_feedback_df[column] < 0) | (bandit_feedback_df[column] > 1))
assert (invalid_pscore_flgs.sum() == 0), 'the range of pscores must be [0, 1]'
if (('pscore_cascade' in pscore_columns) and ('pscore' in pscore_columns)):
assert ((bandit_feedback_df['pscore_cascade'] < bandit_feedback_df['pscore']).sum() == 0), 'pscore must be smaller than or equal to pscore_cascade'
if (('pscore_item_position' in pscore_columns) and ('pscore' in pscore_columns)):
assert ((bandit_feedback_df['pscore_item_position'] < bandit_feedback_df['pscore']).sum() == 0), 'pscore must be smaller than or equal to pscore_item_position'
if (('pscore_item_position' in pscore_columns) and ('pscore_cascade' in pscore_columns)):
assert ((bandit_feedback_df['pscore_item_position'] < bandit_feedback_df['pscore_cascade']).sum() == 0), 'pscore_cascade must be smaller than or equal to pscore_item_position'
if ('pscore_cascade' in pscore_columns):
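# pscore_cascade accumulates per-position probabilities (each <= 1), so within a slate it should only shrink; compare each value against the running prefix minimum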
previous_minimum_pscore_cascade = bandit_feedback_df.groupby('slate_id')['pscore_cascade'].expanding().min().values
assert ((previous_minimum_pscore_cascade < bandit_feedback_df['pscore_cascade']).sum() == 0), 'pscore_cascade must be a non-increasing sequence in each slate'
if ('pscore' in pscore_columns):
count_pscore_in_expression = bandit_feedback_df.groupby('slate_id').apply((lambda x: x['pscore'].unique().shape[0]))
assert ((count_pscore_in_expression != 1).sum() == 0), '`pscore` must be unique in each slate'
if (('pscore' in pscore_columns) and ('pscore_cascade' in pscore_columns)):
last_slot_feedback_df = bandit_feedback_df.drop_duplicates('slate_id', keep='last')
assert ((last_slot_feedback_df['pscore'] != last_slot_feedback_df['pscore_cascade']).sum() == 0), 'pscore must be the same as pscore_cascade in the last slot' |
def collate(batch):
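# transpose a list of (topic_entity, question, answer, entity_range) samples into batched tensors; question dicts are concatenated field by field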
batch = list(zip(*batch))
(topic_entity, question, answer, entity_range) = batch
topic_entity = torch.stack(topic_entity)
question = {k: torch.cat([q[k] for q in question], dim=0) for k in question[0]}
answer = torch.stack(answer)
entity_range = torch.stack(entity_range)
return (topic_entity, question, answer, entity_range) |
def plot_distribution(counter):
import numpy as np
import matplotlib.pyplot as plt
(labels, values) = zip(*counter.items())
indexes = np.arange(len(labels))
import seaborn as sns
sns.set(color_codes=True)
sns.distplot(values)
plt.show() |
class Vggish(nn.Module):
args = {'postprocess': False}
output_dims = 128
model_tag = {'name': 'VGGish', 'dataset': 'YouTube-8M'}
def __init__(self, args):
super().__init__()
torch.hub.set_dir(str(args.data.cache_dir))
self.model = torch.hub.load('harritaylor/torchvggish', 'vggish')
self.model.postprocess = args.postprocess
self.model.preprocess = False
@classmethod
def download(cls, args):
torch.hub.set_dir(str(args.data.cache_dir))
model = torch.hub.load('harritaylor/torchvggish', 'vggish')
return model
def get_preprocessor(self):
return preprocess
def forward(self, data):
B = data.shape[0]
data = rearrange(data, 'b n c h w -> (b n) c h w')
data = self.model.forward(data)
data = rearrange(data, '(b n) c -> b n c', b=B)
data = data.mean(dim=1)
return data |
class TestCloneNet(test_util.TestCase):
def testPartialClone(self):
params = core.Net('params')
p1 = params.ConstantFill([], ['p1'])
workspace.CreateNet(params)
workspace.RunNetOnce(params)
n = core.Net('original')
a1 = n.AddExternalInput('a1')
a2 = n.AddExternalInput('a2')
(b1, b2) = n.Concat([a1, a2], ['b1', 'b2'], axis=0)
c1 = n.Sum([b1, p1], ['c1'])
c2 = n.Sum([b2], ['c2'])
d = n.Sum([c1, c2], ['d'])
n.AddGradientOperators([d])
k = n.Sum([p1], ['k'])
e = n.Sum([d], ['e'])
e = n.Sum([e, k], [e])
e = n.Sum([e], [e])
f = n.Sum(e, ['f'])
def net_assert(net, num_ops, inputs, outputs, internals):
self.assertEqual(len(net.Proto().op), num_ops)
self.assertEqual(set(net.Proto().external_input), inputs)
self.assertEqual(set(net.Proto().external_output), outputs)
all_blobs = set(net.Proto().external_input)
all_blobs |= set(net.Proto().external_output)
for op in net.Proto().op:
all_blobs |= (set(op.input) | set(op.output))
self.assertEqual(all_blobs, ((inputs | outputs) | internals))
for input in inputs:
workspace.FeedBlob(input, np.array([]))
workspace.CreateNet(net)
(n2, (d22,)) = n.ClonePartial('f1', {a1: 'a11', a2: 'a22'}, [d])
net_assert(n2, 4, {'p1', 'a11', 'a22'}, {'f1/d'}, {'f1/b1', 'f1/b2', 'f1/c1', 'f1/c2', 'p1'})
self.assertTrue(isinstance(d22, core.BlobReference))
self.assertEqual(d22.Net(), n2)
self.assertEqual(str(d22), 'f1/d')
(n3, (d22,)) = n.ClonePartial('f2', [b1, b2], [d])
net_assert(n3, 3, {'p1', 'b1', 'b2'}, {'f2/d'}, {'f2/c1', 'f2/c2', 'p1'})
self.assertEqual(str(d22), 'f2/d')
(n4, (c22,)) = n.ClonePartial('f3', [b1], [c1])
net_assert(n4, 1, {'p1', 'b1'}, {'f3/c1'}, {'p1'})
self.assertEqual(str(c22), 'f3/c1')
(n5, (c11, c22)) = n.ClonePartial('f4', [b1, b2], [c1, c2])
net_assert(n5, 2, {'p1', 'b1', 'b2'}, {'f4/c1', 'f4/c2'}, {'p1'})
self.assertEqual(str(c11), 'f4/c1')
self.assertEqual(str(c22), 'f4/c2')
with self.assertRaises(AssertionError):
n.ClonePartial('f4', [a1, a2, c2], [d])
(n6, (e22,)) = n.ClonePartial('f5', [d], [e])
net_assert(n6, 4, {'p1', 'd'}, {'f5/e'}, {'f5/k', 'p1'})
self.assertEqual(str(e22), 'f5/e')
(n8, (e22, f22)) = n.ClonePartial('f7', [d], [e, f])
net_assert(n8, 5, {'p1', 'd'}, {'f7/e', 'f7/f'}, {'p1', 'f7/k'})
self.assertEqual(str(e22), 'f7/e')
self.assertEqual(str(f22), 'f7/f')
params._CheckLookupTables()
n._CheckLookupTables()
def test_mask_clone_update_external_list(self):
n = core.Net('original')
a1 = n.AddExternalInput('a1')
a2 = n.AddExternalInput('a2')
p1 = 'p1'
(b1, b2) = n.Concat([a1, a2], ['b1', 'b2'], axis=0)
c1 = n.Sum([b1, p1], ['c1'])
c2 = n.Sum([b2], ['c2'])
n.Sum([c1, c2], ['d'])
new_net = n.Clone('new', op_id_mask=[0, 1], keep_schema=True, update_external_list=True)
self.assertEqual(sorted(map(str, new_net.external_inputs)), ['a1', 'a2', 'p1'], 'external input not matched')
self.assertEqual(sorted(map(str, new_net.external_outputs)), ['b2', 'c1'], 'external output not matched')
new_net = n.Clone('new2', op_id_mask=[2, 3], keep_schema=True, update_external_list=True)
self.assertEqual(sorted(map(str, new_net.external_inputs)), ['b2', 'c1'], 'external input not matched')
self.assertEqual(sorted(map(str, new_net.external_outputs)), ['d'], 'external output not matched') |
class GeneralizedReedSolomonCode(AbstractLinearCode):
_registered_encoders = {}
_registered_decoders = {}
def __init__(self, evaluation_points, dimension, column_multipliers=None):
if column_multipliers:
if (len(evaluation_points) != len(column_multipliers)):
raise ValueError('There must be the same number of evaluation points as column multipliers')
try:
common_points = vector((list(evaluation_points) + list(column_multipliers)))
F = common_points.base_ring()
self._evaluation_points = common_points[:len(evaluation_points)]
self._column_multipliers = common_points[len(evaluation_points):]
except (TypeError, ValueError) as e:
raise ValueError(('Failed converting all evaluation points and column multipliers to the same field (%s)' % e))
else:
try:
self._evaluation_points = vector(evaluation_points)
F = self._evaluation_points.base_ring()
self._column_multipliers = vector(F, ([F.one()] * len(self._evaluation_points)))
except (TypeError, ValueError) as e:
raise ValueError(('Failed converting all evaluation points to the same field (%s)' % e))
if ((not F.is_finite()) or (not F.is_field())):
raise ValueError(('Evaluation points must be in a finite field (and %s is not one)' % F))
super().__init__(F, len(self._evaluation_points), 'EvaluationVector', 'Gao')
if ((dimension not in ZZ) or (dimension > self._length) or (dimension < 1)):
raise ValueError('The dimension must be a positive integer at most the length of the code.')
self._dimension = dimension
if (F.zero() in self._column_multipliers):
raise ValueError('All column multipliers must be non-zero')
if (len(self._evaluation_points) != len(set(self._evaluation_points))):
raise ValueError('All evaluation points must be different')
def __eq__(self, other):
return (isinstance(other, GeneralizedReedSolomonCode) and (self.base_field() == other.base_field()) and (self.length() == other.length()) and (self.dimension() == other.dimension()) and (self.evaluation_points() == other.evaluation_points()) and (self.column_multipliers() == other.column_multipliers()))
def __hash__(self):
return hash((self.base_field(), self.length(), self.dimension(), tuple(self.evaluation_points()), tuple(self.column_multipliers())))
def _repr_(self):
return ('[%s, %s, %s] %sReed-Solomon Code over GF(%s)' % (self.length(), self.dimension(), self.minimum_distance(), ('Generalized ' if self.is_generalized() else ''), self.base_field().cardinality()))
def _latex_(self):
return ('[%s, %s, %s] \\textnormal{ %sReed-Solomon Code over } %s' % (self.length(), self.dimension(), self.minimum_distance(), ('Generalized ' if self.is_generalized() else ''), self.base_field()._latex_()))
def minimum_distance(self):
return ((self.length() - self.dimension()) + 1)
def evaluation_points(self):
return self._evaluation_points
def column_multipliers(self):
return self._column_multipliers
def is_generalized(self):
return (not all((beta.is_one() for beta in self.column_multipliers())))
    @cached_method
def multipliers_product(self):
a = self.evaluation_points()
one = self.base_ring().one()
return [(one / prod(((ai - ah) for (h, ah) in enumerate(a) if (h != i)))) for (i, ai) in enumerate(a)]
    @cached_method
def parity_column_multipliers(self):
n = self.length()
col_mults = self.column_multipliers()
etas = self.multipliers_product()
return [(etas[i] / col_mults[i]) for i in range(n)]
    @cached_method
def parity_check_matrix(self):
return self.dual_code().generator_matrix()
    @cached_method
def dual_code(self):
col_mults = self.parity_column_multipliers()
return GeneralizedReedSolomonCode(self.evaluation_points(), (self.length() - self.dimension()), col_mults)
def covering_radius(self):
return (self.length() - self.dimension())
    @cached_method
def weight_distribution(self):
from sage.symbolic.ring import SR
from sage.functions.other import binomial
d = self.minimum_distance()
n = self.length()
q = self.base_ring().order()
s = SR.var('s')
wd = ([1] + ([0] * (d - 1)))
for i in range(d, (n + 1)):
tmp = (binomial(n, i) * (q - 1))
wd.append((tmp * symbolic_sum(((binomial((i - 1), s) * ((- 1) ** s)) * (q ** ((i - d) - s))), s, 0, (i - d))))
return wd
def _punctured_form(self, points):
if (not isinstance(points, (Integer, int, set))):
raise TypeError('points must be either a Sage Integer, a Python int, or a set')
alphas = list(self.evaluation_points())
col_mults = list(self.column_multipliers())
n = self.length()
punctured_alphas = []
punctured_col_mults = []
punctured_alphas = [alphas[i] for i in range(n) if (i not in points)]
punctured_col_mults = [col_mults[i] for i in range(n) if (i not in points)]
G = self.generator_matrix()
G = G.delete_columns(list(points))
dimension = G.rank()
return GeneralizedReedSolomonCode(punctured_alphas, dimension, punctured_col_mults) |
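A quick sketch of the class in a Sage session (parameters chosen for illustration): GRS codes are MDS, so the reported minimum distance is n - k + 1, and the dual computed above is again a (generalized) Reed-Solomon code.

F = GF(59)
n, k = 40, 12
C = GeneralizedReedSolomonCode(F.list()[:n], k)   # 40 distinct evaluation points in GF(59)
print(C)                      # [40, 12, 29] Reed-Solomon Code over GF(59)
print(C.minimum_distance())   # 29 == n - k + 1 (MDS)
D = C.dual_code()
print(D.dimension())          # 28 == n - k
print(D.is_generalized())     # usually True: the parity column multipliers are non-trivial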
def plot_ls_solution(ax, ls_rmsve, alg, sp):
    lbl = rf'{alg} $\lambda=$ {sp}'  # raw f-string so \lambda is not treated as an escape
x = np.arange(ls_rmsve.shape[0])
y = (ls_rmsve[(- 1)] * np.ones(ls_rmsve.shape[0]))
ax.plot(x, y, label=lbl, linewidth=1.0, color=ALG_COLORS[alg], linestyle=':') |
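A call sketch, assuming everything sits in one module; ALG_COLORS and the RMSVE curve below are hypothetical stand-ins.

import numpy as np
import matplotlib.pyplot as plt

ALG_COLORS = {'TD': 'tab:blue'}            # hypothetical palette keyed by algorithm name
fig, ax = plt.subplots()
ls_rmsve = np.linspace(0.5, 0.1, 200)      # fake per-step RMSVE of the least-squares solution
plot_ls_solution(ax, ls_rmsve, 'TD', 0.9)  # dotted horizontal line at the final LS value
ax.legend()
plt.show()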
def test_fortranfiles_write():
for filename in iglob(path.join(DATA_PATH, 'fortran-*-*x*x*.dat')):
m = re.search('fortran-([^-]+)-(\\d+)x(\\d+)x(\\d+).dat', filename, re.I)
if (not m):
raise RuntimeError(("Couldn't match %s filename to regex" % filename))
dims = (int(m.group(2)), int(m.group(3)), int(m.group(4)))
dtype = m.group(1).replace('s', '<')
data = np.arange(np.prod(dims)).reshape(dims).astype(dtype)
tmpdir = tempfile.mkdtemp()
try:
testFile = path.join(tmpdir, path.basename(filename))
f = FortranFile(testFile, 'w', '<u4')
f.write_record(data.T)
f.close()
originalfile = open(filename, 'rb')
newfile = open(testFile, 'rb')
assert_equal(originalfile.read(), newfile.read(), err_msg=filename)
originalfile.close()
newfile.close()
finally:
shutil.rmtree(tmpdir) |
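The round-trip being tested, in miniature: a sketch that writes one unformatted record with scipy.io.FortranFile and reads it back (temporary path chosen arbitrarily).

import os
import tempfile
import numpy as np
from scipy.io import FortranFile

data = np.arange(24, dtype='<f8').reshape(2, 3, 4)
tmp = os.path.join(tempfile.mkdtemp(), 'roundtrip.dat')
f = FortranFile(tmp, 'w', '<u4')
f.write_record(data.T)        # transpose so the file is column-major, as Fortran expects
f.close()
f = FortranFile(tmp, 'r', '<u4')
back = f.read_reals('<f8').reshape(data.shape[::-1]).T
f.close()
assert np.array_equal(data, back)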
@validate_params({'scoring': [str, callable, None]}, prefer_skip_nested_validation=True)
def get_scorer(scoring):
if isinstance(scoring, str):
try:
scorer = copy.deepcopy(_SCORERS[scoring])
except KeyError:
raise ValueError(('%r is not a valid scoring value. Use sklearn.metrics.get_scorer_names() to get valid options.' % scoring))
else:
scorer = scoring
return scorer |
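Usage sketch with the public scikit-learn API the snippet implements:

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import get_scorer

X, y = make_classification(random_state=0)
clf = LogisticRegression(max_iter=1000).fit(X, y)
scorer = get_scorer('accuracy')   # string names resolve to a deep copy of the registered scorer
print(scorer(clf, X, y))          # scorers are called as scorer(estimator, X, y)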
class GANLoss(nn.Module):
def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):
super(GANLoss, self).__init__()
self.gan_type = gan_type.lower()
self.real_label_val = real_label_val
self.fake_label_val = fake_label_val
if ((self.gan_type == 'gan') or (self.gan_type == 'ragan')):
self.loss = nn.BCEWithLogitsLoss()
elif (self.gan_type == 'lsgan'):
self.loss = nn.MSELoss()
elif (self.gan_type == 'wgan-gp'):
def wgan_loss(input, target):
return (((- 1) * input.mean()) if target else input.mean())
self.loss = wgan_loss
else:
raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))
def get_target_label(self, input, target_is_real):
if (self.gan_type == 'wgan-gp'):
return target_is_real
if target_is_real:
return torch.empty_like(input).fill_(self.real_label_val)
else:
return torch.empty_like(input).fill_(self.fake_label_val)
def forward(self, input, target_is_real):
target_label = self.get_target_label(input, target_is_real)
loss = self.loss(input, target_label)
return loss |
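A minimal call sketch (random stand-in logits) showing the two main branches: 'gan' targets a filled label tensor through BCE-with-logits, while 'wgan-gp' only flips the sign of the mean.

import torch

criterion = GANLoss('gan')                 # BCEWithLogitsLoss behind the scenes
d_real = torch.randn(8, 1)                 # hypothetical discriminator logits
d_fake = torch.randn(8, 1)
loss_d = criterion(d_real, True) + criterion(d_fake, False)

wgan = GANLoss('wgan-gp')                  # here target_is_real just selects the sign
loss_w = wgan(d_real, True) + wgan(d_fake, False)
print(loss_d.item(), loss_w.item())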
def build_bbh_fewshot_dataset(dataset_name, folder):
    assert (dataset_name in ALL_BBH_TEMPLATES)
    prompt_templates = ALL_BBH_TEMPLATES[dataset_name]
    for (template_idx, prompt_template) in enumerate(prompt_templates):
        print('Current prompt template: ', prompt_template)
        template = Template(prompt_template)
        data_file = f'data_generator/bbh/{dataset_name}.json'
        with open(data_file, 'r') as fs:
            dataset = json.load(fs)
        examples = dataset['examples']
        # makedirs creates intermediate directories, so one call covers both levels
        os.makedirs(f'{folder}/bbh/{dataset_name}', exist_ok=True)
        with open(f'{folder}/bbh/{dataset_name}/{dataset_name}_zero_template_{template_idx}.json', 'w') as write_f:
            for example in examples:
                template_input = template.render(inputs=example['input'])
                write_f.write((json.dumps({'input': template_input, 'output': example['target']}) + '\n')) |
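Template here is jinja2-style; a minimal render sketch (hypothetical template string):

from jinja2 import Template

template = Template('Q: {{ inputs }}\nA:')   # hypothetical BBH-style prompt
print(template.render(inputs='not ( True ) and ( True ) is'))
# Q: not ( True ) and ( True ) is
# A: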
def get_regularization(gptq_config: GradientPTQConfig, representative_data_gen: Callable) -> Callable:
if (gptq_config.rounding_type == RoundingType.SoftQuantizer):
num_batches = 0
for _ in representative_data_gen():
num_batches += 1
        if type(gptq_config) == GradientPTQConfigV2:
            n_epochs = gptq_config.n_epochs
        else:
            # derive the epoch count from the v1 config and the counted batches
            n_epochs = GradientPTQConfigV2.from_v1(n_ptq_iter=num_batches, config_v1=gptq_config).n_epochs
        return SoftQuantizerRegularization(total_gradient_steps=(num_batches * n_epochs))
else:
return (lambda m, e_reg: 0) |