code stringlengths 101 5.91M |
|---|
def register_Ns3LteRrcSapCarrierFreqEutra_methods(root_module, cls):
    """Register Python-binding hooks for ns3::LteRrcSap::CarrierFreqEutra on *cls*."""
    # Default constructor and copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LteRrcSap::CarrierFreqEutra const &', 'arg0')])
    # Downlink/uplink carrier frequencies are plain mutable uint16_t fields.
    for attribute_name in ('dlCarrierFreq', 'ulCarrierFreq'):
        cls.add_instance_attribute(attribute_name, 'uint16_t', is_const=False)
    return
def build_backbone(cfg):
    """Look up the backbone named by cfg.MODEL.BACKBONE.CONV_BODY and build it."""
    conv_body = cfg.MODEL.BACKBONE.CONV_BODY
    assert conv_body in registry.BACKBONES, \
        'cfg.MODEL.BACKBONE.CONV_BODY: {} are not registered in registry'.format(conv_body)
    builder = registry.BACKBONES[conv_body]
    return builder(cfg)
def train(args, model, train_sampler, valid_samplers=None, test_samplers=None, rank=0, rel_parts=None, cross_rels=None, barrier=None, client=None):
    """Main KGE training loop for one worker process.

    Args:
        args: parsed CLI namespace (gpu, max_step, log_interval, valid, ...).
        model: knowledge-graph-embedding model exposing forward/update/push/pull.
        train_sampler: iterator yielding (pos_graph, neg_graph) batch pairs.
        valid_samplers/test_samplers: optional samplers for periodic evaluation.
        rank: worker index; selects the GPU and tags log lines.
        rel_parts/cross_rels: relation-partition data for partitioned training.
        barrier: optional multiprocessing barrier to synchronize workers.
        client: optional kvstore client; when set, gradients are pushed to a
            parameter server instead of applied locally.
    """
    logs = []
    # Dump the full configuration once per worker for reproducibility.
    for arg in vars(args):
        logging.info('{:20}:{}'.format(arg, getattr(args, arg)))
    # GPU selection: round-robin over args.gpu in mixed multi-process mode,
    # otherwise the first listed device; -1 means CPU.
    if (len(args.gpu) > 0):
        gpu_id = (args.gpu[(rank % len(args.gpu))] if (args.mix_cpu_gpu and (args.num_proc > 1)) else args.gpu[0])
    else:
        gpu_id = (- 1)
    if args.async_update:
        model.create_async_update()
    # Move this worker's relation partition onto its device.
    if (args.strict_rel_part or args.soft_rel_part):
        model.prepare_relation(th.device(('cuda:' + str(gpu_id))))
    if args.soft_rel_part:
        model.prepare_cross_rels(cross_rels)
    # Text encoders get their own Adam optimizer; embedding tables are updated
    # through model.update()/push_gradient() instead.
    if (args.encoder_model_name in ['roberta', 'concat']):
        model.transform_net = model.transform_net.to(th.device(('cuda:' + str(gpu_id))))
        optimizer = th.optim.Adam(model.transform_net.parameters(), args.mlp_lr)
    else:
        optimizer = None
    train_start = start = time.time()
    # Per-interval wall-clock accounting, reset after every log report.
    sample_time = 0
    update_time = 0
    forward_time = 0
    backward_time = 0
    for step in range(0, args.max_step):
        start1 = time.time()
        (pos_g, neg_g) = next(train_sampler)
        sample_time += (time.time() - start1)
        # Distributed mode: fetch the latest embeddings touched by this batch.
        if (client is not None):
            model.pull_model(client, pos_g, neg_g)
        start1 = time.time()
        if (optimizer is not None):
            optimizer.zero_grad()
        (loss, log) = model.forward(pos_g, neg_g, gpu_id)
        forward_time += (time.time() - start1)
        start1 = time.time()
        loss.backward()
        backward_time += (time.time() - start1)
        start1 = time.time()
        # Apply gradients: push to the parameter server when distributed,
        # otherwise update the local embedding tables directly.
        if (client is not None):
            model.push_gradient(client)
        else:
            model.update(gpu_id)
        if (optimizer is not None):
            optimizer.step()
        update_time += (time.time() - start1)
        logs.append(log)
        # Optional periodic hard synchronization across workers.
        if ((args.force_sync_interval > 0) and (((step + 1) % args.force_sync_interval) == 0)):
            barrier.wait()
        if (((step + 1) % args.log_interval) == 0):
            # In distributed mode only machine 0 prints training statistics.
            if ((client is not None) and (client.get_machine_id() != 0)):
                pass
            else:
                for k in logs[0].keys():
                    v = (sum((l[k] for l in logs)) / len(logs))
                    logging.info('[proc {}][Train]({}/{}) average {}: {}'.format(rank, (step + 1), args.max_step, k, v))
                logs = []
                logging.info('[proc {}][Train] {} steps take {:.3f} seconds'.format(rank, args.log_interval, (time.time() - start)))
                logging.info('[proc {}]sample: {:.3f}, forward: {:.3f}, backward: {:.3f}, update: {:.3f}'.format(rank, sample_time, forward_time, backward_time, update_time))
                sample_time = 0
                update_time = 0
                forward_time = 0
                backward_time = 0
                start = time.time()
        # Periodic evaluation; step > 1 avoids evaluating on the very first steps.
        if (args.valid and (((step + 1) % args.eval_interval) == 0) and (step > 1) and (valid_samplers is not None)):
            valid_start = time.time()
            # Write the local relation partition back before evaluating so the
            # shared tables are complete.
            if (args.strict_rel_part or args.soft_rel_part):
                model.writeback_relation(rank, rel_parts)
            if (barrier is not None):
                barrier.wait()
            logging.info('[proc {}]barrier wait in validation take {:.3f} seconds:'.format(rank, (time.time() - valid_start)))
            valid_start = time.time()
            if (valid_samplers is not None):
                valid_input_dict = test(args, model, valid_samplers, step, rank, mode='Valid')
                th.save(valid_input_dict, os.path.join(args.save_path, 'valid_{}_{}.pkl'.format(rank, step)))
            if (test_samplers is not None):
                test_input_dict = test(args, model, test_samplers, step, rank, mode='Test')
                th.save(test_input_dict, os.path.join(args.save_path, 'test_{}_{}.pkl'.format(rank, step)))
            logging.info('[proc {}]validation and test take {:.3f} seconds:'.format(rank, (time.time() - valid_start)))
            if args.soft_rel_part:
                model.prepare_cross_rels(cross_rels)
            if (barrier is not None):
                barrier.wait()
    print('proc {} takes {:.3f} seconds'.format(rank, (time.time() - train_start)))
    # NOTE(review): fixed sleep presumably lets in-flight async updates drain
    # before saving — confirm whether a proper join/flush exists upstream.
    time.sleep(10)
    if ((rank == 0) and (not args.no_save_emb)):
        save_model(args, model, None, None)
        print('proc {} model saved'.format(rank))
    if (barrier is not None):
        barrier.wait()
    print('proc {} after barrier'.format(rank))
    if args.async_update:
        model.finish_async_update()
    print('proc {} finish async update'.format(rank))
    if (args.strict_rel_part or args.soft_rel_part):
        model.writeback_relation(rank, rel_parts)
    print('proc {} return'.format(rank))
def parse_argv(parser):
    """Attach the embedding-download command-line options to *parser*."""
    parser.add_argument('--seed', type=int, default=123,
                        help='Random seed.')
    parser.add_argument('-d', '--destdir', type=str, default='.embeddings/',
                        help='where to save embeddings.')
    parser.add_argument('--embeddings', required=True,
                        help='which embeddings to download')
class ResNet(nn.Module):
def __init__(self, block, num_blocks, in_channels=1, num_classes=2):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.linear = nn.Linear((512 * block.expansion), num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = ([stride] + ([1] * (num_blocks - 1)))
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = (planes * block.expansion)
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.avgpool(out)
out = out.view(out.size(0), (- 1))
out = self.linear(out)
return out |
def train_sequential(model, train_loader, val_loader, max_epochs=200, frequency=2, patience=5, model_path='saved_model', full_config_dict={}):
    """Two-phase training: first the encoder with CE loss, then the normalizing
    flow with the model's own density loss while the encoder stays frozen.

    Returns the concatenated (train losses, val losses, train accuracies,
    val accuracies) histories of both phases.
    """
    # Remember the model's configured density loss before overriding it.
    density_loss = model.loss
    common_kwargs = dict(max_epochs=max_epochs, frequency=frequency, patience=patience,
                         model_path=model_path, full_config_dict=full_config_dict)

    print('### Encoder training ###')
    model.loss = 'CE'
    model.no_density = True
    train_losses_1, val_losses_1, train_accuracies_1, val_accuracies_1 = train(
        model, train_loader, val_loader, **common_kwargs)

    print('### Normalizing Flow training ###')
    # Reload the best encoder checkpoint, then freeze it for the flow phase.
    model.load_state_dict(torch.load(f'{model_path}')['model_state_dict'])
    for param in model.sequential.parameters():
        param.requires_grad = False
    model.loss = density_loss
    model.no_density = False
    train_losses_2, val_losses_2, train_accuracies_2, val_accuracies_2 = train(
        model, train_loader, val_loader, **common_kwargs)

    return (train_losses_1 + train_losses_2,
            val_losses_1 + val_losses_2,
            train_accuracies_1 + train_accuracies_2,
            val_accuracies_1 + val_accuracies_2)
def split_list(array, split_factors):
    """Shuffle *array* in place and partition it into len(split_factors) chunks.

    Chunk ``i`` receives roughly ``len(array) * split_factors[i]`` elements;
    the final chunk absorbs any remainder left by integer truncation, so every
    element lands in exactly one chunk.

    Args:
        array: mutable sequence; shuffled in place via ``np.random.shuffle``.
        split_factors: fractions that must sum to one (checked to 6 decimals).

    Returns:
        List of consecutive slices of the shuffled array.

    Note:
        The original implementation reset the running offset with ``indx = i``
        instead of accumulating it, which made chunks overlap/duplicate for
        three or more unequal splits, and truncation could silently drop the
        trailing elements. Both defects are fixed here.
    """
    assert round(sum(split_factors), 6) == 1, 'split_factors should sum to one'
    np.random.shuffle(array)
    sizes = [int(len(array) * x) for x in split_factors]
    out = []
    start = 0
    for k, size in enumerate(sizes):
        # The last chunk runs to the end so rounding never loses elements.
        end = len(array) if k == len(sizes) - 1 else start + size
        out.append(array[start:end])
        start = end
    return out
def load_db_data_to_data_frame(datasource, select):
    """Run *select* against *datasource* and return the rows as a pandas DataFrame.

    Columns whose DB type is VARCHAR/CHAR/TEXT/STRING are cast to ``str``;
    all other columns are cast to ``np.float64``.

    Args:
        datasource: connection string understood by ``db.connect_with_data_source``.
        select: SQL SELECT statement to execute.

    Returns:
        A DataFrame with one row per fetched sample, dtypes per the rule above.
    """
    conn = db.connect_with_data_source(datasource)
    try:
        # n=-1 fetches the complete result set.
        generator = verifier.fetch_samples(conn, select, n=(- 1))
        names = generator.field_names
        # np.str was a deprecated alias of the builtin str and was removed in
        # NumPy 1.24; using str keeps behavior and works on modern NumPy.
        dtypes = [str if dtype in ('VARCHAR', 'CHAR', 'TEXT', 'STRING') else np.float64
                  for dtype in generator.field_types]
        df = pd.DataFrame(columns=names)
        for (i, rows) in enumerate(generator()):
            df.loc[i] = rows
        for (name, dtype) in zip(names, dtypes):
            df[name] = df[name].astype(dtype)
    finally:
        # Always release the connection, even if fetching or casting raises.
        conn.close()
    return df
def get_weights_quantizer_for_node(node: BaseNode) -> BaseKerasInferableQuantizer:
    """Build the inferable Keras weights quantizer matching *node*'s final
    weights quantization configuration."""
    if node.final_weights_quantization_cfg is None:
        Logger.critical(f'Can not set quantizer for a node with no final weights quantization configuration')
    node_w_qc = node.final_weights_quantization_cfg
    # Resolve the concrete quantizer class for the configured method, then
    # instantiate it with the kwargs derived from the node's config.
    quantizer_cls = get_inferable_quantizer_class(QuantizationTarget.Weights,
                                                 node_w_qc.weights_quantization_method,
                                                 BaseKerasInferableQuantizer)
    kwargs = get_inferable_quantizer_kwargs(node_w_qc, QuantizationTarget.Weights)
    return quantizer_cls(**kwargs)
def merge_dict(dicts: Sequence[dict], merge_fn: Callable=(lambda *args: args)) -> dict:
    """Merge *dicts* key-wise: for every key of the first dict, collect that
    key's value from each dict and combine the list with *merge_fn*.

    With the default merge_fn the collected list is packed into a 1-tuple.
    An empty sequence yields an empty dict.
    """
    if not dicts:
        return {}
    merged = {}
    for key in dicts[0].keys():
        merged[key] = merge_fn([single_dict[key] for single_dict in dicts])
    return merged
class Struc2Vec():
    """struc2vec: structural-role node embeddings.

    Builds a multi-layer context graph from structural (degree-sequence)
    distances between nodes, runs biased random walks over it, and trains
    Word2Vec on the walks. Intermediate artifacts are pickled under
    *temp_path* so repeated runs can reuse them.
    """

    def __init__(self, graph, walk_length=10, num_walks=100, workers=1, verbose=0, stay_prob=0.3, opt1_reduce_len=True, opt2_reduce_sim_calc=True, opt3_num_layers=None, temp_path='./temp_struc2vec/', reuse=False):
        self.graph = graph
        self.idx2node, self.node2idx = preprocess_nxgraph(graph)
        self.idx = list(range(len(self.idx2node)))
        # opt1: compress degree sequences to (degree, count) pairs.
        self.opt1_reduce_len = opt1_reduce_len
        # opt2: only compare each node to degree-similar candidate vertices.
        self.opt2_reduce_sim_calc = opt2_reduce_sim_calc
        # opt3: cap on the number of context-graph layers (None = unlimited).
        self.opt3_num_layers = opt3_num_layers
        # NOTE(review): 'resue' is a typo of 'reuse'; kept as-is since nothing
        # in this view reads it — confirm before renaming.
        self.resue = reuse
        self.temp_path = temp_path
        if (not os.path.exists(self.temp_path)):
            os.mkdir(self.temp_path)
        # When not reusing, wipe any stale artifacts from a previous run.
        if (not reuse):
            shutil.rmtree(self.temp_path)
            os.mkdir(self.temp_path)
        self.create_context_graph(self.opt3_num_layers, workers, verbose)
        self.prepare_biased_walk()
        self.walker = BiasedWalker(self.idx2node, self.temp_path)
        self.sentences = self.walker.simulate_walks(num_walks, walk_length, stay_prob, workers, verbose)
        self._embeddings = {}

    def create_context_graph(self, max_num_layers, workers=1, verbose=0):
        """Build per-layer adjacency and transition tables and pickle them."""
        pair_distances = self._compute_structural_distance(max_num_layers, workers, verbose)
        layers_adj, layers_distances = self._get_layer_rep(pair_distances)
        pd.to_pickle(layers_adj, (self.temp_path + 'layers_adj.pkl'))
        layers_accept, layers_alias = self._get_transition_probs(layers_adj, layers_distances)
        pd.to_pickle(layers_alias, (self.temp_path + 'layers_alias.pkl'))
        pd.to_pickle(layers_accept, (self.temp_path + 'layers_accept.pkl'))

    def prepare_biased_walk(self):
        """Compute per-layer average edge weight and, per node, the count of
        neighbors above that average (gamma); pickle both for the walker."""
        sum_weights = {}
        sum_edges = {}
        average_weight = {}
        gamma = {}
        layer = 0
        # Iterate layers as long as their normalized-weight pickle exists.
        while os.path.exists((((self.temp_path + 'norm_weights_distance-layer-') + str(layer)) + '.pkl')):
            probs = pd.read_pickle((((self.temp_path + 'norm_weights_distance-layer-') + str(layer)) + '.pkl'))
            for (v, list_weights) in probs.items():
                sum_weights.setdefault(layer, 0)
                sum_edges.setdefault(layer, 0)
                sum_weights[layer] += sum(list_weights)
                sum_edges[layer] += len(list_weights)
            average_weight[layer] = (sum_weights[layer] / sum_edges[layer])
            gamma.setdefault(layer, {})
            # gamma[layer][v]: how many of v's edges exceed the layer average.
            for (v, list_weights) in probs.items():
                num_neighbours = 0
                for w in list_weights:
                    if (w > average_weight[layer]):
                        num_neighbours += 1
                gamma[layer][v] = num_neighbours
            layer += 1
        # NOTE(review): 'average_weight' is pickled without a '.pkl' suffix,
        # unlike the other artifacts — confirm the walker expects that name.
        pd.to_pickle(average_weight, (self.temp_path + 'average_weight'))
        pd.to_pickle(gamma, (self.temp_path + 'gamma.pkl'))

    def train(self, embed_size=128, window_size=5, workers=3, iter=5):
        """Train a skip-gram Word2Vec model on the simulated walks."""
        sentences = self.sentences
        print('Learning representation...')
        # hs=1/sg=1: hierarchical softmax with skip-gram, per the paper.
        model = Word2Vec(sentences, vector_size=embed_size, window=window_size, min_count=0, hs=1, sg=1, workers=workers, epochs=iter)
        print('Learning representation done!')
        self.w2v_model = model
        return model

    def get_embeddings(self):
        """Return {node: embedding vector} from the trained Word2Vec model."""
        # NOTE(review): self.w2v_model is only assigned in train(); calling
        # this before train() raises AttributeError rather than hitting the
        # None check below — confirm intended behavior.
        if (self.w2v_model is None):
            print('model not train')
            return {}
        self._embeddings = {}
        for word in self.graph.nodes():
            self._embeddings[word] = self.w2v_model.wv[word]
        return self._embeddings

    def _compute_ordered_degreelist(self, max_num_layers):
        """Compute the per-node ordered degree sequences (BFS rings)."""
        degreeList = {}
        vertices = self.idx
        for v in vertices:
            degreeList[v] = self._get_order_degreelist_node(v, max_num_layers)
        return degreeList

    def _get_order_degreelist_node(self, root, max_num_layers=None):
        """BFS from *root*; for each hop level collect the (sorted) degrees of
        that ring — compressed to (degree, count) pairs when opt1 is on."""
        if (max_num_layers is None):
            max_num_layers = float('inf')
        ordered_degree_sequence_dict = {}
        visited = ([False] * len(self.graph.nodes()))
        queue = deque()
        level = 0
        queue.append(root)
        visited[root] = True
        while ((len(queue) > 0) and (level <= max_num_layers)):
            # Drain exactly the nodes of the current BFS level.
            count = len(queue)
            if self.opt1_reduce_len:
                degree_list = {}
            else:
                degree_list = []
            while (count > 0):
                top = queue.popleft()
                node = self.idx2node[top]
                degree = len(self.graph[node])
                if self.opt1_reduce_len:
                    degree_list[degree] = (degree_list.get(degree, 0) + 1)
                else:
                    degree_list.append(degree)
                for nei in self.graph[node]:
                    nei_idx = self.node2idx[nei]
                    if (not visited[nei_idx]):
                        visited[nei_idx] = True
                        queue.append(nei_idx)
                count -= 1
            # Sort the level's degrees (by degree when compressed).
            if self.opt1_reduce_len:
                orderd_degree_list = [(degree, freq) for (degree, freq) in degree_list.items()]
                orderd_degree_list.sort(key=(lambda x: x[0]))
            else:
                orderd_degree_list = sorted(degree_list)
            ordered_degree_sequence_dict[level] = orderd_degree_list
            level += 1
        return ordered_degree_sequence_dict

    def _compute_structural_distance(self, max_num_layers, workers=1, verbose=0):
        """Compute (or load cached) pairwise structural distances via DTW over
        the per-layer ordered degree sequences."""
        if os.path.exists((self.temp_path + 'structural_dist.pkl')):
            structural_dist = pd.read_pickle((self.temp_path + 'structural_dist.pkl'))
        else:
            # Distance between (degree, count) pairs vs raw degrees.
            if self.opt1_reduce_len:
                dist_func = cost_max
            else:
                dist_func = cost
            if os.path.exists((self.temp_path + 'degreelist.pkl')):
                degreeList = pd.read_pickle((self.temp_path + 'degreelist.pkl'))
            else:
                degreeList = self._compute_ordered_degreelist(max_num_layers)
                pd.to_pickle(degreeList, (self.temp_path + 'degreelist.pkl'))
            if self.opt2_reduce_sim_calc:
                # opt2: compare each node only to ~log(n) degree-similar nodes.
                degrees = self._create_vectors()
                degreeListsSelected = {}
                vertices = {}
                n_nodes = len(self.idx)
                for v in self.idx:
                    nbs = get_vertices(v, len(self.graph[self.idx2node[v]]), degrees, n_nodes)
                    vertices[v] = nbs
                    degreeListsSelected[v] = degreeList[v]
                    for n in nbs:
                        degreeListsSelected[n] = degreeList[n]
            else:
                # All ordered pairs (v, vd) with vd > v to avoid duplicates.
                vertices = {}
                for v in degreeList:
                    vertices[v] = [vd for vd in degreeList.keys() if (vd > v)]
            results = Parallel(n_jobs=workers, verbose=verbose)((delayed(compute_dtw_dist)(part_list, degreeList, dist_func) for part_list in partition_dict(vertices, workers)))
            dtw_dist = dict(ChainMap(*results))
            structural_dist = convert_dtw_struc_dist(dtw_dist)
            pd.to_pickle(structural_dist, (self.temp_path + 'structural_dist.pkl'))
        return structural_dist

    def _create_vectors(self):
        """Index nodes by degree and link each degree to its sorted neighbors
        ('before'/'after') for opt2 candidate selection."""
        degrees = {}
        degrees_sorted = set()
        G = self.graph
        for v in self.idx:
            degree = len(G[self.idx2node[v]])
            degrees_sorted.add(degree)
            if (degree not in degrees):
                degrees[degree] = {}
                degrees[degree]['vertices'] = []
            degrees[degree]['vertices'].append(v)
        degrees_sorted = np.array(list(degrees_sorted), dtype='int')
        degrees_sorted = np.sort(degrees_sorted)
        l = len(degrees_sorted)
        for (index, degree) in enumerate(degrees_sorted):
            if (index > 0):
                degrees[degree]['before'] = degrees_sorted[(index - 1)]
            if (index < (l - 1)):
                degrees[degree]['after'] = degrees_sorted[(index + 1)]
        return degrees

    def _get_layer_rep(self, pair_distances):
        """Reorganize {(vx, vy): {layer: dist}} into per-layer distance maps
        and per-layer (undirected) adjacency lists."""
        layer_distances = {}
        layer_adj = {}
        for (v_pair, layer_dist) in pair_distances.items():
            for (layer, distance) in layer_dist.items():
                vx = v_pair[0]
                vy = v_pair[1]
                layer_distances.setdefault(layer, {})
                layer_distances[layer][(vx, vy)] = distance
                layer_adj.setdefault(layer, {})
                layer_adj[layer].setdefault(vx, [])
                layer_adj[layer].setdefault(vy, [])
                layer_adj[layer][vx].append(vy)
                layer_adj[layer][vy].append(vx)
        return (layer_adj, layer_distances)

    def _get_transition_probs(self, layers_adj, layers_distances):
        """Turn distances into edge weights w = exp(-d), normalize per node,
        and build alias tables for O(1) sampling; pickle normalized weights."""
        layers_alias = {}
        layers_accept = {}
        for layer in layers_adj:
            neighbors = layers_adj[layer]
            layer_distances = layers_distances[layer]
            node_alias_dict = {}
            node_accept_dict = {}
            norm_weights = {}
            # NOTE: the loop variable shadows the outer 'neighbors' dict.
            for (v, neighbors) in neighbors.items():
                e_list = []
                sum_w = 0.0
                for n in neighbors:
                    # Distances are stored once per unordered pair.
                    if ((v, n) in layer_distances):
                        wd = layer_distances[(v, n)]
                    else:
                        wd = layer_distances[(n, v)]
                    w = np.exp((- float(wd)))
                    e_list.append(w)
                    sum_w += w
                e_list = [(x / sum_w) for x in e_list]
                norm_weights[v] = e_list
                (accept, alias) = create_alias_table(e_list)
                node_alias_dict[v] = alias
                node_accept_dict[v] = accept
            pd.to_pickle(norm_weights, (((self.temp_path + 'norm_weights_distance-layer-') + str(layer)) + '.pkl'))
            layers_alias[layer] = node_alias_dict
            layers_accept[layer] = node_accept_dict
        return (layers_accept, layers_alias)
class TestNet(nn.Module):
    """Plain conv net: four conv->BN->ReLU->pool stages, then a linear head."""

    def __init__(self):
        super(TestNet, self).__init__()
        # Channel progression 3 -> 64 -> 128 -> 256 -> 512, all 3x3 kernels.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3)
        self.bn1 = nn.BatchNorm2d(num_features=64)
        self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3)
        self.bn2 = nn.BatchNorm2d(num_features=128)
        self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3)
        self.bn3 = nn.BatchNorm2d(num_features=256)
        self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3)
        self.bn4 = nn.BatchNorm2d(num_features=512)
        self.linear = nn.Linear(in_features=4608, out_features=1000)
        self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.max_pool2 = nn.MaxPool2d(kernel_size=4, stride=4)
        self.relu = nn.ReLU()

    def forward(self, input):
        out = input
        # First three stages share the 2x2 pool; the final stage pools 4x4.
        stages = ((self.conv1, self.bn1, self.max_pool),
                  (self.conv2, self.bn2, self.max_pool),
                  (self.conv3, self.bn3, self.max_pool),
                  (self.conv4, self.bn4, self.max_pool2))
        for conv, bn, pool in stages:
            out = pool(self.relu(bn(conv(out))))
        # Flatten to (batch, 4608) for the classifier head.
        out = out.view(out.size(0), -1)
        return self.linear(out)
class CubicHeckeMatrixRep(Matrix_generic_dense):
    """Dense matrix representing an element of a cubic Hecke algebra, with
    block access per absolutely irreducible representation (SageMath)."""
    # NOTE(review): the bare '_method' markers below look like decorators that
    # lost their prefix during extraction (likely '@cached_method') — confirm
    # against the upstream SageMath source before relying on this file.
    _method
    def _get_block(self, ind):
        """Return the diagonal block of self belonging to the *ind*-th
        irreducible representation (split representation types only)."""
        representation_type = self.parent()._representation_type
        # Non-split types have no block structure; return the whole matrix.
        if (not representation_type.is_split()):
            return matrix(self)
        n = self.parent()._cubic_hecke_algebra.ngens()
        # s: offset = sum of dimensions of all earlier irreducibles for n gens.
        s = sum((irr_rep.dimension() for irr_rep in AbsIrreducibeRep if ((irr_rep.number_gens() == n) and (irr_rep.internal_index() < ind))))
        for irr_rep in AbsIrreducibeRep:
            if ((irr_rep.number_gens() == n) and (irr_rep.internal_index() == ind)):
                d = irr_rep.dimension()
                return matrix(self.submatrix(s, s, d, d))
        raise ValueError('no irreducible representation for this index')
    _method
    def _irr_to_ind(self, irr):
        """Translate an AbsIrreducibeRep *irr* into the block index used by
        the parent's representation type; raises TypeError on mismatch."""
        representation_type = self.parent()._representation_type
        if (not representation_type.is_split()):
            raise TypeError('representation type is non split')
        ch_algebra = self.parent()._cubic_hecke_algebra
        if (ch_algebra.strands() != (irr.number_gens() + 1)):
            raise TypeError(('representation must have %s generators' % (ch_algebra.strands() - 1)))
        ind = irr.gap_index()
        # Marin's split-irreducible ordering uses the internal index instead.
        if (representation_type == RepresentationType.SplitIrredMarin):
            ind = irr.internal_index()
        return ind
    _method
    def __getitem__(self, item):
        """Allow indexing by an irreducible representation or a block index;
        anything else falls through to ordinary matrix indexing."""
        if isinstance(item, AbsIrreducibeRep):
            return self._get_block(self._irr_to_ind(item))
        elif isinstance(item, (Integer, int)):
            return self._get_block(item)
        return super().__getitem__(item)
    _method
    def block_diagonal_list(self):
        """Return all diagonal blocks of self, one per irreducible
        representation of the parent's representation type."""
        representation_type = self.parent()._representation_type
        n = self.parent()._cubic_hecke_algebra.strands()
        m = representation_type.number_of_representations(n)
        return [self._get_block(i) for i in range(m)]
    _method
    def reduce_to_irr_block(self, irr):
        """Return a block-diagonal matrix that is zero everywhere except the
        block of *irr* (given as a representation or as an index)."""
        if isinstance(irr, AbsIrreducibeRep):
            ind = self._irr_to_ind(irr)
        else:
            ind = Integer(irr)
        from copy import copy
        # Start from the zero element's block list and splice in our block.
        mat_list = copy(self.parent().zero().block_diagonal_list())
        mat_list[ind] = self[ind]
        return block_diagonal_matrix(mat_list, subdivide=self.parent()._subdivide, sparse=True)
def is_None_tensor(recved_tensor):
    """Return True iff *recved_tensor* is a 0-d tensor holding NaN — the
    sentinel used to transmit 'None' over tensor channels."""
    is_scalar = recved_tensor.size() == torch.Size()
    # Short-circuit keeps .item() from being called on non-scalar tensors.
    return is_scalar and np.isnan(recved_tensor.item())
def got8(all_potential_countries) -> operations.GraphOfOperations:
    """Build the Graph-of-Thoughts pipeline for 8-paragraph keyword counting:
    split the text, count/score/keep-best per paragraph, then pairwise
    aggregate (validate, score, keep best) until a single result remains,
    finishing with a ground-truth check.
    """
    graph = operations.GraphOfOperations()
    split_op = operations.Generate(1, 1)
    graph.append_operation(split_op)

    best_per_paragraph = []
    for idx in range(1, 9):
        part_name = f'Paragraph {idx}'
        # Bind the paragraph id as a default argument so each lambda keeps
        # its own value (late-binding closure pitfall).
        selector = operations.Selector((lambda thoughts, list_id=part_name: [thought for thought in thoughts if (thought.state['part'] == list_id)]))
        selector.add_predecessor(split_op)
        graph.add_operation(selector)

        counter = operations.Generate(1, 10)
        counter.add_predecessor(selector)
        graph.add_operation(counter)

        scorer = operations.Score(1, False, partial(num_errors, all_potential_countries))
        scorer.add_predecessor(counter)
        graph.add_operation(scorer)

        keeper = operations.KeepBestN(1, False)
        keeper.add_predecessor(scorer)
        graph.add_operation(keeper)
        best_per_paragraph.append(keeper)

    # Tournament-style pairwise aggregation down to one surviving result.
    while len(best_per_paragraph) > 1:
        next_round = []
        for j in range(0, len(best_per_paragraph), 2):
            merge = operations.Aggregate(3)
            merge.add_predecessor(best_per_paragraph[j])
            merge.add_predecessor(best_per_paragraph[j + 1])
            graph.add_operation(merge)

            improver = operations.ValidateAndImprove(1, True, 3, valid_aggregation)
            improver.add_predecessor(merge)
            graph.add_operation(improver)

            merge_scorer = operations.Score(1, False, partial(num_errors, all_potential_countries))
            merge_scorer.add_predecessor(improver)
            graph.add_operation(merge_scorer)

            merge_keeper = operations.KeepBestN(1, False)
            merge_keeper.add_predecessor(merge_scorer)
            graph.add_operation(merge_keeper)
            next_round.append(merge_keeper)
        best_per_paragraph = next_round

    graph.append_operation(operations.GroundTruth(test_keyword_counting))
    return graph
def multiple_databases():
    """Fixture generator: create three single-table databases under DB_PATH,
    yield a MultipleDatabases view over them, then delete the directory tree."""
    os.makedirs(DB_PATH)
    # Keep the database handles alive across the yield so they are not
    # garbage-collected while the test body runs.
    databases = [
        SingleDatabase(db_path=DB_PATH, db_name=f'{DB_NAME}_{i}', tables={TABLE_NAME: TABLE_DATAFRAME})
        for i in (1, 2, 3)
    ]
    yield MultipleDatabases(DB_PATH)
    shutil.rmtree(DB_PATH)
def rename(checkpoint_dir, replace_from, replace_to, add_prefix, dry_run, out_checkpoint_dir):
    """Rename variables in a TF1 checkpoint and save the result.

    For every variable in *checkpoint_dir*: substitute *replace_from* with
    *replace_to* in its name (when both are given), optionally prepend
    *add_prefix*, and — unless *dry_run* — re-create it under the new name
    and save all variables to *out_checkpoint_dir*.
    """
    # NOTE(review): 'checkpoint' is never used below — presumably leftover;
    # confirm before removing.
    checkpoint = tf.train.get_checkpoint_state(checkpoint_dir)
    with tf.Session() as sess:
        for (var_name, _) in tf.contrib.framework.list_variables(checkpoint_dir):
            var = tf.contrib.framework.load_variable(checkpoint_dir, var_name)
            new_name = var_name
            # Only substitute when both patterns were supplied.
            if (None not in [replace_from, replace_to]):
                new_name = new_name.replace(replace_from, replace_to)
            if add_prefix:
                new_name = (add_prefix + new_name)
            if dry_run:
                print(('%s would be renamed to %s.' % (var_name, new_name)))
            else:
                print(('Renaming %s to %s.' % (var_name, new_name)))
                # Re-creating the Variable registers it in the default graph
                # under the new name.
                var = tf.Variable(var, name=new_name)
            # NOTE(review): the Saver is re-created and the checkpoint re-saved
            # on every loop iteration — looks wasteful but functionally ends
            # with the complete set saved; confirm before hoisting.
            if (not dry_run):
                saver = tf.train.Saver()
                sess.run(tf.global_variables_initializer())
                saver.save(sess, out_checkpoint_dir)
def update_graphics(board: np.ndarray, game_display, clock, fps: int=1) -> None:
    """Redraw the game board (grid, per-cell labels, gold/time HUD, actors)
    on *game_display* and advance *clock* by one frame at *fps*.

    Windows-only: cell size is derived from the primary screen height via
    ctypes.windll.
    """
    import pygame
    n = board.shape[0]
    # Scale cells so the n x n board fills ~16/30 of the screen height.
    canvas_scale = int(((ctypes.windll.user32.GetSystemMetrics(1) * (16 / 30)) / n))
    game_display.fill((255, 255, 255))
    # Vertical grid lines.
    for y in range(canvas_scale, ((n + 2) * canvas_scale), canvas_scale):
        pygame.draw.line(game_display, (0, 0, 0), [y, canvas_scale], [y, ((n + 1) * canvas_scale)])
    # Horizontal grid lines plus per-cell coordinate labels.
    for x in range(canvas_scale, ((n + 2) * canvas_scale), canvas_scale):
        pygame.draw.line(game_display, (0, 0, 0), [canvas_scale, x], [((n + 1) * canvas_scale), x])
        # NOTE(review): 'y' here is the leftover value from the previous loop
        # (its final iterate), not a nested loop variable — this looks like a
        # bug that only labels one row; confirm against the intended UI.
        if ((x < ((n + 1) * canvas_scale)) and (y < ((n + 1) * canvas_scale))):
            message_display(game_display, (((u'' + str(((x / canvas_scale) - 1))) + ', ') + str(((y / canvas_scale) - 1))), ((x + (canvas_scale / 4)), (y + (canvas_scale / 10))), int((canvas_scale / 8)))
    # HUD: player gold is stored in the two center cells, time in cell (0,0).
    gold_p1 = board[(int((n / 2)) - 1)][int((n / 2))][MONEY_IDX]
    gold_p2 = board[int((n / 2))][(int((n / 2)) - 1)][MONEY_IDX]
    message_display(game_display, ((u'' + 'Gold Player +1: ') + str(gold_p1)), (int(((n / 8) * canvas_scale)), (((n + 1) * canvas_scale) + int(((int((canvas_scale / 12)) + (canvas_scale * (0 / 4))) + int((canvas_scale * (1 / 8))))))), int((canvas_scale / 6)))
    message_display(game_display, ((u'' + 'Gold Player -1: ') + str(gold_p2)), (int(((n / 8) * canvas_scale)), (((n + 1) * canvas_scale) + int(((int((canvas_scale / 12)) + (canvas_scale * (1 / 4))) + int((canvas_scale * (1 / 8))))))), int((canvas_scale / 6)))
    time_remaining = board[0][0][TIME_IDX]
    message_display(game_display, ((u'' + 'Remaining ') + str(time_remaining)), (int(((n / 8) * canvas_scale)), (((n + 1) * canvas_scale) + int(((int((canvas_scale / 12)) + (canvas_scale * (2 / 4))) + int((canvas_scale * (1 / 8))))))), int((canvas_scale / 6)))
    # Draw every actor: filled circle colored by type, ring colored by owner,
    # short type label, and hp/carry annotations where applicable.
    for y in range(n):
        for x in range(n):
            a_player = board[x][y][P_NAME_IDX]
            if ((a_player == 1) or (a_player == (- 1))):
                a_type = board[x][y][A_TYPE_IDX]
                actor_color = d_a_color[a_type]
                actor_location = (int((((x * canvas_scale) + (canvas_scale / 2)) + canvas_scale)), (int(((y * canvas_scale) + (canvas_scale / 2))) + canvas_scale))
                (actor_x, actor_y) = actor_location
                actor_size = int((canvas_scale / 3))
                actor_short_name = d_type_rev[a_type]
                actor_carry = board[x][y][CARRY_IDX]
                actor_health = board[x][y][HEALTH_IDX]
                pygame.draw.circle(game_display, actor_color, actor_location, actor_size)
                # Owner ring: green for player +1, red for player -1.
                player_color = (0, 0, 0)
                if (a_player == 1):
                    player_color = (0, 255, 0)
                if (a_player == (- 1)):
                    player_color = (255, 0, 0)
                pygame.draw.circle(game_display, player_color, actor_location, actor_size, int((actor_size / 10)))
                message_display(game_display, (u'' + actor_short_name), actor_location, int((actor_size * 0.7)))
                if (a_type != 1):
                    message_display(game_display, (u'hp: ' + str(actor_health)), (actor_x, (actor_y + (canvas_scale * (2 / 10)))), int((actor_size * 0.5)))
                if (a_type == 2):
                    message_display(game_display, (u'carry: ' + str(actor_carry)), (actor_x, (actor_y + (canvas_scale * (4 / 10)))), int((actor_size * 0.5)))
    pygame.display.update()
    clock.tick(fps)
class T5Stack(T5PreTrainedModel):
    """Stack of T5Blocks (encoder or decoder) with shared token embeddings.

    Blocks are registered as numbered child modules ('0', '1', ...); only
    block 0 carries relative attention bias, which is reused by the rest.
    """

    def __init__(self, config, embed_tokens=None):
        super().__init__(config)
        self.embed_tokens = embed_tokens
        self.is_decoder = config.is_decoder
        # When True, forward() expects already-extended attention masks.
        self.precomputed_masks = config.precomputed_masks
        # Register blocks by string index; only block 0 owns the relative
        # attention bias table.
        for i in range(config.num_layers):
            self.add_module(str(i), T5Block(config, has_relative_attention_bias=bool((i == 0))))
        self.num_layers = config.num_layers
        self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
        self.dropout = nn.Dropout(config.dropout_rate)
        self.init_weights()

    def get_input_embeddings(self):
        """Return the shared token-embedding module."""
        return self.embed_tokens

    def get_output_embeddings(self):
        """Return the (tied) token-embedding module."""
        return self.embed_tokens

    def set_input_embeddings(self, new_embeddings):
        """Replace the token-embedding module."""
        self.embed_tokens = new_embeddings

    def forward(self, input_ids, shared_embedding, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None):
        """Embed *input_ids*, run all blocks, and return the final hidden states.

        When self.precomputed_masks is False, missing masks default to all-ones
        and are converted to extended (additive) form here; otherwise the
        given masks are assumed to already be in extended form.
        """
        input_shape = input_ids.size()
        input_ids = input_ids.view((- 1), input_shape[(- 1)])
        assert is_not_None(self.embed_tokens), 'You have to intialize the model with valid token embeddings'
        # NOTE(review): embed_tokens is called with (shared_embedding, ids) —
        # a custom embedding taking the shared weight as input; confirm its
        # contract against the project's embedding class.
        inputs_embeds = self.embed_tokens(shared_embedding, input_ids)
        (batch_size, seq_length) = input_shape
        if (not self.precomputed_masks):
            if is_None(attention_mask):
                attention_mask = torch.ones(batch_size, seq_length).to(inputs_embeds.device)
            if (self.is_decoder and is_None(encoder_attention_mask) and is_not_None(encoder_hidden_states)):
                encoder_seq_length = encoder_hidden_states.shape[1]
                encoder_attention_mask = torch.ones(batch_size, encoder_seq_length).to(inputs_embeds.device)
            # Convert padding masks to additive extended masks for attention.
            extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, attention_mask.device)
            if (self.is_decoder and is_not_None(encoder_attention_mask)):
                encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
            else:
                encoder_extended_attention_mask = None
        else:
            extended_attention_mask = attention_mask
            encoder_extended_attention_mask = encoder_attention_mask
        # Position biases are produced by block 0 and shared by later blocks.
        position_bias = None
        encoder_decoder_position_bias = None
        hidden_states = self.dropout(inputs_embeds)
        for i in range(self.num_layers):
            layer_module = getattr(self, str(i))
            layer_outputs = layer_module(hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias)
            # NOTE(review): block 0 apparently returns a tuple (states, bias,
            # [enc-dec bias]) while later blocks return the states tensor —
            # this relies on the custom T5Block's return convention; confirm.
            if (i == 0):
                hidden_states = layer_outputs[0]
                position_bias = layer_outputs[1]
                if (self.is_decoder and is_not_None(encoder_hidden_states)):
                    encoder_decoder_position_bias = layer_outputs[2]
            else:
                hidden_states = layer_outputs
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states
class _QueueWriter(dataio.Writer):
    """caffe2 dataio.Writer that enqueues record fields onto a wrapped blobs
    queue and closes the queue when execution finishes."""

    def __init__(self, wrapper):
        # wrapper: queue wrapper exposing queue() and _new_writer().
        self._wrapper = wrapper

    def setup_ex(self, init_net, exit_net):
        """Close the queue on exit so readers observe end-of-data."""
        exit_net.CloseBlobsQueue([self._wrapper.queue()], 0)

    def write_ex(self, fields, local_init_net, local_finish_net, status):
        """Return the net that enqueues *fields*; *status* receives the
        enqueue result."""
        self._wrapper._new_writer(self.schema(), local_init_net)
        enqueue_net = core.Net('enqueue')
        enqueue(enqueue_net, self._wrapper.queue(), fields, status)
        return [enqueue_net]
# NOTE(review): the six '.parametrize' lines below are syntactically broken —
# they look like '@pytest.mark.parametrize' decorators whose prefix was lost
# during extraction; confirm against the upstream d3rlpy test suite.
.parametrize('observation_shape', [(100,), (4, 84, 84), ((100,), (200,))])
.parametrize('q_func_factory', [MeanQFunctionFactory(), QRQFunctionFactory()])
.parametrize('scalers', [None, 'min_max'])
.parametrize('advantage_type', ['mean', 'max'])
.parametrize('weight_type', ['exp', 'binary'])
.parametrize('target_update_type', ['hard', 'soft'])
def test_crr(observation_shape: Shape, q_func_factory: QFunctionFactory, scalers: Optional[str], advantage_type: str, weight_type: str, target_update_type: str) -> None:
    """Smoke-test CRR across shapes, Q-function factories, scalers, and
    advantage/weight/target-update variants via the shared algo_tester."""
    (observation_scaler, action_scaler, reward_scaler) = create_scaler_tuple(scalers, observation_shape)
    config = CRRConfig(actor_encoder_factory=DummyEncoderFactory(), critic_encoder_factory=DummyEncoderFactory(), q_func_factory=q_func_factory, observation_scaler=observation_scaler, action_scaler=action_scaler, reward_scaler=reward_scaler, advantage_type=advantage_type, weight_type=weight_type, target_update_type=target_update_type)
    crr = config.create()
    algo_tester(crr, observation_shape, deterministic_best_action=False, test_policy_copy=False)
def collect_predictions(model, data_configs):
    """Run *model* on each configured test set and save raw predictions.

    For every name in ``testsets`` this loads ``<dir>/<name>`` with np.load,
    predicts with ``model.predict(..., raw=True)``, and writes the result to
    ``<dir>/Pred-<name>`` via np.save (which appends '.npy').

    Args:
        model: object exposing ``predict(data, raw=True)``.
        data_configs: mapping with key 'dir' pointing at the data folder.
    """
    folder = data_configs.get('dir')
    testsets = ['bs_full']
    for testset in testsets:
        # BUG FIX: the original indexed testset[0] — the first *character*
        # of the name ('b') — instead of the test-set name itself, so it
        # loaded/saved the wrong files.
        data_file = os.path.join(folder, testset)
        print(f'>>> Evaluating model on test data {data_file}')
        data = np.load(data_file)
        raw_predictions = model.predict(data, raw=True)
        print(f'>>> Data shape: {data.shape}; Prediction shape: {raw_predictions.shape}')
        filename = f'Pred-{testset}'
        savefile = os.path.join(folder, filename)
        print(f'>>> Save predictions to file [{savefile}].')
        np.save(savefile, raw_predictions)
class Conv3d(_ConvNd):
__doc__ = (('Applies a 3D convolution over an input signal composed of several input\n planes.\n\n In the simplest case, the output value of the layer with input size :math:`(N, C_{in}, D, H, W)`\n and output :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` can be precisely described as:\n\n .. math::\n out(N_i, C_{out_j}) = bias(C_{out_j}) +\n \\sum_{k = 0}^{C_{in} - 1} weight(C_{out_j}, k) \\star input(N_i, k)\n\n where :math:`\\star` is the valid 3D `cross-correlation`_ operator\n ' + "\n\n This module supports :ref:`TensorFloat32<tf32_on_ampere>`.\n\n * :attr:`stride` controls the stride for the cross-correlation.\n\n * :attr:`padding` controls the amount of padding applied to the input. It\n can be either a string {{'valid', 'same'}} or a tuple of ints giving the\n amount of implicit padding applied on both sides.\n\n * :attr:`dilation` controls the spacing between the kernel points; also known as the a trous algorithm.\n It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.\n\n {groups_note}\n\n The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:\n\n - a single ``int`` -- in which case the same value is used for the depth, height and width dimension\n - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,\n the second `int` for the height dimension and the third `int` for the width dimension\n\n Note:\n {depthwise_separable_note}\n\n Note:\n {cudnn_reproducibility_note}\n\n Note:\n ``padding='valid'`` is the same as no padding. ``padding='same'`` pads\n the input so the output has the shape as the input. However, this mode\n doesn't support any stride values other than 1.\n\n Args:\n in_channels (int): Number of channels in the input image\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int or tuple): Size of the convolving kernel\n stride (int or tuple, optional): Stride of the convolution. 
Default: 1\n padding (int, tuple or str, optional): Padding added to all six sides of\n the input. Default: 0\n padding_mode (string, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``\n dilation (int or tuple, optional): Spacing between kernel elements. Default: 1\n groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1\n bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``\n ".format(**reproducibility_notes, **convolution_notes)) + '\n\n Shape:\n - Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`\n - Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` where\n\n .. math::\n D_{out} = \\left\\lfloor\\frac{D_{in} + 2 \\times \\text{padding}[0] - \\text{dilation}[0]\n \\times (\\text{kernel\\_size}[0] - 1) - 1}{\\text{stride}[0]} + 1\\right\\rfloor\n\n .. math::\n H_{out} = \\left\\lfloor\\frac{H_{in} + 2 \\times \\text{padding}[1] - \\text{dilation}[1]\n \\times (\\text{kernel\\_size}[1] - 1) - 1}{\\text{stride}[1]} + 1\\right\\rfloor\n\n .. math::\n W_{out} = \\left\\lfloor\\frac{W_{in} + 2 \\times \\text{padding}[2] - \\text{dilation}[2]\n \\times (\\text{kernel\\_size}[2] - 1) - 1}{\\text{stride}[2]} + 1\\right\\rfloor\n\n Attributes:\n weight (Tensor): the learnable weights of the module of shape\n :math:`(\\text{out\\_channels}, \\frac{\\text{in\\_channels}}{\\text{groups}},`\n :math:`\\text{kernel\\_size[0]}, \\text{kernel\\_size[1]}, \\text{kernel\\_size[2]})`.\n The values of these weights are sampled from\n :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where\n :math:`k = \\frac{groups}{C_\\text{in} * \\prod_{i=0}^{2}\\text{kernel\\_size}[i]}`\n bias (Tensor): the learnable bias of the module of shape (out_channels). 
If :attr:`bias` is ``True``,\n then the values of these weights are\n sampled from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where\n :math:`k = \\frac{groups}{C_\\text{in} * \\prod_{i=0}^{2}\\text{kernel\\_size}[i]}`\n\n Examples::\n\n >>> # With square kernels and equal stride\n >>> m = nn.Conv3d(16, 33, 3, stride=2)\n >>> # non-square kernels and unequal stride and with padding\n >>> m = nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))\n >>> input = torch.randn(20, 16, 10, 50, 100)\n >>> output = m(input)\n\n .. _cross-correlation:\n .. _link:\n ')
def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_3_t, stride: _size_3_t=1, padding: Union[(str, _size_3_t)]=0, dilation: _size_3_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', device=None, dtype=None) -> None:
    """Initialize a 3D convolution; see the class docstring for details."""
    factory_kwargs = {'device': device, 'dtype': dtype}
    # Normalize scalar/tuple arguments to 3-tuples; string padding
    # ('valid'/'same') is passed through unchanged for _ConvNd to resolve.
    kernel_size_ = _triple(kernel_size)
    stride_ = _triple(stride)
    padding_ = (padding if isinstance(padding, str) else _triple(padding))
    dilation_ = _triple(dilation)
    # transposed=False, output_padding=(0,0,0): this is a forward conv.
    super(Conv3d, self).__init__(in_channels, out_channels, kernel_size_, stride_, padding_, dilation_, False, _triple(0), groups, bias, padding_mode, **factory_kwargs)
def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):
    """Apply conv3d with this module's configured padding mode."""
    if (self.padding_mode != 'zeros'):
        # Non-zero padding modes are not supported by F.conv3d directly:
        # pad explicitly first, then convolve with zero implicit padding.
        return F.conv3d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode), weight, bias, self.stride, _triple(0), self.dilation, self.groups)
    return F.conv3d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups)
def forward(self, input: Tensor) -> Tensor:
    """Convolve `input` with this module's weight and bias."""
    return self._conv_forward(input, self.weight, self.bias)
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between vectors v0 and v1 at t.

    The interpolation weights are computed from the angle between the
    normalized inputs, but applied to the original (unnormalized) vectors.
    Falls back to lerp() when the inputs are nearly (anti-)parallel, where
    the sin-based formula is ill-conditioned.
    """
    raw0 = np.copy(v0)
    raw1 = np.copy(v1)
    unit0 = v0 / np.linalg.norm(v0)
    unit1 = v1 / np.linalg.norm(v1)
    cos_angle = np.sum(unit0 * unit1)
    if np.abs(cos_angle) > DOT_THRESHOLD:
        # Nearly colinear: plain linear interpolation is accurate enough.
        return lerp(t, raw0, raw1)
    angle = np.arccos(cos_angle)
    sin_angle = np.sin(angle)
    partial_angle = angle * t
    weight0 = np.sin(angle - partial_angle) / sin_angle
    weight1 = np.sin(partial_angle) / sin_angle
    return (weight0 * raw0) + (weight1 * raw1)
def get_ttest_args():
    """Parse CLI arguments for significance testing between two experiments.

    Returns:
        (mode, args1, config1, args2, config2): the chosen statistical test
        plus the restored args/config for each past checkpoint, with any
        upstream overrides applied.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--mode', choices=['ttest', 'fisher', 'mcnemar'], default='ttest')
    parser.add_argument('-em', '--evaluate_metric', default='acc')
    parser.add_argument('-t', '--evaluate_split', default='test')
    parser.add_argument('-o', '--override', help='Used to override args and config, this is at the highest priority')
    parser.add_argument('-e1', '--past_exp1', metavar='{CKPT_PATH,CKPT_DIR}', help='Load from a checkpoint')
    parser.add_argument('-e2', '--past_exp2', metavar='{CKPT_PATH,CKPT_DIR}', help='Load from another checkpoint')
    parser.add_argument('-u1', '--upstream1', default='default', type=str, help='used to override the upstream string for checkpoint e1')
    parser.add_argument('-u2', '--upstream2', default='default', type=str, help='used to override the upstream string for checkpoint e2')
    parser.add_argument('--seed', default=1337, type=int)
    parser.add_argument('--verbose', action='store_true', help='Print model infomation')
    parser.add_argument('--ckpt_name', default='best-states-dev', help='The string used for searching the checkpoint, example choices: `states-*`, `best-states-dev`, `best-states-test`.')
    args = parser.parse_args()
    # Restore the saved args/config of both past experiments.
    (args1, config1) = get_past_exp(args, args.past_exp1, args.ckpt_name)
    (args2, config2) = get_past_exp(args, args.past_exp2, args.ckpt_name)
    # 'default' means: keep the upstream recorded in the checkpoint.
    if (args.upstream1 != 'default'):
        args1.upstream = args.upstream1
    if (args.upstream2 != 'default'):
        args2.upstream = args.upstream2
    return (args.mode, args1, config1, args2, config2)
def adaptive_avg_pool3d(input, output_size):
    """Apply 3D adaptive average pooling producing size `output_size`.

    Entries of `output_size` may be None; _list_with_default fills them
    from the corresponding input dimensions before dispatching to the
    native implementation.
    """
    output_size = _list_with_default(output_size, input.size())
    return torch._C._nn.adaptive_avg_pool3d(input, output_size)
def mk_lean_auto_soundness_name(fn_name: str, namespaces: List[ScopedName]):
    """Return the auto-generated soundness lemma name for `fn_name`.

    Prefixes the function's name with 'auto_sound_' and resolves it
    relative to the currently open `namespaces`.
    """
    prefix = 'auto_sound_'
    return get_name_in_open_scopes(ScopedName.from_string(fn_name), namespaces, prefix)
class docVarListEntryType(GeneratedsSuper):
    """Generated binding for the Doxygen XSD type docVarListEntryType.

    Holds a single child element, ``term``, and knows how to (de)serialize
    itself to/from XML and to Python-literal form.
    """
    # Hook allowing users to substitute a subclass via factory().
    subclass = None
    superclass = None
    def __init__(self, term=None):
        self.term = term
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass if one was installed.
        if docVarListEntryType.subclass:
            return docVarListEntryType.subclass(*args_, **kwargs_)
        else:
            return docVarListEntryType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_term(self):
        return self.term
    def set_term(self, term):
        self.term = term
    def export(self, outfile, level, namespace_='', name_='docVarListEntryType', namespacedef_=''):
        """Write this element as XML to ``outfile`` at indent ``level``."""
        showIndent(outfile, level)
        outfile.write(('<%s%s %s' % (namespace_, name_, namespacedef_)))
        self.exportAttributes(outfile, level, namespace_, name_='docVarListEntryType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, (level + 1), namespace_, name_)
            showIndent(outfile, level)
            outfile.write(('</%s%s>\n' % (namespace_, name_)))
        else:
            # Self-closing tag when there is no child content.
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='docVarListEntryType'):
        # This type defines no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='docVarListEntryType'):
        if self.term:
            self.term.export(outfile, level, namespace_, name_='term')
    def hasContent_(self):
        """Return True if any child element is present."""
        if (self.term is not None):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docVarListEntryType'):
        """Write this element as Python-literal construction code."""
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        # No attributes to emit.
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        if self.term:
            showIndent(outfile, level)
            outfile.write('term=model_.docTitleType(\n')
            self.term.exportLiteral(outfile, level, name_='term')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node_):
        """Populate this object from a DOM element node."""
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            # Strip any namespace prefix from the tag name.
            nodeName_ = child_.nodeName.split(':')[(- 1)]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        # No attributes to parse.
        pass
    def buildChildren(self, child_, nodeName_):
        if ((child_.nodeType == Node.ELEMENT_NODE) and (nodeName_ == 'term')):
            obj_ = docTitleType.factory()
            obj_.build(child_)
            self.set_term(obj_)
def create_sqlite_connection_provider(db_uri):
    """Return a zero-argument factory producing db.Connection objects.

    Only local, file-backed sqlite URIs are accepted; the actual connect
    is deferred until the returned provider is invoked.

    Raises:
        ValueError: for non-sqlite schemes, network locations, or the
            in-memory database (':memory:').
    """
    parsed = urlparse.urlparse(db_uri)
    if parsed.scheme != 'sqlite':
        raise ValueError('Scheme is not sqlite: ' + db_uri)
    if parsed.netloc:
        raise ValueError('Can not connect to SQLite over network: ' + db_uri)
    if parsed.path == ':memory:':
        raise ValueError('Memory mode SQLite not supported: ' + db_uri)
    db_path = os.path.expanduser(parsed.path)
    connect_params = _get_connect_params(parsed.query)
    return lambda: db.Connection(sqlite3.connect(db_path, **connect_params))
def linear(input_, output_size, scope_name='linear'):
    """Flatten `input_` to 2-D and apply a dense layer of `output_size` units.

    Variables are created under `scope_name`; the dense output tensor is
    returned.
    """
    with tf.variable_scope(scope_name):
        # Collapse all non-batch dimensions into one.
        flat_dim = np.prod(input_.get_shape().as_list()[1:])
        flattened = tf.reshape(input_, [-1, flat_dim])
        return tf.layers.dense(flattened, output_size)
class FlaxAutoModelForSpeechSeq2Seq(metaclass=DummyObject):
    """Generated placeholder used when the 'flax' backend is unavailable.

    Instantiating it calls requires_backends, which raises with an
    instructive message unless flax is installed.
    """
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
def register_Ns3SimpleRefCount__Ns3PbbTlv_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbTlv__gt___methods(root_module, cls):
    """Register constructors of SimpleRefCount<PbbTlv> (pybindgen codegen)."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter< ns3::PbbTlv > > const &', 'o')])
    return
class ResNetPoolingHead(nn.Module):
    """Head that average-pools spatial dimensions (stride 1) and flattens.

    Output shape: (batch, C * pooled_H * pooled_W).
    """
    def __init__(self, pool_size):
        super(ResNetPoolingHead, self).__init__()
        # Stride-1 pooling: output spatial size = input - pool_size + 1.
        self.avg_pool = nn.AvgPool2d(pool_size, stride=1)

    def forward(self, input):
        pooled = self.avg_pool(input)
        # Flatten everything except the batch dimension.
        return pooled.view(pooled.shape[0], -1)
def hook_avgpool3d(m, x, y):
    """FLOP-count hook for AvgPool3d.

    Counts one operation per kernel element per output element:
    prod(kernel_size) * y.numel().
    """
    kernel = _triple(m.kernel_size)
    kernel_volume = torch.prod(torch.Tensor(kernel)).item()
    return int(kernel_volume * y.numel())
def check_load_config(config_class, config_file):
    """Verify that `config_file` parses into `config_class` via draccus.

    Raises:
        RuntimeError: wrapping (and chaining) the original parse error.
    """
    try:
        draccus.parse(config_class, config_file, args=[])
    except Exception as e:
        # FIX: raise RuntimeError instead of the bare Exception the
        # original used; existing `except Exception` callers still catch
        # it, while the type is now meaningful.
        raise RuntimeError(f'failed to parse {config_file}') from e
def setup(dirname=None, format_strs=('stdout', 'tensorboard', 'csv'), action=None):
    """Initialize the global Logger, choosing/creating the log directory.

    Args:
        dirname: log directory; falls back to $SISL_LOGDIR, then a fresh
            temp directory.
        format_strs: output format names to attach (iterated only).
        action: what to do when the directory already has content; one of
            'k' (keep), 'b' (backup), 'd' (delete), 'n' (new dir),
            'q' (quit). Prompted interactively when falsy.

    Side effects: creates the directory, replaces Logger.CURRENT, attaches
    a file handler, and logs argv plus the current git commit (if any).
    """
    # FIX: the default was a mutable list; a tuple avoids accidental shared
    # mutation across calls (iteration-only usage keeps this compatible).
    if dirname is None:
        dirname = os.getenv('SISL_LOGDIR')
    if dirname is None:
        dirname = osp.join(tempfile.gettempdir(), datetime.datetime.now().strftime('sisl-%Y-%m-%d-%H-%M-%S-%f'))
    if os.path.isdir(dirname) and len(os.listdir(dirname)):
        if not action:
            warn('Log directory {} exists! Please either backup/delete it, or use a new directory.'.format(dirname))
            warn("If you're resuming from a previous run you can choose to keep it.")
            # FIX: the prompt now lists every action the code accepts
            # (the original omitted 'b' and 'n').
            info('Select Action: k (keep) / b (backup) / d (delete) / n (new) / q (quit):')
        while not action:
            action = input().lower().strip()
        act = action
        if act == 'b':
            backup_name = dirname + _get_time_str()
            shutil.move(dirname, backup_name)
            # FIX: 'backuped' -> 'backed up'.
            info("Directory '{}' backed up to '{}'".format(dirname, backup_name))
        elif act == 'd':
            shutil.rmtree(dirname)
        elif act == 'n':
            dirname = dirname + _get_time_str()
            info('Use a new log directory {}'.format(dirname))
        elif act == 'k':
            pass
        elif act == 'q':
            # FIX: typo 'exits!' -> 'exists!'.
            raise OSError('Directory {} exists!'.format(dirname))
        else:
            raise ValueError('Unknown action: {}'.format(act))
    os.makedirs(dirname, exist_ok=True)
    output_formats = [make_output_format(f, dirname) for f in format_strs]
    Logger.CURRENT = Logger(dirname=dirname, output_formats=output_formats)
    # Route the std logging module into the same directory.
    hdl = logging.FileHandler(filename=osp.join(dirname, 'log.log'), encoding='utf-8', mode='w')
    hdl.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
    Logger.CURRENT._logger.removeHandler(Logger.CURRENT._handler)
    Logger.CURRENT._logger.addHandler(hdl)
    Logger.CURRENT._logger.info('Argv: ' + ' '.join(sys.argv))
    has_git = True
    timestamp = datetime.datetime.now(dateutil.tz.tzlocal()).strftime('%Y_%m_%d_%H_%M_%S')
    try:
        current_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip().decode('utf-8')
        clean_state = (len(subprocess.check_output(['git', 'status', '--porcelain'])) == 0)
    except subprocess.CalledProcessError:
        # FIX: Logger.warn is deprecated in the std logging API; use warning.
        Logger.CURRENT._logger.warning('Warning: failed to execute git commands')
        has_git = False
    if has_git:
        if clean_state:
            Logger.CURRENT._logger.info('Commit: {}'.format(current_commit))
        else:
            # Mark dirty working trees so the commit hash is not misleading.
            Logger.CURRENT._logger.info('Commit: {}_dirty_{}'.format(current_commit, timestamp))
class OrthogonalMatrixGroup_generic(NamedMatrixGroup_generic):
    """Generic orthogonal matrix group with identity invariant form."""
    # NOTE(review): the bare '_method' below appears to be a decorator whose
    # '@' prefix (likely '@cached_method') was lost in preprocessing —
    # confirm against the upstream source.
    _method
    def invariant_bilinear_form(self):
        """Return the invariant bilinear form (identity unless overridden)."""
        if (self._invariant_form is not None):
            return self._invariant_form
        from sage.matrix.constructor import identity_matrix
        m = identity_matrix(self.base_ring(), self.degree())
        # Immutable so the shared matrix cannot be modified by callers.
        m.set_immutable()
        return m
    # For the generic (identity-form) case these aliases coincide.
    invariant_quadratic_form = invariant_bilinear_form
    invariant_form = invariant_bilinear_form
    def _check_matrix(self, x, *args):
        """Raise TypeError unless `x` belongs to this group."""
        if (self._special and (x.determinant() != 1)):
            raise TypeError('matrix must have determinant one')
        F = self.invariant_bilinear_form()
        # x is in the group iff x * F * x^T == F.
        if (((x * F) * x.transpose()) != F):
            if (F == self.one().matrix()):
                raise TypeError('matrix must be orthogonal')
            else:
                raise TypeError(('matrix must be orthogonal with respect to the symmetric form\n%s' % F))
class SuperCrystals(Category_singleton):
    """Category of crystals arising from Lie superalgebras."""
    def super_categories(self):
        # Every super crystal is in particular an ordinary crystal.
        return [Crystals()]

    class ParentMethods():
        def tensor(self, *crystals, **options):
            """Return the tensor product of ``self`` with ``crystals``."""
            cartan_type = self.cartan_type()
            if any(((c.cartan_type() != cartan_type) for c in crystals)):
                raise ValueError('all crystals must be of the same Cartan type')
            # Queer type ('Q') has a dedicated tensor-product class.
            if (cartan_type.letter == 'Q'):
                from sage.combinat.crystals.tensor_product import FullTensorProductOfQueerSuperCrystals
                return FullTensorProductOfQueerSuperCrystals(((self,) + tuple(crystals)), **options)
            else:
                from sage.combinat.crystals.tensor_product import FullTensorProductOfSuperCrystals
                return FullTensorProductOfSuperCrystals(((self,) + tuple(crystals)), **options)

    class Finite(CategoryWithAxiom):

        class ParentMethods():
            # NOTE(review): '_method(...)' here and the bare '_method' lines
            # below look like decorators whose '@' prefix (likely
            # '@cached_method') was lost — confirm upstream.
            _method(key=(lambda s, i: (tuple(i) if (i is not None) else s.index_set())))
            def digraph(self, index_set=None):
                """Return the crystal graph restricted to ``index_set``,
                with dot2tex edge styling keyed on the operator index."""
                from sage.graphs.digraph import DiGraph
                from sage.misc.latex import LatexExpr
                from sage.combinat.root_system.cartan_type import CartanType
                if (index_set is None):
                    index_set = self.index_set()
                G = DiGraph(multiedges=True)
                G.add_vertices(self)
                # Add one edge x -> f_i(x), labeled i, per defined operator.
                for i in index_set:
                    for x in G:
                        y = x.f(i)
                        if (y is not None):
                            G.add_edge(x, y, i)
                def edge_options(data):
                    # Positive indices: solid/colored; negative (barred):
                    # dashed; index 0: dotted.
                    (u, v, l) = data
                    edge_opts = {'edge_string': '->', 'color': 'black'}
                    if (l > 0):
                        edge_opts['color'] = CartanType._colors.get(l, 'black')
                        edge_opts['label'] = LatexExpr(str(l))
                    elif (l < 0):
                        edge_opts['color'] = ('dashed,' + CartanType._colors.get((- l), 'black'))
                        edge_opts['label'] = LatexExpr(('\\overline{%s}' % str((- l))))
                    else:
                        edge_opts['color'] = ('dotted,' + CartanType._colors.get(l, 'black'))
                        edge_opts['label'] = LatexExpr(str(l))
                    return edge_opts
                G.set_latex_options(format='dot2tex', edge_labels=True, edge_options=edge_options)
                return G
            def genuine_highest_weight_vectors(self):
                """Return the genuine highest weight vectors (first entry of
                each computed highest/lowest pair)."""
                return tuple([x[0] for x in self._genuine_highest_lowest_weight_vectors()])
            connected_components_generators = genuine_highest_weight_vectors
            def connected_components(self):
                """Return the connected components as subcrystals."""
                category = SuperCrystals()
                from sage.categories.regular_supercrystals import RegularSuperCrystals
                # Preserve the stronger category when self is regular.
                if (self in RegularSuperCrystals()):
                    category = RegularSuperCrystals()
                index_set = self.index_set()
                cartan_type = self.cartan_type()
                CCs = []
                for mg in self.connected_components_generators():
                    if (not isinstance(mg, tuple)):
                        mg = (mg,)
                    subcrystal = self.subcrystal(generators=mg, index_set=index_set, cartan_type=cartan_type, category=category)
                    CCs.append(subcrystal)
                return CCs
            def genuine_lowest_weight_vectors(self):
                """Return the genuine lowest weight vectors (second entry of
                each computed highest/lowest pair)."""
                return tuple([x[1] for x in self._genuine_highest_lowest_weight_vectors()])
            _method
            def _genuine_highest_lowest_weight_vectors(self):
                """For each connected component of the crystal graph, return
                the (source, sink) pair at maximal finite distance."""
                X = []
                for G in self.digraph().connected_components_subgraphs():
                    src = G.sources()
                    sinks = G.sinks()
                    max_dist = (- 1)
                    pair = None
                    for s in src:
                        for t in sinks:
                            d = G.distance(s, t)
                            # Skip unreachable pairs (distance infinity).
                            if ((d < float('inf')) and (d > max_dist)):
                                pair = (s, t)
                                max_dist = d
                    X.append(pair)
                return tuple(X)
            def character(self):
                """Return the character: sum of e^{wt(x)} over all elements."""
                from sage.rings.integer_ring import ZZ
                A = self.weight_lattice_realization().algebra(ZZ)
                return A.sum((A(x.weight()) for x in self))
            _method
            def highest_weight_vectors(self):
                # Sources of the crystal graph (no incoming f-edges).
                return tuple(self.digraph().sources())
            _method
            def lowest_weight_vectors(self):
                # Sinks of the crystal graph (no outgoing f-edges).
                return tuple(self.digraph().sinks())

        class ElementMethods():
            def is_genuine_highest_weight(self, index_set=None):
                """Whether self is genuine highest weight for ``index_set``
                (defaults to the parent's full index set)."""
                P = self.parent()
                if ((index_set is None) or (set(index_set) == set(P.index_set()))):
                    return (self in P.genuine_highest_weight_vectors())
                # Restricted index set: decide inside the subcrystal.
                S = P.subcrystal(generators=P, index_set=index_set, category=P.category())
                return any(((self == x.value) for x in S.genuine_highest_weight_vectors()))
            def is_genuine_lowest_weight(self, index_set=None):
                """Whether self is genuine lowest weight for ``index_set``
                (defaults to the parent's full index set)."""
                P = self.parent()
                if ((index_set is None) or (set(index_set) == set(P.index_set()))):
                    return (self in P.genuine_lowest_weight_vectors())
                S = P.subcrystal(generators=P, index_set=index_set, category=P.category())
                return any(((self == x.value) for x in S.genuine_lowest_weight_vectors()))

    class TensorProducts(TensorProductsCategory):
        _method
        def extra_super_categories(self):
            # Tensor products of super crystals remain super crystals.
            return [self.base_category()]
# NOTE(review): the bare '_model' below looks like a decorator whose '@'
# prefix (likely '@register_model') was lost — confirm upstream.
_model
def regnety_008(pretrained=False, **kwargs):
    """RegNetY-800MF; delegates to the shared _regnet constructor."""
    return _regnet('regnety_008', pretrained, **kwargs)
def plot_results(results, cols, pdffile, num_seen=0, num_anoms=0, plot_sd=False, ylabel=None, legend_loc='lower right', legend_datasets=None, axis_fontsize=20, legend_size=14):
    """Plot anomalies-found-vs-instances-labeled curves into a PDF.

    Each entry of ``results`` is indexed as (dataset, label, mean_found,
    sd_found). If ``num_anoms`` is negative the y-axis shows raw counts;
    if positive, counts are rescaled to percentages of ``num_anoms``.
    NOTE(review): num_anoms == 0 selects the percentage axis label but
    skips the rescaling below — confirm callers never pass 0.
    """
    dataset = results[0][0]
    dp = DataPlotter(pdfpath=pdffile, rows=1, cols=1)
    pl = dp.get_next_plot()
    plt.xlim([0, num_seen])
    if (num_anoms < 0):
        ylabel = ('# of anomalies seen' if (ylabel is None) else ylabel)
        # Scale the y-axis to the largest curve, with a little headroom.
        ylim = 0.0
        for result in results:
            ylim = max(ylim, np.max(result[2]))
        plt.ylim([0.0, (ylim + 2)])
    else:
        ylabel = ('% of total anomalies seen' if (ylabel is None) else ylabel)
        plt.ylim([0.0, 100.0])
    plt.xlabel('# instances labeled', fontsize=axis_fontsize)
    plt.ylabel(ylabel, fontsize=axis_fontsize)
    for (i, result) in enumerate(results):
        num_found_avg = result[2]
        num_found_sd = result[3]
        if (num_anoms > 0):
            num_found_avg = ((num_found_avg * 100.0) / num_anoms)
            num_found_sd = ((num_found_sd * 100.0) / num_anoms)
        logger.debug(('label: %s' % result[1]))
        pl.plot(np.arange(len(num_found_avg)), num_found_avg, '-', color=cols[i], linewidth=1, label=result[1])
        if plot_sd:
            # NOTE(review): the i*5 term widens the candidate index range per
            # curve — presumably to stagger error-bar positions; confirm.
            pts = get_n_intermediate(np.arange((len(num_found_avg) + (i * 5)), dtype=int))
            # Clamp to valid indices after the offset above.
            pts = np.minimum((len(num_found_avg) - 1), pts)
            # 1.96 * sd approximates a 95% confidence interval.
            pl.errorbar(pts, num_found_avg[pts], yerr=(1.96 * num_found_sd[pts]), fmt='.', color=cols[i])
    if ((legend_datasets is None) or (dataset in legend_datasets)):
        pl.legend(loc=legend_loc, prop={'size': legend_size})
    dp.close()
class OsaBlock(nn.Module):
    """VoVNet One-Shot-Aggregation block.

    A chain of convs whose intermediate outputs are concatenated once and
    fused by a 1x1 conv, optionally followed by attention and an identity
    residual from the block input.
    """
    def __init__(self, in_chs, mid_chs, out_chs, layer_per_block, residual=False, depthwise=False, attn='', norm_layer=BatchNormAct2d):
        super(OsaBlock, self).__init__()
        self.residual = residual
        self.depthwise = depthwise
        next_in_chs = in_chs
        if (self.depthwise and (next_in_chs != mid_chs)):
            # Depthwise convs keep channel count, so reduce with a 1x1 conv
            # first; a residual would then be shape-mismatched (assert).
            assert (not residual)
            self.conv_reduction = ConvBnAct(next_in_chs, mid_chs, 1, norm_layer=norm_layer)
        else:
            self.conv_reduction = None
        mid_convs = []
        for i in range(layer_per_block):
            if self.depthwise:
                conv = SeparableConvBnAct(mid_chs, mid_chs, norm_layer=norm_layer)
            else:
                conv = ConvBnAct(next_in_chs, mid_chs, 3, norm_layer=norm_layer)
            # After the first conv all stages consume mid_chs channels.
            next_in_chs = mid_chs
            mid_convs.append(conv)
        # SequentialAppendList collects every stage's output for the concat.
        self.conv_mid = SequentialAppendList(*mid_convs)
        # The concat conv sees the block input plus every mid-conv output.
        next_in_chs = (in_chs + (layer_per_block * mid_chs))
        self.conv_concat = ConvBnAct(next_in_chs, out_chs, norm_layer=norm_layer)
        if attn:
            self.attn = create_attn(attn, out_chs)
        else:
            self.attn = None
    def forward(self, x):
        # `output` accumulates [input, stage1, stage2, ...] for the concat.
        output = [x]
        if (self.conv_reduction is not None):
            x = self.conv_reduction(x)
        x = self.conv_mid(x, output)
        x = self.conv_concat(x)
        if (self.attn is not None):
            x = self.attn(x)
        if self.residual:
            # Identity shortcut from the original block input.
            x = (x + output[0])
        return x
# NOTE(review): the bare call below looks like a stripped registration
# decorator (its '@' prefix lost in preprocessing) — confirm upstream.
_task_model('contact_prediction', 'onehot')
class ProteinOneHotForContactPrediction(ProteinOneHotAbstractModel):
    """One-hot baseline model with a pairwise contact-prediction head."""
    def __init__(self, config):
        super().__init__(config)
        self.onehot = ProteinOneHotModel(config)
        # ignore_index=-1 masks padded/unlabeled residue pairs in the loss.
        self.predict = PairwiseContactPredictionHead(config.hidden_size, ignore_index=(- 1))
        self.init_weights()
    def forward(self, input_ids, protein_length, input_mask=None, targets=None):
        outputs = self.onehot(input_ids, input_mask=input_mask)
        (sequence_output, pooled_output) = outputs[:2]
        # Prepend head outputs (loss/predictions) to any extra encoder outputs.
        outputs = (self.predict(sequence_output, protein_length, targets) + outputs[2:])
        return outputs
class _validation_args():
    """Validation-rendering options (CLI-config style fields).

    NOTE(review): written like a dataclass (annotations + field(...)), but
    no decorator is visible here — it may have been stripped; confirm.
    """
    # Name of the camera preset used for validation rendering.
    camera_preset: str
    # How validation cameras cover the scene.
    coverage: str = field(default='uniform', choices=['exhaustive', 'uniform'])
    # Number of times each camera is repeated.
    repeat_cameras: int = 1
    # Run validation every N training steps.
    every_n_steps: int = 2500
    # Number of rays per validation batch.
    rays_batch_size: int = 8192
class PeakSignalToNoiseRatioMetric(Metric):
    """Metric computing PSNR between generated images and the gold image."""
    def __init__(self):
        # torchmetrics PSNR instance, created lazily in _compute_psnr_scores
        # so the torchmetrics import stays optional.
        self._metric = None
        self._device = get_torch_device()
    def __repr__(self):
        return 'PeakSignalToNoiseRatioMetric()'
    def evaluate_generation(self, adapter_spec: AdapterSpec, request_state: RequestState, metric_service: MetricService, eval_cache_path: str) -> List[Stat]:
        """Return the expected PSNR stat for one request's generated images."""
        assert (request_state.result is not None)
        request_result: RequestResult = request_state.result
        image_locations: List[str] = gather_generated_image_locations(request_result)
        if (len(image_locations) == 0):
            # Nothing was generated; no stat to report.
            return []
        gold_image_path: str = get_gold_image_location(request_state)
        score: float = self._compute_psnr_scores(image_locations, gold_image_path)
        return [Stat(MetricName('expected_psnr_score')).add(score)]
    def _compute_psnr_scores(self, generated_image_locations: List[str], reference_image_path: str) -> float:
        """Compute PSNR of the generated image batch vs the reference image."""
        try:
            from torchmetrics import PeakSignalNoiseRatio
        except ModuleNotFoundError as e:
            handle_module_not_found_error(e, ['heim'])
        if (self._metric is None):
            self._metric = PeakSignalNoiseRatio().to(self._device)
        # Resize everything to 256x256 so the two stacks align.
        preprocessing = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
        generated_images: List[torch.Tensor] = []
        reference_images: List[torch.Tensor] = []
        for location in generated_image_locations:
            image = preprocessing(open_image(location))
            generated_images.append(image)
            # The same gold image is re-loaded once per generated image so
            # the stacks have equal length.
            image = preprocessing(open_image(reference_image_path))
            reference_images.append(image)
        img1: torch.Tensor = torch.stack(generated_images).to(self._device)
        img2: torch.Tensor = torch.stack(reference_images).to(self._device)
        score: float = self._metric(img1, img2).detach().item()
        return score
def R2():
    """Benchmark: time symbolic expansion of the degree-15 Hermite polynomial.

    Returns the elapsed time in seconds as reported by clock().
    """
    def hermite(n, y):
        # Three-term recurrence H_n = 2*y*H_{n-1} - 2*(n-1)*H_{n-2}.
        # Deliberately unmemoized: the redundant recursion plus expand()
        # is the workload being timed.
        if (n == 1):
            return (2 * y)
        if (n == 0):
            return 1
        return expand((((2 * y) * hermite((n - 1), y)) - ((2 * (n - 1)) * hermite((n - 2), y))))
    t1 = clock()
    hermite(15, var('y'))
    t2 = clock()
    return (t2 - t1)
class Retrain_Autodeeplab(nn.Module):
    """AutoDeepLab retrain-phase network: searched encoder + ASPP + decoder,
    bilinearly upsampled back to the input resolution."""
    def __init__(self, args, input_channels=3):
        super(Retrain_Autodeeplab, self).__init__()
        # Downsample-level -> channel multiplier for the searched path.
        filter_param_dict = {0: 1, 1: 2, 2: 4, 3: 8}
        BatchNorm2d = (ABN if args.use_ABN else NaiveBN)
        # Only rank 0 prints when running distributed.
        if (((not args.dist) and args.use_ABN) or (args.dist and args.use_ABN and (dist.get_rank() == 0))):
            print('=> use ABN!')
        # NOTE(review): BUG — this branch binds `net_arch`/`cell_arch`, but
        # the code below reads `network_arch` and `network_path`, which are
        # only assigned in the else branch; taking this branch raises
        # NameError. Confirm the intended variable names upstream before
        # fixing.
        if ((args.net_arch is not None) and (args.cell_arch is not None)):
            (net_arch, cell_arch) = (np.load(args.net_arch), np.load(args.cell_arch))
        else:
            network_arch = np.load(os.path.join(args.exp, 'network_path_space.npy'))
            cell_arch = np.load(os.path.join(args.exp, 'genotype.npy'))
            network_path = np.load(os.path.join(args.exp, 'network_path.npy'))
        self.encoder = newModel(network_arch, cell_arch, args.num_classes, 12, args.filter_multiplier, BatchNorm=BatchNorm2d, args=args, input_channels=input_channels)
        self.aspp = ASPP(((args.filter_multiplier * args.block_multiplier) * filter_param_dict[network_path[(- 1)]]), 256, args.num_classes, conv=nn.Conv2d, norm=BatchNorm2d)
        self.decoder = Decoder(args.num_classes, filter_multiplier=((args.filter_multiplier * args.block_multiplier) * filter_param_dict[network_path[2]]), args=args, last_level=network_path[(- 1)])
    def forward(self, x):
        (encoder_output, low_level_feature) = self.encoder(x)
        high_level_feature = self.aspp(encoder_output)
        decoder_output = self.decoder(high_level_feature, low_level_feature)
        # Bilinearly upsample logits back to the input spatial size.
        return nn.Upsample((x.shape[2], x.shape[3]), mode='bilinear', align_corners=True)(decoder_output)
    def get_params(self):
        """Split parameters into two groups; encoder params come pre-split
        by the encoder's own get_params (presumably BN vs non-BN — confirm).
        """
        (back_bn_params, back_no_bn_params) = self.encoder.get_params()
        tune_wd_params = ((list(self.aspp.parameters()) + list(self.decoder.parameters())) + back_no_bn_params)
        no_tune_wd_params = back_bn_params
        return (tune_wd_params, no_tune_wd_params)
def test_ignored_param_warning(line_graph):
    """UnsupervisedSampler must reject walk parameters alongside `walker`.

    When an explicit walker is supplied, length/number_of_walks/seed belong
    to the walker itself, so passing them too must raise ValueError.
    """
    walker = UniformRandomWalk(line_graph, n=2, length=3)
    with pytest.raises(ValueError, match="cannot specify both 'walker' and 'length'"):
        UnsupervisedSampler(line_graph, walker=walker, length=5)
    with pytest.raises(ValueError, match="cannot specify both 'walker' and 'number_of_walks'"):
        UnsupervisedSampler(line_graph, walker=walker, number_of_walks=5)
    with pytest.raises(ValueError, match="cannot specify both 'walker' and 'seed'"):
        UnsupervisedSampler(line_graph, walker=walker, seed=1)
class Pose2_SE2(Pose2):
    """SE(2) pose whose tangent space is the full se(2) Lie algebra
    [theta, vx, vy] (rotation-coupled translation), unlike the base Pose2.

    NOTE(review): from_tangent/hat take ``cls`` but no '@classmethod'
    decorator is visible here — it may have been stripped; confirm.
    """
    def from_tangent(cls, v: T.Sequence[T.Scalar], epsilon: T.Scalar=sf.epsilon()) -> Pose2_SE2:
        """Exponential map: tangent [theta, vx, vy] -> Pose2_SE2."""
        theta = v[0]
        R = Rot2.from_tangent([theta], epsilon=epsilon)
        # a = sin(theta)/theta and b = (1-cos(theta))/theta, with
        # sign-preserving epsilon guards against division by zero.
        a = ((R.z.imag + (epsilon * sf.sign_no_zero(R.z.imag))) / (theta + (epsilon * sf.sign_no_zero(theta))))
        b = ((1 - R.z.real) / (theta + (epsilon * sf.sign_no_zero(theta))))
        t = Vector2(((a * v[1]) - (b * v[2])), ((b * v[1]) + (a * v[2])))
        return cls(R, t)
    def to_tangent(self, epsilon: T.Scalar=sf.epsilon()) -> T.List[T.Scalar]:
        """Logarithm map: Pose2_SE2 -> tangent [theta, vx, vy]."""
        theta = self.R.to_tangent(epsilon=epsilon)[0]
        halftheta = (0.5 * (theta + (sf.sign_no_zero(theta) * epsilon)))
        # Closed form of V^{-1} from the SE(2) exponential, epsilon-guarded.
        a = ((halftheta * (1 + self.R.z.real)) / (self.R.z.imag + (sf.sign_no_zero(self.R.z.imag) * epsilon)))
        V_inv = Matrix([[a, halftheta], [(- halftheta), a]])
        t_tangent = (V_inv * self.t)
        return [theta, t_tangent[0], t_tangent[1]]
    def storage_D_tangent(self) -> Matrix:
        """Jacobian of the storage coords w.r.t. the tangent [theta, vx, vy]."""
        storage_D_tangent_R = self.R.storage_D_tangent()
        storage_D_tangent_t = self.R.to_rotation_matrix()
        return Matrix.block_matrix([[storage_D_tangent_R, Matrix.zeros(2, 2)], [Matrix.zeros(2, 1), storage_D_tangent_t]])
    def tangent_D_storage(self) -> Matrix:
        """Jacobian of the tangent w.r.t. the storage coords."""
        tangent_D_storage_R = self.R.tangent_D_storage()
        tangent_D_storage_t = self.R.to_rotation_matrix().T
        return Matrix.block_matrix([[tangent_D_storage_R, Matrix.zeros(1, 2)], [Matrix.zeros(2, 2), tangent_D_storage_t]])
    def retract(self, vec: T.Sequence[T.Scalar], epsilon: T.Scalar=sf.epsilon()) -> Pose2_SE2:
        # Route through the generic LieGroup implementation so it uses this
        # class's from_tangent.
        return LieGroup.retract(self, vec, epsilon)
    def local_coordinates(self, b: Pose2_SE2, epsilon: T.Scalar=sf.epsilon()) -> T.List[T.Scalar]:
        return LieGroup.local_coordinates(self, b, epsilon)
    def hat(cls, vec: T.List[T.Scalar]) -> Matrix33:
        """Return the 3x3 se(2) matrix representation of tangent ``vec``."""
        R_tangent = [vec[0]]
        t_tangent = [vec[1], vec[2]]
        top_left = Rot2.hat(R_tangent)
        top_right = Matrix21(t_tangent)
        bottom = Matrix13.zero()
        return T.cast(Matrix33, top_left.row_join(top_right).col_join(bottom))
def ccs_on_same_gpu_has_path_via_missing_nodes(cur_set, graph, id_to_node_worked_on, prev_topo_sort_id, topo_sort_id, unbroken_stage):
    """Return True if, considering any node in the topo-sort gap between
    prev_topo_sort_id and topo_sort_id, there is a path from the current
    set to the unbroken stage that routes through gap ("missing") nodes.

    Builds a digraph with super-source 0 (current set) and super-sink 1
    (unbroken stage + the node at topo_sort_id); missing-node ids are
    shifted by +2 to avoid colliding with 0/1. A 0 -> 1 path means the
    sets cannot be merged.
    """
    gap_ids = list(range(prev_topo_sort_id + 1, topo_sort_id))
    for gap_id in gap_ids:
        # Only gap ids that map to a node still present in the work graph
        # trigger a reachability check.
        if gap_id not in id_to_node_worked_on:
            continue
        if id_to_node_worked_on[gap_id].id not in graph:
            continue
        source_nodes = {id_to_node_worked_on[i] for i in cur_set}
        cur_ids = set(cur_set)
        via_nodes = {
            id_to_node_worked_on[i]
            for i in gap_ids
            if (i not in cur_ids) and (i in id_to_node_worked_on)
        }
        sink_nodes = {id_to_node_worked_on[i] for i in unbroken_stage}
        sink_nodes.add(id_to_node_worked_on[topo_sort_id])
        edges = []
        # Source set -> gap nodes.
        for src in source_nodes:
            for succ in src.out_edges:
                if succ in via_nodes:
                    edges.append((0, succ.id + 2))
        # Gap node -> gap node, and gap node -> sink set.
        for via in via_nodes:
            for succ in via.out_edges:
                if succ in via_nodes:
                    edges.append((via.id + 2, succ.id + 2))
                elif succ in sink_nodes:
                    edges.append((via.id + 2, 1))
        reach_graph = nx.DiGraph(incoming_graph_data=edges)
        # Ensure the super-source/sink exist even when isolated.
        reach_graph.add_node(0)
        reach_graph.add_node(1)
        if nx.algorithms.shortest_paths.generic.has_path(reach_graph, 0, 1):
            return True
    return False
def register_Ns3PhyRxStatsCalculator_methods(root_module, cls):
    """Register constructors/methods of ns3::PhyRxStatsCalculator (pybindgen codegen)."""
    # Copy constructor, then default constructor.
    cls.add_constructor([param('ns3::PhyRxStatsCalculator const &', 'arg0')])
    cls.add_constructor([])
    # Instance methods for DL/UL PHY reception stats and their filenames;
    # the *Callback and GetTypeId entries are static.
    cls.add_method('DlPhyReception', 'void', [param('ns3::PhyReceptionStatParameters', 'params')])
    cls.add_method('DlPhyReceptionCallback', 'void', [param('ns3::Ptr< ns3::PhyRxStatsCalculator >', 'phyRxStats'), param('std::string', 'path'), param('ns3::PhyReceptionStatParameters', 'params')], is_static=True)
    cls.add_method('GetDlRxOutputFilename', 'std::string', [])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetUlRxOutputFilename', 'std::string', [])
    cls.add_method('SetDlRxOutputFilename', 'void', [param('std::string', 'outputFilename')])
    cls.add_method('SetUlRxOutputFilename', 'void', [param('std::string', 'outputFilename')])
    cls.add_method('UlPhyReception', 'void', [param('ns3::PhyReceptionStatParameters', 'params')])
    cls.add_method('UlPhyReceptionCallback', 'void', [param('ns3::Ptr< ns3::PhyRxStatsCalculator >', 'phyRxStats'), param('std::string', 'path'), param('ns3::PhyReceptionStatParameters', 'params')], is_static=True)
    return
def online_learning_self_train(supervision, agent, init_train_data, online_data_loader, train_table, val_data, val_table, test_data, test_table, update_iter, model_save_path, record_save_path, model_renew_fn, max_seq_length=222, num_target_layers=2, detail=False, st_pos=0, end_pos=(- 1), cnt_tot=1, path_db=None, batch_size=16):
    """Online self-training loop for a WikiSQL-style semantic parser.

    Streams examples from ``online_data_loader`` one at a time, decodes each
    question with ``agent``'s parser, keeps the prediction as a pseudo-label
    when its log-probability clears ``conf_threshold`` (set from
    ``supervision``), and every ``update_iter`` examples retrains the parser
    from scratch on ``init_train_data`` plus all accumulated pseudo-labels.
    Interaction records are periodically dumped to ``record_save_path`` and,
    when ``st_pos > 0``, reloaded so already-processed examples are replayed
    from the record instead of re-decoded.

    Returns:
        (acc, results, cnt_list, interaction_records_dict): aggregate
        accuracy statistics, per-example failure entries, per-example
        component-correctness lists, and the full interaction log.
    """
    # Running counters over all streamed examples (sc/sa/wn/wc/wo/wv/wvi are
    # per-SQL-component correctness counts; lx = logical form, x = execution).
    ave_loss = 0
    cnt = 0
    cnt_sc = 0
    cnt_sa = 0
    cnt_wn = 0
    cnt_wc = 0
    cnt_wo = 0
    cnt_wv = 0
    cnt_wvi = 0
    cnt_lx = 0
    cnt_x = 0
    cnt_list = []
    results = []
    # Interaction log; 'records' grows per example, 'start_iter' marks the
    # last retraining checkpoint so runs can resume.
    interaction_records_dict = {'records': [], 'start_iter': 0}
    interaction_records = interaction_records_dict['records']
    count_exit = 0
    count_failure = 0
    count_iter = 0
    num_total_examples = len(online_data_loader.dataset)
    annotation_buffer = []
    iter_annotation_buffer = []
    print('## supervision:', supervision)
    print(('## data size: %d ' % num_total_examples))
    print(('## update_iter: %d ' % update_iter))
    # Only the 'self_train_0.5' mode filters pseudo-labels by confidence;
    # otherwise every decoded prediction is kept.
    conf_threshold = None
    if (supervision == 'self_train_0.5'):
        conf_threshold = 0.5
    print('## conf_threshold:', str(conf_threshold))
    print(('## st_pos: %d ' % st_pos))
    init_train_data = data_preprocessing(agent.world_model.tokenizer, init_train_data, train_table, max_seq_length, bool_remove_none=True, bool_loss_weight=False)
    if (st_pos > 0):
        # Resuming: reload the previous run's records so the first st_pos
        # examples are replayed from disk rather than re-decoded.
        print('## WARNING: inaccurate interaction performance report...')
        print(('Loading interaction records from %s...' % record_save_path))
        interaction_records_dict = json.load(open(record_save_path, 'r'))
        interaction_records = interaction_records_dict['records']
        print(('Record item size: %d ' % len(interaction_records)))
    learning_start_time = datetime.datetime.now()
    print('## Online starting time: {}'.format(learning_start_time))
    dset_name = 'train'
    engine = DBEngine(os.path.join(path_db, f'{dset_name}.db'))
    for (iB, t) in enumerate(online_data_loader):
        cnt += len(t)
        # The online loader must yield one example per batch.
        assert (len(t) == 1)
        (nlu, nlu_t, sql_i, sql_q, sql_t, tb, hs_t, hds) = get_fields(t, train_table, no_hs_t=True, no_sql_t=True)
        g_sql_q = generate_sql_q(sql_i, tb)
        (g_sc, g_sa, g_wn, g_wc, g_wo, g_wv) = get_g(sql_i)
        g_wvi_corenlp = get_g_wvi_corenlp(t)
        if (len(interaction_records) >= cnt):
            # Replay branch: this example was processed in a previous run;
            # rebuild its pseudo-label from the saved record.
            record = interaction_records[(cnt - 1)]
            if ('sql_i' not in record):
                continue
            if ((conf_threshold is None) or (float(record['logprob']) > np.log(conf_threshold))):
                # NOTE(review): eval() on record contents is unsafe if the
                # record file is not trusted — consider ast.literal_eval.
                gen_sql_i = eval(record['sql_i'])
                gen_tag_seq = eval(record['tag_seq'])
                assert (g_sql_q[0] == record['true_sql'])
                # Map wordpiece positions back to whitespace-token indices.
                tt_to_t_idx1 = []
                for (i, token) in enumerate(nlu_t[0]):
                    sub_tokens = agent.world_model.tokenizer.tokenize(token)
                    for sub_token in sub_tokens:
                        tt_to_t_idx1.append(i)
                annotated_example = extract_weighted_example(t[0], tt_to_t_idx1, gen_sql_i, gen_tag_seq)
                if (annotated_example is not None):
                    iter_annotation_buffer.append(annotated_example)
            count_iter += 1
            if ((count_iter % update_iter) == 0):
                print((' count_iter %d, nl %s' % (count_iter, record['nl'])))
                print(' Time stamp: {}'.format(datetime.datetime.now()))
        else:
            # Fresh branch: decode the question with the current parser.
            (wemb_n, wemb_h, l_n, l_hpu, l_hs, nlu_tt, t_to_tt_idx, tt_to_t_idx) = get_wemb_bert(agent.world_model.bert_config, agent.world_model.model_bert, agent.world_model.tokenizer, nlu_t, hds, max_seq_length, num_out_layers_n=num_target_layers, num_out_layers_h=num_target_layers)
            try:
                g_wvi = get_g_wvi_bert_from_g_wvi_corenlp(t_to_tt_idx, g_wvi_corenlp)
                (g_wv_str, g_wv_str_wp) = convert_pr_wvi_to_string(g_wvi, nlu_t, nlu_tt, tt_to_t_idx, nlu)
            # NOTE(review): bare except deliberately treats any alignment
            # failure as a skipped example; it also swallows unrelated errors.
            except:
                count_failure += 1
                results1 = {}
                results1['error'] = 'Skip happened'
                results1['nlu'] = nlu[0]
                results1['table_id'] = tb[0]['id']
                results.append(results1)
                print(('## Failure %d' % count_failure))
                interaction_records.append({'nl': t[0]['question'], 'true_sql': g_sql_q[0], 'true_sql_i': '{}'.format(sql_i[0]), 'questioned_indices': [], 'q_counter': 0})
                continue
            print(('\n' + ('#' * 50)))
            print('NL input: {}\nTrue SQL: {}'.format(t[0]['question'], g_sql_q[0]))
            # Bayesian-dropout error detection decodes from raw inputs;
            # otherwise reuse the precomputed BERT embeddings.
            if isinstance(agent.error_detector, ErrorDetectorBayesDropout):
                input_item = [tb, nlu_t, nlu, hds]
            else:
                input_item = [wemb_n, l_n, wemb_h, l_hpu, l_hs, tb, nlu_t, nlu_tt, tt_to_t_idx, nlu]
            hyp = agent.world_model.decode(input_item, dec_beam_size=1, bool_verbal=False)[0]
            print((('-' * 50) + '\nBefore interaction: \ninitial SQL: {}'.format(hyp.sql)))
            Hypothesis.print_hypotheses([hyp])
            # Unpack the hypothesis into the component-wise prediction format
            # expected by agent.evaluation().
            pr_sc = [hyp.sql_i['sel']]
            pr_sa = [hyp.sql_i['agg']]
            pr_wn = [len(hyp.sql_i['conds'])]
            pr_wc = [[col for (col, _, _) in hyp.sql_i['conds']]]
            pr_wo = [[op for (_, op, _) in hyp.sql_i['conds']]]
            pr_sql_i = [hyp.sql_i]
            pr_wvi = None
            print('initial evaluation: ')
            (cnt_sc1_list, cnt_sa1_list, cnt_wn1_list, cnt_wc1_list, cnt_wo1_list, cnt_wv1_list, cnt_wvi1_list, cnt_lx1_list, cnt_x1_list, cnt_list1, g_ans, pr_ans) = agent.evaluation([pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi, pr_sql_i], [g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi, sql_i], engine, tb, bool_verbal=True)
            record = {'nl': t[0]['question'], 'true_sql': g_sql_q[0], 'true_sql_i': '{}'.format(sql_i[0]), 'sql': '{}'.format(hyp.sql), 'sql_i': '{}'.format(hyp.sql_i), 'dec_seq': '{}'.format(hyp.dec_seq), 'tag_seq': '{}'.format(hyp.tag_seq), 'logprob': '{}'.format(hyp.logprob), 'lx_correct': int(sum(cnt_lx1_list)), 'x_correct': int(sum(cnt_x1_list)), 'q_counter': 0, 'questioned_indices': []}
            if isinstance(agent.error_detector, ErrorDetectorBayesDropout):
                record.update({'logprob_list': '{}'.format(hyp.logprob_list), 'test_tag_seq': '{}'.format(hyp.test_tag_seq)})
            interaction_records.append(record)
            # Keep the prediction as a pseudo-label if confident enough.
            if ((conf_threshold is None) or (hyp.logprob > np.log(conf_threshold))):
                annotated_example = extract_weighted_example(t[0], tt_to_t_idx[0], hyp.sql_i, hyp.tag_seq)
                if (annotated_example is not None):
                    iter_annotation_buffer.append(annotated_example)
            cnt_sc += sum(cnt_sc1_list)
            cnt_sa += sum(cnt_sa1_list)
            cnt_wn += sum(cnt_wn1_list)
            cnt_wc += sum(cnt_wc1_list)
            cnt_wo += sum(cnt_wo1_list)
            cnt_wv += sum(cnt_wv1_list)
            cnt_wvi += sum(cnt_wvi1_list)
            cnt_lx += sum(cnt_lx1_list)
            cnt_x += sum(cnt_x1_list)
            cnt_list.append(cnt_list1)
            if detail:
                pr_wv_str = None
                current_cnt = [cnt_tot, cnt, cnt_sc, cnt_sa, cnt_wn, cnt_wc, cnt_wo, cnt_wv, cnt_wvi, cnt_lx, cnt_x]
                report_detail(hds, nlu, g_sc, g_sa, g_wn, g_wc, g_wo, g_wv, g_wv_str, g_sql_q, g_ans, pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wv_str, pr_sql_i, pr_ans, cnt_list1, current_cnt)
            count_iter += 1
            # Free the BERT embeddings before the (possibly large) update.
            del wemb_n, wemb_h
        # Periodic retraining: every update_iter examples or at end of data.
        if (((count_iter % update_iter) == 0) or (count_iter == num_total_examples)):
            if (count_iter <= st_pos):
                # Still replaying a resumed run — accumulate annotations but
                # skip the (already performed) retraining.
                iter_annotation_buffer = data_preprocessing(agent.world_model.tokenizer, iter_annotation_buffer, train_table, max_seq_length, bool_remove_none=True, bool_loss_weight=False)
                annotation_buffer.extend(iter_annotation_buffer)
                iter_annotation_buffer = []
                continue
            print('\n~~~\nCurrent interaction performance (iter {}): '.format(count_iter))
            _ave_loss = (ave_loss / cnt)
            _acc_sc = (cnt_sc / cnt)
            _acc_sa = (cnt_sa / cnt)
            _acc_wn = (cnt_wn / cnt)
            _acc_wc = (cnt_wc / cnt)
            _acc_wo = (cnt_wo / cnt)
            _acc_wvi = (cnt_wvi / cnt)
            _acc_wv = (cnt_wv / cnt)
            _acc_lx = (cnt_lx / cnt)
            _acc_x = (cnt_x / cnt)
            _acc = [_ave_loss, _acc_sc, _acc_sa, _acc_wn, _acc_wc, _acc_wo, _acc_wvi, _acc_wv, _acc_lx, _acc_x]
            print('Interaction acc: {}'.format(_acc))
            q_count = sum([item['q_counter'] for item in interaction_records])
            dist_q_count = sum([len(set(item['questioned_indices'])) for item in interaction_records])
            print('Interaction #questions: {}, #questions per example: {:.3f} (exclude options: {:.3f}).'.format(q_count, ((q_count * 1.0) / len(interaction_records)), ((dist_q_count * 1.0) / len(interaction_records))))
            print('Interaction #exit: {}'.format(count_exit))
            print('~~~\n')
            print(('Saving interaction records to %s...' % record_save_path))
            json.dump(interaction_records_dict, open(record_save_path, 'w'), indent=4)
            iter_annotation_buffer = data_preprocessing(agent.world_model.tokenizer, iter_annotation_buffer, train_table, max_seq_length, bool_remove_none=True, bool_loss_weight=False)
            annotation_buffer.extend(iter_annotation_buffer)
            print('~~~\nUpdating base semantic parser at iter {}'.format(count_iter))
            model = agent.world_model.semparser
            model_bert = agent.world_model.model_bert
            # Retrain from scratch on seed data + all pseudo-labels so far;
            # model_renew_fn resets the parameters in place.
            print('Retraining from scratch...')
            update_buffer = (init_train_data + annotation_buffer)
            model_renew_fn(model, model_bert)
            print(('Train data size: %d ' % len(update_buffer)))
            (opt, opt_bert) = get_opt(model, model_bert, True)
            (train_loader, dev_loader) = get_loader_wikisql(update_buffer, val_data, batch_size, shuffle_train=True)
            test_loader = get_loader_wikisql_v2(test_data, batch_size, False)
            print('## Starting update at iter {}, anno_cost {}...time spent {}'.format(count_iter, sum([item['q_counter'] for item in interaction_records]), (datetime.datetime.now() - learning_start_time)))
            model_dir = os.path.join(model_save_path, ('%d/' % count_iter))
            if (not os.path.isdir(model_dir)):
                os.mkdir(model_dir)
            (dev_acc_lx_t_best, dev_acc_ex_t_best, test_acc_lx_t_best, test_acc_ex_t_best) = run_epochs(model, model_bert, opt, opt_bert, agent.world_model.bert_config, agent.world_model.tokenizer, path_db, model_dir, train_loader, train_table, dev_loader, val_table, test_loader, test_table, early_stop_ep=(EARLY_STOP_EPOCH_STAGE1 if (count_iter <= EARLY_THRESHOLD) else EARLY_STOP_EPOCH_STAGE2), bool_eval=True, startime_time=learning_start_time)
            print('## Ending update at iter {}, anno_cost {}, dev acc_lx {}, dev acc_ex {}, test acc_lx {},test acc_ex {}...time spent {}\n'.format(count_iter, sum([item['q_counter'] for item in interaction_records]), dev_acc_lx_t_best, dev_acc_ex_t_best, test_acc_lx_t_best, test_acc_ex_t_best, (datetime.datetime.now() - learning_start_time)))
            print(('Update interaction_records_dict: start_iter = %d.' % count_iter))
            interaction_records_dict['start_iter'] = count_iter
            print(('Saving interaction records to %s...' % record_save_path))
            json.dump(interaction_records_dict, open(record_save_path, 'w'), indent=4)
            iter_annotation_buffer = []
        if ((end_pos != (- 1)) and (count_iter == end_pos)):
            print('## Ending online learning at iter {}\n'.format(end_pos))
            break
    # Final aggregate statistics over all processed examples.
    ave_loss /= cnt
    acc_sc = (cnt_sc / cnt)
    acc_sa = (cnt_sa / cnt)
    acc_wn = (cnt_wn / cnt)
    acc_wc = (cnt_wc / cnt)
    acc_wo = (cnt_wo / cnt)
    acc_wvi = (cnt_wvi / cnt)
    acc_wv = (cnt_wv / cnt)
    acc_lx = (cnt_lx / cnt)
    acc_x = (cnt_x / cnt)
    print('## End online learning at time {}...time spent {}\n'.format(datetime.datetime.now(), (datetime.datetime.now() - learning_start_time)))
    q_count = sum([item['q_counter'] for item in interaction_records])
    dist_q_count = sum([len(set(item['questioned_indices'])) for item in interaction_records])
    print('#questions: {}, #questions per example: {:.3f} (exclude options: {:.3f}).'.format(q_count, ((q_count * 1.0) / len(interaction_records)), ((dist_q_count * 1.0) / len(interaction_records))))
    print('#exit: {}'.format(count_exit))
    acc = [ave_loss, acc_sc, acc_sa, acc_wn, acc_wc, acc_wo, acc_wvi, acc_wv, acc_lx, acc_x]
    return (acc, results, cnt_list, interaction_records_dict)
class AttributeObserver(metaclass=ABCMeta):
    """Interface for per-attribute observers used by decision-tree learners.

    An observer accumulates statistics of (attribute value, class, weight)
    triples and can later propose split points over that attribute.
    Subclasses must override all three methods below; the base versions
    raise ``NotImplementedError``.
    """

    def __init__(self):
        super().__init__()

    def update(self, att_val, class_val, weight):
        """Record one observation of ``att_val`` for class ``class_val`` with ``weight``."""
        raise NotImplementedError

    def probability_of_attribute_value_given_class(self, att_val, class_val):
        """Return the estimated P(att_val | class_val) from observed statistics."""
        raise NotImplementedError

    def get_best_evaluated_split_suggestion(self, criterion, pre_split_dist, att_idx, binary_only):
        """Return the best split on this attribute under ``criterion``.

        ``pre_split_dist`` is the class distribution before splitting,
        ``att_idx`` the attribute's index, and ``binary_only`` restricts the
        search to binary splits.
        """
        raise NotImplementedError
class Func_legendre_Q(BuiltinFunction):
    """Legendre function of the second kind, Q_n(x), as a symbolic function.

    Integer orders are evaluated exactly via ``eval_formula``; special
    values (n = -1/2, x in {-1, 0, 1}) are handled separately; numeric
    evaluation delegates to mpmath's ``legenq``.
    """

    def __init__(self):
        # Register the symbolic function and its names in other CASes.
        BuiltinFunction.__init__(self, 'legendre_Q', nargs=2, latex_name='Q', conversions={'maxima': 'legendre_q', 'mathematica': 'LegendreQ', 'maple': 'LegendreQ'})

    def _eval_(self, n, x, *args, **kwds):
        """Symbolic evaluation hook: exact value for special cases and
        integer n; returning None leaves legendre_Q(n, x) unevaluated."""
        ret = self._eval_special_values_(n, x)
        if (ret is not None):
            return ret
        if (n in ZZ):
            if (n < 0):
                # Q_n diverges for negative integer order.
                from sage.rings.infinity import unsigned_infinity
                return SR(unsigned_infinity)
            return self.eval_formula(n, x)

    def _eval_special_values_(self, n, x):
        """Return known closed forms, or None when no special case applies."""
        if (n == (QQ((- 1)) / 2)):
            # Q_{-1/2}(x) = K((x+1)/2), the complete elliptic integral.
            from sage.functions.special import elliptic_kc
            return elliptic_kc(((x + 1) / 2))
        if (x == 1):
            # Logarithmic singularity at the endpoints x = +/-1.
            from sage.rings.infinity import unsigned_infinity
            return SR(unsigned_infinity)
        if (x == (- 1)):
            from sage.rings.infinity import unsigned_infinity
            return SR(unsigned_infinity)
        if (x == 0):
            from .gamma import gamma
            from .other import sqrt
            from .trig import sin
            try:
                # Q_n(0) = -(sqrt(pi)/2) sin(pi n / 2) Gamma((n+1)/2)/Gamma(n/2+1);
                # if the gamma ratio is infinite, return that infinity directly.
                gam = (gamma(((n + 1) / 2)) / gamma(((n / 2) + 1)))
                if gam.is_infinity():
                    return gam
                return ((((- sqrt(SR.pi())) / 2) * sin(((SR.pi() / 2) * n))) * gam)
            except TypeError:
                # Symbolic n may not support the gamma computation; give up.
                pass

    def _evalf_(self, n, x, parent=None, **kwds):
        """Numeric evaluation: exact special values first, else mpmath legenq."""
        ret = self._eval_special_values_(n, x)
        if (ret is not None):
            return ret
        return _mpmath_utils_call(_mpmath_legenq, n, 0, x, parent=parent)

    def eval_recursive(self, n, arg, **kwds):
        """Evaluate Q_n(arg) for integer n >= 0 via the three-term recurrence.

        Works in QQ[x, l] where the symbol l stands for log((1+arg)/(1-arg));
        at the end the polynomial is split into the l-free part and the part
        multiplying the logarithm.
        """
        from sage.functions.log import ln
        if (n == 0):
            return ((ln((1 + arg)) - ln((1 - arg))) / 2)
        elif (n == 1):
            return (((arg / 2) * (ln((1 + arg)) - ln((1 - arg)))) - 1)
        (x, l) = PolynomialRing(QQ, 'x,l').gens()
        # Base cases Q_0 = l/2 and Q_1 = (x/2) l - 1 in the (x, l) encoding.
        help1 = (l / 2)
        help2 = (((x / 2) * l) - 1)
        for j in range(1, n):
            # (j+1) Q_{j+1} = (2j+1) x Q_j - j Q_{j-1}
            help3 = (((((2 * j) + 1) * x) * help2) - (j * help1))
            help3 = (help3 / (j + 1))
            help1 = help2
            help2 = help3
        # Substitute arg for x; monomials containing l pick up the log factor.
        sum1 = sum(((help3.monomial_coefficient(mon) * (arg ** mon.exponents()[0][0])) for mon in help3.monomials() if (not l.divides(mon))))
        sum2 = sum((((help3.monomial_coefficient(mon) * (arg ** mon.exponents()[0][0])) * (ln((1 + arg)) - ln((1 - arg)))) for mon in help3.monomials() if l.divides(mon)))
        return (sum1 + sum2)

    def eval_formula(self, n, arg, **kwds):
        """Evaluate Q_n(arg) via Q_n = P_n(arg) log((1+arg)/(1-arg))/2 - W_{n-1}."""
        from sage.functions.log import ln
        if (n == 0):
            return ((ln((1 + arg)) - ln((1 - arg))) / 2)
        elif (n == 1):
            return (((arg / 2) * (ln((1 + arg)) - ln((1 - arg)))) - 1)
        arg = SR(arg)
        return (((legendre_P(n, arg) * (ln((1 + arg)) - ln((1 - arg)))) / 2) - self._Wfunc(n, arg))

    def _Wfunc(self, n, arg):
        """Return the polynomial correction term W evaluated at arg.

        Built by the same three-term recurrence as the Legendre polynomials,
        seeded with W_0 = 0, W_1 = 1.
        """
        if (n == 0):
            return 0
        if (n == 1):
            return 1
        x = PolynomialRing(QQ, 'x').gen()
        help1 = 0
        help2 = 1
        for j in range(2, (n + 1)):
            help3 = (((((2 * j) - 1) * x) * help2) - ((j - 1) * help1))
            help3 = (help3 / j)
            help1 = help2
            help2 = help3
        # Evaluate the resulting polynomial at arg via its coefficient list.
        return sum(((b * (arg ** a)) for (a, b) in enumerate(help3)))

    def _derivative_(self, n, x, *args, **kwds):
        """d/dx Q_n(x) = (n x Q_n(x) - n Q_{n-1}(x)) / (x^2 - 1).

        Differentiation with respect to the order n is not supported.
        """
        diff_param = kwds['diff_param']
        if (diff_param == 0):
            raise NotImplementedError('Derivative w.r.t. to the index is not supported.')
        else:
            return ((((n * x) * legendre_Q(n, x)) - (n * legendre_Q((n - 1), x))) / ((x ** 2) - 1))
class ScriptMeta(type):
    """Metaclass that turns classes into TorchScript-compiled modules.

    Collects ``ScriptMethodStub`` attributes (inherited and own) into
    ``cls._methods`` and merges ``__constants__`` sets across bases, then
    wraps ``__init__`` so that, after the user's constructor runs, the
    instance is recursively compiled into a script module.
    """

    def __init__(cls, name, bases, attrs):
        # Aggregate stub methods and declared constants from all bases;
        # reversed() so later (closer) bases override earlier ones.
        cls._methods: Dict[(str, Any)] = {}
        cls._constants_set = set(getattr(cls, '__constants__', ()))
        for base in reversed(bases):
            for (k, v) in getattr(base, '_methods', {}).items():
                cls._methods[k] = v
            base_constants = getattr(base, '_constants_set', set())
            cls._constants_set = cls._constants_set.union(base_constants)
        # Move this class's own stubs off the class and into _methods.
        for (k, v) in sorted(attrs.items()):
            if isinstance(v, ScriptMethodStub):
                delattr(cls, k)
                cls._methods[v.original_method.__name__] = v
        if getattr(cls, '_disable_script_meta', False):
            # Escape hatch (e.g. for ScriptModule itself): skip the
            # init-wrapping below.
            return super(ScriptMeta, cls).__init__(name, bases, attrs)
        original_init = getattr(cls, '__init__', (lambda self: None))
        # NOTE(review): this bare expression looks like a decorator whose
        # '@functools.wraps' prefix was stripped in this copy — confirm
        # against the original source.
        (original_init)

        def init_then_script(self, *args, **kwargs):
            # Run the user's __init__ first; detect whether it registered
            # additional script methods (if so, type sharing is unsafe).
            num_methods = len(cls._methods)
            original_init(self, *args, **kwargs)
            added_methods_in_init = (len(cls._methods) > num_methods)
            # Only compile the most-derived class's instances.
            if (type(self) == cls):

                def make_stubs(module):
                    cls = type(module)
                    if hasattr(cls, '_methods'):
                        return [v for (k, v) in sorted(cls._methods.items())]
                    else:
                        return infer_methods_to_compile(module)
                self.__dict__['_actual_script_module'] = torch.jit._recursive.create_script_module(self, make_stubs, share_types=(not added_methods_in_init))
                # Delete the Python-side attributes/submodules so lookups
                # fall through to the compiled script module.
                concrete_type = self._actual_script_module._concrete_type
                for name in concrete_type.get_attributes():
                    delattr(self, name)
                for (name, _) in concrete_type.get_modules():
                    delattr(self, name)
                for name in ('_parameters', '_buffers', '_modules'):
                    delattr(self, name)
        cls.__init__ = init_then_script
        return super(ScriptMeta, cls).__init__(name, bases, attrs)
def main():
    """CLI entry point: parse arguments, seed numpy's RNG, and dispatch to run()."""
    parser = argparse.ArgumentParser(description='Run the Dawid-Skene, Fast Dawid-Skene, the Hybrid, or the Majority Voting Algorithm')
    add = parser.add_argument  # local alias keeps the option table compact
    add('--dataset', type=str, required=True,
        help='Name of the dataset to use')
    add('--k', default=0, type=int, required=False,
        help='Number of annotators to use. Each data point must have at least K annotators. If more annotators are available, the first K annotators are used. If K = 0, then all available annotations for each data point are used. Default is 0')
    add('--algorithm', type=str, choices=['DS', 'FDS', 'H', 'MV'], required=True,
        help='Algorithm to use - DS: Dawid-Skene, FDS: Fast-Dawid Skene, H: Hybrid, MV: Majority Voting')
    add('--mode', default='aggregate', type=str, choices=['aggregate', 'test'], required=False,
        help='The mode to run this program - aggregate: obtain aggregated dataset, test: aggregate data and compare with ground truths. Default is aggregate')
    add('--crowd_annotations_path', default=None, type=str, required=False,
        help='Path to crowdsourced annotations. Default is crowd.csv inside the dataset directory')
    add('--ground_truths_path', default=None, type=str, required=False,
        help='Path to ground truths, if using test mode. Default is gold.csv inside the dataset directory')
    add('--dataset_path', default=None, type=str, required=False,
        help='Custom path to dataset, to override default')
    add('--seed', default=18, type=int, required=False,
        help='Sets the random seed. Default is 18')
    add('--output', default=None, type=str, required=False,
        help='Path to write CSV output, output is not written if this is not set')
    add('--print_result', action='store_true',
        help='Prints the predictions and accuracy to standard output, if set')
    add('-v', '--verbose', action='store_true', help='Run in verbose mode', dest='verbose')
    args = parser.parse_args()
    # Seed numpy before dispatching so the chosen algorithm is reproducible.
    np.random.seed(args.seed)
    run(args)
def construct_scheduler(optimizer, cfg: OmegaConf):
    """Build a learning-rate scheduler for ``optimizer`` from ``cfg.train``.

    Supports 'multistep', 'plateau', 'exponential' and 'cosine' schedules;
    any other value returns None (with a warning). When
    ``scheduler_params.warmup_epochs != -1`` and a scheduler was built, it is
    wrapped in ckconv's linear warm-up scheduler.
    """
    scheduler_name = cfg.train.scheduler
    sched_params = cfg.train.scheduler_params
    decay_factor = sched_params.decay_factor
    decay_steps = sched_params.decay_steps
    patience = sched_params.patience
    warmup_epochs = sched_params.warmup_epochs
    use_warmup = warmup_epochs != (- 1)
    if scheduler_name == 'multistep':
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=decay_steps, gamma=(1.0 / decay_factor))
    elif scheduler_name == 'plateau':
        # Monitored metric is expected to be maximized (mode='max').
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=(1.0 / decay_factor), patience=patience, verbose=True)
    elif scheduler_name == 'exponential':
        scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=decay_factor, last_epoch=(- 1))
    elif scheduler_name == 'cosine':
        # T_max is counted in iterations; exclude warm-up epochs when used.
        size_dataset = DATASET_SIZES[cfg.dataset]
        iters_per_epoch = math.ceil((size_dataset / float(cfg.train.batch_size)))
        if use_warmup:
            annealed_epochs = cfg.train.epochs - warmup_epochs
        else:
            annealed_epochs = cfg.train.epochs
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=(annealed_epochs * iters_per_epoch), eta_min=1e-06)
    else:
        scheduler = None
        print(f'WARNING! No scheduler will be used. cfg.train.scheduler = {scheduler_name}')
    if use_warmup and (scheduler is not None):
        size_dataset = DATASET_SIZES[cfg.dataset]
        scheduler = ckconv.nn.LinearWarmUp_LRScheduler(optimizer=optimizer, lr_scheduler=scheduler, warmup_iterations=(warmup_epochs * math.ceil((size_dataset / float(cfg.train.batch_size)))))
    return scheduler
# NOTE(review): the decorator below had been truncated to a bare
# '.parametrize(...)' in this copy, which is a syntax error at module level.
# Restored as '@pytest.mark.parametrize' (the file is a pytest module);
# confirm against the original source.
@pytest.mark.parametrize('val,true_dist,false_dist', [(True, 0.0, 1.0), (object(), 0.0, inf), (ExecutionTracer(), 0.0, inf), (False, 1.0, 0), ([], 1.0, 0), (set(), 1.0, 0), ({}, 1.0, 0), ((), 1.0, 0), ('', 1.0, 0), (b'', 1.0, 0), (0, 1.0, 0), (['something'], 0.0, 1.0), ({'something'}, 0.0, 1.0), ({'a': 'something'}, 0.0, 1.0), (('something',), 0.0, 1.0), ('a', 0.0, 1.0), (['something', 'another', 'bla'], 0.0, 3.0), ({'something', 'another', 'bla'}, 0.0, 3.0), ({'a': 'something', 'b': 'another', 'c': 'bla'}, 0.0, 3.0), (('something', 'another', 'bla'), 0.0, 3.0), ('abcdef', 0.0, 6.0), (b'abcdef', 0.0, 6.0), (42, 0.0, 42.0), ((3 + 4j), 0.0, 5.0), (7.5, 0.0, 7.5)])
def test_bool_distances(val, true_dist, false_dist):
    """Boolean branch distances: truthy values give true_dist 0 and a
    false_dist reflecting 'how truthy' (length, magnitude); falsy values
    give true_dist 1 and false_dist 0."""
    tracer = ExecutionTracer()
    tracer.current_thread_identifier = threading.current_thread().ident
    tracer.register_predicate(MagicMock(code_object_id=0))
    tracer.executed_bool_predicate(val, 0)
    assert (tracer.get_trace().true_distances.get(0) == true_dist)
    assert (tracer.get_trace().false_distances.get(0) == false_dist)
class BJJmat(SpectralMatrix):
    """Mass matrix for Jacobi-type bases (both test and trial must be J).

    The matrix is diagonal; only the main diagonal (offset 0) is returned,
    holding the squared norms of the basis functions.
    """

    def assemble(self, method):
        """Return ``{0: diagonal}`` where the diagonal holds the squared norms."""
        test_base = self.testfunction[0]
        trial_base = self.trialfunction[0]
        assert isinstance(test_base, J)
        assert isinstance(trial_base, J)
        return {0: get_norm_sq(test_base, trial_base, method)}
def _print_top_wer_spks(spks_by_wer, file=sys.stdout):
    """Print a banner and one '<speaker> %WER <wer>' line per entry.

    Each item of ``spks_by_wer`` must be a mapping with at least the keys
    'speaker' and 'WER' (a float, printed with two decimals).
    """
    banner = '=' * 80
    print(banner, file=file)
    print('SPEAKERS WITH HIGHEST WER', file=file)
    for details in spks_by_wer:
        line = '{speaker} %WER {WER:.2f}'.format(**details)
        print(line, file=file)
def get_noise_pred_single(latents, t, context, unet):
    """Run one UNet denoising step and return the predicted noise.

    ``context`` is passed as the cross-attention conditioning
    (``encoder_hidden_states``); the UNet's 'sample' output is returned.
    """
    model_output = unet(latents, t, encoder_hidden_states=context)
    return model_output['sample']
def invert(A0, A1, tmp, i0, i1):
    """Z3 encoding of one conditional adjacent swap on array ``A0``.

    If A0[i0] > A0[i0+1], the post-state ``A1`` is A0 with the pair swapped
    (``tmp`` holds the saved A0[i0]); otherwise A1 equals A0 unchanged.
    Note ``i1`` is accepted for interface compatibility but unused.
    """
    left = Select(A0, i0)
    right = Select(A0, (i0 + 1))
    swapped = And((tmp == left), (A1 == Store(A0, i0, right)), (A1 == Store(A0, (i0 + 1), tmp)))
    unchanged = (A1 == A0)
    return If((left > right), swapped, unchanged)
def main(arg):
    """Train a classifier-conditioned Gaussian diffusion model.

    Builds the backbone from ``arg``, wraps it in ``GaussianDiffusion``,
    keeps an EMA copy, and runs the train/validate/sample loop for
    ``arg.epochs`` epochs, checkpointing after every epoch and tracking the
    best top-1 accuracy in the module-level ``best_acc1``.

    NOTE(review): the body mixes the parameter ``arg`` with a module-level
    ``args`` (e.g. ``args.gpu_id``, ``args.device``, ``args.px``,
    ``args.epochs``) — presumably both refer to the same parsed namespace;
    confirm before relying on calling this with a different object.
    """
    global best_acc1
    data_info = datainfo(logger, arg)
    model = create_model(data_info['img_size'], data_info['n_classes'], arg)
    n_parameters = sum((p.numel() for p in model.parameters() if p.requires_grad))
    print(f'Creating model: {arg.model}')
    print(f"Number of params: {format(n_parameters, ',')}")
    # Multi-GPU if a comma-separated gpu_id list was given, else single device.
    if (',' in args.gpu_id):
        model = nn.DataParallel(model, device_ids=range(len(arg.gpu_id.split(','))))
    else:
        model.to(args.device)
    print(f'Initial learning rate: {arg.lr:.6f}')
    print(f'Start training for {arg.epochs} epochs')
    # Classification loss; label smoothing only when the p(y|x) head is on.
    if (arg.ls and arg.pyx):
        print('label smoothing used')
        criterion = LabelSmoothingCrossEntropy()
    else:
        criterion = nn.CrossEntropyLoss()
    if ((arg.sd > 0.0) and arg.pyx):
        print(f'Stochastic depth({arg.sd}) used ')
    criterion = criterion.to(args.device)
    normalize = [transforms.Normalize(mean=data_info['stat'][0], std=data_info['stat'][1])]
    # The cm/mu/ra flags are consumed inside train(); only announced here.
    if (arg.cm and arg.pyx):
        print('Cutmix used')
    if (arg.mu and arg.pyx):
        print('Mixup used')
    if ((arg.ra > 1) and arg.pyx):
        print(f'Repeated Aug({arg.ra}) used')
    '\n Data Augmentation\n '
    augmentations = [transforms.RandomHorizontalFlip(), transforms.RandomCrop(data_info['img_size'], padding=4)]
    if (arg.aa and arg.pyx):
        # Pick the AutoAugment policy matching the dataset family.
        print('Auto augmentation used')
        if ('cifar' in arg.dataset):
            print('CIFAR Policy')
            from utils.autoaug import CIFAR10Policy
            augmentations += [CIFAR10Policy()]
        elif ('svhn' in arg.dataset):
            print('SVHN Policy')
            from utils.autoaug import SVHNPolicy
            augmentations += [SVHNPolicy()]
        else:
            print('imagenet Policy')
            from utils.autoaug import ImageNetPolicy
            augmentations += [ImageNetPolicy()]
    augmentations += [transforms.ToTensor(), *normalize]
    if ((arg.re > 0) and arg.pyx):
        from utils.random_erasing import RandomErasing
        print(f'Random erasing({arg.re}) used ')
        augmentations += [RandomErasing(probability=arg.re, sh=arg.re_sh, r1=arg.re_r1, mean=data_info['stat'][0])]
    augmentations = transforms.Compose(augmentations)
    (train_dataset, val_dataset, px_dataset) = dataload(arg, augmentations, normalize, data_info, px=True)
    train_loader = torch.utils.data.DataLoader(train_dataset, num_workers=arg.workers, pin_memory=True, batch_sampler=RASampler(len(train_dataset), arg.batch_size, 1, arg.ra, shuffle=True, drop_last=True))
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=100, shuffle=False, pin_memory=True, num_workers=arg.workers)
    px_loader = torch.utils.data.DataLoader(px_dataset, batch_size=arg.batch_size, shuffle=False, pin_memory=True, num_workers=arg.workers)
    # cycle() lets train() draw p(x) batches indefinitely.
    px_loader = cycle(px_loader)
    diffusion_model = GaussianDiffusion(model, image_size=arg.img_size, channels=(3 if (arg.dataset != 'mnist') else 1), timesteps=arg.t_step, loss_type=arg.loss).to(args.device)
    # EMA copy used for evaluation and sampling.
    ema_model = copy.deepcopy(diffusion_model)
    optimizer = torch.optim.AdamW(diffusion_model.parameters(), lr=arg.lr, weight_decay=arg.wd)
    scheduler = build_scheduler(arg, optimizer, len(train_loader))
    n_ch = 3
    im_sz = arg.img_size
    # Replay buffer of uniform noise images used by the sampler.
    buffer = torch.FloatTensor((10000 if (arg.dataset != 'stl10') else 5000), n_ch, im_sz, im_sz).uniform_((- 1), 1)
    if arg.resume:
        checkpoint = torch.load(arg.resume)
        diffusion_model.load_state_dict(checkpoint['model_state_dict'])
        ema_model.load_state_dict(checkpoint['model_state_dict'])
        if ('optimizer_state_dict' in checkpoint):
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            scheduler.load_state_dict(checkpoint['scheduler'])
            # Shrink the remaining epoch budget by what was already trained.
            final_epoch = arg.epochs
            arg.epochs = (final_epoch - (checkpoint['epoch'] + 1))
    print('Beginning training')
    test_acc = 0
    sample_time = 0
    for epoch in range(arg.epochs):
        epoch_start = time.time()
        (lr, avg_loss, avg_acc, avg_ce, avg_dif) = train(train_loader, px_loader, diffusion_model, ema_model, criterion, optimizer, epoch, scheduler, arg)
        metrics = {'lr': lr}
        tf_metrics = {'lr': lr, 'Train/Loss': avg_loss, 'Train/Acc': avg_acc, 'Train/CELoss': avg_ce, 'Train/DifLoss': avg_dif}
        end = time.time()
        # Always checkpoint the latest state after each epoch.
        torch.save({'model_state_dict': diffusion_model.state_dict(), 'epoch': epoch, 'optimizer_state_dict': optimizer.state_dict(), 'scheduler': scheduler.state_dict()}, os.path.join(arg.save_path, 'checkpoint.pth'))
        if (arg.pyx > 0):
            # Validate both the raw backbone and the EMA denoiser's backbone.
            (test_acc, test_loss) = validate(val_loader, model, criterion, arg, epoch=epoch)
            (acc2, test_loss2) = validate(val_loader, ema_model.denoise_fn, criterion, arg, epoch=epoch)
            if (test_acc > best_acc1):
                print('* Best model update *')
                best_acc1 = test_acc
                torch.save({'model_state_dict': diffusion_model.state_dict(), 'epoch': epoch, 'optimizer_state_dict': optimizer.state_dict(), 'scheduler': scheduler.state_dict()}, os.path.join(arg.save_path, 'best.pth'))
            print(f'Best acc1 {best_acc1:.2f}')
            metrics = {'lr': lr, 'Loss': test_loss, 'Acc': test_acc, 'EMALoss': test_loss2, 'EMAAccuracy': acc2}
            tf_metrics = {'lr': lr, 'Test/Loss': test_loss, 'Test/Accuracy': test_acc, 'Test/EMALoss': test_loss2, 'Test/EMAAccuracy': acc2, 'Train/Loss': avg_loss, 'Train/Acc': avg_acc, 'Train/CELoss': avg_ce, 'Train/DifLoss': avg_dif}
        if ((args.px > 0) and (arg.dataset in ['cifar10', 'cifar100'])):
            # Periodic image sampling + IS/FID scoring (CIFAR only).
            sample_start = time.time()
            (inc, fid) = sample_ema(ema_model, buffer, epoch, arg)
            sample_end = time.time()
            print(f'sample takes {(sample_end - sample_start)}')
            sample_time += (sample_end - sample_start)
            if (fid != 0):
                metrics['IS'] = inc
                metrics['fid'] = fid
        for k in tf_metrics:
            v = tf_metrics[k]
            arg.writer.add_scalar(k, v, epoch)
        if arg.wandb:
            import wandb
            wandb.log(metrics)
        # ETA estimate based on this epoch's wall time.
        remain_time = ((args.epochs - epoch) * (end - epoch_start))
        total_time = (args.epochs * (end - epoch_start))
        print(f'PID {arg.pid} Total ~ {str(timedelta(seconds=total_time))}, epoch {str(timedelta(seconds=(end - epoch_start)))},remain {str(timedelta(seconds=remain_time))}')
        print(f'total sample time {str(timedelta(seconds=sample_time))}')
    # Final summary + last checkpoints (full model and EMA weights).
    print(f'Creating model: {arg.model}')
    print(f"Number of params: {format(n_parameters, ',')}")
    print(f'Initial learning rate: {arg.lr:.6f}')
    print(f'best top-1: {best_acc1:.2f}, final top-1: {test_acc:.2f}')
    torch.save({'model_state_dict': diffusion_model.state_dict(), 'epoch': (args.epochs - 1), 'optimizer_state_dict': optimizer.state_dict(), 'scheduler': scheduler.state_dict()}, os.path.join(arg.save_path, 'checkpoint.pth'))
    torch.save({'model_state_dict': ema_model.state_dict()}, os.path.join(arg.save_path, 'ema_checkpoint.pth'))
def get_opparam_converter_with_context(context, opparam_converter: dict):
    """Bind ``context`` as the first argument of every converter.

    Given a mapping ``{op_name: fn(context, cmd)}``, return a new mapping
    ``{op_name: fn'(cmd)}`` where each wrapped converter calls the original
    with the captured ``context``.
    """
    def _bind(handler):
        # Inner closure captures `handler` per entry (avoids late binding).
        def _invoke(cmd):
            return handler(context, cmd)
        return _invoke
    return {op_name: _bind(handler) for (op_name, handler) in opparam_converter.items()}
class ChairsData(Data):
    """FlyingChairs optical-flow dataset: download, split, and convert to PNG.

    On first use, downloads the archive and the official train/val split
    file, converts the .ppm image pairs to .png under image/ (train) or
    test_image/ (val), and copies training .flo files into flow/.
    """
    # FIX(review): these two URL string literals were unterminated
    # (truncated to a lone quote) in this copy, which is a syntax error.
    # Restored as empty strings so the module parses.
    # TODO: fill in the official FlyingChairs archive URL and the
    # FlyingChairs_train_val.txt split URL.
    URL = ''
    TRAIN_VAL_URL = ''
    dirs = ['flying_chairs']

    def __init__(self, data_dir, stat_log_dir=None, development=True, fast_dir=None):
        super().__init__(data_dir, stat_log_dir, development=development, fast_dir=fast_dir)

    def _fetch_if_missing(self):
        """Download (if absent) and convert the raw dataset in place."""
        local_path = os.path.join(self.data_dir, 'flying_chairs')
        train_val_path = os.path.join(local_path, 'FlyingChairs_train_val.txt')
        if (not os.path.isdir(local_path)):
            did_download = True
            self._download_and_extract(self.URL, local_path)
            urlretrieve(self.TRAIN_VAL_URL, train_val_path)
        else:
            did_download = False
        data_path = os.path.join(local_path, 'FlyingChairs_release', 'data')
        os.makedirs(os.path.join(local_path, 'image'), exist_ok=True)
        os.makedirs(os.path.join(local_path, 'flow'), exist_ok=True)
        os.makedirs(os.path.join(local_path, 'test_image'), exist_ok=True)
        if os.path.isdir(data_path):
            print('>> converting chairs data to .png')
            # The split file has one flag per *pair*; images come in pairs
            # (xxx_img1/xxx_img2) so the flag is repeated for the image list.
            train_val_repeated = []
            train_val = []
            with open(train_val_path) as f:
                for line in f:
                    training = (int(line.strip()) == 1)
                    train_val_repeated.extend([training, training])
                    train_val.extend([training])
            im_files = [f for f in os.listdir(data_path) if re.match('[0-9]+.*\\.ppm', f)]
            im_files.sort()
            flow_files = [f for f in os.listdir(data_path) if re.match('[0-9]+.*\\.flo', f)]
            flow_files.sort()
            # Convert each .ppm to .png in the split-appropriate folder.
            for (t, f) in zip(train_val_repeated, im_files):
                (name, ext) = os.path.splitext(f)
                path = os.path.join(data_path, f)
                im = Image.open(path)
                folder = ('image' if t else 'test_image')
                im.save(os.path.join(local_path, folder, (name + '.png')), 'PNG')
            # Only validation ('not t') flow files are copied — matches the
            # original behavior; training flow is read from the archive.
            for (t, f) in zip(train_val, flow_files):
                path = os.path.join(data_path, f)
                if (not t):
                    copyfile(path, os.path.join(local_path, 'flow', f))
            if did_download:
                # Remove the raw extracted archive once converted.
                rmtree(data_path)
            print('>> processed chairs data')

    def get_raw_dirs(self):
        """Return the directories holding raw (training) images."""
        return [os.path.join(self.current_dir, 'flying_chairs', 'image')]
def resnet_fn(input_layer, block_fn, layers, normalization_op_params=None):
    """Build a ResNet backbone and return its feature pyramid.

    Applies the stem (7x7 conv, norm+relu, 3x3 max-pool), then four block
    groups with 64/128/256/512 filters; returns {'2': c2, ..., '5': c5}
    mapping pyramid level to the corresponding feature tensor.
    """
    norm_activation = norm_activation_builder(normalization_op_params, activation='relu')
    # Stem: strided 7x7 conv -> norm/activation -> strided max-pool.
    net = conv2d_fixed_padding(inputs=input_layer, filters=64, kernel_size=7, strides=2)
    net = tf.identity(net, 'initial_conv')
    net = norm_activation()(net)
    net = tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='SAME', name='initial_max_pool')(net)
    # Four residual stages; only the first keeps stride 1.
    endpoints = {}
    for stage_idx, filters in enumerate((64, 128, 256, 512)):
        net = block_group(
            inputs=net,
            filters=filters,
            block_fn=block_fn,
            blocks=layers[stage_idx],
            strides=(1 if stage_idx == 0 else 2),
            name='block_group{}'.format(stage_idx + 1),
            normalization_op_params=normalization_op_params,
        )
        endpoints[str(stage_idx + 2)] = net
    return endpoints
def multi_ref(refs, hypos):
    """Report multi-reference BLEU statistics for k hypotheses per source.

    ``refs`` and ``hypos`` are parallel lists: refs[i] holds m references and
    hypos[i] holds k hypotheses for source i. Prints (1) the average fraction
    of distinct references "covered" (each hypothesis is matched to its
    best-BLEU reference, ties broken at random) and (2) the mean
    leave-one-out corpus BLEU where each reference set is held out in turn.

    Note: uses random.choice for tie-breaking, so the '#refs covered' figure
    is nondeterministic when ties occur. Results are only printed, not
    returned.
    """
    (_ref, _hypo) = ([], [])
    ref_cnt = 0
    assert (len(refs) == len(hypos))
    for (rs, hs) in zip(refs, hypos):
        # `a` collects which reference indices were chosen for this source.
        a = set()
        for h in hs:
            # Score this hypothesis against every reference; keep the best.
            s = [sentence_bleu(h, r) for r in rs]
            j = np.argmax(s)
            _ref.append(rs[j])
            _hypo.append(h)
            # Break ties among equally-scored references at random.
            best = [k for k in range(len(rs)) if (s[k] == s[j])]
            a.add(random.choice(best))
        ref_cnt += len(a)
    print(('#refs covered: %.2f' % (ref_cnt / len(refs))))
    # Transpose to reference-major / hypothesis-major layout.
    refs = list(zip(*refs))
    hypos = list(zip(*hypos))
    k = len(hypos)
    m = len(refs)
    # Interleave the k hypothesis streams per source, and repeat each
    # reference k times so the two lists stay aligned.
    flat_hypos = [hypos[j][i] for i in range(len(hypos[0])) for j in range(k)]
    duplicated_refs = [[ref for ref in refs_i for _ in range(k)] for refs_i in refs]
    loo_bleus = []
    for held_out_ref in range(m):
        # Corpus BLEU against all reference sets except the held-out one.
        remaining_refs = (duplicated_refs[:held_out_ref] + duplicated_refs[(held_out_ref + 1):])
        assert (len(remaining_refs) == (m - 1))
        loo_bleus.append(corpus_bleu(flat_hypos, remaining_refs))
    print(('average multi-reference BLEU (leave-one-out): %.2f' % np.mean(loo_bleus)))
def get_representative_dataset(n_iter):
    """Return a generator factory yielding ``n_iter`` calibration samples.

    Each yielded item is a single-element list holding the first component
    of one batch from the module-level ``train_loader`` (the format expected
    by TFLite-style post-training quantization).
    """
    def representative_dataset():
        batch_iter = iter(train_loader)
        for _ in range(n_iter):
            batch = next(batch_iter)
            yield [batch[0]]
    return representative_dataset
def lr_calc(epoch):
    """Learning-rate multiplier for ``epoch`` (reads module-level ``args``).

    Outside the drop window the rate decays geometrically as
    ``lr_decay ** epoch``. Inside the window
    [lr_drop_epoch, lr_drop_epoch + lr_rtrn_epochs) it is additionally
    scaled down, ramping back linearly over ``lr_rtrn_epochs`` epochs.
    """
    decayed = (args.lr_decay ** epoch)
    in_drop_window = (args.lr_drop_epoch <= epoch < (args.lr_drop_epoch + args.lr_rtrn_epochs))
    if not in_drop_window:
        return decayed
    # Same arithmetic as the plain-decay branch minus the ramped reduction.
    return (decayed - (decayed * (1 - ((epoch - args.lr_drop_epoch) / args.lr_rtrn_epochs))))
def test_limit_memory():
    """limit_memory(2) must set both soft and hard RLIMIT_AS to 2 GiB."""
    limit_memory(2)
    two_gib = 2 * (1024 ** 3)
    soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_AS)
    assert soft_limit == two_gib
    assert hard_limit == two_gib
def register_Ns3QuicEchoClientHelper_methods(root_module, cls):
    """Register ns3::QuicEchoClientHelper constructors/methods on the pybindgen
    class wrapper (auto-generated binding-registration code)."""
    # Copy constructor plus the (address, port) and address-only constructors.
    cls.add_constructor([param('ns3::QuicEchoClientHelper const &', 'arg0')])
    cls.add_constructor([param('ns3::Address', 'ip'), param('uint16_t', 'port')])
    cls.add_constructor([param('ns3::Address', 'addr')])
    # Install() overloads: single node, node by name, or node container.
    cls.add_method('Install', 'ns3::ApplicationContainer', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True)
    cls.add_method('Install', 'ns3::ApplicationContainer', [param('std::string', 'nodeName')], is_const=True)
    cls.add_method('Install', 'ns3::ApplicationContainer', [param('ns3::NodeContainer', 'c')], is_const=True)
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # SetFill() overloads: string fill, repeated-byte fill, raw-buffer fill.
    cls.add_method('SetFill', 'void', [param('ns3::Ptr< ns3::Application >', 'app'), param('std::string', 'fill')])
    cls.add_method('SetFill', 'void', [param('ns3::Ptr< ns3::Application >', 'app'), param('uint8_t', 'fill'), param('uint32_t', 'dataLength')])
    cls.add_method('SetFill', 'void', [param('ns3::Ptr< ns3::Application >', 'app'), param('uint8_t *', 'fill'), param('uint32_t', 'fillLength'), param('uint32_t', 'dataLength')])
    return
def display_section_name(title: str, separator: str='=', **kwargs: Any) -> None:
    """Print *title* as a banner centered to the terminal width.

    The title is padded with *separator*; extra keyword arguments are passed
    to ``click.secho``, with bold output as the default.
    """
    if 'bold' not in kwargs:
        kwargs['bold'] = True
    banner = f' {title} '.center(get_terminal_width(), separator)
    click.secho(banner, **kwargs)
def eval_datasets_flist_reader(flist):
    """Read an evaluation file list of ``query_id,reference_id`` CSV rows.

    Args:
        flist: path to a two-column comma-separated file; an optional header
            row whose first field is ``query_id`` is skipped.

    Returns:
        List of ``(query_id, reference_id)`` string tuples in file order.

    Raises:
        ValueError: if a row does not contain exactly two comma-separated
            fields (unchanged from the original strict parsing).
    """
    imlist = []
    with open(flist, 'r') as rf:
        # Iterate the file lazily instead of materializing it with readlines().
        for line in rf:
            (q, r) = line.strip().split(',')
            if (q == 'query_id'):
                continue  # header row
            imlist.append((q, r))
    return imlist
class BR(nn.Module):
    """Batch normalization followed by a configurable activation."""

    def __init__(self, nOut, act_name='prelu'):
        """nOut: channel count; act_name: activation resolved via activation_fn."""
        super().__init__()
        # Keep the attribute name `br` so state-dict keys stay compatible.
        layers = [nn.BatchNorm2d(nOut), activation_fn(nOut, name=act_name)]
        self.br = nn.Sequential(*layers)

    def forward(self, x):
        """Apply BN then the activation."""
        return self.br(x)
# NOTE(review): this bare call looks like a class decorator whose '@' prefix
# was stripped during extraction (e.g. an mmdetection '@HEADS.register_module()')
# — confirm against the original source; as written its result is discarded.
_module()
class GlobalContextHead(nn.Module):
    """Global-context head: conv stack + global pooling + multi-label FC.

    Runs a small conv (or residual) stack on the last input feature map,
    global-average-pools it, and predicts per-image multi-label class logits
    trained with binary cross-entropy.
    """
    def __init__(self, num_convs=4, in_channels=256, conv_out_channels=256, num_classes=80, loss_weight=1.0, conv_cfg=None, norm_cfg=None, conv_to_res=False):
        super(GlobalContextHead, self).__init__()
        self.num_convs = num_convs
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.num_classes = num_classes
        self.loss_weight = loss_weight
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.conv_to_res = conv_to_res
        self.fp16_enabled = False
        if self.conv_to_res:
            # Residual variant: two convs per block, so halve the count.
            num_res_blocks = (num_convs // 2)
            self.convs = ResLayer(SimplifiedBasicBlock, in_channels, self.conv_out_channels, num_res_blocks, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)
            self.num_convs = num_res_blocks
        else:
            self.convs = nn.ModuleList()
            for i in range(self.num_convs):
                # First conv maps in_channels -> conv_out_channels; the rest keep width.
                in_channels = (self.in_channels if (i == 0) else conv_out_channels)
                self.convs.append(ConvModule(in_channels, conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(conv_out_channels, num_classes)
        # Multi-label image-level classification loss.
        self.criterion = nn.BCEWithLogitsLoss()
    def init_weights(self):
        """Initialize the final FC layer (normal weights, zero bias)."""
        nn.init.normal_(self.fc.weight, 0, 0.01)
        nn.init.constant_(self.fc.bias, 0)
    # NOTE(review): likely a stripped '@auto_fp16()' decorator — confirm.
    _fp16()
    def forward(self, feats):
        """Return (per-image class logits, pooled feature) from feats[-1]."""
        x = feats[(- 1)]
        for i in range(self.num_convs):
            x = self.convs[i](x)
        x = self.pool(x)
        mc_pred = x.reshape(x.size(0), (- 1))
        mc_pred = self.fc(mc_pred)
        return (mc_pred, x)
    # NOTE(review): likely a stripped "@force_fp32(apply_to=('pred',))" decorator — confirm.
    _fp32(apply_to=('pred',))
    def loss(self, pred, labels):
        """BCE loss of `pred` against multi-hot targets built from `labels`."""
        # Each image's target is 1 at every class present in its labels.
        labels = [lbl.unique() for lbl in labels]
        targets = pred.new_zeros(pred.size())
        for (i, label) in enumerate(labels):
            targets[(i, label)] = 1.0
        loss = (self.loss_weight * self.criterion(pred, targets))
        return loss
def GetClustCf(tspec, *args):
    """Dispatch GetClustCf to the implementation matching the graph type.

    The first argument selects the graph flavor; the remaining arguments are
    forwarded unchanged.

    Raises:
        TypeError: if ``tspec`` is not one of the supported graph types.
    """
    dispatch = (
        (PUNGraph, GetClustCf_PUNGraph),
        (PUndirNet, GetClustCf_PUndirNet),
        (PDirNet, GetClustCf_PDirNet),
        (PNGraph, GetClustCf_PNGraph),
        (PNEANet, GetClustCf_PNEANet),
        (PNGraphMP, GetClustCf_PNGraphMP),
        (PNEANetMP, GetClustCf_PNEANetMP),
    )
    # Exact type comparison (not isinstance), as in the generated original.
    for graph_type, impl in dispatch:
        if type(tspec) == graph_type:
            return impl(tspec, *args)
    raise TypeError('First argument has invalid type')
def run_test(args):
    """Run test.py as a subprocess on the generated dummy CSV, then clean up.

    Prefers ``--config_path`` when provided, otherwise ``--ckpt_path``. The
    dummy CSV and the generated groundtruth file are removed afterwards.
    """
    if args.config_path is not None:
        path_flag, path_value = '--config_path', args.config_path
    else:
        path_flag, path_value = '--ckpt_path', args.ckpt_path
    dummy_csv = args.img_folder + '/dummy.csv'
    cmd = ['python', 'test.py', '--dataset', 'custom', path_flag, path_value,
           '--phase', 'test', '--together', 'True',
           '--test_csv', str(dummy_csv), '--save_dir', args.save_dir]
    subprocess.run(cmd)
    os.remove(dummy_csv)
    os.remove(args.save_dir + '/results/test/groundtruth.csv')
def test_views_between_maps_work():
    """Regression test: expanding library nodes and simplifying an SDFG built
    without simplification must keep reshape views working."""
    # NOTE(review): this nested function is used via .to_sdfg(), so it is
    # presumably meant to carry a '@dace.program' decorator that was lost in
    # extraction — confirm against the original source. nested_add2 and
    # reshape_node are helpers defined elsewhere in the file.
    def test_inline_reshape_views_work(A: dace.float64[(3, 3)], B: dace.float64[9]):
        result = dace.define_local([9], dace.float64)
        result[:] = nested_add2(A, B)
        result_reshaped = reshape_node(result)
        return np.transpose(result_reshaped)
    sdfg = test_inline_reshape_views_work.to_sdfg(simplify=False)
    sdfg.expand_library_nodes()
    sdfg.simplify()
class RegionpropsTableAll():
    """asv benchmark: time regionprops_table over PROP_VALS, cached and uncached."""

    # asv parameterization: one run with cache=False, one with cache=True.
    param_names = ['cache']
    params = (False, True)

    def setup(self, cache):
        """Prepare benchmark data; skip if regionprops_table is unavailable."""
        try:
            from skimage.measure import regionprops_table
        except ImportError:
            # asv interprets NotImplementedError in setup as "skip".
            raise NotImplementedError('regionprops_table unavailable')
        images = init_regionprops_data()
        self.label_image, self.intensity_image = images

    def time_regionprops_table_all(self, cache):
        """Body timed by asv."""
        measure.regionprops_table(self.label_image, self.intensity_image, properties=PROP_VALS, cache=cache)
def train(train_loader, model, criterion, optimizer, scheduler, epoch):
    """One training epoch of a mutable-width network with in-place distillation.

    The full-width (or full-net) model is trained against hard labels; then
    sub-networks — random widths (when FLAGS.min_width > 0) or three fixed
    sub-net passes — at random input resolutions are trained to match the
    full model's output via KL divergence. Reads the module-level ``FLAGS``,
    ``logger``, and helpers (``AverageMeter``, ``accuracy``).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    model.train()
    end = time.time()
    for (i, (input, target)) in enumerate(train_loader):
        data_time.update((time.time() - end))
        input = input.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        optimizer.zero_grad()
        if (FLAGS.min_width > 0):
            # Sample (num_subnet - 1) random widths plus the minimum width;
            # the teacher pass below runs at maximum width.
            max_width = FLAGS.max_width
            min_width = FLAGS.min_width
            width_mult_list = [min_width]
            sampled_width = list(np.random.uniform(min_width, max_width, (FLAGS.num_subnet - 1)))
            width_mult_list.extend(sampled_width)
            model.apply((lambda m: setattr(m, 'width_mult', max_width)))
        else:
            model.apply((lambda m: setattr(m, 'fullnet', True)))
        max_output = model(input.cuda(non_blocking=True))
        loss = criterion(max_output, target)
        loss.backward()
        (prec1, prec5) = accuracy(max_output.data, target, topk=(1, 5))
        losses.update(loss.data.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))
        # Detach the teacher output so distillation cannot backprop into it.
        max_output_detach = max_output.detach()
        if (FLAGS.min_width > 0):
            for width_mult in sorted(width_mult_list, reverse=True):
                model.apply((lambda m: setattr(m, 'width_mult', width_mult)))
                # Random input resolution per sub-network pass.
                resolution = FLAGS.resos[random.randint(0, (len(FLAGS.resos) - 1))]
                output = model(F.interpolate(input, (resolution, resolution), mode='bilinear', align_corners=True))
                loss = torch.nn.KLDivLoss(reduction='batchmean')(F.log_softmax(output, dim=1), F.softmax(max_output_detach, dim=1))
                loss.backward()
        else:
            model.apply((lambda m: setattr(m, 'fullnet', False)))
            for k in range(3):
                resolution = FLAGS.resos[random.randint(0, (len(FLAGS.resos) - 1))]
                output = model(F.interpolate(input, (resolution, resolution), mode='bilinear', align_corners=True))
                loss = torch.nn.KLDivLoss(reduction='batchmean')(F.log_softmax(output, dim=1), F.softmax(max_output_detach, dim=1))
                loss.backward()
        # Gradients from the teacher and all student passes accumulate;
        # apply them in a single optimizer step.
        optimizer.step()
        scheduler.step()
        batch_time.update((time.time() - end))
        end = time.time()
        if ((i % FLAGS.print_freq) == 0):
            logger.info('Epoch: [{0}][{1}/{2}]\tLR:{3: .4f}\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tData {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\ {top1.val:.3f} ({top1.avg:.3f}) {top5.val:.3f} ({top5.avg:.3f})'.format(epoch, i, len(train_loader), optimizer.param_groups[0]['lr'], batch_time=batch_time, data_time=data_time, loss=losses, top1=top1, top5=top5))
def load_ckpt_base_path_meta_data(search_space, base_data_directory='/home/soroush/data/s3'):
    """Expand ``rl_variant.ckpt_base_path`` entries into concrete checkpoint paths.

    Scans every experiment directory under each base path, parses the run id
    from the directory name (``...id<NNN>s<seed>``), and appends the relative
    path to ``search_space['rl_variant.ckpt']`` unless excluded via
    ``rl_variant.ckpt_exclude_path`` / ``rl_variant.ckpt_exclude_id``. The
    helper keys are removed from ``search_space`` afterwards.

    Args:
        search_space: search-space option dict; mutated in place.
        base_data_directory: root directory containing the base paths.
            Parameterized (with the original hard-coded location as default)
            so the function is reusable and testable.
    """
    import os
    base_paths = search_space.get('rl_variant.ckpt_base_path') or []
    if len(base_paths) > 0:
        excluded_paths = search_space.get('rl_variant.ckpt_exclude_path', [])
        excluded_ids = search_space.get('rl_variant.ckpt_exclude_id', [])
        search_space['rl_variant.ckpt'] = []
        for base_path in base_paths:
            for exp_path in os.listdir(os.path.join(base_data_directory, base_path)):
                if not os.path.isdir(os.path.join(base_data_directory, base_path, exp_path)):
                    continue
                # Directory names end with 'id<NNN>s<seed>'; only the id is
                # used for filtering (the seed was parsed but unused before).
                id_and_seed = exp_path.split('id')[(- 1)]
                ckpt_id = int(id_and_seed[:3])  # renamed from `id` to avoid shadowing the builtin
                path = os.path.join(base_path, exp_path)
                if (path not in excluded_paths) and (ckpt_id not in excluded_ids):
                    search_space['rl_variant.ckpt'].append(path)
    # Drop the helper keys so they do not leak into the final search space.
    for helper_key in ('rl_variant.ckpt_base_path', 'rl_variant.ckpt_exclude_path', 'rl_variant.ckpt_exclude_id'):
        search_space.pop(helper_key, None)
# NOTE(review): these '.parametrize(...)' lines look like
# '@pytest.mark.parametrize' decorators whose '@pytest.mark' prefix was lost
# in extraction — confirm against the original source (as written they are
# invalid standalone statements).
.parametrize('n', [0, 1, 2, 3.2])
.parametrize('alpha', [0.0, 1, np.nan])
.parametrize('x', [1e-06, 2, np.nan])
def test_gegenbauer_nan(n, alpha, x):
    """eval_gegenbauer must return NaN exactly when any argument is NaN."""
    nan_gegenbauer = np.isnan(_ufuncs.eval_gegenbauer(n, alpha, x))
    nan_arg = np.any(np.isnan([n, alpha, x]))
    assert (nan_gegenbauer == nan_arg)
class _Ops(types.ModuleType):
    """Lazy ``torch.ops`` namespace module.

    Attribute access builds and caches one ``_OpNamespace`` per name;
    ``load_library`` dlopens a custom-operator shared library and records its
    resolved path in ``loaded_libraries``.
    """
    __file__ = os.path.join(os.path.dirname(__file__), '_ops.py')

    def __init__(self):
        super(_Ops, self).__init__('torch.ops')
        # Resolved paths of every custom-op library loaded so far.
        self.loaded_libraries = set()

    def __getattr__(self, name):
        # Only reached when `name` is not yet an attribute: create the
        # namespace once and cache it so later lookups bypass __getattr__.
        namespace = _OpNamespace(name)
        setattr(self, name, namespace)
        return namespace

    def load_library(self, path):
        resolved = torch._utils_internal.resolve_library_path(path)
        with dl_open_guard():
            # Loading the shared object runs its static initializers, which
            # register the custom operators.
            ctypes.CDLL(resolved)
        self.loaded_libraries.add(resolved)
def load_class_map(map_or_filename, root=''):
    """Load a class-name -> index mapping.

    Args:
        map_or_filename: either an already-built non-empty dict (returned
            as-is) or a path to a ``.txt`` file with one class name per line
            (index = line number).
        root: directory to search when the path does not exist as given.

    Returns:
        Dict mapping class name (str) to integer index.
    """
    if isinstance(map_or_filename, dict):
        # BUG FIX: previously this asserted the builtin `dict` (always
        # truthy) instead of the argument, so empty dicts slipped through.
        assert map_or_filename, 'class_map dict must be non-empty'
        return map_or_filename
    class_map_path = map_or_filename
    if (not os.path.exists(class_map_path)):
        class_map_path = os.path.join(root, class_map_path)
        assert os.path.exists(class_map_path), ('Cannot locate specified class map file (%s)' % map_or_filename)
    class_map_ext = os.path.splitext(map_or_filename)[(- 1)].lower()
    if (class_map_ext == '.txt'):
        with open(class_map_path) as f:
            # Line number becomes the class index.
            class_to_idx = {v.strip(): k for (k, v) in enumerate(f)}
    else:
        assert False, f'Unsupported class map file extension ({class_map_ext}).'
    return class_to_idx
# NOTE(review): this bare call looks like a decorator whose '@' prefix was
# stripped during extraction (plausibly '@lru_cache(maxsize=1000)' given the
# function name below and its hashable-tuple signature) — confirm against the
# original source; as written its result is discarded.
_cache(maxsize=1000)
def measure_multiple_with_cache_ket(state: Tuple[complex], num_states: int, length_diff: int) -> Tuple[(List[array], List[float])]:
    """Project a ket onto every computational-basis outcome of ``num_states`` qubits.

    Args:
        state: the ket as a (hashable) tuple of complex amplitudes.
        length_diff: log2 of the unmeasured subsystem dimension; each
            projector is tensored with an identity of that size.

    Returns:
        ``(post_measurement_states, probabilities)``; a state entry is None
        when its outcome has zero probability.
    """
    state = array(state)
    basis_count = (2 ** num_states)
    projectors = ([None] * basis_count)
    probabilities = ([0] * basis_count)
    for i in range(basis_count):
        M = zeros((1, basis_count), dtype=complex)
        M[(0, i)] = 1
        projectors[i] = kron(M, identity((2 ** length_diff)))
        # BUG FIX: the matrix-multiplication operators ('@') were missing,
        # making this a syntax error. p_i = <psi| P_i^T P_i |psi>.
        probabilities[i] = (((state.conj().T @ projectors[i].T) @ projectors[i]) @ state).real
        # Clamp tiny numerical excursions outside [0, 1].
        if (probabilities[i] < 0):
            probabilities[i] = 0
        if (probabilities[i] > 1):
            probabilities[i] = 1
    return_states = ([None] * len(projectors))
    for (i, proj) in enumerate(projectors):
        if (probabilities[i] > 0):
            # BUG FIX: '@' was also missing here.
            # Renormalized post-measurement state: P_i |psi> / sqrt(p_i).
            new_state = ((proj @ state) / sqrt(probabilities[i]))
            # Tuples keep the result hashable for the caching decorator above.
            new_state = tuple(new_state)
            return_states[i] = new_state
    return (return_states, probabilities)
# NOTE(review): likely a stripped '@pytest.mark.parametrize' decorator — confirm
# against the original source (as written this is an invalid standalone statement).
.parametrize('seed', [313])
def test_all_gather(seed, comm_nccl_opts):
    """NCCL all_gather: every device must receive all devices' arrays."""
    if (comm_nccl_opts is None):
        pytest.skip('Communicator test is disabled. You can turn it on by an option `--test-communicator`.')
    if (len(comm_nccl_opts.devices) < 2):
        pytest.skip('Communicator test is disabled. Use more than 1 gpus.')
    comm = comm_nccl_opts.comm
    device_id = int(comm_nccl_opts.device_id)
    n_devices = len(comm_nccl_opts.devices)
    rng = np.random.RandomState(seed)
    x_data = rng.rand(3, 4)
    # Each device contributes the shared array scaled by its own id, so the
    # gathered outputs can be checked against ref_all_gather.
    x = nn.Variable(x_data.shape)
    x.d = (x_data * device_id)
    y_list = []
    for i in range(n_devices):
        y = nn.Variable(x_data.shape)
        y_list.append(y)
    comm.all_gather(x.data, [y.data for y in y_list])
    refs = ref_all_gather(x_data, n_devices)
    for (y, ref) in zip(y_list, refs):
        assert_allclose(y.d, ref, rtol=0.001, atol=1e-06)
class SELayer(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Global-average-pools each channel, passes the pooled vector through a
    two-layer bottleneck MLP ending in a sigmoid, and rescales the input
    channels by the resulting per-channel weights.
    """

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        squeezed = channel // reduction  # bottleneck width
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, squeezed, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(squeezed, channel, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        weights = self.avg_pool(x).view(batch, channels)
        weights = self.fc(weights).view(batch, channels, 1, 1)
        return x * weights.expand_as(x)
def example():
    """Run MyOwnTask in CausalWorld with random actions, intervening every 10 steps."""
    task = MyOwnTask()
    env = CausalWorld(task=task, enable_visualization=True)
    env.reset()
    for _ in range(2000):
        # Ten random steps, then a single random intervention.
        for _ in range(10):
            step_result = env.step(env.action_space.sample())
            (obs, reward, done, info) = step_result
        random_intervention_dict = env.do_single_random_intervention()
    env.close()
def register_Ns3EpcMmeApplication_methods(root_module, cls):
    """Register ns3::EpcMmeApplication constructors/methods on the pybindgen
    class wrapper (auto-generated binding-registration code)."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::EpcMmeApplication const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('AddBearer', 'uint8_t', [param('uint64_t', 'imsi'), param('ns3::Ptr< ns3::EpcTft >', 'tft'), param('ns3::EpsBearer', 'bearer')])
    cls.add_method('AddEnb', 'void', [param('uint16_t', 'ecgi'), param('ns3::Ipv4Address', 'enbS1UAddr')])
    cls.add_method('AddUe', 'void', [param('uint64_t', 'imsi')])
    # SAP accessors/setters for the S11 and S1-AP interfaces.
    cls.add_method('GetS11SapMme', 'ns3::EpcS11SapMme *', [])
    cls.add_method('GetS1apSapMme', 'ns3::EpcS1apSapMme *', [])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetS11SapSgw', 'void', [param('ns3::EpcS11SapSgw *', 's')])
    cls.add_method('SetS1apSapMmeProvider', 'void', [param('ns3::EpcS1apSapMmeProvider *', 'provider')])
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
class GraphHandler(object):
    """TF1 session bookkeeping for a model: init, summaries, save/restore.

    Reads the module-level ``cfg`` for mode/paths and logs via ``_logger``.
    """
    def __init__(self, model):
        self.model = model
        # Keep at most the 3 most recent checkpoints.
        self.saver = tf.train.Saver(max_to_keep=3)
        self.writer = None
    def initialize(self, sess):
        """Init variables, optionally restore, open a summary writer when training."""
        sess.run(tf.global_variables_initializer())
        # Restore when explicitly requested, or whenever not training.
        if (cfg.load_model or (cfg.mode != 'train')):
            self.restore(sess)
        if (cfg.mode == 'train'):
            self.writer = tf.summary.FileWriter(logdir=cfg.summary_dir, graph=tf.get_default_graph())
    def add_summary(self, summary, global_step):
        """Write one summary proto at `global_step`."""
        _logger.add()
        _logger.add('saving summary...')
        self.writer.add_summary(summary, global_step)
        _logger.done()
    def add_summaries(self, summaries, global_step):
        """Write several summaries at the same step."""
        for summary in summaries:
            self.add_summary(summary, global_step)
    def save(self, sess, global_step=None):
        """Checkpoint the session to cfg.ckpt_path."""
        _logger.add()
        _logger.add(('saving model to %s' % cfg.ckpt_path))
        self.saver.save(sess, cfg.ckpt_path, global_step)
        _logger.done()
    def restore(self, sess):
        """Restore from the latest checkpoint, or from an explicit step/path.

        Raises FileNotFoundError when no usable checkpoint is found and the
        mode is not 'train' (training tolerates a missing checkpoint).
        """
        _logger.add()
        print(cfg.ckpt_dir)
        if (cfg.load_step is None):
            _logger.add(('trying to restore from dir %s' % cfg.ckpt_dir))
            latest_checkpoint_path = tf.train.latest_checkpoint(cfg.ckpt_dir)
        else:
            # Explicit step: prefer cfg.load_path, else "<ckpt_path>-<step>".
            latest_checkpoint_path = (cfg.load_path or ((cfg.ckpt_path + '-') + str(cfg.load_step)))
        if (latest_checkpoint_path is not None):
            _logger.add(('trying to restore from ckpt file %s' % latest_checkpoint_path))
            try:
                self.saver.restore(sess, latest_checkpoint_path)
                _logger.add('success to restore')
            except tf.errors.NotFoundError:
                _logger.add('failure to restore')
                if (cfg.mode != 'train'):
                    raise FileNotFoundError('canot find model file')
        else:
            _logger.add(('No check point file in dir %s ' % cfg.ckpt_dir))
            if (cfg.mode != 'train'):
                raise FileNotFoundError('canot find model file')
        _logger.done()
def _create_table(conn, table):
    """Create `table` with driver-appropriate DDL: (id INT, block TEXT/STRING).

    Raises:
        SQLFlowDiagnostic: when ``conn.driver`` is not mysql/hive/maxcompute.
    """
    driver = conn.driver
    if driver == 'mysql':
        template = 'CREATE TABLE IF NOT EXISTS {0} (id INT, block TEXT, PRIMARY KEY (id))'
    elif driver == 'hive':
        template = 'CREATE TABLE IF NOT EXISTS {0} (id INT, block STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY "\\001" STORED AS TEXTFILE'
    elif driver == 'maxcompute':
        template = 'CREATE TABLE IF NOT EXISTS {0} (id INT, block STRING)'
    else:
        raise SQLFlowDiagnostic('unsupported driver {0} on creating table.'.format(driver))
    conn.execute(template.format(table))
class TestNoop():
    """Tests for the Noop wrapper: reset performs 1..noop_max inner no-op steps."""

    def test_noop(self):
        # With noop_max=3, every reset must step the inner env 1-3 times.
        env = Noop(DummyDiscretePixelEnv(), noop_max=3)
        for _ in range(1000):
            env.reset()
            assert 1 <= env.env.step_called <= 3
        # With noop_max=10, the reset observation reflects how many no-op
        # steps the dummy env took: all-ones exactly on every 5th step.
        env = Noop(DummyDiscretePixelEnv(), noop_max=10)
        for _ in range(1000):
            obs = env.reset()
            all_ones = np.ones(env.observation_space.shape)
            if (env.env.step_called % 5) == 0:
                assert np.array_equal(obs, all_ones)
            else:
                assert not np.array_equal(obs, all_ones)
def _rel_pos_enc_shift(x: Tensor, axis: Dim, pos_emb_spatial_dim: Dim, hist_dim: Dim) -> Tensor:
    """Shift along the relative-position axis via pad/reshape/slice.

    Pads one zero step at the front of the positional axis, swaps the
    (axis, pos) layout, drops the pad, swaps back, and truncates the
    positional axis to ``hist_dim`` (the Transformer-XL-style relative
    positional shift trick). All remaining (batch) dims are untouched.
    """
    batch_dims = x.remaining_dims((axis, pos_emb_spatial_dim))
    # Prepend a single zero along the positional axis.
    (x_padded, (pos_emb_spatial_dim_,)) = rf.pad(x, axes=[pos_emb_spatial_dim], padding=[(1, 0)], value=0.0)
    # Reinterpreting (axis, pos) as (pos, axis) offsets each row's entries by
    # one relative to the next; slicing off the first element removes the pad.
    x_padded = rf.reshape(x_padded, (axis, pos_emb_spatial_dim_), (pos_emb_spatial_dim_, axis))
    (x_padded, pos_emb_spatial_dim_) = rf.slice(x_padded, axis=pos_emb_spatial_dim_, start=1)
    x_padded = rf.reshape(x_padded, (pos_emb_spatial_dim_, axis), (axis, pos_emb_spatial_dim_))
    # Keep only the history window.
    (x_padded, _) = rf.slice(x_padded, axis=pos_emb_spatial_dim_, size=hist_dim)
    x_padded.verify_out_shape((set(batch_dims) | {axis, hist_dim}))
    return x_padded
class RPNLossComputation(object):
    """Computes RPN objectness and box-regression losses.

    Anchors are matched to ground-truth boxes, a balanced fg/bg subset is
    sampled, and losses are computed over the samples — optionally with a
    focal objectness loss (cfg.MODEL.RPN.FOCAL_LOSS).
    """
    def __init__(self, proposal_matcher, fg_bg_sampler, box_coder):
        # proposal_matcher: assigns each anchor a GT index or a negative flag.
        # fg_bg_sampler: selects a balanced set of positive/negative anchors.
        # box_coder: encodes GT boxes relative to anchors.
        self.proposal_matcher = proposal_matcher
        self.fg_bg_sampler = fg_bg_sampler
        self.box_coder = box_coder
    def match_targets_to_anchors(self, anchor, target):
        """Return per-anchor matched GT boxes with 'matched_idxs' attached."""
        match_quality_matrix = boxlist_iou(target, anchor)
        matched_idxs = self.proposal_matcher(match_quality_matrix)
        # Drop all fields; only the boxes themselves are needed.
        target = target.copy_with_fields([])
        # clamp(min=0): negative matches still need a valid row index here;
        # their labels are fixed up in prepare_targets.
        matched_targets = target[matched_idxs.clamp(min=0)]
        matched_targets.add_field('matched_idxs', matched_idxs)
        return matched_targets
    def prepare_targets(self, anchors, targets):
        """Build per-image anchor labels (1 / 0 / -1=ignore) and regression targets."""
        labels = []
        regression_targets = []
        for (anchors_per_image, targets_per_image) in zip(anchors, targets):
            if (len(targets_per_image) > 0):
                matched_targets = self.match_targets_to_anchors(anchors_per_image, targets_per_image)
                matched_idxs = matched_targets.get_field('matched_idxs')
                labels_per_image = (matched_idxs >= 0)
                labels_per_image = labels_per_image.to(dtype=torch.float32)
                # Invisible anchors and between-threshold matches are ignored (-1).
                labels_per_image[(~ anchors_per_image.get_field('visibility'))] = (- 1)
                inds_to_discard = (matched_idxs == Matcher.BETWEEN_THRESHOLDS)
                labels_per_image[inds_to_discard] = (- 1)
                regression_targets_per_image = self.box_coder.encode(matched_targets.bbox, anchors_per_image.bbox)
            else:
                # No ground truth in this image: every anchor is background.
                num_anchors = len(anchors_per_image)
                device = anchors_per_image.bbox.device
                labels_per_image = torch.zeros(num_anchors, dtype=torch.float32).to(device)
                regression_targets_per_image = torch.zeros(num_anchors, 4, dtype=torch.float32).to(device)
            labels.append(labels_per_image)
            regression_targets.append(regression_targets_per_image)
        return (labels, regression_targets)
    def __call__(self, anchors, objectness, box_regression, targets):
        """Return (objectness_loss, box_loss) for a batch.

        anchors: per-image lists of per-level BoxLists; objectness and
        box_regression: per-level prediction tensors; targets: per-image GT.
        """
        anchors = [cat_boxlist(anchors_per_image) for anchors_per_image in anchors]
        (labels, regression_targets) = self.prepare_targets(anchors, targets)
        (sampled_pos_inds, sampled_neg_inds) = self.fg_bg_sampler(labels)
        sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0)).squeeze(1)
        sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0)).squeeze(1)
        cfg.debug_info.sampled_pos_inds = len(sampled_pos_inds)
        sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)
        # Flatten per-level predictions anchor-major, matching the order of
        # the concatenated anchors above.
        objectness_flattened = []
        box_regression_flattened = []
        for (objectness_per_level, box_regression_per_level) in zip(objectness, box_regression):
            (N, A, H, W) = objectness_per_level.shape
            objectness_per_level = objectness_per_level.permute(0, 2, 3, 1).reshape(N, (- 1))
            box_regression_per_level = box_regression_per_level.view(N, (- 1), 4, H, W)
            box_regression_per_level = box_regression_per_level.permute(0, 3, 4, 1, 2)
            box_regression_per_level = box_regression_per_level.reshape(N, (- 1), 4)
            objectness_flattened.append(objectness_per_level)
            box_regression_flattened.append(box_regression_per_level)
        objectness = cat(objectness_flattened, dim=1).reshape((- 1))
        box_regression = cat(box_regression_flattened, dim=1).reshape((- 1), 4)
        labels = torch.cat(labels, dim=0)
        regression_targets = torch.cat(regression_targets, dim=0)
        # Box loss over positives only, normalized by the full sample count.
        box_loss = (smooth_l1_loss(box_regression[sampled_pos_inds], regression_targets[sampled_pos_inds], beta=(1.0 / 9), size_average=False) / sampled_inds.numel())
        if (not cfg.MODEL.RPN.FOCAL_LOSS):
            objectness_loss = F.binary_cross_entropy_with_logits(objectness[sampled_inds], labels[sampled_inds])
        else:
            # Focal loss computed over all valid (label >= 0) anchors, using
            # the ROI-box-head focal parameters.
            alpha = cfg.MODEL.ROI_BOX_HEAD.FOCAL_ALPHA
            gamma = cfg.MODEL.ROI_BOX_HEAD.FOCAL_GAMMA
            p = objectness.sigmoid()
            labels_float = labels.to(torch.float32)
            # pt: predicted probability of the true class per anchor.
            pt = ((p * labels_float) + ((1 - p) * (1 - labels_float)))
            valid_mask = (labels >= 0).to(torch.float32)
            w = (valid_mask * ((alpha * labels_float) + ((1 - alpha) * (1 - labels_float))))
            objectness_loss = ((w * (1 - pt).pow(gamma)) * pt.log())
            objectness_loss = ((- objectness_loss.sum()) / (labels_float * valid_mask).sum())
        return (objectness_loss, box_loss)
# NOTE(review): this bare name looks like a decorator whose '@' prefix was
# stripped during extraction (pydub registers effects with
# '@register_pydub_effect') — confirm against the original source; as written
# it is a no-op expression statement.
_pydub_effect
def strip_silence(seg, silence_len=1000, silence_thresh=(- 16), padding=100):
    """Remove silent stretches from *seg*, keeping *padding* ms around chunks.

    Chunks are re-joined with a crossfade of half the padding.

    Raises:
        InvalidDuration: when padding exceeds silence_len.
    """
    if padding > silence_len:
        raise InvalidDuration('padding cannot be longer than silence_len')
    chunks = split_on_silence(seg, silence_len, silence_thresh, padding)
    if not chunks:
        # Nothing but silence: return an empty segment of the same type.
        return seg[0:0]
    crossfade = padding / 2
    result = chunks[0]
    for chunk in chunks[1:]:
        result = result.append(chunk, crossfade=crossfade)
    return result
class KipfGCN(torch.nn.Module):
    """Two-layer graph convolutional network for node classification."""

    def __init__(self, data, num_class, params):
        """data: graph data exposing num_features; params: hyper-params (gcn_dim, dropout)."""
        super(KipfGCN, self).__init__()
        self.p = params
        self.data = data
        # cached=True: the normalized adjacency is computed once and reused.
        self.conv1 = GCNConv(self.data.num_features, self.p.gcn_dim, cached=True)
        self.conv2 = GCNConv(self.p.gcn_dim, num_class, cached=True)

    def forward(self, x, edge_index):
        """Return per-node log-probabilities over the classes."""
        hidden = F.relu(self.conv1(x, edge_index))
        hidden = F.dropout(hidden, p=self.p.dropout, training=self.training)
        logits = self.conv2(hidden, edge_index)
        return F.log_softmax(logits, dim=1)
def main_worker(gpu, ngpus_per_node, args, checkpoint_folder):
    """Per-process MoCo pretraining worker (one process per GPU).

    Sets up distributed training, builds the MoCo model/criterion/optimizer,
    optionally resumes from a checkpoint, constructs the two-crop augmented
    dataloader, and runs the training loop with periodic checkpointing.
    """
    args.gpu = gpu
    # Silence print() on every process except the first.
    if (args.multiprocessing_distributed and (args.gpu != 0)):
        def print_pass(*args):
            pass
        builtins.print = print_pass
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        if ((args.dist_url == 'env://') and (args.rank == (- 1))):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            args.rank = ((args.rank * ngpus_per_node) + gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    print(f"=> creating model '{args.arch}', pretrained={args.from_imagenet}")
    model = moco.builder.MoCo(models.__dict__[args.arch], args.moco_dim, args.moco_k, args.moco_m, args.moco_t, args.mlp, args.from_imagenet)
    print(model)
    if args.distributed:
        if (args.gpu is not None):
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # Split the batch size and workers across this node's processes.
            args.batch_size = int((args.batch_size / ngpus_per_node))
            args.workers = int((((args.workers + ngpus_per_node) - 1) / ngpus_per_node))
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif (args.gpu is not None):
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
        # NOTE(review): this branch moves the model and then raises — matches
        # upstream MoCo, which deliberately rejects non-DDP modes; confirm the
        # preceding two lines are intentionally kept.
        raise NotImplementedError('Only DistributedDataParallel is supported.')
    else:
        raise NotImplementedError('Only DistributedDataParallel is supported.')
    print('Distributed model defined')
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    print('Loss defined')
    if (args.optimizer == 'sgd'):
        optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    elif (args.optimizer == 'adam'):
        optimizer = torch.optim.Adam(model.parameters(), args.lr, betas=(0.9, 0.999), weight_decay=args.weight_decay)
    print('Optimizer defined')
    if args.resume:
        # Optionally resume model/optimizer state from a checkpoint file.
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if (args.gpu is None):
                checkpoint = torch.load(args.resume)
            else:
                # Map the checkpoint onto this process's GPU.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    traindir = args.train_data
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # Augmentation pipeline: MoCo v2, MoCo v1, or CheXpert-specific.
    if (args.aug_setting == 'moco_v2'):
        augmentation = [transforms.RandomResizedCrop(224, scale=(0.2, 1.0)), transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8), transforms.RandomGrayscale(p=0.2), transforms.RandomApply([moco.loader.GaussianBlur([0.1, 2.0])], p=0.5), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]
    elif (args.aug_setting == 'moco_v1'):
        augmentation = [transforms.RandomResizedCrop(224, scale=(0.2, 1.0)), transforms.RandomGrayscale(p=0.2), transforms.ColorJitter(0.4, 0.4, 0.4, 0.4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]
    elif (args.aug_setting == 'chexpert'):
        augmentation = image_transform.get_transform(args, training=True)
    print('Augmentation defined')
    # TwoCropsTransform yields two augmented views per image (query and key).
    train_dataset = datasets.ImageFolder(traindir, moco.loader.TwoCropsTransform(transforms.Compose(augmentation)))
    print('Training dataset defined')
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)
    print('Training dataloader defined')
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle shards deterministically per epoch.
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        train(train_loader, model, criterion, optimizer, epoch, args)
        # Only rank-0 (per node) saves, at save_epoch intervals and at the end.
        if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0) and (((epoch % args.save_epoch) == 0) or (epoch == (args.epochs - 1))))):
            save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, is_best=False, filename=os.path.join(checkpoint_folder, 'checkpoint_{:04d}.pth.tar'.format(epoch)))
def divide_variable(self, c, i, in_place=False):
    """Divide variable ``i`` of this quadratic form by ``c``.

    With ``in_place=False`` (default) a modified copy is returned; otherwise
    this form is mutated and nothing is returned.
    """
    if not in_place:
        # Build a copy and apply the in-place version to it.
        Q = self.parent()(self.base_ring(), self.dim(), self.coefficients())
        Q.divide_variable(c, i, in_place=True)
        return Q
    # Diagonal entry scales by 1/c^2; every cross term with i scales by 1/c.
    self[(i, i)] = (self[(i, i)] / (c * c))
    for k in range(self.dim()):
        if (k != i):
            self[(k, i)] = (self[(k, i)] / c)
def DropoutIfTraining(model, blob_in, blob_out, dropout_rate):
    """Add a Dropout op only when training with a positive rate.

    Returns the dropout output blob in that case; otherwise the input blob
    is passed through unchanged.
    """
    apply_dropout = (model.train and (dropout_rate > 0))
    if not apply_dropout:
        return blob_in
    return model.Dropout(blob_in, blob_out, ratio=dropout_rate, is_test=False)
def leaky_relu(g, input, negative_slope, inplace=False):
    """ONNX symbolic for leaky_relu; `inplace` has no ONNX equivalent and is ignored."""
    alpha = _scalar(negative_slope)
    return g.op('LeakyRelu', input, alpha_f=alpha)
class GemmUniversalLauncher():
def __init__(self, operation: 'GemmOperationUniversal', seed: int=2080, interleaved=False, verification=True, profiling=False, warmup_iterations=500, iterations=500, **kwargs) -> None:
self.reduction_operation: ReductionOperation = ReductionOperation(shape=cutlass.MatrixCoord(4, (32 * operation.C.alignment)), C=operation.C, element_accumulator=operation.tile_description.math_instruction.element_accumulator, element_compute=operation.epilogue_functor.element_epilogue, epilogue_functor=operation.epilogue_functor, count=operation.C.alignment)
self.math_operation = operation.tile_description.math_instruction.math_operation
self.verification = verification
self.profiling = profiling
self.timer = GpuTimer()
self.warmup_iterations = warmup_iterations
self.iterations = iterations
if ('sleep' in kwargs.keys()):
self.sleep_time = kwargs['sleep']
else:
self.sleep_time = 0
op_list = [operation]
if (operation.arch < 90):
op_list.append(self.reduction_operation)
pycutlass.compiler.add_module(op_list)
self.operation = operation
self.dtype_A = GemmUniversalLauncher.numpy_type(operation.A.element)
self.dtype_B = GemmUniversalLauncher.numpy_type(operation.B.element)
self.dtype_C = GemmUniversalLauncher.numpy_type(operation.C.element)
self.dtype_D = GemmUniversalLauncher.numpy_type(operation.C.element)
accumulator_size = DataTypeSize[operation.tile_description.math_instruction.element_accumulator]
element_size = DataTypeSize[operation.A.element]
if (element_size == 1):
self.scope_max = 1
self.scope_min = 0
elif (element_size <= 8):
self.scope_max = 1
self.scope_min = (- 1)
elif (element_size == 16):
self.scope_max = 4
self.scope_min = (- 4)
else:
self.scope_max = 8
self.scope_min = (- 8)
self.seed: int = seed
self.interleaved = interleaved
self.compute_type = operation.epilogue_functor.element_epilogue
self.accumulator_type = operation.tile_description.math_instruction.element_accumulator
def print_problem_size(self, p, mode, batch_count):
if (mode == cutlass.gemm.Mode.Gemm):
mode = 'Gemm'
elif (mode == cutlass.gemm.Mode.Batched):
mode = 'GemmBatched'
elif (mode == cutlass.gemm.Mode.GemmSplitKParallel):
mode = 'GemmSplitKParallel'
problem_size = ('problem: %d, %d, %d\n batch_count: %d\n mode: %s' % (p.m(), p.n(), p.k(), batch_count, mode))
print(problem_size)
def numpy_type(type):
if (type == cutlass.float64):
return np.float64
elif (type == cutlass.float32):
return np.float32
elif (type == cutlass.float16):
return np.float16
elif (type == cutlass.bfloat16):
return bfloat16
elif (type == cutlass.int32):
return np.int32
elif (type == cutlass.int8):
return np.int8
else:
raise ValueError(('unsupported type: %s' % ShortDataTypeNames[type]))
def uniform_init(self, size, dtype):
if (dtype in [np.float32, np.float16, bfloat16, np.float64]):
return np.ceil(np.random.uniform(low=(self.scope_min - 0.5), high=(self.scope_max - 0.5), size=size).astype(dtype))
else:
return np.random.uniform(low=(self.scope_min - 1), high=(self.scope_max + 1), size=size).astype(dtype)
def reorder_tensor_B(self, tensor_B, problem_size):
reordered_tensor_B = np.empty_like(tensor_B)
tensor_ref_B = getTensorRef(tensor_B, problem_size, 'b', self.operation.B.layout)
reordered_tensor_ref_B = getTensorRef(reordered_tensor_B, problem_size, 'b', self.operation.B.layout)
cutlass.gemm.host.reorder_column(tensor_ref_B, reordered_tensor_ref_B, problem_size)
return reordered_tensor_B
def host_reference(self, problem_size, batch_count, tensor_A, tensor_B, tensor_C, alpha, beta):
tensor_D_ref = np.ones_like(tensor_C)
alpha = self.numpy_type(self.compute_type)(alpha)
beta = self.numpy_type(self.compute_type)(beta)
init_acc = 0
alpha = self.compute_type(alpha).value()
beta = self.compute_type(beta).value()
init_acc = self.accumulator_type(init_acc).value()
for i in range(batch_count):
if self.operation.switched:
tensor_ref_A = getTensorRef(tensor_A, problem_size, 'a', transpose(self.operation.B.layout), batch_offset=i)
tensor_ref_B = getTensorRef(tensor_B, problem_size, 'b', transpose(self.operation.A.layout), batch_offset=i)
tensor_ref_C = getTensorRef(tensor_C, problem_size, 'c', transpose(self.operation.C.layout), batch_offset=i)
tensor_ref_D_ref = getTensorRef(tensor_D_ref, problem_size, 'd', transpose(self.operation.C.layout), batch_offset=i)
else:
tensor_ref_A = getTensorRef(tensor_A, problem_size, 'a', self.operation.A.layout, batch_offset=i)
tensor_ref_B = getTensorRef(tensor_B, problem_size, 'b', self.operation.B.layout, batch_offset=i)
tensor_ref_C = getTensorRef(tensor_C, problem_size, 'c', self.operation.C.layout, batch_offset=i)
tensor_ref_D_ref = getTensorRef(tensor_D_ref, problem_size, 'd', self.operation.C.layout, batch_offset=i)
if (self.math_operation in [MathOperation.multiply_add_saturate]):
cutlass.test.gemm.host.gemm_saturate(problem_size, alpha, tensor_ref_A, tensor_ref_B, beta, tensor_ref_C, tensor_ref_D_ref, init_acc)
else:
cutlass.test.gemm.host.gemm(problem_size, alpha, tensor_ref_A, tensor_ref_B, beta, tensor_ref_C, tensor_ref_D_ref, init_acc)
return tensor_D_ref
def equal(self, tensor_D, tensor_D_ref, problem_size, batch_count):
    """Return True iff every batch of ``tensor_D`` matches the reference.

    Comparison is delegated to ``cutlass.test.gemm.host.equals`` on view
    pairs; ``all`` short-circuits on the first mismatching batch, exactly
    like an early ``return False``.
    """
    host_equals = cutlass.test.gemm.host.equals
    layout = self.operation.C.layout
    return all(
        host_equals(
            getTensorView(tensor_D, problem_size, 'd', layout, batch_offset=b),
            getTensorView(tensor_D_ref, problem_size, 'd', layout, batch_offset=b),
        )
        for b in range(batch_count)
    )
def bytes(self, problem_size, batch_count=1, alpha=1.0, beta=0.0):
    """Model the bytes moved by one GEMM: read A, read B, write D, and — when
    beta is non-zero — an additional read of the source C tensor.

    NOTE: ``alpha`` is accepted for signature symmetry with ``flops`` /
    profiler call sites but does not affect the byte count.
    """
    m, n, k = problem_size.m(), problem_size.n(), problem_size.k()
    bits_A = DataTypeSize[self.operation.A.element]
    bits_B = DataTypeSize[self.operation.B.element]
    bits_C = DataTypeSize[self.operation.C.element]
    total = ((bits_A * m) // 8) * k + ((bits_B * n) // 8) * k + ((bits_C * m) // 8) * n
    if beta != 0:
        # beta != 0 forces the epilogue to read the existing C operand too.
        total += ((bits_C * m) // 8) * n
    return total * batch_count
def flops(self, problem_size, batch_count=1):
    """Return the GEMM's floating-point operation count.

    Each of the M*N*K multiply-accumulates counts as 2 FLOPs, scaled by
    the number of batches.
    """
    return 2 * problem_size.m() * problem_size.n() * problem_size.k() * batch_count
def run_cutlass_profiler(self, mode, problem_size, batch_count=1, alpha=1.0, beta=0.0):
    """Run the stand-alone ``cutlass_profiler`` binary for this kernel.

    Requires the CUTLASS_PATH environment variable to point at a source tree
    with a built profiler. Cross-checks the profiler's reported byte and FLOP
    counts against this testbed's own ``bytes()``/``flops()`` models, and
    returns the runtime the profiler reports.
    """
    cutlass_path = os.getenv('CUTLASS_PATH')
    assert (cutlass_path is not None), "Environment variable 'CUTLASS_PATH' is not defined."
    values = {'profiler_path': (cutlass_path + '/build/tools/profiler/cutlass_profiler'), 'kernel_name': self.operation.procedural_name(), 'verification_providers': 'device', 'provider': 'cutlass', 'm': str(problem_size.m()), 'n': str(problem_size.n()), 'k': str(problem_size.k()), 'split_k_slices': str(batch_count), 'alpha': str(alpha), 'beta': str(beta), 'warmup': str(self.warmup_iterations), 'profile': str(self.iterations)}
    cmd_template = '${profiler_path} --kernels=${kernel_name} --verification-providers=${verification_providers} --providers=${provider} --m=${m} --n=${n} --k=${k}'
    cmd = SubstituteTemplate(cmd_template, values)
    result = subprocess.getoutput(cmd)
    # Parse the profiler's summary lines. Fail with a readable message if the
    # output format changed — previously a non-match crashed with an opaque
    # AttributeError on None.group(...).
    match = re.search('Runtime:\\s+(?P<runtime>\\d+.\\d+)', result)
    assert (match is not None), ('Could not parse Runtime from profiler output:\n' + result)
    runtime = float(match.group('runtime'))
    match = re.search('Bytes:\\s+(?P<bytes>\\d+)', result)
    assert (match is not None), ('Could not parse Bytes from profiler output:\n' + result)
    reported_bytes = int(match.group('bytes'))
    match = re.search('FLOPs:\\s+(?P<flops>\\d+)', result)
    assert (match is not None), ('Could not parse FLOPs from profiler output:\n' + result)
    reported_flops = int(match.group('flops'))
    # Bug fix: the previous call was self.bytes(problem_size, alpha, beta),
    # which positionally bound alpha to bytes()' batch_count parameter and
    # beta to its alpha parameter — wrong byte model whenever alpha != 1.
    # Bind by keyword so batch_count keeps its default of 1.
    assert (reported_bytes == self.bytes(problem_size, alpha=alpha, beta=beta))
    assert (reported_flops == self.flops(problem_size))
    return runtime
def run(self, mode, problem_size, batch_count=1, split_k_slices=1, alpha=1.0, beta=0.0):
    """Allocate tensors, launch the GEMM (plus the split-K reduction when
    needed), verify against the host reference, and optionally profile.

    Returns the measured runtime (float) when ``self.profiling`` is set,
    otherwise a bool indicating whether verification passed.
    """
    # The device memory pool must be empty before this run allocates anything;
    # a leftover allocation means a previous run leaked.
    assert (get_allocated_size() == 0), ('%d byte of pool memory is not released in previous run' % get_allocated_size())
    np.random.seed(self.seed)
    # Only Batched mode materializes batch_count copies of each operand;
    # the split-K modes operate on a single batch.
    true_batch_count = (batch_count if (mode == cutlass.gemm.Mode.Batched) else 1)
    tensor_A = self.uniform_init(size=(((problem_size.m() * problem_size.k()) * true_batch_count),), dtype=self.dtype_A)
    tensor_B = self.uniform_init(size=(((problem_size.n() * problem_size.k()) * true_batch_count),), dtype=self.dtype_B)
    tensor_C = self.uniform_init(size=(((problem_size.m() * problem_size.n()) * true_batch_count),), dtype=self.dtype_C)
    tensor_D = np.zeros(shape=(((problem_size.m() * problem_size.n()) * true_batch_count),), dtype=self.dtype_D)
    arguments = GemmArguments(operation=self.operation, problem_size=problem_size, A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D, output_op=self.operation.epilogue_type(alpha, beta), gemm_mode=mode, split_k_slices=split_k_slices, batch=batch_count)
    if (mode == cutlass.gemm.Mode.GemmSplitKParallel):
        # Parallel split-K leaves partial sums in the GEMM workspace
        # (arguments.ptr_D); this reduction collapses them into tensor_D.
        reduction_arguments = ReductionArguments(self.reduction_operation, problem_size=[problem_size.m(), problem_size.n()], partitions=split_k_slices, workspace=arguments.ptr_D, destination=tensor_D, source=tensor_C, output_op=self.reduction_operation.epilogue_type(alpha, beta))
    self.operation.run(arguments)
    if (mode == cutlass.gemm.Mode.GemmSplitKParallel):
        self.reduction_operation.run(reduction_arguments)
    passed = True
    if self.verification:
        # Sync whichever arguments object owns the final D output back to host.
        if (mode == cutlass.gemm.Mode.GemmSplitKParallel):
            reduction_arguments.sync()
        else:
            arguments.sync()
        tensor_D_ref = self.host_reference(problem_size, true_batch_count, tensor_A, tensor_B, tensor_C, alpha, beta)
        passed = self.equal(tensor_D, tensor_D_ref, problem_size, true_batch_count)
        try:
            assert passed
        except AssertionError:
            # Report the failing configuration, then fall through so cleanup
            # below still runs and `passed` is returned to the caller.
            self.print_problem_size(problem_size, mode, batch_count)
    if self.profiling:
        # Cool-down pause between measurements, then warm up the kernel(s)
        # before timing.
        sleep(self.sleep_time)
        for _ in range(self.warmup_iterations):
            self.operation.run(arguments)
            if (mode == cutlass.gemm.Mode.GemmSplitKParallel):
                self.reduction_operation.run(reduction_arguments)
        self.timer.start()
        for _ in range(self.iterations):
            self.operation.run(arguments)
            if (mode == cutlass.gemm.Mode.GemmSplitKParallel):
                self.reduction_operation.run(reduction_arguments)
        self.timer.stop_and_wait()
        # Average runtime per iteration as reported by the timer.
        runtime = self.timer.duration(self.iterations)
    # Drop the argument objects so their device-side buffers return to the
    # pool before the leak assertion below.
    del arguments
    if (mode == cutlass.gemm.Mode.GemmSplitKParallel):
        del reduction_arguments
    assert (get_allocated_size() == 0), ('%d byte of pool memory is not released after current run' % get_allocated_size())
    if self.profiling:
        return runtime
    return passed
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.