code stringlengths 101 5.91M |
|---|
def test_contextual_confusion_matrix_points(expected_point, observed_point):
    """The point-based fixtures should yield the (tn, fp, fn, tp)-style tuple (4, 7, 3, 5)."""
    expected = (4, 7, 3, 5)
    result = contextual_confusion_matrix(expected_point, observed_point)
    np.testing.assert_array_equal(np.array(result), np.array(expected))
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [313])
def test_exp_double_backward(seed, ctx, func_name):
    """Check the double-backward (gradient-of-gradient) of F.exp on a small random input.

    NOTE(review): the original decorator lines had lost their '@pytest.mark'
    prefix (extraction damage); restored here so parametrization works again.
    """
    from nbla_test_utils import backward_function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3).astype(np.float32)]
    backward_function_tester(rng, F.exp, inputs=inputs, func_args=[], func_kwargs={}, atol_accum=0.01, dstep=0.001, ctx=ctx)
class TestDrainConfig():
    """Unit tests for the DrainParams configuration object."""

    def setup(self):
        # Relative tolerance used for floating-point comparisons below.
        self.ERROR = 0.01

    def test_default_drain_config(self):
        """Defaults: sim_th is 0.4 and there are no extra delimiters."""
        default_params = DrainParams()
        assert isinstance(default_params, DrainParams)
        assert default_params.sim_th == pytest.approx(0.4, self.ERROR)
        assert not default_params.extra_delimiters

    def test_assigned_drain_config(self):
        """Explicit constructor arguments are reflected on the instance."""
        custom = DrainParams(sim_th=0.6, extra_delimiters=tuple(','))
        assert isinstance(custom, DrainParams)
        assert custom.sim_th == pytest.approx(0.6, self.ERROR)
        assert ',' in custom.extra_delimiters

    def test_from_dict(self):
        """from_dict builds params and coerces extra_delimiters to a tuple."""
        cfg = DrainParams.from_dict({'sim_th': 0.2, 'extra_delimiters': [',']})
        assert cfg.sim_th == approx(0.2), 'sim_th is not 0.2'
        assert type(cfg.extra_delimiters) is tuple, 'extra_delimiters is not tuple'
        assert ',' in cfg.extra_delimiters, "extra_delimiters doesn't contain comma"
class MlpContextEncoder(CudaModule):
    """Encode a context of alternating (count, value) rows into a hidden vector.

    Two embedding tables (one for counts, one for values) are multiplied
    element-wise and fed through a Tanh+Linear encoder.
    """

    def __init__(self, n, k, nembed, nhid, init_range, device_id):
        super(MlpContextEncoder, self).__init__(device_id)
        # Separate embedding tables for item counts and item values.
        self.cnt_enc = nn.Embedding(n, nembed)
        self.val_enc = nn.Embedding(n, nembed)
        self.encoder = nn.Sequential(nn.Tanh(), nn.Linear(k * nembed, nhid))
        self.cnt_enc.weight.data.uniform_(-init_range, init_range)
        self.val_enc.weight.data.uniform_(-init_range, init_range)
        init_cont(self.encoder, init_range)

    def forward(self, ctx):
        # ctx interleaves rows: even indices are counts, odd indices are values.
        pair_idx = np.arange(ctx.size(0) // 2)
        cnt_rows = Variable(self.to_device(torch.from_numpy(2 * pair_idx)))
        val_rows = Variable(self.to_device(torch.from_numpy(2 * pair_idx + 1)))
        cnt = ctx.index_select(0, cnt_rows)
        val = ctx.index_select(0, val_rows)
        # Gate count embeddings by value embeddings, then flatten per batch item.
        h = torch.mul(self.cnt_enc(cnt), self.val_enc(val))
        h = h.transpose(0, 1).contiguous().view(ctx.size(1), -1)
        return self.encoder(h).unsqueeze(0)
class Tree(nn.Module):
    """Soft decision tree whose nodes are neural modules.

    Every node owns three sub-modules: 'transform' (feature map),
    'classifier' (leaf predictor, assumed to emit log-probabilities — see
    the torch.exp calls) and 'router' (probability of taking the left
    branch).  The tree can be grown either by *splitting* one node into a
    left/right child pair or by *extending* one node with a single child;
    the two modes are mutually exclusive.
    """

    def __init__(self, tree_struct, tree_modules, split=False, node_split=None, child_left=None, child_right=None, extend=False, node_extend=None, child_extension=None, cuda_on=True, breadth_first=True, soft_decision=True):
        super(Tree, self).__init__()
        # Growing by splitting and by extending are mutually exclusive.
        assert (not (split and extend))
        self.soft_decision = soft_decision
        self.cuda_on = cuda_on
        self.split = split
        self.extend = extend
        self.tree_struct = tree_struct
        self.node_split = node_split
        self.node_extend = node_extend
        self.breadth_first = breadth_first
        # Pre-compute the leaves and a root-to-leaf path for each leaf.
        self.leaves_list = get_leaf_nodes(tree_struct)
        self.paths_list = [get_path_to_root(i, tree_struct) for i in self.leaves_list]
        # Wrap each node's dict of sub-modules so parameters are registered.
        self.tree_modules = nn.ModuleList()
        for (i, node) in enumerate(tree_modules):
            node_modules = nn.Sequential()
            node_modules.add_module('transform', node['transform'])
            node_modules.add_module('classifier', node['classifier'])
            node_modules.add_module('router', node['router'])
            self.tree_modules.append(node_modules)
        if split:
            # Candidate children for the node currently being split.
            self.child_left = nn.Sequential()
            self.child_left.add_module('transform', child_left['transform'])
            self.child_left.add_module('classifier', child_left['classifier'])
            self.child_left.add_module('router', child_left['router'])
            self.child_right = nn.Sequential()
            self.child_right.add_module('transform', child_right['transform'])
            self.child_right.add_module('classifier', child_right['classifier'])
            self.child_right.add_module('router', child_right['router'])
        if extend:
            # Candidate single child for the node currently being extended.
            self.child_extension = nn.Sequential()
            self.child_extension.add_module('transform', child_extension['transform'])
            self.child_extension.add_module('classifier', child_extension['classifier'])
            self.child_extension.add_module('router', child_extension['router'])

    def forward(self, input):
        """Dispatch to breadth-first or depth-first evaluation of the tree."""
        if self.breadth_first:
            return self.forward_breadth_first(input)
        else:
            return self.forward_depth_first(input)

    def forward_depth_first(self, input):
        """Sum leaf predictions weighted by path probability, one leaf at a time.

        Returns log-probabilities; during training additionally returns the
        routing probability at the split/extend node (prob_last).
        """
        y_pred = 0.0
        prob_last = None
        for (nodes, edges) in self.paths_list:
            # Paths ending at the split/extend node get special handling.
            if (self.split and (nodes[(- 1)] == self.node_split)):
                (y_tmp, prob_last) = self.node_pred_split(input, nodes, edges)
                y_pred += y_tmp
            elif (self.extend and (nodes[(- 1)] == self.node_extend)):
                (y_tmp, prob_last) = self.node_pred_extend(input, nodes, edges)
                y_pred += y_tmp
            else:
                y_pred += self.node_pred(input, nodes, edges)
        if self.training:
            # 1e-10 guards the log against exactly-zero mixture probabilities.
            return (torch.log((1e-10 + y_pred)), prob_last)
        else:
            return torch.log((1e-10 + y_pred))

    def forward_breadth_first(self, input):
        """Evaluate the whole tree level by level.

        t_list holds pending transformed features, r_list the matching
        accumulated routing probabilities; s_list collects leaf classifier
        outputs (log-probabilities).
        """
        t_list = [self.tree_modules[0].transform(input)]
        r_list = [1.0]
        s_list = []
        prob_last = 1.0
        for node in self.tree_struct:
            inp = t_list.pop(0)
            ro = r_list.pop(0)
            if (self.split and (node['index'] == self.node_split)):
                # Route between the two candidate children of the split node.
                s_list.append(self.child_left.classifier(self.child_left.transform(inp)))
                s_list.append(self.child_right.classifier(self.child_right.transform(inp)))
                p_left = self.tree_modules[node['index']].router(inp)
                p_left = torch.unsqueeze(p_left, 1)
                prob_last = p_left
                r_list.append((ro * p_left))
                r_list.append((ro * (1.0 - p_left)))
            elif (self.extend and (node['index'] == self.node_extend)):
                # A single extension child: routing probability is 1.
                s_list.append(self.child_extension.classifier(self.child_extension.transform(inp)))
                p_left = 1.0
                r_list.append((ro * p_left))
            elif node['is_leaf']:
                s_list.append(self.tree_modules[node['index']].classifier(inp))
                r_list.append(ro)
            elif node['extended']:
                # Extended internal node: only a left child exists.
                t_list.append(self.tree_modules[node['left_child']].transform(inp))
                p_left = self.tree_modules[node['index']].router(inp)
                r_list.append((ro * p_left))
            else:
                # Regular internal node with two children.
                t_list.append(self.tree_modules[node['left_child']].transform(inp))
                t_list.append(self.tree_modules[node['right_child']].transform(inp))
                p_left = self.tree_modules[node['index']].router(inp)
                p_left = torch.unsqueeze(p_left, 1)
                r_list.append((ro * p_left))
                r_list.append((ro * (1.0 - p_left)))
        # Mixture of leaf distributions weighted by routing probabilities.
        y_pred = 0.0
        for (r, s) in zip(r_list, s_list):
            y_pred += (r * torch.exp(s))
        out = torch.log((1e-10 + y_pred))
        if self.training:
            return (out, prob_last)
        else:
            return out

    def node_pred(self, input, nodes, edges):
        """Path-weighted prediction of a regular leaf.

        edges[i] is truthy when the path takes the left branch at nodes[i].
        """
        prob = 1.0
        for (node, state) in zip(nodes[:(- 1)], edges):
            input = self.tree_modules[node].transform(input)
            if state:
                prob = (prob * self.tree_modules[node].router(input))
            else:
                prob = (prob * (1.0 - self.tree_modules[node].router(input)))
        # prob stays a float only when the path has no internal routers.
        if (not isinstance(prob, float)):
            prob = torch.unsqueeze(prob, 1)
        node_final = nodes[(- 1)]
        input = self.tree_modules[node_final].transform(input)
        y_pred = (prob * torch.exp(self.tree_modules[node_final].classifier(input)))
        return y_pred

    def node_pred_split(self, input, nodes, edges):
        """Prediction for the leaf being split: mix the two candidate children."""
        prob = 1.0
        for (node, state) in zip(nodes[:(- 1)], edges):
            input = self.tree_modules[node].transform(input)
            if state:
                prob = (prob * self.tree_modules[node].router(input))
            else:
                prob = (prob * (1.0 - self.tree_modules[node].router(input)))
        if (not isinstance(prob, float)):
            prob = torch.unsqueeze(prob, 1)
        node_final = nodes[(- 1)]
        input = self.tree_modules[node_final].transform(input)
        # Routing probability at the split node, returned for the training loss.
        prob_last = torch.unsqueeze(self.tree_modules[node_final].router(input), 1)
        y_pred = (prob * ((prob_last * torch.exp(self.child_left.classifier(self.child_left.transform(input)))) + ((1.0 - prob_last) * torch.exp(self.child_right.classifier(self.child_right.transform(input))))))
        return (y_pred, prob_last)

    def node_pred_extend(self, input, nodes, edges):
        """Prediction for the leaf being extended with a single child."""
        prob = 1.0
        for (node, state) in zip(nodes[:(- 1)], edges):
            input = self.tree_modules[node].transform(input)
            if state:
                prob = (prob * self.tree_modules[node].router(input))
            else:
                prob = (prob * (1.0 - self.tree_modules[node].router(input)))
        if (not isinstance(prob, float)):
            prob = torch.unsqueeze(prob, 1)
        # Extension child is taken deterministically.
        prob_last = 1.0
        node_final = nodes[(- 1)]
        input = self.tree_modules[node_final].transform(input)
        y_pred = (prob * torch.exp(self.child_extension.classifier(self.child_extension.transform(input))))
        return (y_pred, prob_last)

    def compute_routing_probabilities(self, input):
        """Return per-sample routing probabilities for every leaf, concatenated
        along dim 1 in paths_list order (split node contributes two columns).

        NOTE(review): `input` is re-transformed cumulatively across paths in
        this loop (it is not reset per path) — confirm this is intended.
        """
        for (i, (nodes, edges)) in enumerate(self.paths_list):
            prob = 1.0
            for (node, state) in zip(nodes[:(- 1)], edges):
                input = self.tree_modules[node].transform(input)
                if state:
                    prob = (prob * self.tree_modules[node].router(input))
                else:
                    prob = (prob * (1.0 - self.tree_modules[node].router(input)))
            if (not isinstance(prob, float)):
                prob = torch.unsqueeze(prob, 1)
            if (self.split and (nodes[(- 1)] == self.node_split)):
                # The split node contributes a column per candidate child.
                node_final = nodes[(- 1)]
                input = self.tree_modules[node_final].transform(input)
                prob_last = torch.unsqueeze(self.tree_modules[node_final].router(input), 1)
                prob = torch.cat(((prob_last * prob), ((1.0 - prob_last) * prob)), dim=1)
            if (i == 0):
                prob_tensor = prob
            else:
                prob_tensor = torch.cat((prob_tensor, prob), dim=1)
        return prob_tensor

    def compute_routing_probability_specificnode(self, input, node_idx):
        """Return the total (batch-summed) probability of reaching node_idx."""
        (nodes, edges) = get_path_to_root(node_idx, self.tree_struct)
        prob = 1.0
        for (node, edge) in zip(nodes[:(- 1)], edges):
            input = self.tree_modules[node].transform(input)
            if edge:
                prob = (prob * self.tree_modules[node].router(input))
            else:
                prob = (prob * (1.0 - self.tree_modules[node].router(input)))
        if (not isinstance(prob, float)):
            prob = torch.unsqueeze(prob, 1)
            prob_sum = prob.sum(dim=0)
            return prob_sum.data[0]
        else:
            # Root node: probability 1 for every sample in the batch.
            return (prob * input.size(0))

    def compute_routing_probabilities_uptonode(self, input, node_idx):
        """Routing probabilities restricted to the leaves of the sub-tree
        grown up to node_idx; returns (prob_tensor, leaves_up_to_node)."""
        leaves_up_to_node = get_past_leaf_nodes(self.tree_struct, node_idx)
        paths_list_up_to_node = [get_path_to_root(i, self.tree_struct) for i in leaves_up_to_node]
        for (i, (nodes, edges)) in enumerate(paths_list_up_to_node):
            dtype = (torch.cuda.FloatTensor if self.cuda_on else torch.FloatTensor)
            prob = Variable(torch.ones(input.size(0)).type(dtype))
            # Work on a copy so each path starts from the raw input.
            output = input.clone()
            for (node, state) in zip(nodes[:(- 1)], edges):
                output = self.tree_modules[node].transform(output)
                if state:
                    prob = (prob * self.tree_modules[node].router(output))
                else:
                    prob = (prob * (1.0 - self.tree_modules[node].router(output)))
            if (not isinstance(prob, float)):
                prob = torch.unsqueeze(prob, 1)
            if (self.split and (nodes[(- 1)] == self.node_split)):
                node_final = nodes[(- 1)]
                output = self.tree_modules[node_final].transform(output)
                prob_last = torch.unsqueeze(self.tree_modules[node_final].router(output), 1)
                prob = torch.cat(((prob_last * prob), ((1.0 - prob_last) * prob)), dim=1)
            if (i == 0):
                prob_tensor = prob
            else:
                prob_tensor = torch.cat((prob_tensor, prob), dim=1)
        return (prob_tensor, leaves_up_to_node)

    def update_tree_modules(self):
        """Export the current node modules back to the list-of-dicts format."""
        tree_modules_new = []
        for node_module in self.tree_modules:
            node = {'transform': node_module.transform, 'classifier': node_module.classifier, 'router': node_module.router}
            tree_modules_new.append(node)
        return tree_modules_new

    def update_children(self):
        """Export the candidate child module(s) as dict(s); requires an active
        split or extend operation."""
        assert (self.split or self.extend)
        if self.split:
            child_left = {'transform': self.child_left.transform, 'classifier': self.child_left.classifier, 'router': self.child_left.router}
            child_right = {'transform': self.child_right.transform, 'classifier': self.child_right.classifier, 'router': self.child_right.router}
            print('returning left and right children')
            return (child_left, child_right)
        elif self.extend:
            child_extension = {'transform': self.child_extension.transform, 'classifier': self.child_extension.classifier, 'router': self.child_extension.router}
            print('returning an extended child')
            return child_extension
def to_tensor(data):
    """Convert common Python/NumPy containers to torch tensors.

    Tensors pass through unchanged, ndarrays are wrapped without copying,
    non-string sequences are converted element-wise, ints/floats become
    1-element Long/Float tensors, and None passes through.  Anything else
    raises TypeError.
    """
    if isinstance(data, torch.Tensor):
        return data
    if isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    if isinstance(data, Sequence) and not mmcv.is_str(data):
        return [to_tensor(item) for item in data]
    if isinstance(data, int):
        return torch.LongTensor([data])
    if isinstance(data, float):
        return torch.FloatTensor([data])
    if data is None:
        return data
    raise TypeError('type {} cannot be converted to tensor.'.format(type(data)))
class BertQA():
    """Extractive QA wrapper around a HuggingFace AutoModelForQuestionAnswering.

    Supports several page-retrieval strategies ('logits', 'oracle',
    'concat', 'none') for multi-page document VQA.
    """

    def __init__(self, config):
        self.batch_size = config['batch_size']
        self.model = AutoModelForQuestionAnswering.from_pretrained(config['model_weights'])
        self.tokenizer = AutoTokenizer.from_pretrained(config['model_weights'])
        # Optional strategy for choosing which page the answer came from.
        self.page_retrieval = (config['page_retrieval'].lower() if ('page_retrieval' in config) else None)
        self.max_sequence_length = config.get('max_sequence_length', 512)
        # Sentinel position fed to the loss for answers outside the window.
        self.ignore_index = 9999

    def parallelize(self):
        """Wrap the model for multi-GPU data parallelism."""
        self.model = nn.DataParallel(self.model)

    def prepare_inputs_for_vqa(self, question, context, context_page_corresp, answers=None):
        """Tokenize question+context pairs and, when answers are given,
        locate start/end token positions for supervision."""
        encoding = self.tokenizer(question, context, padding=True, truncation=True, max_length=self.max_sequence_length, return_tensors='pt')
        input_ids = encoding['input_ids'].to(self.model.device)
        attention_mask = encoding['attention_mask'].to(self.model.device)
        # Context-only encoding is needed to map answer spans back to pages.
        context_encoding = self.tokenizer.batch_encode_plus(context, padding=True, truncation=True, max_length=self.max_sequence_length)
        if (answers is not None):
            (start_pos, end_pos, context_page_token_correspondent) = model_utils.get_start_end_idx('BertQA', encoding, context, context_encoding, answers, context_page_corresp, self.page_retrieval, self.tokenizer.sep_token_id, self.tokenizer.pad_token_id, self.ignore_index, self.model.device)
        else:
            (start_pos, end_pos, context_page_token_correspondent) = (None, None, None)
        return (input_ids, attention_mask, context_encoding, start_pos, end_pos, context_page_token_correspondent)

    def forward(self, batch, return_pred_answer=False):
        """Run QA over a batch; returns (outputs, pred_answers, pred_answer_pages, answ_confidence)."""
        question = batch['questions']
        context = batch['contexts']
        answers = batch['answers']
        if (self.page_retrieval == 'logits'):
            # Run every page independently and keep the page whose answer
            # has the highest confidence.
            outputs = []
            pred_answers = []
            pred_answer_pages = []
            answ_confidence = []
            for batch_idx in range(len(context)):
                document_encoding = self.tokenizer(([question[batch_idx]] * len(context[batch_idx])), context[batch_idx], padding=True, truncation=True, max_length=self.max_sequence_length, return_tensors='pt')
                max_logits = (- 999999)
                answer_page = None
                document_outputs = None
                for page_idx in range(len(document_encoding['input_ids'])):
                    input_ids = document_encoding['input_ids'][page_idx].to(self.model.device)
                    attention_mask = document_encoding['attention_mask'][page_idx].to(self.model.device)
                    page_outputs = self.model(input_ids.unsqueeze(dim=0), attention_mask=attention_mask.unsqueeze(dim=0))
                    (pred_answer, answer_conf) = self.get_answer_from_model_output(input_ids.unsqueeze(dim=0), page_outputs)
                    if (answer_conf[0] > max_logits):
                        answer_page = page_idx
                        document_outputs = page_outputs
                        max_logits = answer_conf[0]
                outputs.append(None)
                # NOTE(review): when return_pred_answer is False this calls
                # list.extend(None), which raises TypeError — confirm callers
                # always pass return_pred_answer=True in 'logits' mode.
                pred_answers.extend((self.get_answer_from_model_output([document_encoding['input_ids'][answer_page]], document_outputs)[0] if return_pred_answer else None))
                pred_answer_pages.append(answer_page)
                answ_confidence.append(max_logits)
        else:
            (input_ids, attention_mask, context_encoding, start_pos, end_pos, context_page_token_correspondent) = self.prepare_inputs_for_vqa(question, context, batch['context_page_corresp'], answers)
            outputs = self.model(input_ids, attention_mask=attention_mask, start_positions=start_pos, end_positions=end_pos)
            # NOTE(review): unpacking None when return_pred_answer is False
            # raises TypeError — same caveat as above.
            (pred_answers, answ_confidence) = (self.get_answer_from_model_output(input_ids, outputs) if return_pred_answer else None)
            if (self.page_retrieval == 'oracle'):
                pred_answer_pages = batch['answer_page_idx']
            elif (self.page_retrieval == 'concat'):
                # Map each predicted start token back to its source page.
                pred_answer_pages = [(context_page_token_correspondent[batch_idx][pred_start_idx].item() if (len(context_page_token_correspondent[batch_idx]) > pred_start_idx) else (- 1)) for (batch_idx, pred_start_idx) in enumerate(outputs.start_logits.argmax((- 1)).tolist())]
            elif (self.page_retrieval == 'none'):
                pred_answer_pages = None
        return (outputs, pred_answers, pred_answer_pages, answ_confidence)

    def get_answer_from_model_output(self, input_tokens, outputs):
        """Decode the argmax start/end span of each batch item into answer text."""
        start_idxs = torch.argmax(outputs.start_logits, axis=1)
        end_idxs = torch.argmax(outputs.end_logits, axis=1)
        answers = []
        for batch_idx in range(len(input_tokens)):
            context_tokens = self.tokenizer.convert_ids_to_tokens(input_tokens[batch_idx].tolist())
            answer_tokens = context_tokens[start_idxs[batch_idx]:(end_idxs[batch_idx] + 1)]
            answer = self.tokenizer.decode(self.tokenizer.convert_tokens_to_ids(answer_tokens))
            answer = answer.strip()
            answers.append(answer)
        answ_confidence = model_utils.get_extractive_confidence(outputs)
        return (answers, answ_confidence)
def skip_combination(net, method, suffix_aggr):
    """Return True for (net, method) pairs that should be skipped.

    Only the 'vgg' network has incompatible attribution methods;
    suffix_aggr is accepted for a uniform call signature but unused.
    """
    vgg_incompatible = {'tlEBPreluLayer', 'tlEBPposReflect', 'tlEBPnegReflect', 'meanEBP_VGG'}
    return net == 'vgg' and method in vgg_incompatible
def cal_mask_bbox(head_mask, factor=1.3):
    """Compute a `factor`-enlarged bounding box of the foreground of each mask.

    Args:
        head_mask: array of shape (bs, 1, height, width); foreground pixels
            are exactly 1.
        factor: enlargement ratio applied to the tight box (default 1.3).

    Returns:
        (bbox, valid):
            bbox: int32 array (bs, 4) as (left_x, right_x, top_y, bottom_y);
                masks with no foreground, or whose enlarged box degenerates
                to zero width/height, get the full frame (0, width, 0, height).
            valid: float32 array (bs,), 1.0 for a usable box else 0.0.
    """
    (bs, _, height, width) = head_mask.shape
    bbox = np.zeros((bs, 4), dtype=np.int32)
    valid = np.ones((bs,), dtype=np.float32)
    # Fallback used whenever no usable box exists (deduplicated from the
    # two identical fill blocks in the original).
    full_frame = (0, width, 0, height)
    for i in range(bs):
        mask = head_mask[(i, 0)]
        (ys, xs) = np.where((mask == 1))
        if (len(ys) == 0):
            # No foreground at all: invalid, fall back to the full frame.
            valid[i] = 0.0
            bbox[i] = full_frame
            continue
        # Tight box of the foreground.
        lt_y = np.min(ys)
        lt_x = np.min(xs)
        rt_y = np.max(ys)
        rt_x = np.max(xs)
        # Enlarge around the centre by `factor`, clamped to the frame.
        cy = ((lt_y + rt_y) // 2)
        cx = ((lt_x + rt_x) // 2)
        _h = ((rt_y - lt_y) * factor)
        _w = ((rt_x - lt_x) * factor)
        _lt_y = max(0, int((cy - (_h / 2))))
        _lt_x = max(0, int((cx - (_w / 2))))
        _rt_y = min(height, int((cy + (_h / 2))))
        _rt_x = min(width, int((cx + (_w / 2))))
        if ((_lt_x == _rt_x) or (_lt_y == _rt_y)):
            # Degenerate (zero-area) box: invalid, fall back to the full frame.
            valid[i] = 0.0
            bbox[i] = full_frame
        else:
            bbox[i] = (_lt_x, _rt_x, _lt_y, _rt_y)
    return (bbox, valid)
def ID2arch(hist_df, state_str_to_state_shortname):
    """Map each architecture ID in hist_df to its tuple of layer short-names.

    Layer columns are those named 'L1', 'L2', ... ; for each ID, the value in
    column 'L{i}' is translated through state_str_to_state_shortname[i - 1].
    """
    layer_cols = [c for c in hist_df.columns.values if c.startswith('L')]
    num_layers = len(layer_cols)
    id2arch = {}
    for arch_id in hist_df.ID:
        row = hist_df.loc[(hist_df.ID == arch_id)]
        arch = tuple(
            state_str_to_state_shortname[layer][row[('L%i' % (layer + 1))].iloc[0]]
            for layer in range(num_layers)
        )
        id2arch[arch_id] = arch
    return id2arch
def test_clean_inplace(df_urls: pd.DataFrame) -> None:
    """clean_url with inplace=True should replace the column with parsed details.

    NOTE(review): the expected-value literal below is corrupted — several
    dict values read `'scheme': ' 'host':`, i.e. the scheme strings were
    lost in extraction and the dict syntax is now invalid. Reconstruct the
    expected frame from the original repository before relying on this test.
    """
    df_clean = clean_url(df_urls, column='messy_url', inplace=True, report=False)
    df_check = pd.DataFrame({'messy_url_details': [np.nan, {'scheme': ' 'host': 'www.facebookee.com', 'messy_url_clean': ' 'queries': {'auth': 'facebookeeauth', 'token': 'iwusdkc', 'not_token': 'hiThere', 'another_token': ''}}, {'scheme': ' 'host': 'www.sfu.ca', 'messy_url_clean': ' 'queries': {'auth': 'sampletoken1', 'studentid': '1234', 'loc': 'van'}}, np.nan, np.nan, np.nan, {'scheme': ' 'host': 'www.sfu.ca', 'messy_url_clean': ' 'queries': {'auth': 'sampletoken2', 'studentid': '1230', 'loc': 'bur'}}, np.nan, np.nan, np.nan, np.nan, {'scheme': ' 'host': 'www.sfu.ca', 'messy_url_clean': ' 'queries': {'auth': 'sampletoken3', 'studentid': '1231', 'loc': 'sur'}}, {'scheme': ' 'host': 'www.sfu.ca', 'messy_url_clean': ' 'queries': {'auth': 'sampletoken1', 'studentid': '1232', 'loc': 'van'}}]})
    assert df_check.equals(df_clean)
class MlpHead(nn.Module):
    """Classification head: Linear -> activation -> LayerNorm -> Dropout -> Linear."""

    def __init__(self, dim, num_classes=1000, mlp_ratio=4, act_layer=SquaredReLU, norm_layer=nn.LayerNorm, head_dropout=0.0, bias=True):
        super().__init__()
        # Hidden width is the input width scaled by mlp_ratio.
        hidden_dim = int(mlp_ratio * dim)
        self.fc1 = nn.Linear(dim, hidden_dim, bias=bias)
        self.act = act_layer()
        self.norm = norm_layer(hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, num_classes, bias=bias)
        self.head_dropout = nn.Dropout(head_dropout)

    def forward(self, x):
        # fc1 -> act -> norm -> dropout -> fc2, expressed as one pipeline.
        return self.fc2(self.head_dropout(self.norm(self.act(self.fc1(x)))))
class PTBTokenizer():
    """Tokenize image captions with the Stanford CoreNLP PTBTokenizer via a
    subprocess, stripping punctuation tokens."""

    def tokenize(self, captions_for_image):
        """Return {image_id: [tokenized caption, ...]} for every caption.

        Captions are concatenated one-per-line into a temp file that the
        Java tokenizer reads; output lines map back to image ids by order.
        """
        cmd = ['java', '-cp', STANFORD_CORENLP_3_4_1_JAR, 'edu.stanford.nlp.process.PTBTokenizer', '-preserveLines', '-lowerCase']
        final_tokenized_captions_for_image = {}
        # One id per caption, in the same order the sentences are written.
        image_id = [k for (k, v) in captions_for_image.items() for _ in range(len(v))]
        sentences = '\n'.join([c['caption'].replace('\n', ' ') for (k, v) in captions_for_image.items() for c in v])
        path_to_jar_dirname = os.path.dirname(os.path.abspath(__file__))
        tmp_file = tempfile.NamedTemporaryFile(delete=False, dir=path_to_jar_dirname)
        tmp_file.write(sentences.encode('UTF-8'))
        tmp_file.close()
        cmd.append(os.path.basename(tmp_file.name))
        p_tokenizer = subprocess.Popen(cmd, cwd=path_to_jar_dirname, stdout=subprocess.PIPE)
        # NOTE(review): this looks like Python 2-era code — communicate() here
        # returns bytes on Python 3 (so .split('\n') below would fail), and
        # `input=` is passed without stdin=subprocess.PIPE; verify on the
        # target Python version.
        token_lines = p_tokenizer.communicate(input=sentences.rstrip())[0]
        lines = token_lines.split('\n')
        os.remove(tmp_file.name)
        for (k, line) in zip(image_id, lines):
            if (not (k in final_tokenized_captions_for_image)):
                final_tokenized_captions_for_image[k] = []
            # Drop punctuation-only tokens before re-joining.
            tokenized_caption = ' '.join([w for w in line.rstrip().split(' ') if (w not in PUNCTUATIONS)])
            final_tokenized_captions_for_image[k].append(tokenized_caption)
        return final_tokenized_captions_for_image
def import_dataset(name='CORA'):
    """Load a benchmark graph dataset by name.

    Known names (case-insensitive): CORA, CORA-F, CITESEER, PUBMED,
    COAUTHOR-P, COAUTHOR-C, AMAZON-C, AMAZON-P.  The special name 'all'
    downloads every dataset and exits the process.
    """
    root = f'BENCHMARK/{name.upper()}/'
    key = name.upper()
    # Dispatch table: dataset key -> (loader class, dataset name argument).
    loaders = {
        'CORA': (Planetoid, 'CORA'),
        'CORA-F': (CitationFull, 'cora'),
        'CITESEER': (Planetoid, 'citeseer'),
        'PUBMED': (Planetoid, 'PubMed'),
        'COAUTHOR-P': (Coauthor, 'Physics'),
        'COAUTHOR-C': (Coauthor, 'CS'),
        'AMAZON-C': (Amazon, 'Computers'),
        'AMAZON-P': (Amazon, 'Photo'),
    }
    if key in loaders:
        loader_cls, ds_name = loaders[key]
        dataset = loader_cls(root=root, name=ds_name)
    elif name.lower() == 'all':
        # Materialize every dataset on disk, then stop the process.
        for loader_cls, ds_name in [(Planetoid, 'CORA'), (Planetoid, 'citeseer'),
                                    (CitationFull, 'cora'), (Planetoid, 'PubMed'),
                                    (Coauthor, 'Physics'), (Coauthor, 'CS'),
                                    (Amazon, 'Computers'), (Amazon, 'Photo')]:
            loader_cls(root=root, name=ds_name)
        exit()
    # An unrecognized name raises NameError here, matching the original.
    return dataset
def alpha_analysis(alpha):
    """Classify a Krippendorff-style alpha reliability value.

    >= 0.8 -> 'High', [0.667, 0.8) -> 'Tentative', < 0.667 -> 'Low';
    non-comparable or NaN inputs yield 'None'.
    """
    try:
        if alpha >= 0.8:
            return 'High'
        if alpha >= 0.667:
            return 'Tentative'
        if alpha < 0.667:
            return 'Low'
        # Reached only when every comparison is False (e.g. NaN).
        return 'None'
    except Exception:
        # Non-numeric input cannot be compared.
        return 'None'
def job_fssdJ5q_imq_opt(p, data_source, tr, te, r, null_sim=None):
    """FSSD (IMQ kernel, optimized) job with J=5 test locations; delegates to
    the J=1 variant with J overridden.

    NOTE(review): null_sim is accepted but not forwarded to
    job_fssdJ1q_imq_opt — confirm that is intentional.
    """
    return job_fssdJ1q_imq_opt(p, data_source, tr, te, r, J=5)
@pytest.mark.experimental
def test_read_data_invalid_format(data_preparator):
    """Invalid format_type, or missing both data and path, must raise ValueError.

    NOTE(review): the decorator line had lost its '@pytest.mark' prefix
    (extraction damage); restored here — confirm the mark name.
    """
    with pytest.raises(ValueError, match='Invalid value of format_type.*'):
        data_preparator.read_as_spark_df(path='/test_path', format_type='blabla')
    with pytest.raises(ValueError, match='Either data or path parameters must not be None'):
        data_preparator.read_as_spark_df(format_type='csv')
def submit_pai_evaluate(datasource, original_sql, select, label_name, model, model_params, result_table, user=''):
    """Submit a model-evaluation job to Alibaba PAI.

    Builds the evaluation parameters from this function's arguments, creates
    the result table, and either runs locally or packages and submits a PAI
    TF task.  Raises SQLFlowDiagnostic for unsupported PAIML models.
    """
    # Capture all arguments as the job parameter dict (must stay the first
    # statement so locals() contains exactly the function arguments).
    params = dict(locals())
    project = table_ops.get_project(datasource)
    # Qualify the result table with the project name when not already qualified.
    if (result_table.count('.') == 0):
        result_table = ('%s.%s' % (project, result_table))
    params['result_table'] = result_table
    oss_model_path = pai_model.get_oss_model_save_path(datasource, model, user=user)
    (model_type, estimator) = pai_model.get_saved_model_type_and_estimator(datasource, model)
    if (model_type == EstimatorType.PAIML):
        raise SQLFlowDiagnostic('PAI model evaluation is not supported yet.')
    # Entry point and default metrics depend on the model family.
    if (model_type == EstimatorType.XGBOOST):
        params['entry_type'] = 'evaluate_xgb'
        validation_metrics = model_params.get('validation.metrics', 'accuracy_score')
    else:
        params['entry_type'] = 'evaluate_tf'
        validation_metrics = model_params.get('validation.metrics', 'Accuracy')
    validation_metrics = [m.strip() for m in validation_metrics.split(',')]
    with db.connect_with_data_source(datasource) as conn:
        result_column_names = create_evaluate_table(conn, result_table, validation_metrics)
    with table_ops.create_tmp_tables_guard(select, datasource) as data_table:
        params['pai_table'] = data_table
        params['result_column_names'] = result_column_names
        # Prefer a local run when possible; otherwise package and submit to PAI.
        if try_pai_local_run(params, oss_model_path):
            return
        conf = cluster_conf.get_cluster_config(model_params)
        with temp_file.TemporaryDirectory(prefix='sqlflow', dir='/tmp') as cwd:
            prepare_archive(cwd, estimator, oss_model_path, params)
            cmd = get_pai_tf_cmd(conf, ('file://' + os.path.join(cwd, JOB_ARCHIVE_FILE)), ('file://' + os.path.join(cwd, PARAMS_FILE)), ENTRY_FILE, model, oss_model_path, data_table, '', result_table, project)
            submit_pai_task(cmd, datasource)
class TorchSimpleFeatures(FeaturesPipeline, TabularDataFeatures):
    """Feature pipeline for torch-based models: label/target-encodes
    categoricals, converts datetimes to numbers, and normalizes numerics
    (quantile transform or standard scaling)."""

    def __init__(self, use_te: bool=False, top_intersections: int=5, max_bin_count: int=10, max_intersection_depth: int=3, te_subsample: Optional[Union[(int, float)]]=None, sparse_ohe: Union[(str, bool)]='auto', auto_unique_co: int=50, output_categories: bool=True, multiclass_te_co: int=3, use_qnt: bool=True, n_quantiles: Optional[int]=None, subsample: int=.0, output_distribution: str='normal', noise: float=0.001, qnt_factor: int=30, **kwargs):
        super().__init__(multiclass_te=False, top_intersections=top_intersections, max_intersection_depth=max_intersection_depth, subsample=te_subsample, auto_unique_co=auto_unique_co, output_categories=output_categories, ascending_by_cardinality=True, max_bin_count=max_bin_count, sparse_ohe=sparse_ohe, multiclass_te_co=multiclass_te_co, **kwargs)
        # Quantile-transformer settings for numeric columns.
        self.use_qnt = use_qnt
        self.n_quantiles = n_quantiles
        self.subsample = subsample
        self.output_distribution = output_distribution
        self.noise = noise
        self.qnt_factor = qnt_factor
        # Whether to target-encode categorical columns instead of label-encoding.
        self.use_te = use_te

    def create_pipeline(self, train: NumpyOrPandas) -> LAMLTransformer:
        """Build the feature-transformer union for the given training dataset."""
        transformers_list = []
        # Categorical columns, with frequency-encoded ones handled separately.
        cat_cols = get_columns_by_role(train, 'Category')
        freq_cols = get_columns_by_role(train, 'Category', encoding_type='freq')
        other_cols = sorted(list((set(cat_cols) - set(freq_cols))))
        transformers_list.append(self.get_freq_encoding(train, freq_cols))
        if (len(other_cols) > 0):
            cat_processing = SequentialTransformer([ColumnsSelector(keys=other_cols), LabelEncoder()])
            if self.use_te:
                # Target-encode raw categoricals and their intersections.
                target_encoder = self.get_target_encoder(train)
                te_part = self.get_categorical_raw(train, other_cols)
                if ((te_part is not None) and (target_encoder is not None)):
                    transformers_list.append(SequentialTransformer([te_part, target_encoder()]))
                intersections = self.get_categorical_intersections(train)
                if ((intersections is not None) and (target_encoder is not None)):
                    transformers_list.append(SequentialTransformer([intersections, target_encoder()]))
            else:
                transformers_list.append(cat_processing)
        # Datetime columns become numeric timestamps.
        datetimes = get_columns_by_role(train, 'Datetime')
        if (len(datetimes) > 0):
            dt_processing = SequentialTransformer([ColumnsSelector(keys=datetimes), TimeToNum()])
            transformers_list.append(dt_processing)
        # Numeric columns: impute, then quantile-transform or standard-scale.
        numerics = get_columns_by_role(train, 'Numeric')
        if (len(numerics) > 0):
            num_processing = SequentialTransformer([ColumnsSelector(keys=numerics), FillInf(), FillnaMean(), (QuantileTransformer(n_quantiles=self.n_quantiles, subsample=self.subsample, output_distribution=self.output_distribution, noise=self.noise, qnt_factor=self.qnt_factor) if self.use_qnt else StandardScaler()), ConvertDataset(dataset_type=NumpyDataset), ChangeRoles(NumericRole(np.float32))])
            transformers_list.append(num_processing)
        union_all = UnionTransformer(transformers_list)
        return union_all
class _DiscreteQFunctionProtocol(Protocol):
    """Structural type for objects exposing a discrete-ensemble Q-function forwarder."""
    # Forwarder used to evaluate the ensemble of discrete Q-functions.
    _q_func_forwarder: DiscreteEnsembleQFunctionForwarder
class QuantizeNonQNNToRecordingModifier(FunctionModifier):
    """Graph modifier that inserts recorder functions around non-QNN layers
    so activation/weight ranges can be recorded for later quantization."""

    def __init__(self, functions_ranks, config=None, training=True):
        super(QuantizeNonQNNToRecordingModifier, self).__init__()
        self._config = config
        # Binary element-wise functions: both inputs are recorded.
        self._fct_bin_set = {'Add2': F.add2, 'Sub2': F.sub2, 'Mul2': F.mul2, 'Div2': F.div2, 'Pow2': F.pow2}
        self._training = training
        # Mapping: function -> topological rank, used to name parameter scopes.
        self.functions_ranks = functions_ranks

    def get_function_rank(self, f):
        """Return the recorded rank of function f, or -1 when unknown."""
        rank = self.functions_ranks.get(f, (- 1))
        return rank

    def check(self, f):
        """Decide whether function f should have recorders inserted."""

        def backward_traverse(f, l):
            # Collect all ancestor functions of f into l.
            for inp in f.inputs:
                if (inp.parent is not None):
                    l.append(inp.parent)
                    backward_traverse(inp.parent, l)

        def forward_traverse(f, l):
            # Collect all descendant functions of f into l.
            for fref in f.outputs[0].function_references:
                l.append(fref)
                forward_traverse(fref, l)

        def is_skip_layer(f):
            # Skip layers adjacent to the network inputs/outputs when the
            # config lists their type names.
            skip_inputs_layers = self._config.skip_inputs_layers
            skip_outputs_layers = self._config.skip_outputs_layers
            if ((not skip_inputs_layers) and (not skip_outputs_layers)):
                return False
            fs = []
            if skip_outputs_layers:
                forward_traverse(f, fs)
            fs = list(set([func.info.type_name for func in fs]))
            # f counts as an output layer when none of the skip types appear
            # among its descendants.
            is_output_layer = (True if skip_outputs_layers else False)
            for skl in skip_outputs_layers:
                if (skl in fs):
                    is_output_layer = False
                    break
            fs = []
            if skip_inputs_layers:
                backward_traverse(f, fs)
            is_input_layer = (True if skip_inputs_layers else False)
            fs = list(set([func.info.type_name for func in fs]))
            for skl in skip_inputs_layers:
                if (skl in fs):
                    is_input_layer = False
                    break
            for skl in skip_inputs_layers:
                if ((f.info.type_name == skl) and is_input_layer):
                    return True
            for skl in skip_outputs_layers:
                if ((f.info.type_name == skl) and is_output_layer):
                    return True
            return False

        fn = f.info.type_name
        cfg = self._config
        # Sink/BatchNormalization are never recorded.
        if ((fn == 'Sink') or (fn == 'BatchNormalization')):
            return False
        record_layers = cfg.record_layers
        if (record_layers and (fn not in record_layers)):
            return False
        if is_skip_layer(f):
            return False
        return True

    def share_recorder(self, f, inputs, new_inputs, cfg):
        """For multi-input joins (Add2/Concatenate), make all inputs share the
        recorder of the lowest-rank input so their ranges are recorded jointly."""
        fn = f.info.type_name
        recorder_activation = cfg.recorder_activation
        recorder_weight = cfg.recorder_weight
        axes = ([3] if cfg.channel_last else [1])
        if (fn in ['Add2', 'Concatenate']):
            # Find the input with the smallest rank; its recorder is shared.
            idx = 0
            min_rank = inputs[0].rank
            for (i, input_var) in enumerate(new_inputs[1:]):
                if (input_var.rank < min_rank):
                    idx = (i + 1)
                    min_rank = input_var.rank
            shared_name = 'x0'
            scope = self.get_parameter_scope(new_inputs[idx].parent.inputs[1])
            for (i, input_var) in enumerate(new_inputs):
                if (i == idx):
                    continue
                # Re-record the raw input under the shared scope/name.
                input_var = input_var.parent.inputs[0]
                with nn.parameter_scope(scope):
                    input_var = recorder_activation()(input_var, axes=axes, training=self._training, name=shared_name)
                new_inputs[i] = input_var
        return new_inputs

    def add_recorder(self, f, inputs, cfg):
        """Wrap activations and weights of f with recorder functions; returns
        the new input list."""
        fn = f.info.type_name
        function_rank = self.get_function_rank(f)
        scope = '{}-{}'.format(fn, function_rank)
        axes = ([3] if cfg.channel_last else [1])
        recorder_activation = cfg.recorder_activation
        recorder_weight = cfg.recorder_weight
        # params_idx separates activations (before) from parameters (after).
        params_idx = 1
        if (fn in ['Concatenate', 'Stack']):
            params_idx = len(inputs)
        if (fn in self._fct_bin_set):
            params_idx = 2
        new_inputs = []
        for (i, input_var) in enumerate(inputs[:params_idx]):
            # Reuse an existing recorder on this variable when present.
            fref = input_var.function_references
            if (fref and (fref[0].info.type_name == cfg.recorder_activation().name())):
                input_var = fref[0].outputs[0]
            else:
                with nn.parameter_scope(scope):
                    parent = input_var.parent
                    if (parent and (parent.info.type_name == recorder_activation().name())):
                        input_var = input_var
                    else:
                        input_var = recorder_activation()(input_var, axes=axes, training=self._training, name='x{}'.format(i))
            new_inputs.append(input_var)
        for (i, input_parameter) in enumerate(inputs[params_idx:]):
            with nn.parameter_scope(scope):
                input_parameter = recorder_weight()(input_parameter, axes=axes, training=self._training, name='w{}'.format(i))
            new_inputs.append(input_parameter)
        return new_inputs

    def modify(self, f, inputs):
        """Rebuild f with recorders on its inputs and output; returns the new
        output variable, or None to keep f unchanged."""
        if (not self.check(f)):
            return
        fn = f.info.type_name
        cfg = self._config
        axes = ([3] if cfg.channel_last else [1])
        recorder_activation = cfg.recorder_activation
        new_inputs = self.add_recorder(f, inputs, cfg)
        new_inputs = self.share_recorder(f, inputs, new_inputs, cfg)
        h = self._modify_as_same(f, new_inputs)
        # Record the output under the scope of the consumer function.
        next_func = f.outputs[0].function_references[0]
        next_func_rank = self.get_function_rank(next_func)
        scope = '{}-{}'.format(next_func.info.type_name, next_func_rank)
        with nn.parameter_scope(scope):
            if (cfg.recorder_position == cfg.RecorderPosition.BOTH):
                h = recorder_activation()(h, axes=axes, training=self._training, name='x0')
        return h
def test_prepare_grayscale_input_2D():
    """Shape validation: only 2-D (or trivially squeezable 3-D) arrays pass."""
    # These shapes must be rejected.
    for bad_shape in ((3, 3, 3), (3, 1), (3, 1, 1)):
        with pytest.raises(ValueError):
            _prepare_grayscale_input_2D(np.zeros(bad_shape))
    # These shapes must be accepted without raising.
    for ok_shape in ((3, 3), (3, 3, 1), (1, 3, 3)):
        _prepare_grayscale_input_2D(np.zeros(ok_shape))
def add_ConnectorServicer_to_server(servicer, server):
    """Register every Connector service RPC handler on ``server``."""
    unary_unary = grpc.unary_unary_rpc_method_handler
    rpc_method_handlers = {
        # The status stream is the only server-streaming RPC.
        'AllianceStatusStream': grpc.unary_stream_rpc_method_handler(
            servicer.AllianceStatusStream,
            request_deserializer=fedn__pb2.ClientAvailableMessage.FromString,
            response_serializer=fedn__pb2.Status.SerializeToString,
        ),
        'SendStatus': unary_unary(
            servicer.SendStatus,
            request_deserializer=fedn__pb2.Status.FromString,
            response_serializer=fedn__pb2.Response.SerializeToString,
        ),
        'ListActiveClients': unary_unary(
            servicer.ListActiveClients,
            request_deserializer=fedn__pb2.ListClientsRequest.FromString,
            response_serializer=fedn__pb2.ClientList.SerializeToString,
        ),
        'AcceptingClients': unary_unary(
            servicer.AcceptingClients,
            request_deserializer=fedn__pb2.ConnectionRequest.FromString,
            response_serializer=fedn__pb2.ConnectionResponse.SerializeToString,
        ),
        'SendHeartbeat': unary_unary(
            servicer.SendHeartbeat,
            request_deserializer=fedn__pb2.Heartbeat.FromString,
            response_serializer=fedn__pb2.Response.SerializeToString,
        ),
        'ReassignClient': unary_unary(
            servicer.ReassignClient,
            request_deserializer=fedn__pb2.ReassignRequest.FromString,
            response_serializer=fedn__pb2.Response.SerializeToString,
        ),
        'ReconnectClient': unary_unary(
            servicer.ReconnectClient,
            request_deserializer=fedn__pb2.ReconnectRequest.FromString,
            response_serializer=fedn__pb2.Response.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler('grpc.Connector', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
def fuzzer_bitmap_diff(fuzzers, before_fuzzer_info, after_fuzzer_info):
    """Coverage each fuzzer gained beyond the prior global bitmap.

    Returns a dict mapping fuzzer name to the elements of its 'after'
    bitmap that were not already in the shared 'before' global bitmap.
    """
    baseline = before_fuzzer_info['global_bitmap']
    per_fuzzer = after_fuzzer_info['bitmap']
    return {name: (per_fuzzer[name] - baseline) for name in fuzzers}
def simulate_with_timeout(experiment_id, policy_name, throughputs_file, per_instance_type_prices_dir, available_clouds, assign_SLOs, cluster_spec, lam, seed, interval, fixed_job_duration, generate_multi_gpu_jobs, enable_global_queue, num_total_jobs, solver, log_dir, timeout, verbose, num_gpus_per_server, ideal, num_sub_problems):
    """Run one scheduler simulation (optionally under a wall-clock timeout).

    Scheduler stdout/stderr is redirected into a per-experiment log file.
    On timeout the metrics are reported as inf (utilization pinned to 1.0).

    Returns:
        (average_jct, utilization) for this run.
    """
    # Stagger parallel experiment start-ups.
    time.sleep(random.uniform(0, 5))
    num_total_jobs_str = ('num_total_jobs=%d.log' % num_total_jobs)
    cluster_spec_str = ('v100:%d|p100:%d|k80:%d' % (cluster_spec['v100'], cluster_spec['p100'], cluster_spec['k80']))
    # Split the fixed solver thread budget across sub-problems.
    num_threads = (32 // num_sub_problems)
    policy = utils.get_policy(policy_name, seed=seed, solver=solver, num_threads=num_threads)
    if verbose:
        current_time = datetime.datetime.now()
        print(('[%s] [Experiment ID: %2d] Configuration: cluster_spec=%s, policy=%s, seed=%d, num_total_jobs=%d, num_sub_problems=%d' % (current_time, experiment_id, cluster_spec_str, policy.name, seed, num_total_jobs, num_sub_problems)))
    # All scheduler output goes to the per-experiment log file.
    with open(os.path.join(log_dir, num_total_jobs_str), 'w') as f:
        with contextlib.redirect_stdout(f), contextlib.redirect_stderr(f):
            sched = scheduler.Scheduler(policy, throughputs_file=throughputs_file, seed=seed, time_per_iteration=interval, per_instance_type_prices_dir=per_instance_type_prices_dir, available_clouds=available_clouds, assign_SLOs=assign_SLOs, enable_global_queue=enable_global_queue, simulate=True, num_sub_problems=num_sub_problems)
            # NOTE(review): recomputed with an identical value — redundant.
            cluster_spec_str = ('v100:%d|p100:%d|k80:%d' % (cluster_spec['v100'], cluster_spec['p100'], cluster_spec['k80']))
            if (timeout is None):
                sched.simulate(cluster_spec, lam=lam, fixed_job_duration=fixed_job_duration, generate_multi_gpu_jobs=generate_multi_gpu_jobs, num_total_jobs=num_total_jobs, num_gpus_per_server=num_gpus_per_server, ideal=ideal)
                average_jct = sched.get_average_jct()
                utilization = sched.get_cluster_utilization()
                makespan = sched.get_current_timestamp()
                total_cost = sched.get_total_cost()
            else:
                try:
                    func_timeout(timeout, sched.simulate, args=(cluster_spec,), kwargs={'lam': lam, 'fixed_job_duration': fixed_job_duration, 'generate_multi_gpu_jobs': generate_multi_gpu_jobs, 'num_total_jobs': num_total_jobs, 'num_gpus_per_server': num_gpus_per_server, 'ideal': ideal})
                    average_jct = sched.get_average_jct()
                    utilization = sched.get_cluster_utilization()
                    makespan = sched.get_current_timestamp()
                    total_cost = sched.get_total_cost()
                except FunctionTimedOut:
                    # Timed-out runs report inf metrics.
                    average_jct = float('inf')
                    utilization = 1.0
                    makespan = float('inf')
                    total_cost = float('inf')
    # Results are printed to the real stdout, after the redirection ends.
    if verbose:
        current_time = datetime.datetime.now()
        print(('[%s] [Experiment ID: %2d] Results: average JCT=%f, utilization=%f, makespan=%f, total_cost=$%.2f' % (current_time, experiment_id, average_jct, utilization, makespan, total_cost)))
    sched.shutdown()
    return (average_jct, utilization)
# NOTE(review): the bare call below looks like a mangled decorator
# (probably ``@fpga_test()`` from dace's test utilities) — confirm.
_test()
def test_axpy_fpga_array():
    """Run the AXPY FPGA test over two (alpha, veclen, dtype) configurations."""
    test_configs = [(0.5, 1, dace.float32), (1.0, 4, dace.float64)]
    return run_test(test_configs, 'fpga_array')
class VGG16FeatureExtractor(nn.Module):
    """Frozen first three VGG-16 feature stages, used as a perceptual backbone.

    ``forward`` returns the activations after each of the three stages.
    """

    def __init__(self):
        super().__init__()
        vgg16 = models.vgg16(pretrained=True)
        features = vgg16.features
        # Stage boundaries within torchvision's VGG-16 feature stack.
        self.enc_1 = nn.Sequential(*[features[i] for i in range(0, 5)])
        self.enc_2 = nn.Sequential(*[features[i] for i in range(5, 10)])
        self.enc_3 = nn.Sequential(*[features[i] for i in range(10, 17)])
        # Feature extractor only — freeze every parameter.
        for stage_idx in range(1, 4):
            stage = getattr(self, 'enc_{:d}'.format(stage_idx))
            for param in stage.parameters():
                param.requires_grad = False

    def forward(self, image):
        """Return [stage1_out, stage2_out, stage3_out] for ``image``."""
        outputs = [image]
        for stage_idx in range(1, 4):
            stage = getattr(self, 'enc_{:d}'.format(stage_idx))
            outputs.append(stage(outputs[-1]))
        return outputs[1:]
def make_act(act='ReLU', **kwargs):
    """Build an activation module by name.

    Args:
        act: activation name ('ReLU', 'ReLU6', 'PReLU', 'LeakyReLU',
            'H_Sigmoid', 'Sigmoid', 'TanH', 'H_Swish', 'Swish', 'Mish'),
            or '' for no activation.
        **kwargs: 'inplace' (default True) is honored where supported.

    Returns:
        The constructed nn.Module, or None when ``act`` is empty.

    Raises:
        KeyError: for unknown activation names (unchanged from before).
    """
    inplace = kwargs.pop('inplace', True)
    if len(act) == 0:
        return None
    # Factories instead of instances: the original built every activation
    # module eagerly just to index one of them.
    factories = {
        'ReLU': lambda: nn.ReLU(inplace=inplace),
        'ReLU6': lambda: nn.ReLU6(inplace=inplace),
        'PReLU': lambda: nn.PReLU(),
        'LeakyReLU': lambda: nn.LeakyReLU(inplace=inplace),
        'H_Sigmoid': lambda: nn.Hardsigmoid(),
        'Sigmoid': lambda: nn.Sigmoid(),
        'TanH': lambda: nn.Tanh(),
        'H_Swish': lambda: nn.Hardswish(),
        'Swish': lambda: ops.Swish(),
        'Mish': lambda: ops.Mish(),
    }
    return factories[act]()
def _swig_setattr_nondynamic_method(set):
def set_attr(self, name, value):
if (name == 'thisown'):
return self.this.own(value)
if (hasattr(self, name) or (name == 'this')):
set(self, name, value)
else:
raise AttributeError(('You cannot add attributes to %s' % self))
return set_attr |
class SourceCheckpoint(Callback):
    """Callback that snapshots the bare source-model weights at checkpoint time."""
    # NOTE(review): looks like a mangled '@rank_zero_only' decorator — confirm.
    _zero_only
    def on_save_checkpoint(self, trainer, pl_module, checkpoint):
        """Write ``source-<dataset>-<epoch>.pth`` under ``source_checkpoints/``."""
        dataset_name = pl_module.hparams.training_dataset.name
        checkpoint_filename = ('-'.join(['source', dataset_name, str(trainer.current_epoch)]) + '.pth')
        save_dir = os.path.join(trainer.weights_save_path, 'source_checkpoints')
        os.makedirs(save_dir, exist_ok=True)
        # Only the model's state dict — not the full Lightning checkpoint.
        torch.save(pl_module.model.state_dict(), os.path.join(save_dir, checkpoint_filename))
# NOTE(review): looks like a mangled '@_unique' decorator line — confirm.
_unique
def uninstallation_paths(dist):
    """Yield every installed path from the dist's RECORD file.

    For each ``.py`` file, also yield the corresponding ``.pyc`` and
    ``.pyo`` paths so compiled artifacts are removed too.
    """
    record_rows = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
    for row in record_rows:
        path = os.path.join(dist.location, row[0])
        yield path
        if not path.endswith('.py'):
            continue
        directory, filename = os.path.split(path)
        stem = filename[:-3]
        yield os.path.join(directory, stem + '.pyc')
        yield os.path.join(directory, stem + '.pyo')
def normalized_fidelity(u: tf.Tensor, u_hat: tf.Tensor):
    """Squared-trace fidelity of ``u_hat`` against ``u``, normalized by
    ``u_hat``'s self-overlap.

    Computes |tr(u_hat^dagger u)|^2 / |tr(u_hat^dagger u_hat)|^2 via the
    helper ``trf(x, y) = Re(tr(x^dagger y))^2 + Im(tr(x^dagger y))^2``.
    """
    def trf(x: tf.Tensor, y=None):
        y = x if y is None else y
        # BUG FIX: the original line read
        # ``tf.linalg.trace((tf.transpose(tf.math.conj(x)) y))`` — a syntax
        # error with the matrix product operator missing. The intended
        # operation is tr(x^dagger @ y).
        trace = tf.linalg.trace(tf.matmul(tf.transpose(tf.math.conj(x)), y))
        return (tf.math.real(trace) ** 2) + (tf.math.imag(trace) ** 2)
    return trf(u_hat, u) / trf(u_hat)
def roth_ruckenstein_root_finder(p, maxd=None, precision=None):
    """Roots of ``p`` via Sage's Roth-Ruckenstein algorithm.

    A bivariate polynomial is first collapsed to a univariate polynomial
    in its second generator. ``precision`` is accepted but unused.
    """
    generators = p.parent().gens()
    if len(generators) == 2:
        p = p.polynomial(generators[1])
    return p.roots(multiplicities=False, degree_bound=maxd, algorithm='Roth-Ruckenstein')
def craft_log_config(env_cfg, train_cfg, wandb_cfg, what_to_log):
    """Populate ``wandb_cfg`` in place with values pulled from config trees.

    Each entry of ``what_to_log`` maps a log key to a location path whose
    first element names the source tree ('train_cfg' or 'env_cfg') and whose
    remainder is the nested lookup path handed to ``recursive_value_find``.

    Raises:
        ValueError: if a location names an unknown source tree.
            (ValueError subclasses Exception, so callers catching the old
            bare Exception still work.)
    """
    sources = {'train_cfg': train_cfg, 'env_cfg': env_cfg}
    for log_key, location in what_to_log.items():
        tree_name = location[0]
        if tree_name not in sources:
            raise ValueError(f"You didn't specify a valid cfg file in location: {location}")
        wandb_cfg[log_key] = recursive_value_find(sources[tree_name], location[1:])
class env():
    """Context manager bracketing a region with start()/stop() profiling calls."""

    def __init__(self, fn_name, use_stack=True):
        # Name the region is recorded under, and whether call-stack context is used.
        self.fn_name = fn_name
        self.use_stack = use_stack

    def __enter__(self):
        start(self.fn_name, use_stack=self.use_stack)

    def __exit__(self, exc_type, exc_value, traceback):
        # Always stop the region, even when the body raised.
        stop(self.fn_name, use_stack=self.use_stack)
class Node(ABC):
    """Abstract AST node tied to a grammar ``Production``.

    Concrete subclasses identify themselves through the ``is_*`` predicates
    and expose their children and an s-expression rendering.
    """
    # The production this node was derived from.
    _prod: Production
    def __init__(self, prod: Production):
        self._prod = prod
    def production(self) -> Production:
        """The grammar production behind this node."""
        return self._prod
    def type(self) -> Type:
        """The node's type: the left-hand side of its production."""
        return self._prod.lhs
    def is_leaf(self) -> bool:
        """True iff this node has no children."""
        raise NotImplementedError
    def is_enum(self) -> bool:
        """True iff this node is an enum constant."""
        raise NotImplementedError
    def is_param(self) -> bool:
        """True iff this node is a parameter reference."""
        raise NotImplementedError
    def is_apply(self) -> bool:
        """True iff this node is a function application."""
        raise NotImplementedError
    def children(self) -> List['Node']:
        """Child nodes, in order (empty for leaves)."""
        raise NotImplementedError
    def to_sexp(self):
        """S-expression representation of the subtree rooted here."""
        raise NotImplementedError
def conv3d(x, w, name, s=1, pd='SAME'):
    """3-D convolution of ``x`` with kernel ``w``, stride ``s`` per spatial dim."""
    return tf.nn.convolution(x, w, padding=pd, strides=[s, s, s], name=name)
def get_fluents(task):
    """Predicates of ``task`` that occur in some action effect (the fluents).

    Returns them in the order they appear in ``task.predicates``.
    """
    effect_predicates = {
        eff.literal.predicate
        for action in task.actions
        for eff in action.effects
    }
    return [pred for pred in task.predicates if pred.name in effect_predicates]
def GroundSeg(depth_image, color_image, stride=160):
    """Scan ground verticals every ``stride`` rows across the image.

    Returns:
        (temp_image, virtual_lane_available): the image returned by the last
        ``verticalGround`` call (None if no row was scanned) and the list of
        dead-end flags, one per scanned row.
    """
    global ROW
    virtual_lane_available = []
    # Defensive default: the original crashed with UnboundLocalError when
    # the loop body never ran (stride >= ROW).
    temp_image = None
    for i in range(stride, ROW, stride):
        # The original special-cased i == ROW / 2, but both branches were
        # byte-identical calls — dead code, removed.
        temp_image, dead_end = verticalGround(depth_image, color_image, i, plot=False)
        virtual_lane_available.append(dead_end)
    return (temp_image, virtual_lane_available)
def distilhubert(refresh=False, *args, **kwargs):
    """Alias entry point that forwards everything to the base DistilHuBERT."""
    # 'refresh' keeps its keyword position so existing callers are unaffected.
    return distilhubert_base(*args, refresh=refresh, **kwargs)
class GenericGraphQuery(SQLQuery):
    """SQLQuery specialized to run an arbitrary query string on a GraphDatabase."""

    def __init__(self, query_string, database=None, param_tuple=None):
        # Default to a fresh graph database when none is supplied.
        database = GraphDatabase() if database is None else database
        if not isinstance(database, GraphDatabase):
            raise TypeError('%s is not a valid GraphDatabase' % database)
        SQLQuery.__init__(self, database, query_string, param_tuple)
def save_sparse_graph_to_npz(filepath, sparse_graph):
    """Serialize a SparseGraph into a ``.npz`` archive at ``filepath``.

    The adjacency matrix is stored as CSR component arrays. The attribute
    and label matrices are stored as CSR components when sparse, or as a
    single dense array when they are ndarrays. Optional name/metadata
    fields are included only when present. A ``.npz`` suffix is appended
    to ``filepath`` if missing.
    """
    def _store(out, prefix, matrix, dense_key):
        # Sparse matrices are decomposed into CSR parts; ndarrays stored whole.
        if sp.isspmatrix(matrix):
            out[prefix + '_data'] = matrix.data
            out[prefix + '_indices'] = matrix.indices
            out[prefix + '_indptr'] = matrix.indptr
            out[prefix + '_shape'] = matrix.shape
        elif isinstance(matrix, np.ndarray):
            out[dense_key] = matrix

    # Adjacency is always stored in sparse component form.
    data_dict = {
        'adj_data': sparse_graph.adj_matrix.data,
        'adj_indices': sparse_graph.adj_matrix.indices,
        'adj_indptr': sparse_graph.adj_matrix.indptr,
        'adj_shape': sparse_graph.adj_matrix.shape,
    }
    _store(data_dict, 'attr', sparse_graph.attr_matrix, 'attr_matrix')
    _store(data_dict, 'labels', sparse_graph.labels, 'labels')
    for optional in ('node_names', 'attr_names', 'class_names', 'metadata'):
        value = getattr(sparse_graph, optional)
        if value is not None:
            data_dict[optional] = value
    if not filepath.endswith('.npz'):
        filepath += '.npz'
    np.savez(filepath, **data_dict)
# NOTE(review): the bare call below looks like a mangled decorator
# (probably dace's ``@compare_numpy_output(...)`` test helper) — confirm.
_numpy_output(positive=True, check_dtype=True)
def test_ufunc_log_c(A: dace.complex64[10]):
    # Element-wise natural log over a complex64 vector via the numpy ufunc.
    return np.log(A)
def append_data(dataset: str, version_target: str, version_from: str, interval=0.2):
    """Append the first ``interval`` fraction of ``version_from``'s rows onto
    ``version_target``, persist the result (pickle + csv) and load the table.

    Args:
        dataset: dataset directory name under DATA_ROOT.
        version_target: base version whose pickle receives the rows.
        version_from: version supplying the appended rows.
        interval: fraction (0, 1] of ``version_from`` rows to append;
            values above 1 abort with a log message.
    """
    df_target = pd.read_pickle((DATA_ROOT / dataset) / f'{version_target}.pkl')
    df_from = pd.read_pickle((DATA_ROOT / dataset) / f'{version_from}.pkl')
    row_num = len(df_from)
    lo = 0
    hi = lo + interval
    if hi > 1:
        L.info('Appending Fail! Batch size is too big!')
        return
    L.info(f'Start appending {version_target} with {version_from} in [{lo}, {hi}]')
    # DataFrame.append was removed in pandas 2.0; pd.concat is the drop-in
    # replacement with identical semantics for this call.
    chunk = df_from[int(lo * row_num):int(hi * row_num)]
    df_target = pd.concat([df_target, chunk], ignore_index=True, sort=False)
    out_stem = f'{version_target}+{version_from}_{hi:.1f}'
    pd.to_pickle(df_target, (DATA_ROOT / dataset) / f'{out_stem}.pkl')
    df_target.to_csv((DATA_ROOT / dataset) / f'{out_stem}.csv', index=False)
    load_table(dataset, out_stem)
class MOT19Wrapper(MOT17Wrapper):
    """Dataset wrapper selecting MOT19 (CVPR19) sequences for a given split.

    ``split`` may be 'train', 'test', 'all', or a sequence suffix such
    that 'MOT19-<split>' names a known sequence.
    """

    def __init__(self, split, dataloader):
        train_sequences = ['MOT19-01', 'MOT19-02', 'MOT19-03', 'MOT19-05']
        test_sequences = ['MOT19-04', 'MOT19-06', 'MOT19-07', 'MOT19-08']
        all_sequences = train_sequences + test_sequences
        if split == 'train':
            sequences = train_sequences
        elif split == 'test':
            sequences = test_sequences
        elif split == 'all':
            sequences = all_sequences
        elif f'MOT19-{split}' in all_sequences:
            # Single-sequence selection by suffix, e.g. split='02'.
            sequences = [f'MOT19-{split}']
        else:
            raise NotImplementedError('MOT19CVPR split not available.')
        self._data = [MOTSequence(seq, 'MOT19', **dataloader) for seq in sequences]
# NOTE(review): the parenthesized keyword below looks like the argument list
# of a mangled deprecation decorator (e.g. ``@np.deprecate``) — confirm
# against the original scipy source.
(message='scipy.misc.unindent_string is deprecated in Scipy 1.3.0')
def unindent_string(docstring):
    # Deprecated shim: delegate to the private doc-loader implementation.
    return _ld.unindent_string(docstring)
def flatten_dt(dt):
    """Merge a collection of dicts into one (later entries win on key clashes).

    ``dt`` may be a dict of dicts (its values are merged) or an iterable
    of dicts. An empty input raises TypeError, as ``reduce`` without an
    initializer does.
    """
    parts = dt.values() if isinstance(dt, dict) else dt
    return reduce(lambda merged, part: {**merged, **part}, parts)
def get_non_linearity(layer_type='relu'):
    """Return a factory (functools.partial) for the requested activation class.

    Raises:
        NotImplementedError: for unsupported ``layer_type`` values.
    """
    factories = {
        'relu': functools.partial(nn.ReLU, inplace=True),
        'lrelu': functools.partial(nn.LeakyReLU, negative_slope=0.2, inplace=False),
        'elu': functools.partial(nn.ELU, inplace=True),
    }
    try:
        return factories[layer_type]
    except KeyError:
        raise NotImplementedError('nonlinearity activitation [%s] is not found' % layer_type)
# NOTE(review): the parenthesized keyword below looks like the argument list
# of a mangled ``@pytest.fixture(scope='module')`` decorator — confirm.
(scope='module')
def clean_duplication_ui() -> UserInterface:
    # Fixture: a small 'city' column full of near-duplicate spellings
    # (plus one NaN) for exercising the clean-duplication UI.
    df = pd.DataFrame({'city': ['Quebec', 'Quebec', 'Quebec', 'Quebec', 'Quebec', 'quebec', 'vancouver', 'vancouver', 'vancouverr', 'Vancouver', 'Vancouver', 'Vancouver', 'van', 'Ottowa', 'Ottowa', 'otowa', 'hello', np.nan]})
    return UserInterface(df, 'city', 'df', 5)
# NOTE(review): the bare call below looks like a mangled registry decorator
# (e.g. ``@registry.register_encoder('transformer')``) — confirm.
_encoder('transformer')
class TransformerEncoder(Encoder):
    """Huggingface transformer (BERT-style) encoder with optional extra
    segment embeddings and random-initialization support."""

    class Config(Encoder.Config):
        # Registry name plus HF model hyperparameters.
        name: str = 'transformer'
        num_segments: int = 2
        bert_model_name: str = 'bert-base-uncased'
        hidden_size: int = 768
        num_hidden_layers: int = 12
        num_attention_heads: int = 12
        output_attentions: bool = False
        output_hidden_states: bool = False
        random_init: bool = False

    def __init__(self, config: Config, *args, **kwargs):
        super().__init__()
        self.config = config
        hf_params = {'config': self._build_encoder_config(config)}
        should_random_init = self.config.get('random_init', False)
        # BERT checkpoints go through the TorchScript-compatible
        # BertModelJit; everything else uses the generic AutoModel path.
        if self.config.bert_model_name.startswith('bert-'):
            if should_random_init:
                self.module = BertModelJit(**hf_params)
            else:
                self.module = BertModelJit.from_pretrained(self.config.bert_model_name, **hf_params)
        elif should_random_init:
            self.module = AutoModel.from_config(**hf_params)
        else:
            self.module = AutoModel.from_pretrained(self.config.bert_model_name, **hf_params)
        self.embeddings = self.module.embeddings
        # Keep our config around; self.config now exposes the HF config.
        self.original_config = self.config
        self.config = self.module.config
        self._init_segment_embeddings()

    def _init_segment_embeddings(self):
        """Grow the token-type embedding table to ``num_segments`` rows.

        Rows 0-1 are copied from the pretrained table; every additional
        row is initialized with the mean of the pretrained rows.
        """
        if self.original_config.get('num_segments', None):
            num_segments = self.original_config.num_segments
            if hasattr(self.embeddings, 'token_type_embeddings'):
                new_embeds = nn.Embedding(num_segments, self.config.hidden_size)
                new_embeds.weight.data[:2].copy_(self.embeddings.token_type_embeddings.weight)
                # BUG FIX: the original iterated range(2, num_segments - 1),
                # leaving the LAST segment row randomly initialized.
                for idx in range(2, num_segments):
                    new_embeds.weight.data[idx].copy_(self.embeddings.token_type_embeddings.weight.data.mean(dim=0))
                self.embeddings.token_type_embeddings = new_embeds

    def _build_encoder_config(self, config: Config):
        # Materialize an HF config from the pretrained name plus our overrides.
        return AutoConfig.from_pretrained(config.bert_model_name, **OmegaConf.to_container(config))

    def forward(self, *args, return_sequence=False, **kwargs) -> Tensor:
        """Return the pooled output by default, or the full hidden-state
        sequence when ``return_sequence`` is True."""
        output = self.module(*args, **kwargs)
        return output[0] if return_sequence else output[1]
def _evaluate_markers(markers, environment):
    """Evaluate a parsed PEP 508 marker expression against ``environment``.

    ``markers`` is a flat sequence of sub-lists (parenthesized groups),
    (lhs, op, rhs) comparison triples, and 'and'/'or' connective strings.
    'and' binds tighter than 'or': each 'or' starts a new group, and the
    result is the disjunction of the conjunction of every group.
    """
    groups = [[]]
    for marker in markers:
        assert isinstance(marker, (list, tuple, string_types))
        if isinstance(marker, list):
            # Parenthesized sub-expression: evaluate recursively.
            groups[-1].append(_evaluate_markers(marker, environment))
        elif isinstance(marker, tuple):
            lhs, op, rhs = marker
            # Exactly one side is an environment Variable; resolve that side.
            if isinstance(lhs, Variable):
                result = _eval_op(_get_env(environment, lhs.value), op, rhs.value)
            else:
                result = _eval_op(lhs.value, op, _get_env(environment, rhs.value))
            groups[-1].append(result)
        else:
            assert marker in ['and', 'or']
            if marker == 'or':
                groups.append([])
    return any(all(item) for item in groups)
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    """Multi-step LR schedule with an initial warmup phase.

    During the first ``warmup_iters`` epochs the base LR is scaled by a
    warmup factor (constant, or linearly annealed from ``warmup_factor``
    up to 1); afterwards the LR is decayed by ``gamma`` at each milestone.
    """

    def __init__(self, optimizer, milestones, gamma=0.1, warmup_factor=(1.0 / 3), warmup_iters=5, warmup_method='linear', last_epoch=(- 1)):
        if (not (list(milestones) == sorted(milestones))):
            # BUG FIX: the original passed ``milestones`` as a second
            # ValueError argument instead of formatting it into the message.
            raise ValueError('Milestones should be a list of increasing integers. Got {}'.format(milestones))
        if (warmup_method not in ('constant', 'linear')):
            # BUG FIX: added the missing space/comma ("acceptedgot") in the message.
            raise ValueError("Only 'constant' or 'linear' warmup_method accepted, got {}".format(warmup_method))
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Current LR per param group: warmup scaling times milestone decay."""
        warmup_factor = 1
        if (self.last_epoch < self.warmup_iters):
            if (self.warmup_method == 'constant'):
                warmup_factor = self.warmup_factor
            elif (self.warmup_method == 'linear'):
                # Linearly interpolate from warmup_factor to 1 over warmup_iters.
                alpha = (float(self.last_epoch) / self.warmup_iters)
                warmup_factor = ((self.warmup_factor * (1 - alpha)) + alpha)
        # bisect_right counts how many milestones have passed.
        return [((base_lr * warmup_factor) * (self.gamma ** bisect_right(self.milestones, self.last_epoch))) for base_lr in self.base_lrs]
# NOTE(review): the call below looks like a mangled ``@validate_params``
# decorator from sklearn's parameter-validation utilities — confirm.
_params({'estimator': [HasMethods('fit')], 'scoring': [StrOptions(set(get_scorer_names())), callable, None], 'allow_none': ['boolean']}, prefer_skip_nested_validation=True)
def check_scoring(estimator, scoring=None, *, allow_none=False):
    """Resolve ``scoring`` into a scorer callable for ``estimator``.

    Strings are looked up in the scorer registry; callables are returned
    via get_scorer (after rejecting bare metric functions); None falls
    back to the estimator's own ``score`` method, or None/TypeError
    depending on ``allow_none``.
    """
    if isinstance(scoring, str):
        return get_scorer(scoring)
    if callable(scoring):
        # Bare sklearn metric functions take (y_true, y_pred), not
        # (estimator, X, y); reject them so users wrap with make_scorer.
        # ``module`` may be None, hence the hasattr guard.
        module = getattr(scoring, '__module__', None)
        if (hasattr(module, 'startswith') and module.startswith('sklearn.metrics.') and (not module.startswith('sklearn.metrics._scorer')) and (not module.startswith('sklearn.metrics.tests.'))):
            raise ValueError(('scoring value %r looks like it is a metric function rather than a scorer. A scorer should require an estimator as its first parameter. Please use `make_scorer` to convert a metric to a scorer.' % scoring))
        return get_scorer(scoring)
    if (scoring is None):
        if hasattr(estimator, 'score'):
            return _PassthroughScorer(estimator)
        elif allow_none:
            return None
        else:
            raise TypeError(("If no scoring is specified, the estimator passed should have a 'score' method. The estimator %r does not." % estimator))
# NOTE(review): the parenthesized tuple below looks like the argument list of
# a mangled Flask ``@app.route(...)`` decorator — confirm.
('/internal-server-errors/improper-input-type-handling', methods=['POST'])
def improper_input_type_handling():
    """Luhn-checksum validation endpoint that assumes 'number' is an integer.

    Judging by the route name, this is a deliberately fragile test endpoint:
    non-digit input (e.g. '12.5') makes ``int(d)`` raise ValueError, which
    surfaces as an unhandled 500.
    """
    data = request.json
    if ((not isinstance(data, dict)) or ('number' not in data)):
        return (jsonify({'success': False}), 400)
    digits = [int(d) for d in str(data['number'])]
    # Luhn: sum every other digit starting from the rightmost as-is...
    even_digits_sum = sum(digits[(- 1)::(- 2)])
    # ...and double the remaining digits, summing their digit sums.
    odd_digits_sum = sum((sum(divmod((d * 2), 10)) for d in digits[(- 2)::(- 2)]))
    checksum = (even_digits_sum + odd_digits_sum)
    is_valid = ((checksum % 10) == 0)
    return jsonify({'success': is_valid})
def test():
    """to_dataframe round-trips values through assorted awkward layouts."""
    # Flat numeric array.
    simple = ak.Array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5])
    assert (ak.operations.to_dataframe(simple)['values'].values.tolist() == [0.0, 1.1, 2.2, 3.3, 4.4, 5.5])
    # IndexedArray: the index re-orders / duplicates elements.
    index = ak.index.Index64(np.array([3, 3, 1, 5], dtype=np.int64))
    indexed = ak.Array(ak.contents.IndexedArray(index, simple.layout))
    assert (indexed.to_list() == [3.3, 3.3, 1.1, 5.5])
    assert (ak.operations.to_dataframe(indexed)['values'].values.tolist() == [3.3, 3.3, 1.1, 5.5])
    # RecordArray with unnamed fields -> columns '0', '1'.
    tuples = ak.Array(ak.contents.RecordArray([simple.layout, simple.layout], fields=None))
    assert (ak.operations.to_dataframe(tuples)['1'].values.tolist() == [0.0, 1.1, 2.2, 3.3, 4.4, 5.5])
    # Nested lists over the indexed content.
    offsets = ak.index.Index64(np.array([0, 1, 1, 3, 4], dtype=np.int64))
    nested = ak.Array(ak.contents.ListOffsetArray(offsets, indexed.layout))
    assert (ak.operations.to_dataframe(nested)['values'].values.tolist() == [3.3, 3.3, 1.1, 5.5])
    # Nested lists over the tuple records.
    offsets2 = ak.index.Index64(np.array([0, 3, 3, 4, 6], dtype=np.int64))
    nested2 = ak.Array(ak.contents.ListOffsetArray(offsets2, tuples.layout))
    assert (ak.operations.to_dataframe(nested2)['1'].values.tolist() == [0.0, 1.1, 2.2, 3.3, 4.4, 5.5])
    # Nested records become pandas MultiIndex columns.
    recrec = ak.Array([{'x': {'y': 1}}, {'x': {'y': 2}}, {'x': {'y': 3}}])
    assert (ak.operations.to_dataframe(recrec)[('x', 'y')].values.tolist() == [1, 2, 3])
    recrec2 = ak.Array([{'x': {'a': 1, 'b': 2}, 'y': {'c': 3, 'd': 4}}, {'x': {'a': 10, 'b': 20}, 'y': {'c': 30, 'd': 40}}])
    assert (ak.operations.to_dataframe(recrec2)[('y', 'c')].values.tolist() == [3, 30])
    recrec3 = ak.Array([{'x': 1, 'y': {'c': 3, 'd': 4}}, {'x': 10, 'y': {'c': 30, 'd': 40}}])
    assert (ak.operations.to_dataframe(recrec3)[('y', 'c')].values.tolist() == [3, 30])
    # Nested tuples use stringified positional column names.
    tuptup = ak.Array([(1.0, (1.1, 1.2)), (2.0, (2.1, 2.2)), (3.0, (3.1, 3.2))])
    assert (ak.operations.to_dataframe(tuptup)[('1', '0')].values.tolist() == [1.1, 2.1, 3.1])
    # Records inside lists.
    recrec4 = ak.Array([[{'x': 1, 'y': {'c': 3, 'd': 4}}], [{'x': 10, 'y': {'c': 30, 'd': 40}}]])
    assert (ak.operations.to_dataframe(recrec4)[('y', 'c')].values.tolist() == [3, 30])
def test_runtime_assert():
    # NOTE(review): ``prog_runtime`` is likely meant to carry a
    # ``@dace.program`` decorator (lost in processing) so the assert is
    # checked inside generated code — confirm against the dace test suite.
    def prog_runtime(A: dace.float64[20]):
        assert (A[0] >= 0)
    # rand() is non-negative, so the assertion always holds.
    A = np.random.rand(20)
    prog_runtime(A)
# NOTE(review): the parenthesized string below looks like the argument of a
# mangled Flask ``@app.route('/normal')`` decorator — confirm.
('/normal')
def normal():
    """LDAP lookup endpoint driven entirely by request form fields.

    SECURITY: dc/username come straight from the request and are
    interpolated into both the base DN and the search filter without any
    escaping — a classic LDAP-injection surface (this appears to be a
    deliberate vulnerable test fixture).
    """
    dc = request.form['dc']
    username = request.form['username']
    search_filter = request.form['search_filter']
    server = 'ldap://localhost'
    # Base DN built from unsanitized user input.
    search_base = 'dc={},dc=com'.format(dc)
    ldap_conn = ldap.initialize(server)
    ldap_conn.protocol_version = ldap.VERSION3
    # NOTE(review): 'password' is not defined anywhere in view — confirm it
    # is a module-level constant, otherwise this raises NameError.
    ldap_conn.simple_bind_s(username, password)
    # The form's search_filter is overwritten with one built from the raw
    # base DN and username (still unescaped).
    search_filter = '({}={})'.format(search_base, username)
    search_scope = 'sub'
    attrs = ['uid', 'mail']
    search_results = ldap_conn.search_s(search_base, search_scope, search_filter, attrs)
    user = search_results[0][0][1]
    if (user is not None):
        mail = search_results[0][0][2]
        ldap_conn.unbind()
        return '{}, {}'.format(user, mail)
    else:
        ldap_conn.unbind()
        return '{}, {}'.format('not found', 'not found')
def auresize(audio_arr, size, channel_first=False):
    """Resize an audio array to ``size`` = (num_samples, num_channels) via pydub.

    Raises ValueError for multi-to-multi channel conversion with differing
    channel counts, since pydub's set_channels only supports mono<->multi.
    """
    audio = _auresize_before(audio_arr, size, channel_first)
    target_samples = size[0]
    target_channels = size[1]
    source_samples = audio.shape[0]
    source_channels = audio.shape[1]
    if (source_channels != 1 and target_channels != 1
            and source_channels != target_channels):
        raise ValueError('pydub set_channels only supports mono-to-multi channel and multi-to-mono channel conversion')
    segment = get_audiosegment_from_nparray(audio)
    # Choose a frame rate so slicing target_samples frames covers the clip;
    # 48000 is assumed as the base rate (TODO confirm), +1 guards rounding down.
    resample_rate = math.floor(((48000 * target_samples) / source_samples) + 1)
    segment = segment.set_frame_rate(resample_rate)
    segment = segment.set_channels(target_channels)
    segment = segment.get_sample_slice(0, target_samples)
    return get_nparray_from_pydub(segment)
def _construct_loader(cfg, split, batch_size, shuffle, drop_last):
    """Build a DataLoader for the dataset named in ``cfg.DATA.NAME``.

    'vtab-*' datasets route through the TF-backed dataset class; everything
    else must be registered in _DATASET_CATALOG. A DistributedSampler is
    used when running on more than one GPU, in which case ``shuffle`` is
    ignored (the sampler does the shuffling).
    """
    dataset_name = cfg.DATA.NAME
    if dataset_name.startswith('vtab-'):
        # Imported lazily so TensorFlow is only required for vtab datasets.
        from .datasets.tf_dataset import TFDataset
        dataset = TFDataset(cfg, split)
    else:
        # Membership test directly on the dict (no redundant .keys()).
        assert dataset_name in _DATASET_CATALOG, "Dataset '{}' not supported".format(dataset_name)
        dataset = _DATASET_CATALOG[dataset_name](cfg, split)
    sampler = DistributedSampler(dataset) if cfg.NUM_GPUS > 1 else None
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=(False if sampler else shuffle),
        sampler=sampler,
        num_workers=cfg.DATA.NUM_WORKERS,
        pin_memory=cfg.DATA.PIN_MEMORY,
        drop_last=drop_last,
    )
    return loader
def test_dqn():
    # Smoke test: the DQN explorer constructs and runs end-to-end on the
    # fake model/landscape fixtures.
    explorer = baselines.explorers.DQN(model=fakeModel, rounds=3, sequences_batch_size=5, model_queries_per_batch=20, starting_sequence=starting_sequence, alphabet='ATCG')
    explorer.run(fakeLandscape)
class ResourceManager():
    """Per-node manager coordinating quantum memories and entanglement protocols.

    Applies rules (via the rule manager) to memories as their state changes,
    pairs local protocols with remote counterparts through REQUEST/RESPONSE
    messages, and recycles memories when protocols finish, fail or expire.
    """
    def __init__(self, owner: 'QuantumRouter', memory_array_name: str):
        """Wire up the memory and rule managers for ``owner``'s memory array."""
        self.name = 'resource_manager'
        self.owner = owner
        self.memory_manager = MemoryManager(owner.components[memory_array_name])
        self.memory_manager.set_resource_manager(self)
        self.rule_manager = RuleManager()
        self.rule_manager.set_resource_manager(self)
        # pending: awaiting a remote RESPONSE; waiting: available for pairing.
        self.pending_protocols = []
        self.waiting_protocols = []
        self.memory_to_protocol_map = {}
    def load(self, rule: 'Rule') -> bool:
        """Install ``rule`` and immediately fire it on any currently valid memories."""
        log.logger.info('load rule {}'.format(rule))
        self.rule_manager.load(rule)
        for memory_info in self.memory_manager:
            memories_info = rule.is_valid(memory_info)
            if (len(memories_info) > 0):
                rule.do(memories_info)
                for info in memories_info:
                    info.to_occupied()
        return True
    def expire(self, rule: 'Rule') -> None:
        """Remove ``rule``; tear down its protocols and reset their memories to RAW."""
        log.logger.info('expired rule {}'.format(rule))
        created_protocols = self.rule_manager.expire(rule)
        while created_protocols:
            protocol = created_protocols.pop()
            # A protocol lives in exactly one of these three collections.
            if (protocol in self.waiting_protocols):
                self.waiting_protocols.remove(protocol)
            elif (protocol in self.pending_protocols):
                self.pending_protocols.remove(protocol)
            elif (protocol in self.owner.protocols):
                self.owner.protocols.remove(protocol)
            else:
                raise Exception('Unknown place of protocol')
            for memory in protocol.memories:
                self.update(protocol, memory, 'RAW')
    def update(self, protocol: 'EntanglementProtocol', memory: 'Memory', state: str) -> None:
        """Set ``memory`` to ``state``, detach it from ``protocol`` (if given),
        then re-evaluate all rules against the memory's new state.

        If no rule claims the memory, it is handed back to the node as idle.
        """
        self.memory_manager.update(memory, state)
        if protocol:
            # Re-point the memory's observer from the protocol back to its array.
            memory.detach(protocol)
            memory.attach(memory.memory_array)
            if (protocol in protocol.rule.protocols):
                protocol.rule.protocols.remove(protocol)
            if (protocol in self.owner.protocols):
                self.owner.protocols.remove(protocol)
            if (protocol in self.waiting_protocols):
                self.waiting_protocols.remove(protocol)
            if (protocol in self.pending_protocols):
                self.pending_protocols.remove(protocol)
        memo_info = self.memory_manager.get_info_by_memory(memory)
        for rule in self.rule_manager:
            memories_info = rule.is_valid(memo_info)
            if (len(memories_info) > 0):
                rule.do(memories_info)
                for info in memories_info:
                    info.to_occupied()
                return
        # No rule matched — the node may reuse the memory.
        self.owner.get_idle_memory(memo_info)
    def get_memory_manager(self):
        """Accessor for the memory manager."""
        return self.memory_manager
    def send_request(self, protocol: 'EntanglementProtocol', req_dst: str, req_condition_func: RequestConditionFunc, req_args: Arguments):
        """Queue ``protocol`` locally (req_dst None) or request a remote pairing."""
        # NOTE(review): attribute is named 'own' (not 'owner') — presumably the
        # protocol's convention; confirm against EntanglementProtocol.
        protocol.own = self.owner
        if (req_dst is None):
            # No remote peer needed: protocol just waits to be matched.
            self.waiting_protocols.append(protocol)
            return
        if (protocol not in self.pending_protocols):
            self.pending_protocols.append(protocol)
        memo_names = [memo.name for memo in protocol.memories]
        msg = ResourceManagerMessage(ResourceManagerMsgType.REQUEST, protocol=protocol.name, node=self.owner.name, memories=memo_names, req_condition_func=req_condition_func, req_args=req_args)
        self.owner.send_message(req_dst, msg)
        log.logger.info('{} network manager send {} message to {}'.format(self.owner.name, msg.msg_type.name, req_dst))
    def received_message(self, src: str, msg: 'ResourceManagerMessage') -> None:
        """Dispatch REQUEST / RESPONSE / RELEASE_PROTOCOL / RELEASE_MEMORY messages."""
        log.logger.info('{} receive {} message from {}'.format(self.name, msg.msg_type.name, src))
        if (msg.msg_type is ResourceManagerMsgType.REQUEST):
            # Try to pair a waiting local protocol with the remote request.
            protocol = msg.req_condition_func(self.waiting_protocols, msg.req_args)
            if (protocol is not None):
                protocol.set_others(msg.ini_protocol_name, msg.ini_node_name, msg.ini_memories_name)
                memo_names = [memo.name for memo in protocol.memories]
                new_msg = ResourceManagerMessage(ResourceManagerMsgType.RESPONSE, protocol=msg.ini_protocol_name, node=msg.ini_node_name, memories=msg.ini_memories_name, is_approved=True, paired_protocol=protocol.name, paired_node=self.owner.name, paired_memories=memo_names)
                self.owner.send_message(src, new_msg)
                self.waiting_protocols.remove(protocol)
                self.owner.protocols.append(protocol)
                protocol.start()
                return
            # No waiting protocol matched: reject the request.
            new_msg = ResourceManagerMessage(ResourceManagerMsgType.RESPONSE, protocol=msg.ini_protocol_name, node=msg.ini_node_name, memories=msg.ini_memories_name, is_approved=False, paired_protocol=None, paired_node=None, paired_memories=None)
            self.owner.send_message(src, new_msg)
        elif (msg.msg_type is ResourceManagerMsgType.RESPONSE):
            protocol_name = msg.ini_protocol_name
            protocol: Optional[EntanglementProtocol] = None
            for p in self.pending_protocols:
                if (p.name == protocol_name):
                    protocol = p
                    break
            else:
                # for/else: no pending protocol matched (it may have expired);
                # if the remote side already approved, release it there.
                if msg.is_approved:
                    self.release_remote_protocol(src, msg.paired_protocol)
                return
            if msg.is_approved:
                protocol.set_others(msg.paired_protocol, msg.paired_node, msg.paired_memories)
                if protocol.is_ready():
                    self.pending_protocols.remove(protocol)
                    self.owner.protocols.append(protocol)
                    protocol.own = self.owner
                    protocol.start()
            else:
                # Rejected: tear the protocol down and recycle its memories,
                # keeping ENTANGLED state when a remote peer still exists.
                protocol.rule.protocols.remove(protocol)
                for memory in protocol.memories:
                    memory.detach(protocol)
                    memory.attach(memory.memory_array)
                    info = self.memory_manager.get_info_by_memory(memory)
                    if (info.remote_node is None):
                        self.update(None, memory, 'RAW')
                    else:
                        self.update(None, memory, 'ENTANGLED')
                self.pending_protocols.remove(protocol)
        elif (msg.msg_type is ResourceManagerMsgType.RELEASE_PROTOCOL):
            for p in self.owner.protocols:
                if (p.name == msg.protocol):
                    p.release()
        elif (msg.msg_type is ResourceManagerMsgType.RELEASE_MEMORY):
            # Release whichever running protocol holds the named memory.
            target_id = msg.memory
            for protocol in self.owner.protocols:
                for memory in protocol.memories:
                    if (memory.name == target_id):
                        protocol.release()
                        return
    def memory_expire(self, memory: 'Memory'):
        """Reset an expired memory back to RAW."""
        self.update(None, memory, 'RAW')
    def release_remote_protocol(self, dst: str, protocol: str) -> None:
        """Ask node ``dst`` to release the protocol named ``protocol``."""
        msg = ResourceManagerMessage(ResourceManagerMsgType.RELEASE_PROTOCOL, protocol=protocol, node='', memories=[])
        self.owner.send_message(dst, msg)
    def release_remote_memory(self, dst: str, memory_id: str) -> None:
        """Ask node ``dst`` to release whichever protocol holds ``memory_id``."""
        msg = ResourceManagerMessage(ResourceManagerMsgType.RELEASE_MEMORY, protocol='', node='', memories=[], memory_id=memory_id)
        self.owner.send_message(dst, msg)
def faf(df: DataFrame, num_false_positives: float, num_frames: float) -> float:
    """False alarms per 100 frames (the MOT 'FAF' metric); ``df`` is unused."""
    rate = num_false_positives / num_frames
    return rate * 100
class Real(general_dataset):
    # Meta-dataset 'real' domain wrapper: picks the split-appropriate image
    # transform from the backbone's preprocessing pipeline.
    def __init__(self, root='data/meta-dataset/real', mode='test', backbone_name='resnet12', transform=None):
        assert (mode in ['train', 'val', 'test'])
        self.mode = mode
        # load() returns (model, train_transform, val_transform); only the
        # transforms are used here.
        (_, train_process, val_process) = load(backbone_name, jit=False)
        # val/test share the evaluation transform; any caller-passed
        # ``transform`` is overridden.
        if ((mode == 'val') or (mode == 'test')):
            transform = val_process
        elif (mode == 'train'):
            transform = train_process
        super().__init__(root, transform)
        # Alias targets as ``label`` for downstream consumers.
        self.label = self.targets
class MeanPoolGatingNetwork(torch.nn.Module):
    """Gating network that mean-pools encoder states and scores experts.

    Pools over the time dimension (mask-aware when a padding mask is
    present) and maps the pooled vector through a two-layer MLP to
    per-expert log-probabilities.
    """

    def __init__(self, embed_dim, num_experts, dropout=None):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_experts = num_experts
        self.fc1 = torch.nn.Linear(embed_dim, embed_dim)
        self.dropout = torch.nn.Dropout(dropout) if dropout is not None else None
        self.fc2 = torch.nn.Linear(embed_dim, num_experts)

    def forward(self, encoder_out):
        has_expected_format = (
            ('encoder_out' in encoder_out)
            and ('encoder_padding_mask' in encoder_out)
            and (encoder_out['encoder_out'][0].size(2) == self.embed_dim)
        )
        if not has_expected_format:
            raise ValueError('Unexpected format for encoder_out')
        padding_mask = encoder_out['encoder_padding_mask'][0]
        # (T, B, C) -> (B, T, C)
        states = encoder_out['encoder_out'][0].transpose(0, 1)
        if padding_mask is not None:
            # Zero padded positions, then average over real tokens only.
            states = states.clone()
            states[padding_mask] = 0
            ntokens = torch.sum(~padding_mask, dim=1, keepdim=True)
            pooled = torch.sum(states, dim=1) / ntokens.type_as(states)
        else:
            pooled = torch.mean(states, dim=1)
        hidden = torch.tanh(self.fc1(pooled))
        if self.dropout is not None:
            hidden = self.dropout(hidden)
        logits = self.fc2(hidden)
        # Softmax in fp32 for numerical stability, then cast back.
        return F.log_softmax(logits, dim=-1, dtype=torch.float32).type_as(logits)
def get_sgx_docker_containers() -> List[Container]:
    """Running docker containers that expose the /dev/isgx SGX device."""
    def _has_sgx_device(container):
        devices = container.attrs['HostConfig']['Devices']
        if not isinstance(devices, list):
            return False
        return any(device['PathOnHost'] == '/dev/isgx' for device in devices)

    return [c for c in docker_client.containers.list() if _has_sgx_device(c)]
class TestMaskedLanguageModel(unittest.TestCase):
    """End-to-end smoke tests for legacy masked-LM training and for seeding a
    translation model from a pretrained XLM checkpoint."""
    def setUp(self):
        # Silence logging during the heavy training runs.
        logging.disable(logging.CRITICAL)
    def tearDown(self):
        logging.disable(logging.NOTSET)
    def test_legacy_masked_lm(self):
        # Train a small legacy masked LM on dummy data; stdout is discarded.
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_legacy_mlm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_legacy_masked_language_model(data_dir, 'masked_lm')
    def _test_pretrained_masked_lm_for_translation(self, learned_pos_emb, encoder_only):
        """Train an MLM, then fine-tune a tiny translation model from its checkpoint.

        ``learned_pos_emb`` toggles learned vs sinusoidal position embeddings;
        ``encoder_only`` initializes only the encoder from the checkpoint.
        """
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_mlm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_legacy_masked_language_model(data_dir, arch='masked_lm', extra_args=(('--encoder-learned-pos',) if learned_pos_emb else ()))
                # Translation run nests inside so data_dir (and its
                # checkpoint_last.pt) is still alive.
                with tempfile.TemporaryDirectory('test_mlm_translation') as translation_dir:
                    create_dummy_data(translation_dir)
                    preprocess_translation_data(translation_dir, extra_flags=['--joined-dictionary'])
                    train_translation_model(translation_dir, arch='transformer_from_pretrained_xlm', extra_flags=((['--decoder-layers', '1', '--decoder-embed-dim', '32', '--decoder-attention-heads', '1', '--decoder-ffn-embed-dim', '32', '--encoder-layers', '1', '--encoder-embed-dim', '32', '--encoder-attention-heads', '1', '--encoder-ffn-embed-dim', '32', '--pretrained-xlm-checkpoint', '{}/checkpoint_last.pt'.format(data_dir), '--activation-fn', 'gelu', '--max-source-positions', '500', '--max-target-positions', '500'] + (['--encoder-learned-pos', '--decoder-learned-pos'] if learned_pos_emb else [])) + (['--init-encoder-only'] if encoder_only else [])), task='translation_from_pretrained_xlm')
    def test_pretrained_masked_lm_for_translation_learned_pos_emb(self):
        self._test_pretrained_masked_lm_for_translation(True, False)
    def test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb(self):
        self._test_pretrained_masked_lm_for_translation(False, False)
    def test_pretrained_masked_lm_for_translation_encoder_only(self):
        self._test_pretrained_masked_lm_for_translation(True, True)
class Version(_BaseVersion):
    """A PEP 440 version parsed into comparable components.

    Fix: the component accessors below (``epoch``, ``release``, ``pre``,
    ``post``, ``dev``, ``local``, ``public``, ``base_version``, the
    ``is_*release`` flags and ``major``/``minor``/``micro``) are read as
    plain attributes by ``__str__`` and by each other (e.g.
    ``if (self.epoch != 0)``), so they must be properties.  Without
    ``@property`` those expressions compared bound methods, making
    ``__str__`` and ``is_prerelease`` always wrong.
    """

    _regex = re.compile((('^\\s*' + VERSION_PATTERN) + '\\s*$'), (re.VERBOSE | re.IGNORECASE))

    def __init__(self, version):
        """Parse *version*; raise InvalidVersion if it is not valid PEP 440."""
        match = self._regex.search(version)
        if (not match):
            raise InvalidVersion("Invalid version: '{0}'".format(version))
        # Store the parsed components and a precomputed comparison key.
        self._version = _Version(epoch=(int(match.group('epoch')) if match.group('epoch') else 0), release=tuple((int(i) for i in match.group('release').split('.'))), pre=_parse_letter_version(match.group('pre_l'), match.group('pre_n')), post=_parse_letter_version(match.group('post_l'), (match.group('post_n1') or match.group('post_n2'))), dev=_parse_letter_version(match.group('dev_l'), match.group('dev_n')), local=_parse_local_version(match.group('local')))
        self._key = _cmpkey(self._version.epoch, self._version.release, self._version.pre, self._version.post, self._version.dev, self._version.local)

    def __repr__(self):
        return '<Version({0})>'.format(repr(str(self)))

    def __str__(self):
        """Render the canonical PEP 440 string form."""
        parts = []
        if (self.epoch != 0):
            parts.append('{0}!'.format(self.epoch))
        parts.append('.'.join((str(x) for x in self.release)))
        if (self.pre is not None):
            parts.append(''.join((str(x) for x in self.pre)))
        if (self.post is not None):
            parts.append('.post{0}'.format(self.post))
        if (self.dev is not None):
            parts.append('.dev{0}'.format(self.dev))
        if (self.local is not None):
            parts.append('+{0}'.format(self.local))
        return ''.join(parts)

    @property
    def epoch(self):
        return self._version.epoch

    @property
    def release(self):
        return self._version.release

    @property
    def pre(self):
        return self._version.pre

    @property
    def post(self):
        # Stored as a (letter, number) pair; expose just the number.
        return (self._version.post[1] if self._version.post else None)

    @property
    def dev(self):
        return (self._version.dev[1] if self._version.dev else None)

    @property
    def local(self):
        if self._version.local:
            return '.'.join((str(x) for x in self._version.local))
        else:
            return None

    @property
    def public(self):
        """The version without any local ('+...') segment."""
        return str(self).split('+', 1)[0]

    @property
    def base_version(self):
        """Epoch and release only (no pre/post/dev/local segments)."""
        parts = []
        if (self.epoch != 0):
            parts.append('{0}!'.format(self.epoch))
        parts.append('.'.join((str(x) for x in self.release)))
        return ''.join(parts)

    @property
    def is_prerelease(self):
        return ((self.dev is not None) or (self.pre is not None))

    @property
    def is_postrelease(self):
        return (self.post is not None)

    @property
    def is_devrelease(self):
        return (self.dev is not None)

    @property
    def major(self):
        return (self.release[0] if (len(self.release) >= 1) else 0)

    @property
    def minor(self):
        return (self.release[1] if (len(self.release) >= 2) else 0)

    @property
    def micro(self):
        return (self.release[2] if (len(self.release) >= 3) else 0)
class Vocab(object):
    """Vocabulary mapping between word strings and integer ids.

    The vocab file contains one "<word> <count>" pair per line; ids are
    assigned in file order starting at 0.  Loading aborts once more than
    ``max_size`` words have been read.
    """

    def __init__(self, vocab_file, max_size):
        self._word_to_id = {}
        self._id_to_word = {}
        self._count = 0
        with open(vocab_file, 'r') as vocab_f:
            for line in vocab_f:
                pieces = line.split()
                # Malformed lines are reported and skipped, not fatal.
                if len(pieces) != 2:
                    sys.stderr.write('Bad line: %s\n' % line)
                    continue
                word = pieces[0]
                if word in self._word_to_id:
                    raise ValueError('Duplicated word: %s.' % word)
                self._word_to_id[word] = self._count
                self._id_to_word[self._count] = word
                self._count += 1
                if self._count > max_size:
                    raise ValueError('Too many words: >%d.' % max_size)

    def WordToId(self, word):
        """Return the id of *word*, falling back to the unknown token's id."""
        try:
            return self._word_to_id[word]
        except KeyError:
            return self._word_to_id[UNKNOWN_TOKEN]

    def IdToWord(self, word_id):
        """Return the word for *word_id*; raise ValueError for unknown ids."""
        try:
            return self._id_to_word[word_id]
        except KeyError:
            raise ValueError('id not found in vocab: %d.' % word_id)

    def NumIds(self):
        """Number of words loaded into the vocabulary."""
        return self._count
def named_tensor():
    """Build a sample NamedTensor protobuf populated with test metadata."""
    nt = base_pb2.NamedTensor(name='tensor_name', round_number=0, lossless=False, report=False, data_bytes=(b'1' * 32))
    nt.tags.append('model')
    meta = nt.transformer_metadata.add()
    meta.int_to_float[1] = 1.0
    for item in (1, 8):
        meta.int_list.append(item)
    meta.bool_list.append(True)
    return nt
def deco(name):
    """Wrap ``windows.<name>`` and prepend a deprecation warning to its
    docstring (inserted just above the 'Parameters' section)."""
    func = getattr(windows, name)

    def wrapped(*args, **kwargs):
        return func(*args, **kwargs)

    # Make the wrapper look like the original scipy.signal function.
    wrapped.__name__ = name
    wrapped.__module__ = 'scipy.signal'
    if hasattr(func, '__qualname__'):
        wrapped.__qualname__ = func.__qualname__
    if func.__doc__:
        doc_lines = func.__doc__.splitlines()
        for (insert_at, doc_line) in enumerate(doc_lines):
            if doc_line.strip() == 'Parameters':
                break
        else:
            # Every wrapped window docstring must contain a Parameters section.
            raise RuntimeError('dev error: badly formatted doc')
        # Match the section's indentation so the warning renders correctly.
        spacing = ' ' * doc_line.find('P')
        doc_lines.insert(insert_at, '{0}.. warning:: scipy.signal.{1} is deprecated,\n{0} use scipy.signal.windows.{1} instead.\n'.format(spacing, name))
        wrapped.__doc__ = '\n'.join(doc_lines)
    return wrapped
def select_unit(t: float):
    """Pick a human-readable time unit and its scale for a duration.

    *t* is a duration in seconds; returns ``(unit, scale)`` where unit is
    one of 'ns'/'us'/'ms'/'s' and scale is the unit's size in seconds.
    """
    # Bucket by thousands: floor(log10(t) / 3) selects the SI prefix.
    exponent = int(np.log10(t) // 3)
    unit = {-3: 'ns', -2: 'us', -1: 'ms'}.get(exponent, 's')
    scale = {'ns': 1e-09, 'us': 1e-06, 'ms': 0.001, 's': 1}[unit]
    return (unit, scale)
class AutoUplift(BaseAutoUplift):
    """Automatic search over uplift-modeling metalearner candidates.

    Each candidate ``MetaLearnerWrapper`` is fitted on a train split and
    scored on a holdout split with the configured metric; the best one is
    kept in ``best_metalearner``.
    """

    # NOTE(review): mutable defaults ([] here, {} in create_best_metalearner)
    # are shared across calls -- consider a None sentinel; confirm no caller
    # relies on the current objects before changing.
    def __init__(self, base_task: Task, uplift_candidates: List[MetaLearnerWrapper]=[], add_dd_candidates: bool=False, metric: Union[(str, TUpliftMetric, Callable)]='adj_qini', has_report: bool=False, increasing_metric: bool=True, test_size: float=0.2, threshold_imbalance_treatment: float=0.2, timeout: Optional[int]=None, timeout_metalearner: Optional[int]=None, timeout_single_learner: Optional[int]=None, cpu_limit: int=4, gpu_ids: Optional[str]='all', random_state: int=42):
        """Configure candidates, metric and time budgets.

        ``timeout`` bounds the whole search, ``timeout_metalearner`` one
        metalearner fit, ``timeout_single_learner`` one inner AutoML run.
        """
        super().__init__(base_task, metric, has_report, increasing_metric, test_size, timeout, timeout_metalearner, timeout_single_learner, cpu_limit, gpu_ids, random_state)
        if (len(uplift_candidates) > 0):
            # Explicit candidates override the auto-generated ones, so some
            # options become irrelevant and are only warned about.
            if (timeout is not None):
                logger.warning("'timeout' is used only for a global time.")
            if add_dd_candidates:
                logger.warning("'add_dd_candidates' isn't used when 'uplift_candidates' is specified.")
        if (len(uplift_candidates) > 0):
            self.uplift_candidates = uplift_candidates
        else:
            # Filled lazily by _generate_uplift_candidates() inside fit().
            self.uplift_candidates = []
        self.best_metalearner: Optional[MetaLearner] = None
        self.best_metalearner_candidate: Optional[MetaLearnerWrapper] = None
        self.add_dd_candidates = add_dd_candidates
        self.candidate_holdout_metrics: List[Union[(float, None)]] = []
        self._threshold_imbalance_treatment = threshold_imbalance_treatment

    def fit(self, data: DataFrame=None, roles: Dict=None, verbose: int=0, train_data: DataFrame=None, test_data: DataFrame=None):
        """Fit every candidate, score each on the holdout and keep the best.

        When ``test_data`` is None the holdout is split off from ``data``;
        otherwise ``test_data`` is used as-is.  Candidates that raise
        NotTrainedError are skipped; the loop also stops early when the
        global timer is exceeded.
        """
        if (test_data is None):
            (train_data, test_data, test_treatment, test_target) = self._prepare_data(data, roles)
        else:
            (_, target_col) = uplift_utils._get_target_role(roles)
            (_, treatment_col) = uplift_utils._get_treatment_role(roles)
            test_treatment = test_data[treatment_col].ravel()
            test_target = test_data[target_col].ravel()
        best_metalearner: Optional[MetaLearner] = None
        best_metalearner_candidate_info: Optional[MetaLearnerWrapper] = None
        best_metric_value = 0.0
        if (len(self.uplift_candidates) == 0):
            self._generate_uplift_candidates(data, roles)
        self.candidate_holdout_metrics = ([None] * len(self.uplift_candidates))
        self.candidate_worktime = ([None] * len(self.uplift_candidates))
        self._timer.start()
        for (idx_candidate, candidate_info) in enumerate(self.uplift_candidates):
            # Calling the wrapper instantiates a fresh metalearner.
            metalearner = candidate_info()
            try:
                start_fit = time.time()
                metalearner.fit(train_data, roles, verbose)
                logger.info('Uplift candidate #{} [{}] is fitted'.format(idx_candidate, candidate_info.name))
                end_fit = time.time()
                self.candidate_worktime[idx_candidate] = (end_fit - start_fit)
                (uplift_pred, _, _) = metalearner.predict(test_data)
                uplift_pred = uplift_pred.ravel()
                metric_value = self.calculate_metric(test_target, uplift_pred, test_treatment)
                self.candidate_holdout_metrics[idx_candidate] = metric_value
                if (best_metalearner_candidate_info is None):
                    best_metalearner = metalearner
                    best_metalearner_candidate_info = candidate_info
                    best_metric_value = metric_value
                elif (((best_metric_value < metric_value) and self.increasing_metric) or ((best_metric_value > metric_value) and (not self.increasing_metric))):
                    best_metalearner = metalearner
                    best_metalearner_candidate_info = candidate_info
                    best_metric_value = metric_value
            except NotTrainedError:
                # Candidate failed to train within its budget; record the
                # spent time and leave its metric as None.
                end_fit = time.time()
                self.candidate_worktime[idx_candidate] = (end_fit - start_fit)
            if self._timer.time_limit_exceeded():
                logger.warning("Time of training exceeds 'timeout': {} > {}.".format(self._timer.time_spent, self.timeout))
                logger.warning('There is fitted {}/{} candidates'.format((idx_candidate + 1), len(self.uplift_candidates)))
                if ((idx_candidate + 1) < len(self.uplift_candidates)):
                    logger.warning("Try to increase 'timeout' or set 'None'(eq. infinity)")
                break
        self.best_metalearner_candidate_info = best_metalearner_candidate_info
        self.best_metalearner = best_metalearner

    def predict(self, data: DataFrame) -> Tuple[(np.ndarray, ...)]:
        """Predict uplift with the best metalearner found by fit()."""
        assert (self.best_metalearner is not None), "First call 'self.fit(...)', to choose best metalearner"
        return self.best_metalearner.predict(data)

    def create_best_metalearner(self, need_report: bool=False, update_metalearner_params: Dict[(str, Any)]={}, update_baselearner_params: Dict[(str, Any)]={}) -> Union[(MetaLearner, ReportDecoUplift)]:
        """Instantiate a fresh copy of the best candidate, optionally with
        updated (meta/base)learner params and wrapped in a report decorator.

        The report wrapper is only available for string metrics.
        """
        assert (self.best_metalearner_candidate_info is not None), "First call 'self.fit(...), to choose best metalearner"
        candidate_info = deepcopy(self.best_metalearner_candidate_info)
        if (len(update_metalearner_params) > 0):
            candidate_info.update_params(update_metalearner_params)
        if (len(update_baselearner_params) > 0):
            candidate_info.update_baselearner_params(update_baselearner_params)
        best_metalearner = candidate_info()
        if need_report:
            if isinstance(self.metric, str):
                rdu = ReportDecoUplift()
                best_metalearner = rdu(best_metalearner)
            else:
                logger.warning("Report doesn't work with custom metric, return just best_metalearner.")
        return best_metalearner

    def get_metalearners_rating(self) -> DataFrame:
        """Return a DataFrame ranking all candidates by holdout metric."""
        rating_table = DataFrame({'MetaLearner': [info.name for info in self.uplift_candidates], 'Parameters': [info.params for info in self.uplift_candidates], 'Metrics': self.candidate_holdout_metrics, 'WorkTime': self.candidate_worktime})
        rating_table['Rank'] = rating_table['Metrics'].rank(method='first', ascending=(not self.increasing_metric))
        rating_table.sort_values('Rank', inplace=True)
        rating_table.reset_index(drop=True, inplace=True)
        return rating_table

    def _generate_uplift_candidates(self, data: DataFrame, roles):
        """Populate self.uplift_candidates with defaults (and, optionally,
        data-dependent candidates), filtered for report compatibility."""
        self._calculate_tabular_time()
        # NOTE(review): _default_uplift_candidates is read here without being
        # called -- it was most likely decorated with @property upstream
        # (decorators appear stripped in this file); confirm.
        self.uplift_candidates = self._default_uplift_candidates
        if self.has_report:
            self.uplift_candidates = [c for c in self.uplift_candidates if (c.klass in ReportDecoUplift._available_metalearners)]
        if self.add_dd_candidates:
            dd_candidates = self._generate_data_depend_uplift_candidates(data, roles)
            if self.has_report:
                dd_candidates = [c for c in dd_candidates if (c.klass in ReportDecoUplift._available_metalearners)]
            self.uplift_candidates.extend(dd_candidates)

    def _calculate_tabular_time(self):
        """Derive the per-TabularAutoML time budget from the global timeout
        and the expected number of inner AutoML runs."""
        if self.has_report:
            num_tabular_automls = (16 if self.add_dd_candidates else 11)
        else:
            num_tabular_automls = (22 if self.add_dd_candidates else 17)
        if (self.timeout_single_learner is not None):
            self._tabular_timeout = self.timeout_single_learner
        elif (self.timeout_metalearner is not None):
            self._tabular_timeout = None
        elif (self.timeout is not None):
            self._tabular_timeout = (self.timeout / num_tabular_automls)
        else:
            self._tabular_timeout = None

    # NOTE(review): used as an attribute in _generate_uplift_candidates, so
    # this was presumably an @property upstream; verify before relying on it.
    def _default_uplift_candidates(self) -> List[MetaLearnerWrapper]:
        """Default candidate list: S/T/TD/X/R learners in linear and
        TabularAutoML flavors, with budgets split across the inner runs."""
        return [MetaLearnerWrapper(name='__SLearner__Default__', klass=SLearner, params={'base_task': self.base_task, 'timeout': self.timeout_metalearner}), MetaLearnerWrapper(name='__TLearner__Default__', klass=TLearner, params={'base_task': self.base_task, 'timeout': self.timeout_metalearner}), MetaLearnerWrapper(name='__TDLearner__Default__', klass=TDLearner, params={'base_task': self.base_task, 'timeout': self.timeout_metalearner}), MetaLearnerWrapper(name='__XLearner__Default__', klass=XLearner, params={'base_task': self.base_task, 'timeout': self.timeout_metalearner}), MetaLearnerWrapper(name='__RLearner__Linear__', klass=RLearner, params={'timeout': self.timeout_metalearner, 'propensity_learner': BaseLearnerWrapper(name='__Linear__', klass=uplift_utils.create_linear_automl, params={'task': Task('binary')}), 'mean_outcome_learner': BaseLearnerWrapper(name='__Linear__', klass=uplift_utils.create_linear_automl, params={'task': self.base_task}), 'effect_learner': BaseLearnerWrapper(name='__Linear__', klass=uplift_utils.create_linear_automl, params={'task': Task('reg')})}), MetaLearnerWrapper(name='__RLearner__Default__', klass=RLearner, params={'base_task': self.base_task, 'timeout': (self.timeout_metalearner if (self.timeout_metalearner is not None) else ((self._tabular_timeout * 3) if (self._tabular_timeout is not None) else None))}), MetaLearnerWrapper(name='__SLearner__TabularAutoML__', klass=SLearner, params={'timeout': self.timeout_metalearner, 'learner': BaseLearnerWrapper(name='__TabularAutoML__', klass=TabularAutoML, params={'task': self.base_task, 'timeout': (self.timeout_metalearner if (self._tabular_timeout is None) else self._tabular_timeout)})}), MetaLearnerWrapper(name='__TLearner__TabularAutoML__', klass=TLearner, params={'timeout': self.timeout_metalearner, 'treatment_learner': BaseLearnerWrapper(name='__TabularAutoML__', klass=TabularAutoML, params={'task': self.base_task, 'timeout': (noner(self.timeout_metalearner, (lambda t: int((t / 2)))) if (self._tabular_timeout is None) else self._tabular_timeout)}), 'control_learner': BaseLearnerWrapper(name='__TabularAutoML__', klass=TabularAutoML, params={'task': self.base_task, 'timeout': (noner(self.timeout_metalearner, (lambda t: int((t / 2)))) if (self._tabular_timeout is None) else self._tabular_timeout)})}), MetaLearnerWrapper(name='__TDLearner__TabularAutoML__', klass=TDLearner, params={'timeout': self.timeout_metalearner, 'treatment_learner': BaseLearnerWrapper(name='__TabularAutoML__', klass=TabularAutoML, params={'task': self.base_task, 'timeout': (noner(self.timeout_metalearner, (lambda t: int((t / 2)))) if (self._tabular_timeout is None) else self._tabular_timeout)}), 'control_learner': BaseLearnerWrapper(name='__TabularAutoML__', klass=TabularAutoML, params={'task': self.base_task, 'timeout': (noner(self.timeout_metalearner, (lambda t: int((t / 2)))) if (self._tabular_timeout is None) else self._tabular_timeout)})}), MetaLearnerWrapper(name='__XLearner__Propensity_Linear__Other_TabularAutoML__', klass=XLearner, params={'timeout': self.timeout_metalearner, 'outcome_learners': [BaseLearnerWrapper(name='__TabularAutoML__', klass=TabularAutoML, params={'task': self.base_task, 'timeout': (noner(self.timeout_metalearner, (lambda t: int((t / 4)))) if (self._tabular_timeout is None) else self._tabular_timeout)})], 'effect_learners': [BaseLearnerWrapper(name='__TabularAutoML__', klass=TabularAutoML, params={'task': Task('reg'), 'timeout': (noner(self.timeout_metalearner, (lambda t: int((t / 4)))) if (self._tabular_timeout is None) else self._tabular_timeout)})], 'propensity_learner': BaseLearnerWrapper(name='__Linear__', klass=uplift_utils.create_linear_automl, params={'task': Task('binary')})}), MetaLearnerWrapper(name='__XLearner__TabularAutoML__', klass=XLearner, params={'timeout': self.timeout_metalearner, 'outcome_learners': [BaseLearnerWrapper(name='__TabularAutoML__', klass=TabularAutoML, params={'task': self.base_task, 'timeout': (noner(self.timeout_metalearner, (lambda t: int((t / 5)))) if (self._tabular_timeout is None) else self._tabular_timeout)})], 'effect_learners': [BaseLearnerWrapper(name='__TabularAutoML__', klass=TabularAutoML, params={'task': Task('reg'), 'timeout': (noner(self.timeout_metalearner, (lambda t: int((t / 5)))) if (self._tabular_timeout is None) else self._tabular_timeout)})], 'propensity_learner': BaseLearnerWrapper(name='__TabularAutoML__', klass=TabularAutoML, params={'task': Task('binary'), 'timeout': (noner(self.timeout_metalearner, (lambda t: int((t / 5)))) if (self._tabular_timeout is None) else self._tabular_timeout)})})]

    def _generate_data_depend_uplift_candidates(self, data: DataFrame, roles: dict) -> List[MetaLearnerWrapper]:
        """Build extra XLearner candidates tuned to treatment imbalance.

        When the treatment rate deviates from 0.5 by more than the threshold,
        the heavier learner is assigned to the larger group.
        """
        dd_uplift_candidates: List[MetaLearnerWrapper] = []
        (_, treatment_col) = uplift_utils._get_treatment_role(roles)
        treatment_rate = data[treatment_col].mean()
        is_imbalance_treatment = False
        ordered_outcome_learners = [BaseLearnerWrapper(name='__Linear__', klass=uplift_utils.create_linear_automl, params={'task': Task('binary')}), BaseLearnerWrapper(name='__TabularAutoML__', klass=TabularAutoML, params={'task': self.base_task, 'timeout': self._tabular_timeout})]
        ordered_effect_learners = [BaseLearnerWrapper(name='__Linear__', klass=uplift_utils.create_linear_automl, params={'task': Task('reg')}), BaseLearnerWrapper(name='__TabularAutoML__', klass=TabularAutoML, params={'task': Task('reg'), 'timeout': self._tabular_timeout})]
        (control_model, treatment_model) = ('Linear', 'Preset')
        if (treatment_rate > (0.5 + self._threshold_imbalance_treatment)):
            is_imbalance_treatment = True
        elif (treatment_rate < (0.5 - self._threshold_imbalance_treatment)):
            is_imbalance_treatment = True
            # Treatment group is the minority: swap learner ordering.
            ordered_outcome_learners = ordered_outcome_learners[::(- 1)]
            ordered_effect_learners = ordered_effect_learners[::(- 1)]
            (control_model, treatment_model) = ('Preset', 'Linear')
        if is_imbalance_treatment:
            dd_uplift_candidates.extend([MetaLearnerWrapper(name='XLearner__Propensity_Linear__Control_{}__Treatment_{}'.format(control_model, treatment_model), klass=XLearner, params={'timeout': self.timeout_metalearner, 'outcome_learners': ordered_outcome_learners, 'effect_learners': ordered_effect_learners, 'propensity_learner': BaseLearnerWrapper(name='__Linear__', klass=uplift_utils.create_linear_automl, params={'task': Task('binary')})}), MetaLearnerWrapper(name='XLearner__Control_{}__Treatment_{}'.format(control_model, treatment_model), klass=XLearner, params={'timeout': self.timeout_metalearner, 'outcome_learners': ordered_outcome_learners, 'effect_learners': ordered_effect_learners, 'propensity_learner': BaseLearnerWrapper(name='__TabularAutoML__', klass=TabularAutoML, params={'task': Task('binary'), 'timeout': self._tabular_timeout})})])
        return dd_uplift_candidates
('ir_labeled_tuple_loader')
class IrLabeledTupleDatasetReader(DatasetReader):
    """Reads labeled (query, document) tuples for IR ranking models.

    Input lines are tab-separated with exactly four fields:
    query id, document id, query text, document text.
    """

    def __init__(self, tokenizer: Tokenizer=None, token_indexers: Dict[(str, TokenIndexer)]=None, source_add_start_token: bool=True, max_doc_length: int=(- 1), max_query_length: int=(- 1), lazy: bool=False) -> None:
        super().__init__(lazy)
        self._tokenizer = tokenizer or WordTokenizer()
        if token_indexers:
            self._token_indexers = token_indexers
        else:
            self._token_indexers = {'tokens': SingleIdTokenIndexer(lowercase_tokens=True)}
        self._source_add_start_token = source_add_start_token
        # -1 disables truncation for the respective sequence.
        self.max_doc_length = max_doc_length
        self.max_query_length = max_query_length

    def _read(self, file_path):
        with open(cached_path(file_path), 'r', encoding='utf8') as data_file:
            for (line_num, raw_line) in enumerate(data_file):
                line = raw_line.strip('\n')
                if not line:
                    continue
                parts = line.split('\t')
                if len(parts) != 4:
                    raise ConfigurationError('Invalid line format: %s (line number %d)' % (line, (line_num + 1)))
                (query_id, doc_id, query_sequence, doc_sequence) = parts
                yield self.text_to_instance(query_id, doc_id, query_sequence, doc_sequence)

    def text_to_instance(self, query_id: str, doc_id: str, query_sequence: str, doc_sequence: str) -> Instance:
        """Tokenize (and truncate) query/document text into an Instance."""
        query_id_field = LabelField(int(query_id), skip_indexing=True)
        doc_id_field = LabelField(int(doc_id), skip_indexing=True)
        query_tokens = self._tokenizer.tokenize(query_sequence)
        if self.max_query_length > -1:
            query_tokens = query_tokens[:self.max_query_length]
        doc_tokens = self._tokenizer.tokenize(doc_sequence)
        if self.max_doc_length > -1:
            doc_tokens = doc_tokens[:self.max_doc_length]
        fields = {
            'query_id': query_id_field,
            'doc_id': doc_id_field,
            'query_tokens': TextField(query_tokens, self._token_indexers),
            'doc_tokens': TextField(doc_tokens, self._token_indexers),
            'query_length': LabelField(len(query_tokens), skip_indexing=True),
            'doc_length': LabelField(len(doc_tokens), skip_indexing=True),
        }
        return Instance(fields)
def save_checkpoint(config, epoch, model, max_accuracy, optimizer, lr_scheduler, logger):
    """Persist model/optimizer/scheduler state plus training metadata.

    Writes ``ckpt_epoch_<epoch>.pth`` into ``config.OUTPUT``; AMP state is
    included only when mixed precision is enabled.
    """
    state = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'lr_scheduler': lr_scheduler.state_dict(),
        'max_accuracy': max_accuracy,
        'epoch': epoch,
        'config': config,
    }
    if config.AMP_OPT_LEVEL != 'O0':
        state['amp'] = amp.state_dict()
    target = os.path.join(config.OUTPUT, f'ckpt_epoch_{epoch}.pth')
    logger.info(f'{target} saving......')
    torch.save(state, target)
    logger.info(f'{target} saved !!!')
()
class Checkpoint():
    """Tracks validation loss, checkpoints the best model and early-stops.

    Scalars passed to ``__call__`` are mirrored to TensorBoard; the value
    logged under 'Loss/Val' drives saving and the early-stopping counter.

    Fixes: the original crashed with a TypeError when ``scalars`` was None
    (despite the Optional default) and with a NameError when 'Loss/Val' was
    absent (``val_loss`` unbound); both are now harmless no-ops for the
    checkpointing logic.
    """

    def __init__(self, checkpoint_dir: str, patience: Optional[int]=7, delta: Optional[float]=0.0):
        self.checkpoint_dir = checkpoint_dir
        self.model_path = join(checkpoint_dir, 'model.pth')
        self.patience = patience        # epochs to wait after last improvement; negative disables stopping
        self.counter = 0                # epochs since last improvement
        self.best_loss = float('inf')
        self.early_stop = False
        self.delta = delta              # tolerance added to best_loss in the comparison
        self.summary_writer = SummaryWriter(log_dir=checkpoint_dir)

    def __call__(self, epoch: int, model: torch.nn.Module, scalars: Optional[Dict[(str, float)]]=None):
        """Log *scalars* for *epoch*; checkpoint / early-stop on 'Loss/Val'."""
        val_loss = None
        if scalars is not None:
            for (name, value) in scalars.items():
                self.summary_writer.add_scalar(name, value, epoch)
                if name == 'Loss/Val':
                    val_loss = value
        if val_loss is not None:
            if val_loss <= (self.best_loss + self.delta):
                logging.info(f'Validation loss decreased ({self.best_loss:.3f} --> {val_loss:.3f}). Saving model ...')
                torch.save(model.state_dict(), self.model_path)
                self.best_loss = val_loss
                self.counter = 0
            else:
                self.counter += 1
                logging.info(f'Validation loss increased ({self.best_loss:.3f} --> {val_loss:.3f}). Early stopping counter: {self.counter} out of {self.patience}')
                # Chained comparison: only trip when patience is non-negative.
                if self.counter >= self.patience >= 0:
                    self.early_stop = True
        self.summary_writer.flush()

    def close(self, scores: Optional[Dict[(str, float)]]=None):
        """Log final *scores* (if any) and release the TensorBoard writer."""
        if scores is not None:
            for (name, value) in scores.items():
                self.summary_writer.add_scalar(name, value)
        self.summary_writer.close()
def build_scheduler(cfg, optimizer):
    """Construct an LR scheduler from *cfg* via the SCHEDULERS registry,
    injecting the optimizer as a default constructor argument."""
    defaults = {'optimizer': optimizer}
    return SCHEDULERS.build(cfg, default_args=defaults)
def _from_whatever(data, fmt=None):
    """Build a list of Graphs from *data*, one graph per non-empty line.

    *data* may be a string, an iterable of strings, or a readable
    file-like object.  ``fmt`` ('graph6' or 'sparse6') selects the line
    format; when None it is guessed from the file name's extension.
    """
    from sage.graphs.graph import Graph
    if isinstance(data, str):
        lines = data.splitlines()
    else:
        lines = try_read(data, splitlines=True)
    if ((lines is not None) and (fmt is None)):
        # Guess the format from the file extension when one is available.
        if hasattr(data, 'name'):
            if data.name.endswith('.g6'):
                fmt = 'graph6'
            elif data.name.endswith('.s6'):
                fmt = 'sparse6'
    else:
        # NOTE(review): this branch also runs when *lines* was successfully
        # read but *fmt* was given explicitly, re-binding lines to
        # iter(data); for a plain string that iterates characters --
        # confirm this pairing is intended (indentation may have been lost).
        try:
            lines = iter(data)
        except TypeError:
            raise TypeError('must be a string, an iterable of strings, or a readable file-like object')
    if (fmt == 'graph6'):
        kwargs = {'format': fmt}
    elif (fmt == 'sparse6'):
        kwargs = {'format': fmt, 'sparse': True}
    else:
        # Unknown format: let the Graph constructor auto-detect.
        kwargs = {}
    out = []
    for line in lines:
        if (not isinstance(line, str)):
            raise TypeError('must be an iterable of strings')
        line = line.strip()
        if (not line):
            continue
        if ('\n' in line):
            # Multi-line chunk: recurse to split it into individual graphs.
            out.append(_from_whatever(line.splitlines(), fmt=fmt))
        else:
            out.append(Graph(line, **kwargs))
    return out
def test_affinity_map_construction():
arr = np.random.rand(3, 3, 4, 5).astype(np.float32)
aff = AffinityMap(arr, voxel_offset=(0, (- 1), (- 1), (- 1))) |
class T5():
    """Wrapper around HuggingFace T5 for (multi-page) document VQA.

    Fix: ``forward`` used ``(a, b) = expr if return_pred_answer else None``,
    which raised a TypeError (cannot unpack None) whenever
    ``return_pred_answer`` was False; the fallback is now ``(None, None)``
    and the page-selection loop is skipped when no predictions exist.
    """

    def __init__(self, config):
        self.batch_size = config['batch_size']
        self.tokenizer = T5Tokenizer.from_pretrained(config['model_weights'])
        self.model = T5ForConditionalGeneration.from_pretrained(config['model_weights'])
        # Page-retrieval strategy (e.g. 'logits', 'oracle'), lowercased; None when unset.
        self.page_retrieval = config['page_retrieval'].lower() if 'page_retrieval' in config else None

    def parallelize(self):
        """Wrap the model for multi-GPU data parallelism."""
        self.model = nn.DataParallel(self.model)

    def prepare_inputs_for_vqa(self, question, context, answers=None):
        """Tokenize "question: ... context: ..." prompts and optional labels.

        Returns ``(input_ids, attention_mask, labels)``; labels is None when
        no answers are given.  Padding positions in labels are set to -100
        so the loss ignores them.
        """
        input_text = ['question: {:s} context: {:s}'.format(q, c) for (q, c) in zip(question, context)]
        tokens = self.tokenizer(input_text, return_tensors='pt', padding=True, truncation=True).to(self.model.device)
        if answers is not None:
            # Each sample may have several gold answers; train on a random one.
            answers = [random.choice(answer) for answer in answers]
            labels = self.tokenizer(answers, return_tensors='pt', padding=True)
            labels.input_ids[(labels.input_ids[:] == self.tokenizer.pad_token_id)] = (- 100)
            labels = labels.input_ids.to(self.model.device)
        else:
            labels = None
        return (tokens.input_ids, tokens.attention_mask, labels)

    def forward(self, batch, return_pred_answer=False):
        """Run one VQA step; returns (outputs, pred_answers, pred_answer_pages, pred_answers_conf)."""
        question = batch['questions']
        context = batch['contexts']
        answers = batch['answers']
        if self.page_retrieval == 'logits':
            # Score every page of each document separately and keep the page
            # whose generated answer has the highest confidence.
            num_pages = batch['num_pages']
            outputs = []
            pred_answers = []
            pred_answer_pages = []
            pred_answers_conf = []
            for batch_idx in range(len(context)):
                (input_ids, attention_mask, _) = self.prepare_inputs_for_vqa(([question[batch_idx]] * num_pages[batch_idx]), context[batch_idx])
                # BUG FIX: fallback must be a 2-tuple, not None.
                (pred_answer, logits) = self.get_answer_from_model_output(input_ids, attention_mask) if return_pred_answer else (None, None)
                max_logits = (- 999999)
                answer_page = None
                best_answer = None
                if logits is not None:
                    for p_ix in range(len(input_ids)):
                        if logits[p_ix] > max_logits:
                            max_logits = logits[p_ix]
                            answer_page = p_ix
                            best_answer = pred_answer[p_ix]
                outputs.append(None)
                pred_answers.append(best_answer)
                pred_answer_pages.append(answer_page)
                pred_answers_conf.append(max_logits)
        else:
            (input_ids, attention_mask, labels) = self.prepare_inputs_for_vqa(question, context, answers)
            outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
            # BUG FIX: fallback must be a 2-tuple, not None.
            (pred_answers, pred_answers_conf) = self.get_answer_from_model_output(input_ids, attention_mask) if return_pred_answer else (None, None)
            if self.page_retrieval == 'oracle':
                pred_answer_pages = batch['answer_page_idx']
            else:
                pred_answer_pages = None
        return (outputs, pred_answers, pred_answer_pages, pred_answers_conf)

    def get_answer_from_model_output(self, input_ids, attention_mask):
        """Generate answers and their confidences for the given inputs."""
        output = self.model.generate(input_ids, attention_mask=attention_mask, output_scores=True, return_dict_in_generate=True, output_attentions=True)
        pred_answers = self.tokenizer.batch_decode(output['sequences'], skip_special_tokens=True)
        pred_answers_conf = model_utils.get_generative_confidence(output)
        return (pred_answers, pred_answers_conf)
def main():
    """Convert user/item weight matrices from MatrixMarket to CSV.

    Values below 1e-9 are zeroed to keep the CSV output sparse-friendly.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--user_weights_file', required=True)
    parser.add_argument('--item_weights_file', required=True)
    parser.add_argument('--output_dir', required=True)
    args = parser.parse_args()

    print('Saving item weights....')
    # Work on the transpose, threshold, then transpose back for output.
    item_weights = mmread(args.item_weights_file).T
    item_weights[item_weights < 1e-09] = 0.0
    np.savetxt('%s/item_weights.csv' % args.output_dir, item_weights.T, delimiter=',')

    print('Saving user weights....')
    user_weights = mmread(args.user_weights_file)
    user_weights[user_weights < 1e-09] = 0.0
    np.savetxt('%s/user_weights.csv' % args.output_dir, user_weights, delimiter=',')
def test_estimate_bandwidth_1sample(global_dtype):
    """With a single sampled point the estimated bandwidth collapses to 0.0
    while the dtype of the input is preserved."""
    data = X.astype(global_dtype, copy=False)
    bw = estimate_bandwidth(data, n_samples=1, quantile=0.3)
    assert bw.dtype == X.dtype
    assert bw == pytest.approx(0.0, abs=1e-05)
class F1Scorer(Scorer):
    """Scores QA predictions with token-level F1; predictions that are
    missing or less likely than the null answer score zero."""

    def keys(self) -> Set[str]:
        return {'f1'}

    def _score_single_ref(self, context: str, questions: List[str], answers: List[str], predictions: List[str], probabilities: List[float], null_probabilities: List[float]) -> List[Dict[(str, float)]]:
        results = []
        for (prediction, answer, prob, null_prob) in zip(predictions, answers, probabilities, null_probabilities):
            if (prediction is None) or (null_prob >= prob):
                f1 = 0.0
            else:
                f1 = compute_f1(answer, prediction)
            results.append({'f1': f1})
        return results
_converter_regitstry('DMA_compress')
def DMA_compress_converter(context: 'BM1688Context', reg: DMA_compress_reg):
    """Placeholder converter for DMA_compress commands: no operands emitted.

    Returns a 3-tuple of empty lists; like the original ``([],) * 3`` form,
    all three slots deliberately alias the same list object.
    """
    shared_empty = []
    return (shared_empty, shared_empty, shared_empty)
class SKFF(nn.Module):
    """Selective Kernel Feature Fusion (cf. MIRNet): fuses ``height``
    same-shaped feature maps via learned per-branch channel attention."""

    def __init__(self, in_channels, height=3, reduction=8, bias=False):
        super(SKFF, self).__init__()
        self.height = height
        # Bottleneck width for the attention descriptor (never below 4).
        reduced = max(int(in_channels / reduction), 4)
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_du = nn.Sequential(nn.Conv2d(in_channels, reduced, 1, padding=0, bias=bias), nn.PReLU())
        self.fcs = nn.ModuleList([])
        for _ in range(self.height):
            self.fcs.append(nn.Conv2d(reduced, in_channels, kernel_size=1, stride=1, bias=bias))
        self.softmax = nn.Softmax(dim=1)

    def forward(self, inp_feats):
        """Fuse a list of ``height`` tensors of shape (B, C, H, W)."""
        batch = inp_feats[0].shape[0]
        channels = inp_feats[0].shape[1]
        stacked = torch.cat(inp_feats, dim=1)
        stacked = stacked.view(batch, self.height, channels, stacked.shape[2], stacked.shape[3])
        # Global descriptor from the summed branches.
        fused = torch.sum(stacked, dim=1)
        descriptor = self.conv_du(self.avg_pool(fused))
        # One attention map per branch, normalized across branches.
        attention = torch.cat([fc(descriptor) for fc in self.fcs], dim=1)
        attention = self.softmax(attention.view(batch, self.height, channels, 1, 1))
        return torch.sum(stacked * attention, dim=1)
_params
def test_quad_vec_simple_inf(quadrature):
f = (lambda x: (1 / (1 + (np.float64(x) ** 2))))
for epsabs in [0.1, 0.001, 1e-06]:
if ((quadrature == 'trapz') and (epsabs < 0.0001)):
continue
kwargs = dict(norm='max', epsabs=epsabs, quadrature=quadrature)
(res, err) = quad_vec(f, 0, np.inf, **kwargs)
assert_allclose(res, (np.pi / 2), rtol=0, atol=max(epsabs, err))
(res, err) = quad_vec(f, 0, (- np.inf), **kwargs)
assert_allclose(res, ((- np.pi) / 2), rtol=0, atol=max(epsabs, err))
(res, err) = quad_vec(f, (- np.inf), 0, **kwargs)
assert_allclose(res, (np.pi / 2), rtol=0, atol=max(epsabs, err))
(res, err) = quad_vec(f, np.inf, 0, **kwargs)
assert_allclose(res, ((- np.pi) / 2), rtol=0, atol=max(epsabs, err))
(res, err) = quad_vec(f, (- np.inf), np.inf, **kwargs)
assert_allclose(res, np.pi, rtol=0, atol=max(epsabs, err))
(res, err) = quad_vec(f, np.inf, (- np.inf), **kwargs)
assert_allclose(res, (- np.pi), rtol=0, atol=max(epsabs, err))
(res, err) = quad_vec(f, np.inf, np.inf, **kwargs)
assert_allclose(res, 0, rtol=0, atol=max(epsabs, err))
(res, err) = quad_vec(f, (- np.inf), (- np.inf), **kwargs)
assert_allclose(res, 0, rtol=0, atol=max(epsabs, err))
(res, err) = quad_vec(f, 0, np.inf, points=(1.0, 2.0), **kwargs)
assert_allclose(res, (np.pi / 2), rtol=0, atol=max(epsabs, err))
f = (lambda x: (np.sin((x + 2)) / (1 + (x ** 2))))
exact = ((np.pi / np.e) * np.sin(2))
epsabs = 1e-05
(res, err, info) = quad_vec(f, (- np.inf), np.inf, limit=1000, norm='max', epsabs=epsabs, quadrature=quadrature, full_output=True)
assert (info.status == 1)
assert_allclose(res, exact, rtol=0, atol=max(epsabs, (1.5 * err))) |
def mfp2d(arr, xth=0.5, iterations=1000000, verbose=True, point='random'):
    """Estimate a size (mean-free-path style) distribution of a 2D structure
    by marching rays in random directions until they leave the structure.

    Parameters
    ----------
    arr : ndarray (2D)
        Input field; cells with ``arr >= xth`` form the structure.
    xth : float
        Threshold used to binarize ``arr``.
    iterations : int
        Number of rays to launch.
    verbose : bool
        If True, wrap the marching loop in a tqdm progress bar.
    point : 'random' or (x, y)
        Start every ray from random in-structure cells, or from the given
        fixed point.  Returns None (with a message) if the fixed point is
        outside the structure.

    Returns
    -------
    (num_sz, size_px) : tuple of ndarrays
        num_sz[r] counts rays that exited at path length r+1 pixels;
        size_px is simply arange(longest).
    """
    info = arr.shape
    # Longest possible chord of the image, in pixels.
    longest = int(np.sqrt(2) * max(info[0], info[1]))
    num_sz = np.zeros(longest)
    ar = np.zeros(arr.shape)
    ar[arr >= xth] = 1
    # Random unit direction for every ray.
    thetas = np.random.randint(0, 360, size=iterations)
    ls = np.sin(thetas * np.pi / 180)
    ms = np.cos(thetas * np.pi / 180)
    if point == 'random':
        loc = np.argwhere(ar == 1)
        rand_loc = np.random.randint(0, high=loc.shape[0], size=iterations)
        xs, ys = loc[rand_loc, 0], loc[rand_loc, 1]
    else:
        xs, ys = point
        if ar[xs, ys] == 0:
            print('Given point is outside the structure.')
            return None
        xs, ys = xs * np.ones(iterations), ys * np.ones(iterations)
    # fill_value=0 makes every position outside the grid count as "exited".
    interp_func = RegularGridInterpolator(
        (np.arange(info[0]), np.arange(info[1])), ar,
        bounds_error=False, fill_value=0)
    # Previously the loop body was duplicated for the verbose / quiet cases;
    # select the iterator once instead (tqdm is only touched when verbose).
    steps = tqdm(range(longest)) if verbose else range(longest)
    for rr in steps:
        xs, ys = xs + ls, ys + ms
        pts = np.vstack((xs, ys)).T
        vals = interp_func(pts)
        # Rays whose interpolated value dropped to <= 0.5 have left the
        # structure; record them at this radius and stop tracking them.
        check = np.argwhere(vals <= 0.5)
        num_sz[rr] = check.shape[0]
        xs, ys = np.delete(xs, check), np.delete(ys, check)
        ls, ms = np.delete(ls, check), np.delete(ms, check)
        if not xs.size:
            break
    size_px = np.arange(longest)
    return (num_sz, size_px)
def BruckRyserChowla_check(v, k, lambd):
    """Apply the Bruck-Ryser-Chowla necessary condition to the parameters
    (v, k, lambd) of a putative symmetric design.

    Returns ``Unknown`` when the basic counting identity fails, a boolean
    square test for even v, and for odd v the existence of a rational
    point on the associated conic.
    """
    from sage.rings.rational_field import QQ
    # The admissibility identity k(k-1) = lambda(v-1) must hold first.
    if k * (k - 1) != lambd * (v - 1):
        return Unknown
    if v % 2 == 0:
        # Even v: k - lambda has to be a perfect square.
        return is_square(k - lambd)
    # Odd v: sign (-1)^((v-1)/2) depends on v mod 4.
    sign = 1 if v % 4 == 1 else -1
    conic = Conic(QQ, [1, lambd - k, -sign * lambd])
    flag, _sol = conic.has_rational_point(point=True)
    return flag
def get_notebooks(tutorial_dir: str) -> List[Notebook]:
    """Collect the notebooks listed in the tutorial directory's config file.

    Blank lines in the config are skipped.  Returns an empty list (after
    logging) when the config file does not exist.
    """
    base = os.path.abspath(tutorial_dir)
    config_path = os.path.join(base, NOTEBOOKS_CONFIG_FNAME)
    if not os.path.isfile(config_path):
        logging.info(f'No {NOTEBOOKS_CONFIG_FNAME} config file in {base}')
        return []
    with open(config_path, 'r') as config_file:
        names = [line for line in config_file.read().splitlines() if line]
    return [Notebook(os.path.join(base, name)) for name in names]
def parse_stage_factory(context):
    """Return a pipeline parse stage bound to *context*.

    The returned callable parses a compilation source into a module tree,
    attaching the source and scope to the tree.
    """
    def parse(compsrc):
        source_desc = compsrc.source_desc
        full_module_name = compsrc.full_module_name
        initial_pos = (source_desc, 1, 0)
        # cimport_from_pyx must be off while locating the module scope.
        # Restore it in a finally block so an exception inside find_module
        # does not leak the flipped option into subsequent compilations
        # (the original save/restore pair skipped the restore on error).
        saved_cimport_from_pyx = Options.cimport_from_pyx
        Options.cimport_from_pyx = False
        try:
            scope = context.find_module(full_module_name, pos=initial_pos, need_pxd=0)
        finally:
            Options.cimport_from_pyx = saved_cimport_from_pyx
        tree = context.parse(source_desc, scope, pxd=0, full_module_name=full_module_name)
        tree.compilation_source = compsrc
        tree.scope = scope
        tree.is_pxd = False
        return tree
    return parse
def visible_gpu(gpus):
    """Restrict CUDA to the given device id(s) and return their logical
    indices (0..n-1) as seen by the process afterwards.

    ``gpus`` may be a single int or any iterable of ints.
    """
    device_ids = [gpus] if isinstance(gpus, int) else list(gpus)
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(d) for d in device_ids)
    return list(range(len(device_ids)))
def test_analyse_module(parsed_module_no_dependencies):
    """A module without dependencies should expose exactly four objects
    accessible for testing."""
    cluster = analyse_module(parsed_module_no_dependencies)
    expected_objects = 4
    assert cluster.num_accessible_objects_under_test() == expected_objects
class RandomPolicy(QLearningAlgoBase[(None, RandomPolicyConfig)]):
    """Algorithm stub that ignores observations and samples random actions.

    Useful as a baseline or for collecting exploratory data.  Value
    prediction and updates are unsupported and raise NotImplementedError.
    """

    _action_size: int

    def __init__(self, config: RandomPolicyConfig):
        super().__init__(config, False, None)
        # Placeholder until the environment reports its action size.
        self._action_size = 1

    def inner_create_impl(self, observation_shape: Shape, action_size: int) -> None:
        # Record the environment's action dimensionality.
        self._action_size = action_size

    def predict(self, x: Observation) -> NDArray:
        # For a random policy, prediction is the same as sampling.
        return self.sample_action(x)

    def sample_action(self, x: Observation) -> NDArray:
        """Draw one random action per observation in the batch."""
        batch = np.asarray(x)
        shape = (batch.shape[0], self._action_size)
        distribution = self._config.distribution
        if distribution == 'uniform':
            action = np.random.uniform(-1.0, 1.0, size=shape)
        elif distribution == 'normal':
            action = np.random.normal(0.0, self._config.normal_std, size=shape)
        else:
            raise ValueError(f'invalid distribution type: {self._config.distribution}')
        # Keep samples inside the canonical [-1, 1] action range.
        action = np.clip(action, -1.0, 1.0)
        scaler = self._config.action_scaler
        if scaler:
            # Map back to the environment's native action range.
            action = scaler.reverse_transform_numpy(action)
        return action

    def predict_value(self, x: Observation, action: NDArray) -> NDArray:
        raise NotImplementedError

    def inner_update(self, batch: TorchMiniBatch) -> Dict[str, float]:
        raise NotImplementedError

    def get_action_type(self) -> ActionSpace:
        return ActionSpace.CONTINUOUS
(0.1)
('/get_tv_plan', methods=['POST'])
def funcGetTVPlanPrice():
    """Respond with the yearly price of the TV plan named in the request.

    Reads the 'new_tv_plan' entity from the dialogue-manager JSON payload
    and looks it up in a fixed price table.  Unknown plans get a failure
    response instead of an error.
    """
    dm_msg = request.json['entities']
    entity_name = 'new_tv_plan'
    tvplan = dm_msg[entity_name]
    # Yearly prices in dollars for the supported plans.
    price = {'hulu live': 200, 'hulu tv': 200, 'fubo tv': 300, 'pluto tv': 500}
    try:
        return json_resp(True, 'the price of {} is {} dollar per year'.format(tvplan, price[tvplan]))
    except KeyError:
        # Narrowed from `except Exception` so unrelated failures surface;
        # only an unknown plan name lands here.  Also fixed the user-facing
        # typo 'avaiable' -> 'available'.
        return json_resp(False, msg='the price of {} is not available'.format(tvplan))
class LazyInstanceNorm2d(_LazyNormBase, _InstanceNorm):
    """InstanceNorm2d variant that infers ``num_features`` lazily; once
    initialized, the instance becomes an ``InstanceNorm2d``."""

    # After the first forward pass the module is converted to this class.
    cls_to_become = InstanceNorm2d

    def _check_input_dim(self, input):
        # 2d instance norm expects (N, C, H, W) tensors.
        ndim = input.dim()
        if ndim != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(ndim))
def evaluate_conll(gold_path, predictions, official_stdout=False):
    """Score *predictions* against the gold CoNLL file with the official scorer.

    The predictions are written to a temporary CoNLL file (delete=False, so
    it stays on disk for the external scorer to read) and then evaluated
    with the muc, bcub and ceafe metrics.

    Returns:
        dict mapping metric name -> result of official_conll_eval.
    """
    with tempfile.NamedTemporaryFile(delete=False, mode='w') as prediction_file:
        with open(gold_path, 'r') as gold_file:
            output_conll(gold_file, prediction_file, predictions)
        print('Predicted conll file: {}'.format(prediction_file.name))
    # NOTE(review): gold_file.name is read after its `with` block, which is
    # fine (.name outlives the handle) — presumably it equals gold_path.
    return {m: official_conll_eval(gold_file.name, prediction_file.name, m, official_stdout) for m in ('muc', 'bcub', 'ceafe')}
def clear():
    """Clear the terminal screen; no-op when stdout is not a TTY."""
    if not isatty(sys.stdout):
        return
    if WIN:
        # Windows consoles: delegate to the cls command.
        os.system('cls')
        return
    # ANSI escape: erase display, then home the cursor.
    sys.stdout.write('\x1b[2J\x1b[1;1H')
def print_banner():
    """Print the BLASYS startup banner, version and license notice."""
    banner_lines = [
        '/\\',
        '| |',
        '| BLASYS -- Approximate Logic Synthesis Using Boolean Matrix Factorization |',
        '| Version: {} |'.format(__version__),
        '| |',
        '| Copyright (C) 2019 SCALE Lab, Brown University |',
        '| |',
        '| Permission to use, copy, modify, and/or distribute this software for any |',
        '| purpose with or without fee is hereby granted, provided that the above |',
        '| copyright notice and this permission notice appear in all copies. |',
        '| |',
        '| THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |',
        '| WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |',
        '| MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |',
        '| ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |',
        '| WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |',
        '| ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |',
        '| OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |',
        '| |',
        '\\/',
    ]
    for line in banner_lines:
        print(line)
def unpack_string_to_character_literals(literal):
    """Split a string literal node into one literal node per character.

    Each new node shares the original's position and class, and carries a
    single character (of the same string type) as both its value and its
    constant result.
    """
    node_cls = literal.__class__
    char_cls = literal.value.__class__
    pos = literal.pos
    return [
        node_cls(pos, value=ch, constant_result=ch)
        for ch in (char_cls(c) for c in literal.value)
    ]
def get_norm_act_layer(layer_class):
    """Resolve a norm+activation layer name to its implementing class.

    Matching ignores case and underscores.  An unknown name trips the
    trailing assertion (kept as an assert so callers see the same
    AssertionError as before).
    """
    name = layer_class.replace('_', '').lower()
    if name.startswith('batchnorm'):
        return BatchNormAct2d
    if name.startswith('groupnorm'):
        return GroupNormAct
    if name == 'evonormbatch':
        return EvoNormBatch2d
    if name == 'evonormsample':
        return EvoNormSample2d
    if name in ('iabn', 'inplaceabn'):
        return InplaceAbn
    assert False, 'Invalid norm_act layer (%s)' % name
def all_cached_data(polytopes):
    """Precompute cached data (polars, points, nef partitions) for the given
    polytopes, and points/nef partitions for the polars of the reflexive ones.
    """
    all_polars(polytopes)
    all_points(polytopes)
    reflexive = [poly for poly in polytopes if poly.is_reflexive()]
    all_nef_partitions(reflexive)
    polar_polytopes = [poly.polar() for poly in reflexive]
    all_points(polar_polytopes)
    all_nef_partitions(polar_polytopes)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.