class Iron(ConsumedResource):
def __init__(self, *args, **kwargs):
super().__init__('Iron', *args, **kwargs) |
def test_case46():
url = (discoveryIp + '/ngsi9/ngsi-ld/registration/urn:ngsi-ld:Vehicle:C001')
r = requests.get(url)
resp_content = r.content
resInJson = resp_content.decode('utf8').replace("'", '"')
resp = json.loads(resInJson)
if (resp['ID'] == 'urn:ngsi-ld:Vehicle:C001'):
print('\nValidated')
else:
print('\nNot Validated')
assert (r.status_code == 200) |
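# Note on test_case46 above: the decode-and-replace step assumes the service
# returns Python-repr-style quoting; for well-formed JSON responses, the
# equivalent and safer call is simply `resp = r.json()` (a requests built-in).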
class ConcatenationAggregator(Layer):
def __init__(self, input_dim, output_dim, review_item_adj, review_user_adj, review_vecs, user_vecs, item_vecs, dropout=0.0, act=tf.nn.relu, name=None, concat=False, **kwargs):
super(ConcatenationAggregator, self).__init__(**kwargs)
self.review_item_adj = review_item_adj
self.review_user_adj = review_user_adj
self.review_vecs = review_vecs
self.user_vecs = user_vecs
self.item_vecs = item_vecs
self.dropout = dropout
self.act = act
self.concat = concat
if (name is not None):
name = ('/' + name)
else:
name = ''
with tf.variable_scope(((self.name + name) + '_vars')):
self.vars['con_agg_weights'] = glorot([input_dim, output_dim], name='con_agg_weights')
if self.logging:
self._log_vars()
self.input_dim = input_dim
self.output_dim = output_dim
def _call(self, inputs):
review_vecs = tf.nn.dropout(self.review_vecs, (1 - self.dropout))
user_vecs = tf.nn.dropout(self.user_vecs, (1 - self.dropout))
item_vecs = tf.nn.dropout(self.item_vecs, (1 - self.dropout))
ri = tf.nn.embedding_lookup(item_vecs, tf.cast(self.review_item_adj, dtype=tf.int32))
ri = tf.transpose(tf.random_shuffle(tf.transpose(ri)))
ru = tf.nn.embedding_lookup(user_vecs, tf.cast(self.review_user_adj, dtype=tf.int32))
ru = tf.transpose(tf.random_shuffle(tf.transpose(ru)))
concate_vecs = tf.concat([review_vecs, ru, ri], axis=1)
output = tf.matmul(concate_vecs, self.vars['con_agg_weights'])
return self.act(output) |
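# Shape sketch for ConcatenationAggregator._call (TF1 graph mode, illustrative
# dimensions): with R reviews, review_vecs is [R, d_r]; the embedding lookups
# gather one user vector and one item vector per review, so concate_vecs is
# [R, d_r + d_u + d_i], which must equal the input_dim of con_agg_weights.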
@pytest.mark.parametrize('n_rounds, n_actions, dim_context, base_model_for_evaluation_policy, base_model_for_reg_model', offline_experiment_configurations)
def test_offline_policy_learner_performance(n_rounds: int, n_actions: int, dim_context: int, base_model_for_evaluation_policy: str, base_model_for_reg_model: str) -> None:
def process(i: int):
dataset = SyntheticBanditDataset(n_actions=n_actions, dim_context=dim_context, reward_function=logistic_reward_function, behavior_policy_function=linear_behavior_policy, random_state=i)
bandit_feedback_train = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)
bandit_feedback_test = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)
ipw_policy = IPWLearner(n_actions=dataset.n_actions, base_classifier=base_model_dict[base_model_for_evaluation_policy](**hyperparams[base_model_for_evaluation_policy]))
q_policy = QLearner(n_actions=dataset.n_actions, base_model=base_model_dict[base_model_for_evaluation_policy](**hyperparams[base_model_for_evaluation_policy]))
nn_policy = NNPolicyLearner(n_actions=dataset.n_actions, dim_context=dim_context, off_policy_objective='ipw')
random_policy = RandomPolicy(n_actions=dataset.n_actions)
uniform_sample_weight_policy = UniformSampleWeightLearner(n_actions=dataset.n_actions, base_classifier=base_model_dict[base_model_for_evaluation_policy](**hyperparams[base_model_for_evaluation_policy]))
ipw_policy.fit(context=bandit_feedback_train['context'], action=bandit_feedback_train['action'], reward=bandit_feedback_train['reward'], pscore=bandit_feedback_train['pscore'])
q_policy.fit(context=bandit_feedback_train['context'], action=bandit_feedback_train['action'], reward=bandit_feedback_train['reward'], pscore=bandit_feedback_train['pscore'])
nn_policy.fit(context=bandit_feedback_train['context'], action=bandit_feedback_train['action'], reward=bandit_feedback_train['reward'], pscore=bandit_feedback_train['pscore'])
uniform_sample_weight_policy.fit(context=bandit_feedback_train['context'], action=bandit_feedback_train['action'], reward=bandit_feedback_train['reward'], pscore=bandit_feedback_train['pscore'])
ipw_action_dist = ipw_policy.predict(context=bandit_feedback_test['context'])
q_action_dist = q_policy.predict(context=bandit_feedback_test['context'])
nn_action_dist = nn_policy.predict(context=bandit_feedback_test['context'])
random_action_dist = random_policy.predict(context=bandit_feedback_test['context'])
uniform_sample_weight_action_dist = uniform_sample_weight_policy.predict(context=bandit_feedback_test['context'])
gt_ipw_learner = dataset.calc_ground_truth_policy_value(expected_reward=bandit_feedback_test['expected_reward'], action_dist=ipw_action_dist)
gt_q_learner = dataset.calc_ground_truth_policy_value(expected_reward=bandit_feedback_test['expected_reward'], action_dist=q_action_dist)
gt_nn_learner = dataset.calc_ground_truth_policy_value(expected_reward=bandit_feedback_test['expected_reward'], action_dist=nn_action_dist)
gt_random_policy = dataset.calc_ground_truth_policy_value(expected_reward=bandit_feedback_test['expected_reward'], action_dist=random_action_dist)
gt_uniform_sample_weight_learner = dataset.calc_ground_truth_policy_value(expected_reward=bandit_feedback_test['expected_reward'], action_dist=uniform_sample_weight_action_dist)
return (gt_ipw_learner, gt_q_learner, gt_nn_learner, gt_random_policy, gt_uniform_sample_weight_learner)
n_runs = 10
processed = Parallel(n_jobs=(- 1), verbose=0)([delayed(process)(i) for i in np.arange(n_runs)])
list_gt_ipw = list()
list_gt_q = list()
list_gt_nn = list()
list_gt_random = list()
list_gt_unif_ipw = list()
for (i, gt_policy_values) in enumerate(processed):
(gt_ipw, gt_q, gt_nn, gt_random, gt_unif_ipw) = gt_policy_values
list_gt_ipw.append(gt_ipw)
list_gt_q.append(gt_q)
list_gt_nn.append(gt_nn)
list_gt_random.append(gt_random)
list_gt_unif_ipw.append(gt_unif_ipw)
print(f'Performance of Random is {np.mean(list_gt_random)}')
print(f'Performance of IPWLearner with Uniform Weight is {np.mean(list_gt_unif_ipw)}')
print(f'Performance of IPWLearner is {np.mean(list_gt_ipw)}')
assert (np.mean(list_gt_ipw) > np.mean(list_gt_random))
assert (np.mean(list_gt_ipw) > np.mean(list_gt_unif_ipw))
print(f'Performance of QLearner is {np.mean(list_gt_q)}')
assert (np.mean(list_gt_q) > np.mean(list_gt_random))
assert (np.mean(list_gt_q) > np.mean(list_gt_unif_ipw))
print(f'Performance of NNPolicyLearner is {np.mean(list_gt_nn)}')
assert (np.mean(list_gt_nn) > np.mean(list_gt_random))
assert (np.mean(list_gt_nn) > np.mean(list_gt_unif_ipw)) |
def patch_nonscriptable_classes():
from detectron2.modeling.backbone import ResNet, FPN
def prepare_resnet(self):
ret = deepcopy(self)
ret.stages = nn.ModuleList(ret.stages)
for k in self.stage_names:
delattr(ret, k)
return ret
ResNet.__prepare_scriptable__ = prepare_resnet
def prepare_fpn(self):
ret = deepcopy(self)
ret.lateral_convs = nn.ModuleList(ret.lateral_convs)
ret.output_convs = nn.ModuleList(ret.output_convs)
for (name, _) in self.named_children():
if name.startswith('fpn_'):
delattr(ret, name)
return ret
FPN.__prepare_scriptable__ = prepare_fpn
from detectron2.modeling.roi_heads import StandardROIHeads
if hasattr(StandardROIHeads, '__annotations__'):
StandardROIHeads.__annotations__ = deepcopy(StandardROIHeads.__annotations__)
StandardROIHeads.__annotations__['mask_on'] = torch.jit.Final[bool]
StandardROIHeads.__annotations__['keypoint_on'] = torch.jit.Final[bool] |
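# Context sketch for patch_nonscriptable_classes: __prepare_scriptable__ is the
# hook torch.jit.script() consults to obtain a scripting-friendly copy of a
# module, so these patches swap plain attributes for nn.ModuleList containers
# (and Final annotations) that TorchScript can actually compile.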
class ParallelScheduler(RunScheduler):
def __init__(self, executor, seq_scheduler_class, ui, print_execution_plan):
RunScheduler.__init__(self, executor, ui, print_execution_plan)
self._seq_scheduler_class = seq_scheduler_class
self._lock = RLock()
self._num_worker_threads = self._number_of_threads()
self._remaining_work = None
self._worker_threads = None
def _number_of_threads(self):
non_interference_factor = float(2.5)
return int(floor((cpu_count() / non_interference_factor)))
@staticmethod
def _split_runs(runs):
seq_runs = []
par_runs = []
for run in runs:
if run.execute_exclusively:
seq_runs.append(run)
else:
par_runs.append(run)
return (seq_runs, par_runs)
def _process_sequential_runs(self, runs):
(seq_runs, par_runs) = self._split_runs(runs)
scheduler = self._seq_scheduler_class(self._executor, self.ui, self._print_execution_plan)
scheduler._process_remaining_runs(seq_runs)
return par_runs
def _process_remaining_runs(self, runs):
self._remaining_work = self._process_sequential_runs(runs)
self._worker_threads = [BenchmarkThread(self, i) for i in range(self._num_worker_threads)]
for thread in self._worker_threads:
thread.start()
exceptions = []
for thread in self._worker_threads:
thread.join()
if (thread.exception is not None):
exceptions.append(thread.exception)
if exceptions:
if (len(exceptions) == 1):
raise exceptions[0]
raise BenchmarkThreadExceptions(exceptions)
def _determine_num_work_items_to_take(self):
k = len(self._remaining_work)
per_thread = int(floor((float(k) / float(self._num_worker_threads))))
per_thread = max(1, per_thread)
return per_thread
def get_local_scheduler(self):
return self._seq_scheduler_class(self._executor, self.ui, self._print_execution_plan)
def acquire_work(self):
with self._lock:
if (not self._remaining_work):
return None
num = self._determine_num_work_items_to_take()
assert (num <= len(self._remaining_work))
work = []
for _ in range(num):
work.append(self._remaining_work.pop())
return work |
def train_epoch(model, tokenizer, optimizer, scheduler, train_dataloader, tr_loss, logging_loss, global_step, steps_trained_in_current_epoch, tb_writer, best_dev_perp, args):
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.')
epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=(args.local_rank not in [(- 1), 0]))
cur_dev_perp = None
for (step, batch) in enumerate(epoch_iterator):
if (steps_trained_in_current_epoch > 0):
steps_trained_in_current_epoch -= 1
continue
(inputs, labels) = (batch, batch)
inputs = inputs.to(args.device)
labels = labels.to(args.device)
model.train()
outputs = model(inputs, labels=labels)
loss = outputs[0]
if (args.n_gpu > 1):
loss = loss.mean()
if (args.gradient_accumulation_steps > 1):
loss = (loss / args.gradient_accumulation_steps)
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (((step + 1) % args.gradient_accumulation_steps) == 0):
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
if ((args.local_rank in [(- 1), 0]) and (args.logging_steps > 0) and ((global_step % args.logging_steps) == 0)):
if ((args.local_rank == (- 1)) and args.evaluate_during_training):
results = evaluate(args, model, tokenizer)
for (key, value) in results.items():
tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
cur_dev_perp = results['perplexity'].item()
print('Result-ckpt{}:\n'.format(global_step))
print('Perplexity: {}'.format(cur_dev_perp))
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', ((tr_loss - logging_loss) / args.logging_steps), global_step)
logging_loss = tr_loss
if ((args.local_rank in [(- 1), 0]) and (args.save_steps > 0) and ((global_step % args.save_steps) == 0)):
if args.evaluate_during_training:
save_checkpoint(model, optimizer, scheduler, tokenizer, args, global_step)
if ((cur_dev_perp is not None) and (cur_dev_perp < best_dev_perp)):
output_dir = os.path.join(args.output_dir, 'best_dev_checkpoint')
os.makedirs(output_dir, exist_ok=True)
model_to_save = (model.module if hasattr(model, 'module') else model)
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
print('Updated BEST model\nOLD perplexity: {}\nNEW perplexity: {}'.format(best_dev_perp, cur_dev_perp))
best_dev_perp = cur_dev_perp
print('')
print('')
if ((args.max_steps > 0) and (global_step > args.max_steps)):
epoch_iterator.close()
break
return (model, optimizer, scheduler, global_step, tr_loss, logging_loss, best_dev_perp) |
def eval_group(pred, label):
pred_cols = [unit[1] for unit in pred['groupBy']]
label_cols = [unit[1] for unit in label['groupBy']]
pred_total = len(pred_cols)
label_total = len(label_cols)
cnt = 0
pred_cols = [(col.split('.')[1] if ('.' in col) else col) for col in pred_cols]
label_cols = [(col.split('.')[1] if ('.' in col) else col) for col in label_cols]
for col in pred_cols:
if (col in label_cols):
cnt += 1
label_cols.remove(col)
return (label_total, pred_total, cnt) |
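# Worked example for eval_group (illustrative groupBy units; unit[1] holds the
# column name, and table prefixes such as 't1.' are stripped before matching):
pred = {'groupBy': [(0, 't1.name'), (0, 'age')]}
label = {'groupBy': [(0, 'name')]}
print(eval_group(pred, label))  # -> (1, 2, 1): label_total, pred_total, matched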
def unfold_segments(tensor, tgt_dur, sample_rate=16000):
seg_len = int((tgt_dur * sample_rate))
src_len = len(tensor)
hop_len = (seg_len // 4)
tgt_len = (seg_len if (src_len <= seg_len) else (((src_len // hop_len) + 1) * hop_len))
pad_len = (tgt_len - src_len)
front_pad_len = random.randint(0, pad_len)
tail_pad_len = (pad_len - front_pad_len)
padded_tensor = torch.cat([torch.zeros(front_pad_len), tensor, torch.zeros(tail_pad_len)])
segments = padded_tensor.unfold(0, seg_len, hop_len).unbind(0)
return segments |
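# Minimal usage sketch for unfold_segments (torch and random imported as the
# function above requires): split 2.5 s of 16 kHz audio into 1 s windows with
# a quarter-window hop.
import torch
wav = torch.randn(int(2.5 * 16000))
segs = unfold_segments(wav, tgt_dur=1.0)
print(len(segs), segs[0].shape)  # 8 overlapping windows of 16000 samples each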
class PascalVOCDataset(torch.utils.data.Dataset):
CLASSES = ('__background__ ', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
def __init__(self, data_dir, split, use_difficult=False, transforms=None):
self.root = data_dir
self.image_set = split
self.keep_difficult = use_difficult
self.transforms = transforms
self._annopath = os.path.join(self.root, 'Annotations', '%s.xml')
self._imgpath = os.path.join(self.root, 'JPEGImages', '%s.jpg')
self._imgsetpath = os.path.join(self.root, 'ImageSets', 'Main', '%s.txt')
with open((self._imgsetpath % self.image_set)) as f:
self.ids = f.readlines()
self.ids = [x.strip('\n') for x in self.ids]
self.id_to_img_map = {k: v for (k, v) in enumerate(self.ids)}
cls = PascalVOCDataset.CLASSES
self.class_to_ind = dict(zip(cls, range(len(cls))))
def __getitem__(self, index):
img_id = self.ids[index]
img = Image.open((self._imgpath % img_id)).convert('RGB')
target = self.get_groundtruth(index)
target = target.clip_to_image(remove_empty=True)
if (self.transforms is not None):
(img, target) = self.transforms(img, target)
return (img, target, index)
def __len__(self):
return len(self.ids)
def get_groundtruth(self, index):
img_id = self.ids[index]
anno = ET.parse((self._annopath % img_id)).getroot()
anno = self._preprocess_annotation(anno)
(height, width) = anno['im_info']
target = BoxList(anno['boxes'], (width, height), mode='xyxy')
target.add_field('labels', anno['labels'])
target.add_field('difficult', anno['difficult'])
return target
def _preprocess_annotation(self, target):
boxes = []
gt_classes = []
difficult_boxes = []
TO_REMOVE = 1
for obj in target.iter('object'):
difficult = (int(obj.find('difficult').text) == 1)
if ((not self.keep_difficult) and difficult):
continue
name = obj.find('name').text.lower().strip()
bb = obj.find('bndbox')
box = [bb.find('xmin').text, bb.find('ymin').text, bb.find('xmax').text, bb.find('ymax').text]
bndbox = tuple(map((lambda x: (x - TO_REMOVE)), list(map(int, box))))
boxes.append(bndbox)
gt_classes.append(self.class_to_ind[name])
difficult_boxes.append(difficult)
size = target.find('size')
im_info = tuple(map(int, (size.find('height').text, size.find('width').text)))
res = {'boxes': torch.tensor(boxes, dtype=torch.float32), 'labels': torch.tensor(gt_classes), 'difficult': torch.tensor(difficult_boxes), 'im_info': im_info}
return res
def get_img_info(self, index):
img_id = self.ids[index]
anno = ET.parse((self._annopath % img_id)).getroot()
size = anno.find('size')
im_info = tuple(map(int, (size.find('height').text, size.find('width').text)))
return {'height': im_info[0], 'width': im_info[1]}
def map_class_id_to_class_name(self, class_id):
return PascalVOCDataset.CLASSES[class_id] |
def check_task(task: str) -> Tuple[Dict, Any]:
if (task in TASK_ALIASES):
task = TASK_ALIASES[task]
if (task in SUPPORTED_TASKS):
targeted_task = SUPPORTED_TASKS[task]
return (targeted_task, None)
if task.startswith('translation'):
tokens = task.split('_')
if ((len(tokens) == 4) and (tokens[0] == 'translation') and (tokens[2] == 'to')):
targeted_task = SUPPORTED_TASKS['translation']
return (targeted_task, (tokens[1], tokens[3]))
raise KeyError(f"Invalid translation task {task}, use 'translation_XX_to_YY' format")
raise KeyError(f"Unknown task {task}, available tasks are {(get_supported_tasks() + ['translation_XX_to_YY'])}") |
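# Behavior sketch for check_task: 'translation_en_to_fr' splits on '_' into
# ['translation', 'en', 'to', 'fr'] and yields
# (SUPPORTED_TASKS['translation'], ('en', 'fr')); a plain supported task name
# returns (SUPPORTED_TASKS[task], None).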
def test_MIMO_pipeline():
from speechbrain.utils.data_pipeline import DataPipeline, takes, provides
@takes('text', 'other-text')
@provides('reversed', 'concat')
def text_pipeline(text, other):
return (text[::(- 1)], (text + other))
@takes('reversed', 'concat')
@provides('reversed_twice', 'double_concat')
def second_pipeline(rev, concat):
(yield rev[::(- 1)])
(yield (concat + concat))
@provides('hello-world')
def provider():
(yield 'hello-world')
@takes('hello-world', 'reversed_twice')
@provides('message')
def messenger(hello, name):
return f'{hello}, {name}'
pipeline = DataPipeline(['text', 'other-text'], dynamic_items=[second_pipeline, text_pipeline], output_keys=['text', 'reversed', 'reversed_twice'])
result = pipeline({'text': 'abc', 'other-text': 'def'})
assert (result['reversed'] == 'cba')
assert (result['reversed_twice'] == 'abc')
result = pipeline.compute_specific(['concat'], {'text': 'abc', 'other-text': 'def'})
assert (result['concat'] == 'abcdef')
result = pipeline.compute_specific(['double_concat'], {'text': 'abc', 'other-text': 'def'})
assert (result['double_concat'] == 'abcdefabcdef')
assert ('concat' not in result)
pipeline.add_dynamic_item(messenger)
with pytest.raises(RuntimeError):
pipeline.compute_specific(['message'], {'text': 'abc', 'other-text': 'def'})
pipeline.add_dynamic_item(provider)
result = pipeline.compute_specific(['message'], {'text': 'abc', 'other-text': 'def'})
assert (result['message'] == 'hello-world, abc') |
def get_layer_id_for_vit(name, num_layers):
if (name in ['cls_token', 'pos_embed']):
return 0
elif name.startswith('patch_embed'):
return 0
elif name.startswith('blocks'):
return (int(name.split('.')[1]) + 1)
else:
return num_layers |
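# Sanity checks for get_layer_id_for_vit (hypothetical parameter names in the
# usual ViT naming scheme), as used for layer-wise learning-rate decay:
assert get_layer_id_for_vit('cls_token', num_layers=12) == 0
assert get_layer_id_for_vit('patch_embed.proj.weight', num_layers=12) == 0
assert get_layer_id_for_vit('blocks.3.attn.qkv.weight', num_layers=12) == 4
assert get_layer_id_for_vit('head.weight', num_layers=12) == 12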
class CorrelationFunction(Function):
@staticmethod
def forward(ctx, input1, input2, pad_size=3, kernel_size=3, max_displacement=20, stride1=1, stride2=2, corr_multiply=1):
ctx.save_for_backward(input1, input2)
ctx.pad_size = pad_size
ctx.kernel_size = kernel_size
ctx.max_displacement = max_displacement
ctx.stride1 = stride1
ctx.stride2 = stride2
ctx.corr_multiply = corr_multiply
with torch.cuda.device_of(input1):
rbot1 = input1.new()
rbot2 = input2.new()
output = input1.new()
correlation_cuda.forward(input1, input2, rbot1, rbot2, output, ctx.pad_size, ctx.kernel_size, ctx.max_displacement, ctx.stride1, ctx.stride2, ctx.corr_multiply)
return output
@staticmethod
def backward(ctx, grad_output):
(input1, input2) = ctx.saved_tensors
with torch.cuda.device_of(input1):
rbot1 = input1.new()
rbot2 = input2.new()
grad_input1 = input1.new()
grad_input2 = input2.new()
correlation_cuda.backward(input1, input2, rbot1, rbot2, grad_output, grad_input1, grad_input2, ctx.pad_size, ctx.kernel_size, ctx.max_displacement, ctx.stride1, ctx.stride2, ctx.corr_multiply)
return (grad_input1, grad_input2, None, None, None, None, None, None) |
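# Usage sketch: autograd Functions written with a (ctx, ...) forward are
# invoked through .apply, e.g. out = CorrelationFunction.apply(feat1, feat2);
# both inputs must be CUDA tensors, since the kernels live in the
# correlation_cuda extension.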
def parse_file(task_name, log_dir, foldername):
try:
lines = result_parser_utils.read_rank0_lines(log_dir, foldername)
return ((float(lines[5].split()[2]) / 1000) / 1000)
except Exception:
return None |
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=nn.BatchNorm2d):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if (self.downsample is not None):
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out |
def strip_over_cont(text):
sents = []
skip = False
for line in text.split('\n'):
if (line.strip() == '(Over)'):
skip = True
elif (line.strip() == '(Cont)'):
skip = False
continue
if (not skip):
sents.append(line)
text = '\n'.join(sents)
return text |
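# Example for strip_over_cont: the '(Over)' and '(Cont)' marker lines and
# everything between them are dropped.
text = 'line 1\n(Over)\nskipped\n(Cont)\nline 2'
print(strip_over_cont(text))  # -> 'line 1\nline 2'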
def k2s_matrix(kmatrix):
dimensions = __array_dimensions__(kmatrix).python()
kmatrix_list = __make_array_to_lists__(kmatrix).python()
return matrix(dimensions[0], dimensions[1], kmatrix_list) |
class CppInclude(object):
def __init__(self, member=None, std=None, prefix=None):
assert (member or std)
self.member = member
self.std = std
self.prefix = prefix
@property
def relative_path(self):
if self.std:
return self.std
return '{}.hpp'.format(os.path.join(self.member.type_ref.package_name, self.member.type_ref.name))
@property
def absolute_path(self):
if (not self.prefix):
return self.relative_path
return os.path.join(self.prefix, self.relative_path)
@property
def directive(self):
name = self.absolute_path
if (self.std is not None):
name = '<{}>'.format(name)
else:
name = '"{}"'.format(name)
return '#include {}\n'.format(name)
def package(self):
return self.member.type_ref.package_name
def __hash__(self):
return hash(self.directive)
def __eq__(self, other):
return (hash(self) == hash(other))
def __repr__(self):
return self.directive |
def _spanning_type(type1, type2):
if (type1.is_numeric and type2.is_numeric):
return widest_numeric_type(type1, type2)
elif (type1.is_builtin_type and (type1.name == 'float') and type2.is_numeric):
return widest_numeric_type(c_double_type, type2)
elif (type2.is_builtin_type and (type2.name == 'float') and type1.is_numeric):
return widest_numeric_type(type1, c_double_type)
elif (type1.is_extension_type and type2.is_extension_type):
return widest_extension_type(type1, type2)
elif (type1.is_pyobject or type2.is_pyobject):
return py_object_type
elif type1.assignable_from(type2):
if (type1.is_extension_type and type1.typeobj_is_imported()):
return py_object_type
return type1
elif type2.assignable_from(type1):
if (type2.is_extension_type and type2.typeobj_is_imported()):
return py_object_type
return type2
elif (type1.is_ptr and type2.is_ptr):
if (type1.base_type.is_cpp_class and type2.base_type.is_cpp_class):
common_base = widest_cpp_type(type1.base_type, type2.base_type)
if common_base:
return CPtrType(common_base)
return c_void_ptr_type
else:
return None |
def create_banner(app):
return html.Div(id='banner', className='banner', children=[html.Img(src=app.get_asset_url('logo_small.png')), html.Plaintext(' Powered by Salesforce AI Research')]) |
class JsonProgressBar(BaseProgressBar):
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.i = None
self.size = None
def __iter__(self):
self.size = len(self.iterable)
for (i, obj) in enumerate(self.iterable, start=self.offset):
self.i = i
(yield obj)
def log(self, stats, tag=None, step=None):
step = (step or self.i or 0)
if ((step > 0) and (self.log_interval is not None) and ((step % self.log_interval) == 0)):
update = (((self.epoch - 1) + ((self.i + 1) / float(self.size))) if (self.epoch is not None) else None)
stats = self._format_stats(stats, epoch=self.epoch, update=update)
with rename_logger(logger, tag):
logger.info(json.dumps(stats))
def print(self, stats, tag=None, step=None):
self.stats = stats
if (tag is not None):
self.stats = OrderedDict([(((tag + '_') + k), v) for (k, v) in self.stats.items()])
stats = self._format_stats(self.stats, epoch=self.epoch)
with rename_logger(logger, tag):
logger.info(json.dumps(stats))
def _format_stats(self, stats, epoch=None, update=None):
postfix = OrderedDict()
if (epoch is not None):
postfix['epoch'] = epoch
if (update is not None):
postfix['update'] = round(update, 3)
for key in stats.keys():
postfix[key] = format_stat(stats[key])
return postfix |
def _cycliclrloader(obj, path, end_of_epoch, device=None):
del end_of_epoch
state_dict = torch.load(path, map_location=device)
if (state_dict.get('_scale_fn_ref') == WEAKREF_MARKER):
if (not isinstance(obj._scale_fn_ref, weakref.WeakMethod)):
MSG = 'Loading a CyclicLR scheduler, but _scale_fn_ref did not exist on the instance.'
MSG += ' The scheduler was probably not constructed with the same parameters it was saved with'
MSG += ' (the scale function appears to have changed).'
MSG += ' If this was not intentional, the scheduler might not work correctly.'
warnings.warn(MSG)
try:
    obj.load_state_dict(state_dict, strict=True)
except TypeError:
    obj.load_state_dict(state_dict) |
class DoubleConv(nn.Module):
def __init__(self, in_ch, out_ch):
super(DoubleConv, self).__init__()
self.in_ch = in_ch
self.out_ch = out_ch
self.conv = nn.Sequential(nn.Conv2d(in_ch, out_ch, 3, padding=1), nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True), nn.Conv2d(out_ch, out_ch, 3, padding=1), nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True))
def forward(self, x):
x = self.conv(x)
return x |
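# Minimal usage sketch for DoubleConv: the 3x3 convolutions use padding=1, so
# spatial size is preserved while the channel count changes (U-Net style).
import torch
block = DoubleConv(in_ch=3, out_ch=16)
x = torch.randn(1, 3, 64, 64)
print(block(x).shape)  # torch.Size([1, 16, 64, 64])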
def copy_geometric_data(graph):
(node_attr, edge_index, edge_attr, global_attr) = decompose_graph(graph)
ret = Data(x=node_attr, edge_index=edge_index, edge_attr=edge_attr)
ret.global_attr = global_attr
return ret |
class ChromosomeOutputVariableFactory(Generic[T], metaclass=ABCMeta):
def __init__(self, variable: RuntimeVariable) -> None:
self._variable = variable
def get_data(self, individual: chrom.Chromosome) -> T:
def get_variable(self, individual: chrom.Chromosome) -> sb.OutputVariable[T]:
return sb.OutputVariable(name=self._variable.name, value=self.get_data(individual)) |
class MobileViTForImageClassification(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
requires_backends(self, ['torch']) |
def load_combined_train_data_woc(output_path: str):
train_dir = join(output_path, 'vectors', 'train')
identifiers = torch.cat([
    load_data_tensors_TW(join(train_dir, 'identifiers_param_train_datapoints_x.npy')),
    load_data_tensors_TW(join(train_dir, 'identifiers_ret_train_datapoints_x.npy')),
    load_data_tensors_TW(join(train_dir, 'identifiers_var_train_datapoints_x.npy')),
])
aval_types = torch.cat([
    load_data_tensors_TW(join(train_dir, 'params_train_aval_types_dp.npy')),
    load_data_tensors_TW(join(train_dir, 'ret_train_aval_types_dp.npy')),
    load_data_tensors_TW(join(train_dir, 'var_train_aval_types_dp.npy')),
])
return (identifiers, aval_types) |
def init():
a = []
for i in np.linspace(0, 1, n, False):
for j in np.linspace(0, 1, n, False):
a.append([i, j])
return np.array(a).astype(np.float32) |
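# Usage sketch for init: `n` is a module-level global in the original code, so
# set it before calling (the value here is illustrative).
import numpy as np
n = 4
grid = init()
print(grid.shape)  # (16, 2): an n*n grid of (x, y) points in [0, 1)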
def kl_check(loader, model, device):
(total, num_samples) = (0, 0)
criterion = nn.KLDivLoss(reduction='sum')
sm = nn.Softmax(dim=1)
model.eval()
with torch.no_grad():
for (images, labels, confs) in loader:
(images, labels, confs) = (images.to(device), labels.to(device), confs.to(device))
outputs = model(images)
total += criterion(torch.log(sm(outputs)), confs)
num_samples += len(labels)
return (total / num_samples) |
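# Note on kl_check: torch.log(softmax(x)) can underflow for confident outputs;
# a numerically safer equivalent for the same KL term is
#   total += criterion(F.log_softmax(outputs, dim=1), confs)
# with `import torch.nn.functional as F`.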
def __getattr__(name):
return _sub_module_deprecation(sub_package='optimize', module='zeros', private_modules=['_zeros_py'], all=__all__, attribute=name) |
class CocoDistEvalmAPHook(DistEvalHook):
def evaluate(self, runner, results):
tmp_file = osp.join(runner.work_dir, 'temp_0')
result_files = results2json(self.dataset, results, tmp_file)
res_types = (['bbox', 'segm'] if runner.model.module.with_mask else ['bbox'])
cocoGt = self.dataset.coco
imgIds = cocoGt.getImgIds()
for res_type in res_types:
try:
cocoDt = cocoGt.loadRes(result_files[res_type])
except IndexError:
print('No prediction found.')
break
iou_type = res_type
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.params.imgIds = imgIds
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
metrics = ['mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l']
for i in range(len(metrics)):
key = '{}_{}'.format(res_type, metrics[i])
val = float('{:.3f}'.format(cocoEval.stats[i]))
runner.log_buffer.output[key] = val
runner.log_buffer.output['{}_mAP_copypaste'.format(res_type)] = '{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} {ap[4]:.3f} {ap[5]:.3f}'.format(ap=cocoEval.stats[:6])
runner.log_buffer.ready = True
for res_type in res_types:
os.remove(result_files[res_type]) |
def describe_token_expr(expr):
if (':' in expr):
(type, value) = expr.split(':', 1)
if (type == TOKEN_NAME):
return value
else:
type = expr
return _describe_token_type(type) |
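# Behavior sketch for describe_token_expr (Jinja-style token expressions):
# 'name:endfor' returns the literal value 'endfor', while a bare type such as
# 'integer' falls through to _describe_token_type for a readable description.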
def get_grad_norm_(parameters, norm_type: float=2.0) -> torch.Tensor:
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if (p.grad is not None)]
norm_type = float(norm_type)
if (len(parameters) == 0):
return torch.tensor(0.0)
device = parameters[0].grad.device
if (norm_type == inf):
total_norm = max((p.grad.detach().abs().max().to(device) for p in parameters))
else:
total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
return total_norm |
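# Minimal sketch for get_grad_norm_ (assumes `from math import inf` at module
# level, which the max-norm branch above requires):
import torch
import torch.nn as nn
model = nn.Linear(4, 2)
model(torch.randn(8, 4)).pow(2).mean().backward()
print(get_grad_norm_(model.parameters()))                # total L2 norm
print(get_grad_norm_(model.parameters(), float('inf')))  # max |grad| entry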
def get_reversed_add_exprs(expr: Expression, simplifier: LeanExprSimplifier) -> List[Tuple[str, str]]:
if (isinstance(expr, ExprNeg) or isinstance(expr, ExprParentheses)):
return get_reversed_add_exprs(expr.val, simplifier)
if isinstance(expr, ExprCast):
return get_reversed_add_exprs(expr.expr, simplifier)
if isinstance(expr, ExprPow):
return (get_reversed_add_exprs(expr.a, simplifier) + get_reversed_add_exprs(expr.b, simplifier))
if isinstance(expr, ExprFuncCall):
rev: List[Tuple[str, str]] = []
for arg in expr.rvalue.arguments.args:
rev += get_reversed_add_exprs(arg.expr, simplifier)
return rev
if isinstance(expr, ExprDeref):
return get_reversed_add_exprs(expr.addr, simplifier)
if isinstance(expr, ExprOperator):
rev = []
if (expr.op == '+'):
a_expr = simplifier.visit(expr.a)
b_expr = simplifier.visit(expr.b)
if (isinstance(a_expr, ExprConst) and (not isinstance(b_expr, ExprConst))):
rev.append((to_lean_description(expr.a), to_lean_description(expr.b)))
rev += (get_reversed_add_exprs(expr.a, simplifier) + get_reversed_add_exprs(expr.b, simplifier))
return rev
return [] |
def sparse_categorical_crossentropy(y_true, y_pred):
return K.sparse_categorical_crossentropy(y_true, y_pred) |
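# Usage sketch (assumes `from keras import backend as K` as in the wrapper
# above; evaluated eagerly here purely for illustration):
import numpy as np
from keras import backend as K
y_true = K.constant(np.array([1, 0]))
y_pred = K.constant(np.array([[0.1, 0.9], [0.8, 0.2]]))
print(K.eval(sparse_categorical_crossentropy(y_true, y_pred)))  # per-sample losses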
def create_nmslib_index_instance(params: NmslibHnswParam):
index = nmslib.init(method=params.method, space=params.space, data_type=nmslib.DataType.SPARSE_VECTOR)
return index |
class LinearAttention(nn.Module):
def __init__(self, in_dim=300, mem_dim=300):
super().__init__()
self.linear = nn.Linear(in_dim, mem_dim)
self.fc = nn.Linear((mem_dim * 2), 1)
self.leakyrelu = nn.LeakyReLU(0.01)
def forward(self, feature, aspect_v, dmask):
Q = self.linear(aspect_v)
Q = Q.unsqueeze(1)
Q = Q.expand_as(feature)
Q = self.linear(Q)
feature = self.linear(feature)
att_feature = torch.cat([feature, Q], dim=2)
att_weight = self.fc(att_feature)
dmask = dmask.unsqueeze(2)
att_weight = mask_logits(att_weight, dmask)
attention = F.softmax(att_weight, dim=1)
out = torch.bmm(feature.transpose(1, 2), attention)
out = out.squeeze(2)
return out |
@require_tf
class TFXLNetModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = ((TFXLNetModel, TFXLNetLMHeadModel, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetForQuestionAnsweringSimple) if is_tf_available() else ())
test_pruning = False
class TFXLNetModelTester(object):
def __init__(self, parent, batch_size=13, seq_length=7, mem_len=10, clamp_len=(- 1), reuse_len=15, is_training=True, use_labels=True, vocab_size=99, cutoffs=[10, 50, 80], hidden_size=32, num_attention_heads=4, d_inner=128, num_hidden_layers=5, type_sequence_label_size=2, untie_r=True, bi_data=False, same_length=False, initializer_range=0.05, seed=1, type_vocab_size=2):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.mem_len = mem_len
self.clamp_len = clamp_len
self.reuse_len = reuse_len
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.cutoffs = cutoffs
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.d_inner = d_inner
self.num_hidden_layers = num_hidden_layers
self.bi_data = bi_data
self.untie_r = untie_r
self.same_length = same_length
self.initializer_range = initializer_range
self.seed = seed
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
def prepare_config_and_inputs(self):
input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
segment_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
input_mask = ids_tensor([self.batch_size, self.seq_length], 2, dtype=tf.float32)
input_ids_q = ids_tensor([self.batch_size, (self.seq_length + 1)], self.vocab_size)
perm_mask = tf.zeros((self.batch_size, (self.seq_length + 1), self.seq_length), dtype=tf.float32)
perm_mask_last = tf.ones((self.batch_size, (self.seq_length + 1), 1), dtype=tf.float32)
perm_mask = tf.concat([perm_mask, perm_mask_last], axis=(- 1))
target_mapping = tf.zeros((self.batch_size, 1, self.seq_length), dtype=tf.float32)
target_mapping_last = tf.ones((self.batch_size, 1, 1), dtype=tf.float32)
target_mapping = tf.concat([target_mapping, target_mapping_last], axis=(- 1))
sequence_labels = None
lm_labels = None
is_impossible_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
config = XLNetConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, n_head=self.num_attention_heads, d_inner=self.d_inner, n_layer=self.num_hidden_layers, untie_r=self.untie_r, mem_len=self.mem_len, clamp_len=self.clamp_len, same_length=self.same_length, reuse_len=self.reuse_len, bi_data=self.bi_data, initializer_range=self.initializer_range, num_labels=self.type_sequence_label_size)
return (config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels)
def set_seed(self):
random.seed(self.seed)
tf.random.set_seed(self.seed)
def create_and_check_xlnet_base_model(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels):
model = TFXLNetModel(config)
inputs = {'input_ids': input_ids_1, 'input_mask': input_mask, 'token_type_ids': segment_ids}
(_, _) = model(inputs)
inputs = [input_ids_1, input_mask]
(outputs, mems_1) = model(inputs)
result = {'mems_1': [mem.numpy() for mem in mems_1], 'outputs': outputs.numpy()}
config.mem_len = 0
model = TFXLNetModel(config)
no_mems_outputs = model(inputs)
self.parent.assertEqual(len(no_mems_outputs), 1)
self.parent.assertListEqual(list(result['outputs'].shape), [self.batch_size, self.seq_length, self.hidden_size])
self.parent.assertListEqual(list((list(mem.shape) for mem in result['mems_1'])), ([[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers))
def create_and_check_xlnet_lm_head(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels):
model = TFXLNetLMHeadModel(config)
inputs_1 = {'input_ids': input_ids_1, 'token_type_ids': segment_ids}
(all_logits_1, mems_1) = model(inputs_1)
inputs_2 = {'input_ids': input_ids_2, 'mems': mems_1, 'token_type_ids': segment_ids}
(all_logits_2, mems_2) = model(inputs_2)
inputs_3 = {'input_ids': input_ids_q, 'perm_mask': perm_mask, 'target_mapping': target_mapping}
(logits, _) = model(inputs_3)
result = {'mems_1': [mem.numpy() for mem in mems_1], 'all_logits_1': all_logits_1.numpy(), 'mems_2': [mem.numpy() for mem in mems_2], 'all_logits_2': all_logits_2.numpy()}
self.parent.assertListEqual(list(result['all_logits_1'].shape), [self.batch_size, self.seq_length, self.vocab_size])
self.parent.assertListEqual(list((list(mem.shape) for mem in result['mems_1'])), ([[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers))
self.parent.assertListEqual(list(result['all_logits_2'].shape), [self.batch_size, self.seq_length, self.vocab_size])
self.parent.assertListEqual(list((list(mem.shape) for mem in result['mems_2'])), ([[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers))
def create_and_check_xlnet_qa(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels):
model = TFXLNetForQuestionAnsweringSimple(config)
inputs = {'input_ids': input_ids_1, 'attention_mask': input_mask, 'token_type_ids': segment_ids}
(start_logits, end_logits, mems) = model(inputs)
result = {'start_logits': start_logits.numpy(), 'end_logits': end_logits.numpy(), 'mems': [m.numpy() for m in mems]}
self.parent.assertListEqual(list(result['start_logits'].shape), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list(result['end_logits'].shape), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list((list(mem.shape) for mem in result['mems'])), ([[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers))
def create_and_check_xlnet_sequence_classif(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels):
model = TFXLNetForSequenceClassification(config)
(logits, mems_1) = model(input_ids_1)
result = {'mems_1': [mem.numpy() for mem in mems_1], 'logits': logits.numpy()}
self.parent.assertListEqual(list(result['logits'].shape), [self.batch_size, self.type_sequence_label_size])
self.parent.assertListEqual(list((list(mem.shape) for mem in result['mems_1'])), ([[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers))
def create_and_check_xlnet_for_token_classification(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels):
config.num_labels = input_ids_1.shape[1]
model = TFXLNetForTokenClassification(config)
inputs = {'input_ids': input_ids_1, 'attention_mask': input_mask}
(logits, mems_1) = model(inputs)
result = {'mems_1': [mem.numpy() for mem in mems_1], 'logits': logits.numpy()}
self.parent.assertListEqual(list(result['logits'].shape), [self.batch_size, self.seq_length, config.num_labels])
self.parent.assertListEqual(list((list(mem.shape) for mem in result['mems_1'])), ([[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels) = config_and_inputs
inputs_dict = {'input_ids': input_ids_1}
return (config, inputs_dict)
def setUp(self):
self.model_tester = TFXLNetModelTest.TFXLNetModelTester(self)
self.config_tester = ConfigTester(self, config_class=XLNetConfig, d_inner=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_xlnet_base_model(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_base_model(*config_and_inputs)
def test_xlnet_lm_head(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_lm_head(*config_and_inputs)
def test_xlnet_sequence_classif(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_sequence_classif(*config_and_inputs)
def test_xlnet_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_for_token_classification(*config_and_inputs)
def test_xlnet_qa(self):
self.model_tester.set_seed()
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_qa(*config_and_inputs)
def test_model_from_pretrained(self):
for model_name in list(TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
model = TFXLNetModel.from_pretrained(model_name, cache_dir=CACHE_DIR)
self.assertIsNotNone(model) |
def CLRNet50(input_shape, classes):
return CLRNet(input_shape, classes, bottleneck, repetitions=[3, 4, 6, 3]) |
def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
valid_mask = ((labels >= 0) & (labels != ignore_index))
inds = torch.nonzero((valid_mask & (labels < label_channels)), as_tuple=False)
if (inds.numel() > 0):
bin_labels[(inds, labels[inds])] = 1
valid_mask = valid_mask.view((- 1), 1).expand(labels.size(0), label_channels).float()
if (label_weights is None):
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.view((- 1), 1).repeat(1, label_channels)
bin_label_weights *= valid_mask
return (bin_labels, bin_label_weights) |
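# Minimal sketch for _expand_onehot_labels: class indices become one-hot rows,
# and entries equal to ignore_index are masked out of the weights.
import torch
labels = torch.tensor([0, 2, 255])  # 255 plays the role of ignore_index here
bin_labels, bin_weights = _expand_onehot_labels(labels, None, label_channels=3, ignore_index=255)
print(bin_labels)   # one-hot rows for valid labels; the ignored row stays zero
print(bin_weights)  # 1.0 for valid rows, 0.0 for the ignored row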
def validate_address_sdtypes(column_metadata, column_names):
valid_sdtypes = ('country_code', 'administrative_unit', 'city', 'postcode', 'street_address', 'secondary_address', 'state', 'state_abbr')
bad_columns = []
for column_name in column_names:
if (column_name not in column_metadata):
continue
if (column_metadata[column_name].get('sdtype') not in valid_sdtypes):
bad_columns.append(column_name)
if bad_columns:
raise InvalidMetadataError(f"Columns {bad_columns} have unsupported sdtypes for column relationship type 'address'.") |
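# Minimal sketch for validate_address_sdtypes (illustrative metadata; assumes
# the surrounding module's InvalidMetadataError):
column_metadata = {'city': {'sdtype': 'city'}, 'age': {'sdtype': 'numerical'}}
validate_address_sdtypes(column_metadata, ['city'])  # passes silently
# validate_address_sdtypes(column_metadata, ['age']) would raise InvalidMetadataError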
def set_template(template, cfg):
if ('srwarp-all' in template):
cfg.model = 'srwarp.baseline'
cfg.residual = True
cfg.kernel_size_up = 3
cfg.kernel_net = True
cfg.kernel_net_multi = True
cfg.kernel_depthwise = True
if ('down' in template):
cfg.scale = 2
cfg.patch = 64
cfg.dtrain = ['downsampler.unpaired']
cfg.augmentation = ''
cfg.unpaired_hr = '../dataset/DIV2K/patch_gradient/crop_filtered_odd'
if ('video' in template):
cfg.unpaired_lr = '../experiment/jpeg/q20_test/patch_gradient/crop_seoul_moon_filtered'
else:
cfg.unpaired_lr = '../dataset/DIV2K/patch_gradient_gaussian/crop_lr_{}_filtered_even'.format(cfg.degradation_test)
cfg.kernel_gt = '../dataset/DIV2K/kernel_{}.mat'.format(cfg.degradation_test)
cfg.n_feats = 48
cfg.depth = 4
cfg.batch_size = 16
if ('self' in template):
cfg.loss = 'loss/ds_self.txt'
cfg.trainer = 'downsampler.self'
cfg.save = 'kernels_self'
cfg.epochs = 20
cfg.milestones = [5, 10, 15]
else:
cfg.loss = 'loss/ds_iterative.txt'
cfg.trainer = 'downsampler.iterative'
cfg.save = 'kernels_16'
cfg.epochs = 30
cfg.milestones = [10, 15, 20]
if ('jaeha' in template):
cfg.model = 'jaeha.generator'
cfg.dis = 'jaeha.discriminator'
cfg.loss = 'loss/ds_jaeha.txt'
cfg.trainer = 'jaeha.unpaired'
cfg.lr = 5e-05
cfg.save = 'kernels_jaeha'
cfg.epochs = 80
cfg.milestones = []
else:
cfg.model = 'downsampler.dnew'
cfg.dis = 'downsampler.discriminator_kgan'
cfg.depth_sub = 7
cfg.width_sub = 64
cfg.adjust_weight = 0.01
cfg.reset = True
"""
if 'ft' in template:
    if 'face' in template:
        cfg.scale = 8
        cfg.dtrain = ['sr.celeba_mask']
        cfg.dtest = ['sr.celeba_mask']
        cfg.n_classes = 10
    else:
        cfg.scale = 4
        cfg.dtrain = ['sr.mixed']
        cfg.dtest = ['sr.mixed']
        cfg.n_classes = 8

    if 'rrdb' in template:
        cfg.model = 'sr.rrdb'

    if not cfg.resume:
        if 'face' in template:
            cfg.resume = 'dl-edsr-baseline-face-x8'
        else:
            if 'rrdb' in template:
                cfg.resume = 'dl-rrdb-x4'
            else:
                cfg.resume = 'dl-edsr-baseline-x4'

    if 'mixed' in template:
        cfg.use_div2k = True
        cfg.use_ost = True
        cfg.use_flickr = False
    if 'div2k' in template:
        cfg.use_div2k = True
    if 'ost' in template:
        cfg.use_ost = True
    if 'df2k' in template:
        cfg.use_div2k = True
        cfg.use_flickr = True
    if 'all' in template:
        cfg.use_div2k = True
        cfg.use_flickr = True
        cfg.use_ost = True

    cfg.lr = 1e-4
    cfg.gan_k = 0
    if cfg.use_patch and cfg.use_div2k:
        if cfg.use_flickr:
            cfg.epochs = 28
            cfg.milestones = [4, 7, 14, 21]
        else:
            if cfg.use_ost:
                cfg.epochs = 95
                cfg.milestones = [12, 24, 48, 72]
            else:
                cfg.epochs = 112
                cfg.milestones = [14, 28, 56, 84]
    else:
        cfg.epochs = 200
        cfg.milestones = [25, 50, 100, 150]
    if 'face' in template:
        cfg.epochs //= 2
        cfg.milestones = [d // 2 for d in cfg.milestones]

    if 'more' in template:
        cfg.epochs = int(1.5 * cfg.epochs)
        cfg.milestones = [int(1.5 * d) for d in cfg.milestones]

    if 'madv' in template:
        cfg.loss = 'loss/sr_mask.txt'
        cfg.trainer = 'sr.mask'
        if 'old' in template:
            cfg.dis = 'mask.discriminator_old'
        elif 'early' in template:
            cfg.dis = 'mask.discriminator'
            # Mask is applied at the end of classification layer,
            # so the scale doesn't change. Use early_stop to modify the model
            cfg.dis_early_fork = 1
            cfg.mask_scale = 16
            # Override
            if 'early1' in template:
                cfg.dis_early_fork = 1
                cfg.mask_scale = 16
            elif 'early2' in template:
                cfg.dis_early_fork = 2
                cfg.mask_scale = 16
        elif 'seg' in template:
            cfg.dis = 'mask.discriminator_seg'
            cfg.dis_seg_model = 'segmentation/model.pt'
            cfg.dis_seg_n_feat = 32
            cfg.mask_scale = 16
            # Override
            if 'segd' in template:
                cfg.dis = 'mask.discriminator_segdeep'
                # TODO type other segmentation network arguments here
        else:
            # Default
            cfg.dis = 'mask.discriminator'
    else:
        cfg.no_mask = True
        cfg.loss = 'loss/sr_adversarial.txt'
        cfg.dis = 'srgan.discriminator'
        cfg.dpatch = 0
""" |
class TestThread(object):
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty(((len(self.seeds),) + sz))
out2 = np.empty(((len(self.seeds),) + sz))
t = [Thread(target=function, args=(random.RandomState(s), o)) for (s, o) in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
for (s, o) in zip(self.seeds, out2):
function(random.RandomState(s), o)
if ((np.intp().dtype.itemsize == 4) and (sys.platform == 'win32')):
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, ([(1 / 6.0)] * 6), size=10000)
self.check_function(gen_random, sz=(10000, 6)) |
def pipeline_archetype9():
ink_phase = [
    Letterpress(n_samples=(200, 300), n_clusters=(500, 680), std_range=(2500, 2500),
                value_range=(245, 255), value_threshold_range=(128, 128), blur=0),
    Geometric(scale=(2, 2), randomize=0),
    Faxify(scale_range=(1.0, 1.0), monochrome=1, monochrome_method='threshold_otsu',
           halftone=0, invert=1, half_kernel_size=(1, 1), angle=(73, 73), sigma=(3, 3)),
    Dithering(dither='ordered', order=(2, 2)),
    Geometric(scale=(0.5, 0.5), randomize=0),
    Markup(num_lines_range=(1, 1), markup_length_range=(0.4, 0.4), markup_thickness_range=(2, 3),
           markup_type='underline', markup_color=(0, 0, 0), single_word_mode=False, large_word_mode=False),
    Geometric(translation=(0, 0.15), randomize=0),
]
paper_phase = [
    NoisyLines(noisy_lines_direction=0, noisy_lines_location=[0], noisy_lines_number_range=(1, 1),
               noisy_lines_color=(0, 0, 0), noisy_lines_thickness_range=(1, 1),
               noisy_lines_random_noise_intensity_range=(0.5, 0.9), noisy_lines_length_interval_range=(0, 300),
               noisy_lines_gaussian_kernel_value_range=(1, 1), noisy_lines_overlay_method='ink_to_paper'),
    Geometric(translation=(0, 0.95), randomize=0),
    BadPhotoCopy(noise_type=1, noise_side='none', noise_iteration=(2, 3), noise_size=(1, 3),
                 noise_value=(0, 1), noise_sparsity=(0.9, 0.9), noise_concentration=(0.01, 0.01),
                 blur_noise=0, wave_pattern=0, edge_effect=0),
]
post_phase = [Geometric(rotate_range=(-1, -1), randomize=0)]
pipeline = AugraphyPipeline(ink_phase=ink_phase, paper_phase=paper_phase, post_phase=post_phase)
return pipeline |
class RandomShortPendulum(ModifiablePendulumEnv):
def __init__(self):
super(RandomShortPendulum, self).__init__()
self.length = uniform_exclude_inner(self.np_random.uniform, self.EXTREME_LOWER_LENGTH, self.EXTREME_UPPER_LENGTH, self.RANDOM_LOWER_LENGTH, self.RANDOM_UPPER_LENGTH)
def reset(self, new=True):
if new:
self.length = uniform_exclude_inner(self.np_random.uniform, self.EXTREME_LOWER_LENGTH, self.EXTREME_UPPER_LENGTH, self.RANDOM_LOWER_LENGTH, self.RANDOM_UPPER_LENGTH)
return super(RandomShortPendulum, self).reset(new)
def parameters(self):
parameters = super(RandomShortPendulum, self).parameters
parameters.update({'length': self.length})
return parameters |
def generate_anno(anno_dir, anno_id, split):
anno_path_tmp = os.path.join(anno_dir, (anno_id + '_{}.txt'))
anno_cls_path_tmp = os.path.join(txt_dir_voc2007, '{}_{}.txt')
count = 0
annotations = []
for category in tqdm(categories_list):
anno_path = anno_path_tmp.format(category['name'])
anno_cls_path = anno_cls_path_tmp.format(category['name'], split)
with open(anno_cls_path) as f:
lines = f.readlines()
pos_id = []
for line in lines:
line = line.strip()
line = line.split()
img_id = line[0]
label = line[1]
if (label == '1'):
pos_id.append(img_id)
with open(anno_path) as f:
lines = f.readlines()
used_id = []
for line in lines:
line = line.strip()
line = line.split()
img_id = line[0]
x1 = float(line[2])
y1 = float(line[3])
x2 = float(line[4])
y2 = float(line[5])
if (img_id not in pos_id):
continue
if (img_id in used_id):
continue
used_id.append(img_id)
w = (x2 - x1)
h = (y2 - y1)
area = int((w * h))
anno = {'area': area, 'image_id': img_id, 'bbox': [int(x1), int(y1), int(w), int(h)], 'iscrowd': 0, 'category_id': category['id'], 'id': count}
count += 1
annotations.append(anno)
return annotations |
def gen_logger(name, file=None, copy_root=True, propagate=False):
logger = logging.getLogger(name)
logger.propagate = propagate
if (file is not None):
__add_file_handler(logger, file)
if copy_root:
for hdl in LOG.handlers:
logger.addHandler(hdl)
return logger |
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if (norm_layer is None):
norm_layer = nn.BatchNorm2d
if ((groups != 1) or (base_width != 64)):
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if (dilation > 1):
raise NotImplementedError('Dilation > 1 not supported in BasicBlock')
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if (self.downsample is not None):
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out |
def load_data(ds_dir, batch_size, n_cpu, cut_len):
torchaudio.set_audio_backend('sox_io')
train_dir = os.path.join(ds_dir, 'train')
test_dir = os.path.join(ds_dir, 'test')
train_ds = DemandDataset(train_dir, cut_len)
test_ds = DemandDataset(test_dir, cut_len)
train_dataset = torch.utils.data.DataLoader(dataset=train_ds, batch_size=batch_size, pin_memory=True, shuffle=False, sampler=DistributedSampler(train_ds), drop_last=True, num_workers=n_cpu)
test_dataset = torch.utils.data.DataLoader(dataset=test_ds, batch_size=batch_size, pin_memory=True, shuffle=False, sampler=DistributedSampler(test_ds), drop_last=False, num_workers=n_cpu)
return (train_dataset, test_dataset) |
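# Note on load_data: shuffle stays False because DistributedSampler owns the
# shuffling; call train_dataset.sampler.set_epoch(epoch) at the start of each
# epoch so every process sees a fresh shuffle order.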
class AdobeDataset(data.Dataset):
def __init__(self, opt):
super(AdobeDataset, self).__init__()
self.opt = opt
self.interval_list = opt['interval_list']
self.random_reverse = opt['random_reverse']
logger.info('Temporal augmentation interval list: [{}], with random reverse is {}.'.format(','.join((str(x) for x in opt['interval_list'])), self.random_reverse))
self.half_N_frames = (opt['N_frames'] // 2)
self.LR_N_frames = (1 + self.half_N_frames)
assert (self.LR_N_frames > 1), 'Error: Not enough LR frames to interpolate'
self.LR_index_list = []
for i in range(self.LR_N_frames):
self.LR_index_list.append((i * 2))
(self.GT_root, self.LQ_root) = (opt['dataroot_GT'], opt['dataroot_LQ'])
self.data_type = self.opt['data_type']
self.LR_input = (opt['GT_size'] != opt['LQ_size'])
if opt['cache_keys']:
logger.info('Using cache keys: {}'.format(opt['cache_keys']))
cache_keys = opt['cache_keys']
else:
cache_keys = 'Vimeo7_train_keys.pkl'
logger.info('Using cache keys - {}.'.format(cache_keys))
if (self.data_type == 'lmdb'):
(self.GT_env, self.LQ_env) = (None, None)
elif (self.data_type == 'mc'):
self.mclient = None
elif (self.data_type == 'img'):
pass
else:
raise ValueError('Wrong data type: {}'.format(self.data_type))
with open('/work/abcd233746pc/adobe240fps_folder_train.txt') as t:
video_list = t.readlines()
self.file_list = []
self.gt_list = []
if (opt['ref_num'] is None):
opt['ref_num'] = 2
interval_num = (opt['ref_num'] - 1)
self.interval_num = interval_num
for video in video_list:
if (video[(- 1)] == '\n'):
video = video[:(- 1)]
index = 0
interval = 7
frames = os.listdir(os.path.join(self.GT_root, video))
frames = sorted([int(frame[:(- 4)]) for frame in frames])
frames = [(str(frame) + '.png') for frame in frames]
while ((index + ((interval + 1) * interval_num)) < len(frames)):
videoInputs = [frames[i] for i in range(index, ((index + ((1 + interval) * interval_num)) + 1), (1 + interval))]
video_all_gt = [frames[i] for i in range((index + ((1 + interval) * (interval_num // 2))), ((index + ((1 + interval) * ((interval_num // 2) + 1))) + 1))]
videoInputs = [os.path.join(video, f) for f in videoInputs]
videoGts = [os.path.join(video, f) for f in video_all_gt]
self.file_list.append(videoInputs)
self.gt_list.append(videoGts)
index += 1
print(len(self.file_list))
print(len(self.gt_list))
def _init_lmdb(self):
self.GT_env = lmdb.open(self.opt['dataroot_GT'], readonly=True, lock=False, readahead=False, meminit=False)
self.LQ_env = lmdb.open(self.opt['dataroot_LQ'], readonly=True, lock=False, readahead=False, meminit=False)
def _ensure_memcached(self):
if (self.mclient is None):
server_list_config_file = None
client_config_file = None
self.mclient = mc.MemcachedClient.GetInstance(server_list_config_file, client_config_file)
def _read_img_mc(self, path):
value = mc.pyvector()
self.mclient.Get(path, value)
value_buf = mc.ConvertBuffer(value)
img_array = np.frombuffer(value_buf, np.uint8)
img = cv2.imdecode(img_array, cv2.IMREAD_UNCHANGED)
return img
def _read_img_mc_BGR(self, path, name_a, name_b):
img_B = self._read_img_mc(osp.join((path + '_B'), name_a, (name_b + '.png')))
img_G = self._read_img_mc(osp.join((path + '_G'), name_a, (name_b + '.png')))
img_R = self._read_img_mc(osp.join((path + '_R'), name_a, (name_b + '.png')))
img = cv2.merge((img_B, img_G, img_R))
return img
def __getitem__(self, index):
scale = self.opt['scale']
img_GT_l = []
img_LQop_l = [osp.join(self.GT_root, fp) for fp in self.file_list[index]]
img_GTop_l = np.array([osp.join(self.GT_root, fp) for fp in self.gt_list[index]])
gt_sampled_idx = (([0] + sorted(random.sample(range(len(img_GTop_l)), self.opt['sample_num']))) + [(len(img_GTop_l) - 1)])
img_GTop_l = img_GTop_l[gt_sampled_idx]
times = []
for i in gt_sampled_idx[1:(- 1)]:
times.append(torch.tensor([(i / 8)]))
img_LQo_l = [cv2.imread(fp) for fp in img_LQop_l]
img_GTo_l = [cv2.imread(fp) for fp in img_GTop_l]
if (img_LQo_l[0] is None):
print([osp.join(self.GT_root, fp) for fp in self.file_list[index]])
print([osp.join(self.GT_root, fp) for fp in self.gt_list[index]])
return (img_LQo_l, img_GTo_l, times, img_LQop_l)
def __len__(self):
return len(self.file_list) |
def _seg_17():
return [(7737, 'V'), (7738, 'M', u'l'), (7739, 'V'), (7740, 'M', u'l'), (7741, 'V'), (7742, 'M', u'm'), (7743, 'V'), (7744, 'M', u'm'), (7745, 'V'), (7746, 'M', u'm'), (7747, 'V'), (7748, 'M', u'n'), (7749, 'V'), (7750, 'M', u'n'), (7751, 'V'), (7752, 'M', u'n'), (7753, 'V'), (7754, 'M', u'n'), (7755, 'V'), (7756, 'M', u'o'), (7757, 'V'), (7758, 'M', u'o'), (7759, 'V'), (7760, 'M', u'o'), (7761, 'V'), (7762, 'M', u'o'), (7763, 'V'), (7764, 'M', u'p'), (7765, 'V'), (7766, 'M', u'p'), (7767, 'V'), (7768, 'M', u'r'), (7769, 'V'), (7770, 'M', u'r'), (7771, 'V'), (7772, 'M', u'r'), (7773, 'V'), (7774, 'M', u'r'), (7775, 'V'), (7776, 'M', u's'), (7777, 'V'), (7778, 'M', u's'), (7779, 'V'), (7780, 'M', u's'), (7781, 'V'), (7782, 'M', u's'), (7783, 'V'), (7784, 'M', u's'), (7785, 'V'), (7786, 'M', u't'), (7787, 'V'), (7788, 'M', u't'), (7789, 'V'), (7790, 'M', u't'), (7791, 'V'), (7792, 'M', u't'), (7793, 'V'), (7794, 'M', u'u'), (7795, 'V'), (7796, 'M', u'u'), (7797, 'V'), (7798, 'M', u'u'), (7799, 'V'), (7800, 'M', u'u'), (7801, 'V'), (7802, 'M', u'u'), (7803, 'V'), (7804, 'M', u'v'), (7805, 'V'), (7806, 'M', u'v'), (7807, 'V'), (7808, 'M', u'w'), (7809, 'V'), (7810, 'M', u'w'), (7811, 'V'), (7812, 'M', u'w'), (7813, 'V'), (7814, 'M', u'w'), (7815, 'V'), (7816, 'M', u'w'), (7817, 'V'), (7818, 'M', u'x'), (7819, 'V'), (7820, 'M', u'x'), (7821, 'V'), (7822, 'M', u'y'), (7823, 'V'), (7824, 'M', u'z'), (7825, 'V'), (7826, 'M', u'z'), (7827, 'V'), (7828, 'M', u'z'), (7829, 'V'), (7834, 'M', u'a'), (7835, 'M', u's'), (7836, 'V'), (7838, 'M', u'ss'), (7839, 'V'), (7840, 'M', u'a'), (7841, 'V')] |
class attentionNet(nn.Module):
def __init__(self, squeezeFilters=32, expandFilters=64, scailingFactor=2, numAttentionBlock=10):
super(attentionNet, self).__init__()
self.inputConv = nn.Conv2d(3, squeezeFilters, 3, 1, 1)
self.inputConv_bn = nn.BatchNorm2d(squeezeFilters)
self.featureAttention0 = selfAttention(squeezeFilters, squeezeFilters, 3, 1, 1)
self.featureAttention0_bn = nn.BatchNorm2d(squeezeFilters)
self.globalPooling = nn.AvgPool2d(2, 2)
self.ab1 = attentionGuidedResBlock(squeezeFilters, expandFilters)
self.ab2 = attentionGuidedResBlock(squeezeFilters, expandFilters)
self.ab3 = attentionGuidedResBlock(squeezeFilters, expandFilters)
self.ab4 = attentionGuidedResBlock(squeezeFilters, expandFilters)
self.ab5 = attentionGuidedResBlock(squeezeFilters, expandFilters)
self.ab6 = attentionGuidedResBlock(squeezeFilters, expandFilters)
self.ab7 = attentionGuidedResBlock(squeezeFilters, expandFilters)
self.ab8 = attentionGuidedResBlock(squeezeFilters, expandFilters)
self.ab9 = attentionGuidedResBlock(squeezeFilters, expandFilters)
self.ab10 = attentionGuidedResBlock(squeezeFilters, expandFilters)
self.ab11 = attentionGuidedResBlock(squeezeFilters, expandFilters)
self.ab12 = attentionGuidedResBlock(squeezeFilters, expandFilters)
self.featureAttention1 = selfAttention(squeezeFilters, squeezeFilters, 3, 1, 1)
self.featureAttention1_bn = nn.BatchNorm2d(squeezeFilters)
self.psUpsampling = pixelShuffleUpsampling(inputFilters=squeezeFilters, scailingFactor=scailingFactor)
self.featureAttention2 = selfAttention(squeezeFilters, squeezeFilters, 3, 1, 1)
self.featureAttention2_bn = nn.BatchNorm2d(squeezeFilters)
self.fb1 = attentionGuidedResBlock(squeezeFilters, expandFilters, bn=False)
self.fb2 = attentionGuidedResBlock(squeezeFilters, expandFilters, bn=False)
self.fb3 = attentionGuidedResBlock(squeezeFilters, expandFilters, bn=False)
self.fb4 = attentionGuidedResBlock(squeezeFilters, expandFilters, bn=False)
self.fb5 = attentionGuidedResBlock(squeezeFilters, expandFilters, bn=False)
self.fb6 = attentionGuidedResBlock(squeezeFilters, expandFilters, bn=False)
self.featureAttention3 = selfAttention(squeezeFilters, squeezeFilters, 3, 1, 1)
self.featureAttention3_bn = nn.BatchNorm2d(squeezeFilters)
self.convOut = nn.Conv2d(squeezeFilters, 3, 1)
self._initialize_weights()
def forward(self, img):
xInp = F.relu(self.inputConv_bn(self.inputConv(img)))
xFA0 = F.relu(self.featureAttention0_bn(self.featureAttention0(xInp)))
xGAP = self.globalPooling(xFA0)
xAB1 = self.ab1(xGAP)
xAB2 = (self.ab2(xAB1) + xAB1)
xAB3 = (self.ab3(xAB2) + xAB2)
xAB4 = (self.ab4(xAB3) + xAB3)
xAB5 = (self.ab5(xAB4) + xAB4)
xAB6 = (self.ab6(xAB5) + xAB5)
xAB7 = (self.ab7(xAB6) + xAB6)
xAB8 = (self.ab8(xAB7) + xAB7)
xAB9 = (self.ab9(xAB8) + xAB8)
xAB10 = (self.ab10(xAB9) + xAB9)
xFA1 = F.relu(self.featureAttention1_bn(self.featureAttention1(xAB10)))
xUPS = (F.relu(self.psUpsampling(xFA1)) + xInp)
xFA2 = F.relu(self.featureAttention2_bn(self.featureAttention2(xUPS)))
xFB1 = self.fb1(xFA2)
xFB2 = (self.fb2(xFB1) + xFB1)
xFB3 = (self.fb3(xFB2) + xFB2)
xFB4 = (self.fb4(xFB3) + xFB3)
xFB5 = (self.fb5(xFB4) + xFB4)
xFA3 = (F.relu(self.featureAttention3_bn(self.featureAttention3(xFB5))) + xFA2)
return torch.tanh((self.convOut(xFA3) + img))
def _initialize_weights(self):
self.inputConv.apply(init_weights)
self.featureAttention0.apply(init_weights)
self.globalPooling.apply(init_weights)
self.ab1.apply(init_weights)
self.ab2.apply(init_weights)
self.ab3.apply(init_weights)
self.ab4.apply(init_weights)
self.ab5.apply(init_weights)
self.ab6.apply(init_weights)
self.ab7.apply(init_weights)
self.ab8.apply(init_weights)
self.ab9.apply(init_weights)
self.ab10.apply(init_weights)
self.ab11.apply(init_weights)
self.ab12.apply(init_weights)
self.featureAttention1.apply(init_weights)
self.psUpsampling.apply(init_weights)
self.featureAttention2.apply(init_weights)
self.fb1.apply(init_weights)
self.fb2.apply(init_weights)
self.fb3.apply(init_weights)
self.fb4.apply(init_weights)
self.fb5.apply(init_weights)
self.fb6.apply(init_weights)
self.featureAttention3.apply(init_weights)
self.convOut.apply(init_weights) |
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('shape_a, shape_b', [((), ()), ((), (2, 2, 2)), ((2, 2, 2), ())])
def test_backward_dot_have_scalar(seed, shape_a, shape_b):
rng = np.random.RandomState(seed)
inputs = []
func_args = []
if (not shape_a):
a = rng.randn()
func_args += [a]
else:
a = rng.randn(*shape_a).astype(np.float32)
inputs += [a]
if (not shape_b):
b = rng.randn()
func_args += [b]
else:
b = rng.randn(*shape_b).astype(np.float32)
inputs += [b]
function_network_tester(rng, F.dot, inputs, func_args=func_args, have_scalar=True) |
def test_persistDockerImage1():
designerUrl = (designerIp + '/dockerimage')
headers = {'Content-Type': 'application/json'}
r = requests.post(designerUrl, data=json.dumps(data.test200), headers=headers)
assert (r.status_code == 200) |
def ref_grad_binary_tanh(x, dy, **kw):
return (dy * (1 - np.floor(np.minimum(np.abs(x), 1)))).flatten() |
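The reference gradient above encodes the straight-through estimator for a binarizing tanh: (1 - floor(min(|x|, 1))) equals 1 where |x| < 1 and 0 elsewhere, so upstream gradients pass only inside the unit interval. A minimal numeric check of that window, assuming only numpy:
import numpy as np

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
dy = np.ones_like(x)
window = 1 - np.floor(np.minimum(np.abs(x), 1))  # 1 inside (-1, 1), 0 outside
assert np.array_equal(window, [0.0, 1.0, 1.0, 1.0, 0.0])
assert np.array_equal(dy * window, window)  # upstream gradient masked by the window |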
def test_fv_e2e():
dim = 128
num_modes = 8
expected_dim = (((2 * num_modes) * dim) + num_modes)
descriptors = [np.random.random((np.random.randint(5, 30), dim)) for _ in range(10)]
gmm = learn_gmm(descriptors, n_modes=num_modes)
fisher_vec = fisher_vector(descriptors[0], gmm)
assert (len(fisher_vec) == expected_dim) |
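The expected dimension follows the standard Fisher-vector layout: K mean gradients and K covariance gradients of dimension D each, plus K mixture-weight terms, i.e. 2*K*D + K. For K=8 modes and D=128 that is 2*8*128 + 8 = 2056. A one-line arithmetic check:
num_modes, dim = 8, 128
expected_dim = 2 * num_modes * dim + num_modes  # means + variances + weights
assert expected_dim == 2056 |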
def test_iterate_seqs_no_chunking_1():
dataset = DummyDataset(input_dim=2, output_dim=3, num_seqs=2, seq_len=11)
dataset.chunk_step = 0
dataset.chunk_size = 0
dataset.init_seq_order(1)
seqs = list(dataset.iterate_seqs())
assert_equal(len(seqs), 2)
assert_equal(seqs[0], (0, 0, 11))
assert_equal(seqs[1], (1, 0, 11)) |
def _run_selection(args, data):
res = []
for partition in data:
(assignments, shard_names, filenames, clustering_types) = partition
samples_list = run_greedy(args, assignments, shard_names, filenames, clustering_types, args.subset.size, args.subset.ratio, measure_name=args.measure_name, cluster_pairing=args.clustering.pairing, shuffle_candidates=args.shuffle_candidates, verbose=args.verbose)
res.append(samples_list)
return list(chain(*res)) |
class CIFAR10(Dataset):
def __init__(self, path):
self.cifar10 = datasets.CIFAR10(root=path, download=True, train=True, transform=cifar10_transformer())
def __getitem__(self, index):
if isinstance(index, numpy.float64):
index = index.astype(numpy.int64)
(data, target) = self.cifar10[index]
return (data, target, index)
def __len__(self):
return len(self.cifar10) |
def predict():
args = get_args()
kwargs = args.__dict__
save_dir = kwargs['save_dir']
common.setup_logger(save_dir, log_name='inten_pred.log', debug=kwargs['debug'])
pl.utilities.seed.seed_everything(kwargs.get('seed'))
yaml_args = yaml.dump(kwargs)
logging.info(f'''
{yaml_args}''')
with open((Path(save_dir) / 'args.yaml'), 'w') as fp:
fp.write(yaml_args)
dataset_name = kwargs['dataset_name']
data_dir = (Path('data/spec_datasets') / dataset_name)
labels = (data_dir / kwargs['dataset_labels'])
df = pd.read_csv(labels, sep='\t')
if (kwargs['subset_datasets'] != 'none'):
splits = pd.read_csv(((data_dir / 'splits') / kwargs['split_name']), sep='\t')
folds = set(splits.keys())
folds.remove('spec')
fold_name = list(folds)[0]
if (kwargs['subset_datasets'] == 'train_only'):
names = splits[(splits[fold_name] == 'train')]['spec'].tolist()
elif (kwargs['subset_datasets'] == 'test_only'):
names = splits[(splits[fold_name] == 'test')]['spec'].tolist()
elif (kwargs['subset_datasets'] == 'debug_special'):
names = splits[(splits[fold_name] == 'test')]['spec'].tolist()
names = names[:5]
else:
raise NotImplementedError()
df = df[df['spec'].isin(names)]
best_checkpoint = kwargs['checkpoint_pth']
model = scarf_model.ScarfIntenNet.load_from_checkpoint(best_checkpoint)
logging.info(f'Loaded model from {best_checkpoint}')
num_workers = kwargs.get('num_workers', 0)
form_dag_folder = Path(kwargs['formula_folder'])
all_json_pths = [Path(i) for i in form_dag_folder.glob('*.json')]
name_to_json = {i.stem.replace('pred_', ''): i for i in all_json_pths}
graph_featurizer = nn_utils.MolDGLGraph(atom_feats=model.atom_feats, bond_feats=model.bond_feats, pe_embed_k=model.pe_embed_k)
pred_dataset = scarf_data.IntenDataset(df, num_workers=num_workers, data_dir=data_dir, form_map=name_to_json, graph_featurizer=graph_featurizer, root_embedder=model.root_embedder)
collate_fn = pred_dataset.get_collate_fn()
pred_loader = DataLoader(pred_dataset, num_workers=kwargs['num_workers'], collate_fn=collate_fn, shuffle=False, batch_size=kwargs['batch_size'])
model.eval()
gpu = kwargs['gpu']
if gpu:
model = model.cuda()
device = ('cuda' if gpu else 'cpu')
pred_list = []
binned_out = kwargs['binned_out']
with torch.no_grad():
for batch in tqdm(pred_loader):
spec_names = batch['names']
form_strs = batch['form_strs']
graphs = batch['graphs'].to(device)
formulae = batch['formulae'].to(device)
diffs = batch['diffs'].to(device)
num_forms = batch['num_forms'].to(device)
adducts = batch['adducts'].to(device)
outputs = model.predict(graphs=graphs, full_formula=formulae, diffs=diffs, num_forms=num_forms, adducts=adducts, binned_out=binned_out)
output_specs = outputs['spec']
for (spec, form_str, output_spec) in zip(spec_names, form_strs, output_specs):
output_obj = {'spec_name': spec, 'forms': form_str, 'form_masses': [common.formula_mass(i) for i in form_str], 'output_spec': output_spec, 'smiles': pred_dataset.name_to_smiles[spec], 'root_form': pred_dataset.name_to_root_form[spec]}
pred_list.append(output_obj)
if binned_out:
spec_names_ar = [str(i['spec_name']) for i in pred_list]
smiles_ar = [str(i['smiles']) for i in pred_list]
inchikeys = [common.inchikey_from_smiles(i) for i in smiles_ar]
preds = np.vstack([i['output_spec'] for i in pred_list])
output = {'preds': preds, 'smiles': smiles_ar, 'ikeys': inchikeys, 'spec_names': spec_names_ar, 'num_bins': model.inten_buckets.shape[(- 1)], 'upper_limit': 1500, 'sparse_out': False}
out_file = (Path(kwargs['save_dir']) / 'binned_preds.p')
with open(out_file, 'wb') as fp:
pickle.dump(output, fp)
else:
for pred_obj in pred_list:
mz = pred_obj['form_masses']
intens = [float(i) for i in pred_obj['output_spec']]
cand_form = pred_obj['root_form']
smiles = pred_obj['smiles']
form_list = pred_obj['forms']
spec_name = pred_obj['spec_name']
tbl = {'mz': mz, 'ms2_inten': intens, 'rel_inten': intens, 'mono_mass': mz, 'formula_mass_no_adduct': mz, 'mass_diff': ([0] * len(mz)), 'formula': form_list, 'ions': (['H+'] * len(mz))}
new_form = {'cand_form': cand_form, 'spec_name': spec_name, 'cand_ion': 'H+', 'output_tbl': tbl, 'smiles': smiles}
save_path = (Path(kwargs['save_dir']) / 'tree_preds_inten')
save_path.mkdir(exist_ok=True)
out_file = (save_path / f'pred_{spec_name}.json')
with open(out_file, 'w') as fp:
json.dump(new_form, fp, indent=2) |
class Refiner():
def __init__(self, prompt, args):
self.prompt = prompt
self.temperature = args.temperature
self.top_p = args.top_p
def set_refinement_fields(self, object_dlg_history: List[DialogueTurn], new_dlg_turn: DialogueTurn, engine_dict):
prompt_output = llm_generate(template_file=self.prompt, prompt_parameter_values={'dlg': object_dlg_history, 'new_dlg_turn': new_dlg_turn}, engine=engine_dict['default'], max_tokens=300, temperature=self.temperature, top_p=self.top_p, stop_tokens=None, postprocess=False)
if self.prompt.endswith('refine_w_feedback.prompt'):
return Refiner.handle_refinement_with_feedback(new_dlg_turn, prompt_output)
elif self.prompt.endswith('refine.prompt'):
return Refiner.handle_refinement_without_feedback(new_dlg_turn, prompt_output)
else:
raise ValueError('Unknown refinement prompt.')
@staticmethod
def handle_refinement_without_feedback(new_dlg_turn, prompt_output):
new_dlg_turn.refined_utterance = prompt_output.strip()
return new_dlg_turn.refined_utterance
@staticmethod
def handle_refinement_with_feedback(new_dlg_turn, prompt_output: str):
refine_identifiers = ['Revised response after applying this feedback:', 'Response after applying this feedback:']
for identifier in refine_identifiers:
if (identifier in prompt_output):
(feedback, prompt_output) = prompt_output.split(identifier)
(new_dlg_turn.feedback, new_dlg_turn.feedback_scores) = Refiner._parse_feedback(feedback)
if (sum(new_dlg_turn.feedback_scores) == (100 * len(new_dlg_turn.feedback_scores))):
new_dlg_turn.refined_utterance = new_dlg_turn.agent_utterance
else:
new_dlg_turn.refined_utterance = prompt_output.strip()
return new_dlg_turn.refined_utterance
logger.error('Skipping refinement due to malformatted Refined response: %s', prompt_output)
new_dlg_turn.refined_utterance = new_dlg_turn.agent_utterance
return new_dlg_turn.refined_utterance
@staticmethod
def _parse_feedback(feedback):
if ('User:' in feedback):
feedback = feedback.split('User:')[0]
feedback_lines = feedback.strip().split('\n')
if ((len(feedback_lines) < 4) or (len(feedback_lines) > 5)):
logger.error('Feedback malformatted')
logger.error(feedback_lines)
return ([], [])
scores = []
for line in feedback_lines:
score = line.strip().split(' ')[(- 1)].strip()
if ((score == 'N/A') or ('this criterion is not applicable' in line)):
score = 100
else:
try:
score = int(score.split('/')[0])
except ValueError:
logger.error(f'Feedback line malformatted: {line}')
score = 100
scores.append(score)
logger.info('Feedback scores: %s', scores)
return (feedback_lines, scores) |
def get_op_loc(op):
if isinstance(op, (OpView, Operation)):
res = str(op.location).replace('loc(', '').strip(')')
if ('fused' in res):
res = match_fused_loc.search(res).group(1)
return escape(res)
elif isinstance(op, Value):
return get_op_loc(op.owner)
raise NotImplementedError() |
class SentenceTerScorer(Scorer):
def __init__(self, argument_string):
Scorer.__init__(self, argument_string='')
self._reference = None
self.additional_flags = argument_string
def set_reference(self, reference_tokens):
if hasattr(self._reference, 'extension'):
self._reference.lock.acquire()
clean_p = subprocess.Popen(self._reference.clean_cmd, shell=True)
clean_p.communicate()
self._reference.lock.release()
self._reference = SentenceTerReference(reference_tokens, additional_flags=self.additional_flags) |
def _format_axis(fig: Figure, minv: float, maxv: float, axis: str) -> None:
divisor = 4.5
if (np.isinf(minv) or np.isinf(maxv)):
gap = 1.0
else:
gap = ((maxv - minv) / divisor)
(_, after) = f'{gap:.0e}'.split('e')
round_to = ((- 1) * int(after))
minv = np.round(minv, round_to)
gap = np.round(gap, round_to)
ticks = [float(minv)]
if (not np.isinf(maxv)):
while ((max(ticks) + gap) < maxv):
ticks.append((max(ticks) + gap))
ticks = np.round(ticks, round_to)
ticks = [(int(tick) if tick.is_integer() else tick) for tick in ticks]
formatted_ticks = _format_ticks(ticks)
if (axis == 'x'):
fig.xgrid.ticker = ticks
fig.xaxis.ticker = ticks
fig.xaxis.major_label_overrides = dict(zip(ticks, formatted_ticks))
fig.xaxis.major_label_text_font_size = '10pt'
fig.xaxis.major_label_standoff = 7
fig.xaxis.major_tick_line_color = None
elif (axis == 'y'):
fig.ygrid.ticker = ticks
fig.yaxis.ticker = ticks
fig.yaxis.major_label_overrides = dict(zip(ticks, formatted_ticks))
fig.yaxis.major_label_text_font_size = '10pt'
fig.yaxis.major_label_standoff = 5 |
class CSPDarknet(nn.Module):
cfg = {'n': [0.33, 0.25], 't': [0.33, 0.375], 's': [0.33, 0.5], 'm': [0.67, 0.75], 'l': [1.0, 1.0], 'x': [1.33, 1.25]}
def __init__(self, subtype='cspdark_s', out_channels=[64, 128, 256, 512, 1024], layers=[3, 9, 9, 3], spp_ksizes=(5, 9, 13), depthwise=False, conv_cfg=None, norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), act_cfg=dict(type='Swish'), out_stages=[2, 3, 4], output_stride=32, backbone_path=None, pretrained=False, frozen_stages=(- 1), norm_eval=False):
super(CSPDarknet, self).__init__()
self.subtype = subtype
self.out_stages = out_stages
self.output_stride = output_stride
self.backbone_path = backbone_path
self.pretrained = pretrained
self.out_channels = out_channels
self.layers = layers
self.frozen_stages = frozen_stages
self.norm_eval = norm_eval
conv = (DepthwiseSeparableConvModule if depthwise else ConvModule)
(depth_mul, width_mul) = self.cfg[self.subtype.split('_')[1]]
self.out_channels = list(map((lambda x: int((x * width_mul))), out_channels))
layers = list(map((lambda x: max(round((x * depth_mul)), 1)), layers))
self.stem = Focus(3, self.out_channels[0], kernel_sizes=3, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
self.stage1 = nn.Sequential(conv(self.out_channels[0], self.out_channels[1], 3, 2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg), CSPLayer(self.out_channels[1], self.out_channels[1], n=layers[0], shortcut=True, depthwise=depthwise, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
self.stage2 = nn.Sequential(conv(self.out_channels[1], self.out_channels[2], 3, 2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg), CSPLayer(self.out_channels[2], self.out_channels[2], n=layers[1], shortcut=True, depthwise=depthwise, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
self.stage3 = nn.Sequential(conv(self.out_channels[2], self.out_channels[3], 3, 2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg), CSPLayer(self.out_channels[3], self.out_channels[3], n=layers[2], shortcut=True, depthwise=depthwise, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
self.stage4 = nn.Sequential(conv(self.out_channels[3], self.out_channels[4], 3, 2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg), SPPF(self.out_channels[4], self.out_channels[4], kernel_sizes=spp_ksizes, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg), CSPLayer(self.out_channels[4], self.out_channels[4], n=layers[3], shortcut=False, depthwise=depthwise, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
self.out_channels = self.out_channels[self.out_stages[0]:(self.out_stages[(- 1)] + 1)]
self.init_weights()
def forward(self, x):
x = self.stem(x)
output = []
for i in range(1, 5):
stage = getattr(self, 'stage{}'.format(i))
x = stage(x)
if (i in self.out_stages):
output.append(x)
return (output if (len(self.out_stages) > 1) else output[0])
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight, a=math.sqrt(5))
if (m.bias is not None):
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
m.eps = 0.001
m.momentum = 0.03
def _freeze_stages(self):
if (self.frozen_stages >= 0):
for i in range((self.frozen_stages + 1)):
m = (self.stem if (i == 0) else getattr(self, 'stage{}'.format(i)))
m.eval()
for param in m.parameters():
param.requires_grad = False
def train(self, mode=True):
super(CSPDarknet, self).train(mode)
self._freeze_stages()
if (mode and self.norm_eval):
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval() |
class idct_8x8(nn.Module):
def __init__(self):
super(idct_8x8, self).__init__()
alpha = np.array(([(1.0 / np.sqrt(2))] + ([1] * 7)))
self.alpha = nn.Parameter(torch.from_numpy(np.outer(alpha, alpha)).float())
tensor = np.zeros((8, 8, 8, 8), dtype=np.float32)
for (x, y, u, v) in itertools.product(range(8), repeat=4):
tensor[(x, y, u, v)] = (np.cos((((((2 * u) + 1) * x) * np.pi) / 16)) * np.cos((((((2 * v) + 1) * y) * np.pi) / 16)))
self.tensor = nn.Parameter(torch.from_numpy(tensor).float())
def forward(self, image):
image = (image * self.alpha)
result = ((0.25 * torch.tensordot(image, self.tensor, dims=2)) + 128)
result = result.view(image.shape)
return result |
def extractIndexedLayers(sequence, x, indexes, detach):
index = 0
output = []
indexes.sort()
for (iSeq, layer) in enumerate(sequence):
if (index >= len(indexes)):
break
x = layer(x)
if (iSeq == indexes[index]):
if detach:
output.append(x.view(x.size(0), x.size(1), (- 1)).detach())
else:
output.append(x.view(x.size(0), x.size(1), (- 1)))
index += 1
return output |
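A minimal usage sketch for extractIndexedLayers, assuming only torch; the three-layer Sequential is hypothetical. The helper returns the activations after each requested layer index, flattened to (N, C, H*W):
import torch
import torch.nn as nn

sequence = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(), nn.Conv2d(8, 16, 3, padding=1))
x = torch.randn(2, 3, 32, 32)
feats = extractIndexedLayers(sequence, x, indexes=[0, 2], detach=True)
print([f.shape for f in feats])  # [torch.Size([2, 8, 1024]), torch.Size([2, 16, 1024])] |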
def quaternion_conv_op(input, r_weight, i_weight, j_weight, k_weight, bias, stride: int, padding: int, groups: int, dilation: int, conv1d: bool):
cat_kernels_4_r = torch.cat([r_weight, (- i_weight), (- j_weight), (- k_weight)], dim=1)
cat_kernels_4_i = torch.cat([i_weight, r_weight, (- k_weight), j_weight], dim=1)
cat_kernels_4_j = torch.cat([j_weight, k_weight, r_weight, (- i_weight)], dim=1)
cat_kernels_4_k = torch.cat([k_weight, (- j_weight), i_weight, r_weight], dim=1)
cat_kernels_4_quaternion = torch.cat([cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=0)
if conv1d:
return F.conv1d(input=input, weight=cat_kernels_4_quaternion, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
else:
return F.conv2d(input=input, weight=cat_kernels_4_quaternion, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups) |
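The four torch.cat calls assemble the real block matrix of the Hamilton product, so one real convolution realizes the quaternion convolution. A minimal shape sketch, assuming only torch; the channel counts per quaternion component are hypothetical:
import torch
import torch.nn.functional as F

in_c, out_c, k = 2, 4, 3  # channels per quaternion component
weights = [torch.randn(out_c, in_c, k, k) for _ in range(4)]  # r, i, j, k parts
x = torch.randn(1, 4 * in_c, 16, 16)  # input stacks the four components along channels
y = quaternion_conv_op(x, *weights, bias=None, stride=1, padding=1, groups=1, dilation=1, conv1d=False)
print(y.shape)  # torch.Size([1, 16, 16, 16]): 4 * out_c output channels |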
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(len(train_loader), [batch_time, data_time, losses, top1, top5], prefix='Epoch: [{}]'.format(epoch))
model.train()
end = time.time()
for (i, (images, target)) in enumerate(train_loader):
data_time.update((time.time() - end))
if (args.gpu is not None):
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
output = model(images)
loss = criterion(output, target)
(acc1, acc5) = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update((time.time() - end))
end = time.time()
if ((i % args.print_freq) == 0):
progress.display(i) |
def image_processor_class_from_name(class_name: str):
for (module_name, extractors) in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if (class_name in extractors):
module_name = model_type_to_module_name(module_name)
module = importlib.import_module(f'.{module_name}', 'transformers.models')
try:
return getattr(module, class_name)
except AttributeError:
continue
for (_, extractor) in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if (getattr(extractor, '__name__', None) == class_name):
return extractor
main_module = importlib.import_module('transformers')
if hasattr(main_module, class_name):
return getattr(main_module, class_name)
return None |
def register_Ns3DesMetrics_methods(root_module, cls):
cls.add_method('Initialize', 'void', [param('std::vector< std::string >', 'args'), param('std::string', 'outDir', default_value='""')])
cls.add_method('Trace', 'void', [param('ns3::Time const &', 'now'), param('ns3::Time const &', 'delay')])
cls.add_method('TraceWithContext', 'void', [param('uint32_t', 'context'), param('ns3::Time const &', 'now'), param('ns3::Time const &', 'delay')])
cls.add_constructor([])
return |
def run_failed_cases(fail_dir='failed'):
for path in Path(fail_dir).glob('**/*'):
result = run_case(path)[1]
print(str(path), result) |
class EGT(EGT_Base):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.EGT_layers = nn.ModuleList([EGT_Layer(**self.layer_common_kwargs, edge_update=(not self.egt_simple)) for _ in range((self.model_height - 1))])
if ((not self.node_ended) and (not self.edge_ended)):
pass
elif (not self.node_ended):
self.EGT_layers.append(EGT_Layer(**self.layer_common_kwargs, node_update=False))
elif (not self.edge_ended):
self.EGT_layers.append(EGT_Layer(**self.layer_common_kwargs, edge_update=False))
else:
self.EGT_layers.append(EGT_Layer(**self.layer_common_kwargs))
def forward(self, inputs):
g = self.input_block(inputs)
for layer in self.EGT_layers:
g = layer(g)
g = self.final_embedding(g)
outputs = self.output_block(g)
return outputs |
def test_malformed_quantity_error():
malformed_quantity_error = MalformedQuantityError('abcd')
assert (malformed_quantity_error.malformed_quantity_string == 'abcd')
assert (str(malformed_quantity_error) == 'Expecting a quantity string(e.g. "5 km/s") for keyword - supplied abcd') |
class Runtime(metaclass=ABCMeta):
def __init__(self) -> None:
self._interrupted = False
def add_run(self, run: Run) -> None:
pass
async def start(self) -> None:
pass
def interrupt_handler(self) -> None:
pass
def interrupt(self) -> None:
if (not self._interrupted):
self._interrupted = True
self.interrupt_handler() |
def pci_records():
records = []
command = shlex.split('lspci -vmm')
output = subprocess.check_output(command).decode()
for devices in output.strip().split('\n\n'):
record = {}
records.append(record)
for row in devices.split('\n'):
(key, value) = row.split('\t')
record[key.split(':')[0]] = value
return records |
def test_code_object_executed_other_thread():
tracer = ExecutionTracer()
tracer.current_thread_identifier = threading.current_thread().ident
tracer.register_code_object(MagicMock())
def wrapper(*args):
with pytest.raises(RuntimeError):
tracer.executed_code_object(*args)
thread = threading.Thread(target=wrapper, args=(0,))
thread.start()
thread.join()
assert (tracer.get_trace().executed_code_objects == OrderedSet()) |
def parallax_replica_prefix(replica_id):
return ('%s%s' % (PARALLAX_REPLICA_PREFIX, str(replica_id))) |
def conv_params(fn):
params = fn.params.get('convolution_param', fn.params)
axis = params.get('axis', 1)
ks = np.array(params['kernel_size'], ndmin=1)
dilation = np.array(params.get('dilation', 1), ndmin=1)
assert (len(({'pad_h', 'pad_w', 'kernel_h', 'kernel_w', 'stride_h', 'stride_w'} & set(fn.params))) == 0), 'cropping does not support legacy _h/_w params'
return (axis, np.array(params.get('stride', 1), ndmin=1), (((ks - 1) * dilation) + 1), np.array(params.get('pad', 0), ndmin=1)) |
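The ((ks - 1) * dilation) + 1 term is the effective extent of a dilated kernel; e.g. a 3-tap kernel with dilation 2 spans 5 input positions. A quick check:
import numpy as np

ks, dilation = np.array([3]), np.array([2])
assert (((ks - 1) * dilation) + 1)[0] == 5  # dilated 3-tap kernel covers 5 inputs |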
class CTupleBaseTypeNode(CBaseTypeNode):
child_attrs = ['components']
def analyse(self, env, could_be_name=False):
component_types = []
for c in self.components:
type = c.analyse(env)
if type.is_pyobject:
error(c.pos, "Tuple types can't (yet) contain Python objects.")
return error_type
component_types.append(type)
entry = env.declare_tuple_type(self.pos, component_types)
entry.used = True
return entry.type |
class MalnetDataset(Dataset):
def __init__(self, args, root, files, labels, transform=None, pre_transform=None):
self.args = args
self.files = files
self.labels = labels
self.num_classes = len(np.unique(labels))
super(MalnetDataset, self).__init__(root, transform, pre_transform)
def raw_file_names(self):
return self.files
def processed_file_names(self):
return glob((self.processed_dir.replace('/processed', '') + '/*.pt'))
def download(self):
pass
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
x = torch.load((self.processed_dir.replace('/processed', '') + '/data_{}.pt'.format(idx)))
x.y = self.labels[idx]
return x |
def conjugate_gradients(Avp_f, b, nsteps, rdotr_tol=1e-10):
x = torch.zeros(b.size())
r = b.clone()
p = b.clone()
rdotr = torch.dot(r, r)
for i in range(nsteps):
Avp = Avp_f(p)
alpha = (rdotr / torch.dot(p, Avp))
x += (alpha * p)
r -= (alpha * Avp)
new_rdotr = torch.dot(r, r)
beta = (new_rdotr / rdotr)
p = (r + (beta * p))
rdotr = new_rdotr
if (rdotr < rdotr_tol):
break
return x |
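A minimal usage sketch for conjugate_gradients, assuming only torch: solve Ax = b for a symmetric positive-definite A by passing the matrix-vector product as the closure, which is how CG is usually driven in TRPO-style code:
import torch

torch.manual_seed(0)
M = torch.randn(6, 6)
A = M @ M.t() + 6 * torch.eye(6)  # symmetric positive definite by construction
b = torch.randn(6)
x = conjugate_gradients(lambda p: A @ p, b, nsteps=50)
assert torch.allclose(A @ x, b, atol=1e-4) |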
class Sampler():
def __init__(self, indexed_ratings, item_indices, cnn_features_path, epochs):
self._indexed_ratings = indexed_ratings
self._item_indices = item_indices
self._users = list(self._indexed_ratings.keys())
self._nusers = len(self._users)
self._items = list({k for a in self._indexed_ratings.values() for k in a.keys()})
self._nitems = len(self._items)
self._ui_dict = {u: list(set(indexed_ratings[u])) for u in indexed_ratings}
self._lui_dict = {u: len(v) for (u, v) in self._ui_dict.items()}
self._cnn_features_path = cnn_features_path
self._epochs = epochs
def read_features_triple(self, user, pos, neg):
feat_pos = np.load((os.path.join(self._cnn_features_path, str(pos.numpy())) + '.npy'))
feat_neg = np.load((os.path.join(self._cnn_features_path, str(neg.numpy())) + '.npy'))
return (user.numpy(), pos.numpy(), feat_pos, neg.numpy(), feat_neg)
def step(self, events: int, batch_size: int):
r_int = np.random.randint
n_users = self._nusers
n_items = self._nitems
ui_dict = self._ui_dict
lui_dict = self._lui_dict
actual_inter = (((events // batch_size) * batch_size) * self._epochs)
counter_inter = 1
def sample():
u = r_int(n_users)
ui = ui_dict[u]
lui = lui_dict[u]
if (lui == n_items):
return sample()
i = ui[r_int(lui)]
j = r_int(n_items)
while (j in ui):
j = r_int(n_items)
return (u, i, j)
for ep in range(self._epochs):
for _ in range(events):
(yield sample())
if (counter_inter == actual_inter):
return
else:
counter_inter += 1
def pipeline(self, num_users, batch_size):
def load_func(u, p, n):
b = tf.py_function(self.read_features_triple, (u, p, n), (np.int64, np.int64, np.float32, np.int64, np.float32))
return b
data = tf.data.Dataset.from_generator(generator=self.step, output_shapes=((), (), ()), output_types=(tf.int64, tf.int64, tf.int64), args=(num_users, batch_size))
data = data.map(load_func, num_parallel_calls=tf.data.experimental.AUTOTUNE)
data = data.batch(batch_size=batch_size)
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data
def step_eval(self):
for (i_rel, i_abs) in enumerate(self._item_indices):
(yield (i_rel, i_abs))
def pipeline_eval(self, batch_size):
def load_func(i_r, i_a):
b = tf.py_function(self.read_features, (i_r, i_a), (np.int64, np.int64, np.float32))
return b
data = tf.data.Dataset.from_generator(generator=self.step_eval, output_shapes=((), ()), output_types=(tf.int64, tf.int64))
data = data.map(load_func, num_parallel_calls=tf.data.experimental.AUTOTUNE)
data = data.batch(batch_size=batch_size)
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data
def read_features(self, item_rel, item_abs):
feat = np.load((os.path.join(self._cnn_features_path, str(item_abs.numpy())) + '.npy'))
return (item_rel, item_abs, feat) |
def test_RegularArray_NumpyArray():
v2a = ak.contents.regulararray.RegularArray(ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5])), 3)
roundtrip(v2a)
array = ak.highlevel.Array(v2a)
memoryleak(array, swallow)
memoryleak(array, passthrough)
memoryleak(array, passthrough2)
memoryleak(array, digest)
memoryleak(array, digest2)
v2b = ak.contents.regulararray.RegularArray(ak.contents.emptyarray.EmptyArray().to_NumpyArray(np.dtype(np.float64)), 0, zeros_length=10)
roundtrip(v2b)
array = ak.highlevel.Array(v2b)
memoryleak(array, swallow)
memoryleak(array, passthrough)
memoryleak(array, passthrough2)
memoryleak(array, digest)
memoryleak(array, digest2) |
class DecGaussianMLPPolicy(GaussianMLPModule):
def __init__(self, env_spec, hidden_sizes=(32, 32), hidden_nonlinearity=torch.tanh, hidden_w_init=nn.init.xavier_uniform_, hidden_b_init=nn.init.zeros_, output_nonlinearity=None, output_w_init=nn.init.xavier_uniform_, output_b_init=nn.init.zeros_, layer_normalization=False, share_std=False, name='DecGaussianMLPPolicy'):
assert isinstance(env_spec.action_space, akro.Box), 'Gaussian policy only works with akro.Box action space.'
self.centralized = False
self.vectorized = True
self._obs_dim = env_spec.observation_space.flat_dim
self._action_dim = env_spec.action_space.shape[0]
self.name = name
self.share_std = share_std
GaussianMLPModule.__init__(self, input_dim=self._obs_dim, output_dim=self._action_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, hidden_w_init=hidden_w_init, hidden_b_init=hidden_b_init, output_nonlinearity=output_nonlinearity, output_w_init=output_w_init, output_b_init=output_b_init, learn_std=True, share_std=share_std, init_std=1.0, min_std=1e-06, max_std=None, std_parameterization='exp', layer_normalization=False)
def grad_norm(self):
return np.sqrt(np.sum([(p.grad.norm(2).item() ** 2) for p in self.parameters()]))
def forward(self, obs_n, avail_actions_n=None):
obs_n = torch.Tensor(obs_n)
(mean, std) = super().forward(obs_n)
if self.share_std:
dist = Independent(Normal(mean, std), 1)
else:
dist = MultivariateNormal(mean, std)
return dist
def get_actions(self, obs_n, avail_actions_n=None, greedy=False):
with torch.no_grad():
dists_n = self.forward(obs_n)
if (not greedy):
actions_n = dists_n.sample().numpy()
else:
actions_n = dists_n.mean.numpy()
agent_infos_n = []
for i in range(len(actions_n)):
agent_infos_n.append(dict(action_mean=dists_n.mean[i].numpy(), action_std=dists_n.stddev[i].numpy()))
return (actions_n, agent_infos_n)
def reset(self, dones):
pass
def entropy(self, observations, avail_actions_n=None):
dists_n = self.forward(observations)
return dists_n.entropy()
def log_likelihood(self, observations, avail_actions_n, actions):
dists_n = self.forward(observations)
if (self._action_dim == 1):
actions = actions.unsqueeze((- 1))
llhs = dists_n.log_prob(actions)
return llhs
def recurrent(self):
return False |
def query_ball_point(radius, nsample, xyz1, xyz2):
return grouping_module.query_ball_point(xyz1, xyz2, radius, nsample) |
def bar_interaction_plot(interaction_matrix, tokens, top_k=5, text_kwargs=None, pair_indices=None, zero_diagonals=True, **kwargs):
if (text_kwargs is None):
text_kwargs = {}
if zero_diagonals:
interaction_matrix = interaction_matrix.copy()
np.fill_diagonal(interaction_matrix, 0.0)
if (pair_indices is None):
pair_indices = np.argsort(np.ravel(np.triu(np.abs(interaction_matrix))))[::(- 1)][:top_k]
pair_indices = np.vstack(np.unravel_index(pair_indices, interaction_matrix.shape)).T
else:
top_k = len(pair_indices)
token_labels = []
interaction_values = []
for index in pair_indices[::(- 1)]:
token_labels.append('{}, {} ({}, {})'.format(tokens[index[0]], tokens[index[1]], index[0], index[1]))
interaction_values.append(interaction_matrix[(index[0], index[1])])
(fig, axis) = plt.subplots()
bounds = np.max(np.abs(interaction_matrix))
normalizer = mpl.colors.Normalize(vmin=(- bounds), vmax=bounds)
if ('cmap' in kwargs):
cmap = kwargs['cmap']
else:
cmap = colors.maroon_white_aqua()
axis.barh(np.arange(top_k), interaction_values, color=[cmap(normalizer(c)) for c in interaction_values], align='center', zorder=10, **kwargs)
axis.set_xlabel('Interaction Value', fontsize=14)
axis.set_ylabel('Strongest Interacting Pairs', fontsize=14)
axis.set_yticks(np.arange(top_k))
axis.tick_params(axis='y', which='both', left=False, labelsize=12)
axis.set_yticklabels(token_labels)
axis.grid(axis='x', zorder=0, linewidth=0.2)
axis.grid(axis='y', zorder=0, linestyle='--', linewidth=1.0)
_set_axis_config(axis, linewidths=(0.0, 0.0, 0.0, 1.0))
text_ax = fig.add_axes([0.1, 0.9, 0.8, 0.1])
axis_transform = text_ax.transData
_set_axis_config(text_ax, clear_y_ticks=True, clear_x_ticks=True)
space_text = text_ax.text(x=0.0, y=1.0, s=' ', transform=axis_transform)
space_text.draw(fig.canvas.get_renderer())
space_bounds = space_text.get_window_extent()
for (i, token) in enumerate(tokens):
text = text_ax.text(x=0.0, y=0.6, s=token, transform=axis_transform, fontsize=16, **text_kwargs)
index_spacing = 0.0
if (len(token) > 2):
index_spacing = (len(token) * 0.01)
text_ax.text(x=index_spacing, y=0.0, s=str(i), transform=axis_transform, fontsize=16, **text_kwargs)
text.draw(fig.canvas.get_renderer())
ex = text.get_window_extent()
axis_transform = mpl.transforms.offset_copy(text._transform, x=(ex.width + space_bounds.width), units='dots')
return (axis, text_ax) |
class WnliProcessor(DataProcessor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(DEPRECATION_WARNING.format('processor'), FutureWarning)
def get_example_from_tensor_dict(self, tensor_dict):
return InputExample(tensor_dict['idx'].numpy(), tensor_dict['sentence1'].numpy().decode('utf-8'), tensor_dict['sentence2'].numpy().decode('utf-8'), str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')
def get_dev_examples(self, data_dir):
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')
def get_test_examples(self, data_dir):
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')
def get_labels(self):
return ['0', '1']
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if (i == 0):
continue
guid = ('%s-%s' % (set_type, line[0]))
text_a = line[1]
text_b = line[2]
label = (None if (set_type == 'test') else line[(- 1)])
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples |
def test_special_constants():
assert (S.Zero == Integer(0))
assert (S.One == Integer(1))
assert (S.NegativeOne == Integer((- 1)))
assert (S.Half == Rational(1, 2)) |
def densenet161_model(img_rows, img_cols, color_type=1, nb_dense_block=4, growth_rate=48, nb_filter=96, reduction=0.5, dropout_rate=0.0, weight_decay=0.0001, num_classes=None):
eps = 1.1e-05
compression = (1.0 - reduction)
global concat_axis
if (K.image_dim_ordering() == 'tf'):
concat_axis = 3
img_input = Input(shape=(224, 224, 3), name='data')
else:
concat_axis = 1
img_input = Input(shape=(3, 224, 224), name='data')
nb_filter = 96
nb_layers = [6, 12, 36, 24]
x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
x = Convolution2D(nb_filter, 7, 7, subsample=(2, 2), name='conv1', bias=False)(x)
x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv1_bn')(x)
x = Scale(axis=concat_axis, name='conv1_scale')(x)
x = Activation('relu', name='relu1')(x)
x = ZeroPadding2D((1, 1), name='pool1_zeropadding')(x)
x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)
for block_idx in range((nb_dense_block - 1)):
stage = (block_idx + 2)
(x, nb_filter) = dense_block(x, stage, nb_layers[block_idx], nb_filter, growth_rate, dropout_rate=dropout_rate, weight_decay=weight_decay)
x = transition_block(x, stage, nb_filter, compression=compression, dropout_rate=dropout_rate, weight_decay=weight_decay)
nb_filter = int((nb_filter * compression))
final_stage = (stage + 1)
(x, nb_filter) = dense_block(x, final_stage, nb_layers[(- 1)], nb_filter, growth_rate, dropout_rate=dropout_rate, weight_decay=weight_decay)
x = BatchNormalization(epsilon=eps, axis=concat_axis, name=(('conv' + str(final_stage)) + '_blk_bn'))(x)
x = Scale(axis=concat_axis, name=(('conv' + str(final_stage)) + '_blk_scale'))(x)
x = Activation('relu', name=(('relu' + str(final_stage)) + '_blk'))(x)
x_fc = GlobalAveragePooling2D(name=('pool' + str(final_stage)))(x)
x_fc = Dense(1000, name='fc6')(x_fc)
x_fc = Activation('softmax', name='prob')(x_fc)
model = Model(img_input, x_fc, name='densenet')
if (K.image_dim_ordering() == 'th'):
weights_path = 'imagenet_models/densenet161_weights_th.h5'
else:
weights_path = 'imagenet_models/densenet161_weights_tf.h5'
model.load_weights(weights_path, by_name=True)
x_newfc = GlobalAveragePooling2D(name=('pool' + str(final_stage)))(x)
x_newfc = Dense(num_classes, name='fc6')(x_newfc)
x_newfc = Activation('softmax', name='prob')(x_newfc)
model = Model(img_input, x_newfc)
sgd = SGD(lr=0.001, decay=1e-06, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
return model |
def _remove_right_units(string: str) -> str:
if ('\\text{ ' in string):
splits = string.split('\\text{ ')
assert (len(splits) == 2)
return splits[0]
else:
return string |
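A quick sketch of the intended behavior on MATH-style answer strings, which the function strips of a trailing \text{ ...} unit (note the trailing space survives):
assert _remove_right_units('123 \\text{ km/s}') == '123 '
assert _remove_right_units('42') == '42' |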
class MnliProcessor(DataProcessor):
def get_example_from_tensor_dict(self, tensor_dict):
return InputExample(tensor_dict['idx'].numpy(), tensor_dict['premise'].numpy().decode('utf-8'), tensor_dict['hypothesis'].numpy().decode('utf-8'), str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')
def get_dev_examples(self, data_dir):
return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev_matched.tsv')), 'dev_matched')
def get_labels(self):
return ['contradiction', 'entailment', 'neutral']
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if (i == 0):
continue
guid = ('%s-%s' % (set_type, line[0]))
text_a = line[8]
text_b = line[9]
label = line[(- 1)]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples |
@require_tf
@require_retrieval
class TFRagDPRBartTest(TFRagTestMixin, unittest.TestCase):
@cached_property
def config_and_inputs(self):
question_encoder_tester = TFDPRModelTester(self)
dpr_config_and_inputs = question_encoder_tester.prepare_config_and_inputs()
generator_tester = TFBartModelTester(self)
bart_config_and_inputs = generator_tester.prepare_config_and_inputs_for_common()
(question_encoder_config, input_ids, _, input_mask, _, _, _) = dpr_config_and_inputs
(generator_config, bart_inputs_dict) = bart_config_and_inputs
(decoder_input_ids, decoder_attention_mask) = (bart_inputs_dict['input_ids'], bart_inputs_dict['attention_mask'])
config = RagConfig.from_question_encoder_generator_configs(question_encoder_config, generator_config, n_docs=self.n_docs, retrieval_vector_size=self.retrieval_vector_size, max_combined_length=self.max_combined_length)
return {'config': config, 'input_ids': input_ids, 'attention_mask': input_mask, 'decoder_input_ids': decoder_input_ids, 'decoder_attention_mask': decoder_attention_mask} |
class IBN(nn.Module):
def __init__(self, planes):
super(IBN, self).__init__()
half1 = int((planes / 2))
self.half = half1
half2 = (planes - half1)
self.IN = nn.InstanceNorm2d(half1, affine=True)
self.BN = nn.BatchNorm2d(half2)
def forward(self, x):
split = torch.split(x, self.half, 1)
out1 = self.IN(split[0].float().contiguous())
out2 = self.BN(split[1].contiguous())
out = torch.cat((out1, out2), 1)
return out |
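A minimal usage sketch for IBN, assuming only torch: the block instance-normalizes the first half of the channels and batch-normalizes the rest, as in IBN-Net:
import torch

ibn = IBN(planes=64)
x = torch.randn(2, 64, 8, 8)
out = ibn(x)
print(out.shape)  # torch.Size([2, 64, 8, 8]); first 32 channels IN, last 32 BN |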
def _cos_theta(ncut: int) -> csc_matrix:
cos_op = (0.5 * (_exp_i_theta_operator(ncut) + _exp_i_theta_operator_conjugate(ncut)))
return cos_op |
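In the charge basis, e^{i theta} shifts charge states by one, so cos(theta) carries 1/2 on the two off-diagonals of a (2*ncut + 1)-dimensional matrix. A hedged sketch of the two assumed helpers, using scipy.sparse (the shift convention is an assumption, not the source's definition):
import numpy as np
from scipy.sparse import csc_matrix, diags

def _exp_i_theta_operator(ncut: int) -> csc_matrix:
    # Assumed convention: ones on the first subdiagonal of a (2*ncut + 1)-dim matrix.
    dim = 2 * ncut + 1
    return diags([1.0], offsets=[-1], shape=(dim, dim), format='csc')

def _exp_i_theta_operator_conjugate(ncut: int) -> csc_matrix:
    return csc_matrix(_exp_i_theta_operator(ncut).conj().T)

assert np.allclose(_cos_theta(1).toarray(), 0.5 * np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])) |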
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('inshape, kernel, outmaps, pad, stride, dilation', [((2, 2, 10), (1,), 4, (3,), (2,), (1,)), ((2, 2, 10), (3,), 2, (0,), (1,), (2,))])
@pytest.mark.parametrize('group', [1, 2])
@pytest.mark.parametrize('channel_last', [False, True])
@pytest.mark.parametrize('with_bias', [True, False])
@pytest.mark.parametrize('base_axis', [1, (- 2)])
def test_convolution_1d_double_backward(inshape, kernel, outmaps, pad, stride, dilation, group, channel_last, with_bias, base_axis, seed, ctx, func_name):
core_test_convolution_double_backward(inshape, kernel, outmaps, pad, stride, dilation, group, channel_last, with_bias, base_axis, seed, ctx, func_name, non_accum_check=True) |
class TestDirac(unittest.TestCase):
def test_dirac_property1d(self):
(ni, no, k, pad) = (4, 4, 3, 1)
module = DiracConv1d(in_channels=ni, out_channels=no, kernel_size=k, padding=pad, bias=False)
module.alpha.data.fill_(1)
module.beta.data.fill_(0)
x = Variable(torch.randn(4, ni, 5))
y = module(x)
self.assertEqual(y.size(), x.size(), 'shape check')
self.assertEqual((y - x).data.abs().sum(), 0, 'dirac delta property check')
def test_dirac_property2d(self):
(ni, no, k, pad) = (4, 4, 3, 1)
module = DiracConv2d(in_channels=ni, out_channels=no, kernel_size=k, padding=pad, bias=False)
module.alpha.data.fill_(1)
module.beta.data.fill_(0)
x = Variable(torch.randn(4, ni, 5, 5))
y = module(x)
self.assertEqual(y.size(), x.size(), 'shape check')
self.assertEqual((y - x).data.abs().sum(), 0, 'dirac delta property check')
def test_dirac_property3d(self):
(ni, no, k, pad) = (4, 4, 3, 1)
module = DiracConv3d(in_channels=ni, out_channels=no, kernel_size=k, padding=pad, bias=False)
module.alpha.data.fill_(1)
module.beta.data.fill_(0)
x = Variable(torch.randn(4, ni, 5, 5, 5))
y = module(x)
self.assertEqual(y.size(), x.size(), 'shape check')
self.assertEqual((y - x).data.abs().sum(), 0, 'dirac delta property check')
def test_nonsquare(self):
(ni, no, k, pad) = (8, 4, 3, 1)
module = DiracConv2d(in_channels=ni, out_channels=no, kernel_size=k, padding=pad, bias=False)
x = Variable(torch.randn(4, ni, 5, 5))
y = module(x)
def test_cifar10(self):
inputs = Variable(torch.randn(1, 3, 32, 32))
(f, params, stats) = define_diracnet(34, 1, 'CIFAR10')
outputs = f(inputs, params, stats, mode=False)
self.assertEqual(outputs.size(), torch.Size((1, 10)))
def test_imagenet(self):
inputs = Variable(torch.randn(1, 3, 224, 224))
(f, params, stats) = define_diracnet(18, 1, 'ImageNet')
outputs = f(inputs, params, stats, mode=False)
self.assertEqual(outputs.size(), torch.Size((1, 1000))) |
@pytest.mark.parametrize('traj,tolerance,output', [(trj_prob, 0.0, 1.0), (trj_prob, 1.0, (1.0 / 3.0))])
def test_location_probability_match(traj, tolerance, output):
at = attacks.LocationProbabilityAttack(knowledge_length=1, tolerance=tolerance)
results = []
for i in range(1, 5):
results.append(at._match(single_traj=trj_prob[(trj_prob[user_id] == i)], instance=fourth_instance))
assert ((1.0 / sum(results)) == output) |
class IoTest(absltest.TestCase):
def testProducesValidOutput(self):
with tempfile.NamedTemporaryFile() as output_file:
output_filename = output_file.name
scorer = rouge_scorer.RougeScorer(['rouge1'], False)
io.compute_scores_and_write_to_csv(test_util.TARGETS_FILE, test_util.PREDICTIONS_FILE, output_filename, scorer, scoring.BootstrapAggregator())
with open(output_filename) as f:
csv_lines = f.readlines()
output_types = tuple((line.split(',')[0] for line in csv_lines))
self.assertEqual(output_types[0], 'score_type')
self.assertSameElements(output_types[1:], ['rouge1-P', 'rouge1-R', 'rouge1-F'])
def testUnAggregated(self):
with tempfile.NamedTemporaryFile() as output_file:
output_filename = output_file.name
scorer = rouge_scorer.RougeScorer(['rouge1'], False)
io.compute_scores_and_write_to_csv(test_util.TARGETS_FILE, test_util.PREDICTIONS_FILE, output_filename, scorer, None)
with open(output_filename) as f:
csv_lines = f.readlines()
ids = tuple((line.split(',')[0] for line in csv_lines))
self.assertEqual(ids[0], 'id')
self.assertLen(csv_lines, 3)
def testDelimitedFile(self):
with tempfile.NamedTemporaryFile() as output_file:
output_filename = output_file.name
scorer = rouge_scorer.RougeScorer(['rouge1'], False)
io.compute_scores_and_write_to_csv(test_util.DELIMITED_FILE, test_util.DELIMITED_FILE, output_filename, scorer, None, delimiter=':')
with open(output_filename) as f:
csv_lines = f.readlines()
ids = tuple((line.split(',')[0] for line in csv_lines))
self.assertEqual(ids[0], 'id')
self.assertLen(csv_lines, 5)
def testAssertsOnInvalidInputFiles(self):
scorer = rouge_scorer.RougeScorer(['rouge1'], False)
with self.assertRaises(ValueError):
io.compute_scores_and_write_to_csv('invalid*', 'invalid*', 'invalid', scorer, scoring.BootstrapAggregator())
def testAssertsOnInvalidRougeTypes(self):
scorer = rouge_scorer.RougeScorer(['rougex'], False)
with self.assertRaises(ValueError):
io.compute_scores_and_write_to_csv(test_util.TARGETS_FILE, test_util.PREDICTIONS_FILE, '', scorer, scoring.BootstrapAggregator()) |