code stringlengths 101 5.91M |
|---|
class CoordConv2d(nn.Module):
    """2-D convolution preceded by an AddCoords step.

    The coordinate layer appends one extra channel to the input, so the
    wrapped Conv2d is built with ``in_channels + 1`` input channels.
    """

    def __init__(self, in_channels, out_channels, **kwargs):
        super().__init__()
        # One extra input channel accounts for the appended coordinate map.
        self.conv = nn.Conv2d(in_channels + 1, out_channels, **kwargs)

    def forward(self, x):
        with_coords = AddCoords()(x)
        return self.conv(with_coords)
class DecodeLayer(nn.Module):
    """Decoder step: refines probe vectors with a transformer that attends to
    sentence states (key/value) and graph states (external memory), then
    either scores target tokens (training) or returns log-likelihoods
    (inference/search)."""

    def __init__(self, vocabs, inference_layers, embed_dim, ff_embed_dim, num_heads, token_size, rel_size, dropout):
        super(DecodeLayer, self).__init__()
        self.inference_core = Transformer(inference_layers, embed_dim, ff_embed_dim, num_heads, dropout, with_external=True)
        self.token_generator = TokenGenerator(vocabs, embed_dim, token_size, dropout)
        self.dropout = dropout
        self.vocabs = vocabs

    def forward(self, probe, graph_state, snt_state, graph_padding_mask, snt_padding_mask, attn_mask, copy_seq, target=None, work=False):
        hidden = F.dropout(probe, p=self.dropout, training=self.training)
        hidden = self.inference_core(hidden, kv=snt_state, self_padding_mask=snt_padding_mask, self_attn_mask=attn_mask, external_memories=graph_state, external_padding_mask=graph_padding_mask)
        if work:
            # Inference path: per-token log-likelihoods for beam search.
            return self.token_generator(hidden, graph_state, graph_padding_mask, copy_seq, work=True)
        # Training path: normalize the summed loss by the number of real
        # (non-padded) tokens per sequence, then average over the batch.
        token_loss = self.token_generator(hidden, graph_state, graph_padding_mask, copy_seq, target=target, work=False)
        token_tot = snt_padding_mask.size(0) - snt_padding_mask.float().sum(0)
        return (token_loss / token_tot).mean()
def build_fake_yaml2():
    """Write a minimal tuning-config YAML to ``fake_yaml2.yaml``.

    The inline string is round-tripped through the YAML parser so the file
    on disk is normalized output rather than the raw literal.
    """
    fake_yaml = '\n model:\n name: fake_yaml\n framework: tensorflow\n inputs: x\n outputs: op_to_store\n device: cpu\n evaluation:\n accuracy:\n metric:\n topk: 1\n tuning:\n strategy:\n name: mse\n exit_policy:\n max_trials: 5\n accuracy_criterion:\n relative: -0.01\n workspace:\n path: saved\n '
    # safe_load is equivalent to load(..., Loader=SafeLoader) and clearer.
    y = yaml.safe_load(fake_yaml)
    # The with-statement closes the file; the old explicit f.close() inside
    # the block was redundant and has been removed.
    with open('fake_yaml2.yaml', 'w', encoding='utf-8') as f:
        yaml.dump(y, f)
class DictMetaDataInfo(object):
    """Records the structure of a dict-like input (type, length, keys, and
    per-value meta info) so an equivalent container can be rebuilt later."""

    def __init__(self, input_element):
        # Concrete runtime type of the original container.
        self.type = type(input_element)
        # NOTE(review): assumes input_element is non-empty — an empty dict
        # raises IndexError here; confirm callers guarantee at least one key.
        random_key = list(input_element.keys())[0]
        # Distinguish attribute-accessible dict subclasses from plain dicts by
        # probing one key as an attribute.
        # NOTE(review): hasattr requires a string name — if keys are not
        # strings this raises TypeError; presumably keys are str, confirm.
        if hasattr(input_element, random_key):
            self.class_fn = CustomDict
        else:
            self.class_fn = dict
        self.length = len(input_element)
        self.keys = list(input_element.keys())
        # Per-value metadata, index-aligned with self.keys.
        self.infos = []
        for k in self.keys:
            v = input_element[k]
            info = get_meta_info_from_input(v)
            # Lists nested inside a dict must be rebuilt on reconstruction.
            if isinstance(info, ListMetaDataInfo):
                info.need_reconstruct = True
            self.infos.append(info)
        # Dicts themselves always need reconstruction.
        self.need_reconstruct = True
def add_clip_prediction(predictions: Dict[(int, Dict[(int, Tensor)])], class_preds: Tensor, frames: Tensor, video_index: int, merge_predictions_type: str='max') -> None:
    """Merge per-clip class predictions into the running video predictions, in place.

    For 'max' the elementwise maximum of new and existing scores is kept.
    For 'average' positions that already hold a positive score are replaced by
    the mean of old and new; untouched positions just take the new score.
    """
    existing = predictions[video_index][frames]
    class_preds = class_preds.to(dtype=existing.dtype)
    if merge_predictions_type == 'max':
        take_new = class_preds > existing
        predictions[video_index][frames] = torch.where(take_new, class_preds, existing)
    elif merge_predictions_type == 'average':
        has_previous = existing > 0
        averaged = torch.stack((class_preds, existing)).mean(dim=0)
        predictions[video_index][frames] = torch.where(has_previous, averaged, class_preds)
def score_pair_to_csv(rep1_dict: dict, rep2_dict: dict, filename: str, metrics: list) -> None:
    """Load the two described representations and score the pair into a CSV.

    Each *_dict carries dataset/architecture/seed/step/layer identifying one
    saved embedding; those fields are also recorded as metadata columns.
    """
    keys = ('dataset', 'architecture', 'seed', 'step', 'layer')
    rep1 = load_embedding(*(rep1_dict[k] for k in keys))
    rep2 = load_embedding(*(rep2_dict[k] for k in keys))
    logging.info(f'representation 1 shape: {rep1.shape}')
    logging.info(f'representation 2 shape: {rep2.shape}')
    # Metadata columns: all rep1 fields suffixed '1', then rep2 fields '2'.
    metadata = {f'{k}1': rep1_dict[k] for k in keys}
    metadata.update({f'{k}2': rep2_dict[k] for k in keys})
    score_local_pair(rep1=rep1, rep2=rep2, metrics=metrics, filename=filename, metadata=metadata)
def test_split_by_num_for_UI_bigraph():
    """split_by_num_for_UI_bigraph should split each user's edges so the
    train side holds at most train_num+1 entries and the rest go to test."""
    # All (u, v) pairs with u <= v over 5 users / 5 items.
    e_list = [[u, v] for u in range(5) for v in range(u, 5)]
    g = dhg.BiGraph(5, 5, e_list)
    train_num = 3
    train_adj, test_adj = split_by_num_for_UI_bigraph(g, train_num)
    assert len(train_adj) == 5
    assert len(test_adj) == 2
    for row, expected_len in zip(train_adj, [4, 4, 4, 3, 2]):
        assert len(row) == expected_len
    for row, expected_len in zip(test_adj, [3, 2]):
        assert len(row) == expected_len
def test_ssd_neck():
    """SSDNeck must reject inconsistent channel/stride/padding configs and
    otherwise produce feature maps with the expected shapes."""
    # Each of these configs has mismatched list lengths or out_channels that
    # do not extend in_channels, so construction must assert.
    bad_cfgs = [
        dict(in_channels=[8, 16], out_channels=[8, 16, 32], level_strides=[2], level_paddings=[2, 1]),
        dict(in_channels=[8, 16], out_channels=[8], level_strides=[2], level_paddings=[2]),
        dict(in_channels=[8, 16], out_channels=[4, 16, 64], level_strides=[2, 2], level_paddings=[2, 2]),
        dict(in_channels=[8, 16], out_channels=[4, 16, 64], level_strides=[2], level_paddings=[2]),
    ]
    for cfg in bad_cfgs:
        with pytest.raises(AssertionError):
            SSDNeck(**cfg)
    # Single input map, two extra levels: check every output shape.
    ssd_neck = SSDNeck(in_channels=[4], out_channels=[4, 8, 16], level_strides=[2, 1], level_paddings=[1, 0])
    outs = ssd_neck((torch.rand(1, 4, 16, 16),))
    for out, expected_shape in zip(outs, [(1, 4, 16, 16), (1, 8, 8, 8), (1, 16, 6, 6)]):
        assert out.shape == expected_shape
    # Depthwise variant without the L2-norm layer.
    ssd_neck = SSDNeck(in_channels=[4, 8], out_channels=[4, 8, 16], level_strides=[1], level_paddings=[1], l2_norm_scale=None, use_depthwise=True, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU6'))
    assert not hasattr(ssd_neck, 'l2_norm')
    from mmcv.cnn.bricks import DepthwiseSeparableConvModule
    assert isinstance(ssd_neck.extra_layers[0][-1], DepthwiseSeparableConvModule)
    outs = ssd_neck((torch.rand(1, 4, 8, 8), torch.rand(1, 8, 8, 8)))
    for out, expected_shape in zip(outs, [(1, 4, 8, 8), (1, 8, 8, 8), (1, 16, 8, 8)]):
        assert out.shape == expected_shape
def CheckArgs(args):
    """Validate detection-cost-function arguments and return them unchanged.

    Raises:
        ValueError: if c_fa or c_miss is not positive, or p_target is not
            strictly between 0 and 1. (ValueError subclasses Exception, so
            existing callers that caught Exception still work.)
    """
    if args.c_fa <= 0:
        raise ValueError('--c-fa must be greater than 0')
    if args.c_miss <= 0:
        raise ValueError('--c-miss must be greater than 0')
    if args.p_target <= 0 or args.p_target >= 1:
        raise ValueError('--p-target must be greater than 0 and less than 1')
    return args
class HighwayState():
    """Snapshot of a highway-merge scenario: the ego vehicle plus other cars,
    with one-step prediction helpers used for planning.

    ``other_xs`` / ``other_speeds`` / ``other_accelerations`` are
    index-aligned lists sorted by decreasing x (see ``from_sumo``).
    Fix: ``empty_state`` and ``from_sumo`` take ``cls`` but were missing the
    ``@classmethod`` decorator, so ``HighwayState.empty_state()`` raised a
    TypeError; the decorators are now applied.
    """

    # Ego s-coordinate beyond which other drivers start reacting to the ego.
    ego_reaction_threshold = 8
    # Ego s-coordinate beyond which a collision with traffic is possible.
    ego_crash_threshold = 11

    def __init__(self, ego_position, ego_speed, ego_acceleration, other_xs, other_speeds, other_accelerations):
        self.ego_position = ego_position
        self.ego_speed = ego_speed
        self.ego_acceleration = ego_acceleration
        self.other_xs = other_xs
        self.other_speeds = other_speeds
        self.other_accelerations = other_accelerations

    def predict_step_without_ego(self, delta_t, min_crash_distance=5):
        """Predict the next state while treating the ego as passive.

        Before the reaction threshold (or with no traffic) the ego is modeled
        at speed 0; otherwise it is replaced by a virtual follower of the last
        car ahead so traffic prediction is unaffected by the real ego.
        Returns (next_state, crashed) like predict_step_with_ego.
        """
        ego_s = control.get_ego_s(self.ego_position)
        ego_x = self.ego_position[0]
        if (ego_s < self.ego_reaction_threshold) or (len(self.other_xs) == 0):
            return self.predict_step_with_ego(0, delta_t, min_crash_distance)
        elif self.other_xs[0] < ego_x:
            # Ego is ahead of all traffic: park a dummy ego far behind.
            modified_state = HighwayState((-20, -10), 0, 0, self.other_xs, self.other_speeds, self.other_accelerations)
            return modified_state.predict_step_with_ego(0, delta_t, min_crash_distance)
        else:
            last_speed = 0
            last_x = 0
            for i, car_x in enumerate(self.other_xs):
                if car_x < ego_x:
                    # Place the virtual ego just behind the closest car ahead,
                    # moving at that car's speed.
                    modified_state = HighwayState(((last_x - Settings.CAR_LENGTH) - 5, self.ego_position[1]), last_speed, 0, self.other_xs, self.other_speeds, self.other_accelerations)
                    return modified_state.predict_step_with_ego(last_speed, delta_t, min_crash_distance)
                else:
                    last_speed = self.other_speeds[i]
                    last_x = car_x
            return self.predict_step_with_ego(last_speed, delta_t, min_crash_distance)

    def predict_step_with_ego(self, selected_speed, delta_t, min_crash_distance=5):
        """Advance the world by delta_t with the ego driving at selected_speed.

        Returns:
            (HighwayState, bool): the predicted next state and whether the ego
            would be within crash range of another car.
        """
        (current_x, current_y) = self.ego_position
        if current_x < control.merge_point2[0]:
            # Before the merge point: move along the straight line toward it.
            direction = np.array([control.merge_point2[0] - current_x, control.merge_point2[1] - current_y])
            direction /= np.linalg.norm(direction)
            direction *= (selected_speed * delta_t)
            predicted_x = current_x + direction[0]
            predicted_y = current_y + direction[1]
            # Clamp lateral position to the lane boundary.
            if predicted_y < (-1.6):
                predicted_y = (-1.6)
        else:
            # Past the merge point: pure longitudinal motion.
            predicted_y = current_y
            predicted_x = current_x + (selected_speed * delta_t)
        next_acceleration = (selected_speed - self.ego_speed) / delta_t
        ego_can_crash = (control.get_ego_s((predicted_x, predicted_y)) > self.ego_crash_threshold)
        ego_has_merged = (control.get_ego_s((predicted_x, predicted_y)) > self.ego_reaction_threshold)
        new_other_xs = []
        new_other_speeds = []
        new_other_accelerations = []
        last_x = np.inf
        last_speed = 0
        ego_encountered = False
        # Traffic is sorted front-to-back; each car reacts to the vehicle
        # directly ahead (which may be the merged ego).
        for other_car_index in range(len(self.other_xs)):
            other_speed = self.other_speeds[other_car_index]
            other_x = self.other_xs[other_car_index]
            if (other_x < predicted_x) and (not ego_encountered):
                ego_encountered = True
                if ego_has_merged:
                    # The merged ego becomes this car's lead vehicle.
                    last_x = predicted_x
                    last_speed = selected_speed
            speed_diff = last_speed - other_speed
            x_diff = last_x - other_x
            # Decelerate (bounded) when closing on a slower lead car within 30 m.
            if (speed_diff < 0) and (x_diff < 30):
                new_other_acceleration = max(speed_diff, Settings.MAX_PREDICTED_DECELERATION)
                new_other_speed = other_speed + (new_other_acceleration * delta_t)
            else:
                new_other_acceleration = 0
                new_other_speed = other_speed
            predicted_next_position = other_x + (new_other_speed * delta_t)
            last_x = predicted_next_position
            last_speed = new_other_speed
            new_other_xs.append(predicted_next_position)
            new_other_speeds.append(new_other_speed)
            new_other_accelerations.append(new_other_acceleration)
        crashed = False
        crash_detection_distance = max(Settings.CAR_LENGTH, min_crash_distance)
        for x in new_other_xs:
            if (abs(x - predicted_x) < crash_detection_distance) and ego_can_crash:
                crashed = True
        return (HighwayState((predicted_x, predicted_y), selected_speed, next_acceleration, new_other_xs, new_other_speeds, new_other_accelerations), crashed)

    @classmethod
    def empty_state(cls):
        """Return a state with a zeroed ego and no traffic."""
        return cls(0, 0, 0, [], [], [])

    @classmethod
    def from_sumo(cls):
        """Build a state from the live SUMO simulation via the control module,
        keeping only vehicles within sensor radius, sorted by decreasing x."""
        ids = control.get_vehicle_ids()
        positions = control.get_vehicle_positions(ids)
        speeds = control.get_vehicle_speeds(ids)
        accelerations = control.get_vehicle_accelerations(ids)
        if 'ego' in ids:
            ego_position = control.get_ego_position()
            ego_speed = control.get_ego_speed()
            ego_acceleration = control.get_ego_acceleration()
        else:
            # No ego in the simulation yet: park it far behind the scene.
            ego_position = (-200, 0)
            ego_speed = 0
            ego_acceleration = 0
        other_xs = []
        other_speeds = []
        other_accelerations = []
        for vehicle in ids:
            if vehicle == 'ego':
                continue
            else:
                other_position = positions[vehicle]
                if control.distance(other_position, ego_position) < Settings.SENSOR_RADIUS:
                    other_xs.append(other_position[0])
                    other_speeds.append(speeds[vehicle])
                    other_accelerations.append(accelerations[vehicle])
        # Sort all traffic lists together by decreasing x.
        index_order = np.argsort(other_xs)
        other_xs = [other_xs[i] for i in reversed(index_order)]
        other_speeds = [other_speeds[i] for i in reversed(index_order)]
        other_accelerations = [other_accelerations[i] for i in reversed(index_order)]
        return cls(ego_position, ego_speed, ego_acceleration, other_xs, other_speeds, other_accelerations)

    def plot_state(self, i):
        """Scatter-plot the ego (red) and its nearest neighbors (green) at step i."""
        (ego_x, ego_y) = self.ego_position
        plt.scatter(i, ego_x, color='r')
        last_x = np.inf
        had_car = False
        encountered_ego = False
        for other_index, x in enumerate(self.other_xs):
            if (not encountered_ego) and (x < ego_x):
                encountered_ego = True
                if had_car:
                    plt.scatter(i, last_x, color='g')
                plt.scatter(i, x, color='g')
                break
            last_x = x
            had_car = True
        if had_car and (not encountered_ego):
            # All traffic is ahead of the ego: plot the closest car ahead.
            plt.scatter(i, last_x, color='g')

    def get_closest_cars(self):
        """Return ((x, speed, accel) of the car ahead, same for the car behind),
        with None where no such car exists."""
        (ego_x, ego_y) = self.ego_position
        index_behind = -1
        index_front = -1
        last_index = -1
        for other_index, x in enumerate(self.other_xs):
            if x < ego_x:
                index_behind = other_index
                break
            last_index = other_index
        if last_index != -1:
            index_front = last_index
        if index_front != -1:
            car_front = (self.other_xs[index_front], self.other_speeds[index_front], self.other_accelerations[index_front])
        else:
            car_front = None
        if index_behind != -1:
            car_behind = (self.other_xs[index_behind], self.other_speeds[index_behind], self.other_accelerations[index_behind])
        else:
            car_behind = None
        return (car_front, car_behind)
class ResNeXt101_64x4d(nn.Module):
    """ResNeXt-101 (64x4d) classifier: shared feature extractor followed by
    7x7 average pooling, flattening, and a linear classification head."""

    def __init__(self, num_classes=1000):
        super(ResNeXt101_64x4d, self).__init__()
        self.num_classes = num_classes
        self.features = resnext101_64x4d_features
        self.avg_pool = nn.AvgPool2d((7, 7), (1, 1))
        self.last_linear = nn.Linear(2048, num_classes)

    def logits(self, input):
        """Pool the feature map, flatten, and classify."""
        pooled = self.avg_pool(input)
        flat = pooled.view(pooled.size(0), -1)
        return self.last_linear(flat)

    def forward(self, input):
        return self.logits(self.features(input))
class LoadImage():
    """Deprecated pipeline step that reads a single image into ``results``."""

    def __call__(self, results):
        """Load results['img'] (path or array) and fill the image fields."""
        warnings.simplefilter('once')
        warnings.warn('`LoadImage` is deprecated and will be removed in future releases. You may use `LoadImageFromWebcam` from `mmdet.datasets.pipelines.` instead.')
        img_source = results['img']
        # Only a string input carries a filename to record.
        filename = img_source if isinstance(img_source, str) else None
        results['filename'] = filename
        results['ori_filename'] = filename
        img = mmcv.imread(img_source)
        results['img'] = img
        results['img_fields'] = ['img']
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        return results
class PreResNet20ImageNette():
    """Config bundle for a PreResNet-20 on 160x160 inputs: the model class,
    its constructor kwargs, and the train/test torchvision transforms."""
    # Model class and how to instantiate it.
    base = PreResNet
    args = list()
    kwargs = {'depth': 20, 'planes': [4, 8, 16], 'input_size': 160}
    # Training: padded random crop + horizontal flip, then normalization.
    # NOTE(review): the mean/std are the standard CIFAR-10 statistics, not
    # ImageNette's — confirm this is intentional.
    transform_train = transforms.Compose([transforms.RandomCrop(160, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201])])
    # Evaluation: deterministic center crop + the same normalization.
    transform_test = transforms.Compose([transforms.CenterCrop(160), transforms.ToTensor(), transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201])])
def broyden(g, x_init, J_inv_init, max_steps=50, cvg_thresh=1e-05, dvg_thresh=1, eps=1e-06):
    """Batched Broyden root finder: iteratively drives g(x) toward 0.

    Args:
        g: residual function, called as ``g(x, mask=ids_val)``; the ``bmm``
           and ``squeeze(-1)`` usage below implies residuals shaped like
           batched column vectors (batch, n, 1).
        x_init: initial iterate, same batched column-vector shape.
        J_inv_init: initial inverse-Jacobian estimate, (batch, n, n).
        max_steps: maximum number of quasi-Newton iterations.
        cvg_thresh: residual norm below which an element counts as converged.
        dvg_thresh: residual norm above which an element is frozen (diverging).
        eps: keeps the rank-1 update denominator away from zero.

    Returns:
        dict with 'result' (best iterate per element), 'diff' (its residual
        norm) and 'valid_ids' (mask of converged elements).
    """
    x = x_init.clone().detach()
    J_inv = J_inv_init.clone().detach()
    ids_val = torch.ones(x.shape[0]).bool()
    gx = g(x, mask=ids_val)
    # Quasi-Newton step: dx = -J_inv @ g(x).
    update = (- J_inv.bmm(gx))
    # Best-so-far iterate and residual norm, tracked per batch element.
    x_opt = x.clone()
    gx_norm_opt = torch.linalg.norm(gx.squeeze((- 1)), dim=(- 1))
    delta_gx = torch.zeros_like(gx)
    delta_x = torch.zeros_like(x)
    # Mask of batch elements still being iterated on.
    ids_val = torch.ones_like(gx_norm_opt).bool()
    for solvestep in range(max_steps):
        # Apply the step only to still-active elements.
        delta_x[ids_val] = update
        x[ids_val] += delta_x[ids_val]
        delta_gx[ids_val] = (g(x, mask=ids_val) - gx[ids_val])
        gx[ids_val] += delta_gx[ids_val]
        gx_norm = torch.linalg.norm(gx.squeeze((- 1)), dim=(- 1))
        # Record any improvement over the best residual seen so far.
        ids_opt = (gx_norm < gx_norm_opt)
        gx_norm_opt[ids_opt] = gx_norm.clone().detach()[ids_opt]
        x_opt[ids_opt] = x.clone().detach()[ids_opt]
        # Continue only where not yet converged and not diverging.
        ids_val = ((gx_norm_opt > cvg_thresh) & (gx_norm < dvg_thresh))
        if (ids_val.sum() <= 0):
            break
        # Broyden rank-1 update of the inverse Jacobian:
        # J_inv += (dx - J_inv*dgx) (dx^T J_inv) / (dx^T J_inv dgx).
        vT = delta_x[ids_val].transpose((- 1), (- 2)).bmm(J_inv[ids_val])
        a = (delta_x[ids_val] - J_inv[ids_val].bmm(delta_gx[ids_val]))
        b = vT.bmm(delta_gx[ids_val])
        # Nudge the denominator away from zero (both signs) to avoid blow-ups.
        b[(b >= 0)] += eps
        b[(b < 0)] -= eps
        u = (a / b)
        ubmmvT = u.bmm(vT)
        J_inv[ids_val] += ubmmvT
        update = (- J_inv[ids_val].bmm(gx[ids_val]))
    return {'result': x_opt, 'diff': gx_norm_opt, 'valid_ids': (gx_norm_opt < cvg_thresh)}
class TestSimulatorsJob(QiskitTestCase):
    """Tests for simulator job submission and cancellation against a mocked
    executor."""

    def test_multiple_execution(self):
        """Submitting N jobs forwards each target task to the executor in order."""
        taskcount = 10
        target_tasks = [(lambda: None) for _ in range(taskcount)]
        job_id = str(uuid.uuid4())
        backend = FakeRueschlikon()
        with mocked_executor() as (SimulatorJob, executor):
            for index in range(taskcount):
                job = SimulatorJob(backend, job_id, target_tasks[index], new_fake_qobj())
                job.submit()
        self.assertEqual(executor.submit.call_count, taskcount)
        for index in range(taskcount):
            (_, callargs, _) = executor.submit.mock_calls[index]
            submitted_task = callargs[0]
            target_task = target_tasks[index]
            self.assertEqual(submitted_task, target_task)

    def test_cancel(self):
        """Cancelling a submitted job cancels the executor's future once."""
        job_id = str(uuid.uuid4())
        backend = FakeRueschlikon()
        with mocked_executor() as (BasicAerJob, executor):
            job = BasicAerJob(backend, job_id, (lambda: None), new_fake_qobj())
            job.submit()
            job.cancel()
        self.assertCalledOnce(executor.submit)
        mocked_future = executor.submit.return_value
        self.assertCalledOnce(mocked_future.cancel)

    def assertCalledOnce(self, mocked_callable):
        """Assert the mock was called exactly once.

        Fix: the previous failure message claimed the callable was "called
        more than once" even when it had been called zero times; it now
        reports the actual call count.
        """
        call_count = mocked_callable.call_count
        self.assertEqual(call_count, 1, 'Callable object has been called {} times, expected exactly once'.format(call_count))
def download_objects365v2(url, dir, unzip=True, delete=False, threads=1):
    """Download (and optionally extract) the Objects365 v2 archives.

    Args:
        url: iterable of base URLs; train/val image URLs are expanded into
            per-patch ``patchN.tar.gz`` URLs below.
        dir: destination root directory.
        unzip: extract ``.tar.gz`` archives after download.
        delete: remove each archive after extraction.
        threads: >1 dispatches downloads through a thread pool.
    """

    def download_single(url, dir):
        # Route train archives to train_zip/ (extracted into train/), val
        # archives directly into val/.
        if ('train' in url):
            saving_dir = (dir / Path('train_zip'))
            mkdir_or_exist(saving_dir)
            f = (saving_dir / Path(url).name)
            unzip_dir = (dir / Path('train'))
            mkdir_or_exist(unzip_dir)
        elif ('val' in url):
            saving_dir = (dir / Path('val'))
            mkdir_or_exist(saving_dir)
            f = (saving_dir / Path(url).name)
            unzip_dir = (dir / Path('val'))
            mkdir_or_exist(unzip_dir)
        else:
            raise NotImplementedError
        # A local file path is moved into place instead of downloaded;
        # existing downloads are not re-fetched.
        if Path(url).is_file():
            Path(url).rename(f)
        elif (not f.exists()):
            print(f'Downloading {url} to {f}')
            torch.hub.download_url_to_file(url, f, progress=True)
        if (unzip and str(f).endswith('.tar.gz')):
            print(f'Unzipping {f.name}')
            tar = tarfile.open(f)
            tar.extractall(path=unzip_dir)
            if delete:
                f.unlink()
                print(f'Delete {f}')

    # Expand base URLs into the concrete patch archive list: 51 train
    # patches, val v1 patches 0-15, val v2 patches 16-43; annotation files
    # pass through unchanged.
    full_url = []
    for _url in url:
        if (('zhiyuan_objv2_train.tar.gz' in _url) or ('zhiyuan_objv2_val.json' in _url)):
            full_url.append(_url)
        elif ('train' in _url):
            for i in range(51):
                full_url.append(f'{_url}patch{i}.tar.gz')
        elif ('val/images/v1' in _url):
            for i in range(16):
                full_url.append(f'{_url}patch{i}.tar.gz')
        elif ('val/images/v2' in _url):
            for i in range(16, 44):
                full_url.append(f'{_url}patch{i}.tar.gz')
        else:
            raise NotImplementedError
    dir = Path(dir)
    if (threads > 1):
        pool = ThreadPool(threads)
        # NOTE(review): the imap iterator is never consumed — tasks are still
        # dispatched by the pool's handler thread and join() waits for them,
        # but per-task exceptions are silently discarded; confirm acceptable.
        pool.imap((lambda x: download_single(*x)), zip(full_url, repeat(dir)))
        pool.close()
        pool.join()
    else:
        for u in full_url:
            download_single(u, dir)
def compute_predictions_log_probs(all_examples, all_features, all_results, n_best_size, max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, start_n_top, end_n_top, version_2_with_negative, tokenizer, verbose_logging):
    """Write final QA predictions from XLNet/XLM-style start/end log-probs.

    For every example, combines the top start_n_top x end_n_top span
    candidates across its features, keeps the n_best_size best by summed
    log-prob, and writes three JSON files: best predictions, the n-best
    list, and (for v2) per-example null-answer score differences.
    """
    _PrelimPrediction = collections.namedtuple('PrelimPrediction', ['feature_index', 'start_index', 'end_index', 'start_log_prob', 'end_log_prob'])
    _NbestPrediction = collections.namedtuple('NbestPrediction', ['text', 'start_log_prob', 'end_log_prob'])
    logger.info('Writing predictions to: %s', output_prediction_file)
    # Group sliding-window features back under their source example.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        # Minimum "no-answer" (cls) score across this example's features.
        score_null = 1000000
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            cur_null_score = result.cls_logits
            score_null = min(score_null, cur_null_score)
            # Enumerate all top-k start x top-k end combinations; end scores
            # are stored flattened per start candidate, hence j_index.
            for i in range(start_n_top):
                for j in range(end_n_top):
                    start_log_prob = result.start_logits[i]
                    start_index = result.start_top_index[i]
                    j_index = ((i * end_n_top) + j)
                    end_log_prob = result.end_logits[j_index]
                    end_index = result.end_top_index[j_index]
                    # Discard spans outside the paragraph, split across
                    # windows, inverted, or longer than allowed.
                    if (start_index >= (feature.paragraph_len - 1)):
                        continue
                    if (end_index >= (feature.paragraph_len - 1)):
                        continue
                    if (not feature.token_is_max_context.get(start_index, False)):
                        continue
                    if (end_index < start_index):
                        continue
                    length = ((end_index - start_index) + 1)
                    if (length > max_answer_length):
                        continue
                    prelim_predictions.append(_PrelimPrediction(feature_index=feature_index, start_index=start_index, end_index=end_index, start_log_prob=start_log_prob, end_log_prob=end_log_prob))
        # Best candidates first, ranked by total span log-prob.
        prelim_predictions = sorted(prelim_predictions, key=(lambda x: (x.start_log_prob + x.end_log_prob)), reverse=True)
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if (len(nbest) >= n_best_size):
                break
            feature = features[pred.feature_index]
            # Map the tokenized span back to the original document text.
            tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
            tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
            tok_text = tok_text.strip()
            tok_text = ' '.join(tok_text.split())
            orig_text = ' '.join(orig_tokens)
            # Tokenizers expose the lower-casing flag under different names.
            if hasattr(tokenizer, 'do_lower_case'):
                do_lower_case = tokenizer.do_lower_case
            else:
                do_lower_case = tokenizer.do_lowercase_and_remove_accent
            final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
            # Deduplicate answers that normalize to the same text.
            if (final_text in seen_predictions):
                continue
            seen_predictions[final_text] = True
            nbest.append(_NbestPrediction(text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob))
        # Guarantee at least one (dummy) entry so downstream code is safe.
        if (not nbest):
            nbest.append(_NbestPrediction(text='', start_log_prob=(- 1000000.0), end_log_prob=(- 1000000.0)))
        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append((entry.start_log_prob + entry.end_log_prob))
            if (not best_non_null_entry):
                best_non_null_entry = entry
        probs = _compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output['text'] = entry.text
            output['probability'] = probs[i]
            output['start_log_prob'] = entry.start_log_prob
            output['end_log_prob'] = entry.end_log_prob
            nbest_json.append(output)
        assert (len(nbest_json) >= 1)
        assert (best_non_null_entry is not None)
        score_diff = score_null
        scores_diff_json[example.qas_id] = score_diff
        all_predictions[example.qas_id] = best_non_null_entry.text
        all_nbest_json[example.qas_id] = nbest_json
    with open(output_prediction_file, 'w') as writer:
        writer.write((json.dumps(all_predictions, indent=4) + '\n'))
    with open(output_nbest_file, 'w') as writer:
        writer.write((json.dumps(all_nbest_json, indent=4) + '\n'))
    if version_2_with_negative:
        with open(output_null_log_odds_file, 'w') as writer:
            writer.write((json.dumps(scores_diff_json, indent=4) + '\n'))
    return all_predictions
class DatasetConfig():
    """Configuration for a synthetic dataset generated from a named Sim.

    Looks up the Sim by name within the current project at construction time.
    Fix: a stray bare expression statement (`_zpy_init`) sat at the top of the
    class body — a NameError at class-creation time if undefined, a no-op
    otherwise (likely a garbled decorator) — and has been removed.
    """

    def __init__(self, sim_name: str, **kwargs):
        """Resolve `sim_name` to exactly one Sim in the current project.

        Raises:
            RuntimeError: if zero or more than one Sim matches.
        """
        self._sim = None
        self._config = {}
        unique_sim_filters = {'project': _project['id'], 'name': sim_name}
        sims = get(f'{_base_url}/api/v1/sims/', params=unique_sim_filters, headers=auth_header(_auth_token)).json()['results']
        if len(sims) > 1:
            raise RuntimeError(f'Create DatasetConfig failed: Found more than 1 Sim for unique filters which should not be possible.')
        elif len(sims) == 1:
            print(f"Found Sim<{sim_name}> in Project<{_project['name']}>")
            self._sim = sims[0]
        else:
            raise RuntimeError(f"Create DatasetConfig failed: Could not find Sim<{sim_name}> in Project<{_project['name']}>.")

    def sim(self):
        """Return the resolved Sim record."""
        return self._sim

    def available_params(self):
        """Return the Sim's run kwargs (the tunable generation parameters)."""
        return self._sim['run_kwargs']

    def config(self):
        """Return the accumulated config overrides."""
        return self._config

    def set(self, path: str, value: any):
        """Set a config value at a dotted/bracketed path."""
        set_(self._config, path, value)

    def unset(self, path):
        """Remove the config value at the given path."""
        unset(self._config, path)
def gpu_info() -> list:
    """Collect per-GPU stats by parsing `nvidia-smi` output.

    Returns:
        list of dicts, one per GPU, with idx, name, uuid, fan speed,
        temperature, power used/cap, memory used/total, and utilization.
    """
    # `nvidia-smi -L` yields one "GPU <idx>: <name> (UUID: <uuid>)" per device.
    gpus = [line for line in _run_cmd(['nvidia-smi', '-L']) if line]
    gpu_infos = [re.match('GPU ([0-9]+): ([^(]+) \\(UUID: ([^)]+)\\)', gpu).groups() for gpu in gpus]
    gpu_infos = [dict(zip(['idx', 'name', 'uuid'], info)) for info in gpu_infos]
    gpu_count = len(gpus)
    lines = _run_cmd(['nvidia-smi'])
    # The per-GPU table starts at line 7, three lines per device.
    selected_lines = lines[7:(7 + (3 * gpu_count))]
    for i in range(gpu_count):
        # Second line of each GPU entry holds fan/temp/power, memory, util
        # cells separated by '|'. (A previous duplicate memory parse here was
        # dead code — its results were overwritten below — and was removed.)
        (pw_tmp_info, mem_info, util_info) = [x.strip() for x in selected_lines[((3 * i) + 1)].split('|')[1:(- 1)]]
        # Strip the unit suffix from each field, e.g. '34%' -> '34', '45C' -> '45'.
        pw_tmp_info = [x[:(- 1)] for x in pw_tmp_info.split(' ') if (len(x) > 0)]
        (fan_speed, temperature, pwr_used, pwr_cap) = [int(pw_tmp_info[i]) for i in (0, 1, 3, 5)]
        gpu_infos[i]['fan_spd'] = fan_speed
        gpu_infos[i]['temp'] = temperature
        gpu_infos[i]['pwr_used'] = pwr_used
        gpu_infos[i]['pwr_cap'] = pwr_cap
        (mem_used, mem_total) = [int(x) for x in mem_info.replace('MiB', '').split(' / ')]
        gpu_infos[i]['mem_used'] = mem_used
        gpu_infos[i]['mem_total'] = mem_total
        utilization = int(util_info.split(' ')[0][:(- 1)])
        gpu_infos[i]['util'] = utilization
        gpu_infos[i]['idx'] = int(gpu_infos[i]['idx'])
    return gpu_infos
class TFRemBertForSequenceClassification(metaclass=DummyObject):
    """Import-time placeholder for the TF RemBERT sequence-classification
    model; usable only when the 'tf' backend is installed."""
    # Backends this dummy stands in for.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Fails fast (via requires_backends) when TensorFlow is unavailable.
        requires_backends(self, ['tf'])
def get_config():
    """Build the experiment argument parser (on top of common_config) and
    parse the command line.

    Fix: the numeric options previously had no `type=`, so values supplied on
    the command line arrived as strings; explicit types are now declared
    (defaults are unchanged).
    """
    parser = ArgumentParser()
    parser = common_config(parser)
    parser.add_argument('--fit_gde', default=False, type=str_to_bool, help='Whether to fit GDE on normal data.')
    parser.add_argument('--align', default=True, type=str_to_bool, help='align')
    parser.add_argument('--dims', default=[512, 512, 512, 512, 512, 512, 512, 512, 128], help='list indicating number of hidden units for each layer of projection head')
    parser.add_argument('--num_class', default=3, type=int)
    parser.add_argument('--encoder', default='resnet18', type=str)
    parser.add_argument('--cutpaste_type', default='3way', type=str)
    parser.add_argument('--lr', default=0.03, type=float)
    parser.add_argument('--momentum', default=0.9, type=float)
    parser.add_argument('--weight_decay', default=3e-05, type=float)
    parser.add_argument('--max_steps', default=10000, type=int)
    parser.add_argument('--batch_size', default=32, type=int)
    parser.add_argument('--localization', '-loc', default=True, type=str_to_bool, help='If True train on (32,32) cropped patches and evaluate localization performance')
    return parser.parse_args()
def save_experiment_config(args, config, logger=None):
    """Copy the experiment's config file into the experiment directory as
    config.yaml and log the copy.

    Fix: replaced `os.system('cp ...')` with shutil.copy — portable (no
    shell, works on Windows), raises on failure instead of silently
    returning a nonzero status, and immune to shell injection via paths.
    """
    import shutil
    config_path = os.path.join(args.experiment_path, 'config.yaml')
    shutil.copy(args.config, config_path)
    print_log(f'Copy the Config file from {args.config} to {config_path}', logger=logger)
def get_parser():
    """Build the argument parser for training/testing the decoupled GCN.

    Fix: `--only_train_part` had default=True with no type, so any value
    passed on the command line (including 'False') arrived as a non-empty,
    truthy string; it now parses through str2bool like the other boolean
    flags. `--only_train_epoch` and `--warm_up_epoch` likewise gained
    `type=int`. Defaults are unchanged.
    """
    parser = argparse.ArgumentParser(description='Decoupling Graph Convolution Network with DropGraph Module')
    parser.add_argument('--work-dir', default='./work_dir/temp', help='the work folder for storing results')
    parser.add_argument('-model_saved_name', default='')
    parser.add_argument('-Experiment_name', default='')
    parser.add_argument('--config', default='./config/nturgbd-cross-view/test_bone.yaml', help='path to the configuration file')
    parser.add_argument('--phase', default='train', help='must be train or test')
    parser.add_argument('--save-score', type=str2bool, default=False, help='if ture, the classification score will be stored')
    parser.add_argument('--seed', type=int, default=1, help='random seed for pytorch')
    parser.add_argument('--log-interval', type=int, default=100, help='the interval for printing messages (#iteration)')
    parser.add_argument('--save-interval', type=int, default=2, help='the interval for storing models (#iteration)')
    parser.add_argument('--eval-interval', type=int, default=5, help='the interval for evaluating models (#iteration)')
    parser.add_argument('--print-log', type=str2bool, default=True, help='print logging or not')
    parser.add_argument('--show-topk', type=int, default=[1, 5], nargs='+', help='which Top K accuracy will be shown')
    parser.add_argument('--feeder', default='feeder.feeder', help='data loader will be used')
    parser.add_argument('--num-worker', type=int, default=32, help='the number of worker for data loader')
    parser.add_argument('--train-feeder-args', default=dict(), help='the arguments of data loader for training')
    parser.add_argument('--test-feeder-args', default=dict(), help='the arguments of data loader for test')
    parser.add_argument('--model', default=None, help='the model will be used')
    parser.add_argument('--model-args', type=dict, default=dict(), help='the arguments of model')
    parser.add_argument('--weights', default=None, help='the weights for network initialization')
    parser.add_argument('--ignore-weights', type=str, default=[], nargs='+', help='the name of weights which will be ignored in the initialization')
    parser.add_argument('--base-lr', type=float, default=0.01, help='initial learning rate')
    parser.add_argument('--step', type=int, default=[20, 40, 60], nargs='+', help='the epoch where optimizer reduce the learning rate')
    parser.add_argument('--device', type=int, default=0, nargs='+', help='the indexes of GPUs for training or testing')
    parser.add_argument('--optimizer', default='SGD', help='type of optimizer')
    parser.add_argument('--nesterov', type=str2bool, default=False, help='use nesterov or not')
    parser.add_argument('--batch-size', type=int, default=256, help='training batch size')
    parser.add_argument('--test-batch-size', type=int, default=256, help='test batch size')
    parser.add_argument('--start-epoch', type=int, default=0, help='start training from which epoch')
    parser.add_argument('--num-epoch', type=int, default=80, help='stop training in which epoch')
    parser.add_argument('--weight-decay', type=float, default=0.0005, help='weight decay for optimizer')
    parser.add_argument('--keep_rate', type=float, default=0.9, help='keep probability for drop')
    parser.add_argument('--groups', type=int, default=8, help='decouple groups')
    parser.add_argument('--only_train_part', type=str2bool, default=True)
    parser.add_argument('--only_train_epoch', type=int, default=0)
    parser.add_argument('--warm_up_epoch', type=int, default=0)
    return parser
# NOTE(review): bare call looks like a truncated decorator — upstream fairseq
# registers this task with `@register_task('winogrande')`; confirm.
_task('winogrande')

class WinograndeTask(WSCTask):
    """WinoGrande task: like WSC, but the candidate referent is provided
    directly as text rather than extracted from the sentence."""

    # NOTE(review): takes `cls` but no @classmethod decorator is visible here —
    # presumably applied upstream or lost in extraction; confirm.
    def setup_task(cls, args, **kwargs):
        """Create the task: require the matching criterion and load the dict."""
        assert (args.criterion == 'winogrande'), 'Must set --criterion=winogrande'
        vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))
        print('| dictionary: {} types'.format(len(vocab)))
        return cls(args, vocab)

    def load_dataset(self, split, epoch=0, combine=False, data_path=None, return_only=False, **kwargs):
        """Load `<split>.jsonl`: binarize query/candidate spans with masks,
        pad them, wrap everything in a nested dataset and shuffle-sort it.

        When return_only is True the dataset is returned without being
        cached in self.datasets.
        """
        if (data_path is None):
            data_path = os.path.join(self.args.data, (split + '.jsonl'))
        if (not os.path.exists(data_path)):
            raise FileNotFoundError('Cannot find data: {}'.format(data_path))
        query_tokens = []
        query_masks = []
        query_lengths = []
        candidate_tokens = []
        candidate_masks = []
        candidate_lengths = []
        # Test split has no gold query (eval mode of the iterator).
        itr = wsc_utils.winogrande_jsonl_iterator(data_path, eval=(split == 'test'))
        for sample in itr:
            (sentence, pronoun_span, query, cand_text) = sample
            # Split the sentence around the pronoun to rebuild each candidate
            # in context; preserve whether a space preceded the pronoun.
            prefix = sentence[:pronoun_span[0]].rstrip()
            suffix = sentence[pronoun_span[1]:]
            leading_space = (' ' if sentence[:pronoun_span[0]].endswith(' ') else '')
            trailing_space = ''
            if (query is not None):
                (query_toks, query_mask) = self.binarize_with_mask(query, prefix, suffix, leading_space, trailing_space)
                query_len = len(query_toks)
            else:
                (query_toks, query_mask, query_len) = (None, None, 0)
            query_tokens.append(query_toks)
            query_masks.append(query_mask)
            query_lengths.append(query_len)
            (cand_toks, cand_mask) = self.binarize_with_mask(cand_text, prefix, suffix, leading_space, trailing_space)
            candidate_tokens.append(cand_toks)
            candidate_masks.append(cand_mask)
            candidate_lengths.append(cand_toks.size(0))
        query_lengths = np.array(query_lengths)

        def get_pad_dataset_fn(tokens, length, pad_idx):
            # Wrap raw token lists in a right-padded dataset.
            return PadDataset(ListDataset(tokens, length), pad_idx=pad_idx, left_pad=False)

        query_tokens = get_pad_dataset_fn(query_tokens, query_lengths, self.vocab.pad())
        query_masks = get_pad_dataset_fn(query_masks, query_lengths, 0)
        candidate_lengths = np.array(candidate_lengths)
        candidate_tokens = get_pad_dataset_fn(candidate_tokens, candidate_lengths, self.vocab.pad())
        candidate_masks = get_pad_dataset_fn(candidate_masks, candidate_lengths, 0)
        dataset = {'id': IdDataset(), 'query_tokens': query_tokens, 'query_masks': query_masks, 'candidate_tokens': candidate_tokens, 'candidate_masks': candidate_masks, 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(query_tokens, reduce=True)}
        nested_dataset = NestedDictionaryDataset(dataset, sizes=[query_lengths])
        # Deterministic shuffle keyed on the task seed.
        with data_utils.numpy_seed(self.args.seed):
            shuffle = np.random.permutation(len(query_tokens))
        dataset = SortDataset(nested_dataset, sort_order=[shuffle])
        if return_only:
            return dataset
        self.datasets[split] = dataset
        return self.datasets[split]
def assert_exactly_one(lst):
    """Assert that exactly one element of ``lst`` is truthy.

    On failure the assertion message lists all elements.
    """
    truthy_count = sum(1 for el in lst if el)
    assert truthy_count == 1, ', '.join(str(el) for el in lst)
def make_json(scale=1):
    """Read the rigidcloth scale config, set the first cloth's scale, and
    write the result to scale_make.json."""
    with open('conf/rigidcloth/scale/scale.json', 'r') as src:
        cfg = json.load(src)
    cfg['cloths'][0]['transform']['scale'] = scale

    def write_config(cfg, path):
        # Persist the modified config as JSON.
        with open(path, 'w') as out:
            json.dump(cfg, out)

    write_config(cfg, 'conf/rigidcloth/scale/scale_make.json')
def split_strings(strings, start, chr_lens):
    """Cut *strings* into consecutive pieces at cumulative offsets.

    `chr_lens` holds cumulative end offsets (in the original text's
    coordinates); `start` is the offset of the first character of *strings*
    within that original text.
    """
    begins = [start] + chr_lens[:-1]
    return [strings[b - start:e - start] for b, e in zip(begins, chr_lens)]
def build_transforms(cfg, is_train=True):
    """Build the image transform pipeline for person re-ID from *cfg*.

    Train mode stacks geometric and photometric augmentation before
    normalization; test mode is a deterministic resize + normalize.
    """
    normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
    if not is_train:
        return T.Compose([
            T.Resize(cfg.INPUT.SIZE_TEST),
            T.ToTensor(),
            normalize_transform,
        ])
    return T.Compose([
        T.Resize(cfg.INPUT.SIZE_TRAIN),
        T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
        T.Pad(cfg.INPUT.PADDING),
        T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
        RandomPatch(prob_happen=cfg.INPUT.RANDOM_PATCH_PROB, patch_max_area=0.16),
        T.RandomApply([T.ColorJitter(brightness=0.2, contrast=0.15, saturation=0, hue=0)], p=cfg.INPUT.COLORJIT_PROB),
        AugMix(prob=cfg.INPUT.AUGMIX_PROB),
        RandomBlur(p=cfg.INPUT.RANDOM_BLUR_PROB),
        T.ToTensor(),
        normalize_transform,
        # RandomErasing runs after ToTensor/Normalize, so it erases tensor values.
        RandomErasing(probability=cfg.INPUT.RE_PROB, sh=cfg.INPUT.RE_SH, mean=cfg.INPUT.PIXEL_MEAN),
    ])
def llama2_completion(pipeline, caption):
    """Generate a question for *caption* with a llama2 text-generation pipeline.

    Uses deterministic beam search and returns only the first paragraph of
    the model's continuation (the prompt itself is stripped from the output).
    """
    prompt = create_qg_prompt(caption)
    generated = pipeline(prompt, do_sample=False, num_beams=5, num_return_sequences=1, max_length=512)
    continuation = generated[0]['generated_text'][len(prompt):]
    return continuation.split('\n\n')[0]
def get_next_batch_new(dataloader, device):
    """Pull the next batch from *dataloader* and move it to *device*."""
    batch = next(dataloader)
    return batch.to(device)
def get_model_key(base_model_key: str, dataset_key: str, train_key: str):
    """Compose a unique model identifier from base-model, dataset and training keys.

    A ``None`` *train_key* denotes the untrained base model, which is
    identified by *base_model_key* alone.
    """
    if train_key is None:
        return base_model_key
    return 'B_{}__D_{}__T_{}'.format(base_model_key, dataset_key, train_key)
def test_kernel_expand_multi_d():
    """Grammar expansion over 3-D base kernels must strictly grow the set."""
    dims = 3
    base = list(fk.base_kernels(dims))
    expanded = grammar.expand_kernels(dims, base)
    assert len(expanded) > len(base)
def test_format_results():
    """Smoke-test NuScenesMonoDataset.format_results on pickled mono3d outputs.

    Requires CUDA. Loads fixture detections, writes them in the nuScenes
    submission format, reloads the produced json, and checks one detection's
    fields against reference values.
    """
    if (not torch.cuda.is_available()):
        pytest.skip('test requires GPU and torch+cuda')
    root_path = 'tests/data/nuscenes/'
    ann_file = 'tests/data/nuscenes/nus_infos_mono3d.coco.json'
    class_names = ['car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier']
    # Test-time pipeline: load + annotations + resize/pad + bundle/collect.
    pipeline = [dict(type='LoadImageFromFileMono3D'), dict(type='LoadAnnotations3D', with_bbox=True, with_label=True, with_attr_label=True, with_bbox_3d=True, with_label_3d=True, with_bbox_depth=True), dict(type='Resize', img_scale=(1600, 900), keep_ratio=True), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle3D', class_names=class_names), dict(type='Collect3D', keys=['img', 'gt_bboxes', 'gt_labels', 'attr_labels', 'gt_bboxes_3d', 'gt_labels_3d', 'centers2d', 'depths'])]
    nus_dataset = NuScenesMonoDataset(ann_file=ann_file, pipeline=pipeline, data_root=root_path, test_mode=True)
    results = mmcv.load('tests/data/nuscenes/mono3d_sample_results.pkl')
    (result_files, tmp_dir) = nus_dataset.format_results(results)
    result_data = mmcv.load(result_files['img_bbox'])
    # The fixture contains exactly one sample token holding 8 detections.
    assert (len(result_data['results'].keys()) == 1)
    assert (len(result_data['results']['e93e98b63d3bd129dc53ceee']) == 8)
    det = result_data['results']['e93e98b63d3bd129dc53ceee'][0]
    # Reference (regression) values for the first detection.
    expected_token = 'e93e98b63d3bd129dc53ceee'
    expected_trans = torch.tensor([1018., 605., 0.])
    expected_size = torch.tensor([1., 4.25, 1.])
    expected_rotation = torch.tensor([(- 0.), (- 0.), 0., (- 0.)])
    expected_detname = 'car'
    expected_attr = 'vehicle.moving'
    assert (det['sample_token'] == expected_token)
    # Float fields compared with a small tolerance.
    assert torch.allclose(torch.tensor(det['translation']), expected_trans, 1e-05)
    assert torch.allclose(torch.tensor(det['size']), expected_size, 1e-05)
    assert torch.allclose(torch.tensor(det['rotation']), expected_rotation, 1e-05)
    assert (det['detection_name'] == expected_detname)
    assert (det['attribute_name'] == expected_attr)
@register_model('s2t_transformer')
class S2TTransformerModel(FairseqEncoderDecoderModel):
    """Speech-to-Text Transformer: a Conv1d-subsampled speech encoder followed
    by a standard Transformer decoder.

    NOTE(review): the original file had the `@register_model` decorator and the
    `@staticmethod`/`@classmethod` decorators stripped (``cls.build_encoder(args)``
    in ``build_model`` requires ``build_encoder`` to be a classmethod); they are
    restored here.
    """

    def __init__(self, encoder, decoder):
        super().__init__(encoder, decoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument('--conv-kernel-sizes', type=str, metavar='N', help='kernel sizes of Conv1d subsampling layers')
        parser.add_argument('--conv-channels', type=int, metavar='N', help='# of channels in Conv1d subsampling layers')
        parser.add_argument('--activation-fn', type=str, default='relu', choices=utils.get_available_activation_fns(), help='activation function to use')
        parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
        parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
        parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
        parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN')
        parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers')
        parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads')
        parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads')
        parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block')
        parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings')
        parser.add_argument('--layernorm-embedding', action='store_true', help='add layernorm to embedding')
        parser.add_argument('--no-scale-embedding', action='store_true', help='if True, dont scale embeddings')
        parser.add_argument('--load-pretrained-encoder-from', type=str, metavar='STR', help='model to take encoder weights from (for initialization)')
        parser.add_argument('--encoder-freezing-updates', default=None, type=int, metavar='N', help='freeze encoder for first N updates')

    @classmethod
    def build_encoder(cls, args):
        """Build the speech encoder, optionally initializing it from a
        pretrained checkpoint (missing checkpoint only warns)."""
        encoder = S2TTransformerEncoder(args)
        pretraining_path = getattr(args, 'load_pretrained_encoder_from', None)
        if pretraining_path is not None:
            if not Path(pretraining_path).exists():
                logger.warning(f'skipped pretraining because {pretraining_path} does not exist')
            else:
                encoder = checkpoint_utils.load_pretrained_component_from_model(component=encoder, checkpoint=pretraining_path)
                logger.info(f'loaded pretrained encoder from: {pretraining_path}')
        return encoder

    @classmethod
    def build_decoder(cls, args, task, embed_tokens):
        """Build the scriptable Transformer decoder over the target dictionary."""
        return TransformerDecoderScriptable(args, task.target_dictionary, embed_tokens)

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance (fairseq task entry point)."""
        base_architecture(args)

        def build_embedding(dictionary, embed_dim):
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            return Embedding(num_embeddings, embed_dim, padding_idx)

        decoder_embed_tokens = build_embedding(task.target_dictionary, args.decoder_embed_dim)
        encoder = cls.build_encoder(args)
        decoder = cls.build_decoder(args, task, decoder_embed_tokens)
        return cls(encoder, decoder)

    def get_normalized_probs(self, net_output: Tuple[(Tensor, Optional[Dict[(str, List[Optional[Tensor]])]])], log_probs: bool, sample: Optional[Dict[(str, Tensor)]]=None):
        """Return (log-)probabilities from the decoder output, marked batch-first
        for the sequence generator."""
        lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
        lprobs.batch_first = True
        return lprobs

    def forward(self, src_tokens, src_lengths, prev_output_tokens):
        """Standard encoder-decoder forward; returns the decoder output."""
        encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths)
        decoder_out = self.decoder(prev_output_tokens=prev_output_tokens, encoder_out=encoder_out)
        return decoder_out
def register_model(name, dataclass=None):
    """Return a class decorator that registers a model under *name*.

    Duplicate registrations return the already-registered class. When a
    *dataclass* config is supplied it is validated, recorded, stored in the
    Hydra ConfigStore, and a no-op default architecture is registered.

    NOTE(review): the original file had the decorator line garbled to
    ``_model_architecture(name, name)``; restored as the
    ``@register_model_architecture(name, name)`` decorator on ``noop``
    (matching upstream fairseq).
    """
    def register_model_cls(cls):
        if name in MODEL_REGISTRY:
            # Idempotent: re-registering the same name returns the first class.
            return MODEL_REGISTRY[name]
        if not issubclass(cls, BaseFairseqModel):
            raise ValueError('Model ({}: {}) must extend BaseFairseqModel'.format(name, cls.__name__))
        MODEL_REGISTRY[name] = cls
        if (dataclass is not None) and (not issubclass(dataclass, FairseqDataclass)):
            raise ValueError('Dataclass {} must extend FairseqDataclass'.format(dataclass))
        cls.__dataclass = dataclass
        if dataclass is not None:
            MODEL_DATACLASS_REGISTRY[name] = dataclass
            # Expose the config to Hydra under the "model" group.
            cs = ConfigStore.instance()
            node = dataclass()
            node._name = name
            cs.store(name=name, group='model', node=node, provider='fairseq')

            # Register a default (no-op) architecture for dataclass-configured models.
            @register_model_architecture(name, name)
            def noop(_):
                pass
        return cls
    return register_model_cls
def environment_creation(args):
    """Restyle every image under args.input with StyleAugmentor, mirroring the
    scan/view directory layout into args.output.

    One style embedding is sampled per view, so all frames of a view share
    the same style.
    """
    src_root = args.input
    dst_root = args.output
    scans = os.listdir(src_root)
    augmentor = StyleAugmentor()
    to_pil = transforms.ToPILImage()
    for idx, scan in enumerate(scans):
        scan_dir = f'{src_root}/{scan}'
        if not os.path.isdir(scan_dir):
            continue
        print('scan:', scan, 'progress:', idx, '/', len(scans))
        for view in os.listdir(scan_dir):
            print('view:', view)
            view_dir = f'{scan_dir}/{view}'
            # One embedding per view keeps the style consistent across frames.
            embedding = augmentor.sample_embedding(1)
            for img in os.listdir(view_dir):
                content = image_loader(f'{view_dir}/{img}')
                restyled = augmentor(content, embedding=embedding)
                tensor = restyled.squeeze(0).cpu().detach()
                pil_img = to_pil(tensor)
                out_dir = f'{dst_root}/{scan}/{view}'
                if not os.path.exists(out_dir):
                    os.makedirs(out_dir)
                pil_img.save(f'{dst_root}/{scan}/{view}/{img}')
def test_actionAngleTorus_Isochrone_actions():
    """Check that actionAngleTorus inverts actionAngleIsochrone: converting
    actions/angles to phase-space coordinates and back recovers the actions
    to within 10**tol relative error.

    Fix: the Lz and Jz assertion messages previously said "for Jr"
    (copy-paste error); they now name the quantity actually tested.
    """
    from galpy.actionAngle import actionAngleIsochrone, actionAngleTorus
    from galpy.potential import IsochronePotential
    ip = IsochronePotential(normalize=1.0, b=1.2)
    aAI = actionAngleIsochrone(ip=ip)
    tol = -6.0
    aAT = actionAngleTorus(pot=ip, tol=tol)
    (jr, jphi, jz) = (0.075, 1.1, 0.05)
    angler = numpy.array([0.0])
    anglephi = numpy.array([numpy.pi])
    anglez = numpy.array([numpy.pi / 2.0])
    # Torus machinery: actions + angles -> (R, vR, vT, z, vz, phi).
    RvR = aAT(jr, jphi, jz, angler, anglephi, anglez).T
    # Recover actions from the phase-space point.
    ji = aAI(*RvR)
    djr = numpy.fabs((ji[0] - jr) / jr)
    dlz = numpy.fabs((ji[1] - jphi) / jphi)
    djz = numpy.fabs((ji[2] - jz) / jz)
    assert (djr < (10.0 ** tol)), ('actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Jr at %f%%' % (djr * 100.0))
    assert (dlz < (10.0 ** tol)), ('actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Lz at %f%%' % (dlz * 100.0))
    assert (djz < (10.0 ** tol)), ('actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Jz at %f%%' % (djz * 100.0))
    return None
def _timestamp_type_check(df_column):
_is_pd_datetime = pd.api.types.is_datetime64_any_dtype(df_column.dtypes)
if (_is_pd_datetime is not True):
logging.warning('Datetime column should be datetime64 dtype. You can manually modify the dtype, or set repair=True when initialize TSDataset.')
return False
return True |
def train_dmc_redq(args):
    """Train a REDQ agent on a DeepMind Control task described by *args*.

    Builds separate train/test envs, a REDQ agent sized to the env spaces,
    a (optionally prioritized) replay buffer, and runs the redq training loop.
    """
    train_env = dc.envs.load_dmc(**vars(args))
    test_env = dc.envs.load_dmc(**vars(args))
    obs_dim = train_env.observation_space.shape[0]
    act_dim = train_env.action_space.shape[0]
    agent = dc.redq.REDQAgent(obs_dim, act_dim, args.log_std_low, args.log_std_high, args.critic_ensemble_size)
    buffer_cls = dc.replay.PrioritizedReplayBuffer if args.prioritized_replay else dc.replay.ReplayBuffer
    buffer = buffer_cls(args.buffer_size, state_dtype=float, state_shape=train_env.observation_space.shape, action_shape=train_env.action_space.shape)
    agent = dc.redq.redq(agent=agent, train_env=train_env, test_env=test_env, buffer=buffer, **vars(args))
class GpuWaitResetCollector(DecorrelatingStartCollector):
    """Environment collector (GPU-agent variant) that waits to reset finished
    environments until between batches: done envs receive zeroed inputs for
    the remainder of the batch, and ``reset_if_needed`` performs the actual
    resets before the next batch.

    Fix: ``np.bool`` was deprecated and removed in NumPy >= 1.24; the builtin
    ``bool`` is the supported dtype.
    """

    mid_batch_reset = False  # resets happen between batches, never mid-batch

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Flags envs whose trajectory ended and which still need env.reset().
        self.need_reset = np.zeros(len(self.envs), dtype=bool)
        # Stash of true terminal observations (the step buffer holds zeros
        # for the agent after a done).
        self.temp_observation = buffer_method(self.step_buffer_np.observation, 'copy')

    def collect_batch(self, agent_inputs, traj_infos, itr):
        """Run one batch of env steps, synchronizing with the agent process
        via the act_ready/obs_ready semaphores.

        Returns (None, traj_infos, completed_infos); samples are written into
        the shared numpy buffers rather than returned.
        """
        (act_ready, obs_ready) = (self.sync.act_ready, self.sync.obs_ready)
        step = self.step_buffer_np
        # Restore the terminal observations stashed at the end of last batch.
        b = np.where(step.done)[0]
        step.observation[b] = self.temp_observation[b]
        step.done[:] = False
        (agent_buf, env_buf) = (self.samples_np.agent, self.samples_np.env)
        agent_buf.prev_action[0] = step.action
        env_buf.prev_reward[0] = step.reward
        obs_ready.release()  # signal the agent that observations are ready
        completed_infos = list()
        for t in range(self.batch_T):
            env_buf.observation[t] = step.observation
            act_ready.acquire()  # wait for the agent's actions for this step
            for (b, env) in enumerate(self.envs):
                if step.done[b]:
                    # Env already finished this batch: feed zeros until reset.
                    step.action[b] = 0
                    step.reward[b] = 0
                    if step.agent_info:
                        step.agent_info[b] = 0
                    continue
                (o, r, d, env_info) = env.step(step.action[b])
                traj_infos[b].step(step.observation[b], step.action[b], r, d, step.agent_info[b], env_info)
                if getattr(env_info, 'traj_done', d):
                    completed_infos.append(traj_infos[b].terminate(o))
                    traj_infos[b] = self.TrajInfoCls()
                    self.need_reset[b] = True
                if d:
                    # Keep the real terminal observation; the agent sees zeros.
                    self.temp_observation[b] = o
                    o = 0
                step.observation[b] = o
                step.reward[b] = r
                step.done[b] = d
                if env_info:
                    env_buf.env_info[(t, b)] = env_info
            agent_buf.action[t] = step.action
            env_buf.reward[t] = step.reward
            env_buf.done[t] = step.done
            if step.agent_info:
                agent_buf.agent_info[t] = step.agent_info
            obs_ready.release()  # observations for step t+1 are written
        return (None, traj_infos, completed_infos)

    def reset_if_needed(self, agent_inputs):
        """Reset any environments flagged during the last batch (called
        between batches, while the agent is idle)."""
        if np.any(self.need_reset):
            step = self.step_buffer_np
            for b in np.where(self.need_reset)[0]:
                step.observation[b] = self.envs[b].reset()
                step.action[b] = 0
                step.reward[b] = 0
            self.need_reset[:] = False
def DistributedFairseqModel(args, model):
    """Wrap *model* in the DistributedDataParallel flavor selected by
    ``args.ddp_backend`` ('c10d', 'no_c10d', or 'legacy').

    Returns an instance of a dynamically created subclass whose
    ``__getattr__`` forwards unknown attribute lookups to the wrapped fairseq
    model, so callers can treat the wrapper as the model itself.
    """
    assert isinstance(model, BaseFairseqModel)
    if (args.ddp_backend == 'c10d'):
        # Prefer the mainline c10d class; fall back to the pre-release name
        # on older PyTorch builds.
        if c10d_status.is_default:
            ddp_class = parallel.DistributedDataParallel
        elif c10d_status.has_c10d:
            ddp_class = parallel._DistributedDataParallelC10d
        else:
            raise Exception("Can't find c10d version of DistributedDataParallel. Please update PyTorch.")
        init_kwargs = dict(module=model, device_ids=[args.device_id], output_device=args.device_id, broadcast_buffers=False, bucket_cap_mb=args.bucket_cap_mb)
    elif (args.ddp_backend == 'no_c10d'):
        # The non-c10d implementation moved under `deprecated` once c10d
        # became the default.
        if c10d_status.is_default:
            ddp_class = parallel.deprecated.DistributedDataParallel
        else:
            ddp_class = parallel.DistributedDataParallel
        init_kwargs = dict(module=model, device_ids=[args.device_id], output_device=args.device_id, broadcast_buffers=False)
    elif (args.ddp_backend == 'legacy'):
        ddp_class = LegacyDistributedDataParallel
        init_kwargs = dict(module=model, world_size=args.distributed_world_size, bucket_cap_mb=args.bucket_cap_mb)
    else:
        raise ValueError(('Unknown --ddp-backend: ' + args.ddp_backend))
    class _DistributedFairseqModel(ddp_class):
        # Subclass that transparently exposes the wrapped module's attributes.
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
        def __getattr__(self, name):
            # nn.Module keeps submodules outside __dict__, so attribute misses
            # land here; try the wrapped model first, then the DDP base class.
            wrapped_module = super().__getattr__('module')
            if hasattr(wrapped_module, name):
                return getattr(wrapped_module, name)
            return super().__getattr__(name)
    return _DistributedFairseqModel(**init_kwargs)
class VectorFieldGDE_dev(torch.nn.Module):
    """Vector field for a graph neural controlled differential equation.

    Given the control derivative dX/dt and two learned fields f and g, the
    call returns (f(h) @ dX/dt, (g(z) f(h)) @ dX/dt) for the hidden pair
    (h, z).

    Fix: the matrix-multiplication ``@`` operators on the last two products
    were lost in the original source (syntax error); restored here.
    """

    def __init__(self, dX_dt, func_f, func_g):
        """
        Args:
            dX_dt: callable t -> derivative of the control path at time t.
            func_f: torch.nn.Module applied to the hidden state h.
            func_g: torch.nn.Module applied to the hidden state z.
        """
        super(VectorFieldGDE_dev, self).__init__()
        if not isinstance(func_f, torch.nn.Module):
            raise ValueError('func must be a torch.nn.Module.')
        if not isinstance(func_g, torch.nn.Module):
            raise ValueError('func must be a torch.nn.Module.')
        self.dX_dt = dX_dt
        self.func_f = func_f
        self.func_g = func_g

    def __call__(self, t, hz):
        control_gradient = self.dX_dt(t)
        h = hz[0]
        z = hz[1]
        vector_field_f = self.func_f(h)
        vector_field_g = self.func_g(z)
        vector_field_fg = torch.matmul(vector_field_g, vector_field_f)
        # Matrix-vector products with the control derivative (the unsqueeze /
        # squeeze pair makes the vector a column for batched matmul).
        dh = (vector_field_f @ control_gradient.unsqueeze(-1)).squeeze(-1)
        out = (vector_field_fg @ control_gradient.unsqueeze(-1)).squeeze(-1)
        return tuple([dh, out])
class ReconstractMaskedImageFromSceneGraphLoss(nn.Module):
    # (sic) class name keeps the original spelling; renaming would break callers.
    """MSE loss between ground-truth image-patch features and patch features
    reconstructed from masked-image features cross-attended to scene-graph
    triple features.
    """

    def __init__(self, triple_dim, image_dim, num_img_patches=50, num_triple=15, sg_only=False):
        super().__init__()
        self.image_dim = image_dim
        if sg_only:
            # Precomputed attention mask for the sg-only mode.
            # NOTE(review): build_attention_mask is not visible in this chunk —
            # presumably defined elsewhere on this class; confirm.
            self.register_buffer('attn_mask', self.build_attention_mask(tri_length=num_triple, img_length=num_img_patches), persistent=False)
        else:
            self.attn_mask = None
        # Cross-attention block: image features attend to triple features.
        self.transformer = BasicTransformerBlock(dim=image_dim, n_heads=8, d_head=64, dropout=0.0, context_dim=triple_dim)
        self.criterion = nn.MSELoss()

    def forward(self, local_graph_fea, local_masked_image_fea, local_gt_image_fea):
        # The permute(1, 0, 2) suggests the image features arrive as
        # (seq, batch, dim) and the transformer expects batch-first —
        # TODO confirm against the callers.
        # NOTE(review): attn_mask built in __init__ is never passed to the
        # transformer here — verify whether sg_only masking is intentionally
        # unused in forward.
        local_masked_image_fea = local_masked_image_fea.permute(1, 0, 2).contiguous()
        local_gt_image_fea = local_gt_image_fea.permute(1, 0, 2).contiguous()
        local_reconstructed_img_fea = self.transformer(local_masked_image_fea, context=local_graph_fea)
        rec_loss = self.criterion(local_reconstructed_img_fea, local_gt_image_fea)
        return rec_loss
def get_vars_maybe_avg(namespace, var_names, training, polyak_decay):
    """Fetch several variables (or their Polyak averages) by name.

    Delegates each lookup to get_var_maybe_avg and preserves the order of
    *var_names*.
    """
    return [get_var_maybe_avg(namespace, name, training, polyak_decay) for name in var_names]
def _worker_fn(rank, world_size, main_fn, args_dict):
    """Per-process entry point for torch.distributed workers.

    Binds the process to GPU *rank*, joins the NCCL process group, silences
    stdout on non-zero ranks, runs *main_fn*, then tears the group down.
    """
    torch.cuda.set_device(rank)
    dist.init_process_group(backend='nccl', rank=rank, world_size=world_size)
    if rank:
        # Only rank 0 keeps console output.
        sys.stdout = open('/dev/null', 'w')
    main_fn(**args_dict)
    dist.destroy_process_group()
def f_score(precision, recall, beta=1):
    """Return the F-beta score for the given precision and recall.

    beta > 1 weighs recall higher, beta < 1 weighs precision higher
    (beta=1 is the harmonic mean, F1).

    Fix: returns 0.0 (the limit value) when both precision and recall are
    zero instead of raising ZeroDivisionError.
    """
    denominator = ((beta ** 2) * precision) + recall
    if denominator == 0:
        return 0.0
    return ((1 + (beta ** 2)) * (precision * recall)) / denominator
class MIFCNet(nn.Module):
    """Fully-connected network used for mutual-information estimation.

    Combines a nonlinear MLP block with a linear shortcut whose weights start
    near zero except for an identity-like diagonal fixed at 1, so the input
    is initially passed through (near-)unchanged.

    Fix: the masked_fill_ mask is now boolean; modern PyTorch rejects the
    previous uint8 mask.
    """

    def __init__(self, n_input, n_units):
        """
        Args:
            n_input: input feature dimensionality.
            n_units: output dimensionality; must be >= n_input so the
                identity diagonal fits in the shortcut weight matrix.
        """
        super().__init__()
        assert n_units >= n_input
        self.linear_shortcut = nn.Linear(n_input, n_units)
        self.block_nonlinear = nn.Sequential(nn.Linear(n_input, n_units), nn.BatchNorm1d(n_units), nn.ReLU(), nn.Linear(n_units, n_units))
        # Boolean identity mask marking the (i, i) entries of the shortcut.
        eye_mask = np.zeros((n_units, n_input), dtype=bool)
        for i in range(n_input):
            eye_mask[i, i] = True
        # Small random init everywhere, then pin the diagonal to exactly 1.
        self.linear_shortcut.weight.data.uniform_(-0.01, 0.01)
        self.linear_shortcut.weight.data.masked_fill_(torch.tensor(eye_mask), 1.0)

    def forward(self, x):
        """Return the nonlinear block output plus the linear shortcut."""
        h = self.block_nonlinear(x) + self.linear_shortcut(x)
        return h
def bond_features(bond: Chem.rdchem.Bond) -> List[Union[(bool, int, float)]]:
    """Build the feature vector for an RDKit bond.

    A ``None`` bond is encoded as a one-hot "missing" vector of length
    BOND_FDIM; otherwise the vector holds bond-type flags, conjugation and
    ring membership, followed by a one-hot stereo encoding.
    """
    if bond is None:
        return [1] + [0] * (BOND_FDIM - 1)
    bt = bond.GetBondType()
    features = [
        0,  # first slot flags a missing bond; always 0 here
        bt == Chem.rdchem.BondType.SINGLE,
        bt == Chem.rdchem.BondType.DOUBLE,
        bt == Chem.rdchem.BondType.TRIPLE,
        bt == Chem.rdchem.BondType.AROMATIC,
        bond.GetIsConjugated() if bt is not None else 0,
        bond.IsInRing() if bt is not None else 0,
    ]
    features += onek_encoding_unk(int(bond.GetStereo()), list(range(6)))
    return features
class Pitenis2020(dataset.Dataset):
    """Greek offensive-language Twitter dataset (OffensEval 2020 Greek test set).

    Fixes: the ``url`` string literal was truncated mid-string in the original
    file (syntax error) — it is closed here; and the ``@classmethod``
    decorators are restored (both methods take ``cls``).
    """

    name = 'pitenis2020'
    # NOTE(review): the download URL was truncated in the original source; the
    # empty string keeps the module importable — restore the real URL.
    url = ''
    hash = '4b1cbbcf1795b078db6cd72686b6e326dcc65ef3a47bbb1'
    files = [{'name': 'pitenis2020gr.csv', 'language': 'gr', 'type': 'training', 'platform': 'twitter'}]
    license = 'UNKNOWN'

    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Unzip the download, join labels with tweets, and store the csv."""
        extracted_file_path = helpers.unzip_file(tmp_file_path)
        file1 = helpers.clean_csv(os.path.join(extracted_file_path, 'offenseval-gr-testsetv1/offenseval-gr-labela-v1.csv'), names=['lid', 'category'])
        file2 = helpers.clean_csv(os.path.join(extracted_file_path, 'offenseval-gr-testsetv1/offenseval-gr-test-v1.tsv'), names=['rid', 'tweet'], sep='\t', header=0)
        tmp_file_path = helpers.join_csvs(file1, 'lid', file2, 'rid')
        helpers.copy_file(tmp_file_path, os.path.join(dataset_folder, 'pitenis2020gr.csv'))

    @classmethod
    def unify_row(cls, row):
        """Map the raw columns onto the unified 'text'/'labels' schema."""
        row['text'] = row['tweet']
        row['labels'] = [row['category']]
        row = row.drop(['lid', 'rid', 'tweet', 'category'])
        return row
class PersonanliProcessor(DataProcessor):
    """Data processor for persona-NLI TSV data using the MNLI label set."""

    def get_train_examples(self, data_dir):
        """Read train.tsv under *data_dir* and convert rows to InputExamples."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')

    def get_dev_examples(self, data_dir):
        """Read test.tsv under *data_dir* and convert rows to InputExamples."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')

    def get_labels(self):
        """NLI label set."""
        return ['contradiction', 'entailment', 'neutral']

    def _create_examples(self, lines, set_type):
        """Turn TSV rows into InputExamples, skipping the header row."""
        examples = []
        for row_idx, row in enumerate(lines):
            if row_idx == 0:
                continue  # header row
            guid = '%s-%s' % (set_type, row[0])
            examples.append(InputExample(guid=guid, text_a=row[1], text_b=row[2], label=row[3]))
        return examples

    def create_batch(self, turn, persona, set_type='predict'):
        """Pair turns with personas as entailment-labelled prediction examples."""
        return [
            InputExample(guid='%s-%s' % (set_type, idx), text_a=t, text_b=p, label='entailment')
            for idx, (t, p) in enumerate(zip(turn, persona))
        ]
def sent_tuples_in_list(sent_tuple1, list_of_sent_tuples, keep_polarity=True):
    """Return True when *sent_tuple1* overlaps any tuple in the list.

    Each tuple is (holder, target, expression, polarity) with frozenset
    spans. A match requires non-empty intersections of holders, targets and
    expressions; empty holder/target sets are replaced by the placeholder
    {'_'}. When *keep_polarity* is True the polarities must also be equal.
    """
    holder1, target1, exp1, pol1 = sent_tuple1
    if not holder1:
        holder1 = frozenset(['_'])
    if not target1:
        target1 = frozenset(['_'])
    for holder2, target2, exp2, pol2 in list_of_sent_tuples:
        holder2 = holder2 if holder2 else frozenset(['_'])
        target2 = target2 if target2 else frozenset(['_'])
        spans_overlap = (holder1 & holder2) and (target1 & target2) and (exp1 & exp2)
        if spans_overlap and (not keep_polarity or pol1 == pol2):
            return True
    return False
def dobldobl_membertest(wsys, gpts, dim, point, evatol=1e-06, memtol=1e-06, verbose=True, tasks=0):
    """Homotopy membership test in double double precision.

    Tests whether *point* lies on the positive-dimensional solution set
    represented by the witness set (*wsys*, *gpts*) of dimension *dim*.
    `evatol` bounds the residual of evaluating the point in the system;
    `memtol` bounds the distance to a witness point; `tasks` sets the number
    of path-tracking tasks (0 = sequential). Returns True for membership.
    """
    from phcpy.interface import store_dobldobl_witness_set
    from phcpy.phcpy2c3 import py2c_witset_dobldobl_membertest as membtest
    store_dobldobl_witness_set(len(wsys), dim, wsys, gpts)
    # Double double arithmetic uses 4 doubles per complex coordinate, so the
    # variable count is len(point)/4 — presumably; verify against phcpy docs.
    nvr = (len(point) // 4)
    # The C interface takes the point as a string plus its character count.
    spt = str(point)
    nbc = len(spt)
    result = membtest(int(verbose), tasks, nvr, dim, nbc, evatol, memtol, spt)
    # NOTE(review): result[2] is read as the on-component flag (1 = member) —
    # confirm against the phcpy witset membertest documentation.
    return (result[2] == 1)
def test_summarize(model, X):
    """Check that model.summarize accumulates the expected sufficient
    statistics for the model and its two component distributions on the
    fixture data X.

    Expected values are regression fixtures, compared to 4 decimal places.
    """
    d1 = model.distributions[0]
    d2 = model.distributions[1]
    model.summarize(X)
    # Model-level accumulators (weighted sums, start/end weights).
    assert_array_almost_equal(model._xw_sum, [[0., 1.895245], [2.635103, 3.469387]], 4)
    assert_array_almost_equal(model._xw_starts_sum, [0.136405, 1.863595], 4)
    assert_array_almost_equal(model._xw_ends_sum, [0.876264, 1.123736], 4)
    # Per-distribution accumulators.
    assert_array_almost_equal(d1._w_sum, [2.771771, 2.771771, 2.771771], 4)
    assert_array_almost_equal(d1._xw_sum, [5.403805, 2.901283, 0.006904], 4)
    assert_array_almost_equal(d2._w_sum, [7.228226, 7.228226, 7.228226], 4)
    assert_array_almost_equal(d2._xw_sum, [10.596193, 8.098717, 11.993094], 4)
class Publisher():
    """Thin facade over a private broker that forwards published events."""

    def __init__(self):
        # The broker owns subscriber bookkeeping and event dispatching.
        self._broker = _Broker()

    def publish(self, event, *args, **kwargs):
        """Dispatch *event* with the given payload and return the broker's result."""
        return self._broker.dispatch(event, *args, **kwargs)
def main():
    """Split the id lines of args.input_csv into train/test id files.

    Writes '<dir>/train/ids' and '<dir>/test/ids' next to the input csv.
    The split is deterministic (shuffle=False) with args.train_percentage
    percent of the lines going to the train file.
    """
    base_dir = os.path.dirname(args.input_csv)
    train_ids_path = base_dir + '/train/ids'
    test_ids_path = base_dir + '/test/ids'
    with open(args.input_csv, 'r') as f:
        lines = f.read().splitlines()
    x_train, x_test = train_test_split(lines, train_size=(args.train_percentage / 100), random_state=args.seed, shuffle=False)
    print(f'printing the TRAIN SET IDS file in: {train_ids_path}')
    with open(train_ids_path, 'w') as out:
        out.writelines(str(row) + '\n' for row in x_train)
    print(f'printing the TEST SET IDS file in: {test_ids_path}')
    with open(test_ids_path, 'w') as out:
        out.writelines(str(row) + '\n' for row in x_test)
def build_vocab(vocab_root_path, train_all_text, text_min_count):
    """Build a word vocabulary from whitespace-tokenized training texts.

    Counts word frequencies, dumps them to
    '<vocab_root_path>/vocab_new/freq.csv' when '<vocab_root_path>/freq.csv'
    is absent, writes the thresholded vocabulary to
    '<vocab_root_path>/vocab_new/vocab-<text_min_count>.txt', and returns the
    vocabulary list with 'PAD' and 'UNK' prepended.

    Fix: the original built the vocab with an O(vocab * words) list-membership
    scan plus a second counting pass; a single-pass dict count preserves the
    same first-seen word order in O(total words).
    """
    print('building vocab,train')
    freq = {}
    for text in train_all_text:
        for word in text.split(' '):
            freq[word] = freq.get(word, 0) + 1
    # NOTE(review): existence is checked at vocab_root_path/freq.csv but the
    # file is written under vocab_new/ — kept as-is; confirm intent.
    if not os.path.exists(os.path.join(vocab_root_path, 'freq.csv')):
        print('no freq.csv, so save it')
        with open(os.path.join(vocab_root_path, 'vocab_new', 'freq.csv'), 'w') as f:
            writer = csv.writer(f)
            writer.writerows(list(zip(freq.keys(), freq.values())))
    # Keep words meeting the minimum count, in first-seen order.
    results = [word for word in freq if freq[word] >= text_min_count]
    results.insert(0, 'PAD')
    results.insert(1, 'UNK')
    with open(os.path.join(vocab_root_path, 'vocab_new', ('vocab-' + str(text_min_count)) + '.txt'), 'w') as f:
        f.write('\n'.join(results))
    return results
def assets_dir():
    """Absolute path to the 'assets' directory next to this file's parent."""
    here = path.dirname(path.abspath(__file__))
    return path.abspath(path.join(here, '../assets'))
def save_json(content, path, indent=4, **json_dump_kwargs):
    """Serialize *content* as JSON to *path* (4-space indent by default).

    Extra keyword arguments are forwarded to json.dump.
    """
    with open(path, 'w') as handle:
        json.dump(content, handle, indent=indent, **json_dump_kwargs)
class AlbertTokenizer(PreTrainedTokenizer):
    """SentencePiece-based ALBERT tokenizer.

    Handles text normalization (whitespace collapsing, accent stripping,
    lower-casing), SentencePiece tokenization with a digit-comma split
    workaround, and ALBERT's [CLS]/[SEP] special-token formatting.

    Fix: ``vocab_size`` is restored as a ``@property`` — ``get_vocab`` below
    iterates ``range(self.vocab_size)``, which requires an int, not a bound
    method.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token='[CLS]', eos_token='[SEP]', unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', sp_model_kwargs: Optional[Dict[(str, Any)]]=None, **kwargs) -> None:
        # The mask token behaves like a normal word that also strips the
        # space before it.
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token)
        self.sp_model_kwargs = ({} if (sp_model_kwargs is None) else sp_model_kwargs)
        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """Size of the base SentencePiece vocabulary (excludes added tokens)."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it here and
        # reload it from self.vocab_file in __setstate__.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Backward compatibility with pickles created before sp_model_kwargs.
        if (not hasattr(self, 'sp_model_kwargs')):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text before SentencePiece tokenization."""
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace("''", '"')
        if (not self.keep_accents):
            # Strip combining accents via NFKD decomposition.
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if (not unicodedata.combining(c))])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            # Workaround for pieces like "9,": re-tokenize the digits and
            # append the trailing comma as its own piece.
            if ((len(piece) > 1) and (piece[(- 1)] == str(',')) and piece[(- 2)].isdigit()):
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:(- 1)].replace(SPIECE_UNDERLINE, ''))
                if ((piece[0] != SPIECE_UNDERLINE) and (cur_pieces[0][0] == SPIECE_UNDERLINE)):
                    if (len(cur_pieces[0]) == 1):
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[(- 1)])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str) using the SentencePiece vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Detokenize: decode sub-token runs and surround special tokens with spaces."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            if (token in self.all_special_tokens):
                if (not prev_is_special):
                    out_string += ' '
                out_string += (self.sp_model.decode(current_sub_tokens) + token)
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """ALBERT format: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return ((cls + token_ids_0) + sep)
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Segment ids: 0 over ``[CLS] A [SEP]``, 1 over ``B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the SentencePiece model file into *save_directory*.

        Copies the original vocab file when possible, otherwise serializes
        the in-memory model. Returns a 1-tuple with the output path.
        """
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        if ((os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)) and os.path.isfile(self.vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        elif (not os.path.isfile(self.vocab_file)):
            # Fall back to serializing the in-memory SentencePiece model.
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
@six.add_metaclass(abc.ABCMeta)
class TrainingHook(tf.train.SessionRunHook, Configurable):
    """Abstract base class for training hooks: SessionRunHooks that are also
    Configurable and know the model directory and the cluster role.

    NOTE(review): the original file had the decorators garbled/stripped
    (``_metaclass(abc.ABCMeta)``); the ``@six.add_metaclass`` class decorator
    and the ``@property``/``@staticmethod`` method decorators are restored.
    """

    def __init__(self, params, model_dir, run_config):
        # Both bases are initialized explicitly (no cooperative super chain).
        tf.train.SessionRunHook.__init__(self)
        Configurable.__init__(self, params, tf.contrib.learn.ModeKeys.TRAIN)
        self._model_dir = model_dir
        self._run_config = run_config

    @property
    def model_dir(self):
        """Absolute path of the model directory."""
        return os.path.abspath(self._model_dir)

    @property
    def is_chief(self):
        """True when this process is the chief worker of the cluster."""
        return self._run_config.is_chief

    @staticmethod
    def default_params():
        raise NotImplementedError()
def set_restricted_game_conversations_for_all_workers(trainer: Trainer, delegate_policy_id: PolicyID, agent_id_to_restricted_game_specs: Dict[(AgentID, List[StrategySpec])], load_policy_spec_fn):
    """Install restricted-to-base-game action-space converters on every
    rollout worker's RestrictedGame environments.

    For each agent that has restricted-game policy specs, each env on each
    worker gets a converter backed by that worker's own delegate policy
    (worker-local objects must be used inside the worker closure).
    """
    def _set_conversions(worker: RolloutWorker):
        # Executed remotely on each rollout worker.
        def _set_restricted_env_convertions(restricted_env):
            assert isinstance(restricted_env, RestrictedGame)
            for (agent_id, action_policy_specs) in agent_id_to_restricted_game_specs.items():
                if (len(action_policy_specs) > 0):
                    convertor = RestrictedToBaseGameActionSpaceConverter(delegate_policy=worker.policy_map[delegate_policy_id], policy_specs=action_policy_specs, load_policy_spec_fn=load_policy_spec_fn)
                    restricted_env.set_action_conversion(agent_id=agent_id, converter=convertor)
        worker.foreach_env(_set_restricted_env_convertions)
    trainer.workers.foreach_worker(_set_conversions)
class SequenceClip(BaseLoader):
    """A clip of a video sequence: the frames of one named sequence together
    with the index of the clip's starting frame.
    """

    def __init__(self, split, name, starting_frame, regex='*.jpg', lmdb_env=None):
        seq_dir = osp.join(get_seq_path(split), name)
        super(SequenceClip, self).__init__(split, seq_dir, regex, lmdb_env=lmdb_env)
        self.starting_frame = starting_frame

    def __str__(self):
        """Human-readable summary: class, sequence name, start frame, length."""
        return "< class: '{}' name: '{}', startingframe: {}, frames: {} >".format(type(self).__name__, self.name, self.starting_frame, len(self))
class LevitOnnxConfig(OnnxConfig):
    """ONNX export configuration for LeViT models.

    NOTE(review): ``inputs`` and ``atol_for_validation`` are declared as
    properties upstream (OnnxConfig consumers read them as attributes); the
    stripped ``@property`` decorators are restored here.
    """

    # ONNX export of this model requires at least torch 1.11.
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        # Single NCHW image input; axis 0 (batch) is symbolic.
        return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating exported outputs against PyTorch.
        return 0.0001
def _score_ngrams(target_ngrams, prediction_ngrams):
    """Compute ROUGE-style precision/recall/F1 between two n-gram counters.

    Overlap is the clipped count of shared n-grams; the max(..., 1) guards
    divide-by-zero for empty inputs.
    """
    overlap = 0
    for ngram in target_ngrams:
        overlap += min(target_ngrams[ngram], prediction_ngrams[ngram])
    total_target = sum(target_ngrams.values())
    total_prediction = sum(prediction_ngrams.values())
    precision = overlap / max(total_prediction, 1)
    recall = overlap / max(total_target, 1)
    fmeasure = scoring.fmeasure(precision, recall)
    return scoring.Score(precision=precision, recall=recall, fmeasure=fmeasure)
class DownsamplingConvBlock(nn.Module):
    """Strided 3D convolution (kernel size == stride, so non-overlapping
    blocks) followed by optional normalization and a ReLU.
    """

    def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
        super(DownsamplingConvBlock, self).__init__()
        # The conv layer is identical on every path; only the normalization differs.
        layers = [nn.Conv3d(n_filters_in, n_filters_out, stride, padding=0, stride=stride)]
        if normalization != 'none':
            if normalization == 'batchnorm':
                layers.append(nn.BatchNorm3d(n_filters_out))
            elif normalization == 'groupnorm':
                layers.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
            elif normalization == 'instancenorm':
                layers.append(nn.InstanceNorm3d(n_filters_out))
            else:
                assert False  # unknown normalization name
        layers.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        """Apply conv -> (norm) -> ReLU."""
        return self.conv(x)
@register_criterion('composite_loss')
class CompositeLoss(FairseqCriterion):
    """Applies an underlying criterion to each of a model's multiple decoder
    outputs and averages the results.

    Fixes: the registration line had degenerated into a bare ``_criterion(...)``
    statement (restored as the ``@register_criterion`` decorator), and the
    static/class method decorators had been stripped — ``build_criterion``
    clearly takes ``cls`` and the other helpers take no ``self``.
    """

    @staticmethod
    def add_args(parser):
        """Add criterion-specific CLI arguments."""
        parser.add_argument('--underlying-criterion', type=str, metavar='VAL', required=True, help='underlying criterion to use for the composite loss')

    @staticmethod
    def build_underlying_criterion(args, task):
        # Temporarily swap args.criterion so task.build_criterion constructs the
        # underlying criterion, then restore the original value.
        saved_criterion = args.criterion
        args.criterion = args.underlying_criterion
        assert (saved_criterion != args.underlying_criterion)
        underlying_criterion = task.build_criterion(args)
        args.criterion = saved_criterion
        return underlying_criterion

    @classmethod
    def build_criterion(cls, args, task):
        underlying_criterion = CompositeLoss.build_underlying_criterion(args, task)

        class FakeModel(nn.Module):
            """Wraps the real model so the underlying criterion sees one
            (output, target) pair per call."""

            def __init__(self, model, net_out, target):
                super().__init__()
                self.model = model
                self.net_out = net_out
                self.target = target

            def forward(self, **unused):
                return self.net_out

            def get_normalized_probs(self, net_output, log_probs, sample=None):
                return self.model.get_normalized_probs(net_output, log_probs, sample=sample)

            def get_targets(self, *unused):
                return self.target

            def decoder(self):
                # NOTE(review): possibly intended as a @property on the wrapped
                # model API — confirm against the underlying criterion's usage.
                return self.model.decoder

        class _CompositeLoss(FairseqCriterion):

            def __init__(self, args, task, underlying_criterion):
                super().__init__(args, task)
                self.underlying_criterion = underlying_criterion

            def forward(self, model, sample, reduce=True):
                """Run the underlying criterion once per decoder output and
                average loss/sample_size over the outputs."""
                net_outputs = model(**sample['net_input'])
                targets = sample['target']
                bsz = targets[0].size(0)
                loss = net_outputs[0][0].new((1 if reduce else bsz)).float().zero_()
                sample_size = 0
                logging_output = {}
                for (o, t) in zip(net_outputs[0], targets):
                    m = FakeModel(model, (o, net_outputs[1]), t)
                    # The underlying criterion reads sample['target'] directly.
                    sample['target'] = t
                    (l, ss, logging_output) = self.underlying_criterion(m, sample, reduce)
                    loss += l
                    sample_size += ss
                loss.div_(len(targets))
                sample_size /= len(targets)
                logging_output['loss'] = (utils.item(loss.data) if reduce else loss.data)
                return (loss, sample_size, logging_output)

            @staticmethod
            def aggregate_logging_outputs(logging_outputs):
                return underlying_criterion.__class__.aggregate_logging_outputs(logging_outputs)

            @staticmethod
            def reduce_metrics(logging_outputs) -> None:
                underlying_criterion.__class__.reduce_metrics(logging_outputs)

        return _CompositeLoss(args, task, underlying_criterion)
def get_results(df, restraints):
    """Filter *df* down to rows matching every column==value pair in
    *restraints*, keep rows with a non-null 'best_val_f1', and summarise them
    via ``sample``.

    Returns:
        (l_max, l_min, avg) as produced by ``sample``.
    """
    filtered = df
    for column, required_value in restraints.items():
        filtered = filtered[filtered[column] == required_value]
    rows_with_f1 = filtered[pd.notnull(filtered['best_val_f1'])]
    l_max, l_min, avg = sample(rows_with_f1)
    return (l_max, l_min, avg)
def powerset(iterable):
    """Yield every subset of *iterable* as a tuple, from the empty set up to
    the full set, in order of increasing size.

    >>> list(powerset([1, 2]))
    [(), (1,), (2,), (1, 2)]
    """
    elements = list(iterable)
    subsets_by_size = (combinations(elements, size) for size in range(len(elements) + 1))
    return chain.from_iterable(subsets_by_size)
class GRU(KerasLayer):
    """Keras-style GRU recurrent layer wrapper.

    Forwards all configuration positionally to KerasLayer; the leading ``None``
    is a placeholder argument, ``input_shape`` is converted to a list when
    given, and extra kwargs pass through unchanged.
    NOTE(review): the positional order must match the KerasLayer constructor
    exactly — verify against the base class before changing any argument.
    """

    def __init__(self, output_dim, activation='tanh', inner_activation='hard_sigmoid', return_sequences=False, go_backwards=False, W_regularizer=None, U_regularizer=None, b_regularizer=None, input_shape=None, **kwargs):
        super(GRU, self).__init__(None, output_dim, activation, inner_activation, return_sequences, go_backwards, W_regularizer, U_regularizer, b_regularizer, (list(input_shape) if input_shape else None), **kwargs)
def diagonal_gaussian_kl(mu0, log_std0, mu1, log_std1):
    """Mean KL divergence KL(N0 || N1) between diagonal Gaussians.

    The per-dimension KL is summed over the event dimension (dim 1) and
    averaged over the batch dimension (dim 0). EPS (module-level constant)
    keeps the variance ratio numerically stable.
    """
    var0 = torch.exp(2 * log_std0)
    var1 = torch.exp(2 * log_std1)
    squared_mean_gap = (mu1 - mu0) ** 2
    per_dim_kl = (0.5 * ((squared_mean_gap + var0) / (var1 + EPS) - 1)) + log_std1 - log_std0
    return torch.mean(torch.sum(per_dim_kl, axis=1))
def _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False):
    """Filter dataset indices whose example sizes exceed ``max_positions``.

    Args:
        indices: iterable of example indices.
        size_fn: maps an index to its size — a scalar, a dict of tuples, or a
            tuple/iterable, matching the shape of ``max_positions``.
        max_positions: scalar, dict, or tuple of limits; None entries mean
            "no limit" for that component.
        raise_exception: unused here; kept for interface compatibility.

    Returns:
        (kept_indices_as_int64_array, list_of_filtered_out_indices)
    """
    def check_size(idx):
        # True if the example at idx fits within max_positions.
        if (isinstance(max_positions, float) or isinstance(max_positions, int)):
            # Scalar limit: direct comparison.
            return (size_fn(idx) <= max_positions)
        elif isinstance(max_positions, dict):
            # Dict limits: compare per-key tuples for keys both sides share.
            idx_size = size_fn(idx)
            assert isinstance(idx_size, dict)
            intersect_keys = (set(max_positions.keys()) & set(idx_size.keys()))
            return all((all((((a is None) or (b is None) or (a <= b)) for (a, b) in zip(idx_size[key], max_positions[key]))) for key in intersect_keys))
        else:
            if (isinstance(size_fn(idx), dict) and isinstance(max_positions, tuple)):
                # NOTE(review): dict iteration order determines the pairing
                # with the tuple limits — assumes insertion order matches;
                # confirm with callers.
                return all((((a is None) or (b is None) or (a <= b)) for (a, b) in zip(size_fn(idx).values(), max_positions)))
            if (not isinstance(size_fn(idx), Iterable)):
                # Scalar size checked against every component of the tuple limit.
                return all(((size_fn(idx) <= b) for b in max_positions))
            return all((((a is None) or (b is None) or (a <= b)) for (a, b) in zip(size_fn(idx), max_positions)))
    ignored = []
    # collect_filtered yields passing indices and appends failing ones to
    # `ignored` as a side effect.
    itr = collect_filtered(check_size, indices, ignored)
    indices = np.fromiter(itr, dtype=np.int64, count=(- 1))
    return (indices, ignored)
def generate_upload_workflow(base_workflow_name, os_type, btype, cu_version, *, filter_branch=None):
    """Build the CI job mapping for the upload step that follows a binary build.

    Returns a one-entry dict {'binary_<btype>_upload': <job config>} where the
    job depends on the given build workflow.
    """
    job = {
        'name': f'{base_workflow_name}_upload',
        'context': 'org-member',
        'requires': [base_workflow_name],
    }
    if btype == 'wheel':
        # macOS wheels go to the bucket root; others are namespaced by CUDA version.
        job['subfolder'] = '' if os_type == 'macos' else (cu_version + '/')
    if filter_branch is not None:
        # Restrict the job to the given branch plus release-candidate tags.
        job['filters'] = {
            'branches': {'only': filter_branch},
            'tags': {'only': '/v[0-9]+(\\.[0-9]+)*-rc[0-9]+/'},
        }
    return {f'binary_{btype}_upload': job}
def get_color(score: float, min_value: Union[(float, int)], max_value: Union[(float, int)], cmap: Colormap, return_alpha: bool=True, return_string: bool=True):
    """Map *score*, linearly rescaled from [min_value, max_value] into [0, 1],
    through *cmap*.

    Returns an (r, g, b[, a]) tuple with RGB scaled to 0-255 (alpha kept in
    0-1), or — when return_string is set — that tuple rendered as a string
    prefixed with 'rgba'.

    NOTE(review): the string prefix is 'rgba' even when alpha is dropped; kept
    verbatim to preserve output.
    """
    scaled_value = (score - min_value) / (max_value - min_value)
    raw = cmap(scaled_value)
    channels = (raw[0] * 255, raw[1] * 255, raw[2] * 255)
    color = channels + (raw[3],) if return_alpha else channels
    if return_string:
        color = 'rgba' + str(color)
    return color
def bi_cudnn_rnn_encoder(cell_type, hidden_size, num_layers, dropout_rate, inputs, input_lengths, is_train, output_layer=None):
    """Run a cuDNN bidirectional RNN over `inputs` with input dropout.

    Args:
        cell_type: 'lstm' or 'gru'.
        hidden_size: units per layer.
        num_layers: number of stacked RNN layers.
        dropout_rate: dropout applied to the inputs (active only when training).
        inputs: input tensor fed to the RNN.
        input_lengths: valid sequence lengths per batch element.
        is_train: training flag controlling dropout.
        output_layer: optional callable applied to the RNN outputs.

    Returns:
        (outputs, None) — the second slot is a state placeholder kept for
        interface compatibility with callers.

    Fix: removed a leftover debug print of the output shape.
    """
    if (cell_type == 'lstm'):
        RnnLayer = CudnnLstm
    elif (cell_type == 'gru'):
        RnnLayer = CudnnGru
    else:
        raise ValueError('unknown cell_type: {!r}'.format(cell_type))
    layer = RnnLayer(n_units=hidden_size, n_layers=num_layers)
    inputs = tf.layers.dropout(inputs, dropout_rate, training=is_train)
    outputs = layer.apply(is_train, inputs, input_lengths)
    if (output_layer is not None):
        outputs = output_layer(outputs)
    return (outputs, None)
class GPTJForCausalLM(metaclass=DummyObject):
    # Placeholder class emitted when torch is unavailable: instantiation fails
    # immediately with a helpful backend-missing message instead of a late
    # AttributeError.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises unless the 'torch' backend is installed.
        requires_backends(self, ['torch'])
def register_tracer(line_: str) -> None:
    """Magic-command helper: resolve a tracer class from a dotted path and make
    it the highest-priority registered tracer.

    Warns with usage text and returns when the path cannot be resolved.
    """
    path = line_.strip()
    usage = 'Usage: %flow register_tracer <module.path.to.tracer_class>'
    tracer_cls = _resolve_tracer_class(path)
    if tracer_cls is None:
        warn(usage)
        return
    # Drop any previous registration of this class, instantiate its singleton,
    # then re-add it at the front of the priority list.
    _deregister_tracers_for(tracer_cls)
    tracer_cls.instance()
    shell().registered_tracers.insert(0, tracer_cls)
def train_one_epoch(model: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, log_writer=None, args=None):
    """Train `model` for one epoch with mixed precision and gradient accumulation.

    Args:
        model: returns (loss, ...) when called as model(x, z, mask_ratio=...).
        data_loader: yields (samples_x, samples_z) batch pairs.
        optimizer: stepped via `loss_scaler` every `args.accum_iter` iterations.
        device: device the batches are moved to.
        epoch: current epoch index (used for LR scheduling and log x-axis).
        loss_scaler: AMP helper performing backward + conditional optimizer step.
        log_writer: optional SummaryWriter-like logger.
        args: must provide `accum_iter` and `mask_ratio`.

    Returns:
        dict mapping metric name -> epoch-averaged value.
    """
    model.train(True)
    metric_logger = misc.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 20
    accum_iter = args.accum_iter
    optimizer.zero_grad()
    if (log_writer is not None):
        print('log_dir: {}'.format(log_writer.log_dir))
    for (data_iter_step, (samples_x, samples_z)) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # Per-iteration LR schedule, updated once per accumulation window.
        if ((data_iter_step % accum_iter) == 0):
            lr_sched.adjust_learning_rate(optimizer, ((data_iter_step / len(data_loader)) + epoch), args)
        samples_x = samples_x.to(device, non_blocking=True)
        samples_z = samples_z.to(device, non_blocking=True)
        with torch.cuda.amp.autocast():
            (loss, _, _) = model(samples_x, samples_z, mask_ratio=args.mask_ratio)
        loss_value = loss.item()
        if (not math.isfinite(loss_value)):
            # Abort on NaN/Inf loss rather than corrupting the weights.
            print('Loss is {}, stopping training'.format(loss_value))
            sys.exit(1)
        # Scale down so gradients accumulate to the mean over accum_iter steps.
        loss /= accum_iter
        loss_scaler(loss, optimizer, parameters=model.parameters(), update_grad=(((data_iter_step + 1) % accum_iter) == 0))
        if (((data_iter_step + 1) % accum_iter) == 0):
            # Gradients were just applied; clear them for the next window.
            optimizer.zero_grad()
        torch.cuda.synchronize()
        metric_logger.update(loss=loss_value)
        lr = optimizer.param_groups[0]['lr']
        metric_logger.update(lr=lr)
        loss_value_reduce = misc.all_reduce_mean(loss_value)
        if ((log_writer is not None) and (((data_iter_step + 1) % accum_iter) == 0)):
            # Log on a per-1000-steps-per-epoch x-axis so runs with different
            # batch sizes remain comparable.
            epoch_1000x = int((((data_iter_step / len(data_loader)) + epoch) * 1000))
            log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x)
            log_writer.add_scalar('lr', lr, epoch_1000x)
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
class NeuralNetwork(nn.Module):
    """Small MLP classifier over flattened 28x28 inputs: 784 -> 16 -> 16 -> 10 logits.

    ``forward`` expects a sequence whose first element is the image batch.
    """

    def __init__(self):
        super(NeuralNetwork, self).__init__()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28 * 28, 16),
            nn.ReLU(),
            nn.Linear(16, 16),
            nn.ReLU(),
            nn.Linear(16, 10),
        )

    def forward(self, data):
        images = data[0]
        flattened = self.flatten(images)
        return self.linear_relu_stack(flattened)
def GR(epsilon):
    """Evaluate the closed-form ratio GR(epsilon).

    With u = (2/pi) * ln(1 + epsilon):
        GR = epsilon^2 / ( -0.5*ln(1 + u^2) + (2/pi)*atan(u) * ln(1 + epsilon) )
    """
    log_term = np.log(1 + epsilon)
    u = (2 / np.pi) * log_term
    denominator = ((- 0.5) * np.log(1 + (u ** 2))) + (((2 / np.pi) * np.arctan(u)) * log_term)
    return (epsilon ** 2) / denominator
@pytest.mark.parametrize('data,allow_nan', itertools.product([(np.array([2, 3, 4]), np.array([1, 2, 3, 5, np.nan])), (np.array(['a', 'b', 'c']), np.array(['q', 'a', 'nan']))], [True, False]))
def test_NaNLabelEncoder(data, allow_nan):
    """NaNLabelEncoder must round-trip known values; unknown/NaN values map to
    0 when add_nan is enabled and raise KeyError otherwise.

    Fix: restored the stripped ``@pytest.mark.parametrize`` decorator — the
    bare ``.parametrize(...)`` line was a syntax error.
    """
    (fit_data, transform_data) = data
    encoder = NaNLabelEncoder(warn=False, add_nan=allow_nan)
    encoder.fit(fit_data)
    assert np.array_equal(encoder.inverse_transform(encoder.transform(fit_data)), fit_data), 'Inverse transform should reverse transform'
    if (not allow_nan):
        # Unknown categories must raise when NaN handling is disabled.
        with pytest.raises(KeyError):
            encoder.transform(transform_data)
    else:
        assert (encoder.transform(transform_data)[0] == 0), 'First value should be translated to 0 if nan'
        assert (encoder.transform(transform_data)[(- 1)] == 0), 'Last value should be translated to 0 if nan'
        assert (encoder.transform(fit_data)[0] > 0), 'First value should not be 0 if not nan'
@pytest.mark.parametrize('loader_parameters', [{'path_data': [str(Path(__data_testing_dir__, 'microscopy_png'))], 'target_suffix': [['_seg-myelin-manual', '_seg-axon-manual']], 'extensions': ['.png'], 'roi_params': {'suffix': None, 'slice_filter_roi': None}, 'contrast_params': {'contrast_lst': []}}])
def test_bids_df_microscopy_png(download_data_testing_test_files, loader_parameters):
    """The BIDS dataframe built from the microscopy PNG test data must match
    the reference CSV exactly.

    Fixes: restored the stripped ``@pytest.mark.parametrize`` decorator (the
    bare ``.parametrize(...)`` line was a syntax error) and closed the CSV file
    handles via context managers.
    """
    bids_df = BidsDataframe(loader_parameters, __tmp_dir__, derivatives=True)
    # 'path' is machine-specific, so drop it before comparing.
    df_test = bids_df.df.drop(columns=['path'])
    df_test = df_test.sort_values(by=['filename']).reset_index(drop=True)
    csv_ref = Path(loader_parameters[LoaderParamsKW.PATH_DATA][0], 'df_ref.csv')
    csv_test = Path(loader_parameters[LoaderParamsKW.PATH_DATA][0], 'df_test.csv')
    df_test.to_csv(csv_test, index=False)
    with open(csv_ref) as f_ref, open(csv_test) as f_test:
        diff = csv_diff.compare(csv_diff.load_csv(f_ref), csv_diff.load_csv(f_test))
    assert (diff == {'added': [], 'removed': [], 'changed': [], 'columns_added': [], 'columns_removed': []})
class BasicBlock(nn.Module):
    """Two-convolution residual block (conv-bn-relu then conv-bn); the identity
    branch is optionally transformed by ``downsample`` before the addition.

    NOTE(review): no activation follows the residual sum — matches the original.
    """

    expansion = 1

    def __init__(self, batchNorm, in_planes, out_planes, stride, downsample, padding, dilation):
        super(BasicBlock, self).__init__()
        self.conv1 = conv_bn_relu(batchNorm=batchNorm, in_planes=in_planes, out_planes=out_planes, kernel_size=3, stride=stride, padding=padding, dilation=dilation, bias=False)
        self.conv2 = conv_bn(batchNorm=batchNorm, in_planes=out_planes, out_planes=out_planes, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=False)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x if self.downsample is None else self.downsample(x)
        out = self.conv2(self.conv1(x))
        out += residual
        return out
class MarkerPose(torch.nn.Module):
    """Stereo marker pose estimator.

    Pipeline: a point detector finds candidate marker points in both resized
    views, EllipSegNet refines them to ellipse centers on full-resolution
    crops, the centers are undistorted and triangulated, and a rigid pose
    (R, t) is fitted to the three triangulated marker points.

    Fix: the projection-matrix products had lost their matrix-multiplication
    operator (``self.K1 np.c_[...]`` was a syntax error); restored ``@``.
    """

    def __init__(self, superpoint, ellipsegnet, imresize, crop_sz, Params):
        super(MarkerPose, self).__init__()
        self.superpoint = superpoint
        self.ellipsegnet = ellipsegnet
        self.imresize = imresize  # (width, height) for cv2.resize
        self.crop_sz = crop_sz
        self.mid = ((crop_sz - 1) // 2)  # center offset within a crop patch
        self.K1 = Params['K1']
        self.K2 = Params['K2']
        self.dist1 = Params['dist1']
        self.dist2 = Params['dist2']
        # 3x4 projection matrices: camera 1 at the origin, camera 2 at (R, t).
        self.P1 = self.K1 @ np.c_[(np.eye(3), np.zeros(3))]
        self.P2 = self.K2 @ np.c_[(Params['R'], Params['t'])]

    def pixelPoints(self, out_det, out_cls):
        """Decode detector outputs into per-view sorted keypoint arrays."""
        scores = utils.labels2scores(out_det)
        scores = utils.simple_nms(scores, 4)  # suppress non-maxima (radius 4)
        out_cls = out_cls.argmax(1)
        kp1 = utils.sortedPoints(scores[0], out_cls[0])
        kp2 = utils.sortedPoints(scores[1], out_cls[1])
        return (kp1, kp2)

    def forward(self, x1, x2):
        """Estimate (R, t) from a stereo image pair; returns (None, None) when
        fewer than 3 markers are detected in either view."""
        device = next(self.parameters()).device
        if ((x1.ndim == 3) and (x2.ndim == 3)):
            # Color inputs: convert to grayscale first.
            x1 = cv2.cvtColor(x1, cv2.COLOR_BGR2GRAY)
            x2 = cv2.cvtColor(x2, cv2.COLOR_BGR2GRAY)
        imr1 = cv2.resize(x1, self.imresize)
        imr2 = cv2.resize(x2, self.imresize)
        imr = np.stack([imr1, imr2], 0)
        imt = torch.from_numpy(np.float32((imr / 255))).unsqueeze(1)
        imt = imt.to(device)
        (out_det, out_cls) = self.superpoint(imt)
        (kp1, kp2) = self.pixelPoints(out_det, out_cls)
        if ((kp1.shape[0] < 3) or (kp2.shape[0] < 3)):
            return (None, None)
        # Rescale keypoints from the resized frame back to full resolution.
        s = (np.array(x1.shape[::(- 1)]) / self.imresize)
        kp1 = (s * kp1)
        kp2 = (s * kp2)
        patches = utils.extractPatches(x1, x2, kp1, kp2, self.crop_sz, self.mid)
        patchest = torch.from_numpy(np.float32((patches / 255))).unsqueeze(1)
        patchest = patchest.to(device)
        out = torch.sigmoid(self.ellipsegnet(patchest))
        out = out.squeeze(1).detach().cpu().numpy()
        out = np.uint8((255 * (out > 0.5)))  # binarize segmentation masks
        centers = utils.ellipseFitting(out)
        # Shift ellipse centers from patch coordinates to image coordinates.
        c1 = ((centers[:3] + np.int32(np.round(kp1))) - self.mid)
        c2 = ((centers[3:] + np.int32(np.round(kp2))) - self.mid)
        c1 = cv2.undistortPoints(c1.reshape((- 1), 1, 2), self.K1, self.dist1, None, None, self.K1)
        c2 = cv2.undistortPoints(c2.reshape((- 1), 1, 2), self.K2, self.dist2, None, None, self.K2)
        X = cv2.triangulatePoints(self.P1, self.P2, c1, c2)
        X = (X[:3] / X[(- 1)])  # homogeneous -> Euclidean
        (Xo, Xx, Xy) = X.T
        (R, t) = utils.getPose(Xo, Xx, Xy)
        return (R, t)
def prepare_df(json_obj):
    """Normalize Chrome-trace events in place and load them into a DataFrame.

    Per event: lower-cases 'cat', coerces 'dur' to int (defaulting to 0 when
    absent), coerces 'ts' to int, and derives 'finish_time' = ts + dur for
    every event carrying a timestamp.
    """
    events = json_obj['traceEvents']
    for event in events:
        if 'cat' in event:
            event['cat'] = event['cat'].lower()
        event['dur'] = int(event['dur']) if 'dur' in event else 0
        if 'ts' in event:
            event['ts'] = int(event['ts'])
            # 'dur' is guaranteed set above, so finish_time exists for every
            # timestamped event.
            event['finish_time'] = event['ts'] + event['dur']
    return pd.DataFrame(events)
def _contraction_Cautun2020(r, M_DMO, Mbar, fbar):
func_M_DM_contract = (lambda M: ((M_DMO * 1.023) * (((M_DMO / (1.0 - fbar)) / (M + Mbar)) ** (- 0.54))))
M_DM = fixed_point(func_M_DM_contract, M_DMO)
return (((M_DM / M_DMO) * M_DMO) / (r ** 2.0)) |
class Compose(object):
    """Chain paired image/mask augmentations and return numpy arrays, casting
    the mask to uint8 at the end."""

    def __init__(self, augmentations):
        self.augmentations = augmentations

    def __call__(self, img, mask):
        # Image and mask must stay aligned throughout the pipeline.
        assert (img.size == mask.size)
        for transform in self.augmentations:
            img, mask = transform(img, mask)
        return (np.array(img), np.array(mask, dtype=np.uint8))
def load_county_level(data_dir='data', preprocess=True, discard=False):
    """Load the county-level dataset, using the cached abridged CSV when it
    exists in *data_dir*, and return it sorted by total deaths (descending)."""
    print('loading county-level data...')
    # The cached flag simply mirrors whether the abridged CSV is present.
    use_cache = 'county_data_abridged.csv' in os.listdir(data_dir)
    df = data.load_county_data(data_dir=data_dir, cached=use_cache, preprocess=preprocess, discard=discard)
    return df.sort_values('tot_deaths', ascending=False)
class UpSampling2D(ZooKerasLayer):
    """Keras-style 2D upsampling layer wrapper.

    Forwards configuration positionally to ZooKerasLayer (the leading ``None``
    is a placeholder argument); ``input_shape`` is converted to a list when
    given and extra kwargs pass through unchanged.
    """

    def __init__(self, size=(2, 2), dim_ordering='th', input_shape=None, **kwargs):
        super(UpSampling2D, self).__init__(None, size, dim_ordering, (list(input_shape) if input_shape else None), **kwargs)
class Mixed_4a(nn.Module):
    """Inception-style mixed block: two parallel branches over a 160-channel
    input, concatenated along the channel axis (96 + 96 output channels)."""

    def __init__(self):
        super(Mixed_4a, self).__init__()
        # Branch 0: 1x1 reduction followed by a 3x3 convolution.
        self.branch0 = nn.Sequential(BasicConv2d(160, 64, kernel_size=1, stride=1), BasicConv2d(64, 96, kernel_size=3, stride=1))
        # Branch 1: 1x1 reduction, factorized 7x7 (1x7 then 7x1), then 3x3.
        self.branch1 = nn.Sequential(BasicConv2d(160, 64, kernel_size=1, stride=1), BasicConv2d(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)), BasicConv2d(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)), BasicConv2d(64, 96, kernel_size=(3, 3), stride=1))

    def forward(self, x):
        branch_outputs = (self.branch0(x), self.branch1(x))
        return torch.cat(branch_outputs, 1)
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
    """Project the tokenized prediction `pred_text` back onto `orig_text`.

    Tokenization may normalize characters, so the predicted span is located in
    the re-tokenized form of `orig_text` and mapped back to original character
    positions via a whitespace-insensitive alignment. Whenever a safe mapping
    cannot be established, `orig_text` is returned unchanged.
    """
    def _strip_spaces(text):
        # Returns (text without spaces, map: stripped index -> original index).
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (i, c) in enumerate(text):
            if (c == ' '):
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = ''.join(ns_chars)
        return (ns_text, ns_to_s_map)
    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
    tok_text = ' '.join(tokenizer.tokenize(orig_text))
    start_position = tok_text.find(pred_text)
    if (start_position == (- 1)):
        # Prediction not found in the re-tokenized text; fall back.
        if verbose_logging:
            logger.info(("Unable to find text: '%s' in '%s'" % (pred_text, orig_text)))
        return orig_text
    end_position = ((start_position + len(pred_text)) - 1)
    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
    if (len(orig_ns_text) != len(tok_ns_text)):
        # Tokenization changed more than whitespace; alignment is unreliable.
        if verbose_logging:
            logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text)
        return orig_text
    # Invert the tokenized map: with-spaces index -> stripped index.
    tok_s_to_ns_map = {}
    for (i, tok_index) in tok_ns_to_s_map.items():
        tok_s_to_ns_map[tok_index] = i
    orig_start_position = None
    if (start_position in tok_s_to_ns_map):
        ns_start_position = tok_s_to_ns_map[start_position]
        if (ns_start_position in orig_ns_to_s_map):
            orig_start_position = orig_ns_to_s_map[ns_start_position]
    if (orig_start_position is None):
        if verbose_logging:
            logger.info("Couldn't map start position")
        return orig_text
    orig_end_position = None
    if (end_position in tok_s_to_ns_map):
        ns_end_position = tok_s_to_ns_map[end_position]
        if (ns_end_position in orig_ns_to_s_map):
            orig_end_position = orig_ns_to_s_map[ns_end_position]
    if (orig_end_position is None):
        if verbose_logging:
            logger.info("Couldn't map end position")
        return orig_text
    output_text = orig_text[orig_start_position:(orig_end_position + 1)]
    return output_text
class ROIPooler(nn.Module):
    """Region-of-interest feature pooling over a (possibly multi-level) feature pyramid.

    Each box is assigned to a pyramid level according to its size relative to
    ``canonical_box_size`` at ``canonical_level``, then pooled from that level's
    feature map into a fixed ``output_size`` x ``output_size`` tensor.
    """

    def __init__(self, output_size, scales, sampling_ratio, pooler_type, canonical_box_size=224, canonical_level=4):
        """
        Args:
            output_size: int or (h, w) of the pooled output.
            scales: per-level feature map scales (1/stride), finest first; must
                form a contiguous power-of-2 pyramid.
            sampling_ratio: sampling ratio for the ROIAlign variants.
            pooler_type: 'ROIAlign', 'ROIAlignV2', 'ROIPool' or 'ROIAlignRotated'.
            canonical_box_size: box size (pixels) mapped to canonical_level.
            canonical_level: pyramid level assigned to canonical-size boxes.
        """
        super().__init__()
        if isinstance(output_size, int):
            output_size = (output_size, output_size)
        assert (len(output_size) == 2)
        assert (isinstance(output_size[0], int) and isinstance(output_size[1], int))
        self.output_size = output_size
        if (pooler_type == 'ROIAlign'):
            self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False) for scale in scales))
        elif (pooler_type == 'ROIAlignV2'):
            # Same op with aligned=True (half-pixel coordinate correction).
            self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True) for scale in scales))
        elif (pooler_type == 'ROIPool'):
            self.level_poolers = nn.ModuleList((RoIPool(output_size, spatial_scale=scale) for scale in scales))
        elif (pooler_type == 'ROIAlignRotated'):
            self.level_poolers = nn.ModuleList((ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio) for scale in scales))
        else:
            raise ValueError('Unknown pooler type: {}'.format(pooler_type))
        # Derive the level range from the scales (scale == 2**-level).
        min_level = (- math.log2(scales[0]))
        max_level = (- math.log2(scales[(- 1)]))
        assert (math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level))), 'Featuremap stride is not power of 2!'
        self.min_level = int(min_level)
        self.max_level = int(max_level)
        assert (len(scales) == ((self.max_level - self.min_level) + 1)), '[ROIPooler] Sizes of input featuremaps do not form a pyramid!'
        assert ((0 < self.min_level) and (self.min_level <= self.max_level))
        self.canonical_level = canonical_level
        assert (canonical_box_size > 0)
        self.canonical_box_size = canonical_box_size

    def forward(self, x: List[torch.Tensor], box_lists):
        """Pool features for every box.

        Args:
            x: one feature tensor per pyramid level (finest first), all sharing
                the same batch size and channel count.
            box_lists: one box structure per image in the batch.

        Returns:
            Tensor of shape (total_boxes, channels, output_size, output_size).
        """
        num_level_assignments = len(self.level_poolers)
        assert (isinstance(x, list) and isinstance(box_lists, list)), 'Arguments to pooler must be lists'
        assert (len(x) == num_level_assignments), 'unequal value, num_level_assignments={}, but x is list of {} Tensors'.format(num_level_assignments, len(x))
        assert (len(box_lists) == x[0].size(0)), 'unequal value, x[0] batch dim 0 is {}, but box_list has length {}'.format(x[0].size(0), len(box_lists))
        pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists)
        if (num_level_assignments == 1):
            # Single-level fast path: no level assignment needed.
            return self.level_poolers[0](x[0], pooler_fmt_boxes)
        level_assignments = assign_boxes_to_levels(box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level)
        num_boxes = len(pooler_fmt_boxes)
        num_channels = x[0].shape[1]
        output_size = self.output_size[0]
        (dtype, device) = (x[0].dtype, x[0].device)
        output = torch.zeros((num_boxes, num_channels, output_size, output_size), dtype=dtype, device=device)
        # Pool each level's boxes from its feature map and scatter the results
        # back into the shared output tensor by original box index.
        for (level, (x_level, pooler)) in enumerate(zip(x, self.level_poolers)):
            inds = nonzero_tuple((level_assignments == level))[0]
            pooler_fmt_boxes_level = pooler_fmt_boxes[inds]
            output[inds] = pooler(x_level, pooler_fmt_boxes_level)
        return output
def test_degenerate_gauss_emits_parent(archive_fixture):
    """With sigma=0 the Gaussian emitter must return exact copies of its parent
    solution, the only elite in the archive."""
    archive, x0 = archive_fixture
    parent_sol = x0 * 5
    archive.add_single(parent_sol, 1, np.array([0, 0]))
    emitter = GaussianEmitter(archive, sigma=0, x0=x0, batch_size=2)
    asked = emitter.ask()
    expected = np.expand_dims(parent_sol, axis=0)
    assert (asked == expected).all()
class history(object):
    """Fixed-capacity record of a multi-objective search run.

    Objective values, chosen actions and per-phase timings are stored in
    preallocated arrays of length MAX_SEARCH; a Pareto front of all observed
    objective vectors is maintained incrementally.
    """

    def __init__(self, num_objectives):
        self.num_objectives = num_objectives
        self.pareto = pareto.Pareto(num_objectives=self.num_objectives)
        # num_runs counts write() calls; total_num_search counts evaluations.
        self.num_runs = int(0)
        self.total_num_search = int(0)
        self.fx = np.zeros((MAX_SEARCH, self.num_objectives), dtype=float)
        self.chosen_actions = np.zeros(MAX_SEARCH, dtype=int)
        # terminal_num_run[i] = cumulative evaluation count after run i.
        self.terminal_num_run = np.zeros(MAX_SEARCH, dtype=int)
        self._time_total = np.zeros(MAX_SEARCH, dtype=float)
        self._time_update_predictor = np.zeros(MAX_SEARCH, dtype=float)
        self._time_get_action = np.zeros(MAX_SEARCH, dtype=float)
        self._time_run_simulator = np.zeros(MAX_SEARCH, dtype=float)

    # NOTE(review): write() stores timings indexed by evaluation count
    # (total_num_search), while the getters below slice by num_runs — confirm
    # the intended indexing with callers.
    def time_total(self):
        return copy.copy(self._time_total[0:self.num_runs])

    def time_update_predictor(self):
        return copy.copy(self._time_update_predictor[0:self.num_runs])

    def time_get_action(self):
        return copy.copy(self._time_get_action[0:self.num_runs])

    def time_run_simulator(self):
        return copy.copy(self._time_run_simulator[0:self.num_runs])

    def write(self, t, action, time_total=None, time_update_predictor=None, time_get_action=None, time_run_simulator=None):
        """Record N new evaluations: objective vectors `t` (shape (N, num_objectives)
        or (num_objectives,) for N=1), their actions, and optional per-evaluation
        timings (defaulting to zeros).

        Raises:
            ValueError: if `t`'s objective dimension does not match num_objectives.
        """
        t = np.array(t)
        action = np.array(action)
        if (t.ndim == 1):
            N = 1
            if (len(t) != self.num_objectives):
                raise ValueError('t does not match the number of objectives')
        else:
            N = t.shape[0]
            if (t.shape[1] != self.num_objectives):
                raise ValueError('t does not match the number of objectives')
        st = self.total_num_search
        en = (st + N)
        self.terminal_num_run[self.num_runs] = en
        self.fx[st:en] = t
        self.chosen_actions[st:en] = action
        self.num_runs += 1
        self.total_num_search += N
        self.pareto.update_front(t)
        if (time_total is None):
            time_total = np.zeros(N, dtype=float)
        self._time_total[st:en] = time_total
        if (time_update_predictor is None):
            time_update_predictor = np.zeros(N, dtype=float)
        self._time_update_predictor[st:en] = time_update_predictor
        if (time_get_action is None):
            time_get_action = np.zeros(N, dtype=float)
        self._time_get_action[st:en] = time_get_action
        if (time_run_simulator is None):
            time_run_simulator = np.zeros(N, dtype=float)
        self._time_run_simulator[st:en] = time_run_simulator

    def export_pareto_front(self):
        # Delegates to the Pareto tracker.
        return self.pareto.export_front()

    def save(self, filename):
        """Pickle the recorded portion of the history (timings are not saved)."""
        N = self.total_num_search
        M = self.num_runs
        obj = {'num_runs': M, 'total_num_search': N, 'fx': self.fx[0:N], 'chosen_actions': self.chosen_actions[0:N], 'terminal_num_run': self.terminal_num_run[0:M], 'pareto': self.pareto}
        with open(filename, 'wb') as f:
            pickle.dump(obj, f)

    def load(self, filename):
        """Restore counters, arrays and the Pareto front from a save() pickle.

        NOTE(review): pickle.load on untrusted files is unsafe — only load
        files produced by save().
        """
        with open(filename, 'rb') as f:
            data = pickle.load(f)
        M = data['num_runs']
        N = data['total_num_search']
        self.num_runs = M
        self.total_num_search = N
        self.fx[0:N] = data['fx']
        self.chosen_actions[0:N] = data['chosen_actions']
        self.terminal_num_run[0:M] = data['terminal_num_run']
        self.pareto = data['pareto']
def write_data_to_h5(data: np.ndarray, filename: Union[(str, Path)], compression='gzip', compression_level=9, dtype='uint8', verbose=False):
    """Write `data` to an HDF5 file under the dataset name 'array'.

    The dataset is chunked one leading-axis slice at a time and compressed.
    A dtype mismatch between `data` and `dtype` only logs a warning; the data
    is still written with the declared dtype (presumably cast by h5py —
    confirm for lossy conversions).

    Args:
        data: array to store.
        filename: destination path (str or Path).
        compression: h5py compression filter name.
        compression_level: compression level passed as compression_opts.
        dtype: dtype the dataset is declared with.
        verbose: print begin/end messages.
    """
    with h5py.File((filename if isinstance(filename, str) else str(filename)), 'w', libver='latest') as f:
        if (data.dtype != dtype):
            logging.warning(f'Found data with {data.dtype}, expected {dtype}.')
        if verbose:
            print(f'writing (unknown) ...')
        # Chunk shape (1, *rest): one slice along axis 0 per chunk.
        f.create_dataset('array', shape=data.shape, data=data, chunks=(1, *data.shape[1:]), dtype=dtype, compression=compression, compression_opts=compression_level)
        if verbose:
            print(f'... done writing (unknown)')
class HopperEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Gym MuJoCo Hopper environment.

    Fix: ``healthy_reward``, ``is_healthy`` and ``done`` are read as plain
    attributes throughout this class (``not self.is_healthy``, the reward sum
    in ``step``, ``done = self.done``), so the stripped ``@property``
    decorators are restored. Without them ``step`` adds a bound method to a
    float (TypeError) and the done flag is an always-truthy method object.
    """

    def __init__(self, xml_file='hopper.xml', forward_reward_weight=1.0, ctrl_cost_weight=0.001, healthy_reward=1.0, terminate_when_unhealthy=True, healthy_state_range=((- 100.0), 100.0), healthy_z_range=(0.7, float('inf')), healthy_angle_range=((- 0.2), 0.2), reset_noise_scale=0.005, exclude_current_positions_from_observation=True):
        # EzPickle records the constructor args so the env can be pickled.
        utils.EzPickle.__init__(**locals())
        self._forward_reward_weight = forward_reward_weight
        self._ctrl_cost_weight = ctrl_cost_weight
        self._healthy_reward = healthy_reward
        self._terminate_when_unhealthy = terminate_when_unhealthy
        self._healthy_state_range = healthy_state_range
        self._healthy_z_range = healthy_z_range
        self._healthy_angle_range = healthy_angle_range
        self._reset_noise_scale = reset_noise_scale
        self._exclude_current_positions_from_observation = exclude_current_positions_from_observation
        mujoco_env.MujocoEnv.__init__(self, xml_file, 4)

    @property
    def healthy_reward(self):
        # Alive bonus; always granted when episodes don't terminate on
        # unhealthy states.
        return (float((self.is_healthy or self._terminate_when_unhealthy)) * self._healthy_reward)

    def control_cost(self, action):
        # Quadratic penalty on actuation magnitude.
        control_cost = (self._ctrl_cost_weight * np.sum(np.square(action)))
        return control_cost

    @property
    def is_healthy(self):
        # Healthy iff torso height (z), torso angle and the remaining state
        # values are all inside their configured ranges.
        (z, angle) = self.sim.data.qpos[1:3]
        state = self.state_vector()[2:]
        (min_state, max_state) = self._healthy_state_range
        (min_z, max_z) = self._healthy_z_range
        (min_angle, max_angle) = self._healthy_angle_range
        healthy_state = np.all(np.logical_and((min_state < state), (state < max_state)))
        healthy_z = (min_z < z < max_z)
        healthy_angle = (min_angle < angle < max_angle)
        is_healthy = all((healthy_state, healthy_z, healthy_angle))
        return is_healthy

    @property
    def done(self):
        done = ((not self.is_healthy) if self._terminate_when_unhealthy else False)
        return done

    def _get_obs(self):
        position = self.sim.data.qpos.flat.copy()
        velocity = np.clip(self.sim.data.qvel.flat.copy(), (- 10), 10)
        if self._exclude_current_positions_from_observation:
            # Drop the absolute x position so the policy cannot exploit it.
            position = position[1:]
        observation = np.concatenate((position, velocity)).ravel()
        return observation

    def step(self, action):
        """Advance the simulation one control step.

        Reward = forward velocity term + healthy bonus - control cost.
        """
        x_position_before = self.sim.data.qpos[0]
        self.do_simulation(action, self.frame_skip)
        x_position_after = self.sim.data.qpos[0]
        x_velocity = ((x_position_after - x_position_before) / self.dt)
        ctrl_cost = self.control_cost(action)
        forward_reward = (self._forward_reward_weight * x_velocity)
        healthy_reward = self.healthy_reward
        rewards = (forward_reward + healthy_reward)
        costs = ctrl_cost
        observation = self._get_obs()
        reward = (rewards - costs)
        done = self.done
        info = {'x_position': x_position_after, 'x_velocity': x_velocity}
        return (observation, reward, done, info)

    def reset_model(self):
        # Re-initialize qpos/qvel with small uniform noise around the defaults.
        noise_low = (- self._reset_noise_scale)
        noise_high = self._reset_noise_scale
        qpos = (self.init_qpos + self.np_random.uniform(low=noise_low, high=noise_high, size=self.model.nq))
        qvel = (self.init_qvel + self.np_random.uniform(low=noise_low, high=noise_high, size=self.model.nv))
        self.set_state(qpos, qvel)
        observation = self._get_obs()
        return observation

    def viewer_setup(self):
        # Apply the default camera configuration to the viewer.
        for (key, value) in DEFAULT_CAMERA_CONFIG.items():
            if isinstance(value, np.ndarray):
                getattr(self.viewer.cam, key)[:] = value
            else:
                setattr(self.viewer.cam, key, value)
def conv_init(conv):
    """Kaiming-initialize a conv layer's weight (fan_out mode) and zero its bias.

    Fix: skip the bias when the layer was constructed with bias=False instead
    of crashing on a None parameter.
    """
    nn.init.kaiming_normal_(conv.weight, mode='fan_out')
    if conv.bias is not None:
        nn.init.constant_(conv.bias, 0)
class ActNorm(AffineConstantFlow):
    """Activation normalization: an AffineConstantFlow whose scale and shift
    are initialized from the statistics of the first batch seen, so the first
    forward pass produces roughly zero-mean, unit-variance outputs."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Flipped to True after the one-time data-dependent initialization.
        self.data_dep_init_done = False

    def forward(self, x):
        if (not self.data_dep_init_done):
            # Data-dependent init requires both parameters to exist (not None).
            assert ((self.s is not None) and (self.t is not None))
            # s = -log(std): exp(s) rescales x to unit variance per feature.
            self.s.data = (- torch.log(x.std(dim=0, keepdim=True))).detach()
            # t shifts the rescaled x to zero mean.
            self.t.data = (- (x * torch.exp(self.s)).mean(dim=0, keepdim=True)).detach()
            self.data_dep_init_done = True
        return super().forward(x)
class l1_rate_sparsity():
    """Callable L1 spike-rate penalty: Lambda * sum(spk_out).

    Exposes __name__ so instances can stand in for plain loss functions.
    """

    def __init__(self, Lambda=1e-05):
        self.Lambda = Lambda
        self.__name__ = 'l1_rate_sparsity'

    def __call__(self, spk_out):
        total_spikes = torch.sum(spk_out)
        return self.Lambda * total_spikes
class AnisotropicReadoutExperiment(AnisotropicExperiment):
    """Anisotropic reservoir experiment extended with an output/readout layer."""

    def defineParameters(self):
        """Merge the base anisotropic parameters with this experiment's
        overrides (experiment values win on key collisions)."""
        aniP = super().defineParameters()
        expP = {'seed': 3, 'trials': 25, 'stepsPerTrial': 110, 'isReset': True, 'refractoryDelay': 2, 'voltageTau': 10.24, 'currentTau': 10.78, 'thresholdMant': 1000, 'reservoirConnProb': 0.05, 'anisoStdE': 12, 'anisoStdI': 9, 'anisoShift': 1, 'anisoPerlinScale': 4, 'weightExCoefficient': 12, 'weightInCoefficient': 48, 'inputIsTopology': True, 'inputIsLeaveOut': True, 'patchNeuronsShiftX': 44, 'patchNeuronsShiftY': 24, 'inputNumTargetNeurons': 25, 'inputSteps': 5, 'inputWeightExponent': 0, 'inputGenSpikeProb': 1.0, 'partitioningClusterSize': 10, 'isExSpikeProbe': True, 'isInSpikeProbe': True, 'isOutSpikeProbe': True}
        return {**aniP, **expP}

    def build(self):
        """Build all networks: reservoir, output mask/weights, reservoir and
        output connections, input, and probes.

        (Fix: this text existed as a stray no-op string literal between the
        methods; it is now the method's docstring.)
        """
        self.net = ReservoirNetwork(self.p)
        self.net.landscape = None
        self.drawMaskAndWeights()
        self.net.drawOutputMaskAndWeights()
        self.net.connectReservoir()
        self.net.connectOutput()
        self.net.addInput()
        self.net.addProbes()
def airnet50_1x64d_r16(**kwargs):
    """AirNet-50 (1x64d, reduction ratio 16) constructor; extra keyword
    arguments are forwarded to get_airnet unchanged."""
    config = dict(blocks=50, base_channels=64, ratio=16, model_name='airnet50_1x64d_r16')
    return get_airnet(**config, **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.