code stringlengths 101 5.91M |
|---|
def best_meter(ctx: Context, train_ctx: Context, best_branch: int=1, k: int=1) -> float:
    """Running average of top-k accuracy for one output branch.

    Lazily creates an AverageMeter on `ctx`, resets it at the start of each
    epoch (when `train_ctx.batch_idx == 0`), updates it with the current
    batch's top-`k` accuracy of `train_ctx.output[best_branch]` against
    `train_ctx.target`, and returns the running average.
    """
    def accuracy(output, target, k=1):
        # Top-k accuracy (in percent) for a single batch.
        batch_size = target.size(0)
        (_, pred) = output.topk(k, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
        # Bug fix: after `pred.t()` the tensor is non-contiguous, so
        # `.view(-1)` raises a RuntimeError in PyTorch; `.reshape(-1)`
        # handles both contiguous and non-contiguous layouts.
        correct_k = correct[:k].reshape((- 1)).float().sum(0, keepdim=True)
        return correct_k.mul_((100.0 / batch_size))
    if ('meter' not in ctx):
        ctx.meter = AverageMeter()
    if (train_ctx.batch_idx == 0):
        ctx.meter.reset()
    acc = accuracy(train_ctx.output[best_branch], train_ctx.target, k)
    ctx.meter.update(acc.item())
    return ctx.meter.avg
def save_model(model, output_dir):
    """Persist an R model object under `output_dir` via R's `save()`.

    The file name carries a random UUID suffix so repeated saves never
    collide.
    """
    save_R = robjects.r('save')
    output_file = os.path.join(output_dir, ('rsf_model' + str(uuid.uuid4())))
    # Bug fix: the original saved the undefined global `rfsc` (NameError at
    # runtime) instead of the `model` argument, which was otherwise unused.
    save_R(model, file=output_file)
class BaseRegressionComponentTest(unittest.TestCase):
    """Shared fixture-driven checks for a single regression component.

    Subclasses set `module` (the component under test), `sk_module`, and a
    `res` dict of expected scores; this base then exercises the component on
    the boston/diabetes datasets, dense and sparse, with plain and iterative
    fitting.  Negative scores are compared in log space of their magnitude
    to keep `assertAlmostEqual` meaningful for large-magnitude failures.
    """
    # Expected-score fixtures, filled in by each subclass.
    res = None
    # Component class under test.
    module = None
    # Reference scikit-learn implementation (used by subclasses).
    sk_module = None
    # Optional {'name': ..., 'value': ...} to check the iteration count an
    # iterative fit actually performed on the underlying estimator.
    step_hyperparameter = None
    # Keep the abstract base itself out of test collection.
    __test__ = False
    def test_default_boston(self):
        """Default-config fit/predict on dense boston; r2 must match fixture."""
        if (self.__class__ == BaseRegressionComponentTest):
            return
        # Run twice to catch non-determinism between identical runs.
        for i in range(2):
            (predictions, targets, n_calls) = _test_regressor(dataset='boston', Regressor=self.module)
            if ('default_boston_le_ge' in self.res):
                # Non-deterministic components: accept a [lower, upper] band.
                self.assertLessEqual(sklearn.metrics.r2_score(y_true=targets, y_pred=predictions), self.res['default_boston_le_ge'][0])
                self.assertGreaterEqual(sklearn.metrics.r2_score(y_true=targets, y_pred=predictions), self.res['default_boston_le_ge'][1])
            else:
                score = sklearn.metrics.r2_score(targets, predictions)
                fixture = self.res['default_boston']
                if (score < (- .0)):
                    # Compare magnitudes of negative scores in log space.
                    score = np.log((- score))
                    fixture = np.log((- fixture))
                self.assertAlmostEqual(fixture, score, places=self.res.get('default_boston_places', 7))
            if self.res.get('boston_n_calls'):
                self.assertEqual(self.res['boston_n_calls'], n_calls)
    def test_default_boston_iterative_fit(self):
        """Iterative fit on dense boston (skipped for non-iterative modules)."""
        if (self.__class__ == BaseRegressionComponentTest):
            return
        if (not hasattr(self.module, 'iterative_fit')):
            return
        for i in range(2):
            (predictions, targets, regressor) = _test_regressor_iterative_fit(dataset='boston', Regressor=self.module)
            score = sklearn.metrics.r2_score(targets, predictions)
            fixture = self.res['default_boston_iterative']
            if (score < (- .0)):
                score = np.log((- score))
                fixture = np.log((- fixture))
            self.assertAlmostEqual(fixture, score, places=self.res.get('default_boston_iterative_places', 7))
            if (self.step_hyperparameter is not None):
                # The estimator must have run the expected number of iterations.
                self.assertEqual(getattr(regressor.estimator, self.step_hyperparameter['name']), self.res.get('boston_iterative_n_iter', self.step_hyperparameter['value']))
    def test_default_boston_iterative_sparse_fit(self):
        """Iterative fit on sparse boston (skipped when sparse is unsupported)."""
        if (self.__class__ == BaseRegressionComponentTest):
            return
        if (not hasattr(self.module, 'iterative_fit')):
            return
        if (SPARSE not in self.module.get_properties()['input']):
            return
        for i in range(2):
            (predictions, targets, _) = _test_regressor_iterative_fit(dataset='boston', Regressor=self.module, sparse=True)
            self.assertAlmostEqual(self.res['default_boston_iterative_sparse'], sklearn.metrics.r2_score(targets, predictions), places=self.res.get('default_boston_iterative_sparse_places', 7))
    def test_default_boston_sparse(self):
        """Plain fit on sparse boston (skipped when sparse is unsupported)."""
        if (self.__class__ == BaseRegressionComponentTest):
            return
        if (SPARSE not in self.module.get_properties()['input']):
            return
        for i in range(2):
            (predictions, targets, _) = _test_regressor(dataset='boston', Regressor=self.module, sparse=True)
            self.assertAlmostEqual(self.res['default_boston_sparse'], sklearn.metrics.r2_score(targets, predictions), places=self.res.get('default_boston_sparse_places', 7))
    def test_default_diabetes(self):
        """Default-config fit/predict on dense diabetes; r2 must match fixture."""
        if (self.__class__ == BaseRegressionComponentTest):
            return
        for i in range(2):
            (predictions, targets, n_calls) = _test_regressor(dataset='diabetes', Regressor=self.module)
            self.assertAlmostEqual(self.res['default_diabetes'], sklearn.metrics.r2_score(targets, predictions), places=self.res.get('default_diabetes_places', 7))
            if self.res.get('diabetes_n_calls'):
                self.assertEqual(self.res['diabetes_n_calls'], n_calls)
    def test_default_diabetes_iterative_fit(self):
        """Iterative fit on dense diabetes (skipped for non-iterative modules)."""
        if (self.__class__ == BaseRegressionComponentTest):
            return
        if (not hasattr(self.module, 'iterative_fit')):
            return
        for i in range(2):
            (predictions, targets, _) = _test_regressor_iterative_fit(dataset='diabetes', Regressor=self.module)
            self.assertAlmostEqual(self.res['default_diabetes_iterative'], sklearn.metrics.r2_score(targets, predictions), places=self.res.get('default_diabetes_iterative_places', 7))
    def test_default_diabetes_iterative_sparse_fit(self):
        """Iterative fit on sparse diabetes, plus iteration-count check."""
        if (self.__class__ == BaseRegressionComponentTest):
            return
        if (not hasattr(self.module, 'iterative_fit')):
            return
        if (SPARSE not in self.module.get_properties()['input']):
            return
        for i in range(2):
            (predictions, targets, regressor) = _test_regressor_iterative_fit(dataset='diabetes', Regressor=self.module, sparse=True)
            self.assertAlmostEqual(self.res['default_diabetes_iterative_sparse'], sklearn.metrics.r2_score(targets, predictions), places=self.res.get('default_diabetes_iterative_sparse_places', 7))
            if (self.step_hyperparameter is not None):
                self.assertEqual(getattr(regressor.estimator, self.step_hyperparameter['name']), self.res.get('diabetes_iterative_n_iter', self.step_hyperparameter['value']))
    def test_default_diabetes_sparse(self):
        """Plain fit on sparse diabetes (skipped when sparse is unsupported)."""
        if (self.__class__ == BaseRegressionComponentTest):
            return
        if (SPARSE not in self.module.get_properties()['input']):
            return
        for i in range(2):
            (predictions, targets, _) = _test_regressor(dataset='diabetes', Regressor=self.module, sparse=True)
            self.assertAlmostEqual(self.res['default_diabetes_sparse'], sklearn.metrics.r2_score(targets, predictions), places=self.res.get('default_diabetes_sparse_places', 7))
def _populate_eval_frames(labelled_frames: dict, num_samples: int):
cur_index = 0
eval_frames = {}
while (cur_index < num_samples):
chosen_video_name = np.random.choice(list(labelled_frames.keys()))
valid_index_in_video = labelled_frames[chosen_video_name]
pick_index_at = np.random.randint(0, len(valid_index_in_video))
chosen_frame_index = valid_index_in_video[pick_index_at]
labelled_frames[chosen_video_name] = np.delete(labelled_frames[chosen_video_name], pick_index_at)
if (len(labelled_frames[chosen_video_name]) == 0):
del labelled_frames[chosen_video_name]
if (chosen_video_name in eval_frames):
eval_frames[chosen_video_name].append(chosen_frame_index)
else:
eval_frames[chosen_video_name] = [chosen_frame_index]
cur_index += 1
for video_name in eval_frames:
eval_frames[video_name].sort()
return eval_frames |
class RFCNMetaArchTest(faster_rcnn_meta_arch_test_lib.FasterRCNNMetaArchTestBase):
    """R-FCN flavour of the shared Faster R-CNN meta-architecture tests."""

    def _get_second_stage_box_predictor_text_proto(self):
        """Return the pbtxt snippet configuring the R-FCN box predictor."""
        return '\n      rfcn_box_predictor {\n        conv_hyperparams {\n          op: CONV\n          activation: NONE\n          regularizer {\n            l2_regularizer {\n              weight: 0.0005\n            }\n          }\n          initializer {\n            variance_scaling_initializer {\n              factor: 1.0\n              uniform: true\n              mode: FAN_AVG\n            }\n          }\n        }\n      }\n    '

    def _get_model(self, box_predictor, **common_kwargs):
        """Build an RFCNMetaArch wired with the given second-stage predictor."""
        return rfcn_meta_arch.RFCNMetaArch(
            second_stage_rfcn_box_predictor=box_predictor, **common_kwargs)

    def _get_box_classifier_features_shape(self, image_size, batch_size, max_num_proposals, initial_crop_size, maxpool_stride, num_features):
        """R-FCN classifies on the full feature map, so crop/pool sizes are unused."""
        return (batch_size, image_size, image_size, num_features)
def get_max_iou(sources, targets):
    """For each source box, the best IoU against any target box.

    `sources`/`targets` are box containers exposing `.bbox` (N x 4 tensors,
    x1/y1/x2/y2) and `.get_field('labels')`.  Coordinates are truncated to
    ints before the overlap computation, matching the original behaviour.
    Returns a float32 tensor shaped like the source labels.
    """
    best = torch.zeros_like(sources.get_field('labels')).type(torch.float32)
    for (idx, src) in enumerate(sources.bbox):
        (sx1, sy1, sx2, sy2) = (int(src[0]), int(src[1]), int(src[2]), int(src[3]))
        top_iou = 0
        for tgt in targets.bbox:
            (tx1, ty1, tx2, ty2) = (int(tgt[0]), int(tgt[1]), int(tgt[2]), int(tgt[3]))
            left = max(sx1, tx1)
            top = max(sy1, ty1)
            right = min(sx2, tx2)
            bottom = min(sy2, ty2)
            # No overlap at all: this target contributes nothing.
            if ((right < left) or (bottom < top)):
                continue
            inter = ((right - left) * (bottom - top))
            area_src = ((sx2 - sx1) * (sy2 - sy1))
            area_tgt = ((tx2 - tx1) * (ty2 - ty1))
            iou = (inter / float(((area_src + area_tgt) - inter)))
            assert (iou >= 0.0)
            assert (iou <= 1.0)
            if (iou > top_iou):
                top_iou = iou
        best[idx] = top_iou
    return best
def build_openpose_model(name='mobilenet', *args, **kwargs):
    """Factory for OpenPose-style pose-estimation networks.

    `name` selects the architecture ('BODY_25' or 'MobileNet', matched
    case-insensitively); extra args/kwargs are forwarded to the network
    constructor.

    Bug fix: the default `name='mobilenet'` never matched the exact-case
    'MobileNet' branch, so calling with defaults raised ValueError.  Names
    are now compared case-insensitively.

    Raises:
        ValueError: when `name` is not a supported architecture.
    """
    key = name.lower()
    if (key == 'body_25'):
        from .openposenet import OpenPoseBody25Model
        network = OpenPoseBody25Model(*args, **kwargs)
    elif (key == 'mobilenet'):
        from .mobilenet import PoseEstimationWithMobileNet
        network = PoseEstimationWithMobileNet(*args, **kwargs)
    else:
        raise ValueError(f'Invalid name {name}, and currently, it only support {VALID_NAMES}.')
    return network
def deploy_model(model_s3_path: str):
    """Deploy a TensorFlow artifact to a fresh SageMaker endpoint and smoke-test it.

    The endpoint name embeds a millisecond timestamp for uniqueness.  Docker
    image, IAM role, and instance type come from the DOCKER_IMAGE,
    IAM_SAGEMAKER_ROLE, and SAGEMAKER_INSTANCE environment variables.
    Returns the endpoint name after a one-hot smoke prediction succeeds.
    """
    stamp = int(round((time.time() * 1000)))
    endpoint_name = 'intent-{}-endpoint'.format(stamp)
    print('\n\n\nEndpoint name is: {}\n\n'.format(endpoint_name))
    tf_model = TensorFlowModel(model_data=model_s3_path, image_uri=os.getenv('DOCKER_IMAGE'), role=os.getenv('IAM_SAGEMAKER_ROLE'))
    predictor = tf_model.deploy(initial_instance_count=1, instance_type=os.getenv('SAGEMAKER_INSTANCE'), endpoint_name=endpoint_name)
    # Minimal smoke request: a single one-hot encoded token sequence.
    sample = {'instances': tf.one_hot(np.array([[0, 1, 1, 3, 4, 5]]), on_value=1, off_value=0, depth=7).numpy()}
    prediction = predictor.predict(sample)
    print(sample, prediction)
    assert (prediction['predictions'][0][0] > 0)
    return endpoint_name
class MegaPreTrainedModel(metaclass=DummyObject):
    # Placeholder emitted when `torch` is not installed: any attempt to
    # instantiate it raises an informative ImportError via
    # `requires_backends` instead of failing later with an AttributeError.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def run_kfold(data_fn, method='logit', prop_missing=0.0, max_num_feature=(- 1), feature_selection='random', k=10, which_half='both', data_dir='_data', cache_dir='_cache', out_dir='_out'):
    """Run k-fold cross-validation for one classical-ML `method`.

    Loads previously searched hyperparameters from `cache_dir`, selects the
    model class for `method`, and runs the requested folds ('both', the
    'first' half, or the 'last' half), writing per-fold results under
    `out_dir`.

    Raises:
        ValueError: unknown `method` or `which_half`.
        Exception: re-raised when the cached parameter file cannot be read.
    """
    start = time.time()
    # Bug fix: compute the path outside the try block — if get_param_path
    # itself raised, the old handler referenced an unbound `param_path` and
    # masked the real error with a NameError.
    param_path = get_param_path(cache_dir, method, data_fn, prop_missing, max_num_feature, feature_selection)
    try:
        with open(param_path, 'rb') as f:
            params = pickle.load(f)
    except Exception:  # bug fix: bare `except:` also swallowed KeyboardInterrupt/SystemExit
        warnings.warn(('Cannot load parameters from: {}\n'.format(param_path) + 'Need to do parameter search; run parameter_search.py'))
        raise
    if (method == 'logit'):
        from sklearn.linear_model import LogisticRegression as ModelClass
        init_args = {'multi_class': 'multinomial', 'solver': 'lbfgs'}
    elif (method == 'random_forest'):
        from sklearn.ensemble import RandomForestClassifier as ModelClass
        init_args = {}
    elif (method == 'linear_svm'):
        from sklearn.svm import SVC as ModelClass
        # A degree-1 polynomial kernel with coef0=0 is equivalent to linear.
        init_args = {'kernel': 'poly', 'degree': 1, 'coef0': 0.0, 'gamma': 1.0, 'probability': True, 'cache_size': 1000}
    elif (method == 'poly_svm'):
        from sklearn.svm import SVC as ModelClass
        init_args = {'kernel': 'poly', 'probability': True, 'cache_size': 1000}
    elif (method == 'rbf_svm'):
        from sklearn.svm import SVC as ModelClass
        init_args = {'kernel': 'rbf', 'probability': True, 'cache_size': 1000}
    elif (method == 'gbdt'):
        from xgboost import XGBClassifier as ModelClass
        init_args = {'objective': 'multi:softprob'}
    else:
        raise ValueError('unknown method: {}'.format(method))
    (x_unvec, y, idx_feat_dict, idx_class_dict, _, perm_indices) = get_preprocessed_data(data_dir, data_fn, prop_missing=prop_missing)
    num_feature = len(idx_feat_dict)
    num_class = len(idx_class_dict)
    base_out_dir = get_base_out_dir(out_dir, method, data_fn, prop_missing, max_num_feature, feature_selection)
    recursive_mkdir(base_out_dir)
    if (which_half == 'both'):
        loop = range(0, k)
    elif (which_half == 'first'):
        # Bug fix: `k / 2` is a float in Python 3 and range() rejects it;
        # floor division keeps the original Python 2 semantics.
        loop = range(0, (k // 2))
    elif (which_half == 'last'):
        loop = range((k // 2), k)
    else:
        raise ValueError('Unknown which_half: {}'.format(which_half))
    for k_idx in loop:
        sub_out_dir = '{}/k_idx={}'.format(base_out_dir, k_idx)
        recursive_mkdir(sub_out_dir)
        run(ModelClass, x_unvec, y, idx_feat_dict, num_feature=num_feature, max_num_feature=max_num_feature, num_class=num_class, feature_selection=feature_selection, k_idx=k_idx, k=k, params=params, perm_indices=perm_indices, init_args=init_args, full_out_dir=sub_out_dir)
    print('This k-fold {} multipipeline run script took {:.4f} seconds'.format(method, (time.time() - start)))
def _get_learning_rate_schedule(optimizer_config: ConfigDict) -> LearningRateSchedule:
    """Build a learning-rate schedule callable `t -> lr` from the config.

    Supported `schedule_type` values: 'constant' (fixed learning_rate) and
    'inverse_time' (lr / (1 + decay_rate * t)).  The returned closures read
    the config lazily, so later config edits are reflected at call time.

    Raises:
        ValueError: for any other schedule type.
    """
    schedule_type = optimizer_config.schedule_type
    if (schedule_type == 'constant'):
        def learning_rate_schedule(t):
            return optimizer_config.learning_rate
        return learning_rate_schedule
    if (schedule_type == 'inverse_time'):
        def learning_rate_schedule(t):
            return (optimizer_config.learning_rate / (1.0 + (optimizer_config.learning_decay_rate * t)))
        return learning_rate_schedule
    raise ValueError('Learning rate schedule type not supported; {} was requested'.format(schedule_type))
def get_gpu_memory_map():
    """Query nvidia-smi for per-GPU used memory.

    Returns {gpu_index: used_memory_MiB}; indices follow nvidia-smi's
    output order.
    """
    raw = subprocess.check_output(['nvidia-smi', '--query-gpu=memory.used', '--format=csv,nounits,noheader'], encoding='utf-8')
    used = [int(line) for line in raw.strip().split('\n')]
    return {idx: mem for (idx, mem) in enumerate(used)}
def rgetattr(obj, attr_str):
    """Dotted-path getattr: rgetattr(o, 'a.b.c') == o.a.b.c."""
    target = obj
    for part in attr_str.split('.'):
        target = getattr(target, part)
    return target
class LxmertModelTester():
    """Builds LXMERT configs/inputs and checks model outputs in unit tests.

    `parent` is the owning TestCase (used for assertions).  Every keyword
    mirrors an LxmertConfig field or an input-generation knob (batch and
    sequence sizes, which pre-training tasks and visual losses are active,
    attention/hidden-state outputs, etc.).
    """

    def __init__(self, parent, vocab_size=300, hidden_size=28, num_attention_heads=2, num_labels=2, intermediate_size=64, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, num_qa_labels=30, num_object_labels=16, num_attr_labels=4, num_visual_features=10, l_layers=2, x_layers=1, r_layers=1, visual_feat_dim=128, visual_pos_dim=4, visual_loss_normalizer=6.67, seq_length=20, batch_size=4, is_training=True, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, use_token_type_ids=True, use_lang_mask=True, output_attentions=False, output_hidden_states=False, scope=None):
        self.parent = parent
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.num_labels = num_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pad_token_id = pad_token_id
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.seq_length = seq_length
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_lang_mask = use_lang_mask
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_visual_features = num_visual_features
        self.use_token_type_ids = use_token_type_ids
        self.output_attentions = output_attentions
        self.output_hidden_states = output_hidden_states
        self.scope = scope
        # Per-modality encoder depths, keyed the way LXMERT expects them.
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}

    def prepare_config_and_inputs(self):
        """Create an LxmertConfig plus randomized text/visual inputs and labels."""
        output_attentions = self.output_attentions
        input_ids = ids_tensor([self.batch_size, self.seq_length], vocab_size=self.vocab_size)
        visual_feats = torch.rand(self.batch_size, self.num_visual_features, self.visual_feat_dim, device=torch_device)
        bounding_boxes = torch.rand(self.batch_size, self.num_visual_features, 4, device=torch_device)
        input_mask = None
        if self.use_lang_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        obj_labels = None
        if self.task_obj_predict:
            obj_labels = {}
        # Each obj_labels entry is a (label, target-weight) tensor pair.
        if (self.visual_attr_loss and self.task_obj_predict):
            obj_labels['attr'] = (ids_tensor([self.batch_size, self.num_visual_features], self.num_attr_labels), ids_tensor([self.batch_size, self.num_visual_features], self.num_attr_labels))
        if (self.visual_feat_loss and self.task_obj_predict):
            obj_labels['feat'] = (ids_tensor([self.batch_size, self.num_visual_features, self.visual_feat_dim], self.num_visual_features), ids_tensor([self.batch_size, self.num_visual_features], self.num_visual_features))
        if (self.visual_obj_loss and self.task_obj_predict):
            obj_labels['obj'] = (ids_tensor([self.batch_size, self.num_visual_features], self.num_object_labels), ids_tensor([self.batch_size, self.num_visual_features], self.num_object_labels))
        ans = None
        if self.task_qa:
            ans = ids_tensor([self.batch_size], self.num_qa_labels)
        masked_lm_labels = None
        if self.task_mask_lm:
            masked_lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        matched_label = None
        if self.task_matched:
            matched_label = ids_tensor([self.batch_size], self.num_labels)
        config = LxmertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_attention_heads=self.num_attention_heads, num_labels=self.num_labels, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, layer_norm_eps=self.layer_norm_eps, pad_token_id=self.pad_token_id, num_qa_labels=self.num_qa_labels, num_object_labels=self.num_object_labels, num_attr_labels=self.num_attr_labels, l_layers=self.l_layers, x_layers=self.x_layers, r_layers=self.r_layers, visual_feat_dim=self.visual_feat_dim, visual_pos_dim=self.visual_pos_dim, visual_loss_normalizer=self.visual_loss_normalizer, task_matched=self.task_matched, task_mask_lm=self.task_mask_lm, task_obj_predict=self.task_obj_predict, task_qa=self.task_qa, visual_obj_loss=self.visual_obj_loss, visual_attr_loss=self.visual_attr_loss, visual_feat_loss=self.visual_feat_loss, output_attentions=self.output_attentions, output_hidden_states=self.output_hidden_states)
        return (config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions)

    def create_and_check_lxmert_model(self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions):
        """Run the bare LxmertModel under several call variants and check output shapes."""
        model = LxmertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, output_attentions=output_attentions)
        result = model(input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, output_attentions=(not output_attentions))
        result = model(input_ids, visual_feats, bounding_boxes, return_dict=False)
        result = model(input_ids, visual_feats, bounding_boxes, return_dict=True)
        self.parent.assertEqual(result.language_output.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.vision_output.shape, (self.batch_size, self.num_visual_features, self.hidden_size))
        self.parent.assertEqual(result.pooled_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_lxmert_for_question_answering(self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions):
        """Run LxmertForQuestionAnswering under several call variants and check the QA head shape."""
        model = LxmertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, labels=ans, output_attentions=output_attentions)
        result = model(input_ids, visual_feats, bounding_boxes, labels=ans)
        result = model(input_ids, visual_feats, bounding_boxes, labels=ans, token_type_ids=token_type_ids, attention_mask=input_mask, output_attentions=output_attentions)
        result = model(input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, labels=ans, output_attentions=(not output_attentions))
        self.parent.assertEqual(result.question_answering_score.shape, (self.batch_size, self.num_qa_labels))

    def create_and_check_lxmert_for_pretraining(self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions):
        """Run LxmertForPreTraining with each task's labels toggled on and off."""
        model = LxmertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, obj_labels=obj_labels, matched_label=matched_label, ans=ans, output_attentions=output_attentions)
        result = model(input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, output_attentions=(not output_attentions), return_dict=False)
        result = model(input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels)
        result = model(input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, obj_labels=obj_labels)
        result = model(input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, matched_label=matched_label)
        result = model(input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, ans=ans)
        result = model(input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, obj_labels=obj_labels, matched_label=matched_label, ans=ans, output_attentions=(not output_attentions))
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def resize_lxmert_num_qa_labels(self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions):
        """Check that resizing the QA head (shrink and grow) changes score shapes accordingly."""
        start_labels = config.num_qa_labels
        num_large_labels = (config.num_qa_labels * 2)
        # Bug fix: this was `int(config.num_qa_labels * 2)`, identical to
        # num_large_labels, so the shrinking path was never actually tested.
        num_small_labels = int((config.num_qa_labels / 2))
        less_labels_ans = ids_tensor([self.batch_size], num_small_labels)
        more_labels_ans = ids_tensor([self.batch_size], num_large_labels)
        model_pretrain = LxmertForPreTraining(config=config).to(torch_device)
        model_qa = LxmertForQuestionAnswering(config=config).to(torch_device)
        config.num_labels = num_small_labels
        end_labels = config.num_labels
        result_pretrain = model_pretrain(input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, ans=ans)
        result_qa = model_qa(input_ids, visual_feats, bounding_boxes, labels=ans, token_type_ids=token_type_ids, attention_mask=input_mask)
        model_pretrain.resize_num_qa_labels(num_small_labels)
        model_qa.resize_num_qa_labels(num_small_labels)
        result_pretrain_less = model_pretrain(input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, ans=less_labels_ans)
        result_qa_less = model_qa(input_ids, visual_feats, bounding_boxes, labels=less_labels_ans, token_type_ids=token_type_ids, attention_mask=input_mask)
        model_pretrain.resize_num_qa_labels(num_large_labels)
        model_qa.resize_num_qa_labels(num_large_labels)
        result_pretrain_more = model_pretrain(input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, ans=more_labels_ans)
        result_qa_more = model_qa(input_ids, visual_feats, bounding_boxes, labels=more_labels_ans, token_type_ids=token_type_ids, attention_mask=input_mask)
        model_qa_labels = model_qa.num_qa_labels
        self.parent.assertNotEqual(start_labels, end_labels)
        self.parent.assertNotEqual(model_qa_labels, start_labels)
        self.parent.assertEqual(result_qa.question_answering_score.shape, (self.batch_size, start_labels))
        self.parent.assertEqual(result_pretrain.question_answering_score.shape, (self.batch_size, start_labels))
        self.parent.assertEqual(result_qa_less.question_answering_score.shape, (self.batch_size, num_small_labels))
        self.parent.assertEqual(result_pretrain_less.question_answering_score.shape, (self.batch_size, num_small_labels))
        self.parent.assertEqual(result_qa_more.question_answering_score.shape, (self.batch_size, num_large_labels))
        self.parent.assertEqual(result_pretrain_more.question_answering_score.shape, (self.batch_size, num_large_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the (config, inputs_dict) common-test interface."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'visual_feats': visual_feats, 'visual_pos': bounding_boxes, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return (config, inputs_dict)
def tokenize_line(line):
    """Collapse all whitespace runs in `line` and split it into tokens."""
    return SPACE_NORMALIZER.sub(' ', line).strip().split()
class Trainer(object):
    def __init__(self, option):
        # `option`: parsed experiment arguments (model/data/ubnet flags, lr, paths, ...).
        self.option = option
        # Order matters: the optimizer is built over networks created here.
        self._build_model()
        self._set_optimizer()
        self.logger = logger_setting(option.exp_name, option.save_dir)
    def _build_model(self):
        """Instantiate the backbone (and orthnet when ubnet), losses, and move them to GPU.

        Backbone: vgg11 / resnet18 / alexnet, optionally ImageNet
        pre-trained, with the final layer resized to `n_class`.
        """
        if (self.option.data == 'CelebA-HQ'):
            if (self.option.model == 'vgg11'):
                print('[MODEL] vgg11')
                if self.option.imagenet_pretrain:
                    print('ImageNet Pre-trained')
                    self.net = models.vgg11(pretrained=True)
                else:
                    self.net = models.vgg11(pretrained=False)
                # Replace the last classifier layer with an n_class head.
                self.net.classifier[6] = nn.Linear(4096, self.option.n_class)
            elif (self.option.model == 'resnet18'):
                print('[MODEL] resnet18')
                if self.option.imagenet_pretrain:
                    self.net = models.resnet18(pretrained=True)
                else:
                    self.net = models.resnet18(pretrained=False)
                self.net.fc = nn.Linear(512, self.option.n_class)
            elif (self.option.model == 'alexnet'):
                print('[MODEL] alexnet')
                if self.option.imagenet_pretrain:
                    self.net = models.alexnet(pretrained=True)
                else:
                    self.net = models.alexnet(pretrained=False)
                self.net.classifier[6] = nn.Linear(4096, self.option.n_class)
        if self.option.ubnet:
            if (self.option.data == 'CelebA-HQ'):
                self.orthnet = orthonet.OrthoNet(num_classes=self.option.n_class)
                # ignore_index=255 skips padded/unlabelled targets.
                self.loss_orth = nn.CrossEntropyLoss(ignore_index=255)
                self._load_model()
            # NOTE(review): with ubnet set and data != 'CelebA-HQ', neither
            # orthnet nor loss_orth is created, so the cuda() calls below
            # would fail — confirm other datasets are handled elsewhere.
        else:
            self.loss = nn.CrossEntropyLoss(ignore_index=255)
        if self.option.cuda:
            if self.option.ubnet:
                self.orthnet.cuda()
                self.net.cuda()
                self.loss_orth.cuda()
                print(f'[PARAMETER:ORTHONET]: {self._count_parameters(self.orthnet)}')
                print(f'[PARAMETER:BASELINE]: {self._count_parameters(self.net)}')
            else:
                self.net.cuda()
                self.loss.cuda()
                print(f'[PARAMETER:BASELINE]: {self._count_parameters(self.net)}')
def _count_parameters(self, model):
return sum((p.numel() for p in model.parameters() if p.requires_grad))
def _set_optimizer(self):
if self.option.ubnet:
self.optim_orth = optim.Adam(filter((lambda p: p.requires_grad), self.orthnet.parameters()), lr=self.option.lr, weight_decay=self.option.weight_decay)
else:
self.optim = optim.Adam(filter((lambda p: p.requires_grad), self.net.parameters()), lr=self.option.lr, weight_decay=self.option.weight_decay)
lr_lambda = (lambda step: (self.option.lr_decay_rate ** (step // self.option.lr_decay_period)))
if self.option.ubnet:
self.scheduler = optim.lr_scheduler.LambdaLR(self.optim_orth, lr_lambda=lr_lambda, last_epoch=(- 1))
else:
self.scheduler = optim.lr_scheduler.LambdaLR(self.optim, lr_lambda=lr_lambda, last_epoch=(- 1))
def _weights_init_xavier(m):
classname = m.__class__.__name__
if (classname.find('Conv') != (- 1)):
nn.init.xavier_normal_(m.weight.data, gain=1.0)
elif (classname.find('Linear') != (- 1)):
nn.init.xavier_normal_(m.weight.data, gain=1.0)
def _initialization(self):
if self.option.is_train:
if self.option.imagenet_pretrain:
print('[INITIALIZED]')
if ((self.option.model == 'vgg11') or (self.option.model == 'alexnet')):
self.net.classifier.apply(self._weights_init_xavier)
elif (self.option.model == 'resnet18'):
self.net.fc.apply(self._weights_init_xavier)
else:
self.net.apply(self._weights_init_xavier)
if self.option.ubnet:
self.orthnet.apply(self._weights_init_xavier)
if self.option.use_pretrain:
if (self.option.checkpoint is not None):
self._load_model()
else:
print('[WARNING] no pre-trained model')
def _mode_setting(self, is_train=True):
if is_train:
if self.option.ubnet:
self.orthnet.train()
self.net.train()
else:
self.net.train()
elif self.option.ubnet:
self.orthnet.eval()
self.net.eval()
else:
self.net.eval()
    def _train_step(self, data_loader, step):
        """Run one training pass over `data_loader`.

        ubnet mode: slices the (frozen) baseline net into multi-depth
        feature extractors, feeds their feature maps to orthnet, and
        optimizes orthnet only.  Baseline mode: standard cross-entropy
        training of self.net.  `step` is unused here; it mirrors the
        _validate signature.
        """
        self._mode_setting(is_train=True)
        loss_sum = 0.0
        loss_orth_sum = 0.0
        loss_conv_sum = 0.0
        loss_trans_sum = 0.0
        total_num_train = 0
        for (i, (images, labels)) in enumerate(tqdm(data_loader)):
            images = self._get_variable(images)
            labels = self._get_variable(labels)
            total_num_train += images.shape[0]
            if self.option.ubnet:
                # NOTE(review): these extractors are rebuilt every batch but
                # are loop-invariant; hoisting them looks safe — confirm.
                if (self.option.data == 'CelebA-HQ'):
                    if (self.option.model == 'vgg11'):
                        # children()[0] is the vgg `features` Sequential; the
                        # slices expose feature maps at five depths.
                        new_classifier = nn.Sequential(*list(self.net.children())[0])
                        extractor_1 = nn.Sequential(*list(new_classifier.children())[:3]).cuda()
                        extractor_2 = nn.Sequential(*list(new_classifier.children())[:6]).cuda()
                        extractor_3 = nn.Sequential(*list(new_classifier.children())[:11]).cuda()
                        extractor_4 = nn.Sequential(*list(new_classifier.children())[:16]).cuda()
                        extractor_5 = nn.Sequential(*list(new_classifier.children())[:21]).cuda()
                    elif (self.option.model == 'resnet18'):
                        # Prefixes of the resnet module list (stem + blocks).
                        extractor_1 = nn.Sequential(*list(self.net.children())[:4]).cuda()
                        extractor_2 = nn.Sequential(*list(self.net.children())[:5]).cuda()
                        extractor_3 = nn.Sequential(*list(self.net.children())[:6]).cuda()
                        extractor_4 = nn.Sequential(*list(self.net.children())[:7]).cuda()
                        extractor_5 = nn.Sequential(*list(self.net.children())[:8]).cuda()
                    elif (self.option.model == 'alexnet'):
                        # alexnet only yields three tap points.
                        new_classifier = nn.Sequential(*list(self.net.children())[0])
                        extractor_1 = nn.Sequential(*list(new_classifier.children())[:3]).cuda()
                        extractor_2 = nn.Sequential(*list(new_classifier.children())[:6]).cuda()
                        extractor_3 = nn.Sequential(*list(new_classifier.children())[:13]).cuda()
                if (self.option.model == 'alexnet'):
                    # Freeze the extractors: only orthnet is trained.
                    for param in extractor_1.parameters():
                        param.requires_grad = False
                    for param in extractor_2.parameters():
                        param.requires_grad = False
                    for param in extractor_3.parameters():
                        param.requires_grad = False
                    feature_1 = extractor_1.forward(images)
                    feature_2 = extractor_2.forward(images)
                    feature_3 = extractor_3.forward(images)
                    out = {}
                    out['out1'] = feature_1
                    out['out2'] = feature_2
                    out['out3'] = feature_3
                else:
                    # vgg11 / resnet18: five frozen tap points.
                    for param in extractor_1.parameters():
                        param.requires_grad = False
                    for param in extractor_2.parameters():
                        param.requires_grad = False
                    for param in extractor_3.parameters():
                        param.requires_grad = False
                    for param in extractor_4.parameters():
                        param.requires_grad = False
                    for param in extractor_5.parameters():
                        param.requires_grad = False
                    feature_1 = extractor_1.forward(images)
                    feature_2 = extractor_2.forward(images)
                    feature_3 = extractor_3.forward(images)
                    feature_4 = extractor_4.forward(images)
                    feature_5 = extractor_5.forward(images)
                    out = {}
                    out['out1'] = feature_1
                    out['out2'] = feature_2
                    out['out3'] = feature_3
                    out['out4'] = feature_4
                    out['out5'] = feature_5
                self.optim_orth.zero_grad()
                if self.option.ablation:
                    # Ablation: orthnet returns predictions only.
                    pred_label_orth = self.orthnet(out)
                    loss_orth = self.loss_orth(pred_label_orth, labels)
                    loss_orth_sum += loss_orth
                else:
                    # Full model: auxiliary conv/trans losses are tracked too.
                    (pred_label_orth, loss_conv, loss_trans) = self.orthnet(out)
                    loss_orth = self.loss_orth(pred_label_orth, labels)
                    loss_orth_sum += loss_orth
                    loss_conv_sum += loss_conv
                    loss_trans_sum += loss_trans
                loss_orth.backward()
                self.optim_orth.step()
            else:
                # Baseline: plain cross-entropy training of self.net.
                self.optim.zero_grad()
                if (self.option.data == 'CelebA-HQ'):
                    pred_label = self.net(images)
                    loss = self.loss(pred_label, torch.squeeze(labels))
                    loss_sum += loss
                loss.backward()
                self.optim.step()
        if self.option.ubnet:
            msg = f'[TRAIN] ORTH LOSS : {(loss_orth_sum / len(data_loader))} LOSS_CONV : {(loss_conv_sum / total_num_train)} LOSS_TRANS : {(loss_trans_sum / total_num_train)}'
        else:
            msg = f'[TRAIN] BASE LOSS : {(loss_sum / len(data_loader))}'
        self.logger.info(msg)
def _validate(self, data_loader, valid_type=None, step=None):
    """Evaluate the current model (UBNet or baseline) over ``data_loader``.

    Accumulates loss and top-1 accuracy across all batches and logs one
    summary line. In pure-evaluation mode (``option.is_train`` False) the
    model is re-initialized and a checkpoint is required.

    Args:
        data_loader: iterable of (images, labels) batches.
        valid_type: optional tag included in the log message (e.g. 'ub2').
        step: current training step (unused here; kept for caller symmetry).
    """
    self._mode_setting(is_train=False)
    if (not self.option.is_train):
        print('[VALIDATING]')
        self._initialization()
        if (self.option.checkpoint is not None):
            self._load_model()
        else:
            print('No trained model')
            sys.exit()
    total_num_correct = 0.0
    total_num_correct_orth = 0.0
    total_num_test = 0.0
    total_loss = 0.0
    total_loss_orth = 0.0
    total_loss_conv = 0.0
    # NOTE(review): the original initialized total_loss_trans twice in a row;
    # the duplicate assignment was removed (no behavior change).
    total_loss_trans = 0.0
    for (i, (images, labels)) in enumerate(tqdm(data_loader)):
        images = self._get_variable(images)
        labels = self._get_variable(labels)
        batch_size = images.shape[0]
        total_num_test += batch_size
        if self.option.ubnet:
            self.optim_orth.zero_grad()
            if (self.option.data == 'CelebA-HQ'):
                # Slice the backbone into frozen intermediate feature
                # extractors; the slice bounds select conv stages per model.
                if (self.option.model == 'vgg11'):
                    new_classifier = nn.Sequential(*list(self.net.children())[0])
                    extractor_1 = nn.Sequential(*list(new_classifier.children())[:3]).cuda()
                    extractor_2 = nn.Sequential(*list(new_classifier.children())[:6]).cuda()
                    extractor_3 = nn.Sequential(*list(new_classifier.children())[:11]).cuda()
                    extractor_4 = nn.Sequential(*list(new_classifier.children())[:16]).cuda()
                    extractor_5 = nn.Sequential(*list(new_classifier.children())[:21]).cuda()
                elif (self.option.model == 'resnet18'):
                    extractor_1 = nn.Sequential(*list(self.net.children())[:4]).cuda()
                    extractor_2 = nn.Sequential(*list(self.net.children())[:5]).cuda()
                    extractor_3 = nn.Sequential(*list(self.net.children())[:6]).cuda()
                    extractor_4 = nn.Sequential(*list(self.net.children())[:7]).cuda()
                    extractor_5 = nn.Sequential(*list(self.net.children())[:8]).cuda()
                elif (self.option.model == 'alexnet'):
                    new_classifier = nn.Sequential(*list(self.net.children())[0])
                    extractor_1 = nn.Sequential(*list(new_classifier.children())[:3]).cuda()
                    extractor_2 = nn.Sequential(*list(new_classifier.children())[:6]).cuda()
                    extractor_3 = nn.Sequential(*list(new_classifier.children())[:13]).cuda()
            # alexnet exposes 3 tap points, the other backbones 5.
            if (self.option.model == 'alexnet'):
                for param in extractor_1.parameters():
                    param.requires_grad = False
                for param in extractor_2.parameters():
                    param.requires_grad = False
                for param in extractor_3.parameters():
                    param.requires_grad = False
                feature_1 = extractor_1.forward(images)
                feature_2 = extractor_2.forward(images)
                feature_3 = extractor_3.forward(images)
                out = {}
                out['out1'] = feature_1
                out['out2'] = feature_2
                out['out3'] = feature_3
            else:
                for param in extractor_1.parameters():
                    param.requires_grad = False
                for param in extractor_2.parameters():
                    param.requires_grad = False
                for param in extractor_3.parameters():
                    param.requires_grad = False
                for param in extractor_4.parameters():
                    param.requires_grad = False
                for param in extractor_5.parameters():
                    param.requires_grad = False
                feature_1 = extractor_1.forward(images)
                feature_2 = extractor_2.forward(images)
                feature_3 = extractor_3.forward(images)
                feature_4 = extractor_4.forward(images)
                feature_5 = extractor_5.forward(images)
                out = {}
                out['out1'] = feature_1
                out['out2'] = feature_2
                out['out3'] = feature_3
                out['out4'] = feature_4
                out['out5'] = feature_5
            (pred_label_orth, loss_conv, loss_trans) = self.orthnet(out)
            loss_orth = self.loss_orth(pred_label_orth, labels)
            total_num_correct_orth += self._num_correct(pred_label_orth, labels, topk=1).data
            total_loss_orth += (loss_orth.data * batch_size)
            total_loss_conv += loss_conv
            total_loss_trans += loss_trans
        if (not self.option.ubnet):
            self.optim.zero_grad()
            pred_label = self.net(images)
            loss = self.loss(pred_label, labels)
            total_num_correct += self._num_correct(pred_label, labels, topk=1).data
            total_loss += (loss.data * batch_size)
    if self.option.ubnet:
        avg_loss_orth = (total_loss_orth / total_num_test)
        avg_acc_orth = (total_num_correct_orth / total_num_test)
        if (valid_type != None):
            msg = f'[EVALUATION - {valid_type}] LOSS : {avg_loss_orth}, ACCURACY : {avg_acc_orth}'
        else:
            msg = f'[EVALUATION] LOSS : {avg_loss_orth}, ACCURACY : {avg_acc_orth} LOSS_CONV : {(total_loss_conv / total_num_test)} LOSS_TRANS : {(total_loss_trans / total_num_test)}'
    if (not self.option.ubnet):
        avg_loss = (total_loss / total_num_test)
        avg_acc = (float(total_num_correct) / total_num_test)
        if (valid_type != None):
            msg = f'[EVALUATION - {valid_type}] LOSS : {avg_loss}, ACCURACY : {avg_acc}'
        else:
            msg = f'[EVALUATION] LOSS : {avg_loss}, ACCURACY : {avg_acc}'
    self.logger.info(msg)
def _num_correct(self, outputs, labels, topk=1):
(_, preds) = outputs.topk(k=topk, dim=1)
preds = preds.t()
correct = preds.eq(labels.view(1, (- 1)).expand_as(preds))
correct = correct.view((- 1)).sum()
return correct
def _accuracy(self, outputs, labels):
batch_size = labels.size(0)
(_, preds) = outputs.topk(k=1, dim=1)
preds = preds.t()
correct = preds.eq(labels.view(1, (- 1)).expand_as(preds))
correct = correct.view((- 1)).float().sum(0, keepdim=True)
accuracy = correct.mul_((100.0 / batch_size))
return accuracy
def _save_model(self, step):
    """Write a training checkpoint (step, optimizer state, network weights)."""
    ckpt_path = os.path.join(self.option.save_dir, self.option.exp_name, f'checkpoint_step_{step}.pth')
    if self.option.ubnet:
        state = {'step': step, 'optim_state_dict': self.optim_orth.state_dict(), 'net_state_dict': self.orthnet.state_dict()}
    else:
        state = {'step': step, 'optim_state_dict': self.optim.state_dict(), 'net_state_dict': self.net.state_dict()}
    torch.save(state, ckpt_path)
    print(f'[SAVE] checkpoint step: {step}')
def _load_model(self):
    """Restore network weights from ``option.checkpoint`` (non-strict load)."""
    state = torch.load(self.option.checkpoint)
    self.net.load_state_dict(state['net_state_dict'], strict=False)
def train(self, train_loader, ub1_val_loader=None, ub2_val_loader=None):
    """Main training loop: run each step, then periodically validate and save."""
    self._initialization()
    if self.option.checkpoint is not None:
        self._load_model()
    self._mode_setting(is_train=True)
    for step in range(0, self.option.max_step):
        self._train_step(train_loader, step)
        self.scheduler.step()
        # Checkpoint on step 1, every save_step steps, and on the final step.
        is_ckpt_step = (step == 1) or (step % self.option.save_step == 0) or (step == self.option.max_step - 1)
        if is_ckpt_step:
            if self.option.data == 'CelebA-HQ':
                if (ub1_val_loader is not None) and (ub2_val_loader is not None):
                    self._validate(ub1_val_loader, step=step)
                    self._validate(ub2_val_loader, valid_type='ub2', step=step)
            self._save_model(step)
def _get_variable(self, inputs):
    """Wrap a tensor in a Variable, moving it to GPU when ``option.cuda`` is set."""
    if not self.option.cuda:
        return Variable(inputs)
    return Variable(inputs.cuda())
def trpo_gym_tf_cartpole(ctxt=None, seed=1):
    """Train TRPO on CartPole-v1 for 10 epochs, then visualize the policy.

    Args:
        ctxt: snapshot configuration forwarded to LocalTFRunner.
        seed: random seed applied before any component is built.
    """
    set_seed(seed)
    with LocalTFRunner(snapshot_config=ctxt) as runner:
        cartpole = GarageEnv(gym.make('CartPole-v1'))
        mlp_policy = CategoricalMLPPolicy(name='policy', env_spec=cartpole.spec, hidden_sizes=(32, 32))
        lin_baseline = LinearFeatureBaseline(env_spec=cartpole.spec)
        trpo_algo = TRPO(env_spec=cartpole.spec, policy=mlp_policy, baseline=lin_baseline, max_path_length=200, discount=0.99, max_kl_step=0.01)
        runner.setup(trpo_algo, cartpole)
        runner.train(n_epochs=10, batch_size=10000, plot=False)
        visualize_algorithm_as_video(cartpole, mlp_policy)
class Gate(Node):
    """AST node for an OPENQASM custom gate definition."""

    def __init__(self, children):
        """children is [id, bitlist, body] or [id, arguments, bitlist, body]."""
        super().__init__('gate', children, None)
        self.id = children[0]
        # Mirror the identifier's metadata on this node for error reporting.
        self.name = self.id.name
        self.line = self.id.line
        self.file = self.id.file
        has_arguments = len(children) != 3
        if has_arguments:
            self.arguments = children[1]
            self.bitlist = children[2]
            self.body = children[3]
        else:
            self.arguments = None
            self.bitlist = children[1]
            self.body = children[2]

    def n_args(self):
        """Number of classical arguments (0 when the gate takes none)."""
        return self.arguments.size() if self.arguments else 0

    def n_bits(self):
        """Number of qubit arguments."""
        return self.bitlist.size()

    def qasm(self, prec=15):
        """Return the OPENQASM source text for this gate definition."""
        text = 'gate ' + self.name
        if self.arguments is not None:
            text += '(' + self.arguments.qasm(prec) + ')'
        text += ' ' + self.bitlist.qasm(prec) + '\n'
        text += '{\n' + self.body.qasm(prec) + '}'
        return text
def build_phoc(token):
    """Compute the PHOC descriptor of a word as a float32 numpy array.

    The token is lowercased, stripped, and filtered to characters in the
    module-level ``_alphabet`` before the raw PHOC is built.
    """
    cleaned = token.lower().strip()
    cleaned = ''.join(ch for ch in cleaned if ch in _alphabet)
    return np.array(_build_phoc_raw(cleaned), dtype=np.float32)
def test_demo_iris():
    """Smoke-test HyperoptEstimator classification on the Iris dataset."""
    iris = datasets.load_iris()
    X_tr, X_te, y_tr, y_te = train_test_split(iris.data, iris.target, test_size=0.25, random_state=1)
    clf = hpsklearn.HyperoptEstimator(preprocessing=hpsklearn.components.any_preprocessing('pp'), classifier=hpsklearn.components.any_classifier('clf'), algo=tpe.suggest, trial_timeout=15.0, max_evals=10, seed=1)
    print('', file=sys.stderr)
    print('====Demo classification on Iris dataset====', file=sys.stderr)
    # Drive the fit one trial at a time so each trial's loss can be reported.
    fit_iterator = clf.fit_iter(X_tr, y_tr)
    next(fit_iterator)
    trial_count = 0
    while len(clf.trials.trials) < clf.max_evals:
        fit_iterator.send(1)
        trial_count += 1
        print('Trial', trial_count, 'loss:', clf.trials.losses()[-1], file=sys.stderr)
    clf.retrain_best_model_on_full_data(X_tr, y_tr)
    print('Test accuracy:', clf.score(X_te, y_te), file=sys.stderr)
    print('====End of demo====', file=sys.stderr)
class VideoMAEConfig(PretrainedConfig):
    """Configuration holder for a VideoMAE model.

    Stores encoder hyperparameters (patch/tubelet geometry, transformer
    sizes) and decoder hyperparameters verbatim as instance attributes.
    """
    model_type = 'videomae'

    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=16, tubelet_size=2, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, qkv_bias=True, use_mean_pooling=True, decoder_num_attention_heads=6, decoder_hidden_size=384, decoder_num_hidden_layers=4, decoder_intermediate_size=1536, norm_pix_loss=True, **kwargs):
        super().__init__(**kwargs)
        # Copy every hyperparameter onto the instance unchanged.
        hyperparams = (
            ('image_size', image_size),
            ('patch_size', patch_size),
            ('num_channels', num_channels),
            ('num_frames', num_frames),
            ('tubelet_size', tubelet_size),
            ('hidden_size', hidden_size),
            ('num_hidden_layers', num_hidden_layers),
            ('num_attention_heads', num_attention_heads),
            ('intermediate_size', intermediate_size),
            ('hidden_act', hidden_act),
            ('hidden_dropout_prob', hidden_dropout_prob),
            ('attention_probs_dropout_prob', attention_probs_dropout_prob),
            ('initializer_range', initializer_range),
            ('layer_norm_eps', layer_norm_eps),
            ('qkv_bias', qkv_bias),
            ('use_mean_pooling', use_mean_pooling),
            ('decoder_num_attention_heads', decoder_num_attention_heads),
            ('decoder_hidden_size', decoder_hidden_size),
            ('decoder_num_hidden_layers', decoder_num_hidden_layers),
            ('decoder_intermediate_size', decoder_intermediate_size),
            ('norm_pix_loss', norm_pix_loss),
        )
        for attr_name, value in hyperparams:
            setattr(self, attr_name, value)
def process(data_dir, split, i):
    """Add extractive labels to sample ``i`` of ``split``, rewriting it on disk.

    Reads '<data_dir>/<split>/<i>.json', computes (extracted, score) from the
    tokenized article/abstract, and writes the augmented record back.
    """
    sample_dir = os.path.join(data_dir, split)
    json_path = os.path.join(sample_dir, '{}.json'.format(i))
    with open(json_path) as f:
        data = json.load(f)
    tokenize = compose(list, _split_words)
    art_sents = tokenize(data['article'])
    abs_sents = tokenize(data['abstract'])
    # Empty article or abstract yields empty labels instead of failing.
    if art_sents and abs_sents:
        extracted, scores = get_extract_label(art_sents, abs_sents)
    else:
        extracted, scores = [], []
    data['extracted'] = extracted
    data['score'] = scores
    with open(json_path, 'w') as f:
        json.dump(data, f, indent=4)
def EUNN_param(hidden_size, capacity=2, FFT=False, comp=False):
    """Build the parameter tensors of an EUNN (Efficient Unitary NN) layer.

    Args:
        hidden_size: dimension of the hidden state the unitary acts on.
        capacity: number of rotation layers (tunable style). Ignored and
            recomputed as log2(hidden_size) when FFT is True.
        FFT: use the FFT-style parametrization instead of the tunable one.
        comp: use complex-valued rotations (adds phase parameters and a
            complex diagonal).

    Returns:
        (v1, v2, ind, diag, capacity): v1/v2 are TensorArrays holding the
        diagonal/off-diagonal rotation coefficients per layer, ind a
        TensorArray of permutation indices, diag the complex diagonal
        (None when comp is False), and capacity the possibly-recomputed
        layer count.
    """
    # Rotation angles (and phases) are drawn uniformly from [-pi, pi).
    theta_phi_initializer = init_ops.random_uniform_initializer((- np.pi), np.pi)
    if FFT:
        # FFT-style: log2(N) layers of 2x2 rotations.
        capacity = int(np.log2(hidden_size))
        params_theta_0 = vs.get_variable('theta_0', [capacity, (hidden_size / 2)], initializer=theta_phi_initializer)
        cos_theta_0 = math_ops.cos(params_theta_0)
        sin_theta_0 = math_ops.sin(params_theta_0)
        if comp:
            # Complex case: each rotation also carries a phase phi; the
            # coefficient lists are assembled as separate re/im parts.
            params_phi_0 = vs.get_variable('phi_0', [capacity, (hidden_size / 2)], initializer=theta_phi_initializer)
            cos_phi_0 = math_ops.cos(params_phi_0)
            sin_phi_0 = math_ops.sin(params_phi_0)
            cos_list_0_re = array_ops.concat([cos_theta_0, math_ops.multiply(cos_theta_0, cos_phi_0)], 1)
            cos_list_0_im = array_ops.concat([array_ops.zeros_like(cos_theta_0), math_ops.multiply(cos_theta_0, sin_phi_0)], 1)
            sin_list_0_re = array_ops.concat([sin_theta_0, (- math_ops.multiply(sin_theta_0, cos_phi_0))], 1)
            sin_list_0_im = array_ops.concat([array_ops.zeros_like(sin_theta_0), (- math_ops.multiply(sin_theta_0, sin_phi_0))], 1)
            cos_list_0 = array_ops.unstack(math_ops.complex(cos_list_0_re, cos_list_0_im))
            sin_list_0 = array_ops.unstack(math_ops.complex(sin_list_0_re, sin_list_0_im))
        else:
            cos_list_0 = array_ops.unstack(array_ops.concat([cos_theta_0, cos_theta_0], 1))
            sin_list_0 = array_ops.unstack(array_ops.concat([sin_theta_0, (- sin_theta_0)], 1))
        # Per-layer permutations that realize the FFT butterfly pattern.
        (ind, ind1) = permute_FFT(hidden_size)
        ind1_list = array_ops.unstack(ind1)
        diag_list_0 = []
        off_list_0 = []
        for i in range(capacity):
            diag_list_0.append(permute(cos_list_0[i], ind1_list[i]))
            off_list_0.append(permute(sin_list_0[i], ind1_list[i]))
        v1 = array_ops.stack(diag_list_0, 0)
        v2 = array_ops.stack(off_list_0, 0)
    else:
        # Tunable style: capacity alternates between two interleavings
        # (theta_0 for even layers, theta_1 for odd layers).
        params_theta_0 = vs.get_variable('theta_0', [int((capacity / 2)), int((hidden_size / 2))], initializer=theta_phi_initializer)
        cos_theta_0 = math_ops.cos(params_theta_0)
        sin_theta_0 = math_ops.sin(params_theta_0)
        if comp:
            params_phi_0 = vs.get_variable('phi_0', [int((capacity / 2)), int((hidden_size / 2))], initializer=theta_phi_initializer)
            cos_phi_0 = math_ops.cos(params_phi_0)
            sin_phi_0 = math_ops.sin(params_phi_0)
            cos_list_0_re = array_ops.concat([cos_theta_0, math_ops.multiply(cos_theta_0, cos_phi_0)], 1)
            cos_list_0_im = array_ops.concat([array_ops.zeros_like(cos_theta_0), math_ops.multiply(cos_theta_0, sin_phi_0)], 1)
            sin_list_0_re = array_ops.concat([sin_theta_0, (- math_ops.multiply(sin_theta_0, cos_phi_0))], 1)
            sin_list_0_im = array_ops.concat([array_ops.zeros_like(sin_theta_0), (- math_ops.multiply(sin_theta_0, sin_phi_0))], 1)
            cos_list_0 = array_ops.unstack(math_ops.complex(cos_list_0_re, cos_list_0_im))
            sin_list_0 = array_ops.unstack(math_ops.complex(sin_list_0_re, sin_list_0_im))
        else:
            cos_list_0 = array_ops.concat([cos_theta_0, cos_theta_0], 1)
            sin_list_0 = array_ops.concat([sin_theta_0, (- sin_theta_0)], 1)
        # Odd layers act on shifted pairs, so they have one fewer rotation;
        # the boundary elements are padded with identity (1) coefficients.
        params_theta_1 = vs.get_variable('theta_1', [int((capacity / 2)), (int((hidden_size / 2)) - 1)], initializer=theta_phi_initializer)
        cos_theta_1 = math_ops.cos(params_theta_1)
        sin_theta_1 = math_ops.sin(params_theta_1)
        if comp:
            params_phi_1 = vs.get_variable('phi_1', [int((capacity / 2)), (int((hidden_size / 2)) - 1)], initializer=theta_phi_initializer)
            cos_phi_1 = math_ops.cos(params_phi_1)
            sin_phi_1 = math_ops.sin(params_phi_1)
            cos_list_1_re = array_ops.concat([np.ones((int((capacity / 2)), 1)), cos_theta_1, math_ops.multiply(cos_theta_1, cos_phi_1), np.ones((int((capacity / 2)), 1))], 1)
            cos_list_1_im = array_ops.concat([np.zeros((int((capacity / 2)), 1)), array_ops.zeros_like(cos_theta_1), math_ops.multiply(cos_theta_1, sin_phi_1), np.zeros((int((capacity / 2)), 1))], 1)
            sin_list_1_re = array_ops.concat([np.zeros((int((capacity / 2)), 1)), sin_theta_1, (- math_ops.multiply(sin_theta_1, cos_phi_1)), np.zeros((int((capacity / 2)), 1))], 1)
            sin_list_1_im = array_ops.concat([np.zeros((int((capacity / 2)), 1)), array_ops.zeros_like(sin_theta_1), (- math_ops.multiply(sin_theta_1, sin_phi_1)), np.zeros((int((capacity / 2)), 1))], 1)
            cos_list_1 = array_ops.unstack(math_ops.complex(cos_list_1_re, cos_list_1_im))
            sin_list_1 = array_ops.unstack(math_ops.complex(sin_list_1_re, sin_list_1_im))
        else:
            cos_list_1 = array_ops.concat([np.ones((int((capacity / 2)), 1)), cos_theta_1, cos_theta_1, np.ones((int((capacity / 2)), 1))], 1)
            sin_list_1 = array_ops.concat([np.zeros((int((capacity / 2)), 1)), sin_theta_1, (- sin_theta_1), np.zeros((int((capacity / 2)), 1))], 1)
        (ind, ind3, ind4) = permute_tunable(hidden_size, capacity)
        diag_list_0 = permute(cos_list_0, ind3)
        off_list_0 = permute(sin_list_0, ind3)
        diag_list_1 = permute(cos_list_1, ind4)
        off_list_1 = permute(sin_list_1, ind4)
        # Interleave even/odd layer coefficients into [capacity, hidden_size].
        v1 = tf.reshape(tf.concat([diag_list_0, diag_list_1], 1), [capacity, hidden_size])
        v2 = tf.reshape(tf.concat([off_list_0, off_list_1], 1), [capacity, hidden_size])
    if comp:
        # Final learned complex diagonal D = exp(i * omega).
        omega = vs.get_variable('omega', [hidden_size], initializer=theta_phi_initializer)
        D = math_ops.complex(math_ops.cos(omega), math_ops.sin(omega))
    else:
        D = None
    v1 = toTensorArray(v1)
    v2 = toTensorArray(v2)
    ind = toTensorArray(ind)
    diag = D
    return (v1, v2, ind, diag, capacity)
def bf16_to_fp32(bf16_np):
    """Reinterpret bfloat16 values stored as int16 as a float32 array.

    A bfloat16 value is the top 16 bits of the equivalent float32, so
    widening to int32, shifting left 16 bits, and reinterpreting the bit
    pattern recovers the float32 value exactly.

    Args:
        bf16_np: numpy array of dtype int16 holding raw bfloat16 bits.

    Returns:
        numpy float32 array of the same shape. The input is not modified.
    """
    assert (bf16_np.dtype == np.int16)
    # astype() already returns a fresh array, so the original deepcopy
    # was redundant and has been removed.
    int32_np = (bf16_np.astype(dtype=np.int32) << 16)
    return int32_np.view(np.float32)
def build_roi_box_head(cfg, in_channels, BBAM=False):
    """Construct the ROI box head.

    Args:
        cfg: model configuration object forwarded to ROIBoxHead.
        in_channels: number of channels of the incoming feature maps.
        BBAM: forwarded to ROIBoxHead; enables its BBAM-specific behavior
            (semantics defined by ROIBoxHead — confirm there).
    """
    return ROIBoxHead(cfg, in_channels, BBAM=BBAM)
def find_dataset_using_name(dataset_name):
    """Import 'data/<dataset_name>_dataset.py' and return its dataset class.

    The class must subclass BaseDataset and be named
    '<DatasetName>Dataset' (case-insensitive, underscores ignored).

    Raises:
        ValueError: when no matching subclass exists in the module.
    """
    module_name = ('data.' + dataset_name) + '_dataset'
    module = importlib.import_module(module_name)
    wanted = dataset_name.replace('_', '') + 'dataset'
    found = None
    # Scan the module namespace; if several names match, the last wins
    # (same as the original implementation).
    for attr_name, attr in module.__dict__.items():
        if attr_name.lower() == wanted.lower() and issubclass(attr, BaseDataset):
            found = attr
    if found is None:
        raise ValueError(('In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase.' % (module_name, wanted)))
    return found
def parallel_transformer(model_parallel_size, num_att_heads_per_partition, hidden_size_per_att_head, batch_size, sequence_length):
    """Run one forward/backward pass of a model-parallel BERT transformer layer.

    Initializes the model-parallel group, builds a BertParallelTransformerLayer
    sized from the per-partition head count, and backprops a random weighted
    sum of the output so gradients are populated for the caller to inspect.

    Returns:
        (rank, hidden_size, model_parallel_size, loss, transformer_layer,
        identity_layer). The model-parallel group is destroyed before return.
    """
    mpu.initialize_model_parallel(model_parallel_size)
    # Re-read the world size: initialization may clamp the requested value.
    model_parallel_size = mpu.get_model_parallel_world_size()
    # Fixed seed so every rank builds identical weights and inputs.
    seed = 12345
    set_random_seed(seed)
    # Global head count spans all ranks; hidden/intermediate sizes follow.
    num_att_heads = (num_att_heads_per_partition * torch.distributed.get_world_size())
    hidden_size = (hidden_size_per_att_head * num_att_heads)
    intermediate_size = (4 * hidden_size)
    identity_layer = IdentityLayer3D(batch_size, sequence_length, hidden_size).cuda()
    transformer_layer = mpu.BertParallelTransformerLayer(hidden_size, intermediate_size, num_att_heads, 0.0, 0.0, torch.nn.functional.relu, 1e-05).cuda()
    loss_weight = torch.randn([batch_size, sequence_length, hidden_size]).cuda()
    attention_mask = torch.randn([batch_size, 1, 1, sequence_length]).cuda()
    input_ = identity_layer()
    output = transformer_layer(input_, attention_mask)
    # Scalar loss: elementwise product with random weights, summed.
    loss = torch.mul(output, loss_weight).sum()
    loss.backward()
    rank = mpu.get_model_parallel_rank()
    mpu.destroy_model_parallel()
    return (rank, hidden_size, model_parallel_size, loss, transformer_layer, identity_layer)
class RandomCrop(object):
    """Randomly crop an image, passing an optional target through unchanged."""

    def __init__(self, size):
        # Delegate the actual cropping to torchvision's transform.
        self.crop = torchvision.transforms.transforms.RandomCrop(size)

    def __call__(self, image, target=None):
        cropped = self.crop(image)
        if target is None:
            return cropped
        return (cropped, target)
def weight_variable(shape, name_idx):
    """Create a trainable weight tensor named 'weight<name_idx>'.

    Initialized from a truncated normal with stddev 0.1.
    """
    init_vals = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init_vals, name=('weight' + str(name_idx)))
def test_construct():
    """Exercise Config construction, dump, and round-trip across formats.

    Covers: the empty config, invalid constructor input, and dump/fromfile
    round-trips for .py, .json, and .yaml files, including predefined
    variable substitution ({{fileBasename}} etc.).
    """
    # Empty config: no filename, no text, no items.
    cfg = Config()
    assert (cfg.filename is None)
    assert (cfg.text == '')
    assert (len(cfg) == 0)
    assert (cfg._cfg_dict == {})
    # Only dicts are accepted as the config payload.
    with pytest.raises(TypeError):
        Config([0, 1])
    # Round-trip via a python-format config file.
    cfg_dict = dict(item1=[1, 2], item2=dict(a=0), item3=True, item4='test')
    cfg_file = osp.join(data_path, 'config/a.py')
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == open(cfg_file, 'r').read())
    assert (cfg.dump() == cfg.pretty_text)
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'a.py')
        cfg.dump(dump_file)
        assert (cfg.dump() == open(dump_file, 'r').read())
        assert Config.fromfile(dump_file)
    # Round-trip via a json-format config file.
    cfg_file = osp.join(data_path, 'config/b.json')
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == open(cfg_file, 'r').read())
    assert (cfg.dump() == json.dumps(cfg_dict))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'b.json')
        cfg.dump(dump_file)
        assert (cfg.dump() == open(dump_file, 'r').read())
        assert Config.fromfile(dump_file)
    # Round-trip via a yaml-format config file.
    cfg_file = osp.join(data_path, 'config/c.yaml')
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == open(cfg_file, 'r').read())
    assert (cfg.dump() == yaml.dump(cfg_dict))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'c.yaml')
        cfg.dump(dump_file)
        assert (cfg.dump() == open(dump_file, 'r').read())
        assert Config.fromfile(dump_file)
    # Predefined variables ({{fileBasename}} etc.) resolved from h.py.
    cfg_file = osp.join(data_path, 'config/h.py')
    path = osp.join(osp.dirname(__file__), 'data', 'config')
    path = Path(path).as_posix()
    cfg_dict = dict(item1='h.py', item2=path, item3='abc_h')
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == open(cfg_file, 'r').read())
    assert (cfg.dump() == cfg.pretty_text)
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'h.py')
        cfg.dump(dump_file)
        assert (cfg.dump() == open(dump_file, 'r').read())
        assert Config.fromfile(dump_file)
        assert (Config.fromfile(dump_file)['item1'] == cfg_dict['item1'])
        assert (Config.fromfile(dump_file)['item2'] == cfg_dict['item2'])
        assert (Config.fromfile(dump_file)['item3'] == cfg_dict['item3'])
    # With substitution disabled, the raw template strings are preserved.
    cfg_dict = dict(item1='{{fileBasename}}', item2='{{ fileDirname}}', item3='abc_{{ fileBasenameNoExtension }}')
    assert Config.fromfile(cfg_file, False)
    assert (Config.fromfile(cfg_file, False)['item1'] == cfg_dict['item1'])
    assert (Config.fromfile(cfg_file, False)['item2'] == cfg_dict['item2'])
    assert (Config.fromfile(cfg_file, False)['item3'] == cfg_dict['item3'])
    # Same substitution behavior for yaml (p.yaml) ...
    cfg_file = osp.join(data_path, 'config/p.yaml')
    cfg_dict = dict(item1=osp.join(osp.dirname(__file__), 'data', 'config'))
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == open(cfg_file, 'r').read())
    assert (cfg.dump() == yaml.dump(cfg_dict))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'p.yaml')
        cfg.dump(dump_file)
        assert (cfg.dump() == open(dump_file, 'r').read())
        assert Config.fromfile(dump_file)
        assert (Config.fromfile(dump_file)['item1'] == cfg_dict['item1'])
    assert Config.fromfile(cfg_file, False)
    assert (Config.fromfile(cfg_file, False)['item1'] == '{{ fileDirname }}')
    # ... and for json (o.json).
    cfg_file = osp.join(data_path, 'config/o.json')
    cfg_dict = dict(item1=osp.join(osp.dirname(__file__), 'data', 'config'))
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == open(cfg_file, 'r').read())
    assert (cfg.dump() == json.dumps(cfg_dict))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'o.json')
        cfg.dump(dump_file)
        assert (cfg.dump() == open(dump_file, 'r').read())
        assert Config.fromfile(dump_file)
        assert (Config.fromfile(dump_file)['item1'] == cfg_dict['item1'])
    assert Config.fromfile(cfg_file, False)
    assert (Config.fromfile(cfg_file, False)['item1'] == '{{ fileDirname }}')
class MBartPreTrainedModel(metaclass=DummyObject):
    # Placeholder class used when torch is not installed; any attempt to
    # instantiate it raises via requires_backends instead of failing with
    # an opaque import error.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def read_beam_files3(file_base_path):
    """Read beam-search output files sharing a common path prefix.

    Reads '<file_base_path>beam' (one hypothesis per line, stripped) and
    '<file_base_path>corr' (one whitespace-tokenized reference per line).

    Cleanup: the original wrapped the first read in a dead ``if True:`` and
    computed an unused '<base>src' path; both removed — behavior unchanged.

    Returns:
        (vit_lst, corr_lst): list of hypothesis strings and list of
        token lists.
    """
    vit_path = file_base_path + 'beam'
    corr_path = file_base_path + 'corr'
    with open(vit_path, 'r') as f:
        vit_lst = [line.strip() for line in f]
    with open(corr_path, 'r') as f:
        corr_lst = [line.strip().split() for line in f]
    return (vit_lst, corr_lst)
def _const_impute_timeseries_dataframe(df, const_num):
res_df = df.fillna(const_num)
return res_df |
def plot_nodes(hgf: 'HGF', node_idxs: Union[(int, List[int])], ci: bool=True, show_surprise: bool=True, show_observations: bool=False, show_current_state: bool=False, figsize: Tuple[(int, int)]=(12, 5), color: Optional[Union[(Tuple, str)]]=None, axs: Optional[Union[(List, Axes)]]=None):
    """Plot the belief trajectories of one or more nodes of a fitted HGF.

    Input nodes are shown as scatter plots of observations; state nodes as
    expected mean with an optional +/-1 SD band, optionally overlaid with
    the posterior state, child observations, and a per-node surprise axis.

    Args:
        hgf: fitted HGF whose ``to_pandas()`` trajectories are plotted.
        node_idxs: one node index or a list of them (one subplot each).
        ci: shade +/-1 SD around the expected mean.
        show_surprise: add a twin axis with the node's surprise trace.
        show_observations: scatter the value children's means/observations.
        show_current_state: overlay the posterior mean (and band when ci).
        figsize: figure size used when ``axs`` is not supplied.
        color: line/band color forwarded to matplotlib.
        axs: existing axes to draw on; created via plt.subplots when None.

    Returns:
        The list of matplotlib axes drawn on.
    """
    if (not isinstance(node_idxs, list)):
        node_idxs = [node_idxs]
    trajectories_df = hgf.to_pandas()
    if (axs is None):
        (_, axs) = plt.subplots(nrows=len(node_idxs), figsize=figsize, sharex=True)
        # BUG FIX: the original tested `(isinstance(node_idxs, int) | len(node_idxs)) == 1`;
        # node_idxs is always a list at this point so isinstance() is False and the
        # bitwise expression reduced to `len(node_idxs) == 1` — now written explicitly.
        # plt.subplots(nrows=1) returns a bare Axes, so wrap it for uniform indexing.
        if (len(node_idxs) == 1):
            axs = [axs]
    for (i, node_idx) in enumerate(node_idxs):
        if (node_idx in hgf.input_nodes_idx.idx):
            # Input node: scatter the raw observations.
            input_type = hgf.input_nodes_idx.kind[hgf.input_nodes_idx.idx.index(node_idx)]
            if (input_type == 'continuous'):
                axs[i].scatter(x=trajectories_df.time, y=trajectories_df[f'observation_input_{node_idx}'], s=3, label='Input', color='#2a2a2a', zorder=10, alpha=0.5)
            elif (input_type == 'binary'):
                axs[i].scatter(x=trajectories_df.time, y=trajectories_df[f'observation_input_{node_idx}'], label='Input', color='#4c72b0', alpha=0.2, edgecolors='k', zorder=10)
            axs[i].set_title(f'{input_type.capitalize()} Input Node {node_idx}', loc='left')
            axs[i].legend()
        else:
            # State node: expected mean with optional credible band.
            mean = trajectories_df[f'x_{node_idx}_expected_mean']
            precision = trajectories_df[f'x_{node_idx}_expected_precision']
            axs[i].plot(trajectories_df.time, mean, label='$\\hat{\\mu}$', color=color, linewidth=1, zorder=2)
            axs[i].set_ylabel(f'$\\mu_{{{node_idx}}}$')
            if (ci is True):
                sd = np.sqrt((1 / precision))
                y1 = (trajectories_df[f'x_{node_idx}_expected_mean'] - sd)
                y2 = (trajectories_df[f'x_{node_idx}_expected_mean'] + sd)
                if (hgf.edges[node_idx].value_children is not None):
                    if np.any([((i in hgf.edges[node_idx].value_children) and (kind == 'binary')) for (i, kind) in enumerate(hgf.input_nodes_idx.kind)]):
                        # Binary-input child: squash the parent's band through
                        # the logistic so it lives in probability space.
                        parent_idx = hgf.edges[node_idx].value_parents[0]
                        mean_parent = trajectories_df[f'x_{parent_idx}_expected_mean']
                        precision_parent = trajectories_df[f'x_{parent_idx}_expected_precision']
                        sd = np.sqrt((1 / precision_parent))
                        y1 = (1 / (1 + np.exp(((- mean_parent) + sd))))
                        y2 = (1 / (1 + np.exp(((- mean_parent) - sd))))
                axs[i].fill_between(x=trajectories_df.time, y1=y1, y2=y2, alpha=0.4, color=color, zorder=2)
                axs[i].legend()
            if show_current_state:
                mean = trajectories_df[f'x_{node_idx}_mean']
                precision = trajectories_df[f'x_{node_idx}_precision']
                axs[i].plot(trajectories_df.time, mean, label='$\\mu$', color='gray', linewidth=0.5, zorder=2, linestyle='--')
                if (ci is True):
                    sd = np.sqrt((1 / precision))
                    axs[i].fill_between(x=trajectories_df.time, y1=(trajectories_df[f'x_{node_idx}_mean'] - sd), y2=(trajectories_df[f'x_{node_idx}_mean'] + sd), alpha=0.1, color=color, zorder=2)
                axs[i].legend()
            if show_observations:
                if (hgf.edges[node_idx].value_children is not None):
                    input_colors = plt.cm.cividis(np.linspace(0, 1, len(hgf.edges[node_idx].value_children)))
                    for (ii, child_idx) in enumerate(hgf.edges[node_idx].value_children):
                        if (child_idx not in hgf.input_nodes_idx.idx):
                            axs[i].scatter(trajectories_df.time, trajectories_df[f'x_{child_idx}_mean'], s=3, label=f'Value child node - {ii}', alpha=0.5, color=input_colors[ii], edgecolors='grey')
                            axs[i].plot(trajectories_df.time, trajectories_df[f'x_{child_idx}_mean'], linewidth=0.5, linestyle='--', alpha=0.5, color=input_colors[ii])
                        else:
                            # Input child: translate node index to input index.
                            child_idx = np.where((np.array(hgf.input_nodes_idx.idx) == child_idx))[0][0]
                            axs[i].scatter(trajectories_df.time, trajectories_df[f'observation_input_{child_idx}'], s=3, label=f'Value child node - {ii}', alpha=0.3, color=input_colors[ii], edgecolors='grey')
                            axs[i].plot(trajectories_df.time, trajectories_df[f'observation_input_{child_idx}'], linewidth=0.5, linestyle='--', alpha=0.3, color=input_colors[ii])
                    axs[i].legend()
            if show_surprise:
                if (not trajectories_df[f'x_{node_idx}_surprise'].isnull().all()):
                    surprise_ax = axs[i].twinx()
                    node_surprise = trajectories_df[f'x_{node_idx}_surprise'].to_numpy()
                    sp = node_surprise.sum()
                    surprise_ax.set_title(f'Node {node_idx} - Surprise: {sp:.2f}', loc='left')
                    surprise_ax.fill_between(x=trajectories_df.time, y1=node_surprise, y2=node_surprise.min(), where=hgf.node_trajectories[node_idx]['observed'], color='#7f7f7f', alpha=0.1, zorder=(- 1))
                    # Mask unobserved trials so the line breaks there.
                    node_surprise[(hgf.node_trajectories[node_idx]['observed'] == 0)] = np.nan
                    surprise_ax.plot(trajectories_df.time, node_surprise, color='#2a2a2a', linewidth=0.5, zorder=(- 1), label='Surprise')
                    surprise_ax.set_ylabel('Surprise')
                    surprise_ax.legend()
    return axs
def orbit_stats_all(graph_ref_list, graph_pred_list):
    """MMD distance between per-node orbit-count statistics of two graph sets.

    Each graph is summarized by its ORCA orbit counts summed over nodes and
    normalized by node count; the two resulting statistic sets are compared
    with a Gaussian total-variation kernel MMD.

    Args:
        graph_ref_list: reference networkx graphs.
        graph_pred_list: generated networkx graphs (empty ones are dropped).

    Returns:
        The MMD distance (float).
    """
    def _mean_orbit_counts(graphs):
        # Average ORCA orbit counts per node; graphs orca cannot process
        # are skipped, matching the original try/continue behavior.
        stats = []
        for G in graphs:
            try:
                orbit_counts = orca(G)
            except Exception:
                continue
            stats.append(np.sum(orbit_counts, axis=0) / G.number_of_nodes())
        return np.array(stats)

    graph_pred_list_remove_empty = [G for G in graph_pred_list if (not (G.number_of_nodes() == 0))]
    total_counts_ref = _mean_orbit_counts(graph_ref_list)
    # BUG FIX: the original iterated graph_pred_list here, ignoring the
    # empty-graph filter it had just computed — empty graphs would divide
    # by zero node counts. Use the filtered list as clearly intended.
    total_counts_pred = _mean_orbit_counts(graph_pred_list_remove_empty)
    mmd_dist = compute_mmd(total_counts_ref, total_counts_pred, kernel=gaussian_tv, is_hist=False, sigma=30.0)
    return mmd_dist
class ModelDO(Model):
    """Model variant with Monte-Carlo dropout: the keep probability is fed
    through a placeholder at run time rather than fixed in the graph."""

    def __init__(self, n_hidden, K, nonlinearity, bn, do, tau, dataset, in_dim=1, out_dim=1, regression=True, first_layer_do=False):
        # The placeholder must exist before the parent builds the graph.
        self.keep_prob_ph = tf.placeholder(tf.float32)
        super(ModelDO, self).__init__(n_hidden, K, nonlinearity, bn, do, tau, dataset, in_dim, out_dim, regression, first_layer_do)

    def run_train_step(self, batch, keep_prob):
        """Run one optimizer step on an (X, y) batch at the given keep prob."""
        feed = {self.x: self.dataset.normalize_X(batch[0]), self.y: self.dataset.normalize_y(batch[1]), self.keep_prob_ph: keep_prob}
        self.train_step.run(feed_dict=feed)

    def predict(self, x, keep_prob):
        """Single stochastic forward pass; returns denormalized predictions."""
        feed = {self.x: self.dataset.normalize_X(x), self.keep_prob_ph: keep_prob}
        raw_pred = self.yHat.eval(feed_dict=feed)
        return self.dataset.denormalize_y(raw_pred)

    def predict_mc(self, n_samples, x, keep_prob):
        """MC-dropout prediction: moments over n_samples stochastic passes."""
        return self.get_mc_moments(self.get_mc_samples(n_samples, x, keep_prob))

    def get_mc_samples(self, n_samples, x, keep_prob):
        """Collect n_samples stochastic predictions via add_to_collection."""
        collected = None
        for _ in range(n_samples):
            collected = add_to_collection(self.predict(x, keep_prob), collected)
        return collected
def clearn_make(make):
    """Normalize a raw device manufacturer string to a canonical brand.

    Known brand substrings win first (redmi/honor map to their parent
    brands). Otherwise, for multi-word strings, the first token is used,
    with Xiaomi model prefixes ('mi', 'm1', 'm2', 'm3', 'm6') mapped to
    'xiaomi'. Single-word unknown makes are returned unchanged.

    Args:
        make: raw (expected lowercase) manufacturer string.

    Returns:
        Canonical brand string.
    """
    if 'oppo' in make:
        return 'oppo'
    if 'vivo' in make:
        return 'vivo'
    if ('huawei' in make) or ('honor' in make):
        return 'huawei'
    if 'redmi' in make:
        return 'xiaomi'
    tokens = make.split()
    if len(tokens) > 1:
        first = tokens[0]
        # Idiom: membership test replaces the original chained == comparisons.
        if first in ('mi', 'm1', 'm2', 'm3', 'm6'):
            return 'xiaomi'
        return first
    return make
def get_stats_for_single_payoff_table(payoff_table: PayoffTable, highest_policy_num: int, poker_env_config, policy_class, policy_config, cache_dir: str, eval_every_nth_entry=1, is_symmetric_two_player=False):
    """Compute per-generation exploitability/step/episode stats for a PSRO payoff table.

    For each generation (1..highest_policy_num policies), solves the restricted
    metagame via fictitious play, evaluates the resulting metanash strategy's
    exploitability (cached on disk), and accumulates training steps/episodes
    from the policy specs' metadata.

    Returns:
        dict with parallel lists: 'num_policies', 'exploitability',
        'timesteps', 'episodes'.
    """
    # Local-mode Ray: everything runs in-process, no GPUs needed for eval.
    ray.init(ignore_reinit_error=True, local_mode=True, num_cpus=1, num_gpus=0)
    poker_game_version = poker_env_config['version']
    temp_env = PokerMultiAgentEnv(env_config=poker_env_config)
    openspiel_env_config = temp_env.open_spiel_env_config
    def extra_action_out_fn(policy: Policy, input_dict, state_batches, model, action_dist: ActionDistribution) -> Dict[(str, TensorType)]:
        # Deterministic action, exposed as a one-hot 'action_probs' row so the
        # exploitability computation can read an exact policy distribution.
        action = action_dist.deterministic_sample()
        action_probs = torch.zeros_like(policy.q_values).long()
        action_probs[0][action[0]] = 1.0
        return {'q_values': policy.q_values, 'action_probs': action_probs}
    policy_class = policy_class.with_updates(extra_action_out_fn=extra_action_out_fn)
    # One rllib policy object per player; weights are swapped in per checkpoint.
    policies = [policy_class(obs_space=temp_env.observation_space, action_space=temp_env.action_space, config=policy_config) for _ in range(2)]
    def set_policy_weights(policy: Policy, checkpoint_path: str):
        # Checkpoints store '.' as '_dot_' in weight keys; undo that here.
        checkpoint_data = deepdish.io.load(path=checkpoint_path)
        weights = checkpoint_data['weights']
        weights = {k.replace('_dot_', '.'): v for (k, v) in weights.items()}
        policy.set_weights(weights)
    exploitability_per_generation = []
    total_steps_per_generation = []
    total_episodes_per_generation = []
    num_policies_per_generation = []
    for (i, n_policies) in enumerate(range(1, (highest_policy_num + 1))):
        # Exploitability is expensive: reuse cached values and only evaluate
        # every eval_every_nth_entry generations otherwise.
        exploitability_cached = get_exploitability_from_cache(cache_dir=cache_dir, policy_num=i)
        if ((exploitability_cached is None) and ((i % eval_every_nth_entry) == 0)):
            metanash_probs_0 = get_latest_metanash_strategies(payoff_table=payoff_table, as_player=1, as_policy_num=n_policies, fictitious_play_iters=2000, mix_with_uniform_dist_coeff=0.0, print_matrix=False)[0].probabilities_for_each_strategy()
            if is_symmetric_two_player:
                metanash_probs_1 = metanash_probs_0
            else:
                metanash_probs_1 = get_latest_metanash_strategies(payoff_table=payoff_table, as_player=0, as_policy_num=n_policies, fictitious_play_iters=2000, mix_with_uniform_dist_coeff=0.0, print_matrix=False)[1].probabilities_for_each_strategy()
            policy_specs_0 = payoff_table.get_ordered_spec_list_for_player(player=0)[:n_policies]
            policy_specs_1 = payoff_table.get_ordered_spec_list_for_player(player=1)[:n_policies]
            assert (len(metanash_probs_1) == len(policy_specs_1)), f'len(metanash_probs_1): {len(metanash_probs_1)}, len(policy_specs_1): {len(policy_specs_1)}'
            assert (len(metanash_probs_0) == len(policy_specs_0))
            assert (len(policy_specs_0) == len(policy_specs_1))
            br_checkpoint_paths = []
            metanash_weights = []
            for (spec_0, prob_0, spec_1, prob_1) in zip(policy_specs_0, metanash_probs_0, policy_specs_1, metanash_probs_1):
                br_checkpoint_paths.append((spec_0.metadata['checkpoint_path'], spec_1.metadata['checkpoint_path']))
                metanash_weights.append((prob_0, prob_1))
            exploitability_this_gen = psro_measure_exploitability_nonlstm(br_checkpoint_path_tuple_list=br_checkpoint_paths, metanash_weights=metanash_weights, set_policy_weights_fn=set_policy_weights, rllib_policies=policies, poker_game_version=poker_game_version, open_spiel_env_config=openspiel_env_config)
            write_exploitability_to_cache(cache_dir=cache_dir, policy_num=i, exploitability=exploitability_this_gen)
        else:
            # NOTE(review): if the cache misses AND this generation is skipped by
            # eval_every_nth_entry, exploitability_this_gen is None here -- confirm
            # downstream consumers tolerate None entries.
            exploitability_this_gen = exploitability_cached
        print(f'{n_policies} policies, {exploitability_this_gen} exploitability')
        # Cumulative training cost: add this generation's BR training metadata.
        policy_spec_added_this_gen = [payoff_table.get_spec_for_player_and_pure_strat_index(player=p, pure_strat_index=i) for p in range(2)]
        latest_policy_steps = sum((policy_spec_added_this_gen[p].metadata['timesteps_training_br'] for p in range(2)))
        latest_policy_episodes = sum((policy_spec_added_this_gen[p].metadata['episodes_training_br'] for p in range(2)))
        if (i > 0):
            total_steps_this_generation = (latest_policy_steps + total_steps_per_generation[(i - 1)])
            total_episodes_this_generation = (latest_policy_episodes + total_episodes_per_generation[(i - 1)])
        else:
            total_steps_this_generation = latest_policy_steps
            total_episodes_this_generation = latest_policy_episodes
        exploitability_per_generation.append(exploitability_this_gen)
        total_steps_per_generation.append(total_steps_this_generation)
        total_episodes_per_generation.append(total_episodes_this_generation)
        num_policies_per_generation.append(n_policies)
    stats_out = {'num_policies': num_policies_per_generation, 'exploitability': exploitability_per_generation, 'timesteps': total_steps_per_generation, 'episodes': total_episodes_per_generation}
    return stats_out
class VehicleData(ObjectData):
    """Vehicle entity: extends the base ObjectData position with orientation
    and extent, and tags itself with type 'vehicle'."""
    def __init__(self, id, x, y, o, l, w):
        # id/x/y handling is delegated to the ObjectData base class.
        super(VehicleData, self).__init__(id, x, y)
        self.o = o  # orientation -- units (deg/rad) not visible here; confirm with caller
        self.l = l  # presumably vehicle length -- TODO confirm
        self.w = w  # presumably vehicle width -- TODO confirm
        self.type = 'vehicle'
def parse_args():
    """Parse command-line arguments for the training script.

    Returns:
        argparse.Namespace with all options; `local_rank` is reconciled
        with the LOCAL_RANK environment variable for distributed launch.

    Raises:
        ValueError: if neither --dataset_name nor --train_data_dir is given.
    """
    parser = argparse.ArgumentParser(description='Simple example of a training script.')
    # --- model source ---
    parser.add_argument('--pretrained_model_name_or_path', type=str, default=None, required=True, help='Path to pretrained model or model identifier from huggingface.co/models.')
    parser.add_argument('--revision', type=str, default=None, required=False, help='Revision of pretrained model identifier from huggingface.co/models.')
    parser.add_argument('--variant', type=str, default=None, help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16")
    # --- dataset selection / columns ---
    parser.add_argument('--dataset_name', type=str, default=None, help='The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private, dataset). It can also be a path pointing to a local copy of a dataset in your filesystem, or to a folder containing files that Datasets can understand.')
    parser.add_argument('--dataset_config_name', type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.")
    parser.add_argument('--train_data_dir', type=str, default=None, help='A folder containing the training data. Folder contents must follow the structure described in In particular, a `metadata.jsonl` file must exist to provide the captions for the images. Ignored if `dataset_name` is specified.')
    parser.add_argument('--image_column', type=str, default='image', help='The column of the dataset containing an image.')
    parser.add_argument('--caption_column', type=str, default='text', help='The column of the dataset containing a caption or a list of captions.')
    parser.add_argument('--max_train_samples', type=int, default=None, help='For debugging purposes or quicker training, truncate the number of training examples to this value if set.')
    # --- output / caching / reproducibility ---
    parser.add_argument('--output_dir', type=str, default='sd-model-finetuned', help='The output directory where the model predictions and checkpoints will be written.')
    parser.add_argument('--cache_dir', type=str, default=None, help='The directory where the downloaded models and datasets will be stored.')
    parser.add_argument('--seed', type=int, default=0, help='A seed for reproducible training.')
    # --- preprocessing ---
    parser.add_argument('--resolution', type=int, default=512, help='The resolution for input images, all the images in the train/validation dataset will be resized to this resolution')
    parser.add_argument('--center_crop', default=False, action='store_true', help='Whether to center crop the input images to the resolution. If not set, the images will be randomly cropped. The images will be resized to the resolution first before cropping.')
    parser.add_argument('--random_flip', action='store_true', help='whether to randomly flip images horizontally')
    # --- training schedule / optimizer ---
    parser.add_argument('--train_batch_size', type=int, default=16, help='Batch size (per device) for the training dataloader.')
    parser.add_argument('--num_train_epochs', type=int, default=100)
    parser.add_argument('--max_train_steps', type=int, default=None, help='Total number of training steps to perform. If provided, overrides num_train_epochs.')
    parser.add_argument('--learning_rate', type=float, default=0.0001, help='Initial learning rate (after the potential warmup period) to use.')
    parser.add_argument('--scale_lr', action='store_true', default=False, help='Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.')
    parser.add_argument('--lr_scheduler', type=str, default='constant', help='The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"]')
    parser.add_argument('--adam_beta1', type=float, default=0.9, help='The beta1 parameter for the Adam optimizer.')
    parser.add_argument('--adam_beta2', type=float, default=0.999, help='The beta2 parameter for the Adam optimizer.')
    parser.add_argument('--adam_weight_decay', type=float, default=0.01, help='Weight decay to use.')
    parser.add_argument('--adam_epsilon', type=float, default=1e-08, help='Epsilon value for the Adam optimizer')
    parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Max gradient norm.')
    # --- hub / logging / precision / distributed ---
    parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the model to the Hub.')
    parser.add_argument('--hub_token', type=str, default=None, help='The token to use to push to the Model Hub.')
    parser.add_argument('--hub_model_id', type=str, default=None, help='The name of the repository to keep in sync with the local `output_dir`.')
    parser.add_argument('--logging_dir', type=str, default='logs', help='[TensorBoard]( log directory. Will default to *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***.')
    parser.add_argument('--report_to', type=str, default='tensorboard', help='The integration to report the results and logs to. Supported platforms are `"tensorboard"` (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.')
    parser.add_argument('--mixed_precision', type=str, default='no', choices=['no', 'fp16', 'bf16'], help='Whether to use mixed precision. Choosebetween fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.and an Nvidia Ampere GPU.')
    parser.add_argument('--local_rank', type=int, default=(- 1), help='For distributed training: local_rank')
    parser.add_argument('--from_pt', action='store_true', default=False, help='Flag to indicate whether to convert models from PyTorch.')
    args = parser.parse_args()
    # Launchers (torchrun etc.) communicate rank via the environment; the env
    # value wins over the CLI flag when they disagree.
    env_local_rank = int(os.environ.get('LOCAL_RANK', (- 1)))
    if ((env_local_rank != (- 1)) and (env_local_rank != args.local_rank)):
        args.local_rank = env_local_rank
    if ((args.dataset_name is None) and (args.train_data_dir is None)):
        raise ValueError('Need either a dataset name or a training folder.')
    return args
class Block(Module):
    """Two unpadded 3x3 convolutions with a ReLU in between.

    Because no padding is used, each conv shrinks the spatial dims by 2
    (4 total per block); channel count changes only at the first conv.
    """
    def __init__(self, inChannels, outChannels):
        super().__init__()
        self.conv1 = Conv2d(inChannels, outChannels, 3)
        self.relu = ReLU()
        self.conv2 = Conv2d(outChannels, outChannels, 3)

    def forward(self, x):
        out = self.conv1(x)
        out = self.relu(out)
        return self.conv2(out)
# NOTE(review): this bare attribute access is a no-op -- nothing is called or
# registered. It looks like a registry decorator that lost its '@' (i.e.
# '@_manager.LOSSES.add_module' applied to the class below); confirm against
# the registry's intended usage before changing it.
_manager.LOSSES.add_module
class DepthHints_PhotoLoss(nn.Module):
    """Photometric reprojection loss with optional depth-hints supervision
    and automasking.

    All *_n arguments are key templates into the `outputs` dict, formatted
    with the camera `side` at call time (e.g. ``name.format(side)``).

    Args:
        preds_n: templates for reprojected prediction images.
        idents_n: templates for identity (un-warped) source images; used
            only when `automask` is True (one per entry of preds_n).
        target_n: template for the target image.
        hints_n: template for hint-based reprojections; None disables hints.
        hints_depth / pred_depth: templates for the hint and predicted
            depth maps (used to build the hints proxy loss).
        automask: mask pixels where the identity loss is smallest.
        minproj: take the per-pixel minimum over reprojections.
        device: device for the SSIM module and the tie-breaking noise.
    """
    def __init__(self, preds_n, idents_n, target_n, hints_n=None, hints_depth=None, pred_depth=None, automask=True, minproj=True, device='cpu'):
        super().__init__()
        # Kept for checkpoint/config introspection (captures ctor arguments).
        self.init_opts = locals()
        self.preds_n = preds_n
        self.idents_n = idents_n
        self.target_n = target_n
        self.hints_n = hints_n
        self.hints_depth = hints_depth
        self.pred_depth = pred_depth
        self.automask = automask
        self.minproj = minproj
        self.device = device
        self.ssim = SSIM().to(device)
    def forward(self, outputs, side):
        """Return the per-pixel loss map for camera `side` from `outputs`."""
        self.target = outputs[self.target_n.format(side)]
        if (self.hints_n is not None):
            # Photometric loss of the hint reprojection; pixels with no valid
            # hint depth are pushed out of the later min-selection with a
            # large constant.
            depthhints = outputs[self.hints_n.format(side)]
            hints_loss_map = self._compute_photometric(depthhints)
            hints_depth = outputs[self.hints_depth.format(side)]
            pred_depth = outputs[self.pred_depth.format(side)]
            hints_mask = (hints_depth > 0)
            hints_loss_map[(~ hints_mask)] = 1000
        pred_loss_maps = []
        if self.automask:
            ident_loss_maps = []
        for (idx, pred_name) in enumerate(self.preds_n):
            pred = outputs[pred_name.format(side)]
            loss_map = self._compute_photometric(pred)
            pred_loss_maps.append(loss_map)
            if self.automask:
                # Identity loss: photometric error of the *un-warped* frame.
                ident = outputs[self.idents_n[idx].format(side)]
                ident_map = self._compute_photometric(ident)
                ident_loss_maps.append(ident_map)
        pred_loss_maps = torch.cat(pred_loss_maps, dim=1)
        if self.minproj:
            (pred_loss_map, _) = pred_loss_maps.min(dim=1, keepdim=True)
        else:
            # NOTE(review): Tensor.mean(dim=...) returns a tensor, not a
            # (values, indices) pair -- this unpacking would raise if
            # minproj=False. Confirm whether this branch is ever exercised.
            (pred_loss_map, _) = pred_loss_maps.mean(dim=1, keepdim=True)
        if self.automask:
            ident_loss_maps = torch.cat(ident_loss_maps, dim=1)
            if self.minproj:
                (ident_loss_map, _) = ident_loss_maps.min(dim=1, keepdim=True)
            else:
                ident_loss_map = ident_loss_maps.mean(dim=1, keepdim=True)
        if self.automask:
            # Tiny noise -- presumably to break exact ties between identity
            # and reprojection losses in the min-selection below; confirm.
            ident_loss_map += (torch.randn(ident_loss_map.shape).to(self.device) * 1e-05)
            # Channel order matters: 0=identity, 1=prediction, 2=hints.
            loss_maps = torch.cat([ident_loss_map, pred_loss_map], dim=1)
            if (self.hints_n is not None):
                loss_maps = torch.cat([loss_maps, hints_loss_map], dim=1)
                (_, select_idx) = loss_maps.min(dim=1, keepdim=True)
                auto_mask = (select_idx != 0).float()
                hints_mask = (select_idx == 2).float()
                final_loss_map = (pred_loss_map * auto_mask.detach())
                final_loss_map += self._compute_hints_proxy(pred_depth, hints_depth, hints_mask)
            else:
                (_, select_idx) = loss_maps.min(dim=1, keepdim=True)
                auto_mask = (select_idx != 0).float()
                final_loss_map = (pred_loss_map * auto_mask.detach())
        else:
            loss_maps = pred_loss_map
            if (self.hints_n is not None):
                # Without automasking, channel 1 is the hints map.
                loss_maps = torch.cat([loss_maps, hints_loss_map], dim=1)
                (_, select_idx) = loss_maps.min(dim=1, keepdim=True)
                hints_mask = (select_idx == 1).float()
                final_loss_map = pred_loss_map
                final_loss_map += self._compute_hints_proxy(pred_depth, hints_depth, hints_mask)
            else:
                final_loss_map = pred_loss_map
        return final_loss_map
    def _compute_photometric(self, pred):
        """Standard 0.85*SSIM + 0.15*L1 photometric error vs self.target."""
        abs_diff = torch.abs((pred - self.target))
        l1_loss = abs_diff.mean(1, True)
        ssim_loss = self.ssim(pred, self.target).mean(1, True)
        photometric_loss = ((0.85 * ssim_loss) + (0.15 * l1_loss))
        return photometric_loss
    def _compute_hints_proxy(self, pred, target, mask):
        """Log-L1 depth regression toward the hint depth, masked to pixels
        where the hint reprojection won the min-selection."""
        depth_hint_loss = (torch.log((torch.abs((target - pred)) + 1)) * mask)
        return depth_hint_loss
def main():
    """Read the file named by sys.argv[1] line by line, normalize punctuation
    and whitespace, and print the result (a bare newline is echoed as an
    empty output line; lines that normalize to nothing are dropped).

    Fix: the original compared strings with `is` / `is not`, which tests
    object identity and only works by accident of CPython string interning;
    value comparison (`==` / `!=`) is the correct operation.
    """
    with open(sys.argv[1], 'r') as input_file:
        for line in input_file:
            if line == '\n':
                print('')
            else:
                line = _run_split_on_punc(line)
                line = replace_multi_whitespaces(line)
                if line != '':
                    print(line)
class Bottleneck(nn.Module):
    """ResNet-style bottleneck block (1x1 reduce -> 3x3 -> 1x1 expand).

    Downsampling is done by average pooling (both in the main path and in
    the shortcut) instead of strided convolution; the 3x3 conv supports
    dilation with matching padding so spatial size is preserved.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super().__init__()
        width_out = planes * self.expansion
        # 1x1 channel reduction (bias-free; BN follows).
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # 3x3 (possibly dilated) conv; padding chosen to keep spatial size.
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=(1 * dilation), bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes)
        # Spatial downsampling via pooling; identity when stride == 1.
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
        # 1x1 channel expansion to planes * expansion.
        self.conv3 = nn.Conv2d(planes, width_out, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(width_out)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = None
        self.stride = stride
        # Shortcut must match the main path's shape: pool + 1x1 conv + BN.
        if stride > 1 or inplanes != width_out:
            self.downsample = nn.Sequential(OrderedDict([
                ('-1', nn.AvgPool2d(stride)),
                ('0', nn.Conv2d(inplanes, width_out, 1, stride=1, bias=False)),
                ('1', nn.BatchNorm2d(width_out)),
            ]))

    def forward(self, x: torch.Tensor):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(self.avgpool(out)))
        out += identity
        return self.relu(out)
def clean_input_utterance(input_text):
    """Strip annotation-like tokens from an utterance and lowercase it.

    A whitespace token is dropped if it starts with '<' or '[', ends with
    '>' or ']', or contains '(' anywhere. The filters are applied in that
    order and the survivors are re-joined with single spaces.
    """
    tokens = input_text.strip().split()
    tokens = [t for t in tokens if t[0] != '<']
    tokens = [t for t in tokens if t[-1] != '>']
    tokens = [t for t in tokens if t[0] != '[']
    tokens = [t for t in tokens if t[-1] != ']']
    tokens = [t for t in tokens if '(' not in t]
    return ' '.join(tokens).lower()
def tryload(stream):
    """Best-effort extraction of an id list from a text stream.

    Tries, in order: parse as JSON and take dict keys; failing that,
    treat the parsed JSON as a list of records and collect 'img_id';
    if JSON parsing itself fails, eval the raw text as a Python literal;
    finally fall back to splitting the raw text on newlines. Each
    fallback fires when the previous attempt raises.
    """
    try:
        result = json.load(stream)
        try:
            result = list(result.keys())
        except Exception:
            # Not a mapping: assume a list of records carrying 'img_id'.
            result = [record['img_id'] for record in result]
    except Exception:
        try:
            # WARNING: eval on stream contents -- only safe for trusted input.
            result = eval(stream.read())
        except Exception:
            result = stream.read().split('\n')
    return result
class Observation(NamedTuple):
    """Immutable per-step observation for a grid/maze-style environment.

    NOTE(review): field semantics below are inferred from names -- confirm
    against the environment implementation.
    """
    agent_position: Position  # current agent coordinates
    target_position: Position  # goal coordinates
    walls: chex.Array  # presumably a wall-occupancy grid -- TODO confirm shape/dtype
    action_mask: chex.Array  # presumably a legal-action indicator -- TODO confirm
    step_count: jnp.int32  # steps elapsed in the current episode
class Frequency(object):
    """Periodic trigger that fires every N iterations and/or every fixed
    wall-clock interval, whichever comes first.

    Call `reset()` once before the first `advance()`; `advance()` returns
    True (and self-resets) whenever either threshold is crossed. A negative
    `iter` means "that many epochs" and is scaled by `set_train_set_len`.
    """

    def __init__(self, iter=0, hours=0, minutes=0, seconds=0):
        self.freq_iter = iter
        delta = datetime.timedelta(hours=hours, minutes=minutes, seconds=seconds)
        self.freq_time_delta = delta.total_seconds()
        # -1 so the mandatory initial reset() brings the counter to 0.
        self.n_resets = -1
        if self.freq_iter < 0 and self.freq_time_delta < 0:
            raise Exception('invalid Frequency, will never be True')

    def set_train_set_len(self, train_set_len):
        """Convert a negative (epoch-count) freq_iter into an iteration count."""
        if self.freq_iter < 0:
            self.freq_iter = -self.freq_iter * train_set_len

    def reset(self):
        """Start a new period: bump the reset counter, stamp the clock."""
        self.n_resets += 1
        self.start_time = time.time()
        self.current_iter = 0

    def advance(self):
        """Record one iteration; return True (and reset) if a threshold fired."""
        self.current_time = time.time()
        self.current_iter += 1
        hit_iter = self.freq_iter > 0 and self.current_iter >= self.freq_iter
        hit_time = (self.freq_time_delta > 0
                    and (self.current_time - self.start_time) > self.freq_time_delta)
        if hit_iter or hit_time:
            self.reset()
            return True
        return False

    def get_elapsed_time(self):
        """Seconds between the last reset and the last advance."""
        return self.current_time - self.start_time

    def get_item_time(self):
        """Mean seconds per iteration in the current period."""
        return self.get_elapsed_time() / (self.current_iter + 1)

    def get_remaining_time(self):
        """Estimated seconds until the next trigger (min of both criteria)."""
        by_iter = self.get_item_time() * (self.freq_iter - self.current_iter + 1)
        by_time = self.freq_time_delta - (self.current_time - self.start_time)
        if self.freq_iter > 0 and self.freq_time_delta > 0:
            return min(by_iter, by_time)
        if self.freq_iter > 0:
            return by_iter
        if self.freq_time_delta > 0:
            return by_time
        raise Exception('invalid Frequency')

    def get_total_time(self):
        """Estimated total seconds for a full period (min of both criteria)."""
        by_iter = self.get_item_time() * self.freq_iter
        if self.freq_iter > 0 and self.freq_time_delta > 0:
            return min(by_iter, self.freq_time_delta)
        if self.freq_iter > 0:
            return by_iter
        if self.freq_time_delta > 0:
            return self.freq_time_delta
        raise Exception('invalid Frequency')

    def get_elapsed_time_str(self, millis=True):
        return utils.format_seconds(self.get_elapsed_time(), millis=millis)

    def get_remaining_time_str(self, millis=True):
        return utils.format_seconds(self.get_remaining_time(), millis=millis)

    def get_percentage_str(self):
        """Progress through the current period, e.g. '42%'."""
        perc = self.get_elapsed_time() / self.get_total_time() * 100
        return f'{int(perc):02d}%'

    def get_str(self, percentage=True, elapsed=True, remaining=True, millis=False):
        """Compose a 'percent/elapsed/remaining' progress string."""
        parts = []
        if percentage:
            parts.append(self.get_percentage_str())
        if elapsed:
            parts.append(self.get_elapsed_time_str(millis=millis))
        if remaining:
            parts.append(self.get_remaining_time_str(millis=millis))
        return '/'.join(parts)
def P3D_ResNet50(**kwargs):
    """Build a ResNet50-3D backbone using P3D-C pseudo-3D blocks.

    Pseudo-3D convolutions are inserted at fixed block indices of stages
    2 and 3; no non-local or self-attention blocks are used anywhere.
    Extra keyword arguments are forwarded to ResNet503D.
    """
    c3d_idx = [
        [],          # stage 1: purely 2D
        [0, 2],      # stage 2
        [0, 2, 4],   # stage 3
        [],          # stage 4: purely 2D
    ]
    nl_idx = [[] for _ in range(4)]  # no non-local blocks
    sa_idx = [[] for _ in range(4)]  # no self-attention blocks
    return ResNet503D(AP3D.P3DC, c3d_idx, nl_idx, sa_idx, **kwargs)
def record_csv(filepath, row):
    """Append a single record to a CSV file, creating the file if needed.

    Args:
        filepath: path of the CSV file to append to.
        row: iterable of values written as one CSV row.

    Fix: open with newline='' as required by the csv module, so the writer
    controls line endings itself (prevents blank interleaved rows on
    Windows).
    """
    with open(filepath, 'a', newline='') as f:
        csv.writer(f).writerow(row)
    return
class ServerModel(nn.Module):
    """Sums a list of equally-shaped tensors element-wise and applies a
    sigmoid (e.g. the aggregation half of a split/vertical-FL setup --
    confirm against the training loop)."""

    def __init__(self) -> None:
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x: List[Tensor]):
        stacked = torch.stack(x)
        summed = stacked.sum(dim=0)
        return self.sigmoid(summed)
def validate_json_against_schema(json_dict, schema, err_msg=None):
    """Validate `json_dict` against a JSON schema.

    Args:
        json_dict: the deserialized JSON payload to validate.
        schema: either a registered schema name (str, looked up in the
            module-level _SCHEMAS registry with a cached validator) or a
            schema dict validated directly via jsonschema.
        err_msg: optional message for the raised error; a generic message
            is used when omitted.

    Raises:
        SchemaValidationError: when validation fails; the underlying
            jsonschema error is attached as __cause__ (summarized) and the
            full cause chain is logged at DEBUG level.
    """
    try:
        if isinstance(schema, str):
            # Named schema: use the pre-built validator for speed.
            schema_name = schema
            schema = _SCHEMAS[schema_name]
            validator = _get_validator(schema_name)
            validator.validate(json_dict)
        else:
            jsonschema.validate(json_dict, schema)
    except jsonschema.ValidationError as err:
        if (err_msg is None):
            err_msg = 'JSON failed validation. Set Qiskit log level to DEBUG for further information.'
        newerr = SchemaValidationError(err_msg)
        # Wrap the (potentially huge) jsonschema error in a summary cause.
        newerr.__cause__ = _SummaryValidationError(err)
        logger.debug('%s', _format_causes(err))
        raise newerr
def load_layers_(layer_lst: nn.ModuleList, opus_state: dict, converter, is_decoder=False):
    """Load converted OPUS-MT weights into every layer of `layer_lst` in place.

    Layer tags in the OPUS state dict are 1-based and prefixed with the
    side ('encoder_l1_', 'decoder_l1_', ...); each layer's state dict is
    produced by convert_encoder_layer and loaded strictly.
    """
    side = 'decoder' if is_decoder else 'encoder'
    for idx, layer in enumerate(layer_lst, start=1):
        layer_tag = f'{side}_l{idx}_'
        state_dict = convert_encoder_layer(opus_state, layer_tag, converter)
        layer.load_state_dict(state_dict, strict=True)
from dataclasses import asdict, dataclass  # needed by the dataclass below


@dataclass
class EsmFoldConfig:
    """Configuration for the ESMFold model head.

    Fix: this class relies on `__post_init__` and `dataclasses.asdict`,
    both of which only function on a @dataclass -- the decorator was
    missing, so instantiation produced an attribute-less object,
    `__post_init__` never ran, and `to_dict()` raised TypeError.

    Attributes mirror the declared fields; `trunk` is normalized to a
    TrunkConfig instance after init (from None or a plain dict).
    """
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: 'TrunkConfig' = None

    def __post_init__(self):
        # Normalize `trunk` so downstream code always sees a TrunkConfig.
        if (self.trunk is None):
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize to a plain dict; the trunk uses its own to_dict()."""
        output = asdict(self)
        output['trunk'] = self.trunk.to_dict()
        return output
class Predictor(pl.LightningModule):
    """LightningModule wrapping a downstream prediction head.

    Builds the architecture from `hparams.predictor.kwargs`, computes a
    prediction loss plus accuracy/error metrics, and logs them under
    '<split>/<stage>/<task>/<metric>' keys.
    """
    def __init__(self, hparams) -> None:
        super().__init__()
        self.save_hyperparameters(hparams)
        cfg_pred = self.hparams.predictor.kwargs
        # Architecture factory is resolved from config (project helper).
        Architecture = get_Architecture(cfg_pred.architecture, **cfg_pred.arch_kwargs)
        self.predictor = Architecture(self.hparams.data.shape)
        self.stage = self.hparams.stage
    def forward(self, z: torch.Tensor) -> torch.Tensor:
        """Return raw predictions (presumably logits -- confirm against
        prediction_loss) for representations `z`."""
        Y_pred = self.predictor(z)
        return Y_pred
    def step(self, batch: torch.Tensor) -> tuple[(torch.Tensor, dict)]:
        """Shared train/val/test step: forward + loss; returns (mean loss, logs)."""
        (x, y) = batch
        Y_hat = self(x)
        (loss, logs) = self.loss(Y_hat, y)
        return (loss.mean(), logs)
    def loss(self, Y_hat: torch.Tensor, y: torch.Tensor) -> tuple[(torch.Tensor, dict)]:
        """Compute per-sample loss plus accuracy/error metrics for logging."""
        loss = prediction_loss(Y_hat, y)
        logs = dict()
        logs['loss'] = loss.mean()
        logs['acc'] = accuracy(Y_hat.argmax(dim=(- 1)), y)
        logs['err'] = (1 - logs['acc'])
        return (loss, logs)
    def training_step(self, batch: torch.Tensor, batch_idx: torch.Tensor) -> Optional[torch.Tensor]:
        (loss, logs) = self.step(batch)
        self.log_dict({f'train/{self.stage}/{self.hparams.task}/{k}': v for (k, v) in logs.items()}, sync_dist=True)
        return loss
    def test_val_step(self, batch: torch.Tensor, batch_idx: int, mode: str) -> Optional[torch.Tensor]:
        """Common body for validation_step / test_step; `mode` prefixes the logs."""
        (loss, logs) = self.step(batch)
        self.log_dict({f'{mode}/{self.stage}/{self.hparams.task}/{k}': v for (k, v) in logs.items()}, sync_dist=True)
        return loss
    def validation_step(self, batch, batch_idx):
        return self.test_val_step(batch, batch_idx, 'val')
    def test_step(self, batch, batch_idx):
        return self.test_val_step(batch, batch_idx, 'test')
    def configure_optimizers(self):
        # Optimizer/scheduler construction is delegated to a project helper
        # that appends into the two lists in place.
        (optimizers, schedulers) = ([], [])
        append_optimizer_scheduler_(self.hparams.optimizer_pred, self.hparams.scheduler_pred, self.parameters(), optimizers, schedulers, name='lr_predictor')
        return (optimizers, schedulers)
class MrpcProcessor(DataProcessor):
    """Processor for the MRPC (GLUE) data set, read from TSV split files.

    Examples can be returned either as one flat list (combine=True) or
    grouped into one list per label (combine=False). Test examples get a
    placeholder label (the first label) since gold labels are unavailable.
    """

    def get_train_examples(self, data_dir, segment='train', combine=True):
        """Build examples from train.tsv."""
        lines = self._read_tsv(os.path.join(data_dir, 'train.tsv'))
        return self._create_examples(lines, 'train', combine=combine)

    def get_dev_examples(self, data_dir, segment='dev', combine=False):
        """Build examples from dev.tsv (grouped per label by default)."""
        lines = self._read_tsv(os.path.join(data_dir, 'dev.tsv'))
        return self._create_examples(lines, 'dev', combine=combine)

    def get_test_examples(self, data_dir, segment='test'):
        """Build examples from test.tsv with placeholder labels."""
        lines = self._read_tsv(os.path.join(data_dir, 'test.tsv'))
        return self._create_examples(lines, 'test', is_test=True)

    def get_labels(self):
        """Binary paraphrase labels."""
        return ['0', '1']

    def _create_examples(self, lines, set_type, is_test=False, combine=True):
        """Turn TSV rows into InputExamples (header row is skipped).

        The two sentence columns are the last two fields; the gold label is
        the first field (or a placeholder for test data).
        """
        labels = self.get_labels()
        per_label = [[] for _ in labels]
        flat = []
        for idx, line in enumerate(lines):
            if idx == 0:
                continue  # header row
            guid = '%s-%s' % (set_type, idx)
            text_a, text_b = line[-2], line[-1]
            label = labels[0] if is_test else line[0]
            example = InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)
            per_label[labels.index(label)].append(example)
            flat.append(example)
        return flat if combine else per_label
def arrays2others(iter, feature_cols, label_cols, shard_size=None, generate_func=None):
    """Generator that re-packs (feature, label) rows into shards.

    Consumes rows from `iter` (NOTE: parameter shadows the builtin), where
    each row is (features, labels) and either side may be a single ndarray,
    a list/tuple of ndarrays, or a dict of ndarrays. When `shard_size` is
    set, pre-allocated numpy shards of that size are filled and yielded via
    `generate_func(feature_lists, label_lists, feature_cols, label_cols)`;
    a final partial shard is trimmed to the leftover count. Without
    shard_size, rows are appended to plain lists and yielded once at the end.
    """
    def init_result_lists(first_row, cols):
        # Allocate result containers shaped after the first row seen.
        if shard_size:
            if isinstance(first_row, np.ndarray):
                return [np.empty(((shard_size,) + first_row.shape), first_row.dtype)]
            if isinstance(first_row, dict):
                res = dict()
                for (k, _) in first_row.items():
                    res[k] = np.empty(((shard_size,) + first_row[k].shape), first_row[k].dtype)
                return res
            else:
                return [np.empty(((shard_size,) + r.shape), r.dtype) for r in first_row]
        else:
            # NOTE(review): the non-shard path always builds a list-of-lists,
            # even when rows are dicts -- confirm dict rows are only used with
            # shard_size set.
            return [[] for r in cols]
    def add_row(data, results, current):
        # Write one row into the shard buffers (or append in list mode).
        if (not isinstance(data, (list, tuple, dict))):
            arrays = [data]
        else:
            arrays = data
        iter = (arrays.items() if isinstance(arrays, dict) else enumerate(arrays))
        for (i, arr) in iter:
            if shard_size:
                # Position wraps within the current shard.
                current = (current % shard_size)
                results[i][current] = arr
            else:
                results[i].append(arr)
    feature_lists = None
    label_lists = None
    counter = 0
    for row in iter:
        if (feature_lists is None):
            feature_lists = init_result_lists(row[0], feature_cols)
        add_row(row[0], feature_lists, counter)
        if (label_cols is not None):
            if (label_lists is None):
                label_lists = init_result_lists(row[1], label_cols)
            add_row(row[1], label_lists, counter)
        counter += 1
        # Emit a full shard and start a fresh one.
        if (shard_size and ((counter % shard_size) == 0)):
            (yield generate_func(feature_lists, label_lists, feature_cols, label_cols))
            feature_lists = None
            label_lists = None
    # Flush the trailing partial shard (or the whole list in non-shard mode).
    if (feature_lists is not None):
        if shard_size:
            rest_size = (counter % shard_size)
            if isinstance(feature_lists, dict):
                feature_lists = {k: v[0:rest_size] for (k, v) in feature_lists.items()}
            else:
                feature_lists = [feature[0:rest_size] for feature in feature_lists]
            if (label_cols is not None):
                if isinstance(label_lists, dict):
                    label_lists = {k: v[0:rest_size] for (k, v) in label_lists.items()}
                else:
                    label_lists = [label[0:rest_size] for label in label_lists]
        (yield generate_func(feature_lists, label_lists, feature_cols, label_cols))
class Utils():
    """Aggregator for analysis utilities; appears to be used as a singleton
    (see the warning text referencing Utils.instance()).

    Methods are pulled in from sibling modules via class-level imports, so
    they become bound methods of this class.
    """
    def __init__(self, parameters=None):
        # Falling back to default Parameters is allowed but discouraged.
        if (parameters is None):
            logging.warning('Utils was initialized without specific parameters.')
            warnings.warn('No parameters were specified and default parameters are used, this may cause unexpected behavior. It is more save to pass parameters when instanciating Utils singleton for the first time. Do it with Utils.instance(parameters=your_parameters)')
        self.p = (Parameters() if (parameters is None) else parameters)
    '\n : Parameters for utils can be changed manually\n '
    def setParameters(self, parameters):
        """Replace the parameter object used by all utility methods."""
        self.p = parameters
    '\n : Import functions from files\n '
    # Class-level relative imports: the imported functions become methods.
    from .spikes import getSpikesFromActivity, cor, getFilteredSpikes, fano, cv, getGaussianFilteredSpikes, getSingleExponentialFilteredSpikes, getHoltDoubleExponentialFilteredSpikes
    from .weights import getSpectralRadius, recombineExWeightMatrix, getSupportWeightsMask
    from .target import loadTarget, prepareDataset, estimateMovement, estimateMultipleTrajectories3D
    from .misc import trainOLS, pca
def logTheLossHook(total_loss, n):
    """Create a TF1 LoggingTensorHook that prints `total_loss` every n steps."""
    tensors_to_log = {'\t Loss ': total_loss}
    return tf.compat.v1.train.LoggingTensorHook(tensors_to_log, every_n_iter=n)
def main(args):
    """Interactive translation loop (fairseq-style).

    Loads a model ensemble, reads input from `args.input` in buffered
    batches, runs inference, and prints source (S-), hypothesis (H-),
    positional scores (P-) and optionally alignments (A-) to stdout.
    Scores are converted to base 2 for output.
    """
    utils.import_user_module(args)
    if (args.buffer_size < 1):
        args.buffer_size = 1
    if ((args.max_tokens is None) and (args.max_sentences is None)):
        args.max_sentences = 1
    assert ((not args.sampling) or (args.nbest == args.beam)), '--sampling requires --nbest to be equal to --beam'
    assert ((not args.max_sentences) or (args.max_sentences <= args.buffer_size)), '--max-sentences/--batch-size cannot be larger than --buffer-size'
    logger.info(args)
    use_cuda = (torch.cuda.is_available() and (not args.cpu))
    task = tasks.setup_task(args)
    # Ensemble: one or more checkpoints separated by the OS path separator.
    logger.info('loading model(s) from {}'.format(args.path))
    (models, _model_args) = checkpoint_utils.load_model_ensemble(args.path.split(os.pathsep), arg_overrides=eval(args.model_overrides), task=task)
    src_dict = task.source_dictionary
    tgt_dict = task.target_dictionary
    for model in models:
        model.make_generation_fast_(beamable_mm_beam_size=(None if args.no_beamable_mm else args.beam), need_attn=args.print_alignment)
        if args.fp16:
            model.half()
        if use_cuda:
            model.cuda()
    generator = task.build_generator(args)
    # Optional tokenizer/BPE wrappers; encode before inference, decode after.
    tokenizer = encoders.build_tokenizer(args)
    bpe = encoders.build_bpe(args)
    def encode_fn(x):
        if (tokenizer is not None):
            x = tokenizer.encode(x)
        if (bpe is not None):
            x = bpe.encode(x)
        return x
    def decode_fn(x):
        # Inverse of encode_fn, applied in reverse order.
        if (bpe is not None):
            x = bpe.decode(x)
        if (tokenizer is not None):
            x = tokenizer.decode(x)
        return x
    align_dict = utils.load_align_dict(args.replace_unk)
    max_positions = utils.resolve_max_positions(task.max_positions(), *[model.max_positions() for model in models])
    if (args.buffer_size > 1):
        logger.info('Sentence buffer size: %s', args.buffer_size)
    logger.info('NOTE: hypothesis and token scores are output in base 2')
    logger.info('Type the input sentence and press return:')
    start_id = 0
    for inputs in buffered_read(args.input, args.buffer_size):
        results = []
        for batch in make_batches(inputs, args, task, max_positions, encode_fn):
            src_tokens = batch.src_tokens
            src_lengths = batch.src_lengths
            if use_cuda:
                src_tokens = src_tokens.cuda()
                src_lengths = src_lengths.cuda()
            sample = {'net_input': {'src_tokens': src_tokens, 'src_lengths': src_lengths}}
            translations = task.inference_step(generator, models, sample)
            for (i, (id, hypos)) in enumerate(zip(batch.ids.tolist(), translations)):
                src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad())
                results.append(((start_id + id), src_tokens_i, hypos))
        # Print results in input order (batching may have reordered them).
        for (id, src_tokens, hypos) in sorted(results, key=(lambda x: x[0])):
            if (src_dict is not None):
                src_str = src_dict.string(src_tokens, args.remove_bpe)
                print('S-{}\t{}'.format(id, src_str))
            for hypo in hypos[:min(len(hypos), args.nbest)]:
                (hypo_tokens, hypo_str, alignment) = utils.post_process_prediction(hypo_tokens=hypo['tokens'].int().cpu(), src_str=src_str, alignment=hypo['alignment'], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=args.remove_bpe)
                hypo_str = decode_fn(hypo_str)
                # Convert natural-log model scores to base 2 for display.
                score = (hypo['score'] / math.log(2))
                print('H-{}\t{}\t{}'.format(id, score, hypo_str))
                print('P-{}\t{}'.format(id, ' '.join(map((lambda x: '{:.4f}'.format(x)), hypo['positional_scores'].div_(math.log(2)).tolist()))))
                if args.print_alignment:
                    alignment_str = ' '.join(['{}-{}'.format(src, tgt) for (src, tgt) in alignment])
                    print('A-{}\t{}'.format(id, alignment_str))
        # Keep ids globally unique across buffers.
        start_id += len(inputs)
class Conv2D(nn.Module):
    """Conv block: zero-pad -> Conv2d -> GroupNorm(16 groups) -> ELU.

    Padding of kernel_size // 2 on all four sides keeps spatial dimensions
    for odd kernels at stride 1. out_channels must be divisible by 16 for
    the GroupNorm.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride):
        super().__init__()
        self.kernel_size = kernel_size
        self.conv_base = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride)
        self.pad = nn.ConstantPad2d([kernel_size // 2] * 4, value=0)
        self.normalize = torch.nn.GroupNorm(16, out_channels)
        self.activ = nn.ELU(inplace=True)

    def forward(self, x):
        out = self.pad(x)
        out = self.conv_base(out)
        out = self.normalize(out)
        return self.activ(out)
class Isotope(Ion):
    """An Ion that must represent a specific isotope.

    Construction defers entirely to Ion, then checks is_isotope().
    NOTE(review): the assert is stripped under `python -O`, so this guard
    disappears in optimized runs -- raise ValueError if it must always hold.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # is_isotope() semantics are defined on Ion (not visible here).
        assert self.is_isotope()
class SimpleCollectionPreProcessor():
    """Tokenize lines of a separator-delimited text collection into JSON
    records of the form {'text_id': ..., 'text': [token ids]}.

    NOTE(review): declared like a dataclass (annotated fields with
    defaults) but no @dataclass decorator is visible here -- confirm how
    instances are constructed.
    """
    tokenizer: PreTrainedTokenizer
    separator: str = '\t'
    max_length: int = 128

    def process_line(self, line: str):
        """Encode one line ('id<sep>text...') and return it as a JSON string."""
        fields = line.strip().split(self.separator)
        text_id, text_fields = fields[0], fields[1:]
        token_ids = self.tokenizer.encode(
            ' '.join(text_fields),
            add_special_tokens=False,
            max_length=self.max_length,
            truncation=True,
        )
        return json.dumps({'text_id': text_id, 'text': token_ids})
class KerasLayer(Layer, InferShape, KerasCreator):
    """Base wrapper for Keras-style layers backed by a JVM object (jvalue).

    Only 'name' and 'bigdl_type' keyword arguments are accepted; anything
    else is reported via invalidInputError. bigdl_type defaults to 'float'.
    """
    def __init__(self, jvalue, *args, **kwargs):
        allowed_kwargs = {'name', 'bigdl_type'}
        for kwarg in kwargs.keys():
            if (kwarg not in allowed_kwargs):
                invalidInputError(False, 'Wrong argument for the layer:', kwarg)
        bigdl_type = kwargs.get('bigdl_type')
        if (not bigdl_type):
            bigdl_type = 'float'
        # NOTE(review): super(KerasCreator, self) deliberately skips
        # KerasCreator in the MRO -- presumably to reach Layer.__init__
        # directly; confirm against the class hierarchy before touching.
        super(KerasCreator, self).__init__(jvalue, bigdl_type, *args)
        name = kwargs.get('name')
        if name:
            self.set_name(name)
def process_sdf(sdf_path, table, progress=True, verbose=True):
    """Pair molecules from an SDF file with their fragment/linker rows.

    For each molecule in `sdf_path`, looks up matching (linker, fragments)
    SMILES rows in `table` (matched on the molecule's _Name property),
    prepares fragment/linker mol objects, and combines multi-part fragments
    and linkers into single RDKit molecules. Rows whose preparation raises
    are skipped (optionally logged).

    Returns:
        (molecules, fragments, linkers, DataFrame) with one aligned entry
        per successfully processed (molecule, linker, fragments) row; the
        DataFrame carries uuid, SMILES strings, anchor indices and a dummy
        'energy' of '0'.
    """
    supplier = list(Chem.SDMolSupplier(sdf_path))
    molecules = []
    fragments = []
    linkers = []
    out_table = []
    uuid = 0
    supplier = (tqdm(supplier, total=len(supplier)) if progress else supplier)
    for mol in supplier:
        mol_name = mol.GetProp('_Name')
        mol_smi = Chem.MolToSmiles(mol)
        # Rename the mol to its canonical SMILES for downstream consumers.
        mol.SetProp('_Name', mol_smi)
        for (linker_smi, frags_smi) in table[(table.molecule == mol_name)][['linker', 'fragments']].values:
            try:
                (frags, linker) = prepare_fragments_and_linker(frags_smi, linker_smi, mol)
            except Exception as e:
                # Best-effort: skip rows that cannot be prepared.
                if verbose:
                    print(f'{mol_name} | {linker_smi} | {frags_smi} : {e}')
                continue
            # Merge all fragment parts into one (disconnected) molecule.
            combined_frag = None
            for frag in frags:
                if (combined_frag is None):
                    combined_frag = frag
                else:
                    combined_frag = Chem.CombineMols(combined_frag, frag)
            anchors_idx = get_anchors_idx(combined_frag)
            # Same merging for the linker parts.
            combined_link = None
            for link in linker:
                if (combined_link is None):
                    combined_link = link
                else:
                    combined_link = Chem.CombineMols(combined_link, link)
            molecules.append(mol)
            fragments.append(combined_frag)
            linkers.append(combined_link)
            out_table.append({'uuid': uuid, 'molecule': mol_smi, 'fragments': Chem.MolToSmiles(combined_frag), 'linker': Chem.MolToSmiles(combined_link), 'anchors': '-'.join(map(str, anchors_idx)), 'energy': '0'})
            uuid += 1
    return (molecules, fragments, linkers, pd.DataFrame(out_table))
class STConv3d(nn.Module):
    """Separable spatio-temporal 3D convolution.

    A spatial (1, k, k) conv followed by a temporal (k, 1, 1) conv, each
    with BatchNorm3d (eps=0.001) and ReLU. With dw_t_conv=True the
    temporal conv is depthwise (groups=out_planes).
    """
    def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0, dw_t_conv=False):
        super(STConv3d, self).__init__()
        # Spatial convolution (no bias: BN follows immediately).
        self.conv = nn.Conv3d(in_planes, out_planes,
                              kernel_size=(1, kernel_size, kernel_size),
                              stride=(1, stride, stride),
                              padding=(0, padding, padding), bias=False)
        self.bn = nn.BatchNorm3d(out_planes, eps=0.001)
        self.relu = nn.ReLU(inplace=True)
        # Temporal convolution, optionally depthwise.
        self.conv_t = nn.Conv3d(out_planes, out_planes,
                                kernel_size=(kernel_size, 1, 1),
                                stride=(stride, 1, 1),
                                padding=(padding, 0, 0),
                                groups=(out_planes if dw_t_conv else 1))
        self.bn_t = nn.BatchNorm3d(out_planes, eps=0.001)
        self.relu_t = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.relu(self.bn(self.conv(x)))
        return self.relu_t(self.bn_t(self.conv_t(out)))
def code(dtype):
    """Return the key in the module-level `dtypes` mapping whose value is `dtype`.

    Raises:
        ValueError: if `dtype` is not a value of `dtypes`.
    """
    # Iterate items() directly instead of indexing by key on every iteration.
    for key, value in dtypes.items():
        if value == dtype:
            return key
    raise ValueError(dtype)
class KipfNet(torch.nn.Module):
    """One Kipfblock followed by a Chebyshev mixing convolution.

    With `inout_skipconn` the raw input features are concatenated to the
    Kipfblock output before the mixing layer.
    """

    def __init__(self, num_features, num_classes, nh1=64, K=8, K_mix=2, inout_skipconn=False):
        super(KipfNet, self).__init__()
        self.inout_skipconn = inout_skipconn
        self.Kipfblock1 = Kipfblock(n_input=num_features, n_hidden=nh1, K=K)
        # The mixer also sees the raw inputs when the skip connection is on.
        mix_in = nh1 + num_features if inout_skipconn else nh1
        self.conv_mix = ChebConv(mix_in, num_classes, K=K_mix)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = self.Kipfblock1(x, edge_index)
        if self.inout_skipconn:
            x = torch.cat((x, data.x), 1)
        return self.conv_mix(x, edge_index)
def test_external_object_update_propagates_to_waiting_namespace_symbols():
    """Regression test: after all cells run, mutations/replacements of the
    external object `foo` should leave its reader cells ready, not waiting.
    """
    # Cell graph: foo wraps fakelib state; cells 2 and 4 read foo / x.
    cells = {0: 'import fakelib', 1: 'foo = fakelib.Foo()', 2: 'logging.info(foo.x)', 3: 'x = 42', 4: 'foo.x = x + 1', 5: 'x = 43', 6: 'foo = foo.set_x(10)'}
    with override_settings(mark_waiting_symbol_usages_unsafe=False):
        run_all_cells(cells)
    response = flow().check_and_link_multiple_cells()
    # No cell may be stuck waiting; the cells that read foo / x become ready.
    assert (response.waiting_cells == set()), ('got %s' % response.waiting_cells)
    assert (response.ready_cells == {2, 4})
def is_supported_instance(module):
    """Return True if `module` is one of the layer types this code supports.

    Presumably used to gate per-layer instrumentation (e.g. FLOPs/params
    counting) — confirm against the caller.
    """
    # Return the isinstance() result directly instead of if/return True/False.
    return isinstance(module, (
        torch.nn.Conv2d, torch.nn.ReLU, torch.nn.PReLU, torch.nn.ELU,
        torch.nn.LeakyReLU, torch.nn.ReLU6, torch.nn.Linear,
        torch.nn.MaxPool2d, torch.nn.AvgPool2d, torch.nn.BatchNorm2d,
        torch.nn.Upsample, nn.AdaptiveMaxPool2d, nn.AdaptiveAvgPool2d,
    ))
class RandomHyperparameterSweeper(Sweeper):
    """Sweeper that evaluates randomly sampled hyperparameter configurations."""

    def __init__(self, hyperparameters=None, default_kwargs=None):
        self._hyperparameters = hyperparameters or []
        self._validate_hyperparameters()
        self._default_kwargs = {} if default_kwargs is None else default_kwargs

    def _validate_hyperparameters(self):
        """Reject duplicate hyperparameter names."""
        seen = set()
        for hp in self._hyperparameters:
            if hp.name in seen:
                raise Exception("Hyperparameter '{0}' already added.".format(hp.name))
            seen.add(hp.name)

    def set_default_parameters(self, default_kwargs):
        self._default_kwargs = default_kwargs

    def generate_random_hyperparameters(self):
        """Sample one value per hyperparameter, nest dotted keys, merge defaults."""
        sampled = {hp.name: hp.generate() for hp in self._hyperparameters}
        nested = ppp.dot_map_dict_to_nested_dict(sampled)
        # Sampled values win over defaults on duplicate keys.
        return ppp.merge_recursive_dicts(nested, copy.deepcopy(self._default_kwargs), ignore_duplicate_keys_in_second_dict=True)

    def sweep_hyperparameters(self, function, num_configs):
        """Run `function` on `num_configs` random configs; return (score, kwargs) pairs."""
        results = []
        for _ in range(num_configs):
            kwargs = self.generate_random_hyperparameters()
            results.append((function(**kwargs), kwargs))
        return results
class BertTokenizer(PreTrainedTokenizer):
    """BERT tokenizer: optional basic tokenization followed by WordPiece."""
    # Class-level registries of vocab file names / pretrained model mappings.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, **kwargs):
        """Load the vocabulary file and configure basic/WordPiece tokenizers."""
        super().__init__(unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        # Reserve room for [CLS]/[SEP] (single) and [CLS]/[SEP]/[SEP] (pair).
        self.max_len_single_sentence = (self.max_len - 2)
        self.max_len_sentences_pair = (self.max_len - 3)
        if (not os.path.isfile(vocab_file)):
            raise ValueError("Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
        self.vocab = load_vocab(vocab_file)
        # Reverse mapping id -> token for decoding.
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
    def vocab_size(self):
        """Number of entries in the vocabulary.

        NOTE(review): upstream usually declares this as a @property — confirm
        how callers invoke it.
        """
        return len(self.vocab)
    def _tokenize(self, text):
        """Split `text` into WordPiece tokens (optionally basic-tokenized first)."""
        split_tokens = []
        if self.do_basic_tokenize:
            # Special tokens are protected from being split by the basic tokenizer.
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
                for sub_token in self.wordpiece_tokenizer.tokenize(token):
                    split_tokens.append(sub_token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens
    def _convert_token_to_id(self, token):
        """Map a token string to its vocabulary id (unknown -> unk id)."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """Map a vocabulary id back to its token string (unknown -> unk token)."""
        return self.ids_to_tokens.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        """Join tokens into text, undoing the WordPiece ' ##' continuation marks."""
        out_string = ' '.join(tokens).replace(' ##', '').strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build [CLS] A [SEP] or [CLS] A [SEP] B [SEP] model inputs."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a 0/1 mask with 1 at special-token positions."""
        if already_has_special_tokens:
            if (token_ids_1 is not None):
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formated with special tokens for the model.')
            # Mark existing [SEP]/[CLS] positions in the given sequence.
            return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for [CLS] A [SEP], 1 for the B [SEP] portion."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))
    def save_vocabulary(self, vocab_path):
        """Write the vocabulary, one token per line in id order; return the path."""
        index = 0
        if os.path.isdir(vocab_path):
            vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['vocab_file'])
        else:
            vocab_file = vocab_path
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for (token, token_index) in sorted(self.vocab.items(), key=(lambda kv: kv[1])):
                # Warn (but continue) if ids are not a gapless 0..N-1 sequence.
                if (index != token_index):
                    logger.warning('Saving vocabulary to {}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!'.format(vocab_file))
                    index = token_index
                writer.write((token + '\n'))
                index += 1
        return (vocab_file,)
class NewsRecommendation(Environment):
    """Logistic contextual bandit over `num_articles` articles.

    Each article has a Gaussian parameter vector; rewards are Bernoulli with
    sigmoid(theta . context) success probability.
    """

    def __init__(self, num_articles, dim, theta_mean=0, theta_std=1):
        self.num_articles = num_articles
        self.dim = dim
        self.theta_mean = theta_mean
        self.theta_std = theta_std
        # One Gaussian parameter vector per article.
        self.thetas = [theta_mean + theta_std * np.random.randn(dim)
                       for _ in range(num_articles)]
        self.current_rewards = [0] * num_articles

    def get_observation(self):
        """Sample one shared binary context and refresh per-article rewards."""
        feature = np.random.binomial(1, max(0, (1 / (self.dim - 1))), self.dim)
        feature[0] = 1  # bias component is always on
        context = []
        for art in range(self.num_articles):
            context.append(feature)
            # Sigmoid of the article's linear score.
            self.current_rewards[art] = 1 / (1 + np.exp(-self.thetas[art].dot(feature)))
        return context

    def get_optimal_reward(self):
        """Best achievable expected reward for the current context."""
        return np.max(self.current_rewards)

    def get_expected_reward(self, article):
        return self.current_rewards[article]

    def get_stochastic_reward(self, article):
        """Bernoulli draw with the article's current expected reward."""
        return np.random.binomial(1, self.get_expected_reward(article))
def save_data_tables(data_tables: Dict[str, List[Dict[str, Any]]], data_dir: Path):
    """Write each table as `<name>.jsonl` under `data_dir / 'extracted_proof_data'`.

    Args:
        data_tables: mapping from table name to a list of JSON-serializable records.
        data_dir: parent directory; the output subdirectory is created if missing.
    """
    # Renamed from `dir`, which shadowed the builtin of the same name.
    out_dir = data_dir / 'extracted_proof_data'
    out_dir.mkdir(exist_ok=True)
    for table_name, table in data_tables.items():
        with jsonlines.open(out_dir / (table_name + '.jsonl'), 'w') as outfile:
            for record in table:
                outfile.write(record)
class RegionBasedDiceLoss3D(nn.Module):
    """Weighted sum of a plain Dice loss and a region-based Dice loss."""

    def __init__(self, classes: int, sigmoid_normalization: bool=True):
        super(RegionBasedDiceLoss3D, self).__init__()
        # Per-class Dice term.
        self.dice_loss = DiceLoss(classes=classes, sigmoid_normalization=sigmoid_normalization, eval_regions=False)
        # Region-evaluated Dice term.
        self.dice_loss_region_based = DiceLoss(classes=classes, sigmoid_normalization=sigmoid_normalization, eval_regions=True)

    def forward(self, input: torch.tensor, target: torch.tensor, weight_reg: int=1, weight_dice: int=1):
        """Return (total, dice_loss, dice_score, region_dice_loss, subregions)."""
        dice_loss, dice_score, _ = self.dice_loss(input, target)
        dice_loss_reg, _, subregions = self.dice_loss_region_based(input, target)
        total_loss = weight_dice * dice_loss + weight_reg * dice_loss_reg
        return (total_loss, dice_loss, dice_score, dice_loss_reg, subregions)
def validate_submission(nusc: NuScenes, results_folder: str, eval_set: str, verbose: bool=False, zip_out: str=None) -> None:
    """Validate a nuScenes-lidarseg submission folder and optionally zip it.

    Checks, in order: folder layout, submission.json metadata flags, and one
    prediction .bin per sample (correct point count, labels in a valid range).

    Raises:
        AssertionError: on the first failed check.
    """
    mapper = LidarsegClassMapper(nusc)
    num_classes = len(mapper.coarse_name_2_coarse_idx_mapping)
    # --- 1. Folder structure ------------------------------------------------
    if verbose:
        print('Checking if folder structure of {} is correct...'.format(results_folder))
    results_meta_folder = os.path.join(results_folder, eval_set)
    assert os.path.exists(results_meta_folder), 'Error: The folder containing the submission.json ({}) does not exist.'.format(results_meta_folder)
    submisson_json_path = os.path.join(results_meta_folder, 'submission.json')
    assert os.path.exists(submisson_json_path), 'Error: submission.json ({}) does not exist.'.format(submisson_json_path)
    results_bin_folder = os.path.join(results_folder, 'lidarseg', eval_set)
    assert os.path.exists(results_bin_folder), 'Error: The folder containing the .bin files ({}) does not exist.'.format(results_bin_folder)
    if verbose:
        print('\tPassed.')
    # --- 2. submission.json metadata ---------------------------------------
    if verbose:
        print('Checking contents of {}...'.format(submisson_json_path))
    with open(submisson_json_path) as f:
        submission_meta = json.load(f)
    # The 'meta' dict must contain exactly these boolean modality flags.
    valid_meta = {'use_camera', 'use_lidar', 'use_radar', 'use_map', 'use_external'}
    assert (valid_meta == set(submission_meta['meta'].keys())), '{} must contain {}.'.format(submisson_json_path, valid_meta)
    for meta_key in valid_meta:
        meta_key_type = type(submission_meta['meta'][meta_key])
        assert (meta_key_type == bool), 'Error: Value for {} should be bool, not {}.'.format(meta_key, meta_key_type)
    if verbose:
        print('\tPassed.')
    # --- 3. Per-sample prediction files ------------------------------------
    if verbose:
        print('Checking if all .bin files for {} exist and are valid...'.format(eval_set))
    sample_tokens = get_samples_in_eval_set(nusc, eval_set)
    for sample_token in tqdm(sample_tokens, disable=(not verbose)):
        sample = nusc.get('sample', sample_token)
        sd_token = sample['data']['LIDAR_TOP']
        lidarseg_pred_filename = os.path.join(results_bin_folder, (sd_token + '_lidarseg.bin'))
        assert os.path.exists(lidarseg_pred_filename), 'Error: The prediction .bin file {} does not exist.'.format(lidarseg_pred_filename)
        lidarseg_pred = np.fromfile(lidarseg_pred_filename, dtype=np.uint8)
        if (len(nusc.lidarseg) > 0):
            # Ground-truth labels available: point count taken from the GT file.
            lidarseg_label_filename = os.path.join(nusc.dataroot, nusc.get('lidarseg', sd_token)['filename'])
            assert os.path.exists(lidarseg_label_filename), 'Error: The ground truth .bin file {} does not exist.'.format(lidarseg_label_filename)
            lidarseg_label = np.fromfile(lidarseg_label_filename, dtype=np.uint8)
            num_points = len(lidarseg_label)
        else:
            # No GT (e.g. test split): count points in the raw point cloud instead.
            pointsensor = nusc.get('sample_data', sd_token)
            pcl_path = os.path.join(nusc.dataroot, pointsensor['filename'])
            pc = LidarPointCloud.from_file(pcl_path)
            points = pc.points
            num_points = points.shape[1]
        assert (num_points == len(lidarseg_pred)), 'Error: There are {} predictions for lidar sample data token {} but there are only {} points in the point cloud.'.format(len(lidarseg_pred), sd_token, num_points)
        # Valid labels are 1 .. num_classes-1 (0 is excluded).
        assert all(((lidarseg_pred > 0) & (lidarseg_pred < num_classes))), 'Error: Array for predictions in {} must be between 1 and {} (inclusive).'.format(lidarseg_pred_filename, (num_classes - 1))
    if verbose:
        print('\tPassed.')
    if verbose:
        print('Results folder {} successfully validated!'.format(results_folder))
    # --- 4. Optional zipping -----------------------------------------------
    if zip_out:
        assert os.path.exists(zip_out), 'Error: The folder {} to zip the results to does not exist.'.format(zip_out)
        results_zip = os.path.join(zip_out, os.path.basename(os.path.normpath(results_folder)))
        results_zip_name = shutil.make_archive(results_zip, 'zip', results_folder)
        if verbose:
            print('Results folder {} zipped to {}'.format(results_folder, results_zip_name))
def alloc_shared_buffer(space: Any):
    """Recursively allocate raw shared-memory byte buffers matching `space`.

    Dict-like spaces map to a dict of buffers (same keys); array-like spaces
    map to a `mp.RawArray` of `prod(shape) * itemsize` bytes.

    Raises:
        ValueError: for unsupported space types.
    """
    if isinstance(space, (gym.spaces.Dict, dict)):
        # One shared buffer per sub-space, keyed identically.
        return {k: alloc_shared_buffer(v) for (k, v) in space.items()}
    elif isinstance(space, (np.ndarray, gym.spaces.Box, gym.spaces.Discrete)):
        size = int(np.prod(space.shape) * space.dtype.itemsize)
        return mp.RawArray(ctypes.c_byte, size)
    else:
        # BUG FIX: the message previously named a non-existent `alloc_shared_mem`.
        raise ValueError('Unsupported type passed to `alloc_shared_buffer`.')
def normalize_image(image, forward=True, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
    """Apply (forward=True) or undo (forward=False) per-channel normalization.

    `image` is a CHW (3-D) or NCHW (4-D) tensor; mean/std default to the
    ImageNet statistics.

    NOTE(review): if `image` has neither 3 nor 4 dims, `result` is never
    assigned and this raises NameError — confirm callers guarantee the rank.
    NOTE(review): `torch.autograd.variable.Variable` is a legacy API; this
    branch may not exist on modern torch builds — verify the torch version.
    """
    im_size = image.size()
    # Shape mean/std as (C, 1, 1) so they broadcast over H and W.
    mean = torch.FloatTensor(mean).unsqueeze(1).unsqueeze(2)
    std = torch.FloatTensor(std).unsqueeze(1).unsqueeze(2)
    if image.is_cuda:
        mean = mean.cuda()
        std = std.cuda()
    if isinstance(image, torch.autograd.variable.Variable):
        mean = Variable(mean, requires_grad=False)
        std = Variable(std, requires_grad=False)
    if forward:
        # Normalize: (image - mean) / std.
        if (len(im_size) == 3):
            result = image.sub(mean.expand(im_size)).div(std.expand(im_size))
        elif (len(im_size) == 4):
            result = image.sub(mean.unsqueeze(0).expand(im_size)).div(std.unsqueeze(0).expand(im_size))
    # The two branches below chain off `if forward:` above, so they form the
    # inverse (de-normalization) path: image * std + mean.
    elif (len(im_size) == 3):
        result = image.mul(std.expand(im_size)).add(mean.expand(im_size))
    elif (len(im_size) == 4):
        result = image.mul(std.unsqueeze(0).expand(im_size)).add(mean.unsqueeze(0).expand(im_size))
    return result
class Backbone(nn.Module):
    """Minimal backbone base class; subclasses implement `forward` and may set
    `_out_features` to advertise their output feature names.
    """

    def __init__(self):
        super().__init__()

    def forward(self):
        pass

    def out_features(self):
        """Return `_out_features` if it was set on the instance, else None."""
        # Instance-dict lookup: missing and explicitly-None both yield None.
        return self.__dict__.get('_out_features')
def load_state_dict(path, **kwargs):
    """Read a blob at `path` and deserialize it with torch.load.

    Extra keyword arguments are forwarded to `th.load`.
    """
    with bf.BlobFile(path, 'rb') as handle:
        payload = handle.read()
    return th.load(io.BytesIO(payload), **kwargs)
def dilated_conv2d(input_, output_dim, ks=3, s=2, stddev=0.02, padding='SAME', name='conv2d'):
    """Atrous (dilated) 2-D convolution (TF1 graph API).

    Args:
        input_: NHWC tensor.
        output_dim: number of output channels.
        ks: square kernel size.
        s: dilation *rate* passed to atrous_conv2d (not a stride).
        stddev: stddev of the random-normal kernel initializer.
        padding: 'SAME' or 'VALID'.
        name: variable scope (also reused as the op name).
    """
    with tf.variable_scope(name):
        # Only the channel count is needed; the previous full unpack also
        # failed on unknown (None) batch dimensions.
        in_channels = int(input_.get_shape()[3])
        # Local renamed from `filter` (shadowed builtin); the TF variable is
        # still created under the name 'filter'.
        kernel = tf.get_variable('filter', [ks, ks, in_channels, output_dim], dtype=tf.float32, initializer=tf.random_normal_initializer(0, stddev))
        conv = tf.nn.atrous_conv2d(input_, kernel, rate=s, padding=padding, name=name)
        return conv
def output(infn, output_directory, sentences):
    """Write tagged sentences as brat standoff: a .txt file with the document
    text and an .ann file with one tag line per entity span.

    `sentences` is a list of (token, tag, type) triples per sentence, where
    tag uses a BIO-style scheme ('I' continues an entity, 'O' is outside).
    Falls back to stdout for both streams when `output_directory` is None.

    NOTE(review): the codecs file handles are never closed and no encoding is
    passed to codecs.open — confirm this is acceptable for the pipeline.
    """
    if (output_directory is None):
        txtout = sys.stdout
        soout = sys.stdout
    else:
        outfn = os.path.join(output_directory, os.path.basename(infn))
        opfile_name = outfn.replace('_conll.txt', '')
        txtout = codecs.open((opfile_name + '.txt'), 'w')
        soout = codecs.open((opfile_name + '.ann'), 'w')
    # `offset` tracks the character position in the reconstructed document;
    # `idnum` numbers the emitted standoff annotations.
    (offset, idnum) = (0, 1)
    doctext = ''
    for (si, sentence) in enumerate(sentences):
        prev_token = None
        # (curr_start, curr_type) describe the entity span currently open.
        (curr_start, curr_type) = (None, None)
        quote_count = 0
        for (token, ttag, ttype) in sentence:
            # Close the open span when the entity ends or changes type.
            if ((curr_type is not None) and ((ttag != 'I') or (ttype != curr_type))):
                print(tagstr(curr_start, offset, curr_type, idnum, doctext[curr_start:offset]), file=soout)
                idnum += 1
                (curr_start, curr_type) = (None, None)
            # Insert a space between tokens where detokenization requires one.
            if ((prev_token is not None) and space(prev_token, token, quote_count)):
                doctext = (doctext + ' ')
                offset += 1
            # Open a new span on the first non-'O' token.
            if ((curr_type is None) and (ttag != 'O')):
                (curr_start, curr_type) = (offset, ttype)
            doctext = (doctext + token)
            offset += len(token)
            # Quote parity decides whether the next quote opens or closes.
            if quote(token):
                quote_count += 1
            prev_token = token
        # Flush a span still open at sentence end.
        if (curr_type is not None):
            print(tagstr(curr_start, offset, curr_type, idnum, doctext[curr_start:offset]), file=soout)
            idnum += 1
        # Newline between sentences (but not after the last one).
        if ((si + 1) != len(sentences)):
            doctext = (doctext + '\n')
            offset += 1
    print(doctext, file=txtout)
def rm_blank_in_transcripts(transcripts):
    """Drop space characters from `transcripts`.

    Returns:
        (seq, idx): the kept elements and their original positions.
    """
    kept = [(pos, ch) for (pos, ch) in enumerate(transcripts) if ch != ' ']
    seq = [ch for (_, ch) in kept]
    idx = [pos for (pos, _) in kept]
    return (seq, idx)
def _gen_constructor_wrapper(target):
    """Wrap `target` so calls receiving torch.fx Proxy arguments are traced.

    Returns:
        (wrapper, target): the tracing-aware wrapper and the original callable.

    NOTE: the original body began with a bare `(target)` expression — a no-op,
    likely a mangled decorator — which has been removed.
    """
    def wrapper(*args, **kwargs):
        proxy = None

        def check_has_proxy(v):
            # Remember any Proxy found anywhere inside args/kwargs.
            if isinstance(v, Proxy):
                nonlocal proxy
                proxy = v
        torch.fx.node.map_aggregate(args, check_has_proxy)
        torch.fx.node.map_aggregate(kwargs, check_has_proxy)
        if (proxy is not None):
            # Symbolic call: record a call_function node instead of executing.
            return proxy.tracer.create_proxy('call_function', target, args, kwargs)
        else:
            return target(*args, **kwargs)
    return (wrapper, target)
def _m_dreg(model, x, K=1):
    """Multi-modal importance-weight computation for a mixture-of-experts VAE
    (DReG-style: posteriors are rebuilt from detached parameters).

    Returns:
        (lws, zss): per-sample log-weights and latent samples, each the
        concatenation over the model's modalities.
    """
    (qz_xs, px_zs, zss) = model(x, K)
    # Detached posterior parameters — gradients then flow only through the
    # samples, as the DReG estimator requires.
    qz_xs_ = [vae.qz_x(*[p.detach() for p in vae.qz_x_params]) for vae in model.vaes]
    lws = []
    for (r, vae) in enumerate(model.vaes):
        # Prior log-density of the samples drawn via modality r's encoder.
        lpz = model.pz(*model.pz_params).log_prob(zss[r]).sum((- 1))
        # Mixture posterior: mean (in log space) of the detached per-modality densities.
        lqz_x = log_mean_exp(torch.stack([qz_x_.log_prob(zss[r]).sum((- 1)) for qz_x_ in qz_xs_]))
        # Reconstruction log-likelihood of each modality, scaled per modality.
        lpx_z = [px_z.log_prob(x[d]).view(*px_z.batch_shape[:2], (- 1)).mul(model.vaes[d].llik_scaling).sum((- 1)) for (d, px_z) in enumerate(px_zs[r])]
        lpx_z = torch.stack(lpx_z).sum(0)
        # Importance weight: log p(z) + log p(x|z) - log q(z|x).
        lw = ((lpz + lpx_z) - lqz_x)
        lws.append(lw)
    return (torch.cat(lws), torch.cat(zss))
class CameraServer(Publisher):
    """Publishes undistorted camera frames through a shared-memory block.

    A background thread keeps a single-slot queue filled with the freshest
    frame; `get_data` copies the frame into shared memory and returns the
    metadata clients need to attach to it.
    """
    def __init__(self, serial, **kwargs):
        super().__init__(**kwargs)
        (image_width, image_height, camera_matrix, dist_coeffs) = utils.get_camera_params(serial)
        self.cap = utils.get_video_cap(serial, image_width, image_height)
        self.last_read_time = time.time()
        # maxsize=1: the worker only produces when the slot is free.
        self.queue = Queue(maxsize=1)
        Thread(target=self.camera_worker, daemon=True).start()
        # Precompute the undistortion maps once.
        (self.map_x, self.map_y) = cv.initUndistortRectifyMap(camera_matrix, dist_coeffs, None, camera_matrix, (image_width, image_height), cv.CV_32FC1)
        # Grab one frame to size the shared-memory block.
        image = self.get_image()
        self.shm = shared_memory.SharedMemory(create=True, size=image.nbytes)
        self.image_shm = np.ndarray(image.shape, dtype=image.dtype, buffer=self.shm.buf)
    def get_image(self):
        """Blocking read of one frame, undistorted via the cached maps."""
        image = None
        while (image is None):
            (_, image) = self.cap.read()
        image = cv.remap(image, self.map_x, self.map_y, cv.INTER_LINEAR)
        return image
    def camera_worker(self):
        """Daemon loop: refill the queue and verify camera settings stay fixed."""
        while True:
            if self.queue.empty():
                image = self.get_image()
                self.queue.put((time.time(), image))
                # Guard against the driver silently resetting camera controls.
                assert (self.cap.get(cv.CAP_PROP_FOCUS) == CAMERA_FOCUS)
                assert (self.cap.get(cv.CAP_PROP_TEMPERATURE) == CAMERA_TEMPERATURE)
                assert (self.cap.get(cv.CAP_PROP_EXPOSURE) == CAMERA_EXPOSURE)
                assert (self.cap.get(cv.CAP_PROP_GAIN) == CAMERA_GAIN)
            time.sleep(0.0001)
    def get_data(self):
        """Copy the freshest frame into shared memory; return attach metadata."""
        # Rate-limit reads to roughly 30 Hz.
        while ((time.time() - self.last_read_time) < 0.0333):
            time.sleep(0.0001)
        (capture_time, image) = self.queue.get()
        if ((time.time() - capture_time) > 0.1):
            # Frame is stale: discard and wait for fresher ones.
            # NOTE(review): two consecutive blocking gets here — looks like it
            # intentionally skips one more frame, but confirm this is not an
            # off-by-one (the first get's result is thrown away).
            self.queue.get()
            (_, image) = self.queue.get()
        self.last_read_time = time.time()
        np.copyto(self.image_shm, image)
        return {'name': self.shm.name, 'shape': image.shape, 'dtype': image.dtype}
    def clean_up(self):
        """Release the camera and tear down the shared-memory block."""
        self.cap.release()
        self.shm.close()
        self.shm.unlink()
def speaker_normalization(f0, index_nonzero, mean_f0, std_f0):
    """Normalize the voiced f0 values of a contour into [0, 1].

    Voiced entries (selected by `index_nonzero`) are standardized with the
    speaker's mean/std, squashed by 4, clipped to [-1, 1], then mapped to
    [0, 1]. Unvoiced entries are left untouched.
    """
    normalized = f0.astype(float).copy()
    voiced = (normalized[index_nonzero] - mean_f0) / std_f0 / 4.0
    voiced = np.clip(voiced, -1, 1)
    normalized[index_nonzero] = (voiced + 1) / 2.0
    return normalized
def decode_affinity(affinity_code):
    """Parse an underscore-separated affinity code into keyword arguments.

    Each component is `<int><3-letter-abbrev>`; the abbreviation must appear
    in the module-level ABBREVS mapping.

    Raises:
        ValueError: on an unrecognized abbreviation.
    """
    kwargs = dict()
    for component in affinity_code.split('_'):
        abrv = component[-3:]
        if (abrv not in ABBREVS):
            raise ValueError(f'Unrecognized affinity code abbreviation: {abrv}')
        kwargs[abrv] = int(component[:-3])
    return kwargs
class GraphNet(MessagePassing):
    """Single message-passing layer with max aggregation.

    Messages combine neighbor embeddings with 4-dim edge attributes; the
    update step mixes node, per-graph global, and aggregated features, with a
    residual connection back to the node embedding.
    """
    def __init__(self):
        super().__init__(aggr='max')
        # Message MLP: [neighbor emb | edge attr (4)] -> emb_size.
        self.f_mess = Sequential(Linear((config.emb_size + 4), config.emb_size), LeakyReLU())
        # Update MLP: [node | global | aggregated] -> emb_size.
        self.f_agg = Sequential(Linear(((config.emb_size + config.emb_size) + config.emb_size), config.emb_size), LeakyReLU())
    def forward(self, x, edge_attr, edge_index, xg, batch_ind):
        # Broadcast per-graph global features to nodes via their batch index.
        xg = xg[batch_ind]
        return self.propagate(edge_index, x=x, edge_attr=edge_attr, xg=xg)
    def message(self, x_j, edge_attr):
        """Message per edge: MLP over [source embedding | edge attributes]."""
        z = torch.cat([x_j, edge_attr], dim=1)
        z = self.f_mess(z)
        return z
    def update(self, aggr_out, x, xg):
        """Node update: MLP over [node | global | aggregated] plus residual."""
        z = torch.cat([x, xg, aggr_out], dim=1)
        z = (self.f_agg(z) + x)
        return z
def _collect_slice_pairs(statistics, base_slices, data_dir):
    """Collect adjacent-slice image/ground-truth path pairs for one split.

    For each subject and each used ED/ES instant, emits one (prev-slice,
    current-slice) pair per slice above the base slice. When the previous
    slice IS the base slice, its GT path uses the '-1' sentinel file name.

    Returns:
        (img_list0, img_list1, gt_list0, gt_list1, subject_count)
    """
    img_list0, img_list1, gt_list0, gt_list1 = [], [], [], []
    subject_count = 0
    for k in statistics:
        eid = k[0]
        slices = k[5]
        ed_es_instant0 = k[7]
        ed_es_instant1 = k[8]
        # Per-subject base-slice indices, one per used instant.
        base_slice_list = [x for x in base_slices if (x[0] == eid)][0][1:]
        subject_count += 1
        crop_2D_path = os.path.join(data_dir, str(eid), 'crop_2D')
        used_instants = []
        if (ed_es_instant0 >= 0):
            used_instants.append(ed_es_instant0)
        if (ed_es_instant1 >= 0):
            used_instants.append(ed_es_instant1)
        for (idx, t) in enumerate(used_instants):
            base_slice_t = base_slice_list[idx]
            first_slice_idx = max((base_slice_t + 1), 0)
            for s in range(first_slice_idx, slices):
                img_list0.append(os.path.join(crop_2D_path, 'crop_2D_{}_{}.png'.format(str((s - 1)).zfill(2), str(t).zfill(2))))
                img_list1.append(os.path.join(crop_2D_path, 'crop_2D_{}_{}.png'.format(str(s).zfill(2), str(t).zfill(2))))
                if ((s - 1) != base_slice_t):
                    gt0 = os.path.join(crop_2D_path, 'crop_2D_gt2_{}_{}.png'.format(str((s - 1)).zfill(2), str(t).zfill(2)))
                else:
                    # Base slice itself: point at the '-1' sentinel GT file.
                    gt0 = os.path.join(crop_2D_path, 'crop_2D_gt2_{}_{}.png'.format(str((- 1)).zfill(2), str(t).zfill(2)))
                gt_list0.append(gt0)
                gt_list1.append(os.path.join(crop_2D_path, 'crop_2D_gt2_{}_{}.png'.format(str(s).zfill(2), str(t).zfill(2))))
    return (img_list0, img_list1, gt_list0, gt_list1, subject_count)

def ukbiobank_data():
    """Build train/test file lists for UK Biobank adjacent-slice segmentation.

    Reads the preprocessing record files, filters usable subjects (complete,
    not flagged doubtful, with both ED/ES instants), splits by `eid % 5 == 2`
    into test vs train, and expands each split into adjacent-slice path pairs.

    Returns:
        (train_img0, train_img1, train_gt0, train_gt1,
         test_img0, test_img1, test_gt0, test_gt1)
    """
    data_dir = config.data_root
    code_dir = config.code_root
    statistics_file = os.path.join(code_dir, 'Preprocessing', 'statistics_record.txt')
    doubtful_case_file = os.path.join(code_dir, 'Preprocessing', 'doubtful_segmentation_cases2.txt')
    base_slices_file = os.path.join(code_dir, 'Preprocessing', 'base_slices.txt')
    with open(base_slices_file) as b_file:
        base_slices = [[int(z) for z in line.strip().split()] for line in b_file.readlines()]
    with open(statistics_file) as s_file:
        statistics = []
        for line in s_file.readlines():
            fields = line.strip().split()
            # Last four columns are floats, the rest are ints.
            statistics.append([int(z) for z in fields[:(- 4)]] + [float(z) for z in fields[(- 4):]])
    with open(doubtful_case_file) as d_file:
        # Set for O(1) membership tests below.
        doubtful_cases = {int(line.strip()) for line in d_file.readlines()}
    # Keep complete, non-doubtful subjects with both ED/ES instants present.
    used_statistics = [k for k in statistics if ((k[0] not in doubtful_cases) and (k[1] == 1) and (k[2] == 1) and (k[7] >= 0) and (k[8] >= 0))]
    print('There will be {} used eids'.format(len(used_statistics)))
    train_statistics = [x for x in used_statistics if ((x[0] % 5) != 2)]
    test_statistics = [x for x in used_statistics if ((x[0] % 5) == 2)]
    # The train and test loops were previously duplicated; both now go through
    # the shared helper.
    (train_img_list0, train_img_list1, train_gt_list0, train_gt_list1, train_subject_count) = _collect_slice_pairs(train_statistics, base_slices, data_dir)
    (test_img_list0, test_img_list1, test_gt_list0, test_gt_list1, test_subject_count) = _collect_slice_pairs(test_statistics, base_slices, data_dir)
    print('train_subject_count = {}'.format(train_subject_count))
    print('test_subject_count = {}'.format(test_subject_count))
    print('train_image_count = {}'.format(len(train_img_list0)))
    print('test_image_count = {}'.format(len(test_img_list0)))
    return (train_img_list0, train_img_list1, train_gt_list0, train_gt_list1, test_img_list0, test_img_list1, test_gt_list0, test_gt_list1)
def check_avg_len(dir):
    """Average whitespace-token count over up to 500 random files in `dir`."""
    sampled = os.listdir(dir)
    random.shuffle(sampled)
    sampled = sampled[:500]
    token_counts = []
    for fname in sampled:
        with open(os.path.join(dir, fname), 'r') as fd:
            # Split each line on single spaces, then flatten to count tokens.
            split_lines = [line.split(' ') for line in fd.read().splitlines()]
            token_counts.append(len(flatten(split_lines)))
    print('len: {}'.format(len(token_counts)))
    return (sum(token_counts) / len(token_counts))
class SolverArg():
    """Mutable bag of solver configuration values.

    Unknown keyword arguments are silently ignored; only the predeclared
    fields can be set through the constructor.
    """

    def __init__(self, **kwargs):
        # Declare every known field, defaulting to None.
        for field in ('layer_size', 'neuron_type', 'grid_num', 'coef', 'Q',
                      'test_fcn_num', 'epoch_num', 'model_name', 'data',
                      'layers', 'u_ref', 'grid_data'):
            setattr(self, field, None)
        # Accept only keywords matching a declared field.
        for (key, value) in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)

    def set_layer_size(self, new_layer_size):
        self.layer_size = new_layer_size

    def set_coef(self, loss_weight):
        """Overwrite the leading coefficients with `loss_weight` entries."""
        for i in range(min(len(loss_weight), len(self.coef))):
            self.coef[i] = loss_weight[i]

    def set_epoch_num(self, epoch_num):
        self.epoch_num = epoch_num

    def to_dict(self):
        """Return all fields that have been given a (non-None) value."""
        return {key: value for (key, value) in vars(self).items() if (value is not None)}
def get_joystick_buttons(joy):
    """Query GLFW for joystick button states.

    Returns:
        (buttons_ptr, count): the raw GLFW button array and how many entries it has.
    """
    n_buttons = ctypes.c_int(0)
    buttons = _glfw.glfwGetJoystickButtons(joy, ctypes.pointer(n_buttons))
    return (buttons, n_buttons.value)
class global_pool(nn.Module):
def __init__(self, input_size, input_channels):
super(global_pool, self).__init__()
self.input_size = input_size
self.input_channels = input_channels
self.bn = nn.BatchNorm2d(self.input_channels)
self.pool = nn.AvgPool2d(kernel_size=self.input_size)
def forward(self, x):
output = self.bn(x)
output = F.relu(output)
output = self.pool(output)
return output |
class DanaUtil():
    """Helper around DanaCli for reporting test/benchmark samples to a Dana
    dashboard, lazily creating series and build ids as needed.
    """

    def __init__(self, domain=DANA_DOMAIN, serie_cfg_path=SERIE_CFG_PATH):
        dir_path = os.path.dirname(serie_cfg_path)
        if (not os.path.isdir(dir_path)):
            os.makedirs(dir_path)
        self._dana_cli = DanaCli(domain, serie_cfg_path)
        self._valid_time = None  # timestamp of the last availability probe
        self._build_id = None  # lazily created, cached for the process
        self._service_available = False

    def fast_report_test(self, table_name, base_name, device_name, soc, target_abi, runtime, value):
        """Report a test sample (no trend applies)."""
        return self.fast_report_sample(True, table_name, base_name, device_name, soc, target_abi, runtime, value)

    def fast_report_benchmark(self, table_name, base_name, device_name, soc, target_abi, runtime, value, trend):
        """Report a benchmark sample with its expected trend."""
        return self.fast_report_sample(False, table_name, base_name, device_name, soc, target_abi, runtime, value, trend)

    def fast_report_sample(self, is_test, table_name, base_name, device_name, soc, target_abi, runtime, value, trend=None):
        """Report one sample; returns True when reporting succeeded or the
        service is unavailable (best-effort semantics).

        BUG FIX: `trend` now defaults to None — `fast_report_test` calls this
        method without a trend argument, which previously raised TypeError.
        """
        if (not self.service_available()):
            return True
        serie_id = self.create_serie_id(table_name, base_name, device_name, soc, target_abi, runtime)
        if is_test:
            succ = self.report_test(serie_id, value)
        else:
            succ = self.report_benchmark(serie_id, value, trend)
        return succ

    def get_and_create_build_from_git(self, project_id=MACE_PROJECT_ID):
        """Create (once) and return a build derived from the latest git commit."""
        commit_id = sh.git('log', '--no-merges', '-n1', '--pretty=format:%H', _tty_out=False).strip()
        abbrev_commit_id = sh.git('rev-parse', ('%s~1' % commit_id), _tty_out=False).strip()
        author_name = sh.git('log', '--pretty=format:%an', '-1', commit_id, _tty_out=False).strip()
        author_mail = sh.git('log', '--pretty=format:%ae', '-1', commit_id, _tty_out=False).strip()
        subject = sh.git('log', '--pretty=format:%s', '-1', commit_id, _tty_out=False).strip()
        return self.get_and_create_build(commit_id, abbrev_commit_id, author_name, author_mail, subject, project_id)

    def get_and_create_build(self, commit_id, abbrev_commit_id, author_name, author_mail, subject, project_id=MACE_PROJECT_ID):
        """Register the build with Dana on first use; cache its id. Returns
        None when registration failed."""
        if (self._build_id is None):
            build_id = self.create_build_id()
            succ = self._dana_cli.add_build(project_id=project_id, build_id=build_id, build_hash=commit_id, abbrev_hash=abbrev_commit_id, author_name=author_name, author_mail=author_mail, subject=subject)
            if succ:
                self._build_id = build_id
        return self._build_id

    def create_build_id(self):
        """Derive a numeric build id (currently: current unix time)."""
        if False:
            # Disabled alternative: derive the id from the latest commit date.
            latest_id = sh.git('log', '-n1', '--pretty=format:%H', _tty_out=False).strip()
            timestamp = sh.git('log', '--pretty=format:%ad', '-1', '--date=format:%Y-%m-%d %H:%M:%S', latest_id, _tty_out=False).strip()
            unix_time = time.mktime(time.strptime(timestamp, '%Y-%m-%d %H:%M:%S'))
            build_id = int(unix_time)
        else:
            build_id = int(time.time())
        return build_id

    def create_serie_id(self, table_name, base_name, device_name, soc, target_abi, runtime):
        """Compose a series id from the device/runtime coordinates."""
        base_serie_id = ('%s_%s_%s_%s_%s' % (base_name, device_name, soc, target_abi, runtime))
        return self.create_serie_id_lite(table_name, base_serie_id)

    def create_serie_id_lite(self, table_name, base_name):
        """Join table and base names, stripping all whitespace."""
        serie_id = ('%s_%s' % (table_name, base_name))
        serie_id = re.sub('\\s+', '', serie_id.strip())
        return serie_id

    def report_benchmark(self, serie_id, value, trend, project_id=MACE_PROJECT_ID):
        """Create the benchmark series if needed, then add one sample."""
        if (not self._dana_cli.serie_available(serie_id)):
            succ = self._dana_cli.add_benchmark_serie(project_id=project_id, serie_id=serie_id, range=DEFAULT_RANGE, required=DEFAULT_REQUIRED, trend=trend)
            if (not succ):
                print(('Add benchmark serie_id(%s) failed.' % serie_id))
                return False
        build_id = self.get_and_create_build_from_git()
        if (build_id is None):
            print('Add build id failed.')
            return False
        succ = self._dana_cli.add_sample(project_id=project_id, serie_id=serie_id, build_id=build_id, value=value)
        return succ

    def report_test(self, serie_id, value, project_id=MACE_PROJECT_ID):
        """Create the test series if needed, then add one sample."""
        if (not self._dana_cli.serie_available(serie_id)):
            succ = self._dana_cli.add_test_serie(project_id=project_id, serie_id=serie_id)
            if (not succ):
                print(('Add test serie_id(%s) failed.' % serie_id))
                return False
        build_id = self.get_and_create_build_from_git()
        if (build_id is None):
            print('Add build id failed.')
            return False
        succ = self._dana_cli.add_sample(project_id=project_id, serie_id=serie_id, build_id=build_id, value=value)
        return succ

    def service_available(self):
        """Probe (at most once per SERVICE_LIFE seconds) whether Dana is up."""
        cur_time = int(time.time())
        if ((self._valid_time is None) or ((cur_time - self._valid_time) > SERVICE_LIFE)):
            self._service_available = self._dana_cli.service_available()
            self._valid_time = cur_time
        return self._service_available
class PPONetwork(nn.Module):
    """Actor-critic network for PPO with a Beta policy head.

    NOTE(review): the constructor hard-codes 'cuda:0' and calls `summary`,
    so instantiation requires a GPU and the torchsummary package — confirm
    this is intentional for all deployments.
    """
    def __init__(self, state_size, action_size, hidden_size):
        super(PPONetwork, self).__init__()
        second_hidden_size = 500
        third = (second_hidden_size - 100)
        # Observations stack 3 frames; `agents` is currently unused here.
        frames = 3
        agents = 2
        self.input_size = (state_size * frames)
        self.input = nn.Linear(self.input_size, hidden_size)
        self.hidden = nn.Linear(hidden_size, second_hidden_size)
        self.actor_body = nn.Linear(third, third)
        self.actor_head = nn.Linear(third, action_size)
        self.critic_body = nn.Linear(third, third)
        self.critic_head = nn.Linear(third, 1)
        self.policy_body = nn.Linear(second_hidden_size, third)
        self.policy_head = nn.Linear(third, third)
        # Kaiming init for the shared trunk and body layers.
        init_layers = [self.input, self.hidden, self.actor_body, self.critic_body, self.policy_body]
        self.init_weights(init_layers)
        self.batch_norm = nn.BatchNorm1d(second_hidden_size)
        self.batch_norm_input = nn.BatchNorm1d(hidden_size)
        # Beta-distribution parameter heads (2 action dims each), small init.
        self.alpha = nn.Linear(third, 2, bias=False)
        self.beta = nn.Linear(third, 2, bias=False)
        self.alpha.weight.data.mul_(0.125)
        self.beta.weight.data.mul_(0.125)
        self.std = nn.Parameter(torch.zeros(2))
        self.state_size = state_size
        device = 'cuda:0'
        self.to(device)
        summary(self, (1, self.input_size))
    def init_weights(self, layers):
        """Kaiming-normal weights, biases scaled down by 10x."""
        for layer in layers:
            nn.init.kaiming_normal_(layer.weight)
            layer.bias.data.mul_(0.1)
    def forward(self, state, action=None):
        """Run the actor-critic; samples an action when none is given.

        Returns a dict: action, its log-prob, entropy, tanh mean, and value.
        """
        x = state.view((- 1), self.input_size)
        # Shared trunk with batch norm.
        x = F.leaky_relu(self.batch_norm_input(self.input(x)))
        x = F.leaky_relu(self.batch_norm(self.hidden(x)))
        x = F.leaky_relu(self.policy_body(x))
        # Actor branch (F.tanh is deprecated in newer torch; torch.tanh preferred).
        act_x = F.tanh(self.actor_body(x))
        mean = F.tanh(self.actor_head(act_x))
        # Beta parameters are shifted by +1 so the density stays unimodal.
        alpha = (F.softplus(self.alpha(act_x)) + 1)
        beta = (F.softplus(self.beta(act_x)) + 1)
        policy_dist = torch.distributions.Beta(alpha, beta)
        if (action is None):
            action = policy_dist.sample()
        log_prob = policy_dist.log_prob(action).sum((- 1)).unsqueeze((- 1))
        entropy = policy_dist.entropy().sum((- 1)).unsqueeze((- 1))
        # Critic branch.
        critic_x = F.leaky_relu(self.critic_body(x))
        value = self.critic_head(critic_x)
        return {'a': action, 'log_pi_a': log_prob, 'ent': entropy, 'mean': mean, 'v': value}
def generate_aug_train_sentences(train_sentence_to_label, train_label_to_sentences, cfg, alpha):
    """Build label mappings covering both original and augmented sentences.

    Note: `train_sentence_to_label` is accepted for signature compatibility
    but is not consulted — labels are taken from `train_label_to_sentences`.

    Returns:
        (sentence->label dict, label->sentences dict), each including the
        originals plus every augmentation produced for them.
    """
    sentence_to_aug_sentences = augmentation.get_augmented_sentences(
        cfg.aug_type, cfg.train_path, cfg.n_aug, alpha)
    aug_sentence_to_label = {}
    label_to_aug_sentences = {label: [] for label in train_label_to_sentences}
    for label, sentences in train_label_to_sentences.items():
        for sentence in sentences:
            # Keep the original sentence itself ...
            aug_sentence_to_label[sentence] = label
            label_to_aug_sentences[label].append(sentence)
            # ... then append each of its augmentations under the same label.
            for augmented in sentence_to_aug_sentences[sentence]:
                aug_sentence_to_label[augmented] = label
                label_to_aug_sentences[label].append(augmented)
    return (aug_sentence_to_label, label_to_aug_sentences)
def zip_longest(*itrs):
    """Zip sequences of unequal length by cycling the shorter ones.

    Each iterable shorter than the longest is wrapped in repeat_iterator with
    enough repetitions to cover the longest length. Note this intentionally
    shadows itertools.zip_longest (which pads with a fill value instead).
    """
    target = max(len(itr) for itr in itrs)

    def _fit(itr):
        # Already long enough: pass through unchanged.
        if len(itr) == target:
            return itr
        return repeat_iterator(itr, math.ceil(target / len(itr)))

    return zip(*map(_fit, itrs))
class Dataset():
    """Indexable wrapper pairing the inputs and labels from build_dataset()."""

    def __init__(self):
        # The first two elements of build_dataset()'s 4-tuple are discarded.
        _, _, self.inputs, self.labels = build_dataset()

    def __getitem__(self, idx):
        """Return the (input, label) pair at position idx."""
        return self.inputs[idx], self.labels[idx]

    def __len__(self):
        assert len(self.inputs) == len(self.labels), 'inputs should have equal len with labels'
        return len(self.inputs)
@torch.no_grad()  # source read `_grad()` — restored to the standard no-grad decorator; TODO confirm against upstream
def test_baseline(model, data, pos_encoding=None, opt=None):
    """Evaluate `model` on the train/val/test masks of `data`.

    Args:
        model: GNN with .opt, .num_classes and .device attributes.
        data: graph data object exposing x, y, train_mask and the
            data('train_mask', 'val_mask', 'test_mask') iterator.
        pos_encoding: optional positional encoding forwarded to the model.
        opt: optional options dict; only opt['wandb'] is consulted here.

    Returns:
        [train_acc, val_acc, test_acc] as floats.
    """
    model.eval()
    feat = data.x
    if model.opt['use_labels']:
        # Label-as-feature trick: append train labels to the input features.
        feat = add_labels(feat, data.y, data.train_mask, model.num_classes, model.device)
    logits, accs = model(feat, pos_encoding), []
    for _, mask in data('train_mask', 'val_mask', 'test_mask'):
        pred = logits[mask].max(1)[1]
        accs.append(pred.eq(data.y[mask]).sum().item() / mask.sum().item())
    # Guard added: the original indexed opt unconditionally and raised
    # TypeError whenever the default opt=None reached this line.
    if opt is not None and opt['wandb']:
        lf = torch.nn.CrossEntropyLoss()
        # NOTE(review): loss is computed but never returned or logged —
        # presumably intended for wandb reporting; confirm.
        loss = lf(logits[data.train_mask], data.y.squeeze()[data.train_mask])
    return accs
# Non-code residue from a dataset-viewer export; commented out so the file
# remains importable:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.