code stringlengths 101 5.91M |
|---|
def log_Normal_diag(sample, mean, log_var):
    """Element-wise log-density of a diagonal Gaussian (up to the constant -0.5*log(2*pi))."""
    deviation = sample - mean
    return -0.5 * (log_var + K.square(deviation) / K.exp(log_var))
def getAllWords(fName):
    """Collect question/answer tokens from a JSON question file.

    Args:
        fName: path to a JSON file with a top-level 'questions' list; each
            entry has a 'question' string and optionally an 'answer' string.

    Returns:
        (wordsX, wordsY): lowercased question tokens (the trailing character
        of each question — assumed '?' — is stripped, and a single '?' token
        is appended at the end) and lowercased answer tokens.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original left the handle to the garbage collector).
    with open(fName) as fh:
        dat = json.load(fh)
    dat = dat['questions']
    wordsX = []
    wordsY = []
    for e in dat:
        # [:-1] drops the trailing punctuation before tokenizing.
        wordsX += e['question'].lower()[:-1].split()
        if 'answer' in e:
            wordsY += e['answer'].lower().split()
    return (wordsX + ['?']), wordsY
def replace_unk(beam_lst, lst_src, int_order):
    """Post-process beam-search outputs, replacing '<unk>' tokens with copied source words.

    Args:
        beam_lst: list of '|||'-separated beam strings of the form
            "tokens|||score|||state|||rank|||copy_indices".
        lst_src: raw source records; fed through get_wikibio_poswrds to obtain
            the per-record field words available for copying.
        int_order: indices mapping beam groups back to entries of lst_src.

    Returns:
        List of detokenized strings (one per completed beam group), with
        '<unk>' replaced via the copy indices and anything after '<eos>' dropped.
    """
    # One list of copyable source words per record, in int_order order.
    result = []
    for (idx, num) in enumerate(int_order):
        fields = get_wikibio_poswrds(lst_src[num])
        fields = [wrd for ((k, idx), wrd) in fields.items()]
        result.append(fields)
    result_2 = []
    x_idx = 0  # index of the current record's copy-word list
    temp_store = []  # (rescored_score, beam_tuple) candidates for the current group
    for ii in range(len(beam_lst)):
        try:
            x = result[x_idx]
            y = beam_lst[ii]
        except:
            # NOTE(review): bare except — if this fires, x/y keep their previous
            # values (or are unbound on the first iteration).
            print('x_idx is out of range for x:', x_idx, ii)
        try:
            (y1, score_1, state_1, rank1, copy1) = y.split('|||')
        except:
            # Malformed beam line: skip it.
            continue
        if (int(rank1) == 0):
            # rank 0 starts a new beam group: flush the best candidate of the
            # previous group first.
            if (len(temp_store) > 0):
                for (score_, elem) in sorted(temp_store, key=(lambda a: a[0]))[:1]:
                    (y, score_, state_, rank, copy) = elem
                    copy = ast.literal_eval(copy)  # copy indices stored as a literal list
                    y = y.split()
                    for (idx, elem) in enumerate(y):
                        if (elem == '<unk>'):
                            # Replace '<unk>' with the copied source word when in range.
                            if ((copy[idx] >= 0) and (copy[idx] < len(x))):
                                y[idx] = x[copy[idx]]
                    if ('<eos>' in y):
                        temp_id = y.index('<eos>')
                        y = y[:temp_id]
                    result_2.append(' '.join(y))
                x_idx += 1
                temp_store = []
            # Length-normalized rescoring (GNMT-style length penalty with the 5/6 constant).
            rescore = ((5 + len(y1.split())) / 6)
            score_ = float(score_1)
            temp_store.append(((score_ / rescore), (y1, score_1, state_1, rank1, copy1)))
        else:
            rescore = ((5 + len(y1.split())) / 6)
            score_ = float(score_1)
            temp_store.append(((score_ / rescore), (y1, score_1, state_1, rank1, copy1)))
    # NOTE(review): the final group left in temp_store is never flushed —
    # confirm whether trailing beams are intentionally dropped.
    return result_2
class FusedBiasLeakyReLUFunctionBackward(Function):
    """Backward helper for the fused bias + LeakyReLU CUDA op.

    forward() computes gradients w.r.t. input and bias from grad_output;
    backward() supports double-backward through the same fused kernel.

    NOTE(review): torch.autograd.Function requires forward/backward to be
    @staticmethod — the decorators appear to have been stripped here; confirm
    against the upstream source.
    """

    def forward(ctx, grad_output, out, negative_slope, scale):
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        empty = grad_output.new_empty(0)
        # act=3 selects leaky_relu in the extension; grad=1 requests the gradient.
        grad_input = ext_module.fused_bias_leakyrelu(grad_output, empty, out, act=3, grad=1, alpha=negative_slope, scale=scale)
        # Sum over every dim except dim 1 (channels) to obtain the bias gradient.
        dim = [0]
        if (grad_input.ndim > 2):
            dim += list(range(2, grad_input.ndim))
        grad_bias = grad_input.sum(dim).detach()
        return (grad_input, grad_bias)

    def backward(ctx, gradgrad_input, gradgrad_bias):
        (out,) = ctx.saved_tensors
        gradgrad_out = ext_module.fused_bias_leakyrelu(gradgrad_input, gradgrad_bias.to(out.dtype), out, act=3, grad=1, alpha=ctx.negative_slope, scale=ctx.scale)
        # Only the first forward input (grad_output) receives a gradient.
        return (gradgrad_out, None, None, None)
def test_linear_regression():
    """Check our LinearRegression matches scikit-learn and its explanations render."""
    dataset = load_diabetes()
    (X, y) = (dataset.data, dataset.target)
    feature_names = dataset.feature_names
    sk_lr = SKLinear()
    our_lr = LinearRegression(feature_names=feature_names)
    sk_lr.fit(X, y)
    our_lr.fit(X, y)
    # Predictions must agree with the scikit-learn reference implementation.
    sk_pred = sk_lr.predict(X)
    our_pred = our_lr.predict(X)
    assert np.allclose(sk_pred, our_pred)
    # Local explanation with labels provided.
    local_expl = our_lr.explain_local(X, y)
    local_viz = local_expl.visualize(0)
    assert (local_viz is not None)
    # Local explanation without labels.
    local_expl = our_lr.explain_local(X)
    local_viz = local_expl.visualize(0)
    assert (local_viz is not None)
    # Global explanation.
    global_expl = our_lr.explain_global()
    global_viz = global_expl.visualize()
    assert (global_viz is not None)
def sanity_check_paramter_updates(model, last_ckpt):
    """Check whether weights and popup_scores changed since the last checkpoint.

    Returns:
        (weights_changed, scores_changed) booleans.

    NOTE(review): w1/w2/s1/s2 are overwritten on every matching module, so the
    returned flags reflect only the *last* module with both 'weight' and
    'popup_scores'; if no module matches (or one attribute is always None) the
    return raises UnboundLocalError — confirm this is acceptable upstream.
    """
    for (i, v) in model.named_modules():
        # Only consider modules that carry both a weight and a popup score mask.
        if (hasattr(v, 'weight') and hasattr(v, 'popup_scores')):
            if (getattr(v, 'weight') is not None):
                w1 = getattr(v, 'weight').data.cpu()
                w2 = last_ckpt[(i + '.weight')].data.cpu()
            if (getattr(v, 'popup_scores') is not None):
                s1 = getattr(v, 'popup_scores').data.cpu()
                s2 = last_ckpt[(i + '.popup_scores')].data.cpu()
    return ((not torch.allclose(w1, w2)), (not torch.allclose(s1, s2)))
def get_keywords():
    """Return the git metadata baked in at export time (versioneer-style)."""
    # These values are substituted by git keyword expansion; do not edit.
    return {
        'refnames': ' (HEAD -> main)',
        'full': 'e12c47d414dceb457ce128bf87c67fbd4479f14a',
        'date': '2021-12-04 07:26:07 -0800',
    }
def load_dataset_splits(task, splits):
    """Load every requested split on *task*.

    'train' is loaded once with combine=True. Any other split is probed for
    sharded variants (split, split1, split2, ...) until a shard is missing;
    a missing base shard (k == 0) is re-raised as a hard error.
    """
    for split in splits:
        if split == 'train':
            task.load_dataset(split, combine=True)
            continue
        for k in itertools.count():
            shard_suffix = str(k) if k > 0 else ''
            try:
                task.load_dataset(split + shard_suffix, combine=False)
            except FileNotFoundError as e:
                if k > 0:
                    break
                raise e
def _cfgs_to_fx_cfgs(op_cfgs, observer_type='post_training_static_quant'):
    """Convert a per-op qconfig dict into the FX-graph-mode config structure.

    Args:
        op_cfgs: mapping of op/module name -> qconfig; the special key
            'default_qconfig' supplies the global fallback.
        observer_type: 'post_training_static_quant' (default),
            'post_training_dynamic_quant', or 'quant_aware_training'.

    Returns:
        A QConfigMapping on torch >= 1.13, otherwise the legacy dict format
        ({'': global, 'module_name': [(name, qconfig), ...]}).
    """
    version = get_torch_version()
    # Select the global/default qconfig for the requested observer type.
    if (observer_type == 'post_training_dynamic_quant'):
        model_qconfig = torch.quantization.default_dynamic_qconfig
    elif (observer_type == 'quant_aware_training'):
        # Fused fake-quant observers only exist from torch 1.10 onwards.
        model_qconfig = (torch.quantization.QConfig(activation=torch.quantization.FakeQuantize.with_args(dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=REDUCE_RANGE), weight=torch.quantization.default_weight_fake_quant) if (version.release < Version('1.10.0').release) else torch.quantization.QConfig(activation=torch.quantization.FusedMovingAvgObsFakeQuantize.with_args(dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=REDUCE_RANGE), weight=torch.quantization.default_fused_per_channel_wt_fake_quant))
    else:
        model_qconfig = torch.quantization.QConfig(activation=torch.quantization.HistogramObserver.with_args(reduce_range=REDUCE_RANGE), weight=torch.quantization.default_per_channel_weight_observer)
    # torch >= 1.13 uses the QConfigMapping API; older versions use a raw dict.
    if (version.release >= Version('1.13.0').release):
        from torch.ao.quantization import QConfigMapping
        fx_op_cfgs = QConfigMapping()
        if (observer_type != 'post_training_dynamic_quant'):
            fx_op_cfgs.set_global(model_qconfig)
    else:
        fx_op_cfgs = dict()
        if (observer_type != 'post_training_dynamic_quant'):
            fx_op_cfgs[''] = model_qconfig
        op_tuple_cfg_list = []
    # Apply per-op overrides.
    for (key, value) in op_cfgs.items():
        if (key == 'default_qconfig'):
            if (version.release >= Version('1.13.0').release):
                fx_op_cfgs.set_global(value)
            else:
                fx_op_cfgs[''] = value
            continue
        if (version.release >= Version('1.13.0').release):
            fx_op_cfgs.set_module_name(key, value)
        else:
            op_tuple = (key, value)
            op_tuple_cfg_list.append(op_tuple)
    if (version.release < Version('1.13.0').release):
        fx_op_cfgs['module_name'] = op_tuple_cfg_list
    elif (observer_type != 'post_training_dynamic_quant'):
        # Carry over torch's default per-object-type qconfigs as well.
        from torch.ao.quantization import get_default_qconfig_mapping
        for (name, q_config) in get_default_qconfig_mapping().to_dict()['object_type']:
            fx_op_cfgs.set_object_type(name, q_config)
    return fx_op_cfgs
class TestDetInferencer(TestCase):
    """Tests for DetInferencer: construction, __call__, visualization, post-processing.

    NOTE(review): the bare tuple/list expressions preceding several test
    methods below look like @mock.patch(...) / @parameterized.expand(...)
    decorators whose '@decorator' syntax was lost during extraction — as
    written they are syntax errors; confirm against the upstream test file.
    """

    ('mmengine.infer.infer._load_checkpoint', return_value=None)
    def test_init(self, mock):
        # Init from a metafile alias and from an explicit config path.
        DetInferencer('rtmdet-t')
        DetInferencer('configs/yolox/yolox_tiny_8xb8-300e_coco.py')

    def assert_predictions_equal(self, preds1, preds2):
        """Assert two prediction lists agree (loose tolerance for boxes/scores)."""
        for (pred1, pred2) in zip(preds1, preds2):
            if ('bboxes' in pred1):
                self.assertTrue(np.allclose(pred1['bboxes'], pred2['bboxes'], 0.1))
            if ('scores' in pred1):
                self.assertTrue(np.allclose(pred1['scores'], pred2['scores'], 0.1))
            if ('labels' in pred1):
                self.assertTrue(np.allclose(pred1['labels'], pred2['labels']))
            if ('panoptic_seg_path' in pred1):
                self.assertTrue((pred1['panoptic_seg_path'] == pred2['panoptic_seg_path']))

    (['rtmdet-t', 'mask-rcnn_r50_fpn_1x_coco', 'panoptic_fpn_r50_fpn_1x_coco'])
    def test_call(self, model):
        img_path = 'tests/data/color.jpg'
        mock_load = Mock(return_value=None)
        # Patch checkpoint loading so no weights are fetched.
        with patch('mmengine.infer.infer._load_checkpoint', mock_load):
            inferencer = DetInferencer(model)
            if (model == 'panoptic_fpn_r50_fpn_1x_coco'):
                # The panoptic model needs explicit dataset meta for visualization.
                inferencer.visualizer.dataset_meta = {'classes': get_classes('coco_panoptic'), 'palette': 'random'}
            # Path input and ndarray input must yield the same predictions.
            res_path = inferencer(img_path, return_vis=True)
            img = mmcv.imread(img_path)
            res_ndarray = inferencer(img, return_vis=True)
            self.assert_predictions_equal(res_path['predictions'], res_ndarray['predictions'])
            self.assertIn('visualization', res_path)
            self.assertIn('visualization', res_ndarray)
            # Batched list inputs.
            img_paths = ['tests/data/color.jpg', 'tests/data/gray.jpg']
            res_path = inferencer(img_paths, return_vis=True)
            imgs = [mmcv.imread(p) for p in img_paths]
            res_ndarray = inferencer(imgs, return_vis=True)
            self.assert_predictions_equal(res_path['predictions'], res_ndarray['predictions'])
            self.assertIn('visualization', res_path)
            self.assertIn('visualization', res_ndarray)
            # Directory input: predictions must be independent of batch_size.
            img_dir = 'tests/data/VOCdevkit/VOC2007/JPEGImages/'
            res_bs1 = inferencer(img_dir, batch_size=1, return_vis=True)
            res_bs3 = inferencer(img_dir, batch_size=3, return_vis=True)
            self.assert_predictions_equal(res_bs1['predictions'], res_bs3['predictions'])
            if (model == 'rtmdet-t'):
                # Visualizations are only expected to be deterministic for rtmdet-t.
                for (res_bs1_vis, res_bs3_vis) in zip(res_bs1['visualization'], res_bs3['visualization']):
                    self.assertTrue(np.allclose(res_bs1_vis, res_bs3_vis))

    (['rtmdet-t', 'mask-rcnn_r50_fpn_1x_coco', 'panoptic_fpn_r50_fpn_1x_coco'])
    def test_visualize(self, model):
        img_paths = ['tests/data/color.jpg', 'tests/data/gray.jpg']
        mock_load = Mock(return_value=None)
        with patch('mmengine.infer.infer._load_checkpoint', mock_load):
            inferencer = DetInferencer(model)
            if (model == 'panoptic_fpn_r50_fpn_1x_coco'):
                inferencer.visualizer.dataset_meta = {'classes': get_classes('coco_panoptic'), 'palette': 'random'}
            # Visualizations must be written under <out_dir>/vis/<image name>.
            with tempfile.TemporaryDirectory() as tmp_dir:
                inferencer(img_paths, out_dir=tmp_dir)
                for img_dir in ['color.jpg', 'gray.jpg']:
                    self.assertTrue(osp.exists(osp.join(tmp_dir, 'vis', img_dir)))

    (['rtmdet-t', 'mask-rcnn_r50_fpn_1x_coco', 'panoptic_fpn_r50_fpn_1x_coco'])
    def test_postprocess(self, model):
        img_path = 'tests/data/color.jpg'
        mock_load = Mock(return_value=None)
        with patch('mmengine.infer.infer._load_checkpoint', mock_load):
            inferencer = DetInferencer(model)
            if (model == 'panoptic_fpn_r50_fpn_1x_coco'):
                inferencer.visualizer.dataset_meta = {'classes': get_classes('coco_panoptic'), 'palette': 'random'}
            # return_datasample=True keeps raw DetDataSample objects.
            res = inferencer(img_path, return_datasample=True)
            self.assertTrue(is_list_of(res['predictions'], DetDataSample))
            # Saved JSON predictions must round-trip to the in-memory result.
            with tempfile.TemporaryDirectory() as tmp_dir:
                res = inferencer(img_path, out_dir=tmp_dir, no_save_pred=False)
                dumped_res = mmengine.load(osp.join(tmp_dir, 'preds', 'color.json'))
                self.assertEqual(res['predictions'][0], dumped_res)

    ('mmengine.infer.infer._load_checkpoint', return_value=None)
    def test_pred2dict(self, mock):
        data_sample = DetDataSample()
        data_sample.pred_instances = InstanceData()
        data_sample.pred_instances.bboxes = np.array([[0, 0, 1, 1]])
        data_sample.pred_instances.labels = np.array([0])
        data_sample.pred_instances.scores = torch.FloatTensor([0.9])
        res = DetInferencer('rtmdet-t').pred2dict(data_sample)
        self.assertListAlmostEqual(res['bboxes'], [[0, 0, 1, 1]])
        self.assertListAlmostEqual(res['labels'], [0])
        self.assertListAlmostEqual(res['scores'], [0.9])

    def assertListAlmostEqual(self, list1, list2, places=7):
        """Recursive element-wise assertAlmostEqual for (nested) lists."""
        for i in range(len(list1)):
            if isinstance(list1[i], list):
                self.assertListAlmostEqual(list1[i], list2[i], places=places)
            else:
                self.assertAlmostEqual(list1[i], list2[i], places=places)
class NormalizationWrapper(torch.nn.Module):
    """Wraps a model so inputs are normalized as (x - mean) / std before the forward pass."""

    def __init__(self, model, mean, std):
        super().__init__()
        # Reshape stats to (..., C, 1, 1) so they broadcast over spatial dims.
        mean_buf = torch.tensor(mean)[..., None, None]
        std_buf = torch.tensor(std)[..., None, None]
        # Mirror the wrapped model's train/eval mode.
        self.train(model.training)
        self.model = model
        self.register_buffer('mean', mean_buf)
        self.register_buffer('std', std_buf)

    def forward(self, x, *args, **kwargs):
        # Normalize, then delegate everything else to the wrapped model.
        normalized = (x - self.mean) / self.std
        return self.model(normalized, *args, **kwargs)

    def state_dict(self, destination=None, prefix='', keep_vars=False):
        # Expose only the wrapped model's state (hides the mean/std buffers).
        return self.model.state_dict()
def plot_precision_recall_curve_sklearn(y_hat: np.ndarray, y_true: np.ndarray, y_hat_probs: np.ndarray, save_plot: bool=True, save_fpath: str='2022_02_02_precision_recall.pdf') -> tuple:
    """Compute (and optionally plot and save) a precision-recall curve.

    y_hat_probs holds the confidence of the *predicted* label, so for negative
    predictions it is converted to the positive-class probability (1 - p)
    before calling sklearn.

    Returns:
        (prec, recall, thresholds) from sklearn.metrics.precision_recall_curve,
        with precision passed through interp_prec.
    """
    y_true_list = []
    probas_pred = []
    for (y_hat_, y_true_, y_hat_prob_) in zip(y_hat, y_true, y_hat_probs):
        y_true_list.append(y_true_)
        # Convert confidence-in-prediction to P(class == 1).
        if (y_hat_ == 1):
            pos_prob = y_hat_prob_
        else:
            pos_prob = (1 - y_hat_prob_)
        probas_pred.append(pos_prob)
    (prec, recall, thresholds) = sklearn.metrics.precision_recall_curve(y_true=y_true_list, probas_pred=probas_pred, pos_label=1)
    # NOTE(review): presumably interp_prec interpolates/monotonizes precision — confirm.
    prec = interp_prec(prec)
    if save_plot:
        plt.style.use('ggplot')
        palette = np.array(sns.color_palette('hls', 5))
        color = palette[0]
        plt.plot(recall, prec, color=color)
        plt.xlabel('Recall')
        plt.ylabel('Precision')
        plt.tight_layout()
        plt.savefig(save_fpath, dpi=500)
        plt.close('all')
    return (prec, recall, thresholds)
class MultipleMetrics(object):
    """Bundles several metric objects and evaluates them all in one call.

    Metric *classes* passed in `metrics` are instantiated eagerly; instances
    are used as-is. Results are returned as a dict keyed by the prefixed
    metric name.
    """

    def __init__(self, metrics: List[Metric], prefix: str = ''):
        # Instantiate any metric that was passed as a class rather than an instance.
        self._metrics = [m() if isinstance(m, type) else m for m in metrics]
        self.prefix = prefix

    def reset(self):
        """Reset the internal state of every wrapped metric."""
        for metric in self._metrics:
            metric.reset()

    def __call__(self, y_pred: Tensor, y_true: Tensor) -> Dict:
        """Evaluate every metric on (y_pred, y_true) and collect the results."""
        logs = {}
        for metric in self._metrics:
            if isinstance(metric, Metric):
                logs[self.prefix + metric._name] = metric(y_pred, y_true)
            if isinstance(metric, TorchMetric):
                # Binary metrics get thresholded predictions; multiclass gets argmax.
                if metric.num_classes == 2:
                    metric.update(torch.round(y_pred).int(), y_true.int())
                if metric.num_classes > 2:
                    metric.update(torch.max(y_pred, dim=1).indices, y_true.int())
                logs[self.prefix + type(metric).__name__] = metric.compute().detach().cpu().numpy()
        return logs
class TFAutoModelForQuestionAnswering(object):
    """Factory that dispatches to the concrete TF QA model class for a given config.

    Not meant to be instantiated directly — use from_pretrained / from_config.

    NOTE(review): from_config and from_pretrained take `cls` and are clearly
    meant to be @classmethod; the decorators appear to have been stripped.
    """

    def __init__(self):
        raise EnvironmentError('TFAutoModelForQuestionAnswering is designed to be instantiated using the `TFAutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)` or `TFAutoModelForQuestionAnswering.from_config(config)` methods.')

    def from_config(cls, config):
        """Instantiate (without weights) the model class matching `config`'s type."""
        for (config_class, model_class) in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.items():
            if isinstance(config, config_class):
                return model_class(config)
        raise ValueError('Unrecognized configuration class {} for this kind of TFAutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()))))

    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Load config (if not supplied) and the pretrained weights for the matching class."""
        config = kwargs.pop('config', None)
        if (not isinstance(config, PretrainedConfig)):
            config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        for (config_class, model_class) in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.items():
            if isinstance(config, config_class):
                return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError('Unrecognized configuration class {} for this kind of TFAutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()))))
class DwsConvBlock(nn.Module):
    """Depthwise-separable convolution block: 3x3 depthwise conv followed by 1x1 pointwise conv."""

    def __init__(self, in_channels, out_channels, stride):
        super(DwsConvBlock, self).__init__()
        # Depthwise: per-channel 3x3 conv (out_channels == in_channels).
        self.dw_conv = dwconv3x3_block(in_channels=in_channels, out_channels=in_channels, stride=stride)
        # Pointwise: 1x1 conv mixing channels to out_channels.
        self.pw_conv = conv1x1_block(in_channels=in_channels, out_channels=out_channels)

    def forward(self, x):
        x = self.dw_conv(x)
        x = self.pw_conv(x)
        return x
def collate_custom(batch):
    """Recursively collate a list of samples into batched containers.

    Mirrors torch's default_collate, except strings are returned unbatched and
    mapping keys containing 'idx' are dropped.

    Args:
        batch: non-empty list of same-typed samples (tensors, arrays, numbers,
            strings, mappings, or sequences).

    Raises:
        TypeError: if the element type is not supported.
    """
    elem = batch[0]
    if isinstance(elem, np.int64):
        return np.stack(batch, 0)
    if isinstance(elem, torch.Tensor):
        return torch.stack(batch, 0)
    elif isinstance(elem, np.ndarray):
        return np.stack(batch, 0)
    # torch._six (int_classes/string_classes) was removed from torch; on
    # Python 3 these were simply int and (str, bytes).
    elif isinstance(elem, int):
        return torch.LongTensor(batch)
    elif isinstance(elem, float):
        return torch.FloatTensor(batch)
    elif isinstance(elem, (str, bytes)):
        return batch
    # collections.Mapping/Sequence aliases were removed in Python 3.10;
    # collections.abc is the correct home on all supported versions.
    elif isinstance(elem, collections.abc.Mapping):
        # Drop bookkeeping keys containing 'idx'.
        return {key: collate_custom([d[key] for d in batch]) for key in elem if (key.find('idx') < 0)}
    elif isinstance(elem, collections.abc.Sequence):
        transposed = zip(*batch)
        return [collate_custom(samples) for samples in transposed]
    raise TypeError('Type is {}'.format(type(elem)))
def header_properties(field_list, field_names):
    """Build PLY header lines: the vertex-count line plus one 'property' line per column.

    Args:
        field_list: arrays whose rows are vertices; each column is a property.
        field_names: flat list of property names, consumed in column order.
    """
    lines = ['element vertex %d' % field_list[0].shape[0]]
    name_idx = 0
    for fields in field_list:
        # Each column of the (possibly multi-column) array is its own property.
        for column in fields.T:
            lines.append('property %s %s' % (column.dtype.name, field_names[name_idx]))
            name_idx += 1
    return lines
def unit_vector(elevation_angle: np.float64, azimuthal_angle: np.float64) -> np.ndarray:
    """Unit direction vector for the given elevation and azimuth (both in degrees).

    Azimuth is measured from the +y axis toward +x; elevation is measured from
    the xy-plane toward +z.
    """
    elev = np.deg2rad(elevation_angle)
    azim = np.deg2rad(azimuthal_angle)
    cos_elev = np.cos(elev)
    return np.array([cos_elev * np.sin(azim), cos_elev * np.cos(azim), np.sin(elev)])
class NXDOManagerWithServer(NXDOManager):
    """NXDOManager that also exposes itself over a gRPC server on the given port."""

    def __init__(self, solve_restricted_game: SolveRestrictedGame, n_players: int=2, log_dir: str=None, manager_metadata: dict=None, port: int=4545):
        super(NXDOManagerWithServer, self).__init__(solve_restricted_game=solve_restricted_game, n_players=n_players, log_dir=log_dir, manager_metadata=manager_metadata)
        # Single worker thread: manager calls are serialized through the servicer.
        self._grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=1), options=[('grpc.max_send_message_length', GRPC_MAX_MESSAGE_LENGTH), ('grpc.max_receive_message_length', GRPC_MAX_MESSAGE_LENGTH)])
        servicer = _NXDOMangerServerServicerImpl(manager=self, stop_server_fn=self.stop_server)
        add_NXDOManagerServicer_to_server(servicer=servicer, server=self._grpc_server)
        # '[::]' binds on all interfaces (IPv4 and IPv6), unauthenticated.
        address = f'[::]:{port}'
        self._grpc_server.add_insecure_port(address)
        self._grpc_server.start()
        logger.info(f'NXDO Manager gRPC server listening at {address}')

    def wait_for_server_termination(self):
        """Block the calling thread until the gRPC server shuts down."""
        self._grpc_server.wait_for_termination()

    def stop_server(self):
        """Stop the gRPC server immediately (no grace period for in-flight RPCs)."""
        self._grpc_server.stop(grace=0)
class Orthogonal(Initializer):
    """Initializer producing (semi-)orthogonal weight matrices via SVD (Saxe et al., 2014)."""

    def __init__(self, gain=1.0):
        # 'relu' is shorthand for the recommended ReLU gain of sqrt(2).
        self.gain = np.sqrt(2) if gain == 'relu' else gain

    def sample(self, shape):
        """Draw an orthogonally-initialized array of the requested shape."""
        if len(shape) < 2:
            raise RuntimeError('Only shapes of length 2 or more are supported.')
        # Collapse trailing dims so we orthogonalize an ordinary 2-D matrix.
        flat_shape = (shape[0], np.prod(shape[1:]))
        gaussian = get_rng().normal(0.0, 1.0, flat_shape)
        u, _, v = np.linalg.svd(gaussian, full_matrices=False)
        # Pick whichever SVD factor matches the requested flat shape.
        basis = u if u.shape == flat_shape else v
        return floatX(self.gain * basis.reshape(shape))
class TestRPN(TestCase):
    """Build/forward tests for the RPN detector config (loss, predict, tensor modes).

    NOTE(review): the bare list expressions before the test methods look like
    @parameterized.expand(...) decorators whose decorator syntax was lost
    during extraction — as written they are no-op expressions; confirm
    against the upstream test file.
    """

    def setUp(self):
        register_all_modules()

    (['rpn/rpn_r50_fpn_1x_coco.py'])
    def test_init(self, cfg_file):
        model = get_detector_cfg(cfg_file)
        # Shrink the backbone so the test model is cheap to build.
        model.backbone.depth = 18
        model.neck.in_channels = [64, 128, 256, 512]
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        detector = MODELS.build(model)
        self.assertTrue(detector.backbone)
        self.assertTrue(detector.neck)
        self.assertTrue(detector.bbox_head)
        # RPN is class-agnostic: num_classes must be forced back to 1.
        model.rpn_head.num_classes = 2
        detector = MODELS.build(model)
        self.assertEqual(detector.bbox_head.num_classes, 1)

    ([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
    def test_rpn_forward_loss_mode(self, cfg_file, devices):
        model = get_detector_cfg(cfg_file)
        model.backbone.depth = 18
        model.neck.in_channels = [64, 128, 256, 512]
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        assert all([(device in ['cpu', 'cuda']) for device in devices])
        for device in devices:
            detector = MODELS.build(model)
            if (device == 'cuda'):
                if (not torch.cuda.is_available()):
                    # Skip the CUDA leg on CPU-only machines.
                    return unittest.skip('test requires GPU and torch+cuda')
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
            data = detector.data_preprocessor(packed_inputs, True)
            losses = detector.forward(**data, mode='loss')
            self.assertIsInstance(losses, dict)

    ([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
    def test_rpn_forward_predict_mode(self, cfg_file, devices):
        model = get_detector_cfg(cfg_file)
        model.backbone.depth = 18
        model.neck.in_channels = [64, 128, 256, 512]
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        assert all([(device in ['cpu', 'cuda']) for device in devices])
        for device in devices:
            detector = MODELS.build(model)
            if (device == 'cuda'):
                if (not torch.cuda.is_available()):
                    return unittest.skip('test requires GPU and torch+cuda')
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
            data = detector.data_preprocessor(packed_inputs, False)
            detector.eval()
            with torch.no_grad():
                batch_results = detector.forward(**data, mode='predict')
                self.assertEqual(len(batch_results), 2)
                self.assertIsInstance(batch_results[0], DetDataSample)

    ([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
    def test_rpn_forward_tensor_mode(self, cfg_file, devices):
        model = get_detector_cfg(cfg_file)
        model.backbone.depth = 18
        model.neck.in_channels = [64, 128, 256, 512]
        model.backbone.init_cfg = None
        from mmdet.registry import MODELS
        assert all([(device in ['cpu', 'cuda']) for device in devices])
        for device in devices:
            detector = MODELS.build(model)
            if (device == 'cuda'):
                if (not torch.cuda.is_available()):
                    return unittest.skip('test requires GPU and torch+cuda')
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
            data = detector.data_preprocessor(packed_inputs, False)
            batch_results = detector.forward(**data, mode='tensor')
            self.assertIsInstance(batch_results, tuple)
def clip_noise_schedule(alphas2, clip_value=0.001):
    """Clip per-step alpha ratios to [clip_value, 1] for sampling stability, then re-accumulate.

    Args:
        alphas2: cumulative alpha^2 noise schedule (1-D array).
        clip_value: lower bound for each step's ratio alphas2[t] / alphas2[t-1].
    """
    padded = np.concatenate([np.ones(1), alphas2], axis=0)
    # Per-step ratio of consecutive cumulative values.
    step_ratios = padded[1:] / padded[:-1]
    step_ratios = np.clip(step_ratios, a_min=clip_value, a_max=1.0)
    # Rebuild the cumulative schedule from the clipped steps.
    return np.cumprod(step_ratios, axis=0)
def test_rvalue_ref_param():
    """Check the bound C++ rvalue-reference overloads each return the string's length."""
    r = m.RValueRefParam()
    assert (r.func1('123') == 3)
    assert (r.func2('1234') == 4)
    assert (r.func3('12345') == 5)
    assert (r.func4('123456') == 6)
class HumanOutputFormat(KVWriter, SeqWriter):
    """Writes key/value logs as an ASCII table (and raw token sequences) to a file or stream."""

    def __init__(self, filename_or_file):
        if isinstance(filename_or_file, str):
            self.file = open(filename_or_file, 'wt')
            self.own_file = True  # we opened it, so close() must close it
        else:
            # NOTE(review): checks 'read' even though this class only writes —
            # looks like it should be 'write'; confirm against upstream before changing.
            assert hasattr(filename_or_file, 'read'), ('expected file or str, got %s' % filename_or_file)
            self.file = filename_or_file
            self.own_file = False

    def writekvs(self, kvs):
        """Render the key/value dict as a bordered, timestamped two-column table."""
        key2str = {}
        for (key, val) in sorted(kvs.items()):
            if isinstance(val, float):
                valstr = ('%-8.3g' % (val,))
            else:
                valstr = str(val)
            key2str[self._truncate(key)] = self._truncate(valstr)
        if (len(key2str) == 0):
            print('WARNING: tried to write empty key-value dict')
            return
        else:
            keywidth = max(map(len, key2str.keys()))
            valwidth = max(map(len, key2str.values()))
        # Top border carries the local timestamp embedded in the dashes.
        now = datetime.datetime.now(dateutil.tz.tzlocal())
        timestamp = now.strftime('%Y-%m-%d %H:%M:%S.%f %Z')
        dashes = ('-' * ((keywidth + valwidth) + 7))
        dashes_time = put_in_middle(dashes, timestamp)
        lines = [dashes_time]
        for (key, val) in sorted(key2str.items()):
            lines.append(('| %s%s | %s%s |' % (key, (' ' * (keywidth - len(key))), val, (' ' * (valwidth - len(val))))))
        lines.append(dashes)
        self.file.write(('\n'.join(lines) + '\n'))
        self.file.flush()

    def _truncate(self, s):
        # Strings longer than 33 chars are cut to 30 chars plus an ellipsis.
        return ((s[:30] + '...') if (len(s) > 33) else s)

    def writeseq(self, seq):
        """Write the sequence elements verbatim, then a newline."""
        for arg in seq:
            self.file.write(arg)
        self.file.write('\n')
        self.file.flush()

    def close(self):
        # Only close handles we opened ourselves.
        if self.own_file:
            self.file.close()
# NOTE(review): this line looks like a registration decorator missing its '@'
# (and possibly the registry callable's real name) — confirm against upstream.
_registry(pattern_type='Transformer2Dmodel_QKVReshapeTo4D')
class Transformer2Dmodel_QKVReshapeTo4D(Pattern):
    """Graph-rewrite pattern: fold a Shape/Gather/Concat-driven QKV reshape into
    a single Reshape node with a static 4-D dst_shape attribute."""

    def __call__(self, model):
        # 'in' describes the subgraph to match (three Shape->Gather->Unsqueeze
        # branches feeding one Concat that drives the final Reshape);
        # 'out' is the single replacement Reshape node.
        pattern_mapping_config = {'Transformer2Dmodel_QKVReshapeTo4D': [{'patterns': {'in': [[(0, 'Shape'), (1, 'Gather'), (2, 'Div'), (3, 'Cast'), (4, 'Cast'), (5, 'Unsqueeze'), (12, 'Concat'), (13, 'Reshape')], [(), (6, 'Shape'), (7, 'Gather'), (8, 'Unsqueeze'), (12, 'Concat')], [(), (9, 'Shape'), (10, 'Gather'), (11, 'Unsqueeze'), (12, 'Concat')]], 'out': [[(0, 'Reshape')]]}, 'search_mode': 'op_type', 'node_names': {0: 13}, 'input_tensors': {0: [[{13: [0]}, {'input_data': [2]}], [[0, 1], 2]]}, 'output_tensors': {0: [[{13: [0]}], [[0], 1]]}, 'returns': [12, 13, 0, 2]}]}
        for i in range(len(pattern_mapping_config['Transformer2Dmodel_QKVReshapeTo4D'])):
            pattern_dict = pattern_mapping_config['Transformer2Dmodel_QKVReshapeTo4D'][i]
            (model, new_node_names, ret_old_nodes) = util.pattern_mapping('Transformer2Dmodel_QKVReshapeTo4D', pattern_dict, model)
            if (len(new_node_names) != 0):
                logger.info('Transformer2Dmodel_QKVReshapeTo4D mathched...')
                logger.debug('Transformer2Dmodel_QKVReshapeTo4D = {}'.format(new_node_names))
                for j in range(len(new_node_names)):
                    # ret_old_nodes[j] = [Concat, Reshape, Shape, Div] per 'returns' above.
                    concat_node = ret_old_nodes[j][0]
                    concat_node_input_size = len(concat_node.input_tensors)
                    if (concat_node_input_size == 4):
                        # Hidden size comes from the weight of the MatMul feeding the reshape.
                        matmul_node_name = ret_old_nodes[j][1].input_tensors[0].source_op[0]
                        matmul_node = model.get_node_by_name(matmul_node_name)
                        if (matmul_node.op_type == 'MatMulWithBias'):
                            weight = matmul_node.input_tensors[1].data.shape[1]
                        elif (matmul_node.op_type == 'MatMul'):
                            weight = matmul_node.input_tensors[1].data.shape[1]
                        # The Div constant is the number of attention heads.
                        div_node = ret_old_nodes[j][3]
                        assert (div_node.op_type == 'Div')
                        div_value = int(div_node.input_tensors[1].data)
                        # Find the first Concat input that carries constant data.
                        # NOTE(review): '!= None' on tensor data risks elementwise
                        # comparison; 'is not None' would be safer.
                        constant_weight_idx = []
                        for idx in range(len(concat_node.input_tensors)):
                            if (concat_node.input_tensors[idx].data != None):
                                constant_weight_idx.append(idx)
                                break
                        constant_weight_idx = constant_weight_idx[0]
                        # Build dst_shape = [-1, ..., heads, ..., hidden/heads].
                        attr = OrderedDict()
                        attr['dst_shape'] = []
                        # NOTE(review): this loop variable shadows the outer 'i'.
                        for i in range(concat_node_input_size):
                            if (i == constant_weight_idx):
                                attr['dst_shape'].append(str(div_value))
                            elif (i == 3):
                                attr['dst_shape'].append(str(int((weight / div_value))))
                            else:
                                attr['dst_shape'].append('-1')
                        attr['dst_shape'] = ','.join(attr['dst_shape'])
                        attr['dims'] = 0
                        reshape_node_idx = model.get_node_id(new_node_names[j][0])
                        model.nodes[reshape_node_idx].attr = attr
        return model
def main():
    """Run the tracker over the configured dataset and dump per-video results.

    VOT-style datasets use the supervised protocol (re-init 5 frames after a
    loss); all other datasets run one-pass evaluation. Result files follow
    each benchmark's expected layout under 'results/'.
    """
    cfg.merge_from_file(args.config)
    dataset_root = os.path.join(your_dataset_path, args.dataset)
    model = ModelBuilder()
    model = load_pretrain(model, args.snapshot).cuda().eval()
    tracker = build_tracker(model)
    dataset = DatasetFactory.create_dataset(name=args.dataset, dataset_root=dataset_root, load_img=False)
    # Encode the tracking hyper-parameters into the result folder name.
    model_name = args.snapshot.split('/')[(- 1)].split('.')[0]
    model_name = (((model_name + '_pk-{:.3f}'.format(cfg.TRACK.PENALTY_K)) + '_wi-{:.3f}'.format(cfg.TRACK.WINDOW_INFLUENCE)) + '_lr-{:.3f}'.format(cfg.TRACK.LR))
    total_lost = 0
    if (args.dataset in ['VOT2016', 'VOT2018', 'VOT2019']):
        # VOT supervised protocol: restart the tracker after each failure.
        for (v_idx, video) in enumerate(dataset):
            if (args.video != ''):
                # Optionally restrict the run to a single named video.
                if (video.name != args.video):
                    continue
            frame_counter = 0
            lost_number = 0
            toc = 0
            pred_bboxes = []
            for (idx, (img, gt_bbox)) in enumerate(video):
                if (len(gt_bbox) == 4):
                    # Convert (x, y, w, h) to the 8-value polygon format.
                    gt_bbox = [gt_bbox[0], gt_bbox[1], gt_bbox[0], ((gt_bbox[1] + gt_bbox[3]) - 1), ((gt_bbox[0] + gt_bbox[2]) - 1), ((gt_bbox[1] + gt_bbox[3]) - 1), ((gt_bbox[0] + gt_bbox[2]) - 1), gt_bbox[1]]
                tic = cv2.getTickCount()
                if (idx == frame_counter):
                    # (Re-)initialize on this frame; VOT marks init frames with 1.
                    (cx, cy, w, h) = get_axis_aligned_bbox(np.array(gt_bbox))
                    gt_bbox_ = [(cx - ((w - 1) / 2)), (cy - ((h - 1) / 2)), w, h]
                    tracker.init(img, gt_bbox_)
                    pred_bbox = gt_bbox_
                    pred_bboxes.append(1)
                elif (idx > frame_counter):
                    outputs = tracker.track(img)
                    pred_bbox = outputs['bbox']
                    overlap = vot_overlap(pred_bbox, gt_bbox, (img.shape[1], img.shape[0]))
                    if (overlap > 0):
                        pred_bboxes.append(pred_bbox)
                    else:
                        # Lost: VOT marks failures with 2 and re-inits 5 frames later.
                        pred_bboxes.append(2)
                        frame_counter = (idx + 5)
                        lost_number += 1
                else:
                    # Frames skipped while waiting for re-init are marked 0.
                    pred_bboxes.append(0)
                toc += (cv2.getTickCount() - tic)
            toc /= cv2.getTickFrequency()
            video_path = os.path.join('results', args.dataset, model_name, 'baseline', video.name)
            if (not os.path.isdir(video_path)):
                os.makedirs(video_path)
            result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))
            with open(result_path, 'w') as f:
                for x in pred_bboxes:
                    if isinstance(x, int):
                        f.write('{:d}\n'.format(x))
                    else:
                        f.write((','.join([vot_float2str('%.4f', i) for i in x]) + '\n'))
            print('({:3d}) Video: {:12s} Time: {:4.1f}s Speed: {:3.1f}fps Lost: {:d}'.format((v_idx + 1), video.name, toc, (idx / toc), lost_number))
            total_lost += lost_number
        print('{:s} total lost: {:d}'.format(model_name, total_lost))
    else:
        # One-pass evaluation (OPE) for non-VOT datasets.
        for (v_idx, video) in enumerate(dataset):
            if (args.video != ''):
                if (video.name != args.video):
                    continue
            toc = 0
            pred_bboxes = []
            scores = []
            track_times = []
            for (idx, (img, gt_bbox)) in enumerate(video):
                tic = cv2.getTickCount()
                if (idx == 0):
                    (cx, cy, w, h) = get_axis_aligned_bbox(np.array(gt_bbox))
                    gt_bbox_ = [(cx - ((w - 1) / 2)), (cy - ((h - 1) / 2)), w, h]
                    tracker.init(img, gt_bbox_)
                    pred_bbox = gt_bbox_
                    scores.append(None)
                    if ('VOT2018-LT' == args.dataset):
                        pred_bboxes.append([1])
                    else:
                        pred_bboxes.append(pred_bbox)
                else:
                    outputs = tracker.track(img)
                    pred_bbox = outputs['bbox']
                    pred_bboxes.append(pred_bbox)
                    scores.append(outputs['best_score'])
                toc += (cv2.getTickCount() - tic)
                track_times.append(((cv2.getTickCount() - tic) / cv2.getTickFrequency()))
            toc /= cv2.getTickFrequency()
            # Each benchmark expects its own result-file layout.
            if ('VOT2018-LT' == args.dataset):
                video_path = os.path.join('results', args.dataset, model_name, 'longterm', video.name)
                if (not os.path.isdir(video_path)):
                    os.makedirs(video_path)
                result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))
                with open(result_path, 'w') as f:
                    for x in pred_bboxes:
                        f.write((','.join([str(i) for i in x]) + '\n'))
                result_path = os.path.join(video_path, '{}_001_confidence.value'.format(video.name))
                with open(result_path, 'w') as f:
                    for x in scores:
                        (f.write('\n') if (x is None) else f.write('{:.6f}\n'.format(x)))
                result_path = os.path.join(video_path, '{}_time.txt'.format(video.name))
                with open(result_path, 'w') as f:
                    for x in track_times:
                        f.write('{:.6f}\n'.format(x))
            elif ('GOT-10k' == args.dataset):
                video_path = os.path.join('results', args.dataset, model_name, video.name)
                if (not os.path.isdir(video_path)):
                    os.makedirs(video_path)
                result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))
                with open(result_path, 'w') as f:
                    for x in pred_bboxes:
                        f.write((','.join([str(i) for i in x]) + '\n'))
                result_path = os.path.join(video_path, '{}_time.txt'.format(video.name))
                with open(result_path, 'w') as f:
                    for x in track_times:
                        f.write('{:.6f}\n'.format(x))
            else:
                model_path = os.path.join('results', args.dataset, model_name)
                if (not os.path.isdir(model_path)):
                    os.makedirs(model_path)
                result_path = os.path.join(model_path, '{}.txt'.format(video.name))
                with open(result_path, 'w') as f:
                    for x in pred_bboxes:
                        f.write((','.join([str(i) for i in x]) + '\n'))
            print('({:3d}) Video: {:12s} Time: {:5.1f}s Speed: {:3.1f}fps'.format((v_idx + 1), video.name, toc, (idx / toc)))
class LinearSchedule(ScalarSchedule):
    """Linearly ramps a scalar from init_value to final_value over ramp_duration steps, then holds."""

    def __init__(self, init_value, final_value, ramp_duration):
        self._init_value = init_value
        self._final_value = final_value
        self._ramp_duration = ramp_duration

    def get_value(self, t):
        """Value of the schedule at step t (clamped to final_value once the ramp ends)."""
        # Fraction of the ramp completed, capped at 1.0.
        progress = min(1.0, (t * 1.0) / self._ramp_duration)
        return self._init_value + (self._final_value - self._init_value) * progress
def _split_string_to_tokens(text):
    """Split *text* into tokens at alphanumeric/non-alphanumeric boundaries.

    Maximal runs of characters from _ALPHANUMERIC_CHAR_SET and runs of other
    characters alternate as tokens; a token that is exactly a single space is
    dropped unless it starts the string.

    Args:
        text: unicode string to tokenize.

    Returns:
        List of token strings (empty list for empty input).
    """
    if not text:
        return []
    ret = []
    token_start = 0
    # Classify every character once up front.
    is_alnum = [(c in _ALPHANUMERIC_CHAR_SET) for c in text]
    # BUG FIX: the original used Python 2's 'xrange', which is a NameError on
    # Python 3; 'range' is the equivalent here.
    for pos in range(1, len(text)):
        if is_alnum[pos] != is_alnum[pos - 1]:
            token = text[token_start:pos]
            # Single inter-token spaces are implicit separators and are dropped.
            if (token != u' ') or (token_start == 0):
                ret.append(token)
            token_start = pos
    final_token = text[token_start:]
    ret.append(final_token)
    return ret
def vgg19_bn(pretrained=False, **kwargs):
    """Build a VGG-19 model with batch normalization.

    Args:
        pretrained: if True, load ImageNet-pretrained weights from the model zoo.
        **kwargs: forwarded to the VGG constructor.
    """
    # cfg['E'] is the 19-layer VGG configuration.
    model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['vgg19_bn']))
    return model
def test_gym_environment(env, ctxt=None, seed=1):
    """Train TRPO with a categorical MLP policy on *env* as a smoke test.

    Args:
        env: garage-wrapped gym environment.
        ctxt: snapshot config passed through to LocalTFRunner.
        seed: RNG seed for reproducibility.
    """
    set_seed(seed)
    with LocalTFRunner(snapshot_config=ctxt) as runner:
        policy = CategoricalMLPPolicy(name='policy', env_spec=env.spec, hidden_sizes=(32, 32))
        # NOTE(review): obs_var appears unused below — confirm whether it is
        # needed by the surrounding benchmark harness.
        obs_var = tf.compat.v1.placeholder(tf.float32, shape=[None, None, env.observation_space.flat_dim], name='obs')
        baseline = LinearFeatureBaseline(env_spec=env.spec)
        algo = TRPO(env_spec=env.spec, policy=policy, baseline=baseline, max_path_length=hyper_parameters['max_path_length'], discount=hyper_parameters['discount'], gae_lambda=hyper_parameters['gae_lambda'], max_kl_step=hyper_parameters['max_kl'])
        runner.setup(algo, env)
        runner.train(n_epochs=hyper_parameters['n_epochs'], batch_size=hyper_parameters['batch_size'])
class SupervisionStrategy(ABC):
    """Interface for supervision strategies operating on correlation matrices.

    NOTE(review): the methods only 'pass' — upstream likely marks them
    @abstractmethod; the decorators may have been stripped during extraction.
    """

    def get_image_pair(self, batch, *args):
        """Select/construct the (source, target) image pair from a batch."""
        pass

    def get_correlation(self, correlation_matrix):
        """Post-process the raw correlation matrix for this strategy."""
        pass

    def compute_loss(self, correlation_matrix, *args):
        """Compute the training loss from the correlation matrix."""
        pass
class ManualTask(ABSTask):
    """Task whose goals are supplied manually via the '<ns>manual_goal' ROS topic."""

    def __init__(self, ns: str, obstacles_manager: ObstaclesManager, robot_manager: RobotManager):
        super().__init__(obstacles_manager, robot_manager)
        self.ns = ns
        self.ns_prefix = ('' if (ns == '') else (('/' + ns) + '/'))
        rospy.Subscriber(f'{self.ns}manual_goal', Pose2D, self._set_goal_callback)
        self._goal = Pose2D()
        self._new_goal_received = False
        # Condition guarding _goal/_new_goal_received between the ROS callback
        # thread and reset().
        self._manual_goal_con = Condition()

    def reset(self):
        """Randomize obstacles and start pose, then block until a manual goal arrives.

        Raises:
            Exception: if no goal is received within 60 seconds.
        """
        while True:
            with self._map_lock:
                self.obstacles_manager.reset_pos_obstacles_random()
                self.robot_manager.set_start_pos_random()
                with self._manual_goal_con:
                    # BUG FIX: Condition.wait_for expects a callable predicate;
                    # the original passed the bool value itself, which raises
                    # TypeError ('bool' object is not callable).
                    self._manual_goal_con.wait_for(lambda: self._new_goal_received, timeout=60)
                    if (not self._new_goal_received):
                        raise Exception("TimeOut, User doesn't provide goal position!")
                    else:
                        self._new_goal_received = False
                    try:
                        self.robot_manager.publish_goal(self._goal.x, self._goal.y, self._goal.theta)
                    except Exception as e:
                        rospy.logwarn(repr(e))

    def _set_goal_callback(self, goal: Pose2D):
        # Runs on the ROS subscriber thread: store the goal and wake reset().
        with self._manual_goal_con:
            self._goal = goal
            self._new_goal_received = True
            self._manual_goal_con.notify()
class TensorboardLogger(object):
    """Thin wrapper around tensorboardX.SummaryWriter.

    The logger is a no-op until create() is called; unknown attribute
    access is delegated to the underlying writer (or to noop when absent).
    """

    def __init__(self):
        self._logger = None  # underlying SummaryWriter; None => disabled
        self._global_step = 0

    def create(self, path):
        """Open a SummaryWriter that logs under `path`."""
        self._logger = tensorboardX.SummaryWriter(path)

    def noop(self, *args, **kwargs):
        """Stand-in for any writer method while logging is disabled."""
        return

    def step(self):
        """Advance the global step counter by one."""
        self._global_step += 1

    def global_step(self):
        return self._global_step

    def log_scaler_dict(self, log_dict, prefix=''):
        """Recursively log a (possibly nested) dict of scalars at the current step."""
        if (self._logger is None):
            return
        if prefix:
            prefix = f'{prefix}_'
        for (name, value) in log_dict.items():
            if isinstance(value, dict):
                # BUG FIX: the old call passed self._global_step as the second
                # positional argument, which collided with the `prefix` keyword
                # and raised TypeError for any nested dict.
                self.log_scaler_dict(value, prefix=f'{prefix}{name}')
            else:
                self._logger.add_scalar(f'{prefix}{name}', value, self._global_step)

    def __getattr__(self, name):
        # Delegate everything else to the writer; no-op while disabled.
        if (self._logger is None):
            return self.noop
        return self._logger.__getattribute__(name)
class BaseTask():
    def __init__(self, work_root: Optional[Union[(str, Path)]], data: dict, model_builder: Callable, train_builder: Callable, evaluator: BaseEvaluator, device: torch.device, structure_builder: Optional[Callable]=None, study_name: Optional[str]=None, overwrite: bool=True):
        """Set up the study directory, cache directory and optuna logging.

        The study lives in `<work_root>/<study_name>` (timestamped when no
        name is given); with `overwrite` an existing study dir is wiped.
        """
        self.data = data
        self.model_builder = model_builder
        self.train_builder = train_builder
        self.structure_builder = structure_builder
        self.evaluator = evaluator
        self.device = device
        self.study = None
        if (study_name is None):
            # Default study name: creation timestamp.
            self.study_name = time.strftime('%Y-%m-%d--%H-%M-%S', time.localtime())
        else:
            self.study_name = study_name
        work_root = Path(work_root)
        self.study_root = (work_root / self.study_name)
        if (overwrite and self.study_root.exists()):
            shutil.rmtree(self.study_root)
        self.log_file = (self.study_root / 'log.txt')
        self.cache_root = (self.study_root / 'cache')
        if (not work_root.exists()):
            # Create work_root only when its parent exists (one missing level).
            if work_root.parent.exists():
                work_root.mkdir(exist_ok=True)
            else:
                raise ValueError(f'The work_root {work_root} does not exist.')
        self.study_root.mkdir(exist_ok=True)
        self.cache_root.mkdir(exist_ok=True)
        # Attach a per-study file handler to optuna's logger.
        self.logger = optuna.logging.get_logger('optuna')
        self.logger.setLevel(logging.INFO)
        out_file_handler = logging.FileHandler(self.log_file, mode='a', encoding='utf8')
        out_file_handler.setFormatter(default_log_formatter())
        self.logger.addHandler(out_file_handler)
        self.logger.info(f'Logs will be saved to {self.log_file.absolute()}')
        self.logger.info(f'Files in training will be saved in {self.study_root.absolute()}')
def experiment(self, trial: optuna.Trial):
if (self.structure_builder is not None):
self.data['structure'] = self.structure_builder(trial).to(self.device)
model = self.model_builder(trial).to(self.device)
train_configs: dict = self.train_builder(trial, model)
assert ('optimizer' in train_configs.keys())
optimizer = train_configs['optimizer']
assert ('criterion' in train_configs.keys())
criterion = train_configs['criterion']
scheduler = train_configs.get('scheduler', None)
best_model = None
if (self.direction == 'maximize'):
best_score = (- float('inf'))
else:
best_score = float('inf')
for epoch in range(self.max_epoch):
self.train(self.data, model, optimizer, criterion)
val_res = self.validate(self.data, model)
trial.report(val_res, epoch)
if trial.should_prune():
raise optuna.exceptions.TrialPruned()
if (scheduler is not None):
scheduler.step()
if (self.direction == 'maximize'):
if (val_res > best_score):
best_score = val_res
best_model = deepcopy(model)
with open((self.cache_root / f'{trial.number}_model.pth'), 'wb') as f:
torch.save(best_model.cpu().state_dict(), f)
self.data['structure'].save((self.cache_root / f'{trial.number}_structure.dhg'))
return best_score
    def _remove_cached_data(self):
        """Delete cached checkpoints of every trial except the best one."""
        if (self.study is not None):
            for filename in self.cache_root.glob('*'):
                # Cache files are named '<trial_number>_<kind>.<ext>'.
                if (filename.stem.split('_')[0] != str(self.study.best_trial.number)):
                    filename.unlink()
    def run(self, max_epoch: int, num_trials: int=1, direction: str='maximize'):
        """Run the hyper-parameter search, reload the best trial, and test it.

        Args:
            max_epoch: epochs per trial.
            num_trials: number of Optuna trials.
            direction: 'maximize' or 'minimize' the validation metric.
        """
        self.logger.info(f'Random seed is {dhg.random.seed()}')
        sampler = TPESampler(seed=dhg.random.seed())
        (self.max_epoch, self.direction) = (max_epoch, direction)
        self.study = optuna.create_study(direction=direction, sampler=sampler)
        # NOTE(review): timeout=600 hard-caps the whole search at 10 minutes
        # regardless of num_trials — confirm this is intended.
        self.study.optimize(self.experiment, n_trials=num_trials, timeout=600)
        self._remove_cached_data()
        # Rebuild the best model/structure from the cached checkpoint.
        self.best_model = self.model_builder(self.study.best_trial)
        self.best_model.load_state_dict(torch.load(f'{self.cache_root}/{self.study.best_trial.number}_model.pth'))
        self.best_structure = load_structure(f'{self.cache_root}/{self.study.best_trial.number}_structure.dhg')
        self.best_model = self.best_model.to(self.device)
        self.best_structure = self.best_structure.to(self.device)
        self.logger.info('Best trial:')
        self.best_trial = self.study.best_trial
        self.logger.info(f'  Value: {self.best_trial.value:.3f}')
        self.logger.info(f'  Params:')
        for (key, value) in self.best_trial.params.items():
            self.logger.info(f'    {key} |-> {value}')
        test_res = self.test()
        self.logger.info(f'Final test results:')
        for (key, value) in test_res.items():
            self.logger.info(f'  {key} |-> {value:.3f}')
    def train(self, data: dict, model: nn.Module, optimizer: torch.optim.Optimizer, criterion: nn.Module):
        """Train `model` for one epoch; to be provided by subclasses."""
        # NOTE(review): `_grad()` is not defined anywhere visible — this looks
        # like a mangled decorator (e.g. `@torch.no_grad()`) or a placeholder
        # body; confirm against the original file.
        _grad()

    def validate(self, data: dict, model: nn.Module):
        """Evaluate `model` on validation data; to be provided by subclasses."""
        # NOTE(review): same `_grad()` artifact as in train() above.
        _grad()
def test(self, data: Optional[dict]=None, model: Optional[nn.Module]=None): |
def get_data_layer(roidb, num_classes):
    """Return the training data layer for the given roidb.

    Both the RPN and non-RPN configurations currently use RoIDataLayer;
    the multiscale Caffe path is not implemented.
    """
    if cfg.TRAIN.HAS_RPN:
        if cfg.IS_MULTISCALE:
            # BUG FIX: `raise <str>` is a TypeError in Python 3; raise a
            # real exception carrying the original message instead.
            raise NotImplementedError('Calling caffe modules...')
        layer = RoIDataLayer(roidb, num_classes)
    else:
        layer = RoIDataLayer(roidb, num_classes)
    return layer
def dot(l1=None, l2=None):
    """Return the dot product of two equal-length numeric sequences.

    BUG FIX: the old signature used mutable list defaults (`l1=[], l2=[]`),
    a classic shared-state hazard; None sentinels preserve the
    "no arguments -> 0" behavior safely. As before, an l2 shorter than l1
    raises IndexError.
    """
    if l1 is None:
        l1 = []
    if l2 is None:
        l2 = []
    return sum((x * l2[i]) for (i, x) in enumerate(l1))
def resnet34(num_classes=1000, pretrained='imagenet'):
    """Build a (possibly pretrained) modified ResNet-34.

    Args:
        num_classes: size of the classifier head.
        pretrained: key into pretrained_settings['resnet34'], or None for
            random initialization.
    """
    net = models.resnet34(pretrained=False)
    if pretrained is not None:
        weight_settings = pretrained_settings['resnet34'][pretrained]
        net = load_pretrained(net, num_classes, weight_settings)
    return modify_resnets(net)
class DependenceData(object):
    """Record of a module and its initialized/name lists.

    BUG FIX: the old defaults were mutable lists shared across every
    instance created without explicit arguments, so appending to one
    instance's list mutated all of them. None sentinels give each
    instance a fresh list.
    """

    def __init__(self, module='', initialized_list=None, name_list=None):
        self.module = module
        self.name_list = (name_list if (name_list is not None) else [])
        self.initialized_list = (initialized_list if (initialized_list is not None) else [])
def get_feature_iterator(feature_type, checkpoint_path, layer, manifest_path, sample_pct):
    """Build a generator factory over per-file features listed in a manifest.

    The manifest's first line is the audio root; each following line is a
    tab-separated relative path. Returns (iterate, num_files) where
    iterate() yields one numpy feature array per file.
    """
    reader_cls = get_feature_reader(feature_type)
    with open(manifest_path, 'r') as fp:
        manifest_lines = fp.read().split('\n')
    root = manifest_lines.pop(0).strip()
    paths = [os.path.join(root, ln.split('\t')[0]) for ln in manifest_lines if len(ln) > 0]
    if sample_pct < 1.0:
        # Random subsample of the file list.
        paths = random.sample(paths, int(sample_pct * len(paths)))
    reader = reader_cls(checkpoint_path=checkpoint_path, layer=layer)

    def iterate():
        for file_path in paths:
            yield reader.get_feats(file_path).cpu().numpy()

    return (iterate, len(paths))
def run(args, error_queue):
    """Run single_process_main, forwarding failures to `error_queue`.

    KeyboardInterrupt is swallowed (the parent process handles shutdown);
    any other exception is reported as (rank, formatted traceback).
    """
    try:
        single_process_main(args)
    except KeyboardInterrupt:
        pass
    except Exception:
        # Import lazily: only needed on the failure path.
        import traceback
        error_queue.put((args.rank, traceback.format_exc()))
def triangle(x, loc=0, size=0.5, area=1):
    """Triangular kernel centered at `loc` with half-width `size`.

    Returns 0 outside [loc - size, loc + size]; inside, a linear ramp that
    peaks at abs(area / size) at x == loc.
    """
    u = abs((x - loc) / size)
    if u > 1:
        return 0
    return (1 - u) * abs(area / size)
def resume_checkpoint(model, optimizer, checkpoint_filename, opt, map_location='cpu'):
    """Restore model/optimizer state from a checkpoint file.

    Sets `opt.start_epoch` to the epoch after the saved one and returns
    (model, optimizer, training_status_info, opt).
    """
    logging.info('resuming from ' + checkpoint_filename)
    ckpt = torch.load(checkpoint_filename, map_location=map_location)
    assert 'state_dict' in ckpt
    model.load_state_dict(ckpt['state_dict'], strict=True)
    optimizer.load_state_dict(ckpt['optimizer'])
    opt.start_epoch = ckpt['epoch'] + 1
    status_info = ckpt['training_status_info']
    return (model, optimizer, status_info, opt)
class GridWorldEnv(Env):
    """Deterministic 2-D grid world with 5 actions (up/right/down/left/stay).

    States are integers 0..n_states-1 laid out row-major on a grid of
    `shape = [rows, cols]`; observations are identical to states.
    """

    UP = 0
    RIGHT = 1
    DOWN = 2
    LEFT = 3
    STAY = 4
    CONTROL_NAMES = ['UP', 'RIGHT', 'DOWN', 'LEFT', 'STAY']

    def __init__(self, shape=[2, 2], init_state=None):
        # NOTE: the default list is never mutated, so sharing it is safe here.
        self.shape = shape
        self.n_states = np.prod(shape)
        self.n_observations = self.n_states
        self.n_control = 5
        self.max_y = shape[0]
        self.max_x = shape[1]
        self._build()
        self.set_init_state(init_state)
        self.last_action = None

    def reset(self, init_state=None):
        """Reset to `init_state` (or a random state) and return the new state."""
        self.set_init_state(init_state)
        self.last_action = None
        return self.state

    def set_state(self, state):
        self.state = state
        return state

    def step(self, action):
        """Apply `action`, update and return the resulting state."""
        state = self.P[self.state][action]
        self.state = state
        self.last_action = action
        return state

    def render(self, title=None):
        """Plot the current position as a one-hot heatmap."""
        values = np.zeros(self.shape)
        values[self.position] = 1.0
        (_, ax) = plt.subplots(figsize=(3, 3))
        if ((self.shape[0] == 1) or (self.shape[1] == 1)):
            ax.imshow(values, cmap='OrRd')
        else:
            _ = sns.heatmap(values, cmap='OrRd', linewidth=2.5, cbar=False, ax=ax)
        plt.xticks(range(self.shape[1]))
        plt.yticks(range(self.shape[0]))
        if (title is not None):
            plt.title(title)
        plt.show()

    def set_init_state(self, init_state=None):
        """Set (and validate) the initial state; random when None."""
        if (init_state is not None):
            # BUG FIX: validate the type before the range comparison so a
            # non-numeric argument raises the intended ValueError instead of
            # a TypeError from comparing, e.g., str with int.
            if (not isinstance(init_state, (int, float))):
                raise ValueError('`init_state` must be [int/float]')
            if ((init_state > (self.n_states - 1)) or (init_state < 0)):
                raise ValueError('`init_state` is greater than number of states')
            self.init_state = int(init_state)
        else:
            self.init_state = np.random.randint(0, self.n_states)
        self.state = self.init_state

    def _build(self):
        """Build the deterministic transition table P[state][action] -> next state."""
        P = {}
        grid = np.arange(self.n_states).reshape(self.shape)
        it = np.nditer(grid, flags=['multi_index'])
        while (not it.finished):
            s = it.iterindex
            (y, x) = it.multi_index
            P[s] = {a: [] for a in range(self.n_control)}
            # Moves that would leave the grid keep the agent in place.
            next_up = (s if (y == 0) else (s - self.max_x))
            next_right = (s if (x == (self.max_x - 1)) else (s + 1))
            next_down = (s if (y == (self.max_y - 1)) else (s + self.max_x))
            next_left = (s if (x == 0) else (s - 1))
            next_stay = s
            P[s][self.UP] = next_up
            P[s][self.RIGHT] = next_right
            P[s][self.DOWN] = next_down
            P[s][self.LEFT] = next_left
            P[s][self.STAY] = next_stay
            it.iternext()
        self.P = P

    def get_init_state_dist(self, init_state=None):
        """Return a one-hot distribution over states for the initial state."""
        init_state_dist = np.zeros(self.n_states)
        if (init_state is None):
            init_state_dist[self.init_state] = 1.0
        else:
            init_state_dist[init_state] = 1.0
        # BUG FIX: the distribution was built but never returned.
        return init_state_dist

    def get_transition_dist(self):
        """Return the one-hot transition tensor B[next_state, state, action]."""
        B = np.zeros([self.n_states, self.n_states, self.n_control])
        for s in range(self.n_states):
            for a in range(self.n_control):
                ns = int(self.P[s][a])
                B[(ns, s, a)] = 1
        return B

    def get_likelihood_dist(self):
        """Return the identity observation likelihood (fully observable)."""
        A = np.eye(self.n_observations, self.n_states)
        return A

    def sample_action(self):
        """Sample a uniformly random action index."""
        return np.random.randint(self.n_control)

    @property
    def position(self):
        # BUG FIX: render() indexes with `self.position` as an attribute, so
        # this must be a property; without the decorator (apparently lost in
        # this copy, like other stripped decorators nearby) render() indexed
        # with the bound method object and failed.
        return np.unravel_index(np.array(self.state), self.shape)
class _ConvNdKernel(Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding, groups, bias):
super(_ConvNdKernel, self).__init__()
if ((in_channels % groups) != 0):
raise ValueError('in_channels must be divisible by groups')
if ((out_channels % groups) != 0):
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = transposed
self.output_padding = output_padding
self.groups = groups
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = (1.0 / math.sqrt(n))
if (self.bias is not None):
self.bias.data.uniform_((- stdv), stdv)
def __repr__(self):
s = '{name}({in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}'
if (self.padding != ((0,) * len(self.padding))):
s += ', padding={padding}'
if (self.dilation != ((1,) * len(self.dilation))):
s += ', dilation={dilation}'
if (self.output_padding != ((0,) * len(self.output_padding))):
s += ', output_padding={output_padding}'
if (self.groups != 1):
s += ', groups={groups}'
if (self.bias is None):
s += ', bias=False'
s += ')'
return s.format(name=self.__class__.__name__, **self.__dict__) |
def data_collator(features: list) -> dict:
    """Pad a batch of causal-LM features to the longest sequence.

    Prompt positions (before seq_len - 1) and padding are masked with -100
    in the labels; sequences are ordered longest-first before stacking.
    Relies on the module-level `tokenizer` for the pad token id.
    """
    lengths = [len(feature['input_ids']) for feature in features]
    max_len = max(lengths)
    batch_ids = []
    batch_labels = []
    for (length, feature) in sorted(zip(lengths, features), key=(lambda pair: (- pair[0]))):
        ids = feature['input_ids']
        seq_len = feature['seq_len']
        pad_count = max_len - length
        labels = (([(- 100)] * (seq_len - 1)) + ids[(seq_len - 1):]) + ([(- 100)] * pad_count)
        padded = ids + ([tokenizer.pad_token_id] * pad_count)
        batch_ids.append(torch.LongTensor(padded))
        batch_labels.append(torch.LongTensor(labels))
    return {'input_ids': torch.stack(batch_ids), 'labels': torch.stack(batch_labels)}
def test_actionAngle_input_wrongunits():
    """Check that actionAngleSpherical rejects inputs carrying wrong astropy units."""
    from galpy.actionAngle import actionAngleSpherical
    from galpy.potential import PlummerPotential
    pot = PlummerPotential(normalize=1.0, b=0.7)
    aA = actionAngleSpherical(pot=pot, ro=8.0, vo=220.0)
    # First argument (a radius) given in Gyr, a time unit -> must raise.
    with pytest.raises(units.UnitConversionError) as excinfo:
        aA((1.0 * units.Gyr), ((0.1 * units.km) / units.s), ((1.1 * units.km) / units.s), (0.1 * units.kpc), ((0.2 * units.km) / units.s), (0.1 * units.rad))
    # Second argument (a velocity) given in Gyr -> must raise.
    with pytest.raises(units.UnitConversionError) as excinfo:
        aA((1.0 * units.kpc), (0.1 * units.Gyr), ((1.1 * units.km) / units.s), (0.1 * units.kpc), ((0.2 * units.km) / units.s), (0.1 * units.rad))
    return None
class Data():
    """Loader for triple-format KG datasets (train/valid/test .txt splits).

    Each split file holds whitespace-separated (head, relation, tail)
    triples, one per line. `data_dir` must end with a path separator since
    filenames are built by plain concatenation.
    """

    def __init__(self, data_dir='data/FB15k-237/', reverse=False):
        self.train_data = self.load_data(data_dir, 'train', reverse=reverse)
        self.valid_data = self.load_data(data_dir, 'valid', reverse=reverse)
        self.test_data = self.load_data(data_dir, 'test', reverse=reverse)
        self.data = ((self.train_data + self.valid_data) + self.test_data)
        self.entities = self.get_entities(self.data)
        self.train_relations = self.get_relations(self.train_data)
        self.valid_relations = self.get_relations(self.valid_data)
        self.test_relations = self.get_relations(self.test_data)
        # BUG FIX: previously valid and test relations were each deduplicated
        # only against the train relations, so a relation present in both
        # valid and test (but not train) appeared twice in self.relations.
        seen = set(self.train_relations)
        extras = []
        for rel in (self.valid_relations + self.test_relations):
            if rel not in seen:
                seen.add(rel)
                extras.append(rel)
        self.relations = (self.train_relations + extras)

    def load_data(self, data_dir, data_type='train', reverse=False):
        """Read `<data_dir><data_type>.txt` into a list of [head, rel, tail] triples.

        With `reverse`, an inverse triple with relation '<rel>_reverse' is
        appended for every triple.
        """
        with open(('%s%s.txt' % (data_dir, data_type)), 'r') as f:
            data = f.read().strip().split('\n')
            data = [i.split() for i in data]
            if reverse:
                data += [[i[2], (i[1] + '_reverse'), i[0]] for i in data]
        return data

    def get_relations(self, data):
        """Return the sorted unique relations appearing in `data`."""
        relations = sorted(list(set([d[1] for d in data])))
        return relations

    def get_entities(self, data):
        """Return the sorted unique head/tail entities appearing in `data`."""
        entities = sorted(list(set(([d[0] for d in data] + [d[2] for d in data]))))
        return entities
class DatabaseGenerator(Generator):
    """Generator that samples an initial board uniformly from a fixed database."""

    def __init__(self, database: chex.Array):
        # Hold the entire database as a single device array.
        self._boards = jnp.asarray(database)

    def __call__(self, key: chex.PRNGKey) -> State:
        """Sample one board with `key` and build the corresponding State."""
        (key, choice_key) = jax.random.split(key)
        n_boards = self._boards.shape[0]
        row = jax.random.randint(choice_key, shape=(), minval=0, maxval=n_boards)
        board = self._boards.take(row, axis=0)
        # Shift every cell value down by one — presumably converting a
        # 1-based database encoding to the environment's 0-based one
        # (confirm against the database format).
        board = (jnp.asarray(board, dtype=jnp.int32) - 1)
        mask = get_action_mask(board)
        return State(board=board, action_mask=mask, key=key)
def train_scan(scan_net, inference_vectorizer, train_Xy, val_Xy, epochs=1, batch_size=1, patience=3):
    """Train `scan_net` with summed BCE loss and per-epoch validation.

    Early-stops when the validation loss has not improved for `patience`
    epochs; prints epoch metrics and returns the (in-place trained) net.
    """
    (train_Xy, val_Xy) = (train_reformat(train_Xy), scan_reform(val_Xy))
    optimizer = optim.Adam(scan_net.parameters())
    criterion = nn.BCELoss(reduction='sum')
    total_epoch_loss = []
    for epoch in range(epochs):
        if early_stopping(total_epoch_loss, patience):
            break
        epoch_loss = 0
        epoch_samples = sample_train(train_Xy)
        for i in range(0, len(epoch_samples), batch_size):
            instances = epoch_samples[i:(i + batch_size)]
            ys = torch.FloatTensor([inst['y'] for inst in instances])
            unk_idx = int(inference_vectorizer.str_to_idx[SimpleInferenceVectorizer.PAD])
            sentences = [torch.LongTensor(inst['sentence_span']) for inst in instances]
            I = [torch.LongTensor(inst['I']) for inst in instances]
            C = [torch.LongTensor(inst['C']) for inst in instances]
            O = [torch.LongTensor(inst['O']) for inst in instances]
            (sens, I, C, O) = [PaddedSequence.autopad(to_enc, batch_first=True, padding_value=unk_idx) for to_enc in [sentences, I, C, O]]
            optimizer.zero_grad()
            if USE_CUDA:
                sens = sens.cuda()
                I = I.cuda()
                C = C.cuda()
                O = O.cuda()
                ys = ys.cuda()
            tags = scan_net(sens, I, C, O)
            loss = criterion(tags, ys)
            # NaN guard: a tensor value is NaN iff it compares unequal to itself.
            if (loss.item() != loss.item()):
                import pdb
                pdb.set_trace()
            epoch_loss += loss.item()
            loss.backward()
            optimizer.step()
        with torch.no_grad():
            y_true = torch.FloatTensor([inst['y'] for inst in val_Xy])
            instances = val_Xy
            if USE_CUDA:
                y_true = y_true.cuda()
            unk_idx = int(inference_vectorizer.str_to_idx[SimpleInferenceVectorizer.PAD])
            y_preds = []
            for i in range(0, len(instances), batch_size):
                batch_instances = instances[i:(i + batch_size)]
                sentences = [torch.LongTensor(inst['sentence_span']) for inst in batch_instances]
                I = [torch.LongTensor(inst['I']) for inst in batch_instances]
                C = [torch.LongTensor(inst['C']) for inst in batch_instances]
                O = [torch.LongTensor(inst['O']) for inst in batch_instances]
                (sens, I, C, O) = [PaddedSequence.autopad(to_enc, batch_first=True, padding_value=unk_idx) for to_enc in [sentences, I, C, O]]
                if USE_CUDA:
                    sens = sens.cuda()
                    I = I.cuda()
                    C = C.cuda()
                    O = O.cuda()
                y_val_preds = scan_net(sens, I, C, O)
                for p in y_val_preds:
                    y_preds.append(p)
            # BUG FIX: the old code called .cuda() unconditionally here (and
            # on y_true below), crashing on CPU-only machines even though all
            # other device transfers are guarded by USE_CUDA.
            y_preds = torch.FloatTensor(y_preds)
            if USE_CUDA:
                y_preds = y_preds.cuda()
            val_loss = criterion(y_preds, y_true)
            y_bin = [(1 if (y > 0.5) else 0) for y in y_preds]
            y_true = y_true.cpu()
            acc = accuracy_score(y_true, y_bin)
            f1 = f1_score(y_true, y_bin)
            prc = precision_score(y_true, y_bin)
            rc = recall_score(y_true, y_bin)
            if USE_CUDA:
                y_true = y_true.cuda()
            print('epoch {}. train loss: {}; val loss: {}; val acc: {:.3f}; val f1: {:.3f}; val precision: {:.3f}; val recall: {:.3f}'.format(epoch, epoch_loss, val_loss, acc, f1, prc, rc))
        total_epoch_loss.append(val_loss)
    return scan_net
def clean_hparams_dict(hparams_dict):
    """Return a copy of `hparams_dict` without falsy values.

    Note this drops None, '', 0, False and empty containers alike.
    """
    cleaned = {}
    for key, val in hparams_dict.items():
        if val:
            cleaned[key] = val
    return cleaned
def get_last_checkpoint(folder):
    """Return the checkpoint subdirectory of `folder` with the largest index.

    Only directories whose name matches the module-level `_re_checkpoint`
    pattern are considered; returns None when there are none.
    """
    checkpoint_dirs = []
    for entry in os.listdir(folder):
        if _re_checkpoint.search(entry) is None:
            continue
        if not os.path.isdir(os.path.join(folder, entry)):
            continue
        checkpoint_dirs.append(entry)
    if not checkpoint_dirs:
        return

    def _index(name):
        return int(_re_checkpoint.search(name).groups()[0])

    return os.path.join(folder, max(checkpoint_dirs, key=_index))
# NOTE(review): the line below appears garbled — it is likely a mangled
# decorator such as `@pytest.mark.vcr()` or `@my_vcr.use_cassette(...)`;
# confirm against version control.
.vcr()
def test_extract_extract_references_from_url(app_client):
    """POST a URL plus a journal knowledge base and expect two extracted references."""
    journal_kb_data = {'COMMUNICATIONS IN ASTEROSEISMOLOGY': 'Commun.Asteros.', 'PHYS REV': 'Phys.Rev.', 'PHYSICAL REVIEW': 'Phys.Rev.', 'PHYS REV LETT': 'Phys.Rev.Lett.', 'JINST': 'JINST', 'JOURNAL OF INSTRUMENTATION': 'JINST', 'SENS ACTUATORS B': 'Sens.Actuators B', 'SENSORS AND ACTUATORS B: CHEMICAL': 'Sens.Actuators B', 'PHYS SCRIPTA': 'Phys.Scripta', 'PHYSICA SCRIPTA': 'Phys.Scripta', 'BULL CALCUTTA MATH SOC': 'Bull.Calcutta Math.Soc.', 'BULLETIN OF THE CALCUTTA MATHEMATICAL SOCIETY': 'Bull.Calcutta Math.Soc.', 'QUANTUM MACHINE INTELLIGENCE': 'Quantum Machine Intelligence'}
    headers = {'content-type': 'application/json'}
    # NOTE(review): this string literal is truncated in the source (the quote
    # is never closed), so the file cannot parse as-is — restore the URL.
    url = '
    payload = {'url': url, 'journal_kb_data': journal_kb_data}
    response = app_client.post('/extract_references_from_url', headers=headers, data=json.dumps(payload))
    assert (response.status_code == 200)
    assert ('extracted_references' in response.json)
    assert (len(response.json['extracted_references']) == 2)
# NOTE(review): the line below appears garbled — most likely a mangled
# decorator such as `@register_model`; confirm against the original file.
_model
def hrnet_w18(pretrained=True, **kwargs):
    """Build an HRNet-W18, optionally with pretrained weights."""
    # NOTE(review): `kwargs` is passed as a single positional dict; helper
    # factories of this shape usually take `**kwargs` — verify
    # _create_model's signature.
    return _create_model('hrnet_w18', pretrained, kwargs)
class InvertedResidual(nn.Module):
    """MobileNet-style inverted residual block (expand -> depthwise -> project).

    A pointwise conv expands channels by `exp_ratio`, a depthwise conv
    operates at the expanded width (optionally followed by squeeze-excite),
    and a linear pointwise conv projects back down. A skip connection is
    added when the shapes allow it and `noskip` is False.
    Attribute names/order are kept as-is: they define checkpoint keys.
    """

    def __init__(self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, se_ratio=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, conv_kwargs=None, drop_path_rate=0.0):
        super(InvertedResidual, self).__init__()
        norm_kwargs = (norm_kwargs or {})
        conv_kwargs = (conv_kwargs or {})
        # Expanded (middle) width, rounded for hardware friendliness.
        mid_chs = make_divisible((in_chs * exp_ratio))
        has_se = ((se_ratio is not None) and (se_ratio > 0.0))
        # Residual only when shape is preserved and not explicitly disabled.
        self.has_residual = (((in_chs == out_chs) and (stride == 1)) and (not noskip))
        self.drop_path_rate = drop_path_rate
        self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs)
        self.bn1 = norm_layer(mid_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)
        self.conv_dw = create_conv2d(mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True, **conv_kwargs)
        self.bn2 = norm_layer(mid_chs, **norm_kwargs)
        self.act2 = act_layer(inplace=True)
        if has_se:
            se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
            self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs)
        else:
            self.se = None
        # Linear projection: no activation after bn3.
        self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs)
        self.bn3 = norm_layer(out_chs, **norm_kwargs)

    def feature_info(self, location):
        """Describe the feature tap at `location` ('expansion'/'depthwise'/other)."""
        if (location == 'expansion'):
            info = dict(module='act1', hook_type='forward', num_chs=self.conv_pw.in_channels)
        elif (location == 'depthwise'):
            info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
        else:
            info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels)
        return info

    def forward(self, x):
        residual = x
        x = self.conv_pw(x)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.conv_dw(x)
        x = self.bn2(x)
        x = self.act2(x)
        if (self.se is not None):
            x = self.se(x)
        x = self.conv_pwl(x)
        x = self.bn3(x)
        if self.has_residual:
            # Stochastic depth on the residual branch during training.
            if (self.drop_path_rate > 0.0):
                x = drop_path(x, self.drop_path_rate, self.training)
            x += residual
        return x
class DMFNet(MFNet):
    """MFNet variant whose encoder stages 2 and 3 use dilated multi-fiber units.

    Overrides encoder_block2/3 with DMFUnit stacks (dilations [1, 2, 3])
    for a larger receptive field; everything else is inherited from MFNet.
    """

    def __init__(self, in_channels, num_classes, n=32, channels=128, groups=16, norm='bn'):
        super(DMFNet, self).__init__(in_channels, num_classes, n, channels, groups, norm)
        # Stage 2: n -> channels at stride 2, then two stride-1 units.
        self.encoder_block2 = nn.Sequential(DMFUnit(n, channels, g=groups, stride=2, norm=norm, dilation=[1, 2, 3]), DMFUnit(channels, channels, g=groups, stride=1, norm=norm, dilation=[1, 2, 3]), DMFUnit(channels, channels, g=groups, stride=1, norm=norm, dilation=[1, 2, 3]))
        # Stage 3: channels -> 2*channels at stride 2, then two stride-1 units.
        self.encoder_block3 = nn.Sequential(DMFUnit(channels, (channels * 2), g=groups, stride=2, norm=norm, dilation=[1, 2, 3]), DMFUnit((channels * 2), (channels * 2), g=groups, stride=1, norm=norm, dilation=[1, 2, 3]), DMFUnit((channels * 2), (channels * 2), g=groups, stride=1, norm=norm, dilation=[1, 2, 3]))
def new_ps_resource_optimizer(optimize_mode: str, job_uuid, resoure_limits: ResourceLimits):
    """Create the parameter-server resource optimizer for `optimize_mode`.

    CLUSTER mode prefers the Brain service and falls back to a local
    optimizer when it is unavailable; SINGLE_JOB always uses the local
    optimizer; any other mode gets a SimpleOptimizer.
    """
    logger.info('New %s resource optimizer for job %s', optimize_mode, job_uuid)
    if (optimize_mode == OptimizeMode.CLUSTER):
        if GlobalBrainClient.BRAIN_CLIENT.available():
            return BrainResoureOptimizer(job_uuid, resoure_limits)
        logger.warning('Brain service is not available, use a local optimizer')
        return PSLocalOptimizer(job_uuid, resoure_limits)
    elif (optimize_mode == OptimizeMode.SINGLE_JOB):
        return PSLocalOptimizer(job_uuid, resoure_limits)
    else:
        # BUG FIX: corrected the garbled log message ('optiimzem' -> 'optimize').
        logger.warning('Not support optimize mode %s, use a simple optimizer', optimize_mode)
        return SimpleOptimizer(job_uuid, resoure_limits)
# NOTE(review): the line below appears garbled — likely a mangled decorator
# such as `@pattern_registry(pattern_type=...)`; confirm against the original.
_registry(pattern_type='ReshapeBeforeRestoreHiddenStates')
class ReshapeBeforeRestoreHiddenStates(Pattern):
    """Graph rewrite: insert a Reshape between LayerNorm and ScatterElements.

    Matches LayerNorm -> ScatterElements pairs and rewrites them to
    LayerNorm -> Reshape('-1,-1,H') -> ScatterElements, where H is the
    hidden size read from the LayerNorm's last input tensor.
    """

    def __call__(self, model):
        # Declarative mapping config consumed by util.pattern_mapping; the
        # nested index lists describe how in/out tensors are rewired.
        pattern_mapping_config = {'ReshapeBeforeRestoreHiddenStates': [{'patterns': {'in': [[(0, 'LayerNorm'), (1, 'ScatterElements')]], 'out': [[(0, 'LayerNorm'), (1, 'Reshape'), (2, 'ScatterElements')]]}, 'search_mode': 'op_type', 'node_names': {0: 0, 1: 'reshape_to_3d_after_layer_norm_in_restoration', 2: 1}, 'input_tensors': {0: [[{0: [0]}, {0: [1]}, {0: [2]}], [[0, 1, 2], 3]], 1: [[{'input_data': [0]}], [[1], 2]], 2: [[{1: [0]}, {1: [1]}], [[0, 1], 3]]}, 'output_tensors': {0: [[{0: [0]}], [[0], 1]], 1: [[], [[], 1]], 2: [[{1: [0]}], [[0], 1]]}, 'returns': [0, 1]}]}

        def _set_attr(ln_attr, se_attr, hidden_size, node_names, model):
            """Restore the original LayerNorm/ScatterElements attrs and set the Reshape target shape."""
            attr1 = OrderedDict()
            # Reshape to (batch, seq, hidden) with the two leading dims inferred.
            attr1['dst_shape'] = ('-1,-1,' + str(hidden_size))
            attr1['dims'] = 0
            ln_node_idx = model.get_node_id(node_names[0])
            model.nodes[ln_node_idx].attr = ln_attr
            reshape_3d_node_idx = model.get_node_id(node_names[1])
            model.nodes[reshape_3d_node_idx].attr = attr1
            scatter_elements_node_idx = model.get_node_id(node_names[2])
            model.nodes[scatter_elements_node_idx].attr = se_attr
        layer_norm_idx = []
        # NOTE(review): remove_list is appended to but never used afterwards.
        remove_list = []
        pattern = pattern_mapping_config['ReshapeBeforeRestoreHiddenStates'][0]['patterns']['in']
        patterns_nodes_name = util.search_pattern(pattern, model)
        for pattern_nodes_name in patterns_nodes_name:
            # Remember where each matched LayerNorm originally lived.
            layer_norm_idx.append(model.get_node_id(pattern_nodes_name[0]))
        pattern_dict = pattern_mapping_config['ReshapeBeforeRestoreHiddenStates'][0]
        (model, new_node_names, ret_old_nodes) = util.pattern_mapping('ReshapeBeforeRestoreHiddenStates', pattern_dict, model)
        if (len(new_node_names) != 0):
            for i in range(len(new_node_names)):
                # Hidden size comes from the LayerNorm's last input tensor.
                hidden_size = int(ret_old_nodes[i][0].input_tensors[(- 1)].shape[0])
                ln_attr = ret_old_nodes[i][0].attr
                se_attr = ret_old_nodes[i][1].attr
                _set_attr(ln_attr, se_attr, hidden_size, new_node_names[i], model)
                import copy
                # Move the LayerNorm back to its original topological slot.
                ln_node = copy.deepcopy(model.get_node_by_name(new_node_names[i][0]))
                model.remove_nodes([new_node_names[i][0]])
                model.insert_nodes((layer_norm_idx[i] + i), [ln_node])
                remove_list.append(new_node_names[i][0])
            return model
        return model
def double_double_laurent_cascade_step(dim, embsys, esols, tasks=0):
    """Run one cascade-homotopy step on a Laurent system in double double precision.

    Given an embedded Laurent system `embsys` of dimension `dim` with
    witness solutions `esols`, tracks one level down the cascade and
    returns the target solutions. `tasks` sets the thread count
    (0 = no multithreading). The py2c call order below is significant.
    """
    from phcpy.phcpy2c3 import py2c_copy_dobldobl_Laurent_container_to_start_system
    from phcpy.phcpy2c3 import py2c_copy_dobldobl_container_to_start_solutions
    from phcpy.phcpy2c3 import py2c_dobldobl_Laurent_cascade_homotopy
    from phcpy.phcpy2c3 import py2c_solve_by_dobldobl_Laurent_homotopy_continuation
    from phcpy.phcpy2c3 import py2c_solcon_clear_dobldobl_solutions
    from phcpy.phcpy2c3 import py2c_copy_dobldobl_target_solutions_to_container
    from phcpy.interface import store_dobldobl_laurent_witness_set
    from phcpy.interface import load_dobldobl_solutions
    # Load the witness set, then stage it as the start system/solutions.
    store_dobldobl_laurent_witness_set(len(embsys), dim, embsys, esols)
    py2c_copy_dobldobl_Laurent_container_to_start_system()
    py2c_copy_dobldobl_container_to_start_solutions()
    # Define the cascade homotopy and track all paths.
    py2c_dobldobl_Laurent_cascade_homotopy()
    py2c_solve_by_dobldobl_Laurent_homotopy_continuation(tasks)
    # Replace the solution container contents with the target solutions.
    py2c_solcon_clear_dobldobl_solutions()
    py2c_copy_dobldobl_target_solutions_to_container()
    return load_dobldobl_solutions()
def set_optimizer(cN, lrate_in, minibatch_multiplier, lazy_regularization=True, clip=None):
    """Build the main and regularization optimizers for network config `cN`.

    With lazy regularization the learning rate and Adam betas are rescaled
    by reg_interval / (reg_interval + 1) to compensate for the less
    frequent regularization updates.
    """
    opt_kwargs = dict(cN.opt_args)
    opt_kwargs['minibatch_multiplier'] = minibatch_multiplier
    opt_kwargs['learning_rate'] = lrate_in
    if lazy_regularization:
        mb_ratio = cN.reg_interval / (cN.reg_interval + 1)
        opt_kwargs['learning_rate'] = opt_kwargs['learning_rate'] * mb_ratio
        for beta_key in ('beta1', 'beta2'):
            if beta_key in opt_kwargs:
                opt_kwargs[beta_key] = opt_kwargs[beta_key] ** mb_ratio
    cN.opt = tflib.Optimizer(name='Loss{}'.format(cN.name), clip=clip, **opt_kwargs)
    # The regularization optimizer shares state with the main one.
    cN.reg_opt = tflib.Optimizer(name='Reg{}'.format(cN.name), share=cN.opt, clip=clip, **opt_kwargs)
class ChatGLMConfig(PretrainedConfig):
    """Configuration for ChatGLM models.

    Holds vocabulary/architecture sizes (vocab_size, hidden_size,
    num_layers, num_attention_heads, inner_hidden_size), special token ids,
    the 2-D positional-encoding switch, quantization bit-width, and
    optional prefix-tuning settings (pre_seq_len, prefix_projection).
    """
    model_type = 'chatglm'

    def __init__(self, vocab_size=150528, hidden_size=4096, num_layers=28, num_attention_heads=32, layernorm_epsilon=1e-05, use_cache=False, bos_token_id=150004, eos_token_id=150005, mask_token_id=150000, gmask_token_id=150001, pad_token_id=0, max_sequence_length=2048, inner_hidden_size=16384, position_encoding_2d=True, quantization_bit=0, pre_seq_len=None, prefix_projection=False, **kwargs):
        self.num_layers = num_layers
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.max_sequence_length = max_sequence_length
        self.layernorm_epsilon = layernorm_epsilon
        self.inner_hidden_size = inner_hidden_size
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.mask_token_id = mask_token_id
        self.gmask_token_id = gmask_token_id
        self.position_encoding_2d = position_encoding_2d
        self.quantization_bit = quantization_bit
        self.pre_seq_len = pre_seq_len
        self.prefix_projection = prefix_projection
        # Token ids are forwarded so PretrainedConfig registers them too.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
def annotations_to_jsonl(annotations, output_file):
    """Write `annotations` to `output_file` as JSON-lines, sorted by annotation_id."""
    ordered = sorted(annotations, key=(lambda ann: ann.annotation_id))
    with open(output_file, 'w') as of:
        for ann in ordered:
            payload = json.dumps(_annotation_to_dict(ann), sort_keys=True)
            of.write(payload)
            of.write('\n')
class RandomGrayscale(object):
    """Randomly convert an image to grayscale with probability `p`.

    Dispatches to the OpenCV-based transform for numpy arrays and to the
    torchvision transform for PIL images.
    """

    def __init__(self, p=0.1):
        self.p = p
        # BUG FIX: the old code passed `self.size` and `self.vertical_flip`,
        # attributes that were never defined (apparently copy-pasted from a
        # flip transform), so construction raised AttributeError. The random
        # grayscale transforms are parameterized by the probability only.
        # NOTE(review): assumes tv_t/cv_t expose RandomGrayscale(p) like
        # torchvision does — confirm against the cv_t transform module.
        self.tv_F = tv_t.RandomGrayscale(self.p)
        self.cv_F = cv_t.RandomGrayscale(self.p)

    def __call__(self, img):
        if (type(img) == np.ndarray):
            return self.cv_F.__call__(img)
        else:
            return self.tv_F.__call__(img)

    def __repr__(self):
        return (self.__class__.__name__ + '(p={})'.format(self.p))
def DenseNet201(Num_classes=10):
    """DenseNet-201: Bottleneck blocks with per-stage layer counts (6, 12, 48, 32)."""
    layer_config = [6, 12, 48, 32]
    return DenseNet(Bottleneck, layer_config, growth_rate=32, num_classes=Num_classes)
class LayoutLMModel(metaclass=DummyObject):
    """Placeholder for LayoutLMModel when the torch backend is unavailable.

    Any attempt to instantiate it raises an informative error via
    requires_backends.
    """
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def save_zip(url, loc):
    """Download `url` to path `loc` unless the file already exists."""
    if os.path.exists(loc):
        return
    urllib.request.urlretrieve(url, loc)
def is_plugin_enabled(plugin_name):
    """Return True iff `plugin_name` is registered and its 'enable' flag is truthy."""
    if plugin_name not in plugins:
        return False
    return bool(plugins[plugin_name]['enable'])
def open_image(path: str) -> 'JpegImageFile':
    """Open an image from HDFS, S3, or the local filesystem.

    In every branch the pixel data is fully decoded (Image.load) before the
    backing file/buffer is closed, since PIL opens images lazily.
    """
    from PIL import Image
    if path.startswith('hdfs'):
        import pyarrow as pa
        fs = pa.hdfs.connect()
        with fs.open(path, 'rb') as f:
            image = Image.open(f)
            image.load()
        return image
    elif path.startswith('s3'):
        access_key_id = os.environ['AWS_ACCESS_KEY_ID']
        secret_access_key = os.environ['AWS_SECRET_ACCESS_KEY']
        import boto3
        from io import BytesIO
        s3_client = boto3.Session(aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key).client('s3')
        path_parts = path.split('://')[1].split('/')
        bucket = path_parts.pop(0)
        key = '/'.join(path_parts)
        data = s3_client.get_object(Bucket=bucket, Key=key)
        with BytesIO(data['Body'].read()) as f:
            image = Image.open(f)
            # BUG FIX: force decoding before the BytesIO buffer is closed;
            # the old code returned a lazily-opened image backed by a
            # closed buffer (every other branch already calls load()).
            image.load()
            return image
    else:
        if path.startswith('file://'):
            path = path[len('file://'):]
        with open(path, 'rb') as f:
            image = Image.open(f)
            image.load()
        return image
def init_logger(log_file=None):
    """Configure and return the root logger.

    Installs exactly one console handler (replacing any existing handlers)
    and, when `log_file` is a non-empty path, an additional file handler.
    """
    formatter = logging.Formatter('[%(levelname)s] %(message)s')
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    # Replace — do not append to — any pre-existing handlers.
    root.handlers = [stream_handler]
    if log_file and log_file != '':
        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(formatter)
        root.addHandler(file_handler)
    return root
class CodeDataset(FairseqDataset):
    def __init__(self, manifest, dictionary, dur_dictionary, f0_dictionary, config, discrete_dur, discrete_f0, log_f0, normalize_f0_mean, normalize_f0_std, interpolate_f0, return_filename=False, strip_filename=True, shifts='0,0', return_continuous_f0=False):
        """Dataset over concatenated code/duration/F0 arrays described by a manifest.

        Reads `<manifest>.leng.txt` for per-utterance lengths (converted to
        start/end offsets into the flat arrays), `<manifest>.path.txt` for
        file names, and optionally per-manifest or global F0 statistics and
        speaker labels. The numpy arrays themselves are memory-mapped
        lazily in get_data_handlers().
        """
        random.seed(1234)
        self.dictionary = dictionary
        self.dur_dictionary = dur_dictionary
        self.f0_dictionary = f0_dictionary
        self.config = config
        self.discrete_dur = discrete_dur
        self.discrete_f0 = discrete_f0
        self.log_f0 = log_f0
        self.normalize_f0_mean = normalize_f0_mean
        self.normalize_f0_std = normalize_f0_std
        self.interpolate_f0 = interpolate_f0
        self.return_filename = return_filename
        self.strip_filename = strip_filename
        # Number of F0 frames per code frame.
        self.f0_code_ratio = (config.code_hop_size / (config.sampling_rate * F0_FRAME_SPACE))
        self.manifest = manifest
        self._codes = None
        self._durs = None
        self._f0s = None
        with open(f'{manifest}.leng.txt', 'r') as f:
            lengs = [int(line.rstrip()) for line in f]
            # Cumulative lengths give [start, end) slices per utterance.
            edges = np.cumsum(([0] + lengs))
            (self.starts, self.ends) = (edges[:(- 1)], edges[1:])
        with open(f'{manifest}.path.txt', 'r') as f:
            self.file_names = [line.rstrip() for line in f]
        logger.info(f'num entries: {len(self.starts)}')
        if os.path.exists(f'{manifest}.f0_stat.pt'):
            self.f0_stats = torch.load(f'{manifest}.f0_stat.pt')
        elif config.f0_stats:
            self.f0_stats = torch.load(config.f0_stats)
        # NOTE(review): if neither stats source exists, self.f0_stats is
        # never set — later accesses would raise AttributeError; confirm.
        self.multispkr = config.multispkr
        if config.multispkr:
            with open(f'{manifest}.speaker.txt', 'r') as f:
                self.spkrs = [line.rstrip() for line in f]
            self.id_to_spkr = sorted(self.spkrs)
            self.spkr_to_id = {k: v for (v, k) in enumerate(self.id_to_spkr)}
        # Padding values: code pad from the dictionary, 0 for durations,
        # F0 pad from the dictionary when discrete else -5.0.
        self.pads = Paddings(dictionary.pad(), 0, (f0_dictionary.pad() if discrete_f0 else (- 5.0)))
        self.shifts = Shifts(shifts, pads=self.pads)
        self.return_continuous_f0 = return_continuous_f0
def get_data_handlers(self):
logging.info(f'loading data for {self.manifest}')
self._codes = np.load(f'{self.manifest}.code.npy', mmap_mode='r')
self._durs = np.load(f'{self.manifest}.dur.npy', mmap_mode='r')
if self.discrete_f0:
if (self.config.f0_vq_type == 'precomp'):
self._f0s = np.load(f'{self.manifest}.{self.config.f0_vq_name}.npy', mmap_mode='r')
elif (self.config.f0_vq_type == 'naive'):
self._f0s = np.load(f'{self.manifest}.f0.npy', mmap_mode='r')
quantizers_path = self.config.get_f0_vq_naive_quantizer(self.log_f0, self.normalize_f0_mean, self.normalize_f0_std)
quantizers = torch.load(quantizers_path)
n_units = self.config.f0_vq_n_units
self._f0_quantizer = torch.from_numpy(quantizers[n_units])
else:
raise ValueError(f'f0_vq_type {self.config.f0_vq_type} not supported')
else:
self._f0s = np.load(f'{self.manifest}.f0.npy', mmap_mode='r')
def preprocess_f0(self, f0, stats):
f0 = f0.clone()
if self.interpolate_f0:
f0 = interpolate_f0(f0)
mask = (f0 != 0)
if self.log_f0:
f0[mask] = f0[mask].log()
if self.normalize_f0_mean:
mean = (stats['logf0_mean'] if self.log_f0 else stats['f0_mean'])
f0[mask] = (f0[mask] - mean)
if self.normalize_f0_std:
std = (stats['logf0_std'] if self.log_f0 else stats['f0_std'])
f0[mask] = (f0[mask] / std)
return f0
def _get_raw_item(self, index):
(start, end) = (self.starts[index], self.ends[index])
if (self._codes is None):
self.get_data_handlers()
code = torch.from_numpy(np.array(self._codes[start:end])).long()
dur = torch.from_numpy(np.array(self._durs[start:end]))
f0 = torch.from_numpy(np.array(self._f0s[start:end]))
return (code, dur, f0)
def __getitem__(self, index):
(code, dur, f0) = self._get_raw_item(index)
code = torch.cat([code.new([self.dictionary.bos()]), code])
dur = torch.cat([dur.new([0]), dur])
if self.discrete_dur:
dur = self.dur_dictionary.encode_line(' '.join(map(str, dur.tolist())), append_eos=False).long()
else:
dur = dur.float()
raw_f0 = None
if self.discrete_f0:
if (self.config.f0_vq_type == 'precomp'):
f0 = self.f0_dictionary.encode_line(' '.join(map(str, f0.tolist())), append_eos=False).long()
else:
f0 = f0.float()
f0 = self.preprocess_f0(f0, self.f0_stats[self.spkrs[index]])
if self.return_continuous_f0:
raw_f0 = f0
raw_f0 = torch.cat([raw_f0.new([self.f0_dictionary.bos()]), raw_f0])
f0 = naive_quantize(f0, self._f0_quantizer)
f0 = torch.cat([f0.new([self.f0_dictionary.bos()]), f0])
else:
f0 = f0.float()
if self.multispkr:
f0 = self.preprocess_f0(f0, self.f0_stats[self.spkrs[index]])
else:
f0 = self.preprocess_f0(f0, self.f0_stats)
f0 = torch.cat([f0.new([0]), f0])
if (raw_f0 is not None):
(*_, raw_f0, raw_f0_mask) = self.shifts(code, dur, raw_f0)
else:
raw_f0_mask = None
(code, code_mask, dur, dur_mask, f0, f0_mask) = self.shifts(code, dur, f0)
if (raw_f0_mask is not None):
assert (raw_f0_mask == f0_mask).all()
feats = {'source': code[:(- 1)], 'target': code[1:], 'mask': code_mask[1:].logical_or(code_mask[:(- 1)]), 'dur_source': dur[:(- 1)], 'dur_target': dur[1:], 'dur_mask': dur_mask[1:].logical_or(dur_mask[:(- 1)]), 'f0_source': f0[:(- 1)], 'f0_target': f0[1:], 'f0_mask': f0_mask[1:].logical_or(f0_mask[:(- 1)])}
if (raw_f0 is not None):
feats['raw_f0'] = raw_f0[1:]
if self.return_filename:
fname = self.file_names[index]
feats['filename'] = (fname if (not self.strip_filename) else Path(fname).with_suffix('').name)
return feats
def __len__(self):
return len(self.starts)
def size(self, index):
return ((self.ends[index] - self.starts[index]) + self.shifts.extra_length)
def num_tokens(self, index):
return self.size(index)
def collater(self, samples):
(pad_idx, eos_idx) = (self.dictionary.pad(), self.dictionary.eos())
if (len(samples) == 0):
return {}
src_tokens = data_utils.collate_tokens([s['source'] for s in samples], pad_idx, eos_idx, left_pad=False)
tgt_tokens = data_utils.collate_tokens([s['target'] for s in samples], pad_idx=pad_idx, eos_idx=pad_idx, left_pad=False)
(src_durs, tgt_durs) = [data_utils.collate_tokens([s[k] for s in samples], pad_idx=self.pads.dur, eos_idx=self.pads.dur, left_pad=False) for k in ['dur_source', 'dur_target']]
(src_f0s, tgt_f0s) = [data_utils.collate_tokens([s[k] for s in samples], pad_idx=self.pads.f0, eos_idx=self.pads.f0, left_pad=False) for k in ['f0_source', 'f0_target']]
(mask, dur_mask, f0_mask) = [data_utils.collate_tokens([s[k] for s in samples], pad_idx=1, eos_idx=1, left_pad=False) for k in ['mask', 'dur_mask', 'f0_mask']]
src_lengths = torch.LongTensor([s['source'].numel() for s in samples])
n_tokens = sum((len(s['source']) for s in samples))
result = {'nsentences': len(samples), 'ntokens': n_tokens, 'net_input': {'src_tokens': src_tokens, 'src_lengths': src_lengths, 'dur_src': src_durs, 'f0_src': src_f0s}, 'target': tgt_tokens, 'dur_target': tgt_durs, 'f0_target': tgt_f0s, 'mask': mask, 'dur_mask': dur_mask, 'f0_mask': f0_mask}
if ('filename' in samples[0]):
result['filename'] = [s['filename'] for s in samples]
if ('prefix' in samples[0]):
result['prefix'] = [s['prefix'] for s in samples]
if ('raw_f0' in samples[0]):
raw_f0s = data_utils.collate_tokens([s['raw_f0'] for s in samples], pad_idx=self.pads.f0, eos_idx=self.pads.f0, left_pad=False)
result['raw_f0'] = raw_f0s
return result |
def YOLO():
    """Run YOLOv4 detection on 'test.mp4' and display annotated frames.

    Lazily initializes the module-level globals `netMain`, `metaMain` and
    `altNames` on first call, then loops over video frames until the video
    ends or the user presses 'q'.

    Raises:
        ValueError: if the config, weight or data file is missing.
    """
    global metaMain, netMain, altNames
    configPath = './cfg/yolov4.cfg'
    weightPath = './yolov4.weights'
    metaPath = './cfg/coco.data'
    if (not os.path.exists(configPath)):
        raise ValueError((('Invalid config path `' + os.path.abspath(configPath)) + '`'))
    if (not os.path.exists(weightPath)):
        raise ValueError((('Invalid weight path `' + os.path.abspath(weightPath)) + '`'))
    if (not os.path.exists(metaPath)):
        raise ValueError((('Invalid data file path `' + os.path.abspath(metaPath)) + '`'))
    if (netMain is None):
        netMain = darknet.load_net_custom(configPath.encode('ascii'), weightPath.encode('ascii'), 0, 1)
    if (metaMain is None):
        metaMain = darknet.load_meta(metaPath.encode('ascii'))
    if (altNames is None):
        # Best-effort: parse the class-names file referenced by the .data file.
        try:
            with open(metaPath) as metaFH:
                metaContents = metaFH.read()
            import re
            match = re.search('names *= *(.*)$', metaContents, (re.IGNORECASE | re.MULTILINE))
            result = match.group(1) if match else None
            try:
                if os.path.exists(result):
                    with open(result) as namesFH:
                        namesList = namesFH.read().strip().split('\n')
                        altNames = [x.strip() for x in namesList]
            except TypeError:
                # result is None when no 'names =' line was found.
                pass
        except Exception:
            pass
    cap = cv2.VideoCapture('test.mp4')
    cap.set(3, 1280)
    cap.set(4, 720)
    # NOTE(review): `out` is created but no frames are ever written to it;
    # kept for interface parity — confirm whether out.write() was intended.
    out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc(*'MJPG'), 10.0, (darknet.network_width(netMain), darknet.network_height(netMain)))
    print('Starting the YOLO loop...')
    darknet_image = darknet.make_image(darknet.network_width(netMain), darknet.network_height(netMain), 3)
    while True:
        prev_time = time.time()
        (ret, frame_read) = cap.read()
        # BUG FIX: stop at end of video; previously cvtColor crashed on None.
        if not ret:
            break
        frame_rgb = cv2.cvtColor(frame_read, cv2.COLOR_BGR2RGB)
        frame_resized = cv2.resize(frame_rgb, (darknet.network_width(netMain), darknet.network_height(netMain)), interpolation=cv2.INTER_LINEAR)
        darknet.copy_image_from_bytes(darknet_image, frame_resized.tobytes())
        detections = darknet.detect_image(netMain, metaMain, darknet_image, thresh=0.25)
        image = cvDrawBoxes(detections, frame_resized)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        print((1 / (time.time() - prev_time)))
        cv2.imshow('Demo', image)
        # BUG FIX: allow the user to quit with 'q'; the loop previously never
        # terminated, so the release() calls below were unreachable.
        if ((cv2.waitKey(3) & 0xFF) == ord('q')):
            break
    cap.release()
    out.release()
def standard_random_system(neq, nvr, nbrmon, deg, cff):
    """Generate a random polynomial system in standard double precision.

    Builds the system in the phcpy systems container (neq equations in nvr
    variables, nbrmon monomials per equation, degree deg, coefficient type
    cff) and returns it loaded from the container.
    """
    from phcpy.interface import load_standard_system
    from phcpy.phcpy2c3 import py2c_syscon_random_system
    py2c_syscon_random_system(nvr, nbrmon, deg, cff, neq)
    return load_standard_system()
def load_and_migrate_checkpoint(ckpt_path):
    """Load a checkpoint on CPU and migrate its state dict to the new layout.

    Renames every 'joint_net' key segment to 'joint.net' and drops the
    precomputed featurizer buffers (filterbank and window), which are
    rebuilt at model construction time.
    """
    checkpoint = torch.load(ckpt_path, map_location='cpu')
    migrated = {
        name.replace('joint_net', 'joint.net'): tensor
        for (name, tensor) in checkpoint['state_dict'].items()
    }
    del migrated['audio_preprocessor.featurizer.fb']
    del migrated['audio_preprocessor.featurizer.window']
    return migrated
def format_dataset_name(dataset_name):
    """Map a short dataset alias to its full HuggingFace datasets path.

    Unknown names are returned unchanged. All three anli rounds map to the
    single 'anli' dataset.
    """
    # All anli rounds share one underlying dataset.
    if dataset_name in ('anli_r1', 'anli_r2', 'anli_r3'):
        return 'anli'
    # Idiom: a lookup table replaces the original 17-branch elif chain.
    mapping = {
        'cos_e': 'cos_e/v1.11',
        'wiki_hop': 'wiki_hop/original',
        'paws': 'paws/labeled_final',
        'glue_qqp': 'glue/qqp',
        'glue_mrpc': 'glue/mrpc',
        'adversarial_qa': 'adversarial_qa/adversarialQA',
        'duorc': 'duorc/ParaphraseRC',
        'hotpot_qa': 'hotpot_qa/fullwiki',
        'cnn_dailymail': 'cnn_dailymail/3.0.0',
        'story_cloze': 'story_cloze/2016',
        'super_glue_copa': 'super_glue/copa',
        'super_glue_cb': 'super_glue/cb',
        'super_glue_rte': 'super_glue/rte',
        'super_glue_wsc.fixed': 'super_glue/wsc.fixed',
        'super_glue_wic': 'super_glue/wic',
        'winogrande': 'winogrande/winogrande_xl',
    }
    return mapping.get(dataset_name, dataset_name)
def split_rnn_outputs(model, rnn_outputs):
    """Split RNN outputs into (hidden states, state-gate op).

    Skip-RNN cells return a structure carrying both; plain cells return the
    hidden states directly, so a no-op stands in for the gate tensor.
    """
    if not using_skip_rnn(model):
        return (rnn_outputs, tf.no_op())
    return (rnn_outputs.h, rnn_outputs.state_gate)
class UpBlockTemporalDecoder(nn.Module):
    """Decoder up-block: a stack of spatio-temporal ResNet blocks, optionally
    followed by a single 2x convolutional upsampler."""

    def __init__(self, in_channels: int, out_channels: int, num_layers: int=1, add_upsample: bool=True):
        super().__init__()
        # Only the first block changes the channel count.
        self.resnets = nn.ModuleList([
            SpatioTemporalResBlock(
                in_channels=(in_channels if layer_idx == 0 else out_channels),
                out_channels=out_channels,
                temb_channels=None,
                eps=1e-06,
                temporal_eps=1e-05,
                merge_factor=0.0,
                merge_strategy='learned',
                switch_spatial_to_temporal_mix=True,
            )
            for layer_idx in range(num_layers)
        ])
        self.upsamplers = (
            nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
            if add_upsample
            else None
        )

    def forward(self, hidden_states: torch.FloatTensor, image_only_indicator: torch.FloatTensor) -> torch.FloatTensor:
        """Apply every resnet block, then the upsampler(s) when present."""
        for block in self.resnets:
            hidden_states = block(hidden_states, image_only_indicator=image_only_indicator)
        if (self.upsamplers is not None):
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states)
        return hidden_states
def get_log_path(model_save_dir, args):
    """Build the JSON training-log path for a finetuning run.

    The filename encodes the task (qnli/cola get a '_reduced_<task>' prefix),
    learning rate, total batch size, the finetuning method, and whether this
    is a test run.

    Raises:
        ValueError: if none of the finetuning-method flags is set.
    """
    mkdir(model_save_dir)
    # Consolidated: the qnli and cola branches were identical except for the
    # task name embedded in the prefix.
    task = args['task_name']
    if task in ('qnli', 'cola'):
        prefix = 'training_logs_reduced_{}'.format(task)
    else:
        prefix = 'training_logs'
    total_batch = args['per_device_train_batch_size'] * len(args['device'])
    output_result_path = '{}/{}_{}_{}'.format(model_save_dir, prefix, str(args['lr']), str(total_batch))
    # Exactly one finetuning method must be selected.
    if args['finetune_from_existing_lp_results']:
        output_result_path = (output_result_path + '_lpthenft')
    elif args['linear_probing']:
        output_result_path = (output_result_path + '_lp')
    elif args['ftall']:
        output_result_path = (output_result_path + '_ftall')
    elif args['continue_lp_from_existing_lp_checkpoints']:
        output_result_path = (output_result_path + '_continue_lp')
    else:
        raise ValueError('Fintuning method not specified, you need to select one of the finetuing method to be True')
    if args['test']:
        output_result_path = (output_result_path + '_test.json')
    else:
        output_result_path = (output_result_path + '_full.json')
    return output_result_path
class CameraClient():
    """Client side of a shared-memory camera feed.

    Connects to a local server over a multiprocessing Connection, receives
    the shared-memory segment's name/shape/dtype, and exposes the camera
    frame as a numpy view into that segment. The None sends/recvs form a
    simple request/ack handshake with the server.
    """

    def __init__(self, port):
        self.conn = Client(('localhost', port), authkey=CONN_AUTHKEY)
        # Request the shared-memory metadata, then wait for it.
        self.conn.send(None)
        data = self.conn.recv()
        # Keep Python's resource tracker from unlinking the server-owned
        # shared memory when this process exits.
        remove_shm_from_resource_tracker()
        self.shm = shared_memory.SharedMemory(name=data['name'])
        # Zero-copy view over the server's frame buffer; the server updates
        # it in place.
        self.image = np.ndarray(data['shape'], dtype=data['dtype'], buffer=self.shm.buf)
        # Ack that attachment is complete.
        self.conn.send(None)

    def get_image(self):
        """Wait for the server's frame signal, ack it, and return the shared
        frame view (contents may change on the next server update)."""
        self.conn.recv()
        self.conn.send(None)
        return self.image

    def close(self):
        """Detach from the shared-memory segment (does not unlink it)."""
        self.shm.close()
def get_block_fun(block_type):
    """Return the block class registered under `block_type`.

    Supported types: 'vanilla_block', 'res_basic_block',
    'res_bottleneck_block'.

    Raises:
        AssertionError: if `block_type` is not one of the supported keys.
    """
    block_funs = {'vanilla_block': VanillaBlock, 'res_basic_block': ResBasicBlock, 'res_bottleneck_block': ResBottleneckBlock}
    # Idiom: membership test directly on the dict (.keys() was redundant).
    assert (block_type in block_funs), "Block type '{}' not supported".format(block_type)
    return block_funs[block_type]
class InferenceRunner(Callback):
    """Callback that runs a set of Inferencers over a DataFlow at the end of
    selected epochs, routing each datapoint's inputs/outputs to the
    inferencers that requested them.
    """

    # Index into either the input datapoint (isOutput=False) or the predictor
    # outputs (isOutput=True).
    IOTensor = namedtuple('IOTensor', ['index', 'isOutput'])

    def __init__(self, ds, infs, inf_epochs, input_tensors=None):
        """
        ds: the DataFlow to iterate during inference.
        infs: one Inferencer or a list of them.
        inf_epochs: epochs (array-like) at which to trigger inference.
        input_tensors: optional explicit input tensor names; defaults to the
            model's placeholders.
        """
        assert isinstance(ds, DataFlow), ds
        self.ds = ds
        if (not isinstance(infs, list)):
            self.infs = [infs]
        else:
            self.infs = infs
        for v in self.infs:
            assert isinstance(v, Inferencer), v
        self.input_tensors = input_tensors
        self.inf_epochs = inf_epochs

    def _setup_graph(self):
        # Resolve tensor names and build the prediction function once.
        self._find_input_tensors()
        self._find_output_tensors()
        self.pred_func = self.trainer.get_predict_func(self.input_tensors, self.output_tensors)

    def _find_input_tensors(self):
        """Default the input tensors to the model's reuse placeholders."""
        if (self.input_tensors is None):
            input_vars = self.trainer.model.get_reuse_placehdrs()

            def get_name(x):
                # SparseTensor has no single .name; use its op's scope name.
                if isinstance(x, tf.SparseTensor):
                    return x.op.name.split('/')[0]
                return x.name
            self.input_tensors = [get_name(x) for x in input_vars]

    def _find_output_tensors(self):
        """Collect every tensor requested by any inferencer and map each
        request back to an input index or an output index."""
        dispatcer = OutputTensorDispatcer()
        for inf in self.infs:
            dispatcer.add_entry(inf.get_output_tensors())
        all_names = dispatcer.get_all_names()
        IOTensor = InferenceRunner.IOTensor
        # Tensors that are already fed as inputs are not fetched again.
        self.output_tensors = list(filter((lambda x: (x not in self.input_tensors)), all_names))

        def find_oid(idxs):
            ret = []
            for idx in idxs:
                name = all_names[idx]
                if (name in self.input_tensors):
                    ret.append(IOTensor(self.input_tensors.index(name), False))
                else:
                    ret.append(IOTensor(self.output_tensors.index(name), True))
            return ret
        # Per-inferencer routing tables.
        self.inf_to_tensors = [find_oid(t) for t in dispatcer.get_idx_for_each_entry()]

    def _trigger_epoch(self):
        # Only run at the configured epochs.
        if np.any((self.inf_epochs[:] == self.epoch_num)):
            for inf in self.infs:
                inf.before_inference()
            sess = tf.get_default_session()
            self.ds.reset_state()
            with get_tqdm(total=self.ds.size()) as pbar:
                for dp in self.ds.get_data():
                    outputs = self.pred_func(dp)
                    for (inf, tensormap) in zip(self.infs, self.inf_to_tensors):
                        # Pick each requested tensor from either the outputs
                        # or the raw datapoint, per the routing table.
                        inf_output = [(outputs if k.isOutput else dp)[k.index] for k in tensormap]
                        inf.datapoint(inf_output)
                    pbar.update()
            self._write_summary_after_inference()

    def _write_summary_after_inference(self):
        summary_inferencer(self.trainer, self.infs)
def setup(rank, world_size, port='10231'):
    """Initialize a gloo torch.distributed process group on localhost.

    Sets the rendezvous environment variables, then joins the group as
    `rank` of `world_size`.
    """
    os.environ.update({'MASTER_ADDR': 'localhost', 'MASTER_PORT': port})
    dist.init_process_group('gloo', rank=rank, world_size=world_size)
class ToyGenerator(Generator):
    """Deterministic bin-packing instance generator for a 20ft container.

    Produces one hard-coded, fully solved 20-item instance (the same one
    regardless of `key`) and, for __call__, the corresponding unpacked
    starting state.
    """

    def __init__(self) -> None:
        super().__init__(max_num_items=20, max_num_ems=60, container_dims=TWENTY_FOOT_DIMS)

    def __call__(self, key: chex.PRNGKey) -> State:
        """Return the unpacked starting state derived from the solved instance."""
        solution = self._generate_solved_instance(key)
        state = self._unpack_items(solution)
        return state

    def generate_solution(self, key: chex.PRNGKey) -> State:
        """Return the fully packed (solved) instance."""
        solution = self._generate_solved_instance(key)
        return solution

    def _generate_solved_instance(self, key: chex.PRNGKey) -> State:
        """Build the hard-coded solved instance.

        NOTE(review): `key` is accepted but unused — the instance (items,
        locations, and the state's PRNGKey(0)) is fixed; confirm this is
        intentional for a "toy" generator.
        """
        container = make_container(self.container_dims)
        # First EMS is the whole container; the rest are empty placeholders.
        list_of_ems = ([container] + ((self.max_num_ems - 1) * [empty_ems()]))
        ems = tree_transpose(list_of_ems)
        ems_mask = jnp.zeros(self.max_num_ems, bool)
        # Hard-coded item dimensions (presumably millimeters — TODO confirm).
        items = Item(x_len=jnp.array([2445, 3083, 1950, 3425, 3083, 2787, 3425, 2787, 2787, 3425, 3083, 2787, 1295, 2787, 3083, 3425, 3425, 837, 1150, 3425], jnp.int32), y_len=jnp.array([1306, 1429, 1301, 321, 1165, 2330, 291, 1504, 826, 466, 901, 1029, 1024, 2330, 1165, 466, 663, 1301, 1024, 589], jnp.int32), z_len=jnp.array([1022, 549, 700, 1022, 629, 200, 1022, 157, 157, 363, 549, 700, 1022, 121, 629, 659, 1022, 700, 1022, 1022], jnp.int32))
        items_mask = jnp.ones(self.max_num_items, bool)
        sorted_ems_indexes = jnp.arange(0, self.max_num_ems, dtype=jnp.int32)
        # Placement of every item in the solved packing.
        items_location = Location(x=jnp.array([0, 0, 3083, 2445, 0, 3083, 2445, 3083, 3083, 2445, 0, 3083, 0, 3083, 0, 2445, 2445, 5033, 1295, 2445], jnp.int32), y=jnp.array([0, 0, 0, 0, 0, 0, 910, 0, 1504, 1864, 1429, 1301, 1306, 0, 1165, 1864, 1201, 0, 1306, 321], jnp.int32), z=jnp.array([0, 1022, 1022, 0, 1571, 1722, 0, 2043, 2043, 0, 1022, 1022, 0, 1922, 1571, 363, 0, 1022, 0, 0], jnp.int32))
        solution = State(container=container, ems=ems, ems_mask=ems_mask, items=items, items_mask=items_mask, items_placed=items_mask, items_location=items_location, action_mask=None, sorted_ems_indexes=sorted_ems_indexes, key=jax.random.PRNGKey(0))
        return solution
def _is_math_expr_safe(expr):
    """Return True iff `expr` is a safe math expression.

    `expr` must consist entirely of allowed characters (the `_allowedchars`
    regex must cover the whole string, not just a prefix) and every
    sub-expression extracted by `_expr_regex` must match `_valid_sub_expr`.
    Relies on those three module-level regexes.
    """
    match = _allowedchars.match(expr)
    # Reject when no match, or when only a proper prefix was matched.
    if (match is None) or (match.group(0) != expr):
        return False
    # Idiom: generator instead of a materialized list inside all().
    return all(_valid_sub_expr.match(sub_exp) for sub_exp in re.findall(_expr_regex, expr))
def compute_feature_stats_for_dataset(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=64, data_loader_kwargs=None, max_items=None, swav=False, sfid=False, **stats_kwargs):
    """Compute (or load cached) feature statistics over the real dataset.

    Runs the feature detector at `detector_url` over the dataset described by
    `opts.dataset_kwargs`, accumulating results into a FeatureStats object.
    Results are cached on disk keyed by an MD5 of the arguments; rank 0 checks
    the cache and the result is broadcast so all GPUs agree on a cache hit.

    NOTE(review): `swav` and `sfid` are accepted but unused in this body —
    possibly consumed by callers elsewhere; confirm.
    """
    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
    if (data_loader_kwargs is None):
        data_loader_kwargs = dict(pin_memory=True, num_workers=3, prefetch_factor=2)
    cache_file = None
    if opts.cache:
        # Cache key covers dataset, detector, and stats arguments.
        det_name = get_feature_detector_name(detector_url)
        args = dict(dataset_kwargs=opts.dataset_kwargs, detector_url=detector_url, detector_kwargs=detector_kwargs, stats_kwargs=stats_kwargs)
        md5 = hashlib.md5(repr(sorted(args.items())).encode('utf-8'))
        cache_tag = f'{dataset.name}-{det_name}-{md5.hexdigest()}'
        cache_file = os.path.join('.', 'dnnlib', 'gan-metrics', (cache_tag + '.pkl'))
        # Only rank 0 touches the filesystem; broadcast the hit/miss flag so
        # every process takes the same branch.
        flag = (os.path.isfile(cache_file) if (opts.rank == 0) else False)
        if (opts.num_gpus > 1):
            flag = torch.as_tensor(flag, dtype=torch.float32, device=opts.device)
            torch.distributed.broadcast(tensor=flag, src=0)
            flag = (float(flag.cpu()) != 0)
        if flag:
            return FeatureStats.load(cache_file)
        print('Calculating the stats for this dataset the first time\n')
        print(f'Saving them to {cache_file}')
    num_items = len(dataset)
    if (max_items is not None):
        num_items = min(num_items, max_items)
    stats = FeatureStats(max_items=num_items, **stats_kwargs)
    progress = opts.progress.sub(tag='dataset features', num_items=num_items, rel_lo=rel_lo, rel_hi=rel_hi)
    detector = get_feature_detector(url=detector_url, device=opts.device, num_gpus=opts.num_gpus, rank=opts.rank, verbose=progress.verbose)
    # Strided sharding: each rank processes every num_gpus-th item.
    item_subset = [(((i * opts.num_gpus) + opts.rank) % num_items) for i in range((((num_items - 1) // opts.num_gpus) + 1))]
    for (images, _labels) in tqdm(torch.utils.data.DataLoader(dataset=dataset, sampler=item_subset, batch_size=batch_size, **data_loader_kwargs)):
        # Grayscale images are tiled to 3 channels for the detector.
        if (images.shape[1] == 1):
            images = images.repeat([1, 3, 1, 1])
        with torch.no_grad():
            features = detector(images.to(opts.device), **detector_kwargs)
        stats.append_torch(features, num_gpus=opts.num_gpus, rank=opts.rank)
        progress.update(stats.num_items)
    if ((cache_file is not None) and (opts.rank == 0)):
        # Atomic save: write to a unique temp file, then rename into place.
        os.makedirs(os.path.dirname(cache_file), exist_ok=True)
        temp_file = ((cache_file + '.') + uuid.uuid4().hex)
        stats.save(temp_file)
        os.replace(temp_file, cache_file)
    return stats
class PDO(PolicyGradientSafe, Serializable):
    """Primal-Dual Optimization for safe policy-gradient RL.

    Thin configuration wrapper around PolicyGradientSafe: it forces the
    safety-tradeoff flags on, the safety-constrained optimizer off, and
    selects the safety signal ('returns' vs 'advantages') based on the PDO
    value-function mode.
    """

    def __init__(self, optimizer=None, optimizer_args=None, safety_constraint=None, pdo_vf_mode=1, **kwargs):
        """pdo_vf_mode 1: single VF, safety keyed on returns; mode 2: a
        separate safety VF (safety_constraint.baseline), keyed on advantages.
        """
        # NOTE: quick_init snapshots locals() — do not introduce new locals
        # before this call.
        Serializable.quick_init(self, locals())
        if (optimizer is None):
            if (optimizer_args is None):
                optimizer_args = dict()
            optimizer = ConjugateGradientOptimizer(**optimizer_args)
        # These kwargs are forced below; drop caller-supplied duplicates so
        # super().__init__ does not receive them twice.
        pop_keys = ['safety_constrained_optimizer', 'safety_tradeoff', 'learn_safety_tradeoff_coeff', 'safety_key']
        for key in pop_keys:
            if (key in kwargs.keys()):
                kwargs.pop(key)
        if (pdo_vf_mode == 1):
            safety_key = 'returns'
        else:
            safety_key = 'advantages'
        # Two-VF mode requires a baseline on the safety constraint; fall back
        # to one-VF mode when it is missing.
        if ((pdo_vf_mode == 2) and (not hasattr(safety_constraint, 'baseline'))):
            logger.log('Warning: selected two-VF PDO, without providing VF for safety constraint.')
            logger.log('Defaulting to one-VF PDO.')
            pdo_vf_mode = 1
            safety_key = 'returns'
        super(PDO, self).__init__(optimizer=optimizer, safety_constrained_optimizer=False, safety_constraint=safety_constraint, safety_tradeoff=True, learn_safety_tradeoff_coeff=True, safety_key=safety_key, pdo_vf_mode=pdo_vf_mode, **kwargs)
class Attribute(Param):
    """Reflection parameter stored as an XML attribute on an element."""

    def __init__(self, xml_var, value_type, required=True, default=None, var=None):
        Param.__init__(self, xml_var, value_type, required, default, var)
        self.type = 'attribute'

    def set_from_string(self, obj, value):
        """Parse `value` with this parameter's value type and assign it onto `obj`."""
        setattr(obj, self.var, self.value_type.from_string(value))

    def get_value(self, obj):
        """Return the current value of this attribute on `obj`."""
        return getattr(obj, self.var)

    def add_to_xml(self, obj, node):
        """Serialize this attribute onto XML `node`.

        Raises:
            Exception: if a required attribute is unset. Optional unset
                attributes fall back to their default; a None default means
                the attribute is omitted entirely.
        """
        value = getattr(obj, self.var)
        if (value is None):
            if self.required:
                raise Exception('Required attribute not set in object: {}'.format(self.var))
            # BUG FIX: this branch previously read an undefined name
            # `skip_default`, raising NameError for any unset optional
            # attribute. Fall back to the declared default instead.
            value = self.default
        if (value is not None):
            node.set(self.xml_var, self.value_type.to_string(value))
class ModelArguments():
    """Command-line arguments selecting the pretrained model, config and
    tokenizer (HuggingFace-style argument dataclass).

    NOTE(review): fields are declared with dataclasses.field(), but no
    @dataclass decorator is visible on this class — it was most likely lost
    when this snippet was extracted. Confirm against the original file;
    without the decorator the field() defaults are never processed.
    """
    # Required: model path or hub identifier.
    model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    # Optional overrides; default to model_name_or_path when unset.
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Path to directory to store the pretrained models downloaded from huggingface.co'})
    model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `huggingface-cli login` (necessary to use this script with private models).'})
def test_render(multicvrp_env: MultiCVRP) -> None:
    """Smoke-test MultiCVRPViewer.render: reset the env, take one step,
    render the resulting state to a PNG, and verify the file was written
    (then clean it up)."""
    key = jax.random.PRNGKey(0)
    reset_fn = jax.jit(multicvrp_env.reset)
    step_fn = jax.jit(multicvrp_env.step)
    (state, timestep) = reset_fn(key)
    viewer = MultiCVRPViewer(name='MultiCVRP', num_vehicles=multicvrp_env._num_vehicles, num_customers=multicvrp_env._num_customers, map_max=multicvrp_env._map_max, render_mode='human')
    # One action per vehicle: send vehicle i to customer node i+1 (0 is the depot).
    new_actions = jnp.array(jnp.arange(1, (multicvrp_env._num_vehicles + 1)), dtype=np.int16)
    (new_state, next_timestep) = step_fn(state, new_actions)
    save_path = 'render_test.png'
    viewer.render(new_state, save_path=save_path)
    assert os.path.exists(save_path)
    # Clean up the rendered artifact.
    os.remove(save_path)
class SpeechT5Model(metaclass=DummyObject):
    """Import-time placeholder for SpeechT5Model when torch is unavailable;
    instantiating it raises a helpful error via requires_backends."""
    # Backends that must be installed for the real class to be usable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def _fetch_and_untar(url, root, tmpdir):
    """Download `url` into `tmpdir` (skipped when already cached) and extract
    the tar archive under `root`."""
    # BUG FIX: the original only created tmpdir in the devkit branch, so a
    # partially-present dataset could try to download into a missing dir.
    if (not os.path.exists(tmpdir)):
        os.makedirs(tmpdir)
    parts = urlparse(url)
    filename = os.path.basename(parts.path)
    cached_file = os.path.join(tmpdir, filename)
    if (not os.path.exists(cached_file)):
        print('Downloading: "{}" to {}\n'.format(url, cached_file))
        download_url(url, cached_file)
    print('[dataset] Extracting tar file {file} to {path}'.format(file=cached_file, path=root))
    # Extraction chdirs into root so archive-relative paths land in place.
    cwd = os.getcwd()
    tar = tarfile.open(cached_file, 'r')
    os.chdir(root)
    tar.extractall()
    tar.close()
    os.chdir(cwd)
    print('[dataset] Done!')

def download_voc2007(root):
    """Download and extract the Pascal VOC2007 dataset under `root`.

    Fetches (only when missing) the devkit, trainval images, test images and
    test annotations, using `root`/tmp as the download cache. The four
    formerly duplicated download/extract stanzas are factored into
    `_fetch_and_untar`.
    """
    path_devkit = os.path.join(root, 'VOCdevkit')
    path_images = os.path.join(root, 'VOCdevkit', 'VOC2007', 'JPEGImages')
    tmpdir = os.path.join(root, 'tmp')
    if (not os.path.exists(root)):
        os.makedirs(root)
    if (not os.path.exists(path_devkit)):
        _fetch_and_untar(urls2007['devkit'], root, tmpdir)
    if (not os.path.exists(path_images)):
        _fetch_and_untar(urls2007['trainval_2007'], root, tmpdir)
    # Presence of the first test image stands in for the whole test-image set.
    test_image = os.path.join(path_devkit, 'VOC2007/JPEGImages/000001.jpg')
    if (not os.path.exists(test_image)):
        _fetch_and_untar(urls2007['test_images_2007'], root, tmpdir)
    test_anno = os.path.join(path_devkit, 'VOC2007/ImageSets/Main/aeroplane_test.txt')
    if (not os.path.exists(test_anno)):
        _fetch_and_untar(urls2007['test_anno_2007'], root, tmpdir)
class AbstractRefTask(RefTask):
    """Reference game over abstract scenes: agents see two scene feature
    vectors and a description of the target scene."""

    def __init__(self):
        super(AbstractRefTask, self).__init__()
        (scenes, _, _, vocab, freq) = corpus.load_abstract()
        self.scenes = scenes
        # Both scenes' features are concatenated for the listener input.
        self.n_features = (scenes[0].features.size * 2)
        self.vocab = vocab
        self.freq_vocab = freq
        self.n_vocab = len(vocab)
        # Inverse mapping for pretty-printing token indices.
        self.reverse_vocab = {}
        for (k, v) in vocab.items():
            assert (v not in self.reverse_vocab)
            self.reverse_vocab[v] = k
        self.random = np.random.RandomState(0)
        self.n_examples = len(self.scenes)
        self.max_desc_len = max((len(s.description) for s in scenes))

    def get_pair(self):
        """Sample two scenes; the first is the target (with its description)."""
        (i1, i2) = self.random.randint(self.n_examples, size=2)
        (s1, s2) = (self.scenes[i1], self.scenes[i2])
        return (s1.features, s2.features, s1.description, s1.image_id, s2.image_id)

    def visualize(self, state, agent):
        """Return an HTML snippet showing both scene images, ordered so the
        target appears on the correct side for `agent`."""
        # BUG FIX: the source had an unterminated string literal here (the
        # URL was lost in extraction), which is a syntax error. '%s' keeps
        # visualize() functional; TODO restore the real image-hosting URL
        # template (one '%s' slot for the scene image id) from version control.
        url_template = '%s'
        url1 = (url_template % state.left_data)
        url2 = (url_template % state.right_data)
        if ((agent == 0) and (state.target == 1)):
            (url1, url2) = (url2, url1)
        html_template = "<img src='%s'>"
        return (((html_template % url1) + '\n') + (html_template % url2))

    def pp(self, indices):
        """Render a sequence of vocab indices as a space-joined string."""
        return ' '.join([self.reverse_vocab[i] for i in indices])
class Batch(object):
    """Padded mini-batch for summarization data.

    Each element of `data` is a tuple (src, tgt, segs, clss, src_sent_labels,
    [src_str, tgt_str]). Sequences are right-padded (0 for token streams,
    -1 for sentence-start indices) and boolean masks mark real positions.
    """

    def __init__(self, data=None, device=None, is_test=False):
        if (data is None):
            return
        self.batch_size = len(data)
        src = torch.tensor(self._pad([ex[0] for ex in data], 0))
        tgt = torch.tensor(self._pad([ex[1] for ex in data], 0))
        segs = torch.tensor(self._pad([ex[2] for ex in data], 0))
        # Sentence-start positions are padded with -1 so they can be masked.
        clss = torch.tensor(self._pad([ex[3] for ex in data], (- 1)))
        src_sent_labels = torch.tensor(self._pad([ex[4] for ex in data], 0))
        mask_src = (src != 0)
        mask_tgt = (tgt != 0)
        mask_cls = (clss != (- 1))
        # After the mask is built, replace the -1 padding with a safe index.
        clss[(clss == (- 1))] = 0
        self.src = src.to(device)
        self.mask_src = mask_src.to(device)
        self.clss = clss.to(device)
        self.mask_cls = mask_cls.to(device)
        self.segs = segs.to(device)
        self.src_sent_labels = src_sent_labels.to(device)
        self.tgt = tgt.to(device)
        self.mask_tgt = mask_tgt.to(device)
        if is_test:
            # Raw text is carried along only at test time.
            self.src_str = [ex[(- 2)] for ex in data]
            self.tgt_str = [ex[(- 1)] for ex in data]

    def __len__(self):
        return self.batch_size

    def _pad(self, data, pad_id, width=(- 1)):
        """Right-pad every sequence to `width` (default: batch maximum)."""
        if (width == (- 1)):
            width = max(len(d) for d in data)
        return [(d + ([pad_id] * (width - len(d)))) for d in data]
def register_annotations_file(filename: str, should_compile_handlers_for_already_imported_modules: bool=False) -> Set[str]:
    """Read `filename`, register the annotations it declares, and return the
    set of affected module names. Optionally compiles handlers for modules
    that are already imported.
    """
    with open(filename, 'r') as handle:
        modules = register_annotations_from_source(handle.read(), filename)
    if should_compile_handlers_for_already_imported_modules:
        compile_handlers_for_already_imported_modules(modules)
    return modules
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab v3 style).

    Five parallel branches over a 512-channel feature map — a 1x1 conv,
    three 3x3 dilated convs (rates 6/12/18), and a global-average-pooled
    1x1 branch upsampled back — are concatenated, fused by a 1x1 conv, and
    optionally projected to `num_classes` when `head` is True.
    """

    def __init__(self, num_classes, head=True):
        super(ASPP, self).__init__()
        self.conv_1x1_1 = nn.Conv2d(512, 256, kernel_size=1)
        self.bn_conv_1x1_1 = nn.BatchNorm2d(256)
        # Three dilated 3x3 branches; attribute names match the checkpoints
        # produced by the original layout.
        for (branch, rate) in ((1, 6), (2, 12), (3, 18)):
            setattr(self, 'conv_3x3_{}'.format(branch), nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=rate, dilation=rate))
            setattr(self, 'bn_conv_3x3_{}'.format(branch), nn.BatchNorm2d(256))
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_1x1_2 = nn.Conv2d(512, 256, kernel_size=1)
        self.bn_conv_1x1_2 = nn.BatchNorm2d(256)
        self.conv_1x1_3 = nn.Conv2d(1280, 256, kernel_size=1)
        self.bn_conv_1x1_3 = nn.BatchNorm2d(256)
        if head:
            self.conv_1x1_4 = nn.Conv2d(256, num_classes, kernel_size=1)
        self.head = head

    def forward(self, feature_map):
        """Return fused ASPP features (N, num_classes|256, H, W)."""
        (h, w) = (feature_map.size()[2], feature_map.size()[3])
        branches = [F.relu(self.bn_conv_1x1_1(self.conv_1x1_1(feature_map)))]
        for branch in (1, 2, 3):
            conv = getattr(self, 'conv_3x3_{}'.format(branch))
            bn = getattr(self, 'bn_conv_3x3_{}'.format(branch))
            branches.append(F.relu(bn(conv(feature_map))))
        # Image-level context: pool to 1x1, project, upsample back.
        pooled = F.relu(self.bn_conv_1x1_2(self.conv_1x1_2(self.avg_pool(feature_map))))
        branches.append(F.interpolate(pooled, size=(h, w), mode='bilinear'))
        fused = F.relu(self.bn_conv_1x1_3(self.conv_1x1_3(torch.cat(branches, 1))))
        if self.head:
            fused = self.conv_1x1_4(fused)
        return fused
# BUG FIX: the source contained a bare `(frozen=True)` line — a syntax error
# that is clearly the residue of a lost `@dataclass(frozen=True)` decorator —
# and `from_json(cls, ...)` had lost its @classmethod. Both are restored.
from dataclasses import dataclass

@dataclass(frozen=True)
class ValidationConfig(JsonSerializable):
    """Immutable validation settings: worker core count and bug-pattern filter."""
    n_cores: int
    bug_pattern: str

    def to_json(self) -> Any:
        """Serialize to a plain JSON-compatible dict."""
        return {'n_cores': self.n_cores, 'bug_pattern': self.bug_pattern}

    @classmethod
    def from_json(cls, d: dict) -> 'ValidationConfig':
        """Rebuild a ValidationConfig from its to_json() representation."""
        return ValidationConfig(int(d['n_cores']), str(d['bug_pattern']))
def read_uiuc(auto_src, gold_src):
    """Read UIUC coreference output files matching '<auto_src>/*out'.

    `gold_src` is unused here; it is kept so all reader functions share a
    uniform (auto_src, gold_src) signature.
    """
    pattern = os.path.join(auto_src, '*out')
    return multifile_process(pattern, coreference_reading.read_uiuc_coref)
class ResNet(nn.Module):
    """CIFAR-style ResNet with an auxiliary RoI classification head.

    `forward` returns image-level logits; when `boxes` is given it also
    returns per-RoI logits computed from 1x1 RoI-aligned features, using
    either a dedicated head (`fc_roi`) or the shared `fc` head.
    """

    def __init__(self, block, num_block, num_classes=100):
        super().__init__()
        self.in_channels = 64
        self.conv1 = nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(64), nn.ReLU(inplace=True))
        # Four stages; only the first keeps stride 1.
        for (stage_name, width, blocks, stride) in zip(('conv2_x', 'conv3_x', 'conv4_x', 'conv5_x'), (64, 128, 256, 512), num_block, (1, 2, 2, 2)):
            setattr(self, stage_name, self._make_layer(block, width, blocks, stride))
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        self.fc_roi = nn.Linear((512 * block.expansion), num_classes)

    def _make_layer(self, block, out_channels, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first applies `stride`."""
        layers = []
        for s in ([stride] + ([1] * (num_blocks - 1))):
            layers.append(block(self.in_channels, out_channels, s))
            self.in_channels = (out_channels * block.expansion)
        return nn.Sequential(*layers)

    def forward(self, x, boxes=None, share_fc=False):
        batch = x.shape[0]
        img_size = x.shape[(- 1)]
        feat_map = self.conv5_x(self.conv4_x(self.conv3_x(self.conv2_x(self.conv1(x)))))
        pooled = self.avg_pool(feat_map)
        pooled = pooled.view(pooled.size(0), (- 1))
        logits = self.fc(pooled)
        if (boxes is None):
            return logits
        # Prepend the batch index to each box, as roi_align expects.
        batch_index = torch.arange(batch).view((- 1), 1).to(x.device)
        rois = torch.cat([batch_index, boxes], 1)
        spatial_scale = (feat_map.shape[(- 1)] / img_size)
        roi_feat = roi_align(feat_map, rois, output_size=(1, 1), spatial_scale=spatial_scale, sampling_ratio=(- 1), aligned=True).squeeze()
        roi_logits = self.fc(roi_feat) if share_fc else self.fc_roi(roi_feat)
        return (logits, roi_logits)
def test_PointwiseSemanticHead():
    """GPU smoke test for PointwiseSemanticHead: forward output shapes,
    target construction, and loss values/backward on a tiny fixture."""
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
    from mmdet3d.models.builder import build_head
    seg_loss = dict(type='FocalLoss', use_sigmoid=True, reduction='sum',
                    gamma=2.0, alpha=0.25, loss_weight=1.0)
    part_loss = dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)
    head = build_head(dict(type='PointwiseSemanticHead', in_channels=8,
                           extra_width=0.2, seg_score_thr=0.3, num_classes=3,
                           loss_seg=seg_loss, loss_part=part_loss))
    head.cuda()
    voxel_features = torch.rand([4, 8], dtype=torch.float32).cuda()
    n_voxels = voxel_features.shape[0]
    preds = head.forward(voxel_features)
    assert preds['seg_preds'].shape == torch.Size([n_voxels, 1])
    assert preds['part_preds'].shape == torch.Size([n_voxels, 3])
    assert preds['part_feats'].shape == torch.Size([n_voxels, 4])
    voxel_centers = torch.tensor(
        [[6.56126, 0.9648336, -1.7339306],
         [6.8162713, -2.480431, -1.3616394],
         [11.643568, -4.744306, -1.3580885],
         [23.482342, 6.5036807, 0.5806964]], dtype=torch.float32).cuda()
    coordinates = torch.tensor(
        [[0, 12, 819, 131],
         [0, 16, 750, 136],
         [1, 16, 705, 232],
         [1, 35, 930, 469]], dtype=torch.int32).cuda()
    voxel_dict = dict(voxel_centers=voxel_centers, coors=coordinates)
    gt_bboxes = [
        LiDARInstance3DBoxes(torch.tensor(
            [[6.4118, -3.4305, -1.7291, 1.7033, 3.4693, 1.6197, -0.9091]],
            dtype=torch.float32).cuda()),
        LiDARInstance3DBoxes(torch.tensor(
            [[16.9107, 9.7925, -1.9201, 1.6097, 3.2786, 1.5307, -2.4056]],
            dtype=torch.float32).cuda()),
    ]
    gt_labels = list(torch.tensor([[0], [1]], dtype=torch.int64).cuda())
    targets = head.get_targets(voxel_dict, gt_bboxes, gt_labels)
    assert targets['seg_targets'].shape == torch.Size([n_voxels])
    # Expected per-voxel segmentation targets for this exact fixture.
    assert torch.allclose(targets['seg_targets'],
                          targets['seg_targets'].new_tensor([3, -1, 3, 3]))
    assert targets['part_targets'].shape == torch.Size([n_voxels, 3])
    assert targets['part_targets'].sum() == 0
    losses = head.loss(preds, targets)
    assert losses['loss_seg'] > 0
    assert losses['loss_part'] == 0
    (losses['loss_seg'] + losses['loss_part']).backward()
def video_record(filename, duration):
    """Record roughly ``duration`` seconds of webcam video to ``filename``.

    Opens the default camera (index 0) and writes MJPG-encoded frames at a
    nominal 20 fps until ``duration`` wall-clock seconds have elapsed.
    Returns the number of frames written.

    Fixes over the previous version: the unused `t0`, the unused 'XVID'
    fourcc (the writer actually uses MJPG), and the discarded per-frame
    grayscale conversion are removed; a failed `video.read()` now stops
    recording instead of crashing on a None frame; resources are released
    even if writing raises.
    """
    print('recording video (.AVI)')
    print('--> ' + filename)
    video = cv2.VideoCapture(0)
    frame_width = int(video.get(3))   # CAP_PROP_FRAME_WIDTH
    frame_height = int(video.get(4))  # CAP_PROP_FRAME_HEIGHT
    out = cv2.VideoWriter(filename, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                          20, (frame_width, frame_height))
    frame_count = 0
    start = time.time()
    try:
        while True:
            check, frame = video.read()
            if not check:
                # Camera delivered no frame; stop instead of crashing.
                break
            out.write(frame)
            frame_count += 1
            if (time.time() - start) > duration:
                break
    finally:
        video.release()
        out.release()
        cv2.destroyAllWindows()
    print(frame_count)
    return frame_count
def load_airline_table(airline_table):
    """Parse a TSV airline table into ``{callword_lower: underscored_form}``.

    Each data line must have at least three tab-separated fields
    (ICAO code, name, callword). Skipped entirely: the 'ICAO' header row,
    blank lines, malformed short rows (warned to stderr), single-word
    callwords, duplicates, and callwords containing any word from the
    module-level ``letters``/``numbers`` phonetic tables.

    Fixes over the previous version: the file handle is closed via
    ``with`` instead of leaking, and a row with fewer than three fields
    is now skipped after the warning instead of crashing on unpacking.
    """
    callword_mapping = dict()
    # Phonetic-alphabet words (module-level tables) disqualify a callword.
    skip_words = set(list(letters.values()) + list(numbers.values()))
    with open(airline_table, 'r') as table_file:
        for line in table_file:
            if line.strip() == '':
                continue
            if line.strip().split('\t')[0] == 'ICAO':
                continue
            arr = line.rstrip().split('\t')
            if len(arr) < 3:
                # Malformed row: warn and skip (previously crashed below).
                print(arr, file=sys.stderr)
                continue
            (icao, name, callword) = arr[:3]
            callword_ = callword.lower()
            if ' ' not in callword_:
                continue
            if callword_ in callword_mapping:
                continue
            if any(wrd in skip_words for wrd in callword_.split()):
                continue
            callword_mapping[callword_] = re.sub('[ -]', '_', callword_)
    return callword_mapping
class Client(ABC):
    """Abstract federated-learning client endpoint.

    Opens a socket to the server, receives the experiment configuration,
    model and resume state in the init message, then runs local training
    rounds in ``main`` and exchanges (possibly sparse) state dicts with
    the server. Subclasses provide the data pipeline via
    ``parse_init_extra_params``.
    """

    def __init__(self, network_config, max_try=100):
        """Connect to the server and build model/optimizer/scheduler state.

        network_config: object exposing SERVER_ADDR and SERVER_PORT.
        max_try: number of connection attempts passed to the handshake.
        """
        self.network_config = network_config
        self.socket = ClientSocket(network_config.SERVER_ADDR, network_config.SERVER_PORT)
        # Populated by subclasses in parse_init_extra_params.
        self.train_loader = None
        init_msg = self.socket.init_connections(max_try)
        self.client_id = init_msg.client_id
        self.exp_config = init_msg.exp_config
        # Shared seed from the experiment config keeps client-side torch
        # randomness reproducible.
        torch.manual_seed(self.exp_config.seed)
        self.model = init_msg.model
        self.model.train()
        self.optimizer = self.exp_config.optimizer_class(params=self.model.parameters(), **self.exp_config.optimizer_params)
        self.lr_scheduler = None
        if (self.exp_config.lr_scheduler_class is not None):
            self.lr_scheduler = self.exp_config.lr_scheduler_class(optimizer=self.optimizer, **self.exp_config.lr_scheduler_params)
        self.optimizer_wrapper = OptimizerWrapper(self.model, self.optimizer, self.lr_scheduler)
        if self.exp_config.use_adaptive:
            # Squared-gradient stats uploaded on adjustment rounds, and the
            # per-round dense-gradient accumulator they are built from.
            self.dict_extra_sgrad = dict()
            self.accum_dense_grad = dict()
        self.is_adj_round = False
        self.is_sparse = False
        self.terminate = False
        self.parse_init_extra_params(init_msg.extra_params)
        (resume, cur_round, resume_to_sparse) = init_msg.resume_params
        self.initialize(resume, cur_round, resume_to_sparse)

    # NOTE(review): bare `_grad()` looks like a mangled `@torch.no_grad()`
    # decorator for the following method -- confirm against upstream.
    _grad()
    def load_state_dict(self, state_dict):
        """Copy received parameters and buffers into the local model in place.

        Three cases per matching key: a size mismatch (presumably a dense
        value tensor destined for a sparse param's ``_values()`` storage --
        TODO confirm), a sparse tensor carrying a ``mask``, or a plain
        dense copy.
        """
        param_dict = dict(self.model.named_parameters())
        buffer_dict = dict(self.model.named_buffers())
        for (key, param) in {**param_dict, **buffer_dict}.items():
            if (key in state_dict.keys()):
                if (state_dict[key].size() != param.size()):
                    param._values().copy_(state_dict[key])
                elif state_dict[key].is_sparse:
                    param.copy_(state_dict[key])
                    param.mask.copy_(state_dict[key].mask)
                else:
                    param.copy_(state_dict[key])

    def initialize(self, resume, cur_round, resume_to_sparse):
        """Prepare for training; on resume, fast-forward scheduler and loader.

        resume: replay ``cur_round`` completed rounds of LR-scheduler and
            data-loader state instead of starting fresh.
        resume_to_sparse: additionally convert the model to sparse form
            after replaying, matching the state it had when checkpointed.
        """
        if resume:
            print('Resuming client...')
            # Replay one scheduler step per already-completed local update.
            for _ in range((cur_round * self.exp_config.num_local_updates)):
                self.optimizer_wrapper.lr_scheduler_step()
            # Advance the loader to the same position: whole epochs first,
            # then the remaining individual batches.
            remaining_batches = (cur_round * self.exp_config.num_local_updates)
            num_batches_epoch = len(self.train_loader)
            while (remaining_batches >= num_batches_epoch):
                self.train_loader.skip_epoch()
                remaining_batches -= num_batches_epoch
            for _ in range(remaining_batches):
                self.train_loader.get_next_batch()
            if resume_to_sparse:
                self.convert_to_sparse()
            print('Client resumed')
        else:
            print('Client initialized')

    def parse_init_extra_params(self, extra_params):
        """Hook for subclasses to consume extra init-message payload
        (e.g. to construct ``self.train_loader``)."""
        pass

    def cleanup_state_dict_to_server(self) -> dict:
        """Return a copy of the model state dict prepared for upload.

        Flattens ``SparseLinear`` bias tensors to 1-D and drops every
        key ending in 'placeholder', which the server does not need.
        """
        clean_state_dict = copy_dict(self.model.state_dict())
        if self.is_sparse:
            for (layer, prefix) in zip(self.model.param_layers, self.model.param_layer_prefixes):
                key = (prefix + '.bias')
                if (isinstance(layer, SparseLinear) and (key in clean_state_dict.keys())):
                    clean_state_dict[key] = clean_state_dict[key].view((- 1))
        del_list = []
        del_suffix = 'placeholder'
        for key in clean_state_dict.keys():
            if key.endswith(del_suffix):
                del_list.append(key)
        for del_key in del_list:
            del clean_state_dict[del_key]
        return clean_state_dict

    # NOTE(review): bare `_grad()` looks like a mangled `@torch.no_grad()`
    # decorator for the following method -- confirm against upstream.
    _grad()
    def process_state_dict_to_server(self) -> dict:
        """Build the upload dict: sparse params reduced to their value
        tensors, plus the accumulated squared gradients on adjustment
        rounds (which are cleared after being attached)."""
        clean_state_dict = self.cleanup_state_dict_to_server()
        if self.is_sparse:
            for (key, param) in clean_state_dict.items():
                if param.is_sparse:
                    clean_state_dict[key] = param._values()
        if self.is_adj_round:
            clean_state_dict.update(self.dict_extra_sgrad)
            self.dict_extra_sgrad = dict()
        return clean_state_dict

    def convert_to_sparse(self):
        """Convert the model to its sparse form and rebuild the optimizer,
        scheduler and wrapper around the new parameters, preserving the
        current learning rate and the scheduler's internal state."""
        self.model = self.model.to_sparse()
        old_lr = self.optimizer.state_dict()['param_groups'][0]['lr']
        self.optimizer = self.exp_config.optimizer_class(params=self.model.parameters(), **self.exp_config.optimizer_params)
        if (self.exp_config.lr_scheduler_class is not None):
            lr_scheduler_state_dict = deepcopy(self.lr_scheduler.state_dict())
            self.lr_scheduler = self.exp_config.lr_scheduler_class(optimizer=self.optimizer, **self.exp_config.lr_scheduler_params)
            self.lr_scheduler.load_state_dict(lr_scheduler_state_dict)
        # The freshly-built optimizer starts at the config LR; restore the
        # LR the previous optimizer had reached.
        self.optimizer.param_groups[0]['lr'] = old_lr
        self.optimizer_wrapper = OptimizerWrapper(self.model, self.optimizer, self.lr_scheduler)
        self.is_sparse = True
        print('Model converted to sparse')

    def accumulate_dense_grad_round(self):
        """Add this round's dense gradients of sparse-marked params into
        ``self.accum_dense_grad`` (keyed by parameter name)."""
        for (key, param) in self.model.named_parameters():
            if hasattr(param, 'is_sparse_param'):
                if (key in self.accum_dense_grad.keys()):
                    self.accum_dense_grad[key] += param.dense.grad
                else:
                    self.accum_dense_grad[key] = param.dense.grad

    def accumulate_sgrad(self, num_proc_data):
        """Fold accumulated dense gradients into the squared-gradient
        statistics under 'extra.'-prefixed keys, weighted by the number
        of samples processed this round.

        On adjustment rounds, each entry is reduced to the positions
        where the parameter's mask is zero.
        """
        prefix = 'extra.'
        for (key, param) in self.accum_dense_grad.items():
            pkey = (prefix + key)
            if (pkey in self.dict_extra_sgrad.keys()):
                self.dict_extra_sgrad[pkey] += ((param ** 2) * num_proc_data)
            else:
                self.dict_extra_sgrad[pkey] = ((param ** 2) * num_proc_data)
            if self.is_adj_round:
                param_mask = (dict(self.model.named_parameters())[key].mask == 0.0)
                self.dict_extra_sgrad[pkey] = self.dict_extra_sgrad[pkey].masked_select(param_mask)

    def main(self):
        """Run one communication round.

        Performs ``num_local_updates`` optimizer steps (accumulating
        gradient stats when adaptive mode is on), uploads the processed
        state dict with the sample count and LR, then applies the
        server's reply: adjustment flag, optional sparse conversion, new
        state dict, and a scheduler step. Returns True when the server
        signals termination (after acking and closing the socket).
        """
        num_proc_data = 0
        for _ in range(self.exp_config.num_local_updates):
            (inputs, labels) = self.train_loader.get_next_batch()
            self.optimizer_wrapper.step(inputs, labels)
            if self.exp_config.use_adaptive:
                self.accumulate_dense_grad_round()
            num_proc_data += len(inputs)
        if self.exp_config.use_adaptive:
            self.accumulate_sgrad(num_proc_data)
            self.accum_dense_grad = dict()
        lr = self.optimizer_wrapper.get_last_lr()
        state_dict_to_server = self.process_state_dict_to_server()
        msg_to_server = ClientToServerUpdateMessage((state_dict_to_server, num_proc_data, lr))
        self.socket.send_msg(msg_to_server)
        update_msg = self.socket.recv_update_msg()
        self.is_adj_round = update_msg.adjustment
        if ((not self.is_sparse) and update_msg.to_sparse):
            self.convert_to_sparse()
        state_dict_received = update_msg.state_dict
        self.load_state_dict(state_dict_received)
        self.optimizer_wrapper.lr_scheduler_step()
        terminate = update_msg.terminate
        if terminate:
            self.socket.send_ack_msg()
            self.socket.close()
            self.terminate = True
            print('Task completed')
        return terminate
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.