code stringlengths 101 5.91M |
|---|
def run_filter(mode):
    """Train (mode == True) or evaluate (mode == False) a differentiable-EnKF MLP.

    Relies on module-level names: tf, math, time, np, pickle, diff_enKF,
    DataLoader, get_loss, and the checkpoint-naming globals version, name, index.
    Side effects: writes weight files under ./models/ and pickled results
    under ./output/; prints progress to stdout.
    """
    tf.keras.backend.clear_session()
    dim_x = 10  # state vector dimensionality
    if (mode == True):
        # ---------------- training branch ----------------
        batch_size = 64
        num_ensemble = 32
        dropout_rate = 0.1
        model = diff_enKF.enKFMLP(batch_size, num_ensemble, dropout_rate)
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)
        epoch = 200
        for k in range(epoch):
            print('end-to-end wholemodel')
            print((' working on epoch %d : ' % k))
            # NOTE(review): assumes ~200k training samples — confirm dataset size.
            steps = math.floor(((200 * 1000) / batch_size))
            for step in range(steps):
                csv_path = './dataset/dataset_UR5.csv'
                (gt_pre, gt_now, raw_sensor) = DataLoader.load_train_data_All(csv_path, batch_size)
                # persistent tape: three separate gradient passes below.
                with tf.GradientTape(persistent=True) as tape:
                    start = time.time()
                    states = DataLoader.format_state(gt_pre, batch_size, num_ensemble, dim_x)
                    out = model(raw_sensor, states)
                    state_h = out[1]  # filtered (posterior) state estimate
                    state_p = out[2]  # transition-model prediction
                    y = out[3]        # observation-model output
                    loss_1 = get_loss._mse((gt_now - state_p))
                    loss_2 = get_loss._mse((gt_now - y))
                    loss = get_loss._mse((gt_now - state_h))
                end = time.time()
                if ((step % 500) == 0):
                    print(('Training loss at step %d: %.4f (took %.3f seconds) ' % (step, float(loss), float((end - start)))))
                    print(state_p[0])
                    print(y[0])
                    print(state_h[0])
                    print(gt_now[0])
                    print('---')
                # End-to-end loss updates the whole model; the auxiliary losses
                # update individual sub-modules (layers 0 and 3) separately.
                grads = tape.gradient(loss, model.trainable_weights)
                optimizer.apply_gradients(zip(grads, model.trainable_weights))
                grads = tape.gradient(loss_1, model.layers[0].trainable_weights)
                optimizer.apply_gradients(zip(grads, model.layers[0].trainable_weights))
                grads = tape.gradient(loss_2, model.layers[3].trainable_weights)
                optimizer.apply_gradients(zip(grads, model.layers[3].trainable_weights))
            if (((k + 1) % epoch) == 0):
                # NOTE(review): this filename uses str(epoch) while the one below
                # uses str(k) — confirm the final checkpoint name is intended.
                model.save_weights(((((('./models/bayes_enkf_' + version) + '_') + name[index]) + str(epoch).zfill(3)) + '.h5'))
                print('model is saved at this epoch')
            if (((k + 1) % 5) == 0):
                model.save_weights(((((('./models/bayes_enkf_' + version) + '_') + name[index]) + str(k).zfill(3)) + '.h5'))
                print('model is saved at this epoch')
                # ------- periodic evaluation on the held-out test split -------
                test_batch_size = 1
                test_num_ensemble = 32
                test_dropout_rate = 0.1
                model_test = diff_enKF.enKFMLPAll(test_batch_size, test_num_ensemble, test_dropout_rate)
                csv_path = './dataset/dataset_UR5_test.csv'
                (test_gt_pre, test_gt_now, test_raw_sensor) = DataLoader.load_test_data_All(csv_path, test_batch_size)
                inputs = test_raw_sensor[0]
                init_states = DataLoader.format_init_state(test_gt_pre[0], test_batch_size, test_num_ensemble, dim_x)
                # Dummy forward pass builds the variables before loading weights.
                dummy = model_test(inputs, init_states)
                model_test.load_weights(((((('./models/bayes_enkf_' + version) + '_') + name[index]) + str(k).zfill(3)) + '.h5'))
                for layer in model_test.layers:
                    layer.trainable = False
                model_test.summary()
                data = {}
                data_save = []
                emsemble_save = []
                gt_save = []
                transition_save = []
                observation_save = []
                # Roll the filter forward through the whole test sequence.
                for t in range(test_gt_now.shape[0]):
                    if (t == 0):
                        states = init_states
                    out = model_test(test_raw_sensor[t], states)
                    if ((t % 10) == 0):
                        print('---')
                        print(out[1])
                        print(test_gt_now[t])
                    # Feed (ensemble, mean) back in as the next-step state.
                    states = (out[0], out[1])
                    state_out = np.array(out[1])
                    gt_out = np.array(test_gt_now[t])
                    ensemble = np.array(tf.reshape(out[0], [test_num_ensemble, dim_x]))
                    transition_out = np.array(out[2])
                    observation_out = np.array(out[3])
                    data_save.append(state_out)
                    emsemble_save.append(ensemble)
                    gt_save.append(gt_out)
                    observation_save.append(observation_out)
                    transition_save.append(transition_out)
                data['state'] = data_save
                data['ensemble'] = emsemble_save
                data['gt'] = gt_save
                data['observation'] = observation_save
                data['transition'] = transition_save
                with open(((((('./output/bayes_enkf_' + version) + '_') + name[index]) + str(k).zfill(3)) + '.pkl'), 'wb') as f:
                    pickle.dump(data, f)
    else:
        # ------- evaluation-only branch: load the epoch-44 checkpoint -------
        k = 44
        test_batch_size = 1
        test_num_ensemble = 32
        test_dropout_rate = 0.1
        model_test = diff_enKF.enKFMLP(test_batch_size, test_num_ensemble, test_dropout_rate)
        csv_path = './dataset/dataset_UR5_test.csv'
        (test_gt_pre, test_gt_now, test_raw_sensor) = DataLoader.load_test_data_All(csv_path, test_batch_size)
        inputs = test_raw_sensor[0]
        init_states = DataLoader.format_init_state(test_gt_pre[0], test_batch_size, test_num_ensemble, dim_x)
        # Dummy forward pass builds the variables before loading weights.
        dummy = model_test(inputs, init_states)
        model_test.load_weights(((((('./models/bayes_enkf_' + version) + '_') + name[index]) + str(k).zfill(3)) + '.h5'))
        for layer in model_test.layers:
            layer.trainable = False
        model_test.summary()
        data = {}
        data_save = []
        emsemble_save = []
        gt_save = []
        transition_save = []
        observation_save = []
        for t in range(test_gt_now.shape[0]):
            if (t == 0):
                states = init_states
            out = model_test(test_raw_sensor[t], states)
            if ((t % 10) == 0):
                print('---')
                print(out[1])
                print(out[2])
                print(out[3])
                print(test_gt_now[t])
            # Feed (ensemble, mean) back in as the next-step state.
            states = (out[0], out[1])
            state_out = np.array(out[1])
            gt_out = np.array(test_gt_now[t])
            ensemble = np.array(tf.reshape(out[0], [test_num_ensemble, dim_x]))
            transition_out = np.array(out[2])
            observation_out = np.array(out[3])
            data_save.append(state_out)
            emsemble_save.append(ensemble)
            gt_save.append(gt_out)
            observation_save.append(observation_out)
            transition_save.append(transition_out)
        data['state'] = data_save
        data['ensemble'] = emsemble_save
        data['gt'] = gt_save
        data['observation'] = observation_save
        data['transition'] = transition_save
        with open(((((('./output/bayes_enkf_' + version) + '_') + name[index]) + str(k).zfill(3)) + 'test.pkl'), 'wb') as f:
            pickle.dump(data, f)
def test_keyword_only_args(msg):
    """pybind11 regression test for keyword-only argument binding (module `m`).

    `msg` is the fixture that normalizes pybind11 error text for comparison.
    """
    # Keyword-only functions accept args in any keyword order.
    assert (m.kw_only_all(i=1, j=2) == (1, 2))
    assert (m.kw_only_all(j=1, i=2) == (2, 1))
    # Missing a required kw-only argument fails.
    with pytest.raises(TypeError) as excinfo:
        assert (m.kw_only_all(i=1) == (1,))
    assert ('incompatible function arguments' in str(excinfo.value))
    # Positional use of kw-only parameters fails.
    with pytest.raises(TypeError) as excinfo:
        assert (m.kw_only_all(1, 2) == (1, 2))
    assert ('incompatible function arguments' in str(excinfo.value))
    assert (m.kw_only_some(1, k=3, j=2) == (1, 2, 3))
    # Defaults fill unspecified kw-only parameters.
    assert (m.kw_only_with_defaults(z=8) == (3, 4, 5, 8))
    assert (m.kw_only_with_defaults(2, z=8) == (2, 4, 5, 8))
    assert (m.kw_only_with_defaults(2, j=7, k=8, z=9) == (2, 7, 8, 9))
    assert (m.kw_only_with_defaults(2, 7, z=9, k=8) == (2, 7, 8, 9))
    # Mixed positional-then-kw-only signatures.
    assert (m.kw_only_mixed(1, j=2) == (1, 2))
    assert (m.kw_only_mixed(j=2, i=3) == (3, 2))
    assert (m.kw_only_mixed(i=2, j=3) == (2, 3))
    # kw_only combined with a **kwargs catch-all.
    assert (m.kw_only_plus_more(4, 5, k=6, extra=7) == (4, 5, 6, {'extra': 7}))
    assert (m.kw_only_plus_more(3, k=5, j=4, extra=6) == (3, 4, 5, {'extra': 6}))
    assert (m.kw_only_plus_more(2, k=3, extra=4) == (2, (- 1), 3, {'extra': 4}))
    with pytest.raises(TypeError) as excinfo:
        assert (m.kw_only_mixed(i=1) == (1,))
    assert ('incompatible function arguments' in str(excinfo.value))
    # Registering an unnamed arg after kw_only() must be rejected at bind time.
    with pytest.raises(RuntimeError) as excinfo:
        m.register_invalid_kw_only(m)
    assert (msg(excinfo.value) == '\n arg(): cannot specify an unnamed argument after a kw_only() annotation or args() argument\n ')
    # kw_only immediately after self (constructor and method), plus docstrings.
    x = m.first_arg_kw_only(i=1)
    x.method()
    x.method(i=1, j=2)
    assert (m.first_arg_kw_only.__init__.__doc__ == '__init__(self: pybind11_tests.kwargs_and_defaults.first_arg_kw_only, *, i: int = 0) -> None\n')
    assert (m.first_arg_kw_only.method.__doc__ == 'method(self: pybind11_tests.kwargs_and_defaults.first_arg_kw_only, *, i: int = 1, j: int = 2) -> None\n')
class ShuffleMomentumSiameseBaseModel(MomentumSiameseBaseModel, ABC):
    """Momentum-siamese base model with MoCo-style batch shuffling for BatchNorm.

    On a single device, BN statistics leak information between the two views;
    this class either shuffles across real devices (DDP all-gather) or splits
    the batch into `simulate_n_devices` virtual devices via split-batchnorm.

    NOTE(review): the bare `_grad()` statements below look like mangled
    `@torch.no_grad()` decorators from the original source — confirm upstream.
    """

    def __init__(self, trunk: DictConfig, optimizer: DictConfig, projector: Optional[DictConfig]=None, predictor: Optional[DictConfig]=None, train_transform: Optional[DictConfig]=None, val_transform: Optional[DictConfig]=None, test_transform: Optional[DictConfig]=None, normalize_outputs: bool=True, num_global_crops: int=2, num_local_crops: int=0, num_splits: int=0, num_splits_per_combination: int=2, mutual_pass: bool=False, initial_momentum: int=0.996, scheduler_momentum: str='cosine', shuffle_bn: bool=True, num_devices: int=1, simulate_n_devices: int=8) -> None:
        super().__init__(trunk=trunk, optimizer=optimizer, projector=projector, predictor=predictor, train_transform=train_transform, val_transform=val_transform, test_transform=test_transform, normalize_outputs=normalize_outputs, num_global_crops=num_global_crops, num_local_crops=num_local_crops, num_splits=num_splits, num_splits_per_combination=num_splits_per_combination, mutual_pass=mutual_pass, initial_momentum=initial_momentum, scheduler_momentum=scheduler_momentum)
        self.save_hyperparameters()
        self.num_devices = num_devices
        self.shuffle_bn = shuffle_bn
        self.simulate_n_devices = simulate_n_devices
        if ((self.num_devices == (- 1)) and self.shuffle_bn):
            rank_zero_info(f'In {__class__.__name__} when num_devices=-1, it is assumed that there are more than one device.')
        elif ((self.num_devices <= 1) and self.shuffle_bn):
            if (self.simulate_n_devices <= 1):
                # BUG FIX: the exception was previously constructed but never raised.
                raise AttributeError('if num_devices is 1 and shuffle_bn is True, the simulate_n_devices attribute should be superior to 1')
            # Simulate multi-device BN by splitting each batch into virtual devices.
            self.momentum_trunk = convert_to_split_batchnorm(self.momentum_trunk, self.simulate_n_devices)
            if (self.momentum_projector is not None):
                self.momentum_projector = convert_to_split_batchnorm(self.momentum_projector, self.simulate_n_devices)

    def on_train_start(self):
        # Resolve the real device count from the trainer once training begins.
        old_num_devices = self.num_devices
        self.num_devices = get_num_devices_in_trainer(self.trainer)
        if (old_num_devices != self.num_devices):
            rank_zero_info(f'Num devices passed to {__class__.__name__}: {old_num_devices} has been updated to {self.num_devices}.')

    _grad()
    def momentum_shared_step(self, x: Tensor) -> Dict[(str, Tensor)]:
        # Dispatch to the DDP or single-device shuffling implementation.
        if (self.num_devices > 1):
            return self._momentum_shared_step_n_devices(x)
        else:
            return self._momentum_shared_step_single_device(x)

    _grad()
    def _momentum_shared_step_n_devices(self, x: Tensor) -> Dict[(str, Tensor)]:
        # Shuffle across devices so BN statistics do not leak batch structure.
        if self.shuffle_bn:
            (x, idx_unshuffle) = self._batch_shuffle_ddp(x)
        h = self.momentum_trunk(x)
        z = (self.momentum_projector(h) if (self.momentum_projector is not None) else h)
        if self.shuffle_bn:
            z = self._batch_unshuffle_ddp(z, idx_unshuffle)
        if self.normalize_outputs:
            z = nn.functional.normalize(z, dim=1)
        return {'h': h, 'z': z}

    _grad()
    def _momentum_shared_step_single_device(self, x: Tensor) -> Dict[(str, Tensor)]:
        # Same as the n-device path but using an in-process permutation.
        if self.shuffle_bn:
            (x, idx_unshuffle) = self._batch_shuffle_single_device(x)
        h = self.momentum_trunk(x)
        z = (self.momentum_projector(h) if (self.momentum_projector is not None) else h)
        if self.shuffle_bn:
            z = self._batch_unshuffle_single_device(z, idx_unshuffle)
        if self.normalize_outputs:
            z = nn.functional.normalize(z, dim=1)
        return {'h': h, 'z': z}

    _grad()
    def _batch_shuffle_ddp(self, x: Tensor) -> Tuple[(Tensor, Tensor)]:
        # Gather the global batch, broadcast one permutation from rank 0,
        # and keep only this rank's shard of the shuffled batch.
        x_gather = concat_all_gather_without_backprop(x)
        batch_size_all = x_gather.shape[0]
        idx_shuffle = torch.randperm(batch_size_all, device=self.device)
        torch.distributed.broadcast(idx_shuffle, src=0)
        idx_unshuffle = torch.argsort(idx_shuffle)
        gpu_idx = torch.distributed.get_rank()
        idx_this = idx_shuffle.view(self.num_devices, (- 1))[gpu_idx]
        return (x_gather[idx_this], idx_unshuffle)

    _grad()
    def _batch_unshuffle_ddp(self, x: Tensor, idx_unshuffle: Tensor) -> Tensor:
        # Inverse of _batch_shuffle_ddp for this rank's shard.
        x_gather = concat_all_gather_without_backprop(x)
        gpu_idx = torch.distributed.get_rank()
        idx_this = idx_unshuffle.view(self.num_devices, (- 1))[gpu_idx]
        return x_gather[idx_this]

    _grad()
    def _batch_shuffle_single_device(self, x: Tensor) -> Tuple[(Tensor, Tensor)]:
        # In-process permutation; argsort of the permutation inverts it.
        batch_size = x.shape[0]
        idx_shuffle = torch.randperm(batch_size, device=self.device)
        idx_unshuffle = torch.argsort(idx_shuffle)
        return (x[idx_shuffle], idx_unshuffle)

    _grad()
    def _batch_unshuffle_single_device(self, x: Tensor, idx_unshuffle: Tensor) -> Tensor:
        # Inverse of _batch_shuffle_single_device.
        return x[idx_unshuffle]
def psi_prior(samples, min_value=0.0, max_value=(2 * np.pi)):
    """Boolean mask: True where samples['psi'] lies strictly inside (min_value, max_value)."""
    psi = samples['psi']
    return np.logical_and(psi > min_value, psi < max_value)
def dump_data(data, fn, mode='w'):
    """Write an iterable of strings to file `fn`, creating parent directories.

    Args:
        data: iterable of str passed to `writelines` (no newlines are added).
        fn: destination path, `str` or `Path`.
        mode: open mode, default 'w'.
    """
    fn = Path(fn)
    # Path.mkdir replaces the os.path.exists/os.makedirs dance; exist_ok
    # makes it race-free when two writers create the directory concurrently.
    fn.parent.mkdir(parents=True, exist_ok=True)
    with open(fn, mode) as f:
        f.writelines(data)
def convert_normal_to_point_form_of_line(rho, theta, x_values=(0, 1920)):
    """Convert a line in Hesse normal form (rho, theta) to points on the line.

    Solves rho = x*cos(theta) + y*sin(theta) for y at each x in `x_values`
    (defaults preserve the old behavior: the left/right edges of a 1920-px
    image). Returns a list of np.array([x, y]) points.

    Note: sin(theta) must be non-zero — a theta of 0 or pi describes a
    vertical line with no finite y for an arbitrary x.
    """
    sin_t = np.sin(theta)
    cos_t = np.cos(theta)
    return [np.array([x, (rho - (x * cos_t)) / sin_t]) for x in x_values]
def check_uniques(example, uniques):
    """Return True the first time example['hash'] is seen; False afterwards.

    Mutates `uniques`: the hash is removed on first sight, so subsequent
    duplicates of the same example return False.
    """
    h = example['hash']
    if h not in uniques:
        return False
    uniques.remove(h)
    return True
class TestGaussianFocalLoss(unittest.TestCase):
    """Unit tests for GaussianFocalLoss: reductions, avg_factor, pos_inds path."""

    def test_forward(self):
        pred = torch.rand((10, 4))
        target = torch.rand((10, 4))
        # Default reduction produces a tensor loss.
        gaussian_focal_loss = GaussianFocalLoss()
        loss1 = gaussian_focal_loss(pred, target)
        self.assertIsInstance(loss1, torch.Tensor)
        # avg_factor rescales the reduced loss.
        loss2 = gaussian_focal_loss(pred, target, avg_factor=0.5)
        self.assertIsInstance(loss2, torch.Tensor)
        # reduction='none' keeps the elementwise shape.
        gaussian_focal_loss = GaussianFocalLoss(reduction='none')
        loss = gaussian_focal_loss(pred, target)
        self.assertTrue((loss.shape == (10, 4)))
        # reduction_override='mean' collapses to a scalar.
        loss = gaussian_focal_loss(pred, target, reduction_override='mean')
        self.assertTrue((loss.ndim == 0))
        # Unsupported override values must be rejected.
        with self.assertRaises(AssertionError):
            gaussian_focal_loss(pred, target, reduction_override='max')
        # Sparse positive-sample path via pos_inds / pos_labels.
        pos_inds = (torch.rand(5) * 8).long()
        pos_labels = (torch.rand(5) * 2).long()
        gaussian_focal_loss = GaussianFocalLoss()
        loss = gaussian_focal_loss(pred, target, pos_inds, pos_labels)
        self.assertIsInstance(loss, torch.Tensor)
def build_reverse_dictionary(word_to_id):
    """Invert a word -> id mapping into an id -> word mapping.

    Assumes ids are unique; duplicate ids would silently collapse to the
    last word encountered (same behavior as the dict(zip(...)) original).
    """
    return {word_id: word for word, word_id in word_to_id.items()}
def test_generate_shapes():
    """Smoke test: generate star polygons, transform them repeatedly, render to PNG."""
    poly = Polygon(create_star_polygon(15, 8, 5, 1.5))
    # Small rigid transform applied cumulatively in the loop below.
    transform = SE2_from_xytheta((3, 3, deg2rad(5)))
    viz = ShapelyViz()
    arena_size = 100
    boundary = LinearRing([[0, 0], [arena_size, 0], [arena_size, arena_size], [0, arena_size]])
    viz.add_shape(boundary, color='r')
    # One randomly generated star-shaped polygon.
    gen_poly = Polygon(create_random_starshaped_polygon(50, 50, 10, 0.5, 0.5, 10))
    viz.add_shape(gen_poly, facecolor='gold', edgecolor='r')
    # The same star polygon drawn 10 times, each shifted/rotated a bit more.
    for i in range(10):
        poly = apply_SE2_to_shapely_geo(poly, transform)
        viz.add_shape(poly, facecolor='gold', edgecolor='r')
    viz.ax.autoscale()
    viz.ax.set_aspect('equal')
    f = os.path.join(OUT_TESTS_DIR, 'test_generate_shapes.png')
    plt.savefig(f, dpi=300)
class LinearFloatParam(RandomHyperparameter):
    """Hyperparameter sampled uniformly from [min_value, max_value)."""

    def __init__(self, name, min_value, max_value):
        super(LinearFloatParam, self).__init__(name)
        self._min = min_value
        # Width of the sampling interval.
        self._delta = max_value - min_value

    def generate_next_value(self):
        # random.random() yields [0, 1); scale and shift into [min, min + delta).
        return self._min + random.random() * self._delta
def dowmsampleBottleneck(channel_in, channel_out, stride=2):
    """Build a 1x1 -> 3x3(strided) -> 1x1 bottleneck that downsamples spatially.

    The 3x3 middle conv carries the stride; each conv is followed by
    BatchNorm and ReLU. (Name kept as-is for callers, typo and all.)
    """
    layers = [
        nn.Conv2d(channel_in, 128, kernel_size=1, stride=1),
        nn.BatchNorm2d(128),
        nn.ReLU(),
        nn.Conv2d(128, 128, kernel_size=3, stride=stride, padding=1),
        nn.BatchNorm2d(128),
        nn.ReLU(),
        nn.Conv2d(128, channel_out, kernel_size=1, stride=1),
        nn.BatchNorm2d(channel_out),
        nn.ReLU(),
    ]
    return nn.Sequential(*layers)
def write_cameras_binary(cameras, path_to_model_file):
    """Serialize a COLMAP-style camera dict to a binary cameras file.

    Layout: uint64 camera count, then per camera four ints/uint64s
    (id, model_id, width, height) followed by the model's parameters
    as doubles. Returns `cameras` unchanged for chaining.
    """
    with open(path_to_model_file, 'wb') as fid:
        write_next_bytes(fid, len(cameras), 'Q')
        for (_, cam) in cameras.items():
            # Translate the model name to its numeric COLMAP model id.
            model_id = CAMERA_MODEL_NAMES[cam.model].model_id
            camera_properties = [cam.id, model_id, cam.width, cam.height]
            write_next_bytes(fid, camera_properties, 'iiQQ')
            for p in cam.params:
                write_next_bytes(fid, float(p), 'd')
    return cameras
def make_loss_report(exp_list, title, path_fig):
    """Plot validation-loss curves for several experiments into one figure.

    exp_list: iterable of (experiment_dir, label) pairs; each dir must
    contain a 'log_value.txt' readable by `load_loss`. Saves to path_fig.
    """
    fig = plt.figure(dpi=150)
    plt.title(title)
    for (idx, (exp, exp_label)) in enumerate(exp_list):
        path_log = os.path.join(exp, 'log_value.txt')
        # NOTE(review): only the validation series is plotted; data_tra unused.
        (data_val, data_tra) = load_loss(path_log)
        plt.plot(data_val['step'], data_val['vals'], label=exp_label, linestyle='-')
    plt.yscale('log')
    plt.legend(loc='upper right')
    plt.tight_layout()
    plt.savefig(path_fig)
class RandomHorizontalFlip(RecursiveTransform):
    """Horizontally flip poses, landmarks, images and masks with probability p.

    Nested lists/tuples are flipped recursively with ONE shared coin flip,
    so all items of a sample stay consistent.
    """

    def __init__(self, p=0.5):
        # p: probability of flipping when the decision is not forced.
        self.p = p

    def __call__(self, x, flip=None):
        # Decide once; recursive calls pass the decision down via `flip`.
        flip = ((random.random() < self.p) if (flip is None) else flip)
        if (not flip):
            return x
        if isinstance(x, (list, tuple)):
            x = [self.__call__(a, flip) for a in x]
        elif is_pose(x):
            # Mirroring negates the first pose component (yaw-like angle).
            x[0] *= (- 1.0)
        elif is_landmarks(x):
            x = hflip_face_landmarks_98pts(x)
        elif is_img(x):
            x = cv2.flip(x, 1)
        elif is_binary_mask(x):
            # cv2.flip cannot handle bool arrays; round-trip through uint8.
            x = cv2.flip(x.astype('uint8'), 1).astype(bool)
        return x

    def __repr__(self):
        return (self.__class__.__name__ + '(p={})'.format(self.p))
class InnerProductDecoder(nn.Module):
    """Decode latent node embeddings into an adjacency-style matrix.

    Computes act(Z @ Z.T) after (optional) dropout on Z.
    """

    def __init__(self, act=torch.sigmoid, dropout=0.0):
        super(InnerProductDecoder, self).__init__()
        self.act = act          # elementwise activation applied to the Gram matrix
        self.dropout = dropout  # dropout probability applied to the input

    def forward(self, inp):
        # Dropout is only active while self.training is True.
        dropped = F.dropout(inp, self.dropout, training=self.training)
        gram = torch.mm(dropped, torch.transpose(dropped, dim0=0, dim1=1))
        return self.act(gram)
def get_prediction(model, tokenizer, premise, hypothesis, max_len=50):
    """Run an NLI model on premise/hypothesis pairs and return class probabilities.

    Returns a nested list (one row per pair) of softmaxed prediction scores.
    The dummy label list only satisfies create_data_matrices' input schema.
    """
    def softmax(x):
        # Stable enough here: softmax over the last axis of the logits.
        return (np.exp(x) / np.sum(np.exp(x), axis=(- 1), keepdims=True))
    data = {'premise': premise, 'hypothesis': hypothesis, 'label': ([1] * len(premise))}
    m_input = create_data_matrices(tokenizer, data, max_len=max_len, padding='post')
    predictions = model.predict(m_input)
    probabilities = softmax(predictions)
    probabilities = probabilities.tolist()
    return probabilities
def rasterize_gaussians(means3D, means2D, sh, colors_precomp, opacities, scales, rotations, aos, transforms, cov3Ds_precomp, raster_settings):
    """Functional wrapper over the _RasterizeGaussians autograd Function.

    Forwards all Gaussian attributes and rasterizer settings unchanged so
    callers get autograd support without touching the Function API directly.
    """
    return _RasterizeGaussians.apply(means3D, means2D, sh, colors_precomp, opacities, scales, rotations, aos, transforms, cov3Ds_precomp, raster_settings)
class ConvBlock(nn.Module):
    """Pre-norm conv block: norm -> (optional 2x transposed-conv upsample) -> conv.

    Both convs are followed by ReLU. The normalization layer acts on the
    *input* channel count f1 and is applied before any convolution.
    """

    def __init__(self, f1, f2, kernel_size=3, padding=1, use_groupnorm=True, groups=8, dilation=1, transpose=False):
        super().__init__()
        self.transpose = transpose
        # Effective padding grows with dilation to keep spatial size stable.
        self.conv = nn.Conv2d(f1, f2, (kernel_size, kernel_size), dilation=dilation, padding=(padding * dilation))
        if self.transpose:
            # Stride-2 transposed conv doubles the spatial size, channels stay f1.
            self.convt = nn.ConvTranspose2d(f1, f1, (3, 3), dilation=dilation, stride=2, padding=dilation, output_padding=1)
        self.bn = nn.GroupNorm(groups, f1) if use_groupnorm else nn.BatchNorm2d(f1)

    def forward(self, x):
        out = self.bn(x)
        if self.transpose:
            out = F.relu(self.convt(out))
        return F.relu(self.conv(out))
class DarkNetBlock(nn.Module):
    """Residual DarkNet block: 1x1 squeeze, 3x3 expand, skip connection."""

    # Channel multiplier applied by the second convolution.
    expansion = 2

    def __init__(self, in_channels, channels):
        super().__init__()
        self.conv1 = darknetconvlayer(in_channels, channels, kernel_size=1)
        self.conv2 = darknetconvlayer(channels, (channels * self.expansion), kernel_size=3, padding=1)

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.conv2(out)
        return out + residual
def train_model(model, criterion, optimizer, scheduler, num_epochs=25, print_freq=500):
    """Train a full-batch graph model, tracking the best validation accuracy.

    Relies on module-level globals: fts (features), G (graph), lbls (labels),
    idx_train / idx_test (index splits). The whole graph is forwarded each
    phase; only the phase's index subset contributes to the loss/metrics.
    Returns the model loaded with the best-val-accuracy weights.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        if ((epoch % print_freq) == 0):
            print(('-' * 10))
            print(f'Epoch {epoch}/{(num_epochs - 1)}')
        for phase in ['train', 'val']:
            if (phase == 'train'):
                # NOTE(review): scheduler.step() before optimizer.step() — modern
                # torch expects the opposite order; confirm intended LR schedule.
                scheduler.step()
                model.train()
            else:
                model.eval()
            running_loss = 0.0
            running_corrects = 0
            idx = (idx_train if (phase == 'train') else idx_test)
            optimizer.zero_grad()
            # Gradients only tracked during the training phase.
            with torch.set_grad_enabled((phase == 'train')):
                outputs = model(fts, G)
                loss = criterion(outputs[idx], lbls[idx])
                (_, preds) = torch.max(outputs, 1)
                if (phase == 'train'):
                    loss.backward()
                    optimizer.step()
            running_loss += (loss.item() * fts.size(0))
            running_corrects += torch.sum((preds[idx] == lbls.data[idx]))
            epoch_loss = (running_loss / len(idx))
            epoch_acc = (running_corrects.double() / len(idx))
            if ((epoch % print_freq) == 0):
                print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')
            # Keep a snapshot of the best-performing validation weights.
            if ((phase == 'val') and (epoch_acc > best_acc)):
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        if ((epoch % print_freq) == 0):
            print(f'Best val Acc: {best_acc:4f}')
            print(('-' * 20))
    time_elapsed = (time.time() - since)
    print(f'''
Training complete in {(time_elapsed // 60):.0f}m {(time_elapsed % 60):.0f}s''')
    print(f'Best val Acc: {best_acc:4f}')
    # Restore the best validation snapshot before returning.
    model.load_state_dict(best_model_wts)
    return model
def list_files(path):
    """Recursively collect the path of every file under `path`."""
    collected = []
    for root, _dirs, filenames in os.walk(path):
        for filename in filenames:
            collected.append(os.path.join(root, filename))
    return collected
# NOTE(review): `_module()` looks like a mangled registry decorator
# (e.g. `@MODELS.register_module()`) — confirm against upstream mmdetection.
_module()
class DDOD(SingleStageDetector):
    """DDOD single-stage detector: standard SingleStageDetector wiring.

    All components (backbone, neck, head, configs) are forwarded unchanged
    to the base class; this subclass only fixes the detector identity.
    """

    def __init__(self, backbone: ConfigType, neck: ConfigType, bbox_head: ConfigType, train_cfg: OptConfigType=None, test_cfg: OptConfigType=None, data_preprocessor: OptConfigType=None, init_cfg: OptMultiConfig=None) -> None:
        super().__init__(backbone=backbone, neck=neck, bbox_head=bbox_head, train_cfg=train_cfg, test_cfg=test_cfg, data_preprocessor=data_preprocessor, init_cfg=init_cfg)
def get_train_iterator(options, dataset):
    """Build the training batch iterator: shuffled, full batches only,
    with length filtering and batch sizing taken from `options`."""
    return make_batch_iterator(options, dataset, shuffle=True, include_partial=False, filter_length=options.train_filter_length, batch_size=options.batch_size, length_to_size=options.length_to_size)
# NOTE(review): `_model('masked_lm')` looks like a mangled
# `@register_model('masked_lm')` fairseq decorator — confirm upstream.
_model('masked_lm')
class MaskedLMModel(BaseFairseqModel):
    """Fairseq masked-LM model: a thin wrapper around a MaskedLMEncoder.

    NOTE(review): upstream fairseq declares add_args as @staticmethod and
    build_model as @classmethod; the decorators appear stripped here.
    """

    def __init__(self, args, encoder):
        super().__init__()
        self.args = args
        self.encoder = encoder
        # BERT-style parameter initialization, opt-in via args.
        if getattr(args, 'apply_bert_init', False):
            self.apply(init_bert_params)

    def add_args(parser):
        """Register masked-LM CLI arguments on the fairseq parser."""
        parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
        parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
        parser.add_argument('--act-dropout', type=float, metavar='D', help='dropout probability after activation in FFN')
        parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN')
        parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers')
        parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads')
        parser.add_argument('--bias-kv', action='store_true', help='if set, adding a learnable bias kv')
        parser.add_argument('--zero-attn', action='store_true', help='if set, pads attn with zero')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
        parser.add_argument('--share-encoder-input-output-embed', action='store_true', help='share encoder input and output embeddings')
        parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder')
        parser.add_argument('--no-token-positional-embeddings', action='store_true', help='if set, disables positional embeddings (outside self attention)')
        parser.add_argument('--num-segment', type=int, metavar='N', help='num segment in the input')
        parser.add_argument('--sentence-class-num', type=int, metavar='N', help='number of classes for sentence task')
        parser.add_argument('--sent-loss', action='store_true', help='if set, calculate sentence level predictions')
        parser.add_argument('--apply-bert-init', action='store_true', help='use custom param initialization for BERT')
        parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use')
        parser.add_argument('--pooler-activation-fn', choices=utils.get_available_activation_fns(), help='Which activation function to use for pooler layer.')
        parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block')

    def forward(self, src_tokens, segment_labels, **kwargs):
        # Delegate entirely to the encoder.
        return self.encoder(src_tokens, segment_labels, **kwargs)

    def max_positions(self):
        return self.encoder.max_positions

    def build_model(cls, args, task):
        """Construct the model from args/task (fairseq factory hook)."""
        # Fill in architecture defaults before reading any args.
        base_architecture(args)
        if (not hasattr(args, 'max_positions')):
            args.max_positions = args.tokens_per_sample
        print('Model args: ', args)
        encoder = MaskedLMEncoder(args, task.dictionary)
        return cls(args, encoder)
def is_ngram_content(ngram):
    """Return True if at least one token of `ngram` is not a stopword.

    Relies on the module-level `stopset` collection of stopwords.
    Short-circuits on the first content token, like the original loop.
    """
    return any(gram not in stopset for gram in ngram)
class HelenDataset(BaseDataset):
    """Helen face dataset: landmark .npy files paired with images and parsing maps."""

    def modify_commandline_options(parser, is_train):
        # No dataset-specific CLI options.
        return parser

    def initialize(self, opt):
        self.opt = opt
        self.root = opt.dataroot
        # Dataset is indexed by the landmark files; image/label paths are derived.
        self.path = make_dataset(os.path.join(opt.dataroot))
        self.path = sorted(self.path)
        self.size = len(self.path)
        self.transform = get_transform(opt)
        self.target_transform = get_target_transform(opt)

    def channel_1toN(self, img, num_channel):
        """Expand a single-channel label image into a one-hot (N, H, W) tensor.

        Channel i is 1.0 exactly where the label value equals i.
        """
        transform1 = transforms.Compose([transforms.ToTensor()])
        # ToTensor scales to [0,1]; multiply back to recover integer labels.
        img = (transform1(img) * 255.0).long()
        T = torch.LongTensor(num_channel, img.size(1), img.size(2)).zero_()
        mask = torch.LongTensor(img.size(1), img.size(2)).zero_()
        for i in range(num_channel):
            T[i] = (T[i] + i)
            # layer == 0 exactly where the label equals i; xor-with-zero then
            # logical_not converts that into a 0/1 membership map.
            layer = (T[i] - img)
            T[i] = torch.from_numpy(np.logical_not(np.logical_xor(layer.numpy(), mask.numpy())).astype(int))
        return T.float()

    def __getitem__(self, index):
        A_label = 0
        A_path = self.path[index]
        # Derive the image and parsing-map paths from the landmark path.
        A_path_face = A_path.replace('landmark', 'images')
        A_path_face = A_path_face.replace('npy', 'jpg')
        A_path_parsing = A_path.replace('landmark', 'labels')
        A_path_parsing = A_path_parsing.replace('npy', 'png')
        A_img_face = Image.open(A_path_face).convert('RGB')
        A_img_parsing = Image.open(A_path_parsing)
        A = np.load(A_path)
        A = torch.from_numpy(A).float()
        A_face = self.transform(A_img_face)
        # 11 parsing classes, one-hot encoded.
        A_parsing = self.channel_1toN(A_img_parsing, 11)
        return {'A': A, 'A_face': A_face, 'A_label': A_label, 'A_parsing': A_parsing, 'A_path': A_path}

    def __len__(self):
        max_size = 0
        max_size = self.size
        return max_size

    def name(self):
        return 'HelenDataset'
# NOTE(review): `_traceback` looks like mangled decorator/annotation residue
# from the original source — confirm upstream before removing.
_traceback
def handle_dm_reply(cpu, data, size):
    """BPF perf-buffer callback for a delay-measurement (DM) reply.

    Decodes the DM TLV, computes forward/backward one-way delays from the
    four timestamps (t1..t3 from the packet, t4 = local receive time) and
    stores them on the owning session; when a whole batch has replies,
    pushes the measured delays into traffic control.
    """
    # t4: local receive timestamp, taken as early as possible.
    t4 = time.time()

    def ieee_to_float(sec, nsec):
        # Timestamps arrive in network byte order as (seconds, nanoseconds).
        val = float(socket.ntohl(sec))
        val += (float(socket.ntohl(nsec)) / (10 ** 9))
        return val

    dm = ct.cast(data, ct.POINTER(DM_TLV)).contents
    # Ignore replies for sessions we are not tracking.
    if (not (dm.session_id in Link.dm_sessions)):
        return
    session = Link.dm_sessions[dm.session_id]
    t1 = ieee_to_float(dm.timestamp1_sec, dm.timestamp1_nsec)
    t2 = ieee_to_float(dm.timestamp2_sec, dm.timestamp2_nsec)
    t3 = ieee_to_float(dm.timestamp3_sec, dm.timestamp3_nsec)
    # Forward delay t2-t1, backward delay t4-t3.
    session.store_delays((t2 - t1), (t4 - t3))
    # Once every session in the batch has a reply, apply the measurements.
    if ((session.batch_id > Link.last_batch_completed) and all(map((lambda x: x.has_reply()), session.batch))):
        update_tc_delays(session.batch_id, session.batch)
def build_stats(counts):
    """Summarize reference-extraction counts into a stats dict.

    Adds a legacy dash-separated string form ('old_stats_str') and a
    human-readable timestamp ('date').
    """
    stats = {
        'status': 0,
        'reportnum': counts['reportnum'],
        'title': counts['title'],
        'author': counts['auth_group'],
        'url': counts['url'],
        'doi': counts['doi'],
        'misc': counts['misc'],
    }
    # Legacy representation kept for backward compatibility with old logs.
    stats['old_stats_str'] = ('%(status)s-%(reportnum)s-%(title)s-%(author)s-%(url)s-%(doi)s-%(misc)s' % stats)
    stats['date'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    return stats
def decode_ans(ans_json, px='p1'):
    """Decode a JSON survey answer into a ranking [x, y, z] of options 0..2.

    x is the option selected in question 1, y the option in question 2,
    and z the remaining option. Missing or conflicting selections
    (no pick, or the same option twice) yield [0, 0, 0].
    """
    ans = json.loads(ans_json)[0]

    def picked(question):
        # Index of the first 'on' choice among options a/b/c, or -1 if none.
        flags = [ans[px + question + letter]['on'] for letter in 'abc']
        return flags.index(True) if any(flags) else (- 1)

    x = picked('1')
    y = picked('2')
    if x == (- 1) or y == (- 1) or x == y:
        return [0, 0, 0]
    z = ({0, 1, 2} - {x, y}).pop()
    return [x, y, z]
def collect_words(path, lower):
    """Collect the vocabulary of a jsonlines NLI file.

    Tokenizes the 'sentence1' and 'sentence2' fields of every record
    (lowercased first when `lower` is truthy) and returns the set of tokens.
    """
    word_set = set()
    with jsonlines.open(path, 'r') as reader:
        for obj in reader:
            for key in ['sentence1', 'sentence2']:
                sentence = obj[key]
                if lower:
                    sentence = sentence.lower()
                words = word_tokenize(sentence)
                word_set.update(words)
    return word_set
def process(sentence, frames, elements, tokenizer, frame_vocabulary, element_vocabulary, max_length):
    """Encode one sentence plus frame/element annotations for a frame-parsing model.

    Wraps the sentence in [CLS]/[SEP], wordpiece-tokenizes it, and aligns
    frame labels to first-subword ("head") positions. All sequence outputs
    are right-padded to `max_length`. Returns (input_ids, attention_mask,
    head_indexes, frame_id, element_id, label_mask).
    """
    (input_ids, is_heads) = ([], [])
    sentence = ((['[CLS]'] + sentence) + ['[SEP]'])
    frame_label = (['<unk>'] * len(sentence))
    element_id = []
    for word in sentence:
        # Special tokens are kept verbatim; real words are wordpiece-split.
        token = (tokenizer.tokenize(word) if (word not in ['[CLS]', '[SEP]']) else [word])
        input_id = tokenizer.convert_tokens_to_ids(token)
        if (word in ['[CLS]', '[SEP]']):
            is_head = [0]
        else:
            # Only the first subword of each word counts as a head position.
            is_head = ([1] + ([0] * (len(token) - 1)))
        input_ids.extend(input_id)
        is_heads.extend(is_head)
    attention_mask = ([1] * len(input_ids))
    for (frame, element) in zip(frames, elements):
        for i in range(len(frame)):
            if (frame[i] != '<unk>'):
                # First annotated position carries the frame label and its
                # padded element-role ids, keyed by (start, end, frame).
                frame_label[i] = frame[i]
                element_list = [element_vocabulary.to_index(e) for e in element]
                element_list = (element_list + ([element_vocabulary.to_index('<pad>')] * (max_length - len(element_list))))
                element_label = {tuple([i, (i + 1), frame[i]]): element_list}
                element_id.append(element_label)
                break
    frame_id = [frame_vocabulary.to_index(f) for f in frame_label]
    label_mask = ([1] * len(frame_id))
    head_indexes = []
    for i in range(len(is_heads)):
        if is_heads[i]:
            head_indexes.append(i)
    # Right-pad every sequence output to max_length.
    input_ids = (input_ids + ([0] * (max_length - len(input_ids))))
    attention_mask = (attention_mask + ([0] * (max_length - len(attention_mask))))
    head_indexes = (head_indexes + ([0] * (max_length - len(head_indexes))))
    frame_id = (frame_id + ([frame_vocabulary.to_index('<pad>')] * (max_length - len(frame_id))))
    label_mask = (label_mask + ([0] * (max_length - len(label_mask))))
    return (input_ids, attention_mask, head_indexes, frame_id, element_id, label_mask)
class SensorManager(Singleton):
    """Registry of CARLA sensors attached to one vehicle.

    Sensor specs come from `param_dict` (key -> {'transform', 'callback'});
    concrete sensors are created via module-level `add_<type>` factories
    looked up in globals().
    """

    def __init__(self, world, blueprint, vehicle, param_dict):
        self.world = world
        self.blueprint = blueprint
        self.vehicle = vehicle
        self.param_dict = param_dict
        # key -> spawned sensor actor
        self.sensor_dict = {}
        # Valid sensor-type prefixes for keys of the form '<type>:<name>'.
        self.known_sensors = ['camera', 'lidar', 'imu', 'gnss', 'semantic', 'collision']

    def init(self, key):
        """Spawn and register the sensor described by param_dict[key]."""
        if (key in self.param_dict):
            sensor_type = self.get_type(key)
            # Dispatch to the matching add_<type> factory in module scope.
            sensor = globals()[('add_' + sensor_type)](self.world, self.blueprint, self.vehicle, self.param_dict[key]['transform'])
            sensor.listen((lambda data: self.param_dict[key]['callback'](data)))
            self.sensor_dict[key] = sensor
            debug(info=(key + ' successfully initialized !'), info_type='success')
        else:
            debug(info=('Unknown sensor ' + str(key)), info_type='error')
        return None

    def init_all(self):
        """Best-effort initialization of every configured sensor."""
        for key in self.param_dict:
            try:
                self.init(key)
            # BUG FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit.
            except Exception:
                debug(info=(str(key) + ' initialize failed'), info_type='error')

    def close_all(self):
        """Best-effort destruction of every spawned sensor."""
        for key in self.param_dict:
            try:
                self.sensor_dict[key].destroy()
                debug(info=(str(key) + ' closed'), info_type='success')
            # BUG FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit.
            except Exception:
                debug(info=(str(key) + " has no attribute called 'close'"), info_type='message')

    def __del__(self):
        self.close_all()

    def __getitem__(self, key):
        if (key in self.sensor_dict):
            return self.sensor_dict[key]
        else:
            debug(info=('No sensor called ' + str(key)), info_type='error')
            return None

    def __setitem__(self, key, value):
        # Only allows replacing the spec of an already-known sensor key.
        if (key in self.param_dict):
            self.param_dict[key] = value
            return True
        else:
            debug(info=('No sensor called ' + str(key)), info_type='error')
            return None

    def get_type(self, key):
        """Return the '<type>' prefix of a '<type>:<name>' key, or None."""
        sensor_type = key.split(':')[0]
        if (sensor_type in self.known_sensors):
            return sensor_type
        else:
            debug(info=('Unknown sensor type ' + str(key)), info_type='error')
            return None
class TestBatchNormalization(object):
    """Check that an ONNX BatchNormalization node loads to match BigDL's layer."""

    def test_batch_normalization(self):
        input_shape = [1, 3, 224, 224]
        output_shape = [1, 3, 224, 224]
        # Graph I/O value infos; the per-channel params use shape [N, C][:2].
        X = onnx.helper.make_tensor_value_info('X', onnx.TensorProto.FLOAT, input_shape)
        scale = onnx.helper.make_tensor_value_info('scale', onnx.TensorProto.FLOAT, input_shape[:2])
        bias = onnx.helper.make_tensor_value_info('bias', onnx.TensorProto.FLOAT, input_shape[:2])
        mean = onnx.helper.make_tensor_value_info('mean', onnx.TensorProto.FLOAT, input_shape[:2])
        var = onnx.helper.make_tensor_value_info('var', onnx.TensorProto.FLOAT, input_shape[:2])
        Y = onnx.helper.make_tensor_value_info('Y', onnx.TensorProto.FLOAT, output_shape)
        # Random per-channel parameters and input.
        scale_vals = (np.random.random(input_shape[1]) * 10)
        bias_vals = (np.random.random(input_shape[1]) * 10)
        mean_vals = (np.random.random(input_shape[1]) * 10)
        var_vals = (np.random.random(input_shape[1]) * 10)
        input_x = (np.random.random(input_shape) * 10)
        epsilon = float(1e-05)
        momentum = float(0.9)
        # Initializers bind concrete values to the graph inputs.
        init_scale = onnx.helper.make_tensor(name='scale', data_type=onnx.TensorProto.FLOAT, dims=input_shape[:2], vals=scale_vals.tolist())
        init_bias = onnx.helper.make_tensor(name='bias', data_type=onnx.TensorProto.FLOAT, dims=input_shape[:2], vals=bias_vals.tolist())
        init_mean = onnx.helper.make_tensor(name='mean', data_type=onnx.TensorProto.FLOAT, dims=input_shape[:2], vals=mean_vals.tolist())
        init_var = onnx.helper.make_tensor(name='var', data_type=onnx.TensorProto.FLOAT, dims=input_shape[:2], vals=var_vals.tolist())
        batch_norm_node = onnx.helper.make_node(op_type='BatchNormalization', inputs=['X', 'scale', 'bias', 'mean', 'var'], outputs=['Y'], epsilon=epsilon, momentum=momentum)
        onnx_graph = onnx.helper.make_graph(nodes=[batch_norm_node], name='test-batch_norm', inputs=[X], outputs=[Y], initializer=[init_scale, init_bias, init_mean, init_var])
        onnx_model = onnx.helper.make_model(onnx_graph, producer_name='ONNX')
        onnx.checker.check_model(onnx_model)
        loaded_model = load_model_proto(onnx_model)
        # Reference BigDL layer configured with the same parameters.
        bigdl_model = SpatialBatchNormalization(n_output=input_shape[1], eps=epsilon, momentum=momentum, init_weight=scale_vals, init_bias=bias_vals, init_grad_weight=None, init_grad_bias=None)
        bigdl_model.set_running_mean(mean_vals)
        bigdl_model.set_running_std(var_vals)
        loaded_out = loaded_model.forward(input_x)
        expected_out = bigdl_model.forward(input_x)
        assert np.array_equal(loaded_out, expected_out)
class TaggedValueMeta(type):
    """Metaclass that back-fills proxy methods declared in ``cls._proxies``.

    For every proxied name the class does not already define, a
    ``ProxyDelegate`` is installed so attribute access is forwarded.
    """

    def __init__(cls, name, bases, dict):
        for fn_name, proxy in cls._proxies.items():
            # only install a delegate when the class doesn't provide the
            # attribute itself (hasattr suppresses exactly AttributeError)
            if not hasattr(cls, fn_name):
                setattr(cls, fn_name, ProxyDelegate(fn_name, proxy))
def get_midpoint(tuple_1, tuple_2):
    """Return the coordinate-wise midpoint of two equal-length tuples."""
    return tuple((a + b) / 2.0 for a, b in zip(tuple_1, tuple_2))
class GLPNForDepthEstimation(metaclass=DummyObject):
    """Placeholder class used when the required backend is unavailable.

    Instantiating it raises an informative error via ``requires_backends``
    instead of a bare ImportError at import time.
    """
    # backend(s) the real implementation needs
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # raises if torch is not installed
        requires_backends(self, ['torch'])
def create_dummy_class(klass, dependency, message=''):
    """Build a stand-in class for *klass* when *dependency* is missing.

    Any attribute access on the returned class, or any attempt to
    instantiate it, raises ImportError with an explanatory message.
    """
    err = f"Cannot import '{dependency}', therefore '{klass}' is not available."
    if message:
        err = f'{err} {message}'

    class _DummyMetaClass(type):
        # class-level attribute access also fails loudly
        def __getattr__(_, __):
            raise ImportError(err)

    class _Dummy(object, metaclass=_DummyMetaClass):
        def __init__(self, *args, **kwargs):
            raise ImportError(err)

    return _Dummy
def build_sampler(cfg, **kwargs):
    """Build (or pass through) a sampler.

    An existing ``BaseSampler`` instance is returned unchanged; a dict is
    treated as a config and instantiated from the ``samplers`` module with
    ``kwargs`` as default arguments. Anything else is a TypeError.
    """
    if isinstance(cfg, samplers.BaseSampler):
        return cfg
    if isinstance(cfg, dict):
        return mmcv.runner.obj_from_dict(cfg, samplers, default_args=kwargs)
    raise TypeError(f'Invalid type {type(cfg)} for building a sampler')
class MultivariateEuclideanNormal(torch.distributions.MultivariateNormal, VaeDistribution):
    """Multivariate normal whose log-prob is summed over the last dimension."""

    def log_prob(self, value):
        per_component = super().log_prob(value)
        return per_component.sum(dim=-1)
def main(unused_argv):
    """Read the image list from the CLI args and extract features in batch."""
    tf.logging.info('Reading list of images...')
    paths = _ReadImageList(cmd_args.list_images_path)
    batch_get_feature(paths, cmd_args.config_path, cmd_args.output_dir)
def test_interpolation_potential_dvcircdR():
    """Check that interpRZPotential reproduces dvcircdR of MWPotential for
    scalar and vector inputs, with linear and logarithmic R grids, and with
    multiprocessing enabled."""
    def relerr(interp_pot, x):
        # relative deviation of the interpolated dvcircdR from the direct value
        truth = potential.dvcircdR(potential.MWPotential, x)
        return numpy.fabs((interp_pot.dvcircdR(x) - truth) / truth)

    # linear R grid: on-grid scalar evaluations should be near machine precision
    rzpot = potential.interpRZPotential(RZPot=potential.MWPotential, rgrid=(0.01, 2.0, 201), logR=False, interpdvcircdr=True, zsym=True)
    for r in numpy.linspace(0.01, 2.0, 21):
        assert relerr(rzpot, r) < (10.0 ** (- 10.0)), ('RZPot interpolation of dvcircdR w/ interpRZPotential fails at R = %g' % r)
    # off-grid scalar evaluations: looser tolerance
    rs = numpy.linspace(0.01, 2.0, 20)
    for r in rs:
        dvcdrdiff = relerr(rzpot, r)
        assert dvcdrdiff < (10.0 ** (- 5.0)), f'RZPot interpolation of dvcircdR w/ interpRZPotential fails at R = {r:g} by {dvcdrdiff:g}'
    assert numpy.all(relerr(rzpot, rs) < (10.0 ** (- 5.0))), 'RZPot interpolation of dvcircdR w/ interpRZPotential fails for vector input'
    # logarithmic R grid
    rzpot = potential.interpRZPotential(RZPot=potential.MWPotential, rgrid=(numpy.log(0.01), numpy.log(20.0), 201), logR=True, interpdvcircdr=True, zsym=True)
    rs = numpy.linspace(0.01, 20.0, 20)
    assert numpy.all(relerr(rzpot, rs) < (10.0 ** (- 5.0))), 'RZPot interpolation of dvcircdR w/ interpRZPotential fails for vector input, w/ logR'
    # parallel grid construction (numcores=1)
    rzpot = potential.interpRZPotential(RZPot=potential.MWPotential, rgrid=(0.01, 2.0, 201), logR=False, interpdvcircdr=True, numcores=1, zsym=True)
    rs = numpy.linspace(0.01, 2.0, 20)
    assert numpy.all(relerr(rzpot, rs) < (10.0 ** (- 5.0))), 'RZPot interpolation of dvcircdR w/ interpRZPotential fails for vector input'
    return None
def osnet_x1_0_efdmix23_a0d1(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    """OSNet x1.0 with EFDMix applied after conv2/conv3 (alpha=0.1).

    Optionally loads ImageNet-pretrained weights for the base osnet_x1_0.
    """
    net = OSNet(num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2], channels=[64, 256, 384, 512], loss=loss, efdmix_layers=['conv2', 'conv3'], efdmix_alpha=0.1, **kwargs)
    if pretrained:
        init_pretrained_weights(net, key='osnet_x1_0')
    return net
class TreeLSTMNode():
    """Node of a tree-structured LSTM.

    Attributes:
        label: assigned later by callers (None at construction).
        h, c: hidden and cell state (whatever the caller stores, e.g. tensors).
        parent: parent TreeLSTMNode or None for the root.
        children: list of child nodes.
        num: integer id / index of the node.
    """

    def __init__(self, h=None, c=None, parent=None, children=None, num=0):
        self.label = None
        self.h = h
        self.c = c
        self.parent = parent
        # Bug fix: the original used a mutable default (children=[]), so every
        # node created without an explicit list shared ONE list object.
        self.children = [] if children is None else children
        self.num = num
class STResUNetBase(ResUNetBase):
    """Spatio-temporal ResUNet: same architecture as ResUNetBase but using
    4D convolutions (spatial hypercube crossed with a temporal hypercross).
    """
    CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS

    def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
        # D=4: three spatial dimensions plus time — TODO confirm against base class
        super(STResUNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class DotProduct_Classifier(nn.Module):
    """Plain linear (dot-product) classification head.

    forward returns ``(logits, None)``; the second element keeps the
    interface parallel with heads that also return features.
    """

    def __init__(self, num_classes=1000, feat_dim=2048, *args):
        super().__init__()
        self.fc = nn.Linear(feat_dim, num_classes)

    def forward(self, x, *args):
        logits = self.fc(x)
        return (logits, None)
def __name_getter(dictionary: mapType, previous_name, previous_names):
    """Recursively collect dotted key paths of *dictionary* into
    *previous_names* (mutated in place).

    Keys at the current level are all recorded first, then nested mappings
    are descended into — preserving breadth-first-per-level ordering.
    """
    prefixed = previous_name != ''
    # first pass: record every key at this level (root keys are appended
    # as-is; deeper keys get the dotted prefix)
    for key in dictionary:
        previous_names.append(f'{previous_name}.{key}' if prefixed else key)
    # second pass: recurse into nested mappings
    for key, value in dictionary.items():
        if isinstance(value, mapType):
            child = f'{previous_name}.{key}' if prefixed else str(key)
            __name_getter(value, child, previous_names)
_vision
class AlignProcessorTest(unittest.TestCase):
    """Tests for AlignProcessor: composition of a Bert tokenizer (slow/fast)
    with an EfficientNet image processor, save/load round-trips, and the
    processor __call__ / decode behavior."""

    def setUp(self):
        """Create a temp dir containing a tiny vocab file and an image-processor config."""
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))
        image_processor_map = {'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0., 0.4578275, 0.], 'image_std': [0., 0., 0.]}
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Slow tokenizer loaded from the temp dir."""
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast (Rust) tokenizer loaded from the temp dir."""
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        """Image processor loaded from the temp dir."""
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a single random HxWxC PIL image wrapped in a list."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        # channels-first -> channels-last for PIL
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, (- 1))) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        """save_pretrained/from_pretrained round-trips both slow and fast processors."""
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        """Extra kwargs passed to from_pretrained override saved component config."""
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = AlignProcessor.from_pretrained(self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        """Processor(images=...) matches the bare image processor output."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=0.01)

    def test_tokenizer(self):
        """Processor(text=...) matches the bare tokenizer (max_length padding)."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding='max_length', max_length=64)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        """Text+image call yields the full key set; empty call raises."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        """batch_decode is forwarded to the tokenizer."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        """Output keys match processor.model_input_names."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
def _match_checkpoint_pattern(name):
    """Match *name* against the checkpoint-file regex; returns a match or None."""
    return _checkpoint_pattern().match(name)
class ToLabel(object):
    """Transform: convert a PIL image / array of class ids into a 1xHxW LongTensor."""

    def __call__(self, image):
        label = np.array(image)
        return torch.from_numpy(label).long().unsqueeze(0)
_registry(operator_type='StopGradient')
class StopGradient(Operator):
    """Operator subclass for the 'StopGradient' op.

    Adds no state of its own; the actual behavior comes from the Operator
    base class and the registry entry (see the registration call above).
    """
    def __init__(self):
        super().__init__()
class LocalResNetEncoderGroupNorm(Encoder):
    """Symmetric encoder: `levels` downsampling ResNet stages followed by the
    mirrored `levels` upsampling DeResNet stages, all using GroupNorm."""

    def __init__(self, levels, in_planes, out_planes, hidden_planes, activation, num_groups):
        # levels: number of down/up-sampling stages
        # hidden_planes / num_groups: per-level settings, one entry per level
        super(LocalResNetEncoderGroupNorm, self).__init__()
        layers = list()
        assert (len(hidden_planes) == levels)
        assert (len(num_groups) == levels)
        # downsampling path: each stage uses strides [1, 2]
        for level in range(levels):
            hidden_channels = hidden_planes[level]
            n_groups = num_groups[level]
            layers.append(('resnet{}'.format(level), ResNetGroupNorm(in_planes, [hidden_channels, hidden_channels], [1, 2], activation, num_groups=n_groups)))
            in_planes = hidden_channels
        # NOTE(review): redundant when levels > 0 — the loop above already
        # leaves in_planes == hidden_planes[-1]
        in_planes = hidden_planes[(- 1)]
        # prepend out_planes so the last (level 0) up-stage emits out_planes
        hidden_planes = ([out_planes] + hidden_planes)
        # upsampling path, mirrored order
        for level in reversed(range(levels)):
            hidden_channels = hidden_planes[level]
            n_groups = num_groups[level]
            layers.append(('deresnet{}'.format(level), DeResNetGroupNorm(in_planes, [in_planes, hidden_channels], [1, 2], [0, 0], activation, num_groups=n_groups)))
            in_planes = hidden_channels
        self.net = nn.Sequential(OrderedDict(layers))

    def forward(self, x):
        # run the whole down/up stack
        return self.net(x)

    def init(self, x, init_scale=1.0):
        # data-dependent init hook: one no-grad forward pass
        # (init_scale is accepted for interface parity but unused here)
        with torch.no_grad():
            return self(x)

    def from_params(cls, params: Dict) -> 'LocalResNetEncoderGroupNorm':
        # NOTE(review): takes `cls` but carries no @classmethod decorator in
        # this source — possibly lost in extraction; confirm upstream.
        return LocalResNetEncoderGroupNorm(**params)
def evaluate(encoder, args, batch_trains, classifier, classifiers, eval_sents, domain_encs):
    """Tag each evaluation sentence with the ensemble of classifiers and
    print token- and sentence-level accuracy.

    Returns:
        Token accuracy, good / (good + bad).

    Bug fix: the ensemble comprehension iterated over ``classifiers`` but
    always applied the single ``classifier`` argument (loop variable ``ath``
    was unused). Each ensemble member is now actually applied; the shared
    sentence encoding is computed once instead of once per member.
    """
    good_sent = bad_sent = good = bad = 0.0
    for sent in eval_sents:
        (words, golds) = zip(*sent)
        encoded = encoder(words, volatile=True)
        probs = [ath(encoded) for ath in classifiers]
        outputs = sum(probs)
        # argmax over summed probabilities -> predicted tag strings
        tags = [encoder.vt.i2w[i] for i in outputs.data.max(1)[1].cpu().view((- 1))]
        if (tags == list(golds)):
            good_sent += 1
        else:
            bad_sent += 1
        for (go, gu) in zip(golds, tags):
            if (go == gu):
                good += 1
            else:
                bad += 1
    print(('tag_acc=%.4f, sent_acc=%.4f' % ((good / (good + bad)), (good_sent / (good_sent + bad_sent)))))
    return ((1.0 * good) / (good + bad))
class HyperOptimizer(PathOptimizer):
    """Search for a high-quality contraction path by repeatedly sampling
    heuristic methods and hyper-parameters, scoring each trial, and feeding
    results back to an optimization library (``optlib``).

    Fix: restored the ``@property`` / setter decorators on ``minimize``,
    ``parallel``, ``tree`` and ``path`` which were missing in this source.
    Without them, ``self.minimize = minimize`` in ``__init__`` would never
    set ``_minimize_score_fn`` (required by ``setup``), ``self.parallel =
    parallel`` would never create ``self._pool`` (required everywhere), and
    ``search`` / ``__call__`` would return bound methods instead of the best
    tree / path.
    """

    # subclasses flip these to change what a trial produces
    compressed = False
    multicontraction = False

    def __init__(self, methods=None, minimize='flops', max_repeats=128, max_time=None, parallel='auto', slicing_opts=None, slicing_reconf_opts=None, reconf_opts=None, optlib=None, space=None, score_compression=0.75, on_trial_error='warn', max_training_steps=None, progbar=False, **optlib_opts):
        self.max_repeats = max_repeats
        self._repeats_start = 0
        self.max_time = max_time
        # goes through the `parallel` setter -> creates self._pool
        self.parallel = parallel
        # per-trial records, appended by _maybe_report_result
        self.method_choices = []
        self.param_choices = []
        self.scores = []
        self.times = []
        self.costs_flops = []
        self.costs_write = []
        self.costs_size = []
        if (methods is None):
            self._methods = get_default_hq_methods()
        elif isinstance(methods, str):
            self._methods = [methods]
        else:
            self._methods = list(methods)
        if (optlib is None):
            optlib = get_default_optlib()
        # goes through the `minimize` setter -> sets self._minimize_score_fn
        self.minimize = minimize
        self.score_compression = score_compression
        self.on_trial_error = on_trial_error
        self.best_score = float('inf')
        self.max_training_steps = max_training_steps
        inf = float('inf')
        self.best = {'score': inf, 'size': inf, 'flops': inf}
        self.trials_since_best = 0
        # copy option dicts so later setdefault calls don't mutate caller state
        self.slicing_opts = (None if (slicing_opts is None) else dict(slicing_opts))
        self.reconf_opts = (None if (reconf_opts is None) else dict(reconf_opts))
        self.slicing_reconf_opts = (None if (slicing_reconf_opts is None) else dict(slicing_reconf_opts))
        self.progbar = progbar
        if (space is None):
            space = get_hyper_space()
        self._optimizer = dict(zip(['init', 'get_setting', 'report_result'], _OPTLIB_FNS[optlib]))
        self._optimizer['init'](self, self._methods, space, **optlib_opts)

    @property
    def minimize(self):
        """The score target ('flops', 'size', ..., or a callable)."""
        return self._minimize

    @minimize.setter
    def minimize(self, minimize):
        self._minimize = minimize
        if callable(minimize):
            self._minimize_score_fn = minimize
        else:
            self._minimize_score_fn = get_score_fn(minimize)

    @property
    def parallel(self):
        """Parallelization spec; assigning it (re)creates the worker pool."""
        return self._parallel

    @parallel.setter
    def parallel(self, parallel):
        self._parallel = parallel
        self._pool = parse_parallel_arg(parallel)
        if (self._pool is not None):
            self._num_workers = get_n_workers(self._pool)
            # keep workers busy without flooding the pool with pending futures
            self.pre_dispatch = max((self._num_workers + 4), int((1.2 * self._num_workers)))

    @property
    def tree(self):
        """The best contraction tree found so far."""
        return self.best['tree']

    @property
    def path(self):
        """The contraction path of the best tree."""
        return self.tree.get_path()

    def setup(self, inputs, output, size_dict):
        """Compose the trial-function pipeline (tree conversion, slicing,
        reconfiguration, scoring) and return ``(trial_fn, trial_args)``."""
        trial_fn = base_trial_fn
        if self.compressed:
            assert (not self.multicontraction)
            trial_fn = TrialConvertTree(trial_fn, ContractionTreeCompressed)
        if self.multicontraction:
            assert (not self.compressed)
            # NOTE(review): self.varmults / self.numconfigs are not set in
            # __init__ — presumably supplied by a subclass; confirm.
            trial_fn = TrialTreeMulti(trial_fn, self.varmults, self.numconfigs)
        nested_parallel = should_nest(self._pool)
        if (self.slicing_opts is not None):
            self.slicing_opts.setdefault('minimize', self.minimize)
            trial_fn = SlicedTrialFn(trial_fn, **self.slicing_opts)
        if (self.slicing_reconf_opts is not None):
            self.slicing_reconf_opts.setdefault('minimize', self.minimize)
            self.slicing_reconf_opts.setdefault('parallel', nested_parallel)
            trial_fn = SlicedReconfTrialFn(trial_fn, **self.slicing_reconf_opts)
        if (self.reconf_opts is not None):
            if self.compressed:
                self.reconf_opts.setdefault('minimize', self.minimize)
                trial_fn = CompressedReconfTrial(trial_fn, **self.reconf_opts)
            else:
                self.reconf_opts.setdefault('minimize', self.minimize)
                self.reconf_opts.setdefault('parallel', nested_parallel)
                trial_fn = ReconfTrialFn(trial_fn, **self.reconf_opts)
        # final wrapper attaches a score to every trial
        trial_fn = ComputeScore(trial_fn, score_fn=self._minimize_score_fn, score_compression=self.score_compression, on_trial_error=self.on_trial_error)
        return (trial_fn, (inputs, output, size_dict))

    def _maybe_cancel_futures(self):
        """Cancel any still-pending parallel trials."""
        if (self._pool is not None):
            while self._futures:
                f = self._futures.pop()[(- 1)]
                f.cancel()

    def _maybe_report_result(self, setting, trial):
        """Record a finished trial and feed it back to the optimizer
        (skipping feedback for failed trials or past the training budget)."""
        score = trial['score']
        new_best = (score < self.best_score)
        if new_best:
            self.best_score = score
        should_report = (((self.max_training_steps is None) or (len(self.scores) < self.max_training_steps) or new_best) and (trial['score'] < float('inf')))
        if should_report:
            self._optimizer['report_result'](self, setting, trial, score)
        self.method_choices.append(setting['method'])
        self.param_choices.append(setting['params'])
        self.costs_flops.append(trial['flops'])
        self.costs_write.append(trial['write'])
        self.costs_size.append(trial['size'])
        self.scores.append(trial['score'])
        self.times.append(trial['time'])

    def _gen_results(self, repeats, trial_fn, trial_args):
        """Serial trial generator."""
        constants = get_hyper_constants()
        for _ in repeats:
            setting = self._optimizer['get_setting'](self)
            method = setting['method']
            trial = trial_fn(*trial_args, method=method, **setting['params'], **constants[method])
            self._maybe_report_result(setting, trial)
            (yield trial)

    def _get_and_report_next_future(self):
        """Busy-poll the future list for the next completed trial."""
        while True:
            for i in range(len(self._futures)):
                (setting, future) = self._futures[i]
                if future.done():
                    del self._futures[i]
                    trial = future.result()
                    self._maybe_report_result(setting, trial)
                    return trial
            time.sleep(1e-06)

    def _gen_results_parallel(self, repeats, trial_fn, trial_args):
        """Parallel trial generator: keep up to ``pre_dispatch`` trials in flight."""
        constants = get_hyper_constants()
        self._futures = []
        for _ in repeats:
            setting = self._optimizer['get_setting'](self)
            method = setting['method']
            future = submit(self._pool, trial_fn, *trial_args, method=method, **setting['params'], **constants[method])
            self._futures.append((setting, future))
            if (len(self._futures) >= self.pre_dispatch):
                (yield self._get_and_report_next_future())
        while self._futures:
            (yield self._get_and_report_next_future())

    def _search(self, inputs, output, size_dict):
        """Run the trial loop, tracking the best trial and honoring max_time
        ('rate:X', 'equil:N', or seconds) and max_repeats stopping rules."""
        if (self.max_time is not None):
            t0 = time.time()
            if isinstance(self.max_time, str):
                (which, amount) = re.match('(rate|equil):(.+)', self.max_time).groups()
                if (which == 'rate'):
                    rate = float(amount)
                    def should_stop():
                        return ((time.time() - t0) > (self.best['flops'] / rate))
                elif (which == 'equil'):
                    amount = int(amount)
                    def should_stop():
                        return (self.trials_since_best > amount)
            else:
                def should_stop():
                    return ((time.time() - t0) > self.max_time)
        else:
            def should_stop():
                return False
        (trial_fn, trial_args) = self.setup(inputs, output, size_dict)
        # continue numbering from any previous search on this instance
        r_start = (self._repeats_start + len(self.scores))
        r_stop = (r_start + self.max_repeats)
        repeats = range(r_start, r_stop)
        if (self._pool is not None):
            trials = self._gen_results_parallel(repeats, trial_fn, trial_args)
        else:
            trials = self._gen_results(repeats, trial_fn, trial_args)
        if self.progbar:
            import tqdm
            pbar = tqdm.tqdm(trials, total=self.max_repeats)
            pbar.set_description(progress_description(self.best), refresh=False)
            trials = pbar
        for trial in trials:
            if (trial['score'] < self.best['score']):
                self.trials_since_best = 0
                self.best = trial
                self.best['params'] = dict(self.param_choices[(- 1)])
                self.best['params']['method'] = self.method_choices[(- 1)]
                if self.progbar:
                    pbar.set_description(progress_description(self.best), refresh=False)
            else:
                self.trials_since_best += 1
            if should_stop():
                break
        if self.progbar:
            pbar.close()
        self._maybe_cancel_futures()

    def search(self, inputs, output, size_dict):
        """Run the search and return the best contraction tree."""
        self._search(inputs, output, size_dict)
        return self.tree

    def get_tree(self):
        """Alias for the ``tree`` property."""
        return self.tree

    def __call__(self, inputs, output, size_dict, memory_limit=None):
        """opt_einsum-style interface: run the search, return the best path."""
        self._search(inputs, output, size_dict)
        return tuple(self.path)

    def get_trials(self, sort=None):
        """Return (method, size, flops, write, params) tuples, optionally sorted."""
        trials = list(zip(self.method_choices, self.costs_size, self.costs_flops, self.costs_write, self.param_choices))
        if (sort == 'method'):
            trials.sort(key=(lambda t: t[0]))
        if (sort == 'combo'):
            trials.sort(key=(lambda t: ((log2(t[1]) / 1000.0) + log2((t[2] + (256 * t[3]))))))
        if (sort == 'size'):
            trials.sort(key=(lambda t: ((log2(t[1]) + (log2(t[2]) / 1000.0)) + (log2(t[3]) / 1000.0))))
        if (sort == 'flops'):
            trials.sort(key=(lambda t: (((log2(t[1]) / 1000.0) + log2(t[2])) + (log2(t[3]) / 1000.0))))
        if (sort == 'write'):
            trials.sort(key=(lambda t: (((log2(t[1]) / 1000.0) + (log2(t[2]) / 1000.0)) + log2(t[3]))))
        return trials

    def print_trials(self, sort=None):
        """Pretty-print the trial history."""
        # NOTE(review): the header format has 4 slots for 5 arguments —
        # str.format ignores the extra, so 'PARAMS' is silently dropped.
        header = '{:>11} {:>11} {:>11} {}'
        print(header.format('METHOD', 'log2[SIZE]', 'log10[FLOPS]', 'log10[WRITE]', 'PARAMS'))
        row = '{:>11} {:>11.2f} {:>11.2f} {:>11.2f} {}'
        for (choice, size, flops, write, params) in self.get_trials(sort):
            print(row.format(choice, log2(size), log10(flops), log10(write), params))

    def to_df(self):
        """Trial history as a single pandas DataFrame (log-scaled costs)."""
        import pandas
        return pandas.DataFrame(data={'run': list(range(len(self.costs_size))), 'time': self.times, 'method': self.method_choices, 'size': list(map(log2, self.costs_size)), 'flops': list(map(log10, self.costs_flops)), 'write': list(map(log10, self.costs_write)), 'random_strength': [p.get('random_strength', 1e-06) for p in self.param_choices], 'score': self.scores}).sort_values(by='method')

    def to_dfs_parametrized(self):
        """Trial history as one DataFrame per method, including all params."""
        import pandas as pd
        rows = {}
        for i in range(len(self.scores)):
            row = {'run': i, 'time': self.times[i], **self.param_choices[i], 'flops': log10(self.costs_flops[i]), 'write': log2(self.costs_write[i]), 'size': log2(self.costs_size[i]), 'score': self.scores[i]}
            method = self.method_choices[i]
            rows.setdefault(method, []).append(row)
        return {method: pd.DataFrame(rows[method]).sort_values(by='score') for method in rows}

    # plotting helpers bound from module level
    plot_trials = plot_trials
    plot_trials_alt = plot_trials_alt
    plot_scatter = plot_scatter
    plot_scatter_alt = plot_scatter_alt
.parametrize('input_data,expected', testdata)
def test_sieve(input_data, expected):
    """Parametrized check: sieve applied to the unpacked input equals expected."""
    actual = sieve(*input_data)
    assert (actual == expected)
class InputFeatures(object):
    """Feature container for one multiple-choice example."""

    def __init__(self, example_id, choices_features, label):
        self.example_id = example_id
        # choices_features arrives as (tokens, input_ids, input_mask,
        # segment_ids) tuples; keep one dict per choice, dropping tokens
        self.choices_features = []
        for _, input_ids, input_mask, segment_ids in choices_features:
            self.choices_features.append({'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids})
        self.label = label
def inference_prob_recurrent(images, cams, depth_num, depth_start, depth_interval, is_master_gpu=True):
    """Build a softmax probability volume over depth hypotheses for the
    reference view, regularizing each per-depth cost slice with a 3-level
    stacked convolutional GRU (R-MVSNet-style inference graph).

    Args:
        images: batch of view images, sliced as (B, V, H, W, C) — layout
            assumed from the tf.slice calls below; TODO confirm with caller.
        cams: camera tensors, sliced as (B, V, 2, 4, 4).
        depth_num: number of depth hypotheses.
        depth_start: first depth value.
        depth_interval: spacing between consecutive depth hypotheses.
        is_master_gpu: when False, reuse the feature-tower variables.

    Returns:
        prob_volume: softmax over depth, shape (B, depth_num, H/4, W/4, 1).
    """
    # NOTE(review): depth_end is computed but unused in this function
    depth_end = (depth_start + ((tf.cast(depth_num, tf.float32) - 1) * depth_interval))
    # reference image/camera are view 0
    ref_image = tf.squeeze(tf.slice(images, [0, 0, 0, 0, 0], [(- 1), 1, (- 1), (- 1), 3]), axis=1)
    ref_cam = tf.squeeze(tf.slice(cams, [0, 0, 0, 0, 0], [(- 1), 1, 2, 4, 4]), axis=1)
    # feature tower for the reference view; only the master GPU creates the
    # variables, all other replicas reuse them
    if is_master_gpu:
        ref_tower = UNetDS2GN({'data': ref_image}, is_training=True, reuse=False)
    else:
        ref_tower = UNetDS2GN({'data': ref_image}, is_training=True, reuse=True)
    # shared-weight feature towers for the remaining views
    view_towers = []
    for view in range(1, FLAGS.view_num):
        view_image = tf.squeeze(tf.slice(images, [0, view, 0, 0, 0], [(- 1), 1, (- 1), (- 1), (- 1)]), axis=1)
        view_tower = UNetDS2GN({'data': view_image}, is_training=True, reuse=True)
        view_towers.append(view_tower)
    # per-view plane-sweep homographies, one stack per depth hypothesis
    view_homographies = []
    for view in range(1, FLAGS.view_num):
        view_cam = tf.squeeze(tf.slice(cams, [0, view, 0, 0, 0], [(- 1), 1, 2, 4, 4]), axis=1)
        homographies = get_homographies(ref_cam, view_cam, depth_num=depth_num, depth_start=depth_start, depth_interval=depth_interval)
        view_homographies.append(homographies)
    # GRU regularizer: 3 stacked ConvGRU cells with shrinking channel counts
    gru1_filters = 16
    gru2_filters = 4
    gru3_filters = 2
    # features are at 1/4 resolution (UNetDS2 downsamples by 4)
    # NOTE(review): in Python 3, / yields floats here — tf.zeros needs
    # integer dims, so this likely should be // ; verify Python version.
    feature_shape = [FLAGS.batch_size, (FLAGS.max_h / 4), (FLAGS.max_w / 4), 32]
    gru_input_shape = [feature_shape[1], feature_shape[2]]
    state1 = tf.zeros([FLAGS.batch_size, feature_shape[1], feature_shape[2], gru1_filters])
    state2 = tf.zeros([FLAGS.batch_size, feature_shape[1], feature_shape[2], gru2_filters])
    state3 = tf.zeros([FLAGS.batch_size, feature_shape[1], feature_shape[2], gru3_filters])
    conv_gru1 = ConvGRUCell(shape=gru_input_shape, kernel=[3, 3], filters=gru1_filters)
    conv_gru2 = ConvGRUCell(shape=gru_input_shape, kernel=[3, 3], filters=gru2_filters)
    conv_gru3 = ConvGRUCell(shape=gru_input_shape, kernel=[3, 3], filters=gru3_filters)
    # NOTE(review): exp_div and soft_depth_map are created but never used below
    exp_div = tf.zeros([FLAGS.batch_size, feature_shape[1], feature_shape[2], 1])
    soft_depth_map = tf.zeros([FLAGS.batch_size, feature_shape[1], feature_shape[2], 1])
    with tf.name_scope('cost_volume_homography'):
        # sweep over depth hypotheses, building a variance-based cost per slice
        depth_costs = []
        for d in range(depth_num):
            # running mean and mean-of-squares of warped features
            ave_feature = ref_tower.get_output()
            ave_feature2 = tf.square(ref_tower.get_output())
            for view in range(0, (FLAGS.view_num - 1)):
                homography = tf.slice(view_homographies[view], begin=[0, d, 0, 0], size=[(- 1), 1, 3, 3])
                homography = tf.squeeze(homography, axis=1)
                warped_view_feature = tf_transform_homography(view_towers[view].get_output(), homography)
                ave_feature = (ave_feature + warped_view_feature)
                ave_feature2 = (ave_feature2 + tf.square(warped_view_feature))
            ave_feature = (ave_feature / FLAGS.view_num)
            ave_feature2 = (ave_feature2 / FLAGS.view_num)
            # cost = variance across views: E[x^2] - E[x]^2
            cost = (ave_feature2 - tf.square(ave_feature))
            # recurrent regularization through the GRU stack (negated cost)
            (reg_cost1, state1) = conv_gru1((- cost), state1, scope='conv_gru1')
            (reg_cost2, state2) = conv_gru2(reg_cost1, state2, scope='conv_gru2')
            (reg_cost3, state3) = conv_gru3(reg_cost2, state3, scope='conv_gru3')
            reg_cost = tf.layers.conv2d(reg_cost3, 1, 3, padding='same', reuse=tf.AUTO_REUSE, name='prob_conv')
            depth_costs.append(reg_cost)
        prob_volume = tf.stack(depth_costs, axis=1)
        # normalize over the depth axis
        prob_volume = tf.nn.softmax(prob_volume, axis=1, name='prob_volume')
    return prob_volume
def ade_palette():
    """Return the ADE20K segmentation color palette: one [R, G, B] triple
    per class, indexed by class id."""
    return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], [255, 255, 0], [0, 153, 255], [0, 
41, 255], [0, 255, 204], [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], [102, 255, 0], [92, 0, 255]]
class NoamScheduler(BaseScheduler):
    """Noam learning-rate schedule ("Attention Is All You Need"):
    linear warmup for `warmup` steps, then inverse-sqrt decay, all scaled
    by `factor` and by hidden_size^-0.5."""

    def __init__(self, hidden_size: int, optimizer: torch.optim.Optimizer, factor: float=1.0, warmup: int=4000):
        super().__init__(optimizer)
        self.warmup = warmup
        self.factor = factor
        self.hidden_size = hidden_size

    def _compute_rate(self):
        # rate = factor * d_model^-0.5 * min(step^-0.5, step * warmup^-1.5)
        step = self._step
        upper_bound = min((step ** (- 0.5)), (step * (self.warmup ** (- 1.5))))
        return (self.factor * ((self.hidden_size ** (- 0.5)) * upper_bound))

    def state_dict(self):
        # base class populates self._state_dict; extend it with our fields
        super().state_dict()
        for key in ('warmup', 'factor', 'hidden_size'):
            self._state_dict[key] = getattr(self, key)
        return self._state_dict

    def load_state_dict(self, state_dict):
        super().load_state_dict(state_dict)
        self.warmup = state_dict['warmup']
        self.factor = state_dict['factor']
        self.hidden_size = state_dict['hidden_size']

    def __repr__(self):
        return f'{self.__class__.__name__}(warmup={self.warmup}, factor={self.factor}, hidden_size={self.hidden_size})'
def find_lr(init_value=1e-06, final_value=0.001, beta=0.7):
    """Learning-rate range test: sweep the learning rate geometrically from
    init_value to final_value over one pass of trainset_loader, recording
    the exponentially-smoothed loss at each step.

    Relies on module-level globals: trainset_loader, device, model,
    optimizer, loss_loc, loss_regress, args.

    Returns:
        (log_lrs, losses): log10 of the learning rates and the smoothed
        losses, truncated early if the loss explodes (> 4x best).

    Fix: removed a duplicated ``target_counts = target_counts.view(-1)``
    statement (it was executed twice back to back; the second was a no-op).
    """
    num = (len(trainset_loader) - 1)
    # multiplicative step so lr reaches final_value after `num` batches
    mult = ((final_value / init_value) ** (1 / num))
    lr = init_value
    optimizer.param_groups[0]['lr'] = lr
    avg_loss = 0.0
    best_loss = 0.0
    batch_num = 0
    losses = []
    log_lrs = []
    for (imgs, dicts) in tqdm(trainset_loader):
        batch_num += 1
        imgs = imgs.to(device)
        imgs = Variable(imgs)
        target_locations = [dictt['locations'].to(device) for dictt in dicts]
        target_counts = [dictt['count'].to(device) for dictt in dicts]
        target_orig_heights = [dictt['orig_height'].to(device) for dictt in dicts]
        target_orig_widths = [dictt['orig_width'].to(device) for dictt in dicts]
        target_counts = torch.stack(target_counts)
        target_orig_heights = torch.stack(target_orig_heights)
        target_orig_widths = torch.stack(target_orig_widths)
        # (B, 2) tensor of original (height, width) per image
        target_orig_sizes = torch.stack((target_orig_heights, target_orig_widths)).transpose(0, 1)
        optimizer.zero_grad()
        (est_maps, est_counts) = model.forward(imgs)
        (term1, term2) = loss_loc.forward(est_maps, target_locations, target_orig_sizes)
        target_counts = target_counts.view((- 1))
        est_counts = est_counts.view((- 1))
        term3 = loss_regress.forward(est_counts, target_counts)
        term3 *= args.lambdaa
        loss = ((term1 + term2) + term3)
        # exponentially smoothed loss with bias correction
        avg_loss = ((beta * avg_loss) + ((1 - beta) * loss.item()))
        smoothed_loss = (avg_loss / (1 - (beta ** batch_num)))
        # stop early once the loss blows up
        if ((batch_num > 1) and (smoothed_loss > (4 * best_loss))):
            return (log_lrs, losses)
        if ((smoothed_loss < best_loss) or (batch_num == 1)):
            best_loss = smoothed_loss
        losses.append(smoothed_loss)
        log_lrs.append(math.log10(lr))
        loss.backward()
        optimizer.step()
        lr *= mult
        optimizer.param_groups[0]['lr'] = lr
    return (log_lrs, losses)
def test_get_by_dotted_path():
    """Dotted-path lookup: direct key, empty path, nested hit, nested miss."""
    cases = [
        (({'a': 12}, 'a'), 12),
        (({'a': 12}, ''), {'a': 12}),
        (({'foo': {'a': 12}}, 'foo.a'), 12),
        (({'foo': {'a': 12}}, 'foo.b'), None),
    ]
    for args, expected in cases:
        assert (get_by_dotted_path(*args) == expected)
def get_repo(path=PROJECT_PATH, search_parent_directories=True):
    """Open and return the git repository at *path* (searching parents by default)."""
    return git.Repo(path, search_parent_directories=search_parent_directories)
def _assert_tensors_equal(a, b, atol=1e-12, prefix=''):
if ((a is None) and (b is None)):
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise
except Exception:
msg = '{} != {}'.format(a, b)
if prefix:
msg = ((prefix + ': ') + msg)
raise AssertionError(msg) |
def clean_custom_task(task_info):
    """Normalize a custom-pipeline task spec in place.

    The 'pt' and 'tf' entries (a class name or sequence of class names) are
    resolved to tuples of actual transformers classes. Returns
    ``(task_info, None)``; raises RuntimeError if no 'impl' is given.
    """
    import transformers
    if ('impl' not in task_info):
        raise RuntimeError('This model introduces a custom pipeline without specifying its implementation.')

    def resolve(key):
        # accept a single name or a sequence of names
        names = task_info.get(key, ())
        if isinstance(names, str):
            names = [names]
        return tuple((getattr(transformers, c) for c in names))

    task_info['pt'] = resolve('pt')
    task_info['tf'] = resolve('tf')
    return (task_info, None)
def query_environment(name):
    """Instantiate the named gym environment and print a summary of its
    spaces, spec limits, and reward characteristics."""
    env = gym.make(name)
    spec = gym.spec(name)
    summary = (
        f'Action Space: {env.action_space}',
        f'Observation Space: {env.observation_space}',
        f'Max Episode Steps: {spec.max_episode_steps}',
        f'Nondeterministic: {spec.nondeterministic}',
        f'Reward Range: {env.reward_range}',
        f'Reward Threshold: {spec.reward_threshold}',
    )
    for line in summary:
        print(line)
def _goes_first(is_main):
    """Generator hook that lets the main process run the wrapped section
    first: non-main processes block before yielding, and the main process
    blocks after resuming, releasing the others.

    Note: comparisons are identity checks against True/False, so only
    literal booleans trigger the barriers.
    """
    if is_main is False:
        wait_for_everyone()
    yield
    if is_main is True:
        wait_for_everyone()
def initialize_replay_buffer(self, examples, batch_spec, async_=False):
    """Create a uniform sequence replay buffer sized from this algorithm's
    settings; ``async_`` is accepted for interface compatibility but unused."""
    buffer_example = SamplesToBuffer(
        observation=examples['observation'],
        action=examples['action'],
        reward=examples['reward'],
        done=examples['done'],
    )
    return UniformSequenceReplayBuffer(
        example=buffer_example,
        size=self.replay_size,
        B=batch_spec.B,
        rnn_state_interval=0,  # non-recurrent storage
        discount=self.discount,
        n_step_return=self.n_step_return,
    )
def create_visdom(session_name, configuration):
    """Build a Visdom client for ``session_name``; returns None when no
    configuration (or no server in it) is provided. The visdom import is
    deferred so the dependency is optional."""
    if configuration is None or configuration.server is None:
        return None
    from visdom import Visdom
    return Visdom(env=session_name, **configuration.as_dict())
# NOTE(review): looks like the residue of a mangled registration decorator
# for the class below (e.g. '@DATASETS.register_module()') — confirm
# against the upstream source before relying on this call.
_module()
class COCOStuffDataset(CustomDataset):
    """COCO-Stuff semantic segmentation dataset.

    Declares the 171 thing+stuff class names and their visualization
    palette, and fixes the image/annotation filename suffixes expected by
    the base ``CustomDataset`` loader.
    """

    # 171 class names: the 80 COCO "thing" categories followed by the
    # "stuff" categories.
    CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner', 'blanket', 'branch', 'bridge', 'building-other', 'bush', 'cabinet', 'cage', 'cardboard', 'carpet', 'ceiling-other', 'ceiling-tile', 'cloth', 'clothes', 'clouds', 'counter', 'cupboard', 'curtain', 'desk-stuff', 'dirt', 'door-stuff', 'fence', 'floor-marble', 'floor-other', 'floor-stone', 'floor-tile', 'floor-wood', 'flower', 'fog', 'food-other', 'fruit', 'furniture-other', 'grass', 'gravel', 'ground-other', 'hill', 'house', 'leaves', 'light', 'mat', 'metal', 'mirror-stuff', 'moss', 'mountain', 'mud', 'napkin', 'net', 'paper', 'pavement', 'pillow', 'plant-other', 'plastic', 'platform', 'playingfield', 'railing', 'railroad', 'river', 'road', 'rock', 'roof', 'rug', 'salad', 'sand', 'sea', 'shelf', 'sky-other', 'skyscraper', 'snow', 'solid-other', 'stairs', 'stone', 'straw', 'structural-other', 'table', 'tent', 'textile-other', 'towel', 'tree', 'vegetable', 'wall-brick', 'wall-concrete', 'wall-other', 'wall-panel', 'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'waterdrops', 'window-blind', 'window-other', 'wood')

    # One RGB color per class, index-aligned with CLASSES.
    PALETTE = [[0, 192, 64], [0, 192, 64], [0, 64, 96], [128, 192, 192], [0, 64, 64], [0, 192, 224], [0, 192, 192], [128, 192, 64], [0, 192, 96], [128, 192, 64], [128, 32, 192], [0, 0, 224], [0, 0, 64], [0, 160, 192], [128, 0, 96], [128, 0, 192], [0, 32, 192], [128, 128, 224], [0, 0, 192], [128, 160, 192], [128, 128, 0], [128, 0, 32], [128, 32, 0], [128, 0, 128], [64, 128, 32], [0, 160, 0], [0, 0, 0], [192, 128, 160], [0, 32, 0], [0, 128, 128], [64, 128, 160], [128, 160, 0], [0, 128, 0], [192, 128, 32], [128, 96, 128], [0, 0, 128], [64, 0, 32], [0, 224, 128], [128, 0, 0], [192, 0, 160], [0, 96, 128], [128, 128, 128], [64, 0, 160], [128, 224, 128], [128, 128, 64], [192, 0, 32], [128, 96, 0], [128, 0, 192], [0, 128, 32], [64, 224, 0], [0, 0, 64], [128, 128, 160], [64, 96, 0], [0, 128, 192], [0, 128, 160], [192, 224, 0], [0, 128, 64], [128, 128, 32], [192, 32, 128], [0, 64, 192], [0, 0, 32], [64, 160, 128], [128, 64, 64], [128, 0, 160], [64, 32, 128], [128, 192, 192], [0, 0, 160], [192, 160, 128], [128, 192, 0], [128, 0, 96], [192, 32, 0], [128, 64, 128], [64, 128, 96], [64, 160, 0], [0, 64, 0], [192, 128, 224], [64, 32, 0], [0, 192, 128], [64, 128, 224], [192, 160, 0], [0, 192, 0], [192, 128, 96], [192, 96, 128], [0, 64, 128], [64, 0, 96], [64, 224, 128], [128, 64, 0], [192, 0, 224], [64, 96, 128], [128, 192, 128], [64, 0, 224], [192, 224, 128], [128, 192, 64], [192, 0, 96], [192, 96, 0], [128, 64, 192], [0, 128, 96], [0, 224, 0], [64, 64, 64], [128, 128, 224], [0, 96, 0], [64, 192, 192], [0, 128, 224], [128, 224, 0], [64, 192, 64], [128, 128, 96], [128, 32, 128], [64, 0, 192], [0, 64, 96], [0, 160, 128], [192, 0, 64], [128, 64, 224], [0, 32, 128], [192, 128, 192], [0, 64, 224], [128, 160, 128], [192, 128, 0], [128, 64, 32], [128, 32, 64], [192, 0, 128], [64, 192, 32], [0, 160, 64], [64, 0, 0], [192, 192, 160], [0, 32, 64], [64, 128, 128], [64, 192, 160], [128, 160, 64], [64, 128, 0], [192, 192, 32], [128, 96, 192], [64, 0, 128], [64, 64, 32], [0, 224, 192], [192, 0, 0], [192, 64, 160], [0, 96, 192], [192, 128, 128], [64, 64, 160], [128, 224, 192], [192, 128, 64], [192, 64, 32], [128, 96, 64], [192, 0, 192], [0, 192, 32], [64, 224, 64], [64, 0, 64], [128, 192, 160], [64, 96, 64], [64, 128, 192], [0, 192, 160], [192, 224, 64], [64, 128, 64], [128, 192, 32], [192, 32, 192], [64, 64, 192], [0, 64, 32], [64, 160, 192], [192, 64, 64], [128, 64, 160], [64, 32, 192], [192, 192, 192], [0, 64, 160], [192, 160, 192], [192, 192, 0], [128, 64, 96], [192, 32, 64], [192, 64, 128], [64, 192, 96], [64, 160, 64], [64, 64, 0]]

    def __init__(self, **kwargs):
        # COCO-Stuff images are .jpg; label maps use the
        # '_labelTrainIds.png' suffix produced by the conversion script.
        super(COCOStuffDataset, self).__init__(img_suffix='.jpg', seg_map_suffix='_labelTrainIds.png', **kwargs)
def spotifyShuffle(songs_list, artists_list):
    """Shuffle a playlist so each artist's songs are spread apart.

    Songs are grouped by artist, shuffled within each group, assigned
    spread-out positions via ``get_locs``, and finally ordered by sorting
    those positions.
    """
    by_artist = defaultdict(list)
    for artist, song in zip(artists_list, songs_list):
        by_artist[artist].append(song)
    all_songs = []
    positions = []
    for artist_songs in by_artist.values():
        shuffled = fisherYatesShuffle(artist_songs)
        all_songs.extend(shuffled)
        positions.extend(get_locs(len(shuffled)))
    return [all_songs[idx] for idx in argsort(positions)]
def main():
    """Build a blind qualitative-evaluation document for ICD code windows.

    Loads, for four methods (attn / conv / lr / sim), the text-window
    position each method selected per (hadm_id, code) pair, then walks the
    MIMIC-III full test set and keeps pairs covered by all four methods
    (capped per code so frequent codes don't dominate, and skipping pairs
    with zero similarity). Writes two markdown files: the evaluation sheet
    with the four windows labelled A-D in random order, and a key file that
    also records which method produced each window. Finally prints the
    fraction of document-code pairs that were usable.
    """
    desc_dict = datasets.load_code_descriptions()

    print('loading attn windows')
    attn_windows = {}
    attn_window_szs = {}
    with open(ATTN_FILENAME, 'r') as f:
        r = csv.reader(f)
        next(r)  # skip header
        for row in r:
            attn_windows[(int(row[0]), row[1])] = int(row[2])
            attn_window_szs[(int(row[0]), row[1])] = int(row[3])

    print('loading conv windows')
    conv_windows = {}
    with open(CONV_FILENAME, 'r') as f:
        r = csv.reader(f)
        next(r)
        for row in r:
            conv_windows[(int(row[0]), row[1])] = int(row[2])

    print('loading lr windows')
    lr_windows = {}
    with open(LR_FILENAME, 'r') as f:
        r = csv.reader(f)
        next(r)
        for row in r:
            lr_windows[(int(row[1]), row[2])] = int(row[3])

    print('loading sim windows')
    sim_windows = {}
    sim_vals = {}
    with open(SIM_FILENAME, 'r') as f:
        r = csv.reader(f)
        next(r)
        for row in r:
            sim_windows[(int(row[1]), row[2])] = int(row[3])
            sim_vals[(int(row[1]), row[2])] = float(row[-1])

    attn_keys = set(attn_windows.keys())
    conv_keys = set(conv_windows.keys())
    lr_keys = set(lr_windows.keys())
    sim_keys = set(sim_windows.keys())
    valid_texts = []
    print('building evaluation document')
    with open('%s/qualitative_eval_full.md' % MIMIC_3_DIR, 'w') as of:
        with open('%s/qualitative_eval_full_key.md' % MIMIC_3_DIR, 'w') as kf:
            code_counts = Counter()
            of.write('### Instructions\n')
            of.write(INSTRUCTIONS + '\n\n')
            with open('%s/test_full.csv' % MIMIC_3_DIR, 'r') as f:
                r = csv.reader(f)
                next(r)
                num_pairs = 0
                for (idx, row) in tqdm(enumerate(r)):
                    codes = str(row[3]).split(';')
                    toks = row[2].split()
                    hadm_id = int(row[1])
                    for code in codes:
                        num_pairs += 1
                        key = (hadm_id, code)
                        # keep only pairs for which every method produced a
                        # window, capped per code
                        if (key in conv_keys and key in lr_keys and key in sim_keys
                                and key in attn_keys and code_counts[code] < MAX_CODE_OCCURRENCES):
                            if sim_vals[key] == 0:
                                continue
                            code_counts[code] += 1
                            valid_texts.append((key, toks))
            valid_texts = np.random.permutation(valid_texts)
            opts = 'ABCD'
            for (i, (key, toks)) in enumerate(valid_texts[:NUM_QUESTIONS]):
                (hadm_id, code) = key
                of.write('### Question %d\n' % (i + 1))
                kf.write('### Question %d\n' % (i + 1))
                of.write('Code: %s\n' % code)
                kf.write('Code: %s\n' % code)
                of.write('Full descriptions: %s\n\n' % desc_dict[code])
                kf.write('Full descriptions: %s\n\n' % desc_dict[code])
                # Shuffle the four methods so the grader is blind.
                # np.random.permutation stringifies the (method, window)
                # tuples, hence the int() cast below.
                methods = [('attn', attn_windows[key]), ('conv', conv_windows[key]),
                           ('lr', lr_windows[key]), ('sim', sim_windows[key])]
                # BUGFIX: inner loop previously reused `i`, shadowing the
                # outer question index; renamed to `j`.
                for (j, (method, window)) in enumerate(np.random.permutation(methods)):
                    window = int(window)
                    if method == 'attn':
                        filter_size = attn_window_szs[key]
                    else:
                        filter_size = FILTER_SIZE
                    # BUGFIX: use integer division — '/' yields a float in
                    # Python 3, which is invalid as a slice index.
                    pre = toks[window - CONTEXT_SIZE // 2:window]
                    mid = toks[window:window + filter_size]
                    post = toks[window + filter_size:window + filter_size + CONTEXT_SIZE // 2]
                    md_out = ' '.join(pre) + ' **' + ' '.join(mid) + '** ' + ' '.join(post)
                    of.write('%s) %s\n\n' % (opts[j], md_out))
                    kf.write('%s (%s) %s\n\n' % (opts[j], method, md_out))
    print('percentage of valid document-code pairs: %f' % (len(valid_texts) / float(num_pairs)))
class AlexNet(nn.Module):
    """AlexNet backbone with an optional classifier head.

    When ``num_classes > 0`` the classifier is built and ``forward``
    returns class logits; otherwise ``forward`` returns the raw
    convolutional feature maps.
    """

    def __init__(self, num_classes=-1):
        super(AlexNet, self).__init__()
        self.num_classes = num_classes
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        if self.num_classes > 0:
            self.classifier = nn.Sequential(
                nn.Dropout(),
                nn.Linear(256 * 6 * 6, 4096),
                nn.ReLU(inplace=True),
                nn.Dropout(),
                nn.Linear(4096, 4096),
                nn.ReLU(inplace=True),
                nn.Linear(4096, num_classes),
            )

    def init_weights(self, pretrained=None):
        """Load weights from the checkpoint path ``pretrained``; ``None``
        keeps the default initialization. Raises TypeError otherwise."""
        if pretrained is None:
            return
        if not isinstance(pretrained, str):
            raise TypeError('pretrained must be a str or None')
        logger = logging.getLogger()
        load_checkpoint(self, pretrained, strict=False, logger=logger)

    def forward(self, x):
        """Return logits (classifier built) or feature maps (otherwise)."""
        x = self.features(x)
        if self.num_classes > 0:
            x = x.view(x.size(0), 256 * 6 * 6)
            x = self.classifier(x)
        return x
def generator_loss(loss_func, fake):
    """Compute the generator's adversarial loss for the named GAN objective,
    summed over the two discriminator outputs in ``fake``.

    Supported: any name containing 'wgan', plus 'lsgan', 'gan', 'dragan',
    and 'hinge'; an unrecognized name contributes 0.
    """
    per_output = []
    for idx in range(2):
        logits = fake[idx]
        if 'wgan' in loss_func:
            g_loss = -tf.reduce_mean(logits)
        elif loss_func == 'lsgan':
            g_loss = tf.reduce_mean(tf.squared_difference(logits, 1.0))
        elif loss_func == 'gan' or loss_func == 'dragan':
            g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(logits), logits=logits))
        elif loss_func == 'hinge':
            g_loss = -tf.reduce_mean(logits)
        else:
            g_loss = 0
        per_output.append(g_loss)
    return sum(per_output)
def print_usage():
    """Print the keyboard-control help text for driving the robot."""
    print('\n Make sure to keep the terminal window in focus!\r\n \n Use the following keys to drive the robot:\r\n\n \tW: Increase speed\r\n \tS: Decrease speed\r\n \tA: Turn more left\r\n \tD: Turn more right\r\n \tR: Reset controls\r\n\n \tM: Drive mode\r\n \tN: Toggle noise\r\n \t: Left indicator\r\n \t: Right indicator\r\n \t: Cancel indicators\r\n \t: Network mode\r\n \tSPACE: Toggle logging\r\n \tESC: Quit\r\n ')
def mlp(sizes, activation, output_activation=nn.Identity):
    """Build a fully connected network: a Linear layer between each pair of
    consecutive ``sizes``, each followed by ``activation`` except the last,
    which is followed by ``output_activation``."""
    layers = []
    n_layers = len(sizes) - 1
    for j, (n_in, n_out) in enumerate(zip(sizes[:-1], sizes[1:])):
        layers.append(nn.Linear(n_in, n_out))
        layers.append(activation() if j < n_layers - 1 else output_activation())
    return nn.Sequential(*layers)
def test_tetris_env_step(tetris_env: Tetris) -> None:
    """A jitted step should trace once, modify the grid (adding 4 cells for
    the placed tetromino), and keep state/timestep as JAX array pytrees."""
    chex.clear_trace_counter()
    jitted_step = jax.jit(chex.assert_max_traces(tetris_env.step, n=1))
    rng = jax.random.PRNGKey(0)
    (state, timestep) = tetris_env.reset(rng)
    # Repeated calls with the same action shapes must reuse the single trace.
    first_action = (0, 4)
    jitted_step(state, first_action)
    jitted_step(state, first_action)
    jitted_step(state, first_action)
    second_action = (0, 0)
    (next_state, next_timestep) = jitted_step(state, second_action)
    assert not jnp.array_equal(next_state.grid_padded, state.grid_padded)
    assert next_state.grid_padded.sum() == state.grid_padded.sum() + 4
    assert_is_jax_array_tree(next_state)
    assert_is_jax_array_tree(next_timestep)
class XFMREncoder(nn.Module):
    """Transformer encoder: a stack of identical ``EncoderLayer`` modules
    followed by a final LayerNorm."""

    def __init__(self, d_model, num_layers, self_attn, feed_forward, use_residual=False, dropout=0.1):
        super(XFMREncoder, self).__init__()
        # num_layers layers sharing the same attention/FFN configuration
        self.layers = nn.ModuleList(
            [EncoderLayer(d_model, self_attn, feed_forward, use_residual, dropout)
             for _ in range(num_layers)]
        )
        self.norm = nn.LayerNorm(d_model)

    def forward(self, x, mask):
        """Apply each encoder layer in order, then normalize the result."""
        out = x
        for encoder_layer in self.layers:
            out = encoder_layer(out, mask)
        return self.norm(out)
class handpose_model(nn.Module):
    """OpenPose-style hand keypoint network (CPM): a VGG-like backbone plus
    a stage-1 head producing 22 heatmaps, refined by five further stages
    that each re-consume the backbone features.
    """

    def __init__(self):
        super(handpose_model, self).__init__()
        # Layers built without a trailing ReLU (the stage outputs).
        no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3', 'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6']
        # Backbone spec; conv entries are [in_ch, out_ch, kernel, stride, pad],
        # pool entries are [kernel, stride, pad].
        block1_0 = OrderedDict([('conv1_1', [3, 64, 3, 1, 1]), ('conv1_2', [64, 64, 3, 1, 1]), ('pool1_stage1', [2, 2, 0]), ('conv2_1', [64, 128, 3, 1, 1]), ('conv2_2', [128, 128, 3, 1, 1]), ('pool2_stage1', [2, 2, 0]), ('conv3_1', [128, 256, 3, 1, 1]), ('conv3_2', [256, 256, 3, 1, 1]), ('conv3_3', [256, 256, 3, 1, 1]), ('conv3_4', [256, 256, 3, 1, 1]), ('pool3_stage1', [2, 2, 0]), ('conv4_1', [256, 512, 3, 1, 1]), ('conv4_2', [512, 512, 3, 1, 1]), ('conv4_3', [512, 512, 3, 1, 1]), ('conv4_4', [512, 512, 3, 1, 1]), ('conv5_1', [512, 512, 3, 1, 1]), ('conv5_2', [512, 512, 3, 1, 1]), ('conv5_3_CPM', [512, 128, 3, 1, 1])])
        # Stage-1 head: 1x1 convs down to 22 heatmap channels.
        block1_1 = OrderedDict([('conv6_1_CPM', [128, 512, 1, 1, 0]), ('conv6_2_CPM', [512, 22, 1, 1, 0])])
        blocks = {}
        blocks['block1_0'] = block1_0
        blocks['block1_1'] = block1_1
        # Refinement stages 2..6; input is 150 channels = 128 backbone
        # features + 22 heatmaps from the previous stage.
        for i in range(2, 7):
            blocks[('block%d' % i)] = OrderedDict([(('Mconv1_stage%d' % i), [150, 128, 7, 1, 3]), (('Mconv2_stage%d' % i), [128, 128, 7, 1, 3]), (('Mconv3_stage%d' % i), [128, 128, 7, 1, 3]), (('Mconv4_stage%d' % i), [128, 128, 7, 1, 3]), (('Mconv5_stage%d' % i), [128, 128, 7, 1, 3]), (('Mconv6_stage%d' % i), [128, 128, 1, 1, 0]), (('Mconv7_stage%d' % i), [128, 22, 1, 1, 0])])
        # Materialize each spec into an nn module via the project helper.
        for k in blocks.keys():
            blocks[k] = make_layers(blocks[k], no_relu_layers)
        self.model1_0 = blocks['block1_0']
        self.model1_1 = blocks['block1_1']
        self.model2 = blocks['block2']
        self.model3 = blocks['block3']
        self.model4 = blocks['block4']
        self.model5 = blocks['block5']
        self.model6 = blocks['block6']

    def forward(self, x):
        """Run the backbone once, then refine through stages 2-6, each stage
        seeing its predecessor's heatmaps concatenated with the backbone
        features; returns the final stage's 22-channel heatmaps."""
        out1_0 = self.model1_0(x)
        out1_1 = self.model1_1(out1_0)
        concat_stage2 = torch.cat([out1_1, out1_0], 1)
        out_stage2 = self.model2(concat_stage2)
        concat_stage3 = torch.cat([out_stage2, out1_0], 1)
        out_stage3 = self.model3(concat_stage3)
        concat_stage4 = torch.cat([out_stage3, out1_0], 1)
        out_stage4 = self.model4(concat_stage4)
        concat_stage5 = torch.cat([out_stage4, out1_0], 1)
        out_stage5 = self.model5(concat_stage5)
        concat_stage6 = torch.cat([out_stage5, out1_0], 1)
        out_stage6 = self.model6(concat_stage6)
        return out_stage6
def set_parser():
    """Create the CLI argument parser for running day-level model predictions
    (data/weights/output paths, region, and GPU selection)."""
    arg_parser = argparse.ArgumentParser(description='')
    arg_parser.add_argument('-d', '--data', type=str, required=True,
                            help='path to a folder containing days to be predicted (e.g. the test folder of the test dataset)')
    arg_parser.add_argument('-r', '--region', type=str, required=False, default='R1',
                            help='Region where the data belongs.')
    arg_parser.add_argument('-w', '--weights', type=str, required=True,
                            help='path to a folder containing all required weights of the model')
    arg_parser.add_argument('-o', '--output', type=str, required=True,
                            help='path to save the outputs of the model for each day.')
    arg_parser.add_argument('-g', '--gpu_id', type=int, required=False, default=1,
                            help='specify a gpu ID. 1 as default, -1 for CPU.')
    return arg_parser
class Wav2Vec2ForXVector(metaclass=DummyObject):
    # Placeholder class used when the real implementation's backend is not
    # installed; any instantiation goes through requires_backends, which
    # reports the missing 'torch' dependency.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class SGDP(Optimizer):
    """SGD with Projection (SGDP).

    Plain SGD-with-momentum, except that for multi-dimensional parameters
    whose gradient is (nearly) orthogonal to the weights, the radial
    component of the update is projected out and the effective weight
    decay is scaled by ``wd_ratio``.

    Extra hyper-parameters over SGD:
        eps: numerical-stability term added to norms.
        delta: cosine-similarity threshold that triggers projection.
        wd_ratio: weight-decay multiplier applied when projection fires.
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0, weight_decay=0, nesterov=False, eps=1e-08, delta=0.1, wd_ratio=0.1):
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio)
        super(SGDP, self).__init__(params, defaults)

    def _channel_view(self, x):
        # One row per output channel (flatten all trailing dims).
        return x.view(x.size(0), -1)

    def _layer_view(self, x):
        # Whole tensor as a single row.
        return x.view(1, -1)

    def _cosine_similarity(self, x, y, eps, view_func):
        """Absolute row-wise cosine similarity of x and y under view_func."""
        x = view_func(x)
        y = view_func(y)
        x_norm = x.norm(dim=1).add_(eps)
        y_norm = y.norm(dim=1).add_(eps)
        dot = (x * y).sum(dim=1)
        return dot.abs() / x_norm / y_norm

    def _projection(self, p, grad, perturb, delta, wd_ratio, eps):
        """If grad is near-orthogonal to p (channel-wise, else layer-wise),
        remove the component of ``perturb`` along p and return
        ``(perturb, wd_ratio)``; otherwise return ``(perturb, 1)``."""
        wd = 1
        expand_size = [-1] + [1] * (len(p.shape) - 1)
        for view_func in [self._channel_view, self._layer_view]:
            cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)
            if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):
                p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)
                perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)
                wd = wd_ratio
                return perturb, wd
        return perturb, wd

    def step(self, closure=None):
        """Perform one optimization step; ``closure`` re-evaluates the loss
        and its return value is passed through."""
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]
                if len(state) == 0:
                    state['momentum'] = torch.zeros_like(p.data)
                buf = state['momentum']
                # buf = momentum * buf + (1 - dampening) * grad
                # BUGFIX: use the `alpha=` keyword form — the positional
                # (Number, Tensor) overload of Tensor.add_ was deprecated
                # and later removed from PyTorch.
                buf.mul_(momentum).add_(grad, alpha=1 - dampening)
                if nesterov:
                    d_p = grad + momentum * buf
                else:
                    d_p = buf
                wd_ratio = 1
                if len(p.shape) > 1:
                    d_p, wd_ratio = self._projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps'])
                if weight_decay != 0:
                    # Decoupled weight decay, compensated for momentum
                    # accumulation and scaled by the projection ratio.
                    p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio / (1 - momentum))
                # BUGFIX: same deprecated-overload fix as above.
                p.data.add_(d_p, alpha=-group['lr'])
        return loss
def get_n_params(model):
    """Return the model's total parameter count formatted in millions,
    e.g. '1.001 M params'."""
    total = sum(p.numel() for p in model.parameters())
    return str(np.round(total / 1000000.0, 3)) + ' M params'
def test_dev_vis():
    """Smoke-test: save default draw params to YAML and render a CommonRoad
    scenario's lanelet network with traffic lights enabled."""
    renderer = MPRenderer()
    (scenario, _) = load_commonroad_scenario('USA_Lanker-1_1_T-1')
    default_params = MPDrawParams()
    default_params.save(os.path.join(OUT_TESTS_DIR, 'default_params.yaml'))
    net_params = LaneletNetworkParams()
    net_params.traffic_light.draw_traffic_lights = True
    scenario.lanelet_network.draw(renderer, draw_params=net_params)
    renderer.render(filename=os.path.join(OUT_TESTS_DIR, 'vis_test'))
def PrintIndentifiers(filename, should_print):
    """Print the names of the AST nodes in ``filename`` accepted by the
    ``should_print`` predicate.

    Missing files are reported to stderr; Ctrl-C stops this file early;
    any other parsing failure is deliberately swallowed (best effort).
    """
    contents = utils.ReadFile(filename, False)
    if contents is None:
        sys.stderr.write('Unable to find: %s\n' % filename)
        return
    builder = BuilderFromSource(contents, filename)
    try:
        for node in builder.Generate():
            if should_print(node):
                print(node.name)
    except KeyboardInterrupt:
        return
    except:  # noqa: E722 — intentionally best-effort over malformed sources
        pass
class CTRLConfig(PretrainedConfig):
    """Configuration class for the Salesforce CTRL model.

    Stores the model hyper-parameters (vocabulary size, context length,
    embedding/FFN widths, layer/head counts, dropout rates, initializer
    range, and sequence-summary settings); extra keyword arguments are
    forwarded to ``PretrainedConfig``.
    """
    pretrained_config_archive_map = CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'ctrl'

    def __init__(self, vocab_size=246534, n_positions=256, n_ctx=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-06, initializer_range=0.02, summary_type='cls_index', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels

    # BUGFIX: these are the standard transformers common-attribute aliases
    # and are defined as properties upstream; as plain methods,
    # ``config.hidden_size`` etc. would return a bound method instead of
    # the value (the decorators were apparently lost in transcription).
    @property
    def max_position_embeddings(self):
        return self.n_positions

    @property
    def hidden_size(self):
        return self.n_embd

    @property
    def num_attention_heads(self):
        return self.n_head

    @property
    def num_hidden_layers(self):
        return self.n_layer
def packed_dtype_fmt():
    """Return the expected string form of the packed record dtype, with the
    endianness prefix ('<' or '>') and the platform's long-double byte size
    filled in."""
    from sys import byteorder
    endian = '<' if byteorder == 'little' else '>'
    ldbl_size = np.dtype('longdouble').itemsize
    template = "[('bool_', '?'), ('uint_', '{e}u4'), ('float_', '{e}f4'), ('ldbl_', '{e}f{}')]"
    return template.format(ldbl_size, e=endian)
def decode_tf(FILENAME):
    """Extract per-frame camera-0 intrinsics from a Waymo Open Dataset
    TFRecord into ``./waymo_decode_val/<record>/intrinsic/NNNNN_cam.txt``.

    Each output file holds the 9 intrinsic values followed by the image
    width and height, comma-separated.
    """
    # Drop the trailing 9 characters of the filename (presumably the
    # '.tfrecord' extension — TODO confirm).
    basename = os.path.basename(FILENAME)[:-9]
    out_dir = './waymo_decode_val/' + basename + '/intrinsic/'
    # exist_ok avoids the racy exists()-then-makedirs check.
    os.makedirs(out_dir, exist_ok=True)
    dataset = tf.data.TFRecordDataset(FILENAME, compression_type='')
    count = 0
    frame = open_dataset.Frame()
    for data in dataset:
        frame.ParseFromString(bytearray(data.numpy()))
        calib = frame.context.camera_calibrations[0]
        intrinsics = np.array([calib.intrinsic])
        width = calib.width
        height = calib.height
        # BUGFIX: use a with-block so the handle is closed even when a
        # write fails (the original leaked it on error).
        with open(out_dir + str(count).zfill(5) + '_cam.txt', 'w') as cam_file:
            cam_file.write('%f,%f,%f,%f,%f,%f,%f,%f,%f,%d,%d' % (intrinsics[(0, 0)], intrinsics[(0, 1)], intrinsics[(0, 2)], intrinsics[(0, 3)], intrinsics[(0, 4)], intrinsics[(0, 5)], intrinsics[(0, 6)], intrinsics[(0, 7)], intrinsics[(0, 8)], width, height))
        count += 1
class MobileNetV2(nn.Module):
    """MobileNetV2 variant adapted for flat 1-D input: a 6420-dim vector is
    batch-normalized, reshaped to (N, 1, L), max-pooled, passed through the
    inverted-residual feature stack, globally averaged, and classified.

    NOTE(review): the classifier uses ``num_classes[0]``, so callers must
    pass a sequence — the int default of 1000 would raise TypeError.
    Confirm the intended parameter type. Also note the first ConvBNReLU
    takes 1 input channel, matching the (N, 1, L) reshape in forward.
    """

    def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
        # width_mult scales all channel counts; round_nearest rounds the
        # scaled counts to hardware-friendly multiples.
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        if (inverted_residual_setting is None):
            # Each row: [expand ratio t, output channels c, repeats n, stride s].
            inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
        if ((len(inverted_residual_setting) == 0) or (len(inverted_residual_setting[0]) != 4)):
            raise ValueError('inverted_residual_setting should be non-empty or a 4-element list, got {}'.format(inverted_residual_setting))
        input_channel = _make_divisible((input_channel * width_mult), round_nearest)
        self.last_channel = _make_divisible((last_channel * max(1.0, width_mult)), round_nearest)
        # Stem takes a single input channel (1-D signal), not 3 (RGB).
        features = [ConvBNReLU(1, input_channel, stride=2)]
        for (t, c, n, s) in inverted_residual_setting:
            output_channel = _make_divisible((c * width_mult), round_nearest)
            for i in range(n):
                # Only the first block of each group applies the stride.
                stride = (s if (i == 0) else 1)
                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
        self.features = nn.Sequential(*features)
        # See class NOTE: num_classes is indexed, so it must be a sequence.
        self.classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(self.last_channel, num_classes[0]), nn.LogSoftmax(dim=1))
        # Input pre-processing: normalize the fixed-length flat input, then
        # downsample after reshaping to (N, 1, L).
        self.normalize = nn.BatchNorm1d(6420)
        self.maxpool1d = nn.MaxPool1d(3, stride=2)
        # Standard MobileNet weight initialization (1-D module variants).
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if (m.bias is not None):
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        # Flat (N, L) input is normalized then given a channel dimension;
        # already-3-D input skips normalization.
        if (len(x.shape) == 2):
            x = self.normalize(x)
        x = x.reshape([x.shape[0], 1, x.shape[1]])
        x = self.maxpool1d(x)
        x = self.features(x)
        # Global average pooling over the length dimension.
        x = x.mean(2)
        x = self.classifier(x)
        return x
# NOTE(review): looks like the residue of a mangled registration decorator
# for the class below (e.g. '@HEADS.register_module()') — confirm against
# the upstream source before relying on this call.
_module()
class OBBDoubleConvFCBBoxHead(OBBoxHead):
    """Double-head oriented-bbox head: a convolutional branch (ResBlock +
    Bottlenecks + average pool) predicts box regression from the regression
    RoI features, while a fully connected branch predicts classification
    scores from the classification RoI features.
    """

    def __init__(self, num_convs=0, num_fcs=0, conv_out_channels=1024, fc_out_channels=1024, conv_cfg=None, norm_cfg=dict(type='BN'), **kwargs):
        # Average pooling is mandatory for the conv branch output.
        kwargs.setdefault('with_avg_pool', True)
        super(OBBDoubleConvFCBBoxHead, self).__init__(**kwargs)
        assert self.with_avg_pool
        assert (num_convs > 0)
        assert (num_fcs > 0)
        self.num_convs = num_convs
        self.num_fcs = num_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # Projects in_channels up to conv_out_channels before the Bottlenecks.
        self.res_block = BasicResBlock(self.in_channels, self.conv_out_channels)
        self.conv_branch = self._add_conv_branch()
        self.fc_branch = self._add_fc_branch()
        # Regression output size depends on class-agnostic vs per-class boxes
        # (reg_dim, reg_class_agnostic, num_classes come from the base head).
        out_dim_reg = (self.reg_dim if self.reg_class_agnostic else (self.reg_dim * self.num_classes))
        self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)
        # +1 for the background class.
        self.fc_cls = nn.Linear(self.fc_out_channels, (self.num_classes + 1))
        self.relu = nn.ReLU(inplace=True)

    def _add_conv_branch(self):
        # num_convs Bottleneck blocks at constant width for box regression.
        branch_convs = nn.ModuleList()
        for i in range(self.num_convs):
            branch_convs.append(Bottleneck(inplanes=self.conv_out_channels, planes=(self.conv_out_channels // 4), conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
        return branch_convs
    def _add_fc_branch(self):
        # num_fcs linear layers for classification; the first consumes the
        # flattened RoI features.
        branch_fcs = nn.ModuleList()
        for i in range(self.num_fcs):
            fc_in_channels = ((self.in_channels * self.roi_feat_area) if (i == 0) else self.fc_out_channels)
            branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
        return branch_fcs
    def init_weights(self):
        # Tighter std for regression; Xavier for the fc branch.
        normal_init(self.fc_cls, std=0.01)
        normal_init(self.fc_reg, std=0.001)
        for m in self.fc_branch.modules():
            if isinstance(m, nn.Linear):
                xavier_init(m, distribution='uniform')
    def forward(self, x_cls, x_reg):
        """Run the conv branch on ``x_reg`` and the fc branch on ``x_cls``;
        returns (cls_score, bbox_pred)."""
        x_conv = self.res_block(x_reg)
        for conv in self.conv_branch:
            x_conv = conv(x_conv)
        if self.with_avg_pool:
            x_conv = self.avg_pool(x_conv)
        x_conv = x_conv.view(x_conv.size(0), (- 1))
        bbox_pred = self.fc_reg(x_conv)
        x_fc = x_cls.view(x_cls.size(0), (- 1))
        for fc in self.fc_branch:
            x_fc = self.relu(fc(x_fc))
        cls_score = self.fc_cls(x_fc)
        return (cls_score, bbox_pred)
def load_model(model_name, tokenizer_name, device='cpu', use_hpu_graphs=False, cpu_jit=False, ipex_int8=False, use_cache=True, peft_path=None, use_deepspeed=False, optimization_config=None, hf_access_token=None, use_llm_runtime=False, assistant_model=None):
print('Loading model {}'.format(model_name))
if (device == 'hpu'):
if use_deepspeed:
import_deepspeed()
from optimum.habana.transformers.modeling_utils import adapt_transformers_to_gaudi
adapt_transformers_to_gaudi()
if isinstance(optimization_config, MixedPrecisionConfig):
dtype = optimization_config.dtype
else:
dtype = 'float32'
bitsandbytes_quant_config = None
if isinstance(optimization_config, BitsAndBytesConfig):
if ((device == 'cuda') and is_bitsandbytes_available() and torch.cuda.is_available()):
bitsandbytes_quant_config = optimization_config
else:
logging.warning(('CUDA device or bitsandbytes is not available, please make sure CUDA device and bitsandbytes' + ' library is available, ignoring bitsandbytes config now.'))
if (dtype == 'bfloat16'):
torch_dtype = torch.bfloat16
elif (dtype == 'float16'):
torch_dtype = torch.float16
elif (dtype == 'float32'):
torch_dtype = torch.float32
else:
logging.warning(f'Unsupported dtype {dtype}, using float32 now.')
torch_dtype = torch.float32
MODELS[model_name] = {}
if assistant_model:
print('Loading assistant model...')
assistant_model_class = AutoModelForCausalLM
print(f'Loading assistant model via {assistant_model_class}')
assis_model = assistant_model_class.from_pretrained(assistant_model, low_cpu_mem_usage=True, torch_dtype=torch_dtype)
assis_model = assis_model.eval().to(device)
assis_model = assis_model.to(memory_format=torch.channels_last)
MODELS[model_name]['assistant_model'] = assis_model
else:
MODELS[model_name]['assistant_model'] = None
try:
config = AutoConfig.from_pretrained(model_name, use_auth_token=hf_access_token, trust_remote_code=(True if (re.search('chatglm', model_name, re.IGNORECASE) or re.search('qwen', model_name, re.IGNORECASE)) else False))
except ValueError as e:
logging.error(f'Exception: {e}')
if ('Unrecognized model in' in str(e)):
raise ValueError(f'load_model: model config is not found, {e}')
else:
raise ValueError(f'load_model: unknown ValueError occurred, {e}')
except EnvironmentError as e:
logging.error(f'Exception: {e}')
if ('not a local folder and is not a valid model identifier' in str(e)):
raise ValueError(f'load_model: model name or path is not found, {e}')
else:
raise ValueError(f'load_model: unknown EnvironmentError occurred, {e}')
except Exception as e:
logging.error(f'Exception: {e}')
raise ValueError(f'load_model: an unexpected error occurred, {e}')
MODELS[model_name]['model_type'] = config.model_type
try:
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, use_fast=(False if (re.search('llama', model_name, re.IGNORECASE) or re.search('neural-chat-7b-v2', model_name, re.IGNORECASE)) else True), use_auth_token=hf_access_token, trust_remote_code=(True if (re.search('qwen', model_name, re.IGNORECASE) or re.search('chatglm', model_name, re.IGNORECASE)) else False))
except EnvironmentError as e:
logging.error(f'Exception: {e}')
if ('not a local folder and is not a valid model identifier' in str(e)):
raise ValueError(f'load_model: tokenizer is not found, {e}')
else:
raise ValueError(f'load_model: unknown EnvironmentError occurred, {e}')
except Exception as e:
logging.error(f'Exception: {e}')
raise ValueError(f'load_model: an unexpected error occurred, {e}')
load_to_meta = model_on_meta(config)
if isinstance(optimization_config, WeightOnlyQuantConfig):
from intel_extension_for_transformers.neural_chat.chatbot import optimize_model
model = optimize_model(model_name, optimization_config, use_llm_runtime)
if (not model.config.is_encoder_decoder):
tokenizer.padding_side = 'left'
if ((tokenizer.pad_token is None) and (tokenizer.pad_token_id is None)):
tokenizer.pad_token = tokenizer.eos_token
MODELS[model_name]['model'] = model
MODELS[model_name]['tokenizer'] = tokenizer
logging.info('Optimized Model loaded.')
return
try:
if ((device == 'hpu') and use_deepspeed and load_to_meta):
with deepspeed.OnDevice(dtype=torch.bfloat16, device='meta'):
model = AutoModelForCausalLM.from_config(config, torch_dtype=torch.bfloat16)
elif (re.search('flan-t5', model_name, re.IGNORECASE) and (not ipex_int8)):
with smart_context_manager(use_deepspeed=use_deepspeed):
model = AutoModelForSeq2SeqLM.from_pretrained(model_name, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_auth_token=hf_access_token, quantization_config=bitsandbytes_quant_config)
elif (re.search('chatglm', model_name, re.IGNORECASE) and (not ipex_int8)):
with smart_context_manager(use_deepspeed=use_deepspeed):
model = AutoModel.from_pretrained(model_name, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_auth_token=hf_access_token, trust_remote_code=True)
elif (((re.search('gpt', model_name, re.IGNORECASE) or (config.model_type == 'bloom') or (config.model_type == 'qwen') or (config.model_type == 'gpt_bigcode') or (config.model_type == 'mpt') or (config.model_type == 'llama') or (config.model_type == 'mistral') or (config.model_type == 'mixtral')) and (not ipex_int8)) or (config.model_type == 'opt')):
with smart_context_manager(use_deepspeed=use_deepspeed):
model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=hf_access_token, torch_dtype=torch_dtype, low_cpu_mem_usage=True, quantization_config=bitsandbytes_quant_config, trust_remote_code=(True if ((config.model_type == 'qwen') or re.search('codegen', model_name, re.IGNORECASE)) else False))
elif (((config.model_type == 'gpt_bigcode') or (config.model_type == 'llama')) and ipex_int8):
with smart_context_manager(use_deepspeed=use_deepspeed):
try:
import intel_extension_for_pytorch as ipex
except ImportError:
warnings.warn('Please install Intel Extension for PyTorch to accelerate the model inference.')
assert (ipex.__version__ >= '2.1.0+cpu'), 'Please use Intel Extension for PyTorch >=2.1.0+cpu.'
from optimum.intel.generation.modeling import TSModelForCausalLM
model = TSModelForCausalLM.from_pretrained(model_name, file_name='best_model.pt')
elif (((config.model_type == 'llama') or (config.model_type == 'opt') or re.search('gpt_neox', model_name, re.IGNORECASE) or re.search('gptj', model_name, re.IGNORECASE) or re.search('falcon', model_name, re.IGNORECASE)) and ipex_int8):
with smart_context_manager(use_deepspeed=use_deepspeed):
try:
import intel_extension_for_pytorch as ipex
except ImportError:
warnings.warn('Please install Intel Extension for PyTorch to accelerate the model inference.')
assert (ipex.__version__ >= '2.1.0+cpu'), 'Please use Intel Extension for PyTorch >=2.1.0+cpu.'
if re.search('falcon', model_name, re.IGNORECASE):
assert (transformers.__version__ <= '4.33.3'), 'Please pip install transformers==4.33.3'
from intel_extension_for_transformers.llm.evaluation.models import TSModelCausalLMForITREX
model = TSModelCausalLMForITREX.from_pretrained(model_name, file_name='best_model.pt')
else:
raise ValueError(f'unsupported model name or path {model_name}, only supports t5/llama/mpt/gptj/bloom/opt/qwen/mistral/mixtral/gpt_bigcode model type now.')
except EnvironmentError as e:
logging.error(f'Exception: {e}')
if ('not a local folder and is not a valid model identifier' in str(e)):
raise ValueError('load_model: model name or path is not found')
else:
raise ValueError(f'load_model: unknown EnvironmentError occurred, {e}')
except Exception as e:
logging.error(f'Exception: {e}')
raise ValueError(f'load_model: an unexpected error occurred, {e}')
if (re.search('llama', model.config.architectures[0], re.IGNORECASE) and (not re.search('magicoder', model_name, re.IGNORECASE))):
model.generation_config.pad_token_id = 0
model.generation_config.bos_token_id = 1
model.generation_config.eos_token_id = 2
if (hasattr(model.generation_config, 'pad_token_id') and (model.generation_config.pad_token_id is not None) and (not ('chatglm' in model_name))):
tokenizer.pad_token_id = model.generation_config.pad_token_id
if (hasattr(model.generation_config, 'eos_token_id') and (model.generation_config.eos_token_id is not None) and (not ('chatglm' in model_name))):
tokenizer.eos_token_id = model.generation_config.eos_token_id
if (hasattr(model.generation_config, 'bos_token_id') and (model.generation_config.bos_token_id is not None)):
tokenizer.bos_token_id = model.generation_config.bos_token_id
if (tokenizer.pad_token_id is None):
model.generation_config.pad_token_id = tokenizer.pad_token_id = tokenizer.eos_token_id
if (model.generation_config.eos_token_id is None):
model.generation_config.eos_token_id = tokenizer.eos_token_id
if (device == 'hpu'):
if (peft_path and (not (use_deepspeed and load_to_meta))):
from peft import PeftModel
model = PeftModel.from_pretrained(model, peft_path)
model = model.to(torch.bfloat16)
model = model.merge_and_unload()
if (not use_deepspeed):
model = model.eval().to('hpu')
if use_hpu_graphs:
from habana_frameworks.torch.hpu import wrap_in_hpu_graph
model = wrap_in_hpu_graph(model)
if use_deepspeed:
model = init_deepspeed_inference(model=model, model_name_or_path=model_name, peft_path=peft_path, use_hpu_graphs=use_hpu_graphs, is_meta=load_to_meta, token=hf_access_token)
else:
if peft_path:
from peft import PeftModel
model = PeftModel.from_pretrained(model, peft_path)
model = model.to(dtype=torch_dtype)
if (device == 'cpu'):
if ((torch_dtype == torch.bfloat16) and (not ipex_int8)):
import intel_extension_for_pytorch as intel_ipex
model = intel_ipex.optimize(model.eval(), dtype=torch_dtype, inplace=True, level='O1', auto_kernel_selection=True)
if (cpu_jit and (re.search('mpt-7b', model_name, re.IGNORECASE) or re.search('neural-chat-7b-v1', model_name, re.IGNORECASE))):
from intel_extension_for_transformers.llm.utils.mpt_trace import jit_trace_mpt_7b, MPTTSModelForCausalLM
model.config.use_cache = use_cache
model = jit_trace_mpt_7b(model)
config = AutoConfig.from_pretrained(model_name, use_auth_token=hf_access_token)
model = MPTTSModelForCausalLM(model, config, use_cache=use_cache, model_dtype=torch.bfloat16)
elif (device in ['cuda', 'xpu']):
if (hasattr(model, 'device') and (model.device.type != device)):
model = model.eval().to(device)
else:
raise ValueError(f'unsupported device {device}, only supports cpu, xpu, cuda and hpu now.')
if (not model.config.is_encoder_decoder):
tokenizer.padding_side = 'left'
if ((tokenizer.pad_token is None) and (tokenizer.pad_token_id is None)):
tokenizer.pad_token = tokenizer.eos_token
model.generation_config.pad_token_id = model.generation_config.eos_token_id
if ipex_int8:
input_ids = tokenizer('A chat between a curious human and an artificial intelligence assistant.\n Human: Tell me about Intel.\n Assistant:', return_tensors='pt').input_ids.to('cpu')
with torch.inference_mode(), torch.no_grad():
for i in range(2):
model.generate(input_ids, max_new_tokens=32, do_sample=False, temperature=0.9)
MODELS[model_name]['model'] = model
MODELS[model_name]['tokenizer'] = tokenizer
print('Model loaded.') |
def test_fetch_metadata_function_with_exp_name(tmpdir):
    """fetch_metadata(exp_name=...) matches experiments by name substring."""
    root = tmpdir.strpath
    # Seed three experiments; two share the 'alpha' suffix.
    experiments = [
        ('experiment 1 alpha', '1234'),
        ('experiment 2 beta', '5678'),
        ('experiment 3 alpha', '9990'),
    ]
    for exp_name, exp_id in experiments:
        run_test_experiment(exp_name=exp_name, exp_id=exp_id, root_dir=root)
    reader = TinyDbReader(root)
    # Substring matching: 'alpha' hits two experiments.
    matches = reader.fetch_metadata(exp_name='alpha')
    assert len(matches) == 2
    # Prefix matching: exactly one experiment starts with 'experiment 1'.
    matches = reader.fetch_metadata(exp_name='experiment 1')
    assert len(matches) == 1
    assert matches[0]['experiment']['name'] == 'experiment 1 alpha'
    # No experiment contains 'foo'.
    matches = reader.fetch_metadata(exp_name='foo')
    assert len(matches) == 0
def convert_data():
    """Convert every raw dataset folder under RAW_DIR into a numpy archive.

    For each dataset, the <name>_TRAIN.ts and <name>_TEST.ts files are
    loaded, concatenated, stacked into a single tensor and written to
    <PROCESSED_DIR>/<dataset>/data.npz together with the labels and the
    original train/test index split. Datasets whose output folder already
    exists are skipped; per-dataset failures are reported and the loop
    continues with the next dataset.
    """
    datasets = [x for x in os.listdir(RAW_DIR) if '.' not in x]
    for dataset in tqdm(datasets):
        save_loc = PROCESSED_DIR + '/' + dataset
        if os.path.exists(save_loc):
            print('Skipping {} as folder exists at {}. Remove it to reconvert.'.format(dataset, save_loc))
            # BUG FIX: previously this printed the skip message but fell
            # through and reconverted the dataset anyway.
            continue
        try:
            loc = RAW_DIR + '/' + dataset
            X_train, y_train = load_from_tsfile_to_dataframe(loc + '/' + dataset + '_TRAIN.ts')
            X_test, y_test = load_from_tsfile_to_dataframe(loc + '/' + dataset + '_TEST.ts')
            all_frames = pd.concat((X_train, X_test))
            tensor_labels = torch.Tensor(np.concatenate((y_train, y_test)))
            # Each DataFrame row holds one series per channel; join the
            # channels column-wise so each instance becomes one 2-D tensor.
            tensor_data = []
            for idx in range(all_frames.shape[0]):
                tensor_data.append(torch.Tensor(pd.concat(all_frames.iloc[idx].values, axis=1).values))
            num_train, num_test = X_train.shape[0], X_test.shape[0]
            original_idxs = (torch.arange(0, num_train), torch.arange(num_train, num_test + num_train))
            try:
                tensor_data = torch.stack(tensor_data)
            except RuntimeError as e:
                # stack fails when series lengths differ across instances
                raise Exception('Could not stack the data for dataset: {}'.format(dataset)) from e
            # BUG FIX: np.savez does not create missing directories, so the
            # save step used to fail for every dataset that was not skipped.
            os.makedirs(save_loc)
            np.savez(save_loc + '/data.npz', data=tensor_data.numpy(), labels=tensor_labels.numpy(), original_idxs=original_idxs)
        except Exception as e:
            print('Failed for: {}\nError: {}'.format(dataset, e))
def tokenize(key_to_word):
    """Clean every word list in *key_to_word*, dropping empty results.

    :param key_to_word: mapping of key -> iterable of raw word strings
    :return: new dict mapping each key to its list of non-empty cleaned words
    """
    key_to_sentence = {}
    for k, v in key_to_word.items():
        # Clean each word exactly once. The original evaluated clean(w)
        # twice per word (once in the filter, once for the kept value).
        cleaned = (clean(w) for w in v)
        key_to_sentence[k] = [cw for cw in cleaned if cw != '']
    return key_to_sentence
def build_fake_yaml():
    """Write a minimal tuning/quantization config to 'fake_yaml.yaml'.

    The embedded YAML string is parsed and re-dumped so the file on disk is
    normalized by the YAML serializer rather than written verbatim.
    """
    fake_yaml = '\n model:\n name: fake_yaml\n framework: tensorflow\n inputs: input\n outputs: op_to_store\n device: cpu\n quantization:\n model_wise:\n weight:\n granularity: per_tensor\n scheme: sym\n dtype: int8\n algorithm: minmax\n activation:\n algorithm: kl\n evaluation:\n accuracy:\n metric:\n topk: 1\n tuning:\n strategy:\n name: mse\n accuracy_criterion:\n relative: 0.01\n exit_policy:\n performance_only: True\n workspace:\n path: saved\n '
    y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
    # FIX: the explicit f.close() inside the with-block was redundant — the
    # context manager already closes the file on exit.
    with open('fake_yaml.yaml', 'w', encoding='utf-8') as f:
        yaml.dump(y, f)
# FIX: the decorator had been mangled to a bare parenthesized expression
# `(sample=sampled_from([...]))`, which is a SyntaxError; restore the
# Hypothesis @given decorator that draws one (target_shape,
# input_data_shape, expected_box_shape, expected_result) case per run.
@given(sample=sampled_from([
    ((-1, 10), (5, 2, 10), (5, 10), (5, 10)),
    ((-1, 10), (5, 4, 2, 10), (5, 4, 10), (20, 10)),
    ((10, 2, 10), (20, 2, 10), (20, 10), (10, 2, 10)),
    ((-1, 10), (2, 5), (5,), RuntimeError),
    ((2, 10), (5, 2, 10), (5, 10), RuntimeError),
]))
def test_reshape(sample):
    """box_reshape either produces the expected box shape or raises."""
    (target_shape, input_data_shape, self_shape, expected) = sample
    box = BoxTensor(torch.tensor(np.random.rand(*input_data_shape)))
    assert box.box_shape == self_shape
    if expected == RuntimeError:
        # incompatible target shapes must raise
        with pytest.raises(expected):
            box.box_reshape(target_shape)
    else:
        new = box.box_reshape(target_shape)
        assert new.box_shape == expected
class PipelineChunkIterator(PipelineIterator):
    """Flattens a two-level pipeline stream: each item pulled from the
    loader is passed through ``infer`` (which returns an iterator), and the
    elements of those sub-iterators are yielded one at a time as a single
    flat sequence."""
    def __init__(self, loader, infer, params, loader_batch_size=None):
        # loader_batch_size is accepted for signature compatibility with the
        # parent class but intentionally not forwarded.
        super().__init__(loader, infer, params)
    def __iter__(self):
        self.iterator = iter(self.loader)
        # Sub-iterator is created lazily on the first __next__ call.
        self.subiterator = None
        return self
    def __next__(self):
        if (self.subiterator is None):
            # First call: build the sub-iterator from the first loader item.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            processed = next(self.subiterator)
        except StopIteration:
            # Current sub-iterator exhausted: advance to the next loader
            # item. When the loader itself is exhausted, next(self.iterator)
            # raises StopIteration, which correctly ends iteration.
            # NOTE(review): if this fresh sub-iterator is itself empty, the
            # inner StopIteration also ends iteration early — presumably
            # infer never yields an empty stream; confirm upstream.
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
def evaluate_metrics_from_lists(predictions: List[str], ground_truths: List[List[str]], ids: Union[List[int], None] = None) -> Tuple[Dict[str, float], Dict[int, Dict[str, float]]]:
    """Score predictions against single-reference ground truths.

    Both sides are written as COCO-format JSON files under ./tmp, scoring is
    delegated to evaluate_metrics_from_files, and the temp files are removed
    afterwards.

    :param predictions: one predicted string per item
    :param ground_truths: one single-element reference list per item
    :param ids: optional item ids; defaults to 0..len(predictions)-1
    :return: (corpus-level metrics, per-file metrics keyed by id)
    """
    assert len(predictions) == len(ground_truths)
    # Each item must carry exactly one reference.
    assert all(len(refs) == 1 for refs in ground_truths)
    if ids is None:
        ids = range(len(predictions))
    coco_pred, coco_ref = reformat_to_coco(predictions, ground_truths, ids)
    workdir = Path('tmp')
    workdir.mkdir(exist_ok=True)
    ref_file = workdir.joinpath('ref.json')
    pred_file = workdir.joinpath('pred.json')
    write_json(coco_ref, ref_file)
    write_json(coco_pred, pred_file)
    metrics, per_file_metrics = evaluate_metrics_from_files(pred_file, ref_file)
    ref_file.unlink()
    pred_file.unlink()
    return metrics, per_file_metrics
class PeriodicBoxingDynamics(PeriodicVelocityVerlet):
    """Periodic velocity-Verlet MD that linearly interpolates ("boxes") the
    simulation lattice from its initial shape toward a target lattice over
    the first BoxingT_ time units of the trajectory, rescaling positions,
    velocities and accelerations to follow the changing cell each step.
    """
    def __init__(self, Force_, BoxingLatp_=np.eye(3), name_='PdicBoxMD', BoxingT_=400):
        # Force_: periodic force object; must expose .lattice, .mol0,
        #   __call__, AdjustLattice and ReLattice (used in Prop below).
        # BoxingLatp_: target 3x3 lattice; defaults to the identity.
        # BoxingT_: boxing duration — presumably in the same units as
        #   self.t = step * self.dt (fs) — TODO confirm.
        self.PForce = Force_
        self.BoxingLat0 = Force_.lattice.lattice.copy()
        self.BoxingLatp = BoxingLatp_.copy()
        self.BoxingT = BoxingT_
        # Calls the grandparent initializer directly, bypassing
        # PeriodicVelocityVerlet's own __init__.
        VelocityVerlet.__init__(self, None, self.PForce.mol0, name_, self.PForce.__call__)
        if (PARAMS['MDThermostat'] == 'Nose'):
            self.Tstat = PeriodicNoseThermostat(self.m, self.v)
        else:
            # NOTE(review): any MDThermostat value other than 'Nose' leaves
            # self.Tstat unset, yet Prop() dereferences self.Tstat whenever
            # MDThermostat is not None — only None/'Nose' look safe here.
            print('Unthermostated Periodic Velocity Verlet.')
        return
    def Prop(self):
        """Propagate the dynamics for self.maxstep steps, interpolating the
        lattice until t > BoxingT, logging energies to self.md_log and
        periodically writing the trajectory and a text log."""
        step = 0
        # Columns: 0=t, 4=KE, 5=EPot, 6=total energy; 1-3 left at zero.
        self.md_log = np.zeros((self.maxstep, 7))
        while (step < self.maxstep):
            t = time.time()
            self.t = (step * self.dt)
            self.KE = KineticEnergy(self.v, self.m)
            if (self.t > self.BoxingT):
                # Boxing finished: lattice stays at the target from here on.
                print('Exceeded Boxtime\n', self.BoxingLatp)
            else:
                # Linear interpolation between initial and target lattice.
                newlattice = ((((self.BoxingT - self.t) / self.BoxingT) * self.BoxingLat0) + ((1.0 - ((self.BoxingT - self.t) / self.BoxingT)) * self.BoxingLatp))
                # Rescale state vectors into the new cell, then commit it.
                self.x = self.PForce.AdjustLattice(self.x, self.PForce.lattice.lattice, newlattice)
                self.v = self.PForce.AdjustLattice(self.v, self.PForce.lattice.lattice, newlattice)
                self.a = self.PForce.AdjustLattice(self.a, self.PForce.lattice.lattice, newlattice)
                self.PForce.ReLattice(newlattice)
            print('Density:', self.Density())
            # Effective temperature from equipartition.
            Teff = (((2.0 / 3.0) * self.KE) / IDEALGASR)
            if (PARAMS['MDThermostat'] == None):
                (self.x, self.v, self.a, self.EPot) = PeriodicVelocityVerletStep(self.PForce, self.a, self.x, self.v, self.m, self.dt)
            else:
                # NOTE(review): requires self.Tstat — only set for 'Nose'.
                (self.x, self.v, self.a, self.EPot) = self.Tstat.step(self.PForce, self.a, self.x, self.v, self.m, self.dt)
            self.md_log[(step, 0)] = self.t
            self.md_log[(step, 4)] = self.KE
            self.md_log[(step, 5)] = self.EPot
            self.md_log[(step, 6)] = (self.KE + ((self.EPot - self.EPot0) * JOULEPERHARTREE))
            if (((step % 3) == 0) and PARAMS['MDLogTrajectory']):
                self.WriteTrajectory()
            if ((step % 500) == 0):
                np.savetxt(((('./results/' + 'MDLog') + self.name) + '.txt'), self.md_log)
            step += 1
            LOGGER.info('Step: %i time: %.1f(fs) <KE>(kJ/mol): %.5f <|a|>(m/s2): %.5f <EPot>(Eh): %.5f <Etot>(kJ/mol): %.5f Teff(K): %.5f', step, self.t, (self.KE / 1000.0), np.linalg.norm(self.a), self.EPot, ((self.KE / 1000.0) + (self.EPot * KJPERHARTREE)), Teff)
            # Prints a tuple (Python-3 print of a parenthesized pair).
            print(('per step cost:', (time.time() - t)))
        return
def roc(tests=()):
    """Build ROC curve points from confusion-matrix counts.

    Each element of *tests* is a (TP, TN, FP, FN) tuple. Returns the sorted
    list of (FPR, TPR) points, always anchored at (0.0, 0.0) and (1.0, 1.0).

    FIX: the default argument was a mutable list ([]); replaced with an
    immutable tuple to avoid the shared-mutable-default hazard.
    """
    def fpr(TP, TN, FP, FN):
        # false positive rate; `or 1` guards division when there are no negatives
        return float(FP) / ((FP + TN) or 1)

    def tpr(TP, TN, FP, FN):
        # true positive rate; `or 1` guards division when there are no positives
        return float(TP) / ((TP + FN) or 1)

    points = [(fpr(*m), tpr(*m)) for m in tests]
    return sorted([(0.0, 0.0), (1.0, 1.0)] + points)
class TFParkSampleToMiniBatch(Preprocessing):
    """Preprocessing step that groups individual samples into mini-batches.

    All construction work happens in the Preprocessing base class; this
    subclass only forwards its configuration.
    """
    def __init__(self, batch_size, drop_remainder, bigdl_type='float'):
        # Argument order matters for the base constructor: bigdl_type
        # first, then the step's own parameters.
        super().__init__(bigdl_type, batch_size, drop_remainder)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.