def test(model, test_loader, theta, device):
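# Top-1 / top-10 predicate accuracy: the ranked candidates for the first sample of each batch
# are expanded to their whole cluster (CaCao_map50_dict_07.json) before matching the label;
# relies on module-level `words` (vocabulary) and `weight`.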
top_1 = 0
top_10 = 0
len_test = len(test_loader)
for triplets in tqdm(test_loader):
batch_text = []
batch_img = []
for i in range(len(triplets[1])):
(subject, predicate, object) = triplets[1][i].split('--')
batch_text.append((subject.lower(), predicate.lower(), object.lower()))
batch_img.append(triplets[0][i])
(output, label) = model(batch_text, batch_img, weight, theta, device)
predictions = output[1]
cluster_dict = json.load(open('utils_data/cluster/CaCao_map50_dict_07.json', 'r'))
word_1 = []
word_3 = []
word_5 = []
word_10 = []
word_candidates_1 = torch.argsort(predictions[0], descending=True)[:1].tolist()
word_candidates_3 = torch.argsort(predictions[0], descending=True)[:3].tolist()
word_candidates_5 = torch.argsort(predictions[0], descending=True)[:5].tolist()
word_candidates_10 = torch.argsort(predictions[0], descending=True)[:10].tolist()
for k in word_candidates_1:
for c in cluster_dict.keys():
if (words[k] in cluster_dict[c]['words']):
for w in cluster_dict[c]['words']:
word_1.append(w)
break
for k in word_candidates_10:
for c in cluster_dict.keys():
if (words[k] in cluster_dict[c]['words']):
for w in cluster_dict[c]['words']:
word_10.append(w)
break
if (words[label.item()] in word_1):
top_1 += 1
if (words[label.item()] in word_10):
top_10 += 1
print('top_1 acc: ', (top_1 / len_test))
print('top_10 acc: ', (top_10 / len_test))
return (top_1 / len_test) |
def show_ae(autoencoder, data):
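# Visualizes a Keras autoencoder: top row shows original test images, bottom row their reconstructions.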
x_test = data.x_test
decoded_imgs = autoencoder.predict(x_test)
print(decoded_imgs.shape, data.x_test.shape)
if (backend.image_data_format() == 'channels_first'):
(N, n_ch, n_i, n_j) = x_test.shape
else:
(N, n_i, n_j, n_ch) = x_test.shape
x_test = x_test.reshape(N, n_i, n_j)
decoded_imgs = decoded_imgs.reshape(decoded_imgs.shape[0], n_i, n_j)
n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
ax = plt.subplot(2, n, (i + 1))
plt.imshow(x_test[i], cmap='gray')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(2, n, ((i + 1) + n))
plt.imshow(decoded_imgs[i], cmap='gray')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show() |
def modify_tilt(path, bin_factor, exclude_angles=[]):
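# Edits what looks like an IMOD tilt command file in place: ensures 'UseGPU 0' is present,
# updates IMAGEBINNED to the given bin factor, and writes EXCLUDELIST2 for excluded views.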
f = open(path, 'r')
content = [l.strip() for l in f]
f.close()
if (not ('UseGPU 0' in content)):
content.insert((len(content) - 1), 'UseGPU 0')
binned_idx = [i for (i, s) in enumerate(content) if ('IMAGEBINNED' in s)][0]
content[binned_idx] = ('IMAGEBINNED ' + str(bin_factor))
if (len(exclude_angles) > 0):
exclude_idx = [i for (i, s) in enumerate(content) if ('EXCLUDELIST2 ' in s)]
if (len(exclude_idx) == 0):
exclude_idx = (len(content) - 1)
else:
exclude_idx = exclude_idx[0]
content[exclude_idx] = ('EXCLUDELIST2 ' + str(exclude_angles)[1:(- 1)])
f = open(path, 'w')
for l in content:
f.writelines(('%s\n' % l))
print(l)
f.close() |
class Decoder(nn.Module):
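# Transformer-style decoder stack: applies each layer to (x, cross) with optional masks
# and tau/delta conditioning, then an optional final norm and projection head.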
def __init__(self, layers, norm_layer=None, projection=None):
super(Decoder, self).__init__()
self.layers = nn.ModuleList(layers)
self.norm = norm_layer
self.projection = projection
def forward(self, x, cross, x_mask=None, cross_mask=None, tau=None, delta=None):
for layer in self.layers:
x = layer(x, cross, x_mask=x_mask, cross_mask=cross_mask, tau=tau, delta=delta)
if (self.norm is not None):
x = self.norm(x)
if (self.projection is not None):
x = self.projection(x)
return x |
class Transformer(AbstractTransformer):
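# Enumerates 2 (flip) x 3 (tx) x 3 (ty) x 4 (rotation) = 72 affine transformations.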
def __init__(self, translation_x=8, translation_y=8):
self.max_tx = translation_x
self.max_ty = translation_y
super().__init__()
def _create_transformation_list(self):
transformation_list = []
for (is_flip, tx, ty, k_rotate) in itertools.product((False, True), (0, (- self.max_tx), self.max_tx), (0, (- self.max_ty), self.max_ty), range(4)):
transformation = AffineTransformation(is_flip, tx, ty, k_rotate)
transformation_list.append(transformation)
self._transformation_list = transformation_list |
def _get_default_logging_level():
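# Reads DIFFUSERS_VERBOSITY from the environment; unknown or unset values fall back to _default_log_level.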
env_level_str = os.getenv('DIFFUSERS_VERBOSITY', None)
if env_level_str:
if (env_level_str in log_levels):
return log_levels[env_level_str]
else:
logging.getLogger().warning(f"Unknown option DIFFUSERS_VERBOSITY={env_level_str}, has to be one of: {', '.join(log_levels.keys())}")
return _default_log_level |
def _construct_agent(algo):
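# Numeric names load a trained NN agent from ./train_package/<id>/netfile;
# names found in ALGOS build a traditional agent; anything else raises LookupError.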
if algo.isdigit():
agent = None
agent_type = 'nn'
net_dir = (('./train_package/' + algo) + '/netfile')
elif (algo in ALGOS):
agent = ALGOS[algo]()
agent_type = 'traditional'
net_dir = None
else:
message = ((('The algorithm name ' + algo) + ' is not supported. Supported algos are ') + str(list(ALGOS.keys())))
raise LookupError(message)
return (agent, agent_type, net_dir) |
def write_vocab(word2idx, idx2word, path):
f = open(os.path.join(OUTPUT_PATH, 'vocab.pkl'), 'wb')
pickle.dump([word2idx, idx2word], f)
f.close() |
def load_csv(path):
df = pd.read_csv(path, compression='gzip', dtype='str', header=None)
return list(df[0].values) |
def seedj(epoch, j, cycle, conts):
return ((int(os.environ['MYSEED']) + ((epoch + j) * conts)) + ((cycle + 1) * 10)) |
def run_test(rank, world_size, tmp_file):
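# Runs two SGD steps on identical inputs through an atorch and a fairscale model
# under distributed (NCCL) init and asserts both produce the same MSE loss.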
set_seed(42)
os.environ['RANK'] = f'{rank}'
os.environ['WORLD_SIZE'] = f'{world_size}'
atorch.init_distributed('nccl')
torch.cuda.set_device(rank)
device = torch.device(f'cuda:{rank}')
model_params = ModelArgs(dim=64, n_layers=4, n_heads=8, vocab_size=512, norm_eps=1e-05, max_batch_size=8, max_seq_len=8)
atorch_mlp = init_atorch(model_params)
fairscale_mlp = init_fairscale(model_params)
input_ids = torch.randint(low=0, high=model_params.vocab_size, size=(model_params.max_batch_size, model_params.max_seq_len), dtype=torch.long, device=device)
labels = torch.rand(model_params.max_batch_size, model_params.max_seq_len, model_params.dim, device=device)
atorch_loss_fct = MSELoss()
fairscale_loss_fct = MSELoss()
atorch_mlp.train()
fairscale_mlp.train()
atorch_optimizer = optim.SGD(atorch_mlp.parameters(), lr=0.1)
fairscale_optimizer = optim.SGD(fairscale_mlp.parameters(), lr=0.1)
for _ in range(2):
atorch_logits = atorch_mlp(input_ids)
fairscale_logits = fairscale_mlp(input_ids)
atorch_optimizer.zero_grad()
fairscale_optimizer.zero_grad()
atorch_loss = atorch_loss_fct(atorch_logits.view((- 1)), labels.view((- 1)))
fairscale_loss = fairscale_loss_fct(fairscale_logits.view((- 1)), labels.view((- 1)))
assert torch.allclose(atorch_loss, fairscale_loss), 'torch and fairscale should return the same loss'
atorch_loss.backward(retain_graph=True)
fairscale_loss.backward(retain_graph=True)
atorch_optimizer.step()
fairscale_optimizer.step()
atorch.reset_distributed() |
class DetectionEvaluator(object):
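# Abstract interface for detection metrics; subclasses implement the add_*_info/evaluate/clear hooks.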
__metaclass__ = ABCMeta
def __init__(self, categories):
self._categories = categories
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
pass
def add_single_detected_image_info(self, image_id, detections_dict):
pass
def evaluate(self):
pass
def clear(self):
pass |
def get_grad_step_data(args, labels, wandb_username, wandb_project):
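# Pulls AUC/AP histories for the given W&B runs (falling back to the 'meta-graph' project),
# reports the values at args.get_step (indexed as get_step / 5, so logging apparently occurs
# every 5 grad steps), and returns the truncated per-experiment curves.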
data_experiments_auc = []
data_experiments_ap = []
for (runs, label) in zip(labels.get('experiments_key'), labels.get('experiments_name')):
if (len(runs[0]) > 0):
for exp_key in runs:
try:
my_run = api.run(('%s/%s/%s' % (wandb_username, wandb_project, exp_key)))
except:
temp_wandb_project = 'meta-graph'
my_run = api.run(('%s/%s/%s' % (wandb_username, temp_wandb_project, exp_key)))
raw_data = my_run.history(samples=1000000)
keys = raw_data.keys().values
test_inner_avg_auc = 'Test_Complete_AUC'
test_inner_avg_ap = 'Test_Complete_AP'
test_avg_auc = 'Test_Avg__AUC'
test_avg_ap = 'Test_Avg__AP'
try:
data_points_auc = raw_data[test_inner_avg_auc].dropna().values
data_points_ap = raw_data[test_inner_avg_ap].dropna().values
data_points_auc_array = [[i, point] for (i, point) in enumerate(data_points_auc)]
data_points_ap_array = [[i, point] for (i, point) in enumerate(data_points_ap)]
data_experiments_auc.append(np.asarray(data_points_auc_array).T)
data_experiments_ap.append(np.asarray(data_points_ap_array).T)
except:
data_points_auc = raw_data[test_avg_auc].dropna().values
data_points_ap = raw_data[test_avg_ap].dropna().values
data_points_auc_array = [[i, point] for (i, point) in enumerate(data_points_auc)]
data_points_ap_array = [[i, point] for (i, point) in enumerate(data_points_ap)]
data_experiments_auc.append(np.asarray(data_points_auc_array).T)
data_experiments_ap.append(np.asarray(data_points_ap_array).T)
print(('%s AUC %f After %d Grad Steps' % (my_run.name, data_points_auc[int((args.get_step / 5))], args.get_step)))
print(('%s AP %f After %d Grad Steps' % (my_run.name, data_points_ap[int((args.get_step / 5))], args.get_step)))
print(('Max AUC %f' % data_points_auc.max()))
print(('Max AP %f' % data_points_ap.max()))
clean_data_experiments_auc = truncate_array(data_experiments_auc)
clean_data_experiments_ap = truncate_array(data_experiments_ap)
return (clean_data_experiments_auc, clean_data_experiments_ap) |
class BratsDataset(Dataset):
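# BraTS MRI dataset: loads modalities, brain mask and ground-truth segmentation,
# applies an optional transform and (optionally) patch sampling, and returns tensors.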
def __init__(self, data: list, sampling_method, patch_size: tuple, compute_patch: bool=False, transform=None):
self.data = data
self.sampling_method = sampling_method
self.patch_size = patch_size
self.compute_patch = compute_patch
self.transform = transform
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
modalities = self.data[idx].load_mri_volumes(normalize=True)
brain_mask = self.data[idx].get_brain_mask()
segmentation_mask = self.data[idx].load_gt_mask()
segmentation_mask = brats_labels.convert_from_brats_labels(segmentation_mask)
if self.transform:
(modalities, segmentation_mask, brain_mask) = self.transform((modalities, segmentation_mask, brain_mask))
if self.compute_patch:
(modalities, segmentation_mask) = self.sampling_method.patching(modalities, segmentation_mask, self.patch_size, brain_mask)
modalities = torch.from_numpy(modalities.astype(float))
segmentation_mask = torch.from_numpy(segmentation_mask.astype(int))
return (modalities, segmentation_mask)
def get_patient_info(self, idx):
return {attr[0]: attr[1] for attr in vars(self.data[idx]).items()} |
def remap_edge_list(e_list: List[tuple], bipartite_graph: bool=False, ret_map: bool=False) -> Union[(List[tuple], tuple)]:
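# Remaps raw (stringified) vertex ids to contiguous integer indices, keeping the two sides
# separate for bipartite graphs; optionally also returns the id map(s).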
e_list = [[str(v) for v in e] for e in e_list]
if bipartite_graph:
(u_set, v_set) = (set(), set())
for (u, v) in e_list:
u_set.add(u)
v_set.add(v)
(u_list, v_list) = (sorted(u_set), sorted(v_set))
(u_map, v_map) = ({raw_u: new_u for (new_u, raw_u) in enumerate(u_list)}, {raw_v: new_v for (new_v, raw_v) in enumerate(v_list)})
e_list = [(u_map[u], v_map[v]) for (u, v) in e_list]
if ret_map:
return (e_list, u_map, v_map)
else:
return e_list
else:
v_set = set()
for e in e_list:
for v in e:
v_set.add(v)
v_list = sorted(v_set)
v_map = {raw_v: new_v for (new_v, raw_v) in enumerate(v_list)}
e_list = [tuple([v_map[v] for v in e]) for e in e_list]
if ret_map:
return (e_list, v_map)
else:
return e_list |
def run():
logging_GOCD.init_logging(log_file_path=param_log_file_path, log_file_mode=param_log_mode)
logging.info('Preparing before training.')
sys.path.append('..')
from symbol_farm import symbol_10_560_25L_8scales_v1 as net
(net_symbol, data_names, label_names) = net.get_net_symbol()
net_initializer = mxnet.initializer.Xavier()
logging.info('Get net symbol successfully.')
from data_provider_farm.pickle_provider import PickleProvider
from data_iterator_farm.multithread_dataiter_for_cross_entropy_v1 import Multithread_DataIter_for_CrossEntropy as DataIter
train_data_provider = PickleProvider(param_trainset_pickle_file_path)
train_dataiter = DataIter(mxnet_module=mxnet, num_threads=param_num_thread_train_dataiter, data_provider=train_data_provider, batch_size=param_train_batch_size, enable_horizon_flip=param_enable_horizon_flip, enable_vertical_flip=param_enable_vertical_flip, enable_random_brightness=param_enable_random_brightness, brightness_params=param_brightness_factors, enable_random_saturation=param_enable_random_saturation, saturation_params=param_saturation_factors, enable_random_contrast=param_enable_random_contrast, contrast_params=param_contrast_factors, enable_blur=param_enable_blur, blur_params=param_blur_factors, blur_kernel_size_list=param_blur_kernel_size_list, neg_image_ratio=param_neg_image_ratio, num_image_channels=param_num_image_channel, net_input_height=param_net_input_height, net_input_width=param_net_input_width, num_output_scales=param_num_output_scales, receptive_field_list=param_receptive_field_list, receptive_field_stride=param_receptive_field_stride, feature_map_size_list=param_feature_map_size_list, receptive_field_center_start=param_receptive_field_center_start, bbox_small_list=param_bbox_small_list, bbox_large_list=param_bbox_large_list, bbox_small_gray_list=param_bbox_small_gray_list, bbox_large_gray_list=param_bbox_large_gray_list, num_output_channels=param_num_output_channels, neg_image_resize_factor_interval=param_neg_image_resize_factor_interval)
val_dataiter = None
if ((param_valset_pickle_file_path != '') and (param_val_batch_size != 0) and (param_num_val_loops != 0) and (param_num_thread_val_dataiter != 0)):
val_data_provider = PickleProvider(param_valset_pickle_file_path)
val_dataiter = DataIter(mxnet_module=mxnet, num_threads=param_num_thread_val_dataiter, data_provider=val_data_provider, batch_size=param_val_batch_size, enable_horizon_flip=param_enable_horizon_flip, enable_vertical_flip=param_enable_vertical_flip, enable_random_brightness=param_enable_random_brightness, brightness_params=param_brightness_factors, enable_random_saturation=param_enable_random_saturation, saturation_params=param_saturation_factors, enable_random_contrast=param_enable_random_contrast, contrast_params=param_contrast_factors, enable_blur=param_enable_blur, blur_params=param_blur_factors, blur_kernel_size_list=param_blur_kernel_size_list, neg_image_ratio=param_neg_image_ratio, num_image_channels=param_num_image_channel, net_input_height=param_net_input_height, net_input_width=param_net_input_width, num_output_scales=param_num_output_scales, receptive_field_list=param_receptive_field_list, receptive_field_stride=param_receptive_field_stride, feature_map_size_list=param_feature_map_size_list, receptive_field_center_start=param_receptive_field_center_start, bbox_small_list=param_bbox_small_list, bbox_large_list=param_bbox_large_list, bbox_small_gray_list=param_bbox_small_gray_list, bbox_large_gray_list=param_bbox_large_gray_list, num_output_channels=param_num_output_channels, neg_image_resize_factor_interval=param_neg_image_resize_factor_interval)
from metric_farm.metric_default import Metric
train_metric = Metric(param_num_output_scales)
val_metric = None
if (val_dataiter is not None):
val_metric = Metric(param_num_output_scales)
train_GOCD.start_train(param_dict=param_dict, mxnet_module=mxnet, context=[mxnet.gpu(i) for i in param_GPU_idx_list], train_dataiter=train_dataiter, train_metric=train_metric, train_metric_update_frequency=param_train_metric_update_frequency, num_train_loops=param_num_train_loops, val_dataiter=val_dataiter, val_metric=val_metric, num_val_loops=param_num_val_loops, validation_interval=param_validation_interval, optimizer_name=param_optimizer_name, optimizer_params=param_optimizer_params, net_symbol=net_symbol, net_initializer=net_initializer, net_data_names=data_names, net_label_names=label_names, pretrained_model_param_path=param_pretrained_model_param_path, display_interval=param_display_interval, save_prefix=param_save_prefix, model_save_interval=param_model_save_interval, start_index=param_start_index) |
@torch.no_grad()
def copy_params_and_buffers(src_module, dst_module, require_all=False):
assert isinstance(src_module, torch.nn.Module)
assert isinstance(dst_module, torch.nn.Module)
src_tensors = dict(named_params_and_buffers(src_module))
for (name, tensor) in named_params_and_buffers(dst_module):
assert ((name in src_tensors) or (not require_all))
if (name in src_tensors):
tensor.copy_(src_tensors[name]) |
class BasicBlock(nn.Module):
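# 1-D ResNet basic block: two conv3x1 + BatchNorm1d stages with a residual (optionally downsampled) connection.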
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x1(inplanes, planes, stride)
self.bn1 = nn.BatchNorm1d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x1(planes, planes)
self.bn2 = nn.BatchNorm1d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if (self.downsample is not None):
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out |
class Cat(Dataset):
def __init__(self, dataset_path, output_size, **kwargs):
super().__init__()
self.data = glob.glob(dataset_path)
assert (len(self.data) > 0), "Can't find data; make sure you specify the path to your dataset"
self.transform = transforms.Compose([transforms.CenterCrop(472), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), transforms.RandomHorizontalFlip(p=0.5), transforms.Resize((output_size, output_size), interpolation=0)])
def __len__(self):
return len(self.data)
def __getitem__(self, index):
X = PIL.Image.open(self.data[index])
X = self.transform(X)
return (X, 0) |
class AGNN(nn.Module):
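# Attention-based GNN (DGL AGNNConv): linear projection, n_layers attention convolutions on graph g, linear classifier.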
def __init__(self, g, in_feats, n_hidden, n_classes, n_layers, init_beta=1, learn_beta=1):
super(AGNN, self).__init__()
self.g = g
self.proj = nn.Sequential(nn.Linear(in_feats, n_hidden), nn.ReLU())
self.layers = nn.ModuleList([AGNNConv(init_beta, learn_beta, allow_zero_in_degree=True) for _ in range(n_layers)])
self.cls = nn.Sequential(nn.Linear(n_hidden, n_classes))
def forward(self, features):
h = self.proj(features)
for layer in self.layers:
h = layer(self.g, h)
return self.cls(h) |
class InitializeParams(WorkDoneProgressParams):
processId: (integer | null)
clientInfo: NotRequired[ClientInfo]
locale: NotRequired[string]
rootPath: NotRequired[(string | null)]
rootUri: (DocumentUri | null)
initializationOptions: NotRequired[LSPAny]
capabilities: ClientCapabilities
trace: NotRequired[TraceValue]
workspaceFolders: NotRequired[(list[WorkspaceFolder] | null)] |
@app.route('/signup', methods=['POST'])  # assumption: route decorator target (an app or Blueprint) not shown in the source
def signup():
if g.loggedIn:
flash('You can not sign up while you are already logged in.', 'danger')
return redirect(url_for('articles.index'))
user_dict = request.form.to_dict()
try:
user = User(user_dict)
except ValidationError as e:
flash(e.message, 'danger')
return render_template('signup.html', user=user_dict, signup=True, categoryList=db.getCategoryNames())
if db.userExist(user.email):
flash('Email already used by another account.', 'danger')
return render_template('signup.html', user=user_dict, signup=True, categoryList=db.getCategoryNames())
id = db.insertUser(user)
send_confirmation_email(user.email)
return make_auth_token_response(id, user.email, url_for('general.confirm_email_page')) |
class CDCM(nn.Module):
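# Compact dilation convolution module: a 1x1 reduction followed by four parallel
# 3x3 convolutions with dilations 5/7/9/11 whose outputs are summed.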
def __init__(self, in_channels, out_channels):
super(CDCM, self).__init__()
self.relu1 = nn.ReLU()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
self.conv2_1 = nn.Conv2d(out_channels, out_channels, kernel_size=3, dilation=5, padding=5, bias=False)
self.conv2_2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, dilation=7, padding=7, bias=False)
self.conv2_3 = nn.Conv2d(out_channels, out_channels, kernel_size=3, dilation=9, padding=9, bias=False)
self.conv2_4 = nn.Conv2d(out_channels, out_channels, kernel_size=3, dilation=11, padding=11, bias=False)
nn.init.constant_(self.conv1.bias, 0)
def forward(self, x):
x = self.relu1(x)
x = self.conv1(x)
x1 = self.conv2_1(x)
x2 = self.conv2_2(x)
x3 = self.conv2_3(x)
x4 = self.conv2_4(x)
return (((x1 + x2) + x3) + x4) |
@pytest.mark.parametrize('parallel', [False, True])
def test_hyper_reconf(parallel):
if parallel:
pytest.importorskip('distributed')
pytest.importorskip('opt_einsum')
import opt_einsum as oe
(eq, shapes) = oe.helpers.rand_equation(30, reg=5, seed=42, d_max=3)
optimizer = ctg.HyperOptimizer(max_repeats=16, parallel=parallel, optlib='random', reconf_opts={'subtree_size': 6}, progbar=True)
oe.contract_path(eq, *shapes, shapes=True, optimize=optimizer)
assert (optimizer.best['flops'] < optimizer.best['original_flops']) |
def id2trainId(label, reverse=False):
label_copy = label.copy()
if reverse:
for (v, k) in id_to_trainid.items():
label_copy[(label == k)] = v
else:
for (k, v) in id_to_trainid.items():
label_copy[(label == k)] = v
return label_copy |
def inception_v3_base(inputs, final_endpoint='Mixed_7c', min_depth=16, depth_multiplier=1.0, scope=None):
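# Builds the Inception V3 backbone with TF-slim, returning (net, end_points);
# construction stops early once `final_endpoint` is reached.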
end_points = {}
if (depth_multiplier <= 0):
raise ValueError('depth_multiplier is not greater than zero.')
depth = (lambda d: max(int((d * depth_multiplier)), min_depth))
with tf.variable_scope(scope, 'InceptionV3', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='VALID'):
end_point = 'Conv2d_1a_3x3'
net = slim.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if (end_point == final_endpoint):
return (net, end_points)
end_point = 'Conv2d_2a_3x3'
net = slim.conv2d(net, depth(32), [3, 3], scope=end_point)
end_points[end_point] = net
if (end_point == final_endpoint):
return (net, end_points)
end_point = 'Conv2d_2b_3x3'
net = slim.conv2d(net, depth(64), [3, 3], padding='SAME', scope=end_point)
end_points[end_point] = net
if (end_point == final_endpoint):
return (net, end_points)
end_point = 'MaxPool_3a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if (end_point == final_endpoint):
return (net, end_points)
end_point = 'Conv2d_3b_1x1'
net = slim.conv2d(net, depth(80), [1, 1], scope=end_point)
end_points[end_point] = net
if (end_point == final_endpoint):
return (net, end_points)
end_point = 'Conv2d_4a_3x3'
net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
end_points[end_point] = net
if (end_point == final_endpoint):
return (net, end_points)
end_point = 'MaxPool_5a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if (end_point == final_endpoint):
return (net, end_points)
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'):
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [5, 5], scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(32), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if (end_point == final_endpoint):
return (net, end_points)
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0b_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [5, 5], scope='Conv_1_0c_5x5')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if (end_point == final_endpoint):
return (net, end_points)
end_point = 'Mixed_5d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [5, 5], scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if (end_point == final_endpoint):
return (net, end_points)
end_point = 'Mixed_6a'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(384), [3, 3], stride=2, padding='VALID', scope='Conv2d_1a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], stride=2, padding='VALID', scope='Conv2d_1a_1x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
end_points[end_point] = net
if (end_point == final_endpoint):
return (net, end_points)
end_point = 'Mixed_6b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(128), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(128), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, depth(128), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, depth(128), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if (end_point == final_endpoint):
return (net, end_points)
end_point = 'Mixed_6c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(160), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(160), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, depth(160), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, depth(160), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if (end_point == final_endpoint):
return (net, end_points)
end_point = 'Mixed_6d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(160), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(160), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, depth(160), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, depth(160), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if (end_point == final_endpoint):
return (net, end_points)
end_point = 'Mixed_6e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(192), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(192), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, depth(192), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if (end_point == final_endpoint):
return (net, end_points)
end_point = 'Mixed_7a'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(320), [3, 3], stride=2, padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(192), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], stride=2, padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
end_points[end_point] = net
if (end_point == final_endpoint):
return (net, end_points)
end_point = 'Mixed_7b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat(axis=3, values=[slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'), slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')])
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = tf.concat(axis=3, values=[slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'), slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if (end_point == final_endpoint):
return (net, end_points)
end_point = 'Mixed_7c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat(axis=3, values=[slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'), slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')])
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = tf.concat(axis=3, values=[slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'), slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if (end_point == final_endpoint):
return (net, end_points)
raise ValueError(('Unknown final endpoint %s' % final_endpoint)) |
@pytest.mark.parametrize('dist', ['normal', 'binary'])
def test_dense_model(dist):
shape = (1,)
units = 20
feature_size = 20
layers = 5
batch_size = 2
features = torch.randn((batch_size, feature_size))
try:
dense = DenseModel(feature_size, shape, layers, units, dist)
except NotImplementedError:
return
output = dense(features)
sample = output.sample()
assert isinstance(sample, torch.Tensor)
assert (sample.size() == (batch_size, *shape)) |
def main(n_splits=10, random_state=1):
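# K-fold preprocessing of a housing-price table (Sale_Price label): splits the CSV into folds,
# preprocesses each train/test pair, and saves the resulting dict to data.npy.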
logger = util.get_logger('log.txt')
logger.info('timestamp: {}'.format(datetime.now()))
start = time.time()
df = pd.read_csv('file2ed11cebe25.csv')
print('\ntime to read in data...{:.3f}s'.format((time.time() - start)))
columns = list(df.columns)
remove_cols = []
if (len(remove_cols) > 0):
df = df.drop(columns=remove_cols)
columns = [x for x in columns if (x not in remove_cols)]
features = {}
features['label'] = ['Sale_Price']
features['numeric'] = ['Lot_Frontage', 'Lot_Area', 'Year_Built', 'Year_Remod_Add', 'Mas_Vnr_Area', 'BsmtFin_SF_2', 'Bsmt_Unf_SF', 'Total_Bsmt_SF', 'First_Flr_SF', 'Second_Flr_SF', 'Low_Qual_Fin_SF', 'Gr_Liv_Area', 'Bsmt_Full_Bath', 'Bsmt_Half_Bath', 'Full_Bath', 'Half_Bath', 'Bedroom_AbvGr', 'Kitchen_AbvGr', 'TotRms_AbvGrd', 'Fireplaces', 'Garage_Cars', 'Garage_Area', 'Wood_Deck_SF', 'Open_Porch_SF', 'Enclosed_Porch', 'Three_season_porch', 'Screen_Porch', 'Pool_Area', 'Misc_Val', 'Mo_Sold', 'Year_Sold', 'Longitude', 'Latitude']
features['categorical'] = list(((set(columns) - set(features['numeric'])) - set(features['label'])))
fold = 1
data = {}
rs = KFold(n_splits=n_splits, random_state=random_state, shuffle=True)
for (train_idxs, test_idxs) in rs.split(df):
logger.info(f'''
fold {fold}...''')
train_df = df.iloc[train_idxs]
test_df = df.iloc[test_idxs]
(X_train, y_train, X_test, y_test, feature) = util.preprocess(train_df, test_df, features, logger=(logger if (fold == 1) else None), objective='regression')
data[fold] = {'X_train': X_train, 'y_train': y_train, 'X_test': X_test, 'y_test': y_test, 'feature': feature}
fold += 1
logger.info(f'''
Fold {(fold - 1)} preview:''')
logger.info(f'train (head): {X_train[:5]}, {y_train[:5]}')
logger.info(f'test (head): {X_test[:5]}, {y_test[:5]}')
logger.info(f'feature (head): {feature[:5]}')
logger.info(f'X_train.shape: {X_train.shape}')
logger.info(f'X_test.shape: {X_test.shape}')
logger.info(f'y_train.shape: {y_train.shape}, min., max.: {y_train.min()}, {y_train.max()}')
logger.info(f'y_test.shape: {y_test.shape}, min., max.: {y_test.min()}, {y_test.max()}')
np.save(os.path.join('data.npy'), data) |
def wrap_action(self, action):
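# Linearly rescales an action from [-1, 1] to [action_low, action_high].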
action = np.squeeze(action)
out = (((action * (self.action_high - self.action_low)) / 2) + ((self.action_high + self.action_low) / 2.0))
return out |
class InputHook(object):
def __init__(self):
super(InputHook, self).__init__()
self.inputs = None
def hook(self, module, input, output):
self.inputs = input
def clear(self):
self.inputs = None |
def test_log_volume() -> None:
box1 = BoxTensor(torch.tensor([[[1, 1], [3, 5]], [[1, 1], [3, 3]]]).float())
box2 = BoxTensor(torch.tensor([[[2, 0], [6, 2]], [[3, 2], [4, 4]]]).float())
volume_layer = HardVolume(log_scale=True)
expected1 = torch.tensor([2.07944, 1.3862]).float()
expected2 = torch.tensor([2.07944, 0.69314]).float()
res1 = volume_layer(box1)
res2 = volume_layer(box2)
assert torch.allclose(res1, expected1, rtol=0.0001)
assert torch.allclose(res2, expected2, rtol=0.0001) |
def conv3d(inputs, num_output_channels, kernel_size, scope, stride=[1, 1, 1], padding='SAME', use_xavier=True, stddev=0.001, weight_decay=0.0, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None):
with tf.variable_scope(scope) as sc:
(kernel_d, kernel_h, kernel_w) = kernel_size
num_in_channels = inputs.get_shape()[(- 1)].value
kernel_shape = [kernel_d, kernel_h, kernel_w, num_in_channels, num_output_channels]
kernel = _variable_with_weight_decay('weights', shape=kernel_shape, use_xavier=use_xavier, stddev=stddev, wd=weight_decay)
(stride_d, stride_h, stride_w) = stride
outputs = tf.nn.conv3d(inputs, kernel, [1, stride_d, stride_h, stride_w, 1], padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels], tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = batch_norm_for_conv3d(outputs, is_training, bn_decay=bn_decay, scope='bn')
if (activation_fn is not None):
outputs = activation_fn(outputs)
return outputs |
class STL(nn.Module):
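# Style-token layer: a bank of learned token embeddings attended over by the reference
# embedding via multi-head attention to produce a style embedding.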
def __init__(self, hp):
super().__init__()
self.embed = nn.Parameter(torch.FloatTensor(hp.token_num, (hp.E // hp.num_heads)))
d_q = (hp.E // 2)
d_k = (hp.E // hp.num_heads)
self.attention = MultiHeadAttention(query_dim=d_q, key_dim=d_k, num_units=hp.E, num_heads=hp.num_heads)
init.normal_(self.embed, mean=0, std=0.5)
def forward(self, inputs):
N = inputs.size(0)
query = inputs.unsqueeze(1)
keys = F.tanh(self.embed).unsqueeze(0).expand(N, (- 1), (- 1))
style_embed = self.attention(query, keys)
return style_embed |
class Optimizer():
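# Wraps a torch.optim optimizer selected by name plus a multi-step / cosine /
# cosine-with-warm-restarts LR scheduler chosen from `para`.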
def __init__(self, para, target):
trainable = target.parameters()
optimizer_name = para.optimizer
lr = para.lr
module = import_module('torch.optim')
self.optimizer = getattr(module, optimizer_name)(trainable, lr=lr)
try:
if (para.lr_scheduler == 'multi_step'):
milestones = para.milestones
gamma = para.decay_gamma
self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, milestones=milestones, gamma=gamma)
elif (para.lr_scheduler == 'cosine'):
print('using cosine scheduler')
self.scheduler = lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=para.end_epoch, eta_min=1e-08)
elif (para.lr_scheduler == 'cosineW'):
self.scheduler = lr_scheduler.CosineAnnealingWarmRestarts(self.optimizer, T_0=10, T_mult=2, eta_min=1e-08)
else:
raise NotImplementedError
except:
raise NotImplementedError
def get_lr(self):
return self.optimizer.param_groups[0]['lr']
def step(self):
self.optimizer.step()
def zero_grad(self):
self.optimizer.zero_grad()
def lr_schedule(self):
self.scheduler.step() |
def powell_bs(x):
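# Powell's badly scaled test function; expects a tensor-like x supporting .exp() (e.g. torch tensors).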
return (((((10000.0 * x[0]) * x[1]) - 1) ** 2) + ((((- x[0]).exp() + (- x[1]).exp()) - 1.0001) ** 2)) |
class DNN(models.Sequential):
def __init__(self, Nin, Nh_l, Nout):
super().__init__()
self.add(layers.Dense(Nh_l[0], activation='relu', input_shape=(Nin,), name='Hidden-1'))
self.add(layers.Dropout(0.01))
self.add(layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'))
self.add(layers.Dropout(0))
self.add(layers.Dense(Nout, activation='softmax'))
self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) |
def run_wrapper(submit_config: SubmitConfig) -> None:
is_local = (submit_config.submit_target == SubmitTarget.LOCAL)
if is_local:
logger = util.Logger(file_name=os.path.join(submit_config.run_dir, 'log.txt'), file_mode='w', should_flush=True)
else:
logger = util.Logger(file_name=None, should_flush=True)
import dnnlib
dnnlib.submit_config = submit_config
exit_with_errcode = False
try:
print('dnnlib: Running {0}() on {1}...'.format(submit_config.run_func_name, submit_config.host_name))
start_time = time.time()
run_func_obj = util.get_obj_by_name(submit_config.run_func_name)
assert callable(run_func_obj)
sig = inspect.signature(run_func_obj)
if ('submit_config' in sig.parameters):
run_func_obj(submit_config=submit_config, **submit_config.run_func_kwargs)
else:
run_func_obj(**submit_config.run_func_kwargs)
print('dnnlib: Finished {0}() in {1}.'.format(submit_config.run_func_name, util.format_time((time.time() - start_time))))
except:
if is_local:
raise
else:
traceback.print_exc()
log_src = os.path.join(submit_config.run_dir, 'log.txt')
log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), '{0}-error.txt'.format(submit_config.run_name))
shutil.copyfile(log_src, log_dst)
exit_with_errcode = True
finally:
open(os.path.join(submit_config.run_dir, '_finished.txt'), 'w').close()
dnnlib.RunContext.get().close()
dnnlib.submit_config = None
logger.close()
if exit_with_errcode:
sys.exit(1)
return submit_config |
def main_defense_script():
classifier_net = cifar_loader.load_pretrained_cifar_resnet(flavor=32)
classifier_net.eval()
cifar_normer = utils.DifferentiableNormalize(mean=config.CIFAR10_MEANS, std=config.CIFAR10_STDS)
if True:
FGSM_L_INF = (8.0 / 255.0)
FGSM_TRAINING_ATTACK_PROPORTION = 0.5
FGSM_TRAINING_EPOCHS = 10
fgsm_attack_loss = plf.VanillaXentropy(classifier_net, cifar_normer)
fgsm_xentropy_attack_obj = aa.FGSM(classifier_net, cifar_normer, fgsm_attack_loss)
fgsm_xentropy_attack_params = advtrain.AdversarialAttackParameters(fgsm_xentropy_attack_obj, FGSM_TRAINING_ATTACK_PROPORTION, {'attack_kwargs': {'l_inf_bound': FGSM_L_INF}})
half_fgsm_cifar = advtrain.AdversarialTraining(classifier_net, cifar_normer, 'half_fgsm_cifar', 'cifar_resnet32')
train_loss = nn.CrossEntropyLoss()
train_loader = cifar_loader.load_cifar_data('train', normalize=False)
half_fgsm_cifar.train(train_loader, FGSM_TRAINING_EPOCHS, train_loss, attack_parameters=fgsm_xentropy_attack_params, verbosity='snoop') |
class InPlane(ReSampleDomain):
def do_re_sample(self):
self.points = sc.Variables(in_line.sample_boundary(param_ranges=param_ranges, density=DENSITY, low_discrepancy=True)).to_torch_tensor_()
self.constraints = {'T': torch.ones_like(self.points['x'])} |
class SPVCNN(nn.Module):
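# Sparse point-voxel CNN: voxelized encoder (stem, stage1/2), two deconvolution up-stages
# with skip concatenation, and point-wise transforms fused back at each scale.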
def __init__(self, **kwargs):
super().__init__()
self.dropout = kwargs['dropout']
cr = kwargs.get('cr', 1.0)
cs = [32, 64, 128, 96, 96]
cs = [int((cr * x)) for x in cs]
if (('pres' in kwargs) and ('vres' in kwargs)):
self.pres = kwargs['pres']
self.vres = kwargs['vres']
self.stem = nn.Sequential(spnn.Conv3d(kwargs['in_channels'], cs[0], kernel_size=3, stride=1), spnn.BatchNorm(cs[0]), spnn.ReLU(True))
self.stage1 = nn.Sequential(BasicConvolutionBlock(cs[0], cs[0], ks=2, stride=2, dilation=1), ResidualBlock(cs[0], cs[1], ks=3, stride=1, dilation=1), ResidualBlock(cs[1], cs[1], ks=3, stride=1, dilation=1))
self.stage2 = nn.Sequential(BasicConvolutionBlock(cs[1], cs[1], ks=2, stride=2, dilation=1), ResidualBlock(cs[1], cs[2], ks=3, stride=1, dilation=1), ResidualBlock(cs[2], cs[2], ks=3, stride=1, dilation=1))
self.up1 = nn.ModuleList([BasicDeconvolutionBlock(cs[2], cs[3], ks=2, stride=2), nn.Sequential(ResidualBlock((cs[3] + cs[1]), cs[3], ks=3, stride=1, dilation=1), ResidualBlock(cs[3], cs[3], ks=3, stride=1, dilation=1))])
self.up2 = nn.ModuleList([BasicDeconvolutionBlock(cs[3], cs[4], ks=2, stride=2), nn.Sequential(ResidualBlock((cs[4] + cs[0]), cs[4], ks=3, stride=1, dilation=1), ResidualBlock(cs[4], cs[4], ks=3, stride=1, dilation=1))])
self.point_transforms = nn.ModuleList([nn.Sequential(nn.Linear(cs[0], cs[2]), nn.BatchNorm1d(cs[2]), nn.ReLU(True)), nn.Sequential(nn.Linear(cs[2], cs[4]), nn.BatchNorm1d(cs[4]), nn.ReLU(True))])
self.weight_initialization()
if self.dropout:
self.dropout = nn.Dropout(0.3, True)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, z):
x0 = initial_voxelize(z, self.pres, self.vres)
x0 = self.stem(x0)
z0 = voxel_to_point(x0, z, nearest=False)
z0.F = z0.F
x1 = point_to_voxel(x0, z0)
x1 = self.stage1(x1)
x2 = self.stage2(x1)
z1 = voxel_to_point(x2, z0)
z1.F = (z1.F + self.point_transforms[0](z0.F))
y3 = point_to_voxel(x2, z1)
if self.dropout:
y3.F = self.dropout(y3.F)
y3 = self.up1[0](y3)
y3 = torchsparse.cat([y3, x1])
y3 = self.up1[1](y3)
y4 = self.up2[0](y3)
y4 = torchsparse.cat([y4, x0])
y4 = self.up2[1](y4)
z3 = voxel_to_point(y4, z1)
z3.F = (z3.F + self.point_transforms[1](z1.F))
return z3.F |
@deprecated('Please use `bigdl.orca.automl.hp` instead.')  # assumption: a `deprecated` decorator; only its message survives in the source
class MTNetGridRandomRecipe(Recipe):
def __init__(self, num_rand_samples=1, epochs=5, training_iteration=10, time_step=[3, 4], long_num=[3, 4], cnn_height=[2, 3], cnn_hid_size=[32, 50, 100], ar_size=[2, 3], batch_size=[32, 64]):
super(self.__class__, self).__init__()
self.num_samples = num_rand_samples
self.training_iteration = training_iteration
self.lr = hp.uniform(0.001, 0.01)
self.batch_size = hp.grid_search(batch_size)
self.epochs = epochs
self.cnn_dropout = hp.uniform(0.2, 0.5)
self.rnn_dropout = hp.uniform(0.2, 0.5)
self.time_step = hp.choice(time_step)
self.long_num = hp.choice(long_num)
self.cnn_height = hp.choice(cnn_height)
self.cnn_hid_size = hp.choice(cnn_hid_size)
self.ar_size = hp.choice(ar_size)
self.past_seq_len = hp.sample_from((lambda spec: ((spec.config.long_num + 1) * spec.config.time_step)))
def search_space(self):
return {'model': 'MTNet', 'lr': self.lr, 'batch_size': self.batch_size, 'epochs': self.epochs, 'cnn_dropout': self.cnn_dropout, 'rnn_dropout': self.rnn_dropout, 'time_step': self.time_step, 'long_num': self.long_num, 'ar_size': self.ar_size, 'past_seq_len': self.past_seq_len, 'cnn_hid_size': self.cnn_hid_size, 'cnn_height': self.cnn_height} |
def strip_tokenizer_prefix(model_config, token, ellipsis_partial_tokens=False):
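# Note: str.lstrip removes any leading characters drawn from its argument, not the literal prefix;
# behaviour is kept as-is here, though a prefix-aware removal may be intended.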
token = token.lstrip(model_config['token_prefix'])
token = token.lstrip(model_config['partial_token_prefix'])
token = token.lstrip(' ')
return token |
class Exponential(Scheduler):
def __init__(self, decay_step: int, decay_rate: float, stair_case: bool=False) -> None:
from bigdl.dllib.optim.optimizer import Exponential as BExponential
self.scheduler = BExponential(decay_step, decay_rate, stair_case)
def get_scheduler(self) -> 'optimizer.Exponential':
return self.scheduler |
def find_sub_seq(seq_a, seq_b, shift=0, uncased=False, lemmatizer=None):
if uncased:
seq_a = [token.lower() for token in seq_a]
seq_b = [token.lower() for token in seq_b]
if (lemmatizer is not None):
seq_a = [lemmatizer.lemmatize(token) for token in seq_a]
seq_b = [lemmatizer.lemmatize(token) for token in seq_b]
for i in range(shift, len(seq_a)):
if (seq_a[i:(i + len(seq_b))] == seq_b):
return (i, (i + len(seq_b)))
return ((- 1), (- 1)) |
class NormLayer(nn.Module):
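# Normalizes inputs with fixed mean/std; the defaults (0.1307, 0.3081) are the usual MNIST statistics.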
def __init__(self, mu=0.1307, std=0.3081):
super(NormLayer, self).__init__()
self.mean = mu
self.std = std
def forward(self, x):
return ((x - self.mean) / self.std) |
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device('cuda', hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info('device: {} n_gpu: {}, rank: {}, 16-bits training: {}'.format(device, n_gpu, hvd.rank(), opts.fp16))
if (opts.gradient_accumulation_steps < 1):
raise ValueError('Invalid gradient_accumulation_steps parameter: {}, should be >= 1'.format(opts.gradient_accumulation_steps))
set_random_seed(opts.seed)
ans2label = json.load(open(f'{dirname(abspath(__file__))}/misc/ans2label.json'))
label2ans = {label: ans for (ans, label) in ans2label.items()}
all_img_dbs = ImageLmdbGroup(opts.conf_th, opts.max_bb, opts.min_bb, opts.num_bb, opts.compressed_db)
LOGGER.info(f'Loading Train Dataset {opts.train_txt_dbs}, {opts.train_img_dbs}')
train_datasets = []
for (txt_path, img_path) in zip(opts.train_txt_dbs, opts.train_img_dbs):
img_db = all_img_dbs[img_path]
txt_db = TxtTokLmdb(txt_path, opts.max_txt_len)
train_datasets.append(VqaDataset(len(ans2label), txt_db, img_db))
train_dataset = ConcatDatasetWithLens(train_datasets)
train_dataloader = build_dataloader(train_dataset, vqa_collate, True, opts)
LOGGER.info(f'Loading Train Dataset {opts.val_txt_db}, {opts.val_img_db}')
val_img_db = all_img_dbs[opts.val_img_db]
val_txt_db = TxtTokLmdb(opts.val_txt_db, (- 1))
val_dataset = VqaEvalDataset(len(ans2label), val_txt_db, val_img_db)
val_dataloader = build_dataloader(val_dataset, vqa_eval_collate, False, opts)
if opts.checkpoint:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
all_dbs = (opts.train_txt_dbs + [opts.val_txt_db])
toker = json.load(open(f'{all_dbs[0]}/meta.json'))['bert']
assert all(((toker == json.load(open(f'{db}/meta.json'))['bert']) for db in all_dbs))
model = UniterForVisualQuestionAnswering.from_pretrained(opts.model_config, checkpoint, img_dim=IMG_DIM, num_answer=len(ans2label))
model.to(device)
broadcast_tensors([p.data for p in model.parameters()], 0)
set_dropout(model, opts.dropout)
optimizer = build_optimizer(model, opts)
(model, optimizer) = amp.initialize(model, optimizer, enabled=opts.fp16, opt_level='O2')
global_step = 0
if (rank == 0):
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
json.dump(ans2label, open(join(opts.output_dir, 'ckpt', 'ans2label.json'), 'w'))
os.makedirs(join(opts.output_dir, 'results'))
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
LOGGER.info(f'***** Running training with {n_gpu} GPUs *****')
LOGGER.info(' Num examples = %d', (len(train_dataset) * hvd.size()))
LOGGER.info(' Batch size = %d', opts.train_batch_size)
LOGGER.info(' Accumulate steps = %d', opts.gradient_accumulation_steps)
LOGGER.info(' Num steps = %d', opts.num_train_steps)
running_loss = RunningMeter('loss')
model.train()
n_examples = 0
n_epoch = 0
start = time()
optimizer.zero_grad()
optimizer.step()
while True:
for (step, batch) in enumerate(train_dataloader):
n_examples += batch['input_ids'].size(0)
loss = model(batch, compute_loss=True)
loss = (loss.mean() * batch['targets'].size(1))
delay_unscale = (((step + 1) % opts.gradient_accumulation_steps) != 0)
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale) as scaled_loss:
scaled_loss.backward()
if (not delay_unscale):
grads = [p.grad.data for p in model.parameters() if (p.requires_grad and (p.grad is not None))]
all_reduce_and_rescale_tensors(grads, float(1))
running_loss(loss.item())
if (((step + 1) % opts.gradient_accumulation_steps) == 0):
global_step += 1
lr_this_step = get_lr_sched(global_step, opts)
for (i, param_group) in enumerate(optimizer.param_groups):
if ((i == 0) or (i == 1)):
param_group['lr'] = (lr_this_step * opts.lr_mul)
elif ((i == 2) or (i == 3)):
param_group['lr'] = lr_this_step
else:
raise ValueError()
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
losses = all_gather_list(running_loss)
running_loss = RunningMeter('loss', (sum((l.val for l in losses)) / len(losses)))
TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
TB_LOGGER.step()
if (opts.grad_norm != (- 1)):
grad_norm = clip_grad_norm_(amp.master_params(optimizer), opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if ((global_step % 100) == 0):
LOGGER.info(f'Step {global_step}')
tot_ex = sum(all_gather_list(n_examples))
ex_per_sec = int((tot_ex / (time() - start)))
LOGGER.info(f'{tot_ex} examples trained at {ex_per_sec} ex/s')
TB_LOGGER.add_scalar('perf/ex_per_s', ex_per_sec, global_step)
LOGGER.info(f'')
if ((global_step % opts.valid_steps) == 0):
(val_log, results) = validate(model, val_dataloader, label2ans)
with open(f'{opts.output_dir}/results/results_{global_step}_rank{rank}.json', 'w') as f:
json.dump(results, f)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
if (global_step >= opts.num_train_steps):
break
if (global_step >= opts.num_train_steps):
break
n_epoch += 1
LOGGER.info(f'finished {n_epoch} epochs')
(val_log, results) = validate(model, val_dataloader, label2ans)
with open(f'{opts.output_dir}/results/results_{global_step}_rank{rank}_final.json', 'w') as f:
json.dump(results, f)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, f'{global_step}_final') |
def configure_setup(dataset_name, split_seed):
logger.info('Evaluating dataset: {} split seed: {}'.format(dataset_name, split_seed))
setup = get_setup()
setup['datasets'] = [dataset_name]
setup['split_seed'] = split_seed
dump_yaml(setup, os.getcwd(), 'setup.yml') |
def test_osipkovmerritt_nfw_dens_massprofile():
pot = potential.NFWPotential(amp=2.3, a=1.3)
ras = [2.3, 5.7]
for ra in ras:
dfh = osipkovmerrittNFWdf(pot=pot, ra=ra)
numpy.random.seed(10)
samp = dfh.sample(n=100000)
tol = (5 * 0.001)
check_spherical_massprofile(samp, (lambda r: (pot.mass(r) / pot.mass(numpy.amax(samp.r())))), tol, skip=1000)
return None |
class NormalizeVideo(object):
def __init__(self, mean, std, inplace=False):
self.mean = mean
self.std = std
self.inplace = inplace
def __call__(self, clip):
return F.normalize(clip, self.mean, self.std, self.inplace)
def __repr__(self):
return (self.__class__.__name__ + '(mean={0}, std={1}, inplace={2})'.format(self.mean, self.std, self.inplace)) |
def get_windows_from_folder(folder, width=56, compressed=True):
lvls = get_lvls(folder)
(str2index, index2str, types) = build_indeces(lvls, compressed)
windows = get_windows(lvls, width, str2index)
return (windows, types, index2str) |
class IMSATTrainer(_Trainer):
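# IMSAT trainer: combines a self-augmented/VAT perturbation loss with a mutual-information
# objective over multiple sub-heads; validation uses Hungarian matching to compute accuracy.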
def __init__(self, model: Model, train_loader: DataLoader, val_loader: DataLoader, max_epoch: int=1, save_dir: str='./runs/IMSAT', checkpoint_path: str=None, device='cpu', config: dict=None) -> None:
super().__init__(model, train_loader, val_loader, max_epoch, save_dir, checkpoint_path, device, config)
self.SAT_criterion = Perturbation_Loss()
self.MI_criterion = MultualInformaton_IMSAT()
nearest_dict = np.loadtxt((Path(PROJECT_PATH) / 'playground/IMSAT/10th_neighbor.txt'))
self.nearest_dict = torch.from_numpy(nearest_dict).float().to(self._device)
def __init_meters__(self) -> List[str]:
METER_CONFIG = {'train_adv_loss': AverageValueMeter(), 'train_sat_loss': AverageValueMeter(), 'train_mi_loss': AverageValueMeter(), 'val_avg_acc': AverageValueMeter(), 'val_best_acc': AverageValueMeter(), 'val_worst_acc': AverageValueMeter()}
self.METERINTERFACE = MeterInterface(METER_CONFIG)
return ['train_mi_loss_mean', 'train_sat_loss_mean', 'train_adv_loss_mean', ['val_avg_acc_mean', 'val_best_acc_mean', 'val_worst_acc_mean']]
def _training_report_dict(self):
report_dict = {'mi': self.METERINTERFACE['train_mi_loss'].summary()['mean'], 'sat': self.METERINTERFACE['train_sat_loss'].summary()['mean'], 'adv': self.METERINTERFACE['train_adv_loss'].summary()['mean']}
return report_dict
def _eval_report_dict(self):
report_dict = {'val_avg_acc': self.METERINTERFACE.val_avg_acc.summary()['mean'], 'val_best_acc': self.METERINTERFACE.val_best_acc.summary()['mean'], 'val_worst_acc': self.METERINTERFACE.val_worst_acc.summary()['mean']}
return report_dict
def _train_loop(self, train_loader=None, epoch=0, mode: ModelMode=ModelMode.TRAIN, **kwargs):
self._model.set_mode(mode)
assert self._model.training, f'Model should be in train() mode, given {self._model.training}.'
train_loader_: tqdm = tqdm_(train_loader)
train_loader_.set_description(f'Training epoch: {epoch}')
for (batch, image_labels) in enumerate(train_loader_):
(images, _, (index, *_)) = list(zip(*image_labels))
tf1_images = torch.cat([images[0] for _ in range((images.__len__() - 1))], dim=0).to(self._device)
tf2_images = torch.cat(images[1:], dim=0).to(self._device).to(self._device)
index = torch.cat([index for _ in range((images.__len__() - 1))], dim=0)
assert (tf1_images.shape == tf2_images.shape)
tf1_pred_logit = self._model.torchnet(tf1_images)
tf2_pred_logit = self._model.torchnet(tf2_images)
assert (assert_list(simplex, tf1_pred_logit) and (tf1_pred_logit[0].shape == tf2_pred_logit[0].shape))
sat_losses = []
ml_losses = []
for (subhead_num, (tf1_pred, tf2_pred)) in enumerate(zip(tf1_pred_logit, tf2_pred_logit)):
sat_loss = self.SAT_criterion(tf2_pred, tf1_pred.detach())
sat_losses.append(sat_loss)
(ml_loss, *_) = self.MI_criterion(tf1_pred)
ml_losses.append(ml_loss)
ml_losses = (sum(ml_losses) / len(ml_losses))
sat_losses = (sum(sat_losses) / len(sat_losses))
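# Virtual adversarial training (VAT) regulariser on tf1_images; combined below with the negated, weighted mutual-information term.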
VAT_generator = VATLoss_Multihead(eps=10)
(vat_loss, adv_tf1_images, _) = VAT_generator(self._model.torchnet, tf1_images)
batch_loss: torch.Tensor = (vat_loss - (0.1 * ml_losses))
self.METERINTERFACE['train_mi_loss'].add(ml_losses.item())
self.METERINTERFACE['train_sat_loss'].add(sat_losses.item())
self.METERINTERFACE['train_adv_loss'].add(vat_loss.item())
self._model.zero_grad()
batch_loss.backward()
self._model.step()
report_dict = self._training_report_dict()
train_loader_.set_postfix(report_dict)
def _eval_loop(self, val_loader: DataLoader=None, epoch: int=0, mode: ModelMode=ModelMode.EVAL, **kwargs) -> float:
self._model.set_mode(mode)
assert (not self._model.training), f'Model should be in eval model in _eval_loop, given {self._model.training}.'
val_loader_: tqdm = tqdm_(val_loader)
preds = torch.zeros(self._model.arch_dict['num_sub_heads'], val_loader.dataset.__len__(), dtype=torch.long, device=self._device)
target = torch.zeros(val_loader.dataset.__len__(), dtype=torch.long, device=self._device)
slice_done = 0
subhead_accs = []
val_loader_.set_description(f'Validating epoch: {epoch}')
for (batch, image_labels) in enumerate(val_loader_):
(images, gt, *_) = list(zip(*image_labels))
(images, gt) = (images[0].to(self._device), gt[0].to(self._device))
_pred = self._model.torchnet(images)
assert (assert_list(simplex, _pred) and (_pred.__len__() == self._model.arch_dict['num_sub_heads']))
bSlicer = slice(slice_done, (slice_done + images.shape[0]))
for subhead in range(self._model.arch_dict['num_sub_heads']):
preds[subhead][bSlicer] = _pred[subhead].max(1)[1]
target[bSlicer] = gt
slice_done += gt.shape[0]
assert (slice_done == val_loader.dataset.__len__()), 'Slice not completed.'
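# Hungarian matching aligns each sub-head's predicted cluster ids with the ground-truth labels before computing flat accuracy.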
for subhead in range(self._model.arch_dict['num_sub_heads']):
(reorder_pred, remap) = hungarian_match(flat_preds=preds[subhead], flat_targets=target, preds_k=self._model.arch_dict['output_k_B'], targets_k=self._model.arch_dict['output_k_B'])
_acc = flat_acc(reorder_pred, target)
subhead_accs.append(_acc)
self.METERINTERFACE.val_avg_acc.add(_acc)
self.METERINTERFACE.val_best_acc.add(max(subhead_accs))
self.METERINTERFACE.val_worst_acc.add(min(subhead_accs))
report_dict = self._eval_report_dict()
report_dict_str = ', '.join([f'{k}:{v:.3f}' for (k, v) in report_dict.items()])
print(f'Validating epoch: {epoch} : {report_dict_str}')
return self.METERINTERFACE.val_best_acc.summary()['mean'] |
def get_1x_lr_params(model):
b = [model.xception_features]
for i in range(len(b)):
for k in b[i].parameters():
if k.requires_grad:
(yield k) |
def make_builder(out_file, impl, vocab_size=None):
if (impl == 'mmap'):
return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size))
else:
return IndexedDatasetBuilder(out_file) |
def test_fuse_conv_bn():
inputs = torch.rand((1, 3, 5, 5))
modules = nn.ModuleList()
modules.append(nn.BatchNorm2d(3))
modules.append(ConvModule(3, 5, 3, norm_cfg=dict(type='BN')))
modules.append(ConvModule(5, 5, 3, norm_cfg=dict(type='BN')))
modules = nn.Sequential(*modules)
fused_modules = fuse_conv_bn(modules)
assert torch.equal(modules(inputs), fused_modules(inputs)) |
class Evaluation(object):
def __init__(self, split_tag, instrType, mapFile=''):
self.error_margin = 3.0
self.splits = split_tag
bboxDir = osp.join(file_path, 'data', 'BBox')
(self.objProposals, self.obj2viewpoint) = self.loadObjProposals(bboxDir)
self.gt = {}
self.instr_ids = []
self.scans = []
self.instrType = instrType
for item in load_datasets([split_tag]):
self.gt[item['id']] = item
self.scans.append(item['scan'])
self.instr_ids += [('%s_%d' % (item['id'], i)) for i in range(len(item[instrType]))]
self.scans = set(self.scans)
self.instr_ids = set(self.instr_ids)
self.graphs = load_nav_graphs(self.scans)
self.distances = {}
for (scan, G) in self.graphs.items():
self.distances[scan] = dict(nx.all_pairs_dijkstra_path_length(G))
def _get_nearest(self, scan, goal_id, path):
near_id = path[0][0]
near_d = self.distances[scan][near_id][goal_id]
for item in path:
d = self.distances[scan][item[0]][goal_id]
if (d < near_d):
near_id = item[0]
near_d = d
return near_id
def _score_item(self, instr_id, item_in, evalType):
path = item_in['trajectory']
if (evalType == 'whole'):
predObjId = item_in['predObjId']
if (self.splits == 'test'):
gt = self.gt[instr_id.split('_')[0]]
else:
gt = self.gt[((instr_id.split('_')[0] + '_') + instr_id.split('_')[1])]
objId = str(gt['objId'])
start = gt['path'][0]
assert (start == path[0][0]), print(item_in)
goal = gt['path'][(- 1)]
scan = gt['scan']
candidate_vps = []
for cvp in self.obj2viewpoint[((scan + '_') + objId)]:
if self.distances[scan][start].__contains__(cvp):
candidate_vps.append(cvp)
if (evalType == 'whole'):
if (objId == str(predObjId)):
self.scores['rgs'].append(1)
else:
self.scores['rgs'].append(0)
if self.objProposals.__contains__(((scan + '_') + path[(- 1)][0])):
if (objId in self.objProposals[((scan + '_') + path[(- 1)][0])]['objId']):
self.scores['visible'].append(1)
else:
self.scores['visible'].append(0)
else:
self.scores['visible'].append(0)
oracle_succ = 0
for passvp in path:
if self.objProposals.__contains__(((scan + '_') + passvp[0])):
if (objId in self.objProposals[((scan + '_') + passvp[0])]['objId']):
oracle_succ = 1
break
self.scores['oracle_visible'].append(oracle_succ)
distance = 0
prev = path[0]
for curr in path[1:]:
distance += self.distances[gt['scan']][prev[0]][curr[0]]
prev = curr
self.scores['trajectory_lengths'].append(distance)
self.scores['shortest_path_lengths'].append(self.distances[gt['scan']][start][goal])
def loadObjProposals(self, bboxDir):
objProposals = {}
obj2viewpoint = {}
for efile in os.listdir(bboxDir):
if efile.endswith('.json'):
with open(osp.join(bboxDir, efile)) as f:
scan = efile.split('_')[0]
(scanvp, _) = efile.split('.')
data = json.load(f)
for (vp, vv) in data.items():
for (objid, objinfo) in vv.items():
if objinfo['visible_pos']:
if obj2viewpoint.__contains__(((scan + '_') + objid)):
if (vp not in obj2viewpoint[((scan + '_') + objid)]):
obj2viewpoint[((scan + '_') + objid)].append(vp)
else:
obj2viewpoint[((scan + '_') + objid)] = [vp]
if objProposals.__contains__(scanvp):
for (ii, bbox) in enumerate(objinfo['bbox2d']):
objProposals[scanvp]['bbox'].append(bbox)
objProposals[scanvp]['visible_pos'].append(objinfo['visible_pos'][ii])
objProposals[scanvp]['objId'].append(objid)
else:
objProposals[scanvp] = {'bbox': objinfo['bbox2d'], 'visible_pos': objinfo['visible_pos']}
objProposals[scanvp]['objId'] = []
for _ in objinfo['visible_pos']:
objProposals[scanvp]['objId'].append(objid)
return (objProposals, obj2viewpoint)
def score(self, output_file, evalType):
self.scores = defaultdict(list)
instr_ids = set(self.instr_ids)
with open(output_file) as f:
data = json.load(f)
for item in data:
if (item['instr_id'] in instr_ids):
instr_ids.remove(item['instr_id'])
self._score_item(item['instr_id'], item, evalType)
assert (len(instr_ids) == 0), ('Missing %d of %d instruction ids from %s - not in %s' % (len(instr_ids), len(self.instr_ids), ','.join(self.splits), output_file))
if (evalType == 'whole'):
assert (len(self.scores['rgs']) == len(self.instr_ids))
num_rgs = sum(self.scores['rgs'])
num_successes = sum(self.scores['visible'])
oracle_successes = sum(self.scores['oracle_visible'])
spls = []
for (visible, length, sp) in zip(self.scores['visible'], self.scores['trajectory_lengths'], self.scores['shortest_path_lengths']):
if visible:
spls.append((sp / max(length, sp)))
else:
spls.append(0)
if (evalType == 'whole'):
wrgs = []
for (rgs, length, sp) in zip(self.scores['rgs'], self.scores['trajectory_lengths'], self.scores['shortest_path_lengths']):
if rgs:
wrgs.append((sp / max(length, sp)))
else:
wrgs.append(0)
score_summary = {'length': np.average(self.scores['trajectory_lengths']), 'success_rate': (float(num_successes) / float(len(self.scores['visible']))), 'oracle success_rate': (float(oracle_successes) / float(len(self.scores['oracle_visible']))), 'spl': np.average(spls)}
if (evalType == 'whole'):
score_summary['rgs'] = (float(num_rgs) / float(len(self.scores['rgs'])))
score_summary['rgspl'] = np.average(wrgs)
return score_summary |
class INFALL(object):
def __init__(self, t, sfr):
self.t = t
self.sfr = sfr
def constant(self, paramet=1):
amount = paramet
self.infall = (np.zeros(len(self.t)) + amount)
def linear(self, paramet=(6.3, (- 0.5))):
(start, slope) = paramet
self.infall = (start + (slope * self.t))  # assumed completion: linear infall rate as a function of time
def polynomial(self, paramet=[(- 0.003), 0.03, (- 0.3), 5.0]):
coeff = paramet
poly = np.poly1d(coeff)
self.infall = poly(self.t)  # assumed completion: polynomial infall rate evaluated over time
def gamma_function(self, mass_factor=1, a_parameter=2, loc=0, scale=3):
from scipy.stats import gamma
self.infall = gamma.pdf(self.t, a_parameter, loc, scale)
norm = (sum(self.sfr) * mass_factor)
self.infall = np.divide((self.infall * norm), sum(self.infall))
def exponential(self, paramet=((- 0.24), 0.0, 1.0)):
(b, d, e) = paramet
sfr_norm = e
norm = (sum(self.sfr) * sfr_norm)
self.infall = (np.exp((b * self.t)) + d)
self.infall = np.divide((self.infall * norm), sum(self.infall))
def sfr_related(self):
self.infall = np.zeros_like(self.sfr) |
class FactorGNNZinc(nn.Module):
def __init__(self, g, num_layers, in_dim, num_hidden, num_latent, feat_drop, residual, num_atom_type, num_bond_type):
super(FactorGNNZinc, self).__init__()
self.g = g
self.layers = nn.ModuleList()
self.BNs = nn.ModuleList()
self.feat_drop = feat_drop
self.num_atom_type = num_atom_type
self.num_bond_type = num_bond_type
self.activate = torch.nn.LeakyReLU(negative_slope=0.2)
self.embed = nn.Embedding(self.num_atom_type, in_dim)
self.layers.append(DisentangleLayer(num_latent, in_dim, num_hidden, cat=True))
self.BNs.append(nn.BatchNorm1d(num_hidden))
self.layers.append(DisentangleLayer(num_latent, num_hidden, num_hidden, cat=True))
self.BNs.append(nn.BatchNorm1d(num_hidden))
self.layers.append(DisentangleLayer(max((num_latent // 2), 1), num_hidden, num_hidden, cat=True))
self.BNs.append(nn.BatchNorm1d(num_hidden))
self.layers.append(DisentangleLayer(max(((num_latent // 2) // 2), 1), num_hidden, num_hidden, cat=True))
self.BNs.append(nn.BatchNorm1d(num_hidden))
self.layers.append(None)
self.BNs.append(None)
self.BN1 = nn.BatchNorm1d(num_hidden)
self.regressor1 = nn.Linear(num_hidden, (num_hidden // 2))
self.regressor2 = nn.Linear((num_hidden // 2), 1)
def forward(self, x, e, snorm_n, snorm_e):
feat = self.embed(x)
for (layer, bn) in zip(self.layers[:(- 1)], self.BNs[:(- 1)]):
feat_prim = feat
feat = torch_fn.dropout(feat, self.feat_drop)
feat = layer(self.g, feat)
feat = (feat * snorm_n)
feat = bn(feat)
feat = self.activate(feat)
feat = torch_fn.dropout(feat, self.feat_drop)
self.g.ndata['h'] = feat
h = dgl.mean_nodes(self.g, 'h')
h = torch.relu(h)
h = self.regressor1(h)
h = torch.relu(h)
h = self.regressor2(h)
return h
def get_factor(self):
factor_list = []
for layer in self.layers:
if isinstance(layer, DisentangleLayer):
factor_list.append(layer.get_factor())
return factor_list
def compute_disentangle_loss(self):
loss_list = []
for layer in self.layers:
if isinstance(layer, DisentangleLayer):
loss_list.append(layer.compute_disentangle_loss())
return loss_list
def merge_loss(list_loss):
total_loss = 0
for loss in list_loss:
(discrimination_loss, distribution_loss) = (loss[0], loss[1])
total_loss += discrimination_loss
return total_loss |
@LOSSES.register_module(force=True)  # registry name assumed
class DiceLoss(nn.Module):
def __init__(self, use_sigmoid=True, activate=True, reduction='mean', naive_dice=False, loss_weight=1.0, eps=0.001):
super(DiceLoss, self).__init__()
self.use_sigmoid = use_sigmoid
self.reduction = reduction
self.naive_dice = naive_dice
self.loss_weight = loss_weight
self.eps = eps
self.activate = activate
def forward(self, pred, target, weight=None, reduction_override=None, avg_factor=None):
assert (reduction_override in (None, 'none', 'mean', 'sum'))
reduction = (reduction_override if reduction_override else self.reduction)
if self.activate:
if self.use_sigmoid:
pred = pred.sigmoid()
else:
raise NotImplementedError
if self.naive_dice:
loss = (self.loss_weight * naive_dice_loss(pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor))
else:
loss = (self.loss_weight * dice_loss(pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor))
return loss |
def str2bool(v):
if (v.lower() in ('yes', 'true', 't', 'y', '1')):
return True
elif (v.lower() in ('no', 'false', 'f', 'n', '0')):
return False
else:
raise argparse.ArgumentTypeError('Unsupported value encountered.') |
def test_captured_utf8_4byte_offset1(capsys):
msg = '\U0010ffff'
msg = ('1' + (msg * ((1024 // len(msg)) + 1)))
m.captured_output_default(msg)
(stdout, stderr) = capsys.readouterr()
assert (stdout == msg)
assert (stderr == '') |
def get_compiler() -> str:
compiler = 'gcc'
if (shutil.which(compiler) is None):
compiler = 'cc'
if (shutil.which(compiler) is None):
raise ValueError('compiler (`gcc` or `cc`) not present. install one first.')
return compiler |
class WatchModel(Callback):
def __init__(self, log: str='gradients', log_freq: int=100):
self.log = log
self.log_freq = log_freq
@rank_zero_only
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
logger.watch(model=trainer.model, log=self.log, log_freq=self.log_freq) |
def build_tokenizer(args):
global tokenizer
if (tokenizer is None):
tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model)
logger.info('Build tokenizer from {}'.format(args.pretrained_model)) |
class KITTI_RAW(object):
def __init__(self, data_dir, static_frames_txt, test_scenes_txt):
self.data_dir = data_dir
self.static_frames_txt = static_frames_txt
self.test_scenes_txt = test_scenes_txt
def __len__(self):
raise NotImplementedError
def collect_static_frame(self):
f = open(self.static_frames_txt)
static_frames = {}
for line in f.readlines():
line = line.strip()
(date, drive, frame_id) = line.split(' ')
curr_fid = ('%.10d' % int(frame_id))
if (os.path.join(date, drive) not in static_frames.keys()):
static_frames[os.path.join(date, drive)] = []
static_frames[os.path.join(date, drive)].append(curr_fid)
return static_frames
def collect_test_scenes(self):
f = open(self.test_scenes_txt)
test_scenes = []
for line in f.readlines():
line = line.strip()
test_scenes.append(line)
return test_scenes
def prepare_data_mp(self, output_dir, stride=1):
num_processes = 8
processes = []
q = mp.Queue()
static_frames = self.collect_static_frame()
test_scenes = self.collect_test_scenes()
if (not os.path.isfile(os.path.join(output_dir, 'train.txt'))):
os.makedirs(output_dir)
print('Preparing sequence data....')
if (not os.path.isdir(self.data_dir)):
raise FileNotFoundError(self.data_dir)
dirlist = os.listdir(self.data_dir)
total_dirlist = []
for d in dirlist:
seclist = os.listdir(os.path.join(self.data_dir, d))
for s in seclist:
if os.path.isdir(os.path.join(self.data_dir, d, s)):
total_dirlist.append(os.path.join(d, s))
q.put(os.path.join(d, s))
for rank in range(num_processes):
p = mp.Process(target=process_folder, args=(q, static_frames, test_scenes, self.data_dir, output_dir, stride))
p.start()
processes.append(p)
for p in processes:
p.join()
f = open(os.path.join(output_dir, 'train.txt'), 'w')
for date in os.listdir(output_dir):
if os.path.isdir(os.path.join(output_dir, date)):
drives = os.listdir(os.path.join(output_dir, date))
for d in drives:
train_file = open(os.path.join(output_dir, date, d, 'train.txt'), 'r')
for l in train_file.readlines():
f.write(l)
for date in os.listdir(self.data_dir):
command = ((('cp ' + os.path.join(self.data_dir, date, 'calib_cam_to_cam.txt')) + ' ') + os.path.join(output_dir, date, 'calib_cam_to_cam.txt'))
os.system(command)
print('Data Preparation Finished.')
def prepare_data(self, output_dir):
static_frames = self.collect_static_frame()
test_scenes = self.collect_test_scenes()
if (not os.path.isfile(os.path.join(output_dir, 'train.txt'))):
os.makedirs(output_dir)
f = open(os.path.join(output_dir, 'train.txt'), 'w')
print('Preparing sequence data....')
if (not os.path.isdir(self.data_dir)):
raise FileNotFoundError(self.data_dir)
dirlist = os.listdir(self.data_dir)
total_dirlist = []
for d in dirlist:
seclist = os.listdir(os.path.join(self.data_dir, d))
for s in seclist:
if os.path.isdir(os.path.join(self.data_dir, d, s)):
total_dirlist.append(os.path.join(d, s))
for folder in tqdm(total_dirlist):
if (folder in static_frames.keys()):
static_ids = static_frames[folder]
else:
static_ids = []
scene = folder.split('/')[1]
if (scene in test_scenes):
continue
image_path = os.path.join(self.data_dir, folder, 'image_02/data')
dump_image_path = os.path.join(output_dir, folder)
if (not os.path.isdir(dump_image_path)):
os.makedirs(dump_image_path)
numbers = len(os.listdir(image_path))
for n in range((numbers - 2)):
s_idx = n
m_idx = (s_idx + 1)
e_idx = (s_idx + 2)
if ((('%.10d' % s_idx) in static_ids) or (('%.10d' % e_idx) in static_ids) or (('%.10d' % m_idx) in static_ids)):
print(('%.10d' % s_idx))
continue
curr_image = imageio.imread((os.path.join(image_path, ('%.10d' % s_idx)) + '.png'))
midd_image = imageio.imread((os.path.join(image_path, ('%.10d' % m_idx)) + '.png'))
next_image = imageio.imread((os.path.join(image_path, ('%.10d' % e_idx)) + '.png'))
seq_images = np.concatenate([curr_image, midd_image, next_image], axis=0)
imageio.imsave((os.path.join(dump_image_path, ('%.10d' % s_idx)) + '.png'), seq_images.astype('uint8'))
date = folder.split('/')[0]
f.write(('%s %s\n' % ((os.path.join(folder, ('%.10d' % s_idx)) + '.png'), os.path.join(date, 'calib_cam_to_cam.txt'))))
print(folder)
for date in os.listdir(self.data_dir):
command = ((('cp ' + os.path.join(self.data_dir, date, 'calib_cam_to_cam.txt')) + ' ') + os.path.join(output_dir, date, 'calib_cam_to_cam.txt'))
os.system(command)
return os.path.join(output_dir, 'train.txt')
def __getitem__(self, idx):
raise NotImplementedError |
class Chebbase_prop(MessagePassing):
def __init__(self, K, q, bias=True, **kwargs):
super(Chebbase_prop, self).__init__(aggr='add', **kwargs)
self.K = K
self.temp = Parameter(torch.Tensor((self.K + 1)))
self.reset_parameters()
self.q = q
def reset_parameters(self):
self.temp.data.fill_(0.0)
self.temp.data[0] = 1.0
def forward(self, x, edge_index, edge_weight=None):
coe = self.temp
(edge_index1, norm1) = get_laplacian(edge_index, edge_weight, normalization='sym', dtype=x.dtype, num_nodes=x.size(self.node_dim))
(edge_index_tilde, norm_tilde) = add_self_loops(edge_index1, norm1, fill_value=(- 1.0), num_nodes=x.size(self.node_dim))
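# Chebyshev recurrence on the shifted Laplacian L_tilde = L - I: T_0 = x, T_1 = L_tilde x, T_k = 2 * L_tilde * T_{k-1} - T_{k-2}; coefficient i is damped by 1 / i**q.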
Tx_0 = x
Tx_1 = self.propagate(edge_index_tilde, x=x, norm=norm_tilde, size=None)
out = ((coe[0] * Tx_0) + (coe[1] * Tx_1))
for i in range(2, (self.K + 1)):
Tx_2 = self.propagate(edge_index_tilde, x=Tx_1, norm=norm_tilde, size=None)
Tx_2 = ((2 * Tx_2) - Tx_0)
out = (out + ((coe[i] / (i ** self.q)) * Tx_2))
(Tx_0, Tx_1) = (Tx_1, Tx_2)
return out
def message(self, x_j, norm):
return (norm.view((- 1), 1) * x_j)
def __repr__(self):
return '{}(K={}, temp={})'.format(self.__class__.__name__, self.K, self.temp) |
class runningScore_recall(object):
def __init__(self, n_classes):
self.n_classes = n_classes
self.confusion_matrix = np.zeros((n_classes, n_classes))
def _fast_hist(self, label_true, label_pred, n_class):
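# Confusion-matrix histogram via bincount: entry [i, j] counts pixels whose true label is i and predicted label is j.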
mask = ((label_true >= 0) & (label_true < n_class))
hist = np.bincount(((n_class * label_true[mask].astype(int)) + label_pred[mask]), minlength=(n_class ** 2)).reshape(n_class, n_class)
return hist
def update(self, label_trues, label_preds):
for (lt, lp) in zip(label_trues, label_preds):
self.confusion_matrix += self._fast_hist(lt.flatten(), lp.flatten(), self.n_classes)
def get_scores(self, return_class=False):
hist = self.confusion_matrix
iou = (np.diag(hist) / hist.sum(axis=0))
miou = np.nanmean(iou)
if return_class:
return (miou, iou)
else:
return miou
def reset(self):
self.confusion_matrix = np.zeros((self.n_classes, self.n_classes)) |
class A2CModel():
def __init__(self, max_time_steps, *args, **kwargs):
super().__init__(*args, **kwargs)
self.entropy_coefficient = 0.01
self.value_coefficient = 0.5
self.max_gradient_norm = 0.5
self.rms_alpha = 0.99
self.rms_epsilon = 1e-05
self.data_parallel = True
self.learning_rate = 0.0007
def not_initialized(*args, **kwargs):
raise Exception('Not initialized')
self._train = self._step = self._value = not_initialized
def create_model(self, **kwargs):
pass
def show_summary(self, model):
batch_shape = (self.num_processes, self.num_steps)
def get_shape_rec(shapes):
if isinstance(shapes, tuple):
return tuple(get_shape_rec(list(shapes)))
elif isinstance(shapes, list):
return [get_shape_rec(x) for x in shapes]
else:
return shapes.size()
shapes = ((batch_shape + self.env.observation_space.shape), batch_shape, get_shape_rec(self._initial_states(self.num_processes)))
minimal_summary(model, shapes)
def _build_train(self, model, main_device):
optimizer = optim.RMSprop(model.parameters(), self.learning_rate, eps=self.rms_epsilon, alpha=self.rms_alpha)
@pytorch_call(main_device)  # decorator name assumed
def train(observations, returns, actions, masks, states=[]):
(policy_logits, value, _) = model(observations, masks, states)
dist = torch.distributions.Categorical(logits=policy_logits)
action_log_probs = dist.log_prob(actions)
dist_entropy = dist.entropy().mean()
advantages = (returns - value.squeeze((- 1)))
value_loss = advantages.pow(2).mean()
action_loss = (- (advantages.detach() * action_log_probs).mean())
loss = (((value_loss * self.value_coefficient) + action_loss) - (dist_entropy * self.entropy_coefficient))
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), self.max_gradient_norm)
optimizer.step()
return (loss.item(), action_loss.item(), value_loss.item(), dist_entropy.item())
return train
def _build_graph(self, allow_gpu, **model_kwargs):
model = self.create_model(**model_kwargs)
if hasattr(model, 'initial_states'):
self._initial_states = getattr(model, 'initial_states')
else:
self._initial_states = (lambda _: [])
self.show_summary(model)
cuda_devices = torch.cuda.device_count()
if ((cuda_devices == 0) or (not allow_gpu)):
print('Using CPU only')
main_device = torch.device('cpu')
def get_state_dict():
return model.state_dict()
elif ((cuda_devices > 1) and self.data_parallel):
print(('Using %s GPUs' % cuda_devices))
main_device = torch.device('cuda:0')
model = nn.DataParallel(model, output_device=main_device)
model = model.to(main_device)
def get_state_dict():
return model.module.state_dict()
else:
print('Using single GPU')
main_device = torch.device('cuda:0')
model = model.to(main_device)
def get_state_dict():
return model.state_dict()
model.train()
self._train = self._build_train(model, main_device)
@pytorch_call(main_device)  # decorator name assumed
def step(observations, masks, states):
with torch.no_grad():
batch_size = get_batch_size(observations)
observations = expand_time_dimension(observations)
masks = masks.view(batch_size, 1)
(policy_logits, value, states) = model(observations, masks, states)
dist = torch.distributions.Categorical(logits=policy_logits)
action = dist.sample()
action_log_probs = dist.log_prob(action)
return (action.squeeze(1).detach(), value.squeeze(1).squeeze((- 1)).detach(), action_log_probs.squeeze(1).detach(), KeepTensor(detach_all(states)))
@pytorch_call(main_device)  # decorator name assumed
def value(observations, masks, states):
with torch.no_grad():
batch_size = get_batch_size(observations)
observations = expand_time_dimension(observations)
masks = masks.view(batch_size, 1)
(_, value, states) = model(observations, masks, states)
return (value.squeeze(1).squeeze((- 1)).detach(), KeepTensor(detach_all(states)))
self._step = step
self._value = value
self._save = (lambda path: torch.save(get_state_dict(), os.path.join(path, 'weights.pth')))
self.main_device = main_device
return model |
class OutFile(object):
def __init__(self, outfile=None, silent=False, overwrite=False):
if (outfile is None):
self.f = sys.stdout
self.open = False
return
self.open = isinstance(outfile, file)
if (not self.open):
filename = os.path.expanduser(os.path.expandvars(outfile))
assert (overwrite or (not os.path.exists(filename)))
f = open(filename, 'w')
else:
f = outfile
self.f = f
def __enter__(self):
return self.f
def __exit__(self, exc_type, exc_val, exc_tb):
if self.open:
self.f.close() |
def main_worker(gpu, args):
args.gpu = gpu
args.rank = gpu
print(f'Process Launching at GPU {gpu}')
if args.distributed:
torch.cuda.set_device(args.gpu)
dist.init_process_group(backend='nccl')
print(f'Building train loader at GPU {gpu}')
train_loader = get_loader(args, split=args.train, mode='train', batch_size=args.batch_size, distributed=args.distributed, gpu=args.gpu, workers=args.num_workers, topk=args.train_topk)
print(f'Building val loader at GPU {gpu}')
val_loader = get_loader(args, split=args.valid, mode='val', batch_size=args.batch_size, distributed=args.distributed, gpu=args.gpu, workers=4, topk=args.valid_topk)
print(f'Building test loader at GPU {gpu}')
test_loader = get_loader(args, split=args.test, mode='val', batch_size=args.batch_size, distributed=args.distributed, gpu=args.gpu, workers=4, topk=args.valid_topk)
trainer = Trainer(args, train_loader, val_loader, test_loader, train=True)
trainer.train() |
def adagrad(loss_or_grads, params, learning_rate=1.0, epsilon=1e-06):
grads = get_or_compute_grads(loss_or_grads, params)
updates = OrderedDict()
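# Adagrad: accumulate squared gradients per parameter; the effective step size shrinks as the accumulator grows.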
for (param, grad) in zip(params, grads):
value = param.get_value(borrow=True)
accu = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)
accu_new = (accu + (grad ** 2))
updates[accu] = accu_new
updates[param] = (param - ((learning_rate * grad) / T.sqrt((accu_new + epsilon))))
return updates |
def unzip_dataset(data_name):
if (data_name == 'cora'):
subprocess.call(['unzip', 'datasets/cora.zip', '-d', 'datasets/cora'])
subprocess.call(['rm', 'datasets/cora.zip'])
print('Downloaded the cora dataset!\n')
elif (data_name == 'ppi'):
subprocess.call(['unzip', 'datasets/ppi.zip', '-d', 'datasets/ppi'])
subprocess.call(['rm', 'datasets/ppi.zip'])
print('Downloaded the ppi dataset!\n')
elif (data_name == 'reddit'):
subprocess.call(['unzip', 'datasets/reddit.zip', '-d', 'datasets/reddit'])
subprocess.call(['rm', 'datasets/reddit.zip'])
print('Downloaded the reddit dataset!\n') |
def test_wasserstein_bounds():
np.random.seed(341)
d2 = 5.0
stdev = 3.5
samples = norm.rvs(scale=stdev, size=MC_SAMPLES)
res = viabel.wasserstein_bounds(d2, samples=samples)
np.testing.assert_allclose(res['W1'], ((2 * stdev) * np.sqrt(np.expm1(d2))), rtol=MC_TOL, err_msg='incorrect W1 value')
np.testing.assert_allclose(res['W2'], ((2 * stdev) * ((3 * np.expm1(d2)) ** 0.25)), rtol=MC_TOL, err_msg='incorrect W2 value') |
def fuse_source_reference_output(output_mp4_path, src_img_paths, ref_img_paths, out_img_paths, audio_path=None, image_size=512, pad=10, fps=25, pool_size=15):
global default_ffmpeg_vcodec, default_ffmpeg_pix_fmt, default_ffmpeg_exe_path
ffmpeg_exc_path = os.environ.get('ffmpeg_exe_path', default_ffmpeg_exe_path)
vcodec = os.environ.get('ffmpeg_vcodec', default_ffmpeg_vcodec)
total = len(ref_img_paths)
assert (total == len(out_img_paths)), '{} != {}'.format(total, len(out_img_paths))
fused_src_img = fuse_source(src_img_paths, image_size)
pad_region = np.zeros((image_size, pad, 3), dtype=np.uint8)
pool_size = min(pool_size, os.cpu_count())
tmp_avi_video_path = ('%s.avi' % output_mp4_path)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
W = (fused_src_img.shape[1] + ((image_size + pad) * 2))
videoWriter = cv2.VideoWriter(tmp_avi_video_path, fourcc, fps, (W, image_size))
with ProcessPoolExecutor(pool_size) as pool:
for img in tqdm(pool.map(merge, ([fused_src_img] * total), ref_img_paths, out_img_paths, ([pad_region] * total))):
videoWriter.write(img)
videoWriter.release()
if ((audio_path is not None) and audio_path and os.path.exists(audio_path)):
fuse_video_audio_output(tmp_avi_video_path, audio_path, output_mp4_path)
os.remove(tmp_avi_video_path)
else:
cmd = [ffmpeg_exc_path, '-y', '-i', tmp_avi_video_path, '-vcodec', vcodec, output_mp4_path, '-loglevel', 'quiet']
print(' '.join(cmd))
subprocess.call(cmd)
os.remove(tmp_avi_video_path) |
class TFSegformerEncoder(tf.keras.layers.Layer):
def __init__(self, config: SegformerConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
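# Stochastic depth: drop-path rates increase linearly from 0 to config.drop_path_rate across all transformer blocks.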
drop_path_decays = [x.numpy() for x in tf.linspace(0.0, config.drop_path_rate, sum(config.depths))]
embeddings = []
for i in range(config.num_encoder_blocks):
embeddings.append(TFSegformerOverlapPatchEmbeddings(patch_size=config.patch_sizes[i], stride=config.strides[i], hidden_size=config.hidden_sizes[i], name=f'patch_embeddings.{i}'))
self.embeddings = embeddings
blocks = []
cur = 0
for i in range(config.num_encoder_blocks):
layers = []
if (i != 0):
cur += config.depths[(i - 1)]
for j in range(config.depths[i]):
layers.append(TFSegformerLayer(config, hidden_size=config.hidden_sizes[i], num_attention_heads=config.num_attention_heads[i], drop_path=drop_path_decays[(cur + j)], sequence_reduction_ratio=config.sr_ratios[i], mlp_ratio=config.mlp_ratios[i], name=f'block.{i}.{j}'))
blocks.append(layers)
self.block = blocks
self.layer_norms = [tf.keras.layers.LayerNormalization(epsilon=1e-05, name=f'layer_norm.{i}') for i in range(config.num_encoder_blocks)]
def call(self, pixel_values: tf.Tensor, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, training: bool=False) -> Union[(Tuple, TFBaseModelOutput)]:
all_hidden_states = (() if output_hidden_states else None)
all_self_attentions = (() if output_attentions else None)
batch_size = shape_list(pixel_values)[0]
hidden_states = pixel_values
for (idx, x) in enumerate(zip(self.embeddings, self.block, self.layer_norms)):
(embedding_layer, block_layer, norm_layer) = x
(hidden_states, height, width) = embedding_layer(hidden_states)
for (i, blk) in enumerate(block_layer):
layer_outputs = blk(hidden_states, height, width, output_attentions, training=training)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = (all_self_attentions + (layer_outputs[1],))
hidden_states = norm_layer(hidden_states)
if ((idx != (len(self.embeddings) - 1)) or ((idx == (len(self.embeddings) - 1)) and self.config.reshape_last_stage)):
num_channels = shape_list(hidden_states)[(- 1)]
hidden_states = tf.reshape(hidden_states, (batch_size, height, width, num_channels))
if output_hidden_states:
all_hidden_states = (all_hidden_states + (hidden_states,))
if (not return_dict):
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if (v is not None)))
return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions) |
def wt_xxz_output_to_disk(dir, fname_list, full_docs, decision):
for (output, doc, dec) in zip(fname_list, full_docs, decision):
sent_picked = [doc[idx] for idx in dec]
wt_content = '\n'.join(sent_picked)
with open(os.path.join(dir, (output + '.txt')), 'w') as fd:
fd.write(wt_content) |
def MiDaS_small(pretrained=True, **kwargs):
model = MidasNet_small(None, features=64, backbone='efficientnet_lite3', exportable=True, non_negative=True, blocks={'expand': True})
if pretrained:
checkpoint = ''  # URL of the pretrained MiDaS-small weights goes here
state_dict = torch.hub.load_state_dict_from_url(checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True)
model.load_state_dict(state_dict)
return model |
def fully_connected(x, units, use_bias=True, sn=False, scope='fully_0'):
with tf.variable_scope(scope):
x = flatten(x)
shape = x.get_shape().as_list()
channels = shape[(- 1)]
if sn:
w = tf.get_variable('kernel', [channels, units], tf.float32, initializer=weight_init, regularizer=weight_regularizer)
if use_bias:
bias = tf.get_variable('bias', [units], initializer=tf.constant_initializer(0.0))
x = (tf.matmul(x, spectral_norm(w)) + bias)
else:
x = tf.matmul(x, spectral_norm(w))
else:
x = tf.layers.dense(x, units=units, kernel_initializer=weight_init, kernel_regularizer=weight_regularizer, use_bias=use_bias)
return x |
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, ' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
logger.info(f'Training/evaluation parameters {training_args}')
last_checkpoint = None
if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
set_seed(training_args.seed)
if (data_args.task_name is not None):
raw_datasets = load_dataset('glue', data_args.task_name, cache_dir=model_args.cache_dir)
elif (data_args.dataset_name is not None):
raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
else:
data_files = {'train': data_args.train_file, 'validation': data_args.validation_file}
if training_args.do_predict:
if (data_args.test_file is not None):
train_extension = data_args.train_file.split('.')[(- 1)]
test_extension = data_args.test_file.split('.')[(- 1)]
assert (test_extension == train_extension), '`test_file` should have the same extension (csv or json) as `train_file`.'
data_files['test'] = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.')
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}')
if data_args.train_file.endswith('.csv'):
raw_datasets = load_dataset('csv', data_files=data_files, cache_dir=model_args.cache_dir)
else:
raw_datasets = load_dataset('json', data_files=data_files, cache_dir=model_args.cache_dir)
if (data_args.task_name is not None):
is_regression = (data_args.task_name == 'stsb')
if (not is_regression):
label_list = raw_datasets['train'].features['label'].names
num_labels = len(label_list)
else:
num_labels = 1
else:
is_regression = (raw_datasets['train'].features['label'].dtype in ['float32', 'float64'])
if is_regression:
num_labels = 1
else:
label_list = raw_datasets['train'].unique('label')
label_list.sort()
num_labels = len(label_list)
config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
if model_args.int8:
from neural_compressor.utils.load_huggingface import OptimizedModel
model = OptimizedModel.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
else:
model = AutoModelForSequenceClassification.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
if (data_args.task_name is not None):
(sentence1_key, sentence2_key) = task_to_keys[data_args.task_name]
else:
non_label_column_names = [name for name in raw_datasets['train'].column_names if (name != 'label')]
if (('sentence1' in non_label_column_names) and ('sentence2' in non_label_column_names)):
(sentence1_key, sentence2_key) = ('sentence1', 'sentence2')
elif (len(non_label_column_names) >= 2):
(sentence1_key, sentence2_key) = non_label_column_names[:2]
else:
(sentence1_key, sentence2_key) = (non_label_column_names[0], None)
if data_args.pad_to_max_length:
padding = 'max_length'
else:
padding = False
label_to_id = None
if ((model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id) and (data_args.task_name is not None) and (not is_regression)):
label_name_to_id = {k.lower(): v for (k, v) in model.config.label2id.items()}
if (list(sorted(label_name_to_id.keys())) == list(sorted(label_list))):
label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
else:
logger.warning(f'''Your model seems to have been trained with labels, but they don't match the dataset: model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}.
Ignoring the model labels as a result.''')
elif ((data_args.task_name is None) and (not is_regression)):
label_to_id = {v: i for (i, v) in enumerate(label_list)}
if (label_to_id is not None):
model.config.label2id = label_to_id
model.config.id2label = {id: label for (label, id) in config.label2id.items()}
if (data_args.max_seq_length > tokenizer.model_max_length):
logger.warning(f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_function(examples):
args = ((examples[sentence1_key],) if (sentence2_key is None) else (examples[sentence1_key], examples[sentence2_key]))
result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
if ((label_to_id is not None) and ('label' in examples)):
result['label'] = [(label_to_id[l] if (l != (- 1)) else (- 1)) for l in examples['label']]
return result
with training_args.main_process_first(desc='dataset map pre-processing'):
raw_datasets = raw_datasets.map(preprocess_function, batched=True, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on dataset')
if training_args.do_train:
if ('train' not in raw_datasets):
raise ValueError('--do_train requires a train dataset')
train_dataset = raw_datasets['train']
if (data_args.max_train_samples is not None):
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if (('validation' not in raw_datasets) and ('validation_matched' not in raw_datasets)):
raise ValueError('--do_eval requires a validation dataset')
eval_dataset = raw_datasets[('validation_matched' if (data_args.task_name == 'mnli') else 'validation')]
if (data_args.max_eval_samples is not None):
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if (training_args.do_predict or (data_args.task_name is not None) or (data_args.test_file is not None)):
if (('test' not in raw_datasets) and ('test_matched' not in raw_datasets)):
raise ValueError('--do_predict requires a test dataset')
predict_dataset = raw_datasets[('test_matched' if (data_args.task_name == 'mnli') else 'test')]
if (data_args.max_predict_samples is not None):
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
if training_args.do_train:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
if (data_args.task_name is not None):
metric = load_metric('glue', data_args.task_name)
else:
metric = load_metric('accuracy')
def compute_metrics(p: EvalPrediction):
preds = (p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions)
preds = (np.squeeze(preds) if is_regression else np.argmax(preds, axis=1))
if (data_args.task_name is not None):
result = metric.compute(predictions=preds, references=p.label_ids)
if (len(result) > 1):
result['combined_score'] = np.mean(list(result.values())).item()
return result
elif is_regression:
return {'mse': ((preds - p.label_ids) ** 2).mean().item()}
else:
return {'accuracy': (preds == p.label_ids).astype(np.float32).mean().item()}
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
trainer = Trainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator)
early_stopping_patience = 2
early_stopping_threshold = 0.001
trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, early_stopping_threshold))
eval_dataloader = trainer.get_eval_dataloader()
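# neural_compressor benchmarking expects the dataloader to expose a batch_size; wrap the HF eval loader when it reports None.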
if (eval_dataloader.batch_size is None):
def _build_inc_dataloader(dataloader):
class INCDataLoader():
__iter__ = dataloader.__iter__
def __init__(self) -> None:
self.dataloader = dataloader
self.batch_size = dataloader.total_batch_size
return INCDataLoader()
eval_dataloader = _build_inc_dataloader(eval_dataloader)
batch_size = eval_dataloader.batch_size
def eval_func(model):
trainer.model = model
result = trainer.evaluate(eval_dataset=eval_dataset)
accu = result['eval_f1']
print(('Accuracy: %.3f ' % accu), flush=True)
return accu
def benchmark(model):
print(model)
trainer.model = model
result = trainer.evaluate(eval_dataset=eval_dataset)
throughput = result['eval_samples_per_second']
print(('Batch size = %d' % batch_size))
print(('Latency: %.3f ms' % (1000 / throughput)))
print(('Throughput: %.3f samples/sec' % result['eval_samples_per_second']))
if model_args.tune:
from neural_compressor.training import prepare_compression
from neural_compressor.config import QuantizationAwareTrainingConfig
conf = QuantizationAwareTrainingConfig()
compression_manager = prepare_compression(model, conf)
compression_manager.callbacks.on_train_begin()
trainer.train()
compression_manager.callbacks.on_train_end()
from neural_compressor.utils.load_huggingface import save_for_huggingface_upstream
save_for_huggingface_upstream(compression_manager.model, tokenizer, training_args.output_dir)
if model_args.performance:
from neural_compressor.config import BenchmarkConfig
from neural_compressor import benchmark
b_conf = BenchmarkConfig(warmup=5, iteration=model_args.iters, cores_per_instance=4, num_of_instance=1)
benchmark.fit(model, b_conf, b_dataloader=eval_dataloader)
else:
eval_func(model)
return |
def index(x, start=None, end=None):
if isinstance(x, list):
return [index_numpy(x_i, start, end) for x_i in x]
else:
return index_numpy(x, start, end) |
class PatchEmbed(nn.Module):
def __init__(self, patch_size=16, stride=16, padding=0, in_chans=3, embed_dim=768, norm_layer=None):
super().__init__()
patch_size = to_2tuple(patch_size)
stride = to_2tuple(stride)
padding = to_2tuple(padding)
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride, padding=padding)
self.norm = (norm_layer(embed_dim) if norm_layer else nn.Identity())
def forward(self, x):
x = self.proj(x)
x = self.norm(x)
return x |
def train_net(args, config):
(logger, final_output_path) = create_logger(config.OUTPUT_PATH, args.cfg, config.DATASET.TRAIN_IMAGE_SET, split='train')
model_prefix = os.path.join(final_output_path, config.MODEL_PREFIX)
if (args.log_dir is None):
args.log_dir = os.path.join(final_output_path, 'tensorboard_logs')
pprint.pprint(args)
logger.info('training args:{}\n'.format(args))
pprint.pprint(config)
logger.info('training config:{}\n'.format(pprint.pformat(config)))
if (config.RNG_SEED > (- 1)):
np.random.seed(config.RNG_SEED)
torch.random.manual_seed(config.RNG_SEED)
torch.cuda.manual_seed_all(config.RNG_SEED)
torch.backends.cudnn.benchmark = False
if args.cudnn_off:
torch.backends.cudnn.enabled = False
if args.dist:
model = eval(config.MODULE)(config)
local_rank = int((os.environ.get('LOCAL_RANK') or 0))
config.GPUS = str(local_rank)
torch.cuda.set_device(local_rank)
master_address = os.environ['MASTER_ADDR']
master_port = int((os.environ['MASTER_PORT'] or 23456))
world_size = int((os.environ['WORLD_SIZE'] or 1))
rank = int((os.environ['RANK'] or 0))
if args.slurm:
distributed.init_process_group(backend='nccl')
else:
distributed.init_process_group(backend='nccl', init_method='tcp://{}:{}'.format(master_address, master_port), world_size=world_size, rank=rank, group_name='mtorch')
print(f'native distributed, size: {world_size}, rank: {rank}, local rank: {local_rank}')
torch.cuda.set_device(local_rank)
config.GPUS = str(local_rank)
model = model.cuda()
if (not config.TRAIN.FP16):
model = DDP(model, device_ids=[local_rank], output_device=local_rank)
if (rank == 0):
summary_parameters((model.module if isinstance(model, torch.nn.parallel.DistributedDataParallel) else model), logger)
shutil.copy(args.cfg, final_output_path)
shutil.copy(inspect.getfile(eval(config.MODULE)), final_output_path)
writer = None
if (args.log_dir is not None):
tb_log_dir = os.path.join(args.log_dir, 'rank{}'.format(rank))
if (not os.path.exists(tb_log_dir)):
os.makedirs(tb_log_dir)
writer = SummaryWriter(log_dir=tb_log_dir)
(train_loader, train_sampler) = make_dataloader(config, mode='train', distributed=True, num_replicas=world_size, rank=rank, expose_sampler=True)
val_loader = make_dataloader(config, mode='val', distributed=True, num_replicas=world_size, rank=rank)
batch_size = (world_size * (sum(config.TRAIN.BATCH_IMAGES) if isinstance(config.TRAIN.BATCH_IMAGES, list) else config.TRAIN.BATCH_IMAGES))
if (config.TRAIN.GRAD_ACCUMULATE_STEPS > 1):
batch_size = (batch_size * config.TRAIN.GRAD_ACCUMULATE_STEPS)
base_lr = (config.TRAIN.LR * batch_size)
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (_k in n)], 'lr': (base_lr * _lr_mult)} for (_k, _lr_mult) in config.TRAIN.LR_MULT]
optimizer_grouped_parameters.append({'params': [p for (n, p) in model.named_parameters() if all([(_k not in n) for (_k, _) in config.TRAIN.LR_MULT])]})
if (config.TRAIN.OPTIMIZER == 'SGD'):
optimizer = optim.SGD(optimizer_grouped_parameters, lr=(config.TRAIN.LR * batch_size), momentum=config.TRAIN.MOMENTUM, weight_decay=config.TRAIN.WD)
elif (config.TRAIN.OPTIMIZER == 'Adam'):
optimizer = optim.Adam(optimizer_grouped_parameters, lr=(config.TRAIN.LR * batch_size), weight_decay=config.TRAIN.WD)
elif (config.TRAIN.OPTIMIZER == 'AdamW'):
optimizer = AdamW(optimizer_grouped_parameters, lr=(config.TRAIN.LR * batch_size), betas=(0.9, 0.999), eps=1e-06, weight_decay=config.TRAIN.WD, correct_bias=True)
else:
raise ValueError('Not support optimizer {}!'.format(config.TRAIN.OPTIMIZER))
total_gpus = world_size
else:
model = eval(config.MODULE)(config)
summary_parameters(model, logger)
shutil.copy(args.cfg, final_output_path)
shutil.copy(inspect.getfile(eval(config.MODULE)), final_output_path)
num_gpus = len(config.GPUS.split(','))
assert ((num_gpus <= 1) or (not config.TRAIN.FP16)), 'Not support fp16 with torch.nn.DataParallel. Please use amp.parallel.DistributedDataParallel instead.'
total_gpus = num_gpus
rank = None
writer = (SummaryWriter(log_dir=args.log_dir) if (args.log_dir is not None) else None)
if (num_gpus > 1):
model = torch.nn.DataParallel(model, device_ids=[int(d) for d in config.GPUS.split(',')]).cuda()
else:
torch.cuda.set_device(int(config.GPUS))
model.cuda()
train_loader = make_dataloader(config, mode='train', distributed=False)
val_loader = make_dataloader(config, mode='val', distributed=False)
train_sampler = None
batch_size = (num_gpus * (sum(config.TRAIN.BATCH_IMAGES) if isinstance(config.TRAIN.BATCH_IMAGES, list) else config.TRAIN.BATCH_IMAGES))
if (config.TRAIN.GRAD_ACCUMULATE_STEPS > 1):
batch_size = (batch_size * config.TRAIN.GRAD_ACCUMULATE_STEPS)
base_lr = (config.TRAIN.LR * batch_size)
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (_k in n)], 'lr': (base_lr * _lr_mult)} for (_k, _lr_mult) in config.TRAIN.LR_MULT]
optimizer_grouped_parameters.append({'params': [p for (n, p) in model.named_parameters() if all([(_k not in n) for (_k, _) in config.TRAIN.LR_MULT])]})
if (config.TRAIN.OPTIMIZER == 'SGD'):
optimizer = optim.SGD(optimizer_grouped_parameters, lr=(config.TRAIN.LR * batch_size), momentum=config.TRAIN.MOMENTUM, weight_decay=config.TRAIN.WD)
elif (config.TRAIN.OPTIMIZER == 'Adam'):
optimizer = optim.Adam(optimizer_grouped_parameters, lr=(config.TRAIN.LR * batch_size), weight_decay=config.TRAIN.WD)
elif (config.TRAIN.OPTIMIZER == 'AdamW'):
optimizer = AdamW(optimizer_grouped_parameters, lr=(config.TRAIN.LR * batch_size), betas=(0.9, 0.999), eps=1e-06, weight_decay=config.TRAIN.WD, correct_bias=True)
else:
raise ValueError('Not support optimizer {}!'.format(config.TRAIN.OPTIMIZER))
if (config.NETWORK.PARTIAL_PRETRAIN != ''):
pretrain_state_dict = torch.load(config.NETWORK.PARTIAL_PRETRAIN, map_location=(lambda storage, loc: storage))['state_dict']
prefix_change = [prefix_change.split('->') for prefix_change in config.NETWORK.PARTIAL_PRETRAIN_PREFIX_CHANGES]
if (len(prefix_change) > 0):
pretrain_state_dict_parsed = {}
for (k, v) in pretrain_state_dict.items():
no_match = True
for (pretrain_prefix, new_prefix) in prefix_change:
if k.startswith(pretrain_prefix):
k = (new_prefix + k[len(pretrain_prefix):])
pretrain_state_dict_parsed[k] = v
no_match = False
break
if no_match:
pretrain_state_dict_parsed[k] = v
pretrain_state_dict = pretrain_state_dict_parsed
smart_partial_load_model_state_dict(model, pretrain_state_dict)
if config.NETWORK.CLASSIFIER_PRETRAINED:
print('Initializing classifier weight from pretrained word embeddings...')
answers_word_embed = []
for (k, v) in model.state_dict().items():
if ('word_embeddings.weight' in k):
word_embeddings = v.detach().clone()
break
for answer in train_loader.dataset.answer_vocab:
a_tokens = train_loader.dataset.tokenizer.tokenize(answer)
a_ids = train_loader.dataset.tokenizer.convert_tokens_to_ids(a_tokens)
a_word_embed = torch.stack([word_embeddings[a_id] for a_id in a_ids], dim=0).mean(dim=0)
answers_word_embed.append(a_word_embed)
answers_word_embed_tensor = torch.stack(answers_word_embed, dim=0)
for (name, module) in model.named_modules():
if name.endswith('final_mlp'):
module[(- 1)].weight.data = answers_word_embed_tensor.to(device=module[(- 1)].weight.data.device)
train_metrics_list = [vqa_metrics.SoftAccuracy(allreduce=args.dist, num_replicas=(world_size if args.dist else 1))]
val_metrics_list = [vqa_metrics.SoftAccuracy(allreduce=args.dist, num_replicas=(world_size if args.dist else 1))]
for (output_name, display_name) in config.TRAIN.LOSS_LOGGERS:
train_metrics_list.append(vqa_metrics.LossLogger(output_name, display_name=display_name, allreduce=args.dist, num_replicas=(world_size if args.dist else 1)))
train_metrics = CompositeEvalMetric()
val_metrics = CompositeEvalMetric()
for child_metric in train_metrics_list:
train_metrics.add(child_metric)
for child_metric in val_metrics_list:
val_metrics.add(child_metric)
epoch_end_callbacks = []
if ((rank is None) or (rank == 0)):
epoch_end_callbacks = [Checkpoint(model_prefix, config.CHECKPOINT_FREQUENT)]
validation_monitor = ValidationMonitor(do_validation, val_loader, val_metrics, host_metric_name='SoftAcc', label_index_in_batch=config.DATASET.LABEL_INDEX_IN_BATCH)
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
if ((rank is None) or (rank == 0)):
smart_resume(model, optimizer, validation_monitor, config, model_prefix, logger)
if args.dist:
begin_epoch = torch.tensor(config.TRAIN.BEGIN_EPOCH).cuda()
distributed.broadcast(begin_epoch, src=0)
config.TRAIN.BEGIN_EPOCH = begin_epoch.item()
batch_size = (len(config.GPUS.split(',')) * config.TRAIN.BATCH_IMAGES)
batch_end_callbacks = [Speedometer(batch_size, config.LOG_FREQUENT, batches_per_epoch=len(train_loader), epochs=(config.TRAIN.END_EPOCH - config.TRAIN.BEGIN_EPOCH))]
if (config.TRAIN.LR_SCHEDULE == 'plateau'):
print('Warning: not support resuming on plateau lr schedule!')
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=config.TRAIN.LR_FACTOR, patience=1, verbose=True, threshold=0.0001, threshold_mode='rel', cooldown=2, min_lr=0, eps=1e-08)
elif (config.TRAIN.LR_SCHEDULE == 'triangle'):
lr_scheduler = WarmupLinearSchedule(optimizer, (config.TRAIN.WARMUP_STEPS if config.TRAIN.WARMUP else 0), t_total=int(((config.TRAIN.END_EPOCH * len(train_loader)) / config.TRAIN.GRAD_ACCUMULATE_STEPS)), last_epoch=(int(((config.TRAIN.BEGIN_EPOCH * len(train_loader)) / config.TRAIN.GRAD_ACCUMULATE_STEPS)) - 1))
elif (config.TRAIN.LR_SCHEDULE == 'step'):
lr_iters = [int(((epoch * len(train_loader)) / config.TRAIN.GRAD_ACCUMULATE_STEPS)) for epoch in config.TRAIN.LR_STEP]
lr_scheduler = WarmupMultiStepLR(optimizer, milestones=lr_iters, gamma=config.TRAIN.LR_FACTOR, warmup_factor=config.TRAIN.WARMUP_FACTOR, warmup_iters=(config.TRAIN.WARMUP_STEPS if config.TRAIN.WARMUP else 0), warmup_method=config.TRAIN.WARMUP_METHOD, last_epoch=(int(((config.TRAIN.BEGIN_EPOCH * len(train_loader)) / config.TRAIN.GRAD_ACCUMULATE_STEPS)) - 1))
else:
raise ValueError('Not support lr schedule: {}.'.format(config.TRAIN.LR_SCHEDULE))
if args.dist:
for v in model.state_dict().values():
distributed.broadcast(v, src=0)
best_epoch = torch.tensor(validation_monitor.best_epoch).cuda()
best_val = torch.tensor(validation_monitor.best_val).cuda()
distributed.broadcast(best_epoch, src=0)
distributed.broadcast(best_val, src=0)
validation_monitor.best_epoch = best_epoch.item()
validation_monitor.best_val = best_val.item()
if config.TRAIN.FP16:
(model, optimizer) = amp.initialize(model, optimizer, opt_level='O2', keep_batchnorm_fp32=False, loss_scale=config.TRAIN.FP16_LOSS_SCALE, min_loss_scale=32.0)
if args.dist:
model = Apex_DDP(model, delay_allreduce=True)
train(model, optimizer, lr_scheduler, train_loader, train_sampler, train_metrics, config.TRAIN.BEGIN_EPOCH, config.TRAIN.END_EPOCH, logger, rank=rank, batch_end_callbacks=batch_end_callbacks, epoch_end_callbacks=epoch_end_callbacks, writer=writer, validation_monitor=validation_monitor, fp16=config.TRAIN.FP16, clip_grad_norm=config.TRAIN.CLIP_GRAD_NORM, gradient_accumulate_steps=config.TRAIN.GRAD_ACCUMULATE_STEPS)
return (rank, model) |
def chunk_pad(it, size, padval=None):
it = chain(iter(it), repeat(padval))
return iter((lambda : tuple(islice(it, size))), ((padval,) * size)) |
def move_cache(old_cache_dir: Optional[str]=None, new_cache_dir: Optional[str]=None) -> None:
if (new_cache_dir is None):
new_cache_dir = DIFFUSERS_CACHE
if (old_cache_dir is None):
old_cache_dir = old_diffusers_cache
old_cache_dir = Path(old_cache_dir).expanduser()
new_cache_dir = Path(new_cache_dir).expanduser()
for old_blob_path in old_cache_dir.glob('**/blobs/*'):
if (old_blob_path.is_file() and (not old_blob_path.is_symlink())):
new_blob_path = (new_cache_dir / old_blob_path.relative_to(old_cache_dir))
new_blob_path.parent.mkdir(parents=True, exist_ok=True)
os.replace(old_blob_path, new_blob_path)
try:
os.symlink(new_blob_path, old_blob_path)
except OSError:
logger.warning('Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.') |
def run_task(arg_vv, log_dir, exp_name):
if (arg_vv['algorithm'] == 'planet'):
from planet.config import DEFAULT_PARAMS
elif (arg_vv['algorithm'] == 'dreamer'):
from dreamer.config import DEFAULT_PARAMS  # assumed module path, mirroring the planet branch so the dreamer case below is reachable
else:
raise NotImplementedError
vv = DEFAULT_PARAMS
vv.update(**arg_vv)
vv = update_env_kwargs(vv)
vv['max_episode_length'] = vv['env_kwargs']['horizon']
logger.configure(dir=log_dir, exp_name=exp_name)
logdir = logger.get_dir()
assert (logdir is not None)
os.makedirs(logdir, exist_ok=True)
if torch.cuda.is_available():
device = (torch.device('cuda:1') if (torch.cuda.device_count() > 1) else torch.device('cuda:0'))
torch.cuda.manual_seed(vv['seed'])
else:
device = torch.device('cpu')
with open(osp.join(logger.get_dir(), 'variant.json'), 'w') as f:
json.dump(vv, f, indent=2, sort_keys=True)
env = Env(vv['env_name'], vv['symbolic_env'], vv['seed'], vv['max_episode_length'], vv['action_repeat'], vv['bit_depth'], vv['image_dim'], env_kwargs=vv['env_kwargs'])
if (vv['algorithm'] == 'planet'):
from planet.planet_agent import PlaNetAgent
agent = PlaNetAgent(env, vv, device)
agent.train(train_epoch=vv['train_epoch'])
env.close()
elif (vv['algorithm'] == 'dreamer'):
from dreamer.dreamer_agent import DreamerAgent
agent = DreamerAgent(env, vv, device)
agent.train(train_episode=vv['train_episode'])
env.close() |
class MSGMSLoss(Module):
def __init__(self, num_scales: int=3, in_channels: int=3) -> None:
super().__init__()
self.num_scales = num_scales
self.in_channels = in_channels
(self.prewitt_x, self.prewitt_y) = self._create_prewitt_kernel()
self.mean_filter = (torch.ones((1, 1, 21, 21)) / (21 * 21))
def forward(self, img1: Tensor, img2: Tensor) -> Tuple[(Tensor, Tensor)]:
if ((not self.prewitt_x.is_cuda) or (not self.prewitt_y.is_cuda)):
self.prewitt_x = self.prewitt_x.to(img1.device)
self.prewitt_y = self.prewitt_y.to(img1.device)
if (not self.mean_filter.is_cuda):
self.mean_filter = self.mean_filter.to(img1.device)
(b, c, h, w) = img1.shape
msgms_map = torch.zeros_like(img1)
for scale in range(self.num_scales):
if (scale > 0):
img1 = F.avg_pool2d(img1, kernel_size=2, stride=2, padding=0)
img2 = F.avg_pool2d(img2, kernel_size=2, stride=2, padding=0)
gms_map = self._gms(img1, img2)
msgms_map += F.interpolate(gms_map, size=(h, w), mode='bilinear', align_corners=False)
msgms_loss = torch.mean((1 - (msgms_map / self.num_scales)))
msgms_map = torch.mean((1 - (msgms_map / self.num_scales)), dim=1, keepdim=True)
msgms_map = F.conv2d(msgms_map, self.mean_filter, stride=1, padding=10)
return (msgms_loss, msgms_map)
def _gms(self, img1: Tensor, img2: Tensor) -> Tensor:
gm1_x = F.conv2d(img1, self.prewitt_x, stride=1, padding=1, groups=self.in_channels)
gm1_y = F.conv2d(img1, self.prewitt_y, stride=1, padding=1, groups=self.in_channels)
gm1 = torch.sqrt((((gm1_x ** 2) + (gm1_y ** 2)) + 1e-12))
gm2_x = F.conv2d(img2, self.prewitt_x, stride=1, padding=1, groups=self.in_channels)
gm2_y = F.conv2d(img2, self.prewitt_y, stride=1, padding=1, groups=self.in_channels)
gm2 = torch.sqrt((((gm2_x ** 2) + (gm2_y ** 2)) + 1e-12))
c = 0.0026
numerator = (((2 * gm1) * gm2) + c)
denominator = (((gm1 ** 2) + (gm2 ** 2)) + c)
return (numerator / (denominator + 1e-12))
def _create_prewitt_kernel(self) -> Tuple[(Tensor, Tensor)]:
prewitt_x = (torch.Tensor([[[[1, 0, (- 1)], [1, 0, (- 1)], [1, 0, (- 1)]]]]) / 3.0)
prewitt_x = prewitt_x.repeat(self.in_channels, 1, 1, 1)
prewitt_y = (torch.Tensor([[[[1, 1, 1], [0, 0, 0], [(- 1), (- 1), (- 1)]]]]) / 3.0)
prewitt_y = prewitt_y.repeat(self.in_channels, 1, 1, 1)
return (prewitt_x, prewitt_y) |
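A minimal usage sketch for MSGMSLoss, assuming the imports its definition implies (torch, torch.nn.Module as Module, torch.nn.functional as F, Tensor, Tuple) are in scope: the module returns a scalar multi-scale gradient-magnitude-similarity loss together with a smoothed per-pixel map.
import torch

# Compare a batch of reconstructions against the originals (CPU example).
criterion = MSGMSLoss(num_scales=3, in_channels=3)
img = torch.rand(2, 3, 256, 256)     # original images
recon = torch.rand(2, 3, 256, 256)   # reconstructed images
loss, gms_map = criterion(img, recon)
print(loss.item(), gms_map.shape)    # gms_map has shape (2, 1, 256, 256) |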
def test_stack_fusion_new():
fusion_module = CrossAttentionNew(32, 2, (- 1))
print('CrossAttention init success!')
fake_img = Variable(torch.randn(16, 49, 32))
fake_txt = Variable(torch.randn(8, 14, 32))
score = fusion_module(fake_img, fake_txt, get_score=True)
print(score.size())
print('----CrossAttention module success!----') |
def triplet_loss(q_vec, pos_vecs, neg_vecs, margin):
best_pos = best_pos_distance(q_vec, pos_vecs)
num_neg = neg_vecs.get_shape()[1]
batch = q_vec.get_shape()[0]
query_copies = tf.tile(q_vec, [1, int(num_neg), 1])
best_pos = tf.tile(tf.reshape(best_pos, ((- 1), 1)), [1, int(num_neg)])
m = tf.fill([int(batch), int(num_neg)], margin)
triplet_loss = tf.reduce_mean(tf.reduce_sum(tf.maximum(tf.add(m, tf.subtract(best_pos, tf.reduce_sum(tf.squared_difference(neg_vecs, query_copies), 2))), tf.zeros([int(batch), int(num_neg)])), 1))
return triplet_loss |
def combine_datasets(config, output_file_name='combined.tsv'):
filedir = config['file_directory']
output_file = os.path.join(filedir, output_file_name)
combined_df = pd.DataFrame()
for dataset in __filter_datasets_from_config(config):
for ds_file in dataset.files:
try:
df = pd.read_csv(os.path.join(filedir, dataset.name, ds_file['name']), sep='\t')
df.insert(loc=0, column='file_name', value=([ds_file['name']] * df.count().max()))
df.insert(loc=0, column='file_language', value=([ds_file['language']] * df.count().max()))
df.insert(loc=0, column='file_platform', value=([ds_file['platform']] * df.count().max()))
df.drop(columns=['id'], inplace=True)
combined_df = combined_df.append(df)
except Exception:
print('Could not add {0} to the combined dataset. Continuing with next file.'.format(ds_file['name']))
combined_df.to_csv(output_file, index_label='id', quoting=csv.QUOTE_NONNUMERIC, sep='\t') |
def generate_ring_data(smiles, n_pos, n_neg):
ring_data = {}
mol = Chem.MolFromSmiles(smiles)
ssr = [list(x) for x in Chem.GetSymmSSSR(mol)]
n_rings = len(ssr)
ring_indices = list(range(n_rings))
if (n_rings <= 1):
return ring_data
for idx in range(n_pos):
ring_idx = random.sample(ring_indices, 1)[0]
ring_atoms = ssr[ring_idx]
for i in range(MAX_IT):
atom_pair = ordered_pair(random.sample(ring_atoms, 2))
if (atom_pair not in ring_data):
ring_data[atom_pair] = 1
break
for idx in range(n_neg):
ring_pair = random.sample(ring_indices, 2)
ring_1 = ssr[ring_pair[0]]
ring_2 = ssr[ring_pair[1]]
for i in range(MAX_IT):
a1 = random.sample(ring_1, 1)[0]
a2 = random.sample(ring_2, 1)[0]
if (a1 == a2):
continue
same_ring = False
for ring in ssr:
if ((a1 in ring) and (a2 in ring)):
same_ring = True
break
if same_ring:
continue
atom_pair = ordered_pair([a1, a2])
if (atom_pair not in ring_data):
ring_data[atom_pair] = 0
break
return ring_data |
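A hedged usage sketch for generate_ring_data: MAX_IT and ordered_pair are referenced but not shown in the record above, so the definitions below are assumptions made only so the example runs, not the original helpers.
import random
from rdkit import Chem

MAX_IT = 20                      # assumed retry budget

def ordered_pair(pair):          # assumed helper: canonical (low, high) ordering
    return tuple(sorted(pair))

# Naphthalene has two fused rings, so both same-ring (label 1) and
# cross-ring (label 0) atom pairs can be sampled.
pairs = generate_ring_data('c1ccc2ccccc2c1', n_pos=2, n_neg=2)
print(pairs)  # e.g. {(1, 3): 1, (0, 6): 0, ...} mapping atom pairs to labels |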
def build_molecules(one_hot, x, node_mask, is_geom, margins=const.MARGINS_EDM):
molecules = []
for i in range(len(one_hot)):
mask = (node_mask[i].squeeze() == 1)
atom_types = one_hot[i][mask].argmax(dim=1).detach().cpu()
positions = x[i][mask].detach().cpu()
mol = build_molecule(positions, atom_types, is_geom, margins=margins)
molecules.append(mol)
return molecules |
class Bretschneider2016lol(dataset.Dataset):
name = 'bretschneider2016lol'
url = ''
hash = '901e0d51428f34b94bf6b3f59b0e9cf71dabe94fc74fd81fd1e9be199d2902bc'
files = [{'name': 'bretschneider2016en_lol.csv', 'language': 'en', 'type': 'training', 'platform': 'League of Legends'}]
comment = ' '
license = 'UNKNOWN'
def process(cls, tmp_file_path, dataset_folder, api_config):
tmp_file_path = helpers.unzip_file(tmp_file_path)
tmp_file_path = helpers.extract_sql_tables(os.path.join(tmp_file_path, 'lol_anonymized.sql'))
tmp_file_path = helpers.join_csvs(os.path.join(tmp_file_path, 'posts.csv'), ['topic_id', 'post_number'], os.path.join(tmp_file_path, 'annotations.csv'), ['topic_id', 'post_number'], how='left')
helpers.copy_file(tmp_file_path, os.path.join(dataset_folder, 'bretschneider2016en_lol.csv'))
def unify_row(cls, row):
row['text'] = row['html_message']
labels = []
if (type(row['offender']) != float):
labels.append('offensive')
row['labels'] = labels
row = row.drop(['topic_id', 'post_number', 'annotator', 'offender', 'victim', 'author', 'html_message', 'timestamp'])
return row |
class ResConvBlock(nn.Module):
def __init__(self, n_in, n_state):
super().__init__()
self.model = nn.Sequential(nn.ReLU(), nn.Conv2d(n_in, n_state, 3, 1, 1), nn.ReLU(), nn.Conv2d(n_state, n_in, 1, 1, 0))
def forward(self, x):
return (x + self.model(x)) |
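A minimal sketch showing that ResConvBlock preserves both spatial size and channel count, so blocks can be stacked freely (assumes torch and torch.nn as nn are imported as the class definition implies).
import torch

block = ResConvBlock(n_in=64, n_state=128)
x = torch.randn(4, 64, 32, 32)
y = block(x)
print(y.shape)  # torch.Size([4, 64, 32, 32]) -- the residual add keeps the input shape |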
def load_train_labels(data_folder: Path, city: str, competition: str):
train_label_frames = []
train_label_files = sorted((((data_folder / 'train') / city) / 'labels').glob(f'{competition}_labels_*.parquet'))
for train_label_file in tqdm.tqdm(train_label_files, total=len(train_label_files)):
df = pd.read_parquet(train_label_file)
day_str = re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', str(train_label_file)).group()
day = datetime.datetime.strptime(day_str, '%Y-%m-%d')
df['day'] = day_str
df['weekday'] = day.weekday()
df['h'] = (df['t'] // 4)
df['timestamp'] = [(day + datetime.timedelta(hours=(t // 4), minutes=((t % 4) * 15))) for t in df['t']]
train_label_frames.append(df)
logging.info(f'Read {len(train_label_frames)} training label files for {city} {competition}')
train_labels = pd.concat(train_label_frames)
logging.info(f'Labels loaded: {len(train_labels)} for {city} {competition}')
return train_labels |
class ATLoss(nn.Module):
def __init__(self):
super().__init__()
self.mode = 'code'
if (self.mode not in ('code', 'paper')):
raise ValueError('mode `{}` is not expected'.format(self.mode))
def attention_transfer_paper(self, feature_map):
return normalize(feature_map.pow(2).sum(1).flatten(1))
def compute_at_loss_paper(self, student_feature_map, teacher_feature_map):
at_student = self.attention_transfer_paper(student_feature_map)
at_teacher = self.attention_transfer_paper(teacher_feature_map)
return torch.norm((at_student - at_teacher), dim=1).sum()
def attention_transfer(self, feature_map):
return normalize(feature_map.pow(2).mean(1).flatten(1))
def compute_at_loss(self, student_feature_map, teacher_feature_map):
at_student = self.attention_transfer(student_feature_map)
at_teacher = self.attention_transfer(teacher_feature_map)
return (at_student - at_teacher).pow(2).mean()
def forward(self, student_feature, teacher_feature):
at_loss = 0
batch_size = cfg.train_batch_size
factor = 1.0
for (i, j) in zip(student_feature, teacher_feature):
if (self.mode == 'paper'):
at_loss += (factor * self.compute_at_loss_paper(i, j))
else:
at_loss += (factor * self.compute_at_loss(i, j))
if (batch_size is None):
batch_size = len(i)
return (((at_loss / batch_size) * 100) if (self.mode == 'paper') else (at_loss * 100)) |
def _get_max_epoch_model(output_dir):
fn_model_list = glob.glob(os.path.join(output_dir, 'model.*.bin'))
fn_optim_list = glob.glob(os.path.join(output_dir, 'optim.*.bin'))
if ((not fn_model_list) or (not fn_optim_list)):
return None
both_set = (set([int(Path(fn).stem.split('.')[(- 1)]) for fn in fn_model_list]) & set([int(Path(fn).stem.split('.')[(- 1)]) for fn in fn_optim_list]))
if both_set:
return max(both_set)
else:
return None |
def test_initialization():
d = [Exponential(), Exponential()]
model = SparseHMM(d)
assert (model.inertia == 0.0)
assert (model.frozen == False)
assert (model.n_distributions == 2)
assert_raises(AttributeError, getattr, model, '_xw_sum')
assert_raises(AttributeError, getattr, model, '_xw_starts_sum')
assert_raises(AttributeError, getattr, model, '_xw_ends_sum')
assert (model.starts is None)
assert (model.ends is None)
assert (model.edges is None) |
class FFTOp(gof.Op):
__props__ = ()
def output_type(self, inp):
return T.TensorType(inp.dtype, broadcastable=([False] * inp.type.ndim))
def make_node(self, a, s=None):
a = T.as_tensor_variable(a)
if (a.ndim < 3):
raise TypeError((('%s: input must have dimension >= 3, with ' % self.__class__.__name__) + 'first dimension batches and last real/imag parts'))
if (s is None):
s = a.shape[(- 2)]
s = T.as_tensor_variable(s)
else:
s = T.as_tensor_variable(s)
if ((not s.dtype.startswith('int')) and (not s.dtype.startswith('uint'))):
raise TypeError(('%s: length of the transformed axis must be of type integer' % self.__class__.__name__))
return gof.Apply(self, [a, s], [self.output_type(a)()])
def perform(self, node, inputs, output_storage):
a = inputs[0]
s = inputs[1]
a_in = (a[(..., 0)] + (1j * a[(..., 1)]))
A = np.fft.fft(a_in)
out = np.zeros((A.shape + (2,)), dtype=a.dtype)
(out[(..., 0)], out[(..., 1)]) = (np.real(A), np.imag(A))
output_storage[0][0] = out
def grad(self, inputs, output_grads):
(gout,) = output_grads
s = inputs[1]
return [ifft_op(gout, s), DisconnectedType()()]
def connection_pattern(self, node):
return [[True], [False]] |
class PSI(JavaValue):
def __init__(self, jvalue=None, *args):
self.bigdl_type = 'float'
super().__init__(jvalue, self.bigdl_type, *args)
def get_salt(self, secure_code=''):
return callBigDlFunc(self.bigdl_type, 'psiGetSalt', self.value, secure_code)
def upload_set(self, ids, salt):
callBigDlFunc(self.bigdl_type, 'psiUploadSet', self.value, ids, salt)
def download_intersection(self, max_try=100, retry=3000):
return callBigDlFunc(self.bigdl_type, 'psiDownloadIntersection', self.value, max_try, retry)
def get_intersection(self, ids, max_try=100, retry=3000):
return callBigDlFunc(self.bigdl_type, 'psiGetIntersection', self.value, ids, max_try, retry) |
def make_model():
inputs = tf.keras.Input(shape=(None,), dtype='int64')
x = layers.Embedding(max_features, embedding_dim)(inputs)
predictions = make_backbone()(x)
model = Model(inputs, predictions)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model |
class SubsetSampler(Sampler):
def __init__(self, indices):
self.indices = indices
def __iter__(self):
return iter(self.indices)
def __len__(self):
return len(self.indices) |
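A minimal usage sketch for SubsetSampler with a DataLoader, assuming torch is available: unlike torch's SubsetRandomSampler, it visits the given indices in their original, deterministic order.
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.arange(10).float())
loader = DataLoader(dataset, batch_size=4, sampler=SubsetSampler([0, 2, 4, 6, 8]))
for (batch,) in loader:
    print(batch)  # tensor([0., 2., 4., 6.]) then tensor([8.]) |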