code stringlengths 101 5.91M |
|---|
def test_type_tracing_max_depth_after_get_attr():
    """Attribute chains past the nesting cap must stop returning ObjectProxy."""
    # A self-referential mock lets us follow `.foo` indefinitely.
    target = MagicMock()
    target.foo = target
    wrapped = tt.ObjectProxy(target)
    for _ in range(tt._MAX_PROXY_NESTING + 1):
        wrapped = wrapped.foo
    assert not isinstance(wrapped, tt.ObjectProxy)
# The decorator line was truncated to a bare `.parametrize(...)` attribute
# access, which is a syntax error; restore the `@pytest.mark` prefix.
@pytest.mark.parametrize('teacher,student', [(likelihood, AbsLikelihood(y=None)) for likelihood in LIKELIHOODS])
def test_likelihood_grad_RS(teacher, student):
    """RS gradients of A must match: mz = dA/dmz_hat, qz = -2 dA/dqz_hat, tz = -2 dA/dtz_hat."""
    df = check_likelihood_grad_RS(teacher, student)
    assert_allclose(df['mz'], df['grad_mz_hat_A'], rtol=0, atol=EPSILON)
    assert_allclose(df['qz'], (-2) * df['grad_qz_hat_A'], rtol=0, atol=EPSILON)
    assert_allclose(df['tz'], (-2) * df['grad_tz_hat_A'], rtol=0, atol=EPSILON)
class DataManager(object):
    """Epoch/batch iterator over an in-memory dataset.

    Modes:
      * infinite=True: every batch is a fresh random sample, forever.
      * simple=True: batches are direct slices of ``self.data``.
      * simple=False: batches walk ``self.data_index``, a precomputed
        per-epoch permutation list covering all epochs.
    align=True pads a short final batch up to ``batch_size``.
    """

    def __init__(self, data, num_epoch, batch_size, *, shuffle=True, align=False, simple=True, infinite=False):
        self.data = data
        self.data_length = len(data)
        self.num_epochs = num_epoch
        self.batch_size = batch_size
        # 1-based epoch/batch cursors; cur_pos indexes into data_index.
        self.cur_epoch = 1
        self.cur_batch = 1
        self.cur_pos = 0
        self.num_batch = int(math.ceil((float(self.data_length) / batch_size)))
        self.simple = simple
        self.align = align
        self.infinite = infinite
        self.data_index = []
        # Items needed to top up the last partial batch.
        # NOTE(review): when data_length is a multiple of batch_size,
        # res == batch_size (never 0), so align appends a full extra
        # batch in that case — confirm this is intended.
        res = (batch_size - (self.data_length % batch_size))
        if shuffle:
            # In-place shuffle: mutates the caller's `data` sequence.
            np.random.shuffle(self.data)
        # Pre-build the index sequence for every epoch up front.
        for i in range(num_epoch):
            if shuffle:
                ids = list(np.random.permutation(self.data_length))
            else:
                ids = [d for d in range(self.data_length)]
            if (align and (res != 0)):
                # Wrap around: reuse the epoch's first `res` ids as padding.
                add_on = ids[:res]
                ids.extend(add_on)
            self.data_index.extend(ids)

    def get_batch(self):
        """Return the next batch of items according to the configured mode."""
        if self.infinite:
            # Sample with replacement only when one batch exceeds the dataset.
            if (self.data_length < self.batch_size):
                replace = True
            else:
                replace = False
            idx = list(np.random.choice(self.data_length, self.batch_size, replace=replace))
            return [self.data[i] for i in idx]
        if self.simple:
            # Direct slice of (possibly pre-shuffled) data.
            batch = self.data[((self.cur_batch - 1) * self.batch_size):(self.cur_batch * self.batch_size)]
            if (self.align and (len(batch) < self.batch_size)):
                # Pad a short final batch by repeating its own leading items.
                batch = (batch + batch[:(self.batch_size - len(batch))])
            self.cur_pos += self.batch_size
        else:
            # Walk the precomputed per-epoch index list.
            start = self.cur_pos
            end = ((self.cur_pos + self.batch_size) - 1)
            batch = []
            while ((start != (end + 1)) and (start < len(self.data_index))):
                batch.append(self.data[self.data_index[start]])
                start += 1
            self.cur_pos = (end + 1)
        self.cur_batch += 1
        # Roll the epoch counter once num_batch batches have been served.
        if (((self.cur_batch - 1) % self.num_batch) == 0):
            self.cur_epoch += 1
            self.cur_batch = 1
        return batch
def generate_csrf(secret_key=None, token_key=None):
    """Return a signed CSRF token, generating and caching one per request.

    The raw token lives in the session under ``field_name``; the signed
    form is cached on ``flask.g`` so repeated calls within one request
    return the same value.
    """
    secret_key = _get_config(secret_key, 'WTF_CSRF_SECRET_KEY', current_app.secret_key, message='A secret key is required to use CSRF.')
    field_name = _get_config(token_key, 'WTF_CSRF_FIELD_NAME', 'csrf_token', message='A field name is required to use CSRF.')
    if (field_name not in g):
        s = URLSafeTimedSerializer(secret_key, salt='wtf-csrf-token')
        if (field_name not in session):
            # First request for this client: mint a random session token.
            session[field_name] = hashlib.sha1(os.urandom(64)).hexdigest()
        try:
            token = s.dumps(session[field_name])
        except TypeError:
            # Session value was not serializable (corrupted): regenerate it.
            session[field_name] = hashlib.sha1(os.urandom(64)).hexdigest()
            token = s.dumps(session[field_name])
        setattr(g, field_name, token)
    return g.get(field_name)
_assert
class LocLabel(Node):
    """AST node wrapping a ``loc_<N>`` location-label string."""

    def __init__(self, loc_id_str: str) -> None:
        super().__init__()
        self.loc_id_str = loc_id_str.strip()

    def id(self):
        """Numeric id after the 4-char 'loc_' prefix; -1 for a bare prefix."""
        if (len(self.loc_id_str) == 4):
            return (- 1)
        return int(self.loc_id_str[4:])

    def dump(self):
        return f'loc({self.loc_id_str})'

    @staticmethod
    def parse(loc_label_str):
        """Parse 'loc(<label>)' text into a LocLabel.

        Declared @staticmethod so it works when called from an instance as
        well as from the class; previously an instance call would have
        bound the instance to ``loc_label_str``.
        """
        # Strip the 'loc(' prefix and ')' suffix.
        return LocLabel(loc_label_str.strip()[4:(- 1)])
def test_setup_path_invalid_dir(tmp_path):
    """_setup_path must report failure when the project path does not exist."""
    missing_dir = tmp_path / 'nope'
    cfg = MagicMock(log_file=None, project_path=missing_dir)
    gen.set_configuration(configuration=cfg)
    assert gen._setup_path() is False
def reduce_max(seq_batch):
    """Max over the time axis (axis 1) of a padded SequenceBatch.

    Padded positions are set to -inf so they never win the max; the
    assert guards against fully-masked sequences, whose max would be
    -inf.  ``keep_dims`` is the TF1 spelling of ``keepdims``.
    """
    # Per-example count of unmasked timesteps; must be > 0.
    sums = tf.reduce_sum(seq_batch.mask, 1, keep_dims=True)
    with tf.control_dependencies([tf.assert_positive(sums)]):
        seq_batch = seq_batch.with_pad_value(float('-inf'))
        result = tf.reduce_max(seq_batch.values, 1)
    return result
class SE_Block(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Args:
        c: number of input (and output) channels.
        r: reduction ratio for the bottleneck MLP (default 16).
    """

    def __init__(self, c, r=16):
        super().__init__()
        # Global average pool: each channel map collapses to one scalar.
        self.squeeze = nn.AdaptiveAvgPool2d(1)
        # Bottleneck MLP producing a (0, 1) gate per channel.
        self.excitation = nn.Sequential(
            nn.Linear(c, c // r, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(c // r, c, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels = x.shape[0], x.shape[1]
        gate = self.squeeze(x).view(batch, channels)
        gate = self.excitation(gate).view(batch, channels, 1, 1)
        # Rescale every channel of x by its learned gate.
        return x * gate.expand_as(x)
class EdgeAndMatcher(BaseEdgeMatcher):
    """Conjunction matcher: matches only when both sub-matchers match."""

    def __init__(self, matcher_a: BaseEdgeMatcher, matcher_b: BaseEdgeMatcher):
        self.matcher_a = matcher_a
        self.matcher_b = matcher_b

    def apply(self, input_object) -> bool:
        first, second = self.matcher_a, self.matcher_b
        # `and` short-circuits: `second` is not consulted when `first` fails.
        return first.apply(input_object) and second.apply(input_object)
class UnetGenerator(nn.Module):
    """U-Net generator assembled from the innermost skip-block outward.

    Construction order (and therefore parameter-init order) matches the
    original: bottleneck, (num_downs - 5) ngf*8 blocks, then channel
    reduction 8->4->2->1, then the outermost block.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, att_mode=False):
        super(UnetGenerator, self).__init__()
        # Innermost bottleneck block (ngf*8 -> ngf*8).
        block = UnetSkipConnectionBlock((ngf * 8), (ngf * 8), input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
        # Intermediate full-width blocks, each wrapping the previous one.
        for _ in range(num_downs - 5):
            block = UnetSkipConnectionBlock((ngf * 8), (ngf * 8), input_nc=None, submodule=block, norm_layer=norm_layer, use_dropout=use_dropout)
        # Halve the channel count at each remaining level: 4x, 2x, 1x ngf.
        for mult in (4, 2, 1):
            block = UnetSkipConnectionBlock((ngf * mult), (ngf * mult * 2), input_nc=None, submodule=block, norm_layer=norm_layer)
        self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=block, outermost=True, norm_layer=norm_layer, att_mode=att_mode)

    def forward(self, input):
        return self.model(input)
class Status(object):
    """Namespace of plain-string session status constants."""

    Waiting = 'waiting'
    Chat = 'chat'
    Finished = 'finished'
    Survey = 'survey'
    Redirected = 'redirected'
    Incomplete = 'incomplete'
    Reporting = 'reporting'
class ScalarTrackingFunctional(Functional):
    """Tracking functional J = (weight / 2) * (integral - goal)^2.

    The tracking goal may be a plain number or a ctypes scalar; the latter
    is re-read on every evaluation so an external caller can mutate it.
    The weight and the last integral value are stored as real-valued
    (R, 0) fenics Functions so they can appear in UFL forms.
    """

    def __init__(self, integrand: ufl.Form, tracking_goal: Union[(float, int, ctypes.c_float, ctypes.c_double)], weight: Union[(float, int)]=1.0) -> None:
        super().__init__()
        self.integrand = integrand
        self.tracking_goal = tracking_goal
        if (not isinstance(self.tracking_goal, (ctypes.c_float, ctypes.c_double))):
            self.tracking_goal_value = self.tracking_goal
        else:
            # ctypes scalar: dereference its current value.
            self.tracking_goal_value = self.tracking_goal.value
        # Recover the mesh from the form's first integral.
        mesh = self.integrand.integrals()[0].ufl_domain().ufl_cargo()
        self.integrand_value = fenics.Function(fenics.FunctionSpace(mesh, 'R', 0))
        self.weight = fenics.Function(fenics.FunctionSpace(mesh, 'R', 0))
        self.weight.vector().vec().set(weight)
        self.weight.vector().apply('')

    def evaluate(self) -> float:
        """Assemble the integrand and return the scalar tracking cost."""
        if isinstance(self.tracking_goal, (ctypes.c_float, ctypes.c_double)):
            # Pick up any external mutation of the ctypes goal.
            self.tracking_goal_value = self.tracking_goal.value
        scalar_integral_value = fenics.assemble(self.integrand)
        self.integrand_value.vector().vec().set(scalar_integral_value)
        self.integrand_value.vector().apply('')
        val: float = ((self.weight.vector().vec().sum() / 2.0) * pow((scalar_integral_value - self.tracking_goal_value), 2))
        return val

    def derivative(self, argument: ufl.core.expr.Expr, direction: ufl.core.expr.Expr) -> ufl.Form:
        """UFL derivative of the cost w.r.t. `argument` in `direction`."""
        if isinstance(self.tracking_goal, (ctypes.c_float, ctypes.c_double)):
            self.tracking_goal_value = self.tracking_goal.value
        # d/du [w/2 (I - g)^2] = w (I - g) dI/du, using the cached I value.
        derivative = fenics.derivative(((self.weight * (self.integrand_value - fenics.Constant(self.tracking_goal_value))) * self.integrand), argument, direction)
        return derivative

    def coefficients(self) -> Tuple[fenics.Function]:
        coeffs: Tuple[fenics.Function] = self.integrand.coefficients()
        return coeffs

    def scale(self, scaling_factor: Union[(float, int)]) -> None:
        """Overwrite (not multiply) the weight with `scaling_factor`."""
        self.weight.vector().vec().set(scaling_factor)
        self.weight.vector().apply('')

    def update(self) -> None:
        """Refresh the cached integral value used by `derivative`."""
        scalar_integral_value = fenics.assemble(self.integrand)
        self.integrand_value.vector().vec().set(scalar_integral_value)
        self.integrand_value.vector().apply('')
def _cfg(url=''):
    """Default timm-style pretrained-config dict for this model family."""
    return dict(
        url=url,
        num_classes=1000,
        input_size=(3, 224, 224),
        pool_size=(7, 7),
        crop_pct=0.875,
        interpolation='bicubic',
        mean=IMAGENET_DEFAULT_MEAN,
        std=IMAGENET_DEFAULT_STD,
        first_conv='stem.0.conv',
        classifier='head.fc',
    )
# The original had a truncated `_grad()` decorator line here; this is an
# inference-only metric, so restore the evident `@torch.no_grad()`.
@torch.no_grad()
def calculate_lpips_intervals(group_of_images, intervals):
    """Mean pairwise LPIPS between images whose index gap is in `intervals`.

    Args:
        group_of_images: sequence of image tensors.
        intervals: index gaps to aggregate over.
    Returns:
        One mean LPIPS value (float) per interval, in `intervals` order.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    lpips = LPIPS().eval().to(device)
    lpips_values = [[] for _ in range(len(intervals))]
    # Map interval size -> output slot; also gives O(1) membership tests
    # (the original tested membership against the list).
    inr_idx = {i: j for (j, i) in enumerate(intervals)}
    num_rand_outputs = len(group_of_images)
    for i in range(num_rand_outputs - 1):
        for j in range(i + 1, num_rand_outputs):
            if (j - i) in inr_idx:
                lpips_values[inr_idx[j - i]].append(lpips(group_of_images[i], group_of_images[j]))
    lpips_values = [torch.mean(torch.stack(vals, dim=0)).item() for vals in lpips_values]
    return lpips_values
def run_se(a0, alpha, prior_rho, prior_mean):
    """Run state evolution for the abs-output gauss-bernoulli GLM."""
    se_model = glm_state_evolution(
        alpha=alpha, prior_type='gauss_bernoulli', output_type='abs',
        prior_rho=prior_rho, prior_mean=prior_mean)
    # Seed only the backward 'x' message with a0.
    initializer = CustomInit(a_init=[('x', 'bwd', a0)])
    return run_state_evolution(x_ids=['x', 'z'], model=se_model,
                               max_iter=200, initializer=initializer)
def get_args():
    """Build the CLI (FFN training flags + hyperopt flags) and parse it."""
    arg_parser = argparse.ArgumentParser()
    ffn_train.add_ffn_train_args(arg_parser)
    nn_utils.add_hyperopt_args(arg_parser)
    return arg_parser.parse_args()
class LitDataset(Dataset):
    """Wraps a base dataset and emits multiple views of each image.

    Per item it produces: a VGG-normalized tensor, a reconstruction target
    (LAB or centered RGB), a DeepLab-normalized tensor, two color-jittered
    augmentations, plus pass-through annotations (mask/seg/kp/...).
    Missing optional fields default to [].
    """

    def __init__(self, dataset, use_lab=True):
        self.dataset = dataset
        # use_lab selects LAB color space for the reconstruction target.
        self.use_lab = use_lab
        # Color-jitter + random grayscale augmentation, DeepLab-normalized.
        self.dlcj_transform = transforms.Compose([transforms.RandomApply([transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.2, hue=0.2)], p=0.8), transforms.RandomGrayscale(p=0.2), ToTensorDeepLabNormalized()])
        # Un-augmented DeepLab normalization.
        self.dl_transform = transforms.Compose([ToTensorDeepLabNormalized()])
        self.lab = transforms.Compose([ToLAB()])
        # Standard ImageNet normalization for the VGG branch.
        self.vgg_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

    def __getitem__(self, idx):
        example = self.dataset[idx]
        # Copy so the randomized transforms never mutate the cached image.
        img = example['img'].copy()
        elem = {'img_vgg': self.vgg_transform(img), 'img_rec': (self.lab(img) if self.use_lab else (to_tensor(img) - 128)), 'img': self.dl_transform(img), 'img_cj1': self.dlcj_transform(img), 'img_cj2': self.dlcj_transform(img), 'mask': torch.from_numpy(example['mask']).permute(2, 0, 1), 'seg': (torch.from_numpy(example['seg']).permute(2, 0, 1) if ('seg' in example) else []), 'inds': (example['inds'] if ('inds' in example) else []), 'kp': (example['kp'] if ('kp' in example) else []), 'label': (example['label'] if ('label' in example) else []), 'img_path': (example['img_path'] if ('img_path' in example) else []), 'landmarks': (example['landmarks'] if ('landmarks' in example) else [])}
        return elem

    def __len__(self):
        return len(self.dataset)
def run_thread(iteration_dir, design_file, opt):
    """Run one optimization pass in its own subdirectory and log the result."""
    opt_dir = os.path.join(iteration_dir, opt)
    # NOTE(review): `library_file` comes from enclosing/module scope.
    opt_file, delay, area = run_optimization(opt_dir, opt, design_file, library_file)
    message = 'Optimization: ' + opt + ' -> delay: ' + str(delay) + ', area: ' + str(area)
    log(message)
    return (opt, opt_file, delay, area)
class CriterionCrossEntropy(nn.Module):
    """Cross-entropy loss that first resizes logits to the target's H x W.

    Args:
        ignore_index: target value excluded from the loss (default 255,
            the conventional "void" label in segmentation datasets).
    """

    def __init__(self, ignore_index=255):
        super(CriterionCrossEntropy, self).__init__()
        self.ignore_index = ignore_index
        self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index)

    def forward(self, preds, target):
        """preds: (N, C, h', w') logits; target: (N, h, w) class indices."""
        (h, w) = (target.size(1), target.size(2))
        # F.upsample is deprecated; F.interpolate is the supported API
        # with identical bilinear/align_corners semantics.
        scale_pred = F.interpolate(input=preds, size=(h, w), mode='bilinear', align_corners=True)
        loss = self.criterion(scale_pred, target)
        return loss
def finish(config: DictConfig, model: pl.LightningModule, datamodule: pl.LightningDataModule, trainer: pl.Trainer, callbacks: List[pl.Callback], logger: List[pl.loggers.LightningLoggerBase]) -> None:
    """Close out external experiment trackers once a run ends."""
    for logger_instance in logger:
        if not isinstance(logger_instance, WandbLogger):
            continue
        # wandb keeps a global run open; close it explicitly.
        wandb.finish()
def _find_compiler_bindir():
patterns = ['C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64', 'C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64', 'C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64', 'C:/Program Files (x86)/Microsoft Visual Studio */vc/bin']
for pattern in patterns:
matches = sorted(glob.glob(pattern))
if len(matches):
return matches[(- 1)]
return None |
def anti_wrapping_function(x):
    """Distance from each element of x to its nearest multiple of 2*pi."""
    nearest_multiple = (torch.round(x / (2 * np.pi)) * 2) * np.pi
    return torch.abs(x - nearest_multiple)
def extract_frames(filename: str, save_dir: str, transforms: transforms.Compose=None) -> None:
    """Decode a video into per-frame PNGs named '<n>.png' under save_dir.

    Args:
        filename: path to the input video.
        save_dir: output directory, created lazily once a frame decodes.
        transforms: optional callable applied to each BGR frame; its
            (C, H, W) tensor output is converted back to (H, W, C) before
            writing.
    Raises:
        FileNotFoundError: if `filename` does not exist.
    """
    if not os.path.exists(filename):
        raise FileNotFoundError('%s does not exist!' % filename)
    print('Decomposing %s.' % filename)
    capture = cv2.VideoCapture(filename)
    try:
        (ret, frame) = capture.read()
        frame_no = 0
        while ret:
            # exist_ok avoids the original per-iteration exists() check.
            os.makedirs(save_dir, exist_ok=True)
            save_path = os.path.join(save_dir, '%s.png' % frame_no)
            if transforms is not None:
                frame = transforms(frame)
                frame = frame.permute(1, 2, 0).numpy()
            cv2.imwrite(save_path, frame)
            frame_no += 1
            (ret, frame) = capture.read()
    finally:
        # The original leaked the capture handle; release it even on error.
        capture.release()
def pytest_configure(config):
    """Pytest hook: register the `gpu` marker and cap TF GPU memory use."""
    marker_spec = 'gpu: run opencl-based tests on the gpu'
    config.addinivalue_line('markers', marker_spec)
    _limit_tf_gpu_memory()
def filter_class(dataset, classes):
    """Keep only samples whose label is in `classes` (mutates `dataset`).

    Samples are regrouped by the order of `classes`, not kept in their
    original order. Supports datasets backed by numpy arrays or torch
    tensors; list targets are converted to a torch tensor first.

    Returns:
        (dataset, len(classes))
    Raises:
        TypeError: if dataset.data is neither np.ndarray nor torch.Tensor.
    """
    (data, labels) = (dataset.data, dataset.targets)
    # isinstance (not type == ...) so subclasses are handled too.
    if isinstance(labels, list):
        labels = torch.tensor(labels)
    data_filter = []
    labels_filter = []
    for _class in classes:
        idx = (labels == _class)
        data_filter.append(data[idx])
        labels_filter.append(labels[idx])
    if isinstance(dataset.data, np.ndarray):
        dataset.data = np.vstack(data_filter)
        dataset.targets = np.hstack(labels_filter)
    elif isinstance(dataset.data, torch.Tensor):
        dataset.data = torch.cat(data_filter)
        dataset.targets = torch.cat(labels_filter)
    else:
        raise TypeError('dataset.data type neither np.ndarray nor torch.Tensor')
    return (dataset, len(classes))
def main():
    """mmdetection training entry point: build config, env, model, then train."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # Expand ${...} placeholders and honor the MMDET_DATASETS data root.
    cfg = replace_cfg_vals(cfg)
    update_data_root(cfg)
    if (args.cfg_options is not None):
        cfg.merge_from_dict(args.cfg_options)
    # Optional linear LR scaling; requires the config to declare its base
    # batch size so the scaling factor can be computed.
    if args.auto_scale_lr:
        if (('auto_scale_lr' in cfg) and ('enable' in cfg.auto_scale_lr) and ('base_batch_size' in cfg.auto_scale_lr)):
            cfg.auto_scale_lr.enable = True
        else:
            warnings.warn('Can not find "auto_scale_lr" or "auto_scale_lr.enable" or "auto_scale_lr.base_batch_size" in your configuration file. Please update all the configuration files to mmdet >= 2.24.1.')
    setup_multi_processes(cfg)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Work dir precedence: CLI flag > config value > derived from config name.
    if (args.work_dir is not None):
        cfg.work_dir = args.work_dir
    elif (cfg.get('work_dir', None) is None):
        cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
    if (args.resume_from is not None):
        cfg.resume_from = args.resume_from
    cfg.auto_resume = args.auto_resume
    # GPU selection: --gpus and --gpu-ids are deprecated single-GPU paths.
    if (args.gpus is not None):
        cfg.gpu_ids = range(1)
        warnings.warn('`--gpus` is deprecated because we only support single GPU mode in non-distributed training. Use `gpus=1` now.')
    if (args.gpu_ids is not None):
        cfg.gpu_ids = args.gpu_ids[0:1]
        warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. Because we only support single GPU mode in non-distributed training. Use the first GPU in `gpu_ids` now.')
    if ((args.gpus is None) and (args.gpu_ids is None)):
        cfg.gpu_ids = [args.gpu_id]
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # Distributed: one entry per rank in the world.
        (_, world_size) = get_dist_info()
        cfg.gpu_ids = range(world_size)
    # Persist the resolved config next to the logs for reproducibility.
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # `meta` travels with checkpoints (env, config, seed, exp name).
    meta = dict()
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for (k, v) in env_info_dict.items()])
    dash_line = (('-' * 60) + '\n')
    logger.info((((('Environment info:\n' + dash_line) + env_info) + '\n') + dash_line))
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'''Config:
{cfg.pretty_text}''')
    cfg.device = get_device()
    # Optionally offset the seed by rank so ranks draw different streams.
    seed = init_random_seed(args.seed, device=cfg.device)
    seed = ((seed + dist.get_rank()) if args.diff_seed else seed)
    logger.info(f'Set random seed to {seed}, deterministic: {args.deterministic}')
    set_random_seed(seed, deterministic=args.deterministic)
    cfg.seed = seed
    meta['seed'] = seed
    meta['exp_name'] = osp.basename(args.config)
    model = build_detector(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
    model.init_weights()
    datasets = [build_dataset(cfg.data.train)]
    # A 2-stage workflow also trains on the val split (with train pipeline).
    if (len(cfg.workflow) == 2):
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if (cfg.checkpoint_config is not None):
        # Stamp checkpoints with the mmdet version and class names.
        cfg.checkpoint_config.meta = dict(mmdet_version=(__version__ + get_git_hash()[:7]), CLASSES=datasets[0].CLASSES)
    model.CLASSES = datasets[0].CLASSES
    train_detector(model, datasets, cfg, distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, meta=meta)
def _get_next_run_id_local(run_dir_root: str) -> int:
dir_names = [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))]
r = re.compile('^\\d+')
run_id = 0
for dir_name in dir_names:
m = r.match(dir_name)
if (m is not None):
i = int(m.group())
run_id = max(run_id, (i + 1))
return run_id |
def grid_points_in_poly(shape, verts, binarize=True):
    """Rasterize polygon membership over a grid; optionally cast to bool."""
    membership = _grid_points_in_poly(shape, verts)
    return membership.astype(bool) if binarize else membership
def write_cpp_head(f, chip, file_name):
    """Write the generated-file banner and common includes to stream `f`.

    The banner records the generator script, generation time (UTC) and the
    source register-definition file so regenerated output is traceable.
    """
    # The triple-quoted block is emitted verbatim; its lines start at
    # column 0 on purpose so the C++ comment block is not indented.
    cpp_head = f'''// ====- {chip.lower()}RefDef.cpp - {chip.upper()} register definition
//
// Copyright (C) 2022 Sophgo Technologies Inc. All rights reserved.
//
// TPU-MLIR is licensed under the 2-Clause BSD License except for the
// third-party components.
//
//
//
// automatically generated by {__file__}
// time: {strftime('%Y-%m-%d %H:%M:%S', gmtime())}
// this file should not be changed except format.
// reg_def_file: {file_name}
'''
    f.write(cpp_head)
    f.write('#pragma once\n')
    f.write('#include <cstdint>\n')
def parse_opt():
    """Build, parse and validate the full captioning-training CLI.

    Supports layering a YAML config (--cfg) and key/value overrides
    (--set_cfgs) under the CLI flags: config values fill in defaults,
    then the command line is re-parsed on top so explicit flags win.
    """
    parser = argparse.ArgumentParser()
    # -- Data input settings --
    parser.add_argument('--input_json', type=str, default='data/coco.json', help='path to the json file containing additional info and vocab')
    parser.add_argument('--input_fc_dir', type=str, default='data/cocotalk_fc', help='path to the directory containing the preprocessed fc feats')
    parser.add_argument('--input_att_dir', type=str, default='data/cocotalk_att', help='path to the directory containing the preprocessed att feats')
    parser.add_argument('--input_box_dir', type=str, default='data/cocotalk_box', help='path to the directory containing the boxes of att feats')
    parser.add_argument('--input_label_h5', type=str, default='data/coco_label.h5', help='path to the h5file containing the preprocessed dataset')
    parser.add_argument('--data_in_memory', action='store_true', help='True if we want to save the features in memory')
    parser.add_argument('--start_from', type=str, default=None, help="continue training from saved model at this path. Path must contain files saved by previous training process: \n 'infos.pkl' : configuration;\n 'model.pth' : weights\n ")
    parser.add_argument('--cached_tokens', type=str, default='coco-train-idxs', help='Cached token file for calculating cider score during self critical training.')
    # -- Model settings --
    parser.add_argument('--caption_model', type=str, default='show_tell', help='show_tell, show_attend_tell, all_img, fc, att2in, att2in2, att2all2, adaatt, adaattmo, updown, stackatt, denseatt, transformer')
    parser.add_argument('--rnn_size', type=int, default=512, help='size of the rnn in number of hidden nodes in each layer')
    parser.add_argument('--num_layers', type=int, default=1, help='number of layers in the RNN')
    parser.add_argument('--rnn_type', type=str, default='lstm', help='rnn, gru, or lstm')
    parser.add_argument('--input_encoding_size', type=int, default=512, help='the encoding size of each token in the vocabulary, and the image.')
    parser.add_argument('--att_hid_size', type=int, default=512, help='the hidden size of the attention MLP; only useful in show_attend_tell; 0 if not using hidden layer')
    parser.add_argument('--fc_feat_size', type=int, default=2048, help='2048 for resnet, 4096 for vgg')
    parser.add_argument('--att_feat_size', type=int, default=2048, help='2048 for resnet, 512 for vgg')
    # Fixed copy-pasted help text (was 'number of layers in the RNN').
    parser.add_argument('--logit_layers', type=int, default=1, help='number of layers used to compute the output logits')
    parser.add_argument('--use_bn', type=int, default=0, help='If 1, then do batch_normalization first in att_embed, if 2 then do bn both in the beginning and the end of att_embed')
    parser.add_argument('--norm_att_feat', type=int, default=0, help='If normalize attention features')
    parser.add_argument('--use_box', type=int, default=0, help='If use box features')
    parser.add_argument('--norm_box_feat', type=int, default=0, help='If use box, do we normalize box feature')
    # -- Optimization: general --
    parser.add_argument('--max_epochs', type=int, default=-1, help='number of epochs')
    parser.add_argument('--batch_size', type=int, default=16, help='minibatch size')
    parser.add_argument('--grad_clip_mode', type=str, default='value', help='value or norm')
    parser.add_argument('--grad_clip_value', type=float, default=0.1, help='clip gradients at this value/max_norm, 0 means no clipping')
    parser.add_argument('--drop_prob_lm', type=float, default=0.5, help='strength of dropout in the Language Model RNN')
    parser.add_argument('--self_critical_after', type=int, default=-1, help='After what epoch do we start finetuning the CNN? (-1 = disable; never finetune, 0 = finetune from start)')
    parser.add_argument('--seq_per_img', type=int, default=5, help='number of captions to sample for each image during training. Done for efficiency since CNN forward pass is expensive. E.g. coco has 5 sents/image')
    add_eval_sample_opts(parser)
    # -- Optimizer --
    parser.add_argument('--optim', type=str, default='adam', help='what update to use? rmsprop|sgd|sgdmom|adagrad|adam|adamw')
    parser.add_argument('--learning_rate', type=float, default=0.0004, help='learning rate')
    parser.add_argument('--learning_rate_decay_start', type=int, default=-1, help='at what iteration to start decaying learning rate? (-1 = dont) (in epoch)')
    parser.add_argument('--learning_rate_decay_every', type=int, default=3, help='every how many iterations thereafter to drop LR?(in epoch)')
    parser.add_argument('--learning_rate_decay_rate', type=float, default=0.8, help='every how many iterations thereafter to drop LR?(in epoch)')
    parser.add_argument('--optim_alpha', type=float, default=0.9, help='alpha for adam')
    parser.add_argument('--optim_beta', type=float, default=0.999, help='beta used for adam')
    parser.add_argument('--optim_epsilon', type=float, default=1e-08, help='epsilon that goes into denominator for smoothing')
    parser.add_argument('--weight_decay', type=float, default=0, help='weight_decay')
    parser.add_argument('--label_smoothing', type=float, default=0, help='')
    parser.add_argument('--noamopt', action='store_true', help='')
    parser.add_argument('--noamopt_warmup', type=int, default=2000, help='')
    parser.add_argument('--noamopt_factor', type=float, default=1, help='')
    parser.add_argument('--reduce_on_plateau', action='store_true', help='')
    parser.add_argument('--reduce_on_plateau_factor', type=float, default=0.5, help='')
    parser.add_argument('--reduce_on_plateau_patience', type=int, default=3, help='')
    parser.add_argument('--cached_transformer', action='store_true', help='')
    parser.add_argument('--use_warmup', action='store_true', help='warm up the learing rate?')
    parser.add_argument('--scheduled_sampling_start', type=int, default=-1, help='at what iteration to start decay gt probability')
    parser.add_argument('--scheduled_sampling_increase_every', type=int, default=5, help='every how many iterations thereafter to gt probability')
    parser.add_argument('--scheduled_sampling_increase_prob', type=float, default=0.05, help='How much to update the prob')
    parser.add_argument('--scheduled_sampling_max_prob', type=float, default=0.25, help='Maximum scheduled sampling prob.')
    # -- Evaluation / checkpointing --
    parser.add_argument('--val_images_use', type=int, default=3200, help='how many images to use when periodically evaluating the validation loss? (-1 = all)')
    parser.add_argument('--save_checkpoint_every', type=int, default=2500, help='how often to save a model checkpoint (in iterations)?')
    parser.add_argument('--save_every_epoch', action='store_true', help='Save checkpoint every epoch, will overwrite save_checkpoint_every')
    parser.add_argument('--save_history_ckpt', type=int, default=0, help='If save checkpoints at every save point')
    parser.add_argument('--checkpoint_path', type=str, default=None, help='directory to store checkpointed models')
    parser.add_argument('--language_eval', type=int, default=0, help='Evaluate language as well (1 = yes, 0 = no)? BLEU/CIDEr/METEOR/ROUGE_L? requires coco-caption code from Github.')
    parser.add_argument('--losses_log_every', type=int, default=25, help='How often do we snapshot losses, for inclusion in the progress dump? (0 = disable)')
    parser.add_argument('--load_best_score', type=int, default=1, help='Do we load previous best score when resuming training.')
    parser.add_argument('--id', type=str, default='', help='an id identifying this run/job. used in cross-val and appended when writing progress files')
    parser.add_argument('--train_only', type=int, default=0, help='if true then use 80k, else use 110k')
    # -- Reward / structure losses --
    parser.add_argument('--cider_reward_weight', type=float, default=1, help='The reward weight from cider')
    parser.add_argument('--bleu_reward_weight', type=float, default=0, help='The reward weight from bleu4')
    parser.add_argument('--structure_loss_weight', type=float, default=1, help='')
    parser.add_argument('--structure_after', type=int, default=-1, help='')
    parser.add_argument('--structure_loss_type', type=str, default='seqnll', help='')
    parser.add_argument('--struc_use_logsoftmax', action='store_true', help='')
    parser.add_argument('--entropy_reward_weight', type=float, default=0, help='Entropy reward, seems very interesting')
    parser.add_argument('--self_cider_reward_weight', type=float, default=0, help='self cider reward')
    parser.add_argument('--use_ppo', type=int, default=0, help='if use ppo. when using ppo, we reuse things like structure_loss_weight and structure_after.')
    parser.add_argument('--ppo_old_model_path', type=str, default=None, help='The old model used to calculate PPO loss.')
    parser.add_argument('--ppo_cliprange', type=float, default=0.2, help='cliprange for PPO. 0.2 is used by InstructGPT')
    parser.add_argument('--ppo_kl_coef', type=float, default=0.02, help='kl reward cooef for PPO. 0.02 is used by InstructGPT')
    # Fixed copy-pasted help text (was 'The reward weight from cider').
    parser.add_argument('--train_sample_n', type=int, default=16, help='number of captions sampled per image during training')
    parser.add_argument('--train_sample_method', type=str, default='sample', help='')
    parser.add_argument('--train_beam_size', type=int, default=1, help='')
    parser.add_argument('--sc_sample_method', type=str, default='greedy', help='')
    parser.add_argument('--sc_beam_size', type=int, default=1, help='')
    parser.add_argument('--drop_worst_after', type=float, default=-1, help='')
    parser.add_argument('--drop_worst_rate', type=float, default=0, help='')
    add_diversity_opts(parser)
    parser.add_argument('--cfg', type=str, default=None, help='configuration; similar to what is used in detectron')
    parser.add_argument('--set_cfgs', dest='set_cfgs', help='Set config keys. Key value sequence seperate by whitespace.e.g. [key] [value] [key] [value]\n This has higher prioritythan cfg file but lower than other args. (You can only overwritearguments that have alerady been defined in config file.)', default=[], nargs='+')
    args = parser.parse_args()
    # Layer config-file values under explicit CLI flags.
    if ((args.cfg is not None) or (args.set_cfgs is not None)):
        from .config import CfgNode
        if (args.cfg is not None):
            cn = CfgNode(CfgNode.load_yaml_with_base(args.cfg))
        else:
            cn = CfgNode()
        if (args.set_cfgs is not None):
            cn.merge_from_list(args.set_cfgs)
        for (k, v) in cn.items():
            if (not hasattr(args, k)):
                print(('Warning: key %s not in args' % k))
            setattr(args, k, v)
        # Re-parse so command-line flags override config-file values.
        args = parser.parse_args(namespace=args)
    # Sanity checks. Messages fixed: the last two previously reused the
    # 'language_eval' text by copy-paste.
    assert (args.rnn_size > 0), 'rnn_size should be greater than 0'
    assert (args.num_layers > 0), 'num_layers should be greater than 0'
    assert (args.input_encoding_size > 0), 'input_encoding_size should be greater than 0'
    assert (args.batch_size > 0), 'batch_size should be greater than 0'
    assert ((args.drop_prob_lm >= 0) and (args.drop_prob_lm < 1)), 'drop_prob_lm should be between 0 and 1'
    assert (args.seq_per_img > 0), 'seq_per_img should be greater than 0'
    assert (args.beam_size > 0), 'beam_size should be greater than 0'
    assert (args.save_checkpoint_every > 0), 'save_checkpoint_every should be greater than 0'
    assert (args.losses_log_every > 0), 'losses_log_every should be greater than 0'
    assert ((args.language_eval == 0) or (args.language_eval == 1)), 'language_eval should be 0 or 1'
    assert ((args.load_best_score == 0) or (args.load_best_score == 1)), 'load_best_score should be 0 or 1'
    assert ((args.train_only == 0) or (args.train_only == 1)), 'train_only should be 0 or 1'
    # Derived defaults.
    args.checkpoint_path = (args.checkpoint_path or ('./log_%s' % args.id))
    args.start_from = (args.start_from or args.checkpoint_path)
    (args.use_fc, args.use_att) = if_use_feat(args.caption_model)
    if args.use_box:
        # Box features append (x, y, w, h, area) to each attention feature.
        args.att_feat_size = (args.att_feat_size + 5)
    return args
def _seg_20():
    """Generated UTS-46/IDNA mapping-table segment; do not edit by hand.

    NOTE(review): every mapped replacement here is u'', which looks like
    mojibake from an encoding-lossy extraction of the generated table —
    verify against the upstream generator output before relying on it.
    """
    return [(8093, 'M', u''), (8094, 'M', u''), (8095, 'M', u''), (8096, 'M', u''), (8097, 'M', u''), (8098, 'M', u''), (8099, 'M', u''), (8100, 'M', u''), (8101, 'M', u''), (8102, 'M', u''), (8103, 'M', u''), (8104, 'M', u''), (8105, 'M', u''), (8106, 'M', u''), (8107, 'M', u''), (8108, 'M', u''), (8109, 'M', u''), (8110, 'M', u''), (8111, 'M', u''), (8112, 'V'), (8114, 'M', u''), (8115, 'M', u''), (8116, 'M', u''), (8117, 'X'), (8118, 'V'), (8119, 'M', u''), (8120, 'M', u''), (8121, 'M', u''), (8122, 'M', u''), (8123, 'M', u''), (8124, 'M', u''), (8125, '3', u' '), (8126, 'M', u''), (8127, '3', u' '), (8128, '3', u' '), (8129, '3', u' '), (8130, 'M', u''), (8131, 'M', u''), (8132, 'M', u''), (8133, 'X'), (8134, 'V'), (8135, 'M', u''), (8136, 'M', u''), (8137, 'M', u''), (8138, 'M', u''), (8139, 'M', u''), (8140, 'M', u''), (8141, '3', u' '), (8142, '3', u' '), (8143, '3', u' '), (8144, 'V'), (8147, 'M', u''), (8148, 'X'), (8150, 'V'), (8152, 'M', u''), (8153, 'M', u''), (8154, 'M', u''), (8155, 'M', u''), (8156, 'X'), (8157, '3', u' '), (8158, '3', u' '), (8159, '3', u' '), (8160, 'V'), (8163, 'M', u''), (8164, 'V'), (8168, 'M', u''), (8169, 'M', u''), (8170, 'M', u''), (8171, 'M', u''), (8172, 'M', u''), (8173, '3', u' '), (8174, '3', u' '), (8175, '3', u'`'), (8176, 'X'), (8178, 'M', u''), (8179, 'M', u''), (8180, 'M', u''), (8181, 'X'), (8182, 'V'), (8183, 'M', u''), (8184, 'M', u''), (8185, 'M', u''), (8186, 'M', u''), (8187, 'M', u''), (8188, 'M', u''), (8189, '3', u' '), (8190, '3', u' '), (8191, 'X'), (8192, '3', u' '), (8203, 'I'), (8204, 'D', u''), (8206, 'X'), (8208, 'V'), (8209, 'M', u''), (8210, 'V'), (8215, '3', u' '), (8216, 'V'), (8228, 'X'), (8231, 'V'), (8232, 'X')]
def subsample_dataset(dataset, idxs):
    """Restrict `dataset` in place to the rows selected by `idxs`.

    NOTE(review): `data` is filtered with a boolean mask (which yields
    sorted, de-duplicated order) while `uq_idxs` is fancy-indexed with
    `idxs` directly; if `idxs` is unsorted or contains duplicates the two
    fields fall out of alignment — confirm callers always pass sorted,
    unique indices.
    """
    mask = np.zeros(len(dataset)).astype('bool')
    mask[idxs] = True
    dataset.data = dataset.data[mask]
    dataset.uq_idxs = dataset.uq_idxs[idxs]
    return dataset
class storage():
    """Singleton wrapper around an Azure Blob Storage service client."""

    instance = None  # lazily-created singleton; see get_instance()
    client = None

    def __init__(self):
        # Requires STORAGE_CONNECTION_STRING in the environment.
        self.client = BlobServiceClient.from_connection_string(os.getenv('STORAGE_CONNECTION_STRING'))

    @staticmethod
    def unique_name(name):
        """Return `name` with a short random token inserted before its extension.

        Fixes two defects: the original split the literal string '.'
        instead of `name` (discarding the real stem/extension), and its
        format string added a second dot before the already-dotted
        extension. Declared @staticmethod to match how it is called
        (storage.unique_name(...)).
        """
        (stem, extension) = os.path.splitext(name)
        token = str(uuid.uuid4()).split('-')[0]
        return '{name}.{random}{extension}'.format(name=stem, extension=extension, random=token)

    def upload(self, container, file, filepath):
        """Upload a local file; returns the randomized blob name used."""
        with open(filepath, 'rb') as data:
            return self.upload_stream(container, file, data)

    def download(self, container, file, filepath):
        """Download blob `file` to the local path `filepath`."""
        with open(filepath, 'wb') as download_file:
            download_file.write(self.download_stream(container, file))

    def download_directory(self, container, prefix, path):
        """Download every blob under `prefix` into `path`, recreating subdirs."""
        client = self.client.get_container_client(container=container)
        for obj in client.list_blobs(name_starts_with=prefix):
            file_name = obj.name
            os.makedirs(os.path.join(path, os.path.dirname(file_name)), exist_ok=True)
            self.download(container, file_name, os.path.join(path, file_name))

    def upload_stream(self, container, file, data):
        """Upload a stream under a randomized blob name; returns that name."""
        key_name = storage.unique_name(file)
        client = self.client.get_blob_client(container=container, blob=key_name)
        client.upload_blob(data)
        return key_name

    def download_stream(self, container, file):
        """Return the full contents of blob `file` as bytes."""
        client = self.client.get_blob_client(container=container, blob=file)
        return client.download_blob().readall()

    @staticmethod
    def get_instance():
        """Return the process-wide singleton, creating it on first use."""
        if (storage.instance is None):
            storage.instance = storage()
        return storage.instance
def test_extract_nodes_dups():
    """A model referenced twice must appear exactly once in the extracted list."""
    shared = ModelB()
    root = ModelA()
    root.ref_field = shared
    root.ref_field2 = shared
    collected = []
    schema._extract_nodes(root, collected)
    assert (len(collected) == 2)
    assert (root in collected)
    assert (shared in collected)
class TrainingStats(object):
    """Accumulates and logs smoothed training statistics (losses, metrics, timing).

    Values are median-smoothed over a sliding window of ``WIN_SZ`` iterations.
    When gradient accumulation is used (``misc_args.iter_size > 1``), per-inner-
    iteration values are buffered in the ``inner_*`` members and averaged into
    the smoothers once per effective (outer) iteration.
    """
    def __init__(self, misc_args, log_period=20, tensorboard_logger=None):
        # misc_args must expose `iter_size` (gradient-accumulation factor).
        self.misc_args = misc_args
        self.LOG_PERIOD = log_period
        self.tblogger = tensorboard_logger
        # Keys never forwarded to tensorboard (non-scalar bookkeeping).
        self.tb_ignored_keys = ['iter', 'eta']
        self.iter_timer = Timer()
        # Sliding-window size used by every SmoothedValue below.
        self.WIN_SZ = 20
        def create_smoothed_value():
            return SmoothedValue(self.WIN_SZ)
        self.smoothed_losses = defaultdict(create_smoothed_value)
        self.smoothed_metrics = defaultdict(create_smoothed_value)
        self.smoothed_total_loss = SmoothedValue(self.WIN_SZ)
        # Buffers holding per-inner-iteration values during gradient accumulation.
        self.inner_total_loss = []
        self.inner_losses = defaultdict(list)
        if cfg.FPN.FPN_ON:
            self.inner_loss_rpn_cls = []
            self.inner_loss_rpn_bbox = []
        self.inner_metrics = defaultdict(list)
    def IterTic(self):
        """Start the per-iteration timer."""
        self.iter_timer.tic()
    def IterToc(self):
        """Stop the timer and return the elapsed time of this iteration."""
        return self.iter_timer.toc(average=False)
    def ResetIterTimer(self):
        """Reset the accumulated timing statistics."""
        self.iter_timer.reset()
    def UpdateIterStats(self, model_out, inner_iter=None):
        """Update tracked stats from one forward pass.

        Delegates to ``_UpdateIterStats_inner`` when gradient accumulation is
        active.  Each loss in ``model_out['losses']`` is averaged over GPUs in
        place; their sum is stored in ``model_out['total_loss']``.
        """
        if ((inner_iter is not None) and (self.misc_args.iter_size > 1)):
            return self._UpdateIterStats_inner(model_out, inner_iter)
        total_loss = 0
        if cfg.FPN.FPN_ON:
            loss_rpn_cls_data = 0
            loss_rpn_bbox_data = 0
        for (k, loss) in model_out['losses'].items():
            # One loss value per GPU; reduce to a single scalar tensor.
            assert (loss.shape[0] == cfg.NUM_GPUS)
            loss = loss.mean(dim=0, keepdim=True)
            total_loss += loss
            loss_data = loss.item()
            model_out['losses'][k] = loss
            if cfg.FPN.FPN_ON:
                # Aggregate per-FPN-level RPN losses into two summary scalars.
                if k.startswith('loss_rpn_cls_'):
                    loss_rpn_cls_data += loss_data
                elif k.startswith('loss_rpn_bbox_'):
                    loss_rpn_bbox_data += loss_data
            self.smoothed_losses[k].AddValue(loss_data)
        model_out['total_loss'] = total_loss
        self.smoothed_total_loss.AddValue(total_loss.item())
        if cfg.FPN.FPN_ON:
            self.smoothed_losses['loss_rpn_cls'].AddValue(loss_rpn_cls_data)
            self.smoothed_losses['loss_rpn_bbox'].AddValue(loss_rpn_bbox_data)
        for (k, metric) in model_out['metrics'].items():
            metric = metric.mean(dim=0, keepdim=True)
            self.smoothed_metrics[k].AddValue(metric.item())
    def _UpdateIterStats_inner(self, model_out, inner_iter):
        """Gradient-accumulation variant: buffer values, flush on the last inner iter."""
        assert (inner_iter < self.misc_args.iter_size)
        total_loss = 0
        if cfg.FPN.FPN_ON:
            loss_rpn_cls_data = 0
            loss_rpn_bbox_data = 0
        if (inner_iter == 0):
            # First inner iteration of the effective step: reset all buffers.
            self.inner_total_loss = []
            for k in model_out['losses']:
                self.inner_losses[k] = []
            if cfg.FPN.FPN_ON:
                self.inner_loss_rpn_cls = []
                self.inner_loss_rpn_bbox = []
            for k in model_out['metrics']:
                self.inner_metrics[k] = []
        for (k, loss) in model_out['losses'].items():
            assert (loss.shape[0] == cfg.NUM_GPUS)
            loss = loss.mean(dim=0, keepdim=True)
            total_loss += loss
            loss_data = loss.item()
            model_out['losses'][k] = loss
            if cfg.FPN.FPN_ON:
                if k.startswith('loss_rpn_cls_'):
                    loss_rpn_cls_data += loss_data
                elif k.startswith('loss_rpn_bbox_'):
                    loss_rpn_bbox_data += loss_data
            self.inner_losses[k].append(loss_data)
            if (inner_iter == (self.misc_args.iter_size - 1)):
                # Last inner iteration: average the buffer into the smoother.
                loss_data = self._mean_and_reset_inner_list('inner_losses', k)
                self.smoothed_losses[k].AddValue(loss_data)
        model_out['total_loss'] = total_loss
        total_loss_data = total_loss.item()
        self.inner_total_loss.append(total_loss_data)
        if cfg.FPN.FPN_ON:
            self.inner_loss_rpn_cls.append(loss_rpn_cls_data)
            self.inner_loss_rpn_bbox.append(loss_rpn_bbox_data)
        if (inner_iter == (self.misc_args.iter_size - 1)):
            total_loss_data = self._mean_and_reset_inner_list('inner_total_loss')
            self.smoothed_total_loss.AddValue(total_loss_data)
            if cfg.FPN.FPN_ON:
                loss_rpn_cls_data = self._mean_and_reset_inner_list('inner_loss_rpn_cls')
                loss_rpn_bbox_data = self._mean_and_reset_inner_list('inner_loss_rpn_bbox')
                self.smoothed_losses['loss_rpn_cls'].AddValue(loss_rpn_cls_data)
                self.smoothed_losses['loss_rpn_bbox'].AddValue(loss_rpn_bbox_data)
        for (k, metric) in model_out['metrics'].items():
            metric = metric.mean(dim=0, keepdim=True)
            metric_data = metric.item()
            self.inner_metrics[k].append(metric_data)
            if (inner_iter == (self.misc_args.iter_size - 1)):
                metric_data = self._mean_and_reset_inner_list('inner_metrics', k)
                self.smoothed_metrics[k].AddValue(metric_data)
    def _mean_and_reset_inner_list(self, attr_name, key=None):
        """Average and clear an inner buffer (dict entry when *key* is given)."""
        if key:
            mean_val = (sum(getattr(self, attr_name)[key]) / self.misc_args.iter_size)
            getattr(self, attr_name)[key] = []
        else:
            mean_val = (sum(getattr(self, attr_name)) / self.misc_args.iter_size)
            setattr(self, attr_name, [])
        return mean_val
    def LogIterStats(self, cur_iter, lr):
        """Emit stats every LOG_PERIOD iterations (and at the final iteration)."""
        if (((cur_iter % self.LOG_PERIOD) == 0) or (cur_iter == (cfg.SOLVER.MAX_ITER - 1))):
            stats = self.GetStats(cur_iter, lr)
            log_stats(stats, self.misc_args)
            if self.tblogger:
                self.tb_log_stats(stats, cur_iter)
    def tb_log_stats(self, stats, cur_iter):
        """Recursively write scalar stats to tensorboard, skipping ignored keys."""
        for k in stats:
            if (k not in self.tb_ignored_keys):
                v = stats[k]
                if isinstance(v, dict):
                    self.tb_log_stats(v, cur_iter)
                else:
                    self.tblogger.add_scalar(k, v, cur_iter)
    def GetStats(self, cur_iter, lr):
        """Assemble an OrderedDict of current smoothed stats, grouped by loss kind."""
        eta_seconds = (self.iter_timer.average_time * (cfg.SOLVER.MAX_ITER - cur_iter))
        eta = str(datetime.timedelta(seconds=int(eta_seconds)))
        stats = OrderedDict(iter=(cur_iter + 1), time=self.iter_timer.average_time, eta=eta, loss=self.smoothed_total_loss.GetMedianValue(), lr=lr)
        stats['metrics'] = OrderedDict()
        for k in sorted(self.smoothed_metrics):
            stats['metrics'][k] = self.smoothed_metrics[k].GetMedianValue()
        head_losses = []
        rpn_losses = []
        rpn_fpn_cls_losses = []
        rpn_fpn_bbox_losses = []
        # Classify loss keys by underscore-token count, e.g. 'loss_cls' (head),
        # 'loss_rpn_cls' (RPN), 'loss_rpn_cls_fpn2' (per-FPN-level).
        for (k, v) in self.smoothed_losses.items():
            toks = k.split('_')
            if (len(toks) == 2):
                head_losses.append((k, v.GetMedianValue()))
            elif (len(toks) == 3):
                rpn_losses.append((k, v.GetMedianValue()))
            elif ((len(toks) == 4) and (toks[2] == 'cls')):
                rpn_fpn_cls_losses.append((k, v.GetMedianValue()))
            elif ((len(toks) == 4) and (toks[2] == 'bbox')):
                rpn_fpn_bbox_losses.append((k, v.GetMedianValue()))
            else:
                raise ValueError(('Unexpected loss key: %s' % k))
        stats['head_losses'] = OrderedDict(head_losses)
        stats['rpn_losses'] = OrderedDict(rpn_losses)
        stats['rpn_fpn_cls_losses'] = OrderedDict(rpn_fpn_cls_losses)
        stats['rpn_fpn_bbox_losses'] = OrderedDict(rpn_fpn_bbox_losses)
        return stats
def _forward_from_src(src: str):
    """Execute *src* (with `torch` in scope) and return the `forward` it defines."""
    scope: Dict[str, Any] = {'torch': torch}
    exec_with_source(src, scope)
    return scope['forward']
def verify_plan(source_models, pred_models, plan):
    """Validate that *plan* is consistent with the source/prediction model sets.

    Checks that every predicted model is a source model, that the first
    phase header matches the two sets, and that every step only uses models
    loaded in its phase.  Returns True on success, False (after printing a
    diagnostic) on the first violation.
    """
    if not all(m in source_models for m in pred_models):
        print('Not all pred_models are in source_models.')
        return False
    for phase_idx, phase in enumerate(plan):
        # Determine which models the phase header declares as loaded.
        if phase[0][0] == 'RST':
            loaded = set(phase[0][2])
        elif phase_idx == 0:
            loaded = set(phase[0][0])
        else:
            loaded = set(phase[0])
        # The first non-RST phase header must mirror the two input sets exactly.
        if phase[0][0] != 'RST' and phase_idx == 0:
            if set(phase[0][0]).symmetric_difference(source_models):
                print('source_models not equal to plan[0][0]')
                return False
            if set(phase[0][1]).symmetric_difference(pred_models):
                print('pred_models not equal to plan[0][1]')
                return False
        for step_idx, step in enumerate(phase):
            if step_idx == 0:
                continue  # skip the header entry
            if not all(m in loaded for m, _ in step[:-2]):
                print('Model not loaded in phase {0} step {1}'.format(phase_idx, step_idx))
                return False
    return True
def convert_doc_to_sciie_format(input_dict):
    """Convert {doc_id: raw content} into a list of SciIE-style sentence dicts.

    Each cleaned sentence is tokenized with spaCy and emitted with empty
    cluster/NER/relation annotations and a `doc_key` of '<doc_id>_<sent_id>'.
    """
    results = []
    for doc_id, raw_content in input_dict.items():
        cleaned = clean_raw_input.clean_dict(raw_content)
        for sent_id, sentence in cleaned.items():
            tokens = [token.text for token in spacy_nlp(sentence['sentence'])]
            results.append({
                'clusters': [],
                'doc_key': doc_id + '_' + str(sent_id),
                'ner': [[]],
                'relations': [[]],
                'sentences': [tokens],
            })
    return results
def block_reduction_a(inputs, scope=None, reuse=None):
    """Reduction-A block (Inception-v4 family): three parallel stride-2 branches.

    Branch 0: 3x3 conv (stride 2); Branch 1: 1x1 -> 3x3 -> 3x3 (stride 2)
    conv tower; Branch 2: 3x3 max-pool (stride 2).  The branches are
    concatenated along the channel axis.
    """
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d], stride=1, padding='SAME'):
        with tf.variable_scope(scope, 'BlockReductionA', [inputs], reuse=reuse):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(inputs, 384, [3, 3], stride=2, padding='VALID', scope='Conv2d_1a_3x3')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
                branch_1 = slim.conv2d(branch_1, 256, [3, 3], stride=2, padding='VALID', scope='Conv2d_1a_3x3')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
            # NOTE(review): tf.concat(3, values) is the pre-TF-1.0 argument order;
            # TF >= 1.0 requires tf.concat(values, axis=3) -- confirm which
            # TensorFlow version this file targets.
            return tf.concat(3, [branch_0, branch_1, branch_2])
def incomplete_orthogonal_array(k, n, holes, resolvable=False, existence=False):
    """Return an incomplete OA(k,n) with pairwise-disjoint holes of the given sizes.

    ``holes`` lists the hole sizes (zero-size holes are ignored).  With
    ``existence=True`` only availability is reported (True/False/Unknown)
    instead of building the array; ``resolvable=True`` (holes of size 1 only)
    additionally requires the blocks outside the holes to partition into
    parallel classes.

    Raises ValueError for negative hole sizes, EmptySetError when the object
    provably does not exist, and NotImplementedError when no construction is
    known here.

    Fixed: the negative-hole check called ``.format`` on the ValueError
    *instance* (``raise ValueError('...').format(h)``), which raised
    AttributeError instead of the intended ValueError.
    """
    from sage.combinat.designs.database import QDM
    for h in holes:
        if (h < 0):
            # Bug fix: format the message string, not the exception object.
            raise ValueError('Holes must have size >=0, but {} was in the list'.format(h))
    # Size-0 holes are irrelevant.
    holes = [h for h in holes if (h > 0)]
    if (not holes):
        # No real holes: this is just an (optionally resolvable) OA(k,n).
        return orthogonal_array(k, n, existence=existence, resolvable=resolvable)
    sum_of_holes = sum(holes)
    number_of_holes = len(holes)
    max_hole = max(holes)
    min_hole = min(holes)
    if (sum_of_holes > n):
        if existence:
            return False
        raise EmptySetError('The total size of holes must be smaller or equal than the size of the ground set')
    if ((max_hole == 1) and resolvable and (sum_of_holes != n)):
        if existence:
            return False
        raise EmptySetError("There is no resolvable incomplete OA({},{}) whose holes' sizes sum to {}<n(={})".format(k, n, sum_of_holes, n))
    if ((max_hole == 1) and resolvable):
        # Resolvable with n size-1 holes: derived from an OA(k+1,n) whose first
        # column indexes the parallel classes.
        if existence:
            return orthogonal_array((k + 1), n, existence=True)
        OA = sorted(orthogonal_array((k + 1), n))
        OA = [B[1:] for B in OA]
        # Relabel each column so that the last n blocks become [i,i,...,i].
        relabel = [([0] * n) for _ in range(k)]
        for (i, B) in enumerate(OA[(- n):]):
            for (ii, xx) in enumerate(B):
                relabel[ii][xx] = i
        OA = [[relabel[i][xx] for (i, xx) in enumerate(B)] for B in OA]
        assert all(((OA[((- n) + i)] == ([i] * k)) for i in range(n))), 'The last n blocks should be [i,i,...]'
        return OA[:(- n)]
    elif ((max_hole == 1) and (number_of_holes <= 1)):
        # At most one size-1 hole: any OA(k,n) works; drop one block as the hole.
        if existence:
            return orthogonal_array(k, n, existence=True)
        OA = orthogonal_array(k, n)
        independent_set = OA[:number_of_holes]
    elif ((k >= 3) and (2 <= number_of_holes <= 3) and (n > ((k - 1) * max_hole)) and (holes.count(1) == (number_of_holes - 1)) and incomplete_orthogonal_array(k, n, [max_hole], existence=True)):
        # One big hole plus one or two size-1 holes: start from an IOA with the
        # big hole and carve out disjoint unit holes from its blocks.
        if existence:
            return True
        IOA = incomplete_orthogonal_array(k, n, [max_hole])
        i = holes.index(max_hole)
        # NOTE: entries of `holes` are replaced in place by explicit blocks below.
        holes[i] = [([ii] * k) for ii in range((n - max_hole), n)]
        i = holes.index(1)
        for h1 in IOA:
            if all(((x < (n - max_hole)) for x in h1)):
                break
        holes[i] = [h1]
        IOA.remove(h1)
        if (number_of_holes == 3):
            i = holes.index(1)
            # The second unit hole must be coordinate-wise disjoint from h1.
            for h2 in IOA:
                if all((((h1[j] != x) and (x < (n - max_hole))) for (j, x) in enumerate(h2))):
                    break
            holes[i] = [h2]
            IOA.remove(h2)
        holes = sum(holes, [])
        holes = [list(h) for h in zip(*holes)]
        # Complete each column's hole values into a full permutation of 0..n-1.
        for l in holes:
            for i in range(n):
                if (i not in l):
                    l.insert(0, i)
        for i in range(len(holes)):
            holes[i] = {v: i for (i, v) in enumerate(holes[i])}
        IOA = OA_relabel(IOA, k, n, matrix=holes)
        return IOA
    elif ((max_hole == 1) and (number_of_holes >= 2) and (k == (n + 1))):
        if existence:
            return False
        raise EmptySetError('There is no OA(n+1,n) - {}.OA(n+1,1) as all blocks intersect in a projective plane.'.format(number_of_holes))
    elif ((max_hole == 1) and (orthogonal_array((k + 1), n, existence=True) is True)):
        # Use the extra column of an OA(k+1,n): blocks whose last symbol is 0
        # are pairwise disjoint on the remaining k columns.
        if existence:
            return True
        OA = orthogonal_array((k + 1), n)
        independent_set = [B[:(- 1)] for B in OA if (B[(- 1)] == 0)][:number_of_holes]
        OA = [B[:(- 1)] for B in OA]
    elif ((max_hole == 1) and (orthogonal_array(k, n, existence=True) is True)):
        OA = orthogonal_array(k, n)
        try:
            independent_set = OA_find_disjoint_blocks(OA, k, n, number_of_holes)
        except ValueError:
            if existence:
                return Unknown
            raise NotImplementedError('I was not able to build this OA({},{})-{}.OA({},1)'.format(k, n, number_of_holes, k))
        if existence:
            return True
        # `independent_set` is already bound by the try above; the original code
        # redundantly recomputed OA_find_disjoint_blocks here.
    elif ((max_hole == 1) and (not (orthogonal_array(k, n, existence=True) is True))):
        return orthogonal_array(k, n, existence=existence)
    elif ((number_of_holes == 1) and any((((uu == sum_of_holes) and (mu <= 1) and (lmbda == 1) and (k <= (kk + 1))) for ((nn, lmbda, mu, uu), (kk, _)) in QDM.get((n, 1), {}).items()))):
        # A single hole obtained from a quasi-difference matrix in the database.
        for ((nn, lmbda, mu, uu), (kk, f)) in QDM[(n, 1)].items():
            if ((uu == sum_of_holes) and (mu <= 1) and (lmbda == 1) and (k <= (kk + 1))):
                break
        (G, M) = f()
        OA = OA_from_quasi_difference_matrix(M, G, fill_hole=False)
        return [B[:k] for B in OA]
    elif ((min_hole > 1) and (max_hole == min_hole) and ((n % min_hole) == 0) and orthogonal_array(k, min_hole, existence=True) and incomplete_orthogonal_array(k, (n // min_hole), ([1] * number_of_holes), existence=True)):
        # Equal-size holes dividing n: blow up an IOA(k, n/h) by an OA(k, h).
        if existence:
            return True
        h = min_hole
        iOA1 = incomplete_orthogonal_array(k, (n // holes[0]), ([1] * number_of_holes))
        iOA2 = orthogonal_array(k, h)
        return [[((B1[i] * h) + B2[i]) for i in range(k)] for B1 in iOA1 for B2 in iOA2]
    else:
        if existence:
            return Unknown
        f = (lambda x: ('' if (x == 1) else '{}.'.format(x)))
        holes_string = ''.join(('-{}OA({},{})'.format(f(holes.count(x)), k, x) for x in sorted(set(holes))))
        raise NotImplementedError('I was not able to build this OA({},{}){}'.format(k, n, holes_string))
    # Fall-through for the size-1-hole branches above: remove the independent
    # blocks and relabel so the holes sit on the last symbols.
    assert (number_of_holes == len(independent_set))
    for B in independent_set:
        OA.remove(B)
    OA = OA_relabel(OA, k, n, blocks=independent_set)
    return OA
class AccuracyRobustnessBenchmark():
    """VOT-style accuracy/robustness evaluation over a tracking dataset."""
    def __init__(self, dataset, burnin=10):
        # burnin: frames ignored right after a re-initialization (VOT protocol);
        # note _calculate_accuracy_robustness currently hard-codes burnin=10.
        self.dataset = dataset
        self.burnin = burnin
    def eval(self, eval_trackers=None):
        """Return {tracker: {'overlaps': ..., 'failures': ...}} per video.

        Evaluates all trackers in the dataset when *eval_trackers* is None;
        a single tracker name may be passed as a plain string.
        """
        if (eval_trackers is None):
            eval_trackers = self.dataset.tracker_names
        if isinstance(eval_trackers, str):
            eval_trackers = [eval_trackers]
        result = {}
        for tracker_name in eval_trackers:
            (accuracy, failures) = self._calculate_accuracy_robustness(tracker_name)
            result[tracker_name] = {'overlaps': accuracy, 'failures': failures}
        return result
    def show_result(self, result, eao_result=None, show_video_level=False, helight_threshold=0.5):
        """Pretty-print a summary table (and optionally per-video rows).

        `helight_threshold` (sic): accuracies below it are highlighted in red.
        When *eao_result* is given, trackers are sorted by EAO and an extra
        column is shown.
        """
        # Column width adapts to the longest tracker name.
        tracker_name_len = max((max([len(x) for x in result.keys()]) + 2), 12)
        if (eao_result is not None):
            header = (('|{:^' + str(tracker_name_len)) + '}|{:^10}|{:^12}|{:^13}|{:^7}|')
            header = header.format('Tracker Name', 'Accuracy', 'Robustness', 'Lost Number', 'EAO')
            formatter = (('|{:^' + str(tracker_name_len)) + '}|{:^10.3f}|{:^12.3f}|{:^13.1f}|{:^7.3f}|')
        else:
            header = (('|{:^' + str(tracker_name_len)) + '}|{:^10}|{:^12}|{:^13}|')
            header = header.format('Tracker Name', 'Accuracy', 'Robustness', 'Lost Number')
            formatter = (('|{:^' + str(tracker_name_len)) + '}|{:^10.3f}|{:^12.3f}|{:^13.1f}|')
        bar = ('-' * len(header))
        print(bar)
        print(header)
        print(bar)
        if (eao_result is not None):
            # Show at most the top-20 trackers by overall EAO.
            tracker_eao = sorted(eao_result.items(), key=(lambda x: x[1]['all']), reverse=True)[:20]
            tracker_names = [x[0] for x in tracker_eao]
        else:
            tracker_names = list(result.keys())
        for tracker_name in tracker_names:
            ret = result[tracker_name]
            overlaps = list(itertools.chain(*ret['overlaps'].values()))
            accuracy = np.nanmean(overlaps)
            length = sum([len(x) for x in ret['overlaps'].values()])
            failures = list(ret['failures'].values())
            lost_number = np.mean(np.sum(failures, axis=0))
            # Robustness: failures per 100 frames, averaged over repetitions.
            robustness = (np.mean((np.sum(np.array(failures), axis=0) / length)) * 100)
            if (eao_result is None):
                print(formatter.format(tracker_name, accuracy, robustness, lost_number))
            else:
                print(formatter.format(tracker_name, accuracy, robustness, lost_number, eao_result[tracker_name]['all']))
        print(bar)
        if (show_video_level and (len(result) < 10)):
            print('\n\n')
            header1 = '|{:^14}|'.format('Tracker name')
            header2 = '|{:^14}|'.format('Video name')
            for tracker_name in result.keys():
                header1 += '{:^17}|'.format(tracker_name)
                header2 += '{:^8}|{:^8}|'.format('Acc', 'LN')
            print(('-' * len(header1)))
            print(header1)
            print(('-' * len(header1)))
            print(header2)
            print(('-' * len(header1)))
            videos = list(result[tracker_name]['overlaps'].keys())
            for video in videos:
                row = '|{:^14}|'.format(video)
                for tracker_name in result.keys():
                    overlaps = result[tracker_name]['overlaps'][video]
                    accuracy = np.nanmean(overlaps)
                    failures = result[tracker_name]['failures'][video]
                    lost_number = np.mean(failures)
                    accuracy_str = '{:^8.3f}'.format(accuracy)
                    # Highlight weak accuracy / any losses in red.
                    if (accuracy < helight_threshold):
                        row += f'{Fore.RED}{accuracy_str}{Style.RESET_ALL}|'
                    else:
                        row += (accuracy_str + '|')
                    lost_num_str = '{:^8.3f}'.format(lost_number)
                    if (lost_number > 0):
                        row += f'{Fore.RED}{lost_num_str}{Style.RESET_ALL}|'
                    else:
                        row += (lost_num_str + '|')
                print(row)
            print(('-' * len(header1)))
    def _calculate_accuracy_robustness(self, tracker_name):
        """Per-video overlap lists and failure counts for one tracker."""
        overlaps = {}
        failures = {}
        all_length = {}
        for i in range(len(self.dataset)):
            video = self.dataset[i]
            gt_traj = video.gt_traj
            # Lazily load trajectories not already cached on the video.
            if (tracker_name not in video.pred_trajs):
                tracker_trajs = video.load_tracker(self.dataset.tracker_path, tracker_name, False)
            else:
                tracker_trajs = video.pred_trajs[tracker_name]
            overlaps_group = []
            num_failures_group = []
            for tracker_traj in tracker_trajs:
                num_failures = calculate_failures(tracker_traj)[0]
                overlaps_ = calculate_accuracy(tracker_traj, gt_traj, burnin=10, bound=(video.width, video.height))[1]
                overlaps_group.append(overlaps_)
                num_failures_group.append(num_failures)
            with warnings.catch_warnings():
                # nanmean over all-NaN columns warns; those frames stay NaN.
                warnings.simplefilter('ignore', category=RuntimeWarning)
                overlaps[video.name] = np.nanmean(overlaps_group, axis=0).tolist()
            failures[video.name] = num_failures_group
        return (overlaps, failures)
# NOTE(review): probable extraction artifact -- likely a stripped decorator
# (`@_utils.test()`) that applied to the test function below; confirm against
# the original source before relying on this statement.
_utils.test()
def test_single_compare():
    """Comparisons computed eagerly in Python must match those computed by foo().

    NOTE(review): `foo` is presumably a Taichi kernel/function whose decorator
    (e.g. `@ti.func` or `@ti.kernel`) was stripped during extraction -- confirm.
    """
    def foo(a: ti.template(), b: ti.template(), c: ti.template()):
        # For each vector lane i, write the six comparison results of a[i] vs
        # b[i] into slots c[6i] .. c[6i+5] (==, !=, <, <=, >, >=).
        for i in ti.static(range(3)):
            c[(i * 6)] = (a[i] == b[i])
            c[((i * 6) + 1)] = (a[i] != b[i])
            c[((i * 6) + 2)] = (a[i] < b[i])
            c[((i * 6) + 3)] = (a[i] <= b[i])
            c[((i * 6) + 4)] = (a[i] > b[i])
            c[((i * 6) + 5)] = (a[i] >= b[i])
    a = ti.Vector([1, 1, 2])
    b = ti.Vector([2, 1, 1])
    c = ti.field(ti.i32, shape=(18,))
    d = ti.field(ti.i32, shape=(18,))
    # Reference results computed in plain Python.
    for i in range(3):
        c[(i * 6)] = (a[i] == b[i])
        c[((i * 6) + 1)] = (a[i] != b[i])
        c[((i * 6) + 2)] = (a[i] < b[i])
        c[((i * 6) + 3)] = (a[i] <= b[i])
        c[((i * 6) + 4)] = (a[i] > b[i])
        c[((i * 6) + 5)] = (a[i] >= b[i])
    foo(a, b, d)
    for i in range(18):
        assert (c[i] == d[i])
def load_trained_lora_model(model_name_or_path: str, model_lora_path: str, model_cls: Optional[Type]=None, modalities: Optional[List[Modality]]=None, load_bits: int=16, device_map: str='auto'):
    """Load a base multimodal LM, attach projector weights, and apply LoRA adapters.

    Args:
        model_name_or_path: HF id or local path of the base language model.
        model_lora_path: path/repo containing the LoRA adapter config plus the
            'non_lora_trainables.bin' projector weights.
        model_cls: model class; inferred from the saved config when None.
        modalities: modality list; rebuilt from the saved config when None.
        load_bits: 4 or 8 for quantized loading, 16 for fp16 (LoRA is merged
            into the base weights only in the 16-bit case).
        device_map: forwarded to `from_pretrained`.

    Returns:
        (model, tokenizer) ready for inference (model is in eval mode).
    """
    load_kwargs = {'device_map': device_map}
    if (load_bits == 8):
        load_kwargs['load_in_8bit'] = True
    elif (load_bits == 4):
        # 4-bit NF4 with double quantization and fp16 compute.
        load_kwargs['load_in_4bit'] = True
        load_kwargs['quantization_config'] = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type='nf4')
    elif (load_bits == 16):
        load_kwargs['torch_dtype'] = torch.float16
    else:
        raise ValueError(f'Invalid load_bits: {load_bits}')
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=False)
    fix_tokenizer(tokenizer)
    # The adapter repo carries the config describing model class and modalities.
    cfg = AutoConfig.from_pretrained(model_lora_path)
    if (model_cls is None):
        model_cls = LANGUAGE_MODEL_NAME_TO_CLASS[cfg.model_cls]
    if (modalities is None):
        modalities = MODALITY_BUILDERS[cfg.modality_builder]()
    logging.info(f'Loading base model from {model_name_or_path} as {load_bits} bits')
    model = model_cls.from_pretrained(model_name_or_path, low_cpu_mem_usage=True, config=cfg, **load_kwargs)
    model.modalities = modalities
    logging.info(f'Loading projector weights for {[m.name for m in modalities]}')
    # Projector weights may live locally or on the HF hub.
    if os.path.exists(os.path.join(model_lora_path, 'non_lora_trainables.bin')):
        non_lora_trainables = torch.load(os.path.join(model_lora_path, 'non_lora_trainables.bin'), map_location='cpu')
    else:
        local_fn = hf_hub_download(repo_id=model_lora_path, filename='non_lora_trainables.bin', repo_type='model')
        non_lora_trainables = torch.load(local_fn, map_location='cpu')
    model.get_model().initialize_pretrained_modules(modalities, non_lora_trainables)
    logging.info(f'Loading and merging LoRA weights from {model_lora_path}')
    model = PeftModel.from_pretrained(model, model_lora_path)
    # Merging into quantized weights is not supported; only merge for fp16.
    if (load_bits == 16):
        model = model.merge_and_unload()
    model.eval()
    return (model, tokenizer)
class PretrainDataset(data.Dataset):
    """Static-image pretraining dataset that synthesizes short pseudo-videos.

    Each sample replays one annotated image `clip_n` times; frames after the
    first get random flip/color-jitter/affine (and optionally a random crop),
    so a mask-propagation model can be pretrained on clips built from single
    images.
    """
    PRETRAIN_DATA_LIST = ['COCO', 'ECSSD', 'MSRA10K', 'PASCAL-S', 'PASCALVOC2012']
    # Fraction of the image list exposed per epoch (see __len__).
    sample_ratio = 1
    def __init__(self, root, output_size, clip_n=3, max_obj_n=11, crop=False):
        self.root = root
        self.clip_n = clip_n
        self.output_size = output_size
        self.max_obj_n = max_obj_n
        # Kept for API parity with video datasets; unused for static images.
        self.max_skip = None
        self.crop = crop
        self.img_list = list()
        self.mask_list = list()
        dataset_list = list()
        for dataset_name in PretrainDataset.PRETRAIN_DATA_LIST:
            img_dir = os.path.join(root, 'JPEGImages', dataset_name)
            mask_dir = os.path.join(root, 'Annotations', dataset_name)
            img_list = (sorted(glob(os.path.join(img_dir, '*.jpg'))) + sorted(glob(os.path.join(img_dir, '*.png'))))
            mask_list = sorted(glob(os.path.join(mask_dir, '*.png')))
            if (len(img_list) > 0):
                # Only keep datasets whose image and annotation counts match.
                if (len(img_list) == len(mask_list)):
                    dataset_list.append(dataset_name)
                    self.img_list += img_list
                    self.mask_list += mask_list
                    print(f'    {dataset_name}: {len(img_list)} imgs.')
                else:
                    print(f'    PreTrain dataset {dataset_name} has {len(img_list)} imgs and {len(mask_list)} annots. Not match! Skip.')
            else:
                print(f"    PreTrain dataset {dataset_name} doesn't exist. Skip.")
        print(gct(), f'{len(self.img_list)} imgs are used for PreTrain. They are from {dataset_list}.')
        # Augmentations; only frames after the first are perturbed (see __getitem__).
        self.random_horizontal_flip = mytrans.RandomHorizontalFlip(0.3)
        self.color_jitter = TF.ColorJitter(0.1, 0.1, 0.1, 0.03)
        self.random_affine = mytrans.RandomAffine(degrees=20, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=10)
        if self.crop:
            self.random_resize_crop = mytrans.RandomResizedCrop(400, (0.8, 1))
        else:
            self.resize = mytrans.Resize(output_size)
        self.to_tensor = TF.ToTensor()
        # Standard ImageNet normalization.
        self.normalize = TF.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.to_onehot = mytrans.ToOnehot(max_obj_n, shuffle=True)
    def increase_max_skip(self):
        # No-op: frame skipping does not apply to static-image clips.
        pass
    def set_max_skip(self, max_skip):
        self.max_skip = max_skip
    def __len__(self):
        return int((PretrainDataset.sample_ratio * len(self.img_list)))
    def __getitem__(self, idx):
        """Return (frames, masks, obj_n, info) for a synthesized clip.

        Re-samples a different image whenever the mask contains only
        background (obj_n == 1 means background label only).
        """
        obj_n = 1
        while (obj_n == 1):
            img_pil = load_image_in_PIL(self.img_list[idx], 'RGB')
            mask_pil = load_image_in_PIL(self.mask_list[idx], 'P')
            if (not self.crop):
                frames = torch.zeros((self.clip_n, 3, *self.output_size), dtype=torch.float)
                masks = torch.zeros((self.clip_n, self.max_obj_n, *self.output_size), dtype=torch.float)
            else:
                frames = torch.zeros((self.clip_n, 3, 400, 400), dtype=torch.float)
                masks = torch.zeros((self.clip_n, self.max_obj_n, 400, 400), dtype=torch.float)
            for i in range(self.clip_n):
                (img, mask) = (img_pil, mask_pil)
                if (i > 0):
                    # Perturb every frame except the reference (first) one.
                    (img, mask) = self.random_horizontal_flip(img, mask)
                    img = self.color_jitter(img)
                    (img, mask) = self.random_affine(img, mask)
                if self.crop:
                    (img, mask) = self.random_resize_crop(img, mask)
                else:
                    (img, mask) = self.resize(img, mask)
                mask = np.array(mask, np.uint8)
                if (i == 0):
                    # First frame fixes the object list used by later frames.
                    (mask, obj_list) = self.to_onehot(mask)
                    obj_n = (len(obj_list) + 1)
                else:
                    (mask, _) = self.to_onehot(mask, obj_list)
                frames[i] = self.normalize(self.to_tensor(img))
                masks[i] = mask
            info = {'name': self.img_list[idx]}
            if (obj_n == 1):
                idx = random.choice(range(len(self.img_list)))
        return (frames, masks, obj_n, info)
class EvaluationUtilsTest(tf.test.TestCase):
    """Unit tests for evaluation_utils.evaluate over the bundled test data."""

    def testEvaluate(self):
        """BLEU and ROUGE scores agree for BPE- and SPM-segmented references."""
        hyp_file = 'nmt/testdata/deen_output'
        bpe_ref = 'nmt/testdata/deen_ref_bpe'
        spm_ref = 'nmt/testdata/deen_ref_spm'
        want_bleu = 22.
        want_rouge = 50.
        bpe_bleu = evaluation_utils.evaluate(bpe_ref, hyp_file, 'bleu', 'bpe')
        bpe_rouge = evaluation_utils.evaluate(bpe_ref, hyp_file, 'rouge', 'bpe')
        self.assertAlmostEqual(want_bleu, bpe_bleu)
        self.assertAlmostEqual(want_rouge, bpe_rouge)
        spm_bleu = evaluation_utils.evaluate(spm_ref, hyp_file, 'bleu', 'spm')
        spm_rouge = evaluation_utils.evaluate(spm_ref, hyp_file, 'rouge', 'spm')
        self.assertAlmostEqual(want_rouge, spm_rouge)
        self.assertAlmostEqual(want_bleu, spm_bleu)

    def testAccuracy(self):
        """Sentence-level accuracy against the label reference."""
        hyp_file = 'nmt/testdata/pred_output'
        ref_file = 'nmt/testdata/label_ref'
        want_accuracy = 60.0
        got = evaluation_utils.evaluate(ref_file, hyp_file, 'accuracy')
        self.assertAlmostEqual(want_accuracy, got)

    def testWordAccuracy(self):
        """Word-level accuracy against the label reference."""
        hyp_file = 'nmt/testdata/pred_output'
        ref_file = 'nmt/testdata/label_ref'
        want_word_accuracy = 60.0
        got = evaluation_utils.evaluate(ref_file, hyp_file, 'word_accuracy')
        self.assertAlmostEqual(want_word_accuracy, got)
def get_default_qconfig(backend='fbgemm'):
    """Return the default QConfig for *backend*.

    'fbgemm' (x86) uses reduced-range histogram activations with per-channel
    weight observers; 'qnnpack' (ARM) uses full-range activations with
    per-tensor weights; anything else falls back to the global default.
    """
    if backend == 'fbgemm':
        return QConfig(
            activation=HistogramObserver.with_args(reduce_range=True),
            weight=default_per_channel_weight_observer,
        )
    if backend == 'qnnpack':
        return QConfig(
            activation=HistogramObserver.with_args(reduce_range=False),
            weight=default_weight_observer,
        )
    return default_qconfig
def main():
    """Entry point: parse args, build the VSRN finetuning model, then train or run inference."""
    parser = argparse.ArgumentParser()
    # --- data / optimization hyper-parameters ---
    parser.add_argument('--data_path', default='dataset', help='path to datasets')
    parser.add_argument('--margin', default=0.2, type=float, help='Rank loss margin.')
    parser.add_argument('--num_epochs', default=2, type=int, help='Number of training epochs.')
    parser.add_argument('--batch_size', default=32, type=int, help='Size of a training mini-batch.')
    parser.add_argument('--embed_size', default=2048, type=int, help='Dimensionality of the joint embedding.')
    parser.add_argument('--grad_clip', default=2.0, type=float, help='Gradient clipping threshold.')
    parser.add_argument('--learning_rate', default=5e-05, type=float, help='Initial learning rate.')
    parser.add_argument('--lr_update', default=15, type=int, help='Number of epochs to update the learning rate.')
    parser.add_argument('--workers', default=10, type=int, help='Number of data loader workers.')
    # --- logging / checkpointing cadence ---
    parser.add_argument('--log_step', default=10, type=int, help='Number of steps to print and record the log.')
    parser.add_argument('--val_step', default=500, type=int, help='Number of steps to run validation.')
    parser.add_argument('--save_step', default=2000, type=int, help='Number of steps to run validation.')
    parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
    # --- model / loss options ---
    parser.add_argument('--max_violation', action='store_true', help='Use max instead of sum in the rank loss.')
    parser.add_argument('--img_dim', default=2048, type=int, help='Dimensionality of the image embedding.')
    parser.add_argument('--finetune', action='store_true', help='Fine-tune the image encoder.')
    parser.add_argument('--measure', default='cosine', help='Similarity measure used (cosine|order)')
    parser.add_argument('--use_abs', action='store_true', help='Take the absolute value of embedding vectors.')
    # --- run configuration ---
    parser.add_argument('--output_dir', default='output', type=str)
    parser.add_argument('--model_name', type=str, required=True)
    parser.add_argument('--gpu_id', type=int, required=True)
    parser.add_argument('--checkpoint_path', type=str)
    parser.add_argument('--max_context_len', type=int, default=150, help='max length of context(containing <sos>,<eos>)')
    parser.add_argument('--max_target_len', type=int, default=30, help='max length of target dialog(containing <sos>,<eos>)')
    parser.add_argument('--mode', type=str, default='finetune', help='finetune or inference')
    parser.add_argument('--task', type=str, default='current', help='current or next')
    parser.add_argument('--no_context', action='store_true')
    parser.add_argument('--no_image', action='store_true')
    opt = parser.parse_args()
    print(opt)
    # Pin all computation to the requested GPU.
    device = torch.device('cuda:{}'.format(opt.gpu_id))
    torch.cuda.set_device(device)
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
    # BERT tokenizer extended with <start>/<end> markers (vocab grows by 2).
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    tokenizer.add_tokens('<start>')
    tokenizer.add_tokens('<end>')
    print('Adding Special tokens : <start>, <end> : {}'.format(tokenizer.convert_tokens_to_ids(['<start>', '<end>'])))
    print('Vocab Size : ', (tokenizer.vocab_size + 2))
    opt.vocab_size = (tokenizer.vocab_size + 2)
    model = VSRNFinetune(opt, tokenizer)
    model.to(device)
    if (opt.mode == 'inference'):
        # Inference: restore checkpoint, evaluate on the test split, and exit.
        checkpoint = torch.load(opt.checkpoint_path, map_location=device)
        model.load_state_dict(checkpoint['model'])
        test_loader = data.get_test_loader(tokenizer, opt.batch_size, opt)
        (test_rsum, test_info) = validate(opt, test_loader, model, 0, opt.mode, split='test')
        return
    # Training: create (or confirm overwrite of) the run output directory.
    write_path = os.path.join(opt.output_dir, 'finetune_{}_{}'.format(opt.model_name, opt.task))
    if (not os.path.exists(write_path)):
        os.makedirs(write_path)
    else:
        res = input('Do you want to remove the directory : {} '.format(write_path))
        if (res == 'y'):
            shutil.rmtree(write_path)
            os.makedirs(write_path)
        else:
            return
    print('Making dataloader...')
    (train_loader, val_loader, test_loader) = data.get_loaders(tokenizer, opt.batch_size, opt)
    best_rsum = 0
    best_epoch = 0
    best_test_info = None
    # Baseline evaluation before any finetuning (epoch 0).
    with torch.no_grad():
        (test_rsum, test_info) = validate(opt, test_loader, model, 0, opt.mode, split='test')
        print(test_rsum, best_rsum)
        if (test_rsum > best_rsum):
            best_rsum = test_rsum
            best_epoch = 0
            best_test_info = test_info
    for epoch in range(1, (opt.num_epochs + 1)):
        train(train_loader, model, epoch)
        with torch.no_grad():
            (test_rsum, test_info) = validate(opt, test_loader, model, epoch, mode=opt.mode, split='test')
            print(test_rsum, best_rsum)
            if (test_rsum > best_rsum):
                best_rsum = test_rsum
                best_epoch = epoch
                best_test_info = test_info
        # Checkpoint after every epoch (filename carries the best rsum so far).
        save_checkpoint({'epoch': epoch, 'model': model.state_dict(), 'rsum': best_rsum, 'opt': opt, 'Eiters': model.finetune_Eiters}, write_path, '{}_{}_{}.pth.tar'.format(opt.model_name, epoch, round(best_rsum, 2)))
    print(best_epoch, best_test_info)
def Discriminator32(n_gpu, nc, ndf):
    """Build a 32x32 discriminator network and apply the standard weight init."""
    net = _netD32(n_gpu, nc, ndf)
    net.apply(weights_init)
    return net
def find_cxx_compiler():
    """Locate a usable C++ compiler, caching the choice in the global CXX.

    A previously cached CXX is revalidated first; otherwise each candidate in
    CXX_COMPILERS is probed in order.  Raises MKException when none works.
    """
    global CXX, CXX_COMPILERS
    if CXX is not None and test_cxx_compiler(CXX):
        return CXX
    for candidate in CXX_COMPILERS:
        if test_cxx_compiler(candidate):
            CXX = candidate
            return CXX
    raise MKException('C++ compiler was not found. Try to set the environment variable CXX with the C++ compiler available in your system.')
def get_fid(fakes, model, npz, device, batch_size=1, use_tqdm=True):
    """Frechet Inception Distance between generated images and reference stats.

    *npz* supplies the precomputed reference mean/covariance ('mu'/'sigma');
    *fakes* is a list of image tensors that are concatenated, converted to
    images, and summarized with the 2048-d Inception features.
    """
    ref_mu, ref_sigma = npz['mu'], npz['sigma']
    stacked = torch.cat(fakes, dim=0)
    images = util.tensor2im(stacked).astype(float)
    gen_mu, gen_sigma = _compute_statistics_of_ims(images, model, batch_size, 2048, device, use_tqdm=use_tqdm)
    return float(calculate_frechet_distance(ref_mu, ref_sigma, gen_mu, gen_sigma))
# NOTE(review): probable extraction artifact -- these look like stripped
# `@pytest.mark.parametrize(...)` decorators for the test function below;
# confirm against the original source.
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [314])
def test_batch_det_forward_backward(seed, ctx, func_name):
    """Forward/backward check of F.batch_det on a clipped random 2x3x3 batch."""
    from nbla_test_utils import function_tester
    rand = np.random.RandomState(seed)
    batch = np.clip(rand.randn(2, 3, 3).astype(np.float32), -0.9, 0.9)
    function_tester(rand, F.batch_det, ref_det, [batch], ctx=ctx, func_name=func_name, atol_b=0.02, dstep=0.0001, disable_half_test=True)
# NOTE(review): probable extraction artifact -- likely a stripped Flask
# decorator, originally `@app.route('/predict', methods=['POST'])`; confirm
# against the original source.
('/predict', methods=['POST'])
def predict():
    """Flask view: read the submitted title and render the top-3 recommendations."""
    form_values = [str(v) for v in request.form.values()]
    top3 = recommend(form_values[0])[0]
    pr = '1) ' + top3[0] + ' // ' + '2) ' + top3[1] + ' // ' + '3) ' + top3[2]
    return render_template('index.html', recommended='Recommended Titles: {}'.format(pr))
class ResultFilter(base.Logger):
    """Logger wrapper that adds a DQN-normalized score to episode results."""
    def __init__(self, to: base.Logger, game_name: str):
        self._to = to
        # CamelCase -> snake_case, e.g. 'MsPacman' -> 'ms_pacman'.
        game_name = re.sub('(?<!^)(?=[A-Z])', '_', game_name).lower()
        if (game_name in BASELINES):
            random_score = BASELINES[game_name]['random']
            dqn_score = BASELINES[game_name]['online_dqn']
        elif (game_name in SCORES):
            random_score = SCORES[game_name]['random']
            dqn_score = SCORES[game_name]['online_dqn']
        # NOTE(review): if game_name is in neither table, random_score/dqn_score
        # stay unbound and the closure below raises NameError on first use --
        # confirm callers guarantee a known game name.
        def normalizer(score: float) -> float:
            # 0.0 == random play, 1.0 == the online DQN baseline.
            return ((score - random_score) / (dqn_score - random_score))
        self._normalizer = normalizer
    def write(self, data: base.LoggingData) -> None:
        """Forward *data*, adding 'normalized_score' when it has an episode_return."""
        if ('episode_return' in data):
            data = {**data, 'normalized_score': self._normalizer(data.get('episode_return', 0))}
        self._to.write(data)
    def close(self) -> None:
        self._to.close()
def test_precomputed_nearest_neighbors_filtering():
    """SpectralClustering with affinity='precomputed_nearest_neighbors' must
    keep only the requested n_neighbors, so a graph built with extra
    neighbors yields the same labels as a minimal one."""
    X, y = make_blobs(n_samples=200, random_state=0, centers=[[1, 1], [(- 1), (- 1)]], cluster_std=0.01)
    n_neighbors = 2
    results = []
    for extra in (0, 10):
        # Build a connectivity graph with possibly more neighbors than needed.
        nn = NearestNeighbors(n_neighbors=n_neighbors + extra).fit(X)
        graph = nn.kneighbors_graph(X, mode='connectivity')
        labels = SpectralClustering(random_state=0, n_clusters=2, affinity='precomputed_nearest_neighbors', n_neighbors=n_neighbors).fit(graph).labels_
        results.append(labels)
    assert_array_equal(results[0], results[1])
def test():
    """Build and run a hand-constructed SDFG computing B = 5*A elementwise.

    Uses a symbolic length N (set to 20), int32 input filled with 5s, and
    asserts the relative difference between 5*A and the output is tiny.
    """
    N = dp.symbol('N')
    N.set(20)
    input = dp.ndarray([N], dp.int32)
    output = dp.ndarray([N], dp.int32)
    input[:] = dp.int32(5)
    output[:] = dp.int32(0)
    mysdfg = SDFG('mysdfg')
    state = mysdfg.add_state()
    A_ = state.add_array('A', [N], dp.int32)
    B_ = state.add_array('B', [N], dp.int32)
    # One tasklet mapped over i in [0, N): b = 5*a, reading A[i], writing B[i].
    (tasklet, map_entry, map_exit) = state.add_mapped_tasklet('mytasklet', dict(i='0:N'), dict(a=Memlet.simple(A_, 'i')), 'b = 5*a', dict(b=Memlet.simple(B_, 'i')))
    # Full-range memlets connect the arrays to the map entry/exit.
    state.add_edge(A_, None, map_entry, None, Memlet.simple(A_, '0:N'))
    state.add_edge(map_exit, None, B_, None, Memlet.simple(B_, '0:N'))
    mysdfg(A=input, B=output, N=N)
    # Norm of the residual scaled by N; should be ~0 for exact int arithmetic.
    diff = (np.linalg.norm(((5 * input) - output)) / N.get())
    print('Difference:', diff)
    assert (diff <= 1e-05)
def bmes_decode(char_label_list: List[Tuple[(str, str)]]) -> Tuple[(str, List[Tag])]:
    """Decode a BMES-tagged character sequence into (sentence, entity tags).

    Each element is a (char, label) pair where the label is 'O' or of the
    form 'B-xxx' / 'M-xxx' / 'E-xxx' / 'S-xxx'; label[0] is the position
    code and label[2:] the entity type.

    Returns:
        A tuple of the reconstructed sentence string and the list of decoded
        Tag(entity_text, entity_type, begin, end) spans (end exclusive).
    """
    idx = 0
    length = len(char_label_list)
    tags = []
    while (idx < length):
        (term, label) = char_label_list[idx]
        current_label = label[0]
        # A 'B' on the very last character can never be closed; treat as 'S'.
        if (((idx + 1) == length) and (current_label == 'B')):
            current_label = 'S'
        if (current_label == 'O'):
            idx += 1
            continue
        if (current_label == 'S'):
            # Single-character entity.
            tags.append(Tag(term, label[2:], idx, (idx + 1)))
            idx += 1
            continue
        if (current_label == 'B'):
            # Extend over consecutive 'M' characters.
            end = (idx + 1)
            while (((end + 1) < length) and (char_label_list[end][1][0] == 'M')):
                end += 1
            if (char_label_list[end][1][0] == 'E'):
                # Properly closed entity: include the 'E' character.
                entity = ''.join((char_label_list[i][0] for i in range(idx, (end + 1))))
                tags.append(Tag(entity, label[2:], idx, (end + 1)))
                idx = (end + 1)
            else:
                # No closing 'E': take the span up to (excluding) position end.
                entity = ''.join((char_label_list[i][0] for i in range(idx, end)))
                tags.append(Tag(entity, label[2:], idx, end))
                idx = end
            continue
        else:
            # Stray 'M'/'E' without a preceding 'B' is skipped.
            idx += 1
            continue
    sentence = ''.join((term for (term, _) in char_label_list))
    return (sentence, tags)
# NOTE(review): this line looks like the argument list of a stripped skip
# decorator (e.g. "@unittest.skipIf"/"@pytest.mark.skipif") -- restore the
# decorator prefix before running.
((not have_sympy), 'SymPy not installed')
def test_conjugate():
    """conjugate(x) must round-trip with sympy.conjugate via sympify/_sympy_."""
    x = Symbol('x')
    e1 = sympy.conjugate(sympy.Symbol('x'))
    e2 = conjugate(x)
    # Converting the sympy expression must equal the local one, and back.
    assert (sympify(e1) == e2)
    assert (e2._sympy_() == e1)
class TestBirchAlgo():
    """Unit tests for BirchAlgo construction, fitting, and prediction."""
    def setup(self):
        pass
    def test_fit_none_input(self, empty_feature):
        """Fitting on an empty feature set must raise ValueError."""
        cfg = BirchParams()
        algo = BirchAlgo(cfg)
        assert isinstance(cfg, BirchParams), 'params must be BirchParams'
        assert isinstance(algo, BirchAlgo), 'detector must be BirchAlgo'
        with pytest.raises(ValueError):
            assert algo.fit(empty_feature)
    def test_fit_predict(self, log_features):
        """Fit on real features, then predict; result must be a pandas Series."""
        cfg = BirchParams()
        algo = BirchAlgo(cfg)
        assert isinstance(cfg, BirchParams), 'params must be BirchParams'
        assert isinstance(algo, BirchAlgo), 'detector must be BirchAlgo'
        algo.fit(log_features)
        assert isinstance(algo.model, Birch), 'Model must be Birch'
        out = algo.predict(log_features)
        assert isinstance(out, pd.Series), 'result must be pd.Series'
def wrap_generate_func(original_generate):
    """Wrap a template's generate() so async environments also work.

    In a sync environment the original generate() is used unchanged.  In an
    async environment the async generator from generate_async() is driven
    synchronously on the event loop, yielding one chunk at a time.
    """
    def _convert_generator(self, loop, args, kwargs):
        # Pull items from the async generator by running __anext__ on the
        # loop until it signals exhaustion.
        async_gen = self.generate_async(*args, **kwargs)
        try:
            while 1:
                (yield loop.run_until_complete(async_gen.__anext__()))
        except StopAsyncIteration:
            pass
    def generate(self, *args, **kwargs):
        if (not self.environment.is_async):
            return original_generate(self, *args, **kwargs)
        return _convert_generator(self, asyncio.get_event_loop(), args, kwargs)
    # Preserve the original function's name/docstring on the wrapper.
    return update_wrapper(generate, original_generate)
class ParallelRangeNode(ParallelStatNode):
    """Compiler parse-tree node for a prange() parallel for-loop.

    Responsible for validating prange's positional (start/stop/step) and
    keyword (schedule/nogil/num_threads/chunksize) arguments, analysing the
    loop target's type, and emitting the OpenMP '#pragma omp (parallel) for'
    loop code.
    """
    child_attrs = ['body', 'target', 'else_clause', 'args', 'num_threads', 'chunksize']
    body = target = else_clause = args = None
    start = stop = step = None
    is_prange = True
    nogil = None
    schedule = None
    valid_keyword_arguments = ['schedule', 'nogil', 'num_threads', 'chunksize']
    def __init__(self, pos, **kwds):
        super(ParallelRangeNode, self).__init__(pos, **kwds)
        # prange has no real iterator object; a pass statement stands in.
        self.iterator = PassStatNode(pos)
    def analyse_declarations(self, env):
        """Unpack start/stop/step from the positional args and validate the
        schedule keyword."""
        super(ParallelRangeNode, self).analyse_declarations(env)
        self.target.analyse_target_declaration(env)
        if (self.else_clause is not None):
            self.else_clause.analyse_declarations(env)
        if ((not self.args) or (len(self.args) > 3)):
            error(self.pos, 'Invalid number of positional arguments to prange')
            return
        # Same calling convention as builtin range(): 1, 2 or 3 arguments.
        if (len(self.args) == 1):
            (self.stop,) = self.args
        elif (len(self.args) == 2):
            (self.start, self.stop) = self.args
        else:
            (self.start, self.stop, self.step) = self.args
        if hasattr(self.schedule, 'decode'):
            self.schedule = self.schedule.decode('ascii')
        if (self.schedule not in (None, 'static', 'dynamic', 'guided', 'runtime')):
            error(self.pos, ('Invalid schedule argument to prange: %s' % (self.schedule,)))
    def analyse_expressions(self, env):
        """Type-check the target and bounds, widen the index type, and
        validate chunksize; propagates privates up through nested pranges."""
        was_nogil = env.nogil
        if self.nogil:
            env.nogil = True
        if (self.target is None):
            error(self.pos, 'prange() can only be used as part of a for loop')
            return self
        self.target = self.target.analyse_target_types(env)
        if (not self.target.type.is_numeric):
            if (not self.target.type.is_pyobject):
                error(self.target.pos, ('Must be of numeric type, not %s' % self.target.type))
            self.index_type = PyrexTypes.c_py_ssize_t_type
        else:
            self.index_type = self.target.type
        self.names = ('start', 'stop', 'step')
        start_stop_step = (self.start, self.stop, self.step)
        for (node, name) in zip(start_stop_step, self.names):
            if (node is not None):
                node.analyse_types(env)
                if (not node.type.is_numeric):
                    error(node.pos, ('%s argument must be numeric' % name))
                    continue
                if (not node.is_literal):
                    node = node.coerce_to_temp(env)
                setattr(self, name, node)
                # The index must be wide enough to hold every bound's type.
                self.index_type = PyrexTypes.widest_numeric_type(self.index_type, node.type)
        if (self.else_clause is not None):
            self.else_clause = self.else_clause.analyse_expressions(env)
        if hasattr(self.target, 'entry'):
            self.assignments[self.target.entry] = (self.target.pos, None)
        node = super(ParallelRangeNode, self).analyse_expressions(env)
        if node.chunksize:
            if (not node.schedule):
                error(node.chunksize.pos, 'Must provide schedule with chunksize')
            elif (node.schedule == 'runtime'):
                error(node.chunksize.pos, 'Chunksize not valid for the schedule runtime')
            elif (node.chunksize.type.is_int and node.chunksize.is_literal and (node.chunksize.compile_time_value(env) <= 0)):
                # NOTE(review): the condition rejects zero as well, so the
                # message arguably should read "must be positive".
                error(node.chunksize.pos, 'Chunksize must not be negative')
            node.chunksize = node.chunksize.coerce_to(PyrexTypes.c_int_type, env).coerce_to_temp(env)
        if node.nogil:
            env.nogil = was_nogil
        node.is_nested_prange = (node.parent and node.parent.is_prange)
        if node.is_nested_prange:
            # Hoist this loop's assignments/privates to the outermost prange.
            parent = node
            while (parent.parent and parent.parent.is_prange):
                parent = parent.parent
            parent.assignments.update(node.assignments)
            parent.privates.update(node.privates)
            parent.assigned_nodes.extend(node.assigned_nodes)
        return node
    def nogil_check(self, env):
        """All bounds and the target must be C values inside a nogil block."""
        names = ('start', 'stop', 'step', 'target')
        nodes = (self.start, self.stop, self.step, self.target)
        for (name, node) in zip(names, nodes):
            if ((node is not None) and node.type.is_pyobject):
                error(node.pos, ("%s may not be a Python object as we don't have the GIL" % name))
    def generate_execution_code(self, code):
        """Emit the surrounding C code: bound evaluation, step-size guard,
        step count computation, the loop itself, and the optional else
        clause (executed unless the loop was broken out of)."""
        self.declare_closure_privates(code)
        target_index_cname = self.target.entry.cname
        fmt_dict = {'target': target_index_cname, 'target_type': self.target.type.empty_declaration_code()}
        start_stop_step = (self.start, self.stop, self.step)
        defaults = ('0', '0', '1')
        for (node, name, default) in zip(start_stop_step, self.names, defaults):
            if (node is None):
                result = default
            elif node.is_literal:
                result = node.get_constant_c_result_code()
            else:
                node.generate_evaluation_code(code)
                result = node.result()
            fmt_dict[name] = result
        fmt_dict['i'] = code.funcstate.allocate_temp(self.index_type, False)
        fmt_dict['nsteps'] = code.funcstate.allocate_temp(self.index_type, False)
        # A zero step would loop forever; abort at runtime.
        code.putln(('if ((%(step)s == 0)) abort();' % fmt_dict))
        self.setup_parallel_control_flow_block(code)
        # Ceiling-divide (stop - start) by step, valid for either sign of step:
        # step/abs(step) is +/-1, so the numerator is adjusted toward stop.
        code.putln(('%(nsteps)s = (%(stop)s - %(start)s + %(step)s - %(step)s/abs(%(step)s)) / %(step)s;' % fmt_dict))
        code.putln(('if (%(nsteps)s > 0)' % fmt_dict))
        code.begin_block()
        self.generate_loop(code, fmt_dict)
        code.end_block()
        self.restore_labels(code)
        if self.else_clause:
            if self.breaking_label_used:
                # parallel_why >= 2 means break/return/exception: skip else.
                code.put(('if (%s < 2)' % Naming.parallel_why))
            code.begin_block()
            code.putln('/* else */')
            self.else_clause.generate_execution_code(code)
            code.end_block()
        self.end_parallel_control_flow_block(code)
        for temp in (start_stop_step + (self.chunksize,)):
            if (temp is not None):
                temp.generate_disposal_code(code)
                temp.free_temps(code)
        code.funcstate.release_temp(fmt_dict['i'])
        code.funcstate.release_temp(fmt_dict['nsteps'])
        self.release_closure_privates(code)
    def generate_loop(self, code, fmt_dict):
        """Emit the OpenMP pragma (with reductions, privates, schedule and
        num_threads clauses) and the normalized C for-loop body."""
        if self.is_nested_prange:
            # Nested pranges run sequentially: disable the inner pragma.
            code.putln('#if 0')
        else:
            code.putln('#ifdef _OPENMP')
        if (not self.is_parallel):
            # prange inside an explicit 'with parallel()' block: worksharing only.
            code.put('#pragma omp for')
            self.privatization_insertion_point = code.insertion_point()
            reduction_codepoint = self.parent.privatization_insertion_point
        else:
            code.put('#pragma omp parallel')
            self.privatization_insertion_point = code.insertion_point()
            reduction_codepoint = self.privatization_insertion_point
        code.putln('')
        code.putln('#endif /* _OPENMP */')
        code.begin_block()
        self.begin_parallel_block(code)
        if self.is_nested_prange:
            code.putln('#if 0')
        else:
            code.putln('#ifdef _OPENMP')
            code.put('#pragma omp for')
        # Attach reduction/private/firstprivate/lastprivate clauses.
        for (entry, (op, lastprivate)) in sorted(self.privates.items()):
            if (op and (op in '+*-&^|') and (entry != self.target.entry)):
                if entry.type.is_pyobject:
                    error(self.pos, 'Python objects cannot be reductions')
                else:
                    reduction_codepoint.put((' reduction(%s:%s)' % (op, entry.cname)))
            else:
                if (entry == self.target.entry):
                    code.put((' firstprivate(%s)' % entry.cname))
                    code.put((' lastprivate(%s)' % entry.cname))
                    continue
                if (not entry.type.is_pyobject):
                    if lastprivate:
                        private = 'lastprivate'
                    else:
                        private = 'private'
                    code.put((' %s(%s)' % (private, entry.cname)))
        if self.schedule:
            if self.chunksize:
                chunksize = (', %s' % self.evaluate_before_block(code, self.chunksize))
            else:
                chunksize = ''
            code.put((' schedule(%s%s)' % (self.schedule, chunksize)))
        self.put_num_threads(reduction_codepoint)
        code.putln('')
        code.putln('#endif /* _OPENMP */')
        # Normalized loop over step count; the target is recomputed each
        # iteration from start + step * i.
        code.put(('for (%(i)s = 0; %(i)s < %(nsteps)s; %(i)s++)' % fmt_dict))
        code.begin_block()
        guard_around_body_codepoint = code.insertion_point()
        code.begin_block()
        code.putln(('%(target)s = (%(target_type)s)(%(start)s + %(step)s * %(i)s);' % fmt_dict))
        self.initialize_privates_to_nan(code, exclude=self.target.entry)
        if (self.is_parallel and (not self.is_nested_prange)):
            code.funcstate.start_collecting_temps()
        self.body.generate_execution_code(code)
        self.trap_parallel_exit(code, should_flush=True)
        if (self.is_parallel and (not self.is_nested_prange)):
            self.privatize_temps(code)
        if self.breaking_label_used:
            # Skip remaining iterations' bodies after a break/return/raise.
            guard_around_body_codepoint.putln(('if (%s < 2)' % Naming.parallel_why))
        code.end_block()
        code.end_block()
        if self.is_parallel:
            self.end_parallel_block(code)
        code.end_block()
class CategoricalJointVarField(CategoricalJointField):
    """CategoricalJointField variant with field_type pinned to 'varm'."""
    def __init__(self, *args, **kwargs):
        # Everything else is delegated unchanged to the parent class.
        super().__init__(*args, field_type='varm', **kwargs)
def eval1(mask_path, gt_path, m):
    """Evaluate predicted saliency masks against ground-truth maps.

    For every file listed in ``gt_path`` the same-named mask from
    ``mask_path`` is loaded, resized to 320x320 and normalized to [0, 1].
    The mask is binarized with a threshold derived from ``m`` (adaptive
    1.5 * mean(mask) when m > 1, otherwise m itself; clamped to 1) and the
    ground truth is binarized at 0.1.

    Args:
        mask_path: directory of predicted mask images.
        gt_path: directory of ground-truth images (defines the file list).
        m: threshold selector (see above).

    Returns:
        Tuple (mae, f-measure, recall, precision) averaged over all files.
    """
    files = os.listdir(gt_path)
    maes = 0
    precesions = 0
    recalls = 0
    fmeasures = 0
    for file in files:
        mask_file = ((mask_path + '/') + file)
        gt_file = ((gt_path + '/') + file)
        mask_img = Image.open(mask_file).resize((320, 320))
        mask = (np.array(mask_img).astype(float) / 255.0)
        # Pick the binarization threshold, clamped to at most 1.
        if (m > 1):
            mean = (np.mean(mask) * 1.5)
        else:
            mean = m
        if (mean > 1):
            mean = 1
        # Vectorized binarization (replaces the original O(w*h) Python loops).
        zeros = (mask >= mean).astype(float)
        gt = (np.array(Image.open(gt_file)).astype(float) / 255.0)
        # NOTE(review): gt is not resized to 320x320 -- the ground truth must
        # already match the resized mask's shape or the MAE below fails.
        gt = (gt > 0.1).astype(float)
        # MAE compares the binarized ground truth with the *continuous* mask.
        mae = np.mean(np.abs((gt - mask)))
        maes += mae
        precesion = metrics.precision_score(gt.reshape((- 1)), zeros.reshape((- 1)))
        precesions += precesion
        recall = metrics.recall_score(gt.reshape((- 1)), zeros.reshape((- 1)))
        recalls += recall
        if ((precesion == 0) and (recall == 0)):
            fmeasure = 0.0
        else:
            # F-beta with beta^2 = 0.3, the usual saliency F-measure.
            fmeasure = ((((1 + 0.3) * precesion) * recall) / ((0.3 * precesion) + recall))
        fmeasures += fmeasure
    n = len(files)
    return ((maes / n), (fmeasures / n), (recalls / n), (precesions / n))
# NOTE(review): "_module()" looks like residue of a stripped registration
# decorator (e.g. "@BACKBONES.register_module()") -- restore before use.
_module()
class Darknet(nn.Module):
    """Darknet backbone (depth 53) producing multi-scale feature maps.

    The network is a stem conv followed by five conv+residual stages; the
    stages whose indices appear in ``out_indices`` contribute to the output
    tuple of forward().
    """
    # depth -> (residual repeats per stage, (in, out) channels per stage).
    arch_settings = {53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512), (512, 1024)))}
    def __init__(self, depth=53, out_indices=(3, 4, 5), frozen_stages=(- 1), conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), norm_eval=True):
        super(Darknet, self).__init__()
        if (depth not in self.arch_settings):
            raise KeyError(f'invalid depth {depth} for darknet')
        self.depth = depth
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        (self.layers, self.channels) = self.arch_settings[depth]
        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        # Stem: 3 -> 32 channels, stride 1.
        self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg)
        self.cr_blocks = ['conv1']
        for (i, n_layers) in enumerate(self.layers):
            layer_name = f'conv_res_block{(i + 1)}'
            (in_c, out_c) = self.channels[i]
            self.add_module(layer_name, self.make_conv_res_block(in_c, out_c, n_layers, **cfg))
            self.cr_blocks.append(layer_name)
        self.norm_eval = norm_eval
    def forward(self, x):
        """Run all stages, collecting outputs at the requested indices."""
        outs = []
        for (i, layer_name) in enumerate(self.cr_blocks):
            cr_block = getattr(self, layer_name)
            x = cr_block(x)
            if (i in self.out_indices):
                outs.append(x)
        return tuple(outs)
    def init_weights(self, pretrained=None):
        """Load a checkpoint path, or Kaiming/constant-init from scratch."""
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')
    def _freeze_stages(self):
        # Put the first `frozen_stages` blocks in eval mode and stop grads.
        if (self.frozen_stages >= 0):
            for i in range(self.frozen_stages):
                m = getattr(self, self.cr_blocks[i])
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False
    def train(self, mode=True):
        """Switch train/eval mode, re-applying stage freezing and (optionally)
        keeping BatchNorm layers in eval mode."""
        super(Darknet, self).train(mode)
        self._freeze_stages()
        if (mode and self.norm_eval):
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
    # NOTE(review): no `self` parameter -- presumably a stripped
    # "@staticmethod" decorator; confirm before calling on instances.
    def make_conv_res_block(in_channels, out_channels, res_repeat, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
        """Build one stage: a stride-2 downsampling conv followed by
        ``res_repeat`` residual blocks at ``out_channels``."""
        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        model = nn.Sequential()
        model.add_module('conv', ConvModule(in_channels, out_channels, 3, stride=2, padding=1, **cfg))
        for idx in range(res_repeat):
            model.add_module('res{}'.format(idx), ResBlock(out_channels, **cfg))
        return model
class FlaxElectraModel(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error unless flax is installed."""
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        # Any instantiation attempt reports the missing 'flax' backend.
        requires_backends(self, ['flax'])
def get_all_eval_values(llm_name):
    """Run get_eval_values for llm_name across every available agent type."""
    for agent_name in available_agent_names:
        get_eval_values(llm_name, agent_name)
def get_audiosegment_from_nparray(nparr, frame_rate=48000):
    """Wrap a numpy PCM array in a pydub AudioSegment.

    Args:
        nparr: integer PCM samples, shape (n,) for mono or (n, channels)
            for multi-channel audio.
        frame_rate: sampling rate in Hz (default 48000).

    Returns:
        An AudioSegment built from the raw bytes of ``nparr``.
    """
    # Generalized: 1-D arrays are treated as mono instead of raising
    # IndexError on nparr.shape[1]; 2-D behavior is unchanged.
    channels = nparr.shape[1] if nparr.ndim > 1 else 1
    audio_segment = AudioSegment(nparr.tobytes(), frame_rate=frame_rate, sample_width=nparr.dtype.itemsize, channels=channels)
    return audio_segment
def bbox_payload_parser(accessor, x1='bbox_x1', y1='bbox_y1', x2='bbox_x2', y2='bbox_y2'):
    """Build a payload parser mapping bbox corner keys to source field names."""
    mapping = {'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2}
    return dict_payload_parser(accessor, mapping)
def test_reassembly_reference_failures():
    """reassemble_doc_from_tokens must reject tokenizations that do not match
    the reference text, and accept one that does."""
    # Tokens contain an extra word not present in the text.
    bad_addition_tokenization = [['Joe', 'Smith', 'lives', 'in', 'Southern', 'California', '.']]
    bad_addition_mwts = [[False for _ in range(len(bad_addition_tokenization[0]))]]
    bad_addition_expansions = [[None for _ in range(len(bad_addition_tokenization[0]))]]
    # A token is misspelled relative to the text.
    bad_inline_tokenization = [['Joe', 'Smith', 'lives', 'in', 'Californiaa', '.']]
    bad_inline_mwts = [[False for _ in range(len(bad_inline_tokenization[0]))]]
    bad_inline_expansions = [[None for _ in range(len(bad_inline_tokenization[0]))]]
    good_tokenization = [['Joe', 'Smith', 'lives', 'in', 'California', '.']]
    good_mwts = [[False for _ in range(len(good_tokenization[0]))]]
    good_expansions = [[None for _ in range(len(good_tokenization[0]))]]
    text = 'Joe Smith lives in California.'
    with pytest.raises(ValueError):
        utils.reassemble_doc_from_tokens(bad_addition_tokenization, bad_addition_mwts, bad_addition_expansions, text)
    with pytest.raises(ValueError):
        # Fixed: the original passed bad_inline_mwts twice instead of the
        # expansions list, so the expansions argument was never exercised.
        utils.reassemble_doc_from_tokens(bad_inline_tokenization, bad_inline_mwts, bad_inline_expansions, text)
    # The matching tokenization must be accepted without raising.
    utils.reassemble_doc_from_tokens(good_tokenization, good_mwts, good_expansions, text)
def register_Ns3WifiMode_methods(root_module, cls):
    """Register Python-binding operators, constructors, and const accessor
    methods for ns3::WifiMode (pybindgen-generated registration style)."""
    cls.add_binary_comparison_operator('==')
    cls.add_output_stream_operator()
    # Copy constructor, default constructor, and construction by mode name.
    cls.add_constructor([param('ns3::WifiMode const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('std::string', 'name')])
    cls.add_method('GetCodeRate', 'ns3::WifiCodeRate', [], is_const=True)
    cls.add_method('GetConstellationSize', 'uint16_t', [], is_const=True)
    # GetDataRate / GetPhyRate are registered with three overloads each.
    cls.add_method('GetDataRate', 'uint64_t', [param('uint8_t', 'channelWidth'), param('uint16_t', 'guardInterval'), param('uint8_t', 'nss')], is_const=True)
    cls.add_method('GetDataRate', 'uint64_t', [param('ns3::WifiTxVector', 'txVector')], is_const=True)
    cls.add_method('GetDataRate', 'uint64_t', [param('uint8_t', 'channelWidth')], is_const=True)
    cls.add_method('GetMcsValue', 'uint8_t', [], is_const=True)
    cls.add_method('GetModulationClass', 'ns3::WifiModulationClass', [], is_const=True)
    cls.add_method('GetNonHtReferenceRate', 'uint64_t', [], is_const=True)
    cls.add_method('GetPhyRate', 'uint64_t', [param('uint8_t', 'channelWidth'), param('uint16_t', 'guardInterval'), param('uint8_t', 'nss')], is_const=True)
    cls.add_method('GetPhyRate', 'uint64_t', [param('ns3::WifiTxVector', 'txVector')], is_const=True)
    cls.add_method('GetUid', 'uint32_t', [], is_const=True)
    cls.add_method('GetUniqueName', 'std::string', [], is_const=True)
    cls.add_method('IsAllowed', 'bool', [param('uint8_t', 'channelWidth'), param('uint8_t', 'nss')], is_const=True)
    cls.add_method('IsHigherCodeRate', 'bool', [param('ns3::WifiMode', 'mode')], is_const=True)
    cls.add_method('IsHigherDataRate', 'bool', [param('ns3::WifiMode', 'mode')], is_const=True)
    cls.add_method('IsMandatory', 'bool', [], is_const=True)
    return
def check_train_sentences(raw_data, direction, all_test_data, mess_up_train=None):
    """Scan a parallel training corpus for sentences that leak from test sets.

    Args:
        raw_data: directory containing ``train.<direction>.<lang>`` files.
        direction: language pair such as 'en-fr' (source-target).
        all_test_data: container of sentences appearing in any test set.
        mess_up_train: optional dict mapping contaminated sentence -> set of
            directions it was seen in; updated in place and returned.
            Defaults to a fresh dict per call (fixed: the original used a
            mutable ``{}`` default, silently accumulating state across calls).

    Returns:
        Tuple (mess_up_train, size) where size is the number of sentence
        pairs read (0 when either corpus file is missing).
    """
    if mess_up_train is None:
        mess_up_train = {}
    (src, tgt) = direction.split('-')
    tgt_path = f'{raw_data}/train.{direction}.{tgt}'
    src_path = f'{raw_data}/train.{direction}.{src}'
    print(f'check training data in {raw_data}/train.{direction}')
    size = 0
    if ((not os.path.exists(tgt_path)) or (not os.path.exists(src_path))):
        # Missing corpus: report zero pairs, leave mess_up_train untouched.
        return (mess_up_train, size)
    with open(src_path) as f, open(tgt_path) as g:
        for (src_line, tgt_line) in zip(f, g):
            s = src_line.strip()
            t = tgt_line.strip()
            size += 1
            if (s in all_test_data):
                mess_up_train.setdefault(s, set()).add(direction)
            if (t in all_test_data):
                mess_up_train.setdefault(t, set()).add(direction)
    return (mess_up_train, size)
def mask_loss(x, labels, masks):
    """Masked mean-squared error: squared error is summed over masked-in
    positions and divided by the number of nonzero mask entries."""
    denom = tf.to_float(tf.count_nonzero(masks))
    squared_err = tf.math.pow((x - labels), 2)
    return tf.reduce_sum(tf.multiply(squared_err, masks)) / denom
def get_coppeliasim_root():
    """Return the CoppeliaSim install directory from $COPPELIASIM_ROOT.

    Raises:
        RuntimeError: if the environment variable is not set.
    """
    root = os.environ.get('COPPELIASIM_ROOT')
    if root is None:
        raise RuntimeError('Please set env COPPELIASIM_ROOT')
    return root
def add(g, self, other, alpha=None):
    """ONNX symbolic for add: emits an Add op.

    Adding a list of tensors, or using alpha != 1, is not supported at this
    opset and is reported via the standard unsupported-op helpers.
    """
    if sym_help._is_value(self) and sym_help._is_tensor_list(self):
        return sym_help._onnx_opset_unsupported_detailed('Add', 9, 11, 'Add between list of tensors not supported')
    if alpha and sym_help._scalar(sym_help._maybe_get_scalar(alpha)) != 1:
        return _unimplemented('add', 'alpha != 1')
    return g.op('Add', self, other)
def test_not_app_with_asgi(schema):
    """call_asgi without an attached app must raise a descriptive RuntimeError."""
    case = Case(schema['/users']['GET'])
    case.operation.app = None
    expected = 'ASGI application instance is required. Please, set `app` argument in the schema constructor or pass it to `call_asgi`'
    with pytest.raises(RuntimeError, match=expected):
        case.call_asgi()
class sSFU_reg(atomic_reg):
    """ctypes bit-field layout of the short 'sSFU' command register.

    The declared bit widths in ``_fields_`` sum to 256 bits, matching the
    ``length`` attribute below.  Do not reorder fields: the layout must
    match the hardware command format exactly.
    """
    OP_NAME = 'sSFU'
    _fields_ = [('cmd_short', ctypes.c_uint64, 1), ('cmd_id', ctypes.c_uint64, 20), ('cmd_id_dep', ctypes.c_uint64, 20), ('tsk_typ', ctypes.c_uint64, 4), ('tsk_eu_typ', ctypes.c_uint64, 5), ('rsvd0', ctypes.c_uint64, 5), ('cmd_id_en', ctypes.c_uint64, 4), ('pwr_step', ctypes.c_uint64, 4), ('intr_en', ctypes.c_uint64, 1), ('res0_prec', ctypes.c_uint64, 3), ('opd0_prec', ctypes.c_uint64, 3), ('opd2_n_str', ctypes.c_uint64, 2), ('rsvd3', ctypes.c_uint64, 8), ('res0_n', ctypes.c_uint64, 16), ('res0_c', ctypes.c_uint64, 16), ('res0_h', ctypes.c_uint64, 16), ('res0_w', ctypes.c_uint64, 16), ('opd1_n', ctypes.c_uint64, 16), ('res0_addr', ctypes.c_uint64, 32), ('opd0_addr', ctypes.c_uint64, 32), ('opd1_addr', ctypes.c_uint64, 32)]
    # Type hints mirroring the ctypes bit-fields declared above.
    cmd_short: int
    cmd_id: int
    cmd_id_dep: int
    tsk_typ: int
    tsk_eu_typ: int
    rsvd0: int
    cmd_id_en: int
    pwr_step: int
    intr_en: int
    res0_prec: int
    opd0_prec: int
    opd2_n_str: int
    rsvd3: int
    res0_n: int
    res0_c: int
    res0_h: int
    res0_w: int
    opd1_n: int
    res0_addr: int
    opd0_addr: int
    opd1_addr: int
    # Total register width in bits.
    length: int = 256
def only_binary():
    """Build the '--only-binary' command-line Option for format control."""
    help_text = 'Do not use source packages. Can be supplied multiple times, and each time adds to the existing value. Accepts either :all: to disable all source packages, :none: to empty the set, or one or more package names with commas between them. Packages without binary distributions will fail to install when this option is used on them.'
    return Option('--only-binary', dest='format_control', action='callback', callback=_handle_only_binary, type='str', default=FormatControl(set(), set()), help=help_text)
class AP1SNmat(SpectralMatrix):
    """Spectral matrix coupling a P1 test basis with an SN trial basis.

    assemble() returns the matrix as a dict of nonzero diagonals
    {offset: entries}; only the main diagonal (0) and the second upper
    diagonal (2) are nonzero.
    """
    def assemble(self, method):
        """Return the two nonzero diagonals for the P1/SN pairing.

        The entries differ depending on whether the P1 test basis is scaled;
        ``method`` is accepted for interface compatibility but unused here.
        """
        (test, trial) = (self.testfunction, self.trialfunction)
        assert isinstance(test[0], P1)
        assert isinstance(trial[0], SN)
        k = np.arange((test[0].N - 2))
        # Scaled basis: constant 1 on the second diagonal.
        d = {0: (- ((k / (k + 2)) ** 2)), 2: 1}
        if (not test[0].is_scaled()):
            # Unscaled basis: both diagonals carry explicit (k+2) factors.
            d = {0: ((- (k ** 2)) / (k + 2)), 2: (k[:(- 2)] + 2)}
        return d
    def get_solver(self):
        """Dedicated solver for a two-diagonal matrix."""
        return TwoDMA
def init_cfg_for_merge(cfg, new_cfg):
    """Recursively seed missing keys of cfg from new_cfg (BASE_KEY excluded).

    Existing values in cfg win; nested CfgNode values are merged depth-first.
    """
    for key in [*cfg.keys(), *new_cfg.keys()]:
        if key == BASE_KEY:
            continue
        cfg.setdefault(key, new_cfg.get(key))
        child = new_cfg.get(key)
        if isinstance(child, CfgNode):
            init_cfg_for_merge(cfg.get(key), child)
def jsd_grad(go, o, pq_list):
    """Gradient of the Jensen-Shannon divergence with respect to p.

    Args:
        go: upstream gradient to multiply into the result.
        o: unused; kept for the gradient-function calling convention.
        pq_list: pair (p, q) of probability values.

    Returns:
        A two-element list [dJSD/dp * go, None] (no gradient for q).
    """
    p, q = pq_list
    mix = (p + q) / 2.0
    grad_p = np.log(((p * (1 - mix)) / (1 - p)) / mix) / 2.0
    return [grad_p * go, None]
class DistTrainer():
def __init__(self, train_data, model, optimizer=None, loss=None, callbacks_all=None, callbacks_master=None, batch_size_per_gpu=8, n_epochs=1, num_workers=1, drop_last=False, dev_data=None, metrics=None, metric_key=None, update_every=1, print_every=10, validate_every=(- 1), save_path=None, device='auto', fp16='', use_tqdm=True):
assert (device in ['auto', 'cuda', 'cpu']), "Please set correct device in [auto', 'cuda', 'cpu']"
if (device == 'auto'):
device = ('cuda' if torch.cuda.is_available() else 'cpu')
if (device == 'cuda'):
torch.cuda.set_device(get_local_rank())
self.device = torch.device('cuda', get_local_rank())
else:
self.device = torch.device(device)
init_logger_dist()
self.world_size = dist.get_world_size()
self.rank = dist.get_rank()
self.train_data = train_data
self.batch_size_per_gpu = int(batch_size_per_gpu)
self.n_epochs = int(n_epochs)
self.num_data_workers = int(num_workers)
self.drop_last = drop_last
self.update_every = int(update_every)
self.print_every = int(print_every)
self.validate_every = int(validate_every)
self.save_path = save_path
self.losser = _prepare_losser(loss)
self.fp16 = fp16
self.local_rank = get_local_rank()
self._forward_func = model.forward
self.callback_manager = DistCallbackManager(env={'trainer': self}, callbacks_all=callbacks_all, callbacks_master=callbacks_master)
self.test_manager = DistCallbackManager(env={'trainer': self})
self.metric_key = metric_key
self.use_tqdm = use_tqdm
model.to(self.device)
optimizer = self._get_optimizer(optimizer)
if len(self.fp16):
assert isinstance(self.fp16, str), "Please set Apex AMP optimization level selected in ['O0', 'O1', 'O2', 'O3']"
_check_fp16()
assert (device == 'cuda'), 'Amp requires cuda device'
(model, optimizer) = amp.initialize(model, optimizer, opt_level=self.fp16)
if (parse_version(torch.__version__) >= parse_version('1.1')):
self.ddp_model = DDP(model, device_ids=[self.local_rank], output_device=self.local_rank, find_unused_parameters=True)
else:
self.ddp_model = DDP(model, device_ids=[self.local_rank], output_device=self.local_rank)
self.model = self.ddp_model.module
self.optimizer = optimizer
self.sampler = DistributedSampler(self.train_data)
self.data_iterator = self._get_data_iter(self.train_data)
self.batch_size = (self.world_size * self.batch_size_per_gpu)
self.n_steps = self._get_n_steps()
if (dev_data and metrics):
cb = _TesterCallback(dev_data, model, metrics, batch_size=batch_size_per_gpu, num_workers=num_workers)
self.test_manager.add_callback([cb], master=True)
sync_time = torch.tensor(time.time(), dtype=torch.double).to(self.device)
dist.broadcast(sync_time, src=0)
self.start_time = datetime.fromtimestamp(sync_time.item()).strftime('%Y-%m-%d-%H-%M-%S-%f')
if self.save_path:
self.cp_save_path = self.save_path
else:
self.cp_save_path = None
self.logger = logger
self.logger.info('Setup Distributed Trainer')
self.logger.warning('Process pid: {}, rank: {}, local rank: {}, device: {}, fp16: {}'.format(os.getpid(), self.rank, self.local_rank, self.device, (self.fp16 if self.fp16 else False)))
self.logger.info('Num of processes: {}'.format(self.world_size))
self.logger.info('Use device: {}'.format(device))
self.logger.info('Training with fp16: {}, optimization level: {}'.format((len(self.fp16) > 0), (self.fp16 if self.fp16 else None)))
def _maybe_no_sync(self):
i = (self.step % self.update_every)
if ((self.world_size > 1) and hasattr(self.ddp_model, 'no_sync') and (i != 0)):
return self.ddp_model.no_sync()
else:
return contextlib.ExitStack()
def _get_n_steps(self):
return (len(self.data_iterator) * self.n_epochs)
def _get_data_iter(self, dataset):
if isinstance(dataset, DataSet):
return DataSetIter(dataset=dataset, batch_size=self.batch_size_per_gpu, sampler=self.sampler, num_workers=self.num_data_workers, drop_last=self.drop_last)
elif isinstance(dataset, BatchIter):
return dataset
else:
raise TypeError('train_data type {} not support'.format(type(dataset)))
def _get_optimizer(self, optimizer):
if isinstance(optimizer, torch.optim.Optimizer):
return optimizer
elif isinstance(optimizer, Optimizer):
return optimizer.construct_from_pytorch(self.ddp_model.parameters())
elif (optimizer is None):
return torch.optim.Adam(self.ddp_model.parameters(), lr=0.004)
else:
raise TypeError('optimizer can only be torch.optim.Optimizer type, not {}.'.format(type(optimizer)))
def is_master(self):
return (self.rank == 0)
def train(self, load_best_model=True, on_exception='auto'):
try:
self.logger.info('###### Training epochs started ######')
self.logger.info(('Total epochs: %d' % self.n_epochs))
self.logger.info(('Total steps: %d' % self.n_steps))
self.logger.info(('Num instances per GPU: %d' % self.batch_size_per_gpu))
self.logger.info(('Num of steps per update: %d' % self.update_every))
self.logger.info(('Total batch_size: %d' % ((self.batch_size_per_gpu * dist.get_world_size()) * self.update_every)))
self.logger.info(('Total num of samples: %d' % len(self.train_data)))
self.logger.info('Num of callbacks for all workers: {}'.format(len(self.callback_manager.callbacks_all)))
self.logger.info('Num of callbacks for master workers: {}'.format(len(self.callback_manager.callbacks_master)))
self.logger.info('Callbacks for all workers: {}'.format([repr(cb) for cb in self.callback_manager.callbacks_all]))
self.logger.info('Callbacks for master workers: {}'.format([repr(cb) for cb in self.callback_manager.callbacks_master]))
start_time = time.time()
results = {}
if (self.n_epochs <= 0):
self.logger.info('Training epoch is {}, nothing was done.'.format(self.n_epochs))
results['seconds'] = 0.0
return results
try:
self.callback_manager.on_train_begin()
self._train()
self.callback_manager.on_train_end()
except BaseException as e:
self.callback_manager.on_exception(e)
if (on_exception == 'auto'):
if (not isinstance(e, (CallbackException, KeyboardInterrupt))):
raise e
else:
self.logger.info('Catch {}, ignored.'.format(e.__class__.__name__))
elif (on_exception == 'raise'):
raise e
results['seconds'] = round((time.time() - start_time), 2)
self.logger.info('###### Train finished ######')
self.logger.info('Total train time: {} seconds.'.format(results['seconds']))
if (load_best_model and self.cp_save_path and len(self.test_manager.callbacks)):
self.load_check_point(self._best_save_name())
finally:
pass
dist.barrier()
return results
def _train(self):
dist.barrier()
if (not self.use_tqdm):
from .utils import _pseudo_tqdm as inner_tqdm
else:
inner_tqdm = tqdm
self.step = 0
self.epoch = 0
self.pbar = inner_tqdm(total=self.n_steps, postfix='loss:{0:<6.5f}', leave=False, dynamic_ncols=True, disable=(not self.is_master))
pbar = self.pbar
avg_loss = 0
data_iterator = self.data_iterator
self.ddp_model.zero_grad()
for epoch in range(1, (self.n_epochs + 1)):
self.epoch = epoch
pbar.set_description_str(desc='Epoch {}/{}'.format(epoch, self.n_epochs))
self.callback_manager.on_epoch_begin()
for (batch_x, batch_y) in data_iterator:
self.step += 1
self.ddp_model.train()
_move_dict_value_to_device(batch_x, batch_y, device=self.device)
indices = data_iterator.get_batch_indices()
self.callback_manager.on_batch_begin(batch_x, batch_y, indices)
prediction = self._data_forward(self.ddp_model, batch_x)
self.callback_manager.on_loss_begin(batch_y, prediction)
loss = self._compute_loss(prediction, batch_y)
if (self.update_every > 1):
loss = (loss / self.update_every)
avg_loss += loss.item()
self.callback_manager.on_backward_begin(loss)
if self.fp16:
with amp.scale_loss(loss, self.optimizer) as scale_loss:
scale_loss.backward()
else:
loss.backward()
self.callback_manager.on_backward_end()
self._update()
self.callback_manager.on_step_end()
if ((self.step % self.print_every) == 0):
avg_loss = (float(avg_loss) / self.print_every)
print_output = 'loss:{:<6.5f}'.format(avg_loss)
pbar.update(self.print_every)
pbar.set_postfix_str(print_output)
avg_loss = 0
self.callback_manager.on_batch_end()
if ((self.validate_every > 0) and ((self.step % self.validate_every) == 0)):
self._do_validation()
if (self.validate_every < 0):
self._do_validation()
self.callback_manager.on_epoch_end()
pbar.close()
self.pbar = None
def _update(self):
if ((self.step % self.update_every) == 0):
self.optimizer.step()
self.ddp_model.zero_grad()
def _data_forward(self, network, x):
x = _build_args(self._forward_func, **x)
y = network(**x)
if (not isinstance(y, dict)):
raise TypeError(f'The return value of {_get_func_signature(self._forward_func)} should be dict, got {type(y)}.')
return y
def _compute_loss(self, predict, truth):
loss = self.losser(predict, truth)
if (self.update_every > 1):
loss = (loss / self.update_every)
if (loss.dim() > 0):
loss = loss.mean()
return loss
def save_check_point(self, name=None, only_params=False):
if (name is None):
name = 'checkpoint-{}.bin'.format(self.step)
os.makedirs(self.cp_save_path, exist_ok=True)
path = os.path.join(self.cp_save_path, name)
self.logger.info('Save checkpoint to {}'.format(path))
model_to_save = self.ddp_model.module
if only_params:
model_to_save = model_to_save.state_dict()
if self.is_master:
torch.save(model_to_save, path)
def load_check_point(self, name):
    """Reload a checkpoint written by ``save_check_point`` into ``self.model``.

    Tensors are remapped to CPU so a file saved on any device can be loaded
    by any rank; both full-module and state-dict checkpoints are accepted.
    """
    path = os.path.join(self.cp_save_path, name)
    self.logger.info('reload best model from %s', path)
    state = torch.load(path, map_location=(lambda s, l: default_restore_location(s, 'cpu')))
    if not isinstance(state, dict):
        # a whole module was saved — extract its parameters
        state = state.state_dict()
    self.model.load_state_dict(state)
def _best_save_name(self, auto_fix=True):
best_name = ('best_' + '_'.join([self.model.__class__.__name__, str(self.metric_key), self.start_time]))
return best_name
def _do_validation(self):
    """Run validation, checkpoint on improvement, and sync the metric key.

    Runs under ``no_sync`` so validation-time forward passes do not trigger
    DDP gradient synchronisation. If no metric key has been chosen yet it is
    taken from the first metric of the first evaluation result; non-master
    ranks recover it afterwards by parsing the best-checkpoint file name.
    """
    with self.ddp_model.no_sync():
        self.callback_manager.on_valid_begin()
        # the test manager returns one (result, is_better) pair per tester;
        # None entries (ranks without a tester) are dropped
        eval_res = self.test_manager.on_valid_begin()
        eval_res = list(filter((lambda x: (x is not None)), eval_res))
        if len(eval_res):
            (eval_res, is_better) = list(zip(*eval_res))
            # only the first tester's result is used
            eval_res = eval_res[0]
            is_better = is_better[0]
        else:
            (eval_res, is_better) = (None, None)
        if ((self.metric_key is None) and (eval_res is not None)):
            # default the metric key to the first metric of the first result
            eval_res0 = list(eval_res.values())[0]
            self.metric_key = list(eval_res0.keys())[0]
        if ((is_better is not None) and self.cp_save_path):
            if is_better:
                self.save_check_point(self._best_save_name(), only_params=False)
        # wait until the master has (possibly) written the checkpoint
        dist.barrier()
        if ((not self.is_master) and (self.metric_key is None)):
            # non-master ranks did not evaluate: recover the metric key from
            # the best-checkpoint file name (best_<Model>_<key>_<start_time>)
            prefix = ('best_' + self.model.__class__.__name__)
            suffix = self.start_time
            fn_list = os.listdir(self.cp_save_path)
            fn_list = [fn for fn in fn_list if (fn.startswith(prefix) and fn.endswith(suffix))]
            if (len(fn_list) == 1):
                best_name = fn_list[0]
                self.metric_key = best_name[len(prefix):(- len(suffix))].strip('_')
        self.callback_manager.on_valid_end(eval_res, self.metric_key, self.optimizer, is_better)
    self.ddp_model.train()
def close(self):
    """Tear down the distributed process group; the trainer is unusable afterwards."""
    dist.destroy_process_group()
# NOTE(review): the next line looks like a truncated decorator (probably
# ``@slim.add_arg_scope``) mangled during extraction — restore before use.
_arg_scope
def stack_blocks_dense(net, blocks, output_stride=None, outputs_collections=None):
    """Stack ResNet blocks, switching to atrous convolution at ``output_stride``.

    Units are applied normally (multiplying ``current_stride`` by each unit's
    stride) until the requested output stride is reached; from then on units
    run with stride 1 and an accumulated atrous ``rate`` instead, so the
    spatial resolution stops shrinking. Raises ``ValueError`` when the target
    stride cannot be hit exactly.
    """
    current_stride = 1
    # atrous (dilation) rate, grown in place of further spatial striding
    rate = 1
    for block in blocks:
        with tf.variable_scope(block.scope, 'block', [net]) as sc:
            for (i, unit) in enumerate(block.args):
                if ((output_stride is not None) and (current_stride > output_stride)):
                    raise ValueError('The target output_stride cannot be reached.')
                with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]):
                    (unit_depth, unit_depth_bottleneck, unit_stride) = unit
                    if ((output_stride is not None) and (current_stride == output_stride)):
                        # target stride reached: keep resolution, dilate instead
                        net = block.unit_fn(net, depth=unit_depth, depth_bottleneck=unit_depth_bottleneck, stride=1, rate=rate)
                        rate *= unit_stride
                    else:
                        net = block.unit_fn(net, depth=unit_depth, depth_bottleneck=unit_depth_bottleneck, stride=unit_stride, rate=1)
                        current_stride *= unit_stride
            net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
    if ((output_stride is not None) and (current_stride != output_stride)):
        raise ValueError('The target output_stride cannot be reached.')
    return net
def __generate_resolv_file(args, conf_path):
    """Write a resolv file pointing name resolution at the local resolver.

    ``args`` is accepted for interface compatibility and is not used here.
    """
    target = '{}/{}'.format(conf_path, RESOLV_FILENAME)
    with open(target, 'w') as fh:
        fh.write('nameserver 127.0.0.1\n')
def merge(list_a, list_b, pr=False):
    """Merge two ``(name, count)`` sequences, summing counts for shared names.

    Counts from ``list_b`` are added onto those from ``list_a``. Returns the
    combined pairs sorted by count, highest first; ties keep first-seen
    order. ``pr`` is accepted for interface compatibility and is unused.
    """
    totals = OrderedDict()
    for name, count in list_a:
        totals[name] = count
    for name, count in list_b:
        totals[name] = totals.get(name, 0) + count
    return sorted(totals.items(), key=(lambda item: item[1]), reverse=True)
def build_pnasnet_large(images, num_classes, is_training=True, final_endpoint=None, config=None):
    """Build the PNASNet-Large graph for ``images``.

    A deep copy of ``config`` (or the default large-ImageNet hparams) is
    adjusted for training mode; images are transposed to NCHW when that data
    format is configured. The normal cell is constructed once and shared, and
    the slim arg scopes pin ``is_training`` and ``data_format`` for all ops.
    """
    hparams = (copy.deepcopy(config) if config else large_imagenet_config())
    nasnet._update_hparams(hparams, is_training)
    if (tf.test.is_gpu_available() and (hparams.data_format == 'NHWC')):
        tf.logging.info('A GPU is available on the machine, consider using NCHW data format for increased speed on GPU.')
    if (hparams.data_format == 'NCHW'):
        # NHWC -> NCHW
        images = tf.transpose(images, [0, 3, 1, 2])
    # +2 accounts for the stem cells added before the stacked cells
    total_num_cells = (hparams.num_cells + 2)
    normal_cell = PNasNetNormalCell(hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells, hparams.total_training_steps)
    with arg_scope([slim.dropout, nasnet_utils.drop_path, slim.batch_norm], is_training=is_training):
        with arg_scope([slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm, slim.separable_conv2d, nasnet_utils.factorized_reduction, nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index, nasnet_utils.get_channel_dim], data_format=hparams.data_format):
            return _build_pnasnet_base(images, normal_cell=normal_cell, num_classes=num_classes, hparams=hparams, is_training=is_training, final_endpoint=final_endpoint)
def random_entropy(traj, show_progress=True):
    """Compute the random entropy of each individual in ``traj``.

    When the trajectory frame has no user-id column it is treated as a single
    individual and a one-row DataFrame is returned; otherwise the computation
    is applied per user via groupby. The result column is named after this
    function (``sys._getframe().f_code.co_name`` == 'random_entropy').
    """
    if (constants.UID not in traj.columns):
        # single-individual frame: no grouping needed
        return pd.DataFrame([_random_entropy_individual(traj)], columns=[sys._getframe().f_code.co_name])
    if show_progress:
        # progress_apply requires tqdm's pandas integration to be registered
        df = traj.groupby(constants.UID).progress_apply((lambda x: _random_entropy_individual(x)))
    else:
        df = traj.groupby(constants.UID).apply((lambda x: _random_entropy_individual(x)))
    # groupby result comes back as a Series keyed 0; rename to the metric name
    return pd.DataFrame(df).reset_index().rename(columns={0: sys._getframe().f_code.co_name})
class Benchmark():
    """Base class for timing the same computation across several systems.

    Subclasses define one method per system (named after entries of
    ``STD_SYSTEMS`` / ``OPT_SYSTEMS``) returning either a time or a
    ``(cpu, wall)`` tuple; ``run`` times each and prints a summary table.
    """
    def run(self, systems=None, timeout=60, trials=1, sort=False, optional=False):
        """Time each system for ``trials`` runs, aborting any run after ``timeout`` seconds.

        NOTE(review): ``systems.sort()`` runs before the ``systems is None``
        default is applied, so ``sort=True`` with the default ``systems``
        would raise — confirm callers always pass a list when sorting.
        """
        if sort:
            systems.sort()
        print(('\n\n\n' + str(self)))
        print((' %-12s%-12s%-12s%-12s%-12s%15s' % ('System', 'min', 'avg', 'max', 'trials', 'cpu or wall')))
        if (systems is None):
            systems = STD_SYSTEMS
        if optional:
            systems += OPT_SYSTEMS
        for S in systems:
            try:
                X = []          # per-trial timings
                wall = False    # True when the system reports wall-clock time
                for i in range(trials):
                    # abort a hung system after `timeout` seconds of wall time
                    alarm(timeout)
                    t = getattr(self, S)()
                    cancel_alarm()
                    if isinstance(t, tuple):
                        # (cpu, wall) tuple: keep the wall-clock component
                        wall = True
                        t = t[1]
                    X.append(t)
                mn = min(X)
                mx = max(X)
                av = avg(X)
                s = ('* %-12s%-12f%-12f%-12f%-12s' % (S, mn, av, mx, trials))
                # suffix marks whether the last timing was wall (w) or cpu (c)
                if wall:
                    s += ('%15fw' % t)
                else:
                    s += ('%15fc' % t)
                print(s)
            except AlarmInterrupt:
                print(('%-12sinterrupted (timeout: %s seconds wall time)' % (S, timeout)))
            except AttributeError:
                # this benchmark has no method for system S: skip silently
                pass
            except Exception as msg:
                print(msg)
    # alias so benchmarks can be invoked as .bench()
    bench = run
    def __repr__(self):
        """Use a subclass-supplied ``repr_str`` when present."""
        try:
            return self.repr_str
        except AttributeError:
            return 'sage.tests.benchmark.Benchmark instance'
class DummyExampleForPicklingTest():
    """Minimal class, presumably exercised by pickling tests elsewhere."""
    # range bounds consumed by f() below
    start = 10
    stop = 100
    # NOTE(review): the next line looks like a truncated decorator (perhaps
    # ``@cached_method`` or similar) mangled during extraction — restore it.
    _from_method
    def f(self):
        """Return Sage's ``xsrange`` iterator over [start, stop)."""
        from sage.arith.srange import xsrange
        return xsrange(self.start, self.stop)
def findMisplacedChildren(allnodes):
    """Detach children whose EDU spans fall entirely outside their parent's span.

    For each node, its children are re-ordered, the union of the children's
    EDU endpoints is compared with the node's own span, and every child lying
    completely outside the parent's span is removed from the parent's
    ``nodelist``; the corresponding tree node (via ``findNodeT``) is collected.

    :param allnodes: iterable of nodes, each with ``nodelist`` and ``eduspan``.
    :return: list of detached (misplaced) tree nodes.
    """
    misplaced_children = []
    for node in allnodes:
        node.nodelist = orderNodeList(node.nodelist)
        # union of all EDU endpoints covered by this node's children
        starts = set(m.eduspan[0] for m in node.nodelist)
        ends = set(m.eduspan[1] for m in node.nodelist)
        eduCovered = sorted(starts | ends)
        if eduCovered and (min(eduCovered), max(eduCovered)) != node.eduspan:
            # BUG FIX: iterate over a snapshot — the original removed items
            # from node.nodelist while looping over it, which skips the
            # element following each removal.
            for m in list(node.nodelist):
                if (m.eduspan[-1] < node.eduspan[0]) or (m.eduspan[0] > node.eduspan[-1]):
                    cnode = findNodeT(m, allnodes)
                    misplaced_children.append(cnode)
                    node.nodelist.remove(m)
    return misplaced_children
def test_eq_statements_5(default_test_case):
    """A test case with no statements equals another empty test case."""
    default_test_case._statements = []
    empty_other = dtc.DefaultTestCase(ModuleTestCluster(0))
    empty_other._statements = []
    assert default_test_case.__eq__(empty_other)
class TestOptions(BaseOptions):
    """Command-line options used at test (inference) time."""
    def __init__(self):
        super().__init__()
        # test-time runs never train the model
        self.isTrain = False
    def initialize(self, parser):
        """Add test-only arguments on top of the base option set."""
        super().initialize(parser)
        parser.add_argument('--result_dir', type=str, default='results')
        return parser
class SameModule(nn.Module):
    """Attend to features matching a query, excluding the original attention.

    The query readout of the input attention is attended over the complement
    (``NotModule``) of that attention map — presumably to locate *other*
    regions with the same queried property; verify against callers.
    """

    def __init__(self, **kwargs):
        super().__init__()
        self.query = QueryModule(**kwargs)
        self.attend = AttentionModule(**kwargs)
        self.attnNot = NotModule()

    def forward(self, attn, feat, query):
        queried_value = self.query(attn, feat, query)
        inverted_attn = self.attnNot(attn)
        return self.attend(inverted_attn, feat, queried_value)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: bool=False, use_cache: bool=False) -> Tuple[(torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]])]:
    """Causal self-attention using FlashAttention's packed-QKV kernel.

    Projects hidden states to Q/K/V, applies rotary position embeddings,
    packs Q/K/V into a single tensor, and dispatches to the unpadded
    FlashAttention kernel — via the pad/unpad path when an attention mask is
    present. KV caching and returning attention weights are unsupported
    (asserted). Returns ``(output, None, None)``.
    """
    (bsz, q_len, _) = hidden_states.size()
    # (b, s, h*d) -> (b, h, s, d) for each of Q, K, V
    query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    kv_seq_len = key_states.shape[(- 2)]
    assert (past_key_value is None), 'past_key_value is not supported'
    (cos, sin) = self.rotary_emb(value_states, seq_len=kv_seq_len)
    (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
    assert (not output_attentions), 'output_attentions is not supported'
    assert (not use_cache), 'use_cache is not supported'
    # pack into (b, 3, h, s, d) then reorder to (b, s, 3, h, d) for flash-attn
    qkv = torch.stack([query_states, key_states, value_states], dim=2)
    qkv = qkv.transpose(1, 3)
    key_padding_mask = attention_mask
    if (key_padding_mask is None):
        # no padding: flatten batch and sequence into one "unpadded" axis
        qkv = rearrange(qkv, 'b s ... -> (b s) ...')
        max_s = q_len
        # cumulative sequence lengths: every sequence has length q_len
        cu_q_lens = torch.arange(0, ((bsz + 1) * q_len), step=q_len, dtype=torch.int32, device=qkv.device)
        output = flash_attn_unpadded_qkvpacked_func(qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True)
        output = rearrange(output, '(b s) ... -> b s ...', b=bsz)
    else:
        # padded batch: drop padded positions, run the kernel, re-pad
        nheads = qkv.shape[(- 2)]
        x = rearrange(qkv, 'b s three h d -> b s (three h d)')
        (x_unpad, indices, cu_q_lens, max_s) = unpad_input(x, key_padding_mask)
        x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
        output_unpad = flash_attn_unpadded_qkvpacked_func(x_unpad, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True)
        output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices, bsz, q_len), 'b s (h d) -> b s h d', h=nheads)
    return (self.o_proj(rearrange(output, 'b s h d -> b s (h d)')), None, None)
class TriangleDataset(torch.utils.data.Dataset):
    """Synthetic dataset of triangle images labelled by equilateral-ness.

    Label 0 draws an equilateral triangle, label 1 a non-equilateral one.
    Each item is generated at random on access, so ``num_examples`` only
    fixes the nominal epoch length.
    """

    def __init__(self, num_examples=60000):
        self.num_examples = num_examples

    def __len__(self):
        return self.num_examples

    def __getitem__(self, i):
        label = random.randint(0, 1)
        pixels = make_equilateral_triangle(make_equilateral=(label == 0))
        # HWC numpy image -> CHW float tensor
        tensor = torch.from_numpy(pixels).permute(2, 0, 1)
        return (tensor.float(), torch.tensor(label))
def test_denoise():
    """The denoise command parser accepts patch size, output dir and a glob."""
    from topaz.commands import denoise
    arg_parser = denoise.add_arguments()
    args = arg_parser.parse_args(['--patch-size', '1024', '-o', 'data/EMPIAR-10025/denoised/', 'data/EMPIAR-10025/rawdata/micrographs/*.mrc'])
# NOTE(review): the next line looks like a truncated decorator (possibly a
# sacred-style ``@ex.config``) mangled during extraction — restore before use.
_config
def task_mlm_itm():
    """Experiment config: joint masked-language-modeling + image-text matching.

    The local assignments below are the config values; under a config
    decorator such locals are captured rather than discarded — verify once
    the decorator is restored.
    """
    exp_name = 'mlm_itm'
    datasets = ['cc3m']
    loss_names = _loss_names({'itm': 1, 'mlm': 1})
    batch_size = 1024
    max_epoch = 10
    # -1 presumably means "no cap on image token length" — confirm downstream
    max_image_len = (- 1)
def generate_split(image_path, output_path, seed=42):
    """Create a clean/dirty image pair set from a folder of source images.

    Every image found in ``image_path`` is written unchanged to
    ``<output_path>/clean/`` and, after the shabby augmentation pipeline, to
    ``<output_path>/dirty/``. All RNGs are seeded for reproducibility.

    Prints a message and returns early when either folder is missing.

    :param image_path: directory containing the source images.
    :param output_path: directory that will receive clean/ and dirty/ subdirs.
    :param seed: seed applied to random, numpy and OpenCV RNGs.
    """
    if image_path is None or not os.path.isdir(image_path):
        print('Invalid input image folder!')
        return
    if output_path is None or not os.path.isdir(output_path):
        print('Invalid output image folder!')
        return
    # seed every RNG the augmentation pipeline may draw from
    random.seed(seed)
    np.random.seed(seed)
    cv2.setRNGSeed(seed)
    source_paths = glob.glob(os.path.join(image_path, '*'))
    clean_path = os.path.join(output_path, 'clean')
    dirty_path = os.path.join(output_path, 'dirty')
    os.makedirs(clean_path, exist_ok=True)
    os.makedirs(dirty_path, exist_ok=True)
    shabby_pipeline = get_pipeline()
    # BUG FIX: the original loop variable shadowed the ``image_path``
    # parameter; renamed so the argument is never silently clobbered.
    for i, source_path in enumerate(source_paths):
        image = cv2.imread(source_path)
        image_augmented = shabby_pipeline(image)
        filename = os.path.basename(source_path)
        print('Processing image ' + str(i) + ' - ' + filename)
        cv2.imwrite(os.path.join(clean_path, filename), image)
        cv2.imwrite(os.path.join(dirty_path, filename), image_augmented)
class AccuracyMonitor(object):
    """Track the best validation/test accuracy and snapshot TF parameters.

    ``mark_accuracy`` returns False once no improvement has been observed for
    ``early_stop_steps`` iterations, signalling early stopping.
    """

    def __init__(self, sess, early_stop_steps):
        self._early_stop_steps = early_stop_steps
        self._sess = sess
        # (validate_accuracy, test_accuracy, step): tuple comparison means
        # validation accuracy dominates, ties broken by test accuracy, then step
        self.best = (0, 0, 0)
        self.params_at_best = None

    def mark_accuracy(self, validate_accuracy, test_accuracy, i):
        """Record accuracies at step ``i``; return False to request a stop."""
        current = (float(validate_accuracy), float(test_accuracy), i)
        self.curr_accuracy = current
        if current > self.best:
            self.best = current
            # snapshot every TF variable so the best parameters can be restored
            variables = tf.global_variables()
            values = self._sess.run(variables)
            self.params_at_best = {v.name: val for (v, val) in zip(variables, values)}
        # keep going while the best result is within the patience window
        return i <= (self.best[-1] + self._early_stop_steps)
# NOTE(review): the leading ``@pytest.mark`` of each decorator below appears
# to have been lost during extraction; as written these lines are not valid
# Python — restore ``@pytest.mark.parametrize(...)`` before use.
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('including_pad', [True, False])
.parametrize('ignore_border', [True, False])
.parametrize('channel_last', [False, True])
.parametrize('inshape, kernel, stride, pad', [((4, 6), (2, 2), (2, 1), (1, 0)), ((2, 4, 6), (2, 2), (2, 1), (1, 0)), ((2, 2, 4, 6), (2, 2), (2, 1), (1, 0)), ((2, 2, 2, 4, 6), (2, 2), (1, 2), (0, 1))])
def test_average_pooling_2d_double_backward(seed, inshape, kernel, stride, pad, ignore_border, channel_last, including_pad, ctx, func_name):
    """Double-backward (grad-of-grad) test for 2D average pooling in NNabla."""
    from nbla_test_utils import backward_function_tester, grad_function_forward_function_output
    from nnabla.backward_function.average_pooling import AveragePoolingDataGrad
    if (channel_last and (not func_name.endswith('Cudnn'))):
        pytest.skip('Channel last is only supported in Cudnn so far')
    if channel_last:
        # transpose the input shape into channel-last layout
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple((inshape[i] for i in t.inv_axes))
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    func_args = [kernel, stride, ignore_border, pad, channel_last, including_pad]
    # first-order backward check of the forward function
    backward_function_tester(rng, F.average_pooling, inputs=inputs, func_args=func_args, ctx=ctx)
    # second-order: backward of the data-gradient function itself
    (average_pooling_data_grad, y) = grad_function_forward_function_output(AveragePoolingDataGrad, F.average_pooling, ctx, inputs, *func_args)
    average_pooling_data_grad.xshape = inputs[0].shape
    ginputs = [rng.randn(*y.shape)]
    backward_function_tester(rng, average_pooling_data_grad, inputs=ginputs, func_args=[], ctx=ctx)
class SumMeter(UnivariateStatistic):
    """Running sum over a stream of numbers, with support for removal."""

    def __init__(self):
        # running total of all values currently included
        self.sum = 0.0
        # count of values currently included
        self.num_items = 0

    def update(self, num):
        """Add ``num`` to the running sum; returns self for chaining."""
        self.sum = self.sum + num
        self.num_items = self.num_items + 1
        return self

    def remove(self, num):
        """Undo a previous ``update(num)``; returns self for chaining."""
        self.sum = self.sum - num
        self.num_items = self.num_items - 1
        return self

    def get(self):
        """Return the current sum of all included values."""
        return self.sum
def _load_data(_nrows=None, debug=False):
    """Load training features and labels from the configured CSV paths.

    :param _nrows: if given, read only this many rows (quick experiments).
    :param debug: accepted for interface compatibility; unused here.
    :return: ``(train_x, train_y)`` — feature matrix of shape (n, d) and a
        flat int32 label vector of length n.
    """
    # BUG FIX: ``np.float`` was removed in NumPy 1.20+; use np.float64,
    # which is what the deprecated alias resolved to.
    train_x = pd.read_csv(config.TRAIN_X, header=None, sep=' ', nrows=_nrows, dtype=np.float64)
    train_y = pd.read_csv(config.TRAIN_Y, header=None, sep=' ', nrows=_nrows, dtype=np.int32)
    train_x = train_x.values
    train_y = train_y.values.reshape([(- 1)])
    print('data loading done!')
    print(('training data : %d' % train_y.shape[0]))
    # features and labels must describe the same number of samples
    assert (train_x.shape[0] == train_y.shape[0])
    return (train_x, train_y)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.