code stringlengths 101 5.91M |
|---|
def detect(cfgfile, weightfile, imgfile):
    """Run single-image object detection and save an annotated copy.

    Loads the network described by ``cfgfile`` (Caffe .prototxt or Darknet
    cfg), applies ``weightfile``, runs detection twice on ``imgfile`` (the
    first pass absorbs CUDA warm-up cost, only the second is timed), and
    writes the plotted boxes to 'predictions.jpg'.
    """
    # A .prototxt config selects the Caffe loader; anything else is Darknet.
    if cfgfile.find('.prototxt') >= 0:
        from caffenet import CaffeNet
        m = CaffeNet(cfgfile)
    else:
        m = Darknet(cfgfile)
    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % weightfile)
    # Pick a class-name file matching the detector head (VOC=20, COCO=80).
    if m.num_classes == 20:
        namesfile = 'data/voc.names'
    elif m.num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/names'
    use_cuda = 1
    if use_cuda:
        m.cuda()
    img = Image.open(imgfile).convert('RGB')
    sized = img.resize((m.width, m.height))
    # Two passes: only the second (warmed-up) pass is reported.
    for attempt in range(2):
        start = time.time()
        boxes = do_detect(m, sized, 0.5, 0.4, use_cuda)
        finish = time.time()
        if attempt == 1:
            print('%s: Predicted in %f seconds.' % (imgfile, finish - start))
    class_names = load_class_names(namesfile)
    plot_boxes(img, boxes, 'predictions.jpg', class_names)
def create_bsram_tiletype(chip: Chip, db: chipdb, x: int, y: int, ttyp: int, tdesc: TypeDesc):
    """Create a BSRAM tile type for grid cell (x, y).

    Registers the BSRAM bel and wires up its clock, control, block-select,
    address and data ports for the plain and the A/B dual-port pin sets.
    The created tile type name is stored back into ``tdesc.tiletype``.

    :param chip: chip database being built
    :param db: vendor chip description providing the bel port map
    :param x: grid column of the tile
    :param y: grid row of the tile
    :param ttyp: numeric tile type id, embedded in the tile type name
    :param tdesc: type descriptor; ``tdesc.sfx`` disambiguates variants and
        ``tdesc.tiletype`` receives the final name
    :return: the newly created tile type object
    """
    typename = 'BSRAM'
    tiletype = f'{typename}_{ttyp}'
    if tdesc.sfx != 0:
        tiletype += f'_{tdesc.sfx}'
    tt = chip.create_tile_type(tiletype)
    tt.extra_data = TileExtraData(chip.strs.id(typename))
    portmap = db.grid[y][x].bels['BSRAM'].portmap
    bsram = tt.create_bel('BSRAM', 'BSRAM', z=BSRAM_Z)

    def add_port_wire(tt, bel, name, wire_type='BSRAM_I', port_type=PinType.INPUT):
        # Create the wire on first use; clock pins get the dedicated
        # TILE_CLK wire type so they ride the clock network.
        wire = portmap[name]
        if not tt.has_wire(wire):
            if name.startswith('CLK'):
                tt.create_wire(wire, 'TILE_CLK')
            else:
                tt.create_wire(wire, wire_type)
        tt.add_bel_pin(bel, name, wire, port_type)

    # FIX: iterate a tuple, not a set. Set iteration order depends on hash
    # randomization, which made wire/pin creation order nondeterministic
    # between runs of the database generator.
    for sfx in ('', 'A', 'B'):
        for inp in _bsram_inputs:
            add_port_wire(tt, bsram, f'{inp}{sfx}')
        for idx in range(3):
            add_port_wire(tt, bsram, f'BLKSEL{sfx}{idx}')
        for idx in range(14):
            add_port_wire(tt, bsram, f'AD{sfx}{idx}')
        for idx in range(18):
            add_port_wire(tt, bsram, f'DI{sfx}{idx}')
            add_port_wire(tt, bsram, f'DO{sfx}{idx}', 'BSRAM_O', PinType.OUTPUT)
        # The un-suffixed data port is 36 bits wide; cover the upper half.
        if not sfx:
            for idx in range(18, 36):
                add_port_wire(tt, bsram, f'DI{idx}')
                add_port_wire(tt, bsram, f'DO{idx}', 'BSRAM_O', PinType.OUTPUT)
    tdesc.tiletype = tiletype
    return tt
class FirstCell(nn.Module):
    """NASNet-style 'first cell'.

    Bridges the previous feature map ``x_prev`` into the normal-cell layout:
    ``x_prev`` is reduced along two offset average-pool paths whose outputs
    are concatenated and batch-normalized (left branch), while ``x`` is
    projected with a 1x1 conv (right branch).  Five branch combinations are
    then concatenated channel-wise into the cell output.
    """
    def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''):
        super(FirstCell, self).__init__()
        # Right branch: 1x1 projection of the current feature map.
        self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1)
        self.act = nn.ReLU()
        # Left branch, path 1: stride-2 avg pool then 1x1 conv.
        self.path_1 = nn.Sequential()
        self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
        self.path_1.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False))
        # Left branch, path 2: the asymmetric zero pad (crop one side, pad
        # the other) shifts sampling by one pixel relative to path 1.
        self.path_2 = nn.Sequential()
        self.path_2.add_module('pad', nn.ZeroPad2d(((- 1), 1, (- 1), 1)))
        self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
        self.path_2.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False))
        self.final_path_bn = nn.BatchNorm2d((out_chs_left * 2), eps=0.001, momentum=0.1)
        # Separable-conv / pooling pairs used by the five combination steps.
        self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type)
        self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type)
        self.comb_iter_1_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type)
        self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type)
        self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
        self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
        self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type)
        self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type)
    def forward(self, x, x_prev):
        """Combine current (``x``) and previous (``x_prev``) feature maps.

        Returns the channel-wise concatenation of the left branch output and
        the five combination results.
        """
        # Left branch from the previous feature map.
        x_relu = self.act(x_prev)
        x_path1 = self.path_1(x_relu)
        x_path2 = self.path_2(x_relu)
        x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1))
        # Right branch from the current feature map.
        x_right = self.conv_1x1(x)
        x_comb_iter_0_left = self.comb_iter_0_left(x_right)
        x_comb_iter_0_right = self.comb_iter_0_right(x_left)
        x_comb_iter_0 = (x_comb_iter_0_left + x_comb_iter_0_right)
        x_comb_iter_1_left = self.comb_iter_1_left(x_left)
        x_comb_iter_1_right = self.comb_iter_1_right(x_left)
        x_comb_iter_1 = (x_comb_iter_1_left + x_comb_iter_1_right)
        x_comb_iter_2_left = self.comb_iter_2_left(x_right)
        x_comb_iter_2 = (x_comb_iter_2_left + x_left)
        x_comb_iter_3_left = self.comb_iter_3_left(x_left)
        x_comb_iter_3_right = self.comb_iter_3_right(x_left)
        x_comb_iter_3 = (x_comb_iter_3_left + x_comb_iter_3_right)
        x_comb_iter_4_left = self.comb_iter_4_left(x_right)
        x_comb_iter_4 = (x_comb_iter_4_left + x_right)
        x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
        return x_out
class UnetSkipConnectionBlock(nn.Module):
    """One level of a recursively-built U-Net.

    Wraps ``submodule`` between a downsampling conv and an upsampling
    transposed conv.  Inner blocks concatenate their input with their
    output (the skip connection); the outermost block returns the plain
    result passed through a final Tanh.
    """

    def __init__(self, outer_nc, inner_nc, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm carries no affine bias by default, so the convs need
        # their own bias in that case.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)
        if outermost:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
            layers = [downconv] + [submodule] + [uprelu, upconv, nn.Tanh()]
        elif innermost:
            # Innermost level has no submodule and no norm after downconv.
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            layers = [downrelu, downconv] + [uprelu, upconv, upnorm]
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            layers = [downrelu, downconv, downnorm] + [submodule] + [uprelu, upconv, upnorm]
            if use_dropout:
                layers = layers + [nn.Dropout(0.5)]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        # Skip connection: concatenate the block input with its output.
        return torch.cat([x, self.model(x)], 1)
def test_bert_embedding():
    """Smoke-test the bert_embedding CLI end-to-end on one sentence."""
    cli_command = [
        'python', './bert_embedding/cli.py',
        '--model', 'bert_12_768_12',
        '--dataset_name', 'book_corpus_wiki_en_uncased',
        '--max_seq_length', '25',
        '--batch_size', '256',
        '--oov_way', 'avg',
        '--sentences', '"is this jacksonville ?"',
    ]
    # check_call raises CalledProcessError on a non-zero exit, failing the test.
    subprocess.check_call(cli_command)
    time.sleep(5)
def get_all_path(d, with_type=False, leaf_only=True, with_list=True):
    """Collect '$'-joined key paths from a nested dict/list/tuple structure.

    :param d: nested structure of dicts, lists and tuples (leaves otherwise)
    :param with_type: unsupported; must be falsy
    :param leaf_only: when True, only paths ending at leaves are returned;
        otherwise every intermediate key/index also appears
    :param with_list: when True, recurse into lists/tuples, using the element
        index as the path component
    :return: list of path strings, e.g. ``['a$b', 'a$0']``
    :raises AssertionError: if ``with_type`` is truthy
    """
    assert not with_type, 'will not support'
    all_path = []
    if isinstance(d, dict):
        for k, v in d.items():
            sub_paths = get_all_path(v, with_type, leaf_only=leaf_only, with_list=with_list)
            all_path.extend(k + '$' + p for p in sub_paths)
            # Keep the node itself when interior nodes are wanted, or when
            # it is a leaf (no sub-paths found below it).
            if not leaf_only or not sub_paths:
                all_path.append(k)
    elif isinstance(d, (tuple, list)) and with_list:
        for i, v in enumerate(d):
            sub_paths = get_all_path(v, with_type, leaf_only=leaf_only, with_list=with_list)
            all_path.extend(f'{i}$' + p for p in sub_paths)
            if not leaf_only or not sub_paths:
                all_path.append(f'{i}')
    return all_path
def compute_distance(location_1, location_2):
    """Euclidean distance between two locations exposing x/y/z attributes.

    A tiny machine epsilon is added so the result is never exactly zero
    (callers may safely divide by it).
    """
    delta = [
        location_2.x - location_1.x,
        location_2.y - location_1.y,
        location_2.z - location_1.z,
    ]
    return np.linalg.norm(delta) + np.finfo(float).eps
def resdropresnet20_svhn(classes=10, **kwargs):
    """ResDrop-ResNet-20 model for SVHN (basic, non-bottleneck blocks).

    :param classes: number of output classes (SVHN has 10 digits)
    :param kwargs: forwarded to get_resdropresnet_cifar (e.g. pretrained, root)
    :return: the constructed model
    """
    return get_resdropresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name='resdropresnet20_svhn', **kwargs)
class YosoModel(metaclass=DummyObject):
    """Import-time placeholder used when torch is not installed; any attempt
    to instantiate it raises an informative error via requires_backends."""
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def repackage_hidden(h):
    """Detach hidden state(s) from their autograd history.

    A tensor is detached directly; any (possibly nested) iterable of tensors
    is rebuilt as a tuple of detached elements, so truncated BPTT does not
    backpropagate into previous batches.
    """
    if isinstance(h, torch.Tensor):
        return h.detach()
    return tuple(repackage_hidden(element) for element in h)
def test_two_sided_pval_from_zscore():
    """Check raw and corrected two-sided p-values for three reference
    z-scores: strongly negative, borderline (1.96), and null (0)."""
    zscore = np.asarray([-5.87, 1.96, 0])
    two_sided_pval, two_sided_pval_corr = two_sided_pval_from_zscore(zscore)
    expected_pval = np.asarray([0.0, 0.05, 1.0])
    expected_pval_corr = np.asarray([0.0, 0.15, 1.0])
    assert_almost_equal(two_sided_pval, expected_pval, decimal=2)
    assert_almost_equal(two_sided_pval_corr, expected_pval_corr, decimal=2)
def local_env_settings():
    """Build the machine-local EnvSettings: dataset paths left blank, the
    network checkpoint and result output locations filled in."""
    settings = EnvSettings()
    # Dataset/benchmark locations are not configured on this machine.
    for attr in ('davis_dir', 'got10k_path', 'got_packed_results_path',
                 'got_reports_path', 'lasot_path', 'nfs_path', 'otb_path',
                 'tn_packed_results_path', 'tpl_path', 'trackingnet_path',
                 'uav_path', 'vot_path', 'youtubevos_dir'):
        setattr(settings, attr, '')
    settings.network_path = '/data/zzy/ablation/V11_V909_swin_base_rank/ltr/checkpoints/ltr/transt/transt/TransT_ep0081.pth.tar'
    settings.result_plot_path = '/data/zhu_19/TransT-test/17-09-22/pytracking/result_plots/'
    settings.results_path = '/data/zzy/ablation/V11_V909_swin_base_rank/pytracking/tracking_results_0081_new/'
    return settings
class ViTPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder used when torch is not installed; any attempt
    to instantiate it raises an informative error via requires_backends."""
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class MLP(object):
    """A stack of layers applied in sequence.

    Flattens every layer's parameters into ``self.params`` (in layer order)
    and encodes input by threading it through each layer's ``encode``.
    """

    def __init__(self, D_layers):
        # D_layers: ordered sequence of layers, each exposing a ``params``
        # list and an ``encode(x)`` method.
        self.D_layers = D_layers
        # extend() accumulates in O(total) instead of rebuilding the list
        # with + on every iteration (quadratic in the number of layers).
        self.params = []
        for layer in self.D_layers:
            self.params.extend(layer.params)

    def encode(self, input):
        """Apply every layer's encode() in order and return the result."""
        output = input
        for layer in self.D_layers:
            output = layer.encode(output)
        return output

    def get_name(self):
        """Model identifier used for logging/serialization."""
        return 'MLP'
def assert_valid_explanation(explanation):
    """Assert that an explanation object is structurally valid, both as a
    whole and — when the type supports it — for a specific instance index."""
    assert valid_internal_obj(explanation._internal_obj)
    assert valid_data_dict(explanation.data())
    assert valid_visualization(explanation.visualize())
    # Not every explanation type supports per-instance access; probe first
    # and simply skip the per-instance checks if indexing is unsupported.
    try:
        _ = explanation.data(0)
    except Exception:
        return
    assert valid_data_dict(explanation.data(0))
    assert valid_visualization(explanation.visualize(0))
class MinkLoc(torch.nn.Module):
    """Global-descriptor network: a sparse-conv backbone followed by a
    pooling head, optionally L2-normalizing the resulting embedding."""

    def __init__(self, backbone: nn.Module, pooling: PoolingWrapper, normalize_embeddings: bool=False):
        super().__init__()
        self.backbone = backbone
        self.pooling = pooling
        self.normalize_embeddings = normalize_embeddings
        # Diagnostics collected from the pooling layer during forward().
        self.stats = {}

    def forward(self, batch):
        sparse_input = ME.SparseTensor(batch['features'], coordinates=batch['coords'])
        features = self.backbone(sparse_input)
        assert features.shape[1] == self.pooling.in_dim, f'Backbone output tensor has: {features.shape[1]} channels. Expected: {self.pooling.in_dim}'
        embedding = self.pooling(features)
        if hasattr(self.pooling, 'stats'):
            self.stats.update(self.pooling.stats)
        assert embedding.dim() == 2, f'Expected 2-dimensional tensor (batch_size,output_dim). Got {embedding.dim()} dimensions.'
        assert embedding.shape[1] == self.pooling.output_dim, f'Output tensor has: {embedding.shape[1]} channels. Expected: {self.pooling.output_dim}'
        if self.normalize_embeddings:
            embedding = F.normalize(embedding, dim=1)
        return {'global': embedding}

    def print_info(self):
        """Print a short summary of the model's structure and sizes."""
        print('Model class: MinkLoc')
        total_params = sum(param.nelement() for param in self.parameters())
        print(f'Total parameters: {total_params}')
        backbone_params = sum(param.nelement() for param in self.backbone.parameters())
        print(f'Backbone: {type(self.backbone).__name__} #parameters: {backbone_params}')
        pooling_params = sum(param.nelement() for param in self.pooling.parameters())
        print(f'Pooling method: {self.pooling.pool_method} #parameters: {pooling_params}')
        print('# channels from the backbone: {}'.format(self.pooling.in_dim))
        print('# output channels : {}'.format(self.pooling.output_dim))
        print(f'Embedding normalization: {self.normalize_embeddings}')
def get_poseaug_model(args, dataset):
    """Build the PoseAug generator and 3D/2D discriminators together with
    their Adam optimizers and lambda LR schedulers.

    :param args: config with lr_g, lr_d and epochs attributes
    :param dataset: provides the skeleton whose joint count sizes the nets
    :return: dict keyed model_G/model_d3d/model_d2d, optimizer_*, scheduler_*
    """
    print('==> Creating model...')
    device = torch.device('cuda')
    num_joints = dataset.skeleton().num_joints()

    def _prepare(net):
        # Move to GPU, apply weight init, and report the parameter count.
        net = net.to(device)
        net.apply(init_weights)
        print('==> Total parameters: {:.2f}M'.format(sum(p.numel() for p in net.parameters()) / 1000000.0))
        return net

    model_G = _prepare(PoseGenerator(args, num_joints * 3))
    model_d3d = _prepare(Pos3dDiscriminator(num_joints))
    model_d2d = _prepare(Pos2dDiscriminator(num_joints))
    g_optimizer = torch.optim.Adam(model_G.parameters(), lr=args.lr_g)
    d3d_optimizer = torch.optim.Adam(model_d3d.parameters(), lr=args.lr_d)
    d2d_optimizer = torch.optim.Adam(model_d2d.parameters(), lr=args.lr_d)
    g_lr_scheduler = get_scheduler(g_optimizer, policy='lambda', nepoch_fix=0, nepoch=args.epochs)
    d3d_lr_scheduler = get_scheduler(d3d_optimizer, policy='lambda', nepoch_fix=0, nepoch=args.epochs)
    d2d_lr_scheduler = get_scheduler(d2d_optimizer, policy='lambda', nepoch_fix=0, nepoch=args.epochs)
    return {'model_G': model_G, 'model_d3d': model_d3d, 'model_d2d': model_d2d,
            'optimizer_G': g_optimizer, 'optimizer_d3d': d3d_optimizer, 'optimizer_d2d': d2d_optimizer,
            'scheduler_G': g_lr_scheduler, 'scheduler_d3d': d3d_lr_scheduler, 'scheduler_d2d': d2d_lr_scheduler}
def quad_double_hessian_step(vrblvl=0):
    """Ask the PHCpack C library for the quad double Hessian step size.

    :param vrblvl: verbosity level; values > 0 print progress messages
    :return: the computed step size as a float
    """
    verbose = vrblvl > 0
    if verbose:
        print('in quad_double_hessian_step ...')
    phc = get_phcfun()
    # Job 888 with a-parameter 2 — presumably selecting quad double
    # precision (TODO confirm against the PHC job table); the c slot
    # receives the computed step size.
    apar = pointer(c_int32(2))
    bvrb = pointer(c_int32(0))
    cstep = pointer(c_double(0.0))
    vrb = c_int32(vrblvl)
    if verbose:
        print('-> quad_double_hessian_step calls phc', end='')
    retval = phc(888, apar, bvrb, cstep, vrb)
    if verbose:
        print(', return value :', retval)
        print('the step size :', cstep[0])
    return cstep[0]
def save_dictionary(worddict, wordcount, loc):
    """Pickle the vocabulary dict and its counts into ``loc``, in that
    order, as two consecutive pickle records in a single file."""
    with open(loc, 'wb') as handle:
        for obj in (worddict, wordcount):
            pkl.dump(obj, handle)
def apply_warmup_lr(global_step, lr, base_lr, warmup_steps):
    """Linearly ramp the learning rate from 0 to base_lr over the first
    ``warmup_steps`` steps; afterwards the incoming ``lr`` is used as-is.

    With warmup_steps <= 0 the schedule is disabled and ``lr`` is returned
    unchanged.
    """
    if warmup_steps > 0:
        step_f = tf.cast(global_step, tf.float32)
        warmup_lr = step_f * (base_lr / warmup_steps)
        # Graph-mode conditional: pick the warmup rate while still ramping.
        lr = tf.cond(tf.less(step_f, warmup_steps), lambda: warmup_lr, lambda: lr)
    return lr
def convert(size, box):
    """Normalize an absolute (x, y, w, h) box by the image dimensions.

    :param size: (image_width, image_height) in pixels
    :param box: (x_center, y_center, width, height) in pixels
    :return: the same box scaled into [0, 1] relative coordinates
    """
    scale_x = 1.0 / size[0]
    scale_y = 1.0 / size[1]
    return (box[0] * scale_x, box[1] * scale_y, box[2] * scale_x, box[3] * scale_y)
def create_env(flags, level_name, seed=1):
    """Create a DeepMind Lab environment for a DMLab-30 level.

    ``flags`` is accepted for interface compatibility but not used here.
    """
    full_level = 'contributed/dmlab30/' + level_name
    # Low-resolution observations; only warnings and above are logged.
    config = {'width': 96, 'height': 72, 'logLevel': 'WARN'}
    return dmlab_wrappers.createDmLab(full_level, config, seed)
def gen_4_normal():
    """Return four 2-D distributions with identity covariance, centered on
    the corners (+1,+1), (+1,-1), (-1,-1), (-1,+1) — in that order."""
    corners = [(1.0, 1.0), (1.0, -1.0), (-1.0, -1.0), (-1.0, 1.0)]
    return [mn(mean=np.array(center), cov=np.eye(2)) for center in corners]
def evaluate_model(model, test_data):
    """Evaluate ``model`` on every image in FLAGS.data_dir/test_data and log
    the average PSNR / SSIM.

    Per-image outputs are written to FLAGS.output_dir by the model's
    ``do_for_evaluate_with_output``.  Logs a warning and returns early when
    the directory contains no files (previously this crashed with a
    ZeroDivisionError when computing the averages).
    """
    test_filenames = util.get_files_in_directory(FLAGS.data_dir + '/' + test_data)
    if not test_filenames:
        logging.warning('No test files found for [%s]', test_data)
        return
    total_psnr = total_ssim = 0
    for filename in test_filenames:
        psnr, ssim = model.do_for_evaluate_with_output(filename, output_directory=FLAGS.output_dir, print_console=False)
        total_psnr += psnr
        total_ssim += ssim
    logging.info('Model Average [%s] PSNR:%f, SSIM:%f' % (test_data, total_psnr / len(test_filenames), total_ssim / len(test_filenames)))
class RewardsComposerConfig(RewardConfig):
    """Config aggregating several reward-shaper configs; creating the shaper
    instantiates each child config and composes the results."""

    def __init__(self, reward_shaper_configs):
        # NOTE: stored under the historical attribute name
        # 'reward_shapers_configs' (extra 's') for compatibility.
        self.reward_shapers_configs = reward_shaper_configs

    def create_reward_shaper(self):
        shapers = [cfg.create_reward_shaper() for cfg in self.reward_shapers_configs]
        return RewardsComposer(shapers)
def main():
    """Command-line entry point: fine-tune and/or evaluate a BERT sequence
    classifier.

    Workflow: parse arguments -> set up device / distributed / fp16 ->
    optionally train -> save the fine-tuned model (or resume a previously
    saved one) -> optionally evaluate on the dev set, writing
    eval_results.txt into --output_dir.
    """
    # ---- Command-line arguments ----
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.')
    parser.add_argument('--bert_model', default=None, type=str, required=True, help='Bert pre-trained model selected in the list: bert-base-uncased, bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, bert-base-multilingual-cased, bert-base-chinese.')
    parser.add_argument('--task_name', default=None, type=str, required=True, help='The name of the task to train.')
    parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
    parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
    parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, and sequences shorter \nthan this will be padded.')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument('--do_resume', action='store_true', help='Whether to run eval on the resumed pretrained model.')
    parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
    parser.add_argument('--train_batch_size', default=32, type=int, help='Total batch size for training.')
    parser.add_argument('--eval_batch_size', default=8, type=int, help='Total batch size for eval.')
    parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
    parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--local_rank', type=int, default=(- 1), help='local_rank for distributed training on gpus')
    parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit float precision instead of 32-bit')
    parser.add_argument('--loss_scale', type=float, default=0, help='Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n0 (default value): dynamic loss scaling.\nPositive power of 2: static loss scaling value.\n')
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
    # Optional remote debugging hook (blocks until a debugger attaches).
    if (args.server_ip and args.server_port):
        import ptvsd
        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Task registry: data processor class and label count per task name.
    processors = {'cola': ColaProcessor, 'mnli': MnliProcessor, 'snli': SnliProcessor, 'mrpc': MrpcProcessor, 'mr': MRProcessor, 'ag': AGProcessor, 'imdb': IMDBProcessor, 'yelp': YelpProcessor, 'fake': FakeProcessor}
    num_labels_task = {'cola': 2, 'mnli': 3, 'snli': 3, 'mrpc': 2, 'mr': 2, 'ag': 4, 'imdb': 2, 'yelp': 2, 'fake': 2}
    # ---- Device / distributed setup ----
    if ((args.local_rank == (- 1)) or args.no_cuda):
        device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
        n_gpu = torch.cuda.device_count()
    else:
        # One process per GPU under torch.distributed (NCCL backend).
        torch.cuda.set_device(args.local_rank)
        device = torch.device('cuda', args.local_rank)
        n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')
    logger.info('device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}'.format(device, n_gpu, bool((args.local_rank != (- 1))), args.fp16))
    if (args.gradient_accumulation_steps < 1):
        raise ValueError('Invalid gradient_accumulation_steps parameter: {}, should be >= 1'.format(args.gradient_accumulation_steps))
    # Per-step batch size shrinks so the effective batch size stays as given.
    args.train_batch_size = (args.train_batch_size // args.gradient_accumulation_steps)
    # ---- Reproducibility ----
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if (n_gpu > 0):
        torch.cuda.manual_seed_all(args.seed)
    # ---- Flag / output-dir sanity checks ----
    if ((not args.do_train) and (not args.do_eval)):
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')
    if (os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train):
        raise ValueError('Output directory ({}) already exists and is not empty.'.format(args.output_dir))
    if (not os.path.exists(args.output_dir)):
        os.makedirs(args.output_dir)
    # ---- Task, tokenizer and training schedule ----
    task_name = args.task_name.lower()
    if (task_name not in processors):
        raise ValueError(('Task not found: %s' % task_name))
    processor = processors[task_name]()
    num_labels = num_labels_task[task_name]
    label_list = processor.get_labels()
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    train_examples = None
    num_train_optimization_steps = None
    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir)
        # NOTE(review): this is a float because num_train_epochs is a float;
        # downstream consumers (BertAdam t_total, warmup_linear) appear to
        # accept that — verify before changing.
        num_train_optimization_steps = (int(((len(train_examples) / args.train_batch_size) / args.gradient_accumulation_steps)) * args.num_train_epochs)
        if (args.local_rank != (- 1)):
            num_train_optimization_steps = (num_train_optimization_steps // torch.distributed.get_world_size())
    # ---- Model construction ----
    cache_dir = (args.cache_dir if args.cache_dir else os.path.join(PYTORCH_PRETRAINED_BERT_CACHE, 'distributed_{}'.format(args.local_rank)))
    model = BertForSequenceClassification.from_pretrained(args.bert_model, cache_dir=cache_dir, num_labels=num_labels)
    if args.fp16:
        model.half()
    model.to(device)
    if (args.local_rank != (- 1)):
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError('Please install apex from to use distributed and fp16 training.')
        model = DDP(model)
    elif (n_gpu > 1):
        model = torch.nn.DataParallel(model)
    # ---- Optimizer: no weight decay on biases and LayerNorm parameters ----
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01}, {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError('Please install apex from to use distributed and fp16 training.')
        optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0)
        if (args.loss_scale == 0):
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
    else:
        # BertAdam handles warmup internally; the manual LR update in the
        # training loop below is therefore only done in the fp16 branch.
        optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=num_train_optimization_steps)
    global_step = 0
    nb_tr_steps = 0
    tr_loss = 0
    # ---- Training loop ----
    if args.do_train:
        train_features = convert_examples_to_features(train_examples, label_list, args.max_seq_length, tokenizer)
        logger.info('***** Running training *****')
        logger.info('  Num examples = %d', len(train_examples))
        logger.info('  Batch size = %d', args.train_batch_size)
        logger.info('  Num steps = %d', num_train_optimization_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if (args.local_rank == (- 1)):
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
        model.train()
        for _ in trange(int(args.num_train_epochs), desc='Epoch'):
            tr_loss = 0
            (nb_tr_examples, nb_tr_steps) = (0, 0)
            for (step, batch) in enumerate(tqdm(train_dataloader, desc='Iteration')):
                batch = tuple((t.to(device) for t in batch))
                (input_ids, input_mask, segment_ids, label_ids) = batch
                loss = model(input_ids, segment_ids, input_mask, label_ids)
                if (n_gpu > 1):
                    # DataParallel returns one loss per GPU; average them.
                    loss = loss.mean()
                if (args.gradient_accumulation_steps > 1):
                    loss = (loss / args.gradient_accumulation_steps)
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                # Step the optimizer only every gradient_accumulation_steps batches.
                if (((step + 1) % args.gradient_accumulation_steps) == 0):
                    if args.fp16:
                        # FusedAdam does not schedule LR itself; apply the
                        # linear warmup schedule manually.
                        lr_this_step = (args.learning_rate * warmup_linear((global_step / num_train_optimization_steps), args.warmup_proportion))
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
    # ---- Save the fine-tuned model, or load one for evaluation ----
    if args.do_train:
        # Unwrap DataParallel/DDP before saving so keys are unprefixed.
        model_to_save = (model.module if hasattr(model, 'module') else model)
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        with open(output_config_file, 'w') as f:
            f.write(model_to_save.config.to_json_string())
        # Reload the just-saved checkpoint so evaluation uses the same path
        # as --do_resume.
        config = BertConfig(output_config_file)
        model = BertForSequenceClassification(config, num_labels=num_labels)
        model.load_state_dict(torch.load(output_model_file))
    elif args.do_resume:
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        config = BertConfig(output_config_file)
        model = BertForSequenceClassification(config, num_labels=num_labels)
        model.load_state_dict(torch.load(output_model_file))
    else:
        # Neither trained nor resumed: evaluate the pre-trained weights.
        model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)
    model.to(device)
    # ---- Evaluation (main process only under distributed training) ----
    if (args.do_eval and ((args.local_rank == (- 1)) or (torch.distributed.get_rank() == 0))):
        eval_examples = processor.get_dev_examples(args.data_dir)
        eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer)
        logger.info('***** Running evaluation *****')
        logger.info('  Num examples = %d', len(eval_examples))
        logger.info('  Batch size = %d', args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
        model.eval()
        (eval_loss, eval_accuracy) = (0, 0)
        (nb_eval_steps, nb_eval_examples) = (0, 0)
        for (input_ids, input_mask, segment_ids, label_ids) in tqdm(eval_dataloader, desc='Evaluating'):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)
            with torch.no_grad():
                # With labels the model returns the loss; without, the logits.
                tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids)
                logits = model(input_ids, segment_ids, input_mask)
            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()
            tmp_eval_accuracy = accuracy(logits, label_ids)
            eval_loss += tmp_eval_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
        eval_loss = (eval_loss / nb_eval_steps)
        eval_accuracy = (eval_accuracy / nb_eval_examples)
        loss = ((tr_loss / nb_tr_steps) if args.do_train else None)
        result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'global_step': global_step, 'loss': loss}
        output_eval_file = os.path.join(args.output_dir, 'eval_results.txt')
        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')
            for key in sorted(result.keys()):
                logger.info('  %s = %s', key, str(result[key]))
                writer.write(('%s = %s\n' % (key, str(result[key]))))
_tf
class TFCLIPTextModelTest(TFModelTesterMixin, unittest.TestCase):
    """Common-test harness for the TensorFlow CLIP text encoder."""
    all_model_classes = ((TFCLIPTextModel,) if is_tf_available() else ())
    # These shared-mixin features are disabled for this model.
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        # Helpers that build tiny configs/inputs for the common tests.
        self.model_tester = TFCLIPTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CLIPTextConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_inputs_embeds(self):
        # Intentionally skipped — the common inputs_embeds test is not
        # applicable to this model (presumably no embeds input; confirm).
        pass
    def test_model_from_pretrained(self):
        # Only the first archive entry is exercised to keep the test fast.
        for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCLIPTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
class DiffusionPipeline(metaclass=DummyObject):
    """Import-time placeholder used when torch is not installed; every entry
    point raises an informative error via requires_backends."""
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

    # The real DiffusionPipeline exposes these as classmethods. Without the
    # decorator, a class-level call like DiffusionPipeline.from_pretrained()
    # would fail with a missing-positional-argument TypeError instead of the
    # intended "torch is required" message.
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch'])
def _download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True):
    """Download ``url`` to a local file, with retries and optional SHA-1 check.

    :param url: URL to fetch
    :param path: destination file path or directory; defaults to the URL's
        basename in the current directory
    :param overwrite: re-download even if the target already exists/matches
    :param sha1_hash: expected SHA-1 hex digest; a mismatch triggers a retry
    :param retries: number of additional attempts after a failure
    :param verify_ssl: pass False to skip certificate verification (warned)
    :return: path of the downloaded (or already-present) file
    """
    import warnings
    try:
        import requests
    except ImportError:
        # Defer the import failure: the stub only blows up (AttributeError)
        # if a download is actually attempted.
        class requests_failed_to_import(object):
            pass
        requests = requests_failed_to_import
    if (path is None):
        fname = url.split('/')[(- 1)]
        assert fname, "Can't construct file-name from this URL. Please set the `path` option manually."
    else:
        path = os.path.expanduser(path)
        if os.path.isdir(path):
            fname = os.path.join(path, url.split('/')[(- 1)])
        else:
            fname = path
    assert (retries >= 0), 'Number of retries should be at least 0'
    if (not verify_ssl):
        warnings.warn('Unverified HTTPS request is being made (verify_ssl=False). Adding certificate verification is strongly advised.')
    # Skip the download entirely when the file exists and (if a hash was
    # given) already matches it.
    if (overwrite or (not os.path.exists(fname)) or (sha1_hash and (not _check_sha1(fname, sha1_hash)))):
        dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
        if (not os.path.exists(dirname)):
            os.makedirs(dirname)
        while ((retries + 1) > 0):
            try:
                print('Downloading {} from {}...'.format(fname, url))
                r = requests.get(url, stream=True, verify=verify_ssl)
                if (r.status_code != 200):
                    raise RuntimeError('Failed downloading url {}'.format(url))
                # Stream to disk in 1 KiB chunks to bound memory use.
                with open(fname, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=1024):
                        if chunk:
                            f.write(chunk)
                if (sha1_hash and (not _check_sha1(fname, sha1_hash))):
                    raise UserWarning('File {} is downloaded but the content hash does not match. The repo may be outdated or download may be incomplete. If the `repo_url` is overridden, consider switching to the default repo.'.format(fname))
                break
            except Exception as e:
                retries -= 1
                if (retries <= 0):
                    raise e
                else:
                    print('download failed, retrying, {} attempt{} left'.format(retries, ('s' if (retries > 1) else '')))
    return fname
def load_audface_data(basedir, testskip=1, test_file=None, aud_file=None):
    """Load audio-driven face (AD-NeRF style) dataset metadata from ``basedir``.

    Two modes:
    * test mode (``test_file`` given): loads poses and per-frame audio
      features only, returning (poses, auds, bc_img, [H, W, focal, cx, cy]).
    * train/val mode: loads transforms_{train,val}.json, returning image
      paths, poses, audio features, the background image, intrinsics,
      face-rect arrays and the train/val index split.
    """
    # ---- Test mode: metadata from a single json, audio from aud_file ----
    if (test_file is not None):
        with open(os.path.join(basedir, test_file)) as fp:
            meta = json.load(fp)
        poses = []
        auds = []
        aud_features = np.load(os.path.join(basedir, aud_file))
        for frame in meta['frames'][::testskip]:
            poses.append(np.array(frame['transform_matrix']))
            auds.append(aud_features[frame['frame_id']])
        poses = np.array(poses).astype(np.float32)
        auds = np.array(auds).astype(np.float32)
        bc_img = imageio.imread(os.path.join(basedir, 'bc.jpg'))
        (H, W) = (bc_img.shape[0], bc_img.shape[1])
        (focal, cx, cy) = (float(meta['focal_length']), float(meta['cx']), float(meta['cy']))
        return (poses, auds, bc_img, [H, W, focal, cx, cy])
    # ---- Train/val mode ----
    splits = ['train', 'val']
    metas = {}
    for s in splits:
        with open(os.path.join(basedir, 'transforms_{}.json'.format(s)), 'r') as fp:
            metas[s] = json.load(fp)
    all_imgs = []
    all_poses = []
    all_auds = []
    all_sample_rects = []
    aud_features = np.load(os.path.join(basedir, 'aud.npy'))
    # counts accumulates split boundaries for building i_split below.
    counts = [0]
    for s in splits:
        meta = metas[s]
        imgs = []
        poses = []
        auds = []
        sample_rects = []
        # NOTE(review): mouth_rects is created but never filled or returned.
        mouth_rects = []
        # Validation frames may be subsampled via testskip.
        if ((s == 'train') or (testskip == 0)):
            skip = 1
        else:
            skip = testskip
        for frame in meta['frames'][::skip]:
            fname = os.path.join(basedir, 'head_imgs', (str(frame['img_id']) + '.jpg'))
            imgs.append(fname)
            poses.append(np.array(frame['transform_matrix']))
            # Clamp the audio index so trailing frames reuse the last feature.
            auds.append(aud_features[min(frame['aud_id'], (aud_features.shape[0] - 1))])
            sample_rects.append(np.array(frame['face_rect'], dtype=np.int32))
        imgs = np.array(imgs)
        poses = np.array(poses).astype(np.float32)
        auds = np.array(auds).astype(np.float32)
        counts.append((counts[(- 1)] + imgs.shape[0]))
        all_imgs.append(imgs)
        all_poses.append(poses)
        all_auds.append(auds)
        all_sample_rects.append(sample_rects)
    i_split = [np.arange(counts[i], counts[(i + 1)]) for i in range(len(splits))]
    imgs = np.concatenate(all_imgs, 0)
    poses = np.concatenate(all_poses, 0)
    auds = np.concatenate(all_auds, 0)
    sample_rects = np.concatenate(all_sample_rects, 0)
    bc_img = imageio.imread(os.path.join(basedir, 'bc.jpg'))
    (H, W) = bc_img.shape[:2]
    (focal, cx, cy) = (float(meta['focal_len']), float(meta['cx']), float(meta['cy']))
    # NOTE(review): sample_rects is returned twice; the second slot may have
    # been intended for mouth_rects — confirm against callers before changing.
    return (imgs, poses, auds, bc_img, [H, W, focal, cx, cy], sample_rects, sample_rects, i_split)
def _split_divisible(num, num_ways, divisible_by=8):
assert ((num % divisible_by) == 0)
assert ((num / num_ways) >= divisible_by)
base = (((num // num_ways) // divisible_by) * divisible_by)
result = []
accumulated = 0
for i in range(num_ways):
r = base
while ((accumulated + r) < ((num * (i + 1)) / num_ways)):
r += divisible_by
result.append(r)
accumulated += r
assert (accumulated == num)
return result |
def test_base_non_implemented_error():
    """The abstract base preprocessor must refuse to run: constructing it and
    calling fit_transform is expected to raise NotImplementedError."""
    with pytest.raises(NotImplementedError):
        IncompletePreprocessor().fit_transform(df)
def space(t1, t2, quote_count=None):
    """Decide whether a space belongs between adjacent tokens `t1` and `t2`
    during detokenization.

    Returns False after an opening parenthesis, before closing punctuation,
    or around a quote token while inside an open quotation (odd
    `quote_count`); otherwise True.
    """
    # Never put a space right after an opening parenthesis.
    if re.match('^[\\(]$', t1):
        return False
    # Never put a space right before closing punctuation.
    if re.match('^[.,\\)\\?\\!]$', t2):
        return False
    # NOTE(review): both branches test for an odd quote_count — confirm this
    # is intended for the opening-quote side as well.
    if quote(t1) and (quote_count is not None) and (quote_count % 2 == 1):
        return False
    if quote(t2) and (quote_count is not None) and (quote_count % 2 == 1):
        return False
    return True
def get_video_cap(serial, frame_width, frame_height):
    """Open and configure a Logitech C930e webcam identified by `serial`.

    On macOS the stable /dev/v4l path does not exist, so the default camera
    (index 0) is returned unconfigured. On Linux the capture is forced to
    MJPG at the requested resolution with a 1-frame buffer and a fixed
    manual focus; asserts verify the driver accepted each setting.
    """
    if sys.platform == 'darwin':
        return cv2.VideoCapture(0)
    device_path = '/dev/v4l/by-id/usb-046d_Logitech_Webcam_C930e_{}-video-index0'.format(serial)
    cap = cv2.VideoCapture(device_path)
    requested = [
        (cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')),
        (cv2.CAP_PROP_FRAME_WIDTH, frame_width),
        (cv2.CAP_PROP_FRAME_HEIGHT, frame_height),
        (cv2.CAP_PROP_BUFFERSIZE, 1),
        (cv2.CAP_PROP_AUTOFOCUS, 0),
        (cv2.CAP_PROP_FOCUS, 30),
    ]
    for prop, value in requested:
        cap.set(prop, value)
    # FOURCC is deliberately not verified — drivers report it inconsistently.
    for prop, expected in requested[1:]:
        assert cap.get(prop) == expected
    return cap
def main():
    """Fine-tune a seq2seq translation model with 🤗 Accelerate.

    Pipeline: parse args -> set up logging/seed/hub repo -> load dataset and
    model/tokenizer -> tokenize -> train with gradient accumulation -> evaluate
    each epoch with sacreBLEU -> save (and optionally push) the final model.

    Fix: the `--config_name` branch previously loaded the config from
    `args.model_name_or_path`, silently ignoring `--config_name`.
    """
    args = parse_args()
    accelerator = Accelerator()
    # One log line per process; only the local main process logs at INFO.
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    logger.info(accelerator.state)
    logger.setLevel((logging.INFO if accelerator.is_local_main_process else logging.ERROR))
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    if (args.seed is not None):
        set_seed(args.seed)
    # Hub repo / output dir creation happens on the main process only.
    if accelerator.is_main_process:
        if args.push_to_hub:
            if (args.hub_model_id is None):
                repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
            else:
                repo_name = args.hub_model_id
            repo = Repository(args.output_dir, clone_from=repo_name)
        elif (args.output_dir is not None):
            os.makedirs(args.output_dir, exist_ok=True)
    accelerator.wait_for_everyone()
    # Load either a hub dataset or local train/validation files (format
    # inferred from the train file extension).
    if (args.dataset_name is not None):
        raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
    else:
        data_files = {}
        if (args.train_file is not None):
            data_files['train'] = args.train_file
        if (args.validation_file is not None):
            data_files['validation'] = args.validation_file
        # NOTE(review): assumes --train_file is given whenever --dataset_name
        # is not — confirm against parse_args() validation.
        extension = args.train_file.split('.')[(- 1)]
        raw_datasets = load_dataset(extension, data_files=data_files)
    if args.config_name:
        # BUG FIX: was from_pretrained(args.model_name_or_path), which ignored
        # the explicitly supplied --config_name.
        config = AutoConfig.from_pretrained(args.config_name)
    elif args.model_name_or_path:
        config = AutoConfig.from_pretrained(args.model_name_or_path)
    else:
        config = CONFIG_MAPPING[args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
    if args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=(not args.use_slow_tokenizer))
    elif args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=(not args.use_slow_tokenizer))
    else:
        raise ValueError('You are instantiating a new tokenizer from scratch. This is not supported by this script.You can do it from another script, save it, and load it from here, using --tokenizer_name.')
    if args.model_name_or_path:
        model = AutoModelForSeq2SeqLM.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=config)
    else:
        logger.info('Training new model from scratch')
        model = AutoModelForSeq2SeqLM.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
    # mBART needs an explicit decoder start token derived from the target language.
    if ((model.config.decoder_start_token_id is None) and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast))):
        assert ((args.target_lang is not None) and (args.source_lang is not None)), 'mBart requires --target_lang and --source_lang'
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[args.target_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(args.target_lang)
    if (model.config.decoder_start_token_id is None):
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')
    prefix = (args.source_prefix if (args.source_prefix is not None) else '')
    column_names = raw_datasets['train'].column_names
    if isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        if (args.source_lang is not None):
            tokenizer.src_lang = args.source_lang
        if (args.target_lang is not None):
            tokenizer.tgt_lang = args.target_lang
    # Language codes like 'en_XX' -> dataset translation keys like 'en'.
    source_lang = args.source_lang.split('_')[0]
    target_lang = args.target_lang.split('_')[0]
    padding = ('max_length' if args.pad_to_max_length else False)
    max_target_length = args.max_target_length
    def preprocess_function(examples):
        """Tokenize source/target pairs; mask pad tokens in labels with -100
        when padding to max length so they are ignored by the loss."""
        inputs = [ex[source_lang] for ex in examples['translation']]
        targets = [ex[target_lang] for ex in examples['translation']]
        inputs = [(prefix + inp) for inp in inputs]
        model_inputs = tokenizer(inputs, max_length=args.max_source_length, padding=padding, truncation=True)
        with tokenizer.as_target_tokenizer():
            labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
        if ((padding == 'max_length') and args.ignore_pad_token_for_loss):
            labels['input_ids'] = [[(l if (l != tokenizer.pad_token_id) else (- 100)) for l in label] for label in labels['input_ids']]
        model_inputs['labels'] = labels['input_ids']
        return model_inputs
    with accelerator.main_process_first():
        processed_datasets = raw_datasets.map(preprocess_function, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not args.overwrite_cache), desc='Running tokenizer on dataset')
    train_dataset = processed_datasets['train']
    eval_dataset = processed_datasets['validation']
    # Log a few random samples as a sanity check.
    for index in random.sample(range(len(train_dataset)), 3):
        logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
    label_pad_token_id = ((- 100) if args.ignore_pad_token_for_loss else tokenizer.pad_token_id)
    if args.pad_to_max_length:
        data_collator = default_data_collator
    else:
        data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))
    train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size)
    eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
    # No weight decay on biases and LayerNorm weights.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
    (model, optimizer, train_dataloader, eval_dataloader) = accelerator.prepare(model, optimizer, train_dataloader, eval_dataloader)
    num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
    if (args.max_train_steps is None):
        args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
    else:
        args.num_train_epochs = math.ceil((args.max_train_steps / num_update_steps_per_epoch))
    lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps)
    metric = load_metric('sacrebleu')
    def postprocess_text(preds, labels):
        """Strip whitespace; wrap each label in a list as sacreBLEU expects."""
        preds = [pred.strip() for pred in preds]
        labels = [[label.strip()] for label in labels]
        return (preds, labels)
    total_batch_size = ((args.per_device_train_batch_size * accelerator.num_processes) * args.gradient_accumulation_steps)
    logger.info('***** Running training *****')
    logger.info(f'  Num examples = {len(train_dataset)}')
    logger.info(f'  Num Epochs = {args.num_train_epochs}')
    logger.info(f'  Instantaneous batch size per device = {args.per_device_train_batch_size}')
    logger.info(f'  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
    logger.info(f'  Gradient Accumulation steps = {args.gradient_accumulation_steps}')
    logger.info(f'  Total optimization steps = {args.max_train_steps}')
    progress_bar = tqdm(range(args.max_train_steps), disable=(not accelerator.is_local_main_process))
    completed_steps = 0
    for epoch in range(args.num_train_epochs):
        model.train()
        for (step, batch) in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = (loss / args.gradient_accumulation_steps)
            accelerator.backward(loss)
            # Step on accumulation boundaries and on the last batch of the epoch.
            if (((step % args.gradient_accumulation_steps) == 0) or (step == (len(train_dataloader) - 1))):
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                progress_bar.update(1)
                completed_steps += 1
            if (completed_steps >= args.max_train_steps):
                break
        model.eval()
        if (args.val_max_target_length is None):
            args.val_max_target_length = args.max_target_length
        gen_kwargs = {'max_length': (args.val_max_target_length if (args is not None) else config.max_length), 'num_beams': args.num_beams}
        for (step, batch) in enumerate(eval_dataloader):
            with torch.no_grad():
                generated_tokens = accelerator.unwrap_model(model).generate(batch['input_ids'], attention_mask=batch['attention_mask'], **gen_kwargs)
                # Pad across processes so gather() sees equal-length tensors.
                generated_tokens = accelerator.pad_across_processes(generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
                labels = batch['labels']
                if (not args.pad_to_max_length):
                    labels = accelerator.pad_across_processes(batch['labels'], dim=1, pad_index=tokenizer.pad_token_id)
                generated_tokens = accelerator.gather(generated_tokens).cpu().numpy()
                labels = accelerator.gather(labels).cpu().numpy()
                if args.ignore_pad_token_for_loss:
                    # Restore pad tokens so batch_decode can handle the -100s.
                    labels = np.where((labels != (- 100)), labels, tokenizer.pad_token_id)
                decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
                decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
                (decoded_preds, decoded_labels) = postprocess_text(decoded_preds, decoded_labels)
                metric.add_batch(predictions=decoded_preds, references=decoded_labels)
        eval_metric = metric.compute()
        logger.info({'bleu': eval_metric['score']})
        # Push intermediate checkpoints for every epoch except the last.
        if (args.push_to_hub and (epoch < (args.num_train_epochs - 1))):
            accelerator.wait_for_everyone()
            unwrapped_model = accelerator.unwrap_model(model)
            unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
            if accelerator.is_main_process:
                tokenizer.save_pretrained(args.output_dir)
                repo.push_to_hub(commit_message=f'Training in progress epoch {epoch}', blocking=False, auto_lfs_prune=True)
    if (args.output_dir is not None):
        accelerator.wait_for_everyone()
        unwrapped_model = accelerator.unwrap_model(model)
        unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
        if accelerator.is_main_process:
            tokenizer.save_pretrained(args.output_dir)
            if args.push_to_hub:
                repo.push_to_hub(commit_message='End of training', auto_lfs_prune=True)
class Selection(ABC):
    """Base class for batch selection strategies in multi-objective optimization.

    Subclasses pick `batch_size` candidate solutions per iteration, optionally
    guided by a hypervolume reference point (`ref_point`).
    """

    def __init__(self, batch_size, ref_point=None, **kwargs):
        # Number of candidates to select each round.
        self.batch_size = batch_size
        # Hypervolume reference point; may be supplied later via set_ref_point.
        self.ref_point = ref_point

    def set_ref_point(self, ref_point):
        """Replace the stored reference point."""
        self.ref_point = ref_point

    def fit(self, X, Y):
        """Optional hook to fit internal state on observed data; no-op by default."""
        pass

    def select(self, solution, surrogate_model, status, transformation):
        """Pick candidates from `solution`; subclasses override. No-op by default."""
        pass
class MultiMarginCriterion(Criterion):
    """Multi-class margin (hinge) loss criterion, thin wrapper over the JVM backend.

    Forwards all settings to the backend Criterion constructor; `weights` (a
    numpy array or None) is converted to a JTensor first.
    """

    def __init__(self, p=1, weights=None, margin=1.0, size_average=True, bigdl_type='float'):
        # p: norm degree of the margin loss; weights: optional per-class weights;
        # margin: margin value; size_average: average loss over the batch.
        # NOTE(review): assumes JTensor.from_ndarray(None) returns a null-safe
        # value accepted by the backend — confirm against JTensor's implementation.
        super(MultiMarginCriterion, self).__init__(None, bigdl_type, p, JTensor.from_ndarray(weights), margin, size_average)
class ASKCResNetFPN(HybridBlock):
    """ResNet-style backbone with an FPN-like top-down path whose feature
    fusion operator is selected by `fuse_mode` (ASKC family and ablations).

    `layers`/`channels` configure the residual stages; `tinyFlag` selects a
    shallower stem with less downsampling; the head is an FCN producing
    `classes` output channels.
    """

    def __init__(self, layers, channels, fuse_mode, act_dilation, classes=1, tinyFlag=False, norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        super(ASKCResNetFPN, self).__init__(**kwargs)
        # Number of residual stages (3 or 4; stage 4 is optional below).
        self.layer_num = len(layers)
        self.tinyFlag = tinyFlag
        with self.name_scope():
            stem_width = int(channels[0])
            self.stem = nn.HybridSequential(prefix='stem')
            # Input normalization without learned affine parameters.
            self.stem.add(norm_layer(scale=False, center=False, **({} if (norm_kwargs is None) else norm_kwargs)))
            if tinyFlag:
                # Tiny stem: single stride-1 conv, no max-pool (keeps resolution).
                self.stem.add(nn.Conv2D(channels=(stem_width * 2), kernel_size=3, strides=1, padding=1, use_bias=False))
                self.stem.add(norm_layer(in_channels=(stem_width * 2)))
                self.stem.add(nn.Activation('relu'))
            else:
                # Full stem: three 3x3 convs (first stride-2) + 3x3/2 max-pool,
                # i.e. 4x total downsampling before stage 1.
                self.stem.add(nn.Conv2D(channels=stem_width, kernel_size=3, strides=2, padding=1, use_bias=False))
                self.stem.add(norm_layer(in_channels=stem_width))
                self.stem.add(nn.Activation('relu'))
                self.stem.add(nn.Conv2D(channels=stem_width, kernel_size=3, strides=1, padding=1, use_bias=False))
                self.stem.add(norm_layer(in_channels=stem_width))
                self.stem.add(nn.Activation('relu'))
                self.stem.add(nn.Conv2D(channels=(stem_width * 2), kernel_size=3, strides=1, padding=1, use_bias=False))
                self.stem.add(norm_layer(in_channels=(stem_width * 2)))
                self.stem.add(nn.Activation('relu'))
                self.stem.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
            # Segmentation head applied to the finest fused feature map.
            self.head = _FCNHead(in_channels=channels[1], channels=classes)
            # Bottom-up residual stages; stages 2+ halve spatial resolution.
            self.layer1 = self._make_layer(block=CIFARBasicBlockV1, layers=layers[0], channels=channels[1], stride=1, stage_index=1, in_channels=channels[1])
            self.layer2 = self._make_layer(block=CIFARBasicBlockV1, layers=layers[1], channels=channels[2], stride=2, stage_index=2, in_channels=channels[1])
            self.layer3 = self._make_layer(block=CIFARBasicBlockV1, layers=layers[2], channels=channels[3], stride=2, stage_index=3, in_channels=channels[2])
            if (self.layer_num == 4):
                self.layer4 = self._make_layer(block=CIFARBasicBlockV1, layers=layers[3], channels=channels[4], stride=2, stage_index=4, in_channels=channels[3])
            # Top-down fusion modules (deep -> shallow).
            if (self.layer_num == 4):
                self.fuse34 = self._fuse_layer(fuse_mode, channels=channels[3], act_dilation=act_dilation)
            self.fuse23 = self._fuse_layer(fuse_mode, channels=channels[2], act_dilation=act_dilation)
            self.fuse12 = self._fuse_layer(fuse_mode, channels=channels[1], act_dilation=act_dilation)

    def _make_layer(self, block, layers, channels, stride, stage_index, in_channels=0, norm_layer=BatchNorm, norm_kwargs=None):
        """Build one residual stage: a first (possibly downsampling) block
        followed by `layers - 1` stride-1 blocks."""
        layer = nn.HybridSequential(prefix=('stage%d_' % stage_index))
        with layer.name_scope():
            # Shortcut projection needed on channel or stride mismatch.
            downsample = ((channels != in_channels) or (stride != 1))
            layer.add(block(channels, stride, downsample, in_channels=in_channels, prefix='', norm_layer=norm_layer, norm_kwargs=norm_kwargs))
            for _ in range((layers - 1)):
                layer.add(block(channels, 1, False, in_channels=channels, prefix='', norm_layer=norm_layer, norm_kwargs=norm_kwargs))
        return layer

    def _fuse_layer(self, fuse_mode, channels, act_dilation):
        """Instantiate the feature-fusion module named by `fuse_mode`."""
        if (fuse_mode == 'Direct_Add'):
            fuse_layer = Direct_AddFuse_Reduce(channels=channels)
        elif (fuse_mode == 'Concat'):
            fuse_layer = ConcatFuse_Reduce(channels=channels)
        elif (fuse_mode == 'SK'):
            fuse_layer = SKFuse_Reduce(channels=channels)
        elif (fuse_mode == 'LocalGlobalCha'):
            fuse_layer = LocalGlobalChaFuse_Reduce(channels=channels)
        elif (fuse_mode == 'LocalLocalCha'):
            fuse_layer = LocalLocalChaFuse_Reduce(channels=channels)
        elif (fuse_mode == 'GlobalGlobalCha'):
            fuse_layer = GlobalGlobalChaFuse_Reduce(channels=channels)
        elif (fuse_mode == 'IASKCChaFuse'):
            fuse_layer = IASKCChaFuse_Reduce(channels=channels)
        elif (fuse_mode == 'AYforXplusY'):
            fuse_layer = AYforXplusYChaFuse_Reduce(channels=channels)
        elif (fuse_mode == 'AXYforXplusY'):
            fuse_layer = AXYforXplusYChaFuse_Reduce(channels=channels)
        elif (fuse_mode == 'XplusAYforY'):
            fuse_layer = XplusAYforYChaFuse_Reduce(channels=channels)
        elif (fuse_mode == 'GAU'):
            fuse_layer = GAUChaFuse_Reduce(channels=channels)
        elif (fuse_mode == 'LocalGAU'):
            fuse_layer = LocalGAUChaFuse_Reduce(channels=channels)
        elif (fuse_mode == 'SpaFuse'):
            # NOTE(review): keyword 'act_dialtion' looks misspelled — confirm it
            # matches SpaFuse_Reduce's actual parameter name before renaming.
            fuse_layer = SpaFuse_Reduce(channels=channels, act_dialtion=act_dilation)
        elif (fuse_mode == 'BiLocalCha'):
            fuse_layer = BiLocalChaFuse_Reduce(channels=channels)
        elif (fuse_mode == 'BiGlobalLocalCha'):
            fuse_layer = BiGlobalLocalChaFuse_Reduce(channels=channels)
        elif (fuse_mode == 'AsymBiLocalCha'):
            fuse_layer = AsymBiLocalChaFuse_Reduce(channels=channels)
        elif (fuse_mode == 'BiGlobalCha'):
            fuse_layer = BiGlobalChaFuse_Reduce(channels=channels)
        elif (fuse_mode == 'BiSpaCha'):
            fuse_layer = BiSpaChaFuse_Reduce(channels=channels)
        elif (fuse_mode == 'AsymBiSpaCha'):
            fuse_layer = AsymBiSpaChaFuse_Reduce(channels=channels)
        else:
            raise ValueError('Unknown fuse_mode')
        return fuse_layer

    def hybrid_forward(self, F, x):
        """Bottom-up stages, then top-down upsample+fuse, ending with the FCN
        head upsampled back to the input resolution (unless tinyFlag)."""
        (_, _, hei, wid) = x.shape
        x = self.stem(x)
        c1 = self.layer1(x)
        c2 = self.layer2(c1)
        out = self.layer3(c2)
        if (self.layer_num == 4):
            c4 = self.layer4(out)
            # Upsample c4 to c3's resolution before fusing.
            if self.tinyFlag:
                c4 = F.contrib.BilinearResize2D(c4, height=(hei // 4), width=(wid // 4))
            else:
                c4 = F.contrib.BilinearResize2D(c4, height=(hei // 16), width=(wid // 16))
            out = self.fuse34(c4, out)
        if self.tinyFlag:
            out = F.contrib.BilinearResize2D(out, height=(hei // 2), width=(wid // 2))
        else:
            out = F.contrib.BilinearResize2D(out, height=(hei // 8), width=(wid // 8))
        out = self.fuse23(out, c2)
        if self.tinyFlag:
            out = F.contrib.BilinearResize2D(out, height=hei, width=wid)
        else:
            out = F.contrib.BilinearResize2D(out, height=(hei // 4), width=(wid // 4))
        out = self.fuse12(out, c1)
        pred = self.head(out)
        if self.tinyFlag:
            # Tiny variant already operates at input resolution.
            out = pred
        else:
            out = F.contrib.BilinearResize2D(pred, height=hei, width=wid)
        return out

    def evaluate(self, x):
        """Convenience alias for a plain forward pass."""
        return self.forward(x)
class HashingVectorizerFeatures(object):
    """Character-level hashing feature encoder.

    Extracts a string field from each item via `get_field` and maps it to a
    sparse hashed bag-of-characters matrix (non-negative float32 counts).
    """

    def __init__(self, name, get_field, norm=None):
        from sklearn.feature_extraction.text import HashingVectorizer
        self.name = name
        self.get_field = get_field
        # analyzer splits each field into individual characters.
        self.model = HashingVectorizer(analyzer=(lambda text: list(text)), alternate_sign=False, dtype=np.float32, norm=norm)

    def encode(self, things_to_encode):
        """Return the hashed feature matrix for the extracted fields."""
        fields = [self.get_field(item) for item in things_to_encode]
        return self.model.transform(fields)
def _try_get_shm(name, timeout=300):
for _ in range(timeout):
try:
shm = shared_memory.SharedMemory(name=name)
return shm
except Exception:
time.sleep(1)
return None |
('elmo_characters')  # NOTE(review): looks like a stripped registration decorator — confirm the original registrar
class ELMoTokenCharactersIndexer(TokenIndexer[List[int]]):
    """Token indexer that converts each token into ELMo's fixed-length
    character-id sequence (no vocabulary needed)."""

    def __init__(self, namespace: str='elmo_characters') -> None:
        self._namespace = namespace

    def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
        # Character ids are computed directly; nothing to count.
        pass

    def tokens_to_indices(self, tokens: List[Token], vocabulary: Vocabulary, index_name: str) -> Dict[str, List[List[int]]]:
        """Map each token's text to its ELMo character-id list."""
        texts = [token.text for token in tokens]
        if any(((text is None) for text in texts)):
            raise ConfigurationError('ELMoTokenCharactersIndexer needs a tokenizer that retains text')
        return {index_name: [ELMoCharacterMapper.convert_word_to_char_ids(text) for text in texts]}

    def get_padding_lengths(self, token: List[int]) -> Dict[str, int]:
        return {}

    def get_padding_token(self) -> List[int]:
        return []

    @staticmethod
    def _default_value_for_padding():
        # BUG FIX: must be a @staticmethod — it is passed as a zero-argument
        # callable to pad_sequence_to_length; as a bound method the implicit
        # `self` made every call raise TypeError.
        return ([0] * ELMoCharacterMapper.max_word_length)

    def pad_token_sequence(self, tokens: Dict[str, List[List[int]]], desired_num_tokens: Dict[str, int], padding_lengths: Dict[str, int]) -> Dict[str, List[List[int]]]:
        """Pad each key's token list to the desired length with all-zero
        character sequences."""
        return {key: pad_sequence_to_length(val, desired_num_tokens[key], default_value=self._default_value_for_padding) for (key, val) in tokens.items()}
class AdamW(Optimizer):
    """Adam with decoupled weight decay (AdamW, Loshchilov & Hutter 2017).

    Weight decay is applied directly to the parameters (scaled by lr) rather
    than being folded into the gradient, and only after the Adam moment
    update is computed.

    Fix: the deprecated positional `Tensor.add_/addcmul_/addcdiv_(Number,
    Tensor, ...)` overloads (removed in modern PyTorch) are replaced with the
    equivalent `alpha=`/`value=` keyword forms; the math is unchanged.
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False):
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= betas[0] < 1.0)):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if (not (0.0 <= betas[1] < 1.0)):
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super(AdamW, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Keep checkpoints saved before `amsgrad` existed loadable.
        super(AdamW, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Perform a single optimization step.

        closure: optional callable that re-evaluates the model and returns
        the loss. Returns that loss (or None).
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                # Lazy state initialization on first update of this parameter.
                if (len(state) == 0):
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                (beta1, beta2) = group['betas']
                state['step'] += 1
                # Exponential moving averages of the gradient and its square.
                exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                if amsgrad:
                    # Use the running maximum of the second moment (AMSGrad).
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = (1 - (beta1 ** state['step']))
                bias_correction2 = (1 - (beta2 ** state['step']))
                step_size = ((group['lr'] * math.sqrt(bias_correction2)) / bias_correction1)
                if (group['weight_decay'] != 0):
                    # Decoupled weight decay: p <- p - lr * wd * p.
                    p.data.add_(p.data, alpha=(- (group['weight_decay'] * group['lr'])))
                p.data.addcdiv_(exp_avg, denom, value=(- step_size))
        return loss
_registry(operator_type='LogSoftmax')  # NOTE(review): likely a stripped '@' decorator — confirm against the registry helper
class LogSoftmax(Operator):
    """LogSoftmax operator wrapper that captures framework-specific attributes."""

    def __init__(self):
        super().__init__()

    def set_attr(self, framework, node):
        """Copy the 'axis' attribute from an onnxruntime node, if present."""
        if framework != 'onnxruntime':
            return
        for attr in node.attribute:
            if attr.name == 'axis':
                self._attr['axis'] = attr.i
@pytest.mark.parametrize('dataloader', ['with_covariates', 'different_encoder_decoder_size', 'fixed_window_without_covariates', 'multi_target', 'quantiles', 'multivariate-quantiles', 'implicit-quantiles'])
def test_integration(dataloaders_with_covariates, dataloaders_with_different_encoder_decoder_length, dataloaders_fixed_window_without_covariates, dataloaders_multi_target, tmp_path, dataloader):
    """Integration test over the dataloader/loss matrix.

    BUG FIX: the parametrize line was a bare `.parametrize(...)` (stripped
    decorator), which is a SyntaxError; restored as @pytest.mark.parametrize.
    """
    kwargs = {}
    if (dataloader == 'with_covariates'):
        dataloader = dataloaders_with_covariates
        kwargs['backcast_loss_ratio'] = 0.5
    elif (dataloader == 'different_encoder_decoder_size'):
        dataloader = dataloaders_with_different_encoder_decoder_length
    elif (dataloader == 'fixed_window_without_covariates'):
        dataloader = dataloaders_fixed_window_without_covariates
    elif (dataloader == 'multi_target'):
        dataloader = dataloaders_multi_target
        kwargs['loss'] = QuantileLoss()
    elif (dataloader == 'quantiles'):
        dataloader = dataloaders_with_covariates
        kwargs['loss'] = QuantileLoss()
    elif (dataloader == 'implicit-quantiles'):
        dataloader = dataloaders_with_covariates
        kwargs['loss'] = ImplicitQuantileNetworkDistributionLoss()
    elif (dataloader == 'multivariate-quantiles'):
        dataloader = dataloaders_with_covariates
        kwargs['loss'] = MQF2DistributionLoss(prediction_length=dataloader['train'].dataset.max_prediction_length)
        # MQF2 is numerically sensitive; keep the LR tiny and run on CPU.
        kwargs['learning_rate'] = 1e-09
        kwargs['trainer_kwargs'] = dict(accelerator='cpu')
    else:
        raise ValueError(f'dataloader {dataloader} unknown')
    _integration(dataloader, tmp_path=tmp_path, **kwargs)
class SubsetSum(BinaryProblem):
    """Subset-sum as a single-objective binary maximization problem.

    Goal: pick a subset of weights `W` whose sum is as large as possible
    without exceeding capacity `C`; sums above `C` are penalized.
    """

    def __init__(self, C: int, W: list):
        super(SubsetSum, self).__init__()
        # C: capacity limit; W: candidate item weights.
        self.C = C
        self.W = W
        self.number_of_bits = len(self.W)
        self.obj_directions = [self.MAXIMIZE]
        self.obj_labels = ['Sum']

    def number_of_variables(self) -> int:
        return 1

    def number_of_objectives(self) -> int:
        return 1

    def number_of_constraints(self) -> int:
        return 0

    def evaluate(self, solution: BinarySolution) -> BinarySolution:
        """Score a bitstring: sum of selected weights, penalized (and floored
        at zero) when it exceeds the capacity; stored negated for the
        minimizing framework."""
        total_sum = sum((self.W[idx] for (idx, bit) in enumerate(solution.variables[0]) if bit), 0.0)
        if total_sum > self.C:
            # Over capacity: penalize by 10% of the overshooting total.
            total_sum = self.C - (total_sum * 0.1)
        if total_sum < 0.0:
            total_sum = 0.0
        solution.objectives[0] = (-1.0) * total_sum
        return solution

    def create_solution(self) -> BinarySolution:
        """Sample a uniformly random bitstring solution."""
        candidate = BinarySolution(number_of_variables=self.number_of_variables(), number_of_objectives=self.number_of_objectives())
        candidate.variables[0] = [random.randint(0, 1) == 0 for _ in range(self.number_of_bits)]
        return candidate

    def name(self) -> str:
        return 'Subset Sum'
class CPOBuffer():
    """Trajectory buffer for Constrained Policy Optimization.

    Stores per-step observations, actions, rewards, costs, value estimates,
    log-probs and Gaussian policy parameters, and computes GAE-Lambda
    advantages for both the reward and the cost signal at path boundaries.
    """

    def __init__(self, obs_dim, act_dim, size, gamma=0.99, lam=0.95):
        # Pre-allocated flat storage for one epoch of `size` transitions.
        self.obs_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
        self.act_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)
        self.adv_buf = np.zeros(size, dtype=np.float32)          # reward advantages (GAE)
        self.rew_buf = np.zeros(size, dtype=np.float32)
        self.ret_buf = np.zeros(size, dtype=np.float32)          # discounted reward-to-go
        self.val_buf = np.zeros(size, dtype=np.float32)
        self.cost_buf = np.zeros(size, dtype=np.float32)
        self.cost_ret_buf = np.zeros(size, dtype=np.float32)     # discounted cost-to-go
        self.cost_val_buf = np.zeros(size, dtype=np.float32)
        self.adc_buf = np.zeros(size, dtype=np.float32)          # cost advantages (GAE)
        self.logp_buf = np.zeros(size, dtype=np.float32)
        # Gaussian policy parameters at collection time (for KL/trust region).
        self.mu_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)
        self.logstd_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)
        (self.gamma, self.lam) = (gamma, lam)
        # ptr: next write index; path_start_idx: start of the current trajectory.
        (self.ptr, self.path_start_idx, self.max_size) = (0, 0, size)

    def store(self, obs, act, rew, val, logp, cost, cost_val, mu, logstd):
        """Append one environment transition; asserts the buffer has room."""
        assert (self.ptr < self.max_size)
        self.obs_buf[self.ptr] = obs
        self.act_buf[self.ptr] = act
        self.rew_buf[self.ptr] = rew
        self.val_buf[self.ptr] = val
        self.logp_buf[self.ptr] = logp
        self.cost_buf[self.ptr] = cost
        self.cost_val_buf[self.ptr] = cost_val
        self.mu_buf[self.ptr] = mu
        self.logstd_buf[self.ptr] = logstd
        self.ptr += 1

    def finish_path(self, last_val=0, last_cost_val=0):
        """Close the current trajectory and compute its advantages/returns.

        Call at episode end (last_val=0) or at an epoch cut-off (last_val =
        bootstrap value estimate); same convention for last_cost_val.
        """
        path_slice = slice(self.path_start_idx, self.ptr)
        # Append the bootstrap values so TD residuals line up.
        rews = np.append(self.rew_buf[path_slice], last_val)
        vals = np.append(self.val_buf[path_slice], last_val)
        costs = np.append(self.cost_buf[path_slice], last_cost_val)
        cost_vals = np.append(self.cost_val_buf[path_slice], last_cost_val)
        # GAE-Lambda on the reward signal.
        deltas = ((rews[:(- 1)] + (self.gamma * vals[1:])) - vals[:(- 1)])
        self.adv_buf[path_slice] = core.discount_cumsum(deltas, (self.gamma * self.lam))
        # GAE-Lambda on the cost signal.
        cost_deltas = ((costs[:(- 1)] + (self.gamma * cost_vals[1:])) - cost_vals[:(- 1)])
        self.adc_buf[path_slice] = core.discount_cumsum(cost_deltas, (self.gamma * self.lam))
        # Discounted returns as value-function targets (drop the bootstrap slot).
        self.ret_buf[path_slice] = core.discount_cumsum(rews, self.gamma)[:(- 1)]
        self.cost_ret_buf[path_slice] = core.discount_cumsum(costs, self.gamma)[:(- 1)]
        self.path_start_idx = self.ptr

    def get(self):
        """Return all stored data as float32 tensors and reset the buffer.

        Reward advantages are normalized to zero mean / unit std (across MPI
        processes); cost advantages are only mean-centered.
        """
        assert (self.ptr == self.max_size)
        (self.ptr, self.path_start_idx) = (0, 0)
        (adv_mean, adv_std) = mpi_statistics_scalar(self.adv_buf)
        self.adv_buf = ((self.adv_buf - adv_mean) / adv_std)
        (adc_mean, adc_std) = mpi_statistics_scalar(self.adc_buf)
        self.adc_buf = (self.adc_buf - adc_mean)
        # NOTE(review): `device` is a module-level global here — confirm it is
        # defined before get() is called.
        data = dict(obs=torch.FloatTensor(self.obs_buf).to(device), act=torch.FloatTensor(self.act_buf).to(device), ret=torch.FloatTensor(self.ret_buf).to(device), adv=torch.FloatTensor(self.adv_buf).to(device), cost_ret=torch.FloatTensor(self.cost_ret_buf).to(device), adc=torch.FloatTensor(self.adc_buf).to(device), logp=torch.FloatTensor(self.logp_buf).to(device), mu=torch.FloatTensor(self.mu_buf).to(device), logstd=torch.FloatTensor(self.logstd_buf).to(device))
        return {k: torch.as_tensor(v, dtype=torch.float32) for (k, v) in data.items()}
def test_epoch(epoch, val_dataloader, model, criterion, args):
    """Evaluate `model` for one epoch on `val_dataloader` (no gradients).

    Tracks running averages of total loss, bpp, aux loss, the configured
    distortion metric and its dB counterpart; returns them as a dict.
    """
    model.eval()
    device = next(model.parameters()).device
    # Pick the metric keys produced by the criterion for the chosen distortion.
    if args.metric == 'mse':
        metric_dB_name, metric_name = 'psnr', 'mse_loss'
    else:
        metric_dB_name, metric_name = 'ms_db', 'ms_ssim_loss'
    metric_dB = AverageMeter(metric_dB_name, ':.4e')
    metric_loss = AverageMeter(args.metric, ':.4e')
    loss = AverageMeter('Loss', ':.4e')
    bpp_loss = AverageMeter('BppLoss', ':.4e')
    aux_loss = AverageMeter('AuxLoss', ':.4e')
    loop = tqdm(val_dataloader)
    with torch.no_grad():
        for batch_idx, batch in enumerate(loop):
            frames = [frame.to(device) for frame in batch]
            net_out = model(frames)
            crit_out = criterion(net_out, frames, args.lmbda)
            aux = compute_aux_loss(model.aux_loss(), backward=False)
            loss.update(crit_out['loss'].item())
            bpp_loss.update(crit_out['bpp_loss'].item())
            aux_loss.update(aux.item())
            metric_loss.update(crit_out[metric_name].item())
            metric_dB.update(crit_out[metric_dB_name].item())
            loop.set_description('[{}/{}]'.format(batch_idx, len(val_dataloader)))
            loop.set_postfix({'Loss': loss.avg, 'Bpp': bpp_loss.avg, args.metric: metric_loss.avg, 'Aux': aux_loss.avg, metric_dB_name: metric_dB.avg})
    return {'loss': loss.avg, metric_name: metric_loss.avg, 'bpp_loss': bpp_loss.avg, 'aux_loss': aux_loss.avg, metric_dB_name: metric_dB.avg}
def test_cocoscorer():
    """Smoke-test COCOScorer on a tiny two-image fixture.

    Only checks that scoring runs end-to-end on hand-written ground-truth
    captions and sample predictions; no score values are asserted.
    """
    # Ground truth: two reference captions per image id.
    gts = {'184321': [{u'image_id': '184321', u'cap_id': 0, u'caption': u'A train traveling down tracks next to lights.', 'tokenized': 'a train traveling down tracks next to lights'}, {u'image_id': '184321', u'cap_id': 1, u'caption': u'A train coming down the tracks arriving at a station.', 'tokenized': 'a train coming down the tracks arriving at a station'}], '81922': [{u'image_id': '81922', u'cap_id': 0, u'caption': u'A large jetliner flying over a traffic filled street.', 'tokenized': 'a large jetliner flying over a traffic filled street'}, {u'image_id': '81922', u'cap_id': 1, u'caption': u'The plane is flying over top of the cars', 'tokenized': 'the plan is flying over top of the cars'}]}
    # Candidate captions: one prediction per image id.
    samples = {'184321': [{u'image_id': '184321', u'caption': u'train traveling down a track in front of a road'}], '81922': [{u'image_id': '81922', u'caption': u'plane is flying through the sky'}]}
    IDs = ['184321', '81922']
    scorer = COCOScorer()
    scorer.score(gts, samples, IDs)
def random_translation_offset(max_pixels, seed=None):
    """Sample a random 3-vector translation offset (-dy, -dx, 0) with each
    component drawn uniformly from [-max_pixels, max_pixels] (TF1 API)."""
    limit = tf.cast(max_pixels, tf.float32)
    # Sample the vertical shift first, then the horizontal one (keeps RNG order).
    shift_y = tf.random_uniform([1], minval=(-limit), maxval=limit, seed=seed, dtype=tf.float32)
    shift_x = tf.random_uniform([1], minval=(-limit), maxval=limit, seed=seed, dtype=tf.float32)
    return tf.concat([(-shift_y), (-shift_x), [0.0]], axis=(-1))
def mutate_new_gene_structure(self, genome):
    """Independently mutate each gene's position in `genome` with probability
    `self.mutation_probability` (one uniform draw per gene)."""
    for gene_index in range(self.number_of_genes):
        if uniform(0, 1) < self.mutation_probability:
            genome.medium_mutate_gene_position(gene_index)
class DepthDecoder(nn.Module):
    """Decoder that upsamples fused encoder features 4x (two transposed-conv
    stages with skip additions) and predicts a single-channel depth map.

    All internal stages use `layers // 2` channels and `filter_size` kernels.
    """

    def __init__(self, layers, filter_size):
        super(DepthDecoder, self).__init__()
        pad = int((filter_size - 1) / 2)
        ch = layers // 2
        # Two identical 2x-upsampling stages: deconv followed by a refining conv.
        self.dec2 = nn.Sequential(nn.ReLU(), nn.ConvTranspose2d(ch, ch, filter_size, stride=2, padding=pad, output_padding=pad), nn.ReLU(), nn.Conv2d(ch, ch, filter_size, stride=1, padding=pad))
        self.dec1 = nn.Sequential(nn.ReLU(), nn.ConvTranspose2d(ch, ch, filter_size, stride=2, padding=pad, output_padding=pad), nn.ReLU(), nn.Conv2d(ch, ch, filter_size, stride=1, padding=pad))
        # Prediction head: two convs, ending in one depth channel.
        self.prdct = nn.Sequential(nn.ReLU(), nn.Conv2d(ch, ch, filter_size, stride=1, padding=pad), nn.ReLU(), nn.Conv2d(ch, 1, filter_size, stride=1, padding=pad))
        # Xavier-init all conv weights; small constant bias.
        for module in self.modules():
            if isinstance(module, nn.Sequential):
                for layer in module:
                    if isinstance(layer, (nn.Conv2d, nn.ConvTranspose2d)):
                        nn.init.xavier_normal_(layer.weight)
                        nn.init.constant_(layer.bias, 0.01)

    def forward(self, pre_dx, pre_cx):
        """Fuse depth (`pre_dx`) and color (`pre_cx`) pyramids (fine->coarse
        order: index 0 finest) and return (last feature map, depth map)."""
        fused2 = pre_dx[2] + pre_cx[2]
        fused1 = pre_dx[1] + pre_cx[1]
        fused0 = pre_dx[0] + pre_cx[0]
        up2 = self.dec2(fused2)
        up1 = self.dec1(fused1 + up2)
        depth = self.prdct(up1 + fused0)
        return (up1, depth)
def upconv_block(in_channels, out_channels, upscale_factor=2, kernel_size=3, stride=1, act_type='relu'):
    """Build an upsampling block: nearest-neighbor upsample -> conv -> activation."""
    return sequential(
        nn.Upsample(scale_factor=upscale_factor, mode='nearest'),
        conv_layer(in_channels, out_channels, kernel_size, stride),
        activation(act_type),
    )
class DataLoaderWrap():
    """Iterator adapter pairing a dataloader with a description of its inputs.

    Each `iter()` call restarts iteration over the wrapped dataloader; items
    are yielded unchanged.
    """

    def __init__(self, dataloader, input_desc):
        self.dataloader = dataloader
        self.input_desc = input_desc
        # Live iterator; created lazily by __iter__.
        self._iter = None

    def __iter__(self):
        self._iter = iter(self.dataloader)
        return self

    def __next__(self):
        return next(self._iter)
class DataIterator(object):
    """Shuffling, bucketing batch iterator for summarization training.

    Examples are read into a large buffer, sorted by target then source
    length (to limit padding), cut into token-budgeted minibatches, and
    yielded as `Batch` objects. Only the abstractive ('abs') task is
    currently supported.
    """

    def __init__(self, args, dataset, batch_size, device=None, is_test=False, shuffle=True):
        self.args = args
        self.dataset = dataset
        self.batch_size = batch_size
        self.device = device
        self.is_test = is_test
        self.shuffle = shuffle
        self.iterations = 0
        self.sort_key = (lambda x: len(x[1]))
        self._iterations_this_epoch = 0
        if (self.args.task == 'abs'):
            self.batch_size_fn = abs_batch_size_fn
        else:
            # Fix: raising a plain string is itself a TypeError in Python 3
            # ("exceptions must derive from BaseException"); raise a real
            # exception so callers see the intended message.
            raise NotImplementedError('Currently only support abstractive summarization.')

    def __iter__(self):
        """Yield `Batch` objects for one epoch, resuming past already-seen batches."""
        while True:
            self.batches = self.create_batches()
            for (idx, minibatch) in enumerate(self.batches):
                # Skip batches already consumed this epoch (supports resumption).
                if (self._iterations_this_epoch > idx):
                    continue
                self.iterations += 1
                self._iterations_this_epoch += 1
                batch = Batch(minibatch, self.device, self.is_test)
                (yield batch)
            return

    def create_batches(self):
        """Sort a large buffer by length, cut into batches, optionally shuffle them."""
        data = self.data()
        buffer_coeff = 300  # buffer holds ~300x one batch's token budget
        for buffer in self.batch_buffer(data, (self.batch_size * buffer_coeff)):
            if (self.args.task == 'abs'):
                # Sort by target length, then (stable) by source length.
                p_batch = sorted(buffer, key=(lambda x: len(x[2])))
                p_batch = sorted(p_batch, key=(lambda x: len(x[1])))
            else:
                p_batch = sorted(buffer, key=(lambda x: len(x[2])))
            p_batch = self.batch(p_batch, self.batch_size)
            p_batch = list(p_batch)
            if self.shuffle:
                random.shuffle(p_batch)
            for b in p_batch:
                if (len(b) == 0):
                    continue
                # With deterministic batch sizes, drop ragged (short) batches
                # unless they are the only batch available.
                if (self.args.deterministic_batch_size and (len(b) != self.batch_size) and (len(p_batch) > 1)):
                    continue
                (yield b)

    def batch_buffer(self, data, batch_size):
        """Accumulate preprocessed examples into buffers of roughly `batch_size` tokens."""
        (minibatch, size_so_far) = ([], 0)
        for ex in data:
            if (len(ex['src']) == 0):
                continue
            ex = self.preprocess(ex, self.is_test)
            if (ex is None):
                continue
            minibatch.append(ex)
            size_so_far = self.batch_size_fn(ex, len(minibatch))
            if (size_so_far == batch_size):
                (yield minibatch)
                (minibatch, size_so_far) = ([], 0)
            elif (size_so_far > batch_size):
                # Overshot: yield all but the last example and carry it over.
                (yield minibatch[:(- 1)])
                (minibatch, size_so_far) = (minibatch[(- 1):], self.batch_size_fn(ex, 1))
        if minibatch:
            (yield minibatch)

    def data(self):
        """Return the dataset, shuffled in place when self.shuffle is set."""
        if self.shuffle:
            random.shuffle(self.dataset)
        xs = self.dataset
        return xs

    def preprocess(self, ex, is_test):
        """Truncate one example to max_pos/max_tgt_len.

        Returns (src, tgt, segs, clss, src_sent_labels) plus the raw texts
        when is_test is True.
        """
        src = ex['src']
        clss = ex['clss']
        segs = ex['segs']
        if (not self.args.use_interval):
            segs = ([0] * len(segs))
        src_sent_labels = ex['src_sent_labels']
        # Truncate the target and force the end-of-sequence id (2).
        tgt = (ex['tgt'][:self.args.max_tgt_len][:(- 1)] + [2])
        src_txt = ex['src_txt']
        tgt_txt = ex['tgt_txt']
        # Keep the original final token (e.g. [SEP]) after truncation.
        end_id = [src[(- 1)]]
        src = (src[:(- 1)][:(self.args.max_pos - 1)] + end_id)
        segs = segs[:self.args.max_pos]
        # Keep only sentence starts that survived the position cut.
        max_sent_id = bisect.bisect_left(clss, self.args.max_pos)
        clss = clss[:max_sent_id]
        src_sent_labels = src_sent_labels[:max_sent_id]
        if is_test:
            return (src, tgt, segs, clss, src_sent_labels, src_txt, tgt_txt)
        else:
            return (src, tgt, segs, clss, src_sent_labels)

    def batch(self, data, batch_size):
        """Cut sorted data into minibatches (by count when deterministic, else by token budget)."""
        (minibatch, size_so_far) = ([], 0)
        if self.args.deterministic_batch_size:
            for (data_idx, ex) in enumerate(data):
                minibatch.append(ex)
                if (((data_idx + 1) % batch_size) == 0):
                    (yield minibatch)
                    (minibatch, size_so_far) = ([], 0)
            if minibatch:
                (yield minibatch)
        else:
            for ex in data:
                minibatch.append(ex)
                size_so_far = self.batch_size_fn(ex, len(minibatch))
                if (size_so_far == batch_size):
                    (yield minibatch)
                    (minibatch, size_so_far) = ([], 0)
                elif (size_so_far > batch_size):
                    (yield minibatch[:(- 1)])
                    (minibatch, size_so_far) = (minibatch[(- 1):], self.batch_size_fn(ex, 1))
            if minibatch:
                (yield minibatch)
def save_to_dict(int_sols_dict, multiplier, int_sol, train_acc, test_acc, train_auc, test_auc, logisticLoss):
    """Append one integral-solution record to the accumulator dict (mutated in place)."""
    record = (
        ('multipliers', multiplier),
        ('int_sols', int_sol),
        ('train_accs', train_acc),
        ('test_accs', test_acc),
        ('train_aucs', train_auc),
        ('test_aucs', test_auc),
        ('logisticLosses', logisticLoss),
    )
    for key, value in record:
        int_sols_dict[key].append(value)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Convert an original BertAbs checkpoint into the HuggingFace BertAbsSummarizer format.

    Loads the original model on CPU, copies bert/decoder/generator weights into
    the new implementation, verifies both models produce identical outputs on a
    padded sample input, and saves the converted state dict.

    Args:
        path_to_checkpoints: path to the original checkpoint file.
        dump_path: NOTE(review) — currently unused; the state dict is saved
            under a hard-coded filename in the working directory. Confirm intent.

    Raises:
        ValueError: if the two models' outputs differ by more than 1e-3.
    """
    config = BertAbsConfig(temp_dir='.', finetune_bert=False, large=False, share_emb=True, use_bert_emb=False, encoder='bert', max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2)
    # Force all tensors onto CPU regardless of where they were saved.
    checkpoints = torch.load(path_to_checkpoints, map_location=(lambda storage, loc: storage))
    original = AbsSummarizer(config, torch.device('cpu'), checkpoints)
    original.eval()
    new_model = BertAbsSummarizer(config, torch.device('cpu'))
    new_model.eval()
    logging.info('convert the model')
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    # Build fixed-length (512) padded encoder/decoder inputs from sample text.
    encoder_input_ids = tokenizer.encode("This is sample eaalj'-.")
    encoder_input_ids.extend(([tokenizer.pad_token_id] * (512 - len(encoder_input_ids))))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 eaalj'-.")
    decoder_input_ids.extend(([tokenizer.pad_token_id] * (512 - len(decoder_input_ids))))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
    # Sanity check: generator weights were copied exactly.
    assert (torch.max(torch.abs((original.generator[0].weight - new_model.generator[0].weight))) == 0)
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)
    output_converted_model = new_model(encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask)[0]
    output_converted_generator = new_model.generator(output_converted_model)
    maximum_absolute_difference = torch.max(torch.abs((output_converted_model - output_original_model))).item()
    # Fix: the message had a typo ("beween") and claimed to compare weights,
    # while the values compared here are model/generator OUTPUTS.
    print('Maximum absolute difference between model outputs: {:.2f}'.format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs((output_converted_generator - output_original_generator))).item()
    print('Maximum absolute difference between generator outputs: {:.2f}'.format(maximum_absolute_difference))
    are_identical = torch.allclose(output_converted_model, output_original_model, atol=0.001)
    if are_identical:
        logging.info('all weights are equal up to 1e-3')
    else:
        raise ValueError('the weights are different. The new model is likely different from the original one.')
    logging.info("saving the model's state dictionary")
    torch.save(new_model.state_dict(), 'bertabs-finetuned-cnndm-extractive-abstractive-summarization-pytorch_model.bin')
def draw_keypoints_on_image_array(image, keypoints, color='red', radius=2, use_normalized_coordinates=True):
    """Draw keypoints onto a numpy image array, modifying it in place."""
    pil_image = Image.fromarray(np.uint8(image)).convert('RGB')
    draw_keypoints_on_image(pil_image, keypoints, color, radius, use_normalized_coordinates)
    # Copy the rendered pixels back into the caller's array.
    np.copyto(image, np.array(pil_image))
def test_complicated_subscripting():
    """Slicing through chained method calls: the slice of cell 5 must pull in
    every cell whose definitions are reachable from `Foo().new(0).foo(1)`.

    NOTE(review): the embedded cell sources look whitespace-mangled by
    extraction -- confirm the literals against the original test file.
    """
    run_cell('\n class Foo:\n def __init__(self):\n self.counter = 0\n \n def inc(self):\n self.counter += 1\n\n def bar(self, indicator):\n if indicator == 1:\n return the_dictionary\n else:\n return x + 5\n \n def new(self, indicator):\n if indicator == 1:\n return Foo()\n \n return Bar()\n ')
    run_cell('\n class Bar:\n def __init__(self):\n self.counter = 0\n \n def foo(self, indicator):\n if indicator == 1:\n return the_dictionary\n \n return Foo()\n ')
    run_cell('x = 1')
    run_cell("the_dictionary = {'something': 1}")
    run_cell('Foo().new(0).foo(1)')
    # Cell 5's slice should depend on cells 1 (Foo), 2 (Bar), 4 (the_dictionary) and itself.
    deps = set(compute_unparsed_slice(5).keys())
    assert (deps == {1, 2, 4, 5}), ('got %s' % deps)
    # Cell 3 (x = 1) is not needed, so the slice holds 4 statements.
    slice_size = num_stmts_in_slice(5)
    assert (slice_size == 4), ('got %d' % slice_size)
def cifar10_grayscale_normalization():
    """Normalization transform for grayscale CIFAR-10 (0-255 channel stats rescaled to [0, 1])."""
    mean = 122.6 / 255.0
    std = 61.0 / 255.0
    return transforms.Normalize(mean=mean, std=std)
def Huffman_Decoding(encoded_data, huffman_tree):
    """Walk a Huffman tree over a bit string ('0' = left, '1' = right) and
    return the decoded symbols concatenated as one string.

    Leaf detection relies on AttributeError: probing `.symbol` through a
    missing child fails, which signals the current node is a leaf.
    """
    root = huffman_tree
    node = huffman_tree
    symbols = []
    for bit in encoded_data:
        if bit == '1':
            node = node.right
        elif bit == '0':
            node = node.left
        try:
            # Probe the children; raises AttributeError at a leaf (child is None).
            (node.left.symbol == None) and (node.right.symbol == None)
        except AttributeError:
            symbols.append(node.symbol)
            node = root
    return ''.join(str(sym) for sym in symbols)
def main():
    """Entry point: build one MXNet context per configured GPU and launch training."""
    print('Called with argument:', args)
    gpu_ids = config.gpus.split(',')
    ctx = [mx.gpu(int(gpu_id)) for gpu_id in gpu_ids]
    train_net(args, ctx, config.network.pretrained, config.network.pretrained_flow,
              config.network.pretrained_epoch, config.TRAIN.model_prefix,
              config.TRAIN.begin_epoch, config.TRAIN.end_epoch,
              config.TRAIN.lr, config.TRAIN.lr_step)
def do_train(cur_step, optimizer, sim, param_g):
    """Run the simulate/optimize loop, logging loss and per-phase timings.

    NOTE(review): `quit()` fires whenever epoch >= 0, i.e. always after the
    first iteration -- looks like a debugging short-circuit; confirm.
    """
    epoch = 0
    while True:
        steps = 4 * 25 * spf
        reset_sim(sim, epoch)
        t_start = time.time()
        loss, ans = run_sim(steps, sim, param_g)
        t_forward = time.time()
        optimizer.zero_grad()
        t_backward = time.time()
        print('')
        f.write('epoch {}: loss={} \n'.format(epoch, loss.data))
        print('epoch {}: loss={} \n'.format(epoch, loss.data))
        print('forward time={}'.format(t_forward - t_start))
        print('backward time={}'.format(t_backward - t_forward))
        optimizer.step()
        if epoch >= 0:
            quit()
        epoch = epoch + 1
class QnliProcessor(DataProcessor):
    """Processor for the QNLI dataset (GLUE benchmark)."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """Build an InputExample from a tensorflow tensor dict."""
        return InputExample(
            tensor_dict['idx'].numpy(),
            tensor_dict['question'].numpy().decode('utf-8'),
            tensor_dict['sentence'].numpy().decode('utf-8'),
            str(tensor_dict['label'].numpy()),
        )

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')

    def get_test_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')

    def get_labels(self):
        return ['entailment', 'not_entailment']

    def _create_examples(self, lines, set_type):
        """Turn TSV rows into InputExamples, skipping the header; test rows get no label."""
        examples = []
        for row_index, row in enumerate(lines):
            if row_index == 0:
                continue  # header row
            guid = '%s-%s' % (set_type, row[0])
            label = None if set_type == 'test' else row[-1]
            examples.append(InputExample(guid=guid, text_a=row[1], text_b=row[2], label=label))
        return examples
def timestamp_to_frame_index(timestamp, video_duration, n_frames=32):
    """Map a timestamp (seconds) to a frame-bin index in [0, n_frames - 1].

    The video duration is split into n_frames evenly spaced bins; a negative
    n_frames means one bin per second of video.
    """
    duration = int(video_duration)
    if n_frames < 0:
        n_frames = duration
    bin_edges = np.linspace(0, duration - 1, n_frames)
    index = int(np.digitize(timestamp, bin_edges, right=True))
    # Clamp timestamps past the end of the video into the last bin.
    return min(index, n_frames - 1)
def templatemethod(name_):
    """Decorator factory: run the wrapped function through tf.make_template(name_, func).

    NOTE(review): the template is re-created on every invocation; if TF
    variable sharing across calls is intended, make_template should probably
    be hoisted out of the wrapper -- confirm against usage.
    """
    def decorate(func):
        def wrapper(*args, **kwargs):
            return tf.make_template(name_, func)(*args, **kwargs)
        return wrapper
    return decorate
def normalize_double_n(str):
    """Collapse 'nn' to "n'", then drop the apostrophe when not followed by a vowel, 'y' or 'n'.

    (Romaji normalization for syllabic n; the parameter name shadows the
    builtin but is kept for interface compatibility.)
    """
    text = re.sub('nn', "n'", str)
    return re.sub("n'(?=[^aiueoyn]|$)", 'n', text)
# NOTE(review): bare name expressions with no effect -- almost certainly the
# residue of stripped decorators (e.g. @require_rjieba / @require_tokenizers)
# that belonged to the test class below; confirm against the original source.
_rjieba
_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for RoFormer (slow and rust/fast implementations).

    NOTE(review): the Chinese sample text and expected ids appear garbled by
    extraction (input_text shows only a comma but 12 token ids are expected);
    verify the literals against the original test file.
    """
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        # Slow (python) tokenizer loaded from the hub checkpoint.
        return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base', **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        # Fast (rust-backed) tokenizer from the same checkpoint.
        return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base', **kwargs)

    def get_chinese_input_output_texts(self):
        """Return (raw input text, whitespace-separated expected tokenization)."""
        input_text = ','
        output_text = ' , '
        return (input_text, output_text)

    def test_tokenizer(self):
        # Tokens must match the whitespace-split reference and map to the expected ids.
        tokenizer = self.get_tokenizer()
        (input_text, output_text) = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = (tokens + [tokenizer.unk_token])
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        # Same check against the fast tokenizer; results must agree with the slow one.
        tokenizer = self.get_rust_tokenizer()
        (input_text, output_text) = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = (tokens + [tokenizer.unk_token])
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_training_new_tokenizer(self):
        # Inherited mixin test intentionally disabled for RoFormer.
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        # Inherited mixin test intentionally disabled for RoFormer.
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        # Inherited mixin test intentionally disabled for RoFormer.
        pass
def main():
    """Parse the command line, dump the effective config, then run train/test for each epoch."""
    args = parse_command_line()
    cfg.update_args(vars(args))
    logger.debug('COMMAND LINE: {}'.format(str(sys.argv)))
    logger.info('CONFIG:')
    cfg_vars = vars(cfg)
    for key in sorted(cfg_vars):
        logger.info('\t{}: {}'.format(key, cfg_vars[key]))
    cudnn.enabled = True
    cudnn.benchmark = True
    trainer = Trainer(cfg.tag)
    tester = Tester(trainer.model)
    for epoch in range(trainer.start_epoch, trainer.end_epoch):
        logger.info('Epoch {} of exp {} on gpu {}'.format(epoch, cfg.exp_tag, cfg.gpus))
        step = trainer.train_an_epoch(epoch)
        best_model = tester.test_an_epoch(epoch, step)
        trainer.save_model(epoch, best_model)
class SAKTModel(nn.Module):
    """SAKT: question embedding -> self-attentive encoder -> per-question sigmoid prediction."""

    def __init__(self, h, length, d_model, n_question, dropout):
        super(SAKTModel, self).__init__()
        self.embedding = Embedding(n_question, length, d_model)
        self.encoder = Encoder(h, length, d_model, dropout)
        self.w = nn.Linear(d_model, n_question)
        self.sig = nn.Sigmoid()

    def forward(self, y):
        """Embed the interaction sequence and return per-question correctness probabilities."""
        x, y = self.embedding(y)
        encoded = self.encoder(x, y)
        return self.sig(self.w(encoded))
def compute_continuum(forest, get_mean_cont, get_eta, get_var_lss, get_fudge, use_constant_weight, order):
    """Fit the quasar continuum of `forest` with iminuit.

    The model is (zero_point + slope * rest-frame term) times the global mean
    continuum; `slope` is held fixed when order == 0. Returns a tuple
    (cont_model, bad_continuum_reason, continuum_fit_parameters); cont_model
    is None and the parameters are NaN when the fit fails.
    """
    # Mean continuum evaluated on the forest's rest-frame wavelength grid.
    mean_cont = get_mean_cont((forest.log_lambda - np.log10((1 + forest.z))))
    mean_cont *= forest.transmission_correction
    mean_cont_kwargs = {'mean_cont': mean_cont}
    # Observed-frame bounds of the rest-frame grid (depends on class-level grid of Forest).
    mean_cont_kwargs['log_lambda_max'] = (Forest.log_lambda_rest_frame_grid[(- 1)] + np.log10((1 + forest.z)))
    mean_cont_kwargs['log_lambda_min'] = (Forest.log_lambda_rest_frame_grid[0] + np.log10((1 + forest.z)))
    # Per-pixel weight model: eta (noise scaling), var_lss (LSS variance), fudge.
    weights_kwargs = {'use_constant_weight': use_constant_weight, 'eta': get_eta(forest.log_lambda), 'var_lss': get_var_lss(forest.log_lambda), 'fudge': get_fudge(forest.log_lambda)}
    leasts_squares = LeastsSquaresContModel(forest=forest, mean_cont_kwargs=mean_cont_kwargs, weights_kwargs=weights_kwargs)
    # Initial zero point: inverse-variance-weighted mean flux; flat slope.
    zero_point = ((forest.flux * forest.ivar).sum() / forest.ivar.sum())
    slope = 0.0
    minimizer = iminuit.Minuit(leasts_squares, zero_point=zero_point, slope=slope)
    minimizer.errors['zero_point'] = (zero_point / 2.0)
    minimizer.errors['slope'] = (zero_point / 2.0)
    minimizer.errordef = 1.0  # chi-square-style cost function
    minimizer.print_level = 0
    minimizer.fixed['slope'] = (order == 0)
    minimizer.migrad()
    bad_continuum_reason = None
    cont_model = leasts_squares.get_continuum_model(forest, minimizer.values['zero_point'], minimizer.values['slope'], **mean_cont_kwargs)
    if (not minimizer.valid):
        bad_continuum_reason = "minuit didn't converge"
    if np.any((cont_model < 0)):
        bad_continuum_reason = 'negative continuum'
    if (bad_continuum_reason is None):
        continuum_fit_parameters = (minimizer.values['zero_point'], minimizer.values['slope'], minimizer.fval, leasts_squares.get_ndata())
    else:
        # Failed fit: discard the model, keep NaN parameters plus the data count.
        cont_model = None
        continuum_fit_parameters = (np.nan, np.nan, np.nan, leasts_squares.get_ndata())
    return (cont_model, bad_continuum_reason, continuum_fit_parameters)
def save_checkpoint(model, save_path):
    """Save `model.state_dict()` to `save_path`, creating parent directories as needed.

    Fixes two issues with the exists()+makedirs pattern: a TOCTOU race when
    the directory is created concurrently, and a crash on a bare filename
    (os.path.dirname returns '' and os.makedirs('') raises).
    """
    directory = os.path.dirname(save_path)
    if directory:
        os.makedirs(directory, exist_ok=True)
    torch.save(model.state_dict(), save_path)
class CountsForHistory(object):
    """Word -> count map for a single n-gram history, with a running total.

    Counts may be incremented or decremented; an entry whose count reaches
    exactly zero is removed, and negative totals are rejected.
    """

    def __init__(self):
        self.word_to_count = defaultdict(int)
        self.total_count = 0

    def Words(self):
        """All words currently holding a nonzero count."""
        return list(self.word_to_count.keys())

    def __str__(self):
        pairs = ' '.join(['{0} -> {1}'.format(word, count) for (word, count) in self.word_to_count.items()])
        return ' total={0} {1}'.format(str(self.total_count), pairs)

    def AddCount(self, predicted_word, count):
        """Add `count` (possibly negative) to `predicted_word`; drop the entry at zero."""
        self.total_count += count
        assert (self.total_count >= 0)
        updated = self.word_to_count[predicted_word] + count
        if (updated < 0):
            # Diagnostic before the assert below fires.
            print('predicted-word={0}, old-count={1}, count={2}'.format(predicted_word, updated - count, count))
        assert (updated >= 0)
        if (updated == 0):
            del self.word_to_count[predicted_word]
        else:
            self.word_to_count[predicted_word] = updated
def output_result(outfile, script, math_script, cpu, mem, bond_order, input_file):
    """Write a commented header (input file, script, cost, final bond order) followed by the script lines."""
    final_bonds = '(' + ', '.join([BOND_NAMES[b] for b in bond_order]) + ')'
    BR = '\n'
    SP = ' '
    header = [
        config.COMMENT_PREFIX * 30,
        config.COMMENT_PREFIX + SP + input_file,
        config.COMMENT_PREFIX * 30,
        config.COMMENT_PREFIX + SP + math_script,
        config.COMMENT_PREFIX + SP + 'cpu_cost= {0:g} memory= {1:g}'.format(cpu, mem),
        config.COMMENT_PREFIX + SP + 'final_bond_order ' + final_bonds,
        config.COMMENT_PREFIX * 30,
    ]
    outfile.write(BR.join(header + script) + BR)
def CORR(pred, true):
    """Correlation-style statistic averaged over the last axis.

    Per column: sum of centered cross-products divided by the square root of
    the summed product of squared deviations (note: not the textbook Pearson
    denominator -- kept as-is to match the original metric).
    """
    true_dev = true - true.mean(0)
    pred_dev = pred - pred.mean(0)
    numerator = (true_dev * pred_dev).sum(0)
    denominator = np.sqrt(((true_dev ** 2) * (pred_dev ** 2)).sum(0))
    return (numerator / denominator).mean(-1)
def _compute_new_amount(model, amount):
    """Translate a global pruning fraction into the fraction of currently-unmasked weights to remove.

    Because `to_kill` is floored to an int, the returned ratio reflects the
    exact number of weights that will actually be killed.
    """
    unmaskeds = _count_unmasked_weights(model)
    total_unmaskeds = unmaskeds.sum()
    to_kill = int(total_unmaskeds * amount)
    # Reuse the already-computed total instead of summing the tensor again.
    return float(to_kill / total_unmaskeds)
def voc_info(json_dataset):
    """Derive VOC devkit paths and metadata from a dataset name like 'voc_2007_trainval'."""
    year = json_dataset.name[4:8]
    image_set = json_dataset.name[9:]
    devkit_path = DATASETS[json_dataset.name][DEVKIT_DIR]
    assert os.path.exists(devkit_path), 'Devkit directory {} not found'.format(devkit_path)
    voc_root = os.path.join(devkit_path, 'VOC' + year)
    return dict(
        year=year,
        image_set=image_set,
        devkit_path=devkit_path,
        # anno_path keeps a '{:s}' placeholder to be formatted with an image id.
        anno_path=os.path.join(voc_root, 'Annotations', '{:s}.xml'),
        image_set_path=os.path.join(voc_root, 'ImageSets', 'Main', image_set + '.txt'),
    )
def select_gpu():
    """Pick the GPU with the least used memory (<= 24 GiB) and return its index as a string."""
    import pynvml
    pynvml.nvmlInit()
    device_ids = list(range(pynvml.nvmlDeviceGetCount()))
    least_used = 24 * 1024 * 1024 * 1024  # ignore devices using more than 24 GiB
    chosen = device_ids[0]
    for device_id in device_ids:
        handle = pynvml.nvmlDeviceGetHandleByIndex(device_id)
        used = pynvml.nvmlDeviceGetMemoryInfo(handle).used
        if used <= least_used:
            least_used = used
            chosen = device_id
    return str(chosen)
class SupervisedModel(EztorchBaseModule):
    """Lightning-style module for plain supervised classification.

    Wraps a hydra-instantiated backbone with optional GPU-side train/val/test
    transforms and optional test-time augmentation (TTA), logging loss and
    top-1/top-5 accuracy for each phase.

    NOTE(review): `learnable_params`, `training_steps_per_epoch` and
    `num_layers` read like they were `@property` in the original source
    (configure_optimizers passes `self.training_steps_per_epoch` uncalled) --
    decorators may have been stripped; confirm upstream.
    """

    def __init__(self, model: DictConfig, optimizer: DictConfig, train_transform: Optional[DictConfig]=None, val_transform: Optional[DictConfig]=None, test_transform: Optional[DictConfig]=None, val_time_augmentation: Optional[DictConfig]=None, test_time_augmentation: Optional[DictConfig]=None) -> None:
        super().__init__()
        self.save_hyperparameters()
        self.model = hydra.utils.instantiate(model)
        self.optimizer_cfg = optimizer
        # Per-phase transforms are applied on-device, under no_grad and outside autocast.
        self.train_transform = (hydra.utils.instantiate(train_transform) if (train_transform is not None) else None)
        self.val_transform = (hydra.utils.instantiate(val_transform) if (val_transform is not None) else None)
        self.test_transform = (hydra.utils.instantiate(test_transform) if (test_transform is not None) else None)
        self.val_time_augmentation = (get_test_time_augmentation_fn(**val_time_augmentation) if val_time_augmentation else None)
        self.test_time_augmentation = (get_test_time_augmentation_fn(**test_time_augmentation) if test_time_augmentation else None)

    def learnable_params(self) -> List[Parameter]:
        """All backbone parameters."""
        return list(self.model.parameters())

    def training_steps_per_epoch(self) -> Optional[int]:
        """Optimizer steps per epoch, or None when the datamodule cannot tell."""
        if ((self.trainer.datamodule is not None) and (self.trainer.datamodule.train_num_samples > 0)):
            return (self.trainer.datamodule.train_num_samples // self.trainer.datamodule.train_global_batch_size)
        else:
            return None

    def on_fit_start(self) -> None:
        """Create train/val accuracy metrics once the datamodule's class count is known."""
        num_classes = self.trainer.datamodule.num_classes
        task = ('binary' if (num_classes <= 2) else 'multiclass')
        self.train_acc_1 = Accuracy(task=task, num_classes=num_classes, top_k=1).to(self.device)
        self.train_acc_5 = Accuracy(task=task, num_classes=num_classes, top_k=5).to(self.device)
        self.val_acc_1 = Accuracy(task=task, num_classes=num_classes, top_k=1).to(self.device)
        self.val_acc_5 = Accuracy(task=task, num_classes=num_classes, top_k=5).to(self.device)

    def on_test_start(self) -> None:
        """Create test accuracy metrics."""
        num_classes = self.trainer.datamodule.num_classes
        task = ('binary' if (num_classes <= 2) else 'multiclass')
        self.test_acc_1 = Accuracy(task=task, num_classes=num_classes, top_k=1).to(self.device)
        self.test_acc_5 = Accuracy(task=task, num_classes=num_classes, top_k=5).to(self.device)

    def configure_optimizers(self) -> Dict[(Any, Any)]:
        """Instantiate the optimizer (and optional LR scheduler) from the hydra config."""
        (optimizer, scheduler) = hydra.utils.instantiate(self.optimizer_cfg, num_steps_per_epoch=self.optimizer_cfg.get('num_steps_per_epoch', self.training_steps_per_epoch), model=self)
        if (scheduler is None):
            return optimizer
        return {'optimizer': optimizer, 'lr_scheduler': scheduler}

    def forward(self, x: Tensor) -> Tensor:
        x = self.model(x)
        return x

    def training_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        """One step: optional transform, forward, CE loss plus top-1/5 accuracy logging."""
        (x, targets) = (batch['input'], batch['label'])
        if (self.train_transform is not None):
            with torch.no_grad():
                with torch.cuda.amp.autocast(enabled=False):
                    x = self.train_transform(x)
        preds = self(x)
        loss = nn.functional.cross_entropy(preds, targets)
        acc_1 = self.train_acc_1(preds, targets)
        acc_5 = self.train_acc_5(preds, targets)
        self.log('train/loss', loss, on_epoch=True)
        self.log('train/acc_1', acc_1, on_epoch=True, prog_bar=True)
        self.log('train/acc_5', acc_5, on_epoch=True)
        return loss

    def num_layers(self) -> int:
        """Depth reported by the backbone."""
        return self.model.num_layers

    def get_param_layer_id(self, name: str) -> int:
        """Map a parameter name to its backbone layer id (for layer-wise LR schemes)."""
        if name.startswith('model.'):
            return self.model.get_param_layer_id(name[len('model.'):])
        else:
            raise NotImplementedError(f'{name} should not have been used.')

    def validation_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        """Validation; with TTA, per-clip softmax scores are aggregated by sample idx."""
        if (self.val_time_augmentation is not None):
            (x, targets, idx) = (batch['input'], batch['label'], batch['idx'])
            if (self.val_transform is not None):
                with torch.no_grad():
                    with torch.cuda.amp.autocast(enabled=False):
                        x = self.val_transform(x)
            preds = self(x)
            preds = preds.softmax((- 1))
            (preds, targets, idx) = self.val_time_augmentation(preds, targets, idx)
        else:
            (x, targets) = (batch['input'], batch['label'])
            if (self.val_transform is not None):
                with torch.no_grad():
                    with torch.cuda.amp.autocast(enabled=False):
                        x = self.val_transform(x)
            preds = self(x)
        # NOTE(review): on the TTA path `preds` are softmax probabilities, so this
        # cross_entropy receives probabilities rather than logits -- confirm intended.
        loss = nn.functional.cross_entropy(preds, targets)
        self.val_acc_1(preds, targets)
        self.val_acc_5(preds, targets)
        self.log('val/loss', loss)
        self.log('val/acc_1', self.val_acc_1, prog_bar=True)
        self.log('val/acc_5', self.val_acc_5)
        return loss

    def test_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        """Test; mirrors validation_step using the test transform/TTA."""
        if (self.test_time_augmentation is not None):
            (x, targets, idx) = (batch['input'], batch['label'], batch['idx'])
            if (self.test_transform is not None):
                with torch.no_grad():
                    with torch.cuda.amp.autocast(enabled=False):
                        x = self.test_transform(x)
            preds = self(x)
            preds = preds.softmax((- 1))
            (preds, targets, idx) = self.test_time_augmentation(preds, targets, idx)
        else:
            (x, targets) = (batch['input'], batch['label'])
            if (self.test_transform is not None):
                with torch.no_grad():
                    with torch.cuda.amp.autocast(enabled=False):
                        x = self.test_transform(x)
            preds = self(x)
        loss = nn.functional.cross_entropy(preds, targets)
        self.test_acc_1(preds, targets)
        self.test_acc_5(preds, targets)
        self.log('test/loss', loss)
        self.log('test/acc_1', self.test_acc_1, prog_bar=True)
        self.log('test/acc_5', self.test_acc_5)
        return loss
class MlpBlock(nn.Module):
    """Transformer-style feed-forward block: Linear -> GELU -> Linear (back to hidden_dim)."""

    def __init__(self, hidden_dim, ff_dim):
        super(MlpBlock, self).__init__()
        self.fc0 = nn.Linear(hidden_dim, ff_dim, bias=True)
        self.fc1 = nn.Linear(ff_dim, hidden_dim, bias=True)
        self.act_fn = nn.GELU()

    def forward(self, x):
        return self.fc1(self.act_fn(self.fc0(x)))
class FinetuneAPIRouter(APIRouter):
    """API router exposing chatbot fine-tuning endpoints."""

    def __init__(self) -> None:
        super().__init__()
        self.chatbot = None  # attached later via set_chatbot()

    def set_chatbot(self, chatbot, use_deepspeed, world_size, host, port) -> None:
        """Attach the chatbot instance and its serving/distributed settings."""
        self.chatbot = chatbot
        self.use_deepspeed = use_deepspeed
        self.world_size = world_size
        self.host = host
        self.port = port

    def get_chatbot(self):
        """Return the chatbot, raising RuntimeError if set_chatbot() was never called."""
        if (self.chatbot is None):
            logger.error('Chatbot instance is not found.')
            raise RuntimeError('Chatbot instance has not been set.')
        return self.chatbot

    def handle_finetune_request(self, request: FinetuneRequest) -> str:
        """Run one fine-tuning job described by `request`; returns 'Succeed' on success.

        Raises:
            Exception: wraps any pipeline failure; chained to the original
                error so the root-cause traceback is preserved.
        """
        try:
            model_args = ModelArguments(model_name_or_path=request.model_name_or_path)
            data_args = DataArguments(train_file=request.train_file, dataset_name=request.dataset_name, dataset_concatenation=request.dataset_concatenation)
            training_args = TrainingArguments(output_dir=request.output_dir, do_train=True, max_steps=request.max_steps, overwrite_output_dir=request.overwrite_output_dir)
            finetune_args = FinetuningArguments(peft=request.peft)
            finetune_cfg = TextGenerationFinetuningConfig(model_args=model_args, data_args=data_args, training_args=training_args, finetune_args=finetune_args)
            finetune_model(finetune_cfg)
        except Exception as e:
            # Fix: chain the original exception instead of discarding its traceback.
            raise Exception(e) from e
        else:
            logger.info('Model finetuning finished.')
            return 'Succeed'
class COCOTextImageDataset(Dataset):
    """COCO caption dataset using the Karpathy split file.

    For 'train' splits every caption becomes one example; for val/test one
    example per image (all reference captions kept as 'targets'). Images are
    optionally loaded and randomly resize-cropped when load_image is True.

    NOTE(review): the default image/text paths are user-specific absolute
    paths -- presumably overridden by callers; confirm.
    """

    def __init__(self, split='karpathy_test', image_dir='/playpen3/home/jmincho/workspace/datasets/COCO/images/', text_data_file='/playpen3/home/jmincho/workspace/datasets/COCO/dataset_coco.json', text_len=256, image_size=128, truncate_captions=False, resize_ratio=0.75, tokenizer=None, shuffle=False, load_image=False):
        super().__init__()
        self.shuffle = shuffle
        self.split = split
        with open(text_data_file, 'r') as f:
            karpathy_data = json.load(f)
        print('Loaded text data from {}'.format(text_data_file))
        self.load_image = load_image
        if self.load_image:
            self.image_dir = Path(image_dir).resolve()
        self.text_len = text_len
        self.truncate_captions = truncate_captions
        self.resize_ratio = resize_ratio
        self.tokenizer = tokenizer
        if self.load_image:
            # RGB-convert, random resized crop (aspect kept), tensorize.
            self.image_transform = T.Compose([T.Lambda((lambda img: (img.convert('RGB') if (img.mode != 'RGB') else img))), T.RandomResizedCrop(image_size, scale=(self.resize_ratio, 1.0), ratio=(1.0, 1.0)), T.ToTensor()])
        # Karpathy 'restval' images are folded into the training split.
        split_rename = {'train': 'train', 'restval': 'train', 'val': 'val', 'test': 'test'}
        karpathy_split_name = self.split.split('_')[(- 1)]
        data = []
        for datum in karpathy_data['images']:
            re_split = split_rename[datum['split']]
            if (re_split != karpathy_split_name):
                continue
            if (re_split == 'train'):
                # One example per caption for training.
                for d in datum['sentences']:
                    img_id = datum['filename'].split('.')[0]
                    new_datum = {'filename': datum['filename'], 'img_id': img_id, 'sent': d['raw'].strip(), 'targets': [d['raw'].strip() for d in datum['sentences']], 'is_train': True}
                    data.append(new_datum)
            else:
                # One example per image for evaluation; all captions kept as targets.
                img_id = datum['filename'].split('.')[0]
                new_datum = {'filename': datum['filename'], 'img_id': img_id, 'targets': [d['raw'].strip() for d in datum['sentences']], 'is_train': False}
                data.append(new_datum)
        self.text_data = data
        self.keys = list(range(len(self.text_data)))

    def __len__(self):
        return len(self.keys)

    def random_sample(self):
        return self.__getitem__(randint(0, (self.__len__() - 1)))

    def sequential_sample(self, ind):
        # Wrap around to the first item at the end of the dataset.
        if (ind >= (self.__len__() - 1)):
            return self.__getitem__(0)
        return self.__getitem__((ind + 1))

    def skip_sample(self, ind):
        if self.shuffle:
            return self.random_sample()
        return self.sequential_sample(ind=ind)

    def __getitem__(self, ind):
        key = self.keys[ind]
        datum = self.text_data[key]
        # NOTE(review): always uses targets[0], ignoring the per-caption
        # 'sent' field created for training examples -- confirm intended.
        caption = datum['targets'][0]
        tokenized_text = self.tokenizer.tokenize(caption, self.text_len, truncate_text=self.truncate_captions).squeeze(0)
        if self.load_image:
            # NOTE(review): img_path is unbound if the id matches neither
            # 'val2014' nor 'train2014' (e.g. test2014) -- would raise
            # UnboundLocalError; confirm split coverage.
            if ('val2014' in datum['img_id']):
                img_path = ((self.image_dir / 'val2014') / datum['filename'])
            elif ('train2014' in datum['img_id']):
                img_path = ((self.image_dir / 'train2014') / datum['filename'])
            image_tensor = self.image_transform(PIL.Image.open(img_path))
            return (tokenized_text, image_tensor)
        else:
            out = {'img_id': datum['img_id'], 'caption': caption, 'filename': datum['filename'], 'tokenized_text': tokenized_text}
            return out

    def text_collate_fn(self, batch):
        """Collate dict-style items (text-only path) into a padded batch dict."""
        B = len(batch)
        L = max([len(b['tokenized_text']) for b in batch])
        batch_datum = {'img_id': [], 'filenames': [], 'caption': [], 'tokenized_text': torch.LongTensor(B, L)}
        if self.load_image:
            batch_datum['img_path'] = []
        for (i, datum) in enumerate(batch):
            batch_datum['img_id'].append(datum['img_id'])
            batch_datum['filenames'].append(datum['filename'])
            batch_datum['caption'].append(datum['caption'])
            batch_datum['tokenized_text'][i] = datum['tokenized_text']
            if self.load_image:
                # NOTE(review): __getitem__ returns a tuple (not a dict) when
                # load_image is True and never provides 'img_path' -- this
                # branch looks unreachable/broken; confirm.
                batch_datum['img_path'].append(datum['img_path'])
        return batch_datum
def Read_Files_in_Input_Folder(input_folder):
    """Recursively collect all '*.txt' files under `input_folder`, sorted by path."""
    found = []
    for current_dir, _, _ in os.walk(input_folder):
        found += glob(os.path.join(current_dir, '*.txt'))
    return sorted(found)
def execute_global_registration(source_down, target_down, source_fpfh, target_fpfh, voxel_size):
    """RANSAC global registration of two downsampled clouds via FPFH feature matching."""
    distance_threshold = voxel_size * 1.5
    print(':: RANSAC registration on downsampled point clouds.')
    print((' Since the downsampling voxel size is %.3f,' % voxel_size))
    print((' we use a liberal distance threshold %.3f.' % distance_threshold))
    estimation = o3d.pipelines.registration.TransformationEstimationPointToPoint(False)
    checkers = [
        o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
        o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(distance_threshold),
    ]
    criteria = o3d.pipelines.registration.RANSACConvergenceCriteria(100000, 0.999)
    return o3d.pipelines.registration.registration_ransac_based_on_feature_matching(
        source_down, target_down, source_fpfh, target_fpfh, True,
        distance_threshold, estimation, 3, checkers, criteria)
class Generator(nn.Module):
    """Wraps a normalizing Flow, exposing sampling, inverse mapping,
    exact log-density evaluation and data-dependent initialization."""

    def __init__(self, flow: Flow):
        super(Generator, self).__init__()
        self.flow = flow

    def sync(self):
        self.flow.sync()

    def generate(self, epsilon: torch.Tensor, h: Union[(None, torch.Tensor)]=None) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Map base noise `epsilon` forward through the flow; returns (sample, logdet)."""
        (z, logdet) = self.flow.fwdpass(epsilon, h)
        return (z, logdet)

    def encode(self, x: torch.Tensor, h: Union[(None, torch.Tensor)]=None) -> torch.Tensor:
        """Map data back to base noise (inverse pass), discarding the log-det."""
        return self.flow.bwdpass(x, h)[0]

    def log_probability(self, x: torch.Tensor, h: Union[(None, torch.Tensor)]=None) -> torch.Tensor:
        """Exact log p(x) under a standard-normal base distribution plus the flow's log-det."""
        (epsilon, logdet) = self.flow.bwdpass(x, h)
        epsilon = epsilon.view(epsilon.size(0), (- 1))
        # -(1/2) * (||eps||^2 + D * log(2*pi)) + log|det J|
        log_probs = (epsilon.mul(epsilon).sum(dim=1) + (math.log((math.pi * 2.0)) * epsilon.size(1)))
        return (log_probs.mul((- 0.5)) + logdet)

    def init(self, data: torch.Tensor, h=None, init_scale=1.0):
        return self.flow.bwdpass(data, h, init=True, init_scale=init_scale)

    @classmethod
    def from_params(cls, params: Dict) -> 'Generator':
        # Fix: restore the @classmethod decorator. The body uses `cls` and is
        # clearly an alternate constructor (Generator.from_params(params));
        # without the decorator the dict would be bound as `cls`.
        flow_params = params.pop('flow')
        flow = Flow.by_name(flow_params.pop('type')).from_params(flow_params)
        return Generator(flow)
def kinverted_res_block(x, expansion, filters, kernel_size, stride, se_ratio, activation, block_id, kType=0):
    """MobileNetV3-style inverted residual block built with cai's k-pointwise convs.

    Expand (pointwise) -> depthwise conv -> optional squeeze-excite ->
    project (pointwise), with a residual Add when stride == 1 and the
    channel count is unchanged. `kType` selects the k-conv variant.
    """
    channel_axis = cai.layers.GetChannelAxis()
    shortcut = x
    prefix = 'expanded_conv/'
    infilters = backend.int_shape(x)[channel_axis]
    if block_id:
        prefix = 'expanded_conv_{}/'.format(block_id)
        # Expansion happens only for non-zero block ids (block 0 skips it).
        x = cai.layers.kPointwiseConv2D(x, filters=_depth((infilters * expansion)), channel_axis=channel_axis, name=(prefix + 'expand'), activation=activation, has_batch_norm=True, use_bias=False, kType=kType)
    if (stride == 2):
        # Manual padding so the strided depthwise conv can use 'valid'.
        x = layers.ZeroPadding2D(padding=correct_pad(backend, x, kernel_size), name=(prefix + 'depthwise/pad'))(x)
    x = layers.DepthwiseConv2D(kernel_size, strides=stride, padding=('same' if (stride == 1) else 'valid'), use_bias=False, name=(prefix + 'depthwise'))(x)
    x = layers.BatchNormalization(axis=channel_axis, epsilon=0.001, momentum=0.999, name=(prefix + 'depthwise/BatchNorm'))(x)
    x = layers.Activation(activation)(x)
    if se_ratio:
        x = kse_block(x, _depth((infilters * expansion)), se_ratio, prefix, kType=kType)
    # Linear (no activation) projection back to `filters` channels.
    x = cai.layers.kPointwiseConv2D(x, filters=filters, channel_axis=channel_axis, name=(prefix + 'project'), activation=None, has_batch_norm=True, use_bias=False, kType=kType)
    if ((stride == 1) and (infilters == filters)):
        x = layers.Add(name=(prefix + 'Add'))([shortcut, x])
    return x
class KeyLine3DAnnotationList(Annotation):
    """Annotation container holding a list of 3D keylines that share one ontology."""

    def __init__(self, ontology, linelist):
        """Validate ontology/element types and store the keyline list.

        Args:
            ontology: KeyLineOntology describing line classes.
            linelist: list of KeyLine3D instances.
        """
        super().__init__(ontology)
        assert isinstance(self._ontology, KeyLineOntology), 'Trying to load annotation with wrong type of ontology!'
        for line in linelist:
            assert isinstance(line, KeyLine3D), f'Can only instantate an annotation from a list of KeyLine3D, not {type(line)}'
        self._linelist = linelist

    # NOTE(review): 'cls' as first parameter indicates this was a @classmethod
    # upstream; the decorator is missing in this chunk — confirm.
    def load(cls, annotation_file, ontology):
        """Parse a KeyLine3DAnnotations protobuf file and build the annotation list."""
        _annotation_pb2 = parse_pbobject(annotation_file, KeyLine3DAnnotations)
        # Map proto class ids to contiguous ids and attach the ontology colormap.
        linelist = [KeyLine3D(line=np.float32([[vertex.x, vertex.y, vertex.z] for vertex in ann.vertices]), class_id=ontology.class_id_to_contiguous_id[ann.class_id], color=ontology.colormap[ann.class_id], attributes=getattr(ann, 'attributes', {})) for ann in _annotation_pb2.annotations]
        return cls(ontology, linelist)

    def to_proto(self):
        """Serialize back to a KeyLine3DAnnotations protobuf message (contiguous ids mapped back to class ids)."""
        return KeyLine3DAnnotations(annotations=[KeyLine3DAnnotation(class_id=self._ontology.contiguous_id_to_class_id[line.class_id], vertices=[KeyPoint3D(point=np.float32([x, y, z]), class_id=line.class_id, instance_id=line.instance_id, color=line.color, attributes=line.attributes).to_proto() for (x, y, z) in zip(line.x, line.y, line.z)], attributes=line.attributes) for line in self._linelist])

    def save(self, save_dir):
        """Save the annotations as JSON via save_pbobject_as_json."""
        return save_pbobject_as_json(self.to_proto(), save_path=save_dir)

    def __len__(self):
        # Number of keylines held.
        return len(self._linelist)

    def __getitem__(self, index):
        """Return the KeyLine3D at *index*."""
        return self._linelist[index]

    def render(self):
        """Rendering is not implemented for 3D keylines."""
        raise NotImplementedError

    # NOTE(review): the accessors below read like @property getters upstream
    # (decorators appear stripped in this dump) — confirm.
    def xyz(self):
        """float32 array stacking each line's xyz coordinates."""
        return np.array([line.xyz.tolist() for line in self._linelist], dtype=np.float32)

    def class_ids(self):
        """int64 array of per-line contiguous class ids."""
        return np.array([line.class_id for line in self._linelist], dtype=np.int64)

    def attributes(self):
        """List of per-line attribute dicts."""
        return [line.attributes for line in self._linelist]

    def instance_ids(self):
        """int64 array of per-line instance ids."""
        return np.array([line.instance_id for line in self._linelist], dtype=np.int64)

    def hexdigest(self):
        """Deterministic uid derived from the serialized protobuf."""
        return generate_uid_from_pbobject(self.to_proto())
def select_and_sample_hyperparameter_config_for_cnn(configurations):
    """Select this task's CNN configuration and sample its hyperparameters.

    Picks ``configurations[task_id % len(configurations)]`` — ``task_id`` is a
    module-level global not visible in this chunk (presumably an array-job
    index; confirm). Builds a ParameterConfiguration, loads the RadarDataset,
    then samples a log-uniform learning rate and a power-of-two batch size
    within the limits given by the chosen configuration.

    Returns:
        Tuple of (dataset, hyperparameter_config).
    """
    conf = configurations[(task_id % len(configurations))]
    hyperparameter_config = ParameterConfiguration(optimization_algo=torch.optim.Adam, criterion=conf['criterion'], scheduler_partial=None, num_model_initializations=1, scaler=conf['scaler'], input_size=2028, output_size=2048, num_epochs=conf['num_epochs'], input_data_source=conf['data_source'], mat_path=conf['mat_path'], model=conf['model'](num_conv_layer=conf['num_conv_layer'], num_filters=conf['num_filters'], filter_size=conf['filter_size']))
    batch_size_exp_lower_limit = conf['batch_size_exp_lower_limit']
    batch_size_exp_upper_limit = conf['batch_size_exp_upper_limit']
    learning_rate_lower_limit = conf['learning_rate_lower_limit']
    learning_rate_upper_limit = conf['learning_rate_upper_limit']
    dataset = RadarDataset(hyperparameter_config.input_data_source, hyperparameter_config.mat_path, hyperparameter_config.scaler, is_classification=False)
    # The placeholder input_size (2028) is replaced by the dataset's true sample size.
    hyperparameter_config.input_size = dataset.num_values_per_sample
    if (learning_rate_lower_limit == learning_rate_upper_limit):
        lr = learning_rate_lower_limit
    else:
        # Log-uniform draw between the limits; [0] unwraps the 1-element sample.
        lr = loguniform(learning_rate_lower_limit, learning_rate_upper_limit, 1)[0]
    assert (learning_rate_lower_limit <= lr <= learning_rate_upper_limit)
    hyperparameter_config.learning_rate = lr
    if (batch_size_exp_lower_limit == batch_size_exp_upper_limit):
        batch_size = int(math.pow(2, batch_size_exp_lower_limit))
    else:
        # Batch size is 2**e with e drawn uniformly from the exponent range.
        batch_size = int(math.pow(2, int(np.random.randint(batch_size_exp_lower_limit, batch_size_exp_upper_limit, 1))))
    # Cap at the model's maximum feasible batch size.
    if (batch_size > hyperparameter_config.model.max_batch_size):
        batch_size = hyperparameter_config.model.max_batch_size
    hyperparameter_config.batch_size = batch_size
    return (dataset, hyperparameter_config)
def main() -> int:
    """Run best-first-search decoding on a fixed sample input and print the result.

    Returns:
        Process exit status (0 on success). The previous version was declared
        ``-> int`` but implicitly returned None; it also assigned
        ``args.task = 'sum'`` / ``args.dataset = 'xsum'`` only to overwrite
        them immediately — that dead code is removed.
    """
    input_text = 'Transformers provides APIs to easily download and train state-of-the-art pretrained models. Using pretrained models can reduce your compute costs, carbon footprint, and save you time from training a model from scratch. The models can be used across different modalities such as: Text: text classification, information extraction, question answering, summarization, translation, and text generation in over 100 languages. Images: image classification, object detection, and segmentation. Our library supports seamless integration between three of the most popular deep learning libraries: PyTorch, TensorFlow and JAX. Train your model in three lines of code in one framework, and load it for inference with another.'
    args = process_arg()
    logging.info(args)
    # Force custom-input mode regardless of CLI defaults.
    args.task = 'custom'
    args.dataset = 'custom_input'
    (tokenizer, model, dataset, dec_prefix) = setup_model(args.task, args.dataset, args.hf_model_name, args.device)
    # NOTE(review): param_sim_function is currently unused — possibly meant to
    # feed config_heu; confirm against the search implementation.
    param_sim_function = {'ngram_suffix': args.ngram_suffix, 'len_diff': args.len_diff, 'merge': args.merge}
    config_search = {'post': args.post, 'post_ratio': args.post_ratio, 'dfs_expand': args.dfs_expand, 'heu': args.use_heu}
    input_ids = tokenizer(input_text, return_tensors='pt').input_ids.to(args.device)
    if args.max_len == -1:
        # Auto mode: decode up to twice the source length.
        cur_max_len = input_ids.squeeze().size()[0] * 2
        comp_budget = cur_max_len * args.beam_size
    else:
        assert args.max_len > 1
        comp_budget = args.max_len * args.beam_size
        cur_max_len = args.max_len
    output = bfs(doc_input_ids=input_ids, model=model, tokenizer=tokenizer, dec_prefix=dec_prefix, avg_score=args.avg_score, max_len=cur_max_len, k_best=args.k_best, comp_budget=comp_budget, config_heu=None, config_search=config_search)
    mo = SearchModelOutput(ends=output)
    print(mo)
    return 0
class ResNet34(ResNet):
    """34-layer ResNet: four BasicBlock stages of depth 3-4-6-3."""

    def __init__(self, num_classes=10, num_channels=3):
        stage_depths = [3, 4, 6, 3]
        super().__init__(BasicBlock, stage_depths, num_classes=num_classes, num_channels=num_channels)
class TwoStageRandomDirectionRanker(RankerBase):
    """Ranks solutions by archive-add status, then by their projection onto a
    random direction in measure space.

    FIX: the original contained a bare ``_measure_dir.setter`` expression — a
    mangled ``@target_measure_dir.setter`` decorator (a NameError as written) —
    and the getter lacked ``@property``; both decorators are restored.
    """

    def __init__(self, seed=None):
        super().__init__(seed)
        # Direction vector used by rank(); populated by reset().
        self._target_measure_dir = None

    @property
    def target_measure_dir(self):
        """Direction onto which measures are projected; set by reset()."""
        return self._target_measure_dir

    @target_measure_dir.setter
    def target_measure_dir(self, value):
        self._target_measure_dir = value

    def rank(self, emitter, archive, data, add_info):
        if self._target_measure_dir is None:
            raise RuntimeError('target measure direction not set')
        projections = np.dot(data['measures'], self._target_measure_dir)
        ranking_values = np.stack((add_info['status'], projections), axis=-1)
        # lexsort sorts ascending with the last key as primary; the flips turn
        # this into a descending sort keyed first on 'status', then projection.
        return (np.flip(np.lexsort(np.flip(ranking_values, axis=-1).T)), ranking_values)

    rank.__doc__ = f'''
    Ranks the solutions first by whether they are added, then by their projection
    onto a random direction in the archive.
    {_RANK_ARGS}
    '''

    def reset(self, emitter, archive):
        # Draw a fresh random direction and scale it by each measure's range so
        # all dimensions contribute comparably to the projection.
        ranges = archive.upper_bounds - archive.lower_bounds
        measure_dim = len(ranges)
        unscaled_dir = self._rng.standard_normal(measure_dim)
        self._target_measure_dir = unscaled_dir * ranges

    reset.__doc__ = RandomDirectionRanker.reset.__doc__
def sum(input_tensor, dim, keepdims=False):
    """Keras-backend-style reduce-sum wrapper around tf.math.reduce_sum."""
    # NOTE: intentionally shadows the builtin `sum` to mirror the backend API.
    return tf.math.reduce_sum(input_tensor, axis=dim, keepdims=keepdims)
def test_find_duplicates_dict(cnn, mocker):
    """find_duplicates given an encoding map must delegate to
    CNN._find_duplicates_dict with all arguments forwarded unchanged."""
    encoding_map = data_encoding_map()
    threshold = 0.9
    scores = True
    outfile = True
    # Patch the internal dict implementation so only delegation is exercised.
    find_dup_dict_mocker = mocker.patch('imagededup.methods.cnn.CNN._find_duplicates_dict')
    cnn.find_duplicates(encoding_map=encoding_map, min_similarity_threshold=threshold, outfile=outfile, scores=scores, num_sim_workers=cpu_count())
    find_dup_dict_mocker.assert_called_once_with(encoding_map=encoding_map, min_similarity_threshold=threshold, scores=scores, outfile=outfile, num_sim_workers=cpu_count())
# FIX: the dump rendered the registry decorator as a bare `_module()` call;
# restored as the registration decorator — confirm registry name upstream.
@DATASETS.register_module()
class CustomDataset(Dataset):
    """Custom detection dataset.

    Each entry of ``data_infos`` is a dict of the form::

        {'filename': ..., 'width': ..., 'height': ...,
         'ann': {'bboxes': <Nx4>, 'labels': <N>, ...}}

    Args:
        ann_file (str): Annotation file path.
        pipeline (list[dict]): Processing pipeline configs (built via Compose).
        classes (str | Sequence[str] | None): Class names; see get_classes().
        data_root (str | None): Root that relative paths are joined onto.
        img_prefix (str): Prefix of image paths.
        seg_prefix (str | None): Prefix of segmentation map paths.
        proposal_file (str | None): Precomputed proposal file.
        test_mode (bool): If True, skip filtering/grouping done for training.
        filter_empty_gt (bool): Requested empty-gt filtering; not supported by
            this base class (a warning is emitted in _filter_imgs).
    """

    CLASSES = None  # default class names; replaced per-instance by get_classes()

    def __init__(self, ann_file, pipeline, classes=None, data_root=None, img_prefix='', seg_prefix=None, proposal_file=None, test_mode=False, filter_empty_gt=True):
        self.ann_file = ann_file
        self.data_root = data_root
        self.img_prefix = img_prefix
        self.seg_prefix = seg_prefix
        self.proposal_file = proposal_file
        self.test_mode = test_mode
        self.filter_empty_gt = filter_empty_gt
        self.CLASSES = self.get_classes(classes)
        # Join every relative path onto data_root so callers may pass either form.
        if self.data_root is not None:
            if not osp.isabs(self.ann_file):
                self.ann_file = osp.join(self.data_root, self.ann_file)
            if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
                self.img_prefix = osp.join(self.data_root, self.img_prefix)
            if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
                self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
            if not (self.proposal_file is None or osp.isabs(self.proposal_file)):
                self.proposal_file = osp.join(self.data_root, self.proposal_file)
        self.data_infos = self.load_annotations(self.ann_file)
        if self.proposal_file is not None:
            self.proposals = self.load_proposals(self.proposal_file)
        else:
            self.proposals = None
        if not test_mode:
            # Drop undersized images and keep proposals index-aligned.
            valid_inds = self._filter_imgs()
            self.data_infos = [self.data_infos[i] for i in valid_inds]
            if self.proposals is not None:
                self.proposals = [self.proposals[i] for i in valid_inds]
            # Group images by aspect ratio for batching.
            self._set_group_flag()
        self.pipeline = Compose(pipeline)

    def __len__(self):
        """Total number of samples."""
        return len(self.data_infos)

    def load_annotations(self, ann_file):
        """Load annotation dicts from file (mmcv-supported formats)."""
        return mmcv.load(ann_file)

    def load_proposals(self, proposal_file):
        """Load precomputed proposals from file."""
        return mmcv.load(proposal_file)

    def get_ann_info(self, idx):
        """Return the 'ann' dict of sample *idx*."""
        return self.data_infos[idx]['ann']

    def get_cat_ids(self, idx):
        """Return the category ids present in sample *idx* as a list of ints."""
        # FIX: np.int was removed in NumPy 1.24; it was a plain alias of int.
        return self.data_infos[idx]['ann']['labels'].astype(int).tolist()

    def pre_pipeline(self, results):
        """Seed the results dict with prefixes and empty field lists before the pipeline runs."""
        results['img_prefix'] = self.img_prefix
        results['seg_prefix'] = self.seg_prefix
        results['proposal_file'] = self.proposal_file
        results['bbox_fields'] = []
        results['mask_fields'] = []
        results['seg_fields'] = []

    def _filter_imgs(self, min_size=32):
        """Return indices of images whose smaller side is at least *min_size*."""
        if self.filter_empty_gt:
            warnings.warn('CustomDataset does not support filtering empty gt images.')
        valid_inds = []
        for (i, img_info) in enumerate(self.data_infos):
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
        return valid_inds

    def _set_group_flag(self):
        """Flag each image by aspect ratio: 1 if width/height > 1, else 0."""
        self.flag = np.zeros(len(self), dtype=np.uint8)
        for i in range(len(self)):
            img_info = self.data_infos[i]
            if (img_info['width'] / img_info['height']) > 1:
                self.flag[i] = 1

    def _rand_another(self, idx):
        """Pick a random index from the same aspect-ratio group as *idx*."""
        pool = np.where(self.flag == self.flag[idx])[0]
        return np.random.choice(pool)

    def __getitem__(self, idx):
        """Return a pipeline-processed sample; in train mode, resample on pipeline failure."""
        if self.test_mode:
            return self.prepare_test_img(idx)
        while True:
            data = self.prepare_train_img(idx)
            if data is None:
                # Pipeline rejected the sample; retry with a same-group image.
                idx = self._rand_another(idx)
                continue
            return data

    def prepare_train_img(self, idx):
        """Build and run the training pipeline for sample *idx* (with annotations)."""
        img_info = self.data_infos[idx]
        ann_info = self.get_ann_info(idx)
        results = dict(img_info=img_info, ann_info=ann_info)
        if self.proposals is not None:
            results['proposals'] = self.proposals[idx]
        self.pre_pipeline(results)
        return self.pipeline(results)

    def prepare_test_img(self, idx):
        """Build and run the test pipeline for sample *idx* (no annotations)."""
        img_info = self.data_infos[idx]
        results = dict(img_info=img_info)
        if self.proposals is not None:
            results['proposals'] = self.proposals[idx]
        self.pre_pipeline(results)
        return self.pipeline(results)

    # FIX: restored @classmethod — the body uses `cls` and the decorator was
    # stripped in this dump.
    @classmethod
    def get_classes(cls, classes=None):
        """Resolve class names: None -> cls.CLASSES, str -> file of names, sequence -> as-is."""
        if classes is None:
            return cls.CLASSES
        if isinstance(classes, str):
            class_names = mmcv.list_from_file(classes)
        elif isinstance(classes, (tuple, list)):
            class_names = classes
        else:
            raise ValueError(f'Unsupported type {type(classes)} of classes.')
        return class_names

    def format_results(self, results, **kwargs):
        """Place-holder for converting results to a submission format.

        The body was empty in this chunk (a syntax error as written — the
        original docstring was likely stripped); subclasses are expected to
        override. TODO confirm upstream behavior.
        """

    def evaluate(self, results, metric='mAP', logger=None, proposal_nums=(100, 300, 1000), iou_thr=0.5, scale_ranges=None):
        """Evaluate detection results.

        Args:
            results: per-image detection results.
            metric (str | list[str]): 'mAP' or 'recall' (a single metric only).
            logger: logger passed through to the eval utilities.
            proposal_nums: proposal counts for recall evaluation.
            iou_thr (float | list[float]): IoU threshold(s).
            scale_ranges: scale ranges for mAP evaluation.

        Returns:
            OrderedDict of metric name -> value.
        """
        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = OrderedDict()
        iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
        if metric == 'mAP':
            assert isinstance(iou_thrs, list)
            mean_aps = []
            for iou_thr in iou_thrs:
                print_log(f'''
{('-' * 15)}iou_thr: {iou_thr}{('-' * 15)}''')
                (mean_ap, _) = eval_map(results, annotations, scale_ranges=scale_ranges, iou_thr=iou_thr, dataset=self.CLASSES, logger=logger)
                mean_aps.append(mean_ap)
                eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
            eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
        elif metric == 'recall':
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            recalls = eval_recalls(gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
            for (i, num) in enumerate(proposal_nums):
                for (j, iou) in enumerate(iou_thrs):
                    eval_results[f'{num}{iou}'] = recalls[(i, j)]
            if recalls.shape[1] > 1:
                # Average recall across IoU thresholds.
                ar = recalls.mean(axis=1)
                for (i, num) in enumerate(proposal_nums):
                    eval_results[f'{num}'] = ar[i]
        return eval_results
def setupVLANEnvironment(cfg, mode):
    """Provision the VLAN test environment described by the JSON config *cfg*.

    For mode 0 or 1, installs SSH keys and an ansible inventory for all agent
    hosts and runs the VLAN playbook. In all modes, cleans up each agent
    (delete.sh) and restarts its Docker service over SSH.

    Args:
        cfg: path to the JSON configuration file.
        mode: setup mode; 0/1 trigger the full ansible provisioning pass.

    Returns:
        List of agent host IPs from the config.
    """
    with open(cfg, 'r') as f:
        config = json.load(f)
    HOST_IPS = [server['ip'] for server in config['vlan']['servers']]
    if mode in [0, 1]:
        password = getpass(color.BOLD + 'Please enter linux password:' + color.ENDC)
        run_cmd_pwd('rm /etc/ansible/hosts', password)
        run_cmd_pwd('cp framework/install_scripts/ssh_keys/id_rsa ~/id_rsa', password)
        run_cmd_pwd('cp framework/install_scripts/ssh_keys/id_rsa.pub ~/id_rsa.pub', password)
        # Write the ansible inventory for all agent hosts.
        with open('framework/config/hosts', 'w') as f:
            f.write('[agents]\n')
            for ip in HOST_IPS:
                f.write(ip + ' ansible_ssh_private_key_file=~/id_rsa ansible_ssh_user=ansible\n')
        run_cmd_pwd('cp framework/config/hosts /etc/ansible/hosts', password)
        run_cmd_pwd('cp framework/config/ansible.cfg /etc/ansible/ansible.cfg', password)
        run_cmd('ansible-playbook framework/config/VLAN_ansible.yml')
    uname = 'ansible'
    for ip in HOST_IPS:
        # FIX: user and host must be joined with '@' for a valid ssh target
        # (previously concatenated with an empty string: 'ansible<ip>').
        res = os.system('ssh -o StrictHostKeyChecking=no -i framework/install_scripts/ssh_keys/id_rsa ' + uname + '@' + ip + ' ~/agent/scripts/delete.sh > /dev/null 2>&1')
        res = os.system('ssh -o StrictHostKeyChecking=no -i framework/install_scripts/ssh_keys/id_rsa ' + uname + '@' + ip + ' sudo service docker restart > /dev/null 2>&1')
    return HOST_IPS
def train_data_creator(config, batch_size):
    """Create the training DataLoader over the TCMF Horovod dataset.

    ``batch_size`` is accepted for interface compatibility but intentionally
    unused here: the loader is built with batch_size=None, presumably because
    the dataset yields pre-batched items — TODO confirm against the caller.
    """
    return DataLoader(TcmfTrainDatasetHorovod(config), batch_size=None)
def calc_model_parameters(model):
    """Return the total parameter count of *model* in millions, rounded to 2 decimals.

    Replaces the hand-rolled nested product loop with math.prod over each
    parameter's size; an empty (0-dim) size contributes 1, matching the
    original's cnt = 1 initialization.

    Args:
        model: object exposing .parameters() yielding sized tensors.

    Returns:
        float: parameter count / 1e6, rounded to two decimal places.
    """
    total_params = sum(math.prod(param.size()) for param in model.parameters())
    return round(total_params / 1000000.0, 2)
class DimshuffleLayer(Layer):
    """Rearrange input dimensions, Theano ``dimshuffle``-style.

    ``pattern`` is a tuple whose elements are input-dimension indices (ints)
    or 'x' to insert a new broadcastable axis of size 1. Input dimensions
    omitted from the pattern are collapsed, which is only legal when their
    size is 1 or None (unknown).
    """

    def __init__(self, incoming, pattern, **kwargs):
        super(DimshuffleLayer, self).__init__(incoming, **kwargs)
        # Validate: each integer dimension may appear at most once.
        used_dims = set()
        for p in pattern:
            if isinstance(p, int):
                if p in used_dims:
                    raise ValueError('pattern contains dimension {0} more than once'.format(p))
                used_dims.add(p)
            elif p == 'x':
                pass
            else:
                # FIX: message previously read 'dimensionindices' (missing space).
                raise ValueError("pattern should only contain dimension indices or 'x', not {0}".format(p))
        self.pattern = pattern
        # Compute the output shape eagerly so invalid patterns fail at construction.
        self.get_output_shape_for(self.input_shape)

    def get_output_shape_for(self, input_shape):
        """Map *input_shape* through the dimshuffle pattern.

        Raises:
            ValueError: if the pattern indexes beyond the input rank, or
                attempts to collapse a non-broadcastable dimension.
        """
        output_shape = []
        dims_used = [False] * len(input_shape)
        for p in self.pattern:
            if isinstance(p, int):
                if (p < 0) or (p >= len(input_shape)):
                    raise ValueError('pattern contains {0}, but input shape has {1} dimensions only'.format(p, len(input_shape)))
                o = input_shape[p]
                dims_used[p] = True
            elif p == 'x':
                o = 1
            output_shape.append(o)
        # Dimensions absent from the pattern are dropped; only size 1 or
        # unknown (None) dimensions may be dropped safely.
        for (i, (dim_size, used)) in enumerate(zip(input_shape, dims_used)):
            if (not used) and (dim_size != 1) and (dim_size is not None):
                # FIX: message previously read 'notbroadcastable' (missing space).
                raise ValueError('pattern attempted to collapse dimension {0} of size {1}; dimensions with size != 1/None are not broadcastable and cannot be collapsed'.format(i, dim_size))
        return tuple(output_shape)

    def get_output_for(self, input, **kwargs):
        """Apply the dimshuffle pattern to the symbolic input tensor."""
        return input.dimshuffle(self.pattern)
class MinAtarEnv(gym.Wrapper):
    """Gym wrapper around a MinAtar Environment yielding channel-first uint8 observations."""

    def __init__(self, *args, **kwargs):
        self.env = Environment(*args, **kwargs)
        # A 6-way discrete action space is attached; the remaining gym
        # attributes MinAtar does not provide are stubbed with None.
        self.env.action_space = gym.spaces.Discrete(6)
        self.env.observation_space = None
        self.env.reward_range = None
        self.env.metadata = None
        super().__init__(self.env)

    def reset(self):
        """Reset the underlying env and return the initial observation (uint8, channels first)."""
        self.env.reset()
        return self.env.state().astype(np.uint8).transpose(2, 0, 1)

    def step(self, act):
        """Advance one step and return gym's (state, reward, done, info) 4-tuple."""
        (reward, done) = self.env.act(act)
        state = self.env.state().astype(np.uint8).transpose(2, 0, 1)
        return (state, reward, done, {})

    # NOTE(review): reads like a @property getter upstream (decorator appears
    # stripped in this dump) — confirm.
    def num_channels(self):
        """Number of observation channels (last axis of MinAtar's state shape)."""
        return self.env.state_shape()[(- 1)]

    def render(self, *args, **kwargs):
        """Display the current state via MinAtar's viewer (side effect only; args ignored)."""
        self.env.display_state()
def main_worker(gpu, ngpus_per_node, args):
    """Per-process training worker (PyTorch ImageNet-example style).

    Handles distributed initialization, model construction and device
    placement, optional checkpoint resume, data loading, and the
    train/validate epoch loop. *gpu* is this process's local GPU index
    (or None for CPU / DataParallel modes).
    """
    global best_acc1
    args.gpu = gpu
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        if ((args.dist_url == 'env://') and (args.rank == (- 1))):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            args.rank = ((args.rank * ngpus_per_node) + gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    # --- model construction ---
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()
    # --- device placement ---
    if (not torch.cuda.is_available()):
        print('using CPU, this will be slow')
    elif args.distributed:
        if (args.gpu is not None):
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # Per-process share of the global batch size and data workers.
            args.batch_size = int((args.batch_size / ngpus_per_node))
            args.workers = int((((args.workers + ngpus_per_node) - 1) / ngpus_per_node))
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif (args.gpu is not None):
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    elif (args.arch.startswith('alexnet') or args.arch.startswith('vgg')):
        # AlexNet/VGG: parallelize only the conv features (their classifiers are large).
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    scheduler = StepLR(optimizer, step_size=30, gamma=0.1)
    # --- optionally resume from a checkpoint ---
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if (args.gpu is None):
                checkpoint = torch.load(args.resume)
            else:
                # Map checkpoint tensors straight onto this worker's GPU.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if (args.gpu is not None):
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            scheduler.load_state_dict(checkpoint['scheduler'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # --- data loading ---
    if args.dummy:
        print('=> Dummy data is used!')
        train_dataset = datasets.FakeData(1281167, (3, 224, 224), 1000, transforms.ToTensor())
        val_dataset = datasets.FakeData(50000, (3, 224, 224), 1000, transforms.ToTensor())
    else:
        traindir = os.path.join(args.data, 'train')
        valdir = os.path.join(args.data, 'val')
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        train_dataset = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
        val_dataset = datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
        val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False, drop_last=True)
    else:
        train_sampler = None
        val_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True, sampler=val_sampler)
    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return
    # --- train / validate epoch loop ---
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle the distributed sampler each epoch.
            train_sampler.set_epoch(epoch)
        train(train_loader, model, criterion, optimizer, epoch, args)
        acc1 = validate(val_loader, model, criterion, args)
        scheduler.step()
        is_best = (acc1 > best_acc1)
        best_acc1 = max(acc1, best_acc1)
        # Only the rank-0 process per node writes checkpoints.
        if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0))):
            save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'best_acc1': best_acc1, 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict()}, is_best)
def receptive_field(location, fieldmap):
    """Return the (offset, size) receptive field of the unit at *location*,
    obtained by composing *fieldmap* with a unit-sized pointwise fieldmap."""
    unit_fieldmap = (location, (1, 1), (1, 1))
    composed = compose_fieldmap(fieldmap, unit_fieldmap)
    return composed[:2]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.