code stringlengths 101 5.91M |
|---|
@pytest.mark.parametrize(
    'constraint_declaration, expected_constraint_class',
    [
        (Interval(Real, 0, 1, closed='both'), Interval),
        (StrOptions({'option1', 'option2'}), StrOptions),
        (Options(Real, {0.42, 1.23}), Options),
        ('array-like', _ArrayLikes),
        ('sparse matrix', _SparseMatrices),
        ('random_state', _RandomStates),
        (None, _NoneConstraint),
        (callable, _Callables),
        (int, _InstancesOf),
        ('boolean', _Booleans),
        ('verbose', _VerboseHelper),
        (MissingValues(numeric_only=True), MissingValues),
        (HasMethods('fit'), HasMethods),
        ('cv_object', _CVObjects),
    ],
)
def test_make_constraint(constraint_declaration, expected_constraint_class):
    """make_constraint wraps each supported declaration in the expected class.

    BUG FIX: the decorator was garbled to a bare `.parametrize(...)` line,
    which is a module-level syntax error; restored `@pytest.mark.parametrize`.
    """
    # make_constraint normalizes shorthand declarations (strings, None,
    # callables, types, constraint instances) into constraint objects.
    constraint = make_constraint(constraint_declaration)
    assert constraint.__class__ is expected_constraint_class
class SchemaNode(ASTNode):
    """AST node of kind 'SCHEMA'; renders textually as its own value."""

    def __init__(self, val, data_type, fields):
        # Delegate to ASTNode with the fixed 'SCHEMA' node kind.
        super().__init__('SCHEMA', val, data_type, fields)

    def textual_form_core(self):
        """Return the core textual representation: the node's value."""
        return self.val
def parse_int_from_env(key, default=None):
    """Read the environment variable *key* and parse it as an int.

    Args:
        key: Name of the environment variable to look up.
        default: Value returned when the variable is unset.

    Returns:
        The parsed integer, or *default* if *key* is not set.

    Raises:
        ValueError: If the variable is set but is not a valid integer.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # Variable absent: fall back to the caller-supplied default.
        return default
    try:
        return int(value)
    except ValueError as err:
        # Chain the original failure for easier debugging; also fixes the
        # grammar of the message ("a int" -> "an int").
        raise ValueError('If set, {} must be an int.'.format(key)) from err
def run_cn(_trainMode, _dataType, _oRate, _var, _GPU_ID):
    # Train and evaluate a ChoiceNet regression model on one synthetic dataset.
    # _dataType: which synthetic function to regress; _oRate: outlier rate;
    # _var: measurement noise variance; _GPU_ID: GPU to place the graph on.
    # NOTE(review): _trainMode is accepted but never used here - confirm.
    (_n, _oRange, _hdims, _actv, _maxEpoch, _PLOT_EVERY, _SAVE_NET, _SAVE_FIG) = get_common_config()
    (x, y, t) = data4reg(_type=_dataType, _n=_n, _oRange=_oRange, _oRate=_oRate, measVar=_var)
    # Dense grid on [-3, 3] used for test-time prediction/plotting.
    xtest = np.linspace(start=(- 3), stop=3, num=1000).reshape(((- 1), 1))
    # Reset TF1 global graph state and fix all seeds for reproducibility;
    # this must happen before the model class builds its graph.
    tf.reset_default_graph()
    tf.set_random_seed(0)
    np.random.seed(0)
    C = choiceNet_reg_class(_name=('CN_%s_oRate%d_var%.1e' % (_dataType, (_oRate * 100), _var)), _xdim=1, _ydim=1, _hdims=_hdims, _kmix=5, _actv=_actv, _bn=slim.batch_norm, _rho_ref_train=0.99, _tau_inv=0.01, _var_eps=1e-08, _pi1_bias=0.0, _logSigmaZval=0, _kl_reg_coef=1e-06, _l2_reg_coef=1e-06, _SCHEDULE_MDN_REG=False, _GPU_ID=_GPU_ID, _VERBOSE=False)
    sess = gpusession()
    sess.run(tf.global_variables_initializer())
    # Train, then plot/evaluate on the dense grid, then release the session.
    C.train(_sess=sess, _x=x, _y=y, _yref=t, _lr=0.1, _batchSize=256, _maxEpoch=_maxEpoch, _kp=1.0, _LR_SCHEDULE=True, _PRINT_EVERY=50, _PLOT_EVERY=_PLOT_EVERY, _SAVE_TXT=True, _SAVE_BEST_NET=_SAVE_NET, _SAVE_FINAL=_SAVE_NET)
    C.test(_sess=sess, _xdata=x, _ydata=y, _yref=t, _xtest=xtest, _titleStr=C.name, _PLOT_TRAIN=True, _PLOT_RES=True, _SAVE_FIG=_SAVE_FIG)
    sess.close()
class SpeedMonitor(Callback):
    """Log intra-step, inter-step, and per-epoch wall-clock timings.

    BUG FIX: the three bare ``_zero_only`` statements in the original body
    were garbled decorators (they would raise NameError at class creation);
    restored as ``@rank_zero_only``.
    NOTE(review): requires ``rank_zero_only`` (pytorch_lightning utilities)
    to be imported at module level - confirm against the file's imports.
    """

    def __init__(self, intra_step_time: bool=True, inter_step_time: bool=True, epoch_time: bool=True, verbose=False):
        super().__init__()
        # Which timing statistics to collect and log.
        self._log_stats = AttributeDict({'intra_step_time': intra_step_time, 'inter_step_time': inter_step_time, 'epoch_time': epoch_time})
        self.verbose = verbose

    def on_train_start(self, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule') -> None:
        self._snap_epoch_time = None

    def on_train_epoch_start(self, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule') -> None:
        # Reset per-epoch timing snapshots.
        self._snap_intra_step_time = None
        self._snap_inter_step_time = None
        self._snap_epoch_time = time.time()

    def on_validation_epoch_start(self, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule') -> None:
        self._snap_inter_step_time = None

    def on_test_epoch_start(self, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule') -> None:
        self._snap_inter_step_time = None

    @rank_zero_only
    def on_train_batch_start(self, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule', batch: Any, batch_idx: int) -> None:
        """Snapshot the batch start time and log the inter-step gap (ms)."""
        if self._log_stats.intra_step_time:
            self._snap_intra_step_time = time.time()
        if not trainer._logger_connector.should_update_logs:
            return
        logs = {}
        if self._log_stats.inter_step_time and self._snap_inter_step_time:
            logs['time/inter_step (ms)'] = (time.time() - self._snap_inter_step_time) * 1000
        if trainer.logger is not None:
            trainer.logger.log_metrics(logs, step=trainer.global_step)

    @rank_zero_only
    def on_train_batch_end(self, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule', outputs: STEP_OUTPUT, batch: Any, batch_idx: int) -> None:
        """Snapshot the batch end time and log the intra-step duration (ms)."""
        if self._log_stats.inter_step_time:
            self._snap_inter_step_time = time.time()
        if self.verbose and self._log_stats.intra_step_time and self._snap_intra_step_time:
            pl_module.print(f'time/intra_step (ms): {((time.time() - self._snap_intra_step_time) * 1000)}')
        if not trainer._logger_connector.should_update_logs:
            return
        logs = {}
        if self._log_stats.intra_step_time and self._snap_intra_step_time:
            logs['time/intra_step (ms)'] = (time.time() - self._snap_intra_step_time) * 1000
        if trainer.logger is not None:
            trainer.logger.log_metrics(logs, step=trainer.global_step)

    @rank_zero_only
    def on_train_epoch_end(self, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule') -> None:
        """Log the elapsed epoch time in seconds."""
        logs = {}
        if self._log_stats.epoch_time and self._snap_epoch_time:
            logs['time/epoch (s)'] = time.time() - self._snap_epoch_time
        if trainer.logger is not None:
            trainer.logger.log_metrics(logs, step=trainer.global_step)
def iter_corpus(filename, callback, skip_empty_lines=True):
    """Iterate a corpus file, dispatching to the Bliss or plain-text reader.

    Bliss XML corpora go through _iter_bliss; everything else is treated as
    plain text, where blank lines may optionally be skipped.
    """
    if not _is_bliss(filename):
        _iter_txt(filename=filename, callback=callback, skip_empty_lines=skip_empty_lines)
    else:
        _iter_bliss(filename=filename, callback=callback)
class CyExec(CythonCommand, libpython.PyExec, EvaluateOrExecuteCodeMixin):
    """gdb command '-cy-exec': execute Python code inside the inferior."""

    name = '-cy-exec'
    command_class = gdb.COMMAND_STACK
    completer_class = gdb.COMPLETE_NONE

    def invoke(self, expr, from_tty):
        # Normalize the user-supplied source, run it in the inferior, and
        # drop the reference the executor hands back.
        expr, input_type = self.readcode(expr)
        runner = libpython.PythonCodeExecutor()
        result = runner.evalcode(expr, runner.Py_single_input)
        runner.xdecref(result)
def check_ppf_private(distfn, arg, msg):
    """Assert that the distribution's private _ppf yields no NaNs."""
    quantiles = np.array([0.1, 0.5, 0.9])
    values = distfn._ppf(quantiles, *arg)
    npt.assert_(not np.isnan(values).any(), msg + 'ppf private is nan')
class LLVMCodeGenExecuted(ExecutionCounter):
    """ExecutionCounter tracking executions of LLVM-codegenned code."""

    def __init__(self):
        # Counter key consumed by the ExecutionCounter infrastructure.
        super().__init__('llvm_codegen_executed')
class ImagesDataset(Dataset):
    """Paired source/target image dataset loaded with OpenCV.

    In 'test' mode every sample carries a fixed one-hot label; otherwise a
    random binary vector of length 5 is drawn per item.
    """

    def __init__(self, source_root, target_root, target_transform=None, source_transform=None, mode='train', num_imgs=1000):
        # Cap both sides at num_imgs and keep them in sorted (paired) order.
        self.source_paths = sorted(make_dataset(source_root))[:num_imgs]
        self.target_paths = sorted(make_dataset(target_root))[:num_imgs]
        self.source_transform = source_transform
        self.target_transform = target_transform
        self.mode = mode

    def __len__(self):
        return len(self.source_paths)

    def _load_rgb(self, path):
        # OpenCV reads BGR; convert to RGB before any transform is applied.
        return cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)

    def __getitem__(self, index):
        from_path = self.source_paths[index]
        from_im = self.source_transform(self._load_rgb(from_path))
        if self.mode == 'test':
            label = torch.tensor([1, 0, 0, 0, 0])
        else:
            label = torch.randint(0, 2, (5,))
        to_path = self.target_paths[index]
        to_im = self.target_transform(self._load_rgb(to_path))
        return {'A': from_im, 'B': to_im, 'A_paths': from_path, 'B_paths': to_path, 'label': label}
class ServeCommand(BaseTransformersCLICommand):
    """Expose a transformers Pipeline over HTTP via FastAPI/uvicorn.

    BUG FIX: ``register_subcommand`` takes the parser (not ``self``) and is
    invoked through the class during CLI registration, so it must be a
    ``@staticmethod``. Also fixed the missing space in the dependency error
    message ('".Or install' -> '". Or install').
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach the 'serve' subcommand and its CLI arguments to *parser*."""
        serve_parser = parser.add_parser('serve', help='CLI tool to run inference requests through REST and GraphQL endpoints.')
        serve_parser.add_argument('--task', type=str, choices=get_supported_tasks(), help='The task to run the pipeline on')
        serve_parser.add_argument('--host', type=str, default='localhost', help='Interface the server will listen on.')
        serve_parser.add_argument('--port', type=int, default=8888, help='Port the serving will listen to.')
        serve_parser.add_argument('--workers', type=int, default=1, help='Number of http workers')
        serve_parser.add_argument('--model', type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument('--config', type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument('--tokenizer', type=str, help='Tokenizer name to use.')
        serve_parser.add_argument('--device', type=int, default=(- 1), help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)')
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        """Validate serving dependencies and build the FastAPI application.

        Raises:
            RuntimeError: If FastAPI/uvicorn are not installed.
        """
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            # Fail fast with installation instructions.
            raise RuntimeError('Using serve command requires FastAPI and uvicorn. Please install transformers with [serving]: pip install "transformers[serving]". Or install FastAPI and uvicorn separately.')
        logger.info(f'Serving model over {host}:{port}')
        self._app = FastAPI(
            routes=[
                APIRoute('/', self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=['GET']),
                APIRoute('/tokenize', self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=['POST']),
                APIRoute('/detokenize', self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=['POST']),
                APIRoute('/forward', self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=['POST']),
            ],
            timeout=600,
        )

    def run(self):
        """Start the uvicorn server (blocking)."""
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        """GET /: expose the underlying model's configuration."""
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str=Body(None, embed=True), return_ids: bool=Body(False, embed=True)):
        """POST /tokenize: tokenize text, optionally also returning token ids."""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    def detokenize(self, tokens_ids: List[int]=Body(None, embed=True), skip_special_tokens: bool=Body(False, embed=True), cleanup_tokenization_spaces: bool=Body(True, embed=True)):
        """POST /detokenize: decode token ids back into a string."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model='', text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """POST /forward: run the pipeline on the given inputs."""
        if len(inputs) == 0:
            # Empty strings/lists short-circuit to an empty result.
            return ServeForwardResult(output=[], attention=[])
        try:
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {'error': str(e)})
class StandfordCars(CoOp):
    """CoOp dataset wrapper for the Stanford Cars split.

    (Class name typo 'Standford' is kept: renaming would break callers.)
    """

    def __init__(self, data_root: str, mode: str, backbone_name='resnet12', image_root='', split_path='splits/split_zhou_StanfordCars.json', image_sz=84) -> None:
        # Images live under <data_root>/stanford_cars/<image_root>.
        self.image_root = os.path.join(data_root, 'stanford_cars', image_root)
        super().__init__(data_root, mode, backbone_name, self.image_root, split_path, image_sz)

    def __getitem__(self, index: int):
        # Load, force RGB, and apply the configured transform.
        sample = Image.open(self.image_path[index]).convert('RGB')
        return (self.transform(sample), self.label[index])

    def __len__(self):
        return len(self.image_path)
def get_parser(disable: List[str]=None, lang: str='en', merge_terms: Optional[Set]=None, max_sent_len: Optional[int]=None) -> Callable:
    """Build a spaCy pipeline with a custom tokenizer and sentence segmenter.

    Args:
        disable: spaCy pipe names to disable (defaults to
            ner/parser/tagger/lemmatizer when falsy).
        lang: spaCy model name / language code to load.
        merge_terms: Terms to keep merged during sentence segmentation.
        max_sent_len: Optional hard cap on sentence length.

    Returns:
        The configured spaCy Language callable.
    """
    disable = (['ner', 'parser', 'tagger', 'lemmatizer'] if (not disable) else disable)
    # BUG FIX: the fallback default was a dict literal ({}), contradicting
    # the declared Optional[Set] type; use an empty set instead.
    merge_terms = (set() if (not merge_terms) else merge_terms)
    nlp = spacy.load(lang, disable=disable)
    nlp.tokenizer = ct_tokenizer(nlp)
    # Wrap the custom sentence-boundary rules for the SentenceSegmenter pipe.
    sbd_func = partial(ct_sbd_rules, merge_terms=merge_terms, max_sent_len=max_sent_len)
    sbd = SentenceSegmenter(nlp.vocab, strategy=sbd_func)
    nlp.add_pipe(sbd)
    return nlp
def main():
    # Entry point: train SSD300 on the KAIST multispectral pedestrian dataset,
    # checkpointing every epoch and evaluating on the test split from epoch 3.
    args = config.args
    train_conf = config.train
    checkpoint = train_conf.checkpoint
    start_epoch = train_conf.start_epoch
    epochs = train_conf.epochs
    phase = 'Multispectral'
    if (checkpoint is None):
        model = SSD300(n_classes=args.n_classes)
        # Biases get twice the base learning rate (standard SSD recipe).
        biases = list()
        not_biases = list()
        for (param_name, param) in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': (2 * train_conf.lr)}, {'params': not_biases}], lr=train_conf.lr, momentum=train_conf.momentum, weight_decay=train_conf.weight_decay, nesterov=False)
        optim_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int((epochs * 0.5)), int((epochs * 0.9))], gamma=0.1)
    else:
        # Resume: model/optimizer state come from the checkpoint file.
        checkpoint = torch.load(checkpoint)
        start_epoch = (checkpoint['epoch'] + 1)
        train_loss = checkpoint['loss']
        print(('\nLoaded checkpoint from epoch %d. Best loss so far is %.3f.\n' % (start_epoch, train_loss)))
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']
        # NOTE(review): the resume path uses a single LR milestone (0.5*epochs)
        # while the fresh-start path uses two - confirm the asymmetry is intended.
        optim_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int((epochs * 0.5))], gamma=0.1)
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    model = model.to(device)
    model = nn.DataParallel(model)
    criterion = MultiBoxLoss(priors_cxcy=model.module.priors_cxcy).to(device)
    train_dataset = KAISTPed(args, condition='train')
    train_loader = DataLoader(train_dataset, batch_size=train_conf.batch_size, shuffle=True, num_workers=config.dataset.workers, collate_fn=train_dataset.collate_fn, pin_memory=True)
    test_dataset = KAISTPed(args, condition='test')
    # Scale the eval batch size with the number of visible GPUs.
    test_batch_size = (args['test'].eval_batch_size * torch.cuda.device_count())
    test_loader = DataLoader(test_dataset, batch_size=test_batch_size, shuffle=False, num_workers=config.dataset.workers, collate_fn=test_dataset.collate_fn, pin_memory=True)
    if (args.exp_time is None):
        args.exp_time = datetime.now().strftime('%Y-%m-%d_%Hh%Mm')
    # Job artifacts (logs, checkpoints, detections) go to jobs/<time><name>.
    exp_name = (('_' + args.exp_name) if args.exp_name else '_')
    jobs_dir = os.path.join('jobs', (args.exp_time + exp_name))
    os.makedirs(jobs_dir, exist_ok=True)
    args.jobs_dir = jobs_dir
    logger = utils.make_logger(args)
    kwargs = {'grad_clip': args['train'].grad_clip, 'print_freq': args['train'].print_freq}
    for epoch in range(start_epoch, epochs):
        logger.info(((('#' * 20) + f' << Epoch {epoch:3d} >> ') + ('#' * 20)))
        train_loss = train_epoch(model=model, dataloader=train_loader, criterion=criterion, optimizer=optimizer, logger=logger, **kwargs)
        optim_scheduler.step()
        utils.save_checkpoint(epoch, model.module, optimizer, train_loss, jobs_dir)
        # Skip the costly evaluation during the first warm-up epochs.
        if (epoch >= 3):
            result_filename = os.path.join(jobs_dir, f'Epoch{epoch:03d}_test_det.txt')
            results = val_epoch(model, test_loader, config.test.input_size, min_score=0.1)
            save_results(results, result_filename)
            evaluate(config.PATH.JSON_GT_FILE, result_filename, phase)
class PromptTrainer():
    # Epoch-driven trainer with validation-based early stopping and a
    # best-checkpoint reload for final test evaluation. Accumulates
    # predictions/golds for the 'total', 'explicits', and 'implicits'
    # subsets and reports accuracy / macro-F1 scores.

    def __init__(self, model, config, train_loader, valid_loader, test_loader) -> None:
        self.model = model
        self.config = config
        (self.train_loader, self.valid_loader, self.test_loader) = (train_loader, valid_loader, test_loader)
        # Checkpoint path template; .format(epoch) is applied when saving.
        self.save_name = os.path.join(config.target_dir, config.save_name)
        self.final_score = 0
        self.final_res = ''
        (self.scores, self.lines) = ([], [])
        self.re_init()

    def train(self):
        # Full training loop: stop early after config.patience epochs without
        # improvement, then reload the best checkpoint and test it.
        (best_score, best_iter) = (0, (- 1))
        for epoch in tqdm(range(self.config.epoch_size)):
            self.model.global_epoch = epoch
            self.global_epoch = epoch
            self.train_step()
            result = self.evaluate_step(mode='valid')
            self.re_init()
            score = result['default']
            self.add_instance(result)
            res = self.get_best()
            if (score > best_score):
                (best_score, best_iter) = (score, epoch)
                save_name = self.save_name.format(epoch)
                if (not os.path.exists(self.config.target_dir)):
                    os.makedirs(self.config.target_dir)
                # Serialize from CPU, then move the model back to the device.
                torch.save({'epoch': epoch, 'model': self.model.cpu().state_dict(), 'best_score': best_score}, save_name)
                self.model.to(self.config.device)
            elif ((epoch - best_iter) > self.config.patience):
                print('Not upgrade for {} steps, early stopping...'.format(self.config.patience))
                break
        self.model.to(self.config.device)
        res = self.final_evaluate(best_iter)
        score = res['default']
        self.add_instance(res)
        save_name = self.save_name.format(epoch)
        (self.final_score, self.final_res) = (score, res)

    def train_step(self):
        # One pass over train_loader.
        # NOTE(review): loss.backward() runs per batch, but optimizer.step()
        # and zero_grad() run once AFTER the loop, so gradients accumulate
        # over the whole epoch - confirm this is intended.
        self.model.train()
        train_data = tqdm(self.train_loader)
        losses = []
        for (i, data) in enumerate(train_data):
            loss = self.model(**data)
            losses.append(loss.item())
            loss.backward()
            nn.utils.clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
            description = 'Epoch {}, loss:{:.4f}'.format(self.global_epoch, np.mean(losses))
            train_data.set_description(description)
        self.config.optimizer.step()
        self.config.scheduler.step()
        self.model.zero_grad()

    def evaluate_step(self, dataLoader=None, mode='valid'):
        # Evaluate without gradients on dataLoader (defaults to valid_loader)
        # and return the score report for *mode*.
        self.model.eval()
        dataLoader = (self.valid_loader if (dataLoader is None) else dataLoader)
        dataiter = dataLoader
        for (i, data) in tqdm(enumerate(dataiter), total=dataLoader.data_length):
            with torch.no_grad():
                output = self.model.evaluate(**data)
                self.add_output(data, output)
        result = self.report_score(mode=mode)
        return result

    def final_evaluate(self, epoch=0):
        # Reload the checkpoint saved at *epoch* and evaluate on the test set.
        PATH = self.save_name.format(epoch)
        self.model.load_state_dict(torch.load(PATH, map_location=self.config.device)['model'])
        self.model.eval()
        res = self.evaluate_step(self.test_loader, mode='test')
        self.add_instance(res)
        return res

    def add_instance(self, res):
        # Keep the full result dict for later best-score lookup.
        self.lines.append(res)

    def get_best(self):
        # Best result so far, ranked by the 'default' metric.
        best_id = np.argmax([w['default'] for w in self.lines])
        res = self.lines[best_id]
        return res

    def re_init(self):
        # Reset prediction/gold accumulators for a fresh evaluation pass.
        (self.preds, self.golds) = (defaultdict(list), defaultdict(list))
        self.keys = ['total', 'explicits', 'implicits']

    def add_output(self, data, output):
        # Accumulate predictions/golds overall plus per explicit/implicit
        # subset: 'explicits' takes items with implicits == 0, 'implicits'
        # those with implicits == 1.
        is_implicit = data['implicits'].tolist()
        gold = data['input_labels']
        for (i, key) in enumerate(self.keys):
            if (i == 0):
                self.preds[key] += output
                self.golds[key] += gold.tolist()
            else:
                if (i == 1):
                    ids = np.argwhere((np.array(is_implicit) == 0)).flatten()
                else:
                    ids = np.argwhere((np.array(is_implicit) == 1)).flatten()
                self.preds[key] += [output[w] for w in ids]
                self.golds[key] += [gold.tolist()[w] for w in ids]

    def report_score(self, mode='valid'):
        # Compute accuracy and macro-F1 for all subsets; floats are scaled
        # to percentages and rounded to 3 decimals. 'default' drives model
        # selection (overall macro-F1).
        res = {}
        res['Acc_SA'] = accuracy_score(self.golds['total'], self.preds['total'])
        res['F1_SA'] = f1_score(self.golds['total'], self.preds['total'], labels=[0, 1, 2], average='macro')
        res['F1_ESA'] = f1_score(self.golds['explicits'], self.preds['explicits'], labels=[0, 1, 2], average='macro')
        res['F1_ISA'] = f1_score(self.golds['implicits'], self.preds['implicits'], labels=[0, 1, 2], average='macro')
        res['default'] = res['F1_SA']
        res['mode'] = mode
        for (k, v) in res.items():
            if isinstance(v, float):
                res[k] = round((v * 100), 3)
        return res
class RegularArray(Content):
    """Rectangular (fixed sublist size) view over a flat content array."""

    def __init__(self, content, size):
        """Wrap *content* into equal-length sublists of *size* elements."""
        assert isinstance(content, Content)
        assert isinstance(size, int)
        assert (size > 0)
        self.content = content
        self.size = size

    @staticmethod
    def random(minlen=0, choices=None):
        # BUG FIX: 'random' takes no self and is invoked as
        # RegularArray.random(...); it must be a @staticmethod.
        size = random_length(1, 5)
        return RegularArray(Content.random((random_length(minlen) * size), choices), size)

    def __len__(self):
        # Trailing elements that do not fill a whole sublist are dropped.
        return (len(self.content) // self.size)

    def __getitem__(self, where):
        """Integer index -> one sublist; step-less slice -> RegularArray."""
        if isinstance(where, int):
            return self.content[(where * self.size):((where + 1) * self.size)]
        elif (isinstance(where, slice) and (where.step is None)):
            # NOTE(review): assumes where.start/stop are non-None ints.
            start = (where.start * self.size)
            stop = (where.stop * self.size)
            return RegularArray(self.content[start:stop], self.size)
        else:
            raise AssertionError(where)

    def tostring_part(self, indent, pre, post):
        """Render this node (and its content) as indented XML-ish text."""
        out = ((indent + pre) + '<RegularArray>\n')
        out += self.content.tostring_part((indent + '    '), '<content>', '</content>\n')
        out += (((indent + '    <size>') + str(self.size)) + '</size>\n')
        out += ((indent + '</RegularArray>') + post)
        return out

    def constructor(self):
        """Return Python source that reconstructs this array."""
        return (((('RegularArray(' + self.content.constructor()) + ', ') + repr(self.size)) + ')')
class Decoder(nn.Module):
    """Deconvolutional decoder mapping a latent vector to an image.

    A linear layer expands z into a 64-channel (img_size/4)^2 feature map,
    which three transposed convolutions upsample back to img_size.
    """

    def __init__(self, z_dim, c_dim, img_size):
        super(Decoder, self).__init__()
        # Spatial side length of the feature map the FC layer produces.
        self.img_4 = img_size / 4
        flat_features = int(self.img_4 * self.img_4 * 64)
        self.fc = nn.Sequential(nn.Linear(z_dim, flat_features), nn.ReLU())
        layers = [
            nn.ConvTranspose2d(64, 64, 4, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.ConvTranspose2d(64, 64, 4, stride=2, padding=2),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.ConvTranspose2d(64, int(c_dim), 4, stride=2, padding=1),
            nn.BatchNorm2d(int(c_dim)),
            nn.Sigmoid(),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, z):
        batch_size = z.shape[0]
        hidden = self.fc(z)
        side = int(self.img_4)
        feature_map = hidden.view(batch_size, 64, side, side)
        return self.model(feature_map)
def make_batch(image, mask, device):
    """Load an image/mask pair and assemble an inpainting batch in [-1, 1].

    *image* and *mask* are file paths; the mask is binarized at 0.5 and the
    masked image keeps only the pixels where the mask is 0.
    """
    rgb = np.array(Image.open(image).convert('RGB')).astype(np.float32) / 255.0
    # HWC -> NCHW.
    image_tensor = torch.from_numpy(rgb[None].transpose(0, 3, 1, 2))

    gray = np.array(Image.open(mask).convert('L')).astype(np.float32) / 255.0
    gray = gray[None, None]
    # Binarize the mask at 0.5.
    gray[gray < 0.5] = 0
    gray[gray >= 0.5] = 1
    mask_tensor = torch.from_numpy(gray)

    batch = {
        'image': image_tensor,
        'mask': mask_tensor,
        'masked_image': (1 - mask_tensor) * image_tensor,
    }
    # Move everything to the target device and rescale [0, 1] -> [-1, 1].
    for key in batch:
        batch[key] = batch[key].to(device=device) * 2.0 - 1.0
    return batch
def append_to_bib(bib_entery):
    """Queue the given bibliography entry key (or list of keys) for output.

    Entries already queued in _BIBLIOGRAPHY_TO_OUTPUT are not added twice.
    """
    global _BIBLIOGRAPHY
    global _BIBLIOGRAPHY_TO_OUTPUT
    for key in to_list(bib_entery):
        entry = _BIBLIOGRAPHY.entries[key]
        if entry not in _BIBLIOGRAPHY_TO_OUTPUT:
            _BIBLIOGRAPHY_TO_OUTPUT.append(entry)
def spawn_3D_doors(map, entrance, exit, base_pos=5):
    """Clear the maze volume and mark the entrance (gold) and exit (diamond).

    NOTE(review): assumes *map* is indexed [y][z][x] given the len() usage -
    confirm against the maze generator.
    """
    border_size = (1, 1, 1)
    i, k, j = len(map[0][0]), len(map), len(map[0])
    # Fill the bordered bounding box above base_pos with air.
    lower_corner = Point(x=-border_size[0], y=base_pos + 1, z=-border_size[1])
    upper_corner = Point(x=i + border_size[0] - 1, y=base_pos + k + border_size[2] - 1, z=j + border_size[1] - 1)
    CLIENT.fillCube(FillCubeRequest(cube=Cube(min=lower_corner, max=upper_corner), type=AIR))
    # Entrance is marked with gold, exit with diamond.
    entrance_block = Block(position=Point(x=entrance[2], y=entrance[0] + base_pos - 1, z=entrance[1]), type=GOLD_BLOCK, orientation=NORTH)
    exit_block = Block(position=Point(x=exit[2], y=exit[0] + base_pos - 1, z=exit[1]), type=DIAMOND_BLOCK, orientation=NORTH)
    CLIENT.spawnBlocks(Blocks(blocks=[entrance_block, exit_block]))
    return
class ReversibleField(Field):
    """A Field whose numericalized batches can be converted back to text."""

    def __init__(self, **kwargs):
        # revtok is only needed when we are not tokenizing with plain list().
        self.use_revtok = kwargs.get('tokenize') is not list
        if kwargs.get('tokenize') is None:
            kwargs['tokenize'] = 'revtok'
        if 'unk_token' not in kwargs:
            kwargs['unk_token'] = ' UNK '
        super(ReversibleField, self).__init__(**kwargs)

    def reverse(self, batch, limited=False):
        """Map a numericalized batch tensor back to a list of strings."""
        if self.use_revtok:
            try:
                import revtok
            except ImportError:
                print('Please install revtok.')
                raise
        if not self.batch_first:
            # Work on (batch, time) regardless of the field's layout.
            batch = batch.t()
        with torch.cuda.device_of(batch):
            batch = batch.tolist()
        # Indices -> token strings.
        batch = [[self.vocab.itos[ind] for ind in ex] for ex in batch]

        def trim(s, t):
            # Keep tokens up to (excluding) the first occurrence of t.
            sentence = []
            for w in s:
                if w == t:
                    break
                sentence.append(w)
            return sentence

        batch = [trim(ex, self.eos_token) for ex in batch]

        def filter_special(tok):
            return tok not in (self.init_token, self.pad_token)

        batch = [filter(filter_special, ex) for ex in batch]
        if self.use_revtok:
            return [revtok.detokenize(ex) for ex in batch]
        return [''.join(ex) for ex in batch]
def launch(main_func, num_gpus_per_machine, num_machines=1, machine_rank=0, dist_url=None, args=(), timeout=DEFAULT_TIMEOUT):
    """Run main_func on every GPU of every machine, or inline when single-process.

    With world_size <= 1 the function is simply called in-process; otherwise
    one worker process is spawned per local GPU.
    """
    world_size = num_machines * num_gpus_per_machine
    if world_size <= 1:
        # Single process: no distributed setup needed.
        main_func(*args)
        return
    if dist_url == 'auto':
        assert num_machines == 1, 'dist_url=auto not supported in multi-machine jobs.'
        port = _find_free_port()
        dist_url = f'tcp://127.0.0.1:{port}'
    if num_machines > 1 and dist_url.startswith('file://'):
        logging.getLogger(__name__).warning('file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://')
    mp.spawn(
        _distributed_worker,
        nprocs=num_gpus_per_machine,
        args=(main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args, timeout),
        daemon=False,
    )
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an S3PRL downstream checkpoint to a HF model + feature extractor.

    BUG FIX: the bare `_grad()` line above the def was a garbled
    `@torch.no_grad()` decorator; restored so conversion runs without
    building autograd graphs.

    Args:
        base_model_name: HF hub name of the base model.
        config_path: Path to the HF config to load.
        checkpoint_path: Path to the S3PRL checkpoint.
        model_dump_path: Directory to save the converted model into.

    Raises:
        NotImplementedError: If the config's architecture is unsupported.
    """
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    downstream_dict = checkpoint['Downstream']
    hf_config = WavLMConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    # Dispatch on the declared head architecture.
    if arch.endswith('ForSequenceClassification'):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForAudioFrameClassification'):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForXVector'):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}')
    if hf_config.use_weighted_layer_sum:
        # Copy the layer-weighting parameters learned by the featurizer.
        hf_model.layer_weights.data = checkpoint['Featurizer']['weights']
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
def selected_cols(conn, select):
    """Return just the column names for the given SELECT on this connection."""
    if conn.driver == 'paiio':
        # paiio exposes column metadata directly on the query object.
        name_and_type = conn.query().column_info()
    else:
        name_and_type = selected_columns_and_types(conn, select)
    return [name for name, *_ in name_and_type]
class FusedFunc(Func):
    def __init__(self, name, signatures):
        """Collect type codes, Cython types, and variable names for a fused ufunc."""
        super(FusedFunc, self).__init__(name, signatures)
        self.doc = ('See the documentation for scipy.special.' + self.name)
        # "codes": per-argument strings of the type codes seen across signatures.
        (self.incodes, self.outcodes) = self._get_codes()
        self.fused_types = set()
        # "types": (Cython type, code) pairs; fused type declarations collected.
        (self.intypes, infused_types) = self._get_types(self.incodes)
        self.fused_types.update(infused_types)
        (self.outtypes, outfused_types) = self._get_types(self.outcodes)
        self.fused_types.update(outfused_types)
        # "vars": generated argument names x0..xn (inputs) / y0..yn (outputs).
        (self.invars, self.outvars) = self._get_vars()
    def _get_codes(self):
        """Merge per-signature type codes into one code string per argument.

        Returns a (incodes, outcodes) pair of tuples: each element is the
        sorted, de-duplicated concatenation of the codes that argument takes
        across all signatures (length > 1 implies a fused type is needed).
        """
        (inarg_num, outarg_num) = (None, None)
        (all_inp, all_outp) = ([], [])
        for (_, inarg, outarg, ret, _) in self.signatures:
            # Pointer return values count as additional output arguments.
            outp = (re.sub('\\*.*', '', ret) + outarg)
            if (inarg_num is None):
                inarg_num = len(inarg)
                outarg_num = len(outp)
            (inp, outp) = list(iter_variants(inarg, outp))[0]
            all_inp.append(inp)
            all_outp.append(outp)
        incodes = []
        for n in range(inarg_num):
            codes = unique(map((lambda x: x[n]), all_inp))
            codes.sort()
            incodes.append(''.join(codes))
        outcodes = []
        for n in range(outarg_num):
            codes = unique(map((lambda x: x[n]), all_outp))
            codes.sort()
            outcodes.append(''.join(codes))
        return (tuple(incodes), tuple(outcodes))
    def _get_types(self, codes):
        """Map each code string to a (Cython type, code) pair.

        Single-character codes map directly through CY_TYPES; longer codes
        get a generated fused type whose declaration is also collected and
        returned as the second element.
        """
        all_types = []
        fused_types = set()
        for code in codes:
            if (len(code) == 1):
                all_types.append((CY_TYPES[code], code))
            else:
                (fused_type, dec) = generate_fused_type(code)
                fused_types.add(dec)
                all_types.append((fused_type, code))
        return (all_types, fused_types)
def _get_vars(self):
invars = ['x{}'.format(n) for n in range(len(self.intypes))]
outvars = ['y{}'.format(n) for n in range(len(self.outtypes))]
return (invars, outvars)
    def _get_conditional(self, types, codes, adverb):
        """Build the fused-type specialization test line ('if/elif ...:').

        Returns None when no fused types are involved; returns the literal
        'else:' when *adverb* is 'else' and at least one fused clause exists.
        """
        clauses = []
        seen = set()
        for ((typ, typcode), code) in zip(types, codes):
            if (len(typcode) == 1):
                # Concrete type: nothing to specialize on.
                continue
            if (typ not in seen):
                clauses.append('{} is {}'.format(typ, underscore(CY_TYPES[code])))
                seen.add(typ)
        if (clauses and (adverb != 'else')):
            line = '{} {}:'.format(adverb, ' and '.join(clauses))
        elif (clauses and (adverb == 'else')):
            line = 'else:'
        else:
            line = None
        return line
def _get_incallvars(self, intypes, c):
incallvars = []
for (n, intype) in enumerate(intypes):
var = self.invars[n]
if (c and (intype == 'double complex')):
var = npy_cdouble_from_double_complex(var)
incallvars.append(var)
return incallvars
    def _get_outcallvars(self, outtypes, c):
        """Build output call arguments plus any npy_cdouble temporaries.

        For C signatures with complex outputs, a temporary is passed by
        address and cast back into the real output pointer afterwards; the
        required cast statements are returned alongside the tmp names.
        """
        (outcallvars, tmpvars, casts) = ([], [], [])
        # A signature may fill only the trailing subset of the output slots.
        start = (len(self.outvars) - len(outtypes))
        outvars = self.outvars[start:]
        for (n, (var, outtype)) in enumerate(zip(outvars, outtypes)):
            if (c and (outtype == 'double complex')):
                tmp = 'tmp{}'.format(n)
                tmpvars.append(tmp)
                outcallvars.append('&{}'.format(tmp))
                tmpcast = double_complex_from_npy_cdouble(tmp)
                casts.append('{}[0] = {}'.format(var, tmpcast))
            else:
                outcallvars.append('{}'.format(var))
        return (outcallvars, tmpvars, casts)
    def _get_nan_decs(self):
        """Generate the fallback 'else:' branch that fills outputs with NaN.

        Emitted when no input-type specialization matched; fused output
        types need one assignment branch per concrete code combination.
        """
        tab = (' ' * 4)
        (fused_types, lines) = ([], [(tab + 'else:')])
        seen = set()
        for (outvar, outtype, code) in zip(self.outvars, self.outtypes, self.outcodes):
            if (len(code) == 1):
                # Concrete output type: a single NaN assignment suffices.
                line = '{}[0] = {}'.format(outvar, NAN_VALUE[code])
                lines.append(((2 * tab) + line))
            else:
                fused_type = outtype
                (name, _) = fused_type
                if (name not in seen):
                    fused_types.append(fused_type)
                    seen.add(name)
        if (not fused_types):
            return lines
        # Enumerate every combination of concrete codes the fused outputs can
        # take and emit an if/elif/else ladder of NaN assignments over them.
        all_codes = tuple([codes for (_unused, codes) in fused_types])
        codelens = list(map((lambda x: len(x)), all_codes))
        last = (numpy.prod(codelens) - 1)
        for (m, codes) in enumerate(itertools.product(*all_codes)):
            (fused_codes, decs) = ([], [])
            for (n, fused_type) in enumerate(fused_types):
                code = codes[n]
                fused_codes.append(underscore(CY_TYPES[code]))
                for (nn, outvar) in enumerate(self.outvars):
                    if (self.outtypes[nn] == fused_type):
                        line = '{}[0] = {}'.format(outvar, NAN_VALUE[code])
                        decs.append(line)
            if (m == 0):
                adverb = 'if'
            elif (m == last):
                adverb = 'else'
            else:
                adverb = 'elif'
            cond = self._get_conditional(fused_types, codes, adverb)
            lines.append(((2 * tab) + cond))
            lines.extend(map((lambda x: ((3 * tab) + x)), decs))
        return lines
def _get_tmp_decs(self, all_tmpvars):
tab = (' ' * 4)
tmpvars = list(all_tmpvars)
tmpvars.sort()
tmpdecs = [(tab + 'cdef npy_cdouble {}'.format(tmpvar)) for tmpvar in tmpvars]
return tmpdecs
    def _get_python_wrap(self):
        """Emit the source of a `def _<name>_pywrap(...)` Python wrapper.

        The wrapper declares one cdef local per output, passes their
        addresses to the nogil cdef function, and returns the outputs.
        """
        tab = (' ' * 4)
        (body, callvars) = ([], [])
        for ((intype, _), invar) in zip(self.intypes, self.invars):
            callvars.append('{} {}'.format(intype, invar))
        line = 'def _{}_pywrap({}):'.format(self.name, ', '.join(callvars))
        body.append(line)
        # Output locals, filled by the wrapped call through their addresses.
        for ((outtype, _), outvar) in zip(self.outtypes, self.outvars):
            line = 'cdef {} {}'.format(outtype, outvar)
            body.append((tab + line))
        addr_outvars = map((lambda x: '&{}'.format(x)), self.outvars)
        line = '{}({}, {})'.format(self.name, ', '.join(self.invars), ', '.join(addr_outvars))
        body.append((tab + line))
        line = 'return {}'.format(', '.join(self.outvars))
        body.append((tab + line))
        body = '\n'.join(body)
        return body
    def _get_common(self, signum, sig):
        """Shared per-signature preprocessing for the _generate_* methods.

        Normalizes 'i' codes to 'l', resolves the callee name (exported C++
        pointer vs. specialized Cython function), and builds the opening
        if/elif specialization lines plus the body indentation to use.
        """
        tab = (' ' * 4)
        (func_name, incodes, outcodes, retcode, header) = sig
        # Integer codes are widened to long for the generated interface.
        incodes = incodes.replace('i', 'l')
        outcodes = outcodes.replace('i', 'l')
        retcode = retcode.replace('i', 'l')
        if header.endswith('h'):
            c = True
        else:
            c = False
        if header.endswith('++'):
            cpp = True
        else:
            cpp = False
        intypes = list(map((lambda x: CY_TYPES[x]), incodes))
        outtypes = list(map((lambda x: CY_TYPES[x]), outcodes))
        # Strip pointer markers from the return code; empty means void.
        retcode = re.sub('\\*.*', '', retcode)
        if (not retcode):
            retcode = 'v'
        rettype = CY_TYPES[retcode]
        if cpp:
            # C++ functions are reached through the exported function pointer.
            func_name = 'scipy.special._ufuncs_cxx._export_{}'.format(func_name)
            func_name = '(<{}(*)({}) nogil>{})'.format(rettype, ', '.join((intypes + outtypes)), func_name)
        else:
            func_name = self.cython_func_name(func_name, specialized=True)
        if (signum == 0):
            adverb = 'if'
        else:
            adverb = 'elif'
        cond = self._get_conditional(self.intypes, incodes, adverb)
        if cond:
            lines = [(tab + cond)]
            sp = (2 * tab)
        else:
            lines = []
            sp = tab
        return (func_name, incodes, outcodes, retcode, intypes, outtypes, rettype, c, lines, sp)
    def _generate_from_return_and_no_outargs(self):
        """Generate a cpdef function whose signatures only return a value.

        Returns (declaration, source, specs): the cpdef declaration line,
        the full function source, and the 'incodes->retcode' spec strings.
        """
        tab = (' ' * 4)
        (specs, body) = ([], [])
        for (signum, sig) in enumerate(self.signatures):
            (func_name, incodes, outcodes, retcode, intypes, outtypes, rettype, c, lines, sp) = self._get_common(signum, sig)
            body.extend(lines)
            callvars = self._get_incallvars(intypes, c)
            call = '{}({})'.format(func_name, ', '.join(callvars))
            if (c and (rettype == 'double complex')):
                call = double_complex_from_npy_cdouble(call)
            line = (sp + 'return {}'.format(call))
            body.append(line)
            sig = '{}->{}'.format(incodes, retcode)
            specs.append(sig)
        if (len(specs) > 1):
            # Fallback branch: no specialization matched, so return NaN of
            # the appropriate concrete type.
            body.append((tab + 'else:'))
            (outtype, outcodes) = self.outtypes[0]
            last = (len(outcodes) - 1)
            if (len(outcodes) == 1):
                line = 'return {}'.format(NAN_VALUE[outcodes])
                body.append(((2 * tab) + line))
            else:
                for (n, code) in enumerate(outcodes):
                    if (n == 0):
                        adverb = 'if'
                    elif (n == last):
                        adverb = 'else'
                    else:
                        adverb = 'elif'
                    cond = self._get_conditional(self.outtypes, code, adverb)
                    body.append(((2 * tab) + cond))
                    line = 'return {}'.format(NAN_VALUE[code])
                    body.append(((3 * tab) + line))
        (callvars, head) = ([], [])
        for (n, (intype, _)) in enumerate(self.intypes):
            callvars.append('{} {}'.format(intype, self.invars[n]))
        (outtype, _) = self.outtypes[0]
        dec = 'cpdef {} {}({}) nogil'.format(outtype, self.name, ', '.join(callvars))
        head.append((dec + ':'))
        head.append((tab + '"""{}"""'.format(self.doc)))
        src = '\n'.join((head + body))
        return (dec, src, specs)
def _generate_from_outargs_and_no_return(self):
    """Generate Cython source for a fused function whose signatures write
    results through output-pointer arguments and return nothing.

    Returns:
        (dec, src, specs) as in the sibling ``_generate_from_*`` methods.
    """
    tab = (' ' * 4)
    all_tmpvars = set()
    (specs, body) = ([], [])
    for (signum, sig) in enumerate(self.signatures):
        (func_name, incodes, outcodes, retcode, intypes, outtypes, rettype, c, lines, sp) = self._get_common(signum, sig)
        body.extend(lines)
        callvars = self._get_incallvars(intypes, c)
        # Output args may require temporaries plus cast-back statements.
        (outcallvars, tmpvars, casts) = self._get_outcallvars(outtypes, c)
        callvars.extend(outcallvars)
        all_tmpvars.update(tmpvars)
        call = '{}({})'.format(func_name, ', '.join(callvars))
        body.append((sp + call))
        body.extend(map((lambda x: (sp + x)), casts))
        if (len(outcodes) == 1):
            sig = '{}->{}'.format(incodes, outcodes)
            specs.append(sig)
        else:
            # Multiple outputs: spec uses pointer notation and void return.
            sig = '{}*{}->v'.format(incodes, outcodes)
            specs.append(sig)
    if (len(specs) > 1):
        # Fallthrough: fill the outputs with NaN when no input type matched.
        lines = self._get_nan_decs()
        body.extend(lines)
    if (len(self.outvars) == 1):
        # A single output arg is surfaced as the return value instead.
        line = 'return {}[0]'.format(self.outvars[0])
        body.append((tab + line))
    # Build the declaration: cpdef with a return for one output,
    # otherwise cdef void taking output pointers.
    (callvars, head) = ([], [])
    for (invar, (intype, _)) in zip(self.invars, self.intypes):
        callvars.append('{} {}'.format(intype, invar))
    if (len(self.outvars) > 1):
        for (outvar, (outtype, _)) in zip(self.outvars, self.outtypes):
            callvars.append('{} *{}'.format(outtype, outvar))
    if (len(self.outvars) == 1):
        (outtype, _) = self.outtypes[0]
        dec = 'cpdef {} {}({}) nogil'.format(outtype, self.name, ', '.join(callvars))
    else:
        dec = 'cdef void {}({}) nogil'.format(self.name, ', '.join(callvars))
    head.append((dec + ':'))
    head.append((tab + '"""{}"""'.format(self.doc)))
    if (len(self.outvars) == 1):
        # Declare the local standing in for the single output pointer.
        outvar = self.outvars[0]
        (outtype, _) = self.outtypes[0]
        line = 'cdef {} {}'.format(outtype, outvar)
        head.append((tab + line))
    head.extend(self._get_tmp_decs(all_tmpvars))
    src = '\n'.join((head + body))
    return (dec, src, specs)
def _generate_from_outargs_and_return(self):
    """Generate Cython source for a fused function whose signatures have
    both output-pointer arguments and a direct return value; the return
    value is stored through the first output argument.

    Returns:
        (dec, src, specs) as in the sibling ``_generate_from_*`` methods.
    """
    tab = (' ' * 4)
    all_tmpvars = set()
    (specs, body) = ([], [])
    for (signum, sig) in enumerate(self.signatures):
        (func_name, incodes, outcodes, retcode, intypes, outtypes, rettype, c, lines, sp) = self._get_common(signum, sig)
        body.extend(lines)
        callvars = self._get_incallvars(intypes, c)
        (outcallvars, tmpvars, casts) = self._get_outcallvars(outtypes, c)
        callvars.extend(outcallvars)
        all_tmpvars.update(tmpvars)
        call = '{}({})'.format(func_name, ', '.join(callvars))
        if (c and (rettype == 'double complex')):
            # C functions return npy_cdouble; convert to Cython's complex.
            call = double_complex_from_npy_cdouble(call)
        # Route the return value into the first output argument.
        call = '{}[0] = {}'.format(self.outvars[0], call)
        body.append((sp + call))
        body.extend(map((lambda x: (sp + x)), casts))
        # Spec folds the return code into the outputs with void return.
        sig = '{}*{}->v'.format(incodes, (outcodes + retcode))
        specs.append(sig)
    if (len(specs) > 1):
        # Fallthrough: fill the outputs with NaN when no input type matched.
        lines = self._get_nan_decs()
        body.extend(lines)
    (callvars, head) = ([], [])
    for (invar, (intype, _)) in zip(self.invars, self.intypes):
        callvars.append('{} {}'.format(intype, invar))
    for (outvar, (outtype, _)) in zip(self.outvars, self.outtypes):
        callvars.append('{} *{}'.format(outtype, outvar))
    dec = 'cdef void {}({}) nogil'.format(self.name, ', '.join(callvars))
    head.append((dec + ':'))
    head.append((tab + '"""{}"""'.format(self.doc)))
    head.extend(self._get_tmp_decs(all_tmpvars))
    src = '\n'.join((head + body))
    return (dec, src, specs)
def generate(self):
    """Dispatch to the appropriate source generator based on the first
    signature's output arguments and return code.

    Returns:
        (dec, src, specs, fused_types, wrap) where wrap is a Python
        wrapper (only when there are multiple output variables) or None.

    Raises:
        ValueError: if the signature has neither outputs nor a return.
    """
    (_, _, outcodes, retcode, _) = self.signatures[0]
    # Strip any pointer suffix from the return code; empty means void.
    retcode = re.sub('\\*.*', '', retcode)
    if not retcode:
        retcode = 'v'
    has_outargs = len(outcodes) > 0
    has_return = retcode != 'v'
    if has_return and not has_outargs:
        (dec, src, specs) = self._generate_from_return_and_no_outargs()
    elif has_outargs and not has_return:
        (dec, src, specs) = self._generate_from_outargs_and_no_return()
    elif has_outargs and has_return:
        (dec, src, specs) = self._generate_from_outargs_and_return()
    else:
        raise ValueError('Invalid signature')
    # A Python-level wrapper is only needed for multiple outputs.
    wrap = self._get_python_wrap() if len(self.outvars) > 1 else None
    return (dec, src, specs, self.fused_types, wrap)
def parse_args():
    """Parse command-line arguments for the model-ensemble tool.

    Returns:
        argparse.Namespace with config, checkpoint, aug_test, out, gpus.

    Raises:
        AssertionError: if the numbers of config and checkpoint files
            differ, or the output directory is empty.
    """
    parser = argparse.ArgumentParser(description='Model Ensemble with logits result')
    parser.add_argument('--config', type=str, nargs='+', help='ensemble config files path')
    parser.add_argument('--checkpoint', type=str, nargs='+', help='ensemble checkpoint files path')
    parser.add_argument('--aug-test', action='store_true', help='control ensemble aug-result or single-result (default)')
    parser.add_argument('--out', type=str, default='results', help='the dir to save result')
    parser.add_argument('--gpus', type=int, nargs='+', default=[0], help='id of gpu to use')
    args = parser.parse_args()
    # BUG FIX: the original message ran "and" and "len(checkpoint)" together
    # ("andlen(checkpoint)"); insert the missing separator.
    assert (len(args.config) == len(args.checkpoint)), (
        f'len(config) must equal len(checkpoint), but len(config) = '
        f'{len(args.config)} and len(checkpoint) = {len(args.checkpoint)}'
    )
    assert args.out, "ensemble result out-dir can't be None"
    return args
# NOTE(review): the next two lines look like decorator remnants — in Sage
# this is `@rename_keyword(color='rgbcolor')` followed by
# `@options(width=0.5, rgbcolor=(0, 0, 1), legend_label=None,
# aspect_ratio='automatic')`. As written they are not valid Python;
# restore the '@'-decorators upstream.
_keyword(color='rgbcolor')
(width=0.5, rgbcolor=(0, 0, 1), legend_label=None, aspect_ratio='automatic')
def bar_chart(datalist, **options):
    """Return a Graphics object containing a bar chart of *datalist*.

    Keyword options (width, rgbcolor, legend_label, ...) are injected by
    the (stripped) @options decorator and forwarded to BarChart.
    """
    dl = len(datalist)
    if (dl == 3):
        # Pads a 3-element list with a trailing zero — presumably to avoid
        # a rendering quirk at exactly three bars; TODO confirm intent.
        datalist = (datalist + [0])
    g = Graphics()
    # Split off show()-related keywords (figsize, axes, ...) for the plot.
    g._set_extra_kwds(Graphics._extract_kwds_for_show(options))
    ind = list(range(len(datalist)))
    g.add_primitive(BarChart(ind, datalist, options=options))
    if options['legend_label']:
        g.legend(True)
    return g
def add_speech_generation_args(parser):
    """Register the 'Speech Generation' argument group on *parser*.

    Adds the shared evaluation arguments plus --eos_prob_threshold, and
    returns the created group.
    """
    group = parser.add_argument_group('Speech Generation')
    add_common_eval_args(group)
    group.add_argument(
        '--eos_prob_threshold',
        default=0.5,
        type=float,
        help='terminate when eos probability exceeds this',
    )
    return group
# NOTE(review): the '.parametrize(...)' line below is likely a stripped
# '@pytest.mark.parametrize' decorator; restore the '@pytest.mark.' prefix.
.parametrize('knn_methods', knn_methods)
def test_mcb_proba(knn_methods):
    """Integration test: MCB predict_proba must match stored reference values."""
    (pool_classifiers, X_dsel, y_dsel, X_test, y_test) = setup_classifiers()
    # Fixed seed keeps the comparison with the saved .npy deterministic.
    rng = np.random.RandomState(123456)
    mcb = MCB(pool_classifiers, random_state=rng, knn_classifier=knn_methods)
    mcb.fit(X_dsel, y_dsel)
    probas = mcb.predict_proba(X_test)
    expected = np.load('deslib/tests/expected_values/mcb_proba_integration.npy')
    assert np.allclose(probas, expected)
def get_barren_layer_plot(var, num_layers, plt):
    """Plot every variance curve in *var* on a semilog-y axis.

    Args:
        var: mapping from curve label to a sequence of variance values.
        num_layers: either an int (expanded to 1, 6, 11, ... up to it) or
            an explicit sequence of x-axis values.
        plt: a matplotlib-pyplot-like object providing semilogy().

    Returns:
        dict mapping each label to the handle returned by plt.semilogy.
    """
    if isinstance(num_layers, int):
        xs = np.arange(1, num_layers + 1, 5)
    else:
        xs = num_layers
    return {label: plt.semilogy(xs, var[label]) for label in var.keys()}
def abstract2ids(abstract_words, vocab, article_oovs):
    """Map abstract tokens to vocab ids, using temporary ids for article OOVs.

    Words unknown to *vocab* but present in *article_oovs* receive the
    extended id ``vocab.size() + index_in_article_oovs``; all other
    unknown words map to the UNK id.
    """
    unk_id = vocab.word2id(UNKNOWN_TOKEN)
    token_ids = []
    for word in abstract_words:
        word_id = vocab.word2id(word)
        if word_id != unk_id:
            token_ids.append(word_id)
        elif word in article_oovs:
            # Temporary id in the article-specific extended vocabulary.
            token_ids.append(vocab.size() + article_oovs.index(word))
        else:
            token_ids.append(unk_id)
    return token_ids
def adaptive_max_pool1d(input, output_size, return_indices=False):
    """Apply 1-D adaptive max pooling over *input*.

    Returns the pooled values, or a (values, indices) tuple when
    return_indices is True.
    """
    values_and_indices = torch.adaptive_max_pool1d(input, output_size)
    if return_indices:
        return values_and_indices
    return values_and_indices[0]
def parse():
    """Parse command-line arguments for the EfficientFormer toolbox.

    Returns:
        argparse.Namespace with model, ckpt, profile, resolution,
        onnx and coreml fields.
    """
    parser = argparse.ArgumentParser(description='EfficientFormer Toolbox')
    parser.add_argument('--model', metavar='ARCH', default='efficientformerv2_l')
    parser.add_argument('--ckpt', default='weights/eformer_l_450.pth',
                        type=str, metavar='PATH', help='path to checkpoint')
    # Profiling is on by default; the flag exists for symmetry.
    parser.add_argument('--profile', action='store_true', default=True,
                        help='profiling GMACs')
    parser.add_argument('--resolution', default=224, type=int)
    parser.add_argument('--onnx', action='store_true', default=False,
                        help='export onnx')
    parser.add_argument('--coreml', action='store_true', default=False,
                        help='export coreml')
    return parser.parse_args()
class MpiAdamOptimizer(tf.train.AdamOptimizer):
    """Adam optimizer whose gradients are averaged across MPI processes.

    compute_gradients flattens all gradients, Allreduce-averages them over
    MPI.COMM_WORLD inside a tf.py_func, and reshapes them back per
    variable; apply_gradients additionally re-syncs parameters across
    workers after each update.
    """

    def __init__(self, **kwargs):
        self.comm = MPI.COMM_WORLD
        tf.train.AdamOptimizer.__init__(self, **kwargs)

    def compute_gradients(self, loss, var_list, **kwargs):
        """Return MPI-averaged (gradient, variable) pairs for *loss*."""
        grads_and_vars = super().compute_gradients(loss, var_list, **kwargs)
        # Drop variables that received no gradient.
        grads_and_vars = [(g, v) for (g, v) in grads_and_vars if (g is not None)]
        flat_grad = flat_concat([g for (g, v) in grads_and_vars])
        # Remember per-variable shapes/sizes to undo the flattening later.
        shapes = [v.shape.as_list() for (g, v) in grads_and_vars]
        sizes = [int(np.prod(s)) for s in shapes]
        num_tasks = self.comm.Get_size()
        buf = np.zeros(flat_grad.shape, np.float32)

        def _collect_grads(flat_grad):
            # Sum across ranks, then divide in place to get the mean.
            self.comm.Allreduce(flat_grad, buf, op=MPI.SUM)
            np.divide(buf, float(num_tasks), out=buf)
            return buf
        avg_flat_grad = tf.py_func(_collect_grads, [flat_grad], tf.float32)
        # py_func loses static shape info; restore it.
        avg_flat_grad.set_shape(flat_grad.shape)
        avg_grads = tf.split(avg_flat_grad, sizes, axis=0)
        avg_grads_and_vars = [(tf.reshape(g, v.shape), v) for (g, (_, v)) in zip(avg_grads, grads_and_vars)]
        return avg_grads_and_vars

    def apply_gradients(self, grads_and_vars, global_step=None, name=None):
        """Apply the update, then broadcast params so all ranks stay in sync."""
        opt = super().apply_gradients(grads_and_vars, global_step, name)
        with tf.control_dependencies([opt]):
            sync = sync_params([v for (g, v) in grads_and_vars])
        return tf.group([opt, sync])
class Function_sin(GinacFunction):
    """The symbolic sine function, backed by GiNaC.

    Registers LaTeX rendering and conversions to the maxima, mathematica,
    giac, fricas, and sympy interfaces.
    """

    def __init__(self):
        GinacFunction.__init__(self, 'sin', latex_name='\\sin', conversions=dict(maxima='sin', mathematica='Sin', giac='sin', fricas='sin', sympy='sin'))
class AverageMeter(object):
    """Tracks a metric's latest value plus a running sum/count/average,
    with optional distributed aggregation and configurable summaries."""

    def __init__(self, name, fmt=':f', summary_type=Summary.AVERAGE):
        self.name = name
        self.fmt = fmt  # format spec used for val/avg in __str__
        self.summary_type = summary_type
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record the value *val* observed *n* times."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def all_reduce(self):
        """Aggregate sum/count across all distributed workers."""
        total = torch.FloatTensor([self.sum, self.count])
        dist.all_reduce(total, dist.ReduceOp.SUM, async_op=False)
        self.sum, self.count = total.tolist()
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
        return template.format(**self.__dict__)

    def summary(self):
        """Render the configured summary statistic as a string."""
        if self.summary_type is Summary.NONE:
            template = ''
        elif self.summary_type is Summary.AVERAGE:
            template = '{name} {avg:.3f}'
        elif self.summary_type is Summary.SUM:
            template = '{name} {sum:.3f}'
        elif self.summary_type is Summary.COUNT:
            template = '{name} {count:.3f}'
        else:
            raise ValueError('invalid summary type %r' % self.summary_type)
        return template.format(**self.__dict__)
def test_getSubscription5():
    """End-to-end broker test: push a context update, create a subscription,
    then fetch the subscription by id and check its first entity id.

    Relies on module globals brokerIp and data_ngsi10 and requires a
    running NGSI broker (real network I/O).
    """
    # Step 1: push the entity update.
    url = (brokerIp + '/ngsi10/updateContext')
    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata8), headers=headers)
    resp_content = r.content
    # Normalize single quotes so the payload parses as strict JSON.
    resInJson = resp_content.decode('utf8').replace("'", '"')
    resp = json.loads(resInJson)
    # Step 2: create the subscription and capture its id.
    url = (brokerIp + '/ngsi10/subscribeContext')
    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata9), headers=headers)
    resp_content = r.content
    resInJson = resp_content.decode('utf8').replace("'", '"')
    resp = json.loads(resInJson)
    resp = resp['subscribeResponse']
    sid = resp['subscriptionId']
    # Step 3: fetch the subscription back by id.
    get_url = (brokerIp + '/ngsi10/subscription/')
    url = (get_url + sid)
    r = requests.get(url)
    resp_content = r.content
    resInJson = resp_content.decode('utf8').replace("'", '"')
    resp = json.loads(resInJson)
    resp = resp['entities']
    sid2 = resp[0]['id']
    # Informational check only; the hard assertion is on the status code.
    if ('Result4' == sid2):
        print('\nValidated')
    else:
        print('\nNot Validated')
    assert (r.status_code == 200)
def spol(g1, g2):
    """Return the S-polynomial of *g1* and *g2*.

    Each polynomial is scaled so that both leading terms equal the lcm of
    the leading coefficients times the lcm of the leading monomials, and
    the results are subtracted (cancelling the leading terms).
    """
    lc1, lc2 = g1.lc(), g2.lc()
    lc_lcm = lc1.lcm(lc2)
    lm1, lm2 = g1.lm(), g2.lm()
    lm_lcm = lm1.parent().monomial_lcm(lm1, lm2)
    left = ((lc_lcm // lc1) * (lm_lcm // lm1)) * g1
    right = ((lc_lcm // lc2) * (lm_lcm // lm2)) * g2
    return left - right
def _rendezvous_error(msg):
return ValueError(('Error initializing torch.distributed using ' + msg)) |
class L1RegressionModel(MLPModel, ModelIOKeysMixin):
    """MLP regression model trained with MSE plus an L1 penalty on the
    first linear layer's weights.

    NOTE(review): the `device` and `sigma` constructor arguments are
    accepted but never used here — presumably kept for interface parity
    with sibling models; confirm upstream.
    """

    def __init__(self, input_dim, output_dim, hidden_dims, device, batch_norm=None, dropout=None, activation='relu', sigma=1.0, lam=0.1):
        super().__init__(input_dim, output_dim, hidden_dims, batch_norm=batch_norm, dropout=dropout, activation=activation)
        self.loss = nn.MSELoss()
        self.lam = lam  # weight of the L1 regularization term

    def forward(self, feed_dict):
        """Return (total_loss, {}, {}) in training, the composed output otherwise."""
        pred = super().forward(self._get_input(feed_dict))
        if self.training:
            loss = self.loss(pred, self._get_label(feed_dict))
            # L1 penalty computed on the first linear layer's weights only.
            reg = torch.mean(torch.abs(self.mlp[0][0].weight))
            total_loss = (loss + (self.lam * reg))
            return (total_loss, dict(), dict())
        else:
            return self._compose_output(pred)
class PermuteCallMethod(common.BaseSubstitution):
    """Graph substitution that normalizes `permute` call arguments.

    When permute was invoked with loose positional dims (e.g.
    permute(0, 2, 1)) rather than a single tuple, the dims are wrapped so
    op_call_args holds one sequence argument.
    """

    def __init__(self):
        # Match every node whose operation is `permute`.
        nodes = NodeOperationMatcher(permute)
        super().__init__(matcher_instance=nodes)

    def substitute(self, graph: Graph, node: BaseNode) -> Graph:
        """Wrap loose dim arguments into a single list argument; return graph."""
        if (node.op_call_args and (not isinstance(node.op_call_args[0], tuple))):
            node.op_call_args = [node.op_call_args]
        return graph
def compute_value_loss(agent, batch, network_params):
    """Expectile-regression value loss (IQL-style) for goal-conditioned RL.

    Mutates *batch* in place: masks become 1 - reward and rewards are
    shifted down by 1 — consistent with rewards in {0, 1} where 1 marks
    goal attainment (NOTE(review): inferred from the arithmetic; confirm).

    Returns:
        (value_loss, info_dict) where info_dict carries diagnostics.
    """
    batch['masks'] = (1.0 - batch['rewards'])
    batch['rewards'] = (batch['rewards'] - 1.0)
    # Clipped double estimate from the target network at the next state.
    (next_v1, next_v2) = agent.network(batch['next_observations'], batch['goals'], method='target_value')
    next_v = jnp.minimum(next_v1, next_v2)
    q = (batch['rewards'] + ((agent.config['discount'] * batch['masks']) * next_v))
    # Advantage uses the mean of the two target values at the current state.
    (v1_t, v2_t) = agent.network(batch['observations'], batch['goals'], method='target_value')
    v_t = ((v1_t + v2_t) / 2)
    adv = (q - v_t)
    # Separate Bellman targets for each value head.
    q1 = (batch['rewards'] + ((agent.config['discount'] * batch['masks']) * next_v1))
    q2 = (batch['rewards'] + ((agent.config['discount'] * batch['masks']) * next_v2))
    (v1, v2) = agent.network(batch['observations'], batch['goals'], method='value', params=network_params)
    # Asymmetric (expectile) regression of each head toward its target,
    # weighted by the sign of the advantage.
    value_loss1 = expectile_loss(adv, (q1 - v1), agent.config['pretrain_expectile']).mean()
    value_loss2 = expectile_loss(adv, (q2 - v2), agent.config['pretrain_expectile']).mean()
    value_loss = (value_loss1 + value_loss2)
    advantage = adv
    return (value_loss, {'value_loss': value_loss, 'v max': v1.max(), 'v min': v1.min(), 'v mean': v1.mean(), 'abs adv mean': jnp.abs(advantage).mean(), 'adv mean': advantage.mean(), 'adv max': advantage.max(), 'adv min': advantage.min(), 'accept prob': (advantage >= 0).mean()})
def collect_trainable_weights(layer):
    """Recursively gather the unique trainable weights under *layer*.

    Container layers (Sequential / Model / Graph) are walked recursively;
    leaf layers contribute their own trainable_weights. Returns a list
    sorted by weight name, or [] when the layer is marked non-trainable.
    """
    if not getattr(layer, 'trainable', True):
        return []
    kind = layer.__class__.__name__
    if kind == 'Sequential':
        children = layer.flattened_layers
    elif kind == 'Model':
        children = layer.layers
    elif kind == 'Graph':
        children = layer._graph_nodes.values()
    else:
        children = None
    if children is None:
        weights = list(layer.trainable_weights)
    else:
        weights = []
        for child in children:
            weights += collect_trainable_weights(child)
    # De-duplicate shared weights, then sort for a deterministic order.
    unique = list(set(weights))
    unique.sort(key=lambda w: w.name)
    return unique
def generate_sequences(l):
    """Enumerate all non-empty selections taking at most one element from
    each inner list of *l*, preserving the original list order.

    Returns [] for an empty input.
    """
    if not l:
        return []
    tails = generate_sequences(l[1:])
    # Selections that skip the first slot entirely.
    result = list(tails)
    for choice in l[0]:
        # The singleton selection, then this choice prepended to every tail.
        result.append([choice])
        for tail in tails:
            result.append([choice] + tail)
    return result
class PeriodicPointIterator():
    """Iterator over the (infinite) periodic points of a substitution *m*
    along a given cycle of letters; the per-position streams are cached
    lazily and feed off one another.
    """

    def __init__(self, m, cycle):
        self._m = m
        self._image = m.image
        self._cycle = tuple(cycle)
        # One lazily-evaluated stream per cycle position.
        self._cache = [lazy_list(self.get_iterator(i)) for i in range(len(cycle))]

    def __reduce__(self):
        # Pickle support: rebuild from the substitution and the cycle.
        return (PeriodicPointIterator, (self._m, self._cycle))

    # NOTE(review): the bare '_method' below is likely a stripped
    # '@cached_method' decorator; restore it upstream.
    _method
    def get_iterator(self, i):
        """Yield the letters of the periodic point at cycle position *i*."""
        j = ((i - 1) % len(self._cycle))
        # Seed with the image of the previous cycle letter...
        for a in self._image(self._cycle[j]):
            (yield a)
        # ...then keep expanding the predecessor stream, skipping its seed.
        u = iter(self._cache[j])
        next(u)
        while True:
            for a in self._image(next(u)):
                (yield a)
def _get_entity_placeholders(dataset, language):
    """Build a mapping from each entity in *dataset* to its placeholder name."""
    placeholders = {}
    for entity in dataset[ENTITIES]:
        placeholders[entity] = _get_entity_name_placeholder(entity, language)
    return placeholders
class Agent(AbstractPlayer):
    """GVGAI learning agent that plays random available actions and
    escapes at game tick 1000.

    NOTE(review): the bare string literals between methods are original
    comment blocks preserved as no-op statements.
    """

    def __init__(self):
        AbstractPlayer.__init__(self)
        # Request JSON-serialized state observations from the framework.
        self.lastSsoType = LEARNING_SSO_TYPE.JSON
    '\n * Public method to be called at the start of every level of a game.\n * Perform any level-entry initialization here.\n * sso Phase Observation of the current game.\n * elapsedTimer Timer (1s)\n '

    def init(self, sso, elapsedTimer):
        # No per-level initialization required for the random policy.
        pass
    '\n * Method used to determine the next move to be performed by the agent.\n * This method can be used to identify the current state of the game and all\n * relevant details, then to choose the desired course of action.\n *\n * sso Observation of the current state of the game to be used in deciding\n * the next action to be taken by the agent.\n * elapsedTimer Timer (40ms)\n * The action to be performed by the agent.\n '

    def act(self, sso, elapsedTimer):
        # Bail out at tick 1000; otherwise pick a uniformly random action.
        if (sso.gameTick == 1000):
            return 'ACTION_ESCAPE'
        else:
            index = random.randint(0, (len(sso.availableActions) - 1))
            return sso.availableActions[index]
    '\n * Method used to perform actions in case of a game end.\n * This is the last thing called when a level is played (the game is already in a terminal state).\n * Use this for actions such as teardown or process data.\n *\n * sso The current state observation of the game.\n * elapsedTimer Timer (up to CompetitionParameters.TOTAL_LEARNING_TIME\n * or CompetitionParameters.EXTRA_LEARNING_TIME if current global time is beyond TOTAL_LEARNING_TIME)\n * The next level of the current game to be played.\n * The level is bound in the range of [0,2]. If the input is any different, then the level\n * chosen will be ignored, and the game will play a random one instead.\n '

    def result(self, sso, elapsedTimer):
        # Choose the next level uniformly at random from [0, 2].
        return random.randint(0, 2)
def _compress_array(lat_lng_dtime_other, spatial_radius):
    """Spatially compress a trajectory: consecutive points within
    *spatial_radius* of the current anchor collapse into one record at
    their median lat/lon, stamped with the anchor's timestamp and extras.

    Args:
        lat_lng_dtime_other: list of [lat, lon, datetime, *extras] records.
        spatial_radius: distance threshold in the units returned by
            gislib.getDistance.

    Returns:
        list of compressed [median_lat, median_lon, t_0, *extras] records;
        the input itself when it has fewer than two points.
    """
    if (len(lat_lng_dtime_other) < 2):
        return lat_lng_dtime_other
    measure_distance = gislib.getDistance
    compressed_traj = []
    (lat_0, lon_0) = lat_lng_dtime_other[0][:2]  # current cluster anchor
    (sum_lat, sum_lon) = ([lat_0], [lon_0])      # coords of the open cluster
    t_0 = lat_lng_dtime_other[0][2]              # anchor timestamp
    i_0 = 0                                      # index of the anchor record
    count = 1
    lendata = (len(lat_lng_dtime_other) - 1)
    for i in range(lendata):
        (lat, lon, t) = lat_lng_dtime_other[(i + 1)][:3]
        Dr = measure_distance([lat_0, lon_0], [lat, lon])
        if (Dr > spatial_radius):
            # Close out the current cluster at its median position, then
            # start a fresh cluster anchored at this point.
            extra_cols = list(lat_lng_dtime_other[i_0][3:])
            compressed_traj += [([np.median(sum_lat), np.median(sum_lon), t_0] + extra_cols)]
            t_0 = t
            count = 0
            (lat_0, lon_0) = (lat, lon)
            i_0 = (i + 1)
            (sum_lat, sum_lon) = ([], [])
        count += 1
        sum_lat += [lat]
        sum_lon += [lon]
        if (i == (lendata - 1)):
            # Flush the trailing (still open) cluster.
            extra_cols = list(lat_lng_dtime_other[i_0][3:])
            compressed_traj += [([np.median(sum_lat), np.median(sum_lon), t_0] + extra_cols)]
    return compressed_traj
def get_current_tensors():
    """Print the type and size of every live torch tensor (or object
    wrapping one via .data) tracked by the garbage collector.

    Debugging aid; returns None.
    """
    for candidate in gc.get_objects():
        try:
            if torch.is_tensor(candidate) or (
                hasattr(candidate, 'data') and torch.is_tensor(candidate.data)
            ):
                print(type(candidate), candidate.size())
        except Exception:
            # Some gc-tracked objects break on attribute access; skip them.
            pass
class TransparentDataParallel(nn.DataParallel):
    """DataParallel wrapper that forwards the project's custom training
    API (set_best/recover_best/save/train_batch/eval_batch) to the wrapped
    module, since nn.DataParallel only proxies forward().
    """

    def set_best(self, *args, **kwargs):
        return self.module.set_best(*args, **kwargs)

    def recover_best(self, *args, **kwargs):
        return self.module.recover_best(*args, **kwargs)

    def save(self, *args, **kwargs):
        return self.module.save(*args, **kwargs)

    def train_batch(self, *args, **kwargs):
        return self.module.train_batch(*args, **kwargs)

    def eval_batch(self, *args, **kwargs):
        return self.module.eval_batch(*args, **kwargs)
class base_peripheral(nn.Module):
    """Minimal nn.Module base class for peripheral network components."""

    def __init__(self):
        super(base_peripheral, self).__init__()
class Phrase(object):
    """Node in a phrase tree covering tokens [start_idx, end_idx].

    NOTE(review): the attribute 'childen' (sic) is preserved as-is —
    almost certainly a typo for 'children', but external code may rely
    on the misspelled name.
    """

    def __init__(self, phrase_idx, start_idx, end_idx, size, label, text, parent_idx, align_idx):
        super(Phrase, self).__init__()
        self.phrase_idx = phrase_idx   # index of this phrase
        self.start_idx = start_idx     # first covered token index
        self.end_idx = end_idx         # last covered token index
        self.size = size               # number of covered tokens
        self.label = label             # syntactic label (e.g. NP)
        self.text = text               # surface text of the phrase
        self.parent_idx = parent_idx   # index of the parent phrase
        self.align_idx = align_idx     # alignment target index
        self.childen = []              # child phrases, filled in externally
def main(args):
    """Generate the gparams register-modules file from the given headers.

    Args:
        args: argv-style list: destination_dir followed by header files.

    Returns:
        0 on success, 1 when the destination or a header is missing.
    """
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('destination_dir', help='destination directory')
    parser.add_argument('header_files', nargs='+', help='One or more header files to parse')
    pargs = parser.parse_args(args)
    if not mk_genfile_common.check_dir_exists(pargs.destination_dir):
        return 1
    if not mk_genfile_common.check_files_exist(pargs.header_files):
        return 1
    # Normalize all header paths to absolute before generation.
    headers = [os.path.abspath(path) for path in pargs.header_files]
    output = mk_genfile_common.mk_gparams_register_modules_internal(headers, pargs.destination_dir)
    logging.info('Generated "{}"'.format(output))
    return 0
class NCSymDualBases(Category_realization_of_parent):
    """Category of bases of the dual of symmetric functions in
    non-commuting variables."""

    def super_categories(self):
        # Bases of the dual share the joint NCSym/NCSym* base category.
        return [NCSymOrNCSymDualBases(self.base())]

    def _repr_(self):
        return 'Category of bases of dual symmetric functions in non-commuting variables over the {}'.format(self.base().base_ring())
def format_invalid_values_string(invalid_values, num_values):
    """Render up to *num_values* invalid values as a display string.

    DataFrames longer than the limit show head(num_values) plus an
    overflow note; sets are sorted by string form and truncated with a
    '+ N more' marker; anything else is formatted directly.
    """
    if isinstance(invalid_values, pd.DataFrame) and len(invalid_values) > num_values:
        overflow = len(invalid_values) - num_values
        return f'{invalid_values.head(num_values)}\n+{overflow} more'
    if isinstance(invalid_values, set):
        ordered = sorted(invalid_values, key=str)
        if len(ordered) > num_values:
            shown = ordered[:num_values] + [f'+ {len(ordered) - num_values} more']
            return f'{shown}'
        invalid_values = ordered
    return f'{invalid_values}'
class EnumAction(Action):
    """argparse Action that maps a parsed raw value back to an Enum member.

    Pass the Enum subclass via the ``type=`` keyword; its members' values
    become the argparse choices unless choices are given explicitly.
    """

    def __init__(self, **kwargs):
        # Renamed from 'enum'/'_enum' locals to avoid shadowing the module.
        enum_type = kwargs.pop('type', None)
        if enum_type is None:
            raise ValueError('type must be assigned an Enum when using EnumAction')
        if not issubclass(enum_type, enum.Enum):
            raise TypeError('type must be an Enum when using EnumAction')
        kwargs.setdefault('choices', tuple(member.value for member in enum_type))
        super(EnumAction, self).__init__(**kwargs)
        self._enum = enum_type

    def __call__(self, parser, namespace, values, option_string=None):
        # Convert the raw CLI value into the corresponding Enum member.
        setattr(namespace, self.dest, self._enum(values))
class JaxBaseModuleClass(TunableMixin, flax.linen.Module):
    """Base class for JAX/flax modules: manages per-stream RNG keys, the
    train state, device placement, and the generic
    inference -> generative -> loss forward pass.

    NOTE(review): several decorators appear stripped from this listing
    (@property on required_rngs/device/rngs/params/state, @abstractmethod
    on the hook methods) — restore them upstream; `_set_rngs` reads
    `self.required_rngs` without calling it, which only works as a property.
    """

    def configure(self) -> None:
        """Initialize training flags and seed this module's RNG streams."""
        self.training = None
        self.train_state = None
        # Fall back to seed 0 when no global seed is configured.
        self.seed = (settings.seed if (settings.seed is not None) else 0)
        self.seed_rng = device_selecting_PRNGKey()(self.seed)
        self._set_rngs()

    def setup(self):
        """Flax setup hook; subclasses define their submodules here."""

    def required_rngs(self):
        """Names of the RNG streams this module requires."""
        return ('params',)

    def __call__(self, tensors: dict[(str, jnp.ndarray)], get_inference_input_kwargs: (dict | None)=None, get_generative_input_kwargs: (dict | None)=None, inference_kwargs: (dict | None)=None, generative_kwargs: (dict | None)=None, loss_kwargs: (dict | None)=None, compute_loss=True) -> (tuple[(jnp.ndarray, jnp.ndarray)] | tuple[(jnp.ndarray, jnp.ndarray, LossOutput)]):
        """Run the generic forward pass (inference, generative, optional loss)."""
        return _generic_forward(self, tensors, inference_kwargs, generative_kwargs, loss_kwargs, get_inference_input_kwargs, get_generative_input_kwargs, compute_loss)

    def _get_inference_input(self, tensors: dict[(str, jnp.ndarray)], **kwargs):
        """Extract the inference inputs from *tensors* (subclass hook)."""

    def _get_generative_input(self, tensors: dict[(str, jnp.ndarray)], inference_outputs: dict[(str, jnp.ndarray)], **kwargs):
        """Extract the generative inputs from tensors and inference outputs (subclass hook)."""

    def inference(self, *args, **kwargs) -> dict[(str, (jnp.ndarray | Distribution))]:
        """Run the inference (recognition) model (subclass hook)."""

    def generative(self, *args, **kwargs) -> dict[(str, (jnp.ndarray | Distribution))]:
        """Run the generative model (subclass hook)."""

    def loss(self, *args, **kwargs) -> LossOutput:
        """Compute the training loss (subclass hook)."""

    def device(self):
        return self.seed_rng.device()

    def train(self):
        """Switch the module into training mode."""
        self.training = True

    def eval(self):
        """Switch the module into evaluation mode."""
        self.training = False

    def rngs(self) -> dict[(str, jnp.ndarray)]:
        # Each access splits the stored keys so streams never repeat.
        return self._split_rngs()

    def _set_rngs(self):
        """Derive one RNG key per required stream from the seed key."""
        required_rngs = self.required_rngs
        rng_keys = random.split(self.seed_rng, num=(len(required_rngs) + 1))
        (self.seed_rng, module_rngs) = (rng_keys[0], rng_keys[1:])
        self._rngs = {k: module_rngs[i] for (i, k) in enumerate(required_rngs)}

    def _split_rngs(self):
        """Advance every stored RNG stream; return the keys to use now."""
        new_rngs = {}
        ret_rngs = {}
        for (k, v) in self._rngs.items():
            (new_rngs[k], ret_rngs[k]) = random.split(v)
        self._rngs = new_rngs
        return ret_rngs

    def params(self) -> dict[(str, Any)]:
        self._check_train_state_is_not_none()
        return self.train_state.params

    def state(self) -> dict[(str, Any)]:
        self._check_train_state_is_not_none()
        return self.train_state.state

    def state_dict(self) -> dict[(str, Any)]:
        """Serialize the full train state to a plain dict."""
        self._check_train_state_is_not_none()
        return flax.serialization.to_state_dict(self.train_state)

    def load_state_dict(self, state_dict: dict[(str, Any)]):
        """Restore the train state from *state_dict* (requires prior training)."""
        if (self.train_state is None):
            raise RuntimeError('Train state is not set. Train for one iteration prior to loading state dict.')
        self.train_state = flax.serialization.from_state_dict(self.train_state, state_dict)

    def to(self, device: Device):
        """Move the train state and RNG keys onto *device* (no-op if already there)."""
        if (device is not self.device):
            if (self.train_state is not None):
                self.train_state = jax.tree_util.tree_map((lambda x: jax.device_put(x, device)), self.train_state)
            self.seed_rng = jax.device_put(self.seed_rng, device)
            self._rngs = jax.device_put(self._rngs, device)

    def _check_train_state_is_not_none(self):
        if (self.train_state is None):
            raise RuntimeError('Train state is not set. Module has not been trained.')

    def as_bound(self) -> JaxBaseModuleClass:
        """Return a bound copy carrying the trained params/state and RNGs."""
        return self.bind({'params': self.params, **self.state}, rngs=self.rngs)

    def get_jit_inference_fn(self, get_inference_input_kwargs: (dict[(str, Any)] | None)=None, inference_kwargs: (dict[(str, Any)] | None)=None) -> Callable[([dict[(str, jnp.ndarray)], dict[(str, jnp.ndarray)]], dict[(str, jnp.ndarray)])]:
        """Build an inference closure over the current params/state.

        NOTE(review): despite the name, no jax.jit is applied here —
        presumably a stripped decorator or applied by the caller; confirm.
        """
        vars_in = {'params': self.params, **self.state}
        get_inference_input_kwargs = _get_dict_if_none(get_inference_input_kwargs)
        inference_kwargs = _get_dict_if_none(inference_kwargs)

        def _run_inference(rngs, array_dict):
            module = self.clone()
            inference_input = module._get_inference_input(array_dict)
            out = module.apply(vars_in, rngs=rngs, method=module.inference, **inference_input, **inference_kwargs)
            return out
        return _run_inference
def on_load(model):
    """Run one warm-up training step after loading, preserving history_.

    Calling train() initializes internal state but overwrites the recorded
    history, so it is saved beforehand and restored afterwards.
    """
    saved_history = model.history_.copy()
    model.train(max_steps=1)
    model.history_ = saved_history
def as_numpy_array(x: jnp.ndarray):
    """Copy a JAX array from device memory into a host numpy array."""
    host_value = jax.device_get(x)
    return np.array(host_value)
class LabelSpacePartitioningClassifier(BinaryRelevance):
    """Ensemble that partitions the label space with a clusterer and
    trains one classifier per label subset; predictions are merged back
    into the full label matrix.
    """

    def __init__(self, classifier=None, clusterer=None, require_dense=None):
        super(LabelSpacePartitioningClassifier, self).__init__(classifier, require_dense)
        self.clusterer = clusterer
        # Attributes copied when this estimator is cloned.
        self.copyable_attrs = ['clusterer', 'classifier', 'require_dense']

    def predict(self, X):
        """Predict the binary label matrix for *X* as a sparse lil_matrix."""
        X = self._ensure_input_format(X, sparse_format='csr', enforce_sparse=True)
        result = sparse.lil_matrix((X.shape[0], self._label_count), dtype=int)
        for model in range(self.model_count_):
            # Nonzero (row, col) entries of this partition's predictions.
            predictions = self._ensure_output_format(self.classifiers_[model].predict(X), sparse_format=None, enforce_sparse=True).nonzero()
            for (row, column) in zip(predictions[0], predictions[1]):
                # Map the partition-local column back to the global label id.
                result[(row, self.partition_[model][column])] = 1
        return result

    def _generate_partition(self, X, y):
        """Cluster the label space; sets partition_, model_count_, _label_count."""
        self.partition_ = self.clusterer.fit_predict(X, y)
        self.model_count_ = len(self.partition_)
        self._label_count = y.shape[1]
        return self
def get_input_fn(vocab, data_config, data_files, batch_size, num_epochs, shuffle, shuffle_buffer_multiplier=1, embedding_files=None):
    """Create a data iterator over *data_files* using the vocab's lookup ops."""
    lookup_ops = vocab.create_vocab_lookup_ops(embedding_files)
    iterator = dataset.get_data_iterator(
        data_files, data_config, lookup_ops, batch_size,
        num_epochs, shuffle, shuffle_buffer_multiplier)
    return iterator
def apply_augmentations(batch, conf):
    """Optionally add Gaussian noise and/or a random z-axis rotation.

    The input array is copied first whenever any augmentation applies,
    so the caller's batch is never mutated; with no augmentation the
    original array is returned unchanged.
    """
    wants_noise = conf.gauss_augment is not None
    if wants_noise or conf.z_rotate:
        batch = batch.copy()
    if wants_noise:
        noise = np.random.normal(conf.gauss_augment['mu'],
                                 conf.gauss_augment['sigma'],
                                 batch.shape)
        batch += noise
    if conf.z_rotate:
        rotation = rand_rotation_matrix()
        # Zero the x/y-z couplings so only a rotation about z remains.
        rotation[(0, 2)] = 0
        rotation[(2, 0)] = 0
        rotation[(1, 2)] = 0
        rotation[(2, 1)] = 0
        rotation[(2, 2)] = 1
        batch = batch.dot(rotation)
    return batch
class RandomNetworkDensity(mrl.Module):
    """RND-style density model over replay-buffer items: a fixed random
    target network plus a trained predictor; the negative prediction
    error serves as an unnormalized log density estimate.
    """

    def __init__(self, item, optimize_every=1, batch_size=256, layers=(256, 256)):
        super().__init__('{}_rnd'.format(item), required_agent_modules=['replay_buffer'], locals=locals())
        self.step = 0
        self.item = item  # replay-buffer field this model covers
        self.layers = layers
        self.optimize_every = optimize_every  # train every N _optimize calls
        self.batch_size = batch_size
        # Networks and optimizer are built lazily from the first sample.
        (self.tgt_net, self.prd_net, self.optimizer) = (None, None, None)
        self.lazy_load = None  # deferred checkpoint folder (nets not built yet)

    def _setup(self):
        assert isinstance(self.replay_buffer, OnlineHERBuffer)

    def _init_from_sample(self, x):
        """Build target/predictor MLPs sized from a sample batch *x*."""
        input_size = x.shape[(- 1)]
        self.tgt_net = MLP(input_size, output_size=self.layers[(- 1)], hidden_sizes=self.layers[:(- 1)])
        self.prd_net = MLP(input_size, output_size=self.layers[(- 1)], hidden_sizes=self.layers[:(- 1)])
        if self.config.get('device'):
            self.tgt_net = self.tgt_net.to(self.config.device)
            self.prd_net = self.prd_net.to(self.config.device)
        # Only the predictor is trained; the target stays random.
        self.optimizer = torch.optim.SGD(self.prd_net.parameters(), lr=0.1, weight_decay=1e-05)

    def evaluate_log_density(self, samples):
        """Return the negative mean squared prediction error per sample."""
        assert (self.tgt_net is not None), 'ENSURE READY BEFORE EVALUATING LOG DENSITY'
        samples = self.torch(samples)
        tgt = self.tgt_net(samples)
        prd = self.prd_net(samples)
        return self.numpy((- torch.mean(((prd - tgt) ** 2), dim=(- 1), keepdim=True)))

    def ready(self):
        """True once the networks have been initialized."""
        return (self.tgt_net is not None)

    def _optimize(self, force=False):
        """Take one predictor gradient step every optimize_every calls."""
        buffer = self.replay_buffer.buffer.BUFF[('buffer_' + self.item)]
        self.step += 1
        if (force or (((self.step % self.optimize_every) == 0) and len(buffer))):
            sample_idxs = np.random.randint(len(buffer), size=self.batch_size)
            samples = buffer.get_batch(sample_idxs)
            if (self.tgt_net is None):
                # First batch: build the nets, then honor any deferred load.
                self._init_from_sample(samples)
                if (self.lazy_load is not None):
                    self.load(self.lazy_load)
                    self.lazy_load = None
            samples = self.torch(samples)
            tgt = self.tgt_net(samples)
            prd = self.prd_net(samples)
            loss = F.mse_loss(tgt, prd)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

    def save(self, save_folder: str):
        """Checkpoint both networks and the optimizer (no-op before init)."""
        path = os.path.join(save_folder, (self.module_name + '.pt'))
        if (self.tgt_net is not None):
            torch.save({'tgt_state_dict': self.tgt_net.state_dict(), 'prd_state_dict': self.prd_net.state_dict(), 'opt_state_dict': self.optimizer.state_dict()}, path)

    def load(self, save_folder: str):
        """Restore a checkpoint, deferring via lazy_load until nets exist."""
        path = os.path.join(save_folder, (self.module_name + '.pt'))
        if ((self.tgt_net is None) and os.path.exists(path)):
            self.lazy_load = save_folder
        else:
            checkpoint = torch.load(path)
            self.tgt_net.load_state_dict(checkpoint['tgt_state_dict'])
            self.prd_net.load_state_dict(checkpoint['prd_state_dict'])
            self.optimizer.load_state_dict(checkpoint['opt_state_dict'])
class GroupOps(object):
def identity():
_res = ([0.0] * 5)
_res[0] = 0
_res[1] = 0
_res[2] = 0
_res[3] = 0
_res[4] = 0
return sym.ATANCameraCal.from_storage(_res)
def inverse(a):
_a = a.data
_res = ([0.0] * 5)
_res[0] = (- _a[0])
_res[1] = (- _a[1])
_res[2] = (- _a[2])
_res[3] = (- _a[3])
_res[4] = (- _a[4])
return sym.ATANCameraCal.from_storage(_res)
def compose(a, b):
_a = a.data
_b = b.data
_res = ([0.0] * 5)
_res[0] = (_a[0] + _b[0])
_res[1] = (_a[1] + _b[1])
_res[2] = (_a[2] + _b[2])
_res[3] = (_a[3] + _b[3])
_res[4] = (_a[4] + _b[4])
return sym.ATANCameraCal.from_storage(_res)
def between(a, b):
_a = a.data
_b = b.data
_res = ([0.0] * 5)
_res[0] = ((- _a[0]) + _b[0])
_res[1] = ((- _a[1]) + _b[1])
_res[2] = ((- _a[2]) + _b[2])
_res[3] = ((- _a[3]) + _b[3])
_res[4] = ((- _a[4]) + _b[4])
return sym.ATANCameraCal.from_storage(_res)
def inverse_with_jacobian(a):
_a = a.data
_res = ([0.0] * 5)
_res[0] = (- _a[0])
_res[1] = (- _a[1])
_res[2] = (- _a[2])
_res[3] = (- _a[3])
_res[4] = (- _a[4])
_res_D_a = numpy.zeros((5, 5))
_res_D_a[(0, 0)] = (- 1)
_res_D_a[(1, 0)] = 0
_res_D_a[(2, 0)] = 0
_res_D_a[(3, 0)] = 0
_res_D_a[(4, 0)] = 0
_res_D_a[(0, 1)] = 0
_res_D_a[(1, 1)] = (- 1)
_res_D_a[(2, 1)] = 0
_res_D_a[(3, 1)] = 0
_res_D_a[(4, 1)] = 0
_res_D_a[(0, 2)] = 0
_res_D_a[(1, 2)] = 0
_res_D_a[(2, 2)] = (- 1)
_res_D_a[(3, 2)] = 0
_res_D_a[(4, 2)] = 0
_res_D_a[(0, 3)] = 0
_res_D_a[(1, 3)] = 0
_res_D_a[(2, 3)] = 0
_res_D_a[(3, 3)] = (- 1)
_res_D_a[(4, 3)] = 0
_res_D_a[(0, 4)] = 0
_res_D_a[(1, 4)] = 0
_res_D_a[(2, 4)] = 0
_res_D_a[(3, 4)] = 0
_res_D_a[(4, 4)] = (- 1)
return (sym.ATANCameraCal.from_storage(_res), _res_D_a)
def compose_with_jacobians(a, b):
_a = a.data
_b = b.data
_res = ([0.0] * 5)
_res[0] = (_a[0] + _b[0])
_res[1] = (_a[1] + _b[1])
_res[2] = (_a[2] + _b[2])
_res[3] = (_a[3] + _b[3])
_res[4] = (_a[4] + _b[4])
_res_D_a = numpy.zeros((5, 5))
_res_D_a[(0, 0)] = 1
_res_D_a[(1, 0)] = 0
_res_D_a[(2, 0)] = 0
_res_D_a[(3, 0)] = 0
_res_D_a[(4, 0)] = 0
_res_D_a[(0, 1)] = 0
_res_D_a[(1, 1)] = 1
_res_D_a[(2, 1)] = 0
_res_D_a[(3, 1)] = 0
_res_D_a[(4, 1)] = 0
_res_D_a[(0, 2)] = 0
_res_D_a[(1, 2)] = 0
_res_D_a[(2, 2)] = 1
_res_D_a[(3, 2)] = 0
_res_D_a[(4, 2)] = 0
_res_D_a[(0, 3)] = 0
_res_D_a[(1, 3)] = 0
_res_D_a[(2, 3)] = 0
_res_D_a[(3, 3)] = 1
_res_D_a[(4, 3)] = 0
_res_D_a[(0, 4)] = 0
_res_D_a[(1, 4)] = 0
_res_D_a[(2, 4)] = 0
_res_D_a[(3, 4)] = 0
_res_D_a[(4, 4)] = 1
_res_D_b = numpy.zeros((5, 5))
_res_D_b[(0, 0)] = 1
_res_D_b[(1, 0)] = 0
_res_D_b[(2, 0)] = 0
_res_D_b[(3, 0)] = 0
_res_D_b[(4, 0)] = 0
_res_D_b[(0, 1)] = 0
_res_D_b[(1, 1)] = 1
_res_D_b[(2, 1)] = 0
_res_D_b[(3, 1)] = 0
_res_D_b[(4, 1)] = 0
_res_D_b[(0, 2)] = 0
_res_D_b[(1, 2)] = 0
_res_D_b[(2, 2)] = 1
_res_D_b[(3, 2)] = 0
_res_D_b[(4, 2)] = 0
_res_D_b[(0, 3)] = 0
_res_D_b[(1, 3)] = 0
_res_D_b[(2, 3)] = 0
_res_D_b[(3, 3)] = 1
_res_D_b[(4, 3)] = 0
_res_D_b[(0, 4)] = 0
_res_D_b[(1, 4)] = 0
_res_D_b[(2, 4)] = 0
_res_D_b[(3, 4)] = 0
_res_D_b[(4, 4)] = 1
return (sym.ATANCameraCal.from_storage(_res), _res_D_a, _res_D_b)
def between_with_jacobians(a, b):
    """Compute the relative element between ``a`` and ``b`` with Jacobians.

    For this additive storage group, between(a, b) = -a + b element-wise,
    so d(res)/d(a) = -I and d(res)/d(b) = +I.

    Args:
        a, b: objects with a 5-element ``data`` storage vector (ATANCameraCal).

    Returns:
        (sym.ATANCameraCal, ndarray, ndarray): the difference element, d(res)/d(a)
        (negative identity) and d(res)/d(b) (identity).
    """
    _a = a.data
    _b = b.data
    # Element-wise difference of the five storage parameters.
    _res = [((- _a[i]) + _b[i]) for i in range(5)]
    # Constant Jacobians; replaces 50 scalar writes in the generated original.
    _res_D_a = (- numpy.eye(5))
    _res_D_b = numpy.eye(5)
    return (sym.ATANCameraCal.from_storage(_res), _res_D_a, _res_D_b)
def secs_to_str(secs):
    """Format a duration in seconds as a compact H:MM:SS style string.

    The value is rounded to whole seconds, rendered via timedelta, then
    leading zero fields are stripped twice over so e.g. 30s -> '30',
    61s -> '1:01', 3661s -> '1:01:01'.
    """
    text = str(datetime.timedelta(seconds=int(round(secs))))
    # Apply the same strip sequence as before: drop a zero hours field and its
    # leading zero, then repeat once more for a zero minutes field.
    for pattern in ('^0:', '^0', '^0:', '^0'):
        text = re.sub(pattern, '', text)
    return text
class Plus():
    """Tiny arithmetic helper that counts how many operations it has run."""

    # Class-level default; the first increment creates a per-instance counter.
    calculations = 0

    def plus_three(self, number):
        """Return ``number + 3``, recording the call."""
        self.calculations = self.calculations + 1
        return number + 3

    def plus_four(self, number):
        """Return ``number + 4``, recording the call."""
        self.calculations = self.calculations + 1
        return number + 4
class ROIAlignRotated(nn.Module):
    """Module wrapper around the ``roi_align_rotated`` op.

    Float16 inputs are promoted to float32 for the op and the result is cast
    back to the original dtype.
    """

    def __init__(self, output_size, spatial_scale, sampling_ratio):
        super(ROIAlignRotated, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio

    def forward(self, input, rois):
        # rois must be a 2-D tensor with 6 values per row.
        assert rois.dim() == 2 and rois.size(1) == 6
        orig_dtype = input.dtype
        if orig_dtype == torch.float16:
            # The underlying op is run in fp32; restore fp16 on the way out.
            input = input.float()
            rois = rois.float()
        out = roi_align_rotated(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio)
        return out.to(dtype=orig_dtype)

    def __repr__(self):
        return (f'{self.__class__.__name__}('
                f'output_size={self.output_size}'
                f', spatial_scale={self.spatial_scale}'
                f', sampling_ratio={self.sampling_ratio})')
class GNNClassifier(BaseGNN):
    """Graph Neural Network for node classification.

    Layers are either supplied directly via ``layers`` or built from the
    scalar/list specifications. After ``fit``, predictions are available in
    ``labels_`` and the last-layer embedding in ``embedding_``.
    """

    def __init__(self, dims: Optional[Union[(int, list)]]=None, layer_types: Union[(str, list)]='Conv', activations: Union[(str, list)]='ReLu', use_bias: Union[(bool, list)]=True, normalizations: Union[(str, list)]='both', self_embeddings: Union[(bool, list)]=True, sample_sizes: Union[(int, list)]=25, loss: Union[(BaseLoss, str)]='CrossEntropy', layers: Optional[list]=None, optimizer: Union[(BaseOptimizer, str)]='Adam', learning_rate: float=0.01, early_stopping: bool=True, patience: int=10, verbose: bool=False):
        super(GNNClassifier, self).__init__(loss, optimizer, learning_rate, verbose)
        if (layers is not None):
            # Explicit layer objects take precedence over the spec arguments.
            layers = [get_layer(layer) for layer in layers]
        else:
            layers = get_layers(dims, layer_types, activations, use_bias, normalizations, self_embeddings, sample_sizes, loss)
        self.loss = check_loss(layers[(- 1)])
        self.layers = layers
        self.early_stopping = early_stopping
        self.patience = patience
        # Bug fix: val_mask was previously only created inside `fit` when
        # 0 < validation < 1, so any other path reading it raised
        # AttributeError. Default it to None here.
        self.val_mask = None
        self.history_ = defaultdict(list)

    def forward(self, adjacency: Union[(list, sparse.csr_matrix)], features: Union[(sparse.csr_matrix, np.ndarray)]) -> np.ndarray:
        """Run a forward pass through all layers and return the raw output.

        ``adjacency`` may be a single matrix shared by all layers or a list
        with one (possibly sampled) matrix per layer.
        """
        h = features.copy()
        for (i, layer) in enumerate(self.layers):
            if isinstance(adjacency, list):
                h = layer(adjacency[i], h)
            else:
                h = layer(adjacency, h)
        return h

    @staticmethod
    def _compute_predictions(output: np.ndarray) -> np.ndarray:
        """Map raw layer outputs to integer label predictions.

        Bug fix: this helper lacked both ``self`` and ``@staticmethod``, so
        calls like ``self._compute_predictions(output)`` raised a TypeError.
        """
        if (output.shape[1] == 1):
            # Single output unit: threshold a sigmoid-style score at 0.5.
            labels = (output.ravel() > 0.5).astype(int)
        else:
            labels = output.argmax(axis=1)
        return labels

    def fit(self, adjacency: Union[(sparse.csr_matrix, np.ndarray)], features: Union[(sparse.csr_matrix, np.ndarray)], labels: np.ndarray, n_epochs: int=100, validation: float=0, reinit: bool=False, random_state: Optional[int]=None, history: bool=False) -> 'GNNClassifier':
        """Train the network on nodes with non-negative labels.

        Negative labels mark unlabeled nodes. When ``0 < validation < 1``,
        that fraction of the labeled nodes is held out for validation.
        Returns ``self``.
        """
        if reinit:
            for layer in self.layers:
                layer.weights_initialized = False
        if (random_state is not None):
            np.random.seed(random_state)
        check_format(adjacency)
        check_format(features)
        labels = get_values(adjacency.shape, labels)
        labels = labels.astype(int)
        if (labels < 0).all():
            raise ValueError('At least one node must have a non-negative label.')
        check_output(self.layers[(- 1)].out_channels, labels)
        self.train_mask = (labels >= 0)
        if (0 < validation < 1):
            mask = (np.random.random(size=len(labels)) < validation)
            self.val_mask = (self.train_mask & mask)
            self.train_mask &= (~ mask)
        early_stopping = check_early_stopping(self.early_stopping, self.val_mask, self.patience)
        # Pre-sample neighborhoods once; 'sage' layers get their own matrix.
        adjacencies = self._sample_nodes(adjacency)
        best_val_accuracy = 0
        count = 0
        for epoch in range(n_epochs):
            output = self.forward(adjacencies, features)
            labels_pred = self._compute_predictions(output)
            loss_value = self.loss.loss(output[self.train_mask], labels[self.train_mask])
            train_accuracy = get_accuracy_score(labels[self.train_mask], labels_pred[self.train_mask])
            if ((self.val_mask is not None) and any(self.val_mask)):
                val_accuracy = get_accuracy_score(labels[self.val_mask], labels_pred[self.val_mask])
            else:
                val_accuracy = None
            self.backward(features, labels, self.train_mask)
            self.optimizer.step(self)
            if history:
                self.history_['embedding'].append(self.layers[(- 1)].embedding)
                self.history_['loss'].append(loss_value)
                self.history_['train_accuracy'].append(train_accuracy)
                if (val_accuracy is not None):
                    self.history_['val_accuracy'].append(val_accuracy)
            # Log roughly 10 times over the run (every epoch for short runs).
            if ((n_epochs > 10) and ((epoch % int((n_epochs / 10))) == 0)):
                if (val_accuracy is not None):
                    self.print_log(f'In epoch {epoch:>3}, loss: {loss_value:.3f}, train accuracy: {train_accuracy:.3f}, val accuracy: {val_accuracy:.3f}')
                else:
                    self.print_log(f'In epoch {epoch:>3}, loss: {loss_value:.3f}, train accuracy: {train_accuracy:.3f}')
            elif (n_epochs <= 10):
                if (val_accuracy is not None):
                    self.print_log(f'In epoch {epoch:>3}, loss: {loss_value:.3f}, train accuracy: {train_accuracy:.3f}, val accuracy: {val_accuracy:.3f}')
                else:
                    self.print_log(f'In epoch {epoch:>3}, loss: {loss_value:.3f}, train accuracy: {train_accuracy:.3f}')
            if early_stopping:
                # NOTE(review): assumes check_early_stopping only enables this
                # branch when a validation mask exists — confirm upstream.
                if (val_accuracy > best_val_accuracy):
                    count = 0
                    best_val_accuracy = val_accuracy
                else:
                    count += 1
                    if (count >= self.patience):
                        self.print_log('Early stopping.')
                        break
        # Final forward pass with the trained weights.
        output = self.forward(adjacencies, features)
        labels_pred = self._compute_predictions(output)
        self.embedding_ = self.layers[(- 1)].embedding
        self.output_ = self.layers[(- 1)].output
        self.labels_ = labels_pred
        return self

    def _sample_nodes(self, adjacency: Union[(sparse.csr_matrix, np.ndarray)]) -> list:
        """Return one adjacency per layer, neighbor-sampled for 'sage' layers."""
        adjacencies = []
        for layer in self.layers:
            if (layer.layer_type == 'sage'):
                sampler = UniformNeighborSampler(sample_size=layer.sample_size)
                adjacencies.append(sampler(adjacency))
            else:
                adjacencies.append(adjacency)
        return adjacencies

    def predict(self, adjacency_vectors: Union[(sparse.csr_matrix, np.ndarray)]=None, feature_vectors: Union[(sparse.csr_matrix, np.ndarray)]=None) -> np.ndarray:
        """Predict labels for new nodes (or return training labels if no input).

        Raises:
            ValueError: if only an adjacency is given, or dimensions mismatch.
        """
        self._check_fitted()
        if ((adjacency_vectors is None) and (feature_vectors is None)):
            return self.labels_
        elif ((adjacency_vectors is not None) and (feature_vectors is None)):
            raise ValueError('Missing value: feature matrix is missing.')
        elif (adjacency_vectors is None):
            # No graph given: treat each node as isolated (identity adjacency).
            adjacency_vectors = sparse.identity(feature_vectors.shape[0], format='csr')
        check_square(adjacency_vectors)
        check_nonnegative(adjacency_vectors)
        feature_vectors = check_format(feature_vectors)
        (n_row, n_col) = adjacency_vectors.shape
        (feat_row, feat_col) = feature_vectors.shape
        if (n_col != feat_row):
            raise ValueError(f'Dimension mismatch: dim0={n_col} != dim1={feat_row}.')
        elif (feat_col != self.layers[0].weight.shape[0]):
            raise ValueError(f'Dimension mismatch: current number of features is {feat_col} whereas GNN has been trained with {self.layers[0].weight.shape[0]} features.')
        h = self.forward(adjacency_vectors, feature_vectors)
        labels = self._compute_predictions(h)
        return labels
def conv_bn(inp, oup, stride, padding=1):
    """Build a 3x3 convolution -> BatchNorm -> ReLU6 block.

    The convolution has no bias (the following BatchNorm makes it redundant).
    """
    layers = [
        nn.Conv2d(inp, oup, 3, stride, padding, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)
class DukeMTMCreID(BaseImageDataset):
    """DukeMTMC-reID person re-identification dataset.

    Expected layout under ``root``::

        DukeMTMC-reID/
            bounding_box_train/   # training images
            bounding_box_test/    # gallery images
            query/                # query images

    File names encode person and camera ids as ``<pid>_c<camid>...jpg``.
    """
    dataset_dir = 'DukeMTMC-reID'

    def __init__(self, root='data', verbose=True, **kwargs):
        super(DukeMTMCreID, self).__init__()
        self.dataset_dir = osp.join(root, self.dataset_dir)
        # Bug fix: the URL string literal was truncated in this source, which
        # made the file unparseable. Restore the official DukeMTMC-reID
        # archive link to re-enable automatic download; until then
        # _download_data raises a clear error instead of fetching ''.
        self.dataset_url = ''
        self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train')
        self.query_dir = osp.join(self.dataset_dir, 'query')
        self.gallery_dir = osp.join(self.dataset_dir, 'bounding_box_test')
        self._download_data()
        self._check_before_run()
        train = self._process_dir(self.train_dir, relabel=True)
        query = self._process_dir(self.query_dir, relabel=False)
        gallery = self._process_dir(self.gallery_dir, relabel=False)
        if verbose:
            print('=> DukeMTMC-reID loaded')
            self.print_dataset_statistics(train, query, gallery)
        self.train = train
        self.query = query
        self.gallery = gallery
        (self.num_train_pids, self.num_train_imgs, self.num_train_cams) = self.get_imagedata_info(self.train)
        (self.num_query_pids, self.num_query_imgs, self.num_query_cams) = self.get_imagedata_info(self.query)
        (self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams) = self.get_imagedata_info(self.gallery)

    def _download_data(self):
        """Download and extract the dataset archive unless already present."""
        if osp.exists(self.dataset_dir):
            print('This dataset has been downloaded.')
            return
        if (not self.dataset_url):
            raise RuntimeError("dataset_url is not set; please download DukeMTMC-reID manually to '{}'".format(self.dataset_dir))
        print('Creating directory {}'.format(self.dataset_dir))
        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))
        print('Downloading DukeMTMC-reID dataset')
        urllib.request.urlretrieve(self.dataset_url, fpath)
        print('Extracting files')
        # Context manager closes the archive even if extraction fails
        # (the original leaked the handle on error).
        with zipfile.ZipFile(fpath, 'r') as zip_ref:
            zip_ref.extractall(self.dataset_dir)

    def _check_before_run(self):
        """Raise RuntimeError if any required directory is missing."""
        for path in (self.dataset_dir, self.train_dir, self.query_dir, self.gallery_dir):
            if (not osp.exists(path)):
                raise RuntimeError("'{}' is not available".format(path))

    def _process_dir(self, dir_path, relabel=False):
        """Return a list of (img_path, pid, camid) tuples for one split.

        When ``relabel`` is True, person ids are remapped to contiguous
        0-based labels (needed for the training split).
        """
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        pattern = re.compile('([-\\d]+)_c(\\d)')
        pid_container = set()
        for img_path in img_paths:
            # Assumes every .jpg name matches '<pid>_c<camid>' — a stray file
            # would make pattern.search return None and raise here.
            (pid, _) = map(int, pattern.search(img_path).groups())
            pid_container.add(pid)
        pid2label = {pid: label for (label, pid) in enumerate(pid_container)}
        dataset = []
        for img_path in img_paths:
            (pid, camid) = map(int, pattern.search(img_path).groups())
            # DukeMTMC has exactly 8 cameras, ids 1..8; store 0-based.
            assert (1 <= camid <= 8)
            camid -= 1
            if relabel:
                pid = pid2label[pid]
            dataset.append((img_path, pid, camid))
        return dataset
class PlusSAINTModule(pl.LightningModule):
    """SAINT+ knowledge-tracing model (encoder/decoder attention stacks).

    Predicts per-interaction correctness; trained with BCE-with-logits and
    evaluated with ROC AUC over non-padding positions.
    """

    def __init__(self):
        super(PlusSAINTModule, self).__init__()
        self.loss = nn.BCEWithLogitsLoss()
        # NOTE(review): the encoder stack is configured with NUM_DECODER /
        # DEC_HEADS and the decoder with NUM_ENCODER / ENC_HEADS — the
        # constants look swapped; confirm against the original Config intent
        # before changing, since trained checkpoints depend on it.
        self.encoder_layer = StackedNMultiHeadAttention(n_stacks=Config.NUM_DECODER, n_dims=Config.EMBED_DIMS, n_heads=Config.DEC_HEADS, seq_len=Config.MAX_SEQ, n_multihead=1, dropout=0.0)
        self.decoder_layer = StackedNMultiHeadAttention(n_stacks=Config.NUM_ENCODER, n_dims=Config.EMBED_DIMS, n_heads=Config.ENC_HEADS, seq_len=Config.MAX_SEQ, n_multihead=2, dropout=0.0)
        self.encoder_embedding = EncoderEmbedding(n_exercises=Config.TOTAL_EXE, n_categories=Config.TOTAL_CAT, n_dims=Config.EMBED_DIMS, seq_len=Config.MAX_SEQ)
        self.decoder_embedding = DecoderEmbedding(n_responses=3, n_dims=Config.EMBED_DIMS, seq_len=Config.MAX_SEQ)
        # Projects the scalar elapsed time into the embedding space.
        self.elapsed_time = nn.Linear(1, Config.EMBED_DIMS)
        self.fc = nn.Linear(Config.EMBED_DIMS, 1)

    def forward(self, x, y):
        """Return per-position logits for exercises ``x`` and responses ``y``."""
        enc = self.encoder_embedding(exercises=x['input_ids'], categories=x['input_cat'])
        dec = self.decoder_embedding(responses=y)
        elapsed_time = x['input_rtime'].unsqueeze((- 1)).float()
        ela_time = self.elapsed_time(elapsed_time)
        # Elapsed-time signal is added to the response embedding.
        dec = (dec + ela_time)
        encoder_output = self.encoder_layer(input_k=enc, input_q=enc, input_v=enc)
        decoder_output = self.decoder_layer(input_k=dec, input_q=dec, input_v=dec, encoder_output=encoder_output, break_layer=1)
        out = self.fc(decoder_output)
        return out.squeeze()

    def configure_optimizers(self):
        """Plain Adam over all parameters (Lightning hook)."""
        return torch.optim.Adam(self.parameters())

    def training_step(self, batch, batch_ids):
        """One training step; loss on all positions, metrics on non-padding."""
        (input, labels) = batch
        # Positions with exercise id 0 are padding.
        target_mask = (input['input_ids'] != 0)
        out = self(input, labels)
        loss = self.loss(out.float(), labels.float())
        out = torch.masked_select(out, target_mask)
        out = torch.sigmoid(out)
        labels = torch.masked_select(labels, target_mask)
        self.log('train_loss', loss, on_step=True, prog_bar=True)
        return {'loss': loss, 'outs': out, 'labels': labels}

    def training_epoch_end(self, training_ouput):
        """Aggregate step outputs and log epoch-level train AUC."""
        out = np.concatenate([i['outs'].cpu().detach().numpy() for i in training_ouput]).reshape((- 1))
        labels = np.concatenate([i['labels'].cpu().detach().numpy() for i in training_ouput]).reshape((- 1))
        auc = roc_auc_score(labels, out)
        self.print('train auc', auc)
        self.log('train_auc', auc)

    def validation_step(self, batch, batch_ids):
        """One validation step; mirrors training_step without optimization."""
        (input, labels) = batch
        target_mask = (input['input_ids'] != 0)
        out = self(input, labels)
        loss = self.loss(out.float(), labels.float())
        out = torch.masked_select(out, target_mask)
        out = torch.sigmoid(out)
        labels = torch.masked_select(labels, target_mask)
        self.log('val_loss', loss, on_step=True, prog_bar=True)
        # (Removed an unused local dict that duplicated the return value.)
        return {'val_loss': loss, 'outs': out, 'labels': labels}

    def validation_epoch_end(self, validation_ouput):
        """Aggregate step outputs and log epoch-level validation AUC."""
        out = np.concatenate([i['outs'].cpu().detach().numpy() for i in validation_ouput]).reshape((- 1))
        labels = np.concatenate([i['labels'].cpu().detach().numpy() for i in validation_ouput]).reshape((- 1))
        auc = roc_auc_score(labels, out)
        self.print('val auc', auc)
        self.log('val_auc', auc)
class Brightness(object):
    """Randomly blend an image toward black by up to ``var``.

    Each call draws alpha uniformly from [0, var] and interpolates the image
    toward an all-zero image of the same shape/type.
    """

    def __init__(self, var):
        self.var = var

    def __call__(self, img):
        # Zero image with the same storage type and shape as the input.
        black = img.new().resize_as_(img).zero_()
        strength = random.uniform(0, self.var)
        return img.lerp(black, strength)
class MultiCore(Node):
    """Groups a core's TPU command stream into message-synchronized segments.

    Walks the command list, tracking send/wait system commands, and splits it
    into ``core_split_cmds`` / ``core_split_rets`` segments from which
    ``MsgCore`` nodes are built.
    """

    def __init__(self, core_id, core_nums, mlir_cmds: List[BaseTpuCmd], indent=0):
        self.core_id = core_id
        self.core_nums = core_nums
        self.mlir_cmds = mlir_cmds
        self.indent = indent
        self.core_split_cmds = []
        self.core_split_rets = []
        # Bug fix: `[Msg()] * 512` repeated ONE Msg instance 512 times, so
        # every msg_id shared a single sent_cnt counter. Each slot must own
        # its own Msg.
        self.msges: List[Msg] = [Msg() for _ in range(512)]
        last_ret = None
        tmp_cmds = []
        tmp_rets = []
        self.not_sys_cmds = collections.defaultdict(list)
        in_sys = False
        for (cmd_id, mlir_cmd) in enumerate(mlir_cmds):
            cmd = mlir_cmd
            if isinstance(cmd.reg, (tiu_sys, dma_sys)):
                in_sys = True
                ret = self.consume_sys(cmd)
                # A PRODUCING -> RECIEVING transition marks a segment boundary.
                if ((last_ret == Status.PRODUCING) and (ret == Status.RECIEVING)):
                    self.core_split_cmds.append(tmp_cmds)
                    self.core_split_rets.append(tmp_rets)
                    tmp_cmds = []
                    tmp_rets = []
                tmp_cmds.append(mlir_cmds[cmd_id])
                tmp_rets.append(ret)
                last_ret = ret
            elif in_sys:
                # Non-sys command inside a sync window: attach it to the
                # current segment (no status of its own).
                if ((last_ret == Status.RECIEVING) or (last_ret == Status.CONSUMED) or (last_ret == Status.OP)):
                    tmp_cmds.append(mlir_cmds[cmd_id])
                    tmp_rets.append(None)
                    last_ret = Status.OP
                if (last_ret == Status.CONSUMED):
                    in_sys = False
            else:
                # Commands before any sync belong to the upcoming segment.
                self.not_sys_cmds[len(self.core_split_cmds)].append(mlir_cmds[cmd_id])
            if (cmd_id == (len(mlir_cmds) - 1)):
                # Flush the trailing segment.
                assert (len(tmp_cmds) > 0)
                self.core_split_cmds.append(tmp_cmds)
                self.core_split_rets.append(tmp_rets)
                tmp_cmds = []
                tmp_rets = []
        self.msgcores = [MsgCore(msgcore_id, len(self.core_split_cmds), core_nums, msgcore_cmds, self.core_split_rets[msgcore_id], self.not_sys_cmds[msgcore_id], indent) for (msgcore_id, msgcore_cmds) in enumerate(self.core_split_cmds)]

    @staticmethod
    def get_cmd_type(cmd: BaseTpuCmd):
        """Classify a sys command as SEND or WAIT; raise for anything else."""
        if isinstance(cmd.reg, tiu_sys):
            if (cmd.reg.tsk_eu_typ == 8):
                return SYS_TYPE.SEND
            elif (cmd.reg.tsk_eu_typ == 9):
                return SYS_TYPE.WAIT
            else:
                raise ValueError(f'cmd type error: {cmd}')
        elif isinstance(cmd.reg, dma_sys):
            if (cmd.reg.cmd_special_function == 3):
                return SYS_TYPE.SEND
            elif (cmd.reg.cmd_special_function == 4):
                return SYS_TYPE.WAIT
            else:
                raise ValueError(f'cmd type error: {cmd}')
        else:
            raise ValueError(f'cmd type error: {cmd}')

    @staticmethod
    def get_msg_id(cmd: BaseTpuCmd):
        """Return the message id of a sys command; raise otherwise."""
        if isinstance(cmd.reg, (tiu_sys, dma_sys)):
            return cmd['msg_id']
        raise ValueError('not sys cmd')

    @staticmethod
    def get_msg_cnt(cmd: BaseTpuCmd):
        """Return the message count of a sys command; raise otherwise."""
        if isinstance(cmd.reg, (tiu_sys, dma_sys)):
            return cmd['cnt']
        raise ValueError('not sys cmd')

    def consume_sys(self, cmd: BaseTpuCmd):
        """Dispatch a sys command to the send/wait consumer."""
        sys = (tiu_sys, dma_sys)
        assert isinstance(cmd.reg, sys)
        if (MultiCore.get_cmd_type(cmd) == SYS_TYPE.SEND):
            return self.consume_send(cmd)
        elif (MultiCore.get_cmd_type(cmd) == SYS_TYPE.WAIT):
            return self.consume_wait(cmd)

    def consume_send(self, cmd: BaseTpuCmd):
        """Record a send on its message slot; status is PRODUCING."""
        msg_id = MultiCore.get_msg_id(cmd)
        self.msges[msg_id].sent_cnt += 1
        return Status.PRODUCING

    def consume_wait(self, cmd: BaseTpuCmd):
        """Record a wait; CONSUMED once the slot's sends are balanced."""
        msg_id = MultiCore.get_msg_id(cmd)
        self.msges[msg_id].sent_cnt -= 1
        if (self.msges[msg_id].sent_cnt == 0):
            return Status.CONSUMED
        else:
            return Status.RECIEVING

    def __str__(self):
        return '\n'.join([str(msgcore) for msgcore in self.msgcores])
def test_score_one_tree_tuples():
    """A single perfectly-parsed tree (tuple form) must score F1 == 1.0."""
    treebank = build_one_tree_treebank(True)
    with EvaluateParser() as evaluator:
        result = evaluator.process(treebank)
    assert result.f1 == pytest.approx(1.0)
def build_transforms(cfg, is_train=True):
    """Build the image transform pipeline from the config.

    Training uses the configured sizes, flip probabilities and color jitter;
    evaluation uses the test sizes with all augmentation disabled.
    """
    if is_train:
        min_size = cfg.INPUT.MIN_SIZE_TRAIN
        max_size = cfg.INPUT.MAX_SIZE_TRAIN
        flip_horizontal_prob = cfg.INPUT.HORIZONTAL_FLIP_PROB_TRAIN
        flip_vertical_prob = cfg.INPUT.VERTICAL_FLIP_PROB_TRAIN
        jitter_args = dict(
            brightness=cfg.INPUT.BRIGHTNESS,
            contrast=cfg.INPUT.CONTRAST,
            saturation=cfg.INPUT.SATURATION,
            hue=cfg.INPUT.HUE,
        )
    else:
        min_size = cfg.INPUT.MIN_SIZE_TEST
        max_size = cfg.INPUT.MAX_SIZE_TEST
        flip_horizontal_prob = 0.0
        flip_vertical_prob = 0.0
        jitter_args = dict(brightness=0.0, contrast=0.0, saturation=0.0, hue=0.0)
    normalize_transform = T.Normalize(
        mean=cfg.INPUT.PIXEL_MEAN,
        std=cfg.INPUT.PIXEL_STD,
        to_bgr255=cfg.INPUT.TO_BGR255,
    )
    return T.Compose([
        T.ColorJitter(**jitter_args),
        T.Resize(min_size, max_size),
        T.RandomHorizontalFlip(flip_horizontal_prob),
        T.RandomVerticalFlip(flip_vertical_prob),
        T.ToTensor(),
        normalize_transform,
    ])
def sel_or_init(collection: Sequence[IndividualLike], base_ind: IndividualLike, sel_fn: Callable, sel_pb: float, init_fn: Callable, init_pb: float=0.0, return_flag: bool=True):
    """Either select an individual from ``collection`` or initialise a new one.

    The choice is random with probabilities ``[sel_pb, init_pb]`` (they must
    sum to 1). An empty collection always initialises. When ``return_flag``
    is True the result is ``(individual, selected?)``, otherwise just the
    individual.
    """
    def _wrap(result, selected):
        if return_flag:
            return (result, selected)
        return result

    if not collection:
        return _wrap(init_fn(base_ind), False)
    action = np.random.choice(range(2), p=[sel_pb, init_pb])
    if action == 0:
        return _wrap(sel_fn(collection), True)
    return _wrap(init_fn(base_ind), False)
def plot_loss(inner_loop_loss, name='Loss Curve'):
    """Plot a loss sequence on the current matplotlib axes.

    Args:
        inner_loop_loss: sequence of loss values (y-axis; x is the index).
        name: legend label for the curve.
    """
    plt.plot(inner_loop_loss, label=name)
    plt.legend()
class SequenceCrossEntropyLoss(tf.keras.losses.Loss):
    """Mean element-wise binary cross-entropy with an epsilon inside the logs
    for numerical stability; expects probabilities in ``y_pred``.
    """

    eps = 1e-08

    def call(self, y_true, y_pred):
        positive_term = y_true * tf.math.log(y_pred + self.eps)
        negative_term = (1 - y_true) * tf.math.log((1 - y_pred) + self.eps)
        return -tf.reduce_mean(positive_term + negative_term)
class ToTHWC(object):
    """Move the leading dimension of a 4-D tensor to the end.

    For a video tensor this presumably maps (C, T, H, W) -> (T, H, W, C) —
    confirm the input layout against the caller.
    """

    def __init__(self):
        pass

    def __call__(self, clip):
        # Rotate dim 0 to the last position, keeping the others in order.
        return clip.permute(1, 2, 3, 0)

    def __repr__(self):
        return self.__class__.__name__
def resnet_v2(inputs, blocks, num_classes=None, is_training=True, global_pool=True, output_stride=None, include_root_block=True, reuse=None, scope=None):
    """Build a ResNet v2 (pre-activation) network with TF-Slim.

    Args:
        inputs: input image tensor.
        blocks: list of resnet_utils block descriptions to stack.
        num_classes: if set, append a 1x1 conv producing logits (and softmax
            'predictions' in the end-points dict).
        is_training: passed to batch norm.
        global_pool: if True, spatially average-pool the final feature map.
        output_stride: desired ratio of input to output resolution; must be a
            multiple of 4 when the root block is included.
        include_root_block: if True, prepend the 7x7/2 conv + 3x3/2 max pool.
        reuse, scope: standard variable-scope controls.

    Returns:
        (net, end_points): final tensor and dict of intermediate activations.
    """
    with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
        end_points_collection = (sc.name + '_end_points')
        with slim.arg_scope([slim.conv2d, bottleneck, resnet_utils.stack_blocks_dense], outputs_collections=end_points_collection):
            with slim.arg_scope([slim.batch_norm], is_training=is_training):
                net = inputs
                if include_root_block:
                    if (output_stride is not None):
                        if ((output_stride % 4) != 0):
                            raise ValueError('The output_stride needs to be a multiple of 4.')
                        # The root block already downsamples by 4.
                        output_stride /= 4
                    # Root conv runs without BN/activation: v2 applies them
                    # before each unit instead.
                    with slim.arg_scope([slim.conv2d], activation_fn=None, normalizer_fn=None):
                        net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
                    net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
                net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
                # Final BN + ReLU (pre-activation nets leave the last unit raw).
                net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
                if global_pool:
                    net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
                if (num_classes is not None):
                    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='logits')
                end_points = slim.utils.convert_collection_to_dict(end_points_collection)
                if (num_classes is not None):
                    end_points['predictions'] = slim.softmax(net, scope='predictions')
                return (net, end_points)
def read_file(filename: str) -> Dict[(str, Any)]:
    """Load and parse a JSON file located in CURRENT_DIR."""
    file_path = (CURRENT_DIR / filename)
    with file_path.open() as handle:
        return json.load(handle)
def load_checkpoints(path, gpu):
    """Load a torch checkpoint, mapping storages onto the given GPU.

    Args:
        path: checkpoint file path.
        gpu: GPU index for map_location, or None to use torch's default.
    """
    if gpu is not None:
        return torch.load(path, map_location='cuda:{}'.format(gpu))
    return torch.load(path)
class BertTokenizerFast(metaclass=DummyObject):
    # Auto-generated placeholder used when the optional `tokenizers` backend
    # is not installed; presumably requires_backends raises an informative
    # ImportError on instantiation — see the dummy-object machinery.
    _backends = ['tokenizers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tokenizers'])
# NOTE(review): the line below is a garbled Dash callback decorator — the
# leading `@app.callback(` (or similar) was stripped during extraction,
# leaving a bare tuple expression. Restore the decorator from the original.
(Output('link-table', 'children'), Input('select-domain', 'value'), Input('add-link-btn', 'n_clicks'), Input('delete-link-btn', 'n_clicks'), [State('add-node-A', 'value'), State('add-node-B', 'value'), State('link_radio_button', 'value'), State('link-table', 'children')])
def add_link(domain_file, add_click, delete_click, node_a, node_b, link_type, table):
    """Dash callback maintaining the table of forbidden/required edges.

    Rebuilds the link dict from the current table, then reacts to whichever
    input triggered: loading a domain file repopulates the dict, the add
    button inserts (node_a, node_b), the delete button removes it.
    """
    ctx = dash.callback_context
    links = {}
    if (table is not None):
        if isinstance(table, list):
            table = table[0]
        # Reconstruct the current links from the rendered table rows.
        links = {(p['Node A'], p['Node B']): p['Type'] for p in table['props']['data'] if p['Node A']}
    if ctx.triggered:
        prop_id = ctx.triggered_id
        if ((prop_id == 'select-domain') and domain_file):
            links = {}
            (_, _, forbids, requires) = causal_method.parse_domain_knowledge(domain_file)
            # NOTE(review): the '' type markers below look stripped — the
            # original presumably used distinct symbols for forbidden vs
            # required links; confirm against the original file.
            if forbids:
                for (node_a, node_b) in forbids:
                    links[(node_a, node_b)] = ''
            if requires:
                for (node_a, node_b) in requires:
                    links[(node_a, node_b)] = ''
        elif ((prop_id == 'add-link-btn') and (add_click > 0) and node_a and node_b):
            links[(node_a, node_b)] = ('' if (link_type == 'Required') else '')
        elif ((prop_id == 'delete-link-btn') and (delete_click > 0) and node_a and node_b):
            links.pop((node_a, node_b), None)
    links = [{'A': a, 'B': b, 'type': t} for ((a, b), t) in links.items()]
    return create_link_table(links, height=80)
def _wrap_header_guess_version(header):
    """Wrap a .npy header string in the lowest format version that fits it.

    Tries 1.0 first (fails for oversized headers), then 2.0 (fails for
    non-latin1 headers), finally 3.0, warning about reduced reader
    compatibility for the newer formats.
    """
    try:
        return _wrap_header(header, (1, 0))
    except ValueError:
        pass
    try:
        ret = _wrap_header(header, (2, 0))
    except UnicodeEncodeError:
        pass
    else:
        # Bug fix: the message previously read 'can only beread' — the space
        # between the joined literals was missing.
        warnings.warn('Stored array in format 2.0. It can only be '
                      'read by NumPy >= 1.9', UserWarning, stacklevel=2)
        return ret
    header = _wrap_header(header, (3, 0))
    warnings.warn('Stored array in format 3.0. It can only be '
                  'read by NumPy >= 1.17', UserWarning, stacklevel=2)
    return header
# NOTE(review): the line below is a garbled decorator remnant — most likely
# Sage's `@cached_function` with the `@cached` prefix stripped during
# extraction. As written it is a bare name and would raise NameError;
# restore the decorator from the original file.
_function
def ncube_isometry_group_cosets(n, orientation_preserving=True):
    """Return the left cosets of H in G, where G is the isometry group of the
    n-cube and H is its subgroup of diagonal +/-1 matrices (restricted to
    determinant 1 when ``orientation_preserving``).

    Asserts that H is a normal subgroup of G (left and right cosets agree)
    and returns the cosets as sorted lists of immutable matrices.
    """
    from sage.misc.misc_c import prod
    from sage.matrix.constructor import diagonal_matrix
    G = ncube_isometry_group(n, orientation_preserving)
    # H: all diagonal +/-1 matrices, optionally restricted to det == 1.
    it = itertools.product((1, (- 1)), repeat=n)
    if orientation_preserving:
        H = [diagonal_matrix(L) for L in it if (prod(L) == 1)]
    else:
        H = [diagonal_matrix(L) for L in it]
    G_todo = set(G)
    for h in H:
        h.set_immutable()
    assert all(((h in G_todo) for h in H)), 'H must be a subset of G'
    # Peel off one coset at a time until every element of G is covered.
    cosets = []
    for g in G:
        if (g not in G_todo):
            continue
        left_coset = sorted(((h * g) for h in H))
        right_coset = sorted(((g * h) for h in H))
        assert (left_coset == right_coset), 'H must be a normal subgroup of G'
        for c in left_coset:
            c.set_immutable()
        G_todo.difference_update(left_coset)
        cosets.append(left_coset)
    return cosets
def register_Ns3LteRrcSapRrcConnectionSetupCompleted_methods(root_module, cls):
    """PyBindGen registration for ns3::LteRrcSap::RrcConnectionSetupCompleted.

    Auto-generated binding code: registers the default constructor, the copy
    constructor, and the public ``rrcTransactionIdentifier`` attribute.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LteRrcSap::RrcConnectionSetupCompleted const &', 'arg0')])
    cls.add_instance_attribute('rrcTransactionIdentifier', 'uint8_t', is_const=False)
    return
def ufunc_add_outer_simple2(A: dace.int32[(2, 2, 2, 2, 2)], B: dace.int32[(2, 2, 2, 2, 2)]):
    """Outer addition of two rank-5 int32 arrays via NumPy's ufunc ``.outer``
    (the result combines both shapes); exercised as a DaCe program elsewhere.
    """
    return np.add.outer(A, B)
class CartanType_decorator(UniqueRepresentation, SageObject, CartanType_abstract):
    """Base class for Cartan types that wrap (decorate) another Cartan type,
    forwarding every structural query to the wrapped type.
    """

    def __init__(self, ct):
        # The decorated Cartan type all queries are delegated to.
        self._type = ct

    def is_irreducible(self):
        return self._type.is_irreducible()

    def is_finite(self):
        return self._type.is_finite()

    def is_crystallographic(self):
        return self._type.is_crystallographic()

    def is_affine(self):
        return self._type.is_affine()

    def rank(self):
        return self._type.rank()

    def index_set(self):
        return self._type.index_set()
def filter_desc_df_cv(desc):
    """Keep only the (metric, statistic) columns for 'acc' and 'total_time'.

    Expects a DataFrame with two-level columns (metric, statistic) and
    selects mean/max/min/std for each of the two metrics, in that order.
    """
    stats = ['mean', 'max', 'min', 'std']
    wanted = [(metric, stat) for metric in ['acc', 'total_time'] for stat in stats]
    return desc[wanted]
def _apply_bpe(model_path: str, in_path: str, out_path: str):
    """Tokenize each line of ``in_path`` with a SentencePiece BPE model and
    write the encoded lines to ``out_path``.

    Args:
        model_path: path to the SentencePiece vocabulary/model file.
        in_path: input text file, one sentence per line.
        out_path: output file receiving the BPE-encoded lines.
    """
    # SentencepieceBPE expects an argparse-style namespace; a namedtuple
    # stands in for it.
    Args = namedtuple('Args', ['sentencepiece_vocab'])
    args = Args(sentencepiece_vocab=model_path)
    tokenizer = SentencepieceBPE(args)
    with open(in_path) as f, open(out_path, 'w') as f_o:
        for s in f:
            f_o.write((tokenizer.encode(s.strip()) + '\n'))
def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None, c1=0.0001, c2=0.9, amax=50, amin=1e-08, xtol=1e-14):
    """Scalar line search satisfying the strong Wolfe conditions.

    Drives MINPACK-2's ``dcsrch`` routine, re-evaluating ``phi``/``derphi``
    whenever it asks for a new function/gradient value.

    Returns:
        (stp, phi1, phi0): the accepted step (or None on failure/maxiter),
        phi at that step, and phi at 0.
    """
    if (phi0 is None):
        phi0 = phi(0.0)
    if (derphi0 is None):
        derphi0 = derphi(0.0)
    if ((old_phi0 is not None) and (derphi0 != 0)):
        # Initial step from the previous function decrease (standard heuristic).
        alpha1 = min(1.0, (((1.01 * 2) * (phi0 - old_phi0)) / derphi0))
        if (alpha1 < 0):
            alpha1 = 1.0
    else:
        alpha1 = 1.0
    phi1 = phi0
    derphi1 = derphi0
    # Work arrays required by the Fortran dcsrch reverse-communication API.
    isave = np.zeros((2,), np.intc)
    dsave = np.zeros((13,), float)
    task = b'START'
    maxiter = 100
    # Bug fix: the loop previously used Python 2's `xrange`, which raises
    # NameError on Python 3.
    for i in range(maxiter):
        (stp, phi1, derphi1, task) = minpack2.dcsrch(alpha1, phi1, derphi1, c1, c2, xtol, task, amin, amax, isave, dsave)
        if (task[:2] == b'FG'):
            # dcsrch requests a new function/gradient evaluation at `stp`.
            alpha1 = stp
            phi1 = phi(stp)
            derphi1 = derphi(stp)
        else:
            break
    else:
        # maxiter exhausted without convergence.
        stp = None
    if ((task[:5] == b'ERROR') or (task[:4] == b'WARN')):
        stp = None
    return (stp, phi1, phi0)
class TestIndexHashOps(serial.SerializedTestCase):
    """Tests for the caffe2 IndexHash operator against a Python reference."""

    # NOTE(review): the two decorators below were garbled in the source (the
    # decorator names were stripped, leaving bare argument tuples that are
    # syntax errors). They are restored here as the conventional caffe2
    # hypothesis decorators — verify against the original file.
    @serial.given(indices=st.sampled_from([np.int32, np.int64]).flatmap((lambda dtype: hu.tensor(min_dim=1, max_dim=1, dtype=dtype))), seed=st.integers(min_value=0, max_value=10), modulo=st.integers(min_value=100000, max_value=200000), **hu.gcs_cpu_only)
    @settings(deadline=10000)
    def test_index_hash_ops(self, indices, seed, modulo, gc, dc):
        def index_hash(indices):
            """Reference implementation of the IndexHash operator."""
            dtype = np.array(indices).dtype
            assert ((dtype == np.int32) or (dtype == np.int64))
            hashed_indices = []
            for index in indices:
                # NOTE(review): the multiplier constant was stripped from this
                # source; 0xDEADBEEF matches the upstream caffe2 test — confirm.
                hashed = dtype.type((0xDEADBEEF * seed))
                indices_bytes = np.array([index], dtype).view(np.int8)
                for b in indices_bytes:
                    hashed = dtype.type(((hashed * 65537) + b))
                # Fold into [0, modulo) while handling negative hashes.
                hashed = ((modulo + (hashed % modulo)) % modulo)
                hashed_indices.append(hashed)
            return [hashed_indices]
        op = core.CreateOperator('IndexHash', ['indices'], ['hashed_indices'], seed=seed, modulo=modulo)
        self.assertDeviceChecks(dc, op, [indices], [0])
        self.assertReferenceChecks(gc, op, [indices], index_hash)
        # In-place variant: input and output share the 'indices' blob.
        op = core.CreateOperator('IndexHash', ['indices'], ['indices'], seed=seed, modulo=modulo)
        self.assertDeviceChecks(dc, op, [indices], [0])
        self.assertReferenceChecks(gc, op, [indices], index_hash)

    def test_shape_and_type_inference(self):
        """IndexHash must preserve both shape and integer dtype."""
        with hu.temp_workspace('shape_type_inf_int64'):
            net = core.Net('test_net')
            net.ConstantFill([], 'values', shape=[64], dtype=core.DataType.INT64)
            net.IndexHash(['values'], ['values_output'])
            (shapes, types) = workspace.InferShapesAndTypes([net], {})
            self.assertEqual(shapes['values_output'], [64])
            self.assertEqual(types['values_output'], core.DataType.INT64)
        with hu.temp_workspace('shape_type_inf_int32'):
            net = core.Net('test_net')
            net.ConstantFill([], 'values', shape=[2, 32], dtype=core.DataType.INT32)
            net.IndexHash(['values'], ['values_output'])
            (shapes, types) = workspace.InferShapesAndTypes([net], {})
            self.assertEqual(shapes['values_output'], [2, 32])
            self.assertEqual(types['values_output'], core.DataType.INT32)
# NOTE(review): the line below is a garbled decorator remnant — the decorator
# name before the argument list was stripped during extraction (it likely
# read `@fpga_test(assert_ii_1=False)`); restore it from the original file.
_test(assert_ii_1=False)
def test_4_interface_to_2_banks_ddr_non_decoupled_interfaces():
    """Four memory interfaces mapped onto two DDR banks without interface
    decoupling; delegates to the shared `four_interface_to_2_banks` helper.
    """
    return four_interface_to_2_banks(mem_type='DDR', decouple_interfaces=False)
def test_clean_remove_bracketed(df_text: pd.DataFrame) -> None:
    """Check `clean_text`'s remove_bracketed operator in three configurations:
    all bracket kinds inclusive, all kinds exclusive (brackets kept, content
    removed), and square brackets only.

    NOTE(review): the three expected-value lists below are garbled — each
    final string literal is unterminated (a URL and the closing quote were
    stripped during extraction, swallowing the `, '123', np.nan, 'NULL'`
    tail into the string). Restore them from the original test file; as
    written this block does not parse.
    """
    pipeline_all = [{'operator': 'remove_bracketed', 'parameters': {'brackets': {'angle', 'curly', 'round', 'square'}}}]
    df_clean_all = clean_text(df_text, 'text', pipeline=pipeline_all)
    df_check_all = df_text.copy()
    df_check_all['text'] = ["'ZZZZZ!' If IMDb would allow one-word reviews, that's what mine would be.", 'The cast played Shakespeare.Shakespeare lost.', 'Simon of the Desert is a 1965 film directed by Luis Bunuel.', "\nI don't think I've seen a film this bad before ", 'Cannes 1968:\tA video essay', 'Recap thread for excellent panel, hosted by with _NYC and ', '#GameOfThrones: Season 8 is #Rotten at 54% on the #Tomatometer. But does it deserve to be?', "Come join and share your thoughts on this week's episode: '123', np.nan, 'NULL']
    pipeline_all_excl = [{'operator': 'remove_bracketed', 'parameters': {'brackets': {'angle', 'curly', 'round', 'square'}, 'inclusive': False}}]
    df_clean_all_excl = clean_text(df_text, 'text', pipeline=pipeline_all_excl)
    df_check_all_excl = df_text.copy()
    df_check_all_excl['text'] = ["'ZZZZZ!' If IMDb would allow one-word reviews, that's what mine would be.", 'The cast played Shakespeare.<><>Shakespeare lost.', 'Simon of the Desert () is a 1965 film directed by Luis Bunuel.', "[]\nI don't think I've seen a film this bad before {}", '<>Cannes 1968:\tA video essay<>', 'Recap thread for excellent panel, hosted by with _NYC and ', '#GameOfThrones: Season 8 is #Rotten at 54% on the #Tomatometer. But does it deserve to be?', "Come join and share your thoughts on this week's episode: '123', np.nan, 'NULL']
    pipeline_square = [{'operator': 'remove_bracketed', 'parameters': {'brackets': 'square'}}]
    df_clean_square = clean_text(df_text, 'text', pipeline=pipeline_square)
    df_check_square = df_text.copy()
    df_check_square['text'] = ["'ZZZZZ!' If IMDb would allow one-word reviews, that's what mine would be.", 'The cast played Shakespeare.<br /><br />Shakespeare lost.', 'Simon of the Desert (Simon del desierto) is a 1965 film directed by Luis Bunuel.', "\nI don't think I've seen a film this bad before {acting, script, effects (!), etc...}", "<a href='/festivals/cannes-1968-a-video-essay'>Cannes 1968:\tA video essay</a>", 'Recap thread for excellent panel, hosted by with _NYC and ', '#GameOfThrones: Season 8 is #Rotten at 54% on the #Tomatometer. But does it deserve to be?', "Come join and share your thoughts on this week's episode: '123', np.nan, 'NULL']
    assert df_check_all.equals(df_clean_all)
    assert df_check_all_excl.equals(df_clean_all_excl)
    assert df_check_square.equals(df_clean_square)
def _is_day_first(date: Union[(str, dd.Series)]) -> Optional[bool]:
    """Decide whether dates use day-first ordering.

    For a dask Series, returns True if any unique per-row judgement is True;
    for a plain string, delegates directly to the single-value check.
    """
    if not isinstance(date, dd.Series):
        return _check_is_day_first(date)
    per_row = date.apply(_check_is_day_first, meta=object)
    return (per_row.unique() == True).any().compute()
def main(dataset, cls_path, out_path, index=0):
    """Train a generator (with decoder) against a frozen classifier.

    Args:
        dataset: one of 'mnist', 'fashion', 'svhn' (anything else uses a
            default hyper-parameter set).
        cls_path: checkpoint path of the pretrained classifier.
        out_path: output directory for losses, images and generator weights.
        index: run index; offsets the random seed for repeated runs.

    Returns:
        Path of the last saved generator checkpoint, or None if none saved.
    """
    global DEVICE
    DEVICE = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    utils.set_seed(seed=(2019 + index))
    num_epochs = 200
    save_every = 100
    viz_every = 10
    assert (num_epochs >= save_every)
    # Per-dataset hyper-parameters: decoder depth, learning rate, and the
    # classifier/decoder loss weights (alpha) and diversity weight (beta).
    if (dataset == 'mnist'):
        dec_layers = 1
        lrn_rate = 0.001
        alpha = 1
        beta = 0
    elif (dataset == 'fashion'):
        dec_layers = 3
        lrn_rate = 0.01
        alpha = 1
        beta = 10
    elif (dataset == 'svhn'):
        dec_layers = 3
        lrn_rate = 0.01
        alpha = 1
        beta = 1
    else:
        dec_layers = 2
        lrn_rate = 0.0001
        alpha = 1
        beta = 0
    cls_network = cls_utils.init_classifier(dataset).to(DEVICE)
    gen_network = gen_utils.init_generator(dataset).to(DEVICE)
    # The classifier is loaded from its pretrained checkpoint and kept fixed.
    utils.load_checkpoints(cls_network, cls_path, DEVICE)
    nz = gen_network.num_noises
    nx = data.to_dataset(dataset).nx
    dec_network = models.Decoder(nx, nz, dec_layers).to(DEVICE)
    networks = (gen_network, cls_network, dec_network)
    path_loss = os.path.join(out_path, 'loss-gen.txt')
    dir_model = os.path.join(out_path, 'generator')
    path_model = None
    os.makedirs(os.path.join(out_path, 'images'), exist_ok=True)
    # Fresh loss log with a TSV-style header.
    with open(path_loss, 'w') as f:
        f.write('Epoch\tClsLoss\tDecLoss\tDivLoss\tLossSum\tAccuracy\n')
    loss1 = gen_loss.ReconstructionLoss(method='kld').to(DEVICE)
    loss2 = gen_loss.ReconstructionLoss(method='l2').to(DEVICE)
    loss3 = gen_loss.DiversityLoss(metric='l1').to(DEVICE)
    losses = (loss1, loss2, loss3)
    # Generator and decoder are optimized jointly.
    params = (list(gen_network.parameters()) + list(dec_network.parameters()))
    optimizer = optim.Adam(params, lrn_rate)
    for epoch in range(1, (num_epochs + 1)):
        (trn_acc, trn_losses) = update(networks, losses, optimizer, alpha, beta)
        # Append one line per epoch: epoch, each loss term, then accuracy.
        with open(path_loss, 'a') as f:
            f.write(f'{epoch:3d}')
            for loss in trn_losses:
                f.write(f' {loss:.8f}')
            f.write(f''' {trn_acc:.8f}
''')
        if ((viz_every > 0) and ((epoch % viz_every) == 0)):
            path = os.path.join(out_path, f'images/images-{epoch:03d}.png')
            gen_utils.visualize_images(gen_network, path, DEVICE)
        if ((epoch % save_every) == 0):
            path = f'{dir_model}-{epoch:03d}.pth.tar'
            utils.save_checkpoints(gen_network, path)
            path_model = path
    print(f'Finished training the generator (index={index}).')
    return path_model
def test_encode_timedelta():
    """Check that parse_timedelta and encode_timedelta round-trip each other
    across the supported duration spellings (unit suffixes, clock notation,
    long-form words, and mixes thereof)."""
    cases = [
        ('1d', 86400),
        ('+32 m 1 s', 1921),
        ('+ 32 m 1 s', 1921),
        ('32m', 1920),
        ('+32m', 1920),
        ('2h32m', 9120),
        ('+2h32m', 9120),
        ('3d2h32m', 268320),
        ('+3d2h32m', 268320),
        ('1w3d2h32m', 873120),
        ('1w 3d 2h 32m', 873120),
        ('1 w 3 d 2 h 32 m', 873120),
        ('4:13', 253),
        (':13', 13),
        ('4:13:02', 15182),
        ('4:13:02.266', 15182.266),
        ('2:04:13:02.266', 187982.266),
        ('2 days, 4:13:02', 187982),
        ('5hr34m56s', 20096),
        ('5 hours, 34 minutes, 56 seconds', 20096),
        ('5 hrs, 34 mins, 56 secs', 20096),
        ('2 days, 5 hours, 34 minutes, 56 seconds', 192896),
        ('172 hr', 619200),
    ]
    for td_str, expected_seconds in cases:
        td = parse_timedelta(td_str)
        assert td.total_seconds() == expected_seconds
        encoded = encode_timedelta(td)
        assert parse_timedelta(encoded) == td, f'Failed to roundtrip {td_str}: {encoded}'
class ASTHelperMixin():
    """Helpers for ast visitors that need finer control over which fields of a
    node are visited.

    Both public methods mirror ``ast.NodeTransformer.generic_visit`` semantics:
    each child AST node is replaced by the result of ``self.visit(child)``;
    a ``None`` result removes the child, and a non-AST result (an iterable of
    nodes) is spliced into the surrounding list.  ``self.visit`` is expected to
    be supplied by the class this mixin is combined with.
    """

    def generic_visit_filtered(self, node: ast.AST, filter: Optional[Set[str]] = None) -> ast.AST:
        """Visit every field of *node* except those named in *filter*.

        :param node: the AST node whose children should be visited.
        :param filter: field names to skip entirely (``None`` means skip none).
        :return: *node*, mutated in place.
        """
        skip = filter or set()
        for field, old_value in ast.iter_fields(node):
            if field in skip:
                continue
            self._visit_field_value(node, field, old_value)
        return node

    def generic_visit_field(self, node: ast.AST, field: str) -> ast.AST:
        """Visit only the single named *field* of *node*.

        :param node: the AST node whose child field should be visited.
        :param field: name of the field to visit.
        :return: *node*, mutated in place.
        """
        self._visit_field_value(node, field, getattr(node, field))
        return node

    def _visit_field_value(self, node: ast.AST, field: str, old_value) -> None:
        """Apply ``self.visit`` to one field's value, updating *node* in place.

        Shared implementation for the two public methods above (they previously
        duplicated this logic verbatim).
        """
        if isinstance(old_value, list):
            new_values = []
            for value in old_value:
                if isinstance(value, ast.AST):
                    value = self.visit(value)
                    if value is None:
                        # Visitor deleted this child: drop it from the list.
                        continue
                    elif not isinstance(value, ast.AST):
                        # Visitor returned several replacement nodes: splice in.
                        new_values.extend(value)
                        continue
                new_values.append(value)
            # Mutate the original list so external references stay valid.
            old_value[:] = new_values
        elif isinstance(old_value, ast.AST):
            new_node = self.visit(old_value)
            if new_node is None:
                delattr(node, field)
            else:
                setattr(node, field, new_node)
class SetPartitionsBk_k(SetPartitionsAk_k):
    def _repr_(self):
        """Repr: the A_k description with the block-size restriction appended."""
        base = SetPartitionsAk_k._repr_(self)
        return base + ' with block size 2'

    def __contains__(self, x):
        """``x`` belongs here iff it lies in A_k and every block has size 2."""
        if not SetPartitionsAk_k.__contains__(self, x):
            return False
        return all(len(part) == 2 for part in x)

    def cardinality(self):
        """Count of such partitions: the product of odd numbers below 2k
        (i.e. the double factorial (2k - 1)!!)."""
        total = 1
        odd = 1
        while odd < 2 * self.k:
            total *= odd
            odd += 2
        return total

    def __iter__(self):
        """Iterate over set partitions of the ground set into pairs."""
        pair_sizes = [2] * (len(self._set) // 2)
        for sp in SetPartitions(self._set, pair_sizes):
            yield self.element_class(self, sp)
def test_get_request_with_body(testdir, cli, base_url, hypothesis_max_examples, schema_with_get_payload, snapshot_cli):
    """Dump the schema to a YAML file, run the CLI with schema validation on,
    and compare the full run output against the stored snapshot."""
    raw_schema = yaml.dump(schema_with_get_payload)
    schema_file = testdir.makefile('.yaml', schema=raw_schema)
    max_examples = hypothesis_max_examples or 1
    result = cli.run(
        str(schema_file),
        f'--base-url={base_url}',
        f'--hypothesis-max-examples={max_examples}',
        '--validate-schema=true',
    )
    assert result == snapshot_cli
class KoalaScenario(Scenario):
    """Scenario wrapping the Koala evaluation prompt set.

    Downloads a JSONL file of prompts and turns each line into a test-split
    Instance with an empty reference list (open-ended instruction following,
    so there is no gold answer to compare against).
    """

    name = 'koala'
    description = 'Koala eval dataset'
    tags = ['instructions']

    def get_instances(self, output_path: str) -> List[Instance]:
        # TODO(review): the download URL was lost in this revision — the
        # original line was an unterminated string literal (`source_url = '`),
        # which is a syntax error as-is. Restore the real location of
        # Koala_prompts.jsonl before using this scenario.
        source_url = ''
        data_path: str = os.path.join(output_path, 'Koala_prompts.jsonl')
        ensure_file_downloaded(source_url=source_url, target_path=data_path)
        instances: List[Instance] = []
        # Each JSONL line is one record; only its 'prompt' field is used.
        # Use a context manager so the file handle is closed (the original
        # `for line in open(...)` leaked it).
        with open(data_path, encoding='utf-8') as f:
            for line in f:
                raw = json.loads(line)
                instance = Instance(input=Input(text=raw['prompt']), references=[], split=TEST_SPLIT)
                instances.append(instance)
        return instances
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.