def draw_paths_horizontal(times, paths, N, expectations, title=None, KDE=False, marginal=False, marginalT=None, envelope=False, lower=None, upper=None, style='seaborn-v0_8-whitegrid', colormap='RdYlBu_r', **fig_kw): with plt.style.context(style): if marginal: fig = plt.figure(**fig_kw) gs = GridSpec(1, 5) ax1 = fig.add_subplot(gs[:4]) ax2 = fig.add_subplot(gs[4:], sharey=ax1) last_points = [path[(- 1)] for path in paths] cm = plt.colormaps[colormap] n_bins = int(np.sqrt(N)) (n, bins, patches) = ax2.hist(last_points, n_bins, orientation='horizontal', density=True) col = np.linspace(0, 1, n_bins, endpoint=True) for (c, p) in zip(col, patches): plt.setp(p, 'facecolor', cm(c)) my_bins = pd.cut(last_points, bins=bins, labels=range((len(bins) - 1)), include_lowest=True) colors = [col[b] for b in my_bins] if KDE: kde = sm.nonparametric.KDEUnivariate(last_points) kde.fit() ax2.plot(kde.density, kde.support, '--', lw=1.75, alpha=0.6, label='$X_T$ KDE', zorder=10) ax2.axhline(y=np.mean(last_points), linestyle='--', lw=1.75, label='$\\overline{X_T}$') else: marginaldist = marginalT x = np.linspace(marginaldist.ppf(0.001), marginaldist.ppf(0.999), 100) ax2.plot(marginaldist.pdf(x), x, '-', lw=1.75, alpha=0.6, label='$X_T$ pdf') ax2.axhline(y=marginaldist.mean(), linestyle='--', lw=1.75, label='$E[X_T]$') plt.setp(ax2.get_yticklabels(), visible=False) ax2.set_title('$X_T$') ax2.legend() for i in range(N): ax1.plot(times, paths[i], '-', lw=1.0, color=cm(colors[i])) ax1.plot(times, expectations, '--', lw=1.75, label='$E[X_t]$') if envelope: ax1.fill_between(times, upper, lower, alpha=0.25, color='grey') plt.subplots_adjust(wspace=0.025, hspace=0.025) else: (fig, ax1) = plt.subplots(**fig_kw) for i in range(N): ax1.plot(times, paths[i], '-', lw=1.0) ax1.plot(times, expectations, '--', lw=1.75, label='$E[X_t]$') if envelope: ax1.fill_between(times, upper, lower, color='grey', alpha=0.25) fig.suptitle(title) ax1.set_title('Simulated Paths $X_t, t \\in [t_0, T]$') ax1.set_xlabel('$t$') ax1.set_ylabel('$X(t)$') ax1.legend() plt.show() return fig
def test_channel_update_traces():
    with expected_protocol(
        AnritsuMS464xB,
        [(':CALC1:PAR:COUN?', '4'),
         (':CALC1:PAR:COUN?', '12')],
    ) as instr:
        assert len(instr.ch_1.traces) == 16
        instr.ch_1.update_traces()
        assert len(instr.ch_1.traces) == 4
        instr.ch_1.update_traces()
        assert len(instr.ch_1.traces) == 12
class CheckpointHandler(): def __init__(self, coordinator_args: CoordinatorArguments, collab_optimizer_args: CollaborativeOptimizerArguments, averager_args: AveragerArguments, dht: hivemind.DHT): self.save_checkpoint_step_interval = coordinator_args.save_checkpoint_step_interval self.repo_path = coordinator_args.repo_path self.upload_interval = coordinator_args.upload_interval self.previous_step = (- 1) config = AlbertConfig.from_pretrained(coordinator_args.model_config_path) self.model = AlbertForPreTraining(config) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [{'params': [p for (n, p) in self.model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01}, {'params': [p for (n, p) in self.model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}] opt = Lamb(optimizer_grouped_parameters, lr=0.00176, weight_decay=0.01, clamp_value=10000.0, debias=True) adjusted_target_batch_size = (collab_optimizer_args.target_batch_size - collab_optimizer_args.batch_size_lead) self.collaborative_optimizer = hivemind.CollaborativeOptimizer(opt=opt, dht=dht, prefix=experiment_prefix, compression_type=hivemind.utils.CompressionType.Value(collab_optimizer_args.compression), throughput=collab_optimizer_args.bandwidth, target_batch_size=adjusted_target_batch_size, client_mode=collab_optimizer_args.client_mode, verbose=True, start=True, **asdict(averager_args)) self.previous_timestamp = time.time() def is_time_to_save_state(self, cur_step): if (self.save_checkpoint_step_interval is None): return False elif ((cur_step - self.previous_step) >= self.save_checkpoint_step_interval): return True else: return False def save_state(self, cur_step): self.collaborative_optimizer.load_state_from_peers() self.previous_step = cur_step def is_time_to_upload(self): if (self.repo_path is None): return False elif ((time.time() - self.previous_timestamp) >= self.upload_interval): return True else: return False def upload_checkpoint(self, current_loss): self.model.save_pretrained(self.repo_path) torch.save(self.collaborative_optimizer.opt.state_dict(), f'{self.repo_path}/optimizer_state.pt') self.previous_timestamp = time.time() try: subprocess.run('git add --all', shell=True, check=True, cwd=self.repo_path) current_step = self.collaborative_optimizer.collaboration_state.optimizer_step subprocess.run(f"git commit -m 'Step {current_step}, loss {current_loss:.3f}'", shell=True, check=True, cwd=self.repo_path) subprocess.run('git push', shell=True, check=True, cwd=self.repo_path) except subprocess.CalledProcessError as e: logger.warning('Error while uploading model:', e.output)
def type_check_config_vars(tempdir, config_name): f = open(path.join(tempdir, (config_name + '.pyi')), 'w') f.write(confreader.config_pyi_header) for (name, type_) in confreader.Config.__annotations__.items(): f.write(name) f.write(': ') f.write(type_) f.write('\n') f.close() newenv = environ.copy() newenv['PYTHONPATH'] = (newenv.get('PYTHONPATH', '') + ':') p = subprocess.Popen(['stubtest', '--concise', config_name], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=tempdir, text=True, env=newenv) (stdout, stderr) = p.communicate() missing_vars = [] for line in (stdout + stderr).split('\n'): if ('is not present at runtime' in line): missing_vars.append(line.split()[0]) whitelist = open(path.join(tempdir, 'stubtest_whitelist'), 'w') for var in missing_vars: whitelist.write(var) whitelist.write('\n') whitelist.close() p = subprocess.Popen(['stubtest', '--ignore-missing-stub', '--whitelist', whitelist.name, config_name], cwd=tempdir, text=True, env=newenv) p.wait() if (p.returncode != 0): sys.exit(1)
def main(): parser = argparse.ArgumentParser() parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.') parser.add_argument('--bert_model', default=None, type=str, required=True, help='Bert pre-trained model selected in the list: bert-base-uncased, bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, bert-base-multilingual-cased, bert-base-chinese.') parser.add_argument('--task_name', default=None, type=str, required=True, help='The name of the task to train.') parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.') parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3') parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, and sequences shorter \nthan this will be padded.') parser.add_argument('--do_train', action='store_true', help='Whether to run training.') parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.') parser.add_argument('--do_predict', action='store_true', help='Whether to run eval on the test set.') parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.') parser.add_argument('--train_batch_size', default=32, type=int, help='Total batch size for training.') parser.add_argument('--eval_batch_size', default=8, type=int, help='Total batch size for eval.') parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.') parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.') parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.') parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available') parser.add_argument('--local_rank', type=int, default=(- 1), help='local_rank for distributed training on gpus') parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.') parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit float precision instead of 32-bit') parser.add_argument('--loss_scale', type=float, default=0, help='Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\n0 (default value): dynamic loss scaling.\nPositive power of 2: static loss scaling value.\n') parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.') parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.') args = parser.parse_args() if (args.server_ip and args.server_port): import ptvsd print('Waiting for debugger attach') ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() processors = {'kg': KGProcessor} if ((args.local_rank == (- 1)) or args.no_cuda): device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu')) n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device('cuda', args.local_rank) n_gpu = 1 torch.distributed.init_process_group(backend='nccl') logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=(logging.INFO if (args.local_rank in [(- 1), 0]) else logging.WARN)) logger.info('device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}'.format(device, n_gpu, bool((args.local_rank != (- 1))), args.fp16)) if (args.gradient_accumulation_steps < 1): raise ValueError('Invalid gradient_accumulation_steps parameter: {}, should be >= 1'.format(args.gradient_accumulation_steps)) args.train_batch_size = (args.train_batch_size // args.gradient_accumulation_steps) args.seed = random.randint(1, 200) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if (n_gpu > 0): torch.cuda.manual_seed_all(args.seed) if ((not args.do_train) and (not args.do_eval)): raise ValueError('At least one of `do_train` or `do_eval` must be True.') if (os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train): raise ValueError('Output directory ({}) already exists and is not empty.'.format(args.output_dir)) if (not os.path.exists(args.output_dir)): os.makedirs(args.output_dir) task_name = args.task_name.lower() if (task_name not in processors): raise ValueError(('Task not found: %s' % task_name)) processor = processors[task_name]() label_list = processor.get_relations(args.data_dir) num_labels = len(label_list) entity_list = processor.get_entities(args.data_dir) tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) train_examples = None num_train_optimization_steps = 0 if args.do_train: train_examples = processor.get_train_examples(args.data_dir) num_train_optimization_steps = (int(((len(train_examples) / args.train_batch_size) / args.gradient_accumulation_steps)) * args.num_train_epochs) if (args.local_rank != (- 1)): num_train_optimization_steps = (num_train_optimization_steps // torch.distributed.get_world_size()) cache_dir = (args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))) model = BertForSequenceClassification.from_pretrained(args.bert_model, cache_dir=cache_dir, num_labels=num_labels) if args.fp16: model.half() model.to(device) if (args.local_rank != (- 1)): try: from apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError('Please install apex from to use distributed and fp16 training.') model = DDP(model) elif (n_gpu > 1): model = torch.nn.DataParallel(model) param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] 
optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01}, {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}] if args.fp16: try: from apex.optimizers import FP16_Optimizer from apex.optimizers import FusedAdam except ImportError: raise ImportError('Please install apex from to use distributed and fp16 training.') optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0) if (args.loss_scale == 0): optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion, t_total=num_train_optimization_steps) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=num_train_optimization_steps) global_step = 0 nb_tr_steps = 0 tr_loss = 0 if args.do_train: train_features = convert_examples_to_features(train_examples, label_list, args.max_seq_length, tokenizer) logger.info('***** Running training *****') logger.info(' Num examples = %d', len(train_examples)) logger.info(' Batch size = %d', args.train_batch_size) logger.info(' Num steps = %d', num_train_optimization_steps) all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long) train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) if (args.local_rank == (- 1)): train_sampler = RandomSampler(train_data) else: train_sampler = DistributedSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) model.train() for _ in trange(int(args.num_train_epochs), desc='Epoch'): tr_loss = 0 (nb_tr_examples, nb_tr_steps) = (0, 0) for (step, batch) in enumerate(tqdm(train_dataloader, desc='Iteration')): batch = tuple((t.to(device) for t in batch)) (input_ids, input_mask, segment_ids, label_ids) = batch logits = model(input_ids, segment_ids, input_mask, labels=None) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view((- 1), num_labels), label_ids.view((- 1))) if (n_gpu > 1): loss = loss.mean() if (args.gradient_accumulation_steps > 1): loss = (loss / args.gradient_accumulation_steps) if args.fp16: optimizer.backward(loss) else: loss.backward() tr_loss += loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps += 1 if (((step + 1) % args.gradient_accumulation_steps) == 0): if args.fp16: lr_this_step = (args.learning_rate * warmup_linear.get_lr((global_step / num_train_optimization_steps), args.warmup_proportion)) for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step optimizer.step() optimizer.zero_grad() global_step += 1 print('Training loss: ', tr_loss, nb_tr_examples) if (args.do_train and ((args.local_rank == (- 1)) or (torch.distributed.get_rank() == 0))): model_to_save = (model.module if hasattr(model, 'module') else model) output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME) output_config_file = os.path.join(args.output_dir, CONFIG_NAME) torch.save(model_to_save.state_dict(), output_model_file) 
model_to_save.config.to_json_file(output_config_file) tokenizer.save_vocabulary(args.output_dir) model = BertForSequenceClassification.from_pretrained(args.output_dir, num_labels=num_labels) tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) else: model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels) model.to(device) if (args.do_eval and ((args.local_rank == (- 1)) or (torch.distributed.get_rank() == 0))): eval_examples = processor.get_dev_examples(args.data_dir) eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer) logger.info('***** Running evaluation *****') logger.info(' Num examples = %d', len(eval_examples)) logger.info(' Batch size = %d', args.eval_batch_size) all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) model = BertForSequenceClassification.from_pretrained(args.output_dir, num_labels=num_labels) tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(device) model.eval() eval_loss = 0 nb_eval_steps = 0 preds = [] for (input_ids, input_mask, segment_ids, label_ids) in tqdm(eval_dataloader, desc='Evaluating'): input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) label_ids = label_ids.to(device) with torch.no_grad(): logits = model(input_ids, segment_ids, input_mask, labels=None) loss_fct = CrossEntropyLoss() tmp_eval_loss = loss_fct(logits.view((- 1), num_labels), label_ids.view((- 1))) print(label_ids.view((- 1))) eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if (len(preds) == 0): preds.append(logits.detach().cpu().numpy()) else: preds[0] = np.append(preds[0], logits.detach().cpu().numpy(), axis=0) eval_loss = (eval_loss / nb_eval_steps) preds = preds[0] preds = np.argmax(preds, axis=1) result = compute_metrics(task_name, preds, all_label_ids.numpy()) loss = ((tr_loss / nb_tr_steps) if args.do_train else None) result['eval_loss'] = eval_loss result['global_step'] = global_step result['loss'] = loss output_eval_file = os.path.join(args.output_dir, 'eval_results.txt') with open(output_eval_file, 'w') as writer: logger.info('***** Eval results *****') for key in sorted(result.keys()): logger.info(' %s = %s', key, str(result[key])) writer.write(('%s = %s\n' % (key, str(result[key])))) if (args.do_predict and ((args.local_rank == (- 1)) or (torch.distributed.get_rank() == 0))): train_triples = processor.get_train_triples(args.data_dir) dev_triples = processor.get_dev_triples(args.data_dir) test_triples = processor.get_test_triples(args.data_dir) all_triples = ((train_triples + dev_triples) + test_triples) all_triples_str_set = set() for triple in all_triples: triple_str = '\t'.join(triple) all_triples_str_set.add(triple_str) eval_examples = processor.get_test_examples(args.data_dir) eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer) logger.info('***** Running Prediction *****') 
logger.info(' Num examples = %d', len(eval_examples)) logger.info(' Batch size = %d', args.eval_batch_size) all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) model = BertForSequenceClassification.from_pretrained(args.output_dir, num_labels=num_labels) tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(device) model.eval() eval_loss = 0 nb_eval_steps = 0 preds = [] for (input_ids, input_mask, segment_ids, label_ids) in tqdm(eval_dataloader, desc='Testing'): input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) label_ids = label_ids.to(device) with torch.no_grad(): logits = model(input_ids, segment_ids, input_mask, labels=None) loss_fct = CrossEntropyLoss() tmp_eval_loss = loss_fct(logits.view((- 1), num_labels), label_ids.view((- 1))) eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if (len(preds) == 0): preds.append(logits.detach().cpu().numpy()) else: preds[0] = np.append(preds[0], logits.detach().cpu().numpy(), axis=0) eval_loss = (eval_loss / nb_eval_steps) preds = preds[0] print(preds, preds.shape) all_label_ids = all_label_ids.numpy() ranks = [] filter_ranks = [] hits = [] hits_filter = [] for i in range(10): hits.append([]) hits_filter.append([]) for (i, pred) in enumerate(preds): rel_values = torch.tensor(pred) (_, argsort1) = torch.sort(rel_values, descending=True) argsort1 = argsort1.cpu().numpy() rank = np.where((argsort1 == all_label_ids[i]))[0][0] ranks.append((rank + 1)) test_triple = test_triples[i] filter_rank = rank for tmp_label_id in argsort1[:rank]: tmp_label = label_list[tmp_label_id] tmp_triple = [test_triple[0], tmp_label, test_triple[2]] tmp_triple_str = '\t'.join(tmp_triple) if (tmp_triple_str in all_triples_str_set): filter_rank -= 1 filter_ranks.append((filter_rank + 1)) for hits_level in range(10): if (rank <= hits_level): hits[hits_level].append(1.0) else: hits[hits_level].append(0.0) if (filter_rank <= hits_level): hits_filter[hits_level].append(1.0) else: hits_filter[hits_level].append(0.0) print('Raw mean rank: ', np.mean(ranks)) print('Filtered mean rank: ', np.mean(filter_ranks)) for i in [0, 2, 9]: print('Raw Hits {0}: {1}'.format((i + 1), np.mean(hits[i]))) print('hits_filter Hits {0}: {1}'.format((i + 1), np.mean(hits_filter[i]))) preds = np.argmax(preds, axis=1) result = compute_metrics(task_name, preds, all_label_ids) loss = ((tr_loss / nb_tr_steps) if args.do_train else None) result['eval_loss'] = eval_loss result['global_step'] = global_step result['loss'] = loss output_eval_file = os.path.join(args.output_dir, 'test_results.txt') with open(output_eval_file, 'w') as writer: logger.info('***** Test results *****') for key in sorted(result.keys()): logger.info(' %s = %s', key, str(result[key])) writer.write(('%s = %s\n' % (key, str(result[key])))) print('Relation prediction , raw...') print(metrics.accuracy_score(all_label_ids, preds))
def test(config, env, agent, batch_size, word2id):
    agent.model.eval()
    obs, infos = env.reset()
    agent.reset(infos)
    print_command_string, print_rewards = [[] for _ in infos], [[] for _ in infos]
    print_interm_rewards = [[] for _ in infos]
    provide_prev_action = config['general']['provide_prev_action']
    dones = [False] * batch_size
    rewards = None
    prev_actions = ['' for _ in range(batch_size)] if provide_prev_action else None
    input_description, _ = agent.get_game_step_info(obs, infos, prev_actions)
    curr_ras_hidden, curr_ras_cell = None, None
    while not all(dones):
        v_idx, n_idx, chosen_strings, curr_ras_hidden, curr_ras_cell = agent.generate_one_command(
            input_description, curr_ras_hidden, curr_ras_cell, epsilon=0.0)
        obs, rewards, dones, infos = env.step(chosen_strings)
        if provide_prev_action:
            prev_actions = chosen_strings
        for i in range(len(infos)):
            print_command_string[i].append(chosen_strings[i])
            print_rewards[i].append(rewards[i])
            print_interm_rewards[i].append(infos[i]['intermediate_reward'])
        if type(dones) is bool:
            dones = [dones] * batch_size
        agent.rewards.append(rewards)
        agent.dones.append(dones)
        agent.intermediate_rewards.append([info['intermediate_reward'] for info in infos])
        input_description, _ = agent.get_game_step_info(obs, infos, prev_actions)
    agent.finish()
    R = agent.final_rewards.mean()
    S = agent.step_used_before_done.mean()
    IR = agent.final_intermediate_rewards.mean()
    msg = '====EVAL==== R={:.3f}, IR={:.3f}, S={:.3f}'
    msg = msg.format(R, IR, S)
    print(msg)
    print('\n')
    return (R, IR, S)
def do_virtual_scan(cat_dir, worker_id, num_workers):
    object_folders = [dir for dir in os.listdir(cat_dir)]
    object_folders.sort()
    print('#Model: %d' % len(object_folders))
    worker_size = int(math.ceil(len(object_folders) / num_workers))
    print('Worker size: ' + str(worker_size))
    start_idx = worker_id * worker_size
    end_idx = start_idx + worker_size
    for o_idx, obj_f in enumerate(object_folders):
        if start_idx <= o_idx < end_idx:
            virtual_scane_one_model(os.path.join(cat_dir, obj_f), worker_id)
    print('Done!')
class Pix2PixHDModel(BaseModel): def name(self): return 'Pix2PixHDModel' def init_loss_filter(self, use_gan_feat_loss, use_vgg_loss, use_l1_image_loss): flags = (True, use_gan_feat_loss, use_vgg_loss, use_l1_image_loss, True, True) def loss_filter(g_gan, g_gan_feat, g_vgg, g_image, d_real, d_fake): return [l for (l, f) in zip((g_gan, g_gan_feat, g_vgg, g_image, d_real, d_fake), flags) if f] return loss_filter def initialize(self, opt): BaseModel.initialize(self, opt) if ((opt.resize_or_crop != 'none') or (not opt.isTrain)): torch.backends.cudnn.benchmark = True self.isTrain = opt.isTrain input_nc = opt.label_nc netG_input_nc = input_nc self.netG = networks.define_G(netG_input_nc, opt.output_nc, opt.ngf, opt.n_downsample_global, opt.n_blocks_global, opt.norm, gpu_ids=self.gpu_ids, is_shortcut=opt.is_shortcut) if self.isTrain: use_sigmoid = opt.no_lsgan if opt.single_input_D: netD_input_nc = input_nc else: netD_input_nc = (input_nc + opt.output_nc) self.netD = networks.define_D(netD_input_nc, opt.ndf, opt.n_layers_D, opt.norm, use_sigmoid, opt.num_D, (not opt.no_ganFeat_loss), gpu_ids=self.gpu_ids, spectral_norm=opt.spectral_normalization_D, dropout_=opt.dropout_D, no_lsgan=opt.no_lsgan) if ((not self.isTrain) or opt.continue_train or opt.load_pretrain): pretrained_path = ('' if (not self.isTrain) else opt.load_pretrain) self.load_network(self.netG, 'G', opt.which_epoch, pretrained_path) if self.isTrain: self.load_network(self.netD, 'D', opt.which_epoch, pretrained_path) if self.isTrain: if ((opt.pool_size > 0) and (len(self.gpu_ids) > 1)): raise NotImplementedError('Fake Pool Not Implemented for MultiGPU') self.fake_pool = ImagePool(opt.pool_size) self.old_lr = opt.lr self.old_lr_D = opt.lr_D self.loss_filter = self.init_loss_filter((not opt.no_ganFeat_loss), (not opt.no_vgg_loss), opt.l1_image_loss) self.criterionGAN = networks.GANLoss(use_lsgan=(not opt.no_lsgan), tensor=self.Tensor) self.criterionFeat = torch.nn.L1Loss() if (not opt.no_vgg_loss): self.criterionVGG = networks.VGGLoss(self.gpu_ids) self.loss_names = self.loss_filter('G_GAN', 'G_GAN_Feat', 'G_VGG', 'G_Image', 'D_real', 'D_fake') params = list(self.netG.parameters()) self.optimizer_G = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999)) params = list(self.netD.parameters()) self.optimizer_D = torch.optim.Adam(params, lr=opt.lr_D, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay) def encode_input(self, label_map, real_image=None, infer=False): input_label = label_map.data.cuda() input_label = Variable(input_label, volatile=infer) if ((real_image is not None) and self.opt.isTrain): real_image = Variable(real_image.data.cuda()) return (input_label, real_image) def discriminate(self, input_label, test_image, use_pool=False, is_single_input=False): if is_single_input: input_concat = test_image else: input_concat = torch.cat((input_label, test_image.detach()), dim=1) if use_pool: fake_query = self.fake_pool.query(input_concat) return self.netD.forward(fake_query) else: return self.netD.forward(input_concat) def forward(self, label, image, infer=False, number_of_pixel=None, min_row=None, max_row=None, min_col=None, max_col=None): (input_concat, real_image) = self.encode_input(label, image) fake_image = self.netG.forward(input_concat) disc_fake = fake_image.clone() disc_input = input_concat.clone() disc_real = real_image.clone() pred_fake_pool = self.discriminate(disc_input, disc_fake, use_pool=True, is_single_input=self.opt.single_input_D) loss_D_fake = self.criterionGAN(pred_fake_pool, False) pred_real = 
self.discriminate(disc_input, disc_real, is_single_input=self.opt.single_input_D) loss_D_real = self.criterionGAN(pred_real, True) loss_G_GAN = 0 if self.opt.single_input_D: pred_fake = self.netD.forward(disc_fake) else: pred_fake = self.netD.forward(torch.cat((disc_input, disc_fake), dim=1)) loss_G_GAN = self.criterionGAN(pred_fake, True) loss_G_GAN_Feat = 0 if (not self.opt.no_ganFeat_loss): feat_weights = (4.0 / (self.opt.n_layers_D + 1)) D_weights = (1.0 / self.opt.num_D) for i in range(self.opt.num_D): for j in range((len(pred_fake[i]) - 1)): loss_G_GAN_Feat += (((D_weights * feat_weights) * self.criterionFeat(pred_fake[i][j], pred_real[i][j].detach())) * self.opt.lambda_feat) loss_G_VGG = 0 if (not self.opt.no_vgg_loss): loss_G_VGG = (self.criterionVGG(fake_image, real_image) * self.opt.lambda_feat) loss_G_Image = 0 if self.opt.l1_image_loss: loss_G_Image = (self.criterionFeat(fake_image, real_image) * self.opt.l1_image_loss_coef) return [self.loss_filter(loss_G_GAN, loss_G_GAN_Feat, loss_G_VGG, loss_G_Image, loss_D_real, loss_D_fake), fake_image] def inference(self, label, image=None): image = (Variable(image) if (image is not None) else None) (input_concat, real_image) = self.encode_input(Variable(label), image, infer=True) if torch.__version__.startswith('0.4'): with torch.no_grad(): fake_image = self.netG.forward(input_concat) else: fake_image = self.netG.forward(input_concat) return fake_image def save(self, which_epoch): self.save_network(self.netG, 'G', which_epoch, self.gpu_ids) self.save_network(self.netD, 'D', which_epoch, self.gpu_ids) def update_fixed_params(self): params = list(self.netG.parameters()) self.optimizer_G = torch.optim.Adam(params, lr=self.opt.lr, betas=(self.opt.beta1, 0.999)) def update_learning_rate(self): lrd = (self.opt.lr / self.opt.niter_decay) lr = (self.old_lr - lrd) lrd_D = (self.opt.lr_D / self.opt.niter_decay) lr_D = (self.old_lr_D - lrd_D) for param_group in self.optimizer_D.param_groups: param_group['lr'] = lr_D for param_group in self.optimizer_G.param_groups: param_group['lr'] = lr if self.opt.verbose: print(('update learning rate G: %f -> %f' % (self.old_lr, lr))) print(('update learning rate D: %f -> %f' % (self.old_lr_D, lr_D))) self.old_lr = lr self.old_lr_D = lr_D
def assert_dataframe_equality(output_df: DataFrame, target_df: DataFrame) -> None:
    if not ((output_df.count() == target_df.count())
            and (len(target_df.columns) == len(output_df.columns))):
        raise AssertionError(
            f'''DataFrame shape mismatch:
output_df shape: {len(output_df.columns)} columns and {output_df.count()} rows
target_df shape: {len(target_df.columns)} columns and {target_df.count()} rows.''')
    select_cols = [col(c) for c in output_df.schema.fieldNames()]
    output_data = sorted(output_df.select(*select_cols).collect())
    output_data = [row.asDict(recursive=True) for row in output_data]
    target_data = sorted(target_df.select(*select_cols).collect())
    target_data = [row.asDict(recursive=True) for row in target_data]
    if not (output_data == target_data):
        raise AssertionError(
            f'''DataFrames have different values:
output_df records: {output_data}
target_data records: {target_data}.''')
def _append_sha1_hash_to_table(table: pa.Table, hash_column: pa.Array) -> pa.Table:
    hash_column_np = hash_column.to_numpy()
    result = []
    for hash_value in hash_column_np:
        assert hash_value is not None, 'Expected non-null primary key'
        result.append(hashlib.sha1(hash_value.encode('utf-8')).hexdigest())
    return sc.append_pk_hash_string_column(table, result)
class Registry(dict):

    def __init__(self, *args, **kwargs):
        super(Registry, self).__init__(*args, **kwargs)

    def register(self, module_name, module=None):
        if module is not None:
            _register_generic(self, module_name, module)
            return

        def register_fn(fn):
            _register_generic(self, module_name, fn)
            return fn

        return register_fn
def load_network(ckpt):
    g_running = StyledGenerator(code_size).cuda()
    discriminator = Discriminator(from_rgb_activate=True).cuda()
    ckpt = torch.load(ckpt)
    g_running.load_state_dict(ckpt['g_running'])
    discriminator.load_state_dict(ckpt['discriminator'])
    return (g_running, discriminator)
class PlayBlockChange(Packet):

    id = 11
    to = 1

    def __init__(self, x: int, y: int, z: int, block_id: int) -> None:
        super().__init__()
        self.x, self.y, self.z = x, y, z
        self.block_id = block_id

    def encode(self) -> bytes:
        return Buffer.pack_position(self.x, self.y, self.z) + Buffer.pack_varint(self.block_id)
def aes_encrypt(word, key=config.aes_key, iv=None, output='base64', padding=True,
                padding_style='pkcs7', mode=AES.MODE_CBC, no_packb=False):
    if iv is None:
        iv = Crypto_random.read(16)
    if not no_packb:
        word = umsgpack.packb(word)
    if padding:
        word = pad(word, AES.block_size, padding_style)
    if mode in [AES.MODE_ECB, AES.MODE_CTR]:
        aes = AES.new(key, mode)
    else:
        aes = AES.new(key, mode, iv)
    ciphertext = aes.encrypt(word)
    if no_packb:
        output = output.lower()
        if output == 'base64':
            return base64.encodebytes(ciphertext).decode('utf-8')
        elif output == 'hex':
            return b2a_hex(ciphertext).decode('utf-8')
        return ciphertext
    return umsgpack.packb([ciphertext, iv])
def solve_euclidian_tsp(points, threads=0, timeout=None, gap=None): n = len(points) def subtourelim(model, where): if (where == GRB.Callback.MIPSOL): vals = model.cbGetSolution(model._vars) selected = tuplelist(((i, j) for (i, j) in model._vars.keys() if (vals[(i, j)] > 0.5))) tour = subtour(selected) if (len(tour) < n): model.cbLazy((quicksum((model._vars[(i, j)] for (i, j) in itertools.combinations(tour, 2))) <= (len(tour) - 1))) def subtour(edges): unvisited = list(range(n)) cycle = range((n + 1)) while unvisited: thiscycle = [] neighbors = unvisited while neighbors: current = neighbors[0] thiscycle.append(current) unvisited.remove(current) neighbors = [j for (i, j) in edges.select(current, '*') if (j in unvisited)] if (len(cycle) > len(thiscycle)): cycle = thiscycle return cycle dist = {(i, j): math.sqrt(sum((((points[i][k] - points[j][k]) ** 2) for k in range(2)))) for i in range(n) for j in range(i)} m = Model() m.Params.outputFlag = False vars = m.addVars(dist.keys(), obj=dist, vtype=GRB.BINARY, name='e') for (i, j) in vars.keys(): vars[(j, i)] = vars[(i, j)] m.addConstrs(((vars.sum(i, '*') == 2) for i in range(n))) m._vars = vars m.Params.lazyConstraints = 1 m.Params.threads = threads if timeout: m.Params.timeLimit = timeout if gap: m.Params.mipGap = (gap * 0.01) m.optimize(subtourelim) vals = m.getAttr('x', vars) selected = tuplelist(((i, j) for (i, j) in vals.keys() if (vals[(i, j)] > 0.5))) tour = subtour(selected) assert (len(tour) == n) return (m.objVal, tour)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--K', type=int, default=1)
    parser.add_argument('--N', type=int, default=1)
    parser.add_argument('--n_epoch', type=int, default=200)
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('--hidden_add_noise_sigma', type=float, default=0)
    parser.add_argument('--input_salt_and_pepper', type=float, default=0.4)
    parser.add_argument('--learning_rate', type=float, default=10)
    parser.add_argument('--momentum', type=float, default=0.0)
    parser.add_argument('--annealing', type=float, default=1.0)
    parser.add_argument('--hidden_size', type=float, default=2000)
    parser.add_argument('--act', type=str, default='sigmoid')
    parser.add_argument('--dataset', type=str, default='MNIST_binary')
    parser.add_argument('--data_path', type=str, default='.')
    parser.add_argument('--vis_init', type=int, default=0)
    parser.add_argument('--noiseless_h1', type=int, default=1)
    parser.add_argument('--input_sampling', type=int, default=1)
    parser.add_argument('--test_model', type=int, default=0)
    args = parser.parse_args()
    model.experiment(args, None)
class SecurityListTestCase(WithMakeAlgo, ZiplineTestCase): START_DATE = pd.Timestamp('2002-01-03', tz='UTC') assert (START_DATE == sorted(list(LEVERAGED_ETFS.keys()))[0]), 'START_DATE should match start of LEVERAGED_ETF data.' END_DATE = pd.Timestamp('2015-02-17', tz='utc') extra_knowledge_date = pd.Timestamp('2015-01-27', tz='utc') trading_day_before_first_kd = pd.Timestamp('2015-01-23', tz='utc') SIM_PARAMS_END = pd.Timestamp('2002-01-08', tz='UTC') SIM_PARAMS_DATA_FREQUENCY = 'daily' DATA_PORTAL_USE_MINUTE_DATA = False ASSET_FINDER_EQUITY_SIDS = (1, 2, 3, 4, 5) ASSET_FINDER_EQUITY_SYMBOLS = ('AAPL', 'GOOG', 'BZQ', 'URTY', 'JFT') def test_iterate_over_restricted_list(self): algo = self.make_algo(algo_class=IterateRLAlgo, symbol='BZQ') algo.run() self.assertTrue(algo.found) def test_security_list(self): def get_datetime(): return self.START_DATE rl = SecurityListSet(get_datetime, self.asset_finder) should_exist = [asset.sid for asset in [self.asset_finder.lookup_symbol(symbol, as_of_date=self.extra_knowledge_date) for symbol in ['BZQ', 'URTY', 'JFT']]] for sid in should_exist: self.assertIn(sid, rl.leveraged_etf_list.current_securities(get_datetime())) shouldnt_exist = [asset.sid for asset in [self.asset_finder.lookup_symbol(symbol, as_of_date=self.extra_knowledge_date) for symbol in ['AAPL', 'GOOG']]] for sid in shouldnt_exist: self.assertNotIn(sid, rl.leveraged_etf_list.current_securities(get_datetime())) def test_security_add(self): def get_datetime(): return pd.Timestamp('2015-01-27', tz='UTC') with security_list_copy(): add_security_data(['AAPL', 'GOOG'], []) rl = SecurityListSet(get_datetime, self.asset_finder) should_exist = [asset.sid for asset in [self.asset_finder.lookup_symbol(symbol, as_of_date=self.extra_knowledge_date) for symbol in ['AAPL', 'GOOG', 'BZQ', 'URTY']]] for sid in should_exist: self.assertIn(sid, rl.leveraged_etf_list.current_securities(get_datetime())) def test_security_add_delete(self): with security_list_copy(): def get_datetime(): return pd.Timestamp('2015-01-27', tz='UTC') rl = SecurityListSet(get_datetime, self.asset_finder) self.assertNotIn('BZQ', rl.leveraged_etf_list.current_securities(get_datetime())) self.assertNotIn('URTY', rl.leveraged_etf_list.current_securities(get_datetime())) def test_algo_without_rl_violation_via_check(self): self.run_algorithm(algo_class=RestrictedAlgoWithCheck, symbol='BZQ') def test_algo_without_rl_violation(self): self.run_algorithm(algo_class=RestrictedAlgoWithoutCheck, symbol='AAPL') ([('using_set_do_not_order_list', RestrictedAlgoWithoutCheckSetDoNotOrderList), ('using_set_restrictions', RestrictedAlgoWithoutCheck)]) def test_algo_with_rl_violation(self, name, algo_class): algo = self.make_algo(algo_class=algo_class, symbol='BZQ') with self.assertRaises(TradingControlViolation) as ctx: algo.run() self.check_algo_exception(algo, ctx, 0) algo = self.make_algo(algo_class=RestrictedAlgoWithoutCheck, symbol='JFT') with self.assertRaises(TradingControlViolation) as ctx: algo.run() self.check_algo_exception(algo, ctx, 0) def test_algo_with_rl_violation_after_knowledge_date(self): start = (self.START_DATE + timedelta(days=7)) end = (start + (self.trading_calendar.day * 4)) algo = self.make_algo(algo_class=RestrictedAlgoWithoutCheck, symbol='BZQ', sim_params=self.make_simparams(start_session=start, end_session=end)) with self.assertRaises(TradingControlViolation) as ctx: algo.run() self.check_algo_exception(algo, ctx, 0) def test_algo_with_rl_violation_cumulative(self): sim_params = 
factory.create_simulation_parameters(start=(self.START_DATE + timedelta(days=7)), num_days=4) with security_list_copy(): add_security_data(['AAPL'], []) algo = self.make_algo(algo_class=RestrictedAlgoWithoutCheck, symbol='BZQ', sim_params=sim_params) with self.assertRaises(TradingControlViolation) as ctx: algo.run() self.check_algo_exception(algo, ctx, 0) def test_algo_without_rl_violation_after_delete(self): sim_params = factory.create_simulation_parameters(start=self.extra_knowledge_date, num_days=4) with security_list_copy(): add_security_data([], ['BZQ']) algo = self.make_algo(algo_class=RestrictedAlgoWithoutCheck, symbol='BZQ', sim_params=sim_params) algo.run() def test_algo_with_rl_violation_after_add(self): sim_params = factory.create_simulation_parameters(start=self.trading_day_before_first_kd, num_days=4) with security_list_copy(): add_security_data(['AAPL'], []) algo = self.make_algo(algo_class=RestrictedAlgoWithoutCheck, symbol='AAPL', sim_params=sim_params) with self.assertRaises(TradingControlViolation) as ctx: algo.run() self.check_algo_exception(algo, ctx, 2) def check_algo_exception(self, algo, ctx, expected_order_count): self.assertEqual(algo.order_count, expected_order_count) exc = ctx.exception self.assertEqual(TradingControlViolation, type(exc)) exc_msg = str(ctx.exception) self.assertTrue(('RestrictedListOrder' in exc_msg))
def _create_channels(channels, h5f, resolution):
    for channel in channels:
        var_name = 'IMG_' + channel.upper()
        var = h5f.create_variable(var_name, ('time',) + dimensions_by_resolution[resolution],
                                  np.uint16, chunks=chunks_1km)
        var[:] = values_by_resolution[resolution]
        var.attrs['_FillValue'] = 0
        for suffix, lut_values in zip(LUT_SUFFIXES[channel], (lut_values_2, lut_values_3)):
            lut_name = '_'.join((var_name, suffix))
            var = h5f.create_variable(lut_name, ('GreyCount',), float)
            var[:] = lut_values
            var.attrs['units'] = bytes(calibrated_units[suffix], 'ascii')
            var.attrs['long_name'] = ' '.join((channel_names[channel], calibrated_names[suffix]))
def find_lr(net, trn_loader, optimizer, loss_fn, init_value=1e-08, final_value=10.0,
            beta=0.98, device='cuda:1'):
    num = len(trn_loader) - 1
    mult = (final_value / init_value) ** (1 / num)
    lr = init_value
    optimizer.param_groups[0]['lr'] = lr
    avg_loss = 0.0
    best_loss = 0.0
    batch_num = 0
    losses = []
    log_lrs = []
    for noisy, clean, _ in trn_loader:
        batch_num += 1
        noisy, clean = noisy.to(device), clean.to(device)
        noisy = noisy.view(-1, *noisy.shape[2:])
        clean = clean.view(-1, *clean.shape[2:])
        optimizer.zero_grad()
        output = net(noisy)
        loss = loss_fn(output, clean)
        avg_loss = beta * avg_loss + (1 - beta) * loss.item()
        smoothed_loss = avg_loss / (1 - beta ** batch_num)
        if batch_num > 1 and smoothed_loss > 4 * best_loss:
            return (log_lrs, losses)
        if smoothed_loss < best_loss or batch_num == 1:
            best_loss = smoothed_loss
        losses.append(smoothed_loss)
        log_lrs.append(np.log10(lr))
        loss.backward()
        optimizer.step()
        lr *= mult
        optimizer.param_groups[0]['lr'] = lr
    print('finished find lr')
    return (log_lrs, losses)
def matchPreviousExpr(expr):
    rep = Forward()
    e2 = expr.copy()
    rep <<= e2

    def copyTokenToRepeater(s, l, t):
        matchTokens = _flatten(t.asList())

        def mustMatchTheseTokens(s, l, t):
            theseTokens = _flatten(t.asList())
            if theseTokens != matchTokens:
                raise ParseException('', 0, '')

        rep.setParseAction(mustMatchTheseTokens, callDuringTry=True)

    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    rep.setName('(prev) ' + str(expr))
    return rep
class SingleTimeEvent(TimeEvent):

    _datetimes_to_data = {}

    @classmethod
    def schedule_new_event(cls, date_time: datetime, data: Any):
        if date_time not in cls._datetimes_to_data.keys():
            cls._datetimes_to_data[date_time] = data
        else:
            raise ValueError('Event associated with the given datetime has been already scheduled.')

    def next_trigger_time(self, now: datetime) -> Optional[datetime]:
        date_times = list(self._datetimes_to_data.keys())
        for date_time in date_times:
            if date_time <= now:
                del self._datetimes_to_data[date_time]
        return min(self._datetimes_to_data) if self._datetimes_to_data else None

    def notify(self, listener) -> None:
        listener.on_single_time_event(self)

    @classmethod
    def get_data(cls, time: datetime):
        return cls._datetimes_to_data[time]

    @classmethod
    def clear(cls):
        cls._datetimes_to_data.clear()
@pytest.mark.parametrize(
    ('shapes', 'chunks', 'dims', 'exp_unified'),
    [
        (((3, 5, 5), (5, 5)), (-1, -1), (('bands', 'y', 'x'), ('y', 'x')), True),
        (((3, 5, 5), (5, 5)), (-1, 2), (('bands', 'y', 'x'), ('y', 'x')), True),
        (((4, 5, 5), (3, 5, 5)), (-1, -1), (('bands', 'y', 'x'), ('bands', 'y', 'x')), False),
    ],
)
def test_unify_chunks(shapes, chunks, dims, exp_unified):
    from satpy.utils import unify_chunks
    inputs = list(_data_arrays_from_params(shapes, chunks, dims))
    results = unify_chunks(*inputs)
    if exp_unified:
        _verify_unified(results)
    else:
        _verify_unchanged_chunks(results, inputs)
def get_editor_args(fallback_command='nano'):
    if 'VISUAL' in os.environ:
        editor = os.environ['VISUAL']
    elif 'EDITOR' in os.environ:
        editor = os.environ['EDITOR']
    elif shutil.which('editor'):
        editor = 'editor'
    else:
        editor = fallback_command
    try:
        editor_args = shlex.split(editor)
    except ValueError:
        editor_args = [editor]
    if not editor_args:
        editor_args = [fallback_command]
    return editor_args
class banner(scan): def __init__(self, job, timeout=10): scan.__init__(self, job) setattr(self, 'datasize', 0) if (len(job) > 1): self.type = job[0].split('|')[1] self.port = job[0].split('|')[2] self.scan_type = _whats_your_name() if (timeout >= 60): self.timeout = 59 else: self.timeout = timeout def execute_scan(self, verbose): bannercmd = ops.cmd.getDszCommand('banner', ip=self.target, port=self.port, wait=self.timeout, dszquiet=(not verbose)) bannercmd.optdict[self.type] = True bannerobject = bannercmd.execute() if (bannerobject is None): return False success = bannercmd.success self.respondingip = bannerobject.taskinginfo.target.location if (len(bannerobject.transfer) > 0): self.firsttextline = bannerobject.transfer[0].text self.returnip = bannerobject.transfer[0].address self.datasize = bannerobject.transfer[0].data_size try: self.firsttextline = self.firsttextline.split('\n')[0] except: pass self.moduleerror = '' self.oserror = '' banner_files = glob.glob(os.path.join(ops.LOGDIR, 'Data', ('%05d-banner*' % bannerobject.cmdid))) for datafile in banner_files: f = open(datafile, 'r') output = f.readlines() f.close() for line in output: if line.strip().startswith('<ModuleError'): self.moduleerror = line.split("'")[1] if (self.moduleerror == '4105'): break elif line.strip().startswith('<OsError'): self.oserror = line.split("'")[1] break if (self.moduleerror == '4105'): self.error = '4105: Open (Timeout waiting)' elif (self.moduleerror == '11'): self.error = '11: Open (Timeout waiting)' elif ((self.moduleerror == '4101') or (self.moduleerror == '7')): if (self.oserror == '10060'): self.error = ('%s: No response' % '-'.join([self.moduleerror, self.oserror])) elif (self.oserror == '10061'): self.error = ('%s: Actively refused' % '-'.join([self.moduleerror, self.oserror])) elif (self.oserror == '10051'): self.error = ('%s: Unreachable network' % '-'.join([self.moduleerror, self.oserror])) else: self.error = ('%s: Unknown' % '-'.join([self.moduleerror, self.oserror])) elif (self.moduleerror == '10'): self.error = '10: Error receiving data' else: self.error = ('%s: Unknown' % '-'.join([self.moduleerror, self.oserror])) self.timestamp = dsz.Timestamp() if ((self.moduleerror == '4105') or (self.moduleerror == '11')): pass if (not (self.oserror == '10060')): self.success = True def return_success_message(self): return ('Banner timeout waiting on %s' % self.target) def check_escalation(self, escalation_rule): banner = self try: if eval(escalation_rule): return True else: return False except: return False def verify_escalation(self, escalation_rule): banner = self try: eval_res = eval(escalation_rule) if ((eval_res == True) or (eval_res == False)): return True else: return False except: return False def return_data(self): return scan.return_data(self) def get_display_headers(self): return ['Targeted Address', 'Responding Address', 'Return Address', 'Type', 'Port', 'Error', 'Datasize', 'First Text Line', 'Time Stamp'] def get_data_fields(self): return ['target', 'respondingip', 'returnip', 'type', 'port', 'error', 'datasize', 'firsttextline', 'timestamp'] def get_raw_fields(self): return (self.get_data_fields() + ['success', 'moduleerror', 'oserror']) def verify_job(self, job): if ((not (len(job) == 3)) or (not (job[1] in ['tcp', 'udp'])) or (not ((int(job[2]) <= 65535) and (int(job[2]) >= 1)))): return False return True def min_time(self): return 1 def min_range(self): return 2
def test_solar_noon():
    index = pd.date_range(start='T1200', freq='1s', periods=1)
    apparent_zenith = pd.Series([10], index=index)
    apparent_azimuth = pd.Series([180], index=index)
    tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
                                       axis_tilt=0, axis_azimuth=0,
                                       max_angle=90, backtrack=True,
                                       gcr=2.0 / 7.0)
    expect = pd.DataFrame({'tracker_theta': 0, 'aoi': 10,
                           'surface_azimuth': 90, 'surface_tilt': 0},
                          index=index, dtype=np.float64)
    expect = expect[SINGLEAXIS_COL_ORDER]
    assert_frame_equal(expect, tracker_data)
def upgrade(saveddata_engine):
    sql = """
    DELETE FROM modules WHERE ID IN
    (
        SELECT m.ID FROM modules AS m
        JOIN fits AS f ON m.fitID = f.ID
        WHERE f.shipID IN ("35832", "35833", "35834", "40340")
        AND m.projected = 1
    )
    """
    saveddata_engine.execute(sql)
def test_membership_with_multiple_payments(): with time_machine.travel('2020-10-10 10:00:00', tick=False): membership_1 = MembershipFactory(status=MembershipStatus.ACTIVE) membership_1.add_pretix_payment(organizer='python-italia', event='pycon-demo', order_code='XXYYZZ', total=1000, status=PaymentStatus.PAID, payment_date=datetime.datetime(2019, 10, 10, 1, 4, 43, tzinfo=timezone.utc), period_start=datetime.datetime(2019, 10, 10, 1, 4, 43, tzinfo=timezone.utc), period_end=datetime.datetime(2020, 10, 10, 1, 4, 43, tzinfo=timezone.utc)) membership_1.add_pretix_payment(organizer='python-italia', event='pycon-demo', order_code='ABCABCABC', total=1000, status=PaymentStatus.PAID, payment_date=datetime.datetime(2020, 1, 1, 1, 4, 43, tzinfo=timezone.utc), period_start=datetime.datetime(2020, 1, 1, 1, 4, 43, tzinfo=timezone.utc), period_end=datetime.datetime(2021, 1, 1, 1, 4, 43, tzinfo=timezone.utc)) membership_1.save() membership_check_status({}) updated_membership_1 = Membership.objects.get(id=membership_1.id) assert (updated_membership_1.status == MembershipStatus.ACTIVE)
@pytest.fixture  # yield-style fixture: start the registry, hand it to the test, then stop it
def subscription_registry():
    registry = SubscriptionRegistry()
    server = mock.create_autospec(HTTPServer, instance=True)
    server.server_address = ('localhost', 8989)
    with mock.patch('pywemo.subscribe._start_server', return_value=server):
        registry.start()
        yield registry
    registry.stop()
class BashcompExtractor(FileExtractor):
    filename = os.path.join(BPFTOOL_DIR, 'bash-completion/bpftool')

    def get_prog_attach_types(self):
        return self.get_bashcomp_list('BPFTOOL_PROG_ATTACH_TYPES')

    def get_map_types(self):
        return self.get_bashcomp_list('BPFTOOL_MAP_CREATE_TYPES')

    def get_cgroup_attach_types(self):
        return self.get_bashcomp_list('BPFTOOL_CGROUP_ATTACH_TYPES')
def model_to_test_downstream_masks():
    inputs = tf.keras.Input(shape=(8, 8, 3))
    x = tf.keras.layers.Conv2D(8, (2, 2), activation=tf.nn.relu)(inputs)
    residual = x
    x = tf.keras.layers.Conv2D(8, (1, 1))(x)
    x = x + residual
    x = tf.keras.layers.BatchNormalization(momentum=0.3, epsilon=0.65)(x)
    x1 = tf.nn.relu(x)
    x2 = tf.nn.relu(x)
    x1 = tf.keras.layers.Conv2D(4, (2, 2))(x1)
    x2 = tf.keras.layers.Conv2D(4, (2, 2))(x2)
    x = x1 + x2
    x = tf.keras.layers.Flatten()(x)
    outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax,
                                    name='model_to_test_downstream_masks')(x)
    return outputs
class AttrVI_ATTR_BUFFER(Attribute):
    resources = [constants.EventType.io_completion]
    py_name = 'buffer'
    visa_name = 'VI_ATTR_BUFFER'
    visa_type = 'ViBuf'
    default = NotAvailable
    read, write, local = True, False, True

    def __get__(
        self, instance: Optional['IOCompletionEvent'], owner
    ) -> Optional[Union[SupportsBytes, 'AttrVI_ATTR_BUFFER']]:
        if instance is None:
            return self
        return instance.visalib.get_buffer_from_id(instance.job_id)
class Migration(migrations.Migration):

    dependencies = [('core', '0010_tag_active')]

    operations = [
        migrations.AddField(
            model_name='currentsong',
            name='stream_url',
            field=models.CharField(blank=True, max_length=2000),
        ),
        migrations.AddField(
            model_name='queuedsong',
            name='stream_url',
            field=models.CharField(blank=True, max_length=2000),
        ),
    ]
def train(array, num_epochs=50, use_global_sort_loss=False, use_global_unique_index_loss=False, local_sort_loss_windows=[2, 3, 4], local_unique_index_loss_windows=[2, 3, 4], verbose=True): metadata = Metadata(array) size = len(array) model = Net(size) if verbose: print_array_diagnostics(array, argsort(model, array)) optimizer = torch.optim.SGD(model.parameters(), lr=1.0) if verbose: epoch_range = tqdm.trange(num_epochs) else: epoch_range = range(num_epochs) sort_loss = sorted_constraint_loss(array) for epoch in epoch_range: optimizer.zero_grad() metadata.update_metadata(model, array) prediction = model(array) total_loss = 0.0 if use_global_sort_loss: global_sort_loss = sort_loss(prediction) metadata.record_loss('global_sort_loss', global_sort_loss) if (global_sort_loss < math.inf): total_loss += global_sort_loss if use_global_unique_index_loss: global_unique_index_loss = unique_index_constraint_loss(prediction) metadata.record_loss('global_unique_index_loss', global_unique_index_loss) if (global_unique_index_loss < math.inf): total_loss += global_unique_index_loss for window_size in local_unique_index_loss_windows: if (window_size > size): continue window_loss = 0.0 for start in range(((size - window_size) + 1)): end = (start + window_size) window_loss += unique_index_constraint_loss(prediction[0][start:end].unsqueeze(0)) metadata.record_loss(f'window_unique_index_loss_{window_size}', window_loss) if (window_loss < math.inf): total_loss += window_loss for window_size in local_sort_loss_windows: if (window_size > size): continue window_loss = 0.0 for start in range(((size - window_size) + 1)): end = (start + window_size) window_loss += sort_loss(prediction[0][start:end].unsqueeze(0)) metadata.record_loss(f'sort_loss_{window_size}', window_loss) if (window_loss < math.inf): total_loss += window_loss if isinstance(total_loss, float): continue metadata.record_loss('total loss', total_loss) total_loss.backward() optimizer.step() sorted_array = sort(model, array) argsort_indices = argsort(model, array) metadata.metadata['orderings'].append(sorted_array) if verbose: print_array_diagnostics(array, argsort(model, array)) return metadata
def test_xfail_skipif_with_globals(pytester: Pytester) -> None:
    pytester.makepyfile(
        """
        import pytest
        x = 3
        @pytest.mark.skipif("x == 3")
        def test_skip1():
            pass
        @pytest.mark.xfail("x == 3")
        def test_boolean():
            assert 0
        """
    )
    result = pytester.runpytest('-rsx')
    result.stdout.fnmatch_lines(['*SKIP*x == 3*', '*XFAIL*test_boolean*x == 3*'])
def parse_args():
    parser = argparse.ArgumentParser(description='Print the whole config')
    parser.add_argument('config', help='config file path')
    parser.add_argument('--graph', action='store_true', help='print the models graph')
    parser.add_argument('--options', nargs='+', action=DictAction, help='arguments in dict')
    args = parser.parse_args()
    return args
def test_laneposition():
    pos = OSC.LanePosition(1, 2, lane_id=1, road_id=2)
    prettyprint(pos.get_element())
    pos2 = OSC.LanePosition(1, 2, lane_id=1, road_id=2)
    pos3 = OSC.LanePosition(1, 1, lane_id=-1, road_id=2)
    assert pos == pos2
    assert pos != pos3
    pos4 = OSC.LanePosition.parse(pos.get_element())
    assert pos == pos4
    assert version_validation('Position', pos, 0) == ValidationResponse.OK
    assert version_validation('Position', pos, 1) == ValidationResponse.OK
    assert version_validation('Position', pos, 2) == ValidationResponse.OK
def test_cmdstep_runstep_cmd_is_string_formatting_shell_true():
    obj = CmdStep('blahname',
                  Context({'k1': 'blah', 'cmd': '{k1} -{k1}1 --{k1}2'}),
                  is_shell=True)
    assert obj.is_shell
    assert obj.logger.name == 'blahname'
    assert obj.context == Context({'k1': 'blah', 'cmd': '{k1} -{k1}1 --{k1}2'})
    assert obj.commands == [Command('blah -blah1 --blah2',
                                    cwd=None, is_shell=True, is_save=False)]
    with patch_logger(sp_mod_name, logging.DEBUG) as mock_logger_debug:
        with patch('subprocess.run') as mock_run:
            obj.run_step()
    assert mock_logger_debug.mock_calls == [
        call('stdout & stderr inheriting from parent process.'),
        call('Processing command string: blah -blah1 --blah2')]
    mock_run.assert_called_once_with('blah -blah1 --blah2', cwd=None, shell=True,
                                     check=True, stdout=None, stderr=None)
def check_prox_impl(func, values, step=0.5, rtol=1e-05, atol=1e-05, behavior='ret', verbosity=0, opt_kws={}): assert (behavior in ['ret', 'error', 'warn']) errors = [] passes = [] for (i, x) in enumerate(values): prox_out = func.prox(x=x, step=step) (prox_baseline, opt_out) = numeric_prox(func=func, x=x, step=step, init=prox_out, **opt_kws) if (len(prox_baseline) == 1): prox_baseline = prox_baseline.item() did_pass = np.allclose(prox_out, prox_baseline, rtol=rtol, atol=atol) errs = {'mad': np.mean(abs((prox_out - prox_baseline))), 'max': abs((prox_out - prox_baseline)).max(), 'l2': np.linalg.norm((prox_out - prox_baseline))} if (verbosity >= 1): print('value ({}/{}) pass = {}'.format((i + 1), len(values), did_pass)) print(errs) print() if (behavior == 'error'): assert did_pass errors.append(errs) passes.append(did_pass) passes = np.array(passes) n_fails = sum((~ passes)) msg = '{}/{} prox evals failed: {}'.format(n_fails, len(passes), passes) if (behavior == 'ret'): return (errors, passes) if (n_fails > 0): if (behavior == 'warn'): warn(msg)
class Nurbs(VersionBase): def __init__(self, order): self.order = convert_int(order) self.controlpoints = [] self.knots = [] def __eq__(self, other): if isinstance(other, Nurbs): if ((self.get_attributes() == other.get_attributes()) and (self.controlpoints == other.controlpoints) and (self.knots == other.knots)): return True return False @staticmethod def parse(element): nurbs_element = element.find('Nurbs') order = convert_int(nurbs_element.attrib['order']) nurbs = Nurbs(order) control_point_elements = nurbs_element.findall('ControlPoint') for cp in control_point_elements: nurbs.add_control_point(ControlPoint.parse(cp)) knots_elements = nurbs_element.findall('Knot') knots = [] for k in knots_elements: print(k) knots.append(convert_float(k.attrib['value'])) nurbs.add_knots(knots) return nurbs def add_knots(self, knots): self.knots = knots return self def add_control_point(self, controlpoint): if (not isinstance(controlpoint, ControlPoint)): raise TypeError('controlpoint input is not of type ControlPoint') self.controlpoints.append(controlpoint) return self def get_attributes(self): retdict = {} retdict['order'] = str(self.order) return retdict def get_element(self): shape = ET.Element('Shape') element = ET.SubElement(shape, 'Nurbs', attrib=self.get_attributes()) if ((len(self.controlpoints) + self.order) != len(self.knots)): raise ValueError('Number of knots is not equal to the number of controlpoints + order') for c in self.controlpoints: element.append(c.get_element()) for k in self.knots: ET.SubElement(element, 'Knot', attrib={'value': str(k)}) return shape
def read_lexiconp(filename): ans = [] found_empty_prons = False found_large_pronprobs = False whitespace = re.compile('[ \t]+') with open(filename, 'r', encoding='latin-1') as f: for line in f: a = whitespace.split(line.strip(' \t\r\n')) if (len(a) < 2): print("{0}: error: found bad line '{1}' in lexicon file {2} ".format(sys.argv[0], line.strip(' \t\r\n'), filename), file=sys.stderr) sys.exit(1) word = a[0] if (word == '<eps>'): print('{0}: error: found <eps> as a word in lexicon file {1}'.format(line.strip(' \t\r\n'), filename), file=sys.stderr) sys.exit(1) try: pron_prob = float(a[1]) word_sil_prob = float(a[2]) sil_word_correction = float(a[3]) non_sil_word_correction = float(a[4]) except: print("{0}: error: found bad line '{1}' in lexicon file {2}, 2nd field through 5th field should be numbers".format(sys.argv[0], line.strip(' \t\r\n'), filename), file=sys.stderr) sys.exit(1) prons = a[5:] if (pron_prob <= 0.0): print("{0}: error: invalid pron-prob in line '{1}' of lexicon file {2} ".format(sys.argv[0], line.strip(' \t\r\n'), filename), file=sys.stderr) sys.exit(1) if (len(prons) == 0): found_empty_prons = True ans.append((word, pron_prob, word_sil_prob, sil_word_correction, non_sil_word_correction, prons)) if (pron_prob > 1.0): found_large_pronprobs = True if found_empty_prons: print('{0}: warning: found at least one word with an empty pronunciation in lexicon file {1}.'.format(sys.argv[0], filename), file=sys.stderr) if found_large_pronprobs: print('{0}: warning: found at least one word with pron-prob >1.0 in {1}'.format(sys.argv[0], filename), file=sys.stderr) if (len(ans) == 0): print('{0}: error: found no pronunciations in lexicon file {1}'.format(sys.argv[0], filename), file=sys.stderr) sys.exit(1) return ans
def test_cache_get_hit_no_cache(no_cache): cache = Cache() creator_mock = MagicMock() creator_mock.side_effect = ['created obj1', 'created obj2', 'created obj3', 'created obj4'] with patch_logger('pypyr.cache', logging.DEBUG) as mock_logger_debug: obj1 = cache.get('one', (lambda : creator_mock('1'))) obj2 = cache.get('one', (lambda : creator_mock('2'))) obj3 = cache.get('one', (lambda : creator_mock('3'))) assert (obj1 == 'created obj1') assert (obj2 == 'created obj2') assert (obj3 == 'created obj3') assert (creator_mock.mock_calls == [call('1'), call('2'), call('3')]) assert (mock_logger_debug.mock_calls == [call('no cache mode enabled. creating `one` sans cache'), call('no cache mode enabled. creating `one` sans cache'), call('no cache mode enabled. creating `one` sans cache')]) obj4 = creator_mock('4') assert (obj4 == 'created obj4')
class MessageManager(models.Manager): def compose(self, sender, recipient, body): if (not sender.can_send_message(recipient)): return False has_receipt = (sender.allow_receipts and recipient.allow_receipts) message = self.create(sender=sender, recipient=recipient, body=body, has_receipt=has_receipt) return message
class BERTAdam(Optimizer): def __init__(self, params, lr, warmup=(- 1), t_total=(- 1), schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-06, weight_decay_rate=0.01, max_grad_norm=1.0): if (not (lr >= 0.0)): raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr)) if (schedule not in SCHEDULES): raise ValueError('Invalid schedule parameter: {}'.format(schedule)) if ((not (0.0 <= warmup < 1.0)) and (not (warmup == (- 1)))): raise ValueError('Invalid warmup: {} - should be in [0.0, 1.0[ or -1'.format(warmup)) if (not (0.0 <= b1 < 1.0)): raise ValueError('Invalid b1 parameter: {} - should be in [0.0, 1.0['.format(b1)) if (not (0.0 <= b2 < 1.0)): raise ValueError('Invalid b2 parameter: {} - should be in [0.0, 1.0['.format(b2)) if (not (e >= 0.0)): raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(e)) defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total, b1=b1, b2=b2, e=e, weight_decay_rate=weight_decay_rate, max_grad_norm=max_grad_norm) super(BERTAdam, self).__init__(params, defaults) def get_lr(self): lr = [] print('l_total=', len(self.param_groups)) for group in self.param_groups: print('l_p=', len(group['params'])) for p in group['params']: state = self.state[p] if (len(state) == 0): return [0] if (group['t_total'] != (- 1)): schedule_fct = SCHEDULES[group['schedule']] lr_scheduled = (group['lr'] * schedule_fct((state['step'] / group['t_total']), group['warmup'])) else: lr_scheduled = group['lr'] lr.append(lr_scheduled) return lr def to(self, device): for state in self.state.values(): state['exp_avg'].to(device) state['exp_avg_sq'].to(device) def initialize_step(self, initial_step): for group in self.param_groups: for p in group['params']: state = self.state[p] state['step'] = initial_step state['exp_avg'] = torch.zeros_like(p.data) state['exp_avg_sq'] = torch.zeros_like(p.data) def step(self, closure=None): loss = None if (closure is not None): loss = closure() for group in self.param_groups: for p in group['params']: if (p.grad is None): continue grad = p.grad.data if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') state = self.state[p] if (len(state) == 0): state['step'] = 0 state['next_m'] = torch.zeros_like(p.data) state['next_v'] = torch.zeros_like(p.data) (next_m, next_v) = (state['next_m'], state['next_v']) (beta1, beta2) = (group['b1'], group['b2']) if (group['max_grad_norm'] > 0): clip_grad_norm_(p, group['max_grad_norm']) next_m.mul_(beta1).add_((1 - beta1), grad) next_v.mul_(beta2).addcmul_((1 - beta2), grad, grad) update = (next_m / (next_v.sqrt() + group['e'])) if (group['weight_decay_rate'] > 0.0): update += (group['weight_decay_rate'] * p.data) if (group['t_total'] != (- 1)): schedule_fct = SCHEDULES[group['schedule']] lr_scheduled = (group['lr'] * schedule_fct((state['step'] / group['t_total']), group['warmup'])) else: lr_scheduled = group['lr'] update_with_lr = (lr_scheduled * update) p.data.add_((- update_with_lr)) state['step'] += 1 return loss
def rescale_abscoeff(spec, rescaled, initial, old_mole_fraction, new_mole_fraction, old_path_length_cm, wunit, units, extra, true_path_length, assume_equilibrium): if __debug__: printdbg(f'recomputing `abscoeff` from initial {initial} and already rescaled {list(rescaled.keys())}, knowing `old_mole_fraction={old_mole_fraction}`, `new_mole_fraction={new_mole_fraction}`, `old_path_length_cm={old_path_length_cm}`, `true_path_length={true_path_length}`, and `assume_equilibrium={assume_equilibrium}`') unit = None if ('abscoeff' in rescaled): if __debug__: printdbg('... rescale: abscoeff was scaled already') assert ('abscoeff' in units) return (rescaled, units) if ('abscoeff' in initial): (_, abscoeff_init) = spec.get('abscoeff', wunit=wunit, Iunit=spec.units['abscoeff'], copy=False) elif (('absorbance' in initial) and true_path_length): if __debug__: printdbg('... rescale: abscoeff k_1 = A_1/L_1') (_, A) = spec.get('absorbance', wunit=wunit, Iunit=spec.units['absorbance'], copy=False) assert (units['absorbance'] == '') abscoeff_init = (A / old_path_length_cm) unit = 'cm-1' elif (('transmittance_noslit' in initial) and true_path_length): if __debug__: printdbg('... rescale: abscoeff k_1 = -ln(T_1)/L_1') (_, T1) = spec.get('transmittance_noslit', wunit=wunit, Iunit=spec.units['transmittance_noslit'], copy=False) b = (T1 == 0) if (b.sum() > 0): msg = "Transmittance is saturated. Can't infer abscoeff. Please give absorbance" if ('abscoeff' in extra): abscoeff_init = None if __debug__: printdbg(msg) else: raise ValueError(msg) abscoeff_init = ((- ln(T1)) / old_path_length_cm) unit = 'cm-1' elif (('xsection' in initial) and ('Tgas' in spec.conditions) and ('pressure' in spec.conditions) and ('mole_fraction' in spec.conditions)): if __debug__: printdbg('... rescale: abscoeff k_2 = XS_2 * (x * p) / (k_b * T)') xsection = rescaled['xsection'] pressure_Pa = (spec.conditions['pressure'] * 100000.0) x = spec.conditions['mole_fraction'] Tgas = spec.conditions['Tgas'] from radis.phys.constants import k_b abscoeff_init = ((xsection * (((x * pressure_Pa) / k_b) / Tgas)) * 1e-06) if ('xsection' in spec.units): assert (spec.units['xsection'] == 'cm2') unit = 'cm-1' elif ('abscoeff' in extra): abscoeff_init = None elif (assume_equilibrium and ('emissivity_noslit' in initial)): raise NotImplementedError('Recompute `abscoeff` from `emissivity_noslit` at equilibrium is possible but not implemented') elif (assume_equilibrium and ('radiance_noslit' in initial)): raise NotImplementedError('Recompute `abscoeff` from `radiance_noslit` at equilibrium is possible but not implemented') elif (assume_equilibrium and ('transmittance_noslit' in initial)): raise NotImplementedError('Recompute `abscoeff` from `transmittance_noslit` at equilibrium is possible but not implemented') else: raise ValueError((("Can't rescale abscoeff if not all of the following are given : transmittance_noslit ({0}) ".format(('transmittance_noslit' in initial)) + 'or absorbance ({0}), '.format(('absorbance' in initial))) + 'and true_path_length ({0}). '.format(true_path_length))) if (abscoeff_init is not None): if __debug__: printdbg('... rescale: abscoeff k_2 = k_1 * (x_2/x_1)') abscoeff = ((abscoeff_init * new_mole_fraction) / old_mole_fraction) rescaled['abscoeff'] = abscoeff if (unit is not None): units['abscoeff'] = unit return (rescaled, units)
def test_transform_types_params_array(): data = {'attr': [1, 2, 3]} custom_types = {'attr': types.ArrayAttribute} (new_data, files) = utils._transform_types(data, custom_types, transform_data=True) assert (new_data is not data) assert (new_data == {'attr[]': [1, 2, 3]}) assert (files == {})
def main(args): if (args.cuda and torch.cuda.is_available()): device = torch.device('cuda:0') else: device = torch.device('cpu') (init_dict, train_dict, test_dict) = prepare_data(args.data_loc, args.num_init, args.num_total, test_is_year=False, seed=args.seed) (init_x, init_y, init_y_var) = (init_dict['x'].to(device), init_dict['y'].to(device), init_dict['y_var'].to(device)) (train_x, train_y, train_y_var) = (train_dict['x'].to(device), train_dict['y'].to(device), train_dict['y_var'].to(device)) (test_x, test_y, test_y_var) = (test_dict['x'].to(device), test_dict['y'].to(device), test_dict['y_var'].to(device)) likelihood = FixedNoiseGaussianLikelihood(noise=init_y_var) grid_pts = create_grid(grid_sizes=[30, 30], grid_bounds=torch.tensor([[0.0, 1.0], [0.0, 1.0]])) induc_points = torch.cat([x.reshape((- 1), 1) for x in torch.meshgrid(grid_pts)], dim=(- 1)) model = VariationalGPModel(inducing_points=induc_points, mean_module=gpytorch.means.ZeroMean(), covar_module=ScaleKernel(MaternKernel(ard_num_dims=2, nu=0.5, lengthscale_prior=GammaPrior(3.0, 6.0)), outputscale_prior=GammaPrior(2.0, 0.15)), streaming=True, likelihood=likelihood, beta=args.beta, learn_inducing_locations=args.learn_inducing).to(device) mll = VariationalELBO(model.likelihood, model, beta=args.beta, num_data=args.num_init) print('---- Fitting initial model ----') start = time.time() model.train() model.zero_grad() optimizer = torch.optim.Adam(model.parameters(), lr=(10 * args.lr_init)) (model, loss) = fit_variational_model(mll, model, optimizer, init_x, init_y, maxiter=1000) end = time.time() print('Elapsed fitting time: ', (end - start)) print('--- Now computing initial RMSE') model.eval() with gpytorch.settings.skip_posterior_variances(True): test_pred = model(test_x) pred_rmse = ((test_pred.mean - test_y) ** 2).mean().sqrt() print('---- Initial RMSE: ', pred_rmse.item()) all_outputs = [] start_ind = init_x.shape[0] end_ind = int((start_ind + args.batch_size)) current_x = init_x current_y = init_y current_y_var = init_y_var for step in range(args.num_steps): if ((step > 0) and ((step % 25) == 0)): print('Beginning step ', step) total_time_step_start = time.time() if (step > 0): print('---- Fitting model ----') start = time.time() model.train() model.zero_grad() model.likelihood = FixedNoiseGaussianLikelihood(current_y_var) mll = VariationalELBO(model.likelihood, model, beta=args.beta, num_data=args.num_init) optimizer = torch.optim.Adam(model.parameters(), lr=(args.lr_init * (0.99 ** step))) (model, loss) = fit_variational_model(mll, model, optimizer, current_x, current_y, maxiter=300) model.zero_grad() end = time.time() print('Elapsed fitting time: ', (end - start)) if ((args.acqf == 'max_post_var') and (not args.random)): (candidates, acq_value) = generate_candidates(model, args.batch_size, device, maxiter=300) elif ((args.acqf == 'max_test_var') and (not args.random)): model.eval() (vals, inds) = model(test_x).variance.sort() acq_value = vals[(- args.batch_size):].mean().detach() candidates = test_x[inds[(- args.batch_size):]] else: candidates = torch.rand(args.batch_size, train_x.shape[(- 1)], device=device, dtype=train_x.dtype) acq_value = torch.zeros(1) model.eval() _ = model(test_x[:10]) print('---- Finished optimizing; now querying dataset ---- ') with torch.no_grad(): covar_dists = model.covar_module(candidates, train_x) nearest_points = covar_dists.evaluate().argmax(dim=(- 1)) new_x = train_x[nearest_points] new_y = train_y[nearest_points] new_y_var = train_y_var[nearest_points] todrop = torch.tensor([(x in 
nearest_points) for x in range(train_x.shape[0])]) (train_x, train_y, train_y_var) = (train_x[(~ todrop)], train_y[(~ todrop)], train_y_var[(~ todrop)]) print('New train_x shape', train_x.shape) print('--- Now updating model with simulator ----') current_x = torch.cat((current_x, new_x), dim=0) current_y = torch.cat((current_y, new_y), dim=0) current_y_var = torch.cat((current_y_var, new_y_var), dim=0) print('--- Now computing updated RMSE') model.eval() test_pred = model(test_x) pred_rmse = ((test_pred.mean.view((- 1)) - test_y.view((- 1))) ** 2).mean().sqrt() pred_avg_variance = test_pred.variance.mean() total_time_step_elapsed_time = (time.time() - total_time_step_start) step_output_list = [total_time_step_elapsed_time, acq_value.item(), pred_rmse.item(), pred_avg_variance.item(), loss.item()] print('Step RMSE: ', pred_rmse) all_outputs.append(step_output_list) start_ind = end_ind end_ind = int((end_ind + args.batch_size)) output_dict = {'model_state_dict': model.cpu().state_dict(), 'queried_points': {'x': current_x, 'y': current_y}, 'results': DataFrame(all_outputs)} torch.save(output_dict, args.output)
class TranslationConfig(FairseqDataclass): data: Optional[str] = field(default=None, metadata={'help': 'colon separated path to data directories list, will be iterated upon during epochs in round-robin manner; however, valid and test data are always in the first directory to avoid the need for repeating them in all directories'}) source_lang: Optional[str] = field(default=None, metadata={'help': 'source language', 'argparse_alias': '-s'}) target_lang: Optional[str] = field(default=None, metadata={'help': 'target language', 'argparse_alias': '-t'}) load_alignments: bool = field(default=False, metadata={'help': 'load the binarized alignments'}) left_pad_source: bool = field(default=True, metadata={'help': 'pad the source on the left'}) left_pad_target: bool = field(default=False, metadata={'help': 'pad the target on the left'}) max_source_positions: int = field(default=1024, metadata={'help': 'max number of tokens in the source sequence'}) max_target_positions: int = field(default=1024, metadata={'help': 'max number of tokens in the target sequence'}) upsample_primary: int = field(default=(- 1), metadata={'help': 'the amount of upsample primary dataset'}) truncate_source: bool = field(default=False, metadata={'help': 'truncate source to max-source-positions'}) num_batch_buckets: int = field(default=0, metadata={'help': 'if >0, then bucket source and target lengths into N buckets and pad accordingly; this is useful on TPUs to minimize the number of compilations'}) train_subset: str = II('dataset.train_subset') dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II('dataset.dataset_impl') required_seq_len_multiple: int = II('dataset.required_seq_len_multiple') eval_bleu: bool = field(default=False, metadata={'help': 'evaluation with BLEU scores'}) eval_bleu_args: Optional[str] = field(default='{}', metadata={'help': 'generation args for BLUE scoring, e.g., \'{"beam": 4, "lenpen": 0.6}\', as JSON string'}) eval_bleu_detok: str = field(default='space', metadata={'help': "detokenize before computing BLEU (e.g., 'moses'); required if using --eval-bleu; use 'space' to disable detokenization; see fairseq.data.encoders for other options"}) eval_bleu_detok_args: Optional[str] = field(default='{}', metadata={'help': 'args for building the tokenizer, if needed, as JSON string'}) eval_tokenized_bleu: bool = field(default=False, metadata={'help': 'compute tokenized BLEU instead of sacrebleu'}) eval_bleu_remove_bpe: Optional[str] = field(default=None, metadata={'help': 'remove BPE before computing BLEU', 'argparse_const': ' '}) eval_bleu_print_samples: bool = field(default=False, metadata={'help': 'print sample generations during validation'}) ff_block: str = field(default='ff', metadata={'help': 'The type of FF Block. 
Choices: {ff, adaptive_ff}'}) adaptive_lossw: float = field(default=1e-06, metadata={'help': 'The adaptive loss.'}) adaptive_ramp: float = field(default=0.25, metadata={'help': 'Percentage of the layer used as the adaptive ramp.'}) adaptive_init: float = field(default=0.0, metadata={'help': 'Init value of the adaptive vector.'}) target_sparsity: float = field(default=0.9, metadata={'help': 'The desired final sparsity.'}) str_loss: float = field(default=0.0, metadata={'help': 'The sparse reparametrization loss.'}) str_type: str = field(default='vector', metadata={'help': 'The sparse reparametrization loss.'}) str_progress_start: float = field(default=0.0, metadata={'help': 'The sparse reparametrization loss.'}) str_lr: float = field(default=0.0, metadata={'help': 'scalar quantization noise and scalar quantization at training time'}) str_mult_offset: float = field(default=0.01, metadata={'help': 'scalar quantization noise and scalar quantization at training time'}) str_noisy_relu: float = field(default=0.0, metadata={'help': 'scalar quantization noise and scalar quantization at training time'}) str_weight_noise: float = field(default=0.0, metadata={'help': 'scalar quantization noise and scalar quantization at training time'}) str_sparse: bool = field(default=False, metadata={'help': 'scalar quantization noise and scalar quantization at training time'}) str_func: str = field(default='logistic', metadata={'help': 'scalar quantization noise and scalar quantization at training time'}) base_hidden_dim: int = field(default=0, metadata={'help': 'scalar quantization noise and scalar quantization at training time'}) base_init: str = field(default='zero', metadata={'help': 'scalar quantization noise and scalar quantization at training time'}) stable_emb: bool = field(default=False, metadata={'help': 'Use bnb optimizers if available.'}) init: str = field(default='pytorch', metadata={'help': 'scalar quantization noise and scalar quantization at training time'})
def make_nspkg_sdist(dist_path, distname, version): parts = distname.split('.') nspackage = parts[0] packages = ['.'.join(parts[:idx]) for idx in range(1, (len(parts) + 1))] setup_py = DALS((' import setuptools\n setuptools.setup(\n name=%r,\n version=%r,\n packages=%r,\n namespace_packages=[%r]\n )\n ' % (distname, version, packages, nspackage))) init = "__import__('pkg_resources').declare_namespace(__name__)" files = [('setup.py', setup_py), (os.path.join(nspackage, '__init__.py'), init)] for package in packages[1:]: filename = os.path.join(*(package.split('.') + ['__init__.py'])) files.append((filename, '')) make_sdist(dist_path, files)
class TransformerDecoderLayer3Add(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False): super().__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.text_cross_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.hist_cross_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.norm3 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) self.activation = _get_activation_fn(activation) self.normalize_before = normalize_before def forward(self, tgt, text_memory, hist_memory, tgt_key_padding_mask: Optional[Tensor]=None, text_memory_key_padding_mask: Optional[Tensor]=None, hist_memory_key_padding_mask: Optional[Tensor]=None): tgt2 = self.norm1(tgt) tgt2 = self.self_attn(tgt2, tgt2, value=tgt2, key_padding_mask=tgt_key_padding_mask)[0] tgt = (tgt + self.dropout1(tgt2)) tgt2 = self.norm2(tgt) hist_tgt = self.hist_cross_attn(query=tgt2, key=hist_memory, value=hist_memory, key_padding_mask=hist_memory_key_padding_mask)[0] txt_tgt = self.text_cross_attn(query=tgt2, key=text_memory, value=text_memory, key_padding_mask=text_memory_key_padding_mask)[0] tgt = ((tgt + self.dropout2(hist_tgt)) + self.dropout2(txt_tgt)) tgt2 = self.norm3(tgt) tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) tgt = (tgt + self.dropout3(tgt2)) return tgt
def _format_index(data): tz_raw = data.columns[1] timezone = TZ_MAP.get(tz_raw, tz_raw) datetime = (data['DATE (MM/DD/YYYY)'] + data[tz_raw]) datetime = pd.to_datetime(datetime, format='%m/%d/%Y%H:%M') data = data.set_index(datetime) data = data.tz_localize(timezone) return data
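# Hedged usage sketch for _format_index above (illustrative only, not from the original source).
# The frame mimics an NREL MIDC file: a date column plus a local-time column whose header names
# the timezone. TZ_MAP is assumed to be the module-level mapping the function already references;
# the header 'MST' either resolves through it or falls back to the raw header via .get(tz_raw, tz_raw).
import pandas as pd

data = pd.DataFrame({'DATE (MM/DD/YYYY)': ['01/02/2020', '01/02/2020'],
                     'MST': ['12:00', '12:01'],
                     'GHI': [500.0, 502.0]})
out = _format_index(data)
# out.index is a tz-aware DatetimeIndex built from the two string columns; data columns are kept.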
class Caltech101(DatasetBase): dataset_dir = 'caltech-101' def __init__(self, root, num_shots): self.dataset_dir = os.path.join(root, self.dataset_dir) self.image_dir = os.path.join(self.dataset_dir, '101_ObjectCategories') self.split_path = os.path.join(self.dataset_dir, 'split_zhou_Caltech101.json') self.template = template self.cupl_path = './gpt3_prompts/CuPL_prompts_caltech101.json' (train, val, test) = OxfordPets.read_split(self.split_path, self.image_dir) train = self.generate_fewshot_dataset(train, num_shots=num_shots) super().__init__(train_x=train, val=val, test=test)
class ModuleIdentifierOpInfo(): def __init__(self, module_name, op_type, tf_op, pattern_type: str=None, internal_ops: List[tf.Operation]=None): self._module_name = module_name self._op_type = op_type self._tf_op = tf_op self._pattern_type = pattern_type self._attributes = {} self._internal_ops = internal_ops @property def module_name(self): return self._module_name @module_name.setter def module_name(self, module_name): self._module_name = module_name @property def op_type(self): return self._op_type @op_type.setter def op_type(self, op_type): self._op_type = op_type @property def tf_op(self): return self._tf_op @property def pattern_type(self): return self._pattern_type @property def internal_ops(self): return self._internal_ops def add_attribute(self, attribute_name: str, attribute): self._attributes[attribute_name] = attribute def get_attributes(self): return self._attributes
class TransformerEncoderBlock(nn.Sequential): def __init__(self, emb_size=80, drop_p=0.5, forward_expansion=4, forward_drop_p=0.0, **kwargs): super().__init__(ResidualAdd(nn.Sequential(nn.LayerNorm(emb_size), MultiHeadAttention(emb_size, **kwargs), nn.Dropout(drop_p))), ResidualAdd(nn.Sequential(nn.LayerNorm(emb_size), FeedForwardBlock(emb_size, expansion=forward_expansion, drop_p=forward_drop_p), nn.Dropout(drop_p))))
def test_date_and_delta() -> None: now = dt.datetime.now() td = dt.timedelta int_tests = (3, 29, 86399, 86400, (86401 * 30)) date_tests = [(now - td(seconds=x)) for x in int_tests] td_tests = [td(seconds=x) for x in int_tests] results = [((now - td(seconds=x)), td(seconds=x)) for x in int_tests] for t in (int_tests, date_tests, td_tests): for (arg, result) in zip(t, results): (date, d) = time._date_and_delta(arg) assert_equal_datetime(date, result[0]) assert_equal_timedelta(d, result[1]) assert (time._date_and_delta('NaN') == (None, 'NaN'))
class MemoryCgroupCollector(diamond.collector.Collector): def process_config(self): super(MemoryCgroupCollector, self).process_config() self.memory_path = self.config['memory_path'] self.skip = self.config['skip'] if (not isinstance(self.skip, list)): self.skip = [self.skip] self.skip = [re.compile(e) for e in self.skip] def should_skip(self, path): for skip_re in self.skip: if skip_re.search(path): return True return False def get_default_config_help(self): config_help = super(MemoryCgroupCollector, self).get_default_config_help() config_help.update({}) return config_help def get_default_config(self): config = super(MemoryCgroupCollector, self).get_default_config() config.update({'path': 'memory_cgroup', 'memory_path': '/sys/fs/cgroup/memory/', 'skip': []}) return config def collect(self): matches = [] for (root, dirnames, filenames) in os.walk(self.memory_path): if (not self.should_skip(root)): for filename in filenames: if (filename == 'memory.stat'): parent = root.replace(self.memory_path, '').replace('/', '.') if (parent == ''): parent = 'system' matches.append((parent, os.path.join(root, filename))) results = {} for match in matches: results[match[0]] = {} stat_file = open(match[1]) elements = [line.split() for line in stat_file] stat_file.close() for el in elements: (name, value) = el if (name not in _KEY_MAPPING): continue for unit in self.config['byte_unit']: value = diamond.convertor.binary.convert(value=value, oldUnit='B', newUnit=unit) results[match[0]][name] = value break for (parent, cpuacct) in results.iteritems(): for (key, value) in cpuacct.iteritems(): metric_name = '.'.join([parent, key]) self.publish(metric_name, value, metric_type='GAUGE') return True
def _build_vectors(): count = 0 output = [] key = None plaintext = binascii.unhexlify((32 * '0')) for size in _SIZES_TO_GENERATE: for keyinfo in _RFC6229_KEY_MATERIALS: key = _key_for_size(size, keyinfo) cipher = ciphers.Cipher(algorithms.ARC4(binascii.unhexlify(key)), None) encryptor = cipher.encryptor() current_offset = 0 for offset in _RFC6229_OFFSETS: if ((offset % 16) != 0): raise ValueError(f'Offset {offset} is not evenly divisible by 16') while (current_offset < offset): encryptor.update(plaintext) current_offset += len(plaintext) output.append(f''' COUNT = {count}''') count += 1 output.append(f'KEY = {key}') output.append(f'OFFSET = {offset}') output.append(f'PLAINTEXT = {binascii.hexlify(plaintext)}') output.append('CIPHERTEXT = {}'.format(binascii.hexlify(encryptor.update(plaintext)))) current_offset += len(plaintext) assert (not encryptor.finalize()) return '\n'.join(output)
class WaveformSeekBar(Gtk.Box): def __init__(self, player, library): super().__init__() self._player = player self._rms_vals = [] self._hovering = False self._elapsed_label = TimeLabel() self._remaining_label = TimeLabel() self._waveform_scale = WaveformScale(player) self.pack_start(Align(self._elapsed_label, border=6), False, True, 0) self.pack_start(self._waveform_scale, True, True, 0) self.pack_start(Align(self._remaining_label, border=6), False, True, 0) for child in self.get_children(): child.show_all() self.set_time_label_visibility(CONFIG.show_time_labels) self._waveform_scale.connect('size-allocate', self._update_redraw_interval) self._waveform_scale.connect('motion-notify-event', self._on_mouse_hover) self._waveform_scale.connect('leave-notify-event', self._on_mouse_leave) self._label_tracker = TimeTracker(player) self._label_tracker.connect('tick', self._on_tick_label, player) self._redraw_tracker = TimeTracker(player) self._redraw_tracker.connect('tick', self._on_tick_waveform, player) connect_destroy(player, 'seek', self._on_player_seek) connect_destroy(player, 'song-started', self._on_song_started) connect_destroy(player, 'song-ended', self._on_song_ended) connect_destroy(player, 'notify::seekable', self._on_seekable_changed) connect_destroy(library, 'changed', self._on_song_changed, player) self.connect('destroy', self._on_destroy) self._update(player) if player.info: self._create_waveform(player.info, CONFIG.max_data_points) def set_time_label_visibility(self, is_visible): self._time_labels_visible = is_visible if is_visible: self._elapsed_label.show() self._remaining_label.show() else: self._elapsed_label.hide() self._remaining_label.hide() def _create_waveform(self, song, points): self._clean_pipeline() if (not song.is_file): return command_template = '\n uridecodebin name=uridec\n ! audioconvert\n ! level name=audiolevel interval={} post-messages=true\n ! 
fakesink sync=false' interval = int(((song('~#length') * .0) / points)) if (not interval): return print_d(('Computing data for each %.3f seconds' % (interval / .0))) command = command_template.format(interval) pipeline = Gst.parse_launch(command) pipeline.get_by_name('uridec').set_property('uri', uri2gsturi(song('~uri'))) bus = pipeline.get_bus() self._bus_id = bus.connect('message', self._on_bus_message, points) bus.add_signal_watch() pipeline.set_state(Gst.State.PLAYING) self._pipeline = pipeline self._new_rms_vals = [] def _on_bus_message(self, bus, message, points): force_stop = False if (message.type == Gst.MessageType.ERROR): (error, debug) = message.parse_error() print_d(f'Error received from element {message.src.get_name()}: {error}') print_d(f'Debugging information: {debug}') elif (message.type == Gst.MessageType.ELEMENT): structure = message.get_structure() if (structure.get_name() == 'level'): rms_db = structure.get_value('rms') if rms_db: rms_db_avg = (sum(rms_db) / len(rms_db)) rms = pow(10, (rms_db_avg / 20)) self._new_rms_vals.append(rms) if (len(self._new_rms_vals) >= points): force_stop = True else: print_w(f'Got unexpected message of type {message.type}') if ((message.type == Gst.MessageType.EOS) or force_stop): self._clean_pipeline() self._rms_vals = self._new_rms_vals self._waveform_scale.reset(self._rms_vals) self._update_redraw_interval() del self._new_rms_vals def _clean_pipeline(self): if (hasattr(self, '_pipeline') and self._pipeline): self._pipeline.set_state(Gst.State.NULL) if self._bus_id: bus = self._pipeline.get_bus() bus.remove_signal_watch() bus.disconnect(self._bus_id) self._bus_id = None if self._pipeline: self._pipeline = None def _update_redraw_interval(self, *args): if (self._player.info and self.is_visible()): interval = self._waveform_scale.compute_redraw_interval() self._redraw_tracker.set_interval(interval) def _on_destroy(self, *args): self._clean_pipeline() self._label_tracker.destroy() self._redraw_tracker.destroy() def _on_tick_label(self, tracker, player): self._update_label(player) def _on_tick_waveform(self, tracker, player): self._update_waveform(player) def _on_seekable_changed(self, player, *args): self._update_label(player) def _on_player_seek(self, player, song, ms): self._update(player) def _on_song_changed(self, library, songs, player): if (not player.info): return if (player.info in songs): self._create_waveform(player.info, CONFIG.max_data_points) self._resize_labels(player.info) self._update_label(player) def _on_song_started(self, player, song): if player.info: self._create_waveform(player.info, CONFIG.max_data_points) self._resize_labels(player.info) self._rms_vals.clear() self._update(player, True) def _on_song_ended(self, player, song, ended): self._update(player) def _update(self, player, full_redraw=False): self._update_label(player) self._update_waveform(player, full_redraw) def _update_label(self, player): if (not self._time_labels_visible): self.set_sensitive(((player.info is not None) and player.seekable)) return if player.info: if self._hovering: position = self._waveform_scale.get_mouse_position() else: position = (player.get_position() / 1000.0) length = player.info('~#length') remaining = (length - position) self._elapsed_label.set_time(position) self._remaining_label.set_time(remaining) self._elapsed_label.set_disabled((not player.seekable)) self._remaining_label.set_disabled((not player.seekable)) self.set_sensitive(player.seekable) else: self._remaining_label.set_disabled(True) self._elapsed_label.set_disabled(True) 
self.set_sensitive(False) def _update_waveform(self, player, full_redraw=False): if player.info: position = (player.get_position() / 1000.0) length = player.info('~#length') if (length != 0): self._waveform_scale.set_position((position / length)) else: print_d(('Length reported as zero for %s' % player.info)) self._waveform_scale.set_position(0) if ((position == 0) or full_redraw): self._waveform_scale.queue_draw() else: (x, y, w, h) = self._waveform_scale.compute_redraw_area() self._waveform_scale.queue_draw_area(x, y, w, h) else: self._rms_vals.clear() self._waveform_scale.queue_draw() def _on_mouse_hover(self, _, event): def clamp(a, x, b): return min(max(x, a), b) width = self._waveform_scale.get_allocation().width self._waveform_scale.set_mouse_x_position(clamp(0, event.x, width)) if self._hovering: (x, y, w, h) = self._waveform_scale.compute_hover_redraw_area() self._waveform_scale.queue_draw_area(x, y, w, h) else: self._waveform_scale.queue_draw() self._update_label(self._player) self._hovering = True def _on_mouse_leave(self, _, event): self._waveform_scale.set_mouse_x_position((- 1)) self._waveform_scale.queue_draw() self._hovering = False self._update_label(self._player) def _resize_labels(self, song): length = util.format_time_display(song('~#length')) layout = self._remaining_label.get_layout() layout.set_text(length, (- 1)) (width, height) = layout.get_pixel_size() self._remaining_label.set_size_request(width, (- 1)) self._elapsed_label.set_size_request(width, (- 1))
class MultiHeadedAttention(nn.Module): def __init__(self, num_heads: int, dim_model: int, dropout: float=0.1, device: Optional[torch.device]=None) -> None: super().__init__() assert ((dim_model % num_heads) == 0) self.d_k: int = (dim_model // num_heads) self.num_heads = num_heads self.linear_layers = nn.ModuleList([nn.Linear(dim_model, dim_model, device=device) for _ in range(3)]) self.output_linear = nn.Linear(dim_model, dim_model, device=device) self.attention = Attention() self.dropout = nn.Dropout(p=dropout) def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: Optional[torch.Tensor]=None) -> torch.Tensor: batch_size = query.size(0) (query, key, value) = [linearLayer(x).view(batch_size, (- 1), self.num_heads, self.d_k).transpose(1, 2) for (linearLayer, x) in zip(self.linear_layers, (query, key, value))] (x, attn) = self.attention(query, key, value, mask=mask, dropout=self.dropout) x = x.transpose(1, 2).contiguous().view(batch_size, (- 1), (self.num_heads * self.d_k)) return self.output_linear(x)
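# Hedged usage sketch for MultiHeadedAttention above (not part of the original source).
# It assumes the companion `Attention` module referenced in __init__ implements standard
# scaled dot-product attention and is importable alongside this class; the .view/.transpose
# calls imply the usual (batch, seq_len, dim_model) input convention.
import torch

num_heads, dim_model = 8, 512            # dim_model must be divisible by num_heads
mha = MultiHeadedAttention(num_heads=num_heads, dim_model=dim_model)

x = torch.randn(2, 16, dim_model)        # (batch=2, seq_len=16, dim_model)
out = mha(x, x, x)                        # self-attention: query = key = value
assert out.shape == (2, 16, dim_model)   # output keeps the input shape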
def test_pyright_baseline(): test_file = (Path(__file__).parent / 'dataclass_transform_example.py') diagnostics = parse_pyright_output(test_file) expected_diagnostics = {PyrightDiagnostic(severity='information', message='Type of "Define.__init__" is "(self: Define, a: str, b: int) -> None"'), PyrightDiagnostic(severity='information', message='Type of "DefineConverter.__init__" is "(self: DefineConverter, with_converter: str | Buffer | SupportsInt | SupportsIndex | SupportsTrunc) -> None"'), PyrightDiagnostic(severity='error', message='Cannot assign member "a" for type "Frozen"\n\xa0\xa0"Frozen" is frozen\n\xa0\xa0\xa0\xa0Member "__set__" is unknown'), PyrightDiagnostic(severity='information', message='Type of "d.a" is "Literal[\'new\']"'), PyrightDiagnostic(severity='error', message='Cannot assign member "a" for type "FrozenDefine"\n\xa0\xa0"FrozenDefine" is frozen\n\xa0\xa0\xa0\xa0Member "__set__" is unknown'), PyrightDiagnostic(severity='information', message='Type of "d2.a" is "Literal[\'new\']"'), PyrightDiagnostic(severity='information', message='Type of "af.__init__" is "(_a: int) -> None"')} assert (expected_diagnostics == diagnostics)
def main(argv): lr_start = FLAGS.start_lr lr_end = FLAGS.end_lr epochs = FLAGS.epochs multiplier = ((lr_end / lr_start) ** (1 / epochs)) decayed_lr = [(lr_start * (multiplier ** x)) for x in range(epochs)] train_kargs = {'epochs': epochs, 'learning_rate': decayed_lr} directory = FLAGS.model_save_dir if (not os.path.exists(directory)): os.makedirs(directory) config_file = open((directory + '/config.txt'), 'w') config_file.write(FLAGS.flags_into_string()) config_file.close() h5_file = h5py.File(FLAGS.input_dir, 'r') train_provider = DecafEndToEndProvider(h5_file) model = Model(name=FLAGS.name) model.train(FLAGS.model_save_dir, train_provider, **train_kargs)
def t2_circuit_execution() -> Tuple[(qiskit.result.Result, np.array, List[int], float)]: num_of_gates = np.linspace(1, 30, 10).astype(int) gate_time = 0.11 qubits = [0] n_echos = 5 alt_phase_echo = True (circs, xdata) = t2_circuits(num_of_gates, gate_time, qubits, n_echos, alt_phase_echo) t2_value = 20 error = thermal_relaxation_error(np.inf, t2_value, gate_time, 0.5) noise_model = NoiseModel() noise_model.add_all_qubit_quantum_error(error, 'id') backend = qiskit.Aer.get_backend('qasm_simulator') shots = 100 qobj = qiskit.assemble(qiskit.transpile(circs, backend=backend, optimization_level=0), backend=backend, shots=shots, seed_simulator=SEED, noise_model=noise_model, max_parallel_experiments=0) backend_result = backend.run(qobj).result() return (backend_result, xdata, qubits, t2_value)
def compute_value_of_function(info: FunctionInfo, ctx: Context, *, result: Optional[Value]=None) -> Value: if (result is None): result = info.return_annotation if (result is None): result = AnyValue(AnySource.unannotated) if isinstance(info.node, ast.AsyncFunctionDef): visitor = IsGeneratorVisitor() for line in info.node.body: visitor.visit(line) if visitor.is_generator: break if (not visitor.is_generator): result = make_coro_type(result) sig = Signature.make([param_info.param for param_info in info.params], result, has_return_annotation=(info.return_annotation is not None)) val = CallableValue(sig, types.FunctionType) for (unapplied, decorator, node) in reversed(info.decorators): if ((unapplied == KnownValue(asynq.asynq)) and isinstance(val, CallableValue)): sig = replace(val.signature, is_asynq=True) val = CallableValue(sig, val.typ) continue allow_call = (isinstance(unapplied, KnownValue) and SafeDecoratorsForNestedFunctions.contains(unapplied.val, ctx.options)) val = ctx.check_call(node, decorator, [Composite(val)], allow_call=allow_call) return val
class BrowserBasedOAuth1(BaseOAuth1): REQUEST_TOKEN_URL = '' OAUTH_TOKEN_PARAMETER_NAME = 'oauth_token' REDIRECT_URI_PARAMETER_NAME = 'redirect_uri' ACCESS_TOKEN_URL = '' def auth_url(self): return self.unauthorized_token_request() def get_unauthorized_token(self): return self.strategy.request_data() def unauthorized_token_request(self): params = self.request_token_extra_arguments() params.update(self.get_scope_argument()) (key, secret) = self.get_key_and_secret() state = self.get_or_create_state() auth = OAuth1(key, secret, callback_uri=self.get_redirect_uri(state), signature_method=SIGNATURE_HMAC, signature_type=SIGNATURE_TYPE_QUERY, decoding=None) url = ((self.REQUEST_TOKEN_URL + '?') + urlencode(params)) (url, _, _) = auth.client.sign(url) return url def oauth_auth(self, token=None, oauth_verifier=None): (key, secret) = self.get_key_and_secret() oauth_verifier = (oauth_verifier or self.data.get('oauth_verifier')) token = (token or {}) state = self.get_or_create_state() return OAuth1(key, secret, resource_owner_key=token.get('oauth_token'), resource_owner_secret=token.get('oauth_token_secret'), callback_uri=self.get_redirect_uri(state), verifier=oauth_verifier, signature_method=SIGNATURE_HMAC, signature_type=SIGNATURE_TYPE_QUERY, decoding=None)
def _unpack_iterable_of_pairs(val: Value, ctx: CanAssignContext) -> Union[(Sequence[KVPair], CanAssignError)]: concrete = concrete_values_from_iterable(val, ctx) if isinstance(concrete, CanAssignError): return concrete if isinstance(concrete, Value): vals = unpack_values(concrete, ctx, 2) if isinstance(vals, CanAssignError): return CanAssignError(f'{concrete} is not a key-value pair', [vals]) return [KVPair(vals[0], vals[1], is_many=True)] kv_pairs = [] for (i, subval) in enumerate(concrete): vals = unpack_values(subval, ctx, 2) if isinstance(vals, CanAssignError): child = CanAssignError(f'{concrete} is not a key-value pair', [vals]) return CanAssignError(f'In member {i} of iterable {val}', [child]) kv_pairs.append(KVPair(vals[0], vals[1])) return kv_pairs
def load_manage_dict(filename=None): manage_filename = None if (not MANAGE_DICT): if filename: manage_filename = filename elif os.path.exists(MANAGE_FILE): manage_filename = MANAGE_FILE elif os.path.exists(HIDDEN_MANAGE_FILE): manage_filename = HIDDEN_MANAGE_FILE else: MANAGE_DICT.update(copy.deepcopy(default_manage_dict)) MANAGE_DICT['shell']['banner']['message'] = 'WARNING: This is not a managed project\n\tPlease `exit()` and \n\trun `$ manage init`\n\tand edit `manage.yml` file with desired options' MANAGE_DICT['shell']['auto_import']['display'] = False if manage_filename: loader = (yaml.FullLoader if hasattr(yaml, 'FullLoader') else yaml.Loader) with open(manage_filename) as manage_file: MANAGE_DICT.update(yaml.load(manage_file, Loader=loader)) return MANAGE_DICT
def copy_model(src_model_name, tgt_model_name): src_model_path = get_model_path(src_model_name) model_dir = (Path(__file__).parent / 'pretrained') tgt_model_path = (model_dir / tgt_model_name) assert (not tgt_model_path.exists()), (('provided model name ' + tgt_model_name) + ' has already exist. Consider another name or delete the existing one') shutil.copytree(str(src_model_path), str(tgt_model_path))
def _infer_instance_from_annotation(node: nodes.NodeNG, ctx: (context.InferenceContext | None)=None) -> Iterator[(UninferableBase | bases.Instance)]: klass = None try: klass = next(node.infer(context=ctx)) except (InferenceError, StopIteration): (yield Uninferable) if (not isinstance(klass, nodes.ClassDef)): (yield Uninferable) elif (klass.root().name in {'typing', '_collections_abc', ''}): if (klass.name in _INFERABLE_TYPING_TYPES): (yield klass.instantiate_class()) else: (yield Uninferable) else: (yield klass.instantiate_class())
class ConvertToTranscribedDataTest(unittest.TestCase): def test_convert_to_transcribed_data(self): result_aligned = {'segments': [{'words': [{'word': 'UltraSinger', 'start': 1.23, 'end': 2.34, 'confidence': 0.95}, {'word': 'is', 'start': 2.34, 'end': 3.45, 'confidence': 0.9}, {'word': 'cool!', 'start': 3.45, 'end': 4.56, 'confidence': 0.85}]}, {'words': [{'word': 'And', 'start': 4.56, 'end': 5.67, 'confidence': 0.95}, {'word': 'will', 'start': 5.67, 'end': 6.78, 'confidence': 0.9}, {'word': 'be', 'start': 6.78, 'end': 7.89, 'confidence': 0.85}, {'word': 'better!', 'start': 7.89, 'end': 9.01, 'confidence': 0.8}]}]} expected_output = [TranscribedData({'word': 'UltraSinger ', 'start': 1.23, 'end': 2.34, 'is_hyphen': None, 'confidence': 0.95}), TranscribedData({'word': 'is ', 'start': 2.34, 'end': 3.45, 'is_hyphen': None, 'confidence': 0.9}), TranscribedData({'word': 'cool! ', 'start': 3.45, 'end': 4.56, 'is_hyphen': None, 'confidence': 0.85}), TranscribedData({'word': 'And ', 'start': 4.56, 'end': 5.67, 'is_hyphen': None, 'confidence': 0.95}), TranscribedData({'word': 'will ', 'start': 5.67, 'end': 6.78, 'is_hyphen': None, 'confidence': 0.9}), TranscribedData({'word': 'be ', 'start': 6.78, 'end': 7.89, 'is_hyphen': None, 'confidence': 0.85}), TranscribedData({'word': 'better! ', 'start': 7.89, 'end': 9.01, 'is_hyphen': None, 'confidence': 0.8})] transcribed_data = convert_to_transcribed_data(result_aligned) self.assertEqual(len(transcribed_data), len(expected_output)) for i in range(len(transcribed_data)): self.assertEqual(transcribed_data[i].word, expected_output[i].word) self.assertEqual(transcribed_data[i].end, expected_output[i].end) self.assertEqual(transcribed_data[i].start, expected_output[i].start) self.assertEqual(transcribed_data[i].is_hyphen, expected_output[i].is_hyphen)
def makeUpdateMatrix(qnnArch, unitaries, trainingData, storedStates, lda, ep, l, j): numInputQubits = qnnArch[(l - 1)] summ = 0 for x in range(len(trainingData)): firstPart = updateMatrixFirstPart(qnnArch, unitaries, storedStates, l, j, x) secondPart = updateMatrixSecondPart(qnnArch, unitaries, trainingData, l, j, x) mat = qt.commutator(firstPart, secondPart) keep = list(range(numInputQubits)) keep.append((numInputQubits + j)) mat = partialTraceKeep(mat, keep) summ = (summ + mat) summ = ((((- ep) * (2 ** numInputQubits)) / (lda * len(trainingData))) * summ) return summ.expm()
@torch.no_grad() def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path): checkpoint = torch.load(checkpoint_path, map_location='cpu') if (checkpoint['Config']['downstream_expert']['modelrc']['select'] not in SUPPORTED_MODELS): raise NotImplementedError(f'The supported s3prl models are {SUPPORTED_MODELS}') downstream_dict = checkpoint['Downstream'] hf_congfig = HubertConfig.from_pretrained(config_path) hf_model = HubertForSequenceClassification.from_pretrained(base_model_name, config=hf_congfig) hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(base_model_name, return_attention_mask=True, do_normalize=False) if hf_congfig.use_weighted_layer_sum: hf_model.layer_weights.data = checkpoint['Featurizer']['weights'] hf_model.projector.weight.data = downstream_dict['projector.weight'] hf_model.projector.bias.data = downstream_dict['projector.bias'] hf_model.classifier.weight.data = downstream_dict['model.post_net.linear.weight'] hf_model.classifier.bias.data = downstream_dict['model.post_net.linear.bias'] hf_feature_extractor.save_pretrained(model_dump_path) hf_model.save_pretrained(model_dump_path)
def _get_new_season_shows(config, db): handlers = services.get_link_handlers() for site in db.get_link_sites(): if (site.key not in handlers): warning('Link site handler for {} not installed'.format(site.key)) continue handler = handlers.get(site.key) info(' Checking {} ({})'.format(handler.name, handler.key)) raw_shows = handler.get_seasonal_shows(useragent=config.useragent) for raw_show in raw_shows: (yield raw_show)
def get_path_iterator(tsv, nshard, rank): with open(tsv, 'r') as f: root = f.readline().rstrip() lines = [line.rstrip() for line in f] tot = len(lines) shard_size = math.ceil((tot / nshard)) (start, end) = ((rank * shard_size), min(((rank + 1) * shard_size), tot)) assert (start < end), f'start={start}, end={end}' logger.info(f'rank {rank} of {nshard}, process {(end - start)} ({start}-{end}) out of {tot}') lines = lines[start:end] def iterate(): for line in lines: (subpath, nsample) = line.split('\t') (yield (f'{root}/{subpath}', int(nsample))) return (iterate, len(lines))
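# Hedged usage sketch for get_path_iterator above (illustrative only). The tsv follows the
# fairseq/HuBERT manifest layout: first line is the audio root, each following line is
# "<relative path>\t<num samples>". `logger` is assumed to be the module-level logger the
# function already references.
import os
import tempfile

manifest = '/data/audio\nspk1/a.wav\t16000\nspk1/b.wav\t32000\nspk2/c.wav\t8000\n'
with tempfile.NamedTemporaryFile('w', suffix='.tsv', delete=False) as f:
    f.write(manifest)

iterate, n = get_path_iterator(f.name, nshard=2, rank=0)
print(n)                        # 2 of the 3 entries fall into shard 0
for path, nsample in iterate():
    print(path, nsample)        # /data/audio/spk1/a.wav 16000, then /data/audio/spk1/b.wav 32000
os.remove(f.name)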
def vcf_fields(category, max_number): return builds(Field, category=just(category), vcf_key=vcf_field_keys(category), vcf_type=vcf_types(category), vcf_number=vcf_numbers(category, max_number)).filter((lambda field: (((field.vcf_type == 'Flag') and (field.vcf_number == '0')) or ((field.vcf_type != 'Flag') and (field.vcf_number != '0')))))
def log_stats_finegrainedness(nodes, get_leaves_fn, get_lowest_common_ancestor_fn, graph_name=None, num_per_height_to_print=2, num_leaf_pairs=10000, path='longest'): logging.info('Finegrainedness analysis of %s graph using %s paths in finding the lowest common ancestor.', graph_name, path) leaves = get_leaves_fn(nodes) heights_to_examples = collections.defaultdict(list) heights_to_num_lca_root = collections.defaultdict(int) heights = [] for _ in range(num_leaf_pairs): first_ind = np.random.randint(len(leaves)) second_ind = np.random.randint(len(leaves)) while (first_ind == second_ind): second_ind = np.random.randint(len(leaves)) leaf_a = leaves[first_ind] leaf_b = leaves[second_ind] (lca, height) = get_lowest_common_ancestor_fn(leaf_a, leaf_b, path=path) heights.append(height) heights_to_examples[height].append((leaf_a.words, leaf_b.words, lca.words)) if (not lca.parents): heights_to_num_lca_root[height] += 1 name_message = (' of the {} graph'.format(graph_name) if (graph_name is not None) else '') stats_message = 'mean: {}, median: {}, max: {}, min: {}'.format(np.mean(heights), np.median(heights), max(heights), min(heights)) logging.info('Stats on the height of the Lowest Common Ancestor of random leaf pairs%s: %s', name_message, stats_message) heights_to_num_examples = {} heights_to_proportion_root = {} for (h, examples) in heights_to_examples.items(): heights_to_num_examples[h] = (len(examples) / num_leaf_pairs) heights_to_proportion_root[h] = (heights_to_num_lca_root[h] / float(len(examples))) logging.info('Proportion of example leaf pairs (out of num_leaf_pairs random pairs) for each height of the LCA of the leaves: %s', heights_to_num_examples) logging.info('Proportion of example leaf pairs per height whose LCA is the root: %s', heights_to_proportion_root) logging.info('Examples with different fine-grainedness:\n') for height in heights_to_examples.keys(): for (i, example) in enumerate(heights_to_examples[height]): if (i == num_per_height_to_print): break logging.info('Examples with height %s:\nleafs: %s and %s. LCA: %s', height, example[0], example[1], example[2])
def log_returns(returns, benchmark=None, grayscale=False, figsize=(10, 5), fontname='Arial', lw=1.5, match_volatility=False, compound=True, cumulative=True, resample=None, ylabel='Cumulative Returns', subtitle=True, savefig=None, show=True, prepare_returns=True): title = ('Cumulative Returns' if compound else 'Returns') if (benchmark is not None): if isinstance(benchmark, str): title += (' vs %s (Log Scaled' % benchmark.upper()) else: title += ' vs Benchmark (Log Scaled' if match_volatility: title += ', Volatility Matched' else: title += ' (Log Scaled' title += ')' if prepare_returns: returns = _utils._prepare_returns(returns) benchmark = _utils._prepare_benchmark(benchmark, returns.index) fig = _core.plot_timeseries(returns, benchmark, title, ylabel=ylabel, match_volatility=match_volatility, log_scale=True, resample=resample, compound=compound, cumulative=cumulative, lw=lw, figsize=figsize, fontname=fontname, grayscale=grayscale, subtitle=subtitle, savefig=savefig, show=show) if (not show): return fig
def test_resnest_backbone(): with pytest.raises(KeyError): ResNeSt(depth=18) model = ResNeSt(depth=50, radix=2, reduction_factor=4, out_indices=(0, 1, 2, 3)) model.init_weights() model.train() imgs = torch.randn(2, 3, 224, 224) feat = model(imgs) assert (len(feat) == 4) assert (feat[0].shape == torch.Size([2, 256, 56, 56])) assert (feat[1].shape == torch.Size([2, 512, 28, 28])) assert (feat[2].shape == torch.Size([2, 1024, 14, 14])) assert (feat[3].shape == torch.Size([2, 2048, 7, 7]))
def get_ner_fmeasure(golden_lists, predict_lists, label_type='BMES'): sent_num = len(golden_lists) golden_full = [] predict_full = [] right_full = [] right_tag = 0 all_tag = 0 for idx in range(0, sent_num): golden_list = golden_lists[idx] predict_list = predict_lists[idx] for idy in range(len(golden_list)): if (golden_list[idy] == predict_list[idy]): right_tag += 1 all_tag += len(golden_list) if ((label_type == 'BMES') or (label_type == 'BIOES')): gold_matrix = get_ner_BMES(golden_list) pred_matrix = get_ner_BMES(predict_list) else: gold_matrix = get_ner_BIO(golden_list) pred_matrix = get_ner_BIO(predict_list) right_ner = list(set(gold_matrix).intersection(set(pred_matrix))) golden_full += gold_matrix predict_full += pred_matrix right_full += right_ner right_num = len(right_full) golden_num = len(golden_full) predict_num = len(predict_full) if (predict_num == 0): precision = (- 1) else: precision = ((right_num + 0.0) / predict_num) if (golden_num == 0): recall = (- 1) else: recall = ((right_num + 0.0) / golden_num) if ((precision == (- 1)) or (recall == (- 1)) or ((precision + recall) <= 0.0)): f_measure = (- 1) else: f_measure = (((2 * precision) * recall) / (precision + recall)) accuracy = ((right_tag + 0.0) / all_tag) if label_type.upper().startswith('B-'): print('gold_num = ', golden_num, ' pred_num = ', predict_num, ' right_num = ', right_num) else: print('Right token = ', right_tag, ' All token = ', all_tag, ' acc = ', accuracy) return (accuracy, precision, recall, f_measure)
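# Hedged usage sketch for get_ner_fmeasure above (illustrative only). It assumes the helper
# get_ner_BIO from the same module, which turns a BIO tag sequence into a list of entity spans;
# the exact span format does not matter here, only that identical spans compare equal between
# gold and prediction.
golden_lists = [['B-PER', 'I-PER', 'O', 'B-LOC']]
predict_lists = [['B-PER', 'I-PER', 'O', 'O']]      # misses the LOC entity

acc, p, r, f = get_ner_fmeasure(golden_lists, predict_lists, label_type='BIO')
# token accuracy = 3/4, precision = 1/1, recall = 1/2, F1 = 2*p*r/(p+r) = 2/3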
def get_args_parser(): parser = argparse.ArgumentParser('Prepare images of trash for detection task') parser.add_argument('--dataset_dest', help='paths to annotations', nargs='+', default=['annotations/annotations-epi.json']) parser.add_argument('--split_dest', help='path to destination directory', default='annotations/', type=str) parser.add_argument('--test_split', help='fraction of dataset for test', default=0.2, type=float) return parser
class IntelHexError(Exception): _fmt = 'IntelHex base error' def __init__(self, msg=None, **kw): self.msg = msg for (key, value) in dict_items_g(kw): setattr(self, key, value) def __str__(self): if self.msg: return self.msg try: return (self._fmt % self.__dict__) except (NameError, ValueError, KeyError): e = sys.exc_info()[1] return ('Unprintable exception %s: %s' % (repr(e), str(e)))
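# Hedged usage sketch for IntelHexError above (hypothetical subclass, not from the library).
# It shows how _fmt plus the keyword arguments stored by __init__ are combined in __str__;
# dict_items_g is assumed to be the py2/py3 compat helper the module already provides.
class _AddressOverlapError(IntelHexError):
    _fmt = 'data overlap at address 0x%(address)X'

try:
    raise _AddressOverlapError(address=0x100)
except IntelHexError as exc:
    print(exc)   # -> data overlap at address 0x100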
def get_insaneDA_augmentation(dataloader_train, dataloader_val, patch_size, params=default_3D_augmentation_params, border_val_seg=(- 1), seeds_train=None, seeds_val=None, order_seg=1, order_data=3, deep_supervision_scales=None, soft_ds=False, classes=None, pin_memory=True, regions=None): assert (params.get('mirror') is None), 'old version of params, use new keyword do_mirror' tr_transforms = [] if (params.get('selected_data_channels') is not None): tr_transforms.append(DataChannelSelectionTransform(params.get('selected_data_channels'))) if (params.get('selected_seg_channels') is not None): tr_transforms.append(SegChannelSelectionTransform(params.get('selected_seg_channels'))) if ((params.get('dummy_2D') is not None) and params.get('dummy_2D')): ignore_axes = (0,) tr_transforms.append(Convert3DTo2DTransform()) else: ignore_axes = None tr_transforms.append(SpatialTransform(patch_size, patch_center_dist_from_border=None, do_elastic_deform=params.get('do_elastic'), alpha=params.get('elastic_deform_alpha'), sigma=params.get('elastic_deform_sigma'), do_rotation=params.get('do_rotation'), angle_x=params.get('rotation_x'), angle_y=params.get('rotation_y'), angle_z=params.get('rotation_z'), do_scale=params.get('do_scaling'), scale=params.get('scale_range'), border_mode_data=params.get('border_mode_data'), border_cval_data=0, order_data=order_data, border_mode_seg='constant', border_cval_seg=border_val_seg, order_seg=order_seg, random_crop=params.get('random_crop'), p_el_per_sample=params.get('p_eldef'), p_scale_per_sample=params.get('p_scale'), p_rot_per_sample=params.get('p_rot'), independent_scale_for_each_axis=params.get('independent_scale_factor_for_each_axis'), p_independent_scale_per_axis=params.get('p_independent_scale_per_axis'))) if params.get('dummy_2D'): tr_transforms.append(Convert2DTo3DTransform()) tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.15)) tr_transforms.append(GaussianBlurTransform((0.5, 1.5), different_sigma_per_channel=True, p_per_sample=0.2, p_per_channel=0.5)) tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.7, 1.3), p_per_sample=0.15)) tr_transforms.append(ContrastAugmentationTransform(contrast_range=(0.65, 1.5), p_per_sample=0.15)) tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True, p_per_channel=0.5, order_downsample=0, order_upsample=3, p_per_sample=0.25, ignore_axes=ignore_axes)) tr_transforms.append(GammaTransform(params.get('gamma_range'), True, True, retain_stats=params.get('gamma_retain_stats'), p_per_sample=0.15)) if params.get('do_additive_brightness'): tr_transforms.append(BrightnessTransform(params.get('additive_brightness_mu'), params.get('additive_brightness_sigma'), True, p_per_sample=params.get('additive_brightness_p_per_sample'), p_per_channel=params.get('additive_brightness_p_per_channel'))) if params.get('do_gamma'): tr_transforms.append(GammaTransform(params.get('gamma_range'), False, True, retain_stats=params.get('gamma_retain_stats'), p_per_sample=params['p_gamma'])) if (params.get('do_mirror') or params.get('mirror')): tr_transforms.append(MirrorTransform(params.get('mirror_axes'))) if (params.get('mask_was_used_for_normalization') is not None): mask_was_used_for_normalization = params.get('mask_was_used_for_normalization') tr_transforms.append(MaskTransform(mask_was_used_for_normalization, mask_idx_in_seg=0, set_outside_to=0)) tr_transforms.append(RemoveLabelTransform((- 1), 0)) if ((params.get('move_last_seg_chanel_to_data') is not None) and 
params.get('move_last_seg_chanel_to_data')): tr_transforms.append(MoveSegAsOneHotToData(1, params.get('all_segmentation_labels'), 'seg', 'data')) if ((params.get('cascade_do_cascade_augmentations') is not None) and params.get('cascade_do_cascade_augmentations')): if (params.get('cascade_random_binary_transform_p') > 0): tr_transforms.append(ApplyRandomBinaryOperatorTransform(channel_idx=list(range((- len(params.get('all_segmentation_labels'))), 0)), p_per_sample=params.get('cascade_random_binary_transform_p'), key='data', strel_size=params.get('cascade_random_binary_transform_size'))) if (params.get('cascade_remove_conn_comp_p') > 0): tr_transforms.append(RemoveRandomConnectedComponentFromOneHotEncodingTransform(channel_idx=list(range((- len(params.get('all_segmentation_labels'))), 0)), key='data', p_per_sample=params.get('cascade_remove_conn_comp_p'), fill_with_other_class_p=params.get('cascade_remove_conn_comp_max_size_percent_threshold'), dont_do_if_covers_more_than_X_percent=params.get('cascade_remove_conn_comp_fill_with_other_class_p'))) tr_transforms.append(RenameTransform('seg', 'target', True)) if (regions is not None): tr_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target')) if (deep_supervision_scales is not None): if soft_ds: assert (classes is not None) tr_transforms.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes)) else: tr_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, 0, input_key='target', output_key='target')) tr_transforms.append(NumpyToTensor(['data', 'target'], 'float')) tr_transforms = Compose(tr_transforms) batchgenerator_train = MultiThreadedAugmenter(dataloader_train, tr_transforms, params.get('num_threads'), params.get('num_cached_per_thread'), seeds=seeds_train, pin_memory=pin_memory) val_transforms = [] val_transforms.append(RemoveLabelTransform((- 1), 0)) if (params.get('selected_data_channels') is not None): val_transforms.append(DataChannelSelectionTransform(params.get('selected_data_channels'))) if (params.get('selected_seg_channels') is not None): val_transforms.append(SegChannelSelectionTransform(params.get('selected_seg_channels'))) if ((params.get('move_last_seg_chanel_to_data') is not None) and params.get('move_last_seg_chanel_to_data')): val_transforms.append(MoveSegAsOneHotToData(1, params.get('all_segmentation_labels'), 'seg', 'data')) val_transforms.append(RenameTransform('seg', 'target', True)) if (regions is not None): val_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target')) if (deep_supervision_scales is not None): if soft_ds: assert (classes is not None) val_transforms.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes)) else: val_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, 0, input_key='target', output_key='target')) val_transforms.append(NumpyToTensor(['data', 'target'], 'float')) val_transforms = Compose(val_transforms) batchgenerator_val = MultiThreadedAugmenter(dataloader_val, val_transforms, max((params.get('num_threads') // 2), 1), params.get('num_cached_per_thread'), seeds=seeds_val, pin_memory=pin_memory) return (batchgenerator_train, batchgenerator_val)
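# Minimal, self-contained sketch of the batchgenerators pattern the function above
# composes at scale (assumes batchgenerators and torch are installed; the tiny
# pipeline and random batch here are illustrative, not part of the source):
import numpy as np
from batchgenerators.transforms.abstract_transforms import Compose
from batchgenerators.transforms.noise_transforms import GaussianNoiseTransform
from batchgenerators.transforms.utility_transforms import NumpyToTensor

pipeline = Compose([
    GaussianNoiseTransform(p_per_sample=0.15),   # applied to the 'data' key with probability 0.15
    NumpyToTensor(['data'], 'float'),            # convert the numpy array to a float torch tensor
])
batch = {'data': np.random.rand(2, 1, 32, 32, 32).astype(np.float32)}
out = pipeline(**batch)   # transforms run in order; out['data'] is now a torch.Tensor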
@pytest.mark.parametrize('username,password', users) def test_update_m2m(db, client, username, password): client.login(username=username, password=password) instances = Section.objects.all() for instance in instances: pages = [{'page': section_page.page_id, 'order': section_page.order} for section_page in instance.section_pages.all()[:1]] url = reverse(urlnames['detail'], args=[instance.pk]) data = {'uri_prefix': instance.uri_prefix, 'uri_path': instance.uri_path, 'comment': instance.comment, 'pages': pages, 'title_en': instance.title_lang1, 'title_de': instance.title_lang2} response = client.put(url, data, content_type='application/json') assert (response.status_code == status_map['update'][username]), response.json() if (response.status_code == 200): instance.refresh_from_db() assert (pages == [{'page': section_page.page_id, 'order': section_page.order} for section_page in instance.section_pages.all()])
class LeaveGroupCall(Scaffold): async def leave_group_call(self, chat_id: Union[(int, str)]): if (self._app is None): raise NoMTProtoClientSet() if (not self._is_running): raise ClientNotStarted() chat_id = (await self._resolve_chat_id(chat_id)) chat_call = (await self._app.get_full_chat(chat_id)) if (chat_call is None): raise NoActiveGroupCall() (await self._app.leave_group_call(chat_id)) try: (await ToAsync(self._binding.stop, chat_id)) except ConnectionError: raise NotInGroupCallError() if (chat_id in self._need_unmute): del self._need_unmute[chat_id]
def _avg_pool(name, tuple_fn): @parse_args('v', 'is', 'is', 'is', 'i', 'i', 'none') def symbolic_fn(g, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override=None): padding = sym_help._avgpool_helper(tuple_fn, padding, kernel_size, stride, divisor_override, name) if (not stride): stride = kernel_size if count_include_pad: input = g.op('Pad', input, g.op('Constant', value_t=torch.tensor(((((0,) * 2) + padding) * 2))), mode_s='constant') padding = ((0,) * len(padding)) output = g.op('AveragePool', input, kernel_shape_i=tuple_fn(kernel_size), strides_i=tuple_fn(stride), pads_i=(padding * 2), ceil_mode_i=ceil_mode) return output return symbolic_fn
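# Sketch of how this factory is typically instantiated (mirrors the torch.onnx
# opset-9 convention; treat the exact wiring as an assumption about that module):
from torch.nn.modules.utils import _single, _pair, _triple

avg_pool1d = _avg_pool('avg_pool1d', _single)
avg_pool2d = _avg_pool('avg_pool2d', _pair)
avg_pool3d = _avg_pool('avg_pool3d', _triple)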
def reduce_by_error(logs, error_filter=None): counter = Counter() counter.update([x[1] for x in logs]) counts = counter.most_common() r = {} for (error, count) in counts: if ((error_filter is None) or (error not in error_filter)): r[error] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if (x[1] == error)]} r = dict(sorted(r.items(), key=(lambda item: item[1]['count']), reverse=True)) return r
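# Illustrative call (invented data): each log entry is assumed to be a
# (log_path, error_message, test_name) tuple, matching the x[0]/x[1]/x[2] indexing above.
logs = [
    ('run1.log', 'AssertionError', 'test_a'),
    ('run2.log', 'AssertionError', 'test_b'),
    ('run3.log', 'TimeoutError', 'test_c'),
]
summary = reduce_by_error(logs)
# summary == {
#     'AssertionError': {'count': 2, 'failed_tests': [('test_a', 'run1.log'), ('test_b', 'run2.log')]},
#     'TimeoutError': {'count': 1, 'failed_tests': [('test_c', 'run3.log')]},
# }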
class IrreversibleBlock(nn.Module): def __init__(self, f, g): super().__init__() self.f = f self.g = g def forward(self, x, f_args, g_args): (x1, x2) = torch.chunk(x, 2, dim=1) y1 = (x1 + self.f(x2, **f_args)) y2 = (x2 + self.g(y1, **g_args)) return torch.cat([y1, y2], dim=1)
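# Usage sketch (illustrative shapes): f and g are any modules that map half the
# channels to half the channels; extra keyword arguments are routed via f_args/g_args.
import torch
import torch.nn as nn

f = nn.Conv2d(8, 8, kernel_size=3, padding=1)
g = nn.Conv2d(8, 8, kernel_size=3, padding=1)
block = IrreversibleBlock(lambda x, **_: f(x), lambda x, **_: g(x))
x = torch.randn(2, 16, 32, 32)        # 16 channels are split into two halves of 8
y = block(x, f_args={}, g_args={})    # y has the same shape as x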
class BinarySearchPredicate(): def __init__(self, A: int, B: int, tolerance: int) -> None: self.left = A self.right = B self.tolerance = tolerance self.first = True def next(self, prior_result: bool) -> Optional[int]: if ((self.right - self.left) < self.tolerance): return None mid = self._mid() if self.first: self.first = False return mid if prior_result: self.left = (mid + 1) else: self.right = (mid - 1) if ((self.right - self.left) < self.tolerance): return None return self._mid() def _mid(self) -> int: return (self.left + ((self.right - self.left) // 2))
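# Usage sketch: the caller feeds back the predicate outcome for the previous probe;
# next() returns the next value to test, or None once the bracket is within tolerance.
# Example (invented): find where x*x first reaches 2000 on [0, 100].
searcher = BinarySearchPredicate(0, 100, tolerance=1)
probe = searcher.next(True)        # the first call ignores prior_result
while probe is not None:
    probe = searcher.next(probe * probe < 2000)   # True means "search higher"
# searcher.left and searcher.right now bracket the threshold (around 45)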
def assert_psm3_equal(data, metadata, expected): assert np.allclose(data.Year, expected.Year) assert np.allclose(data.Month, expected.Month) assert np.allclose(data.Day, expected.Day) assert np.allclose(data.Hour, expected.Hour) assert np.allclose(data.Minute, expected.Minute) assert np.allclose(data.GHI, expected.GHI) assert np.allclose(data.DNI, expected.DNI) assert np.allclose(data.DHI, expected.DHI) assert np.allclose(data.Temperature, expected.Temperature) assert np.allclose(data.Pressure, expected.Pressure) assert np.allclose(data['Dew Point'], expected['Dew Point']) assert np.allclose(data['Surface Albedo'], expected['Surface Albedo']) assert np.allclose(data['Wind Speed'], expected['Wind Speed']) assert np.allclose(data['Wind Direction'], expected['Wind Direction']) for mf in METADATA_FIELDS: assert (mf in metadata) assert (data.index.tzinfo.zone == ('Etc/GMT%+d' % (- metadata['Time Zone'])))
def postprocess_db_names(values, scheme, dict_values): (table_names, col_names) = match_sql_db_names(scheme, dict_values) new_values = [] for gk in values: if (gk is not None): if (gk.type == 'tbl'): gk = GroundingKey.make_table_grounding(table_names[gk.keys[0]]) elif (gk.type == 'col'): gk = GroundingKey.make_column_grounding(table_names[gk.keys[0]], col_names[gk.keys[1]]) new_values.append(gk) return new_values
class Metadata(Tags): __module__ = 'mutagen' def __init__(self, *args, **kwargs): if (args or kwargs): self.load(*args, **kwargs) @loadfile() def load(self, filething, **kwargs): raise NotImplementedError @loadfile(writable=False) def save(self, filething=None, **kwargs): raise NotImplementedError @loadfile(writable=False) def delete(self, filething=None): raise NotImplementedError
def pytest_runtestloop(session): try: from telegram.utils.deprecate import TelegramDeprecationWarning session.add_marker(pytest.mark.filterwarnings('ignore::telegram.utils.deprecate.TelegramDeprecationWarning')) except ImportError: pass try: from telegram.warnings import PTBDeprecationWarning session.add_marker(pytest.mark.filterwarnings('ignore::telegram.warnings.PTBDeprecationWarning')) except ImportError: pass
@attr.s(frozen=True) class ValueUnit(): value = attr.ib() orig_value = attr.ib(kw_only=True) tokenized_value = attr.ib(default=None, kw_only=True) bert_tokens = attr.ib(default=None, kw_only=True) value_type = attr.ib(default=None, kw_only=True) column = attr.ib(default=None, kw_only=True) table = attr.ib(default=None, kw_only=True) source = attr.ib(default=None, kw_only=True) q_match = attr.ib(default=None, kw_only=True) idx = attr.ib(default=None, kw_only=True) def __str__(self): value = self.value if (self.value_type == 'number'): try: value = int(value) except: pass return str(value)
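# Usage sketch (assumes the attrs package provides attr.s/attr.ib): the class is
# frozen, so all fields are fixed at construction time; only `value` is positional.
vu = ValueUnit('42', orig_value='42', value_type='number')
assert str(vu) == '42'      # numeric strings are cast to int before printing
assert vu.table is None     # unset keyword-only fields default to None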
def test_rouge(cand, ref_1, ref_2, ref_3): current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime()) tmp_dir = '.rouge-tmp-{}'.format(current_time) try: if (not os.path.isdir(tmp_dir)): os.mkdir(tmp_dir) os.mkdir((tmp_dir + '/candidate')) os.mkdir((tmp_dir + '/icsi_reference')) references_1 = [line.strip() for line in ref_1] references_2 = [line.strip() for line in ref_2] references_3 = [line.strip() for line in ref_3] candidates = [line.strip() for line in cand] assert (len(candidates) == len(references_1) == len(references_2) == len(references_3)) cnt = len(candidates) for i in range(cnt): with open((tmp_dir + '/icsi_reference/ref.A.{}.txt'.format(i)), 'w', encoding='utf-8') as f: f.write(references_1[i]) with open((tmp_dir + '/icsi_reference/ref.B.{}.txt'.format(i)), 'w', encoding='utf-8') as f: f.write(references_2[i]) with open((tmp_dir + '/icsi_reference/ref.C.{}.txt'.format(i)), 'w', encoding='utf-8') as f: f.write(references_3[i]) with open((tmp_dir + '/candidate/cand.{}.txt'.format(i)), 'w', encoding='utf-8') as f: f.write(candidates[i]) '\n Your Path\n ' r = pyrouge.Rouge155('/users4/xiachongfeng/ROUGE-1.5.5/') r.model_dir = (tmp_dir + '/icsi_reference/') r.system_dir = (tmp_dir + '/candidate/') r.model_filename_pattern = 'ref.[A-Z].#ID#.txt' r.system_filename_pattern = 'cand.(\\d+).txt' rouge_results = r.convert_and_evaluate() results_dict = r.output_to_dict(rouge_results) return results_dict finally: pass if os.path.isdir(tmp_dir): shutil.rmtree(tmp_dir)
def load_app_and_run_server() -> None: sys.path.append(os.getcwd()) shutdown_event = register_signal_handlers() args = parse_args(sys.argv[1:]) with args.config_file: config = read_config(args.config_file, args.server_name, args.app_name) assert config.server if is_metrics_enabled(config.app): from baseplate.server.prometheus import start_prometheus_exporter start_prometheus_exporter() else: logger.info('Metrics are not configured, Prometheus metrics will not be exported.') configure_logging(config, args.debug) app = make_app(config.app) listener = make_listener(args.bind) server = make_server(config.server, listener, app) if einhorn.is_worker(): einhorn.ack_startup() if args.reload: reloader.start_reload_watcher(extra_files=[args.config_file.name]) gc.collect() logger.info('Listening on %s', listener.getsockname()) server.start() try: shutdown_event.wait() SERVER_STATE.state = ServerLifecycle.SHUTTING_DOWN cfg = parse_config(config.server, {'drain_time': OptionalConfig(Timespan)}) if cfg.drain_time: logger.debug('Draining inbound requests...') time.sleep(cfg.drain_time.total_seconds()) finally: logger.debug('Gracefully shutting down...') server.stop() logger.info('Exiting')
@uses(web_fixture=WebFixture) class FileUploadInputFixture(Fixture): def file_was_uploaded(self, filename): return (Session.query(PersistedFile).filter_by(filename=os.path.basename(filename)).count() == 1) file_to_upload1_name = 'file1.html' file_to_upload2_name = 'file2.gif' file_to_upload1_content = b'some content' file_to_upload2_content = b'some different content' def new_file_to_upload1(self): return temp_file_with(self.file_to_upload1_content, name=self.file_to_upload1_name, mode='w+b') def new_file_to_upload2(self): return temp_file_with(self.file_to_upload2_content, name=self.file_to_upload2_name, mode='w+b') def new_domain_object(self): class DomainObject(): def __init__(self): self.throws_exception = False self.files = [] self.submitted_file_info = {} self.submitted = False fields = ExposedNames() fields.files = (lambda i: FileField(allow_multiple=True, label='Attached files', required=True)) events = ExposedNames() events.submit = (lambda i: Event(label='Submit', action=Action(i.submit))) def submit(self): if self.throws_exception: raise DomainException() for f in self.files: with f.open() as opened_file: contents = opened_file.read() self.submitted_file_info[f.filename] = (contents, f.mime_type) self.submitted = True return DomainObject() def new_FileUploadForm(self): fixture = self class FileUploadForm(Form): def __init__(self, view): super().__init__(view, 'test') self.set_attribute('novalidate', 'novalidate') self.use_layout(FormLayout()) if self.exception: self.layout.add_alert_for_domain_exception(self.exception) self.layout.add_input(FileUploadInput(self, fixture.domain_object.fields.files)) self.define_event_handler(fixture.domain_object.events.submit) self.add_child(Button(self, fixture.domain_object.events.submit)) return FileUploadForm def new_wsgi_app(self, enable_js=False): return self.web_fixture.new_wsgi_app(child_factory=self.FileUploadForm.factory(), enable_js=enable_js) def uploaded_file_is_listed(self, filename): return self.web_fixture.driver_browser.is_element_present(('//ul/li/span[text()="%s"]/../input[@value="Remove"]' % os.path.basename(filename))) def upload_file_is_queued(self, filename): return self.web_fixture.driver_browser.is_element_present(('//ul/li/span[text()="%s"]/../input[@value="Cancel"]' % os.path.basename(filename)))
class ConcatConv2d(nn.Conv2d, DiffEqModule): def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: _size_2_t=0, dilation: _size_2_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros'): super(ConcatConv2d, self).__init__(in_channels=(in_channels + 1), out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, padding_mode=padding_mode) self.unpack_params = (self.unpack_wb if bias else self.unpack_w) def forward(self, t, y, params: Optional[List]=None): (weight, bias) = self.unpack_params(params) ty = utils.channel_cat(t, y) if (self.padding_mode != 'zeros'): return F.conv2d(F.pad(ty, self._reversed_padding_repeated_twice, mode=self.padding_mode), weight, bias, self.stride, _pair(0), self.dilation, self.groups) return F.conv2d(ty, weight, bias, self.stride, self.padding, self.dilation, self.groups) def unpack_wb(self, params: Optional[List]=None): if (params is None): return (self.weight, self.bias) return params def unpack_w(self, params: Optional[List]=None): if (params is None): return (self.weight, self.bias) return (params[0], None)
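# Usage sketch: the layer concatenates the scalar time t as one extra input channel
# (hence in_channels + 1 internally), a common neural-ODE construction. This assumes
# utils.channel_cat broadcasts t to an (N, 1, H, W) channel; shapes are illustrative.
import torch

conv = ConcatConv2d(in_channels=3, out_channels=8, kernel_size=3, padding=1)
t = torch.tensor(0.5)
y = torch.randn(4, 3, 16, 16)
out = conv(t, y)              # expected shape: (4, 8, 16, 16)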
def parse_lambda_config(x): split = x.split(',') if (len(split) == 1): return (float(x), None) else: split = [s.split(':') for s in split] assert all(((len(s) == 2) for s in split)) assert all((k.isdigit() for (k, _) in split)) assert all(((int(split[i][0]) < int(split[(i + 1)][0])) for i in range((len(split) - 1)))) return (float(split[0][1]), [(int(k), float(v)) for (k, v) in split])
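# Illustration of the two accepted formats: a bare float yields a constant
# coefficient; comma-separated 'step:value' pairs define a piecewise schedule.
parse_lambda_config('0.5')            # -> (0.5, None)
parse_lambda_config('0:1,1000:0')     # -> (1.0, [(0, 1.0), (1000, 0.0)])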
class HfArgumentParser(ArgumentParser): dataclass_types: Iterable[DataClassType] def __init__(self, dataclass_types: Union[(DataClassType, Iterable[DataClassType])], **kwargs): if ('formatter_class' not in kwargs): kwargs['formatter_class'] = ArgumentDefaultsHelpFormatter super().__init__(**kwargs) if dataclasses.is_dataclass(dataclass_types): dataclass_types = [dataclass_types] self.dataclass_types = list(dataclass_types) for dtype in self.dataclass_types: self._add_dataclass_arguments(dtype) @staticmethod def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field): field_name = f'--{field.name}' kwargs = field.metadata.copy() if isinstance(field.type, str): raise RuntimeError('Unresolved type detected, which should have been done with the help of `typing.get_type_hints` method by default') aliases = kwargs.pop('aliases', []) if isinstance(aliases, str): aliases = [aliases] origin_type = getattr(field.type, '__origin__', field.type) if (origin_type is Union): if ((str not in field.type.__args__) and ((len(field.type.__args__) != 2) or (type(None) not in field.type.__args__))): raise ValueError(f"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because the argument parser only supports one type per argument. Problem encountered in field '{field.name}'.") if (type(None) not in field.type.__args__): field.type = (field.type.__args__[0] if (field.type.__args__[1] == str) else field.type.__args__[1]) origin_type = getattr(field.type, '__origin__', field.type) elif (bool not in field.type.__args__): field.type = (field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]) origin_type = getattr(field.type, '__origin__', field.type) bool_kwargs = {} if ((origin_type is Literal) or (isinstance(field.type, type) and issubclass(field.type, Enum))): if (origin_type is Literal): kwargs['choices'] = field.type.__args__ else: kwargs['choices'] = [x.value for x in field.type] kwargs['type'] = make_choice_type_function(kwargs['choices']) if (field.default is not dataclasses.MISSING): kwargs['default'] = field.default else: kwargs['required'] = True elif ((field.type is bool) or (field.type == Optional[bool])): bool_kwargs = copy(kwargs) kwargs['type'] = string_to_bool if ((field.type is bool) or ((field.default is not None) and (field.default is not dataclasses.MISSING))): default = (False if (field.default is dataclasses.MISSING) else field.default) kwargs['default'] = default kwargs['nargs'] = '?'
kwargs['const'] = True elif (isclass(origin_type) and issubclass(origin_type, list)): kwargs['type'] = field.type.__args__[0] kwargs['nargs'] = '+' if (field.default_factory is not dataclasses.MISSING): kwargs['default'] = field.default_factory() elif (field.default is dataclasses.MISSING): kwargs['required'] = True else: kwargs['type'] = field.type if (field.default is not dataclasses.MISSING): kwargs['default'] = field.default elif (field.default_factory is not dataclasses.MISSING): kwargs['default'] = field.default_factory() else: kwargs['required'] = True parser.add_argument(field_name, *aliases, **kwargs) if ((field.default is True) and ((field.type is bool) or (field.type == Optional[bool]))): bool_kwargs['default'] = False parser.add_argument(f'--no_{field.name}', action='store_false', dest=field.name, **bool_kwargs) def _add_dataclass_arguments(self, dtype: DataClassType): if hasattr(dtype, '_argument_group_name'): parser = self.add_argument_group(dtype._argument_group_name) else: parser = self try: type_hints: Dict[(str, type)] = get_type_hints(dtype) except NameError: raise RuntimeError(f'Type resolution failed for f{dtype}. Try declaring the class in global scope or removing line of `from __future__ import annotations` which opts in Postponed Evaluation of Annotations (PEP 563)') for field in dataclasses.fields(dtype): if (not field.init): continue field.type = type_hints[field.name] self._parse_dataclass_field(parser, field) def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None) -> Tuple[(DataClass, ...)]: if (args_file_flag or args_filename or (look_for_args_file and len(sys.argv))): args_files = [] if args_filename: args_files.append(Path(args_filename)) elif (look_for_args_file and len(sys.argv)): args_files.append(Path(sys.argv[0]).with_suffix('.args')) if args_file_flag: args_file_parser = ArgumentParser() args_file_parser.add_argument(args_file_flag, type=str, action='append') (cfg, args) = args_file_parser.parse_known_args(args=args) cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip('-'), None) if cmd_args_file_paths: args_files.extend([Path(p) for p in cmd_args_file_paths]) file_args = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() args = ((file_args + args) if (args is not None) else (file_args + sys.argv[1:])) (namespace, remaining_args) = self.parse_known_args(args=args) outputs = [] for dtype in self.dataclass_types: keys = {f.name for f in dataclasses.fields(dtype) if f.init} inputs = {k: v for (k, v) in vars(namespace).items() if (k in keys)} for k in keys: delattr(namespace, k) obj = dtype(**inputs) outputs.append(obj) if (len(namespace.__dict__) > 0): outputs.append(namespace) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f'Some specified arguments are not used by the HfArgumentParser: {remaining_args}') return (*outputs,) def parse_dict(self, args: Dict[(str, Any)], allow_extra_keys: bool=False) -> Tuple[(DataClass, ...)]: unused_keys = set(args.keys()) outputs = [] for dtype in self.dataclass_types: keys = {f.name for f in dataclasses.fields(dtype) if f.init} inputs = {k: v for (k, v) in args.items() if (k in keys)} unused_keys.difference_update(inputs.keys()) obj = dtype(**inputs) outputs.append(obj) if ((not allow_extra_keys) and unused_keys): raise ValueError(f'Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}') return 
tuple(outputs) def parse_json_file(self, json_file: str, allow_extra_keys: bool=False) -> Tuple[(DataClass, ...)]: open_json_file = open(Path(json_file)) data = json.loads(open_json_file.read()) outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys) return tuple(outputs) def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool=False) -> Tuple[(DataClass, ...)]: outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys) return tuple(outputs)
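# Usage sketch for HfArgumentParser (hypothetical dataclass and CLI values): each
# dataclass becomes a group of command-line flags, and parse_args_into_dataclasses
# returns one populated instance per dataclass, in order.
import dataclasses
from typing import Optional

@dataclasses.dataclass
class TrainingConfig:
    learning_rate: float = 3e-4
    output_dir: Optional[str] = None
    fp16: bool = False

parser = HfArgumentParser(TrainingConfig)
(cfg,) = parser.parse_args_into_dataclasses(['--learning_rate', '1e-4', '--fp16', 'true'])
# cfg.learning_rate == 1e-4, cfg.fp16 is True, cfg.output_dir is None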
def main(): parser = arguments.get_argument_parser() opt = parser.parse_args() if (not os.path.exists(opt.model_name)): os.makedirs(opt.model_name) logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO) tb_logger.configure(opt.logger_name, flush_secs=5) logger = logging.getLogger(__name__) logger.info(opt) tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') vocab = tokenizer.vocab opt.vocab_size = len(vocab) (train_loader, val_loader) = image_caption.get_loaders(opt.data_path, opt.data_name, tokenizer, opt.batch_size, opt.workers, opt) model = VSEModel(opt) lr_schedules = [opt.lr_update] start_epoch = 0 if opt.resume: if os.path.isfile(opt.resume): logger.info("=> loading checkpoint '{}'".format(opt.resume)) checkpoint = torch.load(opt.resume) start_epoch = checkpoint['epoch'] best_rsum = checkpoint['best_rsum'] if (not model.is_data_parallel): model.make_data_parallel() model.load_state_dict(checkpoint['model']) model.Eiters = checkpoint['Eiters'] logger.info("=> loaded checkpoint '{}' (epoch {}, best_rsum {})".format(opt.resume, start_epoch, best_rsum)) if opt.reset_start_epoch: start_epoch = 0 else: logger.info("=> no checkpoint found at '{}'".format(opt.resume)) if (not model.is_data_parallel): model.make_data_parallel() best_rsum = 0 for epoch in range(start_epoch, opt.num_epochs): logger.info(opt.logger_name) logger.info(opt.model_name) adjust_learning_rate(opt, model.optimizer, epoch, lr_schedules) if (epoch >= opt.vse_mean_warmup_epochs): opt.max_violation = True model.set_max_violation(opt.max_violation) if (opt.precomp_enc_type == 'backbone'): if (epoch < opt.embedding_warmup_epochs): model.freeze_backbone() logger.info('All backbone weights are frozen, only train the embedding layers') else: model.unfreeze_backbone(3) if (epoch < opt.embedding_warmup_epochs): logger.info('Warm up the embedding layers') elif (epoch < (opt.embedding_warmup_epochs + opt.backbone_warmup_epochs)): model.unfreeze_backbone(3) elif (epoch < (opt.embedding_warmup_epochs + (opt.backbone_warmup_epochs * 2))): model.unfreeze_backbone(2) elif (epoch < (opt.embedding_warmup_epochs + (opt.backbone_warmup_epochs * 3))): model.unfreeze_backbone(1) else: model.unfreeze_backbone(0) train(opt, train_loader, model, epoch, val_loader) rsum = validate(opt, val_loader, model) is_best = (rsum > best_rsum) best_rsum = max(rsum, best_rsum) if (not os.path.exists(opt.model_name)): os.mkdir(opt.model_name) save_checkpoint({'epoch': (epoch + 1), 'model': model.state_dict(), 'best_rsum': best_rsum, 'opt': opt, 'Eiters': model.Eiters}, is_best, filename='checkpoint.pth'.format(epoch), prefix=(opt.model_name + '/'))
def _get_tzd(timeinseconds=None): if (timeinseconds is None): timeinseconds = time.time() tzd = time.strftime('%z', time.localtime(timeinseconds)) if Globals.use_compatible_timestamps: time_separator = '-' else: time_separator = ':' if (tzd == '+0000'): return 'Z' else: return ((tzd[:3] + time_separator) + tzd[3:])
def common_arg_parser(): parser = arg_parser() parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2') parser.add_argument('--seed', help='RNG seed', type=int, default=None) parser.add_argument('--alg', help='Algorithm', type=str, default='ppo2') (parser.add_argument('--num_timesteps', type=float, default=1000000.0),) parser.add_argument('--network', help='network type (mlp, cnn, lstm, cnn_lstm, conv_only)', default=None) parser.add_argument('--gamestate', help='game state to load (so far only used in retro games)', default=None) parser.add_argument('--num_env', help='Number of environment copies being run in parallel. When not specified, set to number of cpus for Atari, and to 1 for Mujoco', default=None, type=int) parser.add_argument('--reward_scale', help='Reward scale factor. Default: 1.0', default=1.0, type=float) parser.add_argument('--save_path', help='Path to save trained model to', default=None, type=str) parser.add_argument('--policy_weight_path', help='Policy weight path', default=None, type=str) parser.add_argument('--value_weight_path', help='Value weight path', default=None, type=str) parser.add_argument('--l2regpi', help='Policy L2', default=0.0, type=float) parser.add_argument('--l1regpi', help='Policy L1', default=0.0, type=float) parser.add_argument('--l2regvf', help='Value L2', default=0.0, type=float) parser.add_argument('--l1regvf', help='Value L1', default=0.0, type=float) parser.add_argument('--wclippi', help='Policy Weight Clip', default=0.0, type=float) parser.add_argument('--wclipvf', help='Value Weight Clip', default=0.0, type=float) parser.add_argument('--dropoutpi', help='Policy Dropout', default=1.0, type=float) parser.add_argument('--dropoutvf', help='Value Dropout', default=1.0, type=float) parser.add_argument('--batchnormpi', help='Policy Batchnorm', default=False, type=bool) parser.add_argument('--batchnormvf', help='Value Batchnorm', default=False, type=bool) parser.add_argument('--normalize_value', help='Normalize Value in Mujoco and Roboschool Environment', default=True, type=bool) parser.add_argument('--save_video_interval', help='Save video every x steps (0 = disabled)', default=0, type=int) parser.add_argument('--save_video_length', help='Length of recorded video. Default: 200', default=200, type=int) parser.add_argument('--play', default=False, action='store_true') parser.add_argument('--extra_import', help='Extra module to import to access external environments', type=str, default=None) return parser
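# Usage sketch (assumes the arg_parser() helper from the same module returns a plain
# argparse.ArgumentParser): unknown algorithm-specific flags are typically consumed
# via parse_known_args.
args, unknown = common_arg_parser().parse_known_args(
    ['--env', 'HalfCheetah-v2', '--num_timesteps', '2e6'])
# args.env == 'HalfCheetah-v2', args.num_timesteps == 2000000.0, args.alg == 'ppo2'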