code
stringlengths
101
5.91M
def run():
    """Entry point for ``circlator progcheck``: report versions of all external dependencies."""
    arg_parser = argparse.ArgumentParser(
        description='Checks all dependencies are found and are correct versions',
        usage='circlator progcheck')
    arg_parser.add_argument(
        '--debug', action='store_true',
        help='Debug mode with very verbose output')
    opts = arg_parser.parse_args()
    # Report-only: never raise on a missing/outdated dependency.
    versions.get_all_versions(sys.stdout, raise_error=False, debug=opts.debug)
# NOTE(review): the original line began with a bare "_properties" token before the
# class keyword — a truncated decorator, presumably @dace.properties.make_properties.
# Restored here; confirm against the upstream dace source.
@dace.properties.make_properties
class CodeNode(Node):
    """Abstract SDFG graph node that carries executable code.

    Tracks a human-readable label, a storage/location mapping, and the build
    environments the code requires.
    """

    label = Property(dtype=str, desc='Name of the CodeNode')
    location = DictProperty(
        key_type=str,
        value_type=dace.symbolic.pystr_to_symbolic,
        desc='Full storage location identifier (e.g., rank, GPU ID)')
    environments = SetProperty(
        str,
        desc='Environments required by CMake to build and run this code node.',
        default=set())

    def __init__(self, label='', location=None, inputs=None, outputs=None):
        # Connector sets default to empty; location defaults to an empty dict
        # (None sentinel avoids a shared mutable default).
        super(CodeNode, self).__init__(inputs or set(), outputs or set())
        self.label = label
        self.location = location if location is not None else {}

    def free_symbols(self) -> Set[str]:
        """Union of the free symbols of every symbolic location value."""
        symbols: Set[str] = set()
        for value in self.location.values():
            symbols |= value.free_symbols
        return symbols
def get_best_encoding(stream):
    """Return the stream's declared encoding, upgrading an ASCII-compatible one to UTF-8."""
    encoding = getattr(stream, 'encoding', None) or sys.getdefaultencoding()
    # ASCII is a strict subset of UTF-8, so UTF-8 is always a safe upgrade.
    return 'utf-8' if is_ascii_encoding(encoding) else encoding
class RoCBertForMaskedLM(metaclass=DummyObject):
    """Import-time placeholder used when PyTorch is not installed.

    Instantiating it raises an informative error instead of a bare ImportError.
    """

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fails fast with a helpful message when the torch backend is missing.
        requires_backends(self, ['torch'])
class AspuruGuzikAutoEncoder(SeqToSeq):
    """Variational sequence autoencoder with a convolutional encoder and GRU decoder.

    Presumably the molecular autoencoder of Gomez-Bombarelli / Aspuru-Guzik et
    al. (the name suggests so) — TODO confirm against the paper.  Input and
    output vocabularies are identical and the model is always variational.
    """

    def __init__(self, num_tokens, max_output_length, embedding_dimension=196, filter_sizes=[9, 9, 10], kernel_sizes=[9, 9, 11], decoder_dimension=488, **kwargs):
        # NOTE(review): mutable list defaults are shared across calls; safe here
        # because they are only read, never mutated, but a None sentinel would be cleaner.
        if (len(filter_sizes) != len(kernel_sizes)):
            raise ValueError('Must have same number of layers and kernels')
        self._filter_sizes = filter_sizes
        self._kernel_sizes = kernel_sizes
        self._decoder_dimension = decoder_dimension
        # Autoencoder: same token set on both sides; no input reversal.
        super(AspuruGuzikAutoEncoder, self).__init__(input_tokens=num_tokens, output_tokens=num_tokens, max_output_length=max_output_length, embedding_dimension=embedding_dimension, variational=True, reverse_input=False, **kwargs)

    def _create_features(self):
        # One-hot sequence input: (batch, sequence position, token).
        return layers.Feature(shape=(self.batch_size, self._max_output_length, len(self._input_tokens)))

    def _create_encoder(self, n_layers, dropout):
        """Build the convolutional encoder; returns the embedding layer.

        ``n_layers`` is unused — the conv stack depth comes from ``filter_sizes``.
        """
        prev_layer = self._features
        for i in range(len(self._filter_sizes)):
            filter_size = self._filter_sizes[i]
            kernel_size = self._kernel_sizes[i]
            if (dropout > 0.0):
                prev_layer = layers.Dropout(dropout, in_layers=prev_layer)
            prev_layer = layers.Conv1D(filters=filter_size, kernel_size=kernel_size, in_layers=prev_layer, activation_fn=tf.nn.relu)
        prev_layer = layers.Flatten(prev_layer)
        prev_layer = layers.Dense(self._decoder_dimension, in_layers=prev_layer, activation_fn=tf.nn.relu)
        prev_layer = layers.BatchNorm(prev_layer)
        if self._variational:
            self._embedding_mean = layers.Dense(self._embedding_dimension, in_layers=prev_layer, name='embedding_mean')
            self._embedding_stddev = layers.Dense(self._embedding_dimension, in_layers=prev_layer, name='embedding_std')
            # Reparameterization trick: sample from (mean, std) only during training.
            prev_layer = layers.CombineMeanStd([self._embedding_mean, self._embedding_stddev], training_only=True)
        return prev_layer

    def _create_decoder(self, n_layers, dropout):
        """Build the GRU decoder from the embedding; returns the softmax output layer."""
        prev_layer = layers.Dense(self._embedding_dimension, in_layers=self.embedding, activation_fn=tf.nn.relu)
        # Repeat the embedding once per output position so each step sees it.
        prev_layer = layers.Repeat(self._max_output_length, in_layers=prev_layer)
        for i in range(3):
            if (dropout > 0.0):
                prev_layer = layers.Dropout(dropout, in_layers=prev_layer)
            prev_layer = layers.GRU(self._decoder_dimension, self.batch_size, in_layers=prev_layer)
        retval = layers.Dense(len(self._output_tokens), in_layers=prev_layer, activation_fn=tf.nn.softmax, name='output')
        return retval

    def _generate_batches(self, sequences):
        """Yield feed dicts for training; pads the final partial batch with empty sequences."""
        for batch in self._batch_elements(sequences):
            inputs = []
            outputs = []
            for (input, output) in batch:
                inputs.append(input)
                outputs.append(output)
            # Pad up to a full batch with empty sequences.
            for i in range(len(inputs), self.batch_size):
                inputs.append([])
                outputs.append([])
            feed_dict = {}
            feed_dict[self._features] = self._create_output_array(inputs)
            feed_dict[self._labels] = self._create_output_array(outputs)
            # Reset recurrent state at the start of every batch.
            for (initial, zero) in zip(self.rnn_initial_states, self.rnn_zero_states):
                feed_dict[initial] = zero
            (yield feed_dict)

    def predict_from_sequences(self, sequences, beam_width=5):
        """Decode output sequences for ``sequences`` using beam search."""
        result = []
        with self._get_tf('Graph').as_default():
            for batch in self._batch_elements(sequences):
                feed_dict = {}
                feed_dict[self._features] = self._create_output_array(batch)
                # Inference mode: disables training-only sampling/dropout.
                feed_dict[self._training_placeholder] = 0.0
                for (initial, zero) in zip(self.rnn_initial_states, self.rnn_zero_states):
                    feed_dict[initial] = zero
                probs = self.session.run(self.output, feed_dict=feed_dict)
                for i in range(len(batch)):
                    result.append(self._beam_search(probs[i], beam_width))
        return result

    def predict_embeddings(self, sequences):
        """Return the latent embedding vectors for ``sequences`` as a float32 array."""
        result = []
        with self._get_tf('Graph').as_default():
            for batch in self._batch_elements(sequences):
                feed_dict = {}
                feed_dict[self._features] = self._create_output_array(batch)
                feed_dict[self._training_placeholder] = 0.0
                for (initial, zero) in zip(self.rnn_initial_states, self.rnn_zero_states):
                    feed_dict[initial] = zero
                embeddings = self.session.run(self.embedding, feed_dict=feed_dict)
                for i in range(len(batch)):
                    result.append(embeddings[i])
        return np.array(result, dtype=np.float32)
class SPPParameter(message.Message):
    """Protocol-buffer message class for SPP (spatial pyramid pooling) parameters.

    NOTE(review): this looks protoc-generated (reflection metaclass plus a
    module-level descriptor) — do not edit by hand; regenerate from the .proto.
    """
    # Python 2-style metaclass hook: the reflection metaclass builds all fields
    # and accessors from DESCRIPTOR at class-creation time.
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SPPPARAMETER
class SawyerFaucetCloseV2Policy(Policy):
    """Scripted policy that moves the gripper to the faucet handle and pushes it closed."""

    # NOTE(review): the original line carried a stray "_fully_parsed" token before
    # _parse_obs — a truncated decorator, presumably @assert_fully_parsed; restored
    # here. @staticmethod is required: get_action calls these via
    # self._parse_obs(obs) / self._desired_pos(o_d) with a single argument.
    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        """Split the flat observation vector into named components."""
        return {
            'hand_pos': obs[:3],
            'faucet_pos': obs[3:6],
            'unused_info': obs[6:],
        }

    def get_action(self, obs):
        """Return a 4-vector action [dx, dy, dz, grab] steering toward the target."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.0)
        action['grab_effort'] = 1.0
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        """Waypoint logic: hover above the handle, descend, then push it sideways."""
        pos_curr = o_d['hand_pos']
        # Offset from the faucet frame to the graspable handle.
        pos_faucet = o_d['faucet_pos'] + np.array([+0.04, 0.0, 0.03])
        if np.linalg.norm(pos_curr[:2] - pos_faucet[:2]) > 0.04:
            # Not yet above the handle: approach from 10 cm overhead.
            return pos_faucet + np.array([0.0, 0.0, 0.1])
        elif abs(pos_curr[2] - pos_faucet[2]) > 0.04:
            # Above the handle but too high: descend onto it.
            return pos_faucet
        else:
            # On the handle: push it to close the faucet.
            return pos_faucet + np.array([-0.1, 0.05, 0.0])
class FiniteInductiveValuation(InductiveValuation, DiscreteValuation):
    """An inductive valuation that is also discrete (finitely-valued on its domain)."""

    def __init__(self, parent, phi):
        # Both bases are initialized explicitly because their signatures differ.
        InductiveValuation.__init__(self, parent, phi)
        DiscreteValuation.__init__(self, parent)

    def extensions(self, other):
        """Return the extensions of this valuation to ``other``.

        For a rational function field (one generator), extend over the
        fraction field of the base and wrap the result; otherwise defer to
        the generic implementation.
        """
        from sage.categories.function_fields import FunctionFields
        is_rational_function_field = other in FunctionFields() and other.ngens() == 1
        if not is_rational_function_field:
            return super().extensions(other)
        fraction_field = self.domain().base().fraction_field()
        extended = self.extension(self.domain().change_ring(fraction_field))
        return [other.valuation(extended)]
class InstanceNormalization(keras.layers.Layer):
    """Instance normalization: normalize each sample independently over axes 1 and 2,
    then apply a learned per-channel scale and offset."""

    def __init__(self, epsilon=1e-05):
        super(InstanceNormalization, self).__init__()
        # Small constant guarding against division by zero variance.
        self.epsilon = epsilon

    def build(self, input_shape):
        channel_shape = input_shape[-1:]
        # Scale initialized around 1.0 so the layer starts near identity.
        self.scale = self.add_weight(
            name='scale',
            shape=channel_shape,
            initializer=tf.random_normal_initializer(1.0, 0.02),
            trainable=True)
        self.offset = self.add_weight(
            name='offset',
            shape=channel_shape,
            initializer='zeros',
            trainable=True)

    def call(self, x):
        mean, variance = tf.nn.moments(x, axes=[1, 2], keepdims=True)
        normalized = (x - mean) * tf.math.rsqrt(variance + self.epsilon)
        return self.scale * normalized + self.offset
def is_torch_bf16_available():
    """Whether bf16 can be used: CUDA GPU with compute capability >= 8 (Ampere),
    CUDA toolkit >= 11, torch >= 1.10, and ``torch.autocast`` present."""
    if not is_torch_available():
        return False
    import torch

    # Needs a CUDA build of torch with a visible device.
    if not torch.cuda.is_available() or torch.version.cuda is None:
        return False
    device_major = torch.cuda.get_device_properties(torch.cuda.current_device()).major
    if device_major < 8:
        return False
    cuda_major = int(torch.version.cuda.split('.')[0])
    if cuda_major < 11:
        return False
    if version.parse(torch.__version__) < version.parse('1.10'):
        return False
    return hasattr(torch, 'autocast')
def test_method_get_teacher_forced_logits_for_encoder_decoder_model():
    """Teacher-forced logits from a seq2seq model should contain no NaNs."""
    transformers = pytest.importorskip('transformers')
    name = 'hf-internal-testing/tiny-random-BartModel'
    tokenizer = transformers.AutoTokenizer.from_pretrained(name)
    model = transformers.AutoModelForSeq2SeqLM.from_pretrained(name)
    wrapper = shap.models.TeacherForcing(model, tokenizer, device='cpu')
    src_batch = np.array(['This is a test statement for verifying working of teacher forcing logits functionality'])
    tgt_batch = np.array(['Testing teacher forcing logits functionality'])
    logits = wrapper.get_teacher_forced_logits(src_batch, tgt_batch)
    # A single NaN anywhere would poison the sum.
    assert not np.isnan(np.sum(logits))
def find_all_links(file_paths):
    """Collect every link found in the given files, excluding the bare S3 bucket prefix."""
    all_links = [link for path in file_paths for link in scan_code_for_links(path)]
    return [link for link in all_links if link != S3_BUCKET_PREFIX]
def SetTensorBoundShapes(meta_net_def, tensor_bound_shapes):
    """Copy ``tensor_bound_shapes`` into the ``tensorBoundShapes`` field of
    ``meta_net_def`` (mutates the message in place; returns None)."""
    meta_net_def.tensorBoundShapes.CopyFrom(tensor_bound_shapes)
class DatasetTemplates():
    """Collection of prompt templates for one dataset (and optional subset),
    persisted as a ``templates.yaml`` file under the dataset's folder."""

    TEMPLATES_KEY = 'templates'
    DATASET_KEY = 'dataset'
    SUBSET_KEY = 'subset'
    TEMPLATE_FILENAME = 'templates.yaml'

    def __init__(self, dataset_name: str, subset_name: str = None):
        self.dataset_name: str = dataset_name
        self.subset_name: str = subset_name
        # Maps template id -> Template; empty dict when no file exists yet.
        self.templates: Dict = self.read_from_file()
        self.name_to_id_mapping = {}
        self.sync_mapping()

    def sync_mapping(self) -> None:
        """Rebuild the template-name -> template-id index from ``self.templates``."""
        self.name_to_id_mapping = {template.name: template.id for template in self.templates.values()}

    # BUG FIX: the next three were plain methods in the mangled source but are
    # consumed attribute-style (``self.yaml_path``, ``self.folder_path``,
    # ``template_name not in self.all_template_names``) — they must be properties.
    @property
    def all_template_names(self) -> List[str]:
        """Sorted names of all templates for this dataset."""
        return sorted([template.name for template in self.templates.values()])

    @property
    def folder_path(self) -> str:
        """Folder holding this dataset's (and subset's) templates."""
        if self.subset_name:
            return os.path.join(TEMPLATES_FOLDER_PATH, self.dataset_name, self.subset_name)
        return os.path.join(TEMPLATES_FOLDER_PATH, self.dataset_name)

    @property
    def yaml_path(self) -> str:
        """Full path of the templates YAML file."""
        return os.path.join(self.folder_path, self.TEMPLATE_FILENAME)

    def format_for_dump(self) -> Dict:
        """Return the dict serialized to YAML (``subset`` key only when set)."""
        formatted_dict = {self.DATASET_KEY: self.dataset_name, self.TEMPLATES_KEY: self.templates}
        if self.subset_name:
            formatted_dict[self.SUBSET_KEY] = self.subset_name
        return formatted_dict

    def read_from_file(self) -> Dict:
        """Load templates from disk; warn and return {} when the file is missing."""
        if not os.path.exists(self.yaml_path):
            dataset_name = (f'{self.dataset_name} {self.subset_name}' if self.subset_name else self.dataset_name)
            logging.warning(
                f'Tried instantiating `DatasetTemplates` for {dataset_name}, but no prompts found. '
                'Please ignore this warning if you are creating new prompts for this dataset.')
            return {}
        # Context manager closes the handle (the original leaked it).
        with open(self.yaml_path, 'r') as f:
            yaml_dict = yaml.load(f, Loader=yaml.FullLoader)
        return yaml_dict[self.TEMPLATES_KEY]

    def write_to_file(self) -> None:
        """Persist all templates to the YAML file, creating the folder if needed."""
        self.sync_mapping()
        if not os.path.exists(self.folder_path):
            os.makedirs(self.folder_path)
        with open(self.yaml_path, 'w') as f:
            yaml.dump(self.format_for_dump(), f)

    def add_template(self, template: 'Template') -> None:
        """Register (or replace) a template by its id and persist immediately."""
        self.templates[template.get_id()] = template
        self.write_to_file()

    def remove_template(self, template_name: str) -> None:
        """Delete a template by name; removes the folder when it was the last one."""
        if template_name not in self.all_template_names:
            raise ValueError(f'No template with name {template_name} for dataset {self.dataset_name} exists.')
        del self.templates[self.name_to_id_mapping[template_name]]
        if len(self.templates) == 0:
            self.delete_folder()
        else:
            self.write_to_file()

    def update_template(self, current_template_name: str, new_template_name: str, jinja: str, reference: str, metadata: Template.Metadata, answer_choices: str) -> None:
        """Overwrite every editable field of an existing template and persist."""
        template_id = self.name_to_id_mapping[current_template_name]
        self.templates[template_id].name = new_template_name
        self.templates[template_id].jinja = jinja
        self.templates[template_id].reference = reference
        self.templates[template_id].metadata = metadata
        self.templates[template_id].answer_choices = answer_choices
        self.write_to_file()

    def delete_folder(self) -> None:
        """Remove this subset's folder; also the dataset folder when it becomes empty."""
        self.sync_mapping()
        rmtree(self.folder_path)
        if self.subset_name:
            base_dataset_folder = os.path.join(TEMPLATES_FOLDER_PATH, self.dataset_name)
            if len(os.listdir(base_dataset_folder)) == 0:
                rmtree(base_dataset_folder)

    def __getitem__(self, template_key: str) -> 'Template':
        return self.templates[self.name_to_id_mapping[template_key]]

    def __len__(self) -> int:
        return len(self.templates)
def local_initializer(sess, var_list, print_option=False):
    """Run the TF initializer op for exactly the variables in ``var_list``.

    ``print_option`` only toggles a console notice; initialization always runs.
    """
    if print_option:
        print('Initialize specific variables')
    sess.run(tf.variables_initializer(var_list))
def check_all_models_are_auto_configured():
    """Verify every model module is registered with an auto class; raise listing all failures."""
    check_missing_backends()
    all_auto_models = get_all_auto_configured_models()
    failures = []
    for module in get_model_modules():
        module_failures = check_models_are_auto_configured(module, all_auto_models)
        if module_failures is not None:
            failures.extend(module_failures)
    if failures:
        # Aggregate everything into a single exception so all problems surface at once.
        raise Exception(f'There were {len(failures)} failures: ' + '\n'.join(failures))
# BUG FIX: the source line began with a bare ".parametrize(...)" — a truncated
# "@pytest.mark.parametrize" decorator (syntax error as written); restored here.
@pytest.mark.parametrize(
    'patchset_file',
    ['patchset_bad_duplicate_patch_name.json',
     'patchset_bad_duplicate_patch_values.json',
     'patchset_bad_wrong_values_multiplicity.json'])
def test_patchset_bad(datadir, patchset_file):
    """Every malformed patchset document must be rejected with InvalidPatchSet."""
    with open(datadir.joinpath(patchset_file), encoding='utf-8') as patch_file:
        patchsetspec = json.load(patch_file)
    with pytest.raises(pyhf.exceptions.InvalidPatchSet):
        pyhf.PatchSet(patchsetspec)
def run_analysis(_):
    """Train and/or evaluate a drug-target interaction model driven entirely by FLAGS.

    Loads the requested dataset, optionally runs Gaussian-process hyperparameter
    search and/or K-fold cross-validation, fits the model, and writes score and
    results files to ``out_path``.  ``_`` is the unused argv remnant passed by
    the app runner.
    """
    # --- unpack FLAGS into locals -----------------------------------------
    dataset = FLAGS.dataset
    model = FLAGS.model
    thresholding = FLAGS.thresholding
    split = FLAGS.split
    threshold = FLAGS.threshold
    no_concord = FLAGS.no_concord
    no_r2 = FLAGS.no_r2
    out_path = FLAGS.out_path
    fold_num = FLAGS.fold_num
    hyper_parameters = FLAGS.hyper_parameters
    hyper_param_search = FLAGS.hyper_param_search
    verbose_search = FLAGS.verbose_search
    arithmetic_mean = FLAGS.arithmetic_mean
    max_iter = FLAGS.max_iter
    search_range = FLAGS.search_range
    isreload = FLAGS.reload
    cross_validation = FLAGS.cross_validation
    test = FLAGS.test
    predict_cold = FLAGS.predict_cold
    cold_drug = FLAGS.cold_drug
    cold_target = FLAGS.cold_target
    cold_drug_cluster = FLAGS.cold_drug_cluster
    split_warm = FLAGS.split_warm
    filter_threshold = FLAGS.filter_threshold
    early_stopping = FLAGS.early_stopping
    evaluate_freq = FLAGS.evaluate_freq
    patience = FLAGS.patience
    seed = FLAGS.seed
    log_file = FLAGS.log_file
    model_dir = FLAGS.model_dir
    prot_desc_path = FLAGS.prot_desc_path
    intermediate_file = FLAGS.intermediate_file
    plot = FLAGS.plot
    aggregate = FLAGS.aggregate
    aggregate_suffix_file = FLAGS.aggregate_suffix_file
    predict_only = FLAGS.predict_only
    restore_model = FLAGS.restore_model
    csv_out = FLAGS.csv_out
    tensorboard = FLAGS.tensorboard
    oversampled = FLAGS.oversampled
    input_protein = not FLAGS.no_input_protein
    weighted_metric_of_each_endpoint = FLAGS.weighted_metric_of_each_endpoint
    remove_val_set_entries = FLAGS.remove_val_set_entries

    if (aggregate_suffix_file is not None) and len(aggregate) > 0:
        aggregate = get_aggregate_list(aggregate_suffix_file, aggregate)
        assert len(set(aggregate)) == len(aggregate)  # no duplicate meta-tasks

    if predict_only:
        # Prediction mode disables everything training-related.
        hyper_param_search = False
        cross_validation = False
        plot = False
        early_stopping = False
        test = False
        restore_model = True
    else:
        # At most one cold/warm split option may be active at a time.
        # (A no-op `assert model == model` from the original was removed.)
        assert (predict_cold + cold_drug + cold_target + split_warm + cold_drug_cluster) <= 1

    # --- mode & metrics ----------------------------------------------------
    mode = 'regression' if re.search('reg', model, re.I) else 'classification'
    if mode == 'regression' and thresholding:
        mode = 'reg-threshold'
    direction = False
    # Keyword arguments shared by every metric below.
    metric_kwargs = dict(
        arithmetic_mean=arithmetic_mean,
        aggregate_list=aggregate,
        weighted_metric_of_each_endpoint=weighted_metric_of_each_endpoint)
    if mode == 'regression':
        metrics = [
            dcCustom.metrics.Metric(dcCustom.metrics.rms_score, np.nanmean, **metric_kwargs),
            dcCustom.metrics.Metric(dcCustom.metrics.concordance_index, np.nanmean, **metric_kwargs),
            dcCustom.metrics.Metric(dcCustom.metrics.r2_score, np.nanmean, **metric_kwargs)]
    elif mode == 'classification':
        direction = True
        metrics = [
            dcCustom.metrics.Metric(dcCustom.metrics.roc_auc_score, np.nanmean, **metric_kwargs),
            dcCustom.metrics.Metric(dcCustom.metrics.prc_auc_score, np.nanmean, **metric_kwargs)]
    elif mode == 'reg-threshold':
        direction = True
        metrics = [
            dcCustom.metrics.Metric(dcCustom.metrics.roc_auc_score, np.nanmean, threshold=threshold[0], mode='regression', **metric_kwargs)]

    loading_functions = {
        'davis': dcCustom.molnet.load_davis,
        'metz': dcCustom.molnet.load_metz,
        'kiba': dcCustom.molnet.load_kiba,
        'toxcast': dcCustom.molnet.load_toxcast,
        'all_kinase': dcCustom.molnet.load_kinases,
        'tc_kinase': dcCustom.molnet.load_tc_kinases,
        'tc_full_kinase': dcCustom.molnet.load_tc_full_kinases,
        'nci60': dcCustom.molnet.load_nci60}

    pair = (dataset, model)
    if pair in CheckFeaturizer:
        featurizer = CheckFeaturizer[pair][0]
        n_features = CheckFeaturizer[pair][1]
    if split not in ([None] + CheckSplit[dataset]):
        return  # unsupported split for this dataset
    print('')
    print('Running on dataset: %s' % dataset)
    print('')

    prot_desc_dict = {}
    prot_seq_dict = {}
    if input_protein:
        for path in prot_desc_path:
            load_prot_dict(prot_desc_dict, prot_seq_dict, path, 1, 2)
    prot_desc_length = 8421  # fixed protein descriptor length — TODO confirm

    # --- load dataset ------------------------------------------------------
    loader_kwargs = dict(
        featurizer=featurizer, cross_validation=cross_validation, test=test,
        split=split, reload=isreload, mode=mode, predict_cold=predict_cold,
        cold_drug=cold_drug, cold_target=cold_target,
        cold_drug_cluster=cold_drug_cluster, split_warm=split_warm,
        prot_seq_dict=prot_seq_dict, filter_threshold=filter_threshold,
        oversampled=oversampled, input_protein=input_protein,
        remove_val_set_entries=remove_val_set_entries)
    if cross_validation:
        loader_kwargs['K'] = fold_num
    (tasks, all_dataset, transformers) = loading_functions[dataset](**loader_kwargs)

    time_start_fitting = time.time()
    train_scores_list = []
    valid_scores_list = []
    test_scores_list = []

    # Replace member task names with their aggregated meta-task names.
    aggregated_tasks = copy.deepcopy(tasks)
    meta_task_list = []
    if (aggregate is not None) and len(aggregate) > 0:
        assert tasks is not None
        for meta_task_name in aggregate:
            for (i, task_name) in enumerate(tasks):
                if not re.search(meta_task_name, task_name, re.I):
                    continue
                if meta_task_name not in meta_task_list:
                    meta_task_list.append(meta_task_name)
                    aggregated_tasks.append(meta_task_name)
                aggregated_tasks.remove(task_name)

    model = 'mpnn' if re.match('mpnn', model, re.I) else model

    # --- optional hyperparameter search ------------------------------------
    if hyper_param_search:
        if hyper_parameters is None:
            hyper_parameters = hps[model]
        (train_dataset, valid_dataset, test_dataset) = all_dataset
        search_mode = dcCustom.hyper.GaussianProcessHyperparamOpt(model)
        (hyper_param_opt, _) = search_mode.hyperparam_search(
            hyper_parameters, train_dataset, valid_dataset, transformers,
            metrics, prot_desc_dict, prot_desc_length, tasks=tasks,
            direction=direction, n_features=n_features, n_tasks=len(tasks),
            max_iter=max_iter, search_range=search_range,
            early_stopping=early_stopping, evaluate_freq=evaluate_freq,
            patience=patience, model_dir=model_dir, log_file=log_file,
            tensorboard=tensorboard, mode=mode,
            no_concordance_index=no_concord, no_r2=no_r2, plot=plot,
            verbose_search=verbose_search, aggregated_tasks=aggregated_tasks,
            input_protein=input_protein)
        hyper_parameters = hyper_param_opt
    opt_epoch = -1
    test_dataset = None

    # --- fit and evaluate ---------------------------------------------------
    model_functions = {
        'regression': model_regression,
        'reg-threshold': model_regression,
        'classification': model_classification}
    assert mode in model_functions
    if mode == 'classification':
        direction = True
    if not cross_validation:
        (train_dataset, valid_dataset, test_dataset) = all_dataset
        (train_score, valid_score, test_score, opt_epoch) = model_functions[mode](
            train_dataset, valid_dataset, test_dataset, tasks, transformers,
            n_features, metrics, model, prot_desc_dict, prot_desc_length,
            hyper_parameters=hyper_parameters, test=test,
            early_stopping=early_stopping, evaluate_freq=evaluate_freq,
            patience=patience, direction=direction, seed=seed,
            model_dir=model_dir, no_concordance_index=no_concord, no_r2=no_r2,
            plot=plot, aggregated_tasks=aggregated_tasks,
            tensorboard=tensorboard, predict_only=predict_only,
            restore_model=restore_model, prediction_file=csv_out,
            input_protein=input_protein)
        if predict_only:
            return
        train_scores_list.append(train_score)
        valid_scores_list.append(valid_score)
        test_scores_list.append(test_score)
    else:
        for h in range(fold_num):
            (train_score, valid_score, _, _) = model_functions[mode](
                all_dataset[h][0], all_dataset[h][1], None, tasks, transformers,
                n_features, metrics, model, prot_desc_dict, prot_desc_length,
                hyper_parameters=hyper_parameters, test=test,
                early_stopping=False, direction=direction, seed=seed,
                model_dir=model_dir, no_concordance_index=no_concord,
                tensorboard=tensorboard, no_r2=no_r2, plot=plot,
                aggregated_tasks=aggregated_tasks, restore_model=restore_model,
                input_protein=input_protein)
            write_intermediate_file(out_path, intermediate_file, train_scores_list, valid_scores_list, train_score, valid_score, tasks, dataset, h, aggregated_tasks)
        write_avg_to_interm_file(out_path, intermediate_file, fold_num, train_scores_list, valid_scores_list, tasks, dataset, h='CV_average')
    time_finish_fitting = time.time()

    # --- results file name encodes model, mode and split flavour ------------
    results_file = './results/results_' + model
    if mode == 'classification':
        results_file += '_cls'
    elif mode == 'reg-threshold':
        results_file += '_thrhd'
    if predict_cold:
        results_file += '_cold'
    if split_warm:
        results_file += '_warm'
    if cold_drug:
        results_file += '_cold_drug'
    elif cold_target:
        results_file += '_cold_target'
    if cold_drug_cluster:
        results_file += '_cold_drug_cluster'
    if cross_validation:
        results_file += '_cv'
    results_file += '.csv'
    write_results_file(out_path, results_file, train_scores_list, valid_scores_list, fold_num, dataset, tasks, aggregated_tasks, time_finish_fitting, time_start_fitting, cross_validation=cross_validation, test=test, test_scores_list=test_scores_list, early_stopping=early_stopping, opt_epoch=opt_epoch)
    if hyper_param_search:
        # BUG FIX: pickle requires a binary-mode file handle; the original
        # opened with 'w' (text mode), which makes pickle.dump raise TypeError.
        with open(os.path.join(out_path, dataset + model + '.pkl'), 'wb') as f:
            pickle.dump(hyper_parameters, f)
def main():
    """Distributed (one process per GPU, torchrun-style) batch inference with an
    optional LoRA adapter; rank 0 appends gathered results to a JSONL file."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_type', default=None, type=str, required=True)
    parser.add_argument('--base_model', default=None, type=str, required=True)
    parser.add_argument('--lora_model', default='', type=str, help='If None, perform inference on the base model')
    parser.add_argument('--tokenizer_path', default=None, type=str)
    parser.add_argument('--template_name', default='vicuna', type=str, help='Prompt template name, eg: alpaca, vicuna, baichuan, chatglm2 etc.')
    parser.add_argument('--repetition_penalty', type=float, default=1.0)
    parser.add_argument('--max_new_tokens', type=int, default=128)
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--data_file', default=None, type=str, help='Predict file, one example per line')
    parser.add_argument('--output_file', default='./predictions_result.jsonl', type=str)
    parser.add_argument('--resize_emb', action='store_true', help='Whether to resize model token embeddings')
    args = parser.parse_args()
    logger.info(args)
    # torchrun-style env vars locate this process within the process group.
    world_size = int(os.environ.get('WORLD_SIZE', '1'))
    local_rank = int(os.environ.get('LOCAL_RANK', '0'))
    logger.info(f'local_rank: {local_rank}, world_size: {world_size}')
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend='nccl')
    if (not torch.cuda.is_available()):
        raise ValueError('No GPU available, this script is only for GPU inference.')
    if (args.tokenizer_path is None):
        args.tokenizer_path = args.base_model
    (model_class, tokenizer_class) = MODEL_CLASSES[args.model_type]
    # Left padding so batched generation continues from the end of each prompt.
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_path, trust_remote_code=True, padding_side='left')
    load_type = torch.float16
    base_model = model_class.from_pretrained(args.base_model, load_in_8bit=False, torch_dtype=load_type, low_cpu_mem_usage=True, device_map={'': local_rank}, trust_remote_code=True)
    try:
        base_model.generation_config = GenerationConfig.from_pretrained(args.base_model, trust_remote_code=True)
    except OSError:
        # Model repo ships no generation_config.json; defaults are fine.
        logger.info('Failed to load generation config, use default.')
    if args.resize_emb:
        model_vocab_size = base_model.get_input_embeddings().weight.size(0)
        tokenzier_vocab_size = len(tokenizer)
        logger.info(f'Vocab of the base model: {model_vocab_size}')
        logger.info(f'Vocab of the tokenizer: {tokenzier_vocab_size}')
        if (model_vocab_size != tokenzier_vocab_size):
            logger.info('Resize model embeddings to fit tokenizer')
            base_model.resize_token_embeddings(tokenzier_vocab_size)
    if args.lora_model:
        model = PeftModel.from_pretrained(base_model, args.lora_model, torch_dtype=load_type, device_map={'': local_rank})
        logger.info('Loaded lora model')
    else:
        model = base_model
    model.eval()
    # NOTE(review): wrapping in DataParallel and immediately taking .module back
    # is effectively a no-op; presumably a leftover — confirm before removing.
    model = DataParallel(model)
    model = model.module
    logger.info(tokenizer)
    if (args.data_file is None):
        examples = ['', '?', '?', '', 'Tell me about alpacas.', 'Tell me about the president of Mexico in 2019.', 'hello.']
    else:
        with open(args.data_file, 'r', encoding='utf-8') as f:
            examples = [l.strip() for l in f.readlines()]
    logger.info(f'first 10 examples: {examples[:10]}')
    prompt_template = get_conv_template(args.template_name)
    # Results are flushed to disk once per write_batch_size examples.
    write_batch_size = ((args.batch_size * world_size) * 10)
    generation_kwargs = dict(max_new_tokens=args.max_new_tokens, do_sample=False, num_beams=1, repetition_penalty=args.repetition_penalty)
    stop_str = (tokenizer.eos_token if tokenizer.eos_token else prompt_template.stop_str)
    # Only rank 0 writes; remove any stale output before appending.
    if ((local_rank <= 0) and os.path.exists(args.output_file)):
        os.remove(args.output_file)
    count = 0
    for batch in tqdm([examples[i:(i + write_batch_size)] for i in range(0, len(examples), write_batch_size)], desc='Generating outputs'):
        dataset = TextDataset(batch)
        # Shard this chunk across ranks without shuffling to preserve order.
        sampler = DistributedSampler(dataset, num_replicas=world_size, rank=local_rank, shuffle=False)
        data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=sampler)
        responses = []
        inputs = []
        for texts in data_loader:
            inputs.extend(texts)
            prompted_texts = [prompt_template.get_prompt(messages=[[s, '']]) for s in texts]
            logger.debug(f'local_rank: {local_rank}, inputs size:{len(prompted_texts)}, top3: {prompted_texts[:3]}')
            inputs_tokens = tokenizer(prompted_texts, return_tensors='pt', padding=True)
            input_ids = inputs_tokens['input_ids'].to(local_rank)
            outputs = model.generate(input_ids=input_ids, **generation_kwargs)
            # Strip the prompt tokens, keeping only the newly generated tail.
            prompt_len = len(input_ids[0])
            outputs = [i[prompt_len:] for i in outputs]
            generated_outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
            logger.debug(f'local_rank: {local_rank}, outputs size:{len(generated_outputs)}, top3: {generated_outputs[:3]}')
            responses.extend(generated_outputs)
        # Gather every rank's inputs/outputs so rank 0 can write them all.
        all_inputs = ([None] * world_size)
        all_responses = ([None] * world_size)
        dist.all_gather_object(all_inputs, inputs)
        dist.all_gather_object(all_responses, responses)
        if (local_rank <= 0):
            all_inputs_flat = [inp for process_inputs in all_inputs for inp in process_inputs]
            all_responses_flat = [response for process_responses in all_responses for response in process_responses]
            logger.debug(f'all_responses size:{len(all_responses_flat)}, top5: {all_responses_flat[:5]}')
            results = []
            for (example, response) in zip(all_inputs_flat, all_responses_flat):
                results.append({'Input': example, 'Output': response})
            # Append one JSON object per line (JSONL).
            with open(args.output_file, 'a', encoding='utf-8') as f:
                for entry in results:
                    json.dump(entry, f, ensure_ascii=False)
                    f.write('\n')
                    count += 1
    if (local_rank <= 0):
        logger.info(f'save to {args.output_file}, total count: {count}')
    dist.barrier()
    dist.destroy_process_group()
def get_updated_inputs(inputs, **kwargs):
    """Return a new inputs object equal to ``inputs`` with the given fields replaced."""
    features = inputs._asdict()
    features.update(kwargs)
    return features_to_inputs(features)
class LatentProductModel(object):
    """TF1 latent-factor recommendation model over user/item attribute embeddings.

    Builds the full training graph in ``__init__`` (scores, losses for several
    loss families, Adagrad updates, top-N recommendation op) and exposes
    ``step`` to run one train/eval/recommend step against a session.
    """

    def __init__(self, user_size, item_size, size, num_layers, batch_size, learning_rate, learning_rate_decay_factor, user_attributes=None, item_attributes=None, item_ind2logit_ind=None, logit_ind2item_ind=None, loss_function='ce', GPU=None, logit_size_test=None, nonlinear=None, dropout=1.0, n_sampled=None, indices_item=None, dtype=tf.float32, top_N_items=100, hidden_size=500, loss_func='log', loss_exp_p=1.005):
        # NOTE(review): num_layers and GPU are accepted but unused in this body.
        self.user_size = user_size
        self.item_size = item_size
        self.top_N_items = top_N_items
        if (user_attributes is not None):
            user_attributes.set_model_size(size)
        self.user_attributes = user_attributes
        if (item_attributes is not None):
            item_attributes.set_model_size(size)
        self.item_attributes = item_attributes
        # Mappings between raw item indices and logit positions.
        self.item_ind2logit_ind = item_ind2logit_ind
        self.logit_ind2item_ind = logit_ind2item_ind
        if (logit_ind2item_ind is not None):
            self.logit_size = len(logit_ind2item_ind)
        if (indices_item is not None):
            self.indices_item = indices_item
        else:
            self.indices_item = range(self.logit_size)
        self.logit_size_test = logit_size_test
        self.nonlinear = nonlinear
        self.loss_function = loss_function
        self.n_sampled = n_sampled
        self.batch_size = batch_size
        # Decayable learning rate (non-trainable variable so it survives checkpoints).
        self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
        self.learning_rate_decay_op = self.learning_rate.assign((self.learning_rate * learning_rate_decay_factor))
        self.global_step = tf.Variable(0, trainable=False)
        self.att_emb = None
        self.dtype = dtype
        # Lazy state for get_permuted_batch (initialized on first call).
        self.data_length = None
        self.train_permutation = None
        self.start_index = None
        mb = self.batch_size
        self.item_target = tf.placeholder(tf.int32, shape=[mb], name='item')
        self.item_id_target = tf.placeholder(tf.int32, shape=[mb], name='item_id')
        self.dropout = dropout
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
        m = embed_attribute.EmbeddingAttribute(user_attributes, item_attributes, mb, self.n_sampled, 0, False, item_ind2logit_ind, logit_ind2item_ind)
        self.att_emb = m
        (embedded_user, user_b) = m.get_batch_user(self.keep_prob, False)
        if (self.nonlinear in ['relu', 'tanh']):
            # Optional 2-layer MLP on top of the raw user embedding.
            act = (tf.nn.relu if (self.nonlinear == 'relu') else tf.tanh)
            w1 = tf.get_variable('w1', [size, hidden_size], dtype=self.dtype)
            b1 = tf.get_variable('b1', [hidden_size], dtype=self.dtype)
            w2 = tf.get_variable('w2', [hidden_size, size], dtype=self.dtype)
            b2 = tf.get_variable('b2', [size], dtype=self.dtype)
            (embedded_user, user_b) = m.get_batch_user(1.0, False)
            h0 = tf.nn.dropout(act(embedded_user), self.keep_prob)
            h1 = act((tf.matmul(h0, w1) + b1))
            h1 = tf.nn.dropout(h1, self.keep_prob)
            h2 = act((tf.matmul(h1, w2) + b2))
            embedded_user = tf.nn.dropout(h2, self.keep_prob)
        (pos_embs_item, pos_item_b) = m.get_batch_item('pos', batch_size)
        pos_embs_item = tf.reduce_mean(pos_embs_item, 0)
        (neg_embs_item, neg_item_b) = m.get_batch_item('neg', batch_size)
        neg_embs_item = tf.reduce_mean(neg_embs_item, 0)
        print('construct postive/negative items/scores \n(for bpr loss, AUC)')
        # Dot-product scores plus item bias for positive and negative items.
        self.pos_score = (tf.reduce_sum(tf.multiply(embedded_user, pos_embs_item), 1) + pos_item_b)
        self.neg_score = (tf.reduce_sum(tf.multiply(embedded_user, neg_embs_item), 1) + neg_item_b)
        neg_pos = (self.neg_score - self.pos_score)
        # In-batch AUC estimate: fraction of pairs where pos outranks neg.
        self.auc = (0.5 - (0.5 * tf.reduce_mean(tf.sign(neg_pos))))
        if (self.n_sampled is not None):
            print('sampled prediction')
            sampled_logits = m.get_prediction(embedded_user, 'sampled')
            target_score = m.get_target_score(embedded_user, self.item_id_target)
        print('non-sampled prediction')
        logits = m.get_prediction(embedded_user)
        loss = self.loss_function
        # Per-example loss; which inputs it needs depends on the loss family.
        if (loss in ['warp', 'ce', 'rs', 'rs-sig', 'rs-sig2', 'bbpr']):
            batch_loss = m.compute_loss(logits, self.item_target, loss, loss_func=loss_func, exp_p=loss_exp_p)
        elif (loss in ['warp_eval']):
            (batch_loss, batch_rank) = m.compute_loss(logits, self.item_target, loss)
        elif (loss in ['mw']):
            batch_loss = m.compute_loss(sampled_logits, target_score, loss)
            # 'mw' trains on sampled logits but evaluates with the warp loss.
            batch_loss_eval = m.compute_loss(logits, self.item_target, 'warp')
        elif (loss in ['bpr', 'bpr-hinge']):
            batch_loss = m.compute_loss(neg_pos, self.item_target, loss)
        else:
            print('not implemented!')
            exit((- 1))
        if (loss in ['warp', 'warp_eval', 'mw', 'rs', 'rs-sig', 'rs-sig2', 'bbpr']):
            # Mask ops that hide known-positive items during warp-style losses.
            (self.set_mask, self.reset_mask) = m.get_warp_mask()
        self.loss = tf.reduce_mean(batch_loss)
        self.batch_loss = batch_loss
        if (loss in ['warp_eval']):
            self.batch_rank = batch_rank
        self.loss_eval = (tf.reduce_mean(batch_loss_eval) if (loss == 'mw') else self.loss)
        params = tf.trainable_variables()
        opt = tf.train.AdagradOptimizer(self.learning_rate)
        gradients = tf.gradients(self.loss, params)
        self.updates = opt.apply_gradients(zip(gradients, params), global_step=self.global_step)
        self.output = logits
        # Top-N recommendation indices from the full (non-sampled) logits.
        (values, self.indices) = tf.nn.top_k(self.output, self.top_N_items, sorted=True)
        self.saver = tf.train.Saver(tf.global_variables())

    def prepare_warp(self, pos_item_set, pos_item_set_eval):
        """Forward the train/eval positive-item sets to the embedding helper (for warp masks)."""
        self.att_emb.prepare_warp(pos_item_set, pos_item_set_eval)
        return

    def step(self, session, user_input, item_input, neg_item_input=None, item_sampled=None, item_sampled_id2idx=None, forward_only=False, recommend=False, recommend_new=False, loss=None, run_op=None, run_meta=None):
        """Run one session step: train update, eval loss, or top-N recommendation.

        Returns the loss value (train/eval), the recommendation indices
        (recommend), or [batch_loss, batch_rank] for 'warp_eval'.
        """
        input_feed = {}
        # Dropout only during training; full keep probability otherwise.
        if (forward_only or recommend):
            input_feed[self.keep_prob.name] = 1.0
        else:
            input_feed[self.keep_prob.name] = self.dropout
        if (recommend == False):
            targets = self.att_emb.target_mapping([item_input])
            input_feed[self.item_target.name] = targets[0]
            if (loss in ['mw']):
                input_feed[self.item_id_target.name] = item_input
        if (self.att_emb is not None):
            (update_sampled, input_feed_sampled, input_feed_warp) = self.att_emb.add_input(input_feed, user_input, item_input, neg_item_input=neg_item_input, item_sampled=item_sampled, item_sampled_id2idx=item_sampled_id2idx, forward_only=forward_only, recommend=recommend, loss=loss)
        # Select which ops to run for this step.
        if (not recommend):
            if (not forward_only):
                output_feed = [self.updates, self.loss]
            else:
                output_feed = [self.loss_eval]
        elif recommend_new:
            output_feed = [self.indices_test]
        else:
            output_feed = [self.indices]
        if (loss in ['warp_eval']):
            output_feed = [self.batch_loss, self.batch_rank]
        if ((item_sampled is not None) and (loss in ['mw', 'mce'])):
            session.run(update_sampled, input_feed_sampled)
        # Apply the warp mask around the main run, then undo it.
        if ((loss in ['warp', 'warp_eval', 'rs', 'rs-sig', 'rs-sig2', 'bbpr', 'mw']) and (recommend is False)):
            session.run(self.set_mask[loss], input_feed_warp)
        if ((run_op is not None) and (run_meta is not None)):
            outputs = session.run(output_feed, input_feed, options=run_op, run_metadata=run_meta)
        else:
            outputs = session.run(output_feed, input_feed)
        if ((loss in ['warp', 'warp_eval', 'rs', 'rs-sig', 'rs-sig2', 'bbpr', 'mw']) and (recommend is False)):
            session.run(self.reset_mask[loss], input_feed_warp)
        if (loss in ['warp_eval']):
            return outputs
        if (not recommend):
            if (not forward_only):
                return outputs[1]
            else:
                return outputs[0]
        else:
            return outputs[0]

    def get_batch(self, data, loss='ce', hist=None):
        """Sample a batch of (user, item) pairs uniformly with replacement.

        ``loss`` and ``hist`` are accepted but unused here; the negative-item
        list is always returned empty.
        """
        (batch_user_input, batch_item_input) = ([], [])
        batch_neg_item_input = []
        count = 0
        while (count < self.batch_size):
            (u, i, _) = random.choice(data)
            batch_user_input.append(u)
            batch_item_input.append(i)
            count += 1
        return (batch_user_input, batch_item_input, batch_neg_item_input)

    def get_permuted_batch(self, data):
        """Return the next batch from a random permutation of ``data``.

        Reshuffles whenever the permutation is exhausted (or on first use).
        """
        (batch_user_input, batch_item_input) = ([], [])
        if (self.data_length == None):
            self.data_length = len(data)
            self.start_index = 0
            self.train_permutation = np.random.permutation(self.data_length)
        if ((self.start_index + self.batch_size) >= self.data_length):
            self.start_index = 0
            self.train_permutation = np.random.permutation(self.data_length)
        indices = range(self.start_index, (self.start_index + self.batch_size))
        indices = self.train_permutation[indices]
        self.start_index += self.batch_size
        for j in indices:
            (u, i, _) = data[j]
            batch_user_input.append(u)
            batch_item_input.append(i)
        return (batch_user_input, batch_item_input, None)
def Accuracy(log_probabilities, targets, length=None):
    """Count correct argmax predictions against ``targets``.

    Returns a ``(numerator, denominator)`` pair of floats.  When relative
    ``length`` values are supplied, padded positions are excluded via a
    boolean mask built by ``length_to_mask``.
    """
    predictions = log_probabilities.argmax(-1)
    if length is None:
        # No padding information: score every position; denominator is the
        # sequence length (dimension 1), matching the original contract.
        hits = torch.sum(predictions == targets)
        total = targets.shape[1]
    else:
        # Mask of valid (non-padded) time steps per batch element.
        mask = length_to_mask(length * targets.shape[1], max_len=targets.shape[1]).bool()
        if len(targets.shape) == 3:
            # Broadcast the mask over the trailing feature dimension.
            mask = mask.unsqueeze(2).repeat(1, 1, targets.shape[2])
        hits = torch.sum(predictions.masked_select(mask) == targets.masked_select(mask))
        total = torch.sum(mask)
    return (float(hits), float(total))
def f(x):
    """Run the wrapped model on a defensive copy of ``x``.

    A flat 2-D batch is reshaped back to the per-sample shape of ``X``
    before ``preprocess_input`` mutates it in place.
    """
    batch = x.copy()
    if len(batch.shape) == 2:
        # Restore per-sample dimensions (taken from the reference data X).
        batch = batch.reshape(batch.shape[0], *X[0].shape)
    preprocess_input(batch)
    return model(batch)
def test_vector_fixed_set():
    """Scalar and per-parameter assignment to ``suggested_fixed`` on a vector paramset."""
    # NOTE(review): only 4 bounds are given for 5 parameters — confirm this
    # is intentional (bounds may be unvalidated by the constructor).
    pset = paramsets.constrained_by_poisson(
        name='foo',
        is_scalar=False,
        n_parameters=5,
        inits=[0, 1, 2, 3, 4],
        bounds=[((- 1), 1), ((- 2), 2), ((- 3), 3), ((- 4), 4)],
        fixed=False,
        auxdata=[0, 0, 0, 0, 0],
        factors=[1, 1, 1, 1, 1],
    )
    # A scalar assignment is broadcast to every parameter.
    pset.suggested_fixed = True
    assert pset.suggested_fixed == [True] * 5
    # A per-parameter list is stored verbatim.
    mixed = [False, True, False, True, False]
    pset.suggested_fixed = mixed
    assert pset.suggested_fixed == mixed
def parse_args():
    """Build and parse command-line arguments for keypoint-network training.

    Unknown arguments are tolerated (``parse_known_args``); the experiment
    config file is merged into the global configuration before returning.
    """
    parser = argparse.ArgumentParser(description='Train keypoints network')
    # Required experiment configuration file.
    parser.add_argument('--cfg', help='experiment configure file name', required=True, type=str)
    parser.add_argument('--device', default='cuda', help='device to use for training / testing')
    parser.add_argument('--seed', default=42, type=int)
    # Distributed-training options.
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--weight_decay', default=0.0001, type=float)
    parsed, _unknown = parser.parse_known_args()
    update_config(parsed.cfg)
    return parsed
class ValueIdBreakpoint(Breakpoint):
    """Breakpoint that fires when the current MLIR line mentions a value id (e.g. ``%42``)."""

    type = 'value-id'
    pattern = re.compile('^%[0-9]+')

    def should_stop(self, tdb: TdbCmdBackend) -> bool:
        """Return True when the MLIR text at the current command point contains ``self.text``."""
        index_plugin = tdb.get_plugin(FinalMlirIndexPlugin)
        if not index_plugin.enabled:
            return False
        mlir_line = index_plugin.get_mlir_by_point(tdb.cmd_point)
        if mlir_line is None:
            return False
        # Plain substring search; str.find returns -1 when absent.
        return mlir_line.find(self.text) >= 0
class BatchNormalizationFoldingOppositeModifierInner(FunctionModifier, BatchNormBase):
    """Fold a BatchNormalization that *precedes* an inner-product-like function
    (the function set comes from ``self._fct_set`` on the BatchNormBase mixin)
    into that function's weight and bias — the "opposite" direction of the
    usual conv-then-BN folding.
    """

    def __init__(self, channel_last=False):
        # channel_last: tensors are NHWC rather than NCHW — affects how the
        # per-channel BN vectors are reoriented below.
        super(BatchNormalizationFoldingOppositeModifierInner, self).__init__()
        self._channel_last = channel_last

    def modify(self, f, inputs):
        """Rewrite the graph around ``f``; return a replacement output or None to keep it."""
        outputs = f.outputs[0]
        if (len(outputs.function_references) == 0):
            return
        prev_func = f.inputs[0].parent
        # Only act on a foldable function that directly follows a BN.
        if (f.info.type_name != 'BatchNormalization'):
            if ((prev_func == None) or (prev_func.info.type_name != 'BatchNormalization')):
                return
        if (f.info.type_name == 'BatchNormalization'):
            # When visiting the BN itself: if it feeds a foldable function,
            # bypass the BN by returning its input unchanged (the fold happens
            # when the consumer is visited).
            for fr in outputs.function_references:
                if (fr.info.type_name in self._fct_set):
                    return inputs[0]
        if (len(f.outputs[0].function_references) != 1):
            return
        if (not (f.info.type_name in self._fct_set)):
            return
        # Here f is the inner-product function and prev_func the BN to fold.
        ip_func = f
        bn_func = prev_func
        (w_data, b_data) = self._compute_folded_parameters(ip_func, bn_func)
        ip_func.inputs[1].d = w_data
        ip_func.inputs[2].d = b_data
        x = inputs[0]
        w = ip_func.inputs[1]
        b = ip_func.inputs[2]
        h = self.connect(f, x, w, b)
        return h

    def reshape_bn_parameters(self, ip_func, bn_parameters, channel_last):
        """Reorient per-channel BN vectors to broadcast against the weight layout."""
        if (ip_func.info.type_name == 'Deconvolution'):
            # Swap the channel axis into the leading position (axis 1, or the
            # last axis for channel-last layouts).
            axes = list(range(bn_parameters[0].ndim))
            axis_to_switch = ((- 1) if channel_last else 1)
            (axes[0], axes[axis_to_switch]) = (axes[axis_to_switch], axes[0])
            bn_parameters = [np.transpose(bn_param, axes) for bn_param in bn_parameters]
        if (ip_func.info.type_name == 'Affine'):
            bn_parameters = [bn_param.reshape(bn_param.shape[1], bn_param.shape[0], 1, 1) for bn_param in bn_parameters]
        return bn_parameters

    def _compute_folded_parameters(self, ip_func, bn_func):
        """Return (w_data, b_data) with the BN absorbed into the weight and bias."""
        # BN inputs assumed to be (x, beta, gamma, mean, variance) — the
        # NNabla ordering; confirm against nnabla's BatchNormalization spec.
        beta_data = bn_func.inputs[1].d.copy()
        gamma_data = bn_func.inputs[2].d.copy()
        mean_data = bn_func.inputs[3].d.copy()
        var_data = bn_func.inputs[4].d.copy()
        eps_data = bn_func.info.args['eps']
        (beta_data, gamma_data, mean_data, var_data) = self.reshape_bn_parameters(ip_func, [beta_data, gamma_data, mean_data, var_data], self._channel_last)
        std_data = np.sqrt((var_data + eps_data))
        # BN(x) = c0 * x + c1:
        c0 = (gamma_data / std_data)
        c1 = (beta_data - ((gamma_data * mean_data) / std_data))
        w = ip_func.inputs[1]
        w_data = w.d
        if (ip_func.info.type_name == 'Affine'):
            (d_0, d_1, d_2) = bn_func.inputs[0].shape[1:]
            d_3 = w_data.shape[1:]
            # NOTE(review): d_3 is a tuple here, so the reshape target is a
            # nested shape — looks suspicious for plain np.reshape; confirm
            # this path is actually exercised and valid.
            w_data = np.reshape(w_data, (d_0, d_1, d_2, d_3))
        w_data = (c0 * w_data)
        b_data = (w_data * c1)
        w_data = (np.reshape(w_data, ((- 1), d_3)) if (ip_func.info.type_name == 'Affine') else w_data)
        # Reduce the bias contribution over every non-output axis.
        axes_to_reduce = tuple(range(1, w_data.ndim))
        if (ip_func.info.type_name == 'Deconvolution'):
            axes_to_reduce = (tuple(range((w_data.ndim - 1))) if self._channel_last else ((0,) + tuple(range(2, w_data.ndim))))
        axes_to_reduce = ((0, 1, 2) if (ip_func.info.type_name == 'Affine') else axes_to_reduce)
        b_data = np.sum(b_data, axes_to_reduce)
        if (len(ip_func.inputs) == 3):
            # The function already has a bias input: accumulate into it.
            b = ip_func.inputs[2]
            b_data += b.d
        return (w_data, b_data)
_utils.test()  # NOTE(review): looks like the remnant of a `@test_utils.test()` decorator lost in extraction — confirm against the original file

def test_ndrange_start_greater_than_end():
    """ti.ndrange with start > end must yield an empty range (count 0)."""

    def ndrange_test(i1: ti.i32, i2: ti.i32, j1: ti.i32, j2: ti.i32) -> ti.i32:
        # presumably a @ti.kernel in the original (decorator lost in extraction) — verify
        n: ti.i32 = 0
        # Count iterations of the 2-D ndrange (i1..i2) x (j1..j2).
        for (i, j) in ti.ndrange((i1, i2), (j1, j2)):
            n += 1
        return n
    # Normal ranges multiply; any inverted range collapses the count to zero.
    assert (ndrange_test(0, 10, 0, 20) == 200)
    assert (ndrange_test(0, 10, 20, 0) == 0)
    assert (ndrange_test(10, 0, 0, 20) == 0)
    assert (ndrange_test(10, 0, 20, 0) == 0)
def freeze(mod, preserved_attrs: Optional[List[str]]=None):
    """Freeze an eval-mode ScriptModule, optionally preserving named attributes.

    Returns a new, finalized RecursiveScriptModule.  Raises RuntimeError when
    ``mod`` is not a ScriptModule or is still in training mode.
    """
    if not isinstance(mod, ScriptModule):
        raise RuntimeError("Freezing expects a ScriptModule as input. Please use torch.jit.script or torch.jit.trace to script your 'nn.Module'.")
    if mod.training:
        raise RuntimeError('Freezing is currently only implemented for modules in eval mode. Please call .eval() on your module before freezing.')
    attrs = [] if preserved_attrs is None else preserved_attrs
    frozen = RecursiveScriptModule(torch._C._freeze_module(mod._c, attrs))
    RecursiveScriptModule._finalize_scriptmodule(frozen)
    return frozen
class CrystalOfTableaux_E7(CrystalOfTableaux):
    """Type E7 crystal of tableaux, restricted to single-row shapes."""

    def module_generator(self, shape):
        """Return the module generator for a one-row ``shape``.

        Raises NotImplementedError for shapes with more than one row.
        """
        if len(shape) != 1:
            raise NotImplementedError('only implemented for single row shapes')
        # A row of `shape[0]` copies of the highest-weight letter.
        hw = self.letters.highest_weight_vector()
        return self(*([hw] * shape[0]))
def test_ce_loss():
    """CELoss argument validation, output shape, and ignore_first_char formatting."""
    # Invalid constructor arguments must be rejected.
    with pytest.raises(AssertionError):
        CELoss(ignore_index='ignore')
    with pytest.raises(AssertionError):
        CELoss(reduction=1)
    with pytest.raises(AssertionError):
        CELoss(reduction='avg')

    # Forward pass returns a dict holding a per-position 'loss_ce' tensor.
    loss_fn = CELoss(ignore_index=0)
    logits = torch.rand(1, 10, 37)
    batch = {'padded_targets': torch.LongTensor([[1, 2, 3, 4, 0, 0, 0, 0, 0, 0]])}
    losses = loss_fn(logits, batch)
    assert isinstance(losses, dict)
    assert 'loss_ce' in losses
    assert losses['loss_ce'].size(1) == 10

    # ignore_first_char drops one position from both outputs and targets.
    loss_fn = CELoss(ignore_first_char=True)
    logits = torch.rand(1, 10, 37)
    batch = {'padded_targets': torch.LongTensor([[1, 2, 3, 4, 0, 0, 0, 0, 0, 0]])}
    new_output, new_target = loss_fn.format(logits, batch)
    assert new_output.shape == torch.Size([1, 37, 9])
    assert new_target.shape == torch.Size([1, 9])
class Fuzzer(object):
    """Abstract interface describing a fuzzer's lifecycle.

    Subclasses implement run/start/pause/resume/stop; all bodies here are no-ops.
    """
    # NOTE(review): `__metaclass__` is the Python 2 spelling; under Python 3 it
    # is ignored and this class is NOT actually abstract — confirm the target
    # interpreter (use `class Fuzzer(metaclass=ABCMeta)` on Python 3).
    __metaclass__ = ABCMeta

    def run(self):
        # Execute the fuzzing campaign.
        pass

    def start(self):
        # Begin fuzzing.
        pass

    def pause(self):
        # Temporarily suspend fuzzing.
        pass

    def resume(self):
        # Continue after a pause.
        pass

    def stop(self):
        # Terminate fuzzing.
        pass
def get_loader_from_returnn_dataset(dataset: Dataset, mp_manager: torch.multiprocessing.Manager) -> DataLoader:
    """Wrap a RETURNN dataset into a torch DataLoader with batching.

    Also sanity-checks that the batching pipe survives deepcopy and pickling,
    which DataLoader worker processes rely on.
    """
    # Shared epoch counter so dataset resets propagate across processes.
    epoch_mp_shared = mp_manager.Value('i', 0)
    epoch_mp_shared.value = 1
    reset_callback = returnn_dataset_wrapper.ReturnnDatasetResetMpSharedEpochCallback(
        dataset=dataset, epoch_mp_shared=epoch_mp_shared)
    wrapped_dataset = returnn_dataset_wrapper.ReturnnDatasetIterDataPipe(
        dataset, reset_callback=reset_callback)
    batches_dataset = data_pipeline.BatchingIterDataPipe(
        wrapped_dataset, batch_size=5, max_seqs=2)
    # Must be copyable and picklable for multiprocessing data loading.
    from copy import deepcopy
    deepcopy(batches_dataset)
    import pickle
    pickle.loads(pickle.dumps(batches_dataset))
    return DataLoader(batches_dataset, batch_size=None, collate_fn=data_pipeline.collate_batch)
def fhtoffset(dln, mu, initial=0.0, bias=0.0):
    """Return an optimal log-offset for the fast Hankel transform (FFTLog).

    Shifts ``initial`` (= log(k r)) to the nearest value satisfying the
    low-ringing condition of the FFTLog algorithm.
    """
    lnkr = initial
    q = bias
    # Arguments of the gamma functions appearing in the FFTLog kernel.
    xp = ((mu + 1) + q) / 2
    xm = ((mu + 1) - q) / 2
    y = np.pi / (2 * dln)
    zp = loggamma(xp + 1j * y)
    zm = loggamma(xm + 1j * y)
    # Low-ringing condition: this phase must be an integer; round to the
    # nearest one and translate back into a shift of lnkr.
    arg = (LN_2 - lnkr) / dln + (zp.imag + zm.imag) / np.pi
    return lnkr + (arg - np.round(arg)) * dln
def test():
    """Exercise shallow/deep copy semantics of awkward Arrays and Records."""
    # A NumPy-backed ak.Array is a view: mutating the buffer is visible.
    np_array = np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
    one = ak.Array(np_array)
    np_array[1] = 999
    assert (to_list(one) == [0.0, 999, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
    # copy.copy is shallow: still shares the underlying buffer.
    two = copy.copy(one)
    np_array[3] = 123
    assert (to_list(two) == [0.0, 999, 2.2, 123, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
    # Deep copies (copy.deepcopy, np.copy, ak.operations.copy) detach from the buffer.
    three = copy.deepcopy(two)
    four = np.copy(two)
    five = ak.operations.copy(two)
    np_array[5] = 321
    assert (to_list(three) == [0.0, 999, 2.2, 123, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
    assert (to_list(four) == [0.0, 999, 2.2, 123, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
    assert (to_list(five) == [0.0, 999, 2.2, 123, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
    # deepcopy and ak.operations.copy agree on nested lists and records.
    assert (to_list(copy.deepcopy(ak.Array([[1, 2, 3], [], [4, 5]]))) == to_list(ak.operations.copy(ak.Array([[1, 2, 3], [], [4, 5]]))))
    assert (to_list(copy.deepcopy(ak.Record({'one': 1, 'two': 2.2}))) == to_list(ak.operations.copy(ak.Record({'one': 1, 'two': 2.2}))))
    # ak.operations.copy detaches from the wrapped NumPy buffer; the wrapper does not.
    underlying_array = np.array([1.1, 2.2, 3.3, 4.4, 5.5])
    wrapper = ak.Array(underlying_array)
    duplicate = ak.operations.copy(wrapper)
    underlying_array[2] = 123
    assert (to_list(underlying_array) == [1.1, 2.2, 123.0, 4.4, 5.5])
    assert (to_list(wrapper) == [1.1, 2.2, 123, 4.4, 5.5])
    assert (to_list(duplicate) == [1.1, 2.2, 3.3, 4.4, 5.5])
    # Adding a field to a shallow copy must not mutate the original array.
    original = ak.Array([{'x': 1}, {'x': 2}, {'x': 3}])
    shallow_copy = copy.copy(original)
    shallow_copy['y'] = (original.x ** 2)
    assert (to_list(shallow_copy) == [{'x': 1, 'y': 1}, {'x': 2, 'y': 4}, {'x': 3, 'y': 9}])
    assert (to_list(original) == [{'x': 1}, {'x': 2}, {'x': 3}])
    # Deep copies of jagged record arrays preserve structure and values.
    array = ak.Array([[{'x': 1.1, 'y': [1]}, {'x': 2.2, 'y': [1, 2]}, {'x': 3.3, 'y': [1, 2, 3]}], [], [{'x': 4.4, 'y': [1, 2, 3, 4]}, {'x': 5.5, 'y': [1, 2, 3, 4, 5]}]])
    assert (to_list(ak.operations.copy(array)) == [[{'x': 1.1, 'y': [1]}, {'x': 2.2, 'y': [1, 2]}, {'x': 3.3, 'y': [1, 2, 3]}], [], [{'x': 4.4, 'y': [1, 2, 3, 4]}, {'x': 5.5, 'y': [1, 2, 3, 4, 5]}]])
    # A deep-copied Record layout is frozen at copy time: later field additions
    # to the parent array are not reflected in it.
    a = ak.Array([{'x': 0, 'y': 0.0}, {'x': 1, 'y': 1.1}, {'x': 2, 'y': 2.2}, {'x': 3, 'y': 3.3}, {'x': 4, 'y': 4.4}])
    record = copy.deepcopy(a[2].layout)
    a['z'] = (a.x ** 2)
    assert (to_list(a) == [{'x': 0, 'y': 0.0, 'z': 0}, {'x': 1, 'y': 1.1, 'z': 1}, {'x': 2, 'y': 2.2, 'z': 4}, {'x': 3, 'y': 3.3, 'z': 9}, {'x': 4, 'y': 4.4, 'z': 16}])
    assert (to_list(record) == {'x': 2, 'y': 2.2})
    assert (to_list(a[2]) == {'x': 2, 'y': 2.2, 'z': 4})
class DPMSolverSampler(object):
    """Wrap a latent-diffusion model for sampling with the DPM-Solver ODE solver."""

    def __init__(self, model, **kwargs):
        super().__init__()
        self.model = model
        # Detach the model's noise schedule as float32 on the model's device.
        to_torch = (lambda x: x.clone().detach().to(torch.float32).to(model.device))
        self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))

    def register_buffer(self, name, attr):
        # Plain attribute setter (this class is not an nn.Module).
        if (type(attr) == torch.Tensor):
            if (attr.device != torch.device('cuda')):
                # NOTE(review): unconditionally forces CUDA — breaks CPU-only
                # runs; confirm this is intended.
                attr = attr.to(torch.device('cuda'))
        setattr(self, name, attr)

    _grad()  # NOTE(review): looks like the remnant of a `@torch.no_grad()` decorator on `sample` — confirm against the original file
    def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0.0, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1.0, unconditional_conditioning=None, **kwargs):
        """Draw ``batch_size`` samples of ``shape`` = (C, H, W) using ``S`` solver steps.

        Returns (samples, None); many keyword arguments are accepted for API
        compatibility with other samplers but unused here.
        """
        # Warn when the conditioning batch does not match the requested batch size.
        if (conditioning is not None):
            if isinstance(conditioning, dict):
                cbs = conditioning[list(conditioning.keys())[0]].shape[0]
                if (cbs != batch_size):
                    print(f'Warning: Got {cbs} conditionings but batch-size is {batch_size}')
            elif (conditioning.shape[0] != batch_size):
                print(f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}')
        (C, H, W) = shape
        size = (batch_size, C, H, W)
        device = self.model.betas.device
        # Start from provided latents x_T or fresh Gaussian noise.
        if (x_T is None):
            img = torch.randn(size, device=device)
        else:
            img = x_T
        ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)
        # Classifier-free guidance wrapper around the model's noise prediction.
        model_fn = model_wrapper((lambda x, t, c: self.model.apply_model(x, t, c)), ns, model_type='noise', guidance_type='classifier-free', condition=conditioning, unconditional_condition=unconditional_conditioning, guidance_scale=unconditional_guidance_scale)
        dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False)
        x = dpm_solver.sample(img, steps=S, skip_type='time_uniform', method='multistep', order=2, lower_order_final=True)
        return (x.to(device), None)
def lgb_f1_loss_multiclass(preds: np.ndarray, train_data: lgb.Dataset, clip: float=1e-05) -> Tuple[(np.ndarray, np.ndarray)]:
    """Custom multiclass objective for LightGBM (soft-F1 style).

    Returns per-element ``(grad, hess)`` flattened Fortran-style, the layout
    LightGBM expects for multiclass objectives.
    """
    labels = train_data.get_label().astype(np.int32)
    # LightGBM hands raw scores flattened column-major: reshape to (n_samples, n_classes).
    scores = preds.reshape((labels.shape[0], -1), order='F')
    probs = np.clip(softmax_ax1(scores), clip, 1 - clip)
    # One-hot encode the true labels.
    onehot = np.zeros_like(probs)
    np.add.at(onehot, (np.arange(labels.shape[0]), labels), 1)
    grad = (probs - onehot) * probs
    # Hessian clipped away from zero for numerical stability.
    hess = (1 - probs) * probs * np.clip(2 * probs - onehot, 0.001, np.inf)
    return (grad.reshape((-1,), order='F'), hess.reshape((-1,), order='F'))
class Generalized_RCNN(nn.Module):
    """Config-driven generalized R-CNN: backbone (+FPN) with optional semseg,
    RPN, box (Fast/Cascade), mask and parsing heads, all toggled by ``cfg``."""

    def __init__(self, is_train=True):
        super().__init__()
        if (not is_train):
            # Inference-only input normalization: y = (x - mean) / std as an
            # affine per-channel transform.
            self.Norm = ops.AffineChannel2d(3)
            self.Norm.weight.data = torch.from_numpy((1.0 / np.array(cfg.PIXEL_STDS))).float()
            self.Norm.bias.data = torch.from_numpy((((- 1.0) * np.array(cfg.PIXEL_MEANS)) / np.array(cfg.PIXEL_STDS))).float()
        # Backbone selected from the registry by config name.
        conv_body = registry.BACKBONES[cfg.BACKBONE.CONV_BODY]
        self.Conv_Body = conv_body()
        self.dim_in = self.Conv_Body.dim_out
        self.spatial_scale = self.Conv_Body.spatial_scale
        if cfg.MODEL.FPN_ON:
            fpn_body = registry.FPN_BODY[cfg.FPN.BODY]
            self.Conv_Body_FPN = fpn_body(self.dim_in, self.spatial_scale)
            self.dim_in = self.Conv_Body_FPN.dim_out
            self.spatial_scale = self.Conv_Body_FPN.spatial_scale
        else:
            # Without FPN, only the last backbone stage is used.
            self.dim_in = self.dim_in[(- 1):]
            self.spatial_scale = self.spatial_scale[(- 1):]
        if cfg.MODEL.SEMSEG_ON:
            self.SemSeg = SemSeg(self.dim_in, self.spatial_scale)
        if cfg.MODEL.RPN_ON:
            self.RPN = build_rpn(self.dim_in)
        if cfg.MODEL.FASTER_ON:
            if cfg.MODEL.CASCADE_ON:
                self.Cascade_RCNN = CascadeRCNN(self.dim_in, self.spatial_scale)
            else:
                self.Fast_RCNN = FastRCNN(self.dim_in, self.spatial_scale)
        if cfg.MODEL.MASK_ON:
            self.Mask_RCNN = MaskRCNN(self.dim_in, self.spatial_scale)
        if cfg.MODEL.PARSING_ON:
            self.Parsing_RCNN = ParsingRCNN(self.dim_in, self.spatial_scale)
        self._init_modules()

    def _init_modules(self):
        # Optionally freeze backbone (and FPN) parameters per config.
        if cfg.TRAIN.FREEZE_CONV_BODY:
            for p in self.Conv_Body.parameters():
                p.requires_grad = False
            if cfg.MODEL.FPN_ON:
                for p in self.Conv_Body_FPN.parameters():
                    p.requires_grad = False

    def forward(self, images, targets=None):
        """Training: return a dict of losses.  Inference: return detections."""
        if (self.training and (targets is None)):
            raise ValueError('In training mode, targets should be passed')
        images = to_image_list(images)
        conv_features = self.Conv_Body(images.tensors)
        if cfg.MODEL.FPN_ON:
            conv_features = self.Conv_Body_FPN(conv_features)
        else:
            conv_features = [conv_features[(- 1)]]
        semseg_losses = {}
        if cfg.MODEL.SEMSEG_ON:
            (_, conv_features, loss_semseg) = self.SemSeg(conv_features, targets)
            semseg_losses.update(loss_semseg)
        proposal_losses = {}
        if cfg.MODEL.RPN_ON:
            (proposals, loss_rpn) = self.RPN(images, conv_features, targets)
            proposal_losses.update(loss_rpn)
        else:
            proposals = None
        roi_losses = {}
        if cfg.MODEL.FASTER_ON:
            if cfg.MODEL.CASCADE_ON:
                (box_features, result, loss_box) = self.Cascade_RCNN(conv_features, proposals, targets)
            else:
                (box_features, result, loss_box) = self.Fast_RCNN(conv_features, proposals, targets)
            roi_losses.update(loss_box)
        else:
            result = proposals
        if cfg.MODEL.MASK_ON:
            (x, result, loss_mask) = self.Mask_RCNN(conv_features, result, targets)
            roi_losses.update(loss_mask)
        if cfg.MODEL.PARSING_ON:
            (x, result, loss_parsing) = self.Parsing_RCNN(conv_features, result, targets)
            roi_losses.update(loss_parsing)
        if self.training:
            outputs = {'metrics': {}, 'losses': {}}
            outputs['losses'].update(proposal_losses)
            outputs['losses'].update(semseg_losses)
            outputs['losses'].update(roi_losses)
            return outputs
        return result

    def box_net(self, images, targets=None):
        """Inference path up to box detection; returns (features, result, semseg_pred)."""
        images = to_image_list(images, cfg.TEST.SIZE_DIVISIBILITY)
        # Norm only exists when constructed with is_train=False — inference path.
        images_norm = self.Norm(images.tensors)
        conv_features = self.Conv_Body(images_norm)
        if cfg.MODEL.FPN_ON:
            conv_features = self.Conv_Body_FPN(conv_features)
        else:
            conv_features = [conv_features[(- 1)]]
        if cfg.MODEL.SEMSEG_ON:
            (semseg_pred, conv_features, loss_semseg) = self.SemSeg(conv_features, targets)
        else:
            semseg_pred = None
        if cfg.MODEL.RPN_ON:
            (proposals, proposal_losses) = self.RPN(images, conv_features, targets)
        else:
            proposals = None
        if cfg.MODEL.FASTER_ON:
            if cfg.MODEL.CASCADE_ON:
                (box_features, result, loss_box) = self.Cascade_RCNN(conv_features, proposals, targets)
            else:
                (box_features, result, loss_box) = self.Fast_RCNN(conv_features, proposals, targets)
        else:
            result = proposals
        return (conv_features, result, semseg_pred)

    def mask_net(self, conv_features, result, targets=None):
        """Run the mask head over detected boxes without gradients."""
        if (len(result[0]) == 0):
            # NOTE(review): returns {} here but parsing_net returns `result`
            # for the same condition — confirm the asymmetry is intended.
            return {}
        with torch.no_grad():
            (x, result, loss_mask) = self.Mask_RCNN(conv_features, result, targets)
        return result

    def parsing_net(self, conv_features, result, targets=None):
        """Run the parsing head over detected boxes without gradients."""
        if (len(result[0]) == 0):
            return result
        with torch.no_grad():
            (x, result, loss_parsing) = self.Parsing_RCNN(conv_features, result, targets)
        return result
class MongoKeyValueStore(KeyValueStore):
    """Key-value store backed by a MongoDB collection.

    Each document has the shape ``{request: <canonicalized key>, response:
    <value or JSON string>}``.  Keys are canonicalized to a ``SON`` with
    recursively sorted keys so lookups are insertion-order independent.
    Values MongoDB cannot store natively (invalid documents, oversized ints)
    are stored as JSON strings and transparently decoded on read.
    """

    _BATCH_SIZE: int = 8
    _REQUEST_KEY = 'request'
    _RESPONSE_KEY = 'response'

    def __init__(self, uri: str, collection_name: str):
        self._mongodb_client: MongoClient = MongoClient(uri)
        self._database = self._mongodb_client.get_default_database()
        self._collection = self._database.get_collection(collection_name)
        # One document per unique request.
        self._collection.create_index(self._REQUEST_KEY, unique=True)
        super().__init__()

    def __enter__(self) -> 'MongoKeyValueStore':
        return self

    def __exit__(self, exc_type, exc_value, traceback) -> None:
        return

    def _canonicalize_key(self, key: Dict) -> SON:
        """Return ``key`` as an order-stable SON (keys sorted recursively via JSON)."""
        serialized = json.dumps(key, sort_keys=True)
        return json.loads(serialized, object_pairs_hook=SON)

    def _query_for(self, key: Dict) -> Dict:
        """Build the Mongo filter locating the document stored for ``key``."""
        return {self._REQUEST_KEY: self._canonicalize_key(key)}

    def contains(self, key: Dict) -> bool:
        """Return True when a document for ``key`` exists."""
        return self._collection.find_one(self._query_for(key)) is not None

    def get(self, key: Dict) -> Optional[Dict]:
        """Return the stored value for ``key``, or None when absent."""
        document = self._collection.find_one(self._query_for(key))
        if document is None:
            return None
        response = document[self._RESPONSE_KEY]
        # Values that could not be stored natively were serialized to JSON.
        return json.loads(response) if isinstance(response, str) else response

    def get_all(self) -> Generator[(Tuple[(Dict, Dict)], None, None)]:
        """Yield every (request, response) pair, decoding JSON-string responses."""
        for document in self._collection.find({}).batch_size(self._BATCH_SIZE):
            request = document[self._REQUEST_KEY]
            response = document[self._RESPONSE_KEY]
            if isinstance(response, str):
                yield (request, json.loads(response))
            else:
                yield (request, response)

    def put(self, key: Dict, value: Dict) -> None:
        """Upsert ``value`` under ``key``; fall back to a JSON string when Mongo rejects it."""
        request = self._canonicalize_key(key)
        document = SON([(self._REQUEST_KEY, request), (self._RESPONSE_KEY, value)])
        try:
            self._collection.replace_one(filter={self._REQUEST_KEY: request}, replacement=document, upsert=True)
        except (InvalidDocument, OverflowError):
            # e.g. field names with '.'/'$' or ints beyond 8 bytes: store as JSON text.
            alternate_document = SON([(self._REQUEST_KEY, request), (self._RESPONSE_KEY, json.dumps(value))])
            self._collection.replace_one(filter={self._REQUEST_KEY: request}, replacement=alternate_document, upsert=True)

    def multi_put(self, pairs: Iterable[Tuple[(Dict, Dict)]]) -> None:
        """Bulk upsert of (key, value) pairs.

        NOTE: unlike ``put``, there is no JSON fallback here — an invalid
        document fails the whole bulk write.
        """
        operations = []
        for (key, value) in pairs:
            request = self._canonicalize_key(key)
            document = SON([(self._REQUEST_KEY, request), (self._RESPONSE_KEY, value)])
            operations.append(ReplaceOne({self._REQUEST_KEY: request}, document, upsert=True))
        self._collection.bulk_write(operations)

    def remove(self, key: Dict) -> None:
        """Delete the document stored for ``key``.

        Bug fix: the previous implementation passed the raw ``key`` dict as
        the filter, which never matches documents that ``put`` stores under
        the ``request`` field; filter on the canonicalized request instead.
        """
        self._collection.delete_one(self._query_for(key))
class PAU(torch.nn.Module):
    """Padé Activation Unit: a learnable rational function P(x) / Q(x).

    P is a degree-5 polynomial (weights 0..5); Q = 1 + |degree-4 polynomial
    without constant term| (weights 6..9), so Q >= 1 and the output is finite.
    """
    __constants__ = ['num_parameters']
    num_parameters: int

    def __init__(self, num_parameters: int=10, init: float=1.0) -> None:
        self.num_parameters = num_parameters
        super(PAU, self).__init__()
        # All coefficients start at `init`; learned jointly during training.
        self.weight = Parameter(torch.Tensor(num_parameters).fill_(init))

    def forward(self, input: Tensor) -> Tensor:
        """Evaluate the rational activation elementwise."""
        w = self.weight
        # Powers x^1 .. x^5 computed once and reused by both polynomials.
        powers = [input ** k for k in range(1, 6)]
        numerator = w[0] + sum(w[k] * powers[k - 1] for k in range(1, 6))
        denominator = 1 + torch.abs(sum(w[5 + k] * powers[k - 1] for k in range(1, 5)))
        return torch.div(numerator, denominator)

    def extra_repr(self) -> str:
        return 'num_parameters={}'.format(self.num_parameters)
class EmergencyDispatchSystemSearchIncidents(VirtualFunctionTool):
    """Declarative spec for a virtual tool that searches dispatch incidents
    by location, with optional type/severity/status filters.  All behavior
    lives in the base class; this class only carries the schema below."""
    name = 'EmergencyDispatchSystemSearchIncidents'
    summary = 'Search for incidents based on a specified location and incident type.'
    # Input schema: only 'location' is required; the rest narrow the search.
    parameters: List[ArgParameter] = [{'name': 'location', 'type': 'string', 'description': 'The location to search for incidents, in the street address format.', 'required': True}, {'name': 'incident_type', 'type': 'string', 'description': "The type of the incidents to search for, one of ['Fire', 'Medical', 'Police']. If not provided, all types of incidents will be considered.", 'required': False}, {'name': 'severity', 'type': 'string', 'description': "The severity of the incidents to search for, one of ['Low', 'Medium', 'High']. If not provided, all severities of incidents will be considered.", 'required': False}, {'name': 'incident_status', 'type': 'string', 'description': "If provided, the new status of the incident, one of ['Active', 'Closed'].", 'required': False}, {'name': 'max_results', 'type': 'integer', 'description': 'The maximum number of results to return. Default is 10.', 'required': False}]
    # Output schema: a list of matching incident records.
    returns: List[ArgReturn] = [{'name': 'incidents', 'type': 'array', 'description': "An array of incidents that match the search criteria, each object contains 'incident_id', 'incident_type' (one of ['Fire', 'Medical', 'Police']), 'incident_location', 'incident_severity' (one of ['Low', 'Medium', 'High']), 'incident_status' (one of ['Active', 'Closed']), 'dispatch_resource_ids', 'dispatch_time', 'incident_description'."}]
    # Declared failure modes for invalid filter values.
    exceptions: List[ArgException] = [{'name': 'InvalidRequestException', 'description': "The 'incident_type' parameter is not one of ['Fire', 'Medical', 'Police'], or if the 'severity' parameter is not one of ['Low', 'Medium', 'High'], or if the 'location' is invalid."}]
()  # NOTE(review): stray empty-tuple expression — likely the remnant of a decorator (e.g. a pytest marker) lost in extraction; confirm against the original file
def test_memory_challenge_c(memory_management_agent: Agent, patched_api_requestor: MockerFixture, monkeypatch: pytest.MonkeyPatch, level_to_run: int, challenge_name: str) -> None:
    """Memory challenge C: the agent must recall 'silly phrases' planted in
    instruction files and write them to the output file."""
    silly_phrases = ['The purple elephant danced on a rainbow while eating a taco', 'The sneaky toaster stole my socks and ran away to Hawaii', 'My pet rock sings better than Beyonce on Tuesdays', 'The giant hamster rode a unicycle through the crowded mall', 'The talking tree gave me a high-five and then flew away', 'I have a collection of invisible hats that I wear on special occasions', "The flying spaghetti monster stole my sandwich and left a note saying 'thanks for the snack'", 'My imaginary friend is a dragon who loves to play video games', 'I once saw a cloud shaped like a giant chicken eating a pizza', 'The ninja unicorn disguised itself as a potted plant and infiltrated the office']
    # Only the first `level_to_run` phrases are used at this difficulty level.
    level_silly_phrases = silly_phrases[:level_to_run]
    create_instructions_files(memory_management_agent, level_to_run, level_silly_phrases)
    # The agent gets level_to_run + 2 interaction cycles to finish.
    run_interaction_loop(monkeypatch, memory_management_agent, (level_to_run + 2), challenge_name, level_to_run)
    file_path = get_workspace_path(memory_management_agent, OUTPUT_LOCATION)
    content = read_file(file_path, agent=memory_management_agent)
    # Every planted phrase must appear in the agent's output file.
    for phrase in level_silly_phrases:
        assert (phrase in content), f'Expected the file to contain {phrase}'
def get_human_object_recognition_categories():
    """Return the 16 object categories used in human recognition experiments.

    The list is always returned in alphabetical order.
    """
    categories = ['knife', 'keyboard', 'elephant', 'bicycle', 'airplane', 'clock',
                  'oven', 'chair', 'bear', 'boat', 'cat', 'bottle', 'truck', 'car',
                  'bird', 'dog']
    return sorted(categories)
def default_config_dict(name=None, parent_name=None, local_path=None):
    """Deprecated shim: build a Configuration and return it as a plain dict.

    Kept for backward compatibility; warns callers to use Configuration directly.
    """
    import warnings
    message = ('Use Configuration(%r,%r,top_path=%r) instead of deprecated default_config_dict(%r,%r,%r)'
               % (name, parent_name, local_path, name, parent_name, local_path))
    # stacklevel=2 points the warning at the caller, not this shim.
    warnings.warn(message, stacklevel=2)
    return Configuration(name, parent_name, local_path).todict()
def CalculateHarary(mol):
    """Log10 of the Harary number: half the sum of reciprocal topological distances.

    A zero sum is clamped to MINVALUE so the logarithm stays defined.
    """
    dist = np.array(Chem.GetDistanceMatrix(mol), 'd')
    # Reciprocals of all nonzero pairwise distances (diagonal excluded).
    reciprocals = 1.0 / dist[dist != 0]
    harary = 0.5 * reciprocals.sum()
    if harary == 0:
        harary = MINVALUE  # avoid log10(0)
    return np.log10(harary)
def main(args=None):
    """Entry point: parse arguments, seed RNGs, then train or evaluate."""
    args = parse_args(args=args)
    utils.set_random_seed(args['seed'])
    logger.info('Running parser in {} mode'.format(args['mode']))
    # Any mode other than 'train' falls through to evaluation.
    run = train if args['mode'] == 'train' else evaluate
    run(args)
def test(sim_time=1.5, qc_atten=1e-05):
    """Run the star-network teleportation example and print measurement counts."""
    network_config = 'star_network.json'
    topo = RouterNetTopo(network_config)
    set_parameters(topo, sim_time, qc_atten)
    # Locate the two endpoint routers by name.
    node1 = node2 = None
    for router in topo.get_nodes_by_type(RouterNetTopo.QUANTUM_ROUTER):
        if router.name == 'router1':
            node1 = router
        elif router.name == 'router2':
            node2 = router
    # Attach a teleportation app to each endpoint, pointing at its peer.
    start_app = TeleportApp(node1, 'start_app', 'end_app')
    end_app = TeleportApp(node2, 'end_app', 'start_app')
    tl = topo.get_timeline()
    tl.show_progress = False
    tl.init()
    start_app.start('router2', .0, .0, 10, 0.9)
    tick = time.time()
    tl.run()
    print(('execution time %.2f sec' % (time.time() - tick)))
    print('measured |0>:', end_app.results[0])
    print('measured |1>:', end_app.results[1])
def resnet50_atrous(pretrained=True, os=16, **kwargs):
    """ResNet-50 backbone with atrous (dilated) convolutions for dense prediction.

    NOTE: extra **kwargs are accepted but not forwarded, matching the
    original signature.
    """
    return _resnet(
        arch='resnet50',
        block=Bottleneck,
        layers=[3, 4, 6, 3],
        atrous=[1, 2, 1],
        os=os,
        pretrained=pretrained,
        progress=True,
    )
def test_minimize_multiple_constraints():
    """SLSQP with three inequality constraints; the optimum lies on the budget line."""

    def budget(x):
        # 0.2*x0 + 0.4*x1 + 0.33*x2 <= 25
        return np.array([25 - 0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])

    def nonneg_x1(x):
        return np.array([x[1]])

    def nonneg_x2(x):
        return np.array([x[2]])

    constraints = (
        {'type': 'ineq', 'fun': budget},
        {'type': 'ineq', 'fun': nonneg_x1},
        {'type': 'ineq', 'fun': nonneg_x2},
    )

    def objective(x):
        # Maximize x0 + x1 + x2 by minimizing its negation.
        return -(x[0] + x[1] + x[2])

    res = optimize.minimize(objective, [0, 0, 0], method='SLSQP', constraints=constraints)
    assert_allclose(res.x, [125, 0, 0], atol=1e-10)
class AdamW(Optimizer):
    """Adam with decoupled weight decay (Loshchilov & Hutter, 2019).

    Interface matches the original: same constructor defaults and ``step``
    signature.  Fix: the deprecated positional ``Tensor.add_/addcmul_/
    addcdiv_(scalar, tensor)`` overloads (removed in modern PyTorch) are
    replaced by the ``alpha=``/``value=`` keyword forms, which are
    numerically identical.
    """

    def __init__(self, params: Iterable, lr: float=0.001, betas: Tuple[(float, float)]=(0.9, 0.999), eps: float=1e-06, weight_decay: float=0.0, correct_bias: bool=True) -> None:
        if lr < 0.0:
            raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
        if not (0.0 <= betas[0] < 1.0):
            raise ValueError('Invalid beta parameter: {} - should be in [0.0, 1.0['.format(betas[0]))
        if not (0.0 <= betas[1] < 1.0):
            raise ValueError('Invalid beta parameter: {} - should be in [0.0, 1.0['.format(betas[1]))
        if not (0.0 <= eps):
            raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(eps))
        defaults = {'lr': lr, 'betas': betas, 'eps': eps, 'weight_decay': weight_decay, 'correct_bias': correct_bias}
        super(AdamW, self).__init__(params, defaults)

    def step(self, closure: Optional[Callable]=None) -> Optional[Callable]:
        """Perform one optimization step; returns ``closure()`` when given."""
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                if len(state) == 0:
                    # Lazy per-parameter state initialization on first use.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # Exponential moving averages of gradient and squared gradient.
                exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1))
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1.0 - beta2))
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                step_size = group['lr']
                if group['correct_bias']:
                    bias_correction1 = 1.0 - (beta1 ** state['step'])
                    bias_correction2 = 1.0 - (beta2 ** state['step'])
                    step_size = (step_size * math.sqrt(bias_correction2)) / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=(- step_size))
                # Decoupled weight decay, applied directly to the weights
                # (scaled by the raw lr, not the bias-corrected step size).
                if group['weight_decay'] > 0.0:
                    p.data.add_(p.data, alpha=((- group['lr']) * group['weight_decay']))
        return loss
def train(model, optimizer, loader, epoch):
    """Train ``model`` for one epoch over ``loader``.

    Returns (epoch, average loss, average top-1 accuracy, average top-5 accuracy).
    Relies on module-level ``args`` (for rank) and ``logger``.
    """
    # Running meters for timing, loss, and accuracy.
    batch_time = AverageMeter()
    data_time = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    losses = AverageMeter()
    end = time.perf_counter()
    model.train()
    criterion = nn.CrossEntropyLoss().cuda()
    for (iter_epoch, (inp, target)) in enumerate(loader):
        # Time spent waiting on the data loader.
        data_time.update((time.perf_counter() - end))
        inp = inp.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        output = model(inp)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        (acc1, acc5) = accuracy(output, target, topk=(1, 5))
        # Meters are weighted by the batch size.
        losses.update(loss.item(), inp.size(0))
        top1.update(acc1[0], inp.size(0))
        top5.update(acc5[0], inp.size(0))
        batch_time.update((time.perf_counter() - end))
        end = time.perf_counter()
        # Log every 50 iterations from the rank-0 process only; assumes the
        # optimizer has (at least) two param groups: trunk and head.
        if ((args.rank == 0) and ((iter_epoch % 50) == 0)):
            logger.info('Epoch[{0}] - Iter: [{1}/{2}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tData {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tPrec {top1.val:.3f} ({top1.avg:.3f})\tLR trunk {lr}\tLR head {lr_W}'.format(epoch, iter_epoch, len(loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1, lr=optimizer.param_groups[0]['lr'], lr_W=optimizer.param_groups[1]['lr']))
    return (epoch, losses.avg, top1.avg.item(), top5.avg.item())
def set_linecache(filename, source):
    """Register ``source`` under ``filename`` in linecache.

    Lets tracebacks and inspect show source for dynamically generated code.
    """
    import linecache
    cached_lines = [line + '\n' for line in source.splitlines()]
    # Cache entry format: (size, mtime, lines, fullname); None size/mtime
    # marks a virtual (non-filesystem) entry that never gets invalidated.
    linecache.cache[filename] = (None, None, cached_lines, filename)
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand, plus residual add."""
    expansion = 4

    def __init__(self, inplanes, planes, norm_type='batch', stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        out_planes = planes * Bottleneck.expansion
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = normalization(planes, norm_type)
        # The 3x3 conv carries the stride for spatial downsampling.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = normalization(planes, norm_type)
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, bias=False)
        self.bn3 = normalization(out_planes, norm_type)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Compute relu(F(x) + shortcut(x))."""
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Project the shortcut when channel/stride mismatch requires it.
        shortcut = x if self.downsample is None else self.downsample(x)
        out += shortcut
        return self.relu(out)
def ufunc_add_where(A: dace.int32[10], B: dace.int32[10], W: dace.bool_[10]):
    """Elementwise A + B evaluated only where mask W is True (np.add ``where=``).

    Per NumPy ufunc semantics, positions where W is False are left
    uninitialized in the output when no ``out=`` is supplied.
    """
    return np.add(A, B, where=W)
.box(RecordViewType)  # NOTE(review): remnant of a numba `@numba.extending.box(RecordViewType)` decorator — confirm against the original file
def box_RecordView(recordviewtype, viewval, c):
    """Numba boxing: convert a native RecordView value back into a Python record object."""
    # Materialize the Python-side RecordView class object in the compiled context.
    RecordView_obj = c.pyapi.unserialize(c.pyapi.serialize_object(RecordView))
    proxyin = c.context.make_helper(c.builder, recordviewtype, viewval)
    # Box the underlying ArrayView and the element index separately.
    arrayview_obj = box_ArrayView(recordviewtype.arrayviewtype, proxyin.arrayview, c)
    at_obj = c.pyapi.long_from_ssize_t(proxyin.at)
    # RecordView(arrayview, at).torecord() produces the final Python object.
    recordview_obj = c.pyapi.call_function_objargs(RecordView_obj, (arrayview_obj, at_obj))
    out = c.pyapi.call_method(recordview_obj, 'torecord', ())
    # Release temporary references to keep refcounts balanced.
    c.pyapi.decref(RecordView_obj)
    c.pyapi.decref(arrayview_obj)
    c.pyapi.decref(at_obj)
    c.pyapi.decref(recordview_obj)
    return out
def show_ann(coco, img, ann_info):
    """Display ``img`` (BGR, converted to RGB) with its COCO annotations overlaid."""
    plt.imshow(mmcv.bgr2rgb(img))
    plt.axis('off')
    # The COCO API draws the annotation overlays onto the current axes.
    coco.showAnns(ann_info)
    plt.show()
_arg_scope  # NOTE(review): remnant of a `@slim.add_arg_scope` decorator — confirm against the original file
def separable_conv2d_same(inputs, num_outputs, kernel_size, depth_multiplier, stride, rate=1, use_explicit_padding=True, regularize_depthwise=False, scope=None, **kwargs):
    """Separable conv with 'SAME'-like output size, via explicit padding when strided.

    When stride > 1 and use_explicit_padding is set, the input is padded
    manually and the conv runs with 'VALID' padding, so output size matches
    TF 'SAME' semantics independently of input size.
    """
    def _separable_conv2d(padding):
        # Fused depthwise + pointwise separable conv under a single scope.
        return slim.separable_conv2d(inputs, num_outputs, kernel_size, depth_multiplier=depth_multiplier, stride=stride, rate=rate, padding=padding, scope=scope, **kwargs)
    def _split_separable_conv2d(padding):
        # Depthwise and pointwise as two separate ops with distinct scopes.
        outputs = slim.separable_conv2d(inputs, None, kernel_size, depth_multiplier=depth_multiplier, stride=stride, rate=rate, padding=padding, scope=(scope + '_depthwise'), **kwargs)
        return slim.conv2d(outputs, num_outputs, 1, scope=(scope + '_pointwise'), **kwargs)
    if ((stride == 1) or (not use_explicit_padding)):
        if regularize_depthwise:
            outputs = _separable_conv2d(padding='SAME')
        else:
            outputs = _split_separable_conv2d(padding='SAME')
    else:
        # Pad explicitly for the given kernel size and dilation rate, then VALID.
        inputs = fixed_padding(inputs, kernel_size, rate)
        if regularize_depthwise:
            outputs = _separable_conv2d(padding='VALID')
        else:
            outputs = _split_separable_conv2d(padding='VALID')
    return outputs
class AnnealingTemperature(object):
    """Exponentially annealed temperature schedule (e.g. for Gumbel-softmax).

    Starting from `init_tau`, every `N` calls to step() the temperature is
    recomputed as init_tau * exp(-anneal_rate * step) and clipped below at
    `base_tau`. Between updates the current temperature is returned unchanged.
    """

    def __init__(self, init_tau=1.0, base_tau=0.5, anneal_rate=0.001, N=500):
        self.init_tau = init_tau
        self.base_tau = base_tau
        self.anneal_rate = anneal_rate
        self.N = N
        self._tau = init_tau
        self._step = 0

    def step(self):
        """Advance one step and return the (possibly updated) temperature."""
        self._step += 1
        due = (self._step % self.N) == 0
        if due:
            decayed = self.init_tau * np.exp((- self.anneal_rate) * self._step)
            # Never anneal below the floor temperature.
            self._tau = np.maximum(decayed, self.base_tau)
            log.info('\nstep: {:d}, update tau: {:.4f}\n'.format(self._step, self._tau))
        return self._tau
class GPT2Partitioner(PartitioningTask):
    """Partitioning task for GPT-2, optionally with stateless tied embeddings.

    Loads tokenizer + dataset in __init__, supplies model/inputs to the
    partitioner, and post-processes the generated partition file for the
    tied-embedding configuration.
    """

    def __init__(self, args) -> None:
        super().__init__(args)
        self.tokenizer = GPT2Tokenizer.from_pretrained(args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=(args.cache_dir if args.cache_dir else None))
        if (args.block_size <= 0):
            # No block size requested: default to the tokenizer's maximum.
            args.block_size = self.tokenizer.max_len_single_sentence
        # Never exceed what the tokenizer/model supports.
        args.block_size = min(args.block_size, self.tokenizer.max_len_single_sentence)
        self.ds = load_and_cache_examples(args, self.tokenizer)

    def batch_dim(self) -> int:
        """Batch dimension of the model inputs (dimension 0)."""
        return 0

    def post_partitioning(self, args, graph, analysis_result, summary):
        """For tied embeddings, remap the extra dummy partition's device to cuda:0.

        Rewrites the generated partition file in place with sed; best-effort.
        """
        if args.stateless_tied:
            try:
                import subprocess
                subprocess.check_output(['sed', '-s', '-i', f's/cuda:{args.n_partitions}/cuda:0/g', (args.output_file + '.py')])
            except Exception:
                # Fix: was a bare `except:` which also swallowed SystemExit and
                # KeyboardInterrupt; narrow to Exception so interrupts propagate
                # while the rewrite stays best-effort.
                print('Failed to replaced tied dummy partition device')

    def update_analysis_kwargs(self, args, config, analysis_kwargs: Dict) -> Dict[(str, Any)]:
        """Pin the tied dummy stage to the same GPU as stage 0 when present."""
        stages_on_same_gpu = set()
        if (args.lmhead and args.stateless_tied and (len(config['stages']) == (args.n_partitions + 1))):
            # Extra stage exists only in the tied configuration.
            stages_on_same_gpu = [{0, args.n_partitions}]
        analysis_kwargs['stages_on_same_gpu'] = stages_on_same_gpu
        return analysis_kwargs

    def register_functions(self):
        """Register functions the tracer must treat specially."""
        register_new_traced_function(math.sqrt, namespace=math)
        register_new_explicit_untraced_function(operator.is_, namespace=operator)
        register_new_explicit_untraced_function(operator.is_not, namespace=operator)

    def get_model(self, args) -> torch.nn.Module:
        """Instantiate the requested GPT-2 variant in train mode.

        Variant is chosen by (lmhead, stateless_tied); embeddings are resized
        to the tokenizer and, for tied models, made stateless afterwards.
        """
        if args.lmhead:
            if args.stateless_tied:
                model_class = StatelessGPT2LMHeadModel
            else:
                model_class = GPT2LMHeadModel
        elif args.stateless_tied:
            model_class = StatelessGPT2Model
        else:
            model_class = GPT2Model
        model_config = GPT2Config.from_pretrained(args.model_name_or_path, cache_dir=(args.cache_dir if args.cache_dir else None))
        model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=model_config, cache_dir=(args.cache_dir if args.cache_dir else None)).train()
        model.resize_token_embeddings(len(self.tokenizer))
        if args.stateless_tied:
            # Must happen after loading + resizing so the tied weights match.
            model.make_stateless_after_loaded_tied_and_resized()
        return model

    def get_input(self, args, analysis=False):
        """Draw one random batch; adds labels (= inputs) for the LM-head variant."""
        batch_size = (args.analysis_batch_size if analysis else args.partitioning_batch_size)
        sampler = RandomSampler(self.ds)
        dl = DataLoader(self.ds, sampler=sampler, batch_size=batch_size)
        batch = next(iter(dl))
        if args.lmhead:
            sample = {'input_ids': batch, 'labels': batch}
        else:
            sample = {'input_ids': batch}
        return sample
class BipartiteEdgePredLayer(Layer):
    # GraphSAGE-style edge scoring layer between two node sets.
    # Scores an edge as dot(x1, x2) or, with bilinear_weights, x1 . (W x2^T),
    # and supports xent / skipgram / hinge losses against negative samples.
    def __init__(self, input_dim1, input_dim2, placeholders, dropout=False, act=tf.nn.sigmoid, loss_fn='xent', neg_sample_weights=1.0, bias=False, bilinear_weights=False, **kwargs):
        # input_dim1/input_dim2: embedding dims of the two node sets.
        # neg_sample_weights: weight of the negative term in the xent loss.
        super(BipartiteEdgePredLayer, self).__init__(**kwargs)
        self.input_dim1 = input_dim1
        self.input_dim2 = input_dim2
        self.act = act
        self.bias = bias
        self.eps = 1e-07
        self.margin = 0.1  # margin used by the hinge loss
        self.neg_sample_weights = neg_sample_weights
        self.bilinear_weights = bilinear_weights
        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.0
        self.output_dim = 1
        with tf.variable_scope((self.name + '_vars')):
            if bilinear_weights:
                # Bilinear form W of shape (dim1, dim2).
                self.vars['weights'] = tf.get_variable('pred_weights', shape=(input_dim1, input_dim2), dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
            if self.bias:
                self.vars['bias'] = zeros([self.output_dim], name='bias')
        # Select the loss implementation by name; unknown names leave
        # self.loss_fn unset (AttributeError on first use).
        if (loss_fn == 'xent'):
            self.loss_fn = self._xent_loss
        elif (loss_fn == 'skipgram'):
            self.loss_fn = self._skipgram_loss
        elif (loss_fn == 'hinge'):
            self.loss_fn = self._hinge_loss
        if self.logging:
            self._log_vars()

    def affinity(self, inputs1, inputs2):
        # Per-pair edge score: row-wise dot product, optionally through W.
        if self.bilinear_weights:
            prod = tf.matmul(inputs2, tf.transpose(self.vars['weights']))
            self.prod = prod
            result = tf.reduce_sum((inputs1 * prod), axis=1)
        else:
            result = tf.reduce_sum((inputs1 * inputs2), axis=1)
        return result

    def neg_cost(self, inputs1, neg_samples, hard_neg_samples=None):
        # All-pairs scores against negative samples: (batch, num_neg).
        # hard_neg_samples is accepted but unused here.
        if self.bilinear_weights:
            inputs1 = tf.matmul(inputs1, self.vars['weights'])
        neg_aff = tf.matmul(inputs1, tf.transpose(neg_samples))
        return neg_aff

    def loss(self, inputs1, inputs2, neg_samples):
        # Dispatch to the loss chosen at construction time.
        return self.loss_fn(inputs1, inputs2, neg_samples)

    def _xent_loss(self, inputs1, inputs2, neg_samples, hard_neg_samples=None):
        # Sigmoid cross-entropy: positives labelled 1, negatives labelled 0,
        # with the negative term scaled by neg_sample_weights.
        aff = self.affinity(inputs1, inputs2)
        neg_aff = self.neg_cost(inputs1, neg_samples, hard_neg_samples)
        true_xent = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(aff), logits=aff)
        negative_xent = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(neg_aff), logits=neg_aff)
        loss = (tf.reduce_sum(true_xent) + (self.neg_sample_weights * tf.reduce_sum(negative_xent)))
        return loss

    def _skipgram_loss(self, inputs1, inputs2, neg_samples, hard_neg_samples=None):
        # Skip-gram objective: positive affinity minus log-sum-exp of negatives.
        aff = self.affinity(inputs1, inputs2)
        neg_aff = self.neg_cost(inputs1, neg_samples, hard_neg_samples)
        neg_cost = tf.log(tf.reduce_sum(tf.exp(neg_aff), axis=1))
        loss = tf.reduce_sum((aff - neg_cost))
        return loss

    def _hinge_loss(self, inputs1, inputs2, neg_samples, hard_neg_samples=None):
        # Margin ranking: penalize negatives scoring within `margin` of positives.
        aff = self.affinity(inputs1, inputs2)
        neg_aff = self.neg_cost(inputs1, neg_samples, hard_neg_samples)
        diff = tf.nn.relu(tf.subtract(neg_aff, (tf.expand_dims(aff, 1) - self.margin)), name='diff')
        loss = tf.reduce_sum(diff)
        self.neg_shape = tf.shape(neg_aff)
        return loss

    def weights_norm(self):
        # NOTE(review): `tf.nn.l2_norm` does not appear to exist in TensorFlow;
        # calling this will likely raise AttributeError. Probably tf.nn.l2_loss
        # or tf.norm was intended — confirm before use.
        return tf.nn.l2_norm(self.vars['weights'])
def register_Ns3ApplicationContainer_methods(root_module, cls):
    # Auto-generated pybindgen registration for ns3::ApplicationContainer:
    # declares constructors, Add/Begin/End/Get/GetN accessors and the
    # Start/Stop scheduling methods of the C++ class for the Python bindings.
    cls.add_constructor([param('ns3::ApplicationContainer const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::Application >', 'application')])
    cls.add_constructor([param('std::string', 'name')])
    cls.add_method('Add', 'void', [param('ns3::ApplicationContainer', 'other')])
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Application >', 'application')])
    cls.add_method('Add', 'void', [param('std::string', 'name')])
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Application > const, std::vector< ns3::Ptr< ns3::Application > > >', [], is_const=True)
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Application > const, std::vector< ns3::Ptr< ns3::Application > > >', [], is_const=True)
    cls.add_method('Get', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    cls.add_method('Start', 'void', [param('ns3::Time', 'start')])
    cls.add_method('Stop', 'void', [param('ns3::Time', 'stop')])
    return
def attn_post_proc(attn_res, inter_hn=None, wd=0.0, keep_prob=1.0, residual_keep_prob=1.0, is_train=None, activation='relu', sparse_opt=False, scope=None, **kwargs):
    # Transformer-style post-processing of an attention output: linear
    # projection + residual, then a position-wise FFN with residual.
    # With sparse_opt, masked positions are compacted to a sparse layout first
    # and restored afterwards; requires a 'mask' kwarg.
    with tf.variable_scope((scope or 'attn_res')):
        assert ('mask' in kwargs)
        if sparse_opt:
            # Drop masked positions; reverse_spec allows exact restoration.
            (x1, reverse_spec) = masked_dense2sparse(attn_res, kwargs.get('mask'))
        else:
            x1 = attn_res
        # Linear projection back to the model dimension (last dim of attn_res).
        y = bn_dense_layer_v2(x1, get_shape_list(attn_res)[(- 1)], True, 0.0, 'dense_layer', 'linear', False, wd, keep_prob, is_train)
        x2 = residual_connection(x1, y, is_train, residual_keep_prob, 'res_con')
        # FFN hidden size defaults to 4x the model dimension (standard ratio).
        res = residual_connection_with_dense(x2, (inter_hn or (4 * get_shape_list(attn_res)[(- 1)])), True, 0.0, 'residual_connection_with_dense', activation, False, wd, keep_prob, is_train, residual_keep_prob)
        if sparse_opt:
            res = masked_sparse2dense(res, reverse_spec)
        return res
class TextDataset(Dataset):
    """Dataset of (style, token-ids) pairs read from tab-separated text files.

    Each input line is "<style>\t<text>"; the text has its spaces removed and
    is re-tokenized character-by-character via the vocab, truncated so that an
    EOS token can be appended in __getitem__ within `max_lengths`.
    """

    def __init__(self, paths, vocab, logger, max_lengths=200):
        self.logger = logger
        self.vocab = vocab
        self.max_lengths = max_lengths
        # Reserve one slot for the EOS token appended in __getitem__.
        self.data = self.make_dataset(paths, vocab, logger, (max_lengths - 1))

    @staticmethod
    def make_dataset(paths, vocab, logger, max_lengths):
        """Read all files and return a list of [style, token_ids] records.

        Fix: this was defined without `self` yet called as a bound method
        (`self.make_dataset(...)`), which passed the instance as `paths` and
        raised TypeError. Declaring it a @staticmethod preserves both
        `self.make_dataset(...)` and `TextDataset.make_dataset(...)` call styles.
        """
        logger.info('reading data from {}'.format(paths))
        dataset = []
        for path in paths:
            with open(path, 'r', encoding='utf8') as f:
                lines = [i.strip().split('\t') for i in f.readlines() if (len(i.strip()) != 0)]
                for line in lines:
                    # Strip intra-word spaces, then space-separate characters
                    # so the vocab tokenizes per character.
                    dataset.append([int(line[0]), vocab.string2ids(' '.join(line[1].replace(' ', '')))[:max_lengths]])
        logger.info('{} data record loaded'.format(len(dataset)))
        return dataset

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        (style, text) = self.data[idx]
        # Append EOS; truncation in make_dataset guarantees room for it.
        text = (text + [self.vocab.eos_id])
        return {'type': 'text', 'style': style, 'text': text, 'text_len': len(text)}
class TqdmFile(object):
    """File-like shim that routes writes through tqdm.write.

    Redirecting stdout/stderr to an instance of this class keeps print output
    from clobbering active tqdm progress bars.
    """

    dummy_file = None

    def __init__(self, dummy_file):
        # The real underlying stream tqdm should ultimately write to.
        self.dummy_file = dummy_file

    def write(self, x):
        # Ignore whitespace-only payloads (e.g. the bare newline from print()).
        if x.rstrip():
            tqdm.write(x, file=self.dummy_file)
def _dict_flatten(d: Dict[(Any, Any)]) -> Tuple[(List[Any], Context)]:
    """Flatten a dict into (values, keys) for pytree-style traversal.

    The keys list doubles as the reconstruction context; both lists share the
    dict's iteration order, so zip(keys, values) round-trips the mapping.
    """
    keys = list(d.keys())
    values = [d[k] for k in keys]
    return (values, keys)
def tokenize(expression: str) -> TokenGenerator:
    """Lazily split *expression* into Token objects.

    Grammar (single-character lookahead):
      '$'  starts a variable, consumed until the next stop symbol;
      '.'  '{' '}' are single-character punctuation tokens;
      '#'  starts a pointer, consumed until '}';
      anything else is a string run up to the next stop symbol.
    """
    stop_symbols = {'$', '.', '{', '}', '#'}
    pos = 0
    end = len(expression)

    def scan(should_stop) -> None:
        # Always consume the current symbol, then advance while the
        # stop-predicate rejects the symbol under the cursor.
        nonlocal pos
        pos += 1
        while pos < end and not should_stop(expression[pos]):
            pos += 1

    while pos < end:
        ch = expression[pos]
        if ch == '$':
            begin = pos
            scan(lambda c: c in stop_symbols)
            yield Token.variable(expression[begin:pos])
        elif ch == '.':
            yield Token.dot()
            pos += 1
        elif ch == '{':
            yield Token.lbracket()
            pos += 1
        elif ch == '}':
            yield Token.rbracket()
            pos += 1
        elif ch == '#':
            begin = pos
            scan(lambda c: c == '}')
            yield Token.pointer(expression[begin:pos])
        else:
            begin = pos
            scan(lambda c: c in stop_symbols)
            yield Token.string(expression[begin:pos])
def dispatch(fn_name):
    """Resolve *fn_name* in the module-level `dispatcher` table.

    Unknown names print a diagnostic and terminate the process, mirroring a
    fatal configuration error.
    """
    if fn_name in dispatcher:
        return dispatcher[fn_name]
    print(('Undefined value function `%s' % fn_name))
    exit(1)
def get_core_subclass_dict(superclass):
    """Map subclass name -> class, from get_core_subclass_list's (k, v) pairs."""
    return dict(get_core_subclass_list(superclass))
class ResizeShortestEdge():
    # Resize each image so its shorter edge matches a size sampled from
    # [short_edge_length[0], short_edge_length[1]], capping the longer edge at
    # max_size.
    # NOTE(review): annotated as List[torch.Tensor] but the uint8 branch treats
    # `img` as a numpy array (Image.fromarray, np.asarray) — presumably both
    # numpy HWC arrays and float tensors are accepted; confirm with callers.
    def __init__(self, short_edge_length: List[int], max_size: int=sys.maxsize):
        self.interp_method = 'bilinear'
        self.max_size = max_size
        # Expected to be a 2-element [min, max] range — TODO confirm.
        self.short_edge_length = short_edge_length

    def __call__(self, imgs: List[torch.Tensor]):
        img_augs = []
        for img in imgs:
            # Assumes HWC layout so shape[:2] is (height, width).
            (h, w) = img.shape[:2]
            # Sample a target size for the shorter edge (inclusive upper bound).
            size = np.random.randint(self.short_edge_length[0], (self.short_edge_length[1] + 1))
            if (size == 0):
                # NOTE(review): returns the single image (not the list) and
                # aborts the whole batch — verify this early-exit is intended.
                return img
            scale = ((size * 1.0) / min(h, w))
            if (h < w):
                (newh, neww) = (size, (scale * w))
            else:
                (newh, neww) = ((scale * h), size)
            if (max(newh, neww) > self.max_size):
                # Rescale down so the longer edge fits within max_size.
                scale = ((self.max_size * 1.0) / max(newh, neww))
                newh = (newh * scale)
                neww = (neww * scale)
            # Round to nearest integer pixel sizes.
            neww = int((neww + 0.5))
            newh = int((newh + 0.5))
            if (img.dtype == np.uint8):
                # uint8 path: resize via PIL (numpy-array input assumed).
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), Image.BILINEAR)
                img = np.asarray(pil_image)
            else:
                # Float path: HWC -> NCHW, bilinear interpolate, back to CHW.
                img = img.permute(2, 0, 1).unsqueeze(0)
                img = F.interpolate(img, (newh, neww), mode=self.interp_method, align_corners=False).squeeze(0)
            img_augs.append(img)
        return img_augs
def bar_custom(current, total, width=80):
    """wget-style progress callback printing percent and KB counts in place.

    Args:
        current: bytes downloaded so far.
        total: total bytes expected; non-positive (unknown length) is tolerated
            and reported as 0%.
        width: unused; kept for wget callback-signature compatibility.
    """
    # Guard against total <= 0 (wget reports 0/-1 when the server omits
    # Content-Length), which previously raised ZeroDivisionError.
    percent = ((current / total) * 100) if total > 0 else 0
    print(('Downloading: %d%% [%d / %d] Ks' % (percent, (current / 1000), (total / 1000))), end='\r')
def adjust_length_to_model(length, max_sequence_length):
    """Clamp a requested generation length to the model's positional capacity.

    Rules: with a positive cap, a negative or over-cap request becomes the cap;
    with no cap, a negative request falls back to the module-level MAX_LENGTH;
    otherwise the request is returned unchanged.
    """
    if max_sequence_length > 0:
        if length < 0 or length > max_sequence_length:
            length = max_sequence_length
    elif length < 0:
        # Model imposes no cap and nothing was requested: use the safety default.
        length = MAX_LENGTH
    return length
class BNReLU2d(torch.nn.Sequential):
    """Sequential container fusing a BatchNorm2d with a ReLU.

    The pair is stored in order (batch_norm, relu); construction rejects any
    other module types since downstream fusion expects exactly this pair.
    """

    def __init__(self, batch_norm, relu):
        types_ok = (type(batch_norm) == BatchNorm2d) and (type(relu) == ReLU)
        assert types_ok, 'Incorrect types for input modules{}{}'.format(type(batch_norm), type(relu))
        super().__init__(batch_norm, relu)
class AnthropicClient(CachingClient):
    # HELM client for the Anthropic completion API with response caching.
    MAX_COMPLETION_LENGTH: int = 8192  # hard API-side cap on max_tokens
    ADDITIONAL_TOKENS: int = 5  # max leading filler chars stripped from completions
    PROMPT_ANSWER_START: str = 'The answer is '

    def __init__(self, tokenizer: Tokenizer, tokenizer_name: str, cache_config: CacheConfig, api_key: Optional[str]=None):
        super().__init__(cache_config=cache_config)
        self.tokenizer = tokenizer
        self.tokenizer_name = tokenizer_name
        self.api_key: Optional[str] = api_key
        # Client is only constructed when a key is present; requests without a
        # key fail explicitly in _send_request.
        self._client = (anthropic.Client(api_key) if api_key else None)

    def _send_request(self, raw_request: Dict[(str, Any)]) -> Dict[(str, Any)]:
        # Single uncached round trip to the Anthropic API.
        if (self.api_key is None):
            raise Exception('API key is not set. Please set it in the HELM config file.')
        result = self._client.completion(**raw_request)
        assert ('error' not in result), f"Request failed with error: {result['error']}"
        return result

    def _filter_completion(self, completion: str, max_tokens: int) -> str:
        # Strip up to ADDITIONAL_TOKENS leading ':' / space / newline characters
        # the API tends to prepend; stop at the first substantive character.
        for _ in range(AnthropicClient.ADDITIONAL_TOKENS):
            if (len(completion) == 0):
                return completion
            elif (completion[0] in [':', ' ', '\n']):
                completion = completion[1:]
            else:
                break
        return completion

    def make_request(self, request: Request) -> RequestResult:
        # Validate limits, then issue (cached) completions one at a time.
        if (request.max_tokens > AnthropicClient.MAX_COMPLETION_LENGTH):
            raise ValueError(f'The value for `max_tokens` exceeds the currently supported maximum ({request.max_tokens} > {AnthropicClient.MAX_COMPLETION_LENGTH}).')
        if ((request.max_tokens == 0) and (not request.echo_prompt)):
            raise ValueError('echo_prompt must be True when max_tokens=0.')
        raw_request = {'prompt': request.prompt, 'stop_sequences': request.stop_sequences, 'model': request.model_engine, 'max_tokens_to_sample': request.max_tokens, 'temperature': request.temperature, 'top_p': request.top_p, 'top_k': request.top_k_per_token}
        completions: List[Sequence] = []
        for completion_index in range(request.num_completions):
            try:
                def do_it():
                    # Executed only on cache miss.
                    result = self._send_request(raw_request)
                    assert ('completion' in result), f'Invalid response: {result}'
                    return result
                # completion_index is part of the key so each sample caches separately.
                cache_key = CachingClient.make_cache_key({'completion_index': completion_index, **raw_request}, request)
                (response, cached) = self.cache.get(cache_key, wrap_request_time(do_it))
            except Exception as error:
                # Known client-side errors are marked non-retriable/non-fatal.
                if ('Prompt must contain anthropic.AI_PROMPT' in str(error)):
                    return RequestResult(success=False, cached=False, error=str(error), completions=[], embedding=[], error_flags=ErrorFlags(is_retriable=False, is_fatal=False))
                if ('exceeds max (' in str(error)):
                    return RequestResult(success=False, cached=False, error=str(error), completions=[], embedding=[], error_flags=ErrorFlags(is_retriable=False, is_fatal=False))
                return RequestResult(success=False, cached=False, error=str(error), completions=[], embedding=[])
            response['completion'] = self._filter_completion(response['completion'], request.max_tokens)
            # Optionally echo the prompt, then re-tokenize the full text; the
            # API returns no logprobs, so all logprobs are reported as 0.
            text: str = ((request.prompt + response['completion']) if request.echo_prompt else response['completion'])
            tokenization_result: TokenizationRequestResult = self.tokenizer.tokenize(TokenizationRequest(text, tokenizer=self.tokenizer_name))
            tokens: List[Token] = [Token(text=str(text), logprob=0, top_logprobs={}) for text in tokenization_result.raw_tokens]
            completion = Sequence(text=response['completion'], logprob=0, tokens=tokens)
            # Enforce the request's stop/length constraints on the sequence.
            sequence = truncate_sequence(completion, request, print_warning=True)
            completions.append(sequence)
        return RequestResult(success=True, cached=cached, request_time=response['request_time'], request_datetime=response['request_datetime'], completions=completions, embedding=[])
# NOTE(review): the decorator prefix above was truncated by chunking
# (presumably `@test_utils.test(...)`); remaining tokens kept as-is.
_utils.test(require=ti.extension.bls)
def test_scatter_1d():
    # Exercise block-local-storage stencil codegen in scatter mode:
    # 1-D field of 128 elements, block size 32, two-point stencil {1, 0}.
    _test_bls_stencil(1, 128, bs=32, stencil=((1,), (0,)), scatter=True)
# NOTE(review): the decorator prefix above was truncated by chunking
# (presumably `@pytest.mark.parametrize(...)`); remaining tokens kept as-is.
.parametrize('evaluation_policy_pscore_cascade, evaluation_policy_action_dist, q_hat, description', invalid_input_of_create_estimator_inputs)
def test_meta_create_estimator_inputs_using_invalid_input_data(evaluation_policy_pscore_cascade, evaluation_policy_action_dist, q_hat, description: str, synthetic_slate_bandit_feedback: BanditFeedback) -> None:
    # Every public SlateOffPolicyEvaluation entry point funnels through
    # _create_estimator_inputs, so each must raise the same ValueError
    # (message matched against the parametrized description) on invalid input.
    ope_ = SlateOffPolicyEvaluation(bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[cascade_dr])
    with pytest.raises(ValueError, match=f'{description}*'):
        _ = ope_._create_estimator_inputs(evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade, evaluation_policy_action_dist=evaluation_policy_action_dist, q_hat=q_hat)
    with pytest.raises(ValueError, match=f'{description}*'):
        _ = ope_.estimate_policy_values(evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade, evaluation_policy_action_dist=evaluation_policy_action_dist, q_hat=q_hat)
    with pytest.raises(ValueError, match=f'{description}*'):
        _ = ope_.estimate_intervals(evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade, evaluation_policy_action_dist=evaluation_policy_action_dist, q_hat=q_hat)
    with pytest.raises(ValueError, match=f'{description}*'):
        _ = ope_.summarize_off_policy_estimates(evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade, evaluation_policy_action_dist=evaluation_policy_action_dist, q_hat=q_hat)
    with pytest.raises(ValueError, match=f'{description}*'):
        _ = ope_.evaluate_performance_of_estimators(ground_truth_policy_value=0.1, evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade, evaluation_policy_action_dist=evaluation_policy_action_dist, q_hat=q_hat)
    with pytest.raises(ValueError, match=f'{description}*'):
        _ = ope_.summarize_estimators_comparison(ground_truth_policy_value=0.1, evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade, evaluation_policy_action_dist=evaluation_policy_action_dist, q_hat=q_hat)
def get_bleu(in_sent, target_sent):
    """Score one hypothesis against one reference with sacrebleu corpus BLEU.

    Returns a space-joined string: score, system length, reference length,
    then the per-order match counts and totals.
    """
    bleu = sacrebleu.corpus_bleu([in_sent], [[target_sent]])
    fields = [bleu.score, bleu.sys_len, bleu.ref_len]
    fields += bleu.counts
    fields += bleu.totals
    return ' '.join(str(f) for f in fields)
def save_checkpoint(cfg, model, epoch, optimizer=None, scheduler=None, additioanl_dict=None, is_best=False, post_fix='ckpt_latest', save_name=None):
    """Serialize model/optimizer/scheduler state to `<save_name>_<post_fix>.pth`.

    Also copies the checkpoint to a per-epoch milestone file every
    cfg.save_freq epochs, and to a `*_ckpt_best.pth` file when is_best.
    Note: the `additioanl_dict` parameter keeps its historical spelling for
    caller compatibility.
    """
    if save_name is None:
        save_name = cfg.run_name
    current_ckpt_name = f'{save_name}_{post_fix}.pth'
    current_pretrained_path = os.path.join(cfg.ckpt_dir, current_ckpt_name)
    # Unwrap DataParallel/DDP so keys are not prefixed with "module.".
    model_state = model.module.state_dict() if hasattr(model, 'module') else model.state_dict()
    save_dict = {
        'model': model_state,
        'optimizer': optimizer.state_dict() if optimizer is not None else dict(),
        'scheduler': scheduler.state_dict() if scheduler is not None else dict(),
        'epoch': epoch,
    }
    if additioanl_dict is not None:
        save_dict.update(additioanl_dict)
    torch.save(save_dict, current_pretrained_path)
    if cfg.save_freq > 0 and (epoch % cfg.save_freq) == 0:
        # Keep a permanent per-epoch milestone copy.
        milestone_ckpt_name = f'{save_name}_E{epoch}.pth'
        milestone_pretrained_path = os.path.join(cfg.ckpt_dir, milestone_ckpt_name)
        shutil.copyfile(current_pretrained_path, milestone_pretrained_path)
        logging.info('Saved in {}'.format(milestone_pretrained_path))
    if is_best:
        best_ckpt_name = f'{save_name}_ckpt_best.pth' if save_name else 'ckpt_best.pth'
        best_pretrained_path = os.path.join(cfg.ckpt_dir, best_ckpt_name)
        shutil.copyfile(current_pretrained_path, best_pretrained_path)
        logging.info('Found the best model and saved in {}'.format(best_pretrained_path))
def main(inp_dir, oup_dir, map_fn):
    # Convert {train,dev,test}.json in inp_dir into parallel .data/.text files
    # in oup_dir ("dev" is renamed to "valid" on output).
    # NOTE(review): `func` below is a free variable resolved at module scope
    # and handed to map_fn together with the open JSON file — confirm it is
    # defined in the enclosing module.
    for (inp_split, oup_split) in [('train', 'train'), ('dev', 'valid'), ('test', 'test')]:
        n = 0
        with open(os.path.join(inp_dir, f'{inp_split}.json')) as fj:
            with open(os.path.join(oup_dir, f'{oup_split}.text'), 'w') as ftext, open(os.path.join(oup_dir, f'{oup_split}.data'), 'w') as fdata:
                # map_fn yields (data, text) pairs; one line each per record.
                for (data, text) in map_fn(func, fj):
                    fdata.write((data.strip() + '\n'))
                    ftext.write((text.strip() + '\n'))
                    n += 1
                    if ((n % 10000) == 0):
                        print(('Processed %d lines' % n))
class clean(_clean):
    # setup.py `clean` command extended to also remove generated binaries,
    # headers, and the native build tree before the standard distutils clean.
    def run(self):
        # self.execute honors --dry-run and logs `msg` before invoking.
        self.execute(_clean_bins, (), msg='Cleaning binary files and headers')
        self.execute(_clean_native_build, (), msg='Cleaning native build')
        _clean.run(self)
def write(filename, rows, mode='w'):
    """Write `rows` to a comma-delimited CSV file.

    If the first element of `rows` is a tuple, `rows` is treated as a sequence
    of records (one CSV line each); otherwise `rows` itself is written as a
    single record. An empty `rows` creates/truncates the file and writes
    nothing (previously this raised IndexError on rows[0]).

    Args:
        filename: destination path.
        rows: list of tuples (multiple records) or a flat list (one record).
        mode: file open mode, 'w' to overwrite or 'a' to append.
    """
    # newline='' is required by the csv module so it controls line endings
    # itself (avoids doubled blank lines on Windows).
    with open(filename, mode, newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        if not rows:
            return
        # isinstance (vs `type(...) is tuple`) also recognizes tuple
        # subclasses such as namedtuples as records.
        if isinstance(rows[0], tuple):
            writer.writerows(rows)
        else:
            writer.writerow(rows)
class TestSequeneceGenerator(TestSequenceGeneratorBase):
    # Beam-search SequenceGenerator tests on a tiny fixture model (beam=2,
    # vocabulary {w1, w2, eos}) checking tokens and scores of ranked hypotheses.
    # NOTE(review): class name carries the historical "Sequenece" typo; renaming
    # would change the public test identifier, so it is kept.
    def setUp(self):
        # Fixture supplies the dictionary, two token ids, a 2-sentence batch,
        # and a deterministic toy model.
        (self.tgt_dict, self.w1, self.w2, src_tokens, src_lengths, self.model) = test_utils.sequence_generator_setup()
        self.sample = {'net_input': {'src_tokens': src_tokens, 'src_lengths': src_lengths}}

    def test_with_normalization(self):
        # Default behavior: scores normalized by hypothesis length.
        generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2)
        hypos = generator.forward(self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0])
        self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0])
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6])

    def test_without_normalization(self):
        # Without normalization the shorter hypothesis ranks first for sent 1.
        generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2, normalize_scores=False)
        hypos = generator.forward(self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False)
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False)
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False)
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False)

    def test_with_lenpen_favoring_short_hypos(self):
        # len_penalty < 1 rewards shorter hypotheses.
        lenpen = 0.6
        generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen)
        hypos = generator.forward(self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)

    def test_with_lenpen_favoring_long_hypos(self):
        # len_penalty > 1 rewards longer hypotheses, flipping the rankings.
        lenpen = 5.0
        generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen)
        hypos = generator.forward(self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][0], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[0][1], [w1, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6], lenpen=lenpen)

    def test_maxlen(self):
        # max_len_b=2 forces EOS at length <= 3 (2 tokens + eos).
        generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2, max_len_b=2)
        hypos = generator.forward(self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        self.assertHypoTokens(hypos[0][1], [w2, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6])
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6])
        self.assertHypoTokens(hypos[1][1], [w2, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01])

    def test_encoder_with_different_output_len(self):
        # A model whose encoder reshapes the source sequence must still emit
        # attention for every hypothesis.
        args = self.model.encoder.args
        task = test_utils.TestTranslationTask.setup_task(args, self.tgt_dict, self.tgt_dict)
        reshaping_model = test_utils.TestReshapingModel.build_model(args, task)
        generator = SequenceGenerator([reshaping_model], self.tgt_dict, beam_size=2, max_len_b=2)
        hypos = generator.forward(self.sample)
        for sent in [0, 1]:
            for beam in [0, 1]:
                assert (hypos[sent][beam]['attention'] is not None)
class upConv3D(nn.Module):
    """3-D upsampling block followed by squeeze-and-excitation gating.

    Two variants: 'transpose' uses a ConvTranspose3d; anything else uses
    trilinear upsampling (spatial x2 only) followed by a 1x1x1 Conv3d.
    An optional BatchNorm3d is appended last.
    """

    def __init__(self, in_ch, out_ch, kernel_size, stride, padding, upmode='transpose', batchnorm=False):
        super().__init__()
        self.upmode = upmode
        if self.upmode == 'transpose':
            stages = [
                nn.ConvTranspose3d(in_ch, out_ch, kernel_size=kernel_size, stride=stride, padding=padding),
                SEGating(out_ch),
            ]
        else:
            # Upsample H and W only (temporal dimension kept at scale 1).
            stages = [
                nn.Upsample(mode='trilinear', scale_factor=(1, 2, 2), align_corners=False),
                nn.Conv3d(in_ch, out_ch, kernel_size=1, stride=1),
                SEGating(out_ch),
            ]
        if batchnorm:
            stages.append(nn.BatchNorm3d(out_ch))
        self.upconv = nn.Sequential(*stages)

    def forward(self, x):
        return self.upconv(x)
def visualize_images(images: List[Any], size: Optional[Tuple[(int, int)]]=(224, 224), *args, **kwargs):
    """Display a grid of PIL images with matplotlib.

    Images are optionally resized to `size`, converted to tensors, stacked,
    and rendered through torchvision's make_grid (extra args are forwarded).
    If `size` is None, only a single image may be passed.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        print('Visualization tools require matplotlib. ' 'Install using pip install matplotlib.')
        raise
    assert size is not None or len(images) == 1, 'If size is not passed, only one image can be visualized'
    steps = []
    if size is not None:
        steps.append(torchvision.transforms.Resize(size=size))
    steps.append(torchvision.transforms.ToTensor())
    pipeline = torchvision.transforms.Compose(steps)
    img_tensors = torch.stack([pipeline(image) for image in images])
    grid = torchvision.utils.make_grid(img_tensors, *args, **kwargs)
    plt.axis('off')
    # CHW -> HWC for imshow.
    plt.imshow(grid.permute(1, 2, 0))
def _from_sgf(sgf: str):
    # Replay an SGF game record into a Go environment state.
    # Only the main line is replayed; if a branch is detected the state at the
    # branch point is returned.
    indexes = 'abcdefghijklmnopqrs'
    infos = sgf.split(';')
    game_info = infos[1]
    game_record = infos[2:]
    size = 19
    if (game_info.find('SZ') != (- 1)):
        # Parse the SZ[..] property; handles 1- and 2-digit board sizes.
        # NOTE(review): assumes the size is at most 2 digits and SZ appears
        # with this exact layout — malformed headers are not handled.
        sz = game_info[(game_info.find('SZ') + 3):(game_info.find('SZ') + 5)]
        if (sz[1] == ']'):
            sz = sz[0]
        size = int(sz)
    env = Go(size=size)
    init = jax.jit(env.init)
    step = jax.jit(env.step)
    key = jax.random.PRNGKey(0)
    state = init(key)
    has_branch = False
    for reco in game_record:
        if (reco[(- 2)] == ')'):
            # A ')' just before the end of a move node marks a variation start.
            print('this sgf has some branches')
            print('loaded main branch')
            has_branch = True
        if (reco[2] == ']'):
            # Empty coordinates (B[] / W[]) encode a pass: action index size*size.
            state = step(state, (size * size))
            if has_branch:
                return state
            continue
        # Letters map to 0-based column (yoko) and row (tate) coordinates.
        pos = reco[2:4]
        yoko = indexes.index(pos[0])
        tate = indexes.index(pos[1])
        action = (yoko + (size * tate))
        state = step(state, action)
        if has_branch:
            return state
    return state
def generate():
    """Generate layer_reference/units.rst documenting RETURNN's RNN cell classes.

    Collects all registered RNN cell classes, keeps public classes whose names
    end in 'Cell' and that subclass RNNCell or RecSeqCellOp, and writes one
    Sphinx autoclass section per class after the fixed header.
    """
    RecLayer._create_rnn_cells_dict()
    layer_names = sorted(RecLayer._rnn_cells_dict.keys())
    # Fix: the file was opened without a context manager and leaked on any
    # exception below; `with` guarantees flush + close.
    with open('layer_reference/units.rst', 'w') as rst_file:
        rst_file.write(header_text)
        for layer_name in layer_names:
            unit_class = RecLayer.get_rnn_cell_class(layer_name)
            if not (issubclass(unit_class, RNNCell) or issubclass(unit_class, RecSeqCellOp)):
                continue
            module = unit_class.__module__
            name = unit_class.__name__
            # Only document public classes following the *Cell naming convention.
            if not name.endswith('Cell') or name.startswith('_'):
                continue
            rst_file.write('\n')
            rst_file.write('%s\n' % name)
            # Underline the section title to match its length (reST heading).
            rst_file.write('%s\n' % ('-' * len(name)))
            rst_file.write('\n')
            rst_file.write('.. autoclass:: %s.%s\n' % (module, name))
            rst_file.write(' :members:\n')
            rst_file.write(' :undoc-members:\n')
            rst_file.write('\n')
class CombineLosses(nn.Module):
    """Weighted sum of several loss modules evaluated on the same inputs.

    forward(pred, gt) returns sum_i weights[i] * losses[i](pred, gt) as a
    float tensor on pred's device.
    """

    def __init__(self, loss_weights: list, loss_instances: list):
        super(CombineLosses, self).__init__()
        self.loss_weights = loss_weights
        # ModuleList so the sub-losses register their parameters/buffers.
        self.loss_instances = nn.ModuleList(loss_instances)

    def forward(self, pred_score, gt_score):
        total = torch.tensor(0, dtype=torch.float).to(pred_score.device)
        for weight, criterion in zip(self.loss_weights, self.loss_instances):
            total = total + weight * criterion(pred_score, gt_score)
        return total
def register_Ns3CallbackImpl__Void_Ns3LrWpanMacState_Ns3LrWpanMacState_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    # Auto-generated pybindgen registration for the two-argument LrWpanMacState
    # callback specialization; operator() is exposed to Python as __call__.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImpl< void, ns3::LrWpanMacState, ns3::LrWpanMacState, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    cls.add_method('DoGetTypeid', 'std::string', [], is_static=True)
    cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True)
    cls.add_method('operator()', 'void', [param('ns3::LrWpanMacState', 'arg0'), param('ns3::LrWpanMacState', 'arg1')], is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
    return
class SpeakerVerificationDataLoader(DataLoader):
    """DataLoader whose batch unit is a speaker rather than an utterance.

    Each batch holds `speakers_per_batch` speakers; collate packages them into
    a SpeakerBatch sampling `utterances_per_speaker` partial utterances each.
    Shuffling and drop_last are disabled by design (sampling is randomized
    inside SpeakerBatch instead).
    """

    def __init__(self, dataset, speakers_per_batch, utterances_per_speaker, sampler=None, batch_sampler=None, num_workers=0, pin_memory=False, timeout=0, worker_init_fn=None):
        # Must be set before super().__init__ so collate can read it.
        self.utterances_per_speaker = utterances_per_speaker
        super().__init__(
            dataset=dataset,
            batch_size=speakers_per_batch,
            shuffle=False,
            sampler=sampler,
            batch_sampler=batch_sampler,
            num_workers=num_workers,
            collate_fn=self.collate,
            pin_memory=pin_memory,
            drop_last=False,
            timeout=timeout,
            worker_init_fn=worker_init_fn,
        )

    def collate(self, speakers):
        # Turn the sampled speakers into one training batch of fixed-size
        # partial utterances.
        return SpeakerBatch(speakers, self.utterances_per_speaker, partials_n_frames)
# NOTE(review): the decorator prefix above was truncated by chunking
# (presumably `@torch.jit.script`); remaining tokens kept as-is.
.script
def call_rpc_torchscript_with_record_function(dst_worker_name: str, block: str) -> Tensor:
    # Issue an async RPC to `dst_worker_name` running the remote helper with a
    # profiler record_function block named `block`, and wait for the result.
    fut = rpc.rpc_async(dst_worker_name, script_add_ones_with_record_function, (torch.tensor(1), block))
    return fut.wait()
class _ctypes(object): def __init__(self, array, ptr=None): self._arr = array if ctypes: self._ctypes = ctypes self._data = _get_void_ptr(array) assert (self._data.value == ptr) else: self._ctypes = _missing_ctypes() self._data = self._ctypes.c_void_p(ptr) self._data._objects = array if (self._arr.ndim == 0): self._zerod = True else: self._zerod = False def data_as(self, obj): return self._ctypes.cast(self._data, obj) def shape_as(self, obj): if self._zerod: return None return (obj * self._arr.ndim)(*self._arr.shape) def strides_as(self, obj): if self._zerod: return None return (obj * self._arr.ndim)(*self._arr.strides) def data(self): return self._data.value def shape(self): return self.shape_as(_getintp_ctype()) def strides(self): return self.strides_as(_getintp_ctype()) def _as_parameter_(self): return self._data get_data = data.fget get_shape = shape.fget get_strides = strides.fget get_as_parameter = _as_parameter_.fget
def load_checkpoint(args, trainer, **passthrough_args):
    # Restore trainer state from a checkpoint and build the matching training
    # iterator. Returns (extra_state, epoch_itr).
    if (args.distributed_rank == 0):
        # Only rank 0 creates the save directory to avoid a filesystem race.
        os.makedirs(args.save_dir, exist_ok=True)
    if (args.restore_file == 'checkpoint_last.pt'):
        checkpoint_path = os.path.join(args.save_dir, 'checkpoint_last.pt')
    else:
        checkpoint_path = args.restore_file
    # NOTE(review): eval() of a CLI-provided string — acceptable only because
    # --optimizer-overrides comes from the local operator, never from
    # untrusted input; flagging rather than changing behavior.
    extra_state = trainer.load_checkpoint(checkpoint_path, args.reset_optimizer, args.reset_lr_scheduler, eval(args.optimizer_overrides), reset_meters=args.reset_meters)
    if ((extra_state is not None) and ('best' in extra_state) and (not args.reset_optimizer) and (not args.reset_meters)):
        # Carry the best validation score across restarts (stored as a
        # function attribute on save_checkpoint).
        save_checkpoint.best = extra_state['best']
    if ((extra_state is not None) and (not args.reset_dataloader)):
        # Resume the dataloader exactly where the checkpoint left off.
        itr_state = extra_state['train_iterator']
        epoch_itr = trainer.get_train_iterator(epoch=itr_state['epoch'], load_dataset=True, **passthrough_args)
        epoch_itr.load_state_dict(itr_state)
    else:
        epoch_itr = trainer.get_train_iterator(epoch=1, load_dataset=True, **passthrough_args)
    trainer.lr_step(epoch_itr.epoch)
    return (extra_state, epoch_itr)
class TestTokenize(unittest.TestCase):
    """Unit tests for the SQL lexer's ``tokenize`` generator."""

    def test_simple(self):
        """A basic statement yields the expected (ttype, value) pairs."""
        s = 'select * from foo;'
        stream = lexer.tokenize(s)
        # Bug fix: self.assert_ is a deprecated alias removed in Python 3.12;
        # use assertTrue instead.
        self.assertTrue(isinstance(stream, types.GeneratorType))
        tokens = list(stream)
        self.assertEqual(len(tokens), 8)
        self.assertEqual(len(tokens[0]), 2)
        self.assertEqual(tokens[0], (Keyword.DML, u'select'))
        self.assertEqual(tokens[-1], (Punctuation, u';'))

    def test_backticks(self):
        """Backtick-quoted identifiers are tokenized as names."""
        s = '`foo`.`bar`'
        tokens = list(lexer.tokenize(s))
        self.assertEqual(len(tokens), 3)
        self.assertEqual(tokens[0], (Name, u'`foo`'))

    def test_linebreaks(self):
        """All newline conventions (\\n, \\r, \\r\\n, mixed) round-trip."""
        s = 'foo\nbar\n'
        tokens = lexer.tokenize(s)
        self.assertEqual(''.join((str(x[1]) for x in tokens)), s)
        s = 'foo\rbar\r'
        tokens = lexer.tokenize(s)
        self.assertEqual(''.join((str(x[1]) for x in tokens)), s)
        s = 'foo\r\nbar\r\n'
        tokens = lexer.tokenize(s)
        self.assertEqual(''.join((str(x[1]) for x in tokens)), s)
        s = 'foo\r\nbar\n'
        tokens = lexer.tokenize(s)
        self.assertEqual(''.join((str(x[1]) for x in tokens)), s)

    def test_inline_keywords(self):
        """Keywords are not matched inside longer identifiers (issue-style tests)."""
        s = 'create created_foo'
        tokens = list(lexer.tokenize(s))
        self.assertEqual(len(tokens), 3)
        self.assertEqual(tokens[0][0], Keyword.DDL)
        self.assertEqual(tokens[2][0], Name)
        self.assertEqual(tokens[2][1], u'created_foo')
        s = 'enddate'
        tokens = list(lexer.tokenize(s))
        self.assertEqual(len(tokens), 1)
        self.assertEqual(tokens[0][0], Name)
        s = 'join_col'
        tokens = list(lexer.tokenize(s))
        self.assertEqual(len(tokens), 1)
        self.assertEqual(tokens[0][0], Name)
        s = 'left join_col'
        tokens = list(lexer.tokenize(s))
        self.assertEqual(len(tokens), 3)
        self.assertEqual(tokens[2][0], Name)
        self.assertEqual(tokens[2][1], 'join_col')

    def test_negative_numbers(self):
        """A leading minus is folded into the integer token."""
        s = 'values(-1)'
        tokens = list(lexer.tokenize(s))
        self.assertEqual(len(tokens), 4)
        self.assertEqual(tokens[2][0], Number.Integer)
        self.assertEqual(tokens[2][1], '-1')

    def test_tab_expansion(self):
        """A tab expands to ``tabsize`` spaces (Python 2 behavior only)."""
        # Bug fix: the original decorator was truncated to ".skipif(...)" and
        # did not parse; the skip condition is applied in-method with stdlib
        # unittest instead of pytest.
        if sys.version_info >= (3, 0):
            self.skipTest('sys.version_info >= (3,0)')
        s = '\t'
        lex = lexer.Lexer()
        lex.tabsize = 5
        tokens = list(lex.get_tokens(s))
        self.assertEqual(tokens[0][1], (' ' * 5))
def getScoreUnigram(candidate, gold):
    """Greedy overlap score between candidate and gold label collections.

    Each label is a set-like object.  For every (gold, candidate) pair the
    overlap ratio ``|gold & cand| / |gold|`` is computed (via set difference);
    gold labels are then greedily matched to their best-scoring candidate
    (candidates may be reused) and the matched scores are averaged over the
    number of gold labels.
    """
    # Pairwise score table keyed by str() of each label.
    pair_scores = {}
    for gold_label in gold:
        row = {}
        for cand_label in candidate:
            row[str(cand_label)] = (len(gold_label) - len(gold_label - cand_label)) / len(gold_label)
        pair_scores[str(gold_label)] = row

    matched = {}
    while pair_scores:
        best_score = 0
        best_pair = ''
        # Scan the remaining table; ties go to the last pair seen (>=),
        # matching the original's behavior.
        for g_key, row in pair_scores.items():
            for c_key, score in row.items():
                if score >= best_score:
                    best_score = score
                    best_pair = (g_key, c_key)
        matched[best_pair] = pair_scores[best_pair[0]][best_pair[1]]
        # Each gold label is matched at most once.
        pair_scores.pop(best_pair[0])

    return sum(matched.values()) / len(gold)
def overapproximate(expr):
    """Overapproximate ``expr``; lists are handled element-wise, recursively."""
    if not isinstance(expr, list):
        # Scalar case: delegate to the underlying implementation.
        return _overapproximate(expr)
    return [overapproximate(item) for item in expr]
def test_potsdam():
    """Smoke test: the pseudo Potsdam fixture loads exactly one sample."""
    here = osp.dirname(__file__)
    dataset = PotsdamDataset(
        pipeline=[],
        img_dir=osp.join(here, '../data/pseudo_potsdam_dataset/img_dir'),
        ann_dir=osp.join(here, '../data/pseudo_potsdam_dataset/ann_dir'),
    )
    assert len(dataset) == 1
class MergePlan(AddRows, MergeRows):
    """Merges a leader's query plan with the plans of its parallel workers."""

    def __init__(self, log_level=Log.info):
        self.ServerId = ''
        self.LogLevel = log_level

    def __set_serverId(self, serverId):
        # Private (name-mangled) setter, used only by add_workers_rows().
        self.ServerId = serverId

    # Public methods
    # (The original held these section markers as bare string-literal
    # expression statements; converted to real comments, which are free.)

    def merge_plans(self, leader_plan, worker_plans):
        """Return a copy of ``leader_plan`` with worker rows merged in.

        The leader plan is copied so the caller's object is not mutated.
        """
        _leader_plan = leader_plan.copy()
        (_numPlanWorkers, _numWorkers) = self.prepare_merge_rows(_leader_plan)
        if len(worker_plans) > 0:
            self.merge_rows(_leader_plan, worker_plans)
        # If some parallel workers completed processing early, extrapolate the
        # "Actual Rows" of `_leader_plan` because the values of the terminated
        # processes are not added by merge_rows().
        if (len(worker_plans) + 1) < _numWorkers:
            self.extrapolate_rows(_leader_plan, (len(worker_plans) + 1), _numWorkers)
        return _leader_plan

    def add_workers_rows(self, serverId, current_seqid, max_seqid):
        """Read the server's log CSV and add rows with seqid in
        ``(current_seqid, max_seqid]``.

        CSV columns used: 0 = seqid, 6 = queryid, 7 = planid
        (assumed from the indexing below -- confirm against the log format).
        """
        self.__set_serverId(serverId)
        with open(self.get_log_csv_path(self.ServerId), newline='') as f:
            _reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_NONE)
            for row in _reader:
                _seqid = int(row[0])
                _queryid = int(row[6])
                _planid = int(row[7])
                if (current_seqid < _seqid) and (_seqid <= max_seqid):
                    self.add_rows(_seqid, _queryid, _planid)
        # Bug fix: removed the redundant f.close() that sat inside the
        # with-block; the context manager already closes the file.
class AttentionDecoderTest(tf.test.TestCase, DecoderTests):
    """Runs the shared DecoderTests suite against AttentionDecoder and adds a
    check that the returned attention scores are a valid distribution."""

    def setUp(self):
        tf.test.TestCase.setUp(self)
        tf.logging.set_verbosity(tf.logging.INFO)
        # DecoderTests is a plain mixin without a setUp of its own, so its
        # initializer is invoked by hand here.
        DecoderTests.__init__(self)
        self.attention_dim = 64   # units in the attention layer
        self.input_seq_len = 10   # time steps of the fake encoder outputs

    def create_decoder(self, helper, mode):
        """Build an AttentionDecoder over random attention keys/values.

        Required hook for DecoderTests; `helper` is unused here because the
        decoder is constructed directly from its params.
        """
        attention_fn = AttentionLayerDot(params={'num_units': self.attention_dim}, mode=tf.contrib.learn.ModeKeys.TRAIN)
        # Random (batch, time, depth) tensors stand in for encoder outputs.
        attention_values = tf.convert_to_tensor(np.random.randn(self.batch_size, self.input_seq_len, 32), dtype=tf.float32)
        attention_keys = tf.convert_to_tensor(np.random.randn(self.batch_size, self.input_seq_len, 32), dtype=tf.float32)
        params = AttentionDecoder.default_params()
        params['max_decode_length'] = self.max_decode_length
        # Lengths 1..batch_size give each batch element a distinct valid span.
        return AttentionDecoder(params=params, mode=mode, vocab_size=self.vocab_size, attention_keys=attention_keys, attention_values=attention_values, attention_values_length=(np.arange(self.batch_size) + 1), attention_fn=attention_fn)

    def test_attention_scores(self):
        """Scores must be shaped [time, batch, input_len] and sum to 1 over
        the input axis (softmax property)."""
        decoder_output_ = self.test_with_fixed_inputs()
        np.testing.assert_array_equal(decoder_output_.attention_scores.shape, [self.sequence_length, self.batch_size, self.input_seq_len])
        # Each step's attention over the inputs is a probability distribution.
        scores_sum = np.sum(decoder_output_.attention_scores, axis=2)
        np.testing.assert_array_almost_equal(scores_sum, np.ones([self.sequence_length, self.batch_size]))
def _add_entity_variations(utterances, entity_variations, entity_value): utterances[entity_value] = entity_value for variation in entity_variations[entity_value]: if variation: utterances[variation] = entity_value return utterances
class ScopedConstructor():
    """Owns a Z3 constructor handle and frees it when garbage-collected,
    provided the owning context is still alive."""

    def __init__(self, c, ctx):
        self.c = c
        self.ctx = ctx

    def __del__(self):
        # Skip cleanup once the context itself has been torn down.
        if self.ctx.ref() is None:
            return
        Z3_del_constructor(self.ctx.ref(), self.c)
def create_session(agent_path):
    """Create a Dialogflow CX SessionsClient for the agent's location.

    Regional agents must use a regional API endpoint
    (``<location>-dialogflow.googleapis.com:443``); global agents use the
    library's default endpoint.

    Args:
        agent_path: Full agent resource name, e.g.
            ``projects/<p>/locations/<l>/agents/<a>``.

    Returns:
        Tuple ``(client_options, session_client)`` where ``client_options``
        is None for global agents.
    """
    agent_components = AgentsClient.parse_agent_path(agent_path)
    location_id = agent_components['location']
    # Bug fix: previously both names were only bound inside the regional
    # branch, so a 'global' agent hit UnboundLocalError at the return.
    client_options = None
    if location_id != 'global':
        api_endpoint = f'{location_id}-dialogflow.googleapis.com:443'
        client_options = {'api_endpoint': api_endpoint}
    session_client = SessionsClient(client_options=client_options)
    return (client_options, session_client)