Columns:
id: int64 (values 0 – 190k)
prompt: string (lengths 21 – 13.4M)
docstring: string (lengths 1 – 12k)
11,998
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, T5Tokenizer def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"): extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {} tokenizer.padding_side = padding_side return tokenizer( [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )
null
11,999
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, T5Tokenizer The provided code snippet includes necessary dependencies for implementing the `trim_batch` function. Write a Python function `def trim_batch( input_ids, pad_token_id, attention_mask=None, )` to solve the following problem: Remove columns that are populated exclusively by pad_token_id Here is the function: def trim_batch( input_ids, pad_token_id, attention_mask=None, ): """Remove columns that are populated exclusively by pad_token_id""" keep_column_mask = input_ids.ne(pad_token_id).any(dim=0) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
Remove columns that are populated exclusively by pad_token_id
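A minimal usage sketch of `trim_batch` from the row above; the tensor values and pad_token_id=0 are made up for illustration, not taken from any particular tokenizer:

import torch

def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id (same body as the row above)."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    return input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]

# Two sequences padded to length 5; the last two columns contain only the pad id.
batch = torch.tensor([[101, 7, 8, 0, 0],
                      [101, 9, 0, 0, 0]])
print(trim_batch(batch, pad_token_id=0))
# tensor([[101,   7,   8],
#         [101,   9,   0]])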
12,000
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, T5Tokenizer def flatten_list(summary_ids: List[List]): return [x for x in itertools.chain.from_iterable(summary_ids)]
null
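`flatten_list` above simply chains away one level of nesting; a tiny illustration with made-up input:

import itertools

def flatten_list(summary_ids):
    return [x for x in itertools.chain.from_iterable(summary_ids)]

print(flatten_list([[1, 2], [3], [4, 5]]))  # [1, 2, 3, 4, 5]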
12,001
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, T5Tokenizer def save_json(content, path, indent=4, **json_dump_kwargs): with open(path, "w") as f: json.dump(content, f, indent=indent, **json_dump_kwargs) def get_git_info(): repo = git.Repo(search_parent_directories=True) repo_infos = { "repo_id": str(repo), "repo_sha": str(repo.head.object.hexsha), "repo_branch": str(repo.active_branch), "hostname": str(socket.gethostname()), } return repo_infos The provided code snippet includes necessary dependencies for implementing the `save_git_info` function. Write a Python function `def save_git_info(folder_path: str) -> None` to solve the following problem: Save git information to output_dir/git_log.json Here is the function: def save_git_info(folder_path: str) -> None: """Save git information to output_dir/git_log.json""" repo_infos = get_git_info() save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
Save git information to output_dir/git_log.json
12,002
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, T5Tokenizer def load_json(path): with open(path) as f: return json.load(f)
null
12,003
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, T5Tokenizer The provided code snippet includes necessary dependencies for implementing the `lmap` function. Write a Python function `def lmap(f: Callable, x: Iterable) -> List` to solve the following problem: list(map(f, x)) Here is the function: def lmap(f: Callable, x: Iterable) -> List: """list(map(f, x))""" return list(map(f, x))
list(map(f, x))
12,004
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, T5Tokenizer The provided code snippet includes necessary dependencies for implementing the `pickle_save` function. Write a Python function `def pickle_save(obj, path)` to solve the following problem: pickle.dump(obj, path) Here is the function: def pickle_save(obj, path): """pickle.dump(obj, path)""" with open(path, "wb") as f: return pickle.dump(obj, f)
pickle.dump(obj, path)
12,005
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, T5Tokenizer def exact_match_score(prediction, ground_truth): return normalize_answer(prediction) == normalize_answer(ground_truth) def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict: assert len(output_lns) == len(reference_lns) em = 0 for hypo, pred in zip(output_lns, reference_lns): em += exact_match_score(hypo, pred) if len(output_lns) > 0: em /= len(output_lns) return {"em": em}
null
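`normalize_answer` is not included in the row above, so the SQuAD-style normalization in this sketch (lowercasing, stripping punctuation, articles, and extra whitespace) is an assumption about its behavior; the example strings are made up:

import re
import string
from typing import Dict, List

def normalize_answer(s):
    # assumed SQuAD-style normalization: lowercase, drop punctuation and articles, collapse whitespace
    s = s.lower()
    s = "".join(ch for ch in s if ch not in set(string.punctuation))
    s = re.sub(r"\b(a|an|the)\b", " ", s)
    return " ".join(s.split())

def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)

def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = sum(exact_match_score(hypo, ref) for hypo, ref in zip(output_lns, reference_lns))
    return {"em": em / len(output_lns) if output_lns else 0}

print(calculate_exact_match(["The Eiffel Tower!", "Berlin"], ["eiffel tower", "Paris"]))  # {'em': 0.5}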
12,006
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, T5Tokenizer def is_rag_model(model_prefix): return model_prefix.startswith("rag")
null
12,007
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, T5Tokenizer logger = getLogger(__name__) def set_extra_model_params(extra_params, hparams, config): equivalent_param = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead equivalent_param["dropout"] = "dropout_rate" for p in extra_params: if getattr(hparams, p, None): if not hasattr(config, p) and not hasattr(config, equivalent_param[p]): logger.info("config doesn't have a `{}` attribute".format(p)) delattr(hparams, p) continue set_p = p if hasattr(config, p) else equivalent_param[p] setattr(config, set_p, getattr(hparams, p)) delattr(hparams, p) return hparams, config
null
12,008
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import torch from datasets import Features, Sequence, Value, load_dataset import faiss from transformers import ( DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser, RagRetriever, RagSequenceForGeneration, RagTokenizer, ) def split_text(text: str, n=100, character=" ") -> List[str]: """Split the text every ``n``-th occurrence of ``character``""" text = text.split(character) return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)] The provided code snippet includes necessary dependencies for implementing the `split_documents` function. Write a Python function `def split_documents(documents: dict) -> dict` to solve the following problem: Split documents into passages Here is the function: def split_documents(documents: dict) -> dict: """Split documents into passages""" titles, texts = [], [] for title, text in zip(documents["title"], documents["text"]): if text is not None: for passage in split_text(text): titles.append(title if title is not None else "") texts.append(passage) return {"title": titles, "text": texts}
Split documents into passages
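A quick illustration of the `split_text` helper used by `split_documents` above; n=3 is passed only so the split is visible on a short string (the default in the row is every 100 words), and the input text is made up:

from typing import List

def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character`` (same body as the row above)."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]

print(split_text("one two three four five six seven", n=3))
# ['one two three', 'four five six', 'seven']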
12,009
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import torch from datasets import Features, Sequence, Value, load_dataset import faiss from transformers import ( DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser, RagRetriever, RagSequenceForGeneration, RagTokenizer, ) device = "cuda" if torch.cuda.is_available() else "cpu" The provided code snippet includes necessary dependencies for implementing the `embed` function. Write a Python function `def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict` to solve the following problem: Compute the DPR embeddings of document passages Here is the function: def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict: """Compute the DPR embeddings of document passages""" input_ids = ctx_tokenizer( documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt" )["input_ids"] embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()}
Compute the DPR embeddings of document passages
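A sketch of how `embed` is typically wired into `datasets.Dataset.map`, mirroring the pattern in the `embed_update` row further below (the facebook/dpr-ctx_encoder-multiset-base checkpoint and batch_size=16 come from that row; the toy document is made up):

from functools import partial

import torch
from datasets import Dataset, Features, Sequence, Value
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast

device = "cuda" if torch.cuda.is_available() else "cpu"
ctx_encoder = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-multiset-base").to(device)
ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained("facebook/dpr-ctx_encoder-multiset-base")

def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages (same body as the row above)."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}

dataset = Dataset.from_dict({"title": ["Eiffel Tower"], "text": ["The Eiffel Tower is in Paris."]})
new_features = Features(
    {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
)
dataset = dataset.map(
    partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
    batched=True,
    batch_size=16,
    features=new_features,  # store embeddings as float32 to save space, as in the embed_update row
)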
12,010
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def consolidate( model_type, generator_name_or_path: str, question_encoder_name_or_path: str, dest_dir: Path, config_name_or_path: str = None, generator_tokenizer_name_or_path: str = None, question_encoder_tokenizer_name_or_path: str = None, ): if config_name_or_path is None: config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base" if generator_tokenizer_name_or_path is None: generator_tokenizer_name_or_path = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: question_encoder_tokenizer_name_or_path = question_encoder_name_or_path model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration # Save model. rag_config = RagConfig.from_pretrained(config_name_or_path) gen_config = AutoConfig.from_pretrained(generator_name_or_path) question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path) rag_config.generator = gen_config rag_config.question_encoder = question_encoder_config rag_model = model_class.from_pretrained_question_encoder_generator( question_encoder_name_or_path, generator_name_or_path, config=rag_config ) rag_model.save_pretrained(dest_dir) # Sanity check. model_class.from_pretrained(dest_dir) # Save tokenizers. gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path) gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/") question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path) question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
null
12,011
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def count_trainable_parameters(model): model_parameters = filter(lambda p: p.requires_grad, model.parameters()) params = sum([np.prod(p.size()) for p in model_parameters]) return params
null
12,012
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json The provided code snippet includes necessary dependencies for implementing the `get_checkpoint_callback` function. Write a Python function `def get_checkpoint_callback(output_dir, metric)` to solve the following problem: Saves the best model by validation EM score. Here is the function: def get_checkpoint_callback(output_dir, metric): """Saves the best model by validation EM score.""" if metric == "rouge2": exp = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": exp = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": exp = "{val_avg_em:.4f}-{step_count}" else: raise NotImplementedError( f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this" " function." ) checkpoint_callback = ModelCheckpoint( dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=3, every_n_epochs=1, # maybe save a checkpoint every time val is run, not just end of epoch. ) return checkpoint_callback
Saves the best model by validation EM score.
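A minimal sketch of attaching the resulting callback to a Trainer; the output directory, metric names, and Trainer arguments are placeholders, and the exact ModelCheckpoint/Trainer kwargs vary across pytorch-lightning versions (the row notes every_n_epochs needs PL > 1.3):

import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint

# equivalent to get_checkpoint_callback("checkpoints/", metric="em") from the row above
checkpoint_callback = ModelCheckpoint(
    dirpath="checkpoints/",
    filename="{val_avg_em:.4f}-{step_count}",
    monitor="val_em",
    mode="max",
    save_top_k=3,
    every_n_epochs=1,
)
trainer = pl.Trainer(max_epochs=3, callbacks=[checkpoint_callback])
# trainer.fit(model)  # `model` must be a LightningModule that logs val_em, val_avg_em and step_count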
12,013
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def get_early_stopping_callback(metric, patience): return EarlyStopping( monitor=f"val_{metric}", # does this need avg? mode="min" if "loss" in metric else "max", patience=patience, verbose=True, )
null
12,014
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging from utils_rag import exact_match_score, f1_score def infer_model_type(model_name_or_path): if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None
null
12,015
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging from utils_rag import exact_match_score, f1_score logger = logging.getLogger(__name__) def metric_max_over_ground_truths(metric_fn, prediction, ground_truths): return max(metric_fn(prediction, gt) for gt in ground_truths) def f1_score(prediction, ground_truth): prediction_tokens = normalize_answer(prediction).split() ground_truth_tokens = normalize_answer(ground_truth).split() common = Counter(prediction_tokens) & Counter(ground_truth_tokens) num_same = sum(common.values()) if num_same == 0: return 0 precision = 1.0 * num_same / len(prediction_tokens) recall = 1.0 * num_same / len(ground_truth_tokens) f1 = (2 * precision * recall) / (precision + recall) return f1 def exact_match_score(prediction, ground_truth): return normalize_answer(prediction) == normalize_answer(ground_truth) def get_scores(args, preds_path, gold_data_path): hypos = [line.strip() for line in open(preds_path, "r").readlines()] answers = [] if args.gold_data_mode == "qa": data = pd.read_csv(gold_data_path, sep="\t", header=None) for answer_list in data[1]: ground_truths = ast.literal_eval(answer_list) answers.append(ground_truths) else: references = [line.strip() for line in open(gold_data_path, "r").readlines()] answers = [[reference] for reference in references] f1 = em = total = 0 for prediction, ground_truths in zip(hypos, answers): total += 1 em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths) f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths) em = 100.0 * em / total f1 = 100.0 * f1 / total logger.info(f"F1: {f1:.2f}") logger.info(f"EM: {em:.2f}")
null
12,016
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging from utils_rag import exact_match_score, f1_score logger = logging.getLogger(__name__) def get_precision_at_k(args, preds_path, gold_data_path): k = args.k hypos = [line.strip() for line in open(preds_path, "r").readlines()] references = [line.strip() for line in open(gold_data_path, "r").readlines()] em = total = 0 for hypo, reference in zip(hypos, references): hypo_provenance = set(hypo.split("\t")[:k]) ref_provenance = set(reference.split("\t")) total += 1 em += len(hypo_provenance & ref_provenance) / k em = 100.0 * em / total logger.info(f"Precision@{k}: {em: .2f}")
null
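A tiny worked example of the precision@k computation in `get_precision_at_k` above, using made-up provenance strings (each line is a tab-separated list of retrieved or gold page titles):

def precision_at_k(hypos, references, k):
    total = em = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])   # top-k retrieved titles
        ref_provenance = set(reference.split("\t"))   # gold titles
        total += 1
        em += len(hypo_provenance & ref_provenance) / k
    return 100.0 * em / total

hypos = ["Eiffel Tower\tLouvre", "Berlin\tParis"]
refs = ["Eiffel Tower\tChamp de Mars", "Paris"]
print(precision_at_k(hypos, refs, k=2))  # (0.5 + 0.5) / 2 * 100 = 50.0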
12,017
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging from utils_rag import exact_match_score, f1_score def evaluate_batch_retrieval(args, rag_model, questions): def strip_title(title): if title.startswith('"'): title = title[1:] if title.endswith('"'): title = title[:-1] return title retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( questions, return_tensors="pt", padding=True, truncation=True, )["input_ids"].to(args.device) question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids) question_enc_pool_output = question_enc_outputs[0] result = rag_model.retriever( retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", ) all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids) provenance_strings = [] for docs in all_docs: provenance = [strip_title(title) for title in docs["title"]] provenance_strings.append("\t".join(provenance)) return provenance_strings
null
12,018
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging from utils_rag import exact_match_score, f1_score logger = logging.getLogger(__name__) def evaluate_batch_e2e(args, rag_model, questions): with torch.no_grad(): inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( questions, return_tensors="pt", padding=True, truncation=True ) input_ids = inputs_dict.input_ids.to(args.device) attention_mask = inputs_dict.attention_mask.to(args.device) outputs = rag_model.generate( # rag_model overwrites generate input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], # BART likes to repeat BOS tokens, dont allow it to generate more than one ) answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True) if args.print_predictions: for q, a in zip(questions, answers): logger.info("Q: {} - A: {}".format(q, a)) return answers
null
12,019
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging from utils_rag import exact_match_score, f1_score def get_args(): parser = argparse.ArgumentParser() parser.add_argument( "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=( "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the" " model_name_or_path" ), ) parser.add_argument( "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", ) parser.add_argument( "--index_path", default=None, type=str, help="Path to the retrieval index", ) parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs") parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", ) parser.add_argument( "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=( "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates" " precision@k." ), ) parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation") parser.add_argument( "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", ) parser.add_argument( "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", ) parser.add_argument( "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=( "Format of the gold data file" "qa - a single line in the following format: question [tab] answer_list" "ans - a single line of the gold file contains the expected answer string" ), ) parser.add_argument( "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", ) parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument( "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", ) parser.add_argument( "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", ) parser.add_argument( "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", ) parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers") parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers") parser.add_argument( "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", ) parser.add_argument( "--print_docs", action="store_true", help="If True, prints docs retried while generating.", ) args = parser.parse_args() args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") return args
null
12,021
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version class BaseTransformer(pl.LightningModule): def __init__( self, hparams: argparse.Namespace, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs ): def load_hf_checkpoint(self, *args, **kwargs): def get_lr_scheduler(self): def configure_optimizers(self): def test_step(self, batch, batch_nb): def test_epoch_end(self, outputs): def total_steps(self) -> int: def setup(self, stage): def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False): def train_dataloader(self): def val_dataloader(self): def test_dataloader(self): def _feature_file(self, mode): def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: def add_model_specific_args(parser, root_dir): class InitCallback(pl.Callback): def on_sanity_check_start(self, trainer, pl_module): class LoggingCallback(pl.Callback): def on_batch_end(self, trainer, pl_module): def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): def generic_train( model: BaseTransformer, args: argparse.Namespace, early_stopping_callback=None, logger=True, # can pass WandbLogger() here custom_ddp_plugin=None, extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs ): pl.seed_everything(args.seed) # init model odir = Path(model.hparams.output_dir) odir.mkdir(exist_ok=True) # add custom checkpoints if checkpoint_callback is None: checkpoint_callback = pl.callbacks.ModelCheckpoint( filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(early_stopping_callback) if logging_callback is None: logging_callback = LoggingCallback() train_params = {} # TODO: remove with PyTorch 1.6 since pl uses native amp if args.fp16: train_params["precision"] = 16 # train_params["amp_level"] = args.fp16_opt_level if args.gpus > 1: train_params["accelerator"] = "auto" # "ddp" train_params["strategy"] = "ddp" train_params["accumulate_grad_batches"] = args.accumulate_grad_batches train_params["profiler"] = None # extra_train_kwargs.get("profiler", None) #get unwanted logs train_params["devices"] = "auto" trainer = pl.Trainer.from_argparse_args( args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks + [checkpoint_callback] + [InitCallback()], # plugins=[custom_ddp_plugin], logger=logger, **train_params, ) if args.do_train: trainer.fit(model) return trainer
null
12,029
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, T5Tokenizer def normalize_answer(s): def f1_score(prediction, ground_truth): prediction_tokens = normalize_answer(prediction).split() ground_truth_tokens = normalize_answer(ground_truth).split() common = Counter(prediction_tokens) & Counter(ground_truth_tokens) num_same = sum(common.values()) if num_same == 0: return 0 precision = 1.0 * num_same / len(prediction_tokens) recall = 1.0 * num_same / len(ground_truth_tokens) f1 = (2 * precision * recall) / (precision + recall) return f1
null
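A worked example of the token-level F1 above; `normalize_answer` is truncated in the row, so this sketch stands in a plain lowercase-and-split normalization, and the strings are made up:

from collections import Counter

def f1_score(prediction, ground_truth):
    prediction_tokens = prediction.lower().split()
    ground_truth_tokens = ground_truth.lower().split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = num_same / len(prediction_tokens)
    recall = num_same / len(ground_truth_tokens)
    return 2 * precision * recall / (precision + recall)

print(round(f1_score("the eiffel tower in paris", "eiffel tower"), 3))
# precision 2/5, recall 2/2 -> F1 = 2*0.4*1.0/1.4 ≈ 0.571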
12,033
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import torch from datasets import Features, Sequence, Value, load_dataset import faiss from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser def split_text(text: str, n=100, character=" ") -> List[str]: """Split the text every ``n``-th occurrence of ``character``""" text = text.split(character) return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)] The provided code snippet includes necessary dependencies for implementing the `split_documents` function. Write a Python function `def split_documents(documents: dict) -> dict` to solve the following problem: Split documents into passages Here is the function: def split_documents(documents: dict) -> dict: """Split documents into passages""" titles, texts = [], [] for title, text in zip(documents["title"], documents["text"]): if text is not None: for passage in split_text(text): titles.append(title if title is not None else "") texts.append(passage) return {"title": titles, "text": texts}
Split documents into passages
12,034
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import torch from datasets import Features, Sequence, Value, load_dataset import faiss from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser device = "cuda" if torch.cuda.is_available() else "cpu" The provided code snippet includes necessary dependencies for implementing the `embed` function. Write a Python function `def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict` to solve the following problem: Compute the DPR embeddings of document passages Here is the function: def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict: """Compute the DPR embeddings of document passages""" input_ids = ctx_tokenizer( documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt" )["input_ids"] embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()}
Compute the DPR embeddings of document passages
12,036
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json The provided code snippet includes necessary dependencies for implementing the `get_checkpoint_callback` function. Write a Python function `def get_checkpoint_callback(output_dir, metric)` to solve the following problem: Saves the best model by validation EM score. Here is the function: def get_checkpoint_callback(output_dir, metric): """Saves the best model by validation EM score.""" if metric == "rouge2": exp = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": exp = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": exp = "{val_avg_em:.4f}-{step_count}" elif metric == "loss": exp = "{val_avg_loss:.4f}-{step_count}" else: raise NotImplementedError( f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this" " function." ) checkpoint_callback = ModelCheckpoint( dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1, # works only with PL > 1.3 ) return checkpoint_callback
Saves the best model by validation EM score.
12,044
import os from functools import partial from glob import glob from datasets import Features, Sequence, Value, concatenate_datasets, load_dataset, load_from_disk import faiss from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast def split_documents(documents): """Split documents into passages""" titles, texts = [], [] for title, text in zip(documents["title"], documents["text"]): if text is not None: for passage in split_text(text): titles.append(title if title is not None else "") texts.append(passage) return {"title": titles, "text": texts} def embed_update(ctx_encoder, total_processes, device, process_num, shard_dir, csv_path): kb_dataset = load_dataset( "csv", data_files=[csv_path], split="train", delimiter="\t", column_names=["title", "text"] ) kb_dataset = kb_dataset.map( split_documents, batched=True, num_proc=1 ) # if you want you can load already splitted csv. kb_list = [kb_dataset.shard(total_processes, i, contiguous=True) for i in range(total_processes)] data_shrad = kb_list[process_num] arrow_folder = "data_" + str(process_num) passages_path = os.path.join(shard_dir, arrow_folder) context_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained("facebook/dpr-ctx_encoder-multiset-base") ctx_encoder = ctx_encoder.to(device=device) def embed( documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast, device ) -> dict: """Compute the DPR embeddings of document passages""" input_ids = ctx_tokenizer( documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt" )["input_ids"] embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} new_features = Features( {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))} ) # optional, save as float32 instead of float64 to save space dataset = data_shrad.map( partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=context_tokenizer, device=device), batched=True, batch_size=16, features=new_features, ) dataset.save_to_disk(passages_path)
null
12,045
import os from functools import partial from glob import glob from datasets import Features, Sequence, Value, concatenate_datasets, load_dataset, load_from_disk import faiss from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast def add_index(shard_dir, index_path): data_shard_list = [] for shard_address in glob(str(shard_dir) + "/*/"): data_shard_list.append(load_from_disk(shard_address)) concat = concatenate_datasets(data_shard_list) faiss.omp_set_num_threads(96) index = faiss.IndexHNSWFlat(768, 128, faiss.METRIC_INNER_PRODUCT) concat.add_faiss_index("embeddings", custom_index=index) concat.get_index("embeddings").save( index_path ) # since we load the index into memory, we can directly update the index on disk
null
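A minimal sketch of the FAISS index type used in `add_index` above, run on random vectors; the dimension 768, HNSW parameter 128, and inner-product metric are taken from the row, while the data and query are made up:

import faiss
import numpy as np

d = 768
index = faiss.IndexHNSWFlat(d, 128, faiss.METRIC_INNER_PRODUCT)

xb = np.random.rand(1000, d).astype("float32")   # stand-in for the passage embeddings
index.add(xb)

query = np.random.rand(1, d).astype("float32")
scores, ids = index.search(query, 5)
print(ids)  # ids of the 5 nearest stored vectors by inner product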
12,046
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version def add_generic_args(parser, root_dir) -> None: # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( "--output_dir", default=str(Path(__file__).parent / "test_run" / "model_checkpoints"), type=str, help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O2", help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ), ) parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int) parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm") parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.") parser.add_argument( "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--data_dir", default=str(Path(__file__).parent / "test_run" / "dummy-train-data"), type=str, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.", )
null
12,047
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version class BaseTransformer(pl.LightningModule): def __init__( self, hparams: argparse.Namespace, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs ): """Initialize a model, tokenizer and config.""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(hparams) self.step_count = 0 self.output_dir = Path(self.hparams.output_dir) cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: self.config = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({"num_labels": num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, ) else: self.config: PretrainedConfig = config extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(self.hparams, p, None): assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute" setattr(self.config, p, getattr(self.hparams, p)) if tokenizer is None: self.tokenizer = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, ) else: self.tokenizer: PreTrainedTokenizer = tokenizer self.model_type = MODEL_MODES[mode] if model is None: self.model = self.model_type.from_pretrained( self.hparams.model_name_or_path, from_tf=bool(".ckpt" in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir, ) else: self.model = model def load_hf_checkpoint(self, *args, **kwargs): self.model = self.model_type.from_pretrained(*args, **kwargs) def get_lr_scheduler(self): get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler] scheduler = get_schedule_func( self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps() ) scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1} return scheduler def configure_optimizers(self): """Prepare optimizer and schedule (linear warmup and decay)""" model = self.model no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) ], # check this named paramters "weight_decay": self.hparams.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] if self.hparams.adafactor: optimizer = Adafactor( optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False ) else: optimizer = AdamW( optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon 
) self.opt = optimizer scheduler = self.get_lr_scheduler() return [optimizer], [scheduler] def test_step(self, batch, batch_nb): return self.validation_step(batch, batch_nb) def test_epoch_end(self, outputs): return self.validation_end(outputs) def total_steps(self) -> int: """The number of total training steps that will be run. Used for lr scheduler purposes.""" num_devices = max(1, self.hparams.gpus) # TODO: consider num_tpu_cores effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def setup(self, stage): if stage == "test": self.dataset_size = len(self.test_dataloader().dataset) else: self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True) self.dataset_size = len(self.train_dataloader().dataset) def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False): raise NotImplementedError("You must implement this for your task") def train_dataloader(self): return self.train_loader def val_dataloader(self): return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False) def test_dataloader(self): return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False) def _feature_file(self, mode): return os.path.join( self.hparams.data_dir, "cached_{}_{}_{}".format( mode, list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(), str(self.hparams.max_seq_length), ), ) def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: save_path = self.output_dir.joinpath("best_tfmr") self.model.config.save_step = self.step_count self.model.save_pretrained(save_path) self.tokenizer.save_pretrained(save_path) def add_model_specific_args(parser, root_dir): parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default=str(Path(__file__).parent / "test_run" / "cache"), type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", ) parser.add_argument( "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--attention_dropout", type=float, help="Attention dropout probability (Optional). 
Goes into model.config", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument( "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader") parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int) parser.add_argument("--train_batch_size", default=32, type=int) parser.add_argument("--eval_batch_size", default=32, type=int) parser.add_argument("--adafactor", action="store_true") class InitCallback(pl.Callback): # this process can also be done with PL ddp plugging. # But still it is experimental (check original RAG, I updated that with pluggin (shamanez)) def on_sanity_check_start(self, trainer, pl_module): if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class LoggingCallback(pl.Callback): def on_batch_end(self, trainer, pl_module): lr_scheduler = trainer.lr_schedulers[0]["scheduler"] lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())} pl_module.logger.log_metrics(lrs) def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): rank_zero_info("***** Validation results *****") metrics = trainer.callback_metrics # Log results for key in sorted(metrics): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): rank_zero_info("***** Test results *****") metrics = trainer.callback_metrics # Log and save results to file output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt") with open(output_test_results_file, "w") as writer: for key in sorted(metrics): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) writer.write("{} = {}\n".format(key, str(metrics[key]))) def generic_train( model: BaseTransformer, args: argparse.Namespace, early_stopping_callback=None, logger=True, # can pass WandbLogger() here extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs ): pl.seed_everything(args.seed) # init model odir = Path(model.hparams.output_dir) odir.mkdir(exist_ok=True) # add custom checkpoints if checkpoint_callback is None: checkpoint_callback = pl.callbacks.ModelCheckpoint( filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(early_stopping_callback) if logging_callback is None: logging_callback = LoggingCallback() train_params = {} if args.fp16: train_params["precision"] = 16 if args.gpus > 1: train_params["accelerator"] = "auto" train_params["strategy"] = "ddp" train_params["accumulate_grad_batches"] = args.accumulate_grad_batches train_params["profiler"] = None train_params["devices"] = "auto" trainer = pl.Trainer.from_argparse_args( args, weights_summary=None, 
callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=logger, val_check_interval=1, num_sanity_val_steps=2, **train_params, ) if args.do_train: trainer.fit(model) else: print("RAG modeling tests with new set functions successfuly executed!") return trainer
null
12,048
import argparse import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler import joblib from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpt2, recopy_gpt2, set_seed, train_secondary_learner, ) from transformers import GPT2LMHeadModel def set_seed(seed): """ For reproducible training Args: seed: A seed for reproducible training """ random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) def compute_perplexity(model, test_data, context_len): """ Computes perplexity of the transformer model on data in test_data Args: model: Pre-trained GPT2 model test_data: Data on which perplexity calculation is required context_len: The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded Returns: Perplexity on input test data """ model.eval() device = next(model.parameters()).device eval_batch_size = 1 context = torch.zeros((eval_batch_size, context_len), dtype=torch.long, device=device) eval_dataloader = DataLoader(test_data, shuffle=False, batch_size=eval_batch_size) eval_loss = torch.zeros(1, device=device) nb_eval_examples = 0 for batch in eval_dataloader: batch.to(device) # pad context.zero_() for i in range(eval_batch_size): context[i, :] = batch[i] outputs = model(context, labels=context) eval_loss += outputs[0].sum().item() nb_eval_examples += batch.size(0) eval_loss = eval_loss / nb_eval_examples perplexity = torch.exp(eval_loss) model.train() return perplexity def load_gpt2(model_name="gpt2"): """ load original gpt2 and save off for quicker loading Args: model_name: GPT-2 Returns: GPT-2 model """ model = GPT2LMHeadModel.from_pretrained(model_name, output_hidden_states=True) torch.save(model.state_dict(), model_name + "local.pt") return model def collect_objective_set( model, orig_perp, context_len, train_data, objective_set, max_steps, device, filename="dev.jbl", recopy_model=recopy_gpt2, ): """ Collect individual IGF values from pre-trained transformer model max_steps samples of training data to train secondary model Args: model: Pre-trained GPT2 model orig_perp: Perplexity of original pretrained GPT-2 model context_len: The maximum total input sequence length after tokenization. 
Sequences longer than this will be truncated, sequences shorter will be padded train_data: Data to train model objective_set: Contexts used to create (X,IG(X)) pairs which is the training data for secondary learner max_steps: To calculate training epochs of model device: GPU/CPU filename: To store intermediate perplexity differences recopy_model: Reset the model to the original pretrained GPT-2 weights after each iteration Returns: file stored intermediate perplexity differences in intermediate stages """ # initialize variables to record relevant information contexts = [] real_perps = [] past_perps = [] # Initialize the transformer model orig_model = copy.deepcopy(model) orig_model.to(device="cpu") torch.cuda.empty_cache() # Compute perplexity of initial transformer model for comparison model.train() model, lm_optimizer, lm_scheduler = recopy_model(orig_model, device, max_steps) for step in tqdm(range(max_steps)): context = torch.zeros((1, context_len), dtype=torch.long, device=device) story = random.choice(train_data) start = random.randint(0, len(story[0]) - context_len - 1) context[0, :] = story[0][start : start + context_len] lm_optimizer.zero_grad() outputs = model(context, labels=context) lm_loss = outputs[0] past_perp = compute_perplexity(model, context, context_len) model.train() lm_loss.backward() # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule # Compute perplexity after back-propagating on the selected context real_perp = compute_perplexity(model, objective_set, context_len) # Periodically save the stored (X, IG(X)) pairs if step % 1000 == 0 and step > 1: intermittent_save(contexts, real_perps, past_perps, filename) # Reset the pretrained model to the original pretrained GPT-2 weights after each iteration model, lm_optimizer, lm_scheduler = recopy_model(orig_model, device, max_steps) past_perps.append(past_perp.item()) real_perps.append(orig_perp - real_perp.item()) contexts.append(np.array(context.cpu())) intermittent_save(contexts, real_perps, past_perps, filename) def generate_datasets( context_len, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True ): """ Generate objective set and training set Args: context_len: The maximum total input sequence length after tokenization. 
Sequences longer than this will be truncated, sequences shorter will be padded file: Tokenized data split into training set and objective set number: size of objective dataset min_len: minimum length of a context in objective set trim: If True truncate the context if it exceeds context length Returns: Generated objective set and training data """ # Generate objective set and training set # Designate the first number (100) articles that are long enough to be used # as our objective set, rest (that are long enough) are training data for # secondary learner data = joblib.load(file) print("data loaded") objective_set = [] if trim: for i, example in enumerate(data): if len(example[0]) > min_len: start = random.randint(0, len(example[0]) - context_len - 1) objective_set.append(example[0, start : start + context_len]) if len(objective_set) >= number: break train_data = [] for j in range(i + 1, len(data)): if len(data[j][0]) > min_len: train_data.append(data[j]) else: objective_set = data[0:number] train_data = data[number:] joblib.dump(objective_set, "objective_set.jbl") print("objective set saved") return train_data, objective_set The provided code snippet includes necessary dependencies for implementing the `generate_n_pairs` function. Write a Python function `def generate_n_pairs( context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", )` to solve the following problem: Collecting *n* pairs for training the secondary learner Args: context_len: The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded max_steps: To calculate training epochs of secondary learner size_objective_set: size of objective data set used to create (X,IG(X)) pairs which is the training data for secondary learner min_len: The minimum length of the article to be used as objective set trim: If True truncate the context if it exceeds context length data_file: Tokenized data set split for training and evaluation of model igf_data_file: file to store (I,IG(X)) paired data set to train secondary learner Returns: Data stored in igf_data_file Here is the function: def generate_n_pairs( context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", ): """ Collecting *n* pairs for training the secondary learner Args: context_len: The maximum total input sequence length after tokenization. 
Sequences longer than this will be truncated, sequences shorter will be padded max_steps: To calculate training epochs of secondary learner size_objective_set: size of objective data set used to create (X,IG(X)) pairs which is the training data for secondary learner min_len: The minimum length of the article to be used as objective set trim: If True truncate the context if it exceeds context length data_file: Tokenized data set split for training and evaluation of model igf_data_file: file to store (I,IG(X)) paired data set to train secondary learner Returns: Data stored in igf_data_file """ # generates same data everytime set_seed(3) # generate train_data and objective_set train_data, objective_set = generate_datasets( context_len, data_file, number=size_objective_set, min_len=1026, trim=True ) # keeps model same across runs set_seed(4) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # load pretrained model model = load_gpt2("gpt2").to(device) print("computing perplexity on objective set") orig_perp = compute_perplexity(model, objective_set, context_len).item() print("perplexity on objective set:", orig_perp) # collect igf pairs and save to file demo.jbl collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache()
Collecting *n* pairs for training the secondary learner Args: context_len: The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded max_steps: To calculate training epochs of secondary learner size_objective_set: size of objective data set used to create (X,IG(X)) pairs which is the training data for secondary learner min_len: The minimum length of the article to be used as objective set trim: If True truncate the context if it exceeds context length data_file: Tokenized data set split for training and evaluation of model igf_data_file: file to store (I,IG(X)) paired data set to train secondary learner Returns: Data stored in igf_data_file
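`compute_perplexity` in the row above reduces to exponentiating the average language-model loss over the evaluation examples; a minimal numeric sketch of that relationship, with made-up loss values:

import torch

eval_losses = torch.tensor([3.1, 2.9, 3.0])   # per-example LM losses (made up)
perplexity = torch.exp(eval_losses.mean())
print(perplexity)  # tensor(20.0855), i.e. exp(3.0)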
12,049
import argparse import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler import joblib from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpt2, recopy_gpt2, set_seed, train_secondary_learner, ) from transformers import GPT2LMHeadModel def set_seed(seed): """ For reproducible training Args: seed: A seed for reproducible training """ random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) def train_secondary_learner( secondary_learner, train_dataset, max_epochs, batch_size, eval_freq=50, igf_model_path="secondary_learner.pt" ): """ Train the secondary learner (igf_model) Args: secondary_learner: secondary learner train_dataset: data to train secondary learner max_epochs: number of epochs to train secondary learner batch_size: batch size of training data of secondary learner eval_freq: secondary model evaluation can be triggered at eval_freq igf_model_path: path to store trained secondary learner Returns: Trained secondary learner """ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # We will use the first 512 pairs from our dataset as a test set for # our secondary learner and the rest to train test_dataset = train_dataset[:512] train_dataset = train_dataset[512:] train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size) test_dataloader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size) # secondary learner model set up loss = nn.MSELoss() test_loss = nn.MSELoss(reduction="sum") secondary_learner.to(device) q_optimizer = torch.optim.Adam(secondary_learner.parameters(), lr=0.00001) secondary_learner.train() # TODO in original code this is written as number of actual batches seen # not number of items seen but other places it is number of items instead. # improve consistency! changed this to epochs for clarity best_test_loss = float("inf") # Iterate through batches until we've used max_steps batches for epoch in range(int(max_epochs)): tr_q_loss = 0.0 secondary_learner.train() for step, batch in enumerate(train_dataloader): context = batch[0].to(device) real_q = batch[1].to(device) predicted_q = secondary_learner(context) q_optimizer.zero_grad() q_loss = loss(predicted_q, real_q.float()) q_loss.backward() q_optimizer.step() tr_q_loss += q_loss.item() # model trains fairly quickly so we won't wait for a full epoch # eval is triggered at eval_freq and end of epochs if (step % eval_freq == 0 and step > 0) or ((step + 1) == len(train_dataloader)): tr_loss = tr_q_loss / (step + 1) secondary_learner.eval() q_loss2 = 0.0 sum_q2 = 0.0 predicted = [] actual = [] # Compute performance of the secondary learner after this batch for step2, batch2 in enumerate(test_dataloader): features2 = batch2[0].to(device) real_q2 = batch2[1].to(device) predicted_q2 = secondary_learner(features2) q_loss2 += test_loss(predicted_q2, real_q2).item() sum_q2 += torch.sum(predicted_q2).item() for ei, i in enumerate(predicted_q2.cpu().detach().numpy()): predicted.append(i.item()) for ei, i in enumerate(real_q2.cpu().detach().numpy()): actual.append(i.item()) q_loss2 /= len(test_dataset) print( "Epoch: ", epoch, "step: ", step, "Avg. 
q:", sum_q2 / len(test_dataset), "Train Loss: ", tr_loss, "Test Loss: ", q_loss2, ) if q_loss2 < best_test_loss: joblib.dump((predicted, actual), "pred_vs_actual.jbl") torch.save(secondary_learner.state_dict(), igf_model_path) best_test_loss = q_loss2 secondary_learner.train() return secondary_learner class SecondaryLearner(nn.Module): """ Our secondary learner """ def __init__(self, model): """ We use a simple convolutional network as our secondary learner Args: model: Pre-trained GPT2 model """ # embeddings are from the pretrained model super(SecondaryLearner, self).__init__() self.embeddings = model.transformer.wte self.embeddings.weight = copy.deepcopy(model.transformer.wte.weight) self.conv = nn.Conv1d(self.embeddings.weight.size(1), 256, 3, padding=1) self.fc = nn.Sequential(nn.Linear(256, 32), nn.Dropout(p=0.1), nn.Linear(32, 32), nn.Linear(32, 1)) def forward(self, context): """ Forward pass through the secondary learner Args: context: Context input to the secondary learner Returns: tensor after squeeze operation """ pooled = torch.max(self.conv(self.embeddings(context).squeeze(1).transpose(1, 2)), 2)[0] qs = self.fc(pooled) return qs.squeeze(1) def from_pretrained(cls, state_path, model): """ Load the secondary learner Args: state_path: Path to save secondary learner model: Pretrained GPT-2 Returns: secondary learner """ secondary_learner = cls(model) # this calls __init__ state_dict = torch.load(state_path) secondary_learner.load_state_dict(state_dict) secondary_learner.embeddings = model.transformer.wte secondary_learner.embeddings.weight = copy.deepcopy(model.transformer.wte.weight) return secondary_learner The provided code snippet includes necessary dependencies for implementing the `training_secondary_learner` function. Write a Python function `def training_secondary_learner( secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", )` to solve the following problem: Train the secondary learner Args: secondary_learner_train_data: Data set with (X,IG(X)) pairs to train secondary learner where IG(X) - measure of informativeness and X- context secondary_learner_max_epochs: Number of epochs to train secondary learner secondary_learner_batch_size: Batch size to train secondary learner eval_freq (object): secondary model evaluation can be triggered at eval_freq igf_model_path: path to store trained secondary learner Returns: Trained secondary learner Here is the function: def training_secondary_learner( secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", ): """ Train the secondary learner Args: secondary_learner_train_data: Data set with (X,IG(X)) pairs to train secondary learner where IG(X) - measure of informativeness and X- context secondary_learner_max_epochs: Number of epochs to train secondary learner secondary_learner_batch_size: Batch size to train secondary learner eval_freq (object): secondary model evaluation can be triggered at eval_freq igf_model_path: path to store trained secondary learner Returns: Trained secondary learner """ set_seed(42) # Load pre-trained model model = GPT2LMHeadModel.from_pretrained("gpt2") # Initialize secondary learner to use embedding weights of model secondary_learner = SecondaryLearner(model) # Train secondary learner secondary_learner = train_secondary_learner( secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs, 
batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path, ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner
Train the secondary learner Args: secondary_learner_train_data: Data set with (X, IG(X)) pairs to train the secondary learner, where IG(X) is a measure of informativeness and X is the context secondary_learner_max_epochs: Number of epochs to train the secondary learner secondary_learner_batch_size: Batch size for training the secondary learner eval_freq: secondary model evaluation is triggered every eval_freq steps igf_model_path: path to store the trained secondary learner Returns: Trained secondary learner
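A minimal usage sketch, assuming the `igf` helper package from this example directory is importable and that the (X, IG(X)) pairs were previously collected and saved with joblib; the file name `igf_context_pairs.jbl` is illustrative:

import joblib

# Load the previously collected (context, IG(context)) training pairs
secondary_learner_train_data = joblib.load("igf_context_pairs.jbl")  # illustrative path

# Train the secondary learner and store the best checkpoint to igf_model.pt
secondary_learner = training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
)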
12,050
import argparse import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler import joblib from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpt2, recopy_gpt2, set_seed, train_secondary_learner, ) from transformers import GPT2LMHeadModel def compute_perplexity(model, test_data, context_len): """ Computes perplexity of the transformer model on data in test_data Args: model: Pre-trained GPT2 model test_data: Data on which perplexity calculation is required context_len: The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded Returns: Perplexity on input test data """ model.eval() device = next(model.parameters()).device eval_batch_size = 1 context = torch.zeros((eval_batch_size, context_len), dtype=torch.long, device=device) eval_dataloader = DataLoader(test_data, shuffle=False, batch_size=eval_batch_size) eval_loss = torch.zeros(1, device=device) nb_eval_examples = 0 for batch in eval_dataloader: batch.to(device) # pad context.zero_() for i in range(eval_batch_size): context[i, :] = batch[i] outputs = model(context, labels=context) eval_loss += outputs[0].sum().item() nb_eval_examples += batch.size(0) eval_loss = eval_loss / nb_eval_examples perplexity = torch.exp(eval_loss) model.train() return perplexity def recopy_gpt2(orig_model, device, max_steps): """ Reset the model to the original pretrained GPT-2 weights after each iteration Args: orig_model: Original pretrained GPT-2 model imported from Transformers library device: CPU/GPU max_steps: number of training steps Returns: Original PreTrained GPT-2 model, lm_optimizer: Adam optimizer with Decoupled weight decay lm_scheduler: linear scheduler with the appropriate schedule """ model = copy.deepcopy(orig_model) model.to(device) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] lm_optimizer = AdamW(optimizer_grouped_parameters, lr=5e-5, eps=1e-8) lm_scheduler = get_linear_schedule_with_warmup(lm_optimizer, 0, max_steps) torch.cuda.empty_cache() return model, lm_optimizer, lm_scheduler The provided code snippet includes necessary dependencies for implementing the `finetune` function. Write a Python function `def finetune( model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", )` to solve the following problem: fine-tune with IGF if secondary_learner is not None, else standard fine-tuning Args: model: pre-trained GPT-2 model train_dataset: Data set to train GPT-2 model test_dataset: Evaluate GPT-2 model context_len: The maximum total input sequence length after tokenization. 
Sequences longer than this will be truncated, sequences shorter will be padded max_steps: To calculate training epochs batch_size: Batch size to train GPT-2 model threshold: The threshold value used by secondary learner to filter the train_data and allow only" informative data as input to the model recopy_model: Reset the model to the original pretrained GPT-2 weights after each iteration secondary_learner: Selection of IGF as fine-tuning method if not None eval_interval: number of batches after which decay the selectivity of our secondary learner filter from 1 standard deviation above average to 1 below average fine-tuned_model_name: name of the final final-tuned GPT-2 model Returns: Fine-tuned GPT-2 model Here is the function: def finetune( model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", ): """ fine-tune with IGF if secondary_learner is not None, else standard fine-tuning Args: model: pre-trained GPT-2 model train_dataset: Data set to train GPT-2 model test_dataset: Evaluate GPT-2 model context_len: The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded max_steps: To calculate training epochs batch_size: Batch size to train GPT-2 model threshold: The threshold value used by secondary learner to filter the train_data and allow only" informative data as input to the model recopy_model: Reset the model to the original pretrained GPT-2 weights after each iteration secondary_learner: Selection of IGF as fine-tuning method if not None eval_interval: number of batches after which decay the selectivity of our secondary learner filter from 1 standard deviation above average to 1 below average fine-tuned_model_name: name of the final final-tuned GPT-2 model Returns: Fine-tuned GPT-2 model """ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") train_sampler = RandomSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler) num_train_epochs = max_steps // (len(train_dataset)) + 1 global_step = 0 context = torch.zeros((1, context_len), dtype=torch.long, device=device) model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps) model.train() if secondary_learner is not None: secondary_learner.to(device) secondary_learner.eval() contexts = [] examples = 0 observed_qs = [] test_perps = [] # Compute the performance of the transformer model at the beginning real_perp = compute_perplexity(model, test_dataset, context_len) test_perps.append(real_perp) print("Test perplexity, step", global_step, ":", real_perp) for epoch in range(int(num_train_epochs)): for step, example in enumerate(train_dataloader): torch.cuda.empty_cache() start = random.randint(0, example.size(2) - context_len - 1) context[0, :] = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() outputs = model(context, labels=context) do_backprop = True if secondary_learner is not None: predicted_q = secondary_learner.forward( torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0) )[0].item() observed_qs.append(float(predicted_q)) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. 
if global_step == 10: threshold = -1 if predicted_q < threshold: do_backprop = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu())) lm_loss = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() examples = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: real_perp = compute_perplexity(model, test_dataset, context_len) test_perps.append(real_perp) print("Test perplexity, step", global_step, ":", real_perp) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict(), finetuned_model_name) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model
fine-tune with IGF if secondary_learner is not None, else standard fine-tuning Args: model: pre-trained GPT-2 model train_dataset: Data set to train the GPT-2 model test_dataset: Data set to evaluate the GPT-2 model context_len: The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded max_steps: To calculate training epochs batch_size: Batch size to train GPT-2 model threshold: The threshold value used by the secondary learner to filter the train_data and allow only informative data as input to the model recopy_model: Reset the model to the original pretrained GPT-2 weights after each iteration secondary_learner: Selection of IGF as fine-tuning method if not None eval_interval: number of batches after which to decay the selectivity of our secondary learner filter from 1 standard deviation above average to 1 below average finetuned_model_name: name of the final fine-tuned GPT-2 model Returns: Fine-tuned GPT-2 model
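A minimal usage sketch of the IGF fine-tuning call, assuming `train_dataset` and `test_dataset` are the tokenized splits produced earlier (for example by `generate_datasets`) and that a secondary learner has already been trained as above; the dataset variables are placeholders, so this is a sketch rather than an end-to-end runnable script:

import torch
from transformers import GPT2LMHeadModel

model = GPT2LMHeadModel.from_pretrained("gpt2")

# Pass secondary_learner=None for standard fine-tuning, or the trained secondary
# learner so that only contexts with predicted IG(X) above the (decaying)
# threshold contribute to the gradient updates.
finetuned_model = finetune(
    model,
    train_dataset,   # placeholder: tokenized training articles
    test_dataset,    # placeholder: tokenized evaluation articles
    context_len=32,
    max_steps=1000,
    batch_size=16,
    secondary_learner=secondary_learner,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
)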
12,051
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def padding_tensor(sequences, padding_value, padding_side, sequence_length): if isinstance(padding_value, tuple): out_tensor = np.full((len(sequences), sequence_length, 2), padding_value) else: out_tensor = np.full((len(sequences), sequence_length), padding_value) for i, tensor in enumerate(sequences): if padding_side == "right": if isinstance(padding_value, tuple): out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length] else: out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length] else: if isinstance(padding_value, tuple): out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length] else: out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length] return out_tensor.tolist()
null
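A small, self-contained example of `padding_tensor` as defined above, padding two variable-length id sequences on the right with 0 up to a fixed length:

sequences = [[3, 7, 9], [5, 2]]
padded = padding_tensor(sequences, padding_value=0, padding_side="right", sequence_length=4)
print(padded)  # [[3, 7, 9, 0], [5, 2, 0, 0]]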
12,052
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def is_punctuation(char): cp = ord(char) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False
null
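A quick usage example of `is_punctuation` as defined above; it treats both ASCII symbol ranges and Unicode punctuation (category P*) as punctuation:

print(is_punctuation("!"))   # True  (ASCII code point 33)
print(is_punctuation("。"))  # True  (Unicode category Po)
print(is_punctuation("a"))   # False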
12,053
import argparse import logging import math import os import random from pathlib import Path import datasets import torch from datasets import ClassLabel, load_dataset, load_metric from torch.utils.data import DataLoader from tqdm.auto import tqdm import transformers from accelerate import Accelerator, DistributedDataParallelKwargs from huggingface_hub import Repository from luke_utils import DataCollatorForLukeTokenClassification, is_punctuation, padding_tensor from transformers import ( AdamW, LukeConfig, LukeForEntitySpanClassification, LukeTokenizer, SchedulerType, default_data_collator, get_scheduler, set_seed, ) from transformers.file_utils import get_full_repo_name from transformers.utils.versions import require_version def parse_args(): parser = argparse.ArgumentParser( description="Finetune (m)LUKE on a token classification task (such as NER) with the accelerate library" ) parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." ) parser.add_argument( "--text_column_name", type=str, default=None, help="The column name of text to input in the file (a csv or JSON file).", ) parser.add_argument( "--label_column_name", type=str, default=None, help="The column name of label to input in the file (a csv or JSON file).", ) parser.add_argument( "--max_length", type=int, default=128, help=( "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," " sequences shorter will be padded if `--pad_to_max_length` is passed." ), ) parser.add_argument( "--max_entity_length", type=int, default=32, help=( "The maximum total input entity length after tokenization (Used only for (M)Luke models). Sequences longer" " than this will be truncated, sequences shorter will be padded if `--pad_to_max_length` is passed." ), ) parser.add_argument( "--max_mention_length", type=int, default=30, help=( "The maximum total input mention length after tokenization (Used only for (M)Luke models). Sequences" " longer than this will be truncated, sequences shorter will be padded if `--pad_to_max_length` is passed." ), ) parser.add_argument( "--pad_to_max_length", action="store_true", help="If passed, pad all samples to `max_length`. 
Otherwise, dynamic padding is used.", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--label_all_tokens", action="store_true", help="Setting labels of all special tokens to -100 and thus PyTorch will ignore them.", ) parser.add_argument( "--return_entity_level_metrics", action="store_true", help="Indication whether entity level metrics are to be returner.", ) parser.add_argument( "--task_name", type=str, default="ner", choices=["ner", "pos", "chunk"], help="The name of the task.", ) parser.add_argument( "--debug", action="store_true", help="Activate debug mode and run training only with a subset of data.", ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") args = parser.parse_args() # Sanity checks if args.task_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a task name or a training/validation file.") else: if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args
null
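A minimal invocation sketch for `parse_args`; the checkpoint and dataset names below are illustrative, and in practice the same flags would simply be passed on the command line of the training script:

import sys

sys.argv = [
    "run_luke_ner_no_trainer.py",
    "--model_name_or_path", "studio-ousia/luke-base",
    "--dataset_name", "conll2003",
    "--output_dir", "luke-ner-output",
    "--max_length", "128",
    "--per_device_train_batch_size", "8",
]
args = parse_args()
print(args.task_name)  # "ner" by default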
12,054
import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from datasets import load_dataset from tqdm.auto import tqdm import transformers from accelerate import Accelerator from finetuning import finetune from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy logger = logging.getLogger(__name__) MODEL_BIN_FILE = "pytorch_model.bin" class STModelArguments: """Arguments pertaining to which config/tokenizer/model we are going to fine-tune from.""" model_name_or_path: str = dataclasses.field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} ) cache_dir: Optional[str] = dataclasses.field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."}, ) class STDataArguments: """Arguments pertaining to what data we are going to input our model for training and evaluation.""" train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."}) infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."}) eval_file: Optional[str] = dataclasses.field( default=None, metadata={"help": "A csv or a json file containing the validation data."} ) task_name: Optional[str] = dataclasses.field( default=None, metadata={"help": "The name of the task to train on."}, ) label_list: Optional[List[str]] = dataclasses.field( default=None, metadata={"help": "The list of labels for the task."} ) class STTrainingArguments: """Training arguments pertaining to the training loop itself.""" output_dir: str = dataclasses.field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."} ) eval_metric: Optional[str] = dataclasses.field( default="accuracy", metadata={"help": "The evaluation metric used for the task."} ) evaluation_strategy: Optional[str] = dataclasses.field( default="no", metadata={ "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]' }, ) early_stopping_patience: Optional[int] = dataclasses.field( default=10, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, ) early_stopping_threshold: Optional[float] = dataclasses.field( default=0.0, metadata={ "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions." 
}, ) do_filter_by_confidence: Optional[bool] = dataclasses.field( default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."}, ) do_filter_by_val_performance: Optional[bool] = dataclasses.field( default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."}, ) finetune_on_labeled_data: Optional[bool] = dataclasses.field( default=False, metadata={"help": "Whether to fine-tune on labeled data after pseudo training."}, ) confidence_threshold: Optional[float] = dataclasses.field( default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}, ) max_selftrain_iterations: Optional[int] = dataclasses.field( default=100, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, ) seed: Optional[int] = dataclasses.field( default=None, metadata={"help": "Random seed for initialization."}, ) def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir): """Create pseudeo labeled data for the next self-training iteration.""" dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1) if args.do_filter_by_confidence: dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 num_selected_rows = int(eval_result * len(dataset)) print(num_selected_rows) dataset = dataset.sort("probability", reverse=True) dataset = dataset.select(range(num_selected_rows)) dataset = dataset.remove_columns(["label", "probability"]) dataset = dataset.rename_column("prediction", "label") dataset = dataset.map(lambda example: {"label": id2label[example["label"]]}) dataset = dataset.shuffle(seed=args.seed) pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}") if args.data_file_extension == "csv": dataset.to_csv(pseudo_labeled_data_file, index=False) else: dataset.to_json(pseudo_labeled_data_file) def finetune(accelerator, model_name_or_path, train_file, output_dir, **kwargs): """Fine-tuning a pre-trained model on a downstream task. Args: accelerator: An instance of an accelerator for distributed training (on multi-GPU, TPU) or mixed precision training. model_name_or_path: Path to pretrained model or model identifier from huggingface.co/models. train_file: A csv or a json file containing the training data. output_dir: The output directory where the model predictions and checkpoints will be written. **kwargs: Dictionary of key/value pairs with which to update the configuration object after loading. The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. """ # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) model_args = FTModelArguments(model_name_or_path=model_name_or_path) data_args = FTDataArguments(train_file=train_file) training_args = FTTrainingArguments(output_dir=output_dir) args = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(arg_class).items(): setattr(args, key, value) for key, value in kwargs.items(): if hasattr(args, key): setattr(args, key, value) # Sanity checks data_files = {} args.data_file_extension = None # You need to provide the training data as we always run training args.do_train = True assert args.train_file is not None data_files[Split.TRAIN.value] = args.train_file if args.do_eval or args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None data_files[Split.EVAL.value] = args.eval_file if args.do_eval and args.test_file is not None: data_files[Split.TEST.value] = args.test_file if args.do_predict: assert args.infer_file is not None data_files[Split.INFER.value] = args.infer_file for key in data_files: extension = data_files[key].split(".")[-1] assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file." if args.data_file_extension is None: args.data_file_extension = extension else: assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`." assert ( args.eval_metric in datasets.list_metrics() ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}." # Handle the output directory creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # You need to provide your CSV/JSON data files. # # For CSV/JSON files, this script will use as labels the column called 'label' # and as pair of sentences the sentences in columns called 'sentence1' and # 'sentence2' if these columns exist or the first two columns not named # 'label' if at least two columns are provided. # # If the CSVs/JSONs contain only one non-label column, the script does single # sentence classification on this single column. # # In distributed training, the load_dataset function guarantees that only one # local process can download the dataset. # Loading the dataset from local csv or json files. 
raw_datasets = load_dataset(args.data_file_extension, data_files=data_files) # Labels is_regression = raw_datasets[Split.TRAIN.value].features["label"].dtype in ["float32", "float64"] args.is_regression = is_regression if args.is_regression: label_list = None num_labels = 1 else: label_list = args.label_list assert label_list is not None label_list.sort() # Let's sort it for determinism num_labels = len(label_list) args.num_labels = num_labels # Load pre-trained model config, tokenizer, model = load_from_pretrained(args, args.model_name_or_path) # Preprocessing the datasets non_label_column_names = [name for name in raw_datasets[Split.TRAIN.value].column_names if name != "label"] if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: sentence1_key, sentence2_key = "sentence1", "sentence2" else: if len(non_label_column_names) >= 2: sentence1_key, sentence2_key = non_label_column_names[:2] else: sentence1_key, sentence2_key = non_label_column_names[0], None label_to_id = {v: i for i, v in enumerate(label_list)} config.label2id = label_to_id config.id2label = {id: label for label, id in config.label2id.items()} padding = "max_length" if args.pad_to_max_length else False def preprocess_function(examples): # Tokenize the texts texts = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*texts, padding=padding, max_length=args.max_length, truncation=True) if "label" in examples: if label_to_id is not None: # Map labels to IDs (not necessary for GLUE tasks) result["labels"] = [label_to_id[l] for l in examples["label"]] else: # In all cases, rename the column to labels because the model will # expect that. result["labels"] = examples["label"] return result with accelerator.main_process_first(): processed_datasets = raw_datasets.map( preprocess_function, batched=True, remove_columns=raw_datasets[Split.TRAIN.value].column_names, desc="Running tokenizer on dataset", ) num_examples = {} splits = [s.value for s in Split] for split in splits: if split in processed_datasets: num_examples[split] = len(processed_datasets[split]) args.num_examples = num_examples train_dataset = processed_datasets[Split.TRAIN.value] eval_dataset = processed_datasets[Split.EVAL.value] if Split.EVAL.value in processed_datasets else None test_dataset = processed_datasets[Split.TEST.value] if Split.TEST.value in processed_datasets else None infer_dataset = processed_datasets[Split.INFER.value] if Split.INFER.value in processed_datasets else None # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info("Sample %d of the training set: %s.", index, train_dataset[index]) # DataLoaders creation: if args.pad_to_max_length: # If padding was already done ot max length, we use the default data # collator that will just convert everything to tensors. data_collator = default_data_collator else: # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by # padding to the maximum length of the samples passed). When using mixed # precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple of # 8s, which will enable the use of Tensor Cores on NVIDIA hardware with # compute capability >= 7.5 (Volta). 
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)) train_dataloader = DataLoader( train_dataset, batch_size=args.per_device_train_batch_size, shuffle=True, collate_fn=data_collator, ) eval_dataloader, test_dataloader, infer_dataloader = None, None, None if eval_dataset is not None: eval_dataloader = DataLoader( eval_dataset, batch_size=args.per_device_eval_batch_size, collate_fn=data_collator ) if test_dataset is not None: test_dataloader = DataLoader( test_dataset, batch_size=args.per_device_eval_batch_size, collate_fn=data_collator ) if infer_dataset is not None: infer_dataloader = DataLoader( infer_dataset, batch_size=args.per_device_eval_batch_size, collate_fn=data_collator ) # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, test_dataloader, infer_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, test_dataloader, infer_dataloader ) # Note -> the training dataloader needs to be prepared before we grab its # length below (cause its length will be shorter in multiprocess) # Scheduler and math around the number of training steps. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_steps == -1: args.max_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_steps / num_update_steps_per_epoch) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=args.max_steps, ) # Train completed_steps, avg_train_loss = train( args, accelerator, model, tokenizer, train_dataloader, optimizer, lr_scheduler, eval_dataloader ) accelerator.wait_for_everyone() logger.info("Training job completed: completed_steps = %d, avg_train_loss = %f", completed_steps, avg_train_loss) args.model_name_or_path = os.path.join(args.output_dir, "best-checkpoint") logger.info("Loading the best checkpoint: %s", args.model_name_or_path) config, tokenizer, model = load_from_pretrained(args, args.model_name_or_path) model = accelerator.prepare(model) if args.do_eval: # Evaluate if eval_dataloader is not None: logger.info("***** Running evaluation on the eval data using the best checkpoint *****") eval_results = evaluate(args, accelerator, eval_dataloader, Split.EVAL.value, model, "best-checkpoint") avg_eval_loss = eval_results["avg_eval_loss"] eval_metric = eval_results[args.eval_metric] logger.info("Evaluation job completed: avg_eval_loss = %f", avg_eval_loss) logger.info("Evaluation result for the best checkpoint: %s = %f", args.eval_metric, eval_metric) if test_dataloader is not None: logger.info("***** Running evaluation on the test data using the best checkpoint *****") eval_results = evaluate(args, accelerator, test_dataloader, Split.TEST.value, model, "best-checkpoint") avg_eval_loss = eval_results["avg_eval_loss"] eval_metric = eval_results[args.eval_metric] logger.info("Test job completed: avg_test_loss = %f", avg_eval_loss) 
logger.info("Test result for the best checkpoint: %s = %f", args.eval_metric, eval_metric) if args.do_predict: # Predict if infer_dataloader is not None: logger.info("***** Running inference using the best checkpoint *****") evaluate( args, accelerator, infer_dataloader, Split.INFER.value, model, "best-checkpoint", has_labels=False ) logger.info("Inference job completed.") # Release all references to the internal objects stored and call the garbage # collector. You should call this method between two trainings with different # models/optimizers. accelerator.free_memory() The provided code snippet includes necessary dependencies for implementing the `selftrain` function. Write a Python function `def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs)` to solve the following problem: Self-training a pre-trained model on a downstream task. Args: model_name_or_path: Path to pretrained model or model identifier from huggingface.co/models. train_file: A csv or a json file containing the training data. infer_file: A csv or a json file containing the data to predict on. output_dir: The output directory where the model predictions and checkpoints will be written. **kwargs: Dictionary of key/value pairs with which to update the configuration object after loading. The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. Here is the function: def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs): """Self-training a pre-trained model on a downstream task. Args: model_name_or_path: Path to pretrained model or model identifier from huggingface.co/models. train_file: A csv or a json file containing the training data. infer_file: A csv or a json file containing the data to predict on. output_dir: The output directory where the model predictions and checkpoints will be written. **kwargs: Dictionary of key/value pairs with which to update the configuration object after loading. The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. """ # Initialize the accelerator. We will let the accelerator handle device # placement for us. accelerator = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() model_args = STModelArguments(model_name_or_path=model_name_or_path) data_args = STDataArguments(train_file=train_file, infer_file=infer_file) training_args = STTrainingArguments(output_dir=output_dir) args = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(arg_class).items(): setattr(args, key, value) for key, value in kwargs.items(): if hasattr(args, key): setattr(args, key, value) # Sanity checks data_files = {} args.data_file_extension = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None data_files["train"] = args.train_file data_files["infer"] = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None data_files["eval"] = args.eval_file for key in data_files: extension = data_files[key].split(".")[-1] assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file." if args.data_file_extension is None: args.data_file_extension = extension else: assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`." assert ( args.eval_metric in datasets.list_metrics() ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}." # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) logger.info("Creating the initial data directory for self-training...") data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format initial_data_dir = data_dir_format(0) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) os.makedirs(initial_data_dir, exist_ok=True) accelerator.wait_for_everyone() best_iteration = None best_eval_result = None early_stopping_patience_counter = 0 should_training_stop = False # Show the progress bar progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process) # Self-train for iteration in range(0, int(args.max_selftrain_iterations)): current_data_dir = data_dir_format(iteration) assert os.path.exists(current_data_dir) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 current_output_dir = os.path.join(current_data_dir, "stage-1") arguments_dict = { "accelerator": accelerator, "model_name_or_path": args.model_name_or_path, "cache_dir": args.cache_dir, "do_train": True, "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"], "do_eval": True if args.eval_file is not None else False, "eval_file": data_files["eval"], "do_predict": True, "infer_file": data_files["infer"], "task_name": args.task_name, "label_list": args.label_list, "output_dir": current_output_dir, "eval_metric": args.eval_metric, "evaluation_strategy": args.evaluation_strategy, "early_stopping_patience": args.early_stopping_patience, "early_stopping_threshold": args.early_stopping_threshold, "seed": args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(training_args, key): arguments_dict.update({key: value}) model_bin_file_path = 
os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE) if os.path.exists(model_bin_file_path): logger.info( "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.", model_bin_file_path, iteration, ) else: logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration) finetune(**arguments_dict) accelerator.wait_for_everyone() assert os.path.exists(model_bin_file_path) logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data model_path = os.path.join(current_output_dir, "best-checkpoint") current_output_dir = os.path.join(current_data_dir, "stage-2") # Update arguments_dict arguments_dict["model_name_or_path"] = model_path arguments_dict["train_file"] = data_files["train"] arguments_dict["output_dir"] = current_output_dir model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE) if os.path.exists(model_bin_file_path): logger.info( "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.", model_bin_file_path, iteration, ) else: logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration) finetune(**arguments_dict) accelerator.wait_for_everyone() assert os.path.exists(model_bin_file_path) logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration) new_iteration = iteration next_data_dir = data_dir_format(iteration + 1) config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint")) id2label = config.id2label eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json") test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json") assert os.path.exists(eval_results_file) with open(eval_results_file, "r") as f: eval_result = float(json.load(f)[args.eval_metric]) infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv") assert os.path.exists(infer_output_file) # Loading the dataset from local csv or json files. 
infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"] infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"] if accelerator.is_main_process: os.makedirs(next_data_dir, exist_ok=True) shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json")) if os.path.exists(test_results_file): shutil.copy(eval_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json")) create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir) accelerator.wait_for_everyone() data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}") if args.evaluation_strategy != IntervalStrategy.NO.value: new_eval_result = eval_result if best_iteration is None: best_iteration = new_iteration best_eval_result = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: best_iteration = new_iteration best_eval_result = new_eval_result early_stopping_patience_counter = 0 else: if new_eval_result == best_eval_result: best_iteration = new_iteration best_eval_result = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: should_training_stop = True progress_bar.update(1) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info("Best iteration: %d", best_iteration) logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(output_dir, f"eval_results_iter-{iteration}.json"), os.path.join(output_dir, "eval_results_best-iteration.json"), ) else: # Assume that the last iteration is the best logger.info("Best iteration: %d", args.max_selftrain_iterations - 1) logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"), os.path.join(output_dir, "eval_results_best-iteration.json"), )
Self-training a pre-trained model on a downstream task. Args: model_name_or_path: Path to pretrained model or model identifier from huggingface.co/models. train_file: A csv or a json file containing the training data. infer_file: A csv or a json file containing the data to predict on. output_dir: The output directory where the model predictions and checkpoints will be written. **kwargs: Dictionary of key/value pairs with which to update the configuration object after loading. The values in kwargs of any keys which are configuration attributes will be used to override the loaded values.
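A minimal usage sketch for `selftrain`; the file paths, task name, and label list are illustrative, and any extra keyword arguments matching configuration attributes are forwarded to the inner fine-tuning runs:

selftrain(
    model_name_or_path="bert-base-uncased",
    train_file="data/train.csv",        # illustrative: small labeled set
    infer_file="data/unlabeled.csv",    # illustrative: unlabeled data to pseudo-label
    output_dir="self_training_output",
    eval_file="data/eval.csv",          # required when evaluation_strategy != "no"
    task_name="sst2",                   # illustrative
    label_list=["0", "1"],
    eval_metric="accuracy",
    evaluation_strategy="epoch",
    max_selftrain_iterations=5,
    seed=42,
)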
12,055
import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process def add_chinese_references(dataset, ref_file): with open(ref_file, "r", encoding="utf-8") as f: refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())] assert len(dataset) == len(refs) dataset_dict = {c: dataset[c] for c in dataset.column_names} dataset_dict["chinese_ref"] = refs return Dataset.from_dict(dataset_dict)
null
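A small, self-contained example of `add_chinese_references`: the reference file holds one JSON list per line giving the positions of sub-word tokens that continue a whole Chinese word (the positions below are illustrative):

import json
from datasets import Dataset

dataset = Dataset.from_dict({"text": ["巴黎是法国的首都", "今天天气很好"]})

with open("ref.txt", "w", encoding="utf-8") as f:
    f.write(json.dumps([3]) + "\n")
    f.write(json.dumps([2, 4]) + "\n")

dataset = add_chinese_references(dataset, "ref.txt")
print(dataset[0]["chinese_ref"])  # [3]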
12,056
import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process def main(): def _mp_fn(index): # For xla_spawn (TPUs) main()
null
12,057
import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def _is_chinese_char(cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def get_chinese_word(tokens: List[str]): word_set = set() for token in tokens: chinese_word = len(token) > 1 and is_chinese(token) if chinese_word: word_set.add(token) word_list = list(word_set) return word_list def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()): if not chinese_word_set: return bert_tokens max_word_len = max([len(w) for w in chinese_word_set]) bert_word = bert_tokens start, end = 0, len(bert_word) while start < end: single_word = True if is_chinese(bert_word[start]): l = min(end - start, max_word_len) for i in range(l, 1, -1): whole_word = "".join(bert_word[start : start + i]) if whole_word in chinese_word_set: for j in range(start + 1, start + i): bert_word[j] = "##" + bert_word[j] start = start + i single_word = False break if single_word: start += 1 return bert_word def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer): ltp_res = [] for i in range(0, len(lines), 100): res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws res = [get_chinese_word(r) for r in res] ltp_res.extend(res) assert len(ltp_res) == len(lines) bert_res = [] for i in range(0, len(lines), 100): res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512) bert_res.extend(res["input_ids"]) assert len(bert_res) == len(lines) ref_ids = [] for input_ids, chinese_word in zip(bert_res, ltp_res): input_tokens = [] for id in input_ids: token = bert_tokenizer._convert_id_to_token(id) input_tokens.append(token) input_tokens = add_sub_symbol(input_tokens, chinese_word) ref_id = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. for i, token in enumerate(input_tokens): if token[:2] == "##": clean_token = token[2:] # save chinese tokens' pos if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)): ref_id.append(i) ref_ids.append(ref_id) assert len(ref_ids) == len(bert_res) return ref_ids
null
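A usage sketch for `prepare_ref`, assuming an LTP segmentation model and a Chinese BERT tokenizer can be downloaded; the checkpoint name "LTP/small" is an assumption:

from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer

ltp_tokenizer = LTP("LTP/small")  # assumed checkpoint name
bert_tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")

lines = ["巴黎是法国的首都"]
ref_ids = prepare_ref(lines, ltp_tokenizer, bert_tokenizer)
print(ref_ids)  # one list per input line with positions of '##' continuation sub-tokens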
12,058
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPT2LMHeadModel logger = logging.getLogger(__name__) def print_2d_tensor(tensor): """Print a 2D tensor""" logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor)))) for row in range(len(tensor)): if tensor.dtype != torch.long: logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data)) else: logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data)) def compute_heads_importance( args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False ): """This method shows how to compute: - head attention entropy - head importance scores according to http://arxiv.org/abs/1905.10650 """ # Prepare our tensors n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads head_importance = torch.zeros(n_layers, n_heads).to(args.device) attn_entropy = torch.zeros(n_layers, n_heads).to(args.device) if head_mask is None: head_mask = torch.ones(n_layers, n_heads).to(args.device) head_mask.requires_grad_(requires_grad=True) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: head_mask = None tot_tokens = 0.0 total_loss = 0.0 for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])): inputs = tuple(t.to(args.device) for t in inputs) (input_ids,) = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) outputs = model(input_ids, labels=input_ids, head_mask=head_mask) # (loss), lm_logits, presents, (all hidden_states), (attentions) loss, _, all_attentions = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(all_attentions): masked_entropy = entropy(attn.detach(), True) attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(input_ids).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: exponent = 2 norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent) head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20 if not args.dont_normalize_global_importance: head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info("Attention entropies") print_2d_tensor(attn_entropy) if compute_importance: logger.info("Head importance scores") print_2d_tensor(head_importance) logger.info("Head ranked by importance scores") head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device) head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange( head_importance.numel(), device=args.device ) head_ranks = head_ranks.view_as(head_importance) print_2d_tensor(head_ranks) return attn_entropy, head_importance, total_loss The provided code snippet includes necessary 
dependencies for implementing the `mask_heads` function. Write a Python function `def mask_heads(args, model, eval_dataloader)` to solve the following problem: This method shows how to mask head (set some heads to zero), to test the effect on the network, based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650) Here is the function: def mask_heads(args, model, eval_dataloader): """This method shows how to mask head (set some heads to zero), to test the effect on the network, based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650) """ _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False) original_score = 1 / loss # instead of downsteam score use the LM loss logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold) new_head_mask = torch.ones_like(head_importance) num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount)) current_score = original_score while current_score >= original_score * args.masking_threshold: head_mask = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads head_importance[head_mask == 0.0] = float("Inf") current_heads_to_mask = head_importance.view(-1).sort()[1] if len(current_heads_to_mask) <= num_to_mask: print("BREAK BY num_to_mask") break # mask heads current_heads_to_mask = current_heads_to_mask[:num_to_mask] logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist())) new_head_mask = new_head_mask.view(-1) new_head_mask[current_heads_to_mask] = 0.0 new_head_mask = new_head_mask.view_as(head_mask) new_head_mask = new_head_mask.clone().detach() print_2d_tensor(new_head_mask) # Compute metric and head importance again _, head_importance, loss = compute_heads_importance( args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask ) current_score = 1 / loss logger.info( "Masking: current score: %f, remaining heads %d (%.1f percents)", current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, ) logger.info("Final head mask") print_2d_tensor(head_mask) np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy()) return head_mask
This method shows how to mask heads (set some heads to zero), to test the effect on the network, based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)
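For reference, here is a minimal, self-contained sketch of the threshold-driven masking loop that `mask_heads` runs, on synthetic importance scores. A toy score (the sum of kept importance) stands in for the 1 / LM-loss evaluation, and unlike the real loop the importance matrix is held fixed rather than recomputed after every masking round; all names and values below are illustrative.

```python
# Illustrative only: the masking loop from mask_heads, run on synthetic importance scores.
import torch

torch.manual_seed(0)
n_layers, n_heads = 4, 3
head_importance = torch.rand(n_layers, n_heads)

def toy_score(mask):
    # Stand-in for evaluating the masked model (real code uses 1 / LM loss).
    return (head_importance * mask).sum().item()

masking_threshold, masking_amount = 0.9, 0.2
original_score = toy_score(torch.ones(n_layers, n_heads))
num_to_mask = max(1, int(n_layers * n_heads * masking_amount))

importance = head_importance.clone()
new_head_mask = torch.ones(n_layers, n_heads)
current_score = original_score
while current_score >= original_score * masking_threshold:
    head_mask = new_head_mask.clone()                      # last mask that met the threshold
    importance[head_mask == 0.0] = float("inf")            # never re-pick already-masked heads
    to_mask = importance.view(-1).sort()[1][:num_to_mask]  # least important heads first
    new_head_mask = new_head_mask.view(-1)
    new_head_mask[to_mask] = 0.0
    new_head_mask = new_head_mask.view(n_layers, n_heads)
    current_score = toy_score(new_head_mask)

print("final head mask:\n", head_mask)
```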
12,059
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPT2LMHeadModel logger = logging.getLogger(__name__) def save_model(model, dirpath): # save results if os.path.exists(dirpath): if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile( os.path.join(dirpath, "config.json") ): os.remove(os.path.join(dirpath, "config.json")) if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile( os.path.join(dirpath, "pytorch_model.bin") ): os.remove(os.path.join(dirpath, "pytorch_model.bin")) else: os.makedirs(dirpath) model.save_pretrained(dirpath) def compute_heads_importance( args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False ): """This method shows how to compute: - head attention entropy - head importance scores according to http://arxiv.org/abs/1905.10650 """ # Prepare our tensors n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads head_importance = torch.zeros(n_layers, n_heads).to(args.device) attn_entropy = torch.zeros(n_layers, n_heads).to(args.device) if head_mask is None: head_mask = torch.ones(n_layers, n_heads).to(args.device) head_mask.requires_grad_(requires_grad=True) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: head_mask = None tot_tokens = 0.0 total_loss = 0.0 for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])): inputs = tuple(t.to(args.device) for t in inputs) (input_ids,) = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) outputs = model(input_ids, labels=input_ids, head_mask=head_mask) # (loss), lm_logits, presents, (all hidden_states), (attentions) loss, _, all_attentions = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(all_attentions): masked_entropy = entropy(attn.detach(), True) attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(input_ids).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: exponent = 2 norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent) head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20 if not args.dont_normalize_global_importance: head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info("Attention entropies") print_2d_tensor(attn_entropy) if compute_importance: logger.info("Head importance scores") print_2d_tensor(head_importance) logger.info("Head ranked by importance scores") head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device) head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange( head_importance.numel(), device=args.device ) head_ranks = head_ranks.view_as(head_importance) print_2d_tensor(head_ranks) 
return attn_entropy, head_importance, total_loss The provided code snippet includes necessary dependencies for implementing the `prune_heads` function. Write a Python function `def prune_heads(args, model, eval_dataloader, head_mask)` to solve the following problem: This method shows how to prune head (remove heads weights) based on the head importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650) Here is the function: def prune_heads(args, model, eval_dataloader, head_mask): """This method shows how to prune head (remove heads weights) based on the head importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650) """ # Try pruning and test time speedup # Pruning is like masking but we actually remove the masked weights before_time = datetime.now() _, _, loss = compute_heads_importance( args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask ) score_masking = 1 / loss original_time = datetime.now() - before_time original_num_params = sum(p.numel() for p in model.parameters()) heads_to_prune = dict( (layer, (1 - head_mask[layer].long()).nonzero().squeeze().tolist()) for layer in range(len(head_mask)) ) for k, v in heads_to_prune.items(): if isinstance(v, int): heads_to_prune[k] = [ v, ] assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item() model.prune_heads(heads_to_prune) pruned_num_params = sum(p.numel() for p in model.parameters()) before_time = datetime.now() _, _, loss = compute_heads_importance( args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, ) score_pruning = 1 / loss new_time = datetime.now() - before_time logger.info( "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)", original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, ) logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning) logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100) save_model(model, args.output_dir)
This method shows how to prune heads (remove head weights) based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)
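As a hedged illustration of the bookkeeping inside `prune_heads`, the snippet below converts a 0/1 head mask into the `{layer: [head indices]}` dict that `model.prune_heads` expects, using a hand-made mask and no model. It uses `squeeze(-1)` so the single-head and empty cases come out as lists directly, sidestepping the `isinstance(v, int)` special case in the original.

```python
# Illustrative only: from a head mask to the dict consumed by model.prune_heads.
import torch

head_mask = torch.tensor([[1, 0, 1, 1],
                          [0, 0, 1, 1],
                          [1, 1, 1, 1]])

heads_to_prune = {
    layer: (1 - head_mask[layer].long()).nonzero().squeeze(-1).tolist()
    for layer in range(len(head_mask))
}
print(heads_to_prune)   # {0: [1], 1: [0, 1], 2: []}

# Sanity check mirroring the assert in prune_heads: every zero in the mask is pruned.
assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
```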
12,060
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, SequentialSampler, Subset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, GlueDataset, default_data_collator, glue_compute_metrics, glue_output_modes, glue_processors, set_seed, ) from transformers.trainer_utils import is_main_process logger = logging.getLogger(__name__) def print_2d_tensor(tensor): """Print a 2D tensor""" logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor)))) for row in range(len(tensor)): if tensor.dtype != torch.long: logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data)) else: logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data)) def compute_heads_importance( args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False ): """This method shows how to compute: - head attention entropy - head importance scores according to http://arxiv.org/abs/1905.10650 """ # Prepare our tensors n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads head_importance = torch.zeros(n_layers, n_heads).to(args.device) attn_entropy = torch.zeros(n_layers, n_heads).to(args.device) if head_mask is None: head_mask = torch.ones(n_layers, n_heads).to(args.device) head_mask.requires_grad_(requires_grad=True) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: head_mask = None preds = None labels = None tot_tokens = 0.0 for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])): for k, v in inputs.items(): inputs[k] = v.to(args.device) # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) outputs = model(**inputs, head_mask=head_mask) loss, logits, all_attentions = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask if compute_entropy: for layer, attn in enumerate(all_attentions): masked_entropy = entropy(attn.detach()) * inputs["attention_mask"].float().unsqueeze(1) attn_entropy[layer] += masked_entropy.sum(-1).sum(0).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() # Also store our logits/labels if we want to compute metrics afterwards if preds is None: preds = logits.detach().cpu().numpy() labels = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) labels = np.append(labels, inputs["labels"].detach().cpu().numpy(), axis=0) tot_tokens += inputs["attention_mask"].float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: exponent = 2 norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent) head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20 if not args.dont_normalize_global_importance: head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print/save matrices np.save(os.path.join(args.output_dir, "attn_entropy.npy"), attn_entropy.detach().cpu().numpy()) 
np.save(os.path.join(args.output_dir, "head_importance.npy"), head_importance.detach().cpu().numpy()) logger.info("Attention entropies") print_2d_tensor(attn_entropy) logger.info("Head importance scores") print_2d_tensor(head_importance) logger.info("Head ranked by importance scores") head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device) head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange( head_importance.numel(), device=args.device ) head_ranks = head_ranks.view_as(head_importance) print_2d_tensor(head_ranks) return attn_entropy, head_importance, preds, labels The provided code snippet includes necessary dependencies for implementing the `mask_heads` function. Write a Python function `def mask_heads(args, model, eval_dataloader)` to solve the following problem: This method shows how to mask head (set some heads to zero), to test the effect on the network, based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650) Here is the function: def mask_heads(args, model, eval_dataloader): """This method shows how to mask head (set some heads to zero), to test the effect on the network, based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650) """ _, head_importance, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) original_score = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name] logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold) new_head_mask = torch.ones_like(head_importance) num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount)) current_score = original_score while current_score >= original_score * args.masking_threshold: head_mask = new_head_mask.clone() # save current head mask # heads from least important to most - keep only not-masked heads head_importance[head_mask == 0.0] = float("Inf") current_heads_to_mask = head_importance.view(-1).sort()[1] if len(current_heads_to_mask) <= num_to_mask: break # mask heads current_heads_to_mask = current_heads_to_mask[:num_to_mask] logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist())) new_head_mask = new_head_mask.view(-1) new_head_mask[current_heads_to_mask] = 0.0 new_head_mask = new_head_mask.view_as(head_mask) new_head_mask = new_head_mask.clone().detach() print_2d_tensor(new_head_mask) # Compute metric and head importance again _, head_importance, preds, labels = compute_heads_importance( args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask ) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) current_score = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name] logger.info( "Masking: current score: %f, remaining heads %d (%.1f percents)", current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, ) logger.info("Final head mask") print_2d_tensor(head_mask) np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy()) return head_mask
This method shows how to mask heads (set some heads to zero), to test the effect on the network, based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)
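The two normalization steps applied to `head_importance` inside `compute_heads_importance` above (per-layer L2 normalization followed by global min-max scaling) can be reproduced on toy data; this is only a sketch of that arithmetic, not the full importance computation.

```python
# Illustrative only: per-layer L2 normalization, then global min-max scaling.
import torch

head_importance = torch.tensor([[3.0, 4.0],
                                [1.0, 1.0]])

exponent = 2
norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
head_importance = head_importance / (norm_by_layer.unsqueeze(-1) + 1e-20)   # per-layer L2

head_importance = (head_importance - head_importance.min()) / (
    head_importance.max() - head_importance.min()
)                                                                           # rescale to [0, 1]
print(head_importance)
```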
12,061
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, SequentialSampler, Subset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, GlueDataset, default_data_collator, glue_compute_metrics, glue_output_modes, glue_processors, set_seed, ) from transformers.trainer_utils import is_main_process logger = logging.getLogger(__name__) def compute_heads_importance( args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False ): """This method shows how to compute: - head attention entropy - head importance scores according to http://arxiv.org/abs/1905.10650 """ # Prepare our tensors n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads head_importance = torch.zeros(n_layers, n_heads).to(args.device) attn_entropy = torch.zeros(n_layers, n_heads).to(args.device) if head_mask is None: head_mask = torch.ones(n_layers, n_heads).to(args.device) head_mask.requires_grad_(requires_grad=True) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: head_mask = None preds = None labels = None tot_tokens = 0.0 for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])): for k, v in inputs.items(): inputs[k] = v.to(args.device) # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) outputs = model(**inputs, head_mask=head_mask) loss, logits, all_attentions = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask if compute_entropy: for layer, attn in enumerate(all_attentions): masked_entropy = entropy(attn.detach()) * inputs["attention_mask"].float().unsqueeze(1) attn_entropy[layer] += masked_entropy.sum(-1).sum(0).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() # Also store our logits/labels if we want to compute metrics afterwards if preds is None: preds = logits.detach().cpu().numpy() labels = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) labels = np.append(labels, inputs["labels"].detach().cpu().numpy(), axis=0) tot_tokens += inputs["attention_mask"].float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: exponent = 2 norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent) head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20 if not args.dont_normalize_global_importance: head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print/save matrices np.save(os.path.join(args.output_dir, "attn_entropy.npy"), attn_entropy.detach().cpu().numpy()) np.save(os.path.join(args.output_dir, "head_importance.npy"), head_importance.detach().cpu().numpy()) logger.info("Attention entropies") print_2d_tensor(attn_entropy) logger.info("Head importance scores") print_2d_tensor(head_importance) logger.info("Head ranked by importance scores") head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device) 
head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange( head_importance.numel(), device=args.device ) head_ranks = head_ranks.view_as(head_importance) print_2d_tensor(head_ranks) return attn_entropy, head_importance, preds, labels The provided code snippet includes necessary dependencies for implementing the `prune_heads` function. Write a Python function `def prune_heads(args, model, eval_dataloader, head_mask)` to solve the following problem: This method shows how to prune head (remove heads weights) based on the head importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650) Here is the function: def prune_heads(args, model, eval_dataloader, head_mask): """This method shows how to prune head (remove heads weights) based on the head importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650) """ # Try pruning and test time speedup # Pruning is like masking but we actually remove the masked weights before_time = datetime.now() _, _, preds, labels = compute_heads_importance( args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask ) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) score_masking = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name] original_time = datetime.now() - before_time original_num_params = sum(p.numel() for p in model.parameters()) heads_to_prune = dict( (layer, (1 - head_mask[layer].long()).nonzero().squeeze().tolist()) for layer in range(len(head_mask)) ) assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item() model.prune_heads(heads_to_prune) pruned_num_params = sum(p.numel() for p in model.parameters()) before_time = datetime.now() _, _, preds, labels = compute_heads_importance( args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, ) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) score_pruning = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name] new_time = datetime.now() - before_time logger.info( "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)", original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, ) logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning) logger.info("Pruning: speed ratio (new timing / original timing): %f percents", original_time / new_time * 100)
This method shows how to prune heads (remove head weights) based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)
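For a concrete, though purely illustrative, end-to-end use of head pruning, the snippet below calls the library's `prune_heads` on a small pretrained checkpoint and compares parameter counts, mirroring what the function above measures. The model name and the choice of heads are arbitrary assumptions, the checkpoint must be downloaded, and the randomly initialized classification head is a side effect of the example rather than part of the pruning logic.

```python
# Illustrative only: prune a few heads of an arbitrarily chosen checkpoint and compare sizes.
from transformers import AutoModelForSequenceClassification

model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")
original_num_params = sum(p.numel() for p in model.parameters())

model.prune_heads({0: [0, 1], 1: [2]})   # drop heads 0 and 1 of layer 0, head 2 of layer 1

pruned_num_params = sum(p.numel() for p in model.parameters())
print(f"{original_num_params:.2e} -> {pruned_num_params:.2e} "
      f"({pruned_num_params / original_num_params * 100:.1f}% of parameters kept)")
```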
12,062
import abc import functools from collections.abc import Iterable import numpy as onp from absl import logging import jax import jax.numpy as jnp from jax import lax, random def nonnegative_softmax_kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=True, eps=0.0001 ): """ Constructs nonnegative kernel features for fast softmax attention Args: data: input for which features are computes projection_matrix: random matrix used to compute features attention_dims_t: tuple of attention dimensions batch_dims_t: tuple of batch dimensions precision: precision parameter is_query: predicate indicating whether input data corresponds to queries or keys normalize_data: predicate indicating whether data should be normalized, eps: numerical stabilizer Returns: Random features for fast softmax attention. """ del attention_dims_t if normalize_data: # We have e^{qk^T/sqrt{d}} = e^{q_norm k_norm^T}, where # w_norm = w * data_normalizer for w in {q,k}. data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1]))) else: data_normalizer = 1.0 ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0]) data_mod_shape = data.shape[0 : len(batch_dims_t)] + projection_matrix.shape data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix data_dash = lax.dot_general( data_normalizer * data, data_thick_random_matrix, (((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)), (batch_dims_t, batch_dims_t)), precision=precision, ) diag_data = jnp.square(data) diag_data = jnp.sum(diag_data, axis=data.ndim - 1) diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1) if is_query: last_dims_t = (len(data_dash.shape) - 1,) data_dash = ratio * ( jnp.exp(data_dash - diag_data - jnp.max(data_dash, axis=last_dims_t, keepdims=True)) + eps ) else: data_dash = ratio * (jnp.exp(data_dash - diag_data - jnp.max(data_dash)) + eps) return data_dash def sincos_softmax_kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, normalize_data=True ): """ Constructs kernel sin-cos features for fast softmax attention Args: data: input for which features are computes projection_matrix: random matrix used to compute features attention_dims_t: tuple of attention dimensions batch_dims_t: tuple of batch dimensions precision: precision parameter normalize_data: predicate indicating whether data should be normalized Returns: Random features for fast softmax attention. """ if normalize_data: # We have: exp(qk^T/sqrt{d}) = exp(|q|^2/2sqrt{d}) * exp(|k|^2/2sqrt{d}) * # exp(-(|q*c-k*c|^2)/2), where c = 1.0 / sqrt{sqrt{d}}. 
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1]))) else: data_normalizer = 1.0 ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0]) data_mod_shape = data.shape[0 : len(batch_dims_t)] + projection_matrix.shape data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix data_dash = lax.dot_general( data_normalizer * data, data_thick_random_matrix, (((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)), (batch_dims_t, batch_dims_t)), precision=precision, ) data_dash_cos = ratio * jnp.cos(data_dash) data_dash_sin = ratio * jnp.sin(data_dash) data_dash = jnp.concatenate((data_dash_cos, data_dash_sin), axis=-1) # Constructing D_data and data^{'} diag_data = jnp.square(data) diag_data = jnp.sum(diag_data, axis=data.ndim - 1) diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1) # Additional renormalization for numerical stability data_renormalizer = jnp.max(diag_data, attention_dims_t, keepdims=True) diag_data -= data_renormalizer diag_data = jnp.exp(diag_data) data_prime = data_dash * diag_data return data_prime class GaussianUnstructuredRandomMatrix(RandomMatrix): def __init__(self, nb_rows, nb_columns, key): self.nb_rows = nb_rows self.nb_columns = nb_columns self.key = key def get_2d_array(self): return random.normal(self.key, (self.nb_rows, self.nb_columns)) class GaussianOrthogonalRandomMatrix(RandomMatrix): r""" Class providing a method to create Gaussian orthogonal matrix. Class is responsible for constructing 2D Gaussian orthogonal arrays. """ def __init__(self, nb_rows, nb_columns, key, scaling=0): self.nb_rows = nb_rows self.nb_columns = nb_columns self.key = key self.scaling = scaling def get_2d_array(self): nb_full_blocks = int(self.nb_rows / self.nb_columns) block_list = [] rng = self.key for _ in range(nb_full_blocks): rng, rng_input = jax.random.split(rng) unstructured_block = random.normal(rng_input, (self.nb_columns, self.nb_columns)) q, _ = jnp.linalg.qr(unstructured_block) q = jnp.transpose(q) block_list.append(q) remaining_rows = self.nb_rows - nb_full_blocks * self.nb_columns if remaining_rows > 0: rng, rng_input = jax.random.split(rng) unstructured_block = random.normal(rng_input, (self.nb_columns, self.nb_columns)) q, _ = jnp.linalg.qr(unstructured_block) q = jnp.transpose(q) block_list.append(q[0:remaining_rows]) final_matrix = jnp.vstack(block_list) if self.scaling == 0: multiplier = jnp.linalg.norm(random.normal(self.key, (self.nb_rows, self.nb_columns)), axis=1) elif self.scaling == 1: multiplier = jnp.sqrt(float(self.nb_columns)) * jnp.ones((self.nb_rows)) else: raise ValueError("Scaling must be one of {0, 1}. Was %s" % self._scaling) return jnp.matmul(jnp.diag(multiplier), final_matrix) class FastAttentionviaLowRankDecomposition(FastAttention): r""" Class providing a method for fast attention via low rank decomposition. Class is responsible for providing a method <dot_product_attention> for fast dot-product attention with the use of low rank decomposition (e.g. with random feature maps). """ def __init__( self, matrix_creator, kernel_feature_creator, renormalize_attention, numerical_stabilizer, redraw_features, unidirectional, lax_scan_unroll=1, ): # For optimal GPU performance, set to 16. 
rng = random.PRNGKey(0) self.matrix_creator = matrix_creator self.projection_matrix = self.draw_weights(rng) self.kernel_feature_creator = kernel_feature_creator self.renormalize_attention = renormalize_attention self.numerical_stabilizer = numerical_stabilizer self.redraw_features = redraw_features self.unidirectional = unidirectional self.lax_scan_unroll = lax_scan_unroll def draw_weights(self, key): if self.matrix_creator is None: return None matrixrng, _ = random.split(key) projection_matrix = self.matrix_creator(key=matrixrng).get_2d_array() return projection_matrix def dot_product_attention( self, query, key, value, dtype=jnp.float32, bias=None, axis=None, broadcast_dropout=True, dropout_rng=None, dropout_rate=0.0, deterministic=False, precision=None, ): assert key.shape[:-1] == value.shape[:-1] assert query.shape[0:1] == key.shape[0:1] and query.shape[-1] == key.shape[-1] if axis is None: axis = tuple(range(1, key.ndim - 2)) if not isinstance(axis, Iterable): axis = (axis,) assert key.ndim == query.ndim assert key.ndim == value.ndim for ax in axis: if not (query.ndim >= 3 and 1 <= ax < query.ndim - 2): raise ValueError("Attention axis must be between the batch axis and the last-two axes.") n = key.ndim # Constructing projection tensor. if self.redraw_features: # TODO(kchoro): Get rid of the constant below. query_seed = lax.convert_element_type(jnp.ceil(jnp.sum(query) * 10000000.0), jnp.int32) rng = random.PRNGKey(query_seed) self.projection_matrix = self.draw_weights(rng) # batch_dims is <bs, <non-attention dims>, num_heads> batch_dims = tuple(onp.delete(range(n), axis + (n - 1,))) # q & k -> (bs, <non-attention dims>, num_heads, <attention dims>, channels) qk_perm = batch_dims + axis + (n - 1,) k_extra_perm = axis + batch_dims + (n - 1,) key_extra = key.transpose(k_extra_perm) key = key.transpose(qk_perm) query = query.transpose(qk_perm) # v -> (bs, <non-attention dims>, num_heads, <attention dims>, channels) v_perm = batch_dims + axis + (n - 1,) value = value.transpose(v_perm) batch_dims_t = tuple(range(len(batch_dims))) attention_dims_t = tuple(range(len(batch_dims), len(batch_dims) + len(axis))) # Constructing tensors Q^{'} and K^{'}. query_prime = self.kernel_feature_creator( query, self.projection_matrix, attention_dims_t, batch_dims_t, precision, True ) key_prime = self.kernel_feature_creator( key, self.projection_matrix, attention_dims_t, batch_dims_t, precision, False ) if self.unidirectional: index = attention_dims_t[0] z_slice_shape = key_prime.shape[0 : len(batch_dims_t)] + (key_prime.shape[-1],) + (value.shape[-1],) numerator_fn = _numerator(z_slice_shape, precision, self.lax_scan_unroll) W = numerator_fn( jnp.moveaxis(query_prime, index, 0), jnp.moveaxis(key_prime, index, 0), jnp.moveaxis(value, index, 0) ) # Constructing W = (Q^{'}(K^{'})^{T})_{masked}V W = jnp.moveaxis(W, 0, index) if not self.renormalize_attention: # Unidirectional, not-normalized attention. perm_inv = _invert_perm(qk_perm) result = W.transpose(perm_inv) return result else: # Unidirectional, normalized attention. 
thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(key_extra.shape[0 : len(axis)]) index = attention_dims_t[0] t_slice_shape = key_prime.shape[0 : len(batch_dims_t)] + (key_prime.shape[-1],) denominator_fn = _denominator(t_slice_shape, precision, self.lax_scan_unroll) R = denominator_fn(jnp.moveaxis(query_prime, index, 0), jnp.moveaxis(key_prime, index, 0)) R = jnp.moveaxis(R, 0, index) else: contract_query = tuple(range(len(batch_dims) + len(axis), len(batch_dims) + len(axis) + 1)) contract_z = tuple(range(len(batch_dims), len(batch_dims) + 1)) # Constructing Z = (K^{'})^{T}V # Z (bs, <non-attention dims>, num_heads, channels_m, channels_v) Z = lax.dot_general( key_prime, value, ((attention_dims_t, attention_dims_t), (batch_dims_t, batch_dims_t)), precision=precision, ) # Constructing W = Q^{'}Z = Q^{'}(K^{'})^{T}V # q (bs, <non-attention dims>, num_heads, <attention dims>, channels_m) # Z (bs, <non-attention dims>, num_heads, channels_m, channels_v) # W (bs, <non-attention dims>, num_heads, <attention dims>, channels_v) W = lax.dot_general( query_prime, Z, ((contract_query, contract_z), (batch_dims_t, batch_dims_t)), precision=precision ) if not self.renormalize_attention: # Bidirectional, not-normalized attention. perm_inv = _invert_perm(qk_perm) result = W.transpose(perm_inv) return result else: # Bidirectional, normalized attention. thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(key_extra.shape[0 : len(axis)]) contract_key = tuple(range(len(batch_dims), len(batch_dims) + len(axis))) contract_thick_all_ones = tuple(range(thick_all_ones.ndim - len(axis), thick_all_ones.ndim)) # Construct T = (K^{'})^{T} 1_L # k (bs, <non-attention dims>, num_heads, <attention dims>, channels) T = lax.dot_general( key_prime, thick_all_ones, ((contract_key, contract_thick_all_ones), (batch_dims_t, batch_dims_t)), precision=precision, ) # Construct partition function: R = Q^{'} T = Q^{'}(K^{'})^{T} 1_L # q_p (bs, <non-attention dims>, num_heads, <attention dims>, channs_m) # T (bs, <non-attention dims>, num_heads, channels_m) R = lax.dot_general( query_prime, T, (((query_prime.ndim - 1,), (T.ndim - 1,)), (batch_dims_t, range(0, len(T.shape) - 1))), precision=precision, ) R = R + 2 * self.numerical_stabilizer * (jnp.abs(R) <= self.numerical_stabilizer) R = jnp.reciprocal(R) R = jnp.expand_dims(R, len(R.shape)) # W (bs, <non-attention dims>, num_heads, <attention dims>, channels_v) # R (bs, <non-attention dims>, num_heads, <attention dims>, extra_channel) result = W * R # back to (bs, dim1, dim2, ..., dimN, num_heads, channels) perm_inv = _invert_perm(qk_perm) result = result.transpose(perm_inv) return result The provided code snippet includes necessary dependencies for implementing the `make_fast_softmax_attention` function. Write a Python function `def make_fast_softmax_attention( qkv_dim, renormalize_attention=True, numerical_stabilizer=0.000001, nb_features=256, ortho_features=True, ortho_scaling=0.0, redraw_features=True, unidirectional=False, nonnegative_features=True, lax_scan_unroll=1, )` to solve the following problem: Construct a fast softmax attention method. 
Here is the function: def make_fast_softmax_attention( qkv_dim, renormalize_attention=True, numerical_stabilizer=0.000001, nb_features=256, ortho_features=True, ortho_scaling=0.0, redraw_features=True, unidirectional=False, nonnegative_features=True, lax_scan_unroll=1, ): """Construct a fast softmax attention method.""" logging.info( "Fast softmax attention: %s features and orthogonal=%s, renormalize=%s", nb_features, ortho_features, renormalize_attention, ) if ortho_features: matrix_creator = functools.partial(GaussianOrthogonalRandomMatrix, nb_features, qkv_dim, scaling=ortho_scaling) else: matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix, nb_features, qkv_dim) if nonnegative_features: def kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=True ): return nonnegative_softmax_kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data, numerical_stabilizer, ) else: def kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=True ): del is_query return sincos_softmax_kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, normalize_data ) attention_fn = FastAttentionviaLowRankDecomposition( matrix_creator, kernel_feature_creator, renormalize_attention=renormalize_attention, numerical_stabilizer=numerical_stabilizer, redraw_features=redraw_features, unidirectional=unidirectional, lax_scan_unroll=lax_scan_unroll, ).dot_product_attention return attention_fn
Construct a fast softmax attention method.
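The core trick behind `make_fast_softmax_attention` is the nonnegative random-feature estimator for the softmax kernel; the NumPy sketch below checks that phi(q) . phi(k) roughly matches exp(q . k) for Gaussian projections. It deliberately skips details the library code handles (orthogonalized projection blocks, the d**0.25 data normalizer, the per-query max subtraction), so treat it as a sanity check of the math rather than the implementation.

```python
# Illustrative only: phi(x) = exp(W x - |x|^2 / 2) / sqrt(m) gives phi(q).phi(k) ~ exp(q.k).
import numpy as np

rng = np.random.default_rng(0)
d, m = 8, 4096                          # head dim, number of random features
q = rng.normal(size=d) / np.sqrt(d)
k = rng.normal(size=d) / np.sqrt(d)
W = rng.normal(size=(m, d))             # unstructured Gaussian projections

def phi(x):
    return np.exp(W @ x - x @ x / 2.0) / np.sqrt(m)

print("exact  softmax kernel:", np.exp(q @ k))
print("random-feature approx:", phi(q) @ phi(k))
```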
12,063
import abc import functools from collections.abc import Iterable import numpy as onp from absl import logging import jax import jax.numpy as jnp from jax import lax, random def generalized_kernel_feature_creator( data, projection_matrix, batch_dims_t, precision, kernel_fn, kernel_epsilon, normalize_data ): """ Constructs kernel features for fast generalized attention Args: data: input for which features are computes projection_matrix: matrix used to compute features batch_dims_t: tuple of batch dimensions precision: precision parameter kernel_fn: kernel function used kernel_epsilon: additive positive term added to every feature for numerical stability normalize_data: predicate indicating whether data should be normalized Returns: Random features for fast generalized attention. """ if normalize_data: data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1]))) else: data_normalizer = 1.0 if projection_matrix is None: return kernel_fn(data_normalizer * data) + kernel_epsilon else: data_mod_shape = data.shape[0 : len(batch_dims_t)] + projection_matrix.shape data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix data_dash = lax.dot_general( data_normalizer * data, data_thick_random_matrix, (((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)), (batch_dims_t, batch_dims_t)), precision=precision, ) data_prime = kernel_fn(data_dash) + kernel_epsilon return data_prime class GaussianUnstructuredRandomMatrix(RandomMatrix): def __init__(self, nb_rows, nb_columns, key): self.nb_rows = nb_rows self.nb_columns = nb_columns self.key = key def get_2d_array(self): return random.normal(self.key, (self.nb_rows, self.nb_columns)) class GaussianOrthogonalRandomMatrix(RandomMatrix): r""" Class providing a method to create Gaussian orthogonal matrix. Class is responsible for constructing 2D Gaussian orthogonal arrays. """ def __init__(self, nb_rows, nb_columns, key, scaling=0): self.nb_rows = nb_rows self.nb_columns = nb_columns self.key = key self.scaling = scaling def get_2d_array(self): nb_full_blocks = int(self.nb_rows / self.nb_columns) block_list = [] rng = self.key for _ in range(nb_full_blocks): rng, rng_input = jax.random.split(rng) unstructured_block = random.normal(rng_input, (self.nb_columns, self.nb_columns)) q, _ = jnp.linalg.qr(unstructured_block) q = jnp.transpose(q) block_list.append(q) remaining_rows = self.nb_rows - nb_full_blocks * self.nb_columns if remaining_rows > 0: rng, rng_input = jax.random.split(rng) unstructured_block = random.normal(rng_input, (self.nb_columns, self.nb_columns)) q, _ = jnp.linalg.qr(unstructured_block) q = jnp.transpose(q) block_list.append(q[0:remaining_rows]) final_matrix = jnp.vstack(block_list) if self.scaling == 0: multiplier = jnp.linalg.norm(random.normal(self.key, (self.nb_rows, self.nb_columns)), axis=1) elif self.scaling == 1: multiplier = jnp.sqrt(float(self.nb_columns)) * jnp.ones((self.nb_rows)) else: raise ValueError("Scaling must be one of {0, 1}. Was %s" % self._scaling) return jnp.matmul(jnp.diag(multiplier), final_matrix) class FastAttentionviaLowRankDecomposition(FastAttention): r""" Class providing a method for fast attention via low rank decomposition. Class is responsible for providing a method <dot_product_attention> for fast dot-product attention with the use of low rank decomposition (e.g. with random feature maps). """ def __init__( self, matrix_creator, kernel_feature_creator, renormalize_attention, numerical_stabilizer, redraw_features, unidirectional, lax_scan_unroll=1, ): # For optimal GPU performance, set to 16. 
rng = random.PRNGKey(0) self.matrix_creator = matrix_creator self.projection_matrix = self.draw_weights(rng) self.kernel_feature_creator = kernel_feature_creator self.renormalize_attention = renormalize_attention self.numerical_stabilizer = numerical_stabilizer self.redraw_features = redraw_features self.unidirectional = unidirectional self.lax_scan_unroll = lax_scan_unroll def draw_weights(self, key): if self.matrix_creator is None: return None matrixrng, _ = random.split(key) projection_matrix = self.matrix_creator(key=matrixrng).get_2d_array() return projection_matrix def dot_product_attention( self, query, key, value, dtype=jnp.float32, bias=None, axis=None, broadcast_dropout=True, dropout_rng=None, dropout_rate=0.0, deterministic=False, precision=None, ): assert key.shape[:-1] == value.shape[:-1] assert query.shape[0:1] == key.shape[0:1] and query.shape[-1] == key.shape[-1] if axis is None: axis = tuple(range(1, key.ndim - 2)) if not isinstance(axis, Iterable): axis = (axis,) assert key.ndim == query.ndim assert key.ndim == value.ndim for ax in axis: if not (query.ndim >= 3 and 1 <= ax < query.ndim - 2): raise ValueError("Attention axis must be between the batch axis and the last-two axes.") n = key.ndim # Constructing projection tensor. if self.redraw_features: # TODO(kchoro): Get rid of the constant below. query_seed = lax.convert_element_type(jnp.ceil(jnp.sum(query) * 10000000.0), jnp.int32) rng = random.PRNGKey(query_seed) self.projection_matrix = self.draw_weights(rng) # batch_dims is <bs, <non-attention dims>, num_heads> batch_dims = tuple(onp.delete(range(n), axis + (n - 1,))) # q & k -> (bs, <non-attention dims>, num_heads, <attention dims>, channels) qk_perm = batch_dims + axis + (n - 1,) k_extra_perm = axis + batch_dims + (n - 1,) key_extra = key.transpose(k_extra_perm) key = key.transpose(qk_perm) query = query.transpose(qk_perm) # v -> (bs, <non-attention dims>, num_heads, <attention dims>, channels) v_perm = batch_dims + axis + (n - 1,) value = value.transpose(v_perm) batch_dims_t = tuple(range(len(batch_dims))) attention_dims_t = tuple(range(len(batch_dims), len(batch_dims) + len(axis))) # Constructing tensors Q^{'} and K^{'}. query_prime = self.kernel_feature_creator( query, self.projection_matrix, attention_dims_t, batch_dims_t, precision, True ) key_prime = self.kernel_feature_creator( key, self.projection_matrix, attention_dims_t, batch_dims_t, precision, False ) if self.unidirectional: index = attention_dims_t[0] z_slice_shape = key_prime.shape[0 : len(batch_dims_t)] + (key_prime.shape[-1],) + (value.shape[-1],) numerator_fn = _numerator(z_slice_shape, precision, self.lax_scan_unroll) W = numerator_fn( jnp.moveaxis(query_prime, index, 0), jnp.moveaxis(key_prime, index, 0), jnp.moveaxis(value, index, 0) ) # Constructing W = (Q^{'}(K^{'})^{T})_{masked}V W = jnp.moveaxis(W, 0, index) if not self.renormalize_attention: # Unidirectional, not-normalized attention. perm_inv = _invert_perm(qk_perm) result = W.transpose(perm_inv) return result else: # Unidirectional, normalized attention. 
thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(key_extra.shape[0 : len(axis)]) index = attention_dims_t[0] t_slice_shape = key_prime.shape[0 : len(batch_dims_t)] + (key_prime.shape[-1],) denominator_fn = _denominator(t_slice_shape, precision, self.lax_scan_unroll) R = denominator_fn(jnp.moveaxis(query_prime, index, 0), jnp.moveaxis(key_prime, index, 0)) R = jnp.moveaxis(R, 0, index) else: contract_query = tuple(range(len(batch_dims) + len(axis), len(batch_dims) + len(axis) + 1)) contract_z = tuple(range(len(batch_dims), len(batch_dims) + 1)) # Constructing Z = (K^{'})^{T}V # Z (bs, <non-attention dims>, num_heads, channels_m, channels_v) Z = lax.dot_general( key_prime, value, ((attention_dims_t, attention_dims_t), (batch_dims_t, batch_dims_t)), precision=precision, ) # Constructing W = Q^{'}Z = Q^{'}(K^{'})^{T}V # q (bs, <non-attention dims>, num_heads, <attention dims>, channels_m) # Z (bs, <non-attention dims>, num_heads, channels_m, channels_v) # W (bs, <non-attention dims>, num_heads, <attention dims>, channels_v) W = lax.dot_general( query_prime, Z, ((contract_query, contract_z), (batch_dims_t, batch_dims_t)), precision=precision ) if not self.renormalize_attention: # Bidirectional, not-normalized attention. perm_inv = _invert_perm(qk_perm) result = W.transpose(perm_inv) return result else: # Bidirectional, normalized attention. thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(key_extra.shape[0 : len(axis)]) contract_key = tuple(range(len(batch_dims), len(batch_dims) + len(axis))) contract_thick_all_ones = tuple(range(thick_all_ones.ndim - len(axis), thick_all_ones.ndim)) # Construct T = (K^{'})^{T} 1_L # k (bs, <non-attention dims>, num_heads, <attention dims>, channels) T = lax.dot_general( key_prime, thick_all_ones, ((contract_key, contract_thick_all_ones), (batch_dims_t, batch_dims_t)), precision=precision, ) # Construct partition function: R = Q^{'} T = Q^{'}(K^{'})^{T} 1_L # q_p (bs, <non-attention dims>, num_heads, <attention dims>, channs_m) # T (bs, <non-attention dims>, num_heads, channels_m) R = lax.dot_general( query_prime, T, (((query_prime.ndim - 1,), (T.ndim - 1,)), (batch_dims_t, range(0, len(T.shape) - 1))), precision=precision, ) R = R + 2 * self.numerical_stabilizer * (jnp.abs(R) <= self.numerical_stabilizer) R = jnp.reciprocal(R) R = jnp.expand_dims(R, len(R.shape)) # W (bs, <non-attention dims>, num_heads, <attention dims>, channels_v) # R (bs, <non-attention dims>, num_heads, <attention dims>, extra_channel) result = W * R # back to (bs, dim1, dim2, ..., dimN, num_heads, channels) perm_inv = _invert_perm(qk_perm) result = result.transpose(perm_inv) return result The provided code snippet includes necessary dependencies for implementing the `make_fast_generalized_attention` function. Write a Python function `def make_fast_generalized_attention( qkv_dim, renormalize_attention=True, numerical_stabilizer=0.0, nb_features=256, features_type="deterministic", kernel_fn=jax.nn.relu, kernel_epsilon=0.001, redraw_features=False, unidirectional=False, lax_scan_unroll=1, )` to solve the following problem: Construct a fast generalized attention menthod. 
Here is the function: def make_fast_generalized_attention( qkv_dim, renormalize_attention=True, numerical_stabilizer=0.0, nb_features=256, features_type="deterministic", kernel_fn=jax.nn.relu, kernel_epsilon=0.001, redraw_features=False, unidirectional=False, lax_scan_unroll=1, ): """Construct a fast generalized attention menthod.""" logging.info("Fast generalized attention.: %s features and renormalize=%s", nb_features, renormalize_attention) if features_type == "ortho": matrix_creator = functools.partial(GaussianOrthogonalRandomMatrix, nb_features, qkv_dim, scaling=False) elif features_type == "iid": matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix, nb_features, qkv_dim) elif features_type == "deterministic": matrix_creator = None else: raise ValueError("Unknown feature value type") def kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=False ): del attention_dims_t del is_query return generalized_kernel_feature_creator( data, projection_matrix, batch_dims_t, precision, kernel_fn, kernel_epsilon, normalize_data ) attention_fn = FastAttentionviaLowRankDecomposition( matrix_creator, kernel_feature_creator, renormalize_attention=renormalize_attention, numerical_stabilizer=numerical_stabilizer, redraw_features=redraw_features, unidirectional=unidirectional, lax_scan_unroll=lax_scan_unroll, ).dot_product_attention return attention_fn
Construct a fast generalized attention method.
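A minimal NumPy sketch of bidirectional generalized kernel attention with a ReLU feature map: keys and values are summarized once as Z = phi(K)^T V, the numerator is phi(Q) Z, and the output is renormalized by the partition function R = phi(Q) phi(K)^T 1. This is the linear-time rearrangement the class above expresses with lax.dot_general; shapes and data here are toy values, not the library's batched layout.

```python
# Illustrative only: bidirectional generalized kernel attention with a relu feature map.
import numpy as np

rng = np.random.default_rng(0)
L, d = 5, 4                                         # sequence length, head dim
Q, K, V = rng.normal(size=(3, L, d))

def phi(x, kernel_epsilon=1e-3):
    return np.maximum(x, 0.0) + kernel_epsilon      # relu kernel + numerical stabilizer

Z = phi(K).T @ V                                    # (d, d): keys/values summarized once
W = phi(Q) @ Z                                      # (L, d): numerator
R = phi(Q) @ phi(K).T.sum(axis=1, keepdims=True)    # (L, 1): partition function
print(W / R)                                        # renormalized attention output
```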
12,064
import abc import functools from collections.abc import Iterable import numpy as onp from absl import logging import jax import jax.numpy as jnp from jax import lax, random def _numerator(z_slice_shape, precision, unroll=1): def fwd(qs, ks, vs): def body(p, qkv): (q, k, v) = qkv p += jnp.einsum("...m,...d->...md", k, v, precision=precision) X_slice = jnp.einsum("...m,...md->...d", q, p, precision=precision) return p, X_slice init_value = jnp.zeros(z_slice_shape) p, W = lax.scan(body, init_value, (qs, ks, vs), unroll=unroll) return W, (p, qs, ks, vs) def bwd(pqkv, W_ct): def body(carry, qkv_xct): p, p_ct = carry q, k, v, x_ct = qkv_xct q_ct = jnp.einsum("...d,...md->...m", x_ct, p, precision=precision) p_ct += jnp.einsum("...d,...m->...md", x_ct, q, precision=precision) k_ct = jnp.einsum("...md,...d->...m", p_ct, v, precision=precision) v_ct = jnp.einsum("...md,...m->...d", p_ct, k, precision=precision) p -= jnp.einsum("...m,...d->...md", k, v, precision=precision) return (p, p_ct), (q_ct, k_ct, v_ct) p, qs, ks, vs = pqkv _, (qs_ct, ks_ct, vs_ct) = lax.scan( body, (p, jnp.zeros_like(p)), (qs, ks, vs, W_ct), reverse=True, unroll=unroll ) return qs_ct, ks_ct, vs_ct @jax.custom_vjp def _numerator_impl(qs, ks, vs): W, _ = fwd(qs, ks, vs) return W _numerator_impl.defvjp(fwd, bwd) return _numerator_impl
null
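What the `_numerator` scan computes can be unrolled as a plain NumPy loop: a running prefix sum p of k_i v_i^T with output x_i = q_i^T p, i.e. causal (unidirectional) linear attention before normalization. The custom VJP in the original exists so the backward pass can re-run this recurrence in reverse instead of storing every prefix state; the sketch below only shows the forward pass on toy tensors.

```python
# Illustrative only: the forward recurrence implemented by the _numerator scan.
import numpy as np

rng = np.random.default_rng(0)
L, m, d = 4, 3, 2                       # sequence length, feature dim, value dim
qs, ks = rng.normal(size=(2, L, m))
vs = rng.normal(size=(L, d))

p = np.zeros((m, d))
outputs = []
for q, k, v in zip(qs, ks, vs):
    p += np.outer(k, v)                 # accumulate k_i v_i^T
    outputs.append(q @ p)               # x_i = q_i^T p, attends only to positions <= i
print(np.stack(outputs))                # shape (L, d)
```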
12,065
import abc import functools from collections.abc import Iterable import numpy as onp from absl import logging import jax import jax.numpy as jnp from jax import lax, random def _denominator(t_slice_shape, precision, unroll=1): def fwd(qs, ks): def body(p, qk): q, k = qk p += k x = jnp.einsum("...m,...m->...", q, p, precision=precision) return p, x p = jnp.zeros(t_slice_shape) p, R = lax.scan(body, p, (qs, ks), unroll=unroll) return R, (qs, ks, p) def bwd(qkp, R_ct): def body(carry, qkx): p, p_ct = carry q, k, x_ct = qkx q_ct = jnp.einsum("...,...m->...m", x_ct, p, precision=precision) p_ct += jnp.einsum("...,...m->...m", x_ct, q, precision=precision) k_ct = p_ct p -= k return (p, p_ct), (q_ct, k_ct) qs, ks, p = qkp _, (qs_ct, ks_ct) = lax.scan(body, (p, jnp.zeros_like(p)), (qs, ks, R_ct), reverse=True, unroll=unroll) return (qs_ct, ks_ct) @jax.custom_vjp def _denominator_impl(qs, ks): R, _ = fwd(qs, ks) return R _denominator_impl.defvjp(fwd, bwd) return _denominator_impl
null
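The matching denominator is just a running sum of the key features; the toy loop below combines both scans to produce renormalized causal linear attention on made-up tensors. Nonnegative features are used so the denominator stays positive, as the FAVOR+ feature maps guarantee.

```python
# Illustrative only: numerator and denominator scans combined into causal linear attention.
import numpy as np

rng = np.random.default_rng(0)
L, m, d = 4, 3, 2
qs, ks = np.abs(rng.normal(size=(2, L, m)))   # nonnegative toy features
vs = rng.normal(size=(L, d))

num_state, den_state = np.zeros((m, d)), np.zeros(m)
outputs = []
for q, k, v in zip(qs, ks, vs):
    num_state += np.outer(k, v)               # prefix sum of k_i v_i^T
    den_state += k                            # prefix sum of k_i
    outputs.append((q @ num_state) / (q @ den_state))
print(np.stack(outputs))                      # each row is a weighted average of v_1..v_i
```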
12,066
import abc import functools from collections.abc import Iterable import numpy as onp from absl import logging import jax import jax.numpy as jnp from jax import lax, random def _invert_perm(perm): perm_inv = [0] * len(perm) for i, j in enumerate(perm): perm_inv[j] = i return tuple(perm_inv)
null
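`_invert_perm` simply builds the inverse permutation used to transpose attention outputs back to their original axis order; a short NumPy check of that property, on an arbitrary example permutation:

```python
# Illustrative only: transposing by perm and then by its inverse restores the axis order.
import numpy as np

perm = (2, 0, 1)
perm_inv = [0] * len(perm)
for i, j in enumerate(perm):
    perm_inv[j] = i
perm_inv = tuple(perm_inv)              # (1, 2, 0)

x = np.arange(24).reshape(2, 3, 4)
assert np.array_equal(x.transpose(perm).transpose(perm_inv), x)
```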
12,067
import logging import os import sys from dataclasses import dataclass, field from pathlib import Path from typing import Dict, List, Optional, Tuple import numpy as np from datasets import load_dataset from tqdm import tqdm import jax import jax.numpy as jnp from flax import jax_utils from flax.optim import Adam from flax.training import common_utils from flax.training.common_utils import get_metrics from jax.nn import log_softmax from modeling_flax_performer import FlaxPerformerForMaskedLM from transformers import ( MODEL_FOR_MASKED_LM_MAPPING, AutoTokenizer, BertConfig, FlaxBertForMaskedLM, HfArgumentParser, PreTrainedTokenizerBase, TensorType, TrainingArguments, is_tensorboard_available, set_seed, ) The provided code snippet includes necessary dependencies for implementing the `create_learning_rate_scheduler` function. Write a Python function `def create_learning_rate_scheduler( factors="constant * linear_warmup * rsqrt_decay", base_learning_rate=0.5, warmup_steps=1000, decay_factor=0.5, steps_per_decay=20000, steps_per_cycle=100000, )` to solve the following problem: Creates learning rate schedule. Interprets factors in the factors string which can consist of: * constant: interpreted as the constant value, * linear_warmup: interpreted as linear warmup until warmup_steps, * rsqrt_decay: divide by square root of max(step, warmup_steps) * rsqrt_normalized_decay: divide by square root of max(step/warmup_steps, 1) * decay_every: Every k steps decay the learning rate by decay_factor. * cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter. Args: factors: string, factors separated by "*" that defines the schedule. base_learning_rate: float, the starting constant for the lr schedule. warmup_steps: int, how many steps to warm up for in the warmup schedule. decay_factor: float, the amount to decay the learning rate by. steps_per_decay: int, how often to decay the learning rate. steps_per_cycle: int, steps per cycle when using cosine decay. Returns: a function learning_rate(step): float -> {"learning_rate": float}, the step-dependent lr. Here is the function: def create_learning_rate_scheduler( factors="constant * linear_warmup * rsqrt_decay", base_learning_rate=0.5, warmup_steps=1000, decay_factor=0.5, steps_per_decay=20000, steps_per_cycle=100000, ): """Creates learning rate schedule. Interprets factors in the factors string which can consist of: * constant: interpreted as the constant value, * linear_warmup: interpreted as linear warmup until warmup_steps, * rsqrt_decay: divide by square root of max(step, warmup_steps) * rsqrt_normalized_decay: divide by square root of max(step/warmup_steps, 1) * decay_every: Every k steps decay the learning rate by decay_factor. * cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter. Args: factors: string, factors separated by "*" that defines the schedule. base_learning_rate: float, the starting constant for the lr schedule. warmup_steps: int, how many steps to warm up for in the warmup schedule. decay_factor: float, the amount to decay the learning rate by. steps_per_decay: int, how often to decay the learning rate. steps_per_cycle: int, steps per cycle when using cosine decay. Returns: a function learning_rate(step): float -> {"learning_rate": float}, the step-dependent lr. 
""" factors = [n.strip() for n in factors.split("*")] def step_fn(step): """Step to learning rate function.""" ret = 1.0 for name in factors: if name == "constant": ret *= base_learning_rate elif name == "linear_warmup": ret *= jnp.minimum(1.0, step / warmup_steps) elif name == "rsqrt_decay": ret /= jnp.sqrt(jnp.maximum(step, warmup_steps)) elif name == "rsqrt_normalized_decay": ret *= jnp.sqrt(warmup_steps) ret /= jnp.sqrt(jnp.maximum(step, warmup_steps)) elif name == "decay_every": ret *= decay_factor ** (step // steps_per_decay) elif name == "cosine_decay": progress = jnp.maximum(0.0, (step - warmup_steps) / float(steps_per_cycle)) ret *= jnp.maximum(0.0, 0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0)))) else: raise ValueError("Unknown factor %s." % name) return jnp.asarray(ret, dtype=jnp.float32) return step_fn
Creates learning rate schedule. Interprets factors in the factors string which can consist of: * constant: interpreted as the constant value, * linear_warmup: interpreted as linear warmup until warmup_steps, * rsqrt_decay: divide by square root of max(step, warmup_steps) * rsqrt_normalized_decay: divide by square root of max(step/warmup_steps, 1) * decay_every: Every k steps decay the learning rate by decay_factor. * cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter. Args: factors: string, factors separated by "*" that defines the schedule. base_learning_rate: float, the starting constant for the lr schedule. warmup_steps: int, how many steps to warm up for in the warmup schedule. decay_factor: float, the amount to decay the learning rate by. steps_per_decay: int, how often to decay the learning rate. steps_per_cycle: int, steps per cycle when using cosine decay. Returns: a function learning_rate(step): float -> {"learning_rate": float}, the step-dependent lr.
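As a worked illustration of the default factor string "constant * linear_warmup * rsqrt_decay", the snippet below re-derives the learning rate at a few steps from the factor definitions in the docstring; base_learning_rate and warmup_steps are arbitrary example values, and this is a plain-NumPy paraphrase rather than the jitted schedule itself.

```python
# Illustrative only: the default schedule evaluated at a few steps.
import numpy as np

base_learning_rate, warmup_steps = 0.5, 1000

def lr(step):
    ret = base_learning_rate                     # constant
    ret *= min(1.0, step / warmup_steps)         # linear_warmup
    ret /= np.sqrt(max(step, warmup_steps))      # rsqrt_decay
    return ret

for step in [100, 1000, 10000]:
    print(step, lr(step))
# 100   -> 0.5 * 0.1 / sqrt(1000)  ~ 1.58e-3
# 1000  -> 0.5 * 1.0 / sqrt(1000)  ~ 1.58e-2
# 10000 -> 0.5 * 1.0 / sqrt(10000) = 5.0e-3
```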
12,068
import logging import os import sys from dataclasses import dataclass, field from pathlib import Path from typing import Dict, List, Optional, Tuple import numpy as np from datasets import load_dataset from tqdm import tqdm import jax import jax.numpy as jnp from flax import jax_utils from flax.optim import Adam from flax.training import common_utils from flax.training.common_utils import get_metrics from jax.nn import log_softmax from modeling_flax_performer import FlaxPerformerForMaskedLM from transformers import ( MODEL_FOR_MASKED_LM_MAPPING, AutoTokenizer, BertConfig, FlaxBertForMaskedLM, HfArgumentParser, PreTrainedTokenizerBase, TensorType, TrainingArguments, is_tensorboard_available, set_seed, ) def cross_entropy(logits, targets, weights=None, label_smoothing=0.0): """Compute cross entropy and entropy for log probs and targets. Args: logits: [batch, length, num_classes] float array. targets: categorical targets [batch, length] int array. weights: None or array of shape [batch, length] label_smoothing: label smoothing constant, used to determine the on and off values. Returns: Tuple of scalar loss and batch normalizing factor. """ if logits.ndim != targets.ndim + 1: raise ValueError( "Incorrect shapes. Got shape %s logits and %s targets" % (str(logits.shape), str(targets.shape)) ) vocab_size = logits.shape[-1] confidence = 1.0 - label_smoothing low_confidence = (1.0 - confidence) / (vocab_size - 1) normalizing_constant = -( confidence * jnp.log(confidence) + (vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20) ) soft_targets = common_utils.onehot(targets, vocab_size, on_value=confidence, off_value=low_confidence) loss = -jnp.sum(soft_targets * log_softmax(logits), axis=-1) loss = loss - normalizing_constant if weights is not None: loss = loss * weights normalizing_factor = weights.sum() else: normalizing_factor = np.prod(targets.shape) return loss.sum(), normalizing_factor def training_step(optimizer, batch, dropout_rng): dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) def loss_fn(params): targets = batch.pop("labels") # Hide away tokens which doesn't participate in the optimization token_mask = jnp.where(targets > 0, 1.0, 0.0) logits = model(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] loss, weight_sum = cross_entropy(logits, targets, token_mask) return loss / weight_sum step = optimizer.state.step lr = lr_scheduler_fn(step) grad_fn = jax.value_and_grad(loss_fn) loss, grad = grad_fn(optimizer.target) grad = jax.lax.pmean(grad, "batch") optimizer = optimizer.apply_gradient(grad, learning_rate=lr) return loss, optimizer, new_dropout_rng
null
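The label-smoothing arithmetic inside `cross_entropy` can be checked on a single token: soft targets put `confidence` on the gold class and spread the remainder uniformly, the loss is the smoothed negative log-likelihood, and a constant is subtracted so a perfectly smoothed prediction scores zero. The NumPy sketch below is illustrative and omits batching, token weights, and the cross-device pmean used in training.

```python
# Illustrative only: smoothed cross entropy for one token with a 4-word vocabulary.
import numpy as np

vocab_size, label_smoothing, target = 4, 0.1, 2
confidence = 1.0 - label_smoothing
low_confidence = (1.0 - confidence) / (vocab_size - 1)

soft_targets = np.full(vocab_size, low_confidence)
soft_targets[target] = confidence                   # [0.033, 0.033, 0.9, 0.033]

logits = np.array([0.2, -1.0, 2.5, 0.3])
log_probs = logits - np.log(np.exp(logits).sum())   # log softmax
loss = -(soft_targets * log_probs).sum()

normalizing_constant = -(
    confidence * np.log(confidence)
    + (vocab_size - 1) * low_confidence * np.log(low_confidence + 1e-20)
)
print(loss - normalizing_constant)
```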
12,069
import logging import os import sys from dataclasses import dataclass, field from pathlib import Path from typing import Dict, List, Optional, Tuple import numpy as np from datasets import load_dataset from tqdm import tqdm import jax import jax.numpy as jnp from flax import jax_utils from flax.optim import Adam from flax.training import common_utils from flax.training.common_utils import get_metrics from jax.nn import log_softmax from modeling_flax_performer import FlaxPerformerForMaskedLM from transformers import ( MODEL_FOR_MASKED_LM_MAPPING, AutoTokenizer, BertConfig, FlaxBertForMaskedLM, HfArgumentParser, PreTrainedTokenizerBase, TensorType, TrainingArguments, is_tensorboard_available, set_seed, ) def compute_metrics(logits, labels, weights, label_smoothing=0.0): """Compute summary metrics.""" loss, normalizer = cross_entropy(logits, labels, weights, label_smoothing) acc, _ = accuracy(logits, labels, weights) metrics = {"loss": loss, "accuracy": acc, "normalizer": normalizer} metrics = jax.lax.psum(metrics, axis_name="batch") return metrics The provided code snippet includes necessary dependencies for implementing the `eval_step` function. Write a Python function `def eval_step(params, batch)` to solve the following problem: Calculate evaluation metrics on a batch. Here is the function: def eval_step(params, batch): """ Calculate evaluation metrics on a batch. """ targets = batch.pop("labels") # Hide away tokens which doesn't participate in the optimization token_mask = jnp.where(targets > 0, 1.0, 0.0) logits = model(**batch, params=params, train=False)[0] return compute_metrics(logits, targets, token_mask)
Calculate evaluation metrics on a batch.
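A small NumPy illustration of the token weighting `eval_step` relies on: positions whose label is 0 (padding or non-masked tokens in this script's setup) receive weight 0 and drop out of both the loss normalizer and the accuracy.

```python
# Illustrative only: masking out label-0 positions before computing metrics.
import numpy as np

targets = np.array([[15,  0,  0, 87],
                    [ 0, 42,  0,  0]])
token_mask = np.where(targets > 0, 1.0, 0.0)

preds = np.array([[15,  3,  9, 11],
                  [ 7, 42,  1,  0]])
correct = (preds == targets) * token_mask
print(correct.sum() / token_mask.sum())   # 2 of the 3 scored tokens are right ~ 0.667
```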
12,070
import logging import os import sys from dataclasses import dataclass, field from pathlib import Path from typing import Dict, List, Optional, Tuple import numpy as np from datasets import load_dataset from tqdm import tqdm import jax import jax.numpy as jnp from flax import jax_utils from flax.optim import Adam from flax.training import common_utils from flax.training.common_utils import get_metrics from jax.nn import log_softmax from modeling_flax_performer import FlaxPerformerForMaskedLM from transformers import ( MODEL_FOR_MASKED_LM_MAPPING, AutoTokenizer, BertConfig, FlaxBertForMaskedLM, HfArgumentParser, PreTrainedTokenizerBase, TensorType, TrainingArguments, is_tensorboard_available, set_seed, ) def generate_batch_splits(samples_idx: np.ndarray, batch_size: int) -> np.ndarray: nb_samples = len(samples_idx) samples_to_remove = nb_samples % batch_size if samples_to_remove != 0: samples_idx = samples_idx[:-samples_to_remove] sections_split = nb_samples // batch_size batch_idx = np.split(samples_idx, sections_split) return batch_idx
null
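A quick illustration of `generate_batch_splits`; it needs only numpy and shows that leftover samples beyond a full batch are dropped:

```python
import numpy as np

samples_idx = np.arange(10)
batches = generate_batch_splits(samples_idx, batch_size=4)
print(batches)  # [array([0, 1, 2, 3]), array([4, 5, 6, 7])] -- the last 2 samples are dropped
```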
12,071
import logging import os import sys from dataclasses import dataclass, field from pathlib import Path from typing import Dict, List, Optional, Tuple import numpy as np from datasets import load_dataset from tqdm import tqdm import jax import jax.numpy as jnp from flax import jax_utils from flax.optim import Adam from flax.training import common_utils from flax.training.common_utils import get_metrics from jax.nn import log_softmax from modeling_flax_performer import FlaxPerformerForMaskedLM from transformers import ( MODEL_FOR_MASKED_LM_MAPPING, AutoTokenizer, BertConfig, FlaxBertForMaskedLM, HfArgumentParser, PreTrainedTokenizerBase, TensorType, TrainingArguments, is_tensorboard_available, set_seed, ) def tokenize_function(examples): # Remove empty lines examples = [line for line in examples if len(line) > 0 and not line.isspace()] return tokenizer( examples, return_special_tokens_mask=True, padding=padding, truncation=True, max_length=data_args.max_seq_length, )
null
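`tokenize_function` closes over `tokenizer`, `padding`, and `data_args` from the surrounding script. A hedged sketch of how it is typically applied with `datasets.map` (the dataset name is illustrative; `input_columns` passes the raw text list as the positional argument the function expects):

```python
from datasets import load_dataset

# tokenizer, padding and data_args are assumed to be set up as earlier in the script.
raw = load_dataset("wikitext", "wikitext-2-raw-v1", split="train[:1%]")
tokenized = raw.map(
    tokenize_function,
    input_columns=["text"],   # feed the text column as the `examples` list
    batched=True,
    remove_columns=["text"],  # dropping the raw column allows the row count to change
)
```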
12,072
import argparse import json from operator import add from typing import List, Optional, Tuple, Union import numpy as np import torch from torch import nn from tqdm import trange from pplm_classification_head import ClassificationHead from transformers import GPT2LMHeadModel, GPT2Tokenizer from transformers.file_utils import cached_path DISCRIMINATOR_MODELS_PARAMS = { "clickbait": { "url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/clickbait_classifier_head.pt", "class_size": 2, "embed_size": 1024, "class_vocab": {"non_clickbait": 0, "clickbait": 1}, "default_class": 1, "pretrained_model": "gpt2-medium", }, "sentiment": { "url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/SST_classifier_head.pt", "class_size": 5, "embed_size": 1024, "class_vocab": {"very_positive": 2, "very_negative": 3}, "default_class": 3, "pretrained_model": "gpt2-medium", }, } def get_bag_of_words_indices(bag_of_words_ids_or_paths: List[str], tokenizer) -> List[List[List[int]]]: bow_indices = [] for id_or_path in bag_of_words_ids_or_paths: if id_or_path in BAG_OF_WORDS_ARCHIVE_MAP: filepath = cached_path(BAG_OF_WORDS_ARCHIVE_MAP[id_or_path]) else: filepath = id_or_path with open(filepath, "r") as f: words = f.read().strip().split("\n") bow_indices.append([tokenizer.encode(word.strip(), add_prefix_space=True) for word in words]) return bow_indices def full_text_generation( model, tokenizer, context=None, num_samples=1, device="cuda", bag_of_words=None, discrim=None, class_label=None, length=100, stepsize=0.02, temperature=1.0, top_k=10, sample=False, num_iterations=3, grad_length=10000, horizon_length=1, window_length=0, decay=False, gamma=1.5, gm_scale=0.9, kl_scale=0.01, repetition_penalty=1.0, **kwargs ): classifier, class_id = get_classifier(discrim, class_label, device) bow_indices = [] if bag_of_words: bow_indices = get_bag_of_words_indices(bag_of_words.split(";"), tokenizer) if bag_of_words and classifier: print("Both PPLM-BoW and PPLM-Discrim are on. 
This is not optimized.") loss_type = PPLM_BOW_DISCRIM elif bag_of_words: loss_type = PPLM_BOW print("Using PPLM-BoW") elif classifier is not None: loss_type = PPLM_DISCRIM print("Using PPLM-Discrim") else: raise Exception("Specify either a bag of words or a discriminator") unpert_gen_tok_text, _, _ = generate_text_pplm( model=model, tokenizer=tokenizer, context=context, device=device, length=length, sample=sample, perturb=False, repetition_penalty=repetition_penalty, ) if device == "cuda": torch.cuda.empty_cache() pert_gen_tok_texts = [] discrim_losses = [] losses_in_time = [] for i in range(num_samples): pert_gen_tok_text, discrim_loss, loss_in_time = generate_text_pplm( model=model, tokenizer=tokenizer, context=context, device=device, perturb=True, bow_indices=bow_indices, classifier=classifier, class_label=class_id, loss_type=loss_type, length=length, stepsize=stepsize, temperature=temperature, top_k=top_k, sample=sample, num_iterations=num_iterations, grad_length=grad_length, horizon_length=horizon_length, window_length=window_length, decay=decay, gamma=gamma, gm_scale=gm_scale, kl_scale=kl_scale, repetition_penalty=repetition_penalty, ) pert_gen_tok_texts.append(pert_gen_tok_text) if classifier is not None: discrim_losses.append(discrim_loss.data.cpu().numpy()) losses_in_time.append(loss_in_time) if device == "cuda": torch.cuda.empty_cache() return unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time def set_generic_model_params(discrim_weights, discrim_meta): if discrim_weights is None: raise ValueError("When using a generic discriminator, discrim_weights need to be specified") if discrim_meta is None: raise ValueError("When using a generic discriminator, discrim_meta need to be specified") with open(discrim_meta, "r") as discrim_meta_file: meta = json.load(discrim_meta_file) meta["path"] = discrim_weights DISCRIMINATOR_MODELS_PARAMS["generic"] = meta def run_pplm_example( pretrained_model="gpt2-medium", cond_text="", uncond=False, num_samples=1, bag_of_words=None, discrim=None, discrim_weights=None, discrim_meta=None, class_label=-1, length=100, stepsize=0.02, temperature=1.0, top_k=10, sample=False, num_iterations=3, grad_length=10000, horizon_length=1, window_length=0, decay=False, gamma=1.5, gm_scale=0.9, kl_scale=0.01, seed=0, no_cuda=False, colorama=False, repetition_penalty=1.0, ): # set Random seed torch.manual_seed(seed) np.random.seed(seed) # set the device device = "cuda" if torch.cuda.is_available() and not no_cuda else "cpu" if discrim == "generic": set_generic_model_params(discrim_weights, discrim_meta) if discrim is not None: pretrained_model = DISCRIMINATOR_MODELS_PARAMS[discrim]["pretrained_model"] print("discrim = {}, pretrained_model set to discriminator's = {}".format(discrim, pretrained_model)) # load pretrained model model = GPT2LMHeadModel.from_pretrained(pretrained_model, output_hidden_states=True) model.to(device) model.eval() # load tokenizer tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model) # Freeze GPT-2 weights for param in model.parameters(): param.requires_grad = False # figure out conditioning text if uncond: tokenized_cond_text = tokenizer.encode([tokenizer.bos_token]) else: raw_text = cond_text while not raw_text: print("Did you forget to add `--cond_text`? 
") raw_text = input("Model prompt >>> ") tokenized_cond_text = tokenizer.encode(tokenizer.bos_token + raw_text) print("= Prefix of sentence =") print(tokenizer.decode(tokenized_cond_text)) print() # generate unperturbed and perturbed texts # full_text_generation returns: # unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time unpert_gen_tok_text, pert_gen_tok_texts, _, _ = full_text_generation( model=model, tokenizer=tokenizer, context=tokenized_cond_text, device=device, num_samples=num_samples, bag_of_words=bag_of_words, discrim=discrim, class_label=class_label, length=length, stepsize=stepsize, temperature=temperature, top_k=top_k, sample=sample, num_iterations=num_iterations, grad_length=grad_length, horizon_length=horizon_length, window_length=window_length, decay=decay, gamma=gamma, gm_scale=gm_scale, kl_scale=kl_scale, repetition_penalty=repetition_penalty, ) # untokenize unperturbed text unpert_gen_text = tokenizer.decode(unpert_gen_tok_text.tolist()[0]) print("=" * 80) print("= Unperturbed generated text =") print(unpert_gen_text) print() generated_texts = [] bow_word_ids = set() if bag_of_words and colorama: bow_indices = get_bag_of_words_indices(bag_of_words.split(";"), tokenizer) for single_bow_list in bow_indices: # filtering all words in the list composed of more than 1 token filtered = list(filter(lambda x: len(x) <= 1, single_bow_list)) # w[0] because we are sure w has only 1 item because previous fitler bow_word_ids.update(w[0] for w in filtered) # iterate through the perturbed texts for i, pert_gen_tok_text in enumerate(pert_gen_tok_texts): try: # untokenize unperturbed text if colorama: import colorama pert_gen_text = "" for word_id in pert_gen_tok_text.tolist()[0]: if word_id in bow_word_ids: pert_gen_text += "{}{}{}".format( colorama.Fore.RED, tokenizer.decode([word_id]), colorama.Style.RESET_ALL, ) else: pert_gen_text += tokenizer.decode([word_id]) else: pert_gen_text = tokenizer.decode(pert_gen_tok_text.tolist()[0]) print("= Perturbed generated text {} =".format(i + 1)) print(pert_gen_text) print() except Exception as exc: print("Ignoring error while generating perturbed text:", exc) # keep the prefix, perturbed seq, original seq for each index generated_texts.append((tokenized_cond_text, pert_gen_tok_text, unpert_gen_tok_text)) return
null
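An illustrative call to `run_pplm_example` with bag-of-words control (a sketch: it downloads GPT-2 medium and is slow without a GPU; the `"military"` word list is assumed to be available in `BAG_OF_WORDS_ARCHIVE_MAP`, and a path to a local word-list file works as well):

```python
run_pplm_example(
    cond_text="The potato",
    num_samples=1,
    bag_of_words="military",  # assumed key; any local word-list path also works
    length=50,
    stepsize=0.03,
    sample=True,
    num_iterations=3,
    window_length=5,
    gamma=1.5,
    gm_scale=0.95,
    kl_scale=0.01,
)
```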
12,073
import argparse import csv import json import math import time import numpy as np import torch import torch.optim as optim import torch.utils.data as data from nltk.tokenize.treebank import TreebankWordDetokenizer from torch import nn from torchtext import data as torchtext_data from torchtext import datasets from tqdm import tqdm, trange from pplm_classification_head import ClassificationHead from transformers import GPT2LMHeadModel, GPT2Tokenizer torch.manual_seed(0) np.random.seed(0) example_sentence = "This is incredible! I love it, this is the best chicken I have ever had." max_length_seq = 100 class Discriminator(nn.Module): """Transformer encoder followed by a Classification Head""" def __init__(self, class_size, pretrained_model="gpt2-medium", cached_mode=False, device="cpu"): super().__init__() self.tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model) self.encoder = GPT2LMHeadModel.from_pretrained(pretrained_model) self.embed_size = self.encoder.transformer.config.hidden_size self.classifier_head = ClassificationHead(class_size=class_size, embed_size=self.embed_size) self.cached_mode = cached_mode self.device = device def get_classifier(self): return self.classifier_head def train_custom(self): for param in self.encoder.parameters(): param.requires_grad = False self.classifier_head.train() def avg_representation(self, x): mask = x.ne(0).unsqueeze(2).repeat(1, 1, self.embed_size).float().to(self.device).detach() hidden = self.encoder.transformer(x)["last_hidden_state"] masked_hidden = hidden * mask avg_hidden = torch.sum(masked_hidden, dim=1) / (torch.sum(mask, dim=1).detach() + EPSILON) return avg_hidden def forward(self, x): if self.cached_mode: avg_hidden = x.to(self.device) else: avg_hidden = self.avg_representation(x.to(self.device)) logits = self.classifier_head(avg_hidden) probs = nn.functional.log_softmax(logits, dim=-1) return probs class Dataset(data.Dataset): def __init__(self, X, y): """Reads source and target sequences from txt files.""" self.X = X self.y = y def __len__(self): return len(self.X) def __getitem__(self, index): """Returns one data pair (source and target).""" data = {} data["X"] = self.X[index] data["y"] = self.y[index] return data def collate_fn(data): def pad_sequences(sequences): lengths = [len(seq) for seq in sequences] padded_sequences = torch.zeros(len(sequences), max(lengths)).long() # padding value = 0 for i, seq in enumerate(sequences): end = lengths[i] padded_sequences[i, :end] = seq[:end] return padded_sequences, lengths item_info = {} for key in data[0].keys(): item_info[key] = [d[key] for d in data] x_batch, _ = pad_sequences(item_info["X"]) y_batch = torch.tensor(item_info["y"], dtype=torch.long) return x_batch, y_batch def train_epoch(data_loader, discriminator, optimizer, epoch=0, log_interval=10, device="cpu"): samples_so_far = 0 discriminator.train_custom() for batch_idx, (input_t, target_t) in enumerate(data_loader): input_t, target_t = input_t.to(device), target_t.to(device) optimizer.zero_grad() output_t = discriminator(input_t) loss = nn.functional.nll_loss(output_t, target_t) loss.backward(retain_graph=True) optimizer.step() samples_so_far += len(input_t) if batch_idx % log_interval == 0: print( "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format( epoch + 1, samples_so_far, len(data_loader.dataset), 100 * samples_so_far / len(data_loader.dataset), loss.item(), ) ) def evaluate_performance(data_loader, discriminator, device="cpu"): discriminator.eval() test_loss = 0 correct = 0 with torch.no_grad(): for input_t, target_t 
in data_loader: input_t, target_t = input_t.to(device), target_t.to(device) output_t = discriminator(input_t) # sum up batch loss test_loss += nn.functional.nll_loss(output_t, target_t, reduction="sum").item() # get the index of the max log-probability pred_t = output_t.argmax(dim=1, keepdim=True) correct += pred_t.eq(target_t.view_as(pred_t)).sum().item() test_loss /= len(data_loader.dataset) print( "Performance on test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)".format( test_loss, correct, len(data_loader.dataset), 100.0 * correct / len(data_loader.dataset) ) ) def predict(input_sentence, model, classes, cached=False, device="cpu"): input_t = model.tokenizer.encode(input_sentence) input_t = torch.tensor([input_t], dtype=torch.long, device=device) if cached: input_t = model.avg_representation(input_t) log_probs = model(input_t).data.cpu().numpy().flatten().tolist() print("Input sentence:", input_sentence) print( "Predictions:", ", ".join("{}: {:.4f}".format(c, math.exp(log_prob)) for c, log_prob in zip(classes, log_probs)), ) def get_cached_data_loader(dataset, batch_size, discriminator, shuffle=False, device="cpu"): data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, collate_fn=collate_fn) xs = [] ys = [] for batch_idx, (x, y) in enumerate(tqdm(data_loader, ascii=True)): with torch.no_grad(): x = x.to(device) avg_rep = discriminator.avg_representation(x).cpu().detach() avg_rep_list = torch.unbind(avg_rep.unsqueeze(1)) xs += avg_rep_list ys += y.cpu().numpy().tolist() data_loader = torch.utils.data.DataLoader( dataset=Dataset(xs, ys), batch_size=batch_size, shuffle=shuffle, collate_fn=cached_collate_fn ) return data_loader def train_discriminator( dataset, dataset_fp=None, pretrained_model="gpt2-medium", epochs=10, batch_size=64, log_interval=10, save_model=False, cached=False, no_cuda=False, ): device = "cuda" if torch.cuda.is_available() and not no_cuda else "cpu" print("Preprocessing {} dataset...".format(dataset)) start = time.time() if dataset == "SST": idx2class = ["positive", "negative", "very positive", "very negative", "neutral"] class2idx = {c: i for i, c in enumerate(idx2class)} discriminator = Discriminator( class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device ).to(device) text = torchtext_data.Field() label = torchtext_data.Field(sequential=False) train_data, val_data, test_data = datasets.SST.splits( text, label, fine_grained=True, train_subtrees=True, ) x = [] y = [] for i in trange(len(train_data), ascii=True): seq = TreebankWordDetokenizer().detokenize(vars(train_data[i])["text"]) seq = discriminator.tokenizer.encode(seq) seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) x.append(seq) y.append(class2idx[vars(train_data[i])["label"]]) train_dataset = Dataset(x, y) test_x = [] test_y = [] for i in trange(len(test_data), ascii=True): seq = TreebankWordDetokenizer().detokenize(vars(test_data[i])["text"]) seq = discriminator.tokenizer.encode(seq) seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) test_x.append(seq) test_y.append(class2idx[vars(test_data[i])["label"]]) test_dataset = Dataset(test_x, test_y) discriminator_meta = { "class_size": len(idx2class), "embed_size": discriminator.embed_size, "pretrained_model": pretrained_model, "class_vocab": class2idx, "default_class": 2, } elif dataset == "clickbait": idx2class = ["non_clickbait", "clickbait"] class2idx = {c: i for i, c in enumerate(idx2class)} discriminator = Discriminator( class_size=len(idx2class), 
pretrained_model=pretrained_model, cached_mode=cached, device=device ).to(device) with open("datasets/clickbait/clickbait_train_prefix.txt") as f: data = [] for i, line in enumerate(f): try: data.append(eval(line)) except Exception: print("Error evaluating line {}: {}".format(i, line)) continue x = [] y = [] with open("datasets/clickbait/clickbait_train_prefix.txt") as f: for i, line in enumerate(tqdm(f, ascii=True)): try: d = eval(line) seq = discriminator.tokenizer.encode(d["text"]) if len(seq) < max_length_seq: seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) else: print("Line {} is longer than maximum length {}".format(i, max_length_seq)) continue x.append(seq) y.append(d["label"]) except Exception: print("Error evaluating / tokenizing line {}, skipping it".format(i)) pass full_dataset = Dataset(x, y) train_size = int(0.9 * len(full_dataset)) test_size = len(full_dataset) - train_size train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size]) discriminator_meta = { "class_size": len(idx2class), "embed_size": discriminator.embed_size, "pretrained_model": pretrained_model, "class_vocab": class2idx, "default_class": 1, } elif dataset == "toxic": idx2class = ["non_toxic", "toxic"] class2idx = {c: i for i, c in enumerate(idx2class)} discriminator = Discriminator( class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device ).to(device) x = [] y = [] with open("datasets/toxic/toxic_train.txt") as f: for i, line in enumerate(tqdm(f, ascii=True)): try: d = eval(line) seq = discriminator.tokenizer.encode(d["text"]) if len(seq) < max_length_seq: seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) else: print("Line {} is longer than maximum length {}".format(i, max_length_seq)) continue x.append(seq) y.append(int(np.sum(d["label"]) > 0)) except Exception: print("Error evaluating / tokenizing line {}, skipping it".format(i)) pass full_dataset = Dataset(x, y) train_size = int(0.9 * len(full_dataset)) test_size = len(full_dataset) - train_size train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size]) discriminator_meta = { "class_size": len(idx2class), "embed_size": discriminator.embed_size, "pretrained_model": pretrained_model, "class_vocab": class2idx, "default_class": 0, } else: # if dataset == "generic": # This assumes the input dataset is a TSV with the following structure: # class \t text if dataset_fp is None: raise ValueError("When generic dataset is selected, dataset_fp needs to be specified aswell.") classes = set() with open(dataset_fp) as f: csv_reader = csv.reader(f, delimiter="\t") for row in tqdm(csv_reader, ascii=True): if row: classes.add(row[0]) idx2class = sorted(classes) class2idx = {c: i for i, c in enumerate(idx2class)} discriminator = Discriminator( class_size=len(idx2class), pretrained_model=pretrained_model, cached_mode=cached, device=device ).to(device) x = [] y = [] with open(dataset_fp) as f: csv_reader = csv.reader(f, delimiter="\t") for i, row in enumerate(tqdm(csv_reader, ascii=True)): if row: label = row[0] text = row[1] try: seq = discriminator.tokenizer.encode(text) if len(seq) < max_length_seq: seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) else: print("Line {} is longer than maximum length {}".format(i, max_length_seq)) continue x.append(seq) y.append(class2idx[label]) except Exception: print("Error tokenizing line {}, skipping it".format(i)) pass full_dataset = Dataset(x, y) train_size = int(0.9 * 
len(full_dataset)) test_size = len(full_dataset) - train_size train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size]) discriminator_meta = { "class_size": len(idx2class), "embed_size": discriminator.embed_size, "pretrained_model": pretrained_model, "class_vocab": class2idx, "default_class": 0, } end = time.time() print("Preprocessed {} data points".format(len(train_dataset) + len(test_dataset))) print("Data preprocessing took: {:.3f}s".format(end - start)) if cached: print("Building representation cache...") start = time.time() train_loader = get_cached_data_loader(train_dataset, batch_size, discriminator, shuffle=True, device=device) test_loader = get_cached_data_loader(test_dataset, batch_size, discriminator, device=device) end = time.time() print("Building representation cache took: {:.3f}s".format(end - start)) else: train_loader = torch.utils.data.DataLoader( dataset=train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn ) test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, collate_fn=collate_fn) if save_model: with open("{}_classifier_head_meta.json".format(dataset), "w") as meta_file: json.dump(discriminator_meta, meta_file) optimizer = optim.Adam(discriminator.parameters(), lr=0.0001) for epoch in range(epochs): start = time.time() print("\nEpoch", epoch + 1) train_epoch( discriminator=discriminator, data_loader=train_loader, optimizer=optimizer, epoch=epoch, log_interval=log_interval, device=device, ) evaluate_performance(data_loader=test_loader, discriminator=discriminator, device=device) end = time.time() print("Epoch took: {:.3f}s".format(end - start)) print("\nExample prediction") predict(example_sentence, discriminator, idx2class, cached=cached, device=device) if save_model: # torch.save(discriminator.state_dict(), # "{}_discriminator_{}.pt".format( # args.dataset, epoch + 1 # )) torch.save( discriminator.get_classifier().state_dict(), "{}_classifier_head_epoch_{}.pt".format(dataset, epoch + 1), )
null
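An example invocation of `train_discriminator` on the SST setup shown above (a sketch: it downloads GPT-2 medium and the SST data through torchtext, so it is slow on CPU):

```python
train_discriminator(
    dataset="SST",
    pretrained_model="gpt2-medium",
    epochs=3,
    batch_size=64,
    log_interval=10,
    save_model=True,   # writes the classifier-head checkpoint and its meta JSON
    cached=False,
    no_cuda=False,
)
```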
12,078
import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import sha256 from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import numpy as np from PIL import Image from tqdm.auto import tqdm import cv2 import requests import wget from filelock import FileLock from yaml import Loader, dump, load TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) def is_remote_url(url_or_filename): def get_from_cache( url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None, local_files_only=False, ): def cached_path( url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False, ): if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if is_remote_url(url_or_filename): # URL, so get it from the cache (downloading if necessary) output_path = get_from_cache( url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, local_files_only=local_files_only, ) elif os.path.exists(url_or_filename): # File, and it exists. output_path = url_or_filename elif urlparse(url_or_filename).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(url_or_filename)) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) if extract_compressed_file: if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" output_dir, output_file = os.path.split(output_path) output_extract_dir_name = output_file.replace(".", "-") + "-extracted" output_path_extracted = os.path.join(output_dir, output_extract_dir_name) if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract: return output_path_extracted # Prevent parallel extractions lock_path = output_path + ".lock" with FileLock(lock_path): shutil.rmtree(output_path_extracted, ignore_errors=True) os.makedirs(output_path_extracted) if is_zipfile(output_path): with ZipFile(output_path, "r") as zip_file: zip_file.extractall(output_path_extracted) zip_file.close() elif tarfile.is_tarfile(output_path): tar_file = tarfile.open(output_path) tar_file.extractall(output_path_extracted) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(output_path)) return output_path_extracted return output_path
null
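A sketch of using `cached_path`, assuming the elided `is_remote_url`/`get_from_cache` helpers behave as in the library; the URL and archive name are purely illustrative:

```python
# Download-or-reuse a remote file; the result is a path inside TRANSFORMERS_CACHE.
local_file = cached_path("https://example.com/frcnn_checkpoint.bin")

# For a local archive, extraction can be requested explicitly:
# extracted_dir = cached_path("./weights.tar.gz", extract_compressed_file=True)
print(local_file)
```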
12,088
import itertools import math import os from abc import ABCMeta, abstractmethod from collections import OrderedDict, namedtuple from typing import Dict, List, Tuple import numpy as np import torch from torch import nn from torch.nn.modules.batchnorm import BatchNorm2d from torchvision.ops import RoIPool from torchvision.ops.boxes import batched_nms, nms from utils import WEIGHTS_NAME, Config, cached_path, hf_bucket_url, is_remote_url, load_checkpoint class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])): def __new__(cls, *, channels=None, height=None, width=None, stride=None): class BasicStem(nn.Module): def __init__(self, in_channels=3, out_channels=64, norm="BN", caffe_maxpool=False): def forward(self, x): def out_channels(self): def stride(self): class BottleneckBlock(ResNetBlockBase): def __init__( self, in_channels, out_channels, bottleneck_channels, stride=1, num_groups=1, norm="BN", stride_in_1x1=False, dilation=1, ): def forward(self, x): class ResNet(Backbone): def __init__(self, stem, stages, num_classes=None, out_features=None): def forward(self, x): def output_shape(self): def make_stage( block_class, num_blocks, first_stride=None, *, in_channels, out_channels, **kwargs, ): def build_backbone(cfg): input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN)) norm = cfg.RESNETS.NORM stem = BasicStem( in_channels=input_shape.channels, out_channels=cfg.RESNETS.STEM_OUT_CHANNELS, norm=norm, caffe_maxpool=cfg.MODEL.MAX_POOL, ) freeze_at = cfg.BACKBONE.FREEZE_AT if freeze_at >= 1: for p in stem.parameters(): p.requires_grad = False out_features = cfg.RESNETS.OUT_FEATURES depth = cfg.RESNETS.DEPTH num_groups = cfg.RESNETS.NUM_GROUPS width_per_group = cfg.RESNETS.WIDTH_PER_GROUP bottleneck_channels = num_groups * width_per_group in_channels = cfg.RESNETS.STEM_OUT_CHANNELS out_channels = cfg.RESNETS.RES2_OUT_CHANNELS stride_in_1x1 = cfg.RESNETS.STRIDE_IN_1X1 res5_dilation = cfg.RESNETS.RES5_DILATION assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation) num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth] stages = [] out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features] max_stage_idx = max(out_stage_idx) for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)): dilation = res5_dilation if stage_idx == 5 else 1 first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2 stage_kargs = { "num_blocks": num_blocks_per_stage[idx], "first_stride": first_stride, "in_channels": in_channels, "bottleneck_channels": bottleneck_channels, "out_channels": out_channels, "num_groups": num_groups, "norm": norm, "stride_in_1x1": stride_in_1x1, "dilation": dilation, } stage_kargs["block_class"] = BottleneckBlock blocks = ResNet.make_stage(**stage_kargs) in_channels = out_channels out_channels *= 2 bottleneck_channels *= 2 if freeze_at >= stage_idx: for block in blocks: block.freeze() stages.append(blocks) return ResNet(stem, stages, out_features=out_features)
null
12,093
import itertools import math import os from abc import ABCMeta, abstractmethod from collections import OrderedDict, namedtuple from typing import Dict, List, Tuple import numpy as np import torch from torch import nn from torch.nn.modules.batchnorm import BatchNorm2d from torchvision.ops import RoIPool from torchvision.ops.boxes import batched_nms, nms from utils import WEIGHTS_NAME, Config, cached_path, hf_bucket_url, is_remote_url, load_checkpoint def _fmt_box_list(box_tensor, batch_index: int): repeated_index = torch.full( (len(box_tensor), 1), batch_index, dtype=box_tensor.dtype, device=box_tensor.device, ) return torch.cat((repeated_index, box_tensor), dim=1) def convert_boxes_to_pooler_format(box_lists: List[torch.Tensor]): pooler_fmt_boxes = torch.cat( [_fmt_box_list(box_list, i) for i, box_list in enumerate(box_lists)], dim=0, ) return pooler_fmt_boxes
null
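A small, self-contained demonstration of `convert_boxes_to_pooler_format`; each row gains a leading batch-index column before the per-image box lists are concatenated:

```python
import torch

boxes_image0 = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
boxes_image1 = torch.tensor([[5.0, 5.0, 20.0, 20.0], [1.0, 2.0, 3.0, 4.0]])
pooler_boxes = convert_boxes_to_pooler_format([boxes_image0, boxes_image1])
print(pooler_boxes)
# tensor([[ 0.,  0.,  0., 10., 10.],
#         [ 1.,  5.,  5., 20., 20.],
#         [ 1.,  1.,  2.,  3.,  4.]])
```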
12,098
import os import sys from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) The provided code snippet includes necessary dependencies for implementing the `config` function. Write a Python function `def config(*args, **kwargs)` to solve the following problem: r""" # Using torch.hub ! import torch config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased') # Download configuration from huggingface.co and cache. config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')` config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/my_configuration.json') config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased', output_attentions=True, foo=False) assert config.output_attentions == True config, unused_kwargs = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased', output_attentions=True, foo=False, return_unused_kwargs=True) assert config.output_attentions == True assert unused_kwargs == {'foo': False} Here is the function: def config(*args, **kwargs): r""" # Using torch.hub ! import torch config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased') # Download configuration from huggingface.co and cache. config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')` config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/my_configuration.json') config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased', output_attentions=True, foo=False) assert config.output_attentions == True config, unused_kwargs = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased', output_attentions=True, foo=False, return_unused_kwargs=True) assert config.output_attentions == True assert unused_kwargs == {'foo': False} """ return AutoConfig.from_pretrained(*args, **kwargs)
r""" # Using torch.hub ! import torch config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased') # Download configuration from huggingface.co and cache. config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')` config = torch.hub.load('huggingface/transformers', 'config', './test/bert_saved_model/my_configuration.json') config = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased', output_attentions=True, foo=False) assert config.output_attentions == True config, unused_kwargs = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased', output_attentions=True, foo=False, return_unused_kwargs=True) assert config.output_attentions == True assert unused_kwargs == {'foo': False}
12,099
import os import sys from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) The provided code snippet includes necessary dependencies for implementing the `tokenizer` function. Write a Python function `def tokenizer(*args, **kwargs)` to solve the following problem: r""" # Using torch.hub ! import torch tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', 'bert-base-uncased') # Download vocabulary from huggingface.co and cache. tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', './test/bert_saved_model/') # E.g. tokenizer was saved using `save_pretrained('./test/saved_model/')` Here is the function: def tokenizer(*args, **kwargs): r""" # Using torch.hub ! import torch tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', 'bert-base-uncased') # Download vocabulary from huggingface.co and cache. tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', './test/bert_saved_model/') # E.g. tokenizer was saved using `save_pretrained('./test/saved_model/')` """ return AutoTokenizer.from_pretrained(*args, **kwargs)
r""" # Using torch.hub ! import torch tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', 'bert-base-uncased') # Download vocabulary from huggingface.co and cache. tokenizer = torch.hub.load('huggingface/transformers', 'tokenizer', './test/bert_saved_model/') # E.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`
12,100
import os import sys from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) The provided code snippet includes necessary dependencies for implementing the `model` function. Write a Python function `def model(*args, **kwargs)` to solve the following problem: r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased') # Download model and configuration from huggingface.co and cache. model = torch.hub.load('huggingface/transformers', 'model', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased', output_attentions=True) # Update configuration during loading assert model.config.output_attentions == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'model', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) Here is the function: def model(*args, **kwargs): r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased') # Download model and configuration from huggingface.co and cache. model = torch.hub.load('huggingface/transformers', 'model', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased', output_attentions=True) # Update configuration during loading assert model.config.output_attentions == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'model', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ return AutoModel.from_pretrained(*args, **kwargs)
r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased') # Download model and configuration from huggingface.co and cache. model = torch.hub.load('huggingface/transformers', 'model', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased', output_attentions=True) # Update configuration during loading assert model.config.output_attentions == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'model', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
12,101
import os import sys from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) The provided code snippet includes necessary dependencies for implementing the `modelForCausalLM` function. Write a Python function `def modelForCausalLM(*args, **kwargs)` to solve the following problem: r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', 'gpt2') # Download model and configuration from huggingface.co and cache. model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', './test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', 'gpt2', output_attentions=True) # Update configuration during loading assert model.config.output_attentions == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/gpt_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', './tf_model/gpt_tf_checkpoint.ckpt.index', from_tf=True, config=config) Here is the function: def modelForCausalLM(*args, **kwargs): r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', 'gpt2') # Download model and configuration from huggingface.co and cache. model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', './test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', 'gpt2', output_attentions=True) # Update configuration during loading assert model.config.output_attentions == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/gpt_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', './tf_model/gpt_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ return AutoModelForCausalLM.from_pretrained(*args, **kwargs)
r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', 'gpt2') # Download model and configuration from huggingface.co and cache. model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', './test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', 'gpt2', output_attentions=True) # Update configuration during loading assert model.config.output_attentions == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/gpt_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', './tf_model/gpt_tf_checkpoint.ckpt.index', from_tf=True, config=config)
12,102
import os import sys from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) The provided code snippet includes necessary dependencies for implementing the `modelForMaskedLM` function. Write a Python function `def modelForMaskedLM(*args, **kwargs)` to solve the following problem: r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', 'bert-base-uncased') # Download model and configuration from huggingface.co and cache. model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', 'bert-base-uncased', output_attentions=True) # Update configuration during loading assert model.config.output_attentions == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) Here is the function: def modelForMaskedLM(*args, **kwargs): r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', 'bert-base-uncased') # Download model and configuration from huggingface.co and cache. model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', 'bert-base-uncased', output_attentions=True) # Update configuration during loading assert model.config.output_attentions == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)
r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', 'bert-base-uncased') # Download model and configuration from huggingface.co and cache. model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', 'bert-base-uncased', output_attentions=True) # Update configuration during loading assert model.config.output_attentions == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'modelForMaskedLM', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
12,103
import os import sys from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) The provided code snippet includes necessary dependencies for implementing the `modelForSequenceClassification` function. Write a Python function `def modelForSequenceClassification(*args, **kwargs)` to solve the following problem: r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased') # Download model and configuration from huggingface.co and cache. model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased', output_attentions=True) # Update configuration during loading assert model.config.output_attentions == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) Here is the function: def modelForSequenceClassification(*args, **kwargs): r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased') # Download model and configuration from huggingface.co and cache. model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased', output_attentions=True) # Update configuration during loading assert model.config.output_attentions == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased') # Download model and configuration from huggingface.co and cache. model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased', output_attentions=True) # Update configuration during loading assert model.config.output_attentions == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
12,104
import os import sys from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) The provided code snippet includes necessary dependencies for implementing the `modelForQuestionAnswering` function. Write a Python function `def modelForQuestionAnswering(*args, **kwargs)` to solve the following problem: r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased') # Download model and configuration from huggingface.co and cache. model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased', output_attentions=True) # Update configuration during loading assert model.config.output_attentions == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) Here is the function: def modelForQuestionAnswering(*args, **kwargs): r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased') # Download model and configuration from huggingface.co and cache. model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased', output_attentions=True) # Update configuration during loading assert model.config.output_attentions == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
r""" # Using torch.hub ! import torch model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased') # Download model and configuration from huggingface.co and cache. model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')` model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased', output_attentions=True) # Update configuration during loading assert model.config.output_attentions == True # Loading from a TF checkpoint file instead of a PyTorch model (slower) config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json') model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
12,105
import os from pathlib import Path def write_model_card(model_card_dir, src_lang, tgt_lang, model_name): texts = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, nicht wahr?", } # BLUE scores as follows: # "pair": [fairseq, transformers] scores = { "wmt19-de-en-6-6-base": [0, 38.37], "wmt19-de-en-6-6-big": [0, 39.90], } pair = f"{src_lang}-{tgt_lang}" readme = f""" --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - allenai license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt19 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). 2 models are available: * [wmt19-de-en-6-6-big](https://huggingface.co/allenai/wmt19-de-en-6-6-big) * [wmt19-de-en-6-6-base](https://huggingface.co/allenai/wmt19-de-en-6-6-base) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/{model_name}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | transformers -------|--------- {model_name} | {scores[model_name][1]} The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` """ model_card_dir.mkdir(parents=True, exist_ok=True) path = os.path.join(model_card_dir, "README.md") print(f"Generating {path}") with open(path, "w", encoding="utf-8") as f: f.write(readme)
null
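An illustrative call generating the README for one of the two ported allenai checkpoints (the output directory is hypothetical):

```python
from pathlib import Path

model_name = "wmt19-de-en-6-6-base"
write_model_card(
    Path("model_cards/allenai") / model_name,
    src_lang="de",
    tgt_lang="en",
    model_name=model_name,
)
```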
12,106
import os from pathlib import Path def write_model_card(model_card_dir, src_lang, tgt_lang, model_name): texts = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, nicht wahr?", } # BLUE scores as follows: # "pair": [fairseq, transformers] scores = { "wmt16-en-de-dist-12-1": [28.3, 27.52], "wmt16-en-de-dist-6-1": [27.4, 27.11], "wmt16-en-de-12-1": [26.9, 25.75], } pair = f"{src_lang}-{tgt_lang}" readme = f""" --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/{model_name}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. 
Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` """ model_card_dir.mkdir(parents=True, exist_ok=True) path = os.path.join(model_card_dir, "README.md") print(f"Generating {path}") with open(path, "w", encoding="utf-8") as f: f.write(readme)
null
12,107
import os from pathlib import Path def write_model_card(model_card_dir, src_lang, tgt_lang): texts = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] scores = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } pair = f"{src_lang}-{tgt_lang}" readme = f""" --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). - re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. 
## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR's WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) """ os.makedirs(model_card_dir, exist_ok=True) path = os.path.join(model_card_dir, "README.md") print(f"Generating {path}") with open(path, "w", encoding="utf-8") as f: f.write(readme)
null
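A minimal usage sketch for the `write_model_card` helper above, assuming the function is importable by the calling code; the output directory layout is illustrative. It simply drives the helper over the four WMT19 pairs that the embedded card text supports.

import os

base_dir = "model_cards/facebook"  # hypothetical output location
for src, tgt in [("ru", "en"), ("en", "ru"), ("en", "de"), ("de", "en")]:
    card_dir = os.path.join(base_dir, f"wmt19-{src}-{tgt}")
    write_model_card(card_dir, src_lang=src, tgt_lang=tgt)  # writes card_dir/README.md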
12,108
import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers def get_base_command(args, output_dir): # unwrap multi-line input args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd) # remove --output_dir if any and set our own args.base_cmd = re.sub("--output_dir\s+[^\s]+", "", args.base_cmd) args.base_cmd += f" --output_dir {output_dir}" # ensure we have --overwrite_output_dir args.base_cmd = re.sub("--overwrite_output_dir\s+", "", args.base_cmd) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd)
null
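A short sketch of how `get_base_command` above can be exercised. The `argparse.Namespace` is built by hand here and the example command line is illustrative; the benchmarking script that owns this helper populates `args` from its own command-line options.

import argparse

args = argparse.Namespace(
    base_cmd="examples/pytorch/translation/run_translation.py \\\n --model_name_or_path t5-small --do_train"
)
cmd = get_base_command(args, output_dir="benchmark-out")
# cmd starts with sys.executable; the backslash/newline continuation is folded into spaces,
# any pre-existing --output_dir is dropped, and --output_dir benchmark-out plus
# --overwrite_output_dir are appended before shlex.split produces the final argument list.
print(cmd)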
12,109
import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers nan = float("nan") def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose): # Enable to debug everything but the run itself, to do it fast and see the progress. # This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0) return dict( {k: random.uniform(0, 100) for k in metric_keys}, **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])}, ) result = subprocess.run(cmd, capture_output=True, text=True) if verbose: print("STDOUT", result.stdout) print("STDERR", result.stderr) # save the streams prefix = variation.replace(" ", "-") with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f: f.write(result.stdout) with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f: f.write(result.stderr) if result.returncode != 0: if verbose: print("failed") return {target_metric_key: nan} with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f: metrics = json.load(f) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def process_run( id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose, ): results = [] metrics = [] preamble = f"{id}: {variation:<{longest_variation_len}}" outcome = f"{preamble}: " metric_keys = set(report_metric_keys + [target_metric_key]) for i in tqdm(range(repeat_times), desc=preamble, leave=False): single_run_metrics = process_run_single( id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose ) result = single_run_metrics[target_metric_key] if not math.isnan(result): metrics.append(single_run_metrics) results.append(result) outcome += "✓" else: outcome += "✘" outcome = f"\33[2K\r{outcome}" if len(metrics) > 0: mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()} mean_target = round(mean_metrics[target_metric_key], 2) results_str = f"{outcome} {mean_target}" if len(metrics) > 1: results_str += f" {tuple(round(x, 2) for x in results)}" print(results_str) mean_metrics[variation_key] = variation return mean_metrics else: print(outcome) return {variation_key: variation, target_metric_key: nan}
null
12,110
import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers nan = float("nan") def get_original_command(max_width=80, full_python_path=False): """ Return the original command line string that can be replayed nicely and wrapped for 80 char width. Args: max_width (`int`, `optional`, defaults to 80): The width to wrap for. full_python_path (`bool`, `optional`, defaults to `False`): Whether to replicate the full path or just the last segment (i.e. `python`). """ cmd = [] # deal with critical env vars env_keys = ["CUDA_VISIBLE_DEVICES"] for key in env_keys: val = os.environ.get(key, None) if val is not None: cmd.append(f"{key}={val}") # python executable (not always needed if the script is executable) python = sys.executable if full_python_path else sys.executable.split("/")[-1] cmd.append(python) # now the normal args cmd += list(map(shlex.quote, sys.argv)) # split up into up to MAX_WIDTH lines with shell multi-line escapes lines = [] current_line = "" while len(cmd) > 0: current_line += f"{cmd.pop(0)} " if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1: lines.append(current_line) current_line = "" return "\\\n".join(lines) def get_versions(): properties = torch.cuda.get_device_properties(torch.device("cuda")) return f""" Datetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')} Software: transformers: {transformers.__version__} torch : {torch.__version__} cuda : {torch.version.cuda} python : {platform.python_version()} Hardware: {torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB """ def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir): df = pd.DataFrame(results) variation_key = "variation" diff_key = "diff_%" sentinel_value = nan if base_variation is not None and len(df[df[variation_key] == base_variation]): # this may still return nan sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(sentinel_value): # as a fallback, use the minimal value as the sentinel sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(sentinel_value): df[diff_key] = df.apply( lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value) if not math.isnan(r[target_metric_key]) else 0, axis="columns", ) # re-order columns cols = [variation_key, target_metric_key, diff_key, *report_metric_keys] df = df.reindex(cols, axis="columns") # reorder cols # capitalize df = df.rename(str.capitalize, axis="columns") # make the cols as narrow as possible df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns") df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns") report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")] print("\n\n".join(report))
null
12,111
from collections import Counter import datasets import transformers from transformers.convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from transformers.utils import logging dataset = datasets.load_dataset("xnli", split="test+validation") def test_string(slow, fast, text): global perfect global imperfect global wrong global total slow_ids = slow.encode(text) fast_ids = fast.encode(text) skip_assert = False total += 1 if slow_ids != fast_ids: if check_details(text, slow_ids, fast_ids, slow, fast): skip_assert = True imperfect += 1 else: wrong += 1 else: perfect += 1 if total % 10000 == 0: print(f"({perfect} / {imperfect} / {wrong} ----- {perfect + imperfect + wrong})") if skip_assert: return assert ( slow_ids == fast_ids ), f"line {text} : \n\n{slow_ids}\n{fast_ids}\n\n{slow.tokenize(text)}\n{fast.tokenize(text)}" def test_tokenizer(slow, fast): global batch_total for i in range(len(dataset)): # premise, all languages for text in dataset[i]["premise"].values(): test_string(slow, fast, text) # hypothesis, all languages for text in dataset[i]["hypothesis"]["translation"]: test_string(slow, fast, text)
null
12,112
import importlib import inspect import os import re CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = { "DecisionTransformerConfig", "EncoderDecoderConfig", "RagConfig", "SpeechEncoderDecoderConfig", "VisionEncoderDecoderConfig", "VisionTextDualEncoderConfig", } def get_checkpoint_from_config_class(config_class): checkpoint = None # source code of `config_class` config_source = inspect.getsource(config_class) checkpoints = _re_checkpoint.findall(config_source) for checkpoint in checkpoints: # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` ckpt_name, ckpt_link = checkpoint # verify the checkpoint name corresponds to the checkpoint link ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}" if ckpt_link == ckpt_link_from_name: checkpoint = ckpt_name break return checkpoint def check_config_docstrings_have_checkpoints(): configs_without_checkpoint = [] for config_class in list(CONFIG_MAPPING.values()): checkpoint = get_checkpoint_from_config_class(config_class) name = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(name) if len(configs_without_checkpoint) > 0: message = "\n".join(sorted(configs_without_checkpoint)) raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
null
12,113
import importlib import inspect import os import re import warnings from collections import OrderedDict from difflib import get_close_matches from pathlib import Path from transformers import is_flax_available, is_tf_available, is_torch_available from transformers.models.auto import get_values from transformers.utils import ENV_VARS_TRUE_VALUES def check_model_list(): """Check the model list inside the transformers library.""" # Get the models from the directory structure of `src/transformers/models/` models_dir = os.path.join(PATH_TO_TRANSFORMERS, "models") _models = [] for model in os.listdir(models_dir): model_dir = os.path.join(models_dir, model) if os.path.isdir(model_dir) and "__init__.py" in os.listdir(model_dir): _models.append(model) # Get the models from the directory structure of `src/transformers/models/` models = [model for model in dir(transformers.models) if not model.startswith("__")] missing_models = sorted(list(set(_models).difference(models))) if missing_models: raise Exception( f"The following models should be included in {models_dir}/__init__.py: {','.join(missing_models)}." ) def check_models_are_in_init(): """Checks all models defined in the library are in the main init.""" models_not_in_init = [] dir_transformers = dir(transformers) for module in get_model_modules(): models_not_in_init += [ model[0] for model in get_models(module, include_pretrained=True) if model[0] not in dir_transformers ] # Remove private models models_not_in_init = [model for model in models_not_in_init if not is_a_private_model(model)] if len(models_not_in_init) > 0: raise Exception(f"The following models should be in the main init: {','.join(models_not_in_init)}.") def check_all_models_are_tested(): """Check all models are properly tested.""" modules = get_model_modules() test_files = get_model_test_files() failures = [] for module in modules: test_file = [file for file in test_files if f"test_{module.__name__.split('.')[-1]}.py" in file] if len(test_file) == 0: failures.append(f"{module.__name__} does not have its corresponding test file {test_file}.") elif len(test_file) > 1: failures.append(f"{module.__name__} has several test files: {test_file}.") else: test_file = test_file[0] new_failures = check_models_are_tested(module, test_file) if new_failures is not None: failures += new_failures if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def check_all_models_are_auto_configured(): """Check all models are each in an auto class.""" missing_backends = [] if not is_torch_available(): missing_backends.append("PyTorch") if not is_tf_available(): missing_backends.append("TensorFlow") if not is_flax_available(): missing_backends.append("Flax") if len(missing_backends) > 0: missing = ", ".join(missing_backends) if os.getenv("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: raise Exception( "Full quality checks require all backends to be installed (with `pip install -e .[dev]` in the " f"Transformers repo, the following are missing: {missing}." ) else: warnings.warn( "Full quality checks require all backends to be installed (with `pip install -e .[dev]` in the " f"Transformers repo, the following are missing: {missing}. While it's probably fine as long as you " "didn't make any change in one of those backends modeling files, you should probably execute the " "command above to be on the safe side." 
) modules = get_model_modules() all_auto_models = get_all_auto_configured_models() failures = [] for module in modules: new_failures = check_models_are_auto_configured(module, all_auto_models) if new_failures is not None: failures += new_failures if len(failures) > 0: raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) def check_all_decorator_order(): """Check that in all test files, the slow decorator is always last.""" errors = [] for fname in os.listdir(PATH_TO_TESTS): if fname.endswith(".py"): filename = os.path.join(PATH_TO_TESTS, fname) new_errors = check_decorator_order(filename) errors += [f"- {filename}, line {i}" for i in new_errors] if len(errors) > 0: msg = "\n".join(errors) raise ValueError( "The parameterized decorator (and its variants) should always be first, but this is not the case in the" f" following files:\n{msg}" ) def check_all_objects_are_documented(): """Check all models are properly documented.""" documented_objs = find_all_documented_objects() modules = transformers._modules objects = [c for c in dir(transformers) if c not in modules and not c.startswith("_")] undocumented_objs = [c for c in objects if c not in documented_objs and not ignore_undocumented(c)] if len(undocumented_objs) > 0: raise Exception( "The following objects are in the public init so should be documented:\n - " + "\n - ".join(undocumented_objs) ) check_docstrings_are_in_md() check_model_type_doc_match() The provided code snippet includes necessary dependencies for implementing the `check_repo_quality` function. Write a Python function `def check_repo_quality()` to solve the following problem: Check all models are properly tested and documented. Here is the function: def check_repo_quality(): """Check all models are properly tested and documented.""" print("Checking all models are included.") check_model_list() print("Checking all models are public.") check_models_are_in_init() print("Checking all models are properly tested.") check_all_decorator_order() check_all_models_are_tested() print("Checking all objects are properly documented.") check_all_objects_are_documented() print("Checking all models are in at least one auto class.") check_all_models_are_auto_configured()
Check all models are properly tested and documented.
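A usage sketch for `check_repo_quality` above. The checks assume the working directory is the root of a Transformers checkout so that the path constants used by the individual helpers (for example `PATH_TO_TESTS`) resolve, and the utility is normally invoked as a script.

if __name__ == "__main__":
    # Run the full battery of repository consistency checks; any failure raises an exception.
    check_repo_quality()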
12,114
import argparse import collections.abc import importlib import inspect import json import os import shutil import sys from pathlib import Path from datasets import load_dataset from check_config_docstrings import get_checkpoint_from_config_class from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoTokenizer, LayoutLMv3TokenizerFast, PreTrainedTokenizerFast, logging, ) from transformers.feature_extraction_utils import FeatureExtractionMixin from transformers.file_utils import is_tf_available, is_torch_available from transformers.models.auto.configuration_auto import AutoConfig, model_type_to_module_name from transformers.processing_utils import ProcessorMixin, transformers_module from transformers.tokenization_utils_base import PreTrainedTokenizerBase The provided code snippet includes necessary dependencies for implementing the `get_architectures_from_config_class` function. Write a Python function `def get_architectures_from_config_class(config_class, arch_mappings)` to solve the following problem: Return a tuple of all possible architectures attributed to a configuration class `config_class`. For example, BertConfig -> [BertModel, BertForMaskedLM, ..., BertForQuestionAnswering]. Here is the function: def get_architectures_from_config_class(config_class, arch_mappings): """Return a tuple of all possible architectures attributed to a configuration class `config_class`. For example, BertConfig -> [BertModel, BertForMaskedLM, ..., BertForQuestionAnswering]. """ # A model architecture could appear in several mappings. For example, `BartForConditionalGeneration` is in # - MODEL_FOR_PRETRAINING_MAPPING_NAMES # - MODEL_WITH_LM_HEAD_MAPPING_NAMES # - MODEL_FOR_MASKED_LM_MAPPING_NAMES # - MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES # We avoid the duplication. architectures = set() for mapping in arch_mappings: if config_class in mapping: models = mapping[config_class] models = tuple(models) if isinstance(models, collections.abc.Sequence) else (models,) for model in models: if model.__name__ not in unexportable_model_architectures: architectures.add(model) architectures = tuple(architectures) return architectures
Return a tuple of all possible architectures attributed to a configuration class `config_class`. For example, BertConfig -> [BertModel, BertForMaskedLM, ..., BertForQuestionAnswering].
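A sketch of calling `get_architectures_from_config_class` above with a couple of real auto-mappings. Which mappings to pass is the caller's choice (the surrounding validation script gathers them programmatically), and the snippet assumes it lives in the same module as the function, since the function reads a module-level `unexportable_model_architectures` collection that is approximated here with an empty placeholder.

from transformers import BertConfig
from transformers.models.auto.modeling_auto import (
    MODEL_FOR_MASKED_LM_MAPPING,
    MODEL_FOR_QUESTION_ANSWERING_MAPPING,
    MODEL_MAPPING,
)

unexportable_model_architectures = []  # stand-in for the list defined elsewhere in the original script
arch_mappings = [MODEL_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING]
architectures = get_architectures_from_config_class(BertConfig, arch_mappings)
# Something like (BertModel, BertForMaskedLM, BertForQuestionAnswering); the order is not
# guaranteed because the function deduplicates through a set.
print([arch.__name__ for arch in architectures])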
12,115
import argparse import collections.abc import importlib import inspect import json import os import shutil import sys from pathlib import Path from datasets import load_dataset from check_config_docstrings import get_checkpoint_from_config_class from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoTokenizer, LayoutLMv3TokenizerFast, PreTrainedTokenizerFast, logging, ) from transformers.feature_extraction_utils import FeatureExtractionMixin from transformers.file_utils import is_tf_available, is_torch_available from transformers.models.auto.configuration_auto import AutoConfig, model_type_to_module_name from transformers.processing_utils import ProcessorMixin, transformers_module from transformers.tokenization_utils_base import PreTrainedTokenizerBase FRAMEWORKS = ["pytorch", "tensorflow"] def build_failed_report(results, include_warning=True): failed_results = {} for config_name in results: if "error" in results[config_name]: if config_name not in failed_results: failed_results[config_name] = {} failed_results[config_name] = {"error": results[config_name]["error"]} if include_warning and "warnings" in results[config_name]: if config_name not in failed_results: failed_results[config_name] = {} failed_results[config_name]["warnings"] = results[config_name]["warnings"] for framework in FRAMEWORKS: if framework not in results[config_name]: continue for arch_name in results[config_name][framework]: if "error" in results[config_name][framework][arch_name]: if config_name not in failed_results: failed_results[config_name] = {} if framework not in failed_results[config_name]: failed_results[config_name][framework] = {} if arch_name not in failed_results[config_name][framework]: failed_results[config_name][framework][arch_name] = {} error = results[config_name][framework][arch_name]["error"] failed_results[config_name][framework][arch_name]["error"] = error return failed_results
null
12,116
import argparse import collections.abc import importlib import inspect import json import os import shutil import sys from pathlib import Path from datasets import load_dataset from check_config_docstrings import get_checkpoint_from_config_class from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoTokenizer, LayoutLMv3TokenizerFast, PreTrainedTokenizerFast, logging, ) from transformers.feature_extraction_utils import FeatureExtractionMixin from transformers.file_utils import is_tf_available, is_torch_available from transformers.models.auto.configuration_auto import AutoConfig, model_type_to_module_name from transformers.processing_utils import ProcessorMixin, transformers_module from transformers.tokenization_utils_base import PreTrainedTokenizerBase FRAMEWORKS = ["pytorch", "tensorflow"] def build_simple_report(results): text = "" failed_text = "" for config_name in results: for framework in FRAMEWORKS: if framework not in results[config_name]: continue for arch_name in results[config_name][framework]: if "error" in results[config_name][framework][arch_name]: result = results[config_name][framework][arch_name]["error"] failed_text += f"{arch_name}: {result}\n" else: result = "OK" text += f"{arch_name}: {result}\n" return text, failed_text
null
12,117
import argparse import collections.abc import importlib import inspect import json import os import shutil import sys from pathlib import Path from datasets import load_dataset from check_config_docstrings import get_checkpoint_from_config_class from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoTokenizer, LayoutLMv3TokenizerFast, PreTrainedTokenizerFast, logging, ) from transformers.feature_extraction_utils import FeatureExtractionMixin from transformers.file_utils import is_tf_available, is_torch_available from transformers.models.auto.configuration_auto import AutoConfig, model_type_to_module_name from transformers.processing_utils import ProcessorMixin, transformers_module from transformers.tokenization_utils_base import PreTrainedTokenizerBase def list_str(values): return values.split(",")
null
12,118
import collections import importlib.util import os import re from pathlib import Path PATH_TO_TRANSFORMERS = "src/transformers" def parse_init(init_file): """ Read an init_file and parse (per backend) the _import_structure objects defined and the TYPE_CHECKING objects defined """ with open(init_file, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() line_index = 0 while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lines): return None # First grab the objects without a specific backend in _import_structure objects = [] while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None: line = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(line): content = _re_one_line_import_struct.search(line).groups()[0] imports = re.findall("\[([^\]]+)\]", content) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(", ")]) line_index += 1 continue single_line_import_search = _re_import_struct_key_value.search(line) if single_line_import_search is not None: imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0] objects.extend(imports) elif line.startswith(" " * 8 + '"'): objects.append(line[9:-3]) line_index += 1 import_dict_objects = {"none": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("if TYPE_CHECKING"): # If the line is an if not is_backend_available, we grab all objects associated. backend = find_backend(lines[line_index]) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1]) is None: backend = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index]) is None: line_index += 1 line_index += 1 objects = [] # Until we unindent, add backend objects to the list while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4): line = lines[line_index] if _re_import_struct_add_one.search(line) is not None: objects.append(_re_import_struct_add_one.search(line).groups()[0]) elif _re_import_struct_add_many.search(line) is not None: imports = _re_import_struct_add_many.search(line).groups()[0].split(", ") imports = [obj[1:-1] for obj in imports if len(obj) > 0] objects.extend(imports) elif _re_between_brackets.search(line) is not None: imports = _re_between_brackets.search(line).groups()[0].split(", ") imports = [obj[1:-1] for obj in imports if len(obj) > 0] objects.extend(imports) elif _re_quote_object.search(line) is not None: objects.append(_re_quote_object.search(line).groups()[0]) elif line.startswith(" " * 8 + '"'): objects.append(line[9:-3]) elif line.startswith(" " * 12 + '"'): objects.append(line[13:-3]) line_index += 1 import_dict_objects[backend] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend objects = [] while ( line_index < len(lines) and find_backend(lines[line_index]) is None and not lines[line_index].startswith("else") ): line = lines[line_index] single_line_import_search = _re_import.search(line) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", ")) elif line.startswith(" " * 8): objects.append(line[8:-2]) line_index += 1 type_hint_objects = {"none": 
objects} # Let's continue with backend-specific objects while line_index < len(lines): # If the line is an if is_backend_available, we grab all objects associated. backend = find_backend(lines[line_index]) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1]) is None: backend = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index]) is None: line_index += 1 line_index += 1 objects = [] # Until we unindent, add backend objects to the list while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8): line = lines[line_index] single_line_import_search = _re_import.search(line) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", ")) elif line.startswith(" " * 12): objects.append(line[12:-2]) line_index += 1 type_hint_objects[backend] = objects else: line_index += 1 return import_dict_objects, type_hint_objects def analyze_results(import_dict_objects, type_hint_objects): """ Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init. """ def find_duplicates(seq): return [k for k, v in collections.Counter(seq).items() if v > 1] if list(import_dict_objects.keys()) != list(type_hint_objects.keys()): return ["Both sides of the init do not have the same backends!"] errors = [] for key in import_dict_objects.keys(): duplicate_imports = find_duplicates(import_dict_objects[key]) if duplicate_imports: errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}") duplicate_type_hints = find_duplicates(type_hint_objects[key]) if duplicate_type_hints: errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}") if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])): name = "base imports" if key == "none" else f"{key} backend" errors.append(f"Differences for {name}:") for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f" {a} in TYPE_HINT but not in _import_structure.") for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f" {a} in _import_structure but not in TYPE_HINT.") return errors The provided code snippet includes necessary dependencies for implementing the `check_all_inits` function. Write a Python function `def check_all_inits()` to solve the following problem: Check all inits in the transformers repo and raise an error if at least one does not define the same objects in both halves. Here is the function: def check_all_inits(): """ Check all inits in the transformers repo and raise an error if at least one does not define the same objects in both halves. """ failures = [] for root, _, files in os.walk(PATH_TO_TRANSFORMERS): if "__init__.py" in files: fname = os.path.join(root, "__init__.py") objects = parse_init(fname) if objects is not None: errors = analyze_results(*objects) if len(errors) > 0: errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}" failures.append("\n".join(errors)) if len(failures) > 0: raise ValueError("\n\n".join(failures))
Check all inits in the transformers repo and raise an error if at least one does not define the same objects in both halves.
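A usage sketch for `check_all_inits` above. Like the other consistency checks, it expects to run from the repository root so that `PATH_TO_TRANSFORMERS` ("src/transformers") exists on disk.

if __name__ == "__main__":
    # Walk every __init__.py under src/transformers and compare the _import_structure
    # half against the TYPE_CHECKING half, raising on any mismatch.
    check_all_inits()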
12,119
import collections import importlib.util import os import re from pathlib import Path PATH_TO_TRANSFORMERS = "src/transformers" def get_transformers_submodules(): """ Returns the list of Transformers submodules. """ submodules = [] for path, directories, files in os.walk(PATH_TO_TRANSFORMERS): for folder in directories: # Ignore private modules if folder.startswith("_"): directories.remove(folder) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(path) / folder).glob("*.py"))) == 0: continue short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS)) submodule = short_path.replace(os.path.sep, ".") submodules.append(submodule) for fname in files: if fname == "__init__.py": continue short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS)) submodule = short_path.replace(".py", "").replace(os.path.sep, ".") if len(submodule.split(".")) == 1: submodules.append(submodule) return submodules IGNORE_SUBMODULES = [ "convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", "models.esm.openfold_utils", ] def check_submodules(): # This is to make sure the transformers module imported is the one in the repo. spec = importlib.util.spec_from_file_location( "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) transformers = spec.loader.load_module() module_not_registered = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(module_not_registered) > 0: list_of_modules = "\n".join(f"- {module}" for module in module_not_registered) raise ValueError( "The following submodules are not properly registed in the main init of Transformers:\n" f"{list_of_modules}\n" "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
null
12,120
import argparse import json import subprocess def get_runner_status(target_runners, token): offline_runners = [] cmd = ( f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"' " https://api.github.com/repos/huggingface/transformers/actions/runners" ) output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE) o = output.stdout.decode("utf-8") status = json.loads(o) runners = status["runners"] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(runner) # save the result so we can report them on Slack with open("offline_runners.txt", "w") as fp: fp.write(json.dumps(offline_runners)) if len(offline_runners) > 0: failed = "\n".join(offline_runners) raise ValueError(f"The following runners are offline:\n{failed}")
null
12,121
import argparse import json import subprocess def list_str(values): return values.split(",")
null
12,122
import argparse from collections import defaultdict import yaml PATH_TO_TOC = "docs/source/en/_toctree.yml" def clean_model_doc_toc(model_doc): """ Cleans the table of content of the model documentation by removing duplicates and sorting models alphabetically. """ counts = defaultdict(int) for doc in model_doc: counts[doc["local"]] += 1 duplicates = [key for key, value in counts.items() if value > 1] new_doc = [] for duplicate_key in duplicates: titles = list(set(doc["title"] for doc in model_doc if doc["local"] == duplicate_key)) if len(titles) > 1: raise ValueError( f"{duplicate_key} is present several times in the documentation table of content at " "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the " "others." ) # Only add this once new_doc.append({"local": duplicate_key, "title": titles[0]}) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1]) # Sort return sorted(new_doc, key=lambda s: s["title"].lower()) def check_model_doc(overwrite=False): with open(PATH_TO_TOC, encoding="utf-8") as f: content = yaml.safe_load(f.read()) # Get to the API doc api_idx = 0 while content[api_idx]["title"] != "API": api_idx += 1 api_doc = content[api_idx]["sections"] # Then to the model doc model_idx = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 model_doc = api_doc[model_idx]["sections"] modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section] diff = False for idx, modality_doc in modalities_docs: old_modality_doc = modality_doc["sections"] new_modality_doc = clean_model_doc_toc(old_modality_doc) if old_modality_doc != new_modality_doc: diff = True if overwrite: model_doc[idx]["sections"] = new_modality_doc if diff: if overwrite: api_doc[model_idx]["sections"] = model_doc content[api_idx]["sections"] = api_doc with open(PATH_TO_TOC, "w", encoding="utf-8") as f: f.write(yaml.dump(content, allow_unicode=True)) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
null
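A short sketch of the two modes of `check_model_doc` above; the original utility picks between them from a command-line flag, which is an assumption about the wrapper rather than something shown in this entry.

check_model_doc(overwrite=False)  # verify only: raises ValueError if the model ToC is out of order
check_model_doc(overwrite=True)   # rewrite docs/source/en/_toctree.yml in place with the cleaned, sorted ToC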
12,123
import argparse import math import dateutil.parser as date_parser import requests def extract_time_from_single_job(job): """Extract time info from a single job in a GitHub Actions workflow run""" job_info = {} start = job["started_at"] end = job["completed_at"] start_datetime = date_parser.parse(start) end_datetime = date_parser.parse(end) duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0) job_info["started_at"] = start job_info["completed_at"] = end job_info["duration"] = duration_in_min return job_info The provided code snippet includes necessary dependencies for implementing the `get_job_time` function. Write a Python function `def get_job_time(workflow_run_id)` to solve the following problem: Extract time info for all jobs in a GitHub Actions workflow run Here is the function: def get_job_time(workflow_run_id): """Extract time info for all jobs in a GitHub Actions workflow run""" url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" result = requests.get(url).json() job_time = {} try: job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]}) pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100) for i in range(pages_to_iterate_over): result = requests.get(url + f"&page={i + 2}").json() job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]}) return job_time except Exception as e: print("Unknown error, could not fetch links.", e) return {}
Extract time info for all jobs in a GitHub Actions workflow run
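A usage sketch for `get_job_time` above; the workflow run id is a placeholder, and unauthenticated calls to the GitHub API like this one are subject to rate limiting.

job_time = get_job_time("3000000000")  # hypothetical workflow run id
for name, info in sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True):
    print(f'{name}: {info["duration"]} min')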
12,124
import argparse import os import re import packaging.version def global_version_update(version, patch=False): """Update the version in all needed files.""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(fname, version, pattern) if not patch: update_version_in_examples(version) def clean_main_ref_in_model_list(): """Replace the links from main doc tp stable doc in the model list of the README.""" # If the introduction or the conclusion of the list change, the prompts may need to be updated. _start_prompt = "🤗 Transformers currently provides the following architectures" _end_prompt = "1. Want to contribute a new model?" with open(README_FILE, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() # Find the start of the list. start_index = 0 while not lines[start_index].startswith(_start_prompt): start_index += 1 start_index += 1 index = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt): if lines[index].startswith("1."): lines[index] = lines[index].replace( "https://huggingface.co/docs/transformers/main/model_doc", "https://huggingface.co/docs/transformers/model_doc", ) index += 1 with open(README_FILE, "w", encoding="utf-8", newline="\n") as f: f.writelines(lines) def get_version(): """Reads the current version in the __init__.""" with open(REPLACE_FILES["init"], "r") as f: code = f.read() default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0] return packaging.version.parse(default_version) The provided code snippet includes necessary dependencies for implementing the `pre_release_work` function. Write a Python function `def pre_release_work(patch=False)` to solve the following problem: Do all the necessary pre-release steps. Here is the function: def pre_release_work(patch=False): """Do all the necessary pre-release steps.""" # First let's get the default version: base version if we are in dev, bump minor otherwise. default_version = get_version() if patch and default_version.is_devrelease: raise ValueError("Can't create a patch version from the dev branch, checkout a released version!") if default_version.is_devrelease: default_version = default_version.base_version elif patch: default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}" else: default_version = f"{default_version.major}.{default_version.minor + 1}.0" # Now let's ask nicely if that's the right one. version = input(f"Which version are you releasing? [{default_version}]") if len(version) == 0: version = default_version print(f"Updating version to {version}.") global_version_update(version, patch=patch) if not patch: print("Cleaning main README, don't forget to run `make fix-copies`.") clean_main_ref_in_model_list()
Do all the necessary pre-release steps.
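A sketch of how `pre_release_work` above is typically driven. Note that the call is interactive (it prompts for the version to release), so it is meant to be run by a human rather than in CI.

pre_release_work(patch=False)  # from the dev branch: propose the current base version (drop .dev0) and clean the README links
pre_release_work(patch=True)   # from a release branch: propose a micro bump; it refuses to run from a dev version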
12,125
import argparse import os import re import packaging.version def global_version_update(version, patch=False): """Update the version in all needed files.""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(fname, version, pattern) if not patch: update_version_in_examples(version) def clean_main_ref_in_model_list(): """Replace the links from main doc tp stable doc in the model list of the README.""" # If the introduction or the conclusion of the list change, the prompts may need to be updated. _start_prompt = "🤗 Transformers currently provides the following architectures" _end_prompt = "1. Want to contribute a new model?" with open(README_FILE, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() # Find the start of the list. start_index = 0 while not lines[start_index].startswith(_start_prompt): start_index += 1 start_index += 1 index = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt): if lines[index].startswith("1."): lines[index] = lines[index].replace( "https://huggingface.co/docs/transformers/main/model_doc", "https://huggingface.co/docs/transformers/model_doc", ) index += 1 with open(README_FILE, "w", encoding="utf-8", newline="\n") as f: f.writelines(lines) def get_version(): """Reads the current version in the __init__.""" with open(REPLACE_FILES["init"], "r") as f: code = f.read() default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0] return packaging.version.parse(default_version) The provided code snippet includes necessary dependencies for implementing the `post_release_work` function. Write a Python function `def post_release_work()` to solve the following problem: Do all the necesarry post-release steps. Here is the function: def post_release_work(): """Do all the necesarry post-release steps.""" # First let's get the current version current_version = get_version() dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0" current_version = current_version.base_version # Check with the user we got that right. version = input(f"Which version are we developing now? [{dev_version}]") if len(version) == 0: version = dev_version print(f"Updating version to {version}.") global_version_update(version) print("Cleaning main README, don't forget to run `make fix-copies`.") clean_main_ref_in_model_list()
Do all the necessary post-release steps.
12,126
import ast import collections import functools import json import math import operator import os import re import sys import time from typing import Dict, List, Optional, Union import requests from slack_sdk import WebClient def handle_test_results(test_results): expressions = test_results.split(" ") failed = 0 success = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1] for i, expression in enumerate(expressions): if "failed" in expression: failed += int(expressions[i - 1]) if "passed" in expression: success += int(expressions[i - 1]) return failed, success, time_spent
null
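A small sketch of `handle_test_results` above on a made-up pytest summary string, showing how the failed/passed counts and the elapsed-time token are pulled out.

failed, success, time_spent = handle_test_results("= 2 failed, 98 passed in 3600.00s =")
print(failed, success, time_spent)  # -> 2 98 3600.00s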
12,127
import ast import collections import functools import json import math import operator import os import re import sys import time from typing import Dict, List, Optional, Union import requests from slack_sdk import WebClient def handle_stacktraces(test_results): # These files should follow the following architecture: # === FAILURES === # <path>:<line>: Error ... # <path>:<line>: Error ... # <empty line> total_stacktraces = test_results.split("\n")[1:-1] stacktraces = [] for stacktrace in total_stacktraces: try: line = stacktrace[: stacktrace.index(" ")].split(":")[-2] error_message = stacktrace[stacktrace.index(" ") :] stacktraces.append(f"(line {line}) {error_message}") except Exception: stacktraces.append("Cannot retrieve error message.") return stacktraces
null
12,128
import ast import collections import functools import json import math import operator import os import re import sys import time from typing import Dict, List, Optional, Union import requests from slack_sdk import WebClient def dicts_to_sum(objects: Union[Dict[str, Dict], List[dict]]): if isinstance(objects, dict): lists = objects.values() else: lists = objects # Convert each dictionary to counter counters = map(collections.Counter, lists) # Sum all the counters return functools.reduce(operator.add, counters)
null
12,129
import ast import collections import functools import json import math import operator import os import re import sys import time from typing import Dict, List, Optional, Union import requests from slack_sdk import WebClient def get_job_links(): run_id = os.environ["GITHUB_RUN_ID"] url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100" result = requests.get(url).json() jobs = {} try: jobs.update({job["name"]: job["html_url"] for job in result["jobs"]}) pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100) for i in range(pages_to_iterate_over): result = requests.get(url + f"&page={i + 2}").json() jobs.update({job["name"]: job["html_url"] for job in result["jobs"]}) return jobs except Exception as e: print("Unknown error, could not fetch links.", e) return {}
null
12,130
import ast import collections import functools import json import math import operator import os import re import sys import time from typing import Dict, List, Optional, Union import requests from slack_sdk import WebClient def retrieve_artifact(name: str, gpu: Optional[str]): if gpu not in [None, "single", "multi"]: raise ValueError(f"Invalid GPU for artifact. Passed GPU: `{gpu}`.") if gpu is not None: name = f"{gpu}-gpu_{name}" _artifact = {} if os.path.exists(name): files = os.listdir(name) for file in files: try: with open(os.path.join(name, file)) as f: _artifact[file.split(".")[0]] = f.read() except UnicodeDecodeError as e: raise ValueError(f"Could not open {os.path.join(name, file)}.") from e return _artifact
null
12,131
import ast import collections import functools import json import math import operator import os import re import sys import time from typing import Dict, List, Optional, Union import requests from slack_sdk import WebClient def retrieve_available_artifacts(): class Artifact: def __init__(self, name: str, single_gpu: bool = False, multi_gpu: bool = False): self.name = name self.single_gpu = single_gpu self.multi_gpu = multi_gpu self.paths = [] def __str__(self): return self.name def add_path(self, path: str, gpu: str = None): self.paths.append({"name": self.name, "path": path, "gpu": gpu}) _available_artifacts: Dict[str, Artifact] = {} directories = filter(os.path.isdir, os.listdir()) for directory in directories: if directory.startswith("single-gpu"): artifact_name = directory[len("single-gpu") + 1 :] if artifact_name in _available_artifacts: _available_artifacts[artifact_name].single_gpu = True else: _available_artifacts[artifact_name] = Artifact(artifact_name, single_gpu=True) _available_artifacts[artifact_name].add_path(directory, gpu="single") elif directory.startswith("multi-gpu"): artifact_name = directory[len("multi-gpu") + 1 :] if artifact_name in _available_artifacts: _available_artifacts[artifact_name].multi_gpu = True else: _available_artifacts[artifact_name] = Artifact(artifact_name, multi_gpu=True) _available_artifacts[artifact_name].add_path(directory, gpu="multi") else: artifact_name = directory if artifact_name not in _available_artifacts: _available_artifacts[artifact_name] = Artifact(artifact_name) _available_artifacts[artifact_name].add_path(directory) return _available_artifacts
null
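A usage sketch for `retrieve_available_artifacts` above. It assumes the current working directory contains the downloaded GitHub Actions artifact folders; names such as "single-gpu_run_all_tests_gpu_test_reports" are illustrative.

available_artifacts = retrieve_available_artifacts()
for name, artifact in available_artifacts.items():
    print(name, "single:", artifact.single_gpu, "multi:", artifact.multi_gpu)
    for path_info in artifact.paths:
        print("   ", path_info["path"], path_info["gpu"])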
12,132
import ast import collections import functools import json import math import operator import os import re import sys import time from typing import Dict, List, Optional, Union import requests from slack_sdk import WebClient def prepare_reports(title, header, reports, to_truncate=True): report = "" MAX_ERROR_TEXT = 3000 - len("[Truncated]") if not to_truncate: MAX_ERROR_TEXT = float("inf") if len(reports) > 0: # `text` must be less than 3001 characters in Slack SDK # keep some room for adding "[Truncated]" when necessary for idx in range(len(reports)): _report = header + "\n".join(reports[: idx + 1]) new_report = f"{title}:\n```\n{_report}\n```\n" if len(new_report) > MAX_ERROR_TEXT: # `report` here has length <= 3000 report = report + "[Truncated]" break report = new_report return report
null
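A sketch of `prepare_reports` above with made-up failure lines; the resulting string is shaped for the Slack notification blocks built elsewhere in the script, and this example stays well under the 3000-character limit the function truncates against.

failures = [
    "tests/models/bert/test_modeling_bert.py::BertModelTest::test_forward",
    "tests/models/gpt2/test_modeling_gpt2.py::GPT2ModelTest::test_generate",
]
report = prepare_reports(title="Failed tests", header="single-gpu\n", reports=failures)
print(report)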
12,133
import argparse import os import sys import urllib.request import zipfile TASK2PATH = { "CoLA": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4", "SST": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8", "MRPC": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc", "QQP": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5", "STS": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5", "MNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce", "SNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSNLI.zip?alt=media&token=4afcfbb2-ff0c-4b2d-a09a-dbf07926f4df", "QNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601", "RTE": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb", "WNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf", "diagnostic": "https://storage.googleapis.com/mtl-sentence-representations.appspot.com/tsvsWithoutLabels%2FAX.tsv?GoogleAccessId=firebase-adminsdk-0khhl@mtl-sentence-representations.iam.gserviceaccount.com&Expires=2498860800&Signature=DuQ2CSPt2Yfre0C%2BiISrVYrIFaZH1Lc7hBVZDD4ZyR7fZYOMNOUGpi8QxBmTNOrNPjR3z1cggo7WXFfrgECP6FBJSsURv8Ybrue8Ypt%2FTPxbuJ0Xc2FhDi%2BarnecCBFO77RSbfuz%2Bs95hRrYhTnByqu3U%2FYZPaj3tZt5QdfpH2IUROY8LiBXoXS46LE%2FgOQc%2FKN%2BA9SoscRDYsnxHfG0IjXGwHN%2Bf88q6hOmAxeNPx6moDulUF6XMUAaXCSFU%2BnRO2RDL9CapWxj%2BDl7syNyHhB7987hZ80B%2FwFkQ3MEs8auvt5XW1%2Bd4aCU7ytgM69r8JDCwibfhZxpaa4gd50QXQ%3D%3D", } def download_and_extract(task, data_dir): print(f"Downloading and extracting {task}...") data_file = f"{task}.zip" urllib.request.urlretrieve(TASK2PATH[task], data_file) with zipfile.ZipFile(data_file) as zip_ref: zip_ref.extractall(data_dir) os.remove(data_file) print("\tCompleted!")
null
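A sketch showing how the GLUE download helper above is typically driven; the target directory name is illustrative, and only zip-backed tasks are passed here (MRPC and the diagnostic set are handled by separate helpers in the same script).

import os

data_dir = "glue_data"
os.makedirs(data_dir, exist_ok=True)
for task in ["CoLA", "SST", "QNLI"]:
    download_and_extract(task, data_dir)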
12,134
import argparse import os import sys import urllib.request import zipfile TASK2PATH = { "CoLA": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4", "SST": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8", "MRPC": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc", "QQP": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5", "STS": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5", "MNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce", "SNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSNLI.zip?alt=media&token=4afcfbb2-ff0c-4b2d-a09a-dbf07926f4df", "QNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601", "RTE": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb", "WNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf", "diagnostic": "https://storage.googleapis.com/mtl-sentence-representations.appspot.com/tsvsWithoutLabels%2FAX.tsv?GoogleAccessId=firebase-adminsdk-0khhl@mtl-sentence-representations.iam.gserviceaccount.com&Expires=2498860800&Signature=DuQ2CSPt2Yfre0C%2BiISrVYrIFaZH1Lc7hBVZDD4ZyR7fZYOMNOUGpi8QxBmTNOrNPjR3z1cggo7WXFfrgECP6FBJSsURv8Ybrue8Ypt%2FTPxbuJ0Xc2FhDi%2BarnecCBFO77RSbfuz%2Bs95hRrYhTnByqu3U%2FYZPaj3tZt5QdfpH2IUROY8LiBXoXS46LE%2FgOQc%2FKN%2BA9SoscRDYsnxHfG0IjXGwHN%2Bf88q6hOmAxeNPx6moDulUF6XMUAaXCSFU%2BnRO2RDL9CapWxj%2BDl7syNyHhB7987hZ80B%2FwFkQ3MEs8auvt5XW1%2Bd4aCU7ytgM69r8JDCwibfhZxpaa4gd50QXQ%3D%3D", } MRPC_TRAIN = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt" MRPC_TEST = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt" def format_mrpc(data_dir, path_to_data): print("Processing MRPC...") mrpc_dir = os.path.join(data_dir, "MRPC") if not os.path.isdir(mrpc_dir): os.mkdir(mrpc_dir) if path_to_data: mrpc_train_file = os.path.join(path_to_data, "msr_paraphrase_train.txt") mrpc_test_file = os.path.join(path_to_data, "msr_paraphrase_test.txt") else: print("Local MRPC data not specified, downloading data from %s" % MRPC_TRAIN) mrpc_train_file = os.path.join(mrpc_dir, "msr_paraphrase_train.txt") mrpc_test_file = os.path.join(mrpc_dir, "msr_paraphrase_test.txt") urllib.request.urlretrieve(MRPC_TRAIN, mrpc_train_file) urllib.request.urlretrieve(MRPC_TEST, mrpc_test_file) if not os.path.isfile(mrpc_train_file): raise ValueError(f"Train data not found at {mrpc_train_file}") if not os.path.isfile(mrpc_test_file): raise ValueError(f"Test data not found at {mrpc_test_file}") urllib.request.urlretrieve(TASK2PATH["MRPC"], os.path.join(mrpc_dir, "dev_ids.tsv")) dev_ids = [] with open(os.path.join(mrpc_dir, 
"dev_ids.tsv"), encoding="utf8") as ids_fh: for row in ids_fh: dev_ids.append(row.strip().split("\t")) with open(mrpc_train_file, encoding="utf8") as data_fh, open( os.path.join(mrpc_dir, "train.tsv"), "w", encoding="utf8" ) as train_fh, open(os.path.join(mrpc_dir, "dev.tsv"), "w", encoding="utf8") as dev_fh: header = data_fh.readline() train_fh.write(header) dev_fh.write(header) for row in data_fh: label, id1, id2, s1, s2 = row.strip().split("\t") if [id1, id2] in dev_ids: dev_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2)) else: train_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2)) with open(mrpc_test_file, encoding="utf8") as data_fh, open( os.path.join(mrpc_dir, "test.tsv"), "w", encoding="utf8" ) as test_fh: header = data_fh.readline() test_fh.write("index\t#1 ID\t#2 ID\t#1 String\t#2 String\n") for idx, row in enumerate(data_fh): label, id1, id2, s1, s2 = row.strip().split("\t") test_fh.write("%d\t%s\t%s\t%s\t%s\n" % (idx, id1, id2, s1, s2)) print("\tCompleted!")
null
12,135
import argparse import os import sys import urllib.request import zipfile TASK2PATH = { "CoLA": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4", "SST": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8", "MRPC": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc", "QQP": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5", "STS": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5", "MNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce", "SNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSNLI.zip?alt=media&token=4afcfbb2-ff0c-4b2d-a09a-dbf07926f4df", "QNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601", "RTE": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb", "WNLI": "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf", "diagnostic": "https://storage.googleapis.com/mtl-sentence-representations.appspot.com/tsvsWithoutLabels%2FAX.tsv?GoogleAccessId=firebase-adminsdk-0khhl@mtl-sentence-representations.iam.gserviceaccount.com&Expires=2498860800&Signature=DuQ2CSPt2Yfre0C%2BiISrVYrIFaZH1Lc7hBVZDD4ZyR7fZYOMNOUGpi8QxBmTNOrNPjR3z1cggo7WXFfrgECP6FBJSsURv8Ybrue8Ypt%2FTPxbuJ0Xc2FhDi%2BarnecCBFO77RSbfuz%2Bs95hRrYhTnByqu3U%2FYZPaj3tZt5QdfpH2IUROY8LiBXoXS46LE%2FgOQc%2FKN%2BA9SoscRDYsnxHfG0IjXGwHN%2Bf88q6hOmAxeNPx6moDulUF6XMUAaXCSFU%2BnRO2RDL9CapWxj%2BDl7syNyHhB7987hZ80B%2FwFkQ3MEs8auvt5XW1%2Bd4aCU7ytgM69r8JDCwibfhZxpaa4gd50QXQ%3D%3D", } def download_diagnostic(data_dir): print("Downloading and extracting diagnostic...") if not os.path.isdir(os.path.join(data_dir, "diagnostic")): os.mkdir(os.path.join(data_dir, "diagnostic")) data_file = os.path.join(data_dir, "diagnostic", "diagnostic.tsv") urllib.request.urlretrieve(TASK2PATH["diagnostic"], data_file) print("\tCompleted!") return
null
12,136
import argparse import os import sys import urllib.request import zipfile TASKS = ["CoLA", "SST", "MRPC", "QQP", "STS", "MNLI", "SNLI", "QNLI", "RTE", "WNLI", "diagnostic"] def get_tasks(task_names): task_names = task_names.split(",") if "all" in task_names: tasks = TASKS else: tasks = [] for task_name in task_names: if task_name not in TASKS: raise ValueError(f"Task {task_name} not found!") tasks.append(task_name) return tasks
null
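A small sketch of `get_tasks` above, mirroring how the download script turns a comma-separated task argument into a task list.

print(get_tasks("all"))       # the full TASKS list, diagnostic included
print(get_tasks("MRPC,RTE"))  # ['MRPC', 'RTE']
# get_tasks("FOO") would raise ValueError: Task FOO not found!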
12,137
import argparse import json import math import os import subprocess import time import zipfile from collections import Counter import requests The provided code snippet includes necessary dependencies for implementing the `get_job_links` function. Write a Python function `def get_job_links(workflow_run_id)` to solve the following problem: Extract job names and their job links in a GitHub Actions workflow run Here is the function: def get_job_links(workflow_run_id): """Extract job names and their job links in a GitHub Actions workflow run""" url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" result = requests.get(url).json() job_links = {} try: job_links.update({job["name"]: job["html_url"] for job in result["jobs"]}) pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100) for i in range(pages_to_iterate_over): result = requests.get(url + f"&page={i + 2}").json() job_links.update({job["name"]: job["html_url"] for job in result["jobs"]}) return job_links except Exception as e: print("Unknown error, could not fetch links.", e) return {}
Extract job names and their job links in a GitHub Actions workflow run
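A usage sketch for `get_job_links` above; as with the similar GitHub Actions helpers in the preceding entries, the run id is a placeholder and unauthenticated API calls are rate-limited.

job_links = get_job_links("3000000000")  # hypothetical workflow run id
for name, url in job_links.items():
    print(f"{name}: {url}")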