id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
21,600 | import json
import logging
import os
import sys
from contextlib import nullcontext
from dataclasses import dataclass, field
from typing import Optional
import datasets
import transformers
from datasets import load_dataset, load_metric
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizerFast,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils.versions import require_version
from sparseml.pytorch.utils.distributed import record
from sparseml.transformers.sparsification import (
QuestionAnsweringTrainer,
TrainingArguments,
postprocess_qa_predictions,
)
from sparseml.transformers.utils import SparseAutoModel, get_shared_tokenizer_src
def main(**kwargs):
def _mp_fn(index):
# For xla_spawn (TPUs)
main() | null |
21,601 | import logging
import os
import random
import sys
from contextlib import nullcontext
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import transformers
from datasets import load_dataset, load_metric
from sklearn.model_selection import StratifiedShuffleSplit
from torch.nn import Module
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
PretrainedConfig,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils.versions import require_version
from sparseml.pytorch.utils.distributed import record
from sparseml.transformers.sparsification import Trainer, TrainingArguments
from sparseml.transformers.utils import (
SparseAutoModel,
get_shared_tokenizer_src,
multi_label_precision_recall_f1,
)
def main(**kwargs):
    """
    Entry point for sparse-aware text-classification training and evaluation.

    Parses ModelArguments, DataTrainingArguments and TrainingArguments (from a
    JSON file path, the CLI, or ``**kwargs``), builds the model plus an optional
    distillation teacher and tokenizer, tokenizes the datasets, then runs
    train / eval / predict / sample-export as requested by the training args.
    """
    # See all possible arguments in
    # src/sparseml/transformers/sparsification/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser(
        (ModelArguments, DataTrainingArguments, TrainingArguments)
    )
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(
            json_file=os.path.abspath(sys.argv[1])
        )
    elif not kwargs:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    else:
        model_args, data_args, training_args = parser.parse_dict(kwargs)

    # Setup logging
    log_level = training_args.get_process_log_level()
    _LOGGER.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    _LOGGER.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, "
        f"n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    _LOGGER.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if (
        os.path.isdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and (len(os.listdir(training_args.output_dir)) > 0):
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already "
                "exists and is not empty. Use --overwrite_output_dir to overcome."
            )
        elif (
            last_checkpoint is not None and training_args.resume_from_checkpoint is None
        ):
            _LOGGER.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To "
                "avoid this behavior, change the `--output_dir` or add "
                "`--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    raw_datasets = _get_raw_dataset(
        data_args, cache_dir=model_args.cache_dir, do_predict=training_args.do_predict
    )

    # Labels
    (
        is_regression,
        label_column,
        label_list,
        num_labels,
        is_multi_label_classification,
    ) = _get_label_info(data_args, raw_datasets)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one
    # local process can concurrently download model & vocab.
    config_kwargs = {}
    if is_multi_label_classification:
        config_kwargs["problem_type"] = "multi_label_classification"
    config = AutoConfig.from_pretrained(
        model_args.config_name
        if model_args.config_name
        else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        **config_kwargs,
    )
    model, teacher = SparseAutoModel.text_classification_from_pretrained_distil(
        model_name_or_path=(
            model_args.tokenizer_name
            if model_args.tokenizer_name
            else model_args.model_name_or_path
        ),
        model_kwargs={
            "config": config,
            "cache_dir": model_args.cache_dir,
            "revision": model_args.model_revision,
            "use_auth_token": True if model_args.use_auth_token else None,
        },
        teacher_name_or_path=training_args.distill_teacher,
        teacher_kwargs={
            "cache_dir": model_args.cache_dir,
            "use_auth_token": True if model_args.use_auth_token else None,
        },
    )

    teacher_tokenizer = None
    tokenizer_kwargs = dict(
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    if not model_args.use_teacher_tokenizer:
        tokenizer_src = (
            model_args.tokenizer_name
            if model_args.tokenizer_name
            else get_shared_tokenizer_src(model, teacher)
        )
    else:
        tokenizer_src = (
            model_args.tokenizer_name
            if model_args.tokenizer_name
            else model.config._name_or_path
        )
        teacher_tokenizer = AutoTokenizer.from_pretrained(
            teacher.config._name_or_path,
            **tokenizer_kwargs,
        )
    tokenizer = AutoTokenizer.from_pretrained(
        tokenizer_src,
        **tokenizer_kwargs,
    )

    make_eval_dataset = training_args.do_eval or data_args.num_export_samples > 0
    tokenized_datasets, raw_datasets = _get_tokenized_and_preprocessed_raw_datasets(
        config=config,
        data_args=data_args,
        model=model,
        raw_datasets=raw_datasets,
        tokenizer=tokenizer,
        teacher_tokenizer=teacher_tokenizer,
        make_eval_dataset=make_eval_dataset,
        main_process_func=training_args.main_process_first,
        do_train=training_args.do_train,
        do_predict=training_args.do_predict,
    )
    train_dataset = tokenized_datasets.get("train")
    eval_dataset = tokenized_datasets.get("validation")
    predict_dataset = tokenized_datasets.get("test")

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            _LOGGER.info(f"Sample {index} of training set: {train_dataset[index]}.")

    # Get the metric function
    if data_args.task_name is not None:
        metric = load_metric("glue", data_args.task_name)
    else:
        metric = load_metric("accuracy")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction`
    # object (a namedtuple with a predictions and label_ids field) and has to return a
    # dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        if is_regression:
            preds = np.squeeze(preds)
        elif not is_multi_label_classification:
            # do not run argmax for multi label classification
            preds = np.argmax(preds, axis=1)
        if data_args.task_name is not None:
            result = metric.compute(predictions=preds, references=p.label_ids)
            if len(result) > 1:
                result["combined_score"] = np.mean(list(result.values())).item()
            return result
        elif is_regression:
            return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
        elif is_multi_label_classification:
            threshold = 0.3  # from go_emotions paper - potentially move to arg/config
            preds_sigmoid = 1 / (1 + np.exp(-preds))
            multi_label_preds = (preds_sigmoid > threshold).astype(np.float32)
            label_to_id = _get_label_to_id(
                data_args=data_args,
                is_regression=is_regression,
                label_list=label_list,
                model=model,
                num_labels=num_labels,
                config=config,
            )
            id_to_label = {id_: label for label, id_ in label_to_id.items()}
            return multi_label_precision_recall_f1(
                predictions=multi_label_preds,
                targets=p.label_ids,
                id_to_label=id_to_label,
            )
        else:
            return {
                "accuracy": (preds == p.label_ids).astype(np.float32).mean().item(),
            }

    # Data collator will default to DataCollatorWithPadding when the tokenizer is
    # passed to Trainer, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        model_state_path=model_args.model_name_or_path,
        recipe=training_args.recipe,
        metadata_args=metadata_args,
        recipe_args=training_args.recipe_args,
        teacher=teacher,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if make_eval_dataset else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        if not trainer.one_shot:
            metrics = train_result.metrics
            max_train_samples = (
                data_args.max_train_samples
                if data_args.max_train_samples is not None
                else len(train_dataset)
            )
            metrics["train_samples"] = min(max_train_samples, len(train_dataset))
            trainer.log_metrics("train", metrics)
            trainer.save_metrics("train", metrics)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.save_state()
        trainer.save_optimizer_and_scheduler(training_args.output_dir)

    # Evaluation
    if training_args.do_eval and not trainer.one_shot:
        _LOGGER.info("*** Evaluate ***")

        # Loop to handle MNLI double evaluation (matched, mis-matched)
        tasks = (
            [data_args.task_name]
            if data_args.task_name is not None
            else [data_args.dataset_name]
        )
        eval_datasets = [eval_dataset]
        if data_args.task_name == "mnli":
            tasks.append("mnli-mm")
            eval_datasets.append(raw_datasets["validation_mismatched"])

        combined = {}
        for eval_dataset, task in zip(eval_datasets, tasks):
            metrics = trainer.evaluate(eval_dataset=eval_dataset)
            max_eval_samples = (
                data_args.max_eval_samples
                if data_args.max_eval_samples is not None
                else len(eval_dataset)
            )
            metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
            if task == "mnli-mm":
                metrics = {k + "_mm": v for k, v in metrics.items()}
            if task is not None and "mnli" in task:
                combined.update(metrics)
                trainer.save_metrics("eval", combined)
            else:
                trainer.save_metrics("eval", metrics)
            trainer.log_metrics("eval", metrics)

    if training_args.do_predict and not trainer.one_shot:
        _LOGGER.info("*** Predict ***")

        # Loop to handle MNLI double evaluation (matched, mis-matched)
        tasks = [data_args.task_name]
        predict_datasets = [predict_dataset]
        if data_args.task_name == "mnli":
            tasks.append("mnli-mm")
            predict_datasets.append(raw_datasets["test_mismatched"])

        for predict_dataset, task in zip(predict_datasets, tasks):
            # Removing the `label` columns because it contains -1 and Trainer will
            # not like that
            predict_dataset = predict_dataset.remove_columns(label_column)
            predictions = trainer.predict(
                predict_dataset, metric_key_prefix="predict"
            ).predictions
            predictions = (
                np.squeeze(predictions)
                if is_regression
                else np.argmax(predictions, axis=1)
            )

            output_predict_file = os.path.join(
                training_args.output_dir, f"predict_results_{task}.txt"
            )
            if trainer.is_world_process_zero():
                with open(output_predict_file, "w") as writer:
                    _LOGGER.info(f"***** Predict results {task} *****")
                    writer.write("index\tprediction\n")
                    for index, item in enumerate(predictions):
                        if is_regression:
                            writer.write(f"{index}\t{item:3.3f}\n")
                        else:
                            item = label_list[item]
                            writer.write(f"{index}\t{item}\n")

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "text-classification",
    }
    if data_args.task_name is not None:
        kwargs["language"] = "en"
        kwargs["dataset_tags"] = "glue"
        kwargs["dataset_args"] = data_args.task_name
        kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}"

    # Exporting Samples
    if data_args.num_export_samples > 0:
        trainer.save_sample_inputs_outputs(
            num_samples_to_export=data_args.num_export_samples
        )


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
21,602 | import logging
import os
import sys
from contextlib import nullcontext
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple, Union
import datasets
import numpy as np
import transformers
from datasets import ClassLabel, load_dataset, load_metric
from datasets.arrow_dataset import Dataset
from datasets.dataset_dict import DatasetDict, IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from torch.nn import Module
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForTokenClassification,
HfArgumentParser,
PretrainedConfig,
PreTrainedTokenizerFast,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils.versions import require_version
from sparseml.pytorch.utils.distributed import record
from sparseml.transformers.sparsification import Trainer, TrainingArguments
from sparseml.transformers.utils import SparseAutoModel, get_shared_tokenizer_src
_LOGGER: logging.Logger = logging.getLogger(__name__)
def _check_teacher_student_outputs(
teacher: Module, label_to_id: Dict[str, int], label_list: List[str]
) -> Tuple[Dict[str, int], List[str]]:
# Check that the teacher and student have the same labels and if they do,
# check that the mapping between labels and ids is the same.
teacher_labels = list(teacher.config.label2id.keys())
teacher_ids = list(teacher.config.label2id.values())
student_labels = list(label_to_id.keys())
student_ids = list(label_to_id.values())
if set(teacher_labels) != set(student_labels):
_LOGGER.warning(
f"Teacher labels {teacher_labels} do not match "
f"student labels {student_labels}. Ignore this warning "
"if this is expected behavior."
)
else:
if student_ids != teacher_ids:
_LOGGER.warning(
"Teacher and student labels match, but the mapping "
"between teachers labels and ids does not match the "
"mapping between student labels and ids. "
"The student's mapping will be overwritten "
"by the teacher's mapping."
)
label_to_id = teacher.config.label2id
label_list = teacher_labels
return label_to_id, label_list | null |
21,603 | import logging
import os
import sys
from contextlib import nullcontext
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple, Union
import datasets
import numpy as np
import transformers
from datasets import ClassLabel, load_dataset, load_metric
from datasets.arrow_dataset import Dataset
from datasets.dataset_dict import DatasetDict, IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from torch.nn import Module
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForTokenClassification,
HfArgumentParser,
PretrainedConfig,
PreTrainedTokenizerFast,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils.versions import require_version
from sparseml.pytorch.utils.distributed import record
from sparseml.transformers.sparsification import Trainer, TrainingArguments
from sparseml.transformers.utils import SparseAutoModel, get_shared_tokenizer_src
def main(**kwargs):
    """
    Entry point for sparse-aware token-classification (NER) training/evaluation.

    Parses ModelArguments, DataTrainingArguments and TrainingArguments (from a
    JSON file path, the CLI, or ``**kwargs``), resolves the label set, builds
    the model plus an optional distillation teacher and a fast tokenizer,
    tokenizes the datasets, then runs train / eval / predict / sample-export
    as requested by the training args.
    """
    # See all possible arguments in
    # src/sparseml/transformers/sparsification/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser(
        (ModelArguments, DataTrainingArguments, TrainingArguments)
    )
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(
            json_file=os.path.abspath(sys.argv[1])
        )
    elif not kwargs:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    else:
        model_args, data_args, training_args = parser.parse_dict(kwargs)

    # Setup logging
    log_level = training_args.get_process_log_level()
    _LOGGER.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    _LOGGER.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, "
        f"n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    _LOGGER.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if (
        os.path.isdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is "
                "not empty. Use --overwrite_output_dir to overcome."
            )
        elif (
            last_checkpoint is not None and training_args.resume_from_checkpoint is None
        ):
            _LOGGER.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. "
                "To avoid this behavior, change the `--output_dir` or add "
                "`--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    raw_datasets = _get_raw_dataset(data_args, model_args.cache_dir)

    column_names, features = _get_column_names_and_features(
        raw_datasets, do_train=training_args.do_train
    )
    text_column_name = _get_text_column_names(column_names, data_args)
    label_column_name = _get_label_column_name(column_names, data_args)

    # If the labels are of type ClassLabel, they are already integers and we have the
    # map stored somewhere. Otherwise, we have to get the list of labels manually.
    labels_are_int = isinstance(features[label_column_name].feature, ClassLabel)
    if labels_are_int:
        label_list = features[label_column_name].feature.names
        label_to_id = {i: i for i in range(len(label_list))}
    else:
        label_list = _get_label_list(raw_datasets["train"][label_column_name])
        label_to_id = {l: i for i, l in enumerate(label_list)}
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can
    # concurrently download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name
        if model_args.config_name
        else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model, teacher = SparseAutoModel.token_classification_from_pretrained_distil(
        model_name_or_path=(
            model_args.tokenizer_name
            if model_args.tokenizer_name
            else model_args.model_name_or_path
        ),
        model_kwargs={
            "config": config,
            "cache_dir": model_args.cache_dir,
            "revision": model_args.model_revision,
            "use_auth_token": True if model_args.use_auth_token else None,
        },
        teacher_name_or_path=training_args.distill_teacher,
        teacher_kwargs={
            "cache_dir": model_args.cache_dir,
            "use_auth_token": True if model_args.use_auth_token else None,
        },
    )

    if teacher and not isinstance(teacher, str):
        # check whether teacher and student have the corresponding outputs
        label_to_id, label_list = _check_teacher_student_outputs(
            teacher, label_to_id, label_list
        )

    tokenizer_src = (
        model_args.tokenizer_name
        if model_args.tokenizer_name
        else get_shared_tokenizer_src(model, teacher)
    )
    add_prefix_space = config.model_type in {"gpt2", "roberta"}
    tokenizer = AutoTokenizer.from_pretrained(
        tokenizer_src,
        cache_dir=model_args.cache_dir,
        use_fast=True,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=add_prefix_space,
    )

    # Tokenizer check: this script requires a fast tokenizer.
    if not isinstance(tokenizer, PreTrainedTokenizerFast):
        raise ValueError(
            "This example script only works for models that have a fast tokenizer. "
            "Checkout the big table of models at "
            "https://huggingface.co/transformers/index.html#supported-frameworks "
            "to find the model types that meet this requirement"
        )

    tokenized_dataset = _get_tokenized_dataset(
        data_args=data_args,
        label_list=label_list,
        labels_are_int=labels_are_int,
        model=model,
        num_labels=num_labels,
        raw_datasets=raw_datasets,
        tokenizer=tokenizer,
        text_column_name=text_column_name,
        label_column_name=label_column_name,
        label_to_id=label_to_id,
        do_train=training_args.do_train,
        do_eval=training_args.do_eval,
        do_predict=training_args.do_predict,
        main_process_func=training_args.main_process_first,
    )
    make_eval_dataset = training_args.do_eval or (data_args.num_export_samples > 0)
    train_dataset = tokenized_dataset.get("train")
    eval_dataset = tokenized_dataset.get("validation")
    predict_dataset = tokenized_dataset.get("test")

    # Data collator
    data_collator = DataCollatorForTokenClassification(
        tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None
    )

    # Metrics
    metric = load_metric("seqeval")

    def compute_metrics(p):
        predictions, labels = p
        predictions = np.argmax(predictions, axis=2)

        # Remove ignored index (special tokens)
        true_predictions = [
            [label_list[pred] for (pred, lab) in zip(prediction, label) if lab != -100]
            for prediction, label in zip(predictions, labels)
        ]
        true_labels = [
            [label_list[lab] for (_, lab) in zip(prediction, label) if lab != -100]
            for prediction, label in zip(predictions, labels)
        ]

        results = metric.compute(predictions=true_predictions, references=true_labels)
        if data_args.return_entity_level_metrics:
            # Unpack nested dictionaries
            final_results = {}
            for key, value in results.items():
                if isinstance(value, dict):
                    for n, v in value.items():
                        final_results[f"{key}_{n}"] = v
                else:
                    final_results[key] = value
            return final_results
        else:
            return {
                "precision": results["overall_precision"],
                "recall": results["overall_recall"],
                "f1": results["overall_f1"],
                "accuracy": results["overall_accuracy"],
            }

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        model_state_path=model_args.model_name_or_path,
        recipe=training_args.recipe,
        metadata_args=metadata_args,
        recipe_args=training_args.recipe_args,
        teacher=teacher,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if make_eval_dataset else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        if not trainer.one_shot:
            metrics = train_result.metrics
            max_train_samples = (
                data_args.max_train_samples
                if data_args.max_train_samples is not None
                else len(train_dataset)
            )
            metrics["train_samples"] = min(max_train_samples, len(train_dataset))
            trainer.log_metrics("train", metrics)
            trainer.save_metrics("train", metrics)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.save_state()
        trainer.save_optimizer_and_scheduler(training_args.output_dir)

    # Evaluation
    if training_args.do_eval and not trainer.one_shot:
        _LOGGER.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = (
            data_args.max_eval_samples
            if data_args.max_eval_samples is not None
            else len(eval_dataset)
        )
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Predict
    if training_args.do_predict and not trainer.one_shot:
        _LOGGER.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(
            predict_dataset, metric_key_prefix="predict"
        )
        predictions = np.argmax(predictions, axis=2)

        # Remove ignored index (special tokens)
        true_predictions = [
            [label_list[pred] for (pred, lab) in zip(prediction, label) if lab != -100]
            for prediction, label in zip(predictions, labels)
        ]

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        # Save predictions
        output_predictions_file = os.path.join(
            training_args.output_dir, "predictions.txt"
        )
        if trainer.is_world_process_zero():
            with open(output_predictions_file, "w") as writer:
                for prediction in true_predictions:
                    writer.write(" ".join(prediction) + "\n")

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "token-classification",
    }
    if data_args.dataset_name is not None:
        kwargs["dataset_tags"] = data_args.dataset_name
        if data_args.dataset_config_name is not None:
            kwargs["dataset_args"] = data_args.dataset_config_name
            kwargs[
                "dataset"
            ] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
        else:
            kwargs["dataset"] = data_args.dataset_name

    # Exporting Samples
    if data_args.num_export_samples > 0:
        trainer.save_sample_inputs_outputs(
            num_samples_to_export=data_args.num_export_samples
        )


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
21,604 | import logging
from pathlib import Path
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union
import torch
from pydantic import Field
from sparseml.export.export_data import create_data_samples as create_data_samples_
from sparseml.export.helpers import apply_optimizations as apply_optimizations_onnx
from sparseml.integration_helper_functions import (
IntegrationHelperFunctions,
Integrations,
)
from sparseml.transformers.finetune.data.data_helpers import format_calibration_data
from sparseml.transformers.utils.helpers import (
ALL_TASK_NAMES,
MANDATORY_DEPLOYMENT_FILES,
NLG_MANDATORY_DEPLOYMENT_FILES,
NLG_OPTIONAL_DEPLOYMENT_FILES,
OPTIONAL_DEPLOYMENT_FILES,
TaskNames,
create_fake_dataloader,
remove_past_key_value_support_from_config,
resolve_sequence_length,
)
from sparseml.transformers.utils.initializers import (
_parse_data_args,
initialize_config,
initialize_sparse_model,
initialize_tokenizer,
initialize_trainer,
)
from sparseml.transformers.utils.load_task_dataset import load_task_dataset
from sparseml.transformers.utils.optimizations import apply_kv_cache_injection
_LOGGER = logging.getLogger(__name__)
class TaskNames(Enum):
    """Canonical task identifiers, each mapped to its set of accepted aliases."""

    mlm = {"masked-language-modeling", "mlm"}
    qa = {"question-answering", "qa"}
    token_classification = {"token-classification", "ner"}
    text_classification = {
        "text-classification",
        "sentiment-analysis",
        "sequence-classification",
        "glue",
    }
    text_generation = {"text-generation"}


# flat list of every accepted alias across all tasks (set order, so unordered)
ALL_TASK_NAMES = list(set.union(*(task.value for task in TaskNames)))
def remove_past_key_value_support_from_config(config: AutoConfig) -> AutoConfig:
    """
    Turn off past-key-value support on a causal LM config, in place.

    A model initialized from the returned config neither takes
    ``past_key_values`` as an input nor outputs them.

    :param config: the config to modify
    :return: the same config object, mutated
    """
    # is_decoder=True marks decoder-only operation; use_past / use_cache=False
    # stop past key values from being consumed as input or produced as output.
    for attribute, flag in (
        ("is_decoder", True),
        ("use_past", False),
        ("use_cache", False),
    ):
        setattr(config, attribute, flag)
    return config
def resolve_sequence_length(config: AutoConfig) -> int:
    """
    Resolve the sequence length from the config

    :param config: the config to resolve the sequence length from
    :return: the sequence length
    :raises ValueError: if the config exposes neither
        ``max_position_embeddings`` nor ``max_seq_len``
    """
    # prefer max_position_embeddings, then fall back to max_seq_len
    for attribute in ("max_position_embeddings", "max_seq_len"):
        if hasattr(config, attribute):
            sequence_length = getattr(config, attribute)
            break
    else:
        raise ValueError(
            "Could not infer a default sequence length "
            "from the HF transformers config. Please specify "
            "the sequence length with --sequence_length"
        )
    _LOGGER.debug(
        f"Using default sequence length of {sequence_length} "
        "(inferred from HF transformers config) "
    )
    return sequence_length
def initialize_config(
    model_path: Union[str, Path], trust_remote_code: bool = False, **config_args
) -> AutoConfig:
    """
    Initialize a config from a given path

    :param model_path: the path to the model to load
    :param trust_remote_code: True to trust remote code when loading the model,
        False otherwise
    :param config_args: additional arguments to pass to the config
    :return: the loaded config
    """
    # thin wrapper: forward everything straight to HF's loader
    return AutoConfig.from_pretrained(
        model_path,
        trust_remote_code=trust_remote_code,
        **config_args,
    )
def initialize_tokenizer(
    model_path: Union[str, Path], sequence_length: int, task: str, **tokenizer_args
) -> SparseAutoTokenizer:
    """
    Initialize a tokenizer from a given path

    :param model_path: the path to the model to load
    :param sequence_length: the sequence length to use for the tokenizer
    :param task: the task to load the tokenizer for
    :return: the loaded tokenizer
    """
    tokenizer = SparseAutoTokenizer.from_pretrained(
        model_path, model_max_length=sequence_length, **tokenizer_args
    )
    # generative models often ship without a pad token; fall back to EOS
    if task in TaskNames.text_generation.value and tokenizer.pad_token_id is None:
        tokenizer.pad_token_id = tokenizer.eos_token_id
    return tokenizer
def initialize_sparse_model(
    model_path: Union[str, Path],
    task: str,
    config: AutoConfig,
    trust_remote_code: bool = False,
    recipe: Optional[Union[str, Path]] = None,
    device: Optional[str] = None,
    **model_kwargs,
) -> AutoModel:
    """
    Initialize a sparse model from a given path. This will
    call the load_task_model function to load an appropriate
    SparseAutoModel for the given task.
    Optionally, we will also move the model to the specified device

    Example usage:
    ```python
    model_path = ... # path to the model
    task = ... # the task to load the model for
                e.g "text-generation" or "question-answering"
    config = initialize_config(model_path=model_path,
                               trust_remote_code=True)
    model = initialize_sparse_model(
        model_path=model_path,
        task=self.task,
        config=config,
    )
    ```

    :param model_path: the path to the model to load
    :param task: the task to load the model for
    :param config: the config to use for the model
    :param trust_remote_code: True to trust remote code when loading the model,
        False otherwise
    :param recipe: the recipe to apply to the model.
    :param device: the device to load the model on. If None, will load on CPU
    :return: the loaded model
    """
    model = load_task_model(
        task=task,
        model_path=model_path,
        config=config,
        trust_remote_code=trust_remote_code,
        recipe=recipe,
        **model_kwargs,
    )
    if not device:
        return model
    # a device string like 'cuda:0,1,...' names multiple gpus; wrap the model
    # in a DataParallel instead of moving it to a single device
    if re.match(r"cuda:\d+,(\d+)*", device):
        return torch.nn.DataParallel(model)
    return model.to(device)
The provided code snippet includes necessary dependencies for implementing the `create_model` function. Write a Python function `def create_model( source_path: Union[Path, str], device: Optional[str] = None, task: Optional[str] = None, recipe: Optional[str] = None, export: bool = True, **kwargs, ) -> Tuple[torch.nn.Module, Dict[str, Any]]` to solve the following problem:
A contract to create a model and optional dictionary of loaded_model_kwargs (any relevant objects created along with the model) :param source_path: The path to the model :param device: The device to use for the model :param task: The task to use for the model :param recipe: The recipe to use for the model :param export: Whether the created model is for export or not. :return: A tuple of: - torch model - dict of loaded_model_kwargs
Here is the function:
def create_model(
    source_path: Union[Path, str],
    device: Optional[str] = None,
    task: Optional[str] = None,
    recipe: Optional[str] = None,
    export: bool = True,
    **kwargs,
) -> Tuple[torch.nn.Module, Dict[str, Any]]:
    """
    Build a sparse transformer model together with a dictionary of the
    auxiliary objects (tokenizer, sequence length, config) created alongside it.

    :param source_path: The path to the model
    :param device: The device to use for the model
    :param task: The task to use for the model
    :param recipe: The recipe to use for the model
    :param export: Whether the created model is for export or not.
    :return: A tuple of:
        - torch model
        - dict of loaded_model_kwargs
    """
    if task is None:
        raise ValueError(
            "To create a transformer model, a task must be specified. "
            f"Choose one from {ALL_TASK_NAMES}"
        )

    config_args = kwargs.get("config_args", {})
    trust_remote_code = kwargs.get("trust_remote_code", False)
    if not trust_remote_code:
        _LOGGER.warning(
            "trust_remote_code is set to False. It is possible, "
            "that the model will not be loaded correctly."
        )

    config = initialize_config(source_path, trust_remote_code, **config_args)
    # fall back to the sequence length implied by the config when not given
    sequence_length = kwargs.get("sequence_length", None) or resolve_sequence_length(
        config
    )
    tokenizer = initialize_tokenizer(source_path, sequence_length, task)

    # exported text-generation models must not carry past-key-value support
    if export and task in TaskNames.text_generation.value:
        config = remove_past_key_value_support_from_config(config)

    model = initialize_sparse_model(
        model_path=source_path,
        task=task,
        config=config,
        trust_remote_code=trust_remote_code,
        recipe=recipe,
        sequence_length=sequence_length,
        device=device,
    )
    return model, dict(
        tokenizer=tokenizer, sequence_length=sequence_length, config=config
    )
21,605 | import logging
from pathlib import Path
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union
import torch
from pydantic import Field
from sparseml.export.export_data import create_data_samples as create_data_samples_
from sparseml.export.helpers import apply_optimizations as apply_optimizations_onnx
from sparseml.integration_helper_functions import (
IntegrationHelperFunctions,
Integrations,
)
from sparseml.transformers.finetune.data.data_helpers import format_calibration_data
from sparseml.transformers.utils.helpers import (
ALL_TASK_NAMES,
MANDATORY_DEPLOYMENT_FILES,
NLG_MANDATORY_DEPLOYMENT_FILES,
NLG_OPTIONAL_DEPLOYMENT_FILES,
OPTIONAL_DEPLOYMENT_FILES,
TaskNames,
create_fake_dataloader,
remove_past_key_value_support_from_config,
resolve_sequence_length,
)
from sparseml.transformers.utils.initializers import (
_parse_data_args,
initialize_config,
initialize_sparse_model,
initialize_tokenizer,
initialize_trainer,
)
from sparseml.transformers.utils.load_task_dataset import load_task_dataset
from sparseml.transformers.utils.optimizations import apply_kv_cache_injection
def format_calibration_data(
    tokenized_dataset: Dataset,
    num_calibration_samples: Optional[int] = None,
    collate_fn: Callable = default_data_collator,
    accelerator: Optional[Any] = None,
) -> DataLoader:
    """
    Creates a dataloader out of the calibration dataset split, trimming it to
    the desired number of calibration samples

    :param tokenized_dataset: dataset to convert to dataloader
    :param num_calibration_samples: number of data samples to convert
    :param collate_fn: optional custom collate function, or use default
    :param accelerator: optional accelerator for if preparing in FSDP mode
    :return: dataloader yielding the (shuffled and trimmed) calibration samples
    """
    safe_calibration_samples = len(tokenized_dataset)
    if num_calibration_samples is not None:
        safe_calibration_samples = min(len(tokenized_dataset), num_calibration_samples)
        # only warn when an explicit sample count was requested but cannot
        # be satisfied by the dataset (never when num_calibration_samples
        # was left as None)
        if safe_calibration_samples != num_calibration_samples:
            # Logger.warn is a deprecated alias of Logger.warning
            LOGGER.warning(
                f"Requested {num_calibration_samples} calibration samples but "
                f"the provided dataset only has {safe_calibration_samples}. "
            )

    # shuffle first so the trimmed subset is a random sample of the dataset
    shuffled_calibration = tokenized_dataset.shuffle()
    shuffled_calibration = shuffled_calibration.select(range(safe_calibration_samples))

    dataloader_params = {
        "batch_size": 1,
        "sampler": RandomSampler(shuffled_calibration),
        "collate_fn": collate_fn,
        "pin_memory": True,
    }

    calib_dataloader = DataLoader(shuffled_calibration, **dataloader_params)
    if accelerator:
        # let the accelerator wrap the dataloader for FSDP/distributed use
        calib_dataloader = accelerator.prepare(calib_dataloader)

    return calib_dataloader
class TaskNames(Enum):
    """Enumeration mapping each supported task family to the set of
    accepted task-name aliases for that family."""

    mlm = {"mlm", "masked-language-modeling"}
    qa = {"qa", "question-answering"}
    token_classification = {"ner", "token-classification"}
    text_classification = {
        "glue",
        "sequence-classification",
        "sentiment-analysis",
        "text-classification",
    }
    text_generation = {"text-generation"}
def create_fake_dataloader(
    model: torch.nn.Module,
    tokenizer: transformers.AutoTokenizer,
    num_samples: int,
) -> Tuple[Iterable[OrderedDictType[str, torch.Tensor]], List[str]]:
    """
    Build a fake transformers dataloader for the model, driven by the
    model's forward signature.

    :param model: The model to create the dataloader for
    :param tokenizer: The tokenizer to use for the dataloader
    :param num_samples: The number of fake samples in the dataloader
    :return: The data loader (iterable) and the input names for the model
    """
    # tokenize an empty string padded to max length to obtain dummy tensors
    tokenized = tokenizer(
        "", return_tensors="pt", padding=PaddingStrategy.MAX_LENGTH.value
    ).data

    # keep only the tokenizer outputs that the model's forward actually accepts
    signature = inspect.getfullargspec(model.__class__.forward)
    fake_inputs = OrderedDict()
    for arg_name in signature.args:
        if arg_name in tokenized:
            fake_inputs[arg_name] = tokenized[arg_name][0].reshape(1, -1)

    input_names = list(fake_inputs.keys())
    # a generator that re-yields the same sample num_samples times
    data_loader = (fake_inputs for _ in range(num_samples))
    return data_loader, input_names
def initialize_tokenizer(
    model_path: Union[str, Path], sequence_length: int, task: str, **tokenizer_args
) -> SparseAutoTokenizer:
    """
    Load a tokenizer from the given path, capped at ``sequence_length``.

    :param model_path: the path to the model to load
    :param sequence_length: the sequence length to use for the tokenizer
    :param task: the task to load the tokenizer for
    :return: the loaded tokenizer
    """
    tokenizer = SparseAutoTokenizer.from_pretrained(
        model_path, model_max_length=sequence_length, **tokenizer_args
    )

    is_text_generation = task in TaskNames.text_generation.value
    if is_text_generation and tokenizer.pad_token_id is None:
        # generative models may ship without a pad token; fall back to EOS
        tokenizer.pad_token_id = tokenizer.eos_token_id

    return tokenizer
def initialize_trainer(
    model: AutoModel,
    model_path: Union[str, Path],
    validation_dataset: Optional[Any] = None,
) -> Trainer:
    """
    Initialize a trainer. This will apply the structure dictated by
    any of the recipes stored in the model_path

    :param model: the model to initialize the trainer with
    :param model_path: the path to the model to load
    :param validation_dataset: the validation dataset to use for the trainer
    :return: the initialized trainer
    """
    # TODO: add here support for v2 trainer
    # also add initialize_dataset function that will merge v1 and v2 functions

    # run on CPU when the model itself lives on CPU; otherwise the trainer
    # picks up the available accelerator
    training_args = TrainingArguments(
        output_dir=os.path.dirname(model_path), use_cpu=(model.device.type == "cpu")
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        model_state_path=model_path,
        eval_dataset=validation_dataset,
        recipe=None,
        recipe_args=None,
        teacher=None,
    )
    # epoch=math.inf applies every recipe stage stored with the checkpoint
    applied = trainer.apply_manager(epoch=math.inf, checkpoint=None)

    if not applied:
        _LOGGER.warning(
            f"No recipes were applied for {model_path}, "
            "check to make sure recipe(s) are stored in the model_path"
        )
    else:
        trainer.finalize_manager()
        # count stages across both the regular and the architecture manager
        # (either may be absent) purely for the log message below
        num_stages = 0
        if trainer.manager:
            num_stages += trainer.manager.num_stages()
        if trainer.arch_manager:
            num_stages += trainer.arch_manager.num_stages()

        msg = (
            "an unstaged recipe"
            if num_stages == 1
            else f"a staged recipe with {num_stages} stages"
        )
        _LOGGER.info(f"Applied {msg} to the model at {model_path}")

    return trainer
def _parse_data_args(data_args):
    """
    Parse ``data_args`` with the recipe-variable parser, rewording any
    parser error so it refers to ``data_args`` instead of recipe arguments.

    :param data_args: the raw data arguments to parse
    :return: the parsed data arguments
    :raises ValueError: if parsing fails; the message is rewritten and the
        original error is chained as the cause
    """
    try:
        return parse_recipe_variables(data_args)
    except ValueError as parse_error:
        # the shared parser talks about recipes; reword for this context
        message = str(parse_error).replace("recipe_args", "data_args")
        if "recipe variables" in message:
            message = message.replace("recipe variables", "data_args")
        # chain the original error so the full parsing context is preserved
        raise ValueError(message) from parse_error
def load_task_dataset(
    task: str,
    tokenizer: "AutoTokenizer",  # noqa F821
    data_args: Dict[str, Any],
    model: Module,
    split: Optional[str] = None,
    config: Optional[AutoConfig] = None,
) -> Any:
    """
    Load a dataset for a given task.

    Note: datasets for task: text-generation are loaded differently than other tasks
    using the TextGenerationDataset object

    :param task: the task a dataset being loaded for
    :param tokenizer: the tokenizer to use for the dataset
    :param data_args: additional data args used to create a `DataTrainingArguments`
        instance for fetching the dataset
    :param model: the model to use for the dataset
    :param split: the split to use for the dataset.
    :param config: the config to use for the dataset
    :return: the dataset for the given task
    """
    dataset = None

    # task-specific imports are kept local so only the dependencies of the
    # requested task are pulled in
    if task in TaskNames.mlm.value:
        from sparseml.transformers.masked_language_modeling import (
            DataTrainingArguments,
            get_tokenized_mlm_dataset,
        )

        data_training_args = DataTrainingArguments(**data_args)
        dataset = get_tokenized_mlm_dataset(
            data_args=data_training_args, tokenizer=tokenizer
        )

    if task in TaskNames.qa.value:
        from sparseml.transformers.question_answering import (
            DataTrainingArguments,
            get_tokenized_qa_dataset,
        )

        data_training_args = DataTrainingArguments(**data_args)
        dataset = get_tokenized_qa_dataset(
            data_args=data_training_args, tokenizer=tokenizer
        )

    if task in TaskNames.token_classification.value:
        from sparseml.transformers.token_classification import (
            DataTrainingArguments,
            get_tokenized_token_classification_dataset,
        )

        data_training_args = DataTrainingArguments(**data_args)
        # falls back to the config when no model instance is available
        dataset = get_tokenized_token_classification_dataset(
            data_args=data_training_args, tokenizer=tokenizer, model=model or config
        )

    if task in TaskNames.text_classification.value:
        from sparseml.transformers.text_classification import (
            DataTrainingArguments,
            get_tokenized_text_classification_dataset,
        )

        data_training_args = DataTrainingArguments(**data_args)
        dataset = get_tokenized_text_classification_dataset(
            data_args=data_training_args,
            tokenizer=tokenizer,
            model=model,
            config=config,
        )

    if task in TaskNames.text_generation.value:
        from sparseml.transformers.finetune.data.base import TextGenerationDataset
        from sparseml.transformers.finetune.data.data_args import DataTrainingArguments

        data_training_args = DataTrainingArguments(**data_args)
        # NOTE: assumes data_args contains a "dataset" key for registry lookup
        dataset_manager = TextGenerationDataset.load_from_registry(
            data_args["dataset"],
            tokenizer=tokenizer,
            data_args=data_training_args,
            split=None,
        )
        raw_dataset = dataset_manager.get_raw_dataset()
        # the split is selected on the raw dataset here, so this branch
        # returns directly instead of falling through to the final
        # choose_split call below
        raw_dataset = choose_split(raw_dataset, split=split)
        dataset = dataset_manager.tokenize_and_process(raw_dataset)
        return dataset

    if dataset is None:
        raise ValueError(f"unrecognized task given of {task}")

    return choose_split(dataset, split=split)
The provided code snippet includes necessary dependencies for implementing the `create_data_loader` function. Write a Python function `def create_data_loader( model: torch.nn.Module, task: str, data_args: Optional[Dict[str, Any]] = None, config: Optional["AutoConfig"] = None, # noqa F821 source_path: Optional[str] = None, sequence_length: Optional[int] = None, tokenizer: Optional["AutoTokenizer"] = None, # noqa F821 dataset_with_labels: bool = False, **kwargs, )` to solve the following problem:
A contract to create a dataloader and optional dictionary of loaded_dataloader_kwargs (any relevant objects created along with the dataloader) :param model: A model for which the data_loader is created :param task: The task to use for the model :param data_args: Arguments for instantiation of the dataset :param source_path: Path to the model files :param sequence_length: The sequence length to use for the model :param tokenizer: The tokenizer to use for the model :param dataset_with_labels: Whether to allow the dataset to have "labels" inputs or not. Text-generation datasets may contain labels (needed for training only) :return: A tuple of: - torch model - dict of loaded_model_kwargs
Here is the function:
def create_data_loader(
    model: torch.nn.Module,
    task: str,
    data_args: Optional[Dict[str, Any]] = None,
    config: Optional["AutoConfig"] = None,  # noqa F821
    source_path: Optional[str] = None,
    sequence_length: Optional[int] = None,
    tokenizer: Optional["AutoTokenizer"] = None,  # noqa F821
    dataset_with_labels: bool = False,
    **kwargs,
):
    """
    A contract to create a dataloader and optional dictionary of
    loaded_dataloader_kwargs (any relevant objects created along with the dataloader)

    :param model: A model for which the data_loader is created
    :param task: The task to use for the model
    :param data_args: Arguments for instantiation of the dataset
    :param config: The config to use for the dataset; defaults to the
        model's own config
    :param source_path: Path to the model files
    :param sequence_length: The sequence length to use for the model
    :param tokenizer: The tokenizer to use for the model
    :param dataset_with_labels: Whether to allow the dataset to
        have "labels" inputs or not. Text-generation datasets may
        contain labels (needed for training only)
    :return: A tuple of:
        - the dataloader
        - dict of loaded_dataloader_kwargs (the model's input names)
    """
    split = kwargs.get("split", None)

    # default to the objects already attached to the model
    config = config or model.config
    source_path = source_path or model.name_or_path
    if tokenizer is None:
        if sequence_length is None:
            raise ValueError(
                "Sequence length for the transformer model export missing. "
                "Provide it manually using sequence_length argument"
            )
        tokenizer = initialize_tokenizer(config.name_or_path, sequence_length, task)
    data_args = _parse_data_args(data_args or {})

    if data_args:
        dataset = load_task_dataset(
            task=task,
            tokenizer=tokenizer,
            data_args=data_args,
            model=model,
            config=config,
            split=split,
        )
        if task in TaskNames.text_generation.value:
            # text-generation datasets have a separate
            # logic for creating a dataloader
            if not dataset_with_labels:
                dataset = dataset.remove_columns("labels")
            data_loader = format_calibration_data(tokenized_dataset=dataset)
            # input names are taken from the keys of the first batch
            input_names = list(next(iter(data_loader)).keys())
        else:
            # other tasks go through a trainer's eval dataloader; input names
            # come from the trainer's internal fake dataloader
            trainer = initialize_trainer(model, source_path, dataset)
            data_loader = trainer.get_eval_dataloader()
            input_names = list(next(trainer._get_fake_dataloader(1, tokenizer)).keys())
    else:
        # if no data_args are provided, create a fake dataloader
        data_loader, input_names = create_fake_dataloader(
            model, tokenizer, num_samples=1
        )

    return data_loader, dict(input_names=input_names)
21,606 | import logging
from pathlib import Path
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union
import torch
from pydantic import Field
from sparseml.export.export_data import create_data_samples as create_data_samples_
from sparseml.export.helpers import apply_optimizations as apply_optimizations_onnx
from sparseml.integration_helper_functions import (
IntegrationHelperFunctions,
Integrations,
)
from sparseml.transformers.finetune.data.data_helpers import format_calibration_data
from sparseml.transformers.utils.helpers import (
ALL_TASK_NAMES,
MANDATORY_DEPLOYMENT_FILES,
NLG_MANDATORY_DEPLOYMENT_FILES,
NLG_OPTIONAL_DEPLOYMENT_FILES,
OPTIONAL_DEPLOYMENT_FILES,
TaskNames,
create_fake_dataloader,
remove_past_key_value_support_from_config,
resolve_sequence_length,
)
from sparseml.transformers.utils.initializers import (
_parse_data_args,
initialize_config,
initialize_sparse_model,
initialize_tokenizer,
initialize_trainer,
)
from sparseml.transformers.utils.load_task_dataset import load_task_dataset
from sparseml.transformers.utils.optimizations import apply_kv_cache_injection
def create_dummy_input(
    data_loader: torch.utils.data.DataLoader,
    **kwargs,
) -> torch.Tensor:
    """Return a single dummy input: the first batch drawn from the dataloader."""
    batch_iterator = iter(data_loader)
    return next(batch_iterator)
21,607 | import logging
from pathlib import Path
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union
import torch
from pydantic import Field
from sparseml.export.export_data import create_data_samples as create_data_samples_
from sparseml.export.helpers import apply_optimizations as apply_optimizations_onnx
from sparseml.integration_helper_functions import (
IntegrationHelperFunctions,
Integrations,
)
from sparseml.transformers.finetune.data.data_helpers import format_calibration_data
from sparseml.transformers.utils.helpers import (
ALL_TASK_NAMES,
MANDATORY_DEPLOYMENT_FILES,
NLG_MANDATORY_DEPLOYMENT_FILES,
NLG_OPTIONAL_DEPLOYMENT_FILES,
OPTIONAL_DEPLOYMENT_FILES,
TaskNames,
create_fake_dataloader,
remove_past_key_value_support_from_config,
resolve_sequence_length,
)
from sparseml.transformers.utils.initializers import (
_parse_data_args,
initialize_config,
initialize_sparse_model,
initialize_tokenizer,
initialize_trainer,
)
from sparseml.transformers.utils.load_task_dataset import load_task_dataset
from sparseml.transformers.utils.optimizations import apply_kv_cache_injection
_LOGGER = logging.getLogger(__name__)
def create_data_samples(
    num_samples: int,
    data_loader: torch.utils.data.DataLoader,
    model: Optional["torch.nn.Module"] = None,
    **kwargs,
):
    """
    Create ``num_samples`` data samples from the dataloader, optionally
    running them through the model to capture matching outputs.

    :param num_samples: number of samples to generate
    :param data_loader: the dataloader to draw samples from
    :param model: optional model used to produce the corresponding outputs
    :return: the generated data samples
    """
    requested_batch_size = kwargs.get("batch_size")
    if requested_batch_size:
        # batch size is fixed at 1 for transformers sample export
        _LOGGER.info(
            "For exporting samples for transformers integration,"
            "batch size is ignored (equal to 1)"
        )
    return create_data_samples_(
        num_samples=num_samples, model=model, data_loader=data_loader
    )
21,608 | import logging
from pathlib import Path
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union
import torch
from pydantic import Field
from sparseml.export.export_data import create_data_samples as create_data_samples_
from sparseml.export.helpers import apply_optimizations as apply_optimizations_onnx
from sparseml.integration_helper_functions import (
IntegrationHelperFunctions,
Integrations,
)
from sparseml.transformers.finetune.data.data_helpers import format_calibration_data
from sparseml.transformers.utils.helpers import (
ALL_TASK_NAMES,
MANDATORY_DEPLOYMENT_FILES,
NLG_MANDATORY_DEPLOYMENT_FILES,
NLG_OPTIONAL_DEPLOYMENT_FILES,
OPTIONAL_DEPLOYMENT_FILES,
TaskNames,
create_fake_dataloader,
remove_past_key_value_support_from_config,
resolve_sequence_length,
)
from sparseml.transformers.utils.initializers import (
_parse_data_args,
initialize_config,
initialize_sparse_model,
initialize_tokenizer,
initialize_trainer,
)
from sparseml.transformers.utils.load_task_dataset import load_task_dataset
from sparseml.transformers.utils.optimizations import apply_kv_cache_injection
def apply_kv_cache_injection(onnx_model_path: Union[str, Path]) -> bool:
    """
    Inject key-value cache support into an ONNX model in place.

    A copy of the original model is first written next to it under
    ONNX_MODEL_NAME_INTERMEDIATE before the model file is overwritten.

    :param onnx_model_path: path to the ONNX model to inject
    :return: True if successful, False otherwise
    """
    # keep an untouched backup before overwriting the model in place
    create_model_copy(onnx_model_path)

    loaded_model = onnx.load(onnx_model_path, load_external_data=False)
    parent_dir = os.path.dirname(onnx_model_path)
    injector = KeyValueCacheInjector(model_path=parent_dir)
    injector.export(loaded_model, onnx_model_path)
    return True
def apply_optimizations_generative_transformer(
    exported_file_path: Union[str, Path],
    optimizations: Union[str, List[str]],
):
    """
    Apply post-export optimizations to an exported generative transformer.

    Only ONNX files are currently supported; any other file type raises.

    :param exported_file_path: path to the exported model file
    :param optimizations: name(s) of the optimization(s) to apply
    :raises NotImplementedError: if the exported file is not an ONNX file
    """
    # cast to str so Path inputs (allowed by the signature) work too;
    # Path objects have no .endswith method
    if str(exported_file_path).endswith(".onnx"):
        available_optimizations = dict(kv_cache_injection=apply_kv_cache_injection)
        apply_optimizations_onnx(
            onnx_file_path=exported_file_path,
            available_optimizations=available_optimizations,
            target_optimizations=optimizations,
        )
    else:
        raise NotImplementedError(
            "Applying optimizations is only supported for ONNX files"
        )
21,609 | import collections
import inspect
import logging
import math
import os
import warnings
from dataclasses import asdict
from typing import Any, Dict, List, Optional, Tuple, Union
import datasets
import torch
from torch import distributed as dist
from torch.nn import Module
from transformers import Trainer as HFTransformersTrainer
from transformers import TrainerCallback, TrainerControl, TrainingArguments
from transformers.file_utils import PaddingStrategy
from transformers.integrations import TensorBoardCallback
from transformers.trainer_callback import TrainerState
from transformers.trainer_pt_utils import reissue_pt_warnings
from transformers.trainer_utils import ShardedDDPOption, get_last_checkpoint
from sparseml.pytorch.model_load.helpers import log_model_load
from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer
from sparseml.pytorch.sparsification.quantization.helpers import (
initialize_channel_wise_scale_zp,
)
from sparseml.pytorch.utils import (
LoggerManager,
ModuleSparsificationInfo,
TensorBoardLogger,
WANDBLogger,
)
from sparseml.transformers.utils.helpers import RECIPE_NAME
def _get_teacher_base_column_name(column_name: str) -> Optional[str]:
# if column was created by teacher tokenizer, return the base name
if not column_name.startswith("distill_teacher:"):
return
return column_name[len("distill_teacher:") :] | null |
21,610 | import collections
import json
import logging
import os
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from torch.nn import Module
from tqdm.auto import tqdm
from transformers import is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
from sparseml.transformers.sparsification.trainer import (
TrainerInterface,
TransformersTrainer,
)
_LOGGER = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `postprocess_qa_predictions` function. Write a Python function `def postprocess_qa_predictions( examples, features, predictions: Tuple[np.ndarray, np.ndarray], version_2_with_negative: bool = False, n_best_size: int = 20, max_answer_length: int = 30, null_score_diff_threshold: float = 0.0, output_dir: Optional[str] = None, prefix: Optional[str] = None, log_level: Optional[int] = logging.WARNING, )` to solve the following problem:
Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the original contexts. This is the base postprocessing functions for models that only return start and end logits. :param examples: The non-preprocessed dataset. See main script for more :param features: The processed dataset. See main script for more :param predictions: The predictions of the model: two arrays containing the start logits and the end logits respectively. Its first dimension must match the number of elements of features :param version_2_with_negative: Whether or not the underlying dataset contains examples with no answers :param n_best_size: The total number of n-best predictions to generate when looking for an answer :param max_answer_length: The maximum length of an answer that can be generated. Needed because the start and end predictions are not conditioned on one another :param null_score_diff_threshold: The threshold used to select the null answer: if the best answer has a score that is less than the score of the null answer minus this threshold, the null answer is selected for this example. Only useful when version_2_with_negative is False :param output_dir: If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if version_2_with_negative=True, the dictionary of the scores differences between best and null answers, are saved in output_dir :param prefix: If provided, the dictionaries mentioned above are saved with prefix added to their names :param is_world_process_zero: Whether this process is the main process or not (used to determine if logging/saves should be done) :return: dictionary of prediction values
Here is the function:
def postprocess_qa_predictions(
    examples,
    features,
    predictions: Tuple[np.ndarray, np.ndarray],
    version_2_with_negative: bool = False,
    n_best_size: int = 20,
    max_answer_length: int = 30,
    null_score_diff_threshold: float = 0.0,
    output_dir: Optional[str] = None,
    prefix: Optional[str] = None,
    log_level: Optional[int] = logging.WARNING,
):
    """
    Post-processes the predictions of a question-answering model to convert them
    to answers that are substrings of the original contexts. This is the base
    postprocessing functions for models that only return start and end logits.

    :param examples: The non-preprocessed dataset. See main script for more
    :param features: The processed dataset. See main script for more
    :param predictions: The predictions of the model: two arrays containing the start
        logits and the end logits respectively. Its first dimension must match the
        number of elements of features
    :param version_2_with_negative: Whether or not the underlying dataset contains
        examples with no answers
    :param n_best_size: The total number of n-best predictions to generate when
        looking for an answer
    :param max_answer_length: The maximum length of an answer that can be generated.
        Needed because the start and end predictions are not conditioned on one another
    :param null_score_diff_threshold: The threshold used to select the null answer:
        if the best answer has a score that is less than the score of the null answer
        minus this threshold, the null answer is selected for this example. Only useful
        when version_2_with_negative is False
    :param output_dir: If provided, the dictionaries of predictions, n_best predictions
        (with their scores and logits) and, if version_2_with_negative=True, the
        dictionary of the scores differences between best and null answers, are
        saved in output_dir
    :param prefix: If provided, the dictionaries mentioned above are saved with
        prefix added to their names
    :param log_level: the logging level applied to this module's logger while
        post-processing (default: logging.WARNING)
    :return: dictionary of prediction values
    """
    if len(predictions) != 2:
        raise ValueError(
            "`predictions` should be a tuple with two elements "
            "(start_logits, end_logits)."
        )
    all_start_logits, all_end_logits = predictions

    if len(predictions[0]) != len(features):
        raise ValueError(
            f"Got {len(predictions[0])} predictions and {len(features)} features."
        )

    # Build a map example to its corresponding features.
    example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
    features_per_example = collections.defaultdict(list)
    for i, feature in enumerate(features):
        features_per_example[example_id_to_index[feature["example_id"]]].append(i)

    # The dictionaries we have to fill.
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    if version_2_with_negative:
        scores_diff_json = collections.OrderedDict()

    # Logging.
    _LOGGER.setLevel(log_level)
    _LOGGER.info(
        f"Post-processing {len(examples)} example predictions split into "
        f"{len(features)} features."
    )

    # Let's loop over all the examples!
    for example_index, example in enumerate(tqdm(examples)):
        # Those are the indices of the features associated to the current example.
        feature_indices = features_per_example[example_index]

        min_null_prediction = None
        prelim_predictions = []

        # Looping through all the features associated to the current example.
        for feature_index in feature_indices:
            # We grab the predictions of the model for this feature.
            start_logits = all_start_logits[feature_index]
            end_logits = all_end_logits[feature_index]
            # This is what will allow us to map some the positions in our logits to
            # span of texts in the original context.
            offset_mapping = features[feature_index]["offset_mapping"]
            # Optional `token_is_max_context`, if provided we will remove answers
            # that do not have the maximum context available in the current feature.
            token_is_max_context = features[feature_index].get(
                "token_is_max_context", None
            )

            # Update minimum null prediction. Index 0 corresponds to the
            # special leading token whose combined logit acts as the
            # "no answer" score.
            feature_null_score = start_logits[0] + end_logits[0]
            if (
                min_null_prediction is None
                or min_null_prediction["score"] > feature_null_score
            ):
                min_null_prediction = {
                    "offsets": (0, 0),
                    "score": feature_null_score,
                    "start_logit": start_logits[0],
                    "end_logit": end_logits[0],
                }

            # Go through all possibilities for the `n_best_size` greater start
            # and end logits. The slice [-1 : -n_best_size - 1 : -1] selects the
            # top n_best_size indices in descending logit order.
            start_indexes = np.argsort(start_logits)[
                -1 : -n_best_size - 1 : -1
            ].tolist()
            end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # Don't consider out-of-scope answers, either because the indices
                    # are out of bounds or correspond to part of the input_ids that
                    # are not in the context.
                    if (
                        start_index >= len(offset_mapping)
                        or end_index >= len(offset_mapping)
                        or offset_mapping[start_index] is None
                        or len(offset_mapping[start_index]) < 2
                        or offset_mapping[end_index] is None
                        or len(offset_mapping[end_index]) < 2
                    ):
                        continue
                    # Don't consider answers with a length that is
                    # either < 0 or > max_answer_length.
                    if (
                        end_index < start_index
                        or end_index - start_index + 1 > max_answer_length
                    ):
                        continue
                    # Don't consider answer that don't have the maximum context
                    # available (if such information is provided).
                    if (
                        token_is_max_context is not None
                        and not token_is_max_context.get(str(start_index), False)
                    ):
                        continue

                    prelim_predictions.append(
                        {
                            "offsets": (
                                offset_mapping[start_index][0],
                                offset_mapping[end_index][1],
                            ),
                            "score": start_logits[start_index] + end_logits[end_index],
                            "start_logit": start_logits[start_index],
                            "end_logit": end_logits[end_index],
                        }
                    )
        if version_2_with_negative:
            # Add the minimum null prediction
            prelim_predictions.append(min_null_prediction)
            null_score = min_null_prediction["score"]

        # Only keep the best `n_best_size` predictions.
        predictions = sorted(
            prelim_predictions, key=lambda x: x["score"], reverse=True
        )[:n_best_size]

        # Add back the minimum null prediction if it was removed because of its
        # low score.
        if version_2_with_negative and not any(
            p["offsets"] == (0, 0) for p in predictions
        ):
            predictions.append(min_null_prediction)

        # Use the offsets to gather the answer text in the original context.
        context = example["context"]
        for pred in predictions:
            offsets = pred.pop("offsets")
            pred["text"] = context[offsets[0] : offsets[1]]

        # In the very rare edge case we have not a single non-null prediction, we
        # create a fake prediction to avoid failure
        if len(predictions) == 0 or (
            len(predictions) == 1 and predictions[0]["text"] == ""
        ):
            predictions.insert(
                0, {"text": "empty", "start_logit": 0.0, "end_logit": 0.0, "score": 0.0}
            )

        # Compute the softmax of all scores (we do it with numpy to stay independent
        # from torch/tf in this file, using the LogSumExp trick)
        scores = np.array([pred.pop("score") for pred in predictions])
        exp_scores = np.exp(scores - np.max(scores))
        probs = exp_scores / exp_scores.sum()

        # Include the probabilities in our predictions.
        for prob, pred in zip(probs, predictions):
            pred["probability"] = prob

        # Pick the best prediction. If the null answer is not possible, this is easy.
        if not version_2_with_negative:
            all_predictions[example["id"]] = predictions[0]["text"]
        else:
            # Otherwise we first need to find the best non-empty prediction.
            i = 0
            while predictions[i]["text"] == "":
                i += 1
            best_non_null_pred = predictions[i]

            # Then we compare to the null prediction using the threshold.
            score_diff = (
                null_score
                - best_non_null_pred["start_logit"]
                - best_non_null_pred["end_logit"]
            )
            scores_diff_json[example["id"]] = float(
                score_diff
            )  # To be JSON-serializable.
            if score_diff > null_score_diff_threshold:
                all_predictions[example["id"]] = ""
            else:
                all_predictions[example["id"]] = best_non_null_pred["text"]

        # Make `predictions` JSON-serializable by casting np.float back to float.
        all_nbest_json[example["id"]] = [
            {
                k: (
                    float(v)
                    if isinstance(v, (np.float16, np.float32, np.float64))
                    else v
                )
                for k, v in pred.items()
            }
            for pred in predictions
        ]

    # If we have an output_dir, let's save all those dicts.
    if output_dir is not None:
        if not os.path.isdir(output_dir):
            raise EnvironmentError(f"{output_dir} is not a directory.")

        prediction_file = os.path.join(
            output_dir,
            "predictions.json" if prefix is None else f"{prefix}_predictions.json",
        )
        nbest_file = os.path.join(
            output_dir,
            "nbest_predictions.json"
            if prefix is None
            else f"{prefix}_nbest_predictions.json",
        )
        if version_2_with_negative:
            null_odds_file = os.path.join(
                output_dir,
                "null_odds.json" if prefix is None else f"{prefix}_null_odds.json",
            )

        _LOGGER.info(f"Saving predictions to {prediction_file}.")
        with open(prediction_file, "w") as writer:
            writer.write(json.dumps(all_predictions, indent=4) + "\n")
        _LOGGER.info(f"Saving nbest_preds to {nbest_file}.")
        with open(nbest_file, "w") as writer:
            writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
        if version_2_with_negative:
            _LOGGER.info(f"Saving null_odds to {null_odds_file}.")
            with open(null_odds_file, "w") as writer:
                writer.write(json.dumps(scores_diff_json, indent=4) + "\n")

    return all_predictions
21,611 | import argparse
import collections
import copy
import inspect
import logging
import os
import shutil
from typing import Any, Dict, List, Optional, Union
from torch.nn import Module
from transformers import AutoConfig
from transformers.tokenization_utils_base import PaddingStrategy
import sparseml.core.session as session_manager
from sparseml.optim import parse_recipe_variables
from sparseml.pytorch.model_load.helpers import (
RECIPE_FILE_NAME,
apply_recipe_structure_to_model,
)
from sparseml.pytorch.utils import export_onnx
from sparseml.transformers import SparseAutoTokenizer
from sparseml.transformers.utils import SparseAutoModel
from sparsezoo.utils.onnx import EXTERNAL_ONNX_DATA_NAME
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for
    training and eval (masked language modeling).

    Each field's ``metadata["help"]`` string doubles as its command-line help
    text. Data-source arguments are validated in :meth:`__post_init__`.
    """

    # NOTE(review): fields use `dataclasses.field`, so this class is presumably
    # decorated with `@dataclass` where it is defined — confirm in the full file.
    dataset_name: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the dataset to use (via the datasets library)"},
    )
    dataset_config_name: Optional[str] = field(
        default=None,
        metadata={
            "help": ("The configuration name of the dataset to use"),
        },
    )
    # An extra second dataset (and its configuration), allowing training on a
    # pair of datasets at once
    dataset_name_2: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the dataset to use (via the datasets library)"},
    )
    dataset_config_name_2: Optional[str] = field(
        default=None,
        metadata={
            "help": ("The configuration name of the dataset to use"),
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "An optional input evaluation data file to evaluate the perplexity on"
                "(a text file)."
            ),
        },
    )
    overwrite_cache: bool = field(
        default=False,
        metadata={"help": "Overwrite the cached training and evaluation sets"},
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": (
                "The percentage of the train set used as validation set in case "
                "there's no validation split"
            )
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": "The maximum total input sequence length after tokenization. "
            "Sequences longer than this will be truncated."
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    # probability that a given token is replaced by the mask token during MLM
    mlm_probability: float = field(
        default=0.15,
        metadata={"help": "Ratio of tokens to mask for masked language modeling loss"},
    )
    line_by_line: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether distinct lines of text in the dataset are to be handled as "
                "distinct sequences."
            ),
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. "
            "If False, will pad the samples dynamically when batching to "
            "the maximum length in the batch."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number "
            "of training examples to this value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number "
            "of evaluation examples to this value if set."
        },
    )
    one_shot: bool = field(
        default=False,
        metadata={"help": "Whether to apply recipe in a one shot manner."},
    )
    num_export_samples: int = field(
        default=0,
        metadata={"help": "Number of samples (inputs/outputs) to export during eval."},
    )

    def __post_init__(self):
        """
        Validate the provided data-source arguments.

        :raises ValueError: if no dataset name and no train/validation file was
            given, or if a provided file is not a csv, json, or txt file
        """
        if (
            self.dataset_name is None
            and self.train_file is None
            and self.validation_file is None
        ):
            raise ValueError(
                "Need either a dataset name or a training/validation file."
            )
        else:
            if self.train_file is not None:
                # file format is inferred from the extension downstream
                extension = self.train_file.split(".")[-1]
                if extension not in ["csv", "json", "txt"]:
                    raise ValueError(
                        "`train_file` should be a csv, a json or a txt file."
                    )
            if self.validation_file is not None:
                extension = self.validation_file.split(".")[-1]
                if extension not in ["csv", "json", "txt"]:
                    raise ValueError(
                        "`validation_file` should be a csv, a json or a txt file."
                    )
def get_tokenized_mlm_dataset(
    data_args: DataTrainingArguments,
    tokenizer,
    cache_dir: Optional[str] = None,
    training_args: Optional[TrainingArguments] = None,
):
    """
    Fetch a tokenized dataset for masked language modeling.

    :param data_args: a valid `DataTrainingArguments` instance with relevant
        args for fetching the tokenized dataset
    :param tokenizer: the tokenizer to use for tokenizing the raw dataset
    :param cache_dir: an optional cache directory to look at before downloading
        the dataset, this directory is also used to store the downloaded
        dataset if not pre-downloaded
    :param training_args: an optional `TrainingArguments` instance, used for
        downloading with main process, and also contains relevant training
        information
    :returns: a valid tokenized dataset for the masked language modeling task
    """
    raw_datasets = _get_mlm_raw_dataset(data_args=data_args, cache_dir=cache_dir)

    # Prefer the trainer's main-process guard when training args are available;
    # otherwise fall back to a no-op context manager.
    if training_args:
        def _main_process_ctx(desc):
            return training_args.main_process_first(desc=desc)

        do_train = training_args.do_train
    else:
        def _main_process_ctx(desc):
            return nullcontext(desc)

        do_train = False

    return _get_tokenized_mlm_dataset_from_raw_dataset(
        raw_datasets=raw_datasets,
        tokenizer=tokenizer,
        data_args=data_args,
        main_process_func=_main_process_ctx,
        do_train=do_train,
    )
class DataTrainingArguments:
    """
    Arguments pertaining to what data to input to our model for training and
    eval (question answering).

    Each field's ``metadata["help"]`` string doubles as its command-line help
    text. Data-source arguments are validated in :meth:`__post_init__`.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "The name of the dataset to use (via the datasets library).",
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The configuration name of the dataset to use "
                "(via the datasets library)."
            ),
        },
    )
    train_file: Optional[str] = field(
        default=None,
        metadata={"help": "The input training data file (a text file)."},
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "An optional input evaluation data file to evaluate the perplexity "
                "on (a text file)."
            ),
        },
    )
    test_file: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "An optional input test data file to evaluate the perplexity on "
                "(a text file)."
            ),
        },
    )
    overwrite_cache: bool = field(
        default=False,
        metadata={"help": "Overwrite the cached training and evaluation sets"},
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: int = field(
        default=384,
        metadata={
            "help": "The maximum total input sequence length after tokenization. "
            "Sequences longer than this will be truncated, sequences shorter will "
            "be padded."
        },
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. If False, "
            "will pad the samples dynamically when batching to the maximum length "
            "in the batch (which can be faster on GPU but will be slower on TPU)."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number "
            "of training examples to this value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number "
            "of evaluation examples to this value if set."
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of "
                "prediction examples to this value if set."
            ),
        },
    )
    version_2_with_negative: bool = field(
        default=False,
        metadata={"help": "If true, some of the examples do not have an answer."},
    )
    null_score_diff_threshold: float = field(
        default=0.0,
        metadata={
            "help": (
                "The threshold used to select the null answer: if the best answer has "
                "a score that is less than the score of the null answer minus this "
                "threshold, the null answer is selected for this example. Only useful "
                "when `version_2_with_negative=True`."
            ),
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={
            "help": (
                "When splitting up a long document into chunks, how much stride to "
                "take between chunks."
            ),
        },
    )
    n_best_size: int = field(
        default=20,
        metadata={
            "help": (
                "The total number of n-best predictions to generate when looking "
                "for an answer."
            ),
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is "
                "needed because the start and end predictions are not conditioned "
                "on one another."
            ),
        },
    )
    one_shot: bool = field(
        default=False,
        metadata={"help": "Whether to apply recipe in a one shot manner."},
    )
    num_export_samples: int = field(
        default=0,
        metadata={"help": "Number of samples (inputs/outputs) to export during eval."},
    )

    def __post_init__(self):
        """
        Validate the provided data-source arguments.

        :raises ValueError: if no data source was given, or if a provided file
            does not have a supported extension (csv or json)
        """
        if (
            self.dataset_name is None
            and self.train_file is None
            and self.validation_file is None
            and self.test_file is None
        ):
            raise ValueError(
                "Need either a dataset name or a training/validation file/test_file."
            )
        # raise ValueError instead of `assert` so validation is not stripped
        # when Python runs with the -O flag
        for file_path, arg_name in (
            (self.train_file, "train_file"),
            (self.validation_file, "validation_file"),
            (self.test_file, "test_file"),
        ):
            if file_path is not None:
                extension = file_path.split(".")[-1]
                if extension not in ("csv", "json"):
                    raise ValueError(f"`{arg_name}` should be a csv or a json file.")
def get_tokenized_qa_dataset(
    data_args: DataTrainingArguments,
    tokenizer: transformers.PreTrainedTokenizerBase,
    cache_dir: Optional[str] = None,
):
    """
    Build a tokenized question-answering dataset from at least the tokenizer
    and the data arguments.

    :param data_args: Arguments pertaining to what data we are going to input
        our model for training and eval
    :param tokenizer: The tokenizer to use for tokenizing raw dataset
    :param cache_dir: Local path to store the pretrained data from huggingface.co
    """
    raw = _get_raw_dataset(data_args=data_args, cache_dir=cache_dir)
    # the accompanying (untokenized) examples are not needed here
    tokenized, _examples = _get_tokenized_datasets_and_examples(
        data_args=data_args,
        raw_datasets=raw,
        tokenizer=tokenizer,
        make_eval_dataset=True,
    )
    return tokenized
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for
    training and eval (token classification / NER).

    Each field's ``metadata["help"]`` string doubles as its command-line help
    text. Data-source arguments are validated in :meth:`__post_init__`.
    """

    task_name: Optional[str] = field(
        default="ner", metadata={"help": "The name of the task (ner, pos...)."}
    )
    dataset_name: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the dataset to use (via the datasets library)"},
    )
    dataset_config_name: Optional[str] = field(
        default=None,
        metadata={
            "help": ("The configuration name of the dataset to use"),
        },
    )
    train_file: Optional[str] = field(
        default=None,
        metadata={"help": "A csv or a json file containing the training data."},
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "A csv or a json file containing the validation data."},
    )
    test_file: Optional[str] = field(
        default=None,
        metadata={"help": "A csv or a json file containing the test data."},
    )
    text_column_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "The column name of text to input in the file "
            "(a csv or JSON file)."
        },
    )
    label_column_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "The column name of label to input in the file "
            "(a csv or JSON file)."
        },
    )
    overwrite_cache: bool = field(
        default=False,
        metadata={"help": "Overwrite the cached training and evaluation sets"},
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    # annotated Optional[int]: the default is None (no truncation/padding length)
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": "The maximum total input sequence length after tokenization. "
            "If set, sequences longer than this will be truncated, sequences shorter "
            "will be padded."
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. If False, "
            "will pad the samples dynamically when batching to the maximum length "
            "in the batch (which can be faster on GPU but will be slower on TPU)."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number "
            "of training examples to this value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number "
            "of evaluation examples to this value if set."
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of "
                "prediction examples to this value if set."
            ),
        },
    )
    label_all_tokens: bool = field(
        default=False,
        metadata={
            "help": "Whether to put the label for one word on all tokens of generated "
            "by that word or just on the "
            "one (in which case the other tokens will have a padding index)."
        },
    )
    return_entity_level_metrics: bool = field(
        default=False,
        metadata={
            "help": "Whether to return all the entity levels during evaluation or "
            "just the overall ones."
        },
    )
    one_shot: bool = field(
        default=False,
        metadata={"help": "Whether to apply recipe in a one shot manner."},
    )
    num_export_samples: int = field(
        default=0,
        metadata={"help": "Number of samples (inputs/outputs) to export during eval."},
    )

    def __post_init__(self):
        """
        Validate the provided data-source arguments and normalize ``task_name``.

        :raises ValueError: if no data source was given, or if a provided file
            does not have a supported extension (csv or json)
        """
        if (
            self.dataset_name is None
            and self.train_file is None
            and self.validation_file is None
        ):
            raise ValueError(
                "Need either a dataset name or a training/validation file."
            )
        # raise ValueError instead of `assert` so validation is not stripped
        # when Python runs with the -O flag
        for file_path, arg_name in (
            (self.train_file, "train_file"),
            (self.validation_file, "validation_file"),
        ):
            if file_path is not None:
                extension = file_path.split(".")[-1]
                if extension not in ("csv", "json"):
                    raise ValueError(f"`{arg_name}` should be a csv or a json file.")
        # guard against task_name=None (the field is Optional) before lowering
        if self.task_name is not None:
            self.task_name = self.task_name.lower()
def get_tokenized_token_classification_dataset(
    data_args: DataTrainingArguments,
    tokenizer: transformers.PreTrainedTokenizerBase,
    model: Module,
    cache_dir: Optional[str] = None,
    do_train: bool = False,
    do_eval: bool = True,
    do_predict: bool = False,
    main_process_func=None,
):
    """
    Build a tokenized token-classification dataset from at least the
    tokenizer, model, and data arguments.

    :param data_args: Arguments pertaining to what data we are going to input
        our model for training and eval
    :param tokenizer: The tokenizer to use for tokenizing raw dataset
    :param model: The instantiated torch module to create the dataset for
    :param cache_dir: Local path to store the pretrained data from huggingface.co
    :param do_train: True to create the train dataset
    :param do_eval: True to create eval dataset
    :param do_predict: True to create test dataset
    :param main_process_func: Callable Context Manager to do something on
        the main process
    """
    raw_datasets = _get_raw_dataset(data_args=data_args, cache_dir=cache_dir)
    column_names, features = _get_column_names_and_features(
        raw_datasets, do_train=do_train
    )
    text_column_name = _get_text_column_names(column_names, data_args)
    label_column_name = _get_label_column_name(column_names, data_args)

    # ClassLabel features already encode labels as integers with a stored map;
    # otherwise the label list/mapping must be built from the training split.
    labels_are_int = isinstance(features[label_column_name].feature, ClassLabel)
    if labels_are_int:
        label_list = features[label_column_name].feature.names
        label_to_id = {idx: idx for idx in range(len(label_list))}
    else:
        label_list = _get_label_list(raw_datasets["train"][label_column_name])
        label_to_id = {label: idx for idx, label in enumerate(label_list)}
    num_labels = len(label_list)

    # default to a no-op context manager when no main-process guard was given
    if not main_process_func:
        main_process_func = lambda desc: nullcontext(desc)

    return _get_tokenized_dataset(
        data_args=data_args,
        label_list=label_list,
        labels_are_int=labels_are_int,
        model=model,
        num_labels=num_labels,
        raw_datasets=raw_datasets,
        tokenizer=tokenizer,
        text_column_name=text_column_name,
        label_column_name=label_column_name,
        label_to_id=label_to_id,
        do_train=do_train,
        do_eval=do_eval,
        main_process_func=main_process_func,
        do_predict=do_predict,
    )
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for
    training and eval (text classification / GLUE).

    Using `HfArgumentParser` we can turn this class into argparse
    arguments to be able to specify them on the command line.
    Data-source arguments are validated in :meth:`__post_init__`.
    """

    task_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "The name of the task to train on: "
            + ", ".join(_TASK_TO_KEYS.keys())
        },
    )
    dataset_name: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the dataset to use (via the datasets library)"},
    )
    dataset_config_name: Optional[str] = field(
        default=None,
        metadata={
            "help": ("The configuration name of the dataset to use"),
        },
    )
    max_seq_length: int = field(
        default=384,
        metadata={
            "help": "The maximum total input sequence length after tokenization. "
            "Sequences longer than this will be truncated, sequences shorter will "
            "be padded."
        },
    )
    overwrite_cache: bool = field(
        default=False,
        metadata={"help": "Overwrite the cached preprocessed datasets or not."},
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. If False, "
            "will pad the samples dynamically when batching to the maximum length "
            "in the batch (which can be faster on GPU but will be slower on TPU)."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number "
            "of training examples to this value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number "
            "of evaluation examples to this value if set."
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of "
                "prediction examples to this value if set."
            ),
        },
    )
    train_file: Optional[str] = field(
        default=None,
        metadata={"help": "A csv or a json file containing the training data."},
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "A csv or a json file containing the validation data."},
    )
    test_file: Optional[str] = field(
        default=None,
        metadata={"help": "A csv or a json file containing the test data."},
    )
    validation_ratio: Optional[float] = field(
        default=None,
        metadata={"help": "Percentage of the training data to be used as validation."},
    )
    eval_on_test: bool = field(
        default=False,
        metadata={"help": "Evaluate the test dataset."},
    )
    input_column_names: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "name of column to read model input data from. May also be comma "
                "separated list of two columns to use as inputs. Examples include "
                "'sentence' for single column and 'sentence_1,sentence_2' for two. "
                "Default behavior is to read columns based on task name or infer from "
                "non 'label' columns if sentence_column_names and task name not"
                "provided"
            )
        },
    )
    label_column_name: str = field(
        default="label",
        metadata={
            "help": (
                "column in dataset where input labels are located. Default is 'label'"
            )
        },
    )
    one_shot: bool = field(
        default=False,
        metadata={"help": "Whether to apply recipe in a one shot manner."},
    )
    num_export_samples: int = field(
        default=0,
        metadata={"help": "Number of samples (inputs/outputs) to export during eval."},
    )

    def __post_init__(self):
        """
        Validate the provided task/data-source arguments and normalize
        ``task_name``.

        :raises ValueError: if the task name is unknown, if no data source was
            given, or if the provided files have unsupported or mismatched
            extensions
        """
        if self.task_name is not None:
            self.task_name = self.task_name.lower()
            if self.task_name not in _TASK_TO_KEYS.keys():
                raise ValueError(
                    "Unknown task, you should pick one in "
                    + ",".join(_TASK_TO_KEYS.keys())
                )
        elif self.dataset_name is not None:
            # a dataset name alone is sufficient; nothing further to validate
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError(
                "Need either a GLUE task, a training/validation file or a dataset name"
            )
        else:
            # raise ValueError instead of `assert` so validation is not
            # stripped when Python runs with the -O flag
            train_extension = self.train_file.split(".")[-1]
            if train_extension not in ("csv", "json"):
                raise ValueError("`train_file` should be a csv or a json file.")
            validation_extension = self.validation_file.split(".")[-1]
            if validation_extension != train_extension:
                raise ValueError(
                    "`validation_file` should have the same extension (csv or json) "
                    "as `train_file`."
                )
def get_tokenized_text_classification_dataset(
    data_args: DataTrainingArguments,
    tokenizer: transformers.PreTrainedTokenizerBase,
    model: Module,
    config,
    cache_dir: Optional[str] = None,
):
    """
    Build a tokenized text-classification dataset from at least the
    tokenizer, model, and data arguments.

    :param data_args: Arguments pertaining to what data we are going to input
        our model for training and eval
    :param tokenizer: The tokenizer to use for tokenizing raw dataset
    :param config: The pretrained config used to load this model
    :param cache_dir: Local path to store the pretrained data from huggingface.co
    :returns: A dictionary containing tokenized_datasets
    """
    raw = _get_raw_dataset(data_args, cache_dir=cache_dir, do_predict=True)
    # only the tokenized datasets are needed; the second return value is unused
    tokenized, _unused = _get_tokenized_and_preprocessed_raw_datasets(
        config=config,
        data_args=data_args,
        model=model,
        raw_datasets=raw,
        tokenizer=tokenizer,
        teacher_tokenizer=None,
        make_eval_dataset=True,
    )
    return tokenized
The provided code snippet includes the necessary dependencies for implementing the `load_task_dataset` function. Write a Python function `def load_task_dataset(task: str, tokenizer, data_args: Dict[str, Any], model: Module, config=None)` that solves the following problem:
:param task: the task the dataset is being loaded for :param tokenizer: the tokenizer to use for the dataset :param data_args: additional data args used to create a `DataTrainingArguments` instance for fetching the dataset
Here is the function:
def load_task_dataset(
    task: str, tokenizer, data_args: Dict[str, Any], model: Module, config=None
):
    """
    Load and tokenize the dataset for the given transformers task.

    :param task: the task a dataset being loaded for
    :param tokenizer: the tokenizer to use for the dataset
    :param data_args: additional data args used to create a
        `DataTrainingArguments` instance for fetching the dataset
    :param model: torch module the dataset is prepared for (falls back to
        `config` for token classification)
    :param config: optional pretrained config, used by the token- and
        text-classification paths
    :raises ValueError: if the task name is not recognized
    """
    if task in ("masked-language-modeling", "mlm"):
        from sparseml.transformers.masked_language_modeling import (
            DataTrainingArguments,
            get_tokenized_mlm_dataset,
        )

        return get_tokenized_mlm_dataset(
            data_args=DataTrainingArguments(**data_args), tokenizer=tokenizer
        )

    if task in ("question-answering", "qa"):
        from sparseml.transformers.question_answering import (
            DataTrainingArguments,
            get_tokenized_qa_dataset,
        )

        return get_tokenized_qa_dataset(
            data_args=DataTrainingArguments(**data_args), tokenizer=tokenizer
        )

    if task in ("token-classification", "ner"):
        from sparseml.transformers.token_classification import (
            DataTrainingArguments,
            get_tokenized_token_classification_dataset,
        )

        return get_tokenized_token_classification_dataset(
            data_args=DataTrainingArguments(**data_args),
            tokenizer=tokenizer,
            model=model or config,
        )

    if task in (
        "sequence-classification",
        "glue",
        "sentiment-analysis",
        "text-classification",
    ):
        from sparseml.transformers.text_classification import (
            DataTrainingArguments,
            get_tokenized_text_classification_dataset,
        )

        return get_tokenized_text_classification_dataset(
            data_args=DataTrainingArguments(**data_args),
            tokenizer=tokenizer,
            model=model,
            config=config,
        )

    raise ValueError(f"unrecognized task given of {task}")
import argparse
import collections
import copy
import inspect
import logging
import os
import shutil
from typing import Any, Dict, List, Optional, Union
from torch.nn import Module
from transformers import AutoConfig
from transformers.tokenization_utils_base import PaddingStrategy
import sparseml.core.session as session_manager
from sparseml.optim import parse_recipe_variables
from sparseml.pytorch.model_load.helpers import (
RECIPE_FILE_NAME,
apply_recipe_structure_to_model,
)
from sparseml.pytorch.utils import export_onnx
from sparseml.transformers import SparseAutoTokenizer
from sparseml.transformers.utils import SparseAutoModel
from sparsezoo.utils.onnx import EXTERNAL_ONNX_DATA_NAME
# Default filename for the exported ONNX model; pipelines loading a model
# directory look specifically for "model.onnx" (see export docstrings below).
MODEL_ONNX_NAME = "model.onnx"
def _parse_args() -> argparse.Namespace:
    """
    Build the command-line interface for the ONNX export script and parse
    the current process arguments.

    :return: the parsed command-line arguments as an `argparse.Namespace`
    """
    parser = argparse.ArgumentParser(
        description="Export a trained transformers model to an ONNX file"
    )
    # required arguments: the task and the trained model directory
    parser.add_argument(
        "--task",
        type=str,
        required=True,
        help="Task to create the model for. i.e. mlm, qa, glue, ner",
    )
    parser.add_argument(
        "--model_path",
        required=True,
        type=str,
        help=(
            "Path to directory where model files for weights, config, and "
            "tokenizer are stored"
        ),
    )
    # optional export configuration
    parser.add_argument(
        "--sequence_length",
        type=int,
        default=None,
        help=(
            "Sequence length to use. Default is `config.max_position_embeddings`. "
            "Can be overwritten later"
        ),
    )
    parser.add_argument(
        "--no_convert_qat",
        action="store_true",
        help=("Set flag to not perform QAT to fully quantized conversion after export"),
    )
    parser.add_argument(
        "--onnx_file_name",
        type=str,
        default=MODEL_ONNX_NAME,
        help=(
            "Name for exported ONNX file in the model directory. "
            "Default and recommended value for pipeline "
            f"compatibility is {MODEL_ONNX_NAME}"
        ),
    )
    parser.add_argument(
        "--num_export_samples",
        type=int,
        default=0,
        help="Number of samples (inputs/outputs) to export",
    )
    parser.add_argument(
        "--data_args",
        type=str,
        default=None,
        help="Valid json loadable args used to instantiate a `DataTrainingArguments`"
        " instance while exporting samples",
    )
    parser.add_argument(
        "--trust_remote_code",
        action="store_true",
        help=("Set flag to allow custom models in HF-transformers"),
    )
    return parser.parse_args()
import argparse
import collections
import copy
import inspect
import logging
import os
import shutil
from typing import Any, Dict, List, Optional, Union
from torch.nn import Module
from transformers import AutoConfig
from transformers.tokenization_utils_base import PaddingStrategy
import sparseml.core.session as session_manager
from sparseml.optim import parse_recipe_variables
from sparseml.pytorch.model_load.helpers import (
RECIPE_FILE_NAME,
apply_recipe_structure_to_model,
)
from sparseml.pytorch.utils import export_onnx
from sparseml.transformers import SparseAutoTokenizer
from sparseml.transformers.utils import SparseAutoModel
from sparsezoo.utils.onnx import EXTERNAL_ONNX_DATA_NAME
# module-level logger for export status and warning messages
_LOGGER = logging.getLogger(__name__)
def export_transformer_to_onnx(
    task: str,
    model_path: str,
    sequence_length: Optional[int] = None,
    convert_qat: bool = True,
    onnx_file_name: str = MODEL_ONNX_NAME,
    num_export_samples: int = 0,
    trust_remote_code: bool = False,
    data_args: Optional[Union[Dict[str, Any], str]] = None,
) -> str:
    """
    Exports the saved transformers file to ONNX at batch size 1 using
    the given model path weights, config, and tokenizer

    :param task: task to create the model for. i.e. mlm, qa, glue, ner
    :param model_path: path to directory where model files, tokenizers,
        and configs are saved. ONNX export will also be written here
    :param sequence_length: model sequence length to use for export
    :param convert_qat: set True to convert a QAT model to fully quantized
        ONNX model. Default is True
    :param onnx_file_name: name to save the exported ONNX file as. Default
        is model.onnx. Note that when loading a model directory to a deepsparse
        pipeline, it will look only for 'model.onnx'
    :param num_export_samples: number of samples (inputs/outputs) to export
    :param trust_remote_code: set True to allow custom models in HF-transformers
    :param data_args: additional args to instantiate a `DataTrainingArguments`
        instance for exporting samples
    :return: path to the exported ONNX file
    """
    # normalize task aliases: underscores and spaces become hyphens
    task = task.replace("_", "-").replace(" ", "-")
    if not os.path.exists(model_path) or not os.path.isdir(model_path):
        raise ValueError(
            "model_path must be a directory that contains the trained transformer "
            f"files. {model_path} is not a directory or does not exist"
        )
    if num_export_samples > 0 and data_args is None:
        _LOGGER.info(
            f"--data_args is needed for exporting {num_export_samples} "
            "real samples but got None, synthetic data samples will be "
            "generated based on model input/output shapes"
        )
    # data_args may be a dict or a (presumably json) string; _parse_data_args
    # normalizes it to a dict — confirm against _parse_data_args
    data_args: Dict[str, Any] = _parse_data_args(data_args)
    _LOGGER.info(f"Attempting onnx export for model at {model_path} for task {task}")
    # placeholder for extra AutoConfig kwargs; currently none are passed
    config_args = {}
    config = AutoConfig.from_pretrained(
        model_path,
        trust_remote_code=trust_remote_code,
        **config_args,
    )
    if sequence_length is None:
        # fall back to the model's maximum supported position length
        sequence_length = config.max_position_embeddings
    tokenizer = SparseAutoTokenizer.from_pretrained(
        model_path, model_max_length=sequence_length
    )
    if task == "text-generation":
        # NOTE(review): presumably no pad token is set for generation
        # tokenizers, so EOS is reused for padding — confirm
        tokenizer.pad_token = tokenizer.eos_token
    model = load_task_model(task, model_path, config, trust_remote_code)
    _LOGGER.info(f"loaded model, config, and tokenizer from {model_path}")
    # NOTE(review): model is put in train mode here and only switched back to
    # eval just before export — presumably required for the recipe structure
    # application below; confirm
    model = model.train()
    # creates a SparseSession and apply structure from the model's recipe
    recipe_path = os.path.join(model_path, RECIPE_FILE_NAME)
    if os.path.exists(recipe_path):
        session_manager.create_session()
        apply_recipe_structure_to_model(
            model=model, recipe_path=recipe_path, model_path=model_path
        )
    else:
        _LOGGER.warning(f"No input recipe {RECIPE_FILE_NAME} found in {model_path}.")
    # create fake model input: an empty string padded to the tokenizer's
    # max length, giving a fixed-shape sample for tracing
    inputs = tokenizer(
        "", return_tensors="pt", padding=PaddingStrategy.MAX_LENGTH.value
    ).data  # Dict[Tensor]
    # Rearrange inputs' keys to match those defined by model forward func, which
    # seem to define how the order of inputs is determined in the exported model
    forward_args_spec = inspect.getfullargspec(model.__class__.forward)
    dropped = [
        input_key
        for input_key in inputs.keys()
        if input_key not in forward_args_spec.args
    ]
    inputs = collections.OrderedDict(
        [
            # take the single tokenized sample and reshape it to batch size 1
            (func_input_arg_name, inputs[func_input_arg_name][0].reshape(1, -1))
            for func_input_arg_name in forward_args_spec.args
            if func_input_arg_name in inputs
        ]
    )
    if dropped:
        _LOGGER.warning(
            "The following inputs were not present in the model forward function "
            f"and therefore dropped from ONNX export: {dropped}"
        )
    # summarize dtype/shape of each input for logging only
    inputs_shapes = {
        key: (
            f"{val.dtype if hasattr(val, 'dtype') else 'unknown'}: "
            f"{list(val.shape) if hasattr(val, 'shape') else 'unknown'}"
        )
        for key, val in inputs.items()
    }
    _LOGGER.info(f"Created sample inputs for the ONNX export process: {inputs_shapes}")
    # run export
    model = model.eval()
    onnx_file_path = os.path.join(model_path, onnx_file_name)
    # text-generation export passes explicit input names through to export_onnx
    kwargs = {"input_names": list(inputs.keys())} if task == "text-generation" else {}
    export_onnx(
        model,
        inputs,
        onnx_file_path,
        convert_qat=convert_qat,
        **kwargs,
    )
    _LOGGER.info(f"ONNX exported to {onnx_file_path}")
    return onnx_file_path
def create_deployment_folder(
    training_directory: str,
    onnx_file_name: str = MODEL_ONNX_NAME,
    deployment_files: Optional[List[str]] = None,
):
    """
    Sets up the deployment directory i.e. copies over the complete set of files
    that are required to run the transformer model in the inference engine

    :param training_directory: path to directory where model files, tokenizers,
        and configs are saved. Exported ONNX model is also expected to be there
    :param onnx_file_name: Name for exported ONNX file in the model directory.
    :param deployment_files: optional list of deployment file names to override
        default file names with.
    :return: path to the valid deployment directory
    :raises ValueError: if a mandatory deployment file is missing from
        `training_directory`
    """
    if deployment_files is None:
        # set deployment files to default values; deep-copy so the in-place
        # rename below never mutates the module-level constants
        deployment_files = copy.deepcopy(
            MANDATORY_DEPLOYMENT_FILES + OPTIONAL_DEPLOYMENT_FILES
        )
    if onnx_file_name != MODEL_ONNX_NAME:
        # replace the default onnx model name with the custom one
        deployment_files[deployment_files.index(MODEL_ONNX_NAME)] = onnx_file_name

    # the deployment folder lives next to (not inside) the training directory
    model_root_dir = os.path.dirname(training_directory)
    deployment_folder_dir = os.path.join(model_root_dir, "deployment")

    # always start from a clean deployment directory
    if os.path.isdir(deployment_folder_dir):
        shutil.rmtree(deployment_folder_dir)
    os.makedirs(deployment_folder_dir)

    for file_name in deployment_files:
        expected_file_path = os.path.join(training_directory, file_name)
        deployment_file_path = os.path.join(deployment_folder_dir, file_name)
        if not os.path.exists(expected_file_path):
            if file_name in OPTIONAL_DEPLOYMENT_FILES:
                _LOGGER.warning(
                    f"Optional file {file_name} not found in {training_directory}. "
                    f"Skipping copying to deployment folder."
                )
                continue
            raise ValueError(
                f"Attempting to copy {file_name} file from {expected_file_path}, "
                f"but the file does not exist. Make sure that {training_directory} "
                f"contains following files: {deployment_files}"
            )
        # move (rather than copy) the potentially very large ONNX model and
        # its external tensor file to avoid duplicating them on disk; compare
        # against `onnx_file_name` so a custom-named model is also moved
        if file_name in (onnx_file_name, EXTERNAL_ONNX_DATA_NAME):
            shutil.move(expected_file_path, deployment_file_path)
        else:
            # copying remaining `deployment_files` from training to deployment directory
            shutil.copyfile(expected_file_path, deployment_file_path)
        _LOGGER.info(
            f"Saved {file_name} in the deployment folder at {deployment_file_path}"
        )
    return deployment_folder_dir
def export(
    task: str,
    model_path: str,
    sequence_length: Optional[int],
    no_convert_qat: bool,
    onnx_file_name: str,
    num_export_samples: int = 0,
    trust_remote_code: bool = False,
    data_args: Optional[str] = None,
):
    """
    Export a trained transformer model to ONNX and assemble its deployment folder.

    :param task: transformers task the model was trained for
    :param model_path: directory holding the trained model files
    :param sequence_length: max input sequence length for the exported model
    :param no_convert_qat: True to skip the QAT -> quantized ONNX conversion
    :param onnx_file_name: file name to give the exported ONNX model
    :param num_export_samples: number of sample inputs/outputs to export
    :param trust_remote_code: True to trust remote code during model loading
    :param data_args: optional dataset arguments used for sample export
    """
    # normalize to an absolute path so downstream path logic is stable
    if os.path.exists(model_path):
        model_path = os.path.abspath(model_path)

    export_transformer_to_onnx(
        task=task,
        model_path=model_path,
        sequence_length=sequence_length,
        # `no_convert_qat` is a disable-flag, so invert it for the exporter
        convert_qat=not no_convert_qat,
        onnx_file_name=onnx_file_name,
        num_export_samples=num_export_samples,
        trust_remote_code=trust_remote_code,
        data_args=data_args,
    )

    deployment_dir = create_deployment_folder(
        training_directory=model_path, onnx_file_name=onnx_file_name
    )
    _LOGGER.info(
        f"Created deployment folder at {deployment_dir} "
        f"with files: {os.listdir(deployment_dir)}"
    )
21,614 | import argparse
import logging
import os
from pathlib import Path
from typing import Dict, Optional
from torch.nn import Module
from transformers import AutoConfig
import sparseml.core.session as session_manager
from sparseml.core.framework import Framework
from sparseml.pytorch.model_load.helpers import (
fallback_to_cpu,
parse_dtype,
save_model_and_recipe,
)
from sparseml.transformers import SparseAutoTokenizer
from sparseml.transformers.finetune.data import TextGenerationDataset
from sparseml.transformers.finetune.data.data_args import DataTrainingArguments
from sparseml.transformers.finetune.data.data_helpers import format_calibration_data
from sparseml.transformers.utils.helpers import resolve_sequence_length
from sparseml.transformers.utils.initializers import initialize_sparse_model
_LOGGER = logging.getLogger(__name__)
class Framework(Enum):
    """
    An Enum to represent different frameworks recognized by SparseML
    """

    general = "general"
    pytorch = "pytorch"
    tensorflow = "tensorflow"
    onnx = "onnx"
    keras = "keras"
    jax = "jax"

    @classmethod
    def from_str(cls, framework: str) -> "Framework":
        """
        Factory method for creating a framework enum from a string.
        The string is case insensitive and whitespace is stripped before
        checking for a match.

        Note: the @classmethod decorator was missing, so `Framework.from_str(s)`
        bound the string to `cls` and raised a TypeError; it is now restored.

        :param framework: The string to convert to a framework
        :return: The corresponding framework enum for the given string
        :raises ValueError: if the string matches no known framework
        """
        normalized = framework.lower().strip()
        try:
            # enum values are exactly the lowercase names, so a value lookup
            # replaces the original if/elif chain
            return cls(normalized)
        except ValueError:
            raise ValueError(f"Unknown framework: {normalized}") from None

    def __str__(self):
        """
        :return: The string representation of the framework
        """
        return self.value

    def formatted(self) -> str:
        """
        :return: The formatted (display-cased) string representation
            of the framework
        :raises ValueError: if the framework has no known display name
        """
        display_names = {
            Framework.general: "General",
            Framework.pytorch: "PyTorch",
            Framework.tensorflow: "TensorFlow",
            Framework.onnx: "ONNX",
            Framework.keras: "Keras",
            Framework.jax: "JAX",
        }
        if self not in display_names:
            raise ValueError(f"Unknown framework: {self}")
        return display_names[self]

    def class_name(self) -> str:
        """
        Get the class name for the framework.
        This is the formatted string representation of the framework.
        If the framework is `general`, an empty string is returned

        :return: The class name for the framework
        """
        return self.formatted() if self != self.general else ""
def save_model_and_recipe(
    model: Module, save_path: str, tokenizer: Optional[Any] = None
):
    """
    Save a model, tokenizer and the currently loaded recipe to file

    :param model: pytorch model to save
    :param save_path: path to save output to
    :param tokenizer: model tokenizer to save
    """
    # persist the model weights/config first, then the tokenizer (if given)
    model.save_pretrained(save_path)
    if tokenizer is not None:
        tokenizer.save_pretrained(save_path)

    _LOGGER.info("Saving output to {}".format(os.path.abspath(save_path)))

    # serialize the recipe held by the active sparsification session next to
    # the weights so it can be re-applied when the model is reloaded
    serialized_recipe = session_manager.active_session().get_serialized_recipe()
    with open(os.path.join(save_path, RECIPE_FILE_NAME), "w") as recipe_file:
        recipe_file.write(serialized_recipe)
def fallback_to_cpu(device: str) -> str:
    """
    Takes in a device string and forces it to cpu if cuda is not available

    :param device: device id to check
    :return: device modified for CUDA status
    """
    wants_cuda = "cuda" in device
    if wants_cuda and not torch.cuda.is_available():
        # warn rather than raise: silently degrading to cpu keeps scripts runnable
        _LOGGER.warning(
            f"Requested {device} but CUDA is not available, falling back to CPU"
        )
        return "cpu"
    return device
def parse_dtype(dtype_arg: str) -> "Union[str, torch.dtype]":
    """
    Parse a user-supplied precision string into a torch dtype.

    :param dtype_arg: dtype string to parse; accepts "half"/"float16",
        "bfloat16", and "full"/"float32"
    :return: the matching torch.dtype, or the string "auto" for any
        unrecognized value so the precision stored in the model checkpoint
        is used instead (the original annotation claimed torch.dtype only,
        which was inaccurate for the "auto" fallback)
    """
    aliases = {
        "half": torch.float16,
        "float16": torch.float16,
        "bfloat16": torch.bfloat16,
        "full": torch.float32,
        "float32": torch.float32,
    }
    # unrecognized strings intentionally fall back to "auto" (get precision
    # from the model by default) rather than raising
    return aliases.get(dtype_arg, "auto")
class DataTrainingArguments(CustomDataTrainingArguments):
    """
    Arguments pertaining to what data we are going to input our model for
    training and eval

    Using `HfArgumentParser` we can turn this class into argparse
    arguments to be able to specify them on the command line
    """

    # dataset source: a HF hub dataset name, or a pre-loaded DatasetDict
    dataset: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The name of the dataset to use (via the datasets library). "
                "Supports input as a string or DatasetDict from HF"
            )
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None,
        metadata={
            "help": ("The configuration name of the dataset to use"),
        },
    )
    max_seq_length: int = field(
        default=384,
        metadata={
            "help": "The maximum total input sequence length after tokenization. "
            "Sequences longer than this will be truncated, sequences shorter will "
            "be padded."
        },
    )
    concatenate_data: bool = field(
        default=False,
        metadata={
            "help": "Whether or not to concatenate datapoints to fill max_seq_length"
        },
    )
    # fix: original help text read "keyboard args" instead of "keyword args"
    raw_kwargs: Optional[Dict] = field(
        default=None,
        metadata={"help": "Additional keyword args to pass to datasets load_data"},
    )
    splits: Union[None, str, List, Dict] = field(
        default=None,
        metadata={"help": "Optional percentages of each split to download"},
    )
    num_calibration_samples: Optional[int] = field(
        default=512,
        metadata={"help": "Number of samples to use for one-shot calibration"},
    )
    streaming: Optional[bool] = field(
        default=False,
        metadata={"help": "True to stream data from a cloud dataset"},
    )
    overwrite_cache: bool = field(
        default=False,
        metadata={"help": "Overwrite the cached preprocessed datasets or not."},
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. If False, "
            "will pad the samples dynamically when batching to the maximum length "
            "in the batch (which can be faster on GPU but will be slower on TPU)."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number "
            "of training examples to this value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number "
            "of evaluation examples to this value if set."
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of "
                "prediction examples to this value if set."
            ),
        },
    )
def format_calibration_data(
    tokenized_dataset: Dataset,
    num_calibration_samples: Optional[int] = None,
    collate_fn: Callable = default_data_collator,
    accelerator: Optional[Any] = None,
) -> DataLoader:
    """
    Creates a dataloader out of the calibration dataset split, trimming it to
    the desired number of calibration samples

    :param tokenized_dataset: dataset to convert to dataloader
    :param num_calibration_samples: number of data samples to convert
    :param collate_fn: optional custom collate function, or use default
    :param accelerator: optional accelerator for if preparing in FSDP mode
    :return: dataloader yielding the shuffled, trimmed calibration samples
        (the original annotation claimed List[torch.Tensor], but a DataLoader
        is what is actually constructed and returned)
    """
    safe_calibration_samples = len(tokenized_dataset)
    if num_calibration_samples is not None:
        safe_calibration_samples = min(len(tokenized_dataset), num_calibration_samples)
        if safe_calibration_samples != num_calibration_samples:
            # fix: was `LOGGER.warn(...)` — `LOGGER` is undefined in this module
            # (the logger is `_LOGGER`) and `Logger.warn` is a deprecated alias
            _LOGGER.warning(
                f"Requested {num_calibration_samples} calibration samples but "
                f"the provided dataset only has {safe_calibration_samples}. "
            )

    # shuffle first, then take the leading slice of the shuffled order
    shuffled_calibration = tokenized_dataset.shuffle()
    shuffled_calibration = shuffled_calibration.select(range(safe_calibration_samples))

    dataloader_params = {
        "batch_size": 1,
        "sampler": RandomSampler(shuffled_calibration),
        "collate_fn": collate_fn,
        "pin_memory": True,
    }

    calib_dataloader = DataLoader(shuffled_calibration, **dataloader_params)
    if accelerator:
        # let the accelerator wrap the loader when running under FSDP
        calib_dataloader = accelerator.prepare(calib_dataloader)

    return calib_dataloader
def resolve_sequence_length(config: AutoConfig) -> int:
    """
    Resolve the sequence length from the config

    :param config: the config to resolve the sequence length from
    :return: the sequence length
    :raises ValueError: if the config exposes no known sequence-length attribute
    """
    # different architectures expose the max length under different names
    for attribute in ("max_position_embeddings", "max_seq_len"):
        if hasattr(config, attribute):
            sequence_length = getattr(config, attribute)
            break
    else:
        raise ValueError(
            "Could not infer a default sequence length "
            "from the HF transformers config. Please specify "
            "the sequence length with --sequence_length"
        )
    _LOGGER.debug(
        f"Using default sequence length of {sequence_length} "
        "(inferred from HF transformers config) "
    )
    return sequence_length
def initialize_sparse_model(
    model_path: Union[str, Path],
    task: str,
    config: AutoConfig,
    trust_remote_code: bool = False,
    recipe: Optional[Union[str, Path]] = None,
    device: Optional[str] = None,
    **model_kwargs,
) -> AutoModel:
    """
    Initialize a sparse model from a given path by loading the
    task-appropriate SparseAutoModel, then optionally moving it to
    the requested device.

    Example usage:
    ```python
    config = initialize_config(model_path=model_path, trust_remote_code=True)
    model = initialize_sparse_model(
        model_path=model_path,
        task="text-generation",  # or e.g. "question-answering"
        config=config,
    )
    ```

    :param model_path: the path to the model to load
    :param task: the task to load the model for
    :param config: the config to use for the model
    :param trust_remote_code: True to trust remote code when loading the model,
        False otherwise
    :param recipe: the recipe to apply to the model
    :param device: the device to load the model on. If None, will load on CPU
    :return: the loaded model
    """
    sparse_model = load_task_model(
        task=task,
        model_path=model_path,
        config=config,
        trust_remote_code=trust_remote_code,
        recipe=recipe,
        **model_kwargs,
    )

    if device:
        # a 'cuda:0,1,...' style string requests DataParallel over several
        # gpus; any other device string is treated as a single target device
        multi_gpu_request = re.match(r"cuda:\d+,(\d+)*", device)
        if multi_gpu_request:
            sparse_model = torch.nn.DataParallel(sparse_model)
        else:
            sparse_model = sparse_model.to(device)
    return sparse_model
The provided code snippet includes necessary dependencies for implementing the `one_shot` function. Write a Python function `def one_shot( model_path: str, dataset: str, dataset_config_name: Optional[str] = None, num_samples: int = 128, sequence_length: Optional[int] = None, concatenate_data: Optional[bool] = False, device: str = "cuda:0", deploy_dir: Optional[str] = ".", recipe_file: Optional[str] = None, precision: str = "auto", recipe_args: Optional[Dict] = None, do_save: Optional[bool] = False, ) -> Module` to solve the following problem:
Performs in place one shot sparsification/quantization of a model based on: :param model_path: path to Hugging Face stub :param dataset: Dataset to extract calibration data from :param dataset_config_name: Specific configuration to extract from calib dataset :param num_samples: Number of samples to extract from the dataset :param sequence_length: Maximum input sequence length to the model :param concatenate_data: Whether to concatenate datapoints to fill seqlen or not :param device: Device (cuda:index, auto or cpu) to use for computation :param deploy_dir: The output directory to save the model to :param recipe_file: recipe containing SparseGPT configuration :param precision: precision to load model as, either auto, half or full :param recipe_args: additional arguments to use for recipe evaluation :param do_save: whether to save the output model to disk :return: Pytorch module with OBCQ applied
Here is the function:
def one_shot(
    model_path: str,
    dataset: str,
    dataset_config_name: Optional[str] = None,
    num_samples: int = 128,
    sequence_length: Optional[int] = None,
    concatenate_data: Optional[bool] = False,
    device: str = "cuda:0",
    deploy_dir: Optional[str] = ".",
    recipe_file: Optional[str] = None,
    precision: str = "auto",
    recipe_args: Optional[Dict] = None,
    do_save: Optional[bool] = False,
) -> Module:
    """
    Performs in place one shot sparsification/quantization of a model based on:

    :param model_path: path to Hugging Face stub
    :param dataset: Dataset to extract calibration data from
    :param dataset_config_name: Specific configuration to extract from calib dataset
    :param num_samples: Number of samples to extract from the dataset
    :param sequence_length: Maximum input sequence length to the model
    :param concatenate_data: Whether to concatenate datapoints to fill seqlen or not
    :param device: Device (cuda:index, auto or cpu) to use for computation
    :param deploy_dir: The output directory to save the model to
    :param recipe_file: recipe containing SparseGPT configuration
    :param precision: precision to load model as, either auto, half or full
    :param recipe_args: additional arguments to use for recipe evaluation
    :param do_save: whether to save the output model to disk
    :return: Pytorch module with OBCQ applied
    :raises RuntimeError: if do_save is True and the deployment directory
        already exists
    :raises ValueError: if `dataset` is not a registered calibration dataset
    """
    if do_save:
        # validate the output location up front so we fail before doing any
        # expensive model loading / calibration work
        deploy_dir = Path(os.path.join(deploy_dir, "obcq_deployment"))

        if deploy_dir.exists():
            raise RuntimeError(f"deploy_dir={deploy_dir} already exists")

    # fallback to cpu if cuda not available
    device = fallback_to_cpu(device)
    _LOGGER.info(f"Running one_shot on device {device}")

    # Load the configuration from the model path
    config = AutoConfig.from_pretrained(model_path)
    # may be a torch.dtype or the string "auto" (defer to checkpoint precision)
    torch_dtype = parse_dtype(precision)

    # a session must exist before model initialization so recipe structure
    # can be tracked and applied while the model is loaded
    session_manager.create_session()
    model = initialize_sparse_model(
        model_path=model_path,
        task="text-generation",
        sequence_length=sequence_length,
        torch_dtype=torch_dtype,
        config=config,
        device_map=device,
    )

    # Load calibration data
    try:
        # registry lookup only validates the name; the error message below
        # surfaces the full list of supported dataset names
        TextGenerationDataset.get_value_from_registry(dataset)
    except KeyError:
        raise ValueError(
            f"dataset={dataset} should be one of "
            f"{TextGenerationDataset.registered_names()}"
        )
    data_args = DataTrainingArguments(
        dataset=dataset,
        dataset_config_name=dataset_config_name,
        # fall back to the config-derived max length when none was provided
        max_seq_length=sequence_length or resolve_sequence_length(config),
        num_calibration_samples=num_samples,
        concatenate_data=concatenate_data,
        pad_to_max_length=False,
    )
    tokenizer = SparseAutoTokenizer.from_pretrained(
        model_path, use_fast=True, trust_remote_code=True
    )
    # dataset manager tokenizes/processes the raw calibration split
    dataset_manager = TextGenerationDataset.load_from_registry(
        dataset, data_args=data_args, split="train", tokenizer=tokenizer
    )
    raw_dataset = dataset_manager.get_raw_dataset()
    tokenized_dataset = dataset_manager.tokenize_and_process(raw_dataset)
    calibration_data = format_calibration_data(
        tokenized_dataset=tokenized_dataset, num_calibration_samples=num_samples
    )

    # launch one shot: apply the recipe to the in-memory model using the
    # calibration data (start=-1 marks a one-shot, not staged, application)
    session = session_manager.active_session()
    session.apply(
        framework=Framework.pytorch,
        recipe=recipe_file,
        model=model,
        calib_data=calibration_data,
        start=-1,
        copy_data=False,
        recipe_args=recipe_args,
    )

    if do_save:
        save_model_and_recipe(model, deploy_dir, tokenizer)

    return model
21,615 | from typing import List
import torch
from torch.nn import Module
from sparseml.modifiers.obcq.utils.helpers import (
cache_attention_inputs,
execute_offloaded_module,
)
def execute_offloaded_module(
    module,
    buffer,
    dev,
    nsamples=None,
    overwrite_buffer=True,
    cached_inputs=None,
    **kwargs,
):
    """
    Run an offloaded module over a buffer of sample inputs, hosting the
    module on `dev` only for the duration of the forward passes.

    :param module: module to run; moved to `dev` before and back to cpu after
    :param buffer: list of sample inputs (tensors, or tuples whose first
        element is the tensor to use)
    :param dev: device to execute the forward passes on
    :param nsamples: optional cap on how many samples to process
    :param overwrite_buffer: True to write outputs back into `buffer` in
        place, False to collect them into a fresh list
    :param cached_inputs: optional dict of per-sample keyword inputs,
        indexed by sample position
    :param kwargs: static keyword arguments forwarded to every module call
    :return: `buffer` with outputs written in, or the new output list
    """
    module.to(dev)
    outputs = buffer if overwrite_buffer else []

    for idx, sample in enumerate(buffer):
        if nsamples is not None and idx == nsamples:
            break
        # per-sample cached kwargs first, then the static kwargs layered on top
        if cached_inputs is None:
            call_kwargs = dict(kwargs)
        else:
            call_kwargs = {key: cached_inputs[key][idx] for key in cached_inputs}
            call_kwargs.update(kwargs)
        if isinstance(sample, tuple):
            sample = sample[0]
        result = module(sample.to(dev), **call_kwargs)
        if overwrite_buffer:
            outputs[idx] = result
        else:
            outputs.append(result)

    # offload the module and free any cached device memory
    module.cpu()
    torch.cuda.empty_cache()
    if not overwrite_buffer:
        del buffer
        torch.cuda.empty_cache()
    return outputs
def cache_attention_inputs(
    model, dataloader, device, nsamples, target_ids, layer_prefix
):
    """
    Capture the inputs flowing into the model's first transformer layer so
    subsequent layers can be run one at a time with those cached activations.

    :param model: model whose first-layer inputs should be cached
    :param dataloader: data to drive through the model front-end
    :param device: device to run the front-end modules on
    :param nsamples: number of samples to cache
    :param target_ids: names of forward kwargs to capture alongside inputs
    :param layer_prefix: optional dotted path to the model's layers list
        (e.g. "model.decoder.layers"); falsy assumes `model.model.layers`
    :return: dict of cached inputs as produced by `catch`
    """
    if layer_prefix:
        # resolve the model-specific dotted path to the layers list
        *root_path, layers_name = layer_prefix.split(".")
        model_root = operator.attrgetter(".".join(root_path))(model)
    else:
        model_root = model.model
        layers_name = "layers"
    first_layer = getattr(model_root, layers_name)[0]

    # only modules ahead of the first compressible layer (embeddings etc.)
    # plus that layer itself need to live on the device
    pre_layers_modules = _get_pre_layer_modules(model_root, layers_name)
    for pre_layer in pre_layers_modules:
        pre_layer.to(device)
    first_layer.to(device)

    cached_inputs = catch(
        model=model,
        attention_layer=first_layer,
        target_keys=target_ids,
        data_loader=dataloader,
        nsamples=nsamples,
    )

    # offload everything back to cpu and release device memory
    for pre_layer in pre_layers_modules:
        pre_layer.cpu()
    first_layer.cpu()
    torch.cuda.empty_cache()

    return cached_inputs
The provided code snippet includes necessary dependencies for implementing the `opt_forward` function. Write a Python function `def opt_forward(model: Module, data_loader: List, device: str, nsamples: int = None)` to solve the following problem:
Run a forward pass of OPT, used for perplexity evaluation :param model: Pytorch module to run :param data_loader: data to run through model :param device: device name to perform computation on :param nsamples: number of samples of data_loader to run, None to run them all :return: logits output of the model
Here is the function:
def opt_forward(model: Module, data_loader: List, device: str, nsamples: int = None):
    """
    Run a forward pass of OPT, used for perplexity evaluation

    :param model: Pytorch module to run
    :param data_loader: data to run through model
    :param device: device name to perform computation on
    :param nsamples: number of samples of data_loader to run, None to run them all
    :return: logits output of the model
    """
    decoder = model.model.decoder

    # capture the first decoder layer's inputs (plus attention masks) so each
    # layer can be executed on-device one at a time
    cached_inputs = cache_attention_inputs(
        model=model,
        dataloader=data_loader,
        device=device,
        nsamples=nsamples,
        target_ids=["attention_mask"],
        layer_prefix="model.decoder.layers",
    )
    hidden_states = [sample[0] for sample in cached_inputs.pop("inputs")]

    for decoder_layer in decoder.layers:
        hidden_states = execute_offloaded_module(
            decoder_layer,
            hidden_states,
            device,
            cached_inputs=cached_inputs,
            use_cache=False,
        )
        # each layer returns a tuple; keep only the hidden-state tensor
        hidden_states = [output[0] for output in hidden_states]

    del cached_inputs
    torch.cuda.empty_cache()

    # optional post-decoder modules run before the LM head
    if decoder.final_layer_norm is not None:
        hidden_states = execute_offloaded_module(
            decoder.final_layer_norm,
            hidden_states,
            device,
        )
    if decoder.project_out is not None:
        hidden_states = execute_offloaded_module(
            decoder.project_out,
            hidden_states,
            device,
        )

    return execute_offloaded_module(
        model.lm_head,
        hidden_states,
        device,
    )
21,616 | from typing import List
import torch
from torch.nn import Module
from sparseml.modifiers.obcq.utils.helpers import (
cache_attention_inputs,
execute_offloaded_module,
)
def execute_offloaded_module(
    module,
    buffer,
    dev,
    nsamples=None,
    overwrite_buffer=True,
    cached_inputs=None,
    **kwargs,
):
    """
    Execute `module` over every sample in `buffer`, temporarily hosting the
    module on `dev` and returning it to cpu afterwards.

    :param module: module whose forward pass is executed per sample
    :param buffer: sample inputs; tuple samples contribute their first element
    :param dev: device the forward passes run on
    :param nsamples: optional limit on the number of samples processed
    :param overwrite_buffer: when True results replace entries of `buffer`
        in place; when False they are returned as a new list
    :param cached_inputs: optional per-sample kwargs, keyed by name and
        indexed by sample position
    :param kwargs: static kwargs applied to every forward call
    :return: `buffer` (in-place mode) or the freshly collected output list
    """
    module.to(dev)
    collected = None if overwrite_buffer else []

    for position, item in enumerate(buffer):
        if nsamples is not None and position == nsamples:
            break
        if cached_inputs is None:
            forward_kwargs = dict(kwargs)
        else:
            # per-position cached values, with static kwargs taking precedence
            forward_kwargs = {
                name: values[position] for name, values in cached_inputs.items()
            }
            forward_kwargs.update(kwargs)
        tensor = item[0] if isinstance(item, tuple) else item
        out = module(tensor.to(dev), **forward_kwargs)
        if overwrite_buffer:
            buffer[position] = out
        else:
            collected.append(out)

    # return the module to cpu and release cached device memory
    module.cpu()
    torch.cuda.empty_cache()

    if overwrite_buffer:
        return buffer
    del buffer
    torch.cuda.empty_cache()
    return collected
def cache_attention_inputs(
    model, dataloader, device, nsamples, target_ids, layer_prefix
):
    """
    Record the inputs reaching the first transformer layer (and selected
    forward kwargs) so later layers can be replayed individually on-device.

    :param model: model whose first-layer inputs should be cached
    :param dataloader: data to push through the model front-end
    :param device: device the front-end modules run on
    :param nsamples: number of samples to cache
    :param target_ids: forward kwarg names to capture alongside the inputs
    :param layer_prefix: dotted path to the layers list within the model
        (e.g. "model.decoder.layers"); falsy defaults to `model.model.layers`
    :return: dict of cached inputs as produced by `catch`
    """
    if layer_prefix:
        # split "a.b.layers" into the root path ("a.b") and the list name
        path_parts = layer_prefix.split(".")
        layers_name = path_parts[-1]
        model_root = operator.attrgetter(".".join(path_parts[:-1]))(model)
    else:
        model_root = model.model
        layers_name = "layers"
    first_layer = getattr(model_root, layers_name)[0]

    # host only the front-end (embeddings etc.) and the first layer on-device
    pre_layers_modules = _get_pre_layer_modules(model_root, layers_name)
    for pre_layer in pre_layers_modules:
        pre_layer.to(device)
    first_layer.to(device)

    cached_inputs = catch(
        model=model,
        attention_layer=first_layer,
        target_keys=target_ids,
        data_loader=dataloader,
        nsamples=nsamples,
    )

    # move everything back to cpu and drop cached device allocations
    for pre_layer in pre_layers_modules:
        pre_layer.cpu()
    first_layer.cpu()
    torch.cuda.empty_cache()

    return cached_inputs
The provided code snippet includes necessary dependencies for implementing the `llama_forward` function. Write a Python function `def llama_forward(model: Module, data_loader: List, device: str, nsamples: int = None)` to solve the following problem:
Run a forward pass of Llama, used for perplexity evaluation :param model: Pytorch module to run :param data_loader: data to run through model :param device: device name to perform computation on :param nsamples: number of samples of data_loader to run, None to run them all :return: logits output of the model
Here is the function:
def llama_forward(model: Module, data_loader: List, device: str, nsamples: int = None):
    """
    Run a forward pass of Llama, used for perplexity evaluation

    :param model: Pytorch module to run
    :param data_loader: data to run through model
    :param device: device name to perform computation on
    :param nsamples: number of samples of data_loader to run, None to run them all
    :return: logits output of the model
    """
    # cache first-layer inputs together with attention masks / position ids
    cached_inputs = cache_attention_inputs(
        model=model,
        dataloader=data_loader,
        device=device,
        nsamples=nsamples,
        target_ids=["attention_mask", "position_ids"],
        layer_prefix=None,
    )
    hidden = [entry[0] for entry in cached_inputs.pop("inputs")]

    # replay every decoder layer on-device over the cached activations
    for decoder_layer in model.model.layers:
        hidden = execute_offloaded_module(
            decoder_layer,
            hidden,
            device,
            cached_inputs=cached_inputs,
            use_cache=False,
        )
        # each layer returns a tuple; keep only the hidden-state tensor
        hidden = [output[0] for output in hidden]

    del cached_inputs
    torch.cuda.empty_cache()

    # final RMSNorm then LM head produce the logits
    hidden = execute_offloaded_module(
        model.model.norm,
        hidden,
        device,
    )
    return execute_offloaded_module(
        model.lm_head,
        hidden,
        device,
    )
21,617 | import logging
import math
import os
import sys
from contextlib import nullcontext
from dataclasses import dataclass, field
from itertools import chain
from typing import Any, Callable, Optional
import datasets
import transformers
from datasets import concatenate_datasets, load_dataset, load_metric
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
HfArgumentParser,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils.versions import require_version
from sparseml.pytorch.utils.distributed import record
from sparseml.transformers.sparsification import Trainer, TrainingArguments
from sparseml.transformers.utils import SparseAutoModel, get_shared_tokenizer_src
def main(**kwargs):
# See all possible arguments in
# src/sparseml/transformers/sparsification/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser(
(ModelArguments, DataTrainingArguments, TrainingArguments)
)
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
elif not kwargs:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
else:
model_args, data_args, training_args = parser.parse_dict(kwargs)
# Setup logging
log_level = training_args.get_process_log_level()
_LOGGER.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
_LOGGER.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, "
f"n_gpu: {training_args.n_gpu}, "
f"distributed training: {bool(training_args.local_rank != -1)}, "
f"16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
_LOGGER.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if (
os.path.isdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and (len(os.listdir(training_args.output_dir)) > 0):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and "
"is not empty. Use --overwrite_output_dir to overcome."
)
elif (
last_checkpoint is not None and training_args.resume_from_checkpoint is None
):
_LOGGER.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. "
"To avoid this behavior, change the `--output_dir` or add "
"`--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can
# concurrently download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(
model_args.model_name_or_path, **config_kwargs
)
else:
config = CONFIG_MAPPING[model_args.model_type]()
_LOGGER.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
_LOGGER.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
_LOGGER.info(f"New config: {config}")
model, teacher = SparseAutoModel.masked_language_modeling_from_pretrained_distil(
model_name_or_path=model_args.model_name_or_path,
model_kwargs={
"config": config,
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
},
teacher_name_or_path=training_args.distill_teacher,
teacher_kwargs={
"cache_dir": model_args.cache_dir,
"use_auth_token": True if model_args.use_auth_token else None,
},
)
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
tokenizer_src = (
model_args.tokenizer_name
if model_args.tokenizer_name
else get_shared_tokenizer_src(model, teacher)
)
tokenizer = AutoTokenizer.from_pretrained(tokenizer_src, **tokenizer_kwargs)
model.resize_token_embeddings(len(tokenizer))
tokenized_datasets = get_tokenized_mlm_dataset(
tokenizer=tokenizer,
data_args=data_args,
training_args=training_args,
cache_dir=model_args.cache_dir,
)
if training_args.do_train:
if "train" not in tokenized_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = tokenized_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
compute_metrics = None
make_eval_dataset = training_args.do_eval or data_args.num_export_samples > 0
if make_eval_dataset:
if "validation" not in tokenized_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = tokenized_datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
def preprocess_logits_for_metrics(logits, labels):
if isinstance(logits, tuple):
# Depending on the model and config, logits may contain extra tensors,
# like past_key_values, but logits always come first
logits = logits[0]
return logits.argmax(dim=-1)
metric = load_metric("accuracy")
def compute_metrics(eval_preds):
preds, labels = eval_preds
# preds have the same shape as the labels, after the argmax(-1)
# has been calculated
# by preprocess_logits_for_metrics
labels = labels.reshape(-1)
preds = preds.reshape(-1)
mask = labels != -100
labels = labels[mask]
preds = preds[mask]
return metric.compute(predictions=preds, references=labels)
# Data collator
# This one will take care of randomly masking the tokens.
pad_to_multiple_of_8 = (
data_args.line_by_line
and training_args.fp16
and not data_args.pad_to_max_length
)
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer,
mlm_probability=data_args.mlm_probability,
pad_to_multiple_of=8 if pad_to_multiple_of_8 else None,
)
# Initialize our Trainer
trainer = Trainer(
model=model,
model_state_path=model_args.model_name_or_path,
recipe=training_args.recipe,
metadata_args=metadata_args,
recipe_args=training_args.recipe_args,
teacher=teacher,
args=training_args,
data_args=data_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if make_eval_dataset else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.do_eval else None,
preprocess_logits_for_metrics=preprocess_logits_for_metrics
if training_args.do_eval
else None,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
if not trainer.one_shot:
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples
if data_args.max_train_samples is not None
else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.save_state()
# Evaluation
if training_args.do_eval and not trainer.one_shot:
_LOGGER.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = (
data_args.max_eval_samples
if data_args.max_eval_samples is not None
else len(eval_dataset)
)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
try:
perplexity = math.exp(metrics["eval_loss"])
except OverflowError:
perplexity = float("inf")
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "fill-mask"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs[
"dataset"
] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
# Exporting Samples
if data_args.num_export_samples > 0:
trainer.save_sample_inputs_outputs(
num_samples_to_export=data_args.num_export_samples
)
def _mp_fn(index):
    # For xla_spawn (TPUs): per-process entry point; `index` is supplied by
    # the spawner (presumably the process ordinal — unused here) and main()
    # re-parses its own arguments.
    main()
21,618 | import inspect
import logging
import os
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
import torch
from torch.nn import Module
from transformers import (
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
)
from transformers.file_utils import WEIGHTS_NAME
from sparseml.pytorch.model_load.helpers import (
apply_recipe_structure_to_model,
log_model_load,
)
from sparseml.transformers.utils.helpers import resolve_recipe
from sparseml.utils import download_zoo_training_dir
from sparseml.utils.fsdp.context import main_process_first_context
The provided code snippet includes necessary dependencies for implementing the `get_shared_tokenizer_src` function. Write a Python function `def get_shared_tokenizer_src(student: Module, teacher: Optional[Module]) -> str` to solve the following problem:
Get a tokenizer source used for both student and teacher, assuming that they could be shared :param student: the student model :param teacher: the teacher model :return: the source for the tokenizer shared between teacher and model
Here is the function:
def get_shared_tokenizer_src(student: Module, teacher: Optional[Module]) -> str:
    """
    Determine which model's tokenizer source should be used when the tokenizer
    may be shared between a student and an optional teacher model.

    :param student: the student model
    :param teacher: the teacher model, ``None``, or one of the sentinel values
        ``"disable"`` / ``"self"`` (both treated as "no teacher")
    :return: the ``config._name_or_path`` of the model whose tokenizer should
        be used (teacher when present, otherwise student)
    :raises RuntimeError: if the student's ``forward`` accepts arguments the
        teacher's ``forward`` does not, so the tokenizers cannot be shared
    """
    use_teacher = teacher is not None and teacher not in ("disable", "self")
    if not use_teacher:
        return student.config._name_or_path

    # the teacher tokenizer is only usable if every input the student's
    # forward() accepts is also accepted by the teacher's forward()
    student_params = list(inspect.signature(student.forward).parameters)
    teacher_params = set(inspect.signature(teacher.forward).parameters)
    missing = [name for name in student_params if name not in teacher_params]
    if missing:
        raise RuntimeError(
            "Teacher tokenizer cannot be used for student "
            f"due to missing args: {missing}"
        )
    return teacher.config._name_or_path
21,619 | from typing import Dict
from sparsezoo.utils.registry import RegistryMixin
def custom_evolved_codealpaca_dataset(data: Dict):
    """
    Populate the ``prompt`` and ``text`` fields of an evolved-codealpaca sample.

    :param data: sample dict containing at least ``instruction`` and
        ``output`` keys; mutated in place
    :return: the same dict, with ``prompt`` set to the formatted instruction
        header and ``text`` set to the prompt followed by the output
    """
    data["prompt"] = f"[Instruction]:\n{data['instruction']}\n\n[Response]:"
    data["text"] = data["prompt"] + data["output"]
    return data
21,620 | import inspect
import logging
import os
from collections import OrderedDict
from contextlib import suppress
from enum import Enum
from pathlib import Path
from typing import Iterable, List, Optional
from typing import OrderedDict as OrderedDictType
from typing import Tuple, Union
import requests
import torch
import transformers
from transformers import AutoConfig
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy
from huggingface_hub import HUGGINGFACE_CO_URL_HOME, hf_hub_download
from sparseml.export.helpers import ONNX_MODEL_NAME
from sparsezoo import Model, setup_model
_LOGGER = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `save_zoo_directory` function. Write a Python function `def save_zoo_directory( output_dir: str, training_outputs_dir: str, logs_path: Optional[str] = None, )` to solve the following problem:
Takes the `training_outputs_dir` (the directory where the pipeline saves its training artifacts), and saves the training artifacts to `output_dir` as a sparsezoo Model class object. :param output_dir: The output path where the artifacts are saved (adhering to the structure of sparsezoo Model class object) :param training_outputs_dir: The path to the existing directory with the saved training artifacts :param logs_path: Optional directory where the training logs reside
Here is the function:
def save_zoo_directory(
    output_dir: str,
    training_outputs_dir: str,
    logs_path: Optional[str] = None,
):
    """
    Takes the `training_outputs_dir`
    (the directory where the pipeline saves its training artifacts),
    and saves the training artifacts to `output_dir` as a sparsezoo Model class object.

    :param output_dir: The output path where the artifacts are saved
        (adhering to the structure of sparsezoo Model class object)
    :param training_outputs_dir: The path to the existing directory
        with the saved training artifacts
    :param logs_path: Optional directory where the training logs reside
    """
    # sample data is optional: warn, but do not fail, when it is missing
    for root_file in ["sample-inputs", "sample-outputs"]:
        root_file_path = os.path.join(training_outputs_dir, root_file)
        if not os.path.exists(root_file_path):
            # fixed message: "being run" (was "being ran") and a space before
            # the backticked argument (the two literals previously concatenated
            # into "with`--num_export_samples`")
            _LOGGER.warning(
                f"File {root_file_path} missing. To create this file, "
                "make sure that the export script is being run with "
                "the `--num_export_samples` argument."
            )
    # the exported onnx model and deployment directory are hard requirements
    for root_file in ["model.onnx", "deployment"]:
        root_file_path = os.path.join(training_outputs_dir, root_file)
        if not os.path.exists(root_file_path):
            # fixed message: "invoked" (was "evoked")
            raise ValueError(
                f"File {root_file_path} missing. To create this file, "
                "make sure that the `export` script (for exporting "
                "transformer models) has been invoked."
            )

    setup_model(
        output_dir=output_dir,
        training=os.path.join(training_outputs_dir, "training"),
        deployment=os.path.join(training_outputs_dir, "deployment"),
        onnx_model=os.path.join(training_outputs_dir, "model.onnx"),
        sample_inputs=os.path.join(training_outputs_dir, "sample-inputs"),
        sample_outputs=os.path.join(training_outputs_dir, "sample-outputs"),
        model_card=os.path.join(training_outputs_dir, "model.md"),
        logs=logs_path,
        sample_labels=None,
        sample_originals=None,
        analysis=None,
        benchmarks=None,
        eval_results=None,
        recipes=None,
    )
    _LOGGER.info(f"Created sparsezoo Model directory locally in {output_dir}")
21,621 | import inspect
import logging
import os
from collections import OrderedDict
from contextlib import suppress
from enum import Enum
from pathlib import Path
from typing import Iterable, List, Optional
from typing import OrderedDict as OrderedDictType
from typing import Tuple, Union
import requests
import torch
import transformers
from transformers import AutoConfig
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy
from huggingface_hub import HUGGINGFACE_CO_URL_HOME, hf_hub_download
from sparseml.export.helpers import ONNX_MODEL_NAME
from sparsezoo import Model, setup_model
_LOGGER = logging.getLogger(__name__)
def infer_recipe_from_model_path(model_path: Union[str, Path]) -> Optional[str]:
    """
    Infer the recipe from the model_path.

    :param model_path: the path to the model to load.
        It can be one of the following:
        - a path to the model directory
        - a path to the model file
        - Hugging face model id
        - SparseZoo stub
    :return: the path to the recipe file if found, None otherwise
    """
    if isinstance(model_path, Path):
        model_path = model_path.as_posix()

    if os.path.isfile(model_path) or os.path.isdir(model_path):
        # local checkpoint: the recipe, if any, sits next to the model files
        directory = (
            os.path.dirname(model_path) if os.path.isfile(model_path) else model_path
        )
        candidate = os.path.join(directory, RECIPE_NAME)
        if os.path.isfile(candidate):
            _LOGGER.info(f"Found recipe in the model_path: {candidate}")
            return candidate
        _LOGGER.debug(f"No recipe found in the model_path: {directory}")
        return None

    if model_path.startswith("zoo:"):
        # SparseZoo stub: resolve the recipe remotely
        return recipe_from_sparsezoo_stub(stub=model_path)

    # otherwise assume a Hugging Face model id; the helper returns
    # (recipe, model_exists) and we only need the recipe here
    inferred = recipe_from_huggingface_model_id(model_path)[0]
    if inferred is None:
        _LOGGER.info("Failed to infer the recipe from the model_path")
    return inferred
def resolve_recipe_file(
    requested_recipe: Union[str, Path], model_path: Union[str, Path]
) -> Union[str, Path, None]:
    """
    Given the requested recipe and the model_path, return the path to the recipe file.

    :param requested_recipe: a full path to the recipe file
    :param model_path: the path to the model to load.
        It can be one of the following:
        - a path to the model directory
        - a path to the model file
        - Hugging face model id
        - SparseZoo stub
    :return: the path to the recipe file if found, None otherwise
    """
    # preprocess arguments so that they are all strings
    requested_recipe = (
        requested_recipe.as_posix()
        if isinstance(requested_recipe, Path)
        else requested_recipe
    )
    model_path = model_path.as_posix() if isinstance(model_path, Path) else model_path
    model_path = (
        os.path.dirname(model_path) if os.path.isfile(model_path) else model_path
    )

    if not os.path.isdir(model_path):
        # pathway for model_path that is not a directory
        # (SparseZoo stub or Hugging Face model id)
        if model_path.startswith("zoo:"):
            default_recipe = recipe_from_sparsezoo_stub(model_path)
        else:
            default_recipe, model_exists = recipe_from_huggingface_model_id(model_path)
            if not model_exists:
                raise ValueError(f"Unrecognized model_path: {model_path}")

        if not default_recipe == requested_recipe and default_recipe is not None:
            _LOGGER.warning(
                f"Attempting to apply recipe: {requested_recipe} "
                f"to the model at: {model_path}, "
                f"but the model already has a recipe: {default_recipe}. "
                f"Using {requested_recipe} instead."
            )
        return requested_recipe

    # pathway for model_path that is a directory
    default_recipe = os.path.join(model_path, RECIPE_NAME)
    default_recipe_exists = os.path.isfile(default_recipe)
    # BUGFIX: os.path.samefile raises FileNotFoundError when either path does
    # not exist; previously it was called unconditionally, crashing whenever
    # the model directory had no default recipe (and making the
    # "not default_recipe_exists" branch below unreachable)
    default_and_request_recipes_identical = (
        default_recipe_exists
        and os.path.exists(requested_recipe)
        and os.path.samefile(default_recipe, requested_recipe)
    )

    if (
        default_recipe_exists
        and requested_recipe
        and not default_and_request_recipes_identical
    ):
        _LOGGER.warning(
            f"Attempting to apply recipe: {requested_recipe} "
            f"to the model located in {model_path}, "
            f"but the model already has a recipe stored as {default_recipe}. "
            f"Using {requested_recipe} instead."
        )
    elif not default_recipe_exists and requested_recipe:
        _LOGGER.warning(
            f"Attempting to apply {requested_recipe} "
            f"to the model located in {model_path}."
            "However, it is expected that the model "
            f"has its target recipe stored as {default_recipe}."
            "Applying any recipe before the target recipe may "
            "result in unexpected behavior."
            f"Applying {requested_recipe} nevertheless."
        )
    elif default_recipe_exists:
        # at this point the requested and default recipes are the same file
        _LOGGER.info(f"Using the default recipe: {requested_recipe}")

    return requested_recipe
The provided code snippet includes necessary dependencies for implementing the `resolve_recipe` function. Write a Python function `def resolve_recipe( recipe: Union[str, Path, None], model_path: Union[str, Path] ) -> Union[str, None]` to solve the following problem:
Resolve the recipe to apply to the model. :param recipe: the recipe to apply to the model. It can be one of the following: - None This means that we are either not applying any recipe and allowing the model to potentially infer the appropriate pre-existing recipe from the model_path - a path to the recipe file This can be a string or Path object pointing to a recipe file. If the specified recipe file is different from the potential pre-existing recipe for that model (stored in the model_path), the function will raise a warning - name of the recipe file (e.g. "recipe.yaml") A recipe specified by file name is assumed to be stored in the model_path - a string containing the recipe Needs to adhere to the SparseML recipe format :param model_path: the path to the model to load. It can be one of the following: - a path to the model directory - a path to the model file - Hugging face model id - SparseZoo stub :return: the resolved recipe
Here is the function:
def resolve_recipe(
    recipe: Union[str, Path, None], model_path: Union[str, Path]
) -> Union[str, None]:
    """
    Resolve the recipe to apply to the model.

    :param recipe: the recipe to apply to the model.
        It can be one of the following:
        - None: no explicit recipe requested; a pre-existing recipe may
          still be inferred from the model_path
        - a path (string or Path) to a recipe file; if it differs from a
          pre-existing recipe stored with the model, a warning is emitted
        - the name of a recipe file (e.g. "recipe.yaml") assumed to be
          stored inside the model_path
        - a string containing the recipe itself, adhering to the SparseML
          recipe format
    :param model_path: the path to the model to load.
        It can be one of the following:
        - a path to the model directory
        - a path to the model file
        - Hugging face model id
        - SparseZoo stub
    :return: the resolved recipe
    """
    if recipe is None:
        return infer_recipe_from_model_path(model_path)

    if os.path.isfile(recipe):
        # a full path to an existing recipe file
        return resolve_recipe_file(recipe, model_path)

    candidate = os.path.join(model_path, recipe)
    if os.path.isfile(candidate):
        # a recipe file name relative to the model directory
        return resolve_recipe_file(candidate, model_path)

    if isinstance(recipe, str):
        # treat the argument as raw recipe contents
        _LOGGER.debug(
            "Applying the recipe string directly to the model, without "
            "checking for a potential existing recipe in the model_path."
        )
        return recipe

    _LOGGER.info(
        "No recipe requested and no default recipe "
        f"found in {model_path}. Skipping recipe resolution."
    )
    return None
21,622 | import inspect
import logging
import os
from collections import OrderedDict
from contextlib import suppress
from enum import Enum
from pathlib import Path
from typing import Iterable, List, Optional
from typing import OrderedDict as OrderedDictType
from typing import Tuple, Union
import requests
import torch
import transformers
from transformers import AutoConfig
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy
from huggingface_hub import HUGGINGFACE_CO_URL_HOME, hf_hub_download
from sparseml.export.helpers import ONNX_MODEL_NAME
from sparsezoo import Model, setup_model
The provided code snippet includes necessary dependencies for implementing the `fetch_recipe_path` function. Write a Python function `def fetch_recipe_path(target: str)` to solve the following problem:
Fetches the recipe path for the given target. This method will also download the recipe if it is not already downloaded. Takes care of three scenarios: 1. target is a local path to a model directory (looks for recipe.yaml in the directory) 2. target is a SparseZoo stub (downloads and returns the path to the default recipe) 3. target is a HuggingFace stub (downloads and returns the path to the default recipe) :param target: The target to fetch the recipe path for can be a local path, SparseZoo stub, or HuggingFace stub :return: The path to the recipe for the target
Here is the function:
def fetch_recipe_path(target: str):
    """
    Fetches the recipe path for the given target, downloading the recipe
    when it is not already available locally.

    Handles three scenarios:
    1. target is a local model directory
       (looks for recipe.yaml in the directory)
    2. target is a SparseZoo stub (downloads and
       returns the path to the default recipe)
    3. target is a HuggingFace stub (downloads and
       returns the path to the default recipe)

    :param target: local path, SparseZoo stub, or HuggingFace stub
    :return: the path to the recipe for the target, or None if not found
    """
    recipe_file_name = "recipe.yaml"

    local_target = Path(target)
    if local_target.exists():
        # local model directory: the recipe either sits inside it or is absent
        local_recipe = local_target / recipe_file_name
        return str(local_recipe) if local_recipe.exists() else None

    found_path = None
    if target.startswith("zoo:"):
        # SparseZoo stub: fetch through the sparsezoo Model object
        with suppress(Exception):
            # ignore errors if the recipe is not found on SparseZoo
            found_path = Model(source=target).recipes.default().path
        return found_path

    # otherwise treat target as a HuggingFace stub
    with suppress(Exception):
        # ignore errors if the recipe is not found on HuggingFace
        found_path = hf_hub_download(repo_id=target, filename=recipe_file_name)
    return found_path
21,623 | import logging
from typing import Any, Dict, Optional, Union
import datasets
from torch.nn import Module
from transformers import AutoConfig
from sparseml.transformers.utils.helpers import TaskNames
def load_dataset(*args, **kwargs):
    """
    Thin pass-through wrapper around ``datasets.load_dataset``.

    Kept as a seam so project-specific dataset handling can be added later
    without changing call sites.

    :param args: positional arguments forwarded to ``datasets.load_dataset``
    :param kwargs: keyword arguments forwarded to ``datasets.load_dataset``
    :return: whatever ``datasets.load_dataset`` returns
    """
    # a wrapper around datasets.load_dataset
    # to be expanded in the future
    return datasets.load_dataset(*args, **kwargs)
21,624 | from typing import Dict, Optional
import numpy
from sklearn.metrics import precision_recall_fscore_support
The provided code snippet includes necessary dependencies for implementing the `multi_label_precision_recall_f1` function. Write a Python function `def multi_label_precision_recall_f1( predictions: numpy.ndarray, targets: numpy.ndarray, id_to_label: Optional[Dict[int, str]] = None, ) -> Dict[str, float]` to solve the following problem:
computes per class and macro-averaged precision, recall, and f1 for multiple model sample predictions where targets may contain multiple labels :param predictions: array of model predictions, shape (num_samples, num_labels) where positive predictions are 1 and negative predictions are 0 :param targets: array of sample targets, shape (num_samples, num_labels) where positive predictions are 1 and negative predictions are 0. Must correspond to predictions :param id_to_label: optional mapping of label index to string label for results dictionary. Will default to a string of the index :return: dictionary of per label and macro-average results for precision, recall, and f1
Here is the function:
def multi_label_precision_recall_f1(
    predictions: numpy.ndarray,
    targets: numpy.ndarray,
    id_to_label: Optional[Dict[int, str]] = None,
) -> Dict[str, float]:
    """
    Compute per-label and macro-averaged precision, recall, and f1 for
    multi-label model predictions.

    :param predictions: model predictions, shape (num_samples, num_labels),
        with 1 for a positive prediction and 0 for a negative one
    :param targets: sample targets with the same shape and encoding as
        ``predictions``
    :param id_to_label: optional mapping of label index to string label used
        in the result keys; defaults to the stringified index
    :return: dictionary of per-label and macro-averaged precision, recall,
        and f1, plus the std of each metric across labels
    """
    precision, recall, f1, _ = precision_recall_fscore_support(targets, predictions)
    per_metric = (("precision", precision), ("recall", recall), ("f1", f1))

    results = {}
    # per-label scores, keyed by the (possibly mapped) label name
    for idx in range(predictions.shape[1]):
        label = id_to_label[idx] if id_to_label else str(idx)  # default to str idx
        for metric_name, values in per_metric:
            results[f"{metric_name}_{label}"] = values[idx]

    # unweighted (macro) averages and spread across labels
    for metric_name, values in per_metric:
        results[f"{metric_name}_macro_average"] = values.mean()
    for metric_name, values in per_metric:
        results[f"{metric_name}_std"] = values.std()
    return results
21,625 | import difflib
import re
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging import version
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import _ConvNd
from sparseml.core.model.base import ModelParameterizedLayer
from sparseml.utils.fsdp.context import fix_fsdp_module_name, summon_full_params_context
def get_layer(target: str, module: Module) -> Tuple[str, Module]:
def summon_full_params_context(model, offload_to_cpu: bool = False):
def fix_fsdp_module_name(name: str) -> str:
def maybe_get_wrapped(model: Union[ModifiableModel, Module]) -> Module:
def set_layer(target: str, layer: Module, module: Module) -> Module:
    """
    Replace the submodule of ``module`` addressed by the dotted ``target``
    path with ``layer``, returning the submodule that was displaced.

    :param target: dotted name of the submodule to replace; FSDP wrapper
        segments in the name are normalized away first
    :param layer: the new submodule to install at ``target``
    :param module: the root module containing the target submodule
    :return: the submodule previously stored at ``target``
    """
    target = fix_fsdp_module_name(target)
    # materialize full (unsharded) parameters while splicing the module in
    with summon_full_params_context(module):
        # importing here to avoid circular import
        from sparseml.utils.fsdp.helpers import maybe_get_wrapped

        # resolve the parent container: an empty parent path means the target
        # hangs directly off the (possibly FSDP-wrapped) root module
        parent_target = ".".join(target.split(".")[:-1])
        if parent_target != "":
            parent_layer = get_layer(parent_target, module)[1]
        else:
            parent_layer = maybe_get_wrapped(module)
        # swap the attribute in place and hand back the displaced layer
        old_layer = getattr(parent_layer, target.split(".")[-1])
        setattr(parent_layer, target.split(".")[-1], layer)
        return old_layer
21,626 | import difflib
import re
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging import version
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import _ConvNd
from sparseml.core.model.base import ModelParameterizedLayer
from sparseml.utils.fsdp.context import fix_fsdp_module_name, summon_full_params_context
def get_params(targets: Union[str, List[str]], module: Module) -> Dict[str, Parameter]:
    """
    Resolve target pattern(s) to the matching parameters of ``module``.

    :param targets: a target string or list of target strings to match
    :param module: the module whose parameters are searched
    :return: mapping of matched parameter name to the Parameter itself
    """
    return match_layers_params(targets, module, params=True)
def get_param(target: str, module: Module) -> Tuple[str, Parameter]:
    """
    Resolve ``target`` to exactly one parameter of ``module``.

    :param target: target string expected to match a single parameter
    :param module: the module whose parameters are searched
    :return: tuple of (parameter name, Parameter)
    :raises ValueError: if the target matches zero or multiple parameters
    """
    matches = get_params(target, module)
    if len(matches) != 1:
        raise ValueError(
            f"Expected 1 parameter for target {target}, found {len(matches)}"
        )
    return next(iter(matches.items()))
21,627 | import difflib
import re
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging import version
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import _ConvNd
from sparseml.core.model.base import ModelParameterizedLayer
from sparseml.utils.fsdp.context import fix_fsdp_module_name, summon_full_params_context
def get_layer(target: str, module: Module) -> Tuple[str, Module]:
    """
    Resolve ``target`` to exactly one submodule of ``module``.

    :param target: target string expected to match a single layer
    :param module: the module whose submodules are searched
    :return: tuple of (layer name, layer module)
    :raises ValueError: if the target matches zero or multiple layers
    """
    matches = get_layers(target, module)
    if len(matches) != 1:
        raise ValueError(f"Expected 1 layer for target {target}, found {len(matches)}")
    return next(iter(matches.items()))
def set_param(target: str, param: Parameter, module: Module) -> Parameter:
    """
    Replace the parameter addressed by the dotted ``target`` path with
    ``param``, returning the parameter that was displaced.

    :param target: dotted name of the form "<layer path>.<param name>"
    :param param: the new Parameter to install
    :param module: the root module containing the target parameter
    :return: the Parameter previously stored at ``target``
    """
    owner_name, attr_name = target.rsplit(".", 1)
    owner = get_layer(owner_name, module)[1]
    previous = getattr(owner, attr_name)
    setattr(owner, attr_name, param)
    return previous
21,628 | import difflib
import re
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging import version
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import _ConvNd
from sparseml.core.model.base import ModelParameterizedLayer
from sparseml.utils.fsdp.context import fix_fsdp_module_name, summon_full_params_context
try:
from torch.nn.qat import Conv3d as QATConv3d
except Exception as _err:
quant_conv3d_err = _err
QATConv3d = None
try:
import torch
_PARSED_TORCH_VERSION = version.parse(torch.__version__)
if _PARSED_TORCH_VERSION.major >= 2:
torch_compile_func = torch.compile
def raise_torch_compile_warning(*args, **kwargs):
warnings.warn("torch.compile is not supported by sparseml for torch 2.0.x")
return torch_compile_func(*args, **kwargs)
torch.compile = raise_torch_compile_warning
_BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
if not _BYPASS:
raise RuntimeError(
"sparseml does not support torch==1.10.* or 1.11.*. "
f"Found torch version {torch.__version__}.\n\n"
"To bypass this error, set environment variable "
"`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
"Bypassing may result in errors or "
"incorrect behavior, so set at your own risk."
)
else:
warnings.warn(
"sparseml quantized onnx export does not work "
"with torch==1.10.* or 1.11.*"
)
except ImportError:
pass
The provided code snippet includes necessary dependencies for implementing the `qat_active` function. Write a Python function `def qat_active(module: Module) -> bool` to solve the following problem:
Determines if any layers in the model have quantization enabled by checking for weight_fake_quant attributes :param module: PyTorch model to check for quantization :return: True if quantization is active anywhere in the model, False otherwise
Here is the function:
def qat_active(module: Module) -> bool:
    """
    Determine whether quantization-aware training is active anywhere in the
    model by searching for FakeQuantize submodules.

    :param module: PyTorch model to check for quantization
    :return: True if any submodule is a ``torch.quantization.FakeQuantize``,
        False otherwise
    """
    return any(
        isinstance(submodule, torch.quantization.FakeQuantize)
        for submodule in module.modules()
    )
21,629 | import difflib
import re
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging import version
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import _ConvNd
from sparseml.core.model.base import ModelParameterizedLayer
from sparseml.utils.fsdp.context import fix_fsdp_module_name, summon_full_params_context
def get_layers(targets: Union[str, List[str]], module: Module) -> Dict[str, Module]:
    """
    Resolve target pattern(s) to the matching submodules of ``module``.

    :param targets: a target string or list of target strings to match
    :param module: the module whose submodules are searched
    :return: mapping of matched layer name to the layer module
    """
    return match_layers_params(targets, module)
def get_params(targets: Union[str, List[str]], module: Module) -> Dict[str, Parameter]:
    """
    Resolve target pattern(s) to the matching parameters of ``module``.

    :param targets: a target string or list of target strings to match
    :param module: the module whose parameters are searched
    :return: mapping of matched parameter name to the Parameter itself
    """
    return match_layers_params(targets, module, params=True)
class ModelParameterizedLayer(Generic[LT, PT]):
    """
    A dataclass for holding a parameter and its layer

    NOTE(review): this class is constructed with keyword arguments (see
    get_layers_params), which presumes a generated __init__ — confirm the
    @dataclass decorator is present on the canonical definition.

    :param layer_name: the name of the layer
    :param layer: the layer object
    :param param_name: the name of the parameter
    :param param: the parameter object
    """
    # name of the layer that owns the parameter
    layer_name: str
    layer: LT
    # name of the parameter within that layer
    param_name: str
    param: PT
def get_layers_params(
    targets: Union[str, List[str]], module: Module
) -> Dict[str, ModelParameterizedLayer[Parameter, Module]]:
    """
    Resolve target pattern(s) to both the matching parameters and their
    owning layers, bundled together per matched name.

    :param targets: a target string or list of target strings to match
    :param module: the module to search
    :return: mapping of matched name to a ModelParameterizedLayer holding
        the layer and parameter found under that name
    """
    params = get_params(targets, module)
    layers = get_layers(targets, module)
    return {
        name: ModelParameterizedLayer(
            layer_name=name, layer=layers[name], param_name=name, param=param
        )
        for name, param in params.items()
    }
21,630 | import difflib
import re
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging import version
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import _ConvNd
from sparseml.core.model.base import ModelParameterizedLayer
from sparseml.utils.fsdp.context import fix_fsdp_module_name, summon_full_params_context
def get_layers(targets: Union[str, List[str]], module: Module) -> Dict[str, Module]:
    """Resolve the given target name(s)/regex pattern(s) to a dict of
    layer name -> layer module within ``module``."""
    return match_layers_params(targets, module)
The provided code snippet includes necessary dependencies for implementing the `get_matching_layer` function. Write a Python function `def get_matching_layer( target: str, name_to_match: str, module: Module ) -> Optional[Tuple[str, Module]]` to solve the following problem:
Given a target regex, find the layer name in the module that most closely matches the name_to_match string. This is used to match submodules in the same layer, for instance matching "re.*k_proj" to "model.decoder.layer.0.q_proj" to find the k_proj that exists in layer 0. :param target: regex to search for :param name_to_match: full layer name to match to, should exist in module :param module: module to search for target in :return: Tuple containing the layer name and module that fits the target regex and best matches name_to_match, or None if no match can be found
Here is the function:
def get_matching_layer(
    target: str, name_to_match: str, module: Module
) -> Optional[Tuple[str, Module]]:
    """
    Given a target regex, find the layer name in the module that most closely matches
    the name_to_match string. This is used to match submodules in the same layer, for
    instance matching "re.*k_proj" to "model.decoder.layer.0.q_proj" to find the k_proj
    that exists in layer 0.

    :param target: regex to search for
    :param name_to_match: full layer name to match to, should exist in module
    :param module: module to search for target in
    :return: Tuple containing the layer name and module that fits the target regex and
        best matches name_to_match, or None if no match can be found
    """
    potential_matches = get_layers(target, module)
    largest_substring = 0
    match = None
    # use a distinct loop variable; the original rebound (shadowed) the
    # ``module`` parameter here
    for name, candidate in potential_matches.items():
        seq_matcher = difflib.SequenceMatcher(None, name, name_to_match)
        _, _, match_length = seq_matcher.find_longest_match(
            0, len(name), 0, len(name_to_match)
        )
        # keep the candidate sharing the longest common substring with the name
        if match_length > largest_substring:
            match = (name, candidate)
            largest_substring = match_length
    return match
21,631 | import difflib
import re
from typing import Dict, List, Optional, Tuple, Union
import torch
from packaging import version
from torch.nn import Linear, Module, Parameter
from torch.nn.modules.conv import _ConvNd
from sparseml.core.model.base import ModelParameterizedLayer
from sparseml.utils.fsdp.context import fix_fsdp_module_name, summon_full_params_context
ALL_TARGET = "__ALL__"
def maybe_get_wrapped(model: Union[ModifiableModel, Module]) -> Module:
    """
    Given a model that may or may not have a distributed wrapper, return the
    underlying wrapped model.

    :param model: input model to get wrapped model from
    :returns: wrapped model
    """
    # peel off the ModifiableModel container first, then the FSDP wrapper
    inner = model.model if isinstance(model, ModifiableModel) else model
    return inner._fsdp_wrapped_module if is_fsdp_model(model=inner) else inner
The provided code snippet includes necessary dependencies for implementing the `get_no_split_params` function. Write a Python function `def get_no_split_params(module: Module) -> Union[str, List[str]]` to solve the following problem:
Get list of module classes that shouldn't be split when sharding. For Hugging Face Transformer models, this is the decoder layer type. For other types of models, this just returns all module names. :return: list of class names that shouldn't be split
Here is the function:
def get_no_split_params(module: Module) -> Union[str, List[str]]:
    """
    Get list of module classes that shouldn't be split when sharding. For
    Hugging Face Transformer models, this is the decoder layer type. For other
    types of models, this just returns all module names.

    :return: list of class names that shouldn't be split
    """
    # imported locally to avoid a circular import
    from sparseml.utils.fsdp.helpers import maybe_get_wrapped

    wrapped = maybe_get_wrapped(module)
    missing = object()
    no_split = getattr(wrapped, "_no_split_modules", missing)
    return ALL_TARGET if no_split is missing else no_split
21,632 | import logging
import operator
from pathlib import Path
from typing import Optional, Union
import torch
from torch.nn import Module
from sparseml.core.model import ModifiableModel
from sparseml.pytorch.model_load.helpers import save_model_and_recipe
from sparseml.utils.pytorch import set_layer
def is_fsdp_model(model: Module) -> bool:
    """
    Check if a model instance is wrapped by FSDP

    :param model: pytorch model to check
    :return: True if module is wrapped, False otherwise
    """
    # FullyShardedDataParallel is None when torch.distributed.fsdp could not
    # be imported, in which case nothing can be FSDP-wrapped
    if FullyShardedDataParallel is None:
        return False
    return isinstance(model, FullyShardedDataParallel)
The provided code snippet includes necessary dependencies for implementing the `set_wrapped_model` function. Write a Python function `def set_wrapped_model(model: ModifiableModel, wrapped_model: Module)` to solve the following problem:
Given a model that may or may not have a distributed wrapper, set the underlying wrapped model. :param input_model: input model to be updated :param updated_wrapped: model to inject into input_model
Here is the function:
def set_wrapped_model(model: ModifiableModel, wrapped_model: Module):
    """
    Given a model that may or may not have a distributed wrapper, set the underlying
    wrapped model.

    :param model: ModifiableModel whose inner model is to be updated
    :param wrapped_model: model to inject into ``model``
    """
    inner = model.model
    if is_fsdp_model(inner):
        # replace only the module inside the FSDP wrapper
        inner._fsdp_wrapped_module = wrapped_model
    else:
        model.model = wrapped_model
21,633 | import logging
import operator
from pathlib import Path
from typing import Optional, Union
import torch
from torch.nn import Module
from sparseml.core.model import ModifiableModel
from sparseml.pytorch.model_load.helpers import save_model_and_recipe
from sparseml.utils.pytorch import set_layer
def save_model_and_recipe(
    model: Module, save_path: str, tokenizer: Optional[Any] = None
):
    """
    Save a model, tokenizer and the currently loaded recipe to file

    :param model: pytorch model to save
    :param save_path: path to save output to
    :param tokenizer: model tokenizer to save
    """
    model.save_pretrained(save_path)

    if tokenizer is not None:
        tokenizer.save_pretrained(save_path)

    _LOGGER.info("Saving output to {}".format(os.path.abspath(save_path)))

    # serialize the active recipe alongside the model weights
    recipe_yaml_str = session_manager.active_session().get_serialized_recipe()
    with open(os.path.join(save_path, RECIPE_FILE_NAME), "w") as fp:
        fp.write(recipe_yaml_str)
The provided code snippet includes necessary dependencies for implementing the `unwrap_and_export_model` function. Write a Python function `def unwrap_and_export_model(model, accelerator, output_dir, tokenizer)` to solve the following problem:
Recursively unwraps an FSDP model, then saves the unwrapped model and the currently active recipe to disk :param model: model to unwrap :param accelerator: Accelerator instance used to perform unwrapping :param output_dir: where to save output model :param tokenizer: tokenizer used by the model
Here is the function:
def unwrap_and_export_model(model, accelerator, output_dir, tokenizer):
    """
    Recursively unwraps an FSDP model, then saves the unwrapped model and the
    currently active recipe to disk

    :param model: model to unwrap
    :param accelerator: Accelerator instance used to perform unwrapping
    :param output_dir: where to save output model
    :param tokenizer: tokenizer used by the model
    """
    # gather the full (unsharded) state dict, offloaded to cpu and only on
    # rank 0, so a single process can save the whole model
    full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
    with FullyShardedDataParallel.state_dict_type(
        model,
        StateDictType.FULL_STATE_DICT,
        full_state_dict_config,
    ):
        unwrapped_model = accelerator.unwrap_model(model)
        # submodules may themselves be FSDP-wrapped; unwrap each in place
        for name, module in unwrapped_model.named_modules():
            if isinstance(module, FullyShardedDataParallel):
                set_layer(name, accelerator.unwrap_model(module), unwrapped_model)
        save_model_and_recipe(
            model=unwrapped_model,
            save_path=output_dir,
            tokenizer=tokenizer,
        )
21,634 | import logging
import operator
from pathlib import Path
from typing import Optional, Union
try:
from torch.distributed.fsdp import (
FullStateDictConfig,
FullyShardedDataParallel,
StateDictType,
)
except ImportError:
FullyShardedDataParallel = None
import torch
from torch.nn import Module
from sparseml.core.model import ModifiableModel
from sparseml.pytorch.model_load.helpers import save_model_and_recipe
from sparseml.utils.pytorch import set_layer
_LOGGER = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `find_and_move_state_dicts_to_cpu` function. Write a Python function `def find_and_move_state_dicts_to_cpu(output_dir: str)` to solve the following problem:
Looks for state dicts in the output directory and overwrites them with cpu state dicts. this is needed for quantized models trained with FSDP as the state dict contains device information, which can cause issues when loading the model using transformers AutoModel.from_pretrained(...) if the device information is not removed, assumes the state dicts are named pytorch_model*.bin
Here is the function:
def find_and_move_state_dicts_to_cpu(output_dir: str):
    """
    Looks for state dicts in the output directory and overwrites them
    with cpu state dicts.

    this is needed for quantized models trained with FSDP as the state dict
    contains device information, which can cause issues when loading the model
    using transformers AutoModel.from_pretrained(...) if the device information
    is not removed, assumes the state dicts are named pytorch_model*.bin

    :param output_dir: directory searched recursively for state dict files
    """
    for model_file in Path(output_dir).rglob("pytorch_model*.bin"):
        # map_location="cpu" remaps every tensor to cpu at load time, including
        # tensors nested inside containers, which the previous top-level-only
        # ``value.cpu()`` loop silently missed
        loaded_dict = torch.load(model_file, map_location="cpu")
        torch.save(loaded_dict, model_file)
        _LOGGER.info(f"Moved state dict {model_file} to cpu")
21,635 | import logging
import operator
from pathlib import Path
from typing import Optional, Union
import torch
from torch.nn import Module
from sparseml.core.model import ModifiableModel
from sparseml.pytorch.model_load.helpers import save_model_and_recipe
from sparseml.utils.pytorch import set_layer
The provided code snippet includes necessary dependencies for implementing the `save_pretrained_fsdp` function. Write a Python function `def save_pretrained_fsdp(model, accelerator, output_dir, save_safetensors: bool = True)` to solve the following problem:
Gathers the full FSDP state dict of the model onto rank0 GPU, then uses it to save the pretrained FSDP model to disk :param model: model to save :param accelerator: Accelerator instance used to perform unwrapping :param output_dir: where to save output model :param save_safetensors: True to safe in safetensors format, otherwise .bin
Here is the function:
def save_pretrained_fsdp(model, accelerator, output_dir, save_safetensors: bool = True):
    """
    Gathers the full FSDP state dict of the model onto rank0 GPU, then uses it to save
    the pretrained FSDP model to disk

    :param model: model to save
    :param accelerator: Accelerator instance used to perform unwrapping
    :param output_dir: where to save output model
    :param save_safetensors: True to save in safetensors format, otherwise .bin
    """
    # fix: the docstring above was previously placed after this statement,
    # where it was a no-op string expression instead of the function docstring
    full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
    with FullyShardedDataParallel.state_dict_type(
        model, StateDictType.FULL_STATE_DICT, full_state_dict_config
    ):
        state_dict = accelerator.get_state_dict(model, unwrap=False)

        accelerator.unwrap_model(model).save_pretrained(
            output_dir,
            is_main_process=accelerator.is_main_process,
            save_function=accelerator.save,
            state_dict=state_dict,
            safe_serialization=save_safetensors,
        )
21,636 | import logging
import operator
from pathlib import Path
from typing import Optional, Union
import torch
from torch.nn import Module
from sparseml.core.model import ModifiableModel
from sparseml.pytorch.model_load.helpers import save_model_and_recipe
from sparseml.utils.pytorch import set_layer
def is_fsdp_model(model: Module) -> bool:
    """
    Check if a model instance is wrapped by FSDP

    :param model: pytorch model to check
    :return: True if module is wrapped, False otherwise
    """
    # FullyShardedDataParallel is None when torch.distributed.fsdp could not
    # be imported; nothing can be FSDP-wrapped in that case
    if not FullyShardedDataParallel:
        return False
    return isinstance(model, FullyShardedDataParallel)
The provided code snippet includes necessary dependencies for implementing the `get_fsdp_parent` function. Write a Python function `def get_fsdp_parent(layer_name: str, model: Module) -> Optional[Module]` to solve the following problem:
Gets the closest parent of layer_name that is wrapped by FSDP. If no FSDP wrapper is found just return None :param layer_name: layer name in model to get parent of :model: pytorch module to search through :return: FSDP wrapped parent of layer_name if available, otherwise None
Here is the function:
def get_fsdp_parent(layer_name: str, model: Module) -> Optional[Module]:
    """
    Gets the closest parent of layer_name that is wrapped by FSDP. If no FSDP wrapper
    is found just return None

    :param layer_name: layer name in model to get parent of
    :param model: pytorch module to search through
    :return: FSDP wrapped parent of layer_name if available, otherwise None
    """
    if not is_fsdp_model(model):
        return None

    parent_name = layer_name
    parent = operator.attrgetter(parent_name)(model)
    while not isinstance(parent, FullyShardedDataParallel):
        if len(parent_name) == 0:  # we've reached the root module and its not FSDP
            # this should never get hit because we check for an FSDP root above
            # but while statements without a backup are too scary
            return None
        parent_name = ".".join(parent_name.split(".")[:-1])
        # fix: attrgetter("") raises AttributeError, so when the name is
        # stripped empty the parent is the root model itself (already verified
        # to be FSDP-wrapped above)
        parent = operator.attrgetter(parent_name)(model) if parent_name else model
    return parent
21,637 | from contextlib import nullcontext
The provided code snippet includes necessary dependencies for implementing the `main_process_first_context` function. Write a Python function `def main_process_first_context()` to solve the following problem:
Creates a context manager where the main process runs the block before all other processes. Returns a nullcontext when called from a single process application.
Here is the function:
def main_process_first_context():
    """
    Creates a context manager where the main process runs the block before all
    other processes. Returns a nullcontext when called from a single process
    application.
    """
    # Accelerator is None when accelerate is unavailable — no-op context then
    return nullcontext() if Accelerator is None else Accelerator().main_process_first()
21,638 | import ast
import errno
import fnmatch
import importlib.metadata
import importlib.util
import json
import logging
import os
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from urllib.parse import urlparse
import numpy
from sparsezoo import Model
from sparsezoo.utils import load_numpy_list
ALL_TOKEN = "__ALL__"
ALL_PRUNABLE_TOKEN = "__ALL_PRUNABLE__"
def flatten_iterable(li: Iterable):
    """
    Flatten an arbitrarily nested iterable into one flat list.

    Strings and bytes are treated as atomic values, not iterables to recurse
    into.

    :param li: a possibly nested iterable of items to be flattened
    :return: a flat list of all elements in depth-first order
    """
    flattened = []

    def _walk(items):
        # depth-first traversal, appending leaves as encountered
        for item in items:
            if isinstance(item, Iterable) and not isinstance(item, (str, bytes)):
                _walk(item)
            else:
                flattened.append(item)

    _walk(li)
    return flattened
The provided code snippet includes necessary dependencies for implementing the `validate_str_iterable` function. Write a Python function `def validate_str_iterable( val: Union[str, Iterable[str]], error_desc: str = "" ) -> Union[str, Iterable[str]]` to solve the following problem:
:param val: the value to validate, check that it is a list (and flattens it), otherwise checks that it's an __ALL__ or __ALL_PRUNABLE__ string, otherwise raises a ValueError :param error_desc: the description to raise an error with in the event that the val wasn't valid :return: the validated version of the param
Here is the function:
def validate_str_iterable(
    val: Union[str, Iterable[str]], error_desc: str = ""
) -> Union[str, Iterable[str]]:
    """
    Validate and normalize a targets value.

    A string must be the __ALL__ or __ALL_PRUNABLE__ token (returned
    uppercased); any other iterable is flattened; anything else raises.

    :param val: the value to validate
    :param error_desc: description used in the raised error message
    :return: the uppercased token or the flattened iterable
    """
    if isinstance(val, str):
        upper_val = val.upper()
        if upper_val not in (ALL_TOKEN, ALL_PRUNABLE_TOKEN):
            raise ValueError(
                "unsupported string ({}) given in {}".format(val, error_desc)
            )
        return upper_val

    if isinstance(val, Iterable):
        return flatten_iterable(val)

    raise ValueError("unsupported type ({}) given in {}".format(val, error_desc))
21,639 | import ast
import errno
import fnmatch
import importlib.metadata
import importlib.util
import json
import logging
import os
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from urllib.parse import urlparse
import numpy
from sparsezoo import Model
from sparsezoo.utils import load_numpy_list
The provided code snippet includes necessary dependencies for implementing the `bucket_iterable` function. Write a Python function `def bucket_iterable( val: Iterable[Any], num_buckets: int = 3, edge_percent: float = 0.05, sort_highest: bool = True, sort_key: Callable[[Any], Any] = None, ) -> List[Tuple[int, Any]]` to solve the following problem:
Bucket iterable into subarray consisting of the first top percentage followed by the rest of the iterable sliced into equal sliced groups. :param val: The iterable to bucket :param num_buckets: The number of buckets to group the iterable into, does not include the top bucket :param edge_percent: Group the first percent into its own bucket. If sort_highest, then this is the top percent, else bottom percent. If <= 0, then will not create an edge bucket :param sort_highest: True to sort such that the highest percent is first and will create buckets in descending order. False to sort so lowest is first and create buckets in ascending order. :param sort_key: The sort_key, if any, to use for sorting the iterable after converting it to a list :return: a list of each value mapped to the bucket it was sorted into
Here is the function:
def bucket_iterable(
    val: Iterable[Any],
    num_buckets: int = 3,
    edge_percent: float = 0.05,
    sort_highest: bool = True,
    sort_key: Callable[[Any], Any] = None,
) -> List[Tuple[int, Any]]:
    """
    Bucket iterable into subarray consisting of the first top percentage
    followed by the rest of the iterable sliced into equal sliced groups.

    :param val: The iterable to bucket
    :param num_buckets: The number of buckets to group the iterable into,
        does not include the top bucket
    :param edge_percent: Group the first percent into its own bucket.
        If sort_highest, then this is the top percent, else bottom percent.
        If <= 0, then will not create an edge bucket
    :param sort_highest: True to sort such that the highest percent is first
        and will create buckets in descending order.
        False to sort so lowest is first and create buckets in ascending order.
    :param sort_key: The sort_key, if any, to use for sorting the iterable
        after converting it to a list
    :return: a list of each value mapped to the bucket it was sorted into
    """
    ordered = list(val)
    ordered.sort(key=sort_key, reverse=sort_highest)

    buckets: List[Tuple[int, Any]] = []

    # edge bucket (labeled -1) takes the first edge_percent of items
    edge_count = round(edge_percent * len(ordered))
    if edge_count > 0:
        buckets.extend((-1, item) for item in ordered[:edge_count])
        ordered = ordered[edge_count:]

    # remaining items are split into num_buckets groups; the final bucket
    # absorbs any remainder
    per_bucket = round(len(ordered) / float(num_buckets))
    for index in range(num_buckets):
        if index < num_buckets - 1:
            chunk, ordered = ordered[:per_bucket], ordered[per_bucket:]
        else:
            chunk, ordered = ordered, []
        buckets.extend((index, item) for item in chunk)

    return buckets
21,640 | import ast
import errno
import fnmatch
import importlib.metadata
import importlib.util
import json
import logging
import os
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from urllib.parse import urlparse
import numpy
from sparsezoo import Model
from sparsezoo.utils import load_numpy_list
INTERPOLATION_FUNCS = ["linear", "cubic", "inverse_cubic"]
The provided code snippet includes necessary dependencies for implementing the `interpolate` function. Write a Python function `def interpolate( x_cur: float, x0: float, x1: float, y0: Any, y1: Any, inter_func: str = "linear" ) -> Any` to solve the following problem:
note, caps values at their min of x0 and max x1, designed to not work outside of that range for implementation reasons :param x_cur: the current value for x, should be between x0 and x1 :param x0: the minimum for x to interpolate between :param x1: the maximum for x to interpolate between :param y0: the minimum for y to interpolate between :param y1: the maximum for y to interpolate between :param inter_func: the type of function to interpolate with: linear, cubic, inverse_cubic :return: the interpolated value projecting x into y for the given interpolation function
Here is the function:
def interpolate(
    x_cur: float, x0: float, x1: float, y0: Any, y1: Any, inter_func: str = "linear"
) -> Any:
    """
    Project x_cur from the range [x0, x1] onto [y0, y1] using the chosen curve.

    note, caps values at their min of x0 and max x1,
    designed to not work outside of that range for implementation reasons

    :param x_cur: the current value for x, should be between x0 and x1
    :param x0: the minimum for x to interpolate between
    :param x1: the maximum for x to interpolate between
    :param y0: the minimum for y to interpolate between
    :param y1: the maximum for y to interpolate between
    :param inter_func: the type of function to interpolate with:
        linear, cubic, inverse_cubic
    :return: the interpolated value projecting x into y for the given
        interpolation function
    """
    if inter_func not in INTERPOLATION_FUNCS:
        raise ValueError(
            "unsupported inter_func given of {} must be one of {}".format(
                inter_func, INTERPOLATION_FUNCS
            )
        )

    # work in normalized (0,0)-(1,1) space where the curves are defined
    x_per = (x_cur - x0) / (x1 - x0)

    curves = {
        "linear": lambda p: p,
        # https://www.wolframalpha.com/input/?i=1-(1-x)%5E3+from+0+to+1
        "cubic": lambda p: 1 - (1 - p) ** 3,
        # https://www.wolframalpha.com/input/?i=1-(1-x)%5E(1%2F3)+from+0+to+1
        "inverse_cubic": lambda p: 1 - (1 - p) ** (1 / 3),
    }
    if inter_func not in curves:
        raise ValueError(
            "unsupported inter_func given of {} in interpolate".format(inter_func)
        )
    y_per = curves[inter_func](x_per)

    # clamp to the endpoints within float epsilon
    if y_per <= 0.0 + sys.float_info.epsilon:
        return y0
    if y_per >= 1.0 - sys.float_info.epsilon:
        return y1

    # scale back into the requested y range
    return y_per * (y1 - y0) + y0
21,641 | import ast
import errno
import fnmatch
import importlib.metadata
import importlib.util
import json
import logging
import os
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from urllib.parse import urlparse
import numpy
from sparsezoo import Model
from sparsezoo.utils import load_numpy_list
The provided code snippet includes necessary dependencies for implementing the `interpolate_list_linear` function. Write a Python function `def interpolate_list_linear( measurements: List[Tuple[float, float]], x_val: Union[float, List[float]] ) -> List[Tuple[float, float]]` to solve the following problem:
interpolate for input values within a list of measurements linearly :param measurements: the measurements to interpolate the output value between :param x_val: the target values to interpolate to the second dimension :return: a list of tuples containing the target values, interpolated values
Here is the function:
def interpolate_list_linear(
    measurements: List[Tuple[float, float]], x_val: Union[float, List[float]]
) -> List[Tuple[float, float]]:
    """
    interpolate for input values within a list of measurements linearly

    :param measurements: the measurements to interpolate the output value between
    :param x_val: the target value (int/float) or values to interpolate
        to the second dimension
    :return: a list of tuples containing the target values, interpolated values
    """
    assert len(measurements) > 1
    # sort copies so the caller's lists are not mutated (the previous
    # implementation sorted both input lists in place)
    measurements = sorted(measurements, key=lambda v: v[0])
    # accept a bare int as well as a float for the scalar case; an int
    # previously fell through to the list branch and crashed on .sort()
    x_vals = sorted([x_val] if isinstance(x_val, (int, float)) else x_val)

    interpolated = []
    lower_index = 0
    higher_index = 1
    for x_val in x_vals:
        # advance the bracketing pair until it contains x_val (or we run out)
        while (
            x_val > measurements[higher_index][0]
            and higher_index < len(measurements) - 1
        ):
            lower_index += 1
            higher_index += 1

        x0, y0 = measurements[lower_index]
        x1, y1 = measurements[higher_index]
        y_val = y0 + (x_val - x0) * ((y1 - y0) / (x1 - x0))
        interpolated.append((x_val, y_val))

    return interpolated
21,642 | import ast
import errno
import fnmatch
import importlib.metadata
import importlib.util
import json
import logging
import os
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from urllib.parse import urlparse
import numpy
from sparsezoo import Model
from sparsezoo.utils import load_numpy_list
The provided code snippet includes necessary dependencies for implementing the `interpolated_integral` function. Write a Python function `def interpolated_integral(measurements: List[Tuple[float, float]])` to solve the following problem:
Calculate the interpolated integral for a group of measurements of the form [(x0, y0), (x1, y1), ...] :param measurements: the measurements to calculate the integral for :return: the integral or area under the curve for the measurements given
Here is the function:
def interpolated_integral(measurements: List[Tuple[float, float]]):
    """
    Calculate the interpolated integral (trapezoid rule) for a group of
    measurements of the form [(x0, y0), (x1, y1), ...]

    Note: sorts ``measurements`` in place by x value.

    :param measurements: the measurements to calculate the integral for
    :return: the integral or area under the curve for the measurements given
    """
    if not measurements:
        return 0.0
    if len(measurements) == 1:
        return measurements[0][1]

    measurements.sort(key=lambda v: v[0])

    total = 0.0
    for (x_cur, y_cur), (x_next, y_next) in zip(measurements, measurements[1:]):
        width = x_next - x_cur
        # trapezoid: rectangle under y_cur plus the triangle up to y_next
        total += y_cur * width + (y_next - y_cur) * width / 2.0
    return total
21,643 | import ast
import errno
import fnmatch
import importlib.metadata
import importlib.util
import json
import logging
import os
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from urllib.parse import urlparse
import numpy
from sparsezoo import Model
from sparsezoo.utils import load_numpy_list
def clean_path(path: str) -> str:
    """
    :param path: the directory or file path to clean
    :return: a cleaned version that expands the user path and creates an absolute path
    """
    expanded = os.path.expanduser(path)
    return os.path.abspath(expanded)
The provided code snippet includes necessary dependencies for implementing the `create_unique_dir` function. Write a Python function `def create_unique_dir(path: str, check_number: int = 0) -> str` to solve the following problem:
:param path: the file path to create a unique version of (append numbers until one doesn't exist) :param check_number: the number to begin checking for unique versions at :return: the unique directory path
Here is the function:
def create_unique_dir(path: str, check_number: int = 0) -> str:
    """
    :param path: the file path to create a unique version of
        (append numbers until one doesn't exist)
    :param check_number: the number to begin checking for unique versions at
    :return: the unique directory path
    """
    # iterate instead of recursing so a long run of existing suffixes
    # cannot exhaust the interpreter's recursion limit
    number = check_number
    while True:
        candidate = clean_path("{}-{:04d}".format(path, number))
        if not os.path.exists(candidate):
            return candidate
        number += 1
21,644 | import ast
import errno
import fnmatch
import importlib.metadata
import importlib.util
import json
import logging
import os
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from urllib.parse import urlparse
import numpy
from sparsezoo import Model
from sparsezoo.utils import load_numpy_list
def clean_path(path: str) -> str:
    """
    Resolve a path to its absolute, user-expanded form.

    :param path: the directory or file path to clean
    :return: a cleaned version that expands the user path and creates an
        absolute path
    """
    # expand "~" first, then absolutize relative segments
    return os.path.abspath(os.path.expanduser(path))
The provided code snippet includes necessary dependencies for implementing the `path_file_count` function. Write a Python function `def path_file_count(path: str, pattern: str = "*") -> int` to solve the following problem:
Return the number of files that match the given pattern under the given path :param path: the path to the directory to look for files under :param pattern: the pattern the files must match to be counted :return: the number of files matching the pattern under the directory
Here is the function:
def path_file_count(path: str, pattern: str = "*") -> int:
    """
    Return the number of files that match the given pattern under
    the given path

    :param path: the path to the directory to look for files under
    :param pattern: the pattern the files must match to be counted
    :return: the number of files matching the pattern under the directory
    """
    entries = os.listdir(clean_path(path))
    return sum(1 for entry in entries if fnmatch.fnmatch(entry, pattern))
21,645 | import ast
import errno
import fnmatch
import importlib.metadata
import importlib.util
import json
import logging
import os
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from urllib.parse import urlparse
import numpy
from sparsezoo import Model
from sparsezoo.utils import load_numpy_list
The provided code snippet includes necessary dependencies for implementing the `path_file_size` function. Write a Python function `def path_file_size(path: str) -> int` to solve the following problem:
Return the total size, in bytes, for a path on the file system :param path: the path (directory or file) to get the size for :return: the size of the path, in bytes, as stored on disk
Here is the function:
def path_file_size(path: str) -> int:
    """
    Return the total size, in bytes, for a path on the file system

    :param path: the path (directory or file) to get the size for
    :return: the size of the path, in bytes, as stored on disk
    """
    if not os.path.isdir(path):
        # single file: just report its stat size
        return os.stat(path).st_size

    total_size = 0
    # track visited inodes in a set (the original used a dict as a
    # makeshift set) so hard-linked files are only counted once
    seen_inodes = set()
    for dir_path, _dir_names, file_names in os.walk(path):
        for file_name in file_names:
            file_path = os.path.join(dir_path, file_name)
            try:
                stat = os.stat(file_path)
            except OSError:
                # file vanished or is unreadable; skip it
                continue
            if stat.st_ino in seen_inodes:
                continue
            seen_inodes.add(stat.st_ino)
            total_size += stat.st_size
    return total_size
21,646 | import ast
import errno
import fnmatch
import importlib.metadata
import importlib.util
import json
import logging
import os
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from urllib.parse import urlparse
import numpy
from sparsezoo import Model
from sparsezoo.utils import load_numpy_list
The provided code snippet includes necessary dependencies for implementing the `is_url` function. Write a Python function `def is_url(val: str)` to solve the following problem:
:param val: value to check if it is a url or not :return: True if value is a URL, False otherwise
Here is the function:
def is_url(val: str):
    """
    Check whether a string parses as a URL with both a scheme and a
    network location.

    :param val: value to check if it is a url or not
    :return: True if value is a URL, False otherwise
    """
    try:
        parsed = urlparse(val)
    except ValueError:
        return False
    return bool(parsed.scheme) and bool(parsed.netloc)
21,647 | import ast
import errno
import fnmatch
import importlib.metadata
import importlib.util
import json
import logging
import os
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from urllib.parse import urlparse
import numpy
from sparsezoo import Model
from sparsezoo.utils import load_numpy_list
def create_dirs(path: str):
    """
    Create the given directory (including parents) if it does not already
    exist.

    :param path: the directory path to try and create
    """
    try:
        os.makedirs(clean_path(path))
    except FileExistsError:
        # path already exists (errno EEXIST); nothing more to do
        pass
The provided code snippet includes necessary dependencies for implementing the `save_numpy` function. Write a Python function `def save_numpy( array: Union[numpy.ndarray, Dict[str, numpy.ndarray], Iterable[numpy.ndarray]], export_dir: str, name: str, npz: bool = True, )` to solve the following problem:
Save a numpy array or collection of numpy arrays to disk :param array: the array or collection of arrays to save :param export_dir: the directory to export the numpy file into :param name: the name of the file to export to (without extension) :param npz: True to save as an npz compressed file, False for standard npy. Note, npy can only be used for single numpy arrays :return: the saved path
Here is the function:
def save_numpy(
    array: Union[numpy.ndarray, Dict[str, numpy.ndarray], Iterable[numpy.ndarray]],
    export_dir: str,
    name: str,
    npz: bool = True,
):
    """
    Save a numpy array or collection of numpy arrays to disk

    :param array: the array or collection of arrays to save
    :param export_dir: the directory to export the numpy file into
    :param name: the name of the file to export to (without extension)
    :param npz: True to save as an npz compressed file, False for standard npy.
        Note, npy can only be used for single numpy arrays
    :return: the saved path
    """
    create_dirs(export_dir)
    extension = "npz" if npz else "npy"
    export_path = os.path.join(export_dir, "{}.{}".format(name, extension))

    # check ndarray first: an ndarray is itself Iterable, so order matters
    if isinstance(array, numpy.ndarray):
        if npz:
            numpy.savez_compressed(export_path, array)
        else:
            numpy.save(export_path, array)
    elif isinstance(array, Dict):
        if not npz:
            raise ValueError("Dict can only be exported to an npz file")
        numpy.savez_compressed(export_path, **array)
    elif isinstance(array, Iterable):
        if not npz:
            raise ValueError("Iterable can only be exported to an npz file")
        numpy.savez_compressed(export_path, *list(array))
    else:
        raise ValueError("Unrecognized type given for array {}".format(array))

    return export_path
21,648 | import ast
import errno
import fnmatch
import importlib.metadata
import importlib.util
import json
import logging
import os
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from urllib.parse import urlparse
import numpy
from sparsezoo import Model
from sparsezoo.utils import load_numpy_list
_LOGGER = logging.getLogger(__name__)
def load_numpy(file_path: str) -> Union[numpy.ndarray, Dict[str, numpy.ndarray]]:
    """
    Load a numpy file into either an ndarray or an OrderedDict representing
    what was in the npz file

    :param file_path: the file_path to load
    :return: the loaded values from the file
    """
    loaded = numpy.load(clean_path(file_path))
    if isinstance(loaded, numpy.ndarray):
        return loaded
    # npz archives load lazily; materialize the entries into an OrderedDict
    return OrderedDict((key, val) for key, val in loaded.items())
The provided code snippet includes necessary dependencies for implementing the `load_labeled_data` function. Write a Python function `def load_labeled_data( data: Union[str, Iterable[Union[str, numpy.ndarray, Dict[str, numpy.ndarray]]]], labels: Union[ None, str, Iterable[Union[str, numpy.ndarray, Dict[str, numpy.ndarray]]] ], raise_on_error: bool = True, ) -> List[ Tuple[ Union[numpy.ndarray, Dict[str, numpy.ndarray]], Union[None, numpy.ndarray, Dict[str, numpy.ndarray]], ] ]` to solve the following problem:
Load labels and data from disk or from memory and group them together. Assumes sorted ordering for on disk. Entries are matched positionally when a file glob is passed for either data and/or labels. :param data: the file glob, file path to numpy data tar ball, or list of arrays to use for data :param labels: the file glob, file path to numpy data tar ball, or list of arrays to use for labels, if any :param raise_on_error: True to raise on any error that occurs; False to log a warning, ignore, and continue :return: a list containing tuples of the data, labels. If labels was passed in as None, will now contain a None for the second index in each tuple
Here is the function:
def load_labeled_data(
    data: Union[str, Iterable[Union[str, numpy.ndarray, Dict[str, numpy.ndarray]]]],
    labels: Union[
        None, str, Iterable[Union[str, numpy.ndarray, Dict[str, numpy.ndarray]]]
    ],
    raise_on_error: bool = True,
) -> List[
    Tuple[
        Union[numpy.ndarray, Dict[str, numpy.ndarray]],
        Union[None, numpy.ndarray, Dict[str, numpy.ndarray]],
    ]
]:
    """
    Load labels and data from disk or from memory and group them together.
    Assumes sorted ordering for on disk. Will match between when a file glob is passed
    for either data and/or labels.

    :param data: the file glob, file path to numpy data tar ball, or list of arrays to
        use for data
    :param labels: the file glob, file path to numpy data tar ball, or list of arrays
        to use for labels, if any
    :param raise_on_error: True to raise on any error that occurs;
        False to log a warning, ignore, and continue
    :return: a list containing tuples of the data, labels. If labels was passed in
        as None, will now contain a None for the second index in each tuple
    """
    # a string means a glob / tarball path; expand it to a list of entries
    if isinstance(data, str):
        data = load_numpy_list(data)
    if labels is None:
        # pad with Nones so zip below pairs every data entry with a placeholder
        labels = [None for _ in range(len(data))]
    elif isinstance(labels, str):
        labels = load_numpy_list(labels)
    # NOTE(review): when labels is an empty (falsy) list the length check is
    # skipped and zip silently truncates to no pairs — confirm that is intended
    if len(data) != len(labels) and labels:
        # always raise this error, lengths must match
        raise ValueError(
            "len(data) given of {} does not match len(labels) given of {}".format(
                len(data), len(labels)
            )
        )
    labeled_data = []
    for dat, lab in zip(data, labels):
        try:
            # entries may still be file paths; lazily load them here
            if isinstance(dat, str):
                dat = load_numpy(dat)
            if lab is not None and isinstance(lab, str):
                lab = load_numpy(lab)
            labeled_data.append((dat, lab))
        except Exception as err:
            if raise_on_error:
                raise err
            else:
                # best-effort mode: log and skip the failing pair
                _LOGGER.error("Error creating labeled data: {}".format(err))
    return labeled_data
21,649 | import ast
import errno
import fnmatch
import importlib.metadata
import importlib.util
import json
import logging
import os
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from urllib.parse import urlparse
import numpy
from sparsezoo import Model
from sparsezoo.utils import load_numpy_list
def create_dirs(path: str):
    """
    Ensure the given directory path exists, creating intermediate directories
    as needed.

    :param path: the directory path to try and create
    """
    cleaned = clean_path(path)
    try:
        os.makedirs(cleaned)
    except OSError as err:
        # EEXIST means the directory was already there, which is fine;
        # any other OSError is unexpected and re-raised
        if err.errno != errno.EEXIST:
            raise
def _tensors_export_recursive(
    tensors: Union[numpy.ndarray, Iterable[numpy.ndarray]],
    export_dir: str,
    name_prefix: str,
    counter: int,
    exported_paths: List[str],
):
    """
    Recursively export a tensor or nested iterable of tensors to numbered
    files under export_dir, appending each written path to exported_paths.

    :param tensors: a single ndarray or an arbitrarily nested iterable of them
    :param export_dir: directory to write files into
    :param name_prefix: file name prefix; a 4-digit counter suffix is appended
    :param counter: starting index for the numbered file names
    :param exported_paths: accumulator list, mutated in place
    """
    if isinstance(tensors, numpy.ndarray):
        # tensor_export is presumably a sibling helper that writes one tensor
        # and returns its saved path — defined elsewhere in this module; confirm
        exported_paths.append(
            tensor_export(tensors, export_dir, "{}-{:04d}".format(name_prefix, counter))
        )
        return

    if isinstance(tensors, Dict):
        raise ValueError("tensors dictionary is not supported for non batch export")

    if isinstance(tensors, Iterable):
        # NOTE(review): counter + index only stays unique for flat iterables;
        # nested iterables of length > 1 could reuse indices — confirm intended
        for index, tens in enumerate(tensors):
            _tensors_export_recursive(
                tens,
                export_dir,
                name_prefix,
                counter + index,
                exported_paths,
            )
        return

    raise ValueError(
        "unrecognized type for tensors given of {}".format(tensors.__class__.__name__)
    )
def _tensors_export_batch(
    tensors: Union[numpy.ndarray, Dict[str, numpy.ndarray], Iterable[numpy.ndarray]],
    export_dir: str,
    name_prefix: str,
    counter: int,
    exported_paths: List[str],
):
    """
    Export tensors treating the leading dimension as a batch: each sample in
    the batch becomes its own numbered file, appended to exported_paths.

    :param tensors: an ndarray (iterated over axis 0), a dict of ndarrays
        (zipped per-sample across keys), or an iterable of ndarrays
        (zipped per-sample across elements)
    :param export_dir: directory to write files into
    :param name_prefix: file name prefix; a 4-digit counter suffix is appended
    :param counter: starting index for the numbered file names
    :param exported_paths: accumulator list, mutated in place
    """
    if isinstance(tensors, numpy.ndarray):
        # iterating an ndarray yields its slices along axis 0 (the batch dim)
        for index, tens in enumerate(tensors):
            exported_paths.append(
                # tensor_export: sibling helper writing a single tensor to disk
                # and returning its path — defined elsewhere in this module
                tensor_export(
                    tens, export_dir, "{}-{:04d}".format(name_prefix, counter + index)
                )
            )
        return

    if isinstance(tensors, Dict):
        tensors = OrderedDict([(key, val) for key, val in tensors.items()])
        keys = [key for key in tensors.keys()]
        # zip(*values) pulls out one sample per key at each step; each sample
        # is exported as a dict keyed the same way as the input
        for index, tens in enumerate(zip(*tensors.values())):
            tens = OrderedDict([(key, val) for key, val in zip(keys, tens)])
            exported_paths.append(
                tensor_export(
                    tens, export_dir, "{}-{:04d}".format(name_prefix, counter + index)
                )
            )
        return

    if isinstance(tensors, Iterable):
        # zip(*tensors) groups the i-th sample from each element together
        for index, tens in enumerate(zip(*tensors)):
            exported_paths.append(
                tensor_export(
                    tens, export_dir, "{}-{:04d}".format(name_prefix, counter + index)
                )
            )
        return

    raise ValueError(
        "unrecognized type for tensors given of {}".format(tensors.__class__.__name__)
    )
The provided code snippet includes necessary dependencies for implementing the `tensors_export` function. Write a Python function `def tensors_export( tensors: Union[numpy.ndarray, Dict[str, numpy.ndarray], Iterable[numpy.ndarray]], export_dir: str, name_prefix: str, counter: int = 0, break_batch: bool = False, ) -> List[str]` to solve the following problem:
:param tensors: the tensors to export to a saved numpy array file :param export_dir: the directory to export the files in :param name_prefix: the prefix name for the tensors to save as, will append info about the position of the tensor in a list or dict in addition to the .npy file format :param counter: the current counter to save the tensor at :param break_batch: treat the tensor as a batch and break apart into multiple tensors :return: the exported paths
Here is the function:
def tensors_export(
    tensors: Union[numpy.ndarray, Dict[str, numpy.ndarray], Iterable[numpy.ndarray]],
    export_dir: str,
    name_prefix: str,
    counter: int = 0,
    break_batch: bool = False,
) -> List[str]:
    """
    Export tensors to numbered numpy files on disk.

    :param tensors: the tensors to export to a saved numpy array file
    :param export_dir: the directory to export the files in
    :param name_prefix: the prefix name for the tensors to save as, will append
        info about the position of the tensor in a list or dict in addition
        to the .npy file format
    :param counter: the current counter to save the tensor at
    :param break_batch: treat the tensor as a batch and break apart into
        multiple tensors
    :return: the exported paths
    """
    create_dirs(export_dir)
    paths: List[str] = []
    # dispatch to the batch-splitting or recursive exporter; both append
    # the written file paths into `paths` in place
    exporter = _tensors_export_batch if break_batch else _tensors_export_recursive
    exporter(tensors, export_dir, name_prefix, counter, paths)
    return paths
21,650 | import ast
import errno
import fnmatch
import importlib.metadata
import importlib.util
import json
import logging
import os
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from urllib.parse import urlparse
import numpy
from sparsezoo import Model
from sparsezoo.utils import load_numpy_list
The provided code snippet includes necessary dependencies for implementing the `parse_optimization_str` function. Write a Python function `def parse_optimization_str(optim_full_name: str) -> Tuple[str, str, Any]` to solve the following problem:
:param optim_full_name: A name of a pretrained model optimization. i.e. 'pruned-moderate-deepsparse', 'pruned-aggressive', 'base' :return: A tuple representing the corresponding SparseZoo model sparse_name, sparse_category, and sparse_target values with appropriate defaults when not present.
Here is the function:
def parse_optimization_str(optim_full_name: str) -> Tuple[str, str, Any]:
    """
    :param optim_full_name: A name of a pretrained model optimization. i.e.
        'pruned-moderate-deepsparse', 'pruned-aggressive', 'base'
    :return: A tuple representing the corresponding SparseZoo model sparse_name,
        sparse_category, and sparse_target values with appropriate defaults when
        not present.
    """
    defaults = ["base", "none", None]
    parts = optim_full_name.split("-")
    # pad missing trailing fields with their positional defaults
    padded = parts + defaults[len(parts):]
    return tuple(padded[:3])
21,651 | import ast
import errno
import fnmatch
import importlib.metadata
import importlib.util
import json
import logging
import os
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from urllib.parse import urlparse
import numpy
from sparsezoo import Model
from sparsezoo.utils import load_numpy_list
The provided code snippet includes necessary dependencies for implementing the `json_to_jsonl` function. Write a Python function `def json_to_jsonl(json_file_path: str, overwrite: bool = True)` to solve the following problem:
Converts a json list file to jsonl file format (used for sharding efficiency) e.x. [{"a": 1}, {"a": 1}] would convert to: {"a": 1} {"a": 1} :param json_file_path: file path to a json file path containing a json list of objects :param overwrite: If True, the existing json file will be overwritten, if False, the file will have the same name but with a .jsonl extension
Here is the function:
def json_to_jsonl(json_file_path: str, overwrite: bool = True):
    """
    Converts a json list file to jsonl file format (used for sharding
    efficiency)

    e.x.
        [{"a": 1}, {"a": 1}]
    would convert to:
        {"a": 1}
        {"a": 1}

    :param json_file_path: file path to a json file path containing a json list
        of objects
    :param overwrite: If True, the existing json file will be overwritten, if
        False, the file will have the same name but with a .jsonl extension
    :raises ValueError: if the file does not have a .json extension or does not
        contain a json list
    """
    if not json_file_path.endswith(".json"):
        raise ValueError("json file must have .json extension")

    with open(json_file_path) as json_file:
        json_data = json.load(json_file)

    if not isinstance(json_data, list):
        raise ValueError(
            "Json data must be a list to convert to jsonl format. "
            f"found {type(json_data)}"
        )

    # note: with overwrite=True the output keeps the .json extension
    jsonl_file_path = json_file_path + ("" if overwrite else "l")
    with open(jsonl_file_path, "w") as jsonl_file:
        for json_line in json_data:
            json.dump(json_line, jsonl_file)  # one object per line
            jsonl_file.write("\n")
21,652 | import ast
import errno
import fnmatch
import importlib.metadata
import importlib.util
import json
import logging
import os
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from urllib.parse import urlparse
import numpy
from sparsezoo import Model
from sparsezoo.utils import load_numpy_list
_LOGGER = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `parse_kwarg_tuples` function. Write a Python function `def parse_kwarg_tuples(kwargs: tuple) -> Dict` to solve the following problem:
Convert a tuple of kwargs to a dict of kwargs. This function is used to enable the click parsing of kwargs. Example use: ``` @click.command( context_settings=dict( ignore_unknown_options=True) ) @click.argument(...) @click.option(...) ... @click.argument("kwargs", nargs=-1, type=click.UNPROCESSED) def main(..., kwargs): ... kwargs: Dict[str, Any] = parse_kwarg_tuples(kwargs: Tuple) ``` Example inputs, outputs: ``` input = ('--arg1', 1, 'arg2', 2, '-arg3', 3) output = parse_kwarg_tuples(input) output = {'arg1': 1, 'arg2': 2, 'arg3': 3} ``` ``` input = ('--arg1', 1, '--args1', 2 , 'arg2', 2, '-arg3', 3) output = parse_kwarg_tuples(input) output = {'arg1': [1, 2], 'arg2': 2, 'arg3': 3} ``` :param kwargs: The kwargs to convert. Should be a tuple of alternating kwargs names and kwargs values e.g.('--arg1', 1, 'arg2', 2, -arg3', 3). The names can optionally have a '-' or `--` in front of them. :return: The converted kwargs as a dict.
Here is the function:
def parse_kwarg_tuples(kwargs: tuple) -> Dict:
    """
    Convert a tuple of kwargs to a dict of kwargs.
    This function is used to enable the click parsing of kwargs.

    Example inputs, outputs:
    ```
    input = ('--arg1', 1, 'arg2', 2, '-arg3', 3)
    output = parse_kwarg_tuples(input)
    output = {'arg1': 1, 'arg2': 2, 'arg3': 3}
    ```
    ```
    input = ('--arg1', 1, '--arg1', 2 , 'arg2', 2, '-arg3', 3)
    output = parse_kwarg_tuples(input)
    output = {'arg1': [1, 2], 'arg2': 2, 'arg3': 3}
    ```

    :param kwargs: The kwargs to convert. Should be a tuple of alternating
        kwargs names and kwargs values e.g.('--arg1', 1, 'arg2', 2, -arg3', 3).
        The names can optionally have a '-' or `--` in front of them.
    :return: The converted kwargs as a dict.
    """
    if len(kwargs) == 0:
        return {}
    if len(kwargs) % 2 != 0:
        raise ValueError(
            "kwargs must be a tuple of alternating names and values "
            "i.e. the length of kwargs tuple must be even. Received "
            f"kwargs: {kwargs}"
        )
    # names are even indices, values are odd indices
    # strip any leading '-' or '--' from the names
    kwargs_names = [name.lstrip("-") for name in kwargs[0::2]]
    kwargs_values = list(kwargs[1::2])

    # values arrive as strings from the CLI; convert to their literal
    # Python type where possible, otherwise leave them as-is
    for i, value in enumerate(kwargs_values):
        try:
            kwargs_values[i] = ast.literal_eval(value)
        except Exception:
            # fixed: the original message was missing spaces between the
            # concatenated fragments ("typefrom", "willbe")
            _LOGGER.debug(
                "Failed to infer non-string type "
                f"from kwarg value: {value}. It will "
                "be left as a string."
            )

    processed_kwargs = {}
    for kwarg_name, kwarg_value in zip(kwargs_names, kwargs_values):
        if kwarg_name in processed_kwargs:
            # repeated flag: collect all of its values into a list
            if not isinstance(processed_kwargs[kwarg_name], list):
                processed_kwargs[kwarg_name] = [processed_kwargs[kwarg_name]]
            processed_kwargs[kwarg_name].append(kwarg_value)
        else:
            processed_kwargs[kwarg_name] = kwarg_value
    return processed_kwargs
21,653 | import ast
import errno
import fnmatch
import importlib.metadata
import importlib.util
import json
import logging
import os
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from urllib.parse import urlparse
import numpy
from sparsezoo import Model
from sparsezoo.utils import load_numpy_list
The provided code snippet includes necessary dependencies for implementing the `download_zoo_training_dir` function. Write a Python function `def download_zoo_training_dir(zoo_stub: str) -> str` to solve the following problem:
Helper function to download the training directory from a zoo stub, takes care of downloading the missing files in the training directory if any (This can happen if some subset of files in the training directory were downloaded before) :param zoo_stub: The zoo stub to download the training directory from :return: The path to the downloaded training directory
Here is the function:
def download_zoo_training_dir(zoo_stub: str) -> str:
    """
    Helper function to download the training directory from a zoo stub,
    taking care of downloading any missing files in the training directory
    (this can happen if some subset of files in the training directory
    were downloaded before).

    :param zoo_stub: The zoo stub to download the training directory from
    :return: The path to the downloaded training directory
    """
    model = Model(zoo_stub)
    training_dir = model.training.path
    # touch .path on every file so any not yet on disk get downloaded
    for training_file in model.training.files:
        training_file.path
    return training_dir
21,654 | import ast
import errno
import fnmatch
import importlib.metadata
import importlib.util
import json
import logging
import os
import sys
import warnings
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
from urllib.parse import urlparse
import numpy
from sparsezoo import Model
from sparsezoo.utils import load_numpy_list
_LOGGER = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `is_package_available` function. Write a Python function `def is_package_available( package_name: str, return_version: bool = False, ) -> Union[Tuple[bool, str], bool]` to solve the following problem:
A helper function to check if a package is available and optionally return its version. This function enforces a check that the package is available and is not just a directory/file with the same name as the package. inspired from: https://github.com/huggingface/transformers/blob/965cf677695dd363285831afca8cf479cf0c600c/src/transformers/utils/import_utils.py#L41 :param package_name: The package name to check for :param return_version: True to return the version of the package if available :return: True if the package is available, False otherwise or a tuple of (bool, version) if return_version is True
Here is the function:
def is_package_available(
    package_name: str,
    return_version: bool = False,
) -> Union[Tuple[bool, str], bool]:
    """
    A helper function to check if a package is available
    and optionally return its version. This function enforces
    a check that the package is available and is not
    just a directory/file with the same name as the package.

    inspired from:
    https://github.com/huggingface/transformers/blob/965cf677695dd363285831afca8cf479cf0c600c/src/transformers/utils/import_utils.py#L41

    :param package_name: The package name to check for
    :param return_version: True to return the version of
        the package if available
    :return: True if the package is available, False otherwise or a tuple of
        (bool, version) if return_version is True
    """
    found = importlib.util.find_spec(package_name) is not None
    package_version = "N/A"
    if found:
        # a resolvable spec alone could be a same-named directory; require
        # installed distribution metadata to confirm it is a real package
        try:
            package_version = importlib.metadata.version(package_name)
        except importlib.metadata.PackageNotFoundError:
            found = False
    _LOGGER.debug(f"Detected {package_name} version {package_version}")
    return (found, package_version) if return_version else found
21,655 | from typing import Callable, List
def _doc_merge(wrapped: Callable, wrapper: Callable):
    """Combine the docstrings of *wrapped* and *wrapper* onto *wrapper*.

    If the wrapped docstring ends with a ``:return`` line, the wrapper's
    lines are spliced in just before it so the return doc stays last.
    """
    wrapped_lines = _strip_doc_indent(wrapped.__doc__)
    wrapper_lines = _strip_doc_indent(wrapper.__doc__)
    if wrapped_lines and ":return" in wrapped_lines[-1]:
        merged = wrapped_lines[:-1] + wrapper_lines + [wrapped_lines[-1]]
    else:
        merged = wrapped_lines + wrapper_lines
    wrapper.__doc__ = "\n".join(merged)
The provided code snippet includes necessary dependencies for implementing the `wrapper_decorator` function. Write a Python function `def wrapper_decorator(wrapped: Callable)` to solve the following problem:
A wrapper decorator to be applied as a decorator to a function. Merges the decorated function properties with wrapped. :param wrapped: the wrapped function to merge decorations with :return: the decorator to apply to the function
Here is the function:
def wrapper_decorator(wrapped: Callable):
    """
    A wrapper decorator to be applied as a decorator to a function.
    Merges the decorated function properties with wrapped.

    :param wrapped: the wrapped function to merge decorations with
    :return: the decorator to apply to the function
    """

    def decorator(wrapper: Callable):
        # copy identity attributes straight over from the wrapped function
        for name in ("__module__", "__name__", "__qualname__"):
            setattr(wrapper, name, getattr(wrapped, name))
        # merge (rather than replace) the dict-like attributes
        for name in ("__dict__", "__annotations__"):
            getattr(wrapper, name).update(getattr(wrapped, name))
        _doc_merge(wrapped, wrapper)
        wrapper.__wrapped__ = wrapped
        return wrapper

    return decorator
21,656 | import os
The provided code snippet includes necessary dependencies for implementing the `default_dataset_path` function. Write a Python function `def default_dataset_path(name: str) -> str` to solve the following problem:
:param name: name of the dataset to get a path for :return: the default path to save the dataset at
Here is the function:
def default_dataset_path(name: str) -> str:
    """
    :param name: name of the dataset to get a path for
    :return: the default path to save the dataset at
    """
    # honor the env override when set and non-empty; note the fallback
    # keeps the literal "~" (callers are expected to expand it)
    base = os.getenv("NM_ML_DATASETS_PATH", "") or os.path.join(
        "~", ".cache", "nm_datasets"
    )
    return os.path.join(base, name)
21,657 | import math
import os
from datetime import datetime
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sparseml.keras.optim import ScheduledModifierManager
from sparseml.keras.utils.callbacks import LossesAndMetricsLoggingCallback
from sparseml.keras.utils.exporter import ModelExporter
from sparseml.keras.utils.logger import TensorBoardLogger
from sparsezoo import Model
# ensure the output directory for pruned checkpoints exists
# (pruned_model_dir is defined elsewhere in the script - see the
# fragment that joins root_dir with "pruned")
if not os.path.isdir(pruned_model_dir):
    os.makedirs(pruned_model_dir)
The provided code snippet includes necessary dependencies for implementing the `download_model_and_recipe` function. Write a Python function `def download_model_and_recipe(root_dir: str)` to solve the following problem:
Download pretrained model and a pruning recipe
Here is the function:
def download_model_and_recipe(root_dir: str):
    """
    Download pretrained model and a pruning recipe

    :param root_dir: local directory the SparseZoo model files are cached under
    :return: tuple of (path to the pretrained .h5 model file,
        path to the default pruning recipe file)
    :raises RuntimeError: if the downloaded checkpoint is missing or is not
        an .h5 file
    """
    # Use the recipe stub
    recipe_file_path = (
        "zoo:cv/classification/resnet_v1-20/keras/sparseml/cifar_10/pruned-conservative"
    )
    # Load base model to prune
    base_zoo_model = Model(recipe_file_path)
    base_zoo_model.path = os.path.join(root_dir, "resnet20_v1")
    checkpoint = base_zoo_model.training.default
    model_file_path = checkpoint.get_file("model.h5").path
    # NOTE: recipe_file_path is reused - from here on it points at the
    # local recipe file, not the zoo stub string above
    recipe_file_path = base_zoo_model.recipes.default.path
    if not os.path.exists(model_file_path) or not model_file_path.endswith(".h5"):
        raise RuntimeError("Model file not found: {}".format(model_file_path))
    return model_file_path, recipe_file_path
21,658 | import math
import os
from datetime import datetime
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sparseml.keras.optim import ScheduledModifierManager
from sparseml.keras.utils.callbacks import LossesAndMetricsLoggingCallback
from sparseml.keras.utils.exporter import ModelExporter
from sparseml.keras.utils.logger import TensorBoardLogger
from sparsezoo import Model
num_classes = 10  # CIFAR-10 has ten target classes; used for one-hot encoding
The provided code snippet includes necessary dependencies for implementing the `load_and_normalize_cifar10` function. Write a Python function `def load_and_normalize_cifar10(subtract_pixel_mean: bool = True)` to solve the following problem:
Load and normalize the Cifar-10 dataset
Here is the function:
def load_and_normalize_cifar10(subtract_pixel_mean: bool = True):
    """
    Load and normalize the Cifar-10 dataset

    :param subtract_pixel_mean: when True, center both splits by subtracting
        the per-pixel mean computed on the training split
    :return: tuple of ((X_train, y_train), (X_test, y_test)) with images
        scaled to [0, 1] and labels one-hot encoded using the module-level
        ``num_classes``
    """
    # Load the CIFAR10 data.
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    # Normalize data.
    X_train = X_train.astype("float32") / 255
    X_test = X_test.astype("float32") / 255
    # If subtract pixel mean is enabled
    if subtract_pixel_mean:
        # the train-split mean is applied to the test split as well,
        # so no test statistics leak into preprocessing
        X_train_mean = np.mean(X_train, axis=0)
        X_train -= X_train_mean
        X_test -= X_train_mean
    # Convert class vectors to binary class matrices.
    y_train = tf.keras.utils.to_categorical(y_train, num_classes)
    y_test = tf.keras.utils.to_categorical(y_test, num_classes)
    return (X_train, y_train), (X_test, y_test)
21,659 | import math
import os
from datetime import datetime
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sparseml.keras.optim import ScheduledModifierManager
from sparseml.keras.utils.callbacks import LossesAndMetricsLoggingCallback
from sparseml.keras.utils.exporter import ModelExporter
from sparseml.keras.utils.logger import TensorBoardLogger
from sparsezoo import Model
# directory where pruned checkpoints produced during training are written
pruned_model_dir = os.path.join(root_dir, "pruned")
if not os.path.isdir(pruned_model_dir):
    os.makedirs(pruned_model_dir)
The provided code snippet includes necessary dependencies for implementing the `model_checkpoint_callback` function. Write a Python function `def model_checkpoint_callback()` to solve the following problem:
Create model checkpoint callback
Here is the function:
def model_checkpoint_callback():
    """
    Build a Keras ModelCheckpoint that writes the best pruned model so far
    (by validation accuracy) into the module-level ``pruned_model_dir``.
    """
    filepath = os.path.join(
        pruned_model_dir, "resnet20_v1.pruned.{epoch:03d}.h5"
    )
    # only keep checkpoints that improve val_accuracy
    return ModelCheckpoint(
        filepath=filepath,
        monitor="val_accuracy",
        verbose=1,
        save_best_only=True,
    )
21,660 | import argparse
import inspect
import json
import math
import os
from typing import Optional, Tuple
import numpy
import tensorflow
from sparseml import get_main_logger
from sparseml.keras.datasets import Dataset, DatasetRegistry
from sparseml.keras.models import ModelRegistry
from sparseml.keras.optim import ScheduledModifierManager, remove_pruning_masks
from sparseml.keras.utils import (
LossesAndMetricsLoggingCallback,
ModelExporter,
TensorBoardLogger,
keras,
)
from sparseml.utils import create_dirs
# CLI sub-command names shared by the parser and the dispatch logic
TRAIN_COMMAND = "train"
EVAL_COMMAND = "evaluate"
EXPORT_COMMAND = "export"
def _cli_bool(value):
    """Parse a command-line boolean string into a bool.

    ``argparse`` with ``type=bool`` treats any non-empty string (including
    "False") as True; this converter handles the usual spellings explicitly.

    :param value: the raw CLI string (or an already-parsed bool)
    :raises argparse.ArgumentTypeError: if the value is not a recognized
        boolean spelling
    """
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ("true", "t", "yes", "y", "1"):
        return True
    if lowered in ("false", "f", "no", "n", "0"):
        return False
    raise argparse.ArgumentTypeError(
        "expected a boolean value, got {!r}".format(value)
    )


def parse_args():
    """Build the train/evaluate/export CLI and parse ``sys.argv``.

    :return: the parsed ``argparse.Namespace``; ``args.command`` carries the
        chosen sub-command name
    """
    parser = argparse.ArgumentParser(
        description="Run tasks on classification models and datasets "
        "using the sparseml API"
    )
    subparsers = parser.add_subparsers(dest="command")
    train_parser = subparsers.add_parser(
        TRAIN_COMMAND,
        description="Train and/or prune an image classification model",
    )
    eval_parser = subparsers.add_parser(
        EVAL_COMMAND,
        description="Evaluate an image classification model on a dataset",
    )
    export_parser = subparsers.add_parser(
        EXPORT_COMMAND,
        description="Export a model to onnx as well as "
        "store sample inputs, outputs, and labels",
    )
    parsers = [
        train_parser,
        eval_parser,
        export_parser,
    ]
    # arguments shared by every sub-command
    for par in parsers:
        # model args
        par.add_argument(
            "--arch-key",
            type=str,
            required=True,
            help="The type of model to use, ex: resnet50, vgg16, mobilenet "
            "put as help to see the full list (will raise an exception with the list)",
        )
        par.add_argument(
            "--pretrained",
            type=str,
            default=True,
            help="The type of pretrained weights to use, "
            "default is true to load the default pretrained weights for the model. "
            "Otherwise should be set to the desired weights type: "
            "[base, optim, optim-perf]. "
            "To not load any weights set to one of [none, false]",
        )
        par.add_argument(
            "--pretrained-dataset",
            type=str,
            default=None,
            help="The dataset to load pretrained weights for if pretrained is set. "
            "Default is None which will load the default dataset for the architecture."
            " Ex can be set to imagenet, cifar10, etc",
        )
        par.add_argument(
            "--checkpoint-path",
            type=str,
            default=None,
            help="A path to a previous checkpoint to load the state from and "
            "resume the state for. If provided, pretrained will be ignored",
        )
        par.add_argument(
            "--model-kwargs",
            type=json.loads,
            default={},
            help="keyword arguments to be passed to model constructor, should be "
            " given as a json object",
        )
        # dataset args
        par.add_argument(
            "--dataset",
            type=str,
            required=True,
            help="The dataset to use for training, "
            "ex: imagenet, imagenette, cifar10, etc. "
            "Set to imagefolder for a generic dataset setup "
            "with an image folder structure setup like imagenet or loadable by a "
            "dataset in sparseml.keras.datasets",
        )
        par.add_argument(
            "--dataset-path",
            type=str,
            required=True,
            help="The root path to where the dataset is stored",
        )
        par.add_argument(
            "--dataset-kwargs",
            type=json.loads,
            default={},
            help="keyword arguments to be passed to dataset constructor, should be "
            " given as a json object",
        )
        # logging and saving
        par.add_argument(
            "--model-tag",
            type=str,
            default=None,
            help="A tag to use for the model for saving results under save-dir, "
            "defaults to the model arch and dataset used",
        )
        par.add_argument(
            "--save-dir",
            type=str,
            default="keras_classification",
            help="The path to the directory for saving results",
        )
    # task specific arguments: training
    train_parser.add_argument(
        "--dataset-parallel-calls",
        type=int,
        default=4,
        help="the number of parallel workers for dataset loading",
    )
    train_parser.add_argument(
        "--train-shuffle-buffer-size",
        type=int,
        default=None,
        help="Shuffle buffer size for dataset loading",
    )
    train_parser.add_argument(
        "--train-prefetch-buffer-size",
        type=int,
        default=None,
        help="Prefetch buffer size for train dataset loading",
    )
    train_parser.add_argument(
        "--test-prefetch-buffer-size",
        type=int,
        default=None,
        help="Prefetch buffer size for test dataset loading",
    )
    train_parser.add_argument(
        "--recipe-path",
        type=str,
        default=None,
        help="The path to the yaml file containing the modifiers and "
        "schedule to apply them with. If set to 'transfer_learning', "
        "then will create a schedule to enable sparse transfer learning",
    )
    train_parser.add_argument(
        "--transfer-class-type",
        type=str,
        default="single",
        help="Type of target function: 'single' for softmax, 'multi' for sigmoid "
        "or 'linear' for linear function. This option takes effects only when the "
        "specified dataset is different from the pretrained dataset, implying a "
        "transfer learning usecase.",
    )
    train_parser.add_argument(
        "--transfer-top-layer",
        type=str,
        default=None,
        help="Name of the layer which a custom dense layer will be put on. Default "
        "to None to imply the layer right under the top of the original model "
        "will be used (in other words, layer[-2])",
    )
    train_parser.add_argument(
        "--train-batch-size",
        type=int,
        required=True,
        help="The batch size to use while training",
    )
    train_parser.add_argument(
        "--test-batch-size",
        type=int,
        required=False,
        help="The batch size to use while testing; default to the train " "batch size",
    )
    train_parser.add_argument(
        "--log-dir",
        type=str,
        default=os.path.join("keras_classification_train", "tensorboard-logs"),
        help="The path to the directory for saving logs",
    )
    # NOTE: boolean flags use _cli_bool; plain type=bool would parse any
    # non-empty string ("False" included) as True
    train_parser.add_argument(
        "--log-epoch",
        type=_cli_bool,
        default=True,
        help="Whether logging should be performed at the end of each epoch",
    )
    train_parser.add_argument(
        "--log-batch",
        type=_cli_bool,
        default=False,
        help="Whether logging should be performed at the end of each training " "batch",
    )
    train_parser.add_argument(
        "--log-steps",
        type=int,
        default=-1,
        help="Whether logging should be performed after every specified number "
        "of steps",
    )
    train_parser.add_argument(
        "--save-best-only",
        type=_cli_bool,
        default=True,
        help="Save model only with better monitored metric",
    )
    train_parser.add_argument(
        "--optim",
        type=str,
        default="SGD",
        help="The optimizer type to use, e.g., 'Adam', 'SGD' etc",
    )
    train_parser.add_argument(
        "--optim-args",
        type=json.loads,
        default={"momentum": 0.9, "nesterov": True},
        help="Additional args to be passed to the optimizer passed in"
        " as a json object",
    )
    train_parser.add_argument(
        "--run-eagerly",
        type=_cli_bool,
        default=True,
        help="Run training in eager execution mode",
    )
    # task specific arguments: evaluation
    eval_parser.add_argument(
        "--test-batch-size",
        type=int,
        required=False,
        help="The batch size to use while testing; default to the train " "batch size",
    )
    eval_parser.add_argument(
        "--dataset-parallel-calls",
        type=int,
        default=4,
        help="the number of parallel workers for dataset loading",
    )
    eval_parser.add_argument(
        "--test-prefetch-buffer-size",
        type=int,
        default=None,
        help="Prefetch buffer size for test dataset loading",
    )
    # task specific arguments: export
    export_parser.add_argument(
        "--num-samples",
        type=int,
        default=100,
        help="The number of samples to export along with the model onnx "
        "and pth files (sample inputs and labels as well as the outputs "
        "from model execution)",
    )
    export_parser.add_argument(
        "--onnx-opset",
        type=int,
        default=11,
        help="The onnx opset to use for export. Default is 11",
    )
    export_parser.add_argument(
        "--export-debug-mode",
        type=_cli_bool,
        default=False,
        help="The debug mode for ONNX export (passed into keras2onnx). "
        "Default to False.",
    )
    return parser.parse_args()
21,661 | import argparse
import inspect
import json
import math
import os
from typing import Optional, Tuple
import numpy
import tensorflow
from sparseml import get_main_logger
from sparseml.keras.datasets import Dataset, DatasetRegistry
from sparseml.keras.models import ModelRegistry
from sparseml.keras.optim import ScheduledModifierManager, remove_pruning_masks
from sparseml.keras.utils import (
LossesAndMetricsLoggingCallback,
ModelExporter,
TensorBoardLogger,
keras,
)
from sparseml.utils import create_dirs
def setup_save_and_log_dirs(args) -> Tuple[str, Optional[str]]:
    """
    Create and return the model save directory and, for the train command,
    the TensorBoard logging directory.

    :param args: parsed CLI arguments; uses ``save_dir``, ``model_tag``,
        ``arch_key``, ``dataset``, ``command`` and ``log_dir``
    :return: tuple of (save_dir, log_dir); log_dir is None unless the
        train command is running
    """
    # Saving dir setup
    save_dir = os.path.abspath(os.path.expanduser(args.save_dir))
    if not args.model_tag:
        # auto-generate a unique tag (arch@dataset), suffixing __NN on collision
        model_tag = "{}@{}".format(args.arch_key, args.dataset)
        model_id = model_tag
        model_inc = 0
        while os.path.exists(os.path.join(save_dir, model_id)):
            model_inc += 1
            model_id = "{}__{:02d}".format(model_tag, model_inc)
    else:
        model_id = args.model_tag
    save_dir = os.path.join(save_dir, model_id)
    create_dirs(save_dir)
    LOGGER.info("Model directory is set to {}".format(save_dir))
    # log dir setup: only the train command logs to TensorBoard
    # (previously log_dir was computed twice - a conditional expression
    # immediately overwritten by an identical if/else; collapsed here)
    if args.command == TRAIN_COMMAND:
        log_dir = os.path.abspath(os.path.expanduser(args.log_dir))
        log_dir = os.path.join(log_dir, model_id)
        create_dirs(log_dir)
        LOGGER.info("Logging directory is set to {}".format(log_dir))
    else:
        log_dir = None
    return save_dir, log_dir
def create_dataset(
    args, train: bool, image_size: Tuple[int, int]
) -> Tuple[Dataset, int]:
    """
    Instantiate the requested dataset split via the DatasetRegistry and
    determine its class count.

    :param args: parsed CLI arguments (uses dataset, dataset_path,
        dataset_kwargs)
    :param train: True for the training split, False for validation
    :param image_size: (height, width) to resize images to
    :return: tuple of (dataset, num_classes)
    """
    kwargs = args.dataset_kwargs
    dataset = DatasetRegistry.create(
        args.dataset,
        root=args.dataset_path,
        train=train,
        image_size=image_size,
        **kwargs,
    )
    LOGGER.info(
        "created {} dataset: {}, images to resize to {}".format(
            "train" if train else "val", dataset, image_size
        )
    )
    # get num_classes: generic image folders carry their own class count;
    # registered datasets declare it in their registry attributes
    if args.dataset == "imagefolder":
        num_classes = dataset.num_classes
    else:
        dataset_attributes = DatasetRegistry.attributes(args.dataset)
        num_classes = dataset_attributes["num_classes"]
    return dataset, num_classes
def build_dataset(
    args, dataset: Dataset, train: bool = True
) -> tensorflow.data.Dataset:
    """
    Materialize a tf.data pipeline from a sparseml Dataset wrapper, using
    the batch/shuffle/prefetch sizes given on the command line.

    :param args: parsed CLI arguments
    :param dataset: the dataset wrapper to build from
    :param train: True for the training split, False for the test split
    :return: the built tensorflow dataset
    """
    if train:
        batch_size = args.train_batch_size
        # default: shuffle over the entire training set
        shuffle_buffer_size = (
            args.train_shuffle_buffer_size or dataset.num_images
        )
        prefetch_buffer_size = (
            args.train_prefetch_buffer_size or batch_size * 8
        )
    else:
        # test batch size falls back to the train batch size
        batch_size = args.test_batch_size or args.train_batch_size
        shuffle_buffer_size = None
        prefetch_buffer_size = (
            args.test_prefetch_buffer_size or batch_size * 8
        )
    return dataset.build(
        batch_size,
        repeat_count=1,
        shuffle_buffer_size=shuffle_buffer_size,
        prefetch_buffer_size=prefetch_buffer_size,
        num_parallel_calls=args.dataset_parallel_calls,
    )
def create_model(args, input_shape, num_classes):
    """
    Create (or load) the Keras model to run, optionally re-heading it for
    transfer learning when the target dataset differs from the pretraining
    dataset.

    :param args: parsed CLI arguments (arch_key, pretrained, checkpoint_path,
        transfer_* options, model_kwargs, ...)
    :param input_shape: the model input shape, e.g. (224, 224, 3)
    :param num_classes: number of output classes for the (new) head
    :return: the constructed keras Model
    :raises RuntimeError: if the SparseZoo download yields no model files
    :raises ValueError: on an unknown transfer_class_type or a missing
        transfer_top_layer name
    """
    kwargs = args.model_kwargs
    if args.checkpoint_path == "zoo":
        # load the pretrained checkpoint straight from the SparseZoo
        zoo_model = ModelRegistry.create_zoo_model(
            args.arch_key, args.pretrained, args.pretrained_dataset
        )
        model_file_paths = zoo_model.download_framework_files(extensions=[".h5"])
        if not model_file_paths:
            # fall back to the SavedModel format when no .h5 is published
            model_file_paths = zoo_model.download_framework_files(extensions=[".tf"])
        if not model_file_paths:
            raise RuntimeError("Error downloading model from SparseZoo")
        model_file_path = model_file_paths[0]
        model = keras.models.load_model(model_file_path)
    else:
        model = ModelRegistry.create(
            args.arch_key,
            args.pretrained,
            args.checkpoint_path,
            args.pretrained_dataset,
            input_shape=input_shape,
            **kwargs,
        )
    if (
        args.pretrained_dataset is not None
        and args.dataset is not None
        and args.pretrained_dataset != args.dataset
    ):
        # Set up the model for fine-tuning.
        # The trainability of model's parameters should be controlled
        # through recipe; we therefore do not set "model.trainable" to
        # False here.
        if args.transfer_class_type == "single":
            activation = "softmax"
        elif args.transfer_class_type == "multi":
            activation = "sigmoid"
        elif args.transfer_class_type == "linear":
            activation = None
        else:
            # fixed: message previously listed 'softmax' instead of 'multi'
            raise ValueError(
                "Unknown transfer_class_type; expected: 'single', 'multi' or 'linear'"
            )
        if args.transfer_top_layer is not None:
            transfer_top_layer = None
            for i in list(range(len(model.layers)))[::-1]:
                if model.layers[i].name == args.transfer_top_layer:
                    # fixed: keep the layer object itself; previously the
                    # name string was stored, which breaks ".output" below
                    transfer_top_layer = model.layers[i]
                    break
            if transfer_top_layer is None:
                raise ValueError(
                    "Top layer for transfer learning {} not found".format(
                        args.transfer_top_layer
                    )
                )
        else:
            # default: attach the new head right below the original top layer
            transfer_top_layer = model.layers[-2]
        new_output = keras.layers.Dense(num_classes, activation=activation)(
            transfer_top_layer.output
        )
        model = keras.Model(model.input, new_output)
    return model
def create_optimizer(args):
    """
    Instantiate the Keras optimizer named by ``args.optim``, forwarding
    ``args.optim_args`` as its constructor keyword arguments.

    :raises ValueError: if the named optimizer is not a keras optimizer class
    """
    available = {}
    for attr_name in dir(keras.optimizers):
        candidate = getattr(keras.optimizers, attr_name)
        # collect concrete optimizer classes, skipping the abstract base
        if inspect.isclass(candidate) and attr_name != "Optimizer":
            available[attr_name] = candidate
    if args.optim not in available:
        raise ValueError(
            "Unknown optimizer {}. Supported: {}".format(
                args.optim, list(available.keys())
            )
        )
    return available[args.optim](**args.optim_args)
def get_logging_update_freq(args):
    """
    Resolve the TensorBoard update frequency from the CLI flags, in
    priority order: explicit step count, then per-batch, then per-epoch.

    :return: an int step count, "batch", "epoch", or None when disabled
    """
    if args.log_steps > 0:
        return args.log_steps
    if args.log_batch:
        return "batch"
    if args.log_epoch:
        return "epoch"
    return None
def handle_train_command(args):
    """
    Train (and optionally prune, per the recipe) an image-classification
    model: builds the train/val datasets, model, optimizer, and callbacks,
    applies the sparsification recipe, then runs keras fit.

    :param args: parsed CLI arguments for the train sub-command
    """
    save_dir, log_dir = setup_save_and_log_dirs(args)
    input_shape = ModelRegistry.input_shape(args.arch_key)
    image_size = (input_shape[0], input_shape[1])
    # Load datasets (num_images is read before build consumes the wrapper)
    train_dataset, num_classes = create_dataset(args, train=True, image_size=image_size)
    num_train_images = train_dataset.num_images
    train_dataset = build_dataset(args, train_dataset, train=True)
    val_dataset, _ = create_dataset(args, train=False, image_size=image_size)
    val_dataset = build_dataset(args, val_dataset, train=False)
    # Create model
    model = create_model(args, input_shape, num_classes=num_classes)
    # Create optimizer
    optimizer = create_optimizer(args)
    # Logging: log_dir is only set for the train command
    if log_dir:
        update_freq = get_logging_update_freq(args)
        if update_freq is None:
            raise ValueError(
                "Logging requires update frequency to take effect; use either "
                "'log-epoch', 'log-batch' or 'log-steps' option."
            )
        # NOTE(review): "loggers" holds a single logger object here but an
        # empty list otherwise - presumably manager.modify accepts both;
        # confirm against the ScheduledModifierManager API
        loggers = TensorBoardLogger(log_dir=log_dir, update_freq=update_freq)
    else:
        loggers = []
    # Model saving: keep only checkpoints that improve val_accuracy
    checkpoint_filepath = os.path.join(
        save_dir, "model.{epoch:02d}-{val_accuracy:.2f}.tf"
    )
    checkpoint_callback = keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_filepath,
        save_weights_only=False,
        monitor="val_accuracy",
        mode="max",
        save_best_only=True,
        verbose=1,
    )
    # Manager: load the sparsification recipe
    manager = ScheduledModifierManager.from_yaml(args.recipe_path)
    # Enhance model: the manager wraps model/optimizer and returns the
    # callbacks that drive the recipe schedule during fit
    steps_per_epoch = math.ceil(num_train_images / args.train_batch_size)
    model, optimizer, callbacks = manager.modify(
        model, optimizer, steps_per_epoch, loggers=loggers
    )
    if loggers:
        callbacks.append(LossesAndMetricsLoggingCallback(loggers))
    callbacks.append(checkpoint_callback)
    model.compile(
        loss=keras.losses.categorical_crossentropy,
        optimizer=optimizer,
        metrics=["accuracy"],
        run_eagerly=args.run_eagerly,
    )
    model.fit(
        train_dataset,
        epochs=manager.max_epochs,
        callbacks=callbacks,
        validation_data=val_dataset,
    )
21,662 | import argparse
import inspect
import json
import math
import os
from typing import Optional, Tuple
import numpy
import tensorflow
from sparseml import get_main_logger
from sparseml.keras.datasets import Dataset, DatasetRegistry
from sparseml.keras.models import ModelRegistry
from sparseml.keras.optim import ScheduledModifierManager, remove_pruning_masks
from sparseml.keras.utils import (
LossesAndMetricsLoggingCallback,
ModelExporter,
TensorBoardLogger,
keras,
)
from sparseml.utils import create_dirs
def create_dataset(
args, train: bool, image_size: Tuple[int, int]
) -> Tuple[Dataset, int]:
def build_dataset(
args, dataset: Dataset, train: bool = True
) -> tensorflow.data.Dataset:
def create_model(args, input_shape, num_classes):
def handle_eval_command(args):
    """
    Evaluate a (possibly pruned) image-classification model on the
    validation split and print the resulting metrics dict.
    """
    input_shape = ModelRegistry.input_shape(args.arch_key)
    image_size = (input_shape[0], input_shape[1])
    # validation data only - no training split is needed for evaluation
    val_dataset, num_classes = create_dataset(args, train=False, image_size=image_size)
    val_dataset = build_dataset(args, val_dataset, train=False)
    # model under evaluation
    model = create_model(args, input_shape, num_classes=num_classes)
    results = model.evaluate(val_dataset)
    print(dict(zip(model.metrics_names, results)))
21,663 | import argparse
import inspect
import json
import math
import os
from typing import Optional, Tuple
import numpy
import tensorflow
from sparseml import get_main_logger
from sparseml.keras.datasets import Dataset, DatasetRegistry
from sparseml.keras.models import ModelRegistry
from sparseml.keras.optim import ScheduledModifierManager, remove_pruning_masks
from sparseml.keras.utils import (
LossesAndMetricsLoggingCallback,
ModelExporter,
TensorBoardLogger,
keras,
)
from sparseml.utils import create_dirs
def create_dataset(
    args, train: bool, image_size: Tuple[int, int]
) -> Tuple[Dataset, int]:
    """
    Instantiate the requested dataset split via the DatasetRegistry and
    determine its class count.

    :param args: parsed CLI arguments (uses dataset, dataset_path,
        dataset_kwargs)
    :param train: True for the training split, False for validation
    :param image_size: (height, width) to resize images to
    :return: tuple of (dataset, num_classes)
    """
    kwargs = args.dataset_kwargs
    dataset = DatasetRegistry.create(
        args.dataset,
        root=args.dataset_path,
        train=train,
        image_size=image_size,
        **kwargs,
    )
    LOGGER.info(
        "created {} dataset: {}, images to resize to {}".format(
            "train" if train else "val", dataset, image_size
        )
    )
    # get num_classes: generic image folders carry their own class count;
    # registered datasets declare it in their registry attributes
    if args.dataset == "imagefolder":
        num_classes = dataset.num_classes
    else:
        dataset_attributes = DatasetRegistry.attributes(args.dataset)
        num_classes = dataset_attributes["num_classes"]
    return dataset, num_classes
def create_model(args, input_shape, num_classes):
    """
    Create (or load) the Keras model to run, optionally re-heading it for
    transfer learning when the target dataset differs from the pretraining
    dataset.

    :param args: parsed CLI arguments (arch_key, pretrained, checkpoint_path,
        transfer_* options, model_kwargs, ...)
    :param input_shape: the model input shape, e.g. (224, 224, 3)
    :param num_classes: number of output classes for the (new) head
    :return: the constructed keras Model
    :raises RuntimeError: if the SparseZoo download yields no model files
    :raises ValueError: on an unknown transfer_class_type or a missing
        transfer_top_layer name
    """
    kwargs = args.model_kwargs
    if args.checkpoint_path == "zoo":
        # load the pretrained checkpoint straight from the SparseZoo
        zoo_model = ModelRegistry.create_zoo_model(
            args.arch_key, args.pretrained, args.pretrained_dataset
        )
        model_file_paths = zoo_model.download_framework_files(extensions=[".h5"])
        if not model_file_paths:
            # fall back to the SavedModel format when no .h5 is published
            model_file_paths = zoo_model.download_framework_files(extensions=[".tf"])
        if not model_file_paths:
            raise RuntimeError("Error downloading model from SparseZoo")
        model_file_path = model_file_paths[0]
        model = keras.models.load_model(model_file_path)
    else:
        model = ModelRegistry.create(
            args.arch_key,
            args.pretrained,
            args.checkpoint_path,
            args.pretrained_dataset,
            input_shape=input_shape,
            **kwargs,
        )
    if (
        args.pretrained_dataset is not None
        and args.dataset is not None
        and args.pretrained_dataset != args.dataset
    ):
        # Set up the model for fine-tuning.
        # The trainability of model's parameters should be controlled
        # through recipe; we therefore do not set "model.trainable" to
        # False here.
        if args.transfer_class_type == "single":
            activation = "softmax"
        elif args.transfer_class_type == "multi":
            activation = "sigmoid"
        elif args.transfer_class_type == "linear":
            activation = None
        else:
            # fixed: message previously listed 'softmax' instead of 'multi'
            raise ValueError(
                "Unknown transfer_class_type; expected: 'single', 'multi' or 'linear'"
            )
        if args.transfer_top_layer is not None:
            transfer_top_layer = None
            for i in list(range(len(model.layers)))[::-1]:
                if model.layers[i].name == args.transfer_top_layer:
                    # fixed: keep the layer object itself; previously the
                    # name string was stored, which breaks ".output" below
                    transfer_top_layer = model.layers[i]
                    break
            if transfer_top_layer is None:
                raise ValueError(
                    "Top layer for transfer learning {} not found".format(
                        args.transfer_top_layer
                    )
                )
        else:
            # default: attach the new head right below the original top layer
            transfer_top_layer = model.layers[-2]
        new_output = keras.layers.Dense(num_classes, activation=activation)(
            transfer_top_layer.output
        )
        model = keras.Model(model.input, new_output)
    return model
def handle_export_command(args):
    """
    Export the model to ONNX and save one batch of sample inputs, outputs,
    and labels as .npy files under ``args.save_dir``.
    """
    input_shape = ModelRegistry.input_shape(args.arch_key)
    image_size = (input_shape[0], input_shape[1])
    sample_dataset, num_classes = create_dataset(
        args, train=True, image_size=image_size
    )
    sample_dataset = sample_dataset.build(args.num_samples)
    model = create_model(args, input_shape, num_classes=num_classes)
    # pruning masks are training-time artifacts; strip them before export
    model = remove_pruning_masks(model)
    exporter = ModelExporter(model, args.save_dir)
    exporter.export_onnx(opset=args.onnx_opset, debug_mode=args.export_debug_mode)
    # persist a single batch of samples alongside the exported model
    for img_batch, label_batch in sample_dataset.take(1):
        output_batch = model(img_batch)
        numpy.save(os.path.join(args.save_dir, "inputs.npy"), img_batch)
        numpy.save(os.path.join(args.save_dir, "outputs.npy"), output_batch)
        numpy.save(os.path.join(args.save_dir, "labels.npy"), label_batch)
21,664 | import json
from dataclasses import dataclass, field
from typing import Any, Optional, Tuple
from torch.nn import Module
from torch.utils.data import DataLoader
from tqdm import tqdm
import utils
from argparser_.nm_argparser_ import NmArgumentParser
from sparseml import get_main_logger
from sparseml.pytorch.models import ModelRegistry
from sparseml.pytorch.opset import TORCH_DEFAULT_ONNX_OPSET
from sparseml.pytorch.utils import ModuleExporter
from sparseml.utils import convert_to_bool
LOGGER = get_main_logger()  # module-level logger shared by this script
class ExportArgs:
"""
Represents the arguments we use in our PyTorch integration scripts for
exporting tasks
Using :class:`NmArgumentParser` we can turn this class into `argparse
<https://docs.python.org/3/library/argparse.html#module-argparse>`__
arguments that can be specified on the command line.
:param arch_key: A str key representing the type of model to use,
ex:resnet50.
:param dataset: The dataset to use for analysis, ex imagenet, imagenette,
etc; Set to `imagefolder` for a custom dataset.
:param dataset_path: Root path to dataset location.
:param checkpoint_path: A path to a previous checkpoint to load the state
from and resume the state for; Also works with SparseZoo recipes;
Set to zoo to automatically download and load weights associated with a
recipe.
:param num_samples: The number of samples to export along with the model
onnx and pth files (sample inputs and labels as well as the outputs
from model execution). Default is 100.
:param onnx_opset: The onnx opset to use for export. Default is 11.
:param use_zipfile_serialization_if_available: for torch >= 1.6.0 only
exports the Module's state dict using the new zipfile serialization.
Default is True, has no affect on lower torch versions.
:param pretrained: The type of pretrained weights to use,
default is true to load the default pretrained weights for the model.
Otherwise should be set to the desired weights type: [base, optim,
optim-perf]. To not load any weights set to one of [none, false].
:param pretrained_dataset: The dataset to load pretrained weights for if
pretrained is set. Default is None which will load the default
dataset for the architecture. Ex can be set to imagenet, cifar10, etc.
:param model_kwargs: Keyword arguments to be passed to model constructor,
should be given as a json object.
:param dataset_kwargs: Keyword arguments to be passed to dataset
constructor, should be given as a json object.
:param model_tag: A tag to use for the model for saving results under
save-dir, defaults to the model arch and dataset used.
:param save_dir: The path to the directory for saving results.
"""
arch_key: str = field(
metadata={
"help": "The type of model to use, ex: resnet50, vgg16, mobilenet "
"put as help to see the full list (will raise an exception "
"with the list)",
}
)
dataset: str = field(
metadata={
"help": "The dataset to use for exporting, "
"ex: imagenet, imagenette, cifar10, etc. "
"Set to imagefolder for a generic dataset setup "
"with an image folder structure setup like imagenet or "
"loadable by a dataset in sparseml.pytorch.datasets"
}
)
dataset_path: str = field(
metadata={
"help": "The root path to where the dataset is stored",
}
)
checkpoint_path: str = field(
default=None,
metadata={
"help": "A path to a previous checkpoint to load the state from "
"and resume the state for. If provided, pretrained will "
"be ignored . If using a SparseZoo recipe, can also "
"provide 'zoo' to load the base weights associated with "
"that recipe"
},
)
num_samples: int = field(
default=100,
metadata={
"help": "The number of samples to export along with the model onnx "
"and pth files (sample inputs and labels as well as the "
"outputs from model execution)"
},
)
onnx_opset: int = field(
default=TORCH_DEFAULT_ONNX_OPSET,
metadata={
"help": "The onnx opset to use for export. "
f"Default is {TORCH_DEFAULT_ONNX_OPSET}"
},
)
use_zipfile_serialization_if_available: convert_to_bool = field(
default=True,
metadata={
"help": "for torch >= 1.6.0 only exports the Module's state dict "
"using the new zipfile serialization. Default is True, "
"has no affect on lower torch versions"
},
)
pretrained: str = field(
default=True,
metadata={
"help": "The type of pretrained weights to use, "
"default is true to load the default pretrained weights for "
"the model. Otherwise should be set to the desired weights "
"type: [base, optim, optim-perf]. To not load any weights "
"set to one of [none, false]"
},
)
pretrained_dataset: str = field(
default=None,
metadata={
"help": "The dataset to load pretrained weights for if pretrained "
"is set. Default is None which will load the default "
"dataset for the architecture. Ex can be set to imagenet, "
"cifar10, etc",
},
)
model_kwargs: json.loads = field(
default_factory=lambda: {},
metadata={
"help": "Keyword arguments to be passed to model constructor, should "
"be given as a json object"
},
)
dataset_kwargs: json.loads = field(
default_factory=lambda: {},
metadata={
"help": "Keyword arguments to be passed to dataset constructor, "
"should be given as a json object",
},
)
model_tag: str = field(
default=None,
metadata={
"help": "A tag to use for the model for saving results under save-dir, "
"defaults to the model arch and dataset used",
},
)
save_dir: str = field(
default="pytorch_vision",
metadata={
"help": "The path to the directory for saving results",
},
)
def __post_init__(self):
    """
    Post-construction hook: derive detection preprocessing defaults and
    initialize single-process (non-DDP) state for the export task.
    """
    # For detection datasets (COCO/VOC), pick the preprocessing pipeline that
    # matches the architecture unless the caller supplied one explicitly.
    if "preprocessing_type" not in self.dataset_kwargs and (
        "coco" in self.dataset.lower() or "voc" in self.dataset.lower()
    ):
        if "ssd" in self.arch_key.lower():
            self.dataset_kwargs["preprocessing_type"] = "ssd"
        elif "yolo" in self.arch_key.lower():
            self.dataset_kwargs["preprocessing_type"] = "yolo"
    # Export always runs single-process: mark as the main (and only) process.
    self.local_rank: int = -1
    self.is_main_process: bool = True
The provided code snippet includes necessary dependencies for implementing the `export` function. Write a Python function `def export( args: ExportArgs, model: Module, val_loader: DataLoader, save_dir: str ) -> None` to solve the following problem:
Utility method to export the model and data :param args : An ExportArgs object containing config for export task. :param model: loaded model architecture to export :param val_loader: A DataLoader for validation data :param save_dir: Directory to store checkpoints at during exporting process
Here is the function:
def export(
    args: ExportArgs, model: Module, val_loader: DataLoader, save_dir: str
) -> None:
    """
    Utility method to export the model and data

    :param args : An ExportArgs object containing config for export task.
    :param model: loaded model architecture to export
    :param val_loader: A DataLoader for validation data
    :param save_dir: Directory to store checkpoints at during exporting process
    """
    exporter = ModuleExporter(model, save_dir)

    # persist the PyTorch state dict first
    LOGGER.info(f"exporting pytorch in {save_dir}")
    exporter.export_pytorch(
        use_zipfile_serialization_if_available=(
            args.use_zipfile_serialization_if_available
        )
    )

    expected_total = args.num_samples if args.num_samples > 1 else 1
    onnx_done = False
    for batch_idx, data in tqdm(
        enumerate(val_loader), desc="Exporting samples", total=expected_total
    ):
        if not onnx_done:
            # freeze the ONNX graph from the first sample only
            LOGGER.info(f"exporting onnx in {save_dir}")
            exporter.export_onnx(data[0], opset=args.onnx_opset, convert_qat=True)
            onnx_done = True
        if args.num_samples > 0:
            exporter.export_samples(
                sample_batches=[data[0]],
                sample_labels=[data[1]],
                exp_counter=batch_idx,
            )
21,665 | import json
from dataclasses import dataclass, field
from typing import Any, Optional, Tuple
from torch.nn import Module
from torch.utils.data import DataLoader
from tqdm import tqdm
import utils
from argparser_.nm_argparser_ import NmArgumentParser
from sparseml import get_main_logger
from sparseml.pytorch.models import ModelRegistry
from sparseml.pytorch.opset import TORCH_DEFAULT_ONNX_OPSET
from sparseml.pytorch.utils import ModuleExporter
from sparseml.utils import convert_to_bool
CURRENT_TASK = utils.Tasks.EXPORT
# NOTE(review): fields below use dataclasses.field(...), so this class is
# presumably decorated with @dataclass at its definition site — confirm.
class ExportArgs:
    """
    Represents the arguments we use in our PyTorch integration scripts for
    exporting tasks

    Using :class:`NmArgumentParser` we can turn this class into `argparse
    <https://docs.python.org/3/library/argparse.html#module-argparse>`__
    arguments that can be specified on the command line.

    :param arch_key: A str key representing the type of model to use,
        ex:resnet50.
    :param dataset: The dataset to use for analysis, ex imagenet, imagenette,
        etc; Set to `imagefolder` for a custom dataset.
    :param dataset_path: Root path to dataset location.
    :param checkpoint_path: A path to a previous checkpoint to load the state
        from and resume the state for; Also works with SparseZoo recipes;
        Set to zoo to automatically download and load weights associated with a
        recipe.
    :param num_samples: The number of samples to export along with the model
        onnx and pth files (sample inputs and labels as well as the outputs
        from model execution). Default is 100.
    :param onnx_opset: The onnx opset to use for export. Default is 11.
    :param use_zipfile_serialization_if_available: for torch >= 1.6.0 only
        exports the Module's state dict using the new zipfile serialization.
        Default is True, has no affect on lower torch versions.
    :param pretrained: The type of pretrained weights to use,
        default is true to load the default pretrained weights for the model.
        Otherwise should be set to the desired weights type: [base, optim,
        optim-perf]. To not load any weights set to one of [none, false].
    :param pretrained_dataset: The dataset to load pretrained weights for if
        pretrained is set. Default is None which will load the default
        dataset for the architecture. Ex can be set to imagenet, cifar10, etc.
    :param model_kwargs: Keyword arguments to be passed to model constructor,
        should be given as a json object.
    :param dataset_kwargs: Keyword arguments to be passed to dataset
        constructor, should be given as a json object.
    :param model_tag: A tag to use for the model for saving results under
        save-dir, defaults to the model arch and dataset used.
    :param save_dir: The path to the directory for saving results.
    """

    arch_key: str = field(
        metadata={
            "help": "The type of model to use, ex: resnet50, vgg16, mobilenet "
            "put as help to see the full list (will raise an exception "
            "with the list)",
        }
    )
    dataset: str = field(
        metadata={
            "help": "The dataset to use for exporting, "
            "ex: imagenet, imagenette, cifar10, etc. "
            "Set to imagefolder for a generic dataset setup "
            "with an image folder structure setup like imagenet or "
            "loadable by a dataset in sparseml.pytorch.datasets"
        }
    )
    dataset_path: str = field(
        metadata={
            "help": "The root path to where the dataset is stored",
        }
    )
    checkpoint_path: str = field(
        default=None,
        metadata={
            "help": "A path to a previous checkpoint to load the state from "
            "and resume the state for. If provided, pretrained will "
            "be ignored . If using a SparseZoo recipe, can also "
            "provide 'zoo' to load the base weights associated with "
            "that recipe"
        },
    )
    num_samples: int = field(
        default=100,
        metadata={
            "help": "The number of samples to export along with the model onnx "
            "and pth files (sample inputs and labels as well as the "
            "outputs from model execution)"
        },
    )
    onnx_opset: int = field(
        default=TORCH_DEFAULT_ONNX_OPSET,
        metadata={
            "help": "The onnx opset to use for export. "
            f"Default is {TORCH_DEFAULT_ONNX_OPSET}"
        },
    )
    # NOTE(review): the annotation doubles as the CLI value-parsing callable
    # for NmArgumentParser (not a real type) — confirm against the parser.
    use_zipfile_serialization_if_available: convert_to_bool = field(
        default=True,
        metadata={
            "help": "for torch >= 1.6.0 only exports the Module's state dict "
            "using the new zipfile serialization. Default is True, "
            "has no affect on lower torch versions"
        },
    )
    pretrained: str = field(
        default=True,
        metadata={
            "help": "The type of pretrained weights to use, "
            "default is true to load the default pretrained weights for "
            "the model. Otherwise should be set to the desired weights "
            "type: [base, optim, optim-perf]. To not load any weights "
            "set to one of [none, false]"
        },
    )
    pretrained_dataset: str = field(
        default=None,
        metadata={
            "help": "The dataset to load pretrained weights for if pretrained "
            "is set. Default is None which will load the default "
            "dataset for the architecture. Ex can be set to imagenet, "
            "cifar10, etc",
        },
    )
    # json.loads as annotation: CLI strings are parsed into dicts — see note above.
    model_kwargs: json.loads = field(
        default_factory=lambda: {},
        metadata={
            "help": "Keyword arguments to be passed to model constructor, should "
            "be given as a json object"
        },
    )
    dataset_kwargs: json.loads = field(
        default_factory=lambda: {},
        metadata={
            "help": "Keyword arguments to be passed to dataset constructor, "
            "should be given as a json object",
        },
    )
    model_tag: str = field(
        default=None,
        metadata={
            "help": "A tag to use for the model for saving results under save-dir, "
            "defaults to the model arch and dataset used",
        },
    )
    save_dir: str = field(
        default="pytorch_vision",
        metadata={
            "help": "The path to the directory for saving results",
        },
    )

    def __post_init__(self):
        """
        Derive detection preprocessing defaults and mark this run as
        single-process (export never runs under DDP).
        """
        # auto-select preprocessing for detection datasets unless given
        if "preprocessing_type" not in self.dataset_kwargs and (
            "coco" in self.dataset.lower() or "voc" in self.dataset.lower()
        ):
            if "ssd" in self.arch_key.lower():
                self.dataset_kwargs["preprocessing_type"] = "ssd"
            elif "yolo" in self.arch_key.lower():
                self.dataset_kwargs["preprocessing_type"] = "yolo"
        self.local_rank: int = -1
        self.is_main_process: bool = True
The provided code snippet includes necessary dependencies for implementing the `export_setup` function. Write a Python function `def export_setup(args_: ExportArgs) -> Tuple[Module, Optional[str], Any]` to solve the following problem:
Pre-export setup :param args_ : An ExportArgs object containing config for export task.
Here is the function:
def export_setup(args_: ExportArgs) -> Tuple[Module, Optional[str], Any]:
    """
    Pre-export setup

    :param args_ : An ExportArgs object containing config for export task.
    """
    save_dir, loggers = utils.get_save_dir_and_loggers(args_, task=CURRENT_TASK)

    # image size is the spatial dim of the [C, S, S] input shape
    image_size = ModelRegistry.input_shape(args_.arch_key)[1]
    train_dataset, _, val_dataset, val_loader = utils.get_train_and_validation_loaders(
        args_, image_size, task=CURRENT_TASK
    )

    # build the model for the inferred number of classes
    num_classes = utils.infer_num_classes(args_, train_dataset, val_dataset)
    model = utils.create_model(args_, num_classes)
    return model, save_dir, val_loader
21,666 | import argparse
import json
import os
from dataclasses import dataclass, field
from typing import Any, List, Optional, Tuple
import torch
from torch.nn import Module
from torch.utils.data import DataLoader
import utils
from argparser_.nm_argparser_ import NmArgumentParser
from sparseml import get_main_logger
from sparseml.pytorch.models import ModelRegistry
from sparseml.pytorch.utils import (
DEFAULT_LOSS_KEY,
ModuleDeviceContext,
ModuleTester,
ModuleTrainer,
default_device,
get_prunable_layers,
model_to_device,
set_deterministic_seeds,
tensor_sparsity,
)
LOGGER = get_main_logger()
# NOTE(review): fields below use dataclasses.field(...), so this class is
# presumably decorated with @dataclass at its definition site — confirm.
class TrainingArguments:
    """
    Represents the arguments we use in our PyTorch integration scripts for
    training tasks

    Using :class:`NmArgumentParser` we can turn this class into `argparse
    <https://docs.python.org/3/library/argparse.html#module-argparse>`__
    arguments that can be specified on the command line.

    :param train_batch_size: An int representing the training batch size.
    :param test_batch_size: An int representing the test batch size.
    :param arch_key: A str key representing the type of model to use,
        ex:resnet50.
    :param dataset: The dataset to use for training, ex imagenet, imagenette,
        etc; Set to `imagefolder` for a custom dataset.
    :param dataset_path: Root path to dataset location.
    :param local_rank: DDP argument set by PyTorch in DDP mode, default -1
    :param checkpoint_path: A path to a previous checkpoint to load the state
        from and resume the state for; Also works with SparseZoo recipes;
        Set to zoo to automatically download and load weights associated with a
        recipe.
    :param init_lr: float representing the initial learning for training,
        default=1e-9 .
    :param optim_args: Additional arguments to be passed in to the optimizer as
        a json object
    :param recipe_path: The path to the yaml file containing the modifiers and
        schedule to apply them with; Can also provide a SparseZoo stub prefixed
        with 'zoo:'.
    :param sparse_transfer_learn: Boolean to enable sparse transfer learning
        modifiers to enforce the sparsity for already sparse layers. The
        modifiers are added to the ones to be loaded from the recipe-path.
    :param eval_mode: bool to start evaluation mode so that the model can be
        evaluated on the desired dataset.
    :param optim: str respresnting the optimizer type to use, one of
        [SGD, Adam, RMSprop].
    :param logs_dir: The path to the directory for saving logs.
    :param save_best_after: int epoch number to start saving the best
        validation result after until the end of training.
    :param save_epochs: int epochs to save checkpoints at.
    :param use_mixed_precision: bool to train model using mixed precision.
        Supported environments are single GPU and multiple GPUs using
        DistributedDataParallel with one GPU per process.
    :param debug_steps: int represnting amount of steps to run for training and
        testing for debug mode default=-1.
    :param pretrained: The type of pretrained weights to use default is true
        to load the default pretrained weights for the model Otherwise should
        be set to the desired weights type: [base, optim, optim-perf];
        To not load any weights set to one of [none, false].
    :param pretrained_dataset: str representing the dataset to load pretrained
        weights for if pretrained is set; Default is None which will load the
        default dataset for the architecture; Ex can be set to imagenet,
        cifar10, etc".
    :param model_kwargs: json object containing keyword arguments to be
        passed to model constructor.
    :param dataset_kwargs: json object to load keyword arguments to be passed
        to dataset constructor.
    :param model_tag: A str tag to use for the model for saving results
        under save-dir, defaults to the model arch and dataset used.
    :param save_dir: The path to the directory for saving results,
        default="pytorch_vision".
    :param device: str represnting the device to run on (can also include ids
        for data parallel), ex:{cpu, cuda, cuda:0,1}.
    :param loader_num_workers: int number of workers to use for data loading,
        default=4.
    :param loader_pin_memory: bool to use pinned memory for data loading,
        default=True.
    """

    train_batch_size: int = field(
        metadata={"help": "The batch size to use while training"}
    )
    test_batch_size: int = field(
        metadata={"help": "The batch size to use while testing"}
    )
    arch_key: str = field(
        metadata={
            "help": "The type of model to use, ex: resnet50, vgg16, mobilenet "
            "put as help to see the full list (will raise an exception"
            "with the list)",
        }
    )
    dataset: str = field(
        metadata={
            "help": "The dataset to use for training, "
            "ex: imagenet, imagenette, cifar10, etc. "
            "Set to imagefolder for a generic dataset setup "
            "with an image folder structure setup like imagenet or"
            " loadable by a dataset in sparseml.pytorch.datasets"
        }
    )
    dataset_path: str = field(
        metadata={
            "help": "The root path to where the dataset is stored",
        }
    )
    # hidden from --help: torch.distributed sets this via the environment
    local_rank: int = field(
        default=-1,
        metadata={
            "keep_underscores": True,
            "help": argparse.SUPPRESS,
        },
    )
    checkpoint_path: str = field(
        default=None,
        metadata={
            "help": "A path to a previous checkpoint to load the state from "
            "and resume the state for. If provided, pretrained will "
            "be ignored . If using a SparseZoo recipe, can also "
            "provide 'zoo' to load the base weights associated with "
            "that recipe"
        },
    )
    init_lr: float = field(
        default=1e-9,
        metadata={
            "help": "The initial learning rate to use while training, "
            "the actual initial value used should be set by the"
            " sparseml recipe"
        },
    )
    # json.loads annotation: CLI string parsed into a dict of optimizer kwargs
    optim_args: json.loads = field(
        default_factory=lambda: {
            "momentum": 0.9,
            "nesterov": True,
            "weight_decay": 0.0001,
        },
        metadata={
            "help": "Additional args to be passed to the optimizer passed in"
            " as a json object",
        },
    )
    recipe_path: str = field(
        default=None,
        metadata={
            "help": "The path to the yaml file containing the modifiers and "
            "schedule to apply them with. Can also provide a "
            "SparseZoo stub prefixed with 'zoo:' with an optional "
            "'?recipe_type=' argument"
        },
    )
    sparse_transfer_learn: Optional[bool] = field(
        default=False,
        metadata={
            "help": "Enable sparse transfer learning modifiers to enforce the "
            "sparsity for already sparse layers. The modifiers are "
            "added to the ones to be loaded from the recipe-path"
        },
    )
    eval_mode: Optional[bool] = field(
        default=False,
        metadata={
            "help": "Puts into evaluation mode so that the model can be "
            "evaluated on the desired dataset"
        },
    )
    optim: str = field(
        default="SGD",
        metadata={"help": "The optimizer type to use, one of [SGD, Adam, RMSprop]"},
    )
    logs_dir: str = field(
        default=os.path.join("pytorch_vision_train", "tensorboard-logs"),
        metadata={
            "help": "The path to the directory for saving logs",
        },
    )
    save_best_after: int = field(
        default=-1,
        metadata={
            "help": "start saving the best validation result after the given "
            "epoch completes until the end of training"
        },
    )
    save_epochs: List[int] = field(
        default_factory=lambda: [], metadata={"help": "epochs to save checkpoints at"}
    )
    use_mixed_precision: Optional[bool] = field(
        default=False,
        metadata={
            "help": "Trains model using mixed precision. Supported "
            "environments are single GPU and multiple GPUs using "
            "DistributedDataParallel with one GPU per process"
        },
    )
    debug_steps: int = field(
        default=-1,
        metadata={
            "help": "Amount of steps to run for training and testing for a "
            "debug mode"
        },
    )
    pretrained: str = field(
        default=True,
        metadata={
            "help": "The type of pretrained weights to use, "
            "default is true to load the default pretrained weights for "
            "the model. Otherwise should be set to the desired weights "
            "type: [base, optim, optim-perf]. To not load any weights set"
            "to one of [none, false]"
        },
    )
    pretrained_dataset: str = field(
        default=None,
        metadata={
            "help": "The dataset to load pretrained weights for if pretrained is "
            "set. Default is None which will load the default dataset for "
            "the architecture. Ex can be set to imagenet, cifar10, etc",
        },
    )
    model_kwargs: json.loads = field(
        default_factory=lambda: {},
        metadata={
            "help": "Keyword arguments to be passed to model constructor, should "
            "be given as a json object"
        },
    )
    dataset_kwargs: json.loads = field(
        default_factory=lambda: {},
        metadata={
            "help": "Keyword arguments to be passed to dataset constructor, "
            "should be given as a json object",
        },
    )
    model_tag: str = field(
        default=None,
        metadata={
            "help": "A tag to use for the model for saving results under save-dir, "
            "defaults to the model arch and dataset used",
        },
    )
    save_dir: str = field(
        default="pytorch_vision",
        metadata={
            "help": "The path to the directory for saving results",
        },
    )
    device: str = field(
        default=default_device(),
        metadata={
            "help": "The device to run on (can also include ids for data "
            "parallel), ex: cpu, cuda, cuda:0,1"
        },
    )
    loader_num_workers: int = field(
        default=4, metadata={"help": "The number of workers to use for data loading"}
    )
    loader_pin_memory: bool = field(
        default=True, metadata={"help": "Use pinned memory for data loading"}
    )

    def __post_init__(self):
        """
        Derive DDP state from the environment, split the global train batch
        size across processes, and set detection preprocessing defaults.
        """
        # add ddp args (WORLD_SIZE/RANK are set by torch.distributed launchers)
        env_world_size = int(os.environ.get("WORLD_SIZE", 1))
        self.world_size = env_world_size

        env_rank = int(os.environ.get("RANK", -1))
        self.rank = env_rank

        self.is_main_process = self.rank in [
            -1,
            0,
        ]  # non DDP execution or 0th DDP process

        # modify training batch size for give world size
        # NOTE(review): assert is stripped under `python -O`; consider raising
        assert self.train_batch_size % self.world_size == 0, (
            f"Invalid training batch size for world size {self.world_size} "
            f"given batch size {self.train_batch_size}. "
            f"world size must divide training batch size evenly."
        )
        self.train_batch_size = self.train_batch_size // self.world_size

        # auto-select preprocessing for detection datasets unless given
        if "preprocessing_type" not in self.dataset_kwargs and (
            "coco" in self.dataset.lower() or "voc" in self.dataset.lower()
        ):
            if "ssd" in self.arch_key.lower():
                self.dataset_kwargs["preprocessing_type"] = "ssd"
            elif "yolo" in self.arch_key.lower():
                self.dataset_kwargs["preprocessing_type"] = "yolo"

        # initialize the DDP process group when launched with a local rank
        if self.local_rank != -1:
            torch.distributed.init_process_group(backend="nccl", init_method="env://")
            set_deterministic_seeds(0)

        # used by sensitivity-analysis tasks; training always runs exact
        self.approximate = False
# Module-level guard on the installed torch version. Runs at import time;
# silently skipped when torch is not installed.
try:
    import torch

    _PARSED_TORCH_VERSION = version.parse(torch.__version__)

    if _PARSED_TORCH_VERSION.major >= 2:
        # Monkey-patch torch.compile so users get a warning that sparseml
        # does not support it, while still delegating to the real function.
        torch_compile_func = torch.compile

        def raise_torch_compile_warning(*args, **kwargs):
            warnings.warn("torch.compile is not supported by sparseml for torch 2.0.x")
            return torch_compile_func(*args, **kwargs)

        torch.compile = raise_torch_compile_warning

    # escape hatch: NM_BYPASS_TORCH_VERSION=1 downgrades the error to a warning
    _BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
    if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
        if not _BYPASS:
            raise RuntimeError(
                "sparseml does not support torch==1.10.* or 1.11.*. "
                f"Found torch version {torch.__version__}.\n\n"
                "To bypass this error, set environment variable "
                "`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
                "Bypassing may result in errors or "
                "incorrect behavior, so set at your own risk."
            )
        else:
            warnings.warn(
                "sparseml quantized onnx export does not work "
                "with torch==1.10.* or 1.11.*"
            )
except ImportError:
    pass
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train( train_args: TrainingArguments, model: Module, train_loader: DataLoader, val_loader: DataLoader, input_shape: Tuple[int, ...], save_dir: str, loggers: List[Any], ) -> None` to solve the following problem:
Utility function to drive the training processing :param train_args: A TrainingArguments object with arguments for current training task :param model: model architecture to train :param train_loader: A DataLoader for training data :param val_loader: A DataLoader for validation data :param input_shape: A tuple of integers representing the shape of inputs :param save_dir: Directory to store checkpoints at during training process :param loggers: List of loggers to use during training process
Here is the function:
def train(
    train_args: TrainingArguments,
    model: Module,
    train_loader: DataLoader,
    val_loader: DataLoader,
    input_shape: Tuple[int, ...],
    save_dir: str,
    loggers: List[Any],
) -> None:
    """
    Utility function to drive the training processing

    :param train_args: A TrainingArguments object with
        arguments for current training task
    :param model: model architecture to train
    :param train_loader: A DataLoader for training data
    :param val_loader: A DataLoader for validation data
    :param input_shape: A tuple of integers representing the shape of inputs
    :param save_dir: Directory to store checkpoints at during training process
    :param loggers: List of loggers to use during training process
    """
    # loss setup
    # NOTE(review): validation loss is built with training=True — looks
    # intentional upstream, but confirm against get_loss_wrapper semantics
    val_loss = utils.get_loss_wrapper(arch_key=train_args.arch_key, training=True)
    LOGGER.info(f"created loss for validation: {val_loss}")

    train_loss = utils.get_loss_wrapper(arch_key=train_args.arch_key, training=True)
    LOGGER.info(f"created loss for training: {train_loss}")

    # training setup
    if not train_args.eval_mode:
        epoch, optim, manager = utils.create_scheduled_optimizer(
            train_args,
            model,
            train_loader,
            loggers,
        )
    else:
        # eval-only: no optimizer/recipe manager needed
        epoch = 0
        train_loss = None
        optim = None
        manager = None

    # device setup: rank == -1 means non-DDP execution
    if train_args.rank == -1:
        device = train_args.device
        ddp = False
    else:
        torch.cuda.set_device(train_args.local_rank)
        device = train_args.local_rank
        ddp = True

    model, device, device_ids = model_to_device(model, device, ddp=ddp)
    LOGGER.info(f"running on device {device} for ids {device_ids}")

    trainer = (
        ModuleTrainer(
            model,
            device,
            train_loss,
            optim,
            loggers=loggers,
            device_context=ModuleDeviceContext(
                use_mixed_precision=train_args.use_mixed_precision,
                world_size=train_args.world_size,
            ),
        )
        if not train_args.eval_mode
        else None
    )

    if train_args.is_main_process:  # only test on one DDP process if using DDP
        tester = ModuleTester(model, device, val_loss, loggers=loggers, log_steps=-1)

        # initial baseline eval run
        tester.run_epoch(
            val_loader, epoch=epoch - 1, max_epochs=-1, max_steps=train_args.debug_steps
        )

    if not train_args.eval_mode:
        utils.save_recipe(recipe_manager=manager, save_dir=save_dir)
        LOGGER.info(f"starting training from epoch {epoch}")

        if epoch > 0:
            # resuming from a checkpoint: fast-forward the schedule
            LOGGER.info("adjusting ScheduledOptimizer to restore point")
            optim.adjust_current_step(epoch, 0)

        # prefer accuracy when available, otherwise fall back to loss;
        # accuracy is maximized while loss is minimized (see comparison below)
        target_metric = (
            "top1acc" if "top1acc" in tester.loss.available_losses else DEFAULT_LOSS_KEY
        )
        best_metric = None
        val_res = None

        while epoch < manager.max_epochs:
            if train_args.debug_steps > 0:
                # correct since all optimizer steps are not
                # taken in the epochs for debug mode
                optim.adjust_current_step(epoch, 0)

            if train_args.rank != -1:  # sync DDP dataloaders
                train_loader.sampler.set_epoch(epoch)

            trainer.run_epoch(
                train_loader,
                epoch,
                max_epochs=manager.max_epochs,
                max_steps=train_args.debug_steps,
                show_progress=train_args.is_main_process,
            )

            # testing steps
            if train_args.is_main_process:
                # only test and save on main process
                val_res = tester.run_epoch(
                    val_loader,
                    epoch,
                    max_epochs=manager.max_epochs,
                    max_steps=train_args.debug_steps,
                )
                val_metric = val_res.result_mean(target_metric).item()

                if epoch >= train_args.save_best_after and (
                    best_metric is None
                    or (
                        val_metric <= best_metric
                        if target_metric != "top1acc"
                        else val_metric >= best_metric
                    )
                ):
                    utils.save_model_training(
                        model,
                        optim,
                        input_shape,
                        "checkpoint-best",
                        save_dir,
                        epoch,
                        val_res,
                    )
                    best_metric = val_metric

                # save checkpoints
                _save_epoch = (
                    train_args.is_main_process
                    and train_args.save_epochs
                    and epoch in train_args.save_epochs
                )
                if _save_epoch:
                    utils.save_model_training(
                        model,
                        optim,
                        input_shape,
                        f"checkpoint-{epoch:04d}-{val_metric:.04f}",
                        save_dir,
                        epoch,
                        val_res,
                    )

            epoch += 1

        # export the final model
        LOGGER.info("completed...")
        if train_args.is_main_process:
            # only convert qat -> quantized ONNX graph for finalized model
            # TODO: change this to all checkpoints when conversion times improve
            utils.save_model_training(
                model, optim, input_shape, "model", save_dir, epoch - 1, val_res, True
            )

            LOGGER.info("layer sparsities:")
            for (name, layer) in get_prunable_layers(model):
                LOGGER.info(
                    f"{name}.weight: {tensor_sparsity(layer.weight).item():.4f}"
                )

    # close DDP
    if train_args.rank != -1:
        torch.distributed.destroy_process_group()
21,667 | import os
from enum import Enum, auto, unique
from typing import Any, List, Optional, Tuple, Union
import torch
from torch.nn import Module
from torch.nn import functional as torch_functional
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from sparseml.pytorch.datasets import DatasetRegistry, ssd_collate_fn, yolo_collate_fn
from sparseml.pytorch.image_classification.utils.helpers import (
download_framework_model_by_recipe_type,
)
from sparseml.pytorch.models import ModelRegistry
from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer
from sparseml.pytorch.sparsification import ConstantPruningModifier
from sparseml.pytorch.utils import (
DEFAULT_LOSS_KEY,
CrossEntropyLossWrapper,
InceptionCrossEntropyLossWrapper,
LossWrapper,
ModuleExporter,
ModuleRunResults,
PythonLogger,
SSDLossWrapper,
TensorBoardLogger,
TopKAccuracy,
YoloLossWrapper,
early_stop_data_loader,
torch_distributed_zero_first,
)
from sparseml.utils import create_dirs
from sparsezoo import Model
class Tasks(Enum):
def get_save_dir_and_loggers(
    args: Any, task: Optional[Tasks] = None
) -> Tuple[Union[str, None], List]:
    """
    Resolve the run's save directory and build its loggers.

    Non-main processes return (None, []) so that only one process logs and
    writes results under DDP.
    """
    if not args.is_main_process:
        # do not log for non main processes
        return None, []

    save_dir = os.path.abspath(os.path.expanduser(args.save_dir))
    logs_dir = (
        os.path.abspath(os.path.expanduser(os.path.join(args.logs_dir)))
        if task == Tasks.TRAIN
        else None
    )

    if args.model_tag:
        model_id = args.model_tag
    else:
        dataset_name = (
            f"{args.dataset}-{args.dataset_kwargs['year']}"
            if "year" in args.dataset_kwargs
            else args.dataset
        )
        model_tag = f"{args.arch_key.replace('/', '.')}_{dataset_name}"
        model_id = model_tag
        # bump a numeric suffix until the id is unused under the logs/save dir
        model_main_dir = logs_dir or save_dir
        suffix = 0
        while os.path.exists(os.path.join(model_main_dir, model_id)):
            suffix += 1
            model_id = f"{model_tag}__{suffix:02d}"

    save_dir = os.path.join(save_dir, model_id)
    create_dirs(save_dir)

    # loggers setup
    loggers = [PythonLogger()]
    if task == Tasks.TRAIN:
        logs_dir = os.path.join(logs_dir, model_id)
        create_dirs(logs_dir)
        loggers.append(TensorBoardLogger(log_path=logs_dir))
    print(f"Model id is set to {model_id}")
    return save_dir, loggers
21,668 | import os
from enum import Enum, auto, unique
from typing import Any, List, Optional, Tuple, Union
import torch
from torch.nn import Module
from torch.nn import functional as torch_functional
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from sparseml.pytorch.datasets import DatasetRegistry, ssd_collate_fn, yolo_collate_fn
from sparseml.pytorch.image_classification.utils.helpers import (
download_framework_model_by_recipe_type,
)
from sparseml.pytorch.models import ModelRegistry
from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer
from sparseml.pytorch.sparsification import ConstantPruningModifier
from sparseml.pytorch.utils import (
DEFAULT_LOSS_KEY,
CrossEntropyLossWrapper,
InceptionCrossEntropyLossWrapper,
LossWrapper,
ModuleExporter,
ModuleRunResults,
PythonLogger,
SSDLossWrapper,
TensorBoardLogger,
TopKAccuracy,
YoloLossWrapper,
early_stop_data_loader,
torch_distributed_zero_first,
)
from sparseml.utils import create_dirs
from sparsezoo import Model
class Tasks(Enum):
    """
    A class representing supported image classification/detection tasks
    """

    # member order is significant: auto() assigns values 1..5 in order
    TRAIN = auto()
    EXPORT = auto()
    ANALYSIS = auto()
    LR_ANALYSIS = auto()
    PR_SENSITIVITY = auto()
def _create_train_dataset_and_loader(
    args: Any,
    image_size: Tuple[int, ...],
    task: Optional[Tasks] = None,
) -> Tuple[Any, Any]:
    """
    Build the training dataset and DataLoader, or (None, None) for tasks
    that never consume training data.
    """
    # Python precedence: `and` binds tighter than `or`, so this reads as
    # not(EXPORT or (PR_SENSITIVITY and approximate) or (TRAIN and eval_mode))
    need_train_data = not (
        task == Tasks.EXPORT
        or task == Tasks.PR_SENSITIVITY
        and args.approximate
        or task == Tasks.TRAIN
        and args.eval_mode
    )
    if need_train_data:
        with torch_distributed_zero_first(
            args.local_rank,
        ):  # only download once locally
            train_dataset = DatasetRegistry.create(
                args.dataset,
                root=args.dataset_path,
                train=True,
                rand_trans=True,
                image_size=image_size,
                **args.dataset_kwargs,
            )
        # DDP shards the data via a DistributedSampler; sampler and shuffle
        # are mutually exclusive in DataLoader
        sampler = (
            torch.utils.data.distributed.DistributedSampler(train_dataset)
            if args.rank != -1
            else None
        )
        shuffle = True if sampler is None else False
        batch_size = args.train_batch_size if task == Tasks.TRAIN else args.batch_size

        train_loader = DataLoader(
            train_dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            num_workers=args.loader_num_workers,
            pin_memory=args.loader_pin_memory,
            sampler=sampler,
            collate_fn=_get_collate_fn(arch_key=args.arch_key, task=task),
        )
        print(f"created train_dataset: {train_dataset}")
        return train_dataset, train_loader
    return None, None
def _create_val_dataset_and_loader(
    args, image_size: Tuple[int, ...], task: Optional[Tasks] = None
) -> Tuple[Any, Any]:
    """
    Build the validation dataset and DataLoader, or (None, None) for tasks
    that do not need validation data.
    """
    # NOTE(review): the inner condition skips val data for non-main processes
    # and for custom `imagefolder` datasets — confirm intended interaction
    need_val_data = not (
        task == Tasks.PR_SENSITIVITY
        or task == Tasks.LR_ANALYSIS
        or not (args.is_main_process and args.dataset != "imagefolder")
    )
    if need_val_data:
        val_dataset = DatasetRegistry.create(
            args.dataset,
            root=args.dataset_path,
            train=False,
            rand_trans=False,
            image_size=image_size,
            **args.dataset_kwargs,
        )
        if args.is_main_process:
            is_training = task == Tasks.TRAIN
            # non-training tasks (e.g. export) evaluate one sample at a time
            val_loader = DataLoader(
                val_dataset,
                batch_size=args.test_batch_size if is_training else 1,
                shuffle=False,
                num_workers=args.loader_num_workers if is_training else 1,
                pin_memory=args.loader_pin_memory if is_training else False,
                collate_fn=_get_collate_fn(arch_key=args.arch_key, task=task),
            )
            if task == Tasks.EXPORT:
                # cap the loader at the number of samples to export
                val_loader = early_stop_data_loader(
                    val_loader, args.num_samples if args.num_samples > 1 else 1
                )
            print(f"created val_dataset: {val_dataset}")
        else:
            val_loader = None  # only val dataset needed to get the number of classes
        return val_dataset, val_loader
    return None, None  # val dataset not needed
The provided code snippet includes necessary dependencies for implementing the `get_train_and_validation_loaders` function. Write a Python function `def get_train_and_validation_loaders( args: Any, image_size: Tuple[int, ...], task: Optional[Tasks] = None )` to solve the following problem:
:param args: Object containing relevant configuration for the task :param image_size: A Tuple of integers representing the shape of input image :param task: The current task being performed :return: 4 element tuple with the following format (train_dataset, train_loader, val_dataset, val_loader)
Here is the function:
def get_train_and_validation_loaders(
    args: Any, image_size: Tuple[int, ...], task: Optional[Tasks] = None
):
    """
    Construct both the training and validation datasets and loaders.

    :param args: Object containing relevant configuration for the task
    :param image_size: A Tuple of integers representing the shape of input image
    :param task: The current task being performed
    :return: 4 element tuple with the following format (train_dataset,
        train_loader, val_dataset, val_loader)
    """
    train_pair = _create_train_dataset_and_loader(args, image_size, task=task)
    val_pair = _create_val_dataset_and_loader(args, image_size, task=task)
    return (*train_pair, *val_pair)
21,669 | import os
from enum import Enum, auto, unique
from typing import Any, List, Optional, Tuple, Union
import torch
from torch.nn import Module
from torch.nn import functional as torch_functional
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from sparseml.pytorch.datasets import DatasetRegistry, ssd_collate_fn, yolo_collate_fn
from sparseml.pytorch.image_classification.utils.helpers import (
download_framework_model_by_recipe_type,
)
from sparseml.pytorch.models import ModelRegistry
from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer
from sparseml.pytorch.sparsification import ConstantPruningModifier
from sparseml.pytorch.utils import (
DEFAULT_LOSS_KEY,
CrossEntropyLossWrapper,
InceptionCrossEntropyLossWrapper,
LossWrapper,
ModuleExporter,
ModuleRunResults,
PythonLogger,
SSDLossWrapper,
TensorBoardLogger,
TopKAccuracy,
YoloLossWrapper,
early_stop_data_loader,
torch_distributed_zero_first,
)
from sparseml.utils import create_dirs
from sparsezoo import Model
The provided code snippet includes necessary dependencies for implementing the `create_model` function. Write a Python function `def create_model(args: Any, num_classes: int) -> Module` to solve the following problem:
:param args: object with configuration for model classes :param num_classes: Integer representing the number of output classes :returns: A Module object representing the created model
Here is the function:
def create_model(args: Any, num_classes: int) -> Module:
    """
    Instantiate the model described by ``args``.

    :param args: object with configuration for model classes
    :param num_classes: Integer representing the number of output classes
    :returns: A Module object representing the created model
    :raises ValueError: if --checkpoint-path is 'zoo' but --recipe-path is
        not a SparseZoo stub prefixed by 'zoo:'
    """
    with torch_distributed_zero_first(args.local_rank):  # only download once locally
        if args.checkpoint_path == "zoo":
            recipe = args.recipe_path
            if not (recipe and recipe.startswith("zoo:")):
                raise ValueError(
                    "'zoo' provided as --checkpoint-path but a SparseZoo stub"
                    " prefixed by 'zoo:' not provided as --recipe-path"
                )
            # resolve the checkpoint from the recipe's SparseZoo model
            args.checkpoint_path = download_framework_model_by_recipe_type(
                Model(recipe)
            )

        model = ModelRegistry.create(
            args.arch_key,
            args.pretrained,
            args.checkpoint_path,
            args.pretrained_dataset,
            num_classes=num_classes,
            **args.model_kwargs,
        )
    print(f"created model: {model}")
    return model
21,670 | import os
from enum import Enum, auto, unique
from typing import Any, List, Optional, Tuple, Union
import torch
from torch.nn import Module
from torch.nn import functional as torch_functional
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from sparseml.pytorch.datasets import DatasetRegistry, ssd_collate_fn, yolo_collate_fn
from sparseml.pytorch.image_classification.utils.helpers import (
download_framework_model_by_recipe_type,
)
from sparseml.pytorch.models import ModelRegistry
from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer
from sparseml.pytorch.sparsification import ConstantPruningModifier
from sparseml.pytorch.utils import (
DEFAULT_LOSS_KEY,
CrossEntropyLossWrapper,
InceptionCrossEntropyLossWrapper,
LossWrapper,
ModuleExporter,
ModuleRunResults,
PythonLogger,
SSDLossWrapper,
TensorBoardLogger,
TopKAccuracy,
YoloLossWrapper,
early_stop_data_loader,
torch_distributed_zero_first,
)
from sparseml.utils import create_dirs
from sparsezoo import Model
The provided code snippet includes necessary dependencies for implementing the `infer_num_classes` function. Write a Python function `def infer_num_classes(args: Any, train_dataset, val_dataset)` to solve the following problem:
:param args: Object with configuration settings :param train_dataset: dataset representing training data :param val_dataset: dataset representing validation data :return: An integer representing the number of classes
Here is the function:
def infer_num_classes(args: Any, train_dataset, val_dataset):
    """
    Resolve the number of output classes for the current run.

    Resolution order: an explicit ``num_classes`` in ``args.model_kwargs``
    (popped so the model constructor does not receive it twice), then the
    dataset itself for ``imagefolder`` datasets, then the registry
    attributes for named datasets.

    :param args: Object with configuration settings
    :param train_dataset: dataset representing training data
    :param val_dataset: dataset representing validation data
    :return: An integer representing the number of classes
    """
    if "num_classes" in args.model_kwargs:
        # handle manually overridden num classes; pop instead of get + del
        num_classes = args.model_kwargs.pop("num_classes")
    elif args.dataset == "imagefolder":
        dataset = val_dataset or train_dataset  # get non None dataset
        num_classes = dataset.num_classes
    else:
        dataset_attributes = DatasetRegistry.attributes(args.dataset)
        num_classes = dataset_attributes["num_classes"]
    return num_classes
21,671 | import os
from enum import Enum, auto, unique
from typing import Any, List, Optional, Tuple, Union
import torch
from torch.nn import Module
from torch.nn import functional as torch_functional
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from sparseml.pytorch.datasets import DatasetRegistry, ssd_collate_fn, yolo_collate_fn
from sparseml.pytorch.image_classification.utils.helpers import (
download_framework_model_by_recipe_type,
)
from sparseml.pytorch.models import ModelRegistry
from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer
from sparseml.pytorch.sparsification import ConstantPruningModifier
from sparseml.pytorch.utils import (
DEFAULT_LOSS_KEY,
CrossEntropyLossWrapper,
InceptionCrossEntropyLossWrapper,
LossWrapper,
ModuleExporter,
ModuleRunResults,
PythonLogger,
SSDLossWrapper,
TensorBoardLogger,
TopKAccuracy,
YoloLossWrapper,
early_stop_data_loader,
torch_distributed_zero_first,
)
from sparseml.utils import create_dirs
from sparsezoo import Model
class Tasks(Enum):
    """
    A class representing supported image classification/detection tasks
    """

    TRAIN = auto()  # model training
    EXPORT = auto()  # model export (validation loader is truncated to a few samples)
    ANALYSIS = auto()  # general analysis
    LR_ANALYSIS = auto()  # learning rate sensitivity analysis (no val data needed)
    PR_SENSITIVITY = auto()  # pruning (kernel sparsity) sensitivity analysis (no val data needed)
The provided code snippet includes necessary dependencies for implementing the `get_loss_wrapper` function. Write a Python function `def get_loss_wrapper( arch_key: str, training: bool = False, task: Optional[Tasks] = None )` to solve the following problem:
:param arch_key: The model architecture :param training: True if training task started else False :param task: current task being executed
Here is the function:
def get_loss_wrapper(
    arch_key: str, training: bool = False, task: Optional[Tasks] = None
):
    """
    Build the loss wrapper appropriate for the model architecture and task.

    :param arch_key: The model architecture
    :param training: True if training task started else False
    :param task: current task being executed
    :return: an SSD/Yolo loss wrapper for detection architectures, otherwise
        a cross-entropy based wrapper with top-1/top-5 accuracy extras
    """
    arch_lower = arch_key.lower()
    if "ssd" in arch_lower:
        return SSDLossWrapper()
    if "yolo" in arch_lower:
        return YoloLossWrapper()

    extras = {"top1acc": TopKAccuracy(1), "top5acc": TopKAccuracy(5)}
    if task == Tasks.TRAIN:
        # inception models need the dedicated wrapper during training;
        # match case-insensitively for consistency with the ssd/yolo
        # checks above (previously compared against the raw arch_key)
        return (
            CrossEntropyLossWrapper(extras)
            if training and "inception" not in arch_lower
            else InceptionCrossEntropyLossWrapper(extras)
        )
    return LossWrapper(loss_fn=torch_functional.cross_entropy, extras=extras)
21,672 | import os
from enum import Enum, auto, unique
from typing import Any, List, Optional, Tuple, Union
import torch
from torch.nn import Module
from torch.nn import functional as torch_functional
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from sparseml.pytorch.datasets import DatasetRegistry, ssd_collate_fn, yolo_collate_fn
from sparseml.pytorch.image_classification.utils.helpers import (
download_framework_model_by_recipe_type,
)
from sparseml.pytorch.models import ModelRegistry
from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer
from sparseml.pytorch.sparsification import ConstantPruningModifier
from sparseml.pytorch.utils import (
DEFAULT_LOSS_KEY,
CrossEntropyLossWrapper,
InceptionCrossEntropyLossWrapper,
LossWrapper,
ModuleExporter,
ModuleRunResults,
PythonLogger,
SSDLossWrapper,
TensorBoardLogger,
TopKAccuracy,
YoloLossWrapper,
early_stop_data_loader,
torch_distributed_zero_first,
)
from sparseml.utils import create_dirs
from sparsezoo import Model
try:
    import torch

    _PARSED_TORCH_VERSION = version.parse(torch.__version__)
    if _PARSED_TORCH_VERSION.major >= 2:
        # torch 2.x: wrap torch.compile so callers are warned that sparseml
        # does not support it; the original function is still invoked
        torch_compile_func = torch.compile
        def raise_torch_compile_warning(*args, **kwargs):
            warnings.warn("torch.compile is not supported by sparseml for torch 2.0.x")
            return torch_compile_func(*args, **kwargs)
        torch.compile = raise_torch_compile_warning
    # escape hatch: NM_BYPASS_TORCH_VERSION=1 downgrades the hard error
    # below to a warning
    _BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
    if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
        # torch 1.10/1.11 break sparseml's quantized ONNX export
        if not _BYPASS:
            raise RuntimeError(
                "sparseml does not support torch==1.10.* or 1.11.*. "
                f"Found torch version {torch.__version__}.\n\n"
                "To bypass this error, set environment variable "
                "`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
                "Bypassing may result in errors or "
                "incorrect behavior, so set at your own risk."
            )
        else:
            warnings.warn(
                "sparseml quantized onnx export does not work "
                "with torch==1.10.* or 1.11.*"
            )
except ImportError:
    pass  # torch not installed; nothing to patch or version-check
The provided code snippet includes necessary dependencies for implementing the `create_scheduled_optimizer` function. Write a Python function `def create_scheduled_optimizer( train_args: Any, model: Module, train_loader: DataLoader, loggers: List[Any], ) -> Tuple[int, ScheduledOptimizer, ScheduledModifierManager]` to solve the following problem:
:param train_args : An object with task specific config :param model: model architecture to train :param train_loader: A DataLoader for training data :param loggers: List of loggers to use during training process :type train_args: TrainingArguments
Here is the function:
def create_scheduled_optimizer(
    train_args: Any,
    model: Module,
    train_loader: DataLoader,
    loggers: List[Any],
) -> Tuple[int, ScheduledOptimizer, ScheduledModifierManager]:
    """
    Build the base optimizer, wrap it with the recipe-driven scheduler, and
    determine the starting epoch.

    :param train_args : An object with task specific config
    :param model: model architecture to train
    :param train_loader: A DataLoader for training data
    :param loggers: List of loggers to use during training process
    :type train_args: TrainingArguments
    :return: (starting epoch, scheduled optimizer, modifier manager)
    """
    # look the optimizer class up by name on torch.optim
    optimizer_cls = torch.optim.__dict__[train_args.optim]
    base_optim = optimizer_cls(
        model.parameters(), lr=train_args.init_lr, **train_args.optim_args
    )
    print(f"created optimizer: {base_optim}")
    print(
        "note, the lr for the optimizer may not reflect the manager yet until "
        "the recipe config is created and run"
    )

    # restore from previous check point; currently optimizer restoring is
    # unsupported (mapping restored params to the correct device is not
    # working), so only the epoch counter is handled
    epoch = 0
    if train_args.checkpoint_path:
        # load_optimizer(args.checkpoint_path, optim)
        # epoch = load_epoch(args.checkpoint_path) + 1
        print(
            f"restored checkpoint from {train_args.checkpoint_path} for "
            f"epoch {epoch - 1}"
        )

    # modifier setup: optionally hold existing sparsity constant while
    # transfer learning
    extra_modifiers = (
        ConstantPruningModifier.from_sparse_model(model)
        if train_args.sparse_transfer_learn
        else None
    )
    manager = ScheduledModifierManager.from_yaml(
        file_path=train_args.recipe_path, add_modifiers=extra_modifiers
    )
    scheduled_optim = ScheduledOptimizer(
        base_optim,
        model,
        manager,
        steps_per_epoch=len(train_loader),
        loggers=loggers,
    )
    print(f"created manager: {manager}")
    return epoch, scheduled_optim, manager
21,673 | import os
from enum import Enum, auto, unique
from typing import Any, List, Optional, Tuple, Union
import torch
from torch.nn import Module
from torch.nn import functional as torch_functional
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from sparseml.pytorch.datasets import DatasetRegistry, ssd_collate_fn, yolo_collate_fn
from sparseml.pytorch.image_classification.utils.helpers import (
download_framework_model_by_recipe_type,
)
from sparseml.pytorch.models import ModelRegistry
from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer
from sparseml.pytorch.sparsification import ConstantPruningModifier
from sparseml.pytorch.utils import (
DEFAULT_LOSS_KEY,
CrossEntropyLossWrapper,
InceptionCrossEntropyLossWrapper,
LossWrapper,
ModuleExporter,
ModuleRunResults,
PythonLogger,
SSDLossWrapper,
TensorBoardLogger,
TopKAccuracy,
YoloLossWrapper,
early_stop_data_loader,
torch_distributed_zero_first,
)
from sparseml.utils import create_dirs
from sparsezoo import Model
The provided code snippet includes necessary dependencies for implementing the `save_recipe` function. Write a Python function `def save_recipe( recipe_manager: ScheduledModifierManager, save_dir: str, )` to solve the following problem:
:param recipe_manager: The ScheduleModified manager to save recipes :param save_dir: The directory to save the recipe
Here is the function:
def save_recipe(
    recipe_manager: ScheduledModifierManager,
    save_dir: str,
):
    """
    Persist the manager's recipe as ``recipe.yaml`` inside ``save_dir``.

    :param recipe_manager: The ScheduledModifierManager whose recipe to save
    :param save_dir: The directory to save the recipe
    """
    destination = os.path.join(save_dir, "recipe.yaml")
    recipe_manager.save(destination)
    print(f"Saved recipe to {destination}")
21,674 | import os
from enum import Enum, auto, unique
from typing import Any, List, Optional, Tuple, Union
import torch
from torch.nn import Module
from torch.nn import functional as torch_functional
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from sparseml.pytorch.datasets import DatasetRegistry, ssd_collate_fn, yolo_collate_fn
from sparseml.pytorch.image_classification.utils.helpers import (
download_framework_model_by_recipe_type,
)
from sparseml.pytorch.models import ModelRegistry
from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer
from sparseml.pytorch.sparsification import ConstantPruningModifier
from sparseml.pytorch.utils import (
DEFAULT_LOSS_KEY,
CrossEntropyLossWrapper,
InceptionCrossEntropyLossWrapper,
LossWrapper,
ModuleExporter,
ModuleRunResults,
PythonLogger,
SSDLossWrapper,
TensorBoardLogger,
TopKAccuracy,
YoloLossWrapper,
early_stop_data_loader,
torch_distributed_zero_first,
)
from sparseml.utils import create_dirs
from sparsezoo import Model
try:
    import torch

    _PARSED_TORCH_VERSION = version.parse(torch.__version__)
    if _PARSED_TORCH_VERSION.major >= 2:
        # torch 2.x: wrap torch.compile so callers are warned that sparseml
        # does not support it; the original function is still invoked
        torch_compile_func = torch.compile
        def raise_torch_compile_warning(*args, **kwargs):
            warnings.warn("torch.compile is not supported by sparseml for torch 2.0.x")
            return torch_compile_func(*args, **kwargs)
        torch.compile = raise_torch_compile_warning
    # escape hatch: NM_BYPASS_TORCH_VERSION=1 downgrades the hard error
    # below to a warning
    _BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
    if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
        # torch 1.10/1.11 break sparseml's quantized ONNX export
        if not _BYPASS:
            raise RuntimeError(
                "sparseml does not support torch==1.10.* or 1.11.*. "
                f"Found torch version {torch.__version__}.\n\n"
                "To bypass this error, set environment variable "
                "`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
                "Bypassing may result in errors or "
                "incorrect behavior, so set at your own risk."
            )
        else:
            warnings.warn(
                "sparseml quantized onnx export does not work "
                "with torch==1.10.* or 1.11.*"
            )
except ImportError:
    pass  # torch not installed; nothing to patch or version-check
The provided code snippet includes necessary dependencies for implementing the `save_model_training` function. Write a Python function `def save_model_training( model: Module, optim: Optimizer, input_shape: Tuple[int, ...], save_name: str, save_dir: str, epoch: int, val_res: Union[ModuleRunResults, None], convert_qat: bool = False, )` to solve the following problem:
:param model: model architecture :param optim: The optimizer used :param input_shape: A tuple of integers representing the input shape :param save_name: name to save model to :param save_dir: directory to save results in :param epoch: integer representing the number of epochs completed :param val_res: results from validation run :param convert_qat: True if model is to be quantized before saving
Here is the function:
def save_model_training(
    model: Module,
    optim: Optimizer,
    input_shape: Tuple[int, ...],
    save_name: str,
    save_dir: str,
    epoch: int,
    val_res: Union[ModuleRunResults, None],
    convert_qat: bool = False,
):
    """
    Export the model (PyTorch checkpoint + ONNX) and write an info file with
    the epoch and any validation metrics.

    :param model: model architecture
    :param optim: The optimizer used
    :param input_shape: A tuple of integers representing the input shape
    :param save_name: name to save model to
    :param save_dir: directory to save results in
    :param epoch: integer representing the number of epochs completed
    :param val_res: results from validation run, or None if unavailable
    :param convert_qat: True if model is to be quantized before saving
    """
    # val_res is Optional and guarded below when writing the info file;
    # guard here too instead of dereferencing it unconditionally
    if val_res is not None:
        # prefer top-1 accuracy when measured, otherwise fall back to loss
        has_top1 = "top1acc" in val_res.results
        metric_name = "top-1 accuracy" if has_top1 else "val_loss"
        metric = val_res.result_mean(
            "top1acc" if has_top1 else DEFAULT_LOSS_KEY
        ).item()
        print(
            f"Saving model for epoch {epoch} and {metric_name} "
            f"{metric} to {save_dir} for {save_name}"
        )
    else:
        print(f"Saving model for epoch {epoch} to {save_dir} for {save_name}")

    exporter = ModuleExporter(model, save_dir)
    exporter.export_pytorch(optim, epoch, f"{save_name}.pth")
    exporter.export_onnx(
        torch.randn(1, *input_shape),
        f"{save_name}.onnx",
        convert_qat=convert_qat,
    )

    info_path = os.path.join(save_dir, f"{save_name}.txt")
    with open(info_path, "w") as info_file:
        info_lines = [
            f"epoch: {epoch}",
        ]
        if val_res is not None:
            for loss in val_res.results.keys():
                info_lines.append(f"{loss}: {val_res.result_mean(loss).item()}")
        info_file.write("\n".join(info_lines))
21,675 | import json
import os
from dataclasses import dataclass, field
from typing import Any, List, Optional
from torch.utils.data import DataLoader
import utils
from argparser_.nm_argparser_ import NmArgumentParser
from sparseml import get_main_logger
from sparseml.pytorch.models import ModelRegistry
from sparseml.pytorch.optim import (
pruning_loss_sens_magnitude,
pruning_loss_sens_one_shot,
)
from sparseml.pytorch.utils import default_device, model_to_device
LOGGER = get_main_logger()
class PRAnalysisArguments:
    """
    Represents the arguments we use in our PyTorch integration scripts for
    kernel sparsity (pruning) analysis tasks

    Using :class:`NmArgumentParser` we can turn this class into `argparse
    <https://docs.python.org/3/library/argparse.html#module-argparse>`__
    arguments that can be specified on the command line.

    :param arch_key: A str key representing the type of model to use,
        ex:resnet50.
    :param dataset: The dataset to use for training, ex imagenet, imagenette,
        etc; Set to `imagefolder` for a custom dataset.
    :param dataset_path: Root path to dataset location.
    :param pretrained: The type of pretrained weights to use default is true to
        load the default pretrained weights for the model Otherwise should be
        set to the desired weights type: [base, optim, optim-perf]; To not
        load any weights set to one of [none, false].
    :param pretrained_dataset: str representing the dataset to load
        pretrained weights for if pretrained is set; Default is None which will
        load the default dataset for the architecture; Ex can be set to
        imagenet, cifar10, etc.
    :param model_kwargs: json object containing keyword arguments to be
        passed to model constructor.
    :param dataset_kwargs: json object to load keyword arguments to be
        passed to dataset constructor.
    :param model_tag: A str tag to use for the model for saving results
        under save-dir, defaults to the model arch and dataset used.
    :param save_dir: The path to the directory for saving results,
        default="pytorch_vision".
    :param device: str representing the device to run on
        (can also include ids for data parallel), ex:{cpu, cuda, cuda:0,1}.
    :param loader_num_workers: int number of workers to use for data
        loading, default=4.
    :param loader_pin_memory: bool to use pinned memory for data loading,
        default=True.
    :param checkpoint_path: A path to a previous checkpoint to load the state
        from and resume the state for; Also works with SparseZoo recipes;
        Set to zoo to automatically download and load weights associated with
        a recipe.
    :param steps_per_measurement: The number of steps (batches) to run for
        each measurement
    :param batch_size: The batch size to use for analysis
    :param approximate: approximate without running data through the model
        (uses one shot analysis if --approximate not passed)
    """

    arch_key: str = field(
        metadata={
            "help": "The type of model to use, ex: resnet50, vgg16, mobilenet "
            "put as help to see the full list (will raise an exception "
            "with the list)",
        }
    )
    dataset: str = field(
        metadata={
            "help": "The dataset to use for training, "
            "ex: imagenet, imagenette, cifar10, etc. "
            "Set to imagefolder for a generic dataset setup "
            "with an image folder structure setup like imagenet or "
            "loadable by a dataset in sparseml.pytorch.datasets"
        }
    )
    dataset_path: str = field(
        metadata={
            "help": "The root path to where the dataset is stored",
        }
    )
    pretrained: str = field(
        default=True,
        metadata={
            "help": "The type of pretrained weights to use, "
            "default is true to load the default pretrained weights "
            "for the model. Otherwise should be set to the desired "
            "weights type: [base, optim, optim-perf]. To not load any"
            " weights set to one of [none, false]"
        },
    )
    pretrained_dataset: str = field(
        default=None,
        metadata={
            "help": "The dataset to load pretrained weights for if pretrained"
            "is set. Default is None which will load the default "
            "dataset for the architecture. Ex can be set to imagenet,"
            "cifar10, etc",
        },
    )
    model_kwargs: json.loads = field(
        default_factory=lambda: {},
        metadata={
            "help": "Keyword arguments to be passed to model constructor, "
            "should be given as a json object"
        },
    )
    dataset_kwargs: json.loads = field(
        default_factory=lambda: {},
        metadata={
            "help": "Keyword arguments to be passed to dataset constructor, "
            "should be given as a json object",
        },
    )
    model_tag: str = field(
        default=None,
        metadata={
            "help": "A tag to use for the model for saving results under "
            "save-dir, defaults to the model arch and dataset used",
        },
    )
    save_dir: str = field(
        default="pytorch_vision",
        metadata={
            "help": "The path to the directory for saving results",
        },
    )
    device: str = field(
        default=default_device(),
        metadata={
            "help": "The device to run on (can also include ids for "
            "data parallel), ex: cpu, cuda, cuda:0,1"
        },
    )
    loader_num_workers: int = field(
        default=4, metadata={"help": "The number of workers to use for data loading"}
    )
    loader_pin_memory: bool = field(
        default=True, metadata={"help": "Use pinned memory for data loading"}
    )
    checkpoint_path: str = field(
        default=None,
        metadata={
            "help": "A path to a previous checkpoint to load the state from "
            "and resume the state for. If provided, pretrained will "
            "be ignored. If using a SparseZoo recipe, can also "
            "provide 'zoo' to load the base weights associated with "
            "that recipe"
        },
    )
    steps_per_measurement: int = field(
        default=15,
        metadata={"help": "The number of steps (batches) to run for each measurement"},
    )
    batch_size: int = field(
        default=64, metadata={"help": "The batch size to use for analysis"}
    )
    approximate: Optional[bool] = field(
        default=False,
        metadata={
            "help": "approximate without running data through the model"
            "(uses one shot analysis if --approximate not passed)",
        },
    )

    def __post_init__(self):
        """Derive dataset preprocessing defaults and fixed process fields."""
        # COCO/VOC detection datasets need architecture-specific
        # preprocessing; infer it from the arch key unless the caller
        # already supplied one in dataset_kwargs
        if "preprocessing_type" not in self.dataset_kwargs and (
            "coco" in self.dataset.lower() or "voc" in self.dataset.lower()
        ):
            if "ssd" in self.arch_key.lower():
                self.dataset_kwargs["preprocessing_type"] = "ssd"
            elif "yolo" in self.arch_key.lower():
                self.dataset_kwargs["preprocessing_type"] = "yolo"
        # analysis always runs as the main (non-distributed) process
        self.is_main_process = True
        self.local_rank = -1
        self.rank = -1
The provided code snippet includes necessary dependencies for implementing the `pruning_loss_sensitivity` function. Write a Python function `def pruning_loss_sensitivity( args: PRAnalysisArguments, model, train_loader: DataLoader, save_dir: str, loggers: List[Any], ) -> None` to solve the following problem:
Utility function for pruning sensitivity analysis :param args : A PRAnalysisArguments object containing config for current analysis :param model: loaded model architecture to analyse :param train_loader: A DataLoader for training data :param save_dir: Directory to save results :param loggers: List of loggers to use during analysis
Here is the function:
def pruning_loss_sensitivity(
    args: PRAnalysisArguments,
    model,
    train_loader: DataLoader,
    save_dir: str,
    loggers: List[Any],
) -> None:
    """
    Utility function for pruning sensitivity analysis

    :param args : A PRAnalysisArguments object containing config for current
        analysis
    :param model: loaded model architecture to analyse
    :param train_loader: A DataLoader for training data
    :param save_dir: Directory to save results
    :param loggers: List of loggers to use during analysis
    """
    # loss setup -- only needed when actually running data through the model
    if not args.approximate:
        loss = utils.get_loss_wrapper(args)
        LOGGER.info(f"created loss: {loss}")
    else:
        loss = None

    # device setup
    if not args.approximate:
        module, device, device_ids = model_to_device(model, args.device)
    else:
        device = None

    # kernel sparsity analysis: magnitude-based approximation or a one-shot
    # run over real data
    if args.approximate:
        analysis = pruning_loss_sens_magnitude(model)
    else:
        analysis = pruning_loss_sens_one_shot(
            model,
            train_loader,
            loss,
            device,
            args.steps_per_measurement,
            tester_loggers=loggers,
        )

    # saving and printing results
    LOGGER.info("completed...")
    LOGGER.info(f"Saving results in {save_dir}")
    analysis.save_json(
        os.path.join(
            save_dir,
            "ks_approx_sensitivity.json"
            if args.approximate
            else "ks_one_shot_sensitivity.json",
        )
    )
    # FIX: the plot path was previously joined with save_dir twice
    # (os.path.join(save_dir, os.path.join(save_dir, ...))), writing the
    # plot to save_dir/save_dir/...; join only once
    analysis.plot(
        os.path.join(
            save_dir,
            "ks_approx_sensitivity.png"
            if args.approximate
            else "ks_one_shot_sensitivity.png",
        ),
        plot_integral=True,
    )
    analysis.print_res()
21,676 | import json
import os
from dataclasses import dataclass, field
from torch.nn import Module
from torch.optim import SGD
from torch.utils.data import DataLoader
import utils
from argparser_.nm_argparser_ import NmArgumentParser
from sparseml import get_main_logger
from sparseml.pytorch.models import ModelRegistry
from sparseml.pytorch.optim import default_exponential_check_lrs, lr_loss_sensitivity
from sparseml.pytorch.utils import PythonLogger, default_device, model_to_device
LOGGER = get_main_logger()
The provided code snippet includes necessary dependencies for implementing the `lr_sensitivity` function. Write a Python function `def lr_sensitivity( args, model: Module, train_loader: DataLoader, save_dir: str, ) -> None` to solve the following problem:
Utility function to run learning rate sensitivity analysis :param args: An LRAnalysisArguments object containing config for current LR analysis task. :param model: loaded model architecture to analyse :param train_loader: A DataLoader for training data :param save_dir: Directory to save results
Here is the function:
def lr_sensitivity(
    args,
    model: Module,
    train_loader: DataLoader,
    save_dir: str,
) -> None:
    """
    Run a learning rate sensitivity analysis and persist the results.

    :param args: An LRAnalysisArguments object containing config for current
        LR analysis task.
    :param model: loaded model architecture to analyse
    :param train_loader: A DataLoader for training data
    :param save_dir: Directory to save results
    """
    # optimizer setup
    optimizer = SGD(model.parameters(), lr=args.init_lr, **args.optim_args)
    LOGGER.info(f"created optimizer: {optimizer}")

    # loss setup
    loss_wrapper = utils.get_loss_wrapper(args.arch_key)
    LOGGER.info(f"created loss: {loss_wrapper}")

    # device setup
    model, device, device_ids = model_to_device(model, args.device)

    # sweep learning rates on an exponential schedule, measuring loss
    LOGGER.info(f"running analysis: {loss_wrapper}")
    analysis = lr_loss_sensitivity(
        model,
        train_loader,
        loss_wrapper,
        optimizer,
        device,
        args.steps_per_measurement,
        check_lrs=default_exponential_check_lrs(args.init_lr, args.final_lr),
        trainer_loggers=[PythonLogger()],
    )

    # saving and printing results
    LOGGER.info("completed...")
    LOGGER.info(f"Saving results in {save_dir}")
    analysis.save_json(os.path.join(save_dir, "lr_sensitivity.json"))
    analysis.plot(os.path.join(save_dir, "lr_sensitivity.png"))
    analysis.print_res()
21,677 | import dataclasses
import json
import re
import sys
from argparse import ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from pathlib import Path
from typing import Any, Iterable, List, NewType, Optional, Tuple, Union
def string_to_bool(v):
    """
    Interpret ``v`` as a boolean command-line flag value.

    :param v: a bool (returned unchanged) or a string such as "yes"/"no",
        "true"/"false", "t"/"f", "y"/"n", "1"/"0" (case insensitive)
    :return: the parsed boolean
    :raises ArgumentTypeError: if the string is not a recognized value
    """
    if isinstance(v, bool):
        return v
    normalized = v.lower()
    if normalized in ("yes", "true", "t", "y", "1"):
        return True
    if normalized in ("no", "false", "f", "n", "0"):
        return False
    raise ArgumentTypeError(
        f"Truthy value expected: got {v} but expected one of yes/no,"
        f"true/false, t/f, y/n, 1/0 (case insensitive)."
    )
21,678 | import argparse
import os
import time
from types import ModuleType
import torch
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision import models
from sparseml.pytorch.datasets.classification import ImageFolderDataset
from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer
from sparseml.pytorch.utils import ModuleExporter, PythonLogger, load_model
from sparseml.utils import create_dirs
# Known exceptions to torchvision's default 224x224 input resolution
MODEL_IMAGE_SIZES = {
    "inception_v3": 299,
}


def _parse_bool(value):
    """argparse ``type=`` callable for booleans.

    ``type=bool`` is a classic argparse pitfall: any non-empty string
    (including "False") is truthy, so ``--flag False`` silently becomes True.
    Parse the common spellings explicitly instead.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ("yes", "true", "t", "y", "1"):
        return True
    if value.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected, got {}".format(value))


def parse_args(argv=None):
    """Parse CLI arguments for training/finetuning a torchvision model.

    :param argv: optional list of argument strings to parse instead of
        ``sys.argv`` (useful for testing); defaults to None
    :return: parsed argparse Namespace; ``image_size`` is filled in from
        MODEL_IMAGE_SIZES (or 224) when not given explicitly
    """
    parser = argparse.ArgumentParser(
        description="Train or finetune an image classification model from "
        "torchvision.models"
    )

    # model args
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help=(
            "The torchvision model class to use, ex: inception_v3, resnet50, "
            "mobilenet_v2 model name is fed directly to torchvision.models, "
            "more information can be found here "
            "https://pytorch.org/docs/stable/torchvision/models.html"
        ),
    )
    parser.add_argument(
        "--recipe-path",
        type=str,
        required=True,
        help="The path to the yaml file containing the sparseml modifiers and "
        "schedule to apply them with",
    )
    parser.add_argument(
        "--image-size",
        type=int,
        required=False,
        default=None,
        help=(
            "Size of image to use for model input. Default is 224 unless pytorch "
            "documentation specifies otherwise"
        ),
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        required=False,
        default=32,
        help="Batch size to use when training model. Default is 32",
    )
    parser.add_argument(
        "--pretrained",
        # fix: was type=bool, which parsed "--pretrained False" as True
        type=_parse_bool,
        default=True,
        help="Set True to use torchvisions pretrained weights,"
        " to not set weights, set False. default is true.",
    )
    parser.add_argument(
        "--checkpoint-path",
        type=str,
        default=None,
        help="A path to a previous checkpoint to load the state from and "
        "resume the state for. If provided, pretrained will be ignored",
    )

    # dataset args
    parser.add_argument(
        "--imagefolder-path",
        type=str,
        required=True,
        help="Path to root of dataset's generic 'image folder' path. Should have "
        "an image folder structure like imagenet with subdirectories 'train' and 'val'"
        " see https://pytorch.org/docs/stable/torchvision/datasets.html#imagefolder",
    )
    parser.add_argument(
        "--loader-num-workers",
        type=int,
        default=4,
        help="The number of workers to use for data loading",
    )
    parser.add_argument(
        "--loader-pin-memory",
        # fix: was type=bool (same truthy-string pitfall as --pretrained)
        type=_parse_bool,
        default=True,
        help="Use pinned memory for data loading",
    )

    # logging and saving
    parser.add_argument(
        "--model-tag",
        type=str,
        default=None,
        help="A tag to use for the model for saving results under save-dir, "
        "defaults to the model arch and dataset used",
    )
    parser.add_argument(
        "--save-dir",
        type=str,
        default="torchvision_sparseml_export",
        help="The path to the directory for saving results",
    )

    args = parser.parse_args(argv)
    if args.image_size is None:
        args.image_size = (
            MODEL_IMAGE_SIZES[args.model] if args.model in MODEL_IMAGE_SIZES else 224
        )
    return args
21,679 | import argparse
import os
import time
from types import ModuleType
import torch
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision import models
from sparseml.pytorch.datasets.classification import ImageFolderDataset
from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer
from sparseml.pytorch.utils import ModuleExporter, PythonLogger, load_model
from sparseml.utils import create_dirs
def _load_matched_weights(base_model, pretrained_model):
base_dict = base_model.state_dict()
pretrained_dict = pretrained_model.state_dict()
for key in base_dict:
if (
key in pretrained_dict
and base_dict[key].shape == pretrained_dict[key].shape
):
base_dict[key] = pretrained_dict[key]
base_model.load_state_dict(base_dict)
def _get_torchvision_model(name, num_classes, pretrained=True, checkpoint_path=None):
    """Build a torchvision model by name.

    Optionally seeds it with torchvision's ImageNet-pretrained weights
    (shape-matched when num_classes differs from 1000) and/or a local
    checkpoint; a checkpoint takes precedence over pretrained weights.
    """
    constructor = getattr(models, name, None)
    if constructor is None or isinstance(constructor, ModuleType):
        # either the name is unknown, or it resolved to a torchvision
        # submodule rather than a model factory function
        raise ValueError("Torchvision model {} not found".format(name))
    model = constructor(pretrained=False, num_classes=num_classes)
    if pretrained and not checkpoint_path:
        reference = constructor(pretrained=True, num_classes=1000)
        if num_classes == 1000:
            # head shapes already line up, use the pretrained model directly
            model = reference
        else:
            _load_matched_weights(model, reference)
        del reference
    if checkpoint_path is not None:
        load_model(checkpoint_path, model)
    return model
21,680 | import argparse
import os
import time
from types import ModuleType
import torch
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision import models
from sparseml.pytorch.datasets.classification import ImageFolderDataset
from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer
from sparseml.pytorch.utils import ModuleExporter, PythonLogger, load_model
from sparseml.utils import create_dirs
def _create_imagefolder_dataloader(args, train=True):
    """Build an ImageFolder dataset plus DataLoader from the parsed CLI args.

    :param args: namespace providing imagefolder_path, image_size, batch_size,
        loader_num_workers, loader_pin_memory
    :param train: True for the training split (also enables random transforms)
    :return: tuple of (loader, number of classes, input image shape)
    """
    folder_dataset = ImageFolderDataset(
        root=args.imagefolder_path,
        train=train,
        rand_trans=train,
        image_size=args.image_size,
    )
    data_loader = DataLoader(
        folder_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.loader_num_workers,
        pin_memory=args.loader_pin_memory,
    )
    # probe one sample so callers know the model's expected input shape
    input_shape = folder_dataset[0][0].shape
    return data_loader, folder_dataset.num_classes, input_shape
21,681 | import argparse
import os
import time
from types import ModuleType
import torch
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision import models
from sparseml.pytorch.datasets.classification import ImageFolderDataset
from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer
from sparseml.pytorch.utils import ModuleExporter, PythonLogger, load_model
from sparseml.utils import create_dirs
# Version guard: patch or refuse torch versions with known sparseml issues.
# NOTE(review): relies on `version` (presumably packaging.version), `warnings`,
# and `os` being imported at module scope — confirm against the full file.
try:
    import torch
    _PARSED_TORCH_VERSION = version.parse(torch.__version__)
    if _PARSED_TORCH_VERSION.major >= 2:
        # wrap torch.compile so any use under torch 2.x emits a warning first,
        # then still delegates to the original implementation
        torch_compile_func = torch.compile
        def raise_torch_compile_warning(*args, **kwargs):
            warnings.warn("torch.compile is not supported by sparseml for torch 2.0.x")
            return torch_compile_func(*args, **kwargs)
        torch.compile = raise_torch_compile_warning
    # escape hatch: NM_BYPASS_TORCH_VERSION=1 downgrades the hard error below
    _BYPASS = bool(int(os.environ.get("NM_BYPASS_TORCH_VERSION", "0")))
    if _PARSED_TORCH_VERSION.major == 1 and _PARSED_TORCH_VERSION.minor in [10, 11]:
        if not _BYPASS:
            raise RuntimeError(
                "sparseml does not support torch==1.10.* or 1.11.*. "
                f"Found torch version {torch.__version__}.\n\n"
                "To bypass this error, set environment variable "
                "`NM_BYPASS_TORCH_VERSION` to '1'.\n\n"
                "Bypassing may result in errors or "
                "incorrect behavior, so set at your own risk."
            )
        else:
            warnings.warn(
                "sparseml quantized onnx export does not work "
                "with torch==1.10.* or 1.11.*"
            )
except ImportError:
    # torch not installed: nothing to guard
    pass
def train_model(
    model, dataloaders, criterion, optimizer, device, num_epochs=25, is_inception=False
):
    """Run a standard train/validation loop over ``dataloaders``.

    :param model: module to train in place
    :param dataloaders: dict with "train" and "val" DataLoaders
    :param criterion: loss function applied to (outputs, labels)
    :param optimizer: optimizer stepped during the train phase only
    :param device: device inputs and labels are moved to each batch
    :param num_epochs: number of epochs to run
    :param is_inception: True to add Inception-v3's 0.4-weighted auxiliary loss
    :return: tuple of (model, list of per-epoch validation accuracies)
    """
    since = time.time()
    val_acc_history = []
    # not loading best intermediate weights due to sparsity changing
    # best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print("Epoch {}/{}".format(epoch, num_epochs - 1))
        print("-" * 10)
        # Each epoch has a training and validation phase
        for phase in ["train", "val"]:
            if phase == "train":
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == "train"):
                    # Get model outputs and calculate loss
                    # Special case for inception because in training it has an
                    # auxiliary output. In train mode we calculate the loss by summing
                    # the final output and the auxiliary output but in testing we
                    # only consider the final output.
                    if is_inception and phase == "train":
                        # From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958 # noqa
                        outputs, aux_outputs = model(inputs)
                        loss1 = criterion(outputs, labels)
                        loss2 = criterion(aux_outputs, labels)
                        loss = loss1 + 0.4 * loss2
                    else:
                        outputs = model(inputs)
                        loss = criterion(outputs, labels)
                    _, preds = torch.max(outputs, 1)
                    # backward + optimize only if in training phase
                    if phase == "train":
                        loss.backward()
                        optimizer.step()
                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
            print("{} Loss: {:.4f} Acc: {:.4f}".format(phase, epoch_loss, epoch_acc))
            # deep copy the model
            if phase == "val" and epoch_acc > best_acc:
                best_acc = epoch_acc
                # not loading best intermediate weights due to sparsity changing
                # best_model_wts = copy.deepcopy(model.state_dict())
            if phase == "val":
                val_acc_history.append(epoch_acc)
        print()
    time_elapsed = time.time() - since
    print(
        "Training complete in {:.0f}m {:.0f}s".format(
            time_elapsed // 60, time_elapsed % 60
        )
    )
    print("Best val Acc: {:4f}".format(best_acc))
    # load best model weights
    # not loading best intermediate weights due to sparsity changing
    # model.load_state_dict(best_model_wts)
    return model, val_acc_history
21,682 | import argparse
import os
import time
from types import ModuleType
import torch
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision import models
from sparseml.pytorch.datasets.classification import ImageFolderDataset
from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer
from sparseml.pytorch.utils import ModuleExporter, PythonLogger, load_model
from sparseml.utils import create_dirs
def _save_recipe(
    recipe_manager: ScheduledModifierManager,
    save_dir: str,
):
    """Serialize the recipe manager to ``<save_dir>/recipe.yaml`` and report
    where it was written."""
    destination = os.path.join(save_dir, "recipe.yaml")
    recipe_manager.save(destination)
    print(f"Saved recipe to {destination}")
21,683 | import argparse
import json
import math
import os
from typing import Dict, Optional, Tuple
import numpy
from sparseml import get_main_logger
from sparseml.tensorflow_v1.datasets import (
Dataset,
DatasetRegistry,
create_split_iterators_handle,
)
from sparseml.tensorflow_v1.models import ModelRegistry
from sparseml.tensorflow_v1.optim import (
ConstantPruningModifier,
ScheduledModifierManager,
pruning_loss_sens_magnitude,
pruning_loss_sens_one_shot,
pruning_loss_sens_op_vars,
)
from sparseml.tensorflow_v1.utils import (
GraphExporter,
accuracy,
batch_cross_entropy_loss,
tf_compat,
write_simple_summary,
)
from sparseml.utils import create_dirs
# Sub-command names shared with the dispatch logic elsewhere in this script.
# NOTE: PRUNING_SENSITVITY_COMMAND keeps its historical (misspelled) name so
# existing references elsewhere in the file keep working.
TRAIN_COMMAND = "train"
EXPORT_COMMAND = "export"
PRUNING_SENSITVITY_COMMAND = "pruning_sensitivity"


def parse_args(args=None):
    """Parse CLI arguments for the train/export/pruning_sensitivity tasks.

    :param args: optional list of argument strings to parse instead of
        ``sys.argv`` (useful for testing); defaults to None
    :return: the parsed argparse Namespace; ``command`` holds the chosen
        sub-command name
    """
    parser = argparse.ArgumentParser(
        description="Run tasks on classification models and datasets "
        "using the sparseml API"
    )

    subparsers = parser.add_subparsers(dest="command")

    train_parser = subparsers.add_parser(
        TRAIN_COMMAND,
        description="Train and/or prune an image classification model",
    )
    export_parser = subparsers.add_parser(
        EXPORT_COMMAND,
        description="Export a model to onnx as well as "
        "store sample inputs, outputs, and labels",
    )
    pruning_sensitivity_parser = subparsers.add_parser(
        PRUNING_SENSITVITY_COMMAND,
        description="Run a kernel sparsity (pruning) analysis for a given model",
    )

    parsers = [
        train_parser,
        export_parser,
        pruning_sensitivity_parser,
    ]
    for par in parsers:
        # general arguments
        # model args
        par.add_argument(
            "--arch-key",
            type=str,
            required=True,
            help="The type of model to use, ex: resnet50, vgg16, mobilenet "
            "put as help to see the full list (will raise an exception with the list)",
        )
        par.add_argument(
            "--pretrained",
            type=str,
            default=True,
            help="The type of pretrained weights to use, "
            "default is true to load the default pretrained weights for the model. "
            "Otherwise should be set to the desired weights type: "
            "[base, optim, optim-perf]. "
            "To not load any weights set to one of [none, false]",
        )
        par.add_argument(
            "--pretrained-dataset",
            type=str,
            default=None,
            help="The dataset to load pretrained weights for if pretrained is set. "
            "Default is None which will load the default dataset for the architecture."
            " Ex can be set to imagenet, cifar10, etc",
        )
        par.add_argument(
            "--checkpoint-path",
            type=str,
            default=None,
            help="A path to a previous checkpoint to load the state from and "
            "resume the state for. If provided, pretrained will be ignored",
        )
        par.add_argument(
            "--model-kwargs",
            type=json.loads,
            default={},
            # fix: help text previously read "kew word arguments"
            help="keyword arguments to be passed to model constructor, should be "
            " given as a json object",
        )

        # dataset args
        par.add_argument(
            "--dataset",
            type=str,
            required=True,
            help="The dataset to use for training, "
            "ex: imagenet, imagenette, cifar10, etc. "
            "Set to imagefolder for a generic dataset setup "
            "with an image folder structure setup like imagenet or loadable by a "
            "dataset in sparseml.tensorflow_v1.datasets",
        )
        par.add_argument(
            "--dataset-path",
            type=str,
            required=True,
            help="The root path to where the dataset is stored",
        )
        par.add_argument(
            "--dataset-kwargs",
            type=json.loads,
            default={},
            # fix: help text previously read "kew word arguments"
            help="keyword arguments to be passed to dataset constructor, should be "
            " given as a json object",
        )

        # logging and saving
        par.add_argument(
            "--model-tag",
            type=str,
            default=None,
            help="A tag to use for the model for saving results under save-dir, "
            "defaults to the model arch and dataset used",
        )
        par.add_argument(
            "--save-dir",
            type=str,
            default="tensorflow_v1_classification",
            help="The path to the directory for saving results",
        )

        # task specific arguments
        if par in [train_parser, pruning_sensitivity_parser]:
            par.add_argument(
                "--dataset-parallel-calls",
                type=int,
                default=4,
                help="the number of parallel workers for dataset loading",
            )
            par.add_argument(
                "--shuffle-buffer-size",
                type=int,
                default=1000,
                help="Shuffle buffer size for dataset loading",
            )

        if par == train_parser:
            par.add_argument(
                "--recipe-path",
                type=str,
                default=None,
                help="The path to the yaml file containing the modifiers and "
                "schedule to apply them with. If set to 'transfer_learning', "
                "then will create a schedule to enable sparse transfer learning",
            )
            par.add_argument(
                "--sparse-transfer-learn",
                action="store_true",
                help=(
                    "Enable sparse transfer learning modifiers to enforce the sparsity "
                    "for already sparse layers. The modifiers are added to the "
                    "ones to be loaded from the recipe-path"
                ),
            )
            par.add_argument(
                "--eval-mode",
                action="store_true",
                help="Puts into evaluation mode so that the model can be "
                "evaluated on the desired dataset",
            )
            par.add_argument(
                "--train-batch-size",
                type=int,
                required=True,
                help="The batch size to use while training",
            )
            par.add_argument(
                "--test-batch-size",
                type=int,
                required=True,
                help="The batch size to use while testing",
            )
            par.add_argument(
                "--logs-dir",
                type=str,
                default=os.path.join(
                    "tensorflow_v1_classification_train", "tensorboard-logs"
                ),
                help="The path to the directory for saving logs",
            )
            par.add_argument(
                "--save-best-after",
                type=int,
                default=-1,
                help="start saving the best validation result after the given "
                "epoch completes until the end of training",
            )
            par.add_argument(
                "--save-epochs",
                type=int,
                default=[],
                nargs="+",
                help="epochs to save checkpoints at",
            )
            par.add_argument(
                "--init-lr",
                type=float,
                default=1e-9,
                help="The initial learning rate to use while training, "
                "the actual initial value used should be set by the sparseml recipe",
            )
            par.add_argument(
                "--optim-args",
                type=json.loads,
                default={},
                help="Additional args to be passed to the optimizer passed in"
                " as a json object",
            )

        if par == export_parser:
            par.add_argument(
                "--num-samples",
                type=int,
                default=100,
                help="The number of samples to export along with the model onnx "
                "and pth files (sample inputs and labels as well as the outputs "
                "from model execution)",
            )
            par.add_argument(
                "--onnx-opset",
                type=int,
                default=11,
                help="The onnx opset to use for export. Default is 11",
            )

        if par == pruning_sensitivity_parser:
            par.add_argument(
                "--approximate",
                action="store_true",
                help="True to approximate without running data through the model, "
                "otherwise will run a one shot analysis",
            )
            par.add_argument(
                "--steps-per-measurement",
                type=int,
                default=15,
                help="The number of steps (batches) to run for each measurement",
            )
            par.add_argument(
                "--batch-size",
                type=int,
                default=64,
                help="The batch size to use while performing analysis",
            )

    return parser.parse_args(args)
21,684 | import argparse
import json
import math
import os
from typing import Dict, Optional, Tuple
import numpy
from sparseml import get_main_logger
from sparseml.tensorflow_v1.datasets import (
Dataset,
DatasetRegistry,
create_split_iterators_handle,
)
from sparseml.tensorflow_v1.models import ModelRegistry
from sparseml.tensorflow_v1.optim import (
ConstantPruningModifier,
ScheduledModifierManager,
pruning_loss_sens_magnitude,
pruning_loss_sens_one_shot,
pruning_loss_sens_op_vars,
)
from sparseml.tensorflow_v1.utils import (
GraphExporter,
accuracy,
batch_cross_entropy_loss,
tf_compat,
write_simple_summary,
)
from sparseml.utils import create_dirs
# Shared module-level logger for this script's CLI commands
LOGGER = get_main_logger()
# Sub-command name checked by _setup_save_dirs to decide whether logs_dir applies
TRAIN_COMMAND = "train"
def _setup_save_dirs(args) -> Tuple[str, Optional[str]]:
    """Resolve and create the results directory, plus the tensorboard logs
    directory when running the train command.

    A unique model id is taken from ``--model-tag`` or derived from the
    arch/dataset pair (with a numeric suffix bumped until unused), and
    appended to both paths.

    :return: tuple of (save_dir, logs_dir); logs_dir is None outside training
    """
    save_dir = os.path.abspath(os.path.expanduser(args.save_dir))
    is_training = args.command == TRAIN_COMMAND
    logs_dir = (
        os.path.abspath(os.path.expanduser(os.path.join(args.logs_dir)))
        if is_training
        else None
    )

    if args.model_tag:
        model_id = args.model_tag
    else:
        # auto-generate a tag and bump a numeric suffix until it is unused
        model_tag = "{}_{}".format(args.arch_key.replace("/", "."), args.dataset)
        model_id = model_tag
        model_inc = 0
        # set location to check for models with same name
        model_main_dir = logs_dir or save_dir
        while os.path.exists(os.path.join(model_main_dir, model_id)):
            model_inc += 1
            model_id = "{}__{:02d}".format(model_tag, model_inc)

    save_dir = os.path.join(save_dir, model_id)
    create_dirs(save_dir)

    if is_training:
        logs_dir = os.path.join(logs_dir, model_id)
        create_dirs(logs_dir)
    else:
        logs_dir = None

    LOGGER.info("Model id is set to {}".format(model_id))
    return save_dir, logs_dir
21,685 | import argparse
import json
import math
import os
from typing import Dict, Optional, Tuple
import numpy
from sparseml import get_main_logger
from sparseml.tensorflow_v1.datasets import (
Dataset,
DatasetRegistry,
create_split_iterators_handle,
)
from sparseml.tensorflow_v1.models import ModelRegistry
from sparseml.tensorflow_v1.optim import (
ConstantPruningModifier,
ScheduledModifierManager,
pruning_loss_sens_magnitude,
pruning_loss_sens_one_shot,
pruning_loss_sens_op_vars,
)
from sparseml.tensorflow_v1.utils import (
GraphExporter,
accuracy,
batch_cross_entropy_loss,
tf_compat,
write_simple_summary,
)
from sparseml.utils import create_dirs
# Shared module-level logger used by the dataset/model helpers below
LOGGER = get_main_logger()
def _create_dataset(args, train=True, image_size=None) -> Tuple[Dataset, int]:
    """Instantiate the dataset named by ``args.dataset`` and report its class
    count.

    An ``image_size`` supplied through ``--dataset-kwargs`` overrides the
    ``image_size`` argument.

    :return: tuple of (dataset, num_classes)
    """
    dataset_kwargs = args.dataset_kwargs
    # an explicit image_size in the CLI kwargs wins over the caller's value
    image_size = dataset_kwargs.pop("image_size", image_size)
    dataset = DatasetRegistry.create(
        args.dataset,
        root=args.dataset_path,
        train=train,
        image_size=image_size,
        **dataset_kwargs,
    )
    split_name = "train" if train else "val"
    LOGGER.info("created {} dataset: {}".format(split_name, dataset))
    if args.dataset == "imagefolder":
        # generic image-folder datasets discover their classes on disk
        num_classes = dataset.num_classes
    else:
        num_classes = DatasetRegistry.attributes(args.dataset)["num_classes"]
    return dataset, num_classes
def _build_dataset(args, dataset: Dataset, batch_size: int) -> Dataset:
    """Materialize the input pipeline: shuffle, batch, and prefetch per the
    CLI-provided loader settings."""
    build_kwargs = dict(
        shuffle_buffer_size=args.shuffle_buffer_size,
        prefetch_buffer_size=batch_size,
        num_parallel_calls=args.dataset_parallel_calls,
    )
    return dataset.build(batch_size, **build_kwargs)
def _create_model(args, num_classes, inputs, training=False):
    """Build the model graph for ``inputs`` via the ModelRegistry and return
    its output tensor(s)."""
    model_outputs = ModelRegistry.create(
        args.arch_key,
        inputs,
        training=training,
        num_classes=num_classes,
        **args.model_kwargs,
    )
    LOGGER.info("created model {}".format(args.arch_key))
    return model_outputs
def _load_model(args, sess, checkpoint_path=None):
    """Initialize all graph variables in ``sess`` then load model weights.

    Weights come from ``checkpoint_path`` when given (falling back to
    ``args.checkpoint_path``), otherwise from the registry's pretrained
    weights selected by ``args.pretrained`` / ``args.pretrained_dataset``.
    """
    # variables must be initialized before any weight-assignment ops run
    sess.run(
        [
            tf_compat.global_variables_initializer(),
            tf_compat.local_variables_initializer(),
        ]
    )
    checkpoint_path = checkpoint_path or args.checkpoint_path
    ModelRegistry.load_pretrained(
        args.arch_key,
        pretrained=args.pretrained,
        pretrained_dataset=args.pretrained_dataset,
        pretrained_path=checkpoint_path,
        sess=sess,
    )
    if checkpoint_path:
        LOGGER.info("Loaded model weights from checkpoint: {}".format(checkpoint_path))
def train(args, save_dir, logs_dir):
    """Train (and/or prune) the model per the CLI args and SparseML recipe.

    Builds train/val input pipelines, constructs the model graph, runs the
    recipe's modifier ops each step, logs to tensorboard under ``logs_dir``,
    checkpoints the best validation loss into ``save_dir``, and finishes with
    an ONNX export of the completed graph.

    :param args: parsed CLI namespace for the ``train`` sub-command
    :param save_dir: directory for the recipe copy and checkpoints
    :param logs_dir: directory for tensorboard event files
    """
    # setup dataset
    with tf_compat.device("/cpu:0"):
        train_dataset, _ = _create_dataset(args, train=True)
        val_dataset, num_classes = _create_dataset(args, train=False)
        # calc steps
        train_steps = math.ceil(len(train_dataset) / args.train_batch_size)
        val_steps = math.ceil(len(val_dataset) / args.test_batch_size)
        # build datasets
        train_dataset = _build_dataset(args, train_dataset, args.train_batch_size)
        val_dataset = _build_dataset(args, val_dataset, args.test_batch_size)
    handle, iterator, (train_iter, val_iter) = create_split_iterators_handle(
        [train_dataset, val_dataset]
    )

    # set up model graph
    images, labels = iterator.get_next()
    training = tf_compat.placeholder(dtype=tf_compat.bool, shape=[])
    outputs = _create_model(args, num_classes, images, training)

    # set up training objects
    loss = batch_cross_entropy_loss(outputs, labels)
    acc = accuracy(outputs, labels)
    global_step = tf_compat.train.get_or_create_global_step()
    train_op = tf_compat.train.AdamOptimizer(
        learning_rate=args.init_lr, **args.optim_args
    ).minimize(loss, global_step=global_step)
    update_ops = tf_compat.get_collection(tf_compat.GraphKeys.UPDATE_OPS)
    LOGGER.info("Created update ops for training")

    # set up sparseml modifier ops
    add_mods = (
        ConstantPruningModifier(params="__ALL__")
        if args.sparse_transfer_learn
        else None
    )
    manager = ScheduledModifierManager.from_yaml(
        file_path=args.recipe_path, add_modifiers=add_mods
    )
    mod_ops, mod_extras = manager.create_ops(train_steps, global_step)
    _save_recipe(recipe_manager=manager, save_dir=save_dir)

    with tf_compat.Session() as sess:
        # set up tensorboard logging
        summary_writer = tf_compat.summary.FileWriter(logs_dir, sess.graph)
        summaries = tf_compat.summary.merge_all()
        LOGGER.info("Logging to tensorboard at {}".format(logs_dir))

        # initialize variables, load pretrained weights, initialize modifiers
        train_iter_handle, val_iter_handle = sess.run(
            [train_iter.string_handle(), val_iter.string_handle()]
        )
        LOGGER.info("Initialized graph variables")
        _load_model(args, sess)
        manager.initialize_session()
        LOGGER.info("Initialized SparseML modifiers")

        best_loss = None
        for epoch in range(manager.max_epochs):
            # train
            LOGGER.info("Training for epoch {}...".format(epoch))
            sess.run(train_iter.initializer)
            train_acc, train_loss = [], []
            for step in range(train_steps):
                _, __, meas_step, meas_loss, meas_acc, meas_summ = sess.run(
                    [train_op, update_ops, global_step, loss, acc, summaries],
                    feed_dict={handle: train_iter_handle, training: True},
                )
                if step >= train_steps - 1:
                    # log the general summaries on the last training step
                    summary_writer.add_summary(meas_summ, meas_step)
                # run modifier ops
                sess.run(mod_ops)
                # summarize
                write_simple_summary(summary_writer, "Train/Loss", meas_loss, meas_step)
                write_simple_summary(
                    summary_writer, "Train/Acc", meas_acc * 100.0, meas_step
                )
                train_acc.append(meas_acc)
                train_loss.append(meas_loss)
            LOGGER.info(
                "Epoch {} - Train Loss: {}, Train Acc: {}".format(
                    epoch, numpy.mean(train_loss).item(), numpy.mean(train_acc).item()
                )
            )

            # val
            LOGGER.info("Validating for epoch {}...".format(epoch))
            sess.run(val_iter.initializer)
            val_acc, val_loss = [], []
            for step in range(val_steps):
                meas_loss, meas_acc = sess.run(
                    [loss, acc],
                    feed_dict={handle: val_iter_handle, training: False},
                )
                val_acc.append(meas_acc)
                val_loss.append(meas_loss)
            write_simple_summary(
                summary_writer, "Val/Loss", numpy.mean(val_loss).item(), epoch
            )
            write_simple_summary(
                summary_writer, "Val/Acc", numpy.mean(val_acc).item(), epoch
            )
            val_loss = numpy.mean(val_loss).item()
            # fix: this log previously reported numpy.mean(train_acc) as the
            # validation accuracy
            LOGGER.info(
                "Epoch {} - Val Loss: {}, Val Acc: {}".format(
                    epoch, val_loss, numpy.mean(val_acc).item()
                )
            )

            if epoch >= args.save_best_after and (
                best_loss is None or val_loss <= best_loss
            ):
                _save_checkpoint(args, sess, save_dir, "checkpoint-best")
                best_loss = val_loss
            if args.save_epochs and epoch in args.save_epochs:
                _save_checkpoint(
                    args, sess, save_dir, "checkpoint-epoch-{}".format(epoch)
                )

        # cleanup graph and save final checkpoint
        manager.complete_graph()
        checkpoint_path = _save_checkpoint(args, sess, save_dir, "final-checkpoint")

        LOGGER.info("Running ONNX export flow")
        export(
            args,
            save_dir,
            checkpoint_path=checkpoint_path,
            skip_samples=True,
            num_classes=num_classes,
            opset=11,
        )
def pruning_loss_sensitivity(args, save_dir):
    """Run a pruning (kernel sparsity) loss-sensitivity analysis.

    With ``--approximate`` the analysis uses weight magnitudes only; otherwise
    a one-shot analysis feeds real batches through the graph. Results are
    written into ``save_dir`` as JSON plus a plot, and printed.

    :param args: parsed CLI namespace for the pruning_sensitivity sub-command
    :param save_dir: directory to write the analysis artifacts into
    """
    input_shape = ModelRegistry.input_shape(args.arch_key)
    train_dataset, num_classes = _create_dataset(
        args, train=True, image_size=input_shape[1]
    )
    with tf_compat.Graph().as_default() as graph:
        # create model graph
        inputs = tf_compat.placeholder(
            tf_compat.float32, [None] + list(input_shape), name="inputs"
        )
        outputs = _create_model(args, num_classes, inputs)

        with tf_compat.Session() as sess:
            _load_model(args, sess, checkpoint_path=args.checkpoint_path)
            if args.approximate:
                LOGGER.info("Running weight magnitude loss sensitivity analysis...")
                analysis = pruning_loss_sens_magnitude(graph, sess)
            else:
                op_vars = pruning_loss_sens_op_vars(graph)
                train_steps = math.ceil(len(train_dataset) / args.batch_size)
                train_dataset = _build_dataset(args, train_dataset, args.batch_size)
                handle, iterator, dataset_iter = create_split_iterators_handle(
                    [train_dataset]
                )
                dataset_iter = dataset_iter[0]
                images, labels = iterator.get_next()
                loss = batch_cross_entropy_loss(outputs, labels)
                tensor_names = ["inputs:0", labels.name]
                sess.run(dataset_iter.initializer)

                def feed_dict_creator(step: int) -> Dict[str, tf_compat.Tensor]:
                    # pull the next batch eagerly and map it onto the
                    # placeholder/label tensor names
                    assert step < train_steps
                    batch_data = [
                        tens.eval(session=sess) for tens in dataset_iter.get_next()
                    ]
                    return dict(zip(tensor_names, batch_data))

                LOGGER.info("Running one shot loss sensitivity analysis...")
                analysis = pruning_loss_sens_one_shot(
                    op_vars=op_vars,
                    loss_tensor=loss,
                    steps_per_measurement=args.steps_per_measurement,
                    feed_dict_creator=feed_dict_creator,
                    sess=sess,
                )
    # saving and printing results
    LOGGER.info("completed...")
    LOGGER.info("Saving results in {}".format(save_dir))
    analysis.save_json(
        os.path.join(
            save_dir,
            "ks_approx_sensitivity.json"
            if args.approximate
            else "ks_one_shot_sensitivity.json",
        )
    )
    # fix: the plot path previously nested os.path.join(save_dir, ...) twice,
    # which doubled the directory whenever save_dir was a relative path
    analysis.plot(
        os.path.join(
            save_dir,
            "ks_approx_sensitivity.png"
            if args.approximate
            else "ks_one_shot_sensitivity.png",
        ),
        plot_integral=True,
    )
    analysis.print_res()
21,686 | import argparse
from collections import OrderedDict
from pathlib import Path
from typing import Any, Union
import torch
import open_clip
from clip_models import TextModel
from sparseml.pytorch.utils import export_onnx
def _export_onnx(
module: torch.nn.Module,
sample_batch: Any,
file_path: Union[Path, str],
opset: int = 14,
**export_kwargs,
):
# _export_onnx by default uses opset = 14 as required by CLIP and will fail
# for opset < 14 as certain operators are not supported.
if opset < 14:
raise ValueError("CLIP onnx export requires a minimum opset of 14")
export_onnx(
module=module,
sample_batch=sample_batch,
opset=opset,
file_path=file_path,
**export_kwargs,
)
def _export_visual(
    model: torch.nn.Module,
    device: str,
    export_path: Union[str, Path],
    is_coca: bool,
    **export_kwargs,
):
    """Export the CLIP visual tower to ``<export_path>/clip_visual.onnx``.

    ``is_coca`` is accepted for signature parity with the other exporters;
    the visual tower export does not branch on it.
    """
    onnx_name = "clip_visual.onnx"
    visual = model.visual
    # square input resolution taken from the visual tower's configuration
    side = visual.image_size[0]
    dummy_image = torch.randn(1, 3, side, side, requires_grad=True)
    visual = visual.to(device)
    visual.eval()
    _export_onnx(
        module=visual,
        sample_batch=dummy_image,
        file_path=export_path / onnx_name,
        **export_kwargs,
    )
21,687 | import argparse
from collections import OrderedDict
from pathlib import Path
from typing import Any, Union
import torch
import open_clip
from clip_models import TextModel
from sparseml.pytorch.utils import export_onnx
def _export_onnx(
module: torch.nn.Module,
sample_batch: Any,
file_path: Union[Path, str],
opset: int = 14,
**export_kwargs,
):
# _export_onnx by default uses opset = 14 as required by CLIP and will fail
# for opset < 14 as certain operators are not supported.
if opset < 14:
raise ValueError("CLIP onnx export requires a minimum opset of 14")
export_onnx(
module=module,
sample_batch=sample_batch,
opset=opset,
file_path=file_path,
**export_kwargs,
)
class TextModel(nn.Module):
    """Standalone wrapper bundling OpenCLIP's text-tower pieces so the text
    encoder can be traced and exported on its own.

    NOTE(review): assumes ``nn`` (torch.nn) is imported at module scope —
    confirm against the full file's imports.
    """

    def __init__(
        self,
        token_embedding: torch.nn.Embedding,
        positional_embedding: torch.nn.parameter.Parameter,
        transformer: torch.nn.Module,
        ln_final: torch.nn.LayerNorm,
        text_projection: torch.nn.parameter.Parameter,
        attn_mask: torch.Tensor,
    ):
        super().__init__()

        self.token_embedding = token_embedding
        self.positional_embedding = positional_embedding
        self.transformer = transformer
        self.ln_final = ln_final
        self.text_projection = text_projection
        self.attn_mask = attn_mask
        # dtype the transformer expects its activations in
        self.cast_dtype = self.transformer.get_cast_dtype()

    def forward(self, input_ids):
        """Encode token ids into a projected text embedding per sequence."""
        x = self.token_embedding(input_ids).to(self.cast_dtype)
        x = x + self.positional_embedding.to(self.cast_dtype)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x, attn_mask=self.attn_mask)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x)  # [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token = highest in each sequence)
        x = x[torch.arange(x.shape[0]), input_ids.argmax(dim=-1)] @ self.text_projection
        return x
def _export_text(
    model: torch.nn.Module,
    device: str,
    export_path: Union[str, Path],
    tokenizer,
    is_coca: bool,
    **export_kwargs,
):
    """Export the text tower to ``<export_path>/clip_text.onnx``.

    CoCa checkpoints expose ``model.text`` directly; other OpenCLIP models
    only expose the raw transformer pieces, so those are bundled into a
    TextModel wrapper before export.
    """
    onnx_name = "clip_text.onnx"
    text_model = (
        model.text
        if is_coca
        else TextModel(
            token_embedding=model.token_embedding,
            positional_embedding=model.positional_embedding,
            transformer=model.transformer,
            ln_final=model.ln_final,
            text_projection=model.text_projection,
            attn_mask=model.attn_mask,
        )
    )
    text_model = text_model.to(device)
    text_model.eval()
    # CoCa traces with a fixed (6, 15) token grid; other models trace a real
    # tokenized prompt cast to int32
    if is_coca:
        sample_batch = torch.ones(6, 15, dtype=torch.long)
    else:
        sample_batch = tokenizer(["a dog"]).to(torch.int32)
    _export_onnx(
        module=text_model,
        sample_batch=sample_batch,
        file_path=export_path / onnx_name,
        **export_kwargs,
    )
21,688 | import argparse
from collections import OrderedDict
from pathlib import Path
from typing import Any, Union
import torch
import open_clip
from clip_models import TextModel
from sparseml.pytorch.utils import export_onnx
def _export_onnx(
module: torch.nn.Module,
sample_batch: Any,
file_path: Union[Path, str],
opset: int = 14,
**export_kwargs,
):
# _export_onnx by default uses opset = 14 as required by CLIP and will fail
# for opset < 14 as certain operators are not supported.
if opset < 14:
raise ValueError("CLIP onnx export requires a minimum opset of 14")
export_onnx(
module=module,
sample_batch=sample_batch,
opset=opset,
file_path=file_path,
**export_kwargs,
)
def _export_text_decoder(
    model: torch.nn.Module, device: str, export_path: Union[str, Path], **export_kwargs
):
    """Export a CoCa text decoder to ``<export_path>/clip_text_decoder.onnx``."""
    onnx_name = "clip_text_decoder.onnx"
    decoder = model.text_decoder.to(device)
    decoder.eval()
    # OrderedDict keeps the (image_embs, text_embs) input order in the graph
    sample_batch = OrderedDict(
        image_embs=torch.randn(1, 255, model.text.output_dim),
        text_embs=torch.randn(1, 15, model.text.output_dim),
    )
    _export_onnx(
        module=decoder,
        sample_batch=sample_batch,
        file_path=export_path / onnx_name,
        **export_kwargs,
    )
21,689 | import math
import torch
import torch.nn.functional as F
from sparseml.pytorch.optim.manager import ScheduledModifierManager
from sparseml.pytorch.optim.optimizer import ScheduledOptimizer
from sparseml.pytorch.utils import ModuleExporter, logger
from trainer_qa import QuestionAnsweringTrainer
The provided code snippet includes the necessary dependencies for implementing the `export_model` function. Write a Python function `def export_model(model, dataloader, output_dir)` to solve the following problem:
Export a trained model to ONNX. :param model: the trained model. :param dataloader: dataloader used to draw a sample batch. :param output_dir: output directory for the ONNX model.
Here is the function:
def export_model(model, dataloader, output_dir):
    """
    Export a trained model to ONNX
    :param model: trained model
    :param dataloader: dataloader to get sample batch
    :param output_dir: output directory for ONNX model
    """
    exporter = ModuleExporter(model, output_dir=output_dir)
    # Only the first batch is needed to trace the graph; stop immediately.
    for batch in dataloader:
        inputs = (
            batch["input_ids"],
            batch["attention_mask"],
            batch["token_type_ids"],
        )
        exporter.export_onnx(sample_batch=inputs, convert_qat=True)
        break
21,691 | import argparse
import os
import json
import transformers
from filelock import FileLock
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
set_seed,
)
def load_qid2query(filename):
    """Read a TSV file of ``qid<TAB>query`` rows into a dict keyed by int qid."""
    mapping = {}
    with open(filename, 'r') as handle:
        for row in handle:
            fields = row.strip().split('\t')
            mapping[int(fields[0])] = fields[1]
    return mapping
21,692 | import re
import sys
import statistics
from collections import Counter
def load_reference(path_to_reference):
    """Load the reference set of relevant passages.

    Args:
        path_to_reference (str): path to a file to load.
    Returns:
        dict: maps query_id (int) to relevant passage ids (list of ints).
    """
    with open(path_to_reference, 'r') as reference_file:
        return load_reference_from_stream(reference_file)
def load_candidate(path_to_candidate):
    """Load candidate rankings from a file.

    Args:
        path_to_candidate (str): path to file to load.
    Returns:
        dict: maps query_id (int) to a relevance-ordered list of passage ids.
    """
    with open(path_to_candidate, 'r') as candidate_file:
        return load_candidate_from_stream(candidate_file)
def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Perform quality checks on the reference/candidate dictionaries.

    Verifies that no passage is ranked more than once for the same query.
    Passage id 0 is treated as padding, so repeated zeros are tolerated.

    Args:
        qids_to_relevant_passageids (dict): query-passage reference mapping,
            as read by load_reference / load_reference_from_stream
        qids_to_ranked_candidate_passages (dict): query-passage candidates
    Returns:
        (bool, str): whether the submission is allowed, and a message to show
        when there is a problem (empty string otherwise)
    """
    message = ''
    allowed = True

    for qid, candidates in qids_to_ranked_candidate_passages.items():
        duplicate_pids = {
            pid for pid, count in Counter(candidates).items() if count > 1
        }
        # Fix: exclude the padding id before picking the pid to report, so the
        # message names a genuinely duplicated passage instead of possibly 0.
        duplicate_pids.discard(0)
        if duplicate_pids:
            message = (
                "Cannot rank a passage multiple times for a single query. "
                "QID={qid}, PID={pid}".format(qid=qid, pid=next(iter(duplicate_pids)))
            )
            allowed = False

    return allowed, message
def compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Compute the MRR metric.

    Args:
        qids_to_relevant_passageids (dict): query-passage reference mapping,
            as read by load_reference / load_reference_from_stream
        qids_to_ranked_candidate_passages (dict): query-passage candidates
    Returns:
        dict: {'MRR @10': <MRR score>, 'QueriesRanked': <count>}
    Raises:
        IOError: if no candidate query id appears in the reference set.
    """
    reciprocal_sum = 0
    ranking = []
    for qid, candidate_pid in qids_to_ranked_candidate_passages.items():
        if qid not in qids_to_relevant_passageids:
            continue
        target_pid = qids_to_relevant_passageids[qid]
        ranking.append(0)  # 0 == no relevant passage within the top MaxMRRRank
        for rank in range(0, MaxMRRRank):
            if candidate_pid[rank] in target_pid:
                reciprocal_sum += 1 / (rank + 1)
                ranking[-1] = rank + 1
                break
    if not ranking:
        raise IOError("No matching QIDs found. Are you sure you are scoring the evaluation set?")

    return {
        'MRR @10': reciprocal_sum / len(qids_to_relevant_passageids),
        'QueriesRanked': len(qids_to_ranked_candidate_passages),
    }
The provided code snippet includes necessary dependencies for implementing the `compute_metrics_from_files` function. Write a Python function `def compute_metrics_from_files(path_to_reference, path_to_candidate, perform_checks=True)` to solve the following problem:
Compute MRR metric Args: p_path_to_reference_file (str): path to reference file. Reference file should contain lines in the following format: QUERYID\tPASSAGEID Where PASSAGEID is a relevant passage for a query. Note QUERYID can repeat on different lines with different PASSAGEIDs p_path_to_candidate_file (str): path to candidate file. Candidate file should contain lines in the following format: QUERYID\tPASSAGEID1\tRank If a user wishes to use the TREC format please run the script with a -t flag at the end. If this flag is used the expected format is QUERYID\tITER\tDOCNO\tRANK\tSIM\tRUNID Where the values are separated by tabs and ranked in order of relevance Returns: dict: dictionary of metrics {'MRR': <MRR Score>}
Here is the function:
def compute_metrics_from_files(path_to_reference, path_to_candidate, perform_checks=True):
    """Compute the MRR metric from a reference file and a candidate file.

    Args:
        path_to_reference (str): path to the reference file; each line is
            QUERYID\tPASSAGEID, where PASSAGEID is relevant for the query
            (a QUERYID may repeat with different PASSAGEIDs).
        path_to_candidate (str): path to the candidate file; each line is
            QUERYID\tPASSAGEID1\tRank (or the TREC format
            QUERYID\tITER\tDOCNO\tRANK\tSIM\tRUNID when run with -t).
        perform_checks (bool): whether to run duplicate-ranking checks first.
    Returns:
        dict: dictionary of metrics {'MRR': <MRR Score>}
    """
    qids_to_relevant_passageids = load_reference(path_to_reference)
    qids_to_ranked_candidate_passages = load_candidate(path_to_candidate)
    if perform_checks:
        # The boolean verdict is intentionally ignored here (matching the
        # original behavior); only the message is surfaced.
        _, message = quality_checks_qids(
            qids_to_relevant_passageids, qids_to_ranked_candidate_passages
        )
        if message:
            print(message)

    return compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages)
21,693 | import argparse
import os
import json
def load_qid2query(filename):
    """Parse a TSV of ``qid<TAB>query`` pairs into an int-keyed lookup dict."""
    lookup = {}
    with open(filename, 'r') as tsv:
        for raw_line in tsv:
            parts = raw_line.strip().split('\t')
            qid, query = int(parts[0]), parts[1]
            lookup[qid] = query
    return lookup
21,694 | import argparse
import os
import json
def load_qrels(filename, collection, qid2query):
    """Map each judged query's text to the text of its relevant passage.

    Expects TREC-style qrels rows (qid, iteration, pid, relevance) separated
    by tabs; ``collection`` and ``qid2query`` resolve the int ids to text.
    """
    qrels = {}
    with open(filename, 'r') as qrels_file:
        for raw in qrels_file:
            fields = raw.strip().split('\t')
            query_text = qid2query[int(fields[0])]
            qrels[query_text] = collection[int(fields[2])]
    return qrels
21,695 | import os
import json
import argparse
def convert_collection(args):
    """Convert a TSV collection (``docid<TAB>body`` per line) into JSONL.

    Each output line is ``{"id": <docid>, "contents": <body>}``; the body
    keeps its trailing newline, matching the raw TSV row text.

    :param args: namespace with ``collection_path`` (input TSV) and
        ``output_path`` (output JSONL) attributes
    """
    with open(args.output_path, 'w', encoding='utf-8') as writer:
        with open(args.collection_path, encoding='utf-8') as reader:
            for line in reader:
                # maxsplit=1 keeps tabs inside the body intact and avoids an
                # unpack error on bodies containing tab characters; identical
                # to the old behavior for well-formed two-field rows.
                doc_id, body = line.split('\t', 1)
                output_dict = {'id': doc_id, 'contents': body}
                writer.write(json.dumps(output_dict) + '\n')
21,696 | import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import random
import math
import nltk
import wandb
import numpy as np
from datasets import load_dataset, load_metric
import transformers
from filelock import FileLock
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
set_seed,
)
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.file_utils import is_offline_mode
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from sparseml.pytorch.optim.manager import ScheduledModifierManager
from sparseml.pytorch.optim.optimizer import ScheduledOptimizer
from sparseml.pytorch.utils import ModuleExporter, logger
def load_optimizer(model, args):
    """Build an AdamW optimizer over the model's parameters.

    Weight decay is disabled for bias and LayerNorm weights, as is
    conventional for transformer fine-tuning; all other parameters use
    ``args.weight_decay``.
    """
    no_decay = ["bias", "LayerNorm.weight"]

    def _decays(param_name):
        # A parameter decays unless its name contains any no-decay fragment.
        return not any(fragment in param_name for fragment in no_decay)

    decay_params = []
    plain_params = []
    for name, param in model.named_parameters():
        (decay_params if _decays(name) else plain_params).append(param)

    optimizer_grouped_parameters = [
        {"params": decay_params, "weight_decay": args.weight_decay},
        {"params": plain_params, "weight_decay": 0.0},
    ]
    return AdamW(
        optimizer_grouped_parameters,
        lr=args.learning_rate,
        betas=(args.adam_beta1, args.adam_beta2),
        eps=args.adam_epsilon,
    )
21,697 | import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import random
import math
import nltk
import wandb
import numpy as np
from datasets import load_dataset, load_metric
import transformers
from filelock import FileLock
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
set_seed,
)
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.file_utils import is_offline_mode
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from sparseml.pytorch.optim.manager import ScheduledModifierManager
from sparseml.pytorch.optim.optimizer import ScheduledOptimizer
from sparseml.pytorch.utils import ModuleExporter, logger
def convert_example_to_features(example, tokenizer, max_seq_length, sentence1_key, sentence2_key):
    """Tokenize one example into padded BERT-style feature tensors.

    Builds ``[CLS] sentence1 [SEP]`` (plus ``sentence2 [SEP]`` when
    ``sentence2_key`` is given), truncating each sentence to
    ``max_seq_length // 2`` tokens and zero-padding to ``max_seq_length``.

    :param example: mapping of column name -> raw text
    :param tokenizer: object exposing ``tokenize`` and ``convert_tokens_to_ids``
    :param max_seq_length: fixed output length of each tensor
    :param sentence1_key: column holding the first sentence
    :param sentence2_key: column holding the second sentence, or None
    :return: (input_ids, attention_mask, token_type_ids), each of
        shape (1, max_seq_length) and dtype int64
    """
    tokens = ["[CLS]"]
    segment_ids = [0]
    for token in tokenizer.tokenize(example[sentence1_key])[: max_seq_length // 2]:
        tokens.append(token)
        segment_ids.append(0)
    tokens.append("[SEP]")
    segment_ids.append(0)

    # Fix: the original guarded on ``sentence1_key != None`` here, which made
    # single-sentence tasks (sentence2_key=None) crash on example[None].
    if sentence2_key is not None:
        for token in tokenizer.tokenize(example[sentence2_key])[: max_seq_length // 2]:
            tokens.append(token)
            # NOTE(review): second-segment tokens keep segment id 0 and only
            # the closing [SEP] gets 1, mirroring the original — confirm this
            # matches the model's pretraining convention.
            segment_ids.append(0)
        tokens.append("[SEP]")
        segment_ids.append(1)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_mask = [1] * len(input_ids)
    # Zero-pad ids, mask and segments up to the fixed sequence length.
    while len(input_ids) < max_seq_length:
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
    return (
        torch.from_numpy(np.array([np.array(input_ids, dtype=np.int64)])),
        torch.from_numpy(np.array([np.array(input_mask, dtype=np.int64)])),
        torch.from_numpy(np.array([np.array(segment_ids, dtype=np.int64)])),
    )
21,698 | import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import random
import math
import nltk
import wandb
import numpy as np
from datasets import load_dataset, load_metric
import transformers
from filelock import FileLock
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
set_seed,
)
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.file_utils import is_offline_mode
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from sparseml.pytorch.optim.manager import ScheduledModifierManager
from sparseml.pytorch.optim.optimizer import ScheduledOptimizer
from sparseml.pytorch.utils import ModuleExporter, logger
def main():
    """NOTE(review): the body of main() is missing from this extract — the
    training pipeline it ran was elided; restore from the full script before
    use. This stub only anchors the xla_spawn entry point below."""
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
21,699 | import logging
import math
import os
import random
import sys
import time
from typing import Tuple
import hydra
import torch
from omegaconf import DictConfig, OmegaConf
from torch import Tensor as T
from torch import nn
from dpr.models import init_biencoder_components
from dpr.models.biencoder import BiEncoder, BiEncoderNllLoss, BiEncoderBatch
from dpr.options import (
setup_cfg_gpu,
set_seed,
get_encoder_params_state_from_cfg,
set_cfg_params_from_state,
setup_logger,
)
from dpr.utils.conf_utils import BiencoderDatasetsCfg
from dpr.utils.data_utils import (
ShardedDataIterator,
Tensorizer,
MultiSetDataIterator,
)
from dpr.utils.dist_utils import all_gather_list
from dpr.utils.model_utils import (
setup_for_distributed_mode,
move_to_device,
get_schedule_linear,
CheckpointState,
get_model_file,
get_model_obj,
load_states_from_checkpoint,
)
def _calc_loss(
    cfg,
    loss_function,
    local_q_vector,
    local_ctx_vectors,
    local_positive_idxs,
    local_hard_negatives_idxs: list = None,
    loss_scale: float = None,
) -> Tuple[T, bool]:
    """
    Calculates In-batch negatives schema loss and supports to run it in DDP mode by exchanging the representations
    across all the nodes.
    """
    distributed_world_size = cfg.distributed_world_size or 1
    if distributed_world_size > 1:
        # Detached CPU copies are sent to the other ranks; only this rank's
        # own vectors (re-inserted below) keep their autograd history.
        q_vector_to_send = (
            torch.empty_like(local_q_vector).cpu().copy_(local_q_vector).detach_()
        )
        ctx_vector_to_send = (
            torch.empty_like(local_ctx_vectors).cpu().copy_(local_ctx_vectors).detach_()
        )
        global_question_ctx_vectors = all_gather_list(
            [
                q_vector_to_send,
                ctx_vector_to_send,
                local_positive_idxs,
                local_hard_negatives_idxs,
            ],
            max_size=cfg.global_loss_buf_sz,
        )
        global_q_vector = []
        global_ctxs_vector = []
        # ctxs_per_question = local_ctx_vectors.size(0)
        positive_idx_per_question = []
        hard_negatives_per_question = []
        # Running count of contexts appended so far; positive/hard-negative
        # indices from each rank are offset into the concatenated tensor.
        total_ctxs = 0
        for i, item in enumerate(global_question_ctx_vectors):
            q_vector, ctx_vectors, positive_idx, hard_negatives_idxs = item
            if i != cfg.local_rank:
                # Another rank's vectors: move the gathered CPU copies onto
                # this rank's device.
                global_q_vector.append(q_vector.to(local_q_vector.device))
                global_ctxs_vector.append(ctx_vectors.to(local_q_vector.device))
                positive_idx_per_question.extend([v + total_ctxs for v in positive_idx])
                hard_negatives_per_question.extend(
                    [[v + total_ctxs for v in l] for l in hard_negatives_idxs]
                )
            else:
                # Our own slot: use the original (grad-carrying) local tensors.
                global_q_vector.append(local_q_vector)
                global_ctxs_vector.append(local_ctx_vectors)
                positive_idx_per_question.extend(
                    [v + total_ctxs for v in local_positive_idxs]
                )
                hard_negatives_per_question.extend(
                    [[v + total_ctxs for v in l] for l in local_hard_negatives_idxs]
                )
            total_ctxs += ctx_vectors.size(0)
        global_q_vector = torch.cat(global_q_vector, dim=0)
        global_ctxs_vector = torch.cat(global_ctxs_vector, dim=0)
    else:
        # Single-process mode: no gathering or index offsetting needed.
        global_q_vector = local_q_vector
        global_ctxs_vector = local_ctx_vectors
        positive_idx_per_question = local_positive_idxs
        hard_negatives_per_question = local_hard_negatives_idxs
    loss, is_correct = loss_function.calc(
        global_q_vector,
        global_ctxs_vector,
        positive_idx_per_question,
        hard_negatives_per_question,
        loss_scale=loss_scale,
    )
    return loss, is_correct
# One bi-encoder training batch: tokenized questions and contexts plus, per
# question, the index of its positive context and indices of hard negatives.
# NOTE(review): the runtime typename "BiENcoderInput" has odd casing; kept
# as-is since it is part of observable behavior (repr/pickling).
BiEncoderBatch = collections.namedtuple(
    "BiENcoderInput",
    [
        "question_ids",
        "question_segments",
        "context_ids",
        "ctx_segments",
        "is_positive",
        "hard_negatives",
        "encoder_type",
    ],
)
class BiEncoderNllLoss(object):
    """Negative log-likelihood loss over in-batch question/context similarity scores."""

    def calc(
        self,
        q_vectors: T,
        ctx_vectors: T,
        positive_idx_per_question: list,
        hard_negative_idx_per_question: list = None,
        loss_scale: float = None,
    ) -> Tuple[T, int]:
        """
        Computes nll loss for the given lists of question and ctx vectors.
        Note that although hard_negative_idx_per_question in not currently in use, one can use it for the
        loss modifications. For example - weighted NLL with different factors for hard vs regular negatives.
        :return: a tuple of loss value and amount of correct predictions per batch
        """
        scores = self.get_scores(q_vectors, ctx_vectors)
        if len(q_vectors.size()) > 1:
            q_num = q_vectors.size(0)
            scores = scores.view(q_num, -1)
        softmax_scores = F.log_softmax(scores, dim=1)
        loss = F.nll_loss(
            softmax_scores,
            torch.tensor(positive_idx_per_question).to(softmax_scores.device),
            reduction="mean",
        )
        # A prediction is correct when the argmax score is the positive context.
        max_score, max_idxs = torch.max(softmax_scores, 1)
        correct_predictions_count = (
            max_idxs == torch.tensor(positive_idx_per_question).to(max_idxs.device)
        ).sum()
        if loss_scale:
            loss.mul_(loss_scale)
        return loss, correct_predictions_count

    # Fix: these two were plain methods missing @staticmethod, so the
    # ``self.get_scores(q_vectors, ctx_vectors)`` call above passed ``self``
    # as q_vector and raised TypeError. They are @staticmethod in upstream
    # DPR and take no self.
    @staticmethod
    def get_scores(q_vector: T, ctx_vectors: T) -> T:
        f = BiEncoderNllLoss.get_similarity_function()
        return f(q_vector, ctx_vectors)

    @staticmethod
    def get_similarity_function():
        return dot_product_scores
class Tensorizer(object):
    """
    Component for all text to model input data conversions and related utility methods
    """

    # Note: title, if present, is supposed to be put before text (i.e. optional title + document body)
    def text_to_tensor(
        self,
        text: str,
        title: str = None,
        add_special_tokens: bool = True,
        apply_max_len: bool = True,
    ):
        """Convert text (optionally prefixed by a title) to a token-id tensor."""
        raise NotImplementedError

    def get_pair_separator_ids(self) -> T:
        """Token ids used to separate the members of a text pair."""
        raise NotImplementedError

    def get_pad_id(self) -> int:
        """Id of the padding token."""
        raise NotImplementedError

    def get_attn_mask(self, tokens_tensor: T):
        """Attention mask matching the given token tensor."""
        raise NotImplementedError

    def is_sub_word_id(self, token_id: int):
        """Whether the id denotes a sub-word (continuation) token."""
        raise NotImplementedError

    def to_string(self, token_ids, skip_special_tokens=True):
        """Decode token ids back into a string."""
        raise NotImplementedError

    def set_pad_to_max(self, pad: bool):
        """Toggle padding every sequence to the maximum length."""
        raise NotImplementedError

    def get_token_id(self, token: str) -> int:
        """Id of a single token string."""
        raise NotImplementedError
def move_to_device(sample, device):
    """Recursively move every tensor inside ``sample`` onto ``device``.

    Dicts, lists and tuples are traversed; non-tensor leaves pass through
    unchanged. Two quirks of the original are preserved deliberately:
    tuples come back as lists, and an empty top-level sample yields ``{}``.
    """
    if len(sample) == 0:
        return {}

    def _transfer(node, target):
        if torch.is_tensor(node):
            return node.to(target)
        if isinstance(node, dict):
            return {key: _transfer(value, target) for key, value in node.items()}
        if isinstance(node, (list, tuple)):
            return [_transfer(item, target) for item in node]
        return node

    return _transfer(sample, device)
def _do_biencoder_fwd_pass(
    model: nn.Module,
    input: BiEncoderBatch,
    tensorizer: Tensorizer,
    cfg,
    encoder_type: str,
    rep_positions=0,
    loss_scale: float = None,
) -> Tuple[torch.Tensor, int]:
    """Run one bi-encoder forward pass and compute the NLL loss.

    Returns the (possibly gradient-accumulation-scaled) loss tensor and the
    number of correct in-batch predictions.
    """
    # Rebuild the batch with all tensors moved onto the configured device.
    input = BiEncoderBatch(**move_to_device(input._asdict(), cfg.device))
    q_attn_mask = tensorizer.get_attn_mask(input.question_ids)
    ctx_attn_mask = tensorizer.get_attn_mask(input.context_ids)
    # Same forward call in both branches; gradients are only tracked when
    # the model is in training mode.
    if model.training:
        model_out = model(
            input.question_ids,
            input.question_segments,
            q_attn_mask,
            input.context_ids,
            input.ctx_segments,
            ctx_attn_mask,
            encoder_type=encoder_type,
            representation_token_pos=rep_positions,
        )
    else:
        with torch.no_grad():
            model_out = model(
                input.question_ids,
                input.question_segments,
                q_attn_mask,
                input.context_ids,
                input.ctx_segments,
                ctx_attn_mask,
                encoder_type=encoder_type,
                representation_token_pos=rep_positions,
            )
    local_q_vector, local_ctx_vectors = model_out
    loss_function = BiEncoderNllLoss()
    loss, is_correct = _calc_loss(
        cfg,
        loss_function,
        local_q_vector,
        local_ctx_vectors,
        input.is_positive,
        input.hard_negatives,
        loss_scale=loss_scale,
    )
    is_correct = is_correct.sum().item()
    # Average across DataParallel replicas, if any.
    if cfg.n_gpu > 1:
        loss = loss.mean()
    if cfg.train.gradient_accumulation_steps > 1:
        # NOTE(review): the guard reads cfg.train.gradient_accumulation_steps
        # but the division uses cfg.gradient_accumulation_steps — confirm both
        # resolve to the same value, otherwise the loss is mis-scaled.
        loss = loss / cfg.gradient_accumulation_steps
    return loss, is_correct
21,700 | import collections
import glob
import json
import logging
import math
import multiprocessing
import os
import pickle
import torch
from functools import partial
from typing import Tuple, List, Dict, Iterable, Optional
from torch import Tensor as T
from tqdm import tqdm
from dpr.utils.data_utils import Tensorizer, read_serialized_data_from_files
logger = logging.getLogger()
def _preprocess_reader_samples_chunk(
    samples: List,
    out_file_prefix: str,
    gold_passages_file: str,
    tensorizer: Tensorizer,
    is_train_set: bool,
) -> str:
    """Preprocess one ``(chunk_id, samples)`` chunk and pickle the results.

    Returns the path of the serialized output file
    (``<out_file_prefix>.<chunk_id>.pkl``).
    """
    chunk_id, samples = samples
    logger.info("Start batch %d", len(samples))
    iterator = preprocess_retriever_data(
        samples,
        gold_passages_file,
        tensorizer,
        is_train_set=is_train_set,
    )

    results = []
    for sample_result in tqdm(iterator):
        sample_result.on_serialize()
        results.append(sample_result)

    out_file = out_file_prefix + "." + str(chunk_id) + ".pkl"
    with open(out_file, mode="wb") as f:
        logger.info("Serialize %d results to %s", len(results), out_file)
        pickle.dump(results, f)
    return out_file
class Tensorizer(object):
    """
    Component for all text to model input data conversions and related utility methods
    """

    # Note: title, if present, is supposed to be put before text (i.e. optional title + document body)
    def text_to_tensor(
        self,
        text: str,
        title: str = None,
        add_special_tokens: bool = True,
        apply_max_len: bool = True,
    ):
        # Abstract: subclasses produce a token-id tensor for the text.
        raise NotImplementedError

    def get_pair_separator_ids(self) -> T:
        # Abstract: separator token ids for a text pair.
        raise NotImplementedError

    def get_pad_id(self) -> int:
        # Abstract: the padding token id.
        raise NotImplementedError

    def get_attn_mask(self, tokens_tensor: T):
        # Abstract: attention mask matching the token tensor.
        raise NotImplementedError

    def is_sub_word_id(self, token_id: int):
        # Abstract: whether the id is a sub-word continuation token.
        raise NotImplementedError

    def to_string(self, token_ids, skip_special_tokens=True):
        # Abstract: decode ids back into text.
        raise NotImplementedError

    def set_pad_to_max(self, pad: bool):
        # Abstract: toggle padding to the maximum sequence length.
        raise NotImplementedError

    def get_token_id(self, token: str) -> int:
        # Abstract: id of a single token string.
        raise NotImplementedError
The provided code snippet includes necessary dependencies for implementing the `convert_retriever_results` function. Write a Python function `def convert_retriever_results( is_train_set: bool, input_file: str, out_file_prefix: str, gold_passages_file: str, tensorizer: Tensorizer, num_workers: int = 8, ) -> List[str]` to solve the following problem:
Converts the file with dense retriever(or any compatible file format) results into the reader input data and serializes them into a set of files. Conversion splits the input data into multiple chunks and processes them in parallel. Each chunk results are stored in a separate file with name out_file_prefix.{number}.pkl :param is_train_set: if the data should be processed for a train set (i.e. with answer span detection) :param input_file: path to a json file with data to convert :param out_file_prefix: output path prefix. :param gold_passages_file: optional path for the 'gold passages & questions' file. Required to get best results for NQ :param tensorizer: Tensorizer object for text to model input tensors conversions :param num_workers: the number of parallel processes for conversion :return: names of files with serialized results
Here is the function:
def convert_retriever_results(
    is_train_set: bool,
    input_file: str,
    out_file_prefix: str,
    gold_passages_file: str,
    tensorizer: Tensorizer,
    num_workers: int = 8,
) -> List[str]:
    """
    Converts the file with dense retriever(or any compatible file format) results into the reader input data and
    serializes them into a set of files.
    Conversion splits the input data into multiple chunks and processes them in parallel. Each chunk results are stored
    in a separate file with name out_file_prefix.{number}.pkl
    :param is_train_set: if the data should be processed for a train set (i.e. with answer span detection)
    :param input_file: path to a json file with data to convert
    :param out_file_prefix: output path prefix.
    :param gold_passages_file: optional path for the 'gold passages & questions' file. Required to get best results for NQ
    :param tensorizer: Tensorizer object for text to model input tensors conversions
    :param num_workers: the number of parallel processes for conversion
    :return: names of files with serialized results
    """
    with open(input_file, "r", encoding="utf-8") as f:
        # Fix: stream-parse instead of materializing the whole file as one
        # string first (json.loads("".join(f.readlines()))).
        samples = json.load(f)
    logger.info(
        "Loaded %d questions + retrieval results from %s", len(samples), input_file
    )

    # Split the samples into at most num_workers roughly-equal chunks,
    # tagged with their chunk index for deterministic output file names.
    ds_size = len(samples)
    step = max(math.ceil(ds_size / num_workers), 1)
    chunks = [samples[i : i + step] for i in range(0, ds_size, step)]
    chunks = [(i, chunks[i]) for i in range(len(chunks))]

    logger.info("Split data into %d chunks", len(chunks))

    _parse_batch = partial(
        _preprocess_reader_samples_chunk,
        out_file_prefix=out_file_prefix,
        gold_passages_file=gold_passages_file,
        tensorizer=tensorizer,
        is_train_set=is_train_set,
    )
    serialized_files = []
    # Fix: context-manage the pool so worker processes are terminated and
    # reclaimed even if a chunk raises (the original leaked the pool).
    with multiprocessing.Pool(num_workers) as workers:
        processed = 0
        for file_name in workers.map(_parse_batch, chunks):
            processed += 1
            serialized_files.append(file_name)
            logger.info("Chunks processed %d", processed)
            logger.info("Data saved to %s", file_name)
    logger.info("Preprocessed data stored in %s", serialized_files)
    return serialized_files
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.