id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
161,486 | import argparse
import gc
import hashlib
import itertools
import logging
import math
import os
import threading
import warnings
from contextlib import nullcontext
from pathlib import Path
from typing import Optional
import datasets
import diffusers
import numpy as np
import psutil
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from diffusers import (
AutoencoderKL,
DDPMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
UNet2DConditionModel,
)
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version
from diffusers.utils.import_utils import is_xformers_available
from huggingface_hub import HfFolder, Repository, whoami
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import AutoTokenizer, PretrainedConfig
from peft import get_peft_model
from peft.tuners.oft.config import OFTConfig
def collate_fn(examples, with_prior_preservation=False):
    """Assemble per-example dicts into a single training batch.

    Args:
        examples: list of dicts with "instance_prompt_ids" / "instance_images"
            (and "class_prompt_ids" / "class_images" when prior preservation is on).
        with_prior_preservation: append the class examples behind the instance
            examples so both are handled in one forward pass.

    Returns:
        dict with "input_ids" (concatenated along dim 0) and "pixel_values"
        (stacked, contiguous, float32).
    """
    ids = [ex["instance_prompt_ids"] for ex in examples]
    images = [ex["instance_images"] for ex in examples]
    # Concat class and instance examples for prior preservation.
    # We do this to avoid doing two forward passes.
    if with_prior_preservation:
        ids.extend(ex["class_prompt_ids"] for ex in examples)
        images.extend(ex["class_images"] for ex in examples)
    stacked = torch.stack(images).to(memory_format=torch.contiguous_format).float()
    return {
        "input_ids": torch.cat(ids, dim=0),
        "pixel_values": stacked,
    }
161,487 | import argparse
import gc
import hashlib
import itertools
import logging
import math
import os
import threading
import warnings
from contextlib import nullcontext
from pathlib import Path
from typing import Optional
import datasets
import diffusers
import numpy as np
import psutil
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from diffusers import (
AutoencoderKL,
DDPMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
UNet2DConditionModel,
)
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version
from diffusers.utils.import_utils import is_xformers_available
from huggingface_hub import HfFolder, Repository, whoami
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import AutoTokenizer, PretrainedConfig
from peft import get_peft_model
from peft.tuners.oft.config import OFTConfig
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    """Return the fully qualified Hub repo id, "<owner>/<model_id>".

    The owner is `organization` when given, otherwise the username resolved
    from the (possibly cached) Hub token.
    """
    if token is None:
        token = HfFolder.get_token()
    owner = organization if organization is not None else whoami(token)["name"]
    return f"{owner}/{model_id}"
161,488 | import os
from enum import Enum
import torch
from datasets import DatasetDict, load_dataset, load_from_disk
from datasets.builder import DatasetGenerationError
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
)
from peft import LoraConfig
def create_datasets(tokenizer, data_args, training_args, apply_chat_template=False):
    """Load the train/test splits named in `data_args.splits`, optionally
    rendering each conversation with the tokenizer's chat template.

    Returns:
        (train_data, valid_data) datasets.
    """

    def _render(samples):
        # Flatten every conversation into one chat-formatted string.
        rendered = [tokenizer.apply_chat_template(conv, tokenize=False) for conv in samples["messages"]]
        return {"content": rendered}

    raw_datasets = DatasetDict()
    for split in data_args.splits.split(","):
        try:
            # Try first if dataset on a Hub repo
            dataset = load_dataset(data_args.dataset_name, split=split)
        except DatasetGenerationError:
            # If not, check local dataset
            dataset = load_from_disk(os.path.join(data_args.dataset_name, split))
        if "train" in split:
            raw_datasets["train"] = dataset
        elif "test" in split:
            raw_datasets["test"] = dataset
        else:
            raise ValueError(f"Split type {split} not recognized as one of test or train.")
    if apply_chat_template:
        raw_datasets = raw_datasets.map(
            _render,
            batched=True,
            remove_columns=raw_datasets["train"].column_names,
        )
    train_data, valid_data = raw_datasets["train"], raw_datasets["test"]
    print(f"Size of the train set: {len(train_data)}. Size of the validation set: {len(valid_data)}")
    print(f"A sample of train dataset: {train_data[0]}")
    return train_data, valid_data
161,489 | import os
from enum import Enum
import torch
from datasets import DatasetDict, load_dataset, load_from_disk
from datasets.builder import DatasetGenerationError
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
)
from peft import LoraConfig
# Jinja chat template for the ChatML format: <|im_start|>{role}\n{content}<|im_end|>.
DEFAULT_CHATML_CHAT_TEMPLATE = "{% for message in messages %}\n{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% if loop.last and add_generation_prompt %}{{'<|im_start|>assistant\n' }}{% endif %}{% endfor %}"
# Jinja chat template for the Zephyr format: <|user|>/<|system|>/<|assistant|> headers, each turn closed by eos_token.
DEFAULT_ZEPHYR_CHAT_TEMPLATE = "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"
class ZephyrSpecialTokens(str, Enum):
    """Special chat tokens used by the Zephyr prompt format."""

    user = "<|user|>"
    assistant = "<|assistant|>"
    system = "<|system|>"
    eos_token = "</s>"
    bos_token = "<s>"
    pad_token = "<pad>"

    @classmethod
    def list(cls):
        # Fix: without @classmethod, ZephyrSpecialTokens.list() raised
        # TypeError (missing `cls`) when called on the class, as done by
        # create_and_prepare_model via `special_tokens.list()`.
        return [c.value for c in cls]
class ChatmlSpecialTokens(str, Enum):
    """Special chat tokens used by the ChatML prompt format."""

    user = "<|im_start|>user"
    assistant = "<|im_start|>assistant"
    system = "<|im_start|>system"
    eos_token = "<|im_end|>"
    bos_token = "<s>"
    pad_token = "<pad>"

    @classmethod
    def list(cls):
        # Fix: without @classmethod, ChatmlSpecialTokens.list() raised
        # TypeError (missing `cls`) when called on the class.
        return [c.value for c in cls]
def create_and_prepare_model(args, data_args, training_args):
    """Load the causal LM (optionally 4/8-bit quantized or via Unsloth), the LoRA config and the tokenizer.

    Returns:
        (model, peft_config, tokenizer) — peft_config is None when PEFT-LoRA is
        disabled or when Unsloth does its own LoRA patching below.
    """
    if args.use_unsloth:
        # Imported lazily so the dependency is only needed when requested.
        from unsloth import FastLanguageModel
    device_map = None
    bnb_config = None
    if (
        torch.distributed.is_available()
        and torch.distributed.is_initialized()
        and torch.distributed.get_world_size() > 1
        and args.use_unsloth
    ):
        raise NotImplementedError("Unsloth is not supported in distributed training")
    if args.use_4bit_quantization:
        # Resolve e.g. "bfloat16" -> torch.bfloat16.
        compute_dtype = getattr(torch, args.bnb_4bit_compute_dtype)
        bnb_config = BitsAndBytesConfig(
            load_in_4bit=args.use_4bit_quantization,
            bnb_4bit_quant_type=args.bnb_4bit_quant_type,
            bnb_4bit_compute_dtype=compute_dtype,
            bnb_4bit_use_double_quant=args.use_nested_quant,
        )
        if compute_dtype == torch.float16 and args.use_4bit_quantization:
            major, _ = torch.cuda.get_device_capability()
            if major >= 8:
                # Compute capability >= 8 (Ampere+) supports bf16.
                print("=" * 80)
                print("Your GPU supports bfloat16, you can accelerate training with the argument --bf16")
                print("=" * 80)
    elif args.use_8bit_quantization:
        bnb_config = BitsAndBytesConfig(load_in_8bit=args.use_8bit_quantization)
    if args.use_4bit_quantization or args.use_8bit_quantization:
        # Under an initialized torch.distributed run, pin the model to this
        # rank's device; otherwise let the loader place it automatically.
        device_map = (
            int(os.environ.get("LOCAL_RANK", -1))
            if torch.distributed.is_available() and torch.distributed.is_initialized()
            else "auto"
        )  # {"": 0}
    if args.use_unsloth:
        # Load model
        model, _ = FastLanguageModel.from_pretrained(
            model_name=args.model_name_or_path,
            max_seq_length=data_args.max_seq_length,
            dtype=None,
            load_in_4bit=args.use_4bit_quantization,
        )
    else:
        model = AutoModelForCausalLM.from_pretrained(
            args.model_name_or_path,
            quantization_config=bnb_config,
            device_map=device_map,
            trust_remote_code=True,
            attn_implementation="flash_attention_2" if args.use_flash_attn else "eager",
        )
    peft_config = None
    chat_template = None
    if args.use_peft_lora and not args.use_unsloth:
        peft_config = LoraConfig(
            lora_alpha=args.lora_alpha,
            lora_dropout=args.lora_dropout,
            r=args.lora_r,
            bias="none",
            task_type="CAUSAL_LM",
            target_modules=args.lora_target_modules.split(",")
            if args.lora_target_modules != "all-linear"
            else args.lora_target_modules,
        )
    special_tokens = None
    chat_template = None
    if args.chat_template_format == "chatml":
        special_tokens = ChatmlSpecialTokens
        chat_template = DEFAULT_CHATML_CHAT_TEMPLATE
    elif args.chat_template_format == "zephyr":
        special_tokens = ZephyrSpecialTokens
        chat_template = DEFAULT_ZEPHYR_CHAT_TEMPLATE
    if special_tokens is not None:
        # A custom chat format was requested: register its special tokens and
        # grow the embedding matrix to cover them.
        tokenizer = AutoTokenizer.from_pretrained(
            args.model_name_or_path,
            pad_token=special_tokens.pad_token.value,
            bos_token=special_tokens.bos_token.value,
            eos_token=special_tokens.eos_token.value,
            additional_special_tokens=special_tokens.list(),
            trust_remote_code=True,
        )
        tokenizer.chat_template = chat_template
        # make embedding resizing configurable?
        model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=8)
    else:
        tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, trust_remote_code=True)
        tokenizer.pad_token = tokenizer.eos_token
    if args.use_unsloth:
        # Do model patching and add fast LoRA weights
        model = FastLanguageModel.get_peft_model(
            model,
            lora_alpha=args.lora_alpha,
            lora_dropout=args.lora_dropout,
            r=args.lora_r,
            target_modules=args.lora_target_modules.split(",")
            if args.lora_target_modules != "all-linear"
            else args.lora_target_modules,
            use_gradient_checkpointing=training_args.gradient_checkpointing,
            random_state=training_args.seed,
            max_seq_length=data_args.max_seq_length,
        )
    return model, peft_config, tokenizer
161,490 | import argparse
import os
import torch
import torch.nn as nn
from transformers import (
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
)
from peft import LoftQConfig, LoraConfig, TaskType, get_peft_model
def unwrap_model(model, sub_module_name=".base_layer"):
    """Strip LoRA wrappers: replace each wrapped submodule by a Shell holding
    only the base layer's weight/bias.

    Wrapped modules are located by scanning state-dict keys for
    `sub_module_name`.
    """
    wrapped_names = {k.split(sub_module_name)[0] for k in model.state_dict().keys() if sub_module_name in k}
    for name in wrapped_names:
        # get the parent of the submodule
        parent_name, _, child_name = name.rpartition(".")
        parent = model.get_submodule(parent_name)
        print(parent)
        # replace with shell
        child = getattr(parent, child_name)
        shell = Shell(
            getattr(child.base_layer, "weight", None),
            getattr(child.base_layer, "bias", None),
        )
        setattr(parent, child_name, shell)
    print("You have unwrapped the model. Use it on your own risk.")
def print_model(model, name):
    """Print a banner, the model repr, and one line of stats per parameter
    (mean/max are included only for float32/float16 tensors)."""
    banner = "=" * 10
    print(banner + name + banner)
    print(model)
    for param_name, param in model.named_parameters():
        if not torch.is_tensor(param):
            continue
        if param.dtype in [torch.float32, torch.float16]:
            stats = (
                param_name,
                param.shape,
                param.device,
                param.dtype,
                param.requires_grad,
                param.mean().item(),
                param.max().item(),
            )
            print(*stats)
        else:
            print(param_name, param.shape, param.device, param.dtype, param.requires_grad)
def arg_parse():
    """Parse command-line arguments for LoftQ quantization.

    Returns:
        argparse.Namespace with the model path, Hub token, quantization bits,
        LoftQ alternating iterations, LoRA rank and output directory.
    """
    parser = argparse.ArgumentParser(description="Quantize a model with LoftQ.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="The name or path of the fp32/16 model.",
    )
    parser.add_argument(
        "--token",
        type=str,
        default=None,
        help="The access token to download model from HuggingFace Hub.",
    )
    parser.add_argument(
        "--bits",
        type=int,
        default=4,
        help="The quantized bits",
    )
    parser.add_argument(
        "--iter",
        type=int,
        default=1,
        help="The alternating steps in LoftQ",
    )
    parser.add_argument(
        "--rank",
        type=int,
        default=16,
        help="The rank of the LoRA adapter",
    )
    parser.add_argument(
        "--save_dir",
        type=str,
        default="./model_zoo/loftq/",
        # Fix: help text previously duplicated the --rank description.
        help="Directory where the quantized backbone and LoRA adapter are saved",
    )
    args = parser.parse_args()
    return args
def quantize_and_save():
    """Quantize a pretrained model with LoftQ and save backbone + adapter.

    Returns:
        (base_model_dir, lora_model_dir): directory of the saved quantized
        backbone and directory of the LoftQ-initialized LoRA adapter.
    """
    args = arg_parse()
    # Download weights and configure LoRA
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, token=args.token, trust_remote_code=True)
    # Pick the task head and LoRA target modules from the model family name.
    if any(name in args.model_name_or_path.lower() for name in ["llama", "mistral", "falcon"]):
        model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path, token=args.token, trust_remote_code=True)
        task_type = TaskType.CAUSAL_LM
        target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "up_proj", "down_proj", "gate_proj"]
    elif any(name in args.model_name_or_path.lower() for name in ["bart", "t5"]):
        model = AutoModelForSeq2SeqLM.from_pretrained(args.model_name_or_path, token=args.token)
        task_type = TaskType.SEQ_2_SEQ_LM
        target_modules = ["q_proj", "k_proj", "v_proj", "fc1", "fc2", "out_proj"]
    elif any(name in args.model_name_or_path.lower() for name in ["deberta", "roberta", "bert"]):
        model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, token=args.token)
        task_type = TaskType.SEQ_CLS
        target_modules = ["query_proj", "key_proj", "value_proj", "dense"]  # embeddings not supported by peft
    else:
        raise NotImplementedError("Other models not supported yet.")
    # Config of LoftQ
    loftq_config = LoftQConfig(loftq_bits=args.bits, loftq_iter=args.iter)
    lora_config = LoraConfig(
        task_type=task_type,
        inference_mode=True,
        r=args.rank,
        lora_alpha=16 if task_type is TaskType.CAUSAL_LM else args.rank,
        lora_dropout=0.1,
        target_modules=target_modules,
        init_lora_weights="loftq",  # triggers the LoftQ alternating initialization
        loftq_config=loftq_config,
    )
    # Obtain LoftQ model
    lora_model = get_peft_model(model, lora_config)
    base_model = lora_model.get_base_model()
    # Save LoftQ model
    model_name = args.model_name_or_path.split("/")[-1] + f"-{args.bits}bit" + f"-{args.rank}rank"
    base_model_dir = os.path.join(args.save_dir, model_name)
    lora_model_dir = os.path.join(args.save_dir, model_name, "loft_init")
    # save lora adapters first
    lora_model.base_model.peft_config[
        "default"
    ].base_model_name_or_path = base_model_dir  # This can be a local path or Hub model id
    lora_model.base_model.peft_config["default"].init_lora_weights = True  # Don't apply LoftQ when loading again
    lora_model.save_pretrained(lora_model_dir)
    print_model(lora_model, "lora_model")
    # remove lora adapters and save the backbone
    unwrap_model(base_model)
    base_model.save_pretrained(base_model_dir)
    tokenizer.save_pretrained(base_model_dir)
    print_model(base_model, "base_model")
    return base_model_dir, lora_model_dir
161,491 | import argparse
import copy
import logging
import math
import os
import random
import re
from pathlib import Path
import datasets
import torch
import transformers
from accelerate import Accelerator, DistributedType
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from datasets import load_dataset
from huggingface_hub import Repository, create_repo
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from transformers import (
CONFIG_MAPPING,
MODEL_MAPPING,
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
SchedulerType,
default_data_collator,
get_scheduler,
)
from transformers.utils import send_example_telemetry
from transformers.utils.versions import require_version
from peft import PeftModel
# Fix: MODEL_CONFIG_CLASSES was referenced but never defined (NameError at
# import time); derive it from transformers' MODEL_MAPPING as upstream does.
MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def parse_args():
    """Parse and sanity-check CLI arguments for causal-LM fine-tuning.

    Returns:
        argparse.Namespace of all options.

    Raises:
        ValueError: if neither --dataset_name nor --train_file/--validation_file is given.
    """
    parser = argparse.ArgumentParser(description="Finetune a transformers model on a causal language modeling task")
    parser.add_argument(
        "--dataset_name",
        type=str,
        default=None,
        help="The name of the dataset to use (via the datasets library).",
    )
    parser.add_argument(
        "--dataset_config_name",
        type=str,
        default=None,
        help="The configuration name of the dataset to use (via the datasets library).",
    )
    parser.add_argument(
        "--train_file", type=str, default=None, help="A csv, txt or a json file containing the training data."
    )
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv, txt or a json file containing the validation data."
    )
    parser.add_argument(
        "--validation_split_percentage",
        type=int,  # fix: previously untyped, so CLI-supplied values stayed strings while the default was an int
        default=5,
        help="The percentage of the train set used as validation set in case there's no validation split",
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        type=str,
        default=None,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--use_slow_tokenizer",
        action="store_true",
        help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
    )
    parser.add_argument(
        "--per_device_train_batch_size",
        type=int,
        default=8,
        help="Batch size (per device) for the training dataloader.",
    )
    parser.add_argument(
        "--per_device_eval_batch_size",
        type=int,
        default=8,
        help="Batch size (per device) for the evaluation dataloader.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=5e-5,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
    parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--lr_scheduler_type",
        type=SchedulerType,
        default="linear",
        help="The scheduler type to use.",
        choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
    )
    parser.add_argument(
        "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--model_type",
        type=str,
        default=None,
        help="Model type to use if training from scratch.",
        choices=MODEL_TYPES,
    )
    # NOTE(review): type=bool treats ANY non-empty string as True
    # ("--ignore_pad_token_for_loss False" still yields True); kept as-is for
    # CLI backward compatibility.
    parser.add_argument(
        "--ignore_pad_token_for_loss",
        type=bool,
        default=True,
        help="Whether to ignore the tokens corresponding to padded labels in the loss computation or not.",
    )
    parser.add_argument(
        "--max_source_length",
        type=int,
        default=128,
        help=(
            "The maximum total input sequence length after "
            "tokenization.Sequences longer than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--max_target_length",
        type=int,
        default=128,
        help=(
            "The maximum total sequence length for target text after "
            "tokenization. Sequences longer than this will be truncated, sequences shorter will be padded."
            "during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--pad_to_max_length",
        action="store_true",
        help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
    )
    parser.add_argument(
        "--preprocessing_num_workers",
        type=int,
        default=None,
        help="The number of processes to use for the preprocessing.",
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--no_keep_linebreaks", action="store_true", help="Do not keep line breaks when using TXT files."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument(
        "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
    )
    parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
    # NOTE(review): same type=bool caveat as above — any non-empty value enables trusting remote code.
    parser.add_argument(
        "--trust_remote_code",
        type=bool,
        default=False,
        help=(
            "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
            "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
            "execute code present on the Hub on your local machine."
        ),
    )
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to enable experiment trackers for logging.",
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
            # fix: help previously claimed "all" was the default; the actual default is "tensorboard".
            ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` to report to all integrations. '
            "Only applicable when `--with_tracking` is passed."
        ),
    )
    parser.add_argument(
        "--low_cpu_mem_usage",
        action="store_true",
        help=(
            "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded."
            "If passed, LLM loading time and RAM consumption will be benefited."
        ),
    )
    ##########################
    # Generation Config #
    ##########################
    parser.add_argument(
        "--temperature",
        type=float,
        default=0.8,
        help="temperature of 1.0 has no effect, lower tend toward greedy sampling",
    )
    parser.add_argument("--k", type=int, default=40, help="Choose k candidate words")
    parser.add_argument("--p", type=float, default=0.95, help="The sum of probability of candidate words is 0.9 ")
    ##########################
    # Exp Args #
    ##########################
    parser.add_argument(
        "--adapter_name_or_path",
        type=str,
        default=None,
        help=(
            "The LoRA adapter checkpoint. Set None if you want to fine-tune from LoftQ."
            "Specify a path if you want to evaluate."
        ),
    )
    args = parser.parse_args()
    # Sanity checks
    if args.dataset_name is None and args.train_file is None and args.validation_file is None:
        raise ValueError("Need either a dataset name or a training/validation file.")
    else:
        if args.train_file is not None:
            extension = args.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file."
        if args.validation_file is not None:
            extension = args.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file."
    if args.push_to_hub:
        assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."
    return args
161,492 | import argparse
import copy
import logging
import math
import os
import random
import re
from pathlib import Path
import datasets
import torch
import transformers
from accelerate import Accelerator, DistributedType
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from datasets import load_dataset
from huggingface_hub import Repository, create_repo
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from transformers import (
CONFIG_MAPPING,
MODEL_MAPPING,
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
SchedulerType,
default_data_collator,
get_scheduler,
)
from transformers.utils import send_example_telemetry
from transformers.utils.versions import require_version
from peft import PeftModel
# Signed integers and decimals such as "-3", "12.5", "7." (commas are stripped
# by extract_answer_number before matching).
PATTERN_NUMBER = re.compile(r"-?\d+\.?\d*")
def extract_answer_number(sentence: str) -> float:
    """Pull the numeric answer out of a model-generated sentence.

    Prefers the first number following "The final answer is "; otherwise falls
    back to the last number anywhere in the sentence. Returns float("inf")
    when no number exists or the candidate cannot be parsed.
    """
    sentence = sentence.replace(",", "")
    all_numbers = PATTERN_NUMBER.findall(sentence)
    if not all_numbers:
        return float("inf")
    parts = sentence.split("The final answer is ")
    if len(parts) > 1:
        candidates = PATTERN_NUMBER.findall(parts[1])
        answer = candidates[0] if candidates else float(all_numbers[-1])
    else:
        answer = float(all_numbers[-1])
    if isinstance(answer, str):
        try:
            answer = float(answer)
        except ValueError:
            answer = float("inf")
    return answer
161,493 | import argparse
import copy
import logging
import math
import os
import random
import re
from pathlib import Path
import datasets
import torch
import transformers
from accelerate import Accelerator, DistributedType
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from datasets import load_dataset
from huggingface_hub import Repository, create_repo
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from transformers import (
CONFIG_MAPPING,
MODEL_MAPPING,
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
SchedulerType,
default_data_collator,
get_scheduler,
)
from transformers.utils import send_example_telemetry
from transformers.utils.versions import require_version
from peft import PeftModel
def compute_accuracy(pred: list, gold: list):
    """Fraction of positions where `pred` equals `gold` (element-wise, up to
    the shorter of the two lists).

    Fix: returns 0.0 for an empty `pred` instead of raising ZeroDivisionError.
    """
    if not pred:
        return 0.0
    matches = sum(1 for p, g in zip(pred, gold) if p == g)
    return matches / len(pred)
161,494 | import torch
from accelerate import notebook_launcher
import peft
def init():
    """Build a tiny one-layer model on CUDA and wrap it with a LoRA adapter."""

    class _TinyNet(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(1, 2)

        def forward(self, x):
            return self.linear(x)

    net = _TinyNet().to("cuda")
    peft.get_peft_model(net, peft.LoraConfig(target_modules=["linear"]))
161,495 | import contextlib
import copy
import io
import itertools
import json
import logging
import os
import warnings
import numpy as np
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
The provided code snippet includes necessary dependencies for implementing the `xyxy2xywh` function. Write a Python function `def xyxy2xywh(bbox)` to solve the following problem:
change bbox to coco format :param bbox: [x1, y1, x2, y2] :return: [x, y, w, h]
Here is the function:
def xyxy2xywh(bbox):
    """
    change bbox to coco format
    :param bbox: [x1, y1, x2, y2]
    :return: [x, y, w, h]
    """
    x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]
    # COCO stores top-left corner plus width/height.
    return [x1, y1, x2 - x1, y2 - y1]
161,496 | import math
import random
from typing import Dict, Optional, Tuple
import cv2
import numpy as np
def get_flip_matrix(prob=0.5):
    """Horizontal-flip matrix: negates x with probability `prob`, else identity."""
    flip = np.eye(3)
    if random.random() < prob:
        flip[0, 0] = -1
    return flip
def get_perspective_matrix(perspective=0.0):
    """Random perspective matrix with terms drawn from [-perspective, perspective].

    :param perspective: magnitude bound for the perspective components
    :return: 3x3 matrix
    """
    mat = np.eye(3)
    mat[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    mat[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)
    return mat
def get_rotation_matrix(degree=0.0):
    """Random in-plane rotation matrix with angle drawn from [-degree, degree].

    :param degree: magnitude bound for the rotation angle, in degrees
    :return: 3x3 matrix
    """
    angle = random.uniform(-degree, degree)
    mat = np.eye(3)
    # cv2 returns the 2x3 affine part; embed it into the 3x3 homogeneous matrix.
    mat[:2] = cv2.getRotationMatrix2D(angle=angle, center=(0, 0), scale=1)
    return mat
def get_scale_matrix(ratio=(1, 1)):
    """Uniform-scale matrix with the factor drawn from `ratio` = (low, high).

    :param ratio: (low, high) range for the scale factor
    """
    factor = random.uniform(*ratio)
    mat = np.eye(3)
    mat[0, 0] *= factor
    mat[1, 1] *= factor
    return mat
def get_stretch_matrix(width_ratio=(1, 1), height_ratio=(1, 1)):
    """Anisotropic scale matrix: x and y factors drawn independently.

    :param width_ratio: (low, high) range for the x factor
    :param height_ratio: (low, high) range for the y factor
    """
    mat = np.eye(3)
    # Draw the x factor first, then y, to keep RNG consumption order stable.
    mat[0, 0] *= random.uniform(*width_ratio)
    mat[1, 1] *= random.uniform(*height_ratio)
    return mat
def get_shear_matrix(degree):
    """Random shear matrix with x/y shear angles drawn from [-degree, degree].

    :param degree: magnitude bound for the shear angles, in degrees
    :return: 3x3 matrix
    """
    deg2rad = math.pi / 180
    mat = np.eye(3)
    mat[0, 1] = math.tan(random.uniform(-degree, degree) * deg2rad)  # x shear (deg)
    mat[1, 0] = math.tan(random.uniform(-degree, degree) * deg2rad)  # y shear (deg)
    return mat
def get_translate_matrix(translate, width, height):
    """Random translation matrix centered on (0.5*width, 0.5*height).

    :param translate: jitter fraction of the image size
    :param width: image width in pixels
    :param height: image height in pixels
    """
    mat = np.eye(3)
    lo, hi = 0.5 - translate, 0.5 + translate
    mat[0, 2] = random.uniform(lo, hi) * width  # x translation
    mat[1, 2] = random.uniform(lo, hi) * height  # y translation
    return mat
def get_resize_matrix(raw_shape, dst_shape, keep_ratio):
    """
    Get resize matrix for resizing raw img to input size
    :param raw_shape: (width, height) of raw image
    :param dst_shape: (width, height) of input image
    :param keep_ratio: whether keep original ratio
    :return: 3x3 Matrix
    """
    raw_w, raw_h = raw_shape
    dst_w, dst_h = dst_shape
    scale = np.eye(3)
    if not keep_ratio:
        # Independent per-axis scaling straight to the destination size.
        scale[0, 0] *= dst_w / raw_w
        scale[1, 1] *= dst_h / raw_h
        return scale
    # Center raw image at the origin, scale uniformly, then re-center on dst.
    center = np.eye(3)
    center[0, 2] = -raw_w / 2
    center[1, 2] = -raw_h / 2
    if raw_w / raw_h < dst_w / dst_h:
        ratio = dst_h / raw_h
    else:
        ratio = dst_w / raw_w
    scale[0, 0] *= ratio
    scale[1, 1] *= ratio
    shift = np.eye(3)
    shift[0, 2] = 0.5 * dst_w
    shift[1, 2] = 0.5 * dst_h
    return shift @ scale @ center
def warp_boxes(boxes, M, width, height):
    """Apply a 3x3 perspective matrix to axis-aligned boxes.

    Each box's four corners are transformed, then re-boxed as the min/max of
    the warped corners and clipped to [0, width] x [0, height].
    Returns float32 (n, 4); an empty input is returned unchanged.
    """
    n = len(boxes)
    if not n:
        return boxes
    # warp points
    corners = np.ones((n * 4, 3))
    corners[:, :2] = boxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(
        n * 4, 2
    )  # x1y1, x2y2, x1y2, x2y1
    corners = corners @ M.T  # transform
    corners = (corners[:, :2] / corners[:, 2:3]).reshape(n, 8)  # rescale
    # create new boxes
    xs = corners[:, [0, 2, 4, 6]]
    ys = corners[:, [1, 3, 5, 7]]
    warped = np.concatenate((xs.min(1), ys.min(1), xs.max(1), ys.max(1))).reshape(4, n).T
    # clip boxes
    warped[:, [0, 2]] = warped[:, [0, 2]].clip(0, width)
    warped[:, [1, 3]] = warped[:, [1, 3]].clip(0, height)
    return warped.astype(np.float32)
def warp_and_resize(
    meta: Dict,
    warp_kwargs: Dict,
    dst_shape: Tuple[int, int],
    keep_ratio: bool = True,
):
    """Compose a random warp (perspective/scale/stretch/rotation/shear/flip/
    translate, each gated by a coin flip) with a resize, apply it to
    meta["img"] and to any gt boxes/masks, and store the matrix in
    meta["warp_matrix"].

    NOTE(review): every random.randint/get_*_matrix call consumes RNG state,
    so the order of these blocks affects reproducibility — do not reorder.
    """
    # TODO: background, type
    raw_img = meta["img"]
    height = raw_img.shape[0]  # shape(h,w,c)
    width = raw_img.shape[1]
    # center
    C = np.eye(3)
    C[0, 2] = -width / 2
    C[1, 2] = -height / 2
    # do not change the order of mat mul
    if "perspective" in warp_kwargs and random.randint(0, 1):
        P = get_perspective_matrix(warp_kwargs["perspective"])
        C = P @ C
    if "scale" in warp_kwargs and random.randint(0, 1):
        Scl = get_scale_matrix(warp_kwargs["scale"])
        C = Scl @ C
    if "stretch" in warp_kwargs and random.randint(0, 1):
        Str = get_stretch_matrix(*warp_kwargs["stretch"])
        C = Str @ C
    if "rotation" in warp_kwargs and random.randint(0, 1):
        R = get_rotation_matrix(warp_kwargs["rotation"])
        C = R @ C
    if "shear" in warp_kwargs and random.randint(0, 1):
        Sh = get_shear_matrix(warp_kwargs["shear"])
        C = Sh @ C
    if "flip" in warp_kwargs:
        F = get_flip_matrix(warp_kwargs["flip"])
        C = F @ C
    if "translate" in warp_kwargs and random.randint(0, 1):
        T = get_translate_matrix(warp_kwargs["translate"], width, height)
    else:
        # Fixed translation to the image center when jitter is disabled.
        T = get_translate_matrix(0, width, height)
    M = T @ C
    # M = T @ Sh @ R @ Str @ P @ C
    ResizeM = get_resize_matrix((width, height), dst_shape, keep_ratio)
    M = ResizeM @ M
    img = cv2.warpPerspective(raw_img, M, dsize=tuple(dst_shape))
    meta["img"] = img
    meta["warp_matrix"] = M
    if "gt_bboxes" in meta:
        boxes = meta["gt_bboxes"]
        meta["gt_bboxes"] = warp_boxes(boxes, M, dst_shape[0], dst_shape[1])
    if "gt_bboxes_ignore" in meta:
        bboxes_ignore = meta["gt_bboxes_ignore"]
        meta["gt_bboxes_ignore"] = warp_boxes(
            bboxes_ignore, M, dst_shape[0], dst_shape[1]
        )
    if "gt_masks" in meta:
        for i, mask in enumerate(meta["gt_masks"]):
            meta["gt_masks"][i] = cv2.warpPerspective(mask, M, dsize=tuple(dst_shape))
    # TODO: keypoints
    # if 'gt_keypoints' in meta:
    return meta
161,497 | import math
import random
from typing import Dict, Optional, Tuple
import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `get_minimum_dst_shape` function. Write a Python function `def get_minimum_dst_shape( src_shape: Tuple[int, int], dst_shape: Tuple[int, int], divisible: Optional[int] = None, ) -> Tuple[int, int]` to solve the following problem:
Calculate minimum dst shape
Here is the function:
def get_minimum_dst_shape(
    src_shape: Tuple[int, int],
    dst_shape: Tuple[int, int],
    divisible: Optional[int] = None,
) -> Tuple[int, int]:
    """Calculate minimum dst shape"""
    src_w, src_h = src_shape
    dst_w, dst_h = dst_shape
    # Uniform scale chosen so the scaled source covers the destination box.
    ratio = dst_h / src_h if src_w / src_h < dst_w / dst_h else dst_w / src_w
    dst_w, dst_h = int(ratio * src_w), int(ratio * src_h)
    if divisible and divisible > 0:
        # Round each side up to the next multiple of `divisible`,
        # but never below one full multiple.
        dst_w = max(divisible, int((dst_w + divisible - 1) // divisible * divisible))
        dst_h = max(divisible, int((dst_h + divisible - 1) // divisible * divisible))
    return dst_w, dst_h
161,498 | import random
import cv2
import numpy as np
def normalize(meta, mean, std):
    # Normalize meta["img"] in place: img = (img - mean) * (1 / std).
    img = meta["img"].astype(np.float32)
    mean = np.array(mean, dtype=np.float64).reshape(1, -1)
    stdinv = 1 / np.array(std, dtype=np.float64).reshape(1, -1)
    # cv2 ops write into `img` (third argument is the destination buffer),
    # avoiding extra allocations; presumably mean/std are per-channel — verify
    # against callers.
    cv2.subtract(img, mean, img)
    cv2.multiply(img, stdinv, img)
    meta["img"] = img
    return meta
161,499 | import random
import cv2
import numpy as np
def random_brightness(img, delta):
    """Shift brightness in place by a random offset drawn from [-delta, delta]."""
    offset = random.uniform(-delta, delta)
    img += offset  # in-place: callers rely on the mutated array
    return img
def random_contrast(img, alpha_low, alpha_up):
    """Scale ``img`` in place by a uniform random factor in [alpha_low, alpha_up]."""
    factor = random.uniform(alpha_low, alpha_up)
    img *= factor
    return img
def random_saturation(img, alpha_low, alpha_up):
    """Scale the HSV saturation channel of a BGR image by a uniform random
    factor in [alpha_low, alpha_up]."""
    hsv = cv2.cvtColor(img.astype(np.float32), cv2.COLOR_BGR2HSV)
    hsv[..., 1] *= random.uniform(alpha_low, alpha_up)
    # Convert back so callers keep working in BGR.
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
def _normalize(img, mean, std):
mean = np.array(mean, dtype=np.float32).reshape(1, 1, 3) / 255
std = np.array(std, dtype=np.float32).reshape(1, 1, 3) / 255
img = (img - mean) / std
return img
def color_aug_and_norm(meta, kwargs):
    """Randomly jitter brightness/contrast/saturation and normalise
    ``meta["img"]``.

    Each augmentation fires on a fair coin flip, and only when its key is
    present in ``kwargs``. ``kwargs["normalize"]`` must hold (mean, std).

    Returns:
        dict: ``meta`` with the augmented, normalised float image.
    """
    img = meta["img"].astype(np.float32) / 255
    if "brightness" in kwargs and random.randint(0, 1):
        img = random_brightness(img, kwargs["brightness"])
    if "contrast" in kwargs and random.randint(0, 1):
        img = random_contrast(img, *kwargs["contrast"])
    if "saturation" in kwargs and random.randint(0, 1):
        img = random_saturation(img, *kwargs["saturation"])
    img = _normalize(img, *kwargs["normalize"])
    meta["img"] = img
    return meta
161,500 | import collections
import re
import torch
from torch._six import string_classes
# Matches numpy dtype codes for bytes (S/a), unicode (U) and object (O)
# arrays, which collate cannot convert to tensors.
np_str_obj_array_pattern = re.compile(r"[SaUO]")
default_collate_err_msg_format = (
    "default_collate: batch must contain tensors, numpy arrays, numbers, "
    "dicts or lists; found {}"
)
The provided code snippet includes necessary dependencies for implementing the `collate_function` function. Write a Python function `def collate_function(batch)` to solve the following problem:
r"""Puts each data field into a tensor with outer dimension batch size
Here is the function:
def collate_function(batch):
    """Puts each data field into a tensor with outer dimension batch size.

    Mirrors ``torch.utils.data.default_collate`` but keeps numpy arrays and
    numpy scalars as plain lists instead of converting them to tensors.

    Args:
        batch: list of samples (tensors, numbers, strings, mappings,
            namedtuples or sequences thereof).

    Returns:
        The collated batch, with tensors stacked along a new first dim.

    Raises:
        TypeError: if an element type is not supported.
    """
    elem = batch[0]
    elem_type = type(elem)
    if isinstance(elem, torch.Tensor):
        out = None
        if torch.utils.data.get_worker_info() is not None:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum(x.numel() for x in batch)
            storage = elem.storage()._new_shared(numel)
            out = elem.new(storage)
        return torch.stack(batch, 0, out=out)
    elif (
        elem_type.__module__ == "numpy"
        and elem_type.__name__ != "str_"
        and elem_type.__name__ != "string_"
    ):
        if elem_type.__name__ == "ndarray":
            # array of string classes and object cannot become tensors
            if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
                raise TypeError(default_collate_err_msg_format.format(elem.dtype))
            return batch
        elif elem.shape == ():  # scalars
            return batch
    elif isinstance(elem, float):
        return torch.tensor(batch, dtype=torch.float64)
    elif isinstance(elem, int):
        return torch.tensor(batch)
    elif isinstance(elem, str):
        # ``str`` replaces ``torch._six.string_classes``; torch._six was
        # removed from modern PyTorch releases (>= 1.13).
        return batch
    elif isinstance(elem, collections.abc.Mapping):
        return {key: collate_function([d[key] for d in batch]) for key in elem}
    elif isinstance(elem, tuple) and hasattr(elem, "_fields"):  # namedtuple
        return elem_type(*(collate_function(samples) for samples in zip(*batch)))
    elif isinstance(elem, collections.abc.Sequence):
        transposed = zip(*batch)
        return [collate_function(samples) for samples in transposed]
    raise TypeError(default_collate_err_msg_format.format(elem_type))
161,501 | import collections
import re
import torch
from torch._six import string_classes
The provided code snippet includes necessary dependencies for implementing the `naive_collate` function. Write a Python function `def naive_collate(batch)` to solve the following problem:
Only collate dict value in to a list. E.g. meta data dict and img_info dict will be collated.
Here is the function:
def naive_collate(batch):
    """Only collate dict values into lists; any other element type is
    returned as the untouched batch list (e.g. meta data / img_info dicts)."""
    first = batch[0]
    if not isinstance(first, dict):
        return batch
    return {key: naive_collate([sample[key] for sample in batch]) for key in first}
161,502 | from typing import Sequence
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `stack_batch_img` function. Write a Python function `def stack_batch_img( img_tensors: Sequence[torch.Tensor], divisible: int = 0, pad_value: float = 0.0 ) -> torch.Tensor` to solve the following problem:
Args: img_tensors (Sequence[torch.Tensor]): divisible (int): pad_value (float): value to pad Returns: torch.Tensor.
Here is the function:
def stack_batch_img(
    img_tensors: Sequence[torch.Tensor], divisible: int = 0, pad_value: float = 0.0
) -> torch.Tensor:
    """Pad image tensors to a common spatial size and stack them into a batch.

    Args:
        img_tensors (Sequence[torch.Tensor]): tensors whose leading dims all
            match and whose last two dims are (H, W).
        divisible (int): if > 0, round the padded H/W up to a multiple of it.
        pad_value (float): fill value for the padded area.

    Returns:
        torch.Tensor: contiguous batch of shape (N, ..., max_H, max_W).
    """
    assert len(img_tensors) > 0
    assert isinstance(img_tensors, (tuple, list))
    assert divisible >= 0
    reference_shape = img_tensors[0].shape[:-2]
    for img in img_tensors:
        assert img.shape[:-2] == reference_shape
    max_h = max(img.shape[-2] for img in img_tensors)
    max_w = max(img.shape[-1] for img in img_tensors)
    if divisible > 0:
        max_h = (max_h + divisible - 1) // divisible * divisible
        max_w = (max_w + divisible - 1) // divisible * divisible
    # Pad only on the bottom/right so box coordinates stay valid.
    padded = [
        F.pad(img, [0, max_w - img.shape[-1], 0, max_h - img.shape[-2]], value=pad_value)
        for img in img_tensors
    ]
    return torch.stack(padded, dim=0).contiguous()
161,503 | import logging
import os
import time
import xml.etree.ElementTree as ET
from collections import defaultdict
from pycocotools.coco import COCO
from .coco import CocoDataset
def get_file_list(path, type=".xml"):
    """Recursively collect base filenames under ``path`` whose extension
    equals ``type`` (e.g. ".xml").

    NOTE: the parameter name ``type`` shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    return [
        filename
        for root, _dirs, filenames in os.walk(path)
        for filename in filenames
        if os.path.splitext(os.path.join(root, filename))[1] == type
    ]
161,504 | import copy
import logging
import torch
from torch.nn import GroupNorm, LayerNorm
from torch.nn.modules.batchnorm import _BatchNorm
NORMS = (GroupNorm, LayerNorm, _BatchNorm)
The provided code snippet includes necessary dependencies for implementing the `build_optimizer` function. Write a Python function `def build_optimizer(model, config)` to solve the following problem:
Build optimizer from config. Supports customised parameter-level hyperparameters. The config should be like: >>> optimizer: >>> name: AdamW >>> lr: 0.001 >>> weight_decay: 0.05 >>> no_norm_decay: True >>> param_level_cfg: # parameter-level config >>> backbone: >>> lr_mult: 0.1
Here is the function:
def build_optimizer(model, config):
    """Build optimizer from config.

    Supports customised parameter-level hyperparameters.
    The config should be like:
    >>> optimizer:
    >>>   name: AdamW
    >>>   lr: 0.001
    >>>   weight_decay: 0.05
    >>>   no_norm_decay: True
    >>>   param_level_cfg:  # parameter-level config
    >>>     backbone:
    >>>       lr_mult: 0.1
    """
    config = copy.deepcopy(config)
    param_dict = {}
    no_norm_decay = config.pop("no_norm_decay", False)
    no_bias_decay = config.pop("no_bias_decay", False)
    param_level_cfg = config.pop("param_level_cfg", {})
    base_lr = config.get("lr", None)
    base_wd = config.get("weight_decay", None)
    optim_cls = getattr(torch.optim, config.pop("name"))
    logger = logging.getLogger("NanoDet")

    # custom param-wise lr and weight_decay
    for name, p in model.named_parameters():
        if not p.requires_grad:
            continue
        param_dict[p] = {"name": name}
        for key in param_level_cfg:
            if key in name:
                if "lr_mult" in param_level_cfg[key] and base_lr:
                    param_dict[p].update(
                        {"lr": base_lr * param_level_cfg[key]["lr_mult"]}
                    )
                if "decay_mult" in param_level_cfg[key] and base_wd:
                    param_dict[p].update(
                        {"weight_decay": base_wd * param_level_cfg[key]["decay_mult"]}
                    )
                break

    if no_norm_decay:
        # disable weight decay on normalization layers
        for _, m in model.named_modules():
            if isinstance(m, NORMS):
                # Guard against affine=False norms (weight/bias are None) and
                # frozen params missing from param_dict, which previously
                # raised AttributeError / KeyError.
                for p in (getattr(m, "weight", None), getattr(m, "bias", None)):
                    if p is not None and p in param_dict:
                        param_dict[p].update({"weight_decay": 0})
    if no_bias_decay:
        # disable weight decay on biases
        for _, m in model.named_modules():
            bias = getattr(m, "bias", None)
            # ``bias`` may be None (e.g. Conv2d(bias=False)) or a plain flag
            # on custom modules; only touch real, tracked parameters.
            if isinstance(bias, torch.nn.Parameter) and bias in param_dict:
                param_dict[bias].update({"weight_decay": 0})

    # convert param dict to optimizer's param groups
    param_groups = []
    for p, pconfig in param_dict.items():
        name = pconfig.pop("name", None)
        if "weight_decay" in pconfig or "lr" in pconfig:
            logger.info(f"special optimizer hyperparameter: {name} - {pconfig}")
        param_groups += [{"params": p, **pconfig}]

    return optim_cls(param_groups, **config)
161,505 | import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from ..module.activation import act_layers
def channel_shuffle(x, groups):
    # type: (torch.Tensor, int) -> torch.Tensor
    """Interleave the channels of ``x`` across ``groups`` groups (ShuffleNet).

    Args:
        x: feature map of shape (N, C, H, W) with C divisible by ``groups``.
        groups: number of channel groups.

    Returns:
        torch.Tensor: same shape, channels shuffled across groups.
    """
    # ``x.size()`` replaces the deprecated ``x.data.size()``; going through
    # ``.data`` bypasses autograd tracking and is discouraged.
    batchsize, num_channels, height, width = x.size()
    channels_per_group = num_channels // groups
    # reshape to (N, groups, C/groups, H, W), swap the two channel axes ...
    x = x.view(batchsize, groups, channels_per_group, height, width)
    x = torch.transpose(x, 1, 2).contiguous()
    # ... and flatten back so channels are interleaved across groups
    x = x.view(batchsize, -1, height, width)
    return x
161,506 | import math
import torch
import torch.functional as F
import torch.utils.model_zoo as model_zoo
from torch import nn
from ..module.activation import act_layers
The provided code snippet includes necessary dependencies for implementing the `round_filters` function. Write a Python function `def round_filters(filters, multiplier, divisor=8, min_width=None)` to solve the following problem:
Calculate and round number of filters based on width multiplier.
Here is the function:
def round_filters(filters, multiplier, divisor=8, min_width=None):
    """Scale ``filters`` by the width multiplier and round to a multiple of
    ``divisor`` (floored at ``min_width`` or ``divisor``)."""
    if not multiplier:
        return filters
    scaled = filters * multiplier
    floor = min_width or divisor
    rounded = max(floor, int(scaled + divisor / 2) // divisor * divisor)
    # Guard: rounding down must not drop the filter count by more than 10%.
    if rounded < 0.9 * scaled:
        rounded += divisor
    return int(rounded)
161,507 | import math
import torch
import torch.functional as F
import torch.utils.model_zoo as model_zoo
from torch import nn
from ..module.activation import act_layers
The provided code snippet includes necessary dependencies for implementing the `round_repeats` function. Write a Python function `def round_repeats(repeats, multiplier)` to solve the following problem:
Round number of repeats based on depth multiplier.
Here is the function:
def round_repeats(repeats, multiplier):
    """Round the number of block repeats up after scaling by the depth
    multiplier; a falsy multiplier leaves ``repeats`` unchanged."""
    return int(math.ceil(multiplier * repeats)) if multiplier else repeats
161,508 | import math
import torch
import torch.functional as F
import torch.utils.model_zoo as model_zoo
from torch import nn
from ..module.activation import act_layers
def drop_connect(x, drop_connect_rate, training):
    """Randomly zero whole samples (stochastic depth) and rescale survivors.

    No-op at inference time (``training=False``).
    """
    if not training:
        return x
    keep_prob = 1.0 - drop_connect_rate
    batch_size = x.shape[0]
    # keep_prob + U[0,1): floor() yields 1 with probability keep_prob, else 0.
    noise = keep_prob + torch.rand(
        [batch_size, 1, 1, 1], dtype=x.dtype, device=x.device
    )
    binary_mask = torch.floor(noise)
    return (x / keep_prob) * binary_mask
161,509 | import numpy as np
import torch
import torch.nn as nn
from nanodet.model.module.conv import RepVGGConvModule
def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
    """Bias-free Conv2d followed by BatchNorm2d, packed in a Sequential with
    submodules named "conv" and "bn" (the layout RepVGG fusion expects)."""
    block = nn.Sequential()
    conv = nn.Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        groups=groups,
        bias=False,  # BN supplies the affine shift, so no conv bias
    )
    block.add_module("conv", conv)
    block.add_module("bn", nn.BatchNorm2d(num_features=out_channels))
    return block
161,510 | import numpy as np
import torch
import torch.nn as nn
from nanodet.model.module.conv import RepVGGConvModule
The provided code snippet includes necessary dependencies for implementing the `repvgg_model_convert` function. Write a Python function `def repvgg_model_convert(model, deploy_model, save_path=None)` to solve the following problem:
Examples: >>> train_model = RepVGG(arch='A0', deploy=False) >>> deploy_model = RepVGG(arch='A0', deploy=True) >>> deploy_model = repvgg_model_convert( >>> train_model, deploy_model, save_path='repvgg_deploy.pth')
Here is the function:
def repvgg_model_convert(model, deploy_model, save_path=None):
    """Copy fused weights from a training-mode RepVGG into its deploy twin.

    Examples:
        >>> train_model = RepVGG(arch='A0', deploy=False)
        >>> deploy_model = RepVGG(arch='A0', deploy=True)
        >>> deploy_model = repvgg_model_convert(
        >>>     train_model, deploy_model, save_path='repvgg_deploy.pth')
    """
    fused = {}
    # Collapse every re-parameterisable branch into one conv kernel/bias;
    # plain Linear layers are copied through unchanged.
    for name, module in model.named_modules():
        if hasattr(module, "repvgg_convert"):
            kernel, bias = module.repvgg_convert()
            fused[name + ".rbr_reparam.weight"] = kernel
            fused[name + ".rbr_reparam.bias"] = bias
        elif isinstance(module, torch.nn.Linear):
            fused[name + ".weight"] = module.weight.detach().cpu().numpy()
            fused[name + ".bias"] = module.bias.detach().cpu().numpy()
    del model
    for name, param in deploy_model.named_parameters():
        print("deploy param: ", name, param.size(), np.mean(fused[name]))
        param.data = torch.from_numpy(fused[name]).float()
    if save_path is not None:
        torch.save(deploy_model.state_dict(), save_path)
    return deploy_model
161,511 | import numpy as np
import torch
import torch.nn as nn
from nanodet.model.module.conv import RepVGGConvModule
def repvgg_det_model_convert(model, deploy_model):
    """Convert a detector whose backbone uses RepVGG blocks into its fused
    deploy form; non-backbone weights are carried over via load_state_dict."""
    fused = {}
    deploy_model.load_state_dict(model.state_dict(), strict=False)
    for name, module in model.backbone.named_modules():
        if hasattr(module, "repvgg_convert"):
            kernel, bias = module.repvgg_convert()
            fused[name + ".rbr_reparam.weight"] = kernel
            fused[name + ".rbr_reparam.bias"] = bias
        elif isinstance(module, torch.nn.Linear):
            fused[name + ".weight"] = module.weight.detach().cpu().numpy()
            fused[name + ".bias"] = module.bias.detach().cpu().numpy()
    del model
    for name, param in deploy_model.backbone.named_parameters():
        print("deploy param: ", name, param.size(), np.mean(fused[name]))
        param.data = torch.from_numpy(fused[name]).float()
    return deploy_model
161,512 | import logging
import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..module.activation import act_layers
def get_url(width_mult=1.0):
    """Return the pretrained GhostNet checkpoint URL for ``width_mult``.

    Only the 1.0 multiplier has released weights; any other value logs a
    notice and yields None.
    """
    if width_mult != 1.0:
        logging.info("GhostNet only has 1.0 pretrain model. ")
        return None
    return "https://raw.githubusercontent.com/huawei-noah/CV-Backbones/master/ghostnet_pytorch/models/state_dict_73.98.pth"  # noqa E501
161,513 | import logging
import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..module.activation import act_layers
The provided code snippet includes necessary dependencies for implementing the `_make_divisible` function. Write a Python function `def _make_divisible(v, divisor, min_value=None)` to solve the following problem:
This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
Here is the function:
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v | This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py |
161,514 | import logging
import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..module.activation import act_layers
def hard_sigmoid(x, inplace: bool = False):
    """Piecewise-linear sigmoid approximation: clamp((x + 3) / 6, 0, 1)."""
    if inplace:
        # Mutates ``x`` directly to avoid an allocation.
        return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
    return F.relu6(x + 3.0) / 6.0
161,515 | from __future__ import absolute_import, division, print_function
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from ..module.activation import act_layers
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding=1 and no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
161,516 | from __future__ import absolute_import, division, print_function
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from ..module.activation import act_layers
def fill_fc_weights(layers):
    """Initialise every Conv2d in ``layers``: weights ~ N(0, 0.001), biases 0."""
    for module in layers.modules():
        if not isinstance(module, nn.Conv2d):
            continue
        nn.init.normal_(module.weight, std=0.001)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0)
161,517 | import torch.nn as nn
def kaiming_init(
    module, a=0, mode="fan_out", nonlinearity="relu", bias=0, distribution="normal"
):
    """Kaiming-initialise ``module.weight`` and set its bias to a constant."""
    assert distribution in ["uniform", "normal"]
    init_fn = (
        nn.init.kaiming_uniform_
        if distribution == "uniform"
        else nn.init.kaiming_normal_
    )
    init_fn(module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
    if getattr(module, "bias", None) is not None:
        nn.init.constant_(module.bias, bias)
161,518 | import torch.nn as nn
def xavier_init(module, gain=1, bias=0, distribution="normal"):
    """Xavier-initialise ``module.weight`` and set its bias to a constant."""
    assert distribution in ["uniform", "normal"]
    init_fn = (
        nn.init.xavier_uniform_ if distribution == "uniform" else nn.init.xavier_normal_
    )
    init_fn(module.weight, gain=gain)
    if getattr(module, "bias", None) is not None:
        nn.init.constant_(module.bias, bias)
161,519 | import torch.nn as nn
def normal_init(module, mean=0, std=1, bias=0):
    """Fill ``module.weight`` from N(mean, std) and set its bias to ``bias``."""
    nn.init.normal_(module.weight, mean, std)
    if getattr(module, "bias", None) is not None:
        nn.init.constant_(module.bias, bias)
161,520 | import torch.nn as nn
def constant_init(module, val, bias=0):
    """Fill ``module.weight`` with ``val`` and its bias with ``bias``,
    skipping whichever is absent or None."""
    if getattr(module, "weight", None) is not None:
        nn.init.constant_(module.weight, val)
    if getattr(module, "bias", None) is not None:
        nn.init.constant_(module.bias, bias)
161,521 | import torch.nn as nn
# Registry used by build_norm_layer: config "type" -> (abbreviation, class).
norm_cfg = {
    # format: layer_type: (abbreviation, module)
    "BN": ("bn", nn.BatchNorm2d),
    "SyncBN": ("bn", nn.SyncBatchNorm),
    "GN": ("gn", nn.GroupNorm),
    # and potentially 'SN'
}
The provided code snippet includes necessary dependencies for implementing the `build_norm_layer` function. Write a Python function `def build_norm_layer(cfg, num_features, postfix="")` to solve the following problem:
Build normalization layer Args: cfg (dict): cfg should contain: type (str): identify norm layer type. layer args: args needed to instantiate a norm layer. requires_grad (bool): [optional] whether stop gradient updates num_features (int): number of channels from input. postfix (int, str): appended into norm abbreviation to create named layer. Returns: name (str): abbreviation + postfix layer (nn.Module): created norm layer
Here is the function:
def build_norm_layer(cfg, num_features, postfix=""):
    """Build normalization layer

    Args:
        cfg (dict): cfg should contain:
            type (str): identify norm layer type.
            layer args: args needed to instantiate a norm layer.
            requires_grad (bool): [optional] whether stop gradient updates
        num_features (int): number of channels from input.
        postfix (int, str): appended into norm abbreviation to
            create named layer.

    Returns:
        name (str): abbreviation + postfix
        layer (nn.Module): created norm layer
    """
    assert isinstance(cfg, dict) and "type" in cfg
    cfg_ = cfg.copy()
    layer_type = cfg_.pop("type")
    if layer_type not in norm_cfg:
        raise KeyError("Unrecognized norm type {}".format(layer_type))
    abbr, norm_layer = norm_cfg[layer_type]
    if norm_layer is None:
        raise NotImplementedError

    assert isinstance(postfix, (int, str))
    name = abbr + str(postfix)

    requires_grad = cfg_.pop("requires_grad", True)
    cfg_.setdefault("eps", 1e-5)
    if layer_type == "GN":
        # GroupNorm takes num_channels plus a mandatory num_groups.
        assert "num_groups" in cfg_
        layer = norm_layer(num_channels=num_features, **cfg_)
    else:
        layer = norm_layer(num_features, **cfg_)
        if layer_type == "SyncBN" and hasattr(layer, "_specify_ddp_gpu_num"):
            layer._specify_ddp_gpu_num(1)

    for param in layer.parameters():
        param.requires_grad = requires_grad
    return name, layer
161,522 | import torch
from torchvision.ops import nms
def batched_nms(boxes, scores, idxs, nms_cfg, class_agnostic=False):
    """Performs non-maximum suppression in a batched fashion.

    Modified from https://github.com/pytorch/vision/blob
    /505cd6957711af790211896d32b40291bea1bc21/torchvision/ops/boxes.py#L39.
    In order to perform NMS independently per class, we add an offset to all
    the boxes. The offset is dependent only on the class idx, and is large
    enough so that boxes from different classes do not overlap.

    Arguments:
        boxes (torch.Tensor): boxes in shape (N, 4).
        scores (torch.Tensor): scores in shape (N, ).
        idxs (torch.Tensor): each index value correspond to a bbox cluster,
            and NMS will not be applied between elements of different idxs,
            shape (N, ).
        nms_cfg (dict): specify nms type and other parameters like iou_thr.
            Possible keys includes the following.

            - iou_thr (float): IoU threshold used for NMS.
            - split_thr (float): threshold number of boxes. In some cases the
              number of boxes is large (e.g., 200k). To avoid OOM during
              training, the users could set `split_thr` to a small value.
              If the number of boxes is greater than the threshold, it will
              perform NMS on each group of boxes separately and sequentially.
              Defaults to 10000.
        class_agnostic (bool): if true, nms is class agnostic,
            i.e. IoU thresholding happens over all boxes,
            regardless of the predicted class.

    Returns:
        tuple: kept dets and indice.
    """
    nms_cfg_ = nms_cfg.copy()
    class_agnostic = nms_cfg_.pop("class_agnostic", class_agnostic)
    if class_agnostic:
        boxes_for_nms = boxes
    else:
        # Shift each cluster's boxes by an offset larger than any coordinate,
        # so boxes from different clusters can never overlap; one NMS pass
        # then behaves like independent per-cluster NMS.
        max_coordinate = boxes.max()
        offsets = idxs.to(boxes) * (max_coordinate + 1)
        boxes_for_nms = boxes + offsets[:, None]
    # Remaining keys in nms_cfg_ are forwarded verbatim to torchvision's nms.
    nms_cfg_.pop("type", "nms")
    split_thr = nms_cfg_.pop("split_thr", 10000)
    if len(boxes_for_nms) < split_thr:
        keep = nms(boxes_for_nms, scores, **nms_cfg_)
        boxes = boxes[keep]
        scores = scores[keep]
    else:
        # Too many boxes for one pass: run NMS per cluster id sequentially,
        # merge the kept indices, then restore descending-score order.
        total_mask = scores.new_zeros(scores.size(), dtype=torch.bool)
        for id in torch.unique(idxs):
            mask = (idxs == id).nonzero(as_tuple=False).view(-1)
            keep = nms(boxes_for_nms[mask], scores[mask], **nms_cfg_)
            total_mask[mask[keep]] = True
        keep = total_mask.nonzero(as_tuple=False).view(-1)
        keep = keep[scores[keep].argsort(descending=True)]
        boxes = boxes[keep]
        scores = scores[keep]
    # dets are (x1, y1, x2, y2, score); keep indexes into the input order.
    return torch.cat([boxes, scores[:, None]], -1), keep
The provided code snippet includes necessary dependencies for implementing the `multiclass_nms` function. Write a Python function `def multiclass_nms( multi_bboxes, multi_scores, score_thr, nms_cfg, max_num=-1, score_factors=None )` to solve the following problem:
NMS for multi-class bboxes. Args: multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) multi_scores (Tensor): shape (n, #class), where the last column contains scores of the background class, but this will be ignored. score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. nms_thr (float): NMS IoU threshold max_num (int): if there are more than max_num bboxes after NMS, only top max_num will be kept. score_factors (Tensor): The factors multiplied to scores before applying NMS Returns: tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels \ are 0-based.
Here is the function:
def multiclass_nms(
    multi_bboxes, multi_scores, score_thr, nms_cfg, max_num=-1, score_factors=None
):
    """NMS for multi-class bboxes.

    Args:
        multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
        multi_scores (Tensor): shape (n, #class), where the last column
            contains scores of the background class, but this will be ignored.
        score_thr (float): bbox threshold, bboxes with scores lower than it
            will not be considered.
        nms_cfg (dict): NMS config forwarded to ``batched_nms``.
        max_num (int): if there are more than max_num bboxes after NMS,
            only top max_num will be kept.
        score_factors (Tensor): The factors multiplied to scores before
            applying NMS

    Returns:
        tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels \
            are 0-based.
    """
    num_classes = multi_scores.size(1) - 1
    # exclude background category
    if multi_bboxes.shape[1] > 4:
        # Class-specific regression: one box per class already present.
        bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4)
    else:
        # Class-agnostic boxes: broadcast the single box to every class.
        bboxes = multi_bboxes[:, None].expand(multi_scores.size(0), num_classes, 4)
    scores = multi_scores[:, :-1]

    # filter out boxes with low scores
    valid_mask = scores > score_thr

    # We use masked_select for ONNX exporting purpose,
    # which is equivalent to bboxes = bboxes[valid_mask]
    # we have to use this ugly code
    bboxes = torch.masked_select(
        bboxes, torch.stack((valid_mask, valid_mask, valid_mask, valid_mask), -1)
    ).view(-1, 4)
    if score_factors is not None:
        scores = scores * score_factors[:, None]
    scores = torch.masked_select(scores, valid_mask)
    # Column index of each surviving (box, class) pair is its class label.
    labels = valid_mask.nonzero(as_tuple=False)[:, 1]

    if bboxes.numel() == 0:
        bboxes = multi_bboxes.new_zeros((0, 5))
        labels = multi_bboxes.new_zeros((0,), dtype=torch.long)

        if torch.onnx.is_in_onnx_export():
            raise RuntimeError(
                "[ONNX Error] Can not record NMS "
                "as it has not been executed this time"
            )
        return bboxes, labels

    dets, keep = batched_nms(bboxes, scores, labels, nms_cfg)

    if max_num > 0:
        dets = dets[:max_num]
        keep = keep[:max_num]

    return dets, labels[keep]
161,523 | import torch.nn as nn
# Registry mapping config names to activation classes; ``None`` maps to
# Identity so "no activation" configs work transparently.
activations = {
    "ReLU": nn.ReLU,
    "LeakyReLU": nn.LeakyReLU,
    "ReLU6": nn.ReLU6,
    "SELU": nn.SELU,
    "ELU": nn.ELU,
    "GELU": nn.GELU,
    "PReLU": nn.PReLU,
    "SiLU": nn.SiLU,
    "HardSwish": nn.Hardswish,
    "Hardswish": nn.Hardswish,
    None: nn.Identity,
}


def act_layers(name):
    """Instantiate the activation registered under ``name``.

    Modules that support it are created with ``inplace=True``; LeakyReLU
    additionally uses a 0.1 negative slope, while GELU and PReLU take no
    ``inplace`` argument.
    """
    assert name in activations.keys()
    if name == "LeakyReLU":
        return nn.LeakyReLU(negative_slope=0.1, inplace=True)
    if name == "GELU":
        return nn.GELU()
    if name == "PReLU":
        return nn.PReLU()
    return activations[name](inplace=True)
161,524 | import math
import cv2
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from nanodet.util import (
bbox2distance,
distance2bbox,
images_to_levels,
multi_apply,
overlay_bbox_cv,
)
from ...data.transform.warp import warp_boxes
from ..loss.gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from ..loss.iou_loss import GIoULoss, bbox_overlaps
from ..module.conv import ConvModule
from ..module.init_weights import normal_init
from ..module.nms import multiclass_nms
from ..module.scale import Scale
from .assigner.atss_assigner import ATSSAssigner
def reduce_mean(tensor):
    """Average ``tensor`` across all distributed workers.

    Returns the input unchanged when torch.distributed is unavailable or not
    initialised (single-process runs).
    """
    if not (dist.is_available() and dist.is_initialized()):
        return tensor
    tensor = tensor.clone()
    # Divide in place *before* all_reduce. The previous code passed the
    # temporary returned by ``true_divide`` to all_reduce, so the reduced
    # result was discarded and the un-divided clone was returned.
    tensor.div_(dist.get_world_size())
    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
    return tensor
161,525 | import functools
import torch.nn.functional as F
def weight_reduce_loss(loss, weight=None, reduction="mean", avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    # Element-wise weighting happens before any reduction.
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        # No custom normaliser: defer to the standard reduction helper.
        loss = reduce_loss(loss, reduction)
    elif reduction == "mean":
        # Custom normaliser replaces the element count in the mean.
        loss = loss.sum() / avg_factor
    elif reduction != "none":
        # 'sum' cannot be combined with avg_factor.
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss
The provided code snippet includes necessary dependencies for implementing the `weighted_loss` function. Write a Python function `def weighted_loss(loss_func)` to solve the following problem:
Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000)
Here is the function:
def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def weighted(pred, target, weight=None, reduction="mean", avg_factor=None, **kwargs):
        # Compute the raw element-wise loss, then weight/reduce it.
        elementwise = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(elementwise, weight, reduction, avg_factor)

    return weighted
161,526 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import weighted_loss
The provided code snippet includes necessary dependencies for implementing the `quality_focal_loss` function. Write a Python function `def quality_focal_loss(pred, target, beta=2.0)` to solve the following problem:
r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection <https://arxiv.org/abs/2006.04388>`_. Args: pred (torch.Tensor): Predicted joint representation of classification and quality (IoU) estimation with shape (N, C), C is the number of classes. target (tuple([torch.Tensor])): Target category label with shape (N,) and target quality label with shape (N,). beta (float): The beta parameter for calculating the modulating factor. Defaults to 2.0. Returns: torch.Tensor: Loss tensor with shape (N,).
Here is the function:
def quality_focal_loss(pred, target, beta=2.0):
    r"""Quality Focal Loss (QFL) from `Generalized Focal Loss: Learning
    Qualified and Distributed Bounding Boxes for Dense Object Detection
    <https://arxiv.org/abs/2006.04388>`_.

    Args:
        pred (torch.Tensor): Joint classification / quality (IoU) logits of
            shape (N, C), where C is the number of classes.
        target (tuple([torch.Tensor])): ``(label, score)`` — category labels
            of shape (N,) and quality (IoU) scores of shape (N,).
        beta (float): Exponent of the modulating factor. Defaults to 2.0.

    Returns:
        torch.Tensor: Loss tensor of shape (N,).
    """
    assert (
        len(target) == 2
    ), """target for QFL must be a tuple of two elements,
    including category label and quality label, respectively"""
    # label holds the category id, score the quality (IoU) target.
    label, score = target

    # Every position starts as a negative, supervised toward quality 0.
    pred_sigmoid = pred.sigmoid()
    zero_target = pred_sigmoid.new_zeros(pred.shape)
    loss = F.binary_cross_entropy_with_logits(
        pred, zero_target, reduction="none"
    ) * pred_sigmoid.pow(beta)

    # Foreground categories are [0, num_classes - 1]; background id is C.
    num_classes = pred.size(1)
    pos_inds = torch.nonzero(
        (label >= 0) & (label < num_classes), as_tuple=False
    ).squeeze(1)
    pos_labels = label[pos_inds].long()

    # Positives are supervised by their bbox quality (IoU) score; the
    # modulating factor is the gap between prediction and quality target.
    gap = score[pos_inds] - pred_sigmoid[pos_inds, pos_labels]
    loss[pos_inds, pos_labels] = F.binary_cross_entropy_with_logits(
        pred[pos_inds, pos_labels], score[pos_inds], reduction="none"
    ) * gap.abs().pow(beta)

    return loss.sum(dim=1, keepdim=False)
161,527 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import weighted_loss
The provided code snippet includes necessary dependencies for implementing the `distribution_focal_loss` function. Write a Python function `def distribution_focal_loss(pred, label)` to solve the following problem:
r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection <https://arxiv.org/abs/2006.04388>`_. Args: pred (torch.Tensor): Predicted general distribution of bounding boxes (before softmax) with shape (N, n+1), n is the max value of the integral set `{0, ..., n}` in paper. label (torch.Tensor): Target distance label for bounding boxes with shape (N,). Returns: torch.Tensor: Loss tensor with shape (N,).
Here is the function:
def distribution_focal_loss(pred, label):
    r"""Distribution Focal Loss (DFL) from `Generalized Focal Loss: Learning
    Qualified and Distributed Bounding Boxes for Dense Object Detection
    <https://arxiv.org/abs/2006.04388>`_.

    Args:
        pred (torch.Tensor): Predicted general distribution of bounding boxes
            (pre-softmax logits) of shape (N, n+1), where n is the max value
            of the integral set ``{0, ..., n}`` in the paper.
        label (torch.Tensor): Continuous target distances of shape (N,).

    Returns:
        torch.Tensor: Loss tensor of shape (N,).
    """
    # Each continuous label is split between its two neighbouring integer
    # bins, weighted linearly by its distance to each bin.
    left_bin = label.long()
    right_bin = left_bin + 1
    left_weight = right_bin.float() - label
    right_weight = label - left_bin.float()
    left_term = F.cross_entropy(pred, left_bin, reduction="none") * left_weight
    right_term = F.cross_entropy(pred, right_bin, reduction="none") * right_weight
    return left_term + right_term
161,528 | import math
import torch
import torch.nn as nn
from .utils import weighted_loss
def bbox_overlaps(bboxes1, bboxes2, mode="iou", is_aligned=False, eps=1e-6):
    """Calculate overlap between two set of bboxes.

    If ``is_aligned`` is ``False``, then calculate the overlaps between each
    bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
    pair of bboxes1 and bboxes2.

    Args:
        bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
        bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
            B indicates the batch dim, in shape (B1, B2, ..., Bn).
            If ``is_aligned`` is ``True``, then m and n must be equal.
        mode (str): "iou" (intersection over union), "iof" (intersection over
            foreground) or "giou" (generalized intersection over union).
        is_aligned (bool, optional): If True, then m and n must be equal.
            Default False.
        eps (float, optional): A value added to the denominator for numerical
            stability. Default 1e-6.

    Returns:
        Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)

    Example:
        >>> bboxes1 = torch.FloatTensor([
        >>>     [0, 0, 10, 10],
        >>>     [10, 10, 20, 20],
        >>>     [32, 32, 38, 42],
        >>> ])
        >>> bboxes2 = torch.FloatTensor([
        >>>     [0, 0, 10, 20],
        >>>     [0, 10, 10, 19],
        >>>     [10, 10, 20, 20],
        >>> ])
        >>> bbox_overlaps(bboxes1, bboxes2)
        tensor([[0.5000, 0.0000, 0.0000],
                [0.0000, 0.0000, 1.0000],
                [0.0000, 0.0000, 0.0000]])
        >>> bbox_overlaps(bboxes1, bboxes2, mode='giou', eps=1e-7)
        tensor([[0.5000, 0.0000, -0.5000],
                [-0.2500, -0.0500, 1.0000],
                [-0.8371, -0.8766, -0.8214]])

    Example:
        >>> empty = torch.FloatTensor([])
        >>> nonempty = torch.FloatTensor([
        >>>     [0, 0, 10, 9],
        >>> ])
        >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
        >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
        >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
    """
    assert mode in ["iou", "iof", "giou"], f"Unsupported mode {mode}"
    # Either the boxes are empty or the length of boxes's last dimension is 4
    assert bboxes1.size(-1) == 4 or bboxes1.size(0) == 0
    assert bboxes2.size(-1) == 4 or bboxes2.size(0) == 0
    # Batch dim must be the same
    # Batch dim: (B1, B2, ... Bn)
    assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
    batch_shape = bboxes1.shape[:-2]

    rows = bboxes1.size(-2)
    cols = bboxes2.size(-2)
    if is_aligned:
        assert rows == cols

    # Early out for empty inputs: return an empty tensor of the right shape.
    if rows * cols == 0:
        if is_aligned:
            return bboxes1.new(batch_shape + (rows,))
        else:
            return bboxes1.new(batch_shape + (rows, cols))

    area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1])
    area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1])

    if is_aligned:
        # Element-wise intersection of the i-th box in each set.
        lt = torch.max(bboxes1[..., :2], bboxes2[..., :2])  # [B, rows, 2]
        rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])  # [B, rows, 2]

        wh = (rb - lt).clamp(min=0)  # [B, rows, 2]
        overlap = wh[..., 0] * wh[..., 1]

        if mode in ["iou", "giou"]:
            union = area1 + area2 - overlap
        else:
            # "iof": normalize by the foreground (bboxes1) area only.
            union = area1
        if mode == "giou":
            # Smallest box enclosing both; needed for the GIoU penalty term.
            enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
            enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
    else:
        # Pairwise intersection via broadcasting: all rows x cols pairs.
        lt = torch.max(
            bboxes1[..., :, None, :2], bboxes2[..., None, :, :2]
        )  # [B, rows, cols, 2]
        rb = torch.min(
            bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:]
        )  # [B, rows, cols, 2]

        wh = (rb - lt).clamp(min=0)  # [B, rows, cols, 2]
        overlap = wh[..., 0] * wh[..., 1]

        if mode in ["iou", "giou"]:
            union = area1[..., None] + area2[..., None, :] - overlap
        else:
            union = area1[..., None]
        if mode == "giou":
            enclosed_lt = torch.min(
                bboxes1[..., :, None, :2], bboxes2[..., None, :, :2]
            )
            enclosed_rb = torch.max(
                bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:]
            )

    # Clamp the union from below to avoid division by zero.
    eps = union.new_tensor([eps])
    union = torch.max(union, eps)
    ious = overlap / union
    if mode in ["iou", "iof"]:
        return ious
    # calculate gious: IoU minus the normalized "wasted" enclosing area.
    enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0)
    enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
    enclose_area = torch.max(enclose_area, eps)
    gious = ious - (enclose_area - union) / enclose_area
    return gious
The provided code snippet includes necessary dependencies for implementing the `iou_loss` function. Write a Python function `def iou_loss(pred, target, eps=1e-6)` to solve the following problem:
IoU loss. Computing the IoU loss between a set of predicted bboxes and target bboxes. The loss is calculated as negative log of IoU. Args: pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (torch.Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Eps to avoid log(0). Return: torch.Tensor: Loss tensor.
Here is the function:
def iou_loss(pred, target, eps=1e-6):
    """IoU loss: negative log of the IoU between aligned box pairs.

    Args:
        pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
            shape (n, 4).
        target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Eps to avoid log(0).

    Return:
        torch.Tensor: Loss tensor.
    """
    # Clamp before the log so degenerate (zero-overlap) pairs stay finite.
    overlaps = bbox_overlaps(pred, target, is_aligned=True)
    return -overlaps.clamp(min=eps).log()
161,529 | import math
import torch
import torch.nn as nn
from .utils import weighted_loss
The provided code snippet includes necessary dependencies for implementing the `bounded_iou_loss` function. Write a Python function `def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3)` to solve the following problem:
BIoULoss. This is an implementation of paper `Improving Object Localization with Fitness NMS and Bounded IoU Loss. <https://arxiv.org/abs/1711.00164>`_. Args: pred (torch.Tensor): Predicted bboxes. target (torch.Tensor): Target bboxes. beta (float): beta parameter in smoothl1. eps (float): eps to avoid NaN.
Here is the function:
def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3):
    """BIoULoss.

    Implementation of `Improving Object Localization with Fitness NMS and
    Bounded IoU Loss <https://arxiv.org/abs/1711.00164>`_.

    Args:
        pred (torch.Tensor): Predicted bboxes (x1, y1, x2, y2).
        target (torch.Tensor): Target bboxes.
        beta (float): Transition point of the smooth-L1 style transform.
        eps (float): Small constant to avoid NaN.
    """
    px = (pred[:, 0] + pred[:, 2]) * 0.5
    py = (pred[:, 1] + pred[:, 3]) * 0.5
    pw = pred[:, 2] - pred[:, 0]
    ph = pred[:, 3] - pred[:, 1]

    # Targets are constants w.r.t. optimisation; no gradient needed.
    with torch.no_grad():
        tx = (target[:, 0] + target[:, 2]) * 0.5
        ty = (target[:, 1] + target[:, 3]) * 0.5
        tw = target[:, 2] - target[:, 0]
        th = target[:, 3] - target[:, 1]

    dx = tx - px
    dy = ty - py

    # Bounded-IoU terms for the centre offsets...
    loss_dx = 1 - torch.max(
        (tw - 2 * dx.abs()) / (tw + 2 * dx.abs() + eps),
        torch.zeros_like(dx),
    )
    loss_dy = 1 - torch.max(
        (th - 2 * dy.abs()) / (th + 2 * dy.abs() + eps),
        torch.zeros_like(dy),
    )
    # ...and for the width/height ratios.
    loss_dw = 1 - torch.min(tw / (pw + eps), pw / (tw + eps))
    loss_dh = 1 - torch.min(th / (ph + eps), ph / (th + eps))

    stacked = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh], dim=-1).view(
        loss_dx.size(0), -1
    )
    # Smooth-L1 style transform on each term, then sum per box.
    return torch.where(
        stacked < beta, 0.5 * stacked * stacked / beta, stacked - 0.5 * beta
    ).sum(dim=-1)
161,530 | import math
import torch
import torch.nn as nn
from .utils import weighted_loss
def bbox_overlaps(bboxes1, bboxes2, mode="iou", is_aligned=False, eps=1e-6):
    """Calculate overlap between two set of bboxes.

    If ``is_aligned`` is ``False``, then calculate the overlaps between each
    bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
    pair of bboxes1 and bboxes2.

    Args:
        bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
        bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
            B indicates the batch dim, in shape (B1, B2, ..., Bn).
            If ``is_aligned`` is ``True``, then m and n must be equal.
        mode (str): "iou" (intersection over union), "iof" (intersection over
            foreground) or "giou" (generalized intersection over union).
        is_aligned (bool, optional): If True, then m and n must be equal.
            Default False.
        eps (float, optional): A value added to the denominator for numerical
            stability. Default 1e-6.

    Returns:
        Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)

    Example:
        >>> bboxes1 = torch.FloatTensor([
        >>>     [0, 0, 10, 10],
        >>>     [10, 10, 20, 20],
        >>>     [32, 32, 38, 42],
        >>> ])
        >>> bboxes2 = torch.FloatTensor([
        >>>     [0, 0, 10, 20],
        >>>     [0, 10, 10, 19],
        >>>     [10, 10, 20, 20],
        >>> ])
        >>> bbox_overlaps(bboxes1, bboxes2)
        tensor([[0.5000, 0.0000, 0.0000],
                [0.0000, 0.0000, 1.0000],
                [0.0000, 0.0000, 0.0000]])
        >>> bbox_overlaps(bboxes1, bboxes2, mode='giou', eps=1e-7)
        tensor([[0.5000, 0.0000, -0.5000],
                [-0.2500, -0.0500, 1.0000],
                [-0.8371, -0.8766, -0.8214]])

    Example:
        >>> empty = torch.FloatTensor([])
        >>> nonempty = torch.FloatTensor([
        >>>     [0, 0, 10, 9],
        >>> ])
        >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
        >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
        >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
    """
    assert mode in ["iou", "iof", "giou"], f"Unsupported mode {mode}"
    # Either the boxes are empty or the length of boxes's last dimension is 4
    assert bboxes1.size(-1) == 4 or bboxes1.size(0) == 0
    assert bboxes2.size(-1) == 4 or bboxes2.size(0) == 0
    # Batch dim must be the same
    # Batch dim: (B1, B2, ... Bn)
    assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
    batch_shape = bboxes1.shape[:-2]

    rows = bboxes1.size(-2)
    cols = bboxes2.size(-2)
    if is_aligned:
        assert rows == cols

    # Early out for empty inputs: return an empty tensor of the right shape.
    if rows * cols == 0:
        if is_aligned:
            return bboxes1.new(batch_shape + (rows,))
        else:
            return bboxes1.new(batch_shape + (rows, cols))

    area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1])
    area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1])

    if is_aligned:
        # Element-wise intersection of the i-th box in each set.
        lt = torch.max(bboxes1[..., :2], bboxes2[..., :2])  # [B, rows, 2]
        rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])  # [B, rows, 2]

        wh = (rb - lt).clamp(min=0)  # [B, rows, 2]
        overlap = wh[..., 0] * wh[..., 1]

        if mode in ["iou", "giou"]:
            union = area1 + area2 - overlap
        else:
            # "iof": normalize by the foreground (bboxes1) area only.
            union = area1
        if mode == "giou":
            # Smallest box enclosing both; needed for the GIoU penalty term.
            enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
            enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
    else:
        # Pairwise intersection via broadcasting: all rows x cols pairs.
        lt = torch.max(
            bboxes1[..., :, None, :2], bboxes2[..., None, :, :2]
        )  # [B, rows, cols, 2]
        rb = torch.min(
            bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:]
        )  # [B, rows, cols, 2]

        wh = (rb - lt).clamp(min=0)  # [B, rows, cols, 2]
        overlap = wh[..., 0] * wh[..., 1]

        if mode in ["iou", "giou"]:
            union = area1[..., None] + area2[..., None, :] - overlap
        else:
            union = area1[..., None]
        if mode == "giou":
            enclosed_lt = torch.min(
                bboxes1[..., :, None, :2], bboxes2[..., None, :, :2]
            )
            enclosed_rb = torch.max(
                bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:]
            )

    # Clamp the union from below to avoid division by zero.
    eps = union.new_tensor([eps])
    union = torch.max(union, eps)
    ious = overlap / union
    if mode in ["iou", "iof"]:
        return ious
    # calculate gious: IoU minus the normalized "wasted" enclosing area.
    enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0)
    enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
    enclose_area = torch.max(enclose_area, eps)
    gious = ious - (enclose_area - union) / enclose_area
    return gious
The provided code snippet includes necessary dependencies for implementing the `giou_loss` function. Write a Python function `def giou_loss(pred, target, eps=1e-7)` to solve the following problem:
r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding Box Regression <https://arxiv.org/abs/1902.09630>`_. Args: pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (torch.Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Eps to avoid log(0). Return: Tensor: Loss tensor.
Here is the function:
def giou_loss(pred, target, eps=1e-7):
    r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
    Box Regression <https://arxiv.org/abs/1902.09630>`_ loss: ``1 - GIoU``.

    Args:
        pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
            shape (n, 4).
        target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Eps to avoid log(0).

    Return:
        Tensor: Loss tensor.
    """
    return 1 - bbox_overlaps(pred, target, mode="giou", is_aligned=True, eps=eps)
161,531 | import math
import torch
import torch.nn as nn
from .utils import weighted_loss
The provided code snippet includes necessary dependencies for implementing the `diou_loss` function. Write a Python function `def diou_loss(pred, target, eps=1e-7)` to solve the following problem:
r"""`Implementation of Distance-IoU Loss: Faster and Better Learning for Bounding Box Regression, https://arxiv.org/abs/1911.08287`_. Code is modified from https://github.com/Zzh-tju/DIoU. Args: pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Eps to avoid log(0). Return: Tensor: Loss tensor.
Here is the function:
def diou_loss(pred, target, eps=1e-7):
    r"""`Distance-IoU Loss: Faster and Better Learning for Bounding Box
    Regression <https://arxiv.org/abs/1911.08287>`_.

    Code is modified from https://github.com/Zzh-tju/DIoU.

    Args:
        pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
            shape (n, 4).
        target (Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Eps to avoid log(0).

    Return:
        Tensor: Loss tensor.
    """
    # Intersection area.
    inter_lt = torch.max(pred[:, :2], target[:, :2])
    inter_rb = torch.min(pred[:, 2:], target[:, 2:])
    inter_wh = (inter_rb - inter_lt).clamp(min=0)
    inter_area = inter_wh[:, 0] * inter_wh[:, 1]

    # Union and plain IoU.
    pred_area = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
    target_area = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
    union = pred_area + target_area - inter_area + eps
    ious = inter_area / union

    # Squared diagonal of the smallest enclosing box.
    enc_lt = torch.min(pred[:, :2], target[:, :2])
    enc_rb = torch.max(pred[:, 2:], target[:, 2:])
    enc_wh = (enc_rb - enc_lt).clamp(min=0)
    c2 = enc_wh[:, 0] ** 2 + enc_wh[:, 1] ** 2 + eps

    # Squared distance between the two box centres.
    rho2 = (
        ((target[:, 0] + target[:, 2]) - (pred[:, 0] + pred[:, 2])) ** 2 / 4
        + ((target[:, 1] + target[:, 3]) - (pred[:, 1] + pred[:, 3])) ** 2 / 4
    )

    # DIoU = IoU - normalized centre-distance penalty.
    return 1 - (ious - rho2 / c2)
161,532 | import math
import torch
import torch.nn as nn
from .utils import weighted_loss
The provided code snippet includes necessary dependencies for implementing the `ciou_loss` function. Write a Python function `def ciou_loss(pred, target, eps=1e-7)` to solve the following problem:
r"""`Implementation of paper `Enhancing Geometric Factors into Model Learning and Inference for Object Detection and Instance Segmentation <https://arxiv.org/abs/2005.03572>`_. Code is modified from https://github.com/Zzh-tju/CIoU. Args: pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Eps to avoid log(0). Return: Tensor: Loss tensor.
Here is the function:
def ciou_loss(pred, target, eps=1e-7):
r"""`Implementation of paper `Enhancing Geometric Factors into
Model Learning and Inference for Object Detection and Instance
Segmentation <https://arxiv.org/abs/2005.03572>`_.
Code is modified from https://github.com/Zzh-tju/CIoU.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
eps (float): Eps to avoid log(0).
Return:
Tensor: Loss tensor.
"""
# overlap
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
# union
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
# IoU
ious = overlap / union
# enclose area
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
cw = enclose_wh[:, 0]
ch = enclose_wh[:, 1]
c2 = cw**2 + ch**2 + eps
b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
b2_x1, b2_y1 = target[:, 0], target[:, 1]
b2_x2, b2_y2 = target[:, 2], target[:, 3]
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4
right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
rho2 = left + right
factor = 4 / math.pi**2
v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
# CIoU
cious = ious - (rho2 / c2 + v**2 / (1 - ious + v))
loss = 1 - cious
return loss | r"""`Implementation of paper `Enhancing Geometric Factors into Model Learning and Inference for Object Detection and Instance Segmentation <https://arxiv.org/abs/2005.03572>`_. Code is modified from https://github.com/Zzh-tju/CIoU. Args: pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Eps to avoid log(0). Return: Tensor: Loss tensor. |
161,533 | from collections import OrderedDict
from typing import Any, Dict
import pytorch_lightning as pl
import torch
from .rank_filter import rank_filter
def load_model_weight(model, checkpoint, logger):
    # Load `checkpoint["state_dict"]` into `model`, tolerating key-prefix
    # differences ("avg_model.", "module.", "model.") and shape mismatches.
    # Mismatched or missing parameters keep the model's current weights and
    # a message is emitted via `logger.log`.
    state_dict = checkpoint["state_dict"].copy()
    for k in checkpoint["state_dict"]:
        # convert average model weights: "avg_model.x" -> "model.x"
        # (k[4:] drops only the leading "avg_"; the remaining "model."
        # prefix is stripped below together with ordinary keys)
        if k.startswith("avg_model."):
            v = state_dict.pop(k)
            state_dict[k[4:]] = v
    # strip prefix of state_dict (decided from the first key only —
    # assumes all keys share the same prefix)
    if list(state_dict.keys())[0].startswith("module."):
        state_dict = {k[7:]: v for k, v in state_dict.items()}
    if list(state_dict.keys())[0].startswith("model."):
        state_dict = {k[6:]: v for k, v in state_dict.items()}

    # Unwrap DataParallel/DDP to compare against the raw module's weights.
    model_state_dict = (
        model.module.state_dict() if hasattr(model, "module") else model.state_dict()
    )

    # check loaded parameters and created model parameters
    for k in state_dict:
        if k in model_state_dict:
            if state_dict[k].shape != model_state_dict[k].shape:
                # Shape mismatch: keep the freshly initialised weight.
                logger.log(
                    "Skip loading parameter {}, required shape{}, "
                    "loaded shape{}.".format(
                        k, model_state_dict[k].shape, state_dict[k].shape
                    )
                )
                state_dict[k] = model_state_dict[k]
        else:
            logger.log("Drop parameter {}.".format(k))
    for k in model_state_dict:
        if not (k in state_dict):
            # Parameter absent from checkpoint: fill with the model's own.
            logger.log("No param {}.".format(k))
            state_dict[k] = model_state_dict[k]
    model.load_state_dict(state_dict, strict=False)
161,534 | from collections import OrderedDict
from typing import Any, Dict
import pytorch_lightning as pl
import torch
from .rank_filter import rank_filter
def save_model(model, path, epoch, iter, optimizer=None):
    """Serialize model weights (and optionally optimizer state) to ``path``.

    Args:
        model: Model, possibly wrapped in DataParallel/DDP (``.module``).
        path: Destination file passed to ``torch.save``.
        epoch (int): Epoch number stored alongside the weights.
        iter (int): Iteration number stored alongside the weights.
        optimizer: Optional optimizer whose state dict is saved as well.
    """
    # Unwrap DataParallel/DistributedDataParallel if present.
    raw_model = model.module if hasattr(model, "module") else model
    payload = {"epoch": epoch, "state_dict": raw_model.state_dict(), "iter": iter}
    if optimizer is not None:
        payload["optimizer"] = optimizer.state_dict()
    torch.save(payload, path)
161,535 | from collections import OrderedDict
from typing import Any, Dict
import pytorch_lightning as pl
import torch
from .rank_filter import rank_filter
def convert_old_model(old_model_dict):
    """Convert a legacy (pre-Lightning) checkpoint into Lightning format.

    Args:
        old_model_dict: Old-style checkpoint with ``epoch``, ``iter``,
            ``state_dict`` and optionally ``optimizer`` entries.

    Returns:
        dict: A pytorch-lightning style checkpoint.

    Raises:
        ValueError: If the checkpoint is already in Lightning format.
    """
    if "pytorch-lightning_version" in old_model_dict:
        raise ValueError("This model is not old format. No need to convert!")

    # Prefix every weight with "model." to match the LightningModule layout.
    renamed_weights = OrderedDict(
        ("model." + name, value)
        for name, value in old_model_dict["state_dict"].items()
    )

    converted = {
        "epoch": old_model_dict["epoch"],
        "global_step": old_model_dict["iter"],
        "pytorch-lightning_version": pl.__version__,
        "state_dict": renamed_weights,
        "lr_schedulers": [],
    }
    if "optimizer" in old_model_dict:
        converted["optimizer_states"] = [old_model_dict["optimizer"]]
    return converted
161,536 | from collections import OrderedDict
from typing import Any, Dict
import pytorch_lightning as pl
import torch
from .rank_filter import rank_filter
The provided code snippet includes necessary dependencies for implementing the `convert_avg_params` function. Write a Python function `def convert_avg_params(checkpoint: Dict[str, Any]) -> Dict[str, Any]` to solve the following problem:
Converts average state dict to the format that can be loaded to a model. Args: checkpoint: model. Returns: Converted average state dict.
Here is the function:
def convert_avg_params(checkpoint: Dict[str, Any]) -> Dict[str, Any]:
    """Converts average state dict to the format that can be loaded to a model.

    Args:
        checkpoint: Checkpoint dict containing a ``state_dict`` entry.

    Returns:
        Converted average state dict (``avg_model.`` prefix stripped).
    """
    # Keep only weight-averaging entries, dropping the 10-character
    # "avg_model." prefix from each key.
    return {
        key[10:]: value
        for key, value in checkpoint["state_dict"].items()
        if "avg_model" in key
    }
161,537 | import os
import platform
import warnings
import torch.multiprocessing as mp
The provided code snippet includes necessary dependencies for implementing the `set_multi_processing` function. Write a Python function `def set_multi_processing( mp_start_method: str = "fork", opencv_num_threads: int = 0, distributed: bool = True ) -> None` to solve the following problem:
Set multi-processing related environment. This function is refered from https://github.com/open-mmlab/mmengine/blob/main/mmengine/utils/dl_utils/setup_env.py Args: mp_start_method (str): Set the method which should be used to start child processes. Defaults to 'fork'. opencv_num_threads (int): Number of threads for opencv. Defaults to 0. distributed (bool): True if distributed environment. Defaults to False.
Here is the function:
def set_multi_processing(
    mp_start_method: str = "fork", opencv_num_threads: int = 0, distributed: bool = True
) -> None:
    """Set multi-processing related environment.

    This function is referred from
    https://github.com/open-mmlab/mmengine/blob/main/mmengine/utils/dl_utils/setup_env.py

    Args:
        mp_start_method (str): Set the method which should be used to start
            child processes. Defaults to 'fork'.
        opencv_num_threads (int): Number of threads for opencv.
            Defaults to 0.
        distributed (bool): True if distributed environment.
            Defaults to False.
    """  # noqa
    # Use `fork` (faster than `spawn`) to start workers; not on Windows.
    if platform.system() != "Windows":
        previous = mp.get_start_method(allow_none=True)
        if previous is not None and previous != mp_start_method:
            warnings.warn(
                f"Multi-processing start method `{mp_start_method}` is "
                f"different from the previous setting `{previous}`."
                f"It will be force set to `{mp_start_method}`. You can "
                "change this behavior by changing `mp_start_method` in "
                "your config."
            )
        mp.set_start_method(mp_start_method, force=True)

    try:
        import cv2

        # Disable OpenCV multithreading to avoid overloading the system.
        cv2.setNumThreads(opencv_num_threads)
    except ImportError:
        pass

    # Cap OMP/MKL thread pools per worker when running distributed, unless
    # the user configured them explicitly.  Referred from
    # https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py
    for env_var in ("OMP_NUM_THREADS", "MKL_NUM_THREADS"):
        if env_var not in os.environ and distributed:
            default_threads = 1
            warnings.warn(
                f"Setting {env_var} environment variable for each process"
                f" to be {default_threads} in default, to avoid your system "
                "being overloaded, please further tune the variable for "
                "optimal performance in your application as needed."
            )
            os.environ[env_var] = str(default_threads)
161,538 |
def rank_filter(func):
    """Decorator that runs ``func`` only on rank 0 (or single-process runs).

    The wrapped callable takes ``local_rank`` as its first argument
    (default -1); when ``local_rank >= 1`` the call is a no-op that
    returns ``None``.
    """

    def func_filter(local_rank=-1, *args, **kwargs):
        if local_rank >= 1:
            # Non-primary ranks skip the work entirely.
            return None
        return func(*args, **kwargs)

    return func_filter
161,539 | import pickle
import torch
import torch.distributed as dist
from torch.autograd import Variable
from torch.nn.parallel._functions import Scatter
def scatter(inputs, target_gpus, dim=0, chunk_sizes=None):
    """
    Slices variables into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not variables. Does not
    support Tensors.
    """
    def scatter_map(obj):
        # Leaf case: an autograd Variable is split along `dim` across the
        # target GPUs; `chunk_sizes` may prescribe uneven splits.
        if isinstance(obj, Variable):
            return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
        # Raw tensors are rejected: they must be wrapped in Variable first.
        assert not torch.is_tensor(obj), "Tensors not supported in scatter."
        if isinstance(obj, list):
            # NOTE(review): `list_scatter` is not defined in this module's
            # visible scope -- presumably a sibling helper; confirm it exists.
            return list_scatter(obj, target_gpus, chunk_sizes)
        if isinstance(obj, tuple):
            # Scatter each element recursively, then regroup per device.
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, dict):
            # Scatter the (key, value) items, rebuilding one dict of the
            # same type per device.
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        # Any other object is duplicated by reference, one per target GPU.
        return [obj for targets in target_gpus]
    return scatter_map(inputs)
The provided code snippet includes necessary dependencies for implementing the `scatter_kwargs` function. Write a Python function `def scatter_kwargs(inputs, kwargs, target_gpus, dim=0, chunk_sizes=None)` to solve the following problem:
r"""Scatter with support for kwargs dictionary
Here is the function:
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0, chunk_sizes=None):
    r"""Scatter positional and keyword arguments across the target GPUs.

    Both *inputs* and *kwargs* are scattered with :func:`scatter`; the
    shorter of the two resulting lists is padded (with empty tuples or
    empty dicts) so every device receives one (args, kwargs) pair.
    """
    scattered_args = scatter(inputs, target_gpus, dim, chunk_sizes) if inputs else []
    scattered_kw = scatter(kwargs, target_gpus, dim, chunk_sizes) if kwargs else []
    # Pad the shorter list so the two line up one-to-one per device.
    while len(scattered_args) < len(scattered_kw):
        scattered_args.append(())
    while len(scattered_kw) < len(scattered_args):
        scattered_kw.append({})
    return tuple(scattered_args), tuple(scattered_kw)
161,540 | import pickle
import torch
import torch.distributed as dist
from torch.autograd import Variable
from torch.nn.parallel._functions import Scatter
def gather_results(result_part):
    """Gather per-process partial result dicts onto rank 0.

    Each process pickles its `result_part` into a uint8 CUDA tensor, the
    buffers are exchanged with `all_gather` (padded to a common length
    first, since `all_gather` requires equal-sized tensors), and rank 0
    (or a non-distributed run with rank == -1) returns the union of all
    partial dicts. Other ranks return None implicitly.
    """
    rank = -1
    world_size = 1
    if dist.is_available() and dist.is_initialized():
        rank = dist.get_rank()
        world_size = dist.get_world_size()
    # dump result part to tensor with pickle
    part_tensor = torch.tensor(
        bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device="cuda"
    )
    # gather all result part tensor shape
    shape_tensor = torch.tensor(part_tensor.shape, device="cuda")
    shape_list = [shape_tensor.clone() for _ in range(world_size)]
    # NOTE(review): all_gather is invoked even when dist is not
    # initialized -- verify this path is only reached in distributed runs.
    dist.all_gather(shape_list, shape_tensor)
    # padding result part tensor to max length
    shape_max = torch.tensor(shape_list).max()
    part_send = torch.zeros(shape_max, dtype=torch.uint8, device="cuda")
    part_send[: shape_tensor[0]] = part_tensor
    part_recv_list = [part_tensor.new_zeros(shape_max) for _ in range(world_size)]
    # gather all result dict
    dist.all_gather(part_recv_list, part_send)
    if rank < 1:
        all_res = {}
        for recv, shape in zip(part_recv_list, shape_list):
            # Strip the zero padding before unpickling each partial dict.
            all_res.update(pickle.loads(recv[: shape[0]].cpu().numpy().tobytes()))
        return all_res
161,541 | import os
from .rank_filter import rank_filter
def mkdir(path):
    """Create directory *path* (including parents) if it does not exist.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the original
    check-then-create sequence, which was racy: another process (or rank)
    creating the directory between ``os.path.exists`` and ``os.makedirs``
    would raise ``FileExistsError``.
    """
    os.makedirs(path, exist_ok=True)
161,542 | import os
from .rank_filter import rank_filter
def collect_files(path, exts):
    """Recursively collect paths of files under *path* whose extension is in *exts*.

    Extensions are compared with the leading dot (e.g. ``".txt"``), exactly
    as returned by ``os.path.splitext``.
    """
    matched = []
    for root, _dirs, names in os.walk(path):
        for name in names:
            full = os.path.join(root, name)
            if os.path.splitext(full)[1] in exts:
                matched.append(full)
    return matched
161,543 | from .yacs import CfgNode
def load_config(cfg, args_cfg):
    """Merge the YAML config file *args_cfg* into *cfg* in place.

    The CfgNode is temporarily made mutable, the file is merged, and the
    node is re-frozen so later accidental mutation raises.
    """
    cfg.defrost()
    cfg.merge_from_file(args_cfg)
    cfg.freeze()
161,544 | import copy
import io
import logging
import os
import sys
from ast import literal_eval
import yaml
_VALID_TYPES = {tuple, list, str, int, float, bool, type(None)}
class CfgNode(dict):
"""
CfgNode represents an internal node in the configuration tree. It's a simple
dict-like container that allows for attribute-based access to keys.
"""
IMMUTABLE = "__immutable__"
DEPRECATED_KEYS = "__deprecated_keys__"
RENAMED_KEYS = "__renamed_keys__"
NEW_ALLOWED = "__new_allowed__"
def __init__(self, init_dict=None, key_list=None, new_allowed=False):
"""
Args:
init_dict (dict): the possibly-nested dictionary to initialize the
CfgNode.
key_list (list[str]): a list of names which index this CfgNode from
the root.
Currently only used for logging purposes.
new_allowed (bool): whether adding new key is allowed when merging with
other configs.
"""
# Recursively convert nested dictionaries in init_dict into CfgNodes
init_dict = {} if init_dict is None else init_dict
key_list = [] if key_list is None else key_list
init_dict = self._create_config_tree_from_dict(init_dict, key_list)
super(CfgNode, self).__init__(init_dict)
# Manage if the CfgNode is frozen or not
self.__dict__[CfgNode.IMMUTABLE] = False
# Deprecated options
# If an option is removed from the code and you don't want to break existing
# yaml configs, you can add the full config key as a string to the set below.
self.__dict__[CfgNode.DEPRECATED_KEYS] = set()
# Renamed options
# If you rename a config option, record the mapping from the old name to the
# new name in the dictionary below. Optionally, if the type also changed, you
# can make the value a tuple that specifies first the renamed key and then
# instructions for how to edit the config file.
self.__dict__[CfgNode.RENAMED_KEYS] = {
# 'EXAMPLE.OLD.KEY': 'EXAMPLE.NEW.KEY', # Dummy example to follow
# 'EXAMPLE.OLD.KEY': ( # A more complex example to follow
# 'EXAMPLE.NEW.KEY',
# "Also convert to a tuple, e.g., 'foo' -> ('foo',) or "
# + "'foo:bar' -> ('foo', 'bar')"
# ),
}
# Allow new attributes after initialisation
self.__dict__[CfgNode.NEW_ALLOWED] = new_allowed
def _create_config_tree_from_dict(cls, dic, key_list):
"""
Create a configuration tree using the given dict.
Any dict-like objects inside dict will be treated as a new CfgNode.
Args:
dic (dict):
key_list (list[str]): a list of names which index this CfgNode from
the root. Currently only used for logging purposes.
"""
dic = copy.deepcopy(dic)
for k, v in dic.items():
if isinstance(v, dict):
# Convert dict to CfgNode
dic[k] = cls(v, key_list=key_list + [k])
else:
# Check for valid leaf type or nested CfgNode
_assert_with_logging(
_valid_type(v, allow_cfg_node=False),
"Key {} with value {} is not a valid type; valid types: {}".format(
".".join(key_list + [k]), type(v), _VALID_TYPES
),
)
return dic
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if self.is_frozen():
raise AttributeError(
"Attempted to set {} to {}, but CfgNode is immutable".format(
name, value
)
)
_assert_with_logging(
name not in self.__dict__,
"Invalid attempt to modify internal CfgNode state: {}".format(name),
)
_assert_with_logging(
_valid_type(value, allow_cfg_node=True),
"Invalid type {} for key {}; valid types = {}".format(
type(value), name, _VALID_TYPES
),
)
self[name] = value
def __str__(self):
def _indent(s_, num_spaces):
s = s_.split("\n")
if len(s) == 1:
return s_
first = s.pop(0)
s = [(num_spaces * " ") + line for line in s]
s = "\n".join(s)
s = first + "\n" + s
return s
r = ""
s = []
for k, v in sorted(self.items()):
seperator = "\n" if isinstance(v, CfgNode) else " "
attr_str = "{}:{}{}".format(str(k), seperator, str(v))
attr_str = _indent(attr_str, 2)
s.append(attr_str)
r += "\n".join(s)
return r
def __repr__(self):
return "{}({})".format(self.__class__.__name__, super(CfgNode, self).__repr__())
def dump(self, **kwargs):
"""Dump to a string."""
def convert_to_dict(cfg_node, key_list):
if not isinstance(cfg_node, CfgNode):
_assert_with_logging(
_valid_type(cfg_node),
"Key {} with value {} is not a valid type; valid types: {}".format(
".".join(key_list), type(cfg_node), _VALID_TYPES
),
)
return cfg_node
else:
cfg_dict = dict(cfg_node)
for k, v in cfg_dict.items():
cfg_dict[k] = convert_to_dict(v, key_list + [k])
return cfg_dict
self_as_dict = convert_to_dict(self, [])
return yaml.safe_dump(self_as_dict, **kwargs)
def merge_from_file(self, cfg_filename):
"""Load a yaml config file and merge it this CfgNode."""
with open(cfg_filename, "r", encoding="utf-8") as f:
cfg = self.load_cfg(f)
self.merge_from_other_cfg(cfg)
def merge_from_other_cfg(self, cfg_other):
"""Merge `cfg_other` into this CfgNode."""
_merge_a_into_b(cfg_other, self, self, [])
def merge_from_list(self, cfg_list):
"""Merge config (keys, values) in a list (e.g., from command line) into
this CfgNode. For example, `cfg_list = ['FOO.BAR', 0.5]`.
"""
_assert_with_logging(
len(cfg_list) % 2 == 0,
"Override list has odd length: {}; it must be a list of pairs".format(
cfg_list
),
)
root = self
for full_key, v in zip(cfg_list[0::2], cfg_list[1::2]):
if root.key_is_deprecated(full_key):
continue
if root.key_is_renamed(full_key):
root.raise_key_rename_error(full_key)
key_list = full_key.split(".")
d = self
for subkey in key_list[:-1]:
_assert_with_logging(
subkey in d, "Non-existent key: {}".format(full_key)
)
d = d[subkey]
subkey = key_list[-1]
_assert_with_logging(subkey in d, "Non-existent key: {}".format(full_key))
value = self._decode_cfg_value(v)
value = _check_and_coerce_cfg_value_type(value, d[subkey], subkey, full_key)
d[subkey] = value
def freeze(self):
"""Make this CfgNode and all of its children immutable."""
self._immutable(True)
def defrost(self):
"""Make this CfgNode and all of its children mutable."""
self._immutable(False)
def is_frozen(self):
"""Return mutability."""
return self.__dict__[CfgNode.IMMUTABLE]
def _immutable(self, is_immutable):
"""Set immutability to is_immutable and recursively apply the setting
to all nested CfgNodes.
"""
self.__dict__[CfgNode.IMMUTABLE] = is_immutable
# Recursively set immutable state
for v in self.__dict__.values():
if isinstance(v, CfgNode):
v._immutable(is_immutable)
for v in self.values():
if isinstance(v, CfgNode):
v._immutable(is_immutable)
def clone(self):
"""Recursively copy this CfgNode."""
return copy.deepcopy(self)
def register_deprecated_key(self, key):
"""Register key (e.g. `FOO.BAR`) a deprecated option. When merging deprecated
keys a warning is generated and the key is ignored.
"""
_assert_with_logging(
key not in self.__dict__[CfgNode.DEPRECATED_KEYS],
"key {} is already registered as a deprecated key".format(key),
)
self.__dict__[CfgNode.DEPRECATED_KEYS].add(key)
def register_renamed_key(self, old_name, new_name, message=None):
"""Register a key as having been renamed from `old_name` to `new_name`.
When merging a renamed key, an exception is thrown alerting the user to
the fact that the key has been renamed.
"""
_assert_with_logging(
old_name not in self.__dict__[CfgNode.RENAMED_KEYS],
"key {} is already registered as a renamed cfg key".format(old_name),
)
value = new_name
if message:
value = (new_name, message)
self.__dict__[CfgNode.RENAMED_KEYS][old_name] = value
def key_is_deprecated(self, full_key):
"""Test if a key is deprecated."""
if full_key in self.__dict__[CfgNode.DEPRECATED_KEYS]:
logger.warning("Deprecated config key (ignoring): {}".format(full_key))
return True
return False
def key_is_renamed(self, full_key):
"""Test if a key is renamed."""
return full_key in self.__dict__[CfgNode.RENAMED_KEYS]
def raise_key_rename_error(self, full_key):
new_key = self.__dict__[CfgNode.RENAMED_KEYS][full_key]
if isinstance(new_key, tuple):
msg = " Note: " + new_key[1]
new_key = new_key[0]
else:
msg = ""
raise KeyError(
"Key {} was renamed to {}; please update your config.{}".format(
full_key, new_key, msg
)
)
def is_new_allowed(self):
return self.__dict__[CfgNode.NEW_ALLOWED]
def load_cfg(cls, cfg_file_obj_or_str):
"""
Load a cfg.
Args:
cfg_file_obj_or_str (str or file):
Supports loading from:
- A file object backed by a YAML file
- A file object backed by a Python source file that exports an attribute
"cfg" that is either a dict or a CfgNode
- A string that can be parsed as valid YAML
"""
_assert_with_logging(
isinstance(cfg_file_obj_or_str, _FILE_TYPES + (str,)),
"Expected first argument to be of type {} or {}, but it was {}".format(
_FILE_TYPES, str, type(cfg_file_obj_or_str)
),
)
if isinstance(cfg_file_obj_or_str, str):
return cls._load_cfg_from_yaml_str(cfg_file_obj_or_str)
elif isinstance(cfg_file_obj_or_str, _FILE_TYPES):
return cls._load_cfg_from_file(cfg_file_obj_or_str)
else:
raise NotImplementedError("Impossible to reach here (unless there's a bug)")
def _load_cfg_from_file(cls, file_obj):
"""Load a config from a YAML file or a Python source file."""
_, file_extension = os.path.splitext(file_obj.name)
if file_extension in _YAML_EXTS:
return cls._load_cfg_from_yaml_str(file_obj.read())
elif file_extension in _PY_EXTS:
return cls._load_cfg_py_source(file_obj.name)
else:
raise Exception(
"Attempt to load from an unsupported file type {}; "
"only {} are supported".format(file_obj, _YAML_EXTS.union(_PY_EXTS))
)
def _load_cfg_from_yaml_str(cls, str_obj):
"""Load a config from a YAML string encoding."""
cfg_as_dict = yaml.safe_load(str_obj)
return cls(cfg_as_dict)
def _load_cfg_py_source(cls, filename):
"""Load a config from a Python source file."""
module = _load_module_from_file("yacs.config.override", filename)
_assert_with_logging(
hasattr(module, "cfg"),
"Python module from file {} must have 'cfg' attr".format(filename),
)
VALID_ATTR_TYPES = {dict, CfgNode}
_assert_with_logging(
type(module.cfg) in VALID_ATTR_TYPES,
"Imported module 'cfg' attr must be in {} but is {} instead".format(
VALID_ATTR_TYPES, type(module.cfg)
),
)
return cls(module.cfg)
def _decode_cfg_value(cls, value):
"""
Decodes a raw config value (e.g., from a yaml config files or command
line argument) into a Python object.
If the value is a dict, it will be interpreted as a new CfgNode.
If the value is a str, it will be evaluated as literals.
Otherwise it is returned as-is.
"""
# Configs parsed from raw yaml will contain dictionary keys that need to be
# converted to CfgNode objects
if isinstance(value, dict):
return cls(value)
# All remaining processing is only applied to strings
if not isinstance(value, str):
return value
# Try to interpret `value` as a:
# string, number, tuple, list, dict, boolean, or None
try:
value = literal_eval(value)
# The following two excepts allow v to pass through when it represents a
# string.
#
# Longer explanation:
# The type of v is always a string (before calling literal_eval), but
# sometimes it *represents* a string and other times a data structure, like
# a list. In the case that v represents a string, what we got back from the
# yaml parser is 'foo' *without quotes* (so, not '"foo"'). literal_eval is
# ok with '"foo"', but will raise a ValueError if given 'foo'. In other
# cases, like paths (v = 'foo/bar' and not v = '"foo/bar"'), literal_eval
# will raise a SyntaxError.
except ValueError:
pass
except SyntaxError:
pass
return value
def _valid_type(value, allow_cfg_node=False):
    """Return True if *value* is a permitted config leaf type.

    A CfgNode instance is also accepted when *allow_cfg_node* is True.
    """
    if type(value) in _VALID_TYPES:
        return True
    return allow_cfg_node and isinstance(value, CfgNode)
161,545 | import copy
import io
import logging
import os
import sys
from ast import literal_eval
import yaml
class CfgNode(dict):
"""
CfgNode represents an internal node in the configuration tree. It's a simple
dict-like container that allows for attribute-based access to keys.
"""
IMMUTABLE = "__immutable__"
DEPRECATED_KEYS = "__deprecated_keys__"
RENAMED_KEYS = "__renamed_keys__"
NEW_ALLOWED = "__new_allowed__"
def __init__(self, init_dict=None, key_list=None, new_allowed=False):
"""
Args:
init_dict (dict): the possibly-nested dictionary to initialize the
CfgNode.
key_list (list[str]): a list of names which index this CfgNode from
the root.
Currently only used for logging purposes.
new_allowed (bool): whether adding new key is allowed when merging with
other configs.
"""
# Recursively convert nested dictionaries in init_dict into CfgNodes
init_dict = {} if init_dict is None else init_dict
key_list = [] if key_list is None else key_list
init_dict = self._create_config_tree_from_dict(init_dict, key_list)
super(CfgNode, self).__init__(init_dict)
# Manage if the CfgNode is frozen or not
self.__dict__[CfgNode.IMMUTABLE] = False
# Deprecated options
# If an option is removed from the code and you don't want to break existing
# yaml configs, you can add the full config key as a string to the set below.
self.__dict__[CfgNode.DEPRECATED_KEYS] = set()
# Renamed options
# If you rename a config option, record the mapping from the old name to the
# new name in the dictionary below. Optionally, if the type also changed, you
# can make the value a tuple that specifies first the renamed key and then
# instructions for how to edit the config file.
self.__dict__[CfgNode.RENAMED_KEYS] = {
# 'EXAMPLE.OLD.KEY': 'EXAMPLE.NEW.KEY', # Dummy example to follow
# 'EXAMPLE.OLD.KEY': ( # A more complex example to follow
# 'EXAMPLE.NEW.KEY',
# "Also convert to a tuple, e.g., 'foo' -> ('foo',) or "
# + "'foo:bar' -> ('foo', 'bar')"
# ),
}
# Allow new attributes after initialisation
self.__dict__[CfgNode.NEW_ALLOWED] = new_allowed
def _create_config_tree_from_dict(cls, dic, key_list):
"""
Create a configuration tree using the given dict.
Any dict-like objects inside dict will be treated as a new CfgNode.
Args:
dic (dict):
key_list (list[str]): a list of names which index this CfgNode from
the root. Currently only used for logging purposes.
"""
dic = copy.deepcopy(dic)
for k, v in dic.items():
if isinstance(v, dict):
# Convert dict to CfgNode
dic[k] = cls(v, key_list=key_list + [k])
else:
# Check for valid leaf type or nested CfgNode
_assert_with_logging(
_valid_type(v, allow_cfg_node=False),
"Key {} with value {} is not a valid type; valid types: {}".format(
".".join(key_list + [k]), type(v), _VALID_TYPES
),
)
return dic
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if self.is_frozen():
raise AttributeError(
"Attempted to set {} to {}, but CfgNode is immutable".format(
name, value
)
)
_assert_with_logging(
name not in self.__dict__,
"Invalid attempt to modify internal CfgNode state: {}".format(name),
)
_assert_with_logging(
_valid_type(value, allow_cfg_node=True),
"Invalid type {} for key {}; valid types = {}".format(
type(value), name, _VALID_TYPES
),
)
self[name] = value
def __str__(self):
def _indent(s_, num_spaces):
s = s_.split("\n")
if len(s) == 1:
return s_
first = s.pop(0)
s = [(num_spaces * " ") + line for line in s]
s = "\n".join(s)
s = first + "\n" + s
return s
r = ""
s = []
for k, v in sorted(self.items()):
seperator = "\n" if isinstance(v, CfgNode) else " "
attr_str = "{}:{}{}".format(str(k), seperator, str(v))
attr_str = _indent(attr_str, 2)
s.append(attr_str)
r += "\n".join(s)
return r
def __repr__(self):
return "{}({})".format(self.__class__.__name__, super(CfgNode, self).__repr__())
def dump(self, **kwargs):
"""Dump to a string."""
def convert_to_dict(cfg_node, key_list):
if not isinstance(cfg_node, CfgNode):
_assert_with_logging(
_valid_type(cfg_node),
"Key {} with value {} is not a valid type; valid types: {}".format(
".".join(key_list), type(cfg_node), _VALID_TYPES
),
)
return cfg_node
else:
cfg_dict = dict(cfg_node)
for k, v in cfg_dict.items():
cfg_dict[k] = convert_to_dict(v, key_list + [k])
return cfg_dict
self_as_dict = convert_to_dict(self, [])
return yaml.safe_dump(self_as_dict, **kwargs)
def merge_from_file(self, cfg_filename):
"""Load a yaml config file and merge it this CfgNode."""
with open(cfg_filename, "r", encoding="utf-8") as f:
cfg = self.load_cfg(f)
self.merge_from_other_cfg(cfg)
def merge_from_other_cfg(self, cfg_other):
"""Merge `cfg_other` into this CfgNode."""
_merge_a_into_b(cfg_other, self, self, [])
def merge_from_list(self, cfg_list):
"""Merge config (keys, values) in a list (e.g., from command line) into
this CfgNode. For example, `cfg_list = ['FOO.BAR', 0.5]`.
"""
_assert_with_logging(
len(cfg_list) % 2 == 0,
"Override list has odd length: {}; it must be a list of pairs".format(
cfg_list
),
)
root = self
for full_key, v in zip(cfg_list[0::2], cfg_list[1::2]):
if root.key_is_deprecated(full_key):
continue
if root.key_is_renamed(full_key):
root.raise_key_rename_error(full_key)
key_list = full_key.split(".")
d = self
for subkey in key_list[:-1]:
_assert_with_logging(
subkey in d, "Non-existent key: {}".format(full_key)
)
d = d[subkey]
subkey = key_list[-1]
_assert_with_logging(subkey in d, "Non-existent key: {}".format(full_key))
value = self._decode_cfg_value(v)
value = _check_and_coerce_cfg_value_type(value, d[subkey], subkey, full_key)
d[subkey] = value
def freeze(self):
"""Make this CfgNode and all of its children immutable."""
self._immutable(True)
def defrost(self):
"""Make this CfgNode and all of its children mutable."""
self._immutable(False)
def is_frozen(self):
"""Return mutability."""
return self.__dict__[CfgNode.IMMUTABLE]
def _immutable(self, is_immutable):
"""Set immutability to is_immutable and recursively apply the setting
to all nested CfgNodes.
"""
self.__dict__[CfgNode.IMMUTABLE] = is_immutable
# Recursively set immutable state
for v in self.__dict__.values():
if isinstance(v, CfgNode):
v._immutable(is_immutable)
for v in self.values():
if isinstance(v, CfgNode):
v._immutable(is_immutable)
def clone(self):
"""Recursively copy this CfgNode."""
return copy.deepcopy(self)
def register_deprecated_key(self, key):
"""Register key (e.g. `FOO.BAR`) a deprecated option. When merging deprecated
keys a warning is generated and the key is ignored.
"""
_assert_with_logging(
key not in self.__dict__[CfgNode.DEPRECATED_KEYS],
"key {} is already registered as a deprecated key".format(key),
)
self.__dict__[CfgNode.DEPRECATED_KEYS].add(key)
def register_renamed_key(self, old_name, new_name, message=None):
"""Register a key as having been renamed from `old_name` to `new_name`.
When merging a renamed key, an exception is thrown alerting the user to
the fact that the key has been renamed.
"""
_assert_with_logging(
old_name not in self.__dict__[CfgNode.RENAMED_KEYS],
"key {} is already registered as a renamed cfg key".format(old_name),
)
value = new_name
if message:
value = (new_name, message)
self.__dict__[CfgNode.RENAMED_KEYS][old_name] = value
def key_is_deprecated(self, full_key):
"""Test if a key is deprecated."""
if full_key in self.__dict__[CfgNode.DEPRECATED_KEYS]:
logger.warning("Deprecated config key (ignoring): {}".format(full_key))
return True
return False
def key_is_renamed(self, full_key):
"""Test if a key is renamed."""
return full_key in self.__dict__[CfgNode.RENAMED_KEYS]
def raise_key_rename_error(self, full_key):
new_key = self.__dict__[CfgNode.RENAMED_KEYS][full_key]
if isinstance(new_key, tuple):
msg = " Note: " + new_key[1]
new_key = new_key[0]
else:
msg = ""
raise KeyError(
"Key {} was renamed to {}; please update your config.{}".format(
full_key, new_key, msg
)
)
def is_new_allowed(self):
return self.__dict__[CfgNode.NEW_ALLOWED]
def load_cfg(cls, cfg_file_obj_or_str):
"""
Load a cfg.
Args:
cfg_file_obj_or_str (str or file):
Supports loading from:
- A file object backed by a YAML file
- A file object backed by a Python source file that exports an attribute
"cfg" that is either a dict or a CfgNode
- A string that can be parsed as valid YAML
"""
_assert_with_logging(
isinstance(cfg_file_obj_or_str, _FILE_TYPES + (str,)),
"Expected first argument to be of type {} or {}, but it was {}".format(
_FILE_TYPES, str, type(cfg_file_obj_or_str)
),
)
if isinstance(cfg_file_obj_or_str, str):
return cls._load_cfg_from_yaml_str(cfg_file_obj_or_str)
elif isinstance(cfg_file_obj_or_str, _FILE_TYPES):
return cls._load_cfg_from_file(cfg_file_obj_or_str)
else:
raise NotImplementedError("Impossible to reach here (unless there's a bug)")
def _load_cfg_from_file(cls, file_obj):
"""Load a config from a YAML file or a Python source file."""
_, file_extension = os.path.splitext(file_obj.name)
if file_extension in _YAML_EXTS:
return cls._load_cfg_from_yaml_str(file_obj.read())
elif file_extension in _PY_EXTS:
return cls._load_cfg_py_source(file_obj.name)
else:
raise Exception(
"Attempt to load from an unsupported file type {}; "
"only {} are supported".format(file_obj, _YAML_EXTS.union(_PY_EXTS))
)
def _load_cfg_from_yaml_str(cls, str_obj):
"""Load a config from a YAML string encoding."""
cfg_as_dict = yaml.safe_load(str_obj)
return cls(cfg_as_dict)
def _load_cfg_py_source(cls, filename):
"""Load a config from a Python source file."""
module = _load_module_from_file("yacs.config.override", filename)
_assert_with_logging(
hasattr(module, "cfg"),
"Python module from file {} must have 'cfg' attr".format(filename),
)
VALID_ATTR_TYPES = {dict, CfgNode}
_assert_with_logging(
type(module.cfg) in VALID_ATTR_TYPES,
"Imported module 'cfg' attr must be in {} but is {} instead".format(
VALID_ATTR_TYPES, type(module.cfg)
),
)
return cls(module.cfg)
def _decode_cfg_value(cls, value):
"""
Decodes a raw config value (e.g., from a yaml config files or command
line argument) into a Python object.
If the value is a dict, it will be interpreted as a new CfgNode.
If the value is a str, it will be evaluated as literals.
Otherwise it is returned as-is.
"""
# Configs parsed from raw yaml will contain dictionary keys that need to be
# converted to CfgNode objects
if isinstance(value, dict):
return cls(value)
# All remaining processing is only applied to strings
if not isinstance(value, str):
return value
# Try to interpret `value` as a:
# string, number, tuple, list, dict, boolean, or None
try:
value = literal_eval(value)
# The following two excepts allow v to pass through when it represents a
# string.
#
# Longer explanation:
# The type of v is always a string (before calling literal_eval), but
# sometimes it *represents* a string and other times a data structure, like
# a list. In the case that v represents a string, what we got back from the
# yaml parser is 'foo' *without quotes* (so, not '"foo"'). literal_eval is
# ok with '"foo"', but will raise a ValueError if given 'foo'. In other
# cases, like paths (v = 'foo/bar' and not v = '"foo/bar"'), literal_eval
# will raise a SyntaxError.
except ValueError:
pass
except SyntaxError:
pass
return value
def _check_and_coerce_cfg_value_type(replacement, original, key, full_key):
"""Checks that `replacement`, which is intended to replace `original` is of
the right type. The type is correct if it matches exactly or is one of a few
cases in which the type can be easily coerced.
"""
original_type = type(original)
replacement_type = type(replacement)
# The types must match (with some exceptions)
if replacement_type == original_type:
return replacement
# Cast replacement from from_type to to_type if the replacement and original
# types match from_type and to_type
def conditional_cast(from_type, to_type):
if replacement_type == from_type and original_type == to_type:
return True, to_type(replacement)
else:
return False, None
# Conditionally casts
# list <-> tuple
casts = [(tuple, list), (list, tuple)]
# For py2: allow converting from str (bytes) to a unicode string
try:
casts.append((str, unicode)) # noqa: F821
except Exception:
pass
for (from_type, to_type) in casts:
converted, converted_value = conditional_cast(from_type, to_type)
if converted:
return converted_value
raise ValueError(
"Type mismatch ({} vs. {}) with values ({} vs. {}) for config "
"key: {}".format(
original_type, replacement_type, original, replacement, full_key
)
)
def _assert_with_logging(cond, msg):
    """Assert `cond`, logging `msg` at debug level first when it fails.

    Logging before raising ensures the message survives even if the
    AssertionError is swallowed by a caller. Note: `logger` is a
    module-level logger defined elsewhere in this module.
    """
    if not cond:
        logger.debug(msg)
    assert cond, msg
The provided code snippet includes necessary dependencies for implementing the `_merge_a_into_b` function. Write a Python function `def _merge_a_into_b(a, b, root, key_list)` to solve the following problem:
Merge config dictionary a into config dictionary b, clobbering the options in b whenever they are also specified in a.
Here is the function:
def _merge_a_into_b(a, b, root, key_list):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.

    Args:
        a (CfgNode): source config being merged in.
        b (CfgNode): destination config, mutated in place.
        root (CfgNode): top-level config, consulted for deprecated/renamed keys.
        key_list (list[str]): path of keys from the root to this node,
            used to build full dotted key names for error messages.
    """
    _assert_with_logging(
        isinstance(a, CfgNode),
        "`a` (cur type {}) must be an instance of {}".format(type(a), CfgNode),
    )
    _assert_with_logging(
        isinstance(b, CfgNode),
        "`b` (cur type {}) must be an instance of {}".format(type(b), CfgNode),
    )
    for k, v_ in a.items():
        full_key = ".".join(key_list + [k])
        # Deep-copy so the merge never aliases mutable values between configs.
        v = copy.deepcopy(v_)
        v = b._decode_cfg_value(v)
        if k in b:
            # Existing key: the replacement must match (or coerce to) the
            # original type.
            v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key)
            # Recursively merge dicts
            if isinstance(v, CfgNode):
                # NOTE(review): this try/except re-raises unconditionally,
                # so it is a no-op wrapper around the recursive call.
                try:
                    _merge_a_into_b(v, b[k], root, key_list + [k])
                except BaseException:
                    raise
            else:
                b[k] = v
        elif b.is_new_allowed():
            b[k] = v
        else:
            # Unknown key: tolerate deprecated keys, give a helpful error
            # for renamed ones, and fail hard otherwise.
            if root.key_is_deprecated(full_key):
                continue
            elif root.key_is_renamed(full_key):
                root.raise_key_rename_error(full_key)
            else:
                raise KeyError("Non-existent config key: {}".format(full_key))
161,546 | import copy
import io
import logging
import os
import sys
from ast import literal_eval
import yaml
_PY2 = sys.version_info.major == 2
if _PY2:
_VALID_TYPES = _VALID_TYPES.union({unicode}) # noqa: F821
if _PY2:
# imp is available in both py2 and py3 for now, but is deprecated in py3
import imp
else:
import importlib.util
def _load_module_from_file(name, filename):
    """Import the Python source file `filename` as a module named `name`.

    Uses the legacy `imp` API on Python 2 and `importlib.util` on
    Python 3 (`_PY2` is a module-level flag set from sys.version_info).
    """
    if _PY2:
        module = imp.load_source(name, filename)
    else:
        # Standard importlib recipe: build a spec, instantiate the module,
        # then execute its top-level code.
        spec = importlib.util.spec_from_file_location(name, filename)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
    return module
161,547 | import cv2
import matplotlib as mpl
import matplotlib.figure as mplfigure
import numpy as np
import pycocotools.mask as mask_util
from matplotlib.backends.backend_agg import FigureCanvasAgg
_COLORS = (
np.array(
[
0.000,
0.447,
0.741,
0.850,
0.325,
0.098,
0.929,
0.694,
0.125,
0.494,
0.184,
0.556,
0.466,
0.674,
0.188,
0.301,
0.745,
0.933,
0.635,
0.078,
0.184,
0.300,
0.300,
0.300,
0.600,
0.600,
0.600,
1.000,
0.000,
0.000,
1.000,
0.500,
0.000,
0.749,
0.749,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
1.000,
0.667,
0.000,
1.000,
0.333,
0.333,
0.000,
0.333,
0.667,
0.000,
0.333,
1.000,
0.000,
0.667,
0.333,
0.000,
0.667,
0.667,
0.000,
0.667,
1.000,
0.000,
1.000,
0.333,
0.000,
1.000,
0.667,
0.000,
1.000,
1.000,
0.000,
0.000,
0.333,
0.500,
0.000,
0.667,
0.500,
0.000,
1.000,
0.500,
0.333,
0.000,
0.500,
0.333,
0.333,
0.500,
0.333,
0.667,
0.500,
0.333,
1.000,
0.500,
0.667,
0.000,
0.500,
0.667,
0.333,
0.500,
0.667,
0.667,
0.500,
0.667,
1.000,
0.500,
1.000,
0.000,
0.500,
1.000,
0.333,
0.500,
1.000,
0.667,
0.500,
1.000,
1.000,
0.500,
0.000,
0.333,
1.000,
0.000,
0.667,
1.000,
0.000,
1.000,
1.000,
0.333,
0.000,
1.000,
0.333,
0.333,
1.000,
0.333,
0.667,
1.000,
0.333,
1.000,
1.000,
0.667,
0.000,
1.000,
0.667,
0.333,
1.000,
0.667,
0.667,
1.000,
0.667,
1.000,
1.000,
1.000,
0.000,
1.000,
1.000,
0.333,
1.000,
1.000,
0.667,
1.000,
0.333,
0.000,
0.000,
0.500,
0.000,
0.000,
0.667,
0.000,
0.000,
0.833,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
0.167,
0.000,
0.000,
0.333,
0.000,
0.000,
0.500,
0.000,
0.000,
0.667,
0.000,
0.000,
0.833,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
0.167,
0.000,
0.000,
0.333,
0.000,
0.000,
0.500,
0.000,
0.000,
0.667,
0.000,
0.000,
0.833,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
0.143,
0.143,
0.143,
0.286,
0.286,
0.286,
0.429,
0.429,
0.429,
0.571,
0.571,
0.571,
0.714,
0.714,
0.714,
0.857,
0.857,
0.857,
0.000,
0.447,
0.741,
0.314,
0.717,
0.741,
0.50,
0.5,
0,
]
)
.astype(np.float32)
.reshape(-1, 3)
)
def overlay_bbox_cv(img, dets, class_names, score_thresh):
    """Draw detection boxes and class/score labels onto `img` in place.

    Args:
        img: image (numpy array, OpenCV convention) to draw on.
        dets: mapping of class label index -> list of [x0, y0, x1, y1, score].
        class_names: sequence of class-name strings indexed by label.
        score_thresh: detections with score <= this threshold are skipped.

    Returns:
        The annotated image (the same array object as `img`).
    """
    all_box = []
    for label in dets:
        for bbox in dets[label]:
            score = bbox[-1]
            if score > score_thresh:
                x0, y0, x1, y1 = [int(i) for i in bbox[:4]]
                all_box.append([label, x0, y0, x1, y1, score])
    # Sort ascending by score so higher-confidence boxes are drawn last
    # (on top of lower-confidence ones).
    all_box.sort(key=lambda v: v[5])
    for box in all_box:
        label, x0, y0, x1, y1, score = box
        # color = self.cmap(i)[:3]
        # Per-class color from the module-level _COLORS palette (0-1 floats).
        color = (_COLORS[label] * 255).astype(np.uint8).tolist()
        text = "{}:{:.1f}%".format(class_names[label], score * 100)
        # Dark text on light box colors, light text on dark ones.
        txt_color = (0, 0, 0) if np.mean(_COLORS[label]) > 0.5 else (255, 255, 255)
        font = cv2.FONT_HERSHEY_SIMPLEX
        txt_size = cv2.getTextSize(text, font, 0.5, 2)[0]
        cv2.rectangle(img, (x0, y0), (x1, y1), color, 2)
        # Filled label background strip just above the box's top edge.
        cv2.rectangle(
            img,
            (x0, y0 - txt_size[1] - 1),
            (x0 + txt_size[0] + txt_size[1], y0 - 1),
            color,
            -1,
        )
        cv2.putText(img, text, (x0, y0 - 1), font, 0.5, txt_color, thickness=1)
    return img
161,548 | import cv2
import matplotlib as mpl
import matplotlib.figure as mplfigure
import numpy as np
import pycocotools.mask as mask_util
from matplotlib.backends.backend_agg import FigureCanvasAgg
The provided code snippet includes necessary dependencies for implementing the `rand_cmap` function. Write a Python function `def rand_cmap( nlabels, type="bright", first_color_black=False, last_color_black=False, verbose=False, )` to solve the following problem:
Creates a random colormap to be used together with matplotlib. Useful for segmentation tasks :param nlabels: Number of labels (size of colormap) :param type: 'bright' for strong colors, 'soft' for pastel colors :param first_color_black: Option to use first color as black, True or False :param last_color_black: Option to use last color as black, True or False :param verbose: Prints the number of labels and shows the colormap. True or False :return: colormap for matplotlib
Here is the function:
def rand_cmap(
    nlabels,
    type="bright",
    first_color_black=False,
    last_color_black=False,
    verbose=False,
):
    """Create a random matplotlib colormap, useful for segmentation masks.

    :param nlabels: Number of labels (size of colormap)
    :param type: 'bright' for strong colors, 'soft' for pastel colors
    :param first_color_black: Force the first colormap entry to black
    :param last_color_black: Force the last colormap entry to black
    :param verbose: Print the number of labels
    :return: colormap for matplotlib, or None if ``type`` is invalid
    """
    import colorsys

    import numpy as np
    from matplotlib.colors import LinearSegmentedColormap

    if type not in ("bright", "soft"):
        print('Please choose "bright" or "soft" for type')
        return

    if verbose:
        print("Number of labels: " + str(nlabels))

    if type == "bright":
        # Sample in HSV (high value, varied hue/saturation) for vivid colors,
        # then convert each sample to RGB.
        colors = [
            colorsys.hsv_to_rgb(
                np.random.uniform(low=0.0, high=1),
                np.random.uniform(low=0.2, high=1),
                np.random.uniform(low=0.9, high=1),
            )
            for _ in range(nlabels)
        ]
    else:
        # Pastel palette: restrict every RGB channel to a narrow light band.
        low, high = 0.6, 0.95
        colors = [
            tuple(np.random.uniform(low=low, high=high) for _ in range(3))
            for _ in range(nlabels)
        ]

    if first_color_black:
        colors[0] = [0, 0, 0]
    if last_color_black:
        colors[-1] = [0, 0, 0]

    return LinearSegmentedColormap.from_list("new_map", colors, N=nlabels)
161,550 | from functools import partial
import torch
The provided code snippet includes necessary dependencies for implementing the `images_to_levels` function. Write a Python function `def images_to_levels(target, num_level_anchors)` to solve the following problem:
Convert targets by image to targets by feature level. [target_img0, target_img1] -> [target_level0, target_level1, ...]
Here is the function:
def images_to_levels(target, num_level_anchors):
    """Convert per-image targets into per-feature-level targets.

    [target_img0, target_img1] -> [target_level0, target_level1, ...]
    """
    stacked = torch.stack(target, 0)
    level_targets = []
    begin = 0
    for count in num_level_anchors:
        # Slice this level's anchors out of every image, then drop the
        # batch dim when there is a single image.
        level_targets.append(stacked[:, begin : begin + count].squeeze(0))
        begin += count
    return level_targets
161,551 | from functools import partial
import torch
The provided code snippet includes necessary dependencies for implementing the `unmap` function. Write a Python function `def unmap(data, count, inds, fill=0)` to solve the following problem:
Unmap a subset of items (data) back to the original set of items (of size count)
Here is the function:
def unmap(data, count, inds, fill=0):
    """Scatter ``data`` back into a tensor of ``count`` items.

    Positions where ``inds`` is truthy receive ``data`` rows in order;
    all other positions are filled with ``fill``.
    """
    mask = inds.type(torch.bool)
    if data.dim() == 1:
        ret = data.new_full((count,), fill)
        ret[mask] = data
    else:
        ret = data.new_full((count,) + data.size()[1:], fill)
        ret[mask, :] = data
    return ret
161,552 | import torch
The provided code snippet includes necessary dependencies for implementing the `distance2bbox` function. Write a Python function `def distance2bbox(points, distance, max_shape=None)` to solve the following problem:
Decode distance prediction to bounding box. Args: points (Tensor): Shape (n, 2), [x, y]. distance (Tensor): Distance from the given point to 4 boundaries (left, top, right, bottom). max_shape (tuple): Shape of the image. Returns: Tensor: Decoded bboxes.
Here is the function:
def distance2bbox(points, distance, max_shape=None):
    """Decode distances from a point to the four box sides into a bbox.

    Args:
        points (Tensor): Shape (n, 2), [x, y] anchor points.
        distance (Tensor): Distances to the (left, top, right, bottom)
            boundaries, shape (n, 4).
        max_shape (tuple): Optional (h, w) image shape used to clip boxes.

    Returns:
        Tensor: Decoded boxes in "xyxy" format.
    """
    px, py = points[..., 0], points[..., 1]
    left = px - distance[..., 0]
    top = py - distance[..., 1]
    right = px + distance[..., 2]
    bottom = py + distance[..., 3]
    if max_shape is not None:
        # Clip x coordinates to image width, y coordinates to image height.
        h, w = max_shape[0], max_shape[1]
        left = left.clamp(min=0, max=w)
        top = top.clamp(min=0, max=h)
        right = right.clamp(min=0, max=w)
        bottom = bottom.clamp(min=0, max=h)
    return torch.stack([left, top, right, bottom], -1)
161,553 | import torch
The provided code snippet includes necessary dependencies for implementing the `bbox2distance` function. Write a Python function `def bbox2distance(points, bbox, max_dis=None, eps=0.1)` to solve the following problem:
Decode bounding box based on distances. Args: points (Tensor): Shape (n, 2), [x, y]. bbox (Tensor): Shape (n, 4), "xyxy" format max_dis (float): Upper bound of the distance. eps (float): a small value to ensure target < max_dis, instead <= Returns: Tensor: Decoded distances.
Here is the function:
def bbox2distance(points, bbox, max_dis=None, eps=0.1):
    """Encode a bbox as distances from a point to its four sides.

    Args:
        points (Tensor): Shape (n, 2), [x, y].
        bbox (Tensor): Shape (n, 4), "xyxy" format.
        max_dis (float): Upper bound of the distance.
        eps (float): small value ensuring target < max_dis, instead of <=.

    Returns:
        Tensor: Decoded distances (left, top, right, bottom).
    """
    deltas = [
        points[:, 0] - bbox[:, 0],  # left
        points[:, 1] - bbox[:, 1],  # top
        bbox[:, 2] - points[:, 0],  # right
        bbox[:, 3] - points[:, 1],  # bottom
    ]
    if max_dis is not None:
        deltas = [d.clamp(min=0, max=max_dis - eps) for d in deltas]
    return torch.stack(deltas, -1)
161,554 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
def flops_to_string(flops, units="GFLOPs", precision=2):
    """Convert a FLOPs count into a human-readable string.

    Note that here we take a multiply-add count as one FLOP.

    Args:
        flops (float): FLOPs number to be converted.
        units (str | None): Converted FLOPs units. Options are None, 'GFLOPs',
            'MFLOPs', 'KFLOPs', 'FLOPs'. If set to None, it will automatically
            choose the most suitable unit for FLOPs. Default: 'GFLOPs'.
        precision (int): Digit number after the decimal point. Default: 2.

    Returns:
        str: The converted FLOPs number with units.

    Examples:
        >>> flops_to_string(1e9)
        '1.0 GFLOPs'
        >>> flops_to_string(2e5, 'MFLOPs')
        '0.2 MFLOPs'
        >>> flops_to_string(3e-9, None)
        '3e-09 FLOPs'
    """
    scale = {"GFLOPs": 10.0**9, "MFLOPs": 10.0**6, "KFLOPs": 10.0**3}
    if units is None:
        # Auto-select the largest unit whose scaled value is at least 1.
        for auto_units in ("GFLOPs", "MFLOPs", "KFLOPs"):
            if flops // scale[auto_units] > 0:
                return (
                    str(round(flops / scale[auto_units], precision)) + " " + auto_units
                )
        return str(flops) + " FLOPs"
    if units in scale:
        return str(round(flops / scale[units], precision)) + " " + units
    # Unknown unit names fall back to a raw FLOPs count.
    return str(flops) + " FLOPs"
def params_to_string(num_params, units=None, precision=2):
    """Convert a parameter count into a human-readable string.

    Args:
        num_params (float): Parameter number to be converted.
        units (str | None): Converted units. Options are None, 'M',
            'K' and ''. If set to None, it will automatically choose the most
            suitable unit for the parameter number. Default: None.
        precision (int): Digit number after the decimal point. Default: 2.

    Returns:
        str: The converted parameter number with units.

    Examples:
        >>> params_to_string(1e9)
        '1000.0 M'
        >>> params_to_string(2e5)
        '200.0 k'
        >>> params_to_string(3e-9)
        '3e-09'
    """
    if units == "M":
        return str(round(num_params / 10.0**6, precision)) + " " + units
    if units == "K":
        return str(round(num_params / 10.0**3, precision)) + " " + units
    if units is None:
        # Auto-pick: millions first, then thousands (lowercase 'k' here
        # matches the original output format).
        if num_params // 10**6 > 0:
            return str(round(num_params / 10**6, precision)) + " M"
        if num_params // 10**3:
            return str(round(num_params / 10**3, precision)) + " k"
    return str(num_params)
def print_model_with_flops(
    model,
    total_flops,
    total_params,
    units="GFLOPs",
    precision=3,
    ost=sys.stdout,
    flush=False,
):
    """Print a model with FLOPs for each layer.

    Temporarily patches every submodule's ``extra_repr`` so that plain
    ``print(model)`` output also shows each layer's parameter count, FLOPs,
    and their share of the model totals; the patch is reverted afterwards.

    Args:
        model (nn.Module): The model to be printed.
        total_flops (float): Total FLOPs of the model.
        total_params (float): Total parameter counts of the model.
        units (str | None): Converted FLOPs units. Default: 'GFLOPs'.
        precision (int): Digit number after the decimal point. Default: 3.
        ost (stream): same as `file` param in :func:`print`.
            Default: sys.stdout.
        flush (bool): same as that in :func:`print`. Default: False.
    Example:
        >>> class ExampleModel(nn.Module):
        >>> def __init__(self):
        >>> super().__init__()
        >>> self.conv1 = nn.Conv2d(3, 8, 3)
        >>> self.conv2 = nn.Conv2d(8, 256, 3)
        >>> self.conv3 = nn.Conv2d(256, 8, 3)
        >>> self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        >>> self.flatten = nn.Flatten()
        >>> self.fc = nn.Linear(8, 1)
        >>> def forward(self, x):
        >>> x = self.conv1(x)
        >>> x = self.conv2(x)
        >>> x = self.conv3(x)
        >>> x = self.avg_pool(x)
        >>> x = self.flatten(x)
        >>> x = self.fc(x)
        >>> return x
        >>> model = ExampleModel()
        >>> x = (3, 16, 16)
        to print the complexity information state for each layer, you can use
        >>> get_model_complexity_info(model, x)
        or directly use
        >>> print_model_with_flops(model, 4579784.0, 37361)
        ExampleModel(
        0.037 M, 100.000% Params, 0.005 GFLOPs, 100.000% FLOPs,
        (conv1): Conv2d(0.0 M, 0.600% Params, 0.0 GFLOPs, 0.959% FLOPs, 3, 8, kernel_size=(3, 3), stride=(1, 1)) # noqa: E501
        (conv2): Conv2d(0.019 M, 50.020% Params, 0.003 GFLOPs, 58.760% FLOPs, 8, 256, kernel_size=(3, 3), stride=(1, 1))
        (conv3): Conv2d(0.018 M, 49.356% Params, 0.002 GFLOPs, 40.264% FLOPs, 256, 8, kernel_size=(3, 3), stride=(1, 1))
        (avg_pool): AdaptiveAvgPool2d(0.0 M, 0.000% Params, 0.0 GFLOPs, 0.017% FLOPs, output_size=(1, 1))
        (flatten): Flatten(0.0 M, 0.000% Params, 0.0 GFLOPs, 0.000% FLOPs, )
        (fc): Linear(0.0 M, 0.024% Params, 0.0 GFLOPs, 0.000% FLOPs, in_features=8, out_features=1, bias=True)
    )
    """

    # Parameter count of a subtree: supported leaf modules report their
    # precomputed __params__; containers sum over their children.
    def accumulate_params(self):
        if is_supported_instance(self):
            return self.__params__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_params()
            return sum

    # FLOPs of a subtree, averaged over the number of forward batches seen.
    def accumulate_flops(self):
        if is_supported_instance(self):
            return self.__flops__ / model.__batch_counter__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_flops()
            return sum

    # Replacement extra_repr: complexity stats followed by the module's
    # original repr text.
    def flops_repr(self):
        accumulated_num_params = self.accumulate_params()
        accumulated_flops_cost = self.accumulate_flops()
        return ", ".join(
            [
                params_to_string(
                    accumulated_num_params, units="M", precision=precision
                ),
                "{:.3%} Params".format(accumulated_num_params / total_params),
                flops_to_string(
                    accumulated_flops_cost, units=units, precision=precision
                ),
                "{:.3%} FLOPs".format(accumulated_flops_cost / total_flops),
                self.original_extra_repr(),
            ]
        )

    def add_extra_repr(m):
        # __get__ binds the plain functions above as methods of module m.
        m.accumulate_flops = accumulate_flops.__get__(m)
        m.accumulate_params = accumulate_params.__get__(m)
        flops_extra_repr = flops_repr.__get__(m)
        if m.extra_repr != flops_extra_repr:
            # Keep the original repr so del_extra_repr can restore it.
            m.original_extra_repr = m.extra_repr
            m.extra_repr = flops_extra_repr
            assert m.extra_repr != m.original_extra_repr

    def del_extra_repr(m):
        # Undo the patching done in add_extra_repr.
        # NOTE(review): accumulate_params is never removed here — looks like
        # an upstream oversight; presumably harmless. TODO confirm.
        if hasattr(m, "original_extra_repr"):
            m.extra_repr = m.original_extra_repr
            del m.original_extra_repr
        if hasattr(m, "accumulate_flops"):
            del m.accumulate_flops

    model.apply(add_extra_repr)
    print(model, file=ost, flush=flush)
    model.apply(del_extra_repr)
def add_flops_counting_methods(net_main_module):
    """Attach FLOPs-counting methods to ``net_main_module`` and reset counters.

    After this call the module gains bound ``start_flops_count``,
    ``stop_flops_count``, ``reset_flops_count`` and
    ``compute_average_flops_cost`` methods.

    Args:
        net_main_module (nn.Module): network whose complexity will be measured.

    Returns:
        nn.Module: the same module, for call chaining.
    """
    # adding additional methods to the existing module object,
    # this is done this way so that each function has access to self object
    net_main_module.start_flops_count = start_flops_count.__get__(net_main_module)
    net_main_module.stop_flops_count = stop_flops_count.__get__(net_main_module)
    net_main_module.reset_flops_count = reset_flops_count.__get__(net_main_module)
    net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__(
        net_main_module
    )  # noqa: E501
    # Start from a clean slate so previous measurements don't leak in.
    net_main_module.reset_flops_count()
    return net_main_module
def compute_average_flops_cost(self):
    """Compute average FLOPs cost and the total parameter count.

    A method to compute average FLOPs cost, which will be available after
    `add_flops_counting_methods()` is called on a desired net object.

    Returns:
        tuple[float, float]: mean FLOPs consumption per image and the total
            number of parameters. (The original docstring incorrectly
            claimed a single float was returned.)
    """
    batches_count = self.__batch_counter__
    flops_sum = 0
    # Only supported leaf modules carry a __flops__ accumulator.
    for module in self.modules():
        if is_supported_instance(module):
            flops_sum += module.__flops__
    params_sum = get_model_parameters_number(self)
    return flops_sum / batches_count, params_sum
def start_flops_count(self):
    """Activate the computation of mean flops consumption per image.

    A method to activate the computation of mean flops consumption per image,
    which will be available after ``add_flops_counting_methods()`` is called on
    a desired net object. It should be called before running the network.
    """
    # Counts forward batches so FLOPs can later be averaged per image.
    add_batch_counter_hook_function(self)

    def add_flops_counter_hook_function(module):
        if is_supported_instance(module):
            # Register each supported module's counting hook at most once;
            # the handle is kept so stop_flops_count can remove it.
            if hasattr(module, "__flops_handle__"):
                return
            else:
                handle = module.register_forward_hook(MODULES_MAPPING[type(module)])
                module.__flops_handle__ = handle

    self.apply(partial(add_flops_counter_hook_function))
def stop_flops_count(self):
    """Stop computing the mean flops consumption per image.

    A method to stop computing the mean flops consumption per image, which will
    be available after ``add_flops_counting_methods()`` is called on a desired
    net object. It can be called to pause the computation whenever.
    """
    # Remove both the batch counter hook and the per-module counting hooks.
    remove_batch_counter_hook_function(self)
    self.apply(remove_flops_counter_hook_function)
The provided code snippet includes necessary dependencies for implementing the `get_model_complexity_info` function. Write a Python function `def get_model_complexity_info( model, input_shape, print_per_layer_stat=True, as_strings=True, input_constructor=None, flush=False, ost=sys.stdout, )` to solve the following problem:
Get complexity information of a model. This method can calculate FLOPs and parameter counts of a model with corresponding input shape. It can also print complexity information for each layer in a model. Supported layers are listed as below: - Convolutions: ``nn.Conv1d``, ``nn.Conv2d``, ``nn.Conv3d``. - Activations: ``nn.ReLU``, ``nn.PReLU``, ``nn.ELU``, ``nn.LeakyReLU``, ``nn.ReLU6``. - Poolings: ``nn.MaxPool1d``, ``nn.MaxPool2d``, ``nn.MaxPool3d``, ``nn.AvgPool1d``, ``nn.AvgPool2d``, ``nn.AvgPool3d``, ``nn.AdaptiveMaxPool1d``, ``nn.AdaptiveMaxPool2d``, ``nn.AdaptiveMaxPool3d``, ``nn.AdaptiveAvgPool1d``, ``nn.AdaptiveAvgPool2d``, ``nn.AdaptiveAvgPool3d``. - BatchNorms: ``nn.BatchNorm1d``, ``nn.BatchNorm2d``, ``nn.BatchNorm3d``. - Linear: ``nn.Linear``. - Deconvolution: ``nn.ConvTranspose2d``. - Upsample: ``nn.Upsample``. Args: model (nn.Module): The model for complexity calculation. input_shape (tuple): Input shape used for calculation. print_per_layer_stat (bool): Whether to print complexity information for each layer in a model. Default: True. as_strings (bool): Output FLOPs and params counts in a string form. Default: True. input_constructor (None | callable): If specified, it takes a callable method that generates input. otherwise, it will generate a random tensor with input shape to calculate FLOPs. Default: None. flush (bool): same as that in :func:`print`. Default: False. ost (stream): same as ``file`` param in :func:`print`. Default: sys.stdout. Returns: tuple[float | str]: If ``as_strings`` is set to True, it will return FLOPs and parameter counts in a string format. otherwise, it will return those in a float number format.
Here is the function:
def get_model_complexity_info(
    model,
    input_shape,
    print_per_layer_stat=True,
    as_strings=True,
    input_constructor=None,
    flush=False,
    ost=sys.stdout,
):
    """Get complexity information of a model.

    This method can calculate FLOPs and parameter counts of a model with
    corresponding input shape. It can also print complexity information for
    each layer in a model.

    Supported layers are listed as below:
        - Convolutions: ``nn.Conv1d``, ``nn.Conv2d``, ``nn.Conv3d``.
        - Activations: ``nn.ReLU``, ``nn.PReLU``, ``nn.ELU``, ``nn.LeakyReLU``,
          ``nn.ReLU6``.
        - Poolings: ``nn.MaxPool1d``, ``nn.MaxPool2d``, ``nn.MaxPool3d``,
          ``nn.AvgPool1d``, ``nn.AvgPool2d``, ``nn.AvgPool3d``,
          ``nn.AdaptiveMaxPool1d``, ``nn.AdaptiveMaxPool2d``,
          ``nn.AdaptiveMaxPool3d``, ``nn.AdaptiveAvgPool1d``,
          ``nn.AdaptiveAvgPool2d``, ``nn.AdaptiveAvgPool3d``.
        - BatchNorms: ``nn.BatchNorm1d``, ``nn.BatchNorm2d``,
          ``nn.BatchNorm3d``.
        - Linear: ``nn.Linear``.
        - Deconvolution: ``nn.ConvTranspose2d``.
        - Upsample: ``nn.Upsample``.

    Args:
        model (nn.Module): The model for complexity calculation.
        input_shape (tuple): Input shape (without batch dim) used for
            calculation.
        print_per_layer_stat (bool): Whether to print complexity information
            for each layer in a model. Default: True.
        as_strings (bool): Output FLOPs and params counts in a string form.
            Default: True.
        input_constructor (None | callable): If specified, it takes a callable
            method that generates input. otherwise, it will generate a random
            tensor with input shape to calculate FLOPs. Default: None.
        flush (bool): same as that in :func:`print`. Default: False.
        ost (stream): same as ``file`` param in :func:`print`.
            Default: sys.stdout.

    Returns:
        tuple[float | str]: If ``as_strings`` is set to True, it will return
            FLOPs and parameter counts in a string format. otherwise, it will
            return those in a float number format.
    """
    assert type(input_shape) is tuple
    assert len(input_shape) >= 1
    assert isinstance(model, nn.Module)
    # Attach counting methods / hooks and run one instrumented forward pass.
    flops_model = add_flops_counting_methods(model)
    flops_model.eval()
    flops_model.start_flops_count()
    if input_constructor:
        # Caller-provided factory builds a (possibly structured) kwargs input.
        input = input_constructor(input_shape)
        _ = flops_model(**input)
    else:
        try:
            # new_empty on an existing parameter's tensor inherits the
            # model's dtype and device for the dummy batch of size 1.
            batch = torch.ones(()).new_empty(
                (1, *input_shape),
                dtype=next(flops_model.parameters()).dtype,
                device=next(flops_model.parameters()).device,
            )
        except StopIteration:
            # Avoid StopIteration for models which have no parameters,
            # like `nn.Relu()`, `nn.AvgPool2d`, etc.
            batch = torch.ones(()).new_empty((1, *input_shape))
        _ = flops_model(batch)
    flops_count, params_count = flops_model.compute_average_flops_cost()
    if print_per_layer_stat:
        print_model_with_flops(
            flops_model, flops_count, params_count, ost=ost, flush=flush
        )
    # Detach the hooks so subsequent forward passes are not instrumented.
    flops_model.stop_flops_count()
    if as_strings:
        return flops_to_string(flops_count), params_to_string(params_count)
    return flops_count, params_count
161,555 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
def empty_flops_counter_hook(module, input, output):
    """Forward hook for modules that contribute no FLOPs.

    Adds zero so the ``__flops__`` accumulator stays defined and downstream
    aggregation can treat every hooked module uniformly.
    """
    module.__flops__ += 0
161,556 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
def upsample_flops_counter_hook(module, input, output):
    """Count one FLOP per element of ``output[0]`` for an upsample layer."""
    count = 1
    for dim in output[0].shape:
        count *= dim
    module.__flops__ += int(count)
161,557 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
def relu_flops_counter_hook(module, input, output):
    """Count one FLOP per output element (elementwise activation)."""
    module.__flops__ += int(output.numel())
161,558 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
def linear_flops_counter_hook(module, input, output):
    """Linear-layer FLOPs: (#input elements) x output feature dimension."""
    in_tensor = input[0]
    # pytorch checks dimensions, so here we don't care much
    out_features = output.shape[-1]
    module.__flops__ += int(np.prod(in_tensor.shape) * out_features)
161,559 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
def pool_flops_counter_hook(module, input, output):
    """Pooling FLOPs: one per input element (each value is touched once)."""
    module.__flops__ += int(np.prod(input[0].shape))
161,560 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
def bn_flops_counter_hook(module, input, output):
    """Normalization FLOPs: one per element, doubled when an affine
    scale/shift is applied."""
    elems = np.prod(input[0].shape)
    factor = 2 if module.affine else 1
    module.__flops__ += int(elems * factor)
161,561 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
def deconv_flops_counter_hook(conv_module, input, output):
    """Count FLOPs for a ConvTranspose2d forward pass (multiply-add = 1 FLOP).

    Kernel work is proportional to the number of *input* positions for a
    transposed convolution; the bias (if any) adds one FLOP per *output*
    position per output channel.
    """
    # Can have multiple inputs, getting the first one
    in_tensor = input[0]
    batch_size = in_tensor.shape[0]
    input_height, input_width = in_tensor.shape[2:]

    kernel_height, kernel_width = conv_module.kernel_size
    in_channels = conv_module.in_channels
    out_channels = conv_module.out_channels
    groups = conv_module.groups

    filters_per_channel = out_channels // groups
    conv_per_position_flops = (
        kernel_height * kernel_width * in_channels * filters_per_channel
    )
    active_elements_count = batch_size * input_height * input_width
    overall_conv_flops = conv_per_position_flops * active_elements_count

    bias_flops = 0
    if conv_module.bias is not None:
        output_height, output_width = output.shape[2:]
        # BUGFIX: the original multiplied output_height by itself, which
        # miscounted bias FLOPs for non-square outputs.
        bias_flops = out_channels * batch_size * output_height * output_width

    conv_module.__flops__ += int(overall_conv_flops + bias_flops)
161,562 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
def conv_flops_counter_hook(conv_module, input, output):
    """Count FLOPs for a Conv{1,2,3}d forward pass (multiply-add = 1 FLOP)."""
    # Can have multiple inputs, getting the first one
    in_tensor = input[0]
    batch_size = in_tensor.shape[0]

    spatial_out = int(np.prod(list(output.shape[2:])))
    kernel_elems = int(np.prod(list(conv_module.kernel_size)))
    filters_per_channel = conv_module.out_channels // conv_module.groups

    # Work done per output position, times the number of output positions.
    per_position = kernel_elems * conv_module.in_channels * filters_per_channel
    active = batch_size * spatial_out
    total = per_position * active
    if conv_module.bias is not None:
        # One add per output element for the bias.
        total += conv_module.out_channels * active

    conv_module.__flops__ += int(total)
161,563 | import argparse
import os
import time
import cv2
import torch
from nanodet.data.batch_process import stack_batch_img
from nanodet.data.collate import naive_collate
from nanodet.data.transform import Pipeline
from nanodet.model.arch import build_model
from nanodet.util import Logger, cfg, load_config, load_model_weight
from nanodet.util.path import mkdir
def parse_args():
    """Parse command-line arguments for the inference demo.

    Returns:
        argparse.Namespace: demo mode, config/model/input paths, webcam id
        and the save_result flag.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "demo", default="image", help="demo type, eg. image, video and webcam"
    )
    parser.add_argument("--config", help="model config file path")
    parser.add_argument("--model", help="model file path")
    parser.add_argument("--path", default="./demo", help="path to images or video")
    parser.add_argument("--camid", type=int, default=0, help="webcam demo camera id")
    parser.add_argument(
        "--save_result",
        action="store_true",
        help="whether to save the inference result of image/video",
    )
    args = parser.parse_args()
    return args
161,564 | import argparse
import os
import time
import cv2
import torch
from nanodet.data.batch_process import stack_batch_img
from nanodet.data.collate import naive_collate
from nanodet.data.transform import Pipeline
from nanodet.model.arch import build_model
from nanodet.util import Logger, cfg, load_config, load_model_weight
from nanodet.util.path import mkdir
# File extensions treated as images by get_image_list.
image_ext = [".jpg", ".jpeg", ".webp", ".bmp", ".png"]


def get_image_list(path):
    """Recursively collect paths of image files under ``path``.

    A file counts as an image when its extension is listed in ``image_ext``.
    """
    image_names = []
    for root, _, files in os.walk(path):
        for fname in files:
            full_path = os.path.join(root, fname)
            if os.path.splitext(full_path)[1] in image_ext:
                image_names.append(full_path)
    return image_names
161,565 | import argparse
import os
import warnings
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import TQDMProgressBar
from nanodet.data.collate import naive_collate
from nanodet.data.dataset import build_dataset
from nanodet.evaluator import build_evaluator
from nanodet.trainer.task import TrainingTask
from nanodet.util import (
NanoDetLightningLogger,
cfg,
convert_old_model,
env_utils,
load_config,
load_model_weight,
mkdir,
)
def parse_args():
    """Parse training CLI arguments: config path, distributed rank, seed.

    Returns:
        argparse.Namespace: parsed arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("config", help="train config file path")
    parser.add_argument(
        "--local_rank", default=-1, type=int, help="node rank for distributed training"
    )
    parser.add_argument("--seed", type=int, default=None, help="random seed")
    args = parser.parse_args()
    return args
161,566 | import argparse
import os
import onnx
import onnxsim
import torch
from nanodet.model.arch import build_model
from nanodet.util import Logger, cfg, load_config, load_model_weight
def generate_ouput_names(head_cfg):
    """Return ONNX output names: all class-score heads, then all box heads.

    NOTE(review): the function name misspells "output" but is kept for
    backward compatibility with existing callers.
    """
    cls_names = ["cls_pred_stride_{}".format(s) for s in head_cfg.strides]
    dis_names = ["dis_pred_stride_{}".format(s) for s in head_cfg.strides]
    return cls_names + dis_names
161,567 | import argparse
import os
import onnx
import onnxsim
import torch
from nanodet.model.arch import build_model
from nanodet.util import Logger, cfg, load_config, load_model_weight
def parse_args():
    """Parse CLI arguments for ONNX export.

    Returns:
        argparse.Namespace with cfg_path, model_path, out_path, input_shape.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Convert .pth or .ckpt model to onnx.",
    )
    parser.add_argument("--cfg_path", type=str, help="Path to .yml config file.")
    parser.add_argument(
        "--model_path", type=str, default=None, help="Path to .ckpt model."
    )
    parser.add_argument(
        "--out_path", type=str, default="nanodet.onnx", help="Onnx model output path."
    )
    parser.add_argument(
        # Fixed typo in user-facing help text: "intput" -> "input".
        "--input_shape", type=str, default=None, help="Model input shape."
    )
    return parser.parse_args()
161,568 | import argparse
import os
import torch
from nanodet.model.arch import build_model
from nanodet.util import Logger, cfg, load_config, load_model_weight
def parse_args():
    """Parse CLI arguments for TorchScript export.

    Returns:
        argparse.Namespace with cfg_path, model_path, out_path, input_shape.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Convert .pth model weights to TorchScript.",
    )
    parser.add_argument("--cfg_path", type=str, help="Path to .yml config file.")
    parser.add_argument(
        "--model_path", type=str, default=None, help="Path to .ckpt model."
    )
    parser.add_argument(
        "--out_path",
        type=str,
        default="nanodet.torchscript.pth",
        help="TorchScript model output path.",
    )
    parser.add_argument(
        "--input_shape", type=str, default=None, help="Model input shape."
    )
    return parser.parse_args()
161,569 | import argparse
import torch
from nanodet.model.arch import build_model
from nanodet.util import cfg, load_config
def parse_args():
    """Parse CLI arguments: required config path plus optional input shape.

    NOTE(review): the description string "Convert .pth model to onnx." does
    not match this tool's arguments (config + input shape only, no model or
    output path); kept as-is pending confirmation of the intended wording.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Convert .pth model to onnx.",
    )
    parser.add_argument("cfg", type=str, help="Path to .yml config file.")
    parser.add_argument(
        # Fixed typo in user-facing help text: "intput" -> "input".
        "--input_shape", type=str, default=None, help="Model input shape."
    )
    return parser.parse_args()
161,570 | import argparse
import torch
from nanodet.util import convert_old_model
def parse_args():
    """Parse CLI arguments for the checkpoint conversion tool.

    Returns:
        argparse.Namespace with file_path (.pth in) and out_path (.ckpt out).
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        # Fixed copy-pasted description: this tool converts an old .pth
        # checkpoint to the .ckpt format (see --out_path help), not to ONNX.
        description="Convert .pth checkpoint to .ckpt checkpoint.",
    )
    parser.add_argument("--file_path", type=str, help="Path to .pth checkpoint.")
    parser.add_argument("--out_path", type=str, help="Path to .ckpt checkpoint.")
    return parser.parse_args()
161,571 | import os
import argparse
import pandas as pd
from loguru import logger
import sys
from textgen.seq2seq import BartSeq2SeqModel
def load_qa_data(file_path):
    """Load question/answer pairs from a text file.

    Expected line format::

        =            (separator; resets the current pair)
        Q: <question>
        A: <answer>

    A pair is emitted as soon as both a question and an answer are present,
    then the pair state is reset.

    Args:
        file_path: path to a UTF-8 encoded QA file.

    Returns:
        list[tuple[str, str]]: (question, answer) pairs in file order.
    """
    data = []
    # BUGFIX: initialize before the loop — the original raised NameError
    # when the file did not begin with a '=' separator line.
    q = ''
    a = ''
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if line.startswith('='):
                q = ''
                a = ''
                continue
            if line.startswith('Q: '):
                q = line[3:]
            if line.startswith('A: '):
                a = line[3:]
            if q and a:
                data.append((q, a))
                q = ''
                a = ''
    return data
161,572 | import argparse
import pandas as pd
from loguru import logger
import os
from transformers import BertTokenizerFast
from transformers import BartForConditionalGeneration, Text2TextGenerationPipeline
import sys
from textgen.seq2seq import BartSeq2SeqModel
def model_fill_mask():
    """Demo: fill a [MASK] token using the fnlp/bart-base-chinese checkpoint.

    Downloads the tokenizer and model from the Hugging Face hub, then runs a
    deterministic (no-sampling) text2text generation and prints the result.
    """
    tokenizer = BertTokenizerFast.from_pretrained("fnlp/bart-base-chinese")
    model = BartForConditionalGeneration.from_pretrained("fnlp/bart-base-chinese")
    generator = Text2TextGenerationPipeline(model, tokenizer)
    result = generator("北京是[MASK]的首都", max_length=50, do_sample=False)
    print(result)
import argparse
import pandas as pd
from loguru import logger
import os
from transformers import BertTokenizerFast
from transformers import BartForConditionalGeneration, Text2TextGenerationPipeline
import sys
from textgen.seq2seq import BartSeq2SeqModel
def load_data(file_path):
    """Read a TSV file of (source, target) pairs.

    Lines that do not split into exactly two tab-separated fields are
    logged with a warning and skipped.

    Args:
        file_path: Path to a UTF-8 encoded, tab-separated file.

    Returns:
        List of two-element lists ``[source, target]``.
    """
    pairs = []
    with open(file_path, 'r', encoding='utf-8') as reader:
        for raw in reader:
            raw = raw.strip('\n')
            fields = raw.split('\t')
            if len(fields) != 2:
                logger.warning(f'line error: {raw}, split size: {len(fields)}')
                continue
            pairs.append([fields[0], fields[1]])
    return pairs
from loguru import logger
import argparse
from transformers import BertTokenizerFast
import torch
from torch.utils.data import Dataset
import os
import pickle
import sys
from textgen.language_generation import LanguageGenerationModel
from textgen.language_modeling import LanguageModelingModel
The provided code snippet includes necessary dependencies for implementing the `encode` function. Write a Python function `def encode(data)` to solve the following problem:
Encode data to src trg token ids
Here is the function:
def encode(data):
    """Encode data to src trg token ids.

    Args:
        data: a ``(tokenizer, line)`` tuple where *line* is
            ``"<src>\\t<trg>"``.

    Returns:
        Token-id list shaped ``[CLS] src [SEP] trg [SEP]``.
    """
    tokenizer, line = data
    src, trg = line.split('\t')
    cls_id = tokenizer.cls_token_id
    sep_id = tokenizer.sep_token_id
    src_ids = tokenizer.encode(src, add_special_tokens=False)
    trg_ids = tokenizer.encode(trg, add_special_tokens=False)
    return [cls_id] + src_ids + [sep_id] + trg_ids + [sep_id]
import sys
from textgen.language_generation import LanguageGenerationModel
from textgen.language_modeling import LanguageModelingModel
def raw(prompts):
    """Generate text for each prompt with a stock (un-finetuned) GPT-2.

    Args:
        prompts: iterable of prompt strings.
    """
    model = LanguageGenerationModel("gpt2", "gpt2", args={"max_length": 200})
    for prompt in prompts:
        # verbose=False keeps the generated sequences out of the log.
        text = model.generate(prompt, verbose=False)
        # Trim the trailing partial sentence and re-terminate with a period.
        text = ".".join(text[0].split(".")[:-1]) + "."
        print("=" * 42)
        print(text)
        print("=" * 42)
import sys
from textgen.language_generation import LanguageGenerationModel
from textgen.language_modeling import LanguageModelingModel
def finetune(prompts, train_path, test_path):
    """Finetune GPT-2, evaluate it, then generate text for each prompt.

    Args:
        prompts: iterable of prompt strings for post-training generation.
        train_path: training corpus file.
        test_path: evaluation corpus file.
    """
    output_dir = "outputs/en_finetuned/"
    train_args = {
        "reprocess_input_data": True,
        "overwrite_output_dir": True,
        "block_size": 512,
        "max_seq_length": 64,
        "learning_rate": 5e-6,
        "train_batch_size": 8,
        "gradient_accumulation_steps": 8,
        "num_train_epochs": 1,
        "mlm": False,  # causal-LM objective, not masked LM
        "output_dir": output_dir,
        "save_best_model": True,
        "best_model_dir": output_dir + "best_model",
    }
    lm = LanguageModelingModel("gpt2", "gpt2", args=train_args)
    lm.train_model(train_path, eval_file=test_path)
    print(lm.eval_model(test_path))
    # Reload the finetuned weights for generation.
    generator = LanguageGenerationModel("gpt2", output_dir, args={"max_length": 200})
    for prompt in prompts:
        # verbose=False keeps generated sequences out of the log.
        text = generator.generate(prompt, verbose=False)
        # Trim the trailing partial sentence and re-terminate with a period.
        text = ".".join(text[0].split(".")[:-1]) + "."
        print("=" * 42)
        print(text)
        print("=" * 42)
import argparse
from loguru import logger
import pandas as pd
import time
import os
import sys
from textgen.t5 import T5Model
def load_data(prefix, file_path):
    """Read a TSV file into ``[prefix, source, target]`` training triples.

    Malformed lines (not exactly two tab-separated fields) are logged and
    skipped.

    Args:
        prefix: task-prefix string prepended to every sample.
        file_path: path to a UTF-8 encoded, tab-separated file.

    Returns:
        List of three-element lists ``[prefix, source, target]``.
    """
    samples = []
    with open(file_path, 'r', encoding='utf-8') as fin:
        for row in fin:
            row = row.strip('\n')
            parts = row.split('\t')
            if len(parts) == 2:
                src, tgt = parts
                samples.append([prefix, src, tgt])
            else:
                logger.warning(f'line error: {row}')
    return samples
import argparse
from loguru import logger
import pandas as pd
import os
import sys
from textgen.t5 import T5Model
def load_data(prefix, file_path):
    """Load ``[prefix, input_text, target_text]`` rows from a TSV file.

    Lines without exactly one tab separator are logged and skipped.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        lines = [ln.strip('\n') for ln in f]
    data = []
    for line in lines:
        terms = line.split('\t')
        if len(terms) != 2:
            logger.warning(f'line error: {line}')
            continue
        data.append([prefix, terms[0], terms[1]])
    return data
import argparse
from loguru import logger
import time
import pandas as pd
import os
import sys
from textgen import CopyT5Model
def load_data(prefix, file_path):
    """Parse a TSV file into ``[prefix, source, target]`` triples.

    Warns (and skips) on lines that do not contain exactly one tab
    separator.
    """
    results = []
    with open(file_path, 'r', encoding='utf-8') as handle:
        for entry in handle:
            entry = entry.strip('\n')
            columns = entry.split('\t')
            if len(columns) == 2:
                results.append([prefix, columns[0], columns[1]])
            else:
                logger.warning(f'line error: {entry}')
    return results
import sys
import gradio as gr
from textgen import T5Model
def ai_text(sentence):
    """Gradio callback: run the module-level T5 ``model`` on one sentence.

    Args:
        sentence: input text.

    Returns:
        The model's first predicted output string.
    """
    predictions = model.predict([sentence])
    print("{} \t out: {}".format(sentence, predictions[0]))
    return predictions[0]
import argparse
import json
from loguru import logger
import pandas as pd
import numpy as np
import time
import os
import sys
from textgen import T5Model
from textgen.t5.t5_utils import f1_sim, rouge_l_zh
def load_json_data(prefix, file_path):
    """Load pCLUE-style JSONL records as ``[prefix, input_text, target_text]``.

    Each non-empty line looks like::

        {"input": "...", "target": "...", "answer_choices": [...], "type": "nli"}

    Newlines inside the input are replaced with ``_`` so each sample stays
    on one line downstream.  Blank lines are logged and skipped.

    Args:
        prefix: task-prefix string prepended to every sample.
        file_path: path to the UTF-8 encoded JSONL file.

    Returns:
        List of three-element lists ``[prefix, input_text, target_text]``.
    """
    data = []
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if line:
                record = json.loads(line)
                input_text = record["input"].replace("\n", "_")
                target_text = record["target"]
                # "answer_choices" and "type" exist in the schema but were
                # read into unused locals before ("type" also shadowed the
                # builtin); they are simply ignored here.
                data.append([prefix, input_text, target_text])
            else:
                logger.warning(f'line error: {line}')
    return data
import argparse
import json
from loguru import logger
import pandas as pd
import numpy as np
import time
import os
import sys
from textgen import T5Model
from textgen.t5.t5_utils import f1_sim, rouge_l_zh
def normalize(text):
    """Simple text normalization: lowercase, then collapse all runs of
    whitespace into single spaces."""
    tokens = text.lower().split()
    return ' '.join(tokens)
def f1_sim(text_a, text_b):
    """F1 similarity.

    Twice the length of the longest common subsequence of the two texts,
    divided by the sum of their lengths.
    Script: https://github.com/CLUEbenchmark/pCLUE/blob/main/evaluate_pclue.py
    (used for the pCLUE total score and sub-scores).

    NOTE(review): the original called ``dynamic_lcs``, which is not defined
    in this file; the LCS length is now computed by the local helper below.
    """
    if not text_a and not text_b:
        # Avoid 0/0 when both strings are empty.
        return 0.
    lcs_len = _lcs_len(text_a, text_b)
    return 2. * lcs_len / (len(text_a) + len(text_b))


def _lcs_len(a, b):
    """Length of the longest common subsequence of ``a`` and ``b``.

    Classic O(len(a)*len(b)) dynamic programming, keeping only one row of
    the table in memory at a time.
    """
    if not a or not b:
        return 0
    prev = [0] * (len(b) + 1)
    for ch_a in a:
        curr = [0]
        for j, ch_b in enumerate(b, start=1):
            if ch_a == ch_b:
                curr.append(prev[j - 1] + 1)
            else:
                curr.append(max(prev[j], curr[j - 1]))
        prev = curr
    return prev[-1]
def rouge_l_zh(target, pred):
    """Compute the Rouge-L score (common for summarization / translation).

    Args:
        target: reference (gold) text.
        pred: predicted text.

    Returns:
        Rouge-L F score in [0, 1]; 0 on invalid input or missing package.
    """
    # Both arguments must be strings; the original used `or`, so the check
    # only rejected the call when *neither* argument was a string.
    if not (isinstance(target, str) and isinstance(pred, str)):
        logger.error("target或pred为非字符串!请检查!")
        return 0
    try:
        from rouge import Rouge
    except ImportError:
        logger.error("require rouge package, please `pip install Rouge`")
        return 0
    rouge = Rouge()
    # Rouge expects space-separated tokens; split into single characters
    # so it works for (unsegmented) Chinese text.
    scores = rouge.get_scores(" ".join(list(pred)), " ".join(list(target)))
    score = scores[0]["rouge-l"]
    return score["f"]
The provided code snippet includes necessary dependencies for implementing the `evaluate_pclue_fn` function. Write a Python function `def evaluate_pclue_fn(test_file, model, select_top=200)` to solve the following problem:
计算pclue的成绩 :param test_file: 预测文件 :param target_file: 正确的文件 :return: 一个dict,包括总分score,以及各个部分的分数(mrc, generate, classify, nli)
Here is the function:
def evaluate_pclue_fn(test_file, model, select_top=200):
    """
    Compute pCLUE scores.

    :param test_file: JSONL file of samples, each with "input", "target" and
        "type" (one of classify / anaphora_resolution / mrc / generate / nli).
    :param model: object exposing ``predict(list_of_inputs) -> list_of_str``.
    :param select_top: evaluate only the first N lines of the file.
    :return: dict with the overall ``score`` plus per-task scores
        (mrc, generate, classify, nli).
    """
    with open(test_file, 'r', encoding='utf8') as f:
        test_lines = f.readlines()[:select_top]
    predict_lines = model.predict([json.loads(i)['input'] for i in test_lines])

    # 1. Collect per-task results.
    classify_list = []   # bool: exact match
    mrc_list = []        # (exact-match, F1) tuples
    generate_list = []   # Rouge-L scores
    nli_list = []        # bool: exact match
    for i, test_line in enumerate(test_lines):
        json_dict = json.loads(test_line.strip())
        task_type = json_dict["type"]  # renamed: `type` shadowed the builtin
        target_line = json_dict["target"]
        # Normalize ASCII commas to full-width, matching the join below.
        target_answer = target_line.replace(",", ",")
        if isinstance(target_answer, list):  # e.g. keyword generation lists
            target_answer = ",".join(target_answer)
        target_answer = normalize(target_answer)
        predict_answer = normalize(predict_lines[i])
        if len(predict_answer) == 0:
            predict_answer = "无答案"
        if task_type == 'classify' or task_type == 'anaphora_resolution':
            classify_list.append(target_answer == predict_answer)
        elif task_type == 'mrc':  # reading comprehension
            em = 1 if target_answer == predict_answer else 0
            f1 = f1_sim(predict_answer, target_answer)
            mrc_list.append((em, f1))
        elif task_type == 'generate':
            generate_list.append(rouge_l_zh(target_answer, predict_answer))
        elif task_type == 'nli':  # inference
            nli_list.append(target_answer == predict_answer)
        else:
            # Fixed: the original logged the literal text "{predict_line}"
            # (missing f-prefix) and referenced a variable that never existed.
            logger.error(f"unknown type: {task_type}, line: {test_line}")
            break
        if i < 10:
            logger.debug(f"num: {i}, target_answer: {target_answer}; predict_answer: {predict_answer}")

    # 2. Aggregate the final scores.
    classify_score = np.average(classify_list)
    nli_score = np.average(nli_list)
    generate_score = np.average(generate_list)
    mrc_em_score = np.average([x[0] for x in mrc_list])
    mrc_f1_score = np.average([x[1] for x in mrc_list])
    mrc_score = np.average([mrc_em_score, mrc_f1_score])
    # Overall score is the unweighted mean of the four task scores.
    score = np.average([classify_score, nli_score, generate_score, mrc_score])
    result_dict = {"score": score, "classify_score": classify_score, "nli_score": nli_score,
                   "generate_score": generate_score, "mrc_em_score": mrc_em_score, "mrc_f1_score": mrc_f1_score}
    return result_dict
from loguru import logger
def prepare_ctrl_input(args, _, tokenizer, prompt_text):
    """Sanity-check a prompt for CTRL; the text itself is returned unchanged.

    Warns when the temperature is high or when the prompt does not begin
    with one of the tokenizer's control codes.
    """
    if args.temperature > 0.7:
        logger.warning("CTRL typically works better with lower temperatures (and lower top_k).")
    encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False)
    first_token = encoded_prompt[0]
    if first_token not in tokenizer.control_codes.values():
        logger.warning("WARNING! You are not starting your generation from a control code so you won't get good results")
    return prompt_text
from loguru import logger
def prepare_xlm_input(args, model, tokenizer, prompt_text):
    """Configure the model's language id for XLM; returns the prompt unchanged.

    Only acts when the checkpoint was trained with language embeddings and
    exposes a ``lang2id`` mapping; otherwise it is a no-op.
    """
    uses_lang_emb = getattr(model.config, "use_lang_emb", False)
    if uses_lang_emb and hasattr(model.config, "lang2id"):
        available_languages = model.config.lang2id.keys()
        if args.xlm_language in available_languages:
            language = args.xlm_language
        else:
            # Keep prompting until the user names a supported language.
            language = None
            while language not in available_languages:
                language = input("Using XLM. Select language in " + str(list(available_languages)) + " >>> ")
        model.config.lang_id = model.config.lang2id[language]
    return prompt_text
from loguru import logger
# Long filler passage prepended to short prompts: XLNet generates poorly
# without a sizeable context window to attend over.
PADDING_TEXT = """In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""


def prepare_xlnet_input(args, _, tokenizer, prompt_text):
    """Prefix the prompt with padding text (user-supplied or the default)."""
    padding = args.padding_text if args.padding_text else PADDING_TEXT
    return padding + prompt_text
from loguru import logger
# Long filler passage prepended to short prompts: Transfo-XL relies on a
# long preceding context for coherent generation.
PADDING_TEXT = """In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""


def prepare_transfoxl_input(args, _, tokenizer, prompt_text):
    """Prefix the prompt with padding text (user-supplied or the default)."""
    if args.padding_text:
        return args.padding_text + prompt_text
    return PADDING_TEXT + prompt_text
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.