repo | file | code | file_length | avg_line_length | max_line_length | extension_type
|---|---|---|---|---|---|---|
custom-diffusion | custom-diffusion-main/src/diffusers_training.py | # This code is built from the Huggingface repository: https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py, and
# https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py
# Copyright 2022- The Hugging Face team. All rights reserved.
# Apache License
# Version 2.0, January 2004
# http://www.apache.org/licenses/
# ==========================================================================================
#
# Modifications are licensed under the MIT License. To view a copy of the license, visit MIT_LICENSE.md.
#
# ==========================================================================================
# Apache License
# Version 2.0, January 2004
# http://www.apache.org/licenses/
# TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
# 1. Definitions.
# "License" shall mean the terms and conditions for use, reproduction,
# and distribution as defined by Sections 1 through 9 of this document.
# "Licensor" shall mean the copyright owner or entity authorized by
# the copyright owner that is granting the License.
# "Legal Entity" shall mean the union of the acting entity and all
# other entities that control, are controlled by, or are under common
# control with that entity. For the purposes of this definition,
# "control" means (i) the power, direct or indirect, to cause the
# direction or management of such entity, whether by contract or
# otherwise, or (ii) ownership of fifty percent (50%) or more of the
# outstanding shares, or (iii) beneficial ownership of such entity.
# "You" (or "Your") shall mean an individual or Legal Entity
# exercising permissions granted by this License.
# "Source" form shall mean the preferred form for making modifications,
# including but not limited to software source code, documentation
# source, and configuration files.
# "Object" form shall mean any form resulting from mechanical
# transformation or translation of a Source form, including but
# not limited to compiled object code, generated documentation,
# and conversions to other media types.
# "Work" shall mean the work of authorship, whether in Source or
# Object form, made available under the License, as indicated by a
# copyright notice that is included in or attached to the work
# (an example is provided in the Appendix below).
# "Derivative Works" shall mean any work, whether in Source or Object
# form, that is based on (or derived from) the Work and for which the
# editorial revisions, annotations, elaborations, or other modifications
# represent, as a whole, an original work of authorship. For the purposes
# of this License, Derivative Works shall not include works that remain
# separable from, or merely link (or bind by name) to the interfaces of,
# the Work and Derivative Works thereof.
# "Contribution" shall mean any work of authorship, including
# the original version of the Work and any modifications or additions
# to that Work or Derivative Works thereof, that is intentionally
# submitted to Licensor for inclusion in the Work by the copyright owner
# or by an individual or Legal Entity authorized to submit on behalf of
# the copyright owner. For the purposes of this definition, "submitted"
# means any form of electronic, verbal, or written communication sent
# to the Licensor or its representatives, including but not limited to
# communication on electronic mailing lists, source code control systems,
# and issue tracking systems that are managed by, or on behalf of, the
# Licensor for the purpose of discussing and improving the Work, but
# excluding communication that is conspicuously marked or otherwise
# designated in writing by the copyright owner as "Not a Contribution."
# "Contributor" shall mean Licensor and any individual or Legal Entity
# on behalf of whom a Contribution has been received by Licensor and
# subsequently incorporated within the Work.
# 2. Grant of Copyright License. Subject to the terms and conditions of
# this License, each Contributor hereby grants to You a perpetual,
# worldwide, non-exclusive, no-charge, royalty-free, irrevocable
# copyright license to reproduce, prepare Derivative Works of,
# publicly display, publicly perform, sublicense, and distribute the
# Work and such Derivative Works in Source or Object form.
# 3. Grant of Patent License. Subject to the terms and conditions of
# this License, each Contributor hereby grants to You a perpetual,
# worldwide, non-exclusive, no-charge, royalty-free, irrevocable
# (except as stated in this section) patent license to make, have made,
# use, offer to sell, sell, import, and otherwise transfer the Work,
# where such license applies only to those patent claims licensable
# by such Contributor that are necessarily infringed by their
# Contribution(s) alone or by combination of their Contribution(s)
# with the Work to which such Contribution(s) was submitted. If You
# institute patent litigation against any entity (including a
# cross-claim or counterclaim in a lawsuit) alleging that the Work
# or a Contribution incorporated within the Work constitutes direct
# or contributory patent infringement, then any patent licenses
# granted to You under this License for that Work shall terminate
# as of the date such litigation is filed.
# 4. Redistribution. You may reproduce and distribute copies of the
# Work or Derivative Works thereof in any medium, with or without
# modifications, and in Source or Object form, provided that You
# meet the following conditions:
# (a) You must give any other recipients of the Work or
# Derivative Works a copy of this License; and
# (b) You must cause any modified files to carry prominent notices
# stating that You changed the files; and
# (c) You must retain, in the Source form of any Derivative Works
# that You distribute, all copyright, patent, trademark, and
# attribution notices from the Source form of the Work,
# excluding those notices that do not pertain to any part of
# the Derivative Works; and
# (d) If the Work includes a "NOTICE" text file as part of its
# distribution, then any Derivative Works that You distribute must
# include a readable copy of the attribution notices contained
# within such NOTICE file, excluding those notices that do not
# pertain to any part of the Derivative Works, in at least one
# of the following places: within a NOTICE text file distributed
# as part of the Derivative Works; within the Source form or
# documentation, if provided along with the Derivative Works; or,
# within a display generated by the Derivative Works, if and
# wherever such third-party notices normally appear. The contents
# of the NOTICE file are for informational purposes only and
# do not modify the License. You may add Your own attribution
# notices within Derivative Works that You distribute, alongside
# or as an addendum to the NOTICE text from the Work, provided
# that such additional attribution notices cannot be construed
# as modifying the License.
# You may add Your own copyright statement to Your modifications and
# may provide additional or different license terms and conditions
# for use, reproduction, or distribution of Your modifications, or
# for any such Derivative Works as a whole, provided Your use,
# reproduction, and distribution of the Work otherwise complies with
# the conditions stated in this License.
# 5. Submission of Contributions. Unless You explicitly state otherwise,
# any Contribution intentionally submitted for inclusion in the Work
# by You to the Licensor shall be under the terms and conditions of
# this License, without any additional terms or conditions.
# Notwithstanding the above, nothing herein shall supersede or modify
# the terms of any separate license agreement you may have executed
# with Licensor regarding such Contributions.
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor,
# except as required for reasonable and customary use in describing the
# origin of the Work and reproducing the content of the NOTICE file.
# 7. Disclaimer of Warranty. Unless required by applicable law or
# agreed to in writing, Licensor provides the Work (and each
# Contributor provides its Contributions) on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied, including, without limitation, any warranties or conditions
# of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
# PARTICULAR PURPOSE. You are solely responsible for determining the
# appropriateness of using or redistributing the Work and assume any
# risks associated with Your exercise of permissions under this License.
# 8. Limitation of Liability. In no event and under no legal theory,
# whether in tort (including negligence), contract, or otherwise,
# unless required by applicable law (such as deliberate and grossly
# negligent acts) or agreed to in writing, shall any Contributor be
# liable to You for damages, including any direct, indirect, special,
# incidental, or consequential damages of any character arising as a
# result of this License or out of the use or inability to use the
# Work (including but not limited to damages for loss of goodwill,
# work stoppage, computer failure or malfunction, or any and all
# other commercial damages or losses), even if such Contributor
# has been advised of the possibility of such damages.
# 9. Accepting Warranty or Additional Liability. While redistributing
# the Work or Derivative Works thereof, You may choose to offer,
# and charge a fee for, acceptance of support, warranty, indemnity,
# or other liability obligations and/or rights consistent with this
# License. However, in accepting such obligations, You may act only
# on Your own behalf and on Your sole responsibility, not on behalf
# of any other Contributor, and only if You agree to indemnify,
# defend, and hold each Contributor harmless for any liability
# incurred by, or claims asserted against, such Contributor by reason
# of your accepting any such warranty or additional liability.
# END OF TERMS AND CONDITIONS
# APPENDIX: How to apply the Apache License to your work.
# To apply the Apache License to your work, attach the following
# boilerplate notice, with the fields enclosed by brackets "[]"
# replaced with your own identifying information. (Don't include
# the brackets!) The text should be enclosed in the appropriate
# comment syntax for the file format. We also recommend that a
# file or class name and description of purpose be included on the
# same "printed page" as the copyright notice for easier
# identification within third-party archives.
# Copyright [yyyy] [name of copyright owner]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import hashlib
import itertools
import logging
import math
import os
from pathlib import Path
from typing import Optional
import torch
import json
import numpy as np
import torch.nn.functional as F
import torch.utils.checkpoint
from packaging import version
import transformers
import diffusers
from accelerate.logging import get_logger
from accelerate import Accelerator
from accelerate.utils import set_seed
from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel, DPMSolverMultistepScheduler
from diffusers.optimization import get_scheduler
from huggingface_hub import HfFolder, Repository, create_repo, whoami
from tqdm.auto import tqdm
from transformers import AutoTokenizer, PretrainedConfig
from diffusers.models.cross_attention import CrossAttention
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils import check_min_version, is_wandb_available
sys.path.append('./')
from src.diffusers_model_pipeline import CustomDiffusionAttnProcessor, CustomDiffusionPipeline, set_use_memory_efficient_attention_xformers
from src.diffusers_data_pipeline import CustomDiffusionDataset, PromptDataset, collate_fn
from src import retrieve
check_min_version("0.14.0")
logger = get_logger(__name__)
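# create_custom_diffusion freezes every UNet parameter except the cross-attention layers:
# with freeze_model == "crossattn_kv" only the key/value projections (attn2.to_k, attn2.to_v)
# stay trainable, while "crossattn" keeps all attn2 parameters trainable. It also rebinds
# set_use_memory_efficient_attention_xformers on each CrossAttention block and installs the
# CustomDiffusionAttnProcessor.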
def create_custom_diffusion(unet, freeze_model):
for name, params in unet.named_parameters():
if freeze_model == 'crossattn':
if 'attn2' in name:
params.requires_grad = True
print(name)
else:
params.requires_grad = False
elif freeze_model == "crossattn_kv":
if 'attn2.to_k' in name or 'attn2.to_v' in name:
params.requires_grad = True
print(name)
else:
params.requires_grad = False
else:
raise ValueError(
"freeze_model argument only supports crossattn_kv or crossattn"
)
# change attn class
def change_attn(unet):
for layer in unet.children():
if type(layer) == CrossAttention:
bound_method = set_use_memory_efficient_attention_xformers.__get__(layer, layer.__class__)
setattr(layer, 'set_use_memory_efficient_attention_xformers', bound_method)
else:
change_attn(layer)
change_attn(unet)
unet.set_attn_processor(CustomDiffusionAttnProcessor())
return unet
def freeze_params(params):
for param in params:
param.requires_grad = False
def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
text_encoder_config = PretrainedConfig.from_pretrained(
pretrained_model_name_or_path,
subfolder="text_encoder",
revision=revision,
)
model_class = text_encoder_config.architectures[0]
if model_class == "CLIPTextModel":
from transformers import CLIPTextModel
return CLIPTextModel
elif model_class == "RobertaSeriesModelWithTransformation":
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
return RobertaSeriesModelWithTransformation
else:
raise ValueError(f"{model_class} is not supported.")
def parse_args(input_args=None):
parser = argparse.ArgumentParser(description="Simple example of a training script.")
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--instance_data_dir",
type=str,
default=None,
help="A folder containing the training data of instance images.",
)
parser.add_argument(
"--class_data_dir",
type=str,
default=None,
required=False,
help="A folder containing the training data of class images.",
)
parser.add_argument(
"--instance_prompt",
type=str,
default=None,
help="The prompt with identifier specifying the instance",
)
parser.add_argument(
"--class_prompt",
type=str,
default=None,
help="The prompt to specify images in the same class as provided instance images.",
)
parser.add_argument(
"--validation_prompt",
type=str,
default=None,
help="A prompt that is used during validation to verify that the model is learning.",
)
parser.add_argument(
"--num_validation_images",
type=int,
default=4,
help="Number of images that should be generated during validation with `validation_prompt`.",
)
parser.add_argument(
"--with_prior_preservation",
default=False,
action="store_true",
help="Flag to add prior preservation loss.",
)
parser.add_argument(
"--real_prior",
default=False,
action="store_true",
help="real images as prior.",
)
parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
parser.add_argument(
"--num_class_images",
type=int,
default=100,
help=(
"Minimal class images for prior preservation loss. If there are not enough images already present in"
" class_data_dir, additional images will be sampled with class_prompt."
),
)
parser.add_argument(
"--output_dir",
type=str,
default="custom-diffusion-model",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
)
parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
parser.add_argument(
"--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
)
parser.add_argument(
"--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
)
parser.add_argument("--num_train_epochs", type=int, default=1)
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--gradient_checkpointing",
action="store_true",
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=1e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--dataloader_num_workers",
type=int,
default=0,
help=(
"Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
),
)
parser.add_argument(
"--freeze_model",
type=str,
default='crossattn_kv',
help="crossattn to enable fine-tuning of all key, value, query matrices",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
)
parser.add_argument(
"--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--allow_tf32",
action="store_true",
help=(
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
),
)
parser.add_argument(
"--report_to",
type=str,
default="tensorboard",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
),
)
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16"],
help=(
"Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
" 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
" flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
),
)
parser.add_argument(
"--prior_generation_precision",
type=str,
default=None,
choices=["no", "fp32", "fp16", "bf16"],
help=(
"Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
" 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32."
),
)
parser.add_argument(
"--concepts_list",
type=str,
default=None,
help="Path to json containing multiple concepts, will overwrite parameters like instance_prompt, class_prompt, etc.",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument(
"--modifier_token",
type=str,
default=None,
help="A token to use as a modifier for the concept.",
)
parser.add_argument(
"--initializer_token", type=str, default='ktn+pll+ucd', help="A token to use as initializer word."
)
parser.add_argument("--hflip", action="store_true", help="Apply horizontal flip data augmentation.")
if input_args is not None:
args = parser.parse_args(input_args)
else:
args = parser.parse_args()
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
if args.with_prior_preservation:
if args.concepts_list is None:
if args.class_data_dir is None:
raise ValueError("You must specify a data directory for class images.")
if args.class_prompt is None:
raise ValueError("You must specify prompt for class images.")
else:
if args.class_data_dir is not None:
logger.warning("You need not use --class_data_dir without --with_prior_preservation.")
if args.class_prompt is not None:
logger.warning("You need not use --class_prompt without --with_prior_preservation.")
return args
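# A minimal launch sketch (paths, model id, and the <new1> token are illustrative, not mandated
# by this script), using only the flags defined above:
#   accelerate launch src/diffusers_training.py \
#     --pretrained_model_name_or_path="CompVis/stable-diffusion-v1-4" \
#     --instance_data_dir=./data/cat --instance_prompt="photo of a <new1> cat" \
#     --class_data_dir=./real_reg/samples_cat --class_prompt="cat" \
#     --with_prior_preservation --real_prior --modifier_token "<new1>" \
#     --output_dir=./logs/cat --max_train_steps=250 --scale_lr --hflip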
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
if token is None:
token = HfFolder.get_token()
if organization is None:
username = whoami(token)["name"]
return f"{username}/{model_id}"
else:
return f"{organization}/{model_id}"
def main(args):
logging_dir = Path(args.output_dir, args.logging_dir)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with=args.report_to,
logging_dir=logging_dir,
)
if args.report_to == "wandb":
if not is_wandb_available():
raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
import wandb
# Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
# This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
# TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
if args.seed is not None:
set_seed(args.seed)
if args.concepts_list is None:
args.concepts_list = [
{
"instance_prompt": args.instance_prompt,
"class_prompt": args.class_prompt,
"instance_data_dir": args.instance_data_dir,
"class_data_dir": args.class_data_dir
}
]
else:
with open(args.concepts_list, "r") as f:
args.concepts_list = json.load(f)
if args.with_prior_preservation:
for i, concept in enumerate(args.concepts_list):
class_images_dir = Path(concept['class_data_dir'])
if not class_images_dir.exists():
class_images_dir.mkdir(parents=True, exist_ok=True)
if args.real_prior:
if accelerator.is_main_process:
if not Path(os.path.join(class_images_dir, 'images')).exists() or len(list(Path(os.path.join(class_images_dir, 'images')).iterdir())) < args.num_class_images:
retrieve.retrieve(concept['class_prompt'], class_images_dir, args.num_class_images)
concept['class_prompt'] = os.path.join(class_images_dir, 'caption.txt')
concept['class_data_dir'] = os.path.join(class_images_dir, 'images.txt')
args.concepts_list[i] = concept
accelerator.wait_for_everyone()
else:
cur_class_images = len(list(class_images_dir.iterdir()))
if cur_class_images < args.num_class_images:
torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
if args.prior_generation_precision == "fp32":
torch_dtype = torch.float32
elif args.prior_generation_precision == "fp16":
torch_dtype = torch.float16
elif args.prior_generation_precision == "bf16":
torch_dtype = torch.bfloat16
pipeline = DiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
torch_dtype=torch_dtype,
safety_checker=None,
revision=args.revision,
)
pipeline.set_progress_bar_config(disable=True)
num_new_images = args.num_class_images - cur_class_images
logger.info(f"Number of class images to sample: {num_new_images}.")
sample_dataset = PromptDataset(concept['class_prompt'], num_new_images)
sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
sample_dataloader = accelerator.prepare(sample_dataloader)
pipeline.to(accelerator.device)
for example in tqdm(
sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
):
images = pipeline(example["prompt"], num_inference_steps=50, guidance_scale=6., eta=1.).images
for i, image in enumerate(images):
hash_image = hashlib.sha1(image.tobytes()).hexdigest()
image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
image.save(image_filename)
del pipeline
if torch.cuda.is_available():
torch.cuda.empty_cache()
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
create_repo(repo_name, exist_ok=True, token=args.hub_token)
repo = Repository(args.output_dir, clone_from=repo_name, token=args.hub_token)
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
if "step_*" not in gitignore:
gitignore.write("step_*\n")
if "epoch_*" not in gitignore:
gitignore.write("epoch_*\n")
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
# Load the tokenizer
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name,
revision=args.revision,
use_fast=False,
)
elif args.pretrained_model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="tokenizer",
revision=args.revision,
use_fast=False,
)
# import correct text encoder class
text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
# Load scheduler and models
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
text_encoder = text_encoder_cls.from_pretrained(
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
)
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
unet = UNet2DConditionModel.from_pretrained(
args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
)
vae.requires_grad_(False)
if not args.train_text_encoder and args.modifier_token is None:
text_encoder.requires_grad_(False)
unet = create_custom_diffusion(unet, args.freeze_model)
# For mixed precision training we cast the text_encoder and vae weights to half-precision
# as these models are only used for inference, keeping weights in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
# Move unet, vae and text_encoder to device and cast to weight_dtype
if accelerator.mixed_precision != "fp16":
unet.to(accelerator.device, dtype=weight_dtype)
text_encoder.to(accelerator.device, dtype=weight_dtype)
vae.to(accelerator.device, dtype=weight_dtype)
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
import xformers
xformers_version = version.parse(xformers.__version__)
if xformers_version == version.parse("0.0.16"):
logger.warning(
"xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
)
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError("xformers is not available. Make sure it is installed correctly")
## check this##
if args.gradient_checkpointing:
unet.enable_gradient_checkpointing()
if args.train_text_encoder or args.modifier_token is not None:
text_encoder.gradient_checkpointing_enable()
# Enable TF32 for faster training on Ampere GPUs,
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if args.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
if args.scale_lr:
args.learning_rate = (
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
)
if args.with_prior_preservation:
args.learning_rate = args.learning_rate*2.
# Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
if args.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError:
raise ImportError(
"To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
)
optimizer_class = bnb.optim.AdamW8bit
else:
optimizer_class = torch.optim.AdamW
# Adding a modifier token which is optimized ####
# Code taken from https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py
modifier_token_id = []
initializer_token_id = []
if args.modifier_token is not None:
args.modifier_token = args.modifier_token.split('+')
args.initializer_token = args.initializer_token.split('+')
if len(args.modifier_token) > len(args.initializer_token):
raise ValueError("You must specify + separated initializer token for each modifier token.")
for modifier_token, initializer_token in zip(args.modifier_token, args.initializer_token[:len(args.modifier_token)]):
# Add the placeholder token in tokenizer
num_added_tokens = tokenizer.add_tokens(modifier_token)
if num_added_tokens == 0:
raise ValueError(
f"The tokenizer already contains the token {modifier_token}. Please pass a different"
" `modifier_token` that is not already in the tokenizer."
)
# Convert the initializer_token, placeholder_token to ids
token_ids = tokenizer.encode([initializer_token], add_special_tokens=False)
print(token_ids)
# Check if initializer_token is a single token or a sequence of tokens
if len(token_ids) > 1:
raise ValueError("The initializer token must be a single token.")
initializer_token_id.append(token_ids[0])
modifier_token_id.append(tokenizer.convert_tokens_to_ids(modifier_token))
# Resize the token embeddings as we are adding new special tokens to the tokenizer
text_encoder.resize_token_embeddings(len(tokenizer))
# Initialise the newly added placeholder token with the embeddings of the initializer token
token_embeds = text_encoder.get_input_embeddings().weight.data
for (x,y) in zip(modifier_token_id,initializer_token_id):
token_embeds[x] = token_embeds[y]
# Freeze all parameters except for the token embeddings in text encoder
params_to_freeze = itertools.chain(
text_encoder.text_model.encoder.parameters(),
text_encoder.text_model.final_layer_norm.parameters(),
text_encoder.text_model.embeddings.position_embedding.parameters(),
)
freeze_params(params_to_freeze)
if args.freeze_model == 'crossattn':
params_to_optimize = itertools.chain( text_encoder.get_input_embeddings().parameters() , [x[1] for x in unet.named_parameters() if 'attn2' in x[0]] )
else:
params_to_optimize = itertools.chain( text_encoder.get_input_embeddings().parameters() , [x[1] for x in unet.named_parameters() if ('attn2.to_k' in x[0] or 'attn2.to_v' in x[0])] )
########################################################
########################################################
else:
if args.freeze_model == 'crossattn':
params_to_optimize = (
itertools.chain([x[1] for x in unet.named_parameters() if 'attn2' in x[0]], text_encoder.parameters() if args.train_text_encoder else [] )
)
else:
params_to_optimize = (
itertools.chain([x[1] for x in unet.named_parameters() if ('attn2.to_k' in x[0] or 'attn2.to_v' in x[0])], text_encoder.parameters() if args.train_text_encoder else [] )
)
optimizer = optimizer_class(
params_to_optimize,
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
train_dataset = CustomDiffusionDataset(
concepts_list=args.concepts_list,
tokenizer=tokenizer,
with_prior_preservation=args.with_prior_preservation,
size=args.resolution,
center_crop=args.center_crop,
num_class_images=args.num_class_images,
hflip=args.hflip
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.train_batch_size,
shuffle=True,
collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),
num_workers=args.dataloader_num_workers,
)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
)
if args.train_text_encoder or args.modifier_token is not None:
unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
unet, text_encoder, optimizer, train_dataloader, lr_scheduler
)
else:
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
unet, optimizer, train_dataloader, lr_scheduler
)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# The trackers initializes automatically on the main process.
if accelerator.is_main_process:
accelerator.init_trackers("custom-diffusion")
# Train!
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num batches each epoch = {len(train_dataloader)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
progress_bar.set_description("Steps")
global_step = 0
for epoch in range(args.num_train_epochs):
unet.train()
if args.train_text_encoder or args.modifier_token is not None:
text_encoder.train()
for step, batch in enumerate(train_dataloader):
with accelerator.accumulate(unet):
# Convert images to latent space
latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
latents = latents * vae.config.scaling_factor
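# vae.config.scaling_factor (0.18215 for the Stable Diffusion v1 VAE) rescales the latents to
# the range the diffusion UNet was trained on.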
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents)
bsz = latents.shape[0]
# Sample a random timestep for each image
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
timesteps = timesteps.long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
# Get the text embedding for conditioning
encoder_hidden_states = text_encoder(batch["input_ids"])[0]
# Predict the noise residual
model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
# Get the target for loss depending on the prediction type
if noise_scheduler.config.prediction_type == "epsilon":
target = noise
elif noise_scheduler.config.prediction_type == "v_prediction":
target = noise_scheduler.get_velocity(latents, noise, timesteps)
else:
raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
if args.with_prior_preservation:
# Chunk the noise and model_pred into two parts and compute the loss on each part separately.
model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
target, target_prior = torch.chunk(target, 2, dim=0)
mask = torch.chunk(batch["mask"], 2, dim=0)[0]
# Compute instance loss
loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
loss = ((loss*mask).sum([1, 2, 3])/mask.sum([1, 2, 3])).mean()
# Compute prior loss
prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
# Add the prior loss to the instance loss.
loss = loss + args.prior_loss_weight * prior_loss
else:
mask = batch["mask"]
loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
loss = ((loss*mask).sum([1, 2, 3])/mask.sum([1, 2, 3])).mean()
accelerator.backward(loss)
# Zero out the gradients for all token embeddings except the newly added
# embeddings for the concept, as we only want to optimize the concept embeddings
if args.modifier_token is not None:
if accelerator.num_processes > 1:
grads_text_encoder = text_encoder.module.get_input_embeddings().weight.grad
else:
grads_text_encoder = text_encoder.get_input_embeddings().weight.grad
# Get the index for tokens that we want to zero the grads for
index_grads_to_zero = torch.arange(len(tokenizer)) != modifier_token_id[0]
for i in range(1, len(modifier_token_id)):
index_grads_to_zero = index_grads_to_zero & (torch.arange(len(tokenizer)) != modifier_token_id[i])
grads_text_encoder.data[index_grads_to_zero, :] = grads_text_encoder.data[index_grads_to_zero, :].fill_(0)
if accelerator.sync_gradients:
params_to_clip = (
itertools.chain([x[1] for x in unet.named_parameters() if ('attn2' in x[0])], text_encoder.parameters())
if (args.train_text_encoder or args.modifier_token is not None)
else itertools.chain([x[1] for x in unet.named_parameters() if ('attn2' in x[0])])
)
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
global_step += 1
if global_step % args.save_steps == 0:
if accelerator.is_main_process:
pipeline = CustomDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
unet=accelerator.unwrap_model(unet),
text_encoder=accelerator.unwrap_model(text_encoder),
tokenizer=tokenizer,
revision=args.revision,
modifier_token=args.modifier_token,
modifier_token_id=modifier_token_id,
)
save_path = os.path.join(args.output_dir, f"delta-{global_step}.bin")
pipeline.save_pretrained(save_path, freeze_model=args.freeze_model)
logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if global_step >= args.max_train_steps:
break
accelerator.wait_for_everyone()
if accelerator.is_main_process:
# create pipeline
unet = unet.to(torch.float32)
pipeline = CustomDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
unet=accelerator.unwrap_model(unet),
text_encoder=accelerator.unwrap_model(text_encoder),
tokenizer=tokenizer,
revision=args.revision,
modifier_token=args.modifier_token,
modifier_token_id=modifier_token_id,
)
save_path = os.path.join(args.output_dir, f"delta.bin")
pipeline.save_pretrained(save_path, freeze_model=args.freeze_model)
if args.validation_prompt is not None:
logger.info(
f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
f" {args.validation_prompt}."
)
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
pipeline = pipeline.to(accelerator.device)
pipeline.set_progress_bar_config(disable=True)
# run inference
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
images = [
pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
for _ in range(args.num_validation_images)
]
for tracker in accelerator.trackers:
if tracker.name == "tensorboard":
np_images = np.stack([np.asarray(img) for img in images])
tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
if tracker.name == "wandb":
tracker.log(
{
"validation": [
wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
for i, image in enumerate(images)
]
}
)
del pipeline
torch.cuda.empty_cache()
if args.push_to_hub:
repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True)
accelerator.end_training()
if __name__ == "__main__":
args = parse_args()
main(args)
| 51,778 | 45.816456 | 253 | py |
custom-diffusion | custom-diffusion-main/src/compress.py | # Copyright 2022 Adobe Research. All rights reserved.
# To view a copy of the license, visit LICENSE.md.
import torch
import argparse
def compress(delta_ckpt, ckpt, diffuser=False, compression_ratio=0.6, device='cuda'):
st = torch.load(f'{delta_ckpt}')
if not diffuser:
compressed_key = 'state_dict'
compressed_st = {compressed_key: {}}
pretrained_st = torch.load(ckpt)['state_dict']
if 'embed' in st['state_dict']:
compressed_st['state_dict']['embed'] = st['state_dict']['embed']
del st['state_dict']['embed']
st = st['state_dict']
else:
from diffusers import StableDiffusionPipeline
compressed_key = 'unet'
compressed_st = {compressed_key: {}}
pretrained_st = StableDiffusionPipeline.from_pretrained(ckpt, torch_dtype=torch.float16).to(device)
pretrained_st = pretrained_st.unet.state_dict()
if 'modifier_token' in st:
compressed_st['modifier_token'] = st['modifier_token']
st = st['unet']
print("getting compression")
layers = list(st.keys())
for name in layers:
if 'to_k' in name or 'to_v' in name:
W = st[name].to(device)
Wpretrain = pretrained_st[name].clone().to(device)
deltaW = W-Wpretrain
u, s, vt = torch.linalg.svd(deltaW.clone())
explain = 0
all_ = (s).sum()
for i, t in enumerate(s):
explain += t/(all_)
if explain > compression_ratio:
break
compressed_st[compressed_key][f'{name}'] = {}
compressed_st[compressed_key][f'{name}']['u'] = (u[:, :i]@torch.diag(s)[:i, :i]).clone()
compressed_st[compressed_key][f'{name}']['v'] = vt[:i].clone()
else:
compressed_st[compressed_key][f'{name}'] = st[name]
name = delta_ckpt.replace('delta', 'compressed_delta')
torch.save(compressed_st, f'{name}')
def parse_args():
parser = argparse.ArgumentParser('', add_help=False)
parser.add_argument('--delta_ckpt', help='path of checkpoint to compress',
type=str)
parser.add_argument('--ckpt', help='path of pretrained model checkpoint',
type=str)
parser.add_argument("--diffuser", action='store_true')
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
compress(args.delta_ckpt, args.ckpt, args.diffuser)
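# Example invocation (paths are illustrative): for a delta checkpoint saved by the diffusers
# training script, pass --diffuser together with the base model id or path:
#   python src/compress.py --delta_ckpt logs/cat/delta.bin --ckpt "CompVis/stable-diffusion-v1-4" --diffuser
# The compressed checkpoint is written next to the input as compressed_delta.bin.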
| 2,479 | 34.428571 | 107 | py |
custom-diffusion | custom-diffusion-main/src/diffusers_data_pipeline.py | # This code is built from the Huggingface repository: https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py, and
# https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py
# Copyright 2022- The Hugging Face team. All rights reserved.
# Apache License
# Version 2.0, January 2004
# http://www.apache.org/licenses/
# ==========================================================================================
#
# Adobe’s modifications are Copyright 2022 Adobe Research. All rights reserved.
# Adobe’s modifications are licensed under the Adobe Research License. To view a copy of the license, visit
# LICENSE.md.
#
# ==========================================================================================
# Apache License
# Version 2.0, January 2004
# http://www.apache.org/licenses/
# TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
# 1. Definitions.
# "License" shall mean the terms and conditions for use, reproduction,
# and distribution as defined by Sections 1 through 9 of this document.
# "Licensor" shall mean the copyright owner or entity authorized by
# the copyright owner that is granting the License.
# "Legal Entity" shall mean the union of the acting entity and all
# other entities that control, are controlled by, or are under common
# control with that entity. For the purposes of this definition,
# "control" means (i) the power, direct or indirect, to cause the
# direction or management of such entity, whether by contract or
# otherwise, or (ii) ownership of fifty percent (50%) or more of the
# outstanding shares, or (iii) beneficial ownership of such entity.
# "You" (or "Your") shall mean an individual or Legal Entity
# exercising permissions granted by this License.
# "Source" form shall mean the preferred form for making modifications,
# including but not limited to software source code, documentation
# source, and configuration files.
# "Object" form shall mean any form resulting from mechanical
# transformation or translation of a Source form, including but
# not limited to compiled object code, generated documentation,
# and conversions to other media types.
# "Work" shall mean the work of authorship, whether in Source or
# Object form, made available under the License, as indicated by a
# copyright notice that is included in or attached to the work
# (an example is provided in the Appendix below).
# "Derivative Works" shall mean any work, whether in Source or Object
# form, that is based on (or derived from) the Work and for which the
# editorial revisions, annotations, elaborations, or other modifications
# represent, as a whole, an original work of authorship. For the purposes
# of this License, Derivative Works shall not include works that remain
# separable from, or merely link (or bind by name) to the interfaces of,
# the Work and Derivative Works thereof.
# "Contribution" shall mean any work of authorship, including
# the original version of the Work and any modifications or additions
# to that Work or Derivative Works thereof, that is intentionally
# submitted to Licensor for inclusion in the Work by the copyright owner
# or by an individual or Legal Entity authorized to submit on behalf of
# the copyright owner. For the purposes of this definition, "submitted"
# means any form of electronic, verbal, or written communication sent
# to the Licensor or its representatives, including but not limited to
# communication on electronic mailing lists, source code control systems,
# and issue tracking systems that are managed by, or on behalf of, the
# Licensor for the purpose of discussing and improving the Work, but
# excluding communication that is conspicuously marked or otherwise
# designated in writing by the copyright owner as "Not a Contribution."
# "Contributor" shall mean Licensor and any individual or Legal Entity
# on behalf of whom a Contribution has been received by Licensor and
# subsequently incorporated within the Work.
# 2. Grant of Copyright License. Subject to the terms and conditions of
# this License, each Contributor hereby grants to You a perpetual,
# worldwide, non-exclusive, no-charge, royalty-free, irrevocable
# copyright license to reproduce, prepare Derivative Works of,
# publicly display, publicly perform, sublicense, and distribute the
# Work and such Derivative Works in Source or Object form.
# 3. Grant of Patent License. Subject to the terms and conditions of
# this License, each Contributor hereby grants to You a perpetual,
# worldwide, non-exclusive, no-charge, royalty-free, irrevocable
# (except as stated in this section) patent license to make, have made,
# use, offer to sell, sell, import, and otherwise transfer the Work,
# where such license applies only to those patent claims licensable
# by such Contributor that are necessarily infringed by their
# Contribution(s) alone or by combination of their Contribution(s)
# with the Work to which such Contribution(s) was submitted. If You
# institute patent litigation against any entity (including a
# cross-claim or counterclaim in a lawsuit) alleging that the Work
# or a Contribution incorporated within the Work constitutes direct
# or contributory patent infringement, then any patent licenses
# granted to You under this License for that Work shall terminate
# as of the date such litigation is filed.
# 4. Redistribution. You may reproduce and distribute copies of the
# Work or Derivative Works thereof in any medium, with or without
# modifications, and in Source or Object form, provided that You
# meet the following conditions:
# (a) You must give any other recipients of the Work or
# Derivative Works a copy of this License; and
# (b) You must cause any modified files to carry prominent notices
# stating that You changed the files; and
# (c) You must retain, in the Source form of any Derivative Works
# that You distribute, all copyright, patent, trademark, and
# attribution notices from the Source form of the Work,
# excluding those notices that do not pertain to any part of
# the Derivative Works; and
# (d) If the Work includes a "NOTICE" text file as part of its
# distribution, then any Derivative Works that You distribute must
# include a readable copy of the attribution notices contained
# within such NOTICE file, excluding those notices that do not
# pertain to any part of the Derivative Works, in at least one
# of the following places: within a NOTICE text file distributed
# as part of the Derivative Works; within the Source form or
# documentation, if provided along with the Derivative Works; or,
# within a display generated by the Derivative Works, if and
# wherever such third-party notices normally appear. The contents
# of the NOTICE file are for informational purposes only and
# do not modify the License. You may add Your own attribution
# notices within Derivative Works that You distribute, alongside
# or as an addendum to the NOTICE text from the Work, provided
# that such additional attribution notices cannot be construed
# as modifying the License.
# You may add Your own copyright statement to Your modifications and
# may provide additional or different license terms and conditions
# for use, reproduction, or distribution of Your modifications, or
# for any such Derivative Works as a whole, provided Your use,
# reproduction, and distribution of the Work otherwise complies with
# the conditions stated in this License.
# 5. Submission of Contributions. Unless You explicitly state otherwise,
# any Contribution intentionally submitted for inclusion in the Work
# by You to the Licensor shall be under the terms and conditions of
# this License, without any additional terms or conditions.
# Notwithstanding the above, nothing herein shall supersede or modify
# the terms of any separate license agreement you may have executed
# with Licensor regarding such Contributions.
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor,
# except as required for reasonable and customary use in describing the
# origin of the Work and reproducing the content of the NOTICE file.
# 7. Disclaimer of Warranty. Unless required by applicable law or
# agreed to in writing, Licensor provides the Work (and each
# Contributor provides its Contributions) on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied, including, without limitation, any warranties or conditions
# of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
# PARTICULAR PURPOSE. You are solely responsible for determining the
# appropriateness of using or redistributing the Work and assume any
# risks associated with Your exercise of permissions under this License.
# 8. Limitation of Liability. In no event and under no legal theory,
# whether in tort (including negligence), contract, or otherwise,
# unless required by applicable law (such as deliberate and grossly
# negligent acts) or agreed to in writing, shall any Contributor be
# liable to You for damages, including any direct, indirect, special,
# incidental, or consequential damages of any character arising as a
# result of this License or out of the use or inability to use the
# Work (including but not limited to damages for loss of goodwill,
# work stoppage, computer failure or malfunction, or any and all
# other commercial damages or losses), even if such Contributor
# has been advised of the possibility of such damages.
# 9. Accepting Warranty or Additional Liability. While redistributing
# the Work or Derivative Works thereof, You may choose to offer,
# and charge a fee for, acceptance of support, warranty, indemnity,
# or other liability obligations and/or rights consistent with this
# License. However, in accepting such obligations, You may act only
# on Your own behalf and on Your sole responsibility, not on behalf
# of any other Contributor, and only if You agree to indemnify,
# defend, and hold each Contributor harmless for any liability
# incurred by, or claims asserted against, such Contributor by reason
# of your accepting any such warranty or additional liability.
# END OF TERMS AND CONDITIONS
# APPENDIX: How to apply the Apache License to your work.
# To apply the Apache License to your work, attach the following
# boilerplate notice, with the fields enclosed by brackets "[]"
# replaced with your own identifying information. (Don't include
# the brackets!) The text should be enclosed in the appropriate
# comment syntax for the file format. We also recommend that a
# file or class name and description of purpose be included on the
# same "printed page" as the copyright notice for easier
# identification within third-party archives.
# Copyright [yyyy] [name of copyright owner]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
from pathlib import Path
import numpy as np
import PIL
from PIL import Image
import torch
from torch.utils.data import Dataset
from torchvision import transforms
def preprocess(image, scale, resample):
image = image.resize((scale, scale), resample=resample)
image = np.array(image).astype(np.uint8)
image = (image / 127.5 - 1.0).astype(np.float32)
return image
def collate_fn(examples, with_prior_preservation):
input_ids = [example["instance_prompt_ids"] for example in examples]
pixel_values = [example["instance_images"] for example in examples]
mask = [example["mask"] for example in examples]
# Concat class and instance examples for prior preservation.
# We do this to avoid doing two forward passes.
if with_prior_preservation:
input_ids += [example["class_prompt_ids"] for example in examples]
pixel_values += [example["class_images"] for example in examples]
mask += [example["class_mask"] for example in examples]
input_ids = torch.cat(input_ids, dim=0)
pixel_values = torch.stack(pixel_values)
mask = torch.stack(mask)
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
mask = mask.to(memory_format=torch.contiguous_format).float()
batch = {
"input_ids": input_ids,
"pixel_values": pixel_values,
"mask": mask.unsqueeze(1)
}
return batch
class PromptDataset(Dataset):
"A simple dataset to prepare the prompts to generate class images on multiple GPUs."
def __init__(self, prompt, num_samples):
self.prompt = prompt
self.num_samples = num_samples
def __len__(self):
return self.num_samples
def __getitem__(self, index):
example = {}
example["prompt"] = self.prompt
example["index"] = index
return example
class CustomDiffusionDataset(Dataset):
"""
A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
    It pre-processes the images and tokenizes the prompts.
"""
def __init__(
self,
concepts_list,
tokenizer,
size=512,
center_crop=False,
with_prior_preservation=False,
num_class_images=200,
hflip=False,
):
self.size = size
self.center_crop = center_crop
self.tokenizer = tokenizer
self.interpolation = PIL.Image.BILINEAR
self.instance_images_path = []
self.class_images_path = []
self.with_prior_preservation = with_prior_preservation
for concept in concepts_list:
inst_img_path = [(x, concept["instance_prompt"]) for x in Path(concept["instance_data_dir"]).iterdir() if x.is_file()]
self.instance_images_path.extend(inst_img_path)
if with_prior_preservation:
class_data_root = Path(concept["class_data_dir"])
if os.path.isdir(class_data_root):
class_images_path = list(class_data_root.iterdir())
class_prompt = [concept["class_prompt"] for _ in range(len(class_images_path))]
else:
with open(class_data_root, "r") as f:
class_images_path = f.read().splitlines()
with open(concept["class_prompt"], "r") as f:
class_prompt = f.read().splitlines()
class_img_path = [(x, y) for (x, y) in zip(class_images_path, class_prompt)]
self.class_images_path.extend(class_img_path[:num_class_images])
random.shuffle(self.instance_images_path)
self.num_instance_images = len(self.instance_images_path)
self.num_class_images = len(self.class_images_path)
self._length = max(self.num_class_images, self.num_instance_images)
self.flip = transforms.RandomHorizontalFlip(0.5 * hflip)
self.image_transforms = transforms.Compose(
[
self.flip,
transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __len__(self):
return self._length
def __getitem__(self, index):
example = {}
instance_image, instance_prompt = self.instance_images_path[index % self.num_instance_images]
instance_image = Image.open(instance_image)
if not instance_image.mode == "RGB":
instance_image = instance_image.convert("RGB")
instance_image = self.flip(instance_image)
##############################################################################
#### apply resize augmentation and create a valid image region mask ##########
##############################################################################
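        # Two of three draws sample a scale in [size//3, size]; the third samples [1.2*size, 1.4*size].
        # Scales below 0.6*size paste the resized image at a random position on a blank canvas,
        # prepend "a far away "/"very small " to the prompt, and restrict the mask to the pasted
        # region; scales above size take a random size x size crop and prepend "zoomed in "/
        # "close up "; anything in between falls back to a plain resize with a full mask.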
if np.random.randint(0, 3) < 2:
random_scale = np.random.randint(self.size // 3, self.size+1)
else:
random_scale = np.random.randint(int(1.2*self.size), int(1.4*self.size))
if random_scale % 2 == 1:
random_scale += 1
if random_scale < 0.6*self.size:
add_to_caption = np.random.choice(["a far away ", "very small "])
instance_prompt = add_to_caption + instance_prompt
cx = np.random.randint(random_scale // 2, self.size - random_scale // 2 + 1)
cy = np.random.randint(random_scale // 2, self.size - random_scale // 2 + 1)
instance_image1 = preprocess(instance_image, random_scale, self.interpolation)
instance_image = np.zeros((self.size, self.size, 3), dtype=np.float32)
instance_image[cx - random_scale // 2: cx + random_scale // 2, cy - random_scale // 2: cy + random_scale // 2, :] = instance_image1
mask = np.zeros((self.size // 8, self.size // 8))
mask[(cx - random_scale // 2) // 8 + 1: (cx + random_scale // 2) // 8 - 1, (cy - random_scale // 2) // 8 + 1: (cy + random_scale // 2) // 8 - 1] = 1.
elif random_scale > self.size:
add_to_caption = np.random.choice(["zoomed in ", "close up "])
instance_prompt = add_to_caption + instance_prompt
cx = np.random.randint(self.size // 2, random_scale - self.size // 2 + 1)
cy = np.random.randint(self.size // 2, random_scale - self.size // 2 + 1)
instance_image = preprocess(instance_image, random_scale, self.interpolation)
instance_image = instance_image[cx - self.size // 2: cx + self.size // 2, cy - self.size // 2: cy + self.size // 2, :]
mask = np.ones((self.size // 8, self.size // 8))
else:
instance_image = preprocess(instance_image, self.size, self.interpolation)
mask = np.ones((self.size // 8, self.size // 8))
########################################################################
example["instance_images"] = torch.from_numpy(instance_image).permute(2, 0, 1)
example["mask"] = torch.from_numpy(mask)
example["instance_prompt_ids"] = self.tokenizer(
instance_prompt,
truncation=True,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
).input_ids
if self.with_prior_preservation:
class_image, class_prompt = self.class_images_path[index % self.num_class_images]
class_image = Image.open(class_image)
if not class_image.mode == "RGB":
class_image = class_image.convert("RGB")
example["class_images"] = self.image_transforms(class_image)
example["class_mask"] = torch.ones_like(example["mask"])
example["class_prompt_ids"] = self.tokenizer(
class_prompt,
truncation=True,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
).input_ids
return example
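
# Illustrative usage sketch (commented out; directory paths and the tokenizer checkpoint are
# placeholders, while the concepts_list keys mirror those read by CustomDiffusionDataset above):
#
# from transformers import CLIPTokenizer
# tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
# concepts_list = [{
#     "instance_prompt": "photo of a <new1> cat",
#     "instance_data_dir": "./data/cat",
#     "class_prompt": "cat",
#     "class_data_dir": "./real_reg/samples_cat",
# }]
# dataset = CustomDiffusionDataset(concepts_list, tokenizer, size=512, with_prior_preservation=True)
# batch = collate_fn([dataset[0]], with_prior_preservation=True)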
| 20,730 | 50.061576 | 161 | py |
custom-diffusion | custom-diffusion-main/src/diffusers_composenW.py | # Copyright 2022 Adobe Research. All rights reserved.
# To view a copy of the license, visit LICENSE.md.
import sys
import os
import argparse
import torch
from scipy.linalg import lu_factor, lu_solve
sys.path.append('./')
from diffusers import StableDiffusionPipeline
from src import diffusers_sample
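# gdupdateWexact computes a closed-form update of a cross-attention projection W: it solves
# C d_i = k_i for each target text feature k_i (with C = K^T K over the regularization
# features, via LU factorization), then sets
#     Wnew = W + (Vtarget^T - W Ktarget^T)(d Ktarget^T)^{-1} d
# so that Wnew maps the target features exactly to the desired outputs, while the change is
# measured against the regularization features. The two printed numbers are the mean L2
# errors on the target rows and on the regularization rows, respectively.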
def gdupdateWexact(K, V, Ktarget1, Vtarget1, W, device='cuda'):
input_ = K
output = V
C = input_.T@input_
d = []
lu, piv = lu_factor(C.cpu().numpy())
for i in range(Ktarget1.size(0)):
sol = lu_solve((lu, piv), Ktarget1[i].reshape(-1, 1).cpu().numpy())
d.append(torch.from_numpy(sol).to(K.device))
d = torch.cat(d, 1).T
e2 = d@Ktarget1.T
e1 = (Vtarget1.T - W@Ktarget1.T)
delta = e1@torch.linalg.inv(e2)
Wnew = W + delta@d
lambda_split1 = Vtarget1.size(0)
input_ = torch.cat([Ktarget1.T, K.T], dim=1)
output = torch.cat([Vtarget1, V], dim=0)
loss = torch.norm((Wnew@input_).T - output, 2, dim=1)
print(loss[:lambda_split1].mean().item(), loss[lambda_split1:].mean().item())
return Wnew
def compose(paths, category, outpath, pretrained_model_path, regularization_prompt, prompts, save_path, device='cuda'):
model_id = pretrained_model_path
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
layers_modified = []
for name, param in pipe.unet.named_parameters():
if 'attn2.to_k' in name or 'attn2.to_v' in name:
layers_modified.append(name)
tokenizer = pipe.tokenizer
def get_text_embedding(prompts):
with torch.no_grad():
uc = []
for text in prompts:
tokens = tokenizer(text,
truncation=True,
max_length=tokenizer.model_max_length,
return_length=True,
return_overflowing_tokens=False,
padding="do_not_pad",
).input_ids
if 'photo of a' in text[:15]:
print(text)
uc.append(pipe.text_encoder(torch.cuda.LongTensor(tokens).reshape(1,-1))[0][:, 4:].reshape(-1, 768))
else:
uc.append(pipe.text_encoder(torch.cuda.LongTensor(tokens).reshape(1,-1))[0][:, 1:].reshape(-1, 768))
return torch.cat(uc, 0).float()
embeds = {}
count = 1
model2_sts = []
modifier_tokens = []
modifier_token_ids = []
categories = []
for path1, cat1 in zip(paths.split('+'), category.split('+')):
model2_st = torch.load(path1)
if 'modifier_token' in model2_st:
# composition of models with individual concept only
key = list(model2_st['modifier_token'].keys())[0]
_ = tokenizer.add_tokens(f'<new{count}>')
modifier_token_ids.append(tokenizer.convert_tokens_to_ids(f'<new{count}>'))
modifier_tokens.append(True)
embeds[f'<new{count}>'] = model2_st['modifier_token'][key]
else:
modifier_tokens.append(False)
model2_sts.append(model2_st['unet'])
categories.append(cat1)
count += 1
pipe.text_encoder.resize_token_embeddings(len(tokenizer))
token_embeds = pipe.text_encoder.get_input_embeddings().weight.data
for (x, y) in zip(modifier_token_ids, list(embeds.keys())):
token_embeds[x] = embeds[y]
print(x, y, "added embeddings")
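    # The ~200 generic regularization captions read below define the constraint set: their text
    # features (uc) and the pretrained outputs W @ uc serve as the regularization pairs passed
    # to gdupdateWexact.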
    with open(regularization_prompt, 'r') as f:
        prompt = [x.strip() for x in f.readlines()][:200]
uc = get_text_embedding(prompt)
uc_targets = []
from collections import defaultdict
uc_values = defaultdict(list)
for composing_model_count in range(len(model2_sts)):
category = categories[composing_model_count]
if modifier_tokens[composing_model_count]:
string1 = f'<new{composing_model_count+1}> {category}'
else:
string1 = f'{category}'
if 'art' in string1:
prompt = [string1] + [f"painting in the style of {string1}"]
else:
prompt = [string1] + [f"photo of a {string1}"]
uc_targets.append(get_text_embedding(prompt))
for each in layers_modified:
uc_values[each].append((model2_sts[composing_model_count][each].to(device)@uc_targets[-1].T).T)
uc_targets = torch.cat(uc_targets, 0)
removal_indices = []
for i in range(uc_targets.size(0)):
for j in range(i+1, uc_targets.size(0)):
if (uc_targets[i]-uc_targets[j]).abs().mean() == 0:
removal_indices.append(j)
removal_indices = list(set(removal_indices))
uc_targets = torch.stack([uc_targets[i] for i in range(uc_targets.size(0)) if i not in removal_indices], 0)
for each in layers_modified:
uc_values[each] = torch.cat(uc_values[each], 0)
uc_values[each] = torch.stack([uc_values[each][i] for i in range(uc_values[each].size(0)) if i not in removal_indices], 0)
print(uc_values[each].size(), each)
print("target size:", uc_targets.size())
new_weights = {'unet': {}}
for each in layers_modified:
W = pipe.unet.state_dict()[each].float()
values = (W@uc.T).T
input_target = uc_targets
output_target = uc_values[each]
Wnew = gdupdateWexact(uc[:values.shape[0]],
values,
input_target,
output_target,
W.clone(),
)
new_weights['unet'][each] = Wnew
print(Wnew.size())
new_weights['modifier_token'] = embeds
os.makedirs(f'{save_path}/{outpath}', exist_ok=True)
torch.save(new_weights, f'{save_path}/{outpath}/delta.bin')
if prompts is not None:
if os.path.exists(prompts):
diffusers_sample.sample(model_id, f'{save_path}/{outpath}/delta.bin', prompts, prompt=None, compress=False, freeze_model='crossattn_kv', batch_size=1)
else:
diffusers_sample.sample(model_id, f'{save_path}/{outpath}/delta.bin', from_file=None, prompt=prompts, compress=False, freeze_model='crossattn_kv', batch_size=1)
def parse_args():
parser = argparse.ArgumentParser('', add_help=False)
parser.add_argument('--paths', help='+ separated list of checkpoints', required=True,
type=str)
parser.add_argument('--save_path', help='folder name to save optimized weights', default='optimized_logs',
type=str)
parser.add_argument('--categories', help='+ separated list of categories of the models', required=True,
type=str)
parser.add_argument('--prompts', help='prompts for composition model (can be a file or string)', default=None,
type=str)
parser.add_argument('--ckpt', required=True,
type=str)
parser.add_argument('--regularization_prompt', default='./data/regularization_captions.txt',
type=str)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
paths = args.paths
categories = args.categories
if ' ' in categories:
temp = categories.replace(' ', '_')
else:
temp = categories
outpath = '_'.join(['optimized', temp])
compose(paths, categories, outpath, args.ckpt, args.regularization_prompt, args.prompts, args.save_path)
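
# Illustrative invocation (paths and the checkpoint id are placeholders, based on the argparse above):
# python src/diffusers_composenW.py \
#     --paths logs/cat/delta.bin+logs/chair/delta.bin \
#     --categories "cat+chair" \
#     --ckpt "CompVis/stable-diffusion-v1-4" \
#     --prompts "a <new1> cat sitting on a <new2> chair"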
| 7,508 | 37.706186 | 172 | py |
custom-diffusion | custom-diffusion-main/src/model.py | # This code is built from the Stable Diffusion repository: https://github.com/CompVis/stable-diffusion.
# Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors.
# CreativeML Open RAIL-M
#
# ==========================================================================================
#
# Adobe’s modifications are Copyright 2022 Adobe Research. All rights reserved.
# Adobe’s modifications are licensed under the Adobe Research License. To view a copy of the license, visit
# LICENSE.md.
#
# ==========================================================================================
#
# CreativeML Open RAIL-M License
#
# Section I: PREAMBLE
# Multimodal generative models are being widely adopted and used, and have the potential to transform the way artists, among other individuals, conceive and benefit from AI or ML technologies as a tool for content creation.
# Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations.
# In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the Model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. At the same time, we strive to promote open and responsible research on generative models for art and content generation.
# Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this License aims to strike a balance between both in order to enable responsible open-science in the field of AI.
# This License governs the use of the model (and its derivatives) and is informed by the model card associated with the model.
# NOW THEREFORE, You and Licensor agree as follows:
# 1. Definitions
# - "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document.
# - "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License.
# - "Output" means the results of operating a Model as embodied in informational content resulting therefrom.
# - "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material.
# - "Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model.
# - "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any.
# - "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access.
# - "Licensor" means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model.
# - "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator.
# - "Third Parties" means individuals or legal entities that are not under common control with Licensor or You.
# - "Contribution" means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
# - "Contributor" means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model.
# Section II: INTELLECTUAL PROPERTY RIGHTS
# Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III.
# 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model.
# 3. Grant of Patent License. Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed.
# Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION
# 4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions:
# Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material.
# You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License;
# You must cause any modified files to carry prominent notices stating that You changed the files;
# You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model.
# You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License.
# 5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5).
# 6. The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License.
# Section IV: OTHER PROVISIONS
# 7. Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License, update the Model through electronic means, or modify the Output of the Model based on updates. You shall undertake reasonable efforts to use the latest version of the Model.
# 8. Trademarks and related. Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors.
# 9. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License.
# 10. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
# 11. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
# 12. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein.
# END OF TERMS AND CONDITIONS
# Attachment A
# Use Restrictions
# You agree not to use the Model or Derivatives of the Model:
# - In any way that violates any applicable national, federal, state, local or international law or regulation;
# - For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
# - To generate or disseminate verifiably false information and/or content with the purpose of harming others;
# - To generate or disseminate personal identifiable information that can be used to harm an individual;
# - To defame, disparage or otherwise harass others;
# - For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation;
# - For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics;
# - To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
# - For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories;
# - To provide medical advice and medical results interpretation;
# - To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use).
import torch
from einops import rearrange, repeat
from torch import nn, einsum
from ldm.models.diffusion.ddpm import LatentDiffusion as LatentDiffusion
from ldm.util import default
from ldm.modules.attention import BasicTransformerBlock as BasicTransformerBlock
from ldm.modules.attention import CrossAttention as CrossAttention
from ldm.util import log_txt_as_img, exists, ismap, isimage, mean_flat, count_params, instantiate_from_config
from torchvision.utils import make_grid
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from torch.optim.lr_scheduler import LambdaLR
import numpy as np
class CustomDiffusion(LatentDiffusion):
def __init__(self,
freeze_model='crossattn-kv',
cond_stage_trainable=False,
add_token=False,
*args, **kwargs):
self.freeze_model = freeze_model
self.add_token = add_token
self.cond_stage_trainable = cond_stage_trainable
super().__init__(cond_stage_trainable=cond_stage_trainable, *args, **kwargs)
if self.freeze_model == 'crossattn-kv':
for x in self.model.diffusion_model.named_parameters():
if 'transformer_blocks' not in x[0]:
x[1].requires_grad = False
elif not ('attn2.to_k' in x[0] or 'attn2.to_v' in x[0]):
x[1].requires_grad = False
else:
x[1].requires_grad = True
elif self.freeze_model == 'crossattn':
for x in self.model.diffusion_model.named_parameters():
if 'transformer_blocks' not in x[0]:
x[1].requires_grad = False
elif not 'attn2' in x[0]:
x[1].requires_grad = False
else:
x[1].requires_grad = True
def change_checkpoint(model):
for layer in model.children():
if type(layer) == BasicTransformerBlock:
layer.checkpoint = False
else:
change_checkpoint(layer)
change_checkpoint(self.model.diffusion_model)
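        # new_forward below replaces CrossAttention.forward: for cross-attention calls
        # (context is not None) the key/value features of the first context token are
        # detached, so gradients reach to_k/to_v only through the remaining text tokens.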
def new_forward(self, x, context=None, mask=None):
h = self.heads
crossattn = False
if context is not None:
crossattn = True
q = self.to_q(x)
context = default(context, x)
k = self.to_k(context)
v = self.to_v(context)
if crossattn:
modifier = torch.ones_like(k)
modifier[:, :1, :] = modifier[:, :1, :]*0.
k = modifier*k + (1-modifier)*k.detach()
v = modifier*v + (1-modifier)*v.detach()
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
attn = sim.softmax(dim=-1)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
return self.to_out(out)
def change_forward(model):
for layer in model.children():
if type(layer) == CrossAttention:
bound_method = new_forward.__get__(layer, layer.__class__)
setattr(layer, 'forward', bound_method)
else:
change_forward(layer)
change_forward(self.model.diffusion_model)
def configure_optimizers(self):
lr = self.learning_rate
params = []
if self.freeze_model == 'crossattn-kv':
for x in self.model.diffusion_model.named_parameters():
if 'transformer_blocks' in x[0]:
if 'attn2.to_k' in x[0] or 'attn2.to_v' in x[0]:
params += [x[1]]
print(x[0])
elif self.freeze_model == 'crossattn':
for x in self.model.diffusion_model.named_parameters():
if 'transformer_blocks' in x[0]:
if 'attn2' in x[0]:
params += [x[1]]
print(x[0])
else:
params = list(self.model.parameters())
if self.cond_stage_trainable:
print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
if self.add_token:
params = params + list(self.cond_stage_model.transformer.text_model.embeddings.token_embedding.parameters())
else:
params = params + list(self.cond_stage_model.parameters())
if self.learn_logvar:
print('Diffusion model optimizing logvar')
params.append(self.logvar)
opt = torch.optim.AdamW(params, lr=lr)
if self.use_scheduler:
assert 'target' in self.scheduler_config
scheduler = instantiate_from_config(self.scheduler_config)
print("Setting up LambdaLR scheduler...")
scheduler = [
{
'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
'interval': 'step',
'frequency': 1
}]
return [opt], scheduler
return opt
def p_losses(self, x_start, cond, t, mask=None, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
model_output = self.apply_model(x_noisy, t, cond)
loss_dict = {}
prefix = 'train' if self.training else 'val'
if self.parameterization == "x0":
target = x_start
elif self.parameterization == "eps":
target = noise
else:
raise NotImplementedError()
loss_simple = self.get_loss(model_output, target, mean=False)
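        # If a mask is provided (from the dataset's resize augmentation), average the loss
        # only over the valid image region instead of the whole latent.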
if mask is not None:
loss_simple = (loss_simple*mask).sum([1, 2, 3])/mask.sum([1, 2, 3])
else:
loss_simple = loss_simple.mean([1, 2, 3])
loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
logvar_t = (self.logvar.to(self.device))[t]
loss = loss_simple / torch.exp(logvar_t) + logvar_t
# loss = loss_simple / torch.exp(self.logvar) + self.logvar
if self.learn_logvar:
loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
loss_dict.update({'logvar': self.logvar.data.mean()})
loss = self.l_simple_weight * loss.mean()
loss_vlb = self.get_loss(model_output, target, mean=False)
if mask is not None:
loss_vlb = (loss_vlb*mask).sum([1, 2, 3])/mask.sum([1, 2, 3])
else:
loss_vlb = loss_vlb.mean([1, 2, 3])
loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
loss += (self.original_elbo_weight * loss_vlb)
loss_dict.update({f'{prefix}/loss': loss})
return loss, loss_dict
@torch.no_grad()
def get_input_withmask(self, batch, **args):
out = super().get_input(batch, self.first_stage_key, **args)
mask = batch["mask"]
if len(mask.shape) == 3:
mask = mask[..., None]
mask = rearrange(mask, 'b h w c -> b c h w')
mask = mask.to(memory_format=torch.contiguous_format).float()
out += [mask]
return out
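    # With prior preservation the dataloader yields [instance_batch, regularization_batch];
    # the two diffusion losses are simply summed.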
def training_step(self, batch, batch_idx):
if isinstance(batch, list):
train_batch = batch[0]
train2_batch = batch[1]
loss_train, loss_dict = self.shared_step(train_batch)
loss_train2, _ = self.shared_step(train2_batch)
loss = loss_train + loss_train2
else:
train_batch = batch
loss, loss_dict = self.shared_step(train_batch)
self.log_dict(loss_dict, prog_bar=True,
logger=True, on_step=True, on_epoch=True)
self.log("global_step", self.global_step,
prog_bar=True, logger=True, on_step=True, on_epoch=False)
if self.use_scheduler:
lr = self.optimizers().param_groups[0]['lr']
self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
return loss
def shared_step(self, batch, **kwargs):
x, c, mask = self.get_input_withmask(batch, **kwargs)
loss = self(x, c, mask=mask)
return loss
@torch.no_grad()
def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
plot_diffusion_rows=True, **kwargs):
use_ddim = ddim_steps is not None
log = dict()
if isinstance(batch, list):
batch = batch[0]
z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
return_first_stage_outputs=True,
force_c_encode=True,
return_original_cond=True,
bs=N)
N = min(x.shape[0], N)
n_row = min(x.shape[0], n_row)
log["inputs"] = x
log["reconstruction"] = xrec
if self.model.conditioning_key is not None:
if hasattr(self.cond_stage_model, "decode"):
xc = self.cond_stage_model.decode(c)
log["conditioning"] = xc
elif self.cond_stage_key in ["caption"]:
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
log["conditioning"] = xc
elif self.cond_stage_key == 'class_label':
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
log['conditioning'] = xc
elif isimage(xc):
log["conditioning"] = xc
if ismap(xc):
log["original_conditioning"] = self.to_rgb(xc)
if plot_diffusion_rows:
# get diffusion row
diffusion_row = list()
z_start = z[:n_row]
for t in range(self.num_timesteps):
if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
t = t.to(self.device).long()
noise = torch.randn_like(z_start)
z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
diffusion_row.append(self.decode_first_stage(z_noisy))
diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
log["diffusion_row"] = diffusion_grid
if sample:
# get denoise row
with self.ema_scope("Plotting"):
unconditional_guidance_scale=6.
unconditional_conditioning = self.get_learned_conditioning(len(c) * [""])
samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
ddim_steps=ddim_steps,eta=ddim_eta,
unconditional_conditioning=unconditional_conditioning, unconditional_guidance_scale=unconditional_guidance_scale)
# samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
x_samples = self.decode_first_stage(samples)
log["samples_scaled"] = x_samples
if plot_denoise_rows:
denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
log["denoise_row"] = denoise_grid
if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
self.first_stage_model, IdentityFirstStage):
# also display when quantizing x0 while sampling
with self.ema_scope("Plotting Quantized Denoised"):
samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
ddim_steps=ddim_steps,eta=ddim_eta,
quantize_denoised=True)
# samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
# quantize_denoised=True)
x_samples = self.decode_first_stage(samples.to(self.device))
log["samples_x0_quantized"] = x_samples
if inpaint:
# make a simple center square
b, h, w = z.shape[0], z.shape[2], z.shape[3]
mask = torch.ones(N, h, w).to(self.device)
# zeros will be filled in
mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
mask = mask[:, None, ...]
with self.ema_scope("Plotting Inpaint"):
samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta,
ddim_steps=ddim_steps, x0=z[:N], mask=mask)
x_samples = self.decode_first_stage(samples.to(self.device))
log["samples_inpainting"] = x_samples
log["mask"] = mask
# outpaint
with self.ema_scope("Plotting Outpaint"):
samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta,
ddim_steps=ddim_steps, x0=z[:N], mask=mask)
x_samples = self.decode_first_stage(samples.to(self.device))
log["samples_outpainting"] = x_samples
if plot_progressive_rows:
with self.ema_scope("Plotting Progressives"):
img, progressives = self.progressive_denoising(c,
shape=(self.channels, self.image_size, self.image_size),
batch_size=N)
prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
log["progressive_row"] = prog_row
if return_keys:
if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
return log
else:
return {key: log[key] for key in return_keys}
return log
| 29,720 | 70.102871 | 1,097 | py |
custom-diffusion | custom-diffusion-main/src/custom_modules.py | # This code is built from the Huggingface repository: https://github.com/huggingface/transformers/tree/main/src/transformers/models/clip.
# Copyright 2018- The Hugging Face team. All rights reserved.
# Apache License
# Version 2.0, January 2004
# http://www.apache.org/licenses/
#
# ==========================================================================================
#
# Adobe’s modifications are Copyright 2022 Adobe Research. All rights reserved.
# Adobe’s modifications are licensed under the Adobe Research License. To view a copy of the license, visit
# LICENSE.md.
#
# ==========================================================================================
#
# Apache License, Version 2.0, January 2004
# 1. Definitions.
# "License" shall mean the terms and conditions for use, reproduction,
# and distribution as defined by Sections 1 through 9 of this document.
# "Licensor" shall mean the copyright owner or entity authorized by
# the copyright owner that is granting the License.
# "Legal Entity" shall mean the union of the acting entity and all
# other entities that control, are controlled by, or are under common
# control with that entity. For the purposes of this definition,
# "control" means (i) the power, direct or indirect, to cause the
# direction or management of such entity, whether by contract or
# otherwise, or (ii) ownership of fifty percent (50%) or more of the
# outstanding shares, or (iii) beneficial ownership of such entity.
# "You" (or "Your") shall mean an individual or Legal Entity
# exercising permissions granted by this License.
# "Source" form shall mean the preferred form for making modifications,
# including but not limited to software source code, documentation
# source, and configuration files.
# "Object" form shall mean any form resulting from mechanical
# transformation or translation of a Source form, including but
# not limited to compiled object code, generated documentation,
# and conversions to other media types.
# "Work" shall mean the work of authorship, whether in Source or
# Object form, made available under the License, as indicated by a
# copyright notice that is included in or attached to the work
# (an example is provided in the Appendix below).
# "Derivative Works" shall mean any work, whether in Source or Object
# form, that is based on (or derived from) the Work and for which the
# editorial revisions, annotations, elaborations, or other modifications
# represent, as a whole, an original work of authorship. For the purposes
# of this License, Derivative Works shall not include works that remain
# separable from, or merely link (or bind by name) to the interfaces of,
# the Work and Derivative Works thereof.
# "Contribution" shall mean any work of authorship, including
# the original version of the Work and any modifications or additions
# to that Work or Derivative Works thereof, that is intentionally
# submitted to Licensor for inclusion in the Work by the copyright owner
# or by an individual or Legal Entity authorized to submit on behalf of
# the copyright owner. For the purposes of this definition, "submitted"
# means any form of electronic, verbal, or written communication sent
# to the Licensor or its representatives, including but not limited to
# communication on electronic mailing lists, source code control systems,
# and issue tracking systems that are managed by, or on behalf of, the
# Licensor for the purpose of discussing and improving the Work, but
# excluding communication that is conspicuously marked or otherwise
# designated in writing by the copyright owner as "Not a Contribution."
# "Contributor" shall mean Licensor and any individual or Legal Entity
# on behalf of whom a Contribution has been received by Licensor and
# subsequently incorporated within the Work.
# 2. Grant of Copyright License. Subject to the terms and conditions of
# this License, each Contributor hereby grants to You a perpetual,
# worldwide, non-exclusive, no-charge, royalty-free, irrevocable
# copyright license to reproduce, prepare Derivative Works of,
# publicly display, publicly perform, sublicense, and distribute the
# Work and such Derivative Works in Source or Object form.
# 3. Grant of Patent License. Subject to the terms and conditions of
# this License, each Contributor hereby grants to You a perpetual,
# worldwide, non-exclusive, no-charge, royalty-free, irrevocable
# (except as stated in this section) patent license to make, have made,
# use, offer to sell, sell, import, and otherwise transfer the Work,
# where such license applies only to those patent claims licensable
# by such Contributor that are necessarily infringed by their
# Contribution(s) alone or by combination of their Contribution(s)
# with the Work to which such Contribution(s) was submitted. If You
# institute patent litigation against any entity (including a
# cross-claim or counterclaim in a lawsuit) alleging that the Work
# or a Contribution incorporated within the Work constitutes direct
# or contributory patent infringement, then any patent licenses
# granted to You under this License for that Work shall terminate
# as of the date such litigation is filed.
# 4. Redistribution. You may reproduce and distribute copies of the
# Work or Derivative Works thereof in any medium, with or without
# modifications, and in Source or Object form, provided that You
# meet the following conditions:
# (a) You must give any other recipients of the Work or
# Derivative Works a copy of this License; and
# (b) You must cause any modified files to carry prominent notices
# stating that You changed the files; and
# (c) You must retain, in the Source form of any Derivative Works
# that You distribute, all copyright, patent, trademark, and
# attribution notices from the Source form of the Work,
# excluding those notices that do not pertain to any part of
# the Derivative Works; and
# (d) If the Work includes a "NOTICE" text file as part of its
# distribution, then any Derivative Works that You distribute must
# include a readable copy of the attribution notices contained
# within such NOTICE file, excluding those notices that do not
# pertain to any part of the Derivative Works, in at least one
# of the following places: within a NOTICE text file distributed
# as part of the Derivative Works; within the Source form or
# documentation, if provided along with the Derivative Works; or,
# within a display generated by the Derivative Works, if and
# wherever such third-party notices normally appear. The contents
# of the NOTICE file are for informational purposes only and
# do not modify the License. You may add Your own attribution
# notices within Derivative Works that You distribute, alongside
# or as an addendum to the NOTICE text from the Work, provided
# that such additional attribution notices cannot be construed
# as modifying the License.
# You may add Your own copyright statement to Your modifications and
# may provide additional or different license terms and conditions
# for use, reproduction, or distribution of Your modifications, or
# for any such Derivative Works as a whole, provided Your use,
# reproduction, and distribution of the Work otherwise complies with
# the conditions stated in this License.
# 5. Submission of Contributions. Unless You explicitly state otherwise,
# any Contribution intentionally submitted for inclusion in the Work
# by You to the Licensor shall be under the terms and conditions of
# this License, without any additional terms or conditions.
# Notwithstanding the above, nothing herein shall supersede or modify
# the terms of any separate license agreement you may have executed
# with Licensor regarding such Contributions.
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor,
# except as required for reasonable and customary use in describing the
# origin of the Work and reproducing the content of the NOTICE file.
# 7. Disclaimer of Warranty. Unless required by applicable law or
# agreed to in writing, Licensor provides the Work (and each
# Contributor provides its Contributions) on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied, including, without limitation, any warranties or conditions
# of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
# PARTICULAR PURPOSE. You are solely responsible for determining the
# appropriateness of using or redistributing the Work and assume any
# risks associated with Your exercise of permissions under this License.
# 8. Limitation of Liability. In no event and under no legal theory,
# whether in tort (including negligence), contract, or otherwise,
# unless required by applicable law (such as deliberate and grossly
# negligent acts) or agreed to in writing, shall any Contributor be
# liable to You for damages, including any direct, indirect, special,
# incidental, or consequential damages of any character arising as a
# result of this License or out of the use or inability to use the
# Work (including but not limited to damages for loss of goodwill,
# work stoppage, computer failure or malfunction, or any and all
# other commercial damages or losses), even if such Contributor
# has been advised of the possibility of such damages.
# 9. Accepting Warranty or Additional Liability. While redistributing
# the Work or Derivative Works thereof, You may choose to offer,
# and charge a fee for, acceptance of support, warranty, indemnity,
# or other liability obligations and/or rights consistent with this
# License. However, in accepting such obligations, You may act only
# on Your own behalf and on Your sole responsibility, not on behalf
# of any other Contributor, and only if You agree to indemnify,
# defend, and hold each Contributor harmless for any liability
# incurred by, or claims asserted against, such Contributor by reason
# of your accepting any such warranty or additional liability.
# END OF TERMS AND CONDITIONS
# APPENDIX: How to apply the Apache License to your work.
# To apply the Apache License to your work, attach the following
# boilerplate notice, with the fields enclosed by brackets "[]"
# replaced with your own identifying information. (Don't include
# the brackets!) The text should be enclosed in the appropriate
# comment syntax for the file format. We also recommend that a
# file or class name and description of purpose be included on the
# same "printed page" as the copyright notice for easier
# identification within third-party archives.
# Copyright [yyyy] [name of copyright owner]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
import torch
import torch.nn as nn
import transformers
from transformers import CLIPTokenizer, CLIPTextModel
class AbstractEncoder(nn.Module):
def __init__(self):
super().__init__()
def encode(self, *args, **kwargs):
raise NotImplementedError
class FrozenCLIPEmbedderWrapper(AbstractEncoder):
"""Uses the CLIP transformer encoder for text (from Hugging Face)"""
def __init__(self, modifier_token, version="openai/clip-vit-large-patch14", device="cuda", max_length=77):
super().__init__()
self.tokenizer = CLIPTokenizer.from_pretrained(version)
self.transformer = CLIPTextModel.from_pretrained(version)
self.device = device
self.max_length = max_length
self.modifier_token = modifier_token
if '+' in self.modifier_token:
self.modifier_token = self.modifier_token.split('+')
else:
self.modifier_token = [self.modifier_token]
self.add_token()
self.freeze()
def add_token(self):
self.modifier_token_id = []
token_embeds1 = self.transformer.get_input_embeddings().weight.data
for each_modifier_token in self.modifier_token:
num_added_tokens = self.tokenizer.add_tokens(each_modifier_token)
modifier_token_id = self.tokenizer.convert_tokens_to_ids(each_modifier_token)
self.modifier_token_id.append(modifier_token_id)
self.transformer.resize_token_embeddings(len(self.tokenizer))
token_embeds = self.transformer.get_input_embeddings().weight.data
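        # Each newly added modifier token is initialized from the embedding of an existing
        # vocabulary entry (token ids 42170, 47629, 43514), rather than from random weights.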
token_embeds[self.modifier_token_id[-1]] = torch.nn.Parameter(token_embeds[42170], requires_grad=True)
if len(self.modifier_token) == 2:
token_embeds[self.modifier_token_id[-2]] = torch.nn.Parameter(token_embeds[47629], requires_grad=True)
if len(self.modifier_token) == 3:
token_embeds[self.modifier_token_id[-3]] = torch.nn.Parameter(token_embeds[43514], requires_grad=True)
def custom_forward(self, hidden_states, input_ids):
r"""
Returns:
"""
input_shape = hidden_states.size()
bsz, seq_len = input_shape[:2]
if version.parse(transformers.__version__) >= version.parse('4.21'):
causal_attention_mask = self.transformer.text_model._build_causal_attention_mask(bsz, seq_len, hidden_states.dtype).to(
hidden_states.device
)
else:
causal_attention_mask = self.transformer.text_model._build_causal_attention_mask(bsz, seq_len).to(
hidden_states.device
)
encoder_outputs = self.transformer.text_model.encoder(
inputs_embeds=hidden_states,
causal_attention_mask=causal_attention_mask,
)
last_hidden_state = encoder_outputs[0]
last_hidden_state = self.transformer.text_model.final_layer_norm(last_hidden_state)
return last_hidden_state
def freeze(self):
self.transformer = self.transformer.eval()
for param in self.transformer.text_model.encoder.parameters():
param.requires_grad = False
for param in self.transformer.text_model.final_layer_norm.parameters():
param.requires_grad = False
for param in self.transformer.text_model.embeddings.position_embedding.parameters():
param.requires_grad = False
def forward(self, text):
batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
tokens = batch_encoding["input_ids"].to(self.device)
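        # indices marks the positions of the modifier tokens; below, embeddings at all other
        # positions are detached so only the new token embeddings receive gradients.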
indices = tokens == self.modifier_token_id[-1]
for token_id in self.modifier_token_id:
indices |= tokens == token_id
indices = (indices*1).unsqueeze(-1)
input_shape = tokens.size()
tokens = tokens.view(-1, input_shape[-1])
hidden_states = self.transformer.text_model.embeddings(input_ids=tokens)
hidden_states = (1-indices)*hidden_states.detach() + indices*hidden_states
z = self.custom_forward(hidden_states, tokens)
return z
def encode(self, text):
return self(text)
if __name__ == "__main__":
from ldm.util import count_params
    # The wrapper requires a modifier token; '<new1>' follows the convention used elsewhere in this repo.
    model = FrozenCLIPEmbedderWrapper(modifier_token='<new1>')
count_params(model, verbose=True)
| 16,723 | 50.937888 | 137 | py |
custom-diffusion | custom-diffusion-main/src/finetune_data.py | # This code is built from the Stable Diffusion repository: https://github.com/CompVis/stable-diffusion.
# Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors.
# CreativeML Open RAIL-M
#
# ==========================================================================================
#
# Adobe’s modifications are Copyright 2022 Adobe Research. All rights reserved.
# Adobe’s modifications are licensed under the Adobe Research License. To view a copy of the license, visit
# LICENSE.md.
#
# ==========================================================================================
#
# CreativeML Open RAIL-M License
#
# Section I: PREAMBLE
# Multimodal generative models are being widely adopted and used, and have the potential to transform the way artists, among other individuals, conceive and benefit from AI or ML technologies as a tool for content creation.
# Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations.
# In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the Model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. At the same time, we strive to promote open and responsible research on generative models for art and content generation.
# Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this License aims to strike a balance between both in order to enable responsible open-science in the field of AI.
# This License governs the use of the model (and its derivatives) and is informed by the model card associated with the model.
# NOW THEREFORE, You and Licensor agree as follows:
# 1. Definitions
# - "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document.
# - "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License.
# - "Output" means the results of operating a Model as embodied in informational content resulting therefrom.
# - "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material.
# - "Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model.
# - "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any.
# - "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access.
# - "Licensor" means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model.
# - "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator.
# - "Third Parties" means individuals or legal entities that are not under common control with Licensor or You.
# - "Contribution" means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
# - "Contributor" means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model.
# Section II: INTELLECTUAL PROPERTY RIGHTS
# Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III.
# 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model.
# 3. Grant of Patent License. Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed.
# Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION
# 4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions:
# Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material.
# You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License;
# You must cause any modified files to carry prominent notices stating that You changed the files;
# You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model.
# You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License.
# 5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5).
# 6. The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License.
# Section IV: OTHER PROVISIONS
# 7. Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License, update the Model through electronic means, or modify the Output of the Model based on updates. You shall undertake reasonable efforts to use the latest version of the Model.
# 8. Trademarks and related. Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors.
# 9. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License.
# 10. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
# 11. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
# 12. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein.
# END OF TERMS AND CONDITIONS
# Attachment A
# Use Restrictions
# You agree not to use the Model or Derivatives of the Model:
# - In any way that violates any applicable national, federal, state, local or international law or regulation;
# - For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
# - To generate or disseminate verifiably false information and/or content with the purpose of harming others;
# - To generate or disseminate personal identifiable information that can be used to harm an individual;
# - To defame, disparage or otherwise harass others;
# - For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation;
# - For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics;
# - To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
# - For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories;
# - To provide medical advice and medical results interpretation;
# - To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use).
import os
import numpy as np
import PIL
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
templates_small = [
'photo of a {}',
]
templates_small_style = [
'painting in the style of {}',
]
def isimage(path):
    if 'png' in path.lower() or 'jpg' in path.lower() or 'jpeg' in path.lower():
        return True
    return False
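# Dataset used for Custom Diffusion fine-tuning: it pairs the target-concept images in
# `datapath` with optional regularization images in `reg_datapath`, builds captions either
# from a fixed template ("photo of a {}" / "painting in the style of {}") or from caption
# files, and returns the image, its caption, and a mask at 1/8 resolution marking the
# valid region of the (possibly rescaled) image in latent space.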
class MaskBase(Dataset):
def __init__(self,
datapath,
reg_datapath=None,
caption=None,
reg_caption=None,
size=512,
interpolation="bicubic",
flip_p=0.5,
aug=True,
style=False,
repeat=0.
):
self.aug = aug
self.repeat = repeat
self.style = style
self.templates_small = templates_small
if self.style:
self.templates_small = templates_small_style
if os.path.isdir(datapath):
self.image_paths1 = [os.path.join(datapath, file_path) for file_path in os.listdir(datapath) if isimage(file_path)]
else:
with open(datapath, "r") as f:
self.image_paths1 = f.read().splitlines()
self._length1 = len(self.image_paths1)
self.image_paths2 = []
self._length2 = 0
if reg_datapath is not None:
if os.path.isdir(reg_datapath):
self.image_paths2 = [os.path.join(reg_datapath, file_path) for file_path in os.listdir(reg_datapath) if isimage(file_path)]
else:
with open(reg_datapath, "r") as f:
self.image_paths2 = f.read().splitlines()
self._length2 = len(self.image_paths2)
self.labels = {
"relative_file_path1_": [x for x in self.image_paths1],
"relative_file_path2_": [x for x in self.image_paths2],
}
self.size = size
self.interpolation = {"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
}[interpolation]
self.flip = transforms.RandomHorizontalFlip(p=flip_p)
self.caption = caption
if os.path.exists(self.caption):
self.caption = [x.strip() for x in open(caption, 'r').readlines()]
self.reg_caption = reg_caption
if os.path.exists(self.reg_caption):
self.reg_caption = [x.strip() for x in open(reg_caption, 'r').readlines()]
def __len__(self):
if self._length2 > 0:
return 2*self._length2
elif self.repeat > 0:
return self._length1*self.repeat
else:
return self._length1
def __getitem__(self, i):
example = {}
if i > self._length2 or self._length2 == 0:
image = Image.open(self.labels["relative_file_path1_"][i % self._length1])
if isinstance(self.caption, str):
example["caption"] = np.random.choice(self.templates_small).format(self.caption)
else:
example["caption"] = self.caption[i % min(self._length1, len(self.caption)) ]
else:
image = Image.open(self.labels["relative_file_path2_"][i % self._length2])
if isinstance(self.reg_caption, str):
example["caption"] = np.random.choice(self.templates_small).format(self.reg_caption)
else:
example["caption"] = self.reg_caption[i % self._length2]
if not image.mode == "RGB":
image = image.convert("RGB")
# default to score-sde preprocessing
img = np.array(image).astype(np.uint8)
crop = min(img.shape[0], img.shape[1])
        h, w = img.shape[0], img.shape[1]
img = img[(h - crop) // 2:(h + crop) // 2,
(w - crop) // 2:(w + crop) // 2]
image = Image.fromarray(img)
image = self.flip(image)
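        # Augmentation applies to target-concept images only: with probability 2/3 the image
        # is downscaled and pasted onto a blank canvas (caption prefixed with "a far away "/
        # "very small " when the scale drops below 0.6x), otherwise it is upscaled and randomly
        # cropped (caption prefixed with "zoomed in "/"close up "); the mask tracks the occupied
        # region at 1/8 resolution. Regularization images are simply resized to `size`.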
if i > self._length2 or self._length2 == 0:
if self.aug:
if np.random.randint(0, 3) < 2:
random_scale = np.random.randint(self.size // 3, self.size+1)
else:
random_scale = np.random.randint(int(1.2*self.size), int(1.4*self.size))
if random_scale % 2 == 1:
random_scale += 1
else:
random_scale = self.size
if random_scale < 0.6*self.size:
add_to_caption = np.random.choice(["a far away ", "very small "])
example["caption"] = add_to_caption + example["caption"]
cx = np.random.randint(random_scale // 2, self.size - random_scale // 2 + 1)
cy = np.random.randint(random_scale // 2, self.size - random_scale // 2 + 1)
image = image.resize((random_scale, random_scale), resample=self.interpolation)
image = np.array(image).astype(np.uint8)
image = (image / 127.5 - 1.0).astype(np.float32)
input_image1 = np.zeros((self.size, self.size, 3), dtype=np.float32)
input_image1[cx - random_scale // 2: cx + random_scale // 2, cy - random_scale // 2: cy + random_scale // 2, :] = image
mask = np.zeros((self.size // 8, self.size // 8))
mask[(cx - random_scale // 2) // 8 + 1: (cx + random_scale // 2) // 8 - 1, (cy - random_scale // 2) // 8 + 1: (cy + random_scale // 2) // 8 - 1] = 1.
elif random_scale > self.size:
add_to_caption = np.random.choice(["zoomed in ", "close up "])
example["caption"] = add_to_caption + example["caption"]
cx = np.random.randint(self.size // 2, random_scale - self.size // 2 + 1)
cy = np.random.randint(self.size // 2, random_scale - self.size // 2 + 1)
image = image.resize((random_scale, random_scale), resample=self.interpolation)
image = np.array(image).astype(np.uint8)
image = (image / 127.5 - 1.0).astype(np.float32)
input_image1 = image[cx - self.size // 2: cx + self.size // 2, cy - self.size // 2: cy + self.size // 2, :]
mask = np.ones((self.size // 8, self.size // 8))
else:
if self.size is not None:
image = image.resize((self.size, self.size), resample=self.interpolation)
input_image1 = np.array(image).astype(np.uint8)
input_image1 = (input_image1 / 127.5 - 1.0).astype(np.float32)
mask = np.ones((self.size // 8, self.size // 8))
else:
if self.size is not None:
image = image.resize((self.size, self.size), resample=self.interpolation)
input_image1 = np.array(image).astype(np.uint8)
input_image1 = (input_image1 / 127.5 - 1.0).astype(np.float32)
mask = np.ones((self.size // 8, self.size // 8))
example["image"] = input_image1
example["mask"] = mask
return example
| 22,159 | 81.686567 | 1,097 | py |
custom-diffusion | custom-diffusion-main/src/get_deltas.py | # Copyright 2022 Adobe Research. All rights reserved.
# To view a copy of the license, visit LICENSE.md.
import os
import argparse
import glob
import torch
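# Reduce full fine-tuned checkpoints to "delta" checkpoints: only the cross-attention
# to_k/to_v weights (the parameters updated by Custom Diffusion) and, optionally, the
# embeddings of newly added tokens are kept; the original full checkpoint is then deleted.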
def main(path, newtoken=0):
layers = []
for files in glob.glob(f'{path}/checkpoints/*'):
if ('=' in files or '_' in files) and 'delta' not in files:
print(files)
if '=' in files:
epoch_number = files.split('=')[1].split('.ckpt')[0]
elif '_' in files:
epoch_number = files.split('/')[-1].split('.ckpt')[0]
st = torch.load(files)["state_dict"]
if len(layers) == 0:
for key in list(st.keys()):
if 'attn2.to_k' in key or 'attn2.to_v' in key:
layers.append(key)
print(layers)
st_delta = {'state_dict': {}}
for each in layers:
st_delta['state_dict'][each] = st[each].clone()
print('/'.join(files.split('/')[:-1]) + f'/delta_epoch={epoch_number}.ckpt')
num_tokens = st['cond_stage_model.transformer.text_model.embeddings.token_embedding.weight'].shape[0]
if newtoken > 0:
print("saving the optimized embedding")
st_delta['state_dict']['embed'] = st['cond_stage_model.transformer.text_model.embeddings.token_embedding.weight'][-newtoken:].clone()
print(st_delta['state_dict']['embed'].shape, num_tokens)
torch.save(st_delta, '/'.join(files.split('/')[:-1]) + f'/delta_epoch={epoch_number}.ckpt')
os.remove(files)
def parse_args():
parser = argparse.ArgumentParser('', add_help=False)
parser.add_argument('--path', help='path of folder to checkpoints',
type=str)
parser.add_argument('--newtoken', help='number of new tokens in the checkpoint', default=1,
type=int)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
path = args.path
main(path, args.newtoken)
| 2,047 | 36.925926 | 149 | py |
custom-diffusion | custom-diffusion-main/src/composenW.py | # Copyright 2022 Adobe Research. All rights reserved.
# To view a copy of the license, visit LICENSE.md.
import sys
import os
import argparse
import random
import torch
import torchvision
import numpy as np
from tqdm import tqdm
from scipy.linalg import lu_factor, lu_solve
sys.path.append('stable-diffusion')
sys.path.append('./')
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
def load_model_from_config(config, ckpt):
print(f"Loading model from {ckpt}")
pl_sd = torch.load(ckpt, map_location="cpu")
sd = pl_sd["state_dict"]
model = instantiate_from_config(config.model)
m, u = model.load_state_dict(sd, strict=False)
model.cuda()
model.eval()
return model
def get_model(path):
config = OmegaConf.load("configs/custom-diffusion/finetune.yaml")
model = load_model_from_config(config, path)
return model, config
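# Closed-form weight update used when composing concepts: given regularization text
# features K (with current outputs V = W @ K^T) and target features Ktarget1 that should
# map to Vtarget1, solve the constrained least-squares problem via an LU-factorized solve
# so that the returned Wnew reproduces the target key/value outputs while changing the
# behaviour on the regularization features as little as possible.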
def gdupdateWexact(K, V, Ktarget1, Vtarget1, W, device='cuda'):
input_ = K
output = V
C = input_.T@input_
d = []
lu, piv = lu_factor(C.cpu().numpy())
for i in range(Ktarget1.size(0)):
sol = lu_solve((lu, piv), Ktarget1[i].reshape(-1, 1).cpu().numpy())
d.append(torch.from_numpy(sol).to(K.device))
d = torch.cat(d, 1).T
e2 = d@Ktarget1.T
e1 = (Vtarget1.T - W@Ktarget1.T)
delta = e1@torch.linalg.inv(e2)
Wnew = W + delta@d
lambda_split1 = Vtarget1.size(0)
input_ = torch.cat([Ktarget1.T, K.T], dim=1)
output = torch.cat([Vtarget1, V], dim=0)
loss = torch.norm((Wnew@input_).T - output, 2, dim=1)
print(loss[:lambda_split1].mean().item(), loss[lambda_split1:].mean().item())
return Wnew
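# Merge several single-concept delta checkpoints into one model: collect the cross-attention
# to_k/to_v layers, register any optimized <newX> tokens, build text-feature targets for each
# concept and for a set of regularization captions, update each layer with gdupdateWexact,
# optionally sample images with the merged weights, and save the result as a new delta checkpoint.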
def compose(paths, category, outpath, pretrained_model_path, regularization_prompt, prompts, save_path, device='cuda'):
model, config = get_model(pretrained_model_path)
model.eval()
model.requires_grad = False
layers = []
layers_modified = []
def getlayers(model, root_name=''):
for name, module in model.named_children():
if module.__class__.__name__ == 'SpatialTransformer':
layers_modified.append(root_name + '.' + name + '.transformer_blocks.0.attn2.to_k')
layers_modified.append(root_name + '.' + name + '.transformer_blocks.0.attn2.to_v')
else:
if list(module.children()) == []:
layers.append(root_name + '.' + name)
else:
getlayers(module, root_name + '.' + name)
getlayers(model.model.diffusion_model)
for i in range(len(layers_modified)):
layers_modified[i] = 'model.diffusion_model' + layers_modified[i] + '.weight'
def get_text_embedding(prompts):
with torch.no_grad():
uc = []
for text in prompts:
tokens = tokenizer(text,
truncation=True,
max_length=77,
return_length=True,
return_overflowing_tokens=False,
padding="max_length",
return_tensors="pt")
tokens = tokens["input_ids"]
end = torch.nonzero(tokens == 49407)[:, 1].min()
if 'photo of a' in text[:15]:
print(text)
uc.append((model.get_learned_conditioning(1 * [text])[:, 4:end+1]).reshape(-1, 768))
else:
uc.append((model.get_learned_conditioning(1 * [text])[:, 1:end+1]).reshape(-1, 768))
return torch.cat(uc, 0)
tokenizer = model.cond_stage_model.tokenizer
embeds = []
count = 1
model2_sts = []
modifier_tokens = []
categories = []
config.model.params.cond_stage_config.params = {}
config.model.params.cond_stage_config.params.modifier_token = None
for path1, cat1 in zip(paths.split('+'), category.split('+')):
model2_st = torch.load(path1)
if 'embed' in model2_st['state_dict']:
config.model.params.cond_stage_config.target = 'src.custom_modules.FrozenCLIPEmbedderWrapper'
embeds.append(model2_st['state_dict']['embed'][-1:])
num_added_tokens1 = tokenizer.add_tokens(f'<new{count}>')
modifier_token_id1 = tokenizer.convert_tokens_to_ids('<new1>')
modifier_tokens.append(True)
if config.model.params.cond_stage_config.params.modifier_token is None:
config.model.params.cond_stage_config.params.modifier_token = f'<new{count}>'
else:
config.model.params.cond_stage_config.params.modifier_token += f'+<new{count}>'
else:
modifier_tokens.append(False)
model2_sts.append(model2_st['state_dict'])
categories.append(cat1)
count += 1
embeds = torch.cat(embeds, 0)
model.cond_stage_model.transformer.resize_token_embeddings(len(tokenizer))
token_embeds = model.cond_stage_model.transformer.get_input_embeddings().weight.data
token_embeds[-embeds.size(0):] = embeds
f = open(regularization_prompt, 'r')
prompt = [x.strip() for x in f.readlines()][:200]
uc = get_text_embedding(prompt)
uc_targets = []
from collections import defaultdict
uc_values = defaultdict(list)
for composing_model_count in range(len(model2_sts)):
category = categories[composing_model_count]
if modifier_tokens[composing_model_count]:
string1 = f'<new{composing_model_count+1}> {category}'
else:
string1 = f'{category}'
if 'art' in string1:
prompt = [string1] + [f"painting in the style of {string1}"]
else:
prompt = [string1] + [f"a photo of {string1}"]
uc_targets.append(get_text_embedding(prompt))
for each in layers_modified:
uc_values[each].append((model2_sts[composing_model_count][each].to(device)@uc_targets[-1].T).T)
uc_targets = torch.cat(uc_targets, 0)
removal_indices = []
for i in range(uc_targets.size(0)):
for j in range(i+1, uc_targets.size(0)):
if (uc_targets[i]-uc_targets[j]).abs().mean() == 0:
removal_indices.append(j)
removal_indices = list(set(removal_indices))
uc_targets = torch.stack([uc_targets[i] for i in range(uc_targets.size(0)) if i not in removal_indices], 0)
for each in layers_modified:
uc_values[each] = torch.cat(uc_values[each], 0)
uc_values[each] = torch.stack([uc_values[each][i] for i in range(uc_values[each].size(0)) if i not in removal_indices], 0)
print(uc_values[each].size(), each)
print("target size:", uc_targets.size())
new_weights = {}
for each in layers_modified:
values = (model.state_dict()[each]@uc.T).T
input_target = uc_targets
output_target = uc_values[each]
Wnew = gdupdateWexact(uc[:values.shape[0]],
values,
input_target,
output_target,
model.state_dict()[each].clone(),
)
new_weights[each] = Wnew
print(Wnew.size())
if prompts is not None:
model.load_state_dict(new_weights, strict=False)
sampler = DDIMSampler(model)
sampler.make_schedule(ddim_num_steps=200, ddim_eta=1., verbose=False)
seed = 68
os.environ['PYTHONHASHSEED'] = str(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
batch_size = 10
if not os.path.exists(prompts):
assert prompts is not None
prompts = [batch_size * [prompts]]
else:
print(f"reading prompts from {prompts}")
with open(prompts, "r") as f:
prompts = f.read().splitlines()
prompts = [batch_size * [prompt] for prompt in prompts]
print(prompts[0])
sample_path = os.path.join(f'{save_path}/{outpath}/', 'samples')
os.makedirs(sample_path, exist_ok=True)
with torch.no_grad():
for counter, prompt in enumerate(prompts):
print(prompt)
uc_try = model.get_learned_conditioning(batch_size * [prompt[0]])
unconditional_guidance_scale = 6.
cond = uc_try
unconditional_conditioning = model.get_learned_conditioning(batch_size * [""])
img = torch.randn((batch_size, 4, 64, 64)).cuda()
ddim_use_original_steps = False
timesteps = sampler.ddpm_num_timesteps if ddim_use_original_steps else sampler.ddim_timesteps
time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)
total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
for i, step in enumerate(iterator):
index = total_steps - i - 1
ts = torch.full((batch_size,), step, device=device, dtype=torch.long)
outs = sampler.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning)
img, _ = outs
outim = model.decode_first_stage(outs[0])
outim = torch.clamp((outim + 1.0) / 2.0, min=0.0, max=1.0)
name = '-'.join(prompt[0].split(' '))
torchvision.utils.save_image(outim, f'{save_path}/{outpath}/{counter}_{name}.jpg', nrow=batch_size // 2)
new_weights['embed'] = embeds
os.makedirs(f'{save_path}/{outpath}', exist_ok=True)
os.makedirs(f'{save_path}/{outpath}/checkpoints', exist_ok=True)
os.makedirs(f'{save_path}/{outpath}/configs', exist_ok=True)
with open(f'{save_path}/{outpath}/configs/config_project.yaml', 'w') as fp:
OmegaConf.save(config=config, f=fp)
torch.save({'state_dict': new_weights}, f'{save_path}/{outpath}/checkpoints/delta_epoch=000000.ckpt')
def parse_args():
parser = argparse.ArgumentParser('', add_help=False)
parser.add_argument('--paths', help='+ separated list of checkpoints', required=True,
type=str)
parser.add_argument('--save_path', help='folder name to save optimized weights', default='optimized_logs',
type=str)
parser.add_argument('--categories', help='+ separated list of categories of the models', required=True,
type=str)
parser.add_argument('--prompts', help='prompts for composition model (can be a file or string)', default=None,
type=str)
parser.add_argument('--ckpt', required=True,
type=str)
parser.add_argument('--regularization_prompt', default='./data/regularization_captions.txt',
type=str)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
paths = args.paths
categories = args.categories
if ' ' in categories:
temp = categories.replace(' ', '_')
else:
temp = categories
outpath = '_'.join(['optimized', temp])
compose(paths, categories, outpath, args.ckpt, args.regularization_prompt, args.prompts, args.save_path)
| 11,722 | 38.738983 | 130 | py |
custom-diffusion | custom-diffusion-main/src/convert.py | # Copyright 2022 Adobe Research. All rights reserved.
# To view a copy of the license, visit LICENSE.md.
import os, sys
import argparse
import torch
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
sys.path.append('stable-diffusion')
sys.path.append('./')
from src.diffusers_model_pipeline import CustomDiffusionPipeline
def load_model_from_config(config, ckpt, verbose=False):
print(f"Loading model from {ckpt}")
pl_sd = torch.load(ckpt, map_location="cpu")
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = pl_sd["state_dict"]
model = instantiate_from_config(config.model)
m, u = model.load_state_dict(sd, strict=False)
model.cuda()
model.eval()
return model
def load_model_from_config_addtoken(config, ckpt, verbose=False):
print(f"Loading model from {ckpt}")
pl_sd = torch.load(ckpt, map_location="cpu")
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = pl_sd["state_dict"]
model = instantiate_from_config(config.model)
token_weights = sd["cond_stage_model.transformer.text_model.embeddings.token_embedding.weight"]
del sd["cond_stage_model.transformer.text_model.embeddings.token_embedding.weight"]
m, u = model.load_state_dict(sd, strict=False)
model.cond_stage_model.transformer.text_model.embeddings.token_embedding.weight.data[:token_weights.shape[0]] = token_weights
if len(m) > 0 and verbose:
print("missing keys:")
print(m)
if len(u) > 0 and verbose:
print("unexpected keys:")
print(u)
model.cuda()
model.eval()
return model
def convert(ckpt, delta_ckpt, sd_version, config, modelname, mode):
config = OmegaConf.load(config)
model = load_model_from_config(config, f"{ckpt}")
# get the mapping of layer names between diffuser and CompVis checkpoints
mapping_compvis_to_diffuser = {}
mapping_compvis_to_diffuser_rev = {}
for key in list(model.state_dict().keys()):
if 'attn2' in key:
diffuser_key = key.replace('model.diffusion_model.', '')
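            # Remap CompVis UNet block indices to the diffusers naming scheme:
            # input_blocks.{i} -> down_blocks.{i//3}.attentions.{0 or 1},
            # output_blocks.{i} -> up_blocks.{i//3}.attentions.{i%3},
            # middle_block.1 -> mid_block.attentions.0 (only cross-attention layers are mapped).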
if 'input_blocks' in key:
i, j = [int(x) for x in key.split('.')[3:5]]
i_, j_ = max(0, i // 3), 0 if i in [1, 4, 7] else 1
diffuser_key = diffuser_key.replace(f'input_blocks.{i}.{j}', f'down_blocks.{i_}.attentions.{j_}')
if 'output_blocks' in key:
i, j = [int(x) for x in key.split('.')[3:5]]
i_, j_ = max(0, i // 3), 0 if i % 3 == 0 else 1 if i % 3 == 1 else 2
diffuser_key = diffuser_key.replace(f'output_blocks.{i}.{j}', f'up_blocks.{i_}.attentions.{j_}')
diffuser_key = diffuser_key.replace('middle_block.1', 'mid_block.attentions.0')
mapping_compvis_to_diffuser[key] = diffuser_key
mapping_compvis_to_diffuser_rev[diffuser_key] = key
# convert checkpoint to webui
    if mode in ['diffuser-to-webui', 'compvis-to-webui']:
outpath = f'{os.path.dirname(delta_ckpt)}/webui'
os.makedirs(outpath, exist_ok=True)
if mode == 'diffuser-to-webui':
st = torch.load(delta_ckpt)
compvis_st = {}
compvis_st['state_dict'] = {}
for key in list(st['unet'].keys()):
compvis_st['state_dict'][mapping_compvis_to_diffuser_rev[key]] = st['unet'][key]
model.load_state_dict(compvis_st['state_dict'], strict=False)
torch.save({'state_dict': model.state_dict()}, f'{outpath}/{modelname}')
if 'modifier_token' in st:
os.makedirs(f'{outpath}/embeddings/', exist_ok=True)
for word, feat in st['modifier_token'].items():
torch.save({word: feat}, f'{outpath}/embeddings/{word}.pt')
else:
            compvis_st = torch.load(delta_ckpt)["state_dict"]
            model.load_state_dict(compvis_st, strict=False)
            torch.save({'state_dict': model.state_dict()}, f'{outpath}/{modelname}')
            if 'embed' in compvis_st:
                os.makedirs(f'{outpath}/embeddings/', exist_ok=True)
                for i, feat in enumerate(compvis_st['embed']):
                    torch.save({f'<new{i+1}>': feat}, f'{outpath}/embeddings/<new{i+1}>.pt')
# convert checkpoint from CompVis to diffuser
elif mode == 'compvis-to-diffuser':
st = torch.load(delta_ckpt)["state_dict"]
diffuser_st = {'unet': {}}
if 'embed' in st:
diffuser_st['modifier_token'] = {}
for i in range(st['embed'].size(0)):
diffuser_st['modifier_token'][f'<new{i+1}>'] = st['embed'][i].clone()
del st['embed']
for key in list(st.keys()):
diffuser_st['unet'][mapping_compvis_to_diffuser[key]] = st[key]
torch.save(diffuser_st, f'{os.path.dirname(delta_ckpt)}/delta.bin')
pipe = CustomDiffusionPipeline.from_pretrained(sd_version, torch_dtype=torch.float16).to("cuda")
pipe.load_model(f'{os.path.dirname(delta_ckpt)}/delta.bin')
pipe.save_pretrained(os.path.dirname(delta_ckpt), all=True)
# convert checkpoint from diffuser to CompVis
elif mode == 'diffuser-to-compvis':
st = torch.load(delta_ckpt)
compvis_st = {}
compvis_st['state_dict'] = {}
if 'modifier_token' in st:
compvis_st['state_dict']['embed'] = []
for _, feat in st['modifier_token'].items():
compvis_st['state_dict']['embed'].append(feat)
compvis_st['state_dict']['embed'] = torch.cat(compvis_st['state_dict']['embed'])
config.model.params.cond_stage_config.target = 'src.custom_modules.FrozenCLIPEmbedderWrapper'
config.model.params.cond_stage_config.params = {}
config.model.params.cond_stage_config.params.modifier_token = '+'.join([f'<new{i+1}>' for i in range(len(st['modifier_token']))])
for key in list(st['unet'].keys()):
compvis_st['state_dict'][mapping_compvis_to_diffuser_rev[key]] = st['unet'][key]
torch.save(compvis_st, f'{os.path.dirname(delta_ckpt)}/delta_model.ckpt')
model = load_model_from_config_addtoken(config, f"{ckpt}")
if 'modifier_token' in st:
model.cond_stage_model.transformer.text_model.embeddings.token_embedding.weight.data[-len(st['modifier_token']):] = compvis_st['state_dict']['embed']
del compvis_st['state_dict']['embed']
model.load_state_dict(compvis_st['state_dict'], strict=False)
torch.save({'state_dict': model.state_dict()}, f'{os.path.dirname(delta_ckpt)}/model.ckpt')
def parse_args():
parser = argparse.ArgumentParser('Checkpoint conversion given delta ckpts, currently supported for stable diffusion 1.4 only', add_help=True)
parser.add_argument('--ckpt', help='pretrained compvis model checkpoint', required=True,
type=str)
parser.add_argument('--delta_ckpt', help='delta checkpoint either of compvis or diffuser', required=True,
type=str)
parser.add_argument('--sd_version', default="CompVis/stable-diffusion-v1-4",
type=str)
parser.add_argument('--config', default="configs/custom-diffusion/finetune.yaml",
type=str)
parser.add_argument('--modelname', default="model.ckpt", help="name of the model to save when converting to webui",
type=str)
parser.add_argument("--mode", default='compvis-to-diffuser', choices=['diffuser-to-webui', 'compvis-to-webui', 'compvis-to-diffuser', 'diffuser-to-compvis'],
type=str)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
assert args.sd_version == "CompVis/stable-diffusion-v1-4"
convert(args.ckpt, args.delta_ckpt, args.sd_version, args.config, args.modelname, args.mode)
| 7,914 | 47.858025 | 161 | py |
custom-diffusion | custom-diffusion-main/src/diffusers_sample.py | # ==========================================================================================
#
# MIT License. To view a copy of the license, visit MIT_LICENSE.md.
#
# ==========================================================================================
import argparse
import sys
import os
import numpy as np
import torch
from PIL import Image
sys.path.append('./')
from src.diffusers_model_pipeline import CustomDiffusionPipeline
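# Load a CustomDiffusionPipeline, patch it with a delta checkpoint (optionally the compressed
# variant), and generate `batch_size` images for a single prompt or for every line of a prompt
# file; image grids are written next to the delta checkpoint and individual samples under samples/.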
def sample(ckpt, delta_ckpt, from_file, prompt, compress, batch_size, freeze_model):
model_id = ckpt
pipe = CustomDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
pipe.load_model(delta_ckpt, compress)
outdir = os.path.dirname(delta_ckpt)
generator = torch.Generator(device='cuda').manual_seed(42)
all_images = []
if prompt is not None:
images = pipe([prompt]*batch_size, num_inference_steps=200, guidance_scale=6., eta=1., generator=generator).images
all_images += images
images = np.hstack([np.array(x) for x in images])
images = Image.fromarray(images)
# takes only first 50 characters of prompt to name the image file
name = '-'.join(prompt[:50].split())
images.save(f'{outdir}/{name}.png')
else:
print(f"reading prompts from {from_file}")
with open(from_file, "r") as f:
data = f.read().splitlines()
data = [[prompt]*batch_size for prompt in data]
for prompt in data:
images = pipe(prompt, num_inference_steps=200, guidance_scale=6., eta=1., generator=generator).images
all_images += images
            images = np.hstack([np.array(x) for x in images])
images = Image.fromarray(images)
# takes only first 50 characters of prompt to name the image file
name = '-'.join(prompt[0][:50].split())
images.save(f'{outdir}/{name}.png')
os.makedirs(f'{outdir}/samples', exist_ok=True)
for i, im in enumerate(all_images):
im.save(f'{outdir}/samples/{i}.jpg')
def parse_args():
parser = argparse.ArgumentParser('', add_help=False)
parser.add_argument('--ckpt', help='target string for query',
type=str)
parser.add_argument('--delta_ckpt', help='target string for query', default=None,
type=str)
parser.add_argument('--from-file', help='path to prompt file', default='./',
type=str)
parser.add_argument('--prompt', help='prompt to generate', default=None,
type=str)
parser.add_argument("--compress", action='store_true')
parser.add_argument("--batch_size", default=5, type=int)
parser.add_argument('--freeze_model', help='crossattn or crossattn_kv', default='crossattn_kv',
type=str)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
sample(args.ckpt, args.delta_ckpt, args.from_file, args.prompt, args.compress, args.batch_size, args.freeze_model)
| 3,037 | 39.506667 | 122 | py |
custom-diffusion | custom-diffusion-main/customconcept101/evaluate.py | import argparse
import glob
import json
import os
import warnings
from pathlib import Path
import clip
import numpy as np
import pandas as pd
import sklearn.preprocessing
import torch
from packaging import version
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from tqdm import tqdm
class CLIPCapDataset(torch.utils.data.Dataset):
def __init__(self, data, append=False, prefix='A photo depicts'):
self.data = data
self.prefix = ''
if append:
self.prefix = prefix
if self.prefix[-1] != ' ':
self.prefix += ' '
def __getitem__(self, idx):
c_data = self.data[idx]
c_data = clip.tokenize(self.prefix + c_data, truncate=True).squeeze()
return {'caption': c_data}
def __len__(self):
return len(self.data)
def Convert(image):
return image.convert("RGB")
class CLIPImageDataset(torch.utils.data.Dataset):
def __init__(self, data):
self.data = data
# only 224x224 ViT-B/32 supported for now
self.preprocess = self._transform_test(224)
def _transform_test(self, n_px):
return Compose([
Resize(n_px, interpolation=Image.BICUBIC),
CenterCrop(n_px),
Convert,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073),
(0.26862954, 0.26130258, 0.27577711)),
])
def __getitem__(self, idx):
c_data = self.data[idx]
image = Image.open(c_data)
image = self.preprocess(image)
return {'image': image}
def __len__(self):
return len(self.data)
class DINOImageDataset(torch.utils.data.Dataset):
def __init__(self, data):
self.data = data
        # DINO ViT-S/16 expects 224x224 center-cropped inputs
self.preprocess = self._transform_test(224)
def _transform_test(self, n_px):
return Compose([
Resize(256, interpolation=Image.BICUBIC),
CenterCrop(n_px),
Convert,
ToTensor(),
Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
def __getitem__(self, idx):
c_data = self.data[idx]
image = Image.open(c_data)
image = self.preprocess(image)
return {'image': image}
def __len__(self):
return len(self.data)
def extract_all_captions(captions, model, device, batch_size=256, num_workers=8, append=False):
data = torch.utils.data.DataLoader(
CLIPCapDataset(captions, append=append),
batch_size=batch_size, num_workers=num_workers, shuffle=False)
all_text_features = []
with torch.no_grad():
for b in tqdm(data):
b = b['caption'].to(device)
all_text_features.append(model.encode_text(b).cpu().numpy())
all_text_features = np.vstack(all_text_features)
return all_text_features
def extract_all_images(images, model, datasetclass, device, batch_size=64, num_workers=8):
data = torch.utils.data.DataLoader(
datasetclass(images),
batch_size=batch_size, num_workers=num_workers, shuffle=False)
all_image_features = []
with torch.no_grad():
for b in tqdm(data):
b = b['image'].to(device)
if hasattr(model, 'encode_image'):
if device == 'cuda':
b = b.to(torch.float16)
all_image_features.append(model.encode_image(b).cpu().numpy())
else:
all_image_features.append(model(b).cpu().numpy())
all_image_features = np.vstack(all_image_features)
return all_image_features
def get_clip_score(model, images, candidates, device, append=False, w=2.5):
'''
get standard image-text clipscore.
images can either be:
- a list of strings specifying filepaths for images
- a precomputed, ordered matrix of image features
'''
if isinstance(images, list):
# need to extract image features
        images = extract_all_images(images, model, CLIPImageDataset, device)
candidates = extract_all_captions(candidates, model, device, append=append)
# as of numpy 1.21, normalize doesn't work properly for float16
if version.parse(np.__version__) < version.parse('1.21'):
images = sklearn.preprocessing.normalize(images, axis=1)
candidates = sklearn.preprocessing.normalize(candidates, axis=1)
else:
warnings.warn(
'due to a numerical instability, new numpy normalization is slightly different than paper results. '
'to exactly replicate paper results, please use numpy version less than 1.21, e.g., 1.20.3.')
images = images / np.sqrt(np.sum(images ** 2, axis=1, keepdims=True))
candidates = candidates / \
np.sqrt(np.sum(candidates ** 2, axis=1, keepdims=True))
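    # CLIPScore: cosine similarity between image and caption features, clipped at zero and
    # rescaled by w (2.5 by default, following the CLIPScore formulation).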
per = w * np.clip(np.sum(images * candidates, axis=1), 0, None)
return np.mean(per), per, candidates
def clipeval(image_dir, candidates_json, device):
image_paths = [os.path.join(image_dir, path) for path in os.listdir(image_dir)
if path.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.JPG'))]
image_ids = [Path(path).stem for path in image_paths]
with open(candidates_json) as f:
candidates = json.load(f)
candidates = [candidates[cid] for cid in image_ids]
model, _ = clip.load("ViT-B/32", device=device, jit=False)
model.eval()
image_feats = extract_all_images(
image_paths, model, CLIPImageDataset, device, batch_size=64, num_workers=8)
_, per_instance_image_text, _ = get_clip_score(
model, image_feats, candidates, device)
scores = {image_id: {'CLIPScore': float(clipscore)}
for image_id, clipscore in
zip(image_ids, per_instance_image_text)}
print('CLIPScore: {:.4f}'.format(
np.mean([s['CLIPScore'] for s in scores.values()])))
return np.mean([s['CLIPScore'] for s in scores.values()]), np.std([s['CLIPScore'] for s in scores.values()])
def clipeval_image(image_dir, image_dir_ref, device):
image_paths = [os.path.join(image_dir, path) for path in os.listdir(image_dir)
if path.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.JPG'))]
image_paths_ref = [os.path.join(image_dir_ref, path) for path in os.listdir(image_dir_ref)
if path.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.JPG'))]
model, _ = clip.load("ViT-B/32", device=device, jit=False)
model.eval()
image_feats = extract_all_images(
image_paths, model, CLIPImageDataset, device, batch_size=64, num_workers=8)
image_feats_ref = extract_all_images(
image_paths_ref, model, CLIPImageDataset, device, batch_size=64, num_workers=8)
image_feats = image_feats / \
np.sqrt(np.sum(image_feats ** 2, axis=1, keepdims=True))
image_feats_ref = image_feats_ref / \
np.sqrt(np.sum(image_feats_ref ** 2, axis=1, keepdims=True))
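    # With both feature sets L2-normalized, the matrix product contains pairwise cosine
    # similarities; the image-alignment score is their mean over all generated/reference pairs.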
res = image_feats @ image_feats_ref.T
return np.mean(res)
def dinoeval_image(image_dir, image_dir_ref, device):
image_paths = [os.path.join(image_dir, path) for path in os.listdir(image_dir)
if path.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.JPG'))]
image_paths_ref = [os.path.join(image_dir_ref, path) for path in os.listdir(image_dir_ref)
if path.endswith(('.png', '.jpg', '.jpeg', '.tiff', '.JPG'))]
model = torch.hub.load('facebookresearch/dino:main', 'dino_vits16').to(device)
model.eval()
image_feats = extract_all_images(
image_paths, model, DINOImageDataset, device, batch_size=64, num_workers=8)
image_feats_ref = extract_all_images(
image_paths_ref, model, DINOImageDataset, device, batch_size=64, num_workers=8)
image_feats = image_feats / \
np.sqrt(np.sum(image_feats ** 2, axis=1, keepdims=True))
image_feats_ref = image_feats_ref / \
np.sqrt(np.sum(image_feats_ref ** 2, axis=1, keepdims=True))
res = image_feats @ image_feats_ref.T
return np.mean(res)
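# Aggregate evaluation: CLIP text alignment of the generated samples against prompts.json,
# plus CLIP and DINO image alignment against each '+'-separated folder of real target images;
# results are appended to (or updated in) a pandas DataFrame stored as a pickle.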
def calmetrics(sample_root, target_paths, numgen, outpkl):
device = 'cuda'
if os.path.exists(outpkl):
df = pd.read_pickle(outpkl)
else:
df = pd.DataFrame()
full = {}
assert sample_root.is_dir()
image_path = sample_root / 'samples'
json_path = sample_root / 'prompts.json'
assert len(glob.glob(str(image_path / '*.png'))) == numgen, "Sample folder does not contain required number of images"
textalignment, _ = \
clipeval(str(image_path), str(json_path), device)
sd = {}
sd['CLIP Text alignment'] = textalignment
for i, target_path in enumerate(target_paths.split('+')):
imagealignment = \
clipeval_image(str(image_path), target_path, device)
dinoimagealignment = \
dinoeval_image(str(image_path), target_path, device)
if i > 0:
sd[f'CLIP Image alignment{i}'] = imagealignment
sd[f'DINO Image alignment{i}'] = dinoimagealignment
else:
sd['CLIP Image alignment'] = imagealignment
sd['DINO Image alignment'] = dinoimagealignment
expname = sample_root
if expname not in full:
full[expname] = sd
else:
full[expname] = {**sd, **full[expname]}
print(sd)
print("Metrics:", full)
for expname, sd in full.items():
if expname not in df.index:
df1 = pd.DataFrame(sd, index=[expname])
df = pd.concat([df, df1])
else:
df.loc[df.index == expname, sd.keys()] = sd.values()
df.to_pickle(outpkl)
def parse_args():
parser = argparse.ArgumentParser("metric", add_help=False)
parser.add_argument("--sample_root", type=str,
help="the root folder to generated images")
parser.add_argument("--numgen", type=int, default=100,
help="total number of images.")
parser.add_argument("--target_paths", type=str,
help="+ separated paths to real target images")
parser.add_argument("--outpkl", type=str, default="evaluation.pkl",
help="the path to save result pkl file")
return parser.parse_args()
def main(args):
calmetrics(Path(args.sample_root), args.target_paths,
args.numgen, args.outpkl)
if __name__ == "__main__":
# distributed setting
args = parse_args()
main(args)
| 10,450 | 33.953177 | 122 | py |
AnalyzeParameterEfficientFinetune | AnalyzeParameterEfficientFinetune-main/src/zlog.py | import os
import time
import torch
import traceback
from contextlib import contextmanager
from tensorboardX import SummaryWriter # maple
import jiant.utils.python.io as py_io
import jiant.utils.python.filesystem as filesystem
class BaseZLogger:
def log_context(self):
raise NotImplementedError()
def write_entry(self, key, entry):
raise NotImplementedError()
def write_obj(self, key, obj, entry):
raise NotImplementedError()
def flush(self):
raise NotImplementedError()
class ZLogger(BaseZLogger):
def __init__(self, fol_path, log_errors=True, overwrite=False):
self.fol_path = fol_path
self.log_errors = log_errors
self.overwrite = overwrite
self.write_mode = "w" if overwrite else "a"
os.makedirs(fol_path)
self.handles = {}
self.tb_writer = SummaryWriter(fol_path)
def __exit__(self, type, value, traceback):
self.tb_writer.close()
@contextmanager
def log_context(self):
try:
yield self
except Exception:
if self.log_errors:
self.write_entry("errors", traceback.format_exc())
raise
finally:
for f in self.handles.values():
f.close()
def write_entry(self, key, entry, do_print=False):
if isinstance(entry, dict):
entry = entry.copy()
else:
entry = {"data": entry}
entry["TIMESTAMP"] = time.time()
self._write_entry_to_file(key=key, entry=entry)
if do_print:
print(entry)
if key in ['train_val', 'train_val_best']:
task = list(entry['train_state']['task_steps'].keys())[0]
self.tb_writer.add_scalar('%s/%s'%(task, key), entry['score'], entry['train_state']['global_steps'])
elif key == 'early_stopping':
pass
elif key == 'loss_train':
for e in entry:
if e.startswith('loss_'):
self.tb_writer.add_scalar('%s/%s'%(entry['task'], key), entry[e], entry['task_step'])
def write_obj(self, key, obj, entry):
assert "DATA" not in entry
if isinstance(entry, dict):
entry = entry.copy()
else:
entry = {"data": entry}
time_stamp = time.time()
entry["DATA"] = self._save_obj(key, time_stamp, obj)
entry["TIMESTAMP"] = time_stamp
self._write_entry_to_file(key=key, entry=entry)
def _save_obj(self, key, time_stamp, obj):
cache_path = self.get_cache_path(key)
os.makedirs(cache_path, exist_ok=True)
save_path = os.path.join(cache_path, str(time_stamp))
torch.save(obj, save_path)
return save_path
def check_handle_open(self, key):
if key in self.handles:
return
handle_path = self.get_path(key)
py_io.create_containing_folder(handle_path)
self.handles[key] = open(handle_path, self.write_mode)
def get_path(self, key):
return os.path.join(self.fol_path, key + ".zlog")
def get_cache_path(self, key):
return os.path.join(self.fol_path, key + "___CACHE")
def flush(self, key=None):
if key is None:
for f in self.handles.values():
f.flush()
elif isinstance(key, list):
for k in key:
self.handles[k].flush()
else:
self.handles[key].flush()
self.tb_writer.flush()
def _write_entry_to_file(self, key, entry):
self.check_handle_open(key)
self.handles[key].write(py_io.to_jsonl(entry) + "\n")
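# Variant of ZLogger that buffers entries per key and only writes them out once the buffer
# reaches its configured size (per-key sizes can override the default), reducing file I/O.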
class ZBufferedLogger(ZLogger):
def __init__(
self,
fol_path,
default_buffer_size=1,
buffer_size_dict=None,
log_errors=True,
overwrite=False,
):
super().__init__(fol_path=fol_path, log_errors=log_errors, overwrite=overwrite)
self.default_buffer_size = default_buffer_size
self.buffer_size_dict = buffer_size_dict.copy() if buffer_size_dict else {}
self.buffer_dict = {}
def check_handle_open(self, key):
super().check_handle_open(key=key)
if key not in self.buffer_dict:
self.buffer_dict[key] = []
if key not in self.buffer_size_dict:
self.buffer_size_dict[key] = self.default_buffer_size
def _write_entry_to_file(self, key, entry):
self.check_handle_open(key)
self.buffer_dict[key].append(entry)
if len(self.buffer_dict[key]) >= self.buffer_size_dict[key]:
self.flush(key)
def _write_buffer(self, key):
if not self.buffer_dict[key]:
return
self.handles[key].write(
"".join(py_io.to_jsonl(entry) + "\n" for entry in self.buffer_dict[key])
)
self.buffer_dict[key] = []
def flush(self, key=None):
if key is None:
for k, f in self.handles.items():
self._write_buffer(k)
f.flush()
elif isinstance(key, list):
for k in key:
self._write_buffer(k)
self.handles[k].flush()
else:
self._write_buffer(key)
self.handles[key].flush()
class _VoidZLogger(BaseZLogger):
def log_context(self):
yield
def write_entry(self, key, entry):
pass
def write_obj(self, key, obj, entry):
pass
def flush(self):
pass
class _PrintZLogger(BaseZLogger):
def log_context(self):
yield
def write_entry(self, key, entry):
print(f"{key}: {entry}")
def write_obj(self, key, obj, entry):
print(f"{key}: {obj}")
def flush(self):
pass
class InMemoryZLogger(BaseZLogger):
def __init__(self):
self.entries = {}
self.data = {}
def log_context(self):
yield
def write_entry(self, key, entry):
if isinstance(entry, dict):
entry = entry.copy()
else:
entry = {"data": entry}
entry["TIMESTAMP"] = time.time()
self._write_entry(key=key, entry=entry)
def write_obj(self, key, obj, entry):
assert "DATA" not in entry
if isinstance(entry, dict):
entry = entry.copy()
else:
entry = {"data": entry}
time_stamp = time.time()
entry["DATA"] = obj
entry["TIMESTAMP"] = time_stamp
self._write_entry(key=key, entry=entry)
def _write_entry(self, key, entry):
if key not in self.entries:
self.entries[key] = []
self.entries[key].append(entry)
def flush(self):
pass
VOID_LOGGER = _VoidZLogger()
PRINT_LOGGER = _PrintZLogger()
def load_log(fol_path):
all_paths = filesystem.find_files_with_ext(fol_path, "zlog")
log_data = {}
for path in all_paths:
key = os.path.abspath(path).replace(os.path.abspath(fol_path), "")[1:].replace(".zlog", "")
log_data[key] = py_io.read_jsonl(path)
return log_data
| 7,042 | 27.864754 | 112 | py |
AnalyzeParameterEfficientFinetune | AnalyzeParameterEfficientFinetune-main/src/runscript.py | import os
import torch
from transformers import AutoConfig
import jiant.proj.main.write_task_configs as write_task_configs
import jiant.proj.main.export_model as export_model
#import jiant.proj.main.tokenize_and_cache as tokenize_and_cache
import tokenize_and_cache # maple
import jiant.proj.main.scripts.configurator as configurator
import main_runscript as runscript # maple
import jiant.shared.distributed as distributed
import jiant.utils.zconf as zconf
import jiant.utils.python.io as py_io
from jiant.utils.python.logic import replace_none
from jiant.utils.python.io import read_json
@zconf.run_config
class RunConfiguration(zconf.RunConfig):
# === User parameters === #
user_mode = zconf.attr(type=str, default="")
log_dir = zconf.attr(type=str, default=".")
do_test = zconf.attr(action="store_true")#maple
# === Required parameters === #
run_name = zconf.attr(type=str, required=True)
exp_dir = zconf.attr(type=str, required=True)
data_dir = zconf.attr(type=str, required=True)
# === Model parameters === #
hf_pretrained_model_name_or_path = zconf.attr(type=str, required=True)
model_weights_path = zconf.attr(type=str, default=None)
model_cache_path = zconf.attr(type=str, default=None)
# === Task parameters === #
tasks = zconf.attr(type=str, default=None)
train_tasks = zconf.attr(type=str, default=None)
val_tasks = zconf.attr(type=str, default=None)
test_tasks = zconf.attr(type=str, default=None)
# === Misc parameters === #
train_batch_size = zconf.attr(type=int, default=32)
max_seq_length = zconf.attr(type=int, default=256)
num_train_epochs = zconf.attr(type=float, default=3)
train_examples_cap = zconf.attr(type=int, default=None)
create_config = zconf.attr(action="store_true")
# === Running Setup === #
do_save = zconf.attr(action="store_true")
do_save_last = zconf.attr(action="store_true")
do_save_best = zconf.attr(action="store_true")
write_val_preds = zconf.attr(action="store_true")
write_test_preds = zconf.attr(action="store_true")
eval_every_steps = zconf.attr(type=int, default=0)
min_train_steps = zconf.attr(type=int, default=0)#maple
save_every_steps = zconf.attr(type=int, default=0)
save_checkpoint_every_steps = zconf.attr(type=int, default=0)
no_improvements_for_n_evals = zconf.attr(type=int, default=0)
keep_checkpoint_when_done = zconf.attr(action="store_true")
force_overwrite = zconf.attr(action="store_true")
seed = zconf.attr(type=int, default=-1)
# === Training Learning Parameters === #
learning_rate = zconf.attr(default=1e-5, type=float)
adam_epsilon = zconf.attr(default=1e-8, type=float)
max_grad_norm = zconf.attr(default=1.0, type=float)
optimizer_type = zconf.attr(default="adam", type=str)
# === Specialized config === #
no_cuda = zconf.attr(action="store_true")
fp16 = zconf.attr(action="store_true")
fp16_opt_level = zconf.attr(default="O1", type=str)
local_rank = zconf.attr(default=-1, type=int)
server_ip = zconf.attr(default="", type=str)
server_port = zconf.attr(default="", type=str)
def _post_init(self):
assert self.tasks or (
self.train_tasks or self.val_tasks or self.test_tasks
), "Must include tasks or one of train_tasks, val_tasks, tests_tasks"
if self.tasks and (self.train_tasks or self.val_tasks or self.test_tasks):
assert (
([self.tasks] == self.train_tasks)
and ([self.tasks] == self.val_tasks)
and ([self.tasks] == self.test_tasks)
), "Tasks must be same as train_tasks/val_tasks/test_tasks if both are present"
if self.tasks:
self.train_tasks = self.tasks
self.val_tasks = self.tasks
self.test_tasks = self.tasks
self.train_tasks = self.train_tasks.split(",")
self.val_tasks = self.val_tasks.split(",")
self.test_tasks = self.test_tasks.split(",")
def create_and_write_task_configs(task_name_list, data_dir, task_config_base_path):
os.makedirs(task_config_base_path, exist_ok=True)
task_config_path_dict = {}
for task_name in task_name_list:
task_config_path = os.path.join(task_config_base_path, f"{task_name}_config.json")
write_task_configs.create_and_write_task_config(
task_name=task_name,
task_data_dir=os.path.join(data_dir, task_name),
task_config_path=task_config_path,
)
task_config_path_dict[task_name] = task_config_path
return task_config_path_dict
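# End-to-end driver: write task configs, export the pretrained model once, tokenize and cache
# each task/phase, build the jiant task-container config, and then hand off to the train/eval
# runscript (optionally preparing test/train label caches when --do_test is set).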
def run_simple(args: RunConfiguration, with_continue: bool = False):
hf_config = AutoConfig.from_pretrained(args.hf_pretrained_model_name_or_path)
model_cache_path = replace_none(
args.model_cache_path, default=os.path.join(args.exp_dir, "models")
)
with distributed.only_first_process(local_rank=args.local_rank):
# === Step 1: Write task configs based on templates === #
full_task_name_list = sorted(list(set(args.train_tasks + args.val_tasks + args.test_tasks)))
task_config_path_dict = {}
if args.create_config:
task_config_path_dict = create_and_write_task_configs(
task_name_list=full_task_name_list,
data_dir=args.data_dir,
task_config_base_path=os.path.join(args.data_dir, "configs"),
)
else:
for task_name in full_task_name_list:
task_config_path_dict[task_name] = os.path.join(
args.data_dir, "configs", f"{task_name}_config.json"
)
# === Step 2: Download models === #
if not os.path.exists(os.path.join(model_cache_path, hf_config.model_type)):
print("Downloading model")
export_model.export_model(
hf_pretrained_model_name_or_path=args.hf_pretrained_model_name_or_path,
output_base_path=os.path.join(model_cache_path, hf_config.model_type),
)
# === Step 3: Tokenize and cache === #
phase_task_dict = {
"train": args.train_tasks,
"val": args.val_tasks,
"test": args.test_tasks,
}
for task_name in full_task_name_list:
phases_to_do = []
for phase, phase_task_list in phase_task_dict.items():
if task_name in phase_task_list and not os.path.exists(
os.path.join(args.exp_dir, "cache", hf_config.model_type, task_name, phase)
):
config = read_json(task_config_path_dict[task_name])
if phase in config["paths"]:
phases_to_do.append(phase)
else:
phase_task_list.remove(task_name)
if not phases_to_do:
continue
if args.do_test:#maple
phases_to_do.append("test_labels")
phases_to_do.append("train_labels")
print(f"Tokenizing Task '{task_name}' for phases '{','.join(phases_to_do)}'")
tokenize_and_cache.main(
tokenize_and_cache.RunConfiguration(
task_config_path=task_config_path_dict[task_name],
hf_pretrained_model_name_or_path=args.hf_pretrained_model_name_or_path,
output_dir=os.path.join(args.exp_dir, "cache", hf_config.model_type, task_name),
phases=phases_to_do,
# TODO: Need a strategy for task-specific max_seq_length issues (issue #1176)
max_seq_length=args.max_seq_length,
smart_truncate=True,
do_iter=True,
)
)
# === Step 4: Generate jiant_task_container_config === #
# We'll do this with a configurator. Creating a jiant_task_config has a surprising
# number of moving parts.
jiant_task_container_config = configurator.SimpleAPIMultiTaskConfigurator(
task_config_base_path=os.path.join(args.data_dir, "configs"),
task_cache_base_path=os.path.join(args.exp_dir, "cache", hf_config.model_type),
train_task_name_list=args.train_tasks,
val_task_name_list=args.val_tasks,
test_task_name_list=args.test_tasks,
train_batch_size=args.train_batch_size,
eval_batch_multiplier=2,
epochs=args.num_train_epochs,
num_gpus=torch.cuda.device_count(),
train_examples_cap=args.train_examples_cap,
).create_config()
if args.do_test: #maple
for tsk in jiant_task_container_config['task_cache_config_dict']:
jiant_task_container_config['task_cache_config_dict'][tsk]['test_labels'] = jiant_task_container_config['task_cache_config_dict'][tsk]['val_labels'].replace("/val_labels", "/test_labels")
jiant_task_container_config['task_cache_config_dict'][tsk]['train_labels'] = jiant_task_container_config['task_cache_config_dict'][tsk]['val_labels'].replace("/val_labels", "/train_labels")
os.makedirs(os.path.join(args.exp_dir, "run_configs"), exist_ok=True)
jiant_task_container_config_path = os.path.join(
args.exp_dir, "run_configs", f"{args.run_name}_config.json"
)
py_io.write_json(jiant_task_container_config, path=jiant_task_container_config_path)
# === Step 5: Train/Eval! === #
if args.model_weights_path:
model_load_mode = "partial"
model_weights_path = args.model_weights_path
else:
# From Transformers
if any(task_name.startswith("mlm_") for task_name in full_task_name_list):
model_load_mode = "from_transformers_with_mlm"
else:
model_load_mode = "from_transformers"
model_weights_path = os.path.join(
model_cache_path, hf_config.model_type, "model", "model.p"
)
run_output_dir = os.path.join(args.exp_dir, "runs", args.run_name)
run_args = runscript.RunConfiguration(
# === Required parameters === #
jiant_task_container_config_path=jiant_task_container_config_path,
output_dir=run_output_dir,
# === Model parameters === #
hf_pretrained_model_name_or_path=args.hf_pretrained_model_name_or_path,
model_path=model_weights_path,
model_config_path=os.path.join(
model_cache_path, hf_config.model_type, "model", "config.json",
),
model_load_mode=model_load_mode,
# === Running Setup === #
do_train=bool(args.train_tasks),
do_val=bool(args.val_tasks),
do_save=args.do_save,
do_save_best=args.do_save_best,
do_save_last=args.do_save_last,
write_val_preds=args.write_val_preds,
write_test_preds=args.write_test_preds,
eval_every_steps=args.eval_every_steps,
min_train_steps = args.min_train_steps,
save_every_steps=args.save_every_steps,
save_checkpoint_every_steps=args.save_checkpoint_every_steps,
no_improvements_for_n_evals=args.no_improvements_for_n_evals,
keep_checkpoint_when_done=args.keep_checkpoint_when_done,
force_overwrite=args.force_overwrite,
seed=args.seed,
# === Training Learning Parameters === #
learning_rate=args.learning_rate,
adam_epsilon=args.adam_epsilon,
max_grad_norm=args.max_grad_norm,
optimizer_type=args.optimizer_type,
# === Specialized config === #
no_cuda=args.no_cuda,
fp16=args.fp16,
fp16_opt_level=args.fp16_opt_level,
local_rank=args.local_rank,
server_ip=args.server_ip,
server_port=args.server_port,
)
if (
args.save_checkpoint_every_steps
and os.path.exists(os.path.join(run_output_dir, "checkpoint.p"))
and with_continue
):
print("Resuming")
checkpoint = torch.load(os.path.join(run_output_dir, "checkpoint.p"))
#run_args = runscript.RunConfiguration.from_dict(checkpoint["metadata"]["args"])
else:
print("Running from start")
checkpoint = None
    run_args.user_mode = args.user_mode  # maple
    run_args.min_train_steps = args.min_train_steps  # maple
    run_args.log_dir = args.log_dir  # maple
    run_args.do_test = args.do_test  # maple
runscript.run_loop(args=run_args, checkpoint=checkpoint)
py_io.write_file(args.to_json(), os.path.join(run_output_dir, "simple_run_config.json"))
def main():
mode, cl_args = zconf.get_mode_and_cl_args()
args = RunConfiguration.default_run_cli(cl_args=cl_args)
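    # args.user_mode holds comma-separated "key" or "key=value" overrides; the line below parses them into a dict (bare keys map to None).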
user_mode = {e.split('=')[0] : e.split('=')[1] if len(e.split('=')) > 1 else None for e in (args.user_mode[0].split(',') if type(args.user_mode) is not str else args.user_mode.split(',')) }
if 'ptm' in user_mode:
args.hf_pretrained_model_name_or_path = user_mode['ptm']
if 'seed' in user_mode:
args.seed = int(user_mode['seed'])
if 'lr' in user_mode:
args.learning_rate = float(user_mode['lr'])
print("lr:", args.learning_rate)
if 'nie' in user_mode:
args.no_improvements_for_n_evals = int(user_mode['nie'])
if 'srand' in user_mode:
args.seed = args.seed + int(user_mode['srand'])
if mode == "run":
run_simple(args, with_continue=False)
elif mode == "run_with_continue":
run_simple(args, with_continue=True)
else:
raise zconf.ModeLookupError(mode)
if __name__ == "__main__":
main()
| 13,469 | 43.163934 | 201 | py |
AnalyzeParameterEfficientFinetune | AnalyzeParameterEfficientFinetune-main/src/evaluate.py | import json
import os
import torch
import jiant.utils.python.io as py_io
import jiant.proj.main.components.task_sampler as jiant_task_sampler
from jiant.proj.main.components.evaluate import *
def write_val_results(val_results_dict, metrics_aggregator, output_dir, verbose=True, result_file = "val_metrics.json"):
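    # Collect per-task losses/metrics plus the aggregated major metric and write them to output_dir/result_file as JSON.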
full_results_to_write = {
"aggregated": jiant_task_sampler.compute_aggregate_major_metrics_from_results_dict(
metrics_aggregator=metrics_aggregator, results_dict=val_results_dict,
),
}
for task_name, task_results in val_results_dict.items():
task_results_to_write = {}
if "loss" in task_results:
task_results_to_write["loss"] = task_results["loss"]
if "metrics" in task_results:
task_results_to_write["metrics"] = task_results["metrics"].to_dict()
full_results_to_write[task_name] = task_results_to_write
metrics_str = json.dumps(full_results_to_write, indent=2)
if verbose:
print(metrics_str)
py_io.write_json(data=full_results_to_write, path=os.path.join(output_dir, result_file))
print("Saved at " + os.path.join(output_dir, result_file)) | 1,180 | 38.366667 | 120 | py |
AnalyzeParameterEfficientFinetune | AnalyzeParameterEfficientFinetune-main/src/runner.py | from http.client import NotConnected
from typing import Dict
from dataclasses import dataclass
import torch
import torch.nn as nn  # needed by the 'hda_backpack' branch below
import math
import numpy as np
import copy
#from functorch import *
from torch.autograd.functional import *
import jiant.tasks.evaluate as evaluate
import jiant.utils.torch_utils as torch_utils
#from jiant.proj.main.components.container_setup import JiantTaskContainer
from container_setup import JiantTaskContainer # maple
from jiant.proj.main.modeling.primary import JiantModel, wrap_jiant_forward
from jiant.shared.constants import PHASE
from jiant.shared.runner import (
#complex_backpropagate,
get_train_dataloader_from_cache,
get_eval_dataloader_from_cache,
)
from jiant.utils.display import maybe_tqdm
from jiant.utils.python.datastructures import InfiniteYield, ExtendedDataClassMixin
def complex_backpropagate(
loss, optimizer, model, fp16, n_gpu, gradient_accumulation_steps, max_grad_norm, retain_graph = False
):
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if gradient_accumulation_steps > 1:
loss = loss / gradient_accumulation_steps
if fp16:
# noinspection PyUnresolvedReferences,PyPackageRequirements
from apex import amp
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), max_grad_norm)
else:
loss.backward(retain_graph=retain_graph)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
return loss
@dataclass
class RunnerParameters(ExtendedDataClassMixin):
local_rank: int
n_gpu: int
fp16: bool
max_grad_norm: float
@dataclass
class TrainState(ExtendedDataClassMixin):
global_steps: int
task_steps: Dict[str, int]
@classmethod
def from_task_name_list(cls, task_name_list):
return cls(global_steps=0, task_steps={task_name: 0 for task_name in task_name_list})
def step(self, task_name):
self.task_steps[task_name] += 1
self.global_steps += 1
# Maple for hvp
# Following are utilities to make nn.Module functional
def del_attr(obj, names):
if len(names) == 1:
delattr(obj, names[0])
else:
del_attr(getattr(obj, names[0]), names[1:])
def set_attr(obj, names, val):
if len(names) == 1:
setattr(obj, names[0], val)
else:
set_attr(getattr(obj, names[0]), names[1:], val)
def make_functional(mod):
orig_params = tuple(mod.parameters())
# Remove all the parameters in the model
names = []
for name, p in list(mod.named_parameters()):
del_attr(mod, name.split("."))
names.append(name)
return orig_params, names
def get_parms(mod):
orig_params = tuple(mod.parameters())
# Remove all the parameters in the model
parms = []
names = []
for name, p in list(mod.named_parameters()):
parms.append(copy.deepcopy(p))
del_attr(mod, name.split("."))
names.append(name)
return parms, names
def load_weights(mod, names, params):
for name, p in zip(names, params):
set_attr(mod, name.split("."), p)
class JiantRunner:
def __init__(
self,
jiant_task_container: JiantTaskContainer,
jiant_model: JiantModel,
optimizer_scheduler,
device,
rparams: RunnerParameters,
log_writer,
):
self.jiant_task_container = jiant_task_container
self.jiant_model = jiant_model
self.optimizer_scheduler = optimizer_scheduler
self.device = device
self.rparams = rparams
self.log_writer = log_writer
self.model = self.jiant_model
def run_train(self):
for _ in self.run_train_context():
pass
def run_train_context(self, verbose=True):
train_dataloader_dict = self.get_train_dataloader_dict()
train_state = TrainState.from_task_name_list(
self.jiant_task_container.task_run_config.train_task_list
)
pbar = maybe_tqdm(
range(self.jiant_task_container.global_train_config.max_steps),
desc="Training",
verbose=verbose,
)
for _ in pbar:
self.run_train_step(
train_dataloader_dict=train_dataloader_dict, train_state=train_state,
pbar = pbar
)
yield train_state
def resume_train_context(self, train_state, verbose=True):
train_dataloader_dict = self.get_train_dataloader_dict()
start_position = train_state.global_steps
pbar = maybe_tqdm(
range(start_position, self.jiant_task_container.global_train_config.max_steps),
desc="Training",
initial=start_position,
total=self.jiant_task_container.global_train_config.max_steps,
verbose=verbose,
)
for _ in pbar:
self.run_train_step(
train_dataloader_dict=train_dataloader_dict, train_state=train_state,
pbar = pbar
)
yield train_state
def run_train_step(self, train_dataloader_dict: dict, train_state: TrainState, pbar):
self.jiant_model.train()
task_name, task = self.jiant_task_container.task_sampler.pop()
task_specific_config = self.jiant_task_container.task_specific_configs[task_name]
loss_val = 0
"""if 'sctmask' in self.user_mode:
retain_graph = True
else:
retain_graph = False """
if 'sctmask' in self.user_mode:
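            # sctmask: only a fixed random subset of positions in each encoder tensor is trainable; scatter those trainable values back into the detached full tensors before the forward pass.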
#import numpy as np
parms = dict(self.jiant_model.named_parameters())
for name in parms:
if not name.startswith('encoder') or '__' in name:
continue
xname = "sctmask__" + name.replace(".", "__")
idx = self.jiant_model.encoder.idx_dict[xname]
x = getattr(self.jiant_model.encoder, xname)
parms[name].detach_()
parms[name].flatten()[idx] = x
elif 'prompt' in self.user_mode:
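            # prompt tuning: refresh the (detached) embedding matrix each step as [frozen original rows; trainable prompt rows].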
self.jiant_model.encoder.embeddings.word_embeddings.weight.detach_()
self.jiant_model.encoder.embeddings.word_embeddings.weight[:] = torch.cat([self.jiant_model.encoder.embeddings.word_embeddings.weight_ori, self.jiant_model.encoder.embeddings.prompt_weight])
elif 'diffprun' in self.user_mode:
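            # diff pruning: learn a sparse additive delta w gated by a relaxed Bernoulli (hard-concrete) mask; after the burn-in phase the gate is frozen to the top-k entries.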
parms = dict(self.jiant_model.named_parameters())
if not hasattr(self, "dp_burnin_step"):
self.dp_burnin_step = 500 if 'burnin' not in self.user_mode else int(self.user_mode['burnin'])
self.dp_mask = {}
self.dp_step = 0
for p in parms:
if not p.startswith('encoder.w'):
continue
pn = p.replace("encoder.w__", "").replace("__", ".")
self.dp_mask[pn] = torch.zeros_like(parms[p])
self.dp_step += 1
if self.dp_step < self.dp_burnin_step:
l0s = []
for p in parms:
if not p.startswith('encoder.w'):
continue
p = p.replace("encoder.w__", "").replace("__", ".")
alpha = getattr(self.jiant_model.encoder, "ber__" + p.replace(".", "__"))
w = getattr(self.jiant_model.encoder, "w__" + p.replace(".", "__"))
u = 1e-6 + torch.rand_like(w) * (1- 2e-6)
st = torch.sigmoid(torch.log(u) - torch.log(1-u) + alpha)
clamp = float(self.user_mode['clamp']) if 'clamp' in self.user_mode else 3.
l, r = -clamp, clamp
stb = st * (r-l) + l
z = stb.clamp_min(0).clamp_max(1)
nw = self.jiant_model.ori_pars[p] + z * w
node = self.jiant_model
pnames = p.split(".")
for pname in pnames[:-1]:
node = getattr(node, pname)
delattr(node, pnames[-1])
setattr(node, pnames[-1], nw)
l0s.append(torch.sigmoid(alpha - math.log(-l/r)).flatten())
l0 = torch.cat(l0s).mean()
elif self.dp_step >= self.dp_burnin_step:
l0 = torch.tensor(0)
if self.dp_step == self.dp_burnin_step:
for p in self.dp_mask:
alpha = getattr(self.jiant_model.encoder, "ber__" + p.replace(".", "__"))
alpha.requires_grad = False
_, idx = torch.topk(alpha.flatten().abs(), k = int(float(self.user_mode['diffprun']) * alpha.numel()))
self.dp_mask[p].flatten()[idx] = 1.0
for p in parms:
if not p.startswith('encoder.w'):
continue
p = p.replace("encoder.w__", "").replace("__", ".")
w = getattr(self.jiant_model.encoder, "w__" + p.replace(".", "__"))
nw = self.jiant_model.ori_pars[p] + self.dp_mask[p] * w
node = self.jiant_model
pnames = p.split(".")
for pname in pnames[:-1]:
node = getattr(node, pname)
delattr(node, pnames[-1])
setattr(node, pnames[-1], nw)
for i in range(task_specific_config.gradient_accumulation_steps):
batch, batch_metadata = train_dataloader_dict[task_name].pop()
batch = batch.to(self.device)
if 'prompt' in self.user_mode:
ptsize = int(self.user_mode['prompt'])
input_ids = batch.input_ids.new_zeros([batch.input_ids.shape[0], ptsize])
input_ids[:] = torch.arange(ptsize) + self.jiant_model.encoder.embeddings.word_embeddings.num_embeddings
batch.input_ids = torch.cat([input_ids, batch.input_ids], 1)
batch.input_mask = torch.cat([torch.ones_like(input_ids), batch.input_mask], 1)
batch.segment_ids = torch.cat([torch.zeros_like(input_ids), batch.segment_ids], 1)
elif 'qapp_functorch' in self.user_mode:#functorch
model = copy.deepcopy(self.jiant_model)
model.eval()
func, params, buffers = make_functional_with_buffers(model)
def compute_loss(params, buffers):
y = func(params, buffers, batch=batch, task=task, compute_loss=True)
return y['loss']
grad(grad(compute_loss))(params, buffers)
elif 'qapp' in self.user_mode:
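                # qapp: curvature probe via a Hessian-vector product with an all-ones vector, computed on a frozen functional copy of the model.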
if not hasattr(self, 'q_h'):
self.q_h = {}
if not hasattr(self, 'bs_step') or self.bs_step < self.bs_burnin_step:
#import copy
model = copy.deepcopy(self.jiant_model)
model.eval()
params, names = make_functional(model)
# Make params regular Tensors instead of nn.Parameter
params = tuple(p.detach().requires_grad_() for p in params)
# your forward function with update
def forward(*new_params):
# this line replace your for loop
load_weights(model, names, new_params)
model_output = wrap_jiant_forward(
jiant_model=model, batch=batch, task=task, compute_loss=True,
)
return model_output.loss
ones = tuple([torch.ones_like(p) for p in params])
hv = hvp(forward, params, ones)[1]
hvs = {name : t for name, t in zip(names, hv)}
elif 'hda_backpack' in self.user_mode:
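                # hda_backpack: per-parameter diagonal-Hessian statistics via BackPACK's DiagHessian extensions.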
from backpack.extensions import BatchDiagHessian, DiagHessian
from backpack import backpack, extend
model = copy.deepcopy(self.jiant_model)
model = extend(model)
model.eval()
model_output = wrap_jiant_forward(
jiant_model=model, batch=batch, task=task, compute_loss=False,
)
loss_fct = extend(nn.CrossEntropyLoss())
loss = loss_fct(model_output.logits.view(-1, self.head.num_labels), batch.label_id.view(-1),)
with backpack(DiagHessian(), BatchDiagHessian()):
loss = self.complex_backpropagate(
loss=loss,
gradient_accumulation_steps=task_specific_config.gradient_accumulation_steps,
#retain_graph = retain_graph
)
elif 'hda' in self.user_mode:
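                # hda: Hutchinson-style diagonal-Hessian estimate, v * (Hv) with a random +/-1 probe vector, accumulated into self.hvs during burn-in.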
if not hasattr(self, 'hvs'):
self.hvs = {}
if not hasattr(self, 'bs_step') or self.bs_step < self.bs_burnin_step:
#import copy
model = copy.deepcopy(self.jiant_model)
model.eval()
params, names = make_functional(model)
# Make params regular Tensors instead of nn.Parameter
params = tuple(p.detach().requires_grad_() for p in params)
# your forward function with update
def forward(*new_params):
# this line replace your for loop
load_weights(model, names, new_params)
model_output = wrap_jiant_forward(
jiant_model=model, batch=batch, task=task, compute_loss=True,
)
return model_output.loss
rad = tuple([(torch.rand_like(p) > 0.5).float() * 2 - 1 for p in params])
N = 10
for i in range(N):
hv = vhp(forward, params, rad)[1]
for name, r, t in zip(names, rad, hv):
self.hvs[name] = t*r / N if name not in self.hvs else self.hvs[name] + t*r / N
model_output = wrap_jiant_forward(
jiant_model=self.jiant_model, batch=batch, task=task, compute_loss=True,
)
if 'diffprun' in self.user_mode:
lbd = float(self.user_mode['lambda']) if ('lambda' in self.user_mode) else 1.
model_output.loss = model_output.loss + lbd * l0
if 'l2sp' in self.user_mode:
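                # L2-SP regularizer: penalize the squared distance of the (non-task-head) weights from their values at the start of training.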
lbd = float(self.user_mode['l2sp']) if ('l2sp' in self.user_mode and self.user_mode['l2sp'] is not None) else 1.
parms = dict(self.jiant_model.named_parameters())
if not hasattr(self, "l2sp_w0"):
#import copy
self.l2sp_w0 = {}
for p in parms:
if 'taskmodels_dict' in p:
continue
self.l2sp_w0[p] = copy.deepcopy(parms[p].data)
rs = []
for p in self.l2sp_w0:
rs.append((parms[p] - self.l2sp_w0[p]).flatten()**2)
model_output.loss = model_output.loss + lbd * torch.cat(rs).mean()
if 'lnsr' in self.user_mode:
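                # lnsr: noise-stability regularizer; perturb the word embeddings with Gaussian noise and penalize the squared change in the model's outputs.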
#import copy
lbd = float(self.user_mode['lambda']) if ('lambda' in self.user_mode) else 1.
embbak = self.jiant_model.encoder.embeddings.word_embeddings.weight.data
self.jiant_model.encoder.embeddings.word_embeddings.weight.data = copy.deepcopy(embbak) + torch.randn_like(embbak) * 0.01
with torch.no_grad():
model_output1 = wrap_jiant_forward(
jiant_model=self.jiant_model, batch=batch, task=task, compute_loss=True,
)
self.jiant_model.encoder.embeddings.word_embeddings.weight.data = embbak
a, b = model_output.other[-1], model_output1.other[-1]
if type(a) is list:
a, b = torch.cat(a), torch.cat(b)
model_output.loss = model_output.loss + lbd * ((a - b)**2).mean()
loss = self.complex_backpropagate(
loss=model_output.loss,
gradient_accumulation_steps=task_specific_config.gradient_accumulation_steps,
#retain_graph = retain_graph
)
loss_val += loss.item()
if 'bottleneck' in self.user_mode:
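            # bottleneck: only output.dense layers and the task head are updated, and each output.dense weight keeps gradients only for its first 20% of input dimensions.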
parms = dict(self.jiant_model.named_parameters())
for p in parms:
if 'output.dense' not in p and 'taskmodels_dict' not in p:
parms[p].grad = None
continue
elif 'output.dense' in p and 'weight' in p:
parms[p].grad[:, int(parms[p].shape[1] * 0.2):] = 0
elif 'fixptm' in self.user_mode:
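            # fixptm: freeze the pretrained encoder entirely; only task-head parameters keep their gradients.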
parms = dict(self.jiant_model.named_parameters())
for p in parms:
if 'taskmodels_dict' not in p:
parms[p].grad = None
elif 'randmask' in self.user_mode:
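            # randmask: keep gradients only for a random fraction (roughly user_mode['randmask']) of each encoder tensor; the mask is drawn once and reused.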
parms = dict(self.jiant_model.named_parameters())
if not hasattr(self, "grad_mask"):
self.grad_mask = {}
for p in parms:
#if p.startswith('encoder') and ('attention' in p or 'embeddings' in p): # quite good why?
if p.startswith('encoder'):
self.grad_mask[p] = torch.rand_like(parms[p]) > float(self.user_mode['randmask'])
for p in parms:
if p in self.grad_mask and parms[p].grad is not None:
parms[p].grad.masked_fill_(self.grad_mask[p], 0.)
elif 'psearch' in self.user_mode:# DEPRECATED: It still changes all parms
parms = dict(self.jiant_model.named_parameters())
if not hasattr(self, "ps_mask"):
self.ps_mask = {}
self.ps_accu = {}
self.ps_masked = {}
self.ps_finished = {}
self.ps_step = 0
self.ps_update = 10
self.ps_r = float(self.user_mode['psearch']) / 2
for p in parms:
self.ps_mask[p] = torch.zeros_like(parms[p]).bool()
self.ps_masked[p] = 0
self.ps_finished[p] = False
self.ps_accu[p] = 0
for p in parms:
if parms[p].grad is not None:
self.ps_accu[p] = self.ps_accu[p] + parms[p].grad.abs()
if not self.ps_finished[p] and self.ps_step % self.ps_update == self.ps_update - 1 and self.ps_step > 1 and p.startswith('encoder'):
remain = self.ps_accu[p].masked_fill(self.ps_mask[p], float('inf'))
size = parms[p].numel()
_, idx = torch.topk(-remain.flatten(), k = int(self.ps_r * size))
newm = self.ps_masked[p] + len(idx)
if newm >= (1 - float(self.user_mode['psearch'])) * size:
self.ps_finished[p] = True
print("%s : Fixed."%p)
continue
self.ps_mask[p].flatten()[idx] = True
if p in self.ps_mask:
parms[p].grad.masked_fill_(self.ps_mask[p], 0.)
self.ps_step += 1
elif 'bsearch' in self.user_mode:
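            # bsearch: accumulate per-parameter importance scores (gradient, |grad|, grad^2, or curvature-weighted variants) during a burn-in phase, then permanently zero the gradients of all non-selected encoder entries.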
parms = dict(self.jiant_model.named_parameters())
if not hasattr(self, "bs_mask"):
self.bs_burnin_step = 500 if 'burnin' not in self.user_mode else int(self.user_mode['burnin'])
self.bs_step = 0
self.bs_mask = {}
self.bs_accu = {}
for p in parms:
self.bs_mask[p] = torch.ones_like(parms[p]).bool()
self.bs_accu[p] = 0
if "happ" in self.user_mode:
self.happ_accu = {}
self.happ_prev = {}
for p in parms:
self.happ_accu[p] = 0
self.happ_prev[p] = 0
self.bs_step += 1
if self.bs_step < self.bs_burnin_step:
if 'arand' in self.user_mode:
self.jiant_model.eval()
for p in parms:
if parms[p].grad is not None:
if "fisher" in self.user_mode:
self.bs_accu[p] = self.bs_accu[p] + parms[p].grad**2
elif "abs" in self.user_mode:
self.bs_accu[p] = self.bs_accu[p] + parms[p].grad.abs()
elif "m1" in self.user_mode:
self.q_h[p] = hvs[p] if p not in self.q_h else self.q_h[p] + hvs[p]
self.bs_accu[p] = self.bs_accu[p] + parms[p].grad
elif "qapp" in self.user_mode:
nhv = 1. / hvs[p] / 1000
nhv[(nhv > 1.) | (nhv < -1.)] = 0
self.bs_accu[p] = self.bs_accu[p] + parms[p].grad * nhv
elif "happ" in self.user_mode:
self.bs_accu[p] = self.bs_accu[p] + parms[p].grad
self.happ_accu[p] += (parms[p].grad - (self.happ_prev[p] if p in self.happ_prev else 0)).abs()
self.happ_prev[p] = parms[p].grad
else:
self.bs_accu[p] = self.bs_accu[p] + parms[p].grad
if "fdm" in self.user_mode:
hmodel = copy.deepcopy(self.jiant_model)
hparms = dict(hmodel.named_parameters())
for p in hparms:
hparms[p].data = hparms[p].data * 0.9
hmodel_output = wrap_jiant_forward(
jiant_model=hmodel, batch=batch, task=task, compute_loss=True,
)
hloss = self.complex_backpropagate(
loss=hmodel_output.loss,
gradient_accumulation_steps=task_specific_config.gradient_accumulation_steps,
#retain_graph = retain_graph
)
if not hasattr(self, 'fdm_hessian'):
self.fdm_hessian = {p: 0 for p in parms}
hparms = dict(hmodel.named_parameters())
for p in parms:
hpp = (parms[p].grad - hparms[p].grad) / (0.1 * parms[p].data)
hpp[hpp.isnan()] = 0
self.fdm_hessian[p] += hpp
self.optimizer_scheduler.optimizer.zero_grad()
return
elif self.bs_step == self.bs_burnin_step:
for p in parms:
if parms[p].grad is None:
continue
if "vanish" in self.user_mode:
_, idx = torch.topk(-self.bs_accu[p].flatten().abs(), k = int(float(self.user_mode['bsearch']) * parms[p].numel()))
elif "m1" in self.user_mode:
#score = self.bs_accu[p] * (1. / self.q_h[p]).clamp(min=-100, max=100) / 100
score = self.bs_accu[p] * ((1. / self.q_h[p]).sigmoid()*2-1)
_, idx = torch.topk(score.flatten().abs(), k = int(float(self.user_mode['bsearch']) * parms[p].numel()))
elif "happ" in self.user_mode:
score = self.bs_accu[p].abs() - self.happ_accu[p]
score[score.isnan()] = 0
_, idx = torch.topk(score.flatten(), k = int(float(self.user_mode['bsearch']) * parms[p].numel()))
elif "fdm" in self.user_mode:
#self.fdm_hessian[p][self.fdm_hessian[p].abs() < 1e-3] = 0
scale = float(self.user_mode['fdm']) if 'fdm' in self.user_mode else 1.
score = (self.bs_accu[p] * ((1. / self.fdm_hessian[p] * scale).sigmoid()*2-1) ).abs()
score[score.isnan()] = 0
_, idx = torch.topk(score.flatten(), k = int(float(self.user_mode['bsearch']) * parms[p].numel()))
elif 'hda_v1' in self.user_mode:
r = float(self.user_mode['hda']) if self.user_mode['hda'] is not None else 0.1
ksize = 11#self.hvs[p].numel() // 10
h = torch.nn.functional.avg_pool1d(torch.nn.functional.pad(self.hvs[p].flatten(), (ksize // 2, ksize // 2)).unsqueeze(0), ksize, stride = 1).abs()
h = h.squeeze(0)
m = h[h!=0].mean()
h1 = h * r + (1 - r) * m
h2 = (1 / h1)
s = (h2 < 1).float()
h3 = s * h2 + (1 - s) * (1 + h2.log10())
print(h3.max(), h3.min(), (h3.max() - h3.min()) / h3.min())
score = (self.bs_accu[p].flatten() * h3 ).abs()
_, idx = torch.topk(score, k = int(float(self.user_mode['bsearch']) * parms[p].numel()))
elif 'hda' in self.user_mode:
r = float(self.user_mode['hda']) if self.user_mode['hda'] is not None else 0.005
score = (2 * self.bs_accu[p].flatten().abs().log10() - self.hvs[p].flatten().abs().clamp(0.1).log10() * r)
b = self.hvs[p].flatten().abs().clamp(0.1).log10() * r
a = 2 * self.bs_accu[p].flatten().abs().log10()
#print(b.min().item(), b.max().item(), b.max().item() - b.min().item(), a.max().item())
_, idx = torch.topk(score, k = int(float(self.user_mode['bsearch']) * parms[p].numel()))
_, idx1 = torch.topk(a, k = int(float(self.user_mode['bsearch']) * parms[p].numel()))
sidx = set(idx1.tolist())
print(len([i for i in idx.tolist() if i in sidx]) / max(len(idx), 1))
else:
_, idx = torch.topk(self.bs_accu[p].flatten().abs(), k = int(float(self.user_mode['bsearch']) * parms[p].numel()))
self.bs_mask[p].flatten()[idx] = False
if 'arand' in self.user_mode:
self.jiant_model.train()
for p in parms:
if p in self.bs_mask and p.startswith('encoder') and parms[p].grad is not None:
parms[p].grad.masked_fill_(self.bs_mask[p], 0.)
elif 'magprun' in self.user_mode:# former impabs
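            # magprun: after burn-in, keep gradients only for the largest-magnitude weights (magnitude pruning of the update).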
parms = dict(self.jiant_model.named_parameters())
if not hasattr(self, "mag_step"):
self.mag_burnin_step = 500 if 'burnin' not in self.user_mode else int(self.user_mode['burnin'])
self.mag_step = 0
self.mag_step += 1
if self.mag_step == self.mag_burnin_step and not hasattr(self, "ia_mask"):
self.ia_mask = {}
for p in parms:
self.ia_mask[p] = torch.ones_like(parms[p]).bool()
_, idx = torch.topk(parms[p].abs().flatten(), k = int(float(self.user_mode['magprun']) * parms[p].numel()))
self.ia_mask[p].flatten()[idx] = False
for p in parms:
if hasattr(self, 'ia_mask') and p in self.ia_mask and p.startswith('encoder') and parms[p].grad is not None:
parms[p].grad.masked_fill_(self.ia_mask[p], 0.)
elif 'impsa' in self.user_mode:
parms = dict(self.jiant_model.named_parameters())
if not hasattr(self, "isa_mask"):
self.isa_mask = {}
plist = []
for p in parms:
self.isa_mask[p] = torch.ones_like(parms[p]).bool()
plist.append(parms[p].data.cpu().flatten())
apars = torch.cat(plist)
_, idx = torch.topk(apars, k = int(float(self.user_mode['impsa']) * apars.numel()))
startid = 0
for p in parms:
pids = idx.masked_select((startid <= idx) & (idx < startid + parms[p].numel()))
pids -= startid
self.isa_mask[p].flatten()[pids] = False
startid += parms[p].numel()
if pids.numel() == 0 and p.startswith('encoder'):
parms[p].requires_grad = False
for p in parms:
if p in self.isa_mask and p.startswith('encoder') and parms[p].requires_grad:
parms[p].grad.masked_fill_(self.isa_mask[p], 0.)
elif 'impback' in self.user_mode:
parms = dict(self.jiant_model.named_parameters())
if not hasattr(self, "ib_mask"):
self.ib_burnin_step = 1000
self.ib_step = 0
self.ib_mask = {}
self.ib_weights = {}
for p in parms:
self.ib_mask[p] = torch.ones_like(parms[p]).bool()
self.ib_weights[p] = parms[p].detach().cpu()
self.ib_step += 1
if self.ib_step == self.ib_burnin_step:
for p in parms:
_, idx = torch.topk(parms[p].abs().flatten(), k = int(float(self.user_mode['impback']) * parms[p].numel()))
parms[p].data[:] = self.ib_weights[p]
self.ib_mask[p].flatten()[idx] = False
if self.ib_step >= self.ib_burnin_step:
for p in parms:
if p in self.ib_mask and p.startswith('encoder'):
parms[p].grad.masked_fill_(self.ib_mask[p], 0.)
elif 'bitfit' in self.user_mode:
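            # BitFit: zero every gradient except bias terms and the task head.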
parms = dict(self.jiant_model.named_parameters())
for p in parms:
if not p.endswith(".bias") and "taskmodels_dict" not in p and parms[p].grad is not None:
parms[p].grad[:] = 0
elif 'gproj' in self.user_mode:
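            # gproj: periodically project the weights back to their pretrained values except for the top-k entries that moved the most; after burn-in the selection is frozen via gradient masks.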
if not hasattr(self, "gp_step"):
#import copy
parms = dict(self.jiant_model.named_parameters())
self.w0 = copy.deepcopy(parms)
self.gp_step = 0
self.gp_mask = {}
self.gp_burnin_step = 1e100 if 'burnin' not in self.user_mode else int(self.user_mode['burnin'])
self.gp_gstep = 1 if 'gstep' not in self.user_mode else int(self.user_mode['gstep'])
self.gp_step += 1
if self.gp_step % self.gp_gstep == 0 and self.gp_step <= self.gp_burnin_step:
parms = dict(self.jiant_model.named_parameters())
for p in parms:
if parms[p].grad is None or not p.startswith('encoder'):
continue
_, idx = torch.topk((parms[p] - self.w0[p]).flatten().abs(), k = int(float(self.user_mode['gproj']) * parms[p].numel()))
self.gp_mask[p] = torch.ones_like(parms[p]).bool()
self.gp_mask[p].flatten()[idx] = False
print("The masked_select error has not been fiexed!!!")
parms[p].data.masked_select(self.gp_mask[p])[:] = self.w0[p].masked_select(self.gp_mask[p])
if self.gp_step == self.gp_burnin_step and 'reset' in self.user_mode:
for p in parms:
parms[p].data[:] = self.w0[p].data
elif self.gp_step > self.gp_burnin_step:
parms = dict(self.jiant_model.named_parameters())
for p in parms:
if p in self.gp_mask and p.startswith('encoder') and parms[p].grad is not None:
parms[p].grad.masked_fill_(self.gp_mask[p], 0.)
elif 'sgpa' in self.user_mode:
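            # sgpa: like gproj, but the projection back to pretrained values is applied after the optimizer step (see below), optionally smoothing the weight-change score with momentum.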
if not hasattr(self, "gp_step"):
#import copy
parms = dict(self.jiant_model.named_parameters())
self.w0 = copy.deepcopy(parms)
self.gp_step = 0
self.gp_mask = {}
self.gp_burnin_step = 1e100 if 'burnin' not in self.user_mode else int(self.user_mode['burnin'])
self.gp_gstep = 1 if 'gstep' not in self.user_mode else int(self.user_mode['gstep'])
if 'mmt' in self.user_mode:
self.gp_mmt = {}
self.gp_mmtr = 0.3 if self.user_mode['mmt'] is None else float(self.user_mode['mmt'])
self.gp_step += 1
if self.gp_step > self.gp_burnin_step:
parms = dict(self.jiant_model.named_parameters())
for p in parms:
if p in self.gp_mask and p.startswith('encoder') and parms[p].grad is not None:
parms[p].grad.masked_fill_(self.gp_mask[p], 0.)
self.optimizer_scheduler.step()
self.optimizer_scheduler.optimizer.zero_grad()
if 'sgpa' in self.user_mode and self.gp_step <= self.gp_burnin_step:
parms = dict(self.jiant_model.named_parameters())
if self.gp_step % self.gp_gstep == 0:
for p in parms:
if parms[p].grad is None or not p.startswith('encoder'):
continue
with torch.no_grad():
diff = (parms[p] - self.w0[p]).flatten().abs()
if "mmt" in self.user_mode:
diff = self.gp_mmtr * diff + (1-self.gp_mmtr) * self.gp_mmt[p] if p in self.gp_mmt else diff
self.gp_mmt[p] = diff
_, idx = torch.topk(diff, k = int(float(self.user_mode['sgpa']) * parms[p].numel()))
self.gp_mask[p] = torch.ones_like(parms[p]).bool()
self.gp_mask[p].flatten()[idx] = False
#parms[p].data.masked_select(self.gp_mask[p])[:] = self.w0[p].masked_select(self.gp_mask[p])
parms[p].data[:] = parms[p].data[:] * (~self.gp_mask[p]) + self.w0[p] * self.gp_mask[p]
if self.gp_step == self.gp_burnin_step and 'reset' in self.user_mode:
for p in parms:
parms[p].data[:] = self.w0[p].data
train_state.step(task_name=task_name)
entry = {
"task": task_name,
"task_step": train_state.task_steps[task_name],
"global_step": train_state.global_steps,
"loss_val": loss_val / task_specific_config.gradient_accumulation_steps,
}
if 'diffprun' in self.user_mode:
entry["loss_val"] = entry["loss_val"] - l0.item()
entry["loss_l0"] = l0.item()
self.log_writer.write_entry(
"loss_train",
entry,
)
pbar.set_postfix({'loss': loss_val / task_specific_config.gradient_accumulation_steps})
def run_val(self, task_name_list, use_subset=None, return_preds=False, verbose=True, phase = "val"):
print("Log Dir:", self.log_writer.tb_writer.logdir)
evaluate_dict = {}
val_dataloader_dict = self.get_val_dataloader_dict(
task_name_list=task_name_list, use_subset=use_subset, phase = phase
)
val_labels_dict = self.get_val_labels_dict(
task_name_list=task_name_list, use_subset=use_subset, label_phase = phase
)
emodel = self.jiant_model
if 'mixout' in self.user_mode:
#import copy
emodel = copy.deepcopy(self.jiant_model)
parms = dict(emodel.named_parameters())
for p in parms:
if not p.startswith("encoder."):
continue
node = emodel.encoder
node0 = self.encoder0
pnames = p.split(".")
for pname in pnames[1:-1]:
node = getattr(node, pname)
node0 = getattr(node0, pname)
msk = (torch.rand_like(getattr(node, pnames[-1])) < float(self.user_mode['mixout'])).float()
nw = (1 - msk) * getattr(node, pnames[-1]) + msk * getattr(node0, pnames[-1])
delattr(node, pnames[-1])
setattr(node, pnames[-1], nw)
for task_name in task_name_list:
task = self.jiant_task_container.task_dict[task_name]
evaluate_dict[task_name] = run_val(
val_dataloader=val_dataloader_dict[task_name],
val_labels=val_labels_dict[task_name],
jiant_model=emodel,
task=task,
device=self.device,
local_rank=self.rparams.local_rank,
return_preds=return_preds,
verbose=verbose,
tag = phase,#maple
user_mode = self.user_mode,
)
return evaluate_dict
def run_test(self, task_name_list, verbose=True):
evaluate_dict = {}
test_dataloader_dict = self.get_test_dataloader_dict()
for task_name in task_name_list:
task = self.jiant_task_container.task_dict[task_name]
evaluate_dict[task_name] = run_test(
test_dataloader=test_dataloader_dict[task_name],
jiant_model=self.jiant_model,
task=task,
device=self.device,
local_rank=self.rparams.local_rank,
verbose=verbose,
)
return evaluate_dict
def get_train_dataloader_dict(self):
# Not currently supported distributed parallel
train_dataloader_dict = {}
for task_name in self.jiant_task_container.task_run_config.train_task_list:
task = self.jiant_task_container.task_dict[task_name]
train_cache = self.jiant_task_container.task_cache_dict[task_name]["train"]
train_batch_size = self.jiant_task_container.task_specific_configs[
task_name
].train_batch_size
train_dataloader_dict[task_name] = InfiniteYield(
get_train_dataloader_from_cache(
train_cache=train_cache, task=task, train_batch_size=train_batch_size,
)
)
return train_dataloader_dict
def _get_eval_dataloader_dict(self, phase, task_name_list, use_subset=False):
val_dataloader_dict = {}
for task_name in task_name_list:
task = self.jiant_task_container.task_dict[task_name]
eval_cache = self.jiant_task_container.task_cache_dict[task_name][phase]
task_specific_config = self.jiant_task_container.task_specific_configs[task_name]
val_dataloader_dict[task_name] = get_eval_dataloader_from_cache(
eval_cache=eval_cache,
task=task,
eval_batch_size=task_specific_config.eval_batch_size,
subset_num=task_specific_config.eval_subset_num if use_subset else None,
)
return val_dataloader_dict
def get_val_dataloader_dict(self, task_name_list, use_subset=False, phase = "val"):
return self._get_eval_dataloader_dict(
phase, task_name_list=task_name_list, use_subset=use_subset,
)
def get_val_labels_dict(self, task_name_list, use_subset=False, label_phase = "val"):
val_labels_dict = {}
for task_name in task_name_list:
task_specific_config = self.jiant_task_container.task_specific_configs[task_name]
val_labels_cache = self.jiant_task_container.task_cache_dict[task_name][label_phase + "_labels"]
val_labels = val_labels_cache.get_all()
if use_subset:
val_labels = val_labels[: task_specific_config.eval_subset_num]
val_labels_dict[task_name] = val_labels
return val_labels_dict
def get_test_dataloader_dict(self):
return self._get_eval_dataloader_dict(
task_name_list=self.jiant_task_container.task_run_config.test_task_list,
phase=PHASE.TEST,
)
def complex_backpropagate(self, loss, gradient_accumulation_steps, retain_graph = False):
return complex_backpropagate(
loss=loss,
optimizer=self.optimizer_scheduler.optimizer,
model=self.jiant_model,
fp16=self.rparams.fp16,
n_gpu=self.rparams.n_gpu,
gradient_accumulation_steps=gradient_accumulation_steps,
max_grad_norm=self.rparams.max_grad_norm,
retain_graph = retain_graph
)
def get_runner_state(self):
# TODO: Add fp16 (issue #1186)
state = {
"model": torch_utils.get_model_for_saving(self.jiant_model).state_dict(),
"optimizer": self.optimizer_scheduler.optimizer.state_dict(),
}
return state
def load_state(self, runner_state):
torch_utils.get_model_for_saving(self.jiant_model).load_state_dict(runner_state["model"])
self.optimizer_scheduler.optimizer.load_state_dict(runner_state["optimizer"])
class CheckpointSaver:
def __init__(self, metadata, save_path):
self.metadata = metadata
self.save_path = save_path
def save(self, runner_state: dict, metarunner_state: dict):
to_save = {
"runner_state": runner_state,
"metarunner_state": metarunner_state,
"metadata": self.metadata,
}
torch_utils.safe_save(to_save, self.save_path)
def run_val(
val_dataloader,
val_labels,
jiant_model: JiantModel,
task,
device,
local_rank,
return_preds=False,
verbose=True,
tag="Val",
user_mode = None,
):
# Reminder:
# val_dataloader contains mostly PyTorch-relevant info
# val_labels might contain more details information needed for full evaluation
if not local_rank == -1:
return
jiant_model.eval()
total_eval_loss = 0
nb_eval_steps, nb_eval_examples = 0, 0
evaluation_scheme = evaluate.get_evaluation_scheme_for_task(task=task)
eval_accumulator = evaluation_scheme.get_accumulator()
for step, (batch, batch_metadata) in enumerate(
maybe_tqdm(val_dataloader, desc=f"Eval ({task.name}, {tag})", verbose=verbose)
):
batch = batch.to(device)
if user_mode is not None and 'prompt' in user_mode:
ptsize = int(user_mode['prompt'])
input_ids = batch.input_ids.new_zeros([batch.input_ids.shape[0], ptsize])
input_ids[:] = torch.arange(ptsize) + jiant_model.encoder.embeddings.word_embeddings.num_embeddings
batch.input_ids = torch.cat([input_ids, batch.input_ids], 1)
batch.input_mask = torch.cat([torch.ones_like(input_ids), batch.input_mask], 1)
batch.segment_ids = torch.cat([torch.zeros_like(input_ids), batch.segment_ids], 1)
with torch.no_grad():
model_output = wrap_jiant_forward(
jiant_model=jiant_model, batch=batch, task=task, compute_loss=True,
)
batch_logits = model_output.logits.detach().cpu().numpy()
batch_loss = model_output.loss.mean().item()
total_eval_loss += batch_loss
eval_accumulator.update(
batch_logits=batch_logits,
batch_loss=batch_loss,
batch=batch,
batch_metadata=batch_metadata,
)
nb_eval_examples += len(batch)
nb_eval_steps += 1
eval_loss = total_eval_loss / nb_eval_steps
tokenizer = (
jiant_model.tokenizer
if not torch_utils.is_data_parallel(jiant_model)
else jiant_model.module.tokenizer
)
output = {
"accumulator": eval_accumulator,
"loss": eval_loss,
"metrics": evaluation_scheme.compute_metrics_from_accumulator(
task=task, accumulator=eval_accumulator, labels=val_labels, tokenizer=tokenizer,
),
}
if return_preds:
output["preds"] = evaluation_scheme.get_preds_from_accumulator(
task=task, accumulator=eval_accumulator,
)
return output
def run_test(
test_dataloader,
jiant_model: JiantModel,
task,
device,
local_rank,
verbose=True,
return_preds=True,
):
if not local_rank == -1:
return
jiant_model.eval()
evaluation_scheme = evaluate.get_evaluation_scheme_for_task(task=task)
eval_accumulator = evaluation_scheme.get_accumulator()
for step, (batch, batch_metadata) in enumerate(
maybe_tqdm(test_dataloader, desc=f"Eval ({task.name}, Test)", verbose=verbose)
):
batch = batch.to(device)
with torch.no_grad():
model_output = wrap_jiant_forward(
jiant_model=jiant_model, batch=batch, task=task, compute_loss=False,
)
batch_logits = model_output.logits.detach().cpu().numpy()
eval_accumulator.update(
batch_logits=batch_logits, batch_loss=0, batch=batch, batch_metadata=batch_metadata,
)
output = {
"accumulator": eval_accumulator,
}
if return_preds:
output["preds"] = evaluation_scheme.get_preds_from_accumulator(
task=task, accumulator=eval_accumulator,
)
return output
| 45,479 | 47.177966 | 202 | py |
AnalyzeParameterEfficientFinetune | AnalyzeParameterEfficientFinetune-main/src/model_setup.py | import transformers
import torch
from jiant.ext.radam import RAdam
class OptimizerScheduler:
def __init__(self, optimizer, scheduler):
super().__init__()
self.optimizer = optimizer
self.scheduler = scheduler
def step(self):
self.optimizer.step()
self.scheduler.step()
def state_dict(self):
return {
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.state_dict(),
}
    def load_state_dict(self, state_dict, strict=True):
        # `strict` is kept for interface compatibility; torch optimizer/scheduler load_state_dict does not accept it
        self.optimizer.load_state_dict(state_dict["optimizer"])
        self.scheduler.load_state_dict(state_dict["scheduler"])
def create_optimizer(
model,
learning_rate,
t_total,
warmup_steps,
warmup_proportion,
optimizer_epsilon=1e-8,
optimizer_type="adam",
verbose=False,
):
return create_optimizer_from_params(
named_parameters=list(model.named_parameters()),
learning_rate=learning_rate,
t_total=t_total,
warmup_steps=warmup_steps,
warmup_proportion=warmup_proportion,
optimizer_epsilon=optimizer_epsilon,
optimizer_type=optimizer_type,
verbose=verbose,
)
def create_optimizer_from_params(
named_parameters,
learning_rate,
t_total,
warmup_steps,
warmup_proportion,
optimizer_epsilon=1e-8,
optimizer_type="adam",
verbose=False,
):
# Prepare optimizer
no_decay = [
"bias",
"LayerNorm.bias",
"LayerNorm.weight",
"adapter.down_project.weight",
"adapter.up_project.weight",
"weighted_sum.weights",
]
if verbose:
print("No optimizer decay for:")
for n, p in named_parameters:
if any(nd in n for nd in no_decay):
print(f" {n}")
used_named_parameters = [
(n, p) for n, p in named_parameters if p.requires_grad and "weighted_sum.weights" not in n and p.is_leaf
]
weighted_sum_params = [
(n, p) for n, p in named_parameters if p.requires_grad and "weighted_sum.weights" in n and p.is_leaf
]
optimizer_grouped_parameters = [
{
"params": [p for n, p in used_named_parameters if not any(nd in n for nd in no_decay)],
"weight_decay": 0.01,
},
{
"params": [p for n, p in used_named_parameters if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
{"params": [p for n, p in weighted_sum_params], "weight_decay": 0.0, "lr": 0.01},
]
if optimizer_type == "adam":
if verbose:
print("Using AdamW")
optimizer = transformers.AdamW(
optimizer_grouped_parameters, lr=learning_rate, eps=optimizer_epsilon
)
elif optimizer_type == "radam":
if verbose:
print("Using RAdam")
optimizer = RAdam(optimizer_grouped_parameters, lr=learning_rate, eps=optimizer_epsilon)
else:
raise KeyError(optimizer_type)
warmup_steps = resolve_warmup_steps(
t_total=t_total, warmup_steps=warmup_steps, warmup_proportion=warmup_proportion,
)
scheduler = transformers.get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total
)
optimizer_scheduler = OptimizerScheduler(optimizer=optimizer, scheduler=scheduler)
return optimizer_scheduler
def resolve_warmup_steps(t_total, warmup_steps, warmup_proportion):
    if warmup_steps is None and warmup_proportion is None:
        raise RuntimeError("Require either warmup_steps or warmup_proportion")
    elif warmup_steps is not None and warmup_proportion is not None:
        raise RuntimeError("Provide only one of warmup_steps and warmup_proportion")
    elif warmup_steps is None and warmup_proportion is not None:
        return warmup_proportion * t_total
    elif warmup_steps is not None and warmup_proportion is None:
        return warmup_steps
    else:
        raise RuntimeError()
def fp16ize(model, optimizer, fp16_opt_level):
try:
# noinspection PyUnresolvedReferences,PyPackageRequirements
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
)
model, optimizer = amp.initialize(model, optimizer, opt_level=fp16_opt_level)
return model, optimizer
def parallelize_gpu(model):
return torch.nn.DataParallel(model)
def parallelize_dist(model, local_rank):
return torch.nn.parallel.DistributedDataParallel(
model, device_ids=[local_rank], output_device=local_rank,
)
def raw_special_model_setup(model, optimizer, fp16, fp16_opt_level, n_gpu, local_rank):
"""Perform setup for special modes (e.g., FP16, DataParallel, and/or DistributedDataParallel.
Args:
model (nn.Module): torch model object.
optimizer: TODO
fp16 (bool): True to enable FP16 mode.
fp16_opt_level (str): Apex AMP optimization level default mode identifier.
n_gpu: number of GPUs.
local_rank (int): Which GPU the script should use in DistributedDataParallel mode.
Notes:
Initialization steps performed in init_cuda_from_args() set n_gpu = 1 when local_rank != -1.
Returns:
Model and optimizer with the specified special configuration.
"""
if fp16:
model, optimizer = fp16ize(model=model, optimizer=optimizer, fp16_opt_level=fp16_opt_level)
if n_gpu > 1:
model = parallelize_gpu(model=model)
if local_rank != -1:
model = parallelize_dist(model=model, local_rank=local_rank)
return model, optimizer
def special_model_setup(
model_wrapper, optimizer_scheduler, fp16, fp16_opt_level, n_gpu, local_rank
):
model, optimizer = raw_special_model_setup(
model=model_wrapper.model,
optimizer=optimizer_scheduler.optimizer,
fp16=fp16,
fp16_opt_level=fp16_opt_level,
n_gpu=n_gpu,
local_rank=local_rank,
)
model_wrapper.model = model
optimizer_scheduler.optimizer = optimizer
| 6,050 | 30.515625 | 112 | py |
AnalyzeParameterEfficientFinetune | AnalyzeParameterEfficientFinetune-main/src/main_runscript.py | import os
import torch
import datetime
import jiant.proj.main.modeling.model_setup as jiant_model_setup
import runner as jiant_runner
#import jiant.proj.main.components.container_setup as container_setup
#import jiant.proj.main.metarunner as jiant_metarunner
import metarunner as jiant_metarunner
#import jiant.proj.main.components.evaluate as jiant_evaluate
import evaluate as jiant_evaluate
import jiant.shared.initialization as initialization
import jiant.shared.distributed as distributed
#import jiant.shared.model_setup as model_setup
import model_setup
import jiant.utils.python.io as py_io
import jiant.utils.zconf as zconf
import zlog # maple
import container_setup # maple
@zconf.run_config
class RunConfiguration(zconf.RunConfig):
# === Required parameters === #
jiant_task_container_config_path = zconf.attr(type=str, required=True)
output_dir = zconf.attr(type=str, required=True)
# === Model parameters === #
hf_pretrained_model_name_or_path = zconf.attr(type=str, required=True)
model_path = zconf.attr(type=str, required=True)
model_config_path = zconf.attr(default=None, type=str)
model_load_mode = zconf.attr(default="from_transformers", type=str)
# === Running Setup === #
do_train = zconf.attr(action="store_true")
do_val = zconf.attr(action="store_true")
do_save = zconf.attr(action="store_true")
do_save_last = zconf.attr(action="store_true")
do_save_best = zconf.attr(action="store_true")
write_val_preds = zconf.attr(action="store_true")
write_test_preds = zconf.attr(action="store_true")
eval_every_steps = zconf.attr(type=int, default=0)
min_train_steps = zconf.attr(type=int, default=0)# maple
save_every_steps = zconf.attr(type=int, default=0)
save_checkpoint_every_steps = zconf.attr(type=int, default=0)
no_improvements_for_n_evals = zconf.attr(type=int, default=0)
keep_checkpoint_when_done = zconf.attr(action="store_true")
force_overwrite = zconf.attr(action="store_true")
seed = zconf.attr(type=int, default=-1)
# === Training Learning Parameters === #
learning_rate = zconf.attr(default=1e-5, type=float)
adam_epsilon = zconf.attr(default=1e-8, type=float)
max_grad_norm = zconf.attr(default=1.0, type=float)
optimizer_type = zconf.attr(default="adam", type=str)
# Specialized config
no_cuda = zconf.attr(action="store_true")
fp16 = zconf.attr(action="store_true")
fp16_opt_level = zconf.attr(default="O1", type=str)
local_rank = zconf.attr(default=-1, type=int)
server_ip = zconf.attr(default="", type=str)
server_port = zconf.attr(default="", type=str)
@zconf.run_config
class ResumeConfiguration(zconf.RunConfig):
checkpoint_path = zconf.attr(type=str)
def setup_runner(
args: RunConfiguration,
jiant_task_container: container_setup.JiantTaskContainer,
quick_init_out,
verbose: bool = True,
) -> jiant_runner.JiantRunner:
"""Setup jiant model, optimizer, and runner, and return runner.
Args:
args (RunConfiguration): configuration carrying command line args specifying run params.
jiant_task_container (container_setup.JiantTaskContainer): task and sampler configs.
quick_init_out (QuickInitContainer): device (GPU/CPU) and logging configuration.
verbose: If True, enables printing configuration info (to standard out).
Returns:
jiant_runner.JiantRunner
"""
# TODO document why the distributed.only_first_process() context manager is being used here.
with distributed.only_first_process(local_rank=args.local_rank):
# load the model
jiant_model = jiant_model_setup.setup_jiant_model(
hf_pretrained_model_name_or_path=args.hf_pretrained_model_name_or_path,
model_config_path=args.model_config_path,
task_dict=jiant_task_container.task_dict,
taskmodels_config=jiant_task_container.taskmodels_config,
)
jiant_model_setup.delegate_load_from_path(
jiant_model=jiant_model, weights_path=args.model_path, load_mode=args.model_load_mode
)
jiant_model.to(quick_init_out.device)
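    # Parse the comma-separated user_mode overrides into {key: value or None}; these select the parameter-efficient finetuning variant configured below.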
user_mode = {e.split('=')[0] : e.split('=')[1] if len(e.split('=')) > 1 else None for e in (args.user_mode[0].split(',') if type(args.user_mode) is not str else args.user_mode.split(',')) }
if 'sctmask' in user_mode:
import numpy as np
parms = dict(jiant_model.named_parameters())
max_len = max([parms[n].numel() for n in parms])
max_ids = list(range(max_len))
jiant_model.encoder.idx_dict = {}
for name in parms:
if not name.startswith('encoder'):
continue
fpar = parms[name].view(-1)
xname = "sctmask__" + name.replace(".", "__")
idx = torch.tensor(np.random.choice(max_ids[:len(fpar)], size = int(len(fpar) * float(user_mode['sctmask'])), replace = False)).to(fpar.device)
x = fpar[idx].detach()
x.requires_grad = True
jiant_model.encoder.idx_dict[xname] = idx
setattr(jiant_model.encoder, xname, torch.nn.Parameter(x))
x = getattr(jiant_model.encoder, xname)
parms[name].requires_grad = False
parms[name].flatten()[idx] = x
elif 'embtune' in user_mode:
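        # embtune: train only the embedding parameters and the task head; everything else in the encoder is frozen.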
parms = dict(jiant_model.named_parameters())
for p in parms:
if 'embeddings' not in p and 'taskmodels_dict' not in p:
parms[p].requires_grad = False
elif 'prompt' in user_mode:
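        # prompt tuning setup: freeze the encoder and append user_mode['prompt'] trainable prompt rows (hard-coded 768-dim) to the word-embedding matrix.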
parms = dict(jiant_model.named_parameters())
for name in parms:
if name.startswith('encoder'):
parms[name].requires_grad = False
jiant_model.encoder.embeddings.prompt_weight = torch.nn.Parameter(jiant_model.encoder.embeddings.word_embeddings.weight.new_zeros([int(user_mode['prompt']), 768]))
torch.nn.init.xavier_uniform_(jiant_model.encoder.embeddings.prompt_weight)
jiant_model.encoder.embeddings.prompt_weight.requires_grad = True
jiant_model.encoder.embeddings.word_embeddings.weight_ori = jiant_model.encoder.embeddings.word_embeddings.weight
jiant_model.encoder.embeddings.word_embeddings.weight = torch.nn.Parameter(torch.cat([jiant_model.encoder.embeddings.word_embeddings.weight_ori, jiant_model.encoder.embeddings.prompt_weight]))
elif 'diffprun' in user_mode:
parms = dict(jiant_model.named_parameters())
jiant_model.ori_pars = {}
for p in parms:
w = torch.zeros_like(parms[p])
wname = "w__" + p.replace(".", "__")
setattr(jiant_model.encoder, wname, torch.nn.Parameter(w))
bername = "ber__" + p.replace(".", "__")
ber = torch.randn_like(parms[p]).sigmoid()
setattr(jiant_model.encoder, bername, torch.nn.Parameter(ber))
jiant_model.ori_pars[p] = parms[p].data
parms[p].requires_grad = False
parms[p].detach_()
elif 'adapter' in user_mode:
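        # adapters: add an adapter module named "adapter" to the encoder and train only its weights; train_adapter freezes the pretrained parameters.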
jiant_model.encoder.add_adapter("adapter")
jiant_model.encoder.train_adapter("adapter")
jiant_model.encoder.to(jiant_model.encoder.device)
elif 'lora' in user_mode:
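        # LoRA: swap the attention and feed-forward linear layers for loralib low-rank (rank r) versions and train only the LoRA matrices.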
import loralib as lora
import math
import torch.nn as nn
r = int(user_mode['lora']) if ('lora' in user_mode) else 16
def set_lora(attn , name):
linear = getattr(attn, name)
q = lora.Linear(linear.in_features, linear.out_features, r).to(linear.weight.device)
q.weight.data[:] = linear.weight.data[:]
q.bias.data[:] = linear.bias.data[:]
nn.init.kaiming_uniform_(q.lora_B, a=math.sqrt(5))
setattr(attn, name, q)
for i in range(len(jiant_model.encoder.encoder.layer)):
set_lora(jiant_model.encoder.encoder.layer[i].attention.self, 'query')
set_lora(jiant_model.encoder.encoder.layer[i].attention.self, 'key')
set_lora(jiant_model.encoder.encoder.layer[i].attention.self, 'value')
set_lora(jiant_model.encoder.encoder.layer[i].attention.output, 'dense')
set_lora(jiant_model.encoder.encoder.layer[i].intermediate, 'dense')
set_lora(jiant_model.encoder.encoder.layer[i].output, 'dense')
lora.mark_only_lora_as_trainable(jiant_model)
optimizer_scheduler = model_setup.create_optimizer(
model=jiant_model,
learning_rate=args.learning_rate,
t_total=jiant_task_container.global_train_config.max_steps,
warmup_steps=jiant_task_container.global_train_config.warmup_steps,
warmup_proportion=None,
optimizer_type=args.optimizer_type,
verbose=verbose,
)
jiant_model, optimizer = model_setup.raw_special_model_setup(
model=jiant_model,
optimizer=optimizer_scheduler.optimizer,
fp16=args.fp16,
fp16_opt_level=args.fp16_opt_level,
n_gpu=quick_init_out.n_gpu,
local_rank=args.local_rank,
)
optimizer_scheduler.optimizer = optimizer
rparams = jiant_runner.RunnerParameters(
local_rank=args.local_rank,
n_gpu=quick_init_out.n_gpu,
fp16=args.fp16,
max_grad_norm=args.max_grad_norm,
)
runner = jiant_runner.JiantRunner(
jiant_task_container=jiant_task_container,
jiant_model=jiant_model,
optimizer_scheduler=optimizer_scheduler,
device=quick_init_out.device,
rparams=rparams,
log_writer=quick_init_out.log_writer,
)
runner.user_mode = user_mode
if 'mixout' in user_mode:
import copy
runner.encoder0 = copy.deepcopy(jiant_model.encoder)
return runner
def run_loop(args: RunConfiguration, checkpoint=None):
is_resumed = checkpoint is not None
quick_init_out = initialization.quick_init(args=args, verbose=True)
# maple
    quick_init_out.log_writer = zlog.ZLogger(os.path.join(args.log_dir, datetime.datetime.now().strftime("%Y%m%d%H%M%S")), overwrite=True)
print(quick_init_out.n_gpu)
with quick_init_out.log_writer.log_context():
jiant_task_container = container_setup.create_jiant_task_container_from_json(
jiant_task_container_config_path=args.jiant_task_container_config_path, verbose=True,
)
runner = setup_runner(
args=args,
jiant_task_container=jiant_task_container,
quick_init_out=quick_init_out,
verbose=True,
)
if is_resumed:
runner.load_state(checkpoint["runner_state"])
del checkpoint["runner_state"]
checkpoint_saver = jiant_runner.CheckpointSaver(
metadata={"args": args.to_dict()},
save_path=os.path.join(args.output_dir, "checkpoint.p"),
)
if args.do_train:
metarunner = jiant_metarunner.JiantMetarunner(
runner=runner,
save_every_steps=args.save_every_steps,
eval_every_steps=args.eval_every_steps,
min_train_steps = args.min_train_steps,
save_checkpoint_every_steps=args.save_checkpoint_every_steps,
no_improvements_for_n_evals=args.no_improvements_for_n_evals,
checkpoint_saver=checkpoint_saver,
output_dir=args.output_dir,
verbose=True,
save_best_model=args.do_save or args.do_save_best,
save_last_model=args.do_save or args.do_save_last,
load_best_model=True,
log_writer=quick_init_out.log_writer,
)
if is_resumed:
metarunner.load_state(checkpoint["metarunner_state"])
del checkpoint["metarunner_state"]
metarunner.run_train_loop()
if args.do_val:
val_results_dict = runner.run_val(
task_name_list=runner.jiant_task_container.task_run_config.val_task_list,
return_preds=args.write_val_preds,
)
jiant_evaluate.write_val_results(
val_results_dict=val_results_dict,
metrics_aggregator=runner.jiant_task_container.metrics_aggregator,
output_dir=args.output_dir,
verbose=True,
)
if args.write_val_preds:
jiant_evaluate.write_preds(
eval_results_dict=val_results_dict,
path=os.path.join(args.output_dir, "val_preds.p"),
)
else:
assert not args.write_val_preds
    if args.do_test:  # maple
test_results_dict = runner.run_val(
task_name_list=runner.jiant_task_container.task_run_config.test_task_list,
return_preds=False,
phase = "test"
)
jiant_evaluate.write_val_results(
val_results_dict=test_results_dict,
metrics_aggregator=runner.jiant_task_container.metrics_aggregator,
output_dir=args.output_dir,
verbose=True,
result_file = "test_metrics.json"
)
train_results_dict = runner.run_val(
task_name_list=runner.jiant_task_container.task_run_config.test_task_list,
return_preds=False,
phase = "train"
)
jiant_evaluate.write_val_results(
val_results_dict=train_results_dict,
metrics_aggregator=runner.jiant_task_container.metrics_aggregator,
output_dir=args.output_dir,
verbose=True,
result_file = "train_metrics.json"
)
if args.write_test_preds:
test_results_dict = runner.run_test(
task_name_list=runner.jiant_task_container.task_run_config.test_task_list,
)
jiant_evaluate.write_preds(
eval_results_dict=test_results_dict,
path=os.path.join(args.output_dir, "test_preds.p"),
)
if (
not args.keep_checkpoint_when_done
and args.save_checkpoint_every_steps
and os.path.exists(os.path.join(args.output_dir, "checkpoint.p"))
):
os.remove(os.path.join(args.output_dir, "checkpoint.p"))
py_io.write_file("DONE", os.path.join(args.output_dir, "done_file"))
def run_resume(args: ResumeConfiguration):
resume(checkpoint_path=args.checkpoint_path)
def resume(checkpoint_path):
checkpoint = torch.load(checkpoint_path)
args = RunConfiguration.from_dict(checkpoint["metadata"]["args"])
run_loop(args=args, checkpoint=checkpoint)
def run_with_continue(cl_args):
run_args = RunConfiguration.default_run_cli(cl_args=cl_args)
if not run_args.force_overwrite and (
os.path.exists(os.path.join(run_args.output_dir, "done_file"))
or os.path.exists(os.path.join(run_args.output_dir, "val_metrics.json"))
):
print("Already Done")
return
elif run_args.save_checkpoint_every_steps and os.path.exists(
os.path.join(run_args.output_dir, "checkpoint.p")
):
print("Resuming")
resume(os.path.join(run_args.output_dir, "checkpoint.p"))
else:
print("Running from start")
run_loop(args=run_args)
def main():
mode, cl_args = zconf.get_mode_and_cl_args()
if mode == "run":
run_loop(RunConfiguration.default_run_cli(cl_args=cl_args))
elif mode == "continue":
run_resume(ResumeConfiguration.default_run_cli(cl_args=cl_args))
elif mode == "run_with_continue":
run_with_continue(cl_args=cl_args)
else:
raise zconf.ModeLookupError(mode)
if __name__ == "__main__":
main()
| 15,719 | 41.95082 | 200 | py |
DeepSpeed | DeepSpeed-master/setup.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
DeepSpeed library
To build wheel on Windows:
1. Install pytorch, such as pytorch 1.12 + cuda 11.6.
2. Install visual cpp build tool.
3. Include cuda toolkit.
4. Launch cmd console with Administrator privilege for creating required symlink folders.
Create a new wheel via the following command:
build_win.bat
The wheel will be located at: dist/*.whl
"""
import os
import sys
import subprocess
from setuptools import setup, find_packages
from setuptools.command import egg_info
import time
torch_available = True
try:
import torch
except ImportError:
torch_available = False
print('[WARNING] Unable to import torch, pre-compiling ops will be disabled. ' \
'Please visit https://pytorch.org/ to see how to properly install torch on your system.')
from op_builder import get_default_compute_capabilities, OpBuilder
from op_builder.all_ops import ALL_OPS
from op_builder.builder import installed_cuda_version
# Fetch rocm state.
is_rocm_pytorch = OpBuilder.is_rocm_pytorch()
rocm_version = OpBuilder.installed_rocm_version()
RED_START = '\033[31m'
RED_END = '\033[0m'
ERROR = f"{RED_START} [ERROR] {RED_END}"
def abort(msg):
print(f"{ERROR} {msg}")
assert False, msg
def fetch_requirements(path):
with open(path, 'r') as fd:
return [r.strip() for r in fd.readlines()]
install_requires = fetch_requirements('requirements/requirements.txt')
extras_require = {
'1bit': [], # add cupy based on cuda/rocm version
'1bit_mpi': fetch_requirements('requirements/requirements-1bit-mpi.txt'),
'readthedocs': fetch_requirements('requirements/requirements-readthedocs.txt'),
'dev': fetch_requirements('requirements/requirements-dev.txt'),
'autotuning': fetch_requirements('requirements/requirements-autotuning.txt'),
'autotuning_ml': fetch_requirements('requirements/requirements-autotuning-ml.txt'),
'sparse_attn': fetch_requirements('requirements/requirements-sparse_attn.txt'),
'sparse': fetch_requirements('requirements/requirements-sparse_pruning.txt'),
'inf': fetch_requirements('requirements/requirements-inf.txt'),
'sd': fetch_requirements('requirements/requirements-sd.txt'),
'triton': fetch_requirements('requirements/requirements-triton.txt'),
}
# Add specific cupy version to both onebit extension variants.
if torch_available and torch.cuda.is_available():
cupy = None
if is_rocm_pytorch:
rocm_major, rocm_minor = rocm_version
# XXX cupy support for rocm 5 is not available yet.
if rocm_major <= 4:
cupy = f"cupy-rocm-{rocm_major}-{rocm_minor}"
else:
cuda_major_ver, cuda_minor_ver = installed_cuda_version()
if (cuda_major_ver < 11) or ((cuda_major_ver == 11) and (cuda_minor_ver < 3)):
cupy = f"cupy-cuda{cuda_major_ver}{cuda_minor_ver}"
else:
cupy = f"cupy-cuda{cuda_major_ver}x"
if cupy:
extras_require['1bit'].append(cupy)
extras_require['1bit_mpi'].append(cupy)
# Make an [all] extra that installs all needed dependencies.
all_extras = set()
for extra in extras_require.items():
for req in extra[1]:
all_extras.add(req)
extras_require['all'] = list(all_extras)
cmdclass = {}
# For any pre-installed ops force disable ninja.
if torch_available:
from accelerator import get_accelerator
cmdclass['build_ext'] = get_accelerator().build_extension().with_options(use_ninja=False)
if torch_available:
TORCH_MAJOR = torch.__version__.split('.')[0]
TORCH_MINOR = torch.__version__.split('.')[1]
else:
TORCH_MAJOR = "0"
TORCH_MINOR = "0"
if torch_available and not torch.cuda.is_available():
# Fix to allow docker builds, similar to https://github.com/NVIDIA/apex/issues/486.
print("[WARNING] Torch did not find cuda available, if cross-compiling or running with cpu only "
"you can ignore this message. Adding compute capability for Pascal, Volta, and Turing "
"(compute capabilities 6.0, 6.1, 6.2)")
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
os.environ["TORCH_CUDA_ARCH_LIST"] = get_default_compute_capabilities()
ext_modules = []
# Default to pre-install kernels to false so we rely on JIT on Linux, opposite on Windows.
BUILD_OP_PLATFORM = 1 if sys.platform == "win32" else 0
BUILD_OP_DEFAULT = int(os.environ.get('DS_BUILD_OPS', BUILD_OP_PLATFORM))
print(f"DS_BUILD_OPS={BUILD_OP_DEFAULT}")
if BUILD_OP_DEFAULT:
assert torch_available, "Unable to pre-compile ops without torch installed. Please install torch before attempting to pre-compile ops."
def command_exists(cmd):
if sys.platform == "win32":
result = subprocess.Popen(f'{cmd}', stdout=subprocess.PIPE, shell=True)
return result.wait() == 1
else:
result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True)
return result.wait() == 0
def op_envvar(op_name):
assert hasattr(ALL_OPS[op_name], 'BUILD_VAR'), \
f"{op_name} is missing BUILD_VAR field"
return ALL_OPS[op_name].BUILD_VAR
def op_enabled(op_name):
env_var = op_envvar(op_name)
return int(os.environ.get(env_var, BUILD_OP_DEFAULT))
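# A hedged illustration of the override mechanism above: every op exposes a BUILD_VAR
# environment variable (for example DS_BUILD_FUSED_ADAM for the fused Adam builder, assuming
# that is its BUILD_VAR), so a command such as
#   DS_BUILD_OPS=0 DS_BUILD_FUSED_ADAM=1 pip install .
# would pre-compile only that op and leave every other op to JIT compilation at runtime.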
compatible_ops = dict.fromkeys(ALL_OPS.keys(), False)
install_ops = dict.fromkeys(ALL_OPS.keys(), False)
for op_name, builder in ALL_OPS.items():
op_compatible = builder.is_compatible()
compatible_ops[op_name] = op_compatible
# If op is requested but not available, throw an error.
if op_enabled(op_name) and not op_compatible:
env_var = op_envvar(op_name)
if env_var not in os.environ:
builder.warning(f"One can disable {op_name} with {env_var}=0")
abort(f"Unable to pre-compile {op_name}")
# If op is compatible but install is not enabled (JIT mode).
if is_rocm_pytorch and op_compatible and not op_enabled(op_name):
builder.hipify_extension()
# If op install enabled, add builder to extensions.
if op_enabled(op_name) and op_compatible:
assert torch_available, f"Unable to pre-compile {op_name}, please first install torch"
install_ops[op_name] = op_enabled(op_name)
ext_modules.append(builder.builder())
print(f'Install Ops={install_ops}')
# Write out version/git info.
git_hash_cmd = "git rev-parse --short HEAD"
git_branch_cmd = "git rev-parse --abbrev-ref HEAD"
if command_exists('git') and 'DS_BUILD_STRING' not in os.environ:
try:
result = subprocess.check_output(git_hash_cmd, shell=True)
git_hash = result.decode('utf-8').strip()
result = subprocess.check_output(git_branch_cmd, shell=True)
git_branch = result.decode('utf-8').strip()
except subprocess.CalledProcessError:
git_hash = "unknown"
git_branch = "unknown"
else:
git_hash = "unknown"
git_branch = "unknown"
def create_dir_symlink(src, dest):
if not os.path.islink(dest):
if os.path.exists(dest):
os.remove(dest)
assert not os.path.exists(dest)
os.symlink(src, dest)
if sys.platform == "win32":
# This creates a symbolic links on Windows.
# It needs Administrator privilege to create symlinks on Windows.
create_dir_symlink('..\\..\\csrc', '.\\deepspeed\\ops\\csrc')
create_dir_symlink('..\\..\\op_builder', '.\\deepspeed\\ops\\op_builder')
create_dir_symlink('..\\accelerator', '.\\deepspeed\\accelerator')
egg_info.manifest_maker.template = 'MANIFEST_win.in'
# Parse the DeepSpeed version string from version.txt.
version_str = open('version.txt', 'r').read().strip()
# Build specifiers like .devX can be added at install time. Otherwise, add the git hash.
# Example: DS_BUILD_STRING=".dev20201022" python setup.py sdist bdist_wheel.
# Building wheel for distribution, update version file.
if 'DS_BUILD_STRING' in os.environ:
# Build string env specified, probably building for distribution.
with open('build.txt', 'w') as fd:
fd.write(os.environ.get('DS_BUILD_STRING'))
version_str += os.environ.get('DS_BUILD_STRING')
elif os.path.isfile('build.txt'):
# build.txt exists, probably installing from distribution.
with open('build.txt', 'r') as fd:
version_str += fd.read().strip()
else:
# None of the above, probably installing from source.
version_str += f'+{git_hash}'
torch_version = ".".join([TORCH_MAJOR, TORCH_MINOR])
bf16_support = False
# Set cuda_version to 0.0 if cpu-only.
cuda_version = "0.0"
nccl_version = "0.0"
# Set hip_version to 0.0 if cpu-only.
hip_version = "0.0"
if torch_available and torch.version.cuda is not None:
cuda_version = ".".join(torch.version.cuda.split('.')[:2])
if sys.platform != "win32":
if isinstance(torch.cuda.nccl.version(), int):
# This will break if minor version > 9.
nccl_version = ".".join(str(torch.cuda.nccl.version())[:2])
else:
nccl_version = ".".join(map(str, torch.cuda.nccl.version()[:2]))
if hasattr(torch.cuda, 'is_bf16_supported') and torch.cuda.is_available():
bf16_support = torch.cuda.is_bf16_supported()
if torch_available and hasattr(torch.version, 'hip') and torch.version.hip is not None:
hip_version = ".".join(torch.version.hip.split('.')[:2])
torch_info = {
"version": torch_version,
"bf16_support": bf16_support,
"cuda_version": cuda_version,
"nccl_version": nccl_version,
"hip_version": hip_version
}
print(f"version={version_str}, git_hash={git_hash}, git_branch={git_branch}")
with open('deepspeed/git_version_info_installed.py', 'w') as fd:
fd.write(f"version='{version_str}'\n")
fd.write(f"git_hash='{git_hash}'\n")
fd.write(f"git_branch='{git_branch}'\n")
fd.write(f"installed_ops={install_ops}\n")
fd.write(f"compatible_ops={compatible_ops}\n")
fd.write(f"torch_info={torch_info}\n")
print(f'install_requires={install_requires}')
print(f'compatible_ops={compatible_ops}')
print(f'ext_modules={ext_modules}')
# Parse README.md to make long_description for PyPI page.
thisdir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(thisdir, 'README.md'), encoding='utf-8') as fin:
readme_text = fin.read()
start_time = time.time()
setup(name='deepspeed',
version=version_str,
description='DeepSpeed library',
long_description=readme_text,
long_description_content_type='text/markdown',
author='DeepSpeed Team',
author_email='deepspeed-info@microsoft.com',
url='http://deepspeed.ai',
project_urls={
'Documentation': 'https://deepspeed.readthedocs.io',
'Source': 'https://github.com/microsoft/DeepSpeed',
},
install_requires=install_requires,
extras_require=extras_require,
packages=find_packages(include=['deepspeed', 'deepspeed.*']),
include_package_data=True,
scripts=[
'bin/deepspeed', 'bin/deepspeed.pt', 'bin/ds', 'bin/ds_ssh', 'bin/ds_report', 'bin/ds_bench', 'bin/dsr',
'bin/ds_elastic'
],
classifiers=[
'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10'
],
license='Apache Software License 2.0',
ext_modules=ext_modules,
cmdclass=cmdclass)
end_time = time.time()
print(f'deepspeed build time = {end_time - start_time} secs')
| 11,499 | 36.216828 | 139 | py |
DeepSpeed | DeepSpeed-master/deepspeed/env_report.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed
import subprocess
import argparse
from .ops.op_builder.all_ops import ALL_OPS
from .git_version_info import installed_ops, torch_info
from deepspeed.accelerator import get_accelerator
GREEN = '\033[92m'
RED = '\033[91m'
YELLOW = '\033[93m'
END = '\033[0m'
SUCCESS = f"{GREEN} [SUCCESS] {END}"
OKAY = f"{GREEN}[OKAY]{END}"
WARNING = f"{YELLOW}[WARNING]{END}"
FAIL = f'{RED}[FAIL]{END}'
INFO = '[INFO]'
color_len = len(GREEN) + len(END)
okay = f"{GREEN}[OKAY]{END}"
warning = f"{YELLOW}[WARNING]{END}"
def op_report(verbose=True):
max_dots = 23
max_dots2 = 11
h = ["op name", "installed", "compatible"]
print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
print("DeepSpeed C++/CUDA extension op report")
print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
print("NOTE: Ops not installed will be just-in-time (JIT) compiled at\n"
" runtime if needed. Op compatibility means that your system\n"
" meet the required dependencies to JIT install the op.")
print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
print("JIT compiled ops requires ninja")
ninja_status = OKAY if ninja_installed() else FAIL
print('ninja', "." * (max_dots - 5), ninja_status)
print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
print(h[0], "." * (max_dots - len(h[0])), h[1], "." * (max_dots2 - len(h[1])), h[2])
print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
installed = f"{GREEN}[YES]{END}"
no = f"{YELLOW}[NO]{END}"
for op_name, builder in ALL_OPS.items():
dots = "." * (max_dots - len(op_name))
is_compatible = OKAY if builder.is_compatible(verbose) else no
is_installed = installed if installed_ops.get(op_name, False) else no
dots2 = '.' * ((len(h[1]) + (max_dots2 - len(h[1]))) - (len(is_installed) - color_len))
print(op_name, dots, is_installed, dots2, is_compatible)
print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
def ninja_installed():
try:
import ninja # noqa: F401
except ImportError:
return False
return True
def nvcc_version():
import torch.utils.cpp_extension
cuda_home = torch.utils.cpp_extension.CUDA_HOME
if cuda_home is None:
return f"{RED} [FAIL] cannot find CUDA_HOME via torch.utils.cpp_extension.CUDA_HOME={torch.utils.cpp_extension.CUDA_HOME} {END}"
try:
output = subprocess.check_output([cuda_home + "/bin/nvcc", "-V"], universal_newlines=True)
except FileNotFoundError:
return f"{RED} [FAIL] nvcc missing {END}"
output_split = output.split()
release_idx = output_split.index("release")
release = output_split[release_idx + 1].replace(',', '').split(".")
return ".".join(release)
def debug_report():
max_dots = 33
report = [("torch install path", torch.__path__), ("torch version", torch.__version__),
("deepspeed install path", deepspeed.__path__),
("deepspeed info", f"{deepspeed.__version__}, {deepspeed.__git_hash__}, {deepspeed.__git_branch__}")]
if get_accelerator().device_name() == 'cuda':
hip_version = getattr(torch.version, "hip", None)
report.extend([("torch cuda version", torch.version.cuda), ("torch hip version", hip_version),
("nvcc version", (None if hip_version else nvcc_version())),
("deepspeed wheel compiled w.", f"torch {torch_info['version']}, " +
(f"hip {torch_info['hip_version']}" if hip_version else f"cuda {torch_info['cuda_version']}"))
])
else:
report.extend([("deepspeed wheel compiled w.", f"torch {torch_info['version']} ")])
print("DeepSpeed general environment info:")
for name, value in report:
print(name, "." * (max_dots - len(name)), value)
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--hide_operator_status',
action='store_true',
help='Suppress display of installation and compatibility statuses of DeepSpeed operators. ')
parser.add_argument('--hide_errors_and_warnings', action='store_true', help='Suppress warning and error messages.')
args = parser.parse_args()
return args
def main(hide_operator_status=False, hide_errors_and_warnings=False):
if not hide_operator_status:
op_report(verbose=not hide_errors_and_warnings)
debug_report()
def cli_main():
args = parse_arguments()
main(hide_operator_status=args.hide_operator_status, hide_errors_and_warnings=args.hide_errors_and_warnings)
if __name__ == "__main__":
main()
| 4,804 | 37.134921 | 136 | py |
DeepSpeed | DeepSpeed-master/deepspeed/git_version_info.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
try:
# This is populated by setup.py
from .git_version_info_installed import * # noqa: F401
except ModuleNotFoundError:
import os
if os.path.isfile('version.txt'):
# Will be missing from checkouts that haven't been installed (e.g., readthedocs)
version = open('version.txt', 'r').read().strip()
else:
version = "0.0.0"
git_hash = '[none]'
git_branch = '[none]'
from .ops.op_builder.all_ops import ALL_OPS
installed_ops = dict.fromkeys(ALL_OPS.keys(), False)
compatible_ops = dict.fromkeys(ALL_OPS.keys(), False)
torch_info = {'version': "0.0", "cuda_version": "0.0", "hip_version": "0.0"}
| 756 | 31.913043 | 88 | py |
DeepSpeed | DeepSpeed-master/deepspeed/__init__.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import sys
import types
import json
from typing import Optional, Union
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from packaging import version as pkg_version
try:
import triton # noqa: F401
HAS_TRITON = True
except ImportError:
HAS_TRITON = False
from . import ops
from . import module_inject
from .accelerator import get_accelerator
from .runtime.engine import DeepSpeedEngine, DeepSpeedOptimizerCallable, DeepSpeedSchedulerCallable
from .runtime.engine import ADAM_OPTIMIZER, LAMB_OPTIMIZER
from .runtime.hybrid_engine import DeepSpeedHybridEngine
from .runtime.pipe.engine import PipelineEngine
from .inference.engine import InferenceEngine
from .inference.config import DeepSpeedInferenceConfig
from .runtime.lr_schedules import add_tuning_arguments
from .runtime.config import DeepSpeedConfig, DeepSpeedConfigError
from .runtime.activation_checkpointing import checkpointing
from .ops.transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
from .module_inject import replace_transformer_layer, revert_transformer_layer
from .utils import log_dist, OnDevice, logger
from .comm.comm import init_distributed
from .runtime import zero
from .runtime import DeepSpeedOptimizer, ZeROOptimizer
from .pipe import PipelineModule
from .git_version_info import version, git_hash, git_branch
def _parse_version(version_str):
'''Parse a version string and extract the major, minor, and patch versions.'''
ver = pkg_version.parse(version_str)
return ver.major, ver.minor, ver.micro
# Export version information
__version__ = version
__version_major__, __version_minor__, __version_patch__ = _parse_version(__version__)
__git_hash__ = git_hash
__git_branch__ = git_branch
# Set to torch's distributed package or deepspeed.comm based inside DeepSpeedEngine init
dist = None
def initialize(args=None,
model: torch.nn.Module = None,
optimizer: Optional[Union[Optimizer, DeepSpeedOptimizerCallable]] = None,
model_parameters: Optional[torch.nn.Module] = None,
training_data: Optional[torch.utils.data.Dataset] = None,
lr_scheduler: Optional[Union[_LRScheduler, DeepSpeedSchedulerCallable]] = None,
mpu=None,
dist_init_required: Optional[bool] = None,
collate_fn=None,
config=None,
config_params=None):
"""Initialize the DeepSpeed Engine.
Arguments:
args: an object containing local_rank and deepspeed_config fields.
This is optional if `config` is passed.
        model: Required: nn.module class before applying any wrappers
optimizer: Optional: a user defined Optimizer or Callable that returns an Optimizer object.
This overrides any optimizer definition in the DeepSpeed json config.
model_parameters: Optional: An iterable of torch.Tensors or dicts.
Specifies what Tensors should be optimized.
training_data: Optional: Dataset of type torch.utils.data.Dataset
lr_scheduler: Optional: Learning Rate Scheduler Object or a Callable that takes an Optimizer and returns a Scheduler object.
            The scheduler object should define get_lr(), step(), state_dict(), and load_state_dict() methods
mpu: Optional: A model parallelism unit object that implements
get_{model,data}_parallel_{rank,group,world_size}()
dist_init_required: Optional: None will auto-initialize torch distributed if needed,
otherwise the user can force it to be initialized or not via boolean.
collate_fn: Optional: Merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a
map-style dataset.
config: Optional: Instead of requiring args.deepspeed_config you can pass your deepspeed config
as an argument instead, as a path or a dictionary.
config_params: Optional: Same as `config`, kept for backwards compatibility.
Returns:
A tuple of ``engine``, ``optimizer``, ``training_dataloader``, ``lr_scheduler``
* ``engine``: DeepSpeed runtime engine which wraps the client model for distributed training.
* ``optimizer``: Wrapped optimizer if a user defined ``optimizer`` is supplied, or if
optimizer is specified in json config else ``None``.
* ``training_dataloader``: DeepSpeed dataloader if ``training_data`` was supplied,
otherwise ``None``.
* ``lr_scheduler``: Wrapped lr scheduler if user ``lr_scheduler`` is passed, or
if ``lr_scheduler`` specified in JSON configuration. Otherwise ``None``.
"""
log_dist("DeepSpeed info: version={}, git-hash={}, git-branch={}".format(__version__, __git_hash__,
__git_branch__),
ranks=[0])
# Disable zero.Init context if it's currently enabled
zero.partition_parameters.shutdown_init_context()
assert model is not None, "deepspeed.initialize requires a model"
global dist
from deepspeed import comm as dist
dist_backend = get_accelerator().communication_backend_name()
dist.init_distributed(dist_backend=dist_backend, dist_init_required=dist_init_required)
# Set config using config_params for backwards compat
if config is None and config_params is not None:
config = config_params
# Check for deepscale_config for backwards compat
if hasattr(args, "deepscale_config") and args.deepscale_config is not None:
logger.warning("************ --deepscale_config is deprecated, please use --deepspeed_config ************")
if hasattr(args, "deepspeed_config"):
assert (args.deepspeed_config is
None), "Not sure how to proceed, we were given both a deepscale_config and deepspeed_config"
args.deepspeed_config = args.deepscale_config
args.deepscale_config = None
# Check that we have only one config passed
if hasattr(args, "deepspeed_config") and args.deepspeed_config is not None:
assert config is None, "Not sure how to proceed, we were given deepspeed configs in the deepspeed arguments and deepspeed.initialize() function call"
config = args.deepspeed_config
assert config is not None, "DeepSpeed requires --deepspeed_config to specify configuration file"
if not isinstance(model, PipelineModule):
config_class = DeepSpeedConfig(config, mpu)
if config_class.hybrid_engine.enabled:
engine = DeepSpeedHybridEngine(args=args,
model=model,
optimizer=optimizer,
model_parameters=model_parameters,
training_data=training_data,
lr_scheduler=lr_scheduler,
mpu=mpu,
dist_init_required=dist_init_required,
collate_fn=collate_fn,
config=config,
config_class=config_class)
else:
engine = DeepSpeedEngine(args=args,
model=model,
optimizer=optimizer,
model_parameters=model_parameters,
training_data=training_data,
lr_scheduler=lr_scheduler,
mpu=mpu,
dist_init_required=dist_init_required,
collate_fn=collate_fn,
config=config,
config_class=config_class)
else:
assert mpu is None, "mpu must be None with pipeline parallelism"
mpu = model.mpu()
config_class = DeepSpeedConfig(config, mpu)
engine = PipelineEngine(args=args,
model=model,
optimizer=optimizer,
model_parameters=model_parameters,
training_data=training_data,
lr_scheduler=lr_scheduler,
mpu=mpu,
dist_init_required=dist_init_required,
collate_fn=collate_fn,
config=config,
config_class=config_class)
# Restore zero.Init context if necessary
zero.partition_parameters.restore_init_context()
return_items = [engine, engine.optimizer, engine.training_dataloader, engine.lr_scheduler]
return tuple(return_items)
def _add_core_arguments(parser):
r"""Helper (internal) function to update an argument parser with an argument group of the core DeepSpeed arguments.
The core set of DeepSpeed arguments include the following:
1) --deepspeed: boolean flag to enable DeepSpeed
2) --deepspeed_config <json file path>: path of a json configuration file to configure DeepSpeed runtime.
This is a helper function to the public add_config_arguments()
Arguments:
parser: argument parser
Return:
parser: Updated Parser
"""
group = parser.add_argument_group('DeepSpeed', 'DeepSpeed configurations')
group.add_argument('--deepspeed',
default=False,
action='store_true',
help='Enable DeepSpeed (helper flag for user code, no impact on DeepSpeed backend)')
group.add_argument('--deepspeed_config', default=None, type=str, help='DeepSpeed json configuration file.')
group.add_argument('--deepscale',
default=False,
action='store_true',
help='Deprecated enable DeepSpeed (helper flag for user code, no impact on DeepSpeed backend)')
group.add_argument('--deepscale_config',
default=None,
type=str,
help='Deprecated DeepSpeed json configuration file.')
group.add_argument('--deepspeed_mpi',
default=False,
action='store_true',
help="Run via MPI, this will attempt to discover the necessary variables to initialize torch "
"distributed from the MPI environment")
return parser
def add_config_arguments(parser):
r"""Update the argument parser to enabling parsing of DeepSpeed command line arguments.
The set of DeepSpeed arguments include the following:
1) --deepspeed: boolean flag to enable DeepSpeed
2) --deepspeed_config <json file path>: path of a json configuration file to configure DeepSpeed runtime.
Arguments:
parser: argument parser
Return:
parser: Updated Parser
"""
parser = _add_core_arguments(parser)
return parser
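# A minimal sketch of how a client script typically wires this in (the parser/args names are
# hypothetical):
#   parser = argparse.ArgumentParser(description='My training script')
#   parser = deepspeed.add_config_arguments(parser)
#   args = parser.parse_args()  # args now carries args.deepspeed and args.deepspeed_config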
def default_inference_config():
"""
Return a default DeepSpeed inference configuration dictionary.
"""
return DeepSpeedInferenceConfig().dict()
def init_inference(model, config=None, **kwargs):
"""Initialize the DeepSpeed InferenceEngine.
Description: all four cases are valid and supported in DS init_inference() API.
# Case 1: user provides no config and no kwargs. Default config will be used.
.. code-block:: python
generator.model = deepspeed.init_inference(generator.model)
string = generator("DeepSpeed is")
print(string)
# Case 2: user provides a config and no kwargs. User supplied config will be used.
.. code-block:: python
generator.model = deepspeed.init_inference(generator.model, config=config)
string = generator("DeepSpeed is")
print(string)
# Case 3: user provides no config and uses keyword arguments (kwargs) only.
.. code-block:: python
generator.model = deepspeed.init_inference(generator.model,
mp_size=world_size,
dtype=torch.half,
replace_with_kernel_inject=True)
string = generator("DeepSpeed is")
print(string)
# Case 4: user provides config and keyword arguments (kwargs). Both config and kwargs are merged and kwargs take precedence.
.. code-block:: python
generator.model = deepspeed.init_inference(generator.model, config={"dtype": torch.half}, replace_with_kernel_inject=True)
string = generator("DeepSpeed is")
print(string)
Arguments:
model: Required: original nn.module object without any wrappers
config: Optional: instead of arguments, you can pass in a DS inference config dict or path to JSON file
Returns:
A deepspeed.InferenceEngine wrapped model.
"""
log_dist("DeepSpeed info: version={}, git-hash={}, git-branch={}".format(__version__, __git_hash__,
__git_branch__),
ranks=[0])
# Load config_dict from config first
if config is None:
config = {}
if isinstance(config, str):
with open(config, "r") as f:
config_dict = json.load(f)
elif isinstance(config, dict):
config_dict = config
else:
raise ValueError(f"'config' argument expected string or dictionary, got {type(config)}")
# Update with values from kwargs, ensuring no conflicting overlap between config and kwargs
overlap_keys = set(config_dict.keys()).intersection(kwargs.keys())
# If there is overlap, error out if values are different
for key in overlap_keys:
if config_dict[key] != kwargs[key]:
raise ValueError(f"Conflicting argument '{key}' in 'config':{config_dict[key]} and kwargs:{kwargs[key]}")
config_dict.update(kwargs)
ds_inference_config = DeepSpeedInferenceConfig(**config_dict)
engine = InferenceEngine(model, config=ds_inference_config)
return engine
| 14,510 | 41.06087 | 157 | py |
DeepSpeed | DeepSpeed-master/deepspeed/checkpoint/zero_checkpoint.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from .constants import (BASE_OPTIMIZER_STATE, GROUP_PADDINGS, OPTIMIZER_STATE_DICT, PARTITION_COUNT)
from .reshape_utils import (basic_folder_validation, get_zero_files, merge_state)
from .reshape_3d_utils import (model_3d_desc, get_model_3d_descriptor)
GROUP_STATE_KEY = 'state'
class ZeROCheckpoint(object):
def __init__(self, dir):
basic_folder_validation(dir)
self.dir = dir
self.file_list = get_zero_files(dir)
self.num_files = len(self.file_list)
assert self.num_files > 0, f'No ZeRO files found in {dir}'
self.src_3d = get_model_3d_descriptor(dir)
self.target_3d = model_3d_desc(pp_degree=self.src_3d.pp_degree,
tp_degree=self.src_3d.tp_degree,
dp_degree=self.src_3d.dp_degree)
self._3d_file_map = self.src_3d.reshape(self.target_3d)
def get_src_world_size(self):
return self.src_3d.world_size()
def get_src_tp_degree(self):
return self.src_3d.tp_degree
def get_src_pp_degree(self):
return self.src_3d.pp_degree
def get_src_dp_degree(self):
return self.src_3d.dp_degree
def get_file_indices_for_rank(self, pp_index, tp_index, dp_index):
assert dp_index < len(self._3d_file_map), f'DP index {dp_index} >= DP degree {len(self._3d_file_map)}'
dp_2d_map = self._3d_file_map[dp_index]
return dp_2d_map.get_data(pp_index, tp_index)
def get_files_for_rank(self, pp_index, tp_index, dp_index):
file_idx_list = self.get_file_indices_for_rank(pp_index, tp_index, dp_index)
return [self.file_list[idx] for idx in file_idx_list]
def get_state_for_rank(self, pp_index, tp_index, dp_index, keys_to_ignore=[], strip_tensor_paddings=True):
state_file_list = self.get_files_for_rank(pp_index, tp_index, dp_index)
merged_sd = None
for state_file in state_file_list:
sd = torch.load(state_file, map_location=torch.device('cpu'))
for key in keys_to_ignore:
sd.pop(key, None)
if strip_tensor_paddings:
self._strip_tensor_paddings(sd)
if merged_sd is None:
merged_sd = sd
else:
merged_sd = merge_state(merged_sd, sd)
self._update_partition_count(merged_sd)
if strip_tensor_paddings:
self._clear_group_paddings(merged_sd)
return merged_sd
def print_3d_index_map(self, tag=None):
if tag:
print(f'3D index map: {tag}')
for dp_index, _2d_map in enumerate(self._3d_file_map):
_2d_map.print_data(f'dp = {dp_index}')
def print_3d_file_map(self, tag=None):
if tag:
print(f'3D file map: {tag}')
for dp_index, _2d_map in enumerate(self._3d_file_map):
for pp_index in _2d_map.pp_degree:
for tp_index in _2d_map.tp_degree:
file_index_list = _2d_map.get_data(pp_index, tp_index)
file_list = [self.file_list[idx] for idx in file_index_list]
print(f'{pp_index}, {tp_index}, {dp_index} => {file_list}')
def reshape(self, target_3d_desc: model_3d_desc):
self.target_3d = target_3d_desc
self._3d_file_map = self.src_3d.reshape(self.target_3d)
def _strip_tensor_paddings(self, sd):
param_group_states = self._get_param_group_states(sd)
if param_group_states is None:
return
group_paddings = self._get_optimizer_state(sd, GROUP_PADDINGS)
if group_paddings is None:
return
for key, group_state in param_group_states.items():
if group_paddings[key] == 0:
continue
for state_name, state_value in group_state.items():
if torch.is_tensor(state_value):
raw_length = state_value.numel() - group_paddings[key]
group_state[state_name] = torch.narrow(state_value, 0, 0, raw_length).clone()
def _clear_group_paddings(self, sd):
group_paddings = self._get_optimizer_state(sd, GROUP_PADDINGS)
if group_paddings:
num_groups = len(group_paddings)
sd[OPTIMIZER_STATE_DICT][GROUP_PADDINGS] = [0] * num_groups
def _get_optimizer_state(self, sd, state_key):
optimizer_state = sd.get(OPTIMIZER_STATE_DICT, None)
if optimizer_state is None:
return None
return optimizer_state.get(state_key, None)
def _get_param_group_states(self, sd):
optimizer_state = sd.get(OPTIMIZER_STATE_DICT, None)
if optimizer_state is None:
return None
base_optimizer_state = optimizer_state.get(BASE_OPTIMIZER_STATE, None)
if base_optimizer_state is None:
return None
return base_optimizer_state.get(GROUP_STATE_KEY, None)
def _update_partition_count(self, sd):
partition_counts = self._get_optimizer_state(sd, PARTITION_COUNT)
if partition_counts:
num_groups = len(partition_counts)
sd[OPTIMIZER_STATE_DICT][PARTITION_COUNT] = [self.target_3d.dp_degree] * num_groups
| 5,316 | 36.70922 | 110 | py |
DeepSpeed | DeepSpeed-master/deepspeed/checkpoint/utils.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
from .constants import (MODEL_FILE_PREFIX, MODEL_FILE_SUFFIX, OPTIM_FILE_SUFFIX, ZERO_FILE_PREFIX)
def get_model_ckpt_name_for_rank(base_folder, mp_rank_str):
ckpt_name = os.path.join(
base_folder,
MODEL_FILE_PREFIX + mp_rank_str + MODEL_FILE_SUFFIX,
)
return ckpt_name
def get_zero_ckpt_name_for_rank(base_folder, dp_rank, mp_rank):
zero_prefix = f'{ZERO_FILE_PREFIX}{dp_rank}'
mp_rank_string = f'_{MODEL_FILE_PREFIX}{mp_rank:02d}'
zero_ckpt_name = os.path.join(
base_folder,
zero_prefix + mp_rank_string + OPTIM_FILE_SUFFIX,
)
return zero_ckpt_name
def get_layer_ckpt_name_for_rank(base_folder, layer_id, tp_rank):
ckpt_file = f'{layer_id}-model_{tp_rank:02d}{MODEL_FILE_SUFFIX}'
ckpt_path = os.path.join(base_folder, ckpt_file)
return ckpt_path
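# Illustrative file names produced by the helpers above, assuming the usual constant values
# (MODEL_FILE_PREFIX='mp_rank_', MODEL_FILE_SUFFIX='_model_states.pt',
# ZERO_FILE_PREFIX='zero_pp_rank_', OPTIM_FILE_SUFFIX='_optim_states.pt'):
#   get_model_ckpt_name_for_rank(base, '00') -> base/mp_rank_00_model_states.pt
#   get_zero_ckpt_name_for_rank(base, 3, 1) -> base/zero_pp_rank_3_mp_rank_01_optim_states.pt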
# We pass cloned tensors to torch.save() to avoid checkpoint bloat that occurs when torch.save()
# saves the underlying storage rather than the slice of the storage corresponding to individual tensors.
# This is a problem in DeepSpeed because we often allocate tensors using slices of large flattened buffers.
# Tensor cloning helps to avoid this problem because the storage of cloned tensors are closer to the true size.
# It is expected that the garbage collector will reclaim the cloned tensor storage to avoid memory bloat.
# See https://pytorch.org/docs/stable/notes/serialization.html#preserve-storage-sharing
def clone_tensors_for_torch_save(item, device=torch.device('cpu')):
"""
Returns a copy of ``item`` with all enclosed tensors replaced by clones on a specified device.
Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts.
Parameters:
- ``item``: tensor to clone or (possibly nested) container of tensors to clone.
- ``device``: target device (defaults to 'cpu')
Returns:
- copy of ``item`` with cloned tensors on target device
"""
if torch.is_tensor(item):
return item.detach().clone().to(device)
elif isinstance(item, list):
return [clone_tensors_for_torch_save(v, device) for v in item]
elif isinstance(item, tuple):
return tuple([clone_tensors_for_torch_save(v, device) for v in item])
elif isinstance(item, dict):
return type(item)({k: clone_tensors_for_torch_save(v, device) for k, v in item.items()})
else:
return item
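# Illustrative use (the `model` variable is hypothetical): saving
#   torch.save(clone_tensors_for_torch_save(model.state_dict()), path)
# writes each tensor with storage sized to the tensor itself rather than to the large
# flattened buffer it may be a view into, avoiding the bloat described in the comment above.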
| 2,534 | 39.238095 | 111 | py |
DeepSpeed | DeepSpeed-master/deepspeed/checkpoint/reshape_utils.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
from collections import OrderedDict
from .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX)
def basic_folder_validation(dir):
assert os.path.exists(dir), f'{dir} path does not exist'
assert os.path.isdir(dir), f'{dir} is not a folder'
def get_files_with_prefix(all_files, prefix):
file_list = []
for file_path in all_files:
_, fname = os.path.split(file_path)
if fname.startswith(prefix):
file_list.append(file_path)
return sorted(file_list)
def validate_files(file_list):
for file in file_list:
if not os.path.isfile(file):
            print(f'Error: {file} does not exist')
def get_files(dir):
file_list = []
for root, _, files in os.walk(dir):
for file in files:
file_list.append(os.path.join(root, file))
return file_list
def get_zero_files(dir):
file_list = get_files(dir)
for prefix in [ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX]:
zero_files = get_files_with_prefix(file_list, prefix)
if len(zero_files) > 0:
return zero_files
return []
def partition_data(data_list, num_partitions):
num_elems = len(data_list)
assert num_elems % num_partitions == 0
partition_size = num_elems // num_partitions
partitions_list = [data_list[i:i + partition_size] for i in range(0, num_elems, partition_size)]
return partitions_list
def _key_list_to_string(key_list):
return '.'.join(key_list)
def merge_state_dict(dict_a, dict_b, key_list):
merged_dict = type(dict_a)({})
for key, value in dict_b.items():
if key in dict_a.keys():
merged_dict[key] = merge_state(dict_a[key], dict_b[key], [str(key)])
else:
merged_dict[key] = value
return merged_dict
def merge_state_list(list_a, list_b, key_list):
if len(list_a) != len(list_b):
print(f'{_key_list_to_string(key_list)}')
raise ValueError(f'Cannot merge lists of different lengths, a = {len(list_a)} b = {len(list_b)}')
return [merge_state(a, b, key_list) for a, b in zip(list_a, list_b)]
def merge_state(state_a, state_b, key_list=[]):
if type(state_a) != type(state_b):
key_list_string = _key_list_to_string(key_list)
print(f'key_list = {key_list_string}')
raise ValueError(f'Cannot merge two states of types {type(state_a)} and type {type(state_b)}')
if type(state_a) in (dict, OrderedDict):
return merge_state_dict(state_a, state_b, key_list)
elif type(state_a) in (list, tuple):
return type(state_a)(merge_state_list(state_a, state_b, key_list))
elif torch.is_tensor(state_a):
return torch.cat([state_a, state_b], 0)
else:
return state_a
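# Illustrative behaviour of merge_state (shapes hypothetical): merging two shard dicts that
# each hold a 1-D tensor of length 10 under the same key yields a single tensor of length 20
# (torch.cat along dim 0); lists/tuples are merged element-wise, and any other leaf keeps the
# value from the first shard.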
| 2,888 | 28.783505 | 105 | py |
DeepSpeed | DeepSpeed-master/deepspeed/checkpoint/universal_checkpoint.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import types
from .constants import (FP32_WEIGHT_KEY, PARAM, VOCAB_DIVISIBILITY_PADDING_TENSOR, CAT_DIM)
def load_hp_checkpoint_state(self, folder, tp_rank, tp_world_size):
hp_mapping = self._hp_mapping
optim_state_keys = hp_mapping.get_optim_state_keys()
hp_keys = [FP32_WEIGHT_KEY] + optim_state_keys
checkpoint_files = {key: os.path.join(folder, f"{key}.pt") for key in hp_keys}
for file in checkpoint_files.values():
assert os.path.isfile(file), f'{file} is not a valid file'
for key in hp_keys:
ckpt_file = checkpoint_files[key]
ckpt_dict = torch.load(ckpt_file)
full_hp_param = ckpt_dict[PARAM]
# need to deal with slices that were averaged.
# the opposite of averaging here becomes an exact copy of the first slice
# I thought of 2 ways:
# implementation a. find a way for a client to pass a dict with patterns
# if any(re.search(pattern, folder) for pattern in WEIGHTS_TO_AVERAGE_PATTERNS):
# tp_rank = 0
# tp_world_size = 1
# the other approach is to assume that the saved data is correct and if full_hp_param.shape ==
# self.shape that means we automatically copy?
# implementation b.
# this version requires no additional data passed from the client
# if the shapes already match it must be slices that were averaged - so we just hack around those
if full_hp_param.shape == self.shape:
tp_rank = 0
tp_world_size = 1
# special case for word_embeddings weights which get padded differently depending on TP degree.
# the converter to universal currently strips the original padding completely so the saved
# weight is padding-free and we just need to add new padding depending on the target TP
# degree
vocab_divisibility_padding_tensor = ckpt_dict.get(VOCAB_DIVISIBILITY_PADDING_TENSOR, None)
if vocab_divisibility_padding_tensor is not None:
# In the absence of data passed from the user wrt new padded vocab specific to tp degree
# we can again derive that data by reverse engineering the target shapes like so:
padded_target_vocab_size = self.shape[0] * tp_world_size
if padded_target_vocab_size > full_hp_param.shape[0]:
# Need to expand
padding_size = padded_target_vocab_size - full_hp_param.shape[0]
# Implement the following concat in efficient way using pad
#full_hp_param = torch.cat((full_hp_param, padding_tensor), 0)
full_hp_param = torch.nn.functional.pad(full_hp_param, (0, 0, 0, padding_size), "constant", 0)
full_hp_param[:-padding_size, :] = vocab_divisibility_padding_tensor
else:
# Need to shrink or keep the same
full_hp_param = full_hp_param[:padded_target_vocab_size, :]
full_param_numel = full_hp_param.numel()
tp_slice_numel = self.numel()
# if key == FP32_WEIGHT_KEY and 'word_embeddings.weight' in folder:
# print_rank_0(f'{full_hp_param[:10]=}', force=True)
assert full_param_numel == tp_world_size * tp_slice_numel, \
f'Loading {ckpt_file} full param numel {full_param_numel} != tensor slice numel {tp_slice_numel} * tp_world_size {tp_world_size}'
dst_tensor = hp_mapping.hp_fragment if key == FP32_WEIGHT_KEY else hp_mapping.get_optim_state_fragment(key)
# print(f"{full_hp_param.shape=} {full_param_numel=} {folder=}")
# print(f"{dst_tensor.shape=} {dst_tensor.numel()=}{folder=}")
# since when we do many to 1 on tp we cat sometimes on dim=0 and other times on dim=1 we have to do exactly the same in reverse
chunk_dim = ckpt_dict.get(CAT_DIM, 0)
# this performs the opposite of cat when merging TP slices
tp_hp_slice = full_hp_param.chunk(tp_world_size, chunk_dim)[tp_rank]
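        # Illustrative identity behind the comment above: for any tensor t,
        #   torch.cat(torch.chunk(t, tp_world_size, chunk_dim), chunk_dim) reproduces t,
        # so indexing chunk(...)[tp_rank] recovers exactly the slice this TP rank contributed.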
tp_hp_slice = tp_hp_slice.flatten()
lp_frag_address = hp_mapping.lp_fragment_address
tp_hp_fragment = tp_hp_slice.narrow(0, lp_frag_address.start, lp_frag_address.numel)
assert dst_tensor.numel() == lp_frag_address.numel, \
f'Load checkpoint {key} dst_tensor numel {dst_tensor.numel()} != src numel {lp_frag_address.numel}'
# print(f"{key} SHAPE: {tp_hp_slice.shape=}")
# print(f"{key} SHAPE: {dst_tensor.shape=}")
# print(f"{key} SHAPE: {tp_hp_fragment.shape=}")
dst_tensor.data.copy_(tp_hp_fragment.data)
def enable_universal_checkpoint(param_list):
for param in param_list:
param.load_hp_checkpoint_state = types.MethodType(load_hp_checkpoint_state, param)
| 4,888 | 49.927083 | 141 | py |
DeepSpeed | DeepSpeed-master/deepspeed/checkpoint/deepspeed_checkpoint.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
from typing import Dict
import torch
from .reshape_3d_utils import model_3d_desc
from .reshape_utils import (basic_folder_validation, merge_state, partition_data, get_files, get_files_with_prefix)
from .constants import (MODEL_FILE_PREFIX, LAYER_FILE_PREFIX)
from .reshape_meg_2d import reshape_meg_2d_parallel, meg_2d_parallel_map
from .zero_checkpoint import ZeROCheckpoint
from .constants import *
EMBEDDING_LAYER_INDEX = 0
FINAL_LAYER_NORM_INDEX = -1
ARGS_KEY = 'args'
CHECKPOINT_INFO_KEY = 'checkpoint_info'
ITERATION_KEY = 'iteration'
SEQUENTIAL_LAYERS = [
'input_layernorm.weight', 'input_layernorm.bias', 'self_attention.dense.bias', 'post_attention_layernorm.weight',
'post_attention_layernorm.bias', 'mlp.dense_4h_to_h.bias', 'position_embeddings.weight'
]
LAYER_CONCAT_DIM = {'self_attention.dense.weight': 1, 'mlp.dense_4h_to_h.weight': 1}
class DeepSpeedCheckpoint(object):
def __init__(self, dir, tp_degree=None, pp_degree=None, dp_degree=None):
self.dir = dir
self._validate_folder(dir)
self.zero_checkpoint = ZeROCheckpoint(dir)
self.file_list = get_files(dir)
self.layer_files = get_files_with_prefix(self.file_list, LAYER_FILE_PREFIX)
self.mp_rank_files = get_files_with_prefix(self.file_list, MODEL_FILE_PREFIX)
self.layer_keys = self._get_layer_keys()
self.layer_count = len(self.layer_keys)
self.tp_degree = self.zero_checkpoint.get_src_tp_degree() if tp_degree is None else tp_degree
self.pp_degree = self.zero_checkpoint.get_src_pp_degree() if pp_degree is None else pp_degree
self.dp_degree = self.zero_checkpoint.get_src_dp_degree() if dp_degree is None else dp_degree
self.original_world_size = self.zero_checkpoint.get_src_tp_degree() * self.zero_checkpoint.get_src_pp_degree(
) * self.zero_checkpoint.get_src_dp_degree()
self.world_size = self.tp_degree * self.pp_degree * self.dp_degree
self.old_2d_map = meg_2d_parallel_map(self.zero_checkpoint.get_src_pp_degree(),
self.zero_checkpoint.get_src_tp_degree())
self.old_2d_map.simple_init()
self.new_2d_map = reshape_meg_2d_parallel(old_pp_degree=self.zero_checkpoint.get_src_pp_degree(),
old_tp_degree=self.zero_checkpoint.get_src_tp_degree(),
new_pp_degree=self.pp_degree,
new_tp_degree=self.tp_degree)
if self.is_change_pp_degree() or self.is_change_tp_degree() or self.is_change_dp_degree():
self.zero_checkpoint.reshape(model_3d_desc(self.pp_degree, self.tp_degree, self.dp_degree))
self.global_state = {}
self._sanity_check()
self.pp_to_transformer_map = self._build_pp_transformer_map()
self.transformer_file_map = self._build_transformer_file_map()
self.tp_to_embedding_map = self._build_tp_other_layer_map(EMBEDDING_LAYER_INDEX)
self.tp_to_final_norm_map = self._build_tp_other_layer_map(FINAL_LAYER_NORM_INDEX)
self._build_global_state()
def is_change_tp_degree(self):
return self.tp_degree != self.zero_checkpoint.get_src_tp_degree()
def is_change_pp_degree(self):
return self.pp_degree != self.zero_checkpoint.get_src_pp_degree()
def is_change_dp_degree(self):
return self.dp_degree != self.zero_checkpoint.get_src_dp_degree()
def show_2d_mapping(self):
print(f'reshaped 2d map ---- begin')
for i in range(self.pp_degree):
for j in range(self.tp_degree):
file_list = self.get_2d_parallel_files(pp_index=i, tp_index=j)
print(f'[{i}, {j}] = {file_list}')
print(f'reshaped 2d map ---- end')
def show_tp_embedding_map(self):
self._dump_mapping(self.tp_to_embedding_map, 'tp_to_embedding_layers')
def show_tp_final_norm_map(self):
self._dump_mapping(self.tp_to_final_norm_map, 'tp_to_final_norm_layers')
def show_pp_transformer_map(self):
self._dump_mapping(self.pp_to_transformer_map, 'pp_to_transformer_layers')
def show_transformer_file_map(self):
self._dump_mapping(self.transformer_file_map, 'rank_to_transformer_files')
def _build_global_state(self):
sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu'))
self.global_state[ITERATION_KEY] = sd.get(ITERATION_KEY, 0)
self.global_state[ARGS_KEY] = sd.get(ARGS_KEY, None)
def get_zero_checkpoint_state(self, pp_index, tp_index, dp_index) -> dict:
return self.zero_checkpoint.get_state_for_rank(pp_index=pp_index,
tp_index=tp_index,
dp_index=dp_index,
keys_to_ignore=[PARAM_SHAPES])
def get_zero_files(self, pp_index, tp_index, dp_index) -> list:
return self.zero_checkpoint.get_files_for_rank(pp_index=pp_index, tp_index=tp_index, dp_index=dp_index)
def get_embedding_layer_id(self):
return self.layer_keys[EMBEDDING_LAYER_INDEX]
def get_final_norm_layer_id(self):
return self.layer_keys[FINAL_LAYER_NORM_INDEX]
def get_iteration(self):
if not ITERATION_KEY in self.global_state:
sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu'))
self.global_state[ITERATION_KEY] = sd.get(ITERATION_KEY, 0)
return self.global_state[ITERATION_KEY]
def get_embedding_state(self, tp_index: int) -> Dict:
assert tp_index in self.tp_to_embedding_map.keys()
sd_list = [torch.load(fname, map_location=torch.device('cpu')) for fname in self.tp_to_embedding_map[tp_index]]
sd = self._merge_state_dicts(sd_list)
return sd
def get_embedding_files(self, tp_index: int) -> list:
assert tp_index in self.tp_to_embedding_map.keys()
return self.tp_to_embedding_map[tp_index]
def _get_checkpoint_value(self, key):
if not key in self.global_state:
sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu'))
self.global_state[key] = sd.get(key, None)
return self.global_state[key]
def get_args(self):
return self._get_checkpoint_value(ARGS_KEY)
def get_checkpoint_info(self, info_key=CHECKPOINT_INFO_KEY):
return self._get_checkpoint_value(info_key)
def get_2d_parallel_state(self, tp_index: int, pp_index: int) -> dict:
assert tp_index < self.tp_degree
assert pp_index < self.pp_degree
fname_list = self.get_2d_parallel_files(tp_index=tp_index, pp_index=pp_index)
sd_list = [torch.load(fname, map_location=torch.device('cpu')) for fname in fname_list]
merged_sd = None
for sd in sd_list:
if merged_sd is None:
merged_sd = sd
else:
merged_sd = merge_state(merged_sd, sd)
return merged_sd
def get_transformer_state(self, tp_index: int, pp_index: int) -> list:
assert tp_index < self.tp_degree
assert pp_index < self.pp_degree
t_list = []
for fname_list in self.transformer_file_map[(tp_index, pp_index)]:
sd_list = [torch.load(fname, map_location=torch.device('cpu')) for fname in fname_list]
sd = self._merge_state_dicts(sd_list)
t_list.append(sd)
return t_list
def get_pp_transformer_map(self, pp_index: int) -> list:
assert pp_index < self.pp_degree
return self.pp_to_transformer_map[pp_index]
def get_final_norm_state(self, tp_index: int) -> Dict:
assert tp_index in self.tp_to_final_norm_map.keys()
sd = torch.load(self.tp_to_final_norm_map[tp_index][0], map_location=torch.device('cpu'))
return sd
def get_final_norm_files(self, tp_index: int) -> list:
assert tp_index in self.tp_to_final_norm_map.keys()
return self.tp_to_final_norm_map[tp_index]
def _build_tp_other_layer_map(self, layer_index: int):
assert layer_index < len(self.layer_files)
layer_files = get_files_with_prefix(self.layer_files, self.layer_keys[layer_index])
layer_file_partitions = partition_data(layer_files, self.tp_degree)
data_map = {i: flist for i, flist in enumerate(layer_file_partitions)}
return data_map
def get_2d_parallel_files(self, tp_index: int, pp_index: int) -> list:
assert tp_index < self.tp_degree
assert pp_index < self.pp_degree
file_indices = self.new_2d_map.get_data(pp_index=pp_index, tp_index=tp_index)
return [self.mp_rank_files[i] for i in file_indices]
def _build_pp_transformer_map(self):
data_map = {}
transformer_layers = self.layer_keys[1:-1]
layers_per_pp = len(transformer_layers) // self.pp_degree
data_map = {i: transformer_layers[i * layers_per_pp:(i + 1) * layers_per_pp] for i in range(0, self.pp_degree)}
return data_map
def _dump_mapping(self, data_map, map_tag=None):
if map_tag is not None:
print(f'Dump mapping: {map_tag}')
for k, v in data_map.items():
print(f'{k} = {v}')
def _build_transformer_file_map(self):
transformer_layer_keys = self.layer_keys[1:-1]
file_map = {}
# XXX: this is not guaranteed
layers_per_pp = len(transformer_layer_keys) // self.pp_degree
if layers_per_pp == 0:
layers_per_pp = 1
#print(f"{transformer_layer_keys} {layers_per_pp}")
for key_index, layer_key in enumerate(transformer_layer_keys):
pp_index = key_index // layers_per_pp
layer_files = get_files_with_prefix(self.layer_files, layer_key)
layer_file_partitions = partition_data(layer_files, self.tp_degree)
for tp_index in range(self.tp_degree):
map_key = (tp_index, pp_index)
if not map_key in file_map.keys():
file_map[map_key] = []
file_map[map_key].append(layer_file_partitions[tp_index])
return file_map
def _sanity_check(self):
assert len(self.mp_rank_files) % self.tp_degree == 0
assert len(self.layer_keys) > 2
assert self.zero_checkpoint.num_files % (self.pp_degree * self.tp_degree) == 0
# XXX: fix me - isn't always the case
# only true with --pp-partition-method 'type:transformer|embedding' \
# assert (len(self.layer_keys) - 2) % self.pp_degree == 0
def validate_files(self):
for file in self.file_list:
if not os.path.isfile(file):
                print(f'Error: {file} does not exist')
def _get_layer_keys(self):
key_set = set()
key_len = len(LAYER_FILE_PREFIX) + 2
for file_path in self.layer_files:
_, fname = os.path.split(file_path)
key_set.add(fname[:key_len])
return sorted(list(key_set))
def _merge_state_dicts(self, sd_list):
merged_sd = {}
for key in sd_list[0].keys():
if not key in SEQUENTIAL_LAYERS:
cat_dim = LAYER_CONCAT_DIM.get(key, 0)
merged_sd[key] = torch.cat([sd[key] for sd in sd_list], dim=cat_dim)
else:
merged_sd[key] = sd_list[0][key]
return merged_sd
def _validate_folder(self, dir):
basic_folder_validation(dir)
file_list = get_files(dir)
for file_prefix in [MODEL_FILE_PREFIX, LAYER_FILE_PREFIX, f'{LAYER_FILE_PREFIX}01']:
ckpt_files = get_files_with_prefix(file_list, file_prefix)
assert len(
ckpt_files
) > 0, f'{dir} seems a bogus DeepSpeed checkpoint folder: Cannot find {file_prefix}* files in there.'
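# A minimal usage sketch of DeepSpeedCheckpoint (the checkpoint path and degrees are
# hypothetical):
#   ds_ckpt = DeepSpeedCheckpoint('/path/to/global_step1000', tp_degree=2, pp_degree=1)
#   embedding_sd = ds_ckpt.get_embedding_state(tp_index=0)
#   layer_sds = ds_ckpt.get_transformer_state(tp_index=0, pp_index=0)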
| 12,012 | 41.448763 | 119 | py |
DeepSpeed | DeepSpeed-master/deepspeed/profiling/flops_profiler/profiler.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from typing import List, Optional
from collections import OrderedDict
import numpy as np
from deepspeed.accelerator import get_accelerator
from deepspeed.moe.layer import MoE
Tensor = torch.Tensor
module_flop_count = []
module_mac_count = []
old_functions = {}
class FlopsProfiler(object):
"""Measures the latency, number of estimated floating-point operations and parameters of each module in a PyTorch model.
The flops-profiler profiles the forward pass of a PyTorch model and prints the model graph with the measured profile attached to each module. It shows how latency, flops and parameters are spent in the model and which modules or layers could be the bottleneck. It also outputs the names of the top k modules in terms of aggregated latency, flops, and parameters at depth l with k and l specified by the user. The output profile is computed for each batch of input.
The DeepSpeed flops profiler can be used with the DeepSpeed runtime or as a standalone package.
When using DeepSpeed for model training, the flops profiler can be configured in the deepspeed_config file and no user code change is required.
If using the profiler as a standalone package, one imports the flops_profiler package and use the APIs.
Here is an example for usage in a typical training workflow:
.. code-block:: python
model = Model()
prof = FlopsProfiler(model)
for step, batch in enumerate(data_loader):
if step == profile_step:
prof.start_profile()
loss = model(batch)
if step == profile_step:
flops = prof.get_total_flops(as_string=True)
params = prof.get_total_params(as_string=True)
prof.print_model_profile(profile_step=profile_step)
prof.end_profile()
loss.backward()
optimizer.step()
To profile a trained model in inference, use the `get_model_profile` API.
Args:
object (torch.nn.Module): The PyTorch model to profile.
"""
def __init__(self, model, ds_engine=None, recompute_fwd_factor=0.0):
self.model = model
self.ds_engine = ds_engine
self.recompute_fwd_factor = recompute_fwd_factor
self.started = False
self.func_patched = False
def start_profile(self, ignore_list=None):
"""Starts profiling.
Extra attributes are added recursively to all the modules and the profiled torch.nn.functionals are monkey patched.
Args:
ignore_list (list, optional): the list of modules to ignore while profiling. Defaults to None.
"""
self.reset_profile()
_patch_functionals()
_patch_tensor_methods()
def register_module_hooks(module, ignore_list):
if ignore_list and type(module) in ignore_list:
return
# if computing the flops of a module directly
if type(module) in MODULE_HOOK_MAPPING:
if not hasattr(module, "__flops_handle__"):
module.__flops_handle__ = module.register_forward_hook(MODULE_HOOK_MAPPING[type(module)])
return
# if computing the flops of the functionals in a module
def pre_hook(module, input):
module_flop_count.append([])
module_mac_count.append([])
if not hasattr(module, "__pre_hook_handle__"):
module.__pre_hook_handle__ = module.register_forward_pre_hook(pre_hook)
def post_hook(module, input, output):
if module_flop_count:
module.__flops__ += sum([elem[1] for elem in module_flop_count[-1]])
module_flop_count.pop()
module.__macs__ += sum([elem[1] for elem in module_mac_count[-1]])
module_mac_count.pop()
if not hasattr(module, "__post_hook_handle__"):
module.__post_hook_handle__ = module.register_forward_hook(post_hook)
def start_time_hook(module, input):
get_accelerator().synchronize()
module.__start_time__ = time.time()
if not hasattr(module, "__start_time_hook_handle"):
module.__start_time_hook_handle__ = module.register_forward_pre_hook(start_time_hook)
def end_time_hook(module, input, output):
get_accelerator().synchronize()
module.__duration__ += time.time() - module.__start_time__
if not hasattr(module, "__end_time_hook_handle__"):
module.__end_time_hook_handle__ = module.register_forward_hook(end_time_hook)
self.model.apply(partial(register_module_hooks, ignore_list=ignore_list))
self.started = True
self.func_patched = True
def stop_profile(self):
"""Stop profiling.
All torch.nn.functionals are restored to their originals.
"""
if self.started and self.func_patched:
_reload_functionals()
_reload_tensor_methods()
self.func_patched = False
def remove_profile_attrs(module):
if hasattr(module, "__pre_hook_handle__"):
module.__pre_hook_handle__.remove()
del module.__pre_hook_handle__
if hasattr(module, "__post_hook_handle__"):
module.__post_hook_handle__.remove()
del module.__post_hook_handle__
if hasattr(module, "__flops_handle__"):
module.__flops_handle__.remove()
del module.__flops_handle__
if hasattr(module, "__start_time_hook_handle__"):
module.__start_time_hook_handle__.remove()
del module.__start_time_hook_handle__
if hasattr(module, "__end_time_hook_handle__"):
module.__end_time_hook_handle__.remove()
del module.__end_time_hook_handle__
self.model.apply(remove_profile_attrs)
def reset_profile(self):
"""Resets the profiling.
Adds or resets the extra attributes.
"""
def get_param_count_and_ep(param):
"""
Return the number of parameters in the layer, whether the layer is an MoE layer,
and its expert parallelism size if so
"""
prefix = 'ep_size_'
offset = len(prefix)
expert_parallelism = 0
if getattr(param, "group_name", "").startswith(prefix):
try:
expert_parallelism = int(param.group_name[offset:])
except ValueError:
pass
is_moe = expert_parallelism > 0
return param.numel(), is_moe, expert_parallelism
def add_or_reset_attrs(module):
parameters = [get_param_count_and_ep(p) for p in module.parameters()]
module.__flops__ = 0
module.__macs__ = 0
module.__params__ = sum(count for count, is_expert, _ in parameters if not is_expert)
module.__expert_params__ = sum(count for count, is_expert, _ in parameters if is_expert)
# number of expert parameters taking into account other expert parallel groups
module.__model_expert_params__ = sum(count * expert_parallelism
for count, is_expert, expert_parallelism in parameters if is_expert)
module.__start_time__ = 0
module.__duration__ = 0
self.model.apply(add_or_reset_attrs)
def end_profile(self):
"""Ends profiling.
The added attributes and handles are removed recursively on all the modules.
"""
if not self.started:
return
self.stop_profile()
self.started = False
def remove_profile_attrs(module):
if hasattr(module, "__flops__"):
del module.__flops__
if hasattr(module, "__macs__"):
del module.__macs__
if hasattr(module, "__params__"):
del module.__params__
if hasattr(module, "__expert_params__"):
del module.__expert_params__
if hasattr(module, "__model_expert_params__"):
del module.__model_expert_params__
if hasattr(module, "__start_time__"):
del module.__start_time__
if hasattr(module, "__duration__"):
del module.__duration__
self.model.apply(remove_profile_attrs)
def get_total_flops(self, as_string=False):
"""Returns the total flops of the model.
Args:
as_string (bool, optional): whether to output the flops as string. Defaults to False.
Returns:
            The number of floating-point operations of the model forward pass.
"""
total_flops = get_module_flops(self.model)
return num_to_string(total_flops) if as_string else total_flops
def get_total_macs(self, as_string=False):
"""Returns the total MACs of the model.
Args:
as_string (bool, optional): whether to output the flops as string. Defaults to False.
Returns:
The number of multiply-accumulate operations of the model forward pass.
"""
total_macs = get_module_macs(self.model)
return macs_to_string(total_macs) if as_string else total_macs
def get_total_duration(self, as_string=False):
"""Returns the total duration of the model forward pass.
Args:
as_string (bool, optional): whether to output the duration as string. Defaults to False.
Returns:
The latency of the model forward pass.
"""
total_duration = get_module_duration(self.model)
return duration_to_string(total_duration) if as_string else total_duration
def get_total_params(self, as_string=False):
"""Returns the total number of parameters stored per rank.
Args:
as_string (bool, optional): whether to output the parameters as string. Defaults to False.
Returns:
The total number of parameters stored per rank.
"""
total_params = self.model.__expert_params__ + self.model.__params__
return params_to_string(total_params) if as_string else total_params
def is_expert_tensor_parallelism_enabled(self):
for _, module in self.model.named_modules():
if isinstance(module, MoE) and hasattr(module, 'enable_expert_tensor_parallelism'):
return module.enable_expert_tensor_parallelism
return False
def print_model_profile(self, profile_step=1, module_depth=-1, top_modules=1, detailed=True, output_file=None):
"""Prints the model graph with the measured profile attached to each module.
Args:
profile_step (int, optional): The global training step at which to profile. Note that warm up steps are needed for accurate time measurement.
module_depth (int, optional): The depth of the model to which to print the aggregated module information. When set to -1, it prints information from the top to the innermost modules (the maximum depth).
top_modules (int, optional): Limits the aggregated profile output to the number of top modules specified.
detailed (bool, optional): Whether to print the detailed model profile.
output_file (str, optional): Path to the output file. If None, the profiler prints to stdout.
"""
if not self.started:
return
import sys
import os.path
original_stdout = None
f = None
if output_file and output_file != "":
dir_path = os.path.dirname(os.path.abspath(output_file))
if not os.path.exists(dir_path):
os.makedirs(dir_path)
original_stdout = sys.stdout
f = open(output_file, "w")
sys.stdout = f
total_flops = self.get_total_flops()
total_macs = self.get_total_macs()
total_duration = self.get_total_duration()
total_params = self.get_total_params()
expert_tensor_parallelism = None # silence the linters
total_model_expert_params = total_model_nonexpert_params = 0
if self.ds_engine:
total_model_nonexpert_params = self.model.__params__ * self.ds_engine.mp_world_size
if self.ds_engine.has_moe_layers:
expert_tensor_parallelism = self.is_expert_tensor_parallelism_enabled()
total_model_expert_params = self.model.__model_expert_params__ * (self.ds_engine.mp_world_size
if expert_tensor_parallelism else 1)
self.flops = total_flops
self.macs = total_macs
self.params = total_params
print("\n-------------------------- DeepSpeed Flops Profiler --------------------------")
print(f'Profile Summary at step {profile_step}:')
print(
"Notations:\ndata parallel size (dp_size), model parallel size(mp_size),\nnumber of parameters (params), number of multiply-accumulate operations(MACs),\nnumber of floating-point operations (flops), floating-point operations per second (FLOPS),\nfwd latency (forward propagation latency), bwd latency (backward propagation latency),\nstep (weights update latency), iter latency (sum of fwd, bwd and step latency)\n"
)
if self.ds_engine:
print('{:<60} {:<8}'.format('world size: ', self.ds_engine.world_size))
print('{:<60} {:<8}'.format('data parallel size: ', self.ds_engine.dp_world_size))
print('{:<60} {:<8}'.format('model parallel size: ', self.ds_engine.mp_world_size))
print('{:<60} {:<8}'.format('batch size per GPU: ', self.ds_engine.train_micro_batch_size_per_gpu()))
if self.ds_engine.has_moe_layers:
print('{:<60} {:<8}'.format('expert tensor parallelism enabled: ', expert_tensor_parallelism))
print('{:<60} {:<8}'.format('params per gpu: ', params_to_string(total_params)))
if total_model_expert_params > 0:
print('{:<60} {:<8}'.format('params of model: ',
params_to_string(total_model_nonexpert_params + total_model_expert_params)))
print('{:<60} {:<8}'.format(' non-expert params of model: ',
params_to_string(total_model_nonexpert_params)))
print('{:<60} {:<8}'.format(' expert params of model: ', params_to_string(total_model_expert_params)))
else:
print('{:<60} {:<8}'.format('params of model = params per GPU * mp_size: ',
params_to_string(total_model_nonexpert_params)))
print('{:<60} {:<8}'.format('fwd MACs per GPU: ', macs_to_string(total_macs)))
print('{:<60} {:<8}'.format('fwd flops per GPU: ', num_to_string(total_flops)))
print('{:<60} {:<8}'.format(
'fwd flops of model = fwd flops per GPU * mp_size: ',
num_to_string(total_flops * ((self.ds_engine.mp_world_size) if self.ds_engine else 1))))
fwd_latency = self.get_total_duration()
if self.ds_engine and self.ds_engine.wall_clock_breakdown():
fwd_latency = self.ds_engine.timers('forward').elapsed(False) / 1000.0
print('{:<60} {:<8}'.format('fwd latency: ', duration_to_string(fwd_latency)))
print('{:<60} {:<8}'.format('fwd FLOPS per GPU = fwd flops per GPU / fwd latency: ',
flops_to_string(total_flops / fwd_latency)))
if self.ds_engine and self.ds_engine.wall_clock_breakdown():
bwd_factor = 2 + self.recompute_fwd_factor
bwd_latency = self.ds_engine.timers('backward').elapsed(False) / 1000.0
step_latency = self.ds_engine.timers('step').elapsed(False) / 1000.0
print('{:<60} {:<8}'.format('bwd latency: ', duration_to_string(bwd_latency)))
print('{:<60} {:<8}'.format(f'bwd FLOPS per GPU = {bwd_factor} * fwd flops per GPU / bwd latency: ',
flops_to_string(bwd_factor * total_flops / bwd_latency)))
print('{:<60} {:<8}'.format(
f'fwd+bwd FLOPS per GPU = {bwd_factor+1} * fwd flops per GPU / (fwd+bwd latency): ',
flops_to_string((bwd_factor + 1) * total_flops / (fwd_latency + bwd_latency))))
print('{:<60} {:<8}'.format('step latency: ', duration_to_string(step_latency)))
iter_latency = fwd_latency + bwd_latency + step_latency
print('{:<60} {:<8}'.format('iter latency: ', duration_to_string(iter_latency)))
print('{:<60} {:<8}'.format(f'FLOPS per GPU = {bwd_factor+1} * fwd flops per GPU / iter latency: ',
flops_to_string((bwd_factor + 1) * total_flops / iter_latency)))
samples_per_iter = self.ds_engine.train_micro_batch_size_per_gpu() * self.ds_engine.world_size
print('{:<60} {:<8.2f}'.format('samples/second: ', samples_per_iter / iter_latency))
def flops_repr(module):
params = module.__params__ + module.__expert_params__
flops = get_module_flops(module)
macs = get_module_macs(module)
items = [
params_to_string(params),
"{:.2%} Params".format(params / total_params if total_params else 0),
macs_to_string(macs),
"{:.2%} MACs".format(0.0 if total_macs == 0 else macs / total_macs),
]
duration = get_module_duration(module)
items.append(duration_to_string(duration))
items.append("{:.2%} latency".format(0.0 if total_duration == 0 else duration / total_duration))
items.append(flops_to_string(0.0 if duration == 0 else flops / duration))
items.append(module.original_extra_repr())
return ", ".join(items)
def add_extra_repr(module):
flops_extra_repr = flops_repr.__get__(module)
if module.extra_repr != flops_extra_repr:
module.original_extra_repr = module.extra_repr
module.extra_repr = flops_extra_repr
assert module.extra_repr != module.original_extra_repr
def del_extra_repr(module):
if hasattr(module, "original_extra_repr"):
module.extra_repr = module.original_extra_repr
del module.original_extra_repr
self.model.apply(add_extra_repr)
print("\n----------------------------- Aggregated Profile per GPU -----------------------------")
self.print_model_aggregated_profile(module_depth=module_depth, top_modules=top_modules)
if detailed:
print("\n------------------------------ Detailed Profile per GPU ------------------------------")
print(
"Each module profile is listed after its name in the following order: \nparams, percentage of total params, MACs, percentage of total MACs, fwd latency, percentage of total fwd latency, fwd FLOPS"
)
print(
"\nNote: 1. A module can have torch.nn.module or torch.nn.functional to compute logits (e.g. CrossEntropyLoss). They are not counted as submodules, thus not to be printed out. However they make up the difference between a parent's MACs (or latency) and the sum of its submodules'.\n2. Number of floating-point operations is a theoretical estimation, thus FLOPS computed using that could be larger than the maximum system throughput.\n3. The fwd latency listed in the top module's profile is directly captured at the module forward function in PyTorch, thus it's less than the fwd latency shown above which is captured in DeepSpeed.\n"
)
print(self.model)
self.model.apply(del_extra_repr)
print("------------------------------------------------------------------------------")
if output_file:
sys.stdout = original_stdout
f.close()
def print_model_aggregated_profile(self, module_depth=-1, top_modules=1):
"""Prints the names of the top top_modules modules in terms of aggregated time, flops, and parameters at depth module_depth.
Args:
module_depth (int, optional): the depth of the modules to show. Defaults to -1 (the innermost modules).
top_modules (int, optional): the number of top modules to show. Defaults to 1.
"""
info = {}
if not hasattr(self.model, "__flops__"):
print("no __flops__ attribute in the model, call this function after start_profile and before end_profile")
return
def walk_module(module, curr_depth, info):
if curr_depth not in info:
info[curr_depth] = {}
if module.__class__.__name__ not in info[curr_depth]:
info[curr_depth][module.__class__.__name__] = [
0,
0,
0,
] # macs, params, time
info[curr_depth][module.__class__.__name__][0] += get_module_macs(module)
info[curr_depth][module.__class__.__name__][1] += module.__params__ + module.__expert_params__
info[curr_depth][module.__class__.__name__][2] += get_module_duration(module)
has_children = len(module._modules.items()) != 0
if has_children:
for child in module.children():
walk_module(child, curr_depth + 1, info)
walk_module(self.model, 0, info)
depth = module_depth
if module_depth == -1:
depth = len(info) - 1
print(f'Top {top_modules} modules in terms of params, MACs or fwd latency at different model depths:')
for d in range(depth):
num_items = min(top_modules, len(info[d]))
sort_macs = {
k: macs_to_string(v[0])
for k, v in sorted(info[d].items(), key=lambda item: item[1][0], reverse=True)[:num_items]
}
sort_params = {
k: params_to_string(v[1])
for k, v in sorted(info[d].items(), key=lambda item: item[1][1], reverse=True)[:num_items]
}
sort_time = {
k: duration_to_string(v[2])
for k, v in sorted(info[d].items(), key=lambda item: item[1][2], reverse=True)[:num_items]
}
print(f"depth {d}:")
print(f" params - {sort_params}")
print(f" MACs - {sort_macs}")
print(f" fwd latency - {sort_time}")
def _prod(dims):
p = 1
for v in dims:
p *= v
return p
def _linear_flops_compute(input, weight, bias=None):
out_features = weight.shape[0]
macs = input.numel() * out_features
return 2 * macs, macs
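# Illustrative sketch (not part of the original module): `_demo_linear_flops` is a
# hypothetical helper showing how _linear_flops_compute counts work for an
# (B, in_features) input against an (out_features, in_features) weight.
def _demo_linear_flops():
    x = torch.randn(4, 64)
    w = torch.randn(128, 64)
    flops, macs = _linear_flops_compute(x, w)
    # macs = B * in_features * out_features; flops = 2 * macs
    assert macs == 4 * 64 * 128 and flops == 2 * macs
    return flops, macs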
def _relu_flops_compute(input, inplace=False):
return input.numel(), 0
def _prelu_flops_compute(input: Tensor, weight: Tensor):
return input.numel(), 0
def _elu_flops_compute(input: Tensor, alpha: float = 1.0, inplace: bool = False):
return input.numel(), 0
def _leaky_relu_flops_compute(input: Tensor, negative_slope: float = 0.01, inplace: bool = False):
return input.numel(), 0
def _relu6_flops_compute(input: Tensor, inplace: bool = False):
return input.numel(), 0
def _silu_flops_compute(input: Tensor, inplace: bool = False):
return input.numel(), 0
def _gelu_flops_compute(input, **kwargs):
return input.numel(), 0
def _pool_flops_compute(input,
kernel_size,
stride=None,
padding=0,
dilation=None,
ceil_mode=False,
count_include_pad=True,
divisor_override=None,
return_indices=None):
return input.numel(), 0
def _conv_flops_compute(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
assert weight.shape[1] * groups == input.shape[1]
batch_size = input.shape[0]
in_channels = input.shape[1]
out_channels = weight.shape[0]
kernel_dims = list(weight.shape[2:])
input_dims = list(input.shape[2:])
length = len(input_dims)
strides = stride if type(stride) is tuple else (stride, ) * length
dilations = dilation if type(dilation) is tuple else (dilation, ) * length
if isinstance(padding, str):
if padding == 'valid':
paddings = (0, ) * length
elif padding == 'same':
paddings = ()
for d, k in zip(dilations, kernel_dims):
total_padding = d * (k - 1)
paddings += (total_padding // 2, )
elif isinstance(padding, tuple):
paddings = padding
else:
paddings = (padding, ) * length
output_dims = []
for idx, input_dim in enumerate(input_dims):
output_dim = (input_dim + 2 * paddings[idx] - (dilations[idx] *
(kernel_dims[idx] - 1) + 1)) // strides[idx] + 1
output_dims.append(output_dim)
filters_per_channel = out_channels // groups
conv_per_position_macs = int(_prod(kernel_dims)) * in_channels * filters_per_channel
active_elements_count = batch_size * int(_prod(output_dims))
overall_conv_macs = conv_per_position_macs * active_elements_count
overall_conv_flops = 2 * overall_conv_macs
bias_flops = 0
if bias is not None:
bias_flops = out_channels * active_elements_count
return int(overall_conv_flops + bias_flops), int(overall_conv_macs)
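# Illustrative sketch (not part of the original module): `_demo_conv_flops` is a
# hypothetical helper showing the MAC count for a 3x3 convolution (3 in-channels,
# 16 out-channels, stride 1, padding 1) on a (1, 3, 32, 32) input, which keeps the
# 32x32 spatial size.
def _demo_conv_flops():
    x = torch.randn(1, 3, 32, 32)
    w = torch.randn(16, 3, 3, 3)
    b = torch.randn(16)
    flops, macs = _conv_flops_compute(x, w, bias=b, stride=1, padding=1)
    # macs = kernel_elems * in_channels * out_channels * output_elems
    assert macs == (3 * 3) * 3 * 16 * (32 * 32)
    # flops = 2 * macs plus one add per output element for the bias
    assert flops == 2 * macs + 16 * (32 * 32)
    return flops, macs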
def _conv_trans_flops_compute(
input,
weight,
bias=None,
stride=1,
padding=0,
output_padding=0,
groups=1,
dilation=1,
):
batch_size = input.shape[0]
in_channels = input.shape[1]
out_channels = weight.shape[1]
kernel_dims = list(weight.shape[2:])
input_dims = list(input.shape[2:])
length = len(input_dims)
paddings = padding if type(padding) is tuple else (padding, ) * length
strides = stride if type(stride) is tuple else (stride, ) * length
dilations = dilation if type(dilation) is tuple else (dilation, ) * length
output_dims = []
for idx, input_dim in enumerate(input_dims):
output_dim = (input_dim + 2 * paddings[idx] - (dilations[idx] *
(kernel_dims[idx] - 1) + 1)) // strides[idx] + 1
output_dims.append(output_dim)
paddings = padding if type(padding) is tuple else (padding, padding)
strides = stride if type(stride) is tuple else (stride, stride)
dilations = dilation if type(dilation) is tuple else (dilation, dilation)
filters_per_channel = out_channels // groups
conv_per_position_macs = int(_prod(kernel_dims)) * in_channels * filters_per_channel
active_elements_count = batch_size * int(_prod(input_dims))
overall_conv_macs = conv_per_position_macs * active_elements_count
overall_conv_flops = 2 * overall_conv_macs
bias_flops = 0
if bias is not None:
bias_flops = out_channels * batch_size * int(_prod(output_dims))
return int(overall_conv_flops + bias_flops), int(overall_conv_macs)
def _batch_norm_flops_compute(
input,
running_mean,
running_var,
weight=None,
bias=None,
training=False,
momentum=0.1,
eps=1e-05,
):
has_affine = weight is not None
if training:
# estimation
return input.numel() * (5 if has_affine else 4), 0
flops = input.numel() * (2 if has_affine else 1)
return flops, 0
def _layer_norm_flops_compute(
input: Tensor,
normalized_shape: List[int],
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
eps: float = 1e-5,
):
has_affine = weight is not None
# estimation
return input.numel() * (5 if has_affine else 4), 0
def _group_norm_flops_compute(input: Tensor,
num_groups: int,
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
eps: float = 1e-5):
has_affine = weight is not None
# estimation
return input.numel() * (5 if has_affine else 4), 0
def _instance_norm_flops_compute(
input: Tensor,
running_mean: Optional[Tensor] = None,
running_var: Optional[Tensor] = None,
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
use_input_stats: bool = True,
momentum: float = 0.1,
eps: float = 1e-5,
):
has_affine = weight is not None
# estimation
return input.numel() * (5 if has_affine else 4), 0
def _upsample_flops_compute(*args, **kwargs):
input = args[0]
size = kwargs.get('size', None)
if size is None and len(args) > 1:
size = args[1]
if size is not None:
if isinstance(size, tuple) or isinstance(size, list):
return int(_prod(size)), 0
else:
return int(size), 0
scale_factor = kwargs.get('scale_factor', None)
if scale_factor is None and len(args) > 2:
scale_factor = args[2]
assert scale_factor is not None, "either size or scale_factor should be defined"
flops = input.numel()
if isinstance(scale_factor, tuple) and len(scale_factor) == len(input):
        flops *= int(_prod(scale_factor))
    else:
        flops *= scale_factor**len(input)
return flops, 0
def _softmax_flops_compute(input, dim=None, _stacklevel=3, dtype=None):
return input.numel(), 0
def _embedding_flops_compute(
input,
weight,
padding_idx=None,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
sparse=False,
):
return 0, 0
def _dropout_flops_compute(input, p=0.5, training=True, inplace=False):
return 0, 0
def _matmul_flops_compute(input, other, *, out=None):
"""
Count flops for the matmul operation.
"""
macs = _prod(input.shape) * other.shape[-1]
return 2 * macs, macs
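# Illustrative sketch (not part of the original module): `_demo_matmul_flops` is a
# hypothetical helper showing the batched-matmul count, where the MACs are the
# product of the left operand's shape and the last dimension of the right operand.
def _demo_matmul_flops():
    a = torch.randn(2, 3, 5)
    b = torch.randn(2, 5, 7)
    flops, macs = _matmul_flops_compute(a, b)
    assert macs == 2 * 3 * 5 * 7 and flops == 2 * macs
    return flops, macs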
def _addmm_flops_compute(input, mat1, mat2, *, beta=1, alpha=1, out=None):
"""
Count flops for the addmm operation.
"""
macs = _prod(mat1.shape) * mat2.shape[-1]
return 2 * macs + _prod(input.shape), macs
def _einsum_flops_compute(equation, *operands):
"""
Count flops for the einsum operation.
"""
equation = equation.replace(" ", "")
input_shapes = [o.shape for o in operands]
# Re-map equation so that same equation with different alphabet
# representations will look the same.
letter_order = OrderedDict((k, 0) for k in equation if k.isalpha()).keys()
mapping = {ord(x): 97 + i for i, x in enumerate(letter_order)}
equation = equation.translate(mapping)
np_arrs = [np.zeros(s) for s in input_shapes]
optim = np.einsum_path(equation, *np_arrs, optimize="optimal")[1]
for line in optim.split("\n"):
if "optimized flop" in line.lower():
flop = int(float(line.split(":")[-1]))
return flop, 0
raise NotImplementedError("Unsupported einsum operation.")
def _tensor_addmm_flops_compute(self, mat1, mat2, *, beta=1, alpha=1, out=None):
"""
Count flops for the tensor addmm operation.
"""
macs = _prod(mat1.shape) * mat2.shape[-1]
return 2 * macs + _prod(self.shape), macs
def _mul_flops_compute(input, other, *, out=None):
return _elementwise_flops_compute(input, other)
def _add_flops_compute(input, other, *, alpha=1, out=None):
return _elementwise_flops_compute(input, other)
def _elementwise_flops_compute(input, other):
if not torch.is_tensor(input):
if torch.is_tensor(other):
return _prod(other.shape), 0
else:
return 1, 0
elif not torch.is_tensor(other):
return _prod(input.shape), 0
else:
dim_input = len(input.shape)
dim_other = len(other.shape)
max_dim = max(dim_input, dim_other)
final_shape = []
for i in range(max_dim):
in_i = input.shape[i] if i < dim_input else 1
ot_i = other.shape[i] if i < dim_other else 1
if in_i > ot_i:
final_shape.append(in_i)
else:
final_shape.append(ot_i)
flops = _prod(final_shape)
return flops, 0
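# Illustrative sketch (not part of the original module): `_demo_elementwise_flops`
# is a hypothetical helper; element-wise ops are counted as one flop per element of
# the broadcast result shape and contribute no MACs.
def _demo_elementwise_flops():
    a = torch.randn(4, 1, 8)
    b = torch.randn(4, 6, 8)
    flops, macs = _elementwise_flops_compute(a, b)
    assert flops == 4 * 6 * 8 and macs == 0
    return flops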
def wrapFunc(func, funcFlopCompute):
oldFunc = func
name = func.__str__
old_functions[name] = oldFunc
def newFunc(*args, **kwds):
flops, macs = funcFlopCompute(*args, **kwds)
if module_flop_count:
module_flop_count[-1].append((name, flops))
if module_mac_count and macs:
module_mac_count[-1].append((name, macs))
return oldFunc(*args, **kwds)
newFunc.__str__ = func.__str__
return newFunc
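# Illustrative sketch (not part of the original module): `_demo_wrap_func` is a
# hypothetical helper showing how a wrapped functional records a (name, flops)
# entry into the innermost module_flop_count frame before delegating to the
# original function.
def _demo_wrap_func():
    wrapped_linear = wrapFunc(F.linear, _linear_flops_compute)
    module_flop_count.append([])
    wrapped_linear(torch.randn(2, 16), torch.randn(32, 16))
    recorded = module_flop_count.pop()
    # recorded == [(<original F.linear __str__>, 2 * 2 * 16 * 32)]
    return recorded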
def _patch_functionals():
# FC
F.linear = wrapFunc(F.linear, _linear_flops_compute)
# convolutions
F.conv1d = wrapFunc(F.conv1d, _conv_flops_compute)
F.conv2d = wrapFunc(F.conv2d, _conv_flops_compute)
F.conv3d = wrapFunc(F.conv3d, _conv_flops_compute)
# conv transposed
F.conv_transpose1d = wrapFunc(F.conv_transpose1d, _conv_trans_flops_compute)
F.conv_transpose2d = wrapFunc(F.conv_transpose2d, _conv_trans_flops_compute)
F.conv_transpose3d = wrapFunc(F.conv_transpose3d, _conv_trans_flops_compute)
# activations
F.relu = wrapFunc(F.relu, _relu_flops_compute)
F.prelu = wrapFunc(F.prelu, _prelu_flops_compute)
F.elu = wrapFunc(F.elu, _elu_flops_compute)
F.leaky_relu = wrapFunc(F.leaky_relu, _leaky_relu_flops_compute)
F.relu6 = wrapFunc(F.relu6, _relu6_flops_compute)
if hasattr(F, "silu"):
F.silu = wrapFunc(F.silu, _silu_flops_compute)
F.gelu = wrapFunc(F.gelu, _gelu_flops_compute)
# Normalizations
F.batch_norm = wrapFunc(F.batch_norm, _batch_norm_flops_compute)
F.layer_norm = wrapFunc(F.layer_norm, _layer_norm_flops_compute)
F.instance_norm = wrapFunc(F.instance_norm, _instance_norm_flops_compute)
F.group_norm = wrapFunc(F.group_norm, _group_norm_flops_compute)
# poolings
F.avg_pool1d = wrapFunc(F.avg_pool1d, _pool_flops_compute)
F.avg_pool2d = wrapFunc(F.avg_pool2d, _pool_flops_compute)
F.avg_pool3d = wrapFunc(F.avg_pool3d, _pool_flops_compute)
F.max_pool1d = wrapFunc(F.max_pool1d, _pool_flops_compute)
F.max_pool2d = wrapFunc(F.max_pool2d, _pool_flops_compute)
F.max_pool3d = wrapFunc(F.max_pool3d, _pool_flops_compute)
F.adaptive_avg_pool1d = wrapFunc(F.adaptive_avg_pool1d, _pool_flops_compute)
F.adaptive_avg_pool2d = wrapFunc(F.adaptive_avg_pool2d, _pool_flops_compute)
F.adaptive_avg_pool3d = wrapFunc(F.adaptive_avg_pool3d, _pool_flops_compute)
F.adaptive_max_pool1d = wrapFunc(F.adaptive_max_pool1d, _pool_flops_compute)
F.adaptive_max_pool2d = wrapFunc(F.adaptive_max_pool2d, _pool_flops_compute)
F.adaptive_max_pool3d = wrapFunc(F.adaptive_max_pool3d, _pool_flops_compute)
# upsample
F.upsample = wrapFunc(F.upsample, _upsample_flops_compute)
F.interpolate = wrapFunc(F.interpolate, _upsample_flops_compute)
# softmax
F.softmax = wrapFunc(F.softmax, _softmax_flops_compute)
# embedding
F.embedding = wrapFunc(F.embedding, _embedding_flops_compute)
def _patch_tensor_methods():
torch.matmul = wrapFunc(torch.matmul, _matmul_flops_compute)
torch.Tensor.matmul = wrapFunc(torch.Tensor.matmul, _matmul_flops_compute)
torch.mm = wrapFunc(torch.mm, _matmul_flops_compute)
torch.Tensor.mm = wrapFunc(torch.Tensor.mm, _matmul_flops_compute)
torch.bmm = wrapFunc(torch.bmm, _matmul_flops_compute)
torch.Tensor.bmm = wrapFunc(torch.Tensor.bmm, _matmul_flops_compute)
torch.addmm = wrapFunc(torch.addmm, _addmm_flops_compute)
torch.Tensor.addmm = wrapFunc(torch.Tensor.addmm, _tensor_addmm_flops_compute)
torch.mul = wrapFunc(torch.mul, _mul_flops_compute)
torch.Tensor.mul = wrapFunc(torch.Tensor.mul, _mul_flops_compute)
torch.add = wrapFunc(torch.add, _add_flops_compute)
torch.Tensor.add = wrapFunc(torch.Tensor.add, _add_flops_compute)
torch.einsum = wrapFunc(torch.einsum, _einsum_flops_compute)
torch.baddbmm = wrapFunc(torch.baddbmm, _tensor_addmm_flops_compute)
def _reload_functionals():
# torch.nn.functional does not support importlib.reload()
F.linear = old_functions[F.linear.__str__]
F.conv1d = old_functions[F.conv1d.__str__]
F.conv2d = old_functions[F.conv2d.__str__]
F.conv3d = old_functions[F.conv3d.__str__]
F.conv_transpose1d = old_functions[F.conv_transpose1d.__str__]
F.conv_transpose2d = old_functions[F.conv_transpose2d.__str__]
F.conv_transpose3d = old_functions[F.conv_transpose3d.__str__]
F.relu = old_functions[F.relu.__str__]
F.prelu = old_functions[F.prelu.__str__]
F.elu = old_functions[F.elu.__str__]
F.leaky_relu = old_functions[F.leaky_relu.__str__]
F.relu6 = old_functions[F.relu6.__str__]
if hasattr(F, "silu"):
F.silu = old_functions[F.silu.__str__]
F.gelu = old_functions[F.gelu.__str__]
F.batch_norm = old_functions[F.batch_norm.__str__]
F.layer_norm = old_functions[F.layer_norm.__str__]
F.instance_norm = old_functions[F.instance_norm.__str__]
F.group_norm = old_functions[F.group_norm.__str__]
F.avg_pool1d = old_functions[F.avg_pool1d.__str__]
F.avg_pool2d = old_functions[F.avg_pool2d.__str__]
F.avg_pool3d = old_functions[F.avg_pool3d.__str__]
F.max_pool1d = old_functions[F.max_pool1d.__str__]
F.max_pool2d = old_functions[F.max_pool2d.__str__]
F.max_pool3d = old_functions[F.max_pool3d.__str__]
F.adaptive_avg_pool1d = old_functions[F.adaptive_avg_pool1d.__str__]
F.adaptive_avg_pool2d = old_functions[F.adaptive_avg_pool2d.__str__]
F.adaptive_avg_pool3d = old_functions[F.adaptive_avg_pool3d.__str__]
F.adaptive_max_pool1d = old_functions[F.adaptive_max_pool1d.__str__]
F.adaptive_max_pool2d = old_functions[F.adaptive_max_pool2d.__str__]
F.adaptive_max_pool3d = old_functions[F.adaptive_max_pool3d.__str__]
F.upsample = old_functions[F.upsample.__str__]
F.interpolate = old_functions[F.interpolate.__str__]
F.softmax = old_functions[F.softmax.__str__]
F.embedding = old_functions[F.embedding.__str__]
def _reload_tensor_methods():
torch.matmul = old_functions[torch.matmul.__str__]
torch.Tensor.matmul = old_functions[torch.Tensor.matmul.__str__]
torch.mm = old_functions[torch.mm.__str__]
torch.Tensor.mm = old_functions[torch.Tensor.mm.__str__]
    torch.bmm = old_functions[torch.bmm.__str__]
torch.Tensor.bmm = old_functions[torch.Tensor.bmm.__str__]
torch.addmm = old_functions[torch.addmm.__str__]
torch.Tensor.addmm = old_functions[torch.Tensor.addmm.__str__]
torch.mul = old_functions[torch.mul.__str__]
torch.Tensor.mul = old_functions[torch.Tensor.mul.__str__]
torch.add = old_functions[torch.add.__str__]
torch.Tensor.add = old_functions[torch.Tensor.add.__str__]
torch.einsum = old_functions[torch.einsum.__str__]
torch.baddbmm = old_functions[torch.baddbmm.__str__]
def _rnn_flops(flops, rnn_module, w_ih, w_hh, input_size):
input_size, hidden_size = w_ih.shape
# matrix matrix mult ih state and internal state
flops += 2 * input_size * hidden_size - hidden_size
# matrix matrix mult hh state and internal state
flops += 2 * hidden_size * hidden_size - hidden_size
if isinstance(rnn_module, (nn.RNN, nn.RNNCell)):
# add both operations
flops += rnn_module.hidden_size
elif isinstance(rnn_module, (nn.GRU, nn.GRUCell)):
# hadamard of r
flops += rnn_module.hidden_size
# adding operations from both states
flops += rnn_module.hidden_size * 3
# last two hadamard _product and add
flops += rnn_module.hidden_size * 3
elif isinstance(rnn_module, (nn.LSTM, nn.LSTMCell)):
# adding operations from both states
flops += rnn_module.hidden_size * 4
# two hadamard _product and add for C state
flops += rnn_module.hidden_size + rnn_module.hidden_size + rnn_module.hidden_size
# final hadamard
flops += rnn_module.hidden_size + rnn_module.hidden_size + rnn_module.hidden_size
return flops
def _rnn_forward_hook(rnn_module, input, output):
flops = 0
# input is a tuple containing a sequence to process and (optionally) hidden state
inp = input[0]
batch_size = inp.shape[0]
seq_length = inp.shape[1]
num_layers = rnn_module.num_layers
for i in range(num_layers):
w_ih = rnn_module.__getattr__("weight_ih_l" + str(i))
w_hh = rnn_module.__getattr__("weight_hh_l" + str(i))
if i == 0:
input_size = rnn_module.input_size
else:
input_size = rnn_module.hidden_size
flops = _rnn_flops(flops, rnn_module, w_ih, w_hh, input_size)
if rnn_module.bias:
b_ih = rnn_module.__getattr__("bias_ih_l" + str(i))
b_hh = rnn_module.__getattr__("bias_hh_l" + str(i))
flops += b_ih.shape[0] + b_hh.shape[0]
flops *= batch_size
flops *= seq_length
if rnn_module.bidirectional:
flops *= 2
rnn_module.__flops__ += int(flops)
def _rnn_cell_forward_hook(rnn_cell_module, input, output):
flops = 0
inp = input[0]
batch_size = inp.shape[0]
w_ih = rnn_cell_module.__getattr__("weight_ih")
w_hh = rnn_cell_module.__getattr__("weight_hh")
input_size = inp.shape[1]
flops = _rnn_flops(flops, rnn_cell_module, w_ih, w_hh, input_size)
if rnn_cell_module.bias:
b_ih = rnn_cell_module.__getattr__("bias_ih")
b_hh = rnn_cell_module.__getattr__("bias_hh")
flops += b_ih.shape[0] + b_hh.shape[0]
flops *= batch_size
rnn_cell_module.__flops__ += int(flops)
MODULE_HOOK_MAPPING = {
# RNN
nn.RNN: _rnn_forward_hook,
nn.GRU: _rnn_forward_hook,
nn.LSTM: _rnn_forward_hook,
nn.RNNCell: _rnn_cell_forward_hook,
nn.LSTMCell: _rnn_cell_forward_hook,
nn.GRUCell: _rnn_cell_forward_hook,
}
def num_to_string(num, precision=2):
if num // 10**9 > 0:
return str(round(num / 10.0**9, precision)) + " G"
elif num // 10**6 > 0:
return str(round(num / 10.0**6, precision)) + " M"
elif num // 10**3 > 0:
return str(round(num / 10.0**3, precision)) + " K"
else:
return str(num)
def macs_to_string(macs, units=None, precision=2):
if units is None:
if macs // 10**9 > 0:
return str(round(macs / 10.0**9, precision)) + " GMACs"
elif macs // 10**6 > 0:
return str(round(macs / 10.0**6, precision)) + " MMACs"
elif macs // 10**3 > 0:
return str(round(macs / 10.0**3, precision)) + " KMACs"
else:
return str(macs) + " MACs"
else:
if units == "GMACs":
return str(round(macs / 10.0**9, precision)) + " " + units
elif units == "MMACs":
return str(round(macs / 10.0**6, precision)) + " " + units
elif units == "KMACs":
return str(round(macs / 10.0**3, precision)) + " " + units
else:
return str(macs) + " MACs"
def number_to_string(num, units=None, precision=2):
if units is None:
if num // 10**9 > 0:
return str(round(num / 10.0**9, precision)) + " G"
elif num // 10**6 > 0:
return str(round(num / 10.0**6, precision)) + " M"
elif num // 10**3 > 0:
return str(round(num / 10.0**3, precision)) + " K"
else:
return str(num) + " "
else:
if units == "G":
return str(round(num / 10.0**9, precision)) + " " + units
elif units == "M":
return str(round(num / 10.0**6, precision)) + " " + units
elif units == "K":
return str(round(num / 10.0**3, precision)) + " " + units
else:
return str(num) + " "
def flops_to_string(flops, units=None, precision=2):
if units is None:
if flops // 10**12 > 0:
return str(round(flops / 10.0**12, precision)) + " TFLOPS"
if flops // 10**9 > 0:
return str(round(flops / 10.0**9, precision)) + " GFLOPS"
elif flops // 10**6 > 0:
return str(round(flops / 10.0**6, precision)) + " MFLOPS"
elif flops // 10**3 > 0:
return str(round(flops / 10.0**3, precision)) + " KFLOPS"
else:
return str(flops) + " FLOPS"
else:
if units == "TFLOPS":
return str(round(flops / 10.0**12, precision)) + " " + units
if units == "GFLOPS":
return str(round(flops / 10.0**9, precision)) + " " + units
elif units == "MFLOPS":
return str(round(flops / 10.0**6, precision)) + " " + units
elif units == "KFLOPS":
return str(round(flops / 10.0**3, precision)) + " " + units
else:
return str(flops) + " FLOPS"
def params_to_string(params_num, units=None, precision=2):
if units is None:
if params_num // 10**6 > 0:
            return str(round(params_num / 10**6, precision)) + " M"
        elif params_num // 10**3 > 0:
            return str(round(params_num / 10**3, precision)) + " k"
else:
return str(params_num)
else:
if units == "M":
return str(round(params_num / 10.0**6, precision)) + " " + units
elif units == "K":
return str(round(params_num / 10.0**3, precision)) + " " + units
else:
return str(params_num)
def duration_to_string(duration, units=None, precision=2):
if units is None:
if duration > 1:
return str(round(duration, precision)) + " s"
elif duration * 10**3 > 1:
return str(round(duration * 10**3, precision)) + " ms"
elif duration * 10**6 > 1:
return str(round(duration * 10**6, precision)) + " us"
else:
return str(duration)
else:
if units == "us":
return str(round(duration * 10.0**6, precision)) + " " + units
elif units == "ms":
return str(round(duration * 10.0**3, precision)) + " " + units
else:
return str(round(duration, precision)) + " s"
# can not iterate over all submodules using self.model.modules()
# since modules() returns duplicate modules only once
def get_module_flops(module):
    flops_sum = module.__flops__
    # iterate over immediate children modules
    for child in module.children():
        flops_sum += get_module_flops(child)
    return flops_sum
def get_module_macs(module):
    macs_sum = module.__macs__
    # iterate over immediate children modules
    for child in module.children():
        macs_sum += get_module_macs(child)
    return macs_sum
def get_module_duration(module):
duration = module.__duration__
if duration == 0: # e.g. ModuleList
for m in module.children():
duration += m.__duration__
return duration
def get_model_profile(model,
input_shape=None,
args=[],
kwargs={},
print_profile=True,
detailed=True,
module_depth=-1,
top_modules=1,
warm_up=1,
as_string=True,
output_file=None,
ignore_modules=None,
mode='forward'):
"""Returns the total floating-point operations, MACs, and parameters of a model.
Example:
.. code-block:: python
model = torchvision.models.alexnet()
batch_size = 256
        flops, macs, params = get_model_profile(model=model, input_shape=(batch_size, 3, 224, 224))
Args:
model ([torch.nn.Module]): the PyTorch model to be profiled.
input_shape (tuple): input shape to the model. If specified, the model takes a tensor with this shape as the only positional argument.
args (list): list of positional arguments to the model.
kwargs (dict): dictionary of keyword arguments to the model.
print_profile (bool, optional): whether to print the model profile. Defaults to True.
detailed (bool, optional): whether to print the detailed model profile. Defaults to True.
        module_depth (int, optional): the depth into the nested modules. Defaults to -1 (the innermost modules).
        top_modules (int, optional): the number of top modules to print in the aggregated profile. Defaults to 1.
warm_up (int, optional): the number of warm-up steps before measuring the latency of each module. Defaults to 1.
as_string (bool, optional): whether to print the output as string. Defaults to True.
output_file (str, optional): path to the output file. If None, the profiler prints to stdout.
ignore_modules ([type], optional): the list of modules to ignore during profiling. Defaults to None.
Returns:
The number of floating-point operations, multiply-accumulate operations (MACs), and parameters in the model.
"""
assert isinstance(model, nn.Module), "model must be a PyTorch module"
prof = FlopsProfiler(model)
model.eval()
if input_shape is not None:
assert type(input_shape) is tuple, "input_shape must be a tuple"
assert len(input_shape) >= 1, "input_shape must have at least one element"
try:
input = torch.ones(()).new_empty(
(*input_shape, ),
dtype=next(model.parameters()).dtype,
device=next(model.parameters()).device,
)
except StopIteration:
input = torch.ones(()).new_empty((*input_shape, ))
args = [input]
assert (len(args) > 0) or (len(kwargs) > 0), "args and/or kwargs must be specified if input_shape is None"
for _ in range(warm_up):
if kwargs:
if mode == 'forward':
_ = model(*args, **kwargs)
if mode == 'generate':
_ = model.generate(*args, **kwargs)
else:
if mode == 'forward':
_ = model(*args)
if mode == 'generate':
_ = model.generate(*args)
prof.start_profile(ignore_list=ignore_modules)
if kwargs:
if mode == 'forward':
_ = model(*args, **kwargs)
if mode == 'generate':
_ = model.generate(*args, **kwargs)
else:
if mode == 'forward':
_ = model(*args)
if mode == 'generate':
_ = model.generate(*args)
flops = prof.get_total_flops()
macs = prof.get_total_macs()
params = prof.get_total_params()
if print_profile:
prof.print_model_profile(profile_step=warm_up,
module_depth=module_depth,
top_modules=top_modules,
detailed=detailed,
output_file=output_file)
prof.end_profile()
if as_string:
return number_to_string(flops), macs_to_string(macs), params_to_string(params)
return flops, macs, params
| 51,583 | 39.26854 | 650 | py |
DeepSpeed | DeepSpeed-master/deepspeed/compression/basic_layer.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import math
from torch import nn
from torch.nn import init
import deepspeed.comm as dist
from .utils import TopKBinarizer, SymQuantizer, AsymQuantizer, TernaryQuantizer, BinaryQuantizer
from deepspeed.utils import logger
g_mpu = None
class QuantAct(nn.Module):
"""
    Class to quantize given activations. Note that when using this module, the input activation quantization range is fixed for all
    tokens/images at inference time. This generally costs some accuracy in exchange for better latency.
Parameters:
----------
act_range_momentum : float, default 0.95
Momentum for updating the activation quantization range.
    quant_mode : str, default 'symmetric'
        Quantization mode; either 'symmetric' or 'asymmetric'.
"""
def __init__(self, act_range_momentum=0.95, quant_mode='symmetric'):
super(QuantAct, self).__init__()
self.act_range_momentum = act_range_momentum
self.quant_mode = quant_mode
if quant_mode == 'symmetric':
self.act_function = SymQuantizer.apply
else:
self.act_function = AsymQuantizer.apply
self.register_buffer('x_min_max', torch.zeros(2))
def forward(self, x, num_bits, *args):
"""
x: the activation that we need to quantize
num_bits: the number of bits we need to quantize the activation to
        *args: unused extra arguments, kept to align with the interface of the other quantization functions
"""
if self.training:
x_min = x.data.min()
x_max = x.data.max()
# Initialization
if self.x_min_max[0] == self.x_min_max[1]:
self.x_min_max[0] = x_min
self.x_min_max[1] = x_max
# if do not need momentum, please set self.act_range_momentum = 0
self.x_min_max[0] = self.x_min_max[0] * self.act_range_momentum + x_min * (1 - self.act_range_momentum)
self.x_min_max[1] = self.x_min_max[1] * self.act_range_momentum + x_max * (1 - self.act_range_momentum)
x_q = self.act_function(x, num_bits, self.x_min_max[0], self.x_min_max[1])
return x_q
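# Illustrative sketch (not part of the original module): `_demo_quant_act` is a
# hypothetical helper showing 8-bit symmetric activation quantization; the min/max
# range buffer is only updated while the module is in training mode.
def _demo_quant_act():
    quant = QuantAct(act_range_momentum=0.95, quant_mode='symmetric')
    quant.train()
    x = torch.randn(16, 32)
    x_q = quant(x, 8)
    return x_q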
class Embedding_Compress(nn.Embedding):
def __init__(self, *kargs):
super(Embedding_Compress, self).__init__(*kargs)
self.weight.start_bits = None
self.weight.target_bits = None
self.weight.q_period = None
self.weight_quantization_enabled_in_forward = False
self.weight_quantization_enabled = False
def extra_repr(self):
return 'num_embeddings={}, embedding_dim={}, weight_quantization={}'.format(
self.num_embeddings, self.embedding_dim, self.weight.target_bits)
def enable_weight_quantization(self, start_bits, target_bits, quantization_period,
weight_quantization_enabled_in_forward, quantization_type, num_groups):
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = quantization_period
self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward
if self.weight_quantization_enabled_in_forward:
logger.warning(
"************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************"
)
if self.weight.target_bits >= 3:
if quantization_type == 'symmetric':
self.weight_quantizer = SymQuantizer.apply
else:
self.weight_quantizer = AsymQuantizer.apply
elif self.weight.target_bits == 2:
assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for ternary weight quantization'
self.weight_quantizer = TernaryQuantizer.apply
elif self.weight.target_bits == 1:
assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for binary weight quantization'
self.weight_quantizer = BinaryQuantizer.apply
# for embedding, we always use token-wise quantization
self.weight_quantize_num_groups = self.weight.size(0)
def fix_weight_quantization(self):
self.weight.data = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
self.weight_quantize_num_groups).data
self.weight_quantization_enabled_in_forward = False
return None
def forward(self, input):
if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled:
weight = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
self.weight_quantize_num_groups)
else:
weight = self.weight
out = nn.functional.embedding(input, weight, self.padding_idx, self.max_norm, self.norm_type,
self.scale_grad_by_freq, self.sparse)
return out
class LinearLayer_Compress(nn.Linear):
"""
Linear layer with compression.
"""
def __init__(self, *kargs, bias=True):
super(LinearLayer_Compress, self).__init__(*kargs, bias=bias)
self.sparse_pruning_method = None
self.row_pruning_method = None
self.head_pruning_method = None
self.activation_quantization_method = None
self.weight.start_bits = None
self.weight.target_bits = None
self.weight.q_period = None
self.weight_quantization_enabled_in_forward = False
self.weight_quantization_enabled = False
self.sparse_pruning_enabled = False
self.row_pruning_enabled = False
self.head_pruning_enabled = False
self.activation_quantization_enabled = False
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}, sparse pruning={}, row pruning={}, head pruning={}, activation quantization={}, weight_quantization={}'.format(
self.in_features, self.out_features, self.bias is not None, self.sparse_pruning_method is not None, \
self.row_pruning_method is not None, self.head_pruning_method is not None, self.activation_quantization_method is not None, self.weight.target_bits)
def enable_sparse_pruning(self, ratio, method):
# Here, we support two cases: L1 norm based pruning and topk based pruning
self.sparse_pruning_ratio = ratio
self.sparse_pruning_method = method
if method == 'l1':
weight_norm = torch.abs(self.weight.data)
mask = TopKBinarizer.apply(weight_norm, self.sparse_pruning_ratio, False)
mask = mask.view(self.weight.size())
mask = mask.to(self.weight.device)
elif method == 'topk':
self.sparse_mask_scores = nn.Parameter(torch.Tensor(self.weight.size()))
self.sparse_mask_scores.data = self.sparse_mask_scores.data.to(self.weight.device)
init.kaiming_uniform_(self.sparse_mask_scores, a=math.sqrt(5))
mask = None
else:
raise NotImplementedError
self.register_buffer('sparse_pruning_mask', mask)
def enable_row_pruning(self, ratio, method):
# Here, we support two cases: L1 norm based pruning and topk based pruning
self.row_pruning_ratio = ratio
self.row_pruning_method = method
if method == 'l1':
# compute the l1 norm of each column
weight_norm = torch.norm(self.weight.data, p=1, dim=1)
mask = TopKBinarizer.apply(weight_norm, self.row_pruning_ratio, False)
mask = mask.view(-1, 1)
mask = mask.to(self.weight.device)
elif method == 'topk':
self.row_mask_scores = nn.Parameter(torch.Tensor(self.weight.size(0), 1))
self.row_mask_scores.data = self.row_mask_scores.data.to(self.weight.device)
init.kaiming_uniform_(self.row_mask_scores, a=math.sqrt(5))
mask = None
else:
raise NotImplementedError
self.register_buffer('row_pruning_mask', mask)
def enable_head_pruning(self, ratio, method, num_heads):
# Here, we support only topk based pruning
self.num_heads = num_heads
self.head_pruning_ratio = ratio
self.head_pruning_method = method
if method not in ['topk']:
raise NotImplementedError
else:
self.head_pruning_ratio = ratio
self.head_pruning_scores = nn.Parameter(torch.Tensor(1,
self.num_heads)) # we apply the pruning to O matrix
self.head_pruning_scores.data = self.head_pruning_scores.data.to(self.weight.device)
init.kaiming_uniform_(self.head_pruning_scores, a=math.sqrt(5))
def fix_sparse_pruning_helper(self):
mask = self.get_mask(pruning_type='sparse')
self.weight.data = self.weight.data * mask
del self.sparse_pruning_mask
if self.sparse_pruning_method == 'topk':
del self.sparse_mask_scores
self.sparse_pruning_method = None
self.sparse_pruning_enabled = False
return None
def fix_row_col_pruning_helper(self, mask=None, dim_reduction=False):
# This function is used for row/col pruning
# particularly, if we have two back-to-back layers, F1 and F2; when
# we remove rows from F1, we also need to remove columns from F2
# However, if we only have one layer, F1, then we only need to mask pruned
# rows as 0 in F1
if mask is None:
mask = self.get_mask(pruning_type='row').bool()
if dim_reduction:
start_bits = self.weight.start_bits
target_bits = self.weight.target_bits
q_period = self.weight.q_period
self.weight = nn.Parameter(self.weight.data[mask.view(-1), :])
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = q_period
if self.bias is not None:
self.bias = nn.Parameter(self.bias.data[mask.view(-1)])
self.out_features = self.weight.size(0)
else:
self.weight.data = self.weight.data * mask.view(-1, 1)
if self.bias is not None:
self.bias.data = self.bias.data * mask.view(-1)
del self.row_pruning_mask
if self.row_pruning_method == 'topk':
del self.row_mask_scores
self.row_pruning_method = None
else:
# this is generally for column pruning
start_bits = self.weight.start_bits
target_bits = self.weight.target_bits
q_period = self.weight.q_period
self.weight = nn.Parameter(self.weight.data[:, mask.view(-1)])
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = q_period
self.in_features = self.weight.size(1)
mask = None
self.row_pruning_enabled = False
return mask
def fix_head_pruning_helper(self, mask=None, num_heads=None, dim_reduction=False):
        # Similar to row/col pruning, head pruning also needs to prune the QKV matrices that are associated with the O matrix
num_heads = num_heads if num_heads else self.num_heads
if mask is None:
if self.head_pruning_method == 'topk':
mask = self.get_mask(pruning_type='head').bool()
if dim_reduction:
shape = self.weight.size(0)
start_bits = self.weight.start_bits
target_bits = self.weight.target_bits
q_period = self.weight.q_period
self.weight = nn.Parameter(self.weight.data.t().reshape(num_heads,
-1)[mask.view(-1), :].reshape(-1,
shape).t())
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = q_period
else:
shape = self.weight.size()
self.weight.data = (self.weight.data.t().reshape(self.num_heads, -1) * mask.view(-1, 1)).reshape(
shape[1], shape[0]).t()
if self.head_pruning_method == 'topk':
del self.head_pruning_scores
self.head_pruning_method = None
else:
raise NotImplementedError
else:
start_bits = self.weight.start_bits
target_bits = self.weight.target_bits
q_period = self.weight.q_period
shape = self.weight.size(1)
self.weight = nn.Parameter(self.weight.data.reshape(num_heads, -1)[mask.view(-1), :].reshape(-1, shape))
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = q_period
if self.bias is not None:
self.bias = nn.Parameter(self.bias.data.reshape(num_heads, -1)[mask.view(-1), :].reshape(-1))
self.head_pruning_enabled = False
return mask
def get_mask(self, pruning_type='row'):
if pruning_type == 'sparse':
if self.sparse_pruning_method == 'l1':
return self.sparse_pruning_mask.to(self.weight.device)
elif self.sparse_pruning_method == 'topk':
return TopKBinarizer.apply(self.sparse_mask_scores, self.sparse_pruning_ratio, False)
else:
raise NotImplementedError
if pruning_type == 'row':
if self.row_pruning_method == 'l1':
return self.row_pruning_mask.to(self.weight.device)
elif self.row_pruning_method == 'topk':
return TopKBinarizer.apply(self.row_mask_scores, self.row_pruning_ratio, False)
else:
raise NotImplementedError
elif pruning_type == 'head':
if self.head_pruning_method == 'topk':
return TopKBinarizer.apply(self.head_pruning_scores, self.head_pruning_ratio, False)
else:
raise NotImplementedError
else:
raise NotImplementedError
def enable_weight_quantization(self, start_bits, target_bits, quantization_period,
weight_quantization_enabled_in_forward, quantization_type, num_groups):
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = quantization_period
self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward
if self.weight_quantization_enabled_in_forward:
logger.warning(
"************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************"
)
if self.weight.target_bits >= 3:
if quantization_type == 'symmetric':
self.weight_quantizer = SymQuantizer.apply
else:
self.weight_quantizer = AsymQuantizer.apply
elif self.weight.target_bits == 2:
assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for ternary weight quantization'
self.weight_quantizer = TernaryQuantizer.apply
elif self.weight.target_bits == 1:
assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for binary weight quantization'
self.weight_quantizer = BinaryQuantizer.apply
self.weight_quantize_num_groups = num_groups
def fix_weight_quantization(self):
self.weight.data = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
self.weight_quantize_num_groups).data
self.weight_quantization_enabled_in_forward = False
return None
def enable_activation_quantization(self, bits, quantization_type, range_calibration):
assert bits in [4, 8], 'Only 4/8 bits activation quantization are supported for now'
self.activation_quantization_bits = bits
self.activation_quantization_method = f"{quantization_type}_{range_calibration}"
if range_calibration == 'static':
self.activation_quantizer = QuantAct(quant_mode=quantization_type)
else:
if quantization_type == 'symmetric':
self.activation_quantizer = SymQuantizer.apply
else:
self.activation_quantizer = AsymQuantizer.apply
def head_pruning_reshape(self, w, mask):
shape = w.shape
return (w.t().reshape(self.num_heads, -1) * mask.view(-1, 1)).reshape(shape[1], shape[0]).t()
def forward(self, input, skip_bias_add=False):
if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled:
weight = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
self.weight_quantize_num_groups)
bias = self.bias
else:
weight = self.weight
bias = self.bias
if self.sparse_pruning_enabled and self.sparse_pruning_method:
mask = self.get_mask(pruning_type='sparse')
weight = weight * mask.view(self.weight.size())
if self.row_pruning_enabled and self.row_pruning_method:
mask = self.get_mask(pruning_type='row')
weight = weight * mask.view(-1, 1)
if bias is not None:
bias = bias * mask.view(-1)
if self.head_pruning_enabled and self.head_pruning_method:
mask = self.get_mask(pruning_type='head')
weight = self.head_pruning_reshape(weight, mask)
if self.activation_quantization_enabled:
if 'dynamic' in self.activation_quantization_method:
num_groups = input.numel() // input.size(-1)
else:
num_groups = 1
input = self.activation_quantizer(input, self.activation_quantization_bits, None, None, num_groups)
if skip_bias_add:
# used for mpu linear layers
output = nn.functional.linear(input, weight, None)
return output, bias
else:
output = nn.functional.linear(input, weight, bias)
return output
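# Illustrative sketch (not part of the original module): `_demo_linear_layer_compress`
# is a hypothetical helper showing 50% L1-based sparse pruning on a compressed
# linear layer followed by a forward pass with the pruning mask applied.
def _demo_linear_layer_compress():
    layer = LinearLayer_Compress(8, 4, bias=True)
    layer.enable_sparse_pruning(ratio=0.5, method='l1')
    layer.sparse_pruning_enabled = True
    out = layer(torch.randn(2, 8))
    return out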
class Conv2dLayer_Compress(nn.Conv2d):
"""
Conv2D layer with compression.
"""
def __init__(self, *kargs):
super(Conv2dLayer_Compress, self).__init__(*kargs)
self.sparse_pruning_method = None
self.channel_pruning_method = None
self.activation_quantization_method = None
self.weight.start_bits = None
self.weight.target_bits = None
self.weight.q_period = None
self.weight_quantization_enabled_in_forward = False
self.sparse_pruning_enabled = False
self.channel_pruning_enabled = False
self.activation_quantization_enabled = False
def __repr__(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0, ) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1, ) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0, ) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
if self.padding_mode != 'zeros':
s += ', padding_mode={padding_mode}'
output = s.format(**self.__dict__)
return output + ' sparse pruning={}, channel pruning={}, activation quantization={}, weight_quantization={}'.format(
self.sparse_pruning_method is not None, self.channel_pruning_method is not None,
self.activation_quantization_method is not None, self.weight.target_bits)
def enable_sparse_pruning(self, ratio, method):
self.sparse_pruning_ratio = ratio
self.sparse_pruning_method = method
if method == 'l1':
weight_norm = torch.abs(self.weight.data)
mask = TopKBinarizer.apply(weight_norm, self.sparse_pruning_ratio, False)
mask = mask.view(self.weight.size())
mask = mask.to(self.weight.device)
elif method == 'topk':
self.sparse_mask_scores = nn.Parameter(torch.Tensor(self.weight.size()))
self.sparse_mask_scores.data = self.sparse_mask_scores.data.to(self.weight.device)
init.kaiming_uniform_(self.sparse_mask_scores, a=math.sqrt(5))
mask = None
else:
raise NotImplementedError
self.register_buffer('sparse_pruning_mask', mask)
def enable_channel_pruning(self, ratio, method):
# Here, we support two cases: L1 norm based pruning and topk based pruning
self.channel_pruning_ratio = ratio
self.channel_pruning_method = method
if method == 'l1':
# compute the l1 norm of each conv2d kernel (the last three dimension)
weight_norm = torch.norm(self.weight.data, p=1, dim=[1, 2, 3])
mask = TopKBinarizer.apply(weight_norm, self.channel_pruning_ratio, False)
mask = mask.view(-1, 1, 1, 1)
mask = mask.to(self.weight.device)
elif method == 'topk':
self.channel_mask_scores = nn.Parameter(torch.Tensor(self.weight.size(0), 1, 1, 1))
self.channel_mask_scores.data = self.channel_mask_scores.data.to(self.weight.device)
init.kaiming_uniform_(self.channel_mask_scores, a=math.sqrt(5))
mask = None
else:
raise NotImplementedError
self.register_buffer('channel_pruning_mask', mask)
def fix_sparse_pruning_helper(self):
mask = self.get_mask(pruning_type='sparse')
self.weight.data = self.weight.data * mask
del self.sparse_pruning_mask
if self.sparse_pruning_method == 'topk':
del self.sparse_mask_scores
self.sparse_pruning_method = None
self.sparse_pruning_enabled = False
return None
def fix_channel_pruning_helper(self, mask=None, dim_reduction=False):
if mask is None:
if self.channel_pruning_method in ['l1', 'topk']:
mask = self.get_mask(pruning_type='channel').bool()
if dim_reduction:
start_bits = self.weight.start_bits
target_bits = self.weight.target_bits
q_period = self.weight.q_period
self.weight = nn.Parameter(self.weight.data[mask.view(-1), ...])
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = q_period
if self.bias is not None:
self.bias = nn.Parameter(self.bias.data[mask.view(-1)])
else:
self.weight.data = self.weight.data * mask.view(-1, 1, 1, 1)
if self.bias is not None:
self.bias.data = self.bias.data * mask.view(-1)
del self.channel_pruning_mask
if self.channel_pruning_method == 'topk':
del self.channel_mask_scores
self.channel_pruning_method = None
else:
raise NotImplementedError
else:
start_bits = self.weight.start_bits
target_bits = self.weight.target_bits
q_period = self.weight.q_period
self.weight = nn.Parameter(self.weight.data[:, mask.view(-1), ...])
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = q_period
mask = None
self.channel_pruning_enabled = False
return mask
def get_mask(self, pruning_type='sparse'):
if pruning_type == 'sparse':
if self.sparse_pruning_method == 'l1':
return self.sparse_pruning_mask.to(self.weight.device)
elif self.sparse_pruning_method == 'topk':
return TopKBinarizer.apply(self.sparse_mask_scores, self.sparse_pruning_ratio, False)
else:
raise NotImplementedError
elif pruning_type == 'channel':
if self.channel_pruning_method == 'l1':
return self.channel_pruning_mask.to(self.weight.device)
elif self.channel_pruning_method == 'topk':
return TopKBinarizer.apply(self.channel_mask_scores, self.channel_pruning_ratio, False)
else:
raise NotImplementedError
else:
raise NotImplementedError
def fix_weight_quantization(self):
self.weight.data = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
self.weight_quantize_num_groups).data
self.weight_quantization_enabled_in_forward = False
return None
def enable_weight_quantization(self, start_bits, target_bits, quantization_period,
weight_quantization_enabled_in_forward, quantization_type, num_groups):
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = quantization_period
self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward
if self.weight_quantization_enabled_in_forward:
assert self.weight.target_bits >= 4, 'Only >=4 bits weight quantization are supported during forward pass for now'
logger.warning(
"************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************"
)
if quantization_type == 'symmetric':
self.weight_quantizer = SymQuantizer.apply
else:
self.weight_quantizer = AsymQuantizer.apply
self.weight_quantize_num_groups = num_groups
def enable_activation_quantization(self, bits, quantization_type, range_calibration):
assert bits in [4, 8], 'Only 4/8 bits activation quantization are supported for now'
self.activation_quantization_bits = bits
self.activation_quantization_method = f"{quantization_type}_{range_calibration}"
if range_calibration == 'static':
self.activation_quantizer = QuantAct(quant_mode=quantization_type)
else:
if quantization_type == 'symmetric':
self.activation_quantizer = SymQuantizer.apply
else:
self.activation_quantizer = AsymQuantizer.apply
def forward(self, input):
if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled:
weight = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
self.weight_quantize_num_groups)
bias = self.bias
else:
weight = self.weight
bias = self.bias
if self.sparse_pruning_enabled and self.sparse_pruning_method:
mask = self.get_mask(pruning_type='sparse')
weight = weight * mask.view(self.weight.size())
if self.channel_pruning_enabled:
mask = self.get_mask(pruning_type='channel')
weight = weight * mask.view(-1, 1, 1, 1)
if bias is not None:
bias = bias * mask.view(-1)
if self.activation_quantization_enabled:
if 'dynamic' in self.activation_quantization_method:
num_groups = input.numel() // input[0].numel()
else:
num_groups = 1
input = self.activation_quantizer(input, self.activation_quantization_bits, None, None, num_groups)
return nn.functional.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups)
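# Usage sketch (assumption: flags are normally driven by the compression scheduler;
# shapes and ratios are illustrative only):
#
#   conv = Conv2dLayer_Compress(64, 128, 3)
#   conv.enable_channel_pruning(0.5, 'l1')    # keep the 50% of output channels with the largest L1 norm
#   conv.channel_pruning_enabled = True
#   y = conv(torch.randn(1, 64, 32, 32))      # pruned channels are zeroed; output shape is unchanged
#   mask = conv.fix_channel_pruning_helper(dim_reduction=True)  # physically removes channels and returns the mask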
class BNLayer_Compress(nn.BatchNorm2d):
def fix_channel_pruning_helper(self, mask, dim_reduction=True):
self.weight = nn.Parameter(self.weight.data[mask.view(-1)])
self.bias = nn.Parameter(self.bias.data[mask.view(-1)])
self.running_mean = self.running_mean[mask.view(-1)]
self.running_var = self.running_var[mask.view(-1)]
def _reduce(input_):
"""All-reduce the the input tensor across model parallel group."""
group = g_mpu.get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if dist.get_world_size(group=group) == 1:
return input_
# All-reduce.
dist.all_reduce(input_, group=group)
return input_
def split_tensor_along_last_dim(tensor, num_partitions, contiguous_split_chunks=False):
"""Split a tensor along its last dimension.
Arguments:
tensor: input tensor.
num_partitions: number of partitions to split the tensor
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
"""
# Get the size and dimension.
last_dim = tensor.dim() - 1
assert tensor.size()[last_dim] % num_partitions == 0
last_dim_size = tensor.size()[last_dim] // num_partitions
# Split.
tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
# Note: torch.split does not create contiguous tensors by default.
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
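# Illustration (hypothetical shapes): splitting a (4, 6) tensor into 3 partitions
# yields three (4, 2) views of the original storage:
#
#   parts = split_tensor_along_last_dim(torch.arange(24.).view(4, 6), 3)
#   assert parts[0].shape == (4, 2)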
def _split(input_):
"""Split the tensor along its last dimension and keep the
corresponding slice."""
group = g_mpu.get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if dist.get_world_size(group=group) == 1:
return input_
# Split along last dimension.
world_size = dist.get_world_size(group=group)
input_list = split_tensor_along_last_dim(input_, world_size)
# Note: torch.split does not create contiguous tensors by default.
rank = dist.get_rank(group=group)
output = input_list[rank].contiguous()
return output
def _gather(input_):
"""Gather tensors and concatenate along the last dimension."""
group = g_mpu.get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if dist.get_world_size(group=group) == 1:
return input_
# Size and dimension.
last_dim = input_.dim() - 1
rank = dist.get_rank(group=group)
world_size = dist.get_world_size(group=group)
tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
tensor_list[rank] = input_
dist.all_gather(tensor_list, input_, group=group)
# Note: torch.cat already creates a contiguous tensor.
output = torch.cat(tensor_list, dim=last_dim).contiguous()
return output
class _CopyToModelParallelRegion(torch.autograd.Function):
"""Pass the input to the model parallel region."""
@staticmethod
def forward(ctx, input_):
return input_
@staticmethod
def backward(ctx, grad_output):
return _reduce(grad_output)
class _ReduceFromModelParallelRegion(torch.autograd.Function):
"""All-reduce the input from the model parallel region."""
@staticmethod
def forward(ctx, input_):
return _reduce(input_)
@staticmethod
def backward(ctx, grad_output):
return grad_output
class _ScatterToModelParallelRegion(torch.autograd.Function):
"""Split the input and keep only the corresponding chuck to the rank."""
@staticmethod
def forward(ctx, input_):
return _split(input_)
@staticmethod
def backward(ctx, grad_output):
return _gather(grad_output)
class _GatherFromModelParallelRegion(torch.autograd.Function):
"""Gather the input from model parallel region and concatenate."""
@staticmethod
def forward(ctx, input_):
return _gather(input_)
@staticmethod
def backward(ctx, grad_output):
return _split(grad_output)
# -----------------
# Helper functions.
# -----------------
def copy_to_model_parallel_region(input_):
return _CopyToModelParallelRegion.apply(input_)
def reduce_from_model_parallel_region(input_):
return _ReduceFromModelParallelRegion.apply(input_)
def scatter_to_model_parallel_region(input_):
return _ScatterToModelParallelRegion.apply(input_)
def gather_from_model_parallel_region(input_):
return _GatherFromModelParallelRegion.apply(input_)
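# Summary of the forward/backward duality implemented above (informational comment):
#   copy_to_model_parallel_region:     forward = identity,    backward = all-reduce
#   reduce_from_model_parallel_region: forward = all-reduce,  backward = identity
#   scatter_to_model_parallel_region:  forward = split,       backward = gather
#   gather_from_model_parallel_region: forward = gather,      backward = split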
class ColumnParallelLinear_Compress(LinearLayer_Compress):
def __init__(self, mpu, input_size, output_size, bias=True, gather_output=True, skip_bias_add=False):
# Keep input parameters
global g_mpu
g_mpu = mpu
self.input_size = input_size
self.output_size = output_size
self.gather_output = gather_output
self.skip_bias_add = skip_bias_add
# Divide the weight matrix along the last dimension.
world_size = mpu.get_model_parallel_world_size()
assert output_size % world_size == 0
self.output_size_per_partition = output_size // world_size
super(ColumnParallelLinear_Compress, self).__init__(self.input_size, self.output_size_per_partition, bias=bias)
def forward(self, input_):
# Set up backprop all-reduce.
input_parallel = copy_to_model_parallel_region(input_)
# Matrix multiply.
if self.skip_bias_add:
output_parallel, bias = super().forward(input_parallel, True)
else:
output_parallel = super().forward(input_parallel)
bias = None
if self.gather_output:
# All-gather across the partitions.
output = gather_from_model_parallel_region(output_parallel)
else:
output = output_parallel
return output, bias
class RowParallelLinear_Compress(LinearLayer_Compress):
def __init__(self, mpu, input_size, output_size, bias=True, input_is_parallel=False, skip_bias_add=False):
# Keep input parameters
global g_mpu
g_mpu = mpu
self.input_size = input_size
self.output_size = output_size
self.input_is_parallel = input_is_parallel
self.skip_bias_add = skip_bias_add
# Divide the weight matrix along the last dimension.
world_size = mpu.get_model_parallel_world_size()
assert input_size % world_size == 0
self.input_size_per_partition = input_size // world_size
super(RowParallelLinear_Compress, self).__init__(self.input_size_per_partition, self.output_size, bias=bias)
def forward(self, input_):
# Set up backprop all-reduce.
if self.input_is_parallel:
input_parallel = input_
else:
input_parallel = scatter_to_model_parallel_region(input_)
# Matrix multiply.
output_parallel, bias = super().forward(input_parallel, True)
# All-reduce across all the partitions.
output_ = reduce_from_model_parallel_region(output_parallel)
if not self.skip_bias_add:
if bias is not None:
output = output_ + bias
else:
output = output_
output_bias = None
else:
output = output_
output_bias = bias
return output, output_bias
| 36,033 | 41.846611 | 169 | py |
DeepSpeed | DeepSpeed-master/deepspeed/compression/compress.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import re
from .helper import compression_preparation, fix_compression, recursive_getattr, is_module_compressible
from .config import get_compression_config
from ..runtime.config_utils import dict_raise_error_on_duplicate_keys
from .constants import *
import os
import json
try:
import neural_compressor as nc
except ImportError as e:
nc = None
def check_deepspeed_config(config):
if isinstance(config, dict):
return config
elif os.path.exists(config):
return json.load(open(config, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys)
else:
raise ValueError(
f"Expected a string path to an existing deepspeed config, or a dictionary. Received: {config}")
def get_module_name(group_name, model, key_word, exist_module_name, mpu=None, verbose=True):
'''
get the associated module name from the model based on the key_word provided by users
'''
return_module_name = []
for name, module in model.named_modules():
module_check = is_module_compressible(module, mpu)
if re.search(key_word, name) is not None and module_check:
if name in exist_module_name and verbose:
# logger.warning
raise ValueError(
f"{name} is already added to compression, please check your config file for {group_name}.")
if name not in exist_module_name:
exist_module_name.add(name)
return_module_name.append(name)
return return_module_name, exist_module_name
def get_compress_methods(model, compress_methods, mpu=None):
# extract the compression module for each method in compress_methods
layer_added_compress_methods = []
for method, method_content in compress_methods.items():
if LAYER_REDUCTION in method:
continue
# for loop different methods, i.e., weight quantization, activation quantization etc
exist_module_name = set()
shared_parameters = method_content[SHARED_PARAMETERS] # get all the shared parameters
for group_name, method_parameters in method_content[DIFFERENT_GROUPS].items():
# for loop different groups, i.e., weight quantization group 1, weight quantization group 2 etc
module_name_list = []
related_module_name_list = []
if method_parameters[DIFFERENT_GROUPS_RELATED_MODULE_SCOPE]:
                # This is used for head/row/channel pruning. If users provide the related module scope, we can shrink the
                # layer dimensions for them; otherwise we just mask the pruned weights with zeros.
for key_word, related_key_words in zip(method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE],
method_parameters[DIFFERENT_GROUPS_RELATED_MODULE_SCOPE]):
module_name, exist_module_name = get_module_name(group_name,
model,
key_word,
exist_module_name,
mpu=mpu)
module_name_list.append(module_name)
tmp_related_module_name_list = []
for rkw in related_key_words:
# related key word can be a list, for instance the QKV for O matrix in Attention
module_name, _ = get_module_name(group_name, model, rkw, set(), mpu=mpu)
tmp_related_module_name_list.append(module_name)
related_module_name_list.append(tmp_related_module_name_list)
else:
for key_word in method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE]:
module_name, exist_module_name = get_module_name(group_name,
model,
key_word,
exist_module_name,
mpu=mpu)
module_name_list.append(module_name)
if module_name_list:
# combine shared parameters with each group
combined_method_parameters = {
**(method_parameters.copy().pop(DIFFERENT_GROUPS_PARAMETERS)),
**shared_parameters
}
compression_item = [module_name_list, related_module_name_list, {method: combined_method_parameters}]
layer_added_compress_methods.append(compression_item)
return layer_added_compress_methods
def init_compression(model, deepspeed_config, teacher_model=None, mpu=None):
"""
Compress a model: replace linear/conv2d layer with deepspeed compression-aware modules
Args:
model (`torch.nn.Module`)
The model to compress.
deepspeed_config (`DeepSpeedConfig`)
            The path to the DeepSpeed config file, or the config as a dict
mpu
The mpu module for Row/Column parallelism
"""
compress_methods = get_compression_config(check_deepspeed_config(deepspeed_config))
if hasattr(model, 'module'):
c_model = model.module
else:
c_model = model
# For layer reduction
if compress_methods[LAYER_REDUCTION][LAYER_REDUCTION_ENABLED]:
assert teacher_model is not None, "Teacher model is required for layer reduction"
student_initialization(c_model, teacher_model, deepspeed_config)
layer_added_compress_methods = get_compress_methods(c_model, compress_methods, mpu=mpu)
compression_preparation(c_model, layer_added_compress_methods, mpu)
# For sparse pruning snip_momentum method
shared_parameters = compress_methods[SPARSE_PRUNING][SHARED_PARAMETERS]
if shared_parameters[SPARSE_PRUNING_ENABLED] and \
shared_parameters[SPARSE_PRUNING_METHOD] == SPARSE_PRUNING_METHOD_SNIP_MOMENTUM:
assert nc is not None, "please ensure the neural_compressor python package is installed by pip or conda if user wants to use snip_momentum sparse pruning"
from .helper import generate_pruners, register_on_step_begin
        from neural_compressor import WeightPruningConfig
config = WeightPruningConfig(target_sparsity=1 - shared_parameters[SPARSE_PRUNING_DENSE_RATIO],
pattern=shared_parameters[SPARSE_PRUNING_BLOCK_PATTERN],
pruning_frequency=shared_parameters[SPARSE_PRUNING_SCHEDULE_OFFSET_STRIDE],
start_step=shared_parameters[SPARSE_PRUNING_SCHEDULE_OFFSET],
end_step=shared_parameters[SPARSE_PRUNING_SCHEDULE_OFFSET_END],
excluded_op_names=shared_parameters[SPARSE_PRUNING_EXCLUDED_MODULES])
pruners = generate_pruners(config, c_model)
c_model.pruners = pruners
register_on_step_begin(c_model)
return model
def redundancy_clean(model, deepspeed_config, mpu=None):
"""
Remove the redundancy of a model
Args:
model (`torch.nn.Module`)
The model to compress.
deepspeed_config (`DeepSpeedConfig`)
            The path to the DeepSpeed config file, or the config as a dict
mpu
The mpu module for Row/Column parallelism
"""
compress_methods = get_compression_config(check_deepspeed_config(deepspeed_config))
if hasattr(model, 'module'):
c_model = model.module
else:
c_model = model
layer_added_compress_methods_tmp = get_compress_methods(c_model, compress_methods, mpu=mpu)
# sort methods
order_list = [
WEIGHT_QUANTIZATION, SPARSE_PRUNING, ROW_PRUNING, HEAD_PRUNING, CHANNEL_PRUNING, ACTIVATION_QUANTIZATION
]
layer_added_compress_methods = sorted(layer_added_compress_methods_tmp,
key=lambda x: order_list.index(list(x[2].keys())[0]))
for module_name_lists, related_module_name_lists, compression_technique in layer_added_compress_methods:
stored_mask = []
need_mask = True if related_module_name_lists else False
for i, mnl in enumerate(module_name_lists):
for module_name in mnl:
mask = fix_compression(c_model, module_name, compression_technique, dim_reduction=need_mask)
if need_mask:
stored_mask.append(mask)
if need_mask:
for rmnl in related_module_name_lists[i]:
for j, module_name in enumerate(rmnl):
mask = fix_compression(c_model,
module_name,
compression_technique,
mask=stored_mask[j],
dim_reduction=True)
return model
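# Typical end-to-end usage (a sketch; assumes ds_config.json carries a valid
# "compression_training" section and that training happens in between):
#
#   model = init_compression(model, "ds_config.json")   # wrap layers with compression-aware modules
#   ...                                                  # train / fine-tune as usual
#   model = redundancy_clean(model, "ds_config.json")   # fold masks in and shrink pruned dimensions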
def student_initialization(student_model, teacher_model, deepspeed_config):
'''
    Given a student model and a teacher model, initialize the student's layers and modules from the selected teacher layers.
Args:
student_model (`torch.nn.Module`)
The model we will update weight
teacher_model (`torch.nn.Module`)
The model guide the student to learn
deepspeed_config (`DeepSpeedConfig`)
            The path to the DeepSpeed config file, or the config as a dict
'''
config = get_compression_config(check_deepspeed_config(deepspeed_config))
compress_methods = config[LAYER_REDUCTION]
module_name_prefix = compress_methods[MODULE_NAME_PREFIX]
teacher_layer = compress_methods[TEACHER_LAYER]
student_layer = [i for i in range(len(teacher_layer))]
other_module_name = compress_methods[OTHER_MODULE_NAME]
'''
name_prefix (`str`)
The prefix name before the layer #.
Example 1: bert.encoder.layer, for BERT_base model's prefix name
Example 2: transformer.h, for GPT-2 hugging face prefix name
teacher_layer (`list of integers`)
        The layers of the teacher that will be used for the student's reinitialization
        Example 1: [1,3,5,7,9] means we want to match the 2nd/4th/6th/8th/10th layers of the teacher to the first 5 layers of the student
    student_layer (`list` or None)
        The layers of the student that need to be re-initialized
        Example 1: None, means we want to reinitialize all the layers
        Example 2: [0,1,2,3,4] means we want to reinitialize the first 5 layers
    other_module_name (`list of string`)
        The modules that will be used for the student's reinitialization
        Example 1: ['bert.pooler', 'bert.embeddings', 'classifier'] means we want to apply the weights of the teacher's embedding/pooler/classifier modules to the student
        Example 2: ['transformer.w', 'transformer.ln_f', 'lm_head'] means we want to apply the weights of the teacher's embedding-related modules to the student
    Note that teacher_layer should match student_layer in length
'''
assert len(student_layer) == len(teacher_layer)
for s_name, t_name in zip(student_layer, teacher_layer):
s_module = recursive_getattr(student_model, module_name_prefix + '.' + str(s_name))
t_module = recursive_getattr(teacher_model, module_name_prefix + '.' + str(t_name))
for s_param, t_param in zip(s_module.parameters(), t_module.parameters()):
s_param.data.copy_(t_param.data)
for name in other_module_name:
s_module = recursive_getattr(student_model, name)
t_module = recursive_getattr(teacher_model, name)
print(name)
for s_param, t_param in zip(s_module.parameters(), t_module.parameters()):
s_param.data.copy_(t_param.data)
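# Sketch of a matching layer_reduction config fragment (key names follow the
# constants imported from .constants; values are illustrative only):
#
#   "layer_reduction": {
#       "enabled": true,
#       "keep_number_layer": 5,
#       "module_name_prefix": "bert.encoder.layer",
#       "teacher_layer": [1, 3, 5, 7, 9],
#       "other_module_name": ["bert.pooler", "bert.embeddings", "classifier"]
#   }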
| 11,886 | 48.529167 | 167 | py |
DeepSpeed | DeepSpeed-master/deepspeed/compression/utils.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from torch import autograd
import math
class TopKBinarizer(autograd.Function):
"""
Top-k Binarizer.
Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j}`
is among the k% highest values of S.
Implementation is inspired from:
https://github.com/yaozhewei/MLPruning
"""
@staticmethod
def forward(ctx, inputs: torch.tensor, threshold: float, sigmoid: bool):
"""
Args:
inputs (`torch.FloatTensor`)
The input matrix from which the binarizer computes the binary mask.
threshold (`float`)
The percentage of weights to keep (the rest is pruned).
`threshold` is a float between 0 and 1.
sigmoid (`bool`)
Whether to apply a sigmoid on the threshold
Returns:
mask (`torch.FloatTensor`)
Binary matrix of the same size as `inputs` acting as a mask (1 - the associated weight is
retained, 0 - the associated weight is pruned).
"""
# Get the subnetwork by sorting the inputs and using the top threshold
if sigmoid:
threshold = torch.sigmoid(threshold).item()
ctx.sigmoid = sigmoid
mask = inputs.clone()
_, idx = inputs.flatten().sort(descending=True)
j = math.ceil(threshold * inputs.numel())
# flat_out and mask access the same memory.
flat_out = mask.flatten()
flat_out[idx[j:]] = 0.
flat_out[idx[:j]] = 1.
ctx.save_for_backward(mask)
return mask
@staticmethod
def backward(ctx, gradOutput):
mask, = ctx.saved_tensors
if ctx.sigmoid:
return gradOutput.clone(), ((gradOutput * mask).sum()).view(-1), None
else:
return gradOutput.clone(), None, None
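# Example (hypothetical values): keep the top 50% of the entries of a score matrix.
#
#   scores = torch.randn(4, 4)
#   mask = TopKBinarizer.apply(scores, 0.5, False)   # sigmoid=False: the threshold is used directly
#   assert mask.sum().item() == 8                    # ceil(0.5 * 16) entries retained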
class SymQuantizer(torch.autograd.Function):
"""
Symmetric quantization
"""
@staticmethod
def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
"""
Args:
inputs (`torch.FloatTensor`)
The input which needs to be quantized
num_bits (int, >=4)
Number of bits to use for quantization
min_value/max_value (torch.FloatTensor)
Used for static activation quantization
num_groups (int)
How many groups to partition the quantization into
Returns:
quantized_input (`torch.FloatTensor`)
Quantized input
"""
assert (min_value is None and max_value is None) or (min_value is not None and max_value is not None
and num_groups == 1)
q_range = 2**num_bits
input_shape = input.shape
if min_value is None:
input = input.reshape(num_groups, -1)
max_input = torch.amax(torch.abs(input), dim=-1).view(num_groups, -1)
else:
max_input = torch.max(min_value.abs(), max_value).view(-1)
scale = 2 * max_input / q_range
output = (input / scale).round().clamp(-q_range // 2, q_range // 2 - 1) * scale
output = output.reshape(input_shape).contiguous()
return output
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input, None, None, None, None
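# Example (hypothetical shapes): fake-quantize a tensor to 8 bits symmetrically,
# using one quantization group per row.
#
#   x = torch.randn(2, 16)
#   xq = SymQuantizer.apply(x, 8, None, None, 2)   # num_groups=2 -> one scale per row
#   assert xq.shape == x.shape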
class AsymQuantizer(torch.autograd.Function):
"""
Asymmetric quantization
"""
@staticmethod
def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
"""
Args:
inputs (`torch.FloatTensor`)
The input which needs to be quantized
num_bits (int, >=4)
Number of bits to use for quantization
min_value/max_value (torch.FloatTensor)
Used for static activation quantization
num_groups (int)
How many groups to partition the quantization into
Returns:
quantized_input (`torch.FloatTensor`)
Quantized input
"""
assert (min_value is None and max_value is None) or (min_value is not None and max_value is not None
and num_groups == 1)
q_range = 2**num_bits
input_shape = input.shape
if min_value is None:
input = input.reshape(num_groups, -1)
min_value = input.amin(dim=-1, keepdim=True)
max_value = input.amax(dim=-1, keepdim=True)
scale = (max_value - min_value) / q_range
zero_point = (min_value / scale).round() * scale
output = ((input - zero_point) / scale).round().clamp(0, q_range - 1) * scale + zero_point
output = output.reshape(input_shape).contiguous()
return output
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input, None, None, None, None
class TernaryQuantizer(torch.autograd.Function):
"""
Ternary quantization
"""
@staticmethod
def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
"""
Args:
inputs (`torch.FloatTensor`)
The input which needs to be quantized
num_bits (int)
Dummy variable
min_value/max_value (torch.FloatTensor)
Used for static activation quantization; for now they are dummy variable
num_groups (int)
How many groups to partition the quantization into
Returns:
quantized_input (`torch.FloatTensor`)
Quantized input
"""
assert (min_value is None and max_value is None)
input_flat = input.reshape(num_groups, -1)
n = input_flat.shape[1]
m = input_flat.norm(p=1, dim=1).div(n)
thres = (0.7 * m).view(-1, 1)
pos = (input_flat > thres).type(input.type())
neg = (input_flat < -thres).type(input.type())
mask = (input_flat.abs() > thres).type(input.type())
alpha = ((mask * input_flat).abs().sum(dim=1) / mask.sum(dim=1)).view(-1, 1)
output = alpha * pos - alpha * neg
output = output.reshape(input.shape).contiguous()
return output
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input, None, None, None, None
class BinaryQuantizer(torch.autograd.Function):
"""
Binary quantization
"""
@staticmethod
def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
"""
Args:
inputs (`torch.FloatTensor`)
The input which needs to be quantized
num_bits (int)
Dummy variable
min_value/max_value (torch.FloatTensor)
Used for static activation quantization; for now they are dummy variable
num_groups (int)
How many groups to partition the quantization into
Returns:
quantized_input (`torch.FloatTensor`)
Quantized input
"""
assert (min_value is None and max_value is None)
input_flat = input.reshape(num_groups, -1)
n = input_flat.shape[1]
m = input_flat.norm(p=1, dim=1, keepdim=True).div(n)
output = input_flat.sign().mul(m)
output = output.reshape(input.shape).contiguous()
return output
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input, None, None, None, None
| 7,818 | 34.06278 | 108 | py |
DeepSpeed | DeepSpeed-master/deepspeed/compression/helper.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from .basic_layer import Embedding_Compress, LinearLayer_Compress, Conv2dLayer_Compress, BNLayer_Compress, ColumnParallelLinear_Compress, RowParallelLinear_Compress
from .constants import *
from deepspeed.utils import logger
try:
from neural_compressor.compression import pruner as nc_pruner
except ImportError as e:
nc_pruner = None
def recursive_getattr(model, module_name):
"""
Recursively get the attribute of a module.
Args:
model (`torch.nn.Module`)
The model to get the attribute from.
module_name (`str`)
The name of the module to get the attribute from.
"""
split_list = module_name.split('.')
output = model
for name in split_list:
output = getattr(output, name)
return output
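# e.g. recursive_getattr(model, "bert.encoder.layer.0.attention") resolves each dotted
# segment with getattr, so numeric indices work as long as the container exposes them
# as attributes (as torch.nn.ModuleList does).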
def recursive_setattr(model, module_name, module):
"""
Recursively set the attribute of a module.
Args:
model (`torch.nn.Module`)
The model to set the attribute in.
module_name (`str`)
The name of the module to set the attribute in.
module (`torch.nn.Module`)
The module to set the attribute to.
"""
split_list = module_name.split('.')
output = model
for name in split_list[:-1]:
output = getattr(output, name)
output.__setattr__(split_list[-1], module)
def module_replacement(model, module_name, compression_technique=None, mpu=None):
"""
Replace a module with a new module.
Args:
model (`torch.nn.Module`)
The model to replace the module in.
module_name (`str`)
The name of the module to replace.
compression_technique (`str`)
The compression technique to use for the new module.
"""
# Get the old module
old_module = recursive_getattr(model, module_name)
need_bias = False
if hasattr(old_module, 'bias') and old_module.bias is not None:
need_bias = True
# Initialize the new module
if isinstance(old_module, LinearLayer_Compress) or isinstance(old_module, torch.nn.Linear):
if isinstance(old_module, LinearLayer_Compress):
new_module = old_module
else:
new_module = LinearLayer_Compress(old_module.in_features, old_module.out_features,
bias=need_bias).to(device=old_module.weight.device,
dtype=old_module.weight.dtype)
new_module.weight.data = old_module.weight.data
if need_bias:
new_module.bias.data = old_module.bias.data
elif isinstance(old_module, Conv2dLayer_Compress) or isinstance(old_module, torch.nn.Conv2d):
if isinstance(old_module, Conv2dLayer_Compress):
new_module = old_module
else:
new_module = Conv2dLayer_Compress(old_module.in_channels, old_module.out_channels, old_module.kernel_size, old_module.stride, old_module.padding, \
old_module.dilation, old_module.groups, need_bias, \
old_module.padding_mode).to(device=old_module.weight.device, dtype=old_module.weight.dtype)
new_module.weight.data = old_module.weight.data
if need_bias:
new_module.bias.data = old_module.bias.data
elif isinstance(old_module, torch.nn.BatchNorm2d):
new_module = BNLayer_Compress(old_module.num_features, old_module.eps, old_module.momentum, old_module.affine,
old_module.track_running_stats).to(old_module.weight.device,
old_module.weight.dtype)
new_module.weight.data = old_module.weight.data
if need_bias:
new_module.bias.data = old_module.bias.data
new_module.running_mean.data = old_module.running_mean.data
new_module.running_var.data = old_module.running_var.data
elif isinstance(old_module, Embedding_Compress) or isinstance(old_module, torch.nn.Embedding):
if isinstance(old_module, Embedding_Compress):
new_module = old_module
else:
new_module = Embedding_Compress(old_module.num_embeddings, old_module.embedding_dim, old_module.padding_idx, old_module.max_norm, old_module.norm_type, \
old_module.scale_grad_by_freq, old_module.sparse).to(device=old_module.weight.device, dtype=old_module.weight.dtype)
new_module.weight.data = old_module.weight.data
elif mpu is not None and (isinstance(old_module, ColumnParallelLinear_Compress)
or isinstance(old_module, mpu.ColumnParallelLinear)):
if isinstance(old_module, ColumnParallelLinear_Compress):
new_module = old_module
else:
new_module = ColumnParallelLinear_Compress(mpu,
old_module.input_size,
old_module.output_size,
gather_output=old_module.gather_output,
skip_bias_add=old_module.skip_bias_add,
bias=need_bias).to(device=old_module.weight.device,
dtype=old_module.weight.dtype)
new_module.weight.data = old_module.weight.data
if need_bias:
new_module.bias.data = old_module.bias.data
elif mpu is not None and (isinstance(old_module, RowParallelLinear_Compress)
or isinstance(old_module, mpu.RowParallelLinear)):
if isinstance(old_module, RowParallelLinear_Compress):
new_module = old_module
else:
new_module = RowParallelLinear_Compress(mpu,
old_module.input_size,
old_module.output_size,
input_is_parallel=old_module.input_is_parallel,
skip_bias_add=old_module.skip_bias_add,
bias=need_bias).to(device=old_module.weight.device,
dtype=old_module.weight.dtype)
new_module.weight.data = old_module.weight.data
if need_bias:
new_module.bias.data = old_module.bias.data
else:
new_module = None
if compression_technique is not None:
for k, v in compression_technique.items():
if k == SPARSE_PRUNING:
if v[SPARSE_PRUNING_ENABLED]:
new_module.enable_sparse_pruning(v[SPARSE_PRUNING_DENSE_RATIO], v[SPARSE_PRUNING_METHOD])
elif k == ROW_PRUNING:
if v[ROW_PRUNING_ENABLED]:
new_module.enable_row_pruning(v[ROW_PRUNING_DENSE_RATIO], v[ROW_PRUNING_METHOD])
elif k == HEAD_PRUNING:
if v[HEAD_PRUNING_ENABLED]:
new_module.enable_head_pruning(v[HEAD_PRUNING_DENSE_RATIO], v[HEAD_PRUNING_METHOD],
v[HEAD_PRUNING_NUM_HEADS])
elif k == ACTIVATION_QUANTIZATION:
if v[ACTIVATION_QUANTIZATION_ENABLED]:
new_module.enable_activation_quantization(v[ACTIVATION_QUANTIZE_BITS], v[ACTIVATION_QUANTIZE_TYPE],
v[ACTIVATION_QUANTIZE_RANGE])
elif k == WEIGHT_QUANTIZATION:
if v[WEIGHT_QUANTIZE_ENABLED]:
new_module.enable_weight_quantization(v[WEIGHT_QUANTIZE_START_BITS],
v[WEIGHT_QUANTIZE_TARGET_BITS],
v[WEIGHT_QUANTIZATION_PERIOD],
v[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED],
v[WEIGHT_QUANTIZE_TYPE], v[WEIGHT_QUANTIZE_GROUPS])
elif k == CHANNEL_PRUNING:
if v[CHANNEL_PRUNING_ENABLED]:
new_module.enable_channel_pruning(v[CHANNEL_PRUNING_DENSE_RATIO], v[CHANNEL_PRUNING_METHOD])
else:
raise NotImplementedError('Compression technique {} is not implemented'.format(k))
# Replace the old module with the new one
recursive_setattr(model, module_name, new_module)
def is_module_compressible(module, mpu=None):
ret = isinstance(module, torch.nn.Linear) or \
isinstance(module, torch.nn.Conv2d) or \
isinstance(module, torch.nn.Embedding) or \
isinstance(module, torch.nn.BatchNorm2d)
if mpu is not None:
ret = ret or isinstance(module, mpu.RowParallelLinear) or isinstance(module, mpu.ColumnParallelLinear)
return ret
def compression_preparation(model, compression_technique_list, mpu):
"""
Prepare the compression techniques of a model.
Args:
model (`torch.nn.Module`)
The model to prepare the compression techniques of.
compression_technique_list (`list`)
            The list of compression techniques to apply to the model.
"""
# Here we first replace all module with our linear wrapper
for module_name, module in model.named_modules():
if is_module_compressible(module, mpu):
module_replacement(model, module_name, mpu=mpu)
for module_name_lists, _, compression_technique in compression_technique_list:
for mnl in module_name_lists:
for module_name in mnl:
module_replacement(model, module_name, compression_technique)
return model
def fix_compression(model, module_name, compression_technique, mask=None, dim_reduction=False):
"""
Fix the compression technique of a module.
Args:
model (`torch.nn.Module`)
The model to fix the compression technique of.
module_name (`str`)
The name of the module to fix the compression technique of.
compression_technique (`str`)
The compression technique to fix the module to.
"""
# Here we can make things much simpler by just replacing the module
module = recursive_getattr(model, module_name)
for k, v in compression_technique.items():
if k == WEIGHT_QUANTIZATION and v[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED] and v[WEIGHT_QUANTIZE_ENABLED]:
return module.fix_weight_quantization()
elif k == SPARSE_PRUNING and v[SPARSE_PRUNING_ENABLED]:
return module.fix_sparse_pruning_helper()
elif k == ROW_PRUNING and (v[ROW_PRUNING_ENABLED] or mask is not None):
return module.fix_row_col_pruning_helper(mask, dim_reduction=dim_reduction)
elif k == HEAD_PRUNING and (v[HEAD_PRUNING_ENABLED] or mask is not None):
return module.fix_head_pruning_helper(mask, v[HEAD_PRUNING_NUM_HEADS], dim_reduction=dim_reduction)
elif k == CHANNEL_PRUNING and (v[CHANNEL_PRUNING_ENABLED] or mask is not None):
return module.fix_channel_pruning_helper(mask, dim_reduction=dim_reduction)
def convert_conv1d_to_linear(model, convert_type):
'''
    This is a helper function to convert Conv1D layers to Linear (e.g., to convert a GPT-2 model from HF)
'''
if hasattr(model, 'module'):
c_model = model.module
else:
c_model = model
for name, module in c_model.named_modules():
if isinstance(module, convert_type):
old_module = recursive_getattr(c_model, name)
new_module = torch.nn.Linear(old_module.weight.data.size(0),
old_module.weight.data.size(1),
bias=True if old_module.bias is not None else False)
new_module.weight.data = old_module.weight.data.t().contiguous()
if new_module.bias is not None:
new_module.bias.data = old_module.bias.data.view(-1)
recursive_setattr(c_model, name, new_module)
return model
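# Example (hedged): for a Hugging Face GPT-2 model the 1D-conv class is transformers'
# Conv1D; the exact import path depends on the installed transformers version.
#
#   from transformers.pytorch_utils import Conv1D   # older releases: transformers.modeling_utils
#   model = convert_conv1d_to_linear(model, Conv1D)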
def generate_pruners(config, model):
"""Generate pruners.
Args:
config (`neural_compressor.WeightPruningConfig`)
The object to the class WeightPruningConfig.
        model (`torch.nn.Module`)
The torch module object to be pruned.
"""
assert nc_pruner is not None, "please ensure the neural_compressor python package is installed by pip or conda if user wants to use snip_momentum sparse pruning"
    # NOTE: use the full package path here; the "nc_pruner" alias defined at module import time is not itself importable.
    from neural_compressor.compression.pruner.utils import process_config, parse_to_prune
    from neural_compressor.compression.pruner.pruners import get_pruner
assert isinstance(model, torch.nn.Module)
pruners_info = process_config(config)
pruners = []
for info in pruners_info:
modules = parse_to_prune(info, model)
if modules == {}:
logger.warning("one pruner hooks no layers, please have a check")
pruners.append(get_pruner(info, modules))
info['modules'] = [key for key in modules.keys()]
info['len_of_modules'] = len(info['modules'])
logger.info(info)
return pruners
def register_on_step_begin(model):
"""Mount on_step_begin to the model.
Args:
        model (`torch.nn.Module`)
The torch module object to be pruned.
"""
def hook(module, input):
for pruner in module.pruners:
pruner.on_step_begin(0)
hook_handle = model.register_forward_pre_hook(hook)
return hook_handle
def rewrite_optimizer_step(opt: torch.optim.Optimizer):
"""Mount on_before/after_optimizer_step to the optimizer.
Args:
        opt (`torch.optim.Optimizer`)
The torch optimizer object to be hooked.
"""
def new_step(self, closure=None):
if hasattr(self, "pruners"):
for pruner in self.pruners:
pruner.on_before_optimizer_step()
if closure is not None:
res = self.orig_step(closure)
else:
res = self.orig_step()
if hasattr(self, "pruners"):
for pruner in self.pruners:
pruner.on_after_optimizer_step()
return res
opt.orig_step = opt.step
import types
opt.step = types.MethodType(new_step, opt)
return opt
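# Sketch of wiring the snip_momentum pruning hooks together (assumes `pruners` was
# produced by generate_pruners and that model/optimizer are ordinary torch objects):
#
#   model.pruners = pruners
#   register_on_step_begin(model)                    # pruner.on_step_begin fires before each forward
#   optimizer.pruners = pruners
#   optimizer = rewrite_optimizer_step(optimizer)    # wraps .step() with before/after pruner callbacks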
| 14,637 | 44.318885 | 165 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/lr_schedules.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Implementation of learning rate schedules.
Taken and modified from PyTorch v1.0.1 source
https://github.com/pytorch/pytorch/blob/v1.1.0/torch/optim/lr_scheduler.py
"""
import argparse
from torch.optim import Optimizer
import math
from deepspeed.utils import logger
LR_SCHEDULE = 'lr_schedule'
LR_RANGE_TEST = 'LRRangeTest'
ONE_CYCLE = 'OneCycle'
WARMUP_LR = 'WarmupLR'
WARMUP_DECAY_LR = 'WarmupDecayLR'
VALID_LR_SCHEDULES = [LR_RANGE_TEST, ONE_CYCLE, WARMUP_LR, WARMUP_DECAY_LR]
LR_RANGE_TEST_MIN_LR = 'lr_range_test_min_lr'
LR_RANGE_TEST_STEP_RATE = 'lr_range_test_step_rate'
LR_RANGE_TEST_STEP_SIZE = 'lr_range_test_step_size'
LR_RANGE_TEST_STAIRCASE = 'lr_range_test_staircase'
EDGE_VALUE = 'edge_value'
MID_VALUE = 'mid_value'
CYCLE_FIRST_STEP_SIZE = 'cycle_first_step_size'
CYCLE_FIRST_STAIR_COUNT = 'cycle_first_stair_count'
CYCLE_SECOND_STEP_SIZE = 'cycle_second_step_size'
CYCLE_SECOND_STAIR_COUNT = 'cycle_second_stair_count'
DECAY_STEP_SIZE = 'decay_step_size'
CYCLE_MIN_LR = 'cycle_min_lr'
CYCLE_MAX_LR = 'cycle_max_lr'
DECAY_LR_RATE = 'decay_lr_rate'
CYCLE_MIN_MOM = 'cycle_min_mom'
CYCLE_MAX_MOM = 'cycle_max_mom'
DECAY_MOM_RATE = 'decay_mom_rate'
WARMUP_MIN_LR = 'warmup_min_lr'
WARMUP_MAX_LR = 'warmup_max_lr'
WARMUP_NUM_STEPS = 'warmup_num_steps'
WARMUP_TYPE = 'warmup_type'
WARMUP_LOG_RATE = 'log'
WARMUP_LINEAR_RATE = 'linear'
TOTAL_NUM_STEPS = 'total_num_steps'
def add_tuning_arguments(parser):
group = parser.add_argument_group('Convergence Tuning', 'Convergence tuning configurations')
# LR scheduler
group.add_argument('--lr_schedule', type=str, default=None, help='LR schedule for training.')
# Learning rate range test
group.add_argument("--lr_range_test_min_lr", type=float, default=0.001, help='Starting lr value.')
group.add_argument("--lr_range_test_step_rate", type=float, default=1.0, help='scaling rate for LR range test.')
group.add_argument("--lr_range_test_step_size", type=int, default=1000, help='training steps per LR change.')
group.add_argument("--lr_range_test_staircase",
type=bool,
default=False,
help='use staircase scaling for LR range test.')
# OneCycle schedule
group.add_argument("--cycle_first_step_size",
type=int,
default=1000,
help='size of first step of 1Cycle schedule (training steps).')
group.add_argument("--cycle_first_stair_count",
type=int,
default=-1,
help='first stair count for 1Cycle schedule.')
group.add_argument("--cycle_second_step_size",
type=int,
default=-1,
help='size of second step of 1Cycle schedule (default first_step_size).')
group.add_argument("--cycle_second_stair_count",
type=int,
default=-1,
help='second stair count for 1Cycle schedule.')
group.add_argument("--decay_step_size",
type=int,
default=1000,
help='size of intervals for applying post cycle decay (training steps).')
# 1Cycle LR
group.add_argument("--cycle_min_lr", type=float, default=0.01, help='1Cycle LR lower bound.')
group.add_argument("--cycle_max_lr", type=float, default=0.1, help='1Cycle LR upper bound.')
group.add_argument("--decay_lr_rate", type=float, default=0.0, help='post cycle LR decay rate.')
# 1Cycle Momentum
group.add_argument('--cycle_momentum', default=False, action='store_true', help='Enable 1Cycle momentum schedule.')
group.add_argument("--cycle_min_mom", type=float, default=0.8, help='1Cycle momentum lower bound.')
group.add_argument("--cycle_max_mom", type=float, default=0.9, help='1Cycle momentum upper bound.')
group.add_argument("--decay_mom_rate", type=float, default=0.0, help='post cycle momentum decay rate.')
# Warmup LR
group.add_argument('--warmup_min_lr', type=float, default=0, help='WarmupLR minimum/initial LR value')
group.add_argument('--warmup_max_lr', type=float, default=0.001, help='WarmupLR maximum LR value.')
group.add_argument('--warmup_num_steps', type=int, default=1000, help='WarmupLR step count for LR warmup.')
group.add_argument('--warmup_type',
type=str,
default=WARMUP_LOG_RATE,
help='WarmupLR increasing function during warmup')
return parser
def parse_arguments():
parser = argparse.ArgumentParser()
parser = add_tuning_arguments(parser)
lr_sched_args, unknown_args = parser.parse_known_args()
return lr_sched_args, unknown_args
def override_lr_range_test_params(args, params):
if hasattr(args, LR_RANGE_TEST_MIN_LR) and args.lr_range_test_min_lr is not None:
params[LR_RANGE_TEST_MIN_LR] = args.lr_range_test_min_lr
if hasattr(args, LR_RANGE_TEST_STEP_RATE) and args.lr_range_test_step_rate is not None:
params[LR_RANGE_TEST_STEP_RATE] = args.lr_range_test_step_rate
if hasattr(args, LR_RANGE_TEST_STEP_SIZE) and args.lr_range_test_step_size is not None:
params[LR_RANGE_TEST_STEP_SIZE] = args.lr_range_test_step_size
if hasattr(args, LR_RANGE_TEST_STAIRCASE) and args.lr_range_test_staircase is not None:
params[LR_RANGE_TEST_STAIRCASE] = args.lr_range_test_staircase
def override_1cycle_params(args, params):
if hasattr(args, CYCLE_FIRST_STEP_SIZE) and args.cycle_first_step_size is not None:
params[CYCLE_FIRST_STEP_SIZE] = args.cycle_first_step_size
if hasattr(args, CYCLE_FIRST_STAIR_COUNT) and args.cycle_first_stair_count is not None:
params[CYCLE_FIRST_STAIR_COUNT] = args.cycle_first_stair_count
if hasattr(args, CYCLE_SECOND_STEP_SIZE) and args.cycle_second_step_size is not None:
params[CYCLE_SECOND_STEP_SIZE] = args.cycle_second_step_size
if hasattr(args, CYCLE_SECOND_STAIR_COUNT) and args.cycle_second_stair_count is not None:
params[CYCLE_SECOND_STAIR_COUNT] = args.cycle_second_stair_count
if hasattr(args, DECAY_STEP_SIZE) and args.decay_step_size is not None:
params[DECAY_STEP_SIZE] = args.decay_step_size
# 1Cycle LR params
if hasattr(args, CYCLE_MIN_LR) and args.cycle_min_lr is not None:
params[CYCLE_MIN_LR] = args.cycle_min_lr
if hasattr(args, CYCLE_MAX_LR) and args.cycle_max_lr is not None:
params[CYCLE_MAX_LR] = args.cycle_max_lr
if hasattr(args, DECAY_LR_RATE) and args.decay_lr_rate is not None:
params[DECAY_LR_RATE] = args.decay_lr_rate
# 1Cycle MOM params
if hasattr(args, CYCLE_MIN_MOM) and args.cycle_min_mom is not None:
params[CYCLE_MIN_MOM] = args.cycle_min_mom
if hasattr(args, CYCLE_MAX_MOM) and args.cycle_max_mom is not None:
params[CYCLE_MAX_MOM] = args.cycle_max_mom
if hasattr(args, DECAY_MOM_RATE) and args.decay_mom_rate is not None:
params[DECAY_MOM_RATE] = args.decay_mom_rate
def override_warmupLR_params(args, params):
if hasattr(args, WARMUP_MIN_LR) and args.warmup_min_lr is not None:
params[WARMUP_MIN_LR] = args.warmup_min_lr
if hasattr(args, WARMUP_MAX_LR) and args.warmup_max_lr is not None:
params[WARMUP_MAX_LR] = args.warmup_max_lr
if hasattr(args, WARMUP_NUM_STEPS) and args.warmup_num_steps is not None:
params[WARMUP_NUM_STEPS] = args.warmup_num_steps
if hasattr(args, WARMUP_TYPE) and args.warmup_type is not None:
params[WARMUP_TYPE] = args.warmup_type
def override_params(args, params):
# LR range test params
override_lr_range_test_params(args, params)
# 1Cycle params
override_1cycle_params(args, params)
# WarmupLR params
override_warmupLR_params(args, params)
def get_config_from_args(args):
if not hasattr(args, LR_SCHEDULE) or args.lr_schedule is None:
return None, '--{} not specified on command line'.format(LR_SCHEDULE)
if not args.lr_schedule in VALID_LR_SCHEDULES:
return None, '{} is not supported LR schedule'.format(args.lr_schedule)
config = {}
config['type'] = args.lr_schedule
config['params'] = {}
if args.lr_schedule == LR_RANGE_TEST:
override_lr_range_test_params(args, config['params'])
elif args.lr_schedule == ONE_CYCLE:
override_1cycle_params(args, config['params'])
else:
override_warmupLR_params(args, config['params'])
return config, None
def get_lr_from_config(config):
if not 'type' in config:
return None, 'LR schedule type not defined in config'
if not 'params' in config:
return None, 'LR schedule params not defined in config'
lr_schedule = config['type']
lr_params = config['params']
if not lr_schedule in VALID_LR_SCHEDULES:
return None, '{} is not a valid LR schedule'.format(lr_schedule)
if lr_schedule == LR_RANGE_TEST:
return lr_params[LR_RANGE_TEST_MIN_LR], ''
if lr_schedule == ONE_CYCLE:
return lr_params[CYCLE_MAX_LR], ''
# Warmup LR
return lr_params[WARMUP_MAX_LR], ''
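# Example of the scheduler config fragment these helpers consume (illustrative values;
# in a DeepSpeed config it normally lives under the top-level "scheduler" key):
#
#   {
#       "type": "WarmupLR",
#       "params": {
#           "warmup_min_lr": 0,
#           "warmup_max_lr": 0.001,
#           "warmup_num_steps": 1000,
#           "warmup_type": "log"
#       }
#   }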
"""
Only optimizers that are a subclass of torch.optim.Optimizer are supported. So check the passed optimizer and the wrapped
optimizer to see if the requirement is satisfied.
TODO: Looking under the hood to examine the wrapped optimizer is a hack that requires a better long-term fix.
"""
def get_torch_optimizer(optimizer):
if isinstance(optimizer, Optimizer):
return optimizer
if hasattr(optimizer, 'optimizer') and isinstance(optimizer.optimizer, Optimizer):
return optimizer.optimizer
raise TypeError('{} is not a subclass of torch.optim.Optimizer'.format(type(optimizer).__name__))
class LRRangeTest(object):
"""Sets the learning rate of each parameter group according to
learning rate range test (LRRT) policy. The policy increases learning
rate starting from a base value with a constant frequency, as detailed in
the paper `A disciplined approach to neural network hyper-parameters: Part1`_.
    The LRRT policy is used for finding the maximum LR that trains a model without divergence, and can be used to
configure the LR boundaries for Cyclic LR schedules.
LRRT changes the learning rate after every batch.
`step` should be called after a batch has been used for training.
Args:
optimizer (Optimizer): Wrapped optimizer.
lr_range_test_min_lr (float or list): Initial learning rate which is the
lower boundary in the range test for each parameter group.
lr_range_test_step_size (int): Interval of training steps to increase learning rate. Default: 2000
lr_range_test_step_rate (float): Scaling rate for range test. Default: 1.0
lr_range_test_staircase (bool): Scale in staircase fashion, rather than continuous. Default: False.
last_batch_iteration (int): The index of the last batch. This parameter is used when
resuming a training job. Since `step()` should be invoked after each
batch instead of after each epoch, this number represents the total
number of *batches* computed, not the total number of epochs computed.
When last_batch_iteration=-1, the schedule is started from the beginning.
Default: -1
Example:
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = LRRangeTest(optimizer)
>>> data_loader = torch.utils.data.DataLoader(...)
>>> for epoch in range(10):
>>> for batch in data_loader:
>>> train_batch(...)
>>> scheduler.step()
_A disciplined approach to neural network hyper-parameters: Part 1 -- learning rate, batch size, momentum, and weight decay:
https://arxiv.org/abs/1803.09820
"""
def __init__(self,
optimizer: Optimizer,
lr_range_test_min_lr: float = 1e-3,
lr_range_test_step_size: int = 2000,
lr_range_test_step_rate: float = 1.0,
lr_range_test_staircase: bool = False,
last_batch_iteration: int = -1):
self.optimizer = get_torch_optimizer(optimizer)
if isinstance(lr_range_test_min_lr, list) or isinstance(lr_range_test_min_lr, tuple):
if len(lr_range_test_min_lr) != len(self.optimizer.param_groups):
raise ValueError("expected {} lr_range_test_min_lr, got {}".format(len(self.optimizer.param_groups),
len(lr_range_test_min_lr)))
self.min_lr = list(lr_range_test_min_lr)
else:
self.min_lr = [lr_range_test_min_lr] * len(self.optimizer.param_groups)
self.step_size = lr_range_test_step_size
self.step_rate = lr_range_test_step_rate
self.last_batch_iteration = last_batch_iteration
self.staircase = lr_range_test_staircase
self.interval_fn = self._staircase_interval if lr_range_test_staircase else self._continuous_interval
if last_batch_iteration == -1:
self._update_optimizer(self.min_lr)
def _staircase_interval(self):
return math.floor(float(self.last_batch_iteration + 1) / self.step_size)
def _continuous_interval(self):
return float(self.last_batch_iteration + 1) / self.step_size
def _get_increase(self):
return (1 + self.step_rate * self.interval_fn())
def get_lr(self):
lr_increase = self._get_increase()
return [lr_range_test_min_lr * lr_increase for lr_range_test_min_lr in self.min_lr]
def get_last_lr(self):
""" Return last computed learning rate by current scheduler.
"""
assert getattr(self, '_last_lr', None) is not None, "need to call step() first"
return self._last_lr
def _update_optimizer(self, group_lrs):
for param_group, lr in zip(self.optimizer.param_groups, group_lrs):
param_group['lr'] = lr
def step(self, batch_iteration=None):
if batch_iteration is None:
batch_iteration = self.last_batch_iteration + 1
self.last_batch_iteration = batch_iteration
self._update_optimizer(self.get_lr())
self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
def state_dict(self):
return {'last_batch_iteration': self.last_batch_iteration}
def load_state_dict(self, sd):
self.last_batch_iteration = sd['last_batch_iteration']
class OneCycle(object):
"""Sets the learning rate of each parameter group according to
1Cycle learning rate policy (1CLR). 1CLR is a variation of the
Cyclical Learning Rate (CLR) policy that involves one cycle followed by
decay. The policy simultaneously cycles the learning rate (and momentum)
between two boundaries with a constant frequency, as detailed in
the paper `A disciplined approach to neural network hyper-parameters`_.
1CLR policy changes the learning rate after every batch.
`step` should be called after a batch has been used for training.
This implementation was adapted from the github repo: `pytorch/pytorch`_
Args:
optimizer (Optimizer): Wrapped optimizer.
cycle_min_lr (float or list): Initial learning rate which is the
lower boundary in the cycle for each parameter group.
cycle_max_lr (float or list): Upper learning rate boundaries in the cycle
for each parameter group. Functionally,
it defines the cycle amplitude (cycle_max_lr - cycle_min_lr).
The lr at any cycle is the sum of cycle_min_lr
and some scaling of the amplitude; therefore
cycle_max_lr may not actually be reached depending on
scaling function.
decay_lr_rate(float): Decay rate for learning rate. Default: 0.
cycle_first_step_size (int): Number of training iterations in the
increasing half of a cycle. Default: 2000
cycle_second_step_size (int): Number of training iterations in the
decreasing half of a cycle. If cycle_second_step_size is None,
it is set to cycle_first_step_size. Default: None
cycle_first_stair_count(int): Number of stairs in first half of cycle phase. This means
lr/mom are changed in staircase fashion. Default 0, means staircase disabled.
cycle_second_stair_count(int): Number of stairs in second half of cycle phase. This means
lr/mom are changed in staircase fashion. Default 0, means staircase disabled.
decay_step_size (int): Intervals for applying decay in decay phase. Default: 0, means no decay.
cycle_momentum (bool): If ``True``, momentum is cycled inversely
to learning rate between 'cycle_min_mom' and 'cycle_max_mom'.
Default: True
cycle_min_mom (float or list): Initial momentum which is the
lower boundary in the cycle for each parameter group.
Default: 0.8
cycle_max_mom (float or list): Upper momentum boundaries in the cycle
for each parameter group. Functionally,
it defines the cycle amplitude (cycle_max_mom - cycle_min_mom).
The momentum at any cycle is the difference of cycle_max_mom
and some scaling of the amplitude; therefore
cycle_min_mom may not actually be reached depending on
scaling function. Default: 0.9
decay_mom_rate (float): Decay rate for momentum. Default: 0.
last_batch_iteration (int): The index of the last batch. This parameter is used when
resuming a training job. Since `step()` should be invoked after each
batch instead of after each epoch, this number represents the total
number of *batches* computed, not the total number of epochs computed.
When last_batch_iteration=-1, the schedule is started from the beginning.
Default: -1
Example:
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = OneCycle(optimizer, 0.0001, 0.0010)
>>> data_loader = torch.utils.data.DataLoader(...)
>>> for epoch in range(10):
>>> for batch in data_loader:
>>> train_batch(...)
>>> scheduler.step()
.. _A disciplined approach to neural network hyper-parameters: Part 1 -- learning rate, batch size, momentum, and weight decay: https://arxiv.org/abs/1803.09820
"""
def __init__(self,
optimizer,
cycle_min_lr,
cycle_max_lr,
decay_lr_rate=0.,
cycle_first_step_size=2000,
cycle_second_step_size=None,
cycle_first_stair_count=0,
cycle_second_stair_count=None,
decay_step_size=0,
cycle_momentum=True,
cycle_min_mom=0.8,
cycle_max_mom=0.9,
decay_mom_rate=0.,
last_batch_iteration=-1):
self.optimizer = get_torch_optimizer(optimizer)
# Initialize cycle shape
self._initialize_cycle(cycle_first_step_size, cycle_second_step_size, cycle_first_stair_count,
cycle_second_stair_count, decay_step_size)
# Initialize cycle lr
self._initialize_lr(self.optimizer, cycle_min_lr, cycle_max_lr, decay_lr_rate, last_batch_iteration)
# Initialize cyclic momentum
self.cycle_momentum = cycle_momentum
if cycle_momentum:
self._initialize_momentum(self.optimizer, cycle_min_mom, cycle_max_mom, decay_mom_rate,
last_batch_iteration)
# Initialize batch iteration tracker
self.last_batch_iteration = last_batch_iteration
# Configure cycle shape
def _initialize_cycle(self, cycle_first_step_size, cycle_second_step_size, cycle_first_stair_count,
cycle_second_stair_count, decay_step_size):
cycle_first_step_size = float(cycle_first_step_size)
cycle_second_step_size = float(
cycle_second_step_size) if cycle_second_step_size is not None else cycle_first_step_size
self.total_size = cycle_first_step_size + cycle_second_step_size
self.step_ratio = cycle_first_step_size / self.total_size
self.first_stair_count = cycle_first_stair_count
self.second_stair_count = cycle_first_stair_count if cycle_second_stair_count is None else cycle_second_stair_count
self.decay_step_size = decay_step_size
if math.isclose(self.decay_step_size, 0):
self.skip_lr_decay = True
self.skip_mom_decay = True
else:
self.skip_lr_decay = False
self.skip_mom_decay = False
# Configure lr schedule
def _initialize_lr(self, optimizer, cycle_min_lr, cycle_max_lr, decay_lr_rate, last_batch_iteration):
self.min_lrs = [cycle_min_lr] * len(optimizer.param_groups)
if last_batch_iteration == -1:
for lr, group in zip(self.min_lrs, optimizer.param_groups):
group['lr'] = lr
self.max_lrs = [cycle_max_lr] * len(optimizer.param_groups)
self.decay_lr_rate = decay_lr_rate
if math.isclose(self.decay_lr_rate, 0):
self.skip_lr_decay = True
# Configure momentum schedule
def _initialize_momentum(self, optimizer, cycle_min_mom, cycle_max_mom, decay_mom_rate, last_batch_iteration):
if 'betas' not in optimizer.defaults:
optimizer_name = type(optimizer).__name__
            logger.warning(
f"cycle_momentum is disabled because optimizer {optimizer_name} does not support momentum, no betas attribute in defaults"
)
self.cycle_momentum = False
return
self.decay_mom_rate = decay_mom_rate
self.min_moms = [(cycle_min_mom, 0.99)] * len(optimizer.param_groups)
self.max_moms = [(cycle_max_mom, 0.99)] * len(optimizer.param_groups)
if last_batch_iteration == -1:
for momentum, group in zip(self.min_moms, optimizer.param_groups):
group['betas'] = momentum
if math.isclose(self.decay_mom_rate, 0):
self.skip_mom_decay = True
def _get_scale_factor(self):
batch_iteration = (self.last_batch_iteration + 1)
cycle = math.floor(1 + batch_iteration / self.total_size)
x = 1. + batch_iteration / self.total_size - cycle
if x <= self.step_ratio:
scale_factor = x / self.step_ratio
else:
scale_factor = (x - 1) / (self.step_ratio - 1)
return scale_factor
def _get_cycle_mom(self):
scale_factor = self._get_scale_factor()
momentums = []
for base_betas, max_betas in zip(self.min_moms, self.max_moms):
cycle_min_mom = base_betas[0]
cycle_max_mom = max_betas[0]
base_height = (cycle_max_mom - cycle_min_mom) * scale_factor
momentum = cycle_max_mom - base_height
momentums.append((momentum, base_betas[1]))
return momentums
def _get_cycle_lr(self):
scale_factor = self._get_scale_factor()
lrs = []
for cycle_min_lr, cycle_max_lr in zip(self.min_lrs, self.max_lrs):
base_height = (cycle_max_lr - cycle_min_lr) * scale_factor
lr = cycle_min_lr + base_height
lrs.append(lr)
return lrs
def _get_decay_mom(self, decay_batch_iteration):
if self.skip_mom_decay:
return self.max_moms
decay_interval = decay_batch_iteration / self.decay_step_size
mom_decay_factor = (1 + self.decay_mom_rate * decay_interval)
momentums = [(beta0 * mom_decay_factor, beta1) for beta0, beta1 in self.max_moms]
return momentums
def _get_decay_lr(self, decay_batch_iteration):
"""Calculates the learning rate at batch index. This function is used
after the cycle completes and post cycle decaying of lr/mom is enabled.
This function treats `self.last_batch_iteration` as the last batch index.
"""
if self.skip_lr_decay:
return self.min_lrs
decay_interval = decay_batch_iteration / self.decay_step_size
lr_decay_factor = (1 + self.decay_lr_rate * decay_interval)
lrs = [cycle_min_lr / lr_decay_factor for cycle_min_lr in self.min_lrs]
return lrs
def get_lr(self):
"""Calculates the learning rate at batch index. This function treats
`self.last_batch_iteration` as the last batch index.
"""
if self.last_batch_iteration < self.total_size:
return self._get_cycle_lr()
return self._get_decay_lr(self.last_batch_iteration - self.total_size + 1)
def get_mom(self):
"""Calculates the momentum at batch index. This function treats
`self.last_batch_iteration` as the last batch index.
"""
if not self.cycle_momentum:
return None
if self.last_batch_iteration < self.total_size:
return self._get_cycle_mom()
return self._get_decay_mom(self.last_batch_iteration - self.total_size + 1)
def get_last_lr(self):
""" Return last computed learning rate by current scheduler.
"""
assert getattr(self, '_last_lr', None) is not None, "need to call step() first"
return self._last_lr
def step(self, batch_iteration=None):
""" Updates the optimizer with the learning rate for the last batch index.
`self.last_batch_iteration` is treated as the last batch index.
If self.cycle_momentum is true, also updates optimizer momentum.
"""
if batch_iteration is None:
batch_iteration = self.last_batch_iteration + 1
self.last_batch_iteration = batch_iteration
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
if self.cycle_momentum:
momentums = self.get_mom()
for param_group, momentum in zip(self.optimizer.param_groups, momentums):
param_group['betas'] = momentum
def state_dict(self):
return {'last_batch_iteration': self.last_batch_iteration}
def load_state_dict(self, sd):
self.last_batch_iteration = sd['last_batch_iteration']
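# Illustrative sketch (not part of the original module): the OneCycle learning rate above
# follows a triangular wave. The standalone helper below mirrors the math of
# `_get_scale_factor`/`_get_cycle_lr` for a single parameter group so the shape of the
# schedule can be inspected without building an optimizer; the function name and default
# step sizes are hypothetical. `math` is already imported at the top of this module.
def _one_cycle_lr_sketch(batch_iteration, cycle_min_lr, cycle_max_lr, first_step_size=2000, second_step_size=2000):
    total_size = float(first_step_size + second_step_size)
    step_ratio = first_step_size / total_size
    cycle = math.floor(1 + batch_iteration / total_size)
    x = 1.0 + batch_iteration / total_size - cycle
    if x <= step_ratio:
        scale_factor = x / step_ratio  # rising half of the cycle: cycle_min_lr -> cycle_max_lr
    else:
        scale_factor = (x - 1) / (step_ratio - 1)  # falling half of the cycle: cycle_max_lr -> cycle_min_lr
    return cycle_min_lr + (cycle_max_lr - cycle_min_lr) * scale_factor
# For example, _one_cycle_lr_sketch(0, 0.0001, 0.001) returns the minimum lr,
# _one_cycle_lr_sketch(2000, 0.0001, 0.001) returns the maximum lr, and
# _one_cycle_lr_sketch(4000, 0.0001, 0.001) is back at the minimum.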
class WarmupLR(object):
"""Increase the learning rate of each parameter group from min lr to max lr
over warmup_num_steps steps, and then fix at max lr.
Args:
optimizer (Optimizer): Wrapped optimizer.
warmup_min_lr (float or list): minimum learning rate. Default: 0
warmup_max_lr (float or list): maximum learning rate. Default: 0.001
warmup_num_steps (int): number of steps to warm up from min_lr to max_lr. Default: 1000
warmup_type {'log', 'linear'}: increasing function from min_lr to max_lr during warmup. Default: log
last_batch_iteration (int): The index of the last batch. Default: -1.
Example:
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = WarmupLR(optimizer)
>>> data_loader = torch.utils.data.DataLoader(...)
>>> for epoch in range(10):
>>> for batch in data_loader:
>>> train_batch(...)
>>> scheduler.step()
"""
def __init__(self,
optimizer: Optimizer,
warmup_min_lr: float = 0.0,
warmup_max_lr: float = 0.001,
warmup_num_steps: int = 1000,
warmup_type: str = WARMUP_LOG_RATE,
last_batch_iteration: int = -1):
self.optimizer = get_torch_optimizer(optimizer)
self.min_lrs = self._format_param(self.optimizer, warmup_min_lr, "min_lr")
self.max_lrs = self._format_param(self.optimizer, warmup_max_lr, "max_lr")
self.delta_lrs = [big - small for big, small in zip(self.max_lrs, self.min_lrs)]
self.warmup_num_steps = max(2, warmup_num_steps)
# Currently only support linear and log function
if warmup_type not in {WARMUP_LOG_RATE, WARMUP_LINEAR_RATE}:
logger.warning(f"Using unknown warmup_type: {warmup_type}. The increasing function "
f"is set to default (log)")
warmup_type = WARMUP_LOG_RATE
self.warmup_type = warmup_type
self.inverse_log_warm_up = 1.0 / math.log(self.warmup_num_steps)
self.last_batch_iteration = last_batch_iteration
def get_lr(self):
if self.last_batch_iteration < 0:
logger.warning("Attempting to get learning rate from scheduler before it has started")
return [0.0]
gamma = self._get_gamma()
return [min_lr + (delta_lr * gamma) for min_lr, delta_lr in zip(self.min_lrs, self.delta_lrs)]
def get_last_lr(self):
""" Return last computed learning rate by current scheduler.
"""
assert getattr(self, '_last_lr', None) is not None, "need to call step() first"
return self._last_lr
def step(self, last_batch_iteration=None):
if last_batch_iteration is None:
last_batch_iteration = self.last_batch_iteration + 1
self.last_batch_iteration = last_batch_iteration
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
def state_dict(self):
return {'last_batch_iteration': self.last_batch_iteration}
def load_state_dict(self, sd):
self.last_batch_iteration = sd['last_batch_iteration']
def _get_gamma(self):
if self.last_batch_iteration < self.warmup_num_steps:
if self.warmup_type == WARMUP_LOG_RATE:
return self.inverse_log_warm_up * math.log(self.last_batch_iteration + 1)
elif self.warmup_type == WARMUP_LINEAR_RATE:
return self.last_batch_iteration / self.warmup_num_steps
return 1.0
def _format_param(self, optimizer, param_value, param_name):
if isinstance(param_value, (list, tuple)):
if len(param_value) != len(optimizer.param_groups):
raise ValueError("expected {} value for {}, got {}".format(len(optimizer.param_groups), param_name,
FileNotFoundError(param_value)))
return list(param_value)
return [param_value] * len(optimizer.param_groups)
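# Illustrative sketch (not part of the original module): `_get_gamma` above scales the lr
# delta by log(step + 1) / log(warmup_num_steps) for the 'log' warmup type and by
# step / warmup_num_steps for the 'linear' type, so log warmup rises quickly at first and
# flattens out while linear warmup grows at a constant rate. The helper name below is
# hypothetical; `math` and WARMUP_LOG_RATE are already defined at module level.
def _warmup_gamma_sketch(step, warmup_num_steps=1000, warmup_type=WARMUP_LOG_RATE):
    if step >= warmup_num_steps:
        return 1.0
    if warmup_type == WARMUP_LOG_RATE:
        return math.log(step + 1) / math.log(warmup_num_steps)
    return step / warmup_num_steps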
class WarmupDecayLR(WarmupLR):
"""Increase the learning rate of each parameter group from min lr to max lr
over warmup_num_steps steps, and then decay at linear rate over the remaining training steps.
Args:
optimizer (Optimizer): Wrapped optimizer.
total_num_steps (int): total number of training steps
warmup_min_lr (float or list): minimum learning rate. Default: 0
warmup_max_lr (float or list): maximum learning rate. Default: 0.001
warmup_num_steps (int): number of steps to warm up from min_lr to max_lr. Default: 1000
warmup_type {'log', 'linear'}: increasing function from min_lr to max_lr during warmup. Default: log
last_batch_iteration (int): The index of the last batch. Default: -1.
Example:
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = WarmupDecayLR(optimizer, 1000000)
>>> data_loader = torch.utils.data.DataLoader(...)
>>> for epoch in range(10):
>>> for batch in data_loader:
>>> train_batch(...)
>>> scheduler.step()
"""
def __init__(self,
optimizer: Optimizer,
total_num_steps: int,
warmup_min_lr: float = 0.0,
warmup_max_lr: float = 0.001,
warmup_num_steps: int = 1000,
warmup_type: str = WARMUP_LOG_RATE,
last_batch_iteration: int = -1):
self.total_num_steps = total_num_steps
super(WarmupDecayLR, self).__init__(optimizer, warmup_min_lr, warmup_max_lr, warmup_num_steps, warmup_type,
last_batch_iteration)
if self.total_num_steps < self.warmup_num_steps:
logger.warning('total_num_steps {} is less than warmup_num_steps {}'.format(
total_num_steps, warmup_num_steps))
def _get_gamma(self):
if self.last_batch_iteration < self.warmup_num_steps:
if self.warmup_type == WARMUP_LOG_RATE:
return self.inverse_log_warm_up * math.log(self.last_batch_iteration + 1)
elif self.warmup_type == WARMUP_LINEAR_RATE:
return self.last_batch_iteration / self.warmup_num_steps
return max(
0.0,
float(self.total_num_steps - self.last_batch_iteration) /
float(max(1.0, self.total_num_steps - self.warmup_num_steps)))
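# Illustrative worked example (not part of the original module, hypothetical numbers): after
# warmup, WarmupDecayLR's `_get_gamma` decays linearly from 1.0 at warmup_num_steps down to
# 0.0 at total_num_steps, so the lr returned by `get_lr` falls from warmup_max_lr back to
# warmup_min_lr by the end of training. With total_num_steps=10000 and warmup_num_steps=1000:
#   step=1000  -> gamma = (10000 - 1000) / (10000 - 1000) = 1.0  (peak lr)
#   step=5500  -> gamma = (10000 - 5500) / 9000           = 0.5  (halfway down)
#   step=10000 -> gamma = 0.0                                    (back to warmup_min_lr)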
| 33,541 | 42.903141 | 164 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/bf16_optimizer.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from collections import OrderedDict
import torch
import sys
import os
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from deepspeed import comm as dist
from deepspeed.runtime.constants import PIPE_REPLICATED
from deepspeed.runtime import ZeROOptimizer
from packaging import version as pkg_version
from deepspeed.git_version_info import version
from deepspeed.runtime.utils import (get_global_norm_of_tensors, clip_tensors_by_global_norm, DummyOptim,
align_dense_tensors, all_gather_dp_groups, bwc_tensor_model_parallel_rank,
is_model_parallel_parameter, see_memory_usage)
from deepspeed.utils import link_hp_params, fragment_address
from deepspeed.checkpoint import enable_universal_checkpoint
from deepspeed.checkpoint.constants import (DS_VERSION, PARTITION_COUNT, BASE_OPTIMIZER_STATE,
SINGLE_PARTITION_OF_FP32_GROUPS, CLIP_GRAD, GROUP_PADDINGS,
PARAM_SLICE_MAPPINGS)
setattr(sys.modules[__name__], 'fragment_address', fragment_address)
class BF16_Optimizer(ZeROOptimizer):
def __init__(self,
init_optimizer,
param_names,
mpu=None,
clip_grad=0.0,
norm_type=2,
allgather_bucket_size=5000000000,
dp_process_group=None,
timers=None):
super().__init__()
see_memory_usage('begin bf16_optimizer', force=True)
self.timers = timers
self.optimizer = init_optimizer
self.param_names = param_names
self.using_real_optimizer = not isinstance(self.optimizer, DummyOptim)
self.clip_grad = clip_grad
self.norm_type = norm_type
self.mpu = mpu
self.allgather_bucket_size = int(allgather_bucket_size)
self.dp_process_group = dp_process_group
self.dp_rank = dist.get_rank(group=self.dp_process_group)
self.real_dp_process_group = [dp_process_group for i in range(len(self.optimizer.param_groups))]
# Use torch (un)flatten ops
self.flatten = _flatten_dense_tensors
self.unflatten = _unflatten_dense_tensors
# align nccl all-gather send buffers to 4-byte boundary
self.nccl_start_alignment_factor = 2 # 4-byte alignment/sizeof(fp16) = 2
# Build BF16/FP32 groups
self.bf16_groups = []
self.bf16_groups_flat = []
self.bf16_partitioned_groups = []
self.fp32_groups_flat_partition = []
# Maintain different fp32 gradients views for convenience
self.fp32_groups_gradients = []
self.fp32_groups_gradient_dict = {}
self.fp32_groups_gradients_flat = []
self.fp32_groups_actual_gradients_flat = []
self.fp32_groups_gradient_flat_partition = []
self.fp32_groups_has_gradients = []
self.step_count = 0
self.group_paddings = []
if self.using_real_optimizer:
self._setup_for_real_optimizer()
see_memory_usage('end bf16_optimizer', force=True)
def _setup_for_real_optimizer(self):
dp_world_size = dist.get_world_size(group=self.dp_process_group)
self.partition_count = [dp_world_size for i in range(len(self.optimizer.param_groups))]
for i, param_group in enumerate(self.optimizer.param_groups):
see_memory_usage(f'before initializing group {i}', force=True)
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
# grab the original list
trainable_parameters = [param for param in param_group['params'] if param.requires_grad]
self.bf16_groups.append(trainable_parameters)
# create flat bf16 params
self.bf16_groups_flat.append(
self._flatten_dense_tensors_aligned(self.bf16_groups[i],
self.nccl_start_alignment_factor * dp_world_size))
# Make bf16 params point to flat tensor storage
self._update_storage_to_flattened_tensor(tensor_list=self.bf16_groups[i],
flat_tensor=self.bf16_groups_flat[i])
# divide flat weights into equal sized partitions
partition_size = self.bf16_groups_flat[i].numel() // dp_world_size
bf16_dp_partitions = [
self.bf16_groups_flat[i].narrow(0, dp_index * partition_size, partition_size)
for dp_index in range(dp_world_size)
]
self.bf16_partitioned_groups.append(bf16_dp_partitions)
# create fp32 params partition
self.fp32_groups_flat_partition.append(bf16_dp_partitions[partition_id].clone().float().detach())
self.fp32_groups_flat_partition[i].requires_grad = True
num_elem_list = [t.numel() for t in self.bf16_groups[i]]
# create fp32 gradients
self.fp32_groups_gradients_flat.append(torch.zeros_like(self.bf16_groups_flat[i], dtype=torch.float32))
# track individual fp32 gradients for entire model
fp32_gradients = self._split_flat_tensor(flat_tensor=self.fp32_groups_gradients_flat[i],
num_elem_list=num_elem_list)
self.fp32_groups_gradients.append(fp32_gradients)
self.fp32_groups_gradient_dict[i] = fp32_gradients
# flat tensor corresponding to actual fp32 gradients (i.e., minus alignment padding)
length_without_padding = sum(num_elem_list)
self.fp32_groups_actual_gradients_flat.append(
torch.narrow(self.fp32_groups_gradients_flat[i], 0, 0, length_without_padding))
# flat tensor corresponding to gradient partition
self.fp32_groups_gradient_flat_partition.append(
torch.narrow(self.fp32_groups_gradients_flat[i], 0, partition_id * partition_size, partition_size))
# track fp32 gradient updates
self.fp32_groups_has_gradients.append([False] * len(self.bf16_groups[i]))
# Record padding required for alignment
if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1:
padding = self.bf16_groups_flat[i].numel() - length_without_padding
else:
padding = 0
self.group_paddings.append(padding)
# update optimizer param groups to reference fp32 params partition
param_group['params'] = [self.fp32_groups_flat_partition[i]]
see_memory_usage(f'after initializing group {i}', force=True)
see_memory_usage('before initialize_optimizer', force=True)
self.initialize_optimizer_states()
see_memory_usage('end initialize_optimizer', force=True)
# Need optimizer states initialized before linking lp to optimizer state
self._link_all_hp_params()
self._enable_universal_checkpoint()
self._param_slice_mappings = self._create_param_mapping()
def _enable_universal_checkpoint(self):
for lp_param_group in self.bf16_groups:
enable_universal_checkpoint(param_list=lp_param_group)
def _create_param_mapping(self):
param_mapping = []
for i, _ in enumerate(self.optimizer.param_groups):
param_mapping_per_group = OrderedDict()
for lp in self.bf16_groups[i]:
if lp._hp_mapping is not None:
lp_name = self.param_names[lp]
param_mapping_per_group[lp_name] = lp._hp_mapping.get_hp_fragment_address()
param_mapping.append(param_mapping_per_group)
return param_mapping
def _link_all_hp_params(self):
dp_world_size = dist.get_world_size(group=self.dp_process_group)
for i, _ in enumerate(self.optimizer.param_groups):
# Link bf16 and fp32 params in partition
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
partition_size = self.bf16_groups_flat[i].numel() // dp_world_size
flat_hp_partition = self.fp32_groups_flat_partition[i]
link_hp_params(lp_param_list=self.bf16_groups[i],
flat_hp_partition=flat_hp_partition,
gradient_dict=self.fp32_groups_gradient_dict,
offload_gradient_dict=None,
use_offload=False,
param_group_index=i,
partition_start=partition_id * partition_size,
partition_size=partition_size,
partition_optimizer_state=self.optimizer.state[flat_hp_partition],
dp_group=self.real_dp_process_group[i])
def initialize_optimizer_states(self):
"""Take an optimizer step with zero-valued gradients to allocate internal
optimizer state.
This helps prevent memory fragmentation by allocating optimizer state at the
beginning of training instead of after activations have been allocated.
"""
for param_partition, grad_partition in zip(self.fp32_groups_flat_partition,
self.fp32_groups_gradient_flat_partition):
param_partition.grad = grad_partition
self.optimizer.step()
self.clear_hp_grads()
def _split_flat_tensor(self, flat_tensor, num_elem_list):
assert sum(num_elem_list) <= flat_tensor.numel()
tensor_list = []
offset = 0
for num_elem in num_elem_list:
dense_tensor = torch.narrow(flat_tensor, 0, offset, num_elem)
tensor_list.append(dense_tensor)
offset += num_elem
return tensor_list
def _update_storage_to_flattened_tensor(self, tensor_list, flat_tensor):
updated_params = self.unflatten(flat_tensor, tensor_list)
for p, q in zip(tensor_list, updated_params):
p.data = q.data
def _flatten_dense_tensors_aligned(self, tensor_list, alignment):
return self.flatten(align_dense_tensors(tensor_list, alignment))
@torch.no_grad()
def step(self, closure=None):
if closure is not None:
raise NotImplementedError(f'{self.__class__} does not support closure.')
all_groups_norm = get_global_norm_of_tensors(input_tensors=self.get_grads_for_norm(),
mpu=self.mpu,
norm_type=self.norm_type)
self._global_grad_norm = all_groups_norm
assert all_groups_norm > 0.
if self.clip_grad > 0.:
clip_tensors_by_global_norm(input_tensors=self.get_grads_for_norm(for_clipping=True),
max_norm=self.clip_grad,
global_norm=all_groups_norm,
mpu=self.mpu)
self.optimizer.step()
self.update_lp_params()
self.clear_hp_grads()
self.step_count += 1
def backward(self, loss, update_hp_grads=True, clear_lp_grads=False, **bwd_kwargs):
"""Perform a backward pass and copy the low-precision gradients to the
high-precision copy.
We copy/accumulate to the high-precision grads now to prevent accumulating in the
bf16 grads after successive backward() calls (i.e., grad accumulation steps > 1).
The low-precision grads are deallocated during this procedure.
"""
self.clear_lp_grads()
loss.backward(**bwd_kwargs)
if update_hp_grads:
self.update_hp_grads(clear_lp_grads=clear_lp_grads)
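# Illustrative usage sketch (hypothetical, not part of this class): with gradient
# accumulation, each micro-batch backward pass accumulates into the fp32 grads so the
# bf16 grads never accumulate across micro-batches, and a single step() is taken at the
# accumulation boundary, e.g.
#   for micro_loss in micro_batch_losses:
#       bf16_optimizer.backward(micro_loss)   # accumulate into fp32 grads
#   bf16_optimizer.step()                     # clip, step, copy fp32 partition back to bf16 params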
@torch.no_grad()
def update_hp_grads(self, clear_lp_grads=False):
for i, group in enumerate(self.bf16_groups):
for j, lp in enumerate(group):
if lp.grad is None:
continue
hp_grad = self.fp32_groups_gradients[i][j]
assert hp_grad is not None, \
f'high precision param has no gradient, lp param_id = {id(lp)} group_info = [{i}][{j}]'
hp_grad.data.add_(lp.grad.data.to(hp_grad.dtype).view(hp_grad.shape))
lp._hp_grad = hp_grad
self.fp32_groups_has_gradients[i][j] = True
# clear gradients
if clear_lp_grads:
lp.grad = None
@torch.no_grad()
def get_grads_for_reduction(self):
return self.fp32_groups_gradients_flat
@torch.no_grad()
def get_grads_for_norm(self, for_clipping=False):
grads = []
tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu)
for i, group in enumerate(self.bf16_groups):
for j, lp in enumerate(group):
if not for_clipping:
if hasattr(lp, PIPE_REPLICATED) and lp.ds_pipe_replicated:
continue
if not (tensor_mp_rank == 0 or is_model_parallel_parameter(lp)):
continue
if not self.fp32_groups_has_gradients[i][j]:
continue
grads.append(self.fp32_groups_gradients[i][j])
return grads
@torch.no_grad()
def update_lp_params(self):
for i, (bf16_partitions,
fp32_partition) in enumerate(zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition)):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
bf16_partitions[partition_id].data.copy_(fp32_partition.data)
# print_rank_0(f'update_lp_params {i=} {partition_id=}', force=True)
# if i == 0:
# print_rank_0(f'{fp32_partition[:10]=}', force=True)
all_gather_dp_groups(partitioned_param_groups=self.bf16_partitioned_groups,
dp_process_group=self.real_dp_process_group,
start_alignment_factor=self.nccl_start_alignment_factor,
allgather_bucket_size=self.allgather_bucket_size)
def clear_hp_grads(self):
for flat_gradients in self.fp32_groups_gradients_flat:
flat_gradients.zero_()
for i, group in enumerate(self.fp32_groups_gradients):
self.fp32_groups_has_gradients[i] = [False] * len(group)
def clear_lp_grads(self):
for group in self.bf16_groups:
for param in group:
param.grad = None
def state_dict(self):
state_dict = {}
state_dict[CLIP_GRAD] = self.clip_grad
state_dict[BASE_OPTIMIZER_STATE] = self.optimizer.state_dict()
state_dict[SINGLE_PARTITION_OF_FP32_GROUPS] = self.fp32_groups_flat_partition
state_dict[GROUP_PADDINGS] = self.group_paddings
state_dict[PARTITION_COUNT] = self.partition_count
state_dict[DS_VERSION] = version
state_dict[PARAM_SLICE_MAPPINGS] = self._param_slice_mappings
return state_dict
# Restore base optimizer fp32 weights from bfloat16 weights
def _restore_from_bit16_weights(self):
for i, group in enumerate(self.bf16_groups):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
for bf16_partitions, fp32_partition in zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition):
fp32_partition.data.copy_(bf16_partitions[partition_id].data)
def refresh_fp32_params(self):
self._restore_from_bit16_weights()
def load_state_dict(self,
state_dict_list,
checkpoint_folder,
load_optimizer_states=True,
load_from_fp32_weights=False):
if checkpoint_folder:
self._load_universal_checkpoint(checkpoint_folder, load_optimizer_states, load_from_fp32_weights)
else:
self._load_legacy_checkpoint(state_dict_list, load_optimizer_states, load_from_fp32_weights)
def _load_legacy_checkpoint(self, state_dict_list, load_optimizer_states=True, load_from_fp32_weights=False):
dp_rank = dist.get_rank(group=self.dp_process_group)
current_rank_sd = state_dict_list[dp_rank]
ckpt_version = current_rank_sd.get(DS_VERSION, False)
assert ckpt_version, f"Empty ds_version in checkpoint, not clear how to proceed"
ckpt_version = pkg_version.parse(ckpt_version)
self.clip_grad = current_rank_sd.get(CLIP_GRAD, self.clip_grad)
if load_optimizer_states:
self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE])
if load_from_fp32_weights:
for current, saved in zip(self.fp32_groups_flat_partition,
current_rank_sd[SINGLE_PARTITION_OF_FP32_GROUPS]):
src_tensor = _get_padded_tensor(saved, current.numel())
current.data.copy_(src_tensor.data)
if load_optimizer_states:
self._link_all_hp_params()
def _load_universal_checkpoint(self, checkpoint_folder, load_optimizer_states, load_from_fp32_weights):
self._load_hp_checkpoint_state(checkpoint_folder)
@property
def param_groups(self):
"""Forward the wrapped optimizer's parameters."""
return self.optimizer.param_groups
def _load_hp_checkpoint_state(self, checkpoint_dir):
checkpoint_dir = os.path.join(checkpoint_dir, "zero")
tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu)
tp_world_size = self.mpu.get_slice_parallel_world_size()
for i, _ in enumerate(self.optimizer.param_groups):
for lp in self.bf16_groups[i]:
if lp._hp_mapping is not None:
#print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}")
lp.load_hp_checkpoint_state(os.path.join(checkpoint_dir, self.param_names[lp]), tp_rank,
tp_world_size)
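# Illustrative sketch (not part of the original class): `_setup_for_real_optimizer` flattens
# each param group into one bf16 buffer, splits that buffer into dp_world_size equal
# partitions, and keeps an fp32 master copy only of this rank's partition. The toy helper
# below mirrors that partitioning with plain tensors; the name and sizes are hypothetical.
def _partition_sketch(flat_numel=16, dp_world_size=4, partition_id=1):
    flat_bf16 = torch.zeros(flat_numel, dtype=torch.bfloat16)
    partition_size = flat_bf16.numel() // dp_world_size
    bf16_partitions = [flat_bf16.narrow(0, i * partition_size, partition_size) for i in range(dp_world_size)]
    fp32_master = bf16_partitions[partition_id].clone().float().detach()
    fp32_master.requires_grad = True  # the real optimizer steps on this fp32 partition
    return bf16_partitions, fp32_master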
def _get_padded_tensor(src_tensor, size):
if src_tensor.numel() >= size:
return src_tensor
padded_tensor = torch.zeros(size, dtype=src_tensor.dtype, device=src_tensor.device)
slice_tensor = torch.narrow(padded_tensor, 0, 0, src_tensor.numel())
slice_tensor.data.copy_(src_tensor.data)
return padded_tensor
| 18,534 | 42.611765 | 118 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/engine.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import re
import stat
import torch
import hashlib
from collections import defaultdict, OrderedDict, deque
from shutil import copyfile
import gc
from torch.nn.modules import Module
from torch.nn.parameter import Parameter
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from typing import Callable, Dict, Union, Iterable
import deepspeed
from deepspeed import comm as dist
from deepspeed.runtime.utils import see_memory_usage, DummyOptim
from .zero.offload_config import OffloadDeviceEnum
from deepspeed.runtime.zero.stage_1_and_2 import DeepSpeedZeroOptimizer
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
from deepspeed.runtime.zero.utils import is_zero_supported_optimizer, ZeRORuntimeException
from deepspeed.runtime.zero.parameter_offload import DeepSpeedZeRoOffload
from deepspeed.runtime.zero.config import ZERO_OPTIMIZATION
from deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer
from deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer
from deepspeed.runtime.bf16_optimizer import BF16_Optimizer
from deepspeed.runtime.config import DEEPSPEED_OPTIMIZERS, \
ADAGRAD_OPTIMIZER, ADAM_OPTIMIZER, ADAMW_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, ONEBIT_LAMB_OPTIMIZER, \
TORCH_ADAM_PARAM, ADAM_W_MODE, ADAM_W_MODE_DEFAULT, ZERO_ONE_ADAM_OPTIMIZER
from deepspeed.runtime.dataloader import DeepSpeedDataLoader
from deepspeed.runtime.constants import \
ROUTE_TRAIN, ROUTE_PREDICT, ROUTE_EVAL, \
PLD_THETA, PLD_GAMMA, BFLOAT16, FP16, AMP, GRADIENT_ACCUMULATION_STEPS, \
DATA_PARALLEL_GROUP, GLOBAL_RANK
from deepspeed.runtime.zero.config import ZeroStageEnum
from deepspeed.compression import compression_scheduler
from deepspeed.compression.constants import \
WEIGHT_QUANTIZE_IN_FORWARD_ENABLED, \
WEIGHT_QUANTIZATION, SHARED_PARAMETERS, \
WEIGHT_QUANTIZE_ENABLED, \
WEIGHT_QUANTIZE_GROUPS, \
WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE, \
WEIGHT_QUANTIZE_CHANGE_RATIO, \
WEIGHT_QUANTIZE_TYPE, \
WEIGHT_QUANTIZE_ROUNDING, \
WEIGHT_QUANTIZE_VERBOSE, \
WEIGHT_QUANTIZE_KERNEL
from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, FROZEN_PARAM_FRAGMENTS
from deepspeed.runtime.sparse_tensor import SparseTensor
from deepspeed.runtime import lr_schedules
from deepspeed.utils import groups
from deepspeed.utils import logger, log_dist, instrument_w_nvtx
from deepspeed.utils.timer import ThroughputTimer, SynchronizedWallClockTimer
from deepspeed.utils.debug import debug_extract_module_and_param_names
from deepspeed.monitor.monitor import MonitorMaster
from deepspeed.runtime.progressive_layer_drop import ProgressiveLayerDrop
from deepspeed.runtime.utils import clip_grad_norm_
from deepspeed.runtime.eigenvalue import Eigenvalue
from deepspeed.runtime.data_pipeline.constants import DATA_SAMPLING, \
DATA_ROUTING, DATA_SAMPLING_ENABLED, CURRICULUM_LEARNING, \
CURRICULUM_LEARNING_ENABLED, DATA_SAMPLING_NUM_WORKERS, RANDOM_LTD, \
RANDOM_LTD_ENABLED, RANDOM_LTD_LAYER_ID, RANDOM_LTD_LAYER_NUM, \
RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE, RANDOM_LTD_LAYER_TOKEN_LR_ENABLED, \
RANDOM_LTD_GLOBAL_BATCH_SIZE, RANDOM_LTD_MICRO_BATCH_SIZE, DATA_EFFICIENCY
from deepspeed.runtime.data_pipeline.curriculum_scheduler import CurriculumScheduler
from deepspeed.runtime.data_pipeline.data_routing.scheduler import RandomLTDScheduler
from deepspeed.runtime.data_pipeline.data_routing.helper import remove_random_ltd_state_dict
from deepspeed.runtime.data_pipeline.data_routing.basic_layer import RandomLayerTokenDrop
from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine
from .pipe.module import PipelineModule
from .utils import get_ma_status
from ..ops.adam import FusedAdam
from ..moe.sharded_moe import TopKGate, MOELayer
from ..moe.layer import MoE
from ..moe.utils import is_moe_param
from ..git_version_info import version
from deepspeed.profiling.flops_profiler.profiler import FlopsProfiler
from deepspeed.utils.logging import print_json_dist, print_configuration
from deepspeed.accelerator import get_accelerator
from deepspeed.runtime.config import DtypeEnum
MEMORY_OPT_ALLREDUCE_SIZE = 500000000
DeepSpeedOptimizerCallable = \
Callable[[Union[Iterable[Parameter], Dict[str, Iterable]]], Optimizer]
DeepSpeedSchedulerCallable = Callable[[Optimizer], _LRScheduler]
try:
import apex
from apex import amp
APEX_INSTALLED = True
except ImportError:
# Fail silently so we don't spam logs unnecessarily if user isn't using amp
APEX_INSTALLED = False
def split_half_float_double_sparse(tensors):
device_type = get_accelerator().device_name()
supported_types = [
"torch.{}.HalfTensor".format(device_type), "torch.{}.FloatTensor".format(device_type),
"torch.{}.DoubleTensor".format(device_type), "torch.{}.BFloat16Tensor".format(device_type),
SparseTensor.type()
]
for t in tensors:
assert t.type() in supported_types, f"attempting to reduce an unsupported grad type: {t.type()}"
buckets = []
for i, dtype in enumerate(supported_types):
bucket = [t for t in tensors if t.type() == dtype]
if bucket:
buckets.append((dtype, bucket))
return buckets
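# Illustrative usage sketch (hypothetical, not part of this module): the helper above groups
# gradients into one bucket per supported dtype so each bucket can be reduced as a single
# flat buffer, e.g. (assuming the tensors already live on the accelerator device):
#   grads = [p.grad for p in model.parameters() if p.grad is not None]
#   for dtype, bucket in split_half_float_double_sparse(grads):
#       flat = _flatten_dense_tensors(bucket)  # one contiguous buffer for this dtype
#       # ... all-reduce `flat`, then scatter results back with _unflatten_dense_tensors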
FORWARD_MICRO_TIMER = 'forward_microstep'
FORWARD_GLOBAL_TIMER = 'forward'
BACKWARD_MICRO_TIMER = 'backward_microstep'
BACKWARD_GLOBAL_TIMER = 'backward'
BACKWARD_INNER_MICRO_TIMER = 'backward_inner_microstep'
BACKWARD_INNER_GLOBAL_TIMER = 'backward_inner'
BACKWARD_REDUCE_MICRO_TIMER = 'backward_allreduce_microstep'
BACKWARD_REDUCE_GLOBAL_TIMER = 'backward_allreduce'
STEP_MICRO_TIMER = 'step_microstep'
STEP_GLOBAL_TIMER = 'step'
class EngineTimers(object):
r"""Wallclock timers for DeepSpeedEngine"""
def __init__(self, enable_micro_timers, enable_global_timers):
self.forward_timers = []
self.backward_timers = []
self.backward_inner_timers = []
self.backward_reduce_timers = []
self.step_timers = []
self.global_timers = []
self.micro_timers = []
if enable_micro_timers:
self.forward_timers += [FORWARD_MICRO_TIMER]
self.backward_timers += [BACKWARD_MICRO_TIMER]
self.backward_inner_timers += [BACKWARD_INNER_MICRO_TIMER]
self.backward_reduce_timers += [BACKWARD_REDUCE_MICRO_TIMER]
self.step_timers += [STEP_MICRO_TIMER]
self.micro_timers += [
FORWARD_MICRO_TIMER, BACKWARD_MICRO_TIMER, BACKWARD_INNER_MICRO_TIMER, BACKWARD_REDUCE_MICRO_TIMER,
STEP_MICRO_TIMER
]
if enable_global_timers:
self.forward_timers += [FORWARD_GLOBAL_TIMER]
self.backward_timers += [BACKWARD_GLOBAL_TIMER]
self.backward_inner_timers += [BACKWARD_INNER_GLOBAL_TIMER]
self.backward_reduce_timers += [BACKWARD_REDUCE_GLOBAL_TIMER]
self.step_timers += [STEP_GLOBAL_TIMER]
self.global_timers += [
FORWARD_GLOBAL_TIMER, BACKWARD_GLOBAL_TIMER, BACKWARD_INNER_GLOBAL_TIMER, BACKWARD_REDUCE_GLOBAL_TIMER,
STEP_GLOBAL_TIMER
]
class DeepSpeedEngine(Module):
r"""DeepSpeed engine for training."""
def __init__(
self,
args,
model,
optimizer=None,
model_parameters=None,
training_data=None,
lr_scheduler=None,
mpu=None,
dist_init_required=None,
collate_fn=None,
config=None,
config_class=None,
dont_change_device=False,
):
super(DeepSpeedEngine, self).__init__()
self.dont_change_device = dont_change_device
self.client_optimizer = optimizer
self.client_lr_scheduler = lr_scheduler
self.training_data = training_data
self.collate_fn = collate_fn
self.mpu = mpu
self.all_to_all_group = None
self.data_parallel_group = None
self.global_steps = 0
self.global_samples = 0
self.micro_steps = 0
self.skipped_steps = 0
self.gradient_average = True
self.warn_unscaled_loss = True
self.config = config
self._config = config_class
self.loaded_checkpoint_mp_world_size = None
self.loaded_checkpoint_dp_world_size = None
self.enable_backward_allreduce = True
self.progressive_layer_drop = None
self.eigenvalue = None
self.block_eigenvalue = None
self.gas_boundary_ctr = 0
self.dist_backend = get_accelerator().communication_backend_name()
self.has_moe_layers = False
self.num_experts = []
self.gate_modules = []
self.moe_layers = []
self._step_applied = False
self._global_grad_norm = None
self.use_ds_comm = False # False --> Use torch.dist, True --> Use ds.comm backend.
self.checkpoint_engine = None
self._is_gradient_accumulation_boundary = None
self.scale_wrt_gas = None
self.losses = []
# for debug purposes - can then debug print: debug_get_module_name(module)
debug_extract_module_and_param_names(model)
# needed for zero_to_fp32 weights reconstruction to remap nameless data to state_dict
self.param_names = {param: name for name, param in model.named_parameters()}
self._do_args_sanity_check(args)
self._configure_with_arguments(args, mpu)
self._do_sanity_check()
see_memory_usage(f"DeepSpeed Engine: After args sanity test", force=self.memory_breakdown())
if mpu is not None:
if self.elasticity_enabled():
if not self.is_elastic_model_parallel_supported():
assert not self.elasticity_enabled(), ("Elasticity is not currently supported"
" with model parallelism.")
self._set_distributed_vars(args)
dist.configure(self._config)
self.monitor = MonitorMaster(self._config.monitor_config)
see_memory_usage(
f"DeepSpeed Engine: Before configure distributed model",
force=self.memory_breakdown(),
)
self.pipeline_parallelism = isinstance(model, PipelineModule)
# Configure distributed model
self._configure_distributed_model(model)
self._get_model_parameters()
see_memory_usage(f"DeepSpeed Engine: After configure distributed model")
# Configure wall clock timers
self.timers = SynchronizedWallClockTimer()
# Throughput timer
self.tput_timer = ThroughputTimer(
batch_size=self.train_batch_size(),
steps_per_output=self.steps_per_print(),
monitor_memory=False,
)
log_dist(f"DeepSpeed Flops Profiler Enabled: {self.flops_profiler_enabled()}", ranks=[0])
if self.flops_profiler_enabled():
self.flops_profiler = FlopsProfiler(self.module, self, self.flops_profiler_recompute_fwd_factor())
if training_data:
self.training_dataloader = self.deepspeed_io(training_data)
else:
self.training_dataloader = None
# Configure optimizer and scheduler
self.optimizer = None
self.basic_optimizer = None
self.lr_scheduler = None
has_optimizer = False
if optimizer or self.optimizer_name():
has_optimizer = True
# If no parameters given by init default to module parameters
if model_parameters is None:
model_parameters = self.module.parameters()
# Convert model parameters from generator to list
if not isinstance(model_parameters, list):
model_parameters = list(model_parameters)
if has_optimizer:
self._configure_optimizer(optimizer, model_parameters)
self._configure_lr_scheduler(lr_scheduler)
self._report_progress(0)
elif self.zero_optimization():
# no optim selected but zero is enabled
self.optimizer = self._configure_zero_optimizer(optimizer=None)
elif self.bfloat16_enabled():
self.optimizer = self._configure_bf16_optimizer(optimizer=None)
# Hook optimizer for snip_momentum pruning
if hasattr(model, 'pruners'):
from ..compression.helper import rewrite_optimizer_step
self.optimizer.pruners = model.pruners
rewrite_optimizer_step(self.optimizer)
# Bookkeeping for sparse support
self.sparse_tensor_module_names = set()
# if self.sparse_gradients_enabled():
for name, module in self.module.named_modules():
if isinstance(module, (torch.nn.Embedding, torch.nn.EmbeddingBag)) and self.sparse_gradients_enabled():
self.sparse_tensor_module_names.add(name + ".weight")
logger.info("Will convert {} to sparse tensor during training".format(name))
self.save_non_zero_checkpoint = False
self.save_zero_checkpoint = False
if not isinstance(self.optimizer, DeepSpeedZeRoOffload):
self._configure_checkpointing(dist_init_required)
if self.eigenvalue_enabled():
self.eigenvalue = self._configure_eigenvalue()
if self.pld_enabled():
self.progressive_layer_drop = self._configure_progressive_layer_drop()
if self.curriculum_enabled_legacy():
self.curriculum_scheduler_legacy = self._configure_curriculum_scheduler_legacy()
if self.random_ltd_enabled():
random_ltd_config = self.random_ltd_config()
random_ltd_config[RANDOM_LTD_GLOBAL_BATCH_SIZE] = self.train_batch_size()
random_ltd_config[RANDOM_LTD_MICRO_BATCH_SIZE] = self.train_micro_batch_size_per_gpu()
self.random_ltd_scheduler = self._configure_random_ltd_scheduler(random_ltd_config)
# Engine timers
self.engine_timers = EngineTimers(enable_micro_timers=self.wall_clock_breakdown(),
enable_global_timers=self.wall_clock_breakdown()
or self.flops_profiler_enabled())
if self.global_rank == 0:
self._config.print("DeepSpeedEngine configuration")
if self.dump_state():
print_configuration(self, "DeepSpeedEngine")
# Use torch (un)flatten ops
self.flatten = _flatten_dense_tensors
self.unflatten = _unflatten_dense_tensors
def destroy(self):
if self.optimizer is not None and hasattr(self.optimizer, 'destroy'):
self.optimizer.destroy()
def _get_model_parameters(self):
if self.autotuning_profile_model_info():
self.autotuning_model_info = {}
num_params = 0
trainable_num_params = 0
for p in self.module.parameters():
# since user code might call deepspeed.zero.Init() before deepspeed.initialize(), need to check the attribute to check if the parameter is partitioned in zero 3 already or not
n = 0
if hasattr(p, "ds_tensor"): # if the parameter is partitioned in zero 3
n += p.ds_numel
else: # if the parameter is not partitioned in zero 3 yet
n += p.numel()
num_params += n
if p.requires_grad:
trainable_num_params += n
if self.global_rank == 0:
self.autotuning_model_info["num_params"] = num_params * self.mp_world_size
self.autotuning_model_info["trainable_num_params"] = trainable_num_params * self.mp_world_size
logger.info(f"model parameter = {num_params}")
def get_batch_info(self):
"""Get all training batch related settings.
Returns:
train_batch_size (int): The effective training batch size. This is the number of data
samples that leads to one model update step.
train_micro_batch_size_per_gpu (int): Batch size to be processed by one GPU in one
step (without gradient accumulation).
gradient_accumulation_steps (int): Number of training steps to accumulate gradients
before averaging and applying them.
"""
return (
self.train_batch_size(),
self.train_micro_batch_size_per_gpu(),
self.gradient_accumulation_steps(),
)
def set_train_batch_size(self, train_batch_size):
"""Adjust the global batch size by increasing or decreasing the number of
micro-batches (i.e., gradient accumulation steps). The size of each micro-batch
(i.e., ``train_micro_batch_size_per_gpu``) is not changed.
Args:
train_batch_size (int): The new global batch size for training.
Raises:
ValueError: if ``train_batch_size`` is not divisible by the
configured micro-batch size and data parallelism.
"""
if train_batch_size % (self.train_micro_batch_size_per_gpu() * self.dp_world_size) != 0:
#print(f'{train_batch_size=} {self.train_micro_batch_size_per_gpu()=} {self.dp_world_size=}')
raise ValueError('Train batch size must be divisible by micro-batch size times data parallelism')
new_gas = train_batch_size // (self.train_micro_batch_size_per_gpu() * self.dp_world_size)
# overwrite config
self._config.train_batch_size = train_batch_size
self._config.gradient_accumulation_steps = new_gas
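# Illustrative example (hypothetical numbers): with train_micro_batch_size_per_gpu=4 and
# dp_world_size=8, calling set_train_batch_size(256) keeps the micro-batch size fixed and
# sets gradient_accumulation_steps to 256 // (4 * 8) = 8; a value not divisible by 32
# would raise the ValueError above.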
def set_train_micro_batch_size(self, micro_batch_size):
"""Adjust the micro batch size(i.e., the micro batch size in every data parallel group),
while keep the gradient accumulation steps the same.
Args:
micro_batch_size (int): The new micro batch size for training.
"""
# overwrite config
new_global_batch_size = micro_batch_size * self._config.gradient_accumulation_steps * self.dp_world_size
self._config.train_batch_size = new_global_batch_size
self._config.train_micro_batch_size_per_gpu = micro_batch_size
def set_data_post_process_func(self, post_process_func):
if self.training_dataloader is not None:
self.training_dataloader.post_process_func = post_process_func
def set_custom_curriculum_learning_schedule(self, schedule_func_dict):
if self.training_dataloader is not None and self.curriculum_learning_enabled():
self.training_dataloader.data_sampler.set_custom_curriculum_learning_schedule(schedule_func_dict)
def get_global_grad_norm(self) -> float:
"""Return the 2-norm of all gradients. If there is model parallelism,
the norm will be global.
The computed norm will be cached and reused until the next step() pass.
.. note::
In the presence of model parallelism, this is a collective call
and acts as a barrier among ``mpu.get_model_parallel_group()``.
Returns:
float: norm
"""
return self._global_grad_norm
def __getattr__(self, name):
"""
Pass through attributes defined in the model if they are not overridden by ds-engine.
"""
_module = {}
if "module" in self.__dict__:
_module = self.__dict__['module']
if name in dir(self):
return getattr(self, name)
elif name in dir(_module):
return getattr(_module, name)
else:
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
def checkpoint_tag_validation_enabled(self):
return self._config.checkpoint_tag_validation_enabled
def checkpoint_tag_validation_fail(self):
return self._config.checkpoint_tag_validation_fail
def elasticity_enabled(self):
return self._config.elasticity_enabled
def is_elastic_model_parallel_supported(self):
if self.elasticity_enabled():
# Add code for finding number of GPUs per node automatically
if self._config.num_gpus_per_node % self._config.elastic_model_parallel_size == 0:
return True
else:
return False
def pld_enabled(self):
return self._config.pld_enabled
def pld_params(self):
return self._config.pld_params
def pld_theta(self):
return self.pld_params()[PLD_THETA]
def pld_gamma(self):
return self.pld_params()[PLD_GAMMA]
def eigenvalue_enabled(self):
return self._config.eigenvalue_enabled
def eigenvalue_verbose(self):
return self._config.eigenvalue_verbose
def eigenvalue_max_iter(self):
return self._config.eigenvalue_max_iter
def eigenvalue_tol(self):
return self._config.eigenvalue_tol
def eigenvalue_stability(self):
return self._config.eigenvalue_stability
def eigenvalue_gas_boundary_resolution(self):
return self._config.eigenvalue_gas_boundary_resolution
def eigenvalue_layer_name(self):
return self._config.eigenvalue_layer_name
def eigenvalue_layer_num(self):
return self._config.eigenvalue_layer_num
def curriculum_enabled_legacy(self):
return self._config.curriculum_enabled_legacy
def curriculum_params_legacy(self):
return self._config.curriculum_params_legacy
def data_efficiency_enabled(self):
return self._config.data_efficiency_enabled
def data_efficiency_config(self):
return self._config.data_efficiency_config
def data_sampling_enabled(self):
return self._config.data_efficiency_config[DATA_SAMPLING][DATA_SAMPLING_ENABLED]
def data_sampling_config(self):
return self._config.data_efficiency_config[DATA_SAMPLING]
def curriculum_learning_enabled(self):
return self._config.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_ENABLED]
def curriculum_learning_config(self):
return self._config.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING]
def random_ltd_enabled(self):
return self._config.data_efficiency_config[DATA_ROUTING][RANDOM_LTD][RANDOM_LTD_ENABLED]
def random_ltd_config(self):
return self._config.data_efficiency_config[DATA_ROUTING][RANDOM_LTD]
def random_ltd_initialize(self):
assert self.random_ltd_enabled()
random_ltd_config = self.random_ltd_config()
random_ltd_queue = deque([x for x in sorted(random_ltd_config[RANDOM_LTD_LAYER_ID])])
count = 0
for name, layer in self.module.named_modules():
if isinstance(layer, RandomLayerTokenDrop):
if len(random_ltd_queue) != 0 and str(random_ltd_queue[0]) in name: ###[1,2,3]
layer.init_config(random_ltd_config, self.random_ltd_scheduler, count)
random_ltd_queue.popleft()
count += 1
if random_ltd_config[RANDOM_LTD_LAYER_NUM] != count:
raise ValueError(f'random_ltd_layer_num {random_ltd_config[RANDOM_LTD_LAYER_NUM]} must be '
f'equal to the length of random_ltd_layer_id ({count})')
if random_ltd_config[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE][RANDOM_LTD_LAYER_TOKEN_LR_ENABLED]:
assert self.client_lr_scheduler is None
raise ValueError('layer token lr schedule is not yet supported')
#self.lr_scheduler = lr_schedules.WarmupLayerTokenDecayLR(self.optimizer, self.random_ltd_scheduler)
def wall_clock_breakdown(self):
return self._config.wall_clock_breakdown
def flops_profiler_enabled(self):
return self._config.flops_profiler_config.enabled or self.autotuning_enabled()
def flops_profiler_recompute_fwd_factor(self):
return self._config.flops_profiler_config.recompute_fwd_factor
def flops_profiler_profile_step(self):
step = self._config.flops_profiler_config.profile_step
if self._config.autotuning_config.enabled:
step = self.autotuning_start_profile_step()
return step
def flops_profiler_module_depth(self):
return self._config.flops_profiler_config.module_depth
def flops_profiler_top_modules(self):
return self._config.flops_profiler_config.top_modules
def flops_profiler_detailed(self):
if self._config.autotuning_config.enabled:
return False
return self._config.flops_profiler_config.detailed
def flops_profiler_output_file(self):
return self._config.flops_profiler_config.output_file
def memory_breakdown(self):
return self._config.memory_breakdown
def autotuning_enabled(self):
return self._config.autotuning_config.enabled
def autotuning_start_profile_step(self):
return self._config.autotuning_config.start_profile_step
def autotuning_end_profile_step(self):
return self._config.autotuning_config.end_profile_step
def autotuning_metric_path(self):
path = self._config.autotuning_config.metric_path
if not path:
path = os.path.join(os.getcwd(), "autotuning_metric.json")
return path
def autotuning_model_info_path(self):
path = self._config.autotuning_config.model_info_path
if not path:
path = os.path.join(os.getcwd(), "autotuning_model_info.json")
return path
def autotuning_metric(self):
return self._config.autotuning_config.metric
def autotuning_profile_model_info(self):
return self.autotuning_enabled(
) and self._config.autotuning_config.model_info and self._config.autotuning_config.model_info.get(
"profile", False)
def sparse_gradients_enabled(self):
return self._config.sparse_gradients_enabled
def train_batch_size(self):
return self._config.train_batch_size
def train_micro_batch_size_per_gpu(self):
return self._config.train_micro_batch_size_per_gpu
def optimizer_name(self):
return (self.client_optimizer.__class__.__name__ if self.client_optimizer else self._config.optimizer_name)
def optimizer_params(self):
return self._config.optimizer_params
def optimizer_legacy_fusion(self):
return self._config.optimizer_legacy_fusion
def scheduler_name(self):
return self._config.scheduler_name
def scheduler_params(self):
return self._config.scheduler_params
def quantize_training(self):
return (
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS]
[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED],
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_ENABLED],
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_GROUPS],
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS]
[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE],
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_CHANGE_RATIO],
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_TYPE],
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_ROUNDING],
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_VERBOSE],
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_KERNEL],
)
def zero_optimization(self):
return self._config.zero_enabled
def zero_allow_untested_optimizer(self):
return self._config.zero_allow_untested_optimizer
def zero_force_ds_cpu_optimizer(self):
return self._config.zero_force_ds_cpu_optimizer
def zero_reduce_scatter(self):
return self._config.zero_config.reduce_scatter
def zero_overlap_comm(self):
return self._config.zero_config.overlap_comm
def zero_offload_optimizer(self):
return self._config.zero_config.offload_optimizer
def zero_offload_param(self):
return self._config.zero_config.offload_param
def zero_use_cpu_optimizer(self):
if self._config.zero_config.offload_optimizer is not None:
return self._config.zero_config.offload_optimizer.device in [OffloadDeviceEnum.cpu, OffloadDeviceEnum.nvme]
return False
def zero_cpu_offload(self):
if self._config.zero_config.offload_optimizer is not None:
return self._config.zero_config.offload_optimizer.device == OffloadDeviceEnum.cpu
return False
def zero_sub_group_size(self):
return self._config.zero_config.sub_group_size
def zero_optimization_stage(self):
return self._config.zero_optimization_stage
def mics_shard_size(self):
return self._config.mics_shard_size
def zero_reduce_bucket_size(self):
return self._config.zero_config.reduce_bucket_size
def zero_allgather_bucket_size(self):
return self._config.zero_config.allgather_bucket_size
def zero_optimization_partition_gradients(self):
return self.zero_optimization_stage() >= ZeroStageEnum.gradients
def zero_optimization_partition_weights(self):
return self.zero_optimization_stage() >= ZeroStageEnum.weights
def zero_contiguous_gradients(self):
return self._config.zero_config.contiguous_gradients
def zero_load_from_fp32_weights(self):
return self._config.zero_config.load_from_fp32_weights
def zero_elastic_checkpoint(self):
return self._config.zero_config.elastic_checkpoint
def zero_max_live_parameters(self):
return self._config.zero_config.max_live_parameters
def zero_max_reuse_distance(self):
return self._config.zero_config.max_reuse_distance
def zero_prefetch_bucket_size(self):
return self._config.zero_config.prefetch_bucket_size
def zero_param_persistence_threshold(self):
return self._config.zero_config.param_persistence_threshold
def zero_model_persistence_threshold(self):
return self._config.zero_config.model_persistence_threshold
def zero_gather_16bit_weights_on_model_save(self):
return self._config.zero_config.gather_16bit_weights_on_model_save
def zero_grad_hooks(self):
return self._config.zero_config.grad_hooks
def zero_legacy_stage1(self):
return self._config.zero_config.legacy_stage1
def zero_ignore_unused_parameters(self):
return self._config.zero_config.ignore_unused_parameters
def fp16_enabled(self):
return self._config.fp16_enabled
def bfloat16_enabled(self):
return self._config.bfloat16_enabled
def fp16_master_weights_and_gradients(self):
return self._config.fp16_master_weights_and_gradients
def amp_enabled(self):
return self._config.amp_enabled
def amp_params(self):
return self._config.amp_params
def fp16_auto_cast(self):
return self._config.fp16_auto_cast
def loss_scale(self):
return self._config.loss_scale
def gradient_accumulation_steps(self):
return self._config.gradient_accumulation_steps
def use_node_local_storage(self):
return self._config.use_node_local_storage
def load_universal_checkpoint(self):
return self._config.load_universal_checkpoint
@property
def communication_data_type(self):
res = self._config.communication_data_type
if res is not None:
return res
if self.fp16_enabled():
return torch.float16
if self.bfloat16_enabled():
return torch.bfloat16
return torch.float32
def postscale_gradients(self):
return not self._config.prescale_gradients
def gradient_predivide_factor(self):
return self._config.gradient_predivide_factor
def steps_per_print(self):
return self._config.steps_per_print
def zero_allgather_partitions(self):
return self._config.zero_config.allgather_partitions
def zero_round_robin_gradients(self):
return self._config.zero_config.round_robin_gradients
def zero_hpz_partition_size(self):
return self._config.zero_config.zero_hpz_partition_size
def zero_quantized_weights(self):
return self._config.zero_config.zero_quantized_weights
def zero_quantized_gradients(self):
return self._config.zero_config.zero_quantized_gradients
def dump_state(self):
return self._config.dump_state
def gradient_clipping(self):
return self._config.gradient_clipping
def dynamic_loss_scale(self):
return self._config.loss_scale == 0
def initial_dynamic_scale(self):
return self._config.initial_dynamic_scale
def dynamic_loss_scale_args(self):
return self._config.dynamic_loss_scale_args
def swap_tensor_config(self):
return self._config.swap_tensor_config
def aio_config(self):
return self._config.aio_config
def get_data_types(self):
model_dtype = torch.float32
if self.fp16_enabled():
model_dtype = torch.float16
elif self.bfloat16_enabled():
model_dtype = torch.bfloat16
if self._config.grad_accum_dtype is None:
if model_dtype == torch.bfloat16 and not self.zero_optimization():
grad_accum_dtype = torch.float32
else:
grad_accum_dtype = model_dtype
else:
grad_accum_dtype = DtypeEnum(self._config.grad_accum_dtype).value
return (model_dtype, grad_accum_dtype)
def _configure_lr_scheduler(self, client_lr_scheduler):
# First check for scheduler in json configuration
lr_scheduler = self._scheduler_from_config(self.optimizer)
if lr_scheduler:
log_dist(f"DeepSpeed using configured LR scheduler = {self.scheduler_name()}", ranks=[0])
self.lr_scheduler = lr_scheduler
else:
if isinstance(client_lr_scheduler, Callable):
log_dist('DeepSpeed using client callable to create LR scheduler', ranks=[0])
self.lr_scheduler = client_lr_scheduler(self.basic_optimizer)
else:
log_dist('DeepSpeed using client LR scheduler', ranks=[0])
self.lr_scheduler = client_lr_scheduler
log_dist(f'DeepSpeed LR Scheduler = {self.lr_scheduler}', ranks=[0])
def _configure_checkpointing(self, dist_init_required):
self.checkpoint_engine = TorchCheckpointEngine()
if self._config is not None and self._config.nebula_config.enabled:
try:
from deepspeed.runtime.checkpoint_engine.nebula_checkpoint_engine import \
NebulaCheckpointEngine
self.checkpoint_engine = NebulaCheckpointEngine(config_params=self._config.nebula_config)
except ImportError as err:
logger.error(f"No torch_nebula was found! Will fall back to torch.save. Details: {err}")
self.checkpoint_engine = TorchCheckpointEngine()
dp_rank = self.global_rank
if self.mpu:
dp_rank = self.mpu.get_data_parallel_rank()
rank = self.local_rank if self.use_node_local_storage() else dp_rank
# only the first data parallel process needs to store the model checkpoint
# if you want to use node local storage this must be done by rank 0 on each
# node
self.save_non_zero_checkpoint = (rank == 0) or self.zero_optimization_partition_weights()
if self.zero_optimization() or self.bfloat16_enabled():
param_rank = dist.get_rank(group=self.optimizer.dp_process_group)
# Only the first parameter parallel process needs to store the
# optimizer state checkpoints for zero
self.save_zero_checkpoint = param_rank == dp_rank
def _scheduler_from_config(self, optimizer):
scheduler_name = self.scheduler_name()
if scheduler_name is not None:
if hasattr(lr_schedules, scheduler_name):
scheduler = getattr(lr_schedules, scheduler_name)
else:
assert hasattr(torch.optim.lr_scheduler,
scheduler_name), f"DeepSpeed does not recognize LR scheduler {scheduler_name}"
scheduler = getattr(torch.optim.lr_scheduler, scheduler_name)
scheduler_params = self.scheduler_params()
instantiated_scheduler = scheduler(optimizer, **scheduler_params)
return instantiated_scheduler
else:
return None
def _set_distributed_vars(self, args):
device_rank = args.device_rank if args is not None and hasattr(args, 'device_rank') else self.local_rank
if device_rank >= 0:
get_accelerator().set_device(device_rank)
self.device = torch.device(get_accelerator().device_name(), device_rank)
self.world_size = dist.get_world_size()
self.global_rank = dist.get_rank()
else:
self.world_size = 1
self.global_rank = 0
self.device = torch.device(get_accelerator().device_name())
# Configure based on command line arguments
def _configure_with_arguments(self, args, mpu):
# After the distributed backend is initialized we are guaranteed the LOCAL_RANK
# environment variable is set. We must align args.local_rank to this value for
# backwards compatibility with scripts relying on [args|self].local_rank containing
# the correct local rank info. _do_args_sanity_check will ensure this is the case.
if "OMPI_COMM_WORLD_LOCAL_RANK" in os.environ:
ompi_local_rank = os.environ.get("OMPI_COMM_WORLD_LOCAL_RANK")
local_rank = os.environ.get('LOCAL_RANK', ompi_local_rank)
assert ompi_local_rank == local_rank, f"LOCAL_RANK ({local_rank}) != OMPI_COMM_WORLD_LOCAL_RANK ({ompi_local_rank}), " \
"not sure how to proceed as we're seeing conflicting local rank info."
os.environ['LOCAL_RANK'] = local_rank
self.local_rank = int(os.environ['LOCAL_RANK'])
if hasattr(args, 'local_rank'):
args.local_rank = self.local_rank
# Validate command line arguments
def _do_args_sanity_check(self, args):
assert "LOCAL_RANK" in os.environ or "OMPI_COMM_WORLD_LOCAL_RANK" in os.environ, "DeepSpeed requires the LOCAL_RANK environment " \
"variable, it is set by the deepspeed launcher, deepspeed.init_distributed, or the torch's launcher. If using a " \
"different launcher please ensure LOCAL_RANK is set prior to initializing deepspeed."
if hasattr(args, 'local_rank') and args.local_rank is not None:
assert isinstance(args.local_rank,
int), f"args.local_rank of {args.local_rank} is an unknown type {type(args.local_rank)}"
if args.local_rank >= 0:
env_local_rank = int(os.environ.get("LOCAL_RANK"))
assert (
env_local_rank == args.local_rank
), f"Mismatch in local rank setting, args.local_rank={args.local_rank} but env['LOCAL_RANK']={env_local_rank}."
def _is_supported_optimizer(self, optimizer_name):
return (optimizer_name in DEEPSPEED_OPTIMIZERS or getattr(torch.optim, optimizer_name, None) is not None)
def _supported_optims(self):
FairseqOptimizer = None
try:
from fairseq.optim.fairseq_optimizer import FairseqOptimizer
except ImportError:
pass
expected_optim_types = [Optimizer]
if FairseqOptimizer:
# fairseq optims are not torch.optim objects
expected_optim_types.append(FairseqOptimizer)
return expected_optim_types
# Validate configuration based on command line arguments
def _do_sanity_check(self):
expected_optim_types = self._supported_optims()
expected_optim_types += [type(None), Callable]
assert isinstance(self.client_optimizer, tuple(expected_optim_types)), \
f'Client Optimizer is of unexpected type {type(self.client_optimizer)}'
if not self.client_optimizer:
if self.optimizer_name() is not None:
assert self._is_supported_optimizer(
self.optimizer_name()), "{} is not a supported DeepSpeed Optimizer".format(self.optimizer_name())
if (self.optimizer_name() == LAMB_OPTIMIZER or self.optimizer_name() == ONEBIT_LAMB_OPTIMIZER):
assert (self.dynamic_loss_scale()), "DeepSpeed {} optimizer requires dynamic loss scaling".format(
self.optimizer_name())
# Detect invalid combinations of client optimizer and client scheduler
if isinstance(self.client_lr_scheduler, _LRScheduler):
assert isinstance(self.client_optimizer, Optimizer), \
f'Client Optimizer (type = {type(self.client_optimizer)}) is not instantiated but Client LR Scheduler is instantiated'
def _broadcast_model(self):
def is_replicated(p):
if hasattr(p, "ds_status") and p.ds_status is not ZeroParamStatus.AVAILABLE:
return False
return True
for p in self.module.parameters():
# Broadcast the model for different parameters
if is_moe_param(p):
if torch.is_tensor(p) and is_replicated(p):
dist.broadcast(p,
groups._get_expert_broadcast_src_rank(p.group_name),
group=self.expert_data_parallel_group[p.group_name])
else:
if torch.is_tensor(p) and is_replicated(p):
dist.broadcast(p, groups._get_broadcast_src_rank(), group=self.data_parallel_group)
@staticmethod
def __check_params(model: Module, dtype: torch.dtype) -> None:
return
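# NOTE: the early return above disables the dtype consistency check below; the
# validation code is kept for reference only.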
if not all(param.dtype == dtype for param in model.parameters()) and dist.get_rank() == 0:
raise ValueError(f"{dtype} is enabled but the following parameters have dtype that is "
f"not {dtype}: "
f"{[(n, p.dtype) for n, p in model.named_parameters() if p.dtype != dtype]}")
def _set_client_model(self, model):
# register client model in _modules so that nn.module methods work correctly
modules = self.__dict__.get('_modules')
modules['module'] = model
# register module attribute in engine but avoid getattr
self.__dict__['module'] = model
def _configure_distributed_model(self, model):
self._set_client_model(model)
is_zero3_model = self.zero_optimization_partition_weights() and any(
[hasattr(param, "ds_id") for param in self.module.parameters()])
if self.fp16_enabled():
if is_zero3_model:
self.__check_params(self.module, torch.half)
self.module.half()
elif self.bfloat16_enabled():
if is_zero3_model:
self.__check_params(self.module, torch.bfloat16)
self.module.bfloat16()
else:
self.__check_params(self.module, torch.float)
# zero.Init() handles device placement of model
if not self.dont_change_device:
self.module.to(self.device)
# MoE related initialization
for _, module in self.module.named_modules():
if isinstance(module, MoE):
self.has_moe_layers = True
self.num_experts.append(module.num_experts)
if self.has_moe_layers:
for _, module in self.module.named_modules():
if isinstance(module, TopKGate):
self.gate_modules.append(module)
if self.wall_clock_breakdown():
module.wall_clock_breakdown = True
if isinstance(module, MOELayer):
self.moe_layers.append(module)
if self.wall_clock_breakdown():
module.wall_clock_breakdown = True
# Pass the mpu from here to groups. For subsequent use, just query groups
if self.mpu is not None:
groups.mpu = self.mpu
# Set deepspeed parallelism spec. for the model including expert parallelism
for _, module in self.module.named_modules():
if hasattr(module, 'set_deepspeed_parallelism'):
module.set_deepspeed_parallelism()
# Query the groups module to get information about various parallel groups
self.local_all_to_all_group = None
if self.zero_quantized_gradients():
log_dist("Using quantized gradients", ranks=[0])
self.local_all_to_all_group = groups._get_local_all_to_all_group()
self.data_parallel_group = groups._get_data_parallel_group()
self.dp_world_size = groups._get_data_parallel_world_size()
self.mp_world_size = groups._get_model_parallel_world_size()
self.expert_parallel_group = groups._get_expert_parallel_group_dict()
self.expert_data_parallel_group = groups._get_expert_data_parallel_group_dict()
if not self.amp_enabled():
self._broadcast_model()
# check if parameters are duplicated in optimizer param_groups
def _check_for_duplicates(self, optimizer):
for name, param in self.module.named_parameters():
param_id = id(param)
def ids_list(group):
return [id(param) for param in group]
occurrence = sum([
ids_list(group['params']).count(param_id) if param_id in ids_list(group['params']) else 0
for group in optimizer.param_groups
])
assert occurrence <= 1, f"Parameter with name: {name} occurs multiple times in optimizer.param_groups. Make sure it only appears once to prevent undefined behavior."
def _do_optimizer_sanity_check(self, basic_optimizer):
model_dtype, grad_accum_dtype = self.get_data_types()
zero_enabled = self.zero_optimization()
amp_enabled = self.amp_enabled()
# config based assertions
assert (
not (amp_enabled and zero_enabled)
), "Amp and ZeRO are not currently compatible, please use (legacy) fp16 mode which performs similar to amp opt_mode=O2"
if zero_enabled:
if not is_zero_supported_optimizer(basic_optimizer):
assert (
self.zero_allow_untested_optimizer()
), 'You are using an untested ZeRO Optimizer. Please add <"zero_allow_untested_optimizer": true> in the configuration file to use it.'
if self.global_rank == 0:
logger.warning("**** You are using ZeRO with an untested optimizer, proceed with caution *****")
if model_dtype == torch.bfloat16 and grad_accum_dtype == torch.float32 and self.zero_optimization_stage(
) == 1:
return BFLOAT16
if model_dtype != grad_accum_dtype:
raise NotImplementedError(
"Model data type and gradient accumulation data type must be equal to use ZeRO")
return ZERO_OPTIMIZATION
elif amp_enabled:
if model_dtype != grad_accum_dtype:
raise NotImplementedError(
"Model data type and gradient accumulation data type must be equal to use Amp")
if model_dtype == torch.bfloat16 or model_dtype == torch.float16:
raise NotImplementedError("Cannot enable both amp with (legacy) fp16 or bfloat16 mode")
try:
logger.info("Initializing Apex amp from: {}".format(amp.__path__))
except NameError:
# If apex/amp is available it will be imported above
raise RuntimeError("Unable to import apex/amp, please make sure it is installed")
return AMP
# data type checks
elif model_dtype == grad_accum_dtype:
if model_dtype == torch.bfloat16:
raise NotImplementedError(
"Bfloat16 wrapper must use a gradient accumulation type of fp32, enable ZeRO to use Bfloat16 gradient accumulation"
)
if model_dtype == torch.float16:
return FP16
# else optimizer_wrapper = None
elif model_dtype == torch.bfloat16 and grad_accum_dtype == torch.float32:
return BFLOAT16
else:
raise NotImplementedError("unsupported mix of model dtype and gradient accumulation type")
return None
# Configure optimizer
def _configure_optimizer(self, client_optimizer, model_parameters):
if client_optimizer is not None:
if isinstance(client_optimizer, tuple(self._supported_optims())):
client_optimizer.param_groups[:] = [
pg for pg in client_optimizer.param_groups if len(pg["params"]) != 0
]
log_dist("Removing param_group that has no 'params' in the client Optimizer", ranks=[0])
basic_optimizer = client_optimizer
log_dist('Using client Optimizer as basic optimizer', ranks=[0])
else:
basic_optimizer = client_optimizer(model_parameters)
log_dist('Using client callable to create basic optimizer', ranks=[0])
if self.zero_use_cpu_optimizer() and not isinstance(basic_optimizer, deepspeed.ops.adam.DeepSpeedCPUAdam):
if self.zero_force_ds_cpu_optimizer():
msg = f'You are using ZeRO-Offload with a client provided optimizer ({type(basic_optimizer)}) which in most cases will yield poor performance. Please either use deepspeed.ops.adam.DeepSpeedCPUAdam or set an optimizer in your ds-config (https://www.deepspeed.ai/docs/config-json/#optimizer-parameters). If you really want to use a custom optimizer w. ZeRO-Offload and understand the performance impacts you can also set <"zero_force_ds_cpu_optimizer": false> in your configuration file.'
raise ZeRORuntimeException(msg)
else:
basic_optimizer = self._configure_basic_optimizer(model_parameters)
log_dist(f"Using DeepSpeed Optimizer param name {self.optimizer_name()} as basic optimizer", ranks=[0])
self._check_for_duplicates(basic_optimizer)
self.basic_optimizer = basic_optimizer
log_dist("DeepSpeed Basic Optimizer = {}".format(basic_optimizer.__class__.__name__), ranks=[0])
optimizer_wrapper = self._do_optimizer_sanity_check(basic_optimizer)
if optimizer_wrapper == ZERO_OPTIMIZATION:
self.optimizer = self._configure_zero_optimizer(basic_optimizer)
elif optimizer_wrapper == AMP:
amp_params = self.amp_params()
log_dist(f"Initializing AMP with these params: {amp_params}", ranks=[0])
model, self.optimizer = amp.initialize(self.module, basic_optimizer, **amp_params)
self._set_client_model(model)
self._broadcast_model()
# TODO: maybe need to broadcast experts differently?
elif optimizer_wrapper == FP16:
self.optimizer = self._configure_fp16_optimizer(basic_optimizer)
elif optimizer_wrapper == BFLOAT16:
self.optimizer = self._configure_bf16_optimizer(basic_optimizer)
else:
self.optimizer = basic_optimizer
log_dist("DeepSpeed Final Optimizer = {}".format(self.optimizer_name()), ranks=[0])
self.compression_scheduler = self._configure_compression_scheduler()
self.quantizer = self._configure_quantization()
def _configure_basic_optimizer(self, model_parameters):
optimizer_parameters = self.optimizer_params()
if optimizer_parameters is None:
optimizer_parameters = {}
# print(optimizer_parameters.keys())
if "max_grad_norm" in optimizer_parameters.keys():
raise ValueError(
"'max_grad_norm' is not supported as an optimizer parameter, please switch to using the deepspeed parameter 'gradient_clipping' see: https://www.deepspeed.ai/docs/config-json/#gradient-clipping for more details"
)
if self.optimizer_name() in [ADAM_OPTIMIZER, ADAMW_OPTIMIZER]:
torch_adam = optimizer_parameters.pop(TORCH_ADAM_PARAM, False)
adam_w_mode = optimizer_parameters.pop(ADAM_W_MODE, ADAM_W_MODE_DEFAULT)
# The AdamW optimizer name always enables AdamW logic; for Adam, the adam_w_mode flag decides (ADAM_W_MODE_DEFAULT when unset)
effective_adam_w_mode = self.optimizer_name() == ADAMW_OPTIMIZER or adam_w_mode
if torch_adam:
if not effective_adam_w_mode:
optimizer = torch.optim.Adam(model_parameters, **optimizer_parameters)
else:
optimizer = torch.optim.AdamW(model_parameters, **optimizer_parameters)
else:
if self.zero_use_cpu_optimizer():
from deepspeed.ops.adam import DeepSpeedCPUAdam
optimizer = DeepSpeedCPUAdam(model_parameters,
**optimizer_parameters,
adamw_mode=effective_adam_w_mode)
else:
from deepspeed.ops.adam import FusedAdam
optimizer = FusedAdam(
model_parameters,
**optimizer_parameters,
adam_w_mode=effective_adam_w_mode,
)
elif self.optimizer_name() == ADAGRAD_OPTIMIZER:
if self.zero_use_cpu_optimizer():
from deepspeed.ops.adagrad import DeepSpeedCPUAdagrad
optimizer = DeepSpeedCPUAdagrad(model_parameters, **optimizer_parameters)
else:
optimizer = torch.optim.Adagrad(model_parameters, **optimizer_parameters)
elif self.optimizer_name() == LAMB_OPTIMIZER:
from deepspeed.ops.lamb import FusedLamb
optimizer = FusedLamb(model_parameters, **optimizer_parameters)
elif self.optimizer_name() == ONEBIT_ADAM_OPTIMIZER:
assert not self.zero_optimization(), "1bit-Adam is not compatible with ZeRO"
from deepspeed.runtime.fp16.onebit.adam import OnebitAdam
optimizer = OnebitAdam(model_parameters, self, **optimizer_parameters)
if not self.fp16_enabled():
logger.warning(f"Currently the convergence of 1-bit Adam is only verified under FP16")
elif self.optimizer_name() == ZERO_ONE_ADAM_OPTIMIZER:
assert not self.zero_optimization(), "0/1 Adam is not compatible with ZeRO"
from deepspeed.runtime.fp16.onebit.zoadam import ZeroOneAdam
optimizer = ZeroOneAdam(model_parameters, self, **optimizer_parameters)
if not self.fp16_enabled():
logger.warning(f'Currently the convergence of 0/1 Adam is only verified under FP16')
elif self.optimizer_name() == ONEBIT_LAMB_OPTIMIZER:
assert not self.zero_optimization(), "1bit-Lamb is not compatible with ZeRO"
from deepspeed.runtime.fp16.onebit.lamb import OnebitLamb
optimizer = OnebitLamb(model_parameters, self, **optimizer_parameters)
if not self.fp16_enabled():
logger.warning(f"Currently the convergence of 1-bit Lamb is only verified under FP16")
else:
torch_optimizer = getattr(torch.optim, self.optimizer_name())
optimizer = torch_optimizer(model_parameters, **optimizer_parameters)
return optimizer
def _configure_compression_scheduler(self):
return compression_scheduler(self.module, self._config.compression_config)
def _configure_random_ltd_scheduler(self, configs):
return RandomLTDScheduler(configs)
def _configure_quantization(self):
(
quantize_weight_in_forward,
quantize_enabled,
q_groups,
q_mixed_fp16,
q_change_ratio,
q_type,
q_rounding,
q_verbose,
use_quantizer_kernel,
) = self.quantize_training()
if quantize_enabled and not quantize_weight_in_forward:
assert self.fp16_enabled(
), "MoQ (quantize in optimization step) weight quantization is only supported for FP16"
quantizer = None
if quantize_enabled and not quantize_weight_in_forward:
from deepspeed.runtime.quantize import Quantizer
quantizer = Quantizer(
q_groups,
q_mixed_fp16,
q_change_ratio,
q_type,
q_rounding,
q_verbose,
self.eigenvalue_enabled(),
use_quantizer_kernel,
self.eigenvalue_layer_num() if self.eigenvalue_enabled() else 0,
)
return quantizer
def _configure_fp16_optimizer(self, optimizer):
initial_dynamic_scale = self.initial_dynamic_scale()
dynamic_loss_args = self.dynamic_loss_scale_args()
clip_grad = self.gradient_clipping()
if APEX_INSTALLED:
fused_opts = (apex.optimizers.FusedAdam, FusedAdam)
else:
fused_opts = FusedAdam
if isinstance(optimizer, fused_opts) \
or self.optimizer_name() in [ONEBIT_ADAM_OPTIMIZER, ZERO_ONE_ADAM_OPTIMIZER]:
if self.dynamic_loss_scale():
log_dist(f'Creating fp16 optimizer with dynamic loss scale', ranks=[0])
timers = self.timers if self.wall_clock_breakdown() else None
optimizer = FP16_Optimizer(
optimizer,
deepspeed=self,
dynamic_loss_scale=True,
initial_dynamic_scale=initial_dynamic_scale,
dynamic_loss_args=dynamic_loss_args,
mpu=self.mpu,
clip_grad=clip_grad,
fused_adam_legacy=self.optimizer_legacy_fusion(),
timers=timers,
has_moe_layers=self.has_moe_layers,
)
else:
log_dist(f'Creating fp16 optimizer with static loss scale: {self.loss_scale()}', ranks=[0])
optimizer = FP16_Optimizer(
optimizer,
deepspeed=self,
static_loss_scale=self.loss_scale(),
mpu=self.mpu,
clip_grad=clip_grad,
fused_adam_legacy=self.optimizer_legacy_fusion(),
has_moe_layers=self.has_moe_layers,
)
else:
log_dist(f'Creating fp16 unfused optimizer with dynamic loss scale', ranks=[0])
optimizer = FP16_UnfusedOptimizer(
optimizer,
deepspeed=self,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=dynamic_loss_args,
mpu=self.mpu,
clip_grad=clip_grad,
fused_lamb_legacy=self.optimizer_name() == LAMB_OPTIMIZER,
)
return optimizer
def _configure_bf16_optimizer(self, optimizer):
clip_grad = self.gradient_clipping()
if optimizer is None:
optimizer = DummyOptim(list(self.module.parameters()))
log_dist('Creating BF16 optimizer', ranks=[0])
timers = self.timers if self.wall_clock_breakdown() else None
optimizer = BF16_Optimizer(optimizer,
self.param_names,
mpu=self.mpu,
clip_grad=clip_grad,
allgather_bucket_size=self.zero_allgather_bucket_size(),
dp_process_group=self.data_parallel_group,
timers=timers)
return optimizer
def _configure_zero_optimizer(self, optimizer):
zero_stage = self.zero_optimization_stage()
mics_shard_size = self.mics_shard_size()
model_dtype, grad_accum_dtype = self.get_data_types()
timers = self.timers if self.wall_clock_breakdown() else None
if optimizer is None:
optimizer = DummyOptim(list(self.module.parameters()))
if self.zero_legacy_stage1():
raise Exception(
"The deprecated version of ZeRO Stage 1 is not supported in deepspeed >= 0.5.9. Please downgrade to a version less than 0.5.9 if you need to use this deprecated version of ZeRO."
)
if zero_stage <= ZeroStageEnum.gradients:
overlap_comm = self.zero_overlap_comm()
contiguous_gradients = self.zero_contiguous_gradients()
round_robin_gradients = self.zero_round_robin_gradients()
assert not isinstance(optimizer, DummyOptim), "zero stage {} requires an optimizer".format(zero_stage)
log_dist(f'Creating {model_dtype} ZeRO stage {zero_stage} optimizer', ranks=[0])
# Overlap and contiguous grads are meaningless in stage 1 and are ignored
if zero_stage == ZeroStageEnum.optimizer_states:
overlap_comm = False
round_robin_gradients = False
# Non-MoE requires contiguous grads to be disabled with stage 1
if not self.has_moe_layers:
contiguous_gradients = False
if isinstance(self.module, PipelineModule):
if overlap_comm:
logger.warning("Pipeline parallelism does not support overlapped communication, will be disabled.")
overlap_comm = False
optimizer = DeepSpeedZeroOptimizer(
optimizer,
self.param_names,
timers=timers,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=self.dynamic_loss_scale_args(),
clip_grad=self.gradient_clipping(),
contiguous_gradients=contiguous_gradients,
reduce_bucket_size=self.zero_reduce_bucket_size(),
allgather_bucket_size=self.zero_allgather_bucket_size(),
dp_process_group=self.data_parallel_group,
expert_parallel_group=self.expert_parallel_group if self.has_moe_layers else None,
expert_data_parallel_group=self.expert_data_parallel_group if self.has_moe_layers else None,
reduce_scatter=self.zero_reduce_scatter(),
overlap_comm=overlap_comm,
cpu_offload=self.zero_cpu_offload(),
mpu=self.mpu,
postscale_gradients=self.postscale_gradients(),
gradient_predivide_factor=self.gradient_predivide_factor(),
gradient_accumulation_steps=self.gradient_accumulation_steps(),
ignore_unused_parameters=self.zero_ignore_unused_parameters(),
partition_grads=zero_stage == ZeroStageEnum.gradients,
round_robin_gradients=round_robin_gradients,
has_moe_layers=self.has_moe_layers,
fp16_master_weights_and_gradients=self.fp16_master_weights_and_gradients(),
communication_data_type=self.communication_data_type,
elastic_checkpoint=self.zero_elastic_checkpoint())
elif zero_stage == ZeroStageEnum.weights:
assert not self.has_moe_layers, "MoE not supported with Stage 3"
if isinstance(optimizer, DummyOptim):
log_dist("Creating ZeRO Offload", ranks=[0])
zpg = groups._get_zero_param_intra_parallel_group()
if self.zero_hpz_partition_size() > 1 and zpg is None:
self._set_zero_group_parallelism()
zpg = groups._get_zero_param_intra_parallel_group()
optimizer = DeepSpeedZeRoOffload(self.module,
timers=timers,
ds_config=self.config,
overlap_comm=self.zero_overlap_comm(),
prefetch_bucket_size=self.zero_prefetch_bucket_size(),
max_reuse_distance=self.zero_max_reuse_distance(),
max_live_parameters=self.zero_max_live_parameters(),
param_persistence_threshold=self.zero_param_persistence_threshold(),
model_persistence_threshold=self.zero_model_persistence_threshold(),
offload_param_config=self.zero_offload_param(),
mpu=self.mpu,
zero_param_parallel_group=zpg,
zero_quantized_weights=self.zero_quantized_weights())
else:
log_dist(
f'Creating fp16 ZeRO stage {zero_stage} optimizer,'
f' MiCS is enabled {mics_shard_size>0},'
f' Hierarchical params gather {self._config.mics_hierarchial_params_gather}',
ranks=[0])
if mics_shard_size > 0:
return self._return_mics_optimizer(optimizer, timers)
log_dist(f'Creating {model_dtype} ZeRO stage {zero_stage} optimizer', ranks=[0])
from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3
optimizer = DeepSpeedZeroOptimizer_Stage3(
self.module,
optimizer,
timers=timers,
ds_config=self.config,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=self.dynamic_loss_scale_args(),
clip_grad=self.gradient_clipping(),
contiguous_gradients=self.zero_contiguous_gradients(),
reduce_bucket_size=self.zero_reduce_bucket_size(),
prefetch_bucket_size=self.zero_prefetch_bucket_size(),
max_reuse_distance=self.zero_max_reuse_distance(),
max_live_parameters=self.zero_max_live_parameters(),
param_persistence_threshold=self.zero_param_persistence_threshold(),
model_persistence_threshold=self.zero_model_persistence_threshold(),
dp_process_group=self.data_parallel_group,
all2all_process_group=self.local_all_to_all_group,
reduce_scatter=self.zero_reduce_scatter(),
overlap_comm=self.zero_overlap_comm(),
offload_optimizer_config=self.zero_offload_optimizer(),
offload_param_config=self.zero_offload_param(),
sub_group_size=self.zero_sub_group_size(),
mpu=self.mpu,
postscale_gradients=self.postscale_gradients(),
gradient_predivide_factor=self.gradient_predivide_factor(),
gradient_accumulation_steps=self.gradient_accumulation_steps(),
aio_config=self.aio_config(),
communication_data_type=self.communication_data_type,
zero_hpz_partition_size=self.zero_hpz_partition_size(),
zero_quantized_weights=self.zero_quantized_weights())
else:
raise NotImplementedError("ZeRO stage {} not implemented".format(zero_stage))
return optimizer
def _return_mics_optimizer(self, basic_optimizer, timers):
from deepspeed.runtime.zero.mics import MiCS_Optimizer
optimizer = MiCS_Optimizer(self.module,
basic_optimizer,
timers=timers,
ds_config=self.config,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=self.dynamic_loss_scale_args(),
clip_grad=self.gradient_clipping(),
contiguous_gradients=self.zero_contiguous_gradients(),
reduce_bucket_size=self.zero_reduce_bucket_size(),
prefetch_bucket_size=self.zero_prefetch_bucket_size(),
max_reuse_distance=self.zero_max_reuse_distance(),
max_live_parameters=self.zero_max_live_parameters(),
param_persistence_threshold=self.zero_param_persistence_threshold(),
model_persistence_threshold=self.zero_model_persistence_threshold(),
dp_process_group=self.data_parallel_group,
reduce_scatter=self.zero_reduce_scatter(),
overlap_comm=self.zero_overlap_comm(),
offload_optimizer_config=self.zero_offload_optimizer(),
offload_param_config=self.zero_offload_param(),
sub_group_size=self.zero_sub_group_size(),
mpu=self.mpu,
postscale_gradients=self.postscale_gradients(),
gradient_predivide_factor=self.gradient_predivide_factor(),
gradient_accumulation_steps=self.gradient_accumulation_steps(),
aio_config=self.aio_config(),
communication_data_type=self.communication_data_type)
return optimizer
def _configure_eigenvalue(self):
eigenvalue = Eigenvalue(
verbose=self.eigenvalue_verbose(),
max_iter=self.eigenvalue_max_iter(),
tol=self.eigenvalue_tol(),
stability=self.eigenvalue_stability(),
gas_boundary_resolution=self.eigenvalue_gas_boundary_resolution(),
layer_name=self.eigenvalue_layer_name(),
layer_num=self.eigenvalue_layer_num(),
)
return eigenvalue
def _configure_progressive_layer_drop(self):
pld = ProgressiveLayerDrop(theta=self.pld_theta(), gamma=self.pld_gamma())
return pld
def _configure_curriculum_scheduler_legacy(self):
scheduler = CurriculumScheduler(self.curriculum_params_legacy())
return scheduler
@staticmethod
def is_map_style_dataset(obj):
return hasattr(obj, "__getitem__") and hasattr(obj, "__len__")
@staticmethod
def is_iterable_style_dataset(obj):
return isinstance(obj, torch.utils.data.IterableDataset) # hasattr(obj, "__iter__") should work as well
def dataloader_drop_last(self):
return self._config.dataloader_drop_last
def was_step_applied(self) -> bool:
"""Returns True if the latest ``step()`` produced in parameter updates.
Note that a ``False`` return is not an error condition. Steps are frequently
no-ops, such as between gradient accumulation boundaries or when overflows
occur.
Returns:
bool: Whether the latest ``step()`` modified model parameters.
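Example (illustrative; ``engine`` is assumed to be an initialized DeepSpeedEngine):
.. code-block:: python
    engine.backward(loss)
    engine.step()
    if engine.was_step_applied():
        # parameters were actually updated (not an accumulation no-op or overflow skip)
        current_lr = engine.get_lr()[0]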
"""
return self._step_applied
def deepspeed_io(self,
dataset,
batch_size=None,
route=ROUTE_TRAIN,
pin_memory=True,
data_sampler=None,
collate_fn=None,
num_local_io_workers=None):
if not (self.is_map_style_dataset(dataset) or self.is_iterable_style_dataset(dataset)):
raise ValueError("Training data must be a torch Dataset")
if batch_size is None:
batch_size = self.train_micro_batch_size_per_gpu()
if collate_fn is None:
collate_fn = self.collate_fn
# Currently we only use timer in train route
deepspeed_io_timer = None
if route == ROUTE_TRAIN:
deepspeed_io_timer = self.tput_timer
# If mpu is provided, forward world size and parallel rank to sampler.
data_parallel_world_size = self.dp_world_size
data_parallel_rank = self.global_rank
if self.mpu is not None:
data_parallel_world_size = self.mpu.get_data_parallel_world_size()
data_parallel_rank = self.mpu.get_data_parallel_rank()
if data_sampler is None and (route == ROUTE_PREDICT or route == ROUTE_EVAL):
data_sampler = torch.utils.data.DistributedSampler(
dataset,
num_replicas=data_parallel_world_size,
rank=data_parallel_rank,
shuffle=False,
)
deepspeed_dataloader_config = {}
if self.curriculum_learning_enabled():
deepspeed_dataloader_config = {
CURRICULUM_LEARNING: self.curriculum_learning_enabled(),
DATA_EFFICIENCY: self.data_efficiency_config(),
DATA_PARALLEL_GROUP: self.data_parallel_group,
GRADIENT_ACCUMULATION_STEPS: self.gradient_accumulation_steps(),
GLOBAL_RANK: self.global_rank,
DATA_SAMPLING_NUM_WORKERS: self.data_sampling_config()[DATA_SAMPLING_NUM_WORKERS]
}
return DeepSpeedDataLoader(dataset=dataset,
batch_size=batch_size,
pin_memory=pin_memory,
collate_fn=collate_fn,
local_rank=self.local_rank,
tput_timer=deepspeed_io_timer,
num_local_io_workers=num_local_io_workers,
data_sampler=data_sampler,
data_parallel_world_size=data_parallel_world_size,
data_parallel_rank=data_parallel_rank,
dataloader_drop_last=self.dataloader_drop_last(),
deepspeed_dataloader_config=deepspeed_dataloader_config)
def train(self, mode=True):
r""""""
self.warn_unscaled_loss = True
self.module.train(mode)
def eval(self):
r""""""
self.warn_unscaled_loss = True
self.module.train(False)
def _scale_loss_by_gas(self, prescaled_loss):
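# Scales an already-computed loss by 1 / gradient_accumulation_steps() so that gradients
# summed over an accumulation window average to the per-batch gradient.
# Worked example: with gradient_accumulation_steps() == 4, a micro-batch loss of 2.0 is
# scaled to 0.5 before backward.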
if isinstance(prescaled_loss, torch.Tensor):
scaled_loss = prescaled_loss / self.gradient_accumulation_steps()
elif isinstance(prescaled_loss, tuple) or isinstance(prescaled_loss, list):
scaled_loss = []
for l in prescaled_loss:
if isinstance(l, torch.Tensor):
scaled_loss.append(l / self.gradient_accumulation_steps())
else:
scaled_loss.append(l)
else:
scaled_loss = prescaled_loss
if self.warn_unscaled_loss:
logger.warning(f"DeepSpeed unable to scale loss because of type: {type(prescaled_loss)}")
self.warn_unscaled_loss = False
return scaled_loss
@instrument_w_nvtx
def forward(self, *inputs, **kwargs):
r"""Execute forward propagation
Arguments:
*inputs: Variable length input list
**kwargs: variable length keyword arguments
"""
if self.autotuning_profile_model_info():
ma = get_ma_status()
else:
see_memory_usage("Engine before forward", force=self.memory_breakdown())
flops_profiler_active = (self.flops_profiler_enabled()
and self.global_steps == self.flops_profiler_profile_step() and self.global_rank == 0)
# used to check quantization happens at step 0!
if self.global_steps == 0 and hasattr(self, "compression_scheduler"):
self.compression_scheduler.step(step_zero_check=True)
if self.quantizer:
tensor_to_quantize = self.optimizer.bit16_groups if self.zero_optimization_stage(
) == 2 else self.optimizer.fp16_groups
if self.compression_scheduler.weight_quantization_enabled:
self.quantizer.quantize(
tensor_to_quantize,
(self.optimizer.overflow if self.fp16_enabled() else False),
self.eigenvalue_enabled(),
None,
)
if flops_profiler_active:
self.flops_profiler.start_profile(ignore_list=None)
if self.module.training:
if self.progressive_layer_drop:
kwargs.update(self.progressive_layer_drop.get_state())
if self.__class__.__name__ != "PipelineEngine":
# TODO: The above if condition is a HACK since for PipelineEngine
# it's difficult to inject arguments into the forward pass.
if self.module.training and self.curriculum_enabled_legacy():
self.curriculum_scheduler_legacy.update_difficulty(self.global_steps + 1)
if self.curriculum_params_legacy()["curriculum_type"] == "seqlen":
kwargs.update({"curriculum_seqlen": self.curriculum_scheduler_legacy.get_current_difficulty()})
if self.module.training and self.random_ltd_enabled():
self.random_ltd_scheduler.update_seq(self.global_steps)
if self.zero_optimization_partition_weights():
# Enable automated discovery of external parameters by indicating that
# we are in a forward pass.
for module in self.module.modules():
module._parameters._in_forward = True
self._start_timers(self.engine_timers.forward_timers)
if self.training_dataloader is None:
self.tput_timer.start()
if self.fp16_auto_cast():
inputs = self._cast_inputs_half(inputs)
loss = self.module(*inputs, **kwargs)
if self.zero_optimization_partition_weights():
# Disable automated discovery of external parameters
for module in self.module.modules():
module._parameters._in_forward = False
self._stop_timers(self.engine_timers.forward_timers)
if flops_profiler_active:
self.flops_profiler.stop_profile()
if self.autotuning_profile_model_info():
activation_mem = get_ma_status() - ma
self.autotuning_model_info["activation_mem_per_gpu"] = activation_mem
print_json_dist(self.autotuning_model_info, [0], path=self.autotuning_model_info_path())
exit()
else:
see_memory_usage("Engine after forward", force=self.memory_breakdown())
return loss
def _cast_inputs_half(self, inputs):
if isinstance(inputs, (list, tuple)):
new_inputs = []
for v in inputs:
new_inputs.append(self._cast_inputs_half(v))
return inputs.__class__(new_inputs)
elif isinstance(inputs, dict):
new_inputs = {}
for k, v in inputs.items():
new_inputs[k] = self._cast_inputs_half(v)
return new_inputs
elif hasattr(inputs, 'half'):
return inputs.half()
else:
return inputs
def print_forward_breakdown(self, fwd_time):
gate_time = 0.0
moe_time = 0.0
falltoall = 0.0
salltoall = 0.0
for gate in self.gate_modules:
#logger.info(f"Individual TopK gate time: {gate.gate_time:.2f} ms")
gate_time += gate.gate_time
for l in self.moe_layers:
#logger.info(f"MoE layer; total: {l.time_moe:.2f} ms, first alltoall: {l.time_falltoall:.2f}, second alltoall: {l.time_salltoall:.2f}")
moe_time += l.time_moe
falltoall += l.time_falltoall
salltoall += l.time_salltoall
# TODO: Allreduce/average them across ranks for more accurate timing.
# if deepspeed.comm.get_rank() == 0:
log_dist(
f"rank={dist.get_rank()} time (ms) | forward: {fwd_time:.2f} (forward_moe: {moe_time:.2f}, 1st alltoall: {falltoall:.2f}, 2nd alltoall: {salltoall:.2f}, top-k: {gate_time:.2f})",
ranks=[0])
@instrument_w_nvtx
def allreduce_gradients(self, bucket_size=MEMORY_OPT_ALLREDUCE_SIZE):
assert not (self.bfloat16_enabled() and self.pipeline_parallelism), \
'allreduce_gradients() is not valid when bfloat16 + pipeline parallelism is enabled'
# Pass (PP) gas boundary flag to optimizer (required for zero)
self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary()
# ZeRO stage >= 2 communicates during non gradient accumulation boundaries as well
if self.zero_optimization_partition_gradients():
self.optimizer.overlapping_partition_gradients_reduce_epilogue()
# Communicate only at gradient accumulation boundaries
elif self.is_gradient_accumulation_boundary():
if self.zero_optimization_stage() == ZeroStageEnum.optimizer_states and hasattr(
self.optimizer, 'reduce_gradients'):
self.optimizer.reduce_gradients(pipeline_parallel=self.pipeline_parallelism)
else:
self.buffered_allreduce_fallback(elements_per_buffer=bucket_size)
@instrument_w_nvtx
def backward(self, loss, allreduce_gradients=True, release_loss=False, retain_graph=False, scale_wrt_gas=True):
r"""Execute backward pass on the loss
Arguments:
loss: Torch tensor on which to execute backward propagation
allreduce_gradients: deprecated and ignored; it will soon be removed
release_loss: currently a no-op placeholder (the loss tensor is not released)
retain_graph: bool, default: False. Forwarded to the underlying ``backward()`` call.
scale_wrt_gas: bool, default: True. Whether to scale the loss w.r.t. gradient accumulation steps.
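Example (illustrative; ``engine`` and ``micro_batch`` are assumed to be provided by the caller):
.. code-block:: python
    loss = engine(micro_batch)
    engine.backward(loss)
    engine.step()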
"""
see_memory_usage("Engine before backward", force=self.memory_breakdown())
if self.scale_wrt_gas is not None:
scale_wrt_gas = self.scale_wrt_gas
if not allreduce_gradients:
logger.warning(f"Argument `allreduce_gradients` is deprecated, ignored, and will soon be removed")
# scale loss w.r.t. gradient accumulation if needed
if self.gradient_accumulation_steps() > 1 and scale_wrt_gas:
loss = self._scale_loss_by_gas(loss.float())
# Log training Loss
if self.monitor.enabled:
if self.is_gradient_accumulation_boundary():
if self.global_rank == 0:
self.summary_events = [(
f"Train/Samples/train_loss",
sum(self.losses) / self.gradient_accumulation_steps(),
self.global_samples,
)]
self.monitor.write_events(self.summary_events)
if self.is_gradient_accumulation_boundary():
self.losses = []
else:
self.losses.append(loss.mean().item())
self._start_timers(self.engine_timers.backward_timers)
assert self.optimizer is not None and not isinstance(self.optimizer, DummyOptim), \
"must provide optimizer during init in order to use backward"
self._start_timers(self.engine_timers.backward_inner_timers)
if self.zero_optimization():
self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary()
self.optimizer.backward(loss, retain_graph=retain_graph)
elif self.amp_enabled():
# AMP requires delaying unscale when inside gradient accumulation boundaries
# https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations
delay_unscale = not self.is_gradient_accumulation_boundary()
with amp.scale_loss(loss, self.optimizer, delay_unscale=delay_unscale) as scaled_loss:
scaled_loss.backward(retain_graph=retain_graph)
elif self.fp16_enabled():
if self.eigenvalue_enabled():
self.optimizer.backward(loss, create_graph=True, retain_graph=True)
else:
self.optimizer.backward(loss, retain_graph=retain_graph)
elif self.bfloat16_enabled():
self.optimizer.backward(loss)
else:
if self.eigenvalue_enabled():
loss.backward(create_graph=True, retain_graph=True)
else:
loss.backward(retain_graph=retain_graph)
self._stop_timers(self.engine_timers.backward_inner_timers)
self._start_timers(self.engine_timers.backward_reduce_timers)
if allreduce_gradients and self.enable_backward_allreduce:
# Traditional code path that allreduces the module parameter grads
self.allreduce_gradients()
self._stop_timers(self.engine_timers.backward_reduce_timers)
self._stop_timers(self.engine_timers.backward_timers)
if release_loss:
# loss.data = None
pass
see_memory_usage("Engine after backward", force=self.memory_breakdown())
return loss
def is_gradient_accumulation_boundary(self):
"""
Query whether the current micro-batch is at the boundary of
gradient accumulation, and thus will trigger gradient reductions and
an optimizer step.
Returns:
bool: True if the current step is a gradient accumulation boundary.
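Worked example: with ``gradient_accumulation_steps() == 4`` this is True on
micro-steps 3, 7, 11, ... (every fourth micro-batch), unless the boundary has been
overridden via ``set_gradient_accumulation_boundary()``.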
"""
if self._is_gradient_accumulation_boundary is None:
return (self.micro_steps + 1) % \
self.gradient_accumulation_steps() == 0
else:
return self._is_gradient_accumulation_boundary
def set_gradient_accumulation_boundary(self, is_boundary):
"""
Manually overrides the DeepSpeed engine's gradient accumulation boundary state. This is an optional
feature and should be used with care. The state should be set to the intended
value before each forward/backward. The final forward/backward should have the
boundary state set to True. This style allows client code to call engine.step() only once after all
the gradient accumulation passes are complete. See example below:
.. code-block:: python
engine.set_gradient_accumulation_boundary(False)
for _ in range(gradient_accumulation_steps - 1):
micro_batch = next(data_loader)
loss = engine(micro_batch)
engine.backward(loss)
engine.set_gradient_accumulation_boundary(True)
micro_batch = next(data_loader)
loss = engine(micro_batch)
engine.backward(loss)
engine.step()
Arguments:
is_boundary (bool): are we at a gradient accumulation boundary or not?
"""
self._is_gradient_accumulation_boundary = is_boundary
self.optimizer.is_gradient_accumulation_boundary = is_boundary
def zero_grad(self):
"""
Zero parameter grads.
"""
for param_name, param in self.module.named_parameters():
param.grad = None
def clip_fp32_gradients(self):
clip_grad_norm_(parameters=self.module.parameters(), max_norm=self.gradient_clipping(), mpu=self.mpu)
def _take_model_step(self, lr_kwargs, block_eigenvalue={}):
if self.gradient_clipping() > 0.0:
if not (self.fp16_enabled() or self.bfloat16_enabled() or self.amp_enabled() or self.zero_optimization()):
self.clip_fp32_gradients()
elif self.amp_enabled():
# AMP's recommended way of doing clipping
# https://nvidia.github.io/apex/advanced.html#gradient-clipping
master_params = amp.master_params(self.optimizer)
clip_grad_norm_(parameters=master_params, max_norm=self.gradient_clipping(), mpu=self.mpu)
self.optimizer.step()
if hasattr(self.optimizer, '_global_grad_norm'):
self._global_grad_norm = self.optimizer._global_grad_norm
# Quantize the updated parameter if there is no overflow
if self.quantizer:
tensor_to_quantize = self.optimizer.bit16_groups if self.zero_optimization_stage(
) == 2 else self.optimizer.fp16_groups
if self.compression_scheduler.weight_quantization_enabled:
self.quantizer.quantize(
tensor_to_quantize,
(self.optimizer.overflow if self.fp16_enabled() else False),
self.eigenvalue_enabled(),
block_eigenvalue,
)
# zero grad in basic optimizer could be unreliable and may not exhibit
# the behavior that we want
if self.bfloat16_enabled():
# TODO: Temporary until bf16_optimizer and zero_optimizer are integrated
if self.zero_optimization() and hasattr(self.optimizer, "zero_grad"):
self.optimizer.zero_grad()
else:
pass
elif self.zero_optimization() or self.fp16_enabled() or self.amp_enabled():
self.optimizer.zero_grad()
else:
self.zero_grad()
report_progress = self.global_rank == 0 if self.global_rank else True
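# Note: for any non-zero rank the expression above evaluates to False; rank 0 (falsy)
# falls through to True, so only global rank 0 reports progress.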
# Check overflow here since in the DS fp16 optimizer, overflow is updated in the step() call above.
overflow = False
if hasattr(self.optimizer, "overflow"):
overflow = self.optimizer.overflow
self._step_applied = not overflow
if overflow:
self.skipped_steps += 1
else:
self.compression_scheduler.step()
if self.lr_scheduler is not None:
try:
self.lr_scheduler.step(**(lr_kwargs or {}))
except TypeError:
# XXX Hack to work with Megatron 2.0 and DeepSpeed pipelines.
# We don't currently have a way to specify lr_kwargs from
# pipe_engine.train_batch()
self.lr_scheduler.step(increment=self.train_batch_size())
if report_progress and (self.global_steps + 1) % self.steps_per_print() == 0:
self._report_progress(self.global_steps + 1)
self.global_steps += 1
self.global_samples += self.train_batch_size()
def step(self, lr_kwargs=None):
r"""Execute the weight update step after forward and backward propagation
on effective_train_batch.
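Typical usage (illustrative sketch):
.. code-block:: python
    for micro_batch in data_loader:
        loss = engine(micro_batch)
        engine.backward(loss)
        engine.step()  # a real parameter update happens only at accumulation boundaries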
"""
see_memory_usage("Engine before step", force=self.memory_breakdown())
# Check early because self.global_steps is incremented at some point here.
# TODO: Delay self.global_steps increment until very end of this function.
flops_profiler_active = self.flops_profiler_enabled(
) and self.global_steps == self.flops_profiler_profile_step() and self.global_rank == 0
self._start_timers(self.engine_timers.step_timers)
assert self.optimizer is not None and not isinstance(self.optimizer, DummyOptim), \
"must provide optimizer during init in order to use step"
report_progress = False
self._step_applied = False # assume False, will flip to True
# Update the model when we reach gradient accumulation boundaries
if self.is_gradient_accumulation_boundary():
self.gas_boundary_ctr += 1
if (self.eigenvalue_enabled() and (self.gas_boundary_ctr % self.eigenvalue_gas_boundary_resolution() == 0)
and self.quantizer.any_precision_switch()):
log_dist(f"computing eigenvalue...", ranks=[0])
self.block_eigenvalue = self.eigenvalue.compute_eigenvalue(self.module, self.device,
self.optimizer.cur_scale)
if self.progressive_layer_drop:
self.progressive_layer_drop.update_state(self.global_steps)
if (self.eigenvalue_enabled() and not self.gas_boundary_ctr % self.eigenvalue_gas_boundary_resolution()
and self.quantizer.any_precision_switch()):
self._take_model_step(lr_kwargs, self.block_eigenvalue)
else:
self._take_model_step(lr_kwargs)
report_progress = self.global_rank == 0 if self.global_rank else True
self.tput_timer.stop(global_step=self.is_gradient_accumulation_boundary(), report_speed=report_progress)
self._stop_timers(self.engine_timers.step_timers)
# Log learning rate
if self.monitor.enabled:
if self.is_gradient_accumulation_boundary():
if self.global_rank == 0:
self.summary_events = [(f"Train/Samples/lr", self.get_lr()[0], self.global_samples)]
if self.fp16_enabled() and hasattr(self.optimizer, "cur_scale"):
self.summary_events.append((
f"Train/Samples/loss_scale",
self.optimizer.cur_scale,
self.global_samples,
))
if (self.eigenvalue_enabled()
and not self.gas_boundary_ctr % self.eigenvalue_gas_boundary_resolution()):
ev_values = list(self.block_eigenvalue.values())
for i in range(len(ev_values)):
self.summary_events.append((
f"Train/Eigenvalues/ModelBlockParam_{i}",
ev_values[i][0],
self.global_samples,
))
self.monitor.write_events(self.summary_events)
# Check flops profiling
if flops_profiler_active:
if self.autotuning_enabled():
self.flops = self.flops_profiler.get_total_flops() * 3
else:
self.flops_profiler.print_model_profile(
profile_step=self.global_steps,
module_depth=self.flops_profiler_module_depth(),
top_modules=self.flops_profiler_top_modules(),
detailed=self.flops_profiler_detailed(),
output_file=self.flops_profiler_output_file(),
)
self.flops_profiler.end_profile()
if self.autotuning_enabled() and self.global_steps == (self.autotuning_end_profile_step() + 1):
self._autotuning_exit()
if self.wall_clock_breakdown():
# Log micro timing and reset
self.timers.log(names=self.engine_timers.micro_timers, memory_breakdown=self.memory_breakdown())
if self.wall_clock_breakdown() or self.flops_profiler_enabled():
# Log global timing and reset
if self.is_gradient_accumulation_boundary():
if self.monitor.enabled:
self._write_monitor()
if self.has_moe_layers:
fwd_time = self.timers(FORWARD_GLOBAL_TIMER).elapsed(reset=False)
self.print_forward_breakdown(fwd_time=fwd_time)
self.timers.log(self.engine_timers.global_timers)
self.micro_steps += 1
see_memory_usage("Engine after step", force=self.memory_breakdown())
def _start_timers(self, timer_names):
for name in timer_names:
self.timers(name).start()
def _stop_timers(self, timer_names):
record = self.is_gradient_accumulation_boundary() and \
self.flops_profiler_enabled() and \
(self.global_steps >= self.flops_profiler_profile_step())
for name in timer_names:
self.timers(name).stop(record=record)
def _autotuning_exit(self):
if self.global_rank == 0:
msg = self.timers.get_mean([
FORWARD_GLOBAL_TIMER,
BACKWARD_GLOBAL_TIMER,
STEP_GLOBAL_TIMER,
], reset=False)
titer = 0.0
titer += msg[FORWARD_GLOBAL_TIMER] if FORWARD_GLOBAL_TIMER in msg else 0
titer += msg[BACKWARD_GLOBAL_TIMER] if BACKWARD_GLOBAL_TIMER in msg else 0
titer += msg[STEP_GLOBAL_TIMER] if STEP_GLOBAL_TIMER in msg else 0
msg["latency"] = titer
msg["FLOPS_per_gpu"] = self.flops * 1_000_000 * self.gradient_accumulation_steps() / titer
msg["throughput"] = self.train_batch_size() * 1_000_000 / \
msg["latency"]
print_json_dist(msg, [0], path=self.autotuning_metric_path())
log_dist(
f"Wrote metrics to {self.autotuning_metric_path()}, {os.path.abspath(self.autotuning_metric_path())}",
ranks=[0])
import atexit
atexit.register(print, "Autotuning: done with running current ds config.")
exit()
def _write_monitor(self):
if self.global_rank == 0:
self.summary_events = [
(
f"Train/Samples/elapsed_time_ms_forward",
self.timers(FORWARD_GLOBAL_TIMER).elapsed(reset=False),
self.global_samples,
),
(
f"Train/Samples/elapsed_time_ms_backward",
self.timers(BACKWARD_GLOBAL_TIMER).elapsed(reset=False),
self.global_samples,
),
(
f"Train/Samples/elapsed_time_ms_backward_inner",
self.timers(BACKWARD_INNER_GLOBAL_TIMER).elapsed(reset=False),
self.global_samples,
),
(
f"Train/Samples/elapsed_time_ms_backward_allreduce",
self.timers(BACKWARD_REDUCE_GLOBAL_TIMER).elapsed(reset=False),
self.global_samples,
),
(
f"Train/Samples/elapsed_time_ms_step",
self.timers(STEP_GLOBAL_TIMER).elapsed(reset=False),
self.global_samples,
),
]
self.monitor.write_events(self.summary_events)
def _get_optimizer_param(self, param_name):
result = []
if not self.optimizer:
return result
for group in self.optimizer.param_groups:
if param_name in group:
result.append(group[param_name])
else:
result.append(0.0)
return result
def get_lr(self):
return self._get_optimizer_param("lr")
def get_type(self):
return self._get_optimizer_param("type")
def get_mom(self):
if self.optimizer_name() in ["SGD", "RMSprop"]:
return self._get_optimizer_param("momentum")
else:
return self._get_optimizer_param("betas")
def get_pld_theta(self):
if self.progressive_layer_drop:
return self.progressive_layer_drop.get_theta()
else:
return None
def _report_progress(self, step):
lr = self.get_lr()
mom = self.get_mom()
log_dist(f"step={step}, skipped={self.skipped_steps}, lr={lr}, mom={mom}", ranks=[0])
def allreduce_bucket(self, bucket, dp_group):
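# Averaging math (a sketch of the logic below): let f = gradient_predivide_factor() and
# W = data-parallel world size. With postscale_gradients(), tensors are multiplied by 1/f
# before the summing all_reduce and by f/W afterwards (skipped when f == W), netting a 1/W
# average. Without postscale, tensors are pre-divided by W before the all_reduce.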
tensor = self.flatten(bucket)
tensor_to_allreduce = tensor
if self.communication_data_type != tensor.dtype:
tensor_to_allreduce = tensor.to(self.communication_data_type)
if self.postscale_gradients():
if self.gradient_predivide_factor() != 1.0:
tensor_to_allreduce.mul_(1.0 / self.gradient_predivide_factor())
dist.all_reduce(tensor_to_allreduce, group=dp_group)
if self.gradient_average:
if self.gradient_predivide_factor() != dist.get_world_size(group=dp_group):
tensor_to_allreduce.mul_(self.gradient_predivide_factor() / dist.get_world_size(group=dp_group))
else:
tensor_to_allreduce.mul_(1. / dist.get_world_size(group=dp_group))
dist.all_reduce(tensor_to_allreduce, group=dp_group)
if self.communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce:
tensor.copy_(tensor_to_allreduce)
return tensor
def allreduce_and_copy(self, small_bucket, dp_group):
allreduced = self.allreduce_bucket(small_bucket, dp_group)
for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)):
buf.copy_(synced)
def allreduce_no_retain(self, bucket, dp_group, numel_per_bucket=500000000):
small_bucket = []
numel = 0
for tensor in bucket:
small_bucket.append(tensor)
numel = numel + tensor.numel()
if numel > numel_per_bucket:
self.allreduce_and_copy(small_bucket, dp_group)
small_bucket = []
numel = 0
if len(small_bucket) > 0:
self.allreduce_and_copy(small_bucket, dp_group)
def _get_gradients_for_reduction(self):
non_expert_grads = []
expert_grads = {}
if self.has_moe_layers:
for key in self.expert_data_parallel_group.keys():
expert_grads[key] = []
for param_name, param in self.module.named_parameters():
if not param.requires_grad:
continue
if param.grad is None:
# In cases where there is an imbalance of empty grads across
# ranks we must create empty grads, this will ensure that every
# rank is reducing the same size. In some cases it may make
# sense in the future to support the ability to average not
# w.r.t. world size but with a different value.
param.grad = torch.zeros(param.size(), dtype=param.dtype, device=param.device)
grad_data = param.grad.data
if param_name in self.sparse_tensor_module_names or grad_data.is_sparse:
# Call param.grad without data to avoid problem with setting of updated grads
grad_data = SparseTensor(param.grad)
if is_moe_param(param):
expert_grads[param.group_name].append(grad_data)
else:
non_expert_grads.append(grad_data)
return non_expert_grads, expert_grads
def _reduce_non_expert_gradients(self, grads, elements_per_buffer):
split_buckets = split_half_float_double_sparse(grads)
for _, bucket_tuple in enumerate(split_buckets):
bucket_type, bucket = bucket_tuple
if self.pipeline_parallelism:
dp_group = self.mpu.get_data_parallel_group()
else:
dp_group = groups._get_data_parallel_group()
if bucket_type == SparseTensor.type():
self.sparse_allreduce_no_retain(bucket, dp_group=dp_group)
else:
self.allreduce_no_retain(bucket, dp_group=dp_group, numel_per_bucket=elements_per_buffer)
def _reduce_expert_gradients(self, expert_grads, elements_per_buffer):
for ep_name, expert_grads_group in expert_grads.items():
expert_split_buckets = split_half_float_double_sparse(expert_grads_group)
for i, bucket_tuple in enumerate(expert_split_buckets):
bucket_type, bucket = bucket_tuple
if bucket_type == SparseTensor.type():
self.sparse_allreduce_no_retain(bucket, groups._get_expert_data_parallel_group(ep_name))
else:
# Separate between diff groups
self.allreduce_no_retain(bucket,
dp_group=groups._get_expert_data_parallel_group(ep_name),
numel_per_bucket=elements_per_buffer)
def buffered_allreduce_fallback(self, grads=None, elements_per_buffer=500000000):
if grads is None:
non_expert_grads, expert_grads = self._get_gradients_for_reduction()
else:
assert not self.has_moe_layers, "attempting to reduce grads in unsupported way w.r.t. MoE"
non_expert_grads = grads
self._reduce_non_expert_gradients(non_expert_grads, elements_per_buffer)
if self.has_moe_layers:
self._reduce_expert_gradients(expert_grads, elements_per_buffer)
def sparse_allreduce_no_retain(self, bucket, dp_group):
allreduced_sparses = self.sparse_allreduce_bucket(bucket, dp_group)
# Densify sparse tensor and copy back to original location
for tensor in allreduced_sparses:
if tensor.is_sparse:
tensor.orig_dense_tensor.data = tensor.to_coo_tensor()
else:
tensor.orig_dense_tensor.copy_(tensor.to_dense())
def sparse_allreduce_bucket(self, bucket, dp_group):
sparse_list = []
for sparse in bucket:
sparse_list.append(self.sparse_allreduce(sparse, dp_group))
return sparse_list
def sparse_allreduce(self, sparse, dp_group):
original_data_type = sparse.values.dtype
if self.communication_data_type != sparse.values.dtype:
if self.communication_data_type in (torch.float16, torch.bfloat16):
indices = sparse.indices.to(torch.int32)
else:
indices = sparse.indices
values = sparse.values.to(self.communication_data_type)
else:
indices = sparse.indices
values = sparse.values
if self.postscale_gradients():
if self.gradient_average:
values.mul_(self.gradient_predivide_factor() / dist.get_world_size(group=dp_group))
else:
values.mul_(1. / dist.get_world_size(group=dp_group))
indices_device_list = self.sparse_all_gather(indices, dp_group)
values_device_list = self.sparse_all_gather(values, dp_group)
sparse.indices = torch.cat(indices_device_list).to(torch.long)
sparse.values = torch.cat(values_device_list).to(original_data_type)
return sparse
def sparse_all_gather(self, value, dp_group):
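# Ranks may hold different numbers of sparse entries, so each rank pads its tensor up to
# the global max length before all_gather and then trims each gathered tensor back to the
# originating rank's true size using the gathered per-rank sizes.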
my_size = torch.LongTensor([value.size()[0]]).to(self.device)
all_sizes = self.all_gather_scalar(my_size, dp_group)
max_size = torch.cat(all_sizes).max()
fill_size = max_size - my_size
assert value.dim() in [1, 2]
if value.dim() == 1:
if fill_size > 0:
value = torch.cat([value, value.new_empty(fill_size)])
tensor_list = [value.new_empty(max_size) for _ in range(dist.get_world_size(group=dp_group))]
else:
if fill_size > 0:
value = torch.cat([value, value.new_empty(fill_size, value.size()[1])])
tensor_list = [
value.new_empty(max_size,
value.size()[1]) for _ in range(dist.get_world_size(group=dp_group))
]
dist.all_gather(tensor_list, value, group=dp_group)
tensors = []
for dev_idx, t in enumerate(tensor_list):
size = all_sizes[dev_idx][0]
tensors.append(t.index_select(0, torch.arange(size, dtype=torch.long, device=self.device)))
return tensors
def all_gather_scalar(self, value, dp_group):
tensor_list = [value.new_zeros(value.size()) for _ in range(dist.get_world_size(group=dp_group))]
dist.all_gather(tensor_list, value, group=dp_group)
return tensor_list
def module_state_dict(self, destination=None, prefix="", keep_vars=False):
sd = self.module.state_dict(destination, prefix, keep_vars)
if self.random_ltd_enabled():
sd = remove_random_ltd_state_dict(sd)
return sd
@staticmethod
def load_moe_state_dict(checkpoint_path,
tag,
state_dict,
old_moe_load,
model=None,
mpu=None,
num_experts=1,
checkpoint_engine=TorchCheckpointEngine()):
if old_moe_load:
expp_rank = groups._get_expert_data_parallel_rank(groups._get_max_expert_size_name())
num_local_experts = max(num_experts) // groups._get_expert_parallel_world_size(
groups._get_max_expert_size_name())
for local_expert_id in range(num_local_experts):
global_expert_id = expp_rank * num_local_experts + local_expert_id
expert_state_dict = checkpoint_engine.load(
DeepSpeedEngine._get_expert_ckpt_name(
checkpoint_path,
-1, # -1 means ignore layer_id
global_expert_id,
tag,
mpu),
map_location=torch.device('cpu'))
# Updating global -> local expert ids
moe_str_prefix = '.deepspeed_moe.experts.deepspeed_experts.'
for key in list(expert_state_dict.keys()):
local_key = key.replace(f'{moe_str_prefix}{global_expert_id}',
f'{moe_str_prefix}{local_expert_id}')
expert_state_dict[local_key] = expert_state_dict.pop(key)
state_dict.update(expert_state_dict)
else:
moe_layer_id = 0
for n_module, module in model.named_modules():
if isinstance(module, MoE): # and deepspeed.comm.get_rank() == 0:
group_name = module.expert_group_name
num_local_experts = module.num_local_experts
expp_rank = groups._get_expert_parallel_rank(group_name)
# loop all local_experts
for local_expert_id in range(num_local_experts):
global_expert_id = expp_rank * num_local_experts + local_expert_id
expert_state_dict = checkpoint_engine.load(DeepSpeedEngine._get_expert_ckpt_name(
checkpoint_path, moe_layer_id, global_expert_id, tag, mpu),
map_location=torch.device('cpu'))
# print(expert_state_dict.keys())
# Updating global -> local expert ids
moe_str_prefix = '.deepspeed_moe.experts.deepspeed_experts.'
for key in list(expert_state_dict.keys()):
local_key = key.replace(f'{moe_str_prefix}{global_expert_id}',
f'{moe_str_prefix}{local_expert_id}')
expert_state_dict[local_key] = expert_state_dict.pop(key)
state_dict.update(expert_state_dict)
moe_layer_id += 1
def load_module_state_dict(self, checkpoint, strict=True, custom_load_fn=None):
module_state_dict = checkpoint['module']
if custom_load_fn:
custom_load_fn(src=module_state_dict, dst=self.module)
else:
self.module.load_state_dict(
module_state_dict, # TODO
strict=strict)
if checkpoint.get(FROZEN_PARAM_FRAGMENTS, None) is not None:
saved_frozen_params = checkpoint[FROZEN_PARAM_FRAGMENTS]
for param in self.module.parameters():
if param.requires_grad:
continue
if param not in self.param_names:
raise ValueError(f"failed to find frozen {param} in named params")
name = self.param_names[param]
if hasattr(param, 'ds_id'):
param.ds_tensor.data.copy_(saved_frozen_params[name].data)
else:
param.data.copy_(saved_frozen_params[name].data)
def _get_zero_ckpt_prefix(self, dp_rank, bf16_mode):
return f'{"bf16_" if bf16_mode else ""}zero_pp_rank_{dp_rank}'
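# Example of the resulting ZeRO optimizer-state path (values are illustrative):
#   <checkpoints_path>/<tag>/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt   (bf16_mode=True, dp_rank=3, mp_rank=0)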
def _get_rank_zero_ckpt_name(self, checkpoints_path, tag, mp_rank, dp_rank, bf16_mode):
file_prefix = self._get_zero_ckpt_prefix(dp_rank, bf16_mode=bf16_mode)
zero_ckpt_name = os.path.join(
checkpoints_path,
str(tag),
f"{file_prefix}_mp_rank_{mp_rank:02d}_optim_states.pt",
)
return zero_ckpt_name
def _get_zero_ckpt_name(self, checkpoints_path, tag):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
pp_rank = dist.get_rank(group=self.optimizer.dp_process_group)
bf16_mode = self.bfloat16_enabled()
return self._get_rank_zero_ckpt_name(checkpoints_path, tag, mp_rank, pp_rank, bf16_mode)
def _get_ckpt_name(self, checkpoints_path, tag, mp_placeholder=None):
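# Model-state path examples (values are illustrative):
#   <checkpoints_path>/<tag>/mp_rank_00_model_states.pt                    (no ZeRO-3 weight partitioning)
#   <checkpoints_path>/<tag>/zero_pp_rank_2_mp_rank_00_model_states.pt     (ZeRO-3, dp rank 2)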
if mp_placeholder is not None:
mp_rank_str = mp_placeholder
else:
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
mp_rank_str = f"{mp_rank:02d}"
if self.zero_optimization_partition_weights():
filename = "zero_pp_rank_{}".format(dist.get_rank(group=self.optimizer.dp_process_group))
ckpt_name = os.path.join(
checkpoints_path,
str(tag),
f"{filename}_mp_rank_{mp_rank_str}_model_states.pt",
)
else:
ckpt_name = os.path.join(
checkpoints_path,
str(tag),
"mp_rank_" + mp_rank_str + "_model_states.pt",
)
return ckpt_name
def _get_optimizer_ckpt_name(self, checkpoints_path, tag, expp_rank):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
ckpt_name = os.path.join(checkpoints_path, str(tag),
f'expp_rank_{expp_rank}_mp_rank_{mp_rank:02d}_optim_states.pt')
return ckpt_name
@staticmethod
def _get_expert_ckpt_name(checkpoints_path, layer_id, expert_id, tag, mpu=None):
mp_rank = 0 if mpu is None else mpu.get_model_parallel_rank()
if layer_id <= -1:
# Used to support old checkpoint loading
ckpt_name = os.path.join(checkpoints_path, '' if tag is None else str(tag),
f'expert_{expert_id}_mp_rank_{mp_rank:02d}_model_states.pt')
else:
# Used to support new checkpoint loading
ckpt_name = os.path.join(checkpoints_path, '' if tag is None else str(tag),
f'layer_{layer_id}_expert_{expert_id}_mp_rank_{mp_rank:02d}_model_states.pt')
return ckpt_name
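# Example of the expert checkpoint names produced above (illustrative arguments, mpu=None):
#   _get_expert_ckpt_name('ckpts', -1, 5, 'global_step100') -> 'ckpts/global_step100/expert_5_mp_rank_00_model_states.pt'
#   _get_expert_ckpt_name('ckpts', 2, 5, 'global_step100')  -> 'ckpts/global_step100/layer_2_expert_5_mp_rank_00_model_states.pt'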
def _get_all_ckpt_names(self, checkpoints_path, tag):
# It is required that (checkpoints_path, tag) are consistent among all ranks.
ckpt_file_pattern = self._get_ckpt_name(checkpoints_path, tag, mp_placeholder="*")
import glob
ckpt_files = glob.glob(ckpt_file_pattern)
ckpt_files.sort()
return ckpt_files
def load_checkpoint(self,
load_dir,
tag=None,
load_module_strict=True,
load_optimizer_states=True,
load_lr_scheduler_states=True,
load_module_only=False,
custom_load_fn=None):
"""
Load training checkpoint
Arguments:
load_dir: Required. Directory to load the checkpoint from
tag: Checkpoint tag used as a unique identifier for checkpoint, if not provided will attempt to load tag in 'latest' file
load_module_strict: Optional. Boolean to strictly enforce that the keys in state_dict of module and checkpoint match.
load_optimizer_states: Optional. Boolean to load the training optimizer states from Checkpoint. Ex. ADAM's momentum and variance
load_lr_scheduler_states: Optional. Boolean to add the learning rate scheduler states from Checkpoint.
load_module_only: Optional. Boolean to load only the model weights from the checkpoint. Ex. warmstarting.
custom_load_fn: Optional. Custom model load function.
Returns:
A tuple of ``load_path`` and ``client_state``.
*``load_path``: Path of the loaded checkpoint. ``None`` if loading the checkpoint failed.
*``client_state``: State dictionary used for loading required training states in the client code.
Important: under ZeRO-3, one cannot load a checkpoint with ``engine.load_checkpoint()`` right
after ``engine.save_checkpoint()`` because ``engine.module`` is partitioned and
``load_checkpoint()`` expects a pristine model. If you must do so, reinitialize the engine
before calling ``load_checkpoint()``.
"""
if tag is None:
latest_tag = "latest_universal" if self.load_universal_checkpoint() else "latest"
latest_path = os.path.join(load_dir, latest_tag)
if os.path.isfile(latest_path):
with open(latest_path, "r") as fd:
tag = fd.read().strip()
else:
if self.load_universal_checkpoint():
raise ValueError(f'Invalid for universal checkpoint: {latest_path} does not exist')
else:
logger.warning(
f"Unable to find latest file at {latest_path}, if trying to load latest "
"checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint."
)
return None, None
if self.zero_optimization_partition_weights():
# Prepare for checkpoint load by ensuring all parameters are partitioned
self.optimizer.checkpoint_event_prologue()
load_path, client_states = self._load_checkpoint(load_dir,
tag,
load_module_strict=load_module_strict,
load_optimizer_states=load_optimizer_states,
load_lr_scheduler_states=load_lr_scheduler_states,
load_module_only=load_module_only,
custom_load_fn=custom_load_fn)
load_zero_checkpoint = self.zero_optimization() or self.bfloat16_enabled()
if load_zero_checkpoint and load_path is not None:
success = self._load_zero_checkpoint(load_dir, tag, load_optimizer_states=load_optimizer_states)
if not success:
self.optimizer._restore_from_bit16_weights()
if self.zero_optimization_partition_weights():
self.optimizer.checkpoint_event_epilogue()
return load_path, client_states
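# Minimal usage sketch (assumes an initialized engine; the directory and the 'step'
# key are illustrative and must match what was passed as client_state when saving):
#   load_path, client_state = engine.load_checkpoint('checkpoints')  # tag resolved from the 'latest' file
#   if load_path is None:
#       start_step = 0  # no checkpoint found, start from scratch
#   else:
#       start_step = client_state.get('step', 0)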
def _load_checkpoint(self,
load_dir,
tag,
load_module_strict=True,
load_optimizer_states=True,
load_lr_scheduler_states=True,
load_module_only=False,
custom_load_fn=None):
from deepspeed.runtime.state_dict_factory import SDLoaderFactory
ckpt_list = self._get_all_ckpt_names(load_dir, tag)
sd_loader = SDLoaderFactory.get_sd_loader(ckpt_list, checkpoint_engine=self.checkpoint_engine)
is_pipe_parallel = isinstance(self.module, PipelineModule)
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
load_path, checkpoint, _ = sd_loader.load(self.mp_world_size, mp_rank, is_pipe_parallel=is_pipe_parallel)
if checkpoint is None:
return None, None
if is_pipe_parallel:
# Pipeline parallelism uses this to load its own checkpoint files.
self._curr_ckpt_path = os.path.join(load_dir, tag)
if self.has_moe_layers:
# print(checkpoint.keys())
old_moe_load = False
if not isinstance(checkpoint['num_experts'], list):
old_moe_load = True
DeepSpeedEngine.load_moe_state_dict(load_dir,
tag,
state_dict=checkpoint['module'],
old_moe_load=old_moe_load,
model=self.module,
mpu=self.mpu,
num_experts=self.num_experts,
checkpoint_engine=self.checkpoint_engine)
if not self.load_universal_checkpoint():
self.load_module_state_dict(checkpoint=checkpoint,
strict=load_module_strict,
custom_load_fn=custom_load_fn)
self.loaded_checkpoint_dp_world_size = checkpoint['dp_world_size']
if load_module_only:
deepspeed_states = ['module']
if self.optimizer is not None and self.fp16_enabled():
self.optimizer.refresh_fp32_params()
else:
if self.has_moe_layers:
largest_group_name = groups._get_max_expert_size_name()
expp_rank = groups._get_expert_parallel_rank(largest_group_name)
optim_load_path = self._get_optimizer_ckpt_name(load_dir, tag, expp_rank)
optim_checkpoint = self.checkpoint_engine.load(optim_load_path, map_location=torch.device('cpu'))
else:
optim_checkpoint = checkpoint
has_zero_optimizer_state = self.zero_optimization() or self.bfloat16_enabled()
if load_optimizer_states and self.optimizer is not None and not has_zero_optimizer_state:
if self.fp16_enabled():
self.optimizer.load_state_dict(optim_checkpoint['optimizer'],
load_optimizer_states=load_optimizer_states)
else:
self.optimizer.load_state_dict(optim_checkpoint['optimizer'])
if load_lr_scheduler_states and self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
if self.random_ltd_enabled() and self.random_ltd_scheduler is not None and 'random_ltd' in checkpoint:
self.random_ltd_scheduler.load_state_dict(checkpoint['random_ltd'])
if self.training_dataloader is not None and self.curriculum_learning_enabled(
) and 'data_sampler' in checkpoint:
self.training_dataloader.data_sampler.load_state_dict(checkpoint['data_sampler'])
def get_sparse_tensor_module_names(original_set, loaded_set, original_parameters, loaded_parameters):
result = set()
for name in original_set:
if name in loaded_parameters and name not in loaded_set:
continue # parameter existed in previous model and was not sparse
result.add(name)
for name in loaded_set:
if name in original_parameters:
result.add(name) # parameter exists in both configs and it was sparse
return result
if 'sparse_tensor_module_names' in checkpoint:
sparse_tensor_module_names = checkpoint['sparse_tensor_module_names']
elif 'csr_tensor_module_names' in checkpoint:
sparse_tensor_module_names = checkpoint['csr_tensor_module_names']
else:
sparse_tensor_module_names = None
if sparse_tensor_module_names is not None:
if load_module_strict:
self.sparse_tensor_module_names = sparse_tensor_module_names
else:
self.sparse_tensor_module_names = get_sparse_tensor_module_names(
self.sparse_tensor_module_names, sparse_tensor_module_names,
dict(self.module.named_parameters()), checkpoint["module"])
self.global_steps = checkpoint['global_steps']
self.global_samples = checkpoint.get('global_samples', self.global_steps * self.train_batch_size())
self.skipped_steps = checkpoint['skipped_steps']
self.loaded_checkpoint_mp_world_size = checkpoint['mp_world_size']
deepspeed_states = [
'module', 'sparse_tensor_module_names', 'skipped_steps', 'global_steps', 'dp_world_size',
'mp_world_size', 'data_sampler', 'random_ltd'
]
client_state = {}
if load_lr_scheduler_states:
deepspeed_states.append('lr_scheduler')
if load_optimizer_states:
deepspeed_states.append('optimizer')
client_state = {key: value for key, value in checkpoint.items() if key not in deepspeed_states}
if not load_optimizer_states and not load_module_only:
client_state['optimizer'] = optim_checkpoint['optimizer']
return load_path, client_state
def _load_zero_checkpoint(self, load_dir, tag, load_optimizer_states=True):
if self.load_universal_checkpoint():
zero_sd_list = None
checkpoint_folder = f'{os.path.join(load_dir, tag)}'
else:
if load_optimizer_states and self.dp_world_size != self.loaded_checkpoint_dp_world_size:
raise ZeRORuntimeException("The checkpoint being loaded used a DP " \
f"world size of {self.loaded_checkpoint_dp_world_size} but the " \
f"current world size is {self.dp_world_size}. Automatic adjustment " \
"of ZeRO's optimizer state partitioning with a new world size is not " \
"currently supported.")
checkpoint_folder = None
zero_sd_list = self._get_all_zero_checkpoints(load_dir, tag)
if zero_sd_list is None:
return False
self.optimizer.load_state_dict(state_dict_list=zero_sd_list,
load_optimizer_states=load_optimizer_states,
load_from_fp32_weights=self.zero_load_from_fp32_weights(),
checkpoint_folder=checkpoint_folder)
if self.load_universal_checkpoint():
logger.info(f'loaded universal zero checkpoints from {checkpoint_folder} for rank {self.global_rank}')
else:
logger.info(f"loading {len(zero_sd_list)} zero partition checkpoints for rank {self.global_rank}")
return True
def _get_mp_rank_zero_checkpoint_names(self, load_dir, tag, mp_rank, dp_world_size, bf16_mode):
zero_ckpt_names = []
for dp_rank in range(dp_world_size):
ckpt_name = self._get_rank_zero_ckpt_name(checkpoints_path=load_dir,
tag=tag,
mp_rank=mp_rank,
dp_rank=dp_rank,
bf16_mode=bf16_mode)
zero_ckpt_names.append(ckpt_name)
return zero_ckpt_names
def _get_all_zero_checkpoint_names(self, load_dir, tag, bf16_mode):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
zero_ckpt_names = self._get_mp_rank_zero_checkpoint_names(load_dir=load_dir,
tag=tag,
mp_rank=mp_rank,
dp_world_size=self.loaded_checkpoint_dp_world_size,
bf16_mode=bf16_mode)
for i, ckpt_name in enumerate(zero_ckpt_names):
if not os.path.exists(ckpt_name):
# transparently handle the old file pattern for optim_states
if "optim_states.pt" in ckpt_name:
ckpt_name_try = ckpt_name.replace("_optim_states.pt", "optim_states.pt")
if os.path.exists(ckpt_name_try):
zero_ckpt_names[i] = ckpt_name_try
continue
return zero_ckpt_names
def _get_all_zero_checkpoint_state_dicts(self, zero_ckpt_names):
zero_sd_list = []
for i, ckpt_name in enumerate(zero_ckpt_names):
_state = None
if ckpt_name is None:
_state = {OPTIMIZER_STATE_DICT: None}
# Fully load state for current rank
elif self.zero_elastic_checkpoint() or dist.get_rank(group=self.optimizer.dp_process_group) == i:
_state = self.checkpoint_engine.load(
ckpt_name,
map_location='cpu',
)
else:
_state = {OPTIMIZER_STATE_DICT: None}
zero_sd_list.append(_state)
zero_optimizer_sd = [sd[OPTIMIZER_STATE_DICT] for sd in zero_sd_list]
logger.info(f"successfully read {len(zero_optimizer_sd)} ZeRO state_dicts for rank {self.global_rank}")
return zero_optimizer_sd
def _get_all_zero_checkpoints(self, load_dir, tag):
for bf16_mode in [self.bfloat16_enabled(), not self.bfloat16_enabled()]:
zero_ckpt_names = self._get_all_zero_checkpoint_names(load_dir, tag, bf16_mode)
if zero_ckpt_names is not None:
# Warn if loading checkpoint of different bit16 type
if bf16_mode is not self.bfloat16_enabled():
checkpoint_bit16 = BFLOAT16 if bf16_mode else FP16
engine_bit16 = BFLOAT16 if self.bfloat16_enabled() else FP16
logger.warn(f'Loading {checkpoint_bit16} zero checkpoints into {engine_bit16} training engine')
return self._get_all_zero_checkpoint_state_dicts(zero_ckpt_names)
return None
def _checkpoint_tag_validation(self, tag):
if self.checkpoint_tag_validation_enabled():
s_hash = hashlib.sha1(tag.encode())
bhash = torch.ByteTensor([s_hash.digest()]).flatten().to(self.device)
max_bhash = bhash.clone()
min_bhash = bhash.clone()
dist.all_reduce(max_bhash, op=dist.ReduceOp.MAX)
dist.all_reduce(min_bhash, op=dist.ReduceOp.MIN)
valid = all(min_bhash == bhash) and all(max_bhash == bhash)
msg = (f"[rank={dist.get_rank()}] The checkpoint tag name '{tag}' is not consistent across "
"all ranks. Including rank unique information in checkpoint tag could cause issues when "
"restoring with different world sizes.")
if self.checkpoint_tag_validation_fail():
assert valid, msg
elif not valid:
logger.warning(msg)
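# The validation above hashes the tag on every rank and all-reduces the hash bytes with
# MAX and MIN; the tag is consistent only if min == hash == max on every rank.
# The same idea in isolation (sketch; assumes an initialized process group and a
# backend-compatible device for the tensor):
#   bhash = torch.ByteTensor(list(hashlib.sha1(tag.encode()).digest())).to(device)
#   hi, lo = bhash.clone(), bhash.clone()
#   dist.all_reduce(hi, op=dist.ReduceOp.MAX)
#   dist.all_reduce(lo, op=dist.ReduceOp.MIN)
#   consistent = bool((hi == bhash).all() and (lo == bhash).all())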
def save_checkpoint(self, save_dir, tag=None, client_state={}, save_latest=True):
"""Save training checkpoint
Arguments:
save_dir: Required. Directory for saving the checkpoint
tag: Optional. Checkpoint tag used as a unique identifier for the checkpoint, global step is
used if not provided. Tag name must be the same across all ranks.
client_state: Optional. State dictionary used for saving required training states in the client code.
save_latest: Optional. Save a file 'latest' pointing to the latest saved checkpoint.
Important: all processes must call this method, not just the process with rank 0, because
each process needs to save its master weights and scheduler+optimizer states. This method
will hang waiting to synchronize with the other processes if it is called only for the
process with rank 0.
"""
if self.zero_optimization_partition_weights():
# Prepare for checkpoint save by ensuring all parameters are partitioned
self.optimizer.checkpoint_event_prologue()
rank = self.local_rank if self.use_node_local_storage() else self.global_rank
# This is to make sure the checkpoint names are created without collision
# There seems to be an issue creating them in parallel
# Ensure save_dir directory exists
self.checkpoint_engine.makedirs(save_dir, exist_ok=True)
dist.barrier()
if tag is None:
tag = f"global_step{self.global_steps}"
# Ensure tag is a string
tag = str(tag)
self.checkpoint_engine.create(tag)
# Ensure checkpoint tag is consistent across ranks
self._checkpoint_tag_validation(tag)
if self.has_moe_layers:
self.save_non_zero_checkpoint = False
self._create_checkpoint_file(save_dir, tag, False)
self._save_moe_checkpoint(save_dir, tag, client_state=client_state)
# We distribute the task of saving layer checkpoint files among
# data parallel instances, so all procs should call _save_checkpoint.
# All procs then call module_state_dict(), but only procs of data
# parallel rank 0 save the general model params.
if not self.has_moe_layers:
self._create_checkpoint_file(save_dir, tag, False)
self._save_checkpoint(save_dir, tag, client_state=client_state)
if self.save_zero_checkpoint:
self._create_zero_checkpoint_files(save_dir, tag)
self._save_zero_checkpoint(save_dir, tag)
if self.zero_optimization_partition_weights():
self.optimizer.checkpoint_event_epilogue()
# Save latest checkpoint tag
self.checkpoint_engine.commit(tag)
if save_latest and rank == 0:
with open(os.path.join(save_dir, 'latest'), 'w') as fd:
fd.write(tag)
dist.barrier()
return True
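# Minimal usage sketch (every rank must call it; the directory and 'step' value are illustrative):
#   engine.save_checkpoint('checkpoints',
#                          tag=f'global_step{engine.global_steps}',
#                          client_state={'step': step})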
def _get_non_moe_state_dict(self, full_state_dict):
"""
Get the state dict of the non-moe layers
"""
for key in list(full_state_dict.keys()):
if 'expert' in key and 'moe.gate.wg.weight' not in key:
full_state_dict.pop(key)
return full_state_dict
def _save_moe_checkpoint(self, save_dir, tag, client_state={}):
save_path = self._get_ckpt_name(save_dir, tag)
# A hack to save the checkpointing directory. Pipeline parallelism overrides
# module_state_dict() and uses this path to save the model. module_state_dict()
# then instead just returns None.
# Using layer_#_expert_# to save the model's expert state_dict
moe_layer_id = 0
for n_module, module in self.module.named_modules():
if isinstance(module, MoE): # and deepspeed.comm.get_rank() == 0:
group_name = module.expert_group_name
num_local_experts = module.num_local_experts
expp_rank = groups._get_expert_parallel_rank(group_name)
exp_dp_rank = groups._get_expert_data_parallel_rank(group_name)
# print(expp_rank, exp_dp_rank)
if exp_dp_rank != 0:
moe_layer_id += 1
continue
# get all moe parameters
moe_state_dict = {}
for n, p in module.state_dict().items():
if 'expert' in n and 'moe.gate.wg.weight' not in n:
moe_state_dict[n_module + '.' + n] = p
moe_str_prefix = '.deepspeed_moe.experts.deepspeed_experts.'
# print(moe_state_dict.keys()) # until now, everything is fine. So the bug happens at next few lines
# Reorder the moe name rank, so that each checkpoint only has one expert
experts_state_dict = defaultdict(dict)
for key in list(moe_state_dict.keys()):
m = re.match(f".*{moe_str_prefix}([0-9]+).*", key)
local_expert_id = None
if not m:
logger.warn(f'No expert found in key {key}.')
else:
local_expert_id = m.group(1)
global_expert_id = expp_rank * \
num_local_experts + int(local_expert_id)
expert_key = key.replace(f'{moe_str_prefix}{local_expert_id}',
f'{moe_str_prefix}{global_expert_id}')
# truncating extra tensor (shared) storage
truncated = moe_state_dict.pop(key).clone().detach()
experts_state_dict[str(global_expert_id)][expert_key] = truncated
# let's save the moe parameters
for global_expert_id, expert_state_dict in experts_state_dict.items():
# save the moe parameters
moe_save_path = self._get_expert_ckpt_name(save_dir, moe_layer_id, global_expert_id, tag, self.mpu)
if self.random_ltd_enabled():
expert_state_dict = remove_random_ltd_state_dict(expert_state_dict)
self.checkpoint_engine.save(expert_state_dict, moe_save_path)
moe_layer_id += 1
self._curr_ckpt_path = os.path.join(save_dir, tag)
largest_group_name = groups._get_max_expert_size_name()
expp_rank = groups._get_expert_parallel_rank(largest_group_name)
exp_dp_rank = groups._get_expert_data_parallel_rank(largest_group_name)
# In the case of E + D parallelism, only the
# first expert parallel group should save the expert weights
# since each expert parallel group is a copy of the model's experts
if exp_dp_rank != 0:
return
# Save optimizer states. They are different across each exp parallel rank.
optimizer_state = {
'optimizer': self.optimizer.state_dict() if self.optimizer and not self.zero_optimization() else None
}
# TODO: why use BufferedWriter not the path
file_path = self._get_optimizer_ckpt_name(save_dir, tag, expp_rank)
self.checkpoint_engine.save(optimizer_state, file_path)
# get non-moe parameters
model_state_dict = self._get_non_moe_state_dict(self.module_state_dict())
if expp_rank == 0:
# TODO: update num experts info,.. in checkpoint
state = {
'module':
model_state_dict,
'lr_scheduler':
self.lr_scheduler.state_dict() if self.lr_scheduler is not None else None,
'data_sampler':
self.training_dataloader.data_sampler.state_dict() if
(self.training_dataloader is not None and self.curriculum_learning_enabled()) else None,
'random_ltd':
self.random_ltd_scheduler.state_dict() if self.random_ltd_enabled() else None,
'sparse_tensor_module_names':
self.sparse_tensor_module_names,
'skipped_steps':
self.skipped_steps,
'global_steps':
self.global_steps,
'global_samples':
self.global_samples,
'dp_world_size':
self.dp_world_size,
'mp_world_size':
self.mp_world_size,
'num_experts':
self.num_experts
}
state.update(client_state)
logger.info(f'Saving model checkpoint: {save_path}')
self.checkpoint_engine.save(state, save_path)
self._curr_save_path = None
def _create_checkpoint_file(self, save_dir, tag, zero_checkpoint):
name_function = (self._get_zero_ckpt_name if zero_checkpoint else self._get_ckpt_name)
try:
checkpoint_name = name_function(save_dir, tag)
path = os.path.dirname(checkpoint_name)
self.checkpoint_engine.makedirs(path, exist_ok=True)
except:
logger.error(f"Failed saving model checkpoint to {save_dir} with tag {tag}")
return False
return True
def _create_zero_checkpoint_files(self, save_dir, tag):
success = True
# zero checkpoint files are created sequentially
for rank in range(dist.get_world_size(self.optimizer.dp_process_group)):
if rank == self.global_rank:
success = self._create_checkpoint_file(save_dir, tag, True)
dist.barrier(group=self.optimizer.dp_process_group)
return success
def _save_checkpoint(self, save_dir, tag, client_state={}):
save_path = self._get_ckpt_name(save_dir, tag)
zero_optimizer_state = self.zero_optimization() or self.bfloat16_enabled()
save_frozen_param = self.zero_optimization_partition_gradients()
# A hack to save the checkpointing directory. Pipeline parallelism overrides
# module_state_dict() and uses this path to save the model. module_state_dict()
# then instead just returns None. The module_state_dict() implementation in
# PipelineEngine expects the save path to be set in self._curr_ckpt_path.
self._curr_ckpt_path = os.path.join(save_dir, tag)
module = self.module_state_dict()
self._curr_ckpt_path = None
state = dict(module=module,
buffer_names=self._get_buffer_names(),
optimizer=self.optimizer.state_dict() if self.optimizer and not zero_optimizer_state else None,
param_shapes=self._get_zero_param_shapes() if self.optimizer and zero_optimizer_state else None,
frozen_param_shapes=self._get_zero_frozen_param_attributes(self._get_param_shape_func)
if save_frozen_param else None,
shared_params=self._get_shared_params() if self.optimizer and zero_optimizer_state else None,
frozen_param_fragments=self._get_zero_frozen_param_attributes(self._get_param_fragment_func)
if save_frozen_param else None,
lr_scheduler=self.lr_scheduler.state_dict() if self.lr_scheduler is not None else None,
data_sampler=self.training_dataloader.data_sampler.state_dict() if
(self.training_dataloader is not None and self.curriculum_learning_enabled()) else None,
random_ltd=self.random_ltd_scheduler.state_dict() if self.random_ltd_enabled() else None,
sparse_tensor_module_names=self.sparse_tensor_module_names,
skipped_steps=self.skipped_steps,
global_steps=self.global_steps,
global_samples=self.global_samples,
dp_world_size=self.dp_world_size,
mp_world_size=self.mp_world_size,
ds_config=self.config,
ds_version=version)
state.update(client_state)
if self.save_non_zero_checkpoint:
log_dist(message=f'Saving model checkpoint: {save_path}', ranks=[0, 1])
self.checkpoint_engine.save(state, save_path)
def _get_buffer_names(self):
buffer_names = []
# we save buffer names so that we can later extract the real buffers from the saved
# state_dict["module"] in the non-zero checkpoint - the buffers are already there but they
# are intermixed with param placeholders
# have to traverse the tree to be able to skip non-persistent buffers
def get_layer_named_buffers(module, prefix=""):
for name, buf in module.named_buffers(recurse=False):
if buf is not None and name not in module._non_persistent_buffers_set:
buffer_names.append(prefix + name)
for name, child in module.named_children():
if child is not None:
get_layer_named_buffers(child, prefix + name + ".")
get_layer_named_buffers(self.module, prefix="")
return buffer_names
def _get_param_shape_func(self, param):
return param.ds_shape if hasattr(param, 'ds_id') else param.shape
def _get_param_fragment_func(self, param):
return param.ds_tensor.detach().cpu() if hasattr(param, 'ds_id') else param.detach().cpu()
def _get_zero_frozen_param_attributes(self, attr_func):
frozen_param_fragments = OrderedDict()
for param in self.module.parameters():
if param.requires_grad:
continue
if param not in self.param_names:
raise ValueError(f"failed to find frozen {param} in named params")
name = self.param_names[param]
frozen_param_fragments[name] = attr_func(param)
return frozen_param_fragments
def _get_zero_param_shapes(self):
"""Returns a dict of name to shape mapping, only for the flattened fp32 weights saved by the
optimizer. The names are exactly as in state_dict. The order is absolutely important, since
the saved data is just flattened data with no identifiers and requires reconstruction in the
same order it was saved.
We can't rely on self.module.named_parameters() to get the saved tensors, as some params
will be missing and others unsaved and then it'd be impossible to reconstruct state_dict
from the flattened weights.
optimizer.bit16_groups seems to be the easiest to use as it's in all zeroX versions.
"""
param_group_shapes = []
cnt = 0
numel = 0
# zero2 started using a round_robin_bit16_groups which is a shuffled version of bit16_groups -
# if we don't use it, we get parameters ordered incorrectly
if hasattr(self.optimizer, "round_robin_bit16_groups"):
bit16_groups = self.optimizer.round_robin_bit16_groups
elif self.bfloat16_enabled() and not self.zero_optimization():
bit16_groups = self.optimizer.bf16_groups
else:
bit16_groups = self.optimizer.bit16_groups if self.zero_optimization_stage(
) == 2 else self.optimizer.fp16_groups
for bit16_group in bit16_groups:
param_shapes = OrderedDict()
for param in bit16_group:
cnt += 1
numel += param.ds_numel if hasattr(param, "ds_numel") else param.numel()
shape = param.ds_shape if hasattr(param, "ds_shape") else param.shape
if param not in self.param_names:
raise ValueError(f"failed to find optimizer param in named params")
name = self.param_names[param]
param_shapes[name] = shape
# uncomment to debug zero_to_fp32.py problems
# if self.global_rank == 0: print(f"saving param {name} {shape} (numel={shape.numel()})")
param_group_shapes.append(param_shapes)
# if self.global_rank == 0: print(f"Total saved {numel} numels in {cnt} params")
return param_group_shapes
def _get_shared_params(self):
"""
Returns a dict of shared params, which can later be used to reconstruct the original state dict,
e.g. in `zero_to_fp32`. Each dict entry is a pair of param names, where the key is the name
of the variable that isn't stored and the value is the actual param holding data.
"""
shared_ds_ids = {}
shared_params_by_full_name = {}
def get_layer_state_dict(module, prefix=""):
# handle params
for name, param in module.named_parameters(recurse=False):
if param is None or not hasattr(param, "ds_id"):
continue
key = prefix + name
# can't rely on param.data_ptr() as it will be reused as weights gets
# gathered and reduced, but param.ds_id is unique across all zero weights
# (and shared params will have the same param.ds_id)
if param.ds_id in shared_ds_ids:
# shared weights
#print(f"`{key}` is shared with `{shared_ds_ids[param.ds_id]}`")
shared_params_by_full_name[key] = shared_ds_ids[param.ds_id]
else:
shared_ds_ids[param.ds_id] = key
for name, child in module.named_children():
if child is not None:
get_layer_state_dict(child, prefix + name + ".")
if dist.get_rank() == 0:
get_layer_state_dict(self.module, prefix="")
return shared_params_by_full_name
def _copy_recovery_script(self, save_path):
base_dir = os.path.dirname(os.path.dirname(__file__))
script = "zero_to_fp32.py"
src = os.path.join(base_dir, "utils", script)
dst = os.path.join(save_path, script)
#logger.info(f"creating recovery script {dst}")
copyfile(src, dst)
# make executable
os.chmod(dst, os.stat(dst).st_mode | stat.S_IEXEC)
def _save_zero_checkpoint(self, save_path, tag):
zero_checkpoint_name = self._get_zero_ckpt_name(save_path, tag)
zero_sd = dict(optimizer_state_dict=self.optimizer.state_dict(), ds_config=self.config, ds_version=version)
self.checkpoint_engine.save(zero_sd, zero_checkpoint_name)
if self.global_rank == 0:
self._copy_recovery_script(save_path)
ckpt_type = 'zero' if self.zero_optimization() else 'bf16_zero'
logger.info(f'{ckpt_type} checkpoint saved {zero_checkpoint_name}')
def _zero3_consolidated_16bit_state_dict(self):
"""
Get a full non-partitioned state_dict with fp16 weights on cpu.
Important: this function must be called on all ranks and not just rank 0.
This is similar to nn.Module.state_dict (modelled after _save_to_state_dict), but:
1. consolidates the weights from different partitions on gpu0
2. works on one layer at a time to require as little gpu0 memory as possible, by
moving the already consolidated weights to cpu
3. takes care to keep the shared params shared when gradually copying the params to cpu
Returns:
a consolidated fp16 ``state_dict`` on cpu on rank 0, ``None`` on other ranks
"""
if not self.zero_optimization_partition_weights():
raise ValueError("this function requires ZeRO-3 mode")
state_dict = OrderedDict() if dist.get_rank() == 0 else None
shared_params = {}
def get_layer_state_dict(module, prefix=""):
# gather one layer at a time to be memory-efficient
# must use modifier_rank=0 to release GPU memory after each layer gathered
#see_memory_usage("before GatheredParameters", force=True)
with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0):
if dist.get_rank() == 0:
# handle params
for name, param in module.named_parameters(recurse=False):
if param is None:
continue
key = prefix + name
# can't rely on param.data_ptr() as it will be reused as weights gets
# gathered and reduced, but param.ds_id is unique across all zero weights
# (and shared params will have the same param.ds_id)
if param.ds_id in shared_params:
# shared weights
#print(f"`{key}` is shared with `{shared_params[param.ds_id]}`")
state_dict[key] = state_dict[shared_params[param.ds_id]]
else:
state_dict[key] = param.detach().cpu()
shared_params[param.ds_id] = key
#print(f"param {param.ds_id} {param.shape} {key} ")
# now buffers - not sure if need to take care of potentially shared weights here
for name, buf in module.named_buffers(recurse=False):
if (buf is not None and name not in module._non_persistent_buffers_set):
state_dict[prefix + name] = buf.detach().cpu()
#see_memory_usage("after GatheredParameters", force=True)
for name, child in module.named_children():
if child is not None:
get_layer_state_dict(child, prefix + name + ".")
# Prepare for checkpoint save by ensuring all parameters are partitioned
self.optimizer.checkpoint_event_prologue()
see_memory_usage("before get_layer_state_dict", force=False)
get_layer_state_dict(self.module, prefix="")
see_memory_usage("after get_layer_state_dict", force=False)
self.optimizer.checkpoint_event_epilogue()
return state_dict
def save_fp16_model(self, save_dir, save_filename="pytorch_model.bin"):
"""has been renamed to save_16bit_model, keeping this around for backwards
compatibility"""
return self.save_16bit_model(save_dir, save_filename)
def save_16bit_model(self, save_dir, save_filename="pytorch_model.bin"):
"""
Save 16bit model weights
This method saves the 16bit model weights at the desired destination.
Arguments:
save_dir: Required. Directory for saving the model
save_filename: Optional. Filename to save to. Defaults to ``pytorch_model.bin``
Returns:
``True`` when a model has been saved, ``False`` otherwise. It will not be saved if
stage3_gather_16bit_weights_on_model_save is ``False``.
Important: all processes must call this method and not just the process with rank 0. It is
because the processes need to work in sync to gather the weights. This method will hang
waiting to synchronize with other processes if it's called just for the process with rank 0.
"""
path = os.path.join(save_dir, save_filename)
if self.zero_optimization_partition_weights():
if self.zero_gather_16bit_weights_on_model_save():
# consolidation is expensive in time and memory and therefore isn't a default
state_dict = self._zero3_consolidated_16bit_state_dict()
else:
# the model will be bogus if not consolidated so don't confuse the user by saving it
logger.info(
f"Did not save the model {path} because `stage3_gather_16bit_weights_on_model_save` is False")
return False
else:
state_dict = self.module.state_dict()
tag = f"global_step{self.global_steps}"
tag = str(tag)
self.checkpoint_engine.create(tag)
if dist.get_rank() == 0:
self.checkpoint_engine.makedirs(save_dir, exist_ok=True)
logger.info(f"Saving model weights to {path}, tag: {tag}")
self.checkpoint_engine.save(state_dict, path)
self.checkpoint_engine.commit(tag)
return True
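# Minimal usage sketch (assumes ZeRO-3 with stage3_gather_16bit_weights_on_model_save
# enabled in the config; all ranks must participate):
#   saved = engine.save_16bit_model('export_dir', 'pytorch_model.bin')
# Alternatively, a regular ZeRO checkpoint can be converted offline with the
# zero_to_fp32.py script that _copy_recovery_script() places next to the checkpoint.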
def empty_partition_cache(self):
"""
Release GPU memory consumed by offloaded model parameters.
"""
if hasattr(self.optimizer, 'empty_partition_cache'):
self.optimizer.empty_partition_cache()
gc.collect()
get_accelerator().empty_cache()
| 157,235 | 45.12379 | 506 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/weight_quantizer.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from ..module_inject.replace_policy import HFBertLayerPolicy, replace_policies
from deepspeed.accelerator import get_accelerator
class WeightQuantization(object):
def __init__(self, mlp_extra_grouping=True, mp_size=1):
self.dense_scales = []
self.qkv_scales = []
self.mlp4hh_scales = []
self.mlph4h_scales = []
self.mlp_extra_grouping = mlp_extra_grouping
self.mp_size = mp_size
def quantize_data(self, data, quantize_bits, groups, key=None):
data_groups = torch.split(data.float().view(-1), data.numel() // groups)
max_d = [max(g.max(), g.min().abs()) for g in data_groups]
data_scale = [float(1 << quantize_bits) / (2 * mx + 1e-5) for mx in max_d]
data_int = [(g * s) for g, s in zip(data_groups, data_scale)]
data_int = [
di.round().clamp(-(1 << (quantize_bits - 1)), (((1 << (quantize_bits - 1)) - 1))) for di in data_int
]
data_int = torch.cat(data_int).reshape(data.shape)
data_int = data_int.to(torch.int8)
data_scale = torch.cat([s.unsqueeze(0).unsqueeze(0) for s in data_scale])
return data_int, data_scale
def is_mlp(self, data, merge_count=1):
return ((self.mp_size * data.shape[0] * merge_count) / data.shape[1] == 4 or \
(self.mp_size * data.shape[1] * merge_count) / data.shape[0] == 4)
def is_qkv(self, data):
return ((self.mp_size * data.shape[0]) / data.shape[1] == 3 or \
(self.mp_size * data.shape[1]) / data.shape[0] == 3)
def Quantize(self, value_list, quantize_bits, groups, key, merge_dim=0):
if self.mlp_extra_grouping and self.is_mlp(value_list[0], merge_count=len(value_list)):
groups *= 2
q_scale = []
index = 0
for data in value_list:
data_int, data_scale = self.quantize_data(data, quantize_bits, groups, key)
q_scale.append(data_scale)
value_list[index] = data_int
index += 1
q_scale = (1 /
torch.cat(q_scale, dim=merge_dim).to(get_accelerator().current_device_name()).view(-1).unsqueeze(0))
if "mlp.dense_4h_to_h.weight" in key:
self.mlp4hh_scales.append(q_scale)
elif "mlp.dense_h_to_4h.weight" in key:
self.mlph4h_scales.append(q_scale)
elif "attention.query_key_value.weight" in key:
self.qkv_scales.append(q_scale)
else:
self.dense_scales.append(q_scale)
return value_list
def merge_layer_scales(self, layer_scales):
max_dim = max([s.shape[-1] for s in layer_scales])
layer_scales = [
torch.cat((s, torch.zeros((1, max_dim - s.shape[-1]), device=get_accelerator().current_device_name())),
dim=-1) if s.shape[-1] < max_dim else s for s in layer_scales
]
return torch.cat(layer_scales).unsqueeze(0)
def merge_scales(self):
all_scales = []
for dense_scale, qkv_scale, m4hh_scale, mh4h_scale in \
zip(self.dense_scales, self.qkv_scales, self.mlp4hh_scales, self.mlph4h_scales):
all_scales.append(self.merge_layer_scales([qkv_scale, dense_scale, mh4h_scale, m4hh_scale]))
return torch.cat(all_scales)
def merge_scales_split(self, split_count):
all_scales = [[] for _ in range(split_count)]
for dense_scale, qkv_scale, m4hh_scale, mh4h_scale in \
zip(self.dense_scales, self.qkv_scales, self.mlp4hh_scales, self.mlph4h_scales):
dense_scale = torch.split(dense_scale, dense_scale.numel() // split_count)
qkv_scale = torch.split(qkv_scale, qkv_scale.numel() // split_count)
m4hh_scale = torch.split(m4hh_scale, m4hh_scale.numel() // split_count)
mh4h_scale = torch.split(mh4h_scale, mh4h_scale.numel() // split_count)
for s in range(split_count):
all_scales[s].append(
torch.cat([
torch.cat((qkv_scale[s], torch.zeros_like(qkv_scale[s])), dim=1),
torch.cat((dense_scale[s], torch.zeros_like(dense_scale[s])), dim=1), mh4h_scale[s],
m4hh_scale[s]
]).unsqueeze(0))
for scales_a in all_scales:
torch.cat(scales_a)
return all_scales
def sd_quantize_megatron(self, sd, quantize_bits, groups):
keys = sd.keys()
for key in keys:
value_list = [sd[key]]
if "attention.dense.weight" in key or "mlp.dense_4h_to_h.weight" in key or \
"mlp.dense_h_to_4h.weight" in key or "attention.query_key_value.weight" in key:
value_list = self.Quantize(value_list, quantize_bits, groups, key=key)
sd[key] = value_list[0]
all_scales = self.merge_scales()
return sd, all_scales
def model_quantize(self, model, quantize_policy, quantize_bits, groups):
all_scales = []
def quantize_fn(layer, policy_cls):
policy = policy_cls(layer)
_, qkvw, _, dense_w, _, _ = policy.attention()
_, _h4h_w, _, _4hh_w, _ = policy.mlp()
keys = [qkvw, dense_w, _h4h_w, _4hh_w]
layer_scales = []
for key in range(len(keys)):
if self.mlp_extra_grouping and self.is_mlp(keys[key]):
data_quantized, data_scale = self.quantize_data(keys[key], quantize_bits, groups * 2)
elif policy_cls is HFBertLayerPolicy and self.is_qkv(keys[key]):
data_quantized, data_scale = self.quantize_data(keys[key], quantize_bits, groups * 3)
else:
data_quantized, data_scale = self.quantize_data(keys[key], quantize_bits, groups)
keys[key].copy_(data_quantized)
layer_scales.append((1 / data_scale.to(get_accelerator().current_device_name()).view(-1).unsqueeze(0)))
all_scales.append(self.merge_layer_scales(layer_scales))
return layer
def _quantize_module(model, policies):
for name, child in model.named_children():
if child.__class__ in policies:
quantize_fn, replace_policy = policies[child.__class__]
setattr(model, name, quantize_fn(child, replace_policy))
else:
_quantize_module(child, policies)
return model
policy = {}
if quantize_policy is not None:
for layer_name, replace_policy in quantize_policy.items():
policy.update({layer_name: (quantize_fn, replace_policy)})
else:
for plcy in replace_policies:
policy.update({plcy._orig_layer_class: (quantize_fn, plcy)})
quantized_module = _quantize_module(model, policy)
return quantized_module, torch.cat(all_scales)
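# Round-trip sketch of the group-wise symmetric quantization implemented by
# quantize_data() above (illustrative shapes; `wq` is a hypothetical instance):
#   wq = WeightQuantization(mlp_extra_grouping=False)
#   w = torch.randn(1024, 1024)
#   w_int8, scales = wq.quantize_data(w, quantize_bits=8, groups=64)
#   # each group's scale is 2**bits / (2*max_abs + eps); dequantize by dividing by it
#   groups_q = torch.split(w_int8.view(-1), w.numel() // 64)
#   w_hat = torch.cat([g.float() / s for g, s in zip(groups_q, scales.view(-1))]).view(w.shape)
#   max_abs_err = (w - w_hat).abs().max()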
| 7,027 | 44.636364 | 119 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/dataloader.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from deepspeed.accelerator import get_accelerator
from deepspeed.runtime.data_pipeline.data_sampling.data_sampler import DeepSpeedDataSampler
from deepspeed.runtime.data_pipeline.constants import CURRICULUM_LEARNING, \
DATA_EFFICIENCY, DATA_SAMPLING_NUM_WORKERS
from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, \
DATA_PARALLEL_GROUP, GLOBAL_RANK
class RepeatingLoader:
def __init__(self, loader):
"""Wraps an iterator to allow for infinite iteration. This is especially useful
for DataLoader types that we wish to automatically restart upon completion.
Args:
loader (iterator): The data loader to repeat.
"""
self.loader = loader
self.data_iter = iter(self.loader)
def __iter__(self):
return self
def __next__(self):
try:
batch = next(self.data_iter)
except StopIteration:
self.data_iter = iter(self.loader)
batch = next(self.data_iter)
return batch
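# Minimal usage sketch (dataset, batch size and step count are illustrative):
#   loader = RepeatingLoader(DataLoader(dataset, batch_size=8))
#   for step in range(total_steps):  # may exceed one epoch; the loader restarts itself
#       batch = next(loader)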
class DeepSpeedDataLoader(object):
def __init__(self,
dataset,
batch_size,
pin_memory,
local_rank,
tput_timer,
collate_fn=None,
num_local_io_workers=None,
data_sampler=None,
data_parallel_world_size=None,
data_parallel_rank=None,
dataloader_drop_last=False,
deepspeed_dataloader_config={}):
self.deepspeed_dataloader_config = deepspeed_dataloader_config
self.tput_timer = tput_timer
self.batch_size = batch_size
self.curriculum_learning_enabled = False
if CURRICULUM_LEARNING in deepspeed_dataloader_config:
self.curriculum_learning_enabled = deepspeed_dataloader_config[CURRICULUM_LEARNING]
if self.curriculum_learning_enabled:
data_sampler = DeepSpeedDataSampler(self.deepspeed_dataloader_config[DATA_EFFICIENCY],
len(dataset),
self.batch_size,
data_parallel_rank,
data_parallel_world_size,
self.deepspeed_dataloader_config[DATA_PARALLEL_GROUP],
self.deepspeed_dataloader_config[GRADIENT_ACCUMULATION_STEPS],
self.deepspeed_dataloader_config[GLOBAL_RANK],
drop_last=dataloader_drop_last)
device_count = get_accelerator().device_count()
num_local_io_workers = self.deepspeed_dataloader_config[DATA_SAMPLING_NUM_WORKERS]
else:
if local_rank >= 0:
if data_sampler is None:
data_sampler = DistributedSampler(dataset=dataset,
num_replicas=data_parallel_world_size,
rank=data_parallel_rank)
device_count = 1
else:
if data_sampler is None:
data_sampler = RandomSampler(dataset)
device_count = get_accelerator().device_count()
batch_size *= device_count
if num_local_io_workers is None:
num_local_io_workers = 2 * device_count
self.num_local_io_workers = num_local_io_workers
self.data_sampler = data_sampler
self.dataset = dataset
self.collate_fn = collate_fn
self.device_count = device_count
self.batch_size = batch_size
self.pin_memory = pin_memory
self.data = None
self.dataloader_drop_last = dataloader_drop_last
self.post_process_func = None
if self.dataloader_drop_last:
self.len = len(self.data_sampler) // self.batch_size
else:
from math import ceil
self.len = ceil(len(self.data_sampler) / self.batch_size)
def __iter__(self):
self._create_dataloader()
return self
def __len__(self):
return self.len
def __next__(self):
if self.tput_timer:
self.tput_timer.start()
if self.curriculum_learning_enabled:
data = next(self.data_iterator)
if self.post_process_func is not None:
data = self.post_process_func(data, self.data_sampler.state_dict())
return data
else:
return next(self.data)
def _create_dataloader(self):
if self.curriculum_learning_enabled:
if self.collate_fn is None:
self.dataloader = DataLoader(self.dataset,
pin_memory=self.pin_memory,
batch_sampler=self.data_sampler,
num_workers=self.num_local_io_workers)
else:
self.dataloader = DataLoader(self.dataset,
pin_memory=self.pin_memory,
batch_sampler=self.data_sampler,
collate_fn=self.collate_fn,
num_workers=self.num_local_io_workers)
self.data_iterator = iter(self.dataloader)
return self.dataloader
else:
if self.collate_fn is None:
self.dataloader = DataLoader(self.dataset,
batch_size=self.batch_size,
pin_memory=self.pin_memory,
sampler=self.data_sampler,
num_workers=self.num_local_io_workers,
drop_last=self.dataloader_drop_last)
else:
self.dataloader = DataLoader(self.dataset,
batch_size=self.batch_size,
pin_memory=self.pin_memory,
sampler=self.data_sampler,
collate_fn=self.collate_fn,
num_workers=self.num_local_io_workers,
drop_last=self.dataloader_drop_last)
self.data = (x for x in self.dataloader)
return self.dataloader
# DataLoader([(torch.randn(3, 3), torch.tensor(i % 2)) for i in range(10)], batch_size=2))
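# Length sketch for DeepSpeedDataLoader (ignoring the device_count scaling above):
# with 10 samples and batch_size=3, len(loader) is 4 when dataloader_drop_last=False
# (ceil(10 / 3)) and 3 when it is True (10 // 3).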
| 6,977 | 41.809816 | 110 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/eigenvalue.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.utils import log_dist
import numpy as np
import logging
class Eigenvalue(object):
def __init__(self,
verbose=False,
max_iter=100,
tol=1e-2,
stability=0,
gas_boundary_resolution=1,
layer_name='',
layer_num=0):
super().__init__()
self.verbose = verbose
self.max_iter = max_iter
self.tol = tol
self.stability = stability
self.gas_boundary_resolution = gas_boundary_resolution
self.layer_name = layer_name
self.layer_num = layer_num
assert len(self.layer_name) > 0 and layer_num > 0
log_dist(
f'enabled eigenvalue with verbose={verbose}, max_iter={max_iter}, tol={tol}, stability={stability}, gas_boundary_resolution={gas_boundary_resolution}, layer_name={layer_name}, layer_num={layer_num}',
ranks=[0])
# Replace all nan/pos-inf/neg-inf values with zero
# TODO: newer PyTorch versions provide torch.nan_to_num; switch to it once the minimum supported version allows.
def nan_to_num(self, x):
device = x.device
x = x.cpu().numpy()
x = np.nan_to_num(x=x, copy=False, nan=0.0, posinf=0.0, neginf=0.0)
return torch.from_numpy(x).to(device)
def normalize(self, v):
norm_squared = self.inner_product(v, v)
norm = norm_squared**0.5 + self.stability
normalized_vectors = [vector / norm for vector in v]
normalized_vectors = [self.nan_to_num(vector) for vector in normalized_vectors]
return normalized_vectors
def inner_product(self, xs, ys):
return sum([torch.sum(x * y) for (x, y) in zip(xs, ys)])
def get_layers(self, module):
scope_names = self.layer_name.split('.')
assert len(scope_names) > 0
m = module
for name in scope_names:
assert hasattr(m, name), "layer_name configuration is invalid."
m = getattr(m, name)
return m
def compute_eigenvalue(self, module, device=None, scale=1.0):
block_eigenvalue = []
param_keys = []
layers = self.get_layers(module)
for block in range(self.layer_num):
model_block = layers[block]
# We found that this randn() has a noticeable accuracy impact in some cases, so save/restore the random state here.
rng_state = torch.random.get_rng_state()
if device is None:
v = [
torch.randn(p.size()) for p in model_block.parameters()
if p.grad is not None and p.grad.grad_fn is not None
]
else:
v = [
torch.randn(p.size(), device=device) for p in model_block.parameters()
if p.grad is not None and p.grad.grad_fn is not None
]
torch.random.set_rng_state(rng_state)
grads = [
param.grad for param in model_block.parameters()
if param.grad is not None and param.grad.grad_fn is not None
]
params = [
param for param in model_block.parameters()
if param.grad is not None and param.grad.grad_fn is not None
]
layer_keys = [id(p) for p in model_block.parameters()]
param_keys.append(layer_keys)
v = self.normalize(v)
# Disable eigenvalue if the model doesn't support second order gradients computation,
# e.g. when enabling DS transformer kernel.
if len(grads) == 0 or len(params) == 0:
log_dist(f'The model does NOT support eigenvalue computation.', ranks=[0], level=logging.WARNING)
return []
i = 0
eigenvalue_current, eigenvalue_previous = 1., 0.
while (i < self.max_iter) and abs(eigenvalue_current) > 0 and (abs(
(eigenvalue_current - eigenvalue_previous) / eigenvalue_current) >=
self.tol): # test convergence criteria
eigenvalue_previous = eigenvalue_current
Hv = torch.autograd.grad(grads, params, grad_outputs=v, only_inputs=True, retain_graph=True)
#Hv = [hv.float() for hv in Hv]
Hv = [self.nan_to_num(hv).float() for hv in Hv]
eigenvalue_current = self.inner_product(Hv, v).item()
v = self.normalize(Hv)
v = [x / scale for x in v]
i += 1
eigenvalue_current *= scale
block_eigenvalue.append(eigenvalue_current)
if self.verbose:
log_dist(f'block: {block}, power iteration: {i}, eigenvalue: {eigenvalue_current}', ranks=[0])
block_eigenvalue = self.post_process(block_eigenvalue)
if self.verbose:
log_dist(f'post processed block_eigenvalue: {block_eigenvalue}', ranks=[0])
# {param_id: (eigenvalue, layer_id)}
ev_dict = {}
for i, (layer_keys, value) in enumerate(zip(param_keys, block_eigenvalue)):
ev_dict.update(dict.fromkeys(layer_keys, (value, i)))
return ev_dict
# 1. Map all eigenvalues to [0, 1.0].
# 2. Some layers can't generate valid eigenvalues on fp16 precision, use 1.0 instead.
def post_process(self, value_list):
max_value = abs(max(value_list, key=abs))
return [abs(v) / max_value if v != 0.0 else 1.0 for v in value_list]
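# Power iteration, the core recurrence of compute_eigenvalue(), shown on an explicit
# symmetric matrix (self-contained sketch; compute_eigenvalue applies the same update
# with Hessian-vector products from torch.autograd.grad instead of H @ v):
#   H = torch.tensor([[2.0, 1.0], [1.0, 3.0]])
#   v = torch.randn(2)
#   for _ in range(50):
#       v = H @ v
#       v = v / v.norm()
#   eigenvalue = torch.dot(v, H @ v)  # ~3.618, the dominant eigenvalue of H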
| 5,625 | 36.506667 | 211 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/utils.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Copyright NVIDIA/Megatron
Helper functions and classes from multiple sources.
"""
from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from deepspeed import comm as dist
try:
from torch._six import inf
except ModuleNotFoundError:
from torch import inf
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
from deepspeed.accelerator import get_accelerator
from deepspeed.module_inject.policy import transpose
from torch.nn import functional as F
torch_memory_reserved = get_accelerator().memory_reserved
torch_max_memory_reserved = get_accelerator().max_memory_reserved
class DummyOptim():
"""
Dummy optimizer that presents model parameters as a single param group; it is
primarily used to allow ZeRO-3 without an optimizer
"""
def __init__(self, params):
self.param_groups = []
self.param_groups.append({'params': params})
def noop_decorator(func):
return func
def ensure_directory_exists(filename):
"""Create the directory path to ``filename`` if it does not already exist.
Args:
filename (str): A file path.
"""
dirname = os.path.dirname(filename)
os.makedirs(dirname, exist_ok=True)
def set_random_seed(seed):
"""Set the random seed for common PRNGs used during training: random, numpy, and torch.
Args:
seed (int): the seed to use
"""
import numpy
import random
random.seed(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
def is_model_parallel_parameter(p) -> bool:
if hasattr(p, 'model_parallel') and p.model_parallel:
return True
if hasattr(p, 'tensor_model_parallel') and p.tensor_model_parallel:
return True
return False
def bwc_tensor_model_parallel_rank(mpu=None):
"""Backwards-compatible way of querying the tensor model parallel rank from
an ``mpu`` object.
*Tensor* model parallelism means that tensors are physically split across
processes. This contrasts with *pipeline* model parallelism, in which the
layers are partitioned but tensors left intact.
The API for tensor model parallelism has changed across versions and this
helper provides a best-effort implementation across versions of ``mpu``
objects. The preferred mechanism is
``mpu.get_tensor_model_parallel_rank()``.
This should "just work" with both Megatron-LM and DeepSpeed's pipeline
parallelism.
Args:
mpu (model parallel unit, optional): The mpu object from which the tensor model
parallel rank is queried. If ``mpu=None``, 0 is returned. Defaults to ``None``.
Returns:
int: the rank
"""
if mpu is None:
# No model parallelism: the rank is trivially 0
return 0
if hasattr(mpu, 'get_tensor_model_parallel_rank'):
# New Megatron and DeepSpeed convention (post pipeline-parallelism release)
return mpu.get_tensor_model_parallel_rank()
elif hasattr(mpu, 'get_slice_parallel_rank'):
# Some DeepSpeed + pipeline parallelism versions
return mpu.get_slice_parallel_rank()
else:
# Deprecated Megatron and DeepSpeed convention
return mpu.get_model_parallel_rank()
def copy_to_device(item, device, criterion_func):
"""
Return a copy of tensor on specified device.
Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts.
Parameters:
item: tensor to copy or (possibly nested) container of tensors to copy.
device: target device
criterion_func: Function used to restrict the copy to items that meet the criterion
Returns:
A copy of ``item`` with qualifying tensors copied to ``device``
"""
if criterion_func(item):
return item.to(device)
elif isinstance(item, list):
return [copy_to_device(v, device, criterion_func) for v in item]
elif isinstance(item, tuple):
return tuple([copy_to_device(v, device, criterion_func) for v in item])
elif isinstance(item, dict):
return {k: copy_to_device(v, device, criterion_func) for k, v in item.items()}
else:
return item
def move_to_device(item, device, criterion_func):
"""
Move tensor on to specified device by changing the storage.
Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts.
Parameters:
item: tensor to move or (possibly nested) container of tensors to move.
device: target device
criterion_func: Function used to restrict the move to items that meet the criterion
Returns:
``item`` with qualifying tensors moved to ``device``
"""
if criterion_func(item):
device_copy = item.to(device)
item.data = device_copy.data
return item
elif isinstance(item, list):
return [move_to_device(v, device, criterion_func) for v in item]
elif isinstance(item, tuple):
return tuple([move_to_device(v, device, criterion_func) for v in item])
elif isinstance(item, dict):
return {k: move_to_device(v, device, criterion_func) for k, v in item.items()}
else:
return item
class CheckOverflow(object):
'''Checks for overflow in gradients across parallel processes'''
def __init__(self, param_groups=None, mpu=None, zero_reduce_scatter=False, deepspeed=None):
self.mpu = mpu
self.params = [] if param_groups else None
self.zero_reduce_scatter = zero_reduce_scatter
self.deepspeed = deepspeed
self.has_moe_params = False
if param_groups:
for group in param_groups:
for param in group:
self.params.append(param)
if is_moe_param(param):
self.has_moe_params = True
def check_using_norm(self, norm_group, reduce_overflow=True):
# TODO: I don't think reduce_overflow is needed if mpu is None
overflow = -1 in norm_group
overflow_gpu = get_accelerator().FloatTensor([overflow])
if self.has_moe_params:
# In this case, we need to do an all_reduce across
# the expert_parallel_group, so that if there was
# an overflow due to expert weights, we detect it
# Only need to check groups.get_largest_expert_parallel_group()
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=groups._get_max_expert_parallel_group())
if self.mpu is not None:
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.mpu.get_model_parallel_group())
elif reduce_overflow:
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX)
dist.barrier()
overflow = overflow_gpu[0].item()
return bool(overflow)
def check(self, param_groups=None):
params = []
has_moe_params = False
if param_groups is None:
params = self.params
has_moe_params = self.has_moe_params
else:
assert param_groups is not None, \
"self.params and param_groups both cannot be none"
for group in param_groups:
for param in group:
params.append(param)
if is_moe_param(param):
has_moe_params = True
return self.has_overflow(params, has_moe_params=has_moe_params)
# `params` is a list / generator of torch.Variable
def has_overflow_serial(self, params):
for i, p in enumerate(params):
if p.grad is not None and self._has_inf_or_nan(p.grad.data, i):
return True
return False
def has_overflow(self, params, has_moe_params=None):
if has_moe_params is None:
has_moe_params = self.has_moe_params
overflow = self.has_overflow_serial(params)
# Since each model parallel GPU carries only part of the model,
# make sure overflow flag is synced across all the model parallel GPUs
overflow_gpu = get_accelerator().ByteTensor([overflow])
# deepspeed.comm.all_reduce(overflow_gpu,
# op=deepspeed.comm.ReduceOp.MAX,
# group=mpu.get_model_parallel_group())
if has_moe_params:
# All reduce this across expert_parallel_group, so that if an expert
# overflows, we detect it here
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=groups._get_max_expert_parallel_group())
if self.zero_reduce_scatter:
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=dist.get_world_group())
elif self.mpu is not None:
if self.deepspeed is not None:
using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce')
if (using_pipeline and self.deepspeed.pipeline_enable_backward_allreduce is False) or (
not using_pipeline and self.deepspeed.enable_backward_allreduce is False):
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.mpu.get_data_parallel_group())
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.mpu.get_model_parallel_group())
elif self.deepspeed is not None and self.deepspeed.enable_backward_allreduce is False:
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=dist.get_world_group())
overflow = overflow_gpu[0].item()
return bool(overflow)
# `x` is a torch.Tensor
@staticmethod
def _has_inf_or_nan(x, i):
try:
# if x is half, the .float() incurs an additional deep copy, but it's necessary if
# Pytorch's .sum() creates a one-element tensor of the same type as x
# (which is true for some recent version of pytorch).
cpu_sum = float(x.float().sum())
# More efficient version that can be used if .sum() returns a Python scalar
# cpu_sum = float(x.sum())
except RuntimeError as instance:
# We want to check if inst is actually an overflow exception.
# RuntimeError could come from a different error.
# If so, we still want the exception to propagate.
if "value cannot be converted" not in instance.args[0]:
raise
return True
else:
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
def _handle_overflow(cpu_sum, x, i):
import math
rank = dist.get_rank()
if rank == 0:
t_i = -1
for v_i, v in enumerate(x.data.contiguous().view(-1)):
if not math.isfinite(float(v)):
t_i = v_i
break
logger.info(f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}")
def get_global_norm(norm_list):
""" Compute total from a list of norms
"""
total_norm = 0.0
for norm in norm_list:
total_norm += norm**2.0
# logger.info(f'norm_list = {norm_list} global = {sqrt(total_norm)}')
return sqrt(total_norm)
def clip_grad_norm_(parameters, max_norm, norm_type=2, mpu=None):
"""Clips gradient norm of an iterable of parameters.
This has been adapted from Nvidia megatron. We add norm averaging
to consider MoE params when calculating norm as they will result
in different norms across different ranks.
This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
added functionality to handle model parallel parameters. Note that
the gradients are modified in place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
max_norm = float(max_norm)
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(p.grad.data.abs().max() for p in parameters)
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
# Take max across all GPUs.
if mpu is not None:
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group())
total_norm = total_norm_cuda[0].item()
else:
total_norm = 0
for p in parameters:
if mpu is not None:
if (mpu.get_model_parallel_rank() == 0) or is_model_parallel_parameter(p):
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm.item()**norm_type
else:
param_norm = p.grad.data.float().norm(norm_type)
total_norm += param_norm.item()**norm_type
# Sum across all model parallel GPUs.
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
if mpu is not None:
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group())
total_norm = total_norm_cuda[0].item()**(1. / norm_type)
# Need to average total_norm across different GPUs due to the presence of moe params
pg = groups._get_data_parallel_group()
scaled_norm = total_norm * 1.0 / float(dist.get_world_size(group=pg))
scaled_norm_tensor = get_accelerator().FloatTensor([float(scaled_norm)])
dist.all_reduce(scaled_norm_tensor, group=pg)
total_norm = scaled_norm_tensor.item()
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for p in parameters:
p.grad.data.mul_(clip_coef)
return total_norm
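# Illustrative usage sketch for clip_grad_norm_ (assumes an initialized
# distributed/deepspeed.comm environment; `model` is a placeholder module):
# total_norm = clip_grad_norm_(model.parameters(), max_norm=1.0)
# Gradients are scaled in place by max_norm / (total_norm + 1e-6) whenever that
# coefficient is below 1.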
def get_grad_norm(parameters, norm_type=2, mpu=None):
"""Get grad norm of an iterable of parameters.
This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ with
added functionality to handle model parallel parameters. Note that
the gradients are not modified by this function. Taken from Nvidia Megatron.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor whose gradient norm will be computed
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(p.grad.data.abs().max() for p in parameters)
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
# Take max across all GPUs.
if mpu is not None:
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group())
total_norm = total_norm_cuda[0].item()
else:
total_norm = 0.
tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=mpu)
for p in parameters:
# Pipeline parallelism may replicate parameters. Avoid multi-counting.
if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated:
continue
# Filter to avoid over-counting replicated tensors from tensor
# model parallelism
if (tensor_mp_rank > 0) and not is_model_parallel_parameter(p):
continue
param_norm = p.grad.data.float().norm(norm_type)
total_norm += param_norm.item()**norm_type
# Sum across all model parallel GPUs.
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
if mpu is not None:
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group())
total_norm = total_norm_cuda[0].item()**(1. / norm_type)
if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
def get_grad_zeros(parameters, mpu=None):
"""Compute the number of grads with zero values.
This is adapted from get_grad_norm
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor whose gradients will be inspected
Returns:
Total number of zero-valued gradient elements (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
total_zeros = 0.
tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=mpu)
for p in parameters:
# Pipeline parallelism may replicate parameters. Avoid multi-counting.
if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated:
continue
# Filter to avoid over-counting replicated tensors from tensor
# model parallelism
if (tensor_mp_rank > 0) and not is_model_parallel_parameter(p):
continue
count_zeros = p.grad.numel() - torch.count_nonzero(p.grad)
total_zeros += count_zeros.item()
# Sum across all model parallel GPUs.
total_zeros_cuda = get_accelerator().FloatTensor([float(total_zeros)])
if mpu is not None:
dist.all_reduce(total_zeros_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group())
total_zeros = total_zeros_cuda[0].item()
return total_zeros
def get_weight_norm(parameters, norm_type=2, mpu=None):
"""Get norm of an iterable of parameters.
This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ with
added functionality to handle model parallel parameters. Note that
the parameters are not modified; only their norm is computed. Taken from Nvidia Megatron.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor whose weight norm will be computed
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(p.data.abs().max() for p in parameters)
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
# Take max across all GPUs.
if mpu is not None:
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group())
total_norm = total_norm_cuda[0].item()
else:
total_norm = 0.
tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=mpu)
for p in parameters:
# Pipeline parallelism may replicate parameters. Avoid multi-counting.
if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated:
continue
# Filter to avoid over-counting replicated tensors from tensor
# model parallelism
if (tensor_mp_rank > 0) and not is_model_parallel_parameter(p):
continue
param_norm = p.data.float().norm(norm_type)
total_norm += param_norm**norm_type
# Sum across all model parallel GPUs.
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
if mpu is not None:
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group())
total_norm = total_norm_cuda[0].item()**(1. / norm_type)
if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
def prefix_sum_inc(weights):
""" Compute an inclusive prefix sum.
Example:
>>> prefix_sum_inc([3,4,5])
[3, 7, 12]
"""
weights_ = [w for w in weights]
for x in range(1, len(weights_)):
weights_[x] += weights_[x - 1]
return weights_
def partition_uniform(num_items, num_parts):
parts = [0] * (num_parts + 1)
# First check for the trivial edge case
if num_items <= num_parts:
for p in range(num_parts + 1):
parts[p] = min(p, num_items)
return parts
chunksize = floor(num_items / num_parts)
for p in range(num_parts):
parts[p] = min(chunksize * p, num_items)
parts[num_parts] = num_items
return parts
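# Illustrative example of partition_uniform: 10 items over 3 parts, where part p
# owns items [parts[p], parts[p+1]):
# >>> partition_uniform(num_items=10, num_parts=3)
# [0, 3, 6, 10]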
def _lprobe(weights, num_parts, bottleneck):
num_items = len(weights)
total_weight = weights[-1]
# initialize partitioning
parts = [0] * (num_parts + 1)
for p in range(1, num_parts + 1):
parts[p] = num_items
bsum = bottleneck # running sum of target weight for pth partition
chunksize = num_items // num_parts
step = chunksize
for p in range(1, num_parts):
# Jump to the next bucket
while (step < num_items) and (weights[step] < bsum):
step += chunksize
# Find the end index of partition p
parts[p] = bisect_left(weights, bsum, lo=step - chunksize, hi=min(step, num_items))
# Nothing more to partition, return early
if parts[p] == num_items:
# See if the current partition is overweight.
part_size = weights[-1] - weights[parts[p - 1]]
return parts, part_size < bottleneck
# Next partition target
bsum = weights[parts[p] - 1] + bottleneck
return parts, bsum >= total_weight
def _rb_partition_balanced(weights, num_parts, eps):
total_weight = weights[-1]
lower = total_weight / num_parts # best case heaviest partition
upper = total_weight # worst case heaviest partition
# Do a binary search for the best partitioning
while upper > lower + eps:
mid = lower + ((upper - lower) / 2)
parts, success = _lprobe(weights, num_parts, mid)
if success:
upper = mid
else:
lower = mid + eps
return upper
def partition_balanced(weights, num_parts, eps=1e-3):
num_items = len(weights)
# First check for the trivial edge case
if num_items <= num_parts:
return partition_uniform(num_items, num_parts)
weights_ = prefix_sum_inc(weights)
# Find the smallest bottleneck (weight of heaviest partition)
bottleneck = _rb_partition_balanced(weights_, num_parts, eps=eps)
# Now compute that partitioning
parts, success = _lprobe(weights_, num_parts, bottleneck)
assert success
return parts
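# Illustrative example of partition_balanced (default eps): boundaries are chosen so
# the heaviest part is as light as possible, e.g. weights [1, 2, 3, 4] over 2 parts
# give parts of total weight 6 and 4:
# >>> partition_balanced([1, 2, 3, 4], num_parts=2)
# [0, 3, 4]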
class PartitionedTensor:
def __init__(self, tensor, group, partition_meta=None):
super().__init__()
self.group = group
self.num_parts = dist.get_world_size(group=self.group)
self.rank = dist.get_rank(group=self.group)
self.orig_size = list(tensor.size())
self.orig_device = tensor.device
self.local_data, self.partition = self._partition_tensor(tensor)
@classmethod
def from_meta(cls, meta, local_part, group, device=get_accelerator().device_name()):
assert meta.dtype == torch.long
dummy = torch.ones(dist.get_world_size(group=group))
part_obj = cls(tensor=dummy, group=group)
meta = meta.tolist()
# [N, list0, ..., listN-1]
part_obj.orig_size = meta[1:(1 + meta[0])]
meta = meta[1 + meta[0]:]
part_obj.orig_device = device
part_obj.local_data = local_part.detach()
part_obj.group = group
# Partition is encoded like the rowptr of a CSR matrix:
# [num_parts, rank, 0, part_1, ..., part_num_parts]
# TODO: support shuffle between different partition granularities
assert part_obj.num_parts == meta[0]
assert part_obj.rank == meta[1]
part_obj.partition = meta[2:] # length num_parts+1
return part_obj
def _partition_tensor(self, tensor):
partition = partition_uniform(num_items=tensor.numel(), num_parts=self.num_parts)
start = partition[self.rank]
length = partition[self.rank + 1] - start
tensor_part = tensor.detach().contiguous().view(-1).narrow(0, start=start, length=length).clone()
return tensor_part, partition
def full(self, device=None):
if device is None:
device = self.orig_device
# Allocate the full tensor as a flat buffer.
full_numel = prod(self.full_size())
flat_tensor = torch.zeros([full_numel], dtype=self.local_data.dtype, device=device)
# Prepare all-gather buffer
partition_tensors = []
for part_id in range(self.num_parts):
part_size = self.partition[part_id + 1] - self.partition[part_id]
buf = flat_tensor.narrow(0, start=self.partition[part_id], length=part_size)
if part_id == self.rank:
buf.copy_(self.local_data)
partition_tensors.append(buf)
# Collect the full tensor
dist.all_gather(partition_tensors, partition_tensors[self.rank], group=self.group)
for i in range(len(partition_tensors)):
partition_tensors[i].data = torch.zeros(1)
partition_tensors[i] = None
return flat_tensor.view(self.full_size()).clone().detach()
def to_meta(self):
"""Returns a torch.LongTensor that encodes partitioning information.
Can be used along with ``data()`` to serialize a ``PartitionedTensor`` for
communication.
Returns:
torch.LongTensor: a tensor encoding the meta-information for the partitioning
"""
meta = []
meta.append(len(self.orig_size))
meta += list(self.orig_size)
meta.append(self.num_parts)
meta.append(self.rank)
meta += self.partition
return torch.LongTensor(data=meta).to(self.orig_device)
def data(self):
return self.local_data
def local_size(self):
return self.local_data.size()
def full_size(self):
return self.orig_size
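# Illustrative round-trip sketch for PartitionedTensor (assumes an initialized
# deepspeed.comm process group; `group` and `tensor` are placeholders):
# part = PartitionedTensor(tensor, group)  # each rank keeps only its slice
# meta, local = part.to_meta(), part.data()  # metadata + local shard to communicate
# rebuilt = PartitionedTensor.from_meta(meta, local, group).full()  # all-gather back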
mem_alloced = 0
mem_cached = 0
def memory_status(msg, print_rank=-1, reset_max=False):
global mem_alloced, mem_cached
rank = dist.get_rank()
if print_rank != -1 and rank != print_rank:
return
get_accelerator().synchronize()
if reset_max:
get_accelerator().reset_max_memory_cached()
get_accelerator().reset_max_memory_allocated()
new_alloced = get_accelerator().memory_allocated()
new_cached = get_accelerator().memory_cached()
delta_alloced = new_alloced - mem_alloced
delta_cached = new_cached - mem_cached
mem_cached = new_cached
mem_alloced = new_alloced
max_alloced = get_accelerator().max_memory_allocated()
max_cached = get_accelerator().max_memory_cached()
# convert to GB for printing
new_alloced /= 1024**3
new_cached /= 1024**3
delta_alloced /= 1024**3
delta_cached /= 1024**3
max_alloced /= 1024**3
max_cached /= 1024**3
print(
f'RANK={rank} MEMSTATS', msg, f'device={get_accelerator().current_device_name()} '
f'current alloc={new_alloced:0.4f}GB (delta={delta_alloced:0.4f}GB max={max_alloced:0.4f}GB) '
f'current cache={new_cached:0.4f}GB (delta={delta_cached:0.4f}GB max={max_cached:0.4f}GB)')
def get_ma_status():
if dist.is_initialized() and not dist.get_rank() == 0:
return 0
return get_accelerator().memory_allocated()
def empty_cache():
get_accelerator().empty_cache()
get_accelerator().reset_peak_memory_stats()
def see_memory_usage(message, force=False):
if not force:
return
if dist.is_initialized() and not dist.get_rank() == 0:
return
# python doesn't do real-time garbage collection so do it explicitly to get the correct RAM reports
gc.collect()
# Print message except when distributed but not rank 0
logger.info(message)
logger.info(f"MA {round(get_accelerator().memory_allocated() / (1024 * 1024 * 1024),2 )} GB \
Max_MA {round(get_accelerator().max_memory_allocated() / (1024 * 1024 * 1024),2)} GB \
CA {round(torch_memory_reserved() / (1024 * 1024 * 1024),2)} GB \
Max_CA {round(torch_max_memory_reserved() / (1024 * 1024 * 1024))} GB ")
vm_stats = psutil.virtual_memory()
used_GB = round(((vm_stats.total - vm_stats.available) / (1024**3)), 2)
logger.info(f'CPU Virtual Memory: used = {used_GB} GB, percent = {vm_stats.percent}%')
# get the peak memory to report correct data, so reset the counter for the next call
get_accelerator().reset_peak_memory_stats()
def call_to_str(base, *args, **kwargs):
"""Construct a string representation of a call.
Args:
base (str): name of the call
args (tuple, optional): args to ``base``
kwargs (dict, optional): kwargs supplied to ``base``
Returns:
str: A string representation of base(*args, **kwargs)
"""
name = f'{base}('
if args:
name += ', '.join(repr(arg) for arg in args)
if kwargs:
name += ', '
if kwargs:
name += ', '.join(f'{key}={repr(arg)}' for key, arg in kwargs.items())
name += ')'
return name
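# Illustrative example of call_to_str:
# >>> call_to_str('forward', 1, 2, training=True)
# 'forward(1, 2, training=True)'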
def get_only_unique_item(items):
item_set = set(items)
if len(item_set) != 1:
raise RuntimeError(f"expected there to be only one unique element in {items}")
unique_item, = item_set
return unique_item
def clip_gradients(parameters, max_norm=1.0, global_grad_norm=None, mpu=None, eps=1e-6):
"""Clip the gradient of a list of parameters.
Args:
parameters: List of parameters whose .grad will be clipped.
max_norm (float, optional): max norm of the gradients. Defaults to 1.0.
global_grad_norm (float, optional): Precomputed gradient norm. Defaults to None.
mpu (optional): model parallelism unit. Defaults to None.
eps (float, optional): epsilon value added to grad norm. Defaults to 1e-6
Returns:
float: the global gradient norm
"""
if global_grad_norm is None:
global_grad_norm = get_grad_norm(parameters, mpu=mpu)
clip_coef = max_norm / (global_grad_norm + eps)
if clip_coef < 1:
for p in parameters:
p.grad.detach().mul_(clip_coef)
return global_grad_norm
def get_global_norm_of_tensors(input_tensors, norm_type=2, mpu=None):
"""Get norm of an iterable of tensors.
This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ with
added functionality to handle model parallel parameters. Taken from Nvidia Megatron.
Arguments:
input_tensors (Iterable[Tensor]): an iterable of Tensors whose norm will be computed
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the tensors (viewed as a single vector).
"""
assert isinstance(input_tensors, Iterable), f'expected Iterable type not {type(input_tensors)}'
assert all([torch.is_tensor(t) for t in input_tensors]), 'expected list of only tensors'
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(t.data.abs().max() for t in input_tensors)
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
if mpu is not None:
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group())
total_norm = total_norm_cuda[0].item()
else:
total_norm = sum([t.data.float().norm(norm_type).item()**norm_type for t in input_tensors])
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
if mpu is not None:
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group())
total_norm = total_norm_cuda[0].item()**(1. / norm_type)
if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
def clip_tensors_by_global_norm(input_tensors, max_norm=1.0, global_norm=None, mpu=None, eps=1e-6):
"""Clip list of tensors by global norm.
Args:
input_tensors: List of tensors to be clipped
max_norm (float, optional): max norm of the tensors. Defaults to 1.0.
global_norm (float, optional): Precomputed norm. Defaults to None.
mpu (optional): model parallelism unit. Defaults to None.
eps (float, optional): epsilon value added to grad norm. Defaults to 1e-6
Returns:
float: the global norm
"""
if global_norm is None:
global_norm = get_global_norm_of_tensors(input_tensors, mpu=mpu)
clip_coef = max_norm / (global_norm + eps)
if clip_coef < 1:
for t in input_tensors:
t.detach().mul_(clip_coef)
return global_norm
def align_dense_tensors(tensor_list, alignment):
num_elements = sum(t.numel() for t in tensor_list)
remaining = num_elements % alignment
if remaining:
elements_to_add = alignment - remaining
pad_tensor = torch.zeros(elements_to_add, device=tensor_list[0].device, dtype=tensor_list[0].dtype)
padded_tensor_list = tensor_list + [pad_tensor]
else:
padded_tensor_list = tensor_list
return padded_tensor_list
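# Illustrative example of align_dense_tensors: two 3-element tensors (6 elements total)
# aligned to 4 get a 2-element zero pad tensor appended so the flat size becomes 8:
# >>> [t.numel() for t in align_dense_tensors([torch.ones(3), torch.ones(3)], alignment=4)]
# [3, 3, 2]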
def all_gather_dp_groups(partitioned_param_groups, dp_process_group, start_alignment_factor, allgather_bucket_size):
for group_id, partitioned_params in enumerate(partitioned_param_groups):
# Sequential AllGather Best of both worlds
partition_id = dist.get_rank(group=dp_process_group[group_id])
dp_world_size = dist.get_world_size(group=dp_process_group[group_id])
num_shards = max(1, partitioned_params[partition_id].numel() * dp_world_size // allgather_bucket_size)
shard_size = partitioned_params[partition_id].numel() // num_shards
# Enforce nccl/rccl alignment of start location of each shard
shard_size = shard_size - (shard_size % start_alignment_factor)
num_elements = shard_size
assert shard_size * num_shards <= partitioned_params[partition_id].numel()
for shard_id in range(num_shards):
if shard_id == (num_shards - 1):
num_elements = partitioned_params[partition_id].numel() - shard_id * shard_size
shard_list = []
for dp_id in range(dp_world_size):
curr_shard = partitioned_params[dp_id].narrow(0, shard_id * shard_size, num_elements).detach()
shard_list.append(curr_shard)
dist.all_gather(shard_list, shard_list[partition_id], dp_process_group[group_id])
class TLinear(torch.nn.Linear):
def __init__(self, orig_layer, name=""):
self.name = name
super().__init__(orig_layer.weight.shape[1], orig_layer.weight.shape[0], bias=(orig_layer.bias is not None))
self.weight.data = transpose(orig_layer.weight.data)
self.bias = orig_layer.bias
self._fwd_func = self._fwd_bias_add if self.bias is not None else self._fwd
def _fwd(self, input):
return F.linear(input, self.weight)
def _fwd_bias_add(self, input):
return F.linear(input, self.weight, bias=self.bias)
def forward(self, input):
return self._fwd_func(input)
def get_inactive_params(param_list):
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
return [param for param in param_list if (hasattr(param, 'ds_id') and \
param.ds_status == ZeroParamStatus.NOT_AVAILABLE)]
| 35,805 | 35.686475 | 116 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/state_dict_factory.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import os
import copy
import collections
import json
from abc import ABC, abstractmethod
from deepspeed.utils import logger
from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine
from .weight_quantizer import WeightQuantization
AUTO_MODULE_KEY = 'auto'
class SDLoaderFactory:
@staticmethod
def get_sd_loader_json(json_file, checkpoint_engine):
if isinstance(json_file, str):
with open(json_file) as f:
data = json.load(f)
else:
assert isinstance(json_file, dict)
data = json_file
sd_type = data['type']
ckpt_list = data['checkpoints']
version = data['version']
ckpt_type = data.get('parallelization', 'pp')
mp_size = data.get('mp_size', 0)
if sd_type.lower() in ['bloom', 'ds_model']:
return data
return SDLoaderFactory.get_sd_loader(ckpt_list, checkpoint_engine, sd_type, version)
@staticmethod
def get_sd_loader(ckpt_list, checkpoint_engine, sd_type='Megatron', version=None):
if sd_type == 'Megatron':
return MegatronSDLoader(ckpt_list, version, checkpoint_engine)
else:
assert False, '{} checkpoint type is not supported'.format(sd_type)
class SDLoaderBase(ABC):
def __init__(self, ckpt_list, version, checkpoint_engine):
self.module_key = None
self.ckpt_list = ckpt_list
self.version = version
self.checkpoint_engine = TorchCheckpointEngine() if checkpoint_engine is None else checkpoint_engine
self.check_ckpt_list()
def load(self,
mp_world_size,
mp_rank,
module_key=AUTO_MODULE_KEY,
is_pipe_parallel=False,
quantize=False,
quantize_bits=8,
quantize_groups=64,
mlp_extra_grouping=True):
self.module_key = module_key
num_ckpt = len(self.ckpt_list)
idx = mp_rank * num_ckpt // mp_world_size
""" We have multiple cases to handle here for both training and inference:
1. PipeModule loading mp_rank_*.pt files, is_pipe_parallel=True, module_key is not None
a. if no mp_size/pp_size resizing occurs, for both training & inference, load
the mp_rank related checkpoint directly.
b. if mp_size/pp_size resizing has occurred, only Megatron model inference is supported;
in this case each mp_rank_*.pt has the same content, so we load the first checkpoint
file (idx=0) to avoid idx exceeding the file list boundary.
2. PipeModule loading layer_*.pt files, is_pipe_parallel=True, module_key is None
a. if no mp_size resizing occurs, for both training & inference, load
the mp_rank related checkpoint directly.
b. if mp_size resizing has occurred, only Megatron model inference is supported;
checkpoint file(s) will be merged/split according to mp_rank, mp_world_size and
the checkpoint file list.
3. Non-PipeModule loading mp_rank_*.pt files, is_pipe_parallel=False
Same with case (2).
"""
if is_pipe_parallel and module_key is not None and mp_world_size != num_ckpt:
mp_world_size = num_ckpt
idx = 0
load_path = self.ckpt_list[idx]
merge_count = 1
if num_ckpt == mp_world_size:
assert os.path.exists(load_path)
#logger.info(f'rank: {mp_rank} loading checkpoint: {load_path}')
sd = self.checkpoint_engine.load(load_path, map_location=lambda storage, \
loc: storage)
if quantize:
quantizer = WeightQuantization(mlp_extra_grouping=mlp_extra_grouping, mp_size=mp_world_size)
sd_module, all_scales = quantizer.sd_quantize_megatron(self.get_module(sd), quantize_bits,
quantize_groups)
self.set_module(sd, sd_module)
else:
all_scales = None
elif num_ckpt > mp_world_size:
sd, all_scales, merge_count = self.merge_state_dict(mp_world_size, mp_rank, quantize, \
quantize_bits, quantize_groups, mlp_extra_grouping)
else:
sd, all_scales = self.split_state_dict(mp_world_size, mp_rank, quantize, quantize_bits, \
quantize_groups, mlp_extra_grouping)
return load_path, sd, (all_scales, merge_count)
def get_merge_state_dicts(self, mp_world_size, mp_rank):
num_ckpt = len(self.ckpt_list)
assert num_ckpt % mp_world_size == 0, 'Invalid checkpoints and world size for sd merge'
num_to_merge = num_ckpt // mp_world_size
ckpt_list = [self.ckpt_list[i] for i in range(num_to_merge * mp_rank, num_to_merge * (mp_rank + 1))]
logger.info(f"mp_rank: {mp_rank}, ckpt_list: {ckpt_list}")
sd_list = [self.checkpoint_engine.load(ckpt, map_location=lambda storage, loc: storage) for ckpt in ckpt_list]
return sd_list
def get_split_state_dict(self, mp_world_size, mp_rank):
num_ckpt = len(self.ckpt_list)
assert mp_world_size % num_ckpt == 0, 'Invalid checkpoints and world size for sd split'
num_to_split = mp_world_size // num_ckpt
ckpt_index = mp_rank // num_to_split
ckpt_offset = mp_rank % num_to_split
logger.info(f"mp_rank: {mp_rank}, ckpt_list: {self.ckpt_list[ckpt_index]}, offset: {ckpt_offset}")
sd = self.checkpoint_engine.load(self.ckpt_list[ckpt_index], map_location=lambda storage, loc: storage)
return sd, num_to_split, ckpt_offset
def _choose_module_key(self, sd):
assert not ('module' in sd
and 'model' in sd), "checkpoint has both 'model' and 'module' keys, not sure how to proceed"
assert 'module' in sd or 'model' in sd, "checkpoint contains neither 'model' nor 'module' keys, not sure how to proceed"
if 'module' in sd:
return 'module'
elif 'model' in sd:
return 'model'
def get_module(self, sd):
if self.module_key is None:
return sd
elif self.module_key == AUTO_MODULE_KEY:
return sd[self._choose_module_key(sd)]
else:
return sd[self.module_key]
def set_module(self, sd, module):
if self.module_key is None:
sd = module
elif self.module_key == AUTO_MODULE_KEY:
sd[self._choose_module_key(sd)] = module
else:
sd[self.module_key] = module
return sd
def check_ckpt_list(self):
#logger.info(f'checkpoint file list: {self.ckpt_list}')
assert len(self.ckpt_list) > 0
sd = self.checkpoint_engine.load(self.ckpt_list[0], map_location=lambda storage, loc: storage)
# check checkpoint count is same with saved mp_world_size
if 'mp_world_size' in sd.keys():
assert len(self.ckpt_list) == sd[
'mp_world_size'], f"checkpoint count {len(self.ckpt_list)} is different from saved mp_world_size {sd['mp_world_size']}"
@abstractmethod
def merge_state_dict(self, mp_world_size, mp_rank, quantize, quantize_bits, groups, mlp_extra_grouping):
pass
@abstractmethod
def split_state_dict(self, mp_world_size, mp_rank, quantize, quantize_bits, groups, mlp_extra_grouping):
pass
@abstractmethod
def sanity_check(self, ckpt_file_name):
pass
class MegatronSDLoader(SDLoaderBase):
def __init__(self, ckpt_list, version, checkpoint_engine):
super().__init__(ckpt_list, version, checkpoint_engine)
"""
## Q/K/V data need special processing
key: transformer.layers.0.attention.query_key_value.weight, shape: torch.Size([3192, 4256])
key: transformer.layers.0.attention.query_key_value.bias, shape: torch.Size([3192])
## merge or split on axis=0
key: word_embeddings.weight, shape: torch.Size([12672, 4256])
key: transformer.layers.0.mlp.dense_h_to_4h.bias, shape: torch.Size([4256])
key: transformer.layers.0.mlp.dense_h_to_4h.weight, shape: torch.Size([4256, 4256])
## merge or split on axis=1
key: transformer.layers.0.attention.dense.weight, shape: torch.Size([4256, 1064])
key: transformer.layers.0.mlp.dense_4h_to_h.weight, shape: torch.Size([4256, 4256])
## no change required
key: transformer.layers.0.mlp.dense_4h_to_h.bias, shape: torch.Size([4256])
key: transformer.final_layernorm.weight, shape: torch.Size([4256])
key: transformer.final_layernorm.bias, shape: torch.Size([4256])
key: transformer.layers.0.attention.dense.bias, shape: torch.Size([4256])
key: transformer.layers.0.post_attention_layernorm.weight, shape: torch.Size([4256])
key: transformer.layers.0.post_attention_layernorm.bias, shape: torch.Size([4256])
key: transformer.layers.0.input_layernorm.weight, shape: torch.Size([4256])
key: transformer.layers.0.input_layernorm.bias, shape: torch.Size([4256])
key: position_embeddings.weight, shape: torch.Size([1024, 4256])
"""
def merge_query_key_value(self, param_list, ckpt_ver):
"""
So far we have found 3 Q/K/V parameter formats in different Megatron checkpoint versions:
1. version 0, there is no version information saved in checkpoint.
format: [(3 * np * hn), h]
2. version 1.0
format: [(np * hn * 3), h]
3. version 2.0
format: [(np * 3 * hn), h]
h: hidden size
n: number of attention heads
p: number of model parallel partitions
np: n/p
hn: h/n
"""
new_qkv = None
if ckpt_ver == 0:
# [(3 * np * hn), h]
assert param_list[0].shape[0] % 3 == 0
size_qkv = param_list[0].shape[0] // 3
split_tensors = [torch.split(param, size_qkv, dim=0) for param in param_list]
tensors = []
for i in range(3):
tensor_tuple = [t[i] for t in split_tensors]
tensors.append(torch.cat(tensor_tuple, axis=0))
new_qkv = torch.cat(tensors, axis=0)
elif ckpt_ver == 1.0 or ckpt_ver == 2.0:
# [(np * hn * 3), h] or [(np * 3 * hn), h]
new_qkv = torch.cat(param_list, axis=0)
else:
assert False, f'checkpoint version: {ckpt_ver} is not supported'
return new_qkv
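# Illustrative shapes for merge_query_key_value with ckpt version 0 (assumed two
# model-parallel partitions, per-rank QKV weight of shape [3 * np * hn, h] = [6, h]):
# each rank's tensor is split into its Q/K/V thirds, the thirds are concatenated
# type-by-type across ranks, and the merged result is [12, h] ordered
# [Q_rank0, Q_rank1, K_rank0, K_rank1, V_rank0, V_rank1] along dim 0.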
def split_query_key_value(self, param, num_to_split, offset, ckpt_ver):
"""
So far we have found 3 Q/K/V parameter formats in different Megatron checkpoint versions:
1. version 0, there is no version information saved in checkpoint.
format: [(3 * np * hn), h]
2. version 1.0
format: [(np * hn * 3), h]
3. version 2.0
format: [(np * 3 * hn), h]
h: hidden size
n: number of attention heads
p: number of model parallel partitions
np: n/p
hn: h/n
"""
new_qkv = None
if ckpt_ver == 0:
# [(3 * np * hn), h]
assert param.shape[0] % 3 == 0
size_qkv = param.shape[0] // 3
split_tensors = torch.split(param, size_qkv, dim=0)
assert split_tensors[0].shape[0] % num_to_split == 0
split_size = split_tensors[0].shape[0] // num_to_split
tensors = []
for i in range(3):
tensors.append(torch.split(split_tensors[i], split_size, dim=0)[offset])
new_qkv = torch.cat(tensors, axis=0)
elif ckpt_ver == 1.0 or ckpt_ver == 2.0:
# [(np * hn * 3), h] or [(np * 3 * hn), h]
assert param.shape[0] % num_to_split == 0
size_qkv = param.shape[0] // num_to_split
split_tensors = torch.split(param, size_qkv, dim=0)
new_qkv = split_tensors[offset]
else:
assert False, f'checkpoint version: {ckpt_ver} is not supported'
return new_qkv
def merge_state_dict(self,
mp_world_size,
mp_rank,
quantize=False,
quantize_bits=8,
groups=64,
mlp_extra_grouping=True):
self.sanity_check(self.ckpt_list[0])
sd_list = self.get_merge_state_dicts(mp_world_size, mp_rank)
ds_sd = copy.deepcopy(sd_list[0])
new_client_sd = collections.OrderedDict()
client_sd_list = [self.get_module(sd) for sd in sd_list]
keys = client_sd_list[0].keys()
ckpt_ver = self.get_checkpoint_version(ds_sd)
logger.info(f"checkpoint version: {ckpt_ver}")
if quantize:
quantizer = WeightQuantization(mlp_extra_grouping=mlp_extra_grouping, mp_size=mp_world_size)
for key in keys:
value_list = [sd[key] for sd in client_sd_list]
if "attention.dense.weight" in key or "mlp.dense_4h_to_h.weight" in key:
if quantize:
value_list = quantizer.Quantize(value_list, quantize_bits, groups, key=key, merge_dim=1)
new_client_sd[key] = torch.cat(value_list, axis=1)
elif "attention.query_key_value" in key:
if quantize and "attention.query_key_value.weight" in key:
value_list = quantizer.Quantize(value_list, quantize_bits, groups, key=key)
new_client_sd[key] = torch.cat(value_list, axis=0)
else:
if quantize:
new_client_sd[key] = torch.cat(value_list, axis=0)
else:
new_client_sd[key] = self.merge_query_key_value(value_list, ckpt_ver)
elif "mlp.dense_h_to_4h.weight" in key or "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key:
if quantize and "mlp.dense_h_to_4h.weight" in key:
value_list = quantizer.Quantize(value_list, quantize_bits, groups, key=key)
new_client_sd[key] = torch.cat(value_list, axis=0)
else:
new_client_sd[key] = value_list[0]
if quantize:
all_scales = quantizer.merge_scales()
ds_sd = self.set_module(ds_sd, new_client_sd)
return ds_sd, (all_scales if quantize else None), len(client_sd_list)
def split_state_dict(self,
mp_world_size,
mp_rank,
quantize=False,
quantize_bits=8,
groups=64,
mlp_extra_grouping=True):
#self.sanity_check(self.ckpt_list[0])
sd, num_to_split, ckpt_offset = self.get_split_state_dict(mp_world_size, mp_rank)
ds_sd = copy.deepcopy(sd)
new_client_sd = collections.OrderedDict()
client_sd = self.get_module(sd)
ckpt_ver = self.get_checkpoint_version(ds_sd)
logger.info(f"checkpoint version: {ckpt_ver}")
if quantize:
quantizer = WeightQuantization(mlp_extra_grouping=mlp_extra_grouping, mp_size=mp_world_size)
for key in client_sd.keys():
value = client_sd[key]
if "attention.dense.weight" in key or "mlp.dense_4h_to_h.weight" in key:
assert value.shape[1] % num_to_split == 0
split_size = value.shape[1] // num_to_split
if quantize:
q_vals = quantizer.Quantize([value], quantize_bits, groups, key)
value = q_vals[0]
new_client_sd[key] = torch.split(value, split_size, dim=1)[ckpt_offset]
elif "attention.query_key_value" in key:
if quantize and "attention.query_key_value.weight" in key:
q_vals = quantizer.Quantize([value], quantize_bits, groups, key)
value = q_vals[0]
new_client_sd[key] = self.split_query_key_value(value, num_to_split, ckpt_offset, ckpt_ver)
elif "mlp.dense_h_to_4h.weight" in key or "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key or "final_linear.weight" in key:
assert value.shape[0] % num_to_split == 0
split_size = value.shape[0] // num_to_split
if quantize and "mlp.dense_h_to_4h.weight" in key:
q_vals = quantizer.Quantize([value], quantize_bits, groups, key)
value = q_vals[0]
new_client_sd[key] = torch.split(value, split_size, dim=0)[ckpt_offset]
else:
new_client_sd[key] = value
if quantize:
all_scales = quantizer.merge_scales_split(num_to_split)
ds_sd = self.set_module(ds_sd, new_client_sd)
return ds_sd, (all_scales if quantize else None)
def sanity_check(self, ckpt_file_name):
keys_to_check = [
"attention.dense.weight", "mlp.dense_4h_to_h.weight", "attention.query_key_value",
"mlp.dense_h_to_4h.weight", "mlp.dense_h_to_4h.bias"
]
sd = self.checkpoint_engine.load(ckpt_file_name, map_location=lambda storage, loc: storage)
# partial_key is a sub-string of one key in the sd
def check_key_exist(partial_key, sd):
keys = sd.keys()
found = False
for k in keys:
if partial_key in k:
found = True
break
return found
for key in keys_to_check:
assert check_key_exist(key,
self.get_module(sd)), f'key: {key} is not found in the checkpoint {ckpt_file_name}'
def get_checkpoint_version(self, state_dict):
# Use 0 if version info doesn't exist
return self.version if self.version is not None else state_dict.get('checkpoint_version', 0)
| 18,177 | 41.471963 | 153 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/config.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
from typing import Union
from enum import Enum
import torch
import json
import hjson
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
CONSECUTIVE_HYSTERESIS,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import get_monitor_config
from deepspeed import comm as dist
from deepspeed.runtime.config_utils import DeepSpeedConfigModel
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARALLEL_SIZE,
MODEL_PARALLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
from .data_pipeline.config import get_data_efficiency_enabled, get_data_efficiency_config, get_curriculum_enabled_legacy, get_curriculum_params_legacy
from .data_pipeline.constants import *
TENSOR_CORE_ALIGN_SIZE = 8
ADAGRAD_OPTIMIZER = 'adagrad'
ADAM_OPTIMIZER = 'adam'
ADAMW_OPTIMIZER = 'adamw'
LAMB_OPTIMIZER = 'lamb'
ONEBIT_ADAM_OPTIMIZER = 'onebitadam'
ZERO_ONE_ADAM_OPTIMIZER = 'zerooneadam'
ONEBIT_LAMB_OPTIMIZER = 'onebitlamb'
DEEPSPEED_OPTIMIZERS = [
ADAGRAD_OPTIMIZER, ADAM_OPTIMIZER, ADAMW_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, ONEBIT_LAMB_OPTIMIZER,
ZERO_ONE_ADAM_OPTIMIZER
]
# extra optimizer parameters for adam/adamw
TORCH_ADAM_PARAM = "torch_adam"
# default to adamw logic for adam/adamw optimizers unless user explicitly opts out
ADAM_W_MODE = "adam_w_mode"
ADAM_W_MODE_DEFAULT = True
class DeepSpeedConfigError(Exception):
pass
class DtypeEnum(Enum):
# The torch dtype must always be the first value (so we return torch.dtype)
fp16 = torch.float16, "torch.float16", "fp16", "float16", "half"
fp32 = torch.float32, "torch.float32", "fp32", "float32", "float"
int8 = torch.int8, "torch.int8", "int8"
bf16 = torch.bfloat16, "torch.bfloat16", "bf16", "bfloat16"
# Copied from https://stackoverflow.com/a/43210118
# Allows us to use multiple values for each Enum index and returns first
# listed value when Enum is called
def __new__(cls, *values):
obj = object.__new__(cls)
# first value is canonical value
obj._value_ = values[0]
for other_value in values[1:]:
cls._value2member_map_[other_value] = obj
obj._all_values = values
return obj
def __repr__(self):
return "<%s.%s: %s>" % (
self.__class__.__name__,
self._name_,
", ".join([repr(v) for v in self._all_values]),
)
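# Illustrative example of DtypeEnum: any alias resolves to the same member, whose
# canonical value is the torch dtype:
# >>> DtypeEnum("half") is DtypeEnum.fp16
# True
# >>> DtypeEnum.fp16.value
# torch.float16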
def get_pld_enabled(param_dict):
if PROGRESSIVE_LAYER_DROP in param_dict.keys():
return get_scalar_param(param_dict[PROGRESSIVE_LAYER_DROP], PLD_ENABLED, PLD_ENABLED_DEFAULT)
else:
return False
def get_pld_params(param_dict):
if PROGRESSIVE_LAYER_DROP in param_dict.keys():
pld_params = copy.copy(param_dict[PROGRESSIVE_LAYER_DROP])
pld_params.pop(PLD_ENABLED)
return pld_params
else:
return False
def get_amp_enabled(param_dict):
if AMP in param_dict.keys():
return get_scalar_param(param_dict[AMP], AMP_ENABLED, AMP_ENABLED_DEFAULT)
else:
return False
def get_amp_params(param_dict):
if AMP in param_dict.keys():
amp_params = copy.copy(param_dict[AMP])
amp_params.pop(AMP_ENABLED)
return amp_params
else:
return False
def get_fp16_enabled(param_dict):
if FP16 in param_dict.keys():
return get_scalar_param(param_dict[FP16], FP16_ENABLED, FP16_ENABLED_DEFAULT)
else:
return False
def get_bfloat16_enabled(param_dict):
for key in [BFLOAT16, BFLOAT16_OLD]:
if key in param_dict.keys():
return get_scalar_param(param_dict[key], BFLOAT16_ENABLED, BFLOAT16_ENABLED_DEFAULT)
return False
def get_fp16_master_weights_and_grads_enabled(param_dict):
if get_fp16_enabled(param_dict):
return get_scalar_param(param_dict[FP16], FP16_MASTER_WEIGHTS_AND_GRADS, FP16_MASTER_WEIGHTS_AND_GRADS_DEFAULT)
else:
return False
def get_fp16_auto_cast(param_dict):
if get_fp16_enabled(param_dict):
return get_scalar_param(param_dict[FP16], FP16_AUTO_CAST, FP16_AUTO_CAST_DEFAULT)
def get_loss_scale(param_dict):
if get_fp16_enabled(param_dict):
return get_scalar_param(param_dict[FP16], FP16_LOSS_SCALE, FP16_LOSS_SCALE_DEFAULT)
elif get_bfloat16_enabled(param_dict):
return 1.0
else:
return FP16_LOSS_SCALE_DEFAULT
def get_initial_dynamic_scale(param_dict):
if get_fp16_enabled(param_dict):
initial_scale_power = get_scalar_param(param_dict[FP16], FP16_INITIAL_SCALE_POWER,
FP16_INITIAL_SCALE_POWER_DEFAULT)
elif get_bfloat16_enabled(param_dict):
initial_scale_power = 0
else:
initial_scale_power = FP16_INITIAL_SCALE_POWER_DEFAULT
return 2**initial_scale_power
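# Illustrative example of get_initial_dynamic_scale: with fp16 enabled and
# FP16_INITIAL_SCALE_POWER set to 16 the initial dynamic loss scale is 2**16 = 65536;
# with bfloat16 enabled it is 2**0 = 1.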
def get_dynamic_loss_scale_args(param_dict):
loss_scale_args = None
if get_fp16_enabled(param_dict):
fp16_dict = param_dict[FP16]
dynamic_loss_args = [
FP16_INITIAL_SCALE_POWER,
FP16_LOSS_SCALE_WINDOW,
FP16_MIN_LOSS_SCALE,
FP16_HYSTERESIS,
FP16_CONSECUTIVE_HYSTERESIS,
]
if any(arg in list(fp16_dict.keys()) for arg in dynamic_loss_args):
init_scale = get_scalar_param(fp16_dict, FP16_INITIAL_SCALE_POWER, FP16_INITIAL_SCALE_POWER_DEFAULT)
scale_window = get_scalar_param(fp16_dict, FP16_LOSS_SCALE_WINDOW, FP16_LOSS_SCALE_WINDOW_DEFAULT)
delayed_shift = get_scalar_param(fp16_dict, FP16_HYSTERESIS, FP16_HYSTERESIS_DEFAULT)
consecutive_hysteresis = get_scalar_param(fp16_dict, FP16_CONSECUTIVE_HYSTERESIS,
FP16_CONSECUTIVE_HYSTERESIS_DEFAULT)
min_loss_scale = get_scalar_param(fp16_dict, FP16_MIN_LOSS_SCALE, FP16_MIN_LOSS_SCALE_DEFAULT)
loss_scale_args = {
INITIAL_LOSS_SCALE: 2**init_scale,
SCALE_WINDOW: scale_window,
DELAYED_SHIFT: delayed_shift,
CONSECUTIVE_HYSTERESIS: consecutive_hysteresis,
MIN_LOSS_SCALE: min_loss_scale,
}
return loss_scale_args
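# Illustrative input/output for get_dynamic_loss_scale_args: a config whose fp16
# section sets FP16_INITIAL_SCALE_POWER to 16 and FP16_LOSS_SCALE_WINDOW to 500
# yields {INITIAL_LOSS_SCALE: 2**16, SCALE_WINDOW: 500} with DELAYED_SHIFT,
# CONSECUTIVE_HYSTERESIS and MIN_LOSS_SCALE filled from their defaults.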
def get_gradient_accumulation_steps(param_dict):
return get_scalar_param(param_dict, GRADIENT_ACCUMULATION_STEPS, GRADIENT_ACCUMULATION_STEPS_DEFAULT)
def get_sparse_gradients_enabled(param_dict):
return get_scalar_param(param_dict, SPARSE_GRADIENTS, SPARSE_GRADIENTS_DEFAULT)
def get_communication_data_type(param_dict):
val = get_scalar_param(param_dict, COMMUNICATION_DATA_TYPE, COMMUNICATION_DATA_TYPE_DEFAULT)
val = val.lower() if val is not None else val
if val is None:
return val # we must determine it by other parameters
elif val == "fp32":
return torch.float32
elif val == "fp16":
return torch.float16
elif val == "bfp16":
return torch.bfloat16
raise ValueError(f"Invalid communication_data_type. Supported data types: ['fp16', 'bfp16', 'fp32']. Got: {val}")
def get_prescale_gradients(param_dict):
return get_scalar_param(param_dict, PRESCALE_GRADIENTS, PRESCALE_GRADIENTS_DEFAULT)
def get_gradient_predivide_factor(param_dict):
return get_scalar_param(param_dict, GRADIENT_PREDIVIDE_FACTOR, GRADIENT_PREDIVIDE_FACTOR_DEFAULT)
def get_steps_per_print(param_dict):
return get_scalar_param(param_dict, STEPS_PER_PRINT, STEPS_PER_PRINT_DEFAULT)
def get_disable_allgather(param_dict):
return get_scalar_param(param_dict, DISABLE_ALLGATHER, DISABLE_ALLGATHER_DEFAULT)
def get_dump_state(param_dict):
return get_scalar_param(param_dict, DUMP_STATE, DUMP_STATE_DEFAULT)
def get_gradient_clipping(param_dict):
return get_scalar_param(param_dict, GRADIENT_CLIPPING, GRADIENT_CLIPPING_DEFAULT)
def get_sparse_attention(param_dict):
if SPARSE_ATTENTION in param_dict.keys():
sparsity = param_dict[SPARSE_ATTENTION]
mode = get_sparse_attention_mode(sparsity)
if mode == SPARSE_DENSE_MODE:
return get_sparse_dense_config(sparsity)
elif mode == SPARSE_FIXED_MODE:
return get_sparse_fixed_config(sparsity)
elif mode == SPARSE_VARIABLE_MODE:
return get_sparse_variable_config(sparsity)
elif mode == SPARSE_BIGBIRD_MODE:
return get_sparse_bigbird_config(sparsity)
elif mode == SPARSE_BSLONGFORMER_MODE:
return get_sparse_bslongformer_config(sparsity)
else:
raise NotImplementedError(f"Given sparsity mode, {mode}, has not been implemented yet!")
else:
return None
def get_sparse_dense_config(sparsity):
block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
return {SPARSE_MODE: SPARSE_DENSE_MODE, SPARSE_BLOCK: block}
def get_sparse_fixed_config(sparsity):
block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
different_layout_per_head = get_scalar_param(
sparsity,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT,
)
num_local_blocks = get_scalar_param(sparsity, SPARSE_NUM_LOCAL_BLOCKS, SPARSE_NUM_LOCAL_BLOCKS_DEFAULT)
num_global_blocks = get_scalar_param(sparsity, SPARSE_NUM_GLOBAL_BLOCKS, SPARSE_NUM_GLOBAL_BLOCKS_DEFAULT)
attention = get_scalar_param(sparsity, SPARSE_ATTENTION_TYPE, SPARSE_ATTENTION_TYPE_DEFAULT)
horizontal_global_attention = get_scalar_param(
sparsity,
SPARSE_HORIZONTAL_GLOBAL_ATTENTION,
SPARSE_HORIZONTAL_GLOBAL_ATTENTION_DEFAULT,
)
num_different_global_patterns = get_scalar_param(
sparsity,
SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS,
SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS_DEFAULT,
)
return {
SPARSE_MODE: SPARSE_FIXED_MODE,
SPARSE_BLOCK: block,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
SPARSE_NUM_LOCAL_BLOCKS: num_local_blocks,
SPARSE_NUM_GLOBAL_BLOCKS: num_global_blocks,
SPARSE_ATTENTION_TYPE: attention,
SPARSE_HORIZONTAL_GLOBAL_ATTENTION: horizontal_global_attention,
SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS: num_different_global_patterns,
}
def get_sparse_variable_config(sparsity):
block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
different_layout_per_head = get_scalar_param(
sparsity,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT,
)
num_random_blocks = get_scalar_param(sparsity, SPARSE_NUM_RANDOM_BLOCKS, SPARSE_NUM_RANDOM_BLOCKS_DEFAULT)
local_window_blocks = get_scalar_param(sparsity, SPARSE_LOCAL_WINDOW_BLOCKS, SPARSE_LOCAL_WINDOW_BLOCKS_DEFAULT)
global_block_indices = get_scalar_param(sparsity, SPARSE_GLOBAL_BLOCK_INDICES, SPARSE_GLOBAL_BLOCK_INDICES_DEFAULT)
global_block_end_indices = get_scalar_param(
sparsity,
SPARSE_GLOBAL_BLOCK_END_INDICES,
SPARSE_GLOBAL_BLOCK_END_INDICES_DEFAULT,
)
attention = get_scalar_param(sparsity, SPARSE_ATTENTION_TYPE, SPARSE_ATTENTION_TYPE_DEFAULT)
horizontal_global_attention = get_scalar_param(
sparsity,
SPARSE_HORIZONTAL_GLOBAL_ATTENTION,
SPARSE_HORIZONTAL_GLOBAL_ATTENTION_DEFAULT,
)
return {
SPARSE_MODE: SPARSE_VARIABLE_MODE,
SPARSE_BLOCK: block,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
SPARSE_NUM_RANDOM_BLOCKS: num_random_blocks,
SPARSE_LOCAL_WINDOW_BLOCKS: local_window_blocks,
SPARSE_GLOBAL_BLOCK_INDICES: global_block_indices,
SPARSE_GLOBAL_BLOCK_END_INDICES: global_block_end_indices,
SPARSE_ATTENTION_TYPE: attention,
SPARSE_HORIZONTAL_GLOBAL_ATTENTION: horizontal_global_attention,
}
def get_sparse_bigbird_config(sparsity):
block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
different_layout_per_head = get_scalar_param(
sparsity,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT,
)
num_random_blocks = get_scalar_param(sparsity, SPARSE_NUM_RANDOM_BLOCKS, SPARSE_NUM_RANDOM_BLOCKS_DEFAULT)
num_sliding_window_blocks = get_scalar_param(
sparsity,
SPARSE_NUM_SLIDING_WINDOW_BLOCKS,
SPARSE_NUM_SLIDING_WINDOW_BLOCKS_DEFAULT,
)
num_global_blocks = get_scalar_param(sparsity, SPARSE_NUM_GLOBAL_BLOCKS, SPARSE_NUM_GLOBAL_BLOCKS_DEFAULT)
return {
SPARSE_MODE: SPARSE_BIGBIRD_MODE,
SPARSE_BLOCK: block,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
SPARSE_NUM_RANDOM_BLOCKS: num_random_blocks,
SPARSE_NUM_SLIDING_WINDOW_BLOCKS: num_sliding_window_blocks,
SPARSE_NUM_GLOBAL_BLOCKS: num_global_blocks,
}
def get_sparse_bslongformer_config(sparsity):
block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
different_layout_per_head = get_scalar_param(
sparsity,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT,
)
num_sliding_window_blocks = get_scalar_param(
sparsity,
SPARSE_NUM_SLIDING_WINDOW_BLOCKS,
SPARSE_NUM_SLIDING_WINDOW_BLOCKS_DEFAULT,
)
global_block_indices = get_scalar_param(sparsity, SPARSE_GLOBAL_BLOCK_INDICES, SPARSE_GLOBAL_BLOCK_INDICES_DEFAULT)
global_block_end_indices = get_scalar_param(
sparsity,
SPARSE_GLOBAL_BLOCK_END_INDICES,
SPARSE_GLOBAL_BLOCK_END_INDICES_DEFAULT,
)
return {
SPARSE_MODE: SPARSE_BSLONGFORMER_MODE,
SPARSE_BLOCK: block,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
SPARSE_NUM_SLIDING_WINDOW_BLOCKS: num_sliding_window_blocks,
SPARSE_GLOBAL_BLOCK_INDICES: global_block_indices,
SPARSE_GLOBAL_BLOCK_END_INDICES: global_block_end_indices,
}
def get_sparse_attention_mode(param_dict):
if SPARSE_MODE in param_dict.keys():
return param_dict[SPARSE_MODE]
else:
return SPARSE_MODE_DEFAULT
def get_sparse_attention_type(param_dict):
if SPARSE_ATTENTION_TYPE in param_dict.keys():
return param_dict[SPARSE_ATTENTION_TYPE]
else:
return SPARSE_ATTENTION_TYPE_DEFAULT
def get_pipeline_config(param_dict):
"""Parses pipeline engine configuration. """
default_pipeline = {
"stages": "auto",
"partition": "best",
"seed_layers": False,
"activation_checkpoint_interval": 0,
}
config = default_pipeline
for key, val in param_dict.get("pipeline", {}).items():
config[key] = val
return config
def get_optimizer_name(param_dict):
if OPTIMIZER in param_dict.keys() and TYPE in param_dict[OPTIMIZER].keys():
return param_dict[OPTIMIZER][TYPE]
else:
return OPTIMIZER_TYPE_DEFAULT
def get_optimizer_params(param_dict):
if (get_optimizer_name(param_dict) is not None and OPTIMIZER_PARAMS in param_dict[OPTIMIZER].keys()):
return param_dict[OPTIMIZER][OPTIMIZER_PARAMS]
else:
return None
def get_optimizer_gradient_clipping(param_dict):
optimizer_params = get_optimizer_params(param_dict)
if optimizer_params is not None and MAX_GRAD_NORM in optimizer_params.keys():
return optimizer_params[MAX_GRAD_NORM]
else:
return None
def get_optimizer_legacy_fusion(param_dict):
if OPTIMIZER in param_dict.keys() and LEGACY_FUSION in param_dict[OPTIMIZER].keys():
return param_dict[OPTIMIZER][LEGACY_FUSION]
else:
return LEGACY_FUSION_DEFAULT
def get_zero_allow_untested_optimizer(param_dict):
return get_scalar_param(param_dict, ZERO_ALLOW_UNTESTED_OPTIMIZER, ZERO_ALLOW_UNTESTED_OPTIMIZER_DEFAULT)
def get_zero_force_ds_cpu_optimizer(param_dict):
return get_scalar_param(param_dict, ZERO_FORCE_DS_CPU_OPTIMIZER, ZERO_FORCE_DS_CPU_OPTIMIZER_DEFAULT)
def get_scheduler_name(param_dict):
if SCHEDULER in param_dict.keys() and TYPE in param_dict[SCHEDULER].keys():
return param_dict[SCHEDULER][TYPE]
else:
return SCHEDULER_TYPE_DEFAULT
def get_scheduler_params(param_dict):
if (get_scheduler_name(param_dict) is not None and SCHEDULER_PARAMS in param_dict[SCHEDULER].keys()):
return param_dict[SCHEDULER][SCHEDULER_PARAMS]
else:
return None
def get_train_batch_size(param_dict):
return get_scalar_param(param_dict, TRAIN_BATCH_SIZE, TRAIN_BATCH_SIZE_DEFAULT)
def get_train_micro_batch_size_per_gpu(param_dict):
return get_scalar_param(
param_dict,
TRAIN_MICRO_BATCH_SIZE_PER_GPU,
TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT,
)
def get_wall_clock_breakdown(param_dict):
return get_scalar_param(param_dict, WALL_CLOCK_BREAKDOWN, WALL_CLOCK_BREAKDOWN_DEFAULT)
def get_memory_breakdown(param_dict):
return get_scalar_param(param_dict, MEMORY_BREAKDOWN, MEMORY_BREAKDOWN_DEFAULT)
class HybridEngineConfig(DeepSpeedConfigModel):
enabled: bool = False
max_out_tokens: int = 512
inference_tp_size: int = 1
release_inference_cache: bool = False
pin_parameters: bool = True
tp_gather_partition_size: int = 8
def get_hybrid_engine_config(param_dict):
hybrid_engine_config_dict = param_dict.get("hybrid_engine", {})
hybrid_engine_config = HybridEngineConfig(**hybrid_engine_config_dict)
return hybrid_engine_config
def get_eigenvalue_config(param_dict):
if get_quantize_enabled(param_dict):
param_dict = param_dict[QUANTIZE_TRAINING]
assert not get_eigenvalue_enabled(param_dict), "Eigenvalue based MoQ is temporarily disabled"
return (
get_eigenvalue_enabled(param_dict),
get_eigenvalue_verbose(param_dict),
get_eigenvalue_max_iter(param_dict),
get_eigenvalue_tol(param_dict),
get_eigenvalue_stability(param_dict),
get_eigenvalue_gas_boundary_resolution(param_dict),
get_eigenvalue_layer_name(param_dict),
get_eigenvalue_layer_num(param_dict),
)
else:
return (
EIGENVALUE_ENABLED_DEFAULT,
EIGENVALUE_VERBOSE_DEFAULT,
EIGENVALUE_MAX_ITER_DEFAULT,
EIGENVALUE_TOL_DEFAULT,
EIGENVALUE_STABILITY_DEFAULT,
EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT,
EIGENVALUE_LAYER_NAME_DEFAULT,
EIGENVALUE_LAYER_NUM_DEFAULT,
)
def get_eigenvalue_enabled(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_ENABLED, EIGENVALUE_ENABLED_DEFAULT)
else:
return EIGENVALUE_ENABLED_DEFAULT
def get_eigenvalue_verbose(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_VERBOSE, EIGENVALUE_VERBOSE_DEFAULT)
else:
return EIGENVALUE_VERBOSE_DEFAULT
def get_eigenvalue_max_iter(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_MAX_ITER, EIGENVALUE_MAX_ITER_DEFAULT)
else:
return EIGENVALUE_MAX_ITER_DEFAULT
def get_eigenvalue_tol(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_TOL, EIGENVALUE_TOL_DEFAULT)
else:
return EIGENVALUE_TOL_DEFAULT
def get_eigenvalue_stability(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_STABILITY, EIGENVALUE_STABILITY_DEFAULT)
else:
return EIGENVALUE_STABILITY_DEFAULT
def get_eigenvalue_gas_boundary_resolution(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(
param_dict[EIGENVALUE],
EIGENVALUE_GAS_BOUNDARY_RESOLUTION,
EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT,
)
else:
return EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT
def get_eigenvalue_layer_name(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_LAYER_NAME, EIGENVALUE_LAYER_NAME_DEFAULT)
else:
return EIGENVALUE_LAYER_NAME_DEFAULT
def get_eigenvalue_layer_num(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_LAYER_NUM, EIGENVALUE_LAYER_NUM_DEFAULT)
else:
return EIGENVALUE_LAYER_NUM_DEFAULT
def get_checkpoint_params(param_dict):
return param_dict.get(CHECKPOINT, {})
def get_data_types_params(param_dict):
return param_dict.get(DATA_TYPES, {})
def get_checkpoint_tag_validation_mode(checkpoint_params):
tag_validation_mode = checkpoint_params.get(CHECKPOINT_TAG_VALIDATION, CHECKPOINT_TAG_VALIDATION_DEFAULT)
tag_validation_mode = tag_validation_mode.upper()
if tag_validation_mode in CHECKPOINT_TAG_VALIDATION_MODES:
return tag_validation_mode
else:
raise DeepSpeedConfigError(
"Checkpoint config contains invalid tag_validation "
f"value of {tag_validation_mode}, expecting one of {CHECKPOINT_TAG_VALIDATION_MODES}")
def get_checkpoint_parallel_write_pipeline(checkpoint_params):
par_write_params = checkpoint_params.get(CHECKPOINT_PARALLEL_WRITE, {})
par_write_pipeline = par_write_params.get(CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE,
CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE_DEFAULT)
if par_write_pipeline in [True, False]:
return par_write_pipeline
else:
raise DeepSpeedConfigError("checkpoint::parallel_write::pipeline_stage "
f"value of '{par_write_pipeline}' is invalid, expecting: true or false")
def get_dataloader_drop_last(param_dict):
return get_scalar_param(param_dict, DATALOADER_DROP_LAST, DATALOADER_DROP_LAST_DEFAULT)
'''Write deepspeed config files by modifying basic templates.
Can be used for quickly changing parameters via command line parameters.'''
class DeepSpeedConfigWriter:
def __init__(self, data=None):
self.data = data if data is not None else {}
def add_config(self, key, value):
self.data[key] = value
def load_config(self, filename):
self.data = json.load(open(filename, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys)
def write_config(self, filename):
with open(filename, "w") as outfile:
json.dump(self.data, outfile)
class DeepSpeedConfig(object):
def __init__(self, config: Union[str, dict], mpu=None):
super(DeepSpeedConfig, self).__init__()
if isinstance(config, dict):
self._param_dict = config
elif os.path.exists(config):
self._param_dict = hjson.load(open(config, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys)
else:
try:
config_decoded = base64.urlsafe_b64decode(config).decode('utf-8')
self._param_dict = hjson.loads(config_decoded)
except (UnicodeDecodeError, AttributeError):
raise ValueError(
f"Expected a string path to an existing deepspeed config, or a dictionary or a valid base64. Received: {config}"
)
try:
self.global_rank = dist.get_rank()
if mpu is None:
self.world_size = dist.get_world_size()
else:
self.world_size = mpu.get_data_parallel_world_size()
except:
self.global_rank = 0
self.world_size = 1
# If elastic-mode enabled, update compute + update _param_dict
self.elasticity_enabled = elasticity_enabled(self._param_dict)
if self.elasticity_enabled:
logger.info("DeepSpeed elasticity support enabled")
final_batch_size, valid_gpus, micro_batch_size = compute_elastic_config(
ds_config=self._param_dict,
target_deepspeed_version=__version__,
world_size=self.world_size,
)
elastic_dict = self._param_dict[ELASTICITY]
# Ensure the resource scheduler saw the same elastic config we are using at runtime
ensure_immutable_elastic_config(runtime_elastic_config_dict=elastic_dict)
self.elastic_model_parallel_size = elastic_dict.get(MODEL_PARALLEL_SIZE, MODEL_PARALLEL_SIZE_DEFAULT)
if self.elastic_model_parallel_size < 1:
raise ElasticityConfigError("Model-Parallel size cannot be less than 1, "
f"given model-parallel size: {self.elastic_model_parallel_size}")
self.num_gpus_per_node = elastic_dict.get(NUM_GPUS_PER_NODE, NUM_GPUS_PER_NODE_DEFAULT)
if self.num_gpus_per_node < 1:
raise ElasticityConfigError("NUmber of GPUs per node cannot be less than 1, "
f"given number of GPUs per node: {self.num_gpus_per_node}")
ignore_non_elastic_batch_info = elastic_dict.get(IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT)
if not ignore_non_elastic_batch_info:
batch_params = [
TRAIN_BATCH_SIZE,
TRAIN_MICRO_BATCH_SIZE_PER_GPU,
GRADIENT_ACCUMULATION_STEPS,
]
if any(map(lambda t: t in self._param_dict, batch_params)):
raise ElasticityConfigError("One or more batch related parameters were found in your " \
f"ds_config ({TRAIN_BATCH_SIZE}, {TRAIN_MICRO_BATCH_SIZE_PER_GPU}, and/or " \
f"{GRADIENT_ACCUMULATION_STEPS}). These parameters *will not be used* since " \
"elastic training is enabled, which takes control of these parameters. " \
"If you want to suppress this error (the parameters will be silently ignored) " \
f"please set {IGNORE_NON_ELASTIC_BATCH_INFO}':true in your elasticity config.")
# micro_bsz * world_size * gas = total_batch_size
# gas = total_batch_size // (micro_bsz * world_size)
gradient_accu_steps = final_batch_size // (micro_batch_size * self.world_size)
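# Hedged worked example of the line above (numbers are illustrative, not from
# any shipped config): with final_batch_size=32, micro_batch_size=2 and
# world_size=4, gradient_accu_steps = 32 // (2 * 4) = 4, i.e. every rank runs
# 4 micro-batches of size 2 before each optimizer step.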
if TRAIN_BATCH_SIZE in self._param_dict:
logger.warning("[Elasticity] overriding training_batch_size: "
f"{self._param_dict[TRAIN_BATCH_SIZE]} -> {final_batch_size}")
if TRAIN_MICRO_BATCH_SIZE_PER_GPU in self._param_dict:
logger.warning("[Elasticity] overriding train_micro_batch_size_per_gpu: "
f"{self._param_dict[TRAIN_MICRO_BATCH_SIZE_PER_GPU]} -> {micro_batch_size}")
if GRADIENT_ACCUMULATION_STEPS in self._param_dict:
logger.warning("[Elasticity] overriding gradient_accumulation_steps: "
f"{self._param_dict[GRADIENT_ACCUMULATION_STEPS]} -> {gradient_accu_steps}")
logger.info(f"[Elasticity] valid GPU counts: {valid_gpus}")
self._param_dict[TRAIN_BATCH_SIZE] = final_batch_size
self._param_dict[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = micro_batch_size
self._param_dict[GRADIENT_ACCUMULATION_STEPS] = gradient_accu_steps
# Pass a copy so that user json is unmodified, e.g. for logging
self._initialize_params(copy.copy(self._param_dict))
self._configure_train_batch_size()
self._do_sanity_check()
def _initialize_params(self, param_dict):
self.train_batch_size = get_train_batch_size(param_dict)
#print(f"beginning get_train_batch_size = {get_train_batch_size}")
self.train_micro_batch_size_per_gpu = get_train_micro_batch_size_per_gpu(param_dict)
self.gradient_accumulation_steps = get_gradient_accumulation_steps(param_dict)
self.steps_per_print = get_steps_per_print(param_dict)
self.dump_state = get_dump_state(param_dict)
self.disable_allgather = get_disable_allgather(param_dict)
self.communication_data_type = get_communication_data_type(param_dict)
self.prescale_gradients = get_prescale_gradients(param_dict)
self.gradient_predivide_factor = get_gradient_predivide_factor(param_dict)
self.sparse_gradients_enabled = get_sparse_gradients_enabled(param_dict)
self.zero_config = get_zero_config(param_dict)
self.mics_shard_size = self.zero_config.mics_shard_size
self.mics_hierarchial_params_gather = self.zero_config.mics_hierarchical_params_gather
self.zero_optimization_stage = self.zero_config.stage
self.zero_enabled = self.zero_optimization_stage > 0
self.activation_checkpointing_config = DeepSpeedActivationCheckpointingConfig(param_dict)
self.comms_config = DeepSpeedCommsConfig(param_dict)
self.monitor_config = get_monitor_config(param_dict)
self.gradient_clipping = get_gradient_clipping(param_dict)
self.fp16_enabled = get_fp16_enabled(param_dict)
self.fp16_auto_cast = get_fp16_auto_cast(param_dict)
self.bfloat16_enabled = get_bfloat16_enabled(param_dict)
assert not (self.fp16_enabled
and self.bfloat16_enabled), 'bfloat16 and fp16 modes cannot be simultaneously enabled'
self.fp16_master_weights_and_gradients = get_fp16_master_weights_and_grads_enabled(param_dict)
self.amp_enabled = get_amp_enabled(param_dict)
self.amp_params = get_amp_params(param_dict)
self.loss_scale = get_loss_scale(param_dict)
self.initial_dynamic_scale = get_initial_dynamic_scale(param_dict)
self.dynamic_loss_scale_args = get_dynamic_loss_scale_args(param_dict)
self.compression_config = get_compression_config(param_dict)
self.optimizer_name = get_optimizer_name(param_dict)
if (self.optimizer_name is not None and self.optimizer_name.lower() in DEEPSPEED_OPTIMIZERS):
self.optimizer_name = self.optimizer_name.lower()
self.optimizer_params = get_optimizer_params(param_dict)
self.optimizer_legacy_fusion = get_optimizer_legacy_fusion(param_dict)
self.zero_allow_untested_optimizer = get_zero_allow_untested_optimizer(param_dict)
self.zero_force_ds_cpu_optimizer = get_zero_force_ds_cpu_optimizer(param_dict)
self.scheduler_name = get_scheduler_name(param_dict)
self.scheduler_params = get_scheduler_params(param_dict)
self.flops_profiler_config = DeepSpeedFlopsProfilerConfig(param_dict)
self.wall_clock_breakdown = (get_wall_clock_breakdown(param_dict) | self.flops_profiler_config.enabled)
self.memory_breakdown = get_memory_breakdown(param_dict)
self.autotuning_config = DeepSpeedAutotuningConfig(param_dict)
(
self.eigenvalue_enabled,
self.eigenvalue_verbose,
self.eigenvalue_max_iter,
self.eigenvalue_tol,
self.eigenvalue_stability,
self.eigenvalue_gas_boundary_resolution,
self.eigenvalue_layer_name,
self.eigenvalue_layer_num,
) = get_eigenvalue_config(param_dict)
self.hybrid_engine = get_hybrid_engine_config(param_dict)
self.sparse_attention = get_sparse_attention(param_dict)
self.pipeline = get_pipeline_config(param_dict)
self.pld_enabled = get_pld_enabled(param_dict)
self.pld_params = get_pld_params(param_dict)
self.curriculum_enabled_legacy = get_curriculum_enabled_legacy(param_dict)
self.curriculum_params_legacy = get_curriculum_params_legacy(param_dict)
self.data_efficiency_enabled = get_data_efficiency_enabled(param_dict)
self.data_efficiency_config = get_data_efficiency_config(param_dict)
checkpoint_params = get_checkpoint_params(param_dict)
validation_mode = get_checkpoint_tag_validation_mode(checkpoint_params)
self.checkpoint_tag_validation_enabled = (validation_mode != ValidationMode.IGNORE)
self.checkpoint_tag_validation_fail = validation_mode == ValidationMode.FAIL
self.load_universal_checkpoint = checkpoint_params.get(LOAD_UNIVERSAL_CHECKPOINT,
LOAD_UNIVERSAL_CHECKPOINT_DEFAULT)
self.use_node_local_storage = checkpoint_params.get(USE_NODE_LOCAL_STORAGE_CHECKPOINT,
USE_NODE_LOCAL_STORAGE_CHECKPOINT_DEFAULT)
data_types_params = get_data_types_params(param_dict)
self.grad_accum_dtype = data_types_params.get(GRAD_ACCUM_DTYPE, GRAD_ACCUM_DTYPE_DEFAULT)
par_write_pipe = get_checkpoint_parallel_write_pipeline(checkpoint_params)
self.checkpoint_parallel_write_pipeline = par_write_pipe
self.aio_config = get_aio_config(param_dict)
self.dataloader_drop_last = get_dataloader_drop_last(param_dict)
self.nebula_config = DeepSpeedNebulaConfig(param_dict)
def _batch_assertion(self):
train_batch = self.train_batch_size
micro_batch = self.train_micro_batch_size_per_gpu
grad_acc = self.gradient_accumulation_steps
assert (train_batch > 0), f"Train batch size: {train_batch} has to be greater than 0"
assert (micro_batch > 0), f"Micro batch size per gpu: {micro_batch} has to be greater than 0"
assert (grad_acc > 0), f"Gradient accumulation steps: {grad_acc} has to be greater than 0"
assert train_batch == micro_batch * grad_acc * self.world_size, (
f"Check batch related parameters. train_batch_size is not equal "
"to micro_batch_per_gpu * gradient_acc_step * world_size "
f"{train_batch} != {micro_batch} * {grad_acc} * {self.world_size}")
def _set_batch_related_parameters(self):
train_batch = self.train_batch_size
micro_batch = self.train_micro_batch_size_per_gpu
grad_acc = self.gradient_accumulation_steps
#print(f"train_batch = {train_batch}, micro_batch={micro_batch}")
# all values are provided nothing needs to be set
if train_batch is not None and micro_batch is not None and grad_acc is not None:
return
# gradient_accumulation_steps needs to be set
elif train_batch is not None and micro_batch is not None:
grad_acc = train_batch // micro_batch
grad_acc //= self.world_size
self.gradient_accumulation_steps = grad_acc
# micro_batch_per_gpu needs to be set
elif train_batch is not None and grad_acc is not None:
micro_batch = train_batch // self.world_size
micro_batch //= grad_acc
self.train_micro_batch_size_per_gpu = micro_batch
# train_batch_size needs to be set
elif micro_batch is not None and grad_acc is not None:
train_batch_size = micro_batch * grad_acc
train_batch_size *= self.world_size
self.train_batch_size = train_batch_size
# gradient_accumulation_steps and micro_batch_per_gpu need to be set
elif train_batch is not None:
self.gradient_accumulation_steps = 1
self.train_micro_batch_size_per_gpu = train_batch // self.world_size
# train_batch_size and gradient_accumulation_steps need to be set
elif micro_batch is not None:
self.train_batch_size = micro_batch * self.world_size
self.gradient_accumulation_steps = 1
# either none of the three parameters are provided or just gradient_accumulation_step is provided
else:
assert False, \
'Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided'
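# Hedged worked example of the branches above (values are illustrative): with
# world_size=8 and only train_batch_size=64 plus train_micro_batch_size_per_gpu=4
# given, gradient_accumulation_steps is derived as 64 // 4 // 8 = 2; conversely,
# with only micro_batch=4 and grad_acc=2 given, train_batch_size = 4 * 2 * 8 = 64.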
def _configure_train_batch_size(self):
self._set_batch_related_parameters()
self._batch_assertion()
def _do_sanity_check(self):
self._do_error_check()
self._do_warning_check()
def print_user_config(self):
logger.info(" json = {}".format(
json.dumps(
self._param_dict,
sort_keys=True,
indent=4,
cls=ScientificNotationEncoder,
separators=(",", ":"),
)))
def print(self, name):
logger.info("{}:".format(name))
for arg in sorted(vars(self)):
if arg != "_param_dict":
dots = "." * (29 - len(arg))
logger.info(" {} {} {}".format(arg, dots, getattr(self, arg)))
self.print_user_config()
def _do_error_check(self):
assert (self.train_micro_batch_size_per_gpu
), "DeepSpeedConfig: {} is not defined".format(TRAIN_MICRO_BATCH_SIZE_PER_GPU)
assert (
self.gradient_accumulation_steps), "DeepSpeedConfig: {} is not defined".format(GRADIENT_ACCUMULATION_STEPS)
if self.zero_enabled:
assert (self.zero_optimization_stage <=
ZeroStageEnum.max_stage), "DeepSpeedConfig: Maximum supported ZeRO stage is {}".format(
ZeroStageEnum.max_stage)
if self.fp16_master_weights_and_gradients:
assert self.zero_enabled and self.zero_optimization_stage == ZeroStageEnum.gradients, "Fp16_master_weights_and_grads is only supported with ZeRO Stage 2 for now."
def _do_warning_check(self):
fp16_enabled = self.fp16_enabled
vocabulary_size = self._param_dict.get(VOCABULARY_SIZE, VOCABULARY_SIZE_DEFAULT)
if vocabulary_size and vocabulary_size % TENSOR_CORE_ALIGN_SIZE != 0:
logger.warning(
"DeepSpeedConfig: vocabulary size {} is not aligned to {}, may import tensor core utilization.".format(
vocabulary_size, TENSOR_CORE_ALIGN_SIZE))
if (self.optimizer_params is not None and MAX_GRAD_NORM in self.optimizer_params.keys()
and self.optimizer_params[MAX_GRAD_NORM] > 0):
if fp16_enabled:
if self.global_rank == 0:
logger.warning("DeepSpeedConfig: In FP16 mode, DeepSpeed will pass {}:{} to FP16 wrapper".format(
MAX_GRAD_NORM, self.optimizer_params[MAX_GRAD_NORM]))
else:
if self.global_rank == 0:
logger.warning(
"DeepSpeedConfig: In FP32 mode, DeepSpeed does not permit MAX_GRAD_NORM ({}) > 0, setting to zero"
.format(self.optimizer_params[MAX_GRAD_NORM]))
self.optimizer_params[MAX_GRAD_NORM] = 0.0
| 39,863 | 38.705179 | 174 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/sparse_tensor.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Implementation of a compressed sparse tensor. Similar in
functionality to TensorFlow's IndexedSlices implementation.
"""
import torch
class SparseTensor(object):
""" Compressed Sparse Tensor """
def __init__(self, dense_tensor=None):
self.orig_dense_tensor = dense_tensor
self.is_sparse = dense_tensor.is_sparse if dense_tensor is not None else False
if dense_tensor is not None:
if dense_tensor.is_sparse:
dense_tensor = dense_tensor.coalesce()
self.indices = dense_tensor.indices().flatten()
self.values = dense_tensor.values()
else:
result = torch.sum(dense_tensor, dim=1)
self.indices = result.nonzero().flatten()
self.values = dense_tensor[self.indices]
self.dense_size = list(dense_tensor.size())
else:
self.indices = None
self.values = None
self.dense_size = None
def to_coo_tensor(self):
return torch.sparse_coo_tensor(self.indices.unsqueeze(0), self.values, self.dense_size)
@staticmethod
def type():
return "deepspeed.SparseTensor"
def to_dense(self):
it = self.indices.unsqueeze(1)
full_indices = torch.cat([it for _ in range(self.dense_size[1])], dim=1)
return self.values.new_zeros(self.dense_size).scatter_add_(0, full_indices, self.values)
def sparse_size(self):
index_size = list(self.indices.size())
index_size = index_size[0]
value_size = list(self.values.size())
value_size = value_size[0] * value_size[1]
dense_size = self.dense_size[0] * self.dense_size[1]
return index_size + value_size, dense_size
def add(self, b):
assert self.dense_size == b.dense_size
self.indices = torch.cat([self.indices, b.indices])
self.values = torch.cat([self.values, b.values])
def __str__(self):
sparse_size, dense_size = self.sparse_size()
return "DeepSpeed.SparseTensor(indices_size={}, values_size={}, " \
"dense_size={}, device={}, reduction_factor={})".format(
self.indices.size(), self.values.size(), self.dense_size,
self.indices.get_device(), dense_size / sparse_size
)
def __repr__(self):
return self.__str__()
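# Hedged usage sketch for the class above (kept as a comment so importing the
# module stays side-effect free; the tensor values are illustrative):
#
# grad = torch.zeros(4, 3)
# grad[1] = torch.tensor([1., 2., 3.]) # a row-sparse dense gradient
# st = SparseTensor(grad) # st.indices == tensor([1]), st.values == grad[[1]]
# assert torch.equal(st.to_dense(), grad) # round-trips back to the dense form
# compressed, dense = st.sparse_size() # compressed = 1 + 3, dense = 4 * 3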
| 2,416 | 34.028986 | 96 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/quantize.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import math
from deepspeed.utils import logger
from deepspeed.ops.quantizer import ds_quantizer
TWO_D_PARAMS = 6
class Quantizer(object):
def __init__(self,
q_groups=1,
q_mixed_fp16=False,
q_change_ratio=0.01,
q_type=0,
q_rounding=0,
q_verbose=False,
q_eigenvalue=False,
use_quantizer_kernel=False,
layer_num=0):
self.q_groups = q_groups
self.q_mixed_fp16 = q_mixed_fp16
self.q_change_ratio = q_change_ratio
self.q_type = q_type
self.qsteps = 0
self.quantize_real_ratio = 1.000
self.q_verbose = q_verbose
self.q_eigenvalue = q_eigenvalue
self.use_quantizer_kernel = use_quantizer_kernel
self.q_rounding = q_rounding
self.layer_num = layer_num
def any_precision_switch(self):
# Temporary disabled functionality
if self.layer_num == 0:
return True
result = False
for index in range(self.layer_num):
if self.q_start_bits[index] != self.q_target_bits:
next_step = self.qsteps + (TWO_D_PARAMS * (self.layer_num if self.layer_num != 0 else 1))
if next_step >= self.q_period[index]:
result = True
return result
def quantize(self, parameter_group, overflow, eigenvalue_enabled, block_eigenvalue={}):
if overflow and not eigenvalue_enabled:
return
self.step()
self.update_fp16_ratio()
for i in range(len(parameter_group)):
for p in parameter_group[i]:
if len(p.size()) > 1 and hasattr(p, "start_bits") and p.start_bits:
param_id = id(p)
if block_eigenvalue is None:
eigenvalue, layer_id = None, 0
else:
eigenvalue, layer_id = block_eigenvalue[param_id] if param_id in block_eigenvalue else (None,
0)
if eigenvalue is not None:
factor = 1 + math.floor(eigenvalue * 4)
p.data = self.compute_quantization(p.data, layer_id, factor)
else:
p.data = self.compute_quantization(p, layer_id)
def step(self):
self.qsteps += 1
def quantize_highbit(self, inputs, num_bits):
q_range = 2**num_bits
input_flat = inputs.reshape(self.q_groups, -1)
g_min = input_flat.amin(dim=-1, keepdim=True)
g_max = input_flat.amax(dim=-1, keepdim=True)
# Random number generator (Uniform)
if self.q_rounding == 'nearest':
p = 0.
else:
p = input_flat.new(input_flat.shape).uniform_(-0.5, 0.5)
if self.q_type == 'symmetric':
scale = 2 * torch.max(torch.abs(g_min), torch.abs(g_max)) / q_range
zero_point = 0.
input_flat = (input_flat / scale + p).round().clamp(-(q_range >> 1), (q_range >> 1) - 1) * scale
elif self.q_type == 'asymmetric':
scale = (g_max - g_min) / q_range
zero_point = (g_min / scale).round() * scale
input_flat = ((input_flat - zero_point) / scale + p).round().clamp(0, (q_range - 1)) * scale + zero_point
output = input_flat.reshape(inputs.shape).contiguous()
return output
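# Hedged worked example of the symmetric branch above (numbers are illustrative):
# with num_bits=8 and q_groups=1, q_range=256; for a group with min=-0.5 and
# max=1.0 the scale is 2 * max(0.5, 1.0) / 256 = 1/128, so a value of 0.3 maps
# to round(0.3 * 128 + p) = 38 (for nearest rounding, p=0) and dequantizes to
# 38 / 128 = 0.296875 after clamping to [-128, 127].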
def quantize_tenary(self, inputs):
input_flat = inputs.reshape(self.q_groups, -1)
n = input_flat.shape[1]
m = input_flat.norm(p=1, dim=1).div(n)
thres = (0.7 * m).view(-1, 1) #.expand_as(input_flat)
pos = (input_flat > thres).type(inputs.type())
neg = (input_flat < -thres).type(inputs.type())
mask = (input_flat.abs() > thres).type(inputs.type())
alpha = ((mask * input_flat).abs().sum(dim=1) / mask.sum(dim=1)).view(-1, 1)
output = alpha * pos - alpha * neg
output = output.reshape(inputs.shape).contiguous()
return output
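# Hedged worked example of the ternary scheme above (values are illustrative):
# for a group [-1.0, 0.1, 0.9, 0.0], m = mean(|x|) = 0.5 and thres = 0.35, so
# only -1.0 and 0.9 pass the mask; alpha = (1.0 + 0.9) / 2 = 0.95 and the group
# quantizes to [-0.95, 0.0, 0.95, 0.0].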
def quantize_binary(self, inputs):
input_flat = inputs.reshape(self.q_groups, -1)
n = input_flat.shape[1]
m = input_flat.norm(p=1, dim=1, keepdim=True).div(n)
output = input_flat.sign().mul(m)
output = output.reshape(inputs.shape).contiguous()
return output
def mixed_fp16_quantize(self, input, input_q, index):
if self.q_mixed_fp16 and self.q_start_bits[index] >= (self.q_target_bits - 1):
input_q = input * self.quantize_real_ratio + (1 - self.quantize_real_ratio) * input_q
return input_q
return input_q
def compute_quantization(self, input, index=0, factor=1):
# fixing the quantization bits based on the training steps
# when reducing 1 bit at each period, we increase the period
# to go slowly toward the target quantization bits
# the period and starting bit can be configured
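# Hedged illustration of the schedule (settings are hypothetical): starting at
# start_bits=8 with target_bits=4 and q_period=100, the precision drops to 7
# bits once qsteps reaches 100, q_period is doubled (and scaled by `factor`),
# so the next drop happens around step 200 * factor, and so on until
# target_bits is reached.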
if input.start_bits != input.target_bits:
if self.qsteps >= input.q_period:
self.quantize_real_ratio = 1.0
input.q_period <<= 1
input.q_period *= factor
input.start_bits -= 1
if self.q_verbose:
logger.info(
f'Quantization settings: current bit-precision = {input.start_bits}, step = {self.qsteps}, quantization period = {input.q_period}, index = {index}'
)
assert (input.start_bits >= input.target_bits), \
'Quantization bit is lower than target precision bits!'
if self.use_quantizer_kernel:
if input.start_bits <= 2:
raise ValueError('Quantization bit is too low, please do it without quantization kernel!')
input_q = ds_quantizer(input.data.clone(),
self.q_groups,
input.start_bits,
asym=False if self.q_type == 'symmetric' else True,
sr=False if self.q_rounding == 'nearest_neighbor' else True)
else:
if input.start_bits >= 3:
input_flat = self.quantize_highbit(input.data, input.start_bits)
elif input.start_bits == 2:
assert self.q_type == 'symmetric', 'Quantization type is not symmetric!'
assert self.q_rounding == 'nearest', 'Quantization rounding is not nearest_neighbor!'
input_flat = self.quantize_tenary(input.data)
elif input.start_bits == 1:
assert self.q_type == 'symmetric', 'Quantization type is not symmetric!'
assert self.q_rounding == 'nearest', 'Quantization rounding is not nearest_neighbor!'
input_flat = self.quantize_binary(input.data)
if self.use_quantizer_kernel:
return self.mixed_fp16_quantize(input.data, input_q, index)
else:
if self.q_mixed_fp16 and input.start_bits >= input.target_bits - 1:
input_flat = self.quantize_real_ratio * input.data + \
(1 - self.quantize_real_ratio) * input_flat
return input_flat
def update_fp16_ratio(self):
if self.q_mixed_fp16:
if self.quantize_real_ratio > 0:
self.quantize_real_ratio -= self.q_change_ratio
else:
self.quantize_real_ratio = 0.000
| 7,699 | 41.541436 | 171 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/hybrid_engine.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.inference.config import DeepSpeedInferenceConfig
from deepspeed.module_inject.replace_policy import replace_policies
from deepspeed.module_inject.utils import policy_to_ds_container
from .engine import DeepSpeedEngine
from .utils import TLinear, get_inactive_params
from deepspeed.runtime.zero import GatheredParameters
import time
import gc
import math
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator
from torch import nn
from deepspeed.utils import logger
from deepspeed.ops.op_builder import InferenceBuilder
from deepspeed.module_inject.layers import LinearLayer, Normalize, EmbeddingLayer, OPTEmbedding
try:
import transformers
OPTLearnedPositionalEmbedding = transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding
except:
OPTLearnedPositionalEmbedding = None
inference_cuda_module = None
class DeepSpeedHybridEngine(DeepSpeedEngine):
r"""DeepSpeed engine for training and inference."""
inference_mp_group = None
def __init__(self, args, model, **kwargs):
super().__init__(args, model, **kwargs)
# synch seed between all GPUs
_rng_state = get_accelerator().get_rng_state().to(get_accelerator().current_device_name())
dist.broadcast(_rng_state, 0)
get_accelerator().set_rng_state(_rng_state.cpu())
self.Z3_enabled = (self._config.zero_config.stage == 3)
self.gather_all_layers = self._config.hybrid_engine.pin_parameters
# inference containers / fwds
self._inference_containers = []
self._orig_modules = []
self._orig_fwds = []
self.create_inference_module()
# Performance stats
self._t_start = None
self._total_latency = 0
self._iters = 0
self._training_start_time = None
self._generate_latency = 0
self._training_latency = 0
self._total_batch_size = None
self._gather_latency = 0
global inference_cuda_module
if inference_cuda_module is None:
builder = InferenceBuilder()
inference_cuda_module = builder.load()
self.is_lora_fused = False
def convert_to_linear_transposed(self, model):
def _replace_linear_layer(r_module, parent_type=None, prev_type=None):
for name, child in r_module.named_children():
if child.__class__ in [torch.nn.Linear] and \
(parent_type is torch.nn.ModuleList or prev_type is torch.nn.ModuleList):
setattr(r_module, name, TLinear(child, name))
else:
_replace_linear_layer(child, type(r_module), prev_type=parent_type)
return r_module
_replace_linear_layer(model)
def new_inference_container(self, orig_layer, policy_cls, layer_id):
policy = policy_cls(orig_layer, inference=True)
if self._config.fp16_enabled:
inference_dtype = torch.float16
elif self._config.bfloat16_enabled:
inference_dtype = torch.bfloat16
else:
inference_dtype = torch.float32
_container = policy_to_ds_container(
policy=policy,
config=DeepSpeedInferenceConfig(
set_empty_params=True,
dtype=inference_dtype,
max_out_tokens=self._config.hybrid_engine.max_out_tokens,
min_out_tokens=self._config.hybrid_engine.max_out_tokens,
transposed_mode=True,
),
model_config=self.module.config if hasattr(self.module, 'config') else None,
layer_id=layer_id,
child=orig_layer)
if self.mpu is not None:
if hasattr(self.mpu, 'get_model_parallel_world_size'):
_container.set_tensor_parallel_config(self.mpu.get_model_parallel_world_size(),
self.mpu.get_model_parallel_group())
else:
_container.set_tensor_parallel_config(self.mpu.get_tensor_model_parallel_world_size(),
self.mpu.get_tensor_model_parallel_group())
else:
_container.set_tensor_parallel_config(self._config.hybrid_engine.inference_tp_size, self.mp_group)
_container.initialize_tensors(enable_training=True)
_container.create_ds_model_config()
_container.create_module()
_container.set_params_wo_copy(Z3_enabled=self.Z3_enabled)
return _container
def populate_all_inference_policies(self):
self.inference_policies = {}
for plcy in replace_policies:
_ = plcy(None)
if isinstance(plcy._orig_layer_class, list):
for orig_layer_class in plcy._orig_layer_class:
self.inference_policies.update({orig_layer_class: (self.new_inference_container, plcy)})
elif plcy._orig_layer_class is not None:
self.inference_policies.update({plcy._orig_layer_class: (self.new_inference_container, plcy)})
self.inference_policies.update({
nn.Linear: (LinearLayer, ),
nn.Embedding: (EmbeddingLayer, ),
nn.LayerNorm: (Normalize, ),
OPTLearnedPositionalEmbedding: (OPTEmbedding, )
})
def _fuse_lora_layer(self, layer_id):
self._inference_containers[layer_id].fuse_lora()
def fuse_lora_weight(self):
for layer_id in range(len(self.layer_params)):
self._fuse_lora_layer(layer_id)
def _unfuse_lora_layer(self, layer_id):
self._inference_containers[layer_id].unfuse_lora()
def unfuse_lora_weight(self):
for layer_id in range(len(self.layer_params)):
self._unfuse_lora_layer(layer_id)
def unfuse_lora_weight_non_pinned(self):
for layer_id in range(len(self.layer_params)):
non_active_params = get_inactive_params(self.layer_params[layer_id])
non_active_lora_params = get_inactive_params(self.layer_lora_params[layer_id])
non_active_params.extend(non_active_lora_params)
with GatheredParameters(non_active_params):
self._unfuse_lora_layer(layer_id)
def retake_inference_cache(self):
if self._config.hybrid_engine.release_inference_cache:
retake_success = inference_cuda_module.retake_workspace()
if not retake_success:
logger.warning("Unable to acquire workspace on first attempt, emptying cache and retrying.")
gc.collect()
get_accelerator().empty_cache()
retake_success = inference_cuda_module.retake_workspace()
if not retake_success:
raise RuntimeError("Unable to retake inference workspace.")
def generate(self, *inputs, **kwargs):
if self._total_batch_size is None:
bsz = inputs[0].shape[0] if len(inputs) > 0 else \
kwargs['input_ids'].shape[0]
self._total_batch_size = bsz * dist.get_world_size()
self._t0 = time.time()
if self.Z3_enabled and self.gather_all_layers:
if self._config.hybrid_engine.inference_tp_size > 1:
non_tp_params = []
for other_layer in self._other_layers:
non_tp_params.extend(list(other_layer.parameters()))
partition_size = self._config.hybrid_engine.tp_gather_partition_size
layer_groups = math.ceil(len(self.layer_params) / partition_size)
for lg in range(layer_groups):
non_active_params = []
non_active_lora_params = []
for layer_id in range(lg * partition_size, min(len(self.layer_params), (lg + 1) * partition_size),
1):
non_tp_params.extend(self.layer_params[layer_id][:4])
non_active_params.extend(get_inactive_params(self.layer_params[layer_id]))
non_active_params.extend(get_inactive_params(self.layer_lora_params[layer_id]))
with GatheredParameters(non_active_params):
for layer_id in range(lg * partition_size,
min(len(self.layer_params), (lg + 1) * partition_size), 1):
if len(self.all_lora_params) > 0:
self._fuse_lora_layer(layer_id)
if self.mpu is not None:
self._inference_containers[layer_id].apply_tensor_parallelism(self.mp_replace,
reversed_dim=True)
# TODO(cmikeh2) Evaluate if this can be deferred when release_inference_cache
# is enabled.
gc.collect()
get_accelerator().empty_cache()
self._gather_latency = time.time() - self._t0
input_shape = inputs[0].shape if len(inputs) > 0 else \
kwargs['input_ids'].shape
output = torch.zeros(
(input_shape[0] * self._config.hybrid_engine.inference_tp_size, ) + input_shape[1:],
dtype=inputs[0].dtype if len(inputs) > 0 else kwargs['input_ids'].dtype,
device=inputs[0].device if len(inputs) > 0 else kwargs['input_ids'].device)
input_cont = inputs[0].contiguous() if len(inputs) > 0 else kwargs['input_ids'].contiguous()
dist.all_gather_into_tensor(output, input_cont, group=self.mp_group)
if len(inputs) > 0:
inputs = (output, )
else:
kwargs['input_ids'] = output
self.retake_inference_cache()
non_active_params = get_inactive_params(non_tp_params)
with GatheredParameters(non_active_params):
generate_ret_vals = self._generate(*inputs, **kwargs)
for layer_id in range(len(self.layer_params)):
self._inference_containers[layer_id].release_memory()
rank = dist.get_rank(group=self.mp_group)
generate_ret_vals = generate_ret_vals[input_shape[0] * rank:input_shape[0] * (rank + 1)]
else:
non_active_layers = get_inactive_params(self.all_layers_params)
non_active_lora_params = get_inactive_params(self.all_lora_params)
non_active_layers.extend(non_active_lora_params)
with GatheredParameters(non_active_layers):
self._gather_latency = time.time() - self._t0
if len(self.all_lora_params) > 0:
self.fuse_lora_weight()
self.retake_inference_cache()
generate_ret_vals = self._generate(*inputs, **kwargs)
if len(self.all_lora_params) > 0:
self.unfuse_lora_weight()
else:
if len(self.all_lora_params) > 0 and (not self.Z3_enabled):
self.fuse_lora_weight()
self.retake_inference_cache()
generate_ret_vals = self._generate(*inputs, **kwargs)
if len(self.all_lora_params) > 0:
if (not self.Z3_enabled):
self.unfuse_lora_weight()
else:
self.unfuse_lora_weight_non_pinned()
self.is_lora_fused = False
if self._config.hybrid_engine.release_inference_cache:
inference_cuda_module.release_workspace()
gc.collect()
get_accelerator().empty_cache()
self._generate_latency = time.time() - self._t0 - self._gather_latency
return generate_ret_vals
def create_inference_containers(self, module, layer_id=0):
for name, child in module.named_children():
if child.__class__ in self.inference_policies:
if self.inference_policies[child.__class__][0] == self.new_inference_container:
self._inference_containers.append(self.inference_policies[child.__class__][0](
child, self.inference_policies[child.__class__][-1], layer_id))
self._orig_modules.append(child)
self._orig_fwds.append(child.forward)
self.layer_params.append(self._inference_containers[layer_id].get_all_params())
self.lora_params.append(self._inference_containers[layer_id].get_lora_params())
self.layer_lora_params.append([])
for lora_param in self.lora_params[layer_id]:
self.layer_lora_params[layer_id].extend(lora_param[:-1])
self.all_lora_params.extend(lora_param[:-1])
layer_id += 1
else:
self._other_layers.append(self.inference_policies[child.__class__][0](
weight=child.weight, bias=child.bias if hasattr(child, 'bias') else None))
self._orig_modules_others.append(child)
self._orig_fwds_others.append(child.forward)
else:
self.create_inference_containers(child, layer_id=layer_id)
def create_inference_module(self):
self.layer_params = []
self.layer_lora_params = []
self.lora_params = []
self.all_lora_params = []
self._other_layers = []
self._orig_modules_others = []
self._orig_fwds_others = []
if self._config.hybrid_engine.inference_tp_size > 1:
if self.mpu is None:
global_rank = dist.get_rank()
world_size = dist.get_world_size()
mp_group_id = global_rank // self._config.hybrid_engine.inference_tp_size
num_mp_groups = world_size // self._config.hybrid_engine.inference_tp_size
for mp_group_id in range(num_mp_groups):
ranks = list(
range(mp_group_id * self._config.hybrid_engine.inference_tp_size, \
(mp_group_id + 1) * self._config.hybrid_engine.inference_tp_size, \
1)
)
mp_group = dist.new_group(ranks)
if global_rank in ranks:
# mp_group is used for broader collective
self.mp_group = mp_group
# mp_replace is used for container tensor slicing
from deepspeed.module_inject import ReplaceWithTensorSlicing
self.mp_replace = ReplaceWithTensorSlicing(
mp_group=self.mp_group,
mp_size=self._config.hybrid_engine.inference_tp_size,
out_dim=0,
in_dim=1)
else:
self.mp_group = self.mpu.get_model_parallel_group() if hasattr(self.mpu, 'get_model_parallel_group') else \
self.mpu.get_tensor_model_parallel_group()
from deepspeed.module_inject import ReplaceWithTensorSlicing
self.mp_replace = ReplaceWithTensorSlicing(mp_group=self.mp_group,
mp_size=self._config.hybrid_engine.inference_tp_size,
out_dim=0,
in_dim=1)
else:
self.mp_group = None
self.mp_replace = None
self.populate_all_inference_policies()
self.all_layers_params = list(self.module.parameters())
self.create_inference_containers(self.module)
if len(self._inference_containers) > 0:
self._generate = self.module.generate
self.module.generate = self.generate
self._t0 = time.time()
def _zero3_forward(self, layer_id):
def run_forward(*inputs, **kwargs):
non_active_params = get_inactive_params(self.layer_params[layer_id])
non_active_lora_params = get_inactive_params(self.layer_lora_params[layer_id])
non_active_params.extend(non_active_lora_params)
with GatheredParameters(non_active_params):
if len(self.all_lora_params) > 0:
# Use the is_lora_fused flag to prevent multiple fusion in Z3 with non-pinned memory
if not self.is_lora_fused:
self._fuse_lora_layer(layer_id)
# Set the is_lora_fused to true when reaching the last layer
if layer_id == len(self.layer_params) - 1:
self.is_lora_fused = True
return self._inference_containers[layer_id].module.forward(*inputs, **kwargs)
return run_forward
def eval(self):
if self._t_start is not None:
latency = time.time() - self._t_start
self._total_latency = self._total_latency + latency
self._iters = self._iters + 1
if not dist.is_initialized() or dist.get_rank() == 0:
others = latency - (self._generate_latency + self._training_latency)
print(f'|E2E latency={(latency):.2f}s ' + \
f'|Gather latency={self._gather_latency:.2f}s ({(self._gather_latency / latency * 100):.2f}%) '
f'|Generate time={(self._generate_latency):.2f}s ({(self._generate_latency / latency * 100):.2f}%) ' + \
f'|Training time={(self._training_latency):.2f}s ({(self._training_latency / latency * 100):.2f}%) ' + \
f'|Others={others:.2f} ({(others / latency * 100):.2f}%)'
f'|CurSamplesPerSec={(1 / latency * self._total_batch_size):.2f} ' + \
f'|AvgSamplesPerSec={(1 / (self._total_latency / self._iters) * self._total_batch_size):.2f}')
self._t_start = time.time()
self._training_latency = 0
super().eval()
if len(self._inference_containers) > 0:
for i, (orig_module, inference_container) in enumerate(zip(self._orig_modules,
self._inference_containers)):
if self.Z3_enabled and not self.gather_all_layers:
orig_module.forward = self._zero3_forward(i)
else:
orig_module.forward = inference_container.module.forward
inference_container.transform_for_inference()
if not self.Z3_enabled or self.gather_all_layers:
for orig_module, inference_layer in zip(self._orig_modules_others, self._other_layers):
orig_module.forward = inference_layer.forward
if self.Z3_enabled:
gc.collect()
get_accelerator().empty_cache()
if self._t_start is None:
self._t_start = time.time()
def train(self, mode=True):
if mode and len(self._orig_modules) > 0:
for inference_container, orig_module, orig_fwd in zip(self._inference_containers, self._orig_modules,
self._orig_fwds):
inference_container.transform_for_training()
orig_module.forward = orig_fwd
for orig_module, orig_fwd in zip(self._orig_modules_others, self._orig_fwds_others):
orig_module.forward = orig_fwd
super().train(mode)
if mode:
self._training_start_time = time.time()
def step(self, lr_kwargs=None):
super().step(lr_kwargs=lr_kwargs)
if len(self._inference_containers) > 0:
if not self.Z3_enabled:
for inference_container in self._inference_containers:
inference_container.reset_params()
if self._training_start_time is not None:
self._training_latency += (time.time() - self._training_start_time)
self._training_start_time = time.time()
| 20,369 | 45.190476 | 126 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/activation_checkpointing/checkpointing.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Used to partition the activations stored for backward propagation,
thereby reducing memory consumption.
Also implements CPU checkpointing and contiguous memory checkpointing,
which reduce memory consumption and memory fragmentation.
Code for rng checkpointing taken from NVIDIA Megatron-LM mpu/random.py
b886b7bb972afe72bac0f5de4f42a4a7bae8ebef
"""
# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch
import copy
import torch
import contextlib
from deepspeed import comm as dist
import mmap
from torch import _C
from deepspeed.runtime.config import DeepSpeedConfig
from deepspeed.utils import logger
from deepspeed.runtime.utils import copy_to_device, move_to_device, see_memory_usage, bwc_tensor_model_parallel_rank
from deepspeed.utils.timer import SynchronizedWallClockTimer as Timers
from deepspeed.accelerator import get_accelerator
# DeepSpeed Checkpointing Enabled or Disabled
deepspeed_checkpointing_enabled = False
# MP parameters
mpu = None
mp_rank = None
mp_size = None
mp_group = None
# Model Parameters
num_layers = None
# Checkpointing buffers
contiguous_data_buffers = []
data_offsets = []
contiguous_size_buffers = []
size_offsets = []
timers = None
# optimization flags
PARTITION_ACTIVATIONS = False
CPU_CHECKPOINT = False
CONTIGUOUS_CHECKPOINTING = False
SYNCHRONIZE = False
PROFILE_TIME = False
# Default name for the model parallel rng tracker.
_MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng'
transport_stream = None
cuda_device = None
def detach_variable(inputs, device=None):
if isinstance(inputs, tuple):
out = []
for inp in inputs:
if not isinstance(inp, torch.Tensor):
out.append(inp)
continue
requires_grad = inp.requires_grad
if device is not None:
x = inp.to(device=device)
else:
x = inp
x = x.detach()
x.requires_grad = requires_grad
out.append(x)
return tuple(out)
else:
raise RuntimeError("Only tuple of tensors is supported. Got Unsupported input type: ", type(inputs).__name__)
def _set_cuda_rng_state(new_state, device=-1):
"""Sets the random number generator state of the current GPU.
Arguments:
new_state (torch.ByteTensor): The desired state
This function is adapted from PyTorch repo (torch.cuda.set_rng_state) #ignore-cuda
with a single change: the input state is not cloned. Cloning caused
major performance issues for 4+ GPU cases.
"""
if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState):
# older PyTorch
def cb():
with get_accelerator().device(device):
_C._cuda_setRNGState(new_state)
else:
# newer PyTorch
if device == -1:
device = torch.device(get_accelerator().device_name())
elif isinstance(device, str):
device = torch.device(device)
elif isinstance(device, int):
device = torch.device(get_accelerator().device_name(), device)
def cb():
idx = device.index
if idx is None:
idx = get_accelerator().current_device()
default_generator = get_accelerator().default_generator(idx)
default_generator.set_state(new_state)
get_accelerator().lazy_call(cb)
class CudaRNGStatesTracker:
"""Tracker for the cuda RNG states.
Using the `add` method, a cuda rng state is initialized based on
the input `seed` and is assigned to `name`. Later, by forking the
rng state, we can perform operations and return to our starting
cuda state.
"""
def __init__(self):
# Map from a string name to the cuda rng state.
self.states_ = {}
# Seeds are just for bookkeeping, ensuring no seed is set twice.
self.seeds_ = set()
def reset(self):
"""Set to the initial state (no tracker)."""
self.states_ = {}
self.seeds_ = set()
def get_states(self):
"""Get rng states. Copy the dictionary so we have direct
pointers to the states, not just a pointer to the dictionary."""
return copy.copy(self.states_)
def set_states(self, states):
"""Set the rng states. For efficiency purposes, we do not check
the size of seed for compatibility."""
self.states_ = states
def add(self, name, seed):
"""Track the rng state."""
# Check seed is not already used.
if seed in self.seeds_:
raise Exception('seed {} already exists'.format(seed))
self.seeds_.add(seed)
# Check that state is not already defined.
if name in self.states_:
raise Exception('cuda rng state {} already exists'.format(name))
# Get the current rng state.
orig_rng_state = get_accelerator().get_rng_state()
# Set the new state and store it.
get_accelerator().manual_seed(seed)
self.states_[name] = get_accelerator().get_rng_state()
# Reset rng state to what it was.
_set_cuda_rng_state(orig_rng_state)
@contextlib.contextmanager
def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME):
"""Fork the cuda rng state, perform operations, and exit with
the original state."""
# Check if we have added the state
if name not in self.states_:
raise Exception('cuda rng state {} is not added'.format(name))
# Store current rng state.
orig_cuda_rng_state = get_accelerator().get_rng_state()
# Set rng state to the desired one
_set_cuda_rng_state(self.states_[name])
# Do the stuff we wanted to do.
try:
yield
finally:
# Update the current rng state for later use.
self.states_[name] = get_accelerator().get_rng_state()
# And set the state to the original state we started with.
_set_cuda_rng_state(orig_cuda_rng_state)
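# Hedged usage sketch for the tracker above (kept as a comment; the seed and the
# dropout call are illustrative, not from this file):
#
# tracker = CudaRNGStatesTracker()
# tracker.add('model-parallel-rng', 1234) # register a named RNG stream
# with tracker.fork('model-parallel-rng'): # ops inside draw from that stream
# x = torch.nn.functional.dropout(torch.ones(4), p=0.5)
# # outside the `with`, the default device RNG state is left untouched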
# RNG tracker object.
_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()
def get_cuda_rng_tracker():
"""Get cuda rng tracker."""
return _CUDA_RNG_STATE_TRACKER
def model_parallel_cuda_manual_seed(seed):
"""Initialize model parallel cuda seed.
This function should be called after the model parallel is
initialized. Also, no get_accelerator().manual_seed should be called
after this function. Basically, this is replacement for that
function.
Two set of RNG states are tracked:
default state: This is for data parallelism and is the same among a
set of model parallel GPUs but different across
different model parallel groups. This is used for
example for dropout in the non-model-parallel regions.
model-parallel state: This state is different among a set of model
parallel GPUs, but the same across data parallel
groups. This is used for example for dropout in
model parallel regions.
"""
global mpu
tp_rank = bwc_tensor_model_parallel_rank(mpu)
# 2718 is just for fun and any POSITIVE value will work.
offset = seed + 2718
model_parallel_seed = offset + tp_rank
# Data parallel gets the original seed.
data_parallel_seed = seed
if dist.get_rank() == 0:
logger.info(
'> initializing model parallel cuda seeds on global rank {}, '
'model parallel rank {}, and data parallel rank {} with '
'model parallel seed: {} and data parallel seed: {}'.format(dist.get_rank(), tp_rank,
mpu.get_data_parallel_rank(),
model_parallel_seed, data_parallel_seed), )
_CUDA_RNG_STATE_TRACKER.reset()
# Set the default state.
get_accelerator().manual_seed(data_parallel_seed)
# and model parallel state.
_CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME, model_parallel_seed)
def get_partition_start(item):
global mp_rank, mp_size, mp_group
size = item.numel()
partition_size = size / mp_size
start = partition_size * mp_rank
return int(start)
def get_partition_size(item):
global mp_rank, mp_size, mp_group
size = item.numel()
assert size % mp_size == 0, "Cannot partition activation: tensor numel is not divisible by mp size"
partition_size = size / mp_size
return int(partition_size)
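# Hedged worked example for the two helpers above (sizes are illustrative): with
# mp_size=4 and an activation of 1024 elements, get_partition_size(item) == 256
# and rank 2 starts at get_partition_start(item) == 256 * 2 == 512.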
def gather_partitioned_activations(tensors, device=None):
global mp_rank, mp_size, mp_group
assert len(tensors) % 2 == 0, f'Expected even count of tensors, instead got {len(tensors)}'
inputs = []
num_args = int(len(tensors) / 2)
for i in range(num_args):
item = tensors[2 * i]
size = tensors[2 * i + 1]
if not is_activation_to_checkpoint(item):
inputs.append(item)
continue
# don't need to do all_gather if model parallel is not enabled
if mp_group is None or mp_size == 1:
item = item.view(list(size.numpy()))
inputs.append(item)
continue
partition_size = item.numel()
tensor_size = partition_size * mp_size
if device is not None:
flat_tensor = torch.zeros([tensor_size], dtype=item.dtype, device=device)
else:
flat_tensor = torch.zeros([tensor_size], dtype=item.dtype, device=item.device)
partitions = []
for i in range(mp_size):
part_i = flat_tensor.narrow(0, partition_size * i, partition_size)
if i == mp_rank:
part_i.copy_(item)
partitions.append(part_i)
dist.all_gather(partitions, partitions[mp_rank], group=mp_group)
input_tensor = flat_tensor.view(list(size.numpy()))
item.data = input_tensor.data
inputs.append(item)
return tuple(inputs)
def extract_tensors(all_objects):
"""
Separate objects in list/tuple into tensors and non-tensors and create a mapping to enable re-aggregation.
The order of tensors and non-tensors is preserved in their respective output groups.
Parameters:
all_objects (list/tuple): Objects containing tensors and non-tensors to be split.
Returns:
tuple: Containing tensors, non-tensors, and bools of whether each position in original list/tuple was a tensor.
"""
tensor_objects = [v for v in all_objects if torch.is_tensor(v)]
non_tensor_objects = [v for v in all_objects if not torch.is_tensor(v)]
tensor_flags = [torch.is_tensor(v) for v in all_objects]
if type(all_objects) is tuple:
return tuple(tensor_objects), tuple(non_tensor_objects), tuple(tensor_flags)
return tensor_objects, non_tensor_objects, tensor_flags
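# Hedged illustration of the helper above (objects are illustrative): given
# all_objects = (t0, "meta", t1, 3) with t0, t1 tensors, extract_tensors returns
# ((t0, t1), ("meta", 3), (True, False, True, False)); merge_tensors below uses
# the flag tuple to interleave both groups back into the original order.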
def merge_tensors(tensor_objects, non_tensor_objects, tensor_flags):
"""
Merge two lists (or tuples) of tensors and non-tensors using a mapping of positions in merged list (or tuple).
Parameters:
tensor_objects (list/tuple): Tensors to merge.
non_tensor_objects (list/tuple): Non-tensors to merge.
tensor_flags (list/tuple): Indicates whether each position in output is a tensor.
Returns:
tuple: Merge of tensors and non-tensors
"""
merged_objects = []
tensor_idx = 0
non_tensor_idx = 0
real_tensor_flags = None
# remove the flags that are assigned to the size of the flattened tensors
if PARTITION_ACTIVATIONS:
real_tensor_flags = []
previous_flag = False
for flag in tensor_flags:
if previous_flag:
previous_flag = False
continue
previous_flag = flag
real_tensor_flags.append(flag)
else:
real_tensor_flags = tensor_flags
for is_tensor in real_tensor_flags:
if is_tensor:
merged_objects.append(tensor_objects[tensor_idx])
tensor_idx += 1
else:
merged_objects.append(non_tensor_objects[non_tensor_idx])
non_tensor_idx += 1
return tuple(merged_objects)
def is_activation_to_checkpoint(item):
"""
Is an activation to be checkpointed
"""
global mp_size
return torch.is_tensor(item) and item.is_floating_point() and item.numel() >= mp_size
def partition_activations(args, cpu_checkpoint, contiguous_checkpoint):
global contiguous_data_buffers, data_offsets
inputs = []
num_non_fp_tensors = 0
for arg_index, item in enumerate(args):
if not is_activation_to_checkpoint(item):
inputs.append(item)
num_non_fp_tensors += 1
continue
i = arg_index - num_non_fp_tensors
partition_size = get_partition_size(item)
partition = item.detach().contiguous().view(-1).narrow(0, get_partition_start(item), partition_size).clone()
buffer_device = torch.device('cpu') if cpu_checkpoint else partition.device
if contiguous_checkpoint:
if i >= len(contiguous_data_buffers):
tensor_list = [
torch.tensor(()).new_empty([partition_size], dtype=partition.dtype, device=buffer_device)
for _ in range(num_layers)
]
contiguous_data_buffers.append(tensor_list)
data_offsets.append(0)
elif contiguous_data_buffers[i] is None:
tensor_list = [
torch.tensor(()).new_empty([partition_size], dtype=partition.dtype, device=buffer_device)
for _ in range(num_layers)
]
contiguous_data_buffers[i] = tensor_list
data_offsets[i] = 0
# Because the 'new_empty' returns uninitialized pages,
# the pages need to be populated during the cudaMemcpy time
# which increases the data copy time. To avoid this, we
# pre-populate these pages by simply writing 0 ahead of
# the actual cudaMemcpy operation time. Due to the
# previously launched GPU kernels, there is a small
# window of time here for CPUs to populate pages asynchronously.
contiguous_data_buffers[i][data_offsets[i]].data[range(
0, contiguous_data_buffers[i][data_offsets[i]].data.shape[0],
int(mmap.PAGESIZE / contiguous_data_buffers[i][data_offsets[i]].data.element_size()))] = 0
contiguous_partition = contiguous_data_buffers[i][data_offsets[i]].data.copy_(partition.data)
data_offsets[i] = data_offsets[i] + 1
inputs.append(contiguous_partition)
else:
partition = partition.cpu() if CPU_CHECKPOINT else partition
inputs.append(partition)
return inputs
def get_partitioned_activations_for_backward(args, inputs, contiguous_checkpoint):
global contiguous_size_buffers, size_offsets
new_args = []
num_non_fp_tensors = 0
for arg_index, (arg, inp) in enumerate(zip(args, inputs)):
size = torch.tensor(arg.size()) if torch.is_tensor(arg) else None
if not is_activation_to_checkpoint(arg):
new_args.append(arg)
new_args.append(size)
num_non_fp_tensors += 1
continue
arg.data = inp.data
new_args.append(arg)
i = arg_index - num_non_fp_tensors
if contiguous_checkpoint:
numel = size.numel()
if i >= len(contiguous_size_buffers):
tmp = torch.tensor(())
contiguous_size_buffers.append(
tmp.new_empty([numel * num_layers], dtype=size.dtype, device=size.device))
size_offsets.append(0)
elif contiguous_size_buffers[i] is None:
tmp = torch.tensor(())
contiguous_size_buffers[i] = tmp.new_empty([numel * num_layers], dtype=size.dtype, device=size.device)
size_offsets[i] = 0
contiguous_size = contiguous_size_buffers[i].narrow(0, size_offsets[i], numel).data.copy_(size.data)
contiguous_size = contiguous_size.view_as(size)
size_offsets[i] = size_offsets[i] + numel
new_args.append(contiguous_size)
else:
new_args.append(size)
return new_args
def get_cpu_activations_for_backward(args, inputs):
new_args = []
for i, (arg, inp) in enumerate(zip(args, inputs)):
if not is_activation_to_checkpoint(arg):
new_args.append(arg)
continue
arg.data = inp.data
new_args.append(arg)
return new_args
class CheckpointFunction(torch.autograd.Function):
"""This function is adapted from torch.utils.checkpoint with
the following changes:
1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state` #ignore-cuda
2) the states in the model parallel tracker are also properly
tracked/set/reset.
3) Performance activation partitioning, contiguous memory optimization
4) CPU Checkpointing
5) Profile forward and backward functions
"""
@staticmethod
def forward(ctx, run_function, all_outputs, *args):
global mpu, timers, SYNCHRONIZE, PROFILE_TIME
def save_args_for_backward(*all_args):
tensor_args, non_tensor_args, tensor_flags = extract_tensors(all_objects=all_args)
ctx.deepspeed_saved_tensors = tensor_args
ctx.non_tensor_args = non_tensor_args
ctx.tensor_flags = tensor_flags
if SYNCHRONIZE:
get_accelerator().synchronize()
if timers is None and PROFILE_TIME:
timers = Timers()
if PROFILE_TIME:
timers('forward').start()
ctx.run_function = run_function
global num_layers
global mp_rank, mp_size, mp_group
global contiguous_data_buffers, contiguous_size_buffers
global data_offsets, size_offsets
if mp_rank is None:
if mpu is not None:
if hasattr(mpu, 'get_tensor_model_parallel_rank'):
mp_rank = mpu.get_tensor_model_parallel_rank()
mp_size = mpu.get_tensor_model_parallel_world_size()
mp_group = mpu.get_tensor_model_parallel_group()
else:
mp_rank = mpu.get_model_parallel_rank()
mp_size = mpu.get_model_parallel_world_size()
mp_group = mpu.get_model_parallel_group()
else:
mp_rank = 0
mp_size = 1
mp_group = None
global cuda_device, transport_stream, PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset
if cuda_device is None:
see_memory_usage("First Forward Beginning", force=False)
if dist.get_rank() == 0:
logger.info(f"Activation Checkpointing Information")
logger.info(f"----Partition Activations {PARTITION_ACTIVATIONS}, CPU CHECKPOINTING {CPU_CHECKPOINT}")
logger.info(
f"----contiguous Memory Checkpointing {CONTIGUOUS_CHECKPOINTING} with {num_layers} total layers")
logger.info(f"----Synchronization {SYNCHRONIZE}")
logger.info(f"----Profiling time in checkpointing {PROFILE_TIME}")
cuda_device = get_accelerator().current_device_name()
transport_stream = get_accelerator().Stream(device=cuda_device)
if PARTITION_ACTIVATIONS:
inputs = partition_activations(args, CPU_CHECKPOINT, CONTIGUOUS_CHECKPOINTING)
elif CPU_CHECKPOINT:
inputs = copy_to_device(args, device=torch.device('cpu'), criterion_func=is_activation_to_checkpoint)
# just in case something funky is happening such as reuse of inputs
inputs_cuda = copy_to_device(args, device=cuda_device, criterion_func=is_activation_to_checkpoint)
# Copy the rng states.
ctx.fwd_cpu_rng_state = torch.get_rng_state()
ctx.fwd_cuda_rng_state = get_accelerator().get_rng_state()
ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
see_memory_usage("Before running forward on the layer", force=False)
# ctx.save_for_backward(*args)
with torch.no_grad():
outputs = run_function(*inputs_cuda)
see_memory_usage("After running forward on the layer", force=False)
del inputs_cuda
if PARTITION_ACTIVATIONS:
new_args = get_partitioned_activations_for_backward(args, inputs, CONTIGUOUS_CHECKPOINTING)
assert len(new_args) % 2 == 0, f'save_for_backward called with odd number of args, {len(new_args)}'
save_args_for_backward(*new_args)
elif CPU_CHECKPOINT:
new_args = get_cpu_activations_for_backward(args, inputs)
save_args_for_backward(*new_args)
else:
save_args_for_backward(*args)
if PROFILE_TIME:
timers('forward').stop()
timers.log(['forward'])
if SYNCHRONIZE:
get_accelerator().synchronize()
# Tensors returned from forward() may not be differentiable.
if torch.is_tensor(outputs):
non_grad_outputs = [outputs] if not outputs.is_floating_point() else []
else:
non_grad_outputs = [o for o in outputs if torch.is_tensor(o) and not o.is_floating_point()]
ctx.mark_non_differentiable(*non_grad_outputs)
if torch.is_tensor(outputs):
all_outputs += [outputs]
return outputs
else:
all_outputs += outputs
outputs, _, _ = extract_tensors(all_objects=outputs)
return tuple(outputs)
@staticmethod
def backward(ctx, *grads):
global timers
see_memory_usage("In backward", force=False)
# removing pointers to the contiguous buffer memory
# so that they can be garbage collected once the checkpoints
# have been used
if SYNCHRONIZE:
get_accelerator().synchronize()
if PROFILE_TIME:
timers('backward').start()
if CONTIGUOUS_CHECKPOINTING:
global data_offsets, size_offsets
global contiguous_data_buffers, contiguous_size_buffers
for buffers in contiguous_data_buffers:
buffers = []
# frees up all the pointers to the checkpoints except for the ones
# stored by save for backward
contiguous_data_buffers = []
contiguous_size_buffers = []
data_offsets = []
size_offsets = []
see_memory_usage("In backward checkpointing code", force=False)
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError("Checkpointing is not compatible with .grad(), "
"please use .backward() if possible")
global cuda_device, transport_stream, PARTITION_ACTIVATIONS
if PARTITION_ACTIVATIONS:
# with get_accelerator().stream(transport_stream):
inputs = gather_partitioned_activations(ctx.deepspeed_saved_tensors,
device=cuda_device if CPU_CHECKPOINT else None)
detached_inputs = detach_variable(inputs)
elif CPU_CHECKPOINT:
inputs = move_to_device(ctx.deepspeed_saved_tensors, cuda_device, is_activation_to_checkpoint)
detached_inputs = detach_variable(inputs)
else:
inputs = ctx.deepspeed_saved_tensors
detached_inputs = detach_variable(inputs)
# Add non tensor input args
detached_inputs = merge_tensors(tensor_objects=detached_inputs,
non_tensor_objects=ctx.non_tensor_args,
tensor_flags=ctx.tensor_flags)
# Store the current states.
bwd_cpu_rng_state = torch.get_rng_state()
bwd_cuda_rng_state = get_accelerator().get_rng_state()
bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
# Set the states to what they were before the forward pass.
torch.set_rng_state(ctx.fwd_cpu_rng_state)
_set_cuda_rng_state(ctx.fwd_cuda_rng_state)
get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)
# if PARTITION_ACTIVATIONS:
# current_stream=get_accelerator().current_stream()
# current_stream.wait_stream(transport_stream)
see_memory_usage("In backward checkpointing code before forward", force=False)
with torch.enable_grad():
outputs = ctx.run_function(*detached_inputs)
see_memory_usage("In backward checkpointing code after forward", force=False)
# Set the states back to what they were at the start of this function.
torch.set_rng_state(bwd_cpu_rng_state)
_set_cuda_rng_state(bwd_cuda_rng_state)
get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)
if isinstance(outputs, torch.Tensor):
outputs = (outputs, )
# Filter out non tensor outputs
outputs, _, _ = extract_tensors(all_objects=outputs)
# Construct arguments to autograd.backward().
# This is usually just outputs and grads, but forward() can return tensors that
# are not differentiable.
output_tensors = []
grad_tensors = []
for out, grad in zip(outputs, grads):
if out.requires_grad:
output_tensors.append(out)
grad_tensors.append(grad)
see_memory_usage("In backward checkpointing code before backward", force=False)
torch.autograd.backward(output_tensors, grad_tensors)
# Force clear our stashed tensors to prevent a memory leak in certain scenarios
ctx.deepspeed_saved_tensors = None
ctx.non_tensor_args = None
ctx.tensor_flags = None
see_memory_usage("After backward checkpointing code after backward", force=False)
if PROFILE_TIME:
timers('backward').stop()
timers.log(['backward'])
if SYNCHRONIZE:
get_accelerator().synchronize()
ret_list = [None, None] # first None for ctx
for inp in detached_inputs:
if torch.is_tensor(inp):
ret_list.append(inp.grad)
else:
ret_list.append(None)
return tuple(ret_list)
def checkpoint(function, *args):
"""Checkpoint a model or part of the model.
This has been directly copied from torch.utils.checkpoint. """
all_outputs = []
CheckpointFunction.apply(function, all_outputs, *args)
if len(all_outputs) == 1:
return all_outputs[0]
else:
return tuple(all_outputs)
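# Hedged usage sketch for checkpoint() above (the layer and argument names are
# illustrative, not from this file):
#
# import deepspeed
# deepspeed.checkpointing.configure(mpu_=None, partition_activations=False)
# hidden = deepspeed.checkpointing.checkpoint(transformer_block, hidden, mask)
#
# i.e. checkpoint(function, *args) runs function(*args) without keeping the
# intermediate activations and recomputes them during the backward pass.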
def partition_activations_in_checkpoint(partition_activation):
global PARTITION_ACTIVATIONS
PARTITION_ACTIVATIONS = partition_activation
if dist.get_rank() == 0:
logger.info(f"**************Partition Activations {PARTITION_ACTIVATIONS}************")
def set_num_layers(nlayers):
global num_layers
num_layers = nlayers
def reset():
"""Resets memory buffers related to contiguous memory optimizations.
Should be called during eval when multiple forward propagations are
computed without any backward propagation that usually clears these
buffers.
Arguments:
None
Return:
None
"""
if CONTIGUOUS_CHECKPOINTING:
global data_offsets, size_offsets
global contiguous_data_buffers, contiguous_size_buffers
for buffers in contiguous_data_buffers:
buffers = []
# frees up all the pointers to the checkpoints except for the ones
# stored by save for backward
contiguous_data_buffers = []
contiguous_size_buffers = []
data_offsets = []
size_offsets = []
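# Sketch of the eval-time pattern described in the docstring above; `model`
# and `eval_loader` are placeholders for the caller's objects.
#
#     for batch in eval_loader:
#         model(batch)
#         deepspeed.checkpointing.reset()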
def _configure_using_config_file(config, mpu=None):
global num_layers, PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \
CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME
config = DeepSpeedConfig(config, mpu=mpu).activation_checkpointing_config
if dist.get_rank() == 0:
logger.info(config.repr())
PARTITION_ACTIVATIONS = config.partition_activations
CONTIGUOUS_CHECKPOINTING = config.contiguous_memory_optimization
num_layers = config.number_checkpoints
CPU_CHECKPOINT = config.cpu_checkpointing
SYNCHRONIZE = config.synchronize_checkpoint_boundary
PROFILE_TIME = config.profile
def _configure_defaults():
global mpu, num_layers, deepspeed_checkpointing_enabled
global PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \
CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME
PARTITION_ACTIVATIONS = False
CONTIGUOUS_CHECKPOINTING = False
num_layers = False
CPU_CHECKPOINT = False
SYNCHRONIZE = False
PROFILE_TIME = False
deepspeed_checkpointing_enabled = True
def configure(
mpu_,
deepspeed_config=None,
partition_activations=None,
contiguous_checkpointing=None,
num_checkpoints=None,
checkpoint_in_cpu=None,
synchronize=None,
profile=None,
):
"""Configure DeepSpeed Activation Checkpointing.
Arguments:
mpu_: Optional: An object that implements the following methods
get_model_parallel_rank/group/world_size, and get_data_parallel_rank/group/world_size
deepspeed_config: Optional: DeepSpeed Config json file when provided will be used to
configure DeepSpeed Activation Checkpointing
partition_activations: Optional: Partitions activation checkpoint across model parallel
GPUs when enabled. By default False. Will overwrite deepspeed_config if provided
contiguous_checkpointing: Optional: Copies activation checkpoints to a contiguous memory
buffer. Works only with homogeneous checkpoints when partition_activations is enabled.
Must provide num_checkpoints. By default False. Will overwrite deepspeed_config if
provided
num_checkpoints: Optional: Number of activation checkpoints stored during the forward
propagation of the model. Used to calculate the buffer size for contiguous_checkpointing
Will overwrite deepspeed_config if provided
checkpoint_in_cpu: Optional: Moves the activation checkpoint to CPU. Only works with
partition_activation. Default is false. Will overwrite deepspeed_config if provided
synchronize: Optional: Performs get_accelerator().synchronize() at the beginning and end of
each call to deepspeed.checkpointing.checkpoint for both forward and backward pass.
By default false. Will overwrite deepspeed_config if provided
profile: Optional: Logs the forward and backward time for each
deepspeed.checkpointing.checkpoint invocation. Will overwrite deepspeed_config
if provided
Returns:
None
"""
global mpu, num_layers, deepspeed_checkpointing_enabled
global PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \
CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME
_configure_defaults()
if mpu_ is not None:
mpu = mpu_
if deepspeed_config is not None:
_configure_using_config_file(deepspeed_config, mpu=mpu)
if partition_activations is not None:
PARTITION_ACTIVATIONS = partition_activations
if contiguous_checkpointing is not None:
CONTIGUOUS_CHECKPOINTING = contiguous_checkpointing
if num_checkpoints is not None:
num_layers = num_checkpoints
if checkpoint_in_cpu is not None:
CPU_CHECKPOINT = checkpoint_in_cpu
if synchronize is not None:
SYNCHRONIZE = synchronize
if profile is not None:
PROFILE_TIME = profile
if CONTIGUOUS_CHECKPOINTING:
assert PARTITION_ACTIVATIONS, "Contiguous Checkpointing is only available with partitioned activations. Set partitioned activations to true in deepspeed config"
if CONTIGUOUS_CHECKPOINTING:
assert num_layers is not None, "Must specify the number of layers with contiguous memory checkpointing"
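# A minimal configuration sketch; the values are illustrative and mpu_=None
# stands in for a caller-provided model-parallel utility object.
"""
Example::
    import deepspeed
    deepspeed.checkpointing.configure(
        mpu_=None,
        partition_activations=True,
        contiguous_checkpointing=True,
        num_checkpoints=24,
        checkpoint_in_cpu=True,
        synchronize=False,
        profile=False,
    )
    assert deepspeed.checkpointing.is_configured()
"""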
def is_configured():
"""True if deepspeed activation checkpointing has been configured
by calling deepspeed.checkpointing.configure, else returns false
Arguments:
None
Return:
        True if configured, else False
"""
return deepspeed_checkpointing_enabled
| 32,764 | 36.148526 | 168 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/fp16/fused_optimizer.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Copyright NVIDIA/apex
This file is adapted from FP16_Optimizer in NVIDIA/apex
"""
import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from deepspeed.runtime import DeepSpeedOptimizer
from deepspeed.runtime.utils import get_global_norm, get_grad_norm, CheckOverflow, get_weight_norm
from deepspeed.runtime.fp16.loss_scaler import INITIAL_LOSS_SCALE, SCALE_WINDOW, MIN_LOSS_SCALE
from deepspeed.utils import groups, logger, log_dist
from deepspeed import comm as dist
from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, CLIP_GRAD
from deepspeed.accelerator import get_accelerator
class FP16_Optimizer(DeepSpeedOptimizer):
"""
FP16 Optimizer for training fp16 models. Handles loss scaling.
    For usage example please see TODO: DeepSpeed V2 Tutorial
"""
def __init__(self,
init_optimizer,
deepspeed=None,
static_loss_scale=1.0,
dynamic_loss_scale=False,
initial_dynamic_scale=2**32,
dynamic_loss_args=None,
verbose=True,
mpu=None,
clip_grad=0.0,
fused_adam_legacy=False,
has_moe_layers=False,
timers=None):
self.fused_adam_legacy = fused_adam_legacy
self.timers = timers
self.deepspeed = deepspeed
self.has_moe_layers = has_moe_layers
self.using_pipeline = self.deepspeed.pipeline_parallelism
if not get_accelerator().is_available():
raise SystemError("Cannot use fp16 without accelerator.")
self.optimizer = init_optimizer
# param flattened by groups
self.fp16_groups = []
self.fp16_groups_flat = []
self.fp32_groups_flat = []
self._global_grad_norm = 0.
# loop to deal with groups
for i, param_group in enumerate(self.optimizer.param_groups):
# push this group to list before modify
self.fp16_groups.append(param_group['params'])
# init fp16 weight buffer, flattened
self.fp16_groups_flat.append(_flatten_dense_tensors([p.clone().detach() for p in self.fp16_groups[i]]))
# set model fp16 weight to slices of flattened buffer
updated_params = _unflatten_dense_tensors(self.fp16_groups_flat[i], self.fp16_groups[i])
for p, q in zip(self.fp16_groups[i], updated_params):
p.data = q.data
# init master weight, flattened
self.fp32_groups_flat.append(self.fp16_groups_flat[i].clone().float().detach())
            # modify optimizer to have flat master weight
self.fp32_groups_flat[i].requires_grad = True # keep this in case internal optimizer uses it
param_group['params'] = [self.fp32_groups_flat[i]]
# we may have a way of fusing dynamic scale. Do not support for now
if dynamic_loss_scale:
self.dynamic_loss_scale = True
self.cur_iter = 0
self.last_overflow_iter = -1
self.scale_factor = 2
if dynamic_loss_args is None:
self.cur_scale = initial_dynamic_scale
self.scale_window = 1000
self.min_loss_scale = 1
else:
self.cur_scale = dynamic_loss_args[INITIAL_LOSS_SCALE]
self.scale_window = dynamic_loss_args[SCALE_WINDOW]
self.min_loss_scale = dynamic_loss_args[MIN_LOSS_SCALE]
else:
self.dynamic_loss_scale = False
self.cur_iter = 0
self.cur_scale = static_loss_scale
self.verbose = verbose
self.custom_loss_scaler = False
self.external_loss_scale = None
self.clip_grad = clip_grad
self.norm_type = 2
self.step_count = 0
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 0 and TORCH_MINOR <= 4:
self.clip_grad_norm = torch.nn.utils.clip_grad_norm
else:
self.clip_grad_norm = torch.nn.utils.clip_grad_norm_
#model parallel object
self.mpu = mpu
self.overflow = False
self.overflow_checker = CheckOverflow(self.fp16_groups, mpu=self.mpu, deepspeed=deepspeed)
self.initialize_optimizer_states()
def initialize_optimizer_states(self):
for i, group in enumerate(self.fp16_groups):
self.fp32_groups_flat[i].grad = torch.zeros(self.fp32_groups_flat[i].size(),
device=self.fp32_groups_flat[i].device)
self.optimizer.step()
for i, group in enumerate(self.fp16_groups):
self.fp32_groups_flat[i].grad = None
return
def zero_grad(self, set_to_none=False):
"""
Zero FP16 parameter grads.
"""
# For speed, set model fp16 grad to None by default
for group in self.fp16_groups:
for p in group:
if set_to_none:
p.grad = None
else:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
def step_fused_adam(self, closure=None):
"""
Not supporting closure.
"""
# First compute norm for all group so we know if there is overflow
grads_groups_flat = []
norm_groups = []
for i, group in enumerate(self.fp16_groups):
grads_groups_flat.append(
_flatten_dense_tensors([
torch.zeros(p.size(), dtype=p.dtype, device=p.device) if p.grad is None else p.grad for p in group
]))
norm_groups.append(get_weight_norm(grads_groups_flat[i], mpu=self.mpu))
self.overflow = self.overflow_checker.check_using_norm(norm_groups)
prev_scale = self.cur_scale
self._update_scale(self.overflow)
if self.overflow:
if self.verbose:
logger.info("[deepspeed] fp16 dynamic loss scale overflow! Skipping step. Attempted loss "
"scale: {}, reducing to {}".format(prev_scale, self.cur_scale))
return self.overflow
scaled_grad_norm = get_global_norm(norm_list=norm_groups)
combined_scale = self.unscale_and_clip_grads(grads_groups_flat, scaled_grad_norm, apply_scale=False)
# Stash unscaled gradient norm
self._global_grad_norm = scaled_grad_norm / self.cur_scale
# norm is in fact norm*cur_scale
self.optimizer.step(grads=[[g] for g in grads_groups_flat],
output_params=[[p] for p in self.fp16_groups_flat],
scale=combined_scale,
grad_norms=norm_groups)
# TODO: we probably don't need this? just to be safe
for i in range(len(norm_groups)):
updated_params = _unflatten_dense_tensors(self.fp16_groups_flat[i], self.fp16_groups[i])
for p, q in zip(self.fp16_groups[i], updated_params):
p.data = q.data
return self.overflow
def start_timers(self, name_list):
if self.timers is not None:
for name in name_list:
self.timers(name).start()
def stop_timers(self, name_list):
if self.timers is not None:
for name in name_list:
self.timers(name).stop()
def log_timers(self, name_list):
if self.timers is not None:
self.timers.log(name_list)
def set_lr(self, lr):
"""Set the learning rate."""
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr
def get_lr(self):
"""Return the current learning rate."""
return self.optimizer.param_groups[0]["lr"]
def override_loss_scale(self, loss_scale):
if loss_scale != self.external_loss_scale:
logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}')
self.custom_loss_scaler = True
self.external_loss_scale = loss_scale
def step(self, closure=None):
"""
Not supporting closure.
"""
if self.fused_adam_legacy:
return self.step_fused_adam()
COMPUTE_NORM = "compute_norm"
OVERFLOW_CHECK = 'overflow_check'
OVERFLOW_TIMERS = [COMPUTE_NORM, OVERFLOW_CHECK]
UNSCALE_AND_CLIP = 'unscale_and_clip'
BASIC_STEP = 'basic_step'
UPDATE_FP16 = 'update_fp16'
STEP_TIMERS = OVERFLOW_TIMERS + [UNSCALE_AND_CLIP, BASIC_STEP, UPDATE_FP16]
# First determine if there is overflow.
self.start_timers([OVERFLOW_CHECK])
fp16_params = []
for i, group in enumerate(self.fp16_groups):
fp16_params.extend([p for p in group if p.grad is not None])
self.overflow = self.overflow_checker.has_overflow(fp16_params)
self.stop_timers([OVERFLOW_CHECK])
prev_scale = self.cur_scale
self._update_scale(self.overflow)
if self.overflow:
if self.verbose:
log_dist(
"Overflow detected. Skipping step. Attempted loss "
f"scale: {prev_scale}, reducing to {self.cur_scale}",
ranks=[0])
# Clear gradients
for i, group in enumerate(self.fp16_groups):
for p in group:
p.grad = None
self.log_timers(OVERFLOW_TIMERS)
return self.overflow
grads_groups_flat = []
for i, group in enumerate(self.fp16_groups):
data_type = self.fp32_groups_flat[i].dtype
grads_groups_flat.append(
_flatten_dense_tensors([
torch.zeros(p.size(), dtype=data_type, device=p.device) if p.grad is None else p.grad.to(data_type)
for p in group
]))
for p in group:
p.grad = None
self.fp32_groups_flat[i].grad = grads_groups_flat[i]
self.start_timers([COMPUTE_NORM])
all_groups_norm = get_grad_norm(self.fp32_groups_flat, mpu=self.mpu)
self.stop_timers([COMPUTE_NORM])
if self.has_moe_layers:
all_groups_norm = self._get_norm_with_moe_layers(all_groups_norm)
scaled_global_grad_norm = get_global_norm(norm_list=[all_groups_norm])
# Stash unscaled gradient norm
self._global_grad_norm = scaled_global_grad_norm / self.cur_scale
self.start_timers([UNSCALE_AND_CLIP])
self.unscale_and_clip_grads(grads_groups_flat, scaled_global_grad_norm)
self.stop_timers([UNSCALE_AND_CLIP])
self.start_timers([BASIC_STEP])
self.optimizer.step()
self.stop_timers([BASIC_STEP])
#get rid of the fp32 gradients. Not needed anymore
for group in self.fp32_groups_flat:
group.grad = None
self.start_timers([UPDATE_FP16])
for i in range(len(self.fp16_groups)):
updated_params = _unflatten_dense_tensors(self.fp32_groups_flat[i], self.fp16_groups[i])
for p, q in zip(self.fp16_groups[i], updated_params):
p.data.copy_(q.data)
self.stop_timers([UPDATE_FP16])
self.log_timers(STEP_TIMERS)
self.step_count += 1
return self.overflow
def _get_norm_with_moe_layers(self, all_groups_norm):
#all_groups_norm_old = all_groups_norm
# Need to allreduce (avg) the norms across different ranks because moe params will not be synced during allreduce
if self.using_pipeline:
pg = self.deepspeed.mpu.get_data_parallel_group()
else:
pg = groups._get_data_parallel_group()
scaled_norm = all_groups_norm * 1.0 / float(dist.get_world_size(group=pg))
scaled_norm_tensor = torch.tensor(scaled_norm, device=self.fp32_groups_flat[0].device, dtype=torch.float)
dist.all_reduce(scaled_norm_tensor, group=pg)
all_groups_norm = scaled_norm_tensor.item()
#print(f"old = {all_groups_norm_old} and new = {all_groups_norm} at rank: {deepspeed.comm.get_rank()}")
return all_groups_norm
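    # Worked example: with 4 data-parallel ranks whose local norms are
    # 2.0, 2.0, 4.0 and 4.0, each rank contributes its norm / 4 and the
    # all_reduce sums those contributions, so every rank ends up with the
    # average (2 + 2 + 4 + 4) / 4 = 3.0.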
def unscale_and_clip_grads(self, grad_groups_flat, total_norm, apply_scale=True):
# compute combined scale factor for this group
combined_scale = self.cur_scale
if self.clip_grad > 0.:
# norm is in fact norm*scale
clip = ((total_norm / self.cur_scale) + 1e-6) / self.clip_grad
if clip > 1:
combined_scale = clip * self.cur_scale
if apply_scale:
for grad in grad_groups_flat:
grad.data.mul_(1. / combined_scale)
return combined_scale
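    # Worked example: with cur_scale=1024, clip_grad=1.0 and a scaled
    # total_norm of 4096, clip = (4096 / 1024 + 1e-6) / 1.0 ~= 4, so
    # combined_scale ~= 4 * 1024; dividing the gradients by it both unscales
    # them and clips the global norm to ~1.0 in a single multiply.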
def backward(self, loss, create_graph=False, retain_graph=False):
"""
:attr:`backward` performs the following steps:
1. fp32_loss = loss.float()
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves
"""
if self.custom_loss_scaler:
scaled_loss = self.external_loss_scale * loss
scaled_loss.backward()
else:
scaled_loss = (loss.float()) * self.cur_scale
scaled_loss.backward(create_graph=create_graph, retain_graph=retain_graph)
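    # Typical call pattern (sketch): this wrapper is normally created by the
    # DeepSpeed engine when fp16 is enabled in the config, and the engine then
    # routes engine.backward(loss) / engine.step() through the methods above.
    # The config keys and the `model` / `inputs` names below are illustrative.
    #
    #     import torch
    #     import deepspeed
    #     ds_config = {
    #         "train_batch_size": 8,
    #         "fp16": {"enabled": True, "loss_scale": 0, "loss_scale_window": 1000},
    #     }
    #     base_opt = torch.optim.Adam(model.parameters(), lr=1e-4)
    #     engine, optimizer, _, _ = deepspeed.initialize(model=model, optimizer=base_opt, config=ds_config)
    #     loss = engine(inputs)     # forward pass returning a scalar loss
    #     engine.backward(loss)     # scaled_loss = loss * cur_scale; scaled_loss.backward()
    #     engine.step()             # unscale, clip, update, then adjust the dynamic loss scale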
def _update_scale(self, skip):
if self.dynamic_loss_scale:
prev_scale = self.cur_scale
if skip:
self.cur_scale = max(self.cur_scale / self.scale_factor, self.min_loss_scale)
self.last_overflow_iter = self.cur_iter
if self.verbose:
logger.info(f"\nGrad overflow on iteration {self.cur_iter}")
logger.info(f"Reducing dynamic loss scale from {prev_scale} to {self.cur_scale}")
else:
# Ensure self.scale_window updates since last overflow
stable_interval = (self.cur_iter - self.last_overflow_iter) - 1
if (stable_interval > 0) and (stable_interval % self.scale_window == 0):
self.cur_scale *= self.scale_factor
if self.verbose:
logger.info(f"No Grad overflow for {self.scale_window} iterations")
logger.info(f"Increasing dynamic loss scale from {prev_scale} to {self.cur_scale}")
else:
if skip:
logger.info("Grad overflow on iteration: %s", self.cur_iter)
logger.info("Using static loss scale of: %s", self.cur_scale)
self.cur_iter += 1
return
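    # Worked example of the schedule above: with scale_factor=2 and
    # scale_window=1000, an overflow at iteration 3 halves cur_scale and sets
    # last_overflow_iter=3; the scale is then doubled at iterations 1004,
    # 2004, ... as long as no new overflow occurs, and it never drops below
    # min_loss_scale on the way down.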
# Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups)
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
state_dict = {}
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['cur_scale'] = self.cur_scale
state_dict['cur_iter'] = self.cur_iter
if state_dict['dynamic_loss_scale']:
state_dict['last_overflow_iter'] = self.last_overflow_iter
state_dict['scale_factor'] = self.scale_factor
state_dict['scale_window'] = self.scale_window
state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict()
state_dict['fp32_groups_flat'] = self.fp32_groups_flat
state_dict[CLIP_GRAD] = self.clip_grad
return state_dict
# Refresh fp32 master params from fp16 copies
def refresh_fp32_params(self):
for current, saved in zip(self.fp32_groups_flat, self.fp16_groups_flat):
current.data.copy_(saved.data)
def load_state_dict(self, state_dict, load_optimizer_states=True):
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# I think it should actually be ok to reload the optimizer before the model.
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.cur_scale = state_dict['cur_scale']
self.cur_iter = state_dict['cur_iter']
if state_dict['dynamic_loss_scale']:
self.last_overflow_iter = state_dict['last_overflow_iter']
self.scale_factor = state_dict['scale_factor']
self.scale_window = state_dict['scale_window']
if load_optimizer_states:
self.optimizer.load_state_dict(state_dict[OPTIMIZER_STATE_DICT])
self.clip_grad = state_dict[CLIP_GRAD]
# At this point, the optimizer's references to the model's fp32 parameters are up to date.
# The optimizer's hyperparameters and internal buffers are also up to date.
# However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
# out of date. There are two options.
# 1: Refresh the master params from the model's fp16 params.
# This requires less storage but incurs precision loss.
# 2: Save and restore the fp32 master copies separately.
# We choose option 2.
#
# Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
# of their associated parameters, because it's possible those buffers might not exist yet in
# the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
# constructed in the same way as the one whose state_dict we are loading, the same master params
# are guaranteed to exist, so we can just copy_() from the saved master params.
for current, saved in zip(self.fp32_groups_flat, state_dict['fp32_groups_flat']):
current.data.copy_(saved.data)
def __repr__(self):
return repr(self.optimizer)
# Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
def _get_loss_scale(self):
if self.custom_loss_scaler:
return self.external_loss_scale
else:
return self.cur_scale
def _set_loss_scale(self, value):
self.loss_scaler.cur_scale = value
loss_scale = property(_get_loss_scale, _set_loss_scale)
| 20,052 | 40.346392 | 126 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/fp16/loss_scaler.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Taken and modified for DeepSpeed from:
https://github.com/NVIDIA/Megatron-LM/blob/master/fp16/loss_scaler.py
Commit: 93ab4bea59dc5cbf97c079d313741866af4deac9
"""
import torch
from deepspeed import comm as dist
from deepspeed.utils import logger
INITIAL_LOSS_SCALE = 'init_scale'
SCALE_WINDOW = 'scale_window'
DELAYED_SHIFT = 'delayed_shift'
CONSECUTIVE_HYSTERESIS = 'consecutive_hysteresis'
MIN_LOSS_SCALE = 'min_scale'
# item() is a recent addition, so this helps with backward compatibility.
def to_python_float(t):
if hasattr(t, 'item'):
return t.item()
return t[0]
class LossScalerBase:
"""LossScalarBase
Base class for a loss scaler
"""
def __init__(self, cur_scale):
self.cur_scale = cur_scale
self.dynamic = False
@property
def loss_scale(self):
return self.cur_scale
def scale_gradient(self, module, grad_in, grad_out):
return tuple(self.loss_scale * g for g in grad_in)
def update_scale(self, overflow):
pass
def backward(self, loss, retain_graph=False):
scaled_loss = loss * self.loss_scale
scaled_loss.backward(retain_graph=retain_graph)
# print(f'LossScalerBackward: {scaled_loss=}')
class LossScaler(LossScalerBase):
"""
Class that manages a static loss scale. This class is intended to interact with
:class:`FP16_Optimizer`, and should not be directly manipulated by the user.
Use of :class:`LossScaler` is enabled via the ``static_loss_scale`` argument to
:class:`FP16_Optimizer`'s constructor.
Args:
scale (float, optional, default=1.0): The loss scale.
"""
def __init__(self, scale=1):
super(LossScaler, self).__init__(scale)
# `params` is a list / generator of torch.Variable
def has_overflow(self, params):
return False
# `x` is a torch.Tensor
def _has_inf_or_nan(x):
return False
class DynamicLossScaler(LossScalerBase):
"""
Class that manages dynamic loss scaling. It is recommended to use :class:`DynamicLossScaler`
indirectly, by supplying ``dynamic_loss_scale=True`` to the constructor of
:class:`FP16_Optimizer`. However, it's important to understand how :class:`DynamicLossScaler`
operates, because the default options can be changed using the
the ``dynamic_loss_args`` argument to :class:`FP16_Optimizer`'s constructor.
Loss scaling is designed to combat the problem of underflowing gradients encountered at long
times when training fp16 networks. Dynamic loss scaling begins by attempting a very high loss
scale. Ironically, this may result in OVERflowing gradients. If overflowing gradients are
encountered, :class:`DynamicLossScaler` informs :class:`FP16_Optimizer` that an overflow has
occurred.
:class:`FP16_Optimizer` then skips the update step for this particular iteration/minibatch,
and :class:`DynamicLossScaler` adjusts the loss scale to a lower value.
If a certain number of iterations occur without overflowing gradients detected,
:class:`DynamicLossScaler` increases the loss scale once more.
In this way :class:`DynamicLossScaler` attempts to "ride the edge" of
always using the highest loss scale possible without incurring overflow.
Args:
init_scale (float, optional, default=2**32): Initial loss scale attempted by :class:`DynamicLossScaler.`
scale_factor (float, optional, default=2.0): Factor used when adjusting the loss scale. If an overflow is encountered, the loss scale is readjusted to loss scale/``scale_factor``. If ``scale_window`` consecutive iterations take place without an overflow, the loss scale is readjusted to loss_scale*``scale_factor``.
scale_window (int, optional, default=1000): Number of consecutive iterations without an overflow to wait before increasing the loss scale.
consecutive_hysteresis (bool, optional, default=False): Whether to refill hysteresis if we reach an iteration that doesn't overflow
"""
def __init__(self,
init_scale=2**32,
scale_factor=2.,
scale_window=1000,
min_scale=1,
delayed_shift=1,
consecutive_hysteresis=False,
raise_error_at_min_scale=True,
dtype=torch.half):
super(DynamicLossScaler, self).__init__(init_scale)
self.cur_iter = 0
self.last_overflow_iter = -1
self.scale_factor = scale_factor
self.scale_window = scale_window
self.min_scale = min_scale
self.delayed_shift = delayed_shift
self.cur_hysteresis = delayed_shift
self.consecutive_hysteresis = consecutive_hysteresis
self.raise_error_at_min_scale = raise_error_at_min_scale
self.dynamic = True
self.dtype = dtype
# `params` is a list / generator of torch.Variable
def has_overflow_serial(self, params):
for p in params:
if p.grad is not None and self._has_inf_or_nan(p.grad.data):
return True
return False
# `x` is a torch.Tensor
def _has_inf_or_nan(x):
try:
# if x is half, the .float() incurs an additional deep copy, but it's necessary if
# Pytorch's .sum() creates a one-element tensor of the same type as x
# (which is true for some recent version of pytorch).
cpu_sum = float(x.float().sum())
# More efficient version that can be used if .sum() returns a Python scalar
# cpu_sum = float(x.sum())
except RuntimeError as instance:
# We want to check if inst is actually an overflow exception.
# RuntimeError could come from a different error.
# If so, we still want the exception to propagate.
if "value cannot be converted" not in instance.args[0]:
raise
return True
else:
if cpu_sum in [float('inf'), -float('inf')] or cpu_sum != cpu_sum:
return True
return False
# `overflow` is boolean indicating whether the gradient overflowed
def update_scale(self, overflow):
if overflow:
# self.cur_scale /= self.scale_factor
if self.delayed_shift == 1 or self.cur_hysteresis == 1:
if (self.cur_scale == self.min_scale) and self.raise_error_at_min_scale:
raise Exception(
"Current loss scale already at minimum - cannot decrease scale anymore. Exiting run.")
else:
next_scale = max(self.cur_scale / self.scale_factor, self.min_scale)
if dist.get_rank() == 0:
overflow_msg = f"[deepspeed] OVERFLOW! Rank {dist.get_rank()} Skipping step."
if self.dtype == torch.half:
overflow_msg += f" Attempted loss scale: {int(self.cur_scale)}, reducing to {int(next_scale)}"
logger.info(overflow_msg)
self.cur_scale = next_scale
else:
if dist.get_rank() == 0:
overflow_msg = f"[deepspeed] OVERFLOW! Rank {dist.get_rank()} Skipping step."
if self.dtype == torch.half:
overflow_msg += f" Attempted loss scale: {int(self.cur_scale)}, but hysteresis is {self.cur_hysteresis}. Reducing hysteresis to {self.cur_hysteresis-1}"
logger.info(overflow_msg)
self.cur_hysteresis -= 1
self.last_overflow_iter = self.cur_iter
else:
if self.consecutive_hysteresis:
if dist.get_rank() == 0:
hysteresis_msg = f"Consecutive hysteresis is enabled. Restoring hysteresis to {self.delayed_shift}"
logger.info(hysteresis_msg)
self.cur_hysteresis = self.delayed_shift
if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
if not self.consecutive_hysteresis:
self.cur_hysteresis = self.delayed_shift
self.cur_scale *= self.scale_factor
self.cur_iter += 1
# Loss scaling is only defined for fp16, but for backwards compatibility
# we still create a scaler for other dtypes (fp32, bf16) which does not perform any scaling.
def CreateLossScaler(dtype, static_loss_scale, dynamic_scaling, dynamic_loss_args):
if dtype == torch.half and dynamic_scaling:
if dynamic_loss_args is None:
return DynamicLossScaler(dtype=dtype)
return DynamicLossScaler(dtype=dtype, **dynamic_loss_args)
loss_scale_value = static_loss_scale if dtype == torch.half else 1.0
return LossScaler(scale=loss_scale_value)
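# Sketch of how the factory above dispatches (values illustrative):
#
#     scaler = CreateLossScaler(dtype=torch.half, static_loss_scale=128.0,
#                               dynamic_scaling=True,
#                               dynamic_loss_args={INITIAL_LOSS_SCALE: 2**16,
#                                                  SCALE_WINDOW: 500,
#                                                  MIN_LOSS_SCALE: 1})
#     # -> DynamicLossScaler(init_scale=2**16, scale_window=500, min_scale=1)
#     scaler = CreateLossScaler(dtype=torch.bfloat16, static_loss_scale=128.0,
#                               dynamic_scaling=True, dynamic_loss_args=None)
#     # -> LossScaler(scale=1.0): non-fp16 dtypes never scale the loss.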
##############################################################
# Example usage below here -- assuming it's in a separate file
##############################################################
"""
TO-DO separate out into an example.
if __name__ == "__main__":
    import torch
    from deepspeed.runtime.fp16.loss_scaler import DynamicLossScaler
    # N is batch size; D_in is input dimension;
    # H is hidden dimension; D_out is output dimension.
    N, D_in, H, D_out = 64, 1000, 100, 10
    # Create random tensors to hold inputs, outputs and weights.
    x = torch.randn(N, D_in)
    y = torch.randn(N, D_out)
    w1 = torch.randn(D_in, H, requires_grad=True)
    w2 = torch.randn(H, D_out, requires_grad=True)
    parameters = [w1, w2]
    learning_rate = 1e-6
    optimizer = torch.optim.SGD(parameters, lr=learning_rate)
    loss_scaler = DynamicLossScaler()
    for t in range(500):
        y_pred = x.mm(w1).clamp(min=0).mm(w2)
        loss = (y_pred - y).pow(2).sum() * loss_scaler.loss_scale
        print('Iter {} loss scale: {}'.format(t, loss_scaler.loss_scale))
        print('Iter {} scaled loss: {}'.format(t, loss.item()))
        print('Iter {} unscaled loss: {}'.format(t, loss.item() / loss_scaler.loss_scale))
        # Run backprop
        optimizer.zero_grad()
        loss.backward()
        # Check for overflow (inf/nan) in the scaled gradients
        has_overflow = any(p.grad is not None and not torch.isfinite(p.grad).all() for p in parameters)
        # If no overflow, unscale grad and update as usual
        if not has_overflow:
            for param in parameters:
                param.grad.data.mul_(1. / loss_scaler.loss_scale)
            optimizer.step()
        # Otherwise, don't do anything -- ie, skip iteration
        else:
            print('fp16 dynamic loss scale overflow!')
        # Update loss scale for next iteration
        loss_scaler.update_scale(has_overflow)
"""
| 11,492 | 41.409594 | 325 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/fp16/unfused_optimizer.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Copyright NVIDIA/apex
This file is adapted from FP16_Optimizer in NVIDIA/apex
"""
from deepspeed.moe.utils import split_params_grads_into_shared_and_expert_params
import torch
from torch._utils import _flatten_dense_tensors
from deepspeed.runtime import DeepSpeedOptimizer
from deepspeed.runtime.utils import get_global_norm, CheckOverflow, get_weight_norm
from deepspeed.runtime.fp16.loss_scaler import INITIAL_LOSS_SCALE, SCALE_WINDOW, MIN_LOSS_SCALE
from deepspeed.utils import logger
from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT
from deepspeed.accelerator import get_accelerator
from deepspeed import comm as dist
class FP16_UnfusedOptimizer(DeepSpeedOptimizer):
"""
FP16 Optimizer without weight fusion to support LAMB optimizer
    For usage example please see TODO: DeepSpeed V2 Tutorial
"""
def __init__(self,
init_optimizer,
deepspeed=None,
static_loss_scale=1.0,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=True,
mpu=None,
clip_grad=0.0,
fused_lamb_legacy=False):
self.fused_lamb_legacy = fused_lamb_legacy
self._global_grad_norm = 0.
if dist.get_rank() == 0:
logger.info(f'Fused Lamb Legacy : {self.fused_lamb_legacy} ')
if not get_accelerator().is_available():
raise SystemError("Cannot use fp16 without accelerator.")
self.optimizer = init_optimizer
# param groups
self.fp16_groups = []
self.fp32_groups = []
# loop to deal with groups
for i, param_group in enumerate(self.optimizer.param_groups):
            #fp16 weights that represent the actual model weights
self.fp16_groups.append(param_group['params'])
#creating a fp32 copy of the weights that will be updated first then
#copied to fp16 weights
fp32_group = [p.clone().float().detach() for p in param_group['params']]
#in case the internal optimizer needs it
for p in fp32_group:
p.requires_grad = True
#setting the param groups in the optimizer to point to fp32
#note these are not the weights used by the model
#the model uses the fp16 version that we added to fp16_group
self.fp32_groups.append(fp32_group)
param_group['params'] = self.fp32_groups[i]
# we may have a way of fusing dynamic scale. Do not support for now
if dynamic_loss_scale:
self.dynamic_loss_scale = True
self.cur_iter = 0
self.last_overflow_iter = -1
self.scale_factor = 2.0
if dynamic_loss_args is None:
self.cur_scale = 1.0 * 2**16
self.scale_window = 1000
self.min_loss_scale = 0.25
else:
self.cur_scale = dynamic_loss_args[INITIAL_LOSS_SCALE]
self.scale_window = dynamic_loss_args[SCALE_WINDOW]
self.min_loss_scale = dynamic_loss_args[MIN_LOSS_SCALE]
else:
self.dynamic_loss_scale = False
self.cur_iter = 0
self.cur_scale = static_loss_scale
self.custom_loss_scaler = False
self.external_loss_scale = None
self.verbose = verbose
self.clip_grad = clip_grad
self.norm_type = 2
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 0 and TORCH_MINOR <= 4:
self.clip_grad_norm = torch.nn.utils.clip_grad_norm
else:
self.clip_grad_norm = torch.nn.utils.clip_grad_norm_
self.mpu = mpu
self.overflow = False
self.overflow_checker = CheckOverflow(self.fp16_groups, mpu=self.mpu, deepspeed=deepspeed)
self.initialize_optimizer_states()
def zero_grad(self, set_to_none=False):
"""
Zero FP16 parameter grads.
"""
# FP32 grad should never exist outside of the step function
# For speed, set model fp16 grad to None by default
for group in self.fp16_groups:
for p in group:
if set_to_none:
p.grad = None
else:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
def step_fused_lamb(self, closure=None):
"""
Not supporting closure.
"""
# First compute norm for all group so we know if there is overflow
grads_groups_flat = []
grads_groups = []
norm_groups = []
expert_norm_groups = []
for i, group in enumerate(self.fp16_groups):
grads = [
torch.zeros(p.size(), dtype=p.dtype, device=p.device) if p.grad is None else p.grad for p in group
]
grads_groups.append(grads)
grads_groups_flat.append(_flatten_dense_tensors(grads))
grads_for_norm, expert_grads_for_norm = split_params_grads_into_shared_and_expert_params(group)
norm_group_value = 0.0
if len(grads_for_norm) > 0:
norm_group_value = get_weight_norm(_flatten_dense_tensors(grads_for_norm), mpu=self.mpu)
norm_groups.append(norm_group_value)
expert_norm_group_value = 0.0
if len(expert_grads_for_norm) > 0:
expert_norm_group_value = get_weight_norm(_flatten_dense_tensors(expert_grads_for_norm), mpu=self.mpu)
expert_norm_groups.append(expert_norm_group_value)
self.overflow = self.overflow_checker.check_using_norm(norm_groups + expert_norm_groups)
prev_scale = self.cur_scale
self._update_scale(self.overflow)
if self.overflow:
if self.verbose:
logger.info("[deepspeed] fp16 dynamic loss scale overflow! Skipping step. Attempted loss "
"scale: {}, reducing to {}".format(prev_scale, self.cur_scale))
return self.overflow
self._global_grad_norm = get_global_norm(norm_list=norm_groups)
combined_scale = self.unscale_and_clip_grads(self._global_grad_norm, apply_scale=False)
self.optimizer.step(grads=grads_groups, output_params=self.fp16_groups, scale=combined_scale)
for fp32_group, fp16_group in zip(self.fp32_groups, self.fp16_groups):
for idx, (fp32_param, fp16_param) in enumerate(zip(fp32_group, fp16_group)):
#remove the fp32 grad
fp32_param.grad = None
#copy data from fp32 to fp16
fp16_param.data.copy_(fp32_param.data)
return self.overflow
def set_lr(self, lr):
"""Set the learning rate."""
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr
def get_lr(self):
"""Return the current learning rate."""
return self.optimizer.param_groups[0]["lr"]
def override_loss_scale(self, loss_scale):
if loss_scale != self.external_loss_scale:
logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}')
self.custom_loss_scaler = True
self.external_loss_scale = loss_scale
def step(self, closure=None):
"""
Not supporting closure.
"""
if self.fused_lamb_legacy:
return self.step_fused_lamb()
self.overflow = self.overflow_checker.check()
prev_scale = self.cur_scale
self._update_scale(self.overflow)
if self.overflow:
if self.verbose:
logger.info("[deepspeed] fp16 dynamic loss scale overflow! Skipping step. Attempted loss "
"scale: {}, reducing to {}".format(prev_scale, self.cur_scale))
return self.overflow
norm_groups = []
for i, group in enumerate(self.fp16_groups):
grads_for_norm, _ = split_params_grads_into_shared_and_expert_params(group)
norm_group_value = 0.0
if len(grads_for_norm) > 0:
norm_group_value = get_weight_norm(grads_for_norm, mpu=self.mpu)
norm_groups.append(norm_group_value)
            # copying gradients to fp32 to work with fp32 parameters
for fp32_param, fp16_param in zip(self.fp32_groups[i], self.fp16_groups[i]):
if fp16_param.grad is None:
fp32_param.grad = torch.zeros(fp16_param.size(), dtype=fp32_param.dtype, device=fp32_param.device)
else:
fp32_param.grad = fp16_param.grad.to(fp32_param.dtype)
self._global_grad_norm = get_global_norm(norm_list=norm_groups)
self.unscale_and_clip_grads(self._global_grad_norm)
self.optimizer.step()
for fp32_group, fp16_group in zip(self.fp32_groups, self.fp16_groups):
for idx, (fp32_param, fp16_param) in enumerate(zip(fp32_group, fp16_group)):
#remove the fp32 grad
fp32_param.grad = None
#copy data from fp32 to fp16
fp16_param.data.copy_(fp32_param.data)
return self.overflow
def unscale_and_clip_grads(self, total_norm, apply_scale=True):
# compute combined scale factor for this group
combined_scale = self.cur_scale
if self.clip_grad > 0.:
# norm is in fact norm*scale
clip = ((total_norm / self.cur_scale) + 1e-6) / self.clip_grad
if clip > 1:
combined_scale = clip * self.cur_scale
if apply_scale:
for group in self.fp32_groups:
for param in group:
if param.grad is not None:
param.grad.data.mul_(1. / combined_scale)
return combined_scale
def backward(self, loss, create_graph=False, retain_graph=False):
"""
:attr:`backward` performs the following steps:
1. fp32_loss = loss.float()
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves
"""
if self.custom_loss_scaler:
scaled_loss = self.external_loss_scale * loss
scaled_loss.backward()
else:
scaled_loss = (loss.float()) * self.cur_scale
scaled_loss.backward(create_graph=create_graph, retain_graph=retain_graph)
def _update_scale(self, skip):
if self.dynamic_loss_scale:
prev_scale = self.cur_scale
if skip:
self.cur_scale = max(self.cur_scale / self.scale_factor, self.min_loss_scale)
self.last_overflow_iter = self.cur_iter
if self.verbose:
logger.info("Grad overflow on iteration: %s", self.cur_iter)
logger.info(f"Reducing dynamic loss scale from {prev_scale} to {self.cur_scale}")
else:
# Ensure self.scale_window updates since last overflow
stable_interval = (self.cur_iter - self.last_overflow_iter) - 1
if (stable_interval > 0) and (stable_interval % self.scale_window == 0):
self.cur_scale *= self.scale_factor
if self.verbose:
logger.info(f"No Grad overflow for {self.scale_window} iterations")
logger.info(f"Increasing dynamic loss scale from {prev_scale} to {self.cur_scale}")
else:
if skip:
logger.info("Grad overflow on iteration %s", self.cur_iter)
logger.info("Using static loss scale of %s", self.cur_scale)
self.cur_iter += 1
return
# Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups)
# Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
def _get_loss_scale(self):
if self.custom_loss_scaler:
return self.external_loss_scale
else:
return self.cur_scale
def _set_loss_scale(self, value):
self.loss_scaler.cur_scale = value
loss_scale = property(_get_loss_scale, _set_loss_scale)
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
state_dict = {}
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['cur_scale'] = self.cur_scale
state_dict['cur_iter'] = self.cur_iter
if state_dict['dynamic_loss_scale']:
state_dict['last_overflow_iter'] = self.last_overflow_iter
state_dict['scale_factor'] = self.scale_factor
state_dict['scale_window'] = self.scale_window
state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict()
state_dict['fp32_groups'] = self.fp32_groups
return state_dict
# Refresh fp32 master params from fp16 copies
def refresh_fp32_params(self):
for current_group, saved_group in zip(self.fp32_groups, self.fp16_groups):
for current, saved in zip(current_group, saved_group):
current.data.copy_(saved.data)
def load_state_dict(self, state_dict, load_optimizer_states=True):
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# I think it should actually be ok to reload the optimizer before the model.
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.cur_scale = state_dict['cur_scale']
self.cur_iter = state_dict['cur_iter']
if state_dict['dynamic_loss_scale']:
self.last_overflow_iter = state_dict['last_overflow_iter']
self.scale_factor = state_dict['scale_factor']
self.scale_window = state_dict['scale_window']
if load_optimizer_states:
self.optimizer.load_state_dict(state_dict[OPTIMIZER_STATE_DICT])
# At this point, the optimizer's references to the model's fp32 parameters are up to date.
# The optimizer's hyperparameters and internal buffers are also up to date.
# However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
# out of date. There are two options.
# 1: Refresh the master params from the model's fp16 params.
# This requires less storage but incurs precision loss.
# 2: Save and restore the fp32 master copies separately.
# We choose option 2.
#
# Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
# of their associated parameters, because it's possible those buffers might not exist yet in
# the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
# constructed in the same way as the one whose state_dict we are loading, the same master params
# are guaranteed to exist, so we can just copy_() from the saved master params.
for current_group, saved_group in zip(self.fp32_groups, state_dict['fp32_groups']):
for current, saved in zip(current_group, saved_group):
current.data.copy_(saved.data)
def __repr__(self):
return repr(self.optimizer)
def initialize_optimizer_states(self):
for i, group in enumerate(self.fp16_groups):
for param in group:
param.grad = torch.zeros(param.size(),
dtype=param.dtype,
device=get_accelerator().current_device_name())
for i, group in enumerate(self.fp32_groups):
for param in group:
param.grad = torch.zeros(param.size(),
dtype=param.dtype,
device=get_accelerator().current_device_name())
self.optimizer.step()
for i, group in enumerate(self.fp16_groups):
for param in group:
param.grad = None
for i, group in enumerate(self.fp32_groups):
for param in group:
param.grad = None
| 18,076 | 41.039535 | 126 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/fp16/onebit/adam.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import types
import torch
import numpy as np
from deepspeed.accelerator import get_accelerator
from deepspeed import comm as dist
class OnebitAdam(torch.optim.Optimizer):
"""Implements the 1-bit Adam algorithm. Currently GPU-only.
For usage example please see https://www.deepspeed.ai/tutorials/onebit-adam/
For technical details please read https://arxiv.org/abs/2102.02888
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
freeze_step (int, optional): Number of steps for warmup (uncompressed)
stage before we start using compressed communication. (default 100000)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in 1-bit Adam!
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
        cuda_aware (boolean, optional): Set True if the underlying MPI implementation
supports CUDA-Aware communication. (default: False)
comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl')
.. _Adam\\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self,
params,
deepspeed=None,
lr=1e-3,
freeze_step=100000,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
eps_inside_sqrt=False,
weight_decay=0.,
max_grad_norm=0.,
amsgrad=False,
cuda_aware=False,
comm_backend_name='nccl'):
if amsgrad:
raise RuntimeError('1-bit Adam does not support the AMSGrad variant.')
defaults = dict(lr=lr,
bias_correction=bias_correction,
betas=betas,
eps=eps,
weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(OnebitAdam, self).__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
assert (dist.is_initialized())
self.comm_time = 0.0
self.step_time = 0.0
self.ave_step = 1
self.bk_time = 0.0
self.deepspeed = deepspeed
self.adam_freeze_key = False
self.initialize = False
self.freeze_step = freeze_step
self.cuda_aware = cuda_aware
self.using_pipeline = False
self.comm_backend_name = comm_backend_name
# Empty initializer. Set handle based on the comm backend as follows.
self.comm_backend_handle = None
if self.comm_backend_name == 'nccl':
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
assert (
(TORCH_MAJOR == 1 and TORCH_MINOR >= 8) or TORCH_MAJOR >= 2
), "Please use torch 1.8 or greater to enable NCCL backend in 1-bit Adam. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend"
assert dist.is_initialized() == True, "Please initialize the torch distributed backend."
from deepspeed.runtime.comm.nccl import NcclBackend
self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce')
self.comm_backend_handle = NcclBackend(self.deepspeed.mpu)
elif self.comm_backend_name == 'mpi':
from deepspeed.runtime.comm.mpi import MpiBackend
self.comm_backend_handle = MpiBackend(cuda_aware)
self.size = self.comm_backend_handle.size
self.divider = int(self.size * 8 / np.gcd(self.size, 8))
def step(self, closure=None, grads=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
output params (list of tensors, optional): A reduced precision copy
of the updated weights written out in addition to the regular
updated weights. Have to be of same type as gradients. (default: None)
scale (float, optional): factor to divide gradient tensor values
by before applying to weights. (default: 1)
"""
loss = None
if closure is not None:
loss = closure()
gather_time = 0
allgather_time = 0
all_time = 0
if self.adam_freeze_key is False:
v_diff_buffer = 0.0
if grads is None:
grads_group = [None] * len(self.param_groups)
# backward compatibility
        # assuming a list/generator of parameters means single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0]) != list:
grads_group = [grads]
else:
grads_group = grads
for group, grads_this_group in zip(self.param_groups, grads_group):
if grads_this_group is None:
grads_this_group = [None] * len(group['params'])
bias_correction = 1 if group['bias_correction'] else 0
for p, grad in zip(group['params'], grads_this_group):
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('1-bit Adam does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if not self.initialize or (self.adam_freeze_key and 'worker_error' not in state.keys()):
state['tensor_size'] = torch.numel(p.data)
state['corrected_tensor_size'] = state['tensor_size']
if state['tensor_size'] % (self.size * self.divider) != 0:
state['corrected_tensor_size'] += ((self.size * self.divider) - (state['tensor_size'] %
(self.size * self.divider)))
state['server_chunk_size'] = state['corrected_tensor_size'] // self.size
get_accelerator().empty_cache()
state['worker_error'] = torch.zeros(state['corrected_tensor_size'], device=p.device)
state['server_error'] = torch.zeros(state['server_chunk_size'], device=p.device)
get_accelerator().empty_cache()
self.adam_freeze_key = True
if not self.initialize and dist.get_rank() == 0:
print("Cupy Buffers Initialized Successfully.")
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
if self.adam_freeze_key is False:
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
grad = None
if self.initialize:
update = exp_avg / (exp_avg_sq.sqrt() + group['eps'])
else:
if 'non_freeze' in group.keys() and group['non_freeze'] is True:
dist.all_reduce(grad)
grad.mul_(1 / dist.get_world_size())
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
grad = None
else:
if self.initialize is True:
exp_avg.mul_(beta1).add_(1 - beta1, grad)
grad = None
if self.size > 1:
exp_avg.set_(
self.comm_backend_handle.compressed_allreduce(exp_avg, state['worker_error'],
state['server_error'],
self.deepspeed.local_rank))
# Because 1-bit compression cannot represent exact zero, it is required to
# provide a momentum mask for those params that have constant exact zeros in their
# momentums, otherwise the compression error would keep accumulating.
# For example, for BERT pre-training seq 128, bert.embeddings.position_embeddings.weight
# always have exact zeros in its momentum for row 129 to 512, because it only
# learns up to seq length 128 while the model supports up to 512 seq length.
# (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.)
if 'exp_avg_mask' in group:
if exp_avg.device != group['exp_avg_mask'].device:
group['exp_avg_mask'] = group['exp_avg_mask'].to(device=exp_avg.device)
exp_avg.mul_(group['exp_avg_mask'])
if self.initialize:
update = exp_avg / (exp_avg_sq.sqrt() + group['eps'])
if self.initialize:
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
with torch.no_grad():
p.add_(-group['lr'] * update)
if not self.initialize:
print('Pop out errors', flush=True)
state.pop('worker_error')
state.pop('server_error')
if not self.initialize:
self.adam_freeze_key = False
self.initialize = True
print(f"Finished the initialization step at rank {dist.get_rank()}")
return loss
if self.adam_freeze_key is False:
if state['step'] >= self.freeze_step:
print('OnebitAdam - starting compressed communication')
self.adam_freeze_key = True
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
return loss
def load_state_dict(self, state_dict):
"""
Overrides load_state_dict() to add special handling when loading checkpoints
"""
# Because at different stage exp_avg_mask may change (e.g.,
# BERT pre-training seqlen 128 and 512 ), we don't use the exp_avg_mask
# in checkpoints but always use the one user provided in training script.
# (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.)
# Thus here we keep the exp_avg_mask unchanged when loading checkpoint
for i, group in enumerate(self.param_groups):
if 'exp_avg_mask' in group:
state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask']
elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict['param_groups'][i]:
state_dict['param_groups'][i].pop('exp_avg_mask')
super().load_state_dict(state_dict)
if self.state[self.param_groups[0]['params'][0]]['step'] < self.freeze_step:
if dist.get_rank() == 0:
print("Checkpoint loaded and OnebitAdam warmup stage starts/continues.")
if self.adam_freeze_key is True:
self.adam_freeze_key = False
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = True
else:
self.deepspeed.enable_backward_allreduce = True
else:
if dist.get_rank() == 0:
print("Checkpoint loaded and OnebitAdam compression stage starts/continues.")
if self.adam_freeze_key is False:
self.adam_freeze_key = True
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
# We reset the compression errors when loading checkpoints for 3 reasons:
# 1) The worker and server error at each GPU are distinct, so in current implementation
# only rank 0's errors are saved in the checkpoint. Thus we have to reset the errors.
# If we want to save them correctly we need O(num_gpu*model_size) memory in order to
# gather all the error, which is a very large memory requirement. It's possible to save
# them in a distributed way, but it will make the checkpoint saving/loading much more complicated.
# 2) Even if we are able to save the compression errors correctly, you need to have the
# exact same number of GPUs in order to load them correctly.
# 3) We verified on BERT pre-training that occasionally resetting the compression error
# at checkpoint loading does not affect the convergence.
# However, please avoid frequent checkpoint loading which could break the error
# compensation mechanism thus affect the convergence.
for group in self.param_groups:
for p in group['params']:
if 'worker_error' in self.state[p]:
self.state[p].pop('worker_error')
if 'server_error' in self.state[p]:
self.state[p].pop('server_error')
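# Selecting 1-bit Adam is normally done through the DeepSpeed config rather
# than by instantiating this class directly. A minimal config sketch follows;
# the keys mirror the constructor arguments above and the linked tutorial, and
# the values are illustrative only.
"""
Example::
    ds_config = {
        "train_batch_size": 64,
        "optimizer": {
            "type": "OneBitAdam",
            "params": {
                "lr": 1e-3,
                "freeze_step": 23000,
                "betas": [0.9, 0.999],
                "eps": 1e-8,
                "weight_decay": 0.01,
                "cuda_aware": False,
                "comm_backend_name": "nccl"
            }
        }
    }
    # Parameters whose momentum has structural zeros (see the exp_avg_mask
    # notes in step() and load_state_dict() above) should be passed as a param
    # group carrying an 'exp_avg_mask' tensor from the training script, since
    # the mask is not restored from checkpoints.
"""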
| 15,258 | 48.381877 | 196 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/fp16/onebit/zoadam.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import types
import torch
import numpy as np
from deepspeed.accelerator import get_accelerator
from deepspeed import comm as dist
class ZeroOneAdam(torch.optim.Optimizer):
"""Implements the 0/1 Adam algorithm. Currently GPU-only.
For usage example please see https://www.deepspeed.ai/tutorials/zero-one-adam/
For technical details please read https://arxiv.org/abs/2202.06009
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
var_freeze_step (int, optional): The latest step to update the variance,
using the notation from https://arxiv.org/abs/2202.06009, it denotes the
max{i|i in T_v}. Note that this is different from the freeze step from the
1-bit Adam. The var_freeze_step is usually the end of the learning rate warmup
and thus does not require tuning. (default: 100000)
var_update_scaler (int, optional): The interval to update the variance. Note that
the update policy for variance follows an exponential rule, where var_update_scaler
denotes the kappa in the 0/1 Adam paper. (default: 16)
local_step_scaler (int, optional): The interval to scale the local steps interval
according to the learning rate policy. (default: 32678)
local_step_clipper (int, optional): The largest interval for local steps with
learning rate policy. This corresponds to the variable H in the 0/1 Adam paper.
(default: 16)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in 0/1 Adam!
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
cuda_aware (boolean, required): Set True if the underlying MPI implementation
supports CUDA-Aware communication. (default: False)
comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl')
.. _Adam\\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self,
params,
deepspeed=None,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
eps_inside_sqrt=False,
weight_decay=0.,
max_grad_norm=0.,
var_freeze_step=100000,
var_update_scaler=16,
local_step_scaler=32678,
local_step_clipper=16,
amsgrad=False,
cuda_aware=False,
comm_backend_name='nccl'):
if amsgrad:
raise RuntimeError('0/1 Adam does not support the AMSGrad variant.')
defaults = dict(lr=lr,
bias_correction=bias_correction,
betas=betas,
eps=eps,
weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(ZeroOneAdam, self).__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
assert (dist.is_initialized())
self.deepspeed = deepspeed
self.initialize = False
self.cuda_aware = cuda_aware
self.using_pipeline = False
self.var_freeze_step = var_freeze_step
self.var_update_scaler = var_update_scaler
self.local_step_scaler = local_step_scaler
self.local_step_clipper = local_step_clipper
self.freeze_key = False
self.reinitial_error_buffer = False
self.comm_backend_name = comm_backend_name
# Empty initializer. Set handle based on the comm backend as follows.
self.comm_backend_handle = None
if self.comm_backend_name == 'nccl':
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
assert (
(TORCH_MAJOR == 1 and TORCH_MINOR >= 8) or TORCH_MAJOR >= 2
), "Please use torch 1.8 or greater to enable NCCL backend in 0/1 Adam. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend"
assert dist.is_initialized() == True, "Please initialize the torch distributed backend."
from deepspeed.runtime.comm.nccl import NcclBackend
self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce')
self.comm_backend_handle = NcclBackend(self.deepspeed.mpu)
elif self.comm_backend_name == 'mpi':
from deepspeed.runtime.comm.mpi import MpiBackend
self.comm_backend_handle = MpiBackend(cuda_aware)
self.size = self.comm_backend_handle.size
self.divider = int(self.size * 8 / np.gcd(self.size, 8))
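        # For example, with size=6 ranks: divider = 6 * 8 / gcd(6, 8) = 24, so tensors are
        # padded to a multiple of size * divider = 144 elements in step() before the
        # compressed allreduce splits them into equal per-rank server chunks.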
def step(self, closure=None, grads=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
        """
loss = None
if closure is not None:
loss = closure()
if grads is None:
grads_group = [None] * len(self.param_groups)
        # backward compatibility
        # assuming a list/generator of parameters means a single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0]) != list:
grads_group = [grads]
else:
grads_group = grads
for group, grads_this_group in zip(self.param_groups, grads_group):
if grads_this_group is None:
grads_this_group = [None] * len(group['params'])
bias_correction = 1 if group['bias_correction'] else 0
for p, grad in zip(group['params'], grads_this_group):
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('0/1 Adam does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if not self.initialize or 'worker_error' not in state.keys():
# Some scalars to help scale the variance update/local step policies
state['var_interval'] = 1
state['var_counter'] = 0
state['local_step_interval'] = 1
state['local_step_counter'] = 0
state['lrs'] = 0
state['tensor_size'] = torch.numel(p.data)
state['corrected_tensor_size'] = state['tensor_size']
if state['tensor_size'] % (self.size * self.divider) != 0:
state['corrected_tensor_size'] += ((self.size * self.divider) - (state['tensor_size'] %
(self.size * self.divider)))
state['server_chunk_size'] = state['corrected_tensor_size'] // self.size
get_accelerator().empty_cache()
state['worker_error'] = torch.zeros(state['corrected_tensor_size'], device=p.device)
state['server_error'] = torch.zeros(state['server_chunk_size'], device=p.device)
# Accumulation of momentum, i.e., the u variable in the 0/1 Adam paper
state['momentum_accumulator'] = torch.zeros_like(p.data)
get_accelerator().empty_cache()
# self.freeze_key = True
if not self.initialize and dist.get_rank() == 0:
print("Cupy Buffers Initialized Successfully.")
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
comm_buffer = state['momentum_accumulator']
beta1, beta2 = group['betas']
state['step'] += 1
if self.initialize:
if self.freeze_key is False:
if state['step'] % state['var_interval'] == 0:
                            exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
else:
if self.size > 1:
with torch.no_grad():
grad_onebit = self.comm_backend_handle.compressed_allreduce(
grad, state['worker_error'], state['server_error'], self.deepspeed.local_rank)
if 'exp_avg_mask' in group:
if grad_onebit.device != group['exp_avg_mask'].device:
group['exp_avg_mask'] = group['exp_avg_mask'].to(device=grad_onebit.device)
grad_onebit.mul_(group['exp_avg_mask'])
                            exp_avg.mul_(beta1).add_(grad_onebit, alpha=1 - beta1)
else:
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
state['lrs'] += group['lr']
grad = None
if not self.initialize:
if self.size > 1:
comm_buffer.set_(
self.comm_backend_handle.compressed_allreduce(comm_buffer, state['worker_error'],
state['server_error'],
self.deepspeed.local_rank))
if 'exp_avg_mask' in group:
if comm_buffer.device != group['exp_avg_mask'].device:
group['exp_avg_mask'] = group['exp_avg_mask'].to(device=comm_buffer.device)
comm_buffer.mul_(group['exp_avg_mask'])
if self.initialize:
update = exp_avg / (exp_avg_sq.sqrt() + group['eps'])
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
with torch.no_grad():
p.data.add_(-group['lr'] * update)
if self.freeze_key is True:
comm_buffer.add_(-group['lr'] * update)
if state['step'] % state['local_step_interval'] == 0 and self.freeze_key:
with torch.no_grad():
p.data.add_(-1 * comm_buffer)
comm_buffer.mul_(exp_avg_sq.sqrt() + group['eps'])
if self.size > 1:
comm_buffer.copy_(
self.comm_backend_handle.compressed_allreduce(comm_buffer, state['worker_error'],
state['server_error'],
self.deepspeed.local_rank))
if 'exp_avg_mask' in group:
if comm_buffer.device != group['exp_avg_mask'].device:
group['exp_avg_mask'] = group['exp_avg_mask'].to(device=comm_buffer.device)
comm_buffer.mul_(group['exp_avg_mask'])
exp_avg.zero_().add_(comm_buffer / state['lrs'], alpha=-1)
p.data.add_(comm_buffer / (exp_avg_sq.sqrt() + group['eps']))
comm_buffer.zero_()
state['lrs'] = 0
# According to 0/1 Adam theory, a fixed variance would allow more accurate estimation of momentum
# However, in practice, we can also disable the manual freezing of variance, since the interval of
# updating variance will increase exponentially, so that it has negligible effect on the estimation.
if self.freeze_key is False:
if state['step'] % state['var_interval'] == 0:
state['var_counter'] += 1
if state['var_counter'] == self.var_update_scaler:
state['var_counter'] = 0
state['var_interval'] *= 2
if (state['step'] + 1) % state['var_interval'] == 0:
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = True
else:
self.deepspeed.enable_backward_allreduce = True
else:
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
else:
state['local_step_counter'] += 1
if state['local_step_counter'] == self.local_step_scaler:
state['local_step_counter'] = 0
state['local_step_interval'] = min(self.local_step_clipper,
state['local_step_interval'] * 2)
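                    # With the defaults above: during warmup the variance-update interval
                    # doubles every var_update_scaler (16) variance updates, and in the frozen
                    # stage the number of purely local steps between synchronizations doubles
                    # every local_step_scaler steps, capped at local_step_clipper (16).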
if not self.initialize:
print('Pop out errors', flush=True)
self.freeze_key = False
state.pop('worker_error')
state.pop('server_error')
if not self.initialize:
self.initialize = True
print(f"Finished the initialization step at rank {dist.get_rank()}")
return loss
if self.state[self.param_groups[0]['params'][0]]['step'] > self.var_freeze_step:
self.freeze_key = True
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
if self.freeze_key is True and self.reinitial_error_buffer is False:
# We need to reinitialize the error buffers when local step > 1 since
# the errors will be logged for different metrics (gradient vs. accumulated momentum).
for group in self.param_groups:
for p in group['params']:
self.state[p]['worker_error'].zero_()
self.state[p]['server_error'].zero_()
self.reinitial_error_buffer = True
return loss
def load_state_dict(self, state_dict):
"""
Overrides load_state_dict() to add special handling when loading checkpoints
"""
        # Because exp_avg_mask may change at different stages (e.g., BERT pre-training
        # with seqlen 128 and then 512), we don't use the exp_avg_mask stored in
        # checkpoints but always use the one the user provides in the training script.
        # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.)
        # Thus here we keep the exp_avg_mask unchanged when loading a checkpoint.
for i, group in enumerate(self.param_groups):
if 'exp_avg_mask' in group:
state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask']
elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict['param_groups'][i]:
state_dict['param_groups'][i].pop('exp_avg_mask')
super().load_state_dict(state_dict)
if self.state[self.param_groups[0]['params'][0]]['step'] < self.var_freeze_step:
            self.freeze_key = False
if (self.state[self.param_groups[0]['params'][0]]['step'] +
1) % self.state[self.param_groups[0]['params'][0]]['var_interval'] == 0:
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = True
else:
self.deepspeed.enable_backward_allreduce = True
else:
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
else:
            self.freeze_key = True
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
self.reinitial_error_buffer = False
for group in self.param_groups:
for p in group['params']:
if 'worker_error' in self.state[p]:
self.state[p].pop('worker_error')
if 'server_error' in self.state[p]:
self.state[p].pop('server_error')
if 'momentum_accumulator' in self.state[p]:
self.state[p].pop('momentum_accumulator')
| 19,112 | 51.798343 | 194 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/fp16/onebit/lamb.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import types
import torch
import numpy as np
from deepspeed import comm as dist
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from deepspeed.accelerator import get_accelerator
class OnebitLamb(torch.optim.Optimizer):
"""Implements the 1-bit Lamb algorithm. Currently GPU-only.
For usage example please see https://www.deepspeed.ai/tutorials/onebit-lamb/
For technical details please see our paper https://arxiv.org/abs/2104.06069.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
freeze_step (int, optional): Number of steps for warmup (uncompressed)
stage before we start using compressed communication. (default 100000)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
max_coeff(float, optional): maximum value of the lamb coefficient (default: 10.0)
min_coeff(float, optional): minimum value of the lamb coefficient (default: 0.01)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in 1-bit Lamb!
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
cuda_aware (boolean, required): Set True if the underlying MPI implementation
supports CUDA-Aware communication. (default: False)
comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl')
coeff_beta (float, optional): coefficient used for computing
running averages of lamb coefficient (default: 0.9) note that you may want to
increase or decrease this beta depending on the freeze_step you choose, as
1/(1 - coeff_beta) should be smaller than or equal to freeze_step
factor_max (float, optional): maximum value of scaling factor to the frozen lamb
coefficient during compression stage (default: 4.0)
factor_min (float, optional): minimum value of scaling factor to the frozen lamb
coefficient during compression stage (default: 0.5)
factor_threshold (float, optional): threshold of how much the scaling factor can
fluctuate between steps (default: 0.1)
.. _Large Batch Optimization for Deep Learning\\: Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
.. _Adam\\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self,
params,
deepspeed=None,
lr=1e-3,
freeze_step=100000,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
eps_inside_sqrt=False,
weight_decay=0.,
max_grad_norm=0.,
max_coeff=10.0,
min_coeff=0.01,
amsgrad=False,
cuda_aware=False,
comm_backend_name='nccl',
coeff_beta=0.9,
factor_max=4.0,
factor_min=0.5,
factor_threshold=0.1):
if amsgrad:
raise RuntimeError('1-bit Lamb does not support the AMSGrad variant.')
defaults = dict(lr=lr,
bias_correction=bias_correction,
betas=betas,
eps=eps,
weight_decay=weight_decay,
max_grad_norm=max_grad_norm,
max_coeff=max_coeff,
min_coeff=min_coeff)
super(OnebitLamb, self).__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
assert (dist.is_initialized())
self.deepspeed = deepspeed
self.lamb_freeze_key = False
self.initialize = False
self.freeze_step = freeze_step
self.cuda_aware = cuda_aware
self.coeff_beta = coeff_beta
self.factor_max = factor_max
self.factor_min = factor_min
self.factor_threshold = factor_threshold
self.using_pipeline = False
self.comm_backend_name = comm_backend_name
# Empty initializer. Set handle based on the comm backend as follows.
self.comm_backend_handle = None
if self.comm_backend_name == 'nccl':
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
assert (
(TORCH_MAJOR == 1 and TORCH_MINOR >= 8) or TORCH_MAJOR >= 2
), "Please use torch 1.8 or greater to enable NCCL backend in 1-bit Adam. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend"
assert dist.is_initialized() == True, "Please initialize the torch distributed backend."
from deepspeed.runtime.comm.nccl import NcclBackend
self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce')
self.comm_backend_handle = NcclBackend(self.deepspeed.mpu)
elif self.comm_backend_name == 'mpi':
from deepspeed.runtime.comm.mpi import MpiBackend
self.comm_backend_handle = MpiBackend(cuda_aware)
self.size = self.comm_backend_handle.size
self.divider = int(self.size * 8 / np.gcd(self.size, 8))
self.exp_avg_flat = []
self.dummy_exp_avg = {}
self.corrected_tensor_sizes = []
self.server_chunk_sizes = []
self.worker_errors = []
self.server_errors = []
self.lamb_coeffs = []
def step(self, closure=None, grads=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
"""
loss = None
if closure is not None:
loss = closure()
if grads is None:
grads_group = [None] * len(self.param_groups)
        # backward compatibility
        # assuming a list/generator of parameters means a single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0]) != list:
grads_group = [grads]
else:
grads_group = grads
#remove the previous stats
del self.lamb_coeffs[:]
if self.lamb_freeze_key:
exp_avg_last_step = []
for group in self.param_groups:
exp_avg_last_step.append([self.state[p]['exp_avg'].detach().clone() for p in group['params']])
if 'scaling_coeff' not in self.state[self.param_groups[0]['params'][0]]:
# Compute the scaling_coeff for each momentum at the end of warmup stage.
# This is used to reduce compression error during compression stage.
momentum_scales = []
for group in self.param_groups:
momentum_scales.append([
(torch.norm(self.state[p]['exp_avg']) / np.sqrt(torch.numel(self.state[p]['exp_avg']))).item()
for p in group['params']
])
united_scale = sum([sum(x) for x in momentum_scales]) / sum([len(x) for x in momentum_scales])
for i, group in enumerate(self.param_groups):
for j, p in enumerate(group['params']):
self.state[p]['scaling_coeff'] = united_scale / momentum_scales[i][j]
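                # Example with assumed numbers: if two momenta have RMS values 0.02 and 0.005,
                # united_scale = 0.0125 and their scaling_coeffs become 0.625 and 2.5, which
                # equalizes momentum magnitudes before 1-bit compression.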
for group, grads_this_group in zip(self.param_groups, grads_group):
if grads_this_group is None:
grads_this_group = [None] * len(group['params'])
bias_correction = 1 if group['bias_correction'] else 0
for p, grad in zip(group['params'], grads_this_group):
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('1-bit Lamb does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0 or (len(state) == 1 and 'scaling_coeff' in state.keys()):
state['step'] = 0
state['lamb_coeff_freeze'] = 0.0
state['last_factor'] = 1.0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
state['exp_avg_sq_fresh'] = torch.zeros_like(p.data)
if not self.initialize:
self.lamb_freeze_key = True
exp_avg, exp_avg_sq, exp_avg_sq_fresh = state['exp_avg'], state['exp_avg_sq'], state[
'exp_avg_sq_fresh']
beta1, beta2 = group['betas']
max_coeff = group['max_coeff']
min_coeff = group['min_coeff']
state['step'] += 1
if self.lamb_freeze_key is False:
# warmup stage, baseline Lamb optimization
                    exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if state['step'] == self.freeze_step:
exp_avg_sq_fresh.data = exp_avg_sq.detach().clone()
grad = None
if self.initialize:
weight_norm = p.data.pow(2).sum().sqrt()
update = exp_avg / (exp_avg_sq.sqrt() + group['eps'])
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
update_norm = update.pow(2).sum().sqrt()
lamb_coeff = 1.0
if weight_norm != 0 and update_norm != 0:
lamb_coeff = (weight_norm / update_norm).item()
if lamb_coeff > max_coeff:
lamb_coeff = max_coeff
if lamb_coeff < min_coeff:
lamb_coeff = min_coeff
if lamb_coeff != 1.0:
state['lamb_coeff_freeze'] = self.coeff_beta * state['lamb_coeff_freeze'] + (
1 - self.coeff_beta) * lamb_coeff
self.lamb_coeffs.append(lamb_coeff)
with torch.no_grad():
p.add_(-group['lr'] * lamb_coeff * update)
else:
# compression stage, update each momentum locally, then
# communicate based on the compressed_allreduce below
if self.initialize:
                        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg.mul_(self.state[p]['scaling_coeff'])
grad = None
# init fused momentum
if len(self.exp_avg_flat) == 0:
momentum_groups = []
tensor_size = 0
for group in self.param_groups:
for p in group['params']:
momentum_groups.append(self.state[p]['exp_avg'])
tensor_size += torch.numel(p.data)
corrected_tensor_size = tensor_size
if tensor_size % (self.size * self.divider) != 0:
difference = ((self.size * self.divider) - (tensor_size % (self.size * self.divider)))
corrected_tensor_size += difference
self.dummy_exp_avg[0] = torch.zeros(difference, device=momentum_groups[0].data.device)
momentum_groups.append(self.dummy_exp_avg[0])
self.corrected_tensor_sizes.append(corrected_tensor_size)
self.server_chunk_sizes.append(corrected_tensor_size // self.size)
self.exp_avg_flat.append(_flatten_dense_tensors([p.detach().clone() for p in momentum_groups]))
updated_params = _unflatten_dense_tensors(self.exp_avg_flat[0], momentum_groups)
for p, q in zip(momentum_groups, updated_params):
p.data = q.data
if self.initialize and len(self.worker_errors) == 0:
get_accelerator().empty_cache()
for i in range(len(self.exp_avg_flat)):
self.worker_errors.append(
torch.zeros(self.corrected_tensor_sizes[i], device=self.exp_avg_flat[i].device))
self.server_errors.append(torch.zeros(self.server_chunk_sizes[i], device=self.exp_avg_flat[i].device))
get_accelerator().empty_cache()
if self.lamb_freeze_key:
if self.size > 1:
for i in range(len(self.exp_avg_flat)):
if not self.initialize:
get_accelerator().empty_cache()
self.worker_errors.append(
torch.zeros(self.corrected_tensor_sizes[i], device=self.exp_avg_flat[i].device))
self.server_errors.append(
torch.zeros(self.server_chunk_sizes[i], device=self.exp_avg_flat[i].device))
get_accelerator().empty_cache()
if dist.get_rank() == 0:
print("Cupy Buffers Initialized Successfully.")
self.comm_backend_handle.compressed_allreduce(self.exp_avg_flat[i], self.worker_errors[0],
self.server_errors[0], self.deepspeed.local_rank)
if dist.get_rank() == 0:
print('Pop out errors', flush=True)
del self.worker_errors[:]
del self.server_errors[:]
else:
self.comm_backend_handle.compressed_allreduce(self.exp_avg_flat[i], self.worker_errors[i],
self.server_errors[i], self.deepspeed.local_rank)
if self.lamb_freeze_key and self.initialize:
for i, group in enumerate(self.param_groups):
bias_correction = 1 if group['bias_correction'] else 0
for j, p in enumerate(group['params']):
state = self.state[p]
exp_avg, exp_avg_sq, exp_avg_sq_fresh = state['exp_avg'], state['exp_avg_sq'], state[
'exp_avg_sq_fresh']
beta1, beta2 = group['betas']
exp_avg.div_(self.state[p]['scaling_coeff'])
# Because 1-bit compression cannot represent exact zero, it is required to
# provide a momentum mask for those params that have constant exact zeros in their
# momentums, otherwise the compression error would keep accumulating.
# For example, for BERT pre-training seq 128, bert.embeddings.position_embeddings.weight
                    # always has exact zeros in its momentum for rows 129 to 512, because it only
# learns up to seq length 128 while the model supports up to 512 seq length.
# (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py about how
# to add this exp_avg_mask for BERT pre-training.)
if 'exp_avg_mask' in group:
if exp_avg.device != group['exp_avg_mask'].device:
group['exp_avg_mask'] = group['exp_avg_mask'].to(device=exp_avg.device)
exp_avg.mul_(group['exp_avg_mask'])
grad_reconstruct = ((exp_avg - exp_avg_last_step[i][j] * beta1) / (1 - beta1))
                    exp_avg_sq_fresh.mul_(beta2).addcmul_(grad_reconstruct, grad_reconstruct, value=1 - beta2)
denom = exp_avg_sq.sqrt() + group['eps']
update_prelim = exp_avg / denom
if group['weight_decay'] > 0.0:
update = update_prelim + group['weight_decay'] * p.data
else:
update = update_prelim
lamb_coeff = 1.0
update_norm = update.pow(2).sum().sqrt()
denom_real = exp_avg_sq_fresh.sqrt() + group['eps']
factor = (denom / denom_real).max().item()
if group['weight_decay'] > 0.0:
update_ratio = min(1.0, (update_prelim.pow(2).sum().sqrt() / update_norm).item())
factor = factor * update_ratio + (1.0 - update_ratio)
if factor > self.factor_max:
factor = self.factor_max
if factor < self.factor_min:
factor = self.factor_min
if factor > state['last_factor'] * (1.0 + self.factor_threshold):
factor = state['last_factor'] * (1.0 + self.factor_threshold)
if factor < state['last_factor'] * (1.0 - self.factor_threshold):
factor = state['last_factor'] * (1.0 - self.factor_threshold)
state['last_factor'] = factor
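                    # Net effect: the frozen lamb coefficient is rescaled by `factor`, which is
                    # clamped to [factor_min, factor_max] and allowed to drift by at most
                    # factor_threshold (e.g. +/-10% with the default 0.1) per step relative to
                    # the previous step's value.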
lamb_coeff = state['lamb_coeff_freeze'] * factor
self.lamb_coeffs.append(lamb_coeff)
with torch.no_grad():
p.add_(-group['lr'] * lamb_coeff * update)
del exp_avg_last_step[:]
exp_avg_last_step = None
if not self.initialize:
self.lamb_freeze_key = False
self.initialize = True
print(f"Finished the initialization step at rank {dist.get_rank()}")
return loss
if self.lamb_freeze_key is False:
if state['step'] >= self.freeze_step:
print('OnebitLamb - starting compressed communication')
self.lamb_freeze_key = True
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
return loss
def load_state_dict(self, state_dict):
"""
Overrides load_state_dict() to add special handling when loading checkpoints
"""
        # Because exp_avg_mask may change at different stages (e.g., BERT pre-training
        # with seqlen 128 and then 512), we don't use the exp_avg_mask stored in
        # checkpoints but always use the one the user provides in the training script.
        # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.)
        # Thus here we keep the exp_avg_mask unchanged when loading a checkpoint.
for i, group in enumerate(self.param_groups):
if 'exp_avg_mask' in group:
state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask']
elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict['param_groups'][i]:
state_dict['param_groups'][i].pop('exp_avg_mask')
super().load_state_dict(state_dict)
# need to reset the fused momentum since loading states will break the linking
del self.exp_avg_flat[:]
self.dummy_exp_avg.clear()
del self.corrected_tensor_sizes[:]
del self.server_chunk_sizes[:]
if self.state[self.param_groups[0]['params'][0]]['step'] < self.freeze_step:
if dist.get_rank() == 0:
print("Checkpoint loaded and OnebitLamb warmup stage starts/continues.")
if self.lamb_freeze_key is True:
self.lamb_freeze_key = False
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = True
else:
self.deepspeed.enable_backward_allreduce = True
for group in self.param_groups:
for p in group['params']:
self.state[p]['lamb_coeff_freeze'] = 0.0
self.state[p]['last_factor'] = 1.0
if 'scaling_coeff' in self.state[p]:
self.state[p].pop('scaling_coeff')
else:
if dist.get_rank() == 0:
print("Checkpoint loaded and OnebitLamb compression stage starts/continues.")
if self.lamb_freeze_key is False:
self.lamb_freeze_key = True
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
        # We reset the compression errors when loading checkpoints for 3 reasons:
        # 1) The worker and server errors on each GPU are distinct, so in the current
        #    implementation only rank 0's errors are saved in the checkpoint; thus we have to
        #    reset the errors. Saving them correctly would require O(num_gpu*model_size) memory
        #    to gather all the errors, which is a very large memory requirement. It's possible
        #    to save them in a distributed way, but it would make checkpoint saving/loading
        #    much more complicated.
        # 2) Even if we were able to save the compression errors correctly, you would need the
        #    exact same number of GPUs in order to load them correctly.
        # 3) We verified on BERT pre-training that occasionally resetting the compression error
        #    at checkpoint loading does not affect convergence.
        # However, please avoid frequent checkpoint loading, which could break the error
        # compensation mechanism and thus affect convergence.
del self.worker_errors[:]
del self.server_errors[:]
def get_lamb_coeffs(self):
return self.lamb_coeffs
| 23,085 | 50.762332 | 196 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/swap_tensor/optimizer_utils.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping tensors to/from (NVMe) storage devices.
"""
import os
import torch
from deepspeed import comm as dist
from deepspeed.utils.logging import logger
from deepspeed.runtime.swap_tensor.constants import *
from deepspeed.runtime.swap_tensor.utils import swap_in_tensors, swap_out_tensors, \
MIN_AIO_BYTES, AIO_ALIGNED_BYTES, get_sized_buffers
from deepspeed.runtime.swap_tensor.utils import SwapBufferManager, SwapBufferPool
class FlattenedTensorSwapInfo(object):
def __init__(self, path, length, offset):
self.path = path
self.offset = offset
self.length = length
class OptimizerStateSwapInfo(object):
def __init__(self, parameter, numel, base_folder):
self.tensors = []
self.param_id = id(parameter)
self.swap_folder = base_folder
self.swap_paths = []
self.swapped_gradients = {}
self.unswapped_gradients = {}
self.tensor_numel = numel
self.tensor_dtype = parameter.dtype
self.tensor_device = parameter.device
self.has_state_tensors = False
self._add_tensors([parameter])
def numel(self):
return self.tensor_numel
def has_gradients(self):
return self.swapped_gradients or self.unswapped_gradients
def _add_tensors(self, tensor_list):
for t in tensor_list:
self.tensors.append(t)
self.swap_paths.append(os.path.join(self.swap_folder, f'{id(t)}.tensor.swp'))
def add_state_tensors(self, tensor_list):
self.has_state_tensors = True
self._add_tensors(tensor_list)
def device(self):
return self.tensor_device
def dtype(self):
return self.tensor_dtype
def release_memory(self):
for tensor in self.tensors:
tensor.data = torch.Tensor()
def get_or_create_gradient_paths(self, offsets, lengths):
gradient_paths = []
for offset, length in zip(offsets, lengths):
if not offset in self.swapped_gradients.keys():
path = os.path.join(self.swap_folder, f'{self.param_id}_gradient_{offset}_{length}.tensor.swp')
self.swapped_gradients[offset] = FlattenedTensorSwapInfo(path, length, offset)
gradient_paths.append(self.swapped_gradients[offset].path)
return gradient_paths
def set_swap_buffers(self, buffers):
compute_lengths = [self.numel()] * len(self.tensors)
compute_buffers = get_sized_buffers(buffers, compute_lengths)
for t, buffer in zip(self.tensors, compute_buffers):
t.data = buffer.data
def get_swap_gradient_buffers(self, swap_buffer):
assert self.numel() <= swap_buffer.numel()
return [swap_buffer.narrow(0, grad.offset, grad.length) for grad in self.swapped_gradients.values()]
def get_swap_gradient_paths(self):
return [grad.path for grad in self.swapped_gradients.values()]
def get_unpinned_state_tensors(self):
return [t for t in self.tensors if not t.is_pinned()]
def read_unswapped_gradients(self, dest_buffer):
num_elem_count = 0
for offset, grad_partition in self.unswapped_gradients.items():
dst_tensor = dest_buffer.narrow(0, offset, grad_partition.numel())
dst_tensor.data.copy_(grad_partition.data)
num_elem_count += grad_partition.numel()
return num_elem_count
def release_unswapped_gradients(self):
self.unswapped_gradients = {}
SWAPPER_DEBUG_MODE = False
SWAP_OUT_GRADIENT_TIMER = 'swap_out_gradient'
class OptimizerSwapper(object):
def __init__(self, swap_config, aio_config, base_folder, optimizer, largest_numel, device, dtype, timers):
self.swap_config = swap_config
self.aio_config = aio_config
# NVMe swap management
self.swap_params_info = {}
self.swap_element_size = torch.tensor([], dtype=dtype).element_size()
self.swap_folder = os.path.join(base_folder, 'optimizer', f'rank{dist.get_rank()}')
os.makedirs(self.swap_folder, exist_ok=True)
self.optimizer = optimizer
# Read/Write alignment for each thread during Intra-request parallelism
self.min_aio_bytes = max(MIN_AIO_BYTES, aio_config[AIO_BLOCK_SIZE])
self.aligned_bytes = AIO_ALIGNED_BYTES * aio_config[AIO_THREAD_COUNT]
self.numel_alignment = self.aligned_bytes // self.swap_element_size
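        # Example with assumed defaults: AIO_ALIGNED_BYTES = 1024 and a single AIO thread give
        # aligned_bytes = 1024, so for fp32 optimizer state (4-byte elements) numel_alignment
        # is 256, i.e. swap requests are padded to multiples of 256 elements.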
# Swap buffer management
self.largest_numel = self._io_aligned_numel(largest_numel)
self.dtype = dtype
self.swap_buffer_manager = SwapBufferManager(num_elems=self.largest_numel,
count=swap_config.buffer_count,
dtype=dtype)
# Timers
self.timers = timers
self.timer_names = set()
# Print exclusion list
self.print_exclude_list = [
'optimizer',
'swap_buffer_manager',
'swap_params_info',
'timers',
'timer_names',
]
def swappable_tensor(self, param=None, numel=None):
assert param is not None or numel is not None, "Either param or numel must be provided"
if param is not None:
return self.min_aio_bytes <= (param.numel() * self.swap_element_size)
return self.min_aio_bytes <= (numel * self.swap_element_size)
def init_timers(self):
self.timer_names = set()
def log_timers(self):
if self.timer_names:
self._log_timers(list(self.timer_names), force=True)
def pre_backward(self):
self.init_timers()
def post_backward(self):
pass
def _flush_gradient_swapper(self, gradient_swapper):
if gradient_swapper.has_buffers():
self._start_timer(SWAP_OUT_GRADIENT_TIMER)
pinned_buffers = gradient_swapper.release_buffers()
self.swap_buffer_manager.free(pinned_buffers)
self._stop_timer(SWAP_OUT_GRADIENT_TIMER)
self.timer_names.add(SWAP_OUT_GRADIENT_TIMER)
self.timer_names.update(gradient_swapper.get_timer_names())
def _swap_out_gradients(self, parameter, gradient_offsets, gradient_tensors, gradient_swapper):
if not id(parameter) in self.swap_params_info.keys():
return
swap_info = self.swap_params_info[id(parameter)]
swappable_tensors = []
swappable_offsets = []
swappable_lengths = []
aligned_gradients, aligned_offsets = self._adjust_for_misaligned_lengths(tensors=gradient_tensors,
offsets=gradient_offsets)
self._start_timer(SWAP_OUT_GRADIENT_TIMER)
for tensor, offset in zip(aligned_gradients, aligned_offsets):
if not self.swappable_tensor(param=tensor):
swap_info.unswapped_gradients[offset] = tensor
continue
swappable_tensors.append(tensor)
swappable_offsets.append(offset)
swappable_lengths.append(tensor.numel())
if len(swappable_tensors) > 0:
if not gradient_swapper.has_buffers():
pinned_buffers = self.swap_buffer_manager.allocate_all(num_elems=self.largest_numel, dtype=self.dtype)
gradient_swapper.add_buffers(pinned_buffers)
swappable_paths = swap_info.get_or_create_gradient_paths(swappable_offsets, swappable_lengths)
gradient_swapper.swap_out_tensors(tensor_list=swappable_tensors, path_list=swappable_paths)
self._stop_timer(SWAP_OUT_GRADIENT_TIMER)
self.timer_names.add(SWAP_OUT_GRADIENT_TIMER)
def _initialize_from_swapped_fp16_params(self, aio_handle, fp16_partitions_info, fp16_num_elems,
fp16_pinned_buffers, fp32_parameters):
assert len(fp32_parameters) == len(fp16_partitions_info)
assert len(fp32_parameters) == len(fp16_num_elems)
assert all([buffer.is_pinned() for buffer in fp16_pinned_buffers])
fp32_swap_paths = self._get_swap_paths(parameters=fp32_parameters, num_elems=fp16_num_elems)
fp32_pinned_buffers = self.swap_buffer_manager.allocate_all(num_elems=self.largest_numel, dtype=self.dtype)
fp16_buffer_numel = [buf.numel() for buf in fp16_pinned_buffers]
assert all([numel >= self.largest_numel for numel in fp16_buffer_numel]), \
f"numel of fp16 buffers {fp16_buffer_numel} is too small for initializing fp32 params {self.largest_numel}"
fp32_swap_buffers = SwapBufferPool(fp32_pinned_buffers)
fp16_swap_buffers = SwapBufferPool(fp16_pinned_buffers)
curr_index = 0
while curr_index < len(fp32_parameters):
fp16_pinned_tensors = self._swap_in_fp16_params(aio_handle=aio_handle,
fp16_num_elems=fp16_num_elems[curr_index:],
fp16_partitions_info=fp16_partitions_info[curr_index:],
fp16_swap_buffers=fp16_swap_buffers)
if dist.get_rank() == 0 and SWAPPER_DEBUG_MODE:
for i, tensor in enumerate(fp16_pinned_tensors):
true_index = curr_index + i
logger.info(
f'swap_in_fp16_param: fp32_id = {id(fp32_parameters[true_index])} index = {true_index} orig_num_elem = {fp16_num_elems[true_index]}, swap_num_elem = {fp16_pinned_tensors[i].numel()}'
)
swap_out_count = self._swap_out_fp16_params(aio_handle=aio_handle,
fp32_swap_paths=fp32_swap_paths[curr_index:],
fp32_swap_buffers=fp32_swap_buffers,
fp16_pinned_tensors=fp16_pinned_tensors)
assert swap_out_count == len(fp16_pinned_tensors), \
f"{swap_out_count} does not match {len(fp16_pinned_tensors)}"
fp16_swap_buffers.reset()
fp32_swap_buffers.reset()
curr_index += swap_out_count
self.swap_buffer_manager.free(fp32_pinned_buffers)
def _swap_in_fp16_params(self, aio_handle, fp16_num_elems, fp16_partitions_info, fp16_swap_buffers):
assert len(fp16_num_elems) > 0
swapped_fp16_tensors = []
swap_tensors = []
swap_paths = []
unswapped_srcs = []
unswapped_dsts = []
for i, numel in enumerate(fp16_num_elems):
pinned_tensor, _ = fp16_swap_buffers.allocate_tensor(numel, None, numel)
if pinned_tensor is None:
break
swapped_fp16_tensors.append(pinned_tensor)
offset = 0
for tensor, partition_numel, partition_path in fp16_partitions_info[i]:
dst_tensor = pinned_tensor.narrow(0, offset, partition_numel)
if partition_path is None:
unswapped_srcs.append(tensor)
unswapped_dsts.append(dst_tensor)
else:
swap_paths.append(partition_path)
swap_tensors.append(dst_tensor)
offset += partition_numel
assert len(swapped_fp16_tensors) + len(unswapped_srcs) > 0
ret = swap_in_tensors(aio_handle, swap_tensors, swap_paths)
for src, dst in zip(unswapped_srcs, unswapped_dsts):
dst.data.copy_(src.data)
assert len(swap_tensors) == aio_handle.wait()
return swapped_fp16_tensors
def _swap_out_fp16_params(self, aio_handle, fp32_swap_paths, fp32_swap_buffers, fp16_pinned_tensors):
assert len(fp16_pinned_tensors) <= len(fp32_swap_paths)
swap_out_count = 0
for i, fp16_tensor in enumerate(fp16_pinned_tensors):
if not fp32_swap_buffers.has_space(fp16_tensor.numel()):
fp32_swap_buffers.swap_out(aio_handle)
fp32_swap_buffers.reset()
pinned_tensor, _ = fp32_swap_buffers.insert_tensor(fp16_tensor, fp32_swap_paths[i],
self._io_aligned_numel(fp16_tensor.numel()))
assert pinned_tensor is not None
swap_out_count += 1
if len(fp32_swap_buffers.get_swap_tensors()) > 0:
fp32_swap_buffers.swap_out(aio_handle)
return swap_out_count
def _initialize_parameters(self, parameters, src_tensors, aio_handle):
assert len(parameters) == len(src_tensors)
swap_paths = self._get_swap_paths(parameters=parameters, num_elems=[src.numel() for src in src_tensors])
SWAP_INIT_TIMER = "swap_init_write"
self._start_timer(SWAP_INIT_TIMER)
pinned_buffers = self.swap_buffer_manager.allocate_all(num_elems=self.largest_numel, dtype=self.dtype)
assert pinned_buffers is not None
self._swap_out_unpinned_tensors(aio_handle=aio_handle,
unpinned_tensors=src_tensors,
dest_paths=swap_paths,
pinned_buffers=pinned_buffers)
if dist.get_rank() == 0 and SWAPPER_DEBUG_MODE:
for i, tensor in enumerate(src_tensors):
logger.info(
f'copy_in_fp16_param: fp32_id = {id(parameters[i])} index = {i}, swap_num_elem = {src_tensors[i].numel()}'
)
self.swap_buffer_manager.free(pinned_buffers)
self._stop_timer(SWAP_INIT_TIMER)
self._log_timers([SWAP_INIT_TIMER])
def _get_swap_paths(self, parameters, num_elems):
swap_info_list = [
self._create_param_swap_info(parameter=p,
numel=numel) \
for p, numel in zip(parameters, num_elems)
]
assert len(swap_info_list) == len(num_elems)
swap_paths = [info.swap_paths[0] for info in swap_info_list]
return swap_paths
def _swap_out_unpinned_tensors(self, aio_handle, unpinned_tensors, dest_paths, pinned_buffers):
swap_buffer_count = len(pinned_buffers)
unpinned_tensor_count = len(unpinned_tensors)
for i in range(0, unpinned_tensor_count, swap_buffer_count):
swap_tensor_count = min((unpinned_tensor_count - i), swap_buffer_count)
src_tensors = unpinned_tensors[i:(i + swap_tensor_count)]
compute_lengths = [t.numel() for t in src_tensors]
compute_buffers = get_sized_buffers(pinned_buffers, compute_lengths)
for dst, src in zip(compute_buffers, src_tensors):
dst.data.copy_(src.data)
swap_lengths = [self._io_aligned_numel(t.numel()) for t in src_tensors]
swap_buffers = get_sized_buffers(pinned_buffers, swap_lengths)
swap_paths = dest_paths[i:(i + swap_tensor_count)]
swap_out_tensors(aio_handle, swap_buffers, swap_paths)
assert aio_handle.wait() == swap_tensor_count
def _adjust_for_misaligned_lengths(self, tensors, offsets):
new_tensors = []
new_offsets = []
for orig_tensor, orig_offset in zip(tensors, offsets):
if not self.swappable_tensor(param=orig_tensor):
new_tensors.append(orig_tensor)
new_offsets.append(orig_offset)
continue
remainder = orig_tensor.numel() % self.numel_alignment
if remainder == 0:
new_tensors.append(orig_tensor)
new_offsets.append(orig_offset)
continue
# Split into two by making remainder a tensor
aligned_length = (orig_tensor.numel() // self.numel_alignment) * self.numel_alignment
new_tensors.append(orig_tensor.narrow(0, 0, aligned_length))
new_offsets.append(orig_offset)
# remainder tensor
new_tensors.append(orig_tensor.narrow(0, aligned_length, remainder))
new_offsets.append(orig_offset + aligned_length)
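            # Example with an assumed alignment of 256 elements: a 1000-element gradient is
            # split into a 768-element aligned head plus a 232-element remainder; the small
            # remainder later fails the swappable_tensor() check and stays in memory as an
            # unswapped gradient partition.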
return new_tensors, new_offsets
def _retrieve_unswapped_grad_partitions(self, swap_info, dest_buffer):
UNSWAPPED_READ_GRADIENTS = 'unswapped_read_gradients'
self._start_timer(UNSWAPPED_READ_GRADIENTS)
tensor_count = len(swap_info.unswapped_gradients)
num_elem_count = swap_info.read_unswapped_gradients(dest_buffer)
self._stop_timer(UNSWAPPED_READ_GRADIENTS)
self._log_timers([UNSWAPPED_READ_GRADIENTS])
# It should be safe to discard unswapped gradient partitions
swap_info.release_unswapped_gradients()
if SWAPPER_DEBUG_MODE:
logger.info(
f'optimizer_retrieve_unswapped_gradients: param={swap_info.param_id} tensor_count={tensor_count} elem_count={num_elem_count}'
)
def _get_state_tensors(self, parameter):
if not parameter in self.optimizer.state:
return []
tensor_list = []
for value in self.optimizer.state[parameter].values():
if torch.is_tensor(value):
tensor_list.append(value)
return tensor_list
def _update_param_state_info(self, swap_info, parameter):
if not swap_info.has_state_tensors:
state_tensors = self._get_state_tensors(parameter)
if state_tensors:
swap_info.add_state_tensors(state_tensors)
def _create_param_swap_info(self, parameter, numel):
param_id = id(parameter)
assert not param_id in self.swap_params_info
self.swap_params_info[param_id] = OptimizerStateSwapInfo(parameter=parameter,
numel=numel,
base_folder=self.swap_folder)
swap_info = self.swap_params_info[param_id]
self._update_param_state_info(swap_info, parameter)
return swap_info
def _get_param_swap_info(self, parameter):
param_id = id(parameter)
swap_info = self.swap_params_info.get(param_id, None)
if swap_info is not None:
self._update_param_state_info(swap_info, parameter)
return swap_info
def _start_timer(self, name):
if self.timers:
self.timers(name).start()
def _stop_timer(self, name):
if self.timers:
self.timers(name).stop()
def _log_timers(self, name_list, force=False):
if self.timers and (SWAPPER_DEBUG_MODE or force):
self.timers.log(name_list)
def _io_aligned_numel(self, numel):
remainder = numel % self.numel_alignment
return numel if remainder == 0 else (numel + self.numel_alignment - remainder)
| 18,967 | 39.271762 | 206 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/swap_tensor/utils.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping tensors to/from (NVMe) storage devices.
"""
import torch
from deepspeed.utils.logging import logger
from deepspeed.accelerator import get_accelerator
from deepspeed import comm as dist
MIN_AIO_BYTES = 1024**2
AIO_ALIGNED_BYTES = 1024
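# Tensors smaller than MIN_AIO_BYTES (1 MiB) are not worth swapping to NVMe and are kept in
# memory instead; see OptimizerSwapper.swappable_tensor for how this threshold is applied.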
def swap_in_tensors(swap_handle, tensor_buffers, swap_paths):
for buffer, path in zip(tensor_buffers, swap_paths):
assert (swap_handle.async_pread(buffer, path) == 0)
def swap_out_tensors(swap_handle, tensor_buffers, swap_paths):
for buffer, path in zip(tensor_buffers, swap_paths):
assert (swap_handle.async_pwrite(buffer, path) == 0)
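# Both helpers only submit asynchronous I/O requests (async_pread/async_pwrite return 0 on
# successful submission); callers must later invoke swap_handle.wait(), which returns the
# number of completed requests, before reusing the buffers.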
def print_object(obj, name, exclude_list=[]):
logger.info('{}:'.format(name))
for arg in sorted(vars(obj)):
if not arg in exclude_list:
dots = '.' * (29 - len(arg))
logger.info(' {} {} {}'.format(arg, dots, getattr(obj, arg)))
class SwapBuffer(object):
def __init__(self, buffer):
self.buffer = buffer
self.reset()
def reset(self):
self.offset = 0
self.swap_tensors = {}
self.compute_tensors = {}
self.swap_paths = {}
self.num_elem = 0
def insert_tensor(self, tensor, swap_path, aligned_numel):
swap_tensor, compute_tensor = self.allocate_tensor(swap_path, tensor.numel(), aligned_numel)
compute_tensor.data.copy_(tensor.data)
return swap_tensor, compute_tensor
def allocate_tensor(self, swap_path, numel, aligned_numel):
assert self.has_space(aligned_numel)
assert not self.offset in self.swap_tensors
allocate_offset = self.offset
swap_tensor = self.buffer.narrow(0, allocate_offset, aligned_numel)
dest_tensor = swap_tensor.narrow(0, 0, numel)
self.swap_tensors[allocate_offset] = swap_tensor
self.compute_tensors[allocate_offset] = dest_tensor
self.swap_paths[allocate_offset] = swap_path
self.offset += aligned_numel
self.num_elem += numel
return self.swap_tensors[allocate_offset], self.compute_tensors[allocate_offset]
def has_space(self, numel):
return (self.offset + numel) <= self.buffer.numel()
def get_swap_tensors(self):
return [tensor for tensor in self.swap_tensors.values()]
def get_swap_paths(self):
return [path for path in self.swap_paths.values()]
def get_compute_tensors(self):
return [tensor for tensor in self.compute_tensors.values()]
def get_num_elem(self):
return self.num_elem
def get_swap_tensor(self, offset):
return self.swap_tensors.get(offset, None)
def get_compute_tensor(self, offset):
return self.compute_tensors.get(offset, None)
def get_swap_path(self, offset):
        return self.swap_paths.get(offset, None)
class SwapBufferPool(object):
def __init__(self, buffers):
assert all([buf.is_pinned() for buf in buffers])
self.buffers = [SwapBuffer(buf) for buf in buffers]
self.current_index = 0
def reset(self):
self.current_index = 0
for buffer in self.buffers:
buffer.reset()
def allocate_tensor(self, numel, swap_path, aligned_numel):
if self.has_space(aligned_numel):
swap_tensor, compute_tensor = self._get_current_buffer().allocate_tensor(swap_path, numel, aligned_numel)
return swap_tensor, compute_tensor
return None, None
def insert_tensor(self, tensor, swap_path, aligned_numel):
if self.has_space(aligned_numel):
swap_tensor, compute_tensor = self._get_current_buffer().insert_tensor(tensor, swap_path, aligned_numel)
return swap_tensor, compute_tensor
return None, None
def get_swap_tensors(self):
swap_tensors = []
for buffer in self._get_used_buffers():
swap_tensors += buffer.get_swap_tensors()
return swap_tensors
def get_swap_paths(self):
swap_paths = []
for buffer in self._get_used_buffers():
swap_paths += buffer.get_swap_paths()
return swap_paths
def get_compute_tensors(self):
compute_tensors = []
for buffer in self._get_used_buffers():
compute_tensors += buffer.get_compute_tensors()
return compute_tensors
def has_space(self, numel):
if self._get_current_buffer().has_space(numel):
return True
if self.current_index == len(self.buffers) - 1:
return False
self.current_index += 1
return self._get_current_buffer().has_space(numel)
def swap_out(self, aio_handle, async_op=False):
swap_tensors = self.get_swap_tensors()
swap_paths = self.get_swap_paths()
assert all([p is not None for p in swap_paths])
swap_out_tensors(aio_handle, swap_tensors, swap_paths)
if not async_op:
assert len(swap_tensors) == aio_handle.wait()
def swap_in(self, aio_handle, async_op=False):
swap_tensors = self.get_swap_tensors()
swap_paths = self.get_swap_paths()
assert all([p is not None for p in swap_paths])
swap_in_tensors(aio_handle, swap_tensors, swap_paths)
if not async_op:
assert len(swap_tensors) == aio_handle.wait()
def _get_current_buffer(self):
return self.buffers[self.current_index]
def _get_used_buffers(self):
return self.buffers[:self.current_index + 1]
class SwapBufferManager(object):
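    # Owns a fixed pool of pinned (page-locked) host buffers of num_elems elements each;
    # allocate()/allocate_all() hand out free buffers narrowed to the requested size and
    # free() returns them to the pool, so no pinned memory is allocated on the hot path.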
def __init__(self, num_elems, count, dtype):
self.num_elems = num_elems
self.count = count
self.dtype = dtype
self.all_buffers = [
get_accelerator().pin_memory(torch.zeros(num_elems, device='cpu', dtype=dtype)) for _ in range(count)
]
self.free_buffer_index = [i for i in range(count)]
self.used_buffer_index = {}
self.gigabytes = (self.all_buffers[0].element_size() * num_elems * count) / (1024**3)
if dist.get_rank() == 0:
exclude_list = ['all_buffers']
print_object(obj=self, name='SwapBufferManager', exclude_list=exclude_list)
def allocate(self, num_elems, count, dtype):
assert dtype == self.dtype
assert num_elems <= self.num_elems
if count > len(self.free_buffer_index):
return None
used_indices = self.free_buffer_index[-count:]
self.free_buffer_index = self.free_buffer_index[:-count]
buffers = []
for i in used_indices:
tmp_buffer = self.all_buffers[i].narrow(0, 0, num_elems)
buffers.append(tmp_buffer)
self.used_buffer_index[id(tmp_buffer)] = i
return buffers
def allocate_all(self, num_elems, dtype):
return self.allocate(num_elems=num_elems, count=len(self.free_buffer_index), dtype=dtype)
def free(self, buffers):
buffer_ids = []
for buf in buffers:
buffer_ids.append(id(buf))
assert all([b_id in self.used_buffer_index for b_id in buffer_ids])
for b_id in buffer_ids:
self.free_buffer_index.append(self.used_buffer_index[b_id])
del (self.used_buffer_index[b_id])
def get_sized_buffer(buffer, num_elems):
assert num_elems <= buffer.numel(), \
f'num_elems {num_elems} > buffer {buffer.numel()}'
return buffer.narrow(0, 0, num_elems) if num_elems < buffer.numel() else buffer
def get_sized_buffers(buffer_list, num_elems_list):
swap_buffers = [
get_sized_buffer(buffer, num_elems) \
for buffer, num_elems in zip(buffer_list, num_elems_list)
]
return swap_buffers
| 7,734 | 31.229167 | 117 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/swap_tensor/async_swapper.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping tensors to/from (NVMe) storage devices.
"""
import torch
from deepspeed import comm as dist
from deepspeed.utils.logging import logger
from deepspeed.runtime.swap_tensor.utils import swap_out_tensors, SwapBuffer
INVALID_BUFFER_INDEX = -1
ASYNC_SWAPPER_WAIT_TIMER = 'async_swap_gradient_wait'
class AsyncTensorSwapper(object):
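    # Pinned buffers cycle through states tracked by index lists: free -> current (being
    # filled by _swap_out_tensor) -> ready (full, queued for writing) -> swapping (async
    # write in flight) -> free again once aio_handle.wait() confirms completion.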
def __init__(self, aio_handle, numel_alignment, timers):
self.free_buffer_index = []
self.swapping_buffer_index = []
self.ready_buffer_index = []
self.current_buffer_index = INVALID_BUFFER_INDEX
self.all_buffers = []
self.aio_handle = aio_handle
self.numel_alignment = numel_alignment
self.max_numel = 0
self.num_pending_swaps = 0
self.timers = timers
self.timer_names = set()
self.num_elements_swapped = 0
self.dtype = None
def has_buffers(self):
return len(self.all_buffers) > 0
def add_buffers(self, buffer_list):
assert len(self.all_buffers) == 0
assert all([buffer.is_pinned() for buffer in buffer_list])
dtype = buffer_list[0].dtype
assert all([buffer.dtype == dtype for buffer in buffer_list])
self.dtype = dtype
self.all_buffers = [SwapBuffer(buffer) for buffer in buffer_list]
self.free_buffer_index += [i for i in range(len(self.all_buffers))]
self.max_numel = max([buffer.numel() for buffer in buffer_list])
self.timer_names = set()
def get_timer_names(self):
return list(self.timer_names)
def release_buffers(self):
self._report_statistics('Swapped out[Before flush]')
self._flush_buffers_until_complete()
self._report_statistics('Swapped out[After flush]')
pinned_buffers = [buf.buffer for buf in self.all_buffers]
self.all_buffers = []
self.free_buffer_index = []
self.current_buffer_index = INVALID_BUFFER_INDEX
self.num_elements_swapped = 0
self.dtype = None
return pinned_buffers
def swap_out_tensors(self, tensor_list, path_list):
for tensor, swap_path in zip(tensor_list, path_list):
self._swap_out_tensor(tensor, swap_path)
def _report_statistics(self, message):
if dist.get_rank() == 0:
element_size = torch.tensor([], dtype=self.dtype).element_size()
swapped_GB = (self.num_elements_swapped * element_size) / (1024**3)
logger.debug(f'{message} num_elems = {self.num_elements_swapped}, {swapped_GB:5.2f} GB')
def _swap_out_tensor(self, tensor, swap_path):
assert len(self.all_buffers) > 0
aligned_numel = self._io_aligned_numel(tensor.numel())
assert aligned_numel <= self.max_numel
self._make_swap_space(aligned_numel)
assert self.current_buffer_index != INVALID_BUFFER_INDEX
swap_buffer = self._get_current_buffer()
swap_buffer.insert_tensor(tensor, swap_path, aligned_numel)
def _make_swap_space(self, numel):
if self.current_buffer_index == INVALID_BUFFER_INDEX:
self._allocate_buffer()
return
if not self._get_current_buffer().has_space(numel):
if len(self.free_buffer_index) > 0:
self._flush_ready_buffers()
else:
self._flush_buffers_until_complete()
self._allocate_buffer()
def _io_aligned_numel(self, numel):
remainder = numel % self.numel_alignment
return numel if remainder == 0 else (numel + self.numel_alignment - remainder)
def _allocate_buffer(self):
assert self.current_buffer_index == INVALID_BUFFER_INDEX
assert len(self.all_buffers) > 0
assert len(self.free_buffer_index) > 0
self.current_buffer_index = self.free_buffer_index[-1]
self.free_buffer_index = self.free_buffer_index[:-1]
def _flush_ready_buffers(self):
if self.current_buffer_index != INVALID_BUFFER_INDEX:
self.ready_buffer_index.append(self.current_buffer_index)
self.current_buffer_index = INVALID_BUFFER_INDEX
self._swap_out_ready_buffers()
def _flush_buffers_until_complete(self):
self._flush_ready_buffers()
assert len(self.ready_buffer_index) == 0
self._wait_for_swap_complete()
assert len(self.swapping_buffer_index) == 0
assert len(self.free_buffer_index) == len(self.all_buffers)
def _swap_out_ready_buffers(self):
for buffer_index in self.ready_buffer_index:
buffer = self._get_buffer(buffer_index)
swap_tensors = buffer.get_swap_tensors()
swap_paths = buffer.get_swap_paths()
self.num_pending_swaps += len(swap_tensors)
swap_out_tensors(self.aio_handle, swap_tensors, swap_paths)
self.swapping_buffer_index += self.ready_buffer_index
self.ready_buffer_index = []
def _wait_for_swap_complete(self):
assert len(self.swapping_buffer_index) > 0
self._start_timer(ASYNC_SWAPPER_WAIT_TIMER)
assert self.aio_handle.wait() == self.num_pending_swaps
self._stop_timer(ASYNC_SWAPPER_WAIT_TIMER)
self.timer_names.add(ASYNC_SWAPPER_WAIT_TIMER)
self.num_pending_swaps = 0
for buffer_index in self.swapping_buffer_index:
buffer = self._get_buffer(buffer_index)
self.num_elements_swapped += buffer.get_num_elem()
buffer.reset()
self.free_buffer_index += self.swapping_buffer_index
assert len(self.free_buffer_index) <= len(self.all_buffers)
self.swapping_buffer_index = []
def _get_buffer(self, index):
assert index != INVALID_BUFFER_INDEX
return self.all_buffers[index]
def _get_current_buffer(self):
return self._get_buffer(self.current_buffer_index)
def _start_timer(self, name):
if self.timers:
self.timers(name).start()
def _stop_timer(self, name):
if self.timers:
self.timers(name).stop()
def _log_timers(self, name_list, force=False):
if self.timers and force:
self.timers.log(name_list)
| 6,282 | 34.902857 | 100 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/swap_tensor/partitioned_param_swapper.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping tensors to/from (NVMe) storage devices.
"""
import os
import shutil
from enum import Enum
import torch
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import AsyncIOBuilder
from .constants import *
from .utils import swap_in_tensors, swap_out_tensors, MIN_AIO_BYTES, AIO_ALIGNED_BYTES, print_object, SwapBufferPool
def print_rank_0(message, debug=False, force=False):
if dist.get_rank() == 0 and (debug or force):
print(message)
class PartitionedParamStatus(Enum):
# Partitioned parameters are present and ready for use
AVAILABLE = 1
# partitioned params are in some non-memory device
NOT_AVAILABLE = 2
# partitioned params are being read from some non-memory device.
INFLIGHT = 3
class AsyncPartitionedParameterSwapper(object):
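    """Descriptive summary (added for clarity): manages swapping of ZeRO stage-3
    partitioned parameters between pinned host buffers and NVMe storage using
    asynchronous AIO read/write handles. Tracks per-parameter swap paths, buffer
    assignments, and inflight reads/writes.
    """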
def __init__(self, ds_config, model_dtype):
aio_op = AsyncIOBuilder().load(verbose=False)
self.aio_handle = aio_op.aio_handle
self.dtype = model_dtype
#set swap buffers, create aio handles
self._configure_aio(ds_config)
#mapping from param id to path
self.id_to_path = {}
        #mapping from param_id to buffer id
self.param_id_to_buffer_id = {}
# mapping from param_id to swap buffer
self.param_id_to_swap_buffer = {}
#number of elements in the param
self.param_id_to_numel = {}
self.pending_writes = 0
self.pending_reads = 0
#keep track of async swap in params and buffers
self.inflight_params = []
self.inflight_swap_in_buffers = []
self.inflight_numel = 0
#keep track of available params
self.available_params = set()
self.available_numel = 0
# for swapping out from partitioned fp32 params
self.partitioned_swap_buffer = None
self.partitioned_swap_pool = None
self.invalid_buffer = torch.tensor(1).half()
if dist.get_rank() == 0:
exclude_list = ['aio_read_handle', 'aio_write_handle', 'buffers']
print_object(obj=self, name='AsyncPartitionedParameterSwapper', exclude_list=exclude_list)
def available_swap_in_buffers(self):
return len(self.available_buffer_ids)
def _configure_aio(self, ds_config):
self.swap_config = ds_config.zero_config.offload_param
torch_dtype_string = str(self.dtype).split(".")[1]
self.swap_folder = os.path.join(self.swap_config.nvme_path, 'zero_stage_3', f'{torch_dtype_string}params',
f'rank{dist.get_rank()}')
shutil.rmtree(self.swap_folder, ignore_errors=True)
os.makedirs(self.swap_folder, exist_ok=True)
self.swap_element_size = torch.tensor([], dtype=self.dtype).element_size()
self.aio_config = ds_config.aio_config
# Read/Write alignment for each thread during Intra-request parallelism
self.min_aio_bytes = max(MIN_AIO_BYTES, self.aio_config[AIO_BLOCK_SIZE])
self.aligned_bytes = AIO_ALIGNED_BYTES * self.aio_config[AIO_THREAD_COUNT]
self.numel_alignment = self.aligned_bytes // self.swap_element_size
self.elements_per_buffer = self.swap_config.buffer_size
self.aligned_elements_per_buffer = self._io_aligned_numel(self.elements_per_buffer)
self.param_buffer_count = self.swap_config.buffer_count
self.available_buffer_ids = [i for i in range(self.param_buffer_count)]
self.reserved_buffer_ids = []
self.buffers = get_accelerator().pin_memory(
torch.empty(int(self.aligned_elements_per_buffer * self.param_buffer_count),
dtype=self.dtype,
requires_grad=False))
self.aio_read_handle = self.aio_handle(self.aio_config[AIO_BLOCK_SIZE], self.aio_config[AIO_QUEUE_DEPTH],
self.aio_config[AIO_SINGLE_SUBMIT], self.aio_config[AIO_OVERLAP_EVENTS],
self.aio_config[AIO_THREAD_COUNT])
self.aio_write_handle = self.aio_handle(self.aio_config[AIO_BLOCK_SIZE], self.aio_config[AIO_QUEUE_DEPTH],
self.aio_config[AIO_SINGLE_SUBMIT],
self.aio_config[AIO_OVERLAP_EVENTS], self.aio_config[AIO_THREAD_COUNT])
self.swap_out_params = []
#Check if partitioned param or numel in a tensor is swappable or not
def swappable_tensor(self, param=None, numel=None):
if param is not None:
            assert numel is None, "Both param and numel cannot be provided"
numel = param.ds_tensor.ds_numel
if numel is not None:
return self.min_aio_bytes <= numel * self.swap_element_size
assert False, "Either param or numel must be provided"
def get_path(self, param, must_exist=False):
paths = self._get_swap_paths([param], must_exist=must_exist)
return paths[0]
def _get_swap_paths(self, params, must_exist=False):
paths = []
for param in params:
param_id = param.ds_id
if param_id in self.id_to_path.keys():
param_path = self.id_to_path[param_id]
else:
assert not must_exist, f"Path for param id {param_id} does not exist"
param_path = os.path.join(self.swap_folder, f'{param_id}_param.tensor.swp')
self.id_to_path[param_id] = param_path
paths.append(param_path)
return paths
def _get_swap_buffers(self, params):
buffers = []
for param in params:
param_id = param.ds_id
assert param_id in self.param_id_to_swap_buffer.keys(), \
f'param {param_id} has not been assigned a swap buffer'
buffers.append(self.param_id_to_swap_buffer[param_id])
return buffers
def _track_numel(self, params):
for param in params:
assert param.ds_tensor is not None, "Partitioned tensor is None"
self.param_id_to_numel[param.ds_id] = param.ds_tensor.ds_numel
def _allocate_and_return_buffers_for_swap_in(self, params):
compute_buffers = []
swap_buffers = []
for param in params:
param_id = param.ds_id
assert param_id in self.param_id_to_numel.keys(), f" Number of elements in param {param_id} is unknown"
assert param_id not in self.param_id_to_buffer_id.keys(
), f"param {param_id} already assigned swap buffer id {self.param_id_to_buffer_id[param_id]}"
assert param_id not in self.param_id_to_swap_buffer.keys(
), f"param {param_id} has already been assigned a swap buffer"
buffer_id = self.available_buffer_ids.pop()
print_rank_0(f"param {param.ds_id} is assigned swap in buffer id {buffer_id} ")
self.param_id_to_buffer_id[param_id] = buffer_id
aligned_swap_numel = self._io_aligned_numel(self.param_id_to_numel[param_id])
swap_buffer = self.buffers.narrow(0, int(buffer_id * self.aligned_elements_per_buffer), aligned_swap_numel)
self.param_id_to_swap_buffer[param_id] = swap_buffer
compute_buffer = swap_buffer.narrow(0, 0, self.param_id_to_numel[param_id])
compute_buffers.append(compute_buffer)
swap_buffers.append(swap_buffer)
return compute_buffers, swap_buffers
    #waits for inflight nvme writes to complete
def synchronize_writes(self):
if self.pending_writes == 0:
return
assert self.pending_writes == self.aio_write_handle.wait()
self.pending_writes = 0
self.remove_partition_and_release_buffers(self.swap_out_params)
self.swap_out_params = []
#waits for inflight nvme reads to complete
def synchronize_reads(self):
if self.pending_reads == 0:
return
assert self.pending_reads == self.aio_read_handle.wait()
self.pending_reads = 0
for param, swap_in_buffer in zip(self.inflight_params, self.inflight_swap_in_buffers):
param_id = param.ds_id
compute_buffer = swap_in_buffer.narrow(0, 0, self.param_id_to_numel[param_id])
param.ds_tensor.data = compute_buffer.data
param.ds_tensor.status = PartitionedParamStatus.AVAILABLE
self.available_params.update([param.ds_id for param in self.inflight_params])
self.available_numel += self.inflight_numel
self.inflight_params = []
self.inflight_swap_in_buffers = []
self.inflight_numel = 0
#Removes the memory assignment and releases the buffers
#Should only be executed after swapping out the tensors
def remove_partition_and_release_buffers(self, params):
for param in params:
param_id = param.ds_id
if param_id in self.param_id_to_buffer_id.keys():
buffer_id = self.param_id_to_buffer_id[param_id]
assert buffer_id is not None, "Missing buffer id for releasing"
self.available_buffer_ids.append(buffer_id)
del self.param_id_to_buffer_id[param_id]
del self.param_id_to_swap_buffer[param_id]
print_rank_0(f"param {param.ds_id} releases buffer id {buffer_id} ")
if param_id in self.available_params:
self.available_params.remove(param_id)
self.available_numel -= self.param_id_to_numel[param_id]
param.ds_tensor.data = self.invalid_buffer.data
param.ds_tensor.status = PartitionedParamStatus.NOT_AVAILABLE
    #writes in-memory buffers to nvme. Does not release the buffers
def _swap_out(self, params, async_op=True):
swap_out_paths = self._get_swap_paths(params)
swap_out_params = self._get_swap_buffers(params)
self._track_numel(params)
swap_out_tensors(self.aio_write_handle, swap_out_params, swap_out_paths)
self.pending_writes += len(swap_out_params)
self.swap_out_params += params
if not async_op:
self.synchronize_writes()
#blocking swap out followed by releasing the memory buffers
def swap_out_and_release(self, params, async_op=False, force_buffer_release=False):
if async_op:
            assert force_buffer_release, "Should not release preallocated buffers without completing the swap out. Set force_buffer_release to True to do it anyway"
self._swap_out(params, async_op=async_op)
    # bookkeeping function for inflight swap in
def _update_inflight_swap_in(self, params, swap_in_buffers, inflight_numel):
self.inflight_params.extend(params)
self.inflight_swap_in_buffers.extend(swap_in_buffers)
self.inflight_numel += inflight_numel
for param in params:
param.ds_tensor.status = PartitionedParamStatus.INFLIGHT
self.pending_reads += len(params)
#assigns an in memory buffer and swaps in from nvme
def swap_in(self, params, async_op=True, swap_in_buffers=None):
assert all([param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE
for param in params]), "Some params are already available or in flight"
swap_in_paths = self._get_swap_paths(params)
if swap_in_buffers is None:
if len(self.available_buffer_ids) < len(swap_in_paths):
ids = [p.ds_id for p in params]
print_rank_0(
f'Not enough swap in buffers {len(self.available_buffer_ids)} for {len(swap_in_paths)} params, ids = {ids}',
force=True)
print_rank_0(
f'Num inflight: params {len(self.inflight_params)}, buffers {len(self.inflight_swap_in_buffers)}, numel = {self.inflight_numel}',
force=True)
print_rank_0(
f'Num available params: count = {len(self.available_params)}, ids = {self.available_params}, numel = {self.available_numel}',
force=True)
assert len(swap_in_paths) <= len(
self.available_buffer_ids
), f"Not enough buffers {len(self.available_buffer_ids)} for swapping {len(swap_in_paths)}"
compute_buffers, swap_in_buffers = self._allocate_and_return_buffers_for_swap_in(params)
inflight_numel = sum([t.numel() for t in compute_buffers])
else:
inflight_numel = sum([t.numel() for t in swap_in_buffers])
swap_in_tensors(self.aio_read_handle, swap_in_buffers, swap_in_paths)
self._update_inflight_swap_in(params, swap_in_buffers, inflight_numel)
if not async_op:
self.synchronize_reads()
    # Enables swapping into a buffer that is outside the control of the swapper. This is always synchronous
def swap_into_buffer(self, param, dest_buffer):
assert param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE, f"param {param.ds_id} is already available or inflight"
require_swap_buffer = not (dest_buffer.is_pinned() and self._is_io_aligned(dest_buffer.numel()))
if require_swap_buffer:
assert len(self.available_buffer_ids) > 0, f"No buffer available to swap param {param.ds_id}."
compute_buffers, swap_in_buffers = self._allocate_and_return_buffers_for_swap_in([param])
inflight_numel = compute_buffers[0].numel()
else:
swap_in_buffers = [dest_buffer]
inflight_numel = dest_buffer.numel()
swap_in_paths = self._get_swap_paths([param])
swap_in_tensors(self.aio_read_handle, swap_in_buffers, swap_in_paths)
self._update_inflight_swap_in([param], swap_in_buffers, inflight_numel)
self.synchronize_reads()
if require_swap_buffer:
dest_buffer.data.copy_(param.ds_tensor.data)
# Release swap buffer memory assignment. Note, this will mark the parameter not available.
self.remove_partition_and_release_buffers([param])
#assign a buffer to a param and return the buffer
def get_buffer(self, param, numel):
param_id = param.ds_id
assert self.available_swap_in_buffers(
) > 0, f"No swap buffers to allocate for fp16 param {param_id} of numel = {numel}"
assert numel < self.elements_per_buffer, f"More elements {numel} than buffer size {self.elements_per_buffer}"
self.param_id_to_numel[param_id] = numel
buffer_id = self.available_buffer_ids.pop()
self.param_id_to_buffer_id[param_id] = buffer_id
aligned_swap_numel = self._io_aligned_numel(self.param_id_to_numel[param_id])
swap_buffer = self.buffers.narrow(0, int(buffer_id * self.aligned_elements_per_buffer), aligned_swap_numel)
self.param_id_to_swap_buffer[param_id] = swap_buffer
compute_buffer = swap_buffer.narrow(0, 0, self.param_id_to_numel[param_id])
print_rank_0(f"param {param.ds_id} is assigned swap in buffer id {buffer_id}")
return compute_buffer
def reserve_available_buffers(self):
buffers = []
for id in self.available_buffer_ids:
buffers.append(
self.buffers.narrow(0, int(id * self.aligned_elements_per_buffer),
int(self.aligned_elements_per_buffer)))
self.reserved_buffer_ids.append(id)
self.available_buffer_ids = []
return buffers
def release_reserved_buffers(self):
for id in self.reserved_buffer_ids:
self.available_buffer_ids.append(id)
self.reserved_buffer_ids = []
def _io_aligned_numel(self, numel):
remainder = numel % self.numel_alignment
return numel if remainder == 0 else (numel + self.numel_alignment - remainder)
def _is_io_aligned(self, numel):
return (numel % self.numel_alignment) == 0
def reserve_partitioned_swap_space(self, partition_num_elems):
aligned_numel = sum([self._io_aligned_numel(numel) for numel in partition_num_elems])
self.partitioned_swap_buffer = get_accelerator().pin_memory(
torch.zeros(aligned_numel, device='cpu', dtype=self.dtype))
self.partitioned_swap_pool = SwapBufferPool([self.partitioned_swap_buffer])
def swap_out_partitioned_params(self, dst_fp16_params, src_fp32_params):
        assert self.partitioned_swap_buffer is not None, 'partitioned swap buffers for fp16 params not initialized'
        assert self.partitioned_swap_pool is not None, 'partitioned swap pool for fp16 params not initialized'
assert len(dst_fp16_params) == len(src_fp32_params), \
f'mismatch in number of fp16 params {len(dst_fp16_params)} and fp32 params {len(src_fp32_params)}'
fp16_swap_paths = self._get_swap_paths(dst_fp16_params, must_exist=True)
self.synchronize_writes()
self.partitioned_swap_pool.reset()
for i, fp32_tensor in enumerate(src_fp32_params):
swap_tensor, _ = self.partitioned_swap_pool.insert_tensor(fp32_tensor, fp16_swap_paths[i],
self._io_aligned_numel(fp32_tensor.numel()))
assert swap_tensor is not None
dst_fp16_params[i].ds_tensor.status = PartitionedParamStatus.AVAILABLE
self.partitioned_swap_pool.swap_out(self.aio_write_handle)
for param in dst_fp16_params:
param.ds_tensor.status = PartitionedParamStatus.NOT_AVAILABLE
| 17,684 | 42.774752 | 165 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/swap_tensor/partitioned_optimizer_swapper.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
import torch
from deepspeed.utils.logging import logger
from deepspeed.ops.op_builder import AsyncIOBuilder
from deepspeed import comm as dist
from deepspeed.runtime.swap_tensor.constants import *
from deepspeed.runtime.swap_tensor.utils import swap_in_tensors, swap_out_tensors, print_object, \
get_sized_buffers
from deepspeed.runtime.swap_tensor.async_swapper import AsyncTensorSwapper
from deepspeed.runtime.swap_tensor.optimizer_utils import OptimizerSwapper
DEBUG_MODE = False
SWAP_IN_PARAM_TIMER = 'swap_in_param'
SWAP_OUT_PARAM_TIMER = 'swap_out_param'
SWAP_IN_GRADIENT_TIMER = 'swap_in_gradient'
class PartitionedOptimizerSwapper(OptimizerSwapper):
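    """Descriptive summary (added for clarity): swaps optimizer state tensors and
    swapped gradients between memory and NVMe storage. Parameter/optimizer state
    swaps use a blocking AIO handle, while gradient swap-out is overlapped through
    an AsyncTensorSwapper.
    """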
def __init__(self, swap_config, aio_config, base_folder, optimizer, largest_numel, device, dtype, timers):
super(PartitionedOptimizerSwapper, self).__init__(swap_config, aio_config, base_folder, optimizer,
largest_numel, device, dtype, timers)
aio_op = AsyncIOBuilder().load()
self.aio_handle = aio_op.aio_handle(aio_config[AIO_BLOCK_SIZE], aio_config[AIO_QUEUE_DEPTH],
aio_config[AIO_SINGLE_SUBMIT], aio_config[AIO_OVERLAP_EVENTS],
aio_config[AIO_THREAD_COUNT])
# Overlap swapping out
self.gradient_swapper = AsyncTensorSwapper(aio_handle=self.aio_handle,
numel_alignment=self.numel_alignment,
timers=self.timers)
self.print_exclude_list += ['aio_handle', 'gradient_swapper', 'print_exclude_list']
if dist.get_rank() == 0:
print_object(obj=self, name='PartitionedOptimizerSwapper', exclude_list=self.print_exclude_list)
def initialize_parameters(self, parameters, src_tensors):
self._initialize_parameters(parameters=parameters, src_tensors=src_tensors, aio_handle=self.aio_handle)
def initialize_from_swapped_fp16_params(self, fp16_partitions_info, fp16_num_elems, fp16_pinned_buffers,
fp32_parameters):
self._initialize_from_swapped_fp16_params(aio_handle=self.aio_handle,
fp16_partitions_info=fp16_partitions_info,
fp16_num_elems=fp16_num_elems,
fp16_pinned_buffers=fp16_pinned_buffers,
fp32_parameters=fp32_parameters)
def flush_gradients(self):
self._flush_gradient_swapper(self.gradient_swapper)
def swap_in_optimizer_state(self, parameter, async_parameter=None):
swap_info = self._get_param_swap_info(parameter)
if swap_info is None:
return
self._flush_gradient_swapper(self.gradient_swapper)
required_buffer_count = len(swap_info.tensors) + (1 if swap_info.has_gradients() else 0)
aligned_numel = self._io_aligned_numel(swap_info.numel())
pinned_buffers = self.swap_buffer_manager.allocate(num_elems=aligned_numel,
count=required_buffer_count,
dtype=parameter.dtype)
assert pinned_buffers is not None
self.allocated_swap_buffers = pinned_buffers.copy()
self._start_timer(SWAP_IN_PARAM_TIMER)
self._swap_in_parameter(aio_handle=self.aio_handle,
parameter=parameter,
dest_buffers=pinned_buffers[:required_buffer_count])
self._stop_timer(SWAP_IN_PARAM_TIMER)
self.timer_names.add(SWAP_IN_PARAM_TIMER)
self._start_timer(SWAP_IN_GRADIENT_TIMER)
self._swap_in_gradients(aio_handle=self.aio_handle, parameter=parameter, dest_buffer=pinned_buffers[-1])
self._stop_timer(SWAP_IN_GRADIENT_TIMER)
self.timer_names.add(SWAP_IN_GRADIENT_TIMER)
def swap_out_optimizer_state(self, parameter, async_swap=False):
swap_info = self._get_param_swap_info(parameter=parameter)
if swap_info is None:
return
self._start_timer(SWAP_OUT_PARAM_TIMER)
pinned_tensors, pinned_paths, unpinned_tensors, unpinned_paths = self._separate_pinned_tensors(swap_info)
swap_bytes = sum([self._io_aligned_numel(t.numel()) * t.element_size() for t in swap_info.tensors])
WRITE_TIMER = 'swap_submit_write'
self._start_timer(WRITE_TIMER)
swap_out_tensors(self.aio_handle, pinned_tensors, pinned_paths)
assert self.aio_handle.wait() == len(pinned_tensors)
for t in pinned_tensors:
t.data = torch.Tensor()
if len(unpinned_tensors) > 0:
pinned_buffers = self.swap_buffer_manager.allocate_all(num_elems=self.largest_numel, dtype=self.dtype)
self._swap_out_unpinned_tensors(aio_handle=self.aio_handle,
unpinned_tensors=unpinned_tensors,
dest_paths=unpinned_paths,
pinned_buffers=pinned_buffers)
self.allocated_swap_buffers += pinned_buffers
for t in unpinned_tensors:
t.data = torch.Tensor()
self._stop_timer(WRITE_TIMER)
self.swap_buffer_manager.free(self.allocated_swap_buffers)
self.allocated_swap_buffers = []
self._stop_timer(SWAP_OUT_PARAM_TIMER)
self.timer_names.add(SWAP_OUT_PARAM_TIMER)
self._log_timers([WRITE_TIMER])
if DEBUG_MODE and dist.get_rank() == 0:
logger.info(f'optimizer_param_swap_out: {(swap_bytes/(1024**3)):5.2f} GB')
def swap_out_gradients(self, parameter, gradient_offsets, gradient_tensors):
self._swap_out_gradients(parameter=parameter,
gradient_offsets=gradient_offsets,
gradient_tensors=gradient_tensors,
gradient_swapper=self.gradient_swapper)
def _swap_in_parameter(self, aio_handle, parameter, dest_buffers):
swap_info = self._get_param_swap_info(parameter)
if swap_info is None:
return
assert len(swap_info.tensors) <= len(dest_buffers)
swap_lengths = [self._io_aligned_numel(swap_info.numel())] * len(swap_info.tensors)
swap_buffers = get_sized_buffers(dest_buffers, swap_lengths)
READ_TIMER = 'swap_submit_read_param'
WAIT_TIMER = 'swap_wait_read_param'
self._start_timer(READ_TIMER)
swap_in_tensors(aio_handle, swap_buffers, swap_info.swap_paths)
self._stop_timer(READ_TIMER)
swap_bytes = sum([buffer.numel() * buffer.element_size() for buffer in swap_buffers])
self._start_timer(WAIT_TIMER)
aio_handle.wait()
self._stop_timer(WAIT_TIMER)
compute_lengths = [swap_info.numel()] * len(swap_info.tensors)
compute_buffers = get_sized_buffers(dest_buffers, compute_lengths)
for t, buffer in zip(swap_info.tensors, compute_buffers):
t.data = buffer.data
self._log_timers([READ_TIMER, WAIT_TIMER])
if DEBUG_MODE and dist.get_rank() == 0:
logger.info(f'optimizer_param_swap_in: {(swap_bytes/(1024**3)):5.2f} GB')
def _separate_pinned_tensors(self, swap_info):
pinned_tensors = []
pinned_paths = []
unpinned_tensors = []
unpinned_paths = []
for tensor, path in zip(swap_info.tensors, swap_info.swap_paths):
if tensor.is_pinned():
pinned_tensors.append(tensor)
pinned_paths.append(path)
else:
unpinned_tensors.append(tensor)
unpinned_paths.append(path)
return pinned_tensors, pinned_paths, unpinned_tensors, unpinned_paths
def _swap_in_pinned_gradients(self, aio_handle, parameter, gradient_tensor):
swap_info = self.swap_params_info[id(parameter)]
param_gradients = swap_info.swapped_gradients.values()
swap_buffers = [gradient_tensor.narrow(0, grad.offset, grad.length) for grad in param_gradients]
swap_paths = [grad.path for grad in param_gradients]
SWAP_READ_GRADIENTS = 'swap_submit_read_gradient'
SWAP_WAIT_GRADIENTS = 'swap_submit_wait_gradient'
self._start_timer(SWAP_READ_GRADIENTS)
swap_in_tensors(aio_handle, swap_buffers, swap_paths)
self._stop_timer(SWAP_READ_GRADIENTS)
self._start_timer(SWAP_WAIT_GRADIENTS)
assert len(swap_buffers) == aio_handle.wait()
self._stop_timer(SWAP_WAIT_GRADIENTS)
self._log_timers([SWAP_READ_GRADIENTS, SWAP_WAIT_GRADIENTS])
def _swap_in_gradients(self, aio_handle, parameter, dest_buffer):
swap_info = self.swap_params_info.get(id(parameter), None)
if not (swap_info and swap_info.has_gradients()):
return
assert dest_buffer.is_pinned()
assert parameter.numel() <= dest_buffer.numel()
parameter.grad = dest_buffer.narrow(0, 0, parameter.numel())
if swap_info.swapped_gradients:
self._swap_in_pinned_gradients(aio_handle, parameter, parameter.grad)
if swap_info.unswapped_gradients:
self._retrieve_unswapped_grad_partitions(swap_info=swap_info, dest_buffer=parameter.grad)
| 9,654 | 43.086758 | 114 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/compression/cupy.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import cupy
from torch.utils.dlpack import to_dlpack
from torch.utils.dlpack import from_dlpack
class CupyBackend(object):
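    """Descriptive summary (added for clarity): thin helper for zero-copy
    torch<->cupy tensor conversion via DLPack and for bit-packing boolean sign
    tensors into chunks for compressed communication.
    """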
def __init__(self):
pass
def torch2cupy(self, tensor):
return cupy.fromDlpack(to_dlpack(tensor))
def cupy2torch(self, cupy_tensor):
return from_dlpack(cupy_tensor.toDlpack())
def compress_by_chunk(self, cupy_bool_tensor, num_chunks):
packed_sign = cupy.packbits(cupy_bool_tensor)
sign_list_packed = cupy.split(packed_sign, num_chunks)
cupy.cuda.get_current_stream().synchronize()
return sign_list_packed
| 701 | 25 | 62 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/comm/nccl.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed import comm as dist
import cupy
import numpy as np
from deepspeed.runtime.compression.cupy import CupyBackend
from deepspeed.accelerator import get_accelerator
class NcclBackend(object):
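    """Descriptive summary (added for clarity): communication backend for 1-bit
    compressed allreduce on top of deepspeed.comm (NCCL). Signs are bit-packed
    with cupy and exchanged via all_to_all/all_gather; per-tensor scales travel
    alongside the packed signs.
    """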
def __init__(self, mpu=None):
if mpu is None:
self.world_group = dist.new_group(ranks=range(dist.get_world_size()))
else:
self.mpu = mpu
self.world_group = self.mpu.get_data_parallel_group()
self.rank = dist.get_rank(group=self.world_group)
self.size = dist.get_world_size(group=self.world_group)
self.compression_backend = CupyBackend()
self.bool_not_supported = False
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if (TORCH_MAJOR == 1 and TORCH_MINOR >= 10) or TORCH_MAJOR == 2:
self.bool_not_supported = True
def my_igather(self, rank, size, group, sendbuf, recvbuf, root):
req = []
if rank == root:
for idx in range(size):
if idx != rank:
req.append(dist.irecv(recvbuf[idx], src=idx, group=group))
else:
recvbuf[rank] = sendbuf
else:
req.append(dist.isend(sendbuf, group=group, dst=root))
return req
def my_gather(self, rank, size, group, sendbuf, recvbuf, root):
if rank == root:
for idx in range(size):
if idx != rank:
dist.recv(recvbuf[idx], src=idx, group=group)
else:
recvbuf[rank] = sendbuf
else:
dist.send(sendbuf, group=group, dst=root)
def compressed_allreduce(self, buffer_m: torch.tensor, worker_error, server_error, local_rank):
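        # Overview of the 1-bit compressed allreduce with error feedback (comments added for clarity):
        #   1) compensate the input with the local worker error, compute a scale
        #      (L2 norm / sqrt(numel)), and update the worker error
        #   2) phase 1: all_to_all the bit-packed signs and all_gather the scales
        #   3) each rank reconstructs and averages its shard, applies the server
        #      error, re-compresses, and updates the server error
        #   4) phase 2: all_gather the packed server signs and scales, then
        #      decompress the result into buffer_m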
# all_start_time = time.time()
original_shape = buffer_m.size()
if len(original_shape) > 1:
buffer_m = torch.flatten(buffer_m)
original_size = buffer_m.numel()
worker_error_size = worker_error.numel()
cupy.cuda.Device(local_rank).use()
if original_size != worker_error_size:
empty_tensor = torch.zeros(worker_error_size - original_size, device=buffer_m.device)
buffer_m = torch.cat([buffer_m, empty_tensor])
buffer_m.add_(worker_error)
worker_scale = torch.norm(buffer_m) / np.sqrt(buffer_m.numel())
worker_error.set_(buffer_m - worker_scale * buffer_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
if self.bool_not_supported:
cupy_sign_list_packed = self.compression_backend.compress_by_chunk(
self.compression_backend.torch2cupy(buffer_m.sign_().add_(1).bool().to(dtype=torch.uint8)), self.size)
else:
cupy_sign_list_packed = self.compression_backend.compress_by_chunk(
self.compression_backend.torch2cupy(buffer_m.sign_().add_(1).bool()), self.size)
cupy_worker_scale = self.compression_backend.torch2cupy(worker_scale)
cupy_recvbuf_sign = cupy.zeros([self.size, cupy_sign_list_packed[self.rank].size],
dtype=cupy_sign_list_packed[0].dtype)
# cupy_recvbuf_scale = cupy.zeros([self.size, 1], dtype=cupy_worker_scale.dtype)
sign_list_packed = [
self.compression_backend.cupy2torch(cupy_sign_list_packed[idx]) for idx in range(self.size)
]
# worker_scale = self.compression_backend.cupy2torch(cupy_worker_scale)
recvbuf_sign = self.compression_backend.cupy2torch(cupy_recvbuf_sign)
#recvbuf_scale = self.compression_backend.cupy2torch(cupy_recvbuf_scale)
recvbuf_scale = [
torch.zeros(1, dtype=worker_scale.dtype, device=torch.device(get_accelerator().device_name(local_rank)))
for i in range(self.size)
]
# communication phase 1
# gather_start = time.time()
# Alltoall for sign
dist.all_to_all_single(recvbuf_sign, torch.stack(sign_list_packed), group=self.world_group)
# Allgather for scale
dist.all_gather(recvbuf_scale, worker_scale, group=self.world_group)
# gather_end = time.time()
# cupy_sign_list_packed, sign_list_packed, cupy_worker_scale, worker_scale = None, None, None, None
cupy_sign_list_packed = None
cupy_recvbuf_sign = self.compression_backend.torch2cupy(recvbuf_sign)
#cupy_recvbuf_scale = self.compression_backend.torch2cupy(torch.stack(recvbuf_scale))
compensated_server_m = self.compression_backend.cupy2torch(
(cupy.unpackbits(cupy_recvbuf_sign.flatten())).reshape(self.size, -1)).float().add_(-0.5).mul_(2.0).mul_(
torch.stack(recvbuf_scale).mul_(1 / self.size)).sum(0)
compensated_server_m.add_(server_error)
server_scale = torch.norm(compensated_server_m) / np.sqrt(compensated_server_m.numel())
server_error.set_(compensated_server_m -
server_scale * compensated_server_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
# cupy_server_scale = self.compression_backend.torch2cupy(server_scale)
if self.bool_not_supported:
cupy_server_sign_packed = self.compression_backend.compress_by_chunk(
self.compression_backend.torch2cupy(compensated_server_m.sign_().add_(1).bool().to(dtype=torch.uint8)),
1)
else:
cupy_server_sign_packed = self.compression_backend.compress_by_chunk(
self.compression_backend.torch2cupy(compensated_server_m.sign_().add_(1).bool()), 1)
compensated_server_m = None
cupy_recvbuf_sign_server = cupy.zeros([self.size, cupy_server_sign_packed[0].size],
dtype=cupy_recvbuf_sign.dtype)
# cupy_recvbuf_sign, recvbuf_sign = None, None
cupy_recvbuf_sign = None
server_sign_packed = [self.compression_backend.cupy2torch(cupy_server_sign_packed[0])]
recvbuf_sign_server = [
self.compression_backend.cupy2torch(cupy_recvbuf_sign_server[idx]) for idx in range(self.size)
]
# server_scale = self.compression_backend.cupy2torch(cupy_server_scale)
cupy_recvbuf_scale_server = cupy.zeros([self.size, 1], dtype=cupy_worker_scale.dtype)
# cupy_recvbuf_scale, recvbuf_scale = None, None
recvbuf_scale_server = [
self.compression_backend.cupy2torch(cupy_recvbuf_scale_server[idx]) for idx in range(self.size)
]
# Communication Phase 2
dist.all_gather(recvbuf_sign_server, server_sign_packed[0], group=self.world_group)
dist.all_gather(recvbuf_scale_server, server_scale, group=self.world_group)
cupy_server_sign_packed = None
# need to convert from a tensor list to a single tensor
# dist.all_gather only provides a tensor list as the recv/output buffer
recvbuf_sign_server = torch.stack(recvbuf_sign_server)
cupy_recvbuf_sign_server = self.compression_backend.torch2cupy(recvbuf_sign_server)
buffer_m.data.copy_(
self.compression_backend.cupy2torch((cupy.unpackbits(cupy_recvbuf_sign_server.flatten())).reshape(
self.size, -1)).float().add_(-0.5).mul_(2.0).mul_(
self.compression_backend.cupy2torch(cupy_recvbuf_scale_server)).flatten().data)
if original_size != worker_error_size:
buffer_m = buffer_m[0:original_size]
if len(original_shape) > 1:
buffer_m = buffer_m.reshape(original_shape)
return buffer_m
| 7,712 | 44.370588 | 119 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/comm/coalesced_collectives.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
batched collective operations for overhead amortization and better
bandwidth utilization
"""
import math
from typing import List
import torch
from torch import Tensor
from deepspeed import comm as dist
# NOTE: Use torch.distributed's ProcessGroup class until we have our own.
from torch.distributed import ProcessGroup, all_to_all_single
from deepspeed.accelerator import get_accelerator
from deepspeed.utils import instrument_w_nvtx
from deepspeed.ops import op_builder
def _torch_reduce_scatter_fn(input_tensor: Tensor, output_tensor: Tensor, group=None, async_op=False, prof=False):
return instrument_w_nvtx(dist.reduce_scatter_fn)(output_tensor, input_tensor, group=group, async_op=False)
quantizer_module = None
@instrument_w_nvtx
@torch.no_grad()
def all_to_all_quant_reduce(tensors: List[Tensor], groups: {}) -> List[Tensor]:
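    """Descriptive summary (added for clarity): two-level quantized reduction for
    2-D tensors. Each tensor is 4-bit swizzle-quantized and exchanged with
    all_to_all within the node, reduced in quantized form, exchanged across nodes
    with another all_to_all, then dequantized and averaged over nodes. 1-D tensors
    fall back to reduce_scatter_coalesced.
    """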
global quantizer_module
if quantizer_module is None:
quantizer_module = op_builder.QuantizerBuilder().load()
local_world_size = get_accelerator().device_count()
global_world_size = dist.get_world_size()
num_nodes = global_world_size // local_world_size
this_rank = dist.get_rank()
intra_idx = int(this_rank / local_world_size)
inter_idx = this_rank % local_world_size
output_lst: List[Tensor] = [None] * len(tensors)
for idx, tensor in enumerate(tensors):
if tensor.dim() == 1:
intra_quant_group = global_world_size
output_lst[idx] = reduce_scatter_coalesced([tensor])[0]
continue
else:
intra_quant_group = max(tensor.shape[0], tensor.shape[1], global_world_size)
inter_quant_group = intra_quant_group // local_world_size
intra_quant_int4, intra_q_scales = quantizer_module.swizzle_quant(tensor, intra_quant_group, 4,
quantizer_module.Symmetric, 1, num_nodes,
local_world_size)
local_output = torch.empty_like(intra_quant_int4)
scale_output = torch.empty_like(intra_q_scales)
all_to_all_single(local_output, intra_quant_int4, group=groups[f'local_{intra_idx}'])
all_to_all_single(scale_output, intra_q_scales, group=groups[f'local_{intra_idx}'])
global_input_tensor, global_scales = quantizer_module.quantized_reduction(
local_output, scale_output, intra_quant_group, inter_quant_group, 4, quantizer_module.Symmetric)
global_output = torch.empty_like(global_input_tensor)
global_scale_output = torch.empty_like(global_scales)
all_to_all_single(global_output, global_input_tensor, group=groups[f'global_{inter_idx}'])
all_to_all_single(global_scale_output, global_scales, group=groups[f'global_{inter_idx}'])
final_output = quantizer_module.dequantize(global_output, global_scale_output, global_scale_output.numel(),
4, quantizer_module.Symmetric)
output_lst[idx] = (sum(list(final_output.chunk(num_nodes))) / num_nodes).view(-1)
return output_lst
@instrument_w_nvtx
@torch.no_grad()
def reduce_scatter_coalesced(
tensors: List[Tensor],
group: ProcessGroup = None,
) -> List[Tensor]:
"""simultaneously reduce-scatter a list of tensors - this can be done more
efficiently than individual reduce scatter calls
TODO. see if PyTorch team wants a c++ version of this for ProcessGroupNCCL
"""
this_rank = dist.get_rank(group)
world_sz = dist.get_world_size(group)
partition_lst_for_each_tensor = [None] * len(tensors)
for tensor_idx, tensor in enumerate(tensors):
flattened_tensor = tensor.view(-1)
chunk_sz = math.ceil(tensor.numel() / world_sz)
partition_lst_for_each_tensor[tensor_idx] = [
flattened_tensor[rank * chunk_sz:rank * chunk_sz + chunk_sz] for rank in range(0, world_sz)
]
padded_partition_sz_for_each_tensor = tuple(math.ceil(t.numel() / world_sz) for t in tensors)
if len(tensors) == 1 and tensors[0].numel() % world_sz == 0:
# if there's only one tensor being reduced and we don't need to pad
# we have an opportunity to avoid a memory allocation
tensor_partition_flat_buffer = tensors[0].view(-1)
else:
# interleave tensor partitions such that the correct reduced partitions of each tensor
# end up at each rank
tensor_partitions_lst_with_padding = []
for rank in range(world_sz):
for tensor_idx in range(len(tensors)):
# add tensor content
tensor_chunk = partition_lst_for_each_tensor[tensor_idx][rank]
tensor_partitions_lst_with_padding.append(tensor_chunk)
# add padding if necessary
padding_sz = padded_partition_sz_for_each_tensor[tensor_idx] - tensor_chunk.numel()
if padding_sz > 0:
tensor_partitions_lst_with_padding.append(
torch.empty(padding_sz, dtype=tensor_chunk.dtype, device=tensor_chunk.device))
tensor_partition_flat_buffer = instrument_w_nvtx(torch.cat)(tensor_partitions_lst_with_padding)
tensor_partition_flat_buffer.div_(world_sz) # pre-divide
tensor_partition_buffer_for_each_rank: List[Tensor] = torch.chunk(tensor_partition_flat_buffer, world_sz)
# batched reduce-scatter call
_torch_reduce_scatter_fn(tensor_partition_flat_buffer,
tensor_partition_buffer_for_each_rank[this_rank],
group=group)
# reverse procedure of the interleaving done previously, done on the
# result of the batched reduce-scatter
output_lst: List[Tensor] = [None] * len(tensors)
offset = 0
for tensor_idx in range(len(tensors)):
output_lst[tensor_idx] = tensor_partition_buffer_for_each_rank[this_rank].narrow(
0, offset, partition_lst_for_each_tensor[tensor_idx][this_rank].numel())
offset += padded_partition_sz_for_each_tensor[tensor_idx]
return output_lst
| 6,261 | 46.082707 | 119 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/comm/mpi.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import cupy
import time
import numpy as np
from mpi4py import MPI
from deepspeed.runtime.compression.cupy import CupyBackend
class MpiBackend(object):
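    """Descriptive summary (added for clarity): mpi4py-based backend for 1-bit
    compressed allreduce. When cuda_aware is False, cupy buffers are staged
    through host numpy arrays for communication; otherwise device buffers are
    handed to MPI directly.
    """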
def __init__(self, cuda_aware):
self.comm = MPI.COMM_WORLD
self.rank = self.comm.Get_rank()
self.size = self.comm.Get_size()
self.cuda_aware = cuda_aware
self.compression_backend = CupyBackend()
def my_igather(self, rank, size, comm, sendbuf, recbuf, root):
req = []
if rank == root:
for idx in range(size):
if idx != rank:
req.append(comm.Irecv(recbuf[idx], source=idx))
else:
recbuf[rank] = sendbuf
else:
req.append(comm.Isend(sendbuf, dest=root))
return req
def gather_cuda(self, rank, world_size, comm, cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale,
cupy_recvbuf_scale):
# We do in-place operations on cupy buffers so we do not return any buffers
requests = []
for idx in range(world_size):
req_sign = self.my_igather(rank, world_size, comm, cupy_sign_list_packed[idx], cupy_recvbuf_sign, root=idx)
requests += req_sign
for idx in range(world_size):
req_scale = self.my_igather(rank, world_size, comm, cupy_worker_scale, cupy_recvbuf_scale, root=idx)
requests += req_scale
MPI.Request.Waitall(requests)
def gather_host(self, rank, world_size, comm, cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale,
cupy_recvbuf_scale):
# In-place operations are not possible for newly created cupy arrays
# so we need to return the new buffers
numpy_recvbuf_sign = np.zeros([world_size, cupy_sign_list_packed[rank].size],
dtype=cupy_sign_list_packed[0].dtype)
numpy_recvbuf_scale = np.zeros([world_size, 1], dtype=cupy_worker_scale.dtype)
# 1. convert from cupy to numpy
numpy_sign_list_packed = cupy_sign_list_packed
for idx in range(world_size):
numpy_sign_list_packed[idx] = cupy.asnumpy(cupy_sign_list_packed[idx])
numpy_worker_scale = cupy.asnumpy(cupy_worker_scale)
numpy_recvbuf_scale = cupy.asnumpy(cupy_recvbuf_scale)
cupy.cuda.get_current_stream().synchronize()
# 2. use numpy buffers for communication
requests = []
for idx in range(world_size):
req_sign = self.my_igather(rank,
world_size,
comm,
numpy_sign_list_packed[idx],
numpy_recvbuf_sign,
root=idx)
requests += req_sign
for idx in range(world_size):
req_scale = self.my_igather(rank, world_size, comm, numpy_worker_scale, numpy_recvbuf_scale, root=idx)
requests += req_scale
MPI.Request.Waitall(requests)
# 3. Convert back from numpy to cupy
cupy_recvbuf_sign = cupy.asarray(numpy_recvbuf_sign)
for idx in range(world_size):
cupy_sign_list_packed[idx] = cupy.asarray(numpy_sign_list_packed[idx])
cupy_worker_scale = cupy.asarray(numpy_worker_scale)
cupy_recvbuf_scale = cupy.asarray(numpy_recvbuf_scale)
cupy.cuda.get_current_stream().synchronize()
return cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale, cupy_recvbuf_scale
def allgather_cuda(self, comm, cupy_server_sign_packed, cupy_recvbuf_sign_server, cupy_server_scale,
cupy_recvbuf_scale_server):
comm.Allgather(cupy_server_sign_packed, cupy_recvbuf_sign_server)
comm.Allgather(cupy_server_scale, cupy_recvbuf_scale_server)
def allgather_host(self, comm, cupy_server_sign_packed, cupy_recvbuf_sign_server, cupy_server_scale,
cupy_recvbuf_scale_server):
# 1. Convert cupy to numpy
numpy_recvbuf_sign_server = np.zeros([comm.Get_size(), cupy_server_sign_packed.size],
dtype=cupy_server_sign_packed.dtype)
numpy_recvbuf_scale_server = np.zeros([comm.Get_size(), 1], dtype=cupy_server_scale.dtype)
numpy_server_sign_packed = cupy.asnumpy(cupy_server_sign_packed)
numpy_recvbuf_sign_server = cupy.asnumpy(cupy_recvbuf_sign_server)
numpy_server_scale = cupy.asnumpy(cupy_server_scale)
numpy_recvbuf_scale_server = cupy.asnumpy(cupy_recvbuf_scale_server)
cupy.cuda.get_current_stream().synchronize()
# 2. Communicate numpy buffers
comm.Allgather(numpy_server_sign_packed, numpy_recvbuf_sign_server)
comm.Allgather(numpy_server_scale, numpy_recvbuf_scale_server)
comm.Barrier()
# 3. Convert numpy back to cupy
cupy_server_sign_packed = cupy.asarray(numpy_server_sign_packed)
cupy_recvbuf_sign_server = cupy.asarray(numpy_recvbuf_sign_server)
cupy_server_scale = cupy.asarray(numpy_server_scale)
cupy_recvbuf_scale_server = cupy.asarray(numpy_recvbuf_scale_server)
cupy.cuda.get_current_stream().synchronize()
return cupy_server_sign_packed, cupy_recvbuf_sign_server, cupy_server_scale, cupy_recvbuf_scale_server
def compressed_allreduce(self, buffer_m: torch.tensor, worker_error, server_error, local_rank):
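        # Mirrors the NcclBackend algorithm (comment added for clarity):
        # error-feedback compensation, chunk-wise exchange of bit-packed signs
        # (igather per root in phase 1, Allgather in phase 2), server-side
        # re-compression, then reconstruction of buffer_m from the gathered
        # server signs and scales.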
all_start_time = time.time()
original_shape = buffer_m.size()
if len(original_shape) > 1:
buffer_m = torch.flatten(buffer_m)
original_size = buffer_m.numel()
worker_error_size = worker_error.numel()
cupy.cuda.Device(local_rank).use()
if original_size != worker_error_size:
empty_tensor = torch.zeros(worker_error_size - original_size, device=buffer_m.device)
buffer_m = torch.cat([buffer_m, empty_tensor])
buffer_m.add_(worker_error)
worker_scale = torch.norm(buffer_m) / np.sqrt(torch.numel(buffer_m))
worker_error.set_(buffer_m - worker_scale * buffer_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
cupy_sign_list_packed = self.compression_backend.compress_by_chunk(
self.compression_backend.torch2cupy(buffer_m.sign_().add_(1).bool()), self.size)
cupy_worker_scale = self.compression_backend.torch2cupy(worker_scale)
cupy_recvbuf_sign = cupy.zeros([self.size, cupy_sign_list_packed[self.rank].size],
dtype=cupy_sign_list_packed[0].dtype)
cupy_recvbuf_scale = cupy.zeros([self.size, 1], dtype=cupy_worker_scale.dtype)
# Communication Phase 1
gather_start = time.time()
if self.cuda_aware:
self.gather_cuda(self.rank, self.size, self.comm, cupy_sign_list_packed, cupy_recvbuf_sign,
cupy_worker_scale, cupy_recvbuf_scale)
else:
_, cupy_recvbuf_sign, _, cupy_recvbuf_scale = self.gather_host(self.rank, self.size, self.comm,
cupy_sign_list_packed, cupy_recvbuf_sign,
cupy_worker_scale, cupy_recvbuf_scale)
gather_end = time.time()
# cupy_sign_list_packed, cupy_worker_scale, worker_scale = None, None, None
cupy_sign_list_packed = None
compensated_server_m = self.compression_backend.cupy2torch(
(cupy.unpackbits(cupy_recvbuf_sign.flatten())).reshape(self.size, -1)).float().add_(-0.5).mul_(2.0).mul_(
self.compression_backend.cupy2torch(cupy_recvbuf_scale).mul_(1 / self.size)).sum(0)
compensated_server_m.add_(server_error)
server_scale = torch.norm(compensated_server_m) / np.sqrt(compensated_server_m.numel())
server_error.set_(compensated_server_m -
server_scale * compensated_server_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
cupy_server_scale = self.compression_backend.torch2cupy(server_scale)
cupy_server_sign_packed = self.compression_backend.compress_by_chunk(
self.compression_backend.torch2cupy(compensated_server_m.sign_().add_(1).bool()), 1)
compensated_server_m = None
cupy_recvbuf_sign_server = cupy.zeros([self.size, cupy_server_sign_packed[0].size],
dtype=cupy_recvbuf_sign.dtype)
cupy_recvbuf_scale_server = cupy.zeros([self.size, 1], dtype=cupy_recvbuf_scale.dtype)
# cupy_recvbuf_sign, cupy_recvbuf_scale = None, None
cupy_recvbuf_sign = None
# Communication Phase 2
if self.cuda_aware:
self.allgather_cuda(self.comm, cupy_server_sign_packed[0], cupy_recvbuf_sign_server, cupy_server_scale,
cupy_recvbuf_scale_server)
else:
_, cupy_recvbuf_sign_server, _, cupy_recvbuf_scale_server = self.allgather_host(
self.comm, cupy_server_sign_packed[0], cupy_recvbuf_sign_server, cupy_server_scale,
cupy_recvbuf_scale_server)
# cupy_server_sign_packed, cupy_server_scale, server_scale = None, None, None
cupy_server_sign_packed = None
buffer_m.data.copy_(
self.compression_backend.cupy2torch((cupy.unpackbits(cupy_recvbuf_sign_server.flatten())).reshape(
self.size, -1)).float().add_(-0.5).mul_(2.0).mul_(
self.compression_backend.cupy2torch(cupy_recvbuf_scale_server)).flatten().data)
if original_size != worker_error_size:
buffer_m = buffer_m[0:original_size]
if len(original_shape) > 1:
buffer_m = buffer_m.reshape(original_shape)
# cupy_recvbuf_sign_server, cupy_recvbuf_scale_server = None, None
return buffer_m
| 10,062 | 45.587963 | 119 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/data_pipeline/data_sampling/data_analyzer.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
from collections import defaultdict
import csv
import time
from multiprocessing import Process, Manager
import numpy as np
import torch
from torch.utils.data import BatchSampler, SequentialSampler, DataLoader, Subset
from deepspeed.utils import logger
from .indexed_dataset import MMapIndexedDataset
from .utils import split_dataset, split_index, create_mmap_dataset_builder, close_mmap_dataset_builder, find_fit_int_dtype
class DataAnalyzer(object):
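    """Descriptive summary (added for clarity): map-reduce style analysis of a
    dataset across workers and threads. The map step (run_map) computes per-sample
    metric values and writes them to memory-mapped (MMapIndexedDataset) files and
    per-value CSVs; the reduce step (merge_map_results) merges the per-worker
    outputs into sample_to_metric / index_to_sample / index_to_metric index files.
    """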
def __init__(self,
dataset,
num_workers=1,
worker_id=0,
num_threads=1,
num_threads_reduce=1,
specific_threads=[],
batch_size=1,
metric_names=[],
metric_functions=[],
metric_types=[],
metric_dtypes=[],
save_path="./",
collate_fn=None,
custom_map_init=None,
custom_map_update=None,
custom_map_finalize=None,
custom_reduce=None):
super().__init__()
self.dataset = dataset
self.num_workers = num_workers
self.worker_id = worker_id
self.num_threads = num_threads
self.num_threads_reduce = num_threads_reduce
self.specific_threads = specific_threads
self.batch_size = batch_size
self.metric_names = metric_names
self.metric_functions = metric_functions
self.metric_types = metric_types
self.metric_dtypes = metric_dtypes
self.save_path = save_path
self.collate_fn = collate_fn
self.custom_map_init = custom_map_init
self.custom_map_update = custom_map_update
self.custom_map_finalize = custom_map_finalize
self.custom_reduce = custom_reduce
def init_metric_results(self, thread_id, metric_names, metric_types, metric_dtypes, save_path, worker_id):
metric_results = []
for m_idx in range(len(metric_names)):
metric_name, metric_type, metric_dtype = metric_names[m_idx], \
metric_types[m_idx], metric_dtypes[m_idx]
assert metric_dtype not in [
np.float64, np.double
            ], "Currently floating point metric values are not supported. Please change your metric into integer values (and potentially multiply by a larger coefficient to keep the precision)."
metric_save_path = f"{save_path}/{metric_name}/worker{worker_id}_thread{thread_id}/"
os.makedirs(metric_save_path, exist_ok=True)
if metric_type == 'single_value_per_sample':
sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric"
sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_dtype)
metric_to_sample_fname = f"{metric_save_path}/{metric_name}_metric_to_sample"
os.system(f"rm -rf {metric_to_sample_fname}*")
metric_to_sample_dict = defaultdict(list)
metric_results.append({
"sample_to_metric_fname": sample_to_metric_fname,
"sample_to_metric_builder": sample_to_metric_builder,
"metric_to_sample_fname": metric_to_sample_fname,
"metric_to_sample_dict": metric_to_sample_dict
})
elif metric_type == 'accumulate_value_over_samples':
metric_value = None
metric_value_fname = f"{metric_save_path}/{metric_name}_metric_value"
metric_results.append({"metric_value": metric_value, "metric_value_fname": metric_value_fname})
return metric_results
def update_metric_results(self, data, metric_types, metric_functions, metric_results):
for m_idx in range(len(metric_types)):
metric_type, metric_function, metric_result = metric_types[m_idx], \
metric_functions[m_idx], metric_results[m_idx]
if metric_type == 'single_value_per_sample':
metric_values = metric_function(data)
for row in range(metric_values.size()[0]):
metric_result["sample_to_metric_builder"].add_item(metric_values[row].reshape(-1))
metric_result["metric_to_sample_dict"][metric_values[row].item()].append(
data['index'][row][0].item())
for m_value in metric_result["metric_to_sample_dict"]:
if len(metric_result["metric_to_sample_dict"][m_value]) > 100:
metric_fname = metric_result["metric_to_sample_fname"]
with open(f"{metric_fname}_{m_value}.csv", 'a') as f:
writer = csv.writer(f)
writer.writerows([metric_result["metric_to_sample_dict"][m_value]])
metric_result["metric_to_sample_dict"][m_value] = []
elif metric_type == 'accumulate_value_over_samples':
metric_values = metric_function(data)
if metric_result["metric_value"] is None:
metric_result["metric_value"] = metric_values
else:
metric_result["metric_value"].add_(metric_values)
def finalize_metric_results(self, metric_types, metric_dtypes, metric_results):
for m_idx in range(len(metric_types)):
metric_type, metric_dtype, metric_result = metric_types[m_idx], \
metric_dtypes[m_idx], metric_results[m_idx]
if metric_type == 'single_value_per_sample':
metric_fname = metric_result["sample_to_metric_fname"]
close_mmap_dataset_builder(metric_result["sample_to_metric_builder"], metric_fname)
for m_value in metric_result["metric_to_sample_dict"]:
if len(metric_result["metric_to_sample_dict"][m_value]) > 0:
metric_fname = metric_result["metric_to_sample_fname"]
with open(f"{metric_fname}_{m_value}.csv", 'a') as f:
writer = csv.writer(f)
writer.writerows([metric_result["metric_to_sample_dict"][m_value]])
metric_result["metric_to_sample_dict"][m_value] = []
elif metric_type == 'accumulate_value_over_samples':
if metric_result["metric_value"] is not None:
metric_value_builder = create_mmap_dataset_builder(metric_result["metric_value_fname"],
metric_dtype)
metric_value_builder.add_item(metric_result["metric_value"].reshape(-1))
close_mmap_dataset_builder(metric_value_builder, metric_result["metric_value_fname"])
def run_map_helper(self, thread_id):
start_idx, end_idx = self.thread_splits[thread_id][0], \
self.thread_splits[thread_id][1]
logger.info(f"worker {self.worker_id} thread {thread_id}: start working " \
f"on data subset {start_idx} to {end_idx}")
thread_dataset = Subset(self.dataset, list(range(start_idx, end_idx)))
sampler = BatchSampler(SequentialSampler(thread_dataset), batch_size=self.batch_size, drop_last=False)
if self.collate_fn is None:
iterator = iter(DataLoader(thread_dataset, batch_sampler=sampler, num_workers=0, pin_memory=False))
else:
iterator = iter(
DataLoader(thread_dataset,
batch_sampler=sampler,
num_workers=0,
collate_fn=self.collate_fn,
pin_memory=False))
if self.custom_map_init is None:
metric_results = self.init_metric_results(thread_id, self.metric_names, self.metric_types,
self.metric_dtypes, self.save_path, self.worker_id)
else:
metric_results = self.custom_map_init(thread_id, self.metric_names, self.metric_types, self.metric_dtypes,
self.save_path, self.worker_id)
total_sample = len(thread_dataset)
processed_sample = 0
start = time.time()
while True:
try:
data = next(iterator)
if self.custom_map_update is None:
self.update_metric_results(data, self.metric_types, self.metric_functions, metric_results)
else:
self.custom_map_update(data, self.metric_types, self.metric_functions, metric_results)
processed_sample += self.batch_size
duration = (time.time() - start) / 3600.0
remain_duration = duration * total_sample / processed_sample - duration
logger.info(
f"worker {self.worker_id} thread {thread_id}: {processed_sample} " \
f"out of {total_sample} processed in {duration:.2f} hr, " \
f"estimated to finish in {remain_duration:.2f} hr")
except StopIteration:
logger.info(f"worker {self.worker_id} thread {thread_id}: reach end of file")
break
if self.custom_map_finalize is None:
self.finalize_metric_results(self.metric_types, self.metric_dtypes, metric_results)
else:
self.custom_map_finalize(self.metric_types, self.metric_dtypes, metric_results)
logger.info(f"worker {self.worker_id} thread {thread_id}: finished")
def run_map(self):
self.worker_splits, self.thread_splits = split_dataset(self.dataset, self.num_workers, self.worker_id,
self.num_threads)
if len(self.specific_threads) > 0:
threads_to_run = self.specific_threads
else:
threads_to_run = list(range(self.num_threads))
if self.num_threads > 1:
p = []
for thread in threads_to_run:
p.append(Process(target=self.run_map_helper, args=(thread, )))
p[thread].start()
for thread in threads_to_run:
p[thread].join()
else:
assert self.num_threads == 1
self.run_map_helper(0)
def get_metric_value_percentiles(self, metric_name, num_sample_per_value, total_num_samples):
logger.info(f"Checking the value percentiles of metric {metric_name}...")
processed_samples = 0
current_percentile = 5
for key in sorted(num_sample_per_value.keys()):
processed_samples += num_sample_per_value[key]
if processed_samples >= total_num_samples * current_percentile / 100.0:
logger.info(f"Metric {metric_name} {current_percentile}th percentile: {key}")
current_percentile += 5
def merge_gather_map_stats(self, num_workers, num_threads, num_threads_reduce, t_idx_reduce, metric_save_path,
metric_name, return_dict):
results = []
for w_idx in range(num_workers):
for t_idx in range(num_threads):
if (w_idx * num_threads + t_idx) % num_threads_reduce == t_idx_reduce:
w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
w_sample_to_metric_fname = f"{w_metric_save_path}/{metric_name}_sample_to_metric"
w_sample_to_metric = MMapIndexedDataset(w_sample_to_metric_fname, skip_warmup=True)
unique_v = list(np.unique(w_sample_to_metric))
sample_to_metric_count = len(w_sample_to_metric)
logger.info(f"Finished gathering map stats from worker {w_idx} thread {t_idx}.")
results.append([unique_v, sample_to_metric_count])
return_dict[t_idx_reduce] = results
def merge_sample_to_metric(self, t_idx_reduce, metric_save_path, metric_name, metric_value_dtype,
map_worker_thread):
sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric_thread{t_idx_reduce}"
sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_value_dtype)
for w_t in map_worker_thread:
w_metric_save_path = f"{metric_save_path}/worker{w_t[0]}_thread{w_t[1]}/"
w_sample_to_metric_fname = f"{w_metric_save_path}/{metric_name}_sample_to_metric"
w_data = MMapIndexedDataset(w_sample_to_metric_fname, skip_warmup=True)
for row in range(len(w_data)):
sample_to_metric_builder.add_item(torch.tensor(w_data[row].astype(np.int64), dtype=torch.long))
logger.info(f"Finished merge_sample_to_metric from worker {w_t[0]} thread {w_t[1]}.")
close_mmap_dataset_builder(sample_to_metric_builder, sample_to_metric_fname)
def merge_metric_to_sample(self, t_idx_reduce, metric_save_path, metric_name, sample_idx_dtype, metric_value_dtype,
unique_metric_values, num_workers, num_threads):
index_to_sample_fname = f"{metric_save_path}/{metric_name}_index_to_sample_thread{t_idx_reduce}"
index_to_sample_builder = create_mmap_dataset_builder(index_to_sample_fname, sample_idx_dtype)
index_to_metric_fname = f"{metric_save_path}/{metric_name}_index_to_metric_thread{t_idx_reduce}"
index_to_metric_builder = create_mmap_dataset_builder(index_to_metric_fname, metric_value_dtype)
for unique_v in unique_metric_values:
samples = []
for w_idx in range(num_workers):
for t_idx in range(num_threads):
w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
w_metric_to_sample_fname = f"{w_metric_save_path}/{metric_name}_metric_to_sample_{unique_v}.csv"
if os.path.isfile(w_metric_to_sample_fname):
with open(w_metric_to_sample_fname, 'r') as f:
datareader = csv.reader(f)
for row in datareader:
samples += [int(x) for x in row]
index_to_sample_builder.add_item(torch.tensor(samples, dtype=torch.long))
index_to_metric_builder.add_item(torch.tensor([unique_v], dtype=torch.long))
logger.info(f"Finished reducing metric {metric_name} value {unique_v}.")
close_mmap_dataset_builder(index_to_sample_builder, index_to_sample_fname)
close_mmap_dataset_builder(index_to_metric_builder, index_to_metric_fname)
def merge_map_results(self, dataset, metric_names, metric_types, save_path, num_workers, num_threads,
num_threads_reduce):
total_num_samples = len(dataset)
sample_idx_dtype = find_fit_int_dtype(0, total_num_samples - 1)
logger.info(
f"Total number of data samples: {total_num_samples}. Will use {sample_idx_dtype} to store the sample indexes."
)
for m_idx in range(len(metric_names)):
metric_name, metric_type = metric_names[m_idx], metric_types[m_idx]
if metric_type == 'single_value_per_sample':
metric_save_path = f"{save_path}/{metric_name}/"
sample_to_metric_count = 0
unique_metric_values = set([])
manager = Manager()
return_dict = manager.dict()
p = []
for t_idx_reduce in range(num_threads_reduce):
p.append(
Process(target=self.merge_gather_map_stats,
args=(
num_workers,
num_threads,
num_threads_reduce,
t_idx_reduce,
metric_save_path,
metric_name,
return_dict,
)))
p[t_idx_reduce].start()
for t_idx_reduce in range(num_threads_reduce):
p[t_idx_reduce].join()
for t_idx_reduce in range(num_threads_reduce):
results = return_dict[t_idx_reduce]
for res in results:
unique_metric_values = unique_metric_values.union(set(res[0]))
sample_to_metric_count += res[1]
value_max = max(unique_metric_values)
value_min = min(unique_metric_values)
                assert sample_to_metric_count == total_num_samples, "The number of samples in the map result files is not correct. It's possible that some map worker didn't finish successfully."
metric_value_dtype = find_fit_int_dtype(value_min, value_max)
logger.info(
f"Metric {metric_name} has values between {value_min} and {value_max}. Will use {metric_value_dtype} to store the metric values."
)
# sample_to_metric
map_worker_thread = []
for w_idx in range(num_workers):
for t_idx in range(num_threads):
map_worker_thread.append([w_idx, t_idx])
thread_splits = split_index(0, len(map_worker_thread), num_threads_reduce)
p = []
for t_idx_reduce in range(num_threads_reduce):
start_idx, end_idx = thread_splits[t_idx_reduce][0], thread_splits[t_idx_reduce][1]
p.append(
Process(target=self.merge_sample_to_metric,
args=(
t_idx_reduce,
metric_save_path,
metric_name,
metric_value_dtype,
map_worker_thread[start_idx:end_idx],
)))
p[t_idx_reduce].start()
for t_idx_reduce in range(num_threads_reduce):
p[t_idx_reduce].join()
sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric"
sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_value_dtype)
for t_idx_reduce in range(num_threads_reduce):
chunk_fname = f"{metric_save_path}/{metric_name}_sample_to_metric_thread{t_idx_reduce}"
logger.info(f"Merging file {chunk_fname}")
sample_to_metric_builder.merge_file_(chunk_fname)
close_mmap_dataset_builder(sample_to_metric_builder, sample_to_metric_fname)
sample_to_metric = MMapIndexedDataset(sample_to_metric_fname, skip_warmup=True)
assert len(sample_to_metric) == total_num_samples
# metric_to_sample
unique_metric_values = list(sorted(unique_metric_values))
thread_splits = split_index(0, len(unique_metric_values), num_threads_reduce)
p = []
for t_idx_reduce in range(num_threads_reduce):
start_idx, end_idx = thread_splits[t_idx_reduce][0], thread_splits[t_idx_reduce][1]
p.append(
Process(target=self.merge_metric_to_sample,
args=(
t_idx_reduce,
metric_save_path,
metric_name,
sample_idx_dtype,
metric_value_dtype,
unique_metric_values[start_idx:end_idx],
num_workers,
num_threads,
)))
p[t_idx_reduce].start()
for t_idx_reduce in range(num_threads_reduce):
p[t_idx_reduce].join()
index_to_sample_fname = f"{metric_save_path}/{metric_name}_index_to_sample"
index_to_sample_builder = create_mmap_dataset_builder(index_to_sample_fname, sample_idx_dtype)
index_to_metric_fname = f"{metric_save_path}/{metric_name}_index_to_metric"
index_to_metric_builder = create_mmap_dataset_builder(index_to_metric_fname, metric_value_dtype)
for t_idx_reduce in range(num_threads_reduce):
chunk_is_fname = f"{metric_save_path}/{metric_name}_index_to_sample_thread{t_idx_reduce}"
logger.info(f"Merging file {chunk_is_fname}")
index_to_sample_builder.merge_file_(chunk_is_fname)
chunk_im_fname = f"{metric_save_path}/{metric_name}_index_to_metric_thread{t_idx_reduce}"
logger.info(f"Merging file {chunk_im_fname}")
index_to_metric_builder.merge_file_(chunk_im_fname)
close_mmap_dataset_builder(index_to_sample_builder, index_to_sample_fname)
close_mmap_dataset_builder(index_to_metric_builder, index_to_metric_fname)
num_sample_per_value = {}
index_to_sample = MMapIndexedDataset(index_to_sample_fname, skip_warmup=True)
index_to_metric = MMapIndexedDataset(index_to_metric_fname, skip_warmup=True)
index_to_sample_merged_fname = f"{metric_save_path}/{metric_name}_index_to_sample_percentile_merged"
index_to_sample_merged_builder = create_mmap_dataset_builder(index_to_sample_merged_fname,
sample_idx_dtype)
for v_idx in range(len(index_to_sample)):
if v_idx > 0:
assert index_to_metric[v_idx] > index_to_metric[v_idx - 1]
num_sample_per_value[index_to_metric[v_idx][0]] = len(index_to_sample[v_idx])
assert sum(num_sample_per_value.values()) == total_num_samples
merge_step = max(1, len(index_to_sample) // 100)
for v_idx in range(0, len(index_to_sample), merge_step):
merged_samples = np.copy(
np.concatenate(index_to_sample[v_idx:min(len(index_to_sample), (v_idx + merge_step))],
axis=None))
index_to_sample_merged_builder.add_item(
torch.tensor(merged_samples.astype(np.int64), dtype=torch.long))
logger.info(f"Finished merging index_to_sample {v_idx} to {v_idx+merge_step}.")
close_mmap_dataset_builder(index_to_sample_merged_builder, index_to_sample_merged_fname)
self.get_metric_value_percentiles(metric_name, num_sample_per_value, total_num_samples)
elif metric_type == 'accumulate_value_over_samples':
metric_save_path = f"{save_path}/{metric_name}/"
metric_value = None
for w_idx in range(num_workers):
for t_idx in range(num_threads):
w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
w_metric_value_fname = f"{w_metric_save_path}/{metric_name}_metric_value"
w_metric_value = MMapIndexedDataset(w_metric_value_fname, skip_warmup=True)
if metric_value is None:
metric_value = np.copy(w_metric_value[0])
else:
metric_value += np.copy(w_metric_value[0])
value_max = int(max(metric_value))
value_min = int(min(metric_value))
metric_value_dtype = find_fit_int_dtype(value_min, value_max)
metric_value_fname = f"{metric_save_path}/{metric_name}_metric_value"
metric_value_builder = create_mmap_dataset_builder(metric_value_fname, metric_value_dtype)
metric_value_builder.add_item(torch.tensor(metric_value.astype(np.int64), dtype=torch.long))
close_mmap_dataset_builder(metric_value_builder, metric_value_fname)
def run_reduce(self):
if self.custom_reduce is None:
self.merge_map_results(self.dataset, self.metric_names, self.metric_types, self.save_path,
self.num_workers, self.num_threads, self.num_threads_reduce)
else:
self.custom_reduce(self.dataset, self.metric_names, self.metric_types, self.save_path, self.num_workers,
self.num_threads, self.num_threads_reduce)
| 25,023 | 58.866029 | 191 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/data_pipeline/data_sampling/data_sampler.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
coding=utf-8
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Part of this code was adopted from https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/data/data_samplers.py
"""
import torch
import os
import numpy as np
import deepspeed.comm as dist
from deepspeed.utils import logger
from deepspeed.accelerator import get_accelerator
from ..constants import *
from ..curriculum_scheduler import CurriculumScheduler
from .indexed_dataset import MMapIndexedDataset
from .utils import create_mmap_dataset_builder, close_mmap_dataset_builder, find_fit_int_dtype
class DeepSpeedDataSampler(object):
def __init__(self,
data_efficiency_config,
one_epoch_total_samples,
micro_batch_size,
data_parallel_rank,
data_parallel_size,
data_parallel_group,
gradient_accumulation_steps,
global_rank,
drop_last=True):
# Keep a copy of input params for later use.
self.data_efficiency_config = data_efficiency_config
self.one_epoch_total_samples = one_epoch_total_samples
self.index_dtype = find_fit_int_dtype(0, one_epoch_total_samples)
self.total_samples = one_epoch_total_samples * self.data_efficiency_config[DATA_SAMPLING][
DATA_SAMPLING_NUM_EPOCHS]
self.micro_batch_size = micro_batch_size
self.data_parallel_rank = data_parallel_rank
self.data_parallel_group = data_parallel_group
self.micro_batch_times_data_parallel_size = \
self.micro_batch_size * data_parallel_size
self.gradient_accumulation_steps = gradient_accumulation_steps
self.global_batch_size = self.micro_batch_times_data_parallel_size * \
self.gradient_accumulation_steps
self.global_rank = global_rank
self.drop_last = drop_last
self.np_rng = np.random.default_rng(self.data_efficiency_config[DATA_EFFICIENCY_SEED])
self.state = {}
self.batch = []
self.consumed_samples = 0
if self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_ENABLED]:
self.curriculum_step = 0
self.current_difficulties = {}
self.data_cluster_paths = []
self.data_cluster_current_position = []
self.curriculum_schedulers = {}
self.curriculum_index_to_sample = {}
self.curriculum_index_to_metric = {}
self.difficulty_type = {}
self.clustering_type = {}
self.data_1epoch_size = None
if self.global_rank == 0:
self.data_clusters = []
self.data_cluster_sizes = []
cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
CURRICULUM_LEARNING_CLUSTER_PATH]
if not os.path.exists(cluster_path):
os.makedirs(cluster_path)
for metric in self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS]:
self.curriculum_schedulers[metric] = CurriculumScheduler(
data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS][metric])
self.difficulty_type[metric] = data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
CURRICULUM_LEARNING_METRICS][metric][CURRICULUM_LEARNING_DIFFICULTY_TYPE]
self.clustering_type[metric] = data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
CURRICULUM_LEARNING_METRICS][metric][CURRICULUM_LEARNING_CLUSTERING_TYPE]
if self.global_rank == 0:
if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER:
self.curriculum_index_to_sample[metric] = MMapIndexedDataset(
data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS]
[metric][CURRICULUM_LEARNING_SAMPLE_PATH],
skip_warmup=True)
if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED:
self.curriculum_index_to_metric[metric] = MMapIndexedDataset(
data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS]
[metric][CURRICULUM_LEARNING_METRIC_PATH],
skip_warmup=True)
# Sanity checks.
assert self.total_samples > 0, \
'no sample to consume: {}'.format(self.total_samples)
assert self.micro_batch_size > 0
assert data_parallel_size > 0
assert self.data_parallel_rank < data_parallel_size, \
'data_parallel_rank should be smaller than data size: {}, ' \
'{}'.format(self.data_parallel_rank, data_parallel_size)
def __len__(self):
return self.total_samples
def set_custom_curriculum_learning_schedule(self, schedule_func_dict):
for metric in self.curriculum_schedulers:
if metric in schedule_func_dict:
self.curriculum_schedulers[metric].set_custom_get_difficulty(schedule_func_dict[metric])
def get_start_end_idx(self):
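# Each data-parallel rank consumes its own contiguous micro-batch slice of the global batch.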
start_idx = self.data_parallel_rank * self.micro_batch_size
end_idx = start_idx + self.micro_batch_size
return start_idx, end_idx
def get_sample_based_on_metric_value(self, metric, value_start, value_end):
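# Collect all sample indices whose metric value lies in (value_start, value_end].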
new_samples = None
for row in range(len(self.curriculum_index_to_sample[metric])):
if self.curriculum_index_to_metric[metric][row] <= value_end and self.curriculum_index_to_metric[metric][
row] > value_start:
row_samples = np.copy(self.curriculum_index_to_sample[metric][row])
new_samples = row_samples if new_samples is None else np.concatenate(
(new_samples, row_samples), axis=None)
return new_samples
def get_sample_based_on_metric_percentile(self, metric, percentile_start, percentile_end):
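# Collect the sample indices that fall between the two difficulty percentiles, walking the
# per-value index rows until the target sample count is reached.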
new_samples = None
if self.data_1epoch_size is None:
self.data_1epoch_size = sum(len(x) for x in self.curriculum_index_to_sample[metric])
max_percentile = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS][
metric][CURRICULUM_LEARNING_MAX_DIFFICULTY]
sample_per_percentile = self.data_1epoch_size // max_percentile
start_count = sample_per_percentile * percentile_start
end_count = sample_per_percentile * percentile_end
if percentile_end == max_percentile:
end_count = self.data_1epoch_size
current_count = 0
for row in range(len(self.curriculum_index_to_sample[metric])):
row_size = len(self.curriculum_index_to_sample[metric][row])
if current_count + row_size > start_count:
row_start = max(0, start_count - current_count)
if current_count + row_size <= end_count:
row_end = row_size
else:
row_end = end_count - current_count
row_samples = np.copy(self.curriculum_index_to_sample[metric][row][row_start:row_end])
new_samples = row_samples if new_samples is None else np.concatenate(
(new_samples, row_samples), axis=None)
current_count += row_size
if current_count >= end_count:
break
return new_samples
def get_new_cluster(self, previous_difficulties):
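# Build (on global rank 0) and register a new shuffled data cluster file that matches the
# current difficulty thresholds of all curriculum metrics.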
cluster_fname = CURRICULUM_LEARNING_CLUSTER_PREFIX
for metric in self.curriculum_schedulers:
cluster_fname = f"{cluster_fname}_{metric}{self.current_difficulties[metric]}"
cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
CURRICULUM_LEARNING_CLUSTER_PATH]
cluster_path = f"{cluster_path}/{cluster_fname}"
if self.global_rank == 0:
new_cluster = None
need_clustering = 0
for metric in self.clustering_type:
if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER:
need_clustering += 1
if need_clustering > 1:
for metric in self.curriculum_schedulers:
if self.clustering_type[metric] == CURRICULUM_LEARNING_SINGLE_CLUSTER:
metric_cluster = np.arange(start=0,
stop=self.one_epoch_total_samples,
step=1,
dtype=self.index_dtype)
else:
if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED:
metric_cluster = self.get_sample_based_on_metric_value(metric, float('-inf'),
self.current_difficulties[metric])
elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED:
metric_cluster = self.get_sample_based_on_metric_percentile(
metric, 0, self.current_difficulties[metric])
new_cluster = metric_cluster if new_cluster is None else \
np.intersect1d(new_cluster, metric_cluster, assume_unique=True)
for cluster in self.data_clusters:
new_cluster = np.setdiff1d(new_cluster, cluster[0], assume_unique=True)
else:
if len(self.data_clusters) == 0:
new_cluster = np.arange(start=0, stop=self.one_epoch_total_samples, step=1, dtype=self.index_dtype)
for metric in self.curriculum_schedulers:
if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER:
if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED:
new_cluster = self.get_sample_based_on_metric_value(metric, previous_difficulties[metric],
self.current_difficulties[metric])
elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED:
new_cluster = self.get_sample_based_on_metric_percentile(
metric, previous_difficulties[metric], self.current_difficulties[metric])
if new_cluster is not None and len(new_cluster) > 0:
logger.info(
f"new data cluster (previous_difficulties {previous_difficulties}, current_difficulties {self.current_difficulties}) with size {len(new_cluster)} generated."
)
self.np_rng.shuffle(new_cluster)
cluster_builder = create_mmap_dataset_builder(cluster_path, self.index_dtype)
cluster_builder.add_item_numpy(new_cluster)
close_mmap_dataset_builder(cluster_builder, cluster_path)
self.data_clusters.append(MMapIndexedDataset(cluster_path, skip_warmup=True))
self.data_cluster_sizes.append(len(self.data_clusters[-1][0]))
else:
logger.info(
f"new data cluster (previous_difficulties {previous_difficulties}, current_difficulties {self.current_difficulties}) has no matched data thus skipped."
)
dist.barrier(group=self.data_parallel_group)
if os.path.isfile(f"{cluster_path}.bin"):
self.data_cluster_paths.append(cluster_fname)
self.data_cluster_current_position.append(0)
def sample_from_clusters(self):
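# Draw a global batch worth of cluster assignments, weighted by cluster size, and return the
# number of samples to take from each cluster.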
num_clusters = len(self.data_clusters)
weight_sum = sum(self.data_cluster_sizes)
weights = [x / weight_sum for x in self.data_cluster_sizes]
samples = self.np_rng.choice(num_clusters, self.global_batch_size, replace=True, p=weights)
samples = np.bincount(samples, minlength=num_clusters)
return samples
def reshuffle_clusters(self, cidx):
cluster_fname = self.data_cluster_paths[cidx]
cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
CURRICULUM_LEARNING_CLUSTER_PATH]
cluster_path = f"{cluster_path}/{cluster_fname}"
cluster = np.copy(self.data_clusters[cidx][0])
self.np_rng.shuffle(cluster)
cluster_builder = create_mmap_dataset_builder(cluster_path, self.index_dtype)
cluster_builder.add_item_numpy(cluster)
close_mmap_dataset_builder(cluster_builder, cluster_path)
self.data_clusters[cidx] = MMapIndexedDataset(cluster_path, skip_warmup=True)
def get_sample_from_cluster(self, cidx, num_samples):
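# Take the next num_samples indices from cluster cidx, reshuffling and wrapping around when
# the cluster is exhausted.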
start_idx = self.data_cluster_current_position[cidx]
samples = list(np.copy(self.data_clusters[cidx][0][start_idx:(start_idx + num_samples)]))
self.data_cluster_current_position[cidx] += num_samples
if len(samples) < num_samples:
num_samples_remained = num_samples - len(samples)
logger.info(f"reshuffling cluster {cidx}.")
self.reshuffle_clusters(cidx)
samples += list(np.copy(self.data_clusters[cidx][0][:num_samples_remained]))
self.data_cluster_current_position[cidx] = num_samples_remained
return samples
def get_next_global_batch(self):
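# Advance the curriculum step, build a new cluster if any difficulty changed, sample a global
# batch on rank 0, and broadcast it to the data-parallel group.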
if self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_ENABLED]:
self.curriculum_step += 1
new_cluster = False
previous_difficulties = {}
for metric in self.curriculum_schedulers:
next_difficulty = self.curriculum_schedulers[metric].update_difficulty(self.curriculum_step)
if metric not in self.current_difficulties or \
next_difficulty != self.current_difficulties[metric]:
new_cluster = True
if metric in self.current_difficulties:
previous_difficulties[metric] = self.current_difficulties[metric]
else:
if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED:
previous_difficulties[metric] = float('-inf')
elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED:
previous_difficulties[metric] = 0
self.current_difficulties[metric] = next_difficulty
if new_cluster:
self.get_new_cluster(previous_difficulties)
if self.global_rank == 0:
samples_per_cluster = self.sample_from_clusters()
batch = []
for cidx in range(len(samples_per_cluster)):
batch += self.get_sample_from_cluster(cidx, samples_per_cluster[cidx])
self.np_rng.shuffle(batch)
batch = torch.tensor(batch, device=get_accelerator().current_device_name(), dtype=torch.long).view(-1)
else:
batch = torch.empty(self.global_batch_size,
device=get_accelerator().current_device_name(),
dtype=torch.long)
dist.broadcast(batch, 0, group=self.data_parallel_group)
self.batch = batch.tolist()
def __iter__(self):
while self.consumed_samples <= self.total_samples:
if len(self.batch) == 0:
self.get_next_global_batch()
current_batch = self.batch[:self.micro_batch_times_data_parallel_size]
self.batch = self.batch[self.micro_batch_times_data_parallel_size:]
if len(current_batch) == self.micro_batch_times_data_parallel_size or \
(len(current_batch) > 0 and not self.drop_last):
start_idx, end_idx = self.get_start_end_idx()
yield current_batch[start_idx:end_idx]
self.consumed_samples += len(current_batch)
current_batch = []
def state_dict(self):
return {
CURRICULUM_LEARNING_BATCH: self.batch,
CURRICULUM_LEARNING_CONSUMED_SAMPLES: self.consumed_samples,
CURRICULUM_LEARNING_STEP: self.curriculum_step,
CURRICULUM_LEARNING_CURRENT_DIFFICULTIES: self.current_difficulties,
CURRICULUM_LEARNING_DATA_CLUSTER_PATHS: self.data_cluster_paths,
CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION: self.data_cluster_current_position,
CURRICULUM_LEARNING_NP_RNG_STATE: np.random.get_state()
}
def load_state_dict(self, state_dict):
self.batch = state_dict[CURRICULUM_LEARNING_BATCH]
self.consumed_samples = state_dict[CURRICULUM_LEARNING_CONSUMED_SAMPLES]
self.curriculum_step = state_dict[CURRICULUM_LEARNING_STEP]
self.current_difficulties = state_dict[CURRICULUM_LEARNING_CURRENT_DIFFICULTIES]
self.data_cluster_paths = state_dict[CURRICULUM_LEARNING_DATA_CLUSTER_PATHS]
self.data_cluster_current_position = state_dict[CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION]
np.random.set_state(state_dict[CURRICULUM_LEARNING_NP_RNG_STATE])
cluster_root_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
CURRICULUM_LEARNING_CLUSTER_PATH]
# Backward compatibility: previously data_cluster_paths were stored as
# absolute paths. Now we changed it to just the file name so that even
# if user moved the cluster files, the checkpoint loading still works
# as long as user set the correct new CURRICULUM_LEARNING_CLUSTER_PATH
# in deepspeed json config.
for idx in range(len(self.data_cluster_paths)):
if '/' in self.data_cluster_paths[idx]:
self.data_cluster_paths[idx] = self.data_cluster_paths[idx].split('/')[-1]
if self.global_rank == 0:
for cluster_fname in self.data_cluster_paths:
cluster_path = f"{cluster_root_path}/{cluster_fname}"
self.data_clusters.append(MMapIndexedDataset(cluster_path, skip_warmup=True))
self.data_cluster_sizes.append(len(self.data_clusters[-1][0]))
| 19,160 | 55.522124 | 177 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/data_pipeline/data_sampling/indexed_dataset.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Part of this code was adopted from https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/data/indexed_dataset.py
"""
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# copied from fairseq/fairseq/data/indexed_dataset.py
# Removed IndexedRawTextDataset since it relied on Fairseq dictionary
# other slight modifications to remove fairseq dependencies
# Added document index to index file and made it accessible.
# An empty sentence no longer separates documents.
# Some of the fixes/improvements are adopted from
# https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/main/megatron/data/indexed_dataset.py
from functools import lru_cache
import os
import shutil
import struct
from itertools import accumulate
import numpy as np
import torch
def __best_fitting_dtype(vocab_size=None):
if vocab_size is not None and vocab_size < 65500:
return np.uint16
else:
return np.int32
def get_available_dataset_impl():
return ['lazy', 'cached', 'mmap']
def infer_dataset_impl(path):
if IndexedDataset.exists(path):
with open(index_file_path(path), 'rb') as f:
magic = f.read(8)
if magic == IndexedDataset._HDR_MAGIC:
return 'cached'
elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
return 'mmap'
else:
return None
else:
print(f"Dataset does not exist: {path}")
print("Path should be a basename that both .idx and .bin can be appended to get full filenames.")
return None
def make_builder(out_file, impl, vocab_size=None):
if impl == 'mmap':
return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size))
else:
return IndexedDatasetBuilder(out_file)
def make_dataset(path, impl, skip_warmup=False):
if not IndexedDataset.exists(path):
print(f"Dataset does not exist: {path}")
print("Path should be a basename that both .idx and .bin can be appended to get full filenames.")
return None
if impl == 'infer':
impl = infer_dataset_impl(path)
if impl == 'lazy' and IndexedDataset.exists(path):
return IndexedDataset(path)
elif impl == 'cached' and IndexedDataset.exists(path):
return IndexedCachedDataset(path)
elif impl == 'mmap' and MMapIndexedDataset.exists(path):
return MMapIndexedDataset(path, skip_warmup)
print(f"Unknown dataset implementation: {impl}")
return None
def dataset_exists(path, impl):
if impl == 'mmap':
return MMapIndexedDataset.exists(path)
else:
return IndexedDataset.exists(path)
def read_longs(f, n):
a = np.empty(n, dtype=np.int64)
f.readinto(a)
return a
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64))
dtypes = {
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
5: np.int64,
6: np.float64,
7: np.double,
8: np.uint16,
9: np.uint32,
10: np.uint64
}
def code(dtype):
for k in dtypes.keys():
if dtypes[k] == dtype:
return k
raise ValueError(dtype)
def index_file_path(prefix_path):
return prefix_path + '.idx'
def data_file_path(prefix_path):
return prefix_path + '.bin'
def create_doc_idx(sizes):
doc_idx = [0]
for i, s in enumerate(sizes):
if s == 0:
doc_idx.append(i + 1)
return doc_idx
class IndexedDataset(torch.utils.data.Dataset):
"""Loader for IndexedDataset"""
_HDR_MAGIC = b'TNTIDX\x00\x00'
def __init__(self, path):
super().__init__()
self.path = path
self.data_file = None
self.read_index(path)
def read_index(self, path):
with open(index_file_path(path), 'rb') as f:
magic = f.read(8)
assert magic == self._HDR_MAGIC, ('Index file doesn\'t match expected format. '
'Make sure that --dataset-impl is configured properly.')
version = f.read(8)
assert struct.unpack('<Q', version) == (1, )
code, self.element_size = struct.unpack('<QQ', f.read(16))
self.dtype = dtypes[code]
self._len, self.s = struct.unpack('<QQ', f.read(16))
self.doc_count = struct.unpack('<Q', f.read(8))
self.dim_offsets = read_longs(f, self._len + 1)
self.data_offsets = read_longs(f, self._len + 1)
self.sizes = read_longs(f, self.s)
self.doc_idx = read_longs(f, self.doc_count)
def read_data(self, path):
self.data_file = open(data_file_path(path), 'rb', buffering=0)
def check_index(self, i):
if i < 0 or i >= self._len:
raise IndexError('index out of range')
def __del__(self):
if self.data_file:
self.data_file.close()
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if not self.data_file:
self.read_data(self.path)
if isinstance(idx, int):
i = idx
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
return a
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
sizes = self.sizes[self.dim_offsets[start]:self.dim_offsets[stop]]
size = sum(sizes)
a = np.empty(size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[start] * self.element_size)
self.data_file.readinto(a)
offsets = list(accumulate(sizes))
sents = np.split(a, offsets[:-1])
return sents
def __len__(self):
return self._len
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
@staticmethod
def exists(path):
return (os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path)))
@property
def supports_prefetch(self):
return False # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
def __init__(self, path):
super().__init__(path)
self.cache = None
self.cache_index = {}
@property
def supports_prefetch(self):
return True
def prefetch(self, indices):
if all(i in self.cache_index for i in indices):
return
if not self.data_file:
self.read_data(self.path)
indices = sorted(set(indices))
total_size = 0
for i in indices:
total_size += self.data_offsets[i + 1] - self.data_offsets[i]
self.cache = np.empty(total_size, dtype=self.dtype)
ptx = 0
self.cache_index.clear()
for i in indices:
self.cache_index[i] = ptx
size = self.data_offsets[i + 1] - self.data_offsets[i]
a = self.cache[ptx:ptx + size]
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
ptx += size
if self.data_file:
# close and delete data file after prefetch so we can pickle
self.data_file.close()
self.data_file = None
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if isinstance(idx, int):
i = idx
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
ptx = self.cache_index[i]
np.copyto(a, self.cache[ptx:ptx + a.size])
return a
elif isinstance(idx, slice):
# Hack just to make this work; can optimize later if necessary
sents = []
for i in range(*idx.indices(len(self))):
sents.append(self[i])
return sents
class IndexedDatasetBuilder(object):
element_sizes = {np.uint8: 1, np.int8: 1, np.int16: 2, np.int32: 4, np.int64: 8, np.float64: 8, np.double: 8}
def __init__(self, out_file, dtype=np.int32):
self.out_file = open(out_file, 'wb')
self.dtype = dtype
self.data_offsets = [0]
self.dim_offsets = [0]
self.sizes = []
self.element_size = self.element_sizes[self.dtype]
self.doc_idx = [0]
def add_item(self, tensor):
bytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype))
self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
for s in tensor.size():
self.sizes.append(s)
self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
def end_document(self):
self.doc_idx.append(len(self.sizes))
def merge_file_(self, another_file):
index = IndexedDataset(another_file)
assert index.dtype == self.dtype
doc_offset = len(self.sizes)
begin = self.data_offsets[-1]
for data_offset in index.data_offsets[1:]:
self.data_offsets.append(begin + data_offset)
self.sizes.extend(index.sizes)
begin = self.dim_offsets[-1]
for dim_offset in index.dim_offsets[1:]:
self.dim_offsets.append(begin + dim_offset)
self.doc_idx.extend((doc_offset + index.doc_idx)[1:])
with open(data_file_path(another_file), 'rb') as f:
while True:
data = f.read(1024)
if data:
self.out_file.write(data)
else:
break
def finalize(self, index_file):
self.out_file.close()
index = open(index_file, 'wb')
index.write(b'TNTIDX\x00\x00')
index.write(struct.pack('<Q', 1))
index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
index.write(struct.pack('<Q', len(self.doc_idx)))
write_longs(index, self.dim_offsets)
write_longs(index, self.data_offsets)
write_longs(index, self.sizes)
write_longs(index, self.doc_idx)
index.close()
def _warmup_mmap_file(path):
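# Sequentially read the file in 100MB chunks so its pages are resident in the OS cache before mmap access.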
with open(path, 'rb') as stream:
while stream.read(100 * 1024 * 1024):
pass
def exscan_from_cumsum_(arr):
# given an array holding the result of an inclusive scan (cumsum),
# convert to an exclusive scan (shift to the right)
# [10, 30, 35, 50] --> [0, 10, 30, 35]
if arr.size > 1:
arr[1:] = arr[:-1]
if arr.size > 0:
arr[0] = 0
def get_pointers_with_total(sizes, elemsize, dtype):
"""Return a numpy array of type np.dtype giving the byte offsets.
Multiplies values in the sizes array by elemsize (bytes),
and then computes an exclusive scan to get byte offsets.
Returns the total number of bytes as second item in a tuple.
"""
# scale values in sizes array by elemsize to get sizes in bytes
pointers = np.array(sizes, dtype=dtype)
pointers *= elemsize
np.cumsum(pointers, axis=0, out=pointers)
# get total number of bytes from all sizes (last element)
bytes_last = pointers[-1] if len(sizes) > 0 else 0
# convert to byte offsets
exscan_from_cumsum_(pointers)
return pointers, bytes_last
class MMapIndexedDataset(torch.utils.data.Dataset):
class Index(object):
_HDR_MAGIC = b'MMIDIDX\x00\x00'
@classmethod
def writer(cls, path, dtype):
class _Writer(object):
def __enter__(self):
self._file = open(path, 'wb')
self._file.write(cls._HDR_MAGIC)
self._file.write(struct.pack('<Q', 1))
self._file.write(struct.pack('<B', code(dtype)))
return self
@staticmethod
def _get_pointers(sizes, npdtype):
"""Return a numpy array of byte offsets given a list of sizes.
Multiplies values in the sizes array by dtype size (bytes),
and then computes an exclusive scan to get byte offsets.
"""
# compute element sizes in bytes
pointers, _ = get_pointers_with_total(sizes, dtype().itemsize, npdtype)
return pointers
def write(self, sizes, doc_idx):
self._file.write(struct.pack('<Q', len(sizes)))
self._file.write(struct.pack('<Q', len(doc_idx)))
sizes32 = np.array(sizes, dtype=np.int32)
self._file.write(sizes32.tobytes(order='C'))
del sizes32
pointers = self._get_pointers(sizes, np.int64)
del sizes
self._file.write(pointers.tobytes(order='C'))
del pointers
doc_idx = np.array(doc_idx, dtype=np.int64)
self._file.write(doc_idx.tobytes(order='C'))
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
return _Writer()
def __init__(self, path, skip_warmup=False):
with open(path, 'rb') as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, ('Index file doesn\'t match expected format. '
'Make sure that --dataset-impl is configured properly.')
version = struct.unpack('<Q', stream.read(8))
assert (1, ) == version
dtype_code, = struct.unpack('<B', stream.read(1))
self._dtype = dtypes[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack('<Q', stream.read(8))[0]
self._doc_count = struct.unpack('<Q', stream.read(8))[0]
offset = stream.tell()
if not skip_warmup:
print(" warming up index mmap file...")
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
self._bin_buffer = memoryview(self._bin_buffer_mmap)
print(" reading sizes...")
self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)
print(" reading pointers...")
self._pointers = np.frombuffer(self._bin_buffer,
dtype=np.int64,
count=self._len,
offset=offset + self._sizes.nbytes)
print(" reading document index...")
self._doc_idx = np.frombuffer(self._bin_buffer,
dtype=np.int64,
count=self._doc_count,
offset=offset + self._sizes.nbytes + self._pointers.nbytes)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
@property
def dtype(self):
return self._dtype
@property
def sizes(self):
return self._sizes
@property
def doc_idx(self):
return self._doc_idx
@lru_cache(maxsize=8)
def __getitem__(self, i):
return self._pointers[i], self._sizes[i]
def __len__(self):
return self._len
def __init__(self, path, skip_warmup=False):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._do_init(path, skip_warmup)
def __getstate__(self):
return self._path
def __setstate__(self, state):
self._do_init(state)
def _do_init(self, path, skip_warmup):
self._path = path
self._index = self.Index(index_file_path(self._path), skip_warmup)
if not skip_warmup:
print(" warming up data mmap file...")
_warmup_mmap_file(data_file_path(self._path))
print(" creating numpy buffer of mmap...")
self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C')
print(" creating memory view of numpy buffer...")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
return len(self._index)
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if isinstance(idx, int):
ptr, size = self._index[idx]
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
return np_array
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
ptr = self._index._pointers[start]
sizes = self._index._sizes[idx]
offsets = list(accumulate(sizes))
total_size = sum(sizes)
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=total_size, offset=ptr)
sents = np.split(np_array, offsets[:-1])
return sents
def get(self, idx, offset=0, length=None):
""" Retrieves a single item from the dataset with the option to only
return a portion of the item.
get(idx) is the same as [idx] but get() does not support slicing.
"""
ptr, size = self._index[idx]
if length is None:
length = size - offset
ptr += offset * np.dtype(self._index.dtype).itemsize
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=length, offset=ptr)
return np_array
@property
def sizes(self):
return self._index.sizes
def size(self, index):
return self._index.sizes[index]
@property
def doc_idx(self):
return self._index.doc_idx
def get_doc_idx(self):
return self._index._doc_idx
def set_doc_idx(self, doc_idx_):
self._index._doc_idx = doc_idx_
@property
def supports_prefetch(self):
return False
@staticmethod
def exists(path):
return (os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path)))
@property
def dtype(self):
return self._index.dtype
class MMapIndexedDatasetBuilder(object):
def __init__(self, out_file, dtype=np.int64):
self._data_file = open(out_file, 'wb')
self._dtype = dtype
self._sizes = []
self._doc_idx = [0]
def add_item(self, tensor):
np_array = np.array(tensor.numpy(), dtype=self._dtype)
self._data_file.write(np_array.tobytes(order='C'))
self._sizes.append(np_array.size)
def add_item_numpy(self, np_array):
if np_array.dtype != self._dtype:
np_array = np_array.astype(self._dtype)
self._data_file.write(np_array.tobytes(order='C'))
self._sizes.append(np_array.size)
def end_document(self):
self._doc_idx.append(len(self._sizes))
def merge_file_(self, another_file):
# Concatenate index
index = MMapIndexedDataset.Index(index_file_path(another_file))
assert index.dtype == self._dtype
total_len = len(index.sizes) + len(self._sizes)
print(f" concat {another_file} size={len(index.sizes)} for a total size of {total_len}")
offset = len(self._sizes)
self._sizes.extend(index.sizes)
self._doc_idx.extend((offset + index.doc_idx)[1:])
# Concatenate data
with open(data_file_path(another_file), 'rb') as f:
shutil.copyfileobj(f, self._data_file)
def finalize(self, index_file):
self._data_file.close()
with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
index.write(self._sizes, self._doc_idx)
| 20,614 | 32.357605 | 115 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/data_pipeline/data_routing/basic_layer.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.utils import logger
from torch import Tensor
from torch.nn import Module
from ..constants import *
from deepspeed.ops.random_ltd.dropping_utils import gpt_sample_tokens, bert_sample_tokens, GatherTokens, ScatterTokens
#####based on the paper random-ltd: https://arxiv.org/abs/2211.11586
class RandomLayerTokenDrop(Module):
"""
A layer wrapper for random LTD
"""
def __init__(self, layer: Module):
super(RandomLayerTokenDrop, self).__init__()
self.random_ltd_layer = layer
self.reserved_length = None #config['max_value']
self.random_ltd_scheduler = None
self.max_length = None
self.reserved_length = -1
self.curr_seq = -1
self.batch_first = False
def init_config(self, config, scheduler, random_ltd_layer_id):
self.random_ltd_scheduler = scheduler
self.random_ltd_layer_id = random_ltd_layer_id
self.max_length = self.random_ltd_scheduler.state[RANDOM_LTD_MAX_VALUE]
self.mask_name = config[RANDOM_LTD_MODEL_MASK_NAME]
self.micro_bs = config[RANDOM_LTD_MICRO_BATCH_SIZE]
self.random_ltd_num_layer = self.random_ltd_scheduler.random_ltd_layer_num
hs_order = config[RANDOM_LTD_HIDDEN_STATE_ORDER]
self.model_type = config[RANDOM_LTD_MODEL_TYPE]
if hs_order == 'batch_seq_dim':
self.get_hidden_tensor_shape = self.get_bsh
self.batch_first = True
elif hs_order == 'seq_batch_dim':
self.get_hidden_tensor_shape = self.get_sbh
self.batch_first = False
else:
logger.warning(
"************For now, we only support batch_seq_dim or seq_batch_dim inputs. You can easily \
your own input dimension orders************")
raise NotImplementedError
if self.model_type == 'encoder':
self.index_generator = bert_sample_tokens
elif self.model_type == 'decoder':
self.index_generator = gpt_sample_tokens
else:
logger.warning("************For now, we only support encoder-only or decoder-only models************")
raise NotImplementedError
def get_bsh(self, hidden_stats):
self.curr_seq, self.curr_micro_batch = hidden_stats.size()[1], hidden_stats.size()[0]
def get_sbh(self, hidden_stats):
self.curr_seq, self.curr_micro_batch = hidden_stats.size()[0], hidden_stats.size()[1]
def forward(self, hidden_states, **kwargs) -> Tensor:
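# Training path: gather a random subset of tokens, run the wrapped layer on that subset,
# then scatter the layer outputs back into the full-length hidden states.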
if self.random_ltd_scheduler is not None:
self.reserved_length = self.random_ltd_scheduler.get_current_seq()
self.get_hidden_tensor_shape(hidden_states)
if self.training and self.random_ltd_scheduler is not None and self.reserved_length < self.curr_seq:
if self.mask_name is not None:
mask = kwargs[self.mask_name]
else:
mask = None
if self.random_ltd_layer_id == 0:
sampled_indices, part_attention_mask = self.index_generator(self.reserved_length,\
self.curr_seq, \
self.curr_micro_batch, \
self.random_ltd_num_layer, \
hidden_states.device, mask)
self.random_ltd_scheduler.state[RANDOM_LTD_SAMPLE_INDEX] = sampled_indices
self.random_ltd_scheduler.state[RANDOM_LTD_ATTENTION_MASK] = part_attention_mask
else:
sampled_indices = self.random_ltd_scheduler.state[RANDOM_LTD_SAMPLE_INDEX]
part_attention_mask = self.random_ltd_scheduler.state[RANDOM_LTD_ATTENTION_MASK]
hidden_states, part_hidden_states = GatherTokens.apply(hidden_states,
sampled_indices[self.random_ltd_layer_id, :, :],
self.batch_first)
if self.mask_name is not None:
if self.model_type == 'encoder':
kwargs[self.mask_name] = part_attention_mask[self.random_ltd_layer_id]
else:
kwargs[self.mask_name] = part_attention_mask
outputs = self.random_ltd_layer(part_hidden_states, **kwargs)
if isinstance(outputs, tuple):
hidden_states = ScatterTokens.apply(hidden_states, outputs[0],
sampled_indices[self.random_ltd_layer_id, :, :], self.batch_first)
my_list = list(outputs)
my_list[0] = hidden_states
return tuple(my_list)
elif isinstance(outputs, Tensor):
hidden_states = ScatterTokens.apply(hidden_states, outputs,
sampled_indices[self.random_ltd_layer_id, :, :], self.batch_first)
return hidden_states
else:
logger.warning("************For now, we only support tuple and tensor output. \
You need to adjust the output according to the layer in your model************")
raise NotImplementedError
else:
return self.random_ltd_layer(hidden_states, **kwargs)
| 5,638 | 48.464912 | 118 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/data_pipeline/data_routing/utils.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
def bsh_decoder_gather(reserved_length, hidden_states, mask):
# random-layer-token-drop
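# For each sample in the batch, keep reserved_length randomly chosen token positions
# (kept in sorted order) and slice the attention mask to match.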
rand_list = []
part_hidden_states = [] # batch, seq, hidden ## different from megatron
for k in range(hidden_states.size(0)):
B_tmp = torch.randperm(hidden_states.size(1), device=hidden_states.device)[:reserved_length]
B = B_tmp.sort()[0]
rand_list.append(B)
part_hidden_states.append(hidden_states[k:k + 1, B, :])
part_hidden_states = torch.cat(part_hidden_states, dim=0)
part_mask = mask[:, :, :reserved_length, :reserved_length]
return part_hidden_states, rand_list, part_mask
def bsh_decoder_scatter(hidden_states, part_hidden_states, rand_list):
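# Write the processed kept tokens back into their original positions in the full hidden states.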
for k in range(hidden_states.size(0)):
hidden_states[k, rand_list[k], :] = part_hidden_states[k, :, :]
return hidden_states
| 955 | 33.142857 | 100 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/zero/stage_1_and_2.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import os
from deepspeed import comm as dist
from packaging import version as pkg_version
from collections import OrderedDict
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from deepspeed.runtime import ZeROOptimizer
from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler
from deepspeed.runtime.utils import (bwc_tensor_model_parallel_rank, get_global_norm, empty_cache, see_memory_usage,
inf, is_model_parallel_parameter, align_dense_tensors, all_gather_dp_groups)
from deepspeed.runtime.zero.config import ZeroStageEnum
from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum
from deepspeed.ops.adam import DeepSpeedCPUAdam
from deepspeed.utils import logger
from deepspeed.moe.utils import is_moe_param
from deepspeed.git_version_info import version
from deepspeed.runtime.constants import PIPE_REPLICATED
from deepspeed.accelerator import get_accelerator
from deepspeed.checkpoint.constants import (DS_VERSION, GROUP_PADDINGS, PARTITION_COUNT,
SINGLE_PARTITION_OF_FP32_GROUPS, BASE_OPTIMIZER_STATE, CLIP_GRAD,
ZERO_STAGE, PARAM_SLICE_MAPPINGS)
from deepspeed.utils import link_hp_params
from deepspeed.checkpoint import enable_universal_checkpoint
# Toggle this to true to enable correctness test
# with gradient partitioning and without
pg_correctness_test = False
def input(msg):
return
def split_half_float_double(tensors):
device_type = get_accelerator().device_name()
dtypes = [
"torch.{}.HalfTensor".format(device_type), "torch.{}.FloatTensor".format(device_type),
"torch.{}.DoubleTensor".format(device_type), "torch.{}.BFloat16Tensor".format(device_type)
]
buckets = []
for i, dtype in enumerate(dtypes):
bucket = [t for t in tensors if t.type() == dtype]
if bucket:
buckets.append(bucket)
return buckets
def isclose(a, b, rtol=1e-09, atol=0.0):
return abs(a - b) <= max(rtol * max(abs(a), abs(b)), atol)
def lcm(x, y):
from math import gcd  # fractions.gcd was removed in Python 3.9+; use math.gcd
return x * y // gcd(x, y)
def get_alignment_padding(tensor_list, alignment):
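# Number of padding elements needed so the flattened tensor list is a multiple of `alignment`.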
num_elements = sum([tensor.numel() for tensor in tensor_list])
remainder = num_elements % alignment
return (alignment - remainder) if remainder else remainder
def move_to_cpu(tensor_list):
for tensor in tensor_list:
tensor.data = tensor.data.cpu()
def print_rank_msg(msg):
print(f"rank {dist.get_rank()} - {msg}")
def _get_padded_tensor(src_tensor, size):
if src_tensor.numel() >= size:
return src_tensor
padded_tensor = torch.zeros(size, dtype=src_tensor.dtype, device=src_tensor.device)
slice_tensor = torch.narrow(padded_tensor, 0, 0, src_tensor.numel())
slice_tensor.data.copy_(src_tensor.data)
return padded_tensor
class DeepSpeedZeroOptimizer(ZeROOptimizer):
"""
DeepSpeedZeroOptimizer designed to reduce the memory footprint
required for training large deep learning models.
For more details please see ZeRO: Memory Optimization Towards Training A Trillion Parameter Models
https://arxiv.org/abs/1910.02054
For usage examples, refer to TODO: DeepSpeed Tutorial
"""
def __init__(self,
init_optimizer,
param_names,
timers,
static_loss_scale=1.0,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=True,
contiguous_gradients=True,
reduce_bucket_size=500000000,
allgather_bucket_size=5000000000,
dp_process_group=None,
expert_parallel_group=None,
expert_data_parallel_group=None,
reduce_scatter=True,
overlap_comm=False,
cpu_offload=False,
mpu=None,
clip_grad=0.0,
communication_data_type=torch.float16,
postscale_gradients=True,
gradient_predivide_factor=1.0,
gradient_accumulation_steps=1,
ignore_unused_parameters=True,
partition_grads=True,
round_robin_gradients=False,
has_moe_layers=False,
fp16_master_weights_and_gradients=False,
elastic_checkpoint=False):
if dist.get_rank() == 0:
logger.info(f"Reduce bucket size {reduce_bucket_size}")
logger.info(f"Allgather bucket size {allgather_bucket_size}")
logger.info(f"CPU Offload: {cpu_offload}")
logger.info(f'Round robin gradient partitioning: {round_robin_gradients}')
# The fused optimizer does all the work. We need this layer for two reason:
# 1. maintain same user API from apex.fp16_utils
# 2. keep common stuff here in case we need to add a new fused optimizer later
self.elastic_checkpoint = elastic_checkpoint
self.param_names = param_names
self.mpu = mpu
# differences from apex.fp16_utils:
# - assume all model params in fp16
# - assume all params requires grad
# - flat by groups, not keeping state. TODO: remove state explicitly?
# - master grad and unflat master weight never exist. TODO: a way to save out unflat master?
if not get_accelerator().is_available():
raise SystemError("Cannot use fp16 without accelerator.")
self.optimizer = init_optimizer
# Use torch (un)flatten ops
self.flatten = _flatten_dense_tensors
self.unflatten = _unflatten_dense_tensors
# ZeRO stage 1 (False) or 2 (True)
self.partition_gradients = partition_grads
self.zero_stage_string = "ZeRO-2" if partition_grads else "ZeRO-1"
self.timers = timers
self.reduce_scatter = reduce_scatter
self.overlap_comm = overlap_comm
self.cpu_offload = cpu_offload
self.deepspeed_adam_offload = cpu_offload
self.device = get_accelerator().current_device_name() if not self.cpu_offload else 'cpu'
self.dp_process_group = dp_process_group
#expert parallel group
self.ep_process_group = expert_parallel_group
#data parallel group for experts
self.expert_dp_process_group = expert_data_parallel_group
#data parallel size for non-experts
dp_size = dist.get_world_size(group=self.dp_process_group)
# For MoE models this may be different for different param groups
#It will be modified during MoE setup later in the init
self.real_dp_process_group = [dp_process_group for i in range(len(self.optimizer.param_groups))]
self.partition_count = [dp_size for i in range(len(self.optimizer.param_groups))]
self.is_gradient_accumulation_boundary = True
# CPU-Offload requires contiguous gradients
self.contiguous_gradients = contiguous_gradients or cpu_offload
self.has_moe_layers = has_moe_layers
if self.has_moe_layers:
self._configure_moe_settings()
self._global_grad_norm = 0.
if mpu is None:
self.model_parallel_group = None
self.model_parallel_world_size = 1
self.model_parallel_rank = 0
else:
self.model_parallel_group = mpu.get_model_parallel_group()
self.model_parallel_world_size = mpu.get_model_parallel_world_size()
self.model_parallel_rank = bwc_tensor_model_parallel_rank(mpu)
self.overflow = False
self.clip_grad = clip_grad
self.communication_data_type = communication_data_type
self.gradient_predivide_factor = gradient_predivide_factor
self.postscale_gradients = postscale_gradients
self.gradient_accumulation_steps = gradient_accumulation_steps
self.micro_step_id = 0
self.ignore_unused_parameters = ignore_unused_parameters
self.round_robin_gradients = round_robin_gradients
self.extra_large_param_to_reduce = None
self.fp16_master_weights_and_gradients = fp16_master_weights_and_gradients
if self.fp16_master_weights_and_gradients:
assert self.cpu_offload and type(self.optimizer) in [DeepSpeedCPUAdam], \
f"fp16_master_and_gradients requires optimizer to support keeping fp16 master and gradients while keeping the optimizer states in fp32."\
f"Currently only supported using ZeRO-Offload with DeepSpeedCPUAdam. But current setting is ZeRO-Offload:{self.cpu_offload} and optimizer type {type(self.optimizer)}." \
f"Either disable fp16_master_weights_and_gradients or enable {self.zero_stage_string} Offload with DeepSpeedCPUAdam."
if self.reduce_scatter:
valid_reduce_scatter_dtypes = (torch.float16, torch.bfloat16, torch.float32)
assert self.communication_data_type in valid_reduce_scatter_dtypes, f"{self.zero_stage_string} supports {valid_reduce_scatter_dtypes} communication_data_type with reduce scatter enabled. Got: '{self.communication_data_type}'"
assert self.gradient_predivide_factor == 1.0, f"gradient_predivide_factor != 1.0 is not yet supported with {self.zero_stage_string} with reduce scatter enabled"
assert self.postscale_gradients, f"pre-scale gradients is not yet supported with {self.zero_stage_string} with reduce scatter enabled"
# param flattened by groups
self.bit16_groups = []
self.bit16_groups_flat = []
# param partitioned by data parallel degree
# this will contain a list of equal sized tensors
# each of which will be updated by a different process
self.parallel_partitioned_bit16_groups = []
# a single 32-bit partition of the parallel partitioned parameters
# that this process will update
self.single_partition_of_fp32_groups = []
# param partition info
# These are the parameters in each group that will not be updated by this process directly
self.params_not_in_partition = []
# These are the parameters that will be updated by this process directly
self.params_in_partition = []
# Offset from the first parameter in self.params_in_partition:
# the parameter boundaries may not align with partition boundaries
# so we need to keep track of the offset
self.first_offset = []
# number of elements per partition in each group
self.partition_size = []
# align nccl all-gather send buffers to 4-byte boundary
self.nccl_start_alignment_factor = 2 # 4-byte alignment/sizeof(fp16) = 2
assert (
allgather_bucket_size % self.nccl_start_alignment_factor == 0
), f"allgather_bucket_size must be a multiple of nccl_start_alignment_factor, {self.nccl_start_alignment_factor} "
self.all_reduce_print = False
self.dtype = self.optimizer.param_groups[0]['params'][0].dtype
self.round_robin_bit16_groups = []
self.round_robin_bit16_indices = []
# Use different parallel to do all_to_all_reduce related things
# padding on each partition for alignment purposes
self.groups_padding = []
# loop to deal with groups
for i, param_group in enumerate(self.optimizer.param_groups):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
# push this group to list before modify
# TODO: Explore simplification that avoids the extra book-keeping by pushing the reordered group
trainable_parameters = [param for param in param_group['params'] if param.requires_grad]
self.bit16_groups.append(trainable_parameters)
# not sure why apex was cloning the weights before flattening
# removing cloning here
see_memory_usage(f"Before moving param group {i} to CPU")
# move all the parameters to cpu to free up GPU space for creating flat buffer
move_to_cpu(self.bit16_groups[i])
empty_cache()
see_memory_usage(f"After moving param group {i} to CPU", force=False)
# Reorder group parameters for load balancing of gradient partitioning during backward among ranks.
# This ensures that gradients are reduced in a fashion such that ownership round robins among the ranks.
# For example, rather than 3 gradients (g_n+2, g_n+1, g_n) that are reduced consecutively belonging
# to the same rank, instead they will belong to 3 ranks (r_m+2, r_m+1, r_m).
if self.round_robin_gradients:
round_robin_tensors, round_robin_indices = self._round_robin_reorder(
self.bit16_groups[i], dist.get_world_size(group=self.real_dp_process_group[i]))
else:
round_robin_tensors = self.bit16_groups[i]
round_robin_indices = list(range(len(self.bit16_groups[i])))
self.round_robin_bit16_groups.append(round_robin_tensors)
self.round_robin_bit16_indices.append(round_robin_indices)
# create flat buffer in CPU and move to GPU
self.bit16_groups_flat.append(
self.flatten_dense_tensors_aligned(
self.round_robin_bit16_groups[i],
self.nccl_start_alignment_factor * dist.get_world_size(group=self.real_dp_process_group[i])).to(
get_accelerator().current_device_name()))
see_memory_usage(f"After flattening and moving param group {i} to GPU", force=False)
# Record padding required for alignment
if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1:
padding = self.bit16_groups_flat[i].numel() - sum(
[t.numel() for t in self.round_robin_bit16_groups[i]])
else:
padding = 0
self.groups_padding.append(padding)
if dist.get_rank(group=self.real_dp_process_group[i]) == 0:
see_memory_usage(f"After Flattening and after emptying param group {i} cache", force=False)
# set model bit16 weight to slices of flattened buffer
self._update_model_bit16_weights(i)
# divide the flat weights into near equal partition equal to the data parallel degree
# each process will compute on a different part of the partition
data_parallel_partitions = self.get_data_parallel_partitions(self.bit16_groups_flat[i], i)
self.parallel_partitioned_bit16_groups.append(data_parallel_partitions)
# verify that data partition start locations are 4-byte aligned
for partitioned_data in data_parallel_partitions:
assert (partitioned_data.data_ptr() % (2 * self.nccl_start_alignment_factor) == 0)
# A partition of the fp32 master weights that will be updated by this process.
# Note that the params in single_partition_of_fp32_groups is cloned and detached
# from the origin params of the model.
if not fp16_master_weights_and_gradients:
self.single_partition_of_fp32_groups.append(self.parallel_partitioned_bit16_groups[i][partition_id].to(
self.device).clone().float().detach())
else:
self.single_partition_of_fp32_groups.append(self.parallel_partitioned_bit16_groups[i][partition_id].to(
self.device).clone().half().detach())
# Set local optimizer to have flat params of its own partition.
# After this, the local optimizer will only contain its own partition of params.
# In that case, the local optimizer only saves the states (momentum, variance, etc.) related to its partition's params (ZeRO stage 1).
self.single_partition_of_fp32_groups[
i].requires_grad = True # keep this in case internal optimizer uses it
param_group['params'] = [self.single_partition_of_fp32_groups[i]]
partition_size = len(self.bit16_groups_flat[i]) / dist.get_world_size(group=self.real_dp_process_group[i])
params_in_partition, params_not_in_partition, first_offset = self.get_partition_info(
self.round_robin_bit16_groups[i], partition_size, partition_id)
self.partition_size.append(partition_size)
self.params_in_partition.append(params_in_partition)
self.params_not_in_partition.append(params_not_in_partition)
self.first_offset.append(first_offset)
for rank in range(dist.get_world_size()):
if dist.get_rank() == rank:
print(
f"Rank: {rank} partition count {self.partition_count} and sizes{[(p.numel(), self.is_moe_param_group[i] if hasattr(self, 'is_moe_param_group') else False) for i,p in enumerate(self.single_partition_of_fp32_groups)]} "
)
dist.barrier()
self.reduce_bucket_size = int(reduce_bucket_size)
self.allgather_bucket_size = int(allgather_bucket_size)
self.reduction_event = get_accelerator().Event(enable_timing=False, blocking=False)
self.reduction_stream = get_accelerator().Stream()
self.cpu_computation_stream = get_accelerator().Stream()
self.copy_grad_stream = get_accelerator().Stream()
self.callback_queued = False
self.param_dict = {}
# map between param_id and bool to specify if a param is in this partition
self.is_param_in_current_partition = {}
self.grads_in_ipg_bucket = []
self.params_in_ipg_bucket = []
self.elements_in_ipg_bucket = 0
self.params_already_reduced = []
self._release_ipg_buffers()
self.previous_reduced_grads = None
self.ipg_bucket_has_moe_params = False
# simplified param id
self.param_id = {}
        # assign unique ids to individual parameters
largest_param_numel = 0
count = 0
for i, params_group in enumerate(self.bit16_groups):
for param in params_group:
unique_id = id(param)
self.param_id[unique_id] = count
self.param_dict[count] = param
self.params_already_reduced.append(False)
if param.numel() > largest_param_numel:
largest_param_numel = param.numel()
count = count + 1
for param_group in self.params_in_partition:
for param in param_group:
self.is_param_in_current_partition[self.get_param_id(param)] = True
for param_group in self.params_not_in_partition:
for param in param_group:
self.is_param_in_current_partition[self.get_param_id(param)] = False
if self.cpu_offload:
self.accumulated_grads_in_cpu = {}
self.norm_for_param_grads = {}
self.local_overflow = False
self.grad_position = {}
self.temp_grad_buffer_for_cpu_offload = get_accelerator().pin_memory(
torch.zeros(largest_param_numel, device=self.device, dtype=self.dtype))
self.temp_grad_buffer_for_gpu_offload = torch.zeros(largest_param_numel,
device=get_accelerator().current_device_name(),
dtype=self.dtype)
for i, params_group in enumerate(self.bit16_groups):
self.get_grad_position(i, self.params_in_partition[i], self.first_offset[i], self.partition_size[i])
# mapping from parameter to partition that it belongs to
self.param_to_partition_ids = {}
# stores if a partition has been reduced in this step
self.is_partition_reduced = {}
# number of grads in partition that still need to be computed
self.remaining_grads_in_partition = {}
# total number of grads in partition
self.total_grads_in_partition = {}
# stores if a grad in a partition has been computed or not
self.is_grad_computed = {}
# stores the offset at which a parameter gradient needs to be inserted in a partition
self.grad_partition_insertion_offset = {}
# the offset in the gradient at which it must be inserted at the beginning of the partition
self.grad_start_offset = {}
# will store the averaged gradients required by this partition
self.averaged_gradients = {}
# For cpu_offload, will store the averaged gradients required by this partition
self.offload_gradient_dict = {}
# store index of first parameter in each partition
self.first_param_index_in_partition = {}
# initializes all data structures for implementing gradient partitioning
self.initialize_gradient_partitioning_data_structures()
# resets the data structure value for the next backward propagation
self.reset_partition_gradient_structures()
# creates backward hooks for gradient partitioning
if self.partition_gradients or self.overlap_comm:
self.create_reduce_and_remove_grad_hooks()
self.custom_loss_scaler = False
self.external_loss_scale = None
        # We may add a way of fusing dynamic loss scaling later; it is not supported for now.
self.loss_scaler = CreateLossScaler(dtype=self.dtype,
static_loss_scale=static_loss_scale,
dynamic_scaling=dynamic_loss_scale,
dynamic_loss_args=dynamic_loss_args)
self.dynamic_loss_scale = self.loss_scaler.dynamic
if self.dtype != torch.float16:
# Only fp16 should use dynamic loss scaling
assert self.loss_scaler.cur_scale == 1.0
assert not self.dynamic_loss_scale
see_memory_usage("Before initializing optimizer states", force=True)
self.initialize_optimizer_states()
see_memory_usage("After initializing optimizer states", force=True)
if dist.get_rank() == 0:
logger.info(f"optimizer state initialized")
if dist.get_rank(group=self.dp_process_group) == 0:
see_memory_usage(f"After initializing ZeRO optimizer", force=True)
self._link_all_hp_params()
self._enable_universal_checkpoint()
self._param_slice_mappings = self._create_param_mapping()
def _enable_universal_checkpoint(self):
for lp_param_group in self.bit16_groups:
enable_universal_checkpoint(param_list=lp_param_group)
def _create_param_mapping(self):
param_mapping = []
for i, _ in enumerate(self.optimizer.param_groups):
param_mapping_per_group = OrderedDict()
for lp in self.bit16_groups[i]:
if lp._hp_mapping is not None:
lp_name = self.param_names[lp]
param_mapping_per_group[lp_name] = lp._hp_mapping.get_hp_fragment_address()
param_mapping.append(param_mapping_per_group)
return param_mapping
def _link_all_hp_params(self):
dp_world_size = dist.get_world_size(group=self.dp_process_group)
if self.cpu_offload:
self._get_offload_gradient_dict()
for i, _ in enumerate(self.optimizer.param_groups):
# Link bit16 and fp32 params in partition
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
partition_size = self.bit16_groups_flat[i].numel() // dp_world_size
flat_hp_partition = self.single_partition_of_fp32_groups[i]
link_hp_params(lp_param_list=self.bit16_groups[i],
flat_hp_partition=flat_hp_partition,
gradient_dict=self.averaged_gradients,
offload_gradient_dict=self.offload_gradient_dict,
use_offload=self.cpu_offload,
param_group_index=i,
partition_start=partition_id * partition_size,
partition_size=partition_size,
partition_optimizer_state=self.optimizer.state[flat_hp_partition],
dp_group=self.real_dp_process_group[i])
def is_moe_group(self, group):
return 'moe' in group and group['moe']
def _configure_moe_settings(self):
# if we're using ZeRO stage 2, ensure contiguous gradients are used
if self.partition_gradients:
assert self.contiguous_gradients, "Contiguous Gradients in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE"
# NOTE: To run ZeRO stage 1 with MoE, we need to set self.contiguous_gradients to True or ignore the assertion
if not self.partition_gradients and not self.contiguous_gradients:
            logger.warning(
                "ZeRO Stage 1 has not been thoroughly tested with MoE. This configuration is still experimental.")
assert self.reduce_scatter, "Reduce Scatter in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE"
assert any(
[self.is_moe_group(group) for group in self.optimizer.param_groups]
), "The model has moe layers, but None of the param groups are marked as MoE. Create a param group with 'moe' key set to True before creating optimizer"
self.is_moe_param_group = []
for i, group in enumerate(self.optimizer.param_groups):
if self.is_moe_group(group):
assert all([is_moe_param(param)
for param in group['params']]), "All params in MoE group must be MoE params"
self.real_dp_process_group[i] = self.expert_dp_process_group[group['name']]
self.partition_count[i] = dist.get_world_size(group=self.expert_dp_process_group[group['name']])
self.is_moe_param_group.append(True)
else:
self.is_moe_param_group.append(False)
assert self.expert_dp_process_group is not None, "Expert data parallel group should be configured with MoE"
assert self.ep_process_group is not None, "Expert parallel group should be configured with MoE"
def _update_model_bit16_weights(self, group_index):
updated_params = self.unflatten(self.bit16_groups_flat[group_index],
self.round_robin_bit16_groups[group_index])
for p, q in zip(self.round_robin_bit16_groups[group_index], updated_params):
p.data = q.data
# set model fp16 weight to slices of reordered flattened buffer
for param_index, param in enumerate(self.bit16_groups[group_index]):
new_index = self.round_robin_bit16_indices[group_index][param_index]
param.data = self.round_robin_bit16_groups[group_index][new_index].data
def _round_robin_reorder(self, tensor_list, num_partitions):
        # To disable round-robin reordering (e.g. for debugging), uncomment the next line:
        # return tensor_list, list(range(len(tensor_list)))
partition_tensors = {}
for i, tensor in enumerate(tensor_list):
j = i % num_partitions
            if j not in partition_tensors:
partition_tensors[j] = []
partition_tensors[j].append((i, tensor))
reordered_tensors = []
reordered_indices = {}
for partition_index in partition_tensors.keys():
for i, (original_index, tensor) in enumerate(partition_tensors[partition_index]):
reordered_indices[original_index] = len(reordered_tensors)
reordered_tensors.append(tensor)
return reordered_tensors, reordered_indices
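    # A minimal sketch of the round-robin reordering above, assuming five tensors t0..t4
    # and num_partitions=2: partition 0 collects (0, t0), (2, t2), (4, t4) and partition 1
    # collects (1, t1), (3, t3), so
    #   reordered_tensors == [t0, t2, t4, t1, t3]
    #   reordered_indices == {0: 0, 2: 1, 4: 2, 1: 3, 3: 4}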
def _release_ipg_buffers(self):
if self.contiguous_gradients:
self.ipg_buffer = None
self.grads_in_partition = None
self.grads_in_partition_offset = 0
def initialize_optimizer_states(self):
for i, group in enumerate(self.bit16_groups):
single_grad_partition = torch.zeros(int(self.partition_size[i]),
dtype=self.single_partition_of_fp32_groups[i].dtype,
device=self.device)
self.single_partition_of_fp32_groups[i].grad = get_accelerator().pin_memory(
single_grad_partition) if self.cpu_offload else single_grad_partition
# Initialize the optimizer states with the flattened fp32 partition.
# State initialization for the Adagrad optimizer occurs at construction as opposed to other optimizers
# which do lazy initialization of the state at the first call to step.
if isinstance(self.optimizer, torch.optim.Adagrad):
self.optimizer = torch.optim.Adagrad(self.single_partition_of_fp32_groups, **self.optimizer.defaults)
else:
self.optimizer.step()
if not self.cpu_offload:
for group in self.single_partition_of_fp32_groups:
group.grad = None #class init
return
#########################################################################
#################### ZeRO Stage 1 - reduce gradients ####################
#########################################################################
def reduce_gradients(self, pipeline_parallel=False):
world_size = dist.get_world_size(self.dp_process_group)
my_rank = dist.get_rank(self.dp_process_group)
# with PP we must create ipg buffer, since backward is handled outside zero
if pipeline_parallel and self.contiguous_gradients:
self.ipg_buffer = []
buf_0 = torch.empty(int(self.reduce_bucket_size),
dtype=self.dtype,
device=get_accelerator().current_device_name())
self.ipg_buffer.append(buf_0)
self.ipg_index = 0
if not self.overlap_comm:
for i, group in enumerate(self.bit16_groups):
for param in group:
if param.grad is not None:
self.reduce_ready_partitions_and_remove_grads(param, i)
# reduce any pending grads in either hook/non-hook case
self.overlapping_partition_gradients_reduce_epilogue()
#########################################################################
#########################ZeRO Partition Gradients########################
#########################################################################
def get_first_param_index(self, group_id, param_group, partition_id):
for index, param in enumerate(param_group):
param_id = self.get_param_id(param)
if partition_id in self.param_to_partition_ids[group_id][param_id]:
return index
return None
def initialize_gradient_partitioning_data_structures(self):
for i, param_group in enumerate(self.round_robin_bit16_groups):
total_partitions = dist.get_world_size(group=self.real_dp_process_group[i])
self.param_to_partition_ids[i] = {}
self.is_partition_reduced[i] = {}
self.total_grads_in_partition[i] = {}
self.remaining_grads_in_partition[i] = {}
self.is_grad_computed[i] = {}
self.grad_partition_insertion_offset[i] = {}
self.grad_start_offset[i] = {}
self.first_param_index_in_partition[i] = {}
for partition_id in range(total_partitions):
self.is_grad_computed[i][partition_id] = {}
self.grad_partition_insertion_offset[i][partition_id] = {}
self.grad_start_offset[i][partition_id] = {}
self.total_grads_in_partition[i][partition_id] = 0
self.initialize_gradient_partition(i, param_group, partition_id)
self.is_partition_reduced[i][partition_id] = False
self.first_param_index_in_partition[i][partition_id] = self.get_first_param_index(
i, param_group, partition_id)
def independent_gradient_partition_epilogue(self):
self.report_ipg_memory_usage(f"In ipg_epilogue before reduce_ipg_grads", 0)
self.reduce_ipg_grads()
self.report_ipg_memory_usage(f"In ipg_epilogue after reduce_ipg_grads", 0)
# if dist.get_rank() == 0:
# logger.info("Params already reduced %s", self.params_already_reduced)
for i in range(len(self.params_already_reduced)):
self.params_already_reduced[i] = False
if self.overlap_comm:
get_accelerator().synchronize()
# It is safe to clear previously reduced grads of other partitions
self._clear_previous_reduced_grads()
if self.cpu_offload is False:
for i, _ in enumerate(self.bit16_groups):
                if i not in self.averaged_gradients or self.averaged_gradients[i] is None:
self.averaged_gradients[i] = self.get_flat_partition(
self.params_in_partition[i],
self.first_offset[i],
self.partition_size[i],
dtype=self.dtype,
device=get_accelerator().current_device_name(),
return_tensor_list=True)
else:
avg_new = self.get_flat_partition(self.params_in_partition[i],
self.first_offset[i],
self.partition_size[i],
dtype=self.dtype,
device=get_accelerator().current_device_name(),
return_tensor_list=True)
for accumulated_grad, new_avg_grad in zip(self.averaged_gradients[i], avg_new):
accumulated_grad.add_(new_avg_grad)
self._release_ipg_buffers()
# No need to keep the gradients anymore.
# All gradients required by the step
# are in self.averaged_gradients
self.zero_grad(set_to_none=True)
see_memory_usage(f"End ipg_epilogue")
    # resets all partitions to not reduced
    # sets remaining grads to the total number of grads in each partition
    # sets is_grad_computed to False for all grads in the partition
def reset_partition_gradient_structures(self):
for i, _ in enumerate(self.bit16_groups):
total_partitions = dist.get_world_size(group=self.real_dp_process_group[i])
for partition_id in range(total_partitions):
self.is_partition_reduced[i][partition_id] = False
self.remaining_grads_in_partition[i][partition_id] = self.total_grads_in_partition[i][partition_id]
for param_id in self.is_grad_computed[i][partition_id]:
self.is_grad_computed[i][partition_id][param_id] = False
def initialize_gradient_partition(self, i, param_group, partition_id):
def set_key_value_list(dictionary, key, value):
if key in dictionary:
dictionary[key].append(value)
else:
dictionary[key] = [value]
def increment_value(dictionary, key):
if key in dictionary:
dictionary[key] += 1
else:
dictionary[key] = 1
partition_size = self.partition_size[i]
start_index = partition_size * partition_id
end_index = partition_size * (partition_id + 1)
current_index = 0
first_offset = 0
for param in param_group:
param_size = param.numel()
param_id = self.get_param_id(param)
if (current_index >= start_index and current_index < end_index):
set_key_value_list(self.param_to_partition_ids[i], param_id, partition_id)
increment_value(self.total_grads_in_partition[i], partition_id)
self.is_grad_computed[i][partition_id][param_id] = False
self.grad_partition_insertion_offset[i][partition_id][param_id] = current_index - start_index
self.grad_start_offset[i][partition_id][param_id] = 0
elif start_index > current_index and start_index < (current_index + param_size):
                assert (first_offset == 0
                        ), "This can happen at most once, as this must be the first tensor in the partition"
first_offset = start_index - current_index
set_key_value_list(self.param_to_partition_ids[i], param_id, partition_id)
increment_value(self.total_grads_in_partition[i], partition_id)
self.is_grad_computed[i][partition_id][param_id] = False
self.grad_partition_insertion_offset[i][partition_id][param_id] = 0
self.grad_start_offset[i][partition_id][param_id] = first_offset
current_index = current_index + param_size
def overlapping_partition_gradients_reduce_epilogue(self):
self.independent_gradient_partition_epilogue()
def create_reduce_and_remove_grad_hooks(self):
self.grad_accs = []
for i, param_group in enumerate(self.bit16_groups):
for param in param_group:
if param.requires_grad:
def wrapper(param, i):
param_tmp = param.expand_as(param)
grad_acc = param_tmp.grad_fn.next_functions[0][0]
def reduce_partition_and_remove_grads(*notneeded):
self.reduce_ready_partitions_and_remove_grads(param, i)
grad_acc.register_hook(reduce_partition_and_remove_grads)
self.grad_accs.append(grad_acc)
wrapper(param, i)
def get_param_id(self, param):
unique_id = id(param)
return self.param_id[unique_id]
def report_ipg_memory_usage(self, tag, param_elems):
elem_count = self.elements_in_ipg_bucket + param_elems
percent_of_bucket_size = (100.0 * elem_count) // self.reduce_bucket_size
see_memory_usage(
f"{tag}: elems in_bucket {self.elements_in_ipg_bucket} param {param_elems} max_percent {percent_of_bucket_size}"
)
# create a flat tensor aligned at the alignment boundary
def flatten_dense_tensors_aligned(self, tensor_list, alignment):
return self.flatten(align_dense_tensors(tensor_list, alignment))
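    # A minimal sketch of the alignment behavior, assuming nccl_start_alignment_factor=2
    # and a data parallel world size of 32 (alignment = 64 elements): if tensor_list holds
    # 1000 elements in total, align_dense_tensors appends a zero pad so that the flattened
    # buffer length becomes the next multiple of 64, i.e. 1024 elements (24 padding elements).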
############### Independent Partition Gradient ########################
def reduce_independent_p_g_buckets_and_remove_grads(self, param, i):
if self.elements_in_ipg_bucket + param.numel() > self.reduce_bucket_size:
self.report_ipg_memory_usage("In ipg_remove_grads before reduce_ipg_grads", param.numel())
self.reduce_ipg_grads()
if self.contiguous_gradients and self.overlap_comm:
# Swap ipg_index between 0 and 1
self.ipg_index = 1 - self.ipg_index
self.report_ipg_memory_usage("In ipg_remove_grads after reduce_ipg_grads", param.numel())
param_id = self.get_param_id(param)
assert self.params_already_reduced[param_id] == False, \
f"The parameter {param_id} has already been reduced. \
Gradient computed twice for this partition. \
Multiple gradient reduction is currently not supported"
if self.contiguous_gradients:
if param.numel() > self.reduce_bucket_size:
self.extra_large_param_to_reduce = param
else:
# keeping the gradients contiguous to prevent memory fragmentation, and avoid flattening
new_grad_tensor = self.ipg_buffer[self.ipg_index].narrow(0, self.elements_in_ipg_bucket, param.numel())
new_grad_tensor.copy_(param.grad.view(-1))
param.grad.data = new_grad_tensor.data.view_as(param.grad)
self.elements_in_ipg_bucket += param.numel()
assert param.grad is not None, f"rank {dist.get_rank()} - Invalid to reduce Param {param_id} with None gradient"
self.grads_in_ipg_bucket.append(param.grad)
self.params_in_ipg_bucket.append((i, param, param_id))
#make sure the average tensor function knows how to average the gradients
if is_moe_param(param):
self.ipg_bucket_has_moe_params = True
self.report_ipg_memory_usage("End ipg_remove_grads", 0)
def print_rank_0(self, message):
if dist.get_rank() == 0:
logger.info(message)
def gradient_reduction_w_predivide(self, tensor):
dp_world_size = dist.get_world_size(group=self.dp_process_group)
tensor_to_allreduce = tensor
if self.communication_data_type != tensor.dtype:
tensor_to_allreduce = tensor.to(self.communication_data_type)
if self.postscale_gradients:
if self.gradient_predivide_factor != 1.0:
tensor_to_allreduce.mul_(1. / self.gradient_predivide_factor)
dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)
if self.gradient_predivide_factor != dp_world_size:
tensor_to_allreduce.mul_(self.gradient_predivide_factor / dp_world_size)
else:
tensor_to_allreduce.div_(dp_world_size)
dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)
if self.communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce:
tensor.copy_(tensor_to_allreduce)
return tensor
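    # A worked example of the scaling above, assuming postscale_gradients=True,
    # gradient_predivide_factor=4 and a data parallel world size of 16: each rank first
    # multiplies its tensor by 1/4, the all_reduce sums over the 16 ranks, and the final
    # multiply by 4/16 leaves the averaged gradient (sum / 16), with better fp16 headroom
    # than dividing by 16 only at the end.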
def average_tensor(self, tensor):
if self.overlap_comm:
stream = self.reduction_stream
stream.wait_stream(get_accelerator().current_stream())
else:
stream = get_accelerator().current_stream()
with get_accelerator().stream(stream):
if not self.reduce_scatter:
self.gradient_reduction_w_predivide(tensor)
return
# Accumulate destination ranks and bucket offsets for each gradient slice.
# Note: potential future optimization, record access pattern of parameters
# in backward pass and partition gradients w.r.t. access pattern so that our
# bucket is guaranteed to be contiguous w.r.t. ranks
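            # A hedged sketch of what rank_and_offsets ends up holding: suppose the bucket
            # contains g0 (6 elements owned entirely by partition 1) followed by g1
            # (10 elements, split 4 / 6 between partitions 1 and 2). The raw slices would
            # be (1, 0, 6), (1, 6, 4), (2, 10, 6); the merge step below collapses the two
            # adjacent slices for partition 1 into [(1, 0, 10), (2, 10, 6)].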
rank_and_offsets = []
real_dp_process_group = []
curr_size = 0
prev_id, prev_process_group = -1, None
process_group = self.dp_process_group
# count = 0
for i, param, param_id in self.params_in_ipg_bucket:
process_group = self.dp_process_group
                # Average gradients at the parameter level if the ipg bucket has a MoE param.
                # Otherwise averaging is done at the entire buffer level at the end of the loop.
                # MoE params have different process groups.
if self.ipg_bucket_has_moe_params:
process_group = self.expert_dp_process_group[param.group_name] if is_moe_param(
param) else self.dp_process_group
param.grad.data.div_(dist.get_world_size(group=process_group))
partition_ids = self.param_to_partition_ids[i][param_id]
assert all([p_id < dist.get_world_size(group=process_group) for p_id in partition_ids
]), f"world size {dist.get_world_size(group=process_group)} and p_ids: {partition_ids}"
partition_size = self.partition_size[i]
# Get all partition ids + their offsets
partition_ids_w_offsets = []
for partition_id in partition_ids:
offset = self.grad_start_offset[i][partition_id][param_id]
partition_ids_w_offsets.append((partition_id, offset))
partition_ids_w_offsets.sort(key=lambda t: t[1])
# Calculate rank and offsets for grad slices
for idx in range(len(partition_ids_w_offsets)):
partition_id, offset = partition_ids_w_offsets[idx]
# if dist.get_rank() == 0 and count < 100:
# print(f"Rank {dist.get_rank()} rank offset id {idx} calculated dp size {dist.get_world_size(group=process_group)} real dp size {dist.get_world_size(self.real_dp_process_group[i])} and dst: {partition_id}")
# count += 1
# Calculate numel for grad slice depending on partition location
if idx == len(partition_ids_w_offsets) - 1:
# Last partition_id uses its own offset
numel = param.numel() - offset
else:
# Set numel to next partition's offset
numel = partition_ids_w_offsets[idx + 1][1] - offset
# Merge bucket ranges if they belong to the same rank
if partition_id == prev_id and process_group == prev_process_group:
prev_pid, prev_size, prev_numel = rank_and_offsets[-1]
rank_and_offsets[-1] = (prev_pid, prev_size, prev_numel + numel)
else:
rank_and_offsets.append((partition_id, curr_size, numel))
real_dp_process_group.append(process_group)
curr_size += numel
prev_id, prev_process_group = partition_id, process_group
if not self.ipg_bucket_has_moe_params:
tensor.div_(dist.get_world_size(group=self.dp_process_group))
tensor_to_reduce = tensor
if self.communication_data_type != tensor.dtype:
tensor_to_reduce = tensor.to(self.communication_data_type)
async_handles = []
for i, (dst, bucket_offset, numel) in enumerate(rank_and_offsets):
grad_slice = tensor_to_reduce.narrow(0, int(bucket_offset), int(numel))
# if dist.get_rank() == 0:
# print(f"Rank {dist.get_rank()} rank offset id {i} real dp size {dist.get_world_size(group=real_dp_process_group[i])} and dst: {dst}")
# dist.barrier()
#dist.barrier()
dst_rank = dist.get_global_rank(real_dp_process_group[i], dst)
async_handle = dist.reduce(grad_slice, dst=dst_rank, group=real_dp_process_group[i], async_op=True)
async_handles.append(async_handle)
for handle in async_handles:
handle.wait()
if self.communication_data_type != tensor.dtype:
tensor.copy_(tensor_to_reduce)
##############################################################################
############################# CPU Offload Methods#############################
##############################################################################
def get_grad_position(self, group_id, tensor_list, first_offset, partition_size):
current_offset = 0
for i, tensor in enumerate(tensor_list):
param_id = self.get_param_id(tensor)
param_start_offset = 0
num_elements = tensor.numel()
# we need to offset to get to the right element
if i == 0 and first_offset > 0:
tensor_offset = first_offset
num_elements = num_elements - tensor_offset
param_start_offset = first_offset
            # we don't need all elements of the tensor
if num_elements > (partition_size - current_offset):
num_elements = partition_size - current_offset
self.grad_position[param_id] = [
int(group_id), int(param_start_offset),
int(current_offset), int(num_elements)
]
current_offset += num_elements
def update_overflow_tracker_for_param_grad(self, param):
if param.grad is not None and self._has_inf_or_nan(param.grad.data):
self.local_overflow = True
def _get_offload_gradient_dict(self):
for param_group_index, _ in enumerate(self.optimizer.param_groups):
self.offload_gradient_dict[param_group_index] = []
for lp_param in self.params_in_partition[param_group_index]:
param_id = self.get_param_id(lp_param)
[_, _, dest_offset, num_elements] = self.grad_position[param_id]
dest_tensor = self.single_partition_of_fp32_groups[param_group_index].grad.view(-1).narrow(
0, dest_offset, num_elements)
self.offload_gradient_dict[param_group_index].append(dest_tensor)
def async_accumulate_grad_in_cpu_via_gpu(self, param):
param_id = self.get_param_id(param)
[i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]
        # copy to a preexisting buffer to avoid the memory allocation penalty
dest_buffer = self.temp_grad_buffer_for_gpu_offload.view(-1).narrow(0, 0, param.numel())
#buffer for storing gradients for this parameter in CPU
def buffer_to_accumulate_to_in_cpu():
if not self.fp16_master_weights_and_gradients:
return get_accelerator().pin_memory(torch.zeros(param.numel(), dtype=param.dtype, device=self.device))
else:
return self.single_partition_of_fp32_groups[i].grad.view(-1).narrow(0, dest_offset, num_elements)
        # accumulate gradients into param.grad, or the parts of it that belong to this partition
def accumulate_gradients():
if not self.fp16_master_weights_and_gradients:
dest_buffer.copy_(self.accumulated_grads_in_cpu[param_id].view(-1), non_blocking=True)
param.grad.data.view(-1).add_(dest_buffer)
else:
dest_buffer.narrow(0, source_offset,
num_elements).copy_(self.accumulated_grads_in_cpu[param_id].view(-1),
non_blocking=True)
param.grad.data.view(-1).narrow(0, source_offset,
num_elements).add_(dest_buffer.narrow(0, source_offset, num_elements))
#move accumulated gradients back to CPU
def copy_gradients_to_cpu():
if not self.fp16_master_weights_and_gradients:
self.accumulated_grads_in_cpu[param_id].data.copy_(param.grad.data.view(-1), non_blocking=True)
else:
self.accumulated_grads_in_cpu[param_id].data.copy_(param.grad.data.view(-1).narrow(
0, source_offset, num_elements),
non_blocking=True)
if param_id not in self.accumulated_grads_in_cpu:
self.accumulated_grads_in_cpu[param_id] = buffer_to_accumulate_to_in_cpu()
if self.micro_step_id > 0:
accumulate_gradients()
# at the boundary we will send 32bit directly
if not self.is_gradient_accumulation_boundary:
copy_gradients_to_cpu()
def set_norm_for_param_grad(self, param):
param_id = self.get_param_id(param)
accumulated_grad = self.accumulated_grads_in_cpu[
param_id] if self.gradient_accumulation_steps > 1 else param.grad
[i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]
start = source_offset
accumulated_grad = accumulated_grad.view(-1).narrow(0, start, num_elements)
self.norm_for_param_grads[param_id] = accumulated_grad.data.double().norm(2)
def set_norm_for_param_grad_in_gpu(self, param):
param_id = self.get_param_id(param)
accumulated_grad = param.grad
[i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]
start = source_offset
accumulated_grad = accumulated_grad.view(-1).narrow(0, start, num_elements)
self.norm_for_param_grads[param_id] = accumulated_grad.data.double().norm(2)
def async_inplace_copy_grad_to_fp32_buffer_from_gpu(self, param):
param_id = self.get_param_id(param)
[i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]
dest_tensor = self.single_partition_of_fp32_groups[i].grad.view(-1).narrow(0, dest_offset, num_elements)
src_tensor = param.grad.view(-1).narrow(0, source_offset, num_elements)
if not self.fp16_master_weights_and_gradients:
src_tensor = src_tensor.float()
dest_tensor.copy_(src_tensor, non_blocking=True)
param.grad = None #offload only
def complete_grad_norm_calculation_for_cpu_offload(self, params):
total_norm = 0.0
norm_type = 2.0
for p in params:
# Pipeline parallelism may replicate parameters. Avoid multi-counting.
if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated:
continue
if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0):
param_id = self.get_param_id(p)
                    # Some models have trainable parameters that are skipped in training;
                    # their backward hooks in self.create_reduce_and_remove_grad_hooks() will not run,
                    # so they have no norm_for_param_grads.
if param_id in self.norm_for_param_grads:
param_norm = self.norm_for_param_grads[param_id]
total_norm += param_norm.item()**2
else:
                    # Unused parameters in modules may be unexpected, so raise an explicit
                    # error message when this occurs, with an option to suppress it.
                    assert self.ignore_unused_parameters, """
                        This assert indicates that your module has parameters that
                        were not used in producing loss.
                        You can avoid this assert by
                        (1) enabling the ignore_unused_parameters option in the zero_optimization config;
                        (2) making sure all trainable parameters and `forward` function
                        outputs participate in calculating loss.
                    """
# Sum across all model parallel GPUs.
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=self.dp_process_group)
self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM)
total_norm = total_norm_cuda[0].item()**(1. / norm_type)
if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
############################################################################################
def copy_grads_in_partition(self, param):
if self.cpu_offload:
if self.gradient_accumulation_steps > 1:
self.async_accumulate_grad_in_cpu_via_gpu(param)
if self.is_gradient_accumulation_boundary:
self.set_norm_for_param_grad_in_gpu(param)
self.update_overflow_tracker_for_param_grad(param)
self.async_inplace_copy_grad_to_fp32_buffer_from_gpu(param)
return
#print(f"ID {self.get_param_id(param)} grad norm {param.grad.norm()}")
if self.grads_in_partition is None:
self.grads_in_partition_offset = 0
total_size = 0
for group in self.params_in_partition:
for param_in_partition in group:
total_size += param_in_partition.numel()
see_memory_usage(f"before copying {total_size} gradients into partition")
self.grads_in_partition = torch.empty(int(total_size),
dtype=self.dtype,
device=get_accelerator().current_device_name())
see_memory_usage(f"after copying {total_size} gradients into partition")
# The allreduce buffer will be rewritten. Copy the gradients in partition to a new buffer
new_grad_tensor = self.grads_in_partition.view(-1).narrow(0, self.grads_in_partition_offset, param.numel())
new_grad_tensor.copy_(param.grad.view(-1))
param.grad.data = new_grad_tensor.data.view_as(param.grad)
#print(f"Grad norm after copy to contiguous_buffer {param.grad.data.norm()}")
self.grads_in_partition_offset += param.numel()
def reduce_ipg_grads(self):
if self.contiguous_gradients:
if self.extra_large_param_to_reduce is not None:
assert len(self.params_in_ipg_bucket) == 1, "more than 1 param in ipg bucket, this shouldn't happen"
_, _, param_id = self.params_in_ipg_bucket[0]
assert self.get_param_id(self.extra_large_param_to_reduce
) == param_id, "param in ipg bucket does not match extra-large param"
self.average_tensor(self.extra_large_param_to_reduce.grad.view(-1))
self.extra_large_param_to_reduce = None
else:
self.average_tensor(self.ipg_buffer[self.ipg_index])
else:
self.buffered_reduce_fallback(None,
self.grads_in_ipg_bucket,
elements_per_buffer=self.elements_in_ipg_bucket)
if self.overlap_comm:
stream = self.reduction_stream
elif self.cpu_offload:
# TODO: copy_grad_stream is disabled because of race with reduce. This hurts perf and should be fixed.
# get_accelerator().synchronize()
# stream = self.copy_grad_stream
stream = get_accelerator().current_stream()
else:
stream = get_accelerator().current_stream()
with get_accelerator().stream(stream):
for _, param, param_id in self.params_in_ipg_bucket:
assert self.params_already_reduced[param_id] == False, \
f"The parameter {param_id} has already been reduced. \
Gradient computed twice for this partition. \
Multiple gradient reduction is currently not supported"
self.params_already_reduced[param_id] = True
if self.partition_gradients:
if not self.is_param_in_current_partition[param_id]:
if self.overlap_comm and self.contiguous_gradients is False:
# Clear grads of other partitions during the next reduction
# to avoid clearing them before the reduction is complete.
if self.previous_reduced_grads is None:
self.previous_reduced_grads = []
self.previous_reduced_grads.append(param)
else:
param.grad = None #only if self.partition_gradients
elif self.contiguous_gradients:
self.copy_grads_in_partition(param)
else: # zero stage 1 - partition only optimizer state
if self.contiguous_gradients and self.is_param_in_current_partition[param_id]:
self.copy_grads_in_partition(param)
self.grads_in_ipg_bucket = []
self.params_in_ipg_bucket = []
self.ipg_bucket_has_moe_params = False
self.elements_in_ipg_bucket = 0
#####################################################################
def reduce_ready_partitions_and_remove_grads(self, param, i):
if self.partition_gradients or self.is_gradient_accumulation_boundary:
self.reduce_independent_p_g_buckets_and_remove_grads(param, i)
def zero_reduced_gradients(self, partition_id, i):
def are_all_related_partitions_reduced(params_id):
for partition_id in self.param_to_partition_ids[i][params_id]:
if not self.is_partition_reduced[i][partition_id]:
return False
return True
for params_id in self.is_grad_computed[i][partition_id]:
if are_all_related_partitions_reduced(params_id):
self.param_dict[params_id].grad = None # dead code
def flatten_and_print(self, message, tensors, start=0, n=5):
flatten_tensor = self.flatten(tensors)
def print_func():
logger.info(flatten_tensor.contiguous().view(-1).narrow(0, start, n))
self.sequential_execution(print_func, message)
def get_grads_to_reduce(self, i, partition_id):
def get_reducible_portion(key):
grad = self.param_dict[key].grad
total_elements = grad.numel()
start = self.grad_start_offset[i][partition_id][key]
num_elements = min(total_elements - start,
self.partition_size[i] - self.grad_partition_insertion_offset[i][partition_id][key])
if not pg_correctness_test:
if num_elements == total_elements:
return grad
else:
return grad.contiguous().view(-1).narrow(0, int(start), int(num_elements))
else:
if num_elements == total_elements:
return grad.clone()
else:
return grad.clone().contiguous().view(-1).narrow(0, int(start), int(num_elements))
grads_to_reduce = []
for key in self.is_grad_computed[i][partition_id]:
grad = get_reducible_portion(key)
grads_to_reduce.append(grad)
return grads_to_reduce
def sequential_execution(self, function, message, group=None):
if group is None:
group = self.dp_process_group
if dist.get_rank(group=group) == 0:
logger.info(message)
for id in range(dist.get_world_size(group=group)):
if id == dist.get_rank(group=group):
function()
dist.barrier(group=group)
def set_none_gradients_to_zero(self, i, partition_id):
for param_id in self.is_grad_computed[i][partition_id]:
param = self.param_dict[param_id]
if param.grad is None:
                param.grad = torch.zeros_like(param)
######################Reduction Related Methods##############################
def allreduce_bucket(self, bucket, rank=None, log=None):
rank = None
tensor = self.flatten(bucket)
tensor_to_allreduce = tensor
if pg_correctness_test:
communication_data_type = torch.float32
else:
communication_data_type = self.communication_data_type
if communication_data_type != tensor.dtype:
tensor_to_allreduce = tensor.to(communication_data_type)
tensor_to_allreduce.div_(dist.get_world_size(group=self.dp_process_group))
if rank is None:
# "All Reducing"
dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)
else:
global_rank = dist.get_global_rank(self.dp_process_group, rank)
dist.reduce(tensor_to_allreduce, global_rank, group=self.dp_process_group)
if communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce:
if rank is None or rank == dist.get_rank(group=self.dp_process_group):
tensor.copy_(tensor_to_allreduce)
return tensor
def _clear_previous_reduced_grads(self):
if self.previous_reduced_grads is not None:
for param in self.previous_reduced_grads:
param.grad = None # overlap enabled
self.previous_reduced_grads = None
# if rank is specified do a reduction instead of an allreduce
def allreduce_and_copy(self, small_bucket, rank=None, log=None):
if self.overlap_comm:
get_accelerator().synchronize()
# It is safe to clear the previously reduced grads of other partitions
self._clear_previous_reduced_grads()
stream = self.reduction_stream
else:
stream = get_accelerator().current_stream()
with get_accelerator().stream(stream):
allreduced = self.allreduce_bucket(small_bucket, rank=rank, log=log)
if rank is None or rank == dist.get_rank(group=self.dp_process_group):
for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)):
buf.copy_(synced)
def allreduce_no_retain(self, bucket, numel_per_bucket=500000000, rank=None, log=None):
small_bucket = []
numel = 0
for tensor in bucket:
small_bucket.append(tensor)
numel = numel + tensor.numel()
if numel > numel_per_bucket:
self.allreduce_and_copy(small_bucket, rank=rank, log=None)
small_bucket = []
if len(small_bucket) > 0:
self.allreduce_and_copy(small_bucket, rank=rank, log=log)
# allows using reduction of gradients instead of using all_reduce
def buffered_reduce_fallback(self, rank, grads, elements_per_buffer=500000000, log=None):
split_buckets = split_half_float_double(grads)
for i, bucket in enumerate(split_buckets):
self.allreduce_no_retain(bucket, numel_per_bucket=elements_per_buffer, rank=rank, log=log)
#############################################################################
#############################################################################
#############################################################################
# views the tensor as multiple partitions and returns
# those partitions
def get_data_parallel_partitions(self, tensor, group_id):
partitions = []
dp = dist.get_world_size(group=self.real_dp_process_group[group_id])
# dp_id = dist.get_rank(group=self.real_dp_process_group[group_id])
total_num_elements = tensor.numel()
base_size = total_num_elements // dp
remaining = total_num_elements % dp
start = 0
for id in range(dp):
partition_size = base_size
if id < remaining:
partition_size = partition_size + 1
partitions.append(tensor.narrow(0, start, partition_size))
start = start + partition_size
return partitions
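    # A minimal sketch of the partitioning above (hypothetical standalone example, kept in
    # comments so it is not executed inside the class):
    #
    #   import torch
    #   t = torch.arange(10)
    #   dp = 3
    #   base, rem = t.numel() // dp, t.numel() % dp
    #   sizes = [base + (1 if r < rem else 0) for r in range(dp)]   # [4, 3, 3]
    #   parts, start = [], 0
    #   for s in sizes:
    #       parts.append(t.narrow(0, start, s)); start += s         # offsets 0, 4, 7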
def get_partition_info(self, tensor_list, partition_size, partition_id):
params_in_partition = []
params_not_in_partition = []
start_index = partition_size * partition_id
end_index = partition_size * (partition_id + 1)
current_index = 0
first_offset = 0
for tensor in tensor_list:
tensor_size = tensor.numel()
if (current_index >= start_index and current_index < end_index):
params_in_partition.append(tensor)
elif start_index > current_index and start_index < (current_index + tensor_size):
params_in_partition.append(tensor)
                assert (first_offset == 0
                        ), "This can happen at most once, as this must be the first tensor in the partition"
first_offset = start_index - current_index
else:
params_not_in_partition.append(tensor)
current_index = current_index + tensor_size
return params_in_partition, params_not_in_partition, first_offset
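    # A minimal sketch of get_partition_info, assuming tensors of sizes [4, 6, 5],
    # partition_size=5 and partition_id=1 (i.e. the index range [5, 10)): the first tensor
    # ends at index 4 and is not in the partition, the second tensor straddles the start
    # boundary so it is in the partition with first_offset = 5 - 4 = 1, and the third
    # tensor starts at index 10 and is not in the partition.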
def zero_grad(self, set_to_none=False):
"""
Zero FP16 parameter grads.
"""
# FP32 grad should never exist.
# For speed, set model fp16 grad to None by default
for group in self.bit16_groups:
for p in group:
if set_to_none:
p.grad = None # epilogue and in step
else:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
def _model_parallel_all_reduce(self, tensor, op):
""" Perform all reduce within model parallel group, if any.
"""
if self.model_parallel_group is None or self.model_parallel_world_size == 1:
pass
else:
dist.all_reduce(tensor=tensor, op=op, group=self.model_parallel_group)
def get_grad_norm_direct(self, gradients, params, norm_type=2):
"""Clips gradient norm of an iterable of parameters.
This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
added functionality to handle model parallel parameters. Note that
the gradients are modified in place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(g.data.abs().max() for g in gradients)
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=self.dp_process_group)
# Take max across all GPUs.
self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.MAX)
total_norm = total_norm_cuda[0].item()
else:
total_norm = 0.0
# if dist.get_rank() == 0:
# logger.info(f"Total Norm beginning {total_norm}")
for g, p in zip(gradients, params):
# Pipeline parallelism may replicate parameters. Avoid multi-counting.
if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated:
continue
if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0):
param_norm = g.data.double().norm(2)
total_norm += param_norm.item()**2
# Sum across all model parallel GPUs.
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=self.dp_process_group)
self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM)
total_norm = total_norm_cuda[0].item()**(1. / norm_type)
if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
# creates a flat fused tensor from the tensor list starting at the first_offset
# in the first tensor of the list. If there are not enough elements in the tensor
# list then the flat tensor will be padded with zeros
def get_flat_partition(self, tensor_list, first_offset, partition_size, dtype, device, return_tensor_list=False):
flat_tensor_list = []
current_size = 0
for i, tensor in enumerate(tensor_list):
if tensor.grad is None:
tensor.grad = torch.zeros_like(tensor)
tensor = tensor.grad
num_elements = tensor.numel()
tensor_offset = 0
# we need to offset to get to the right element
if i == 0 and first_offset > 0:
tensor_offset = first_offset
num_elements = num_elements - tensor_offset
            # we don't need all elements of the tensor
if num_elements > (partition_size - current_size):
num_elements = partition_size - current_size
# we need a narrow view of the tensor based on the tensor offset and number of elements that
# we need from this tensor
if tensor_offset > 0 or num_elements < tensor.numel():
flat_tensor_list.append(tensor.contiguous().view(-1).narrow(0, int(tensor_offset), int(num_elements)))
else:
flat_tensor_list.append(tensor)
current_size = current_size + num_elements
        # this means it's the last partition and it does not align with the dp boundary. We need to pad before flattening
if current_size < partition_size:
flat_tensor_list.append(torch.zeros(int(partition_size - current_size), dtype=dtype, device=device))
if return_tensor_list:
return flat_tensor_list
return self.flatten(flat_tensor_list)
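    # A minimal sketch of get_flat_partition, assuming two grads of sizes [3, 4],
    # first_offset=1 and partition_size=8: the first grad contributes a narrow view of
    # 2 elements (skipping the first one), the second contributes all 4 elements, and
    # since 6 < 8 a zero tensor of 2 elements is appended before flattening, so the flat
    # partition has exactly partition_size elements.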
def free_grad_in_param_list(self, param_list):
for p in param_list:
p.grad = None # in step
def reset_cpu_buffers(self):
self.norm_for_param_grads = {}
self.local_overflow = False
def log_timers(self, timer_names):
if self.timers is None:
return
self.timers.log(names=list(timer_names))
def start_timers(self, timer_names):
if self.timers is None:
return
for name in timer_names:
self.timers(name).start()
def stop_timers(self, timer_names):
if self.timers is None:
return
for name in timer_names:
self.timers(name).stop()
def set_lr(self, lr):
"""Set the learning rate."""
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr
def get_lr(self):
"""Return the current learning rate."""
return self.optimizer.param_groups[0]["lr"]
def override_loss_scale(self, loss_scale):
if loss_scale != self.external_loss_scale:
logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}')
self.custom_loss_scaler = True
self.external_loss_scale = loss_scale
def scaled_global_norm(self, norm_type=2):
assert norm_type == 2, "only L2 norm supported"
norm_groups = []
for i, group in enumerate(self.bit16_groups):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
if self.cpu_offload:
norm_groups.append(self.complete_grad_norm_calculation_for_cpu_offload(self.params_in_partition[i]))
single_grad_partition = self.single_partition_of_fp32_groups[i].grad
else:
norm_groups.append(self.get_grad_norm_direct(self.averaged_gradients[i], self.params_in_partition[i]))
if self.has_moe_layers:
self._average_expert_grad_norms(norm_groups)
# note that the get_global_norm function only supports l2 norm
return get_global_norm(norm_list=norm_groups)
def get_bit16_param_group(self, group_no):
bit16_partitions = self.parallel_partitioned_bit16_groups[group_no]
partition_id = dist.get_rank(group=self.real_dp_process_group[group_no])
        return [bit16_partitions[partition_id]]
def _optimizer_step(self, group_no):
original_param_groups = self.optimizer.param_groups
self.optimizer.param_groups = [original_param_groups[group_no]]
# Disabling this as the C++ side copy & synchronize is not working correctly
#from deepspeed.ops.adam import DeepSpeedCPUAdam
#if type(self.optimizer) == DeepSpeedCPUAdam and self.dtype == torch.half:
# self.optimizer.step(fp16_param_groups=[self.get_bit16_param_group(group_no)])
#else:
# self.optimizer.step()
self.optimizer.step()
self.optimizer.param_groups = original_param_groups
def step(self, closure=None):
"""
Not supporting closure.
"""
self.micro_step_id = -1
see_memory_usage(f"In step before checking overflow")
# First compute norm for all group so we know if there is overflow
self.check_overflow()
OPTIMIZER_ALLGATHER = 'optimizer_allgather'
OPTIMIZER_GRADIENTS = 'optimizer_gradients'
OPTIMIZER_STEP = 'optimizer_step'
timer_names = [OPTIMIZER_ALLGATHER, OPTIMIZER_GRADIENTS, OPTIMIZER_STEP]
prev_scale = self.loss_scale
self._update_scale(self.overflow)
if self.overflow:
see_memory_usage('After overflow before clearing gradients')
self.zero_grad(set_to_none=True)
if self.cpu_offload:
self.reset_cpu_buffers()
else:
self.averaged_gradients = {}
see_memory_usage('After overflow after clearing gradients')
self.start_timers(timer_names)
self.stop_timers(timer_names)
return
# Step 1:- Calculate gradient norm using bit-16 grads
see_memory_usage('Before norm calculation')
scaled_global_grad_norm = self.scaled_global_norm()
self._global_grad_norm = scaled_global_grad_norm / prev_scale
see_memory_usage('After norm before optimizer')
# Step 2:- run optimizer and upscaling simultaneously
for i, group in enumerate(self.bit16_groups):
self.start_timers([OPTIMIZER_GRADIENTS])
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
if self.cpu_offload:
single_grad_partition = self.single_partition_of_fp32_groups[i].grad
self.unscale_and_clip_grads([single_grad_partition], scaled_global_grad_norm)
self.stop_timers([OPTIMIZER_GRADIENTS])
self.start_timers([OPTIMIZER_STEP])
self._optimizer_step(i)
# Disabled, this is not currently working
#from deepspeed.ops.adam import DeepSpeedCPUAdam
#if not (type(self.optimizer) == DeepSpeedCPUAdam and self.dtype == torch.half):
# bit16_partitions = self.parallel_partitioned_bit16_groups[i]
# fp32_partition = self.single_partition_of_fp32_groups[i]
# bit16_partitions[partition_id].data.copy_(fp32_partition.data)
bit16_partitions = self.parallel_partitioned_bit16_groups[i]
fp32_partition = self.single_partition_of_fp32_groups[i]
bit16_partitions[partition_id].data.copy_(fp32_partition.data)
self.stop_timers([OPTIMIZER_STEP])
else:
                # free gradients of all the parameters that are not updated by this process (ZeRO stage 2)
                self.free_grad_in_param_list(self.params_not_in_partition[i])
                # create a flat gradient buffer for parameters updated by this process
                # If we are the last partition, ensure the grads match the partition size; if not, pad with zero tensors
if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1:
single_grad_partition = self.flatten_dense_tensors_aligned(
self.averaged_gradients[i],
int(self.partition_size[i])).to(self.single_partition_of_fp32_groups[i].dtype)
else:
single_grad_partition = self.flatten(self.averaged_gradients[i]).to(
self.single_partition_of_fp32_groups[i].dtype)
assert single_grad_partition.numel() == self.partition_size[i], \
"averaged gradients have different number of elements that partition size {} {} {} {}".format(
single_grad_partition.numel(), self.partition_size[i], i, partition_id)
self.single_partition_of_fp32_groups[i].grad = single_grad_partition
                # release all the gradients since we have already created the necessary copy in dp_grad_partition (ZeRO stage 2)
self.free_grad_in_param_list(self.params_in_partition[i])
self.averaged_gradients[i] = None
self.unscale_and_clip_grads([single_grad_partition], scaled_global_grad_norm)
self.stop_timers([OPTIMIZER_GRADIENTS])
# Step 3:- run the optimizer if no offloading
self.start_timers([OPTIMIZER_STEP])
self._optimizer_step(i)
# Step 4:- get rid of the fp32 gradients. Not needed anymore
self.single_partition_of_fp32_groups[i].grad = None
del single_grad_partition
bit16_partitions = self.parallel_partitioned_bit16_groups[i]
fp32_partition = self.single_partition_of_fp32_groups[i]
bit16_partitions[partition_id].data.copy_(fp32_partition.data)
self.stop_timers([OPTIMIZER_STEP])
see_memory_usage('After optimizer before all-gather')
if self.cpu_offload:
self.reset_cpu_buffers()
self.start_timers([OPTIMIZER_ALLGATHER])
# Gather the updated weights from everyone.
        # Then all partitions of the model parameters are updated and ready for the next forward pass.
all_gather_dp_groups(partitioned_param_groups=self.parallel_partitioned_bit16_groups,
dp_process_group=self.real_dp_process_group,
start_alignment_factor=self.nccl_start_alignment_factor,
allgather_bucket_size=self.allgather_bucket_size)
self.stop_timers([OPTIMIZER_ALLGATHER])
# TODO: we probably don't need this? just to be safe
for i in range(len(self.bit16_groups)):
self._update_model_bit16_weights(i)
self.log_timers(timer_names)
see_memory_usage('After zero_optimizer step')
return
@torch.no_grad()
def update_lp_params(self):
for i, (bit16_partitions, fp32_partition) in enumerate(
zip(self.parallel_partitioned_bit16_groups, self.single_partition_of_fp32_groups)):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
bit16_partitions[partition_id].data.copy_(fp32_partition.data)
# print_rank_0(f'update_lp_params {i=} {partition_id=}', force=True)
# if i == 0:
# print_rank_0(f'{fp32_partition[:10]=}', force=True)
all_gather_dp_groups(partitioned_param_groups=self.parallel_partitioned_bit16_groups,
dp_process_group=self.real_dp_process_group,
start_alignment_factor=self.nccl_start_alignment_factor,
allgather_bucket_size=self.allgather_bucket_size)
def _average_expert_grad_norms(self, norm_groups):
for i, norm in enumerate(norm_groups):
if self.is_moe_param_group[i]:
scaled_norm = norm * 1.0 / float(dist.get_world_size(group=self.real_dp_process_group[i]))
scaled_norm_tensor = torch.tensor(scaled_norm,
device=get_accelerator().device_name(),
dtype=torch.float)
dist.all_reduce(scaled_norm_tensor, group=self.real_dp_process_group[i])
norm_groups[i] = scaled_norm_tensor.item()
def unscale_and_clip_grads(self, grad_groups_flat, total_norm):
# compute combined scale factor for this group
combined_scale = self.loss_scale
if self.clip_grad > 0.:
# norm is in fact norm*scale
clip = ((total_norm / self.loss_scale) + 1e-6) / self.clip_grad
if clip > 1:
combined_scale = clip * self.loss_scale
for grad in grad_groups_flat:
if isinstance(grad, list):
sub_partitions = grad
for g in sub_partitions:
g.data.mul_(1. / combined_scale)
else:
grad.data.mul_(1. / combined_scale)
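    # A worked example of the combined scale (hypothetical values, kept in comments so it
    # is not executed inside the class):
    #
    #   loss_scale, clip_grad, total_norm = 1024.0, 1.0, 2048.0    # total_norm is still scaled
    #   clip = (total_norm / loss_scale + 1e-6) / clip_grad        # ~2.0
    #   combined_scale = clip * loss_scale if clip > 1 else loss_scale   # ~2048.0
    #   # each flat grad is then multiplied by 1 / combined_scale: unscale and clip in one shot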
def _check_overflow(self, partition_gradients=True):
self.overflow = self.has_overflow(partition_gradients)
# `params` is a list / generator of torch.Variable
def has_overflow_serial(self, params, is_grad_list=False):
for p in params:
if p.grad is not None and self._has_inf_or_nan(p.grad.data):
return True
return False
def has_overflow_partitioned_grads_serial(self):
for i in range(len(self.bit16_groups)):
for j, grad in enumerate(self.averaged_gradients[i]):
if grad is not None and self._has_inf_or_nan(grad.data, j):
return True
return False
def has_overflow(self, partition_gradients=True):
if partition_gradients:
overflow = self.local_overflow if self.cpu_offload else self.has_overflow_partitioned_grads_serial()
overflow_gpu = get_accelerator().ByteTensor([overflow])
            '''This will capture overflow across all data parallel and expert parallel processes,
            since expert parallel processes are a subset of data parallel processes.'''
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.dp_process_group)
else:
params = []
for group in self.bit16_groups:
for param in group:
params.append(param)
overflow = self.has_overflow_serial(params, is_grad_list=partition_gradients)
overflow_gpu = get_accelerator().ByteTensor([overflow])
# Since each model parallel GPU carries only part of the model,
# make sure overflow flag is synced across all the model parallel GPUs
self._model_parallel_all_reduce(tensor=overflow_gpu, op=dist.ReduceOp.MAX)
overflow = overflow_gpu[0].item()
return bool(overflow)
# `x` is a torch.Tensor
@staticmethod
def _has_inf_or_nan(x, j=None):
try:
            # If x is half, the .float() incurs an additional deep copy, but it's necessary if
            # PyTorch's .sum() creates a one-element tensor of the same type as x
            # (which is true for some recent versions of PyTorch).
cpu_sum = float(x.float().sum())
# More efficient version that can be used if .sum() returns a Python scalar
# cpu_sum = float(x.sum())
except RuntimeError as instance:
# We want to check if inst is actually an overflow exception.
# RuntimeError could come from a different error.
# If so, we still want the exception to propagate.
if "value cannot be converted" not in instance.args[0]:
raise
return True
else:
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
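    # A hedged note on the check above: `cpu_sum != cpu_sum` is the standard NaN test,
    # since NaN is the only float value that does not compare equal to itself; for example
    # float(torch.tensor([1.0, float('nan')]).sum()) is NaN, so the comparison returns True.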
def backward(self, loss, retain_graph=False):
"""
:attr:`backward` performs the following steps:
1. fp32_loss = loss.float()
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves
"""
self.micro_step_id += 1
if self.contiguous_gradients:
self.ipg_buffer = []
buf_0 = torch.empty(int(self.reduce_bucket_size),
dtype=self.dtype,
device=get_accelerator().current_device_name())
self.ipg_buffer.append(buf_0)
# Use double buffers to avoid data access conflict when overlap_comm is enabled.
if self.overlap_comm:
buf_1 = torch.empty(int(self.reduce_bucket_size),
dtype=self.dtype,
device=get_accelerator().current_device_name())
self.ipg_buffer.append(buf_1)
self.ipg_index = 0
if self.custom_loss_scaler:
scaled_loss = self.external_loss_scale * loss
scaled_loss.backward()
else:
self.loss_scaler.backward(loss.float(), retain_graph=retain_graph)
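    # A worked example of the scaling in `backward`, assuming a loss scale of 1024: a loss
    # of 0.5 is backpropagated as 512, so the bit16 gradients are 1024x larger than the true
    # gradients; they are divided back down later via combined_scale in
    # unscale_and_clip_grads before the optimizer step.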
def check_overflow(self, partition_gradients=True):
self._check_overflow(partition_gradients)
def _update_scale(self, has_overflow=False):
self.loss_scaler.update_scale(has_overflow)
# Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups)
# Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
def _get_loss_scale(self):
if self.custom_loss_scaler:
return self.external_loss_scale
else:
return self.loss_scaler.cur_scale
def _set_loss_scale(self, value):
self.loss_scaler.cur_scale = value
loss_scale = property(_get_loss_scale, _set_loss_scale)
cur_scale = property(_get_loss_scale, _set_loss_scale)
# Return group tensor after removing paddings that are added for alignment to DP world size.
# This method works on the assumption that each group contains a single flattened tensor.
def _get_groups_without_padding(self, groups_with_padding):
groups_without_padding = []
for i, group in enumerate(groups_with_padding):
lean_length = group.numel() - self.groups_padding[i]
groups_without_padding.append(group[:lean_length])
return groups_without_padding
# Return optimizer state after removing paddings that are added for alignment.
def _get_state_without_padding(self, state_with_padding, padding):
lean_state = {}
for key, value in state_with_padding.items():
if torch.is_tensor(value):
lean_length = value.numel() - padding
lean_state[key] = value[:lean_length]
else:
lean_state[key] = value
return lean_state
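# Worked example (assumed values): with padding=28 and an optimizer state containing an
# 'exp_avg' tensor of 100 elements, the returned state keeps only the first 72 elements,
# while non-tensor entries (e.g. 'step') are passed through unchanged.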
# Return base optimizer states.
# This method assumes that each param group contains a single flattened tensor.
def _get_base_optimizer_state(self):
optimizer_groups_state = []
for i, group in enumerate(self.optimizer.param_groups):
p = group['params'][0]
lean_optimizer_state = self._get_state_without_padding(self.optimizer.state[p], self.groups_padding[i])
optimizer_groups_state.append(lean_optimizer_state)
return optimizer_groups_state
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
state_dict = {}
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict[CLIP_GRAD] = self.clip_grad
if self.elastic_checkpoint:
state_dict[BASE_OPTIMIZER_STATE] = self._get_base_optimizer_state()
else:
state_dict[BASE_OPTIMIZER_STATE] = self.optimizer.state_dict()
# Remove paddings for DP alignment to enable loading for other alignment values
fp32_groups_without_padding = self._get_groups_without_padding(self.single_partition_of_fp32_groups)
state_dict[SINGLE_PARTITION_OF_FP32_GROUPS] = fp32_groups_without_padding
state_dict[
ZERO_STAGE] = ZeroStageEnum.gradients if self.partition_gradients else ZeroStageEnum.optimizer_states
state_dict[GROUP_PADDINGS] = self.groups_padding
state_dict[PARTITION_COUNT] = self.partition_count
state_dict[DS_VERSION] = version
state_dict[PARAM_SLICE_MAPPINGS] = self._param_slice_mappings
return state_dict
# Restore base optimizer fp32 weights from elastic checkpoint by:
# 1) Merging fp32 weights from checkpoints of all partitions
# 2) Extracting fp32 weights for current partition from merged weights
# 3) Using extracted weights to update base optimizer weights directly.
def _restore_from_elastic_fp32_weights(self, all_state_dict):
merged_single_partition_of_fp32_groups = []
for i in range(len(self.single_partition_of_fp32_groups)):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
merged_partitions = [sd[SINGLE_PARTITION_OF_FP32_GROUPS][i] for sd in all_state_dict]
if self.is_moe_group(self.optimizer.param_groups[i]):
ranks = self.get_ep_ranks(group_name=self.optimizer.param_groups[i]['name'])
merged_partitions = [merged_partitions[i] for i in ranks]
flat_merged_partitions = self.flatten_dense_tensors_aligned(
merged_partitions,
self.nccl_start_alignment_factor * dist.get_world_size(group=self.real_dp_process_group[i]))
dp_partitions = self.get_data_parallel_partitions(flat_merged_partitions, i)
merged_single_partition_of_fp32_groups.append(dp_partitions[partition_id])
for current, saved in zip(self.single_partition_of_fp32_groups, merged_single_partition_of_fp32_groups):
current.data.copy_(saved.data)
# Restore base optimizer fp32 weights from ZeRO fp16 or bfloat16 weights
def _restore_from_bit16_weights(self):
for group_id, (bit16_partitions, fp32_partition) in enumerate(
zip(self.parallel_partitioned_bit16_groups, self.single_partition_of_fp32_groups)):
partition_id = dist.get_rank(group=self.real_dp_process_group[group_id])
fp32_partition.data.copy_(bit16_partitions[partition_id].data)
# Refresh the fp32 master params from the fp16 or bfloat16 copies.
def refresh_fp32_params(self):
self._restore_from_bit16_weights()
# Extract optimizer state for current partition from merged states of all partitions
def _partition_base_optimizer_state(self, state_key, all_partition_states, group_id):
partition_id = dist.get_rank(group=self.real_dp_process_group[group_id])
alignment = dist.get_world_size(group=self.real_dp_process_group[group_id])
if torch.is_tensor(all_partition_states[0]):
flat_merged_partitions = self.flatten_dense_tensors_aligned(all_partition_states, alignment)
dp_partitions = self.get_data_parallel_partitions(flat_merged_partitions, group_id)
return dp_partitions[partition_id]
else:
# Assume non-tensor states are not partitioned and equal across ranks, so return first one
return all_partition_states[0]
def _restore_base_optimizer_state(self, base_optimizer_group_states):
if type(base_optimizer_group_states) == dict:
base_optimizer_group_states = base_optimizer_group_states['state']
for i, group in enumerate(self.optimizer.param_groups):
p = group['params'][0]
for key, saved in base_optimizer_group_states[i].items():
if torch.is_tensor(self.optimizer.state[p][key]):
dst_tensor = self.optimizer.state[p][key]
src_tensor = _get_padded_tensor(saved, dst_tensor.numel())
self.optimizer.state[p][key].data.copy_(src_tensor.data)
else:
self.optimizer.state[p][key] = saved
def get_ep_ranks(self, rank=0, group_name=None):
from deepspeed.utils import groups
expert_parallel_size_ = groups._get_expert_parallel_world_size(group_name)
world_size = groups._get_data_parallel_world_size()
rank = groups._get_expert_parallel_rank(group_name)
ranks = range(rank, world_size, expert_parallel_size_)
return list(ranks)
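# Worked example (assumed values): with an expert-parallel size of 2, a data-parallel
# world size of 8 and expert-parallel rank 1, the returned ranks are
# range(1, 8, 2) -> [1, 3, 5, 7].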
# Restore base optimizer state from elastic checkpoint by
# 1) Merging optimizer state from checkpoints of all partitions
# 2) Extracting optimizer state for current partition from the merged state
# 3) Using the extracted value to directly update the base optimizer.
def _restore_elastic_base_optimizer_state(self, all_state_dict):
base_optimizer_group_states = []
for i in range(len(self.optimizer.param_groups)):
partition_states = {}
all_partition_group_states = [sd[BASE_OPTIMIZER_STATE][i] for sd in all_state_dict]
if self.is_moe_group(self.optimizer.param_groups[i]):
ranks = self.get_ep_ranks(group_name=self.optimizer.param_groups[i]['name'])
all_partition_group_states = [all_partition_group_states[i] for i in ranks]
for key in all_partition_group_states[0].keys():
all_partition_states = [all_states[key] for all_states in all_partition_group_states]
partition_states[key] = self._partition_base_optimizer_state(key, all_partition_states, i)
base_optimizer_group_states.append(partition_states)
self._restore_base_optimizer_state(base_optimizer_group_states)
def load_state_dict(self,
state_dict_list,
load_optimizer_states=True,
load_from_fp32_weights=False,
checkpoint_folder=None):
if checkpoint_folder:
self._load_universal_checkpoint(checkpoint_folder, load_optimizer_states, load_from_fp32_weights)
else:
self._load_legacy_checkpoint(state_dict_list, load_optimizer_states, load_from_fp32_weights)
def _load_universal_checkpoint(self, checkpoint_folder, load_optimizer_states, load_from_fp32_weights):
self._load_hp_checkpoint_state(checkpoint_folder)
@property
def param_groups(self):
"""Forward the wrapped optimizer's parameters."""
return self.optimizer.param_groups
def _load_hp_checkpoint_state(self, checkpoint_dir):
checkpoint_dir = os.path.join(checkpoint_dir, "zero")
tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu)
tp_world_size = self.mpu.get_slice_parallel_world_size()
for i, _ in enumerate(self.optimizer.param_groups):
for lp in self.bit16_groups[i]:
if lp._hp_mapping is not None:
#print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}")
lp.load_hp_checkpoint_state(os.path.join(checkpoint_dir, self.param_names[lp]), tp_rank,
tp_world_size)
def _load_legacy_checkpoint(self, state_dict_list, load_optimizer_states=True, load_from_fp32_weights=False):
r"""Loading ZeRO checkpoint
Arguments:
state_dict_list: List of all saved ZeRO checkpoints, one for each saved partition.
Note that the number of saved partitions may differ from number of loading partitions to support
changing GPU count, specifically DP world size, between saving and loading checkpoints.
load_optimizer_states: Boolean indicating whether or not to load base optimizer states
load_from_fp32_weights: Boolean indicating whether to initialize fp32 master weights from fp32
copies in checkpoints (no precision loss) or from model's fp16 copies (with precision loss).
"""
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# I think it should actually be ok to reload the optimizer before the model.
dp_rank = dist.get_rank(group=self.dp_process_group)
current_rank_sd = state_dict_list[dp_rank]
self.loss_scaler = current_rank_sd.get('loss_scaler', self.loss_scaler)
self.dynamic_loss_scale = current_rank_sd.get('dynamic_loss_scale', self.dynamic_loss_scale)
self.overflow = current_rank_sd.get('overflow', self.overflow)
self.clip_grad = current_rank_sd.get(CLIP_GRAD, self.clip_grad)
ckpt_version = current_rank_sd.get(DS_VERSION, False)
assert ckpt_version, f"Empty ds_version in checkpoint, not clear how to proceed"
ckpt_version = pkg_version.parse(ckpt_version)
# zero stage 1 mode
if not self.partition_gradients:
required_version = pkg_version.parse("0.3.17")
error_str = f"ZeRO stage 1 changed in {required_version} and is not backwards compatible " \
"with older stage 1 checkpoints. If you'd like to load an old ZeRO-1 checkpoint " \
"please use an older version of DeepSpeed (<= 0.5.8) and set 'legacy_stage1': true in your zero config json."
assert required_version <= ckpt_version, f"Old version: {ckpt_version} {error_str}"
ckpt_is_rigid = isinstance(current_rank_sd[BASE_OPTIMIZER_STATE], dict)
# padding is always at the last rank/partition
# if DP=1024 and param-group elems=16 -> padding will be 1024-16 across all but one rank
# scenario-1 (shrink): saving w. 4 gpus -> loading w. 2 gpus
# scenario-2 (expand): saving w. 2 gpus -> loading w. 4 gpus
# if load_optimizer_states:
# if new_dp_size:
# self.strip_padding()
# self.add_padding_w_new_dp_size()
# self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE])
if load_optimizer_states:
if ckpt_is_rigid:
# loading rigid ckpt into either rigid or elastic exec
self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE])
else:
if self.elastic_checkpoint:
# loading elastic into elastic exec
self._restore_elastic_base_optimizer_state(state_dict_list)
else:
# loading an elastic checkpoint into rigid exec
self._restore_base_optimizer_state(current_rank_sd[BASE_OPTIMIZER_STATE])
# At this point, the optimizer's references to the model's fp32 parameters are up to date.
# The optimizer's hyperparameters and internal buffers are also up to date.
# However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
# out of date. There are two options.
# 1: Refresh the master params from the model's fp16 params.
# This requires less storage but incurs precision loss.
# 2: Save and restore the fp32 master copies separately.
# We choose option 1 if changing DP degree and option 2 otherwise.
#
# Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
# of their associated parameters, because it's possible those buffers might not exist yet in
# the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
# constructed in the same way as the one whose state_dict we are loading, the same master params
# are guaranteed to exist, so we can just copy_() from the saved master params.
if load_from_fp32_weights:
# option 2 from above
if self.elastic_checkpoint and not ckpt_is_rigid:
self._restore_from_elastic_fp32_weights(state_dict_list)
else:
# For non-elastic checkpoint, simply copying from saved weights of current rank is sufficient.
for current, saved in zip(self.single_partition_of_fp32_groups,
current_rank_sd[SINGLE_PARTITION_OF_FP32_GROUPS]):
src_tensor = _get_padded_tensor(saved, current.numel())
current.data.copy_(src_tensor.data)
else:
# option 1 from above
self._restore_from_bit16_weights()
if load_optimizer_states:
self._link_all_hp_params()
def _handle_overflow(cpu_sum, x, i):
import math
rank = dist.get_rank()
if rank == 0:
t_i = -1
for v_i, v in enumerate(x.data.contiguous().view(-1)):
if not math.isfinite(float(v)):
t_i = v_i
break
logger.info(f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}")
def estimate_zero2_model_states_mem_needs(total_params,
num_gpus_per_node=1,
num_nodes=1,
cpu_offload=True,
additional_buffer_factor=1.5):
total_gpus = num_nodes * num_gpus_per_node
if cpu_offload:
gpu_mem = 2 * total_params
cpu_mem = total_params * max(4 * total_gpus, 16) * additional_buffer_factor
else:
gpu_mem = 4 * total_params + int(16 * total_params / total_gpus)
cpu_mem = total_params * 4 * num_gpus_per_node * additional_buffer_factor
return int(cpu_mem), int(gpu_mem)
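# Worked example (assumed values): total_params=1e9, num_gpus_per_node=8, num_nodes=1.
# With cpu_offload=True:  gpu_mem = 2 * 1e9 bytes (~1.9GB),
#                         cpu_mem = 1e9 * max(4 * 8, 16) * 1.5 = 48e9 bytes (~44.7GB).
# With cpu_offload=False: gpu_mem = 4 * 1e9 + 16 * 1e9 / 8 = 6e9 bytes (~5.6GB),
#                         cpu_mem = 1e9 * 4 * 8 * 1.5 = 48e9 bytes (~44.7GB).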
def model_to_params(model):
# shared params calculated only once
total_params = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values())
return total_params
def estimate_zero2_model_states_mem_needs_all_live(model,
num_gpus_per_node=1,
num_nodes=1,
additional_buffer_factor=1.5):
"""
Print out estimates on memory usage requirements for ZeRO 2 params, optim states and gradients
for a given ``model`` and hardware setup.
If you have an actual model object, use this function and everything will be derived
automatically.
If it's a hypothetical model, use ``estimate_zero2_model_states_mem_needs_all_cold`` where you have to pass
the ``total_params`` explicitly.
Args:
- ``model``: ``nn.Module`` object
- ``num_gpus_per_node``: how many gpus per node (defaults to 1)
- ``num_nodes``: how many nodes (defaults to 1),
- ``additional_buffer_factor``: estimation factor (defaults to 1.5):
"""
total_params = model_to_params(model)
estimate_zero2_model_states_mem_needs_all_cold(total_params=total_params,
num_gpus_per_node=num_gpus_per_node,
num_nodes=num_nodes,
additional_buffer_factor=additional_buffer_factor)
def estimate_zero2_model_states_mem_needs_all_cold(total_params,
num_gpus_per_node=1,
num_nodes=1,
additional_buffer_factor=1.5):
"""
Print out estimates on memory usage requirements for ZeRO 2 params, optim states and gradients
for a given ``model`` and hardware setup.
If it's a hypothetical model, use this function where you have to pass
the ``total_params`` explicitly.
If you have an actual model object, use ``estimate_zero2_model_states_mem_needs_all_live`` and everything
will be derived automatically.
Args:
- ``total_params``: total model params
- ``num_gpus_per_node``: how many gpus per node (defaults to 1)
- ``num_nodes``: how many nodes (defaults to 1),
- ``additional_buffer_factor``: estimation factor (defaults to 1.5):
"""
def format_options(cpu_offload):
enabled = []
device = f'{OffloadDeviceEnum.cpu:4}' if cpu_offload else "none"
enabled.append(f"offload_optimizer={device}")
return ", ".join(enabled)
nodes_str = "nodes" if num_nodes > 1 else "node"
gpus_str = "GPUs" if num_gpus_per_node > 1 else "GPU"
print("Estimated memory needed for params, optim states and gradients for a:\n"
f"HW: Setup with {num_nodes} {nodes_str}, {num_gpus_per_node} {gpus_str} per node.\n"
f"SW: Model with {int(total_params/1e6)}M total params.")
print(" per CPU | per GPU | Options")
for cpu_offload in [True, False]:
cpu_mem, gpu_mem = estimate_zero2_model_states_mem_needs(total_params=total_params,
num_gpus_per_node=num_gpus_per_node,
num_nodes=num_nodes,
cpu_offload=cpu_offload,
additional_buffer_factor=additional_buffer_factor)
options_str = format_options(cpu_offload=cpu_offload)
print(f" {cpu_mem/2**30:7.2f}GB | {gpu_mem/2**30:6.2f}GB | {options_str}")
| 110,957 | 46.397693 | 237 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/zero/parameter_offload.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import sys
import torch
from collections import OrderedDict
from deepspeed.runtime.utils import see_memory_usage
from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum
from deepspeed.runtime.zero.partition_parameters import _init_external_params
from deepspeed.runtime.zero.partition_parameters import *
from deepspeed.runtime.zero.partitioned_param_coordinator import PartitionedParameterCoordinator, InflightParamRegistry, iter_params
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator
FWD_MODULE_STACK = list()
def is_builtin_type(obj):
# https://stackoverflow.com/a/17795199
return obj.__class__.__module__ == '__builtin__' or obj.__class__.__module__ == "builtins"
def isinstance_namedtuple(obj: object) -> bool:
"""
Is this an instance of namedtuple/NamedTuple?
From: https://stackoverflow.com/a/62692640
Args:
obj (object): An object.
Returns:
bool: True if namedtuple/NamedTuple else False.
"""
return isinstance(obj, tuple) and hasattr(obj, '_asdict') and hasattr(obj, '_fields')
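# Illustrative check (assumed example):
#   >>> from collections import namedtuple
#   >>> Point = namedtuple('Point', ['x', 'y'])
#   >>> isinstance_namedtuple(Point(1, 2)), isinstance_namedtuple((1, 2))
#   (True, False)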
# ensure we only warn once, otherwise every iteration will trigger a warning
warned = False
def _apply_to_tensors_only(module, functional, backward_function, outputs):
"""
Apply a torch.autograd.Function that calls a `backward_function` to every Tensor in `outputs`.
Args:
module (torch.nn.Module): A torch module
functional (Type[torch.autograd.Function]): The function class to apply.
backward_function (Callable[[torch.nn.Module], None]): A backward_function to pass to
`functional.apply`.
outputs (Any): The output of `module`.
Returns:
Any: The output of `module`.
"""
if isinstance(outputs, (tuple, list)):
touched_outputs = []
for output in outputs:
touched_output = _apply_to_tensors_only(module, functional, backward_function, output)
touched_outputs.append(touched_output)
if isinstance_namedtuple(outputs):
# namedtuples require a slightly different syntax.
return outputs.__class__(*touched_outputs)
return outputs.__class__(touched_outputs)
elif isinstance(outputs, dict):
# apply inplace to avoid recreating dict inherited objects
for key in outputs.keys():
outputs[key] = _apply_to_tensors_only(module, functional, backward_function, outputs[key])
return outputs
elif isinstance(outputs, torch.Tensor):
# this also applies to torch.Tensor's subclasses like torch.nn.parameter.Parameter
touched_outputs = functional.apply(module, backward_function, outputs)
# restore zero param attributes if those get stripped by `backward_function`
if not is_zero_param(touched_outputs) and is_zero_param(outputs):
touched_outputs.ds_param_alias = outputs
return touched_outputs
else:
if not is_builtin_type(outputs):
global warned
if not warned and dist.get_rank() == 0:
logger.warning(
f"A module has unknown inputs or outputs type ({type(outputs)}) and the tensors embedded in it cannot be detected. "
"The ZeRO-3 hooks designed to trigger before or after backward pass of the module relies on knowing the input and "
"output tensors and therefore may not get triggered properly.")
warned = True
return outputs
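# Illustrative traversal (assumed example): given a module output such as
#   {"logits": t1, "aux": (t2, t3), "meta": "str"}
# the helper above recurses into the dict and the tuple, applies `functional.apply`
# to t1, t2 and t3, leaves the non-tensor entry untouched, and returns a structure
# of the same shape.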
#for each tensor in outputs run the forward_function and register backward_function as hook
def _apply_forward_and_backward_to_tensors_only(module, forward_function, backward_function, outputs):
if type(outputs) is tuple:
touched_outputs = []
for output in outputs:
touched_output = _apply_forward_and_backward_to_tensors_only(module, forward_function, backward_function,
output)
touched_outputs.append(touched_output)
return tuple(touched_outputs)
elif type(outputs) is torch.Tensor:
forward_function(outputs)
if outputs.requires_grad:
outputs.register_hook(backward_function)
return outputs
else:
return outputs
class ZeROOrderedDict(OrderedDict):
def __init__(self, parent_module, *args, **kwargs):
"""A replacement for ``collections.OrderedDict`` to detect external ZeRO params.
Args:
parent_module (``torch.nn.Module``): the module whose ``_parameters`` collection is being replaced
"""
super().__init__(*args, **kwargs)
self._parent_module = parent_module
self._in_forward = False
def __getitem__(self, key):
param = super().__getitem__(key)
# Params can be registered as None (e.g., bias)
if param is None:
return param
if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
if self._parent_module._parameters._in_forward:
register_external_parameter(FWD_MODULE_STACK[-1], param)
param.all_gather()
print_rank_0(f'Registering external parameter from getter {key} ds_id = {param.ds_id}', force=False)
return param
def _inject_parameters(module, cls):
for module in module.modules():
if cls == ZeROOrderedDict:
new_param = cls(parent_module=module)
else:
new_param = cls()
for key, param in module._parameters.items():
new_param[key] = param
module._parameters = new_param
class PreBackwardFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, module, pre_backward_function, outputs):
ctx.module = module
ctx.pre_backward_function = pre_backward_function
if not hasattr(module, "applied_pre_backward_ref_cnt"):
module.applied_pre_backward_ref_cnt = 0
module.applied_pre_backward_ref_cnt += 1
#print(f"After Forward: {ctx.module.__class__.__name__}")
outputs = outputs.detach()
return outputs
@staticmethod
def backward(ctx, *args):
#print(f"Before Backward: {ctx.module.__class__.__name__}")
ctx.pre_backward_function(ctx.module)
return (None, None) + args
class PostBackwardFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, module, pre_backward_function, output):
ctx.module = module
if output.requires_grad:
#TODO: sometimes post backward does not seem to be triggered; debug in detail.
#Should only cause an increase in memory, not a correctness issue.
#if output.grad_fn.__class__.__name__ == 'ViewBackward':
# ctx.view=True
# print(f"Warning view tensor for input to module : {module.__class__.__name__}. Backward hooks may not trigger properly")
#assert len(module.parameters(recurse=False)), "The input tensor to the module is a view, and autograd Function or register_hook is not triggered with view tensors."
#if module.ds_grads_remaining == 0:
# print(f"Before Forward: {ctx.module.__class__.__name__}")
module.ds_grads_remaining += 1
ctx.pre_backward_function = pre_backward_function
output = output.detach()
return output
@staticmethod
def backward(ctx, *args):
ctx.module.ds_grads_remaining = ctx.module.ds_grads_remaining - 1
if ctx.module.ds_grads_remaining == 0:
ctx.pre_backward_function(ctx.module)
#print(f"After Backward: {ctx.module.__class__.__name__}")
return (None, None) + args
class DeepSpeedZeRoOffload(object):
def __init__(self,
module,
timers,
ds_config,
overlap_comm=True,
prefetch_bucket_size=50000000,
max_reuse_distance=1000000000,
max_live_parameters=1000000000,
param_persistence_threshold=100000,
model_persistence_threshold=sys.maxsize,
offload_param_config=None,
mpu=None,
zero_param_parallel_group=None,
zero_quantized_weights=False):
see_memory_usage("DeepSpeedZeRoOffload initialize [begin]", force=True)
print_rank_0(f"initialized {__class__.__name__} with args: {locals()}", force=False)
self.module = module
self.timers = timers
self.dtype = list(module.parameters())[0].dtype
self.offload_device = None
self.offload_param_pin_memory = False
self.zero_param_parallel_group = zero_param_parallel_group
self.zero_quantized_weights = zero_quantized_weights
if offload_param_config is not None and offload_param_config.device != OffloadDeviceEnum.none:
self.offload_device = offload_param_config.device
self.offload_param_pin_memory = offload_param_config.pin_memory
self._convert_to_zero_parameters(ds_config, module, mpu)
for m in module.modules():
_init_external_params(m)
_inject_parameters(module, ZeROOrderedDict)
self.param_numel_persistence_threshold = int(param_persistence_threshold)
self.model_persistence_threshold = int(model_persistence_threshold)
self.persistent_parameters = self.mark_persistent_parameters(self.param_numel_persistence_threshold,
self.model_persistence_threshold)
self.param_coordinators = {}
self._prefetch_bucket_sz = int(prefetch_bucket_size)
self._max_reuse_distance_in_numel = int(max_reuse_distance)
self._max_available_parameters_in_numel = int(max_live_parameters)
self.__allgather_stream = get_accelerator().Stream() if overlap_comm else get_accelerator().default_stream()
if not hasattr(module, "ds_inflight_param_registry"):
module.ds_inflight_param_registry = dict()
# we need two registries, one for training and one for eval. They will be used when creating PartitionedParameterCoordinator
module.ds_inflight_param_registry[True] = InflightParamRegistry()
module.ds_inflight_param_registry[False] = InflightParamRegistry()
self.__inflight_param_registry = module.ds_inflight_param_registry
self.forward_hooks = []
self.backward_hooks = []
self.setup_zero_stage3_hooks()
print_rank_0(
f'Created module hooks: forward = {len(self.forward_hooks)}, backward = {len(self.backward_hooks)}',
force=False)
see_memory_usage("DeepSpeedZeRoOffload initialize [end]", force=True)
@instrument_w_nvtx
def partition_all_parameters(self):
"""Partitioning Parameters that were not partitioned usually if parameters
of modules whose input parameters do not require grad computation do not
trigger post call and will therefore will remain unpartitioned"""
self.get_param_coordinator(training=self.module.training).release_and_reset_all(self.module)
for param in iter_params(self.module, recurse=True):
if param.ds_status != ZeroParamStatus.NOT_AVAILABLE:
raise RuntimeError(f"{param.ds_summary()} expected to be released")
def get_param_coordinator(self, training):
if not training in self.param_coordinators:
self.param_coordinators[training] = PartitionedParameterCoordinator(
prefetch_bucket_sz=self._prefetch_bucket_sz,
max_reuse_distance_in_numel=self._max_reuse_distance_in_numel,
max_available_parameters_in_numel=self._max_available_parameters_in_numel,
allgather_stream=self.__allgather_stream,
inflight_param_registry=self.__inflight_param_registry[training],
prefetch_nvme=self.offload_device == OffloadDeviceEnum.nvme,
timers=self.timers,
)
return self.param_coordinators[training]
def empty_partition_cache(self):
self.partition_all_parameters()
def _convert_to_zero_parameters(self, ds_config, module, mpu):
non_zero_params = [p for p in module.parameters() if not is_zero_param(p)]
if non_zero_params:
zero_params = [p for p in module.parameters() if is_zero_param(p)]
if zero_params:
zero_params[0].convert_to_zero_parameters(param_list=non_zero_params)
else:
group = None
if mpu:
group = mpu.get_data_parallel_group()
Init(module=module,
data_parallel_group=group,
dtype=self.dtype,
config_dict_or_path=ds_config,
remote_device=self.offload_device,
pin_memory=self.offload_param_pin_memory,
mpu=mpu,
zero_param_parallel_group=self.zero_param_parallel_group,
zero_quantized_weights=self.zero_quantized_weights)
def destroy(self):
self._remove_module_hooks()
def _remove_module_hooks(self):
num_forward_hooks = len(self.forward_hooks)
num_backward_hooks = len(self.backward_hooks)
for hook in self.forward_hooks:
hook.remove()
for hook in self.backward_hooks:
hook.remove()
print_rank_0(f'Deleted module hooks: forward = {num_forward_hooks}, backward = {num_backward_hooks}',
force=False)
def setup_zero_stage3_hooks(self):
self.hierarchy = 0
#reset step if in inference mode
@instrument_w_nvtx
def _end_of_forward_hook(module, *args):
if not torch._C.is_grad_enabled():
self.get_param_coordinator(training=False).reset_step()
#likely one of them should be enough but just to be safe
self._register_hooks_recursively(self.module)
self.module.register_forward_hook(_end_of_forward_hook)
# Add top module to stack trace
global FWD_MODULE_STACK
FWD_MODULE_STACK.append(self.module)
def mark_persistent_parameters(self, param_threshold, model_threshold):
persistent_params = []
total_persistent_parameters = 0
params_count = 0
for name, param in self.module.named_parameters(recurse=True):
if param.ds_numel + total_persistent_parameters > model_threshold:
continue
if param.ds_numel <= param_threshold:
params_count += 1
param.ds_persist = True
persistent_params.append(param)
total_persistent_parameters += param.ds_numel
print_rank_0(
f"Parameter Offload: Total persistent parameters: {total_persistent_parameters} in {params_count} params",
force=True)
return persistent_params
def _register_hooks_recursively(self, module, count=[0]):
my_count = count[0]
module.id = my_count
#print(f"{module.__class__} : {module.id}")
for child in module.children():
count[0] = count[0] + 1
self._register_hooks_recursively(child, count=count)
@instrument_w_nvtx
def _pre_forward_module_hook(module, *args):
self.pre_sub_module_forward_function(module)
@instrument_w_nvtx
def _post_forward_module_hook(module, input, output):
global FWD_MODULE_STACK
FWD_MODULE_STACK.pop()
if output is None:
output = []
elif not isinstance(output, (list, tuple)):
if torch.is_tensor(output):
output = [output]
else:
#print(f'got UNKNOWN type {type(output)}')
outputs = []
output = output if isinstance(output, dict) else vars(output)
for name, val in output.items():
if not name.startswith('__') and torch.is_tensor(val):
outputs.append(val)
output = outputs
for item in filter(lambda item: is_zero_param(item) or hasattr(item, 'ds_param_alias'), output):
key = id(item) if hasattr(item, 'ds_id') else id(item.ds_param_alias)
actual_external_param = item if hasattr(item, 'ds_id') else item.ds_param_alias
if not any(key in m._external_params for m in FWD_MODULE_STACK):
actual_external_param.is_external_param = True
module_to_register = FWD_MODULE_STACK[-1]
register_external_parameter(module_to_register, actual_external_param)
print_rank_0(
f'Registering dangling parameter for module {module_to_register.__class__.__name__}, ds_id = {actual_external_param.ds_id}.',
force=False)
# It's possible that the parameter was already external to the completed module. If so, remove its
# registration as it will be covered by the outer module instead.
if key in module._external_params:
print_rank_0(
f' Unregistering nested dangling parameter from module {module.__class__.__name__}, ds_id = {actual_external_param.ds_id}',
force=False)
unregister_external_parameter(module, actual_external_param)
actual_external_param.all_gather()
self.post_sub_module_forward_function(module)
def _pre_backward_module_hook(module, inputs, output):
@instrument_w_nvtx
def _run_before_backward_function(sub_module):
# some models (e.g. Albert) may run multiple forwards on the same layer in a loop
# before doing backwards, so each backward will need a pre-fetch - using reference
# counting to support this scenario
#print(f"COUNTER before: {sub_module.applied_pre_backward_ref_cnt}")
if sub_module.applied_pre_backward_ref_cnt > 0:
self.pre_sub_module_backward_function(sub_module)
sub_module.applied_pre_backward_ref_cnt -= 1
#print(f"COUNTER after: {sub_module.applied_pre_backward_ref_cnt}")
return _apply_to_tensors_only(module, PreBackwardFunction, _run_before_backward_function, output)
#This is an alternative to _post_backward_module_hook;
#it uses tensor.register_hook instead of torch.autograd.Function
def _alternate_post_backward_module_hook(module, inputs):
module.ds_grads_remaining = 0
#print(f"Before Forward {module.__class__.__name__}")
def _run_after_backward_hook(*unused):
module.ds_grads_remaining = module.ds_grads_remaining - 1
if module.ds_grads_remaining == 0:
#print(f"After backward {module.__class__.__name__}")
self.post_sub_module_backward_function(module)
def _run_before_forward_function(input):
if input.requires_grad:
module.ds_grads_remaining += 1
return _apply_forward_and_backward_to_tensors_only(module, _run_before_forward_function,
_run_after_backward_hook, inputs)
def _post_backward_module_hook(module, inputs):
module.ds_grads_remaining = 0
@instrument_w_nvtx
def _run_after_backward_function(sub_module):
if sub_module.ds_grads_remaining == 0:
self.post_sub_module_backward_function(sub_module)
return _apply_to_tensors_only(module, PostBackwardFunction, _run_after_backward_function, inputs)
# Pre forward hook
self.forward_hooks.append(module.register_forward_pre_hook(_pre_forward_module_hook))
# Post forward hook
self.forward_hooks.append(module.register_forward_hook(_post_forward_module_hook))
# Pre backward hook
self.backward_hooks.append(module.register_forward_hook(_pre_backward_module_hook))
# post backward hook
self.backward_hooks.append(module.register_forward_pre_hook(_post_backward_module_hook))
@torch.no_grad()
def pre_sub_module_forward_function(self, sub_module):
see_memory_usage(f"Before sub module function {sub_module.__class__.__name__}", force=False)
global FWD_MODULE_STACK
FWD_MODULE_STACK.append(sub_module)
param_coordinator = self.get_param_coordinator(training=sub_module.training)
param_coordinator.trace_prologue(sub_module)
if param_coordinator.is_record_trace():
param_coordinator.record_module(sub_module)
param_coordinator.fetch_sub_module(sub_module, forward=True)
see_memory_usage(f"Before sub module function {sub_module.__class__.__name__} after fetch", force=False)
@torch.no_grad()
def post_sub_module_forward_function(self, sub_module):
see_memory_usage(f"After sub module function {sub_module.__class__.__name__} {sub_module.id} before release",
force=False)
param_coordinator = self.get_param_coordinator(training=sub_module.training)
param_coordinator.release_sub_module(sub_module, backward=False)
see_memory_usage(f"After sub module function {sub_module.__class__.__name__} {sub_module.id} after release",
force=False)
@torch.no_grad()
def pre_sub_module_backward_function(self, sub_module):
assert sub_module.training, "backward pass is invalid for module in evaluation mode"
param_coordinator = self.get_param_coordinator(training=True)
param_coordinator.trace_prologue(sub_module)
if param_coordinator.is_record_trace():
param_coordinator.record_module(sub_module)
param_coordinator.fetch_sub_module(sub_module, forward=False)
@torch.no_grad()
def post_sub_module_backward_function(self, sub_module):
assert sub_module.training, "backward pass is invalid for module in evaluation mode"
see_memory_usage(
f"After sub module backward function {sub_module.__class__.__name__} {sub_module.id} before release",
force=False)
self.get_param_coordinator(training=True).release_sub_module(sub_module, backward=True)
see_memory_usage(
f"After sub module backward function {sub_module.__class__.__name__} {sub_module.id} after release",
force=False)
| 22,985 | 42.369811 | 177 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/zero/test.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.runtime.zero.contiguous_memory_allocator import ContiguousMemoryAllocator
def test1():
mem = ContiguousMemoryAllocator(1024, torch.half, 'cpu')
mem.print_allocation(resolution=100)
a1 = mem.allocate_tensor(64).mul_(0.0).add_(1.0)
mem.print_allocation(resolution=100)
mem.release_tensor(a1)
mem.print_allocation(resolution=100)
a2 = mem.allocate_tensor(64).mul_(0.0).add_(2.0)
a3 = mem.allocate_tensor(256).mul_(0.0).add_(3.0)
a4 = mem.allocate_tensor(128).mul_(0.0).add_(4.0)
mem.print_allocation(resolution=100)
mem.release_tensor(a3)
mem.print_allocation(resolution=100)
a5 = mem.allocate_tensor(64).mul_(0.0).add_(5.0)
a6 = mem.allocate_tensor(256).mul_(0.0).add_(6.0)
a7 = mem.allocate_tensor(128).mul_(0.0).add_(7.0)
mem.print_allocation(resolution=100)
a8 = mem.allocate_tensor(256).mul_(0.0).add_(8.0)
a9 = mem.allocate_tensor(128).mul_(0.0).add_(9.0)
mem.print_allocation(resolution=100)
mem.release_tensor(a9)
mem.release_tensor(a6)
mem.release_tensor(a2)
mem.release_tensor(a5)
a10 = mem.allocate_tensor(512).mul_(0.0).add_(10.0)
mem.print_allocation(resolution=100)
#print(f"a4:{a4}")
#print(f"a7:{a7}")
#print(f"a8:{a8}")
#print(f"a10:{a10}")
assert (a4.norm() + a7.norm() + a8.norm() + a10.norm()).item() == 474.50, "Test failed"
def test2():
mem = ContiguousMemoryAllocator(512, torch.half, 'cpu')
a1 = mem.allocate_tensor(64).mul_(0.0).add_(1.0)
a2 = mem.allocate_tensor(64).mul_(0.0).add_(2.0)
a3 = mem.allocate_tensor(64).mul_(0.0).add_(3.0)
a4 = mem.allocate_tensor(64).mul_(0.0).add_(4.0)
a5 = mem.allocate_tensor(64).mul_(0.0).add_(5.0)
a6 = mem.allocate_tensor(64).mul_(0.0).add_(6.0)
a7 = mem.allocate_tensor(64).mul_(0.0).add_(7.0)
a8 = mem.allocate_tensor(64).mul_(0.0).add_(8.0)
mem.release_tensor(a2)
mem.release_tensor(a4)
mem.release_tensor(a6)
mem.release_tensor(a8)
mem.print_allocation(resolution=100)
a9 = mem.allocate_tensor(128).mul_(0.0).add_(9.0)
a10 = mem.allocate_tensor(64).mul_(0.0).add_(10.0)
a11 = mem.allocate_tensor(64).mul_(0.0).add_(11.0)
mem.release_tensor(a1)
mem.release_tensor(a5)
mem.print_allocation(resolution=100)
a12 = mem.allocate_tensor(128).mul_(0.0).add_(12.0)
mem.print_allocation(resolution=100)
print(f"a7:{a7}")
print(f"a9:{a9}")
print(f"a10:{a10}")
print(f"a11:{a11}")
print(f"a12:{a12}")
assert (a7.norm() + a9.norm() + a10.norm() + a11.norm() + a12.norm()) == 460.75, "TestFailed"
test1()
test2()
| 2,727 | 33.974359 | 97 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/zero/contiguous_memory_allocator.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed import comm as dist
def print_rank_0(message):
if dist.get_rank() == 0:
print(message)
class ContiguousMemoryAllocator(object):
def __init__(self, size, dtype, device):
self.buffer = torch.zeros(size, dtype=dtype, device=device)
#address to contiguous size available
self.contiguous_sizes = {}
self.contiguous_sizes[0] = size
#tensor id to its address
self.tensor_addresses = {}
#tensor address to its size
self.tensor_sizes = {}
#tensor address to ids
self.tensor_ids = {}
#id to tensors
self.tensor_map = {}
#id to params. Maps each tensor buffer to list of parameters that uses it
self.id_to_params = {}
self.total_size = size
self.total_free = size
self.largest_contiguous = size
self.max_allocated = 0
self.count = 0
#create a tensor of the requested size from the pre-allocated buffer
#fails if there is not enough free space
#if there is not enough contiguous space, defragments and then allocates
def allocate_tensor(self, size):
free_before = self.total_free
assert size <= self.total_free, "Not enough memory in buffer. Allocation failed"
if self.largest_contiguous < size:
print_rank_0("Needs defragmentation to allocate. Before Defragmentation:")
self.print_allocation(resolution=100)
self._defragment_memory()
#set the param data to the new tensor buffer locations
self._reset_param_data()
print_rank_0("After defragmentation:")
self.print_allocation(resolution=100)
self.total_free = self.total_free - size
allocated = self.total_size - self.total_free
if allocated > self.max_allocated:
self.max_allocated = allocated
tensor_address = self._get_new_tensor_address(size)
ret_tensor = self._get_new_tensor(tensor_address, size)
print_rank_0(
f"Free before allocation {free_before}. Allocating {size}. Free after allocation {self.total_free}. Max allocated {self.max_allocated}"
)
assert self.total_free + size == free_before, "Allocation bookkeeping error"
return ret_tensor
#assigns the tensor data to the param data and keeps track of the assignment
#any change to the underlying buffer from defragmentation will cause a
#reassignment of the param data
def assign_to_param(self, tensor, param, numel, shape):
tensor_id = id(tensor)
assert tensor_id in self.tensor_map.keys(), "No such tensor allocated by the allocator."
assert tensor.numel() >= numel, "Tensor buffer is not large enough"
assert not tensor_id in self.id_to_params.keys(), "This tensor has already been assigned to a param"
self.id_to_params[tensor_id] = [param]
replicated_tensor = tensor.narrow(0, 0, numel).view(shape)
param.data = replicated_tensor.data
param.contiguous_tensor_id = tensor_id
#deletes the tensor and frees up the underlying buffer
def release_tensor(self, tensor):
free_before = self.total_free
tensor_id = id(tensor)
tensor_size = tensor.numel()
self._release_tensor(tensor_id)
self._unassign_params(tensor_id)
self.total_free += tensor_size
print_rank_0(
f"Free before release {free_before}. Released {tensor.numel()}. Total free after {self.total_free}.")
assert self.total_free - tensor_size == free_before, "Release bookkeeping error"
def release_tensor_with_id(self, tensor_id):
free_before = self.total_free
assert tensor_id in self.tensor_map.keys(), "Invalid tensor id"
tensor = self.tensor_map[tensor_id]
tensor_size = tensor.numel()
self._release_tensor(tensor_id)
self._unassign_params(tensor_id)
self.total_free += tensor_size
print_rank_0(
f"Free before release {free_before}. Released {tensor.numel()}. Total free after {self.total_free}.")
assert self.total_free - tensor_size == free_before, "Release bookkeeping error"
#shows the current memory allocation at specified resolution
def print_allocation(self, resolution=200):
total_size = self.buffer.numel() * 1.0
empty = []
for addr, size in self.contiguous_sizes.items():
start = int(addr * resolution / total_size)
end = int((addr + size) * resolution / total_size)
empty.extend(range(start, end))
s = ''
for i in range(resolution):
s += '.' if i in empty else '|'
print_rank_0(s)
def max_allocated(self):
return self.max_allocated
#to be called after defragmentation that moves the tensor buffers
#this call reassigns the data of all the parameters using the tensor buffers
def _reset_param_data(self):
for id, tensor in self.tensor_map.items():
for param in self.id_to_params[id]:
param.data = tensor.narrow(0, 0, param.numel()).view(param.data.shape).data
def _unassign_params(self, tensor_id):
if tensor_id in self.id_to_params.keys():
del self.id_to_params[tensor_id]
def _release_tensor(self, tensor_id):
assert tensor_id in self.tensor_addresses, f"Tensor id {tensor_id} not found"
address = self.tensor_addresses[tensor_id]
contiguous_size = self.tensor_map[tensor_id].numel()
del self.tensor_addresses[tensor_id]
del self.tensor_ids[address]
del self.tensor_map[tensor_id]
del self.tensor_sizes[address]
self._consolidate_address(address, contiguous_size)
self.largest_contiguous = self._largest_contiguous()
def _consolidate_address(self, address, contiguous_size):
#consolidate next buffer
end_address = address + contiguous_size
if end_address in self.contiguous_sizes:
contiguous_size += self.contiguous_sizes[end_address]
del self.contiguous_sizes[end_address]
#consolidate previous buffer
for addr, size in self.contiguous_sizes.items():
if addr + size == address:
del self.contiguous_sizes[addr]
contiguous_size += size
address = addr
break
self.contiguous_sizes[address] = contiguous_size
def _defragment_memory(self):
empty_addresses = sorted(self.contiguous_sizes.keys())
tensor_addresses = sorted(self.tensor_addresses.values())
tensor_index = 0
while tensor_index < len(tensor_addresses):
empty_addr = empty_addresses[0]
empty_size = self.contiguous_sizes[empty_addr]
tensor_addr = tensor_addresses[tensor_index]
tensor_size = self.tensor_sizes[tensor_addr]
tensor_id = self.tensor_ids[tensor_addr]
tensor = self.tensor_map[self.tensor_ids[tensor_addr]]
assert tensor_size == tensor.numel(), \
"Size mismatch. {tensor_size} is allocated at addr {tensor_addr} but tensor size is {tensor.numel()} "
assert empty_addr != tensor_addr, \
f"Cannot have same empty address {empty_addr} and tensor address {tensor_addr}"
if empty_addr < tensor_addr:
if empty_size >= tensor_size:
dest_buffer = self.buffer.narrow(0, empty_addr, tensor_size)
src_buffer = self.buffer.narrow(0, tensor_addr, tensor_size)
dest_buffer.data.copy_(src_buffer.data)
else:
#print_rank_0(f'empty addr : {empty_addr}, empty size {empty_size} tensor addr {tensor_addr} tensor size {tensor_size}')
src_addr = tensor_addr
dest_addr = empty_addr
while src_addr < (tensor_addr + tensor_size):
copy_size = min(empty_size, tensor_addr + tensor_size - src_addr)
dest_buffer = self.buffer.narrow(0, dest_addr, copy_size)
src_buffer = self.buffer.narrow(0, src_addr, copy_size)
dest_buffer.data.copy_(src_buffer.data)
src_addr += copy_size
dest_addr += copy_size
self._replace_old_address_with_new(tensor_id, empty_addr)
tensor_index += 1
else:
tensor_index += 1
empty_addresses = sorted(self.contiguous_sizes.keys())
def _replace_old_address_with_new(self, tensor_id, new_address):
tensor = self.tensor_map[tensor_id]
tensor_size = tensor.numel()
tensor.data = self.buffer.narrow(0, new_address, tensor_size).data
self._release_tensor(tensor_id)
self._mark_as_occupied(new_address, tensor_size)
self.tensor_ids[new_address] = tensor_id
self.tensor_map[tensor_id] = tensor
self.tensor_addresses[tensor_id] = new_address
self.tensor_sizes[new_address] = tensor_size
def _get_new_tensor_address(self, size):
tensor_address = None
for address, contiguous_size in self.contiguous_sizes.items():
if contiguous_size >= size and \
(tensor_address is None or \
contiguous_size < self.contiguous_sizes[tensor_address]):
tensor_address = address
assert tensor_address is not None, "address cannot be None"
return tensor_address
def _get_new_tensor(self, address, size):
available_contiguous_size = self.contiguous_sizes[address]
assert size <= available_contiguous_size, \
f"Tensor numel {size} is large than available contiguous size {available_contiguous_size}"
self.count += 1
new_tensor = self.buffer.narrow(0, address, size)
tensor_id = id(new_tensor)
self.tensor_addresses[tensor_id] = address
self.tensor_sizes[address] = size
self.tensor_ids[address] = tensor_id
self.tensor_map[tensor_id] = new_tensor
self._mark_as_occupied(address, size)
return new_tensor
def _largest_contiguous(self):
if len(self.contiguous_sizes) > 0:
return max([size for _, size in self.contiguous_sizes.items()])
else:
return 0
def _mark_as_occupied(self, address, size):
available_contiguous_size = self.contiguous_sizes[address]
del self.contiguous_sizes[address]
if available_contiguous_size != size:
self.contiguous_sizes[address + size] = available_contiguous_size - size
self.largest_contiguous = self._largest_contiguous()
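# Minimal usage sketch (illustrative only; the helper name below is not part of the
# original module). It mirrors the allocation patterns exercised in
# deepspeed/runtime/zero/test.py and assumes deepspeed.comm has been initialized
# (e.g. when run under a deepspeed launcher), since print_rank_0 queries the rank.
def _example_contiguous_allocator_usage():
    mem = ContiguousMemoryAllocator(1024, torch.half, 'cpu')
    a = mem.allocate_tensor(256).mul_(0.0).add_(1.0)   # carve a 256-element view out of the buffer
    b = mem.allocate_tensor(512).mul_(0.0).add_(2.0)
    mem.print_allocation(resolution=100)               # '|' marks used slots, '.' marks free slots
    mem.release_tensor(a)                              # returns the first 256 elements to the free pool
    c = mem.allocate_tensor(128)                       # fits in the freed region, no defragmentation needed
    return b, c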
| 10,926 | 36.940972 | 147 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/zero/mics_utils.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from dataclasses import dataclass
from typing import List
import numpy as np
import torch
from torch import Tensor
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator
from deepspeed.utils import logger
def _log_rank0(msg):
if dist.get_rank() == 0:
logger.info(msg)
@torch.jit.script
def scale_tensors(tensors: List[Tensor], scale: int):
for t in tensors:
t.div_(scale)
@dataclass
class MiCS_CommGroups:
""""""
param_shard_group = None
param_shard_size = -1
param_shard_rank = -1
param_repli_group = None
param_repli_size = -1
param_repli_rank = -1
param_intra_node_group = None
param_inter_node_shard_group = None
def create_mics_comm_groups(
shard_size,
dp_group,
hierarchical_allgather=False,
mpu=None,
):
"""
create the shard group and replicate group for MiCS from the generated config
TODO: consider broadcasting the config from rank 0
Returns:
MiCS_CommGroups
"""
# env var for debugging purpose
ndevices_per_node = int(os.environ.get("NDEV_PER_NODE", get_accelerator().device_count()))
_log_rank0(f'creating MiCS communication groups with per node device size {ndevices_per_node}')
groups = MiCS_CommGroups()
if mpu is not None:
assert dp_group == mpu.get_data_parallel_group()
# full size of the world
world_size = dist.get_world_size()
# global rank
global_rank = dist.get_rank()
config = _generate_mics_config(world_size, ndevices_per_node, shard_size, 1)
ranks_of_shard_group = config['shard_groups']
ranks_of_repli_group = config['replicate_groups']
if len(ranks_of_repli_group) == 0:
assert len(ranks_of_shard_group) == 1, "replicate groups are empty only for single shard group"
for r in ranks_of_shard_group[0]:
ranks_of_repli_group.append([r])
# for simplicity
assert _sizes_all_same(ranks_of_repli_group), "replicate groups must have the same size"
assert _sizes_all_same(ranks_of_shard_group), "shard groups must have the same size"
assert sum([len(g) for g in ranks_of_shard_group]) == dist.get_world_size(), "every rank must belong to exactly one shard group"
if len(ranks_of_shard_group) > 1: # if only shard on one group then no need for replicate groups
assert len(ranks_of_shard_group) == len(
ranks_of_repli_group[0]), "number of shard groups must equal to the size of each replicate group"
global_rank = dist.get_rank()
# create shard groups
for shard_ranks in ranks_of_shard_group:
_group = dist.new_group(shard_ranks)
if global_rank in shard_ranks:
groups.param_shard_group = _group
groups.param_shard_size = len(shard_ranks)
groups.param_shard_rank = dist.get_rank(_group)
logger.info(f'rank {global_rank}, shard group'
f' {groups.param_shard_rank}/{dist.get_world_size(group=_group)}')
# create replicate groups
for repli_ranks in ranks_of_repli_group:
if len(repli_ranks) > 1:
_group = dist.new_group(repli_ranks)
if global_rank in repli_ranks:
groups.param_repli_group = _group
groups.param_repli_size = len(repli_ranks)
groups.param_repli_rank = dist.get_rank(group=_group)
logger.info(f'rank {global_rank} '
f'replicate group {groups.param_repli_rank}/{dist.get_world_size(group=_group)}')
else:
groups.param_repli_group = None
groups.param_repli_size = 1
groups.param_repli_rank = 0
logger.info(f'rank {global_rank} replicate group 0/1')
# assign shard group size as world size
assert groups.param_shard_size == len(ranks_of_shard_group[0])
if hierarchical_allgather:
# create hierarchy inter-node, intra-node groups
# n_span_nodes = config['shard_span']
n_span_nodes = config['span_nodes']
assert n_span_nodes > 1, "sharding spans on single node, no need for hierarchy allgather"
assert len(ranks_of_shard_group[0]) % n_span_nodes == 0
n_gpu_per_node = len(ranks_of_shard_group[0]) // n_span_nodes
intra_node_ranks_group = []
inter_node_ranks_group = []
for shard_group in ranks_of_shard_group:
_intra_node_ranks = []
for i in range(0, len(shard_group), n_gpu_per_node):
_intra_node_ranks.append(shard_group[i:i + n_gpu_per_node])
_inter_node_ranks = []
for i in range(n_gpu_per_node):
_ranks = [_g[i] for _g in _intra_node_ranks]
_inter_node_ranks.append(_ranks)
intra_node_ranks_group.append(_intra_node_ranks)
inter_node_ranks_group.append(_inter_node_ranks)
_log_rank0(f"create for hierarchy all-gather groups: intra nodes {intra_node_ranks_group}")
_log_rank0(f"create for hierarchy all-gather groups: inter nodes {inter_node_ranks_group}")
# create communicators
for shard_group in intra_node_ranks_group:
for intra_node_ranks in shard_group:
_group = dist.new_group(intra_node_ranks)
if global_rank in intra_node_ranks:
groups.param_intra_node_group = _group
_log_rank0(f'create group for intra node ranks {intra_node_ranks}')
for shard_group in inter_node_ranks_group:
for inter_node_ranks in shard_group:
_group = dist.new_group(inter_node_ranks)
if global_rank in inter_node_ranks:
groups.param_inter_node_shard_group = _group
_log_rank0(f'create group for inter node ranks {inter_node_ranks}')
return groups
def _generate_mics_config(world_size, ndev_per_node, shard_size, pp_size=1):
"""Generating the configuration for sharding This shard config generation assume
that the pipeline stages are partitioned in order, i.e., first ranks
hold the stage0, etc.
Args:
shard_size (int): zero3 data-parallel shard size, FIXME:
change the name later
pp_size (int): pipeline parallel size; currently, this only works with
pipeline parallelism + zero
"""
assert world_size % pp_size == 0
assert (world_size // pp_size) % shard_size == 0, \
f"dp group size is not dividable by dp_shard_size, "\
f" (world_size {world_size}, pp_size {pp_size}, dp_shard_size {shard_size})"
config = {}
shard_groups = np.arange(world_size).reshape(-1, shard_size)
replicate_groups = []
for i in range(shard_size):
same_shard_ranks = shard_groups[:, i].tolist()
n_ranks = len(same_shard_ranks)
replicate_size = n_ranks // pp_size
replicate_groups.extend([same_shard_ranks[j:j + replicate_size] for j in range(0, n_ranks, replicate_size)])
config['replicate_groups'] = replicate_groups
config['shard_groups'] = shard_groups.tolist()
config["span_nodes"] = len(shard_groups[0]) // ndev_per_node
return config
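# Worked example (assumed values): _generate_mics_config(world_size=8, ndev_per_node=4,
# shard_size=4, pp_size=1) produces
#   shard_groups     = [[0, 1, 2, 3], [4, 5, 6, 7]]
#   replicate_groups = [[0, 4], [1, 5], [2, 6], [3, 7]]
#   span_nodes       = 1   (each shard group fits on a single 4-GPU node)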
def _sizes_all_same(groups):
"""all groups have same length"""
all_same = True
for g in groups:
if len(g) != len(groups[0]):
return False
return all_same
| 7,500 | 35.769608 | 116 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/zero/partition_parameters.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import math
import os
import types
from typing import Callable, Iterable
from enum import Enum
import functools
import itertools
from typing import List
import logging
import torch
from torch import Tensor
from deepspeed import comm as dist
from torch.nn import Module
from torch.nn import Parameter
from .linear import zero3_linear_wrap
from deepspeed.utils import groups
import deepspeed
from ..utils import get_only_unique_item, see_memory_usage
from deepspeed.runtime.zero.config import DeepSpeedZeroConfig
from deepspeed.runtime.zero.utils import assert_ints_same_as_other_ranks
from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum
from deepspeed.runtime.config_utils import get_config_default
from deepspeed.utils import instrument_w_nvtx, logger
from deepspeed.comm.comm import init_distributed
from deepspeed.utils.debug import (debug_param2name_id_shape, debug_param2name_id_shape_device, debug_module2name,
debug_param2name_id, debug_param2name_id_shape_status)
from deepspeed.accelerator import get_accelerator
from ..swap_tensor.partitioned_param_swapper import AsyncPartitionedParameterSwapper, PartitionedParamStatus
param_count = 0
partitioned_param_data_shape = [0]
zero_init_context = 0
top_level_context = None
class NoGatherHandle:
def __init__(self, param: Parameter) -> None:
if param.ds_status != ZeroParamStatus.INFLIGHT:
raise RuntimeError(f"expected param {param.ds_summary()} to be available")
param.data = param.ds_tensor.data.to(device=get_accelerator().current_device_name(),
non_blocking=True).view(param.ds_shape)
self.__param = param
def wait(self) -> None:
get_accelerator().current_stream().synchronize()
self.__param.ds_status = ZeroParamStatus.AVAILABLE
class NoGatherCoalescedHandle:
def __init__(self, params: List[Parameter]) -> None:
self.__params = params
self.__complete = False
for param in self.__params:
if param.ds_status != ZeroParamStatus.INFLIGHT:
raise RuntimeError(f"expected param {param.ds_summary()} to not be available")
param.data = param.ds_tensor.data.to(device=get_accelerator().current_device_name(),
non_blocking=True).view(param.ds_shape)
@instrument_w_nvtx
def wait(self) -> None:
if self.__complete:
return
get_accelerator().current_stream().synchronize()
for param in self.__params:
assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight"
param.ds_status = ZeroParamStatus.AVAILABLE
self.__complete = True
def _dist_allgather_fn(input_tensor: Tensor, output_tensor: Tensor, group=None):
return instrument_w_nvtx(dist.allgather_fn)(output_tensor, input_tensor, group=group, async_op=True)
def print_rank_0(message, debug=False, force=False):
rank = dist.get_rank()
if rank == 0 and (debug or force):
print(message)
# other variations
# - print for all ranks w/o interleaving
# printflock(f"[{rank}] {message}")
# - print to log file per rank
# log_rank_file(rank, message)
def debug_rank0(msg: str) -> None:
if dist.get_rank() == 0:
logger.debug(msg)
def is_zero_param(parameter):
if not torch.is_tensor(parameter):
return False
return hasattr(parameter, 'ds_id')
def _init_external_params(module):
if not hasattr(module, '_external_params'):
module._external_params = {}
def external_parameters(self):
return self._external_params.items()
def all_parameters(self):
return itertools.chain(self.named_parameters(self, recurse=False), external_parameters(self))
module.ds_external_parameters = types.MethodType(external_parameters, module)
module.all_parameters = types.MethodType(all_parameters, module)
def register_external_parameter(module, parameter):
"""Instruct DeepSpeed to coordinate ``parameter``'s collection and partitioning in
the forward and backward passes of ``module``.
This is used when a parameter is accessed outside of its owning module's
``forward()``. DeepSpeed must know to collect it from its partitioned
state and when to release the memory.
.. note::
This is only applicable to training with ZeRO stage 3.
Args:
module (``torch.nn.Module``): The module that requires ``parameter`` in its forward pass.
parameter (``torch.nn.Parameter``): The parameter to register.
Raises:
RuntimeError: If ``parameter`` is not of type ``torch.nn.Parameter``.
Examples
========
#. Register a weight that is used in another module's forward pass (line 6).
Parameter ``layer1.weight`` is used by ``layer2`` (line 11).
.. code-block:: python
:linenos:
:emphasize-lines: 6,11
class ModuleZ3(torch.nn.Module):
def __init__(self, *args):
super().__init__(self, *args)
self.layer1 = SomeLayer()
self.layer2 = OtherLayer()
deepspeed.zero.register_external_parameter(self, self.layer1.weight)
def forward(self, input):
x = self.layer1(input)
# self.layer1.weight is required by self.layer2.forward
y = self.layer2(x, self.layer1.weight)
return y
"""
if not isinstance(parameter, torch.nn.Parameter):
raise RuntimeError('Parameter is not a torch.nn.Parameter')
if not hasattr(module, '_external_params'):
_init_external_params(module)
key = id(parameter)
module._external_params[key] = parameter
def unregister_external_parameter(module, parameter):
"""Reverses the effects of :meth:`register_external_parameter`.
Args:
module (``torch.nn.Module``): The module to affect.
parameter (``torch.nn.Parameter``): The parameter to unregister.
Raises:
RuntimeError: If ``parameter`` is not of type ``torch.nn.Parameter``.
RuntimeError: If ``parameter`` is not a registered external parameter of ``module``.
"""
if not isinstance(parameter, torch.nn.Parameter):
raise RuntimeError('Parameter is not a torch.nn.Parameter')
if not hasattr(module, '_external_params') or id(parameter) not in module._external_params:
raise RuntimeError('Parameter is not a registered external parameter of module.')
key = id(parameter)
del module._external_params[key]
class ZeroParamType(Enum):
# same as regular pytorch parameters
NORMAL = 1
    # parameters are partitioned across data parallel processes
PARTITIONED = 2
    # the parameter is held by a single process rank
    # and is not available on all other processes
REMOTE = 3
class ZeroParamStatus(Enum):
# parameters are fully present and ready for use on all processes
AVAILABLE = 1
    # parameters are either partitioned or remote in some or all processes
NOT_AVAILABLE = 2
# parameters are being gathered.
INFLIGHT = 3
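# Descriptive note (not part of the original file): the typical lifecycle of a
# ZeRO-3 parameter is NOT_AVAILABLE -> INFLIGHT -> AVAILABLE. A freshly partitioned
# param is NOT_AVAILABLE; all_gather / all_gather_coalesced marks it INFLIGHT and
# returns a handle; handle.wait() marks it AVAILABLE; partition() frees the full
# tensor and returns it to NOT_AVAILABLE.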
_orig_torch_empty = torch.empty
_orig_torch_zeros = torch.zeros
_orig_torch_ones = torch.ones
_orig_torch_full = torch.full
_orig_torch_arange = torch.arange
_orig_torch_eye = torch.eye
def zero_wrapper_for_fp_tensor_constructor(fn: Callable, target_fp_dtype: torch.dtype) -> Callable:
def wrapped_fn(*args, **kwargs) -> Tensor:
if kwargs.get("device", None) is None:
kwargs['device'] = torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"]))
tensor: Tensor = fn(*args, **kwargs)
if tensor.is_floating_point():
tensor = tensor.to(target_fp_dtype)
return tensor
return wrapped_fn
def get_new_tensor_fn_for_dtype(dtype: torch.dtype) -> Callable:
def new_tensor(cls, *args) -> Tensor:
device = torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"]))
tensor = _orig_torch_empty(0, device=device).new_empty(*args)
if tensor.is_floating_point():
tensor = tensor.to(dtype)
return tensor
return new_tensor
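# Illustrative sketch (not part of the original file): the effect of the constructor
# wrappers above once `Init` has swapped them in. Assumes the process was launched
# with LOCAL_RANK set in the environment, as it is under a DeepSpeed launcher.
def _example_wrapped_constructor():
    wrapped_empty = zero_wrapper_for_fp_tensor_constructor(_orig_torch_empty, torch.half)
    t = wrapped_empty(4, 4)  # placed on this rank's accelerator and cast to fp16
    i = wrapped_empty(4, dtype=torch.long)  # non-floating dtypes are left unchanged
    return t, i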
# https://stackoverflow.com/a/63851681/9201239
def get_all_subclasses(cls):
subclass_list = []
def recurse(cl):
for subclass in cl.__subclasses__():
subclass_list.append(subclass)
recurse(subclass)
recurse(cls)
return set(subclass_list)
@instrument_w_nvtx
def free_param(param: Parameter) -> None:
"""Free underlying storage of a parameter."""
assert not param.ds_active_sub_modules, param.ds_summary()
if get_accelerator().on_accelerator(param.data):
# need to make sure that we don't free the parameter while it is still
# being used for computation
param.data.record_stream(get_accelerator().current_stream())
# param.data doesn't store anything meaningful in partitioned state
param.data = torch.empty(0, dtype=param.dtype, device=param.device)
param.ds_status = ZeroParamStatus.NOT_AVAILABLE
reuse_buffers = False
temp_contiguous_tensor = None
empty_buffers = {}
# Inserts _post_init_method at the end of init method
# for all sub classes of torch.nn.Module
class InsertPostInitMethodToModuleSubClasses(object):
def __init__(self, enabled=True, mem_efficient_linear=True, ds_config=None, dtype=None):
self.mem_efficient_linear = mem_efficient_linear
self.enabled = enabled
self._set_dtype(ds_config, dtype)
assert self.dtype in [
torch.half, torch.bfloat16, torch.float
], f"Invalid data type {self.dtype}, allowed values are [torch.half, torch.bfloat16, torch.float]"
self.wrapped_cls = set()
def __enter__(self):
if not self.enabled:
return
global zero_init_context
if zero_init_context == 0:
self.patch_init_and_builtins()
global top_level_context
top_level_context = self
zero_init_context += 1
def __exit__(self, exc_type, exc_value, traceback):
if not self.enabled:
return
global zero_init_context
zero_init_context -= 1
# Exiting the top level context
if zero_init_context == 0:
self.unpatch_init_and_builtins()
global top_level_context
top_level_context = None
if dist.get_rank() == 0:
logger.info("finished initializing model with %.2fB parameters", param_count / 1e9)
# Now that we cleaned up the metaclass injection, raise the exception.
if exc_type is not None:
return False
# To be implemented by inheriting classes
def _post_init_method(self, module):
pass
def _set_dtype(self, ds_config, dtype):
if ds_config is not None and dtype is None:
if ds_config.bfloat16_enabled and ds_config.fp16_enabled:
raise RuntimeError("bfloat16 and fp16 cannot be enabled at once")
if ds_config.bfloat16_enabled:
self.dtype = torch.bfloat16
elif ds_config.fp16_enabled:
self.dtype = torch.half
else:
self.dtype = torch.float
else:
self.dtype = dtype or torch.half
def patch_init_and_builtins(self):
def apply_with_gather(orig_module_apply_fn: Callable) -> Callable:
"""many models make use of child modules like Linear or Embedding which
perform their own weight initialization in their __init__ methods,
but will then have more weight initialization in a parent module's __init__
method that modifies weights of child modules, which is typically done
using the Module.apply method.
since the Init context manager partitions child modules immediately after
they are initialized, without modifying apply we would entirely skip
any initialization done by parent modules.
to get around this issue, we wrap the function passed to Module.apply
so that the applied function is applied to child modules correctly.
"""
def get_wrapped_fn_to_apply(fn_to_apply: Callable) -> Callable:
if hasattr(fn_to_apply, "wrapped"):
return fn_to_apply
@functools.wraps(fn_to_apply)
def wrapped_fn_to_apply(module_to_apply_fn_to: Module) -> None:
"""gathers parameters before calling apply function. afterwards
parameters are broadcasted to ensure consistency across all ranks
then re-partitioned.
takes the following steps:
1. allgathers parameters for the current module being worked on
2. calls the original function
3. broadcasts root rank's parameters to the other ranks
4. re-partitions the parameters
"""
if not all(is_zero_param(p) for p in module_to_apply_fn_to.parameters(recurse=False)):
raise RuntimeError(f"not all parameters for {module_to_apply_fn_to.__class__.__name__}, "
f"were zero params, is it possible that the parameters were "
f"overwritten after they were initialized? "
f"params: {[p for p in module_to_apply_fn_to.parameters(recurse=False)]} ")
params_to_apply_fn_to: Iterable[Parameter] = list(
sorted(module_to_apply_fn_to.parameters(recurse=False), key=lambda p: p.ds_id))
for param in params_to_apply_fn_to:
param.all_gather()
fn_to_apply(module_to_apply_fn_to)
for param in params_to_apply_fn_to:
dist.broadcast(param.data, 0, group=param.ds_process_group)
for param in params_to_apply_fn_to:
param.partition(has_been_updated=True)
wrapped_fn_to_apply.wrapped = True
return wrapped_fn_to_apply
@functools.wraps(orig_module_apply_fn)
def wrapped_apply(module: Module, fn_to_apply: Callable) -> None:
orig_module_apply_fn(module, get_wrapped_fn_to_apply(fn_to_apply))
return wrapped_apply
def partition_after(f):
@functools.wraps(f)
def wrapper(module, *args, **kwargs):
                # Important logic: we want to run _post_init_method only after the most
                # derived class's __init__ has completed, and do nothing after the __init__
                # of any of its parents and grandparents in the inheritance ancestry. This
                # way partitioning happens exactly once, when the whole object is ready,
                # and not before. This matters because the derived module often still needs
                # to tweak the weights - for example by running a custom weight init
                # function - so if a parent created the weight param, the child won't need
                # to gather it in order to tweak it. (A standalone sketch of this pattern
                # follows this class.)
print_rank_0(f'Before initializing {module.__class__.__name__}', force=False)
is_child_module = False
if not hasattr(module, "_ds_child_entered"):
# child's __init__ was called, since parents all see the same object they can now skip post_init
is_child_module = True
setattr(module, "_ds_child_entered", True)
f(module, *args, **kwargs)
if is_child_module:
# child's __init__ is done, now we can run a single post_init on the child object
delattr(module, "_ds_child_entered")
print_rank_0(f'Running post_init for {module.__class__.__name__}', force=False)
self._post_init_method(module)
print_rank_0(f'After initializing followed by post init for {module.__class__.__name__}', force=False)
return wrapper
def _enable_class(cls):
cls._old_init = cls.__init__
cls.__init__ = partition_after(cls.__init__)
def _init_subclass(cls, **kwargs):
cls._old_init = cls.__init__
cls.__init__ = partition_after(cls.__init__)
# Replace .__init__() for all existing subclasses of torch.nn.Module recursively
for subclass in get_all_subclasses(torch.nn.modules.module.Module):
_enable_class(subclass)
# holding onto some methods so we can put them back the way they were in __exit__
torch.nn.modules.module.Module._old_init_subclass = torch.nn.modules.module.Module.__init_subclass__
torch.nn.modules.module.Module._old_apply = torch.nn.modules.module.Module.apply
torch.Tensor.__old_new__ = torch.Tensor.__new__
# Replace .__init__() for future subclasses of torch.nn.Module
torch.nn.modules.module.Module.__init_subclass__ = classmethod(_init_subclass)
torch.nn.modules.module.Module.apply = apply_with_gather(torch.nn.modules.module.Module._old_apply)
self._add_tensor_creation_wrappers()
if self.mem_efficient_linear:
print_rank_0(
"nn.functional.linear has been overridden with a more memory efficient version. This will persist unless manually reset.",
force=False)
self.linear_bk = torch.nn.functional.linear
torch.nn.functional.linear = zero3_linear_wrap
self.patched = True
def unpatch_init_and_builtins(self):
if self.patched:
def _disable_class(cls):
cls.__init__ = cls._old_init
for subclass in get_all_subclasses(torch.nn.modules.module.Module):
_disable_class(subclass)
# putting methods back the way we found them
torch.nn.modules.module.Module.__init_subclass__ = torch.nn.modules.module.Module._old_init_subclass
torch.nn.modules.module.Module.apply = torch.nn.modules.module.Module._old_apply
self._remove_tensor_creation_wrappers()
self.patched = False
def _add_tensor_creation_wrappers(self):
torch.Tensor.__new__ = get_new_tensor_fn_for_dtype(self.dtype)
torch.empty = zero_wrapper_for_fp_tensor_constructor(_orig_torch_empty, self.dtype)
torch.zeros = zero_wrapper_for_fp_tensor_constructor(_orig_torch_zeros, self.dtype)
torch.ones = zero_wrapper_for_fp_tensor_constructor(_orig_torch_ones, self.dtype)
torch.full = zero_wrapper_for_fp_tensor_constructor(_orig_torch_full, self.dtype)
torch.arange = zero_wrapper_for_fp_tensor_constructor(_orig_torch_arange, self.dtype)
torch.eye = zero_wrapper_for_fp_tensor_constructor(_orig_torch_eye, self.dtype)
def _remove_tensor_creation_wrappers(self):
torch.Tensor.__new__ = torch.Tensor.__old_new__
torch.empty = _orig_torch_empty
torch.zeros = _orig_torch_zeros
torch.ones = _orig_torch_ones
torch.full = _orig_torch_full
torch.arange = _orig_torch_arange
torch.eye = _orig_torch_eye
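# Illustrative sketch (not part of the original file): a minimal standalone analogue
# of the `partition_after` wrapper above. A hook registered this way fires exactly
# once, after the most derived class's __init__ has finished, even when every class
# in the hierarchy carries a patched __init__.
def _example_post_init_once(hook):
    def patch(cls):
        original_init = cls.__init__
        @functools.wraps(original_init)
        def patched_init(self, *args, **kwargs):
            # the first patched __init__ entered belongs to the most derived class
            outermost = not hasattr(self, "_example_entered")
            if outermost:
                self._example_entered = True
            original_init(self, *args, **kwargs)
            if outermost:
                delattr(self, "_example_entered")
                hook(self)  # runs once, after the whole object is constructed
        cls.__init__ = patched_init
        return cls
    return patch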
def shutdown_init_context():
"""
This function is used to initialize deepspeed engine inside the context of Init.
We need to remove the wrappers but keep the context.
"""
if top_level_context:
top_level_context.unpatch_init_and_builtins()
def restore_init_context():
"""
This function is used to restore the wrappers after deepspeed engine is initialized.
"""
if top_level_context:
top_level_context.patch_init_and_builtins()
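# Illustrative sketch (not part of the original file): how shutdown_init_context()
# and restore_init_context() pair up around engine construction when
# deepspeed.initialize() is reached while a zero.Init context is still open.
# deepspeed.initialize() normally performs this dance itself; `model` and `config`
# below are placeholders supplied by the caller.
def _example_build_engine_inside_init_context(model, config):
    shutdown_init_context()  # drop the __init__ and tensor-constructor patches
    engine, _, _, _ = deepspeed.initialize(model=model, config=config)
    restore_init_context()  # re-install the patches for modules built afterwards
    return engine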
class AllGatherHandle:
def __init__(self, handle, param: Parameter, quantization=None) -> None:
if param.ds_status != ZeroParamStatus.INFLIGHT:
raise RuntimeError(f"expected param {param.ds_summary()} to be available")
self.__handle = handle
self.__param = param
self.__quantization = quantization
def wait(self) -> None:
instrument_w_nvtx(self.__handle.wait)()
if self.__quantization:
instrument_w_nvtx(self.__quantization.quant_handle.wait)()
self.__param.data = self.__quantization.backend.dequantize(
self.__quantization.quantized_param, self.__quantization.scale_buffer).to(self.__param.device)
self.__param.ds_status = ZeroParamStatus.AVAILABLE
class AllGatherCoalescedHandle:
def __init__(
self,
allgather_handle,
params: List[Parameter],
partitions: List[Tensor],
world_size: int,
use_secondary_tensor=False,
forward=False,
quantization=None,
) -> None:
self.allgather_handle = allgather_handle
self.params = params
self.partitions = partitions
self.world_size = world_size
self.use_secondary_tensor = use_secondary_tensor
self.forward = forward
self.complete = False
self.quantization = quantization
for param in self.params:
if param.ds_status != ZeroParamStatus.INFLIGHT:
raise RuntimeError(f"expected param {param.ds_summary()} to not be available")
@instrument_w_nvtx
def wait(self) -> None:
if self.complete:
return
instrument_w_nvtx(self.allgather_handle.wait)()
if self.quantization:
instrument_w_nvtx(self.quantization.quant_handle.wait)()
flat_tensor = self.quantization.backend.dequantize(
self.quantization.quantized_param, self.quantization.scale_buffer).to(self.params[0].device)
self.partitions: List[Parameter] = []
for i in range(self.quantization.world_size):
self.partitions.append(
flat_tensor.narrow(0, self.quantization.partition_sz * i, self.quantization.partition_sz))
# split the single tensor out into individual tensors
param_offset = 0
for param in self.params:
assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight"
partitions: List[Tensor] = []
ds_tensor_numel = param.ds_tensor.ds_numel
if self.use_secondary_tensor and not self.forward:
ds_tensor_numel *= param.ds_secondary_tensor_num_of_groups
for rank in range(self.world_size):
param_start = rank * ds_tensor_numel
if param_start < param.ds_numel:
part_to_copy = self.partitions[rank].narrow(0, param_offset,
min(param.ds_numel - param_start, ds_tensor_numel))
partitions.append(part_to_copy)
param.data = instrument_w_nvtx(torch.cat)(partitions).view(param.ds_shape)
param.ds_status = ZeroParamStatus.AVAILABLE
for part_to_copy in partitions:
part_to_copy.record_stream(get_accelerator().current_stream())
param_offset += ds_tensor_numel
self.complete = True
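# Worked trace of the slicing in AllGatherCoalescedHandle.wait() (illustrative
# numbers, not from the original file): world_size=2 with two params, A
# (ds_numel=5, ds_tensor.ds_numel=3) and B (ds_numel=4, ds_tensor.ds_numel=2), so
# each rank's partition holds 5 elements and the allgathered flat tensor holds 10:
# [A0 A1 A2 B0 B1 | A3 A4 pad B2 B3].
# For A (param_offset=0): rank 0 contributes narrow(0, 0, min(5-0, 3)) = 3 elements,
# rank 1 contributes narrow(0, 0, min(5-3, 3)) = 2 elements; torch.cat restores A's
# 5 values. For B (param_offset=3): each rank contributes narrow(0, 3, 2); torch.cat
# restores B's 4 values, and the padding element after A3 A4 is never copied.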
class QuantizationInfo:
# a placeholder object to store all quant related vars used in handles
def __init__(self) -> None:
self.quantized_param = None
self.backend = None
self.quant_handle = None
self.scale_buffer = None
class CUDAQuantizer:
async_flag = True
target_group_size = 8000 # the optimal size is 4k, so we set the target to be below 8k
group_size_cache = dict()
def __init__(self):
self.quantizer_cuda_module = deepspeed.ops.op_builder.QuantizerBuilder().load()
def quantize(self, param, groups=None):
if groups is None:
try:
groups = self.group_size_cache[param.numel()]
except KeyError:
groups = math.ceil(param.numel() / self.target_group_size)
while groups < param.numel():
if param.numel() % (8 * groups) == 0:
break
groups += 1
while True:
if param.numel() % (8 * groups * 2) == 0 and param.numel(
) / groups > self.target_group_size: #hard limit of 16k group_size
groups *= 2
else:
break
assert (
param.numel() % (8 * groups) == 0
), f"Qantized weight requires the number of weights be a multiple of 8. Yet {param.numel()} cannot be divided by 8*{groups}"
assert (param.numel() / groups < 16000), f"{param.numel()} / {groups} is larger than 16k"
assert param.numel(
) > groups, f"Adaptive grouping algorithm cannot find a group size for input tensor of size {param.numel()}"
self.group_size_cache[param.numel()] = groups
return self.quantizer_cuda_module.quantize(param.to(get_accelerator().device_name()), groups, 8,
self.quantizer_cuda_module.Symmetric)
def dequantize(self, quantized_param, scale):
return self.quantizer_cuda_module.dequantize(quantized_param, scale, scale.numel(), 8,
self.quantizer_cuda_module.Symmetric)
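# Worked trace of the adaptive group-size search in CUDAQuantizer.quantize()
# (illustrative numbers, not from the original file): for a 2048x2048 weight,
# param.numel() = 4_194_304 = 2**22 and target_group_size = 8000, so the search
# starts at groups = ceil(2**22 / 8000) = 525. The first loop increments until
# 8*groups divides 2**22, which first happens at groups = 1024. The second loop
# would double groups only while numel/groups > 8000, and 2**22 / 1024 = 4096, so
# it stops immediately. Result: 1024 groups of 4096 weights, cached for later
# tensors of the same size.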
def _no_gather_coalesced(params: Iterable[Parameter]) -> AllGatherCoalescedHandle:
for param in params:
if param.ds_status != ZeroParamStatus.NOT_AVAILABLE:
raise RuntimeError(param.ds_summary())
param.ds_status = ZeroParamStatus.INFLIGHT
params = sorted(params, key=lambda p: p.ds_id)
if len(params) == 1:
param, = params
return NoGatherHandle(param)
return NoGatherCoalescedHandle(params)
# Replaces all parameters in module with Scattered Parameters
class Init(InsertPostInitMethodToModuleSubClasses):
param_id = 0
param_persistence_threshold = get_config_default(DeepSpeedZeroConfig, "param_persistence_threshold")
model_persistence_threshold = get_config_default(DeepSpeedZeroConfig, "model_persistence_threshold")
num_persisted_parameters = 0
num_persisted_elements = 0
apply_param_persistence = False
def __init__(self,
module=None,
data_parallel_group=None,
mem_efficient_linear=True,
remote_device=None,
pin_memory=False,
config_dict_or_path=None,
config=None,
enabled=True,
dtype=None,
mpu=None,
zero_param_parallel_group=None,
zero_quantized_weights=False):
"""A context to enable massive model construction for training with
ZeRO-3. Models are automatically partitioned (or, sharded) across the
system and converted to half precision.
Args:
module (``torch.nn.Module``, optional): If provided, partition the model as
if it was constructed in the context.
data_parallel_group (``deepspeed.comm`` process group, optional):
The group of processes to partition among. Defaults to all processes.
mem_efficient_linear (bool, optional): Replace
torch.nn.functional.linear with an implementation that allows
DeepSpeed to partition parameters. Defaults to ``True``.
remote_device (string, optional): The initial device to store model
weights e.g., ``cpu``, ``nvme``. Passing ``"cpu"`` will create the model in CPU
memory. The model may still be moved to GPU based on the
offload settings for training. Defaults to param offload device if a config is
defined, otherwise GPU.
pin_memory (bool, optional): Potentially increase performance by
using pinned memory for model weights. ``remote_device`` must be
``"cpu"``. Defaults to pin_memory value in config, otherwise ``False``.
config_dict_or_path (dict or ``json file``, optional): If provided, provides configuration
for swapping fp16 params to NVMe.
config (dict or ``json file``, optional): Deprecated, use config_dict_or_path instead.
enabled (bool, optional): If ``False``, this context has no
effect. Defaults to ``True``.
dtype (``dtype``, optional): Can be used to change the data type of the parameters.
Supported options are ``torch.half`` and ``torch.float``. Defaults to ``None``
mpu (``object``, optional): A model parallelism unit object that implements get_{model,data}_parallel_{rank,group,world_size}.
zero_param_parallel_group(``object``, optional): Parallel (comm) group for dual partitioning of ZeRO params.
zero_quantized_weights (bool, optional): If ``True``, turn on quantized weights in all gather weights. Default is ``False``
This context accelerates model initialization and enables models that
are too large to allocate in their entirety in CPU memory. It has the
following effects:
#. allocates tensors to either GPU or CPU memory or NVMe
#. converts floating point tensors to half precision
#. immediately partitions tensors among the group of data-parallel devices
#. (*optional*) replaces ``torch.nn.functional.linear`` with a more
memory-efficient implementation
These modifications allow for models that exceed the size of local CPU/GPU
        memory/NVMe, but fit within the total aggregate capacity (*i.e.*, CPU memory,
        GPU memory, or NVMe) across all nodes. Consider initializing a model with one
trillion parameters, whose weights occupy two terabytes (TB) in half
precision. The initial CPU allocation in full precision requires 4TB of
memory *per process*, and so a system with 8 GPUs per node would need 32TB of
CPU memory due to data-parallel redundancies. Instead, by immediately
partitioning tensors we remove the redundancies. The result is that
regardless of the number of GPUs, we still only require the original 4TB. This
allows for a linear increase in model size with the aggregate system memory.
For example, if a node has 1TB of memory and 8 GPUs, we could fit a trillion
parameter model with 4 nodes and 32 GPUs.
        Important: If the fp16 weights of the model cannot fit into the memory of a
        single GPU, this feature must be used.
.. note::
Initializes ``deepspeed.comm`` if it has not already been done so.
See :meth:`deepspeed.init_distributed` for more information.
.. note::
Only applicable to training with ZeRO-3.
Examples
--------
#. Allocate a model and partition it among all processes:
.. code-block:: python
with deepspeed.zero.Init():
model = MyLargeModel()
#. Allocate a model in pinned CPU memory and partition it among a subgroup of processes:
.. code-block:: python
with deepspeed.zero.Init(data_parallel_group=mpu.get_data_parallel_group(),
remote_device="cpu",
pin_memory=True):
model = MyLargeModel()
#. Partition an already-allocated model in CPU memory:
.. code-block:: python
model = deepspeed.zero.Init(module=model)
"""
if config is not None:
config_dict_or_path = config
logger.warning(
f'zero.Init: the `config` argument is deprecated. Please use `config_dict_or_path` instead.')
_ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,
mpu) if config_dict_or_path is not None else None
if _ds_config is not None:
mem_efficient_linear = _ds_config.zero_config.memory_efficient_linear
super().__init__(enabled=enabled, mem_efficient_linear=mem_efficient_linear, ds_config=_ds_config, dtype=dtype)
if not dist.is_initialized():
init_distributed()
assert dist.is_initialized(), "Parameters cannot be scattered without initializing deepspeed.comm"
if data_parallel_group is None:
self.ds_process_group = dist.get_world_group()
else:
self.ds_process_group = data_parallel_group
self.rank = dist.get_rank(group=self.ds_process_group)
self.dp_world_size = dist.get_world_size(group=self.ds_process_group)
self.zero_param_process_group = zero_param_parallel_group
if _ds_config is not None and _ds_config.zero_config.zero_hpz_partition_size > 1 and self.zero_param_process_group is None:
groups._create_zero_param_parallel_group(_ds_config.zero_config.zero_hpz_partition_size)
self.zero_param_process_group = groups._get_zero_param_intra_parallel_group()
self.num_ranks_in_param_group = self.dp_world_size
self.rank_in_group = self.rank
self.num_param_groups = 1
if self.zero_param_process_group is not None:
self.num_ranks_in_param_group = groups._get_zero_param_intra_parallel_group_world_size()
self.num_param_groups = int(self.dp_world_size / self.num_ranks_in_param_group)
self.rank_in_group = groups._get_zero_param_intra_parallel_rank_in_mygroup()
print_rank_0(f"hpZeRO group size? {self.num_ranks_in_param_group}", force=True)
logger.debug(
"hpZeRO partition parameter my rank in world {} my rank in group {} ranks in my param partition group: {} "
.format(self.rank, self.rank_in_group, groups._get_zero_param_intra_parallel_group_ranks()))
# Local device is the device where the parameters are consumed, must be default device.
# It is the device where parameters are fully instantiated using allgather
self.local_device = torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"]))
get_accelerator().set_device(self.local_device)
self.quantized_weights = zero_quantized_weights
if _ds_config is not None and _ds_config.zero_config.zero_quantized_weights and not self.quantized_weights:
self.quantized_weights = _ds_config.zero_config.zero_quantized_weights
self.module = module
if (self.quantized_weights):
self.quantizer_module = CUDAQuantizer()
print_rank_0(f'Using quantizer: {self.quantizer_module.__class__.__name__}', force=True)
if _ds_config is not None and _ds_config.zero_config.offload_param is not None:
remote_device = _ds_config.zero_config.offload_param.device
pin_memory = _ds_config.zero_config.offload_param.pin_memory
self._validate_remote_device(remote_device, _ds_config)
# Remote device is the device where parameter partitions are stored
# It can be same as local_device or it could be CPU or NVMe.
self.remote_device = self.local_device if remote_device in [None, OffloadDeviceEnum.none] else remote_device
self.pin_memory = pin_memory if (self.remote_device in [OffloadDeviceEnum.cpu, OffloadDeviceEnum.nvme
]) else False
# Enable fp16 param swapping to NVMe
if self.remote_device == OffloadDeviceEnum.nvme:
self.param_swapper = AsyncPartitionedParameterSwapper(_ds_config, self.dtype)
else:
self.param_swapper = None
# If we are provided an already-allocated module to prepare.
if module is not None:
assert isinstance(module, torch.nn.Module)
self._convert_to_zero_parameters(module.parameters(recurse=True))
self.use_all_gather_into_tensor = dist.has_all_gather_into_tensor()
if not self.use_all_gather_into_tensor:
logger.info(f"all_gather_into_tensor API is not available in torch {torch.__version__}")
def _update_persist_config(self, ds_config):
Init.apply_param_persistence = True
Init.param_persistence_threshold = ds_config.zero_config.param_persistence_threshold
Init.model_persistence_threshold = ds_config.zero_config.model_persistence_threshold // self.num_partitions
def _convert_to_zero_parameters(self, param_list):
for param in param_list:
if is_zero_param(param):
continue
self._convert_to_deepspeed_param(param)
param.partition()
def _validate_remote_device(self, remote_device, ds_config):
if ds_config is not None:
if remote_device in [None, OffloadDeviceEnum.cpu]:
if ds_config.zero_config.offload_param is not None:
offload_param_device = ds_config.zero_config.offload_param.device
assert offload_param_device != OffloadDeviceEnum.nvme, \
f"'device' in DeepSpeed Config cannot be {offload_param_device} if remote device is {remote_device}."
if remote_device == OffloadDeviceEnum.nvme:
assert ds_config.zero_config.offload_param is not None, \
f'"offload_param" must be defined in DeepSpeed Config if remote device is {OffloadDeviceEnum.nvme}.'
assert ds_config.zero_config.offload_param.nvme_path is not None, \
f'"nvme_path" in DeepSpeed Config cannot be None if remote device is {OffloadDeviceEnum.nvme}'
def _post_init_method(self, module):
#see_memory_usage(f"Before converting params in {module.__class__.__name__}", force=False)
print_rank_0(f'Converting Params in {module.__class__.__name__}', force=False)
see_memory_usage(f"Before converting and partitioning params in {module.__class__.__name__}", force=False)
global param_count
for name, param in module.named_parameters(recurse=False):
param_count += param.numel()
if not is_zero_param(param):
self._convert_to_deepspeed_param(param)
print_rank_0(
f"Partitioning param {debug_param2name_id_shape(param)} module={debug_module2name(module)}")
if get_accelerator().on_accelerator(param):
if dist.get_world_group() == self.get_dp_process_group():
dist.broadcast(param, 0, self.get_dp_process_group())
else:
dist.broadcast(param, dist.get_global_rank(self.get_dp_process_group(), 0),
self.get_dp_process_group())
else:
if dist.get_rank() == 0:
logger.warn(f"param `{name}` in {module.__class__.__name__} "
f"not on GPU so was not broadcasted from rank 0")
param.partition()
see_memory_usage(
f"Param count {param_count}. After converting and partitioning params in {module.__class__.__name__}",
force=False)
def _convert_to_deepspeed_param(self, param):
# Partitioned, Normal, Remote
param.ds_param_type = ZeroParamType.PARTITIONED
# Replicated vs Partitioned vs Inflight
param.ds_status = ZeroParamStatus.AVAILABLE
# Stores the shape of the original tensor
param.ds_shape = param.shape
# Stores the number of elements in the original parameter without padding
param.ds_numel = param.numel()
# Stores the partitioned copy of the tensor
param.ds_tensor = None
# Keeps track of how many active sub-modules need this param at any given point in time
param.ds_active_sub_modules = set()
# If this flag is true, then the parameters are replicated throughput training
# And only partitioned before the step
if Init.apply_param_persistence and param.ds_numel <= Init.param_persistence_threshold and Init.num_persisted_elements + param.ds_numel <= Init.model_persistence_threshold:
param.ds_persist = True
Init.num_persisted_parameters += 1
Init.num_persisted_elements += param.ds_numel
else:
param.ds_persist = False
param.is_external_param = False
# The group that the parameter is scattered across.
param.ds_process_group = self.ds_process_group
# Stores the secondary partitioned copy of the tensor
param.ds_secondary_tensor = None
#Process group for secondary partition all (group) gather
param.ds_zero_param_process_group = self.zero_param_process_group
param.ds_secondary_tensor_group_size = self.num_ranks_in_param_group
param.ds_secondary_tensor_num_of_groups = self.num_param_groups
# This is set to the Async Param swapper if remote device is nvme
# else this is set to None
param.nvme_swapper = self.param_swapper
# DeepSpeed Param ID
param.ds_id = Init.param_id
Init.param_id += 1
def all_gather(param_list=None, async_op=False, hierarchy=0):
cls = param
if param_list is None:
param_list = [cls]
return self._all_gather(param_list, async_op=async_op, hierarchy=hierarchy)
@instrument_w_nvtx
def all_gather_coalesced(params: Iterable[Parameter],
forward: bool,
safe_mode: bool = False) -> AllGatherCoalescedHandle:
# fetches from nvme if the partition is not available and in nvme
self._ensure_availability_of_partitioned_params(params)
quant = self.quantized_weights
if self.module is not None and self.module.training is False:
quant = False
if self.num_partitions == 1:
return _no_gather_coalesced(params)
for param in params:
if param.ds_status != ZeroParamStatus.NOT_AVAILABLE:
raise RuntimeError(param.ds_summary())
param.ds_status = ZeroParamStatus.INFLIGHT
#use appropriate all gather process group
ds_process_group = self.ds_process_group
rank_in_group = self.rank
world_size = self.dp_world_size
use_secondary_tensor = False
if self.zero_param_process_group and not forward:
ds_process_group = self.zero_param_process_group #intragroup
rank_in_group = self.rank_in_group
world_size = self.num_ranks_in_param_group
#pprint(dir(ds_process_group))
# ensure that each rank has params in same order. the allgather
# is done by flattening the parameter list into a single tensor that
# can be allgathered in a single call - this means that if each rank
# gives a list of the same parameters in a different order we will
# silently get incorrect parameter values, and have very difficult
# to debug correctness issues.
params = sorted(params, key=lambda p: p.ds_id)
if logger.isEnabledFor(logging.DEBUG):
debug_rank0(f"-allgather_coalesced: {[p.ds_id for p in params]}")
if safe_mode:
# ensure that same list (with same ordering) of parameters are
# being allgathered across all ranks, otherwise could mix
# data between tensors.
assert_ints_same_as_other_ranks([p.ds_id for p in params])
# ensure that tensors from each rank agree on the same ds_numel
# otherwise could mix data between tensors.
assert_ints_same_as_other_ranks([p.ds_tensor.ds_numel for p in params])
if len(params) == 1:
# have an opportunity to avoid some intermediate memory allocations
param, = params
buffer_size = math.ceil(param.ds_numel / world_size) * world_size
if not forward and param.ds_secondary_tensor is not None:
buffer_size = param.ds_secondary_tensor.shape[0] * world_size #make sure out is appropriately sized
param_buffer = torch.empty(
buffer_size,
dtype=param.dtype if not quant else torch.int8,
device=get_accelerator().current_device_name(),
requires_grad=False,
)
param_ds_tensor = param.ds_secondary_tensor if not forward and param.ds_secondary_tensor is not None else param.ds_tensor
if not quant:
handles = _dist_allgather_fn(
param_ds_tensor.to(get_accelerator().current_device_name()),
param_buffer,
ds_process_group,
)
param.data = param_buffer.narrow(0, 0, param.ds_numel).view(param.ds_shape).to(param.device)
return AllGatherHandle(handles, param)
else:
quantized_param, scales = self.quantizer_module.quantize(param_ds_tensor)
handle = _dist_allgather_fn(quantized_param.to(get_accelerator().current_device_name()),
param_buffer, ds_process_group)
quant_scale_buffer = torch.empty(
scales.numel() * world_size,
dtype=torch.float32,
device=get_accelerator().current_device_name(),
requires_grad=False,
)
quant_handle = _dist_allgather_fn(scales.to(get_accelerator().current_device_name()),
quant_scale_buffer, ds_process_group)
quant_info = QuantizationInfo()
quant_info.quantized_param = param_buffer.narrow(0, 0, param.ds_numel).view(param.ds_shape).to(
param.device)
quant_info.backend = self.quantizer_module
quant_info.quant_handle = quant_handle
quant_info.scale_buffer = quant_scale_buffer
return AllGatherHandle(handle, param, quantization=quant_info)
else:
partition_sz = sum(p.ds_tensor.ds_numel for p in params)
if params[0].ds_secondary_tensor is not None and not forward:
partition_sz = sum(p.ds_tensor.ds_numel * p.ds_secondary_tensor_num_of_groups for p in params)
flat_tensor = torch.empty(partition_sz * world_size,
dtype=get_only_unique_item(p.dtype
for p in params) if not quant else torch.int8,
device=get_accelerator().current_device_name(),
requires_grad=False)
if not quant:
partitions: List[Parameter] = []
for i in range(world_size):
partitions.append(flat_tensor.narrow(0, partition_sz * i, partition_sz))
if params[0].ds_secondary_tensor is not None and not forward:
use_secondary_tensor = True
instrument_w_nvtx(torch.cat)(
[p.ds_secondary_tensor.to(get_accelerator().current_device_name()) for p in params],
out=partitions[rank_in_group])
else:
instrument_w_nvtx(
torch.cat)([p.ds_tensor.to(get_accelerator().current_device_name()) for p in params],
out=partitions[rank_in_group])
handle = _dist_allgather_fn(partitions[rank_in_group], flat_tensor, ds_process_group)
#Fix get_partition_dp_group(params[0]))
return AllGatherCoalescedHandle(
allgather_handle=handle,
params=params,
partitions=partitions,
world_size=world_size,
use_secondary_tensor=use_secondary_tensor,
forward=forward,
)
else:
if params[0].ds_secondary_tensor is not None and not forward:
use_secondary_tensor = True
quantized_param, scales = self.quantizer_module.quantize(
instrument_w_nvtx(torch.cat)(
[p.ds_secondary_tensor.to(get_accelerator().current_device_name()) for p in params]))
else:
quantized_param, scales = self.quantizer_module.quantize(
instrument_w_nvtx(
torch.cat)([p.ds_tensor.to(get_accelerator().current_device_name()) for p in params]))
handle = _dist_allgather_fn(quantized_param, flat_tensor, ds_process_group)
quant_info = QuantizationInfo()
quant_scale_buffer = torch.empty(
scales.numel() * world_size,
dtype=torch.float32,
device=get_accelerator().current_device_name(),
requires_grad=False,
)
quant_handle = _dist_allgather_fn(scales, quant_scale_buffer, ds_process_group)
quant_info.quantized_param = flat_tensor
quant_info.backend = self.quantizer_module
quant_info.quant_handle = quant_handle
quant_info.scale_buffer = quant_scale_buffer
quant_info.partition_sz = partition_sz
quant_info.world_size = world_size
return AllGatherCoalescedHandle(
allgather_handle=handle,
params=params,
partitions=None,
world_size=world_size,
use_secondary_tensor=use_secondary_tensor,
forward=forward,
quantization=quant_info,
)
def partition(param_list=None, backward=False, hierarchy=0, has_been_updated=False):
cls = param
print_rank_0(f"{'--'*hierarchy}----Partitioning param {debug_param2name_id_shape_device(cls)}",
force=False)
if param_list is None:
param_list = [cls]
self._partition(param_list, has_been_updated=has_been_updated)
def reduce_gradients_at_owner(param_list=None, hierarchy=0):
cls = param
if param_list is None:
param_list = [cls]
print_rank_0(
f"{'--'*hierarchy}----Reducing Gradients for param with ids {[param.ds_id for param in param_list]} to owner"
)
self._reduce_scatter_gradients(param_list)
def partition_gradients(param_list=None, partition_buffers=None, hierarchy=0, accumulate=False):
cls = param
print_rank_0(
f"{'--'*hierarchy}----Partitioning param gradient with id {debug_param2name_id_shape_device(cls)}")
if param_list is None:
param_list = [cls]
if isinstance(partition_buffers, torch.Tensor):
partition_buffers = [partition_buffers]
self._partition_gradients(param_list, partition_buffers=partition_buffers, accumulate=accumulate)
def aligned_size():
return self._aligned_size(param)
def padding_size():
return self._padding_size(param)
def partition_numel():
return self._partition_numel(param)
def item_override():
param.all_gather()
return param._orig_item()
def ds_summary(slf: torch.Tensor, use_debug_name: bool = False) -> dict:
return {
"id": debug_param2name_id(slf) if use_debug_name else slf.ds_id,
"status": slf.ds_status.name,
"numel": slf.numel(),
"ds_numel": slf.ds_numel,
"shape": tuple(slf.shape),
"ds_shape": tuple(slf.ds_shape),
"requires_grad": slf.requires_grad,
"grad_shape": tuple(slf.grad.shape) if slf.grad is not None else None,
"persist": slf.ds_persist,
"active_sub_modules": slf.ds_active_sub_modules,
"ds_tensor.shape": slf.ds_tensor.shape if slf.ds_tensor is not None else None
}
def convert_to_zero_parameters(param_list):
self._convert_to_zero_parameters(param_list)
def allgather_before(func: Callable) -> Callable:
def wrapped(*args, **kwargs):
param.all_gather()
return func(*args, **kwargs)
return wrapped
# Collectives for gathering and partitioning parameters
param.all_gather = all_gather
param.all_gather_coalesced = all_gather_coalesced
param.partition = partition
# Collective for averaging gradients
param.reduce_gradients_at_owner = reduce_gradients_at_owner
param.partition_gradients = partition_gradients
# Partitioning size utilities
param.aligned_size = aligned_size
param.padding_size = padding_size
param.partition_numel = partition_numel
param.ds_summary = types.MethodType(ds_summary, param)
param.item = allgather_before(param.item)
param.convert_to_zero_parameters = convert_to_zero_parameters
def _aligned_size(self, param):
return param.ds_numel + self._padding_size(param)
def _padding_size(self, param):
remainder = param.ds_numel % self.num_partitions
return (self.num_partitions - remainder) if remainder else 0
def _partition_numel(self, param):
return param.ds_tensor.ds_numel
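    # Worked example of the size helpers above (illustrative numbers, not from the
    # original file): with num_partitions = 4 and a parameter of ds_numel = 10,
    # _padding_size returns 4 - (10 % 4) = 2, _aligned_size returns 12, and each
    # rank's ds_tensor therefore holds 12 // 4 = 3 elements (the last rank's slice
    # contains one real value plus padding).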
def _ensure_availability_of_partitioned_params(self, params):
swap_in_list = []
swap_in_flight = []
for param in params:
if param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE:
assert param.ds_tensor.final_location == OffloadDeviceEnum.nvme and param.ds_status == ZeroParamStatus.NOT_AVAILABLE
swap_in_list.append(param)
if param.ds_tensor.status == PartitionedParamStatus.INFLIGHT:
assert param.ds_tensor.final_location == OffloadDeviceEnum.nvme and param.ds_status == ZeroParamStatus.NOT_AVAILABLE
swap_in_flight.append(param)
if len(swap_in_list) > 0:
swap_in_list[0].nvme_swapper.swap_in(swap_in_list, async_op=False)
elif len(swap_in_flight) > 0:
swap_in_flight[0].nvme_swapper.synchronize_reads()
@instrument_w_nvtx
def _all_gather(self, param_list, async_op=False, hierarchy=None):
# fetches from nvme if the partition is not available and in nvme
self._ensure_availability_of_partitioned_params(param_list)
handles = []
all_gather_list = []
for param in param_list:
if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
if async_op:
handle = self._allgather_param(param, async_op=async_op, hierarchy=hierarchy)
param.ds_status = ZeroParamStatus.INFLIGHT # if async_op else ZeroParamStatus.AVAILABLE
handles.append(handle)
else:
all_gather_list.append(param)
if not async_op:
if len(param_list) == 1:
ret_value = self._allgather_params(all_gather_list, hierarchy=hierarchy)
else:
ret_value = self._allgather_params_coalesced(all_gather_list, hierarchy)
for param in all_gather_list:
param.ds_status = ZeroParamStatus.AVAILABLE
return ret_value
return handles
def _partition(self, param_list, force=False, has_been_updated=False):
for param in param_list:
print_rank_0(f"Before Partitioning Param {param.ds_id}", force=False)
if self.zero_param_process_group is not None:
self._partition_param_sec(param, has_been_updated=has_been_updated)
self._partition_param(param, has_been_updated=has_been_updated)
param.ds_status = ZeroParamStatus.NOT_AVAILABLE
# if param.ds_tensor is not None:
# assert id(param.data) == id(param.ds_tensor.data), \
# "After the parameters are initially partitioned, make sure we are not recreating the partition."
#print_rank_0(f"After Partitioning Param {param.ds_id} {param.ds_tensor.size()} {param.ds_tensor}",force=False)
@instrument_w_nvtx
def _partition_param(self, param, buffer=None, has_been_updated=False):
assert param.ds_status is not ZeroParamStatus.INFLIGHT, f" {param} Cannot partition a param in flight"
global reuse_buffers
print_rank_0(f"Param id {param.ds_id} status is {param.ds_status}", force=False)
if param.ds_status is ZeroParamStatus.AVAILABLE:
print_rank_0(f"Partitioning param id {param.ds_id} reuse buffers {reuse_buffers}", force=False)
# if reuse_buffers and False:
# numel = buffer.numel()
# buffer = param.data.view(-1)
# print_rank_0(
# "Returning buffer for param {param.ds_id} with numel {param.ds_numel} to empty buffers",
# force=False)
# if numel in empty_buffers:
# empty_buffers[numel].append(buffer)
# if deepspeed.comm.get_rank():
# print(f"Releasing {param.data.numel()}")
if param.ds_tensor is not None and not has_been_updated: ##param already partitioned
#print_rank_0(f"Param {param.ds_id} pri {param.ds_tensor.size()} loc? {param.ds_tensor.final_location}", force=True)
#param.data = param.ds_tensor.data
see_memory_usage(f'Before partitioning param {param.ds_id} {param.shape}', force=False)
# param.data does not store anything meaningful in partitioned state
free_param(param)
see_memory_usage(f'After partitioning param {param.ds_id} {param.shape}', force=False)
if param.ds_tensor.final_location == OffloadDeviceEnum.nvme:
print_rank_0(f"Param {param.ds_id} partition released since it exists in nvme", force=False)
param.nvme_swapper.remove_partition_and_release_buffers([param])
print_rank_0(
f"after swap Param {param.ds_id} {param.ds_tensor.shape} partition released since it exists in nvme",
force=False)
return
tensor_size = self._aligned_size(param)
partition_size = tensor_size // self.num_partitions
if param.ds_tensor is None:
final_location = None
if self.remote_device == OffloadDeviceEnum.nvme and self.param_swapper.swappable_tensor(
numel=partition_size):
final_location = OffloadDeviceEnum.nvme
buffer = self.param_swapper.get_buffer(param, partition_size)
partitioned_tensor = torch.empty(0, dtype=param.dtype, device=buffer.device)
partitioned_tensor.data = buffer.data
print_rank_0(f"ID {param.ds_id} Initializing partition for the first time for nvme offload.")
else:
if param.ds_persist:
device = self.local_device
elif self.remote_device == OffloadDeviceEnum.nvme:
device = OffloadDeviceEnum.cpu
else:
device = self.remote_device
partitioned_tensor = torch.empty(partition_size, dtype=param.dtype, device=device)
if device == OffloadDeviceEnum.cpu and self.pin_memory:
partitioned_tensor = get_accelerator().pin_memory(partitioned_tensor)
partitioned_tensor.requires_grad = False
param.ds_tensor = partitioned_tensor
param.ds_tensor.ds_numel = partition_size
param.ds_tensor.status = PartitionedParamStatus.AVAILABLE
param.ds_tensor.final_location = final_location
start = partition_size * self.get_partition_rank()
end = start + partition_size
one_dim_param = param.contiguous().view(-1)
if start < param.ds_numel and end <= param.ds_numel:
src_tensor = one_dim_param.narrow(0, start, partition_size)
param.ds_tensor.copy_(src_tensor)
#partitioned_tensor = src_tensor.clone().detach().to(self.remote_device)
else:
# partitioned_tensor = torch.zeros(partition_size,
# dtype=param.dtype,
# device=self.remote_device )
if start < param.ds_numel:
elements_to_copy = param.ds_numel - start
param.ds_tensor.narrow(0, 0,
elements_to_copy).copy_(one_dim_param.narrow(0, start, elements_to_copy))
#print(f"Remote device {self.remote_device}")
#param.ds_tensor = partitioned_tensor
#param.data = param.ds_tensor.data
# param.data does not store anything meaningful in partitioned state
see_memory_usage(f'Before partitioning param {param.ds_id} {param.shape}', force=False)
free_param(param)
see_memory_usage(f'After partitioning param {param.ds_id} {param.shape}', force=False)
if param.ds_tensor.final_location == OffloadDeviceEnum.nvme:
self.param_swapper.swap_out_and_release([param])
print_rank_0(f"ID {param.ds_id} Offloaded to nvme offload and buffers released.")
see_memory_usage(f"ID {param.ds_id} Offloaded to nvme offload and buffers released.", force=False)
print_rank_0(f"ID {param.ds_id} partitioned type {param.dtype} dev {param.device} shape {param.shape}")
@instrument_w_nvtx
def _partition_param_sec(self, param, buffer=None, has_been_updated=False):
assert param.ds_status is not ZeroParamStatus.INFLIGHT, f" {param} Cannot partition a param in flight"
global reuse_buffers
##support for NVME secondary param offload
#print_rank_0(f"SEC Param id {param.ds_id} status is {param.ds_status}", force=True)
if param.ds_status is ZeroParamStatus.AVAILABLE:
if param.ds_secondary_tensor is not None and not has_been_updated: ##param already partitioned
return
#check padding
tensor_size = self._aligned_size(param)
partition_size = tensor_size // self.dp_world_size
secondary_partition_size = int(tensor_size // self.num_ranks_in_param_group)
if param.ds_secondary_tensor is None:
final_location = None
secondary_partitioned_tensor = torch.empty(secondary_partition_size,
dtype=param.dtype,
device=self.remote_device)
if self.pin_memory:
secondary_partitioned_tensor = secondary_partitioned_tensor.pin_memory()
secondary_partitioned_tensor.requires_grad = False
param.ds_secondary_tensor = secondary_partitioned_tensor
param.ds_secondary_tensor.ds_numel = secondary_partition_size
param.ds_secondary_tensor.status = PartitionedParamStatus.AVAILABLE
param.ds_secondary_tensor.final_location = final_location
#use rank in group for secondary tensor
secondary_start = secondary_partition_size * self.rank_in_group
secondary_end = secondary_start + secondary_partition_size
one_dim_param = param.contiguous().view(-1)
start = partition_size * self.rank
end = start + partition_size
if start < param.ds_numel and end <= param.ds_numel:
if secondary_start < param.ds_numel and secondary_end <= param.ds_numel:
sec_src_tensor = one_dim_param.narrow(0, secondary_start, secondary_partition_size)
param.ds_secondary_tensor.copy_(sec_src_tensor)
else:
if start < param.ds_numel:
elements_to_copy = param.ds_numel - start
elements_to_copy_sec = elements_to_copy * param.ds_secondary_tensor_num_of_groups
param.ds_secondary_tensor.narrow(0, 0, elements_to_copy_sec).copy_(
one_dim_param.narrow(0, secondary_start, elements_to_copy_sec))
print_rank_0(f"{param.ds_id} partitioned type {param.dtype} dev {param.device} shape {param.shape}",
force=False)
def _param_status(self, param):
if param.ds_tensor is not None:
print_rank_0(
f"Param id {param.ds_id}, param status: {param.ds_status}, param numel {param.ds_numel}, partitioned numel {param.ds_tensor.numel()}, data numel {param.data.numel()}"
)
else:
print_rank_0(
f"Param id {param.ds_id}, param status: {param.ds_status}, param numel {param.ds_numel}, partitioned ds_tensor {param.ds_tensor}, data numel {param.data.numel()}"
)
def _allgather_param(self, param, async_op=False, hierarchy=0):
partition_size = param.ds_tensor.ds_numel
tensor_size = partition_size * self.num_partitions
aligned_param_size = self._aligned_size(param)
assert tensor_size == aligned_param_size, f'param id {param.ds_id} aligned size {aligned_param_size} does not match tensor size {tensor_size}'
print_rank_0(
f"{'--'* hierarchy}---- Before allocating allgather param {debug_param2name_id_shape_status(param)} partition size={partition_size}"
)
see_memory_usage(
f'Before allocate allgather param {debug_param2name_id_shape_status(param)} partition_size={partition_size} ',
force=False)
flat_tensor = torch.zeros(aligned_param_size, dtype=param.dtype, device=param.device).view(-1)
see_memory_usage(
f'After allocate allgather param {debug_param2name_id_shape_status(param)} {aligned_param_size} {partition_size} ',
force=False)
get_accelerator().synchronize()
print_rank_0(
f"{'--'* hierarchy}----allgather param with {debug_param2name_id_shape_status(param)} partition size={partition_size}"
)
# if not flat_tensor.numel() > 100000:
# replicated_tensor = flat_tensor.narrow(0,
# 0,
# param.ds_numel).view(param.ds_shape)
# param.data = replicated_tensor.data
# return None
if self.use_all_gather_into_tensor:
handle = dist.all_gather_into_tensor(flat_tensor,
param.ds_tensor.to(get_accelerator().device_name()),
group=self.get_partition_dp_group(param),
async_op=async_op)
else:
partitions = []
for i in range(self.num_partitions):
partitions.append(flat_tensor.narrow(0, partition_size * i, partition_size))
if i == dist.get_rank(group=self.get_partition_dp_group(param)):
partitions[i].data.copy_(param.ds_tensor.data, non_blocking=True)
handle = dist.all_gather(partitions,
partitions[self.get_partition_rank()],
group=self.get_partition_dp_group(param),
async_op=async_op)
replicated_tensor = flat_tensor.narrow(0, 0, param.ds_numel).view(param.ds_shape)
param.data = replicated_tensor.data
return handle
def _allgather_params_coalesced(self, param_list, hierarchy=0):
""" blocking call
avoid explicit memory copy in _allgather_params
"""
if len(param_list) == 0:
return
if self.num_partitions == 1:
handle = _no_gather_coalesced(param_list)
handle.wait()
return None
# collect local tensors and partition sizes
partition_sizes = []
local_tensors = []
for param in param_list:
partition_sizes.append(param.ds_tensor.ds_numel)
local_tensors.append(param.ds_tensor.to(get_accelerator().device_name()))
# allocate memory for allgather params
allgather_params = []
for psize in partition_sizes:
tensor_size = psize * self.num_partitions
flat_tensor = torch.empty(tensor_size, dtype=param_list[0].dtype, device=self.local_device).view(-1)
flat_tensor.requires_grad = False
allgather_params.append(flat_tensor)
# launch
launch_handles = []
for param_idx, param in enumerate(param_list):
input_tensor = local_tensors[param_idx].view(-1)
if self.use_all_gather_into_tensor:
# try the _all_gather_base from Pytorch master
h = dist.all_gather_into_tensor(allgather_params[param_idx],
input_tensor,
group=self.get_partition_dp_group(param),
async_op=True)
else:
output_list = []
for i in range(self.num_partitions):
psize = partition_sizes[param_idx]
partition = allgather_params[param_idx].narrow(0, i * psize, psize)
output_list.append(partition)
if not get_accelerator().on_accelerator(partition):
logger.warning(
f'param {param_idx}, partition {i} is not on CUDA, partition shape {partition.size()}')
# back to old all_gather function
h = dist.all_gather(output_list, input_tensor, group=self.get_partition_dp_group(param), async_op=True)
launch_handles.append(h)
# Wait ensures the operation is enqueued, but not necessarily complete.
launch_handles[-1].wait()
# assign to param.data (not copy)
for i, param in enumerate(param_list):
gathered_tensor = allgather_params[i]
param.data = gathered_tensor.narrow(0, 0, param.ds_numel).view(param.ds_shape).data
# guarantee the communication to be completed
get_accelerator().synchronize()
return None
def _allgather_params(self, param_list, hierarchy=0):
if len(param_list) == 0:
return
partition_size = sum([param.ds_tensor.ds_numel for param in param_list])
tensor_size = partition_size * self.num_partitions
flat_tensor = torch.empty(tensor_size, dtype=param_list[0].dtype, device=self.local_device)
flat_tensor.requires_grad = False
partitions = []
for i in range(self.num_partitions):
start = partition_size * i
partitions.append(flat_tensor.narrow(0, start, partition_size))
if i == self.get_partition_rank():
offset = 0
for param in param_list:
param_numel = param.ds_tensor.ds_numel
partitions[i].narrow(0, offset, param_numel).copy_(param.ds_tensor.data)
offset += param_numel
dist.all_gather(partitions,
partitions[self.get_partition_rank()],
group=self.get_partition_dp_group(param),
async_op=False)
param_offset = 0
for param in param_list:
param_partition_size = param.ds_tensor.ds_numel
param_size = param.ds_numel
replicated_tensor = torch.empty(param.ds_shape, dtype=param.dtype, device=self.local_device)
for i in range(self.num_partitions):
start = i * partition_size
param_start = i * param_partition_size
if param_start < param_size:
numel_to_copy = min(param_size - param_start, param_partition_size)
part_to_copy = partitions[i].narrow(0, param_offset, numel_to_copy)
replicated_tensor.view(-1).narrow(0, param_start, numel_to_copy).copy_(part_to_copy)
#param_offset += param.data.numel()
param_offset += param.ds_tensor.ds_numel
param.data = replicated_tensor.data
return None
def _reduce_scatter_gradients(self, param_list):
#print_rank_0([param.grad for param in param_list])
#assert any([param.grad is None for param in param_list]), "None gradients cannot be reduce scattered"
handles_and_reduced_partitions = []
for param in param_list:
assert param.grad.numel(
) == param.ds_numel, f"{param.grad.numel()} != {param.ds_numel} Cannot reduce scatter gradients whose size is not same as the params"
handles_and_reduced_partitions.append(self._reduce_scatter_gradient(param))
for param, (handle, reduced_partition) in zip(param_list, handles_and_reduced_partitions):
if handle is not None:
handle.wait()
# some ranks may have partitions that are padded to go beyond the grad size.
# For these ranks the output of reduce scatter is a separate buffer and needs
# to be copied in
partition_size = param.ds_tensor.ds_numel
start = self.get_partition_rank() * partition_size
end = start + partition_size
#print_rank_0("REduce scatter was executed for param {param.ds_id}")
if start < param.ds_numel and end > param.ds_numel:
elements = param.ds_numel - start
param.grad.view(-1).narrow(0, start, elements).copy_(reduced_partition.narrow(0, 0, elements))
def _reduce_scatter_gradient(self, param):
partition_size = param.ds_tensor.ds_numel
#output = torch.empty(partition_size, dtype=param.dtype, device=param.device)
total_size = partition_size * self.num_partitions
input_list = []
for i in range(self.num_partitions):
start = i * partition_size
end = start + partition_size
#print("before reduce scatter gradients")
if start < param.ds_numel and end <= param.ds_numel:
input = param.grad.view(-1).narrow(0, start, partition_size)
else:
input = torch.zeros(partition_size, dtype=param.dtype, device=param.device)
if start < param.ds_numel:
elements = param.ds_numel - start
input.narrow(0, 0, elements).copy_(param.grad.view(-1).narrow(0, start, elements))
#print("after reduce scatter gradients")
input_list.append(input)
rank = dist.get_rank(group=self.get_partition_dp_group(param))
handle = dist.reduce_scatter(input_list[rank],
input_list,
group=self.get_partition_dp_group(param),
async_op=True)
return handle, input_list[rank]
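    # Worked trace of the padded reduce-scatter above (illustrative numbers, not from
    # the original file): with ds_numel = 10 and 4 partitions, partition_size = 3.
    # Ranks 0-2 pass views of param.grad directly as their input slices, so the
    # reduced result lands in the gradient in place. Rank 3 (start=9, end=12 > 10)
    # builds a zero buffer of 3 elements and copies the single remaining grad value
    # into it; in _reduce_scatter_gradients that rank's reduced output lives in the
    # separate buffer, so its first 10 - 9 = 1 element is copied back into
    # param.grad[9:10] after handle.wait().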
def _partition_gradients(self, param_list, partition_buffers=None, accumulate=False):
if partition_buffers is None:
partition_buffers = [None] * len(param_list)
for param, partition_buffer in zip(param_list, partition_buffers):
self._partition_gradient(param, partition_buffer=partition_buffer, accumulate=accumulate)
def _partition_gradient(self, param, partition_buffer=None, accumulate=False):
#import pdb;pdb.set_trace()
# param.grad=None
# param.grad.test()
print_rank_0(
f"Partitioning param {param.ds_id} gradient of size {param.grad.numel()} type {param.grad.dtype} part_size {param.ds_tensor.ds_numel}"
)
see_memory_usage("Before partitioning gradients", force=False)
partition_size = param.ds_tensor.ds_numel
if partition_buffer is None:
assert not accumulate, "No buffer to accumulate to"
partition_buffer = torch.zeros(partition_size, dtype=param.dtype, device=param.device)
else:
            assert partition_buffer.numel(
            ) >= partition_size, f"The partition buffer size {partition_buffer.numel()} must be at least the gradient partition size {partition_size}"
rank = dist.get_rank(group=self.get_partition_dp_group(param))
start = partition_size * rank
end = start + partition_size
dest_tensor_full_buffer = partition_buffer.view(-1).narrow(0, 0, partition_size)
#print("before partition gradients")
if start < param.ds_numel:
elements = min(param.ds_numel - start, partition_size)
dest_tensor = dest_tensor_full_buffer.narrow(0, 0, elements)
src_tensor = param.grad.view(-1).narrow(0, start, elements)
# just copy the grad partition to the buffer
if not accumulate:
dest_tensor.copy_(src_tensor)
# if source and destination are on same device,
# add to the provided buffer
elif src_tensor.device == dest_tensor.device:
dest_tensor.add_(src_tensor)
# if source and destination are on different device, copy first to src
# then add and move back to the destination. This seems to run faster
# when src is gpu and dest is cpu
# adding directly to cpu is very slow
else:
acc_tensor = torch.empty(src_tensor.numel(), dtype=param.dtype, device=param.device)
acc_tensor.copy_(dest_tensor)
acc_tensor.add_(src_tensor)
dest_tensor.copy_(acc_tensor)
# partition_buffer.view(-1).narrow(
# 0,
# 0,
# elements).copy_(param.grad.view(-1).narrow(0,
# start,
# elements))
#print("after partition gradients")
param.grad.data = dest_tensor_full_buffer.data
see_memory_usage("After partitioning gradients", force=False)
def get_partition_dp_group(self, param):
return param.ds_process_group
def get_partition_rank(self):
"""subclass can overload to specify different relative rank in
parameter partition group"""
return self.rank
@property
def num_partitions(self):
return self.dp_world_size
def get_dp_process_group(self):
""" Return the communication group with all data-parallel ranks """
return self.ds_process_group
class GatheredParameters:
def __init__(self, params, modifier_rank=None, fwd_module=None, enabled=True):
"""A context that collects parameters that were partitioned via a
:class:`deepspeed.zero.Init` context. The parameters are partitioned
again upon exit.
Args:
            params (``torch.nn.Parameter``): A single parameter, or an iterable (list, tuple, generator) of parameters to collect.
                It's assumed that all parameters are zero params.
modifier_rank (int, optional): If specified, this rank's parameter will be
broadcasted on exit from the context. This argument is required if ``params`` are
modified, so that all processes have a consistent view of the data. Defaults
to ``None``.
fwd_module (``torch.nn.Module``, optional): If specified, ``params`` will be
registered as external parameters of ``fwd_module``. See :meth:`deepspeed.zero.register_external_parameter`.
enabled (bool, optional): If ``False``, this context is a no-op. Defaults to ``True``.
        Important: Make sure to use a ``modifier_rank`` that is not ``None`` (e.g., ``modifier_rank=0``)
        if you need the GPU memory allocated by gather to be released upon exit from the context manager.
Important: if ``params`` isn't an iterable of parameters or a single parameter it'll be silently ignored!
Examples
========
#. Allocate a partitioned module, initialize its weight on rank 0, and update all
processes.
.. code-block:: python
with deepspeed.zero.Init():
linear = torch.nn.Linear(1000,1000)
with deepspeed.zero.GatheredParameters(linear.weight,
modifier_rank=0):
if deepspeed.comm.get_rank() == 0:
linear.weight.zero_()
#. Collect a partitioned weight to pass to another module during
training. The parameter will be registered as an external parameter
and made available during the backward pass.
.. code-block:: python
:emphasize-lines: 6
def forward(self, input):
x = self.layer1(input)
# self.layer1.weight is required by self.layer2.forward
with deepspeed.zero.GatheredParameters(self.layer1.weight,
fwd_module=self):
y = self.layer2(x, self.layer1.weight)
return y
#. Pretrained model loading
.. code-block:: python
with deepspeed.zero.Init():
model = MyModel()
state_dict = torch.load(model_path, map_location="cpu")
def load(module: nn.Module, prefix=""):
# because zero3 puts placeholders in model params, this context
# manager gathers (unpartitions) the params of the current layer, then loads from
# the state dict and then re-partitions them again
with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0):
if deepspeed.comm.get_rank() == 0:
module._load_from_state_dict(state_dict, prefix)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
load(model, prefix="")
If this approach is not used, then the full model will first be copied to each GPU. For models
bigger than the memory of a single GPU, this method is required.
"""
self.enabled = enabled
if not enabled:
return
if isinstance(params, Iterable) and not isinstance(params, torch.Tensor):
# deal with generators like model.parameters()
# must convert to list to be able to iterate more than once if we get a generator
params = list(params)
else:
# single param
params = [params]
# enable if at least one is zero-param, otherwise a noop
if not any(is_zero_param(p) for p in params):
self.enabled = False
return
self.params = [p for p in params if hasattr(p, "ds_id")]
self.params = sorted(
set(self.params), key=lambda x: x.ds_id
) # remove the duplicates to prevent racing condition, we must also make sure the order is the same on all ranks otherwise we'll get deadlocks
self.src_rank = None
if modifier_rank is not None:
if self.params[0].ds_process_group == dist.get_world_group():
self.src_rank = modifier_rank
else:
# A group was specified; convert DP rank to global rank
self.src_rank = dist.get_global_rank(self.params[0].ds_process_group, modifier_rank)
self.fwd_module = fwd_module
if self.fwd_module is not None:
# is a no-op if already registered
for p in self.params:
register_external_parameter(self.fwd_module, p)
def __enter__(self):
if not self.enabled:
return
self.params[0].all_gather(param_list=self.params)
def __exit__(self, *exc):
if not self.enabled:
return
if self.src_rank is None:
self.params[0].partition(param_list=self.params, has_been_updated=False)
return
handles = [dist.broadcast(p, self.src_rank, group=p.ds_process_group, async_op=True) for p in self.params]
for h in handles:
h.wait()
self.params[0].partition(param_list=self.params, has_been_updated=True)
| 87,015 | 44.062662 | 182 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/zero/stage3.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import sys
import gc
import collections
from typing import Deque, Dict, Tuple
from deepspeed import comm as dist
from deepspeed.utils import groups
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from deepspeed.runtime import ZeROOptimizer
from deepspeed.utils import logger
from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler
from deepspeed.runtime.comm.coalesced_collectives import reduce_scatter_coalesced, all_to_all_quant_reduce
from deepspeed.runtime.utils import inf, get_global_norm, is_model_parallel_parameter
from deepspeed.runtime.zero.partition_parameters import *
from deepspeed.runtime.zero.config import ZeroStageEnum
from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum
from deepspeed.runtime.zero.parameter_offload import DeepSpeedZeRoOffload
from deepspeed.ops.adam import DeepSpeedCPUAdam
from deepspeed.runtime.swap_tensor.partitioned_param_swapper import PartitionedParamStatus
from deepspeed.runtime.swap_tensor.partitioned_optimizer_swapper import PartitionedOptimizerSwapper
from deepspeed.runtime.swap_tensor.pipelined_optimizer_swapper import PipelinedOptimizerSwapper
from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, FP32_FLAT_GROUPS, PARTITION_COUNT, ZERO_STAGE
from deepspeed.accelerator import get_accelerator
# Toggle this to true to enable correctness test
# with gradient partitioning and without
pg_correctness_test = False
def print_rank_0(message, debug=False, force=False):
rank = dist.get_rank()
if rank == 0 and (debug or force):
logger.info(message)
# other variations
# - print for all ranks w/o interleaving
# printflock(f"[{rank}] {message}")
# - print to log file per rank
# log_rank_file(rank, message)
def input(msg):
return
def isclose(a, b, rtol=1e-09, atol=0.0):
return abs(a - b) <= max(rtol * max(abs(a), abs(b)), atol)
def lcm(x, y):
    from math import gcd  # fractions.gcd was removed in Python 3.9; math.gcd is the supported equivalent
return x * y // gcd(x, y)
def move_to_cpu(tensor_list):
for tensor in tensor_list:
tensor.data = tensor.data.cpu()
INITIAL_MICRO_STEP_ID = -1
class DeepSpeedZeroOptimizer_Stage3(ZeROOptimizer):
"""
DeepSpeedZeroOptimizer designed to reduce the memory footprint
required for training large deep learning models.
For more details please see ZeRO: Memory Optimization Towards Training A Trillion Parameter Models
https://arxiv.org/abs/1910.02054
For usage examples, refer to TODO: DeepSpeed Tutorial
"""
def __init__(self,
module,
init_optimizer,
timers,
ds_config,
static_loss_scale=1.0,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=True,
contiguous_gradients=True,
reduce_bucket_size=500000000,
prefetch_bucket_size=50000000,
max_reuse_distance=1000000000,
max_live_parameters=1000000000,
param_persistence_threshold=100000,
model_persistence_threshold=sys.maxsize,
dp_process_group=None,
reduce_scatter=True,
overlap_comm=False,
offload_optimizer_config=None,
offload_param_config=None,
sub_group_size=1000000000000,
mpu=None,
clip_grad=0.0,
communication_data_type=torch.float16,
postscale_gradients=True,
gradient_predivide_factor=1.0,
gradient_accumulation_steps=1,
elastic_checkpoint=False,
aio_config=None,
all2all_process_group=None,
zero_hpz_partition_size=1,
zero_quantized_weights=False):
see_memory_usage("Stage 3 initialize beginning", force=True)
print_rank_0(f"initialized {__class__.__name__} with args: {locals()}", force=False)
if dist.get_rank() == 0:
logger.info(f"Reduce bucket size {reduce_bucket_size}")
logger.info(f"Prefetch bucket size {prefetch_bucket_size}")
        # The fused optimizer does all the work. We need this layer for two reasons:
        # 1. maintain same user API from apex.fp16_utils
        # 2. keep common stuff here in case we need to add new fused optimizer later
# differences from apex.fp16_utils:
# - assume all model params in fp16
# - assume all params requires grad
# - flat by groups, not keeping state. TODO: remove state explicitly?
# - master grad and unflat master weight never exist. TODO: a way to save out unflat master?
if not get_accelerator().is_available():
raise SystemError("Cannot use fp16 without accelerator.")
self.optimizer = init_optimizer
# Use torch (un)flatten ops
self.flatten = _flatten_dense_tensors
self.unflatten = _unflatten_dense_tensors
self.dtype = self.optimizer.param_groups[0]['params'][0].dtype
self._global_grad_norm = 0.
self.custom_loss_scaler = False
self.external_loss_scale = None
self.optimizer_swapper = None
self.swap_optimizer = False
self.offload_optimizer = False
self.offload_optimizer_pin_memory = False
self.offload_optimizer_fast_init = False
self.offload_param = False
self.offload_param_pin_memory = False
self.params_in_nvme_and_cpu = False
self.max_params_in_cpu = 0
#num of ranks in a ZeRO param partitioning group
self.zero_hpz_partition_size = zero_hpz_partition_size
zpg = groups._get_zero_param_intra_parallel_group()
print_rank_0(f"ZeRO Stage 3 param partitioning group {self.zero_hpz_partition_size} {zpg}", force=False)
if self.zero_hpz_partition_size > 1 and zpg is None:
self._set_zero_group_parallelism()
zpg = groups._get_zero_param_intra_parallel_group()
self.parameter_offload = self.initialize_ds_offload(module=module,
timers=timers,
ds_config=ds_config,
overlap_comm=overlap_comm,
prefetch_bucket_size=prefetch_bucket_size,
max_reuse_distance=max_reuse_distance,
max_live_parameters=max_live_parameters,
param_persistence_threshold=param_persistence_threshold,
model_persistence_threshold=model_persistence_threshold,
offload_param_config=offload_param_config,
mpu=mpu,
zpg=zpg,
zero_quantized_weights=zero_quantized_weights)
self.persistent_parameters = self.parameter_offload.persistent_parameters
self._configure_offloading(offload_optimizer_config, offload_param_config)
self.module = module
self.elastic_checkpoint = elastic_checkpoint
self.inf_or_nan_tracker: Tensor = torch.zeros(1,
dtype=torch.bool,
device=get_accelerator().current_device_name(),
requires_grad=False)
self.deepspeed_adam_offload = (self.offload_optimizer and type(init_optimizer) == DeepSpeedCPUAdam)
self.device = get_accelerator().current_device_name() if not self.offload_optimizer else OffloadDeviceEnum.cpu
### streams used for overlapping computation with communication
self.reduce_and_partition_stream = get_accelerator().Stream() if overlap_comm else get_accelerator(
).default_stream()
############################################################################
self.n_caching_allocator_flushes = 0
#-------------Stage 3 Setup-------------------#
self.timers = timers
self.all2all_process_group = all2all_process_group
self.reduce_scatter = reduce_scatter
self.dp_process_group = dp_process_group
self.all2all_process_group = all2all_process_group
self.partition_count = dist.get_world_size(group=self.dp_process_group)
if mpu is None:
self.model_parallel_group = None
self.model_parallel_rank = 0
else:
self.model_parallel_group = mpu.get_model_parallel_group()
self.model_parallel_rank = mpu.get_model_parallel_rank()
self.overflow = False
self.clip_grad = clip_grad
self.communication_data_type = communication_data_type
self.gradient_predivide_factor = gradient_predivide_factor
self.postscale_gradients = postscale_gradients
self.gradient_accumulation_steps = gradient_accumulation_steps
self.micro_step_id = 0
self.reduce_bucket_size = int(reduce_bucket_size)
if self.all2all_process_group is not None:
            assert self.all2all_process_group is not None and self.reduce_scatter == True, "when all_to_all_reduce is enabled, reduce_scatter should also be enabled for data type checks."
if self.reduce_scatter:
valid_reduce_scatter_dtypes = (torch.float16, torch.bfloat16, torch.float32)
assert self.communication_data_type in valid_reduce_scatter_dtypes, f"ZeRO-3 supports {valid_reduce_scatter_dtypes} communication_data_type with reduce scatter enabled. Got: '{self.communication_data_type}'"
assert self.gradient_predivide_factor == 1.0, "gradient_predivide_factor != 1.0 is not yet supported with ZeRO-3 with reduce scatter enabled"
assert self.postscale_gradients, "pre-scale gradients is not yet supported with ZeRO-3 with reduce scatter enabled"
        # Holds the model parameters
        # The param.data may not hold any meaningful data
        # when param's status is NOT_AVAILABLE or IN_FLIGHT
self.fp16_groups = []
# Hold partitioned parameters
self.fp16_partitioned_groups = []
# Holds a fused and flattened copy of the parameters
self.fp16_partitioned_groups_flat = []
self.fp16_partitioned_groups_flat_numel = []
#defragmented pinned memory
self.param_groups_fp16_flat_cpu_memory = []
#a single 32-bit partition of the parallel partitioned parameters
#that this process will update
self.fp32_partitioned_groups_flat = []
self.next_swappable_fp32_partitioned_groups = []
# number of elements per partition in each group
self.partition_size = []
self.all_reduce_print = False
self.prefetch_elements = int(prefetch_bucket_size)
self.contiguous_gradients = contiguous_gradients
# padding on each partition for alignment purposes
self.groups_padding = []
self.sub_group_size = sub_group_size
self.sub_group_to_group_id = {}
# Trainable parameters
self.trainable_param_groups = self._get_trainable_parameter_groups()
see_memory_usage("Before creating fp16 partitions", force=True)
self._create_fp16_partitions_with_defragmentation(self.trainable_param_groups)
num_fp16_subgroups = len(self.fp16_partitioned_groups_flat)
see_memory_usage(f"After creating fp16 partitions: {num_fp16_subgroups}", force=True)
# Optimizer tensor swapping
if self.swap_optimizer:
self._configure_tensor_swapping(offload_optimizer_config, aio_config)
self.is_gradient_accumulation_boundary: bool = True
self.param_reduce_events: Deque[get_accelerator().Event] = collections.deque()
# TODO. make this configurable via JSON
self.max_param_reduce_events: int = 2
self.param_dict = {}
# map between param_id and bool to specify if a param is in this partition
self.is_param_in_current_partition = {}
self.extra_large_param_to_reduce = None
self.grads_in_ipg_bucket = []
self.params_in_ipg_bucket = []
self.is_gradient_accumulation_boundary = True
self._release_ipg_buffers()
self.previous_reduced_grads = None
# simplified param id
self.param_id = {}
count = 0
for i, params_group in enumerate(self.fp16_groups):
for param in params_group:
unique_id = id(param)
self.param_id[unique_id] = count
self.param_dict[count] = param
count = count + 1
#Largest partitioned param
largest_partitioned_param_numel = max([
max([max(tensor.numel(), tensor.ds_numel) for tensor in fp16_partitioned_group])
for fp16_partitioned_group in self.fp16_partitioned_groups
])
print_rank_0(f'Largest partitioned param numel = {largest_partitioned_param_numel}', force=False)
self._setup_for_real_optimizer()
self.grad_position = {}
self.set_grad_positions()
if self.offload_optimizer:
self.norm_for_param_grads = {}
# stores if a partition has been reduced in this step
self.is_partition_reduced = {}
# stores if a grad in a partition has been computed or not
self.is_grad_computed = {}
# will store the averaged gradients required by this partition
self.averaged_gradients = {}
#creates backward hooks for gradient partitioning
###Calls all gather param
self.create_reduce_and_remove_grad_hooks()
#exit(0)
# we may have a way of fusing dynamic scale. Do not support for now
self.loss_scaler = CreateLossScaler(dtype=self.dtype,
static_loss_scale=static_loss_scale,
dynamic_scaling=dynamic_loss_scale,
dynamic_loss_args=dynamic_loss_args)
self.dynamic_loss_scale = self.loss_scaler.dynamic
self.debug_fp16_grads = [{} for _ in self.fp16_groups]
self._link_all_hp_params()
if dist.get_rank(group=self.dp_process_group) == 0:
see_memory_usage(f"After initializing ZeRO optimizer", force=True)
def destroy(self):
self.parameter_offload.destroy()
del self.__ipg_bucket_flat_buffer
def initialize_ds_offload(
self,
module,
timers,
ds_config,
overlap_comm,
prefetch_bucket_size,
max_reuse_distance,
max_live_parameters,
param_persistence_threshold,
model_persistence_threshold,
offload_param_config,
mpu,
zpg,
zero_quantized_weights,
):
return DeepSpeedZeRoOffload(module=module,
timers=timers,
ds_config=ds_config,
overlap_comm=overlap_comm,
prefetch_bucket_size=prefetch_bucket_size,
max_reuse_distance=max_reuse_distance,
max_live_parameters=max_live_parameters,
param_persistence_threshold=param_persistence_threshold,
model_persistence_threshold=model_persistence_threshold,
offload_param_config=offload_param_config,
mpu=mpu,
zero_param_parallel_group=zpg,
zero_quantized_weights=zero_quantized_weights)
def _get_trainable_parameter_groups(self):
param_groups = []
for param_group in self.optimizer.param_groups:
trainable_params = {"params": [p for p in param_group["params"] if p.requires_grad]}
param_groups.append(trainable_params)
return param_groups
def _set_zero_group_parallelism(self):
groups._create_zero_param_parallel_group(self.zero_hpz_partition_size)
def invalidate_secondary_tensor(self):
for fpg in self.fp16_groups:
for param in fpg:
if param.ds_secondary_tensor is not None:
param.ds_secondary_tensor = None
def _setup_for_real_optimizer(self):
see_memory_usage("Before creating fp32 partitions", force=True)
self._create_fp32_partitions()
see_memory_usage("After creating fp32 partitions", force=True)
dist.barrier()
# To support pipelined optimizer swapping
self._create_next_swappable_fp32_groups()
see_memory_usage("Before initializing optimizer states", force=True)
self.initialize_optimizer_states()
see_memory_usage("After initializing optimizer states", force=True)
dist.barrier()
if dist.get_rank() == 0:
logger.info(f"optimizer state initialized")
# IPG
if self.contiguous_gradients:
self.__ipg_bucket_flat_buffer: Tensor = torch.empty(self.reduce_bucket_size,
dtype=self.dtype,
device=get_accelerator().current_device_name())
self.grad_partitions_flat_buffer = None
self.__param_id_to_grad_partition: Dict[int, Tensor] = {}
all_params = list(itertools.chain.from_iterable(self.fp16_groups))
self.grad_partitions_flat_buffer: Tensor = torch.zeros(sum(p.partition_numel() for p in all_params),
dtype=self.dtype,
device=self.device)
if self.offload_optimizer_pin_memory:
self.grad_partitions_flat_buffer = get_accelerator().pin_memory(self.grad_partitions_flat_buffer)
offset = 0
for param in all_params:
self.__param_id_to_grad_partition[param.ds_id] = self.grad_partitions_flat_buffer.narrow(
0, offset, param.partition_numel())
offset += param.partition_numel()
def _link_all_hp_params(self):
for p in self.module.parameters():
p._z3_optimizer = self
def set_lr(self, lr):
"""Set the learning rate."""
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr
def get_lr(self):
"""Return the current learning rate."""
return self.optimizer.param_groups[0]["lr"]
# TODO. factor out to a utility outside of stage3
@staticmethod
def defragment(tensors: List[Tensor]) -> Tensor:
"""move provided tensors into a contiguous flat buffer, with some additional
measures taken to reduce memory fragmentation"""
assert len(set(t.dtype for t in tensors)) == 1
assert len(set(t.device for t in tensors)) == 1
cpu_buffer = torch.empty(sum(p.numel() for p in tensors),
dtype=get_only_unique_item(t.dtype for t in tensors),
device="cpu")
tensor_infos: List[Tuple[Tensor, int, int]] = []
orig_device = get_only_unique_item(t.device for t in tensors)
offset = 0
for tensor in tensors:
tensor_numel = tensor.numel()
# move the tensor from device memory to host memory
cpu_buffer.narrow(0, offset, tensor_numel).copy_(tensor)
tensor.data = torch.empty(0, dtype=tensor.dtype, device=tensor.device)
# record some data so we can restore the device tensor later
tensor_infos.append((tensor, offset, tensor_numel))
offset += tensor_numel
gc.collect()
get_accelerator().empty_cache()
# copy tensors (now flattened and contiguous) back to GPU
device_buffer = cpu_buffer.to(orig_device)
# restore device tensors
for tensor, offset, tensor_numel in tensor_infos:
tensor.data = device_buffer.narrow(0, offset, tensor_numel)
return device_buffer
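    # A minimal usage sketch for defragment() (illustrative only; the tensor names below
    # are made up). After the call, each input tensor becomes a narrow view into the
    # single returned flat buffer, so the original allocations can be released:
    #
    #   a = torch.ones(4, device=get_accelerator().current_device_name())
    #   b = torch.zeros(6, device=get_accelerator().current_device_name())
    #   flat = DeepSpeedZeroOptimizer_Stage3.defragment([a, b])
    #   assert flat.numel() == 10 and a.data_ptr() == flat.data_ptr()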
def _get_param_coordinator(self, training):
return self.parameter_offload.get_param_coordinator(training)
def _configure_offloading(self, offload_optimizer_config, offload_param_config):
###################### offload optimizer setup ##################################
if offload_optimizer_config is not None and offload_optimizer_config.device != OffloadDeviceEnum.none:
self.offload_optimizer = True
self.offload_optimizer_pin_memory = offload_optimizer_config.pin_memory
self.swap_optimizer = offload_optimizer_config.device == OffloadDeviceEnum.nvme
self.offload_optimizer_fast_init = offload_optimizer_config.fast_init
###################### offload param setup ##################################
if offload_param_config is not None and offload_param_config.device != OffloadDeviceEnum.none:
self.offload_param = True
self.offload_param_pin_memory = offload_param_config.pin_memory
self.params_in_nvme_and_cpu = offload_param_config.device == OffloadDeviceEnum.nvme
self.max_params_in_cpu = offload_param_config.max_in_cpu
print_rank_0(
f"FP16 params swapping is {self.params_in_nvme_and_cpu}, Max params in CPU is {self.max_params_in_cpu}",
force=False)
def _configure_tensor_swapping(self, offload_optimizer_config, aio_config):
nvme_swap_folder = os.path.join(offload_optimizer_config.nvme_path, 'zero_stage_3')
os.makedirs(nvme_swap_folder, exist_ok=True)
if dist.get_rank() == 0:
logger.info(f'Tensor Swapping: Adding optimizer tensors')
swapper_type = PipelinedOptimizerSwapper if offload_optimizer_config.pipeline else PartitionedOptimizerSwapper
self.optimizer_swapper = swapper_type(swap_config=offload_optimizer_config,
aio_config=aio_config,
base_folder=nvme_swap_folder,
optimizer=self.optimizer,
largest_numel=max(self.fp16_partitioned_groups_flat_numel),
device=self.device,
dtype=torch.float32,
timers=self.timers)
@property
def elements_in_ipg_bucket(self):
return sum(p.ds_numel for p in self.params_in_ipg_bucket)
def _move_to_flat_buffer(self, param_list, flat_buffer, avoid_copy=False):
        '''If flat_buffer is None then the parameters in param_list are
        not copied to the flat buffer. This is because they exceed max_params_in_cpu.
        Some of these parameters may already be in CPU in unflattened buffers,
        or they may be in GPU, or they may be in NVMe. If they are in NVMe, then
        they will be marked as NOT_AVAILABLE, and will be moved to CPU when they are
        needed during training.'''
if flat_buffer is None:
# this dst buffer is on NVMe, so skip this
return
start = 0
for param in param_list:
src = param.ds_tensor
dest = flat_buffer.narrow(0, start, src.ds_numel)
start = start + src.ds_numel
'''if the parameter was initialized in nvme then bring it to the destination buffer directly'''
if src.status == PartitionedParamStatus.NOT_AVAILABLE:
print_rank_0(
f"Swapping in {param.ds_id} with partition size {param.partition_numel()} permanently to CPU")
param.nvme_swapper.swap_into_buffer(param, dest)
src.data = dest.data
src.status = PartitionedParamStatus.AVAILABLE
else:
assert src.status == PartitionedParamStatus.AVAILABLE, "Partitioned Param must be available here"
if not avoid_copy:
dest.data.copy_(src.data)
src.data = dest.data
# Final location must be gpu/cpu in this case
param.ds_tensor.final_location = 'not-nvme'
def _create_param_groups_fp16_flat_cpu_memory(self):
aggregate_params_count = 0
for j, param_group in enumerate(self.trainable_param_groups):
params_in_group = sum([p.partition_numel() for p in param_group['params']])
flat_buffer_size = params_in_group
if self.params_in_nvme_and_cpu and \
aggregate_params_count + params_in_group > self.max_params_in_cpu:
flat_buffer_size = max(0, self.max_params_in_cpu - aggregate_params_count)
aggregate_params_count += params_in_group
if flat_buffer_size > 0:
print_rank_0(f"group {j} flat buffer size {flat_buffer_size}", force=False)
self.param_groups_fp16_flat_cpu_memory.append(get_accelerator().pin_memory(
torch.empty(int(flat_buffer_size), dtype=self.dtype)))
else:
print_rank_0(f"No flat buffer size. Param group size was {params_in_group}", force=False)
self.param_groups_fp16_flat_cpu_memory.append(torch.empty(1, dtype=self.dtype))
def _create_fp16_partitions_with_defragmentation(self, fp16_param_groups):
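        # Carves the trainable parameters into sub-groups, records per-sub-group
        # bookkeeping, and packs their partitioned ds_tensors into contiguous flat
        # buffers: on the accelerator when parameters stay resident, or into pinned CPU
        # (and possibly NVMe-backed) buffers when parameter offload is enabled.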
dist.barrier()
param_groups: List[List[Parameter]] = tuple(
self._create_fp16_sub_groups(param_group["params"]) for param_group in fp16_param_groups)
# bookkeeping related to param groups
for param_group_idx, param_group in enumerate(param_groups):
for sub_group in param_group:
sub_group_idx = len(self.fp16_groups)
# record sub group and partitions
self.fp16_groups.append(sub_group)
self.fp16_partitioned_groups.append([param.ds_tensor for param in sub_group])
# record sub group -> group mapping
self.sub_group_to_group_id[sub_group_idx] = param_group_idx
# record total elements of parameter partitions in sub group
self.fp16_partitioned_groups_flat_numel.append(sum(p.partition_numel() for p in sub_group))
# record padding required to align group to world size (only applies to last rank)
rank_requires_padding = dist.get_rank(
self.dp_process_group) == dist.get_world_size(self.dp_process_group) - 1
self.groups_padding.append([p.padding_size() if rank_requires_padding else 0 for p in sub_group])
# move parameters to flattened buffer
if not self.offload_param: # partitioned params remain in GPU during training
# move parameter partitions into a single contiguous flat buffer
parameter_partitions: List[Tensor] = []
for sub_group in self.fp16_groups:
for param in sub_group:
parameter_partitions.append(param.ds_tensor)
device_buffer = __class__.defragment(parameter_partitions)
# setup flat buffers per subgroup, these are each just sections of the
# contiguous flat buffer for all parameters that we created earlier
offset = 0
for sub_group in self.fp16_groups:
sub_group_numel = sum(param.partition_numel() for param in sub_group)
self.fp16_partitioned_groups_flat.append(device_buffer.narrow(0, offset, sub_group_numel))
offset += sub_group_numel
else: # partitioned params offloaded to CPU when not in use
# create a flat CPU memory allocation for each param group
self._create_param_groups_fp16_flat_cpu_memory()
for param_group_idx, param_group in enumerate(param_groups):
flat_offset = 0
for i, sub_group in enumerate(param_group):
total_elements = sum(p.partition_numel() for p in sub_group)
print_rank_0(f"Params in nvme and cpu {self.params_in_nvme_and_cpu}")
#Flat buffer may not be available for parameters that reside in NVME
if not self.params_in_nvme_and_cpu or flat_offset + total_elements <= self.param_groups_fp16_flat_cpu_memory[
param_group_idx].numel():
fp16_partitioned_group_flat = self.param_groups_fp16_flat_cpu_memory[param_group_idx].narrow(
0, flat_offset, total_elements)
print_rank_0(
f"Creating a flat buffer for subgroup {i} requiring {total_elements} elements, and cumulative CPU elements {flat_offset + total_elements}",
force=False)
elif self.params_in_nvme_and_cpu:
fp16_partitioned_group_flat = None
print_rank_0(f"No flat buffer for sub group {i} of {total_elements} elements", force=False)
else:
assert False, "Either params are in nvme, or they are in CPU memory. This code path should not be triggered. Please see you max_params_in_cpu and params_in_nvme configs"
self.fp16_partitioned_groups_flat.append(fp16_partitioned_group_flat)
flat_offset += total_elements
self._move_to_flat_buffer(sub_group,
fp16_partitioned_group_flat,
avoid_copy=not self.offload_param)
# if necessary, create a pinned memory buffer to be used for swapping out
# params to NVME after optimizer step
should_create_fp16_flat_reuse_buffer = any(flattened_partition_group is None
for flattened_partition_group in self.fp16_partitioned_groups_flat)
if should_create_fp16_flat_reuse_buffer:
max_partition_numel, largest_partition_numel = 0, None
for sub_group in self.fp16_groups:
total_elements = sum(t.partition_numel() for t in sub_group)
if total_elements > max_partition_numel:
largest_partition_numel = [t.ds_numel for t in sub_group]
max_partition_numel = total_elements
assert len(largest_partition_numel) > 0, f'Unexpected that largest partition is empty'
self.fp16_groups[0][0].nvme_swapper.reserve_partitioned_swap_space(largest_partition_numel)
def _swap_in_sub_group_to_flat_buffer(self, flat_buffer, sub_group_id):
offset = 0
elements_in_sub_group = sum([t.ds_numel for t in self.fp16_partitioned_groups[sub_group_id]])
assert (flat_buffer.numel() == elements_in_sub_group)
for param, partitioned_param in zip(self.fp16_groups[sub_group_id],
self.fp16_partitioned_groups[sub_group_id]):
dest = flat_buffer.narrow(0, offset, partitioned_param.ds_numel)
if partitioned_param.status == PartitionedParamStatus.NOT_AVAILABLE:
print_rank_0(
f"Swapping in {param.ds_id} with elements {param.ds_numel} and partition {param.partition_numel()}"
)
param.nvme_swapper.swap_in([param], async_op=False)
dest.data.copy_(partitioned_param.data)
param.nvme_swapper.remove_partition_and_release_buffers([param])
print_rank_0(f"Swapping in {param.ds_id} done")
else:
dest.data.copy_(partitioned_param.data)
offset += partitioned_param.ds_numel
def _create_next_swappable_fp32_groups(self):
        reverse_order_indices = list(reversed(range(len(self.fp32_partitioned_groups_flat))))
next_group = None
for i in reverse_order_indices:
self.next_swappable_fp32_partitioned_groups.append(next_group)
if self._swappable_optimizer_subgroup(i):
next_group = self.fp32_partitioned_groups_flat[i]
self.next_swappable_fp32_partitioned_groups.reverse()
def _get_sub_group_partitions(self, sub_group_id):
sub_group_partitions = []
for param, partitioned_param in zip(self.fp16_groups[sub_group_id],
self.fp16_partitioned_groups[sub_group_id]):
if partitioned_param.status == PartitionedParamStatus.NOT_AVAILABLE:
swap_path = param.nvme_swapper.get_path(param, True)
sub_group_partitions.append((partitioned_param, param.partition_numel(), swap_path))
else:
sub_group_partitions.append((partitioned_param, partitioned_param.ds_numel, None))
return sub_group_partitions
def _create_fp32_partitions(self):
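        # Builds the fp32 master copy of every fp16 sub-group partition. Depending on the
        # configuration the copy lives on the accelerator, in CPU memory, or is handed to
        # the optimizer swapper so it can be paged to NVMe around the optimizer step.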
cpu_memory_usage = 0
cpu_memory_sub_groups = 0
nvme_memory_usage = 0
num_swappable_partitions = 0
num_swap_from_nvme_partitions = 0
num_swap_from_cpu_partitions = 0
swap_from_nvme_memory_usage = 0
swap_from_cpu_memory_usage = 0
GIGA_BYTES = (1024**3)
swappable_fp32_tensors = []
swappable_fp16_src_tensors = []
nvme_fp16_partitions_info = []
nvme_fp16_num_elems = []
nvme_fp32_dest_tensors = []
fp32_element_size = torch.tensor([], dtype=torch.float32).element_size()
for i, tensor in enumerate(self.fp16_partitioned_groups_flat):
num_elements = self.fp16_partitioned_groups_flat_numel[i]
# a partition of the fp32 master weights that will be updated by this process
if self._swappable_optimizer_subgroup(i):
self.fp32_partitioned_groups_flat.append(torch.Tensor())
nvme_memory_usage += (fp32_element_size * num_elements)
num_swappable_partitions += 1
if self.params_in_nvme_and_cpu and tensor is None:
num_swap_from_nvme_partitions += 1
swap_from_nvme_memory_usage += (fp32_element_size * num_elements)
if self.offload_optimizer_fast_init:
sub_group_partitions = self._get_sub_group_partitions(i)
nvme_fp16_partitions_info.append(sub_group_partitions)
nvme_fp16_num_elems.append(num_elements)
nvme_fp32_dest_tensors.append(self.fp32_partitioned_groups_flat[i])
else:
unpinned_fp32_buffer = torch.empty(num_elements, device=self.device, dtype=torch.float)
self._swap_in_sub_group_to_flat_buffer(unpinned_fp32_buffer, i)
self.optimizer_swapper.initialize_parameters(parameters=[self.fp32_partitioned_groups_flat[i]],
src_tensors=[unpinned_fp32_buffer])
else:
num_swap_from_cpu_partitions += 1
swap_from_cpu_memory_usage += (fp32_element_size * num_elements)
swappable_fp32_tensors.append(self.fp32_partitioned_groups_flat[i])
swappable_fp16_src_tensors.append(self.fp16_partitioned_groups_flat[i])
else:
cpu_memory_usage += (fp32_element_size * num_elements)
cpu_memory_sub_groups += 1
if self.params_in_nvme_and_cpu and tensor is None:
unpinned_fp32_buffer = torch.empty(num_elements, device=self.device, dtype=torch.float)
self._swap_in_sub_group_to_flat_buffer(unpinned_fp32_buffer, i)
self.fp32_partitioned_groups_flat.append(unpinned_fp32_buffer)
else:
self.fp32_partitioned_groups_flat.append(self.fp16_partitioned_groups_flat[i].to(
self.device).clone().float().detach())
self.fp32_partitioned_groups_flat[i].requires_grad = True # keep this in case internal optimizer uses it
if len(swappable_fp32_tensors) > 0:
self.optimizer_swapper.initialize_parameters(parameters=swappable_fp32_tensors,
src_tensors=swappable_fp16_src_tensors)
if len(nvme_fp32_dest_tensors) > 0:
fp16_pinned_buffers = self.fp16_groups[0][0].nvme_swapper.reserve_available_buffers()
assert len(fp16_pinned_buffers) > 0
self.optimizer_swapper.initialize_from_swapped_fp16_params(fp16_partitions_info=nvme_fp16_partitions_info,
fp16_num_elems=nvme_fp16_num_elems,
fp16_pinned_buffers=fp16_pinned_buffers,
fp32_parameters=nvme_fp32_dest_tensors)
self.fp16_groups[0][0].nvme_swapper.release_reserved_buffers()
nvme_gigabytes = nvme_memory_usage / GIGA_BYTES
print_rank_0(f'Swappable FP32 Partitions: count={num_swappable_partitions} size={nvme_gigabytes:5.2f} GB',
force=False)
if self.params_in_nvme_and_cpu:
print_rank_0(
f'Swap from NVMe Partitions: count = {num_swap_from_nvme_partitions}, size = {swap_from_nvme_memory_usage/GIGA_BYTES:5.2f}GB',
force=False)
print_rank_0(
f'Swap from CPU Partitions: count = {num_swap_from_cpu_partitions}, size = {swap_from_cpu_memory_usage/GIGA_BYTES:5.2f}GB',
force=False)
cpu_memory_gigabytes = cpu_memory_usage / GIGA_BYTES
print_rank_0(f'In-Memory FP32 Partitions: count={cpu_memory_sub_groups} size={cpu_memory_gigabytes:5.2f} GB',
force=False)
# Clear for on-the-fly population before the optimizer step
for param_group in self.optimizer.param_groups:
param_group['params'] = []
def _create_fp16_sub_groups(self, params_group):
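        # Splits a parameter group into sub-groups whose partitioned element counts each
        # total roughly self.sub_group_size. Sub-groups are the unit at which fp32 master
        # weights and optimizer state are created (and optionally swapped).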
params_group_numel = sum([param.partition_numel() for param in params_group])
sub_group_size = self.sub_group_size
if sub_group_size is None or sub_group_size >= params_group_numel:
return [params_group]
sub_groups = []
sub_group = []
local_sub_group_size = 0
for param in params_group:
sub_group.append(param)
local_sub_group_size += param.partition_numel()
if local_sub_group_size >= sub_group_size or id(param) == id(params_group[-1]):
sub_groups.append(sub_group)
sub_group = []
local_sub_group_size = 0
return sub_groups
def _release_ipg_buffers(self):
if self.contiguous_gradients:
self.ipg_buffer = None
def _optimizer_step(self, sub_group_id):
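        # Runs the wrapped optimizer on a single sub-group: the flattened fp32 partition
        # is temporarily installed as the only parameter of its param group, stepped, and
        # then removed so other sub-groups can reuse the same optimizer instance.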
param_group_id = self.sub_group_to_group_id[sub_group_id]
fp32_param = self.fp32_partitioned_groups_flat[sub_group_id]
self.optimizer.param_groups[param_group_id]['params'] = [fp32_param]
self.optimizer.step()
self.optimizer.param_groups[param_group_id]['params'] = []
def _swappable_optimizer_subgroup(self, sub_group_id):
if not self.swap_optimizer:
return False
return self.optimizer_swapper.swappable_tensor(None,
numel=self.fp16_partitioned_groups_flat_numel[sub_group_id])
def _partitioned_params_swap_out(self, i):
offset = 0
fp32_param = self.fp32_partitioned_groups_flat[i]
assert fp32_param is not None, \
f'fp32 parameters of sub_group {i} is None'
swap_fp16_params = []
swap_fp32_params = []
for param, partitioned_param in zip(self.fp16_groups[i], self.fp16_partitioned_groups[i]):
src = fp32_param.narrow(0, offset, partitioned_param.ds_numel)
if partitioned_param.status == PartitionedParamStatus.AVAILABLE:
partitioned_param.data.copy_(src.data)
else:
swap_fp32_params.append(src)
swap_fp16_params.append(param)
offset += partitioned_param.ds_numel
if len(swap_fp16_params):
swap_fp16_params[0].nvme_swapper.swap_out_partitioned_params(dst_fp16_params=swap_fp16_params,
src_fp32_params=swap_fp32_params)
def initialize_optimizer_states(self):
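        # Allocates a (possibly pinned) zero gradient buffer for every fp32 partition and
        # runs one optimizer step per sub-group so that lazily created optimizer state is
        # materialized up front; swappable sub-groups are paged in and back out around
        # their step, and Adagrad is instead re-constructed since it initializes state
        # eagerly.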
num_subgroups = len(self.fp16_groups)
largest_numel = max([sum([p.ds_numel for p in psg]) for psg in self.fp16_partitioned_groups])
gradient_dtype = self.fp32_partitioned_groups_flat[0].dtype
gradient_buffer = torch.zeros(int(largest_numel), dtype=gradient_dtype, device=self.device)
timer_names = set()
# State initialization for the Adagrad optimizer occurs at construction as opposed to other optimizers
# which do lazy initialization of the state at the first call to step.
is_adagrad = isinstance(self.optimizer, torch.optim.Adagrad)
if self.swap_optimizer:
self.optimizer_swapper.init_timers()
INIT_OPTIMIZER_TIMER = 'init_optimizer_state'
timer_names.add(INIT_OPTIMIZER_TIMER)
self.start_timers([INIT_OPTIMIZER_TIMER])
for i, group in enumerate(self.fp16_groups):
swappable_optimizer_subgroup = self._swappable_optimizer_subgroup(i)
swappable_param_subgroup = self.fp16_partitioned_groups_flat[i] is None
num_elements = int(self.fp16_partitioned_groups_flat_numel[i])
see_memory_usage(
f'[Begin] Initialize optimizer states {i} / {num_subgroups} subgroups, num_elems: {num_elements}, swappable opt/param:{swappable_optimizer_subgroup}/{swappable_param_subgroup}',
force=False)
if swappable_optimizer_subgroup:
self._optimizer_states_and_gradient_swap_in(i, timer_names)
if self.offload_optimizer and not swappable_optimizer_subgroup:
subgroup_gradient_buffer = torch.zeros(num_elements, dtype=gradient_dtype, device=self.device)
if self.offload_optimizer_pin_memory:
subgroup_gradient_buffer = get_accelerator().pin_memory(subgroup_gradient_buffer)
self.fp32_partitioned_groups_flat[i].grad = subgroup_gradient_buffer
else:
self.fp32_partitioned_groups_flat[i].grad = gradient_buffer.narrow(0, 0, num_elements)
# Initialize the optimizer states with the flattened fp32 partition.
if not is_adagrad:
self._optimizer_step(i)
if swappable_param_subgroup:
self._partitioned_params_swap_out(i)
if swappable_optimizer_subgroup:
self._optimizer_states_and_gradient_swap_out(i, timer_names)
see_memory_usage(
f'[End] Initialize optimizer states {i} / {num_subgroups} subgroups, num_elems: {num_elements}, swappable opt/param:{swappable_optimizer_subgroup}/{swappable_param_subgroup}',
force=False)
# Initialize the optimizer states with the flattened fp32 partition.
if is_adagrad:
self.optimizer = torch.optim.Adagrad(self.fp32_partitioned_groups_flat, **self.optimizer.defaults)
self.stop_timers([INIT_OPTIMIZER_TIMER])
self.log_timers(timer_names)
if self.swap_optimizer:
self.optimizer_swapper.log_timers()
if not self.offload_optimizer:
for group in self.fp32_partitioned_groups_flat:
group.grad = None
# Reset steps
return
#########################################################################
#########################ZeRO Partition Gradients########################
#########################################################################
def get_first_param_index(self, group_id, param_group, partition_id):
for index, param in enumerate(param_group):
param_id = self.get_param_id(param)
if partition_id in self.param_to_partition_ids[group_id][param_id]:
return index
return None
def initialize_gradient_partitioning_data_structures(self):
total_partitions = dist.get_world_size(group=self.dp_process_group)
for i, param_group in enumerate(self.fp16_groups):
self.param_to_partition_ids[i] = {}
self.is_partition_reduced[i] = {}
self.total_grads_in_partition[i] = {}
self.remaining_grads_in_partition[i] = {}
self.is_grad_computed[i] = {}
self.grad_partition_insertion_offset[i] = {}
self.grad_start_offset[i] = {}
self.first_param_index_in_partition[i] = {}
for partition_id in range(total_partitions):
self.is_grad_computed[i][partition_id] = {}
self.grad_partition_insertion_offset[i][partition_id] = {}
self.grad_start_offset[i][partition_id] = {}
self.initialize_gradient_partition(i, param_group, partition_id)
self.is_partition_reduced[i][partition_id] = False
self.first_param_index_in_partition[i][partition_id] = self.get_first_param_index(
i, param_group, partition_id)
@instrument_w_nvtx
def independent_gradient_partition_epilogue(self):
self.report_ipg_memory_usage(f"In ipg_epilogue before reduce_ipg_grads", 0)
self.__reduce_and_partition_ipg_grads()
self.report_ipg_memory_usage(f"In ipg_epilogue after reduce_ipg_grads", 0)
self.reduce_and_partition_stream.synchronize()
#in case of cpu offload, averaged gradients are already in fp32_partitioned_groups_flat.grad
#TODO: use a similar code path for both cpu_offload and non-cpu offload
if not self.offload_optimizer:
for i, sub_group in enumerate(self.fp16_groups):
self.averaged_gradients[i] = [
self.__param_id_to_grad_partition[param.ds_id]
if param.requires_grad else torch.zeros_like(param.ds_tensor) for param in sub_group
]
# self.averaged_gradients[i] = self.get_flat_partition(
# self.fp16_groups[i],
# 0,
# self.fp32_partitioned_groups_flat[i].numel(),
# return_tensor_list=True)
# this method gets called after every backward. need to increment
# here because if it gets incremented in backward() the micro step
        # id will be off by one when we do the reduce and partition at the
        # start of this method.
# TODO. make this less error prone
self.micro_step_id += 1
def overlapping_partition_gradients_reduce_epilogue(self):
self.independent_gradient_partition_epilogue()
def create_reduce_and_remove_grad_hooks(self):
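        # Registers a gradient hook on every trainable parameter. Each parameter is
        # briefly all-gathered so a valid autograd node exists, the hook (which feeds the
        # bucketed reduce-scatter and gradient-partitioning path) is attached to its
        # gradient accumulation function, and the parameter is partitioned again.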
print_rank_0(f'[Begin] Create gradient reduction hooks')
self.grad_accs = []
for i, param_group in enumerate(self.fp16_groups):
for param in param_group:
if param.requires_grad:
#print_rank_0(f" Before all gather {param.device}, {param.shape}")
print_rank_0(f"Before all gather {param.device}, {param.shape}", force=False)
# The hook must be created in un-partitioned parameter
param.all_gather()
#print(f"After all gather {param.device}, {param.shape}")
def wrapper(param, i):
param_tmp = param.expand_as(param)
grad_acc = param_tmp.grad_fn.next_functions[0][0]
@instrument_w_nvtx
def reduce_partition_and_remove_grads(*notneeded):
self.reduce_ready_partitions_and_remove_grads(param, i)
grad_acc.register_hook(reduce_partition_and_remove_grads)
self.grad_accs.append(grad_acc)
#print(f"param grad fn {param.expand_as(param).grad_fn}")
wrapper(param, i)
# Partition the parameter after creating the hook
param.partition()
print_rank_0(f'[End] Create gradient reduction hooks')
def get_param_id(self, param):
unique_id = id(param)
return self.param_id[unique_id]
def report_ipg_memory_usage(self, tag, param_elems):
elem_count = self.elements_in_ipg_bucket + param_elems
percent_of_bucket_size = (100.0 * elem_count) // self.reduce_bucket_size
see_memory_usage(
f"{tag}: elems in_bucket {self.elements_in_ipg_bucket} param {param_elems} max_percent {percent_of_bucket_size}",
force=False)
###############Independent Partition Gradient ########################
def reduce_independent_p_g_buckets_and_remove_grads(self, param, i):
#print_rank_0(f"Inside reduce ipg buckets. {debug_param2name_id_shape(param)}, ipg elements {self.elements_in_ipg_bucket}, reduce bucket size {self.reduce_bucket_size}", force=True)
# Because the ipg bucket is initialized with a random place holder tensor, we must
# explicitly check that the bucket has any real data in it (self.elements_in_ipg_bucket >
        # 0). Otherwise if the incoming param.ds_numel is large, this branch may get triggered on
        # garbage data and `self.average_tensor()` will crash because its params_to_reduce will be
# empty, while reduction_list will have that garbage data.
if self.elements_in_ipg_bucket + param.ds_numel > self.reduce_bucket_size and self.elements_in_ipg_bucket > 0:
self.report_ipg_memory_usage("In ipg_remove_grads before reduce_ipg_grads", param.ds_numel)
self.__reduce_and_partition_ipg_grads()
self.__add_grad_to_ipg_bucket(param)
@instrument_w_nvtx
@torch.no_grad()
def __add_grad_to_ipg_bucket(self, param: Parameter) -> None:
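        # Stages param.grad for reduction: when contiguous_gradients is enabled and the
        # bucket still has room, the gradient is copied into the flat IPG buffer on the
        # reduce-and-partition stream so its original allocation can be reused.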
self.reduce_and_partition_stream.wait_stream(get_accelerator().default_stream())
if self.contiguous_gradients and self.elements_in_ipg_bucket + param.grad.numel() < self.reduce_bucket_size:
# move the gradient to a contiguous buffer
with get_accelerator().stream(self.reduce_and_partition_stream):
# move the parameter's gradient to the contiguous flat buffer
new_grad_tensor = self.__ipg_bucket_flat_buffer.narrow(0, self.elements_in_ipg_bucket,
param.grad.numel()).view_as(param.grad)
new_grad_tensor.copy_(param.grad, non_blocking=True)
param.grad.record_stream(get_accelerator().current_stream())
param.grad.data = new_grad_tensor
self.params_in_ipg_bucket.append(param)
@instrument_w_nvtx
@torch.no_grad()
def __reduce_and_partition_ipg_grads(self, safe_mode: bool = False) -> None:
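        # Drains the current IPG bucket: validates gradient sizes, bounds the number of
        # in-flight reduction events, then either averages-and-scatters the contiguous
        # bucket or reduce-scatters the per-parameter gradients, and finally hands the
        # resulting partitions to partition_grads().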
if not self.params_in_ipg_bucket:
return
for param in self.params_in_ipg_bucket:
if param.grad.numel() != param.ds_numel:
                raise RuntimeError(f"{param.grad.numel()} != {param.ds_numel} Cannot reduce scatter "
                                   f"gradients whose size is not the same as the params")
assert len(set(p.ds_id for p in self.params_in_ipg_bucket)) == len(self.params_in_ipg_bucket)
while self.param_reduce_events and self.param_reduce_events[0].query():
self.param_reduce_events.popleft()
if len(self.param_reduce_events) > self.max_param_reduce_events:
self.param_reduce_events.popleft().synchronize()
with get_accelerator().stream(self.reduce_and_partition_stream):
if safe_mode:
assert_ints_same_as_other_ranks([p.ds_id for p in self.params_in_ipg_bucket])
if self.contiguous_gradients and self.elements_in_ipg_bucket <= self.reduce_bucket_size and not self.reduce_scatter:
grad_bucket = self.__ipg_bucket_flat_buffer.narrow(0, 0, self.elements_in_ipg_bucket)
grad_partitions = self.__avg_scatter_contiguous_grads(grad_bucket)
else:
self.params_in_ipg_bucket.sort(key=lambda p: p.ds_id)
grad_partitions = self.__avg_scatter_grads(self.params_in_ipg_bucket)
self.partition_grads(self.params_in_ipg_bucket, grad_partitions)
self.params_in_ipg_bucket.clear()
event = get_accelerator().Event()
event.record()
self.param_reduce_events.append(event)
@instrument_w_nvtx
def __avg_scatter_contiguous_grads(self, buffer_to_reduce: Tensor) -> List[Tensor]:
dtype = buffer_to_reduce.dtype
if self.communication_data_type == self.dtype:
buffer_to_reduce = buffer_to_reduce.to(self.communication_data_type)
if self.postscale_gradients and self.gradient_predivide_factor != 1.0:
buffer_to_reduce = buffer_to_reduce.div_(self.gradient_predivide_factor)
world_sz = dist.get_world_size(self.dp_process_group)
rank = dist.get_rank(self.dp_process_group)
buffer_to_reduce.div_(world_sz)
dist.all_reduce(buffer_to_reduce, group=self.dp_process_group)
if self.postscale_gradients and self.gradient_predivide_factor != world_sz:
buffer_to_reduce = buffer_to_reduce.mul(self.gradient_predivide_factor)
if self.communication_data_type != self.dtype:
buffer_to_reduce = buffer_to_reduce.to(self.dtype)
grad_partitions = []
grad_offset_in_buffer = 0
for param in self.params_in_ipg_bucket:
grad = param.grad
chunk_sz = math.ceil(grad.numel() / world_sz)
start_offset = grad_offset_in_buffer + min(rank * chunk_sz, grad.numel())
end_offset = grad_offset_in_buffer + min(rank * chunk_sz + chunk_sz, grad.numel())
partition = buffer_to_reduce[start_offset:end_offset]
if param.partition_numel() != partition.numel():
padded_partition = torch.zeros(param.partition_numel(), device=grad.device, dtype=grad.dtype)
if partition.numel() > 0:
padded_partition[:partition.numel()] = partition
grad_partitions.append(padded_partition)
else:
grad_partitions.append(partition)
grad_offset_in_buffer += grad.numel()
return grad_partitions
@instrument_w_nvtx
def __avg_scatter_grads(self, params_to_reduce: List[Parameter]) -> List[Tensor]:
"""average gradients and scatter partitions across ranks"""
full_grads_for_rank = [p.grad for p in params_to_reduce]
if self.communication_data_type != self.dtype:
full_grads_for_rank = [g.to(self.communication_data_type) for g in full_grads_for_rank]
if self.postscale_gradients and self.gradient_predivide_factor != 1.0:
full_grads_for_rank = [g.div(self.gradient_predivide_factor) for g in full_grads_for_rank]
local_world_size = get_accelerator().device_count()
global_world_size = dist.get_world_size()
num_nodes = global_world_size // local_world_size
if self.all2all_process_group is not None and num_nodes > 1:
grad_partitions_for_rank = all_to_all_quant_reduce(full_grads_for_rank, self.all2all_process_group)
else:
grad_partitions_for_rank = reduce_scatter_coalesced(full_grads_for_rank, self.dp_process_group)
if self.postscale_gradients and self.gradient_predivide_factor != 1.0 and self.gradient_predivide_factor != dist.get_world_size(
self.dp_process_group):
grad_partitions_for_rank = [g.mul(self.gradient_predivide_factor) for g in grad_partitions_for_rank]
if self.communication_data_type != self.dtype:
grad_partitions_for_rank = [g.to(self.dtype) for g in grad_partitions_for_rank]
return grad_partitions_for_rank
def set_grad_positions(self):
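        # Records, for every parameter id, the (sub-group index, element offset, numel) of
        # its gradient slice; used when offloading gradients into the flattened fp32
        # gradient buffers on the optimizer device.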
for i, group in enumerate(self.fp16_groups):
current_offset = 0
for param in group:
param_id = self.get_param_id(param)
num_elements = param.partition_numel()
self.grad_position[param_id] = [int(i), int(current_offset), int(num_elements)]
#print(f"param id {param_id} i:{i}, ds_tensor {num_elements} numel {param.numel()}")
current_offset += num_elements
see_memory_usage(f"After Set Grad positions", force=False)
def _constant_buffered_norm2(self, input, buffer_size=250000000):
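        # Computes ||input||_2 while promoting at most buffer_size elements to double
        # precision at a time: sqrt(sum_i ||chunk_i||_2^2) equals the norm of the whole
        # tensor, so the result matches input.double().norm(2) without the large
        # temporary.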
norm = None
for part in input.view(-1).split(buffer_size):
if norm is None:
norm = part.data.double().norm(2)**2.0
else:
norm += part.data.double().norm(2)**2.0
return norm**0.5
def set_norm_for_param_grad_in_gpu(self, param):
param_id = self.get_param_id(param)
#self.norm_for_param_grads[param_id] = param.grad.data.double().norm(2)
#Using a more memory efficient version
self.norm_for_param_grads[param_id] = self._constant_buffered_norm2(param.grad)
def async_inplace_copy_grad_to_fp32_buffer_from_gpu(self, param, fp32_grad_tensor):
with get_accelerator().stream(self.copy_grad_stream):
param_id = self.get_param_id(param)
src_tensor = param.grad.view(-1).float()
#print(f"src_tensor {src_tensor.size()} and fp32 grad {fp32_grad_tensor.size()}")
fp32_grad_tensor.copy_(src_tensor, non_blocking=True)
param.grad = None
def complete_grad_norm_calculation_for_cpu_offload(self, params):
total_norm = 0.0
norm_type = 2.0
for p in params:
if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0):
param_id = self.get_param_id(p)
if param_id in self.norm_for_param_grads.keys():
param_norm = self.norm_for_param_grads[param_id]
total_norm += param_norm.item()**2
# Sum across all model parallel GPUs.
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=self.dp_process_group)
self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM)
total_norm = total_norm_cuda[0].item()**(1. / norm_type)
if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
@instrument_w_nvtx
def partition_grads(self, params_to_release: List[Parameter], grad_partitions: List[Tensor]) -> None:
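        # For each (param, reduced gradient partition) pair: copy or accumulate the
        # partition into the persistent per-parameter gradient buffer (depending on
        # micro_step_id), optionally mirror it into the offloaded fp32 gradient or queue
        # it for NVMe swap-out, and finally release param.grad.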
offload_fp32_gradients = {}
offload_fp32_offsets = {}
buffers = []
for param, grad_partition in zip(params_to_release, grad_partitions):
contains_real_data = param.partition_numel() * dist.get_rank(self.dp_process_group) < param.ds_numel
if not contains_real_data:
# this grad partition is empty - don't need to do anything
param.grad = None
continue
# move or accumulate gradient partition to target buffer
grad_buffer = self.__param_id_to_grad_partition[param.ds_id].narrow(0, 0, grad_partition.numel())
buffers.append(grad_buffer)
if self.micro_step_id == 0: # don't accumulate
grad_buffer.copy_(grad_partition, non_blocking=True)
# ensure grad buffer is a CUDA buffer to speed up the next few
# operations and so it can be used asynchronously
grad_buffer = grad_buffer.to(grad_partition.device, non_blocking=True)
elif get_accelerator().on_accelerator(grad_buffer):
grad_buffer.add_(grad_partition)
else:
# if dst is CPU, copy first to src device, do the addition
# there, then move back to dst. adding directly to cpu is very slow
cuda_grad_buffer = grad_buffer.to(grad_partition.device, non_blocking=True)
cuda_grad_buffer.add_(grad_partition)
grad_buffer.copy_(cuda_grad_buffer, non_blocking=True)
# ensure grad buffer is a CUDA buffer to speed up the next few
# operations and so it can be used asynchronously
grad_buffer = cuda_grad_buffer
# offload the gradient partition if applicable
if self.offload_optimizer:
i, dest_offset, _ = self.grad_position[self.get_param_id(param)]
if self.is_gradient_accumulation_boundary:
self.norm_for_param_grads[self.get_param_id(param)] = self._constant_buffered_norm2(grad_buffer)
if self._swappable_optimizer_subgroup(i):
if not i in offload_fp32_gradients.keys():
offload_fp32_gradients[i] = []
offload_fp32_offsets[i] = []
offload_fp32_gradients[i].append(grad_buffer.float())
offload_fp32_offsets[i].append(dest_offset)
else:
fp32_grad_tensor = self.fp32_partitioned_groups_flat[i].grad.narrow(
0, dest_offset, grad_buffer.numel())
fp32_grad_tensor.copy_(grad_buffer)
# free the gradient
param.grad.record_stream(get_accelerator().current_stream())
param.grad = None
if self.offload_optimizer and self.swap_optimizer:
for i in offload_fp32_gradients.keys():
self.optimizer_swapper.swap_out_gradients(parameter=self.fp32_partitioned_groups_flat[i],
gradient_offsets=offload_fp32_offsets[i],
gradient_tensors=offload_fp32_gradients[i])
return buffers
def reduce_ready_partitions_and_remove_grads(self, param, i):
#print_rank_0(f"Backward {debug_param2name_id_shape(param)}", force=True)
self.reduce_independent_p_g_buckets_and_remove_grads(param, i)
def zero_reduced_gradients(self, partition_id, i):
def are_all_related_partitions_reduced(params_id):
for partition_id in self.param_to_partition_ids[i][params_id]:
if not self.is_partition_reduced[i][partition_id]:
return False
return True
for params_id in self.is_grad_computed[i][partition_id]:
if are_all_related_partitions_reduced(params_id):
self.param_dict[params_id].grad = None
def flatten_and_print(self, message, tensors, start=0, n=5):
flatten_tensor = self.flatten(tensors)
def print_func():
logger.info(flatten_tensor.contiguous().view(-1).narrow(0, start, n))
self.sequential_execution(print_func, message)
def get_grads_to_reduce(self, i, partition_id):
def get_reducible_portion(key):
grad = self.param_dict[key].grad
total_elements = grad.numel()
start = self.grad_start_offset[i][partition_id][key]
num_elements = min(total_elements - start,
self.partition_size[i] - self.grad_partition_insertion_offset[i][partition_id][key])
if not pg_correctness_test:
if num_elements == total_elements:
return grad
else:
return grad.contiguous().view(-1).narrow(0, int(start), int(num_elements))
else:
if num_elements == total_elements:
return grad.clone()
else:
return grad.clone().contiguous().view(-1).narrow(0, int(start), int(num_elements))
grads_to_reduce = []
for key in self.is_grad_computed[i][partition_id]:
grad = get_reducible_portion(key)
grads_to_reduce.append(grad)
return grads_to_reduce
def sequential_execution(self, function, message, group=None):
if group is None:
group = self.dp_process_group
if dist.get_rank(group=group) == 0:
logger.info(message)
for id in range(dist.get_world_size(group=group)):
if id == dist.get_rank(group=group):
function()
dist.barrier(group=group)
def set_none_gradients_to_zero(self, i, partition_id):
for param_id in self.is_grad_computed[i][partition_id]:
param = self.param_dict[param_id]
if param.grad is None:
                param.grad = torch.zeros_like(param)
######################Reduction Related Methods##############################
def allreduce_bucket(self, bucket, rank=None, log=None):
rank = None
tensor = self.flatten(bucket)
tensor_to_allreduce = tensor
if pg_correctness_test:
communication_data_type = torch.float32
else:
communication_data_type = self.communication_data_type
if communication_data_type != tensor.dtype:
tensor_to_allreduce = tensor.to(communication_data_type)
tensor_to_allreduce.div_(dist.get_world_size(group=self.dp_process_group))
if rank is None:
# "All Reducing"
dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)
else:
global_rank = dist.get_global_rank(self.dp_process_group, rank)
dist.reduce(tensor_to_allreduce, global_rank, group=self.dp_process_group)
if communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce:
if rank is None or rank == dist.get_rank(group=self.dp_process_group):
tensor.copy_(tensor_to_allreduce)
return tensor
# if rank is specified do a reduction instead of an allreduce
def allreduce_and_copy(self, small_bucket, rank=None, log=None):
with get_accelerator().stream(self.reduction_stream):
allreduced = self.allreduce_bucket(small_bucket, rank=rank, log=log)
if rank is None or rank == dist.get_rank(group=self.dp_process_group):
for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)):
buf.copy_(synced)
def allreduce_no_retain(self, bucket, numel_per_bucket=500000000, rank=None, log=None):
small_bucket = []
numel = 0
for tensor in bucket:
small_bucket.append(tensor)
numel = numel + tensor.numel()
if numel > numel_per_bucket:
self.allreduce_and_copy(small_bucket, rank=rank, log=None)
small_bucket = []
if len(small_bucket) > 0:
self.allreduce_and_copy(small_bucket, rank=rank, log=log)
#############################################################################
#############################################################################
#############################################################################
# views the tensor as multiple partitions and returns
# those partitions
def get_data_parallel_partitions(self, tensor):
partitions = []
dp = dist.get_world_size(group=self.dp_process_group)
dp_id = dist.get_rank(group=self.dp_process_group)
total_num_elements = tensor.numel()
base_size = total_num_elements // dp
remaining = total_num_elements % dp
start = 0
for id in range(dp):
partition_size = base_size
if id < remaining:
partition_size = partition_size + 1
partitions.append(tensor.narrow(0, start, partition_size))
start = start + partition_size
return partitions
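    # Hedged worked example (illustrative numbers, not from the original source): with a
    # data-parallel world size of 4 and a flat tensor of 10 elements, base_size is
    # 10 // 4 = 2 and remaining is 10 % 4 = 2, so ranks 0 and 1 receive 3 elements each
    # and ranks 2 and 3 receive 2 elements each, i.e. partition sizes [3, 3, 2, 2] that
    # tile the tensor exactly once with no padding.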
def get_partition_info(self, tensor_list, partition_size, partition_id):
params_in_partition = []
params_not_in_partition = []
start_index = partition_size * partition_id
end_index = partition_size * (partition_id + 1)
current_index = 0
first_offset = 0
for tensor in tensor_list:
tensor_size = tensor.numel()
if (current_index >= start_index and current_index < end_index):
params_in_partition.append(tensor)
elif start_index > current_index and start_index < (current_index + tensor_size):
params_in_partition.append(tensor)
                assert (first_offset == 0
                        ), "first_offset can be set at most once, and only for the first tensor that crosses the partition start"
first_offset = start_index - current_index
else:
params_not_in_partition.append(tensor)
current_index = current_index + tensor_size
return params_in_partition, params_not_in_partition, first_offset
@instrument_w_nvtx
def zero_grad(self, set_to_none=False):
"""
Zero FP16 parameter grads.
"""
self.micro_step_id = 0
# FP32 grad should never exist.
# For speed, set model fp16 grad to None by default
for group in self.fp16_groups:
for p in group:
if set_to_none:
if p.grad is not None and get_accelerator().on_accelerator(p.grad):
p.grad.record_stream(get_accelerator().current_stream())
p.grad = None
else:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
def _model_parallel_all_reduce(self, tensor, op):
""" Perform all reduce within model parallel group, if any.
"""
if self.model_parallel_group is None:
pass
else:
dist.all_reduce(tensor=tensor, op=op, group=self.model_parallel_group)
@instrument_w_nvtx
def get_grad_norm_direct(self, gradients, params, norm_type=2):
"""Clips gradient norm of an iterable of parameters.
This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
added functionality to handle model parallel parameters. Note that
the gradients are modified in place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(g.data.abs().max() for g in gradients)
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=self.dp_process_group)
# Take max across all GPUs.
self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.MAX)
total_norm = total_norm_cuda[0].item()
else:
# if dist.get_rank() == 0:
# logger.info(f"Total Norm beginning {total_norm}")
grad_norms = []
for g, p in zip(gradients, params):
if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0):
grad_norms.append(g.to(get_accelerator().device_name(), non_blocking=True).double().norm(2))
# Sum across all model parallel GPUs.
if len(grad_norms) == 0:
# FIX https://github.com/microsoft/DeepSpeed/issues/3564
total_norm_cuda = torch.tensor(0,
dtype=gradients[0].dtype).to(get_accelerator().device_name()).double()
else:
total_norm_cuda = torch.sum(torch.pow(torch.stack(grad_norms), 2))
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=self.dp_process_group)
self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM)
total_norm = total_norm_cuda.item()**(1. / norm_type)
if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
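    # Hedged sketch of the reduction above (notation is illustrative, not from the
    # original source): for the L2 case each rank first computes the sum of squared
    # norms of its local gradient partitions, the partial sums are all-reduced with
    # SUM over the data-parallel (and, if present, model-parallel) groups, and the
    # global norm is recovered as
    #   total_norm = (sum over ranks of sum_i ||g_i||^2) ** (1. / norm_type)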
# creates a flat fused tensor from the tensor list starting at the first_offset
# in the first tensor of the list. If there are not enough elements in the tensor
# list then the flat tensor will be padded with zeros
def get_flat_partition(self, tensor_list, first_offset, partition_size, return_tensor_list=False):
flat_tensor_list = []
current_size = 0
for i, tensor in enumerate(tensor_list):
if tensor.grad is None:
tensor.grad = torch.zeros_like(tensor)
tensor = tensor.grad
num_elements = tensor.numel()
tensor_offset = 0
# we need to offset to get to the right element
if i == 0 and first_offset > 0:
tensor_offset = first_offset
num_elements = num_elements - tensor_offset
# we dont need all elements of the tensor
if num_elements > (partition_size - current_size):
num_elements = partition_size - current_size
# we need a narrow view of the tensor based on the tensor offset and number of elements that
# we need from this tensor
if tensor_offset > 0 or num_elements < tensor.numel():
flat_tensor_list.append(tensor.contiguous().view(-1).narrow(0, int(tensor_offset), int(num_elements)))
else:
flat_tensor_list.append(tensor)
current_size = current_size + num_elements
# this means its the last partition and does not align with the dp boundary. We need to pad before flattening
if current_size < partition_size:
flat_tensor_list.append(
torch.zeros(int(partition_size - current_size),
dtype=tensor_list[0].dtype,
device=tensor_list[0].device))
if return_tensor_list:
return flat_tensor_list
return self.flatten(flat_tensor_list)
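    # Hedged worked example (illustrative values, not from the original source): with
    # first_offset=2, partition_size=8, and gradient tensors of 5 and 3 elements, the
    # first tensor contributes its elements [2:5] (3 elements), the second contributes
    # all 3 of its elements, and a 2-element zero pad is appended so the flattened
    # partition holds exactly 8 elements aligned to the data-parallel boundary.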
def free_grad_in_param_list(self, param_list):
for p in param_list:
p.grad = None
def reset_cpu_buffers(self):
self.norm_for_param_grads = {}
def log_timers(self, timer_names):
if self.timers is None:
return
self.timers.log(names=list(timer_names))
def start_timers(self, timer_names):
if self.timers is None:
return
for name in timer_names:
self.timers(name).start()
def stop_timers(self, timer_names):
if self.timers is None:
return
for name in timer_names:
self.timers(name).stop()
def _pre_step(self):
self.micro_step_id = 0
print_rank_0(f"Inside Step function")
see_memory_usage(f"In step before checking overflow", force=False)
print_rank_0("Finished Tracing at Beginning of Step")
self._get_param_coordinator(training=True).hierarchy = 0
print_rank_0("Finished Tracing at Beginning of Step")
@instrument_w_nvtx
def _get_norm_groups(self):
norm_groups = []
for i, group in enumerate(self.fp16_groups):
if self.offload_optimizer:
norm_groups.append(self.complete_grad_norm_calculation_for_cpu_offload(self.fp16_groups[i]))
else:
norm_groups.append(self.get_grad_norm_direct(self.averaged_gradients[i], self.fp16_groups[i]))
return norm_groups
@instrument_w_nvtx
def _prepare_fp32_grad_for_sub_group(self, sub_group_id):
partition_id = dist.get_rank(group=self.dp_process_group)
single_grad_partition = self.flatten(self.averaged_gradients[sub_group_id]).to(
self.fp32_partitioned_groups_flat[sub_group_id].dtype)
assert single_grad_partition.numel() == self.fp32_partitioned_groups_flat[sub_group_id].numel(), \
"averaged gradients have different number of elements that partition size {} {} {} {}".format(
single_grad_partition.numel(), self.fp32_partitioned_groups_flat[sub_group_id].numel(), sub_group_id, partition_id)
self.fp32_partitioned_groups_flat[sub_group_id].grad = single_grad_partition
        # release all the gradients since we have already created a necessary copy in dp_grad_partition
self.zero_grad(set_to_none=True)
for grad in filter(lambda g: get_accelerator().on_accelerator(g), self.averaged_gradients[sub_group_id]):
grad.record_stream(get_accelerator().current_stream())
self.averaged_gradients[sub_group_id] = None
@instrument_w_nvtx
def _prepare_sub_group(self, sub_group_id, timer_names=set()):
see_memory_usage(f'Before prepare optimizer sub group {sub_group_id}', force=False)
if self._swappable_optimizer_subgroup(sub_group_id):
self._optimizer_states_and_gradient_swap_in(sub_group_id, timer_names)
elif not self.offload_optimizer:
self._prepare_fp32_grad_for_sub_group(sub_group_id)
see_memory_usage(f'After prepare optimizer sub group {sub_group_id}', force=False)
def _optimizer_states_and_gradient_swap_in(self, sub_group_id, timer_names=set()):
param_length = self.fp16_partitioned_groups_flat_numel[sub_group_id]
fp32_param_id = id(self.fp32_partitioned_groups_flat[sub_group_id])
assert self._swappable_optimizer_subgroup(sub_group_id), \
f'Parameter {fp32_param_id} of numel={param_length} is not swappable'
OPTIMIZER_SWAP_IN_STATE = 'optimizer_swap_in_state'
see_memory_usage(f'pre-step Before swapping in optimizer tensors {sub_group_id}', force=False)
self.start_timers([OPTIMIZER_SWAP_IN_STATE])
self.optimizer_swapper.swap_in_optimizer_state(
parameter=self.fp32_partitioned_groups_flat[sub_group_id],
async_parameter=self.next_swappable_fp32_partitioned_groups[sub_group_id])
self.stop_timers([OPTIMIZER_SWAP_IN_STATE])
timer_names.add(OPTIMIZER_SWAP_IN_STATE)
see_memory_usage(f'pre-step After swapping in optimizer tensors {sub_group_id}', force=False)
@instrument_w_nvtx
def _release_sub_group(self, sub_group_id, timer_names=set()):
see_memory_usage(f'Before release optimizer sub group {sub_group_id}', force=False)
# get rid of the fp32 gradients. Not needed anymore
if not self.offload_optimizer:
self.fp32_partitioned_groups_flat[sub_group_id].grad = None
if self._swappable_optimizer_subgroup(sub_group_id):
self._optimizer_states_and_gradient_swap_out(sub_group_id, timer_names)
see_memory_usage(f'After release optimizer sub group {sub_group_id}', force=False)
# create a flat tensor aligned at the alignment boundary
@instrument_w_nvtx
def flatten_dense_tensors_aligned(self, tensor_list, alignment):
num_elements = 0
for tens in tensor_list:
num_elements = num_elements + tens.numel()
remaining = num_elements % alignment
if remaining:
elements_to_add = alignment - remaining
pad_tensor = torch.zeros(elements_to_add, device=tensor_list[0].device, dtype=tensor_list[0].dtype)
padded_tensor_list = tensor_list + [pad_tensor]
num_elements = num_elements + elements_to_add
else:
padded_tensor_list = tensor_list
return self.flatten(padded_tensor_list)
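    # Hedged worked example (illustrative values, not from the original source):
    # flattening tensors of 3 and 6 elements with alignment=4 gives num_elements=9 and
    # remaining=1, so a zero pad of 4 - 1 = 3 elements is appended and the returned
    # flat tensor holds 12 elements, a multiple of the alignment boundary.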
def _optimizer_states_and_gradient_swap_out(self, sub_group_id, timer_names=set()):
param_length = self.fp16_partitioned_groups_flat_numel[sub_group_id]
fp32_param_id = id(self.fp32_partitioned_groups_flat[sub_group_id])
assert self._swappable_optimizer_subgroup(sub_group_id), \
f'Parameter {fp32_param_id} of numel={param_length} is not swappable'
OPTIMIZER_SWAP_OUT_STATE = 'optimizer_swap_out_state'
see_memory_usage(f'post-step Before swapping out optimizer tensors {sub_group_id}', force=False)
self.start_timers([OPTIMIZER_SWAP_OUT_STATE])
self.optimizer_swapper.swap_out_optimizer_state(
parameter=self.fp32_partitioned_groups_flat[sub_group_id],
async_swap=self.next_swappable_fp32_partitioned_groups[sub_group_id] is not None)
self.stop_timers([OPTIMIZER_SWAP_OUT_STATE])
see_memory_usage(f'post-step After swapping out optimizer tensors {sub_group_id}', force=False)
timer_names.add(OPTIMIZER_SWAP_OUT_STATE)
# get rid of the fp32 gradients. Not needed anymore
self.fp32_partitioned_groups_flat[sub_group_id].grad = None
def _unflatten_partitioned_parameters(self, sub_group_id):
updated_params = self.unflatten(self.fp16_partitioned_groups_flat[sub_group_id],
self.fp16_partitioned_groups[sub_group_id])
for partitioned_param, q in zip(self.fp16_partitioned_groups[sub_group_id], updated_params):
partitioned_param.data = q.data
def _overflow_clean_up(self, prev_scale):
see_memory_usage('After overflow before clearing gradients', force=False)
self.zero_grad(set_to_none=True)
if self.offload_optimizer:
self.reset_cpu_buffers()
else:
self.averaged_gradients = {}
see_memory_usage('After overflow after clearing gradients', force=False)
@instrument_w_nvtx
def _overflow_check_and_loss_scale_update(self):
# First compute norm for all group so we know if there is overflow
self.check_overflow()
#loss scaling related computation
prev_scale = self.loss_scale
self._update_scale(self.overflow)
if self.overflow:
self._overflow_clean_up(prev_scale)
return self.overflow
@instrument_w_nvtx
def _post_step(self, timer_names=set()):
if self.offload_optimizer:
self.reset_cpu_buffers()
#Gathering persisting parameters
if len(self.persistent_parameters) > 0:
self.persistent_parameters[0].all_gather(self.persistent_parameters)
if self.swap_optimizer:
self.optimizer_swapper.log_timers()
self.invalidate_secondary_tensor()
self.log_timers(timer_names)
see_memory_usage('After zero_optimizer step', force=False)
print_rank_0(f"------------------Finishing Step-----------------------")
@instrument_w_nvtx
def _reassign_or_swap_out_partitioned_parameters(self, sub_group_id):
if self.fp16_partitioned_groups_flat[sub_group_id] is not None:
self.fp16_partitioned_groups_flat[sub_group_id].data.copy_(
self.fp32_partitioned_groups_flat[sub_group_id].data)
#unflatten fp16 parameter subgroup
self._unflatten_partitioned_parameters(sub_group_id)
else:
self._partitioned_params_swap_out(sub_group_id)
def override_loss_scale(self, loss_scale):
if loss_scale != self.external_loss_scale:
logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}')
self.custom_loss_scaler = True
self.external_loss_scale = loss_scale
@instrument_w_nvtx
def step(self, closure=None):
"""
Not supporting closure.
"""
self._pre_step()
self._partition_all_parameters()
#checks for overflow, adjust the loss scale accordingly
if self._overflow_check_and_loss_scale_update():
if self.swap_optimizer:
self.optimizer_swapper.log_timers()
return
norm_groups = self._get_norm_groups()
scaled_global_grad_norm = get_global_norm(norm_list=norm_groups)
# Stash unscaled gradient norm
self._global_grad_norm = scaled_global_grad_norm / self.loss_scale
timer_names = set()
timer_names.add('optimizer_step')
self.start_timers(['optimizer_step'])
#update parameters one sub group at a time
for sub_group_id, group in enumerate(self.fp16_groups):
#prepare optimizer states, gradients and fp32 parameters for update
self._prepare_sub_group(sub_group_id, timer_names)
#scale the fp32 gradients
self.unscale_and_clip_grads(sub_group_id, scaled_global_grad_norm)
#apply the optimizer step on the sub group and copy fp32 parameters to fp16
self._optimizer_step(sub_group_id)
#put fp16 parameters in appropriate location
self._reassign_or_swap_out_partitioned_parameters(sub_group_id)
#release memory or swap out optimizer states of fp32 parameters
self._release_sub_group(sub_group_id, timer_names)
self.stop_timers(['optimizer_step'])
self._post_step(timer_names)
# warn user about caching allocator flushes
memory_stats = get_accelerator().memory_stats()
alloc_retries = memory_stats["num_alloc_retries"] if memory_stats is not None else 0
if alloc_retries > self.n_caching_allocator_flushes:
if dist.get_rank() == 0:
logger.warning(
"%d pytorch allocator cache flushes since last step. this happens "
"when there is high memory pressure and is detrimental to "
"performance. if this is happening frequently consider adjusting "
"settings to reduce memory consumption. If you are unable to "
"make the cache flushes go away consider adding "
"get_accelerator().empty_cache() calls in your training loop to ensure "
"that all ranks flush their caches at the same time",
alloc_retries - self.n_caching_allocator_flushes)
self.n_caching_allocator_flushes = alloc_retries
def dump_pre_step_gradients(self, debug_fp32_grads):
# Dump gradient norms for debugging
for i, _ in enumerate(self.fp16_groups):
print(f'Pre-Step Dump Norms for Group {i} FP16P, FP16G, FP32G, FP32GUC')
for fp16_param, fp32_grad in zip(self.fp16_groups[i], debug_fp32_grads[i]):
param_id = self.get_param_id(fp16_param)
fp16_grad_norm = self.debug_fp16_grads[i][param_id]
fp32_grad_norm = [float(t.data.float().norm(2)) for t in fp32_grad]
norm_list = [fp16_grad_norm, fp32_grad_norm]
print(f'Pre-Step Norms {i} {param_id} = {norm_list}')
def dump_post_step_gradients(self):
# Dump gradient norms for debugging
for i, group in enumerate(self.fp16_groups):
print(f'Post-Step Dump Norms for Group {i} FP16P, FP16DS, FP16FLAT, FP32FLAT')
unflat_fp16 = self.unflatten(self.fp16_groups_flat[i], self.fp16_groups[i])
unflat_fp32 = self.unflatten(self.fp32_partitioned_groups_flat[i], self.fp16_groups[i])
for j, p in enumerate(self.fp16_groups[i]):
param_id = self.get_param_id(p)
param_norm = float(p.data.float().norm(2))
ds_norm = float(p.ds_tensor.data.float().norm(2))
unflat_norm = [float(t.data.float().norm(2)) for t in [unflat_fp16[j], unflat_fp32[j]]]
norm_list = [param_norm, ds_norm] + unflat_norm
print(f'Post-Step Norms {i} {param_id} = {norm_list}')
@instrument_w_nvtx
def unscale_and_clip_grads(self, sub_group_id, total_norm):
# compute combined scale factor for this group
combined_scale = self.loss_scale
if self.clip_grad > 0.:
# norm is in fact norm*scale
clip = ((total_norm / self.loss_scale) + 1e-6) / self.clip_grad
if clip > 1:
combined_scale = clip * self.loss_scale
self.fp32_partitioned_groups_flat[sub_group_id].grad.mul_(1. / combined_scale)
def _check_overflow(self, partition_gradients=True):
self.overflow = self.has_overflow(partition_gradients)
# `params` is a list / generator of torch.Variable
def has_overflow_serial(self, params, is_grad_list=False):
for p in params:
if p.grad is not None and self._has_inf_or_nan(p.grad.data):
return True
return False
def has_overflow_partitioned_grads_serial(self):
for i in range(len(self.fp16_groups)):
for j, grad in enumerate(self.averaged_gradients[i]):
if grad is not None and self._has_inf_or_nan(grad.data, j):
return True
return False
@instrument_w_nvtx
def has_overflow(self, partition_gradients=True):
if partition_gradients:
with get_accelerator().stream(self.reduce_and_partition_stream):
if hasattr(self.inf_or_nan_tracker, "logical_or_"):
self.inf_or_nan_tracker.logical_or_(torch.isinf(self.grad_partitions_flat_buffer).any())
self.inf_or_nan_tracker.logical_or_(torch.isnan(self.grad_partitions_flat_buffer).any())
else:
# logical_or_ not available in older versions of pytorch
self.inf_or_nan_tracker += torch.isinf(self.grad_partitions_flat_buffer).any()
self.inf_or_nan_tracker += torch.isnan(self.grad_partitions_flat_buffer).any()
self.inf_or_nan_tracker = self.inf_or_nan_tracker > 0
overflow_gpu = self.inf_or_nan_tracker.clone().to(torch.uint8)
self.inf_or_nan_tracker.zero_()
get_accelerator().default_stream().wait_stream(self.reduce_and_partition_stream)
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.dp_process_group)
else:
params = []
for group in self.fp16_groups:
for param in group:
params.append(param)
overflow = self.has_overflow_serial(params, is_grad_list=partition_gradients)
overflow_gpu = get_accelerator().ByteTensor([overflow])
# Since each model parallel GPU carries only part of the model,
# make sure overflow flag is synced across all the model parallel GPUs
self._model_parallel_all_reduce(tensor=overflow_gpu, op=dist.ReduceOp.MAX)
overflow = overflow_gpu[0].item()
return bool(overflow)
# `x` is a torch.Tensor
@staticmethod
def _has_inf_or_nan(x, j=None):
try:
# if x is half, the .float() incurs an additional deep copy, but it's necessary if
# Pytorch's .sum() creates a one-element tensor of the same type as x
# (which is true for some recent version of pytorch).
cpu_sum = float(x.float().sum())
# More efficient version that can be used if .sum() returns a Python scalar
# cpu_sum = float(x.sum())
except RuntimeError as instance:
# We want to check if inst is actually an overflow exception.
# RuntimeError could come from a different error.
# If so, we still want the exception to propagate.
if "value cannot be converted" not in instance.args[0]:
raise
return True
else:
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
@instrument_w_nvtx
def backward(self, loss, retain_graph=False):
"""
:attr:`backward` performs the following steps:
1. fp32_loss = loss.float()
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves
"""
if self.swap_optimizer:
self.optimizer_swapper.pre_backward()
see_memory_usage(f"Before backward", force=False)
if self.custom_loss_scaler:
scaled_loss = self.external_loss_scale * loss
scaled_loss.backward()
else:
self.loss_scaler.backward(loss.float(), retain_graph=retain_graph)
self._get_param_coordinator(training=True).reset_step()
if self.swap_optimizer:
self.optimizer_swapper.post_backward()
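    # Hedged usage sketch (the engine/batch names are illustrative placeholders, not
    # from the original source): in practice the DeepSpeed engine drives this method
    # roughly as
    #   loss = model_engine(batch)      # forward pass
    #   model_engine.backward(loss)     # eventually invokes this backward()
    #   model_engine.step()             # unscale, clip, and update fp32 partitions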
def get_fp32_grad_partitions(self) -> Dict[int, Dict[int, Tensor]]:
"""get fp32 gradient partition dictionary
accessed as grad_dict[parameter_group_index][parameter_index]
"""
self.reduce_and_partition_stream.synchronize()
grad_dict = collections.defaultdict(dict)
if self.offload_optimizer:
for group in self.fp16_groups:
for param_idx, param in enumerate(group):
group_idx, dest_offset, num_elements = self.grad_position[self.get_param_id(param)]
fp32_grad = self.fp32_partitioned_groups_flat[group_idx].grad.narrow(0, dest_offset, num_elements)
grad_dict[group_idx][param_idx] = fp32_grad
else:
for group_idx, group in self.averaged_gradients.items():
for param_idx, gradient in enumerate(group):
grad_dict[group_idx][param_idx] = gradient.float()
return grad_dict
def _fp32_state_allgather(self, param, fp32_state):
reduce_buffer = torch.zeros(self.partition_count * fp32_state.numel(),
dtype=torch.float32,
device=param.device).flatten()
my_rank = dist.get_rank(group=self.dp_process_group)
partitions = [
reduce_buffer.narrow(0,
fp32_state.numel() * i, fp32_state.numel()) for i in range(self.partition_count)
]
partitions[my_rank].data.copy_(fp32_state.data, non_blocking=False)
dist.all_gather(partitions, partitions[my_rank], group=self.dp_process_group)
return reduce_buffer.narrow(0, 0, param.ds_numel).view(param.ds_shape)
def get_fp32_grad_for_param(self, param) -> Tensor:
if not param.requires_grad:
return None
self.reduce_and_partition_stream.synchronize()
if self.offload_optimizer:
group_idx, dest_offset, num_elements = self.grad_position[self.get_param_id(param)]
fp32_grad = self.fp32_partitioned_groups_flat[group_idx].grad.narrow(0, dest_offset,
num_elements).to(device=param.device)
else:
fp32_grad = self.__param_id_to_grad_partition[param.ds_id].float()
return self._fp32_state_allgather(param, fp32_grad)
def get_full_hp_param(self, param, optim_state_key=None) -> Tensor:
if not param.requires_grad:
return None
self.reduce_and_partition_stream.synchronize()
group_idx, dest_offset, num_elements = self.grad_position[self.get_param_id(param)]
if self._swappable_optimizer_subgroup(group_idx):
self._optimizer_states_and_gradient_swap_in(group_idx)
fp32_param = self.fp32_partitioned_groups_flat[group_idx]
if optim_state_key is None:
fp32_opt_state = fp32_param.narrow(0, dest_offset, num_elements).to(device=param.device)
else:
fp32_opt_state = self.optimizer.state[fp32_param][optim_state_key].narrow(
0, dest_offset, num_elements).to(device=param.device)
hp_param = self._fp32_state_allgather(param, fp32_opt_state)
if self._swappable_optimizer_subgroup(group_idx):
self._optimizer_states_and_gradient_swap_out(group_idx)
return hp_param
@instrument_w_nvtx
def _partition_all_parameters(self):
self.parameter_offload.partition_all_parameters()
def check_overflow(self, partition_gradients=True):
self._check_overflow(partition_gradients)
def _update_scale(self, has_overflow=False):
self.loss_scaler.update_scale(has_overflow)
# Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
self.trainable_param_groups = self._get_trainable_parameter_groups()
param_groups = property(_get_param_groups, _set_param_groups)
# Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
def _get_loss_scale(self):
if self.custom_loss_scaler:
return self.external_loss_scale
else:
return self.loss_scaler.cur_scale
def _set_loss_scale(self, value):
self.loss_scaler.cur_scale = value
loss_scale = property(_get_loss_scale, _set_loss_scale)
cur_scale = property(_get_loss_scale, _set_loss_scale)
def _get_lean_tensors(self, padded_flattened_tensor, group_tensors, paddings):
# Remove paddings from flattened tensor
individual_tensors = self.unflatten(padded_flattened_tensor, group_tensors)
lean_lengths = [t.numel() - pad for t, pad in zip(group_tensors, paddings)]
lean_tensors = [t[:len] for t, len in zip(individual_tensors, lean_lengths)]
#logger.info(f'rank {dist.get_rank()}: lean_tensors = {[t.numel() for t in lean_tensors]}')
return lean_tensors
#TODO REVISIT this for stage 3
def get_lean_optimizer_state(self):
# Return optimizer states after removing paddings.
# This method assumes that each param group contains a single flattened tensor.
optimizer_groups_state = []
for i, group in enumerate(self.optimizer.param_groups):
p = group['params'][0]
lean_state = {}
for key, value in self.optimizer.state[p].items():
if torch.is_tensor(value):
padded_lens = [t.numel() for t in self.fp16_partitioned_groups[i]]
lean_state[key] = self._get_lean_tensors(value, self.fp16_partitioned_groups[i],
self.groups_padding[i])
lean_flat_len = sum([t.numel() for t in lean_state[key]])
else:
lean_state[key] = value
optimizer_groups_state.append(lean_state)
return optimizer_groups_state
def get_groups_without_padding(self, groups_with_padding):
# Return group tensor after removing paddings added for alignment to DP world size.
groups_without_padding = []
for i, group in enumerate(groups_with_padding):
lean_group = self._get_lean_tensors(group, self.fp16_partitioned_groups[i], self.groups_padding[i])
groups_without_padding.append(lean_group)
return groups_without_padding
def _set_fp32_optimizer_param_groups(self):
for sub_group_id, _ in enumerate(self.fp16_groups):
param_group_id = self.sub_group_to_group_id[sub_group_id]
self.optimizer.param_groups[param_group_id]['params'].append(
self.fp32_partitioned_groups_flat[sub_group_id])
def _clear_fp32_optimizer_param_groups(self):
for param_group in self.optimizer.param_groups:
param_group['params'] = []
def _rigid_state_dict(self):
state_dict = {}
state_dict[ZERO_STAGE] = ZeroStageEnum.weights
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict[PARTITION_COUNT] = self.partition_count
self._set_fp32_optimizer_param_groups()
state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict()
state_dict[FP32_FLAT_GROUPS] = self.fp32_partitioned_groups_flat
self._clear_fp32_optimizer_param_groups()
return state_dict
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
if self.elastic_checkpoint:
raise NotImplementedError("ZeRO-3 does not yet support elastic checkpointing, please disable for now.")
if self.swap_optimizer or self.params_in_nvme_and_cpu:
raise NotImplementedError(
"ZeRO-3 does not yet support checkpointing with NVMe offloading, please disable for now.")
return self._rigid_state_dict()
# Restore base optimizer fp32 weights from checkpoint by:
# 1) Merging fp32 weights from checkpoints of all partitions
# 2) Extracting fp32 weights for current partition from merged weights
# 3) Using extracted weights to update base optimizer weights directly.
def _restore_from_fp32_weights(self, all_state_dict):
flat_local_partition = []
for i in range(len(self.fp32_partitioned_groups_flat)):
merged_partitions = [sd['fp32_groups'][i] for sd in all_state_dict]
flat_local_partition.append(self._get_flattened_partition(merged_partitions))
for current, saved in zip(self.fp32_partitioned_groups_flat, flat_local_partition):
current.data.copy_(saved.data)
# Restore base optimizer fp32 weights from ZeRO fp16 weights
def _restore_from_bit16_weights(self):
for fp16_partitions, fp32_partition in zip(self.fp16_partitioned_groups_flat,
self.fp32_partitioned_groups_flat):
fp32_partition.data.copy_(fp16_partitions.data)
# Refresh the fp32 master params from the fp16 copies.
def refresh_fp32_params(self):
self._restore_from_bit16_weights()
# Extract flattened partition for current rank from all partitions
def _get_flattened_partition(self, all_partition_states):
partition_id = dist.get_rank(group=self.dp_process_group)
alignment = dist.get_world_size(group=self.dp_process_group)
param_partitions = [[] for _ in range(len(all_partition_states[0]))]
for i, partition in enumerate(all_partition_states):
for j, param in enumerate(partition):
param_partitions[j].append(param)
local_state_partitions = []
for param_index, param_slices in enumerate(param_partitions):
flattened_merged_tensor = self.flatten_dense_tensors_aligned(param_slices, alignment)
new_partitions = self.get_data_parallel_partitions(flattened_merged_tensor)
local_state_partitions.append(new_partitions[partition_id])
if torch.is_tensor(local_state_partitions[0]):
return self.flatten_dense_tensors_aligned(local_state_partitions, alignment)
# Assume non-tensor states are not partitioned and equal across ranks, so return first one
return local_state_partitions[0]
# Restore base optimizer state from checkpoint by
# 1) Merging optimizer state from checkpoints of all partitions
# 2) Extracting optimizer state for current partition from the merged state
# 3) Using the extracted value to directly update the base optimizer.
def _restore_base_optimizer_state(self, all_state_dict):
base_optimizer_group_states = []
for i in range(len(self.optimizer.param_groups)):
partition_states = {}
all_partition_group_states = [sd['base_optimizer_state'][i] for sd in all_state_dict]
for key in all_partition_group_states[0].keys():
all_partition_states = [all_states[key] for all_states in all_partition_group_states]
partition_states[key] = self._get_flattened_partition(all_partition_states)
base_optimizer_group_states.append(partition_states)
for i, group in enumerate(self.optimizer.param_groups):
p = group['params'][0]
for key, saved in base_optimizer_group_states[i].items():
if torch.is_tensor(self.optimizer.state[p][key]):
self.optimizer.state[p][key].data.copy_(saved.data)
else:
self.optimizer.state[p][key] = saved
def _rigid_load_state_dict(self, state_dict, load_optimizer_states=True):
# I think it should actually be ok to reload the optimizer before the model.
self.loss_scaler = state_dict['loss_scaler']
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.overflow = state_dict['overflow']
if load_optimizer_states:
self._set_fp32_optimizer_param_groups()
self.optimizer.load_state_dict(state_dict[OPTIMIZER_STATE_DICT])
self._clear_fp32_optimizer_param_groups()
# restore fp32 partitions
for curr_param, saved_param in zip(self.fp32_partitioned_groups_flat, state_dict[FP32_FLAT_GROUPS]):
curr_param.data.copy_(saved_param.data)
# restore fp16 partitions from fp32
for sub_group_id in range(len(self.fp32_partitioned_groups_flat)):
fp32_param = self.fp32_partitioned_groups_flat[sub_group_id]
fp16_param = self.fp16_partitioned_groups_flat[sub_group_id]
fp16_param.data.copy_(fp32_param.data)
# update fp16 unflattened params
for sub_group_id in range(len(self.fp16_partitioned_groups_flat)):
updated_params = self.unflatten(self.fp16_partitioned_groups_flat[sub_group_id],
self.fp16_partitioned_groups[sub_group_id])
for partitioned_param, q in zip(self.fp16_partitioned_groups[sub_group_id], updated_params):
partitioned_param.data = q.data
# TODO: Support different/changing load/save DP degree.
def load_state_dict(self,
state_dict_list,
load_optimizer_states=True,
load_from_fp32_weights=False,
checkpoint_folder=None):
r"""Loading a ZeRO checkpoint
Arguments:
state_dict_list: List of all saved ZeRO checkpoints, one for each saved partition.
Note that the number of saved partitions may differ from number of loading partitions to support
changing GPU count, specifically DP world size, between saving and loading checkpoints.
load_optimizer_states: Boolean indicating whether or not to load base optimizer states
load_from_fp32_weights: Boolean indicating whether to initialize fp32 master weights from fp32
copies in checkpoints (no precision loss) or from model's fp16 copies (with precision loss).
"""
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
if self.elastic_checkpoint:
raise NotImplementedError("ZeRO-3 does not yet support elastic checkpointing, please disable for now.")
if self.swap_optimizer or self.params_in_nvme_and_cpu:
raise NotImplementedError(
"ZeRO-3 does not yet support checkpointing with NVMe offloading, please disable for now.")
self._rigid_load_state_dict(state_dict_list[dist.get_rank(group=self.dp_process_group)],
load_optimizer_states=load_optimizer_states)
if len(self.persistent_parameters) > 0:
self.persistent_parameters[0].partition(self.persistent_parameters)
self.persistent_parameters[0].all_gather(self.persistent_parameters)
def checkpoint_event_prologue(self):
self._partition_all_parameters()
def checkpoint_event_epilogue(self):
if len(self.persistent_parameters) > 0:
self.persistent_parameters[0].all_gather(self.persistent_parameters)
def empty_partition_cache(self):
self.parameter_offload.empty_partition_cache()
def _handle_overflow(cpu_sum, x, i):
import math
rank = dist.get_rank()
if rank == 0:
t_i = -1
for v_i, v in enumerate(x.data.contiguous().view(-1)):
if not math.isfinite(float(v)):
t_i = v_i
break
logger.info(f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}")
def estimate_zero3_model_states_mem_needs(total_params,
largest_layer_params,
num_gpus_per_node=1,
num_nodes=1,
cpu_offload=True,
cpu_offload_params=True,
zero_init=True,
additional_buffer_factor=1.5):
total_gpus = num_nodes * num_gpus_per_node
gpus_factor = 1 / num_nodes
largest_layer_memory = (4 * largest_layer_params)
if cpu_offload:
if cpu_offload_params:
gpu_mem = largest_layer_memory
if zero_init:
cpu_mem = total_params * 18 * gpus_factor * additional_buffer_factor
else:
cpu_mem = total_params * max(4 * num_gpus_per_node, 18 * gpus_factor) * additional_buffer_factor
else:
gpu_mem = largest_layer_memory + int(2 * total_params / total_gpus)
if zero_init:
cpu_mem = total_params * 16 * gpus_factor * additional_buffer_factor
else:
cpu_mem = total_params * max(4 * num_gpus_per_node, 16 * gpus_factor) * additional_buffer_factor
else:
gpu_mem = largest_layer_memory + int(18 * total_params / total_gpus)
if zero_init:
cpu_mem = largest_layer_params * 4 * num_gpus_per_node * additional_buffer_factor
else:
cpu_mem = total_params * 4 * num_gpus_per_node * additional_buffer_factor
return int(cpu_mem), int(gpu_mem), largest_layer_memory
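# Hedged worked example (illustrative numbers, not from the original source): for
# total_params=1e9 on a single node with 8 GPUs, cpu_offload=True,
# cpu_offload_params=True and zero_init=True, the formulas above give roughly
#   cpu_mem = 1e9 * 18 * (1 / 1) * 1.5 = 27e9 bytes (~25.1 GiB of host memory)
#   gpu_mem = 4 * largest_layer_params  (only the largest gathered layer is resident)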
def model_to_params(model):
# shared params calculated only once
total_params = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values())
largest_layer_params = 0
for m in model.modules():
# assuming no shared params within a single layer
layer_params = sum(p.numel() for p in m.parameters(recurse=False))
largest_layer_params = max(largest_layer_params, layer_params)
return total_params, largest_layer_params
def estimate_zero3_model_states_mem_needs_all_live(model,
num_gpus_per_node=1,
num_nodes=1,
additional_buffer_factor=1.5):
"""
Print out estimates on memory usage requirements for ZeRO 3 params, optim states and gradients
for a given ``model`` and hardware setup.
If you have an actual model object, use this function and everything will be derived
automatically.
If it's a hypothetical model, use ``estimate_zero3_model_states_mem_needs_all_cold`` where you have to pass
the ``total_params`` and ``largest_layer_params`` explicitly.
Args:
- ``model``: ``nn.Module`` object
- ``num_gpus_per_node``: how many gpus per node (defaults to 1)
- ``num_nodes``: how many nodes (defaults to 1),
- ``additional_buffer_factor``: estimation factor (defaults to 1.5):
"""
total_params, largest_layer_params = model_to_params(model)
estimate_zero3_model_states_mem_needs_all_cold(total_params=total_params,
largest_layer_params=largest_layer_params,
num_gpus_per_node=num_gpus_per_node,
num_nodes=num_nodes,
additional_buffer_factor=additional_buffer_factor)
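# Hedged usage sketch (the transformers import and model name are assumptions for
# illustration only):
#   from transformers import AutoModel
#   model = AutoModel.from_pretrained("bert-base-uncased")
#   estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=8, num_nodes=1)
# which prints the per-CPU / per-GPU estimates for each offload / zero_init combination.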
def estimate_zero3_model_states_mem_needs_all_cold(total_params,
largest_layer_params,
num_gpus_per_node=1,
num_nodes=1,
additional_buffer_factor=1.5):
"""
Print out estimates on memory usage requirements for ZeRO 3 params, optim states and gradients
for a given ``model`` and hardware setup.
If it's a hypothetical model, use this function where you have to pass
the ``total_params`` and ``largest_layer_params`` explicitly.
If you have an actual model object, use ``estimate_zero3_model_states_mem_needs_all_live`` and everything
will be derived automatically.
Args:
- ``total_params``: total model params
- ``largest_layer_params``: largest layer's params
- ``num_gpus_per_node``: how many gpus per node (defaults to 1)
- ``num_nodes``: how many nodes (defaults to 1),
- ``additional_buffer_factor``: estimation factor (defaults to 1.5):
"""
def format_options(cpu_offload, cpu_offload_params, zero_init):
enabled = []
padded_cpu_str = f'{OffloadDeviceEnum.cpu:4}'
param_device = padded_cpu_str if cpu_offload_params else "none"
enabled.append(f"offload_param={param_device}")
optimizer_device = padded_cpu_str if cpu_offload else "none"
enabled.append(f"offload_optimizer={optimizer_device}")
enabled.append(f"zero_init={1 if zero_init else 0}")
return ", ".join(enabled)
nodes_str = "nodes" if num_nodes > 1 else "node"
gpus_str = "GPUs" if num_gpus_per_node > 1 else "GPU"
print(
"Estimated memory needed for params, optim states and gradients for a:\n"
f"HW: Setup with {num_nodes} {nodes_str}, {num_gpus_per_node} {gpus_str} per node.\n"
f"SW: Model with {int(total_params/1e6)}M total params, {int(largest_layer_params/1e6)}M largest layer params."
)
print(" per CPU | per GPU | Options")
for cpu_offload in [True, False]:
for cpu_offload_params in [True, False]:
if not cpu_offload and cpu_offload_params:
continue
for zero_init in [True, False]:
cpu_mem, gpu_mem, largest_layer_memory = estimate_zero3_model_states_mem_needs(
total_params=total_params,
largest_layer_params=largest_layer_params,
num_gpus_per_node=num_gpus_per_node,
num_nodes=num_nodes,
cpu_offload=cpu_offload,
cpu_offload_params=cpu_offload_params,
zero_init=zero_init,
additional_buffer_factor=additional_buffer_factor)
options_str = format_options(cpu_offload=cpu_offload,
cpu_offload_params=cpu_offload_params,
zero_init=zero_init)
print(f" {cpu_mem/2**30:7.2f}GB | {gpu_mem/2**30:6.2f}GB | {options_str}")
| 118,239 | 45.241689 | 219 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/zero/utils.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
from typing import List
import torch
from deepspeed import comm as dist
from deepspeed.utils import logger
from deepspeed.ops.adam import DeepSpeedCPUAdam
from deepspeed.ops.adagrad import DeepSpeedCPUAdagrad
from deepspeed.ops.adam import FusedAdam
from deepspeed.utils.nvtx import instrument_w_nvtx
from deepspeed.accelerator import get_accelerator
def _initialize_parameter_parallel_groups(parameter_parallel_size=None):
data_parallel_size = int(dist.get_world_size())
parameter_parallel_size = parameter_parallel_size or data_parallel_size
logger.info("data_parallel_size: %s, parameter_parallel_size: %s", data_parallel_size, parameter_parallel_size)
assert data_parallel_size % parameter_parallel_size == 0, \
'world size should be divisible by parameter parallel size'
rank = dist.get_rank()
my_group = None
for i in range(data_parallel_size // parameter_parallel_size):
ranks = range(i * parameter_parallel_size, (i + 1) * parameter_parallel_size)
group = dist.new_group(ranks)
if rank in ranks:
my_group = group
return my_group
class ZeRORuntimeException(Exception):
pass
ZERO_SUPPORTED_OPTIMIZERS = [
torch.optim.Adam, torch.optim.AdamW, FusedAdam, DeepSpeedCPUAdam, torch.optim.Adagrad, DeepSpeedCPUAdagrad
]
# Add apex FusedAdam to supported list if apex is installed
try:
import apex
if hasattr(apex, 'optimizers') and hasattr(apex.optimizers, 'FusedAdam'):
ZERO_SUPPORTED_OPTIMIZERS.append(apex.optimizers.FusedAdam)
except ImportError:
pass
def is_zero_supported_optimizer(optimizer):
if dist.get_rank() == 0:
logger.info(f'Checking ZeRO support for optimizer={optimizer.__class__.__name__} type={type(optimizer)}')
return type(optimizer) in ZERO_SUPPORTED_OPTIMIZERS
def get_lst_from_rank0(lst: List[int]) -> List[int]:
"""
NOTE: creates both communication and synchronization overhead so should be used
sparingly
"""
lst_tensor = torch.tensor(
lst if dist.get_rank() == 0 else [-1] * len(lst),
dtype=int,
# device=get_accelerator().current_device_name(),
device=torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"])),
requires_grad=False,
)
dist.broadcast(lst_tensor, src=0, async_op=False)
return list(lst_tensor.cpu().numpy())
@instrument_w_nvtx
def assert_ints_same_as_other_ranks(ints: List[int]) -> None:
"""
NOTE: creates both communication and synchronization overhead so should be
used sparingly
takes a list of ints from each rank and ensures that they are the same
across ranks, throwing an exception if they are not.
"""
rank0_ints = get_lst_from_rank0(ints)
if ints != rank0_ints:
raise RuntimeError(f"disagreement between rank0 and rank{dist.get_rank()}: "
f"rank0: {rank0_ints}, rank{dist.get_rank()}: {ints}")
| 3,033 | 33.477273 | 115 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/zero/tiling.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed
from deepspeed.runtime.utils import partition_uniform as partition
def split_tensor_along_last_dim(tensor, partitions, contiguous_split_chunks=False):
"""Split a tensor along its last dimension. Adapted from Megatron-LM.
Arguments:
tensor: input tensor.
partitions: list of partition sizes to supply to torch.split
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
"""
# Get the size and dimension.
last_dim = tensor.dim() - 1
# Split.
tensor_list = torch.split(tensor, partitions, dim=last_dim)
# Note: torch.split does not create contiguous tensors by default.
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
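# Hedged usage sketch (illustrative shapes, not part of the original source):
#   x = torch.randn(2, 10)
#   a, b = split_tensor_along_last_dim(x, [6, 4], contiguous_split_chunks=True)
#   # a.shape == (2, 6), b.shape == (2, 4); both chunks are contiguous in memory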
class TiledLinear(torch.nn.Module):
def __init__(self,
in_features,
out_features,
bias=True,
in_splits=1,
out_splits=1,
input_is_already_split=False,
combine_out_splits=True,
linear_cls=torch.nn.Linear,
init_linear=None,
**kwargs):
"""A replacement for ``torch.nn.Linear`` that works with ZeRO-3 to reduce
memory requirements via tiling.
TiledLinear breaks the input and output dimensions of a linear layer
into tiles that are processed in sequence. This class enables huge
linear layers when combined with ZeRO-3 because inactive tiles can be
partitioned and offloaded.
.. note::
We recommend using as few tiles as necessary. Tiling
significantly reduces memory usage, but can reduce throughput
            for inexpensive layers. This is due to the smaller kernels having
less parallelism and lower arithmetic intensity, while
introducing more frequent synchronization and communication.
Args:
in_features (int): See ``torch.nn.Linear``
out_features (int): See ``torch.nn.Linear``
bias (bool, optional): See ``torch.nn.Linear``
in_splits (int, optional): The number of tiles along the input dimension. Defaults to 1.
out_splits (int, optional): The number of tiles along the output dimension. Defaults to 1.
input_is_already_split (bool, optional): If set to ``True``, assume that the ``input_`` in
to ``forward()`` is already split into ``in_splits`` chunks. Defaults to ``False``.
combine_out_splits (bool, optional): If set to ``False``, do not combine the ``out_splits`` outputs
into a single tensor. Defaults to ``True``.
linear_cls (class, optional): The underlying class to build individual tiles.
Defaults to ``torch.nn.Linear``.
init_linear (``torch.nn.Linear``, optional): If set, copy the parameters of
``init_linear``. Useful for debugging. Defaults to ``None``.
kwargs (dict, optional): additional keyword arguments to provide to ``linear_cls()``.
Raises:
            RuntimeError: ``in_splits`` must be within the range [1, in_features].
            RuntimeError: ``out_splits`` must be within the range [1, out_features].
"""
super().__init__()
if (in_splits < 1) or (in_splits > in_features):
raise RuntimeError('in splits must be in range [1, in_features].')
if (out_splits < 1) or (out_splits > out_features):
raise RuntimeError('out splits must be in range [1, out_features].')
# global, not necessarily local
self.in_features = in_features
self.out_features = out_features
self.use_bias = bias
self.out_splits = out_splits
self.in_splits = in_splits
self.input_is_already_split = input_is_already_split
self.combine_out_splits = combine_out_splits
# Build partition-lists. These are CSR-style splits [0, part0, part1, ..., features]
# For example, row_parts[p] gives the start of partition p and row_parts[p+1]
# is the exclusive end.
self.in_parts = partition(num_items=in_features, num_parts=in_splits)
self.out_parts = partition(num_items=out_features, num_parts=out_splits)
assert len(self.out_parts) == out_splits + 1
assert len(self.in_parts) == in_splits + 1
assert self.out_parts[0] == 0
assert self.out_parts[out_splits] == out_features
assert self.in_parts[in_splits] == in_features
self.linears = torch.nn.ModuleList()
for out_id in range(out_splits):
self.linears.append(torch.nn.ModuleList())
local_out_dim = self.out_parts[out_id + 1] - self.out_parts[out_id]
for in_id in range(in_splits):
#if input_size is split, we only need one bias
local_bias = bias if in_id == (in_splits - 1) else False
local_in_dim = self.in_parts[in_id + 1] - self.in_parts[in_id]
local = linear_cls(local_in_dim, local_out_dim, bias=local_bias, **kwargs)
self.linears[out_id].append(local)
# Optionally initialize with a known tensor
if init_linear is not None:
self.copy_params_from(init_linear)
def forward(self, input_):
if self.in_splits > 1 and not self.input_is_already_split:
input_parts = partition(input_.shape[-1], self.in_splits)
split_sizes = [input_parts[p + 1] - input_parts[p] for p in range(self.in_splits)]
inputs = self._split_global_input(input_, split_sizes)
elif self.in_splits > 1:
inputs = input_
assert len(
inputs) == self.in_splits, f"Col splits {self.in_splits} does not match input splits {len(inputs)}"
else:
# no splits
inputs = [input_]
outputs = [None] * self.out_splits
for out_id in range(self.out_splits):
for in_id in range(self.in_splits):
local_output = self.linears[out_id][in_id](inputs[in_id])
outputs[out_id] = self._reduce_local_output(in_id=in_id,
out_id=out_id,
current_out=outputs[out_id],
new_out=local_output)
if self.combine_out_splits:
return self._combine_output_splits(outputs)
return outputs
def _split_global_input(self, input, split_sizes):
"""Partition an input tensor along the last dimension, aligned with given splits.
Subclasses should override this method to account for new input types.
Args:
input (List[Tensor]): The tensor to partition along the last dimension.
split_sizes (List[int]): The size of each partition.
Returns:
List[Any]: A list of the chunks of ``input``.
"""
return split_tensor_along_last_dim(input, split_sizes)
def _reduce_local_output(self, in_id, out_id, current_out, new_out):
"""Reduce (sum) a new local result into the existing local results.
Subclasses should override this method.
        For a given ``out_id``, this method is called once for each input split
        (``in_splits`` times); the first call is a simple assignment.
Args:
in_id (int): The input split that produced ``new_out``.
out_id (int): The output split that produced ``new_out``.
current_out (Any): The reduced form of all previous ``out_id`` results.
            new_out (Any): The local result from forward (``in_id``, ``out_id``).
Returns:
Any: The combined result of ``current_out`` and ``new_out``.
"""
if current_out is None:
#this clone is necessary to preserve auto grad
#there is some issue with inplace update for outputs that are views
return new_out.clone()
else:
return current_out + new_out
def _combine_output_splits(self, outputs):
"""Join the splits of the output into a single result.
Args:
outputs (List[Any]): The reduced outputs for each output split.
Returns:
Any: The combined outputs.
"""
assert len(outputs) == self.out_splits
return torch.cat(outputs, dim=-1)
@torch.no_grad()
def copy_params_from(self, other):
"""Copy the weight and bias data from ``other``.
This is especially useful for reproducible initialization and testing.
Equivalent to:
.. code-block:: python
with torch.no_grad():
self.weight.copy_(other.weight)
if self.bias is not None:
self.bias.copy_(other.bias)
.. note::
If ZeRO-3 is enabled, this is a collective operation and the
updated parameters of data-parallel rank 0 will be visible on all
ranks. See :class:`deepspeed.zero.GatheredParameters` for more
information.
Args:
other (``torch.nn.Linear``): the linear layer to copy from.
"""
assert hasattr(other, 'weight')
assert other.weight.size() == (self.out_features, self.in_features)
if self.use_bias:
assert hasattr(other, 'bias')
assert other.bias is not None
assert other.bias.size() == (self.out_features, )
else:
assert other.bias is None
for row in range(self.out_splits):
rstart = self.out_parts[row]
rstop = self.out_parts[row + 1]
for col in range(self.in_splits):
cstart = self.in_parts[col]
cstop = self.in_parts[col + 1]
local = self.linears[row][col]
global_weight = other.weight[rstart:rstop, cstart:cstop]
with deepspeed.zero.GatheredParameters(local.weight, modifier_rank=0):
local.weight.copy_(global_weight)
if local.bias is not None:
with deepspeed.zero.GatheredParameters(local.bias, modifier_rank=0):
local.bias.data.copy_(other.bias[rstart:rstop].data)
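# Hedged usage sketch for TiledLinear (sizes and tile counts are illustrative, not
# from the original source):
#   tiled = TiledLinear(in_features=4096, out_features=4096, in_splits=4, out_splits=4)
#   out = tiled(torch.randn(8, 4096))   # same output shape as torch.nn.Linear(4096, 4096)
# Each of the 16 underlying torch.nn.Linear tiles can be partitioned and offloaded by
# ZeRO-3 independently, trading extra kernel launches for lower peak memory.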
class TiledLinearReturnBias(TiledLinear):
"""Wrapper for a Linear class that returns its own bias parameter, such as
used by Megatron-LM.
"""
def _reduce_local_output(self, in_id, out_id, current_out, new_out):
"""Reduces output tensors, but not the returned bias. """
if current_out is not None:
old_tensor, old_bias = current_out
else:
old_tensor, old_bias = None, None
assert isinstance(new_out, tuple)
assert len(new_out) == 2
tensor, bias = new_out
assert tensor is not None
tensor = super()._reduce_local_output(in_id=in_id, out_id=out_id, current_out=old_tensor, new_out=tensor)
if bias is None:
bias = old_bias
return tensor, bias
def _combine_output_splits(self, outputs):
# stack output tensors
tensors = [o[0] for o in outputs]
tensor = super()._combine_output_splits(tensors)
# stack biases if applicable
biases = [o[1] for o in outputs if o[1] is not None]
if len(biases) > 0:
bias = super()._combine_output_splits(biases)
else:
bias = None
return tensor, bias
| 11,727 | 38.488215 | 115 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/zero/partitioned_param_coordinator.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from dataclasses import dataclass
import collections
from collections import UserDict
from typing import Deque, Set
from deepspeed import comm as dist
from deepspeed.utils.logging import logger
from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum
from deepspeed.runtime.zero.partition_parameters import *
from deepspeed.runtime.zero.partitioned_param_profiler import PartitionedParameterProfiler
from deepspeed.runtime.swap_tensor.partitioned_param_swapper import PartitionedParamStatus
from deepspeed.utils.debug import debug_module2name_id, debug_param2name_id
from deepspeed.accelerator import get_accelerator
import logging
def debug_rank0(message: str) -> None:
if dist.get_rank() == 0:
logger.debug(message)
@instrument_w_nvtx
def get_all_parameters(sub_module, recurse=False):
return itertools.chain(sub_module.named_parameters(recurse=recurse), sub_module.ds_external_parameters())
def iter_params(module: Module, recurse=False) -> Iterable[Parameter]:
return map(lambda pair: pair[1], get_all_parameters(module, recurse))
class ZeRoTraceMode(Enum):
# Record trace of the network during a single forward+backward (for training) or forward (for inference)
RECORD = 1
# Use recorded network trace to optimize current forward+backward or forward
COMPLETE = 2
# Recorded trace does not match current forward+backward or forward pass.
INVALID = 3
class InflightParamRegistry(UserDict):
"""registry for parameters in flight"""
def __setitem__(self, param: Parameter, handle: AllGatherCoalescedHandle) -> None:
if param in self.data:
raise RuntimeError(f"{param.ds_summary()} already in registry")
if param.ds_status != ZeroParamStatus.INFLIGHT:
raise RuntimeError(f"attempted to add non-inflight parameter to registry {param.ds_summary()}")
self.data[param] = handle
class PartitionedParameterCoordinator:
FORWARD_FETCH_SUBMIT = 'forward_fetch_submit'
FORWARD_FETCH_WAIT = 'forward_fetch_wait'
FORWARD_PREFETCH_SUBMIT = 'forward_prefetch_submit'
BACKWARD_FETCH_SUBMIT = 'backward_fetch_submit'
BACKWARD_FETCH_WAIT = 'backward_fetch_wait'
BACKWARD_PREFETCH_SUBMIT = 'backward_prefetch_submit'
FORWARD_ALL_GATHER = 'forward_all_gather'
BACKWARD_ALL_GATHER = 'backward_all_gather'
"""Handles partitioning and gathering of parameters."""
@dataclass
class __ParamInTrace:
param: Parameter
step_id_last_used_at: int
def __init__(
self,
prefetch_bucket_sz: int,
max_reuse_distance_in_numel: int,
max_available_parameters_in_numel: int,
allgather_stream: get_accelerator().Stream,
inflight_param_registry: InflightParamRegistry,
prefetch_nvme: bool = False,
timers=None,
) -> None:
# mapping of param -> handle for each param that is currently in flight
self.__inflight_param_registry = inflight_param_registry
# keeps track of the number of submodules invoked so far.
self.__step_id: int = 0
# network tracing mode
self.__trace_mode: ZeRoTraceMode = ZeRoTraceMode.RECORD
# sequence of submodules/parameters in forward pass + backward pass
self.__submodule_order: Iterable[Module] = []
self.__param_order: Iterable[__class__.__ParamInTrace] = []
self.__most_recent_step_id_param_fetched_for = collections.defaultdict(lambda: int(-1e10))
self.__step_id_module_fetched_for = collections.defaultdict(lambda: collections.deque())
# number of available params, and max number of available params
self.__n_available_params: int = 0
self.__max_n_available_params: int = max_available_parameters_in_numel
# max distance between two uses of a module beyond which the module is released
self.__max_reuse_dist_in_numel: int = max_reuse_distance_in_numel
# queue for parameters to fetch. parameters will be popped off the left
# side of the deque as they are fetched
self.__param_queue: Deque[__class__.__ParamInTrace] = None
self.__prefetch_bucket_sz: int = prefetch_bucket_sz
self.__prefetch_nvme: bool = prefetch_nvme
self.hierarchy: int = 0
# stream that will be used for allgather operations
self.__allgather_stream: get_accelerator().Stream = allgather_stream
# limit the number of fetch events that can be queued at once
# otherwise, memory is allocated by the host thread at the time of the call
# but not used until later by the asynchronous cuda stream. allowing an
# infinite number of these to queue up causes a lot of memory pressure that
# then becomes detrimental to performance.
# this is a much less elegant way of fixing this than something like
# cudaMallocAsync/cudaFreeAsync. We choose not to expose this to the user for
# now because ideally it will eventually be replaced by an async allocation
# mechanism that doesn't require any configuration by the user.
self.__ongoing_fetch_events: Deque[get_accelerator().Event] = collections.deque()
# TODO. make this configurable via JSON
self.__max_ongoing_fetch_events: int = 2
self.__profiler = PartitionedParameterProfiler(timers)
"""Tracing and Tracking
TODO. consider performing trace before initializing PartitionedParameterCoordinator
and passing trace results into constructor. This way all the code in here can
just assume that the trace is complete and the results can be entirely
immutable.
Bookkeeping operations used to track where we are in the forward/backward pass
"""
def _clear_trace_structures(self) -> None:
self.__submodule_order = []
self.__param_order = []
self.__most_recent_step_id_param_fetched_for = collections.defaultdict(lambda: int(-1e10))
self.__param_queue = None
def is_complete_trace(self) -> bool:
return self.__trace_mode == ZeRoTraceMode.COMPLETE
def is_invalid_trace(self) -> bool:
return self.__trace_mode == ZeRoTraceMode.INVALID
def is_record_trace(self) -> bool:
return self.__trace_mode == ZeRoTraceMode.RECORD
def _invalidate_trace(self) -> None:
if self.is_invalid_trace():
raise RuntimeError("attempted to invalidate already invalid trace")
self.__trace_mode = ZeRoTraceMode.INVALID
self._clear_trace_structures()
def trace_prologue(self, sub_module: Module) -> None:
if self.is_complete_trace():
# sub_module must match expectation else invalidate trace cache
if len(self.__submodule_order) <= self.__step_id:
print_rank_0(
f"Invalidate trace cache @ step {self.__step_id} and module {sub_module.id}: "
f"cache has only {len(self.__submodule_order)} modules",
force=True)
self._invalidate_trace()
return
if sub_module != self.__submodule_order[self.__step_id]:
expected_module_id = self.__submodule_order[self.__step_id].id
print_rank_0(
f"Invalidate trace cache @ step {self.__step_id}: "
f"expected module {expected_module_id}, but got module {sub_module.id}",
force=True)
self._invalidate_trace()
def record_module(self, sub_module: Module) -> None:
"""adds sub module to trace"""
if not self.is_record_trace():
raise RuntimeError(f"attempted to record trace when status = {self.__trace_mode}")
self.__submodule_order.append(sub_module)
self.__step_id_module_fetched_for[sub_module.id].append(self.__step_id)
def record_parameters(self, sub_module: Module) -> None:
"""adds sub module to trace"""
if not self.is_record_trace():
raise RuntimeError(f"attempted to record trace when status = {self.__trace_mode}")
step_id = self.__step_id_module_fetched_for[sub_module.id].popleft()
for param in sorted(set(iter_params(sub_module)), key=lambda p: p.ds_id):
self.__param_order.append(__class__.__ParamInTrace(param=param, step_id_last_used_at=step_id))
def construct_parameter_trace_from_module_trace(self):
"""use module trace to construct parameter trace"""
self.__param_order = []
for sub_module in self.__submodule_order:
self.record_parameters(sub_module)
def reset_step(self) -> None:
"""indicate that we have completed one fwd+bwd for the model"""
if self.__inflight_param_registry:
raise RuntimeError(f"still have inflight params "
f"{[p.ds_summary() for p in self.__inflight_param_registry.keys()]}")
if not self.is_complete_trace(): # not self.trace_complete:
# Make sure that recorded submodule orders are identical across ranks
assert_ints_same_as_other_ranks([m.id for m in self.__submodule_order])
if self.is_record_trace():
# Successfully recorded a trace
self.construct_parameter_trace_from_module_trace()
# Make sure that recorded parameter orders are identical across ranks
assert_ints_same_as_other_ranks([p.param.ds_id for p in self.__param_order])
assert_ints_same_as_other_ranks([p.step_id_last_used_at for p in self.__param_order])
self.__submodule_order = tuple(self.__submodule_order) # freeze
self.__param_order = tuple(self.__param_order) # freeze
self.__trace_mode = ZeRoTraceMode.COMPLETE
print_rank_0(
f"completed record trace of {len(self.__submodule_order)} sub modules: {[m.id for m in self.__submodule_order]}",
force=False)
else:
# Enable trace recording for next forward/backward pass
self.__trace_mode = ZeRoTraceMode.RECORD
else:
if self.__profiler is not None:
self.__profiler.log_events()
self.__param_queue = collections.deque(self.__param_order) # reset fetch queue
self.__most_recent_step_id_param_fetched_for = collections.defaultdict(lambda: int(-1e10))
self.__step_id_module_fetched_for = collections.defaultdict(lambda: collections.deque())
self.__step_id = 0
self.__n_available_params = 0
self.__profiler.reset_events()
def _dump_params(self, tag, sub_module, params, step_id=None):
if step_id is None:
step_id = self.__step_id
param_names = [debug_param2name_id(p) for p in params]
print_rank_0(f'{tag} step = {step_id} mod = {debug_module2name_id(sub_module)} p_names = {param_names}',
force=False)
def _dump_param_ids(self, tag, mod_id, p_ids, step_id=None):
if step_id is None:
step_id = self.__step_id
print_rank_0(f'{tag} mod = {mod_id}, step = {step_id}, p_ids = {p_ids}', force=False)
"""Fetch and Release
Fetching, prefetching, and releasing parameters
"""
@instrument_w_nvtx
@torch.no_grad()
def fetch_sub_module(self, current_submodule: Module, forward: bool) -> None:
"""This method does the following (in order):
1. kick off fetch for parameters in immediately required sub module
2. kick off fetch for next few parameters we will need later (prefetch)
3. block on parameters in immediately required sub module
"""
if logger.isEnabledFor(logging.DEBUG):
debug_rank0(
f"{self.__step_id}: M{current_submodule.id}({type(current_submodule).__name__}) P{[p.ds_id for p in iter_params(current_submodule)]} "
+ str({
"avail": f"{self.__n_available_params:.1e}",
"queue_sz": f"{len(self.__param_queue or [])}",
"inflight": [p.ds_id for p in self.__inflight_param_registry],
}))
params_to_fetch = frozenset(iter_params(current_submodule))
fetch_numel = sum(
[p.partition_numel() for p in params_to_fetch if p.ds_status == ZeroParamStatus.NOT_AVAILABLE])
if fetch_numel > 0:
event_name = __class__.FORWARD_FETCH_SUBMIT if forward else __class__.BACKWARD_FETCH_SUBMIT
self._dump_param_ids(event_name, current_submodule.id,
[p.ds_id for p in params_to_fetch if p.ds_status == ZeroParamStatus.NOT_AVAILABLE])
self.__profiler.start_event(event_name)
# kick off all gather for params in the immediately required submodule
#for param in params_to_fetch:
if logger.isEnabledFor(logging.DEBUG):
for param in params_to_fetch:
debug_rank0(f"-fetch: {param.ds_summary()}")
self.__all_gather_params(params_to_fetch, forward)
self.__profiler.stop_event(event_name, fetch_numel)
wait_numel = 0
wait_event_name = __class__.FORWARD_FETCH_WAIT if forward else __class__.BACKWARD_FETCH_WAIT
self.__profiler.start_event(wait_event_name)
# wait for parameters in the immediately needed submodule to become available
for param in params_to_fetch:
param.ds_active_sub_modules.add(current_submodule.id)
if logger.isEnabledFor(logging.DEBUG):
debug_rank0(f"-wait: {param.ds_summary()}")
if param in self.__inflight_param_registry:
wait_numel += param.partition_numel()
with get_accelerator().stream(self.__allgather_stream):
while self.__ongoing_fetch_events and self.__ongoing_fetch_events[0].query():
self.__ongoing_fetch_events.popleft()
if len(self.__ongoing_fetch_events) > self.__max_ongoing_fetch_events:
self.__ongoing_fetch_events.popleft().synchronize()
self.__inflight_param_registry.pop(param).wait()
event = get_accelerator().Event()
event.record()
self.__ongoing_fetch_events.append(event)
assert param.ds_status == ZeroParamStatus.AVAILABLE, param.ds_summary()
get_accelerator().current_stream().wait_stream(self.__allgather_stream)
self.__profiler.stop_event(wait_event_name, wait_numel)
# kick off parameter prefetches for upcoming modules
# don't prefetch if we dont have a completed model trace
if self.is_complete_trace():
# go through the parameters we need for the current module and pop them
# off the fetch queue so that they aren't prefetched later.
# if params have already been popped off the fetch queue by earlier
# prefetches we won't look for them here
discarded_from_prefetch_queue = set()
params_not_already_fetched = set(
filter(lambda p: self.__most_recent_step_id_param_fetched_for[p] < self.__step_id, params_to_fetch))
while self.__param_queue and len(discarded_from_prefetch_queue) < len(params_not_already_fetched):
param_in_trace = self.__param_queue.popleft()
self.__most_recent_step_id_param_fetched_for[
param_in_trace.param] = param_in_trace.step_id_last_used_at
discarded_from_prefetch_queue.add(param_in_trace.param)
if discarded_from_prefetch_queue != params_not_already_fetched:
raise RuntimeError(
f"tracing error at step {self.__step_id}: \n"
f"module id: {current_submodule.id}, training: {current_submodule.training}\n"
f"expected the next {len(params_not_already_fetched)} parameters in the "
f"parameter fetch queue to be {tuple(p.ds_summary(use_debug_name=True) for p in params_not_already_fetched)} \n"
f"but got \n {tuple(p.ds_summary(use_debug_name=True) for p in discarded_from_prefetch_queue)}.")
def _is_currently_on_nvme(param):
if param.nvme_swapper is None:
return False
return param.ds_tensor.final_location == OffloadDeviceEnum.nvme \
and param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE
# kick off all gather for params in the next few submodules (prefetch)
if self.__prefetch_bucket_sz > 0:
max_params_to_prefetch = min(self.__max_n_available_params - self.__n_available_params,
self.__prefetch_bucket_sz)
params_to_prefetch = set()
numel_prefetching = 0
while self.__param_queue and numel_prefetching < max_params_to_prefetch:
param_in_trace: __class__.__ParamInTrace = self.__param_queue.popleft()
if _is_currently_on_nvme(param_in_trace.param):
# nvme prefetch is handled elsewhere. Need to break here to preserve fetch order
self.__param_queue.appendleft(param_in_trace)
break
do_prefetch = param_in_trace.param.ds_status == ZeroParamStatus.NOT_AVAILABLE
if param_in_trace.param in params_to_prefetch:
# Avoid duplicates
do_prefetch = False
self.__most_recent_step_id_param_fetched_for[param_in_trace.param] = \
max(self.__most_recent_step_id_param_fetched_for[param_in_trace.param],
param_in_trace.step_id_last_used_at)
if do_prefetch:
params_to_prefetch.add(param_in_trace.param)
numel_prefetching += param_in_trace.param.ds_numel
if numel_prefetching > 0:
event_name = __class__.FORWARD_PREFETCH_SUBMIT if forward else __class__.BACKWARD_PREFETCH_SUBMIT
self.__profiler.start_event(event_name)
if logger.isEnabledFor(logging.DEBUG):
for param in params_to_prefetch:
debug_rank0(f"-prefetch: {param.ds_summary()}")
self.__all_gather_params(params_to_prefetch, forward)
self.__profiler.stop_event(event_name, numel_prefetching)
if self.__prefetch_nvme:
self.__prefetch_nvme_param_partitions()
self.__step_id += 1
@instrument_w_nvtx
@torch.no_grad()
def release_sub_module(self, submodule: Module, backward: bool) -> None:
"""release the parameters of a sub module, assuming they meet conditions to
be released."""
params_to_release = (self.__params_to_release(submodule, self.__step_id) if self.is_complete_trace() else set(
p.ds_id for p in iter_params(submodule)))
for param in iter_params(submodule):
param.ds_active_sub_modules.discard(submodule.id)
if param.ds_id in params_to_release and not param.is_external_param:
self.__release_param(param, backward)
@instrument_w_nvtx
@torch.no_grad()
def release_and_reset_all(self, module: Module) -> None:
"""release all module parameters"""
for param in iter_params(module, recurse=True):
if param in self.__inflight_param_registry:
raise RuntimeError(f"param {param.ds_summary()} still in flight")
# TODO. make this throw if there are still active submodules. currently
# there's a hook execution issue
param.ds_active_sub_modules.clear()
self.__release_param(param, backward=False)
for param in iter_params(module, recurse=True):
if param.ds_status != ZeroParamStatus.NOT_AVAILABLE:
raise RuntimeError(f"{param.ds_summary()} expected to be released")
@instrument_w_nvtx
def __all_gather_params(self, params: Set[Parameter], forward: bool) -> None:
"""for each partitioned parameter, kick off an async allgather and store
the work handle for the in flight parameters."""
partitioned_params = []
all_gather_numel = 0
for param in params:
if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
partitioned_params.append(param)
all_gather_numel += param.ds_numel
if partitioned_params:
self.__n_available_params += all_gather_numel
with get_accelerator().stream(self.__allgather_stream):
event_name = __class__.FORWARD_ALL_GATHER if forward else __class__.BACKWARD_ALL_GATHER
self.__profiler.start_event(event_name)
handle = partitioned_params[0].all_gather_coalesced(partitioned_params, forward)
self.__profiler.stop_event(event_name, all_gather_numel)
for param in partitioned_params:
assert param.ds_status == ZeroParamStatus.INFLIGHT, param.ds_summary()
self.__inflight_param_registry[param] = handle
# Release swap buffers for persisted params on nvme since they will never be partitioned or evicted from GPU
swap_persisted_params = [
p for p in partitioned_params if p.ds_persist and p.ds_tensor.final_location == OffloadDeviceEnum.nvme
]
if swap_persisted_params:
swap_persisted_params[0].nvme_swapper.remove_partition_and_release_buffers(swap_persisted_params)
@instrument_w_nvtx
def __release_param(self, param: Parameter, backward: bool) -> None:
if param.ds_status == ZeroParamStatus.AVAILABLE and not param.ds_active_sub_modules:
if logger.isEnabledFor(logging.DEBUG):
debug_rank0(f"-release: {param.ds_summary()}")
param.partition(backward=backward)
self.__n_available_params -= param.ds_numel
@instrument_w_nvtx
@functools.lru_cache(maxsize=None)
def __params_to_release(self, submodule_to_release: Module, step_id: int) -> Set[int]:
if not self.is_complete_trace():
raise RuntimeError("expected trace to be complete")
params_to_release = set(p.ds_id for p in iter_params(submodule_to_release) if not p.ds_persist)
# Problem: When prefetcher scans the param trace, it skips AVAILABLE params.
# This creates issues if those params are released before the skipped uses:
# 1) It hurts performance as the skipped uses are never prefetched.
# 2) For nvme params, we run out of swap buffers because the prefetch order
# diverges from the trace.
# Solution: Don't release params whose reuse was skipped by prefetch. This is
# possible because we detect such skips during prefetch and mark those params.
for param in iter_params(submodule_to_release):
if self.__most_recent_step_id_param_fetched_for[param] > step_id:
params_to_release.discard(param.ds_id)
# examine all modules within `max_reuse_dist_in_numel` of the current step,
# if we see any of the candidate parameters to be released reoccur while
# doing this, remove them from the set of parameters to release.
params_traversed = 0
for module in self.__submodule_order[step_id:]:
if params_traversed >= self.__max_reuse_dist_in_numel:
break
for param in iter_params(module):
params_to_release.discard(param.ds_id)
params_traversed += param.ds_numel
return params_to_release
@instrument_w_nvtx
def __prefetch_nvme_param_partitions(self) -> None:
"""swap in parameter partitions from nvme for those parameters that will be used
after the ones that are already being prefetched into full parameters
"""
if not self.is_complete_trace():
return
numel_in_flight = sum(param.ds_numel for param in self.__inflight_param_registry)
numel_considered = 0
swap_in_params = []
for param_in_trace in self.__param_queue:
param = param_in_trace.param
if param.nvme_swapper is None:
continue
if (numel_considered > 2 * numel_in_flight
or len(swap_in_params) >= param.nvme_swapper.available_swap_in_buffers()):
break
if param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE:
swap_in_params.append(param)
numel_considered += param.ds_numel
if swap_in_params:
swap_in_params[0].nvme_swapper.swap_in(swap_in_params, async_op=True)
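# --- Illustrative sketch (not part of the original file) --------------------
# A toy model of the record-then-replay idea used by
# PartitionedParameterCoordinator above: the first pass records the order in
# which submodules request their parameters; later passes replay that trace
# from a deque and prefetch a bounded amount of upcoming work. The module
# names, sizes, and budget below are hypothetical.
def _example_trace_prefetch():
    import collections

    # pass 1: record the fetch order as (module name, parameter numel)
    trace = [("layer0", 100), ("layer1", 300), ("layer2", 200), ("layer3", 400)]
    prefetch_budget = 500  # stand-in for prefetch_bucket_sz

    # pass 2: replay the trace; at each step fetch the required entry and
    # look ahead in the remaining queue up to the prefetch budget
    queue = collections.deque(trace)
    for step, (module, _numel) in enumerate(trace):
        fetched_module, _ = queue.popleft()  # immediately required module
        assert fetched_module == module
        prefetched, budget = [], prefetch_budget
        for upcoming_module, upcoming_numel in queue:
            if upcoming_numel > budget:
                break
            prefetched.append(upcoming_module)
            budget -= upcoming_numel
        print(f"step {step}: fetch {module}, prefetch {prefetched}")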
| 25,138 | 48.978131 | 150 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/zero/linear.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
#Linear Module to use with ZeRO Stage 3 to allow for parameter memory release
#after the module execution during forward
#Instead of saving variables using save_for_backward, we save variable ids
#Allowing us to retrieve the variable without creating pointer to it
#Which allows for underlying tensor to be garbage collected
#When partitioned as needed by the Zero Stage 3 optimizer
#TODO instead of patching Linear module, we could patch the ctx.save_for_backward
#ctx.saved_tensors so that this approach works for all nn modules that are built upon
#torch.nn.function. However the issue is that many modules uses C++ implementations
#which does not have pytorch implementation. Eg torch.addmm which acts as a functional
#when implemented outside of torch.autograd.Function
import math
import torch
from torch import Tensor
from torch.nn.parameter import Parameter
from torch.nn import init
from torch.nn.modules.module import Module
from deepspeed.runtime.utils import noop_decorator
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator
def print_rank_0(message, debug=False, force=False):
if dist.get_rank() == 0 and (debug or force):
print(message)
try:
autocast_custom_fwd = get_accelerator().amp().custom_fwd
autocast_custom_bwd = get_accelerator().amp().custom_bwd
except (ImportError, AttributeError) as exp:
autocast_custom_fwd = noop_decorator
autocast_custom_bwd = noop_decorator
class LinearFunctionForZeroStage3(torch.autograd.Function):
# Note that both forward and backward are @staticmethods
@staticmethod
@autocast_custom_fwd
# bias is an optional argument
def forward(ctx, input, weight, bias=None):
ctx.save_for_backward(input, weight, bias)
if input.dim() == 2 and bias is not None:
# fused op is marginally faster
ret = torch.addmm(bias, input, weight.t())
else:
output = input.matmul(weight.t())
if bias is not None:
output += bias
ret = output
return ret
# This function has only a single output, so it gets only one gradient
@staticmethod
@autocast_custom_bwd
def backward(ctx, grad_output):
# This is a pattern that is very convenient - at the top of backward
# unpack saved_tensors and initialize all gradients w.r.t. inputs to
# None. Thanks to the fact that additional trailing Nones are
# ignored, the return statement is simple even when the function has
# optional inputs.
input, weight, bias = ctx.saved_tensors
grad_input = grad_weight = grad_bias = None
#print(f"backward shaped grad_output {grad_output.shape}, input {input.shape}, weight {weight.shape} and bias {bias.shape if bias is not None else None}")
# These needs_input_grad checks are optional and there only to
# improve efficiency. If you want to make your code simpler, you can
# skip them. Returning gradients for inputs that don't require it is
# not an error.
if ctx.needs_input_grad[0]:
#print(f"Computing grad input weight {weight.shape} grad_output {grad_output.shape}")
grad_input = grad_output.matmul(weight)
#print(f"Computed grad input {grad_input.shape}")
if ctx.needs_input_grad[1]:
#print("Computing grad weight")
dim = grad_output.dim()
if dim > 2:
grad_weight = grad_output.reshape(-1,
grad_output.shape[-1]).t().matmul(input.reshape(-1, input.shape[-1]))
else:
grad_weight = grad_output.t().matmul(input)
#print(f"Computed grad weight grad_weight {grad_weight.shape}")
if bias is not None and ctx.needs_input_grad[2]:
#print("Computing grad bias")
grad_bias = grad_output.sum(0)
#print("Done computing grad bias")
#print("needs bias")
#print(f"backward shaped grad_input {grad_input.shape}, grad_weight {grad_weight.shape}, grad_bias {grad_bias.shape if grad_bias is not None else None}")
return grad_input, grad_weight, grad_bias
def zero3_linear_wrap(input, weight, bias=None):
if bias is None:
return LinearFunctionForZeroStage3.apply(input, weight)
else:
return LinearFunctionForZeroStage3.apply(input, weight, bias)
class LinearModuleForZeroStage3(Module):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.
The weights are pre-transposed and stored as A^T instead of transposing during each
forward. Memory savings proportional to the parameter size.
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: If set to ``False``, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input: :math:`(N, *, H_{in})` where :math:`*` means any number of
additional dimensions and :math:`H_{in} = \text{in\_features}`
- Output: :math:`(N, *, H_{out})` where all but the last dimension
are the same shape as the input and :math:`H_{out} = \text{out\_features}`.
Attributes:
weight: the learnable weights of the module of shape
:math:`(\text{out\_features}, \text{in\_features})`. The values are
initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
:math:`k = \frac{1}{\text{in\_features}}`
bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
If :attr:`bias` is ``True``, the values are initialized from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{1}{\text{in\_features}}`
Examples::
>>> m = nn.Linear(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
super(LinearModuleForZeroStage3, self).__init__()
print("Building ZeRO module")
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input: Tensor) -> Tensor:
return LinearFunctionForZeroStage3.apply(input, self.weight, self.bias)
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(self.in_features, self.out_features, self.bias
is not None)
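# --- Illustrative sketch (not part of the original file) --------------------
# A small sanity check, assuming only the module-level imports above: the
# custom autograd function should match torch.nn.functional.linear in the
# forward pass, and its hand-written backward() should populate gradients for
# input, weight, and bias.
def _example_check_linear_function():
    x = torch.randn(8, 16, requires_grad=True)
    w = torch.randn(32, 16, requires_grad=True)
    b = torch.randn(32, requires_grad=True)

    out = LinearFunctionForZeroStage3.apply(x, w, b)
    ref = torch.nn.functional.linear(x, w, b)
    assert torch.allclose(out, ref, atol=1e-5)

    out.sum().backward()  # exercises the custom backward()
    assert x.grad is not None and w.grad is not None and b.grad is not None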
| 7,403 | 41.068182 | 162 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/zero/mics.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import sys
from typing import List
import deepspeed
import torch
from deepspeed import comm as dist
from deepspeed.runtime.zero.mics_utils import (MiCS_CommGroups, create_mics_comm_groups, scale_tensors)
from deepspeed.runtime.zero.parameter_offload import (DeepSpeedZeRoOffload, is_zero_param)
from deepspeed.runtime.zero.partition_parameters import Init, AllGatherCoalescedHandle, ZeroParamStatus
from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3
from deepspeed.utils import instrument_w_nvtx, log_dist
from deepspeed.accelerator import get_accelerator
from torch import Tensor
from torch.nn import Parameter
def has_hierarchical_all_gather_groups(comm_groups: MiCS_CommGroups):
result = False
if comm_groups.param_intra_node_group is not None and comm_groups.param_inter_node_shard_group is not None:
result = True
return result
class MiCS_AllGatherCoalescedHandle(AllGatherCoalescedHandle):
""" This handle assumes that no need to
copy data out from a contiguous tensor
"""
def __init__(self, allgather_handle, params: List[Parameter], partitions: List[Tensor], world_size: int) -> None:
super().__init__(allgather_handle, params, partitions, world_size)
def wait(self) -> None:
"""
"""
# let the current stream to op
instrument_w_nvtx(self.allgather_handle.wait)()
if self.complete:
return
for _, param in enumerate(self.params):
assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight"
param.ds_status = ZeroParamStatus.AVAILABLE
self.complete = True
class MiCS_Init(Init):
def __init__(self,
module=None,
data_parallel_group=None,
mem_efficient_linear=True,
remote_device=None,
pin_memory=False,
config_dict_or_path=None,
config=None,
enabled=True,
dtype=None,
mpu=None):
"""A context manager to partition the model parameters during the model
construction with MiCS partition strategy. Model states are partitioned
to the number of devices specified via ``mics_shard_size`` field in the
deepspeed config json file. The context manager also introduces a
hierarchical communication method to reduce the cost of inter-node
communication, which can be enabled with the
``mics_hierarchical_params_gather`` field in the deepspeed config.
Args:
module (``torch.nn.Module``, optional): If provided, partition the model as
if it was constructed in the context.
data_parallel_group (``deepspeed.comm`` process group, optional):
The group of processes to partition among. Defaults to all processes.
mem_efficient_linear (bool, optional): Replace
torch.nn.functional.linear with an implementation that allows
DeepSpeed to partition parameters. Defaults to ``True``.
remote_device (string, optional): The initial device to store model
weights e.g., ``cpu``, ``nvme``. Passing ``"cpu"`` will create the model in CPU
memory. The model may still be moved to GPU based on the
offload settings for training. Defaults to param offload device if a config is
defined, otherwise GPU.
pin_memory (bool, optional): Potentially increase performance by
using pinned memory for model weights. ``remote_device`` must be
``"cpu"``. Defaults to pin_memory value in config, otherwise ``False``.
config_dict_or_path (dict or ``json file``, optional): If provided, provides configuration
for swapping fp16 params to NVMe.
config (dict or ``json file``, optional): Deprecated, use config_dict_or_path instead.
enabled (bool, optional): If ``False``, this context has no
effect. Defaults to ``True``.
dtype (``dtype``, optional): Can be used to change the data type of the parameters.
Supported options are ``torch.half`` and ``torch.float``. Defaults to ``None``
mpu (``object``, optional): A model parallelism unit object that implements get_{model,data}_parallel_{rank,group,world_size}.
This context follows the same logic as ``deepspeed.zero.Init()``, but
with a modified partition size for each parameter.
Examples
--------
#. Allocate a model and partition it among all processes:
.. code-block:: python
# the config_dict_or_path is required to let the context manager know
# how to partition the parameters.
# The configuration has to include the field ``mics_shard_size``
with deepspeed.zero.MiCS_Init(config_dict_or_path=ds_config):
model = MyLargeModel()
#. Allocate a model in pinned CPU memory and partition it among a subgroup of processes:
.. code-block:: python
with deepspeed.zero.MiCS_Init(data_parallel_group=mpu.get_data_parallel_group(),
remote_device="cpu",
pin_memory=True
config_dict_or_path=ds_config):
model = MyLargeModel()
#. Partition an already-allocated model in CPU memory:
.. code-block:: python
model = deepspeed.zero.MiCS_Init(module=model,
config_dict_or_path=ds_config)
"""
assert config_dict_or_path is not None, "Must provide configuration for MiCS Initialization"
_ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path, mpu)
if not dist.is_initialized():
dist.init_distributed()
assert dist.is_initialized(), "Parameters cannot be scattered without initializing deepspeed.comm"
self.mics_comm_groups = create_mics_comm_groups(
_ds_config.mics_shard_size,
data_parallel_group,
hierarchical_allgather=_ds_config.mics_hierarchial_params_gather,
mpu=mpu)
super().__init__(module, data_parallel_group, mem_efficient_linear, remote_device, pin_memory,
config_dict_or_path, config, enabled, dtype, mpu)
def _convert_to_deepspeed_param(self, param):
super()._convert_to_deepspeed_param(param)
# attach communication groups to every param
param.comm = self.mics_comm_groups
# record existing all_gather_coalesced implementation
# so that we can fallback later
old_all_gather_coalesced = param.all_gather_coalesced
def _param_all_gather_coalesced(params, safe_mode=False, param_buffers=None):
""""""
mics_comm_groups: MiCS_CommGroups = params[0].comm
hierarchical_all_gather = has_hierarchical_all_gather_groups(mics_comm_groups)
if dist.has_coalescing_manager() and hierarchical_all_gather:
return self._hierarchical_all_gather_params(params, param_buffers)
elif dist.has_coalescing_manager():
return self._flat_all_gather_with_coalescing_manager(params, param_buffers)
else:
return old_all_gather_coalesced(params, safe_mode)
# change the all_gather_coalesced method
param.all_gather_coalesced = _param_all_gather_coalesced
def _pre_all_gather(self, params, params_buffers=None):
# fetches from nvme if the partition is not available and in nvme
self._ensure_availability_of_partitioned_params(params)
for param in params:
if param.ds_status != ZeroParamStatus.NOT_AVAILABLE:
raise RuntimeError(param.ds_summary())
param.ds_status = ZeroParamStatus.INFLIGHT
# ensure that each rank has params in same order. the allgather
# is done by flattening the parameter list into a single tensor that
# can be allgathered in a single call - this means that if each rank
# gives a list of the same parameters in a different order we will
# silently get incorrect parameter values, and have very difficult
# to debug correctness issues.
params = sorted(params, key=lambda p: p.ds_id)
return params, params_buffers
def _flat_all_gather_with_coalescing_manager(self, params, params_buffers=None):
""""""
# must have to change the status of the param
# and ensure they are on the device
params, params_buffers = self._pre_all_gather(params, params_buffers)
mics_comm_groups: MiCS_CommGroups = params[0].comm
param_shard_size = mics_comm_groups.param_shard_size
output_tensors = []
input_tensors = []
for i, p in enumerate(params):
t_size = p.ds_tensor.ds_numel * param_shard_size
if params_buffers is not None and params_buffers[i] is not None:
assert params_buffers[i].numel(
) == t_size, f'params_to_gather_buffers[{i}] size {params_buffers[i].numel()} does not match with t_size {t_size}'
flat_out = params_buffers[i]
else:
flat_out = torch.empty(t_size, dtype=p.dtype, device=self.local_device, requires_grad=False).view(-1)
output_tensors.append(flat_out)
_flat_input = p.ds_tensor.data.view(-1)
input_tensors.append(_flat_input)
all_gather_handle = dist.all_gather_coalesced(output_tensors,
input_tensors,
group=mics_comm_groups.param_shard_group,
async_op=True)
for idx, param in enumerate(params):
param.data = output_tensors[idx].narrow(0, 0, param.ds_numel).view(param.ds_shape).data
return MiCS_AllGatherCoalescedHandle(allgather_handle=all_gather_handle,
params=params,
partitions=[],
world_size=param_shard_size)
def _hierarchical_all_gather_params(self, params, params_buffers=None):
""""""
params, params_buffers = self._pre_all_gather(params, params_buffers)
mics_comm_groups: MiCS_CommGroups = params[0].comm
local_rank = dist.get_rank(group=mics_comm_groups.param_intra_node_group)
inter_node_comm_group = mics_comm_groups.param_inter_node_shard_group
intra_node_comm_group = mics_comm_groups.param_intra_node_group
param_shard_size = mics_comm_groups.param_shard_size
inter_node_size = dist.get_world_size(group=inter_node_comm_group)
intra_node_size = dist.get_world_size(group=intra_node_comm_group)
param_tensors = []
for i, p in enumerate(params):
param_size = p.ds_tensor.ds_numel * param_shard_size
if params_buffers is not None and params_buffers[i] is not None:
assert params_buffers[i].numel(
) == param_size, f'param_buffers[{i}] size {params_buffers[i].numel()} does not match with param_size {param_size}'
param_tensor = params_buffers[i]
else:
param_tensor = torch.empty(param_size, dtype=p.dtype, device=self.local_device,
requires_grad=False).view(-1)
param_tensors.append(param_tensor)
# inter node all-gather
inter_outputs = []
inter_inputs = []
for i, p in enumerate(params):
inter_size = p.ds_tensor.ds_numel * inter_node_size
_out = param_tensors[i].narrow(0, local_rank * inter_size, inter_size)
inter_outputs.append(_out)
inter_inputs.append(p.ds_tensor.data.view(-1).to(self.local_device))
# sync enqueue
dist.all_gather_coalesced(inter_outputs, inter_inputs, group=inter_node_comm_group, async_op=False)
# intra node all-gather
intra_outputs = []
intra_inputs = []
for i, p in enumerate(params):
# partition param into multiple chunks for allgather
# because inter-node all-gather outputs are in contiguous memory,
# while in param memory those inter-node chunks are placed at
# different locations.
# each chunk is an intra-node output
param_chunk = param_tensors[i].view(
(inter_node_size, intra_node_size, p.ds_tensor.ds_numel)).narrow(1, local_rank, 1)
param_chunk.copy_(inter_outputs[i].detach().clone().view(param_chunk.size()))
output_chunks = torch.chunk(param_tensors[i], inter_node_size)
for j, _out in enumerate(output_chunks):
intra_chunk_size = intra_node_size * p.ds_tensor.ds_numel
local_offset = local_rank * p.ds_tensor.ds_numel
_in = param_tensors[i].narrow(0, j * intra_chunk_size + local_offset, p.ds_tensor.ds_numel)
intra_outputs.append(_out)
intra_inputs.append(_in)
all_gather_handle = dist.all_gather_coalesced(intra_outputs,
intra_inputs,
group=intra_node_comm_group,
async_op=True)
for i, param in enumerate(params):
param.data = param_tensors[i].narrow(0, 0, param.ds_numel).view(param.ds_shape).data
return MiCS_AllGatherCoalescedHandle(
allgather_handle=all_gather_handle,
params=params,
partitions=[],
world_size=param_shard_size,
)
def get_partition_dp_group(self, param):
return param.comm.param_shard_group
def get_partition_rank(self):
return self.mics_comm_groups.param_shard_rank
@property
def num_partitions(self):
return self.mics_comm_groups.param_shard_size
class MiCS_Offload(DeepSpeedZeRoOffload):
""" Wrapper to change the behavior for parameter sharding
"""
def __init__(self,
module,
timers,
ds_config,
overlap_comm=True,
prefetch_bucket_size=50000000,
max_reuse_distance=1000000000,
max_live_parameters=1000000000,
param_persistence_threshold=100000,
model_persistence_threshold=sys.maxsize,
offload_param_config=None,
mpu=None):
super().__init__(module, timers, ds_config, overlap_comm, prefetch_bucket_size, max_reuse_distance,
max_live_parameters, param_persistence_threshold, model_persistence_threshold,
offload_param_config, mpu)
def _convert_to_zero_parameters(self, ds_config, module, mpu):
""" overload the parent class function for convert the parameters
"""
log_dist(f'Convert to zero parameters from MiCS Offload manager', ranks=[0])
non_zero_params = [p for p in module.parameters() if not is_zero_param(p)]
if non_zero_params:
zero_params = [p for p in module.parameters() if is_zero_param(p)]
if zero_params:
zero_params[0].convert_to_zero_parameters(param_list=non_zero_params)
else:
group = None
if mpu:
group = mpu.get_data_parallel_group()
MiCS_Init(module=module,
data_parallel_group=group,
dtype=self.dtype,
config_dict_or_path=ds_config,
remote_device=self.offload_device,
pin_memory=self.offload_param_pin_memory,
mpu=mpu)
class MiCS_Optimizer(DeepSpeedZeroOptimizer_Stage3):
"""
MiCS Optimizer
"""
def __init__(self,
module,
init_optimizer,
timers,
ds_config,
static_loss_scale=1,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=True,
contiguous_gradients=True,
reduce_bucket_size=500000000,
prefetch_bucket_size=50000000,
max_reuse_distance=1000000000,
max_live_parameters=1000000000,
param_persistence_threshold=100000,
model_persistence_threshold=sys.maxsize,
dp_process_group=None,
reduce_scatter=True,
overlap_comm=False,
offload_optimizer_config=None,
offload_param_config=None,
sub_group_size=1000000000000,
mpu=None,
clip_grad=0,
communication_data_type=torch.float16,
postscale_gradients=True,
gradient_predivide_factor=1,
gradient_accumulation_steps=1,
elastic_checkpoint=False,
aio_config=None):
log_dist("Init MiCS optimizer", ranks=[0])
super().__init__(module, init_optimizer, timers, ds_config, static_loss_scale, dynamic_loss_scale,
dynamic_loss_args, verbose, contiguous_gradients, reduce_bucket_size, prefetch_bucket_size,
max_reuse_distance, max_live_parameters, param_persistence_threshold,
model_persistence_threshold, dp_process_group, reduce_scatter, overlap_comm,
offload_optimizer_config, offload_param_config, sub_group_size, mpu, clip_grad,
communication_data_type, postscale_gradients, gradient_predivide_factor,
gradient_accumulation_steps, elastic_checkpoint, aio_config)
first_param = next(module.parameters())
# overload the dp_process_group and partition_count
assert hasattr(first_param, "comm"), " ".join([
"Sharded parameters don't have the MiCS_CommGroups attached.",
"Might due to the use of deepspeed.zero.Init context for initializing the weights.",
"To use MiCS sharding, please use deepspeed.zero.MiCS_Init instead for initializing parameter."
])
self.dp_process_group = first_param.comm.param_shard_group
self.partition_count = first_param.comm.param_shard_size
def initialize_ds_offload(
self,
module,
timers,
ds_config,
overlap_comm,
prefetch_bucket_size,
max_reuse_distance,
max_live_parameters,
param_persistence_threshold,
model_persistence_threshold,
offload_param_config,
mpu,
zpg=None,
zero_quantized_weights=False,
):
assert not zero_quantized_weights and zpg is None, "MiCS is mutually exclusive with ZeRO++"
return MiCS_Offload(module, timers, ds_config, overlap_comm, prefetch_bucket_size, max_reuse_distance,
max_live_parameters, param_persistence_threshold, model_persistence_threshold,
offload_param_config, mpu)
def partition_grads(self, params_to_release: List[Parameter], grad_partitions: List[Tensor]) -> None:
grad_buffers = super().partition_grads(params_to_release, grad_partitions)
# perform all-reduce among replication groups
# the function will perform accumulation boundary check
self.allreduce_mics_shard_grads(params_to_release, grad_buffers)
@instrument_w_nvtx
def allreduce_mics_shard_grads(self, params, partitioned_grads_buffers: List[Tensor]):
"""
"""
# TODO: improve the condition check
if not self.is_gradient_accumulation_boundary or \
len(partitioned_grads_buffers) == 0:
return
mics_comm_groups: MiCS_CommGroups = params[0].comm
param_repli_group = mics_comm_groups.param_repli_group
param_repli_size = mics_comm_groups.param_repli_size
if param_repli_size is None or param_repli_size <= 1:
return
if not get_accelerator().on_accelerator(partitioned_grads_buffers[0]):
raise RuntimeError("Local sharding has no support for CPU offloading")
if dist.has_all_reduce_coalesced():
scale_tensors(partitioned_grads_buffers, param_repli_size)
dist.all_reduce_coalesced(tensors=partitioned_grads_buffers, group=param_repli_group)
else:
# manually coalescing all-reduce
aggregated_buffer: Tensor = torch.cat(partitioned_grads_buffers)
aggregated_buffer.div_(param_repli_size)
dist.all_reduce(aggregated_buffer, group=param_repli_group)
offset = 0
for grad_buff in partitioned_grads_buffers:
grad_buff.view(-1).copy_(aggregated_buffer.narrow(0, offset, grad_buff.numel()))
offset += grad_buff.numel()
def load_state_dict(self,
state_dict_list,
load_optimizer_states=True,
load_from_fp32_weights=False,
checkpoint_folder=None):
r""" Loading the ZeRO-3/MiCS partitioned checkpoints
Because the self.dp_process_group is replaced with the communicator for
partition group we can call the load_state_dict logic from ZeRO-3.
"""
super().load_state_dict(state_dict_list, load_optimizer_states, load_from_fp32_weights, checkpoint_folder)
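# --- Illustrative sketch (not part of the original file) --------------------
# The fallback branch of allreduce_mics_shard_grads above coalesces many small
# gradient buffers into one flat tensor, reduces it once, and scatters the
# result back. The sketch below reproduces only that pack/unpack bookkeeping
# with plain torch on a single process; the collective itself is stood in for
# by a comment, and the replication factor is a made-up value.
def _example_manual_coalescing():
    import torch

    buffers = [torch.randn(3), torch.randn(5), torch.randn(2)]
    replication_factor = 4  # hypothetical stand-in for param_repli_size

    flat = torch.cat([b.view(-1) for b in buffers])
    flat.div_(replication_factor)
    # ... the real code calls dist.all_reduce(flat, group=param_repli_group) here ...

    offset = 0
    for b in buffers:
        b.view(-1).copy_(flat.narrow(0, offset, b.numel()))
        offset += b.numel()
    return buffers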
| 22,259 | 46.061311 | 138 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/checkpoint_engine/torch_checkpoint_engine.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.utils import logger, log_dist
from deepspeed.runtime.checkpoint_engine.checkpoint_engine import \
CheckpointEngine
class TorchCheckpointEngine(CheckpointEngine):
def __init__(self, config_params=None):
super().__init__(config_params)
def create(self, tag):
log_dist(f"[Torch] Checkpoint {tag} is about to be saved!", ranks=[0])
def save(self, state_dict, path: str):
logger.info(f"[Torch] Saving {path}...")
torch.save(state_dict, path)
logger.info(f"[Torch] Saved {path}.")
return None
def load(self, path: str, map_location=None):
logger.info(f"[Torch] Loading checkpoint from {path}...")
partition = torch.load(path, map_location=map_location)
logger.info(f"[Torch] Loaded checkpoint from {path}.")
return partition
def commit(self, tag):
logger.info(f"[Torch] Checkpoint {tag} is ready now!")
return True
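# --- Illustrative sketch (not part of the original file) --------------------
# A minimal save/load round trip through the engine defined above, assuming a
# single process (the logging helpers used by create() do not require an
# initialized process group). The tag, filename, and state_dict contents are
# hypothetical.
def _example_torch_checkpoint_roundtrip():
    import os
    import tempfile
    import torch

    engine = TorchCheckpointEngine()
    with tempfile.TemporaryDirectory() as tmpdir:
        path = os.path.join(tmpdir, "mp_rank_00_model_states.pt")
        engine.create(tag="global_step1")
        engine.save({"weights": torch.zeros(4)}, path)
        engine.commit(tag="global_step1")
        state = engine.load(path, map_location="cpu")
        assert torch.equal(state["weights"], torch.zeros(4))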
| 1,060 | 29.314286 | 78 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/checkpoint_engine/nebula_checkpoint_engine.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import torch_nebula
from deepspeed.runtime.checkpoint_engine.checkpoint_engine import \
CheckpointEngine
from deepspeed.utils import logger, log_dist
from deepspeed.nebula.constants import *
def _get_tag_from_path(path):
return os.path.basename(os.path.dirname(path))
class NebulaCheckpointEngine(CheckpointEngine):
def __init__(self, config_params=None):
super().__init__(config_params)
self.checkpoint = None
self.tag_flag = None
self.enable_nebula_load = config_params.enable_nebula_load
self.nebula_load_path = config_params.load_path
if self.nebula_load_path is None:
self.nebula_load_path = config_params.persistent_storage_path
nebula_config_params = {
NEBULA_PERSISTENT_STORAGE_PATH: config_params.persistent_storage_path,
NEBULA_PERSISTENT_TIME_INTERVAL: config_params.persistent_time_interval,
NEBULA_NUM_OF_VERSION_IN_RETENTION: config_params.num_of_version_in_retention,
}
torch_nebula.init(**nebula_config_params)
def create(self, tag):
log_dist(f"[Nebula] Start Checkpoint for tag:{tag}", ranks=[0])
# -2 means: customer needs to explicitly tell nebula
# current checkpoint is complete by commit method.
self.checkpoint = torch_nebula.Checkpoint(tag, -2)
def save(self, state_dict, path: str):
log_dist(f"[Nebula] Create dummy files for loading.")
torch.save("", path)
tag = _get_tag_from_path(path)
partition_name = os.path.basename(path)
logger.info(f"[Nebula] Saving {partition_name} under tag {tag}...")
self.checkpoint.save(partition_name, state_dict)
logger.info(f"[Nebula] Saved {partition_name} under tag {tag}.")
return None
def load(self, path: str, map_location=None):
tag = _get_tag_from_path(path)
first_load_flag = self.tag_flag is None or self.tag_flag == tag
if not self.enable_nebula_load and first_load_flag:
self.tag_flag = tag
logger.info(f"[Nebula] Disable nebula load. Loading checkpoint from {path} ...")
partition = torch.load(path, map_location=map_location)
logger.info(f"[Nebula] Disable nebula load. Loaded checkpoint from {path} .")
return partition
partition_name = os.path.basename(path)
logger.info(f"[Nebula] Loading {path} under tag {tag} from nebula path {self.nebula_load_path}...")
checkpoint = None
if tag in (None, 'latest', 'latest_universal'):
# In some cases, there is the inconsistent tag between deepspeed metadata (latest file)
# and nebula metadata, will lead to the failure on loading with deepspeed tag. Then we
# will try to load the valid latest checkpoint from nebula(tier3 > tier1). So, in summary
# when met failure loading for given tag, the loading priority would be like:
# nebula tier3 latest > nebula tier1 latest.
checkpoint = torch_nebula.get_latest_checkpoint(persist_path=self.nebula_load_path)
else:
checkpoint = torch_nebula.get_checkpoint(tag=tag, persist_path=self.nebula_load_path)
if checkpoint is None or (checkpoint is not None and checkpoint.tag == ''):
logger.info(
f"Unable to find valid checkpoint tag:{tag} from Nebula, try to get latest checkpoint again from nebula {self.nebula_load_path} path!"
)
# nebula tier3 latest
checkpoint = torch_nebula.get_latest_checkpoint(persist_path=self.nebula_load_path)
if checkpoint is None or (checkpoint is not None and checkpoint.tag == ''):
logger.info(
f"Unable to find latest checkpoint from Nebula tier3, try to get latest checkpoint again from nebula tier1 path!"
)
# nebula tier1 latest
checkpoint = torch_nebula.get_latest_checkpoint()
logger.warning(f"Unable to find valid checkpoint from Nebula under tag:{tag}.")
return None
tag = checkpoint.tag
self.tag_flag = -1
partition = checkpoint.load(partition_name, map_location=map_location)
logger.info(f"[Nebula] Loaded {path} under tag {tag} from {self.nebula_load_path}.")
return partition
def commit(self, tag):
# nebula commit will be call when all files under give tag are ready to be persisted in the async way.
logger.info(f"[Nebula] all files for {tag} are saved in tier1. It is ready to start persisting")
commit_rls = self.checkpoint.commit()
if not commit_rls:
logger.error(f"[Nebula] failed to commit the checkpoint, please check the log.")
return False
return commit_rls
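# --- Illustrative sketch (not part of the original file) --------------------
# The comments in load() above describe a fixed fallback priority when a tag
# cannot be resolved: an exact tag from the persistent (tier3) path, then the
# latest tier3 checkpoint, then the latest tier1 checkpoint. The helper below
# mirrors that ordering over plain optional candidates; it is purely
# illustrative and does not call torch_nebula.
def _example_resolve_checkpoint(tier3_by_tag=None, tier3_latest=None, tier1_latest=None):
    # return the first candidate that exists and carries a non-empty tag
    for candidate in (tier3_by_tag, tier3_latest, tier1_latest):
        if candidate is not None and getattr(candidate, "tag", "") != "":
            return candidate
    return None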
| 4,975 | 45.074074 | 150 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/pipe/engine.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from types import MethodType
import torch
from deepspeed import comm as dist
from deepspeed.utils import logger
from deepspeed.utils.timer import ThroughputTimer
from deepspeed.accelerator import get_accelerator
from ..engine import DeepSpeedEngine, MEMORY_OPT_ALLREDUCE_SIZE
from ..utils import PartitionedTensor
from ..dataloader import RepeatingLoader
from ..zero.config import ZeroStageEnum
from ..activation_checkpointing import checkpointing as ds_checkpointing
from .module import PipelineModule, PipelineError
from . import p2p
from . import schedule
TARGET_ID = -2
LOG_STAGE = -2
DATA_PARALLEL_ID = -2
def is_even(number):
return number % 2 == 0
mem_alloced = 0
mem_cached = 0
def _tensor_bytes(tensor):
return tensor.numel() * tensor.element_size()
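# --- Illustrative sketch (not part of the original file) --------------------
# Typical driver loop for the PipelineEngine defined below, assuming `engine`
# was produced by deepspeed.initialize() with a PipelineModule and that
# `train_loader` is an ordinary iterable of batches. Wrapping the loader in
# RepeatingLoader (as train_batch()'s docstring recommends) avoids a
# StopIteration ending the run early. `num_steps` is a hypothetical value.
def _example_pipeline_training_loop(engine, train_loader, num_steps=100):
    data_iter = iter(RepeatingLoader(train_loader))
    losses = []
    for _ in range(num_steps):
        # each call runs one full pipeline schedule over the configured micro-batches
        losses.append(engine.train_batch(data_iter=data_iter))
    return losses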
class PipelineEngine(DeepSpeedEngine):
""" A training engine hybrid pipeline, data, and model parallel training.
This engine is created by ``deepspeed.initialize()`` when a :class:`PipelineModule`
is provided.
"""
ID_TO_DTYPE = [
torch.float32, torch.float64, torch.complex64, torch.complex128, torch.float16, torch.bfloat16, torch.uint8,
torch.int8, torch.int16, torch.int32, torch.int64, torch.bool
]
DTYPE_TO_ID = {dtype: id_ for id_, dtype in enumerate(ID_TO_DTYPE)}
def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs):
super().__init__(*super_args, **super_kwargs)
assert isinstance(self.module, PipelineModule), "model must be a PipelineModule"
assert self.zero_optimization_stage() < 2, "ZeRO-2 and ZeRO-3 are incompatible with pipeline parallelism"
# We schedule the all-reduces, so disable it in super().backward()
self.enable_backward_allreduce = False
self.has_bool_tensors = has_bool_tensors
self.eval_return_logits = False
self.outputs = None
# used to disable the pipeline all-reduce when used with 1-bit Adam/1-bit LAMB
self.pipeline_enable_backward_allreduce = True
if self.elasticity_enabled():
if not self.is_elastic_model_parallel_supported():
assert not self.elasticity_enabled(), "Elasticity is not currently supported" \
" with pipeline parallelism."
# pipeline step for logging
self.log_batch_step_id = -1
self.micro_batch_size = self.train_micro_batch_size_per_gpu()
self.micro_batches = self.gradient_accumulation_steps()
# Set Grid and Communication Groups
self.grid = self.module._grid
if self.grid.get_global_rank() == 0:
logger.info(f'CONFIG: micro_batches={self.micro_batches} '
f'micro_batch_size={self.micro_batch_size}')
self.global_rank = self.grid.get_global_rank()
assert self.dp_world_size == self.grid.data_parallel_size
assert self.train_batch_size() == \
self.micro_batch_size * self.micro_batches * self.grid.data_parallel_size
# Set Stage Info
self.num_stages = self.grid.pipe_parallel_size
self.stage_id = self.grid.get_stage_id()
self.prev_stage = self.stage_id - 1
self.next_stage = self.stage_id + 1
self.data_iterator = None
self.batch_fn = None
self._force_grad_boundary = False
self.batch_timer = ThroughputTimer(batch_size=self.train_batch_size(),
logging_fn=self.tput_log,
monitor_memory=False,
steps_per_output=self.steps_per_print())
# PipelineEngine needs to handle data loading specially because only the first
# and last stages load inputs/labels. We construct a distributed sampler over the
# data-parallel ranks so each pipeline replica ingests a distinct shard of each batch.
if self.training_data:
self._build_data_iter(self.training_data)
self.is_pipe_parallel = self.grid.pipe_parallel_size > 1
self.is_data_parallel = self.grid.data_parallel_size > 1
self.is_model_parallel = self.grid.model_parallel_size > 1
# Partition input/output buffers
# XXX temporarily disable while I revert some partition hacks.
self.is_pipe_partitioned = self.is_model_parallel
self.is_grad_partitioned = self.is_model_parallel
model_parameters = filter(lambda p: p.requires_grad, self.module.parameters())
num_params = sum([p.numel() for p in model_parameters])
unique_params = num_params
# Subtract tied parameters if we don't own them
if self.module.tied_comms:
tied_params = 0
for key, d in self.module.tied_comms.items():
if self.global_rank != min(d['ranks']):
tied_params += sum(p.numel() for p in d['module'].parameters())
unique_params -= tied_params
params_tensor = torch.LongTensor(data=[num_params, unique_params]).to(self.device)
dist.all_reduce(params_tensor, group=self.grid.get_model_parallel_group())
params_tensor = params_tensor.tolist()
total_params = params_tensor[0]
unique_params = params_tensor[1]
if self.grid.data_parallel_id == 0:
logger.info(f'RANK={self.global_rank} '
f'STAGE={self.stage_id} '
f'LAYERS={self.module._local_stop - self.module._local_start} '
f'[{self.module._local_start}, {self.module._local_stop}) '
f'STAGE_PARAMS={num_params} ({num_params/1e6:0.3f}M) '
f'TOTAL_PARAMS={total_params} ({total_params/1e6:0.3f}M) '
f'UNIQUE_PARAMS={unique_params} ({unique_params/1e6:0.3f}M)')
#initialize peer-2-peer communication and allreduce groups
if self.is_pipe_parallel:
p2p.init_process_groups(self.grid)
# Pipeline buffers
self.num_pipe_buffers = 0
self.pipe_buffers = {
'inputs': [], # batch input and received activations
'labels': [], # labels from batch input
'outputs': [], # activations
'output_tensors': [], # tensor object to preserve backward graph
}
self.pipe_recv_buf = None
self.grad_layer = None
self.meta_buffer = None
self.first_output_send = True
self.first_gradient_send = True
#stores the loss for the current micro batch being processed
self.loss = torch.tensor(0.0).to(self.device)
#stores the loss for the entire batch
self.total_loss = None
self.agg_loss = torch.tensor(0.0, requires_grad=False).to(self.device)
self.dp_group_loss = torch.tensor(0.0, requires_grad=False).to(self.device)
if self._config.pipeline['activation_checkpoint_interval'] > 0:
self.module.activation_checkpoint_interval = self._config.pipeline['activation_checkpoint_interval']
self.module.checkpoint_parallel_write_pipeline = self._config.checkpoint_parallel_write_pipeline
if self.is_last_stage():
self.loss_model = self.module.loss_fn
self.has_attention_mask = self.module.__class__.__name__ == 'GPT2ModelPipe'
# Initialize pipeline communicators. Just send a 0.
if is_even(self.stage_id):
if not self.is_last_stage():
p2p.send(self.loss, self.next_stage)
if not self.is_first_stage():
p2p.recv(self.loss, self.prev_stage)
else:
if not self.is_first_stage():
p2p.recv(self.loss, self.prev_stage)
if not self.is_last_stage():
p2p.send(self.loss, self.next_stage)
# XXX look into timer reporting timing
# Initialize some timers because of early weirdness.
if self.wall_clock_breakdown():
self.timers('forward_microstep').start()
self.timers('forward_microstep').stop()
self.timers('backward_microstep').start()
self.timers('backward_microstep').stop()
self.timers('backward_inner_microstep').start()
self.timers('backward_inner_microstep').stop()
self.timers('backward_allreduce_microstep').start()
self.timers('backward_allreduce_microstep').stop()
self.timers('backward_allreduce').start()
self.timers('backward_allreduce').stop()
self.timers('step_microstep').start()
self.timers('step_microstep').stop()
def set_has_attention_mask(self, value):
assert isinstance(value, bool)
self.has_attention_mask = value
def _build_data_iter(self, dataset):
sampler = torch.utils.data.distributed.DistributedSampler(dataset,
num_replicas=self.dp_world_size,
rank=self.mpu.get_data_parallel_rank(),
shuffle=False)
# Build a loader and make it repeating.
pipe_dataloader = self.deepspeed_io(dataset, data_sampler=sampler)
pipe_dataloader = RepeatingLoader(pipe_dataloader)
self.set_dataloader(pipe_dataloader)
def _exec_reduce_tied_grads(self):
# We need to run this first to write to self.averaged_gradients;
# since this class turns `enable_backward_allreduce` off,
# `self.overlapping_partition_gradients_reduce_epilogue()` defined in the DeepSpeedEngine
# never actually runs. I suspect this is because of efficiency problems; get_flat_partition in
# stage2.py might do something expensive; someone will have to look into that later. But
# in the meantime, this fixes ZeRO2 + Pipelining enough to run a demo. Further profiling
# needed to decide if it actually breaks everything.
# (see https://github.com/EleutherAI/gpt-neox/issues/62#issuecomment-761471944)
if self.zero_optimization_partition_gradients():
self.optimizer.overlapping_partition_gradients_reduce_epilogue()
weight_group_list = self.module.get_tied_weights_and_groups()
for weight, group in weight_group_list:
grad = weight._hp_grad if self.bfloat16_enabled() else weight.grad
dist.all_reduce(grad, group=group)
def _exec_reduce_grads(self):
self._force_grad_boundary = True
if self.pipeline_enable_backward_allreduce:
if self.bfloat16_enabled():
if self.zero_optimization_stage() < ZeroStageEnum.gradients:
self._bf16_reduce_grads()
else:
raise NotImplementedError("PP+BF16 only work for ZeRO Stage 1")
else:
self.allreduce_gradients(bucket_size=MEMORY_OPT_ALLREDUCE_SIZE)
self._force_grad_boundary = False
def _bf16_reduce_grads(self):
# Make our own list of gradients from the optimizer's FP32 grads
grads = []
self.buffered_allreduce_fallback(grads=self.optimizer.get_grads_for_reduction(),
elements_per_buffer=MEMORY_OPT_ALLREDUCE_SIZE)
def _reserve_pipe_buffers(self, num_buffers):
"""Ensure that each pipeline buffer has at least ``num_buffers`` slots.
This method only reserves slots and does not allocate tensors.
Args:
num_buffers (int): The number of buffers to reserve.
"""
if self.num_pipe_buffers >= num_buffers:
return
num_added = num_buffers - self.num_pipe_buffers
for key in self.pipe_buffers:
self.pipe_buffers[key].extend([None] * num_added)
self.num_pipe_buffers = num_buffers
def reset_activation_shape(self):
"""Reset the buffers when the shape of activation and gradient change.
For example, for curriculum learning that changes the seqlen of each
sample, we need to call this whenever the seqlen is going to change.
"""
self.first_output_send = True
self.pipe_recv_buf = None
self.grad_layer = None
self.meta_buffer = None
def train_batch(self, data_iter=None):
"""Progress the pipeline to train the next batch of data. The engine will ingest
``self.train_batch_size()`` total samples collectively across all workers.
An iterator over training data should be provided as an argument
unless ``deepspeed.initialize()`` was provided a training set. In that event,
the training data will automatically be read.
.. warning::
A total of ``self.gradient_accumulation_steps()`` entries will be pulled
from ``data_iter`` by each pipeline. There must be sufficient
data left in ``data_iter`` or else a ``StopIteration`` will halt training.
DeepSpeed provides a convenience class :class:`deepspeed.utils.RepeatingLoader`
that wraps data loaders to automatically restart upon a ``StopIteration``.
Args:
data_iter (Iterator, optional): Iterator of training data.
Returns:
The arithmetic mean of the losses computed this batch.
"""
if not torch._C.is_grad_enabled():
raise RuntimeError(f'train_batch() requires gradients enabled. Use eval_batch() instead.')
# Curriculum learning could change activation shape
if self.curriculum_enabled_legacy():
new_difficulty = self.curriculum_scheduler_legacy.update_difficulty( \
self.global_steps + 1)
if self.global_steps == 0 or self.curriculum_scheduler_legacy.first_step:
self.reset_activation_shape()
self.curriculum_scheduler_legacy.first_step = False
elif new_difficulty != self.curriculum_scheduler_legacy.get_difficulty( \
self.global_steps):
self.reset_activation_shape()
if data_iter:
self.set_dataiterator(data_iter)
self.module.train()
self.total_loss = None
self._compute_loss = True
# Do the work
self.timers('train_batch').start()
sched = schedule.TrainSchedule(micro_batches=self.micro_batches,
stages=self.num_stages,
stage_id=self.stage_id)
self._exec_schedule(sched)
self.agg_train_loss = self._aggregate_total_loss()
self.timers('train_batch').stop()
if self.global_steps % self.steps_per_print() == 0:
if self.global_rank == 0:
elapsed = self.timers('train_batch').elapsed(reset=True) / 1000.0
iter_time = elapsed / self.steps_per_print()
tput = self.train_batch_size() / iter_time
print(f'steps: {self.global_steps} '
f'loss: {self.agg_train_loss:0.4f} '
f'iter time (s): {iter_time:0.3f} '
f'samples/sec: {tput:0.3f}')
# Monitoring
if self.global_rank == 0 and self.monitor.enabled:
self.summary_events = [(f'Train/Samples/train_loss', self.agg_train_loss.mean().item(),
self.global_samples)]
self.monitor.write_events(self.summary_events)
if self.wall_clock_breakdown() and self.global_steps % self.steps_per_print() == 0:
self.timers.log(['pipe_send_output', 'pipe_send_grad', 'pipe_recv_input', 'pipe_recv_grad'])
# TODO: should return precisely what loss returned and allow others to be queried?
return self.agg_train_loss
def eval_batch(self, data_iter, return_logits=False, compute_loss=True, reduce_output='avg'):
"""Evaluate the pipeline on a batch of data from ``data_iter``. The
engine will evaluate ``self.train_batch_size()`` total samples
collectively across all workers.
This method is equivalent to:
.. code-block:: python
module.eval()
with torch.no_grad():
output = module(batch)
.. warning::
A total of ``self.gradient_accumulation_steps()`` entries will be pulled
from ``data_iter`` by each pipeline. There must be sufficient
data left in ``data_iter`` or else a ``StopIteration`` will halt training.
DeepSpeed provides a convenience class :class:`deepspeed.utils.RepeatingLoader`
that wraps data loaders to automatically restart upon a ``StopIteration``.
Args:
data_iter (Iterator): Iterator of data to evaluate.
Returns:
The arithmetic mean of the losses computed this batch.
"""
self.eval_return_logits = return_logits
self.module.eval()
# Curriculum learning could change activation shape
if self.curriculum_enabled_legacy():
new_difficulty = self.curriculum_scheduler_legacy.update_difficulty( \
self.global_steps + 1)
if self.global_steps == 0 or self.curriculum_scheduler_legacy.first_step:
self.reset_activation_shape()
self.curriculum_scheduler_legacy.first_step = False
elif new_difficulty != self.curriculum_scheduler_legacy.get_difficulty( \
self.global_steps):
self.reset_activation_shape()
eval_output = None
self._compute_loss = compute_loss
# Use the provided data iterator
train_iterator = self.data_iterator
self.set_dataiterator(data_iter)
# Do the work
sched = schedule.InferenceSchedule(micro_batches=self.micro_batches,
stages=self.num_stages,
stage_id=self.stage_id)
# prevent deadlock when multiple evaluations run back to back
dist.barrier()
with torch.no_grad():
self._exec_schedule(sched)
if self.is_last_stage():
eval_output = self._reduce_outputs(self.fwd_outputs, reduce=reduce_output)
if compute_loss:
eval_output = self._bcast_pipe_scalar(eval_output)
if self.global_rank == 0 and self.monitor.enabled:
self.summary_events = [(f'Train/Samples/eval_loss', eval_output.mean().item(), self.global_samples)]
self.monitor.write_events(self.summary_events)
# Restore the training iterator
self.set_dataiterator(train_iterator)
# Reset any buffers that may have been populated during the forward passes.
#ds_checkpointing.reset()
self.eval_return_logits = False
if return_logits:
outputs = self.outputs
self.outputs = None
return eval_output, outputs
return eval_output
def set_train_batch_size(self, train_batch_size):
"""Adjust the global batch size by increasing or decreasing the number of
micro-batches (i.e., gradient accumulation steps). The size of each micro-batch
(i.e., ``train_micro_batch_size_per_gpu``) is not changed.
Args:
train_batch_size (int): The new global batch size for training.
Raises:
ValueError: if ``train_batch_size`` is not divisible by the
configured micro-batch size and data parallelism.
"""
super().set_train_batch_size(train_batch_size)
self.micro_batches = self.gradient_accumulation_steps()
def is_first_stage(self):
"""True if this process is in the first stage in the pipeline."""
return self.stage_id == 0
def is_last_stage(self):
"""True if this process is in the last stage in the pipeline."""
return self.stage_id == self.num_stages - 1
def _reduce_outputs(self, outputs, reduce='avg', reduce_dp=True):
if reduce is None:
return outputs
if reduce.lower() == 'avg':
# first sum over all microbatches
if torch.is_tensor(outputs[0]):
reduced = sum(outputs)
else:
assert isinstance(outputs, (list, tuple))
reduced = [torch.zeros_like(o) for o in outputs[0]]
for out in outputs:
    for idx, o in enumerate(out):
        reduced[idx] += o
# Average over the microbatches
reduced = self._scale_loss_by_gas(reduced)
# Average over DP groups
if reduce_dp and self.is_data_parallel:
if torch.is_tensor(reduced):
dist.all_reduce(reduced, group=self.mpu.get_data_parallel_group())
reduced /= self.dp_world_size
else:
for idx in range(len(reduced)):
dist.all_reduce(reduced[idx], group=self.mpu.get_data_parallel_group())
reduced[idx] /= self.dp_world_size
return reduced
else:
raise NotImplementedError(f'reduction type {reduce} not supported.')
def _bcast_pipe_scalar(self, data, src_rank=None, dtype=torch.float32):
# Default to last stage (e.g., for broadcasting loss)
if src_rank is None:
src_rank = self.grid.stage_to_global(self.num_stages - 1)
assert src_rank in self.grid.pp_group
if self.global_rank == src_rank:
result = data.clone().detach().type(dtype).to(self.device)
else:
result = torch.Tensor([0.]).type(dtype).to(self.device)
dist.broadcast(tensor=result, src=src_rank, group=self.mpu.get_pipe_parallel_group())
return result
def _aggregate_total_loss(self):
# Scale loss, average among DP ranks, and bcast loss to the rest of my DP group
if self.is_last_stage():
loss = self._scale_loss_by_gas(self.total_loss)
self.dp_group_loss = loss.clone().detach()
## Average loss across all data-parallel groups
agg_loss = self.dp_group_loss.clone().detach()
#print(f'RANK={self.global_rank} bcast SENDER src={self.global_rank} group={self.grid.pp_group}', flush=True)
if self.is_data_parallel:
dist.all_reduce(agg_loss, group=self.mpu.get_data_parallel_group())
agg_loss /= self.dp_world_size
assert self.global_rank in self.grid.pp_group
losses = torch.Tensor([self.dp_group_loss, agg_loss]).to(self.device)
if self.is_pipe_parallel:
dist.broadcast(tensor=losses, src=self.global_rank, group=self.mpu.get_pipe_parallel_group())
else:
# Get loss from last stage
src_rank = self.grid.stage_to_global(self.num_stages - 1)
assert src_rank in self.grid.pp_group
losses = torch.Tensor([0., 0.]).to(self.device)
dist.broadcast(tensor=losses, src=src_rank, group=self.grid.get_pipe_parallel_group())
self.dp_group_loss = losses[0].clone().detach()
agg_loss = losses[1].clone().detach()
return agg_loss
def set_dataloader(self, loader):
""""""
if self.is_first_stage() or self.is_last_stage():
self.training_dataloader = loader
self.data_iterator = iter(self.training_dataloader)
def set_dataiterator(self, iterator):
""" Store an iterator to sample for training data. """
if self.is_first_stage() or self.is_last_stage():
self.training_dataloader = None
self.data_iterator = iterator
def set_batch_fn(self, fn):
"""Execute a post-processing function on input data.
Args:
fn (function): The function to run.
"""
self.batch_fn = fn
def is_gradient_accumulation_boundary(self):
"""True if the engine is executing a gradient reduction or optimizer step instruction.
This is overridden from :class:`DeepSpeedEngine` to force reductions
and steps when the pipeline engine is instructed to do so.
Returns:
bool: whether reductions and optimizer steps should occur.
"""
return self._force_grad_boundary
def log_for_device(self, *msg):
if LOG_STAGE == self.stage_id or LOG_STAGE == -1:
if DATA_PARALLEL_ID == self.grid.data_parallel_id or DATA_PARALLEL_ID == -1:
print(
f'RANK={dist.get_rank()} '
f'PIPE-ID={self.stage_id} '
f'DATA-ID={self.grid.data_parallel_id} '
f'MBATCH-ID={self.microbatch_id} '
f'STEP-ID={self.log_batch_step_id} '
'::',
*msg,
flush=True)
def tput_log(self, *msg):
if self.global_rank == 0 and self.global_steps % self.steps_per_print() == 0:
print(*msg)
def _next_batch(self):
# If using 3D parallelism, only some first-stage ranks may do IO
batch = None
if self.data_iterator is not None:
batch = next(self.data_iterator)
# Any post-processing, like broadcasting across a slice-parallel group.
if self.batch_fn:
batch = self.batch_fn(batch)
return batch
def _exec_forward_pass(self, buffer_id):
self.tput_timer.start()
self.mem_status('BEFORE FWD', reset_max=True)
if isinstance(self.pipe_buffers['inputs'][buffer_id], tuple):
inputs = tuple(t.clone() for t in self.pipe_buffers['inputs'][buffer_id])
else:
inputs = self.pipe_buffers['inputs'][buffer_id].clone()
# collect the partitioned input from the previous stage
if self.is_pipe_partitioned and not self.is_first_stage():
part_input = PartitionedTensor.from_meta(meta=inputs[0],
local_part=inputs[1],
group=self.grid.get_slice_parallel_group())
inputs = (part_input.full(), *inputs[2:])
inputs[0].requires_grad = True
# skip mask
#inputs[1].requires_grad = True
part_input = None
inputs = inputs[0] if len(inputs) == 1 else inputs
self.pipe_buffers['inputs'][buffer_id] = inputs
# Zero out the gradients each time we use the tensor because only the data in
# the tensor changes across batches.
self._zero_grads(inputs)
outputs = super().forward(inputs)
# Reset activation checkpointing buffers.
# Need to call this between evaluation iterations
if not self.module.training:
ds_checkpointing.reset()
# Partition the outputs if we are not the last stage
if self.is_pipe_partitioned and not self.is_last_stage():
if isinstance(outputs, tuple):
first_output = outputs[0]
# TODO: Improve pipe partitioning to pass multiple tensors that require grads
assert all([torch.is_tensor(elt) and elt.requires_grad is False for elt in outputs[1:]])
outputs_tail = outputs[1:]
elif torch.is_tensor(outputs):
first_output = outputs
outputs_tail = []
else:
raise ValueError("expecting a tensor or a tuple of tensors")
part = PartitionedTensor(tensor=first_output, group=self.grid.get_slice_parallel_group())
# Clear the large output data, but save the computation graph
first_output.data = torch.zeros(1)
self.pipe_buffers['output_tensors'][buffer_id] = first_output
# Inject the partitioned tensor into the output before sending
outputs = (part.to_meta(), part.data(), *outputs_tail)
part = None
self.pipe_buffers['outputs'][buffer_id] = outputs
# Optionally compute loss on the last device
if self.is_last_stage():
if self._compute_loss and self.module.loss_fn is not None:
labels = self.pipe_buffers['labels'][buffer_id]
self.loss = self.module.loss_fn(outputs, labels)
else:
# Some models just return loss from forward()
self.loss = outputs
if self.eval_return_logits:
self.outputs = outputs
if isinstance(self.loss, torch.Tensor):
self.fwd_outputs.append(self.loss.detach())
if self.total_loss is None:
self.total_loss = torch.zeros_like(self.loss)
self.total_loss += self.loss.detach()
else:
self.fwd_outputs.append([l.detach() for l in self.loss])
if self.total_loss is None:
self.total_loss = [torch.zeros_like(l) for l in self.loss]
for idx, l in enumerate(self.loss):
self.total_loss[idx] += l.detach()
def _exec_backward_pass(self, buffer_id):
assert self.optimizer is not None, "must provide optimizer during " \
"init in order to use backward"
self.mem_status('BEFORE BWD', reset_max=True)
# The last stage just runs backward on the loss using DeepSpeed's typical
# mechanisms.
if self.is_last_stage():
super().backward(self.loss)
self.mem_status('AFTER BWD')
return
outputs = self.pipe_buffers['outputs'][buffer_id]
if self.wall_clock_breakdown():
self.timers('backward_microstep').start()
self.timers('backward').start()
self.timers('backward_inner_microstep').start()
self.timers('backward_inner').start()
# Reconstruct if we previously partitioned the output. We must be
# careful to also restore the computational graph of the tensors we partitioned.
if self.is_pipe_partitioned:
if self.is_grad_partitioned:
part_output = PartitionedTensor.from_meta(meta=outputs[0],
local_part=outputs[1],
group=self.grid.get_slice_parallel_group())
self.pipe_buffers['output_tensors'][buffer_id].data = part_output.full()
outputs = (self.pipe_buffers['output_tensors'][buffer_id], *outputs[2:])
else:
# Already restored from partition
self.pipe_buffers['output_tensors'][buffer_id].data = outputs[0]
outputs = (self.pipe_buffers['output_tensors'][buffer_id], *outputs[1:])
grad_tensors = self.grad_layer
if self.is_grad_partitioned:
#print(f'RANK={self.global_rank} BEFORE-BWD restoring grad={self.grad_layer[0].size()} {self.grad_layer[1].size()}')
part_grad = PartitionedTensor.from_meta(meta=self.grad_layer[0],
local_part=self.grad_layer[1],
group=self.grid.get_slice_parallel_group())
grad_tensors = (part_grad.full(), *grad_tensors[2:])
part_grad = None
#print(f'RANK={self.global_rank} BEFORE-BWD restored grad={self.grad_layer[0].size()} {self.grad_layer[1].size()}')
if self.bfloat16_enabled() and not self.is_last_stage():
# manually call because we don't call optimizer.backward()
self.optimizer.clear_lp_grads()
# This handles either a single tensor or tuple of tensors.
if isinstance(outputs, tuple):
out_tensors = [t for t in outputs if t.is_floating_point()]
assert len(out_tensors) == len(grad_tensors)
torch.autograd.backward(tensors=out_tensors, grad_tensors=grad_tensors)
else:
torch.autograd.backward(tensors=(outputs, ), grad_tensors=(grad_tensors, ))
if self.bfloat16_enabled() and not self.is_last_stage():
# manually call because we don't call optimizer.backward()
self.optimizer.update_hp_grads(clear_lp_grads=False)
# Free up the memory from the output of forward()
self.pipe_buffers['output_tensors'][buffer_id] = None
self.pipe_buffers['outputs'][buffer_id] = None
grad_tensors = None
if self.wall_clock_breakdown():
self.timers('backward_inner').stop()
self.timers('backward_inner_microstep').stop()
self.timers('backward').stop()
self.timers('backward_microstep').stop()
self.mem_status('AFTER BWD')
def _exec_load_micro_batch(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('batch_input').start()
batch = self._next_batch()
if self.is_first_stage():
loaded = None
if torch.is_tensor(batch[0]):
loaded = batch[0].clone().to(self.device).detach()
loaded.requires_grad = loaded.is_floating_point()
else:
assert isinstance(batch[0], (tuple, list))
# Assume list or tuple
loaded = []
for x in batch[0]:
assert torch.is_tensor(x)
mine = x.clone().detach().to(self.device)
mine.requires_grad = mine.is_floating_point()
loaded.append(mine)
loaded = tuple(loaded)
self.pipe_buffers['inputs'][buffer_id] = loaded
if self.is_last_stage():
loaded = batch[1]
if torch.is_tensor(batch[1]):
loaded = batch[1].to(self.device)
elif isinstance(batch[1], tuple):
loaded = []
for x in batch[1]:
assert torch.is_tensor(x)
x = x.to(self.device).detach()
loaded.append(x)
loaded = tuple(loaded)
self.pipe_buffers['labels'][buffer_id] = loaded
if self.wall_clock_breakdown():
self.timers('batch_input').stop()
def _send_tensor_meta(self, buffer, recv_stage):
""" Communicate metadata about upcoming p2p transfers.
Metadata is communicated in this order:
* type (0: tensor, 1: list, 2: tuple)
* num_tensors if type is list or tuple
foreach tensor in buffer:
* dtype (tuple transfers only)
* ndims
* shape
"""
send_bytes = 0
if isinstance(buffer, torch.Tensor):
type_tensor = torch.LongTensor(data=[0]).to(self.device)
p2p.send(type_tensor, recv_stage)
send_shape = torch.LongTensor(data=buffer.size()).to(self.device)
send_ndims = torch.LongTensor(data=[len(buffer.size())]).to(self.device)
p2p.send(send_ndims, recv_stage)
p2p.send(send_shape, recv_stage)
send_bytes += _tensor_bytes(buffer)
elif isinstance(buffer, list):
assert (False)
type_tensor = torch.LongTensor(data=[1]).to(self.device)
p2p.send(type_tensor, recv_stage)
count_tensor = torch.LongTensor(data=[len(buffer)]).to(self.device)
p2p.send(count_tensor, recv_stage)
for tensor in buffer:
assert isinstance(tensor, torch.Tensor)
send_shape = torch.LongTensor(data=tensor.size()).to(self.device)
send_ndims = torch.LongTensor(data=[len(tensor.size())]).to(self.device)
p2p.send(send_ndims, recv_stage)
p2p.send(send_shape, recv_stage)
send_bytes += _tensor_bytes(tensor)
elif isinstance(buffer, tuple):
type_tensor = torch.LongTensor(data=[2]).to(self.device)
p2p.send(type_tensor, recv_stage)
count_tensor = torch.LongTensor(data=[len(buffer)]).to(self.device)
p2p.send(count_tensor, recv_stage)
for idx, tensor in enumerate(buffer):
assert isinstance(tensor, torch.Tensor)
send_shape = torch.LongTensor(data=tensor.size()).to(self.device)
send_ndims = torch.LongTensor(data=[len(tensor.size())]).to(self.device)
send_dtype = torch.LongTensor(data=[self.DTYPE_TO_ID[tensor.dtype]]).to(self.device)
p2p.send(send_dtype, recv_stage)
p2p.send(send_ndims, recv_stage)
p2p.send(send_shape, recv_stage)
# Useful for performance debugging.
'''
new_bytes = _tensor_bytes(tensor)
send_bytes += _tensor_bytes(tensor)
# Useful for performance debugging.
if self.grid.data_parallel_id == 0:
print(
f'STAGE={self.stage_id} pipe-send-volume[{idx}]: shape={send_shape} {new_bytes/1024**2:0.2f}MB'
)
'''
else:
raise NotImplementedError(f'Could not send meta type {type(buffer)}')
# Useful for performance debugging.
'''
if self.grid.data_parallel_id == 0:
print(f'STAGE={self.stage_id} pipe-send-volume: {send_bytes/1024**2:0.2f}MB')
'''
def _recv_tensor_meta(self, send_stage):
"""Receive metadata about upcoming p2p transfers and return allocated buffers.
Metadata is communicated in this order:
* type (0: tensor, 1: list, 2: tuple)
* num_tensors if type is list or tuple
foreach tensor in buffer:
* dtype (tuple transfers only)
* ndims
* shape
Returns:
Allocated buffer for receiving from send_stage.
"""
type_tensor = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(type_tensor, send_stage)
recv_type = type_tensor.item()
# A single tensor will be sent.
if recv_type == 0:
recv_ndims = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(recv_ndims, send_stage)
recv_ndims = recv_ndims.item()
recv_shape = torch.LongTensor([1] * recv_ndims).to(self.device)
p2p.recv(recv_shape, send_stage)
recv_shape = recv_shape.tolist()
return self._allocate_buffer(recv_shape, num_buffers=1)[0]
# List or tuple of tensors
elif recv_type == 1 or recv_type == 2:
count_tensor = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(count_tensor, send_stage)
num_tensors = count_tensor.item()
recv_shapes_and_dtypes = []
for idx in range(num_tensors):
recv_dtype = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(recv_dtype, send_stage)
recv_dtype = self.ID_TO_DTYPE[recv_dtype.item()]
recv_ndims = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(recv_ndims, send_stage)
recv_ndims = recv_ndims.item()
recv_shape = torch.LongTensor([1] * recv_ndims).to(self.device)
p2p.recv(recv_shape, send_stage)
recv_shapes_and_dtypes.append((recv_shape.tolist(), recv_dtype))
buffers = self._allocate_buffers(recv_shapes_and_dtypes, num_buffers=1)[0]
# Convert to tuples if requested.
if recv_type == 2:
buffers = tuple(buffers)
return buffers
else:
raise NotImplementedError(f'Could not receive meta type {recv_type}')
def _exec_send_activations(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('pipe_send_output').start()
outputs = self.pipe_buffers['outputs'][buffer_id]
# NCCL does not like to send torch.BoolTensor types, so cast the mask to half().
# We could do char, but with half() we can eventually flatten with other fp16
# messages (TODO)
if self.has_attention_mask or self.has_bool_tensors:
outputs = list(outputs)
outputs[-1] = outputs[-1].half()
outputs = tuple(outputs)
if self.first_output_send:
self.first_output_send = False
self._send_tensor_meta(outputs, self.next_stage)
if isinstance(outputs, torch.Tensor):
p2p.send(outputs, self.next_stage)
elif isinstance(outputs, tuple):
for idx, buffer in enumerate(outputs):
p2p.send(buffer, self.next_stage)
else:
raise NotImplementedError('Could not send output of type '
f'{type(outputs)}')
# Restore the boolean tensor
if self.has_attention_mask or self.has_bool_tensors:
outputs = list(outputs)
outputs[-1] = outputs[-1].bool()
outputs = tuple(outputs)
if self.wall_clock_breakdown():
self.timers('pipe_send_output').stop()
def _exec_send_grads(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('pipe_send_grad').start()
inputs = self.pipe_buffers['inputs'][buffer_id]
# Partition the gradient
if self.is_grad_partitioned:
if isinstance(inputs, tuple):
first_input = inputs[0]
assert all([torch.is_tensor(elt) for elt in inputs[1:]])
inputs_grad_tail = [elt.grad for elt in inputs[1:] if elt.grad is not None]
elif torch.is_tensor(inputs):
first_input = inputs
inputs_grad_tail = []
else:
raise ValueError("expecting a tensor or a tuple of tensors")
assert torch.is_tensor(first_input)
part = PartitionedTensor(tensor=first_input.grad, group=self.grid.get_slice_parallel_group())
inputs = (part.to_meta(), part.data(), *inputs_grad_tail)
# XXX Terrible hack
# Drop the attention mask from the input buffer here. It does not have
# a grad that needs to be communicated. We free the buffer immediately
# after, so no need to restore it. The receiver also has a hack that skips
# the recv. This is because NCCL does not let us send torch.BoolTensor :-(.
if self.has_attention_mask or self.has_bool_tensors:
inputs = list(inputs)
inputs.pop()
inputs = tuple(inputs)
if isinstance(inputs, torch.Tensor):
assert inputs.grad is not None
p2p.send(inputs.grad, self.prev_stage)
else:
# XXX terrible hacky branch
if self.is_grad_partitioned:
# First two sends are partitioned gradient
p2p.send(inputs[0], self.prev_stage)
p2p.send(inputs[1], self.prev_stage)
else:
for idx, buffer in enumerate(inputs):
# Skip tensors that will not produce a grad
if not buffer.is_floating_point():
assert buffer.grad is None
continue
assert buffer.grad is not None
p2p.send(buffer.grad, self.prev_stage)
# We can free up the input buffer now
self.pipe_buffers['inputs'][buffer_id] = None
if self.wall_clock_breakdown():
self.timers('pipe_send_grad').stop()
def _exec_recv_activations(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('pipe_recv_input').start()
recvd = None
# Allocate the buffer if necessary
if self.pipe_recv_buf is None:
self.pipe_recv_buf = self._recv_tensor_meta(self.prev_stage)
if isinstance(self.pipe_recv_buf, torch.Tensor):
p2p.recv(self.pipe_recv_buf, self.prev_stage)
recvd = self.pipe_recv_buf.clone().detach()
recvd.requires_grad = recvd.is_floating_point()
else:
assert isinstance(self.pipe_recv_buf, tuple)
recvd = [None] * len(self.pipe_recv_buf)
for idx, buffer in enumerate(self.pipe_recv_buf):
assert torch.is_tensor(buffer)
# XXX hardcode meta type
if self.is_pipe_partitioned and idx == 0 and buffer.dtype != torch.long:
if self.meta_buffer is None:
self.meta_buffer = torch.zeros(buffer.size(), dtype=torch.long, device=self.device)
buffer = self.meta_buffer
p2p.recv(buffer, self.prev_stage)
recvd[idx] = buffer.clone().detach()
# NCCL does not like to send torch.BoolTensor types, so un-cast the
# attention mask
if self.has_attention_mask or self.has_bool_tensors:
recvd[-1] = recvd[-1].bool()
recvd = tuple(recvd)
for buffer in recvd:
buffer.requires_grad = buffer.is_floating_point()
self.pipe_buffers['inputs'][buffer_id] = recvd
if self.wall_clock_breakdown():
self.timers('pipe_recv_input').stop()
def _exec_recv_grads(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('pipe_recv_grad').start()
outputs = self.pipe_buffers['outputs'][buffer_id]
# XXX these shapes are hardcoded for Megatron
# Restore partitioned output if it was partitioned and we are sending full gradients
if self.is_pipe_partitioned and not self.is_grad_partitioned:
part_output = PartitionedTensor.from_meta(meta=outputs[0],
local_part=outputs[1],
group=self.grid.get_slice_parallel_group())
outputs[0].data = part_output.full()
outputs = (outputs[0], *outputs[2:])
# save for backward
self.pipe_buffers['outputs'][buffer_id] = outputs
# Allocate gradient if necessary
if self.grad_layer is None:
if isinstance(outputs, torch.Tensor):
s = list(outputs.size())
self.grad_layer = self._allocate_buffer(s, dtype=outputs.dtype, num_buffers=1)[0]
else:
# XXX This is a HACK
# When we exchange activations/gradients, the two pipe stages
# need to issue the send/recv with the same buffer sizes or
# else there is a deadlock. The is_floating_point() filter is
# used to avoid sending gradients for tensors that do not
# produce gradients. When TP>1, we partition the first
# activations/gradients across TP ranks to save communication
# volume and memory. That partitioned tensor is represented as
# two tensors: a 1/TPth chunk of the original data and also a
# small LongTensor storing the metadata used to reconstruct on
# the other side. When combined, the floating point filter also
# filtered out the metadata tensor. This quick (hacky) fix just
# branches on is_grad_partitioned so we don't filter out the
# metadata tensor.
if self.is_grad_partitioned:
sizes_and_dtypes = [(list(t.size()), t.dtype)
for t in outputs[:2]] + [(list(t.size()), t.dtype)
for t in outputs[2:] if t.is_floating_point()]
else:
sizes_and_dtypes = [(list(t.size()), t.dtype) for t in outputs if t.is_floating_point()]
self.grad_layer = self._allocate_buffers(sizes_and_dtypes, num_buffers=1)[0]
if isinstance(self.grad_layer, torch.Tensor):
p2p.recv(self.grad_layer, self.next_stage)
else:
assert isinstance(outputs, tuple)
for idx, buffer in enumerate(self.grad_layer):
# XXX GPT-2 hack
if self.is_grad_partitioned and idx == 0 and buffer.dtype != torch.long:
buffer.data = torch.zeros(buffer.size(), dtype=torch.long, device=self.device)
p2p.recv(buffer, self.next_stage)
if self.wall_clock_breakdown():
self.timers('pipe_recv_grad').stop()
def _exec_optimizer_step(self, lr_kwargs=None):
if self.wall_clock_breakdown():
self.timers('step_microstep').start()
self.timers('step').start()
self.mem_status('BEFORE STEP', reset_max=True)
self._force_grad_boundary = True
self._take_model_step(lr_kwargs)
self._force_grad_boundary = False
self.mem_status('AFTER STEP')
if self.global_rank == 0 and self.monitor.enabled:
self.summary_events = [(f'Train/Samples/lr', self.get_lr()[0], self.global_samples)]
if self.fp16_enabled() and hasattr(self.optimizer, 'cur_scale'):
self.summary_events.append(
(f'Train/Samples/loss_scale', self.optimizer.cur_scale, self.global_samples))
self.monitor.write_events(self.summary_events)
if self.wall_clock_breakdown():
self.timers('step_microstep').stop()
self.timers('step').stop()
if self.global_steps % self.steps_per_print() == 0:
self.timers.log([
'batch_input', 'forward_microstep', 'backward_microstep', 'backward_inner_microstep',
'backward_allreduce_microstep', 'backward_tied_allreduce_microstep', 'step_microstep'
])
if self.global_steps % self.steps_per_print() == 0:
self.timers.log(['forward', 'backward', 'backward_inner', 'backward_allreduce', 'step'])
def _zero_grads(self, inputs):
if isinstance(inputs, torch.Tensor):
if inputs.grad is not None:
inputs.grad.data.zero_()
else:
for t in inputs:
if t.grad is not None:
t.grad.data.zero_()
def _allocate_zeros(self, shape, **kwargs):
""" Allocate a tensor of zeros on the engine's device.
Arguments:
shape: the shape of the tensor to allocate
kwargs: passed to torch.zeros()
Returns:
A tensor from torch.zeros() allocated on self.device.
"""
if "dtype" not in kwargs:
if self.fp16_enabled():
kwargs["dtype"] = torch.half
if self.bfloat16_enabled():
kwargs["dtype"] = torch.bfloat16
return torch.zeros(shape, device=self.device, **kwargs)
def _allocate_buffer(self, shape, num_buffers=-1, **kwargs):
buffers = []
if num_buffers == -1:
num_buffers = self.num_pipe_buffers
for count in range(num_buffers):
buffers.append(self._allocate_zeros(shape, **kwargs))
return buffers
def _allocate_buffers(self, shapes_and_dtypes, requires_grad=False, num_buffers=-1):
buffers = []
if num_buffers == -1:
num_buffers = self.num_pipe_buffers
for count in range(num_buffers):
buffer = []
for shape, dtype in shapes_and_dtypes:
buffer.append(self._allocate_zeros(shape, dtype=dtype, requires_grad=requires_grad))
buffers.append(buffer)
return buffers
def forward(self, *args, **kwargs):
"""Disabled for pipeline parallel training. See ``train_batch()``. """
raise PipelineError("Only train_batch() is accessible in pipeline mode.")
def backward(self, *args, **kwargs):
"""Disabled for pipeline parallel training. See ``train_batch()``. """
raise PipelineError("Only train_batch() is accessible in pipeline mode.")
def step(self, *args, **kwargs):
"""Disabled for pipeline parallel training. See ``train_batch()``. """
raise PipelineError("Only train_batch() is accessible in pipeline mode.")
def mem_status(self, msg, print_rank=-1, reset_max=False):
return
global mem_alloced, mem_cached
if not self.global_steps == 0 or not self.global_steps == 9:
#return
pass
if self.mpu.get_data_parallel_rank() != 0:
return
if self.global_rank != 0:
return
rank = self.global_rank
if print_rank != -1 and rank != print_rank:
return
get_accelerator().synchronize()
if reset_max:
get_accelerator().reset_max_memory_cached()
get_accelerator().reset_max_memory_allocated()
new_alloced = get_accelerator().memory_allocated()
new_cached = get_accelerator().memory_cached()
delta_alloced = new_alloced - mem_alloced
delta_cached = new_cached - mem_cached
mem_cached = new_cached
mem_alloced = new_alloced
max_alloced = get_accelerator().max_memory_allocated()
max_cached = get_accelerator().max_memory_cached()
# convert to GB for printing
new_alloced /= 1024**3
new_cached /= 1024**3
delta_alloced /= 1024**3
delta_cached /= 1024**3
max_alloced /= 1024**3
max_cached /= 1024**3
print(
f'RANK={rank} STAGE={self.stage_id} STEP={self.global_steps} MEMSTATS', msg,
f'current alloc={new_alloced:0.4f}GB (delta={delta_alloced:0.4f}GB max={max_alloced:0.4f}GB) '
f'current cache={new_cached:0.4f}GB (delta={delta_cached:0.4f}GB max={max_cached:0.4f}GB)')
def module_state_dict(self):
"""Override hack to save a pipe model and return the directory path of the save.
This method should only be called by DeepSpeed's ``save_checkpoint()``. The
recommended way of saving a ``PipelineModule`` outside of ``save_checkpoint()``
is ``save_state_dict()``.
Returns:
None
"""
assert isinstance(self.module, PipelineModule)
assert self._curr_ckpt_path is not None, \
"PipelineEngine expects module_state_dict() to be called from save_checkpoint()"
self.module.save_state_dict(self._curr_ckpt_path, checkpoint_engine=self.checkpoint_engine)
return None
def load_module_state_dict(self, checkpoint, strict=True, custom_load_fn=None):
"""Override hack to instead use a directory path.
This is important because pipeline models checkpoint by layer instead of rank.
If ``state_dict`` is not ``None`` or a ``str``, we revert to ``super()`` expecting a ``dict``.
Args:
checkpoint (dict): Checkpoint payload; ``checkpoint['module']`` holds either a state dict or a path string.
strict (bool, optional): Strict state loading. Defaults to True.
"""
assert custom_load_fn is None, "custom_load_fn not supported w. pipeline parallelism"
state_dict = checkpoint['module']
if (state_dict is not None) and (not isinstance(state_dict, str)):
super().load_module_state_dict(state_dict, strict)
return
self.module.load_state_dir(load_dir=self._curr_ckpt_path,
strict=strict,
checkpoint_engine=self.checkpoint_engine)
# A map of PipeInstruction types to methods. Each method will be executed with the
# kwargs provided to the PipeInstruction from the scheduler.
_INSTRUCTION_MAP = {
schedule.OptimizerStep: _exec_optimizer_step,
schedule.ReduceGrads: _exec_reduce_grads,
schedule.ReduceTiedGrads: _exec_reduce_tied_grads,
schedule.LoadMicroBatch: _exec_load_micro_batch,
schedule.ForwardPass: _exec_forward_pass,
schedule.BackwardPass: _exec_backward_pass,
schedule.SendActivation: _exec_send_activations,
schedule.RecvActivation: _exec_recv_activations,
schedule.SendGrad: _exec_send_grads,
schedule.RecvGrad: _exec_recv_grads,
}
def _exec_schedule(self, pipe_schedule):
# Reserve and reset buffers.
self._reserve_pipe_buffers(pipe_schedule.num_pipe_buffers())
self.fwd_outputs = []
# For each step in the schedule
for step_cmds in pipe_schedule:
# For each instruction in the step
for cmd in step_cmds:
if type(cmd) not in self._INSTRUCTION_MAP:
raise RuntimeError(f'{self.__class__.__name__} does not understand instruction {repr(cmd)}')
# Equivalent to: self._exec_forward_pass(buffer_id=0)
self._exec_instr = MethodType(self._INSTRUCTION_MAP[type(cmd)], self)
self._exec_instr(**cmd.kwargs)
| 56,886 | 42.49159 | 128 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/pipe/p2p.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pickle
import typing
import torch
from deepspeed import comm as dist
# To query whether we have send/recv support
from packaging.version import Version
from deepspeed.git_version_info import torch_info
from deepspeed.accelerator import get_accelerator
_groups = None
_grid = None
_async = []
def can_send_recv() -> bool:
torch_version = Version(torch_info['version'])
sendrecv_min = Version('1.8')
return torch_version >= sendrecv_min
#initializes adjacent process groups
#run this only after deepspeed.init_distributed() has been called
def init_process_groups(grid):
global _groups, _grid
_grid = grid
assert _grid.pipe_parallel_size > 1, "There is no pipeline parallelism"
if not can_send_recv():
_groups = [dist.new_group(ranks=group) for group in _grid.p2p_groups]
def _is_valid_send_recv(src_stage, dest_stage):
first_stage = 0
last_stage = _grid.pipe_parallel_size - 1
assert abs(src_stage-dest_stage) == 1 or \
(src_stage == first_stage and dest_stage == last_stage) or \
(src_stage == last_stage and dest_stage == first_stage), \
"Functionality currently limited to send and receive between adjacent ranks only"
def send(tensor, dest_stage, async_op=False):
global _groups
assert async_op == False, "Doesn't support async_op true"
src_stage = _grid.get_stage_id()
_is_valid_send_recv(src_stage, dest_stage)
dest_rank = _grid.stage_to_global(stage_id=dest_stage)
if async_op:
global _async
op = dist.isend(tensor, dest_rank)
_async.append(op)
else:
if can_send_recv():
return dist.send(tensor, dest_rank)
else:
group = _get_send_recv_group(src_stage, dest_stage)
src_rank = _grid.stage_to_global(stage_id=src_stage)
return dist.broadcast(tensor, src_rank, group=group, async_op=async_op)
def recv(tensor, src_stage, async_op=False):
global _groups
assert async_op == False, "Doesn't support async_op true"
dest_stage = _grid.get_stage_id()
_is_valid_send_recv(src_stage, dest_stage)
src_rank = _grid.stage_to_global(stage_id=src_stage)
if async_op:
global _async
op = dist.irecv(tensor, src_rank)
_async.append(op)
else:
if can_send_recv():
return dist.recv(tensor, src_rank)
else:
group = _get_send_recv_group(src_stage, dest_stage)
return dist.broadcast(tensor, src_rank, group=group, async_op=async_op)
def wait():
global _async
for op in _async:
op.wait()
_async = []
get_accelerator().synchronize()
def send_obj(msg: typing.Any, dest: int):
"""Send an arbitrary python object to ``dest``.
Note: ``msg`` must be pickleable.
WARN: This incurs a CPU -> GPU transfer and should be used sparingly
for performance reasons.
Args:
msg (typing.Any): The object to send.
dest (int): Destination rank.
"""
# serialize the message
msg = pickle.dumps(msg)
# construct a tensor to send
msg = torch.ByteTensor(torch.ByteStorage.from_buffer(msg)).to(get_accelerator().device_name())
# Send meta and message
length_tensor = torch.tensor([len(msg)], dtype=torch.long).to(get_accelerator().device_name())
dist.send(length_tensor, dst=dest)
dist.send(msg, dst=dest)
def recv_obj(sender: int) -> typing.Any:
"""Receive an arbitrary python object from ``sender``.
WARN: This incurs CPU <-> GPU transfers and should be used sparingly
for performance reasons.
Args:
sender (int): The rank sending the message.
"""
# Get message meta
length = torch.tensor([0], dtype=torch.long).to(get_accelerator().device_name())
dist.recv(length, src=sender)
# Receive and deserialize
msg = torch.empty(length.item(), dtype=torch.uint8).to(get_accelerator().device_name())
dist.recv(msg, src=sender)
msg = pickle.loads(msg.cpu().numpy().tobytes())
def _to(x):
"""Recursively move to the current device."""
if torch.is_tensor(x):
return x.to(get_accelerator().device_name())
if isinstance(x, (tuple, list)):
ret = [_to(x_) for x_ in x]
if isinstance(x, tuple):
ret = tuple(ret)
return ret
# handle kwargs
if isinstance(x, dict):
ret = dict()
for key, val in x.items():
ret[_to(key)] = _to(val)
return ret
# Anything else is a no-op
return x
msg = _to(msg)
return msg
def _get_send_recv_group(src_stage, dest_stage):
'''The group id is always the smaller rank unless it's a wrap-around.'''
stage_id = None
first_stage = 0
last_stage = _grid.pipe_parallel_size - 1
if (src_stage == first_stage and dest_stage == last_stage
or dest_stage == first_stage and src_stage == last_stage):
stage_id = last_stage
elif src_stage > dest_stage:
stage_id = dest_stage
else:
stage_id = src_stage
'''group_id corresponds to group of [group_id, group_id+1]
unless group_id is the rank of the last stage
in which case group_id corresponds to group[group_id-num_stages+1, group_id]
'''
group_id = _grid.stage_to_global(stage_id=stage_id)
return _groups[group_id]
| 5,477 | 28.294118 | 98 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/pipe/topology.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed import comm as dist
from collections import namedtuple
from itertools import product as cartesian_product
class ProcessTopology:
""" Manages the mapping of n-dimensional Cartesian coordinates to linear
indices. This mapping is used to map the rank of processes to the grid
for various forms of parallelism.
Each axis of the tensor is accessed by its name. The provided ordering
of the axes defines the layout of the topology. ProcessTopology uses a "row-major"
layout of the tensor axes, and so axes=['x', 'y'] would map coordinates (x,y) and
(x,y+1) to adjacent linear indices. If instead axes=['y', 'x'] was used, coordinates
(x,y) and (x+1,y) would be adjacent.
Some methods return ProcessCoord namedtuples.
"""
def __init__(self, axes, dims):
"""Create a mapping of n-dimensional tensor coordinates to linear indices.
Arguments:
axes (list): the names of the tensor axes
dims (list): the dimension (length) of each axis of the topology tensor
"""
self.axes = axes # names of each topology axis
self.dims = dims # length of each topology axis
# This is actually a class that lets us hash {'row':3, 'col':2} mappings
self.ProcessCoord = namedtuple('ProcessCoord', axes)
self.mapping = {}
ranges = [range(d) for d in dims]
# example: 1, (0,0,1)
for global_rank, coord in enumerate(cartesian_product(*ranges)):
key = {axis: coord[self.axes.index(axis)] for axis in self.axes}
key = self.ProcessCoord(**key)
# for example, {ProcessCoord(row=0, col=1) : 1}
self.mapping[key] = global_rank
def get_rank(self, **coord_kwargs):
"""Return the global rank of a process via its coordinates.
Coordinates are specified as kwargs. For example:
>>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3])
>>> X.get_rank(x=0, y=1)
1
"""
if len(coord_kwargs) != len(self.axes):
raise ValueError('get_rank() does not support slices. Use filter_match().')
key = self.ProcessCoord(**coord_kwargs)
assert key in self.mapping, f'key {coord_kwargs} invalid'
return self.mapping[key]
def get_axis_names(self):
"""Return a list of the axis names in the ordering of the topology. """
return self.axes
def get_rank_repr(self, rank, omit_axes=['data', 'pipe'], inner_sep='_', outer_sep='-'):
"""Return a string representation of a rank.
This method is primarily used for checkpointing model data.
For example:
>>> topo = Topo(axes=['a', 'b'], dims=[2, 2])
>>> topo.get_rank_repr(rank=3)
'a_01-b_01'
>>> topo.get_rank_repr(rank=3, omit_axes=['a'])
'b_01'
Args:
rank (int): A rank in the topology.
omit_axes (list, optional): Axes that should not be in the representation. Defaults to ['data', 'pipe'].
inner_sep (str, optional): Separator between an axis name and its index. Defaults to '_'.
outer_sep (str, optional): Separator between axis entries. Defaults to '-'.
Returns:
str: A string representation of the coordinate owned by ``rank``.
"""
omit_axes = frozenset(omit_axes)
axes = [a for a in self.get_axis_names() if a not in omit_axes]
names = []
for ax in axes:
ax_rank = getattr(self.get_coord(rank=rank), ax)
names.append(f'{ax}{inner_sep}{ax_rank:02d}')
return outer_sep.join(names)
def get_dim(self, axis):
"""Return the number of processes along the given axis.
For example:
>>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3])
>>> X.get_dim('y')
3
"""
if axis not in self.axes:
return 0
return self.dims[self.axes.index(axis)]
def get_coord(self, rank):
"""Return the coordinate owned by a process rank.
The axes of the returned namedtuple can be directly accessed as members. For
example:
>>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3])
>>> coord = X.get_coord(rank=1)
>>> coord.x
0
>>> coord.y
1
"""
for coord, idx in self.mapping.items():
if idx == rank:
return coord
raise ValueError(f'rank {rank} not found in topology.')
def get_axis_comm_lists(self, axis):
""" Construct lists suitable for a communicator group along axis ``axis``.
Example:
>>> topo = Topo(axes=['pipe', 'data', 'model'], dims=[2, 2, 2])
>>> topo.get_axis_comm_lists('pipe')
[
[0, 4], # data=0, model=0
[1, 5], # data=0, model=1
[2, 6], # data=1, model=0
[3, 7], # data=1, model=1
]
Returns:
A list of lists whose coordinates match in all axes *except* ``axis``.
"""
# Return an empty list instead of raising a RuntimeError; this allows more
# generalized code for hybrid parallelisms.
if axis not in self.axes:
return []
# Grab all axes but `axis`
other_axes = [a for a in self.axes if a != axis]
lists = []
# Construct all combinations of coords with other_axes
ranges = [range(self.get_dim(a)) for a in other_axes]
for coord in cartesian_product(*ranges):
other_keys = {a: coord[other_axes.index(a)] for a in other_axes}
# now go over all ranks in `axis`.
sub_list = []
for axis_key in range(self.get_dim(axis)):
key = self.ProcessCoord(**other_keys, **{axis: axis_key})
sub_list.append(self.mapping[key])
lists.append(sub_list)
return lists
def filter_match(self, **filter_kwargs):
"""Return the list of ranks whose coordinates match the provided criteria.
Example:
>>> X = ProcessTopology(axes=['pipe', 'data', 'model'], dims=[2, 2, 2])
>>> X.filter_match(pipe=0, data=1)
[2, 3]
>>> [X.get_coord(rank) for rank in X.filter_match(pipe=0, data=1)]
[ProcessCoord(pipe=0, data=1, model=0), ProcessCoord(pipe=0, data=1, model=1)]
Arguments:
**filter_kwargs (dict): criteria used to select coordinates.
Returns:
The list of ranks whose coordinates match filter_kwargs.
"""
def _filter_helper(x):
for key, val in filter_kwargs.items():
if getattr(x, key) != val:
return False
return True
coords = filter(_filter_helper, self.mapping.keys())
return [self.mapping[coord] for coord in coords]
def get_axis_list(self, axis, idx):
"""Returns the list of global ranks whose coordinate in an axis is idx.
For example:
>>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3])
>>> X.get_axis_list(axis='x', idx=0)
[0, 1, 2]
>>> X.get_axis_list(axis='y', idx=0)
[0, 3]
"""
# This could be faster by generating the desired keys directly instead of
# filtering.
axis_num = self.axes.index(axis)
ranks = [self.mapping[k] for k in self.mapping.keys() if k[axis_num] == idx]
return ranks
def world_size(self):
return len(self.mapping)
def __str__(self):
return str(self.mapping)
def _prime_factors(N):
""" Returns the prime factorization of positive integer N. """
if N <= 0:
raise ValueError("Values must be strictly positive.")
primes = []
while N != 1:
for candidate in range(2, N + 1):
if N % candidate == 0:
primes.append(candidate)
N //= candidate
break
return primes
class PipeDataParallelTopology(ProcessTopology):
""" A topology specialization for hybrid data and pipeline parallelism.
Uses data parallelism on the last dimension to encourage gradient
reductions to use high-bandwidth intra-node links and lower-volume
pipeline communications to use low-bandwidth inter-node links.
"""
def __init__(self, num_pp, num_dp):
super().__init__(axes=['pipe', 'data'], dims=[num_pp, num_dp])
class PipeModelDataParallelTopology(ProcessTopology):
""" A topology for hybrid pipeline, model, and data parallelism. """
def __init__(self, num_pp, num_mp, num_dp):
super().__init__(axes=['pipe', 'data', 'model'], dims=[num_pp, num_dp, num_mp])
class PipelineParallelGrid:
"""Implements a grid object that stores the data parallel ranks
corresponding to each of the model parallel stages
The grid object organizes the processes in a distributed pytorch job
into a 2D grid, of stage_id and data_parallel_id.
self.stage_id and self.data_parallel_id stores the stage id
and the data parallel id of current process.
self.dp_group groups the processes by stage_id:
self.dp_group[i] is a list containing all process ranks whose
stage_id is i.
self.p2p_groups stores a list of pairs, where each pair
holds the process ranks of adjacent stages for a given data_parallel_id.
For example, if num_stages is 5, the pair [7, 8] represents stages [3, 4]
for data_parallel_id = 1. A stage wrap-around appears as non-adjacent ranks;
for example, the pair [4, 0] represents the wrap-around from stage 4 to stage 0
for data_parallel_id = 0, and similarly [9, 5] represents the wrap-around
stages [4, 0] for data_parallel_id = 1.
"""
def __init__(self, topology=None, process_group=None):
# TODO use process_group if provided
self.global_rank = dist.get_rank()
self.world_size = dist.get_world_size()
if topology is not None:
if self.global_rank == 0:
print('Using topology:', topology)
self._topo = topology
else:
num_pp = 1
num_dp = 1
for idx, prime in enumerate(_prime_factors(self.world_size)):
if idx % 2 == 0:
num_pp *= prime
else:
num_dp *= prime
self._topo = PipeDataParallelTopology(num_dp=num_dp, num_pp=num_pp)
self.data_parallel_size = max(self._topo.get_dim('data'), 1)
self.pipe_parallel_size = max(self._topo.get_dim('pipe'), 1)
self.model_parallel_size = max(self._topo.get_dim('model'), 1)
self.slice_parallel_size = self.model_parallel_size
assert self._is_grid_valid(), "Invalid Grid"
self.stage_id = self.get_stage_id()
self.data_parallel_id = self.get_data_parallel_id()
# Create new ProcessGroups for all model parallelism. DeepSpeedLight uses these
# to detect overflow, etc.
self.ds_model_proc_group = None
self.ds_model_rank = -1
for dp in range(self.data_parallel_size):
ranks = sorted(self._topo.get_axis_list(axis='data', idx=dp))
if self.global_rank == 0:
#print(f'RANK={self.global_rank} building DeepSpeed model group: {ranks}')
pass
proc_group = dist.new_group(ranks=ranks)
if self.global_rank in ranks:
self.ds_model_proc_group = proc_group
self.ds_model_world_size = len(ranks)
self.ds_model_rank = ranks.index(self.global_rank)
assert self.ds_model_rank > -1
assert self.ds_model_proc_group is not None
# Create new ProcessGroup for gradient all-reduces - these are the data parallel groups
self.dp_group = []
self.dp_groups = self._topo.get_axis_comm_lists('data')
for g in self.dp_groups:
proc_group = dist.new_group(ranks=g)
if self.global_rank in g:
self.dp_group = g
self.dp_proc_group = proc_group
self.is_first_stage = (self.stage_id == 0)
self.is_last_stage = (self.stage_id == (self.pipe_parallel_size - 1))
self.p2p_groups = self._build_p2p_groups()
# Create new ProcessGroup for pipeline collectives - these are pipe parallel groups
self.pp_group = []
self.pp_proc_group = None
self.pipe_groups = self._topo.get_axis_comm_lists('pipe')
for ranks in self.pipe_groups:
if self.global_rank == 0:
#print(f'RANK={self.global_rank} building pipeline group: {ranks}')
pass
proc_group = dist.new_group(ranks=ranks)
if self.global_rank in ranks:
self.pp_group = ranks
self.pp_proc_group = proc_group
assert self.pp_proc_group is not None
# Create new ProcessGroup for model (tensor-slicing) collectives
# Short circuit case without model parallelism.
# TODO: it would be nice if topology had bcast semantics to avoid this branching
# case?
if self.model_parallel_size == 1:
for group_rank in range(self.world_size):
group_rank = [group_rank]
group = dist.new_group(ranks=group_rank)
if group_rank[0] == self.global_rank:
self.slice_group = group_rank
self.slice_proc_group = group
return
else:
self.mp_group = []
self.model_groups = self._topo.get_axis_comm_lists('model')
for g in self.model_groups:
proc_group = dist.new_group(ranks=g)
if self.global_rank in g:
self.slice_group = g
self.slice_proc_group = proc_group
def get_stage_id(self):
return self._topo.get_coord(rank=self.global_rank).pipe
def get_data_parallel_id(self):
return self._topo.get_coord(rank=self.global_rank).data
def _build_p2p_groups(self):
"""Groups for sending and receiving activations and gradients across model
parallel stages.
"""
comm_lists = self._topo.get_axis_comm_lists('pipe')
p2p_lists = []
for rank in range(self.world_size):
for l in comm_lists:
assert len(l) == self.pipe_parallel_size
if rank in l:
idx = l.index(rank)
buddy_rank = l[(idx + 1) % self.pipe_parallel_size]
p2p_lists.append([rank, buddy_rank])
break # next global rank
assert len(p2p_lists) == self.world_size
return p2p_lists
def _is_grid_valid(self):
ranks = 1
for ax in self._topo.get_axis_names():
ranks *= self._topo.get_dim(ax)
return ranks == dist.get_world_size()
#returns the global rank of the process with the provided stage id
#which has the same data_parallel_id as caller process
def stage_to_global(self, stage_id, **kwargs):
me = self._topo.get_coord(self.global_rank)
transform = me._replace(pipe=stage_id, **kwargs)._asdict()
return self._topo.get_rank(**transform)
def topology(self):
return self._topo
# MPU functions for DeepSpeed integration
def get_global_rank(self):
return self.global_rank
def get_pipe_parallel_rank(self):
""" The stage of the pipeline this rank resides in. """
return self.get_stage_id()
def get_pipe_parallel_world_size(self):
""" The number of stages in the pipeline. """
return self.pipe_parallel_size
def get_pipe_parallel_group(self):
""" The group of ranks within the same pipeline. """
return self.pp_proc_group
def get_data_parallel_rank(self):
""" Which pipeline this rank resides in. """
return self.data_parallel_id
def get_data_parallel_world_size(self):
""" The number of pipelines. """
return self.data_parallel_size
def get_data_parallel_group(self):
""" The group of ranks within the same stage of all pipelines. """
return self.dp_proc_group
# These are model parallel groups across all types of model parallelism.
# Deepspeed uses them to detect overflow, etc.
def get_model_parallel_rank(self):
return self.ds_model_rank
def get_model_parallel_world_size(self):
return self.ds_model_world_size
def get_model_parallel_group(self):
return self.ds_model_proc_group
# For Megatron-style tensor slicing
def get_slice_parallel_rank(self):
if 'model' in self._topo.get_axis_names():
return self._topo.get_coord(rank=self.global_rank).model
else:
return 0
def get_slice_parallel_world_size(self):
return self.slice_parallel_size
def get_slice_parallel_group(self):
return self.slice_proc_group
| 17,167 | 36.56674 | 116 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/pipe/module.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import glob
import re as regex
from functools import partial
import torch
import torch.nn as nn
from deepspeed import comm as dist
from deepspeed.utils import logger
from .. import utils as ds_utils
from ..activation_checkpointing import checkpointing
from .topology import PipeDataParallelTopology, PipelineParallelGrid
from deepspeed.runtime.state_dict_factory import SDLoaderFactory
from deepspeed.accelerator import get_accelerator
class PipelineError(Exception):
"""Errors related to the use of deepspeed.PipelineModule """
class LayerSpec:
"""Building block for specifying pipeline-parallel modules.
LayerSpec stores the type information and parameters for each stage in a
PipelineModule. For example:
.. code-block:: python
nn.Sequential(
torch.nn.Linear(self.in_dim, self.hidden_dim, bias=False),
torch.nn.Linear(self.hidden_dim, self.out_dim)
)
becomes
.. code-block:: python
layer_specs = [
LayerSpec(torch.nn.Linear, self.in_dim, self.hidden_dim, bias=False),
LayerSpec(torch.nn.Linear, self.hidden_dim, self.out_dim)
]
"""
def __init__(self, typename, *module_args, **module_kwargs):
self.typename = typename
self.module_args = module_args
self.module_kwargs = module_kwargs
if not issubclass(typename, nn.Module):
raise RuntimeError('LayerSpec only supports torch.nn.Module types.')
if dist.is_initialized():
self.global_rank = dist.get_rank()
else:
self.global_rank = -1
def __repr__(self):
return ds_utils.call_to_str(self.typename.__name__, self.module_args, self.module_kwargs)
def build(self, log=False):
"""Build the stored specification."""
if log:
logger.info(f'RANK={self.global_rank} building {repr(self)}')
return self.typename(*self.module_args, **self.module_kwargs)
class TiedLayerSpec(LayerSpec):
def __init__(self, key, typename, *module_args, forward_fn=None, tied_weight_attr='weight', **module_kwargs):
super().__init__(typename, *module_args, **module_kwargs)
self.key = key
self.forward_fn = forward_fn
self.tied_weight_attr = tied_weight_attr
class PipelineModule(nn.Module):
"""Modules to be parallelized with pipeline parallelism.
The key constraint that enables pipeline parallelism is the
representation of the forward pass as a sequence of layers
and the enforcement of a simple interface between them. The
forward pass is implicitly defined by the module ``layers``. The key
assumption is that the output of each layer can be directly fed as
input to the next, like a ``torch.nn.Sequential``. The forward pass is
implicitly:
.. code-block:: python
def forward(self, inputs):
x = inputs
for layer in self.layers:
x = layer(x)
return x
.. note::
Pipeline parallelism is not compatible with ZeRO-2 and ZeRO-3.
Args:
layers (Iterable): A sequence of layers defining pipeline structure. Can be a ``torch.nn.Sequential`` module.
num_stages (int, optional): The degree of pipeline parallelism. If not specified, ``topology`` must be provided.
        topology (``deepspeed.runtime.pipe.ProcessTopology``, optional): Defines the axes of parallelism for training. Must be provided if ``num_stages`` is ``None``.
loss_fn (callable, optional): Loss is computed ``loss = loss_fn(outputs, label)``
seed_layers(bool, optional): Use a different seed for each layer. Defaults to False.
seed_fn(type, optional): The custom seed generating function. Defaults to random seed generator.
base_seed (int, optional): The starting seed. Defaults to 1234.
partition_method (str, optional): The method upon which the layers are partitioned. Defaults to 'parameters'.
        activation_checkpoint_interval (int, optional): The granularity of activation checkpointing in terms of number of layers. 0 disables activation checkpointing.
activation_checkpoint_func (callable, optional): The function to use for activation checkpointing. Defaults to ``deepspeed.checkpointing.checkpoint``.
        checkpointable_layers (list, optional): Layer class names that are eligible for activation checkpointing; when provided, only these layers are checkpointed. Defaults to None, which applies no additional filtering.
"""
def __init__(self,
layers,
num_stages=None,
topology=None,
loss_fn=None,
seed_layers=False,
seed_fn=None,
base_seed=1234,
partition_method='parameters',
activation_checkpoint_interval=0,
activation_checkpoint_func=checkpointing.checkpoint,
checkpointable_layers=None):
super().__init__()
if num_stages is None and topology is None:
raise RuntimeError('must provide num_stages or topology')
self.micro_offset = 0
self.loss_fn = loss_fn
self.checkpointable_layers = checkpointable_layers
if checkpointable_layers is not None:
assert isinstance(checkpointable_layers, list), "param `checkpointable_layers` must be type of list."
self.seed_layers = seed_layers
self.seed_fn = seed_fn
self.base_seed = base_seed
if dist.get_rank() == 0:
try:
seed_str = self.seed_fn.__name__
except AttributeError:
seed_str = None
print(f'SEED_LAYERS={self.seed_layers} BASE_SEED={self.base_seed} SEED_FN={seed_str}')
# Setup world info
self.world_group = dist.new_group(ranks=range(dist.get_world_size()))
self.global_rank = dist.get_rank(group=self.world_group)
self.world_size = dist.get_world_size(group=self.world_group)
self.local_rank = int(os.environ.get("LOCAL_RANK", None))
assert self.local_rank is not None
if topology:
self._topo = topology
self.num_stages = self._topo.get_dim('pipe')
else:
self.num_stages = num_stages
if topology is None:
if self.world_size % self.num_stages != 0:
raise RuntimeError(
f'num_stages ({self.num_stages}) must divide distributed world size ({self.world_size})')
dp = self.world_size // num_stages
topology = PipeDataParallelTopology(num_pp=num_stages, num_dp=dp)
self._topo = topology
# Construct communicators for pipeline topology
self._grid = PipelineParallelGrid(process_group=self.world_group, topology=self._topo)
self.stage_id = self._topo.get_coord(self.global_rank).pipe
# Initialize partition information
self._layer_specs = list(layers)
self._num_layers = len(self._layer_specs)
self._local_start = 0
self._local_stop = None
self._partition_layers(method=partition_method)
self.forward_funcs = []
self.fwd_map = {}
self.tied_modules = nn.ModuleDict()
self.tied_weight_attrs = {}
# Offset the random seed by the stage ID.
#newseed = get_accelerator().initial_seed() + self._grid.get_stage_id()
#ds_utils.set_random_seed(newseed)
#with torch.random.fork_rng(devices=[get_accelerator().current_device_name()]):
self._build()
self.to(get_accelerator().device_name(self.local_rank))
self.tied_comms = self._index_tied_modules()
self._synchronize_tied_weights()
self.activation_checkpoint_interval = activation_checkpoint_interval
self.activation_checkpoint_func = activation_checkpoint_func
def _build(self):
specs = self._layer_specs
for local_idx, layer in enumerate(specs[self._local_start:self._local_stop]):
layer_idx = local_idx + self._local_start
if self.seed_layers:
if self.seed_fn:
self.seed_fn(self.base_seed + layer_idx)
else:
ds_utils.set_random_seed(self.base_seed + layer_idx)
# Recursively build PipelineModule objects
if isinstance(layer, PipelineModule):
raise NotImplementedError('RECURSIVE BUILD NOT YET IMPLEMENTED')
# LayerSpec objects contain an nn.Module that should be allocated now.
elif isinstance(layer, nn.Module):
name = str(layer_idx)
self.forward_funcs.append(layer)
self.fwd_map.update({name: len(self.forward_funcs) - 1})
self.add_module(name, layer)
# TiedLayerSpec objects contain an nn.Module that should be allocated now.
elif isinstance(layer, TiedLayerSpec):
# Build and register the module if we haven't seen it before.
if layer.key not in self.tied_modules:
self.tied_modules[layer.key] = layer.build()
self.tied_weight_attrs[layer.key] = layer.tied_weight_attr
if layer.forward_fn is None:
# Just use forward()
self.forward_funcs.append(self.tied_modules[layer.key])
else:
# User specified fn with args (module, input)
self.forward_funcs.append(partial(layer.forward_fn, self.tied_modules[layer.key]))
# LayerSpec objects contain an nn.Module that should be allocated now.
elif isinstance(layer, LayerSpec):
module = layer.build()
name = str(layer_idx)
self.forward_funcs.append(module)
self.fwd_map.update({name: len(self.forward_funcs) - 1})
self.add_module(name, module)
# Last option: layer may be a functional (e.g., lambda). We do nothing in
# that case and just use it in forward()
else:
self.forward_funcs.append(layer)
# All pipeline parameters should be considered as model parallel in the context
# of our FP16 optimizer
for p in self.parameters():
p.ds_pipe_replicated = False
def _count_layer_params(self):
"""Count the trainable parameters in individual layers.
This routine will only build one layer at a time.
Returns:
A list of the number of parameters in each layer.
"""
param_counts = [0] * len(self._layer_specs)
for idx, layer in enumerate(self._layer_specs):
if isinstance(layer, LayerSpec):
l = layer.build()
params = filter(lambda p: p.requires_grad, l.parameters())
param_counts[idx] = sum(p.numel() for p in params)
elif isinstance(layer, nn.Module):
params = filter(lambda p: p.requires_grad, layer.parameters())
param_counts[idx] = sum(p.numel() for p in params)
return param_counts
def _find_layer_type(self, layername):
idxs = []
typeregex = regex.compile(layername, regex.IGNORECASE)
for idx, layer in enumerate(self._layer_specs):
name = None
if isinstance(layer, LayerSpec):
name = layer.typename.__name__
elif isinstance(layer, nn.Module):
name = layer.__class__.__name__
else:
try:
name = layer.__name__
except AttributeError:
continue
if typeregex.search(name):
idxs.append(idx)
if len(idxs) == 0:
raise RuntimeError(f"Partitioning '{layername}' found no valid layers to partition.")
return idxs
def forward(self, forward_input):
# We need to offset the seed by the microbatch ID. Save it in a local var to
# ensure it is preserved in the closure. Otherwise checkpointed forward funcs
# will see a different offset.
self.micro_offset += 1
def exec_range_func(start, end):
''' Helper function to be used with checkpoint()
Adapted from torch.utils.checkpoint:checkpoint_sequential()
'''
local_micro_offset = self.micro_offset + 1
def exec_func(*inputs):
# Single tensor inputs need to be unwrapped
if len(inputs) == 1:
inputs = inputs[0]
for idx, layer in enumerate(self.forward_funcs[start:end]):
self.curr_layer = idx + self._local_start
if self.seed_layers:
new_seed = (self.base_seed * local_micro_offset) + self.curr_layer
if self.seed_fn:
self.seed_fn(new_seed)
else:
ds_utils.set_random_seed(new_seed)
inputs = layer(inputs)
return inputs
return exec_func
if self.activation_checkpoint_interval == 0:
func = exec_range_func(0, len(self.forward_funcs))
x = func(forward_input)
else:
num_layers = len(self.forward_funcs)
x = forward_input
for start_idx in range(0, num_layers, self.activation_checkpoint_interval):
end_idx = min(start_idx + self.activation_checkpoint_interval, num_layers)
funcs = self.forward_funcs[start_idx:end_idx]
# Since we either pass tensors or tuples of tensors without unpacking, we
# need to be careful not to double-wrap tensors with tuple.
if not isinstance(x, tuple):
x = (x, )
if self._is_checkpointable(funcs):
x = self.activation_checkpoint_func(exec_range_func(start_idx, end_idx), *x)
else:
x = exec_range_func(start_idx, end_idx)(*x)
return x
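    # Worked example of the checkpointing loop above (illustrative numbers): with 9
    # layers on this stage and activation_checkpoint_interval=4, the forward pass is
    # executed as chunks [0:4], [4:8] and [8:9]; a chunk is routed through
    # activation_checkpoint_func (and therefore recomputed during backward) only if
    # _is_checkpointable() approves all of its layers.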
def _partition_layers(self, method='uniform'):
num_stages = self._topo.get_dim('pipe')
stage_id = self._topo.get_coord(self.global_rank).pipe
if self.global_rank == 0:
logger.info(f'Partitioning pipeline stages with method {method}')
method = method.lower()
# Each stage gets a simple uniform number of layers.
if method == 'uniform':
num_layers = len(self._layer_specs)
self.parts = ds_utils.partition_uniform(num_items=num_layers, num_parts=num_stages)
elif method == 'parameters':
param_counts = self._count_layer_params()
self.parts = ds_utils.partition_balanced(weights=param_counts, num_parts=num_stages)
elif method.startswith('type:'):
layertype = method.split(':')[1]
binary_weights = [0] * len(self._layer_specs)
for idx in self._find_layer_type(layertype):
binary_weights[idx] = 1
self.parts = ds_utils.partition_balanced(weights=binary_weights, num_parts=num_stages)
elif method == 'profile':
raise NotImplementedError(f'Partitioning method {method} not implemented.')
else:
raise NotImplementedError(f'Partitioning method {method} not implemented.')
# Print some information on the partitioning.
if self.global_rank == 0:
for stage in range(num_stages):
start = self.parts[stage]
stop = self.parts[stage + 1]
print(f'stage={stage} layers={stop - start}')
for idx, layer in enumerate(self._layer_specs[start:stop]):
name = str(layer)
if isinstance(layer, LayerSpec):
name = layer.typename.__name__
                    elif isinstance(layer, nn.Module):
name = layer.__class__.__name__
else:
try:
name = layer.__name__
except AttributeError:
pass
print(f' {idx+start:2d}: {name}')
if self.loss_fn:
try:
print(f' loss: {self.loss_fn.__name__}')
except AttributeError:
print(f' loss: {self.loss_fn.__class__.__name__}')
self._set_bounds(start=self.parts[stage_id], stop=self.parts[stage_id + 1])
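    # Worked example of the partition bookkeeping above (assumed values for
    # illustration): with 8 layers, 4 stages and method='uniform',
    # ds_utils.partition_uniform returns boundaries such as
    #   self.parts = [0, 2, 4, 6, 8]
    # and stage `s` builds layers self.parts[s]:self.parts[s + 1], so stage 1 owns
    # layers 2 and 3. The 'parameters' method produces the same kind of boundary list,
    # but balanced by per-layer parameter counts instead of layer counts.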
def allreduce_tied_weight_gradients(self):
'''All reduce the gradients of the tied weights between tied stages'''
for key, comm in self.tied_comms.items():
weight = getattr(self.tied_modules[key], comm['weight_attr'])
dist.all_reduce(weight.grad, group=comm['group'])
def get_tied_weights_and_groups(self):
weight_group_list = []
for key, comm in self.tied_comms.items():
weight = getattr(self.tied_modules[key], comm['weight_attr'])
weight_group_list.append((weight, comm['group']))
return weight_group_list
def _synchronize_tied_weights(self):
for key, comm in self.tied_comms.items():
dist.broadcast(
getattr(comm['module'], comm['weight_attr']),
src=min(comm['ranks']),
group=comm['group'],
)
def _index_tied_modules(self):
''' Build communication structures for tied modules. '''
tied_comms = {}
if self._topo.get_dim('pipe') == 1:
return tied_comms
specs = self._layer_specs
tie_keys = set(s.key for s in specs if isinstance(s, TiedLayerSpec))
for key in tie_keys:
# Find the layers that the tied module appears in
tied_layers = []
for idx, layer in enumerate(specs):
if isinstance(layer, TiedLayerSpec) and layer.key == key:
tied_layers.append(idx)
# Find all stages with this tied module
# TODO: Would be nice to remove the nested data/model parallelism loops and
# TODO: instead generalize in some way, since we really just care about the
# TODO: stage that owns the tied layer. Then loop over each (dp, mp, ...)
# TODO: fiber to generate process groups.
tied_stages = set(self.stage_owner(idx) for idx in tied_layers)
for dp in range(self._grid.data_parallel_size):
for mp in range(self._grid.get_slice_parallel_world_size()):
tied_ranks = []
for s in sorted(tied_stages):
if self._grid.get_slice_parallel_world_size() > 1:
tied_ranks.append(self._grid.stage_to_global(stage_id=s, data=dp, model=mp))
else:
tied_ranks.append(self._grid.stage_to_global(stage_id=s, data=dp))
group = dist.new_group(ranks=tied_ranks)
# Record this tied module if we own a local copy of it.
if self.global_rank in tied_ranks:
assert key in self.tied_modules
if key in self.tied_modules:
tied_comms[key] = {
'ranks': tied_ranks,
'group': group,
'weight_attr': self.tied_weight_attrs[key],
'module': self.tied_modules[key],
}
# Only count the tied module once in the eyes of the FP16 optimizer
if self.global_rank != tied_ranks[0]:
for p in self.tied_modules[key].parameters():
p.ds_pipe_replicated = True
'''
if len(tied_comms) > 0:
print(f'RANK={self.global_rank} tied_comms={tied_comms}')
'''
return tied_comms
def partitions(self):
return self.parts
def stage_owner(self, layer_idx):
assert 0 <= layer_idx < self._num_layers
for stage in range(self._topo.get_dim('pipe')):
if self.parts[stage] <= layer_idx < self.parts[stage + 1]:
return stage
raise RuntimeError(f'Layer {layer_idx} not owned? parts={self.parts}')
def _set_bounds(self, start=None, stop=None):
"""Manually define the range of layers that will be built on this process.
These boundaries are treated as list slices and so start is inclusive and stop is
exclusive. The default of None for both results in all layers being built
locally.
"""
self._local_start = start
self._local_stop = stop
def set_checkpoint_interval(self, interval):
assert interval >= 0
self.checkpoint_interval = interval
def topology(self):
""" ProcessTopology object to query process mappings. """
return self._topo
def mpu(self):
return self._grid
def num_pipeline_stages(self):
return self._topo.get_dim('pipe')
def ckpt_prefix(self, checkpoints_path, tag):
"""Build a prefix for all checkpoint files written by this module. """
# All checkpoint files start with this
rank_name = 'module'
# Data parallelism is omitted from the naming convention because we are agnostic
# to this in the checkpoint.
omit_dims = frozenset(['data'])
axes = [a for a in self._grid._topo.get_axis_names() if a not in omit_dims]
for dim in axes:
rank = getattr(self._grid._topo.get_coord(rank=self.global_rank), dim)
rank_name += f'-{dim}_{rank:02d}'
ckpt_name = os.path.join(checkpoints_path, str(tag), rank_name)
return ckpt_name
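    # Example of the resulting prefix (hypothetical tag and topology for illustration):
    # with a 'pipe' x 'data' topology, the 'data' axis is dropped by omit_dims, so a
    # rank on pipe stage 1 saving under tag 'global_step100' gets a prefix like
    #   <checkpoints_path>/global_step100/module-pipe_01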
def ckpt_layer_path(self, ckpt_dir, local_layer_idx):
"""Customize a prefix for a specific pipeline module layer. """
idx = local_layer_idx + self._local_start
layer_ckpt_path = os.path.join(ckpt_dir, f'layer_{idx:02d}')
rank_repr = self._grid._topo.get_rank_repr(rank=self.global_rank)
if rank_repr != '':
layer_ckpt_path += f'-{rank_repr}'
layer_ckpt_path += '-model_states.pt'
return layer_ckpt_path
def ckpt_layer_path_list(self, ckpt_dir, local_layer_idx):
"""Get all ckpt file list for a specific pipeline module layer. """
idx = local_layer_idx + self._local_start
layer_ckpt_path = os.path.join(ckpt_dir, f'layer_{idx:02d}-')
layer_ckpt_path += "*model_states.pt"
ckpt_files = glob.glob(layer_ckpt_path)
ckpt_files.sort()
return ckpt_files
def save_state_dict(self, save_dir, checkpoint_engine):
# Processes having the same model parallel rank on different data parallel instances
# have identical layer weights. We can distribute the task of saving the layer weights
# among the data parallel ranks. For example, if a pipeline stage has 9 layers and
# if there are 2 data parallel instances, rank 0 will save the first 5 layers and
# rank 1 will save the last 4.
dp_rank = self._grid.data_parallel_id
dp_size = self._grid.data_parallel_size
num_layers = len(self.forward_funcs)
if self.checkpoint_parallel_write_pipeline:
# spread layers evenly across data parallel ranks
offsets = ds_utils.partition_uniform(num_layers, dp_size)
start, end = offsets[dp_rank], offsets[dp_rank + 1]
else:
# data parallel rank 0 writes all layers
if dp_rank != 0:
return
start, end = 0, num_layers
layer_list = self.forward_funcs[start:end]
checkpoint_engine.makedirs(save_dir, exist_ok=True)
for idx, layer in enumerate(layer_list):
model_ckpt_path = self.ckpt_layer_path(save_dir, start + idx)
if not hasattr(layer, 'state_dict'):
continue
# We pass cloned tensors to torch.save() to avoid checkpoint bloat which occurs because torch.save()
# saves the underlying storage rather than the slice of the storage corresponding to individual tensors.
# This is a problem in DeepSpeed because we often allocate tensors using slices of large flattened buffers.
# Tensor cloning helps to avoid this problem because the storage of cloned tensors are closer to the true size.
# It is expected that the garbage collector will reclaim the cloned tensor storage to avoid memory bloat.
# See https://pytorch.org/docs/stable/notes/serialization.html#preserve-storage-sharing
orig_state_dict = layer.state_dict()
final_state_dict = type(orig_state_dict)({k: v.clone() for k, v in orig_state_dict.items()})
checkpoint_engine.save(final_state_dict, model_ckpt_path)
def load_state_dir(self, load_dir, checkpoint_engine, strict=True):
for idx, layer in enumerate(self.forward_funcs):
# Functions, etc. will not have state_dicts
if not hasattr(layer, 'load_state_dict'):
continue
# get all checkpoint files for the layer.
model_ckpt_list = self.ckpt_layer_path_list(load_dir, idx)
mp_rank = self._grid.get_slice_parallel_rank()
mp_world_size = self._grid.get_slice_parallel_world_size()
sd_loader = SDLoaderFactory.get_sd_loader(model_ckpt_list,
version=2.0,
checkpoint_engine=checkpoint_engine)
load_path, checkpoint, _ = sd_loader.load(mp_world_size, mp_rank, module_key=None, is_pipe_parallel=True)
layer.load_state_dict(checkpoint)
# if self._grid.data_parallel_id == 0:
# logger.info(
# f'RANK={self.global_rank} Loaded layer={idx+self._local_start} file={load_path}'
# )
self._synchronize_tied_weights()
def _is_checkpointable(self, funcs):
# This is an unfortunate hack related to torch and deepspeed activation checkpoint implementations.
# Some layers like torch.nn.Embedding will not receive grads if checkpointed, which breaks things.
# I presume it's related to the discrete inputs that cannot require_grad? Need to revisit.
if self.__class__.__name__ in ('GPTModelPipe', 'GPT2ModelPipe'):
return all('ParallelTransformerLayerPipe' in f.__class__.__name__ for f in funcs)
if self.checkpointable_layers is not None:
return all(f.__class__.__name__ in self.checkpointable_layers for f in funcs)
params = [f.parameters() for f in funcs if isinstance(f, torch.nn.Module)]
return any(len(list(p)) > 0 for p in params)
| 27,190 | 42.998382 | 171 | py |
DeepSpeed | DeepSpeed-master/deepspeed/runtime/pipe/schedule.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from ..utils import call_to_str
from abc import ABC, abstractmethod
class PipeSchedule(ABC):
"""Directs the execution of a pipeline engine by generating sequences of
:class:`PipeInstruction`.
Schedules are generators that yield sequences of
:class:`PipeInstruction` to process the micro-batches in one batch.
Each yielded step is atomic in the sense that a barrier
synchronization can be placed between successive steps without
deadlock.
Below is an example schedule that implements data parallelism with gradient accumulation:
.. code-block:: python
class DataParallelSchedule(PipeSchedule):
def steps(self):
for step_id in range(self.micro_batches):
cmds = [
LoadMicroBatch(buffer_id=0),
ForwardPass(buffer_id=0),
BackwardPass(buffer_id=0),
]
if step_id == self.micro_batches - 1:
cmds.extend([
ReduceGrads(),
OptimizerStep(),
])
yield cmds
def num_pipe_buffers(self):
return 1
Args:
micro_batches (int): The number of micro-batches that comprise a batch.
stages (int): The number of pipeline stages.
stage_id (int): The pipe stage that will execute the generated schedule.
"""
def __init__(self, micro_batches, stages, stage_id):
super().__init__()
self.micro_batches = micro_batches
self.stages = stages
self.stage_id = stage_id
self.prev_stage = self.stage_id - 1
self.next_stage = self.stage_id + 1
@abstractmethod
def steps(self):
"""Yield a list of :class:`PipeInstruction` for each step in the schedule.
.. note::
Schedules must implement ``steps()`` to define the schedule.
Returns:
Instructions to be executed as one step of the pipeline
"""
pass
def num_pipe_buffers(self):
"""The number of pipeline buffers that will be used by this stage.
.. note::
Schedules should specialize ``num_pipe_buffers()`` for memory savings at scale.
Returns:
The number of buffers for the engine to allocate.
"""
return self.micro_batches
def _valid_micro_batch(self, micro_batch_id):
return 0 <= micro_batch_id < self.micro_batches
def _valid_stage(self, stage_id):
return 0 <= stage_id < self.stages
@property
def stage(self):
"""Stage index used to configure this schedule."""
return self.stage_id
@property
def num_stages(self):
"""The number of total pipeline stages used to configure this schedule."""
return self.stages
@property
def num_micro_batches(self):
"""The number of total micro_batches used to configure this schedule."""
return self.micro_batches
@property
def is_first_stage(self):
"""True if the configured ``stage_id`` is the first stage in the pipeline."""
return self.stage_id == 0
@property
def is_last_stage(self):
"""True if the configured ``stage_id`` is the last stage in the pipeline."""
return self.stage_id == self.stages - 1
def _buffer_idx(self, micro_batch_id):
"""Map a micro-batch index to a pipeline buffer index.
This method uses a cyclic allocation strategy.
Args:
micro_batch_id (int): The micro-batch index relative to the beginning of the schedule.
Returns:
int: The index of the buffer that should store data.
"""
assert self._valid_micro_batch(micro_batch_id)
return micro_batch_id % self.num_pipe_buffers()
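    # Small worked example of the cyclic mapping above: with num_pipe_buffers() == 2,
    # micro-batches 0, 1, 2, 3 are stored in buffers 0, 1, 0, 1 respectively, so a new
    # micro-batch simply reuses the slot freed by the one retired two steps earlier.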
def __iter__(self):
self.it = None
return self
def __next__(self):
if self.it is None:
self.it = self.steps()
return next(self.it)
class InferenceSchedule(PipeSchedule):
"""A schedule for inferencing batches using pipeline parallelism.
"""
def steps(self):
""""""
prev_micro_batch_id = -1
total_steps = self.micro_batches + self.stages - 1
for step_id in range(total_steps):
cmds = []
micro_batch_id = step_id - self.stage_id
# Alternate send/recv buffers
if _is_even(self.stage_id):
recv_buf = step_id % 2
send_buf = (step_id + 1) % 2
else:
recv_buf = (step_id + 1) % 2
send_buf = step_id % 2
if self.is_first_stage or self.is_last_stage:
if self._valid_micro_batch(micro_batch_id):
cmds.append(LoadMicroBatch(recv_buf))
if _is_even(self.stage_id):
if self._valid_stage(self.next_stage):
if self._valid_micro_batch(micro_batch_id - 1):
cmds.append(SendActivation(send_buf))
if self._valid_stage(self.prev_stage):
if self._valid_micro_batch(micro_batch_id):
cmds.append(RecvActivation(recv_buf))
else:
if self._valid_stage(self.prev_stage):
if self._valid_micro_batch(micro_batch_id):
cmds.append(RecvActivation(recv_buf))
if self._valid_stage(self.next_stage):
if self._valid_micro_batch(micro_batch_id - 1):
cmds.append(SendActivation(send_buf))
if self._valid_micro_batch(micro_batch_id):
cmds.append(ForwardPass(recv_buf))
yield cmds
def num_pipe_buffers(self):
"""Only two pipeline buffers are required for inferencing.
Returns:
``2``
"""
return 2
class TrainSchedule(PipeSchedule):
"""A schedule for training a batch using hybrid parallelism.
Pipeline parallelism is extracted through gradient accumulation and thus
convergence follows that of a data parallel approach with the same batch
size.
"""
def steps(self):
""""""
prev_micro_batch_id = -1
total_steps = 2 * (self.micro_batches + self.stages - 1)
for step_id in range(total_steps):
# Map the step of the pipeline to the micro-batch id and also whether it is a
# forward or backward pass step.
micro_batch_id, is_forward = self._step_to_micro_batch(step_id)
if self._valid_micro_batch(prev_micro_batch_id):
prev_buffer = self._buffer_idx(prev_micro_batch_id)
if self._valid_micro_batch(micro_batch_id):
curr_buffer = self._buffer_idx(micro_batch_id)
cmds = []
# Exchange activations
if is_forward:
if self._valid_micro_batch(prev_micro_batch_id) and self._valid_stage(self.prev_stage):
cmds.append(SendGrad(prev_buffer))
if self._valid_micro_batch(micro_batch_id) and self._valid_stage(self.prev_stage):
cmds.append(RecvActivation(curr_buffer))
else:
if self._valid_micro_batch(micro_batch_id) and self._valid_stage(self.next_stage):
cmds.append(RecvGrad(curr_buffer))
if self._valid_micro_batch(prev_micro_batch_id) and self._valid_stage(self.next_stage):
cmds.append(SendActivation(prev_buffer))
# First/last stage loads
if self.stage_id == 0 or self.stage_id == self.stages - 1:
if is_forward and self._valid_micro_batch(micro_batch_id):
cmds.append(LoadMicroBatch(curr_buffer))
# Computation
if self._valid_micro_batch(micro_batch_id):
if is_forward:
cmds.append(ForwardPass(curr_buffer))
else:
cmds.append(BackwardPass(curr_buffer))
# Model step at the end of the batch
if step_id == total_steps - 1:
cmds.append(ReduceTiedGrads())
cmds.append(ReduceGrads())
cmds.append(OptimizerStep())
# Prepare state for next time
prev_micro_batch_id = micro_batch_id
yield cmds
def num_pipe_buffers(self):
"""Return the number of pipeline buffers required for this stage.
This is equivalent to the maximum number of in-flight forward passes,
since we need to remember the activations of forward passes in order
to run backpropagation. For synchronous 1F1B, this is equivalent to
the index difference between this stage and the last stage.
"""
buffers = min(self.stages - self.stage_id, self.micro_batches)
return max(2, buffers)
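    # Worked example of the buffer-count formula above (illustrative numbers): with
    # 4 stages and 8 micro-batches, stage 0 needs max(2, min(4 - 0, 8)) = 4 buffers
    # while the last stage needs only max(2, min(4 - 3, 8)) = 2, matching the number
    # of forward passes that can be in flight before their backward passes retire them.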
def _step_to_micro_batch(self, step_id):
if _is_even(step_id) and _is_even(self.stage_id):
micro_batch_id = self._even_step_forward_id(step_id)
is_forward = True
elif _is_odd(step_id) and _is_odd(self.stage_id):
micro_batch_id = self._odd_step_forward_id(step_id)
is_forward = True
elif _is_even(step_id) and _is_odd(self.stage_id):
micro_batch_id = self._even_step_backward_id(step_id)
is_forward = False
elif _is_odd(step_id) and _is_even(self.stage_id):
micro_batch_id = self._odd_step_backward_id(step_id)
is_forward = False
else:
assert False
return micro_batch_id, is_forward
def _even_step_forward_id(self, step_id):
base = step_id // 2
micro_batch_id = int(base - self.stage_id // 2)
return micro_batch_id
def _odd_step_forward_id(self, step_id):
base = (step_id - 1) // 2
micro_batch_id = int(base - self.stage_id // 2)
return micro_batch_id
def _even_step_backward_id(self, step_id):
base = step_id // 2
micro_batch_id = int(base - self.stages + (self.stage_id + 1) // 2)
return micro_batch_id
def _odd_step_backward_id(self, step_id):
base = ((step_id - 1) // 2) - self.stages + 1
micro_batch_id = int(base + self.stage_id // 2)
return micro_batch_id
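    # Worked example of the four helpers above (illustrative 2-stage, 2-micro-batch
    # run, so total_steps = 6): stage 0 maps its valid steps to F0, F1, B0, B1 while
    # stage 1 maps them to F0, B0, F1, B1, i.e. the classic 1F1B interleaving; steps
    # whose micro-batch id falls outside [0, micro_batches) are simply idle bubbles.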
class DataParallelSchedule(PipeSchedule):
"""An example schedule that trains using traditional data parallelism with gradient
accumulation.
"""
def steps(self):
""""""
for step_id in range(self.micro_batches):
cmds = [
LoadMicroBatch(buffer_id=0),
ForwardPass(buffer_id=0),
BackwardPass(buffer_id=0),
]
if step_id == self.micro_batches - 1:
cmds.extend([
ReduceGrads(),
OptimizerStep(),
])
yield cmds
def num_pipe_buffers(self):
"""Only one pipeline buffer needed.
"""
return 1
class PipeInstruction:
"""Base class for all instructions to be executed by the pipeline engine.
All keyword arguments are stored as members similar to a ``namedtuple``. These are
then accessible to the :class:`PipeEngine` during execution.
Args:
kwargs (optional): keyword arguments to store as members
"""
def __init__(self, **kwargs):
self.name = self.__class__.__name__
self.kwargs = kwargs
for key, val in kwargs.items():
setattr(self, key, val)
def __repr__(self):
return call_to_str(self.name, **self.kwargs)
class OptimizerStep(PipeInstruction):
"""Performs one step with the optimizer and zeros gradients.
.. note:: Should be issued after :class:`ReduceGrads` and :class:`ReduceTiedGrads`.
.. note:: Can be a synchronization point among data-parallel ranks.
"""
pass
class ReduceGrads(PipeInstruction):
"""Reduce the computed gradients among data-parallel processes within the stage.
"""
pass
class ReduceTiedGrads(PipeInstruction):
"""Reduce the computed gradients of tied modules within a pipeline-parallel group.
.. warning::
The stages included in this synchronization point are not known until
the model is partitioned among pipeline stages. In the worst case, it
includes all pipeline stages. This instruction should be scheduled
carefully to avoid deadlocks.
"""
pass
class BufferOpInstruction(PipeInstruction):
"""A pipeline instruction that operates on pipeline buffer(s).
Args:
        buffer_id (int): the index of the pipeline buffer(s) to modify.
"""
def __init__(self, buffer_id, **kwargs):
super().__init__(buffer_id=buffer_id, **kwargs)
# IO
class LoadMicroBatch(BufferOpInstruction):
"""Load a micro-batch into a buffer.
Roughly:
.. code-block:: python
buffers['inputs'][buffer_id] = next(data_iter)
"""
pass
# Compute
class ForwardPass(BufferOpInstruction):
"""Compute a forward pass.
Roughly:
.. code-block:: python
buffers['outputs'][buffer_id] = forward(buffers['inputs'][buffer_id])
"""
pass
class BackwardPass(BufferOpInstruction):
"""Compute a backward pass and accumulate gradients.
Roughly:
.. code-block:: python
outputs = buffers['outputs'][buffer_id]
gradients = buffers['gradients'][buffer_id]
torch.autograd.backward(tensors=outputs,
grad_tensors=gradients)
"""
pass
# Communication
class SendActivation(BufferOpInstruction):
"""Send activations to the next stage in the pipeline.
Roughly:
.. code-block:: python
send(buffers['outputs'][buffer_id])
.. note::
The communication is blocking and must be paired with a :class:`RecvActivation`
on the next pipeline stage to avoid deadlock.
"""
pass
class RecvActivation(BufferOpInstruction):
"""Receive activations from the previous stage in the pipeline.
Roughly:
.. code-block:: python
buffers['inputs'][buffer_id] = recv()
.. note::
The communication is blocking and must be paired with a :class:`SendActivation`
on the previous pipeline stage to avoid deadlock.
"""
pass
class SendGrad(BufferOpInstruction):
"""Send computed gradients to the previous pipeline stage.
with respect to the received activations
.. note::
Only received tensors with ``requires_grad==True`` will produce gradients.
Missing gradients will be replaced with ``None`` on the receiving stage.
.. note::
The communication is blocking and must be paired with a :class:`RecvGrad`
on the previous pipeline stage to avoid deadlock.
"""
pass
class RecvGrad(BufferOpInstruction):
"""Receive computed gradients the next pipeline stage.
.. note::
Only activations with ``requires_grad==True`` will produce gradients.
Missing gradients will be replaced with ``None``.
.. note::
The communication is blocking and must be paired with a :class:`SendGrad`
on the next pipeline stage to avoid deadlock.
"""
pass
def _is_even(x):
return x % 2 == 0
def _is_odd(x):
return x % 2 != 0
| 15,546 | 30.408081 | 103 | py |
DeepSpeed | DeepSpeed-master/deepspeed/moe/layer.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.utils import log_dist
from deepspeed.utils import groups
from .sharded_moe import MOELayer, TopKGate
from .experts import Experts
import typing
class MoE(torch.nn.Module):
"""Initialize an MoE layer.
Arguments:
hidden_size (int): the hidden dimension of the model, importantly this is also the input and output dimension.
expert (torch.nn.Module): the torch module that defines the expert (e.g., MLP, torch.linear).
num_experts (int, optional): default=1, the total number of experts per layer.
ep_size (int, optional): default=1, number of ranks in the expert parallel world or group.
k (int, optional): default=1, top-k gating value, only supports k=1 or k=2.
capacity_factor (float, optional): default=1.0, the capacity of the expert at training time.
eval_capacity_factor (float, optional): default=1.0, the capacity of the expert at eval time.
min_capacity (int, optional): default=4, the minimum capacity per expert regardless of the capacity_factor.
use_residual (bool, optional): default=False, make this MoE layer a Residual MoE (https://arxiv.org/abs/2201.05596) layer.
noisy_gate_policy (str, optional): default=None, noisy gate policy, valid options are 'Jitter', 'RSample' or 'None'.
drop_tokens (bool, optional): default=True, whether to drop tokens - (setting to False is equivalent to infinite capacity).
use_rts (bool, optional): default=True, whether to use Random Token Selection.
use_tutel (bool, optional): default=False, whether to use Tutel optimizations (if installed).
enable_expert_tensor_parallelism (bool, optional): default=False, whether to use tensor parallelism for experts
"""
def __init__(self,
hidden_size,
expert,
num_experts=1,
ep_size=1,
k=1,
capacity_factor=1.,
eval_capacity_factor=1.,
min_capacity=4,
use_residual=False,
noisy_gate_policy: typing.Optional[str] = None,
drop_tokens: bool = True,
use_rts=True,
use_tutel: bool = False,
enable_expert_tensor_parallelism: bool = False):
super(MoE, self).__init__()
self.use_residual = use_residual
self.enable_expert_tensor_parallelism = enable_expert_tensor_parallelism
assert num_experts % ep_size == 0, f"Number of experts ({num_experts}) should be divisible by expert parallel size ({ep_size})"
self.ep_size = ep_size
self.expert_group_name = f"ep_size_{self.ep_size}"
self.num_experts = num_experts
self.num_local_experts = num_experts // self.ep_size
log_dist(
f'Creating MoE layer with num_experts: {num_experts} | num_local_experts: {self.num_local_experts} | expert_parallel_size: {self.ep_size}',
[0])
assert noisy_gate_policy is None or noisy_gate_policy in ['None', 'Jitter', 'RSample'], \
'Unsupported noisy_gate_policy: ' + noisy_gate_policy
experts = Experts(expert, self.num_local_experts, self.expert_group_name)
self.deepspeed_moe = MOELayer(TopKGate(hidden_size, num_experts, k, capacity_factor, eval_capacity_factor,
min_capacity, noisy_gate_policy, drop_tokens, use_rts),
experts,
self.expert_group_name,
self.ep_size,
self.num_local_experts,
use_tutel=use_tutel)
if self.use_residual:
self.mlp = expert
# coefficient is used for weighted sum of the output of expert and mlp
self.coefficient = torch.nn.Linear(hidden_size, 2)
def set_deepspeed_parallelism(self):
self._create_process_groups()
def _create_process_groups(self):
# Create process group for a layer if needed
if self.expert_group_name not in groups._get_expert_parallel_group_dict():
print(f"No existing process group found, creating a new group named: {self.expert_group_name}")
if (groups.mpu is None) or (not self.enable_expert_tensor_parallelism):
# Condition 1 - no groups.mpu means no tensor parallelism
# Condition 2 - disabling expert tensor parallelism on purpose
groups._create_expert_and_data_parallel(self.ep_size)
else:
# expert tensor parallelism is enabled
groups._create_expert_data_and_model_parallel(self.ep_size, mpu=groups.mpu)
# Set the group handle for the MOELayer (deepspeed_moe) object
self.deepspeed_moe._set_ep_group(groups._get_expert_parallel_group(self.expert_group_name))
def forward(self, hidden_states, used_token=None):
""" MoE forward
Arguments:
hidden_states (Tensor): input to the layer
used_token (Tensor, optional): default: None, mask only used tokens
Returns:
A tuple including output, gate loss, and expert count.
* output (Tensor): output of the model
* l_aux (Tensor): gate loss value
* exp_counts (int): expert count
"""
output = self.deepspeed_moe(hidden_states, used_token)
if self.use_residual:
# Residual MoE
output_mlp = self.mlp(hidden_states)
if type(output_mlp) is tuple:
output_mlp = output_mlp[0] # Ignore the bias term for now
coef = self.coefficient(hidden_states)
coef = torch.nn.functional.softmax(coef, dim=-1)
output = output * coef[..., 0:1] + output_mlp * coef[..., 1:]
return output, self.deepspeed_moe.l_aux, self.deepspeed_moe.exp_counts
| 6,082 | 47.664 | 151 | py |
DeepSpeed | DeepSpeed-master/deepspeed/moe/mappings.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# The file has been adapted from the following Megatron-LM file:
# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/mpu/mappings.py
# Git commit hash: 9dc3c42a84aa656f583703cf8b6b4f79f712b796
# We retain the following copyright from the original files:
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import deepspeed
def _gather_tokens(input_, dim=0):
"""Gather tensors and concatenate them along a dimension"""
mpu = deepspeed.utils.groups.mpu
input_ = input_.contiguous()
# Size and dimension.
rank = mpu.get_tensor_model_parallel_rank()
tensor_list = [torch.empty_like(input_) for _ in range(mpu.get_tensor_model_parallel_world_size())]
tensor_list[rank] = input_
deepspeed.comm.all_gather(tensor_list, input_, group=mpu.get_tensor_model_parallel_group())
# Note: torch.cat already creates a contiguous tensor.
output = torch.cat(tensor_list, dim=dim).contiguous()
return output
def _drop_tokens(input_, dim=0):
"""Divide a tensor among the tensor parallel ranks"""
mpu = deepspeed.utils.groups.mpu
total_chunks = mpu.get_tensor_model_parallel_world_size()
this_chunk = mpu.get_tensor_model_parallel_rank()
assert input_.shape[
dim] % total_chunks == 0, f"input dimension {dim} ({input_.shape[dim]}) is not divisible by tensor parallel world size ({total_chunks})"
chunk_size = input_.shape[dim] // total_chunks
return torch.narrow(input_, dim, this_chunk * chunk_size, chunk_size)
class _GatherTokens(torch.autograd.Function):
"""All gather tokens among the tensor parallel ranks"""
@staticmethod
def symbolic(graph, input_, dim):
return _gather_tokens(input_, dim)
@staticmethod
def forward(ctx, input_, dim):
ctx.dim = dim
return _gather_tokens(input_, dim)
@staticmethod
def backward(ctx, grad_output):
return _drop_tokens(grad_output, ctx.dim), None
class _DropTokens(torch.autograd.Function):
"Divide tokens equally among the tensor parallel ranks"
@staticmethod
def symbolic(graph, input_, dim):
return _drop_tokens(input_, dim)
@staticmethod
def forward(ctx, input_, dim):
ctx.dim = dim
return _drop_tokens(input_, dim)
@staticmethod
def backward(ctx, input_):
return _gather_tokens(input_, ctx.dim), None
def gather_tokens(input_, dim=0):
mpu = deepspeed.utils.groups.mpu
if mpu is None or mpu.get_tensor_model_parallel_world_size() == 1:
# no tensor parallelism for non-experts
return input_
return _GatherTokens.apply(input_, dim)
def drop_tokens(input_, dim=0):
mpu = deepspeed.utils.groups.mpu
if mpu is None or mpu.get_tensor_model_parallel_world_size() == 1:
# no tensor parallelism for non-experts
return input_
return _DropTokens.apply(input_, dim)
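# Shape sketch for the two public helpers above (assumed tensor-parallel world size of
# 2 for illustration): with an activation of shape [4, 8, 1024] on every rank,
# drop_tokens(x, dim=1) keeps a distinct [4, 4, 1024] chunk per rank, and
# gather_tokens(y, dim=1) concatenates the chunks back into [4, 8, 1024]; with a world
# size of 1 both helpers are no-ops and return the input unchanged.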
| 3,529 | 31.990654 | 144 | py |
DeepSpeed | DeepSpeed-master/deepspeed/moe/sharded_moe.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
The file has been adapted from two fairscale files:
(1) https://github.com/facebookresearch/fairscale/blob/master/fairscale/nn/moe/moe_layer.py
(2) https://github.com/facebookresearch/fairscale/blob/master/fairscale/nn/moe/top2gate.py
Git commit hash: 34df606902a240567a0d898037ece55c2f1336cf
We retain the following license from the original files:
"""
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from deepspeed.utils.timer import SynchronizedWallClockTimer
from deepspeed.utils import logger
from typing import Callable, Dict, TYPE_CHECKING, Any, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import Module
import torch.nn.functional as F
from deepspeed.utils import groups
from .mappings import drop_tokens, gather_tokens
if TYPE_CHECKING:
Base = Module[Tensor]
else:
Base = Module
uniform_map: Dict[torch.device, Callable] = {}
gumbel_map: Dict[torch.device, Callable] = {}
exp_selection_uniform_map: Dict[torch.device, Callable] = {}
try:
# To enable Tutel MoE optimizations:
# python3 -m pip install --user --upgrade git+https://github.com/microsoft/tutel@v0.1.x
from tutel import moe as tutel_moe
TUTEL_INSTALLED = True
except:
# Fail silently so we don't spam logs unnecessarily if user isn't using tutel
TUTEL_INSTALLED = False
pass
def multiplicative_jitter(x, device: torch.device, epsilon=1e-2):
"""
    Modified from the Switch Transformer paper and the Mesh Transformers implementation.
Multiply values by a random number between 1-epsilon and 1+epsilon.
Makes models more resilient to rounding errors introduced by bfloat16.
This seems particularly important for logits.
Args:
x: a torch.tensor
device: torch.device
epsilon: a floating point value
Returns:
a jittered x.
"""
if epsilon == 0:
return x
uniform = uniform_map.get(device)
if uniform is None:
uniform = torch.distributions.uniform.Uniform(low=torch.tensor(1.0 - epsilon, device=device),
high=torch.tensor(1.0 + epsilon,
device=device)).rsample # type: ignore
uniform_map[device] = uniform
return x * uniform(x.shape)
def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor:
gumbel = gumbel_map.get(device)
if gumbel is None:
one = torch.tensor(1.0, device=device)
zero = torch.tensor(0.0, device=device)
gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore
gumbel_map[device] = gumbel
return gumbel(shape)
from deepspeed import comm as dist
# einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity
# See https://arxiv.org/pdf/2006.16668.pdf for details.
# Based on https://github.com/pytorch/pytorch/pull/40762
class _AllToAll(torch.autograd.Function):
@staticmethod
def forward(
ctx: Any,
# TODO: replace with DS process group
group: torch.distributed.ProcessGroup,
input: Tensor) -> Tensor: # type: ignore
ctx.group = group
input = input.contiguous()
output = torch.empty_like(input)
dist.all_to_all_single(output, input, group=group)
return output
@staticmethod
def backward(ctx: Any, *grad_output: Tensor) -> Tuple[None, Tensor]:
return (None, _AllToAll.apply(ctx.group, *grad_output))
# einsum rewrites are on par or more performant
# switch can be bubbled up in future
USE_EINSUM = True
# einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity
# See https://arxiv.org/pdf/2006.16668.pdf for details.
def einsum(rule, a, b):
if USE_EINSUM:
return torch.einsum(rule, a, b)
elif rule == 's,se->se':
return a.reshape(a.shape[0], -1) * b
elif rule == 'se,sc->sec':
return a.unsqueeze(2) * b.unsqueeze(1)
elif rule == 'se,se->s':
return torch.bmm(a.unsqueeze(1), b.unsqueeze(2)).reshape(-1)
elif rule == 'sec,sm->ecm':
s = a.shape[0]
e = a.shape[1]
c = a.shape[2]
m = b.shape[1]
return torch.matmul(a.reshape(s, -1).t(), b).reshape(e, c, m)
elif rule == 'sec,ecm->sm':
return torch.matmul(a.reshape(a.shape[0], -1), b.reshape(-1, b.shape[-1]))
elif rule == 'ks,ksm->sm':
k = b.shape[0]
s = b.shape[1]
m = b.shape[2]
# [k, s] -> [s, k] -> [s, 1, k]
a = a.t().unsqueeze(1)
# [k,s,m] -> [k, sm] -> [sm, k] -> [s, m, k]
b = b.reshape(k, -1).t().reshape(s, m, k)
# bmm([s, 1, k], [s, m, k]^t) -> [s, m, 1]
return torch.bmm(a, b.transpose(1, 2)).squeeze(2)
else:
return torch.einsum(rule, a, b)
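# For example, the dispatch rule 'sec,sm->ecm' used below combines a (tokens, experts,
# capacity) mask with (tokens, model_dim) activations into per-expert, per-slot buffers
# of shape (experts, capacity, model_dim); the hand-written branch reshapes this into a
# single matmul, which is on par with or faster than torch.einsum on some stacks.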
# The following functions are extracted and scripted
# because otherwise during a torch.jit.trace, the non-Tensor
# values used in the calculations get recorded as constants.
# torch.jit.script coerces them into Tensors and preserves
# their dynamic shapes. This enables ONNX export.
# We can't script the entire top1gating function because it
# includes stateful caching logic which is incompatible with ONNX.
@torch.jit.script
def _capacity(gates: Tensor, capacity_factor: Tensor, min_capacity: Tensor) -> Tensor:
# gates has shape of SE
num_tokens = gates.shape[0]
num_experts = gates.shape[1]
# to(torch.int64) works around a bug in torch.onnx.export:
# it should cast k to int64 when converting torch.topk but it doesn't.
capacity = torch.ceil((num_tokens / num_experts) * capacity_factor).to(torch.int64)
if capacity < min_capacity:
capacity = min_capacity.to(torch.int64)
return capacity
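# Worked example of the capacity formula above (illustrative numbers): with 16 tokens
# routed over 4 experts and capacity_factor=1.25, capacity = ceil((16 / 4) * 1.25) = 5
# slots per expert; if min_capacity were 8, the result would be raised to 8 instead.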
@torch.jit.script
def _top_idx(source, k):
return torch.topk(source, k=k, dim=0)[1]
@torch.jit.script
def _one_hot_to_float(x, num_classes):
return F.one_hot(x, num_classes=num_classes).float()
def top1gating(logits: Tensor,
capacity_factor: float,
min_capacity: int,
used_token: Tensor = None,
noisy_gate_policy: Optional[str] = None,
drop_tokens: bool = True,
use_rts: bool = True,
use_tutel: bool = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Implements Top1Gating on logits."""
if noisy_gate_policy == 'RSample':
logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)
# everything is in fp32 in this function
gates = F.softmax(logits, dim=1)
capacity = _capacity(gates, torch.tensor(capacity_factor), torch.tensor(min_capacity))
# Create a mask for 1st's expert per token
# noisy gating
indices1_s = torch.argmax(logits_w_noise if noisy_gate_policy == 'RSample' else gates, dim=1)
num_experts = int(gates.shape[1])
mask1 = F.one_hot(indices1_s, num_classes=num_experts)
# mask only used tokens
if used_token is not None:
mask1 = einsum("s,se->se", used_token, mask1)
# gating decisions
exp_counts = torch.sum(mask1, dim=0).detach().to('cpu')
# if we don't want to drop any tokens
if not drop_tokens:
new_capacity = torch.max(exp_counts).to(logits.device)
dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=dist.get_world_group())
capacity = new_capacity
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.float(), dim=0)
l_aux = torch.sum(me * ce) * num_experts
# Random Token Selection
if use_rts:
uniform = exp_selection_uniform_map.get(logits.device)
if uniform is None:
uniform = torch.distributions.uniform.Uniform(low=torch.tensor(0.0, device=logits.device),
high=torch.tensor(1.0, device=logits.device)).rsample
exp_selection_uniform_map[logits.device] = uniform
mask1_rand = mask1 * uniform(mask1.shape)
else:
mask1_rand = mask1
assert logits.shape[
        0] >= min_capacity, "No. of tokens (batch-size) should be greater than or equal to min_capacity. Either set min_capacity to 0 or increase your batch size."
top_idx = _top_idx(mask1_rand, capacity)
new_mask1 = mask1 * torch.zeros_like(mask1).scatter_(0, top_idx, 1)
mask1 = new_mask1
if use_tutel:
# Tutel doesn't support index values masked with zero
# so we need to replace masked indices with -1
indices_mask = mask1.sum(dim=1) * num_experts - 1
indices1_s = torch.min(indices1_s, indices_mask)
# Compute locations in capacity buffer
if use_tutel:
locations1 = tutel_moe.fast_cumsum_sub_one(mask1)
else:
locations1 = torch.cumsum(mask1, dim=0) - 1
if use_tutel:
gates1_s = (gates * mask1).sum(dim=1)
locations1_s = torch.sum(locations1 * mask1, dim=1)
return l_aux, capacity, num_experts, [
indices1_s,
], [
locations1_s,
], [
gates1_s,
], exp_counts
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
# Normalize gate probabilities
mask1_float = mask1.float()
gates = gates * mask1_float
locations1_sc = _one_hot_to_float(locations1_s, capacity)
combine_weights = einsum("se,sc->sec", gates, locations1_sc)
dispatch_mask = combine_weights.bool()
return l_aux, combine_weights, dispatch_mask, exp_counts
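# Shape summary for the non-Tutel return path above (symbols from the einsum comment):
# for S tokens, E experts and capacity C, `combine_weights` is a dense (S, E, C) tensor
# of routing weights, `dispatch_mask` is its boolean nonzero pattern, and `exp_counts`
# holds the number of tokens initially routed to each expert before capacity truncation.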
def top2gating(logits: Tensor, capacity_factor: float, min_capacity: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Implements Top2Gating on logits."""
# everything is in fp32 in this function
gates = F.softmax(logits, dim=1)
capacity = _capacity(gates, torch.tensor(capacity_factor * 2), torch.tensor(min_capacity))
# Create a mask for 1st's expert per token
indices1_s = torch.argmax(gates, dim=1)
num_experts = int(gates.shape[1])
mask1 = F.one_hot(indices1_s, num_classes=num_experts)
# Create a mask for 2nd's expert per token using Gumbel-max trick
# https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/
logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)
# Replace top-expert with min value
logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float("-inf"))
indices2_s = torch.argmax(logits_except1, dim=1)
mask2 = F.one_hot(indices2_s, num_classes=num_experts)
# Compute locations in capacity buffer
locations1 = torch.cumsum(mask1, dim=0) - 1
locations2 = torch.cumsum(mask2, dim=0) - 1
# Update 2nd's location by accounting for locations of 1st
locations2 += torch.sum(mask1, dim=0, keepdim=True)
# gating decisions
exp_counts = torch.sum(mask1, dim=0).detach().to('cpu')
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.float(), dim=0)
l_aux = torch.mean(me * ce) * num_experts * num_experts
# Remove locations outside capacity from mask
mask1 *= torch.lt(locations1, capacity)
mask2 *= torch.lt(locations2, capacity)
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
locations2_s = torch.sum(locations2 * mask2, dim=1)
# Normalize gate probabilities
mask1_float = mask1.float()
mask2_float = mask2.float()
gates1_s = einsum("se,se->s", gates, mask1_float)
gates2_s = einsum("se,se->s", gates, mask2_float)
denom_s = gates1_s + gates2_s
# Avoid divide-by-zero
denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)
gates1_s /= denom_s
gates2_s /= denom_s
# Calculate combine_weights and dispatch_mask
gates1 = einsum("s,se->se", gates1_s, mask1_float)
gates2 = einsum("s,se->se", gates2_s, mask2_float)
locations1_sc = _one_hot_to_float(locations1_s, capacity)
locations2_sc = _one_hot_to_float(locations2_s, capacity)
combine1_sec = einsum("se,sc->sec", gates1, locations1_sc)
combine2_sec = einsum("se,sc->sec", gates2, locations2_sc)
combine_weights = combine1_sec + combine2_sec
dispatch_mask = combine_weights.bool()
return l_aux, combine_weights, dispatch_mask, exp_counts
class TopKGate(Module):
"""Gate module which implements Top2Gating as described in Gshard_.
::
gate = TopKGate(model_dim, num_experts)
l_aux, combine_weights, dispatch_mask = gate(input)
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
model_dim (int):
size of model embedding dimension
        num_experts (int):
number of experts in model
"""
wg: torch.nn.Linear
def __init__(self,
model_dim: int,
num_experts: int,
k: int = 1,
capacity_factor: float = 1.0,
eval_capacity_factor: float = 1.0,
min_capacity: int = 8,
noisy_gate_policy: Optional[str] = None,
drop_tokens: bool = True,
use_rts: bool = True) -> None:
super().__init__()
# Only top-1 and top-2 are supported at the moment.
if k != 1 and k != 2:
raise ValueError('Only top-1 and top-2 gatings are supported.')
self.wg = torch.nn.Linear(model_dim, num_experts, bias=False).float()
self.k = k
self.capacity_factor = capacity_factor
self.eval_capacity_factor = eval_capacity_factor
self.min_capacity = min_capacity
self.noisy_gate_policy = noisy_gate_policy
self.timers = SynchronizedWallClockTimer()
self.wall_clock_breakdown = False
self.gate_time = 0.0
self.drop_tokens = drop_tokens
self.use_rts = use_rts
def forward(self,
input: torch.Tensor,
used_token: torch.Tensor = None,
use_tutel: bool = False) -> Tuple[Tensor, Tensor, Tensor]: # type: ignore
if self.wall_clock_breakdown:
self.timers('TopKGate').start()
if self.wg.weight.dtype != torch.float32:
self.wg = self.wg.float()
input_fp32 = input.float()
# input jittering
if self.noisy_gate_policy == 'Jitter' and self.training:
input_fp32 = multiplicative_jitter(input_fp32, device=input.device)
logits = self.wg(input_fp32)
if self.k == 1:
gate_output = top1gating(logits, self.capacity_factor if self.training else self.eval_capacity_factor,
self.min_capacity, used_token, self.noisy_gate_policy if self.training else None,
self.drop_tokens, self.use_rts, use_tutel)
else:
gate_output = top2gating(logits, self.capacity_factor if self.training else self.eval_capacity_factor,
self.min_capacity)
if self.wall_clock_breakdown:
self.timers('TopKGate').stop()
self.gate_time = self.timers('TopKGate').elapsed(reset=False)
return gate_output
class MOELayer(Base):
"""MOELayer module which implements MixtureOfExperts as described in Gshard_.
::
gate = TopKGate(model_dim, num_experts)
moe = MOELayer(gate, expert)
output = moe(input)
l_aux = moe.l_aux
.. Gshard_: https://arxiv.org/pdf/2006.16668.pdf
Args:
gate (torch.nn.Module):
gate network
expert (torch.nn.Module):
expert network
"""
def __init__(self,
gate: Module,
experts: Module,
ep_group_name,
ep_size,
num_local_experts: int,
use_tutel: bool = False) -> None:
super().__init__()
self.gate = gate
self.experts = experts
self.ep_group = None
self.ep_size = ep_size
self.ep_group_name = ep_group_name
self.num_local_experts = num_local_experts
self.time_falltoall = 0.0
self.time_salltoall = 0.0
self.time_moe = 0.0
self.timers = SynchronizedWallClockTimer()
self.wall_clock_breakdown = False
self.use_tutel = use_tutel and TUTEL_INSTALLED and gate.k == 1
if self.use_tutel:
logger.info('Using Tutel optimizations.')
elif use_tutel and not TUTEL_INSTALLED:
logger.warning("Tutel optimization requested but not installed. "
"Proceeding without Tutel.")
elif use_tutel and TUTEL_INSTALLED and gate.k != 1:
logger.warning("To enable Tutel optimization, use top-1 instead of top-2 gate. "
"Proceeding without Tutel.")
def _set_ep_group(self, ep_group):
self.ep_group = ep_group
def forward(self, *input: Tensor, **kwargs: Any) -> Tensor:
if self.wall_clock_breakdown:
self.timers('moe').start()
# Implement Algorithm 2 from GShard paper.
d_model = input[0].shape[-1]
# Initial implementation -> Reshape into S tokens by dropping sequence dimension.
# Reshape into G groups so that each group can distribute tokens equally
# group_size = kwargs['group_size'] if 'group_size' in kwargs.keys() else 1
reshaped_input = input[0].reshape(-1, d_model)
if self.use_tutel:
self.l_aux, C, E, indices_, locations_, gates_, self.exp_counts = self.gate(reshaped_input, input[1], True)
S, M = reshaped_input.size(0), reshaped_input.size(1)
if not hasattr(self, '_tutel_dispatcher'):
self._tutel_dispatcher = tutel_moe.fast_dispatcher(E, C, M, dispatch_dtype=reshaped_input.dtype)
self._tutel_dispatcher.update(indices_, locations_, gates_, capacity=C)
dispatched_input = self._tutel_dispatcher.encode(reshaped_input)
else:
self.l_aux, combine_weights, dispatch_mask, self.exp_counts = self.gate(reshaped_input, input[1])
dispatched_input = einsum("sec,sm->ecm", dispatch_mask.type_as(input[0]), reshaped_input)
if self.wall_clock_breakdown:
self.timers('falltoall').start()
if groups._get_expert_model_parallel_world_size() == 1:
# If the non-expert is tensor-parallel, it will create
# duplicate tokens on the tensor-parallel ranks.
# Since our experts are not tensor-parallel, these duplicates
# need to be dropped to ensure correctness.
# this also doubles up as a communication optimization as we are
# reducing the all-to-all communication volume.
dispatched_input = drop_tokens(dispatched_input, dim=1)
dispatched_input = _AllToAll.apply(self.ep_group, dispatched_input)
if self.wall_clock_breakdown:
self.timers('falltoall').stop()
self.time_falltoall = self.timers('falltoall').elapsed(reset=False)
# Re-shape after all-to-all: ecm -> gecm
dispatched_input = dispatched_input.reshape(self.ep_size, self.num_local_experts, -1, d_model)
expert_output = self.experts(dispatched_input)
if self.wall_clock_breakdown:
self.timers('salltoall').start()
expert_output = _AllToAll.apply(self.ep_group, expert_output)
if self.wall_clock_breakdown:
self.timers('salltoall').stop()
self.time_salltoall = self.timers('salltoall').elapsed(reset=False)
# Re-shape back: gecm -> ecm
expert_output = expert_output.reshape(self.ep_size * self.num_local_experts, -1, d_model)
if groups._get_expert_model_parallel_world_size() == 1:
# the dropped duplicate tokens need to be gathered on each
# tensor parallel rank again for the tensor-parallel
# non-expert of the next layer.
expert_output = gather_tokens(expert_output, dim=1)
if self.use_tutel:
combined_output = self._tutel_dispatcher.decode(expert_output.view(E * C, M))
else:
combined_output = einsum("sec,ecm->sm", combine_weights.type_as(input[0]), expert_output)
a = combined_output.reshape(input[0].shape)
if self.wall_clock_breakdown:
self.timers('moe').stop()
self.time_moe = self.timers('moe').elapsed(reset=False)
return a
| 20,621 | 36.494545 | 151 | py |
DeepSpeed | DeepSpeed-master/deepspeed/moe/utils.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from typing import List, Tuple, Dict
import torch
from .layer import MoE
def has_moe_layers(m):
has_moe = False
num_experts = 0
for _, module in m.named_modules():
if isinstance(module, MoE):
has_moe = True
num_experts = module.num_experts
break
return has_moe, num_experts
def is_moe_param(param: torch.Tensor) -> bool:
if hasattr(param, "allreduce") and not param.allreduce:
return True
return False
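# Expert parameters are tagged by deepspeed.moe.experts.Experts with `allreduce = False`
# and a `group_name`, which is what the check above relies on.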
def split_params_into_shared_and_expert_params(
        params: List[torch.nn.Parameter]) -> Tuple[List[torch.nn.Parameter], List[torch.nn.Parameter]]:
shared_params, expert_params = [], []
for p in params:
if is_moe_param(p):
expert_params.append(p)
else:
shared_params.append(p)
return shared_params, expert_params
def split_params_grads_into_shared_and_expert_params(
        group: List[torch.nn.Parameter]) -> Tuple[List[torch.nn.Parameter], List[torch.nn.Parameter]]:
"""Split grad of parameters into grads of non-expert params
and grads of expert params. This is useful while computing
grad-norms for clipping and overflow detection
    Args:
        group (List[torch.nn.Parameter]):
            The group of parameters to split
Returns:
Tuple[List[torch.nn.Parameter], List[torch.nn.Parameter]]:
list of gradients for non MoE params, list of gradients of MoE params
"""
expert_grads = []
shared_grads = []
for p in group:
if p.grad is not None:
if is_moe_param(p):
expert_grads.append(p.grad.to(p.dtype))
else:
shared_grads.append(p.grad.to(p.dtype))
return shared_grads, expert_grads
def split_params_into_different_moe_groups_for_optimizer(param_groups: Tuple[Dict],
max_group_size=178956971) -> Tuple[Dict]:
"""Split parameters into different MoE groups for optimizer
Args:
param_groups (Tuple[Dict]):
The list of parameter groups to split
Returns:
Tuple[Dict]:
list of MoE/non-MoE groups for optimizer
"""
if isinstance(param_groups, tuple):
param_groups = list(param_groups) # Tuple cannot be modified
elif isinstance(param_groups, dict):
param_groups = [param_groups]
elif not isinstance(param_groups, list):
raise ValueError(f"Unknown param group type of {type(param_groups)}")
# gather all data parallel group names
data_parallel_group_names = set()
for param_group in param_groups:
for param in param_group["params"]:
if is_moe_param(param):
data_parallel_group_names.add(param.group_name)
data_parallel_group_names = list(data_parallel_group_names)
group_moe = {}
    # Create the MoE param groups; parameter assignment happens in the next step
for param_group in param_groups:
group_moe[param_group['name']] = {}
for key in data_parallel_group_names:
group_moe[param_group['name']][key] = {}
group_moe[param_group['name']][key]['name'] = key
group_moe[param_group['name']][key]['moe'] = True
for ori_key in param_group.keys():
if ori_key != 'name':
if ori_key == 'params':
group_moe[param_group['name']][key][ori_key] = []
else:
group_moe[param_group['name']][key][ori_key] = param_group[ori_key]
# Assign param
for param_group in param_groups:
new_params = []
for param in param_group['params']:
if is_moe_param(param):
group_moe[param_group['name']][param.group_name]['params'].append(param)
# param_group['params'].remove(param)
else:
new_params.append(param)
param_group['params'] = new_params
# Flatten the moe groups
if max_group_size is not None:
for k, v in group_moe.items():
for k1, v1 in v.items():
cur_group = []
all_groups = []
size_of_cur_group = 0
for param in v1['params']:
if size_of_cur_group + param.numel() <= max_group_size:
cur_group.append(param)
size_of_cur_group += param.numel()
else:
all_groups.append(cur_group)
cur_group = [param]
size_of_cur_group = param.numel()
if cur_group:
all_groups.append(cur_group)
for group in all_groups:
new_dict = {}
for key, val in v1.items():
if key != 'params':
new_dict[key] = val
new_dict['params'] = group
param_groups.append(new_dict)
else:
for k, v in group_moe.items():
for k1, v1 in v.items():
param_groups.append(v1)
return tuple(param_groups)
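# Usage sketch (assumes expert params carry a `group_name`, as set by deepspeed.moe.experts.Experts,
# and that each input group has the 'name' key required by the grouping logic above):
#   param_groups = [{'name': 'base', 'params': list(model.parameters()), 'lr': 1e-4}]
#   param_groups = split_params_into_different_moe_groups_for_optimizer(param_groups)
#   optimizer = torch.optim.AdamW(param_groups)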
| 5,214 | 34.719178 | 98 | py |
DeepSpeed | DeepSpeed-master/deepspeed/moe/experts.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import copy
class Experts(torch.nn.Module):
def __init__(self, expert, num_local_experts=1, expert_group_name=None):
super(Experts, self).__init__()
self.deepspeed_experts = torch.nn.ModuleList([copy.deepcopy(expert) for i in range(num_local_experts)])
self.num_local_experts = num_local_experts
# TODO: revisit allreduce for moe.gate...
for expert in self.deepspeed_experts:
# TODO: Create param groups to handle expert + data case (e.g. param.group = moe_group)
for name, param in expert.named_parameters():
param.allreduce = False
param.group_name = expert_group_name
def forward(self, inputs):
chunks = inputs.chunk(self.num_local_experts, dim=1)
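        # dim=1 is the local-expert dimension of the dispatched input
        # (ep_size, num_local_experts, capacity, d_model), so each expert receives its own chunk.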
expert_outputs = []
for chunk, expert in zip(chunks, self.deepspeed_experts):
out = expert(chunk)
if type(out) is tuple:
out = out[0] # Ignore the bias term for now
expert_outputs += [out]
expert_output = torch.cat(expert_outputs, dim=1)
return expert_output
| 1,223 | 33 | 111 | py |
DeepSpeed | DeepSpeed-master/deepspeed/launcher/launch.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
DeepSpeed launcher. This is similar to torch's distributed.launch but supports
additional features such as arbitrary GPU exclusion.
deepspeed.launcher.launch is intended to be run on a single worker node and
will spawn several worker sub-processes depending on how many devices/ranks
are on the worker.
"""
import sys
import subprocess
import os
import json
import base64
import time
import signal
import psutil
from collections import defaultdict
from typing import Dict
from argparse import ArgumentParser, REMAINDER
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT
from ..nebula.constants import DLTS_POD_ENV_PATH
from ..utils import logger, get_numactl_cmd
from ..elasticity import is_torch_elastic_compatible
from .constants import ELASTIC_TRAINING_ID_DEFAULT
PID_FILE_BASEPATH = "/tmp"
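# Example invocation (illustrative values; the flags are defined in parse_args below):
#   python -m deepspeed.launcher.launch --node_rank=0 \
#       --master_addr=127.0.0.1 --master_port=29500 \
#       --world_info=<base64-encoded JSON> train.py --deepspeed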
def parse_args():
parser = ArgumentParser(description="DeepSpeed distributed training launch"
" utility that creates multiple distributed"
" processes on a single node")
# Optional arguments for the launch helper
parser.add_argument("--node_rank",
type=int,
default=0,
help="The rank of the node for multi-node distributed "
"training")
parser.add_argument("--master_addr",
default="127.0.0.1",
type=str,
help="Master node (rank 0)'s address, should be either"
" the IP address or the hostname of node 0, for"
" single node multi-proc training, the"
" --master_addr can simply be 127.0.0.1")
parser.add_argument("--master_port",
default=TORCH_DISTRIBUTED_DEFAULT_PORT,
type=int,
help="Master node (rank 0)'s free port that needs to "
"be used for communication during distributed "
"training")
parser.add_argument("--world_info", default="None", type=str, help="world info base64 encoded dictionary")
parser.add_argument("--module",
action="store_true",
help="Change each process to interpret the launch "
"script as a Python module, executing with the same "
"behavior as 'python -m'.")
parser.add_argument("--no_python",
action="store_true",
help="Skip prepending the training script with "
"'python' - just execute it directly.")
parser.add_argument("--enable_elastic_training", action="store_true", help="Enable elastic training support.")
parser.add_argument("--min_elastic_nodes", type=int, default=-1, help="Min number of nodes in elastic training.")
parser.add_argument("--max_elastic_nodes", type=int, default=-1, help="Max number of nodes in elastic training.")
parser.add_argument("--no_local_rank",
action="store_true",
help="Do not pass local_rank as an argument when calling "
"the user's training script.")
parser.add_argument("--save_pid",
type=int,
default=0,
help="main launching process pid, for internal pid tracking")
parser.add_argument("--enable_each_rank_log",
default="None",
type=str,
help="redirect the stdout and stderr from each rank into different log files")
parser.add_argument("--bind_cores_to_rank",
action="store_true",
help="Bind each rank to different cores of the host. "
"This improves host efficiency especially for CPU backend")
parser.add_argument("--bind_core_list",
type=str,
default=None,
                        help="List of cores to bind to with comma separated list of "
                        "numbers and ranges, e.g. 1,3-5,7 => [1,3,4,5,7]. When not "
                        "specified, all cores on the system will be used for rank binding")
# positional
parser.add_argument("training_script",
type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script")
# rest from the training program
parser.add_argument('training_script_args', nargs=REMAINDER)
return parser.parse_args()
# Adapted from https://psutil.readthedocs.io/en/latest/#kill-process-tree
def terminate_process_tree(pid):
process = psutil.Process(pid)
children = process.children(recursive=True)
children.append(process)
for child in children:
try:
child.terminate()
except psutil.NoSuchProcess:
pass
gone, alive = psutil.wait_procs(children, timeout=30)
for p in alive:
p.kill()
def main():
args = parse_args()
current_env = os.environ.copy()
for k in current_env.keys():
if "NCCL" in k:
logger.info(f"{args.node_rank} {k}={current_env[k]}")
if args.world_info == "None":
raise ValueError("world_info can not be None")
world_info = base64.urlsafe_b64decode(args.world_info)
world_info = json.loads(world_info)
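    # world_info decodes to a dict mapping each node name to its list of GPU ids,
    # e.g. {"worker-0": [0, 1, 2, 3]} (illustrative value).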
logger.info(f"WORLD INFO DICT: {world_info}")
node_list = list(world_info.keys())
args.nnodes = len(node_list)
local_node = node_list[args.node_rank]
local_gpu_ids = world_info[local_node]
num_local_procs = len(local_gpu_ids)
logger.info(f"nnodes={args.nnodes}, num_local_procs={num_local_procs}, node_rank={args.node_rank}")
global_rank_mapping = defaultdict(list)
curr_global_rank = 0
dist_world_size = 0
for node_id in node_list:
gids = world_info[node_id]
dist_world_size += len(gids)
for gid in gids:
global_rank_mapping[node_id].append(curr_global_rank)
curr_global_rank += 1
logger.info(f"global_rank_mapping={global_rank_mapping}")
logger.info(f"dist_world_size={dist_world_size}")
current_env["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, local_gpu_ids))
logger.info(f"Setting CUDA_VISIBLE_DEVICES={current_env['CUDA_VISIBLE_DEVICES']}")
# set PyTorch distributed related environmental variables
current_env["MASTER_ADDR"] = args.master_addr
current_env["MASTER_PORT"] = str(args.master_port)
current_env["WORLD_SIZE"] = str(dist_world_size)
current_env["CROSS_RANK"] = str(args.node_rank)
current_env["CROSS_SIZE"] = str(args.nnodes)
current_env["LOCAL_SIZE"] = str(num_local_procs)
if args.save_pid:
print(f"launcher pid: {os.getpid()}")
pid_file = None
if args.save_pid:
launcher_pid = os.getpid()
pid_file = os.path.join(PID_FILE_BASEPATH, f"{args.save_pid}.deepspeed")
assert not os.path.isfile(pid_file), "pid file exists but shouldn't"
with open(pid_file, 'w') as fd:
fd.write(f"{launcher_pid}")
if not is_torch_elastic_compatible():
if args.enable_elastic_training:
            logger.info("Disabling elastic training support as PyTorch version "
                        "should be greater than 1.11.x")
args.enable_elastic_training = False
if os.path.exists(DLTS_POD_ENV_PATH):
with open(DLTS_POD_ENV_PATH) as file:
lines = file.readlines()
lines = [line.rstrip() for line in lines]
for line in lines:
if line.startswith('export FC_TASKROLE_NAME') or line.startswith('export FC_TASK_INDEX'):
key_val = line.split()[1]
key, val = key_val.split('=')
current_env[key] = val
processes = []
cmd = []
if not args.enable_elastic_training:
if args.enable_each_rank_log != "None":
# prepare the log path and the file name prefix
if os.path.isfile(args.enable_each_rank_log):
raise ValueError(f"{args.enable_each_rank_log} should not be a file, it should be a directory.")
if not os.path.exists(args.enable_each_rank_log):
try:
os.makedirs(args.enable_each_rank_log)
except Exception as e:
print(e)
raise ValueError(f"unable to create directory {args.enable_each_rank_log} for each rank log.")
log_name_prefix = time.strftime("%Y%m%d%H%M%S", time.localtime())
for local_proc in range(0, num_local_procs):
# each process's rank
dist_rank = global_rank_mapping[local_node][local_proc]
local_rank = dist_rank % num_local_procs
current_env["RANK"] = str(dist_rank)
current_env["LOCAL_RANK"] = str(local_rank)
# spawn the processes
cmd = []
if args.bind_cores_to_rank:
cores_per_rank, numactl_cmd = get_numactl_cmd(args.bind_core_list, num_local_procs, local_rank)
current_env["OMP_NUM_THREADS"] = f"{cores_per_rank}"
cmd = cmd + numactl_cmd
if not args.no_python:
cmd.append(sys.executable)
cmd.append("-u")
if args.module:
cmd.append("-m")
else:
if args.module:
raise ValueError("Don't use both the '--no_python' flag"
" and the '--module' flag at the same time.")
cmd.append(args.training_script)
# A user may not want to pass local_rank as a keyword arg so we make this optional.
if not args.no_local_rank:
cmd.append(f"--local_rank={local_rank}")
cmd += args.training_script_args
if args.enable_each_rank_log != "None":
log_file = os.path.join(args.enable_each_rank_log, f"{log_name_prefix}_rank{dist_rank}.log")
log_fd = open(log_file, 'w')
process = subprocess.Popen(cmd, env=current_env, stdout=log_fd, stderr=log_fd)
else:
process = subprocess.Popen(cmd, env=current_env)
processes.append(process)
else:
from ..elasticity import DSElasticAgent
from torch.distributed.elastic.rendezvous import RendezvousParameters
from torch.distributed.elastic.agent.server.api import WorkerSpec
import torch.distributed.elastic.rendezvous.registry as rdzv_registry
from torch.distributed.elastic.multiprocessing import Std
if args.min_elastic_nodes == -1:
args.min_elastic_nodes = 1
if args.max_elastic_nodes == -1:
args.max_elastic_nodes = args.nnodes
assert args.max_elastic_nodes > 0 and args.min_elastic_nodes > 0, "Max and Min nodes should be positive"
current_env["NCCL_ASYNC_ERROR_HANDLING"] = str(1)
# Get config and arguments
cmd = []
if not args.no_python:
cmd = [sys.executable, "-u"]
if args.module:
cmd.append("-m")
else:
if args.module:
raise ValueError("Don't use both the '--no_python' flag"
" and the '--module' flag at the same time.")
cmd.append(args.training_script)
cmd += args.training_script_args
cmd_args = cmd[1:]
rdzv_configs: Dict[str, str] = {'timeout': 100}
run_id = os.environ.get("ELASTIC_RUN_ID", ELASTIC_TRAINING_ID_DEFAULT)
# Creating config for rendezvous class
rdzv_parameters = RendezvousParameters(backend='c10d',
endpoint=args.master_addr + ":" + str(args.master_port),
run_id=run_id,
min_nodes=args.min_elastic_nodes,
max_nodes=args.max_elastic_nodes,
**rdzv_configs)
spec = WorkerSpec(
role='trainer',
local_world_size=num_local_procs,
entrypoint=cmd[0],
args=cmd[1:],
rdzv_handler=rdzv_registry.get_rendezvous_handler(rdzv_parameters),
max_restarts=100,
monitor_interval=5,
redirects=Std.from_str("0"),
tee=Std.from_str("0"),
master_addr=None,
master_port=None,
)
agent = DSElasticAgent(spec, current_env)
agent.run()
sig_names = {2: "SIGINT", 15: "SIGTERM"}
last_return_code = None
def sigkill_handler(signum, frame):
for process in processes:
logger.info(f"Killing subprocess {process.pid}")
try:
terminate_process_tree(process.pid)
except Exception:
pass
if last_return_code is not None:
logger.error(f"{cmd} exits with return code = {last_return_code}")
sys.exit(last_return_code)
if signum in sig_names:
logger.info(f"Main process received {sig_names[signum]}, exiting")
if args.save_pid:
if os.path.isfile(pid_file):
os.remove(pid_file)
sys.exit(1)
# pass SIGINT/SIGTERM to children if the parent is being terminated
signal.signal(signal.SIGINT, sigkill_handler)
signal.signal(signal.SIGTERM, sigkill_handler)
alive_processes = set(processes)
while len(alive_processes):
finished_processes = []
for process in alive_processes:
if process.poll() is None:
# the process is still running
continue
else:
if process.returncode != 0:
last_return_code = process.returncode # for sigkill_handler
sigkill_handler(signal.SIGTERM, None) # not coming back
else:
# exited cleanly
logger.info(f"Process {process.pid} exits successfully.")
finished_processes.append(process)
alive_processes = set(alive_processes) - set(finished_processes)
time.sleep(1)
if __name__ == "__main__":
main()
| 14,654 | 40.16573 | 117 | py |
DeepSpeed | DeepSpeed-master/deepspeed/module_inject/module_quantize.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
def quantize_transformer_layer(orig_layer_impl, model, megatron=False, preln=False):
    """ Quantize the weights of bert-style transformer layers in a model
Arguments:
orig_layer_impl (torch.nn.Module): the original transformer layer implementation to look for,
e.g., transformers.modeling_bert.BertLayer.
model (torch.nn.Module): user's nn.module representing their model
megatron (bool): megatron model-parallel implementation (this is supported for inference only)
preln (bool): does the original layer implementation do pre or post layer norm?
    Note: For BERT-style models, injection is based on the DeepSpeed-Examples models unless the huggingface flag is set.
Returns:
Updated nn.module with quantized transformer layers
"""
def quantize_weight(weight):
return weight.to(torch.int8)
def megatron_layer_quantize(layer):
layer.attention.query_key_value.weight.data = quantize_weight(layer.attention.query_key_value.weight.data)
layer.attention.dense.weight.data = quantize_weight(layer.attention.dense.weight.data)
layer.mlp.dense_h_to_4h.weight.data = quantize_weight(layer.mlp.dense_h_to_4h.weight.data)
layer.mlp.dense_4h_to_h.weight.data = quantize_weight(layer.mlp.dense_4h_to_h.weight.data)
def bert_layer_quantize(layer):
layer.attention.self.query.weight.data = quantize_weight(layer.attention.self.query.weight.data)
layer.attention.self.key.weight.data = quantize_weight(layer.attention.self.key.weight.data)
layer.attention.self.value.weight.data = quantize_weight(layer.attention.self.value.weight.data)
layer.attention.output.dense.weight.data = quantize_weight(layer.attention.output.dense.weight.data)
if preln:
layer.intermediate.dense_act.weight.data = quantize_weight(layer.intermediate.dense_act.weight.data)
else:
layer.intermediate.dense.weight.data = quantize_weight(layer.intermediate.dense.weight.data)
layer.output.dense.weight.data = quantize_weight(layer.output.dense.weight.data)
def quantize_fn(child):
if megatron:
# Quantize megatron GPT2 / GPT3 trained model
megatron_layer_quantize(child)
else:
# Quantize either DeepSpeed or HuggingFace trained model
bert_layer_quantize(child)
return child
return quantize_module(model=model, orig_class=orig_layer_impl, quantize_fn=quantize_fn)
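# Usage sketch (assuming a HuggingFace BERT model; pass the layer class as in the docstring above):
#   from transformers.models.bert.modeling_bert import BertLayer
#   model = quantize_transformer_layer(BertLayer, model)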
def quantize_module(model, orig_class, quantize_fn):
policy = {orig_class: quantize_fn}
return _quantize_module(model, policy)
def _quantize_module(model, policies):
for name, child in model.named_children():
if child.__class__ in policies:
orig = repr(child)
setattr(model, name, policies[child.__class__](child))
new = getattr(model, name)
else:
_quantize_module(child, policies)
return model
| 3,107 | 41.575342 | 120 | py |
DeepSpeed | DeepSpeed-master/deepspeed/module_inject/load_checkpoint.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from torch import nn
from deepspeed.model_implementations.transformers.ds_bloom import DeepSpeedBloomInference
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
from deepspeed.model_implementations.transformers.ds_bert import DeepSpeedBERTInference
from deepspeed.model_implementations.transformers.ds_megatron_gpt import DeepSpeedMegatronGPTInference
from deepspeed.model_implementations.transformers.ds_opt import DeepSpeedOPTInference
import deepspeed.ops.transformer as transformer_inference
from .layers import LinearLayer, Normalize, EmbeddingLayer, OPTEmbedding, RMSNormalize
import torch
import gc
from deepspeed.accelerator import get_accelerator
import re
def load_model_with_checkpoint(r_module,
sd,
mp_replace,
ckpt_type,
ckpt_mp_size,
weight_quantizer=None,
rank=0,
container=None):
error_msgs = []
def prefix_check():
# if keys start with 'model.' or 'transformer.', don't skip level 0 prefix
for key in sd[0].keys():
# OPT models
if re.match("^model[.]", key):
return False
# BLOOM models
if re.match("^transformer[.]", key):
return False
return True
skip_level_0_prefix = prefix_check() and container.policy.use_load_prefix
def transpose(data):
with torch.no_grad():
data = data.contiguous()
data1 = data.transpose(-1, -2).reshape(-1)
data.reshape(-1).copy_(data1)
data1 = None
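            # At this point the transposed values live in data's original storage;
            # the reshape below just reinterprets that buffer with the dims swapped.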
return data.reshape(data.shape[-1], data.shape[-2])
def load(module, prefix):
args = (sd[0], prefix, {}, True, [], [], error_msgs)
if hasattr(module, 'weight'):
module.weight = mp_replace.copy(module.weight.data, sd[0][prefix + 'weight'])
if prefix + 'bias' in sd[0].keys():
if module.bias.data.is_meta:
                # meta tensor cannot be cast or copied to, so we need to replace it with a normal tensor here
module.bias = torch.nn.parameter.Parameter(data=torch.empty_like(module.bias.data, device="cpu"),
requires_grad=module.bias.data.requires_grad)
module.bias = mp_replace.copy(module.bias.data, sd[0][prefix + 'bias'])
args = None
gc.collect()
def load_transformer_layer(module, prefix):
if ckpt_type == "tp":
def load_parameters(module, prefix):
for n, p in module.named_parameters():
if prefix + n in sd[0] and len(n.split('.')) == 1:
if type(sd[0][prefix + n]) is list:
tmp_data, scale = sd[0][prefix + n]
tmp_data = tmp_data
scale = scale.to(get_accelerator().current_device_name())
# set the quantizer number of groups using the checkpoint scale shape
weight_quantizer.num_groups = scale.shape[0]
else:
tmp_data = sd[0][prefix + n].to(get_accelerator().current_device_name())
scale = None
src_shape = tmp_data.shape
dst_shape = p.shape
inner_dim = 1 if tmp_data.dtype == torch.int8 else 0
outer_dim = 0 if tmp_data.dtype == torch.int8 else 1
if (len(src_shape) == 2 and len(dst_shape) == 2):
if (src_shape[inner_dim] == dst_shape[0] and src_shape[outer_dim] == dst_shape[1]):
if tmp_data.dtype != torch.int8:
p = weight_quantizer.quantize(
transpose(tmp_data) if weight_quantizer.q_int8 else tmp_data)
else:
p = torch.nn.parameter.Parameter(tmp_data, requires_grad=False)
p.scale = scale
setattr(module, n, p)
else:
dim = inner_dim if src_shape[inner_dim] != dst_shape[0] else outer_dim
dim1 = 0 if src_shape[inner_dim] != dst_shape[0] else 1
if src_shape[dim] > dst_shape[dim1]:
weight_partition = torch.split(tmp_data, dst_shape[dim1], dim=dim)[rank].to(
get_accelerator().current_device_name())
assert tmp_data.dtype != torch.int8 or scale.numel() > weight_quantizer.num_groups * (rank+1), \
                                    '''ERROR: Quantization scales are required for a larger TP-size when loading an INT8 checkpoint!\
                                       Please use the FP16 checkpoint to generate an INT8 checkpoint with the desired sharding parameters!'''
scale = scale.view(-1)[weight_quantizer.num_groups * (rank + 1):].reshape(
weight_quantizer.num_groups, -1).contiguous()
else:
assert tmp_data.dtype != torch.int8, \
                                '''Merging of checkpoints is not supported when using an INT8 checkpoint! \
                                Please use as many GPUs as the TP-size for the checkpoint'''
all_data = [
sd[j][prefix + n] if type(sd[j][prefix + n]) is list else sd[j][prefix + n].to(
get_accelerator().current_device_name()) for j in range(len(sd))
]
# Check if the weight tensor is for the QKV parameter
if src_shape[1] == (3 * src_shape[0]) // ckpt_mp_size:
qkv_size = src_shape[outer_dim] // 3
src_split = [
torch.split(src[0].data, qkv_size, dim=outer_dim) for src in all_data
]
weight_partition = torch.cat([
torch.cat([qkv_s[i] for qkv_s in src_split], axis=outer_dim)
for i in range(len(src_split[0]))
],
dim=dim)
else:
weight_partition = torch.cat([
ad[0].to(get_accelerator().current_device_name())
if type(ad) is list else ad for ad in all_data
],
dim=dim)
if tmp_data.dtype == torch.int8:
scale = torch.cat(
[ad[1].to(get_accelerator().current_device_name()) for ad in all_data],
dim=dim)
if tmp_data.dtype != torch.int8:
weight_partition = weight_quantizer.quantize(
transpose(weight_partition), \
parallel_dim=(0 if dim == 1 else 1)) if weight_quantizer.q_int8 else \
weight_quantizer.quantize(weight_partition)
else:
weight_partition = torch.nn.parameter.Parameter(weight_partition,
requires_grad=False)
weight_partition.scale = scale
setattr(module, n, weight_partition)
else:
if src_shape[0] == dst_shape[0]:
p.data.copy_(tmp_data)
else:
if src_shape[0] > dst_shape[0]:
bias_split = torch.split(tmp_data, dst_shape[-1])[rank].to(
get_accelerator().current_device_name()).contiguous()
p.data.copy_(bias_split)
else:
# Check if the weight tensor is for the QKV parameter
if src_shape[0] == (3 * r_module.config.hidden_size) // ckpt_mp_size:
qkv_size = src_shape[0] // 3
src_split = [
torch.split(sd[j][prefix + n], qkv_size, dim=0) for j in range(len(sd))
]
p.data.copy_(
torch.cat([
torch.cat([qkv_s[i] for qkv_s in src_split], axis=0)
for i in range(len(src_split[0]))
],
dim=0).to(get_accelerator().current_device_name()).contiguous())
else:
p.data.copy_(
torch.cat([sd[j][prefix + n] for j in range(len(sd))],
dim=0).to(get_accelerator().current_device_name()).contiguous())
load_parameters(module, prefix)
for n, child in module.named_children():
load_parameters(child, prefix + n + '.')
else:
container.load_params(module, sd[0], weight_quantizer, mp_replace, prefix)
try:
import transformers
OPTLearnedPositionalEmbedding = transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding
if hasattr(transformers.models, "llama"):
LlamaRMSNorm = transformers.models.llama.modeling_llama.LlamaRMSNorm
else:
LlamaRMSNorm = None
except:
OPTLearnedPositionalEmbedding = None
layer_policies = {
nn.Linear: load,
nn.Embedding: load,
nn.LayerNorm: load,
EmbeddingLayer: load,
LinearLayer: load,
Normalize: load,
transformer_inference.DeepSpeedTransformerInference: load_transformer_layer,
DeepSpeedBloomInference: load_transformer_layer,
DeepSpeedGPTInference: load_transformer_layer,
DeepSpeedBERTInference: load_transformer_layer,
DeepSpeedMegatronGPTInference: load_transformer_layer,
DeepSpeedOPTInference: load_transformer_layer,
OPTLearnedPositionalEmbedding: load,
OPTEmbedding: load,
LlamaRMSNorm: load,
RMSNormalize: load
}
all_ds_ids = {}
def load_module_recursive(module, prefix='', level=0):
for name, child in module.named_children():
if child.__class__ in layer_policies:
checking_key = prefix + name + '.'
if not any(checking_key in item for item in sd[0].keys()):
if hasattr(child, 'weight') and \
(hasattr(child.weight, 'ds_id') and \
child.weight.ds_id in all_ds_ids):
prefix1 = all_ds_ids[child.weight.ds_id]
if child.__class__ is nn.Linear:
child = LinearLayer(weight=all_ds_ids[child.weight.ds_id])
setattr(module, name, child)
continue
child_params = list(child.parameters())
if len(child_params) > 0 and (child_params[0].numel() == 0 or child_params[0].is_meta):
if child.weight.is_meta:
ds_shape = child.weight.shape
else:
ds_shape = child.weight.ds_shape
if child.__class__ is nn.LayerNorm:
child = Normalize(dim=ds_shape[-1], dtype=child.weight.dtype, eps=child.eps)
setattr(module, name, child)
elif child.__class__ is nn.Linear:
child = LinearLayer(weight_shape=child.weight.shape, bias=child.bias)
setattr(module, name, child)
elif child.__class__ is OPTLearnedPositionalEmbedding:
child = OPTEmbedding(weight_shape=ds_shape)
setattr(module, name, child)
elif child.__class__ is LlamaRMSNorm:
child = RMSNormalize(dim=ds_shape[-1], dtype=child.weight.dtype, eps=child.variance_epsilon)
setattr(module, name, child)
else:
ds_id = None
if hasattr(child.weight, 'ds_id'):
ds_id = child.weight.ds_id
child = EmbeddingLayer(weight_shape=ds_shape, dtype=child.weight.dtype)
if ds_id is not None:
all_ds_ids[ds_id] = child.weight
setattr(module, name, child)
layer_policies[child.__class__](child, prefix + name + '.')
else:
load_module_recursive(
child,
prefix if (level == 0 and ckpt_type == 'pp') and skip_level_0_prefix else \
prefix + name + '.',
level + 1)
load_module_recursive(r_module)
embedding_weight = None
for n, p in r_module.named_parameters():
if "word_embeddings." in n or "embed_tokens." in n or "wte." in n:
embedding_weight = p
if embedding_weight is not None and r_module.lm_head.weight.is_meta:
r_module.lm_head.weight = embedding_weight
for sd_ in sd:
del sd_
sd = None
gc.collect()
| 14,745 | 53.817844 | 134 | py |
DeepSpeed | DeepSpeed-master/deepspeed/module_inject/policy.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from abc import ABC, abstractmethod
from deepspeed.utils.types import ActivationFuncType, NormType
import torch
from deepspeed.accelerator import get_accelerator
transformer_param_names = (
'attn_qkvw', \
'attn_qkvb', \
'attn_ow' , \
'attn_ob', \
'inter_w', \
'inter_b', \
'output_w', \
'output_b', \
'attn_nw', \
'attn_nb', \
'norm_w', \
'norm_b')
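# These are the attribute names of the fused parameters on DeepSpeed's inference transformer
# module (QKV/attention/MLP weights and biases plus the layer-norm params) that checkpoint
# tensors get copied into.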
class DSPolicy(ABC):
_orig_layer_class = None
def __init__(self):
self.cuda_graph_supported = False
@abstractmethod
def attention(self):
"""
Returns attention qkv and dense parameters
weight: (3*hidden, hidden) and (hidden, hidden)
bias: (3*hidden) and (hidden)
"""
raise NotImplementedError
class TransformerPolicy(DSPolicy):
# a static class variable containing the HuggingFace model configuration.
# see e.g., transformers.models.opt.configuration_opt.OPTConfig
hf_model_config = None
def __init__(
self,
inference=True,
linear_layer=True,
scale_attention=True,
megatron_v2=False,
use_mup=False,
# the type of activation function used in MLP
mlp_act_func_type=ActivationFuncType.GELU,
# applies layer norm before attention if `pre_attn_norm` is set to True
pre_attn_norm=True,
# this flag shows whether or not using prefix in loading the checkpoint
use_load_prefix=False,
# whether or not the qkv is stored in the split-format
split_qkv=True,
# Type of normalization to perform
norm_type=NormType.LayerNorm):
super().__init__()
self.cuda_graph_supported = False
self.inference = inference
self.linear_layer = linear_layer
self.scale_attention = scale_attention
self.is_megatron_v2 = megatron_v2
self.use_mup = use_mup
self.mlp_act_func_type = mlp_act_func_type
self.pre_attn_norm = pre_attn_norm
self.use_load_prefix = use_load_prefix
self.split_qkv = split_qkv
self.norm_type = norm_type
@abstractmethod
def attention(self):
"""
Returns attention qkv and dense parameters
weight: (3*hidden, hidden) and (hidden, hidden)
bias: (3*hidden) and (hidden)
"""
raise NotImplementedError
@abstractmethod
def get_hidden_heads(self):
"""
return hidden_size and number of heads
"""
raise NotImplementedError
@abstractmethod
def mlp(self):
"""
Returns mlp intermediate and output
weight: (intermediate, hidden) and (hidden, intermediate)
bias: (intermediate) and (hidden)
"""
raise NotImplementedError
@abstractmethod
def layernorm(self):
"""
Returns LayerNorms used in transformer layer
Post-Attention and pre/post layer norm
gamma and beta with shape: (hidden)
"""
raise NotImplementedError
# TODO (lekurile): This function exists in base container as well, consolidate at some point
def transpose(data):
with torch.no_grad():
data = data.contiguous()
data1 = data.transpose(-1, -2).reshape(-1)
data.reshape(-1).copy_(data1)
data1 = None
return data.reshape(data.shape[-1], data.shape[-2])
# TODO (lekurile): This function exists in megatron feature container as well, consolidate at some point
def _transpose(x, heads=1, mp_replace=None):
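    # Re-orders a fused QKV tensor from Megatron's per-head interleaved [q|k|v] layout
    # into contiguous Q/K/V blocks, the layout expected by the inference kernels.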
heads = heads // mp_replace.mp_size # type: ignore
outer_dim = -1
attention_head_size = x.shape[outer_dim] // heads
new_x_shape = x.size()[:outer_dim] + (heads, attention_head_size)
x_1 = x.view(*new_x_shape)
(q, k, v) = torch.split(x_1, (x_1.shape[-1] // 3), dim=-1)
if len(q.shape) > 2:
new_shape = (q.shape[0], ) + (-1, )
return torch.cat((q.reshape(new_shape), k.reshape(new_shape), v.reshape(new_shape)),
dim=outer_dim).reshape(x.shape)
else:
return torch.cat((q.reshape(-1), k.reshape(-1), v.reshape(-1)), dim=-1).reshape(x.shape)
# This checks if the parameter exists in the checkpoint file and maybe copies it into the corresponding destination tensor.
# Note that not all parameters are saved in one checkpoint; that's why we always need to check if they exist!
def maybe_copy(module,
sd,
weight_quantizer,
mp_replace,
dst_name,
src_name,
qkv=False,
megatron_v2=False,
split_qkv=False,
heads=1):
if src_name in sd:
dst = getattr(module, dst_name)
tmp = sd[src_name]
if len(dst.shape) == 1:
if split_qkv:
dst = mp_replace.strided_copy(dst, tmp, num_splits=3)
else:
dst = mp_replace.copy(dst, tmp)
if qkv and megatron_v2:
dst = torch.nn.parameter.Parameter(_transpose(dst, heads=heads, mp_replace=mp_replace).contiguous())
else:
if split_qkv:
dst = mp_replace.strided_copy(dst, weight_quantizer.quantize(tmp if weight_quantizer.q_int8 else \
(transpose(tmp).contiguous())), num_splits=3, int8=weight_quantizer.q_int8)
else:
if qkv and megatron_v2:
tmp = _transpose(transpose(tmp), heads=heads, mp_replace=mp_replace).contiguous()
if weight_quantizer.q_int8:
tmp = transpose(tmp)
dst = mp_replace.copy(dst, weight_quantizer.quantize(tmp if weight_quantizer.q_int8 else \
transpose(tmp)), int8=weight_quantizer.q_int8)
setattr(module, dst_name, dst)
# Extending the maybe_copy function for when the q, k, and v are in separate parameters!
def maybe_copy_qkv(module, sd, weight_quantizer, mp_replace, dst_name, src_names, split_qkv=False):
if src_names[0] in sd:
q = sd[src_names[0]]
k = sd[src_names[1]]
v = sd[src_names[2]]
qkv_data = torch.cat((q, k, v), dim=0)
dst = getattr(module, dst_name)
if len(dst.shape) == 1:
if split_qkv:
dst = mp_replace.strided_copy(dst, qkv_data.contiguous(), num_splits=3)
else:
dst = mp_replace.copy(dst, qkv_data)
else:
if split_qkv:
dst = mp_replace.strided_copy(dst, weight_quantizer.quantize(qkv_data.to(get_accelerator().device_name()) if weight_quantizer.q_int8 else \
((transpose(qkv_data)).contiguous())), num_splits=3, int8=weight_quantizer.q_int8)
else:
dst = mp_replace.copy(dst, weight_quantizer.quantize(qkv_data.to(get_accelerator().device_name()) if weight_quantizer.q_int8 else \
transpose(qkv_data)), int8=weight_quantizer.q_int8)
setattr(module, dst_name, dst)
# Extending the `maybe_copy` function for when mlp1 is in separate parameters for GeGLU
def maybe_copy_geglu(module, sd, weight_quantizer, mp_replace, dst_name, src_names):
if src_names[0] in sd:
reg_proj = sd[src_names[0]]
gate_proj = sd[src_names[1]]
mlp1_data = torch.cat((reg_proj, gate_proj), dim=0)
dst = getattr(module, dst_name)
dst = mp_replace.strided_copy(dst, weight_quantizer.quantize(mlp1_data.to(get_accelerator().device_name()) if weight_quantizer.q_int8 else \
transpose(mlp1_data)), num_splits=2, int8=weight_quantizer.q_int8)
setattr(module, dst_name, dst)
def pack_lora_weights(p):
return [
p.lora_right_weight, \
p.lora_left_weight, \
p.lora_scaling
]
def maybe_get_lora(p):
if hasattr(p, 'lora_right_weight'):
lora_param = pack_lora_weights(p)
else:
lora_param = []
return lora_param
| 8,259 | 35.711111 | 155 | py |
DeepSpeed | DeepSpeed-master/deepspeed/module_inject/auto_tp.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# Automatic Tensor Parallelism
import re
from torch import nn
from .replace_policy import replace_policies
class AutoTP():
def in_module_list(module, module_list):
for item in module_list:
if type(item).__name__ == type(module).__name__:
return True
return False
def get_module_list(model):
mlist = []
for child in model.children():
if isinstance(child, nn.ModuleList):
for module in child.children():
if not mlist:
mlist = [module]
elif not AutoTP.in_module_list(module, mlist):
mlist = mlist + [module]
else:
mlist = mlist + AutoTP.get_module_list(child)
return mlist
def supported(model):
unsupported = ['codegen', 'deberta', 'flaubert', 'fsmt', 'gpt2', 'led', 'longformer', 'xlm', 'xlnet']
model = str(model)
key = re.search(r": (.*?)Model", model)
if key is None:
key = re.search(r": (.*?)Stack", model)
if key is None:
key = re.match(r"(.*?)Model", model)
assert key is not None, "Not able to determine model policy automatically. Please provide policy."
if key.group(1).lower() in unsupported:
return False
return True
def get_layers(parent, module):
layer_list = []
for key, submodule in module._modules.items():
if isinstance(submodule, nn.Linear):
layer_list = layer_list + [parent + "." + key]
elif isinstance(submodule, nn.LayerNorm) or key == 'LayerNorm' or key == 'layer_norm':
layer_list = layer_list + ["ln"]
else:
layer_list = layer_list + AutoTP.get_layers(key, submodule)
return layer_list
def update_policy_list(policy_list, new_module, new_gems):
if len(policy_list):
for i, policy in enumerate(policy_list):
# if module already exists in policy, combine gems and remove duplicates
if policy[0] == type(new_module):
new_gems = set(new_gems + policy[1])
policy_list[i] = tuple([type(new_module), new_gems])
return policy_list
policy_list.append(tuple([type(new_module), new_gems]))
return policy_list
def kernel_supported(module_list):
policy = []
for plcy in replace_policies:
# instantiate a throw-away policy in order to populate the _orig_layer_class
_ = plcy(None)
if isinstance(plcy._orig_layer_class, list):
for orig_layer_class in plcy._orig_layer_class:
policy.append(orig_layer_class)
elif plcy._orig_layer_class is not None:
policy.append(plcy._orig_layer_class)
for child in module_list:
if child.__class__ in policy:
return True
return False
def tp_parser(model):
policy_list = []
module_list = []
layer_list = []
gem_list = []
module_list = AutoTP.get_module_list(model)
assert AutoTP.supported(model), "AutoTP not supported for model. Please use kernel injection since container policy for model exists." \
if AutoTP.kernel_supported(module_list) else "AutoTP not supported for model. Please provide policy."
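        # Walk every transformer block, record which of its Linear submodules feed an
        # all-reduce when sharded (output-side projections), and emit (block class, names) pairs.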
for module in module_list:
for key, submodule in module._modules.items():
if isinstance(submodule, nn.Linear):
layer_list = layer_list + ["." + key]
elif isinstance(submodule, nn.LayerNorm) or key == 'LayerNorm' or key == 'layer_norm':
layer_list = layer_list + ["ln"]
else:
layer_list = layer_list + AutoTP.get_layers(key, submodule)
for i, layer in enumerate(layer_list):
if layer == 'ln':
if layer_list[i - 1] != 'ln':
gem_list = gem_list + [layer_list[i - 1]]
elif 'out_proj' in layer:
gem_list = gem_list + [layer]
elif 'o_proj' in layer:
gem_list = gem_list + [layer]
elif 'down_proj' in layer:
gem_list = gem_list + [layer]
elif 'attention.dense' in layer and 'GPTNeoX' in str(model):
gem_list = gem_list + [layer]
elif 'self_attention.dense' in layer and 'falcon' in str(
type(module)): # this is a hack to get the right linear layer for this model!
gem_list = gem_list + [layer]
layer_list = []
if gem_list != []:
gem_list = list(set(gem_list))
policy_list = AutoTP.update_policy_list(policy_list, module, gem_list)
gem_list = []
assert len(policy_list), "AutoTP not supported for model. Please use kernel injection since container policy for model exists." \
if AutoTP.kernel_supported(module_list) else "Not able to determine model policy automatically. Please provide policy."
return policy_list
| 5,353 | 41.832 | 144 | py |
DeepSpeed | DeepSpeed-master/deepspeed/module_inject/layers.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed import comm as dist
from torch import nn
from torch.nn import functional as F
from torch.nn.parameter import Parameter
from deepspeed.accelerator import get_accelerator
class LinearAllreduce(nn.Module):
def __init__(self, weight, bias=None, mp_group=None):
super(LinearAllreduce, self).__init__()
self.weight = weight
self.bias = bias
self.mp_group = mp_group
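    # Row-parallel linear: each rank holds a slice of the weight along the reduction
    # dimension, so the partial matmul results are summed across the tensor-parallel group.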
def forward(self, input):
output = torch.matmul(input, self.weight.transpose(-1, -2))
if self.mp_group is not None:
dist.all_reduce(output, group=self.mp_group)
if self.bias is not None:
output += self.bias
return output
class LinearLayer(nn.Module):
def __init__(self, weight_shape=None, dtype=torch.half, weight=None, bias=None):
super(LinearLayer, self).__init__()
if weight is not None:
self.weight = weight
self.bias = bias
else:
self.weight = Parameter(
torch.empty(weight_shape, dtype=dtype, device=get_accelerator().current_device_name()))
self.bias = Parameter(
torch.empty(weight_shape[0],
dtype=dtype,
device=get_accelerator().current_device_name())) \
if bias is not None else None
def forward(self, input):
output = torch.matmul(input, self.weight.transpose(-1, -2))
if self.bias is not None:
output += self.bias
return output
class Normalize(nn.Module):
def __init__(self, dim=None, dtype=torch.float, eps=1e-5, weight=None, bias=None):
super(Normalize, self).__init__()
if weight is not None:
self.weight = weight
self.bias = bias
else:
self.norm = nn.LayerNorm(dim, eps=eps).to(dtype).to(get_accelerator().current_device_name())
self.weight = self.norm.weight
self.bias = self.norm.bias
self.eps = eps
def forward(self, input):
return nn.functional.layer_norm(input, input.shape[-1:], self.weight, self.bias, eps=self.eps)
class EmbeddingLayer(nn.Module):
def __init__(self, weight_shape=None, dtype=torch.half, weight=None, bias=None):
super(EmbeddingLayer, self).__init__()
if weight is None:
self.weight = Parameter(
torch.empty(weight_shape[0],
weight_shape[1],
dtype=dtype,
device=get_accelerator().current_device_name()))
else:
self.weight = weight
def forward(self, input):
return F.embedding(input, self.weight)
class OPTEmbedding(EmbeddingLayer):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, weight_shape=None, weight=None, bias=None):
# OPT is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
super().__init__(weight_shape, weight=weight)
def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int = 0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
attention_mask = attention_mask.long()
# create positions depending on attention_mask
positions = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() - 1
# cut positions if `past_key_values_length` is > 0
positions = positions[:, past_key_values_length:]
return super().forward(positions + self.offset)
class RMSNormalize(nn.Module):
def __init__(self, dim=None, dtype=torch.float, eps=1e-5, weight=None):
super(RMSNormalize, self).__init__()
if weight is not None:
self.weight = weight
else:
self.weight = nn.Parameter(torch.ones(dim, dtype=dtype, device=get_accelerator().current_device_name()))
self.eps = eps
def forward(self, hidden_states):
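        # RMSNorm: x * rsqrt(mean(x^2) + eps), scaled by a learned weight
        # (no mean subtraction and no bias, unlike LayerNorm).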
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return hidden_states * self.weight
| 4,592 | 33.276119 | 116 | py |
DeepSpeed | DeepSpeed-master/deepspeed/module_inject/inject.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import copy
import torch
from deepspeed.ops.transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
def module_inject(layer_obj, model, config, micro_batch_size, max_seq_length, seed, preln, fp16=True):
for name, child in model.named_children():
if isinstance(child, layer_obj):
print('REPLACING BertLayer')
cuda_config = DeepSpeedTransformerConfig(batch_size=micro_batch_size,
max_seq_length=max_seq_length,
hidden_size=config.hidden_size,
heads=config.num_attention_heads,
attn_dropout_ratio=config.attention_probs_dropout_prob,
hidden_dropout_ratio=config.hidden_dropout_prob,
num_hidden_layers=config.num_hidden_layers,
initializer_range=config.initializer_range,
seed=seed,
fp16=fp16,
pre_layer_norm=preln)
new_module = DeepSpeedTransformerLayer(cuda_config)
# copy relevant state from child -> new module
qw = child.attention.self.query.weight
qb = child.attention.self.query.bias
kw = child.attention.self.key.weight
kb = child.attention.self.key.bias
vw = child.attention.self.value.weight
vb = child.attention.self.value.bias
qkvw = torch.cat((qw, kw, vw), 0)
qkvb = torch.cat((qb, kb, vb), 0)
new_module.attn_qkvw.data = qkvw
new_module.attn_qkvb.data = qkvb
new_module.attn_ow.data = child.attention.output.dense.weight
new_module.attn_ob.data = child.attention.output.dense.bias
if preln:
attention_layerNorm = child.PostAttentionLayerNorm
else:
attention_layerNorm = child.attention.output.LayerNorm
new_module.attn_nw.data = attention_layerNorm.weight
new_module.attn_nb.data = attention_layerNorm.bias
if preln:
intermediate_FF = child.intermediate.dense_act
else:
intermediate_FF = child.intermediate.dense
new_module.inter_w.data = intermediate_FF.weight
new_module.inter_b.data = intermediate_FF.bias
new_module.output_w.data = child.output.dense.weight
new_module.output_b.data = child.output.dense.bias
if preln:
transformer_LayerNorm = child.PreAttentionLayerNorm
else:
transformer_LayerNorm = child.output.LayerNorm
new_module.norm_w.data = transformer_LayerNorm.weight
new_module.norm_b.data = transformer_LayerNorm.bias
setattr(model, name, copy.deepcopy(new_module))
else:
module_inject(layer_obj, child, config, micro_batch_size, max_seq_length, seed, preln, fp16)
return model
def test_hi():
from turing.nvidia_modelingpreln import BertConfig as BertConfigPreLN
from turing.nvidia_modelingpreln import BertForQuestionAnswering as BertForQuestionAnsweringPreLN
from turing.nvidia_modelingpreln import BertLayer
bert_model_config = {
"vocab_size_or_config_json_file": 119547,
"hidden_size": 1024,
"num_hidden_layers": 1,
"num_attention_heads": 16,
"intermediate_size": 4096,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 512,
"type_vocab_size": 2,
"initializer_range": 0.02
}
bert_config = BertConfigPreLN(**bert_model_config)
base_model = BertForQuestionAnsweringPreLN(bert_config, args=None)
#base_model = LinearStack()
test_model = copy.deepcopy(base_model)
test_model = module_inject(BertLayer, test_model, bert_config, 4, 384, 1234)
print('BASE', base_model)
print('TEST', test_model)
#base_model.eval()
#test_model.eval()
#test_input = torch.rand(1, base_model.input_dim)
#base_output = base_model(test_input)
#test_output = test_model(test_input)
#
#assert torch.allclose(base_output, test_output, atol=3e-8)
| 4,719 | 40.769912 | 108 | py |
DeepSpeed | DeepSpeed-master/deepspeed/module_inject/replace_module.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
from typing import Optional
import torch
import tqdm
import deepspeed
import deepspeed.ops.transformer as transformer_inference
from deepspeed.ops.transformer.inference.diffusers_attention import DeepSpeedDiffusersAttention
from deepspeed.ops.transformer.inference.diffusers_transformer_block import DeepSpeedDiffusersTransformerBlock
from deepspeed.ops.transformer.inference.diffusers_2d_transformer import Diffusers2DTransformerConfig
from deepspeed.accelerator import get_accelerator
from .replace_policy import HFGPT2LayerPolicy
from .replace_policy import replace_policies, generic_policies
from deepspeed import comm as dist
from torch import nn
from .layers import LinearAllreduce, LinearLayer
from .load_checkpoint import load_model_with_checkpoint
import time
from .utils import policy_to_ds_container
import gc
class ReplaceWithTensorSlicing:
def __init__(self, mp_group=None, mp_size=1, out_dim=1, in_dim=0):
if mp_group is not None:
self.gpu_index = dist.get_rank(group=mp_group)
else:
self.gpu_index = 0
self.out_dim = out_dim
self.in_dim = in_dim
self.mp_size = mp_size
def merge_assert(self, dim1, dim2):
assert dim1 > dim2, \
'Merging tensors is not allowed here! Please use deepspeed load_checkpoint\
for merging your checkpoints before replacing the transformer layer with\
inference-kernels'
def strided_copy(self,
dst: Optional[torch.Tensor],
src: Optional[torch.Tensor],
num_splits: int,
int8: bool = False,
allocate_tensor: bool = False):
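        # Used for fused tensors (e.g. QKV): the source is split into `num_splits` logical
        # sub-tensors, each is sharded for this rank, and the shards are re-fused below.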
if src is None:
return src
src_shape = src.shape
dst_shape = dst.shape
outer_dim = 0 if int8 else -1
if allocate_tensor:
dst = torch.empty_like(dst)
src_split = torch.split(src.data, src.shape[outer_dim] // num_splits, dim=outer_dim)
if (len(src_shape) == 2 and len(dst_shape) == 2):
if src_shape[outer_dim] == dst_shape[self.out_dim]:
dst = dst.reshape(-1).data.copy_(src.data.reshape(-1)).reshape(src.shape)
dst = torch.nn.parameter.Parameter(dst, requires_grad=False)
if hasattr(src, 'scale'):
dst.scale = src.scale
return dst
self.merge_assert(src_shape[outer_dim], dst_shape[self.out_dim])
qkv_size = dst_shape[self.out_dim] // num_splits
qkv_split = [torch.split(src_s, qkv_size, dim=outer_dim) for src_s in src_split]
weight_split = [
torch.cat([qkv_s[i] for qkv_s in qkv_split], axis=outer_dim) for i in range(len(qkv_split[0]))
]
dst = dst.reshape(-1).data.copy_(weight_split[self.gpu_index].contiguous().reshape(-1)).reshape(
weight_split[self.gpu_index].shape)
else:
if src_shape[0] == dst_shape[0]:
return torch.nn.parameter.Parameter(src)
qkv_size = dst_shape[0] // num_splits
qkv_split = [torch.split(src_s, qkv_size, dim=0) for src_s in src_split]
bias_split = [torch.cat([qkv_s[i] for qkv_s in qkv_split], axis=0) for i in range(len(qkv_split[0]))]
dst.data.copy_(bias_split[self.gpu_index].contiguous())
dst = torch.nn.parameter.Parameter(dst, requires_grad=False)
if hasattr(src, 'scale'):
dst.scale = src.scale
return dst
def copy(self, dst, src, int8=False, allocate_tensor=False):
if src is None:
return src
assert not dst.data.is_meta # the torch.Tensor.copy_ method used below will silently fail on meta tensors
if allocate_tensor:
dst = torch.empty_like(dst)
outer_dim = 0 if int8 else 1
inner_dim = 1 if int8 else 0
src_shape = src.shape
dst_shape = dst.shape
if (len(src_shape) == 2 and len(dst_shape) == 2):
if src_shape[inner_dim] == dst_shape[self.in_dim] and src_shape[outer_dim] == dst_shape[self.out_dim]:
dst = dst.reshape(-1).data.copy_(src.data.reshape(-1)).reshape(src.shape)
else:
if src_shape[inner_dim] != dst_shape[self.in_dim]:
self.merge_assert(src_shape[inner_dim], dst_shape[self.in_dim])
dst.data.copy_(src[:, self.gpu_index * dst_shape[self.in_dim]: (self.gpu_index + 1) * dst_shape[self.in_dim]] if inner_dim == 1 else \
src[self.gpu_index * dst_shape[self.in_dim]: (self.gpu_index + 1) * dst_shape[self.in_dim], :])
else:
self.merge_assert(src_shape[outer_dim], dst_shape[self.out_dim])
dst.data.copy_(src[:, self.gpu_index * dst_shape[self.out_dim]: (self.gpu_index + 1) * dst_shape[self.out_dim]] if outer_dim == 1 else \
src[self.gpu_index * dst_shape[self.out_dim]: (self.gpu_index + 1) * dst_shape[self.out_dim], :])
else:
if src_shape[0] == dst_shape[0]:
dst = src if src.dtype == dst.dtype else dst.data.copy_(src)
else:
dst.data.copy_(src[self.gpu_index * dst_shape[-1]:(self.gpu_index + 1) * dst_shape[-1]])
dst = torch.nn.parameter.Parameter(dst, requires_grad=False)
if hasattr(src, 'scale'):
dst.scale = src.scale
return dst
def get_transformer_name(replaced_module):
from .containers import supported_models
from torch.nn import ModuleList
transformer_name = ''
for n, c in replaced_module.named_children():
if c.__class__ in supported_models:
transformer_name += n + '.'
for name, child in c.named_children():
if child.__class__ is ModuleList:
transformer_name += name
break
break
return transformer_name
class GroupQuantizer:
def __init__(self, q_int8=True, group_size=1, num_bits=8, num_groups=0):
self.group_size = group_size
self.num_bits = num_bits
self.q_int8 = q_int8
self.num_groups = num_groups
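    # Symmetric per-group INT8 quantization: each group is scaled by
    # 2 * max(|min|, |max|) / 2**num_bits, rounded, and clamped to the int8 range.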
def quantize(self, inputs, qkv=True, count=1, parallel_dim=0):
if not self.q_int8 or not qkv:
inputs = torch.nn.Parameter(inputs, requires_grad=False)
inputs.scale = torch.empty(1)
return inputs
q_range = 2**self.num_bits
num_groups = self.num_groups if self.num_groups > 0 else inputs.shape[0] // self.group_size
inputs = inputs.to(get_accelerator().current_device_name())
input_flat = inputs.reshape(num_groups, -1).contiguous()
input_min = torch.min(input_flat, dim=1, keepdim=True)[0].float()
input_max = torch.max(input_flat, dim=1, keepdim=True)[0].float()
scale = torch.max(input_min.abs(), input_max.abs()) * 2.0 / (q_range)
input_flat = (input_flat / scale).round().clamp(-q_range // 2, q_range // 2 - 1)
inputs_q = input_flat.reshape(inputs.shape).to(torch.int8).contiguous()
out = torch.nn.Parameter(inputs_q, requires_grad=False)
inputs_split = inputs.split(inputs.shape[parallel_dim] // 2, dim=parallel_dim)
input_flat = [inputs_split[i].reshape(num_groups, -1).contiguous() for i in range(2)]
input_min = [torch.min(input_flat[i], dim=1, keepdim=True)[0].float() for i in range(2)]
input_max = [torch.max(input_flat[i], dim=1, keepdim=True)[0].float() for i in range(2)]
scale1 = [(torch.max(input_min[i].abs(), input_max[i].abs()) * 2.0 / (q_range)).squeeze().unsqueeze(0)
for i in range(2)]
out.scale = torch.cat([scale.squeeze().unsqueeze(0), scale1[0], scale1[1]], dim=0).reshape(num_groups,
-1).contiguous()
return out
def _module_match(module):
for policy in generic_policies:
policy = policy()
if policy.match(module):
return policy
return None
def generic_injection(module, fp16=False, bf16=False, enable_cuda_graph=True):
def replace_attn(child, policy):
policy_attn = policy.attention(child)
if policy_attn is None:
return child
if len(policy_attn) == 5:
qkvw, attn_ow, attn_ob, hidden_size, heads = policy_attn
else:
qw, kw, vw, attn_ow, attn_ob, hidden_size, heads = policy_attn
config = transformer_inference.DeepSpeedInferenceConfig(
hidden_size=hidden_size,
heads=heads,
fp16=fp16,
bf16=bf16,
triangular_masking=False,
max_out_tokens=4096,
)
attn_module = DeepSpeedDiffusersAttention(config)
def transpose(data):
data = data.contiguous()
data.reshape(-1).copy_(data.transpose(-1, -2).contiguous().reshape(-1))
data = data.reshape(data.shape[-1], data.shape[-2])
data.to(get_accelerator().current_device_name())
return data
if len(policy_attn) == 5:
attn_module.attn_qkvw.data = transpose(qkvw.data)
else:
attn_module.attn_qkvw = None
attn_module.attn_qw.data = transpose(qw.data)
attn_module.attn_kw.data = transpose(kw.data)
attn_module.attn_vw.data = transpose(vw.data)
attn_module.attn_qkvb = None
attn_module.attn_ow.data = transpose(attn_ow.data)
attn_module.attn_ob.data.copy_(attn_ob.data.to(get_accelerator().current_device_name()))
return attn_module
def replace_attn_block(child, policy):
config = Diffusers2DTransformerConfig()
return DeepSpeedDiffusersTransformerBlock(child, config)
if isinstance(module, torch.nn.Module):
pass
else:
if fp16 is False and bf16 is False:
raise ValueError("Generic injection only supported with FP16 or BF16")
try:
import diffusers
if hasattr(diffusers.models.attention, 'CrossAttention'):
cross_attention = diffusers.models.attention.CrossAttention
else:
cross_attention = diffusers.models.attention_processor.Attention
attention_block = diffusers.models.attention.BasicTransformerBlock
new_policies = {
cross_attention: replace_attn,
attention_block: replace_attn_block,
}
except ImportError:
new_policies = {}
#replace_transformer_layer(None,
# module.text_encoder,
# training=False,
# replace_with_kernel_inject=True,
# triangular_masking=True,
# max_out_tokens=8192)
from ..model_implementations.transformers.clip_encoder import DSClipEncoder
cg_encoder = DSClipEncoder(module.text_encoder, enable_cuda_graph=enable_cuda_graph)
setattr(module, 'text_encoder', cg_encoder)
for name in module.__dict__.keys():
sub_module = getattr(module, name)
policy = _module_match(sub_module)
if policy is not None:
def _replace_module(module, policy):
for name, child in module.named_children():
_replace_module(child, policy)
if child.__class__ in new_policies:
replaced_module = new_policies[child.__class__](child, policy)
setattr(module, name, replaced_module)
_replace_module(sub_module, policy)
new_module = policy.apply(sub_module, enable_cuda_graph=enable_cuda_graph)
print(f"**** found and replaced {name} w. {type(new_module)}")
setattr(module, name, new_module)
container_g = None
def replace_transformer_layer(orig_layer_impl, model, checkpoint_dict, config, model_config):
""" Replace bert-style transformer layers with DeepSpeed's transformer layer
Arguments:
orig_layer_impl (torch.nn.Module): the original transformer layer implementation to look for,
e.g., transformers.modeling_bert.BertLayer.
model (torch.nn.Module): user's nn.module representing their model
checkpoint_dict: Dictionary for checkpoint passed from the Inference Engine
config: top-level DS Inference config defined in inference/config.py
model_config: HuggingFace model config passed from the inference/engine.py
Returns:
Updated nn.module with replaced transformer layers
"""
# defining globals as internally defined functions inherit these everywhere
quantize = (config.dtype == torch.int8)
# todo: Refactor later. In future, let's minimize the style used above and use config.** instead
linear_layer_setting = None
'''
linear_layer_setting (tuple of modules) [Optional]: shows which two classes are used for linear layers and embedding layers
'''
micro_batch_size = -1
seed = -1
local_rank = -1
mp_replace = ReplaceWithTensorSlicing(mp_group=config.tensor_parallel.tp_group,
mp_size=config.tensor_parallel.tp_size) #, out_dim=0, in_dim=1)
def replace_with_policy(child, policy_cls, triangular_masking, inference=False, layer_id=0):
policy = policy_cls(child, inference=inference)
if not policy.cuda_graph_supported:
# the policy says cuda graph is not supported; raise an error if the config enables it
assert not config.enable_cuda_graph, "cuda graph is not supported with this model, please disable"
from deepspeed.moe.layer import MoE
moe = False
if hasattr(child, 'mlp') and isinstance(child.mlp, MoE):
num_experts = child.mlp.num_experts
moe = True
# 1. Create a model-specific container object using the policy object.
_container = policy_to_ds_container(policy=policy,
config=config,
model_config=model_config,
layer_id=layer_id,
child=child)
_container.set_moe(moe)
# 2. Set the tensor parallelism config
_container.set_tensor_parallel_config(config.tensor_parallel.tp_size, config.tensor_parallel.tp_group)
# 3. Initialize tensors
_container.initialize_tensors()
# 4. deal with data types -- needs refactor to use dtype instead of fp16
if config.dtype in [torch.float16, torch.bfloat16, torch.int8]:
_container.convert_to_required_dtype()
# 5. Set the quantization config
quantizer = GroupQuantizer(q_int8=quantize)
_container.set_quantization_config(quantizer)
# 6. create a DS Inference config object
_container.create_ds_model_config()
# 7. use the config and create the module
_container.create_module()
# 8. transpose the weights and bias if needed
_container.transpose()
# 9. deal with tensor parallelism.
_container.apply_tensor_parallelism(mp_replace)
# 10. copy the tensors from the model-specific container to the new module
_container.copy_data_to_new_module()
# 11. set global for generic checkpoint loading
global container_g
if container_g is None:
container_g = _container
return _container.module
def replace_wo_policy(module, all_reduce_linears, prefix="", state_dict=None):
mp_size = config.tensor_parallel.tp_size
mp_group = config.tensor_parallel.tp_group
def _replace(child, name, conv_linear_layer):
if getattr(child, "replaced", False) == True:
return
mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group)
weight_shape = child.weight.shape
if name in all_reduce_linears:
new_weight = torch.empty((
weight_shape[1] if conv_linear_layer else weight_shape[0],
(weight_shape[0] if conv_linear_layer else weight_shape[1]) // mp_size,
),
device=child.weight.device,
dtype=child.weight.dtype)
if conv_linear_layer:
child.weight.data = child.weight.data.transpose(-1, -2).contiguous()
data = mp_replace.copy(new_weight, child.weight.data)
new_bias = torch.empty((weight_shape[0]), device=child.weight.device, dtype=child.weight.dtype)
if child.bias is not None:
new_bias.data.copy_(child.bias.data)
setattr(child, "replaced", True)
return LinearAllreduce(data, child.bias if child.bias is None else \
torch.nn.parameter.Parameter(new_bias.to(get_accelerator().current_device_name())), mp_group)
else:
new_weight = torch.empty((
(weight_shape[1] if conv_linear_layer else weight_shape[0]) // mp_size,
weight_shape[0] // mp_size if conv_linear_layer else weight_shape[1],
),
device=child.weight.device,
dtype=child.weight.dtype)
if conv_linear_layer:
child.weight.data = child.weight.data.transpose(-1, -2).contiguous()
data = mp_replace.copy(new_weight, child.weight.data)
new_bias = torch.empty((weight_shape[0] // mp_size),
device=child.weight.device,
dtype=child.weight.dtype)
bias_data = None if child.bias is None else mp_replace.copy(new_bias, child.bias.data).to(
get_accelerator().current_device_name())
setattr(child, "replaced", True)
return LinearLayer(weight=data.to(get_accelerator().current_device_name()), bias=bias_data)
def _slice_embedding(child, name, conv_linear_layer):
if getattr(child, "replaced", False) == True:
return
mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group)
new_weight = torch.empty((child.weight.shape[0], child.weight.shape[1] // mp_size),
device=child.weight.device,
dtype=child.weight.dtype)
data = mp_replace.copy(new_weight,
child.weight.ds_tensor.data if hasattr(child.weight, 'ds_tensor') else \
child.weight.data)
new_embedding = nn.Embedding(child.weight.shape[0], child.weight.shape[1] // mp_size)
new_embedding.weight.data.copy_(data)
setattr(child, "replaced", True)
return new_embedding
def update_mp_params(child):
if getattr(child, "replaced", False) == True:
return
for param in [
"n_heads", "inner_dim", "num_heads", "num_kv", "num_attention_heads", "num_attn_heads",
"all_head_size", "embed_dim", "hidden_size"
]:
if hasattr(child, param):
param_val = getattr(child, param)
assert param_val % mp_size == 0, f"{param} ({param_val}) must be divisible by mp_size ({mp_size})"
setattr(child, param, param_val // mp_size)
setattr(child, "replaced", True)
conv_linear_layer = False
if linear_layer_setting is not None:
linear_policies = {linear_layer_setting[0]: _replace}
if len(linear_layer_setting) == 2:
linear_policies.update({linear_layer_setting[1]: _slice_embedding})
else:
if orig_layer_impl is HFGPT2LayerPolicy._orig_layer_class:
try:
import transformers
conv_linear_layer = True
linear_policies = {transformers.model_utils.Conv1D: _replace}
except ImportError:
linear_policies = {nn.Linear: _replace}
else:
linear_policies = {nn.Linear: _replace, nn.Embedding: _slice_embedding}
def _replace_module(r_module, prev_name='', prev_class_name=''):
for name, child in r_module.named_children():
if prev_class_name == "":
class_name = prev_name
elif prev_name == "":
class_name = prev_class_name
else:
class_name = prev_class_name + '.' + prev_name
checking_key = prefix + '.' + class_name + '.' + name + '.' if class_name != "" else prefix + '.' + name + '.'
if child.__class__ in [nn.Linear, nn.Embedding, nn.LayerNorm] and state_dict is not None:
if any(checking_key in item for item in state_dict):
load(child, state_dict, checking_key, mp_group)
else:
continue
if len(child._buffers) != 0 and state_dict is not None:
load_buffer(child, state_dict, checking_key)
if child.__class__ in linear_policies:
setattr(r_module, name, linear_policies[child.__class__](child, prev_name + '.' + name,
conv_linear_layer))
elif any(isinstance(child, lp) for lp in linear_policies):
# Added for falcon model support
# Note: isinstance will account for class inheritance, child.__class__ does not
key = None
for lp in linear_policies:
if isinstance(child, lp):
key = lp
break
assert key is not None
setattr(r_module, name, linear_policies[key](child, prev_name + '.' + name, conv_linear_layer))
else:
update_mp_params(child)
_replace_module(child, name, class_name)
return r_module
return _replace_module(module)
def replace_fn(child, _policy, layer_id=0, prefix="", state_dict=None):
training = False # todo: refactor this part to go in the config
if training:
# copy relevant state from child -> new module
new_module = replace_with_policy(child, _policy, config.triangular_masking)
else:
# copy relevant state from child -> new module
if config.replace_with_kernel_inject:
new_module = replace_with_policy(child,
_policy,
config.triangular_masking,
inference=True,
layer_id=layer_id)
else:
new_module = replace_wo_policy(child, _policy, prefix=prefix, state_dict=state_dict)
return new_module
if checkpoint_dict is not None and not config.replace_with_kernel_inject:
# AutoTP shard loading
checkpoint = checkpoint_dict["checkpoints"]
pbar = tqdm.tqdm(total=len(checkpoint), desc=f"Loading {len(checkpoint)} checkpoint shards")
for i in range(len(checkpoint)):
replaced_module = replace_module(model=model,
orig_class=orig_layer_impl,
replace_fn=replace_fn,
_replace_policy=config.injection_policy_tuple,
checkpoint=checkpoint[i])
pbar.update(1)
gc.collect()
else:
replaced_module = replace_module(model=model,
orig_class=orig_layer_impl,
replace_fn=replace_fn,
_replace_policy=config.injection_policy_tuple)
quantizer = GroupQuantizer(q_int8=quantize)
world_size = dist.get_world_size() if dist.is_initialized() else 1
rank = dist.get_rank() if dist.is_initialized() else 0
if checkpoint_dict is not None and config.replace_with_kernel_inject:
assert container_g.ckpt_load_enabled, \
f"Meta Tensor checkpoint loading not supported in {container_g.__class__.__name__} container"
start_time = time.time()
checkpoint = checkpoint_dict['checkpoints']
ckpt_list = checkpoint["tp"] if type(checkpoint) is dict else checkpoint
ckpt_type = checkpoint_dict.get('parallelization', 'pp')
ckpt_mp_size = checkpoint_dict.get('tp_size', len(ckpt_list))
ckpt_mp_size = checkpoint_dict.get('mp_size', ckpt_mp_size)
base_dir1 = checkpoint_dict.get('base_dir', config.base_dir)
if ckpt_type == 'pp' and type(checkpoint) is list:
pbar = tqdm.tqdm(total=len(checkpoint), desc=f"Loading {len(checkpoint)} checkpoint shards")
for i in range(len(checkpoint)):
sd = [torch.load(os.path.join(base_dir1, checkpoint[i]), map_location='cpu')]
load_model_with_checkpoint(replaced_module,
sd,
mp_replace,
ckpt_type,
ckpt_mp_size,
quantizer,
container=container_g)
pbar.update(1)
else:
num_checkpoints = len(ckpt_list) // ckpt_mp_size
tp_split_size = (world_size / ckpt_mp_size)
sd_offset = int(rank / tp_split_size)
sd_count = int((rank + max(1, tp_split_size)) / tp_split_size) - sd_offset
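# Illustrative mapping (sizes assumed, not from a real run): with world_size=4 and
# ckpt_mp_size=2, tp_split_size is 2.0, so ranks 0-1 get sd_offset=0 and ranks 2-3
# get sd_offset=1, each loading sd_count=1 shard per checkpoint group; with
# world_size=2 and ckpt_mp_size=4, each rank instead loads sd_count=2 shards.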
pbar = tqdm.tqdm(total=num_checkpoints, desc=f"Loading {num_checkpoints} checkpoint shards")
for i in range(num_checkpoints):
pbar.update(1)
ckpt_index = i * ckpt_mp_size + sd_offset
ckpt_files = [
os.path.join(base_dir1, ckpt_list[ckpt_index + j]) if base_dir1 else ckpt_list[ckpt_index + j]
for j in range(sd_count)
]
sds = [torch.load(ckpt_file, map_location='cpu') for ckpt_file in ckpt_files]
load_model_with_checkpoint(replaced_module,
sds,
mp_replace,
ckpt_type,
ckpt_mp_size,
quantizer,
int(rank % tp_split_size),
container=container_g)
sds = [None for _ in sds]
gc.collect()
if "non_tp" in checkpoint:
pbar = tqdm.tqdm(total=len(checkpoint["non_tp"]),
desc=f"Loading {len(checkpoint['non_tp'])} checkpoint shards")
for i in range(len(checkpoint["non_tp"])):
pbar.update(1)
ckpt_file = os.path.join(base_dir1,
checkpoint["non_tp"][i]) if base_dir1 else checkpoint["non_tp"][i]
sds = [torch.load(ckpt_file, map_location='cpu')]
load_model_with_checkpoint(replaced_module,
sds,
mp_replace,
ckpt_type,
ckpt_mp_size,
quantizer,
int(rank % tp_split_size),
container=container_g)
sds = [None for _ in sds]
gc.collect()
print(f"checkpoint loading time at rank {rank}: {time.time()-start_time} sec")
if config.save_mp_checkpoint_path is not None:
from collections import OrderedDict
import json
num_partitions = 8
if checkpoint_dict is None:
ckpt_name = "ds_model"
try:
from transformers.models.bloom.modeling_bloom import BloomForCausalLM
if isinstance(model, BloomForCausalLM):
ckpt_name = "bloom"
except ImportError:
ckpt_name = "ds_model"
else:
ckpt_name = checkpoint_dict['type']
if dist.is_initialized():
dist.barrier()
transformer_name = get_transformer_name(replaced_module)
non_tp_ckpt_name = 'non-tp.pt'
ckpt_files = [non_tp_ckpt_name]
os.makedirs(config.save_mp_checkpoint_path, exist_ok=True)
if not dist.is_initialized() or dist.get_rank() == 0:
print("Saving tp-sharded checkpoints")
torch.save(
OrderedDict({k: v
for k, v in dict(replaced_module.state_dict()).items()
if transformer_name not in k}), f'{config.save_mp_checkpoint_path}/{non_tp_ckpt_name}')
dtype_reprs = {
torch.float32: 'float32',
torch.float16: 'float16',
torch.int8: 'int8',
torch.bfloat16: 'bfloat16'
}
ckpt_config = json.dumps({
'type': ckpt_name,
'base_dir': f'{config.save_mp_checkpoint_path}',
'checkpoints': {
"non_tp": ckpt_files,
"tp": [f'tp_{r:0>2d}_{m:0>2d}.pt' for m in range(num_partitions) for r in range(world_size)]
},
'version': 1.0,
'parallelization': 'tp',
'tp_size': world_size,
'dtype': dtype_reprs[config.dtype]
})
with open(f"{config.save_mp_checkpoint_path}/ds_inference_config.json", "w") as cfg:
cfg.write(ckpt_config)
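# The ds_inference_config.json written above looks roughly like the following
# (illustrative values for a hypothetical 4-way TP fp16 run; real contents come
# from the dict built above):
# {
#   "type": "bloom", "base_dir": "<save_mp_checkpoint_path>",
#   "checkpoints": {"non_tp": ["non-tp.pt"],
#                   "tp": ["tp_00_00.pt", "...", "tp_03_07.pt"]},
#   "version": 1.0, "parallelization": "tp", "tp_size": 4, "dtype": "float16"
# }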
rep_sd = replaced_module.state_dict()
for n, p in replaced_module.named_parameters():
if hasattr(p, 'scale'):
rep_sd[n] = [p, p.scale]
keys = list(rep_sd.keys())
partition_size = (len(keys) // num_partitions + 1)
for m in range(num_partitions):
torch.save(
OrderedDict({
k: [rep_sd[k], rep_sd[k].scale] if hasattr(rep_sd[k], 'scale') else rep_sd[k]
for k in keys[m * partition_size:(m + 1) * partition_size] if transformer_name in k
}), f'{config.save_mp_checkpoint_path}/tp_{rank:0>2d}_{m:0>2d}.pt')
return replaced_module
def revert_transformer_layer(orig_layer_impl, model, config, preln=False):
""" Revert DeepSpeed's transformer layer back to original bert-style transformer layer
Arguments:
orig_layer_impl (torch.nn.Module): the original transformer layer implementation that was replaced,
e.g., transformers.modeling_bert.BertLayer.
model (torch.nn.Module): user's nn.module representing their model
config (dict): model config containing hidden size, attention heads, etc.
Returns:
Updated nn.module with original bert-style transformer layers
"""
def replace_fn(child, _replace_policy, layer_id):
#from turing.nvidia_modelingpreln import BertLayer
orig_module = orig_layer_impl(config)
# copy relevant state from child -> original module
qkvw = child.attn_qkvw.data
qkvb = child.attn_qkvb.data
qw, kw, vw = torch.chunk(qkvw, 3, axis=0)
qb, kb, vb = torch.chunk(qkvb, 3, axis=0)
orig_module.attention.self.query.weight.data = qw
orig_module.attention.self.query.bias.data = qb
orig_module.attention.self.key.weight.data = kw
orig_module.attention.self.key.bias.data = kb
orig_module.attention.self.value.weight.data = vw
orig_module.attention.self.value.bias.data = vb
orig_module.attention.output.dense.weight.data = child.attn_ow.data
orig_module.attention.output.dense.bias.data = child.attn_ob.data
attn_ln_w = child.attn_nw.data
attn_ln_b = child.attn_nb.data
if preln:
orig_module.PostAttentionLayerNorm.weight.data = attn_ln_w
orig_module.PostAttentionLayerNorm.bias.data = attn_ln_b
else:
orig_module.attention.output.LayerNorm.weight.data = attn_ln_w
orig_module.attention.output.LayerNorm.bias.data = attn_ln_b
inter_ff_w = child.inter_w.data
inter_ff_b = child.inter_b.data
if preln:
orig_module.intermediate.dense_act.weight.data = inter_ff_w
orig_module.intermediate.dense_act.bias.data = inter_ff_b
else:
orig_module.intermediate.dense.weight.data = inter_ff_w
orig_module.intermediate.dense.bias.data = inter_ff_b
orig_module.output.dense.weight.data = child.output_w.data
orig_module.output.dense.bias.data = child.output_b.data
transformer_ln_w = child.norm_w.data
transformer_ln_b = child.norm_b.data
if preln:
orig_module.PreAttentionLayerNorm.weight.data = transformer_ln_w
orig_module.PreAttentionLayerNorm.bias.data = transformer_ln_b
else:
orig_module.output.LayerNorm.weight.data = transformer_ln_w
orig_module.output.LayerNorm.bias.data = transformer_ln_b
return orig_module
return replace_module(model=model,
orig_class=deepspeed.DeepSpeedTransformerLayer,
replace_fn=replace_fn,
_replace_policy=None)
def replace_module(model, orig_class, replace_fn, _replace_policy, checkpoint=None):
""" Scan the model for instances of ``orig_clas:`` to replace using ``replace_fn``.
Arguments:
model (torch.nn.Module): the model to augment
orig_class (torch.nn.Module): the module to search for
replace_fn (method): a method to convert instances of ``orig_class`` to the
desired type and return a new instance.
Returns:
A modified ``model``.
"""
sd = None
if checkpoint is not None:
sd = torch.load(checkpoint, map_location='cpu')
policy = {}
if orig_class is not None:
policy.update({orig_class: (replace_fn, _replace_policy)})
else:
for plcy in replace_policies:
# instantiate a throw-away policy in order to populate the _orig_layer_class
_ = plcy(None)
if isinstance(plcy._orig_layer_class, list):
for orig_layer_class in plcy._orig_layer_class:
policy.update({orig_layer_class: (replace_fn, plcy)})
elif plcy._orig_layer_class is not None:
policy.update({plcy._orig_layer_class: (replace_fn, plcy)})
assert len(policy.items()) > 0,\
"No default policy found! Please specify your policy injection_policy (like {BertLayer:HFBEertLayerPolicy})." +\
"You can find some samples here: https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/module_inject/replace_policy.py"
replaced_module, _ = _replace_module(model, policy, state_dict=sd)
if checkpoint is not None:
embedding_weight = None
for n, p in replaced_module.named_parameters():
if "word_embeddings." in n or "embed_tokens." in n or "wte." in n:
embedding_weight = p
if embedding_weight is not None and hasattr(replaced_module, "lm_head") and hasattr(
replaced_module.lm_head, "weight") and replaced_module.lm_head.weight.is_meta:
replaced_module.lm_head.weight = embedding_weight
return replaced_module
from ..pipe import PipelineModule
import re
def skip_level_0_prefix(model, name):
model = str(model)
key = re.search(r": (.*?)Model", model)
if key is None:
key = re.search(r": (.*?)Stack", model)
if key is None:
key = re.match(r"(.*?)Model", model)
if key is not None and key.group(1).lower() in "bloom":
# if keys start with 'model.', don't skip level 0 prefix
if not re.match("^model[.]", name):
return True
return False
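# Illustrative behavior (module layout assumed): for a Bloom model, whose repr
# matches "... : BloomModel", a level-0 child such as "transformer" returns True,
# so its name is not added to the prefix used to build state_dict lookup keys in
# _replace_module below.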
def load_buffer(module, state_dict, prefix):
for name in module._buffers.keys():
if module._buffers[name].data.is_meta:
module._buffers[name] = torch.nn.parameter.Parameter(
data=torch.empty_like(module._buffers[name].data, device="cpu"),
requires_grad=module._buffers[name].data.requires_grad)
if prefix + name in state_dict.keys():
module._buffers[name].data.copy_(state_dict[prefix + name])
def _replace_module(model, policies, prefix='', layer_id=0, level_id=0, state_dict=None):
""" Traverse model's children recursively and apply any transformations in ``policies``.
Arguments:
model (torch.nn.Module): model to augment
policies (dict): Mapping of source class to replacement function.
Returns:
Modified ``model``.
"""
try:
import transformers
OPTLearnedPositionalEmbedding = transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding
except:
OPTLearnedPositionalEmbedding = None
load_layers = [nn.Linear, nn.Embedding, nn.LayerNorm, OPTLearnedPositionalEmbedding]
for name, child in model.named_children():
if child.__class__ in policies:
replaced_module = policies[child.__class__][0](child,
policies[child.__class__][-1],
layer_id,
prefix=prefix + name,
state_dict=state_dict)
setattr(model, name, replaced_module)
if isinstance(model, PipelineModule):
assert hasattr(model, 'forward_funcs'),\
"we require pipe-module to have the list of fwd_functions"
model.forward_funcs[model.fwd_map[name]] = replaced_module
layer_id += 1
else:
checking_key = prefix + name + '.'
if child.__class__ in load_layers and state_dict is not None:
if any(checking_key in item for item in state_dict):
load(
child,
state_dict,
checking_key,
)
else:
continue
if len(child._buffers) != 0 and state_dict is not None:
load_buffer(child, state_dict, checking_key)
_, layer_id = _replace_module(child,
policies,
prefix if level_id == 0 and skip_level_0_prefix(model, name) else \
prefix + name + '.',
layer_id=layer_id,
level_id=level_id + 1,
state_dict=state_dict)
# Add the reset_cache func to the model, so that it can be called in the beginning of text-generation.
model.reset_cache = transformer_inference.DeepSpeedTransformerInference.reset_cache
return model, layer_id
def load(module, state_dict, prefix, mp_group=None):
mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group)
if hasattr(module, 'weight'):
if module.weight.data.is_meta:
# a meta tensor cannot be cast or copied to, so replace it with a normal tensor here
module.weight = torch.nn.parameter.Parameter(data=torch.empty_like(module.weight.data, device="cpu"),
requires_grad=module.weight.data.requires_grad)
if 'query_key_value' in prefix:
module.weight = mp_replace.strided_copy(module.weight.data,
state_dict[prefix + 'weight'],
num_splits=3)
else:
module.weight = mp_replace.copy(module.weight.data, state_dict[prefix + 'weight'])
else:
if hasattr(module, 'norm') and hasattr(module.norm, 'weight'):
if module.norm.weight.data.is_meta:
# a meta tensor cannot be cast or copied to, so replace it with a normal tensor here
module.norm.weight = torch.nn.parameter.Parameter(data=torch.empty_like(module.norm.weight.data,
device="cpu"),
requires_grad=module.norm.weight.data.requires_grad)
module.norm.weight = mp_replace.copy(module.norm.weight.data, state_dict[prefix + 'weight'])
if prefix + 'bias' in state_dict.keys():
if hasattr(module, 'bias'):
if module.bias.data.is_meta:
# a meta tensor cannot be cast or copied to, so replace it with a normal tensor here
module.bias = torch.nn.parameter.Parameter(data=torch.empty_like(module.bias.data, device="cpu"),
requires_grad=module.bias.data.requires_grad)
module.bias = mp_replace.copy(module.bias, state_dict[prefix + 'bias'])
else:
if hasattr(module, 'norm') and hasattr(module.norm, 'bias'):
if module.norm.bias.data.is_meta:
# a meta tensor cannot be cast or copied to, so replace it with a normal tensor here
module.norm.bias = torch.nn.parameter.Parameter(data=torch.empty_like(module.norm.bias.data,
device="cpu"),
requires_grad=module.norm.bias.data.requires_grad)
module.norm.bias = mp_replace.copy(module.norm.bias, state_dict[prefix + 'bias'])
| 43,185 | 47.144928 | 156 | py |
DeepSpeed | DeepSpeed-master/deepspeed/module_inject/containers/distil_bert.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from deepspeed.model_implementations.transformers.ds_bert import DeepSpeedBERTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy
class DS_DistilBERTContainer(BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
self.triangular_masking = False
self.return_single_tuple = True
self.use_triton = kwargs['config'].use_triton and deepspeed.HAS_TRITON
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedBERTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
class HFDistilBertLayerPolicy(TransformerPolicy):
_orig_layer_class = None
def __init__(self, client_module, inference=False, preln=False):
super().__init__(inference)
self.client_module = client_module
self.preln = preln
self.cuda_graph_supported = True
if HFDistilBertLayerPolicy._orig_layer_class is None:
try:
import transformers
HFDistilBertLayerPolicy._orig_layer_class = [
transformers.models.distilbert.modeling_distilbert.TransformerBlock,
]
except:
HFDistilBertLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.attention.q_lin.weight.shape[1], \
self.client_module.attention.n_heads, \
self.client_module.sa_layer_norm.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
qw = self.client_module.attention.q_lin.weight
qb = self.client_module.attention.q_lin.bias
kw = self.client_module.attention.k_lin.weight
kb = self.client_module.attention.k_lin.bias
vw = self.client_module.attention.v_lin.weight
vb = self.client_module.attention.v_lin.bias
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training)
return qkvw, \
qkvb, \
self.client_module.attention.out_lin.weight, \
self.client_module.attention.out_lin.bias
def mlp(self, enable_training=False):
intermediate_ff = self.client_module.ffn.lin1
return intermediate_ff.weight, intermediate_ff.bias, \
self.client_module.ffn.lin2.weight, \
self.client_module.ffn.lin2.bias
def layernorm(self):
attention_layernorm = self.client_module.sa_layer_norm
transformer_layernorm = self.client_module.output_layer_norm
return attention_layernorm.weight, \
attention_layernorm.bias, \
transformer_layernorm.weight, \
transformer_layernorm.bias
| 3,188 | 37.421687 | 88 | py |
DeepSpeed | DeepSpeed-master/deepspeed/module_inject/containers/bert.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from deepspeed.model_implementations.transformers.ds_bert import DeepSpeedBERTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy
class DS_BERTContainer(BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
self.return_tuple = True
self.triangular_masking = False
self.use_triton = kwargs['config'].use_triton and deepspeed.HAS_TRITON
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedBERTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
class HFBertLayerPolicy(TransformerPolicy):
def __init__(self, client_module, inference=False):
super().__init__(inference, pre_attn_norm=False)
self.client_module = client_module
self.cuda_graph_supported = True
if HFBertLayerPolicy._orig_layer_class is None:
try:
import transformers
HFBertLayerPolicy._orig_layer_class = [
transformers.models.bert.modeling_bert.BertLayer,
transformers.models.roberta.modeling_roberta.RobertaLayer
]
except:
HFBertLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
if self.pre_attn_norm:
attention_layernorm = self.client_module.PostAttentionLayerNorm
else:
attention_layernorm = self.client_module.attention.output.LayerNorm
return self.client_module.attention.self.query.weight.shape[1], \
self.client_module.attention.self.num_attention_heads, \
attention_layernorm.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
qw = self.client_module.attention.self.query.weight
qb = self.client_module.attention.self.query.bias
kw = self.client_module.attention.self.key.weight
kb = self.client_module.attention.self.key.bias
vw = self.client_module.attention.self.value.weight
vb = self.client_module.attention.self.value.bias
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training)
return qkvw, \
qkvb, \
self.client_module.attention.output.dense.weight, \
self.client_module.attention.output.dense.bias
def mlp(self, enable_training=False):
if self.pre_attn_norm:
intermediate_ff = self.client_module.intermediate.dense_act
else:
intermediate_ff = self.client_module.intermediate.dense
return intermediate_ff.weight, intermediate_ff.bias, \
self.client_module.output.dense.weight, \
self.client_module.output.dense.bias
def layernorm(self):
if self.pre_attn_norm:
attention_layernorm = self.client_module.PostAttentionLayerNorm
transformer_layernorm = self.client_module.PreAttentionLayerNorm
else:
attention_layernorm = self.client_module.attention.output.LayerNorm
transformer_layernorm = self.client_module.output.LayerNorm
return attention_layernorm.weight, \
attention_layernorm.bias, \
transformer_layernorm.weight, \
transformer_layernorm.bias
| 3,769 | 39.106383 | 87 | py |
DeepSpeed | DeepSpeed-master/deepspeed/module_inject/containers/base.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# Create a container object to save model-specific tensors using the policy file above.
from abc import ABC
import torch
import deepspeed
from deepspeed.ops.transformer.inference.config import DeepSpeedInferenceConfig
from deepspeed.accelerator import get_accelerator
# If the intermediate size attribute is set to DEFAULT_INTERMEDIATE_SIZE,
# the intermediate size is assumed to be 4x the embedding dimension
DEFAULT_INTERMEDIATE_SIZE = -1
class BaseConvolutionContainer(ABC):
# not implemented
def __init__(self):
pass
class BaseTransformerContainer(ABC):
def __init__(self, policy, config, model_config, layer_id, child):
self.policy = policy
self.config = config
self.model_config = model_config
self.layer_id = layer_id
self.child = child
self.megatron_v2 = self.policy.is_megatron_v2
self.scale_attention = self.policy.scale_attention
self.ckpt_load_enabled = False
# configuration for models. todo: can this be moved to a pydantic model config?
self.hidden_size = None
self.intermediate_size = None
self.num_attention_heads = None
self.mp_size = self.config.tensor_parallel.tp_size
self.pre_layer_norm = self.model_config.do_layer_norm_before if \
hasattr(self.model_config, 'do_layer_norm_before') else self.policy.pre_attn_norm
self.dtype = self.config.dtype
self.attn_linear_layer = self.policy.linear_layer
self.mlp_linear_layer = self.policy.linear_layer
self.return_tuple = self.config.return_tuple
self.triangular_masking = True
self.local_attention = ((self.model_config.attention_layers[self.layer_id] == "local") if hasattr(
self.model_config, 'attention_layers') else False)
self.window_size = getattr(self.model_config, "window_size", 1)
self.mlp_act_func_type = self.policy.mlp_act_func_type
self.norm_type = self.policy.norm_type
self.training_mp_size = self.config.training_mp_size
self.bigscience_bloom = False
self.max_out_tokens = self.config.max_out_tokens
self.min_out_tokens = self.config.min_out_tokens
self.scale_attn_by_inverse_layer_idx = getattr(self.config, "scale_attn_by_inverse_layer_idx", False)
self.use_mup = self.policy.use_mup
self.return_single_tuple = False
self.rotary_dim = self.get_rotary_dim()
self.mlp_after_attn = (self.rotary_dim is None or self.rotary_dim < 0)
# Attention tensors
self.qkvw = None
self.qkvb = None
self.dense_w = None
self.dense_b = None
# MLP tensors
self._h4h_w = None
self._h4h_b = None
self._4hh_w = None
self._4hh_b = None
# LayerNorm tensors
self.attn_nw = None
self.attn_nb = None
self.input_nw = None
self.input_nb = None
self.mp_group = None
self.use_triton = False
# Triton
self.use_triton = config.use_triton and deepspeed.HAS_TRITON
def create_ds_model_config(self):
self.set_hidden_heads(*self.policy.get_hidden_heads())
assert self.num_attention_heads % self.mp_size == 0,\
"To run the model parallel across the GPUs, the attention_heads require to be divisible by the world_size!" +\
"This is because the attention computation is partitioned evenly among the parallel GPUs."
self.ds_model_config = DeepSpeedInferenceConfig(
hidden_size=self.hidden_size,
intermediate_size=self.intermediate_size,
heads=self.num_attention_heads,
layer_norm_eps=self.layernorm_epsilon,
dtype=self.dtype,
pre_layer_norm=self.pre_layer_norm,
norm_type=self.norm_type,
mp_size=self.mp_size,
return_tuple=self.return_tuple,
triangular_masking=self.triangular_masking,
local_attention=self.local_attention,
window_size=self.window_size,
rotary_dim=self.rotary_dim,
mlp_after_attn=self.mlp_after_attn,
mlp_act_func_type=self.mlp_act_func_type,
training_mp_size=self.training_mp_size,
bigscience_bloom=self.bigscience_bloom,
max_out_tokens=self.max_out_tokens,
min_out_tokens=self.min_out_tokens,
scale_attn_by_inverse_layer_idx=self.scale_attn_by_inverse_layer_idx,
use_mup=self.use_mup,
return_single_tuple=self.return_single_tuple,
set_empty_params=self.config.set_empty_params,
transposed_mode=self.config.transposed_mode,
use_triton=self.use_triton,
triton_autotune=self.config.triton_autotune)
if self.use_triton and deepspeed.HAS_TRITON:
if not self.config.triton_autotune:
from deepspeed.ops.transformer.inference.triton.matmul_ext import fp16_matmul
fp16_matmul.skip_autotune()
return self.ds_model_config
def check_meta_tensor_support(self):
if hasattr(self.qkvw, 'is_meta'):
if self.qkvw.is_meta:
assert self.ckpt_load_enabled, "Meta tensors are not supported for this model currently."
else:
raise NotImplementedError("Meta tensor support is not available, please upgrade to torch 1.10+")
def initialize_tensors(self, enable_training=False):
# Set the tensors from policy (user module) to container (DS module)
self.set_attention(*self.policy.attention(enable_training=enable_training))
self.set_mlp(*self.policy.mlp(enable_training=enable_training))
self.set_layernorm(*self.policy.layernorm())
self.check_meta_tensor_support()
def convert_to_required_dtype(self):
# Note: converting tensors to fp16 requires that we do it in-place using self.__dict__ and not make a list/dict copy
if self.dtype in [torch.half, torch.bfloat16]:
for k, v in self.__dict__.items():
# The list comprehension is used for MoE tensor lists
if isinstance(v, list) and all((isinstance(tensor, torch.Tensor) \
or isinstance(tensor, torch.nn.Parameter)) for tensor in v):
self.__dict__[k] = [moe_tensor.to(self.dtype) for moe_tensor in v]
if isinstance(v, torch.Tensor) or isinstance(v, torch.nn.Parameter):
self.__dict__[k] = v.to(self.dtype)
def get_rotary_dim(self):
if hasattr(self.model_config, 'rotary_dim'):
return self.model_config.rotary_dim
if hasattr(self.child, 'attention') and hasattr(self.child.attention, 'rotary_ndims'):
return self.child.attention.rotary_ndims
return -1
def set_moe(self, moe=False):
self.moe = moe
def set_tensor_parallel_config(self, mp_size, mp_group):
self.mp_size = mp_size
self.mp_group = mp_group
def set_quantization_config(self, quantizer):
self.quantizer = quantizer
def set_hidden_heads(self, hidden_size, num_attention_heads, epsilon, intermediate_size):
"""
Args:
hidden_size: embedding dimension of the model
num_attention_heads: number of attention heads in the model
epsilon: epsilon value for layer norm (same value used for all norms)
intermediate_size: Size of MLP projection. If `DEFAULT_INTERMEDIATE_SIZE` is passed
it is assumed to be `4 * hidden_size`
"""
self.hidden_size = hidden_size
if intermediate_size == DEFAULT_INTERMEDIATE_SIZE:
self.intermediate_size = 4 * hidden_size
else:
self.intermediate_size = intermediate_size
self.num_attention_heads = num_attention_heads
self.layernorm_epsilon = epsilon
def set_attention(self, qkvw, qkvb, dense_w, dense_b):
self.qkvw = qkvw
self.qkvb = qkvb
self.dense_w = dense_w
self.dense_b = dense_b
def set_mlp(self, _h4h_w, _h4h_b, _4hh_w, _4hh_b):
self._h4h_w = _h4h_w
self._h4h_b = _h4h_b
self._4hh_w = _4hh_w
self._4hh_b = _4hh_b
def set_layernorm(self, attn_nw, attn_nb, input_nw, input_nb):
self.attn_nw = attn_nw
self.attn_nb = attn_nb
self.input_nw = input_nw
self.input_nb = input_nb
def apply_weight_quantization(self):
# quantize attention weights
self.attention_quantization()
# quantize mlp weights
self.mlp_quantization()
def attention_quantization(self):
self.module.attention.attn_qkvw = self.quantizer.quantize(self.module.attention.attn_qkvw)
self.module.attention.attn_ow = self.quantizer.quantize(self.module.attention.attn_ow)
def mlp_quantization(self):
self.module.mlp.inter_w = self.quantizer.quantize(self.module.mlp.inter_w)
self.module.mlp.output_w = self.quantizer.quantize(self.module.mlp.output_w)
def apply_tensor_parallelism(self, mp_replace):
# setup the new Attention module
self.attention_qkv_mp(mp_replace)
self.attention_o_mp(mp_replace)
# setup the new MLP module
self.mlp_inter_mp(mp_replace)
self.mlp_output_mp(mp_replace)
# Apply weight quantization
# TODO(cmikeh2): Re-enable this once verified
#self.apply_weight_quantization()
def attention_qkv_mp(self, mp_replace, reversed_dim=False):
self.module.attention.attn_qkvw = mp_replace.strided_copy(self.module.attention.attn_qkvw,
self.qkvw,
num_splits=3,
int8=reversed_dim)
self.module.attention.attn_qkvb = mp_replace.strided_copy(self.module.attention.attn_qkvb,
self.qkvb,
num_splits=3,
int8=reversed_dim)
def attention_o_mp(self, mp_replace, reversed_dim=False):
self.module.attention.attn_ow = mp_replace.copy(self.module.attention.attn_ow, self.dense_w, int8=reversed_dim)
self.module.attention.attn_ob = mp_replace.copy(self.module.attention.attn_ob,
self.dense_b,
int8=reversed_dim,
allocate_tensor=reversed_dim)
def mlp_inter_mp(self, mp_replace, reversed_dim=False):
self.module.mlp.inter_w = mp_replace.copy(self.module.mlp.inter_w, self._h4h_w, int8=reversed_dim)
self.module.mlp.inter_b = mp_replace.copy(self.module.mlp.inter_b, self._h4h_b, int8=reversed_dim)
def mlp_output_mp(self, mp_replace, reversed_dim=False):
self.module.mlp.output_w = mp_replace.copy(self.module.mlp.output_w, self._4hh_w, int8=reversed_dim)
self.module.mlp.output_b = mp_replace.copy(self.module.mlp.output_b,
self._4hh_b,
int8=reversed_dim,
allocate_tensor=reversed_dim)
def copy_data_to_new_module(self):
params = {'attn_nw': self.attn_nw, 'attn_nb': self.attn_nb}
for key in params:
if params[key] is None:
setattr(self.module.mlp, key, None)
else:
setattr(self.module.mlp, key,
torch.nn.parameter.Parameter(params[key].to(get_accelerator().current_device_name())))
params = {'norm_w': self.input_nw, 'norm_b': self.input_nb}
for key in params:
if params[key] is None:
setattr(self.module, key, None)
else:
setattr(self.module, key,
torch.nn.parameter.Parameter(params[key].to(get_accelerator().current_device_name())))
def transpose(self):
self.transpose_attention()
self.transpose_mlp()
def transpose_attention(self):
if self.attn_linear_layer:
self.qkvw = self.transpose_impl(self.qkvw.data)
self.dense_w = self.transpose_impl(self.dense_w.data)
def transpose_mlp(self):
if self.mlp_linear_layer:
self._h4h_w = self.transpose_impl(self._h4h_w.data)
self._4hh_w = self.transpose_impl(self._4hh_w.data)
def transpose_impl(self, data):
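# Same in-place transpose trick as used in the injection helpers above: copy the
# transposed values into the flat view of the tensor, then reshape the buffer to
# the swapped dimensions.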
data = data.contiguous()
data.reshape(-1).copy_(data.transpose(-1, -2).contiguous().reshape(-1))
data = data.reshape(data.shape[-1], data.shape[-2])
data.to(get_accelerator().current_device_name())
return data
def get_all_params(self):
params = [
self.attn_nw,
self.attn_nb,
self.input_nw,
self.input_nb,
]
params.extend(self.get_attn_params())
params.extend(self.get_mlp_params())
return params
def get_attn_params(self):
return [self.qkvw, self.qkvb, self.dense_w, self.dense_b]
def get_mlp_params(self):
return [self._h4h_w, self._h4h_b, self._4hh_w, self._4hh_b]
| 13,564 | 41.523511 | 126 | py |