| id (int64, 0–190k) | prompt (string, 21–13.4M chars) | docstring (string, 1–12k chars; ⌀ = null) |
|---|---|---|
10,354 | import re
import collections.abc
import os
import json
from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, TRAIN_MICRO_BATCH_SIZE_PER_GPU
import itertools
import copy
from ..utils import logger
def remove_dupe_dicts(l):
""" Removes duplicate dictionaries from a list. Uses list comprehension and the json library to sort and stringify each dictionary and the set data type to ensure unique values. Works with nested data structures.
Args:
l (list): a list of (nested) data structures.
Returns:
A list of unique values.
"""
list_of_strings = [json.dumps(d, sort_keys=True) for d in l]
list_of_strings = set(list_of_strings)
return [json.loads(s) for s in list_of_strings]
def prune_config(config, ignored_keys=[]):
""" Prunes the input configurations
Args:
configs (dict): A configuration dictionary.
ignored_keys (list, optional): the keys of the sections to delete. Defaults to [].
Returns:
A configuration dictionary.
"""
if ignored_keys:
for k in ignored_keys:
def find_del_key(d: dict, k: str):
if k in d:
del d[k]
else:
for dd in d.values():
if isinstance(dd, dict):
find_del_key(dd, k)
find_del_key(config, k)
The provided code snippet includes necessary dependencies for implementing the `prune_configs` function. Write a Python function `def prune_configs(configs, ignored_keys=[])` to solve the following problem:
Prunes the input list of configurations Args: configs (list): A list of configuration dictionaries. ignored_keys (list, optional): the keys of the sections to delete. Defaults to []. Returns: A list of valid and unique configuration dictionaries.
Here is the function:
def prune_configs(configs, ignored_keys=[]):
""" Prunes the input list of configurations
Args:
configs (list): A list of configuration dictionaries.
ignored_keys (list, optional): the keys of the sections to delete. Defaults to [].
Returns:
A list of valid and unique configuration dictionaries.
"""
pruned_list = []
for config in configs:
prune_config(config, ignored_keys)
pruned_list.append(config)
return remove_dupe_dicts(pruned_list) | Prunes the input list of configurations Args: configs (list): A list of configuration dictionaries. ignored_keys (list, optional): the keys of the sections to delete. Defaults to []. Returns: A list of valid and unique configuration dictionaries. |
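A minimal usage sketch (the configs below are made up for illustration): pruning deletes the ignored sections in place, after which the two now-identical stage-1 configs collapse into one.

    configs = [
        {"zero_optimization": {"stage": 1}, "fp16": {"enabled": True}},
        {"zero_optimization": {"stage": 1}, "fp16": {"enabled": True}},  # duplicate once fp16 is pruned
        {"zero_optimization": {"stage": 2}, "fp16": {"enabled": True}},
    ]
    unique = prune_configs(configs, ignored_keys=["fp16"])
    assert len(unique) == 2  # order is not guaranteed, since a set is used internally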
10,355 | import re
import collections.abc
import os
import json
from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, TRAIN_MICRO_BATCH_SIZE_PER_GPU
import itertools
import copy
from ..utils import logger
The provided code snippet includes necessary dependencies for implementing the `get_tuning_keys` function. Write a Python function `def get_tuning_keys(tuning_space: dict)` to solve the following problem:
Outputs the list of tunable parameters in the tuning space dict. Args: tuning_space (dict): a configuration dictionary containing tunable parameters as lists of values. Returns: A list of strings
Here is the function:
def get_tuning_keys(tuning_space: dict):
"""Outputs the list of tunnable parameters in the tuning space dict.
Args:
tuning_space (dict): a configuration dictionary containing tunable parameters as lists of values.
Returns:
A list of strings
"""
tuning_keys = []
for key, val in tuning_space.items():
if isinstance(val, dict):
tuning_keys.extend(get_tuning_keys(val))
if isinstance(val, list) and len(val) > 1:
tuning_keys.append(key)
return tuning_keys | Outputs the list of tunable parameters in the tuning space dict. Args: tuning_space (dict): a configuration dictionary containing tunable parameters as lists of values. Returns: A list of strings |
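A quick sketch of what counts as tunable (hypothetical tuning space): only lists with more than one value are reported, at any nesting depth.

    space = {
        "train_micro_batch_size_per_gpu": [1, 2, 4],
        "zero_optimization": {"stage": [1, 2, 3], "overlap_comm": True},
        "gradient_accumulation_steps": [1],  # single-element list, so not tunable
    }
    print(sorted(get_tuning_keys(space)))  # ['stage', 'train_micro_batch_size_per_gpu']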
10,356 | import re
import collections.abc
import os
import json
from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, TRAIN_MICRO_BATCH_SIZE_PER_GPU
import itertools
import copy
from ..utils import logger
def get_list(val):
if not isinstance(val, list):
return [val]
else:
return val
def del_if_exists(t, d):
"""Deletes a key from a dictionary if it exists.
Args:
t (string): target key to delete
d (dict): dictionary to delete from
"""
if t in d:
del d[t]
return
for k, v in d.items():
if isinstance(v, collections.abc.Mapping):
del_if_exists(t, v)
def replace_dict(d, u, ignored_keys=[]):
"""Replaces values in dict d with values in dict u.
Args:
d (dict): the target dict to overwrite
u (dict): the dict containing the values to overwrite the target dict
Returns:
dict d with values overwritten by the corresponding ones in dict u.
"""
if u is not None:
for k, v in u.items():
if k not in ignored_keys:
if v is None:
del_if_exists(k, d)
continue
if isinstance(v, collections.abc.Mapping):
d[k] = replace_dict(d.get(k, {}), v, ignored_keys)
else:
d[k] = v
return d
The provided code snippet includes necessary dependencies for implementing the `get_all_configs` function. Write a Python function `def get_all_configs(tuning_space: dict, ignore_keys=None)` to solve the following problem:
Splits the tuning space dictionary to result in all combinations of values. Args: tuning_space (dict): the tuning space where tunable parameters are lists of values.
Here is the function:
def get_all_configs(tuning_space: dict, ignore_keys=None):
""" Splits the tuning space dictionary to result in all combinations of values.
Args:
tuning_space (dict): the tuning space where tunable parameters are lists of values.
"""
def gen_combinations(d: dict):
keys, values = d.keys(), d.values()
# recurse into nested dicts; get_list wraps scalars so itertools.product always sees iterables
values_choices = (gen_combinations(v) if isinstance(v, dict) else get_list(v) for v in values)
for comb in itertools.product(*values_choices):
yield dict(zip(keys, comb))
all_configs = []
ignored_key_vals = {}
ignore_keys = ignore_keys or []  # guard against the default of None
for ik in ignore_keys:
ignored_key_vals[ik] = tuning_space.get(ik, {})
del_if_exists(ik, tuning_space)
for c in gen_combinations(tuning_space):
replace_dict(c, ignored_key_vals)
all_configs.append(c)
return all_configs | Splits the tuning space dictionary to result in all combinations of values. Args: tuning_space (dict): the tuning space where tunable parameters are lists of values. |
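A small illustrative sketch: two tunable keys expand into their cross product.

    space = {
        "train_micro_batch_size_per_gpu": [1, 2],
        "zero_optimization": {"stage": [2, 3]},
    }
    configs = get_all_configs(space, ignore_keys=[])
    assert len(configs) == 4  # 2 batch sizes x 2 ZeRO stages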
10,357 | import re
import collections.abc
import os
import json
from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, TRAIN_MICRO_BATCH_SIZE_PER_GPU
import itertools
import copy
from ..utils import logger
TRAIN_MICRO_BATCH_SIZE_PER_GPU = '''
TRAIN_MICRO_BATCH_SIZE_PER_GPU is defined in this format:
"train_micro_batch_size_per_gpu": 1
'''
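# the triple-quoted format note above is immediately rebound to the actual key string below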
TRAIN_MICRO_BATCH_SIZE_PER_GPU = "train_micro_batch_size_per_gpu"
GRADIENT_ACCUMULATION_STEPS = "gradient_accumulation_steps"
The provided code snippet includes necessary dependencies for implementing the `canonical_name` function. Write a Python function `def canonical_name(config: dict, tuning_keys=None, prefix="", omit_val=False)` to solve the following problem:
Generates a name from the acronyms of the tuning keys in the config dict. TRAIN_MICRO_BATCH_SIZE_PER_GPU is always included in the tuning keys. Args: config (dict): the config dict used to generate the name tuning_keys (list, optional): the tuning keys used to generate the name. Defaults to None. prefix (str, optional): a string added to the beginning of the name. Defaults to None.
Here is the function:
def canonical_name(config: dict, tuning_keys=None, prefix="", omit_val=False):
""" Generates a name from the acronyms of the tuning keys in the config dict. TRAIN_MICRO_BATCH_SIZE_PER_GPU is always included in the tuning keys.
Args:
config (dict): the config dict used to generate the name
tuning_keys (list, optional): the tuning keys used to generate the name. Defaults to None.
prefix (str, optional): a string added to the beginning of the name. Defaults to None.
"""
if tuning_keys is None:
tuning_keys = []
if TRAIN_MICRO_BATCH_SIZE_PER_GPU not in tuning_keys:
tuning_keys.append(TRAIN_MICRO_BATCH_SIZE_PER_GPU)
if GRADIENT_ACCUMULATION_STEPS not in tuning_keys:
tuning_keys.append(GRADIENT_ACCUMULATION_STEPS)
tuning_keys.sort()
def get_offload_name(offload_config):
cname = ""
if offload_config is None:
return "None_"
for key, val in offload_config.items():
key = "".join(map(lambda c: c[0], key.split('_')))
if (isinstance(val, int) or isinstance(val, float)) and val > 9000:
cname += key + '{:.1e}'.format(val) + "_"
else:
if isinstance(val, bool):
val = "T" if val else "F"
cname += f"{key}{val}_"
return cname
def get_name_by_keys(config: dict, tuning_keys=None, omit_val=False):
cname = ""
if not tuning_keys or config is None:
return cname
for key, val in config.items():
# skip the arg_mappings section when naming the exp file
if key == "arg_mappings":
continue
if key == "offload_param":
cname += "op_"
if not omit_val:
cname += get_offload_name(val)
continue
if key == "offload_optimizer":
cname += "oo_"
if not omit_val:
cname += get_offload_name(val)
continue
# recursively call the func to get name for the child dicts
if isinstance(val, dict):
n = get_name_by_keys(val, tuning_keys, omit_val=omit_val)
if n != "":
cname += n + "_"
if tuning_keys and key not in tuning_keys:
continue
key_str = "".join(map(lambda c: c[0], key.split('_')))
if not omit_val:
if (isinstance(val, int) or isinstance(val, float)) and val > 9000:
cname += key_str + '{:.1e}'.format(val) + "_"
else:
if isinstance(val, bool):
val = "T" if val else "F"
cname += f"{key_str}{val}_"
else:
cname += key_str + "_"
return cname[:-1]
name = get_name_by_keys(config, tuning_keys, omit_val=omit_val)
return prefix + (name if name != "" else "exp") | Generates a name from the acronyms of the tuning keys in the config dict. TRAIN_MICRO_BATCH_SIZE_PER_GPU is always included in the tuning keys. Args: config (dict): the config dict used to generate the name tuning_keys (list, optional): the tuning keys used to generate the name. Defaults to None. prefix (str, optional): a string added to the beginning of the name. Defaults to None. |
10,358 | import re
import collections.abc
import os
import json
from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, TRAIN_MICRO_BATCH_SIZE_PER_GPU
import itertools
import copy
from ..utils import logger
def get_first_config(config: dict):
if not config:
return None
cfg = copy.deepcopy(config)
for key, val in cfg.items():
if isinstance(val, dict):
if key == "optimizer": # use user defined optimizer which might have lists of values as params
cfg[key] = val
else:
cfg[key] = get_first_config(val)
if isinstance(val, list) and len(val) > 0:
cfg[key] = val[0]
return cfg | null |
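In other words, every list in the tuning space is collapsed to its first element (the input is deep-copied, so the space itself is untouched). A quick sketch:

    space = {
        "train_micro_batch_size_per_gpu": [1, 2, 4],
        "zero_optimization": {"stage": [2, 3]},
    }
    print(get_first_config(space))
    # {'train_micro_batch_size_per_gpu': 1, 'zero_optimization': {'stage': 2}}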
10,359 | import re
import collections.abc
import os
import json
from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, TRAIN_MICRO_BATCH_SIZE_PER_GPU
import itertools
import copy
from ..utils import logger
def write_experiments(exps: list, exps_dir: str):
exp_paths = []
for exp in exps:
exp_name = exp['name']
# write the expr config to a json file
exp_path = os.path.join(exps_dir, f'{exp_name}.json')
with open(exp_path, 'w') as fd:
json.dump(exp, fd)
exp_paths.append(exp_path)
return exp_paths | null |
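A minimal sketch of how write_experiments might be driven (hypothetical experiment dict; any JSON-serializable exp with a 'name' key works):

    import tempfile

    exps = [{"name": "exp_tmbspg4", "ds_config": {"train_micro_batch_size_per_gpu": 4}}]
    with tempfile.TemporaryDirectory() as d:
        paths = write_experiments(exps, d)  # writes one <name>.json per experiment
        print(paths)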
10,360 | import re
import collections.abc
import os
import json
from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, TRAIN_MICRO_BATCH_SIZE_PER_GPU
import itertools
import copy
from ..utils import logger
def memory_to_string(n, postfix="", units=None, precision=2):
if units is None:
if n // 10**12 > 0:
return str(round(n / 1024**4, precision)) + " T" + postfix
if n // 10**9 > 0:
return str(round(n / 1024**3, precision)) + " G" + postfix
elif n // 10**6 > 0:
return str(round(n / 1024**2, precision)) + " M" + postfix
elif n // 10**3 > 0:
return str(round(n / 1024, precision)) + " K" + postfix
else:
return str(n) + " "
else:
if units == "T":
return str(round(n / 1024**4, precision)) + " " + units
if units == "G" + postfix:
return str(round(n / 1024**3, precision)) + " " + units
elif units == "M" + postfix:
return str(round(n / 1024**2, precision)) + " " + units
elif units == "K" + postfix:
return str(round(n / 1024, precision)) + " " + units
else:
return str(n) + " " | null |
10,361 | import re
import collections.abc
import os
import json
from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, TRAIN_MICRO_BATCH_SIZE_PER_GPU
import itertools
import copy
from ..utils import logger
def number_to_string(n, postfix="", units=None, precision=2):
if units is None:
if n // 10**9 > 0:
return str(round(n / 1000**3, precision)) + " B" + postfix
if n // 10**6 > 0:
return str(round(n / 1000**2, precision)) + " M" + postfix
elif n // 10**3 > 0:
return str(round(n / 1000**1, precision)) + " K" + postfix
else:
return str(n) + " "
else:
if units == "B" + postfix:
return str(round(n / 1000**3, precision)) + " " + units
elif units == "M" + postfix:
return str(round(n / 1000**2, precision)) + " " + units
elif units == "K" + postfix:
return str(round(n / 1000**1, precision)) + " " + units
else:
return str(n) + " " | null |
10,362 | import numpy as np
import itertools
from ..utils import *
import collections.abc
The provided code snippet includes necessary dependencies for implementing the `index_to_feature` function. Write a Python function `def index_to_feature(p, dims)` to solve the following problem:
convert index form (single integer) to feature form (vector)
Here is the function:
def index_to_feature(p, dims):
"""convert index form (single integer) to feature form (vector)"""
feature = []
for dim in dims:
feature.append(p % dim)
p //= dim
return feature | convert index form (single integer) to feature form (vector) |
10,363 | import numpy as np
import itertools
from ..utils import *
import collections.abc
The provided code snippet includes necessary dependencies for implementing the `feature_to_index` function. Write a Python function `def feature_to_index(feature, dims)` to solve the following problem:
convert feature form (vector) to index form (single integer)
Here is the function:
def feature_to_index(feature, dims):
"""convert feature form (vector) to index form (single integer)"""
p = 0
for j, k in enumerate(feature):
print("j:", "k:", k, "dims", dims[:j])
p += int(np.prod(dims[:j])) * k
return p | convert feature form (vector) to index form (single integer) |
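The two functions are inverses over a mixed-radix encoding (least-significant dimension first), which a quick round-trip check confirms:

    dims = [3, 4, 5]
    for p in range(3 * 4 * 5):
        assert feature_to_index(index_to_feature(p, dims), dims) == p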
10,364 | import numpy as np
import itertools
from ..utils import *
import collections.abc
def dict_to_dims(tuning_space):
dims = []
for key, val in tuning_space.items():
if isinstance(val, dict):
dims.extend(dict_to_dims(val))
elif isinstance(val, list):
dims.append(len(val))
else:
dims.append(1)
return dims | null |
10,365 | import numpy as np
import itertools
from ..utils import *
import collections.abc
import itertools
def get_list(val):
if not isinstance(val, list):
return [val]
else:
return val
def gen_combinations(d: dict):
keys, values = d.keys(), d.values()
# recurse into nested dicts; get_list wraps scalars so itertools.product always sees iterables
values_choices = (gen_combinations(v) if isinstance(v, dict) else get_list(v) for v in values)
for comb in itertools.product(*values_choices):
yield dict(zip(keys, comb)) | null |
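A short sketch of the expansion order (outer keys vary slowest):

    space = {"a": [1, 2], "b": {"c": [True, False]}}
    print(list(gen_combinations(space)))
    # [{'a': 1, 'b': {'c': True}}, {'a': 1, 'b': {'c': False}},
    #  {'a': 2, 'b': {'c': True}}, {'a': 2, 'b': {'c': False}}]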
10,366 | import numpy as np
import itertools
from ..utils import *
import collections.abc
import collections.abc
def flatten(d, parent_key='', sep='_'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items) | null |
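Nested keys are joined with the separator, e.g.:

    nested = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
    print(flatten(nested))
    # {'zero_optimization_stage': 3, 'zero_optimization_offload_param_device': 'cpu'}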
10,367 | import numpy as np
import itertools
from ..utils import *
import collections.abc
The provided code snippet includes necessary dependencies for implementing the `dict_to_feature` function. Write a Python function `def dict_to_feature(feature_dict, keys, max_value=None)` to solve the following problem:
Extract values from dict
Here is the function:
def dict_to_feature(feature_dict, keys, max_value=None):
"""Extract values from dict"""
feature = []
for key, val in feature_dict.items(): # First level
if key not in keys:
continue
if val is None or val == "auto" or key == "autotuning" or val == "":
continue
if isinstance(val, dict):
feature.append(dict_to_feature(val, keys))  # recurse with the same key filter
else:
feature.append(float(val))
# normalization, should not matter in tree models
if max_value is not None:
norm_feature = []
for f, mv in zip(feature, max_value):
norm_feature.append(f / mv)
feature = norm_feature
return feature | Extract values from dict |
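A flat illustrative example (keys filters which entries become features; values are cast to float):

    cfg = {
        "train_micro_batch_size_per_gpu": 8,
        "gradient_accumulation_steps": 2,
        "steps_per_print": "auto",  # skipped: not in keys (and "auto" is filtered anyway)
    }
    print(dict_to_feature(cfg, keys=["train_micro_batch_size_per_gpu", "gradient_accumulation_steps"]))
    # [8.0, 2.0]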
10,368 | from deepspeed.runtime.config_utils import get_scalar_param, get_dict_param, DeepSpeedConfigObject
from deepspeed.autotuning.constants import *
def get_scalar_param(param_dict, param_name, param_default_value):
return param_dict.get(param_name, param_default_value)
def get_model_info_config(param_dict):
if MODEL_INFO in param_dict and param_dict[MODEL_INFO] is not None:
model_info_config = {}
for key, default_value in MODEL_INFO_KEY_DEFAULT_DICT.items():
model_info_config[key] = get_scalar_param(param_dict[MODEL_INFO],
key,
default_value)
return model_info_config
return None | null |
10,369 | from deepspeed.runtime.config_utils import get_scalar_param, get_dict_param, DeepSpeedConfigObject
from deepspeed.autotuning.constants import *
def get_default_model_info_config():
return MODEL_INFO_KEY_DEFAULT_DICT | null |
10,370 | import copy
from numpy import BUFSIZE
import json
import subprocess
import sys
import threading
import time
import base64
import os
import hjson
from tqdm import tqdm
from ..utils import logger
from .constants import AUTOTUNING, AUTOTUNING_METRIC_PATH
from .utils import get_val_by_key, search_error, was_interruptted
from deepspeed import comm as dist
PDSH_MAX_FAN_OUT = 1024  # from DeepSpeed's launcher constants; inlined since the import was dropped from this snippet
def get_job_id():
# Infrastructure-specific job-id
infra_job_id = None
if "DLWS_JOB_ID" in os.environ:
infra_job_id = os.environ["DLWS_JOB_ID"]
elif "DLTS_JOB_ID" in os.environ:
infra_job_id = os.environ["DLTS_JOB_ID"]
else:
infra_job_id = "unknown-job-id"
return infra_job_id
def get_user():
user = None
if "USER" in os.environ:
user = os.environ["USER"]
else:
user = "unknown-user"
return user
def clean_up(exp: dict, reservations):
env = os.environ.copy()
env['PDSH_RCMD_TYPE'] = 'ssh'
nodes_str = ""
for reservation in reservations:
nodes_str += f"{reservation.node.host},"
nodes_str = nodes_str[:-1]
logger.debug(
f"Cleaning up exp_id = {exp['exp_id']} on the following workers: {nodes_str}")
# PDSH flags for max node fan out and specific hosts to launch on
# See https://linux.die.net/man/1/pdsh for flag details
pdsh_cmd = ['pdsh', '-f', str(PDSH_MAX_FAN_OUT), '-w', nodes_str]
kill_cmd = [
'pkill',
'-f',
exp['name'],
]
cmd = pdsh_cmd + kill_cmd
logger.debug("cmd = {}".format(' '.join(cmd)))
result = subprocess.Popen(cmd, env=env)
result.wait()
# In case of failure must propagate the error-condition back to the caller (usually shell). The
# actual error and traceback should have been printed in the subprocess, so in order to avoid
# unnecessary noise we just quietly exit here with the same code as the subprocess
if result.returncode > 0:
sys.exit(result.returncode)
logger.info(
f"Done cleaning up exp_id = {exp['exp_id']} on the following workers: {nodes_str}"
)
def run_experiment(exp: dict, reservations, user_script, user_args):
include_str = ""
for reservation in reservations:
reservation.slots.sort()
slots = ",".join(map(str, reservation.slots))
include_str += f"{reservation.node.host}:{slots}@"
include_str = include_str[:-1]
master_port = exp["master_port"]
exp["launcher_args"] = [
"--include",
f"{include_str}",
"--master_port",
str(master_port),
]
logger.debug(f'launcher args={exp["launcher_args"]}')
exp["user"] = get_user()
exp["job_id"] = get_job_id()
exp_dir = exp["result_dir"]
os.makedirs(exp_dir, exist_ok=True)
ds_config_path = os.path.join(exp_dir, "ds_config.json")
exp["ds_config_path"] = ds_config_path
ds_config = copy.deepcopy(exp["ds_config"])
ds_config_json = json.dumps(ds_config).encode('utf-8')
exp["ds_config_base64"] = base64.urlsafe_b64encode(ds_config_json).decode('utf-8')
with open(exp["ds_config_path"], "w", buffering=BUFSIZE) as fd:
json.dump(ds_config, fd)
fd.flush()
os.fsync(fd)
path = exp["ds_config_path"]
logger.info(f"Scheduler wrote ds_config to {path}, {os.path.abspath(path)}")
with open(os.path.join(exp_dir, "exp.json"), "w", buffering=BUFSIZE) as fd:
json.dump(exp, fd)
fd.flush()
os.fsync(fd)
path = os.path.join(exp_dir, "exp.json")
logger.info(f"Scheduler wrote exp to {path}, {os.path.abspath(path)}")
# remove "--deepspeed_config ds_config.json" from user_args
if user_args:
idx = -1
if "--deepspeed_config" in user_args:
idx = user_args.index("--deepspeed_config")
# "--deepspeed_config" is omitted in HF
elif "--deepspeed" in user_args:
idx = user_args.index("--deepspeed")
assert idx != -1 and idx + 1 < len(user_args), "there is no ds_config file specified after --deepspeed_config or --deepspeed"
# user_args[idx + 1] = exp["ds_config_path"]
# pass base64 serialized ds_config to launcher
user_args[idx + 1] = exp["ds_config_base64"]
exp["user_script"] = user_script
exp["user_args"] = user_args
cmd = ["deepspeed"] + exp["launcher_args"] + [user_script] + user_args
assert len(exp["launcher_args"]) > 0, "must provide launcher args"
with open(os.path.join(exp_dir, "cmd.txt"), "w", buffering=BUFSIZE) as fd:
fd.write(" ".join(cmd))
fd.write("\n")
fd.flush()
os.fsync(fd)
logger.info(
f"Launching exp_id = {exp['exp_id']}, exp_name = {exp['name']}, with resource = {include_str}, and ds_config = {os.path.abspath(ds_config_path)}"
)
with open(os.path.join(exp_dir, "stdout.log"), "wb") as out, open(
os.path.join(exp_dir, "stderr.log"), "wb"
) as err:
result = subprocess.Popen(cmd, stdout=out, stderr=err)
result.wait()
out.flush()
err.flush()
os.fsync(out)
os.fsync(err)
clean_up(exp, reservations)
logger.info(
f"Done running exp_id = {exp['exp_id']}, exp_name = {exp['name']}, with resource = {include_str}"
) | null |
10,371 | import torch
import deepspeed
import subprocess
import argparse
from .ops.op_builder import ALL_OPS
from .git_version_info import installed_ops, torch_info
RED = '\033[91m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
END = '\033[0m'
OKAY = f"{GREEN}[OKAY]{END}"
FAIL = f'{RED}[FAIL]{END}'
color_len = len(GREEN) + len(END)
def ninja_installed():
try:
import ninja  # noqa: F401
except ImportError:
return False
return True
# how ALL_OPS is built inside deepspeed.ops.op_builder (__op_builders__ lives there)
ALL_OPS = {op.name: op for op in __op_builders__}
def op_report(verbose=True):
max_dots = 23
max_dots2 = 11
h = ["op name", "installed", "compatible"]
print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
print("DeepSpeed C++/CUDA extension op report")
print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
print("NOTE: Ops not installed will be just-in-time (JIT) compiled at\n"
" runtime if needed. Op compatibility means that your system\n"
" meet the required dependencies to JIT install the op.")
print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
print("JIT compiled ops requires ninja")
ninja_status = OKAY if ninja_installed() else FAIL
print('ninja', "." * (max_dots - 5), ninja_status)
print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
print(h[0], "." * (max_dots - len(h[0])), h[1], "." * (max_dots2 - len(h[1])), h[2])
print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1])))
installed = f"{GREEN}[YES]{END}"
no = f"{YELLOW}[NO]{END}"
for op_name, builder in ALL_OPS.items():
dots = "." * (max_dots - len(op_name))
is_compatible = OKAY if builder.is_compatible(verbose) else no
is_installed = installed if installed_ops[op_name] else no
dots2 = '.' * ((len(h[1]) + (max_dots2 - len(h[1]))) -
(len(is_installed) - color_len))
print(op_name, dots, is_installed, dots2, is_compatible)
print("-" * (max_dots + max_dots2 + len(h[0]) + len(h[1]))) | null |
10,372 | import torch
import deepspeed
import subprocess
import argparse
from .ops.op_builder import ALL_OPS
from .git_version_info import installed_ops, torch_info
RED = '\033[91m'
END = '\033[0m'
def nvcc_version():
import torch.utils.cpp_extension
cuda_home = torch.utils.cpp_extension.CUDA_HOME
if cuda_home is None:
return f"{RED} [FAIL] cannot find CUDA_HOME via torch.utils.cpp_extension.CUDA_HOME={torch.utils.cpp_extension.CUDA_HOME} {END}"
try:
output = subprocess.check_output([cuda_home + "/bin/nvcc",
"-V"],
universal_newlines=True)
except FileNotFoundError:
return f"{RED} [FAIL] nvcc missing {END}"
output_split = output.split()
release_idx = output_split.index("release")
release = output_split[release_idx + 1].replace(',', '').split(".")
return ".".join(release)
def debug_report():
max_dots = 33
hip_version = None
if hasattr(torch.version, 'hip'):
hip_version = torch.version.hip
report = [
("torch install path",
torch.__path__),
("torch version",
torch.__version__),
("torch cuda version",
torch.version.cuda),
("torch hip version",
hip_version),
("nvcc version",
(None if hip_version else nvcc_version())),
("deepspeed install path",
deepspeed.__path__),
("deepspeed info",
f"{deepspeed.__version__}, {deepspeed.__git_hash__}, {deepspeed.__git_branch__}"
),
("deepspeed wheel compiled w.",
f"torch {torch_info['version']}, " +
(f"hip {torch_info['hip_version']}"
if hip_version else f"cuda {torch_info['cuda_version']}")),
]
print("DeepSpeed general environment info:")
for name, value in report:
print(name, "." * (max_dots - len(name)), value) | null |
10,373 | import torch
import deepspeed
import subprocess
import argparse
from .ops.op_builder import ALL_OPS
from .git_version_info import installed_ops, torch_info
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
'--hide_operator_status',
action='store_true',
help=
'Suppress display of installation and compatibility statuses of DeepSpeed operators. '
)
parser.add_argument('--hide_errors_and_warnings',
action='store_true',
help='Suppress warning and error messages.')
args = parser.parse_args()
return args
def main(hide_operator_status=False, hide_errors_and_warnings=False):
if not hide_operator_status:
op_report(verbose=not hide_errors_and_warnings)
debug_report()
def cli_main():
args = parse_arguments()
main(hide_operator_status=args.hide_operator_status,
hide_errors_and_warnings=args.hide_errors_and_warnings) | null |
10,374 | import torch
def quantize_module(model, orig_class, quantize_fn):
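# _quantize_module is elided from this snippet; it recursively walks the model
# and applies quantize_fn to every child whose class matches orig_class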
policy = {orig_class: quantize_fn}
return _quantize_module(model, policy)
The provided code snippet includes necessary dependencies for implementing the `quantize_transformer_layer` function. Write a Python function `def quantize_transformer_layer(orig_layer_impl, model, megatron=False, preln=False)` to solve the following problem:
Quantize bert-style transformer layers with DeepSpeed's transformer layer Arguments: orig_layer_impl (torch.nn.Module): the original transformer layer implementation to look for, e.g., transformers.modeling_bert.BertLayer. model (torch.nn.Module): user's nn.module representing their model megatron (bool): megatron model-parallel implementation (this is supported for inference only) preln (bool): does the original layer implementation do pre or post layer norm? Note: For Bert kind of models, we inject based on the DeepSpeed-Example models, if not setting huggingface flag. Returns: Updated nn.module with quantized transformer layers
Here is the function:
def quantize_transformer_layer(orig_layer_impl, model, megatron=False, preln=False):
""" Quantize bert-style transformer layers with DeepSpeed's transformer layer
Arguments:
orig_layer_impl (torch.nn.Module): the original transformer layer implementation to look for,
e.g., transformers.modeling_bert.BertLayer.
model (torch.nn.Module): user's nn.module representing their model
megatron (bool): megatron model-parallel implementation (this is supported for inference only)
preln (bool): does the original layer implementation do pre or post layer norm?
Note: for BERT-style models, injection follows the DeepSpeed-Examples models unless the huggingface flag is set.
Returns:
Updated nn.module with quantized transformer layers
"""
def quantize_weight(weight):
return weight.to(torch.int8)
def megatron_layer_quantize(layer):
layer.attention.query_key_value.weight.data = quantize_weight(
layer.attention.query_key_value.weight.data)
layer.attention.dense.weight.data = quantize_weight(
layer.attention.dense.weight.data)
layer.mlp.dense_h_to_4h.weight.data = quantize_weight(
layer.mlp.dense_h_to_4h.weight.data)
layer.mlp.dense_4h_to_h.weight.data = quantize_weight(
layer.mlp.dense_4h_to_h.weight.data)
def bert_layer_quantize(layer):
layer.attention.self.query.weight.data = quantize_weight(
layer.attention.self.query.weight.data)
layer.attention.self.key.weight.data = quantize_weight(
layer.attention.self.key.weight.data)
layer.attention.self.value.weight.data = quantize_weight(
layer.attention.self.value.weight.data)
layer.attention.output.dense.weight.data = quantize_weight(
layer.attention.output.dense.weight.data)
if preln:
layer.intermediate.dense_act.weight.data = quantize_weight(
layer.intermediate.dense_act.weight.data)
else:
layer.intermediate.dense.weight.data = quantize_weight(
layer.intermediate.dense.weight.data)
layer.output.dense.weight.data = quantize_weight(layer.output.dense.weight.data)
def quantize_fn(child):
if megatron:
# Quantize megatron GPT2 / GPT3 trained model
megatron_layer_quantize(child)
else:
# Quantize either DeepSpeed or HuggingFace trained model
bert_layer_quantize(child)
return child
return quantize_module(model=model,
orig_class=orig_layer_impl,
quantize_fn=quantize_fn) | Quantize bert-style transformer layers with DeepSpeed's transformer layer Arguments: orig_layer_impl (torch.nn.Module): the original transformer layer implementation to look for, e.g., transformers.modeling_bert.BertLayer. model (torch.nn.Module): user's nn.module representing their model megatron (bool): megatron model-parallel implementation (this is supported for inference only) preln (bool): does the original layer implementation do pre or post layer norm? Note: For Bert kind of models, we inject based on the DeepSpeed-Example models, if not setting huggingface flag. Returns: Updated nn.module with quantized transformer layers |
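A hedged sketch of how this might be invoked on a HuggingFace BERT model (assuming the elided `_quantize_module` helper is available; the cast to int8 here is a raw dtype change, not a calibrated quantization scheme):

    from transformers import BertModel
    from transformers.models.bert.modeling_bert import BertLayer

    model = BertModel.from_pretrained("bert-base-uncased")
    model = quantize_transformer_layer(BertLayer, model, megatron=False, preln=False)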
10,375 | import copy
import torch
from deepspeed.ops.transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
def module_inject(layer_obj,
model,
config,
micro_batch_size,
max_seq_length,
seed,
preln,
fp16=True):
for name, child in model.named_children():
if isinstance(child, layer_obj):
print('REPLACING BertLayer')
cuda_config = DeepSpeedTransformerConfig(
batch_size=micro_batch_size,
max_seq_length=max_seq_length,
hidden_size=config.hidden_size,
heads=config.num_attention_heads,
attn_dropout_ratio=config.attention_probs_dropout_prob,
hidden_dropout_ratio=config.hidden_dropout_prob,
num_hidden_layers=config.num_hidden_layers,
initializer_range=config.initializer_range,
seed=seed,
fp16=fp16,
pre_layer_norm=preln)
new_module = DeepSpeedTransformerLayer(cuda_config)
# copy relevant state from child -> new module
qw = child.attention.self.query.weight
qb = child.attention.self.query.bias
kw = child.attention.self.key.weight
kb = child.attention.self.key.bias
vw = child.attention.self.value.weight
vb = child.attention.self.value.bias
qkvw = torch.cat((qw, kw, vw), 0)
qkvb = torch.cat((qb, kb, vb), 0)
new_module.attn_qkvw.data = qkvw
new_module.attn_qkvb.data = qkvb
new_module.attn_ow.data = child.attention.output.dense.weight
new_module.attn_ob.data = child.attention.output.dense.bias
if preln:
attention_layerNorm = child.PostAttentionLayerNorm
else:
attention_layerNorm = child.attention.output.LayerNorm
new_module.attn_nw.data = attention_layerNorm.weight
new_module.attn_nb.data = attention_layerNorm.bias
if preln:
intermediate_FF = child.intermediate.dense_act
else:
intermediate_FF = child.intermediate.dense
new_module.inter_w.data = intermediate_FF.weight
new_module.inter_b.data = intermediate_FF.bias
new_module.output_w.data = child.output.dense.weight
new_module.output_b.data = child.output.dense.bias
if preln:
transformer_LayerNorm = child.PreAttentionLayerNorm
else:
transformer_LayerNorm = child.output.LayerNorm
new_module.norm_w.data = transformer_LayerNorm.weight
new_module.norm_b.data = transformer_LayerNorm.bias
setattr(model, name, copy.deepcopy(new_module))
else:
module_inject(layer_obj,
child,
config,
micro_batch_size,
max_seq_length,
seed,
preln,
fp16)
return model
def test_hi():
from turing.nvidia_modelingpreln import BertConfig as BertConfigPreLN
from turing.nvidia_modelingpreln import BertForQuestionAnswering as BertForQuestionAnsweringPreLN
from turing.nvidia_modelingpreln import BertLayer
bert_model_config = {
"vocab_size_or_config_json_file": 119547,
"hidden_size": 1024,
"num_hidden_layers": 1,
"num_attention_heads": 16,
"intermediate_size": 4096,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 512,
"type_vocab_size": 2,
"initializer_range": 0.02
}
bert_config = BertConfigPreLN(**bert_model_config)
base_model = BertForQuestionAnsweringPreLN(bert_config, args=None)
#base_model = LinearStack()
test_model = copy.deepcopy(base_model)
test_model = module_inject(BertLayer, test_model, bert_config, 4, 384, 1234, preln=True)  # preln=True to match the PreLN model
print('BASE', base_model)
print('TEST', test_model)
#base_model.eval()
#test_model.eval()
#test_input = torch.rand(1, base_model.input_dim)
#base_output = base_model(test_input)
#test_output = test_model(test_input)
#
#assert torch.allclose(base_output, test_output, atol=3e-8) | null |
10,376 | import os
import torch
import tqdm
import deepspeed
import deepspeed.ops.transformer as transformer_inference
from deepspeed.ops.transformer.inference.diffusers_attention import DeepSpeedDiffusersAttention
from deepspeed.ops.transformer.inference.diffusers_transformer_block import DeepSpeedDiffusersTransformerBlock
from deepspeed.ops.transformer.inference.diffusers_2d_transformer import Diffusers2DTransformerConfig
from .replace_policy import HFBertLayerPolicy, HFGPT2LayerPolicy, BLOOMLayerPolicy
from .replace_policy import replace_policies, generic_policies
from deepspeed import comm as dist
from torch import nn
from ..runtime.zero import GatheredParameters
from .layers import LinearAllreduce, LinearLayer
from .load_checkpoint import load_model_with_checkpoint
import time
def _module_match(module):
from ..pipe import PipelineModule
def _replace_module(model, policies, layer_id=0):
class DeepSpeedDiffusersAttention(nn.Module):
def __init__(
self,
config,
):
def forward(self, input, context=None, input_mask=None):
class DeepSpeedDiffusersTransformerBlock(nn.Module):
def __init__(self,
equivalent_module: nn.Module,
config: Diffusers2DTransformerConfig):
def forward(self, hidden_states, context=None, timestep=None):
class Diffusers2DTransformerConfig():
def __init__(self, int8_quantization=False):
class DSClipEncoder(torch.nn.Module):
def __init__(self, enc, enable_cuda_graph=False):
def _build_causal_attention_mask(self, bsz, seq_len, dtype):
def _graph_replay(self, *inputs, **kwargs):
def forward(self, *inputs, **kwargs):
def _create_cuda_graph(self, *inputs, **kwargs):
def _forward(self, *inputs, **kwargs):
def generic_injection(module, fp16=False, enable_cuda_graph=True):
def replace_attn(child, policy):
policy_attn = policy.attention(child)
if policy_attn is None:
return child
if len(policy_attn) == 5:
qkvw, attn_ow, attn_ob, hidden_size, heads = policy_attn
else:
qw, kw, vw, attn_ow, attn_ob, hidden_size, heads = policy_attn
config = transformer_inference.DeepSpeedInferenceConfig(
hidden_size=hidden_size,
heads=heads,
fp16=fp16,
triangular_masking=False,
max_out_tokens=4096,
)
attn_module = DeepSpeedDiffusersAttention(config)
def transpose(data):
data = data.contiguous()
data.reshape(-1).copy_(data.transpose(-1, -2).contiguous().reshape(-1))
data = data.reshape(data.shape[-1], data.shape[-2])
data.to(torch.cuda.current_device())
return data
if len(policy_attn) == 5:
attn_module.attn_qkvw.data = transpose(qkvw.data)
else:
attn_module.attn_qkvw = None
attn_module.attn_qw.data = transpose(qw.data)
attn_module.attn_kw.data = transpose(kw.data)
attn_module.attn_vw.data = transpose(vw.data)
attn_module.attn_qkvb = None
attn_module.attn_ow.data = transpose(attn_ow.data)
attn_module.attn_ob.data.copy_(attn_ob.data.to(torch.cuda.current_device()))
return attn_module
def replace_attn_block(child, policy):
config = Diffusers2DTransformerConfig()
return DeepSpeedDiffusersTransformerBlock(child, config)
if isinstance(module, torch.nn.Module):
pass
else:
if fp16 is False:
raise ValueError("Generic injection only supported with FP16")
try:
import diffusers
cross_attention = diffusers.models.attention.CrossAttention
attention_block = diffusers.models.attention.BasicTransformerBlock
new_policies = {
cross_attention: replace_attn,
attention_block: replace_attn_block,
}
except ImportError:
new_policies = {}
#replace_transformer_layer(None,
# module.text_encoder,
# training=False,
# replace_with_kernel_inject=True,
# triangular_masking=True,
# max_out_tokens=8192)
from ..model_implementations.transformers.clip_encoder import DSClipEncoder
cg_encoder = DSClipEncoder(module.text_encoder,
enable_cuda_graph=enable_cuda_graph)
setattr(module, 'text_encoder', cg_encoder)
for name in module.__dict__.keys():
sub_module = getattr(module, name)
policy = _module_match(sub_module)
if policy is not None:
def _replace_module(module, policy):
for name, child in module.named_children():
_replace_module(child, policy)
if child.__class__ in new_policies:
replaced_module = new_policies[child.__class__](child,
policy)
setattr(module, name, replaced_module)
_replace_module(sub_module, policy)
new_module = policy.apply(sub_module,
enable_cuda_graph=enable_cuda_graph)
print(f"**** found and replaced {name} w. {type(new_module)}")
setattr(module, name, new_module) | null |
10,377 | import os
import torch
import tqdm
import deepspeed
import deepspeed.ops.transformer as transformer_inference
from deepspeed.ops.transformer.inference.diffusers_attention import DeepSpeedDiffusersAttention
from deepspeed.ops.transformer.inference.diffusers_transformer_block import DeepSpeedDiffusersTransformerBlock
from deepspeed.ops.transformer.inference.diffusers_2d_transformer import Diffusers2DTransformerConfig
from .replace_policy import HFBertLayerPolicy, HFGPT2LayerPolicy, BLOOMLayerPolicy
from .replace_policy import replace_policies, generic_policies
from deepspeed import comm as dist
from torch import nn
from ..runtime.zero import GatheredParameters
from .layers import LinearAllreduce, LinearLayer
from .load_checkpoint import load_model_with_checkpoint
import time
import gc  # used by load() below
from torch.nn.parameter import Parameter  # used by LinearLayer and the policy classes
class ReplaceWithTensorSlicing:
def __init__(self, mp_group=None, mp_size=1, out_dim=1, in_dim=0):
if mp_group is not None:
self.gpu_index = dist.get_rank(group=mp_group)
else:
self.gpu_index = 0
self.out_dim = out_dim
self.in_dim = in_dim
self.mp_size = mp_size
def merge_assert(self, dim1, dim2):
assert dim1 > dim2, \
'Merging tensors is not allowed here! Please use deepspeed load_checkpoint\
for merging your checkpoints before replacing the transformer layer with\
inference-kernels'
def qkv_copy(self, dst, src):
if src is None:
return src
src_shape = src.shape
dst_shape = dst.shape
if self.out_dim == 0:
src_split = torch.split(src.data,
src_shape[self.out_dim] // self.mp_size,
dim=0)
else:
src_split = torch.split(src.data, src.shape[-1] // 3, dim=-1)
if (len(src_shape) == 2 and len(dst_shape) == 2):
if src_shape[self.out_dim] == dst_shape[self.out_dim]:
return torch.nn.parameter.Parameter(src)
if self.out_dim == 1:
self.merge_assert(src_shape[self.out_dim], dst_shape[self.out_dim])
qkv_size = dst_shape[self.out_dim] // 3
qkv_split = [
torch.split(src_s,
qkv_size,
dim=self.out_dim) for src_s in src_split
]
weight_split = [
torch.cat([qkv_s[i] for qkv_s in qkv_split],
axis=self.out_dim) for i in range(len(qkv_split[0]))
]
dst.data.copy_(weight_split[self.gpu_index].to(
torch.cuda.current_device()).contiguous())
else:
dst.data.copy_(src_split[self.gpu_index].to(
torch.cuda.current_device()).contiguous())
else:
if src_shape[0] == dst_shape[0]:
return torch.nn.parameter.Parameter(src)
if self.out_dim == 1:
qkv_size = dst_shape[0] // 3
qkv_split = [torch.split(src_s, qkv_size, dim=0) for src_s in src_split]
bias_split = [
torch.cat([qkv_s[i] for qkv_s in qkv_split],
axis=0) for i in range(len(qkv_split[0]))
]
dst.data.copy_(bias_split[self.gpu_index].to(
torch.cuda.current_device()).contiguous())
else:
dst.data.copy_(src_split[self.gpu_index].to(
torch.cuda.current_device()).contiguous())
return torch.nn.parameter.Parameter(dst)
def copy(self, dst, src):
if src is None:
return src
src_shape = src.shape
dst_shape = dst.shape
if (len(src_shape) == 2 and len(dst_shape) == 2):
if src_shape[0] == dst_shape[0] and src_shape[1] == dst_shape[1]:
dst.data.copy_(src)
else:
if src_shape[self.in_dim] != dst_shape[self.in_dim]:
self.merge_assert(src_shape[self.in_dim], dst_shape[self.in_dim])
weight_split = torch.split(
src,
dst_shape[self.in_dim],
dim=self.in_dim)[self.gpu_index].to(
torch.cuda.current_device()).contiguous()
else:
self.merge_assert(src_shape[self.out_dim], dst_shape[self.out_dim])
weight_split = torch.split(
src.data,
dst_shape[self.out_dim],
dim=self.out_dim)[self.gpu_index].to(
torch.cuda.current_device()).contiguous()
dst.data.copy_(weight_split.contiguous())
else:
if src_shape[0] == dst_shape[0]:
dst.data.copy_(src)
else:
bias_split = torch.split(src.data,
dst_shape[-1])[self.gpu_index].to(
torch.cuda.current_device()).contiguous()
dst.data.copy_(bias_split)
dst = torch.nn.parameter.Parameter(dst, requires_grad=False)
if hasattr(src, 'scale'):
dst.scale = src.scale
return dst
def get_transformer_name(replaced_module):
from .replace_policy import supported_models
from torch.nn import ModuleList
transformer_name = ''
for n, c in replaced_module.named_children():
if c.__class__ in supported_models:
transformer_name += n + '.'
for name, child in c.named_children():
if child.__class__ is ModuleList:
transformer_name += name
break
break
return transformer_name
class GroupQuantizer:
def __init__(self, q_int8=True, group_size=1, num_bits=8):
self.group_size = group_size
self.num_bits = num_bits
self.q_int8 = q_int8
def quantize(self, inputs, qkv=True, count=1, parallel_dim=0):
if not self.q_int8 or not qkv:
inputs = torch.nn.Parameter(inputs, requires_grad=False)
inputs.scale = torch.empty(1)
return inputs
q_range = 2**self.num_bits
num_groups = inputs.shape[0] // self.group_size
inputs = inputs.to(torch.cuda.current_device())
input_flat = inputs.reshape(num_groups, -1).contiguous()
input_min = torch.min(input_flat, dim=1, keepdim=True)[0].float()
input_max = torch.max(input_flat, dim=1, keepdim=True)[0].float()
scale = torch.max(input_min.abs(), input_max.abs()) * 2.0 / (q_range)
input_flat = (input_flat / scale).round().clamp(-q_range // 2, q_range // 2 - 1)
inputs_q = input_flat.reshape(inputs.shape).to(torch.int8).contiguous()
out = torch.nn.Parameter(inputs_q, requires_grad=False)
#print(inputs.shape)
inputs_split = inputs.split(inputs.shape[parallel_dim] // 2, dim=parallel_dim)
input_flat = [
inputs_split[i].reshape(num_groups,
-1).contiguous() for i in range(2)
]
input_min = [
torch.min(input_flat[i],
dim=1,
keepdim=True)[0].float() for i in range(2)
]
input_max = [
torch.max(input_flat[i],
dim=1,
keepdim=True)[0].float() for i in range(2)
]
scale1 = [
(torch.max(input_min[i].abs(),
input_max[i].abs()) * 2.0 / (q_range)).squeeze().unsqueeze(0)
for i in range(2)
]
out.scale = torch.cat([scale.squeeze().unsqueeze(0),
scale1[0],
scale1[1]],
dim=0).reshape(num_groups,
-1).contiguous()
return out
selected_policy_g = None
megatron_v2_g = False
transformer_config_g = None
def replace_module(model, orig_class, replace_fn, _replace_policy):
""" Scan the model for instances of ``orig_clas:`` to replace using ``replace_fn``.
Arguments:
model (torch.nn.Module): the model to augment
orig_class (torch.nn.Module): the module to search for
replace_fn (method): a method to convert instances of ``orig_class`` to the
desired type and return a new instance.
Returns:
A modified ``model``.
"""
policy = {}
if orig_class is not None:
policy.update({orig_class: (replace_fn, _replace_policy)})
else:
for plcy in replace_policies:
# instantiate a throw-away policy in order to populate the _orig_layer_class
_ = plcy(None)
if isinstance(plcy._orig_layer_class, list):
for orig_layer_class in plcy._orig_layer_class:
policy.update({orig_layer_class: (replace_fn, plcy)})
elif plcy._orig_layer_class is not None:
policy.update({plcy._orig_layer_class: (replace_fn, plcy)})
assert len(policy.items()) > 0,\
"No default policy found! Please specify your policy injection_policy (like {BertLayer:HFBEertLayerPolicy})." +\
"You can find some samples here: https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/module_inject/replace_policy.py"
replaced_module, _ = _replace_module(model, policy)
return replaced_module
from ..pipe import PipelineModule
def _replace_module(model, policies, layer_id=0):
""" Traverse model's children recursively and apply any transformations in ``policies``.
Arguments:
model (torch.nn.Module): model to augment
policies (dict): Mapping of source class to replacement function.
Returns:
Modified ``model``.
"""
for name, child in model.named_children():
if child.__class__ in policies:
replaced_module = policies[child.__class__][0](child,
policies[child.__class__][-1],
layer_id)
setattr(model, name, replaced_module)
if isinstance(model, PipelineModule):
assert hasattr(model, 'forward_funcs'),\
"we require pipe-module to have the list of fwd_functions"
model.forward_funcs[model.fwd_map[name]] = replaced_module
layer_id += 1
else:
_, layer_id = _replace_module(child, policies, layer_id=layer_id)
return model, layer_id
class HFBertLayerPolicy(TransformerPolicy):
def __init__(self, client_module, inference=False):
super().__init__(inference, pre_attn_norm=False)
self.client_module = client_module
self.cuda_graph_supported = True
if HFBertLayerPolicy._orig_layer_class is None:
try:
import transformers
HFBertLayerPolicy._orig_layer_class = [
transformers.models.bert.modeling_bert.BertLayer,
transformers.models.roberta.modeling_roberta.RobertaLayer
]
except:
HFBertLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.attention.self.query.weight.shape[1], \
self.client_module.attention.self.num_attention_heads
def attention(self):
qw = self.client_module.attention.self.query.weight
qb = self.client_module.attention.self.query.bias
kw = self.client_module.attention.self.key.weight
kb = self.client_module.attention.self.key.bias
vw = self.client_module.attention.self.value.weight
vb = self.client_module.attention.self.value.bias
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=False)
qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=False)
return self.linear_layer, \
qkvw, \
qkvb, \
self.client_module.attention.output.dense.weight, \
self.client_module.attention.output.dense.bias, \
self.scale_attention, \
self.is_megatron_v2
def mlp(self):
if self.pre_attn_norm:
intermediate_ff = self.client_module.intermediate.dense_act
else:
intermediate_ff = self.client_module.intermediate.dense
return self.linear_layer, intermediate_ff.weight, intermediate_ff.bias, \
self.client_module.output.dense.weight, \
self.client_module.output.dense.bias
def layerNorm(self):
if self.pre_attn_norm:
attention_layernorm = self.client_module.PostAttentionLayerNorm
transformer_layernorm = self.client_module.PreAttentionLayerNorm
else:
attention_layernorm = self.client_module.attention.output.LayerNorm
transformer_layernorm = self.client_module.output.LayerNorm
return attention_layernorm.weight, \
attention_layernorm.bias, \
transformer_layernorm.weight, \
transformer_layernorm.bias
class HFGPT2LayerPolicy(TransformerPolicy):
_orig_layer_class = None
def __init__(self, client_module, inference=True):
# HuggingFace GPT2 uses convolutional layer instead of linear layer
super().__init__(inference, linear_layer=False)
self.client_module = client_module
try:
import transformers
HFGPT2LayerPolicy._orig_layer_class = transformers.models.gpt2.modeling_gpt2.GPT2Block
except:
HFGPT2LayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.attn.embed_dim, \
self.client_module.attn.num_heads
def attention(self):
return self.linear_layer, \
self.client_module.attn.c_attn.weight, \
self.client_module.attn.c_attn.bias, \
self.client_module.attn.c_proj.weight, \
self.client_module.attn.c_proj.bias, \
self.scale_attention, \
self.is_megatron_v2
def mlp(self):
return self.linear_layer, \
self.client_module.mlp.c_fc.weight, \
self.client_module.mlp.c_fc.bias, \
self.client_module.mlp.c_proj.weight, \
self.client_module.mlp.c_proj.bias
def layerNorm(self):
return self.client_module.ln_2.weight, \
self.client_module.ln_2.bias, \
self.client_module.ln_1.weight, \
self.client_module.ln_1.bias
class BLOOMLayerPolicy(TransformerPolicy):
_orig_layer_class = None
def __init__(self,
client_module,
inference=True,
use_load_prefix=True,
split_qkv=False):
super().__init__(inference, linear_layer=True)
self.client_module = client_module
try:
import transformers
BLOOMLayerPolicy._orig_layer_class = transformers.models.bloom.modeling_bloom.BloomBlock
global supported_models
supported_models.update(
{transformers.models.bloom.modeling_bloom.BloomModel})
except:
BLOOMLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.self_attention.hidden_size, \
self.client_module.self_attention.num_heads
def attention(self):
return self.linear_layer, \
self.client_module.self_attention.query_key_value.weight, \
self.client_module.self_attention.query_key_value.bias, \
self.client_module.self_attention.dense.weight, \
self.client_module.self_attention.dense.bias, \
self.scale_attention, \
self.is_megatron_v2
def mlp(self):
return self.linear_layer, \
self.client_module.mlp.dense_h_to_4h.weight, \
self.client_module.mlp.dense_h_to_4h.bias, \
self.client_module.mlp.dense_4h_to_h.weight, \
self.client_module.mlp.dense_4h_to_h.bias
def layerNorm(self):
return self.client_module.post_attention_layernorm.weight, \
self.client_module.post_attention_layernorm.bias, \
self.client_module.input_layernorm.weight, \
self.client_module.input_layernorm.bias
def get_param_names(self):
return 'self_attention.query_key_value.weight', \
'self_attention.query_key_value.bias', \
'self_attention.dense.weight', \
'self_attention.dense.bias', \
'mlp.dense_h_to_4h.weight', \
'mlp.dense_h_to_4h.bias', \
'mlp.dense_4h_to_h.weight', \
'mlp.dense_4h_to_h.bias', \
'input_layernorm.weight', \
'input_layernorm.bias', \
'post_attention_layernorm.weight', \
'post_attention_layernorm.bias', \
self.use_load_prefix, \
self.split_qkv
class LinearAllreduce(nn.Module):
def __init__(self, weight, bias=None, mp_group=None):
super(LinearAllreduce, self).__init__()
self.weight = weight
self.bias = bias
self.mp_group = mp_group
def forward(self, input):
output = torch.matmul(input, self.weight.transpose(-1, -2))
if self.mp_group is not None:
dist.all_reduce(output, group=self.mp_group)
if self.bias is not None:
output += self.bias
return output
class LinearLayer(nn.Module):
def __init__(self, weight_shape=None, dtype=torch.half, weight=None, bias=None):
super(LinearLayer, self).__init__()
if weight is not None:
self.weight = weight
self.bias = bias
else:
self.weight = Parameter(
torch.empty(weight_shape,
dtype=dtype,
device=torch.cuda.current_device()))
self.bias = Parameter(
torch.empty(weight_shape[0],
dtype=dtype,
device=torch.cuda.current_device())) \
if bias is not None else None
def forward(self, input):
output = torch.matmul(input, self.weight.transpose(-1, -2))
if self.bias is not None:
output += self.bias
return output
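A minimal CPU-only sanity check of LinearLayer with an explicit weight (the Parameter-allocating branch of the constructor needs a CUDA device, so it is avoided here):

    import torch

    w = torch.randn(4, 8)          # out_features x in_features
    b = torch.zeros(4)
    layer = LinearLayer(weight=w, bias=b)
    x = torch.randn(2, 8)
    y = layer(x)                   # equivalent to x @ w.T + b
    assert y.shape == (2, 4)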
def load_model_with_checkpoint(r_module,
sd,
mp_replace,
ckpt_type,
weight_quantizer=None,
rank=0,
param_names=None,
transformer_config=None,
megatron_v2=False):
error_msgs = []
def transpose(data):
with torch.no_grad():
data = data.contiguous()
data1 = data.transpose(-1, -2).reshape(-1)
data.reshape(-1).copy_(data1)
data1 = None
return data.reshape(data.shape[-1], data.shape[-2])
def load(module, prefix):
args = (sd[0], prefix, {}, True, [], [], error_msgs)
if len(list(module.parameters())) > 0 and list(
module.parameters())[0].numel() == 0:
with GatheredParameters(list(module.parameters(recurse=False)),
modifier_rank=0):
module._load_from_sd(*args)
else:
if hasattr(module, 'weight'):
module.weight = mp_replace.copy(module.weight.data,
sd[0][prefix + 'weight'])
if prefix + 'bias' in sd[0].keys():
module.bias = mp_replace.copy(module.bias.data, sd[0][prefix + 'bias'])
args = None
gc.collect()
def load_transformer_layer(module, prefix):
if ckpt_type == "tp":
def load_parameters(module, prefix):
for n, p in module.named_parameters():
if prefix + n in sd[0] and len(n.split('.')) == 1:
if type(sd[0][prefix + n]) is list:
tmp_data, scale = sd[0][prefix + n]
tmp_data = tmp_data
scale = scale.to(torch.cuda.current_device())
else:
tmp_data = sd[0][prefix + n].to(torch.cuda.current_device())
scale = None
src_shape = tmp_data.shape
dst_shape = p.shape
inner_dim = 1 if tmp_data.dtype == torch.int8 else 0
outer_dim = 0 if tmp_data.dtype == torch.int8 else 1
if (len(src_shape) == 2 and len(dst_shape) == 2):
if (src_shape[inner_dim] == dst_shape[0]
and src_shape[outer_dim] == dst_shape[1]):
if tmp_data.dtype != torch.int8:
p = weight_quantizer.quantize(
transpose(tmp_data) if weight_quantizer.
q_int8 else tmp_data)
else:
p = torch.nn.parameter.Parameter(tmp_data,
requires_grad=False)
p.scale = scale
setattr(module, n, p)
else:
dim = inner_dim if src_shape[inner_dim] != dst_shape[
0] else outer_dim
dim1 = 0 if src_shape[inner_dim] != dst_shape[0] else 1
if src_shape[dim] > dst_shape[dim1]:
weight_partition = torch.split(
tmp_data,
dst_shape[dim1],
dim=dim)[rank].to(torch.cuda.current_device())
assert tmp_data.dtype != torch.int8 or scale.numel() > weight_quantizer.num_groups * (rank+1), \
'''ERROR: We require the quantization scales for larger TP-size when loading INT8 checkpoint!\
Please use the FP16 checkpoint to generate INT8 checkpoint with the sharding parameters!'''
scale = scale.view(
-1)[weight_quantizer.num_groups *
(rank + 1):].reshape(
weight_quantizer.num_groups,
-1).contiguous()
else:
assert tmp_data.dtype != torch.int8, \
'''Merging of the checkpoints are not supported when using INT8 checkpoint! \
Please use a as many GPUs as TP-size for the checkpoint'''
all_data = [
sd[j][prefix +
n] if type(sd[j][prefix + n]) is list else
sd[j][prefix + n].to(torch.cuda.current_device())
for j in range(len(sd))
]
weight_partition = torch.cat([
ad[0].to(torch.cuda.current_device())
if type(ad) is list else ad for ad in all_data
],
dim=dim)
if tmp_data.dtype == torch.int8:
scale = torch.cat([
ad[1].to(torch.cuda.current_device())
for ad in all_data
],
dim=dim)
if tmp_data.dtype != torch.int8:
weight_partition = weight_quantizer.quantize(
transpose(weight_partition), \
parallel_dim=(0 if dim == 1 else 1)) if weight_quantizer.q_int8 else \
weight_quantizer.quantize(weight_partition)
else:
weight_partition = torch.nn.parameter.Parameter(
weight_partition,
requires_grad=False)
weight_partition.scale = scale
setattr(module, n, weight_partition)
else:
if src_shape[0] == dst_shape[0]:
p.data.copy_(tmp_data)
else:
if src_shape[0] > dst_shape[0]:
bias_split = torch.split(
tmp_data,
dst_shape[-1])[rank].to(
torch.cuda.current_device()).contiguous()
p.data.copy_(bias_split)
else:
p.data.copy_(
torch.cat(
[sd[j][prefix + n] for j in range(len(sd))],
dim=0).to(torch.cuda.current_device()).
contiguous())
load_parameters(module, prefix)
for n, child in module.named_children():
load_parameters(child, prefix + n + '.')
else:
def _transpose(x):
heads = transformer_config.heads // mp_replace.mp_size
attention_head_size = x.shape[-1] // heads
new_x_shape = x.size()[:-1] + (heads, attention_head_size)
x_1 = x.view(*new_x_shape)
(q, k, v) = torch.split(x_1, (x_1.shape[-1] // 3), dim=(x_1.dim() - 1))
if len(q.shape) > 2:
return torch.cat((q.reshape(q.shape[0],
-1),
k.reshape(q.shape[0],
-1),
v.reshape(q.shape[0],
-1)),
dim=-1).reshape(x.shape)
else:
return torch.cat((q.reshape(-1),
k.reshape(-1),
v.reshape(-1)),
dim=-1).reshape(x.shape)
# This checks if the parameter exits in the checkpoint file and maybe copies it into the corresponding destination tensor.
# Note that not all parameters are saved in one checkpoint, that's why we always need to check if they exist!
def maybe_copy(module,
dst_name,
src_name,
qkv=False,
megatron_v2=False,
split_qkv=False):
if src_name in sd[0]:
dst = getattr(module, dst_name)
tmp = sd[0][src_name].cuda()
if len(dst.shape) == 1:
if split_qkv:
dst = mp_replace.qkv_copy(dst, tmp)
else:
dst = mp_replace.copy(dst, tmp)
if qkv and megatron_v2:
dst = torch.nn.parameter.Parameter(
_transpose(dst).contiguous())
else:
if split_qkv:
dst = weight_quantizer.quantize(mp_replace.qkv_copy(dst, tmp if weight_quantizer.q_int8 else \
(transpose(tmp).contiguous())))
else:
dst = weight_quantizer.quantize(mp_replace.copy(dst, tmp if weight_quantizer.q_int8 else \
transpose(tmp)))
if qkv and megatron_v2:
scale1 = dst.scale
dst = torch.nn.parameter.Parameter(
_transpose(dst).contiguous())
dst.scale = scale1
setattr(module, dst_name, dst)
# Extending the maybe_copy function for when the q, k, and v are in separate parameters!
def maybe_copy_qkv(module, dst_name, src_names, split_qkv=False):
if src_names[0] in sd[0]:
q = sd[0][src_names[0]]
k = sd[0][src_names[1]]
v = sd[0][src_names[2]]
qkv_data = torch.cat((q, k, v), dim=0)
dst = getattr(module, dst_name)
if len(dst.shape) == 1:
if split_qkv:
dst = mp_replace.qkv_copy(dst,
(qkv_data.cuda()).contiguous())
else:
dst = mp_replace.copy(dst, qkv_data.cuda())
else:
if split_qkv:
dst = weight_quantizer.quantize(mp_replace.qkv_copy(dst, qkv_data.cuda() if weight_quantizer.q_int8 else \
((transpose(qkv_data.cuda())).contiguous())))
else:
dst = weight_quantizer.quantize(mp_replace.copy(dst, qkv_data.cuda() if weight_quantizer.q_int8 else \
transpose(qkv_data.cuda())))
setattr(module, dst_name, dst)
if len(param_names) == 14:
qkv_w, qkv_b, attn_ow, attn_ob, \
mlp_intw, mlp_intb, mlp_ow, mlp_ob, \
inp_normw, inp_normb, attn_nw, attn_nb, _, split_qkv = param_names
elif len(param_names) < 14:
q_w, k_w, v_w, attn_ow, \
mlp_intw, mlp_intb, mlp_ow, mlp_ob, \
inp_normw, inp_normb, _, split_qkv = param_names
else:
q_w, q_b, k_w, k_b, v_w, v_b, attn_ow, attn_ob, \
mlp_intw, mlp_intb, mlp_ow, mlp_ob, \
inp_normw, inp_normb, attn_nw, attn_nb, _, split_qkv = param_names
maybe_copy(module, 'norm_w', prefix + inp_normw)
maybe_copy(module, 'norm_b', prefix + inp_normb)
if len(param_names) == 14:
maybe_copy(module.attention,
'attn_qkvw',
prefix + qkv_w,
qkv=True,
megatron_v2=megatron_v2,
split_qkv=split_qkv)
maybe_copy(module.attention,
'attn_qkvb',
prefix + qkv_b,
qkv=True,
megatron_v2=megatron_v2,
split_qkv=split_qkv)
elif len(param_names) < 14:
maybe_copy_qkv(module.attention,
'attn_qkvw',
[prefix + q_w,
prefix + k_w,
prefix + v_w],
split_qkv=split_qkv)
else:
maybe_copy_qkv(module.attention,
'attn_qkvw',
[prefix + q_w,
prefix + k_w,
prefix + v_w],
split_qkv=split_qkv)
maybe_copy_qkv(module.attention,
'attn_qkvb',
[prefix + q_b,
prefix + k_b,
prefix + v_b],
split_qkv=split_qkv)
maybe_copy(module.attention, 'attn_ow', prefix + attn_ow)
if len(param_names) >= 14:
maybe_copy(module.attention, 'attn_ob', prefix + attn_ob)
maybe_copy(module.mlp, 'attn_nw', prefix + attn_nw)
maybe_copy(module.mlp, 'attn_nb', prefix + attn_nb)
maybe_copy(module.mlp, 'inter_w', prefix + mlp_intw)
maybe_copy(module.mlp, 'inter_b', prefix + mlp_intb)
maybe_copy(module.mlp, 'output_w', prefix + mlp_ow)
maybe_copy(module.mlp, 'output_b', prefix + mlp_ob)
try:
import transformers
OPTLearnedPositionalEmbedding = transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding
except:
OPTLearnedPositionalEmbedding = None
layer_policies = {
nn.Linear: load,
nn.Embedding: load,
nn.LayerNorm: load,
EmbeddingLayer: load,
LinearLayer: load,
Normalize: load,
transformer_inference.DeepSpeedTransformerInference: load_transformer_layer,
OPTLearnedPositionalEmbedding: load,
OPTEmbedding: load
}
all_ds_ids = {}
def load_module_recursive(module, prefix='', level=0):
for name, child in module.named_children():
if child.__class__ in layer_policies:
checking_key = prefix + name + '.'
if not any(checking_key in item for item in sd[0].keys()):
if hasattr(child, 'weight') and \
(hasattr(child.weight, 'ds_id') and \
child.weight.ds_id in all_ds_ids):
prefix1 = all_ds_ids[child.weight.ds_id]
if child.__class__ is nn.Linear:
child = LinearLayer(weight=all_ds_ids[child.weight.ds_id])
setattr(module, name, child)
continue
child_params = list(child.parameters())
if len(child_params) > 0 and (child_params[0].numel() == 0
or child_params[0].is_meta):
if child.weight.is_meta:
ds_shape = child.weight.shape
else:
ds_shape = child.weight.ds_shape
if child.__class__ is nn.LayerNorm:
child = Normalize(dim=ds_shape[-1],
dtype=child.weight.dtype,
eps=child.eps)
setattr(module, name, child)
elif child.__class__ is nn.Linear:
child = LinearLayer(weight_shape=child.weight.shape,
bias=child.bias)
setattr(module, name, child)
elif child.__class__ is OPTLearnedPositionalEmbedding:
child = OPTEmbedding(weight_shape=ds_shape)
setattr(module, name, child)
else:
ds_id = None
if hasattr(child.weight, 'ds_id'):
ds_id = child.weight.ds_id
child = EmbeddingLayer(weight_shape=ds_shape,
dtype=child.weight.dtype)
if ds_id is not None:
all_ds_ids[ds_id] = child.weight
setattr(module, name, child)
layer_policies[child.__class__](child, prefix + name + '.')
else:
load_module_recursive(
child,
prefix if (level == 0 and ckpt_type == 'pp') and param_names[-2] else \
prefix + name + '.',
level + 1)
load_module_recursive(r_module)
#XXX: hack to tie the embedding weights with lm_head for BLOOM, needs to be revisited soon
embedding_weight = None
for n, p in r_module.named_parameters():
if "word_embeddings." in n or "embed_tokens." in n:
embedding_weight = p
if embedding_weight is not None:
r_module.lm_head.weight = embedding_weight
for sd_ in sd:
del sd_
sd = None
gc.collect()
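# Illustrative sketch (toy shapes, not the DeepSpeed API) of the split/merge
# mechanics used by load_parameters above: splitting a weight along `dim` gives
# each TP rank one contiguous shard, and concatenating the shards along the
# same dim recovers the original tensor.
_w = torch.arange(32, dtype=torch.float32).reshape(4, 8)
_shards = torch.split(_w, _w.shape[1] // 2, dim=1)  # 2 ranks, split on dim=1
assert torch.equal(torch.cat(_shards, dim=1), _w)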
class MoE(torch.nn.Module):
"""Initialize an MoE layer.
Arguments:
hidden_size (int): the hidden dimension of the model, importantly this is also the input and output dimension.
expert (torch.nn.Module): the torch module that defines the expert (e.g., MLP, torch.linear).
num_experts (int, optional): default=1, the total number of experts per layer.
ep_size (int, optional): default=1, number of ranks in the expert parallel world or group.
k (int, optional): default=1, top-k gating value, only supports k=1 or k=2.
capacity_factor (float, optional): default=1.0, the capacity of the expert at training time.
eval_capacity_factor (float, optional): default=1.0, the capacity of the expert at eval time.
min_capacity (int, optional): default=4, the minimum capacity per expert regardless of the capacity_factor.
use_residual (bool, optional): default=False, make this MoE layer a Residual MoE (https://arxiv.org/abs/2201.05596) layer.
noisy_gate_policy (str, optional): default=None, noisy gate policy, valid options are 'Jitter', 'RSample' or 'None'.
drop_tokens (bool, optional): default=True, whether to drop tokens - (setting to False is equivalent to infinite capacity).
use_rts (bool, optional): default=True, whether to use Random Token Selection.
use_tutel (bool, optional): default=False, whether to use Tutel optimizations (if installed).
enable_expert_tensor_parallelism (bool, optional): default=False, whether to use tensor parallelism for experts
"""
def __init__(self,
hidden_size,
expert,
num_experts=1,
ep_size=1,
k=1,
capacity_factor=1.,
eval_capacity_factor=1.,
min_capacity=4,
use_residual=False,
noisy_gate_policy: typing.Optional[str] = None,
drop_tokens: bool = True,
use_rts=True,
use_tutel: bool = False,
enable_expert_tensor_parallelism: bool = False):
super(MoE, self).__init__()
self.use_residual = use_residual
self.enable_expert_tensor_parallelism = enable_expert_tensor_parallelism
assert num_experts % ep_size == 0, f"Number of experts ({num_experts}) should be divisible by expert parallel size ({ep_size})"
self.ep_size = ep_size
self.expert_group_name = f"ep_size_{self.ep_size}"
self.num_experts = num_experts
self.num_local_experts = num_experts // self.ep_size
log_dist(
f'Creating MoE layer with num_experts: {num_experts} | num_local_experts: {self.num_local_experts} | expert_parallel_size: {self.ep_size}',
[0])
assert noisy_gate_policy is None or noisy_gate_policy in ['None', 'Jitter', 'RSample'], \
'Unsupported noisy_gate_policy: ' + noisy_gate_policy
experts = Experts(expert, self.num_local_experts, self.expert_group_name)
self.deepspeed_moe = MOELayer(TopKGate(hidden_size,
num_experts,
k,
capacity_factor,
eval_capacity_factor,
min_capacity,
noisy_gate_policy,
drop_tokens,
use_rts),
experts,
self.expert_group_name,
self.ep_size,
self.num_local_experts,
use_tutel=use_tutel)
if self.use_residual:
self.mlp = expert
# coefficient is used for weighted sum of the output of expert and mlp
self.coefficient = torch.nn.Linear(hidden_size, 2)
def set_deepspeed_parallelism(self):
self._create_process_groups()
def _create_process_groups(self):
# Create process group for a layer if needed
if self.expert_group_name not in groups._get_expert_parallel_group_dict():
print(
f"No existing process group found, creating a new group named: {self.expert_group_name}"
)
if (groups.mpu is None) or (not self.enable_expert_tensor_parallelism):
# Condition 1 - no groups.mpu means no tensor parallelism
# Condition 2 - disabling expert tensor parallelism on purpose
groups._create_expert_and_data_parallel(self.ep_size)
else:
# expert tensor parallelism is enabled
groups._create_expert_data_and_model_parallel(self.ep_size,
mpu=groups.mpu)
# Set the group handle for the MOELayer (deepspeed_moe) object
self.deepspeed_moe._set_ep_group(
groups._get_expert_parallel_group(self.expert_group_name))
def forward(self, hidden_states, used_token=None):
""" MoE forward
Arguments:
hidden_states (Tensor): input to the layer
used_token (Tensor, optional): default: None, mask only used tokens
Returns:
A tuple including output, gate loss, and expert count.
* output (Tensor): output of the model
* l_aux (Tensor): gate loss value
* exp_counts (int): expert count
"""
output = self.deepspeed_moe(hidden_states, used_token)
if self.use_residual:
# Residual MoE
output_mlp = self.mlp(hidden_states)
if type(output_mlp) is tuple:
output_mlp = output_mlp[0] # Ignore the bias term for now
coef = self.coefficient(hidden_states)
coef = torch.nn.functional.softmax(coef, dim=-1)
output = output * coef[..., 0:1] + output_mlp * coef[..., 1:]
return output, self.deepspeed_moe.l_aux, self.deepspeed_moe.exp_counts
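# Illustrative sketch (toy shapes) of the residual mixing in MoE.forward above:
# a per-token softmax over two logits weights the expert output against the
# residual MLP output; the random tensors stand in for the real module outputs.
_h = torch.randn(2, 8)                                   # hidden_states
_expert_out, _mlp_out = torch.randn(2, 8), torch.randn(2, 8)
_coef = torch.nn.functional.softmax(torch.nn.Linear(8, 2)(_h), dim=-1)
_mixed = _expert_out * _coef[..., 0:1] + _mlp_out * _coef[..., 1:]
assert _mixed.shape == (2, 8)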
The provided code snippet includes necessary dependencies for implementing the `replace_transformer_layer` function. Write a Python function `def replace_transformer_layer(orig_layer_impl, model, checkpoint_dict, config, model_config)` to solve the following problem:
Replace bert-style transformer layers with DeepSpeed's transformer layer Arguments: orig_layer_impl (torch.nn.Module): the original transformer layer implementation to look for, e.g., transformers.modeling_bert.BertLayer. model (torch.nn.Module): user's nn.module representing their model checkpoint_dict: Dictionary for checkpoint passed from the Inference Engine config: top-level DS Inference config defined in inference/config.py model_config: HuggingFace model config passed from the inference/engine.py Returns: Updated nn.module with replaced transformer layers
Here is the function:
def replace_transformer_layer(orig_layer_impl,
model,
checkpoint_dict,
config,
model_config):
""" Replace bert-style transformer layers with DeepSpeed's transformer layer
Arguments:
orig_layer_impl (torch.nn.Module): the original transformer layer implementation to look for,
e.g., transformers.modeling_bert.BertLayer.
model (torch.nn.Module): user's nn.module representing their model
checkpoint_dict: Dictionary for checkpoint passed from the Inference Engine
config: top-level DS Inference config defined in inference/config.py
model_config: HuggingFace model config passed from the inference/engine.py
Returns:
Updated nn.module with replaced transformer layers
"""
# these act as globals: the internally defined functions below close over them everywhere
fp16 = (config.dtype == torch.float16 or config.dtype == torch.int8)
quantize = (config.dtype == torch.int8)
# todo: refactor later; in the future, minimize the style used above and use config.* instead
linear_layer_setting = None
'''
linear_layer_setting (tuple of modules) [Optional]: shows which two classes are used for linear layers and embedding layers
'''
micro_batch_size = -1
seed = -1
local_rank = -1
mp_replace = ReplaceWithTensorSlicing(
mp_group=config.tensor_parallel.tp_group,
mp_size=config.tensor_parallel.tp_size) #, out_dim=0, in_dim=1)
def replace_with_policy(child,
policy_cls,
triangular_masking,
inference=False,
layer_id=0):
policy = policy_cls(child, inference=inference)
global selected_policy_g
if selected_policy_g is None:
selected_policy_g = policy
if not policy.cuda_graph_supported:
# the policy says cuda graphs are not supported; raise an error if they were requested
assert not config.enable_cuda_graph, "cuda graph is not supported with this model, please disable"
if inference:
hidden_size, num_attention_heads = policy.get_hidden_heads()
assert num_attention_heads % config.tensor_parallel.tp_size == 0,\
"To run the model parallel across the GPUs, the attention_heads require to be divisible by the world_size!" +\
"This is because the attention computation is partitioned evenly among the parallel GPUs."
from deepspeed.moe.layer import MoE
moe = False
if hasattr(child, 'mlp') and isinstance(child.mlp, MoE):
num_experts = child.mlp.num_experts
moe = True
attn_linear_layer, qkvw, qkvb, dense_w, dense_b, scale_attention, megatron_v2 = policy.attention()
global megatron_v2_g
megatron_v2_g = megatron_v2
if not moe or config.moe.type == 'standard':
mlp_linear_layer, _h4h_w, _h4h_b, _4hh_w, _4hh_b = policy.mlp()
else:
mlp_linear_layer, _h4h_w, _h4h_b, _4hh_w, _4hh_b, \
_res_h4h_w, _res_h4h_b, _res_4hh_w, _res_4hh_b, _res_coef = policy.mlp(config.moe.type)
attn_nw, attn_nb, input_nw, input_nb = policy.layerNorm()
if False: # direct int8 weight casting is disabled; quantization is handled by GroupQuantizer below
if policy_cls is not HFBertLayerPolicy:
qkvw = qkvw.to(torch.int8)
dense_w = dense_w.to(torch.int8)
_h4h_w = [moe_w1.to(torch.int8)
for moe_w1 in _h4h_w] if moe else _h4h_w.to(torch.int8)
_4hh_w = [moe_w1.to(torch.int8)
for moe_w1 in _4hh_w] if moe else _4hh_w.to(torch.int8)
elif fp16:
qkvw = qkvw.half()
dense_w = dense_w.half()
_h4h_w = [moe_w1.half() for moe_w1 in _h4h_w] if moe else _h4h_w.half()
_4hh_w = [moe_w1.half() for moe_w1 in _4hh_w] if moe else _4hh_w.half()
if quantize or fp16:
qkvb = qkvb if qkvb is None else qkvb.half()
dense_b = dense_b if dense_b is None else dense_b.half()
_h4h_b = [moe_b1.half() for moe_b1 in _h4h_b] if moe else _h4h_b.half()
_4hh_b = [moe_b1.half() for moe_b1 in _4hh_b] if moe else _4hh_b.half()
attn_nw = attn_nw if attn_nw is None else attn_nw.half()
attn_nb = attn_nb if attn_nb is None else attn_nb.half()
input_nw = input_nw.half()
input_nb = input_nb.half()
if config.moe.enabled and config.moe.type == 'residual' and fp16:
_res_h4h_b = _res_h4h_b.half()
_res_4hh_b = _res_4hh_b.half()
_res_h4h_w = _res_h4h_w.half()
_res_4hh_w = _res_4hh_w.half()
_res_coef = _res_coef.half()
#expert_mp_replace = ReplaceWithTensorSlicing(mp_group=expert_mp_group)
quantizer = GroupQuantizer(q_int8=quantize)
if inference:
scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx if hasattr(
config,
'scale_attn_by_inverse_layer_idx') else False
if moe:
ep_world_size = dist.get_world_size()
local_ep_size = 1 if num_experts < ep_world_size else num_experts // ep_world_size
bigscience_bloom = policy_cls is BLOOMLayerPolicy
transformer_config = transformer_inference.DeepSpeedMoEInferenceConfig(
hidden_size=hidden_size,
heads=num_attention_heads,
layer_norm_eps=config.layer_norm_eps if hasattr(
config,
'layer_norm_eps') else 1e-12,
fp16=fp16,
pre_layer_norm=policy.pre_attn_norm,
mp_size=config.tensor_parallel.tp_size,
q_int8=quantize,
moe_experts=local_ep_size,
global_experts=num_experts,
mlp_type=config.moe.type,
scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx)
else:
rotary_dim = model_config.rotary_dim if hasattr(model_config, 'rotary_dim') else child.attention.rotary_ndims \
if hasattr(child, 'attention') and hasattr(child.attention,'rotary_ndims') else -1
bigscience_bloom = policy_cls is BLOOMLayerPolicy
transformer_config = transformer_inference.DeepSpeedInferenceConfig(
hidden_size=hidden_size,
heads=num_attention_heads,
layer_norm_eps=model_config.layer_norm_eps if hasattr(
model_config,
'layer_norm_eps') else
(model_config.layer_norm_epsilon if hasattr(
model_config,
'layer_norm_epsilon') else model_config.layernorm_epsilon
if hasattr(model_config,
'layernorm_epsilon') else 1.0e-12),
fp16=fp16,
pre_layer_norm=policy.pre_attn_norm,
mp_size=config.tensor_parallel.tp_size,
q_int8=quantize,
return_tuple=(config.return_tuple
or (policy_cls is HFBertLayerPolicy)),
triangular_masking=(policy_cls is not HFBertLayerPolicy),
local_attention=((model_config.attention_layers[layer_id] == "local")
if hasattr(model_config,
'attention_layers') else False),
window_size=(model_config.window_size if hasattr(
model_config,
'window_size') else 1),
rotary_dim=rotary_dim,
mlp_after_attn=(rotary_dim is None or rotary_dim < 0),
mlp_act_func_type=policy.mlp_act_func_type,
training_mp_size=config.training_mp_size,
bigscience_bloom=bigscience_bloom,
max_out_tokens=config.max_out_tokens,
scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx)
global transformer_config_g
transformer_config_g = transformer_config
if moe:
new_module = transformer_inference.DeepSpeedMoEInference(
transformer_config,
mp_group=config.tensor_parallel.tp_group,
ep_group=None
if config.moe.ep_group is None else config.moe.ep_group[num_experts],
expert_mp_group=None if config.moe.ep_mp_group is None else
config.moe.ep_mp_group[num_experts],
)
else:
new_module = transformer_inference.DeepSpeedTransformerInference(
transformer_config,
mp_group=config.tensor_parallel.tp_group,
)
new_module.config.scale_attention = scale_attention
# we want the weights in [input, output] shape
# linear layer is created with [input, output] shape
# transpose it here to reduce inference cost!
def transpose(data):
# temp move to cpu to avoid requiring extra GPU memory during the reshape
data = data.to('cpu').contiguous()
data.reshape(-1).copy_(data.transpose(-1, -2).contiguous().reshape(-1))
data = data.reshape(data.shape[-1], data.shape[-2])
data = data.to(torch.cuda.current_device())
return data
attn_block = new_module.attention
mpl_block = new_module.mlp
if attn_linear_layer:
if qkvw.numel() == 0 or qkvw.is_meta:
if qkvw.is_meta or qkvw.ds_tensor.numel(
) < attn_block.attn_qkvw.numel():
pass
else:
with GatheredParameters([qkvw,
dense_w,
qkvb,
dense_b],
modifier_rank=0):
qkvw = transpose(qkvw.data)
dense_w = transpose(dense_w.data)
qkvb = qkvb.data
dense_b = dense_b.data
else:
qkvw.data = transpose(qkvw.data)
dense_w.data = transpose(dense_w.data)
def _transpose(x):
attention_head_size = x.shape[-1] // transformer_config.heads
new_x_shape = x.size()[:-1] + (transformer_config.heads,
attention_head_size)
x_1 = x.view(*new_x_shape)
(q, k, v) = torch.split(x_1, (x_1.shape[-1] // 3), dim=(x_1.dim() - 1))
if len(q.shape) > 2:
return torch.cat((q.reshape(q.shape[0],
-1),
k.reshape(q.shape[0],
-1),
v.reshape(q.shape[0],
-1)),
dim=-1).reshape(x.shape)
else:
return torch.cat((q.reshape(-1),
k.reshape(-1),
v.reshape(-1)),
dim=-1).reshape(x.shape)
if megatron_v2:
new_module.config.rotate_half = True
new_module.config.rotate_every_two = False
# Note: this part needs to be added for BLOOM architecture
qkvw = torch.nn.parameter.Parameter(_transpose(qkvw).contiguous())
qkvb = torch.nn.parameter.Parameter(_transpose(qkvb).contiguous())
# NOTE: This part caused instability in the multi-GPU inference!
# TODO: This needs to be incorporated in the kernels.
#dense_b = dense_b if dense_b is None else dense_b * (
# transformer_config.training_mp_size / transformer_config.mp_size)
#_4hh_b = _4hh_b * (transformer_config.training_mp_size /
# transformer_config.mp_size)
if mlp_linear_layer:
if not moe and (_4hh_w.numel() == 0 or _4hh_w.is_meta):
if _4hh_w.is_meta or _4hh_w.ds_tensor.numel(
) < mpl_block.inter_w.numel():
pass
else:
with GatheredParameters([_h4h_w,
_4hh_w,
_4hh_b,
_h4h_b],
modifier_rank=0):
_h4h_w = transpose(_h4h_w.data)
_4hh_w = transpose(_4hh_w.data)
_h4h_b = _h4h_b.data
_4hh_b = _4hh_b.data
else:
_h4h_w = [transpose(moe_w1.data)
for moe_w1 in _h4h_w] if moe else transpose(_h4h_w.data)
_4hh_w = [transpose(moe_w1.data)
for moe_w1 in _4hh_w] if moe else transpose(_4hh_w.data)
if moe and config.moe.type == 'residual':
_res_h4h_w.data = transpose(_res_h4h_w.data)
_res_4hh_w.data = transpose(_res_4hh_w.data)
_res_coef.data = transpose(_res_coef.data)
if qkvw.is_meta or qkvw.numel() == 0:
if qkvw.is_meta or qkvw.ds_tensor.numel() < attn_block.attn_qkvw.numel():
if qkvb is None:
attn_block.attn_qkvb = None
if dense_b is None:
attn_block.attn_ob = None
pass
else:
with GatheredParameters([
attn_block.attn_qkvw,
attn_block.attn_qkvb,
attn_block.attn_ow,
attn_block.attn_ob
],
modifier_rank=0):
attn_block.attn_qkvw = mp_replace.copy(
attn_block.attn_qkvw,
qkvw)
attn_block.attn_qkvb = mp_replace.copy(
attn_block.attn_qkvb,
qkvb)
attn_block.attn_ow = mp_replace.copy(attn_block.attn_ow, dense_w)
attn_block.attn_ob = mp_replace.copy(attn_block.attn_ob, dense_b)
else:
attn_block.attn_qkvw = quantizer.quantize(
mp_replace.copy(attn_block.attn_qkvw, qkvw) if bigscience_bloom else \
mp_replace.qkv_copy(attn_block.attn_qkvw, qkvw))
attn_block.attn_qkvb = \
mp_replace.copy(attn_block.attn_qkvb, qkvb) if bigscience_bloom else \
mp_replace.qkv_copy(attn_block.attn_qkvb, qkvb)
attn_block.attn_ow = quantizer.quantize(
mp_replace.copy(attn_block.attn_ow,
dense_w))
attn_block.attn_ob = mp_replace.copy(attn_block.attn_ob, dense_b)
if moe:
gpu_index = dist.get_rank()
gpu_index = 0
for ep_index in range(local_ep_size):
mpl_block[ep_index].inter_w.data = _h4h_w[
gpu_index * local_ep_size + ep_index].to(
torch.cuda.current_device())
mpl_block[ep_index].inter_b.data = _h4h_b[
gpu_index * local_ep_size + ep_index].to(
torch.cuda.current_device())
mpl_block[ep_index].output_w.data = _4hh_w[
gpu_index * local_ep_size + ep_index].to(
torch.cuda.current_device())
mpl_block[ep_index].output_b.data = _4hh_b[
gpu_index * local_ep_size + ep_index].to(
torch.cuda.current_device())
new_module.attn_nw.data = attn_nw.to(torch.cuda.current_device())
new_module.attn_nb.data = attn_nb.to(torch.cuda.current_device())
if config.moe.type == 'residual':
new_module.res_mlp.inter_w.data = _res_h4h_w.to(
torch.cuda.current_device())
new_module.res_mlp.inter_b.data = _res_h4h_b.to(
torch.cuda.current_device())
new_module.res_mlp.output_w.data = _res_4hh_w.to(
torch.cuda.current_device())
new_module.res_mlp.output_b.data = _res_4hh_b.to(
torch.cuda.current_device())
new_module.res_coef.data = _res_coef.to(torch.cuda.current_device())
else:
if _4hh_w.numel() == 0 or _4hh_w.is_meta:
if _4hh_w.is_meta or _4hh_w.ds_tensor.numel(
) < mpl_block.inter_w.numel():
pass
else:
with GatheredParameters([_h4h_w,
_4hh_w,
_4hh_b,
_h4h_b],
modifier_rank=0):
mpl_block.inter_w = mp_replace.copy(
mpl_block.inter_w,
_h4h_w)
mpl_block.inter_b = mp_replace.copy(
mpl_block.inter_b,
_h4h_b)
mpl_block.output_w = mp_replace.copy(
mpl_block.output_w,
_4hh_w)
mpl_block.output_b = mp_replace.copy(
mpl_block.output_b,
_4hh_b)
else:
mpl_block.inter_w = quantizer.quantize(
mp_replace.copy(mpl_block.inter_w,
_h4h_w))
mpl_block.inter_b = mp_replace.copy(mpl_block.inter_b, _h4h_b)
mpl_block.output_w = quantizer.quantize(
mp_replace.copy(mpl_block.output_w,
_4hh_w))
mpl_block.output_b = mp_replace.copy(mpl_block.output_b, _4hh_b)
if attn_nw is None:
new_module.mlp.attn_nw = attn_nw
new_module.mlp.attn_nb = attn_nb
else:
if attn_nw.is_meta or attn_nw.numel() == 0:
if attn_nw.is_meta or attn_nw.ds_tensor.numel(
) < new_module.mlp.attn_nw.numel():
pass
else:
with GatheredParameters([attn_nw, attn_nb], modifier_rank=0):
new_module.mlp.attn_nw.data.copy_(
attn_nw.to(torch.cuda.current_device()))
new_module.mlp.attn_nb.data.copy_(
attn_nb.to(torch.cuda.current_device()))
else:
new_module.mlp.attn_nw.data.copy_(
attn_nw.to(torch.cuda.current_device()))
new_module.mlp.attn_nb.data.copy_(
attn_nb.to(torch.cuda.current_device()))
if input_nw.is_meta or input_nw.numel() == 0:
if input_nw.is_meta or input_nw.ds_tensor.numel(
) < new_module.norm_w.numel():
pass
else:
with GatheredParameters([input_nw, input_nb], modifier_rank=0):
new_module.norm_w.data.copy_(
input_nw.to(torch.cuda.current_device()))
new_module.norm_b.data.copy_(
input_nb.to(torch.cuda.current_device()))
else:
new_module.norm_w.data.copy_(input_nw.to(torch.cuda.current_device()))
new_module.norm_b.data.copy_(input_nb.to(torch.cuda.current_device()))
else:
transformer_config = deepspeed.DeepSpeedTransformerConfig(
batch_size=micro_batch_size if micro_batch_size > 0 else 1,
hidden_size=config.hidden_size,
heads=config.num_attention_heads,
attn_dropout_ratio=config.attention_probs_dropout_prob,
hidden_dropout_ratio=config.hidden_dropout_prob,
num_hidden_layers=config.num_hidden_layers,
initializer_range=config.initializer_range,
layer_norm_eps=config.layer_norm_eps if hasattr(
config,
'layer_norm_eps') else 1e-12,
seed=seed,
fp16=fp16,
pre_layer_norm=policy.pre_attn_norm,
return_tuple=config.return_tuple,
local_rank=local_rank,
stochastic_mode=True,
normalize_invertible=True,
training=True)
new_module = deepspeed.DeepSpeedTransformerLayer(transformer_config)
new_module.attn_qkvw.data = qkvw
new_module.attn_qkvb.data = qkvb
new_module.attn_ow.data = dense_w
new_module.attn_ob.data = dense_b
new_module.attn_nw.data = attn_nw
new_module.attn_nb.data = attn_nb
new_module.norm_w.data = input_nw
new_module.norm_b.data = input_nb
new_module.inter_w.data = _h4h_w
new_module.inter_b.data = _h4h_b
new_module.output_w.data = _4hh_w
new_module.output_b.data = _4hh_b
return new_module
def replace_wo_policy(module, all_reduce_linears):
mp_size = config.tensor_parallel.tp_size
mp_group = config.tensor_parallel.tp_group
def _replace(child, name, conv_linear_layer):
mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group)
z_inference = (len(list(child.parameters())) > 0) and (list(
child.parameters())[0].numel() == 0)
if z_inference:
weight_shape = child.weight.ds_shape
else:
weight_shape = child.weight.shape
if name in all_reduce_linears:
new_weight = torch.empty((
weight_shape[1] if conv_linear_layer else weight_shape[0],
(weight_shape[0] if conv_linear_layer else weight_shape[1]) //
mp_size,
),
device=child.weight.device,
dtype=child.weight.dtype)
if z_inference:
with deepspeed.zero.GatheredParameters(child.weight,
modifier_rank=0):
data = child.weight.data.to(new_weight.device)
if conv_linear_layer:
data = data.transpose(-1, -2).contiguous()
data = mp_replace.copy(new_weight, data)
child.weight.ds_tensor = torch.empty(1)
else:
if conv_linear_layer:
child.weight.data = child.weight.data.transpose(-1,
-2).contiguous()
data = mp_replace.copy(new_weight, child.weight.data)
new_bias = torch.empty((weight_shape[0]),
device=child.weight.device,
dtype=child.weight.dtype)
if z_inference:
with deepspeed.zero.GatheredParameters(child.bias, modifier_rank=0):
new_bias.data.copy_(child.bias.data)
elif child.bias is not None:
new_bias.data.copy_(child.bias.data)
return LinearAllreduce(data, child.bias if child.bias is None else \
torch.nn.parameter.Parameter(new_bias.to(torch.cuda.current_device())), mp_group)
else:
new_weight = torch.empty((
(weight_shape[1] if conv_linear_layer else weight_shape[0]) //
mp_size,
weight_shape[0] // mp_size if conv_linear_layer else weight_shape[1],
),
device=child.weight.device,
dtype=child.weight.dtype)
if z_inference:
with deepspeed.zero.GatheredParameters(child.weight,
modifier_rank=0):
data = child.weight.data.to(new_weight.device)
if conv_linear_layer:
data = data.transpose(-1, -2).contiguous()
data = mp_replace.copy(new_weight, data)
child.weight.ds_tensor = torch.empty(1)
else:
if conv_linear_layer:
child.weight.data = child.weight.data.transpose(-1,
-2).contiguous()
data = mp_replace.copy(new_weight, child.weight.data)
new_bias = torch.empty((weight_shape[0] // mp_size),
device=child.weight.device,
dtype=child.weight.dtype)
if z_inference:
with deepspeed.zero.GatheredParameters(child.bias, modifier_rank=0):
bias_data = None if child.bias is None else mp_replace.copy(
new_bias,
child.bias.data).to(torch.cuda.current_device())
else:
bias_data = None if child.bias is None else mp_replace.copy(
new_bias,
child.bias.data).to(torch.cuda.current_device())
return LinearLayer(weight=data.to(torch.cuda.current_device()),
bias=bias_data)
def _slice_embedding(child, name, conv_linear_layer):
mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group)
new_weight = torch.empty((child.weight.shape[0],
child.weight.shape[1] // mp_size),
device=child.weight.device,
dtype=child.weight.dtype)
data = mp_replace.copy(new_weight,
child.weight.ds_tensor.data if hasattr(child.weight, 'ds_tensor') else \
child.weight.data)
new_embedding = nn.Embedding(child.weight.shape[0],
child.weight.shape[1] // mp_size)
new_embedding.weight.data.copy_(data)
return new_embedding
def update_mp_params(child):
if hasattr(child, 'n_heads'):
child.n_heads = child.n_heads // mp_size
if hasattr(child, 'inner_dim'):
child.inner_dim = child.inner_dim // mp_size
if hasattr(child, 'num_heads'):
child.num_heads = child.num_heads // mp_size
if hasattr(child, 'num_attention_heads'):
child.num_attention_heads = child.num_attention_heads // mp_size
if hasattr(child, 'all_head_size'):
child.all_head_size = child.all_head_size // mp_size
if hasattr(child, 'embed_dim'):
child.embed_dim = child.embed_dim // mp_size
if hasattr(child, 'hidden_size'):
child.hidden_size = child.hidden_size // mp_size
conv_linear_layer = False
if linear_layer_setting is not None:
linear_policies = {linear_layer_setting[0]: _replace}
if len(linear_layer_setting) == 2:
linear_policies.update({linear_layer_setting[1]: _slice_embedding})
else:
if orig_layer_impl is HFGPT2LayerPolicy._orig_layer_class:
try:
import transformers
conv_linear_layer = True
linear_policies = {transformers.pytorch_utils.Conv1D: _replace}
except (ImportError, AttributeError):
conv_linear_layer = False
linear_policies = {nn.Linear: _replace}
else:
linear_policies = {nn.Linear: _replace, nn.Embedding: _slice_embedding}
def _replace_module(r_module, prev_name=''):
for name, child in r_module.named_children():
if child.__class__ in linear_policies:
setattr(
r_module,
name,
linear_policies[child.__class__](child,
prev_name + '.' + name,
conv_linear_layer))
else:
update_mp_params(child)
_replace_module(child, name)
return r_module
return _replace_module(module)
def replace_fn(child, _policy, layer_id=0):
training = False # todo: refactor this part to go in the config
if training:
# copy relevant state from child -> new module
new_module = replace_with_policy(child, _policy, config.triangular_masking)
else:
# copy relevant state from child -> new module
if config.replace_with_kernel_inject:
new_module = replace_with_policy(child,
_policy,
config.triangular_masking,
inference=True,
layer_id=layer_id)
else:
new_module = replace_wo_policy(child, _policy)
return new_module
replaced_module = replace_module(model=model,
orig_class=orig_layer_impl,
replace_fn=replace_fn,
_replace_policy=config.injection_policy_tuple)
quantizer = GroupQuantizer(q_int8=quantize)
world_size = dist.get_world_size() if dist.is_initialized() else 1
rank = dist.get_rank() if dist.is_initialized() else 0
if checkpoint_dict is not None:
start_time = time.time()
checkpoint = checkpoint_dict['checkpoints']
ckpt_list = checkpoint["tp"] if type(checkpoint) is dict else checkpoint
ckpt_type = checkpoint_dict.get('parallelization', 'pp')
ckpt_mp_size = checkpoint_dict.get('tp_size', len(ckpt_list))
ckpt_mp_size = checkpoint_dict.get('mp_size', ckpt_mp_size)
base_dir1 = checkpoint_dict.get('base_dir', config.base_dir)
if ckpt_type == 'pp' and type(checkpoint) is list:
pbar = tqdm.tqdm(total=len(checkpoint),
desc=f"Loading {len(checkpoint)} checkpoint shards")
for i in range(len(checkpoint)):
sd = [
torch.load(os.path.join(base_dir1,
checkpoint[i]),
map_location='cpu')
]
load_model_with_checkpoint(
replaced_module,
sd,
mp_replace,
ckpt_type,
quantizer,
param_names=selected_policy_g.get_param_names(),
transformer_config=transformer_config_g,
megatron_v2=megatron_v2_g)
pbar.update(1)
else:
import gc
num_checkpoints = len(ckpt_list) // ckpt_mp_size
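# Map this rank onto the checkpoint's TP layout: when world_size >= ckpt_mp_size,
# tp_split_size ranks share each checkpoint shard; when world_size < ckpt_mp_size,
# each rank loads sd_count consecutive shards starting at index sd_offset.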
tp_split_size = (world_size / ckpt_mp_size)
sd_offset = int(rank / tp_split_size)
sd_count = int((rank + max(1, tp_split_size)) / tp_split_size) - sd_offset
pbar = tqdm.tqdm(total=num_checkpoints,
desc=f"Loading {num_checkpoints} checkpoint shards")
for i in range(num_checkpoints):
pbar.update(1)
ckpt_index = i * ckpt_mp_size + sd_offset
ckpt_files = [
os.path.join(base_dir1,
ckpt_list[ckpt_index +
j]) if base_dir1 else ckpt_list[ckpt_index +
j]
for j in range(sd_count)
]
sds = [
torch.load(ckpt_file,
map_location='cpu') for ckpt_file in ckpt_files
]
load_model_with_checkpoint(
replaced_module,
sds,
mp_replace,
ckpt_type,
quantizer,
int(rank % tp_split_size),
param_names=selected_policy_g.get_param_names(),
transformer_config=transformer_config_g,
megatron_v2=megatron_v2_g)
sds = [None for _ in sds]
gc.collect()
if "non_tp" in checkpoint:
pbar = tqdm.tqdm(
total=len(checkpoint["non_tp"]),
desc=f"Loading {len(checkpoint['non_tp'])} checkpoint shards")
for i in range(len(checkpoint["non_tp"])):
pbar.update(1)
ckpt_file = os.path.join(base_dir1,
checkpoint["non_tp"][i]
) if base_dir1 else checkpoint["non_tp"][i]
sds = [torch.load(ckpt_file, map_location='cpu')]
load_model_with_checkpoint(
replaced_module,
sds,
mp_replace,
ckpt_type,
quantizer,
int(rank % tp_split_size),
param_names=selected_policy_g.get_param_names(),
transformer_config=transformer_config_g,
megatron_v2=megatron_v2_g)
sds = [None for _ in sds]
gc.collect()
print(f"checkpoint loading time at rank {rank}: {time.time()-start_time} sec")
if config.save_mp_checkpoint_path is not None:
from collections import OrderedDict
import json
num_partitions = 8
if checkpoint_dict is None:
ckpt_name = "ds_model"
try:
from transformers.models.bloom.modeling_bloom import BloomForCausalLM
if isinstance(model, BloomForCausalLM):
ckpt_name = "bloom"
except ImportError:
ckpt_name = "ds_model"
else:
ckpt_name = checkpoint_dict['type']
if dist.is_initialized():
dist.barrier()
transformer_name = get_transformer_name(replaced_module)
non_tp_ckpt_name = 'non-tp.pt'
ckpt_files = [non_tp_ckpt_name]
os.makedirs(config.save_mp_checkpoint_path, exist_ok=True)
if not dist.is_initialized() or dist.get_rank() == 0:
print("Saving tp-sharded checkpoints")
torch.save(
OrderedDict({
k: v
for k,
v in dict(replaced_module.state_dict()).items()
if transformer_name not in k
}),
f'{config.save_mp_checkpoint_path}/{non_tp_ckpt_name}')
ckpt_config = json.dumps({
'type':
ckpt_name,
'base_dir':
f'{config.save_mp_checkpoint_path}',
'checkpoints': {
"non_tp":
ckpt_files,
"tp": [
f'tp_{r:0>2d}_{m:0>2d}.pt' for m in range(num_partitions)
for r in range(world_size)
]
},
'version':
1.0,
'parallelization':
'tp',
'tp_size':
world_size,
'dtype':
'int8' if quantize else ('float16' if fp16 else 'float32')
})
with open(f"{config.save_mp_checkpoint_path}/ds_inference_config.json",
"w") as cfg:
cfg.write(ckpt_config)
rep_sd = replaced_module.state_dict()
for n, p in replaced_module.named_parameters():
if hasattr(p, 'scale'):
rep_sd[n] = [p, p.scale]
keys = list(rep_sd.keys())
partition_size = (len(keys) // num_partitions + 1)
for m in range(num_partitions):
torch.save(
OrderedDict({
k: [rep_sd[k],
rep_sd[k].scale] if hasattr(rep_sd[k],
'scale') else rep_sd[k]
for k in keys[m * partition_size:(m + 1) * partition_size]
if transformer_name in k
}),
f'{config.save_mp_checkpoint_path}/tp_{rank:0>2d}_{m:0>2d}.pt')
return replaced_module | Replace bert-style transformer layers with DeepSpeed's transformer layer Arguments: orig_layer_impl (torch.nn.Module): the original transformer layer implementation to look for, e.g., transformers.modeling_bert.BertLayer. model (torch.nn.Module): user's nn.module representing their model checkpoint_dict: Dictionary for checkpoint passed from the Inference Engine config: top-level DS Inference config defined in inference/config.py model_config: HuggingFace model config passed from the inference/engine.py Returns: Updated nn.module with replaced transformer layers |
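The `transpose` helper above rewrites a `[out, in]` weight into `[in, out]` through a flat in-place copy instead of keeping a non-contiguous view. A standalone sketch of the same trick (plain torch, toy shape, not part of the DeepSpeed API):
import torch
def transpose_flat(data):
    # Copy the transposed values through a flat view of the buffer, then
    # reinterpret it with swapped dims -- equivalent to data.t().contiguous().
    data = data.contiguous()
    data.reshape(-1).copy_(data.transpose(-1, -2).contiguous().reshape(-1))
    return data.reshape(data.shape[-1], data.shape[-2])
w = torch.arange(12, dtype=torch.float32).reshape(3, 4)
assert torch.equal(transpose_flat(w.clone()), w.t().contiguous())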
10,378 | import os
import torch
import tqdm
import deepspeed
import deepspeed.ops.transformer as transformer_inference
from deepspeed.ops.transformer.inference.diffusers_attention import DeepSpeedDiffusersAttention
from deepspeed.ops.transformer.inference.diffusers_transformer_block import DeepSpeedDiffusersTransformerBlock
from deepspeed.ops.transformer.inference.diffusers_2d_transformer import Diffusers2DTransformerConfig
from .replace_policy import HFBertLayerPolicy, HFGPT2LayerPolicy, BLOOMLayerPolicy
from .replace_policy import replace_policies, generic_policies
from deepspeed import comm as dist
from torch import nn
from ..runtime.zero import GatheredParameters
from .layers import LinearAllreduce, LinearLayer
from .load_checkpoint import load_model_with_checkpoint
import time
def replace_module(model, orig_class, replace_fn, _replace_policy):
""" Scan the model for instances of ``orig_clas:`` to replace using ``replace_fn``.
Arguments:
model (torch.nn.Module): the model to augment
orig_class (torch.nn.Module): the module to search for
replace_fn (method): a method to convert instances of ``orig_class`` to the
desired type and return a new instance.
_replace_policy: the policy object forwarded to ``replace_fn``.
Returns:
A modified ``model``.
"""
policy = {}
if orig_class is not None:
policy.update({orig_class: (replace_fn, _replace_policy)})
else:
for plcy in replace_policies:
# instantiate a throw-away policy in order to populate the _orig_layer_class
_ = plcy(None)
if isinstance(plcy._orig_layer_class, list):
for orig_layer_class in plcy._orig_layer_class:
policy.update({orig_layer_class: (replace_fn, plcy)})
elif plcy._orig_layer_class is not None:
policy.update({plcy._orig_layer_class: (replace_fn, plcy)})
assert len(policy.items()) > 0,\
"No default policy found! Please specify your policy injection_policy (like {BertLayer:HFBEertLayerPolicy})." +\
"You can find some samples here: https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/module_inject/replace_policy.py"
replaced_module, _ = _replace_module(model, policy)
return replaced_module
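# A self-contained sketch of the recursive replace pattern that replace_module
# relies on (_replace_module itself is defined elsewhere); generic torch code,
# not the DeepSpeed implementation:
def _swap_modules_sketch(model, policy):
    # Recursively replace children whose class appears in `policy`.
    for name, child in model.named_children():
        if child.__class__ in policy:
            setattr(model, name, policy[child.__class__](child))
        else:
            _swap_modules_sketch(child, policy)
    return model
# e.g. _swap_modules_sketch(model, {nn.ReLU: lambda old: nn.GELU()}) replaces
# every nn.ReLU in `model` with an nn.GELU.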
from ..pipe import PipelineModule
The provided code snippet includes necessary dependencies for implementing the `revert_transformer_layer` function. Write a Python function `def revert_transformer_layer(orig_layer_impl, model, config, preln=False)` to solve the following problem:
Revert DeepSpeed's transformer layer back to original bert-style transformer layer Arguments: orig_layer_impl (torch.nn.Module): the original transformer layer implementation that was replaced, e.g., transformers.modeling_bert.BertLayer. model (torch.nn.Module): user's nn.module representing their model config (dict): model config containing hidden size, attention heads, etc. Returns: Updated nn.module with original bert-style transformer layers
Here is the function:
def revert_transformer_layer(orig_layer_impl, model, config, preln=False):
""" Revert DeepSpeed's transformer layer back to original bert-style transformer layer
Arguments:
orig_layer_impl (torch.nn.Module): the original transformer layer implementation that was replaced,
e.g., transformers.modeling_bert.BertLayer.
model (torch.nn.Module): user's nn.module representing their model
config (dict): model config containing hidden size, attention heads, etc.
Returns:
Updated nn.module with original bert-style transformer layers
"""
def replace_fn(child, _replace_policy, layer_id):
#from turing.nvidia_modelingpreln import BertLayer
orig_module = orig_layer_impl(config)
# copy relevant state from child -> original module
qkvw = child.attn_qkvw.data
qkvb = child.attn_qkvb.data
qw, kw, vw = torch.chunk(qkvw, 3, axis=0)
qb, kb, vb = torch.chunk(qkvb, 3, axis=0)
orig_module.attention.self.query.weight.data = qw
orig_module.attention.self.query.bias.data = qb
orig_module.attention.self.key.weight.data = kw
orig_module.attention.self.key.bias.data = kb
orig_module.attention.self.value.weight.data = vw
orig_module.attention.self.value.bias.data = vb
orig_module.attention.output.dense.weight.data = child.attn_ow.data
orig_module.attention.output.dense.bias.data = child.attn_ob.data
attn_ln_w = child.attn_nw.data
attn_ln_b = child.attn_nb.data
if preln:
orig_module.PostAttentionLayerNorm.weight.data = attn_ln_w
orig_module.PostAttentionLayerNorm.bias.data = attn_ln_b
else:
orig_module.attention.output.LayerNorm.weight.data = attn_ln_w
orig_module.attention.output.LayerNorm.bias.data = attn_ln_b
inter_ff_w = child.inter_w.data
inter_ff_b = child.inter_b.data
if preln:
orig_module.intermediate.dense_act.weight.data = inter_ff_w
orig_module.intermediate.dense_act.bias.data = inter_ff_b
else:
orig_module.intermediate.dense.weight.data = inter_ff_w
orig_module.intermediate.dense.bias.data = inter_ff_b
orig_module.output.dense.weight.data = child.output_w.data
orig_module.output.dense.bias.data = child.output_b.data
transformer_ln_w = child.norm_w.data
transformer_ln_b = child.norm_b.data
if preln:
orig_module.PreAttentionLayerNorm.weight.data = transformer_ln_w
orig_module.PreAttentionLayerNorm.bias.data = transformer_ln_b
else:
orig_module.output.LayerNorm.weight.data = transformer_ln_w
orig_module.output.LayerNorm.bias.data = transformer_ln_b
return orig_module
return replace_module(model=model,
orig_class=deepspeed.DeepSpeedTransformerLayer,
replace_fn=replace_fn,
_replace_policy=None) | Revert DeepSpeed's transformer layer back to original bert-style transformer layer Arguments: orig_layer_impl (torch.nn.Module): the original transformer layer implementation that was replaced, e.g., transformers.modeling_bert.BertLayer. model (torch.nn.Module): user's nn.module representing their model config (dict): model config containing hidden size, attention heads, etc. Returns: Updated nn.module with original bert-style transformer layers |
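As a minimal standalone illustration of the fused-QKV split performed in `replace_fn` above (toy hidden size, plain torch):
import torch
hidden = 6
qkvw = torch.randn(3 * hidden, hidden)  # fused [q; k; v] projection weight
qw, kw, vw = torch.chunk(qkvw, 3, dim=0)
assert qw.shape == (hidden, hidden)
assert torch.equal(torch.cat([qw, kw, vw], dim=0), qkvw)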
10,379 | import os
import json
import numpy as np
import math
from packaging import version as pkg_version
from .config import ElasticityConfig, ElasticityConfigError, ElasticityError, \
ElasticityIncompatibleWorldSize
from .constants import ELASTICITY, ENABLED, ENABLED_DEFAULT, LATEST_ELASTICITY_VERSION, \
MINIMUM_DEEPSPEED_VERSION, DEEPSPEED_ELASTICITY_CONFIG
from ..git_version_info import version as __version__
from ..utils import logger
ELASTICITY = 'elasticity'
ENABLED = 'enabled'
ENABLED_DEFAULT = False
def elasticity_enabled(ds_config: dict):
if ELASTICITY not in ds_config:
return False
return ds_config[ELASTICITY].get(ENABLED, ENABLED_DEFAULT) | null |
10,380 | import os
import json
import numpy as np
import math
from packaging import version as pkg_version
from .config import ElasticityConfig, ElasticityConfigError, ElasticityError, \
ElasticityIncompatibleWorldSize
from .constants import ELASTICITY, ENABLED, ENABLED_DEFAULT, LATEST_ELASTICITY_VERSION, \
MINIMUM_DEEPSPEED_VERSION, DEEPSPEED_ELASTICITY_CONFIG
from ..git_version_info import version as __version__
from ..utils import logger
class ElasticityConfigError(ElasticityError):
"""
Elasticity configuration error
"""
class ElasticityConfig:
"""
Elastic config object, constructed from a param dictionary that only contains elastic
config parameters, example below:
If elasticity is enabled, user must specify (at least) max_train_batch_size
and micro_batch_sizes.
{
"enabled": true,
"max_train_batch_size": 2000,
"micro_batch_sizes": [2,4,6],
"min_gpus": 1,
"max_gpus" : 10000
"min_time": 20
"ignore_non_elastic_batch_info": false
"version": 0.1
}
"""
def __init__(self, param_dict):
self.enabled = param_dict.get(ENABLED, ENABLED_DEFAULT)
if self.enabled:
if MAX_ACCEPTABLE_BATCH_SIZE in param_dict:
self.max_acceptable_batch_size = param_dict[MAX_ACCEPTABLE_BATCH_SIZE]
else:
raise ElasticityConfigError(
f"Elasticity config missing {MAX_ACCEPTABLE_BATCH_SIZE}")
if MICRO_BATCHES in param_dict:
self.micro_batches = param_dict[MICRO_BATCHES]
else:
raise ElasticityConfigError(f"Elasticity config missing {MICRO_BATCHES}")
else:
self.max_acceptable_batch_size = param_dict.get(
MAX_ACCEPTABLE_BATCH_SIZE,
MAX_ACCEPTABLE_BATCH_SIZE_DEFAULT)
self.micro_batches = param_dict.get(MICRO_BATCHES, MICRO_BATCHES_DEFAULT)
if not isinstance(self.micro_batches, list):
raise ElasticityConfigError(
f"Elasticity expected value of {MICRO_BATCHES} to be a "
f"list of micro batches, instead is: {type(self.micro_batches)}, containing: {self.micro_batches}"
)
if not all(map(lambda m: isinstance(m, int), self.micro_batches)):
raise ElasticityConfigError(
f"Elasticity expected {MICRO_BATCHES} to only contain a list of integers, "
f"instead contains: f{self.micro_batches}")
if not all(map(lambda m: m > 0, self.micro_batches)):
raise ElasticityConfigError(
f"Elasticity expected {MICRO_BATCHES} to only contain positive integers, "
f"instead contains: f{self.micro_batches}")
self.min_gpus = param_dict.get(MIN_GPUS, MIN_GPUS_DEFAULT)
self.max_gpus = param_dict.get(MAX_GPUS, MAX_GPUS_DEFAULT)
if self.min_gpus < 1 or self.max_gpus < 1:
raise ElasticityConfigError(
"Elasticity min/max gpus must be > 0, "
f"given min_gpus: {self.min_gpus}, max_gpus: {self.max_gpus}")
if self.max_gpus < self.min_gpus:
raise ElasticityConfigError(
"Elasticity min_gpus cannot be greater than max_gpus, "
f"given min_gpus: {self.min_gpus}, max_gpus: {self.max_gpus}")
self.model_parallel_size = param_dict.get(MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT)
if self.model_parallel_size < 1:
raise ElasticityConfigError(
"Model-Parallel size cannot be less than 1, "
f"given model-parallel size: {self.model_parallel_size}")
self.num_gpus_per_node = param_dict.get(NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT)
if self.num_gpus_per_node < 1:
raise ElasticityConfigError(
"Number of GPUs per node cannot be less than 1, "
f"given number of GPUs per node: {self.num_gpus_per_node}")
self.min_time = param_dict.get(MIN_TIME, MIN_TIME_DEFAULT)
if self.min_time < 0:
raise ElasticityConfigError(
f"Elasticity min time needs to be >= 0: given {self.min_time}")
self.version = param_dict.get(VERSION, VERSION_DEFAULT)
self.prefer_larger_batch_size = param_dict.get(PREFER_LARGER_BATCH,
PREFER_LARGER_BATCH_DEFAULT)
self.ignore_non_elastic_batch_info = param_dict.get(
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT)
def repr(self):
return self.__dict__
def __repr__(self):
return json.dumps(self.__dict__, sort_keys=True, indent=4)
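# A hedged usage sketch (key names taken from the JSON example in the class
# docstring above, since the key constants are defined elsewhere):
#
#   elastic_config = ElasticityConfig({
#       "enabled": True,
#       "max_train_batch_size": 2000,    # MAX_ACCEPTABLE_BATCH_SIZE key
#       "micro_batch_sizes": [2, 4, 6],  # MICRO_BATCHES key
#       "min_gpus": 1,
#       "max_gpus": 10000,
#   })
#   print(elastic_config)  # JSON dump of the parsed fields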
DEEPSPEED_ELASTICITY_CONFIG = "DEEPSPEED_ELASTICITY_CONFIG"
The provided code snippet includes necessary dependencies for implementing the `ensure_immutable_elastic_config` function. Write a Python function `def ensure_immutable_elastic_config(runtime_elastic_config_dict: dict)` to solve the following problem:
Ensure the resource scheduler saw the same elastic config we are using at runtime
Here is the function:
def ensure_immutable_elastic_config(runtime_elastic_config_dict: dict):
"""
Ensure the resource scheduler saw the same elastic config we are using at runtime
"""
if DEEPSPEED_ELASTICITY_CONFIG in os.environ:
scheduler_elastic_config_dict = json.loads(
os.environ[DEEPSPEED_ELASTICITY_CONFIG])
scheduler_elastic_config = ElasticityConfig(scheduler_elastic_config_dict)
runtime_elastic_config = ElasticityConfig(runtime_elastic_config_dict)
err_str = "Elastic config '{}={}' seen by resource scheduler does not match config passed to runtime {}={}"
if runtime_elastic_config.max_acceptable_batch_size != scheduler_elastic_config.max_acceptable_batch_size:
raise ElasticityConfigError(
err_str.format('max_acceptable_batch_size',
scheduler_elastic_config.max_acceptable_batch_size,
'max_acceptable_batch_size',
runtime_elastic_config.max_acceptable_batch_size))
if runtime_elastic_config.micro_batches != scheduler_elastic_config.micro_batches:
raise ElasticityConfigError(
err_str.format('micro_batches',
scheduler_elastic_config.micro_batches,
'micro_batches',
runtime_elastic_config.micro_batches))
if runtime_elastic_config.version != scheduler_elastic_config.version:
raise ElasticityConfigError(
err_str.format('version',
scheduler_elastic_config.version,
'version',
runtime_elastic_config.version))
else:
logger.warning("Unable to find DEEPSPEED_ELASTICITY_CONFIG environment variable, cannot " \
"guarantee resource scheduler will scale this job using compatible GPU counts.") | Ensure the resource scheduler saw the same elastic config we are using at runtime |
10,381 | import os
import json
import numpy as np
import math
from packaging import version as pkg_version
from .config import ElasticityConfig, ElasticityConfigError, ElasticityError, \
ElasticityIncompatibleWorldSize
from .constants import ELASTICITY, ENABLED, ENABLED_DEFAULT, LATEST_ELASTICITY_VERSION, \
MINIMUM_DEEPSPEED_VERSION, DEEPSPEED_ELASTICITY_CONFIG
from ..git_version_info import version as __version__
from ..utils import logger
def _get_compatible_gpus_v01(micro_batches,
max_acceptable_batch_size,
min_gpus=None,
max_gpus=None,
prefer_larger=True):
'''We use two heuristics to compute the batch size
1. We use the Lowest Common Multiple (LCM) of the micro-batches
as the base batch size and scale it by the largest integer factor such
that the result is the largest batch size not exceeding
max_acceptable_batch_size.
2. We use each of the micro-batches as a base and scale it by the
largest integer factor such that the result is the largest batch size
not exceeding max_acceptable_batch_size.
We then brute-force count the compatible GPU counts for each of the
candidate batch sizes, and return the batch size with the most compatible
GPU counts in the min-max GPU range if provided; otherwise we return the
batch size with the most compatible GPU counts overall.
Returns:
final_batch_size
valid_gpus
'''
min_gpus = min_gpus or 1
max_gpus = max_gpus or max_acceptable_batch_size // min(micro_batches)
if not all(mb <= max_acceptable_batch_size for mb in micro_batches):
raise ValueError(f"All micro batches must be less than \
or equal to max_acceptable_batch_size: {max_acceptable_batch_size}")
lcm = np.lcm.reduce(micro_batches)
base_list = []
base_list.extend(micro_batches)
base_list.append(lcm)
candidate_batch_sizes = get_candidate_batch_sizes(base_list,
max_acceptable_batch_size)
final_batch_size, valid_gpus = get_best_candidates(
candidate_batch_sizes,
micro_batches,
min_gpus,
max_gpus,
prefer_larger)
return final_batch_size, valid_gpus
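# A hand-worked sketch of the v0.1 candidate arithmetic (same math as the
# LCM/scaling step above; get_candidate_batch_sizes itself is defined
# elsewhere). With micro_batches=[2, 4, 6] and max_acceptable_batch_size=2000:
# lcm = np.lcm.reduce([2, 4, 6]) == 12, so base_list = [2, 4, 6, 12]; each
# base b is scaled by math.floor(2000 / b), giving candidate batch sizes
# {2 * 1000, 4 * 500, 6 * 333, 12 * 166} = {2000, 1998, 1992}. Each candidate
# then decomposes as micro-batch * gradient-accumulation-steps * DP world size
# for every compatible GPU count.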
def _get_compatible_gpus_v02(micro_batches,
max_acceptable_batch_size,
current_num_gpus,
min_gpus=None,
max_gpus=None,
prefer_larger=True,
num_gpus_per_node=1,
model_parallel_size=1):
'''
Returns:
final_batch_size
valid_gpus
micro-batch size
'''
if num_gpus_per_node % model_parallel_size != 0:
raise ElasticityError(
f"In Elasticity v0.2, number of GPUs per node:" \
f"{num_gpus_per_node} should be divisible by " \
f"model parallel size {model_parallel_size}")
def get_microbatch(final_batch_size):
candidate_microbatch = None
for micro_batch in micro_batches:
if final_batch_size // current_num_gpus % micro_batch == 0:
if candidate_microbatch is None:
candidate_microbatch = micro_batch
if prefer_larger and candidate_microbatch < micro_batch:
candidate_microbatch = micro_batch
return candidate_microbatch
dp_size_per_node = num_gpus_per_node // model_parallel_size
final_batch_size, valid_world_size = _get_compatible_gpus_v01(micro_batches,
int(max_acceptable_batch_size/dp_size_per_node),
int(min_gpus/num_gpus_per_node),
int(max_gpus/num_gpus_per_node), # Passing number of max nodes as Elasticity v2 works at node level
prefer_larger=prefer_larger)
final_batch_size = int(final_batch_size) * dp_size_per_node
valid_dp_world_size = [i * dp_size_per_node for i in valid_world_size]
if current_num_gpus // model_parallel_size in valid_dp_world_size:
candidate_microbatch = get_microbatch(final_batch_size)
return final_batch_size, valid_dp_world_size, candidate_microbatch
current_dp_size = (current_num_gpus / num_gpus_per_node) * dp_size_per_node
candidate_batch_sizes = []
for micro_batch in micro_batches:
min_batch_size = micro_batch * current_dp_size
factor = math.floor(max_acceptable_batch_size / float(min_batch_size))
candidate_batch_sizes.append(factor * min_batch_size)
if prefer_larger:
candidate_batch_size = max(candidate_batch_sizes)
else:
candidate_batch_size = min(candidate_batch_sizes)
candidate_microbatch = get_microbatch(candidate_batch_size)
return candidate_batch_size, [int(current_dp_size)], candidate_microbatch
def _compatible_ds_version_check(target_deepspeed_version: str):
min_version = pkg_version.parse(MINIMUM_DEEPSPEED_VERSION)
target_version = pkg_version.parse(target_deepspeed_version)
err_str = f"Target deepspeed version of {target_deepspeed_version} is not compatible " \
f"with minimum version {MINIMUM_DEEPSPEED_VERSION} supporting elasticity."
if target_version < min_version:
raise ElasticityError(err_str)
return True
class ElasticityError(Exception):
"""
Base exception for all elasticity related errors
"""
class ElasticityConfigError(ElasticityError):
"""
Elasticity configuration error
"""
class ElasticityIncompatibleWorldSize(ElasticityError):
"""
Attempting to run a world size that is incompatible with a given elastic config
"""
class ElasticityConfig:
"""
Elastic config object, constructed from a param dictionary that only contains elastic
config parameters, example below:
If elasticity is enabled, user must specify (at least) max_train_batch_size
and micro_batch_sizes.
{
"enabled": true,
"max_train_batch_size": 2000,
"micro_batch_sizes": [2,4,6],
"min_gpus": 1,
"max_gpus" : 10000
"min_time": 20
"ignore_non_elastic_batch_info": false
"version": 0.1
}
"""
def __init__(self, param_dict):
self.enabled = param_dict.get(ENABLED, ENABLED_DEFAULT)
if self.enabled:
if MAX_ACCEPTABLE_BATCH_SIZE in param_dict:
self.max_acceptable_batch_size = param_dict[MAX_ACCEPTABLE_BATCH_SIZE]
else:
raise ElasticityConfigError(
f"Elasticity config missing {MAX_ACCEPTABLE_BATCH_SIZE}")
if MICRO_BATCHES in param_dict:
self.micro_batches = param_dict[MICRO_BATCHES]
else:
raise ElasticityConfigError(f"Elasticity config missing {MICRO_BATCHES}")
else:
self.max_acceptable_batch_size = param_dict.get(
MAX_ACCEPTABLE_BATCH_SIZE,
MAX_ACCEPTABLE_BATCH_SIZE_DEFAULT)
self.micro_batches = param_dict.get(MICRO_BATCHES, MICRO_BATCHES_DEFAULT)
if not isinstance(self.micro_batches, list):
raise ElasticityConfigError(
f"Elasticity expected value of {MICRO_BATCHES} to be a "
f"list of micro batches, instead is: {type(self.micro_batches)}, containing: {self.micro_batches}"
)
if not all(map(lambda m: isinstance(m, int), self.micro_batches)):
raise ElasticityConfigError(
f"Elasticity expected {MICRO_BATCHES} to only contain a list of integers, "
f"instead contains: f{self.micro_batches}")
if not all(map(lambda m: m > 0, self.micro_batches)):
raise ElasticityConfigError(
f"Elasticity expected {MICRO_BATCHES} to only contain positive integers, "
f"instead contains: f{self.micro_batches}")
self.min_gpus = param_dict.get(MIN_GPUS, MIN_GPUS_DEFAULT)
self.max_gpus = param_dict.get(MAX_GPUS, MAX_GPUS_DEFAULT)
if self.min_gpus < 1 or self.max_gpus < 1:
raise ElasticityConfigError(
"Elasticity min/max gpus must be > 0, "
f"given min_gpus: {self.min_gpus}, max_gpus: {self.max_gpus}")
if self.max_gpus < self.min_gpus:
raise ElasticityConfigError(
"Elasticity min_gpus cannot be greater than max_gpus, "
f"given min_gpus: {self.min_gpus}, max_gpus: {self.max_gpus}")
self.model_parallel_size = param_dict.get(MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT)
if self.model_parallel_size < 1:
raise ElasticityConfigError(
"Model-Parallel size cannot be less than 1, "
f"given model-parallel size: {self.model_parallel_size}")
self.num_gpus_per_node = param_dict.get(NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT)
if self.num_gpus_per_node < 1:
raise ElasticityConfigError(
"Number of GPUs per node cannot be less than 1, "
f"given number of GPUs per node: {self.num_gpus_per_node}")
self.min_time = param_dict.get(MIN_TIME, MIN_TIME_DEFAULT)
if self.min_time < 0:
raise ElasticityConfigError(
f"Elasticity min time needs to be >= 0: given {self.min_time}")
self.version = param_dict.get(VERSION, VERSION_DEFAULT)
self.prefer_larger_batch_size = param_dict.get(PREFER_LARGER_BATCH,
PREFER_LARGER_BATCH_DEFAULT)
self.ignore_non_elastic_batch_info = param_dict.get(
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT)
def repr(self):
return self.__dict__
def __repr__(self):
return json.dumps(self.__dict__, sort_keys=True, indent=4)
ELASTICITY = 'elasticity'
LATEST_ELASTICITY_VERSION = 0.2
ENABLED = 'enabled'
ENABLED_DEFAULT = False
The provided code snippet includes necessary dependencies for implementing the `compute_elastic_config` function. Write a Python function `def compute_elastic_config(ds_config: dict, target_deepspeed_version: str, world_size=0, return_microbatch=False)` to solve the following problem:
Core deepspeed elasticity API. Given an elastic config (similar to the example below) DeepSpeed will compute a total train batch size corresponding valid GPU count list that provides a high level of elasticity. Elasticity in this case means we are safe to scale the training job up/down across the GPU count list *without* any negative impacts on training convergence. This is achievable primarily due to DeepSpeed's gradient accumulation feature which allows us to decompose a global training batch size into: micro-batch-size * gradient-accumulation-steps * world-size. "elasticity": { "enabled": true, "max_train_batch_size": 2000, "micro_batch_sizes": [2,4,6], "min_gpus": 1, "max_gpus" : 10000 "min_time": 20 "version": 0.1 } Intended to be called both by scheduling infrastructure and deepspeed runtime. For the same `ds_config` we should return deterministic results. Args: ds_config (dict): DeepSpeed config dictionary/json target_deepspeed_version (str): When called from scheduling infrastructure we want to ensure that the target deepspeed version is compatible with the elasticity version used in the backend. world_size (int, optional): Intended/current DP world size, will do some sanity checks to ensure world size is actually valid with the config. return_microbatch (bool, optional): whether to return micro batch size or not. Raises: ElasticityConfigError: Missing required elasticity config or elasticity disabled ElasticityError: If target deepspeed version is not compatible with current version Returns: final_batch_size (int): total batch size used for training valid_gpus (list(int)): list of valid GPU counts with this config micro_batch_size (int, optional): if world_size is provided will return specific micro batch size
Here is the function:
def compute_elastic_config(ds_config: dict,
target_deepspeed_version: str,
world_size=0,
return_microbatch=False):
"""Core deepspeed elasticity API. Given an elastic config (similar to the example below)
DeepSpeed will compute a total train batch size and a corresponding valid GPU count list that
provide a high level of elasticity. Elasticity in this case means we are safe to scale
the training job up/down across the GPU count list *without* any negative impacts on
training convergence. This is achievable primarily due to DeepSpeed's gradient accumulation
feature which allows us to decompose a global training batch size into:
micro-batch-size * gradient-accumulation-steps * world-size.
"elasticity": {
"enabled": true,
"max_train_batch_size": 2000,
"micro_batch_sizes": [2,4,6],
"min_gpus": 1,
"max_gpus" : 10000
"min_time": 20
"version": 0.1
}
Intended to be called both by scheduling infrastructure and deepspeed runtime.
For the same `ds_config` we should return deterministic results.
Args:
ds_config (dict): DeepSpeed config dictionary/json
target_deepspeed_version (str): When called from scheduling
infrastructure we want to ensure that the target deepspeed version is
compatible with the elasticity version used in the backend.
world_size (int, optional): Intended/current DP world size, will do some sanity
checks to ensure world size is actually valid with the config.
return_microbatch (bool, optional): whether to return micro batch size or not.
Raises:
ElasticityConfigError: Missing required elasticity config or elasticity disabled
ElasticityError: If target deepspeed version is not compatible with current version
Returns:
final_batch_size (int): total batch size used for training
valid_gpus (list(int)): list of valid GPU counts with this config
micro_batch_size (int, optional): if world_size is provided will return
specific micro batch size
"""
if not isinstance(ds_config, dict):
raise ValueError("Expected ds_config to be a dictionary but received " \
f"a {type(ds_config)}, containing: {ds_config}")
if ELASTICITY not in ds_config:
raise ElasticityConfigError(f"'{ELASTICITY}' is missing from config json," \
" please add it if running an elastic training job.")
elastic_config_dict = ds_config[ELASTICITY]
if not elastic_config_dict.get(ENABLED, ENABLED_DEFAULT):
raise ElasticityConfigError("Elasticity is disabled, please enable it " \
"('enabled':true) if running an elastic training job.")
elastic_config = ElasticityConfig(elastic_config_dict)
model_parallel_size = elastic_config.model_parallel_size
num_gpus_per_node = elastic_config.num_gpus_per_node
if model_parallel_size > 1 and float(elastic_config.version) != 0.2:
raise ElasticityConfigError(f"Elasticity V{elastic_config.version} " \
f"does not support model-parallel training. Given model-parallel size: " \
f"{model_parallel_size}")
if float(elastic_config.version) > LATEST_ELASTICITY_VERSION:
raise ElasticityConfigError("Attempting to run elasticity version " \
f"{elastic_config.version} but runtime only supports up " \
f"to {LATEST_ELASTICITY_VERSION}")
# Ensure target deepspeed version works with intended elasticity version
if not _compatible_ds_version_check(target_deepspeed_version):
raise ElasticityError("Unable to run elasticity on target deepspeed version of" \
f" {target_deepspeed_version}, currently {__version__}")
if float(elastic_config.version) == 0.1:
final_batch_size, valid_gpus = _get_compatible_gpus_v01(
micro_batches=elastic_config.micro_batches,
max_acceptable_batch_size=elastic_config.max_acceptable_batch_size,
min_gpus=elastic_config.min_gpus,
max_gpus=elastic_config.max_gpus,
prefer_larger=elastic_config.prefer_larger_batch_size)
# ensure batch size is int dtype
final_batch_size = int(final_batch_size)
elif float(elastic_config.version) == 0.2:
if world_size != 0:
current_num_gpus = world_size
else:
if "WORLD_SIZE" in os.environ and \
os.getenv('WORLD_SIZE').isnumeric():
current_num_gpus = int(os.getenv('WORLD_SIZE'))
else:
WORLD_SIZE = os.getenv('WORLD_SIZE')
raise ElasticityConfigError(
'Elasticity V 0.2 needs WORLD_SIZE '\
'to compute valid batch size. '\
'Either give it as argument to function compute_elastic_config '\
'or set it as an environment variable. '\
f'Value of WORLD_SIZE as environment variable is {WORLD_SIZE}')
final_batch_size, valid_gpus, candidate_microbatch_size = _get_compatible_gpus_v02(
micro_batches=elastic_config.micro_batches,
max_acceptable_batch_size=elastic_config.max_acceptable_batch_size,
current_num_gpus=current_num_gpus,
min_gpus=elastic_config.min_gpus,
max_gpus=elastic_config.max_gpus,
prefer_larger=elastic_config.prefer_larger_batch_size,
num_gpus_per_node=num_gpus_per_node,
model_parallel_size=model_parallel_size)
# ensure batch size is int dtype
final_batch_size = int(final_batch_size)
else:
raise NotImplementedError(
f"Unable to find elastic logic for version: {elastic_config.version}")
logger.info(f"Valid World Size (GPUs / Model Parallel Size): {valid_gpus}")
if world_size > 0:
if world_size not in valid_gpus:
raise ElasticityIncompatibleWorldSize(f"World size ({world_size}) is not valid " \
f"with the current list of valid GPU counts: {valid_gpus}")
# Pick largest valid micro batch size
micro_batch_size = None
for mbsz in sorted(list(set(elastic_config.micro_batches)), reverse=True):
if final_batch_size // world_size % mbsz == 0:
micro_batch_size = mbsz
break
assert micro_batch_size is not None, "Unable to find divisible micro batch size" \
f" world_size={world_size}, final_batch_size={final_batch_size}, and " \
f" micro_batches={elastic_config.micro_batches}."
return final_batch_size, valid_gpus, micro_batch_size
if return_microbatch:
# Pick a valid micro batch size
if float(elastic_config.version) == 0.2:
return final_batch_size, valid_gpus, candidate_microbatch_size
else:
micro_batch_size = None
for mbsz in sorted(list(set(elastic_config.micro_batches)), reverse=True):
if final_batch_size // world_size % mbsz == 0:
micro_batch_size = mbsz
break
assert micro_batch_size is not None, "Unable to find divisible micro batch size" \
f" world_size={world_size}, final_batch_size={final_batch_size}, and " \
f" micro_batches={elastic_config.micro_batches}."
return final_batch_size, valid_gpus, micro_batch_size
return final_batch_size, valid_gpus | Core deepspeed elasticity API. Given an elastic config (similar to the example below) DeepSpeed will compute a total train batch size and a corresponding valid GPU count list that provide a high level of elasticity. Elasticity in this case means we are safe to scale the training job up/down across the GPU count list *without* any negative impacts on training convergence. This is achievable primarily due to DeepSpeed's gradient accumulation feature which allows us to decompose a global training batch size into: micro-batch-size * gradient-accumulation-steps * world-size. "elasticity": { "enabled": true, "max_train_batch_size": 2000, "micro_batch_sizes": [2,4,6], "min_gpus": 1, "max_gpus": 10000, "min_time": 20, "version": 0.1 } Intended to be called both by scheduling infrastructure and deepspeed runtime. For the same `ds_config` we should return deterministic results. Args: ds_config (dict): DeepSpeed config dictionary/json target_deepspeed_version (str): When called from scheduling infrastructure we want to ensure that the target deepspeed version is compatible with the elasticity version used in the backend. world_size (int, optional): Intended/current DP world size, will do some sanity checks to ensure world size is actually valid with the config. return_microbatch (bool, optional): whether to return micro batch size or not. Raises: ElasticityConfigError: Missing required elasticity config or elasticity disabled ElasticityError: If target deepspeed version is not compatible with current version Returns: final_batch_size (int): total batch size used for training valid_gpus (list(int)): list of valid GPU counts with this config micro_batch_size (int, optional): if world_size is provided will return specific micro batch size
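A hedged end-to-end sketch of the API above. The config values and the target version string are placeholders, and the call assumes the version passes the internal compatibility check and that 8 appears in the computed valid GPU list:
ds_config = {
    "elasticity": {
        "enabled": True,
        "max_train_batch_size": 2000,
        "micro_batch_sizes": [2, 4, 6],
        "min_gpus": 1,
        "max_gpus": 10000,
        "version": 0.2,
    }
}
# With world_size > 0 the function also returns the chosen micro batch size.
batch_size, valid_gpus, micro_batch = compute_elastic_config(
    ds_config, target_deepspeed_version="0.6.0", world_size=8)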
10,382 | import torch
The provided code snippet includes necessary dependencies for implementing the `is_torch_elastic_compatible` function. Write a Python function `def is_torch_elastic_compatible()` to solve the following problem:
Helper to lookup torch version. Elastic training was introduced in 1.11.x
Here is the function:
def is_torch_elastic_compatible():
'''
Helper to lookup torch version. Elastic training was
introduced in 1.11.x
'''
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 1 and TORCH_MINOR >= 11:
return True
else:
return False | Helper to lookup torch version. Elastic training was introduced in 1.11.x
10,383 | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from typing import List, Optional
from collections import OrderedDict
import numpy as np
def _dropout_flops_compute(input, p=0.5, training=True, inplace=False):
return 0, 0 | null |
10,384 | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from typing import List, Optional
from collections import OrderedDict
import numpy as np

Tensor = torch.Tensor

# Shared state for the wrapped functionals below: per-module stacks of
# (name, count) records, plus the saved originals used for un-patching.
module_flop_count = []
module_mac_count = []
old_functions = {}

def _prod(dims):
    # product of an iterable of ints (e.g. kernel or output dimensions)
    p = 1
    for v in dims:
        p *= v
    return p
def _linear_flops_compute(input, weight, bias=None):
out_features = weight.shape[0]
macs = input.numel() * out_features
return 2 * macs, macs
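A quick numeric check of the linear rule above (MACs = numel(input) * out_features, flops = 2 * MACs), with assumed shapes:
x = torch.randn(4, 128)    # (batch, in_features)
w = torch.randn(256, 128)  # weight is (out_features, in_features)
flops, macs = _linear_flops_compute(x, w)
assert macs == 4 * 128 * 256 and flops == 2 * macs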
def _relu_flops_compute(input, inplace=False):
return input.numel(), 0
def _prelu_flops_compute(input: Tensor, weight: Tensor):
return input.numel(), 0
def _elu_flops_compute(input: Tensor, alpha: float = 1.0, inplace: bool = False):
return input.numel(), 0
def _leaky_relu_flops_compute(input: Tensor,
negative_slope: float = 0.01,
inplace: bool = False):
return input.numel(), 0
def _relu6_flops_compute(input: Tensor, inplace: bool = False):
return input.numel(), 0
def _silu_flops_compute(input: Tensor, inplace: bool = False):
return input.numel(), 0
def _gelu_flops_compute(input):
return input.numel(), 0
def _pool_flops_compute(input,
kernel_size,
stride=None,
padding=0,
dilation=None,
ceil_mode=False,
count_include_pad=True,
divisor_override=None,
return_indices=None):
return input.numel(), 0
def _conv_flops_compute(input,
weight,
bias=None,
stride=1,
padding=0,
dilation=1,
groups=1):
assert weight.shape[1] * groups == input.shape[1]
batch_size = input.shape[0]
in_channels = input.shape[1]
out_channels = weight.shape[0]
kernel_dims = list(weight.shape[2:])
input_dims = list(input.shape[2:])
length = len(input_dims)
paddings = padding if type(padding) is tuple else (padding, ) * length
strides = stride if type(stride) is tuple else (stride, ) * length
dilations = dilation if type(dilation) is tuple else (dilation, ) * length
output_dims = []
for idx, input_dim in enumerate(input_dims):
output_dim = (input_dim + 2 * paddings[idx] -
(dilations[idx] * (kernel_dims[idx] - 1) + 1)) // strides[idx] + 1
output_dims.append(output_dim)
filters_per_channel = out_channels // groups
conv_per_position_macs = int(_prod(kernel_dims)) * in_channels * filters_per_channel
active_elements_count = batch_size * int(_prod(output_dims))
overall_conv_macs = conv_per_position_macs * active_elements_count
overall_conv_flops = 2 * overall_conv_macs
bias_flops = 0
if bias is not None:
bias_flops = out_channels * active_elements_count
return int(overall_conv_flops + bias_flops), int(overall_conv_macs)
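A sanity check of the convolution rule with assumed shapes, the classic 7x7/stride-2 stem on a 224x224 image, where each of the 112x112 output positions costs 7*7*3 MACs per output channel:
x = torch.randn(1, 3, 224, 224)
w = torch.randn(64, 3, 7, 7)
flops, macs = _conv_flops_compute(x, w, stride=2, padding=3)
assert macs == (7 * 7 * 3) * 64 * (112 * 112)  # per-position MACs * output positions
assert flops == 2 * macs                       # no bias term in this call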
def _conv_trans_flops_compute(
input,
weight,
bias=None,
stride=1,
padding=0,
output_padding=0,
groups=1,
dilation=1,
):
batch_size = input.shape[0]
in_channels = input.shape[1]
out_channels = weight.shape[0]
kernel_dims = list(weight.shape[2:])
input_dims = list(input.shape[2:])
length = len(input_dims)
paddings = padding if type(padding) is tuple else (padding, ) * length
strides = stride if type(stride) is tuple else (stride, ) * length
dilations = dilation if type(dilation) is tuple else (dilation, ) * length
output_dims = []
for idx, input_dim in enumerate(input_dims):
output_dim = (input_dim + 2 * paddings[idx] -
(dilations[idx] * (kernel_dims[idx] - 1) + 1)) // strides[idx] + 1
output_dims.append(output_dim)
filters_per_channel = out_channels // groups
conv_per_position_macs = int(_prod(kernel_dims)) * in_channels * filters_per_channel
active_elements_count = batch_size * int(_prod(input_dims))
overall_conv_macs = conv_per_position_macs * active_elements_count
overall_conv_flops = 2 * overall_conv_macs
bias_flops = 0
if bias is not None:
bias_flops = out_channels * batch_size * int(_prod(output_dims))
return int(overall_conv_flops + bias_flops), int(overall_conv_macs)
def _batch_norm_flops_compute(
input,
running_mean,
running_var,
weight=None,
bias=None,
training=False,
momentum=0.1,
eps=1e-05,
):
has_affine = weight is not None
if training:
# estimation
return input.numel() * (5 if has_affine else 4), 0
flops = input.numel() * (2 if has_affine else 1)
return flops, 0
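The normalization rules here are flat per-element estimates; for example, in training mode with an affine transform the estimate is five operations per element:
x = torch.randn(2, 8, 4, 4)
flops, _ = _batch_norm_flops_compute(x, None, None,
                                     weight=torch.ones(8),
                                     training=True)
assert flops == x.numel() * 5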
def _layer_norm_flops_compute(
input: Tensor,
normalized_shape: List[int],
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
eps: float = 1e-5,
):
has_affine = weight is not None
# estimation
return input.numel() * (5 if has_affine else 4), 0
def _group_norm_flops_compute(input: Tensor,
num_groups: int,
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
eps: float = 1e-5):
has_affine = weight is not None
# estimation
return input.numel() * (5 if has_affine else 4), 0
def _instance_norm_flops_compute(
input: Tensor,
running_mean: Optional[Tensor] = None,
running_var: Optional[Tensor] = None,
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
use_input_stats: bool = True,
momentum: float = 0.1,
eps: float = 1e-5,
):
has_affine = weight is not None
# estimation
return input.numel() * (5 if has_affine else 4), 0
def _upsample_flops_compute(input,
size=None,
scale_factor=None,
mode="nearest",
align_corners=None):
if size is not None:
if isinstance(size, tuple):
return int(_prod(size)), 0
else:
return int(size), 0
assert scale_factor is not None, "either size or scale_factor should be defined"
flops = input.numel()
if isinstance(scale_factor, tuple) and len(scale_factor) == len(input):
flops *= int(_prod(scale_factor))
else:
flops *= scale_factor**len(input)
return flops, 0
def _softmax_flops_compute(input, dim=None, _stacklevel=3, dtype=None):
return input.numel(), 0
def _embedding_flops_compute(
input,
weight,
padding_idx=None,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
sparse=False,
):
return 0, 0
def wrapFunc(func, funcFlopCompute):
oldFunc = func
name = func.__name__
old_functions[name] = oldFunc
def newFunc(*args, **kwds):
flops, macs = funcFlopCompute(*args, **kwds)
if module_flop_count:
module_flop_count[-1].append((name, flops))
if module_mac_count and macs:
module_mac_count[-1].append((name, macs))
return oldFunc(*args, **kwds)
newFunc.__name__ = func.__name__
return newFunc
def _patch_functionals():
# FC
F.linear = wrapFunc(F.linear, _linear_flops_compute)
# convolutions
F.conv1d = wrapFunc(F.conv1d, _conv_flops_compute)
F.conv2d = wrapFunc(F.conv2d, _conv_flops_compute)
F.conv3d = wrapFunc(F.conv3d, _conv_flops_compute)
# conv transposed
F.conv_transpose1d = wrapFunc(F.conv_transpose1d, _conv_trans_flops_compute)
F.conv_transpose2d = wrapFunc(F.conv_transpose2d, _conv_trans_flops_compute)
F.conv_transpose3d = wrapFunc(F.conv_transpose3d, _conv_trans_flops_compute)
# activations
F.relu = wrapFunc(F.relu, _relu_flops_compute)
F.prelu = wrapFunc(F.prelu, _prelu_flops_compute)
F.elu = wrapFunc(F.elu, _elu_flops_compute)
F.leaky_relu = wrapFunc(F.leaky_relu, _leaky_relu_flops_compute)
F.relu6 = wrapFunc(F.relu6, _relu6_flops_compute)
if hasattr(F, "silu"):
F.silu = wrapFunc(F.silu, _silu_flops_compute)
F.gelu = wrapFunc(F.gelu, _gelu_flops_compute)
# Normalizations
F.batch_norm = wrapFunc(F.batch_norm, _batch_norm_flops_compute)
F.layer_norm = wrapFunc(F.layer_norm, _layer_norm_flops_compute)
F.instance_norm = wrapFunc(F.instance_norm, _instance_norm_flops_compute)
F.group_norm = wrapFunc(F.group_norm, _group_norm_flops_compute)
# poolings
F.avg_pool1d = wrapFunc(F.avg_pool1d, _pool_flops_compute)
F.avg_pool2d = wrapFunc(F.avg_pool2d, _pool_flops_compute)
F.avg_pool3d = wrapFunc(F.avg_pool3d, _pool_flops_compute)
F.max_pool1d = wrapFunc(F.max_pool1d, _pool_flops_compute)
F.max_pool2d = wrapFunc(F.max_pool2d, _pool_flops_compute)
F.max_pool3d = wrapFunc(F.max_pool3d, _pool_flops_compute)
F.adaptive_avg_pool1d = wrapFunc(F.adaptive_avg_pool1d, _pool_flops_compute)
F.adaptive_avg_pool2d = wrapFunc(F.adaptive_avg_pool2d, _pool_flops_compute)
F.adaptive_avg_pool3d = wrapFunc(F.adaptive_avg_pool3d, _pool_flops_compute)
F.adaptive_max_pool1d = wrapFunc(F.adaptive_max_pool1d, _pool_flops_compute)
F.adaptive_max_pool2d = wrapFunc(F.adaptive_max_pool2d, _pool_flops_compute)
F.adaptive_max_pool3d = wrapFunc(F.adaptive_max_pool3d, _pool_flops_compute)
# upsample
F.upsample = wrapFunc(F.upsample, _upsample_flops_compute)
F.interpolate = wrapFunc(F.interpolate, _upsample_flops_compute)
# softmax
F.softmax = wrapFunc(F.softmax, _softmax_flops_compute)
# embedding
F.embedding = wrapFunc(F.embedding, _embedding_flops_compute) | null |
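To make the patching flow concrete, a hedged sketch: after _patch_functionals(), each wrapped functional appends a (name, flops) record to the innermost open per-module list, the same lists that the profiler's forward pre/post hooks push and pop (_reload_functionals(), shown in a later snippet, restores the originals):
_patch_functionals()
module_flop_count.append([])   # normally done by a module's forward pre-hook
module_mac_count.append([])
_ = F.relu(torch.randn(2, 3))
print(module_flop_count.pop())  # [('relu', 6)] -- one flop per element
module_mac_count.pop()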
10,385 | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from typing import List, Optional
from collections import OrderedDict
import numpy as np
Tensor = torch.Tensor
def _matmul_flops_compute(input, other, *, out=None):
def _addmm_flops_compute(input, mat1, mat2, *, beta=1, alpha=1, out=None):
def _einsum_flops_compute(equation, *operands):
def _tensor_addmm_flops_compute(self, mat1, mat2, *, beta=1, alpha=1, out=None):
def _mul_flops_compute(input, other, *, out=None):
def _add_flops_compute(input, other, *, alpha=1, out=None):
def wrapFunc(func, funcFlopCompute):
def _patch_tensor_methods():
torch.matmul = wrapFunc(torch.matmul, _matmul_flops_compute)
torch.Tensor.matmul = wrapFunc(torch.Tensor.matmul, _matmul_flops_compute)
torch.mm = wrapFunc(torch.mm, _matmul_flops_compute)
torch.Tensor.mm = wrapFunc(torch.Tensor.mm, _matmul_flops_compute)
torch.bmm = wrapFunc(torch.bmm, _matmul_flops_compute)
torch.Tensor.bmm = wrapFunc(torch.Tensor.bmm, _matmul_flops_compute)
torch.addmm = wrapFunc(torch.addmm, _addmm_flops_compute)
torch.Tensor.addmm = wrapFunc(torch.Tensor.addmm, _tensor_addmm_flops_compute)
torch.mul = wrapFunc(torch.mul, _mul_flops_compute)
torch.Tensor.mul = wrapFunc(torch.Tensor.mul, _mul_flops_compute)
torch.add = wrapFunc(torch.add, _add_flops_compute)
torch.Tensor.add = wrapFunc(torch.Tensor.add, _add_flops_compute)
torch.einsum = wrapFunc(torch.einsum, _einsum_flops_compute) | null |
10,386 | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from typing import List, Optional
from collections import OrderedDict
import numpy as np
old_functions = {}
def _reload_functionals():
# torch.nn.functional does not support importlib.reload()
F.linear = old_functions[F.linear.__name__]
F.conv1d = old_functions[F.conv1d.__name__]
F.conv2d = old_functions[F.conv2d.__name__]
F.conv3d = old_functions[F.conv3d.__name__]
F.conv_transpose1d = old_functions[F.conv_transpose1d.__name__]
F.conv_transpose2d = old_functions[F.conv_transpose2d.__name__]
F.conv_transpose3d = old_functions[F.conv_transpose3d.__name__]
F.relu = old_functions[F.relu.__name__]
F.prelu = old_functions[F.prelu.__name__]
F.elu = old_functions[F.elu.__name__]
F.leaky_relu = old_functions[F.leaky_relu.__name__]
F.relu6 = old_functions[F.relu6.__name__]
F.batch_norm = old_functions[F.batch_norm.__name__]
F.avg_pool1d = old_functions[F.avg_pool1d.__name__]
F.avg_pool2d = old_functions[F.avg_pool2d.__name__]
F.avg_pool3d = old_functions[F.avg_pool3d.__name__]
F.max_pool1d = old_functions[F.max_pool1d.__name__]
F.max_pool2d = old_functions[F.max_pool2d.__name__]
F.max_pool3d = old_functions[F.max_pool3d.__name__]
F.adaptive_avg_pool1d = old_functions[F.adaptive_avg_pool1d.__name__]
F.adaptive_avg_pool2d = old_functions[F.adaptive_avg_pool2d.__name__]
F.adaptive_avg_pool3d = old_functions[F.adaptive_avg_pool3d.__name__]
F.adaptive_max_pool1d = old_functions[F.adaptive_max_pool1d.__name__]
F.adaptive_max_pool2d = old_functions[F.adaptive_max_pool2d.__name__]
F.adaptive_max_pool3d = old_functions[F.adaptive_max_pool3d.__name__]
F.upsample = old_functions[F.upsample.__name__]
F.interpolate = old_functions[F.interpolate.__name__]
F.softmax = old_functions[F.softmax.__name__]
F.embedding = old_functions[F.embedding.__name__] | null |
10,387 | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from typing import List, Optional
from collections import OrderedDict
import numpy as np
old_functions = {}
def _reload_tensor_methods():
torch.matmul = old_functions[torch.matmul.__name__] | null |
10,388 | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from typing import List, Optional
from collections import OrderedDict
import numpy as np
def _rnn_flops(flops, rnn_module, w_ih, w_hh, input_size):
# matrix matrix mult ih state and internal state
flops += w_ih.shape[0] * w_ih.shape[1]
# matrix matrix mult hh state and internal state
flops += w_hh.shape[0] * w_hh.shape[1]
if isinstance(rnn_module, (nn.RNN, nn.RNNCell)):
# add both operations
flops += rnn_module.hidden_size
elif isinstance(rnn_module, (nn.GRU, nn.GRUCell)):
# hadamard of r
flops += rnn_module.hidden_size
# adding operations from both states
flops += rnn_module.hidden_size * 3
# last two hadamard _product and add
flops += rnn_module.hidden_size * 3
elif isinstance(rnn_module, (nn.LSTM, nn.LSTMCell)):
# adding operations from both states
flops += rnn_module.hidden_size * 4
# two hadamard _product and add for C state
flops += rnn_module.hidden_size + rnn_module.hidden_size + rnn_module.hidden_size
# final hadamard
flops += rnn_module.hidden_size + rnn_module.hidden_size + rnn_module.hidden_size
return flops
def _rnn_forward_hook(rnn_module, input, output):
flops = 0
# input is a tuple containing a sequence to process and (optionally) hidden state
inp = input[0]
batch_size = inp.shape[0]
seq_length = inp.shape[1]
num_layers = rnn_module.num_layers
for i in range(num_layers):
w_ih = rnn_module.__getattr__("weight_ih_l" + str(i))
w_hh = rnn_module.__getattr__("weight_hh_l" + str(i))
if i == 0:
input_size = rnn_module.input_size
else:
input_size = rnn_module.hidden_size
flops = _rnn_flops(flops, rnn_module, w_ih, w_hh, input_size)
if rnn_module.bias:
b_ih = rnn_module.__getattr__("bias_ih_l" + str(i))
b_hh = rnn_module.__getattr__("bias_hh_l" + str(i))
flops += b_ih.shape[0] + b_hh.shape[0]
flops *= batch_size
flops *= seq_length
if rnn_module.bidirectional:
flops *= 2
rnn_module.__flops__ += int(flops) | null |
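A hypothetical sketch of driving the hook directly. Note that it reads inp.shape[0] as the batch and inp.shape[1] as the sequence length, so it assumes batch-first input; the profiler normally initializes the __flops__ attribute itself:
lstm = nn.LSTM(input_size=10, hidden_size=20, num_layers=1, batch_first=True)
lstm.__flops__ = 0
handle = lstm.register_forward_hook(_rnn_forward_hook)
_ = lstm(torch.randn(4, 7, 10))  # batch=4, seq_len=7
print(lstm.__flops__)
handle.remove()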
10,389 | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from typing import List, Optional
from collections import OrderedDict
import numpy as np
def _rnn_flops(flops, rnn_module, w_ih, w_hh, input_size):
def _rnn_cell_forward_hook(rnn_cell_module, input, output):
flops = 0
inp = input[0]
batch_size = inp.shape[0]
w_ih = rnn_cell_module.__getattr__("weight_ih")
w_hh = rnn_cell_module.__getattr__("weight_hh")
input_size = inp.shape[1]
flops = _rnn_flops(flops, rnn_cell_module, w_ih, w_hh, input_size)
if rnn_cell_module.bias:
b_ih = rnn_cell_module.__getattr__("bias_ih")
b_hh = rnn_cell_module.__getattr__("bias_hh")
flops += b_ih.shape[0] + b_hh.shape[0]
flops *= batch_size
rnn_cell_module.__flops__ += int(flops) | null |
10,390 | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from typing import List, Optional
from collections import OrderedDict
import numpy as np
def num_to_string(num, precision=2):
if num // 10**9 > 0:
return str(round(num / 10.0**9, precision)) + " G"
elif num // 10**6 > 0:
return str(round(num / 10.0**6, precision)) + " M"
elif num // 10**3 > 0:
return str(round(num / 10.0**3, precision)) + " K"
else:
return str(num) | null |
10,391 | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from typing import List, Optional
from collections import OrderedDict
import numpy as np
def flops_to_string(flops, units=None, precision=2):
if units is None:
if flops // 10**12 > 0:
return str(round(flops / 10.0**12, precision)) + " TFLOPS"
if flops // 10**9 > 0:
return str(round(flops / 10.0**9, precision)) + " GFLOPS"
elif flops // 10**6 > 0:
return str(round(flops / 10.0**6, precision)) + " MFLOPS"
elif flops // 10**3 > 0:
return str(round(flops / 10.0**3, precision)) + " KFLOPS"
else:
return str(flops) + " FLOPS"
else:
if units == "TFLOPS":
return str(round(flops / 10.0**12, precision)) + " " + units
if units == "GFLOPS":
return str(round(flops / 10.0**9, precision)) + " " + units
elif units == "MFLOPS":
return str(round(flops / 10.0**6, precision)) + " " + units
elif units == "KFLOPS":
return str(round(flops / 10.0**3, precision)) + " " + units
else:
return str(flops) + " FLOPS" | null |
10,392 | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from typing import List, Optional
from collections import OrderedDict
import numpy as np
def duration_to_string(duration, units=None, precision=2):
if units is None:
if duration > 1:
return str(round(duration, precision)) + " s"
elif duration * 10**3 > 1:
return str(round(duration * 10**3, precision)) + " ms"
elif duration * 10**6 > 1:
return str(round(duration * 10**6, precision)) + " us"
else:
return str(duration)
else:
if units == "us":
return str(round(duration * 10.0**6, precision)) + " " + units
elif units == "ms":
return str(round(duration * 10.0**3, precision)) + " " + units
else:
return str(round(duration, precision)) + " s"
# can not iterate over all submodules using self.model.modules()
# since modules() returns duplicate modules only once | null |
10,393 | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from typing import List, Optional
from collections import OrderedDict
import numpy as np
def get_module_flops(module):
    flops = module.__flops__
    # iterate over immediate children modules
    for child in module.children():
        flops += get_module_flops(child)
    return flops
10,394 | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from typing import List, Optional
from collections import OrderedDict
import numpy as np
def get_module_macs(module):
    macs = module.__macs__
    # iterate over immediate children modules
    for child in module.children():
        macs += get_module_macs(child)
    return macs
10,395 | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from typing import List, Optional
from collections import OrderedDict
import numpy as np
def get_module_duration(module):
duration = module.__duration__
if duration == 0: # e.g. ModuleList
for m in module.children():
duration += m.__duration__
return duration | null |
10,396 | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from typing import List, Optional
from collections import OrderedDict
import numpy as np
class FlopsProfiler(object):
"""Measures the latency, number of estimated floating-point operations and parameters of each module in a PyTorch model.
The flops-profiler profiles the forward pass of a PyTorch model and prints the model graph with the measured profile attached to each module. It shows how latency, flops and parameters are spent in the model and which modules or layers could be the bottleneck. It also outputs the names of the top k modules in terms of aggregated latency, flops, and parameters at depth l with k and l specified by the user. The output profile is computed for each batch of input.
The DeepSpeed flops profiler can be used with the DeepSpeed runtime or as a standalone package.
When using DeepSpeed for model training, the flops profiler can be configured in the deepspeed_config file and no user code change is required.
If using the profiler as a standalone package, one imports the flops_profiler package and uses the APIs.
Here is an example for usage in a typical training workflow:
.. code-block:: python
model = Model()
prof = FlopsProfiler(model)
for step, batch in enumerate(data_loader):
if step == profile_step:
prof.start_profile()
loss = model(batch)
if step == profile_step:
flops = prof.get_total_flops(as_string=True)
params = prof.get_total_params(as_string=True)
prof.print_model_profile(profile_step=profile_step)
prof.end_profile()
loss.backward()
optimizer.step()
To profile a trained model in inference, use the `get_model_profile` API.
Args:
model (torch.nn.Module): The PyTorch model to profile.
"""
def __init__(self, model, ds_engine=None):
self.model = model
self.ds_engine = ds_engine
self.started = False
self.func_patched = False
def start_profile(self, ignore_list=None):
"""Starts profiling.
Extra attributes are added recursively to all the modules and the profiled torch.nn.functionals are monkey patched.
Args:
ignore_list (list, optional): the list of modules to ignore while profiling. Defaults to None.
"""
self.reset_profile()
_patch_functionals()
_patch_tensor_methods()
def register_module_hooks(module, ignore_list):
if ignore_list and type(module) in ignore_list:
return
# if computing the flops of a module directly
if type(module) in MODULE_HOOK_MAPPING:
if not hasattr(module, "__flops_handle__"):
module.__flops_handle__ = module.register_forward_hook(
MODULE_HOOK_MAPPING[type(module)])
return
# if computing the flops of the functionals in a module
def pre_hook(module, input):
module_flop_count.append([])
module_mac_count.append([])
if not hasattr(module, "__pre_hook_handle__"):
module.__pre_hook_handle__ = module.register_forward_pre_hook(pre_hook)
def post_hook(module, input, output):
if module_flop_count:
module.__flops__ += sum([elem[1] for elem in module_flop_count[-1]])
module_flop_count.pop()
module.__macs__ += sum([elem[1] for elem in module_mac_count[-1]])
module_mac_count.pop()
if not hasattr(module, "__post_hook_handle__"):
module.__post_hook_handle__ = module.register_forward_hook(post_hook)
def start_time_hook(module, input):
torch.cuda.synchronize()
module.__start_time__ = time.time()
if not hasattr(module, "__start_time_hook_handle__"):
module.__start_time_hook_handle__ = module.register_forward_pre_hook(
start_time_hook)
def end_time_hook(module, input, output):
torch.cuda.synchronize()
module.__duration__ += time.time() - module.__start_time__
if not hasattr(module, "__end_time_hook_handle__"):
module.__end_time_hook_handle__ = module.register_forward_hook(
end_time_hook)
self.model.apply(partial(register_module_hooks, ignore_list=ignore_list))
self.started = True
self.func_patched = True
def stop_profile(self):
"""Stop profiling.
All torch.nn.functionals are restored to their originals.
"""
if self.started and self.func_patched:
_reload_functionals()
_reload_tensor_methods()
self.func_patched = False
def remove_profile_attrs(module):
if hasattr(module, "__pre_hook_handle__"):
module.__pre_hook_handle__.remove()
del module.__pre_hook_handle__
if hasattr(module, "__post_hook_handle__"):
module.__post_hook_handle__.remove()
del module.__post_hook_handle__
if hasattr(module, "__flops_handle__"):
module.__flops_handle__.remove()
del module.__flops_handle__
if hasattr(module, "__start_time_hook_handle__"):
module.__start_time_hook_handle__.remove()
del module.__start_time_hook_handle__
if hasattr(module, "__end_time_hook_handle__"):
module.__end_time_hook_handle__.remove()
del module.__end_time_hook_handle__
self.model.apply(remove_profile_attrs)
def reset_profile(self):
"""Resets the profiling.
Adds or resets the extra attributes.
"""
def add_or_reset_attrs(module):
module.__flops__ = 0
module.__macs__ = 0
module.__params__ = sum(p.numel() for p in module.parameters())
module.__start_time__ = 0
module.__duration__ = 0
self.model.apply(add_or_reset_attrs)
def end_profile(self):
"""Ends profiling.
The added attributes and handles are removed recursively on all the modules.
"""
if not self.started:
return
self.stop_profile()
self.started = False
def remove_profile_attrs(module):
if hasattr(module, "__flops__"):
del module.__flops__
if hasattr(module, "__macs__"):
del module.__macs__
if hasattr(module, "__params__"):
del module.__params__
if hasattr(module, "__start_time__"):
del module.__start_time__
if hasattr(module, "__duration__"):
del module.__duration__
self.model.apply(remove_profile_attrs)
def get_total_flops(self, as_string=False):
"""Returns the total flops of the model.
Args:
as_string (bool, optional): whether to output the flops as string. Defaults to False.
Returns:
The number of floating-point operations of the model forward pass.
"""
total_flops = get_module_flops(self.model)
return num_to_string(total_flops) if as_string else total_flops
def get_total_macs(self, as_string=False):
"""Returns the total MACs of the model.
Args:
as_string (bool, optional): whether to output the flops as string. Defaults to False.
Returns:
The number of multiply-accumulate operations of the model forward pass.
"""
total_macs = get_module_macs(self.model)
return macs_to_string(total_macs) if as_string else total_macs
def get_total_duration(self, as_string=False):
"""Returns the total duration of the model forward pass.
Args:
as_string (bool, optional): whether to output the duration as string. Defaults to False.
Returns:
The latency of the model forward pass.
"""
total_duration = get_module_duration(self.model)
return duration_to_string(total_duration) if as_string else total_duration
def get_total_params(self, as_string=False):
"""Returns the total parameters of the model.
Args:
as_string (bool, optional): whether to output the parameters as string. Defaults to False.
Returns:
The number of parameters in the model.
"""
return params_to_string(
self.model.__params__) if as_string else self.model.__params__
def print_model_profile(self,
profile_step=1,
module_depth=-1,
top_modules=1,
detailed=True,
output_file=None):
"""Prints the model graph with the measured profile attached to each module.
Args:
profile_step (int, optional): The global training step at which to profile. Note that warm up steps are needed for accurate time measurement.
module_depth (int, optional): The depth of the model to which to print the aggregated module information. When set to -1, it prints information from the top to the innermost modules (the maximum depth).
top_modules (int, optional): Limits the aggregated profile output to the number of top modules specified.
detailed (bool, optional): Whether to print the detailed model profile.
output_file (str, optional): Path to the output file. If None, the profiler prints to stdout.
"""
if not self.started:
return
import sys
import os.path
original_stdout = None
f = None
if output_file and output_file != "":
dir_path = os.path.dirname(os.path.abspath(output_file))
if not os.path.exists(dir_path):
os.makedirs(dir_path)
original_stdout = sys.stdout
f = open(output_file, "w")
sys.stdout = f
total_flops = self.get_total_flops()
total_macs = self.get_total_macs()
total_duration = self.get_total_duration()
total_params = self.get_total_params()
self.flops = total_flops
self.macs = total_macs
self.params = total_params
print(
"\n-------------------------- DeepSpeed Flops Profiler --------------------------"
)
print(f'Profile Summary at step {profile_step}:')
print(
"Notations:\ndata parallel size (dp_size), model parallel size(mp_size),\nnumber of parameters (params), number of multiply-accumulate operations(MACs),\nnumber of floating-point operations (flops), floating-point operations per second (FLOPS),\nfwd latency (forward propagation latency), bwd latency (backward propagation latency),\nstep (weights update latency), iter latency (sum of fwd, bwd and step latency)\n"
)
if self.ds_engine:
print('{:<60} {:<8}'.format('world size: ', self.ds_engine.world_size))
print('{:<60} {:<8}'.format('data parallel size: ',
self.ds_engine.dp_world_size))
print('{:<60} {:<8}'.format('model parallel size: ',
self.ds_engine.mp_world_size))
print('{:<60} {:<8}'.format(
'batch size per GPU: ',
self.ds_engine.train_micro_batch_size_per_gpu()))
print('{:<60} {:<8}'.format('params per GPU: ', params_to_string(total_params)))
print('{:<60} {:<8}'.format(
'params of model = params per GPU * mp_size: ',
params_to_string(total_params *
((self.ds_engine.mp_world_size) if self.ds_engine else 1))))
print('{:<60} {:<8}'.format('fwd MACs per GPU: ', macs_to_string(total_macs)))
print('{:<60} {:<8}'.format('fwd flops per GPU: ', num_to_string(total_flops)))
print('{:<60} {:<8}'.format(
'fwd flops of model = fwd flops per GPU * mp_size: ',
num_to_string(total_flops *
((self.ds_engine.mp_world_size) if self.ds_engine else 1))))
fwd_latency = self.get_total_duration()
if self.ds_engine and self.ds_engine.wall_clock_breakdown():
fwd_latency = self.ds_engine.timers('forward').elapsed(False) / 1000.0
print('{:<60} {:<8}'.format('fwd latency: ', duration_to_string(fwd_latency)))
print('{:<60} {:<8}'.format(
'fwd FLOPS per GPU = fwd flops per GPU / fwd latency: ',
flops_to_string(total_flops / fwd_latency)))
if self.ds_engine and self.ds_engine.wall_clock_breakdown():
bwd_latency = self.ds_engine.timers('backward').elapsed(False) / 1000.0
step_latency = self.ds_engine.timers('step').elapsed(False) / 1000.0
print('{:<60} {:<8}'.format('bwd latency: ',
duration_to_string(bwd_latency)))
print('{:<60} {:<8}'.format(
'bwd FLOPS per GPU = 2 * fwd flops per GPU / bwd latency: ',
flops_to_string(2 * total_flops / bwd_latency)))
print('{:<60} {:<8}'.format(
'fwd+bwd FLOPS per GPU = 3 * fwd flops per GPU / (fwd+bwd latency): ',
flops_to_string(3 * total_flops / (fwd_latency + bwd_latency))))
print('{:<60} {:<8}'.format('step latency: ',
duration_to_string(step_latency)))
iter_latency = fwd_latency + bwd_latency + step_latency
print('{:<60} {:<8}'.format('iter latency: ',
duration_to_string(iter_latency)))
print('{:<60} {:<8}'.format(
'FLOPS per GPU = 3 * fwd flops per GPU / iter latency: ',
flops_to_string(3 * total_flops / iter_latency)))
samples_per_iter = self.ds_engine.train_micro_batch_size_per_gpu(
) * self.ds_engine.world_size
print('{:<60} {:<8.2f}'.format('samples/second: ',
samples_per_iter / iter_latency))
def flops_repr(module):
params = module.__params__
flops = get_module_flops(module)
macs = get_module_macs(module)
items = [
params_to_string(params),
"{:.2%} Params".format(params / total_params if total_params else 0),
macs_to_string(macs),
"{:.2%} MACs".format(0.0 if total_macs == 0 else macs / total_macs),
]
duration = get_module_duration(module)
items.append(duration_to_string(duration))
items.append(
"{:.2%} latency".format(0.0 if total_duration == 0 else duration /
total_duration))
items.append(flops_to_string(0.0 if duration == 0 else flops / duration))
items.append(module.original_extra_repr())
return ", ".join(items)
def add_extra_repr(module):
flops_extra_repr = flops_repr.__get__(module)
if module.extra_repr != flops_extra_repr:
module.original_extra_repr = module.extra_repr
module.extra_repr = flops_extra_repr
assert module.extra_repr != module.original_extra_repr
def del_extra_repr(module):
if hasattr(module, "original_extra_repr"):
module.extra_repr = module.original_extra_repr
del module.original_extra_repr
self.model.apply(add_extra_repr)
print(
"\n----------------------------- Aggregated Profile per GPU -----------------------------"
)
self.print_model_aggregated_profile(module_depth=module_depth,
top_modules=top_modules)
if detailed:
print(
"\n------------------------------ Detailed Profile per GPU ------------------------------"
)
print(
"Each module profile is listed after its name in the following order: \nparams, percentage of total params, MACs, percentage of total MACs, fwd latency, percentage of total fwd latency, fwd FLOPS"
)
print(
"\nNote: 1. A module can have torch.nn.module or torch.nn.functional to compute logits (e.g. CrossEntropyLoss). They are not counted as submodules, thus not to be printed out. However they make up the difference between a parent's MACs (or latency) and the sum of its submodules'.\n2. Number of floating-point operations is a theoretical estimation, thus FLOPS computed using that could be larger than the maximum system throughput.\n3. The fwd latency listed in the top module's profile is directly captured at the module forward function in PyTorch, thus it's less than the fwd latency shown above which is captured in DeepSpeed.\n"
)
print(self.model)
self.model.apply(del_extra_repr)
print(
"------------------------------------------------------------------------------"
)
if output_file:
sys.stdout = original_stdout
f.close()
def print_model_aggregated_profile(self, module_depth=-1, top_modules=1):
"""Prints the names of the top top_modules modules in terms of aggregated time, flops, and parameters at depth module_depth.
Args:
module_depth (int, optional): the depth of the modules to show. Defaults to -1 (the innermost modules).
top_modules (int, optional): the number of top modules to show. Defaults to 1.
"""
info = {}
if not hasattr(self.model, "__flops__"):
print(
"no __flops__ attribute in the model, call this function after start_profile and before end_profile"
)
return
def walk_module(module, curr_depth, info):
if curr_depth not in info:
info[curr_depth] = {}
if module.__class__.__name__ not in info[curr_depth]:
info[curr_depth][module.__class__.__name__] = [
0,
0,
0,
] # macs, params, time
info[curr_depth][module.__class__.__name__][0] += get_module_macs(module)
info[curr_depth][module.__class__.__name__][1] += module.__params__
info[curr_depth][module.__class__.__name__][2] += get_module_duration(module)
has_children = len(module._modules.items()) != 0
if has_children:
for child in module.children():
walk_module(child, curr_depth + 1, info)
walk_module(self.model, 0, info)
depth = module_depth
if module_depth == -1:
depth = len(info) - 1
print(
f'Top {top_modules} modules in terms of params, MACs or fwd latency at different model depths:'
)
for d in range(depth):
num_items = min(top_modules, len(info[d]))
sort_macs = {
k: macs_to_string(v[0])
for k,
v in sorted(info[d].items(),
key=lambda item: item[1][0],
reverse=True)[:num_items]
}
sort_params = {
k: params_to_string(v[1])
for k,
v in sorted(info[d].items(),
key=lambda item: item[1][1],
reverse=True)[:num_items]
}
sort_time = {
k: duration_to_string(v[2])
for k,
v in sorted(info[d].items(),
key=lambda item: item[1][2],
reverse=True)[:num_items]
}
print(f"depth {d}:")
print(f" params - {sort_params}")
print(f" MACs - {sort_macs}")
print(f" fwd latency - {sort_time}")
def macs_to_string(macs, units=None, precision=2):
if units is None:
if macs // 10**9 > 0:
return str(round(macs / 10.0**9, precision)) + " GMACs"
elif macs // 10**6 > 0:
return str(round(macs / 10.0**6, precision)) + " MMACs"
elif macs // 10**3 > 0:
return str(round(macs / 10.0**3, precision)) + " KMACs"
else:
return str(macs) + " MACs"
else:
if units == "GMACs":
return str(round(macs / 10.0**9, precision)) + " " + units
elif units == "MMACs":
return str(round(macs / 10.0**6, precision)) + " " + units
elif units == "KMACs":
return str(round(macs / 10.0**3, precision)) + " " + units
else:
return str(macs) + " MACs"
def number_to_string(num, units=None, precision=2):
if units is None:
if num // 10**9 > 0:
return str(round(num / 10.0**9, precision)) + " G"
elif num // 10**6 > 0:
return str(round(num / 10.0**6, precision)) + " M"
elif num // 10**3 > 0:
return str(round(num / 10.0**3, precision)) + " K"
else:
return str(num) + " "
else:
if units == "G":
return str(round(num / 10.0**9, precision)) + " " + units
elif units == "M":
return str(round(num / 10.0**6, precision)) + " " + units
elif units == "K":
return str(round(num / 10.0**3, precision)) + " " + units
else:
return str(num) + " "
def params_to_string(params_num, units=None, precision=2):
if units is None:
if params_num // 10**6 > 0:
    return str(round(params_num / 10.0**6, precision)) + " M"
elif params_num // 10**3 > 0:
    return str(round(params_num / 10.0**3, precision)) + " k"
else:
return str(params_num)
else:
if units == "M":
return str(round(params_num / 10.0**6, precision)) + " " + units
elif units == "K":
return str(round(params_num / 10.0**3, precision)) + " " + units
else:
return str(params_num)
The provided code snippet includes necessary dependencies for implementing the `get_model_profile` function. Write a Python function `def get_model_profile( model, input_shape=None, args=[], kwargs={}, print_profile=True, detailed=True, module_depth=-1, top_modules=1, warm_up=1, as_string=True, output_file=None, ignore_modules=None, )` to solve the following problem:
Returns the total floating-point operations, MACs, and parameters of a model. Example: .. code-block:: python model = torchvision.models.alexnet() batch_size = 256 flops, macs, params = get_model_profile(model=model, input_shape=(batch_size, 3, 224, 224)) Args: model ([torch.nn.Module]): the PyTorch model to be profiled. input_shape (tuple): input shape to the model. If specified, the model takes a tensor with this shape as the only positional argument. args (list): list of positional arguments to the model. kwargs (dict): dictionary of keyword arguments to the model. print_profile (bool, optional): whether to print the model profile. Defaults to True. detailed (bool, optional): whether to print the detailed model profile. Defaults to True. module_depth (int, optional): the depth into the nested modules. Defaults to -1 (the innermost modules). top_modules (int, optional): the number of top modules to print in the aggregated profile. Defaults to 1. warm_up (int, optional): the number of warm-up steps before measuring the latency of each module. Defaults to 1. as_string (bool, optional): whether to print the output as string. Defaults to True. output_file (str, optional): path to the output file. If None, the profiler prints to stdout. ignore_modules ([type], optional): the list of modules to ignore during profiling. Defaults to None. Returns: The number of floating-point operations, multiply-accumulate operations (MACs), and parameters in the model.
Here is the function:
def get_model_profile(
model,
input_shape=None,
args=[],
kwargs={},
print_profile=True,
detailed=True,
module_depth=-1,
top_modules=1,
warm_up=1,
as_string=True,
output_file=None,
ignore_modules=None,
):
"""Returns the total floating-point operations, MACs, and parameters of a model.
Example:
.. code-block:: python
model = torchvision.models.alexnet()
batch_size = 256
flops, macs, params = get_model_profile(model=model, input_shape=(batch_size, 3, 224, 224))
Args:
model ([torch.nn.Module]): the PyTorch model to be profiled.
input_shape (tuple): input shape to the model. If specified, the model takes a tensor with this shape as the only positional argument.
args (list): list of positional arguments to the model.
kwargs (dict): dictionary of keyword arguments to the model.
print_profile (bool, optional): whether to print the model profile. Defaults to True.
detailed (bool, optional): whether to print the detailed model profile. Defaults to True.
module_depth (int, optional): the depth into the nested modules. Defaults to -1 (the innermost modules).
top_modules (int, optional): the number of top modules to print in the aggregated profile. Defaults to 1.
warm_up (int, optional): the number of warm-up steps before measuring the latency of each module. Defaults to 1.
as_string (bool, optional): whether to print the output as string. Defaults to True.
output_file (str, optional): path to the output file. If None, the profiler prints to stdout.
ignore_modules ([type], optional): the list of modules to ignore during profiling. Defaults to None.
Returns:
The number of floating-point operations, multiply-accumulate operations (MACs), and parameters in the model.
"""
assert isinstance(model, nn.Module), "model must be a PyTorch module"
prof = FlopsProfiler(model)
model.eval()
if input_shape is not None:
assert type(input_shape) is tuple, "input_shape must be a tuple"
assert len(input_shape) >= 1, "input_shape must have at least one element"
try:
input = torch.ones(()).new_empty(
(*input_shape,
),
dtype=next(model.parameters()).dtype,
device=next(model.parameters()).device,
)
except StopIteration:
input = torch.ones(()).new_empty((*input_shape, ))
args = [input]
assert (len(args) > 0) or (len(kwargs) > 0), "args and/or kwargs must be specified if input_shape is None"
for _ in range(warm_up):
if kwargs:
_ = model(*args, **kwargs)
else:
_ = model(*args)
prof.start_profile(ignore_list=ignore_modules)
if kwargs:
_ = model(*args, **kwargs)
else:
_ = model(*args)
flops = prof.get_total_flops()
macs = prof.get_total_macs()
params = prof.get_total_params()
if print_profile:
prof.print_model_profile(profile_step=warm_up,
module_depth=module_depth,
top_modules=top_modules,
detailed=detailed,
output_file=output_file)
prof.end_profile()
if as_string:
return number_to_string(flops), macs_to_string(macs), params_to_string(params)
return flops, macs, params | Returns the total floating-point operations, MACs, and parameters of a model. Example: .. code-block:: python model = torchvision.models.alexnet() batch_size = 256 flops, macs, params = get_model_profile(model=model, input_shape=(batch_size, 3, 224, 224)) Args: model ([torch.nn.Module]): the PyTorch model to be profiled. input_shape (tuple): input shape to the model. If specified, the model takes a tensor with this shape as the only positional argument. args (list): list of positional arguments to the model. kwargs (dict): dictionary of keyword arguments to the model. print_profile (bool, optional): whether to print the model profile. Defaults to True. detailed (bool, optional): whether to print the detailed model profile. Defaults to True. module_depth (int, optional): the depth into the nested modules. Defaults to -1 (the innermost modules). top_modules (int, optional): the number of top modules to print in the aggregated profile. Defaults to 1. warm_up (int, optional): the number of warm-up steps before measuring the latency of each module. Defaults to 1. as_string (bool, optional): whether to print the output as string. Defaults to True. output_file (str, optional): path to the output file. If None, the profiler prints to stdout. ignore_modules ([type], optional): the list of modules to ignore during profiling. Defaults to None. Returns: The number of floating-point operations, multiply-accumulate operations (MACs), and parameters in the model.
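For a dependency-free variant of the docstring example, a sketch with a toy model (shapes are arbitrary):
net = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
flops, macs, params = get_model_profile(net,
                                        input_shape=(8, 16),
                                        print_profile=False)
print(flops, macs, params)  # ('10.5 K', '5.12 KMACs', '676') for these assumed shapes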
10,397 | import os
import torch
import inspect
from deepspeed.utils import get_caller_func
The provided code snippet includes necessary dependencies for implementing the `older_torch` function. Write a Python function `def older_torch()` to solve the following problem:
Helper to lookup torch version. For versions less than 1.8, torch.dist used torch.distributed.group.WORLD as the default group argument instead of None. See more details at: https://github.com/pytorch/pytorch/pull/48767
Here is the function:
def older_torch():
'''
Helper to lookup torch version. For versions less than 1.8, torch.dist
used torch.distributed.group.WORLD as the default group argument instead of None.
See more details at: https://github.com/pytorch/pytorch/pull/48767
'''
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 1 and TORCH_MINOR < 8:
return True
else:
return False | Helper to lookup torch version. For versions less than 1.8, torch.dist used torch.distributed.group.WORLD as the default group argument instead of None. See more details at: https://github.com/pytorch/pytorch/pull/48767 |
10,398 | import os
import torch
import inspect
from deepspeed.utils import get_caller_func
The provided code snippet includes necessary dependencies for implementing the `has_allgather_base` function. Write a Python function `def has_allgather_base()` to solve the following problem:
Helper to check if torch.distributed has _all_gather_base
Here is the function:
def has_allgather_base():
'''
Helper to check if torch.distributed has _all_gather_base
'''
return hasattr(torch.distributed, "_all_gather_base") | Helper to check if torch.distributed has _all_gather_base |
10,399 | import os
import torch
import inspect
from deepspeed.utils import get_caller_func
The provided code snippet includes necessary dependencies for implementing the `has_reduce_scatter_base` function. Write a Python function `def has_reduce_scatter_base()` to solve the following problem:
Helper to check if torch.distributed has _reduce_scatter_base
Here is the function:
def has_reduce_scatter_base():
'''
Helper to check if torch.distributed has _reduce_scatter_base
'''
return hasattr(torch.distributed, "_reduce_scatter_base") | Helper to check if torch.distributed has _reduce_scatter_base |
10,400 | import os
import torch
import inspect
from deepspeed.utils import get_caller_func
def get_local_rank_from_launcher():
# DeepSpeed launcher will set it so get from there
rank = os.environ.get('LOCAL_RANK')
if rank is None:
rank = os.environ.get('OMPI_COMM_WORLD_LOCAL_RANK')
# Make it a single process job and set rank to 0
if rank is None:
rank = 0
return int(rank) | null |
10,401 | import os
import torch
import inspect
from deepspeed.utils import get_caller_func
def get_world_rank_from_launcher():
# DeepSpeed launcher will set it so get from there
rank = os.environ.get('RANK')
if rank is None:
rank = os.environ.get('OMPI_COMM_WORLD_RANK')
# Make it a single process job and set rank to 0
if rank is None:
rank = 0
return int(rank) | null |
10,402 | import os
import torch
import inspect
from deepspeed.utils import get_caller_func
def get_world_size_from_launcher():
# DeepSpeed launcher will set it so get from there
size = os.environ.get('WORLD_SIZE')
rank = os.environ.get('RANK')
if size is None:
size = os.environ.get('OMPI_COMM_WORLD_SIZE')
# Make it a single process job and set size to 1
if size is None:
size = 1
if rank is not None and int(rank) == 0:
    print(f"set world size to {size}")
return int(size) | null |
10,403 | import os
import torch
import inspect
from deepspeed.utils import get_caller_func
def get_tensor_position(func):
sig_params = inspect.signature(func).parameters
arg = None
# most colls
if 'tensor' in sig_params:
arg = 'tensor'
# reduce scatter coll
elif 'input_list' in sig_params:
arg = 'input_list'
# all_to_all and torch multiGPU colls
elif 'input_tensor_list' in sig_params:
arg = 'input_tensor_list'
if arg is None:
return -1
else:
return list(sig_params).index(arg)
def get_default_args(func):
    # Defaults declared in func's signature, as a name -> value dict.
    signature = inspect.signature(func)
    return {
        k: v.default
        for k, v in signature.parameters.items()
        if v.default is not inspect.Parameter.empty
    }

def get_tensor_kwarg(func, kwargs):
func_args = get_default_args(func)
func_args.update(kwargs)
arg = None
if 'tensor' in func_args:
arg = func_args['tensor']
elif 'input_list' in func_args:
arg = func_args['input_list']
elif 'input_tensor_list' in func_args:
arg = func_args['input_tensor_list']
return arg
def get_msg_size_from_args(func, *args, **kwargs):
# 3 cases:
# - tensor arg is in args
# - tensor arg is in kwargs
# - tensor arg is not present (e.g. barrier)
tensor_arg_position = -1
tensor_arg = None
# check if tensor arg is in args
if len(args) > 0:
tensor_arg_position = get_tensor_position(func)
if tensor_arg_position > -1:
tensor_arg = args[get_tensor_position(func)]
# check if tensor arg is in kwargs
if tensor_arg is None and len(kwargs) > 0:
tensor_arg = get_tensor_kwarg(func, kwargs)
# if tensor arg is not present, no data is being transmitted
if tensor_arg is None:
return 0
else:
# Sum of tensor sizes for list colls such as torch's all_to_all
# NOTE: msg_size for list colls will not be the actual size transmitted by a given MPI/NCCL call within the coll op. Instead, it's the total amount of data transmitted.
if type(tensor_arg) is list:
return sum(x.element_size() * x.nelement() for x in tensor_arg)
else:
return tensor_arg.element_size() * tensor_arg.nelement() | null |
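An illustrative sketch with a stand-in collective whose signature mimics torch.distributed.all_reduce; the function is hypothetical and exists only to show how the tensor argument is located and sized:
def fake_all_reduce(tensor, op=None, group=None, async_op=False):
    return tensor

t = torch.zeros(1024, dtype=torch.float32)
print(get_msg_size_from_args(fake_all_reduce, t))  # 1024 elements * 4 bytes = 4096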
10,404 | import os
import torch
import inspect
from deepspeed.utils import get_caller_func
def get_debug_log_name(func_args, debug):
if debug:
return func_args['log_name'] + ' | [Caller Func: ' + get_caller_func() + ']'
else:
return func_args['log_name'] | null |
10,405 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
cdb = None
timers = timer.SynchronizedWallClockTimer()
comms_logger = CommsLogger()
from deepspeed.comm.utils import *
def barrier(group=None,
async_op=False,
device_ids=None,
prof=False,
log_name='barrier',
debug=get_caller_func()):
global cdb
return cdb.barrier(group=group, async_op=async_op, device_ids=device_ids)
def timed_op(func):
def log_wrapper(*args, **kwargs):
        # Check the enabled flag first so that the overhead added to each comm op is at most two conditional checks
if comms_logger.enabled:
if ('prof' in kwargs and kwargs['prof']) or comms_logger.prof_all or (
'log_name' in kwargs
and kwargs['log_name'] in comms_logger.prof_ops):
# Need func args for their defaults
func_args = get_default_args(func)
func_args.update(kwargs)
msg_size = get_msg_size_from_args(func, *args, **kwargs)
log_name = get_debug_log_name(func_args, comms_logger.debug)
timers(log_name).start()
# Return the op, then stop the op's timer
try:
return func(*args, **kwargs)
finally:
if comms_logger.enabled:
# Need to make op blocking for accurate logging
torch.cuda.synchronize()
# If we're using MPI, we can't simply sync the stream
if cdb.using_mpi:
cdb.barrier()
if ('prof' in kwargs and kwargs['prof']) or comms_logger.prof_all or (
'log_name' in kwargs
and kwargs['log_name'] in comms_logger.prof_ops):
log_name = get_debug_log_name(func_args, comms_logger.debug)
raw_name = func.__name__
timers(log_name).stop()
# need temp var since 'elapsed' resets events
time_elapsed = timers(log_name).elapsed(reset=False)
comms_logger.append(raw_name, log_name, time_elapsed, msg_size)
return log_wrapper | null |
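A sketch of how an op would be wrapped, assuming a hypothetical collective `example_op` (a real op would dispatch into the backend object `cdb`):
@timed_op
def example_op(tensor, group=None, async_op=False,
               prof=False, log_name='example_op', debug=None):
    # The decorator starts a timer and records the message size when profiling
    # is enabled, then stops the timer in its `finally` block after this returns.
    return tensor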
10,406 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
use_ds_backend = False
cdb = None
nccl_backend = None
mpi_backend = None
from deepspeed.comm.utils import *
NCCL_BACKEND = 'nccl'
MPI_BACKEND = 'mpi'
GLOO_BACKEND = 'gloo'
def init_deepspeed_backend(ds_backend):
global cdb
global nccl_backend
global mpi_backend
global use_ds_backend
    if ds_backend == NCCL_BACKEND:
        utils.logger.warning("NCCL backend in DeepSpeed not yet implemented")
    elif ds_backend == MPI_BACKEND:
        utils.logger.warning("MPI backend in DeepSpeed not yet implemented")
    elif ds_backend == GLOO_BACKEND:
        utils.logger.warning("Gloo backend in DeepSpeed not yet implemented")
    else:
        utils.logger.warning(f"DeepSpeed does not support {ds_backend} backend")
10,407 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
cdb = None
from deepspeed.comm.utils import *
def destroy_process_group(group=None):
global cdb
return cdb.destroy_process_group(group=group) | null |
10,408 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
cdb = None
from deepspeed.comm.utils import *
def is_initialized():
#assert cdb is not None, 'DeepSpeed backend not set, please initialize it using init_process_group()'
if cdb is None:
return False
else:
return cdb.is_initialized()
def new_group(ranks):
global cdb
assert cdb is not None and cdb.is_initialized(), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.new_group(ranks) | null |
10,409 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
from deepspeed.comm.utils import *
def is_available() -> bool:
# Returns ``True`` if the deepspeed comm package is available.
# TODO: load other ops. Clients including deepspeed itself should use deepspeed.comm to import
# any communication related primitives from this package.
# use hasattr(deepspeed.csrc.ops, "_comm") or something
return True | null |
10,410 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
use_ds_backend = False
cdb = None
nccl_backend = None
mpi_backend = None
from deepspeed.comm.utils import *
def is_initialized():
#assert cdb is not None, 'DeepSpeed backend not set, please initialize it using init_process_group()'
if cdb is None:
return False
else:
return cdb.is_initialized()
NCCL_BACKEND = 'nccl'
MPI_BACKEND = 'mpi'
def set_backend(backend):
if not use_ds_backend:
utils.logger.error(
"DeepSpeed communication backend is required. Please use deepspeed.comm.init_distributed(backend, use_deepspeed=True) to use this functionality"
)
raise RuntimeError(
'Error: Custom DeepSpeed backend called without initializing DeepSpeed distributed.'
)
global cdb
global nccl_backend
global mpi_backend
try:
        if backend == NCCL_BACKEND:
            if nccl_backend is not None and nccl_backend.is_initialized():
                cdb = nccl_backend
        elif backend == MPI_BACKEND:
            if mpi_backend is not None and mpi_backend.is_initialized():
                cdb = mpi_backend
except Exception as inst:
print(inst) | null |
10,411 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
cdb = None
from deepspeed.comm.utils import *
def broadcast(tensor,
src,
group=None,
async_op=False,
prof=False,
log_name='broadcast',
debug=get_caller_func()):
global cdb
return cdb.broadcast(tensor=tensor, src=src, group=group, async_op=async_op) | null |
10,412 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
class ReduceOp(Enum):
    SUM = 0
    PRODUCT = 1
    MIN = 2
    MAX = 3
    BAND = 4
    BOR = 5
    BXOR = 6
    AVG = 7
    UNUSED = 8
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
cdb = None
has_warned_reduce_scatter = False
from deepspeed.comm.utils import *
def is_initialized():
    if cdb is None:
        return False
    else:
        return cdb.is_initialized()
def has_reduce_scatter_base():
    # Dependency signature; the torch-version capability probe is defined elsewhere.
    ...
# The following wrappers mirror the other collectives in this module: dispatch to cdb.
def reduce_scatter_base(output_tensor,
                        tensor,
                        op=ReduceOp.SUM,
                        group=None,
                        async_op=False,
                        prof=False,
                        log_name='reduce_scatter_base',
                        debug=get_caller_func()):
    global cdb
    return cdb.reduce_scatter_base(output_tensor=output_tensor,
                                   input_tensor=tensor,
                                   op=op,
                                   group=group,
                                   async_op=async_op)
def reduce_scatter(output,
                   input_list,
                   op=ReduceOp.SUM,
                   group=None,
                   async_op=False,
                   prof=False,
                   log_name='reduce_scatter',
                   debug=get_caller_func()):
    global cdb
    return cdb.reduce_scatter(output=output,
                              input_list=input_list,
                              op=op,
                              group=group,
                              async_op=async_op)
def get_world_size(group=None) -> int:
    global cdb
    assert cdb is not None and cdb.is_initialized(), 'DeepSpeed backend not set, please initialize it using init_process_group()'
    return cdb.get_world_size(group)
def reduce_scatter_fn(output_tensor,
tensor,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
debug=get_caller_func()):
global cdb
global has_warned_reduce_scatter
assert cdb is not None and cdb.is_initialized(), 'DeepSpeed backend not set, please initialize it using init_process_group()'
if cdb.has_reduce_scatter_base:
return reduce_scatter_base(output_tensor,
tensor,
op=op,
group=group,
async_op=async_op,
prof=prof,
debug=debug)
else:
if not has_warned_reduce_scatter:
utils.logger.warning(
"unable to find torch.distributed._reduce_scatter_base. will fall back to "
"torch.distributed.all_gather which will result in suboptimal performance. "
"please consider upgrading your pytorch installation.")
has_warned_reduce_scatter = True
input_tensor_lst = list(torch.chunk(tensor, cdb.get_world_size(group)))
return reduce_scatter(output_tensor,
input_tensor_lst,
op=op,
group=group,
async_op=async_op,
prof=prof,
debug=debug) | null |
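The fallback path relies on torch.chunk to split the flat input into world_size contiguous views, one per rank. A minimal illustration of just that step:
import torch

tensor = torch.arange(8.)
world_size = 4
input_tensor_lst = list(torch.chunk(tensor, world_size))
# Four views of two elements each; rank i's result is reduced into chunk i.
assert [c.tolist() for c in input_tensor_lst] == [[0., 1.], [2., 3.], [4., 5.], [6., 7.]]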
10,413 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
cdb = None
from deepspeed.comm.utils import *
def all_to_all_single(output,
tensor,
output_split_sizes=None,
input_split_sizes=None,
group=None,
async_op=False,
prof=False,
log_name='all_to_all_single',
debug=get_caller_func()):
global cdb
return cdb.all_to_all_single(output=output,
input=tensor,
output_split_sizes=output_split_sizes,
input_split_sizes=input_split_sizes,
group=group,
async_op=async_op) | null |
10,414 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
cdb = None
from deepspeed.comm.utils import *
def send(tensor,
dst,
group=None,
tag=0,
prof=False,
log_name='send',
debug=get_caller_func()):
global cdb
return cdb.send(tensor=tensor, dst=dst, group=group, tag=tag)
def isend(tensor,
dst,
group=None,
tag=0,
prof=False,
log_name='isend',
debug=get_caller_func()):
global cdb
    return cdb.isend(tensor=tensor, dst=dst, group=group, tag=tag)
10,415 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
cdb = None
from deepspeed.comm.utils import *
def recv(tensor,
src=None,
group=None,
tag=0,
prof=False,
log_name='recv',
debug=get_caller_func()):
global cdb
return cdb.recv(tensor=tensor, src=src, group=group, tag=tag)
def irecv(tensor,
src=None,
group=None,
tag=0,
prof=False,
log_name='irecv',
debug=get_caller_func()):
global cdb
    return cdb.irecv(tensor=tensor, src=src, group=group, tag=tag)
10,416 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
cdb = None
from deepspeed.comm.utils import *
def gather(tensor,
gather_list=None,
dst=0,
group=None,
async_op=False,
prof=False,
log_name='gather',
debug=get_caller_func()):
global cdb
return cdb.gather(tensor=tensor,
gather_list=gather_list,
dst=dst,
group=group,
async_op=async_op) | null |
10,417 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
cdb = None
from deepspeed.comm.utils import *
def scatter(tensor,
scatter_list=None,
src=0,
group=None,
async_op=False,
prof=False,
log_name='scatter',
debug=get_caller_func()):
global cdb
return cdb.scatter(tensor=tensor,
scatter_list=scatter_list,
src=src,
group=group,
async_op=async_op) | null |
10,418 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
cdb = None
from deepspeed.comm.utils import *
def barrier(group=None,
async_op=False,
device_ids=None,
prof=False,
log_name='barrier',
debug=get_caller_func()):
global cdb
return cdb.barrier(group=group, async_op=async_op, device_ids=device_ids)
def monitored_barrier(group=None,
timeout=None,
wait_all_ranks=False,
prof=False,
log_name='monitored_barrier',
debug=get_caller_func()):
global cdb
    return cdb.monitored_barrier(group=group,
                                 timeout=timeout,
                                 wait_all_ranks=wait_all_ranks)
10,419 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
cdb = None
comms_logger = CommsLogger()
from deepspeed.comm.utils import *
def barrier(group=None,
async_op=False,
device_ids=None,
prof=False,
log_name='barrier',
debug=get_caller_func()):
global cdb
return cdb.barrier(group=group, async_op=async_op, device_ids=device_ids)
def get_rank(group=None):
"""
Returns the rank of the current process in the provided ``group`` or the
default group if none was provided.
Rank is a unique identifier assigned to each process within a distributed
process group. They are always consecutive integers ranging from 0 to
``world_size``.
Args:
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
Returns:
The rank of the process group
-1, if not part of the group
"""
global cdb
assert cdb is not None and cdb.is_initialized(), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.get_rank(group)
def log_summary():
global cdb
barrier(log_name='log_summary_barrier')
if cdb.get_rank() == 0:
comms_logger.log_all()
barrier(log_name='log_summary_barrier') | null |
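A typical rank-guarded usage, assuming init_distributed() has already set up the backend (this sketch will not run without it):
if get_rank() == 0:
    print('only the global rank-0 process logs this')
log_summary()  # must be called on all ranks; it barriers before and after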
10,420 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
class ReduceOp(Enum):
SUM = 0
PRODUCT = 1
MIN = 2
MAX = 3
BAND = 4
BOR = 5
BXOR = 6
AVG = 7
UNUSED = 8
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
cdb = None
from deepspeed.comm.utils import *
def reduce(tensor,
dst,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
log_name='reduce',
debug=get_caller_func()):
global cdb
return cdb.reduce(tensor=tensor, dst=dst, op=op, group=group, async_op=async_op) | null |
10,421 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
class ReduceOp(Enum):
SUM = 0
PRODUCT = 1
MIN = 2
MAX = 3
BAND = 4
BOR = 5
BXOR = 6
AVG = 7
UNUSED = 8
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
cdb = None
from deepspeed.comm.utils import *
def all_reduce(tensor,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
log_name='all_reduce',
debug=get_caller_func()):
    # TODO: wrap this op with profiling (timer context, TensorBoard logging for comm calls)
    global cdb
return cdb.all_reduce(tensor, op, group, async_op) | null |
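A minimal usage sketch, assuming the backend has been initialized and every rank runs the same code:
import torch

t = torch.ones(4)
all_reduce(t, op=ReduceOp.SUM)
# every element of t now equals world_size, identically on all ranks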
10,422 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
cdb = None
from deepspeed.comm.utils import *
def is_initialized():
    if cdb is None:
        return False
    else:
        return cdb.is_initialized()
def get_world_group():
global cdb
assert cdb is not None and cdb.is_initialized(), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.get_world_group() | null |
10,423 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
cdb = None
from deepspeed.comm.utils import *
def is_initialized():
#assert cdb is not None, 'DeepSpeed backend not set, please initialize it using init_process_group()'
if cdb is None:
return False
else:
return cdb.is_initialized()
The provided code snippet includes necessary dependencies for implementing the `get_local_rank` function. Write a Python function `def get_local_rank()` to solve the following problem:
Helper function to get local rank after a backend has been set and initialized Args: None Returns: local rank (= GPU device ID)
Here is the function:
def get_local_rank():
"""
Helper function to get local rank after a backend has been set and initialized
Args:
None
Returns:
local rank (= GPU device ID)
"""
global cdb
assert cdb is not None and cdb.is_initialized(), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return get_local_rank_from_launcher() | Helper function to get local rank after a backend has been set and initialized Args: None Returns: local rank (= GPU device ID) |
10,424 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
cdb = None
from deepspeed.comm.utils import *
def is_initialized():
#assert cdb is not None, 'DeepSpeed backend not set, please initialize it using init_process_group()'
if cdb is None:
return False
else:
return cdb.is_initialized()
def get_global_rank(group=None, group_rank=0):
global cdb
assert cdb is not None and cdb.is_initialized(), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.get_global_rank(group, group_rank) | null |
10,425 | from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
cdb = None
from deepspeed.comm.utils import *
def configure(
deepspeed_config=None,
enabled=None,
prof_all=None,
prof_ops=None,
verbose=None,
debug=None,
):
if deepspeed_config is not None:
_configure_using_config_file(deepspeed_config.comms_config)
if enabled is not None:
comms_logger.enabled = enabled
if prof_all is not None:
comms_logger.prof_all = prof_all
if prof_ops is not None:
comms_logger.prof_ops = prof_ops
if verbose is not None:
comms_logger.verbose = verbose
if debug is not None:
comms_logger.debug = debug
def is_initialized():
#assert cdb is not None, 'DeepSpeed backend not set, please initialize it using init_process_group()'
if cdb is None:
return False
else:
return cdb.is_initialized()
def mpi_discovery(distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT, verbose=True):
'''
    Discover the MPI environment via mpi4py and map it to the relevant dist state
'''
from mpi4py import MPI
import subprocess
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
world_size = comm.Get_size()
master_addr = None
if rank == 0:
hostname_cmd = ["hostname -I"]
result = subprocess.check_output(hostname_cmd, shell=True)
master_addr = result.decode('utf-8').split()[0]
master_addr = comm.bcast(master_addr, root=0)
# Determine local rank by assuming hostnames are unique
proc_name = MPI.Get_processor_name()
all_procs = comm.allgather(proc_name)
local_rank = sum([i == proc_name for i in all_procs[:rank]])
os.environ['RANK'] = str(rank)
os.environ['WORLD_SIZE'] = str(world_size)
os.environ['LOCAL_RANK'] = str(local_rank)
os.environ['MASTER_ADDR'] = master_addr
os.environ['MASTER_PORT'] = str(distributed_port)
if verbose:
utils.logger.info(
"Discovered MPI settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}"
.format(os.environ['RANK'],
os.environ['LOCAL_RANK'],
os.environ['WORLD_SIZE'],
os.environ['MASTER_ADDR'],
os.environ['MASTER_PORT']))
if cdb is not None and cdb.is_initialized():
assert cdb.get_rank() == rank, "MPI rank {} does not match torch rank {}".format(
rank, cdb.get_rank())
assert cdb.get_world_size() == world_size, "MPI world size {} does not match torch world size {}".format(
world_size, cdb.get_world_size())
def in_aml():
# Are we running inside an Azure Machine Learning (AML) environment?
return 'AZUREML_EXPERIMENT_ID' in os.environ
def in_aws_sm():
# Are we running inside an AWS SageMaker environment?
return 'SM_TRAINING_ENV' in os.environ
def in_dlts():
# Are we running on a DLTS cluster?
return 'DLTS_JOB_ID' in os.environ
def patch_aml_env_for_torch_nccl_backend(master_port=6105, verbose=True):
"""Helper routine to get and set environment variables.
This is adapted from Azure ML's documentation available from:
https://azure.github.io/azureml-web/docs/cheatsheet/distributed-training/#environment-variables-from-openmpi
"""
os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"]
os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"]
single_node = int(os.environ["OMPI_COMM_WORLD_LOCAL_SIZE"]) == int(
os.environ["WORLD_SIZE"])
if not single_node:
master_node_params = os.environ["AZ_BATCH_MASTER_NODE"].split(":")
os.environ["MASTER_ADDR"] = master_node_params[0]
# Do not overwrite master port with that defined in AZ_BATCH_MASTER_NODE
if "MASTER_PORT" not in os.environ:
os.environ["MASTER_PORT"] = str(master_port)
else:
os.environ["MASTER_ADDR"] = os.environ["AZ_BATCHAI_MPI_MASTER_NODE"]
os.environ["MASTER_PORT"] = DEFAULT_AML_MASTER_PORT
if verbose:
utils.logger.info("NCCL_SOCKET_IFNAME original value = {}".format(
os.environ["NCCL_SOCKET_IFNAME"]))
os.environ["NCCL_SOCKET_IFNAME"] = DEFAULT_AML_NCCL_SOCKET_IFNAME
os.environ['LOCAL_RANK'] = os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]
if verbose:
utils.logger.info(
"Discovered AzureML settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}"
.format(os.environ['RANK'],
os.environ['LOCAL_RANK'],
os.environ['WORLD_SIZE'],
os.environ['MASTER_ADDR'],
os.environ['MASTER_PORT']))
def patch_aws_sm_env_for_torch_nccl_backend(verbose=True):
"""Helper routine to get and set environment variables when running inside an AWS SageMaker environment.
"""
os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"]
os.environ['LOCAL_RANK'] = os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]
os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"]
if verbose:
utils.logger.info(
"Discovered AWS SageMaker settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}"
.format(os.environ['RANK'],
os.environ['LOCAL_RANK'],
os.environ['WORLD_SIZE'],
os.environ['MASTER_ADDR'],
os.environ['MASTER_PORT']))
class TorchBackend(Backend):
"""
A light-weight wrapper class for torch.distributed API.
Only a subset of functions are wrapped. Once the init_process_group
is initialized, standard torch.distributed.* can be used directly
so no need to wrap all the functions. We can keep adding wrappers as
needed.
"""
def __init__(self, backend, timeout, init_method, name='torch'):
super(TorchBackend, self).__init__()
self.torch_version_before_18 = older_torch()
self.has_allgather_base = has_allgather_base()
self.has_reduce_scatter_base = has_reduce_scatter_base()
self.initialized = True
self.name = name
# Future functionality to support ds.initialize() on a single GPU
# The idea is to fake that dist backend is initialized even when
# it is not so we can run on a single GPU without doing any init_process_group
self.single_gpu_mode = True
self.init_process_group(backend, timeout, init_method)
def init_process_group(self, backend, timeout, init_method):
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend,
timeout=timeout,
init_method=init_method)
self.using_mpi = torch.distributed.get_backend() == 'mpi'
def all_reduce(self,
tensor,
op=torch.distributed.ReduceOp.SUM,
group=None,
async_op=False):
op = self._reduce_op(op)
return torch.distributed.all_reduce(tensor=tensor,
op=op,
group=group,
async_op=async_op)
def reduce(self, tensor, dst, op=ReduceOp.SUM, group=None, async_op=False):
return torch.distributed.reduce(tensor=tensor,
dst=dst,
op=self._reduce_op(op),
group=group,
async_op=async_op)
def reduce_scatter(self,
output,
input_list,
op=ReduceOp.SUM,
group=None,
async_op=False):
return torch.distributed.reduce_scatter(output=output,
input_list=input_list,
op=self._reduce_op(op),
group=group,
async_op=async_op)
def broadcast(self, tensor, src, group=None, async_op=False):
return torch.distributed.broadcast(tensor=tensor,
src=src,
group=group,
async_op=async_op)
def all_gather(self, tensor_list, tensor, group=None, async_op=False):
return torch.distributed.all_gather(tensor_list=tensor_list,
tensor=tensor,
group=group,
async_op=async_op)
def all_gather_base(self, output_tensor, input_tensor, group=None, async_op=False):
if self.has_allgather_base:
return torch.distributed.distributed_c10d._all_gather_base(
output_tensor=output_tensor,
input_tensor=input_tensor,
group=group,
async_op=async_op)
else:
utils.logger.warning(
"unable to find torch.distributed._all_gather_base. will fall back to "
"torch.distributed.reduce_scatter which will result in suboptimal performance. "
"please consider upgrading your pytorch installation.")
pass
def reduce_scatter_base(self,
output_tensor,
input_tensor,
op=ReduceOp.SUM,
group=None,
async_op=False):
if self.has_reduce_scatter_base:
return torch.distributed._reduce_scatter_base(output_tensor,
input_tensor,
op=self._reduce_op(op),
group=group,
async_op=async_op)
else:
utils.logger.warning(
"unable to find torch.distributed._reduce_scatter_base. will fall back to "
"torch.distributed.reduce_scatter which will result in suboptimal performance. "
"please consider upgrading your pytorch installation.")
pass
def all_to_all_single(self,
output,
input,
output_split_sizes=None,
input_split_sizes=None,
group=None,
async_op=False):
return torch.distributed.all_to_all_single(output=output,
input=input,
output_split_sizes=output_split_sizes,
input_split_sizes=input_split_sizes,
group=group,
async_op=async_op)
def send(self, tensor, dst, group=None, tag=0):
return torch.distributed.send(tensor=tensor, dst=dst, group=group, tag=tag)
def recv(self, tensor, src=None, group=None, tag=0):
return torch.distributed.recv(tensor=tensor, src=src, group=group, tag=tag)
def isend(self, tensor, dst, group=None, tag=0):
return torch.distributed.isend(tensor=tensor, dst=dst, group=group, tag=tag)
def irecv(self, tensor, src=None, group=None, tag=0):
return torch.distributed.irecv(tensor=tensor, src=src, group=group, tag=tag)
def gather(self, tensor, gather_list=None, dst=0, group=None, async_op=False):
return torch.distributed.gather(tensor=tensor,
gather_list=gather_list,
dst=dst,
group=group,
async_op=async_op)
def scatter(self, tensor, scatter_list=None, src=0, group=None, async_op=False):
return torch.distributed.scatter(tensor=tensor,
scatter_list=scatter_list,
src=src,
group=group,
async_op=async_op)
def barrier(self,
group=torch.distributed.GroupMember.WORLD,
async_op=False,
device_ids=None):
if group is None:
group = torch.distributed.GroupMember.WORLD
return torch.distributed.barrier(group=group,
async_op=async_op,
device_ids=device_ids)
def monitored_barrier(self,
group=torch.distributed.GroupMember.WORLD,
timeout=None,
wait_all_ranks=False):
if group is None:
group = torch.distributed.GroupMember.WORLD
return torch.distributed.monitored_barrier(group=group,
timeout=timeout,
wait_all_ranks=wait_all_ranks)
def get_rank(self, group=None):
return torch.distributed.get_rank(group=group)
def get_world_size(self, group=None):
return torch.distributed.get_world_size(group=group)
def is_initialized(self):
return torch.distributed.is_initialized()
def get_backend(self, group=None):
return torch.distributed.get_backend(group=group)
def new_group(self, ranks):
return torch.distributed.new_group(ranks)
def get_global_rank(self, group, group_rank):
if hasattr(torch.distributed.distributed_c10d, "get_global_rank"):
from torch.distributed.distributed_c10d import get_global_rank as _get_global_rank
else:
from torch.distributed.distributed_c10d import _get_global_rank
return _get_global_rank(group, group_rank)
def get_world_group(self):
return torch.distributed.group.WORLD
def destroy_process_group(self, group=None):
return torch.distributed.destroy_process_group(group=group)
def _reduce_op(self, op):
'''
Helper function. If the op provided is not a torch.dist.ReduceOp, convert it and return
'''
if not isinstance(op, torch.distributed.ReduceOp):
if op == ReduceOp.SUM:
op = torch.distributed.ReduceOp.SUM
elif op == ReduceOp.PRODUCT:
op = torch.distributed.ReduceOp.PRODUCT
elif op == ReduceOp.AVG:
op = torch.distributed.ReduceOp.AVG
elif op == ReduceOp.MIN:
op = torch.distributed.ReduceOp.MIN
elif op == ReduceOp.MAX:
op = torch.distributed.ReduceOp.MAX
elif op == ReduceOp.BAND:
op = torch.distributed.ReduceOp.BAND
elif op == ReduceOp.BOR:
op = torch.distributed.ReduceOp.BOR
elif op == ReduceOp.BXOR:
op = torch.distributed.ReduceOp.BXOR
return op
TORCH_DISTRIBUTED_DEFAULT_PORT = 29500
default_pg_timeout = timedelta(minutes=30)
The provided code snippet includes necessary dependencies for implementing the `init_distributed` function. Write a Python function `def init_distributed(dist_backend="nccl", auto_mpi_discovery=True, distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT, verbose=True, timeout=default_pg_timeout, init_method=None, dist_init_required=None, config=None)` to solve the following problem:
Initialize dist backend, potentially performing MPI discovery if needed Arguments: dist_backend: Optional (str). torch distributed backend, e.g., nccl, mpi, gloo auto_mpi_discovery: Optional (bool). if distributed environment variables are not set, attempt to discover them from MPI distributed_port: Optional (int). torch distributed backend port verbose: Optional (bool). verbose logging timeout: Optional (timedelta). Timeout for operations executed against the process group. Default value equals 30 minutes. init_method: Optional (string). Torch distributed URL specifying how to initialize the process group. Default is “env://” if no init_method or store is specified. config: Optional (dict). DeepSpeed configuration for setting up comms options (e.g. Comms profiling)
Here is the function:
def init_distributed(dist_backend="nccl",
auto_mpi_discovery=True,
distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT,
verbose=True,
timeout=default_pg_timeout,
init_method=None,
dist_init_required=None,
config=None):
''' Initialize dist backend, potentially performing MPI discovery if needed
Arguments:
dist_backend: Optional (str). torch distributed backend, e.g., nccl, mpi, gloo
        auto_mpi_discovery: Optional (bool). if distributed environment variables are not set, attempt to discover them from MPI
distributed_port: Optional (int). torch distributed backend port
verbose: Optional (bool). verbose logging
timeout: Optional (timedelta). Timeout for operations executed against the process group. Default value equals 30 minutes.
        init_method: Optional (string). Torch distributed URL specifying how to initialize the process group. Default is “env://” if no init_method or store is specified.
config: Optional (dict). DeepSpeed configuration for setting up comms options (e.g. Comms profiling)
'''
global cdb
configure(deepspeed_config=config)
if dist_init_required is None:
dist_init_required = cdb is None or not cdb.is_initialized()
if cdb is None and torch.distributed.is_initialized():
# The user initialized torch.dist themselves, create cdb and short-circuit
cdb = TorchBackend(dist_backend, timeout, init_method)
return
if dist_init_required is False:
assert (
cdb is not None and cdb.is_initialized() is True
), "Distributed backend is not initialized. Please set dist_init_required to True or initialize before calling deepspeed.initialize()"
else:
# Initialize torch distributed if needed
required_env = ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
if auto_mpi_discovery and not all(map(lambda v: v in os.environ, required_env)):
if verbose:
utils.logger.info(
"Not using the DeepSpeed or dist launchers, attempting to detect MPI environment..."
)
if in_aml() and not in_dlts():
patch_aml_env_for_torch_nccl_backend(verbose=verbose)
elif in_aws_sm():
patch_aws_sm_env_for_torch_nccl_backend(verbose=verbose)
else:
mpi_discovery(distributed_port=distributed_port, verbose=verbose)
if cdb is not None and cdb.is_initialized():
if int(os.getenv('RANK', '0')) == 0:
utils.logger.info('Distributed backend already initialized')
else:
assert isinstance(timeout, timedelta)
if int(os.getenv('RANK', '0')) == 0:
utils.logger.info(
'Initializing TorchBackend in DeepSpeed with backend {}'.format(
dist_backend))
# Create a torch backend object, initialize torch distributed, and assign to cdb
cdb = TorchBackend(dist_backend, timeout, init_method) | Initialize dist backend, potentially performing MPI discovery if needed Arguments: dist_backend: Optional (str). torch distributed backend, e.g., nccl, mpi, gloo auto_mpi_discovery Optional (bool). if distributed environment variables are not set, attempt to discover them from MPI distributed_port: Optional (int). torch distributed backend port verbose: Optional (bool). verbose logging timeout: Optional (timedelta). Timeout for operations executed against the process group. Default value equals 30 minutes. init_method: Optional (string). Torch distributed, URL specifying how to initialize the process group. Default is “env://” if no init_method or store is specified. config: Optional (dict). DeepSpeed configuration for setting up comms options (e.g. Comms profiling) |
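A hedged usage sketch; the deepspeed launcher (or an equivalent) is assumed to have set RANK, WORLD_SIZE, LOCAL_RANK, MASTER_ADDR and MASTER_PORT:
init_distributed(dist_backend='nccl', verbose=True)
assert is_initialized()
# From here on, the wrappers in this module (all_reduce, barrier, ...) dispatch to cdb.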
10,426 | import torch
from ..op_builder import QuantizerBuilder
quantizer_cuda_module = None
def ds_quantizer(input, groups=1, bit_num=8, sr=False, asym=False):
# Load cuda modules if needed
global quantizer_cuda_module
if quantizer_cuda_module is None:
quantizer_cuda_module = QuantizerBuilder().load()
if sr:
if asym:
quantize_func = quantizer_cuda_module.ds_sr_quantize_asym_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_sr_quantize_asym_fp32
else:
quantize_func = quantizer_cuda_module.ds_sr_quantize_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_sr_quantize_fp32
else:
if asym:
quantize_func = quantizer_cuda_module.ds_quantize_asym_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_quantize_asym_fp32
else:
quantize_func = quantizer_cuda_module.ds_quantize_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_quantize_fp32
return quantize_func(input, groups, bit_num) | null |
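A usage sketch, assuming a CUDA device and a successfully built QuantizerBuilder op (the exact rounding behavior is defined by the kernel):
import torch

x = torch.randn(4, 256, dtype=torch.half, device='cuda')
# Symmetric, deterministic 8-bit quantization over 4 groups.
q = ds_quantizer(x, groups=4, bit_num=8, sr=False, asym=False)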
10,427 | import os
import sys
import time
import importlib
from pathlib import Path
import subprocess
import shlex
import shutil
import tempfile
import distutils.ccompiler
import distutils.log
import distutils.sysconfig
from distutils.errors import CompileError, LinkError
from abc import ABC, abstractmethod
from typing import List
DEFAULT_COMPUTE_CAPABILITIES = "6.0;6.1;7.0"
try:
import torch
except ImportError:
print(
f"{WARNING} unable to import torch, please install it if you want to pre-compile any deepspeed ops."
)
else:
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
def installed_cuda_version():
import torch.utils.cpp_extension
cuda_home = torch.utils.cpp_extension.CUDA_HOME
assert cuda_home is not None, "CUDA_HOME does not exist, unable to compile CUDA op(s)"
# Ensure there is not a cuda version mismatch between torch and nvcc compiler
output = subprocess.check_output([cuda_home + "/bin/nvcc",
"-V"],
universal_newlines=True)
output_split = output.split()
release_idx = output_split.index("release")
release = output_split[release_idx + 1].replace(',', '').split(".")
# Ignore patch versions, only look at major + minor
cuda_major, cuda_minor = release[:2]
return int(cuda_major), int(cuda_minor)
def get_default_compute_capabilities():
compute_caps = DEFAULT_COMPUTE_CAPABILITIES
import torch.utils.cpp_extension
if torch.utils.cpp_extension.CUDA_HOME is not None and installed_cuda_version(
)[0] >= 11:
if installed_cuda_version()[0] == 11 and installed_cuda_version()[1] == 0:
# Special treatment of CUDA 11.0 because compute_86 is not supported.
compute_caps += ";8.0"
else:
compute_caps += ";8.0;8.6"
return compute_caps | null |
10,428 | import os
import sys
import time
import importlib
from pathlib import Path
import subprocess
import shlex
import shutil
import tempfile
import distutils.ccompiler
import distutils.log
import distutils.sysconfig
from distutils.errors import CompileError, LinkError
from abc import ABC, abstractmethod
from typing import List
try:
import torch
except ImportError:
print(
f"{WARNING} unable to import torch, please install it if you want to pre-compile any deepspeed ops."
)
else:
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
def installed_cuda_version():
import torch.utils.cpp_extension
cuda_home = torch.utils.cpp_extension.CUDA_HOME
assert cuda_home is not None, "CUDA_HOME does not exist, unable to compile CUDA op(s)"
# Ensure there is not a cuda version mismatch between torch and nvcc compiler
output = subprocess.check_output([cuda_home + "/bin/nvcc",
"-V"],
universal_newlines=True)
output_split = output.split()
release_idx = output_split.index("release")
release = output_split[release_idx + 1].replace(',', '').split(".")
# Ignore patch versions, only look at major + minor
cuda_major, cuda_minor = release[:2]
return int(cuda_major), int(cuda_minor)
cuda_minor_mismatch_ok = {
10: [
"10.0",
"10.1",
"10.2",
],
11: ["11.0",
"11.1",
"11.2",
"11.3",
"11.4",
"11.5",
"11.6",
"11.7",
"11.8"],
}
def assert_no_cuda_mismatch():
cuda_major, cuda_minor = installed_cuda_version()
sys_cuda_version = f'{cuda_major}.{cuda_minor}'
torch_cuda_version = ".".join(torch.version.cuda.split('.')[:2])
# This is a show-stopping error, should probably not proceed past this
if sys_cuda_version != torch_cuda_version:
if (cuda_major in cuda_minor_mismatch_ok
and sys_cuda_version in cuda_minor_mismatch_ok[cuda_major]
and torch_cuda_version in cuda_minor_mismatch_ok[cuda_major]):
print(f"Installed CUDA version {sys_cuda_version} does not match the "
f"version torch was compiled with {torch.version.cuda} "
"but since the APIs are compatible, accepting this combination")
return
raise Exception(
f"Installed CUDA version {sys_cuda_version} does not match the "
f"version torch was compiled with {torch.version.cuda}, unable to compile "
"cuda/cpp extensions without a matching cuda version.") | null |
10,429 | import torch
import torch.nn as nn
from ... import op_builder
from deepspeed import module_inject
from .diffusers_attention import DeepSpeedDiffusersAttention
from .bias_add import nhwc_bias_add
from .diffusers_2d_transformer import Diffusers2DTransformerConfig
transformer_cuda_module = None
def load_transformer_module():
global transformer_cuda_module
if transformer_cuda_module is None:
transformer_cuda_module = op_builder.InferenceBuilder().load()
return transformer_cuda_module | null |
10,430 | import torch
import torch.nn as nn
from ... import op_builder
from deepspeed import module_inject
from .diffusers_attention import DeepSpeedDiffusersAttention
from .bias_add import nhwc_bias_add
from .diffusers_2d_transformer import Diffusers2DTransformerConfig
spatial_cuda_module = None
def load_spatial_module():
global spatial_cuda_module
if spatial_cuda_module is None:
spatial_cuda_module = op_builder.SpatialInferenceBuilder().load()
return spatial_cuda_module | null |
10,431 | import math
import torch
from torch.autograd import Function
from ... import op_builder
import torch.nn as nn
from packaging import version as pkg_version
from deepspeed.utils.logging import log_dist
import triton  # needed by triton_flash_attn.forward (triton.cdiv and the kernel launch)
triton_flash_attn = None
class triton_flash_attn(torch.nn.Module):
def __init__(self, ):
super(triton_flash_attn, self).__init__()
def forward(self, q, k, v, sm_scale, block_128=True):
BLOCK = 128 if block_128 else 64
# shape constraints
Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
o = torch.empty_like(q)
grid = (triton.cdiv(q.shape[2], BLOCK), q.shape[0] * q.shape[1])
tmp = torch.empty((q.shape[0] * q.shape[1],
q.shape[2]),
device=q.device,
dtype=torch.float32)
num_warps = 4 if Lk <= 64 else 8
_fwd_kernel[grid](
q,
k,
v,
sm_scale,
tmp,
o,
q.stride(0),
q.stride(1),
q.stride(2),
q.stride(3),
k.stride(0),
k.stride(1),
k.stride(2),
k.stride(3),
v.stride(0),
v.stride(1),
v.stride(2),
v.stride(3),
o.stride(0),
o.stride(1),
o.stride(2),
o.stride(3),
k.shape[0],
k.shape[1],
k.shape[2],
BLOCK_M=BLOCK,
BLOCK_N=BLOCK,
BLOCK_DMODEL=Lk,
num_warps=num_warps,
num_stages=1,
)
return o
def load_triton_flash_attn():
global triton_flash_attn
try:
import triton
except ImportError:
raise ImportError("Please install triton 2.0+ or `pip install deepspeed[sd]`")
if pkg_version.parse(triton.__version__) < pkg_version.parse("2.0"):
raise ImportError("Please install triton 2.0+ or `pip install deepspeed[sd]`")
from .triton_ops import triton_flash_attn | null |
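A heavily hedged usage sketch; it assumes a CUDA device, triton >= 2.0, half-precision inputs laid out as [batch, heads, seq_len, head_dim], a head_dim the kernel supports (e.g. 64), and a seq_len that is a multiple of the chosen block size:
import torch

attn = triton_flash_attn()
q = torch.randn(1, 8, 256, 64, dtype=torch.half, device='cuda')
k, v = torch.randn_like(q), torch.randn_like(q)
out = attn(q, k, v, sm_scale=64 ** -0.5, block_128=True)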
10,432 | from typing import Optional
import torch
from ... import op_builder
spatial_cuda_module = None
def nhwc_bias_add(activation: torch.Tensor,
bias: torch.Tensor,
other: Optional[torch.Tensor] = None,
other_bias: Optional[torch.Tensor] = None) -> torch.Tensor:
global spatial_cuda_module
if spatial_cuda_module is None:
spatial_cuda_module = op_builder.SpatialInferenceBuilder().load()
if other is None:
return spatial_cuda_module.nhwc_bias_add(activation, bias)
elif other_bias is None:
return spatial_cuda_module.nhwc_bias_add_add(activation, bias, other)
else:
return spatial_cuda_module.nhwc_bias_add_bias_add(activation,
bias,
other,
other_bias) | null |
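A usage sketch, assuming NHWC-layout half-precision CUDA tensors with `bias` sized to the trailing channel dimension:
import torch

activation = torch.randn(2, 32, 32, 64, dtype=torch.half, device='cuda')  # NHWC
bias = torch.randn(64, dtype=torch.half, device='cuda')
out = nhwc_bias_add(activation, bias)           # activation + bias
other = torch.randn_like(activation)
out2 = nhwc_bias_add(activation, bias, other)   # fused with a second addend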
10,433 | import torch
import triton
import triton.language as tl
@triton.jit
def _fwd_kernel(
Q,
K,
V,
sm_scale,
TMP,
Out,
stride_qz,
stride_qh,
stride_qm,
stride_qk,
stride_kz,
stride_kh,
stride_kn,
stride_kk,
stride_vz,
stride_vh,
stride_vk,
stride_vn,
stride_oz,
stride_oh,
stride_om,
stride_on,
Z,
H,
N_CTX,
BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
):
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_DMODEL)
off_q = off_hz * stride_qh + offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qk
off_k = off_hz * stride_kh + offs_n[:, None] * stride_kn + offs_d[None, :] * stride_kk
    # NOTE: v offsets reuse q's strides, assuming v shares q's memory layout
    off_v = off_hz * stride_vh + offs_n[:, None] * stride_qm + offs_d[None, :] * stride_qk
# Initialize pointers to Q, K, V
q_ptrs = Q + off_q
k_ptrs = K + off_k
v_ptrs = V + off_v
# initialize pointer to m and l
t_ptrs = TMP + off_hz * N_CTX + offs_m
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# load q: it will stay in SRAM throughout
q = tl.load(q_ptrs)
# loop over k, v and update accumulator
for start_n in range(0, N_CTX, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(k_ptrs + start_n * stride_kn)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k, trans_b=True)
qk *= sm_scale
# -- compute m_ij, p, l_ij
m_ij = tl.max(qk, 1)
p = tl.exp(qk - m_ij[:, None])
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
m_i_new = tl.maximum(m_i, m_ij)
alpha = tl.exp(m_i - m_i_new)
beta = tl.exp(m_ij - m_i_new)
l_i_new = alpha * l_i + beta * l_ij
# -- update output accumulator --
# scale p
p_scale = beta / l_i_new
p = p * p_scale[:, None]
# scale acc
acc_scale = l_i / l_i_new * alpha
tl.store(t_ptrs, acc_scale)
acc_scale = tl.load(t_ptrs) # BUG: have to store and immediately load
acc = acc * acc_scale[:, None]
# update acc
v = tl.load(v_ptrs + start_n * stride_vk)
p = p.to(tl.float16)
acc += tl.dot(p, v)
# update m_i and l_i
l_i = l_i_new
m_i = m_i_new
# initialize pointers to output
offs_n = tl.arange(0, BLOCK_DMODEL)
off_o = off_hz * stride_oh + offs_m[:, None] * stride_om + offs_n[None, :] * stride_on
out_ptrs = Out + off_o
tl.store(out_ptrs, acc) | null |
10,434 | import torch
import triton
import triton.language as tl
def next_power_of_2(n):
n -= 1
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
n += 1
return n | null |
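The bit-smearing above copies the highest set bit into every lower position, so the final increment lands exactly on the next power of two; powers of two map to themselves. A quick check:
assert next_power_of_2(1) == 1
assert next_power_of_2(5) == 8
assert next_power_of_2(64) == 64
assert next_power_of_2(65) == 128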
10,435 | import torch
import triton
import triton.language as tl
def num_warps(n):
if n < 512:
return 4
if n < 2048:
return 8
return 16 | null |
10,436 | import torch
import triton
import triton.language as tl
@triton.jit
def _forward(X,
scale,
LUT,
RPE,
KP_M,
ATTN_M,
sizemax,
stride_zx,
stride_zrpe,
stride_hrpe,
stride_srpe,
stride_zkpm,
stride_zattnm,
**meta):
TN = meta['TN']
BLOCK = meta['BLOCK']
pidhm = tl.program_id(0)
pidz = tl.program_id(1)
# create index ranges
rxm = pidhm % BLOCK
rbm = pidhm // BLOCK
rxn = tl.arange(0, TN) % BLOCK
rbn = tl.arange(0, TN) // BLOCK
# extract information from LUT
header = LUT + rbm * 2
size = tl.load(header + 0)
offset = tl.load(header + 1)
check = rbn < size
rbmn = tl.where(check, rbn, size - 1)
# block id and column id
blockid = tl.load(LUT + offset + rbmn * 4 + 0)
columnid = tl.load(LUT + offset + rbmn * 4 + 1)
rowid = tl.load(LUT + offset + rbmn * 4 + 2)
headid = tl.load(LUT + offset + rbmn * 4 + 3)
# pointers to X
px = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
x = tl.load(px, mask=check, other=-float('inf'))
x = x.to(tl.float32)
# apply scale
if meta['APPLY_SCALE']:
x = x * scale
# apply RPE
if meta['APPLY_RPE']:
prpe = RPE + pidz * stride_zrpe + headid * stride_hrpe + columnid * BLOCK + rowid * BLOCK * stride_srpe + rxm * stride_srpe + rxn
rpe = tl.load(prpe, mask=check, other=0)
x = x + rpe
# apply key-padding mask
if meta['APPLY_KP_MASK']:
pkp_m = KP_M + pidz * stride_zkpm + columnid * BLOCK + rxn
kp_m = tl.load(pkp_m, mask=check, other=-float('inf'))
if meta['KP_MASK_MUL']:
kp_m = tl.where(kp_m == 0, -float('inf'), 0.)
x = x + kp_m
# apply attention mask
if meta['APPLY_ATTN_MASK']:
pattn_m = ATTN_M + columnid * BLOCK + rowid * BLOCK * stride_zattnm + rxm * stride_zattnm + rxn
attn_m = tl.load(pattn_m, mask=check, other=-float('inf'))
if meta['ATTN_MASK_MUL']:
attn_m = tl.where(attn_m == 0, -float('inf'), 0.)
x = x + attn_m
# computation
x = tl.softmax(x)
tl.store(px, x, mask=check) | null |
10,437 | import torch
import triton
import triton.language as tl
@triton.jit
def _backward(X, scale, DX, LUT, sizemax, stride_zx, stride_zdx, **meta):
pidhm = tl.program_id(0)
pidz = tl.program_id(1)
TN = meta['TN']
BLOCK = meta['BLOCK']
# create index ranges
rxm = pidhm % BLOCK
rbm = pidhm // BLOCK
rxn = tl.arange(0, TN) % BLOCK
rbn = tl.arange(0, TN) // BLOCK
# extract information from look-up table
header = LUT + rbm * 2
size = tl.load(header + 0)
offset = tl.load(header + 1)
# bounds checking on lut
check = rbn < size
rbmn = tl.where(check, rbn, size - 1)
# initialize pointers to block-sparse input
blockid = tl.load(LUT + offset + rbmn * 4)
X = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
DX = DX + pidz * stride_zdx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
# compute fused softmax backward
x = tl.load(X, mask=check, other=0)
dx = tl.load(DX, mask=check, other=0)
x = x.to(tl.float32)
dx = dx.to(tl.float32)
y = x * (dx - tl.sum(x * dx, 0)) * scale
tl.store(DX, y, mask=check) | null |
10,438 | import importlib
import torch
import triton
import triton.language as tl
import triton._C.libtriton as libtriton
@triton.jit
def _kernel(A,
B,
C,
stride_za,
stride_ha,
stride_ma,
stride_ka,
stride_zb,
stride_hb,
stride_kb,
stride_nb,
stride_zc,
stride_hc,
stride_mc,
stride_nc,
DS0,
DS1,
SDD_K,
SDD_off_width,
lut,
locks,
nlocks,
**meta):
TM = meta['TM']
TN = meta['TN']
TK = meta['TK']
TZ = meta['TZ']
BLOCK = meta['BLOCK']
#------------#
#- Prologue -#
#------------#
pid0 = tl.program_id(0)
pid1 = tl.program_id(1)
pidz = tl.program_id(2)
if meta['SDD']:
pid1 = pid1 + SDD_off_width
blockidm = tl.arange(0, TM) // BLOCK
blockidn = tl.arange(0, TN) // BLOCK
offlutm = blockidm * (TN // BLOCK) * 4
offlutn = blockidn * 4
header = lut + pid1 * (TM // BLOCK) * (TN // BLOCK) * 4
z = tl.load(header + 0)
i = tl.load(header + 1 + offlutm)
j = tl.load(header + 2 + offlutn)
AS1 = SDD_K // TZ
lockid = tl.where(TZ > 1, 1, 0)
offka = pid0 * AS1
offkb = pid0 * AS1
offmc = 0
offnc = 0
offpa = 0
offpb = 0
maxid = TZ
offhc = 0
offha = z
offhb = z
ram = i * BLOCK + (tl.arange(0, TM) % BLOCK)
rbn = j * BLOCK + (tl.arange(0, TN) % BLOCK)
else:
header = lut + pid0 * 6
offset = tl.load(header + 0)
AS1 = tl.load(header + 1)
column = tl.load(header + 2)
depth = tl.load(header + 3)
lockid = tl.load(header + 4)
maxid = tl.load(header + 5)
pinc = lut + offset
offhc = depth
if meta['DSD']:
# output offset
offnc = pid1 * TN
offmc = column * TM
offpc = 0
# dense input offset
offnb = pid1 * TN
offkb = tl.load(pinc)
offkb = tl.multiple_of(offkb, 8) # compiler hint
offpb = 0
# sparse input offset
offma = 0
offka = 0
offpa = tl.load(pinc + 1)
offpa = tl.multiple_of(offpa, 8) # compiler hint
offpa = offpa * BLOCK * BLOCK
offha = 0
offhb = depth
else:
# output offset
offmc = pid1 * TM
offnc = column * TN
offpc = 0
# dense input offset
offma = pid1 * TM
offka = tl.load(pinc)
offka = tl.multiple_of(offka, 8) # compiler hint
offpa = 0
# sparse input offset
offnb = 0
offkb = 0
offpb = tl.load(pinc + 1)
offpb = tl.multiple_of(offpb, 8) # compiler hint
offpb = offpb * BLOCK * BLOCK
offha = depth
offhb = 0
ram = offma + tl.arange(0, TM)
rbn = offnb + tl.arange(0, TN)
# initialize a, b pointers
rka = offka + tl.arange(0, TK)
rkb = offkb + tl.arange(0, TK)
pa = A + pidz * stride_za + offha * stride_ha + offpa + ram[:, None] * stride_ma + rka[None, :] * stride_ka
pb = B + pidz * stride_zb + offhb * stride_hb + offpb + rbn[None, :] * stride_nb + rkb[:, None] * stride_kb
if meta['DDS']:
checkam = ram[:, None] < DS0
else:
checkam = AS1 > 0
if meta['DSD']:
checkbn = rbn[None, :] < DS0
else:
checkbn = AS1 > 0
a = tl.load(pa, mask=checkam, other=0.)
b = tl.load(pb, mask=checkbn, other=0.)
## ---------------- ##
## Inner Loop ##
## ---------------- ##
acc = tl.zeros((TM, TN), dtype=tl.float32)
for k in range(AS1, 0, -TK):
acc += tl.dot(a, b)
if meta['SDD']:
inc_a = TK * stride_ka
inc_b = TK * stride_kb
else:
pinc += 2
if meta['DSD']:
inc_b = tl.load(pinc)
inc_a = tl.load(pinc + 1)
inc_b = tl.multiple_of(inc_b, 8)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = inc_b * stride_kb
if meta['DDS']:
inc_a = tl.load(pinc)
inc_b = tl.load(pinc + 1)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = tl.multiple_of(inc_b, 8)
inc_a = inc_a * stride_ka
pa += inc_a
pb += inc_b
# pre-fetch
checkak = k > TK
checkbk = k > TK
checka = checkam & checkak
checkb = checkbn & checkbk
a = tl.load(pa, mask=checka)
b = tl.load(pb, mask=checkb)
c = acc.to(C.dtype.element_ty)
if meta['SDD']:
checkc = True
rr_blockidm = tl.arange(0, TM) // BLOCK
rr_blockidn = tl.arange(0, TN) // BLOCK
rr_offlutm = rr_blockidm * (TN // BLOCK) * 4
rr_offlutn = rr_blockidn * 4
off_bkid = 3 + rr_offlutm[:, None] + rr_offlutn[None, :]
bkid = tl.load(header + off_bkid)
offpc = bkid * BLOCK * BLOCK
rcm = tl.arange(0, TM) % BLOCK
rcn = tl.arange(0, TN) % BLOCK
else:
rcm = offmc + tl.arange(0, TM)
rcn = offnc + tl.arange(0, TN)
if meta['DSD']:
checkc = rcn[None, :] < DS0
if meta['DDS']:
checkc = rcm[:, None] < DS0
pc = C + offpc + offhc * stride_hc + pidz * stride_zc + rcm[:, None] * stride_mc + rcn[None, :] * stride_nc
# write-back directly
if lockid == 0:
tl.store(pc, c, mask=checkc)
# accumulate partial results using spin-locks
else:
plock = locks + tl.program_id(2) * nlocks * tl.num_programs(1) + tl.program_id(
1) * nlocks + lockid - 1
pcount = plock + tl.num_programs(2) * tl.num_programs(1) * nlocks
while tl.atomic_cas(plock, 0, 1) == 1:
pass
count = tl.load(pcount)
if count == 0:
tl.store(pc, c, mask=checkc)
else:
d = tl.load(pc, mask=checkc)
tl.store(pc, d + c, mask=checkc)
tl.atomic_xchg(pcount, (count + 1) % maxid)
tl.atomic_xchg(plock, 0) | null |
10,439 |
def check_tb_availability():
try:
# torch.utils.tensorboard will fail if `tensorboard` is not available,
# see their docs for more details: https://pytorch.org/docs/1.8.0/tensorboard.html
import tensorboard # noqa: F401
except ImportError:
print('If you want to use tensorboard logging, please `pip install tensorboard`')
raise | null |
10,440 |
def check_wandb_availability():
try:
import wandb # noqa: F401
except ImportError:
print(
'If you want to use wandb logging, please `pip install wandb` and follow the instructions at https://docs.wandb.ai/quickstart'
)
raise | null |
from typing import List, Optional, Tuple, Dict
import torch
from .layer import MoE
class MoE(torch.nn.Module):
"""Initialize an MoE layer.
Arguments:
hidden_size (int): the hidden dimension of the model, importantly this is also the input and output dimension.
expert (torch.nn.Module): the torch module that defines the expert (e.g., MLP, torch.linear).
num_experts (int, optional): default=1, the total number of experts per layer.
ep_size (int, optional): default=1, number of ranks in the expert parallel world or group.
k (int, optional): default=1, top-k gating value, only supports k=1 or k=2.
capacity_factor (float, optional): default=1.0, the capacity of the expert at training time.
eval_capacity_factor (float, optional): default=1.0, the capacity of the expert at eval time.
min_capacity (int, optional): default=4, the minimum capacity per expert regardless of the capacity_factor.
use_residual (bool, optional): default=False, make this MoE layer a Residual MoE (https://arxiv.org/abs/2201.05596) layer.
noisy_gate_policy (str, optional): default=None, noisy gate policy, valid options are 'Jitter', 'RSample' or 'None'.
drop_tokens (bool, optional): default=True, whether to drop tokens - (setting to False is equivalent to infinite capacity).
use_rts (bool, optional): default=True, whether to use Random Token Selection.
use_tutel (bool, optional): default=False, whether to use Tutel optimizations (if installed).
enable_expert_tensor_parallelism (bool, optional): default=False, whether to use tensor parallelism for experts
"""
def __init__(self,
hidden_size,
expert,
num_experts=1,
ep_size=1,
k=1,
capacity_factor=1.,
eval_capacity_factor=1.,
min_capacity=4,
use_residual=False,
                 noisy_gate_policy: Optional[str] = None,
drop_tokens: bool = True,
use_rts=True,
use_tutel: bool = False,
enable_expert_tensor_parallelism: bool = False):
super(MoE, self).__init__()
self.use_residual = use_residual
self.enable_expert_tensor_parallelism = enable_expert_tensor_parallelism
assert num_experts % ep_size == 0, f"Number of experts ({num_experts}) should be divisible by expert parallel size ({ep_size})"
self.ep_size = ep_size
self.expert_group_name = f"ep_size_{self.ep_size}"
self.num_experts = num_experts
self.num_local_experts = num_experts // self.ep_size
log_dist(
f'Creating MoE layer with num_experts: {num_experts} | num_local_experts: {self.num_local_experts} | expert_parallel_size: {self.ep_size}',
[0])
assert noisy_gate_policy is None or noisy_gate_policy in ['None', 'Jitter', 'RSample'], \
'Unsupported noisy_gate_policy: ' + noisy_gate_policy
experts = Experts(expert, self.num_local_experts, self.expert_group_name)
self.deepspeed_moe = MOELayer(TopKGate(hidden_size,
num_experts,
k,
capacity_factor,
eval_capacity_factor,
min_capacity,
noisy_gate_policy,
drop_tokens,
use_rts),
experts,
self.expert_group_name,
self.ep_size,
self.num_local_experts,
use_tutel=use_tutel)
if self.use_residual:
self.mlp = expert
# coefficient is used for weighted sum of the output of expert and mlp
self.coefficient = torch.nn.Linear(hidden_size, 2)
def set_deepspeed_parallelism(self):
self._create_process_groups()
def _create_process_groups(self):
# Create process group for a layer if needed
if self.expert_group_name not in groups._get_expert_parallel_group_dict():
print(
f"No existing process group found, creating a new group named: {self.expert_group_name}"
)
if (groups.mpu is None) or (not self.enable_expert_tensor_parallelism):
# Condition 1 - no groups.mpu means no tensor parallelism
# Condition 2 - disabling expert tensor parallelism on purpose
groups._create_expert_and_data_parallel(self.ep_size)
else:
# expert tensor parallelism is enabled
groups._create_expert_data_and_model_parallel(self.ep_size,
mpu=groups.mpu)
# Set the group handle for the MOELayer (deepspeed_moe) object
self.deepspeed_moe._set_ep_group(
groups._get_expert_parallel_group(self.expert_group_name))
def forward(self, hidden_states, used_token=None):
""" MoE forward
Arguments:
hidden_states (Tensor): input to the layer
used_token (Tensor, optional): default: None, mask only used tokens
Returns:
A tuple including output, gate loss, and expert count.
* output (Tensor): output of the model
* l_aux (Tensor): gate loss value
* exp_counts (int): expert count
"""
output = self.deepspeed_moe(hidden_states, used_token)
if self.use_residual:
# Residual MoE
output_mlp = self.mlp(hidden_states)
if type(output_mlp) is tuple:
output_mlp = output_mlp[0] # Ignore the bias term for now
coef = self.coefficient(hidden_states)
coef = torch.nn.functional.softmax(coef, dim=-1)
output = output * coef[..., 0:1] + output_mlp * coef[..., 1:]
return output, self.deepspeed_moe.l_aux, self.deepspeed_moe.exp_counts
def has_moe_layers(m):
has_moe = False
num_experts = 0
for _, module in m.named_modules():
if isinstance(module, MoE):
has_moe = True
num_experts = module.num_experts
break
return has_moe, num_experts | null |
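For illustration, a minimal sketch of wiring the layer into a model. ExpertMLP is a hypothetical expert module, and the sketch assumes the script was launched under a distributed launcher so that deepspeed.init_distributed() can set up a backend:
import torch
import torch.nn as nn
import deepspeed
from deepspeed.moe.layer import MoE

class ExpertMLP(nn.Module):
    def __init__(self, hidden_size):
        super().__init__()
        self.fc1 = nn.Linear(hidden_size, 4 * hidden_size)
        self.fc2 = nn.Linear(4 * hidden_size, hidden_size)

    def forward(self, x):
        # experts consume and produce the model hidden dimension
        return self.fc2(torch.relu(self.fc1(x)))

deepspeed.init_distributed()  # assumption: RANK/WORLD_SIZE env vars provided by a launcher
hidden = 128
moe = MoE(hidden_size=hidden, expert=ExpertMLP(hidden), num_experts=4, k=1)
moe.set_deepspeed_parallelism()  # creates the expert-parallel process groups
x = torch.randn(8, 16, hidden)
output, l_aux, exp_counts = moe(x)  # l_aux is added to the training loss for load balancing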
10,442 | from typing import List, Tuple, Dict
import torch
from .layer import MoE
def is_moe_param(param: torch.Tensor) -> bool:
if hasattr(param, "allreduce") and not param.allreduce:
return True
return False
def split_params_into_shared_and_expert_params(
    params: List[torch.nn.Parameter]
) -> Tuple[List[torch.nn.Parameter], List[torch.nn.Parameter]]:
shared_params, expert_params = [], []
for p in params:
if is_moe_param(p):
expert_params.append(p)
else:
shared_params.append(p)
return shared_params, expert_params | null |
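A small sketch of the tagging convention: DeepSpeed marks expert parameters with allreduce=False, which is exactly what is_moe_param checks, so we can mimic it by hand:
import torch

shared = torch.nn.Parameter(torch.randn(4))
expert = torch.nn.Parameter(torch.randn(4))
expert.allreduce = False  # the attribute is_moe_param() looks for

shared_params, expert_params = split_params_into_shared_and_expert_params([shared, expert])
assert shared_params[0] is shared and expert_params[0] is expert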
10,443 | from typing import List, Tuple, Dict
import torch
from .layer import MoE
def is_moe_param(param: torch.Tensor) -> bool:
if hasattr(param, "allreduce") and not param.allreduce:
return True
return False
The provided code snippet includes necessary dependencies for implementing the `split_params_grads_into_shared_and_expert_params` function. Write a Python function `def split_params_grads_into_shared_and_expert_params( group: List[torch.nn.Parameter]) -> Tuple[List[torch.Tensor], List[torch.Tensor]]` to solve the following problem:
Split grads of parameters into grads of non-expert params and grads of expert params. This is useful while computing grad-norms for clipping and overflow detection. Args: group (List[torch.nn.Parameter]): The group of parameters to split. Returns: Tuple[List[torch.Tensor], List[torch.Tensor]]: list of gradients for non-MoE params, list of gradients of MoE params
Here is the function:
def split_params_grads_into_shared_and_expert_params(
    group: List[torch.nn.Parameter]
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
    """Split grads of parameters into grads of non-expert params
    and grads of expert params. This is useful while computing
    grad-norms for clipping and overflow detection.
    Args:
        group (List[torch.nn.Parameter]):
            The group of parameters to split
    Returns:
        Tuple[List[torch.Tensor], List[torch.Tensor]]:
            list of gradients for non-MoE params, list of gradients of MoE params
    """
expert_grads = []
shared_grads = []
for p in group:
if p.grad is not None:
if is_moe_param(p):
expert_grads.append(p.grad.to(p.dtype))
else:
shared_grads.append(p.grad.to(p.dtype))
    return shared_grads, expert_grads | Split grads of parameters into grads of non-expert params and grads of expert params. This is useful while computing grad-norms for clipping and overflow detection. Args: group (List[torch.nn.Parameter]): The group of parameters to split. Returns: Tuple[List[torch.Tensor], List[torch.Tensor]]: list of gradients for non-MoE params, list of gradients of MoE params |
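A sketch showing that parameters without gradients are skipped entirely:
import torch

p1 = torch.nn.Parameter(torch.randn(3)); p1.grad = torch.ones(3)
p2 = torch.nn.Parameter(torch.randn(3)); p2.allreduce = False; p2.grad = torch.ones(3)
p3 = torch.nn.Parameter(torch.randn(3))  # .grad is None, so it lands in neither list

shared_grads, expert_grads = split_params_grads_into_shared_and_expert_params([p1, p2, p3])
assert len(shared_grads) == 1 and len(expert_grads) == 1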
10,444 | from typing import List, Tuple, Dict
import torch
from .layer import MoE
def is_moe_param(param: torch.Tensor) -> bool:
if hasattr(param, "allreduce") and not param.allreduce:
return True
return False
The provided code snippet includes necessary dependencies for implementing the `split_params_into_different_moe_groups_for_optimizer` function. Write a Python function `def split_params_into_different_moe_groups_for_optimizer(param_groups: Tuple[Dict], max_group_size=178956971 ) -> Tuple[Dict]` to solve the following problem:
Split parameters into different MoE groups for optimizer Args: param_groups (Tuple[Dict]): The list of parameter groups to split max_group_size (int, optional): maximum number of elements per flattened MoE group; groups exceeding this are split into chunks. Defaults to 178956971. Returns: Tuple[Dict]: list of MoE/non-MoE groups for optimizer
Here is the function:
def split_params_into_different_moe_groups_for_optimizer(param_groups: Tuple[Dict],
max_group_size=178956971
) -> Tuple[Dict]:
"""Split parameters into different MoE groups for optimizer
Args:
param_groups (Tuple[Dict]):
The list of parameter groups to split
Returns:
Tuple[Dict]:
list of MoE/non-MoE groups for optimizer
"""
if isinstance(param_groups, tuple):
param_groups = list(param_groups) # Tuple cannot be modified
elif isinstance(param_groups, dict):
param_groups = [param_groups]
elif not isinstance(param_groups, list):
raise ValueError(f"Unknown param group type of {type(param_groups)}")
# gather all data parallel group names
data_parallel_group_names = set()
for param_group in param_groups:
for param in param_group["params"]:
if is_moe_param(param):
data_parallel_group_names.add(param.group_name)
data_parallel_group_names = list(data_parallel_group_names)
group_moe = {}
# Create the param MoE groups, leave param assign to next step
for param_group in param_groups:
group_moe[param_group['name']] = {}
for key in data_parallel_group_names:
group_moe[param_group['name']][key] = {}
group_moe[param_group['name']][key]['name'] = key
group_moe[param_group['name']][key]['moe'] = True
for ori_key in param_group.keys():
if ori_key != 'name':
if ori_key == 'params':
group_moe[param_group['name']][key][ori_key] = []
else:
group_moe[
param_group['name']][key][ori_key] = param_group[ori_key]
# Assign param
for param_group in param_groups:
new_params = []
for param in param_group['params']:
if is_moe_param(param):
group_moe[param_group['name']][param.group_name]['params'].append(param)
# param_group['params'].remove(param)
else:
new_params.append(param)
param_group['params'] = new_params
# Flatten the moe groups
if max_group_size is not None:
for k, v in group_moe.items():
for k1, v1 in v.items():
cur_group = []
all_groups = []
size_of_cur_group = 0
for param in v1['params']:
if size_of_cur_group + param.numel() <= max_group_size:
cur_group.append(param)
size_of_cur_group += param.numel()
else:
all_groups.append(cur_group)
cur_group = [param]
size_of_cur_group = param.numel()
if cur_group:
all_groups.append(cur_group)
for group in all_groups:
new_dict = {}
for key, val in v1.items():
if key != 'params':
new_dict[key] = val
new_dict['params'] = group
param_groups.append(new_dict)
else:
for k, v in group_moe.items():
for k1, v1 in v.items():
param_groups.append(v1)
return tuple(param_groups) | Split parameters into different MoE groups for optimizer Args: param_groups (Tuple[Dict]): The list of parameter groups to split Returns: Tuple[Dict]: list of MoE/non-MoE groups for optimizer |
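A hedged sketch of the optimizer path; group_name is normally attached by the MoE layer when experts are created, so it is set manually here purely for illustration:
import torch

shared_p = torch.nn.Parameter(torch.randn(4))
expert_p = torch.nn.Parameter(torch.randn(4))
expert_p.allreduce = False
expert_p.group_name = "ep_size_2"  # normally set by DeepSpeed's expert machinery

param_groups = split_params_into_different_moe_groups_for_optimizer(
    [{'name': 'base', 'params': [shared_p, expert_p], 'lr': 1e-3}])
# result: one non-MoE group plus one 'moe': True group per expert-parallel
# group name; torch optimizers tolerate the extra keys, so the groups can
# be handed straight to an optimizer
optimizer = torch.optim.AdamW(list(param_groups), lr=1e-3)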
10,445 | import torch
import deepspeed
The provided code snippet includes necessary dependencies for implementing the `_gather_tokens` function. Write a Python function `def _gather_tokens(input_, dim=0)` to solve the following problem:
Gather tensors and concatenate them along a dimension
Here is the function:
def _gather_tokens(input_, dim=0):
"""Gather tensors and concatenate them along a dimension"""
mpu = deepspeed.utils.groups.mpu
input_ = input_.contiguous()
# Size and dimension.
rank = mpu.get_tensor_model_parallel_rank()
tensor_list = [
torch.empty_like(input_)
for _ in range(mpu.get_tensor_model_parallel_world_size())
]
tensor_list[rank] = input_
deepspeed.comm.all_gather(tensor_list,
input_,
group=mpu.get_tensor_model_parallel_group())
# Note: torch.cat already creates a contiguous tensor.
output = torch.cat(tensor_list, dim=dim).contiguous()
return output | Gather tensors and concatenate them along a dimension |
10,446 | import torch
import deepspeed
The provided code snippet includes necessary dependencies for implementing the `_drop_tokens` function. Write a Python function `def _drop_tokens(input_, dim=0)` to solve the following problem:
Divide a tensor among the tensor parallel ranks
Here is the function:
def _drop_tokens(input_, dim=0):
"""Divide a tensor among the tensor parallel ranks"""
mpu = deepspeed.utils.groups.mpu
total_chunks = mpu.get_tensor_model_parallel_world_size()
this_chunk = mpu.get_tensor_model_parallel_rank()
assert input_.shape[dim] % total_chunks == 0, f"input dimension {dim} ({input_.shape[dim]}) is not divisible by tensor parallel world size ({total_chunks})"
chunk_size = input_.shape[dim] // total_chunks
return torch.narrow(input_, dim, this_chunk * chunk_size, chunk_size) | Divide a tensor among the tensor parallel ranks |
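The heart of _drop_tokens is a single torch.narrow; a standalone illustration with a world size of 4 and rank 1, no distributed setup needed:
import torch

x = torch.arange(8).reshape(8, 1)
total_chunks, this_chunk = 4, 1  # stand-ins for the mpu world size and rank
chunk_size = x.shape[0] // total_chunks
mine = torch.narrow(x, 0, this_chunk * chunk_size, chunk_size)  # rows 2 and 3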
10,447 | import torch
import deepspeed
class _GatherTokens(torch.autograd.Function):
    """All gather tokens among the tensor parallel ranks"""
    @staticmethod
    def symbolic(graph, input_, dim):
        return _gather_tokens(input_, dim)

    @staticmethod
    def forward(ctx, input_, dim):
        ctx.dim = dim
        return _gather_tokens(input_, dim)

    @staticmethod
    def backward(ctx, grad_output):
        return _drop_tokens(grad_output, ctx.dim), None
def gather_tokens(input_, dim=0):
mpu = deepspeed.utils.groups.mpu
if mpu is None or mpu.get_tensor_model_parallel_world_size() == 1:
# no tensor parallelism for non-experts
return input_
return _GatherTokens.apply(input_, dim) | null |
10,448 | import torch
import deepspeed
class _DropTokens(torch.autograd.Function):
    "Divide tokens equally among the tensor parallel ranks"
    @staticmethod
    def symbolic(graph, input_, dim):
        return _drop_tokens(input_, dim)

    @staticmethod
    def forward(ctx, input_, dim):
        ctx.dim = dim
        return _drop_tokens(input_, dim)

    @staticmethod
    def backward(ctx, input_):
        return _gather_tokens(input_, ctx.dim), None
def drop_tokens(input_, dim=0):
mpu = deepspeed.utils.groups.mpu
if mpu is None or mpu.get_tensor_model_parallel_world_size() == 1:
# no tensor parallelism for non-experts
return input_
return _DropTokens.apply(input_, dim) | null |
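When no model-parallel unit is configured (deepspeed.utils.groups.mpu is None, the single-process default), both wrappers are pass-throughs, so this round trip — taking gather_tokens from the previous snippet — is a safe sanity check:
import torch

x = torch.randn(4, 8)
y = gather_tokens(drop_tokens(x))
assert torch.equal(x, y)  # identity without tensor parallelism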
10,449 | from deepspeed.utils.timer import SynchronizedWallClockTimer
from deepspeed.utils import logger
from typing import Callable, Dict, TYPE_CHECKING, Any, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import Module
import torch.nn.functional as F
from deepspeed.utils import groups
from .mappings import drop_tokens, gather_tokens
uniform_map: Dict[torch.device, Callable] = {}
from deepspeed import comm as dist
The provided code snippet includes necessary dependencies for implementing the `multiplicative_jitter` function. Write a Python function `def multiplicative_jitter(x, device: torch.device, epsilon=1e-2)` to solve the following problem:
Modified from the Switch Transformer paper and the Mesh TensorFlow transformer implementation. Multiply values by a random number between 1-epsilon and 1+epsilon. Makes models more resilient to rounding errors introduced by bfloat16. This seems particularly important for logits. Args: x: a torch.tensor device: torch.device epsilon: a floating point value Returns: a jittered x.
Here is the function:
def multiplicative_jitter(x, device: torch.device, epsilon=1e-2):
"""
    Modified from the Switch Transformer paper and the Mesh TensorFlow
    transformer implementation.
Multiply values by a random number between 1-epsilon and 1+epsilon.
Makes models more resilient to rounding errors introduced by bfloat16.
This seems particularly important for logits.
Args:
x: a torch.tensor
device: torch.device
epsilon: a floating point value
Returns:
a jittered x.
"""
if epsilon == 0:
return x
uniform = uniform_map.get(device)
if uniform is None:
uniform = torch.distributions.uniform.Uniform(
low=torch.tensor(1.0 - epsilon,
device=device),
high=torch.tensor(1.0 + epsilon,
device=device)).rsample # type: ignore
uniform_map[device] = uniform
return x * uniform(x.shape) | Modified from switch transformer paper. mesh transformers Multiply values by a random number between 1-epsilon and 1+epsilon. Makes models more resilient to rounding errors introduced by bfloat16. This seems particularly important for logits. Args: x: a torch.tensor device: torch.device epsilon: a floating point value Returns: a jittered x. |
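A quick check of the jitter bounds; with epsilon=1e-2 every element is scaled by a factor drawn from [0.99, 1.01):
import torch

x = torch.ones(1000)
y = multiplicative_jitter(x, device=torch.device('cpu'), epsilon=1e-2)
assert ((y >= 0.99) & (y <= 1.01)).all()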
10,450 | from deepspeed.utils.timer import SynchronizedWallClockTimer
from deepspeed.utils import logger
from typing import Callable, Dict, TYPE_CHECKING, Any, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import Module
import torch.nn.functional as F
from deepspeed.utils import groups
from .mappings import drop_tokens, gather_tokens
exp_selection_uniform_map: Dict[torch.device, Callable] = {}
gumbel_map: Dict[torch.device, Callable] = {}
def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor:
gumbel = gumbel_map.get(device)
if gumbel is None:
one = torch.tensor(1.0, device=device)
zero = torch.tensor(0.0, device=device)
gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore
gumbel_map[device] = gumbel
return gumbel(shape)
from deepspeed import comm as dist
USE_EINSUM = True
def einsum(rule, a, b):
    if USE_EINSUM:
return torch.einsum(rule, a, b)
elif rule == 's,se->se':
return a.reshape(a.shape[0], -1) * b
elif rule == 'se,sc->sec':
return a.unsqueeze(2) * b.unsqueeze(1)
elif rule == 'se,se->s':
return torch.bmm(a.unsqueeze(1), b.unsqueeze(2)).reshape(-1)
elif rule == 'sec,sm->ecm':
s = a.shape[0]
e = a.shape[1]
c = a.shape[2]
m = b.shape[1]
return torch.matmul(a.reshape(s, -1).t(), b).reshape(e, c, m)
elif rule == 'sec,ecm->sm':
return torch.matmul(a.reshape(a.shape[0], -1), b.reshape(-1, b.shape[-1]))
elif rule == 'ks,ksm->sm':
k = b.shape[0]
s = b.shape[1]
m = b.shape[2]
# [k, s] -> [s, k] -> [s, 1, k]
a = a.t().unsqueeze(1)
# [k,s,m] -> [k, sm] -> [sm, k] -> [s, m, k]
b = b.reshape(k, -1).t().reshape(s, m, k)
# bmm([s, 1, k], [s, m, k]^t) -> [s, m, 1]
return torch.bmm(a, b.transpose(1, 2)).squeeze(2)
else:
return torch.einsum(rule, a, b)
def _capacity(gates: Tensor, capacity_factor: Tensor, min_capacity: Tensor) -> Tensor:
# gates has shape of SE
num_tokens = gates.shape[0]
num_experts = gates.shape[1]
# to(torch.int64) works around a bug in torch.onnx.export:
# it should cast k to int64 when converting torch.topk but it doesn't.
capacity = torch.ceil((num_tokens / num_experts) * capacity_factor).to(torch.int64)
if capacity < min_capacity:
capacity = min_capacity.to(torch.int64)
return capacity
def _top_idx(source, k):
return torch.topk(source, k=k, dim=0)[1]
def _one_hot_to_float(x, num_classes):
return F.one_hot(x, num_classes=num_classes).float()
The provided code snippet includes necessary dependencies for implementing the `top1gating` function. Write a Python function `def top1gating(logits: Tensor, capacity_factor: float, min_capacity: int, used_token: Tensor = None, noisy_gate_policy: Optional[str] = None, drop_tokens: bool = True, use_rts: bool = True, use_tutel: bool = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]` to solve the following problem:
Implements Top1Gating on logits.
Here is the function:
def top1gating(logits: Tensor,
capacity_factor: float,
min_capacity: int,
used_token: Tensor = None,
noisy_gate_policy: Optional[str] = None,
drop_tokens: bool = True,
use_rts: bool = True,
use_tutel: bool = False) -> Tuple[Tensor,
Tensor,
Tensor,
Tensor]:
"""Implements Top1Gating on logits."""
if noisy_gate_policy == 'RSample':
logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)
# everything is in fp32 in this function
gates = F.softmax(logits, dim=1)
capacity = _capacity(gates,
torch.tensor(capacity_factor),
torch.tensor(min_capacity))
# Create a mask for 1st's expert per token
# noisy gating
indices1_s = torch.argmax(
logits_w_noise if noisy_gate_policy == 'RSample' else gates,
dim=1)
num_experts = int(gates.shape[1])
mask1 = F.one_hot(indices1_s, num_classes=num_experts)
# mask only used tokens
if used_token is not None:
mask1 = einsum("s,se->se", used_token, mask1)
# gating decisions
exp_counts = torch.sum(mask1, dim=0).detach().to('cpu')
# if we don't want to drop any tokens
if not drop_tokens:
new_capacity = torch.max(exp_counts).to(logits.device)
dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=dist.get_world_group())
capacity = new_capacity
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.float(), dim=0)
l_aux = torch.sum(me * ce) * num_experts
# Random Token Selection
if use_rts:
uniform = exp_selection_uniform_map.get(logits.device)
if uniform is None:
uniform = torch.distributions.uniform.Uniform(
low=torch.tensor(0.0,
device=logits.device),
high=torch.tensor(1.0,
device=logits.device)).rsample
exp_selection_uniform_map[logits.device] = uniform
mask1_rand = mask1 * uniform(mask1.shape)
else:
mask1_rand = mask1
    assert logits.shape[0] >= min_capacity, "No. of tokens (batch-size) should be greater than or equal to min_capacity. Either set min_capacity to 0 or increase your batch size."
top_idx = _top_idx(mask1_rand, capacity)
new_mask1 = mask1 * torch.zeros_like(mask1).scatter_(0, top_idx, 1)
mask1 = new_mask1
if use_tutel:
# Tutel doesn't support index values masked with zero
# so we need to replace masked indices with -1
indices_mask = mask1.sum(dim=1) * num_experts - 1
indices1_s = torch.min(indices1_s, indices_mask)
# Compute locations in capacity buffer
if use_tutel:
locations1 = tutel_moe.fast_cumsum_sub_one(mask1)
else:
locations1 = torch.cumsum(mask1, dim=0) - 1
if use_tutel:
gates1_s = (gates * mask1).sum(dim=1)
locations1_s = torch.sum(locations1 * mask1, dim=1)
return l_aux, capacity, num_experts, [indices1_s,], [locations1_s,], [gates1_s,], exp_counts
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
# Normalize gate probabilities
mask1_float = mask1.float()
gates = gates * mask1_float
locations1_sc = _one_hot_to_float(locations1_s, capacity)
combine_weights = einsum("se,sc->sec", gates, locations1_sc)
dispatch_mask = combine_weights.bool()
return l_aux, combine_weights, dispatch_mask, exp_counts | Implements Top1Gating on logits. |
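A minimal sketch on random CPU logits with the default drop-token path (no Tutel, and no distributed calls are hit); S=32 tokens routed over E=4 experts gives a capacity of ceil(32/4 * 1.0) = 8:
import torch

logits = torch.randn(32, 4)
l_aux, combine_weights, dispatch_mask, exp_counts = top1gating(
    logits, capacity_factor=1.0, min_capacity=4)
# combine_weights: [S, E, C] float, dispatch_mask: [S, E, C] bool, exp_counts: [E]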
10,451 | from deepspeed.utils.timer import SynchronizedWallClockTimer
from deepspeed.utils import logger
from typing import Callable, Dict, TYPE_CHECKING, Any, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import Module
import torch.nn.functional as F
from deepspeed.utils import groups
from .mappings import drop_tokens, gather_tokens
gumbel_map: Dict[torch.device, Callable] = {}
def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor:
gumbel = gumbel_map.get(device)
if gumbel is None:
one = torch.tensor(1.0, device=device)
zero = torch.tensor(0.0, device=device)
gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore
gumbel_map[device] = gumbel
return gumbel(shape)
from deepspeed import comm as dist
USE_EINSUM = True
def einsum(rule, a, b):
    if USE_EINSUM:
return torch.einsum(rule, a, b)
elif rule == 's,se->se':
return a.reshape(a.shape[0], -1) * b
elif rule == 'se,sc->sec':
return a.unsqueeze(2) * b.unsqueeze(1)
elif rule == 'se,se->s':
return torch.bmm(a.unsqueeze(1), b.unsqueeze(2)).reshape(-1)
elif rule == 'sec,sm->ecm':
s = a.shape[0]
e = a.shape[1]
c = a.shape[2]
m = b.shape[1]
return torch.matmul(a.reshape(s, -1).t(), b).reshape(e, c, m)
elif rule == 'sec,ecm->sm':
return torch.matmul(a.reshape(a.shape[0], -1), b.reshape(-1, b.shape[-1]))
elif rule == 'ks,ksm->sm':
k = b.shape[0]
s = b.shape[1]
m = b.shape[2]
# [k, s] -> [s, k] -> [s, 1, k]
a = a.t().unsqueeze(1)
# [k,s,m] -> [k, sm] -> [sm, k] -> [s, m, k]
b = b.reshape(k, -1).t().reshape(s, m, k)
# bmm([s, 1, k], [s, m, k]^t) -> [s, m, 1]
return torch.bmm(a, b.transpose(1, 2)).squeeze(2)
else:
return torch.einsum(rule, a, b)
def _capacity(gates: Tensor, capacity_factor: Tensor, min_capacity: Tensor) -> Tensor:
# gates has shape of SE
num_tokens = gates.shape[0]
num_experts = gates.shape[1]
# to(torch.int64) works around a bug in torch.onnx.export:
# it should cast k to int64 when converting torch.topk but it doesn't.
capacity = torch.ceil((num_tokens / num_experts) * capacity_factor).to(torch.int64)
if capacity < min_capacity:
capacity = min_capacity.to(torch.int64)
return capacity
def _one_hot_to_float(x, num_classes):
return F.one_hot(x, num_classes=num_classes).float()
The provided code snippet includes necessary dependencies for implementing the `top2gating` function. Write a Python function `def top2gating(logits: Tensor, capacity_factor: float, min_capacity: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]` to solve the following problem:
Implements Top2Gating on logits.
Here is the function:
def top2gating(logits: Tensor,
capacity_factor: float,
min_capacity: int) -> Tuple[Tensor,
Tensor,
Tensor,
Tensor]:
"""Implements Top2Gating on logits."""
# everything is in fp32 in this function
gates = F.softmax(logits, dim=1)
capacity = _capacity(gates,
torch.tensor(capacity_factor * 2),
torch.tensor(min_capacity))
# Create a mask for 1st's expert per token
indices1_s = torch.argmax(gates, dim=1)
num_experts = int(gates.shape[1])
mask1 = F.one_hot(indices1_s, num_classes=num_experts)
# Create a mask for 2nd's expert per token using Gumbel-max trick
# https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/
logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)
# Replace top-expert with min value
logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float("-inf"))
indices2_s = torch.argmax(logits_except1, dim=1)
mask2 = F.one_hot(indices2_s, num_classes=num_experts)
# Compute locations in capacity buffer
locations1 = torch.cumsum(mask1, dim=0) - 1
locations2 = torch.cumsum(mask2, dim=0) - 1
# Update 2nd's location by accounting for locations of 1st
locations2 += torch.sum(mask1, dim=0, keepdim=True)
# gating decisions
exp_counts = torch.sum(mask1, dim=0).detach().to('cpu')
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.float(), dim=0)
l_aux = torch.mean(me * ce) * num_experts * num_experts
# Remove locations outside capacity from mask
mask1 *= torch.lt(locations1, capacity)
mask2 *= torch.lt(locations2, capacity)
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
locations2_s = torch.sum(locations2 * mask2, dim=1)
# Normalize gate probabilities
mask1_float = mask1.float()
mask2_float = mask2.float()
gates1_s = einsum("se,se->s", gates, mask1_float)
gates2_s = einsum("se,se->s", gates, mask2_float)
denom_s = gates1_s + gates2_s
# Avoid divide-by-zero
denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)
gates1_s /= denom_s
gates2_s /= denom_s
# Calculate combine_weights and dispatch_mask
gates1 = einsum("s,se->se", gates1_s, mask1_float)
gates2 = einsum("s,se->se", gates2_s, mask2_float)
locations1_sc = _one_hot_to_float(locations1_s, capacity)
locations2_sc = _one_hot_to_float(locations2_s, capacity)
combine1_sec = einsum("se,sc->sec", gates1, locations1_sc)
combine2_sec = einsum("se,sc->sec", gates2, locations2_sc)
combine_weights = combine1_sec + combine2_sec
dispatch_mask = combine_weights.bool()
return l_aux, combine_weights, dispatch_mask, exp_counts | Implements Top2Gating on logits. |
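The same sketch for the top-2 gate; note the capacity is computed from capacity_factor * 2 so each token's second expert has room:
import torch

logits = torch.randn(32, 4)
l_aux, combine_weights, dispatch_mask, exp_counts = top2gating(
    logits, capacity_factor=1.0, min_capacity=4)
# the gate probabilities of each token's two chosen experts are renormalized
# to sum to 1 before building combine_weights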
10,452 | import os
import sys
import json
import base64
import argparse
import subprocess
import collections
from copy import deepcopy
import signal
import time
import torch.cuda
from .multinode_runner import PDSHRunner, OpenMPIRunner, MVAPICHRunner, SlurmRunner
from .constants import PDSH_LAUNCHER, OPENMPI_LAUNCHER, MVAPICH_LAUNCHER, SLURM_LAUNCHER
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT
from ..nebula.constants import NEBULA_EXPORT_ENVS
from ..utils import logger
from ..autotuning import Autotuner
DLTS_HOSTFILE = "/job/hostfile"
PDSH_LAUNCHER = 'pdsh'
TORCH_DISTRIBUTED_DEFAULT_PORT = 29500
def parse_args(args=None):
parser = argparse.ArgumentParser(
description="DeepSpeed runner to help launch distributed "
"multi-node/multi-gpu training jobs.")
parser.add_argument("-H",
"--hostfile",
type=str,
default=DLTS_HOSTFILE,
help="Hostfile path (in MPI style) that defines the "
"resource pool available to the job (e.g., "
"worker-0 slots=4)")
parser.add_argument("-i",
"--include",
type=str,
default="",
help='''Specify hardware resources to use during execution.
String format is
NODE_SPEC[@NODE_SPEC ...],
where
NODE_SPEC=NAME[:SLOT[,SLOT ...]].
If :SLOT is omitted, include all slots on that host.
Example: -i "worker-0@worker-1:0,2" will use all slots
on worker-0 and slots [0, 2] on worker-1.
''')
parser.add_argument("-e",
"--exclude",
type=str,
default="",
help='''Specify hardware resources to NOT use during execution.
Mutually exclusive with --include. Resource formatting
is the same as --include.
Example: -e "worker-1:0" will use all available
resources except slot 0 on worker-1.
''')
parser.add_argument("--num_nodes",
type=int,
default=-1,
help="Total number of worker nodes to run on, this will use "
"the top N hosts from the given hostfile.")
parser.add_argument("--min_elastic_nodes",
type=int,
default=-1,
help="Minimum number of nodes to run elastic training on. "
"Default is 1 when elastic training is enabled")
parser.add_argument("--max_elastic_nodes",
type=int,
default=-1,
help="Maximum number of nodes to run elastic training on. "
"Default is num_nodes when elastic training is enabled")
parser.add_argument("--num_gpus",
type=int,
default=-1,
help="Max number of GPUs to use on each node, will use "
"[0:N) GPU ids on each node.")
parser.add_argument("--master_port",
default=TORCH_DISTRIBUTED_DEFAULT_PORT,
type=int,
help="(optional) Port used by PyTorch distributed for "
"communication during training.")
parser.add_argument("--master_addr",
default="",
type=str,
help="(optional) IP address of node 0, will be "
"inferred via 'hostname -I' if not specified.")
parser.add_argument(
"--launcher",
default=PDSH_LAUNCHER,
type=str,
help="(optional) choose launcher backend for multi-node "
"training. Options currently include PDSH, OpenMPI, MVAPICH, SLURM.")
parser.add_argument("--launcher_args",
default="",
type=str,
help="(optional) pass launcher specific arguments as a "
"single quoted argument.")
parser.add_argument("--module",
action="store_true",
help="Change each process to interpret the launch "
"script as a Python module, executing with the same "
"behavior as 'python -m'.")
parser.add_argument("--no_python",
action="store_true",
help="Skip prepending the training script with "
"'python' - just execute it directly.")
parser.add_argument("--no_local_rank",
action="store_true",
help="Do not pass local_rank as an argument when calling "
"the user's training script.")
parser.add_argument("--no_ssh_check",
action="store_true",
help="Do not perform ssh check in multi-node launcher model")
parser.add_argument("--force_multi",
action="store_true",
help="Force multi-node launcher mode, helps in cases where user "
"wants to launch on single remote node.")
parser.add_argument(
"--save_pid",
action="store_true",
help="Save file containing launcher process id (pid) at /tmp/<main-pid>.ds, "
"where <main-pid> is the pid of the first process that invoked `deepspeed`. "
"Useful when launching deepspeed processes programmatically.")
parser.add_argument(
"--autotuning",
default="",
choices=["tune",
"run"],
type=str,
help="Run DeepSpeed autotuner to discover optimal configuration parameters "
"before running job.")
parser.add_argument("--elastic_training",
action="store_true",
help="Enable elastic training support in DeepSpeed.")
parser.add_argument("user_script",
type=str,
help="User script to launch, followed by any required "
"arguments.")
parser.add_argument('user_args', nargs=argparse.REMAINDER)
return parser.parse_args(args=args) | null |
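A sketch of parsing a typical launcher command line; train.py and its flag are placeholders:
args = parse_args([
    "--num_nodes", "2", "--num_gpus", "8",
    "train.py", "--deepspeed_config", "ds_config.json",
])
assert args.user_script == "train.py"
assert args.user_args == ["--deepspeed_config", "ds_config.json"]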
10,453 | import os
import sys
import json
import base64
import argparse
import subprocess
import collections
from copy import deepcopy
import signal
import time
import torch.cuda
from .multinode_runner import PDSHRunner, OpenMPIRunner, MVAPICHRunner, SlurmRunner
from .constants import PDSH_LAUNCHER, OPENMPI_LAUNCHER, MVAPICH_LAUNCHER, SLURM_LAUNCHER
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT
from ..nebula.constants import NEBULA_EXPORT_ENVS
from ..utils import logger
from ..autotuning import Autotuner
def fetch_hostfile(hostfile_path):
if not os.path.isfile(hostfile_path):
logger.warning("Unable to find hostfile, will proceed with training "
"with local resources only.")
return None
# e.g., worker-0 slots=16
with open(hostfile_path, 'r') as fd:
resource_pool = collections.OrderedDict()
for line in fd.readlines():
line = line.strip()
if line == '':
# skip empty lines
continue
try:
hostname, slots = line.split()
_, slot_count = slots.split("=")
slot_count = int(slot_count)
except ValueError as err:
logger.error("Hostfile is not formatted correctly, unable to "
"proceed with training.")
raise err
if hostname in resource_pool:
logger.error("Hostfile contains duplicate hosts, unable to "
"proceed with training.")
raise ValueError(f"host {hostname} is already defined")
resource_pool[hostname] = slot_count
return resource_pool | null |
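A sketch that writes a two-node hostfile in the expected MPI-style format and parses it back:
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write("worker-0 slots=4\nworker-1 slots=4\n")
    path = f.name

pool = fetch_hostfile(path)
assert list(pool.items()) == [('worker-0', 4), ('worker-1', 4)]
os.unlink(path)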