# DeepSpeed-master/deepspeed/module_inject/containers/llama.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features import HybridSplitQKVContainer, HybridGatedMLPContainer
from deepspeed.utils.types import ActivationFuncType, NormType
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import (
TransformerPolicy,
transformer_param_names,
maybe_copy,
maybe_copy_qkv,
maybe_copy_geglu,
maybe_get_lora,
)
class DS_LLAMAContainer(HybridGatedMLPContainer, HybridSplitQKVContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
_config.rotate_half = True
_config.rotate_every_two = False
_config.rotary_dim = self.hidden_size // self.num_attention_heads
self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
return self.module
def set_lora_params(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
self.lora_params = [
maybe_get_lora(p) for p in [
self.policy.client_module.mlp.up_proj.weight, self.policy.client_module.mlp.gate_proj.weight,
self.policy.client_module.mlp.down_proj.weight, self.policy.client_module.self_attn.q_proj.weight,
self.policy.client_module.self_attn.k_proj.weight, self.policy.client_module.self_attn.v_proj.weight,
self.policy.client_module.self_attn.o_proj.weight
]
]
def get_lora_matched_pair(self):
up_proj_lora, gate_proj_lora, down_proj_lora, q_lora, k_lora, v_lora, out_lora = self.get_lora_params()
ret = [(up_proj_lora, self.inter_up_w), (gate_proj_lora, self.inter_gate_w), (down_proj_lora, self._4hh_w),
(out_lora, self.dense_w), (q_lora, self.qw), (k_lora, self.kw), (v_lora, self.vw)]
return ret
def set_q_k_v(self):
"""
Necessary to implement for `HybridSplitQKVContainer`
"""
self.qw = self.policy.client_module.self_attn.q_proj.weight
self.qb = None
self.kw = self.policy.client_module.self_attn.k_proj.weight
self.kb = None
self.vw = self.policy.client_module.self_attn.v_proj.weight
self.vb = None
def set_mlp_gate(self):
"""
Necessary to implement for `HybridGatedMLPContainer`
"""
self.inter_up_w = self.policy.client_module.mlp.up_proj.weight
self.inter_up_b = None
self.inter_gate_w = self.policy.client_module.mlp.gate_proj.weight
self.inter_gate_b = None
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
param_names = (
'self_attn.q_proj.weight', \
'self_attn.k_proj.weight', \
'self_attn.v_proj.weight', \
'self_attn.o_proj.weight', \
'mlp.up_proj.weight', \
'mlp.gate_proj.weight', \
'mlp.down_proj.weight', \
'input_layernorm.weight', \
'post_attention_layernorm.weight'
)
maybe_copy_qkv(module.attention,
sd,
weight_quantizer,
mp_replace,
'attn_qkvw', [prefix + param_names[0], prefix + param_names[1], prefix + param_names[2]],
split_qkv=self.policy.split_qkv)
for i in range(3, 4):
maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1],
prefix + param_names[i])
maybe_copy_geglu(module.mlp, sd, weight_quantizer, mp_replace, 'inter_w',
[prefix + param_names[4], prefix + param_names[5]])
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, 'output_w', prefix + param_names[6])
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[8], prefix + param_names[7])
maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[10], prefix + param_names[8])
class LLAMALayerPolicy(TransformerPolicy):
def __init__(self, client_module, inference=True):
super().__init__(
inference,
mlp_act_func_type=ActivationFuncType.GATED_SILU,
norm_type=NormType.RMSNorm,
)
self.client_module = client_module
try:
import transformers
LLAMALayerPolicy._orig_layer_class = transformers.models.llama.modeling_llama.LlamaDecoderLayer # type: ignore
except:
LLAMALayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.self_attn.q_proj.weight.shape[1], \
self.client_module.self_attn.num_heads, \
self.client_module.input_layernorm.variance_epsilon, \
self.client_module.mlp.gate_proj.weight.shape[0]
def attention(self, enable_training=False):
qw = self.client_module.self_attn.q_proj.weight
kw = self.client_module.self_attn.k_proj.weight
vw = self.client_module.self_attn.v_proj.weight
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
return qkvw, \
None, \
self.client_module.self_attn.o_proj.weight, \
None
def mlp(self, enable_training=False):
mlp1_up = self.client_module.mlp.up_proj.weight
mlp1_gate = self.client_module.mlp.gate_proj.weight
mlp2 = self.client_module.mlp.down_proj.weight
mlp1 = Parameter(torch.cat((mlp1_up, mlp1_gate), dim=0), requires_grad=enable_training)
return mlp1, None, mlp2, None
def layernorm(self):
return self.client_module.post_attention_layernorm.weight, \
None, \
self.client_module.input_layernorm.weight, \
None
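
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the file above): the LLaMA policy fuses the
# separate q/k/v projections (and the gated-MLP up/gate projections) by
# concatenating their weights along the output dimension, exactly as
# `attention()` and `mlp()` above do with `torch.cat(..., dim=0)`. The toy
# sizes and tensor names below are assumptions for demonstration only; plain
# PyTorch is used rather than the DeepSpeed inference kernels.
import torch
import torch.nn.functional as F

hidden = 8
x = torch.randn(2, hidden)                           # toy batch of activations

qw, kw, vw = (torch.randn(hidden, hidden) for _ in range(3))
qkvw = torch.cat((qw, kw, vw), dim=0)                # fused [3 * hidden, hidden]

# One fused matmul plus a split reproduces the three separate projections.
q, k, v = torch.split(x @ qkvw.t(), hidden, dim=-1)
assert torch.allclose(q, x @ qw.t()) and torch.allclose(v, x @ vw.t())

# Gated-SiLU MLP (ActivationFuncType.GATED_SILU): down(silu(gate(x)) * up(x)),
# with up_proj and gate_proj fused the same way `mlp()` builds `mlp1`.
inter = 3 * hidden
up_w, gate_w = torch.randn(inter, hidden), torch.randn(inter, hidden)
down_w = torch.randn(hidden, inter)
fused_up_gate = torch.cat((up_w, gate_w), dim=0)
up, gate = torch.split(x @ fused_up_gate.t(), inter, dim=-1)
out = (F.silu(gate) * up) @ down_w.t()               # [2, hidden]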

# DeepSpeed-master/deepspeed/module_inject/containers/clip.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy
class DS_CLIPContainer(BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
class HFCLIPLayerPolicy(TransformerPolicy):
def __init__(self, client_module, inference=False):
super().__init__(inference, pre_attn_norm=True, scale_attention=True)
self.client_module = client_module
self.cuda_graph_supported = True
if HFCLIPLayerPolicy._orig_layer_class is None:
try:
import transformers
HFCLIPLayerPolicy._orig_layer_class = transformers.models.clip.modeling_clip.CLIPEncoderLayer
except:
HFCLIPLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.self_attn.q_proj.weight.shape[1], \
self.client_module.self_attn.num_heads, \
self.client_module.layer_norm1.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
qw = self.client_module.self_attn.q_proj.weight
qb = self.client_module.self_attn.q_proj.bias
kw = self.client_module.self_attn.k_proj.weight
kb = self.client_module.self_attn.k_proj.bias
vw = self.client_module.self_attn.v_proj.weight
vb = self.client_module.self_attn.v_proj.bias
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training)
return qkvw, \
qkvb, \
self.client_module.self_attn.out_proj.weight, \
self.client_module.self_attn.out_proj.bias
def mlp(self, enable_training=False):
return self.client_module.mlp.fc1.weight, \
self.client_module.mlp.fc1.bias, \
self.client_module.mlp.fc2.weight, \
self.client_module.mlp.fc2.bias
def layernorm(self):
return self.client_module.layer_norm2.weight, \
self.client_module.layer_norm2.bias, \
self.client_module.layer_norm1.weight, \
self.client_module.layer_norm1.bias

# DeepSpeed-master/deepspeed/module_inject/containers/opt.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features import MetaTensorContainer, HybridSplitQKVContainer
from deepspeed.model_implementations.transformers.ds_opt import DeepSpeedOPTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy
from ..policy import transformer_param_names
from ..policy import maybe_copy
from ..policy import maybe_copy_qkv
from ..policy import maybe_get_lora
from deepspeed.utils.types import ActivationFuncType
class DS_OPTContainer(MetaTensorContainer, HybridSplitQKVContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedOPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
def set_lora_params(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
self.lora_params = [
maybe_get_lora(p) for p in [
self.policy.client_module.fc1,
self.policy.client_module.fc2,
self.policy.client_module.self_attn.q_proj,
self.policy.client_module.self_attn.k_proj,
self.policy.client_module.self_attn.v_proj,
self.policy.client_module.self_attn.out_proj,
]
]
def set_q_k_v(self):
"""
Necessary to implement for `HybridSplitQKVContainer`
"""
self.qw = self.policy.client_module.self_attn.q_proj.weight
self.qb = self.policy.client_module.self_attn.q_proj.bias
self.kw = self.policy.client_module.self_attn.k_proj.weight
self.kb = self.policy.client_module.self_attn.k_proj.bias
self.vw = self.policy.client_module.self_attn.v_proj.weight
self.vb = self.policy.client_module.self_attn.v_proj.bias
def get_lora_matched_pair(self):
fc1_lora, fc2_lora, q_lora, k_lora, v_lora, out_lora = self.get_lora_params()
ret = [(fc1_lora, self._h4h_w), (fc2_lora, self._4hh_w), (out_lora, self.dense_w), (q_lora, self.qw),
(k_lora, self.kw), (v_lora, self.vw)]
return ret
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
param_names = (
'self_attn.q_proj.weight', \
'self_attn.k_proj.weight', \
'self_attn.v_proj.weight', \
'self_attn.q_proj.bias', \
'self_attn.k_proj.bias', \
'self_attn.v_proj.bias', \
'self_attn.out_proj.weight', \
'self_attn.out_proj.bias', \
'fc1.weight', \
'fc1.bias', \
'fc2.weight', \
'fc2.bias', \
'final_layer_norm.weight', \
'final_layer_norm.bias', \
'self_attn_layer_norm.weight', \
'self_attn_layer_norm.bias'
)
for i in range(0, 6, 3):
maybe_copy_qkv(module.attention,
sd,
weight_quantizer,
mp_replace,
transformer_param_names[i // 3],
[prefix + param_names[i], prefix + param_names[i + 1], prefix + param_names[i + 2]],
split_qkv=self.policy.split_qkv)
for i in range(6, 8):
maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i - 4],
prefix + param_names[i])
for i in range(8, 14):
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i - 4],
prefix + param_names[i])
for i in range(14, 16):
maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i - 4],
prefix + param_names[i])
class HFOPTLayerPolicy(TransformerPolicy):
_orig_layer_class = None
def __init__(self, client_module, inference=True, use_load_prefix=True):
super().__init__(inference, linear_layer=True, pre_attn_norm=True, use_load_prefix=use_load_prefix)
self.client_module = client_module
try:
import transformers
HFOPTLayerPolicy._orig_layer_class = transformers.models.opt.modeling_opt.OPTDecoderLayer
except:
HFOPTLayerPolicy._orig_layer_class = None
if hasattr(TransformerPolicy, "hf_model_config") and hasattr(TransformerPolicy.hf_model_config,
"activation_function"):
if TransformerPolicy.hf_model_config.activation_function == "relu":
self.mlp_act_func_type = ActivationFuncType.ReLU
elif TransformerPolicy.hf_model_config.activation_function in ["gelu", "gelu_new"]:
self.mlp_act_func_type = ActivationFuncType.GELU
else:
raise ValueError("Unsupported activation function: {}".format(
TransformerPolicy.hf_model_config.activation_function))
else:
self.mlp_act_func_type = ActivationFuncType.ReLU # default
def get_hidden_heads(self):
return self.client_module.self_attn.embed_dim, \
self.client_module.self_attn.num_heads, \
self.client_module.self_attn_layer_norm.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
qw = self.client_module.self_attn.q_proj.weight
qb = self.client_module.self_attn.q_proj.bias
kw = self.client_module.self_attn.k_proj.weight
kb = self.client_module.self_attn.k_proj.bias
vw = self.client_module.self_attn.v_proj.weight
vb = self.client_module.self_attn.v_proj.bias
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=enable_training)
return qkvw, \
qkvb, \
self.client_module.self_attn.out_proj.weight, \
self.client_module.self_attn.out_proj.bias
def mlp(self, enable_training=False):
return self.client_module.fc1.weight, \
self.client_module.fc1.bias, \
self.client_module.fc2.weight, \
self.client_module.fc2.bias
def layernorm(self):
return self.client_module.final_layer_norm.weight, \
self.client_module.final_layer_norm.bias, \
self.client_module.self_attn_layer_norm.weight, \
self.client_module.self_attn_layer_norm.bias

# DeepSpeed-master/deepspeed/module_inject/containers/megatron_gpt.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features.megatron import MegatronContainer
from deepspeed.model_implementations.transformers.ds_megatron_gpt import DeepSpeedMegatronGPTInference
import torch
from ..policy import TransformerPolicy
from packaging import version as pkg_version
class DS_MegatronGPTContainer(MegatronContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedMegatronGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
if self.megatron_v2:
self.module.config.rotate_half = True
self.module.config.rotate_every_two = False
return self.module
# TODO: Megatron GPT MoE inherits from Megatron policy and replaces mlp
# TODO: Generalize MoE overall goal, expand beyond Megatron
class MegatronLayerPolicy(TransformerPolicy):
_orig_layer_class = None
version = 0
moe_type = 'standard'
megatron_v2 = True
use_mup = False
def __init__(self, client_module, inference=True):
super().__init__(inference, megatron_v2=MegatronLayerPolicy.megatron_v2, use_mup=MegatronLayerPolicy.use_mup)
self.client_module = client_module
# we use megatron version to differentiate between the old and new
# megatron-lm source code
if MegatronLayerPolicy._orig_layer_class is None:
if pkg_version.parse(torch.__version__) <= pkg_version.parse("1.2"):
MegatronLayerPolicy._orig_layer_class = None
else:
try:
from megatron.model.transformer import ParallelTransformerLayer
MegatronLayerPolicy._orig_layer_class = ParallelTransformerLayer
except ImportError:
MegatronLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.attention.query_key_value.weight.shape[1], \
self.client_module.attention.num_attention_heads, \
self.client_module.input_layernorm.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
if self.inference:
if MegatronLayerPolicy.version == 0:
attention = self.client_module.attention
else:
attention = self.client_module.self_attention
return attention.query_key_value.weight, \
attention.query_key_value.bias, \
attention.dense.weight, \
attention.dense.bias
def mlp(self, moe_type='standard', enable_training=False):
from deepspeed.moe.utils import has_moe_layers
moe, _ = has_moe_layers(self.client_module)
if moe:
moe_experts = self.client_module.mlp.deepspeed_moe.experts.deepspeed_experts if moe_type == 'standard' else \
self.client_module.mlp.moe.deepspeed_moe.experts.deepspeed_experts
num_experts = len(moe_experts)
if moe_type == 'standard':
return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \
[moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)]
else:
return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \
[moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)], \
self.client_module.mlp.mlp.dense_h_to_4h.weight, \
self.client_module.mlp.mlp.dense_h_to_4h.bias, \
self.client_module.mlp.mlp.dense_4h_to_h.weight, \
self.client_module.mlp.mlp.dense_4h_to_h.bias, \
self.client_module.mlp.coefficient.weight
else:
return self.client_module.mlp.dense_h_to_4h.weight, \
self.client_module.mlp.dense_h_to_4h.bias, \
self.client_module.mlp.dense_4h_to_h.weight, \
self.client_module.mlp.dense_4h_to_h.bias
def layernorm(self):
return self.client_module.post_attention_layernorm.weight, \
self.client_module.post_attention_layernorm.bias, \
self.client_module.input_layernorm.weight, \
self.client_module.input_layernorm.bias

# DeepSpeed-master/deepspeed/module_inject/containers/gptneox.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features.meta_tensor import MetaTensorContainer
from .features.hybrid_megatron import HybridMegatronContainer
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
import torch
from ..policy import TransformerPolicy
from ..policy import transformer_param_names
from ..policy import maybe_copy
from packaging import version as pkg_version
from ..policy import maybe_get_lora
class DS_GPTNEOXContainer(MetaTensorContainer, HybridMegatronContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
if self.megatron_v2:
self.module.config.rotate_half = True
self.module.config.rotate_every_two = False
return self.module
def get_lora_matched_pair(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
fc1_lora, fc2_lora, qkv_lora, out_lora = self.get_lora_params()
ret = [(fc1_lora, self._h4h_w), (fc2_lora, self._4hh_w), (qkv_lora, self.qkvw), (out_lora, self.dense_w)]
return ret
def set_lora_params(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
if GPTNEOXLayerPolicy.version == 0:
attention = self.policy.client_module.attention
else:
attention = self.policy.client_module.self_attention
self.lora_params = [
maybe_get_lora(p) for p in [
self.policy.client_module.mlp.dense_h_to_4h, self.policy.client_module.mlp.dense_4h_to_h,
attention.query_key_value, attention.dense
]
]
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
param_names = (
'attention.query_key_value.weight', \
'attention.query_key_value.bias', \
'attention.dense.weight', \
'attention.dense.bias', \
'mlp.dense_h_to_4h.weight', \
'mlp.dense_h_to_4h.bias', \
'mlp.dense_4h_to_h.weight', \
'mlp.dense_4h_to_h.bias', \
'post_attention_layernorm.weight', \
'post_attention_layernorm.bias', \
'input_layernorm.weight', \
'input_layernorm.bias'
)
for i in range(0, 2):
maybe_copy(module.attention,
sd,
weight_quantizer,
mp_replace,
transformer_param_names[i],
prefix + param_names[i],
qkv=True,
megatron_v2=self.policy.is_megatron_v2,
split_qkv=self.policy.split_qkv,
heads=self.policy.client_module.attention.num_attention_heads)
for i in range(2, 4):
maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i],
prefix + param_names[i])
for i in range(4, 10):
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i],
prefix + param_names[i])
for i in range(10, 12):
maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i], prefix + param_names[i])
class GPTNEOXLayerPolicy(TransformerPolicy):
_orig_layer_class = None
version = 0
def __init__(self, client_module, inference=True, megatron_v2=True, split_qkv=False):
super().__init__(inference, megatron_v2=megatron_v2, split_qkv=split_qkv)
self.client_module = client_module
if GPTNEOXLayerPolicy._orig_layer_class is None:
if pkg_version.parse(torch.__version__) <= pkg_version.parse("1.2"):
GPTNEOXLayerPolicy._orig_layer_class = None
else:
try:
from transformers import GPTNeoXLayer
GPTNEOXLayerPolicy._orig_layer_class = GPTNeoXLayer
except ImportError:
GPTNEOXLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
if GPTNEOXLayerPolicy.version == 0:
attention = self.client_module.attention
else:
attention = self.client_module.self_attention
return attention.hidden_size, \
attention.num_attention_heads, \
self.client_module.input_layernorm.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
if GPTNEOXLayerPolicy.version == 0:
attention = self.client_module.attention
else:
attention = self.client_module.self_attention
return attention.query_key_value.weight, \
attention.query_key_value.bias, \
attention.dense.weight, \
attention.dense.bias
def mlp(self, enable_training=False):
return self.client_module.mlp.dense_h_to_4h.weight, \
self.client_module.mlp.dense_h_to_4h.bias, \
self.client_module.mlp.dense_4h_to_h.weight, \
self.client_module.mlp.dense_4h_to_h.bias
def layernorm(self):
return self.client_module.post_attention_layernorm.weight, \
self.client_module.post_attention_layernorm.bias, \
self.client_module.input_layernorm.weight, \
self.client_module.input_layernorm.bias

# DeepSpeed-master/deepspeed/module_inject/containers/gptneo.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features.meta_tensor import MetaTensorContainer
from .features.split_qkv import HybridSplitQKVContainer
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy
from ..policy import transformer_param_names
from ..policy import maybe_copy
from ..policy import maybe_copy_qkv
from ..policy import maybe_get_lora
class DS_GPTNEOContainer(MetaTensorContainer, HybridSplitQKVContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
def set_lora_params(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
self.lora_params = [
maybe_get_lora(p) for p in [
self.policy.client_module.mlp.c_fc, self.policy.client_module.mlp.c_proj,
self.policy.client_module.attn.attention.q_proj, self.policy.client_module.attn.attention.k_proj,
self.policy.client_module.attn.attention.v_proj, self.policy.client_module.attn.attention.out_proj
]
]
def set_q_k_v(self):
"""
Necessary to implement for `HybridSplitQKVContainer`
"""
self.qw = self.policy.client_module.attn.attention.q_proj.weight
self.qb = None
self.kw = self.policy.client_module.attn.attention.k_proj.weight
self.kb = None
self.vw = self.policy.client_module.attn.attention.v_proj.weight
self.vb = None
def get_lora_matched_pair(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
fc1_lora, fc2_lora, q_lora, k_lora, v_lora, out_lora = self.get_lora_params()
ret = [(fc1_lora, self._h4h_w), (fc2_lora, self._4hh_w), (out_lora, self.dense_w), (q_lora, self.qw),
(k_lora, self.kw), (v_lora, self.vw)]
return ret
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
param_names = (
'attn.attention.q_proj.weight', \
'attn.attention.k_proj.weight', \
'attn.attention.v_proj.weight', \
'attn.attention.out_proj.weight', \
'attn.attention.out_proj.bias', \
'mlp.c_fc.weight', \
'mlp.c_fc.bias', \
'mlp.c_proj.weight', \
'mlp.c_proj.bias', \
'ln_2.weight', \
'ln_2.bias', \
'ln_1.weight', \
'ln_1.bias'
)
maybe_copy_qkv(module.attention,
sd,
weight_quantizer,
mp_replace,
'attn_qkvw', [prefix + param_names[0], prefix + param_names[1], prefix + param_names[2]],
split_qkv=self.policy.split_qkv)
for i in range(3, 5):
maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1],
prefix + param_names[i])
for i in range(5, 11):
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1],
prefix + param_names[i])
for i in range(11, 13):
maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1],
prefix + param_names[i])
class HFGPTNEOLayerPolicy(TransformerPolicy):
def __init__(self, client_module, inference=True):
super().__init__(inference, scale_attention=False)
self.client_module = client_module
try:
import transformers
HFGPTNEOLayerPolicy._orig_layer_class = transformers.models.gpt_neo.modeling_gpt_neo.GPTNeoBlock
except:
HFGPTNEOLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.attn.attention.embed_dim, \
self.client_module.attn.attention.num_heads, \
self.client_module.ln_1.eps, \
DEFAULT_INTERMEDIATE_SIZE
def get_q_k_v(self):
return self.client_module.attn.attention.q_proj.weight, \
None, \
self.client_module.attn.attention.k_proj.weight, \
None, \
self.client_module.attn.attention.v_proj.weight, \
None
def attention(self, enable_training=False):
qw = self.client_module.attn.attention.q_proj.weight
kw = self.client_module.attn.attention.k_proj.weight
vw = self.client_module.attn.attention.v_proj.weight
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
return qkvw, \
None, \
self.client_module.attn.attention.out_proj.weight, \
self.client_module.attn.attention.out_proj.bias
def mlp(self, enable_training=False):
return self.client_module.mlp.c_fc.weight, \
self.client_module.mlp.c_fc.bias, \
self.client_module.mlp.c_proj.weight, \
self.client_module.mlp.c_proj.bias
def layernorm(self):
return self.client_module.ln_2.weight, \
self.client_module.ln_2.bias, \
self.client_module.ln_1.weight, \
self.client_module.ln_1.bias

# DeepSpeed-master/deepspeed/module_inject/containers/unet.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from torch.nn.parameter import Parameter
from ..policy import DSPolicy
from ...model_implementations.diffusers.unet import DSUNet
class UNetPolicy(DSPolicy):
def __init__(self):
super().__init__()
try:
import diffusers
self._orig_layer_class = diffusers.models.unet_2d_condition.UNet2DConditionModel
except ImportError:
self._orig_layer_class = None
def match(self, module):
return isinstance(module, self._orig_layer_class)
def match_replaced(self, module):
return isinstance(module, DSUNet)
def apply(self, module, enable_cuda_graph=True):
# TODO(cmikeh2): Enable cuda graph should be an inference configuration
return DSUNet(module, enable_cuda_graph=enable_cuda_graph)
def attention(self, client_module):
qw = client_module.to_q.weight
kw = client_module.to_k.weight
vw = client_module.to_v.weight
if qw.shape[1] == kw.shape[1]:
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=False)
return qkvw, \
client_module.to_out[0].weight, \
client_module.to_out[0].bias, \
qw.shape[-1], \
client_module.heads
else:
#return None
#kvw = Parameter(torch.cat((kw, vw), dim=0), requires_grad=False)
return qw, \
kw, vw, \
client_module.to_out[0].weight, \
client_module.to_out[0].bias, \
qw.shape[-1], \
client_module.heads
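
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the file above): `attention()` only fuses
# q/k/v when their input widths match (`qw.shape[1] == kw.shape[1]`). In a
# diffusion cross-attention block, k and v project the text-encoder states,
# whose width generally differs from the image-feature width, so the three
# weights cannot be stacked into one matrix. The sizes below are assumptions.
import torch

query_dim, context_dim, inner_dim = 320, 768, 320

# Self-attention: all three projections read the same `query_dim` features.
to_q = torch.nn.Linear(query_dim, inner_dim, bias=False)
to_k = torch.nn.Linear(query_dim, inner_dim, bias=False)
to_v = torch.nn.Linear(query_dim, inner_dim, bias=False)
assert to_q.weight.shape[1] == to_k.weight.shape[1]           # fusable
qkvw = torch.cat((to_q.weight, to_k.weight, to_v.weight), dim=0)

# Cross-attention: k/v read the wider text embeddings; q/k/v stay separate.
to_k_cross = torch.nn.Linear(context_dim, inner_dim, bias=False)
assert to_q.weight.shape[1] != to_k_cross.weight.shape[1]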

# DeepSpeed-master/deepspeed/module_inject/containers/gptj.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .features.meta_tensor import MetaTensorContainer
from .features.split_qkv import HybridSplitQKVContainer
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
import torch
from torch.nn.parameter import Parameter
from ..policy import TransformerPolicy
from ..policy import transformer_param_names
from ..policy import maybe_copy
from ..policy import maybe_copy_qkv
from ..policy import maybe_get_lora
class DS_GPTJContainer(MetaTensorContainer, HybridSplitQKVContainer, BaseTransformerContainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
return self.module
def set_lora_params(self):
"""
Necessary to implement for `HybridEngineContainer`
"""
self.lora_params = [
maybe_get_lora(p) for p in [
self.policy.client_module.mlp.fc_in, self.policy.client_module.mlp.fc_out,
self.policy.client_module.attn.q_proj, self.policy.client_module.attn.k_proj,
self.policy.client_module.attn.v_proj, self.policy.client_module.attn.out_proj
]
]
def get_lora_matched_pair(self):
fc1_lora, fc2_lora, q_lora, k_lora, v_lora, out_lora = self.get_lora_params()
ret = [(fc1_lora, self._h4h_w), (fc2_lora, self._4hh_w), (out_lora, self.dense_w), (q_lora, self.qw),
(k_lora, self.kw), (v_lora, self.vw)]
return ret
def set_q_k_v(self):
"""
Necessary to implement for `HybridSplitQKVContainer`
"""
self.qw = self.policy.client_module.attn.q_proj.weight
self.qb = None
self.kw = self.policy.client_module.attn.k_proj.weight
self.kb = None
self.vw = self.policy.client_module.attn.v_proj.weight
self.vb = None
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
param_names = (
'attn.q_proj.weight', \
'attn.k_proj.weight', \
'attn.v_proj.weight', \
'attn.out_proj.weight', \
'mlp.fc_in.weight', \
'mlp.fc_in.bias', \
'mlp.fc_out.weight', \
'mlp.fc_out.bias', \
'ln_1.weight', \
'ln_1.bias'
)
maybe_copy_qkv(module.attention,
sd,
weight_quantizer,
mp_replace,
'attn_qkvw', [prefix + param_names[0], prefix + param_names[1], prefix + param_names[2]],
split_qkv=self.policy.split_qkv)
for i in range(3, 4):
maybe_copy(module.attention, sd, weight_quantizer, mp_replace, transformer_param_names[i - 1],
prefix + param_names[i])
for i in range(4, 8):
maybe_copy(module.mlp, sd, weight_quantizer, mp_replace, transformer_param_names[i],
prefix + param_names[i])
for i in range(8, 10):
maybe_copy(module, sd, weight_quantizer, mp_replace, transformer_param_names[i + 2],
prefix + param_names[i])
class HFGPTJLayerPolicy(TransformerPolicy):
_orig_layer_class = None
def __init__(self, client_module, inference=True):
super().__init__(inference, scale_attention=True)
self.client_module = client_module
try:
import transformers
HFGPTJLayerPolicy._orig_layer_class = transformers.models.gptj.modeling_gptj.GPTJBlock
except:
HFGPTJLayerPolicy._orig_layer_class = None
def get_hidden_heads(self):
return self.client_module.attn.embed_dim, \
self.client_module.attn.num_attention_heads, \
self.client_module.ln_1.eps, \
DEFAULT_INTERMEDIATE_SIZE
def attention(self, enable_training=False):
qw = self.client_module.attn.q_proj.weight
kw = self.client_module.attn.k_proj.weight
vw = self.client_module.attn.v_proj.weight
qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=enable_training)
return qkvw, \
None, \
self.client_module.attn.out_proj.weight, \
None,
def mlp(self, enable_training=False):
return self.client_module.mlp.fc_in.weight, \
self.client_module.mlp.fc_in.bias, \
self.client_module.mlp.fc_out.weight, \
self.client_module.mlp.fc_out.bias
def layernorm(self):
return None, \
None, \
self.client_module.ln_1.weight, \
self.client_module.ln_1.bias

# DeepSpeed-master/deepspeed/module_inject/containers/megatron_gpt_moe.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .base import *
from .base_moe import *
from .features.megatron import MegatronContainer
from deepspeed.model_implementations.transformers.ds_megatron_gpt import DeepSpeedMegatronGPTInference
import torch
from .megatron_gpt import MegatronLayerPolicy
from packaging import version as pkg_version
class DS_MegatronGPTMoEContainer(MegatronContainer, BaseTransformerMoEContainer):
def __init__(self, policy, config, model_config, layer_id):
super().__init__(policy, config, model_config, layer_id)
# All model specific things should be defined here instead of the base class.
def create_module(self, config=None):
_config = config if config is not None else self.ds_model_config
self.module = DeepSpeedMegatronGPTInference(_config, mp_group=self.mp_group)
self.module.config.scale_attention = self.scale_attention
if self.megatron_v2:
self.module.config.rotate_half = True
self.module.config.rotate_every_two = False
return self.module
# TODO: Megatron GPT MoE inherits from Megatron policy and replaces mlp
# TODO: Generalize MoE overall goal, expand beyond Megatron
class MegatronMoELayerPolicy(MegatronLayerPolicy):
_orig_layer_class = None
version = 0
moe_type = 'standard'
num_experts = 1
def __init__(self, client_module, inference=True):
super().__init__(inference)
self.client_module = client_module
# we use megatron version to differentiate between the old and new
# megatron-lm source code
if MegatronMoELayerPolicy._orig_layer_class is None:
if pkg_version.parse(torch.__version__) <= pkg_version.parse("1.2"):
MegatronMoELayerPolicy._orig_layer_class = None
else:
try:
from megatron.model.transformer import ParallelTransformerLayer
MegatronMoELayerPolicy._orig_layer_class = ParallelTransformerLayer
except ImportError:
MegatronMoELayerPolicy._orig_layer_class = None
def get_num_experts(self):
return self.num_experts
def mlp(self, moe_type='standard', enable_training=False):
# for now, all of this is tightly coupled to megatron-deepspeed moe implementation
# todo: think and refactor this to be more general
#from deepspeed.moe.utils import has_moe_layers
#moe, _ = has_moe_layers(self.client_module)
moe_experts = self.client_module.mlp.deepspeed_moe.experts.deepspeed_experts if moe_type == 'standard' else \
self.client_module.mlp.moe.deepspeed_moe.experts.deepspeed_experts
num_experts = len(moe_experts)
self.num_experts = num_experts
if moe_type == 'standard':
return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \
[moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)]
else:
return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \
[moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \
[moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)], \
self.client_module.mlp.mlp.dense_h_to_4h.weight, \
self.client_module.mlp.mlp.dense_h_to_4h.bias, \
self.client_module.mlp.mlp.dense_4h_to_h.weight, \
self.client_module.mlp.mlp.dense_4h_to_h.bias, \
self.client_module.mlp.coefficient.weight

# DeepSpeed-master/deepspeed/module_inject/containers/features/hybrid_megatron.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from .hybrid_engine import HybridEngineContainer
from .megatron import MegatronContainer
class HybridMegatronContainer(MegatronContainer, HybridEngineContainer):
def _align_qkv(self, x: torch.Tensor):
"""
Internal helper for accepting the head-contiguous weight matrix and chunking
the query, key, and value components.
"""
attention_head_size = x.shape[0] // self.num_attention_heads
new_x_shape = (self.num_attention_heads, attention_head_size) + x.size()[1:]
x_1 = x.view(*new_x_shape)
div_dim = len(x_1.size()) - 2 if len(x.shape) == 2 else -1
(q, k, v) = torch.split(x_1, (x_1.shape[div_dim] // 3), dim=div_dim)
if len(q.shape) > 2:
x.data.copy_(
torch.cat((q.reshape(-1, q.shape[-1]), k.reshape(-1, q.shape[-1]), v.reshape(-1, q.shape[-1])),
dim=0).reshape(x.shape))
else:
x.data.copy_(torch.cat((q.reshape(-1), k.reshape(-1), v.reshape(-1)), dim=-1).reshape(x.shape))
def transform_for_inference(self) -> None:
"""
Overrides the HybridEngineContainer implementation.
The alternative layout of the QKV matrix for Megatron is such that each head's Q, K, and V
are sequential in memory. This is different from the default layout in which all of the Qs
are sequential, followed by all of the Ks, and then all of the Vs. Here, we take the default
layout and transform it to the inference layout.
"""
if hasattr(self.qkvw, 'ds_id'):
from deepspeed.runtime.zero import GatheredParameters
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
param_list = [self.qkvw, self.qkvb]
non_active_params = [param for param in param_list if (hasattr(param, 'ds_id') and \
param.ds_status == ZeroParamStatus.NOT_AVAILABLE)]
with GatheredParameters(non_active_params):
self._align_qkv(self.qkvw)
self._align_qkv(self.qkvb)
else:
self._align_qkv(self.qkvw)
self._align_qkv(self.qkvb)
def _partition_qkv(self, x: torch.Tensor):
"""
Internal helper for taking contiguous QKV and partitioning it for contiguous
heads.
"""
q_k_v = torch.split(x, (x.shape[0] // 3), dim=0)
attention_head_size = q_k_v[0].shape[0] // self.num_attention_heads
new_x_shape = (self.num_attention_heads, attention_head_size) + x.size()[1:]
q, k, v = [data.view(*new_x_shape) for data in q_k_v]
if len(q.shape) > 2:
x.data.copy_(torch.cat((q, k, v), dim=-2).reshape(-1, q.shape[-1]))
else:
x.data.copy_(torch.cat((q, k, v), dim=-1).reshape(-1))
def transform_for_training(self):
"""
Overrides the HybridEngineContainer implementation.
The alternative layout of the QKV matrix for Megatron is such that each head's Q, K, and V
are sequential in memory. This is different from the default layout in which all of the Qs
are sequential, followed by all of the Ks, and then all of the Vs. This function takes the inference format and reverts it back to the default format.
"""
# If parameter is distributed, handle gathering it
if hasattr(self.qkvw, 'ds_id'):
from deepspeed.runtime.zero import GatheredParameters
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
param_list = [self.qkvw, self.qkvb]
non_active_params = [param for param in param_list if (hasattr(param, 'ds_id') and \
param.ds_status == ZeroParamStatus.NOT_AVAILABLE)]
with GatheredParameters(non_active_params):
self._partition_qkv(self.qkvw)
self._partition_qkv(self.qkvb)
else:
self._partition_qkv(self.qkvw)
self._partition_qkv(self.qkvb)
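
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the file above): the two QKV row orders that
# `transform_for_inference` / `transform_for_training` convert between, shown
# on a toy index tensor. Two heads with head_dim 2 are assumed purely for
# readability; this is not the DeepSpeed helper itself.
import torch

heads, head_dim = 2, 2
hidden = heads * head_dim
blocked = torch.arange(3 * hidden)          # rows 0-3: Q, 4-7: K, 8-11: V
q, k, v = torch.split(blocked, hidden)
interleaved = torch.cat([
    torch.cat((q[h * head_dim:(h + 1) * head_dim],
               k[h * head_dim:(h + 1) * head_dim],
               v[h * head_dim:(h + 1) * head_dim])) for h in range(heads)
])
# blocked:     [0 1 2 3 | 4 5 6 7 | 8 9 10 11]    -> all Qs, then Ks, then Vs
# interleaved: [0 1 4 5 8 9 | 2 3 6 7 10 11]      -> head 0's q,k,v then head 1's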

# DeepSpeed-master/deepspeed/module_inject/containers/features/meta_tensor.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from abc import ABC, abstractmethod
from packaging import version as pkg_version
import torch
class MetaTensorContainer(ABC):
"""
NOTE: If you are using this feature with a container that
also inherits from `HybridEngineContainer`, ensure that `MetaTensorContainer`
is inherited before `HybridEngineContainer` in the class definition.
"""
def __init__(self, **kwargs):
if pkg_version.parse('1.10') > pkg_version.parse(torch.__version__):
raise NotImplementedError("Meta tensor support is not available, please upgrade to torch 1.10+")
super().__init__(**kwargs)
self.is_meta = False
self.ckpt_load_enabled = True
def initialize_tensors(self, enable_training=False):
super().initialize_tensors(enable_training=enable_training)
self.is_meta = self.qkvw.is_meta
def apply_tensor_parallelism(self, mp_replace, **kwargs):
if self.is_meta:
if self.qkvb is None:
self.module.attention.attn_qkvb = None
if self.dense_b is None:
self.module.attention.attn_ob = None
else:
super().apply_tensor_parallelism(mp_replace, **kwargs)
def copy_data_to_new_module(self):
if self.is_meta:
if self.attn_nw is None:
self.module.mlp.attn_nw = self.attn_nw
self.module.mlp.attn_nb = self.attn_nb
else:
super().copy_data_to_new_module()
def transpose(self):
if not self.is_meta:
super().transpose()
@abstractmethod
def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
"""
Load all the transformer parameters from the checkpoint file (sd).
In addition to the parameter names, we require two
more parameters to help read the data correctly
from the checkpoint and split the qkv heads in the
right order:
1. `use_load_prefix` (Default: False): this specifies
whether we need to use the name of the first abstraction
layer of the model for searching the parameter's name
in a checkpoint file. For more information of how this
is used please see
https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/module_inject/load_checkpoint.py
2. `split_qkv` (Default: True): we use this flag when splitting
the qkv parameter into heads. If it is False, it means the heads
of q, k, and v are stored together and need to be split in the
DeepSpeed-Inference API.
"""
raise NotImplementedError("A load_params() function must be defined in the model container \
when inheriting the MetaTensorContainer feature")
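
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the file above): `load_params` receives a
# per-layer `prefix` and joins it with model-specific parameter names to build
# the full state-dict keys it reads from `sd`. The layer prefix and key names
# below follow the Hugging Face LLaMA convention, but they are assumptions used
# only to show the key construction, not code taken from DeepSpeed.
import torch

sd = {  # stand-in for a loaded checkpoint state dict
    "model.layers.0.self_attn.q_proj.weight": torch.zeros(8, 8),
    "model.layers.0.self_attn.k_proj.weight": torch.zeros(8, 8),
    "model.layers.0.self_attn.v_proj.weight": torch.zeros(8, 8),
}
prefix = "model.layers.0."
param_names = ("self_attn.q_proj.weight", "self_attn.k_proj.weight", "self_attn.v_proj.weight")

# Gather the three tensors named by `prefix + name`, much as `maybe_copy_qkv`
# is handed them before fusing q, k, and v into a single matrix.
qkv = torch.cat([sd[prefix + name] for name in param_names], dim=0)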

# DeepSpeed-master/deepspeed/module_inject/containers/features/split_qkv.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from abc import abstractmethod
import torch
from .hybrid_engine import HybridEngineContainer
class HybridSplitQKVContainer(HybridEngineContainer):
def set_attention(self, qkvw, qkvb, dense_w, dense_b):
super().set_attention(qkvw, qkvb, dense_w, dense_b)
self.set_q_k_v()
@abstractmethod
def set_q_k_v(self):
"""
In `set_q_k_v`, it is necessary to populate the following variables (where appropriate)
for the given model:
self.qw: q weight
self.qb: q bias
self.kw: k weight
self.kb: k bias
self.vw: v weight
self.vb: v bias
"""
raise NotImplementedError("A set_q_k_v() function must be defined in the model container \
in order to set the unfused q, k, and v tensors.")
def attention_qkv_mp(self, mp_replace, reversed_dim=False):
# Only need to handle the split q/k/v parameters here; if the fused QKV
# weight exists, the base implementation below covers it.
if self.module.attention.attn_qkvw is None:
params = [
(self.module.attention.attn_qw, self.qw),
(self.module.attention.attn_qb, self.qb),
(self.module.attention.attn_kw, self.kw),
(self.module.attention.attn_kb, self.kb),
(self.module.attention.attn_vw, self.vw),
(self.module.attention.attn_vb, self.vb),
]
for dst, src in params:
dst = mp_replace.copy(
dst[:self.qw.shape[0] // mp_replace.mp_size], src, int8=reversed_dim,
allocate_tensor=reversed_dim) if src is not None else None
else:
super().attention_qkv_mp(mp_replace)
def release_qkv(self):
super().release_qkv()
split_qkv_params = [
(self.module.attention.attn_qw, self.qw),
(self.module.attention.attn_qb, self.qb),
(self.module.attention.attn_kw, self.kw),
(self.module.attention.attn_kb, self.kb),
(self.module.attention.attn_vw, self.vw),
(self.module.attention.attn_vb, self.vb),
]
self._release_params(split_qkv_params)
def reset_qkv(self):
self.qkvw.data[:self.qw.shape[0]] = self.qw.data
self.qkvw.data[self.qw.shape[0]:2 * self.qw.shape[0]] = self.kw.data
self.qkvw.data[2 * self.qw.shape[0]:] = self.vw.data
qkv_data = [self.qw.data, self.kw.data, self.vw.data]
self.qw.data = self.qkvw.data[:self.qw.shape[0]]
self.kw.data = self.qkvw.data[self.qw.shape[0]:2 * self.qw.shape[0]]
self.vw.data = self.qkvw.data[2 * self.qw.shape[0]:]
if self.qkvb is not None:
self.qkvb.data[:self.qw.shape[0]] = self.qb.data
self.qkvb.data[self.qw.shape[0]:2 * self.qw.shape[0]] = self.kb.data
self.qkvb.data[2 * self.qw.shape[0]:] = self.vb.data
qkv_data.extend([self.qb.data, self.kb.data, self.vb.data])
self.qb.data = self.qkvb.data[:self.qw.shape[0]]
self.kb.data = self.qkvb.data[self.qw.shape[0]:2 * self.qw.shape[0]]
self.vb.data = self.qkvb.data[2 * self.qw.shape[0]:]
for data in qkv_data:
del data
def reset_qkv_experimental(self):
"""
WIP - experimental and likely to be changed/improved.
Unused, but keeping for now.
"""
if self.module.attention.attn_qkvw is None:
self.module.attention.attn_qkvw = torch.empty(self.qw.shape[0] * 3,
self.qw.shape[0],
dtype=self.qw.dtype,
device=self.qw.device)
self.module.attention.attn_qkvb = torch.empty(self.qw.shape[0] * 3,
dtype=self.qw.dtype,
device=self.qw.device)
self.module.attention.attn_qkvw.data[:self.qw.shape[0]] = self.qw.data
self.module.attention.attn_qkvb.data[:self.qw.shape[0]] = self.qb.data
self.module.attention.attn_qkvw.data[self.qw.shape[0]:2 * self.qw.shape[0]] = self.kw.data
self.module.attention.attn_qkvb.data[self.qw.shape[0]:2 * self.qw.shape[0]] = self.kb.data
self.module.attention.attn_qkvw.data[2 * self.qw.shape[0]:] = self.vw.data
self.module.attention.attn_qkvb.data[2 * self.qw.shape[0]:] = self.vb.data
qkv_data = [self.qw.data, \
self.qb.data, \
self.kw.data, \
self.kb.data, \
self.vw.data, \
self.vb.data]
self.qw.data = self.module.attention.attn_qkvw.data[:self.qw.shape[0]]
self.qb.data = self.module.attention.attn_qkvb.data[:self.qw.shape[0]]
self.kw.data = self.module.attention.attn_qkvw.data[self.qw.shape[0]:2 * self.qw.shape[0]]
self.kb.data = self.module.attention.attn_qkvb.data[self.qw.shape[0]:2 * self.qw.shape[0]]
self.vw.data = self.module.attention.attn_qkvw.data[2 * self.qw.shape[0]:]
self.vb.data = self.module.attention.attn_qkvb.data[2 * self.qw.shape[0]:]
for data in qkv_data:
del data
def set_attn_params_wo_copy(self, Z3_enabled=False):
self.module.attention.attn_ow = self.dense_w
self.module.attention.attn_ob = self.dense_b
if not Z3_enabled:
# In initialize_tensors, we create a fused qkvw with the appropriate shape
# and copy the qw, qb, kw, kb, vw, vb into it
self.module.attention.attn_qkvw = self.qkvw
self.module.attention.attn_qkvb = self.qkvb
# We reset the data for qw (which is the original model parameter) to point
# to the fused weight matrix we have created here
self.qw.data = self.qkvw[:self.qw.shape[0], :]
self.kw.data = self.qkvw[self.qw.shape[0]:2 * self.qw.shape[0], :]
self.vw.data = self.qkvw[self.qw.shape[0] * 2:, :]
# Assume if one of the biases is not None, then all of them are not None
if self.qb is not None:
self.qb.data = self.qkvb[:self.qw.shape[0]]
self.kb.data = self.qkvb[self.qw.shape[0]:2 * self.qw.shape[0]]
self.vb.data = self.qkvb[self.qw.shape[0] * 2:]
else:
# In ZeRO-3 this will be managed by ZeRO and handled separately in the
# forward of ds_attention
self.module.attention.attn_qw = self.qw
self.module.attention.attn_qb = self.qb
self.module.attention.attn_kw = self.kw
self.module.attention.attn_kb = self.kb
self.module.attention.attn_vw = self.vw
self.module.attention.attn_vb = self.vb
def get_attn_params(self):
params = super().get_attn_params()
params.extend([self.qw, self.qb, self.kw, self.kb, self.vw, self.vb])
return params
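
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the file above): `reset_qkv` and
# `set_attn_params_wo_copy` re-point the unfused q/k/v tensors at slices of the
# fused buffer so both views share one allocation. The toy tensors below are
# assumptions; only the aliasing behaviour is being demonstrated.
import torch

hidden = 4
qkvw = torch.zeros(3 * hidden, hidden)       # fused buffer
qw = torch.randn(hidden, hidden)             # original unfused weight

qkvw.data[:hidden] = qw.data                 # copy into the fused view ...
qw.data = qkvw.data[:hidden]                 # ... then alias back into it

qw.data.fill_(1.0)                           # writing through either name
assert torch.equal(qkvw[:hidden], qw)        # touches the same storage
assert qw.data_ptr() == qkvw.data_ptr()      # same underlying allocation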

# DeepSpeed-master/deepspeed/module_inject/containers/features/megatron.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from abc import ABC
class MegatronContainer(ABC):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.megatron_v2 = self.policy.is_megatron_v2
def _align_qkv_transposed(self, x):
attention_head_size = x.shape[-1] // self.num_attention_heads
new_x_shape = x.size()[:-1] + (self.num_attention_heads, attention_head_size)
x_1 = x.view(*new_x_shape)
(q, k, v) = torch.split(x_1, (x_1.shape[-1] // 3), dim=(x_1.dim() - 1))
if len(q.shape) > 2:
return torch.cat((q.reshape(q.shape[0], -1), k.reshape(q.shape[0], -1), v.reshape(q.shape[0], -1)),
dim=-1).reshape(x.shape)
else:
return torch.cat((q.reshape(-1), k.reshape(-1), v.reshape(-1)), dim=-1).reshape(x.shape)
def transpose(self):
super().transpose()
if self.megatron_v2:
self.qkvw = torch.nn.parameter.Parameter(self._align_qkv_transposed(self.qkvw).contiguous())
self.qkvb = torch.nn.parameter.Parameter(self._align_qkv_transposed(self.qkvb).contiguous())

# DeepSpeed-master/deepspeed/module_inject/containers/features/hybrid_engine.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from abc import ABC, abstractmethod
from typing import List, Tuple
import torch
class HybridEngineContainer(ABC):
"""
This container identifies which methods need to be overridden in addition to
the base container to enable use in the RLHF pipeline. These methods are not
necessary for inference alone.
NOTE: If you are using this feature with a container that
also inherits from `MetaTensorContainer`, ensure that `MetaTensorContainer`
is inherited before `HybridEngineContainer` in the class definition.
"""
def initialize_tensors(self, enable_training=False):
"""
Same purpose as the base container, but also grabs the hooks for any LoRA
parameters. If it's necessary to override specific sub-components of the model,
it's best to augment the specific `set_[component]` itself rather than modifying
the `initialize_tensors` method. See the `HybridSplitQKVContainer` for an example.
"""
super().initialize_tensors(enable_training=enable_training)
self.set_lora_params()
def transform_for_training(self):
"""
If the views on certain parameters are largely incompatible, it may be necessary to do
more substantial transformations to the parameters. This method should be overridden to
transform the inference format to what is necessary for training.
"""
pass
def transform_for_inference(self):
"""
If the views on certain parameters are largely incompatible, it may be necessary to do
more substantial transformations to the parameters. This method should be overridden to
transform the training format to what is necessary for inference.
"""
pass
@abstractmethod
def set_lora_params(self):
"""
If available, set the LoRA parameters for the module. An implementation
for this would iterate over all parameters of the model and use the `maybe_get_lora` helper
method to check if the parameter does in fact have any LoRA params.
"""
raise NotImplementedError("A set_lora_params() function must be defined for the relevant parameters.")
@abstractmethod
def get_lora_matched_pair(self):
"""Get the pair of lora params and its matched model parameters."""
raise NotImplementedError("get_lora_matched_pair() must be defined for the relevant parameters.")
def fuse_lora(self):
"""Fuse the LoRA parameters for the inference mode."""
for maybe_lora_param, param in self.get_lora_matched_pair():
if len(maybe_lora_param) == 3:
lora_right_weight, \
lora_left_weight, \
lora_scaling = maybe_lora_param
param.data += lora_scaling * torch.matmul(lora_left_weight.t(), lora_right_weight.t())
def unfuse_lora(self):
"""Unfuse the LoRA parameters for the training mode."""
for maybe_lora_param, param in self.get_lora_matched_pair():
if len(maybe_lora_param) == 3:
lora_right_weight, \
lora_left_weight, \
lora_scaling = maybe_lora_param
param.data -= lora_scaling * torch.matmul(lora_left_weight.t(), lora_right_weight.t())
def apply_tensor_parallelism(self, mp_replace, reversed_dim=False):
"""
Add support for reversed dim in tensor parallelism. If necessary, override
the called methods to handle partitioned weights (i.e. if qkv is split, override
the `attention_qkv_mp` method). If the model component is not split, it should
be safe to use the default implementation.
"""
# Setup the new Attention module
self.attention_qkv_mp(mp_replace, reversed_dim=reversed_dim)
self.attention_o_mp(mp_replace, reversed_dim=reversed_dim)
# Setup the new MLP module
self.mlp_inter_mp(mp_replace, reversed_dim=reversed_dim)
self.mlp_output_mp(mp_replace, reversed_dim=reversed_dim)
# Apply weight quantization
# TODO(cmikeh2): Re-enable this once verified
#self.apply_weight_quantization()
def _release_params(self, param_pairs: List[Tuple[torch.Tensor, torch.Tensor]]):
"""
Helper for `release_[component]` methods. Accepts a list of tuples where the first
element is the module param that needs to be deleted, and the second is the reassignment
from the container.
"""
for module_param, container_param in param_pairs:
if module_param is not None:
del module_param
module_param = container_param
def release_memory(self):
"""
Delete module parameters if they exist and point them back to the container. The primary
purpose of this is for TP-inference with ZeRO-3. In this scenario, we need to delete the
parameters we've created for inference to free their memory.
"""
general_params = [
(self.module.attention.attn_ow, self.dense_w),
(self.module.attention.attn_ob, self.dense_b),
(self.module.mlp.attn_nw, self.attn_nw),
(self.module.mlp.attn_nb, self.attn_nb),
(self.module.norm_w, self.input_nw),
(self.module.norm_b, self.input_nb),
]
self._release_params(general_params)
self.release_qkv()
self.release_mlp()
def release_qkv(self):
"""
Release for QKV parameters (as well as any aliases).
"""
qkv_params = [
(self.module.attention.attn_qkvw, self.qkvw),
(self.module.attention.attn_qkvb, self.qkvb),
]
self._release_params(qkv_params)
def release_mlp(self):
"""
Release for MLP parameters (as well as any aliases).
"""
mlp_params = [
(self.module.mlp.inter_w, self._h4h_w),
(self.module.mlp.inter_b, self._h4h_b),
(self.module.mlp.output_w, self._4hh_w),
(self.module.mlp.output_b, self._4hh_b),
]
self._release_params(mlp_params)
def reset_params(self):
"""
The purpose of reset params is to get the weights from the FP16 training
copy of the model and copy them into the contiguous inference view. This only needs
to be performed when the container parameters cannot be used directly for inference.
"""
self.reset_qkv()
self.reset_mlp()
def reset_qkv(self):
"""
Perform any necessary resets of the model parameters for the QKV components.
"""
pass
def reset_mlp(self):
"""
Perform any necessary resets of the model parameters for the MLP components.
"""
pass
def get_lora_params(self):
"""
Return a list of all parameters that would have LoRA for the module.
"""
if not hasattr(self, "lora_params"):
self.set_lora_params()
return self.lora_params
def set_params_wo_copy(self, Z3_enabled=False):
"""
Rather than copying data into the module, set the parameters directly. This is necessary to provide
an inexpensive (low-memory-overhead) view onto the FP16 forward weights.
"""
self.module.mlp.attn_nw = self.attn_nw
self.module.mlp.attn_nb = self.attn_nb
self.module.norm_w = self.input_nw
self.module.norm_b = self.input_nb
self.set_attn_params_wo_copy(Z3_enabled=Z3_enabled)
self.set_mlp_params_wo_copy(Z3_enabled=Z3_enabled)
def set_attn_params_wo_copy(self, **kwargs):
"""
Narrower sub-method for finer grained overriding.
"""
self.module.attention.attn_ow = self.dense_w
self.module.attention.attn_ob = self.dense_b
self.module.attention.attn_qkvw = self.qkvw
self.module.attention.attn_qkvb = self.qkvb
def set_mlp_params_wo_copy(self, **kwargs):
"""
Narrower sub-method for finer grained overriding.
"""
self.module.mlp.inter_w = self._h4h_w
self.module.mlp.inter_b = self._h4h_b
self.module.mlp.output_w = self._4hh_w
self.module.mlp.output_b = self._4hh_b
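
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the file above): `fuse_lora` folds the
# low-rank update into the base weight so inference needs a single matmul, and
# `unfuse_lora` subtracts it back out before training resumes. Generic names,
# shapes, and the (A, B) factor convention below are assumptions for the demo;
# the exact (right, left, scaling) layout of the real LoRA modules is not
# reproduced here.
import torch

out_f, in_f, rank, scaling = 8, 8, 2, 0.5
weight = torch.randn(out_f, in_f)                    # base weight W
lora_a = torch.randn(rank, in_f)                     # down-projection A
lora_b = torch.randn(out_f, rank)                    # up-projection B
x = torch.randn(3, in_f)

unfused = x @ weight.t() + scaling * ((x @ lora_a.t()) @ lora_b.t())

fused_w = weight + scaling * (lora_b @ lora_a)       # fuse: W' = W + s * B A
assert torch.allclose(x @ fused_w.t(), unfused, atol=1e-5)

fused_w -= scaling * (lora_b @ lora_a)               # unfuse restores W
assert torch.allclose(fused_w, weight, atol=1e-6)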

# DeepSpeed-master/deepspeed/comm/ccl.py
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
'''
Copyright 2021 The Microsoft DeepSpeed Team
'''
import torch
from deepspeed.accelerator import get_accelerator
from .reduce_op import ReduceOp
from .torch import TorchBackend
def build_ccl_op():
builder = get_accelerator().create_op_builder("CCLCommBuilder")
if builder is None:
return None
ccl_cpp_module = builder.load()
print(f'DeepSpeed {builder.absolute_name()} built successfully')
return ccl_cpp_module
class CCLBackend(TorchBackend):
def __init__(self, name='ccl', rank=-1, world_size=-1, mpu=None, timeout=None, init_method=None):
self.ccl_comm_op = build_ccl_op()
if self.ccl_comm_op is None:
# set CCLBackend to uninitialized state if CCLCommBuilder cannot be loaded
self.initialized = False
return
super(CCLBackend, self).__init__(backend='ccl',
name='torch',
rank=rank,
world_size=world_size,
timeout=timeout,
init_method=init_method)
self.name = 'ccl'
size = self.get_world_size()
rank = self.get_rank()
main_kvs = self.ccl_comm_op.get_kvs_addr(rank)
main_kvs = torch.tensor(main_kvs).to(torch.uint8)
super(CCLBackend, self).broadcast(main_kvs, 0)
self.ccl_comm_op.initialize(size, rank, main_kvs)
self.initialized = True
def is_initialized(self):
return self.initialized
def broadcast(self, tensor, src, group=None, async_op=False):
self.ccl_comm_op.broadcast(tensor, src, group, async_op)
def all_reduce(self, tensor, op=ReduceOp.SUM, group=None, async_op=False):
use_caching = False
if use_caching:
match_id = f"{tensor.size()}-{op}"
self.ccl_comm_op.all_reduce_caching(tensor, op, match_id, group, async_op)
else:
self.ccl_comm_op.all_reduce(tensor, op, group, async_op)
def barrier(self, group=None, async_op=False):
self.ccl_comm_op.barrier(group, async_op)
| 2,248 | 34.698413 | 101 | py |
DeepSpeed | DeepSpeed-master/deepspeed/comm/torch.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed import utils
from .utils import *
from .backend import *
from .comm import *
import os
DS_COMM_ALL_GATHER_OFF = False
DS_COMM_REDUCE_SCATTER_OFF = False
DS_COMM_BROADCAST_OFF = False
DS_COMM_ALL_REDUCE_OFF = False
DS_COMM_REDUCE_OFF = False
def is_torch_two():
TORCH_MAJOR = int(torch.__version__.split('.')[0])
if TORCH_MAJOR >= 2:
return True
else:
return False
def torch_ver_ge_1_13():
if is_torch_two():
return True
else:
TORCH_MAJOR = int(torch.__version__.split('.')[0])
assert TORCH_MAJOR == 1
TORCH_MINOR = int(torch.__version__.split('.')[1])
return TORCH_MINOR >= 13
def has_coalescing_manager():
has_c10d = hasattr(torch.distributed, 'distributed_c10d')
return has_c10d and hasattr(torch.distributed.distributed_c10d, '_coalescing_manager')
def has_all_reduce_coalesced():
return hasattr(torch.distributed, "all_reduce_coalesced") and torch_ver_ge_1_13()
def get_coalescing_manager(group, device, reqs):
if is_torch_two():
return torch.distributed.distributed_c10d._coalescing_manager(group, device=device, reqs=reqs)
else:
return torch.distributed.distributed_c10d._coalescing_manager(group, reqs)
##Utilities to turn comm off
##TODO: move to base comm (wrapper)
def all_gather_comm_off(flag=False):
global DS_COMM_ALL_GATHER_OFF
DS_COMM_ALL_GATHER_OFF = flag
def reduce_scatter_comm_off(flag=False):
global DS_COMM_REDUCE_SCATTER_OFF
DS_COMM_REDUCE_SCATTER_OFF = flag
def broadcast_comm_off(flag=False):
global DS_COMM_BROADCAST_OFF
DS_COMM_BROADCAST_OFF = flag
def all_reduce_comm_off(flag=False):
global DS_COMM_ALL_REDUCE_OFF
DS_COMM_ALL_REDUCE_OFF = flag
def reduce_comm_off(flag=False):
global DS_COMM_REDUCE_OFF
DS_COMM_REDUCE_OFF = flag
# Assumption: all_gather and reduce_scatter are the collectives we care about here
def backward_comm_off(flag=False):
all_gather_comm_off(flag)
reduce_scatter_comm_off(flag)
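# Illustrative sketch (not part of the original API): one way these kill switches can be
# used to time the non-communication portion of a backward pass. The helper below is an
# assumption-only example and is never called by DeepSpeed itself.
def _example_backward_comm_off(loss):
    # Disable all_gather and reduce_scatter, run backward, then restore communication.
    backward_comm_off(True)
    try:
        loss.backward()
    finally:
        backward_comm_off(False)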
class Noop:
def wait(self):
return None
class TorchBackend(Backend):
"""
A light-weight wrapper class for torch.distributed API.
Only a subset of functions are wrapped. Once the init_process_group
is initialized, standard torch.distributed.* can be used directly
so no need to wrap all the functions. We can keep adding wrappers as
needed.
"""
def __init__(self, backend, timeout, init_method, rank=-1, world_size=-1, name='torch'):
super(TorchBackend, self).__init__()
self.has_all_reduce_coalesced = has_all_reduce_coalesced()
self.has_coalescing_manager = has_coalescing_manager()
self.all_gather_function = self.get_all_gather_function()
self.reduce_scatter_function = self.get_reduce_scatter_function()
self.initialized = True
self.name = name
# Future functionality to support ds.initialize() on a single GPU
# The idea is to fake that dist backend is initialized even when
# it is not so we can run on a single GPU without doing any init_process_group
self.single_gpu_mode = True
self.init_process_group(backend, timeout, init_method, rank, world_size)
@classmethod
def get_all_gather_function(self):
if hasattr(torch.distributed, "all_gather_into_tensor"):
return torch.distributed.all_gather_into_tensor
elif hasattr(torch.distributed, "_all_gather_base"):
return torch.distributed._all_gather_base
return None
@classmethod
def get_reduce_scatter_function(self):
if hasattr(torch.distributed, "reduce_scatter_tensor"):
return torch.distributed.reduce_scatter_tensor
elif hasattr(torch.distributed, "_reduce_scatter_base"):
return torch.distributed._reduce_scatter_base
return None
def has_all_gather_into_tensor(self):
return self.all_gather_function is not None
def has_reduce_scatter_tensor(self):
return self.reduce_scatter_function is not None
def init_process_group(self, backend, timeout, init_method, rank, world_size):
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend,
timeout=timeout,
init_method=init_method,
rank=rank,
world_size=world_size)
self.using_mpi = torch.distributed.get_backend() == 'mpi'
def all_reduce(self, tensor, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False):
op = self._reduce_op(op)
return torch.distributed.all_reduce(tensor=tensor, op=op, group=group, async_op=async_op)
def all_reduce_coalesced(self, tensors, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False):
""" proxy func to torch.distributed.all_reduce_coalesced,
which is included in PyTorch 1.13 and above
"""
if not self.has_all_reduce_coalesced:
raise RuntimeError(f"Current torch version does not have all_reduce_coalesced "
f"api (torch.__version__: {torch.__version__})")
op = self._reduce_op(op)
return torch.distributed.all_reduce_coalesced(tensors=tensors, op=op, group=group, async_op=async_op)
def reduce(self, tensor, dst, op=ReduceOp.SUM, group=None, async_op=False):
if DS_COMM_REDUCE_OFF:
if int(os.getenv('RANK', '0')) == 0:
utils.logger.warning("REDUCE is OFF")
return Noop()
return torch.distributed.reduce(tensor=tensor, dst=dst, op=self._reduce_op(op), group=group, async_op=async_op)
def reduce_scatter(self, output, input_list, op=ReduceOp.SUM, group=None, async_op=False):
if DS_COMM_REDUCE_SCATTER_OFF:
if int(os.getenv('RANK', '0')) == 0:
utils.logger.warning("REDUCE SCATTER is OFF")
return Noop()
else:
return torch.distributed.reduce_scatter(output=output,
input_list=input_list,
op=self._reduce_op(op),
group=group,
async_op=async_op)
def broadcast(self, tensor, src, group=None, async_op=False):
if DS_COMM_BROADCAST_OFF:
if int(os.getenv('RANK', '0')) == 0:
utils.logger.warning("BROADCAST is OFF")
return Noop()
else:
return torch.distributed.broadcast(tensor=tensor, src=src, group=group, async_op=async_op)
def all_gather(self, tensor_list, tensor, group=None, async_op=False):
if DS_COMM_ALL_GATHER_OFF:
if int(os.getenv('RANK', '0')) == 0:
utils.logger.warning("All Gather is OFF")
return Noop()
else:
return torch.distributed.all_gather(tensor_list=tensor_list, tensor=tensor, group=group, async_op=async_op)
def all_gather_into_tensor(self, output_tensor, input_tensor, group=None, async_op=False):
if self.has_all_gather_into_tensor():
return self.all_gather_function(output_tensor=output_tensor,
input_tensor=input_tensor,
group=group,
async_op=async_op)
def all_gather_base(self, output_tensor, input_tensor, group=None, async_op=False):
if DS_COMM_ALL_GATHER_OFF:
if int(os.getenv('RANK', '0')) == 0:
utils.logger.warning("All Gather is OFF")
return Noop()
else:
if hasattr(torch.distributed.distributed_c10d, "_all_gather_base"):
return torch.distributed.distributed_c10d._all_gather_base(output_tensor=output_tensor,
input_tensor=input_tensor,
group=group,
async_op=async_op)
else:
utils.logger.warning("unable to find torch.distributed._all_gather_base. will fall back to "
"torch.distributed.reduce_scatter which will result in suboptimal performance. "
"please consider upgrading your pytorch installation.")
pass
def all_gather_coalesced(self, output_tensors, input_tensors, group=None, async_op=False):
""""""
assert len(output_tensors) == len(input_tensors), ""
if hasattr(torch.distributed.distributed_c10d, '_all_gather_base_coalesced'):
# customized PyTorch
return torch.distributed.distributed_c10d._all_gather_base_coalesced(output_tensors,
input_tensors,
group=group,
async_op=async_op)
elif has_coalescing_manager():
reqs = []
with get_coalescing_manager(group, input_tensors[0].device, reqs):
for output, input in zip(output_tensors, input_tensors):
handle = torch.distributed.distributed_c10d.all_gather_into_tensor(output,
input,
group=group,
async_op=True)
reqs.append(handle)
if async_op:
return reqs[-1]
else:
reqs[-1].wait()
def reduce_scatter_tensor(self, output_tensor, input_tensor, op=ReduceOp.SUM, group=None, async_op=False):
if self.has_reduce_scatter_tensor():
return self.reduce_scatter_function(output_tensor,
input_tensor,
op=self._reduce_op(op),
group=group,
async_op=async_op)
else:
utils.logger.warning("unable to find torch.distributed.reduce_scatter_tensor. will fall back to "
"torch.distributed.reduce_scatter which will result in suboptimal performance. "
"please consider upgrading your pytorch installation.")
pass
def all_to_all_single(self,
output,
input,
output_split_sizes=None,
input_split_sizes=None,
group=None,
async_op=False):
return torch.distributed.all_to_all_single(output=output,
input=input,
output_split_sizes=output_split_sizes,
input_split_sizes=input_split_sizes,
group=group,
async_op=async_op)
def send(self, tensor, dst, group=None, tag=0):
return torch.distributed.send(tensor=tensor, dst=dst, group=group, tag=tag)
def recv(self, tensor, src=None, group=None, tag=0):
return torch.distributed.recv(tensor=tensor, src=src, group=group, tag=tag)
def isend(self, tensor, dst, group=None, tag=0):
return torch.distributed.isend(tensor=tensor, dst=dst, group=group, tag=tag)
def irecv(self, tensor, src=None, group=None, tag=0):
return torch.distributed.irecv(tensor=tensor, src=src, group=group, tag=tag)
def gather(self, tensor, gather_list=None, dst=0, group=None, async_op=False):
return torch.distributed.gather(tensor=tensor,
gather_list=gather_list,
dst=dst,
group=group,
async_op=async_op)
def scatter(self, tensor, scatter_list=None, src=0, group=None, async_op=False):
return torch.distributed.scatter(tensor=tensor,
scatter_list=scatter_list,
src=src,
group=group,
async_op=async_op)
def barrier(self, group=torch.distributed.GroupMember.WORLD, async_op=False, device_ids=None):
if group is None:
group = torch.distributed.GroupMember.WORLD
return torch.distributed.barrier(group=group, async_op=async_op, device_ids=device_ids)
def monitored_barrier(self, group=torch.distributed.GroupMember.WORLD, timeout=None, wait_all_ranks=False):
if group is None:
group = torch.distributed.GroupMember.WORLD
return torch.distributed.monitored_barrier(group=group, timeout=timeout, wait_all_ranks=wait_all_ranks)
def get_rank(self, group=None):
return torch.distributed.get_rank(group=group)
def get_world_size(self, group=None):
return torch.distributed.get_world_size(group=group)
def is_initialized(self):
return torch.distributed.is_initialized()
def get_backend(self, group=None):
return torch.distributed.get_backend(group=group)
def new_group(self, ranks):
return torch.distributed.new_group(ranks)
def get_global_rank(self, group, group_rank):
if hasattr(torch.distributed.distributed_c10d, "get_global_rank"):
from torch.distributed.distributed_c10d import get_global_rank as _get_global_rank
else:
from torch.distributed.distributed_c10d import _get_global_rank
return _get_global_rank(group, group_rank)
def get_world_group(self):
return torch.distributed.group.WORLD
def destroy_process_group(self, group=None):
return torch.distributed.destroy_process_group(group=group)
def _reduce_op(self, op):
'''
Helper function. If the op provided is not a torch.dist.ReduceOp, convert it and return
'''
if not isinstance(op, torch.distributed.ReduceOp):
if op == ReduceOp.SUM:
op = torch.distributed.ReduceOp.SUM
elif op == ReduceOp.PRODUCT:
op = torch.distributed.ReduceOp.PRODUCT
elif op == ReduceOp.AVG:
op = torch.distributed.ReduceOp.AVG
elif op == ReduceOp.MIN:
op = torch.distributed.ReduceOp.MIN
elif op == ReduceOp.MAX:
op = torch.distributed.ReduceOp.MAX
elif op == ReduceOp.BAND:
op = torch.distributed.ReduceOp.BAND
elif op == ReduceOp.BOR:
op = torch.distributed.ReduceOp.BOR
elif op == ReduceOp.BXOR:
op = torch.distributed.ReduceOp.BXOR
return op
# This will become a light-weight wrapper around torch.distributed functions
# TODO: create some example to show how this wrapper can help profile communication
# TODO: make sure there is no performance regression with this approach
# TODO: explore monkey-patching if this does not work
| 15,854 | 42.557692 | 119 | py |
DeepSpeed | DeepSpeed-master/deepspeed/comm/comm.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
DeepSpeed Communication Package: deepspeed.comm
deepspeed.comm
-- import and use deepspeed.ops.comm
-- use torch.distributed directly if both this package and torch.distributed use the same NCCL version
-- use custom collectives
-- can either use torch.dist or ds.ops.comm?
Note: the old 1-bit compressed allreduce variants that resided in deepspeed.runtime.comm will be moved here as well.
deepspeed.comm API
-- must be kept fully compatible (same signatures) as torch.dist API to ensure backward/cross-framework compatibility.
-- e.g. if a client code used
from deepspeed import comm as dist
instead of
import torch.distributed as dist
The code should work without breaking any of the public torch.distributed functionality
Future:
-- deepspeed groups API should be brought into ds.comm
"""
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from .reduce_op import *
from deepspeed.accelerator import get_accelerator
from deepspeed.comm.ccl import CCLBackend
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
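# A minimal sketch of the drop-in usage described in the module docstring above. This is
# an illustrative assumption (the helper is never called anywhere): client code that used
# torch.distributed can switch the import and keep the same collective call sites.
def _example_drop_in_usage():
    from deepspeed import comm as dist  # client-side import replacing torch.distributed
    dist.init_distributed()  # replaces torch.distributed.init_process_group(...)
    t = torch.ones(1).to(get_accelerator().device_name())
    dist.all_reduce(t)  # same call site as torch.distributed.all_reduce
    return t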
# Current deepspeed.comm backend (cdb) global object for simple access by client code
cdb = None
# Create global timer for ops
timers = timer.SynchronizedWallClockTimer()
timer_summary = {}
comms_logger = CommsLogger()
# Maintain objects of all initialized ds backends and assign them using the API functions in this file
nccl_backend = None
mpi_backend = None
ccl_backend = None
# This should be set here so all rank/size information from the launcher can be propagated
from deepspeed.comm.utils import *
class ProcessGroup():
def __init__(self, comm_id, ranks=[]):
self.ranks = ranks
self.comm_id = comm_id
self.size = len(ranks)
def _configure_using_config_file(config):
if config.comms_logger_enabled:
comms_logger.configure(config)
def configure(
deepspeed_config=None,
enabled=None,
prof_all=None,
prof_ops=None,
verbose=None,
debug=None,
):
if deepspeed_config is not None:
_configure_using_config_file(deepspeed_config.comms_config)
if enabled is not None:
comms_logger.enabled = enabled
if prof_all is not None:
comms_logger.prof_all = prof_all
if prof_ops is not None:
comms_logger.prof_ops = prof_ops
if verbose is not None:
comms_logger.verbose = verbose
if debug is not None:
comms_logger.debug = debug
# Logging wrapper for timing ops
def timed_op(func):
def log_wrapper(*args, **kwargs):
# Add enabled flag so that overhead to each comm op is two if conditions at most
if comms_logger.enabled:
if ('prof' in kwargs
and kwargs['prof']) or comms_logger.prof_all or ('log_name' in kwargs
and kwargs['log_name'] in comms_logger.prof_ops):
# Need func args for their defaults
func_args = get_default_args(func)
func_args.update(kwargs)
msg_size = get_msg_size_from_args(func, *args, **kwargs)
log_name = get_debug_log_name(func_args, comms_logger.debug)
timers(log_name).start()
# Return the op, then stop the op's timer
try:
return func(*args, **kwargs)
finally:
if comms_logger.enabled:
# Need to make op blocking for accurate logging
get_accelerator().synchronize()
# If we're using MPI, we can't simply sync the stream
if cdb.using_mpi:
cdb.barrier()
if ('prof' in kwargs and kwargs['prof']) or comms_logger.prof_all or (
'log_name' in kwargs and kwargs['log_name'] in comms_logger.prof_ops):
log_name = get_debug_log_name(func_args, comms_logger.debug)
raw_name = func.__name__
timers(log_name).stop()
# need temp var since 'elapsed' resets events
time_elapsed = timers(log_name).elapsed(reset=False)
comms_logger.append(raw_name, log_name, time_elapsed, msg_size)
return log_wrapper
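# Illustrative sketch (an assumption, not part of the public API): @timed_op can wrap any
# function that takes a tensor plus `prof`/`log_name` defaults. Note that the bandwidth
# logger in deepspeed.utils.comms_logging only understands the standard collective names,
# so a custom entry like this is only meaningful while profiling is left disabled.
@timed_op
def _example_timed_passthrough(tensor, prof=False, log_name='example_passthrough', debug=get_caller_func()):
    # Performs no communication; exists only to show how the decorator hooks latency/size logging.
    return tensor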
# For compatibility with torch distributed's init_process_group, we shall retain the signature from PyTorch code.
# DeepSpeed NCCL/MPI backend may not need all these params as we will have our own implementation.
# Please read full torch.distributed API docs from https://pytorch.org/docs/stable/distributed.html
# Helper function to initialize DS backends (used by init_distributed below)
def init_deepspeed_backend(ds_backend, timeout, init_method):
global cdb
global nccl_backend
global mpi_backend
global ccl_backend
rank = int(os.getenv('RANK', '-1'))
size = int(os.getenv('WORLD_SIZE', '-1'))
if ds_backend == NCCL_BACKEND:
utils.logger.warn("NCCL backend in DeepSpeed not yet implemented")
elif ds_backend == MPI_BACKEND:
utils.logger.warn("MPI backend in DeepSpeed not yet implemented")
elif ds_backend == GLOO_BACKEND:
utils.logger.warn("Gloo backend in DeepSpeed not yet implemented")
elif ds_backend == CCL_BACKEND:
ccl_backend = CCLBackend(rank=rank, world_size=size, timeout=timeout, init_method=init_method)
utils.logger.info(f"Initialize {ds_backend} backend")
else:
utils.logger.warn(f"DeepSpeed does not support {ds_backend} backend")
def is_initialized():
#assert cdb is not None, 'DeepSpeed backend not set, please initialize it using init_process_group()'
if cdb is None:
return False
else:
return cdb.is_initialized()
def destroy_process_group(group=None):
global cdb
return cdb.destroy_process_group(group=group)
def new_group(ranks):
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.new_group(ranks)
def is_available() -> bool:
# Returns ``True`` if the deepspeed comm package is available.
# TODO: load other ops. Clients including deepspeed itself should use deepspeed.comm to import
# any communication related primitives from this package.
# use hasattr(deepspeed.csrc.ops, "_comm") or something
return True
def set_backend():
global cdb
global nccl_backend
global mpi_backend
global ccl_backend
backend_name = get_accelerator().communication_backend_name()
if backend_name == NCCL_BACKEND:
if nccl_backend is not None and nccl_backend.is_initialized():
cdb = nccl_backend
elif backend_name == MPI_BACKEND:
if mpi_backend is not None and mpi_backend.is_initialized():
cdb = mpi_backend
elif backend_name == CCL_BACKEND:
if ccl_backend is not None and ccl_backend.is_initialized():
cdb = ccl_backend
@timed_op
def broadcast(tensor, src, group=None, async_op=False, prof=False, log_name='broadcast', debug=get_caller_func()):
global cdb
return cdb.broadcast(tensor=tensor, src=src, group=group, async_op=async_op)
@timed_op
def all_gather(tensor_list,
tensor,
group=None,
async_op=False,
prof=False,
log_name='all_gather',
debug=get_caller_func()):
global cdb
return cdb.all_gather(tensor_list=tensor_list, tensor=tensor, group=group, async_op=async_op)
def has_reduce_scatter_tensor():
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.has_reduce_scatter_tensor()
def reduce_scatter_fn(output_tensor,
tensor,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
debug=get_caller_func()):
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
if cdb.has_reduce_scatter_tensor():
return reduce_scatter_tensor(output_tensor,
tensor,
op=op,
group=group,
async_op=async_op,
prof=prof,
debug=debug)
else:
if get_rank() == 0:
utils.logger.warning_once("unable to find torch.distributed.reduce_scatter_tensor. will fall back to "
"torch.distributed.reduce_scatter which will result in suboptimal performance. "
"please consider upgrading your pytorch installation.")
input_tensor_lst = list(torch.chunk(tensor, cdb.get_world_size(group)))
return reduce_scatter(output_tensor,
input_tensor_lst,
op=op,
group=group,
async_op=async_op,
prof=prof,
debug=debug)
@timed_op
def reduce_scatter_tensor(output_tensor,
tensor,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
log_name='reduce_scatter_tensor',
debug=get_caller_func()):
global cdb
return cdb.reduce_scatter_tensor(output_tensor=output_tensor,
input_tensor=tensor,
op=op,
group=group,
async_op=async_op)
@timed_op
def all_gather_into_tensor(output_tensor,
tensor,
group=None,
async_op=False,
prof=False,
log_name='all_gather_into_tensor',
debug=get_caller_func()):
global cdb
return cdb.all_gather_into_tensor(output_tensor=output_tensor, input_tensor=tensor, group=group, async_op=async_op)
def has_all_gather_into_tensor():
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.has_all_gather_into_tensor()
def allgather_fn(output_tensor, input_tensor, group=None, async_op=False, debug=get_caller_func()):
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
if cdb.has_all_gather_into_tensor():
return all_gather_into_tensor(output_tensor, input_tensor, group=group, async_op=async_op, debug=debug)
else:
if get_rank() == 0:
utils.logger.warning_once("unable to find torch.distributed.all_gather_into_tensor. will fall back to "
"torch.distributed.all_gather which will result in suboptimal performance. "
"please consider upgrading your pytorch installation.")
output_tensors = list(torch.chunk(output_tensor, cdb.get_world_size(group)))
return all_gather(output_tensors, input_tensor, group=group, async_op=async_op, debug=debug)
@timed_op
def all_to_all_single(output,
tensor,
output_split_sizes=None,
input_split_sizes=None,
group=None,
async_op=False,
prof=False,
log_name='all_to_all_single',
debug=get_caller_func()):
global cdb
return cdb.all_to_all_single(output=output,
input=tensor,
output_split_sizes=output_split_sizes,
input_split_sizes=input_split_sizes,
group=group,
async_op=async_op)
@timed_op
def send(tensor, dst, group=None, tag=0, prof=False, log_name='send', debug=get_caller_func()):
global cdb
return cdb.send(tensor=tensor, dst=dst, group=group, tag=tag)
@timed_op
def recv(tensor, src=None, group=None, tag=0, prof=False, log_name='recv', debug=get_caller_func()):
global cdb
return cdb.recv(tensor=tensor, src=src, group=group, tag=tag)
@timed_op
def isend(tensor, dst, group=None, tag=0, prof=False, log_name='isend', debug=get_caller_func()):
global cdb
return cdb.isend(tensor=tensor, dst=dst, group=group, tag=tag)
@timed_op
def irecv(tensor, src=None, group=None, tag=0, prof=False, log_name='irecv', debug=get_caller_func()):
global cdb
return cdb.irecv(tensor=tensor, src=src, group=group, tag=tag)
@timed_op
def gather(tensor,
gather_list=None,
dst=0,
group=None,
async_op=False,
prof=False,
log_name='gather',
debug=get_caller_func()):
global cdb
return cdb.gather(tensor=tensor, gather_list=gather_list, dst=dst, group=group, async_op=async_op)
@timed_op
def scatter(tensor,
scatter_list=None,
src=0,
group=None,
async_op=False,
prof=False,
log_name='scatter',
debug=get_caller_func()):
global cdb
return cdb.scatter(tensor=tensor, scatter_list=scatter_list, src=src, group=group, async_op=async_op)
@timed_op
def barrier(group=None, async_op=False, device_ids=None, prof=False, log_name='barrier', debug=get_caller_func()):
global cdb
return cdb.barrier(group=group, async_op=async_op)
@timed_op
def monitored_barrier(group=None,
timeout=None,
wait_all_ranks=False,
prof=False,
log_name='monitored_barrier',
debug=get_caller_func()):
global cdb
return cdb.monitored_barrier(group=group, timeout=timeout, wait_all_ranks=wait_all_ranks)
def log_summary(show_straggler=False):
global cdb
barrier(log_name='log_summary_barrier')
if cdb.get_rank() == 0:
comms_logger.log_all(print_log=True, show_straggler=show_straggler)
else:
comms_logger.log_all(print_log=False, show_straggler=show_straggler)
barrier(log_name='log_summary_barrier')
@timed_op
def reduce(tensor,
dst,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
log_name='reduce',
debug=get_caller_func()):
global cdb
return cdb.reduce(tensor=tensor, dst=dst, op=op, group=group, async_op=async_op)
@timed_op
def reduce_scatter(output,
input_list,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
log_name='reduce_scatter',
debug=get_caller_func()):
global cdb
return cdb.reduce_scatter(output=output, input_list=input_list, op=op, group=group, async_op=async_op)
def has_all_reduce_coalesced():
""""""
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
assert cdb.has_all_reduce_coalesced is not None, 'has_all_reduce_coalesced is not yet defined'
return cdb.has_all_reduce_coalesced
def has_coalescing_manager():
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
assert cdb.has_coalescing_manager is not None, 'has_coalescing_manager is not yet defined'
return cdb.has_coalescing_manager
def all_gather_coalesced(output_tensors, input_tensors, group=None, async_op=False):
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.all_gather_coalesced(output_tensors, input_tensors, group=group, async_op=async_op)
@timed_op
def all_reduce(tensor,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
log_name='all_reduce',
debug=get_caller_func()):
#if profile_comm:
# context of the timers?
# timers.start()
# TensorBoard logging for comm calls.?
global cdb
#print(f'op = {op}, cdb= {cdb.name}')
return cdb.all_reduce(tensor, op, group, async_op)
@timed_op
def all_reduce_coalesced(tensors,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
log_name='all_reduce_coalesced',
debug=get_caller_func()):
global cdb
return cdb.all_reduce_coalesced(tensors, op, group, async_op)
def get_world_group():
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.get_world_group()
def get_world_size(group=None) -> int:
"""
Returns the number of processes in the current process group
Args:
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
Returns:
The world size of the process group
-1, if not part of the group
"""
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.get_world_size(group)
def get_rank(group=None):
"""
Returns the rank of the current process in the provided ``group`` or the
default group if none was provided.
Rank is a unique identifier assigned to each process within a distributed
process group. They are always consecutive integers ranging from 0 to
``world_size - 1``.
Args:
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
Returns:
The rank of the process group
-1, if not part of the group
"""
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.get_rank(group)
def get_local_rank():
"""
Helper function to get local rank after a backend has been set and initialized
Args:
None
Returns:
local rank (= GPU device ID)
"""
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return get_local_rank_from_launcher()
def get_global_rank(group=None, group_rank=0):
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.get_global_rank(group, group_rank)
def get_all_ranks_from_group(group=None):
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
rank = 0
group_ranks = []
try:
while True:
group_ranks.append(cdb.get_global_rank(group, rank))
rank += 1
except RuntimeError:
pass
return group_ranks
# Main DeepSpeed Comms. public API.
def init_distributed(dist_backend=None,
auto_mpi_discovery=True,
distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT,
verbose=True,
timeout=default_pg_timeout,
init_method=None,
dist_init_required=None,
config=None,
rank=-1,
world_size=-1):
''' Initialize dist backend, potentially performing MPI discovery if needed
Arguments:
dist_backend: Optional (str). torch distributed backend, e.g., nccl, mpi, gloo
auto_mpi_discovery: Optional (bool). If distributed environment variables are not set, attempt to discover them from MPI
distributed_port: Optional (int). torch distributed backend port
verbose: Optional (bool). verbose logging
timeout: Optional (timedelta). Timeout for operations executed against the process group. Default value equals 30 minutes.
init_method: Optional (string). Torch distributed, URL specifying how to initialize the process group. Default is “env://” if no init_method or store is specified.
config: Optional (dict). DeepSpeed configuration for setting up comms options (e.g. Comms profiling)
rank: Optional (int). The current manually specified rank. Some init_method like “tcp://” need the rank and world_size as well (see: https://pytorch.org/docs/stable/distributed.html#tcp-initialization)
world_size: Optional (int). Desired world_size for the TCP or Shared file-system initialization.
'''
global cdb
configure(deepspeed_config=config)
if dist_init_required is None:
dist_init_required = cdb is None or not cdb.is_initialized()
if cdb is None:
if torch.distributed.is_initialized():
# The user initialized torch.dist themselves, create cdb and short-circuit
cdb = TorchBackend(dist_backend, timeout, init_method)
return
else:
init_deepspeed_backend(get_accelerator().communication_backend_name(), timeout, init_method)
set_backend()
utils.logger.info(f'cdb={cdb}')
if dist_init_required is False:
assert (
cdb is not None and cdb.is_initialized() is True
), "Distributed backend is not initialized. Please set dist_init_required to True or initialize before calling deepspeed.initialize()"
else:
# Initialize torch distributed if needed
required_env = ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
if auto_mpi_discovery and not all(map(lambda v: v in os.environ, required_env)):
if verbose:
utils.logger.info("Not using the DeepSpeed or dist launchers, attempting to detect MPI environment...")
if in_aml() and not in_dlts():
patch_aml_env_for_torch_nccl_backend(verbose=verbose)
elif in_aws_sm():
patch_aws_sm_env_for_torch_nccl_backend(verbose=verbose)
else:
mpi_discovery(distributed_port=distributed_port, verbose=verbose)
if cdb is not None and cdb.is_initialized():
if int(os.getenv('RANK', '0')) == 0:
utils.logger.info('Distributed backend already initialized')
else:
assert isinstance(timeout, timedelta)
if dist_backend is None:
dist_backend = get_accelerator().communication_backend_name()
if int(os.getenv('RANK', '0')) == 0:
utils.logger.info('Initializing TorchBackend in DeepSpeed with backend {}'.format(dist_backend))
# Create a torch backend object, initialize torch distributed, and assign to cdb
cdb = TorchBackend(dist_backend, timeout, init_method, rank, world_size)
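# Illustrative usage sketch (assumption-only, never invoked here). Under the deepspeed or
# torchrun launchers the env:// defaults are enough; a manual TCP rendezvous needs the
# explicit init_method/rank/world_size arguments, as noted in the docstring above.
def _example_init_distributed():
    init_distributed(dist_backend='nccl')  # launcher provides RANK/WORLD_SIZE/MASTER_*
    # init_distributed(dist_backend='nccl', init_method='tcp://127.0.0.1:29500',
    #                  rank=0, world_size=1)  # manual initialization without a launcher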
def mpi_discovery(distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT, verbose=True):
'''
Discover the MPI environment via mpi4py and map it to the relevant dist state
'''
from mpi4py import MPI
import subprocess
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
world_size = comm.Get_size()
master_addr = None
if rank == 0:
hostname_cmd = ["hostname -I"]
result = subprocess.check_output(hostname_cmd, shell=True)
master_addr = result.decode('utf-8').split()[0]
master_addr = comm.bcast(master_addr, root=0)
# Determine local rank by assuming hostnames are unique
proc_name = MPI.Get_processor_name()
all_procs = comm.allgather(proc_name)
local_rank = sum([i == proc_name for i in all_procs[:rank]])
os.environ['RANK'] = str(rank)
os.environ['WORLD_SIZE'] = str(world_size)
os.environ['LOCAL_RANK'] = str(local_rank)
os.environ['MASTER_ADDR'] = master_addr
os.environ['MASTER_PORT'] = str(distributed_port)
if verbose:
utils.logger.info(
"Discovered MPI settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}".
format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'],
os.environ['MASTER_PORT']))
if cdb is not None and cdb.is_initialized():
assert cdb.get_rank() == rank, "MPI rank {} does not match torch rank {}".format(rank, cdb.get_rank())
assert cdb.get_world_size() == world_size, "MPI world size {} does not match torch world size {}".format(
world_size, cdb.get_world_size())
def in_aml():
# Are we running inside an Azure Machine Learning (AML) environment?
return 'AZUREML_EXPERIMENT_ID' in os.environ
def in_aws_sm():
# Are we running inside an AWS SageMaker environment?
return 'SM_TRAINING_ENV' in os.environ
def in_dlts():
# Are we running on a DLTS cluster?
return 'DLTS_JOB_ID' in os.environ
def patch_aml_env_for_torch_nccl_backend(master_port=6105, verbose=True):
"""Helper routine to get and set environment variables.
This is adapted from Azure ML's documentation available from:
https://azure.github.io/azureml-web/docs/cheatsheet/distributed-training/#environment-variables-from-openmpi
"""
os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"]
os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"]
single_node = int(os.environ["OMPI_COMM_WORLD_LOCAL_SIZE"]) == int(os.environ["WORLD_SIZE"])
if not single_node:
master_node_params = os.environ["AZ_BATCH_MASTER_NODE"].split(":")
os.environ["MASTER_ADDR"] = master_node_params[0]
# Do not overwrite master port with that defined in AZ_BATCH_MASTER_NODE
if "MASTER_PORT" not in os.environ:
os.environ["MASTER_PORT"] = str(master_port)
else:
os.environ["MASTER_ADDR"] = os.environ["AZ_BATCHAI_MPI_MASTER_NODE"]
os.environ["MASTER_PORT"] = DEFAULT_AML_MASTER_PORT
if verbose:
utils.logger.info("NCCL_SOCKET_IFNAME original value = {}".format(os.environ["NCCL_SOCKET_IFNAME"]))
os.environ["NCCL_SOCKET_IFNAME"] = DEFAULT_AML_NCCL_SOCKET_IFNAME
os.environ['LOCAL_RANK'] = os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]
if verbose:
utils.logger.info(
"Discovered AzureML settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}"
.format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'],
os.environ['MASTER_PORT']))
def patch_aws_sm_env_for_torch_nccl_backend(verbose=True):
"""Helper routine to get and set environment variables when running inside an AWS SageMaker environment.
"""
os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"]
os.environ['LOCAL_RANK'] = os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]
os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"]
if verbose:
utils.logger.info(
"Discovered AWS SageMaker settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}"
.format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'],
os.environ['MASTER_PORT']))
| 28,373 | 36.983936 | 209 | py |
DeepSpeed | DeepSpeed-master/deepspeed/comm/utils.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import inspect
from deepspeed.utils import get_caller_func
def get_local_rank_from_launcher():
# DeepSpeed launcher will set it so get from there
rank = os.environ.get('LOCAL_RANK')
if rank is None:
rank = os.environ.get('OMPI_COMM_WORLD_LOCAL_RANK')
# Make it a single process job and set rank to 0
if rank is None:
rank = 0
return int(rank)
def get_world_rank_from_launcher():
# DeepSpeed launcher will set it so get from there
rank = os.environ.get('RANK')
if rank is None:
rank = os.environ.get('OMPI_COMM_WORLD_RANK')
# Make it a single process job and set rank to 0
if rank is None:
rank = 0
return int(rank)
def get_world_size_from_launcher():
# DeepSpeed launcher will set it so get from there
size = os.environ.get('WORLD_SIZE')
rank = os.environ.get('RANK')
if size is None:
size = os.environ.get('OMPI_COMM_WORLD_SIZE')
# Make it a single process job and set size to 1
if size is None:
size = 1
if rank is None or int(rank) == 0:
print(f"set world size to {size}")
return int(size)
def get_default_args(func):
signature = inspect.signature(func)
return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
# We need this hacky function since torch doesn't consistently name or place the input tensor args
def get_tensor_position(func):
sig_params = inspect.signature(func).parameters
arg = None
# most colls
if 'tensor' in sig_params:
arg = 'tensor'
# all_reduce_coalesced coll
elif 'tensors' in sig_params:
arg = 'tensors'
# reduce scatter coll
elif 'input_list' in sig_params:
arg = 'input_list'
# all_to_all and torch multiGPU colls
elif 'input_tensor_list' in sig_params:
arg = 'input_tensor_list'
if arg is None:
return -1
else:
return list(sig_params).index(arg)
def get_tensor_kwarg(func, kwargs):
func_args = get_default_args(func)
func_args.update(kwargs)
arg = None
if 'tensor' in func_args:
arg = func_args['tensor']
elif 'tensors' in func_args:
arg = func_args['tensors']
elif 'input_list' in func_args:
arg = func_args['input_list']
elif 'input_tensor_list' in func_args:
arg = func_args['input_tensor_list']
return arg
def get_msg_size_from_args(func, *args, **kwargs):
# 3 cases:
# - tensor arg is in args
# - tensor arg is in kwargs
# - tensor arg is not present (e.g. barrier)
tensor_arg_position = -1
tensor_arg = None
# check if tensor arg is in args
if len(args) > 0:
tensor_arg_position = get_tensor_position(func)
if tensor_arg_position > -1:
tensor_arg = args[get_tensor_position(func)]
# check if tensor arg is in kwargs
if tensor_arg is None and len(kwargs) > 0:
tensor_arg = get_tensor_kwarg(func, kwargs)
# if tensor arg is not present, no data is being transmitted
if tensor_arg is None:
return 0
else:
# Sum of tensor sizes for list colls such as torch's all_to_all
# NOTE: msg_size for list colls will not be the actual size transmitted by a given MPI/NCCL call within the coll op. Instead, it's the total amount of data transmitted.
if type(tensor_arg) is list:
return sum(x.element_size() * x.nelement() for x in tensor_arg)
else:
return tensor_arg.element_size() * tensor_arg.nelement()
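# Illustrative sketch (an assumption for documentation only) of how the helpers above
# resolve the transmitted message size from a positional tensor argument.
def _example_msg_size():
    import torch

    # Stand-in that follows the same parameter naming convention as the torch collectives.
    def fake_all_reduce(tensor, group=None, async_op=False):
        return tensor

    t = torch.zeros(1024, dtype=torch.float32)
    # 1024 elements * 4 bytes each -> 4096 bytes reported.
    return get_msg_size_from_args(fake_all_reduce, t)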
def get_debug_log_name(func_args, debug):
if debug:
return func_args['log_name'] + ' | [Caller Func: ' + get_caller_func() + ']'
else:
return func_args['log_name']
| 3,842 | 27.679104 | 176 | py |
DeepSpeed | DeepSpeed-master/deepspeed/comm/backend.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
DeepSpeed Communication Backend.
In the future, directly use NCCL/MPI/Gloo/etc without requiring torch.distributed. Simply wrap torch.distributed for now.
# Custom DS Backends -- Direct C/Ops
- NCCL -- [EXPERIMENTAL]
- MPI -- [EXPERIMENTAL]
- RCCL -- [EXPERIMENTAL]
- GLOO -- [EXPERIMENTAL]
# DS backend wrapper for torch.distributed [DEFAULT]
- T-NCCL -- [DEFAULT]
- T-GLOO
- T-MPI
# Backend is the base class
-- NcclBackend, MpiBackend, and TorchBackend are the main subclasses. TorchBackend is the only officially supported backend for now.
"""
class Backend(object):
def __init__(self, name='backend', rank=0, size=1):
self.name = name
# The world size and rank of the world process group
self.world_group = None
self.world_size = size
self.world_rank = rank
# Single process group (pg) implementation for now but keep a list for future
self.process_groups = []
self.initialized = False
def is_initialized(self):
return self.initialized
def new_group(self):
# create a new pg and add it to pg list
pass
def init_process_group(self):
# subclasses will initialize them fully
# - initialize a default world process group and add it to pg list
self.initialized = True
| 1,416 | 28.520833 | 137 | py |
DeepSpeed | DeepSpeed-master/deepspeed/monitor/tensorboard.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .utils import check_tb_availability
from .monitor import Monitor
import os
import deepspeed.comm as dist
class TensorBoardMonitor(Monitor):
def __init__(self, tensorboard_config):
super().__init__(tensorboard_config)
check_tb_availability()
self.summary_writer = None
self.enabled = tensorboard_config.enabled
self.output_path = tensorboard_config.output_path
self.job_name = tensorboard_config.job_name
if self.enabled and dist.get_rank() == 0:
self.get_summary_writer()
def get_summary_writer(self, base=os.path.join(os.path.expanduser("~"), "tensorboard")):
if self.enabled and dist.get_rank() == 0:
from torch.utils.tensorboard import SummaryWriter
if self.output_path is not None:
log_dir = os.path.join(self.output_path, self.job_name)
# NOTE: This code path currently is never used since the default output_path is an empty string and not None. Saving it in case we want this functionality in the future.
else:
if "DLWS_JOB_ID" in os.environ:
infra_job_id = os.environ["DLWS_JOB_ID"]
elif "DLTS_JOB_ID" in os.environ:
infra_job_id = os.environ["DLTS_JOB_ID"]
else:
infra_job_id = "unknown-job-id"
summary_writer_dir_name = os.path.join(infra_job_id, "logs")
log_dir = os.path.join(base, summary_writer_dir_name, self.output_path)
os.makedirs(log_dir, exist_ok=True)
self.summary_writer = SummaryWriter(log_dir=log_dir)
return self.summary_writer
def write_events(self, event_list, flush=True):
if self.enabled and self.summary_writer is not None and dist.get_rank() == 0:
for event in event_list:
self.summary_writer.add_scalar(*event)
if flush:
self.summary_writer.flush()
def flush(self):
if self.enabled and self.summary_writer is not None and dist.get_rank() == 0:
self.summary_writer.flush()
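# Illustrative usage sketch (assumption-only): driving the monitor with a handcrafted
# config object. In normal use the config comes from the DeepSpeed JSON config, and a
# deepspeed.comm backend must already be initialized since only rank 0 writes events.
def _example_tensorboard_monitor():
    from types import SimpleNamespace
    cfg = SimpleNamespace(enabled=True, output_path="./runs", job_name="example_job")
    monitor = TensorBoardMonitor(cfg)
    # Each event is a (tag, scalar_value, global_step) tuple consumed by add_scalar.
    monitor.write_events([("Train/Samples/loss", 0.5, 1)])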
| 2,227 | 38.087719 | 181 | py |
DeepSpeed | DeepSpeed-master/deepspeed/monitor/utils.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
def check_tb_availability():
try:
# torch.utils.tensorboard will fail if `tensorboard` is not available,
# see their docs for more details: https://pytorch.org/docs/1.8.0/tensorboard.html
import tensorboard # noqa: F401
except ImportError:
print('If you want to use tensorboard logging, please `pip install tensorboard`')
raise
def check_wandb_availability():
try:
import wandb # noqa: F401
except ImportError:
print(
'If you want to use wandb logging, please `pip install wandb` and follow the instructions at https://docs.wandb.ai/quickstart'
)
raise
| 754 | 29.2 | 138 | py |
DeepSpeed | DeepSpeed-master/deepspeed/elasticity/elastic_agent.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent
from typing import Any, Dict, Optional, Tuple
from datetime import datetime
from torch.distributed.elastic.agent.server.api import log, _get_socket_with_port
from torch.distributed.elastic.metrics import put_metric
from torch.distributed.elastic.agent.server.api import (
RunResult,
WorkerGroup,
WorkerSpec,
WorkerState,
)
from torch.distributed import Store
import time
import os
from torch.distributed.elastic.multiprocessing import start_processes
from torch.distributed.elastic.utils import macros
import shutil
import copy
from contextlib import closing
import subprocess
class DSElasticAgent(LocalElasticAgent):
def __init__(
self,
spec: WorkerSpec,
env: Dict,
start_method="spawn",
exit_barrier_timeout: float = 300,
log_dir: Optional[str] = None,
):
super().__init__(spec, start_method, exit_barrier_timeout, log_dir)
self.ds_env = env
@staticmethod
def _set_master_addr_port(store: Store, master_addr: Optional[str], master_port: Optional[int]):
if master_port is None:
sock = _get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
if master_addr is None:
# master_addr = _get_fq_hostname()
result = subprocess.check_output("hostname -I", shell=True)
master_addr = result.decode('utf-8').split()[0]
store.set("MASTER_ADDR", master_addr.encode(encoding="UTF-8"))
store.set("MASTER_PORT", str(master_port).encode(encoding="UTF-8"))
def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]:
spec = worker_group.spec
store = worker_group.store
assert store is not None
master_addr, master_port = super()._get_master_addr_port(store)
restart_count = spec.max_restarts - self._remaining_restarts
use_agent_store = spec.rdzv_handler.get_backend() == "static"
args: Dict[int, Tuple] = {}
envs: Dict[int, Dict[str, str]] = {}
for worker in worker_group.workers:
local_rank = worker.local_rank
worker_env_ds = copy.deepcopy(self.ds_env)
worker_env_elastic = {
"LOCAL_RANK": str(local_rank),
"RANK": str(worker.global_rank),
"GROUP_RANK": str(worker_group.group_rank),
"ROLE_RANK": str(worker.role_rank),
"ROLE_NAME": spec.role,
"LOCAL_WORLD_SIZE": str(spec.local_world_size),
"WORLD_SIZE": str(worker.world_size),
"GROUP_WORLD_SIZE": str(worker_group.group_world_size),
"ROLE_WORLD_SIZE": str(worker.role_world_size),
"MASTER_ADDR": master_addr,
"MASTER_PORT": str(master_port),
"TORCHELASTIC_RESTART_COUNT": str(restart_count),
"TORCHELASTIC_MAX_RESTARTS": str(spec.max_restarts),
"TORCHELASTIC_RUN_ID": spec.rdzv_handler.get_run_id(),
"TORCHELASTIC_USE_AGENT_STORE": str(use_agent_store),
"NCCL_ASYNC_ERROR_HANDLING": os.getenv("NCCL_ASYNC_ERROR_HANDLING", str(1)),
}
worker_env_ds.update(worker_env_elastic)
if "OMP_NUM_THREADS" in os.environ:
worker_env_ds["OMP_NUM_THREADS"] = os.environ["OMP_NUM_THREADS"]
envs[local_rank] = worker_env_ds
worker_args = list(spec.args)
worker_args = macros.substitute(worker_args, str(local_rank))
args[local_rank] = tuple(worker_args)
# scaling events do not count towards restarts (gets same attempt #)
# remove existing log dir if this restart is due to a scaling event
attempt_log_dir = os.path.join(self._log_dir, f"attempt_{restart_count}")
shutil.rmtree(attempt_log_dir, ignore_errors=True)
os.makedirs(attempt_log_dir)
assert spec.entrypoint is not None
self._pcontext = start_processes(
name=spec.role,
entrypoint=spec.entrypoint,
args=args,
envs=envs,
log_dir=attempt_log_dir,
start_method=self._start_method,
redirects=spec.redirects,
tee=spec.tee,
)
return self._pcontext.pids()
def _invoke_run(self, role: str = "default") -> RunResult:
# NOTE: currently only works for a single role
spec = self._worker_group.spec
role = spec.role
log.info(f"[{role}] starting workers for entrypoint: {spec.get_entrypoint_name()}")
self._initialize_workers(self._worker_group)
monitor_interval = spec.monitor_interval
rdzv_handler = spec.rdzv_handler
participants = rdzv_handler._state_holder.state.participants
while True:
assert self._worker_group.state != WorkerState.INIT
time.sleep(monitor_interval)
run_result = self._monitor_workers(self._worker_group)
state = run_result.state
self._worker_group.state = state
expire_time = datetime.utcnow() - (rdzv_handler._settings.keep_alive_interval *
rdzv_handler._settings.keep_alive_max_attempt)
_dead_nodes = [
node for node, last_heartbeat in rdzv_handler._state_holder.state.last_heartbeats.items()
if last_heartbeat < expire_time
]
put_metric(f"workers.{role}.remaining_restarts", self._remaining_restarts)
put_metric(f"workers.{role}.{state.name.lower()}", 1)
if state == WorkerState.SUCCEEDED:
log.info(f"[{role}] worker group successfully finished."
f" Waiting {self._exit_barrier_timeout} seconds for other agents to finish.")
self._exit_barrier()
return run_result
elif state in {WorkerState.UNHEALTHY, WorkerState.FAILED
} or len(participants) > len(rdzv_handler._state_holder.state.participants):
if self._remaining_restarts > 0:
log.info(f"[{role}] Worker group {state.name}. "
f"{self._remaining_restarts}/{spec.max_restarts} attempts left;"
f" will restart worker group")
self._remaining_restarts -= 1
# rdzv_handler._state_holder.state.restart = False
self._restart_workers(self._worker_group)
participants = rdzv_handler._state_holder.state.participants
else:
self._stop_workers(self._worker_group)
self._worker_group.state = WorkerState.FAILED
self._exit_barrier()
return run_result
elif state == WorkerState.HEALTHY:
# membership changes do not count as retries
num_nodes_waiting = rdzv_handler.num_nodes_waiting()
group_rank = self._worker_group.group_rank
if num_nodes_waiting > 0:
log.info(f"[{role}] Detected {num_nodes_waiting} "
f"new nodes from group_rank={group_rank}; "
f"will restart worker group")
self._restart_workers(self._worker_group)
participants = rdzv_handler._state_holder.state.participants
else:
raise Exception(f"[{role}] Worker group in {state.name} state")
| 7,762 | 41.653846 | 105 | py |
DeepSpeed | DeepSpeed-master/deepspeed/elasticity/utils.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
def is_torch_elastic_compatible():
'''
Helper to look up the torch version. Elastic training support was
introduced in torch 1.11.x
'''
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 1 and TORCH_MINOR >= 11:
return True
else:
return False
| 459 | 22 | 59 | py |
DeepSpeed | DeepSpeed-master/deepspeed/elasticity/__init__.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .elasticity import compute_elastic_config, elasticity_enabled, ensure_immutable_elastic_config
from .utils import is_torch_elastic_compatible
from .constants import ENABLED, ENABLED_DEFAULT, ELASTICITY
if is_torch_elastic_compatible():
from .elastic_agent import DSElasticAgent
| 383 | 33.909091 | 99 | py |
DeepSpeed | DeepSpeed-master/deepspeed/utils/comms_logging.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import math
from deepspeed.utils import log_dist
def get_caller_func(frame=3):
import sys
return sys._getframe(frame).f_code.co_name
def print_rank_0(message):
import deepspeed.comm as dist
if dist.get_rank() == 0:
print(message)
# Helper function to pretty-print message sizes
def convert_size(size_bytes):
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
# Helper function to calculate algbw and busbw.
# See https://gist.github.com/jeffra/b5e80466b4c86be00ea3b6f130fb7a36 and https://github.com/NVIDIA/nccl-tests/blob/master/doc/PERFORMANCE.md
def calc_bw_log(comm_op, size, duration):
import deepspeed.comm as dist
n = dist.get_world_size()
tput = 0
busbw = 0
if comm_op == "all_to_all_single":
tput = (size / duration)
busbw = (size / duration) * ((n - 1) / n)
elif comm_op == "all_gather" or comm_op == "all_gather_into_tensor" or comm_op == "reduce_scatter" or comm_op == "reduce_scatter_tensor":
size *= n
tput = (size / duration)
busbw = (size / duration) * ((n - 1) / n)
elif comm_op == "all_reduce" or comm_op == "all_reduce_coalesced":
tput = (size * 2 / duration)
busbw = (size / duration) * (2 * (n - 1) / n)
elif comm_op == "send" or comm_op == "recv" or comm_op == "isend" or comm_op == "irecv" or comm_op == "broadcast" or comm_op == "reduce" or comm_op == "gather" or comm_op == "scatter" or comm_op == "barrier":
tput = (size / duration)
busbw = tput
else:
print_rank_0("wrong comm_op specified") # noqa: F821
exit(0)
# convert to Gbps
tput *= 8
busbw *= 8
tput /= 1e6
busbw /= 1e6
return tput, busbw
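# Worked example (illustrative only, assuming an initialized 8-rank job): for a 1 GB
# all_reduce that takes 100 ms, the formulas above give algbw = 2*size/t and
# busbw = algbw * (n-1)/n, i.e. about 160 Gbps and 140 Gbps after the unit conversion.
def _example_all_reduce_bw():
    size_bytes = 1e9     # message size in bytes
    duration_ms = 100.0  # latency in milliseconds, as recorded by the comm timers
    algbw, busbw = calc_bw_log("all_reduce", size_bytes, duration_ms)
    return algbw, busbw  # ~160.0, ~140.0 on an 8-rank job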
class CommsLogger:
def __init__(self):
from deepspeed.comm.constants import COMMS_LOGGER_VERBOSE_DEFAULT, COMMS_LOGGER_DEBUG_DEFAULT, COMMS_LOGGER_PROF_OPS_DEFAULT, COMMS_LOGGER_PROF_ALL_DEFAULT, COMMS_LOGGER_ENABLED_DEFAULT
self.comms_dict = {}
self.verbose = COMMS_LOGGER_VERBOSE_DEFAULT
self.debug = COMMS_LOGGER_DEBUG_DEFAULT
self.prof_ops = COMMS_LOGGER_PROF_OPS_DEFAULT
self.prof_all = COMMS_LOGGER_PROF_ALL_DEFAULT
self.enabled = COMMS_LOGGER_ENABLED_DEFAULT
def configure(self, comms_config):
self.enabled = comms_config.comms_logger_enabled
if self.enabled:
self.verbose = comms_config.comms_logger.verbose
self.debug = comms_config.comms_logger.debug
self.prof_ops = comms_config.comms_logger.prof_ops
self.prof_all = comms_config.comms_logger.prof_all
# There are three settings for the op profiler:
# - Global profiling (profile all comms)
# - Op-type profiling (e.g. profile all all_reduce comms)
# - Op profiling (e.g. profile a specific all_reduce op)
def start_profiling_comms(self):
self.prof_all = True
def stop_profiling_comms(self):
self.prof_all = False
# E.g. start_profiling_op('all_reduce')
def start_profiling_op(self, op_name_list):
self.prof_ops = list(set(self.prof_ops) | set(op_name_list))
def stop_profiling_op(self, op_name_list):
self.prof_ops = [op for op in self.prof_ops if op not in op_name_list]
# Add log entry
def append(self, raw_name, record_name, latency, msg_size):
import deepspeed.comm as dist
algbw, busbw = calc_bw_log(raw_name, msg_size, latency)
if record_name in self.comms_dict.keys():
# If this comm_op has already been logged with this message size, just add to existing record
if msg_size in self.comms_dict[record_name].keys():
self.comms_dict[record_name][msg_size][0] += 1
self.comms_dict[record_name][msg_size][1].append(latency)
self.comms_dict[record_name][msg_size][2].append(algbw)
self.comms_dict[record_name][msg_size][3].append(busbw)
# If this is a new message size for this comm_op, add new record under existing comm_op
else:
self.comms_dict[record_name][msg_size] = [1, [latency], [algbw], [busbw]]
else:
# Create entirely new record
self.comms_dict[record_name] = {msg_size: [1, [latency], [algbw], [busbw]]}
# If verbose, print every comm op
# TODO: Add to tensorboard
if self.verbose:
n = dist.get_world_size()
log_str = f"rank={dist.get_rank()} | comm op: " + record_name + " | time (ms): {:.2f}".format(latency)
log_str += " | msg size: " + convert_size(msg_size)
log_str += " | algbw (Gbps): {:.2f} ".format(algbw)
log_str += " | busbw (Gbps): {:.2f} ".format(busbw)
log_dist(log_str, [0])
# Print summary at end of iteration, epoch, or training
def log_all(self, print_log=True, show_straggler=False):
import torch
from deepspeed.utils.timer import trim_mean
import deepspeed.comm as dist
from deepspeed.comm.reduce_op import ReduceOp
if print_log:
print(
f"{'Comm. Op': <20}{'Message Size': <20}{'Count': <20}{'Total Latency(ms)': <20}{'Avg Latency(ms)': <20}{'tput_avg (Gbps)': <20}{'busbw_avg (Gbps)': <20}"
)
for record_name in self.comms_dict.keys():
if print_log:
print(record_name)
for msg_size, vals in sorted(self.comms_dict[record_name].items()):
# vals[0] is the count for each msg size
count = vals[0]
# vals[1] is a list of latency records for each msg size
total_lat = sum(vals[1])
# vals[2] and vals[3] are the lists of algbw and busbw, respectively
# Get rid of outliers when we print
avg_lat = trim_mean(vals[1], 0.1)
avg_algbw = trim_mean(vals[2], 0.1)
avg_busbw = trim_mean(vals[3], 0.1)
if print_log:
print(
f"{' ': <20}{convert_size(msg_size): <20}{count: <20}{total_lat: <20.2f}{avg_lat: <20.2f}{avg_algbw: <20.2f}{avg_busbw: <20.2f}"
)
if show_straggler:
if print_log:
print("_______________________________")
print("Breakdown with straggler effect")
print("-------------------------------")
print(
f"{'Comm. Op': <20}{'Message Size': <20}{'Count': <20}{'Total comm lat(ms)': <20}{'Total straggler(ms)': <20}{'Avg comm lat(ms)': <20}{'Avg straggler(ms)': <20}"
)
for record_name in self.comms_dict.keys():
if print_log:
print(record_name)
for msg_size, vals in sorted(self.comms_dict[record_name].items()):
# vals[0] is the count for each msg size
count = vals[0]
# vals[1] is a list of latency records for each msg size
lats = torch.tensor(vals[1])
min_lats = torch.tensor(vals[1])
dist.all_reduce(min_lats, op=ReduceOp.MIN)
total_lat = min_lats.sum().item()
total_straggler = (lats - min_lats).sum().item()
avg_lat = trim_mean(min_lats.tolist(), 0.1)
avg_straggler = trim_mean((lats - min_lats).tolist(), 0.1)
if print_log:
print(
f"{' ': <20}{convert_size(msg_size): <20}{count: <20}{total_lat: <20.2f}{total_straggler: <20.2f}{avg_lat: <20.2f}{avg_straggler: <20.2f}"
)
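# Illustrative sketch (assumption-only) of the typical flow for scoping the profiler to a
# single collective and printing the summary table built by log_all above. It assumes a
# deepspeed.comm backend has already been initialized across ranks.
def _example_comms_logging_flow(tensor):
    import deepspeed.comm as dist
    dist.configure(enabled=True)  # comms logging must be enabled for ops to be recorded
    dist.comms_logger.start_profiling_op(['all_reduce'])
    dist.all_reduce(tensor, log_name='all_reduce')  # recorded while 'all_reduce' is profiled
    dist.comms_logger.stop_profiling_op(['all_reduce'])
    dist.log_summary()  # rank 0 prints the per-op / per-message-size table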
| 8,024 | 42.61413 | 212 | py |
DeepSpeed | DeepSpeed-master/deepspeed/utils/groups.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# The file has been adapted from https://github.com/NVIDIA/Megatron-LM and retains the following license from the original file
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Support different forms of parallelism in DeepSpeed using multiple process groups.
Given that there are multiple scenarios and use-cases, this file is going to be updated
frequently. For now, the group creation needed for the training scenario is being implemented.
For inference and other new scenarios, the code will be either reused or added to this file.
"""
from deepspeed import comm as dist
from deepspeed.utils import log_dist
from deepspeed.utils.exceptions import DeprecatedException
from deepspeed.accelerator import get_accelerator
# Expert parallel group that the current rank belongs to.
_EXPERT_PARALLEL_GROUP = {}
# Expert data parallel group that the current rank belongs to.
_EXPERT_DATA_PARALLEL_GROUP = {}
# dist world group needs to be cloned for some cases
_WORLD_GROUP = None
# ZeRO parameter partitioning group that the current rank belongs to.
_ZERO_PARAM_INTRA_PARALLEL_GROUP = None
# global object to maintain mpu object if passed by a Megatron client
mpu = None
# global object that stores tensor parallel world size for experts
expert_tensor_parallel_world_size = 1
# All to All quantized gradient communication groups
_ALL_TO_ALL_GROUP = {}
_DATA_PARALLEL_GROUP = None
# Deprecated groups initialize function.
def initialize(ep_size=1, mpu=None):
""" Deprecated function. Retained to inform the users."""
raise DeprecatedException(
"Please do not use the groups.initialize() API as it is deprecated. Instead, pass the desired ep_size to deepspeed.moe.layer.MoE(..,ep_size,..)"
)
def _ensure_divisibility(numerator, denominator):
"""Ensure that numerator is divisible by the denominator."""
assert numerator % denominator == 0, '{} is not divisible by {}'.format(numerator, denominator)
# Not currently used. Helper function to create a model (tensor) parallel group.
def _create_model_parallel(model_parallel_size_):
"""
Initialize model data parallel groups.
Arguments:
model_parallel_size: number of GPUs used to parallelize model.
Returns:
Tuple of data parallel group and model parallel group
Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we
use 2 GPUs to parallelize the model. The present function will
create 4 model parallel groups and 2 data parallel groups as:
4 model parallel groups:
[g0, g1], [g2, g3], [g4, g5], [g6, g7]
2 data parallel groups:
[g0, g2, g4, g6], [g1, g3, g5, g7]
Note that for efficiency, the caller should make sure adjacent ranks
are on the same DGX box. For example if we are using 2 DGX-1 boxes
with a total of 16 GPUs, rank 0 to 7 belong to the first box and
ranks 8 to 15 belong to the second box.
"""
log_dist(f'Creating model parallel group with size {model_parallel_size_}', ranks=[0])
# Get world size and rank. Ensure some consistencies.
assert dist.is_initialized()
world_size = dist.get_world_size()
model_parallel_size = min(model_parallel_size_, world_size)
_ensure_divisibility(world_size, model_parallel_size)
rank = dist.get_rank()
_DATA_PARALLEL_GROUP = None
_MODEL_PARALLEL_GROUP = None
# Build the data parallel groups.
for i in range(model_parallel_size):
ranks = range(i, world_size, model_parallel_size)
group = dist.new_group(ranks)
if i == (rank % model_parallel_size):
_DATA_PARALLEL_GROUP = group
# Build the model parallel groups.
for i in range(world_size // model_parallel_size):
ranks = range(i * model_parallel_size, (i + 1) * model_parallel_size)
group = dist.new_group(ranks)
if i == (rank // model_parallel_size):
_MODEL_PARALLEL_GROUP = group
return _DATA_PARALLEL_GROUP, _MODEL_PARALLEL_GROUP
def _create_expert_and_data_parallel(expert_parallel_size_):
"""
Create expert and data parallel groups.
Note: Caller of this function is responsible to check if the groups already exist.
Example - E + D parallel
world_size = 16
expert_parallel_size = 2 # number of experts in same group
expert_data_parallel_group = [0,2,4,6,8,10,12,14], [1,3,5,7,9,11,13,15] - all reduce is only on MoE params
expert_parallel_group = [0, 1], [2,3], [4,5], [6,7], [8,9] - no all reduce, but all to all
data_parallel_group = [0,1,...,15] - all reduce is only on non-MoE
"""
assert dist.is_initialized()
log_dist(f'Creating expert and data parallel groups with size {expert_parallel_size_}', ranks=[0])
world_size = dist.get_world_size()
rank = dist.get_rank()
_ensure_divisibility(world_size, expert_parallel_size_)
group_name = f"ep_size_{expert_parallel_size_}"
# Build the expert data parallel groups.
global _EXPERT_DATA_PARALLEL_GROUP
# Only create group if it does not already exist
if group_name not in _EXPERT_DATA_PARALLEL_GROUP:
for i in range(expert_parallel_size_):
ranks = range(i, world_size, expert_parallel_size_)
group = dist.new_group(ranks)
log_dist(f'Creating expert data parallel process group named {group_name} with ranks: {list(ranks)}', [0])
if i == (rank % expert_parallel_size_):
_EXPERT_DATA_PARALLEL_GROUP[group_name] = group
# Build the expert parallel groups.
global _EXPERT_PARALLEL_GROUP
# Only create group if it does not already exist
if group_name not in _EXPERT_PARALLEL_GROUP:
for i in range(world_size // expert_parallel_size_):
ranks = range(i * expert_parallel_size_, (i + 1) * expert_parallel_size_)
group = dist.new_group(ranks)
log_dist(f'creating expert parallel process group named {group_name} with ranks: {list(ranks)}', [0])
if i == (rank // expert_parallel_size_):
_EXPERT_PARALLEL_GROUP[group_name] = group
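# Illustrative sketch (assumes dist has already been initialized, e.g. via deepspeed.init_distributed(),
# on 16 ranks as in the docstring example above):
#
#   _create_expert_and_data_parallel(expert_parallel_size_=2)
#   _get_expert_parallel_group("ep_size_2")        # the [0, 1] group when called from rank 0 or 1
#   _get_expert_data_parallel_group("ep_size_2")   # the [0, 2, 4, ..., 14] group on even ranks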
def _get_expert_parallel_ranks(world_size, model_parallel_size_, expert_parallel_size_):
"""Generate expert parallel and expert data parallel group ranks list.
Example - E + M + D parallel
world_size = 16
model_degree = 2
expert_degree = 4 # number of experts in same group
mp_group = [0, 1], [2,3], [4,5] ...
data_parallel_group =[0,2,4,6,8,10, 12,14], [1,3,5,7,9,11,13,15]
expert_parallel_group = [0,2,4,6], [8,10,12,14] [1,3,5,7], [9,11,13,15]
expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15]
Args:
world_size (int): Distributed world size.
model_parallel_size_ (int): Model parallel group size.
expert_parallel_size_ (int): Expert parallel group size.
Returns:
Expert parallel group ranks and Expert data parallel group ranks list.
"""
_ensure_divisibility(world_size, model_parallel_size_)
dp_world_size = world_size // model_parallel_size_
_ensure_divisibility(dp_world_size, expert_parallel_size_)
# Generate data parallel groups
data_parallel_groups = []
dp_group_size = model_parallel_size_
for i in range(dp_group_size):
data_parallel_groups.append(list(range(i, world_size, dp_group_size)))
expert_parallel_groups = []
expert_data_parallel_groups = []
for dp_ranks in data_parallel_groups:
# partition of expert parallel groups, e.g. [0,2,4,6], [8,10,12,14]
part_ep_groups = []
for i in range(0, dp_world_size, expert_parallel_size_):
part_ep_groups.append(dp_ranks[i:i + expert_parallel_size_])
expert_parallel_groups.extend(part_ep_groups)
# zip part_ep_groups get expert data parallel ranks, e.g [0,8],[2,10],[4,12],[6,14]
for expert_dp_ranks in zip(*part_ep_groups):
expert_data_parallel_groups.append(list(expert_dp_ranks))
return expert_parallel_groups, expert_data_parallel_groups
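# Illustrative check (pure Python, no dist required): for the E + M + D example in the docstring,
#   _get_expert_parallel_ranks(world_size=16, model_parallel_size_=2, expert_parallel_size_=4)
# returns
#   expert_parallel_groups      = [[0, 2, 4, 6], [8, 10, 12, 14], [1, 3, 5, 7], [9, 11, 13, 15]]
#   expert_data_parallel_groups = [[0, 8], [2, 10], [4, 12], [6, 14], [1, 9], [3, 11], [5, 13], [7, 15]]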
def _create_expert_data_and_model_parallel(expert_parallel_size_, mpu):
"""
Create expert and data parallel groups based on MPU (model parallel) group.
Note: Caller of this function is responsible to check if the groups already exist.
Example - E + M + D parallel
world_size = 16
model_degree = 2
expert_degree = 4 # number of experts in same group
mp_group = [0, 1], [2,3], [4,5] ...
data_parallel_group =[0,2,4,6,8,10, 12,14], [1,3,5,7,9,11,13,15]
expert_parallel_group = [0,2,4,6], [8,10,12,14] [1,3,5,7], [9,11,13,15]
expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15]
"""
assert dist.is_initialized(), "dist is not initialized"
model_parallel_size_ = mpu.get_model_parallel_world_size()
global expert_tensor_parallel_world_size
expert_tensor_parallel_world_size = model_parallel_size_
world_size = dist.get_world_size()
rank = dist.get_rank()
dp_world_size = mpu.get_data_parallel_world_size()
dp_rank = mpu.get_data_parallel_rank()
_ensure_divisibility(world_size, model_parallel_size_)
_ensure_divisibility(dp_world_size, expert_parallel_size_)
log_dist(
f"Creating deepspeed groups with model parallel size {model_parallel_size_}, expert parallel size {expert_parallel_size_}, world size {world_size}, dp world size {dp_world_size}",
[0])
global _EXPERT_PARALLEL_GROUP, _EXPERT_DATA_PARALLEL_GROUP
# Get world size and rank. Ensure some consistencies.
_DATA_PARALLEL_GROUP = mpu.get_data_parallel_group()
_MODEL_PARALLEL_GROUP = mpu.get_model_parallel_group()
group_name = f"ep_size_{expert_parallel_size_}"
# Only create groups if they don't already exist
# Need to check conditions outside the group creation loop because of the way torch.dist group creation works
if group_name not in _EXPERT_DATA_PARALLEL_GROUP and group_name not in _EXPERT_PARALLEL_GROUP:
expert_parallel_groups, expert_data_parallel_groups = _get_expert_parallel_ranks(
world_size, model_parallel_size_, expert_parallel_size_)
for ranks in expert_parallel_groups:
group = dist.new_group(ranks)
if rank in list(ranks):
_EXPERT_PARALLEL_GROUP[group_name] = group
for ranks in expert_data_parallel_groups:
group = dist.new_group(ranks)
if rank in list(ranks):
_EXPERT_DATA_PARALLEL_GROUP[group_name] = group
def _get_max_expert_size():
"""Get the maximum ep_size from all the created groups."""
assert _EXPERT_PARALLEL_GROUP is not None, "Warning! Process group not initialized"
keylist = []
for key in _EXPERT_PARALLEL_GROUP.keys():
# index 2 is ep_size in the group name: ep_size_<ep_size>
index = 2
keylist.append(int(key.split('_')[index]))
return max(keylist) if len(keylist) > 0 else None
def _get_max_expert_size_name():
"""Get the name of the group with max. ep_size"""
return f'ep_size_{_get_max_expert_size()}'
def _get_max_expert_parallel_group():
"""Get the max expert parallel size."""
return _get_expert_parallel_group(_get_max_expert_size_name())
def _get_expert_parallel_group(group_name):
"""Get the expert parallel group the caller rank belongs to."""
assert group_name in _EXPERT_PARALLEL_GROUP, \
'expert parallel group is not initialized'
return _EXPERT_PARALLEL_GROUP[group_name]
def _get_expert_parallel_group_dict():
"""Get the expert parallel group dict."""
return _EXPERT_PARALLEL_GROUP
def _get_expert_data_parallel_group(group_name):
"""Get the expert data parallel group the caller rank belongs to."""
assert group_name in _EXPERT_DATA_PARALLEL_GROUP, \
'expert data parallel group is not initialized'
return _EXPERT_DATA_PARALLEL_GROUP[group_name]
def _get_expert_data_parallel_group_dict():
"""Get the expert data parallel group dict."""
return _EXPERT_DATA_PARALLEL_GROUP
def _clone_world_group():
"""Create a clone of the world group
Note: We need to clone the dist world group because we
    use the dist.get_global_rank() utility function in many places in DeepSpeed.
As that function does not work on dist.group.WORLD, we
need to keep a clone of it.
"""
assert dist.is_initialized(), "dist is not initialized"
global _WORLD_GROUP
if _WORLD_GROUP is None:
# If not cloned already, clone the world group
_WORLD_GROUP = dist.new_group(ranks=range(dist.get_world_size()))
return _WORLD_GROUP
def _get_local_all_to_all_group():
assert dist.is_initialized(), 'dist is not initialized'
global _ALL_TO_ALL_GROUP
device_per_node = get_accelerator().device_count()
num_local = dist.get_world_size() // device_per_node
if num_local == 0 and dist.get_world_size() > 0:
        assert dist.get_world_size() >= 1, 'num_gpus must be >= 1, cannot initialize All-To-All'
cur_rank = []
for i in range(dist.get_world_size()):
cur_rank.append(i)
_ALL_TO_ALL_GROUP['local_0'] = dist.new_group(ranks=cur_rank)
elif num_local == 1:
assert dist.get_world_size(
) == device_per_node, 'num_gpus not equal to device per node, cannot initialize All-To-All'
_ALL_TO_ALL_GROUP['local_0'] = dist.new_group(ranks=[i for i in range(device_per_node)])
else:
assert dist.get_world_size() > device_per_node, 'num_nodes<2 cannot initialize All-To-All'
for i in range(num_local):
local_rank = [j + device_per_node * i for j in range(device_per_node)]
_ALL_TO_ALL_GROUP[f"local_{i}"] = dist.new_group(ranks=local_rank)
for i in range(device_per_node):
cur_rank = []
for j in range(num_local):
cur_rank.append(i + j * device_per_node)
_ALL_TO_ALL_GROUP[f"global_{i}"] = dist.new_group(ranks=cur_rank)
return _ALL_TO_ALL_GROUP
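# Illustrative sketch of the groups built above for 2 nodes x 4 devices (world_size=8, device_per_node=4):
#   local groups : local_0  = [0, 1, 2, 3],  local_1  = [4, 5, 6, 7]                              # intra-node
#   global groups: global_0 = [0, 4], global_1 = [1, 5], global_2 = [2, 6], global_3 = [3, 7]     # inter-node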
def _get_data_parallel_group():
"""Get the data parallel group the caller rank belongs to."""
assert dist.is_initialized(), 'dist is not initialized'
global mpu
if mpu is not None:
return mpu.get_data_parallel_group()
# Return the clone of dist world group
return _clone_world_group()
def _get_broadcast_src_rank():
return dist.get_global_rank(_get_data_parallel_group(), 0)
def _get_expert_broadcast_src_rank(group_name):
return dist.get_global_rank(_get_expert_data_parallel_group(group_name), 0)
def _get_expert_parallel_world_size(group_name):
"""Return world size for the expert parallel group."""
return dist.get_world_size(group=_get_expert_parallel_group(group_name))
def _get_expert_data_parallel_world_size(group_name):
"""Return world size for the expert data parallel group."""
return dist.get_world_size(group=_get_expert_data_parallel_group(group_name))
def _get_expert_parallel_rank(group_name):
"""Return my rank for the expert parallel group."""
return dist.get_rank(group=_get_expert_parallel_group(group_name))
def _get_expert_parallel_src_rank(group_name):
"""Calculate the global rank corresponding to a local rank zero
in the expert parallel group."""
global_rank = dist.get_rank()
local_world_size = _get_expert_parallel_world_size(group_name)
return (global_rank // local_world_size) * local_world_size
def _get_expert_data_parallel_rank(group_name):
"""Return my rank for the expert data parallel group."""
return dist.get_rank(group=_get_expert_data_parallel_group(group_name))
def _get_data_parallel_world_size():
"""Return world size for the data parallel group."""
global mpu
if mpu is not None:
return mpu.get_data_parallel_world_size()
return dist.get_world_size(group=_get_data_parallel_group())
def _get_model_parallel_world_size():
"""Return world size for the model parallel group."""
global mpu
if mpu is not None:
return mpu.get_model_parallel_world_size()
return 1
def _get_data_parallel_rank():
"""Return my rank for the data parallel group."""
global mpu
if mpu is not None:
return mpu.get_data_parallel_rank()
return dist.get_rank(group=_get_data_parallel_group())
def _get_expert_model_parallel_world_size():
global expert_tensor_parallel_world_size
return expert_tensor_parallel_world_size
def _create_zero_param_parallel_group(group_size):
"""
Create parameter partitioning group within ZeRO data parallel groups.
Example - ZP + D parallel
world_size = 16
        zero_hpz_partition_size = 2 # number of ranks with replicated params (dual partitioning)
zero_param_intra_parallel_group = [0, 1], [2,3], [4,5], [6,7], [8,9] - segmented (subgroup) with rep partition
data_parallel_group = [0,1,...,15] - all reduce is on ZeRO model
"""
assert dist.is_initialized()
global _ZERO_PARAM_INTRA_PARALLEL_GROUP
# Only create group if it does not already exist
assert _ZERO_PARAM_INTRA_PARALLEL_GROUP is None, \
'ZeRO parameter intra parallel group is already initialized'
world_size = dist.get_world_size()
rank = dist.get_rank()
zero_param_parallel_size_ = min(group_size, world_size)
_ensure_divisibility(world_size, zero_param_parallel_size_)
# Build the ZeRO param intra parallel groups.
for i in range(world_size // zero_param_parallel_size_):
ranks = range(i * zero_param_parallel_size_, (i + 1) * zero_param_parallel_size_)
group = dist.new_group(ranks)
if i == (rank // zero_param_parallel_size_):
_ZERO_PARAM_INTRA_PARALLEL_GROUP = group
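# Illustrative sketch: this is the secondary ("hpZ") partitioning group used by ZeRO++; with a user
# config of e.g. "zero_hpz_partition_size": 2 and world_size=16, the loop above builds
# [0, 1], [2, 3], ..., [14, 15] and keeps the group that contains the caller rank.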
def _get_zero_param_intra_parallel_group():
"""Get the ZeRO parameter partitioning intra parallel group the caller rank belongs to."""
#assert _ZERO_PARAM_INTRA_PARALLEL_GROUP is not None, \
# 'ZeRO parameter partitioning group is not initialized'
#TODO: Add warning
return _ZERO_PARAM_INTRA_PARALLEL_GROUP
def _zero_param_parallel_is_initialized():
"""Check if ZeRO data parallel with parameter partititioning groups are initialized."""
###TODO: assert that MPU is not set
    if _ZERO_PARAM_INTRA_PARALLEL_GROUP is None and _DATA_PARALLEL_GROUP is None:
        return False
    return True
def _get_zero_param_intra_parallel_rank_in_mygroup():
"""Return my rank for the ZeRO parameter inter parallel group."""
return dist.get_rank(group=_get_zero_param_intra_parallel_group())
def _get_zero_param_intra_parallel_group_world_size():
"""Return world size for the ZeRO parameter parallel group."""
return dist.get_world_size(group=_get_zero_param_intra_parallel_group())
def _get_zero_param_intra_parallel_group_ranks():
"""Return all ranks for the ZeRO parameter intra parallel group."""
return dist.get_all_ranks_from_group(group=_get_zero_param_intra_parallel_group())
| 19,762 | 39.664609 | 187 | py |
DeepSpeed | DeepSpeed-master/deepspeed/utils/init_on_device.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from typing import Callable
from torch import Tensor
from packaging import version as pkg_version
class OnDevice(object):
"""
Create modules/tensors w. specific devices and dtypes. Examples:
Create MyModule which consists of many different sub-modules and parameters. In this case we can create
MyModule as a collection of 'meta' tensors by passing `device='meta'` or we can create the module _directly_
    on a CUDA device by passing `device=f'cuda:{local_rank}'` (where `local_rank` is the local GPU id).
with OnDevice(dtype=torch.float16, device='meta'):
model = MyModel()
with OnDevice(dtype=torch.float16, device=f'cuda:{local_rank}'):
model = MyModel()
"""
_orig_torch_empty = torch.empty
_orig_torch_zeros = torch.zeros
_orig_torch_ones = torch.ones
_orig_torch_full = torch.full
def __init__(self, dtype, device="meta", enabled=True):
self.dtype = dtype
self.enabled = enabled
self.device = device
if device == "meta":
if pkg_version.parse('1.10') > pkg_version.parse(torch.__version__):
raise NotImplementedError("Meta tensor support is not available, please upgrade to torch 1.10+")
def fp_tensor_constructor(self, fn: Callable, target_fp_dtype: torch.dtype) -> Callable:
def wrapped_fn(*args, **kwargs) -> Tensor:
if kwargs.get("device", None) is None:
kwargs['device'] = self.device
tensor: Tensor = fn(*args, **kwargs)
if tensor.is_floating_point():
tensor = tensor.to(target_fp_dtype)
return tensor
return wrapped_fn
def get_new_tensor_fn_for_dtype(self, dtype: torch.dtype) -> Callable:
def new_tensor(cls, *args) -> Tensor:
tensor = OnDevice._orig_torch_empty(0, device=self.device).new_empty(*args)
if tensor.is_floating_point():
tensor = tensor.to(dtype)
return tensor
return new_tensor
def __enter__(self):
if not self.enabled:
return
torch.Tensor.__old_new__ = torch.Tensor.__new__
torch.Tensor.__new__ = self.get_new_tensor_fn_for_dtype(self.dtype)
torch.empty = self.fp_tensor_constructor(self._orig_torch_empty, self.dtype)
torch.zeros = self.fp_tensor_constructor(self._orig_torch_zeros, self.dtype)
torch.ones = self.fp_tensor_constructor(self._orig_torch_ones, self.dtype)
torch.full = self.fp_tensor_constructor(self._orig_torch_full, self.dtype)
def __exit__(self, exc_type, exc_value, traceback):
if not self.enabled:
return
torch.Tensor.__new__ = torch.Tensor.__old_new__
torch.empty = self._orig_torch_empty
torch.zeros = self._orig_torch_zeros
torch.ones = self._orig_torch_ones
torch.full = self._orig_torch_full
| 3,004 | 35.646341 | 112 | py |
DeepSpeed | DeepSpeed-master/deepspeed/utils/tensor_fragment.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from dataclasses import dataclass
from deepspeed import comm as dist
from typing import Dict
@dataclass
class fragment_address:
numel: int
start: int
@dataclass
class tensor_fragment:
lp_fragment: torch.Tensor
lp_fragment_address: fragment_address
hp_fragment: torch.Tensor
hp_fragment_address: fragment_address
optim_fragment: Dict
gradient_dict: Dict
offload_gradient_dict: Dict
use_offload: bool
param_group_index: int
def update_hp(self):
self.hp_fragment.data.copy_(self.lp_fragment.data)
def update_lp(self):
self.lp_fragment.data.copy_(self.hp_fragment.data)
def get_optim_state_fragment(self, key):
if key in self.optim_fragment:
return self.optim_fragment[key]
else:
raise ValueError(f'{key} not found in optimizer state fragment')
def get_hp_fragment_address(self):
return self.hp_fragment_address
def get_optim_state_keys(self):
return list(self.optim_fragment.keys())
def get_full_hp_param(self, optim_state_key=None):
reduce_buffer = torch.zeros_like(self, dtype=torch.float32).flatten()
if self._hp_mapping is not None:
lp_frag_address = self._hp_mapping.lp_fragment_address
reduce_fragment = torch.narrow(reduce_buffer, 0, lp_frag_address.start, lp_frag_address.numel)
if optim_state_key is None:
hp_fragment = self._hp_mapping.hp_fragment
else:
hp_fragment = self._hp_mapping.get_optim_state_fragment(optim_state_key)
reduce_fragment.data.copy_(hp_fragment.data)
dist.all_reduce(reduce_buffer, group=self._dp_group)
return reduce_buffer.reshape_as(self)
def get_full_hp_grad(self):
reduce_buffer = torch.zeros_like(self, dtype=torch.float32).flatten()
if self._hp_mapping is not None:
hp_mapping = self._hp_mapping
if hp_mapping.use_offload:
gradient_dict = hp_mapping.offload_gradient_dict
else:
gradient_dict = hp_mapping.gradient_dict
if hp_mapping.param_group_index not in gradient_dict or gradient_dict[hp_mapping.param_group_index] is None:
raise ValueError("Gradients are only available immediately after backward and before engine step")
lp_grad_fragment = gradient_dict[hp_mapping.param_group_index][self._index_in_param_group]
hp_grad_fragment = lp_grad_fragment.to(torch.float32).flatten()
lp_frag_address = self._hp_mapping.lp_fragment_address
reduce_fragment = torch.narrow(reduce_buffer, 0, lp_frag_address.start, lp_frag_address.numel)
if self.view(-1).shape == hp_grad_fragment.shape:
reduce_buffer.data.copy_(hp_grad_fragment.data)
else:
reduce_fragment.data.copy_(hp_grad_fragment.data)
dist.all_reduce(reduce_buffer, group=self._dp_group)
return reduce_buffer.reshape_as(self)
def safe_get_full_fp32_param(param):
"""Assemble and return the fp32 parameter of a low-precision (e.g., fp16) parameter.
Args:
param (``torch.nn.Parameter``): A model parameter
"""
# ZeRO stage 3 param
if hasattr(param, 'ds_id'):
return param._z3_optimizer.get_full_hp_param(param)
# ZeRO stage 1, 2, and bf16_optimizer params
if hasattr(param, '_hp_mapping'):
return param.get_full_hp_param()
return None
def safe_get_full_optimizer_state(param, optim_state_key):
"""Assemble and return the fp32 optimizer state of a low-precision (e.g., fp16) parameter.
Args:
param (``torch.nn.Parameter``): A model parameter
"""
# ZeRO stage 3 param
if hasattr(param, 'ds_id'):
return param._z3_optimizer.get_full_hp_param(param, optim_state_key)
# ZeRO stage 1, 2, and bf16_optimizer params
if hasattr(param, '_hp_mapping'):
return param.get_full_hp_param(optim_state_key)
return None
# TODO: Figure out the correct return dtype
def safe_get_full_grad(param):
"""Assemble and return the fp32 gradient of a low-precision (e.g., fp16) parameter.
Args:
param (``torch.nn.Parameter``): A model parameter
"""
if param.grad is not None:
return param.grad
# ZeRO stage 3 param
if hasattr(param, 'ds_id'):
return param._z3_optimizer.get_fp32_grad_for_param(param)
# ZeRO stage 1, 2, and bf16_optimizer params
if hasattr(param, '_hp_mapping'):
return param.get_full_hp_grad()
return None
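# Illustrative usage sketch (assumes a DeepSpeed engine wraps `model` and backward() has just run;
# `model` and the "exp_avg" optimizer-state key are placeholders, not defined in this module):
#
#   from deepspeed.utils import safe_get_full_fp32_param, safe_get_full_grad, safe_get_full_optimizer_state
#   for name, param in model.named_parameters():
#       hp_param = safe_get_full_fp32_param(param)                  # assembled fp32 weights (or None)
#       hp_grad = safe_get_full_grad(param)                         # assembled fp32 gradient (or None)
#       exp_avg = safe_get_full_optimizer_state(param, "exp_avg")   # e.g. Adam first moment, if present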
def get_hp_fragment_mapping(lp_param, lp_start, flat_hp_partition, gradient_dict, offload_gradient_dict, use_offload,
param_group_index, partition_start, partition_size, optimizer_state_dict):
lp_end = lp_param.numel() + lp_start
hp_start = partition_start
hp_end = partition_start + partition_size
fragment_start = max(lp_start, hp_start)
fragment_end = min(lp_end, hp_end)
assert fragment_start < fragment_end, \
f'fragment start {fragment_start} should be < fragment_end {fragment_end}'
fragment_numel = fragment_end - fragment_start
hp_frag_address = fragment_address(start=fragment_start - hp_start, numel=fragment_numel)
hp_fragment_tensor = flat_hp_partition.narrow(0, hp_frag_address.start, hp_frag_address.numel)
optim_fragment = {
key: value.narrow(0, hp_frag_address.start, hp_frag_address.numel)
for key, value in optimizer_state_dict.items()
if torch.is_tensor(value) and value.shape == flat_hp_partition.shape
}
lp_frag_address = fragment_address(start=fragment_start - lp_start, numel=fragment_numel)
lp_fragment_tensor = lp_param.flatten().narrow(0, lp_frag_address.start, lp_frag_address.numel)
return tensor_fragment(lp_fragment=lp_fragment_tensor,
lp_fragment_address=lp_frag_address,
hp_fragment=hp_fragment_tensor,
hp_fragment_address=hp_frag_address,
optim_fragment=optim_fragment,
gradient_dict=gradient_dict,
offload_gradient_dict=offload_gradient_dict,
use_offload=use_offload,
param_group_index=param_group_index)
'''
Logic for lp_param to hp_param mapping
lp lp0 lp1 lp2 lp3 lp4 <------- indices/names
lp [ ][ ][ ][ ][ ] <-------- tensors
flat_lp [ ] <-------- flat lp params
flat_hp [ ] <------------------ flat hp partition on current rank
full_hp [ ] <------- full flat hp params
lp2
full numel = 16
lp_frag
numel = 12
frag_start = 3
frag_end = 15
hp_frag
numel = 12
frag_start = 0
frag_end = 11
hp_frag.copy_(lp_frag)
lp3:
full numel = 4
lp_frag
numel = 4
start = 0
end = 3
hp_frag
numel = 4
start = 12
end = 15
lp4:
full numel = 12
lp_frag
numel = 4
start = 0
end = 3
hp_frag
numel = 4
start = 16
end = 19
Visual depiction of above
lp { }
flat_lp [ ]
flat_hp ( )
flat_lp [ { ( } ) ]
lx hx ly hy
ly-hx
lp { }
flat_lp [ ]
flat_hp ( )
flat_lp [ ( { ) } ]
hx lx hy ly
hy-lx
lp { }
flat_lp [ ]
flat_hp ( )
flat_lp [ ( { } ) ]
hx lx ly hy
ly-lx
lp -> (lx, hy)
flat_hp -> (hx, hy)
'''
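# Compact numeric walk-through of get_hp_fragment_mapping above (sketch only): an lp_param of 12
# elements starting at lp_start=3, mapped onto a hp partition with partition_start=0, partition_size=15:
#   lp_end          = 3 + 12           = 15
#   fragment_start  = max(3, 0)        = 3
#   fragment_end    = min(15, 0 + 15)  = 15
#   fragment_numel  = 15 - 3           = 12
#   hp_frag_address = fragment_address(start=3 - 0, numel=12)
#   lp_frag_address = fragment_address(start=3 - 3, numel=12)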
| 8,027 | 29.876923 | 117 | py |
DeepSpeed | DeepSpeed-master/deepspeed/utils/zero_to_fp32.py | #!/usr/bin/env python
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
# application.
#
# example: python zero_to_fp32.py . pytorch_model.bin
import argparse
import torch
import glob
import math
import os
import re
from collections import OrderedDict
from dataclasses import dataclass
# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
# DeepSpeed data structures it has to be available in the current python environment.
from deepspeed.utils import logger
from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
@dataclass
class zero_model_state:
buffers: dict()
param_shapes: dict()
shared_params: list
ds_version: int
frozen_param_shapes: dict()
frozen_param_fragments: dict()
debug = 0
# load to cpu
device = torch.device('cpu')
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [atoi(c) for c in re.split(r'(\d+)', text)]
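# Illustrative sketch: natural_keys sorts checkpoint shards in human order, e.g.
#   sorted(["rank_10_optim_states.pt", "rank_2_optim_states.pt"], key=natural_keys)
#   -> ["rank_2_optim_states.pt", "rank_10_optim_states.pt"]
# (the file names are placeholders; only the numeric-aware ordering matters here)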
def get_model_state_file(checkpoint_dir, zero_stage):
if not os.path.isdir(checkpoint_dir):
raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
# there should be only one file
if zero_stage <= 2:
file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
elif zero_stage == 3:
file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
if not os.path.exists(file):
raise FileNotFoundError(f"can't find model states file at '{file}'")
return file
def get_checkpoint_files(checkpoint_dir, glob_pattern):
# XXX: need to test that this simple glob rule works for multi-node setup too
ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
if len(ckpt_files) == 0:
raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
return ckpt_files
def get_optim_files(checkpoint_dir):
return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
def get_model_state_files(checkpoint_dir):
return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
def parse_model_states(files):
zero_model_states = []
for file in files:
state_dict = torch.load(file, map_location=device)
if BUFFER_NAMES not in state_dict:
raise ValueError(f"{file} is not a model state checkpoint")
buffer_names = state_dict[BUFFER_NAMES]
if debug:
print("Found buffers:", buffer_names)
# recover just the buffers while restoring them to fp32 if they were saved in fp16
buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
param_shapes = state_dict[PARAM_SHAPES]
# collect parameters that are included in param_shapes
param_names = []
for s in param_shapes:
for name in s.keys():
param_names.append(name)
# update with frozen parameters
frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
if frozen_param_shapes is not None:
if debug:
print(f"Found frozen_param_shapes: {frozen_param_shapes}")
param_names += list(frozen_param_shapes.keys())
# handle shared params
shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
ds_version = state_dict.get(DS_VERSION, None)
frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
z_model_state = zero_model_state(buffers=buffers,
param_shapes=param_shapes,
shared_params=shared_params,
ds_version=ds_version,
frozen_param_shapes=frozen_param_shapes,
frozen_param_fragments=frozen_param_fragments)
zero_model_states.append(z_model_state)
return zero_model_states
def parse_optim_states(files, ds_checkpoint_dir):
total_files = len(files)
state_dicts = []
for f in files:
state_dicts.append(torch.load(f, map_location=device))
if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
raise ValueError(f"{files[0]} is not a zero checkpoint")
zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
# For ZeRO-2 each param group can have different partition_count as data parallelism for expert
# parameters can be different from data parallelism for non-expert parameters. So we can just
# use the max of the partition_count to get the dp world_size.
if type(world_size) is list:
world_size = max(world_size)
if world_size != total_files:
raise ValueError(
f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
"Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
)
# the groups are named differently in each stage
if zero_stage <= 2:
fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
elif zero_stage == 3:
fp32_groups_key = FP32_FLAT_GROUPS
else:
raise ValueError(f"unknown zero stage {zero_stage}")
if zero_stage <= 2:
fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
elif zero_stage == 3:
# if there is more than one param group, there will be multiple flattened tensors - one
# flattened tensor per group - for simplicity merge them into a single tensor
#
# XXX: could make the script more memory efficient for when there are multiple groups - it
# will require matching the sub-lists of param_shapes for each param group flattened tensor
fp32_flat_groups = [
torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
]
return zero_stage, world_size, fp32_flat_groups
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
"""
Returns fp32 state_dict reconstructed from ds checkpoint
Args:
- ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
"""
print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
optim_files = get_optim_files(ds_checkpoint_dir)
zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
model_files = get_model_state_files(ds_checkpoint_dir)
zero_model_states = parse_model_states(model_files)
print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
if zero_stage <= 2:
return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
elif zero_stage == 3:
return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
def _zero2_merge_frozen_params(state_dict, zero_model_states):
if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
return
frozen_param_shapes = zero_model_states[0].frozen_param_shapes
frozen_param_fragments = zero_model_states[0].frozen_param_fragments
if debug:
num_elem = sum(s.numel() for s in frozen_param_shapes.values())
print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
wanted_params = len(frozen_param_shapes)
wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
print(f'Frozen params: Have {avail_numel} numels to process.')
print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
total_params = 0
total_numel = 0
for name, shape in frozen_param_shapes.items():
total_params += 1
unpartitioned_numel = shape.numel()
total_numel += unpartitioned_numel
state_dict[name] = frozen_param_fragments[name]
if debug:
print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
param_shapes = zero_model_states[0].param_shapes
# Reconstruction protocol:
#
# XXX: document this
if debug:
for i in range(world_size):
for j in range(len(fp32_flat_groups[0])):
print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
# XXX: memory usage doubles here (zero2)
num_param_groups = len(fp32_flat_groups[0])
merged_single_partition_of_fp32_groups = []
for i in range(num_param_groups):
merged_partitions = [sd[i] for sd in fp32_flat_groups]
full_single_fp32_vector = torch.cat(merged_partitions, 0)
merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
avail_numel = sum(
[full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
if debug:
wanted_params = sum([len(shapes) for shapes in param_shapes])
wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
# not asserting if there is a mismatch due to possible padding
print(f"Have {avail_numel} numels to process.")
print(f"Need {wanted_numel} numels in {wanted_params} params.")
# params
# XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
# out-of-core computing solution
total_numel = 0
total_params = 0
for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
offset = 0
avail_numel = full_single_fp32_vector.numel()
for name, shape in shapes.items():
unpartitioned_numel = shape.numel()
total_numel += unpartitioned_numel
total_params += 1
if debug:
print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
offset += unpartitioned_numel
# Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
# avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
# paddings performed in the code it's almost impossible to predict the exact numbers w/o the
# live optimizer object, so we are checking that the numbers are within the right range
align_to = 2 * world_size
def zero2_align(x):
return align_to * math.ceil(x / align_to)
if debug:
print(f"original offset={offset}, avail_numel={avail_numel}")
offset = zero2_align(offset)
avail_numel = zero2_align(avail_numel)
if debug:
print(f"aligned offset={offset}, avail_numel={avail_numel}")
# Sanity check
if offset != avail_numel:
raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
state_dict = OrderedDict()
# buffers
buffers = zero_model_states[0].buffers
state_dict.update(buffers)
if debug:
print(f"added {len(buffers)} buffers")
_zero2_merge_frozen_params(state_dict, zero_model_states)
_zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
# recover shared parameters
for pair in zero_model_states[0].shared_params:
if pair[1] in state_dict:
state_dict[pair[0]] = state_dict[pair[1]]
return state_dict
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
remainder = unpartitioned_numel % world_size
padding_numel = (world_size - remainder) if remainder else 0
partitioned_numel = math.ceil(unpartitioned_numel / world_size)
return partitioned_numel, padding_numel
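# Worked example (sketch): with world_size=4 and a 10-element parameter,
#   remainder         = 10 % 4        = 2
#   padding_numel     = 4 - 2         = 2
#   partitioned_numel = ceil(10 / 4)  = 3
# so every rank holds a 3-element shard and 2 of the 12 stored elements are padding.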
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
return
if debug:
for i in range(world_size):
num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
frozen_param_shapes = zero_model_states[0].frozen_param_shapes
wanted_params = len(frozen_param_shapes)
wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
print(f'Frozen params: Have {avail_numel} numels to process.')
print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
total_params = 0
total_numel = 0
for name, shape in zero_model_states[0].frozen_param_shapes.items():
total_params += 1
unpartitioned_numel = shape.numel()
total_numel += unpartitioned_numel
param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
if debug:
print(
f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
)
print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
param_shapes = zero_model_states[0].param_shapes
avail_numel = fp32_flat_groups[0].numel() * world_size
# Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
# param, re-consolidating each param, while dealing with padding if any
# merge list of dicts, preserving order
param_shapes = {k: v for d in param_shapes for k, v in d.items()}
if debug:
for i in range(world_size):
print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
wanted_params = len(param_shapes)
wanted_numel = sum(shape.numel() for shape in param_shapes.values())
# not asserting if there is a mismatch due to possible padding
avail_numel = fp32_flat_groups[0].numel() * world_size
print(f"Trainable params: Have {avail_numel} numels to process.")
print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
# params
# XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
# out-of-core computing solution
offset = 0
total_numel = 0
total_params = 0
for name, shape in param_shapes.items():
unpartitioned_numel = shape.numel()
total_numel += unpartitioned_numel
total_params += 1
partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
if debug:
print(
f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
)
# XXX: memory usage doubles here
state_dict[name] = torch.cat(
tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
0).narrow(0, 0, unpartitioned_numel).view(shape)
offset += partitioned_numel
offset *= world_size
# Sanity check
if offset != avail_numel:
raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
state_dict = OrderedDict()
# buffers
buffers = zero_model_states[0].buffers
state_dict.update(buffers)
if debug:
print(f"added {len(buffers)} buffers")
_zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
_zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
# recover shared parameters
for pair in zero_model_states[0].shared_params:
if pair[1] in state_dict:
state_dict[pair[0]] = state_dict[pair[1]]
return state_dict
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
"""
Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
via a model hub.
Args:
- ``checkpoint_dir``: path to the desired checkpoint folder
- ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
Returns:
- pytorch ``state_dict``
Note: this approach may not work if your application doesn't have sufficient free CPU memory and
you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
the checkpoint.
A typical usage might be ::
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
# do the training and checkpoint saving
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
model = model.cpu() # move to cpu
model.load_state_dict(state_dict)
# submit to model hub or save the model to share with others
In this example the ``model`` will no longer be usable in the deepspeed context of the same
application. i.e. you will need to re-initialize the deepspeed engine, since
``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
"""
if tag is None:
latest_path = os.path.join(checkpoint_dir, 'latest')
if os.path.isfile(latest_path):
with open(latest_path, 'r') as fd:
tag = fd.read().strip()
else:
raise ValueError(f"Unable to find 'latest' file at {latest_path}")
ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
if not os.path.isdir(ds_checkpoint_dir):
raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
"""
Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
Args:
- ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
- ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
- ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
"""
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
print(f"Saving fp32 state dict to {output_file}")
torch.save(state_dict, output_file)
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
"""
1. Put the provided model to cpu
2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
3. Load it into the provided model
Args:
- ``model``: the model object to update
- ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
- ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
Returns:
        - ``model``: modified model
Make sure you have plenty of CPU memory available before you call this function. If you don't
have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
conveniently placed for you in the checkpoint folder.
A typical usage might be ::
from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
# submit to model hub or save the model to share with others
    Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
of the same application. i.e. you will need to re-initialize the deepspeed engine, since
``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
"""
logger.info(f"Extracting fp32 weights")
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
logger.info(f"Overwriting model with fp32 weights")
model = model.cpu()
model.load_state_dict(state_dict, strict=False)
return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("checkpoint_dir",
type=str,
help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
parser.add_argument(
"output_file",
type=str,
help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
args = parser.parse_args()
debug = args.debug
convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
| 23,610 | 39.778929 | 197 | py |
DeepSpeed | DeepSpeed-master/deepspeed/inference/engine.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import time
import os
from deepspeed import comm as dist
from deepspeed.utils.logging import log_dist
from torch.nn.modules import Module
from packaging import version as pkg_version
from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine
from deepspeed.utils.timer import SynchronizedWallClockTimer
from ..runtime.state_dict_factory import SDLoaderFactory
from ..runtime.weight_quantizer import WeightQuantization
from ..module_inject import replace_transformer_layer, generic_injection
from ..comm.comm import init_distributed
from ..pipe import PipelineModule
from ..moe.utils import has_moe_layers
from ..module_inject import LinearAllreduce, LinearLayer, Normalize, ReplaceWithTensorSlicing
from deepspeed.accelerator import get_accelerator
from ..module_inject.policy import TransformerPolicy
from ..module_inject.auto_tp import AutoTP
from ..module_inject.replace_policy import generic_policies
DS_INFERENCE_ENABLED = False
from torch import nn
INFERENCE_MODEL_TIMER = "model-forward-inference"
def build_bloom_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor:
"""
Link to paper: https://arxiv.org/abs/2108.12409 Alibi tensor is not causal as the original paper mentions, it
relies on a translation invariance of softmax for quick implementation: with l being a tensor, and a fixed value
`softmax(l+a) = softmax(l)`. Based on
https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742
TODO @thomasw21 this doesn't work as nicely due to the masking strategy, and so masking varies slightly.
    Args:
        attention_mask (`torch.Tensor`):
            Token-wise attention mask, this should be of shape (batch_size, max_seq_len).
        num_heads (`int`, *required*):
            number of heads
        dtype (`torch.dtype`, *optional*, default=`torch.bfloat16`):
            dtype of the output tensor
    Returns:
        Tensor shaped (batch_size * num_heads, 1, max_seq_len)
"""
import math
batch_size, seq_length = attention_mask.shape
closest_power_of_2 = 2**math.floor(math.log2(num_heads))
base = torch.tensor(2**(-(2**-(math.log2(closest_power_of_2) - 3))),
device=attention_mask.device,
dtype=torch.float32)
powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32)
slopes = torch.pow(base, powers)
if closest_power_of_2 != num_heads:
extra_base = torch.tensor(2**(-(2**-(math.log2(2 * closest_power_of_2) - 3))),
device=attention_mask.device,
dtype=torch.float32)
num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2)
extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32)
slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)
# Note: alibi will added to the attention bias that will be applied to the query, key product of attention
# => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length)
# => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length)
# => the query_length dimension will then be broadcasted correctly
# This is more or less identical to T5's relative position bias:
# https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527
arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :]
alibi = slopes[..., None] * arange_tensor
if dist.is_initialized():
num_heads_per_rank = int(num_heads / dist.get_world_size())
offset = dist.get_rank() * num_heads_per_rank
alibi = alibi.view(batch_size, num_heads, 1, seq_length)
alibi = alibi[:, offset:num_heads_per_rank + offset, :, :]
return alibi.reshape(batch_size * num_heads_per_rank, 1, seq_length).to(dtype)
else:
return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype)
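# Illustrative usage sketch (single process, deepspeed.comm not initialized, so the non-distributed
# branch above is taken):
#   mask = torch.ones(2, 5, dtype=torch.int64)                        # (batch_size=2, seq_len=5)
#   alibi = build_bloom_alibi_tensor(mask, num_heads=4, dtype=torch.float16)
#   alibi.shape  ->  torch.Size([8, 1, 5])                            # (batch_size * num_heads, 1, seq_len)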
class InferenceEngine(Module):
inference_mp_group = None
inference_ep_group = None
expert_mp_group = None
def __init__(self, model, config):
"""
Args:
model: torch.nn.Module
config: DeepSpeedInferenceConfig
"""
global DS_INFERENCE_ENABLED
DS_INFERENCE_ENABLED = True
super().__init__()
self.module = model
self._config = config
self._get_model_config_generate(config) # keep for weird backward compatibility
# patch model generate with ours if model uses it
if hasattr(self.module, "generate"):
self.generate = self._generate
if hasattr(self.module, "config"):
TransformerPolicy.hf_model_config = self.module.config
# todo: keep this self.injection_dict because we don't use to change config.injection_policy API
# todo: this will get changed when Molly's PR on auto injection dict is merged
self.injection_dict = config.injection_policy
# todo: refactor the mp_group and mp_size related in the next refactor
self.mp_group = config.tensor_parallel.tp_group
self.mpu = config.tensor_parallel.mpu
#self._validate_args(self.mpu, config.replace_with_kernel_inject)
self.quantize_merge_count = 1
self.quantization_scales = None
# these are not needed in the config as we are creating them ourselves in the inference engine
self.ep_group = None # config.moe.ep_group
self.expert_mp_group = None # config.moe.ep_mp_group
self.cuda_graph_created = False
self.checkpoint_engine = TorchCheckpointEngine()
quantization_setting = None
self._init_quantization_setting(
quantization_setting) # todo: update with the new quant config for weight quant
self.model_profile_enabled = False
self._model_times = []
if not self.injection_dict and config.replace_with_kernel_inject:
# This is a hack to remove the prepare_mask function on HF side for BLOOM architecture
self.remove_mask_prepare_for_bloom()
if self.injection_dict or not config.replace_with_kernel_inject:
# This is a hack to redefine the alibi func due to TP
if config.tensor_parallel.tp_size > 1:
self.build_alibi_tensor()
if get_accelerator().device_name() == 'cuda' and config.enable_cuda_graph:
assert pkg_version.parse(torch.__version__) >= pkg_version.parse("1.10"), \
"If you want to use cuda graph, please upgrade torch to at least v1.10"
# Check if model passed to engine is loaded w/ meta tensors, in which case
# kernel injection must be enabled.
# NOTE: This check assumes a Hugging Face hierarchy for the device type i.e. module.device.type
self.model_meta_device = self.module.device.type == 'meta' if hasattr(self.module, "device") else False
# convert model to intended dtype
if config.dtype:
self._convert_to_dtype(config)
if self.mpu:
config.tensor_parallel.tp_size = dist.get_world_size(group=self.mpu.get_model_parallel_group())
self.mp_group = self.mpu.get_model_parallel_group()
elif config.tensor_parallel.tp_size > 1:
self._create_model_parallel_group(config)
config.tensor_parallel.tp_group = self.mp_group
if isinstance(self.module, torch.nn.Module):
moe, _ = has_moe_layers(self.module)
else:
moe = False
if moe and dist.get_world_size() > 1:
self._create_ep_parallel_group(config.moe.moe_experts)
# We only support three modes: 1) user specified policy for tensor-parallelism, 2) kernel injection (replace_with_kernel_inject), and 3) automatic tensor parallelism if tp_size > 1.
if self.injection_dict:
# 1. User specified Tensor Parallelism
assert not config.replace_with_kernel_inject, "Cannot use both user specified injection policy and kernel injection"
for client_module, injection_policy in self.injection_dict.items():
# construct the tuple and pass that instead of a string or dict.
if isinstance(injection_policy, str):
config.injection_policy_tuple = (injection_policy, )
else:
config.injection_policy_tuple = injection_policy
self._apply_injection_policy(config, client_module)
else:
if config.replace_with_kernel_inject:
# 2. DeepSpeed Kernel Injection
self._apply_injection_policy(config)
elif config.tensor_parallel.tp_size > 1:
# 3. Automatic Tensor Parallelism
parser_dict = AutoTP.tp_parser(model)
print("AutoTP: ", parser_dict)
for client_module, injection_policy in parser_dict:
if isinstance(injection_policy, str):
config.injection_policy_tuple = (injection_policy, )
else:
config.injection_policy_tuple = injection_policy
self._apply_injection_policy(config, client_module)
device = get_accelerator().current_device_name()
self.module.to(device)
if config.tensor_parallel.tp_size > 1:
_rng_state = get_accelerator().get_rng_state().to(get_accelerator().current_device_name())
dist.broadcast(_rng_state, 0)
get_accelerator().set_rng_state(_rng_state.cpu())
if config.tensor_parallel.tp_size > 1:
assert not config.enable_cuda_graph, "Cuda graph is not supported for model parallelism"
# Check if local CUDA graphs can be created in replacement modules
self.local_cuda_graph = self._local_cuda_graph_used(self.module)
def profile_model_time(self, use_cuda_events=True):
if not self.model_profile_enabled and not self._config.enable_cuda_graph:
self.module.register_forward_pre_hook(self._pre_forward_hook)
self.module.register_forward_hook(self._post_forward_hook)
self.model_profile_enabled = True
self.use_cuda_events = use_cuda_events
if self.use_cuda_events:
self.timers = SynchronizedWallClockTimer()
# todo: remove this once all the config dicts are centralized from top level pydantic config
def _get_model_config_generate(self, config):
# this is being passed to replace_transformer_layer(config=self.user_model_config_dict)
self.config = getattr(self.module, 'config', None) if config.config is None else config.config
def remove_mask_prepare_for_bloom(self):
if hasattr(self.module, 'transformer'):
if hasattr(self.module.transformer, '_prepare_attn_mask'):
self.module.transformer._prepare_attn_mask = lambda attention_mask, *args, **kwargs: attention_mask
def build_alibi_tensor(self):
if hasattr(self.module, 'transformer'):
if hasattr(self.module.transformer, 'build_alibi_tensor'):
self.module.transformer.build_alibi_tensor = build_bloom_alibi_tensor
def _pre_forward_hook(self, module, *inputs, **kwargs):
if self.use_cuda_events:
self.timers(INFERENCE_MODEL_TIMER).start()
else:
get_accelerator().synchronize()
self._start = time.time()
def _post_forward_hook(self, module, input, output):
if self.use_cuda_events:
self.timers(INFERENCE_MODEL_TIMER).stop()
elapsed_time = self.timers(INFERENCE_MODEL_TIMER).elapsed(reset=True)
else:
get_accelerator().synchronize()
self._end = time.time()
elapsed_time = (self._end - self._start) * 1e3 # convert seconds to ms
self._model_times.append(elapsed_time)
def _create_model_parallel_group(self, config):
# Call the init process
if InferenceEngine.inference_mp_group is None:
init_distributed()
local_rank = int(os.getenv('LOCAL_RANK', '0'))
get_accelerator().set_device(local_rank)
ranks = [i for i in range(config.tensor_parallel.tp_size)]
self.mp_group = dist.new_group(ranks)
InferenceEngine.inference_mp_group = self.mp_group
else:
self.mp_group = InferenceEngine.inference_mp_group
def _create_ep_parallel_group(self, moe_experts):
# Call the init process
self.ep_group = {}
self.expert_mp_group = {}
moe_experts = moe_experts if type(moe_experts) is list else [moe_experts]
for e in moe_experts:
self.ep_group.update({e: None})
self.expert_mp_group.update({e: None})
for moe_ep_size in self.ep_group.keys():
num_ep_groups = dist.get_world_size() // moe_ep_size
for i in range(num_ep_groups):
ep_cnt = i * moe_ep_size
size = dist.get_world_size() if moe_ep_size > dist.get_world_size() else moe_ep_size
ranks = list(range(ep_cnt, ep_cnt + size))
_ep_group = dist.new_group(ranks)
if dist.get_rank() in ranks:
self.ep_group.update({moe_ep_size: _ep_group})
if dist.get_world_size() > moe_ep_size:
num_expert_mp_groups = dist.get_world_size() // num_ep_groups
expert_mp_size = dist.get_world_size() // moe_ep_size
for i in range(num_expert_mp_groups):
expert_mp_comm_ranks = [i + nr * moe_ep_size for nr in range(expert_mp_size)]
_expert_mp_group = dist.new_group(expert_mp_comm_ranks)
if dist.get_rank() in expert_mp_comm_ranks:
self.expert_mp_group.update({moe_ep_size: _expert_mp_group})
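    # Illustrative worked example (not part of the original source): with a world
    # size of 8 and moe_ep_size = 4, num_ep_groups = 2 and the expert-parallel
    # groups are ranks [0, 1, 2, 3] and [4, 5, 6, 7]. Because world_size > moe_ep_size,
    # expert_mp_size = 2 and the expert model-parallel groups pair the ranks holding
    # the same expert shard across EP groups: [0, 4], [1, 5], [2, 6], [3, 7].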
def _init_quantization_setting(self, quantization_setting):
self.quantize_bits = 8
self.mlp_extra_grouping = False
self.quantize_groups = 1
if type(quantization_setting) is tuple:
self.mlp_extra_grouping, \
self.quantize_groups = quantization_setting
elif quantization_setting is not None:
self.quantize_groups = quantization_setting
log_dist(
f"quantize_bits = {self.quantize_bits} "
f"mlp_extra_grouping = {self.mlp_extra_grouping}, "
f"quantize_groups = {self.quantize_groups}", [0])
# TODO: remove this function and add this functionality to pydantic config checking
def _validate_args(self, mpu, replace_with_kernel_inject):
# TODO: to support SD pipeline we need to avoid this check for now
if replace_with_kernel_inject and not isinstance(self.module, Module):
raise ValueError(f"model must be a torch.nn.Module, got {type(self.module)}")
if not isinstance(self._config.tensor_parallel.tp_size, int) or self._config.tensor_parallel.tp_size < 1:
raise ValueError(f"mp_size must be an int >= 1, got {self._config.tensor_parallel.tp_size}")
if mpu:
methods = ["get_model_parallel_group", "get_data_parallel_group"]
for method in methods:
if not hasattr(mpu, method):
raise ValueError(f"mpu is missing {method}")
if self._config.checkpoint is not None and not isinstance(self._config.checkpoint, (str, dict)):
raise ValueError(f"checkpoint must be None, str or dict, got {type(self._config.checkpoint)}")
supported_dtypes = [None, torch.half, torch.int8, torch.float]
if self._config.dtype not in supported_dtypes:
raise ValueError(f"{self._config.dtype} not supported, valid dtype: {supported_dtypes}")
if self.injection_dict is not None and not isinstance(self.injection_dict, dict):
raise ValueError(f"injection_dict must be None or a dict, got: {self.injection_dict}")
def load_model_with_checkpoint(self, r_module):
self.mp_replace = ReplaceWithTensorSlicing(
mp_group=self.mp_group, mp_size=self._config.tensor_parallel.tp_size) #, out_dim=0, in_dim=1)
error_msgs = []
def load(module, state_dict, prefix):
args = (state_dict, prefix, {}, True, [], [], error_msgs)
if hasattr(module, 'weight'):
if module.weight.data.is_meta:
# meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here
module.weight = torch.nn.parameter.Parameter(data=torch.empty_like(module.weight.data,
device="cpu"),
requires_grad=module.weight.data.requires_grad)
if 'query_key_value' in prefix:
module.weight = self.mp_replace.strided_copy(module.weight.data,
state_dict[prefix + 'weight'],
num_splits=3)
else:
module.weight = self.mp_replace.copy(module.weight.data, state_dict[prefix + 'weight'])
else:
if module.norm.weight.data.is_meta:
# meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here
module.norm.weight = torch.nn.parameter.Parameter(
data=torch.empty_like(module.norm.weight.data, device="cpu"),
requires_grad=module.norm.weight.data.requires_grad)
module.norm.weight = self.mp_replace.copy(module.norm.weight.data, state_dict[prefix + 'weight'])
if prefix + 'bias' in self.key_list:
if hasattr(module, 'norm'):
if module.norm.bias.data.is_meta:
# meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here
module.norm.bias = torch.nn.parameter.Parameter(
data=torch.empty_like(module.norm.bias.data, device="cpu"),
requires_grad=module.norm.bias.data.requires_grad)
module.norm.bias = self.mp_replace.copy(module.norm.bias, state_dict[prefix + 'bias'])
else:
if module.bias.data.is_meta:
# meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here
module.bias = torch.nn.parameter.Parameter(data=torch.empty_like(module.bias.data,
device="cpu"),
requires_grad=module.bias.data.requires_grad)
data = state_dict[prefix + 'bias']
data = data.to(get_accelerator().current_device_name())
module.bias = self.mp_replace.copy(module.bias, data)
layer_policies = {
nn.Linear: load,
nn.Embedding: load,
nn.LayerNorm: load,
LinearLayer: load,
LinearAllreduce: load
}
def load_module_recursive(module, prefix='', level=0):
for name, child in module.named_children():
if child.__class__ in layer_policies:
checking_key = prefix + name + '.'
if not any(checking_key in item for item in self.key_list):
continue
if len(list(child.parameters())) > 0 and list(child.parameters())[0].numel() == 0:
if len(child.weight.ds_shape) == 1:
child = Normalize(dim=child.weight.ds_shape[-1], dtype=child.weight.dtype, eps=child.eps)
setattr(module, name, child)
load(child, self.sd, prefix + name + '.')
else:
load_module_recursive(child, prefix if level == 0 else prefix + name + '.', level + 1)
load_module_recursive(r_module)
embedding_weight = None
for n, p in r_module.named_parameters():
if "word_embeddings." in n or "embed_tokens." in n or "wte." in n:
embedding_weight = p
if embedding_weight is not None and hasattr(r_module, "lm_head") and hasattr(
r_module.lm_head, "weight") and r_module.lm_head.weight.is_meta:
r_module.lm_head.weight = embedding_weight
def _apply_injection_policy(self, config, client_module=None):
# client_module is only passed when using the injection_dict method.
checkpoint_dir = config.checkpoint
checkpoint = SDLoaderFactory.get_sd_loader_json(checkpoint_dir,
self.checkpoint_engine) if checkpoint_dir is not None else None
generic_injection(self.module,
fp16=(config.dtype == torch.half) or (config.dtype == torch.int8),
bf16=(config.dtype == torch.bfloat16),
enable_cuda_graph=config.enable_cuda_graph)
if isinstance(self.module, torch.nn.Module):
# config is our DeepSpeedInferenceConfig and self.config is the HF model config
replace_transformer_layer(client_module, self.module, checkpoint, config, self.config)
def _get_all_ckpt_names(self, checkpoints_path, tag):
ckpt_file_pattern = self._get_ckpt_name(checkpoints_path, tag, mp_placeholder="*")
import glob
ckpt_files = glob.glob(ckpt_file_pattern)
ckpt_files.sort()
return ckpt_files
def _get_ckpt_name(self, checkpoints_path, tag, mp_placeholder=None):
if mp_placeholder is not None:
mp_rank_str = mp_placeholder
else:
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
mp_rank_str = "{:02d}".format(mp_rank)
ckpt_name = os.path.join(
checkpoints_path,
"mp_rank_" + mp_rank_str + "_model_states.pt",
)
return ckpt_name
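    # Illustrative example (not part of the original source): with
    # checkpoints_path="/tmp/ckpt" and model-parallel rank 0 this resolves to
    # "/tmp/ckpt/mp_rank_00_model_states.pt"; passing mp_placeholder="*" (as
    # _get_all_ckpt_names does) yields a glob pattern that matches every rank.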
def _load_checkpoint(self, load_dir, load_module_strict=True, tag=None):
is_pipe_parallel = isinstance(self.module, PipelineModule)
if is_pipe_parallel:
raise RuntimeError('pipeline parallelism is currently not supported in inference.')
if not isinstance(load_dir, dict) and os.path.isdir(load_dir):
if tag is None:
latest_path = os.path.join(load_dir, "latest")
if os.path.isfile(latest_path):
with open(latest_path, "r") as fd:
tag = fd.read().strip()
ckpt_list = self._get_all_ckpt_names(load_dir, tag)
sd_loader = SDLoaderFactory.get_sd_loader(ckpt_list, self.checkpoint_engine)
else:
sd_loader = SDLoaderFactory.get_sd_loader_json(load_dir, self.checkpoint_engine)
checkpoint = sd_loader['checkpoints']
if type(checkpoint) is list:
self.sd = torch.load(checkpoint[0], map_location='cpu')
self.key_list = list(self.sd.keys())
self.load_model_with_checkpoint(self.module)
for i in range(1, len(checkpoint)):
if not dist.is_initialized() or dist.get_rank() == 0:
print(f"loading checkpoint ({i})")
self.sd = torch.load(checkpoint[i], map_location=get_accelerator().device_name())
self.key_list = list(self.sd.keys())
self.load_model_with_checkpoint(self.module)
else:
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
load_path, checkpoint, quantize_config = sd_loader.load(self._config.tensor_parallel.tp_size,
mp_rank,
is_pipe_parallel=is_pipe_parallel,
quantize=(self._config.dtype is torch.int8),
quantize_groups=self.quantize_groups,
mlp_extra_grouping=self.mlp_extra_grouping)
self.quantization_scales, self.quantize_merge_count = quantize_config
moe, _ = has_moe_layers(self.module)
if moe:
from deepspeed.runtime.engine import DeepSpeedEngine
old_moe_load = False
if not isinstance(checkpoint['num_experts'], list):
old_moe_load = True
DeepSpeedEngine.load_moe_state_dict(load_dir,
tag,
state_dict=checkpoint[self._choose_module_key(checkpoint)],
old_moe_load=old_moe_load,
model=self.module,
mpu=self.mpu,
checkpoint_engine=self.checkpoint_engine)
self.module.load_state_dict(state_dict=checkpoint[self._choose_module_key(checkpoint)],
strict=load_module_strict)
def _choose_module_key(self, sd):
assert not ('module' in sd
and 'model' in sd), "checkpoint has both 'model' and 'module' keys, not sure how to proceed"
        assert 'module' in sd or 'model' in sd, "checkpoint contains neither 'model' nor 'module' keys, not sure how to proceed"
if 'module' in sd:
return 'module'
elif 'model' in sd:
return 'model'
def _convert_to_dtype(self, config):
if not isinstance(self.module, torch.nn.Module):
return
if False: #config.dtype is torch.int8 and self.quantization_scales is None:
quantizer = WeightQuantization(mlp_extra_grouping=self.mlp_extra_grouping)
model, self.quantization_scales = quantizer.model_quantize(self.module, self.injection_dict,
self.quantize_bits, self.quantize_groups)
elif config.dtype == torch.half:
self.module.half()
elif config.dtype == torch.bfloat16:
self.module.bfloat16()
elif config.dtype == torch.float:
self.module.float()
def _create_cuda_graph(self, *inputs, **kwargs):
# warmup to create the workspace and cublas handle
cuda_stream = get_accelerator().Stream()
cuda_stream.wait_stream(get_accelerator().current_stream())
with get_accelerator().stream(cuda_stream):
for i in range(3):
ret = self.module(*inputs, **kwargs)
get_accelerator().current_stream().wait_stream(cuda_stream)
# create cuda_graph and assign static_inputs and static_outputs
self._cuda_graphs = torch.cuda.CUDAGraph()
self.static_inputs = inputs
self.static_kwargs = kwargs
with torch.cuda.graph(self._cuda_graphs):
self.static_output = self.module(*self.static_inputs, **self.static_kwargs)
self.cuda_graph_created = True
def _graph_replay(self, *inputs, **kwargs):
for i in range(len(inputs)):
if torch.is_tensor(inputs[i]):
self.static_inputs[i].copy_(inputs[i])
for k in kwargs:
if torch.is_tensor(kwargs[k]):
self.static_kwargs[k].copy_(kwargs[k])
self._cuda_graphs.replay()
return self.static_output
def model_times(self):
assert self.model_profile_enabled, "model profiling is not enabled"
model_times = self._model_times
if self._config.enable_cuda_graph and len(self._model_times) == 0:
raise ValueError("Model times are empty and cuda graph is enabled. If "
"this is a GPT-style model this combo is not supported. If this is a "
"BERT-style model this is a bug, please report it. "
f"Model type is: {type(self.module)}")
self._model_times = []
return model_times
def _module_match(self, module):
for policy in generic_policies:
policy = policy()
if policy.match_replaced(module):
return True
return False
def _local_cuda_graph_used(self, module):
if isinstance(module, torch.nn.Module):
return False
else:
sub_module_cuda_graph = False
for name in module.__dict__.keys():
sub_module = getattr(module, name)
if self._module_match(sub_module) and hasattr(sub_module, "enable_cuda_graph"):
sub_module_cuda_graph = True
return sub_module_cuda_graph
def forward(self, *inputs, **kwargs):
"""Execute forward propagation
Arguments:
*inputs: Variable length input list
**kwargs: variable length keyword arguments
"""
start = None
if self.model_profile_enabled and get_accelerator().device_name() == 'cuda' and self._config.enable_cuda_graph:
get_accelerator().synchronize()
start = time.time()
if get_accelerator().device_name() == 'cuda' and self._config.enable_cuda_graph and not self.local_cuda_graph:
if self.cuda_graph_created:
outputs = self._graph_replay(*inputs, **kwargs)
else:
self._create_cuda_graph(*inputs, **kwargs)
outputs = self._graph_replay(*inputs, **kwargs)
else:
outputs = self.module(*inputs, **kwargs)
if self.model_profile_enabled and self._config.enable_cuda_graph:
get_accelerator().synchronize()
duration = (time.time() - start) * 1e3 # convert seconds to ms
self._model_times.append(duration)
return outputs
def _generate(self, *inputs, **kwargs):
# Reset KV-cache at the beginning of generate
if hasattr(self.module, 'reset_cache'):
self.module.reset_cache()
num_beams = 1
if "generation_config" in kwargs:
gen_config = kwargs["generation_config"]
num_beams = getattr(gen_config, "num_beams", 1)
if "num_beams" in kwargs:
num_beams = kwargs["num_beams"]
if num_beams > 1:
raise NotImplementedError("DeepSpeed does not support `num_beams` > 1, if this is important to you please "
"add your request to: https://github.com/microsoft/DeepSpeed/issues/2506")
return self.module.generate(*inputs, **kwargs)
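# Illustrative usage sketch (comments only, not part of the original source). The
# engine above is normally constructed through deepspeed.init_inference, e.g.:
#
#   import torch, deepspeed
#   from transformers import AutoModelForCausalLM   # assumes HF transformers is installed
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   engine = deepspeed.init_inference(model,
#                                     dtype=torch.half,
#                                     replace_with_kernel_inject=True)
#   # engine(...) / engine.module.generate(...) can then be used as usual.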
| 31,553 | 48.535322 | 189 | py |
DeepSpeed | DeepSpeed-master/deepspeed/inference/config.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed
from deepspeed.runtime.config_utils import DeepSpeedConfigModel
from deepspeed.runtime.zero.config import DeepSpeedZeroConfig
from pydantic import Field
from pydantic import validator
from typing import Dict, Union
from enum import Enum
class DtypeEnum(Enum):
# The torch dtype must always be the first value (so we return torch.dtype)
fp16 = torch.float16, "torch.float16", "fp16", "float16", "half"
fp32 = torch.float32, "torch.float32", "fp32", "float32", "float"
bf16 = torch.bfloat16, "torch.bfloat16", "bf16", "bfloat16", "bfloat"
int8 = torch.int8, "torch.int8", "int8"
# Copied from https://stackoverflow.com/a/43210118
# Allows us to use multiple values for each Enum index and returns first
# listed value when Enum is called
def __new__(cls, *values):
obj = object.__new__(cls)
# first value is canonical value
obj._value_ = values[0]
for other_value in values[1:]:
cls._value2member_map_[other_value] = obj
obj._all_values = values
return obj
def __repr__(self):
return "<%s.%s: %s>" % (
self.__class__.__name__,
self._name_,
", ".join([repr(v) for v in self._all_values]),
)
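# Illustrative behavior of the multi-valued enum above (not part of the original
# source): every alias maps to the same member, whose canonical value is the torch dtype.
#   DtypeEnum("fp16") is DtypeEnum.fp16 is DtypeEnum(torch.float16)  # -> True
#   DtypeEnum.fp16.value                                             # -> torch.float16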
class MoETypeEnum(str, Enum):
residual = "residual"
standard = "standard"
class DeepSpeedTPConfig(DeepSpeedConfigModel):
""" Configure tensor parallelism settings """
enabled: bool = True
""" Turn tensor parallelism on/off. """
tp_size: int = 1
""" Number of devices to split the model across using tensor parallelism. """
mpu: object = None
"""
A model parallelism unit object that implements
``get_{model,data}_parallel_{rank,group,world_size}()``.
"""
tp_group: object = None
class DeepSpeedMoEConfig(DeepSpeedConfigModel):
""" Sets parameters for MoE """
enabled: bool = True
ep_size: int = 1
"""
The expert-parallelism size which is used for partitioning the experts
across the GPUs in the expert-parallel group.
"""
moe_experts: list = Field([1], alias="num_experts")
""" The global number of experts used in an MoE layer. """
type: MoETypeEnum = MoETypeEnum.standard
"""
Specify the type of MoE layer. We have two types of MoE layer: 'Standard'
and 'Residual'.
"""
ep_mp_group: object = None
ep_group: object = Field(None, alias="expert_group")
class QuantTypeEnum(str, Enum):
asym = "asymmetric"
sym = "symmetric"
class BaseQuantConfig(DeepSpeedConfigModel):
enabled = True
num_bits = 8
q_type: QuantTypeEnum = QuantTypeEnum.sym
q_groups: int = 1
class WeightQuantConfig(BaseQuantConfig):
enabled = True
class ActivationQuantConfig(BaseQuantConfig):
enabled = True
class QKVQuantConfig(DeepSpeedConfigModel):
enabled = True
class QuantizationConfig(DeepSpeedConfigModel):
enabled: bool = True
activation: ActivationQuantConfig = ActivationQuantConfig()
weight: WeightQuantConfig = WeightQuantConfig()
qkv: QKVQuantConfig = QKVQuantConfig()
# todo: brainstorm on how to do ckpt loading for DS inference
class InferenceCheckpointConfig(DeepSpeedConfigModel):
checkpoint_dir: str = None
save_mp_checkpoint_path: str = None
base_dir: str = None
class DeepSpeedInferenceConfig(DeepSpeedConfigModel):
""" Sets parameters for DeepSpeed Inference Engine. """
replace_with_kernel_inject: bool = Field(False, alias="kernel_inject")
"""
    Set to true to inject inference kernels for models such as Bert, GPT2,
GPT-Neo and GPT-J. Otherwise, the injection_dict provides the names of two
linear layers as a tuple:
`(attention_output projection, transformer output projection)`
"""
dtype: DtypeEnum = torch.float16
"""
Desired model data type, will convert model to this type.
Supported target types: `torch.half`, `torch.int8`, `torch.float`
"""
tensor_parallel: DeepSpeedTPConfig = Field({}, alias="tp")
"""
Configuration for tensor parallelism used to split the model across several
GPUs. Expects a dictionary containing values for :any:`DeepSpeedTPConfig`.
"""
enable_cuda_graph: bool = False
"""
Use this flag for capturing the CUDA-Graph of the inference ops, so that it
can run faster using the graph replay method.
"""
use_triton: bool = False
"""
Use this flag to use triton kernels for inference ops.
"""
triton_autotune: bool = False
"""
Use this flag to enable triton autotuning.
    Turning it on is better for performance, but it increases the first-run
    latency because of the autotuning.
"""
zero: DeepSpeedZeroConfig = {}
"""
ZeRO configuration to use with the Inference Engine. Expects a dictionary
containing values for :any:`DeepSpeedZeroConfig`.
"""
triangular_masking: bool = Field(True, alias="tm")
"""
Controls the type of masking for attention scores in transformer layer.
Note that the masking is application specific.
"""
moe: Union[bool, DeepSpeedMoEConfig] = {}
"""
Specify if the type of Transformer is MoE. Expects a dictionary containing
values for :any:`DeepSpeedMoEConfig`.
"""
quant: QuantizationConfig = {}
"""
NOTE: only works for int8 dtype.
Quantization settings used for quantizing your model using the MoQ. The
setting can be one element or a tuple. If one value is passed in, we
consider it as the number of groups used in quantization. A tuple is passed
    in to indicate that there is extra grouping for the MLP part of a
    Transformer layer (e.g. (True, 8) means the model is quantized with 8
    groups for the whole network except the MLP part, which uses extra
    grouping). Expects a dictionary containing values for
:any:`QuantizationConfig`.
"""
#todo: refactor the following 3 into the new checkpoint_config
checkpoint: Union[str, Dict] = None
"""
Path to deepspeed compatible checkpoint or path to JSON with load policy.
"""
base_dir: str = None
"""
    This shows the root directory under which all the checkpoint files exist.
This can be passed through the json config too.
"""
set_empty_params: bool = False
"""
    Specifies whether the inference module is created with empty or real tensors.
"""
save_mp_checkpoint_path: str = None
"""
    The path where the loaded model checkpoint will be saved. This
feature is used for adjusting the parallelism degree to help alleviate the
model loading overhead. It does not save any new checkpoint if no path is
passed.
"""
checkpoint_config: InferenceCheckpointConfig = Field({}, alias="ckpt_config")
"""
TODO: Add docs. Expects a dictionary containing values for
:any:`InferenceCheckpointConfig`.
"""
return_tuple: bool = True
"""
Specify whether or not the transformer layers need to return a tuple or a
Tensor.
"""
training_mp_size: int = 1
"""
    If loading a checkpoint, this is the mp size that it was trained with; it
    may be different from the mp size that you want to use during
inference.
"""
replace_method: str = Field(
"auto",
deprecated=True,
deprecated_msg="This parameter is no longer needed, please remove from your call to DeepSpeed-inference")
injection_policy: Dict = Field(None, alias="injection_dict")
"""
Dictionary mapping a client nn.Module to its corresponding injection
policy. e.g., `{BertLayer : deepspeed.inference.HFBertLayerPolicy}`
"""
injection_policy_tuple: tuple = None
""" TODO: Add docs """
config: Dict = Field(None, alias="args") # todo: really no need for this field if we can refactor
max_out_tokens: int = Field(1024, alias="max_tokens")
"""
    This argument sets the maximum number of tokens the inference engine can
    work with, including the input and output tokens. Please consider
    increasing it to the token length required for your use case.
"""
min_out_tokens: int = Field(1, alias="min_tokens")
"""
This argument communicates to the runtime the minimum number of tokens you
expect you will need to generate. This will cause the runtime to error
    if it is unable to provide this, and to report the memory pressure involved
    rather than seg-faulting or producing corrupted output.
"""
transposed_mode: bool = Field(False, alias="transposed_mode")
mp_size: int = Field(1, deprecated=True, new_param="tensor_parallel.tp_size")
"""
Desired model parallel size, default is 1 meaning no model parallelism.
    Deprecated, please use the ``tensor_parallel`` config to control model
parallelism.
"""
mpu: object = Field(None, deprecated=True, new_param="tensor_parallel.mpu")
ep_size: int = Field(1, deprecated=True, new_param="moe.ep_size")
ep_group: object = Field(None, alias="expert_group", deprecated=True, new_param="moe.ep_group")
ep_mp_group: object = Field(None, alias="expert_mp_group", deprecated=True, new_param="moe.ep_mp_group")
moe_experts: list = Field([1], deprecated=True, new_param="moe.moe_experts")
moe_type: MoETypeEnum = Field(MoETypeEnum.standard, deprecated=True, new_param="moe.type")
@validator("moe")
def moe_backward_compat(cls, field_value, values):
if isinstance(field_value, bool):
return DeepSpeedMoEConfig(moe=field_value)
return field_value
@validator("use_triton")
def has_triton(cls, field_value, values):
if field_value and not deepspeed.HAS_TRITON:
raise ValueError('Triton needs to be installed to use deepspeed with triton kernels')
return field_value
class Config:
# Get the str representation of the datatype for serialization
json_encoders = {torch.dtype: lambda x: str(x)}
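# Illustrative usage sketch (not part of the original source): the config is usually
# built from the kwargs or dict passed to deepspeed.init_inference, but it can also be
# constructed directly, using the aliases declared above:
#
#   cfg = DeepSpeedInferenceConfig(dtype=torch.float16, tp={"tp_size": 2}, max_tokens=2048)
#   cfg.tensor_parallel.tp_size   # -> 2
#   cfg.max_out_tokens            # -> 2048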
| 10,068 | 32.121711 | 113 | py |
DeepSpeed | DeepSpeed-master/deepspeed/autotuning/tuner/cost_model.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .utils import *
try:
import xgboost as xgb
except ImportError:
xgb = None
class XGBoostCostModel():
def __init__(self, loss_type, num_threads=None, log_interval=25, upper_model=None):
assert xgb is not None, "missing requirements, please install deepspeed w. 'autotuning_ml' extra."
self.loss_type = loss_type
if loss_type == "reg":
self.xgb_params = {
"max_depth": 3,
"gamma": 0.0001,
"min_child_weight": 1,
"subsample": 1.0,
"eta": 0.3,
"lambda": 1.0,
"alpha": 0,
"objective": "reg:linear",
}
elif loss_type == "rank":
self.xgb_params = {
"max_depth": 3,
"gamma": 0.0001,
"min_child_weight": 1,
"subsample": 1.0,
"eta": 0.3,
"lambda": 1.0,
"alpha": 0,
"objective": "rank:pairwise",
}
else:
raise RuntimeError("Invalid loss type: " + loss_type)
self.xgb_params["verbosity"] = 0
if num_threads:
self.xgb_params["nthread"] = num_threads
def fit(self, xs, ys):
x_train = np.array(xs, dtype=np.float32)
y_train = np.array(ys, dtype=np.float32)
y_max = np.max(y_train)
y_train = y_train / max(y_max, 1e-9)
index = np.random.permutation(len(x_train))
dtrain = xgb.DMatrix(x_train[index], y_train[index])
self.bst = xgb.train(self.xgb_params, dtrain)
def predict(self, xs):
features = xgb.DMatrix(xs)
return self.bst.predict(features)
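# Illustrative usage sketch (not part of the original source): the cost model is fit on
# previously measured (feature vector, performance) pairs and then scores new candidates.
#
#   model = XGBoostCostModel("rank")
#   model.fit(xs=[[1, 2], [2, 4], [4, 8]], ys=[1.0, 2.0, 3.5])            # toy data
#   scores = model.predict(np.array([[3, 6]], dtype=np.float32))          # higher score ~ better candidate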
| 1,820 | 26.179104 | 106 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/sparse_attention/softmax.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a
# https://github.com/ptillet/torch-blocksparse/blob/master/torch_blocksparse/matmul.py
import torch
import triton
import triton.language as tl
def next_power_of_2(n):
n -= 1
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
n += 1
return n
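# For example (illustrative, not part of the original source):
#   next_power_of_2(5) -> 8, next_power_of_2(16) -> 16, next_power_of_2(17) -> 32
# This is used by the heuristics below to pick a power-of-two tile width TN.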
def num_warps(n):
if n < 512:
return 4
if n < 2048:
return 8
return 16
@triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[6] * meta['BLOCK'])})
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[6] * meta['BLOCK'])})
@triton.jit
def _forward(X, scale, LUT, RPE, KP_M, ATTN_M, sizemax, stride_zx, stride_zrpe, stride_hrpe, stride_srpe, stride_zkpm,
stride_zattnm, **meta):
TN = meta['TN']
BLOCK = meta['BLOCK']
pidhm = tl.program_id(0)
pidz = tl.program_id(1)
# create index ranges
rxm = pidhm % BLOCK
rbm = pidhm // BLOCK
rxn = tl.arange(0, TN) % BLOCK
rbn = tl.arange(0, TN) // BLOCK
# extract information from LUT
header = LUT + rbm * 2
size = tl.load(header + 0)
offset = tl.load(header + 1)
check = rbn < size
rbmn = tl.where(check, rbn, size - 1)
# block id and column id
blockid = tl.load(LUT + offset + rbmn * 4 + 0)
columnid = tl.load(LUT + offset + rbmn * 4 + 1)
rowid = tl.load(LUT + offset + rbmn * 4 + 2)
headid = tl.load(LUT + offset + rbmn * 4 + 3)
# pointers to X
px = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
x = tl.load(px, mask=check, other=-float('inf'))
x = x.to(tl.float32)
# apply scale
if meta['APPLY_SCALE']:
x = x * scale
# apply RPE
if meta['APPLY_RPE']:
prpe = RPE + pidz * stride_zrpe + headid * stride_hrpe + columnid * BLOCK + rowid * BLOCK * stride_srpe + rxm * stride_srpe + rxn
rpe = tl.load(prpe, mask=check, other=0)
x = x + rpe
# apply key-padding mask
if meta['APPLY_KP_MASK']:
pkp_m = KP_M + pidz * stride_zkpm + columnid * BLOCK + rxn
kp_m = tl.load(pkp_m, mask=check, other=-float('inf'))
if meta['KP_MASK_MUL']:
kp_m = tl.where(kp_m == 0, -float('inf'), 0.)
x = x + kp_m
# apply attention mask
if meta['APPLY_ATTN_MASK']:
pattn_m = ATTN_M + columnid * BLOCK + rowid * BLOCK * stride_zattnm + rxm * stride_zattnm + rxn
attn_m = tl.load(pattn_m, mask=check, other=-float('inf'))
if meta['ATTN_MASK_MUL']:
attn_m = tl.where(attn_m == 0, -float('inf'), 0.)
x = x + attn_m
# computation
x = tl.softmax(x)
tl.store(px, x, mask=check)
@triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[4] * meta['BLOCK'])})
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[4]) * meta['BLOCK']})
@triton.jit
def _backward(X, scale, DX, LUT, sizemax, stride_zx, stride_zdx, **meta):
pidhm = tl.program_id(0)
pidz = tl.program_id(1)
TN = meta['TN']
BLOCK = meta['BLOCK']
# create index ranges
rxm = pidhm % BLOCK
rbm = pidhm // BLOCK
rxn = tl.arange(0, TN) % BLOCK
rbn = tl.arange(0, TN) // BLOCK
# extract information from look-up table
header = LUT + rbm * 2
size = tl.load(header + 0)
offset = tl.load(header + 1)
# bounds checking on lut
check = rbn < size
rbmn = tl.where(check, rbn, size - 1)
# initialize pointers to block-sparse input
blockid = tl.load(LUT + offset + rbmn * 4)
X = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
DX = DX + pidz * stride_zdx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
# compute fused softmax backward
x = tl.load(X, mask=check, other=0)
dx = tl.load(DX, mask=check, other=0)
x = x.to(tl.float32)
dx = dx.to(tl.float32)
y = x * (dx - tl.sum(x * dx, 0)) * scale
tl.store(DX, y, mask=check)
class _sparse_softmax(torch.autograd.Function):
bwd_kernels = dict()
@staticmethod
def make_lut(layout, block, device):
_empty = torch.tensor([], dtype=torch.int64, device=layout.device)
sizes = _empty.clone()
# sizes along rows
for h in range(layout.shape[0]):
sizes = torch.cat((sizes, layout[h, :, :].sum(-1)))
# offsets in block format
offsets = torch.zeros_like(sizes)
offsets[1:] = torch.cumsum(sizes[:-1], dim=0)
# block indices
idx = torch.arange(layout.sum())
head = layout.nonzero()[:, 0]
rows = layout.nonzero()[:, 1]
columns = layout.nonzero()[:, 2]
core = torch.stack((idx, columns, rows, head), dim=1).view(-1)
# construct look-up table
offsets = offsets * 4 + 2 * sizes.numel()
header = torch.stack((sizes, offsets), dim=1).view(-1)
lut = torch.cat((header, core)).type(torch.int32).to(device)
return lut, int(sizes.max())
@staticmethod
def forward(ctx, x, scale, rpe, key_padding_mask, attn_mask, kp_mask_mode, attn_mask_mode, spdims, block, lut,
num_blocks, maxlut, bench, time):
apply_scale = False if scale == 1.0 else True
# handle None rpe
if rpe is None:
apply_rpe = False
stride_zrpe, stride_hrpe, stride_srpe = 0, 0, 0
rpe = torch.empty(0, dtype=x.dtype, device=x.device)
else:
apply_rpe = True
stride_zrpe, stride_hrpe, stride_srpe = rpe.stride(0), rpe.stride(1), rpe.stride(2)
# handle None key_padding_mask
if key_padding_mask is None:
apply_kp_mask = False
stride_zkpm = 0
key_padding_mask = torch.empty(0, dtype=x.dtype, device=x.device)
else:
apply_kp_mask = True
stride_zkpm = key_padding_mask.stride(0)
# handle None attention_mask
if attn_mask is None:
apply_attn_mask = False
stride_zattnm = 0
attn_mask = torch.empty(0, dtype=x.dtype, device=x.device)
else:
apply_attn_mask = True
stride_zattnm = attn_mask.stride(0)
# run kernel
M = x.shape[0]
meta = {
'BLOCK': block,
'APPLY_SCALE': apply_scale,
'APPLY_RPE': apply_rpe,
'APPLY_KP_MASK': apply_kp_mask,
'APPLY_ATTN_MASK': apply_attn_mask,
'KP_MASK_MUL': kp_mask_mode == 'mul',
'ATTN_MASK_MUL': attn_mask_mode == 'mul',
}
grid = lambda opt: [spdims[0] * spdims[1] * block, M]
_forward[grid](x, scale, lut, rpe, key_padding_mask, attn_mask, maxlut, x.stride(0),\
stride_zrpe, stride_hrpe, stride_srpe, stride_zkpm, stride_zattnm, **meta)
# save to context
ctx.mark_dirty(x)
ctx.save_for_backward(x, lut)
ctx.spdims = spdims
ctx.block = block
ctx.maxlut = maxlut
ctx.scale = scale
ctx.apply_scale = apply_scale
ctx.apply_rpe = apply_rpe
ctx.apply_kp_mask = apply_kp_mask
ctx.apply_attn_mask = apply_attn_mask
ctx.kp_mask_mode = kp_mask_mode
ctx.attn_mask_mode = attn_mask_mode
return x
@staticmethod
def backward(ctx, dx):
# retrieve from context
x, lut = ctx.saved_tensors
# run kernel
M = x.shape[0]
grid = lambda opt: [ctx.spdims[0] * ctx.spdims[1] * ctx.block, M]
_backward[grid](x, ctx.scale, dx, lut, ctx.maxlut, x.stride(0), dx.stride(0), BLOCK=ctx.block)
return dx, None, None, None, None, None, None, None, None, None, None, None, None, None, None
class Softmax:
"""Block-Sparse Softmax class; this class computes softmax on a block sparse matrix. It is also able to apply either/all of the following masks:
- relative position embedding
- key padding mask
- attention mask
For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
"""
def sparse_softmax(*args, **kwargs):
return _sparse_softmax.apply(*args, **kwargs)
def make_lut(self, device):
"""Generates the sparsity layout used in block-sparse softmax
"""
key = (device, )
if key not in self.lut_cache:
self.lut_cache[key] = _sparse_softmax.make_lut(self.layout, self.block, device)
return self.lut_cache[key]
def __init__(self, layout, block, bench=False):
"""Initialize the Block-Sparse Softmax class.
Arguments:
layout: required: sparsity layout tensor
block: required: an integer determining the block size.
bench: optional: set if you want to do benchmarking
"""
self.num_blocks = layout.sum().item()
self.spdims = layout.shape
self.layout = layout
self.block = block
self.bench = bench
self.lut_cache = dict()
def __call__(self,
x,
scale=1.,
rpe=None,
key_padding_mask=None,
attn_mask=None,
key_padding_mask_mode='add',
attn_mask_mode='add'):
"""Applies softmax on a Block-Sparse input tensor.
For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
Arguments:
x: required: a block-sparse tensor that softmax is applied on it; computation will be in place and result will be returned in the same tensor
scale: optional: a float value; x values will be multiplied by this value before normalization. Default value is 1.0.
rpe: optional: a tensor same dimension as x that is used as relative position embedding
key_padding_mask: optional: a mask tensor of size (BatchSize X SequenceLength)
attn_mask: optional: a mask tensor of size (SequenceLength X SequenceLength); currently only 2D is supported
            key_padding_mask_mode: optional: a string ('add' or 'mul') determining whether key_padding_mask should be added or multiplied
            attn_mask_mode: optional: a string ('add' or 'mul') determining whether attn_mask should be added or multiplied
Return:
x: a block-sparse tensor contains normalized input x using softmax; and masks applied if given
"""
time_y = [None]
if rpe is not None and rpe.dtype != x.dtype:
raise ValueError('relative position embedding must be %s' % x.dtype)
if attn_mask is not None and attn_mask.dtype != x.dtype:
raise ValueError('Attention mask must be %s' % x.dtype)
if key_padding_mask is not None and key_padding_mask.dtype != x.dtype:
raise ValueError('Key padding mask must be %s' % x.dtype)
lut, maxlut = self.make_lut(x.device)
x = Softmax.sparse_softmax(x, scale, rpe, key_padding_mask, attn_mask, key_padding_mask_mode, attn_mask_mode,
self.spdims, self.block, lut, self.num_blocks, maxlut, self.bench, time_y)
self.time_y = time_y[0]
return x
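# Illustrative usage sketch (not part of the original source): the layout typically comes
# from a SparsityConfig (see sparsity_config.py) and `scores` is the block-sparse
# attention-score tensor produced by the block-sparse MatMul.
#
#   layout = sparsity_config.make_layout(seq_len)      # (num_heads, num_blocks, num_blocks)
#   sparse_softmax = Softmax(layout, block=16)
#   probs = sparse_softmax(scores, scale=1.0 / (head_dim ** 0.5))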
| 11,322 | 37.124579 | 154 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/sparse_attention/sparse_attention_utils.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from torch.nn import functional as F
from deepspeed.ops.sparse_attention import BertSparseSelfAttention, SparsityConfig
'''
This file contains few utility functions to handle adapting pretrained model with sparse self-attention module.
'''
class SparseAttentionUtils:
"""This class provides some utility functions that are use integrating sparse attention into transformer models.
Such utilities include extending position embeddings, replacing current self-attention layer with sparse attention, padding sequences to multiple of block size, etc.
"""
@staticmethod
def extend_position_embedding(model, max_position):
"""This function extends the position embedding weights of a model loaded from a checkpoint.
It assumes the new max position is bigger than the original max length.
Arguments:
model: required: a transformer model
max_position: required: an integer determining new position embedding size
Return:
model: updated model; in which position embedding weights have been extended based on new size
"""
if hasattr(model, 'bert'):
original_max_position = model.bert.embeddings.position_embeddings.weight.size(0)
assert max_position > original_max_position
extend_multiples = max(1, max_position // original_max_position)
model.bert.embeddings.position_embeddings.weight.data = model.bert.embeddings.position_embeddings.weight.repeat(
extend_multiples, 1)
elif hasattr(model, 'roberta'):
# RoBERTa has positions 0 & 1 reserved, so embedding size is max position + 2
original_max_position, embed_size = model.roberta.embeddings.position_embeddings.weight.shape
original_max_position -= 2
extend_multiples = max(1, max_position // original_max_position)
assert max_position > original_max_position
max_position += 2
extended_position_embedding = model.roberta.embeddings.position_embeddings.weight.new_empty(
max_position, embed_size)
k = 2
for i in range(extend_multiples):
extended_position_embedding[k:(
k + original_max_position)] = model.roberta.embeddings.position_embeddings.weight[2:]
k += original_max_position
model.roberta.embeddings.position_embeddings.weight.data = extended_position_embedding
else:
raise ValueError(
'Please extend \"extend_position_embedding\" function to support your model type. It currently only supports \"bert\" & \"roberta\"!'
)
model.config.max_position_embeddings = max_position
print(f'Extended position embeddings to {original_max_position * extend_multiples}')
return model
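    # Illustrative example (not part of the original source): with a BERT checkpoint
    # trained for original_max_position=512 and max_position=2048, extend_multiples=4,
    # so the 512 learned position embeddings are tiled 4 times to cover 2048 positions.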
@staticmethod
def update_tokenizer_model_max_length(tokenizer, max_position):
"""This function updates the position embedding length of a tokenizer to a new max position.
Arguments:
tokenizer: required: a transformer tokenizer
max_position: required: an integer determining new position embedding size
Return:
tokenizer: updated tokenizer; in which model maximum length has been extended based on new size
"""
tokenizer.model_max_length = max_position
tokenizer.init_kwargs['model_max_length'] = max_position
        print(f'updated tokenizer model maximum length to {max_position}')
return tokenizer
@staticmethod
def replace_model_self_attention_with_sparse_self_attention(
model,
max_position,
# SparsityConfig parameters needs to be set accordingly
sparsity_config=SparsityConfig(num_heads=4)):
"""This function replaces the self attention layers in model encoder with sparse self attention.
It currently supports bert and roberta model and can be easily extended to any other models following similar steps here.
For sparsityConfig, refer to the config class.
Arguments:
model: required: a transformer model
max_position: required: an integer determining new position embedding size
sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on SparsityConfig class
Return:
model: updated model; in which self attention layer has been replaced with DeepSpeed Sparse Self Attention layer.
"""
if hasattr(model, 'bert'):
model.config.max_position_embeddings = max_position
model.replace_self_attention_layer_with_sparse_self_attention_layer(model.config, model.bert.encoder.layer,
sparsity_config)
elif hasattr(model, 'roberta'):
model.config.max_position_embeddings = max_position + 2
model.replace_self_attention_layer_with_sparse_self_attention_layer(model.config,
model.roberta.encoder.layer,
sparsity_config)
else:
raise ValueError(
'Please extend \"update_model_self_attention_to_sparse_self_attention\" function to support \
your model type. It currently only supports \"bert\" & \"roberta\"!')
return model
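    # Illustrative usage sketch (not part of the original source), assuming a Hugging Face
    # BERT model/tokenizer are already loaded and FixedSparsityConfig is imported from
    # deepspeed.ops.sparse_attention:
    #
    #   sparsity_config = FixedSparsityConfig(num_heads=model.config.num_attention_heads)
    #   model = SparseAttentionUtils.replace_model_self_attention_with_sparse_self_attention(
    #       model, max_position=4096, sparsity_config=sparsity_config)
    #   model = SparseAttentionUtils.extend_position_embedding(model, max_position=4096)
    #   tokenizer = SparseAttentionUtils.update_tokenizer_model_max_length(tokenizer, 4096)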
@staticmethod
def replace_self_attention_layer_with_sparse_self_attention_layer(
config,
layers,
# SparsityConfig parameters needs to be set accordingly
sparsity_config=SparsityConfig(num_heads=4)):
"""This function replaces the self attention layers in attention layer with sparse self attention.
For sparsityConfig, refer to the config class.
Arguments:
config: required: transformer model config
layers: required: transformer model attention layers
sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on SparsityConfig class
Return:
layers: updated attention layers; in which self attention layers have been replaced with DeepSpeed Sparse Self Attention layer.
"""
for layer in layers:
deepspeed_sparse_self_attn = BertSparseSelfAttention(config, sparsity_config)
deepspeed_sparse_self_attn.query = layer.attention.self.query
deepspeed_sparse_self_attn.key = layer.attention.self.key
deepspeed_sparse_self_attn.value = layer.attention.self.value
layer.attention.self = deepspeed_sparse_self_attn
return layers
@staticmethod
def pad_to_block_size(block_size, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds,
pad_token_id, model_embeddings):
"""This function pads input tokens and attention mask on sequence length dimension to be multiple of block size.
This is a requirement for Sparse Transformer in which the self attention layer works on sequences of length multiple of block size.
It needs to be called in your model, such as BertModel, right before you calculate the embedding outputs.
Note)
1- instead of passing your embedding layer to this function, you can simply add this function to your model. It can be more simplified if given attention_mask and/or token_type_ids are none.
2- you need to call unpad function before returning your model output to unpad the encoder sequence output.
Arguments:
block_size: required: an integer determining the block size of sparsity config.
pad_token_id: required: an integer determining the pad token from the model config; such as bert.config.pad_token_id.
input_ids: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary
attention_mask: a torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences.
token_type_ids: a torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
position_ids: a torch.LongTensor of shape [batch_size, sequence_length] with the indices of positions of each input sequence tokens in the position embeddings.
inputs_embeds: an optional torch.FloatTensor of shape [batch_size, sequence_length, hidden_size] that contains embedded representation and can be passed instead of input_ids directly.
model_embeddings: an optional object. If inputs_embeds are not none, this will be your model embeddings such as BertEmbeddings from your model such as BertModel. You can move this function inside your model and use self.embeddings instead of passing this parameter.
Return:
pad_len: an integer determining how much inputs have been padded to transfer sequence length dimension to multiple of block size.
input_ids: if input_ids are not none padded input_ids otherwise none.
attention_mask: if attention_mask is not none padded attention_mask otherwise none.
token_type_ids: if token_type_ids are not none padded token_type_ids otherwise none.
position_ids: if position_ids are not none padded position_ids otherwise none.
inputs_embeds: if inputs_embeds are not none padded inputs_embeds otherwise none.
"""
batch_size, seq_len = input_ids.shape if input_ids is not None else inputs_embeds.shape[:-1]
pad_len = (block_size - seq_len % block_size) % block_size
if pad_len > 0:
if inputs_embeds is not None:
pad_input_ids = inputs_embeds.new_full((batch_size, pad_len), pad_token_id, dtype=torch.long)
pad_inputs_embeds = model_embeddings(pad_input_ids)
inputs_embeds = torch.cat([inputs_embeds, pad_inputs_embeds], dim=-2)
# may not be needed as input_ids are not used if inputs_embeds are given
if input_ids is not None:
input_ids = F.pad(input_ids, (0, pad_len), value=pad_token_id)
if position_ids is not None:
# pad position_id with pad_token_id
position_ids = F.pad(position_ids, (0, pad_len), value=pad_token_id)
# pad attention mask without attention on the padding tokens
attention_mask = F.pad(attention_mask, (0, pad_len), value=False)
# pad token_type_ids with token_type_id = 0
token_type_ids = F.pad(token_type_ids, (0, pad_len), value=0)
return pad_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds
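    # Illustrative example (not part of the original source): with block_size=16 and
    # seq_len=100, pad_len = (16 - 100 % 16) % 16 = 12, so the provided inputs are padded
    # from length 100 to 112; if seq_len is already a multiple of 16, pad_len is 0 and
    # the inputs are returned unchanged.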
@staticmethod
def unpad_sequence_output(pad_len, sequence_output):
"""This function unpads sequence output if inputs of the model were padded.
This is a requirement for Sparse Transformer in which the self attention layer works on sequences of length multiple of block size.
It needs to be called in your model, such as BertModel, right before you return the model outputs.
Arguments:
pad_len: required: an integer determining how much model inputs have been padded to transfer sequence length dimension to multiple of block size.
sequence_output: required: sequence output of the encoder layer.
Return:
            sequence_output: unpadded sequence output of the encoder layer.
"""
if (pad_len > 0):
sequence_output = sequence_output[:, :-pad_len]
return sequence_output
| 12,300 | 57.856459 | 335 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/sparse_attention/sparsity_config.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import random
class SparsityConfig:
"""Abstract Configuration class to store `sparsity configuration of a self attention layer`.
It contains shared property of different block-sparse sparsity patterns. However, each class needs to extend it based on required property and functionality.
"""
def __init__(self, num_heads, block=16, different_layout_per_head=False):
"""Initialize the Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
"""
self.num_heads = num_heads
self.block = block
self.different_layout_per_head = different_layout_per_head
self.num_layout_heads = num_heads if different_layout_per_head else 1
def setup_layout(self, seq_len):
"""Create layout tensor for the given sequence length
Arguments:
            seq_len: required: an integer determining the sequence length for which the layout is created.
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) for sparsity layout of all head; initialized with zero
"""
if (seq_len % self.block != 0):
            raise ValueError(f'Sequence Length, {seq_len}, needs to be divisible by Block size {self.block}!')
num_blocks = seq_len // self.block
# TODO Currently we allocate layout per head; needs to be updated if heads share a single layout.
layout = torch.zeros((self.num_heads, num_blocks, num_blocks), dtype=torch.int64)
return layout
def check_and_propagate_first_head_layout(self, layout):
"""If all heads require same sparsity layout, it propagate first head layout to all heads
Arguments:
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head
"""
if not self.different_layout_per_head:
layout[1:self.num_heads, :, :] = layout[0, :, :]
return layout
class DenseSparsityConfig(SparsityConfig):
"""Configuration class to store `Dense` configuration.
In reality, this is not sparse and all blocks are used. We keep it for the sake of comparison and comprehension.
"""
def __init__(self, num_heads, block=16, different_layout_per_head=False):
"""Initialize the Dense Sparsity Pattern Config.
In reality, this is not sparse and all blocks are used. We keep it for the sake of comparison and comprehension.
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
            block: optional: an integer determining the block size; kept for consistency with the other sparsity configs.
different_layout_per_head: optional: this is just for the sake of consistency with other sparsity formats; can ignore it for DenseSparsityConfig
"""
super().__init__(num_heads, block, different_layout_per_head)
def make_layout(self, seq_len):
"""Set 1 to all blocks of the layout meaning the pattern is dense; not sparse.
Arguments:
            seq_len: required: an integer determining the underlying sequence length; must be <= max sequence length
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; for dense everything is 1
"""
layout = self.setup_layout(seq_len)
layout[:, :, :] = 1
return layout
class FixedSparsityConfig(SparsityConfig):
"""Configuration class to store `Fixed` sparsity configuration.
For more details about this sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509; this has been customized.
This class extends parent class of `SparsityConfig` and customizes it for `Fixed` sparsity.
"""
def __init__(self,
num_heads,
block=16,
different_layout_per_head=False,
num_local_blocks=4,
num_global_blocks=1,
attention='bidirectional',
horizontal_global_attention=False,
num_different_global_patterns=1):
"""Initialize `Fixed` Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
num_local_blocks: optional: an integer determining the number of blocks in local attention window.
num_global_blocks: optional: an integer determining how many consecutive blocks in a local window is used as the representative of the window for global attention.
            attention: optional: a string determining the attention type. Attention can be `unidirectional`, as in autoregressive models, in which tokens attend only to tokens that appear before them in the context; in that case, the upper triangular part of the attention matrix is empty. Or it can be `bidirectional`, as in BERT, in which tokens can attend to any other tokens before or after them; then, the upper triangular part of the attention matrix is the mirror of the lower triangular part.
horizontal_global_attention: optional: a boolean determining if blocks that are global representative of a local window, also attend to all other blocks. This is valid only if attention type is `bidirectional`. Looking at the attention matrix, that means global attention not only includes the vertical blocks, but also horizontal blocks.
            num_different_global_patterns: optional: an integer determining the number of different global attention layouts. While global attention can be fixed by which block/s are representative of any local window, since there are multiple heads, each head can use a different global representative. For example, with a 4-block local window and a global attention size of 1 block, we can have 4 different versions in which the first, second, third, or fourth block of each local window can be the global representative of that window. This parameter determines how many of such patterns we want. Of course, there is a limitation based on num_local_blocks and num_global_blocks.
"""
super().__init__(num_heads, block, different_layout_per_head)
self.num_local_blocks = num_local_blocks
if (num_local_blocks % num_global_blocks != 0):
raise ValueError(
                f'Number of blocks in a local window, {num_local_blocks}, must be divisible by number of global blocks, {num_global_blocks}!'
)
self.num_global_blocks = num_global_blocks
if (attention != 'unidirectional' and attention != 'bidirectional'):
raise NotImplementedError('only \"uni/bi-directional\" attentions are supported for now!')
self.attention = attention
if (attention != 'bidirectional' and horizontal_global_attention):
raise ValueError('only \"bi-directional\" attentions can support horizontal global attention!')
self.horizontal_global_attention = horizontal_global_attention
if (num_different_global_patterns > 1 and not different_layout_per_head):
raise ValueError(
f'Number of different layouts cannot be more than one when you have set a single layout for all heads! Set different_layout_per_head to True.'
)
if (num_different_global_patterns > (num_local_blocks // num_global_blocks)):
raise ValueError(
f'Number of layout versions (num_different_global_patterns), {num_different_global_patterns}, cannot be larger than number of local window blocks divided by number of global blocks, {num_local_blocks} / {num_global_blocks} = {num_local_blocks//num_global_blocks}!'
)
self.num_different_global_patterns = num_different_global_patterns
def set_local_layout(self, h, layout):
"""Sets local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local layout is set
"""
num_blocks = layout.shape[1]
for i in range(0, num_blocks, self.num_local_blocks):
end = min(i + self.num_local_blocks, num_blocks)
for row in range(i, end):
for col in range(i, (row + 1 if self.attention == 'unidirectional' else end)):
layout[h, row, col] = 1
return layout
def set_global_layout(self, h, layout):
"""Sets global attention layout used by the given head in the sparse attention.
Currently we set global blocks starting from the last block of a local window to the first one. That means if a local window consists of 4 blocks and global attention size is one block, we use block #4 in each local window as global. If we have different layout per head, then other heads will get #3, #2, and #1. And if we have more heads (and different layout has set) than num of global attentions, multiple head may have same global attentions.
Note) if horizontal_global_attention is set, global blocks will be set both horizontally and vertically.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set
"""
num_blocks = layout.shape[1]
first_global_block_idx = self.num_local_blocks - (
1 + h % self.num_different_global_patterns) * self.num_global_blocks
# set all global blocks except the last one if (in last local window)
end = num_blocks - (num_blocks % self.num_local_blocks)
for i in range(first_global_block_idx, end, self.num_local_blocks):
# vertical global attention
first_row = 0 if self.attention == 'bidirectional' else i
#(((i // self.num_local_blocks) + 1) * self.num_local_blocks)
#if (first_row < num_blocks):
layout[h, first_row:, i:i + self.num_global_blocks] = 1
# horizontal global attention; only in bidirectional attention
if (self.horizontal_global_attention):
layout[h, i:i + self.num_global_blocks, :] = 1
# set last global blocks; handle possible short last local window
if (end < num_blocks):
start = min(end + first_global_block_idx, num_blocks - self.num_global_blocks)
end = start + self.num_global_blocks
# vertical global attention
first_row = 0 if self.attention == 'bidirectional' else start
#(((start // self.num_local_blocks) + 1) * self.num_local_blocks)
#if (first_row < num_blocks):
layout[h, first_row:, start:end] = 1
# horizontal global attention
if (self.horizontal_global_attention):
layout[h, start:end, :] = 1
return layout
def make_layout(self, seq_len):
"""Generates `Fixed` sparsity layout used by each head in the sparse attention.
Arguments:
             seq_len: required: an integer determining the sequence length for which the layout is built; it must be divisible by the block size.
        Return:
             layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `Fixed` sparsity layout of all heads
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_local_layout(h, layout)
layout = self.set_global_layout(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
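# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Builds a small `Fixed` layout so the local/global block pattern can be inspected.
# The keyword arguments mirror the attributes referenced in the validation code above
# (num_local_blocks, num_global_blocks, num_different_global_patterns); the chosen
# values and the sequence length are assumptions for demonstration only.
def _example_fixed_sparsity_layout():
    config = FixedSparsityConfig(num_heads=4,
                                 block=16,
                                 num_local_blocks=4,
                                 num_global_blocks=1,
                                 num_different_global_patterns=1)
    # 256 tokens with block=16 gives a 16 x 16 block grid per head
    layout = config.make_layout(seq_len=256)
    return layout  # tensor of shape (num_heads, 16, 16) filled with 0s and 1s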
class VariableSparsityConfig(SparsityConfig):
"""Configuration class to store `Variable` sparsity configuration.
This layout is an extension of FixedSparsityConfig in which:
- user can set random layout; default value is zero means no random block
- user can provide a list of local block sizes
- user can provide a list of global block indices.
    For more details about the `Variable` sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509; this has been customized.
    This class extends parent class of `SparsityConfig` and customizes it for `Variable` sparsity.
"""
def __init__(self,
num_heads,
block=16,
different_layout_per_head=False,
num_random_blocks=0,
local_window_blocks=[4],
global_block_indices=[0],
global_block_end_indices=None,
attention='bidirectional',
horizontal_global_attention=False):
"""Initialize `Variable` Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
             different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability. Currently this sparsity config can only assign a single layout to all heads; it needs to be extended to support a different layout per head.
num_random_blocks: optional: an integer determining the number of random blocks in each block row.
local_window_blocks: optional: a list of integers determining the number of blocks in each local attention window. It assumes first number determines # of blocks in the first local window, second the second window, ..., and the last number determines the number of blocks in the remaining local windows.
             global_block_indices: optional: a list of integers determining which blocks are considered as global attention. The given indices determine the blocks that all other token blocks attend to and that, in turn, attend to all other token blocks. Default value is only index 0. Notice that if global_block_end_indices parameter is set, this parameter is used as starting index of each global window.
             global_block_end_indices: optional: a list of integers determining end indices of global window blocks. By default this is not used. But if it is set, it must have the same size as the global_block_indices parameter, and combining these two parameters, for each index i, blocks from global_block_indices[i] to global_block_end_indices[i] (exclusive) are considered as global attention.
             attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens that appear before them in the context; in that case, the upper triangular part of the attention matrix is empty. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them; then, the upper triangular part of the attention matrix is the mirror of the lower triangular part.
horizontal_global_attention: optional: a boolean determining if blocks that are global representative of a local window, also attend to all other blocks. This is valid only if attention type is `bidirectional`. Looking at the attention matrix, that means global attention not only includes the vertical blocks, but also horizontal blocks.
"""
super().__init__(num_heads, block, different_layout_per_head)
self.num_random_blocks = num_random_blocks
self.local_window_blocks = local_window_blocks
self.global_block_indices = global_block_indices
if (global_block_end_indices is not None):
if (len(global_block_indices) != len(global_block_end_indices)):
raise ValueError(
f'Global block start indices length, {len(global_block_indices)}, must be same as global block end indices length, {len(global_block_end_indices)}!'
)
for _, (start_idx, end_idx) in enumerate(zip(global_block_indices, global_block_end_indices)):
if start_idx >= end_idx:
raise ValueError(
f'Global block start index, {start_idx}, must be smaller than global block end index, {end_idx}!'
)
self.global_block_end_indices = global_block_end_indices
if (attention != 'unidirectional' and attention != 'bidirectional'):
raise NotImplementedError('only \"uni/bi-directional\" attentions are supported for now!')
self.attention = attention
if (attention != 'bidirectional' and horizontal_global_attention):
raise ValueError('only \"bi-directional\" attentions can support horizontal global attention!')
self.horizontal_global_attention = horizontal_global_attention
def set_random_layout(self, h, layout):
"""Sets random attention layout used by the given head in the sparse attention.
Note) By default, it assumes there will be a unique random block layout for all heads; unless `different_layout_per_head` parameter is set in which each head can have a different random layout.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which random layout is set
"""
num_blocks = layout.shape[1]
if (num_blocks < self.num_random_blocks):
raise ValueError(
f'Number of random blocks, {self.num_random_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
)
for row in range(0, num_blocks):
rnd_cols = random.sample(range(0, num_blocks), self.num_random_blocks)
layout[h, row, rnd_cols] = 1
return layout
def set_local_layout(self, h, layout):
"""Sets local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local layout is set
"""
num_blocks = layout.shape[1]
start_block_idx = 0
end_block_idx = 0
for block_size in self.local_window_blocks:
end_block_idx += block_size
end_block_idx = min(end_block_idx, num_blocks)
for row in range(start_block_idx, end_block_idx):
for col in range(start_block_idx, (row + 1 if self.attention == 'unidirectional' else end_block_idx)):
layout[h, row, col] = 1
start_block_idx += block_size
        # if there is any remaining part that is not attended, use the last local window block size as the local window for the remaining applicable local windows
for i in range(start_block_idx, num_blocks, block_size):
end_block_idx = min(i + block_size, num_blocks)
for row in range(i, end_block_idx):
for col in range(i, (row + 1 if self.attention == 'unidirectional' else end_block_idx)):
layout[h, row, col] = 1
return layout
def set_global_layout(self, h, layout):
"""Sets global attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set
"""
num_blocks = layout.shape[1]
if (self.global_block_end_indices is None):
for idx in self.global_block_indices:
# if global block idx is in the range of the sequence blocks
if (idx < num_blocks):
#global rows
if (self.horizontal_global_attention):
layout[h, idx, :] = 1
#global columns
first_row = 0 if self.attention == 'bidirectional' else idx
layout[h, first_row:, idx] = 1
else:
for _, (start_idx, end_idx) in enumerate(zip(self.global_block_indices, self.global_block_end_indices)):
# if global block idx is in the range of the sequence blocks
if (start_idx < num_blocks):
end_idx = min(end_idx, num_blocks)
#global rows
if (self.horizontal_global_attention):
layout[h, start_idx:end_idx, :] = 1
#global columns
first_row = 0 if self.attention == 'bidirectional' else start_idx
layout[h, first_row:, start_idx:end_idx] = 1
return layout
def make_layout(self, seq_len):
"""Generates `Variable` sparsity layout used by each head in the sparse attention.
Arguments:
             seq_len: required: an integer determining the sequence length for which the layout is built; it must be divisible by the block size.
        Return:
             layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `Variable` sparsity layout of all heads
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_random_layout(h, layout)
layout = self.set_local_layout(h, layout)
layout = self.set_global_layout(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
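# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Combines the three components of the `Variable` pattern: random blocks, local
# windows of growing size (2 blocks, then 4 blocks repeated) and a global block at
# index 0. The argument values and the sequence length are assumptions for demonstration only.
def _example_variable_sparsity_layout():
    config = VariableSparsityConfig(num_heads=8,
                                    block=16,
                                    num_random_blocks=1,
                                    local_window_blocks=[2, 4],
                                    global_block_indices=[0],
                                    attention='bidirectional',
                                    horizontal_global_attention=True)
    return config.make_layout(seq_len=512)  # (8, 32, 32) block-level layout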
class BigBirdSparsityConfig(SparsityConfig):
"""Configuration class to store `BigBird` sparsity configuration.
For more details about this sparsity config, please see `Big Bird: Transformers for Longer Sequences`: https://arxiv.org/pdf/2007.14062.pdf
This class extends parent class of `SparsityConfig` and customizes it for `BigBird` sparsity.
"""
def __init__(self,
num_heads,
block=16,
different_layout_per_head=False,
num_random_blocks=1,
num_sliding_window_blocks=3,
num_global_blocks=1,
attention='bidirectional'):
"""Initialize the BigBird Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
num_random_blocks: optional: an integer determining the number of random blocks in each block row.
num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window.
num_global_blocks: optional: an integer determining how many consecutive blocks, starting from index 0, are considered as global attention. Global block tokens will be attended by all other block tokens and will attend to all other block tokens as well.
             attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens that appear before them in the context; in that case, the upper triangular part of the attention matrix is empty. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them; then, the upper triangular part of the attention matrix is the mirror of the lower triangular part.
"""
super().__init__(num_heads, block, different_layout_per_head)
self.num_random_blocks = num_random_blocks
self.num_sliding_window_blocks = num_sliding_window_blocks
self.num_global_blocks = num_global_blocks
if (attention != 'unidirectional' and attention != 'bidirectional'):
raise NotImplementedError('only \"uni/bi-directional\" attentions are supported for now!')
self.attention = attention
def set_random_layout(self, h, layout):
"""Sets random attention layout used by the given head in the sparse attention.
Note) By default, it assumes there will be a unique random block layout for all heads; unless `different_layout_per_head` parameter is set in which each head can have a different random layout.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which random layout is set
"""
num_blocks = layout.shape[1]
if (num_blocks < self.num_random_blocks):
raise ValueError(
f'Number of random blocks, {self.num_random_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
)
for row in range(0, num_blocks):
sample_range = range(0, num_blocks) if self.attention == 'bidirectional' else range(0, row + 1)
rnd_cols = random.sample(sample_range, self.num_random_blocks)
layout[h, row, rnd_cols] = 1
return layout
def set_sliding_window_layout(self, h, layout):
"""Sets sliding local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local sliding window layout is set
"""
num_blocks = layout.shape[1]
if (num_blocks < self.num_sliding_window_blocks):
raise ValueError(
f'Number of sliding window blocks, {self.num_sliding_window_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
)
w = self.num_sliding_window_blocks // 2
for row in range(0, num_blocks):
start = max(0, row - w)
end = min(row + w + 1, num_blocks)
layout[h, row, start:end] = 1
return layout
def set_global_layout_itc(self, h, layout):
"""Sets global attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set
"""
num_blocks = layout.shape[1]
if (num_blocks < self.num_global_blocks):
raise ValueError(
f'Number of global blocks, {self.num_global_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
)
#global rows
layout[h, 0:self.num_global_blocks, :] = 1
#global columns
layout[h, :, 0:self.num_global_blocks] = 1
if self.attention == 'unidirectional':
# zero out anything attending to the future
layout = torch.tril(layout)
return layout
def make_layout(self, seq_len):
"""Generates `BigBird` sparsity layout used by each head in the sparse attention.
Arguments:
             seq_len: required: an integer determining the sequence length for which the layout is built; it must be divisible by the block size.
        Return:
             layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `BigBird` sparsity layout of all heads
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_random_layout(h, layout)
layout = self.set_sliding_window_layout(h, layout)
layout = self.set_global_layout_itc(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
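# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A `BigBird` layout is the union of random blocks, a sliding window and leading
# global blocks; with `unidirectional` attention the result is lower-triangular.
# The argument values and the sequence length are assumptions for demonstration only.
def _example_bigbird_sparsity_layout():
    config = BigBirdSparsityConfig(num_heads=12,
                                   block=16,
                                   num_random_blocks=1,
                                   num_sliding_window_blocks=3,
                                   num_global_blocks=1,
                                   attention='unidirectional')
    return config.make_layout(seq_len=1024)  # (12, 64, 64) block-level layout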
class BSLongformerSparsityConfig(SparsityConfig):
"""Configuration class to store edited `Longformer` sparsity configuration.
Note) this is a block-sparse version of the Longformer which is slightly different than original Longformer; which is element-wise sparsity.
For more details about this sparsity config, please see `Longformer: The Long-Document Transformer`: https://arxiv.org/pdf/2004.05150.pdf
This class extends parent class of `SparsityConfig` and customizes it for `Longformer` sparsity.
"""
def __init__(self,
num_heads,
block=16,
different_layout_per_head=False,
num_sliding_window_blocks=3,
global_block_indices=[0],
global_block_end_indices=None,
attention='bidirectional'):
"""Initialize the edited `Longformer` Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window.
             global_block_indices: optional: a list of integers determining which blocks are considered as global attention. The given indices determine the blocks that all other token blocks attend to and that, in turn, attend to all other token blocks. Default value is only index 0. Notice that if global_block_end_indices parameter is set, this parameter is used as starting index of each global window.
             global_block_end_indices: optional: a list of integers determining end indices of global window blocks. By default this is not used. But if it is set, it must have the same size as the global_block_indices parameter, and combining these two parameters, for each index i, blocks from global_block_indices[i] to global_block_end_indices[i] (exclusive) are considered as global attention.
             attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens that appear before them in the context; in that case, the upper triangular part of the attention matrix is empty. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them; then, the upper triangular part of the attention matrix is the mirror of the lower triangular part.
"""
super().__init__(num_heads, block, different_layout_per_head)
self.num_sliding_window_blocks = num_sliding_window_blocks
self.global_block_indices = global_block_indices
self.attention = attention
if (global_block_end_indices is not None):
if (len(global_block_indices) != len(global_block_end_indices)):
raise ValueError(
f'Global block start indices length, {len(global_block_indices)}, must be same as global block end indices length, {len(global_block_end_indices)}!'
)
for _, (start_idx, end_idx) in enumerate(zip(global_block_indices, global_block_end_indices)):
if start_idx >= end_idx:
raise ValueError(
f'Global block start index, {start_idx}, must be smaller than global block end index, {end_idx}!'
)
self.global_block_end_indices = global_block_end_indices
def set_sliding_window_layout(self, h, layout):
"""Sets sliding local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local sliding window layout is set
"""
num_blocks = layout.shape[1]
if (num_blocks < self.num_sliding_window_blocks):
raise ValueError(
f'Number of sliding window blocks, {self.num_sliding_window_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
)
w = self.num_sliding_window_blocks // 2
for row in range(0, num_blocks):
start = max(0, row - w)
end = min(row + w + 1, num_blocks)
layout[h, row, start:end] = 1
return layout
def set_global_layout(self, h, layout):
"""Sets global attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set
"""
num_blocks = layout.shape[1]
if (self.global_block_end_indices is None):
for idx in self.global_block_indices:
# if global block idx is in the range of the sequence blocks
if (idx < num_blocks):
#global rows
layout[h, idx, :] = 1
#global columns
layout[h, :, idx] = 1
else:
for _, (start_idx, end_idx) in enumerate(zip(self.global_block_indices, self.global_block_end_indices)):
# if global block idx is in the range of the sequence blocks
if (start_idx < num_blocks):
end_idx = min(end_idx, num_blocks)
#global rows
layout[h, start_idx:end_idx, :] = 1
#global columns
layout[h, :, start_idx:end_idx] = 1
if self.attention == 'unidirectional':
layout = torch.tril(layout)
return layout
def make_layout(self, seq_len):
"""Generates edited `Longformer` sparsity layout used by each head in the sparse attention.
Arguments:
             seq_len: required: an integer determining the sequence length for which the layout is built; it must be divisible by the block size.
        Return:
             layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `BSLongformer` sparsity layout of all heads
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_sliding_window_layout(h, layout)
layout = self.set_global_layout(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
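# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Block-sparse Longformer: a sliding window plus a few global blocks given by their
# indices. The argument values and the sequence length are assumptions for demonstration only.
def _example_bslongformer_sparsity_layout():
    config = BSLongformerSparsityConfig(num_heads=16,
                                        block=16,
                                        num_sliding_window_blocks=3,
                                        global_block_indices=[0, 1],
                                        attention='bidirectional')
    return config.make_layout(seq_len=2048)  # (16, 128, 128) block-level layout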
class LocalSlidingWindowSparsityConfig(SparsityConfig):
"""Configuration class to store `Local Sliding Window` sparsity configuration - a purely-local sliding window attention.
This class extends parent class of `SparsityConfig` and customizes it for `Local` sparsity.
"""
def __init__(self, num_heads, block=16, num_sliding_window_blocks=3, attention='unidirectional'):
"""Initialize the Local Sliding Window Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window.
             attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens that appear before them in the context; in that case, the upper triangular part of the attention matrix is empty. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them; then, the upper triangular part of the attention matrix is the mirror of the lower triangular part.
"""
super().__init__(num_heads, block)
self.num_sliding_window_blocks = num_sliding_window_blocks
self.attention = attention
def set_sliding_window_layout(self, h, layout):
"""Sets sliding local attention layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local sliding window layout is set
"""
num_blocks = layout.shape[1]
if (num_blocks < self.num_sliding_window_blocks):
raise ValueError(
f'Number of sliding window blocks, {self.num_sliding_window_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
)
w = self.num_sliding_window_blocks // 2
for row in range(0, num_blocks):
start = max(0, row - w)
end = min(row + w + 1, num_blocks) if self.attention == "bidirectional" else row + 1
layout[h, row, start:end] = 1
return layout
def make_layout(self, seq_len):
"""Generates `Local Sliding Window` sparsity layout used by each head in the sparse attention.
Arguments:
             seq_len: required: an integer determining the sequence length for which the layout is built; it must be divisible by the block size.
        Return:
             layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `Local Sliding Window` sparsity layout of all heads
"""
layout = self.setup_layout(seq_len)
for h in range(0, self.num_layout_heads):
layout = self.set_sliding_window_layout(h, layout)
layout = self.check_and_propagate_first_head_layout(layout)
return layout
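# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A purely local, causal sliding-window layout; each block row attends to itself and
# the two preceding blocks (num_sliding_window_blocks=5 gives a half-window of 2).
# The argument values and the sequence length are assumptions for demonstration only.
def _example_local_sliding_window_layout():
    config = LocalSlidingWindowSparsityConfig(num_heads=8,
                                              block=16,
                                              num_sliding_window_blocks=5,
                                              attention='unidirectional')
    return config.make_layout(seq_len=256)  # (8, 16, 16) block-level layout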
| 42,463 | 57.32967 | 668 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/sparse_attention/sparse_self_attention.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch.nn as nn
import torch
from torch import distributed as dist
from deepspeed.ops.sparse_attention import SparsityConfig
class SparseSelfAttention(nn.Module):
"""Implements an efficient Sparse Self Attention of Transformer layer based on `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
For more information please see, TODO DeepSpeed Sparse Transformer.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial.
"""
def __init__(
self,
# SparsityConfig parameters needs to be set accordingly
sparsity_config=SparsityConfig(num_heads=4),
key_padding_mask_mode='add',
attn_mask_mode='mul',
max_seq_length=2048):
"""Initialize the sparse self attention layer.
Arguments:
sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on SparsityConfig class.
key_padding_mask_mode: optional: a string determining if key padding mask needs to be added, `add`, or be multiplied, `mul`.
attn_mask_mode: optional: a string determining if attention mask needs to be added, `add`, or be multiplied, `mul`.
max_seq_length: optional: the maximum sequence length this sparse attention module will be applied to; it controls the size of the master_layout.
"""
super().__init__()
# sparsity information
self.sparsity_config = sparsity_config
# initialize sparse layout and register as buffer
master_layout = self.sparsity_config.make_layout(max_seq_length)
self.register_buffer("master_layout", master_layout)
self._need_layout_synchronization = True
# mask modes
self.key_padding_mask_mode = key_padding_mask_mode
self.attn_mask_mode = attn_mask_mode
ops = dict()
def get_layout(self, L):
# if layout is never synchronized across GPUs, broadcast the layout from global rank 0
if self._need_layout_synchronization and dist.is_initialized():
dist.broadcast(self.master_layout, src=0)
self._need_layout_synchronization = False
if (L % self.sparsity_config.block != 0):
raise ValueError(
                f'Sequence Length, {L}, needs to be divisible by Block size {self.sparsity_config.block}!')
num_blocks = L // self.sparsity_config.block
return self.master_layout[..., :num_blocks, :num_blocks].cpu() # layout needs to be a CPU tensor
# add to cache
def get_ops(self, H, L):
from deepspeed.ops.sparse_attention.matmul import MatMul
from deepspeed.ops.sparse_attention.softmax import Softmax
if L not in SparseSelfAttention.ops:
sparsity_layout = self.get_layout(L)
sparse_dot_sdd_nt = MatMul(sparsity_layout, self.sparsity_config.block, 'sdd', trans_a=False, trans_b=True)
sparse_dot_dsd_nn = MatMul(sparsity_layout,
self.sparsity_config.block,
'dsd',
trans_a=False,
trans_b=False)
sparse_softmax = Softmax(sparsity_layout, self.sparsity_config.block)
SparseSelfAttention.ops[L] = (sparse_dot_sdd_nt, sparse_dot_dsd_nn, sparse_softmax)
return SparseSelfAttention.ops[L]
def transpose_key_for_scores(self, x, L):
bsz, num_heads, seq_len, head_dim = x.size()
if seq_len != L:
return x.permute(0, 1, 3, 2)
return x
def transpose_mask_for_sparse(self, qtype, x, is_key_padding_mask=False):
x = x.type(qtype)
if is_key_padding_mask:
xdim = x.dim()
for d in range(xdim - 1, 0, -1):
x = x.squeeze(dim=d)
return x
return x.squeeze()
# forward pass
def forward(self, query, key, value, rpe=None, key_padding_mask=None, attn_mask=None):
"""Applies forward phase of sparse self attention
Arguments:
query: required: query tensor
key: required: key tensor
value: required: value tensor
             rpe: optional: a tensor of the same dimension as the input that is used as relative position embedding
key_padding_mask: optional: a mask tensor of size (BatchSize X SequenceLength)
attn_mask: optional: a mask tensor of size (SequenceLength X SequenceLength); currently only 2D is supported
             key_padding_mask_mode: optional: a string determining if key_padding_mask needs to be added, `add`, or multiplied, `mul`; set in the constructor
             attn_mask_mode: optional: a string determining if attn_mask needs to be added, `add`, or multiplied, `mul`; set in the constructor
Return:
attn_output: a dense tensor containing attention context
"""
assert query.dtype == torch.half, "sparse attention only supports training in fp16 currently, please file a github issue if you need fp32 support"
bsz, num_heads, tgt_len, head_dim = query.size()
# transpose back key if it is already transposed
key = self.transpose_key_for_scores(key, tgt_len)
# check that operation is supported
if query.shape != key.shape or key.shape != value.shape:
raise NotImplementedError('only self-attention is supported for now')
# squeeze key_padding_mask if it is given
if key_padding_mask is not None:
key_padding_mask = self.transpose_mask_for_sparse(query.dtype, key_padding_mask, is_key_padding_mask=True)
# squeeze attn_mask if it is given
if attn_mask is not None:
attn_mask = self.transpose_mask_for_sparse(query.dtype, attn_mask)
# cache look-up table computations etc
sparse_dot_sdd_nt, sparse_dot_dsd_nn, sparse_softmax = self.get_ops(num_heads, tgt_len)
scaling = float(head_dim)**-0.5
# attention scores
attn_output_weights = sparse_dot_sdd_nt(query, key)
attn_output_weights = sparse_softmax(attn_output_weights,
scale=scaling,
rpe=rpe,
key_padding_mask=key_padding_mask,
attn_mask=attn_mask,
key_padding_mask_mode=self.key_padding_mask_mode,
attn_mask_mode=self.attn_mask_mode)
# outputs
attn_output = sparse_dot_dsd_nn(attn_output_weights, value)
return attn_output
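# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Minimal forward pass through SparseSelfAttention. The fp16 dtype is required by the
# assert in forward() and the block-sparse kernels run on GPU only; the config choice,
# tensor shapes and device below are assumptions for demonstration only.
def _example_sparse_self_attention():
    from deepspeed.ops.sparse_attention import FixedSparsityConfig
    attn = SparseSelfAttention(sparsity_config=FixedSparsityConfig(num_heads=4), max_seq_length=1024)
    bsz, num_heads, seq_len, head_dim = 2, 4, 256, 64
    q = torch.randn(bsz, num_heads, seq_len, head_dim, dtype=torch.half, device='cuda')
    k = torch.randn_like(q)
    v = torch.randn_like(q)
    # returns a dense (bsz, num_heads, seq_len, head_dim) context tensor
    return attn(q, k, v)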
| 6,746 | 43.98 | 163 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/sparse_attention/matmul.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a
# https://github.com/ptillet/torch-blocksparse/blob/master/torch_blocksparse/matmul.py
import importlib
import torch
import triton
import triton.language as tl
import triton._C.libtriton as libtriton
from deepspeed.accelerator import get_accelerator
@triton.jit
def _kernel(A, B, C, stride_za, stride_ha, stride_ma, stride_ka, stride_zb, stride_hb, stride_kb, stride_nb, stride_zc,
stride_hc, stride_mc, stride_nc, DS0, DS1, SDD_K, SDD_off_width, lut, locks, nlocks, **meta):
TM = meta['TM']
TN = meta['TN']
TK = meta['TK']
TZ = meta['TZ']
BLOCK = meta['BLOCK']
#------------#
#- Prologue -#
#------------#
pid0 = tl.program_id(0)
pid1 = tl.program_id(1)
pidz = tl.program_id(2)
if meta['SDD']:
pid1 = pid1 + SDD_off_width
blockidm = tl.arange(0, TM) // BLOCK
blockidn = tl.arange(0, TN) // BLOCK
offlutm = blockidm * (TN // BLOCK) * 4
offlutn = blockidn * 4
header = lut + pid1 * (TM // BLOCK) * (TN // BLOCK) * 4
z = tl.load(header + 0)
i = tl.load(header + 1 + offlutm)
j = tl.load(header + 2 + offlutn)
AS1 = SDD_K // TZ
lockid = tl.where(TZ > 1, 1, 0)
offka = pid0 * AS1
offkb = pid0 * AS1
offmc = 0
offnc = 0
offpa = 0
offpb = 0
maxid = TZ
offhc = 0
offha = z
offhb = z
ram = i * BLOCK + (tl.arange(0, TM) % BLOCK)
rbn = j * BLOCK + (tl.arange(0, TN) % BLOCK)
else:
header = lut + pid0 * 6
offset = tl.load(header + 0)
AS1 = tl.load(header + 1)
column = tl.load(header + 2)
depth = tl.load(header + 3)
lockid = tl.load(header + 4)
maxid = tl.load(header + 5)
pinc = lut + offset
offhc = depth
if meta['DSD']:
# output offset
offnc = pid1 * TN
offmc = column * TM
offpc = 0
# dense input offset
offnb = pid1 * TN
offkb = tl.load(pinc)
offkb = tl.multiple_of(offkb, 8) # compiler hint
offpb = 0
# sparse input offset
offma = 0
offka = 0
offpa = tl.load(pinc + 1)
offpa = tl.multiple_of(offpa, 8) # compiler hint
offpa = offpa * BLOCK * BLOCK
offha = 0
offhb = depth
else:
# output offset
offmc = pid1 * TM
offnc = column * TN
offpc = 0
# dense input offset
offma = pid1 * TM
offka = tl.load(pinc)
offka = tl.multiple_of(offka, 8) # compiler hint
offpa = 0
# sparse input offset
offnb = 0
offkb = 0
offpb = tl.load(pinc + 1)
offpb = tl.multiple_of(offpb, 8) # compiler hint
offpb = offpb * BLOCK * BLOCK
offha = depth
offhb = 0
ram = offma + tl.arange(0, TM)
rbn = offnb + tl.arange(0, TN)
# initialize a, b pointers
rka = offka + tl.arange(0, TK)
rkb = offkb + tl.arange(0, TK)
pa = A + pidz * stride_za + offha * stride_ha + offpa + ram[:, None] * stride_ma + rka[None, :] * stride_ka
pb = B + pidz * stride_zb + offhb * stride_hb + offpb + rbn[None, :] * stride_nb + rkb[:, None] * stride_kb
if meta['DDS']:
checkam = ram[:, None] < DS0
else:
checkam = AS1 > 0
if meta['DSD']:
checkbn = rbn[None, :] < DS0
else:
checkbn = AS1 > 0
a = tl.load(pa, mask=checkam, other=0.)
b = tl.load(pb, mask=checkbn, other=0.)
## ---------------- ##
## Inner Loop ##
## ---------------- ##
acc = tl.zeros((TM, TN), dtype=tl.float32)
for k in range(AS1, 0, -TK):
acc += tl.dot(a, b)
if meta['SDD']:
inc_a = TK * stride_ka
inc_b = TK * stride_kb
else:
pinc += 2
if meta['DSD']:
inc_b = tl.load(pinc)
inc_a = tl.load(pinc + 1)
inc_b = tl.multiple_of(inc_b, 8)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = inc_b * stride_kb
if meta['DDS']:
inc_a = tl.load(pinc)
inc_b = tl.load(pinc + 1)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = tl.multiple_of(inc_b, 8)
inc_a = inc_a * stride_ka
pa += inc_a
pb += inc_b
# pre-fetch
checkak = k > TK
checkbk = k > TK
checka = checkam & checkak
checkb = checkbn & checkbk
a = tl.load(pa, mask=checka)
b = tl.load(pb, mask=checkb)
c = acc.to(C.dtype.element_ty)
if meta['SDD']:
checkc = True
rr_blockidm = tl.arange(0, TM) // BLOCK
rr_blockidn = tl.arange(0, TN) // BLOCK
rr_offlutm = rr_blockidm * (TN // BLOCK) * 4
rr_offlutn = rr_blockidn * 4
off_bkid = 3 + rr_offlutm[:, None] + rr_offlutn[None, :]
bkid = tl.load(header + off_bkid)
offpc = bkid * BLOCK * BLOCK
rcm = tl.arange(0, TM) % BLOCK
rcn = tl.arange(0, TN) % BLOCK
else:
rcm = offmc + tl.arange(0, TM)
rcn = offnc + tl.arange(0, TN)
if meta['DSD']:
checkc = rcn[None, :] < DS0
if meta['DDS']:
checkc = rcm[:, None] < DS0
pc = C + offpc + offhc * stride_hc + pidz * stride_zc + rcm[:, None] * stride_mc + rcn[None, :] * stride_nc
# write-back directly
if lockid == 0:
tl.store(pc, c, mask=checkc)
# accumulate partial results using spin-locks
else:
plock = locks + tl.program_id(2) * nlocks * tl.num_programs(1) + tl.program_id(1) * nlocks + lockid - 1
pcount = plock + tl.num_programs(2) * tl.num_programs(1) * nlocks
while tl.atomic_cas(plock, 0, 1) == 1:
pass
count = tl.load(pcount)
if count == 0:
tl.store(pc, c, mask=checkc)
else:
d = tl.load(pc, mask=checkc)
tl.store(pc, d + c, mask=checkc)
tl.atomic_xchg(pcount, (count + 1) % maxid)
tl.atomic_xchg(plock, 0)
##############
# MAIN API #
##############
class _sparse_matmul(torch.autograd.Function):
sdd_cache = dict()
dsd_cache = dict()
dds_cache = dict()
locks = dict()
# Given an array sizes representing reduction size for each
# column of a block-mode matrix multiplication,
# performs load-balancing to achieve more smaller reductions
# between `seg_size` elements
@staticmethod
def load_balance(sizes, block):
#global triton
#if triton is None:
# triton = importlib.import_module('triton')
# segment size
# heuristics taken from OpenAI blocksparse code
# https://github.com/openai/blocksparse/blob/master/blocksparse/matmul.py#L95
max_size = sizes.max()
min_size = sizes[sizes != 0].min()
#if max_size > min_size * 2.0:
# seg_max = max(triton.cdiv(max_size, 4), min_size*2)
#else:
# seg_max = max_size
seg_max = max_size
seg_min = max(triton.cdiv(seg_max, 4), 4)
# split reduction into segments
div = sizes // seg_max
rem = sizes % seg_max
packs = div + (sizes < seg_min).long() + (rem >= seg_min).long()
width = packs.sum()
segments = torch.empty(width, dtype=sizes.dtype)
column = torch.empty_like(segments)
lockid = torch.zeros_like(segments)
maxid = torch.zeros_like(segments)
nlocks = 0
current = 0
col_idx = 0
for i in range(len(sizes)):
d, r = div[i], rem[i]
isempty = sizes[i] < seg_min
last = current + d + (r >= seg_min) + isempty
# column id
column[current:last] = col_idx
# lock id
if d > 1 or (d == 1 and r >= seg_min):
nlocks += 1
lockid[current:last] = nlocks
maxid[current:last] = last - current
# segment size
segments[current:current + d] = seg_max
if r < seg_min and not isempty:
segments[current + d - 1] += r
if r >= seg_min or isempty:
segments[current + d] = r
current = last
col_idx += 1
offsets = torch.zeros_like(segments)
offsets[1:] = torch.cumsum(segments[:-1], dim=0)
return segments, column, lockid, maxid, offsets
@staticmethod
def get_locks(size, dev):
if dev not in _sparse_matmul.locks or \
size > _sparse_matmul.locks[dev].size(0):
_sparse_matmul.locks[dev] = torch.zeros(size, dtype=torch.int32, device=dev)
return _sparse_matmul.locks[dev]
##########################
# SPARSE = DENSE x DENSE #
##########################
@staticmethod
def make_sdd_lut(layout, block, dtype, device):
#_sparse_matmul._load_utils()
#start_width = 64 // block
#segmented = _sparse_matmul.sdd_segment(layout.type(torch.int32), start_width)
start_width = (128 if block > 16 else 32) // block
layout = layout.type(torch.int32)
segmented = libtriton.superblock(layout.data_ptr(), layout.shape[0], layout.shape[1], layout.shape[2],
start_width)
luts, widths, packs = [], [], []
for size, nnz in segmented:
""" width = nnz.shape[0] // (size * size)
h = nnz[:, 0]
i = nnz[:, 1]
j = nnz[:, 2]
b = nnz[:, 3]
lut = torch.stack((h, i, j, b), dim=1).view(-1).contiguous()
luts.append(lut.type(torch.int32).to(device))
widths.append(width)
packs.append(size) """
nnz = nnz.reshape(-1, 4)
width = nnz.shape[0] // (size * size)
luts.append(torch.from_numpy(nnz).type(torch.int32).to(device))
widths.append(width)
packs.append(size)
# create locks
return luts, None, widths, packs
@staticmethod
def _sdd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, luts, num_locks, widths, packs, bench, time):
if trans_c:
a, b = b, a
trans_a, trans_b = not trans_b, not trans_a
AS0 = a.size(0)
# Shape check
a_dim = -2 if trans_a else -1
b_dim = -1 if trans_b else -2
a_inner, b_inner = a.shape[a_dim], b.shape[b_dim]
if a_inner != b_inner:
raise ValueError(f"Size of tensor A along the {a_dim} dim ({a_inner}) must match size "
f"of tensor B along the {b_dim} dim ({b_inner})")
if a_inner % 16 != 0:
raise ValueError('Reduction size for SDD must be a multiple of 16')
batch_size = a.size(0)
a_outer = a.size(3 if trans_a else 2)
dtype = a.dtype
is_16_multiple = a_inner % 16 == 0
is_32_multiple = a_inner % 32 == 0
is_64_multiple = a_inner % 64 == 0
if not is_16_multiple:
raise ValueError('Reduction size for SDD must be a multiple of 16')
device = a.device
# create kernel
total_width = sum([width * pack * pack for width, pack in zip(widths, packs)])
c = torch.empty((batch_size, total_width, block, block), dtype=dtype, device=a.device)
for lut, width, pack in zip(luts, widths, packs):
F32TK = [8, 16]
F16TK = [16]
F16TK += [32] if is_32_multiple else []
F16TK += [64] if is_64_multiple else []
TK = {torch.float32: F32TK, torch.float16: F16TK}[dtype]
num_lock = 1
meta = {
'TM': block * pack,
'TN': block * pack,
'BLOCK': block,
'TK': TK[0],
'TZ': 1,
'SDD': True,
'DSD': False,
'DDS': False
}
# create output
locks = _sparse_matmul.get_locks(2 * width * AS0 * num_lock, a.device)
# maximum grid size is 65535
# so operation might be decomposed into multiple
# kernel calls
max_width = 49152
total = 0 if bench else None
for off_width in range(0, width, max_width):
grid = lambda meta: [meta['TZ'], min(max_width, width - off_width), batch_size]
_kernel[grid](a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(3 if trans_a else 2),
a.stride(2 if trans_a else 3),
b.stride(0),
b.stride(1),
b.stride(3 if trans_b else 2),
b.stride(2 if trans_b else 3),
c.stride(0),
c.stride(0),
c.stride(2),
c.stride(3),
a_outer,
a_outer,
a_inner,
off_width,
lut,
locks,
num_lock,
num_warps=4,
**meta)
# save for backward pass
return c
##########################
# DENSE = DENSE x SPARSE #
##########################
# Given a binary layout of 0s and 1s,
# Construct look-up table for efficient execution on GPUs
@staticmethod
def make_dxx_lut(layout, block, step, trans, device, transform=lambda idx: idx):
# load-balancing
_empty = torch.tensor([], dtype=torch.int64, device=layout.device)
segments = _empty.clone()
column = _empty.clone()
depth = _empty.clone()
lockid = _empty.clone()
maxid = _empty.clone()
offsets = _empty.clone()
current_offset = 0
current_maxid = 0
for z in range(layout.size(0)):
if trans:
sizes = torch.sum(layout[z, :, :], 1)
else:
sizes = torch.sum(layout[z, :, :], 0)
z_segments, z_column, z_lockid, z_maxid, z_offsets = _sparse_matmul.load_balance(sizes, block)
z_depth = z * torch.ones_like(z_segments)
z_lockid[z_lockid > 0] += current_maxid
current_maxid = z_lockid.max()
# concatenate depth
segments = torch.cat((segments, z_segments))
column = torch.cat((column, z_column))
depth = torch.cat((depth, z_depth))
maxid = torch.cat((maxid, z_maxid))
offsets = torch.cat((offsets, current_offset + z_offsets))
lockid = torch.cat((lockid, z_lockid))
current_offset += layout[z, :, :].sum()
segments *= step
# pointer increments
if trans:
nnz = layout.nonzero()
else:
nnz = layout.transpose(1, 2).nonzero()
num_blocks = nnz.size(0)
offsets = torch.min(offsets, (num_blocks - 1) * torch.ones_like(offsets))
idx = transform(nnz[:, 2] * block)
xincs = idx.clone()
xincs[1:] -= idx[:-1]
# divide block into multiple steps
div = block // step
xincs = xincs.view(-1, 1).repeat(1, div)
xincs[:, 1:] = step
xincs[:, 0] -= (div - 1) * step
# first increment for each reduction is actually the offset
xincs[offsets[segments > 0], 0] = idx[offsets[segments > 0]]
xincs = xincs.view(-1)
# block-mode input increments
if trans:
widx = torch.arange(num_blocks)
else:
widx = _empty.clone()
current_offset = 0
for z in range(layout.size(0)):
layoutw = layout[z, :, :].clone()
msum = layoutw.sum()
layoutw[layoutw > 0] = 1 + torch.arange(msum)
widx = torch.cat((widx, current_offset + layoutw.T[layoutw.T > 0] - 1))
current_offset += msum
widx = widx
wincs = widx * block * block
wincs[1:] -= widx[:-1] * block * block
wincs = wincs.view(-1, 1).repeat(1, div)
if trans:
wincs[:, 1:] = step
wincs[:, 0] -= (div - 1) * step
else:
wincs[:, 1:] = step * block
wincs[:, 0] -= (div - 1) * step * block
wincs[offsets[segments > 0], 0] = widx[offsets[segments > 0]]
wincs = wincs.view(-1)
# adjust offset and segment size
offsets *= 2 * div
segments *= div
# create header
width = column.size(0)
offsets += 6 * width
header = torch.stack((offsets, segments, column, depth, lockid, maxid), dim=1).view(-1).contiguous()
incs = torch.stack((xincs, wincs), dim=1).view(-1).contiguous()
incs = torch.cat((incs, torch.zeros(2, device=incs.device, dtype=incs.dtype)))
# create lut
lut = torch.cat((header, incs))
lut = lut.type(torch.int32).to(device)
# create locks
num_locks = max(1, lockid.max())
return lut, num_locks, width, None
@staticmethod
def _dds_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, num_locks, width, packs, bench, time):
global triton
if triton is None:
triton = importlib.import_module('triton')
# shapes / dtypes
AS0 = a.size(0)
AS1 = a.size(1)
AS2 = a.size(3 if trans_a else 2)
AS3 = a.size(2 if trans_a else 3)
BS0 = spdims[0]
BS1 = block * spdims[2 if trans_b else 1]
BS2 = block * spdims[1 if trans_b else 2]
dtype = a.dtype
# kernel
meta = {'TN': block, 'TM': 128, 'TK': 16, 'BLOCK': block, 'TZ': 1, 'SDD': False, 'DSD': False, 'DDS': True}
# output
CS0 = AS0
CS1 = AS1
CS2 = BS2 if trans_c else AS2
CS3 = AS2 if trans_c else BS2
locks = _sparse_matmul.get_locks(2 * AS0 * AS2 // 32 * num_locks, a.device)
c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
grid = lambda meta: [width, triton.cdiv(AS2, meta['TM']), AS0]
_kernel[grid](a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(3 if trans_a else 2),
a.stride(2 if trans_a else 3),
b.stride(0),
b.stride(1),
b.stride(3 if trans_b else 2),
b.stride(2 if trans_b else 3),
c.stride(0),
c.stride(1),
c.stride(3 if trans_c else 2),
c.stride(2 if trans_c else 3),
AS2,
BS2,
0,
0,
lut,
locks,
num_locks,
num_warps=4,
**meta)
return c
@staticmethod
def _dsd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, num_locks, width, packs, bench, time):
global triton
if triton is None:
triton = importlib.import_module('triton')
# shapes / dtypes
AS0 = spdims[0]
AS1 = block * spdims[2 if trans_a else 1]
AS2 = block * spdims[1 if trans_a else 2]
BS0 = b.size(0)
BS1 = b.size(1)
BS2 = b.size(3 if trans_b else 2)
BS3 = b.size(2 if trans_b else 3)
dtype = a.dtype
# kernel
meta = {'TM': block, 'TN': 128, 'TK': 16, 'BLOCK': block, 'TZ': 1, 'SDD': False, 'DSD': True, 'DDS': False}
# output
CS0 = BS0
CS1 = BS1
CS2 = BS3 if trans_c else AS1
CS3 = AS1 if trans_c else BS3
locks = _sparse_matmul.get_locks(2 * BS0 * BS3 // 32 * num_locks, a.device)
c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
grid = lambda meta: [width, triton.cdiv(BS3, meta['TN']), BS0]
_kernel[grid](a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(3 if trans_a else 2),
a.stride(2 if trans_a else 3),
b.stride(0),
b.stride(1),
b.stride(3 if trans_b else 2),
b.stride(2 if trans_b else 3),
c.stride(0),
c.stride(1),
c.stride(2),
c.stride(3),
BS3,
AS1,
0,
0,
lut,
locks,
num_locks,
num_warps=4,
**meta)
return c
fn = {'sdd': _sdd_matmul.__get__(object), 'dsd': _dsd_matmul.__get__(object), 'dds': _dds_matmul.__get__(object)}
@staticmethod
def forward(ctx, a, b, trans_a, trans_b, trans_c, mode, spdims, block, c_lut, c_num_locks, c_width, c_packs,
c_bench, c_time, da_lut, da_num_locks, da_width, da_packs, da_bench, da_time, db_lut, db_num_locks,
db_width, db_packs, db_bench, db_time):
c = _sparse_matmul.fn[mode](a, b, trans_a, trans_b, trans_c, spdims, block, c_lut, c_num_locks, c_width,
c_packs, c_bench, c_time)
# save for backward
ctx.save_for_backward(a, b)
ctx.da_num_locks = da_num_locks
ctx.da_lut = da_lut
ctx.da_width = da_width
ctx.da_packs = da_packs
ctx.da_bench = da_bench
ctx.da_time = da_time
ctx.db_lut = db_lut
ctx.db_num_locks = db_num_locks
ctx.db_width = db_width
ctx.db_bench = db_bench
ctx.db_packs = db_packs
ctx.db_time = db_time
ctx.mode = mode
ctx.spdims = spdims
ctx.block = block
ctx.trans_a = trans_a
ctx.trans_b = trans_b
return c
@staticmethod
def backward(ctx, dc):
# saved for backward
a, b = ctx.saved_tensors
mode = ctx.mode
# gradients w.r.t. a
if ctx.needs_input_grad[0]:
mode_da = mode[1] + mode[0] + mode[2]
da = _sparse_matmul.fn[mode_da](dc, b, False, not ctx.trans_b, ctx.trans_a, ctx.spdims, ctx.block,
ctx.da_lut, ctx.da_num_locks, ctx.da_width, ctx.da_packs, ctx.da_bench,
ctx.da_time)
# gradients w.r.t. b
if ctx.needs_input_grad[1]:
mode_db = mode[2] + mode[1] + mode[0]
db = _sparse_matmul.fn[mode_db](a, dc, not ctx.trans_a, False, ctx.trans_b, ctx.spdims, ctx.block,
ctx.db_lut, ctx.db_num_locks, ctx.db_width, ctx.db_packs, ctx.db_bench,
ctx.db_time)
return da, db, None, None, None,\
None, None, None, None,\
None, None, None, None, None, None,\
None, None, None, None, None, None,\
None, None, None, None, None, None
class MatMul:
"""Block-Sparse MatMul class; this class handles three types of matrix-multiplication:
- sparse = dense X dense
- dense = sparse X dense
- dense = dense X sparse
For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
"""
def make_lut(self, dtype, device):
"""Generates the sparsity layout/s used in block-sparse matmul
"""
key = (dtype, device)
if key in self.lut_cache:
return self.lut_cache[key]
# C look-up table
layout, block = self.layout, self.block
step = 16
if self.mode == 'sdd':
c_lut, c_num_locks, c_width, c_packs = _sparse_matmul.make_sdd_lut(layout, block, dtype, device)
elif self.mode == 'dsd':
c_lut, c_num_locks, c_width, c_packs = _sparse_matmul.make_dxx_lut(layout, block, step, not self.trans_a,
device)
elif self.mode == 'dds':
c_lut, c_num_locks, c_width, c_packs = _sparse_matmul.make_dxx_lut(layout, block, step, self.trans_b,
device)
# DA look-up table
if self.mode == 'sdd':
da_lut, da_num_locks, da_width, da_packs = _sparse_matmul.make_dxx_lut(layout, block, step, True, device)
elif self.mode == 'dsd':
da_lut, da_num_locks, da_width, da_packs = _sparse_matmul.make_sdd_lut(layout, block, dtype, device)
elif self.mode == 'dds':
da_lut, da_num_locks, da_width, da_packs = _sparse_matmul.make_dxx_lut(layout, block, step,
not self.trans_b, device)
# DB look-up table
if self.mode == 'sdd':
db_lut, db_num_locks, db_width, db_packs = _sparse_matmul.make_dxx_lut(layout, block, step, False, device)
elif self.mode == 'dsd':
db_lut, db_num_locks, db_width, db_packs = _sparse_matmul.make_dxx_lut(layout, block, step, self.trans_a,
device)
elif self.mode == 'dds':
db_lut, db_num_locks, db_width, db_packs = _sparse_matmul.make_sdd_lut(layout, block, dtype, device)
self.lut_cache[key] = (c_lut, c_num_locks, c_width, c_packs,\
da_lut, da_num_locks, da_width, da_packs,\
db_lut, db_num_locks, db_width, db_packs)
return self.lut_cache[key]
def __init__(self, layout, block, mode, trans_a=False, trans_b=False, bench=False):
"""Initialize the Block-Sparse MatMul class.
Arguments:
layout: required: sparsity layout tensor
block: required: an integer determining the block size.
mode: required: a string determining type of matmul; ('sdd') sparse = dense X dense, ('dsd') dense = sparse X dense, ('dds') dense = dense X sparse
trans_a: optional: a boolean determining if multiplication needs to be applied on transpose of input a; default is false
trans_b: optional: a boolean determining if multiplication needs to be applied on transpose of input b; default is false
bench: optional: set if you want to do benchmarking
"""
if mode not in ['sdd', 'dsd', 'dds']:
raise NotImplementedError('Supported modes are: sdd, dsd, dds')
# look-up table cache
self.lut_cache = dict()
# attributes
self.trans_a = trans_a
self.trans_b = trans_b
self.mode = mode
self.block = block
self.layout = layout
layout_dim = layout.ndim
assert layout_dim in (2, 3), "Layout should be a 2 or 3 dimensional tensor of 0s and 1s"
if not mode == 'sdd':
# Dims to be reduced on the 'inside' of the matmul, either -1 or -2
trans_dense, trans_sparse, sparse_inner = (trans_b, trans_a, -1) if mode == 'dsd' else (trans_a, trans_b,
-2)
self.dense_inner_dim = -((sparse_inner % 2) + 1) if not trans_dense else sparse_inner
sparse_inner = sparse_inner if not trans_sparse else -((sparse_inner % 2) + 1)
# Inner dim of the dense input should be equal to the inner dim of the sparse input
self.dense_inner_size = layout.shape[sparse_inner] * block
# Expected shape for sparse inputs
self.sparse_shape = (layout.sum().item(), block, block)
# Support using the same layout across attention heads etc.
if layout_dim == 2:
layout = layout.unsqueeze(0)
layout = layout.long() # Above code assumes the layout tensor is an integral type
self.spdims = layout.shape
# timings
self.bench = bench
self.time_c = None
self.time_da = None
self.time_db = None
# pad shapes of a tensor to make it
# compatible with kernel calls
@staticmethod
def _pad_shape(x, is_sparse):
max_dim = 3 if is_sparse else 4
for i in range(max_dim - x.dim()):
x = x.unsqueeze(0)
return x
def __call__(self, a, b):
"""Applies Block-Sparse MatMul.
For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
Arguments:
a: required: a dense/block-sparse tensor; first input of mat-mul
b: required: a dense/block-sparse tensor; second input of mat-mul
Return:
c: a dense/block-sparse tensor result of a X b
"""
c_lut, c_num_locks, c_width, c_packs,\
da_lut, da_num_locks, da_width, da_packs,\
db_lut, db_num_locks, db_width, db_packs = self.make_lut(a.dtype, a.device)
# timings
time_c = [None]
time_da = [None]
time_db = [None]
original_dims = max(a.ndim, b.ndim)
a, b = self._validate_inputs(a, b)
# pad shapes with ones
a = MatMul._pad_shape(a, self.mode == 'dsd')
b = MatMul._pad_shape(b, self.mode == 'dds')
# execute
c = _sparse_matmul.apply(a, b, self.trans_a, self.trans_b, False, self.mode, self.spdims, self.block, c_lut,
c_num_locks, c_width, c_packs, self.bench, time_c, da_lut, da_num_locks, da_width,
da_packs, self.bench, time_da, db_lut, db_num_locks, db_width, db_packs, self.bench,
time_db)
# This removes any leading singleton dimensions we may have added to the tensor that weren't in the input
dims_to_trim = c.ndim - original_dims
for _ in range(dims_to_trim):
c = c.squeeze(0)
self.time_c = time_c[0]
self.time_da = time_da[0]
self.time_db = time_db[0]
return c
def _validate_inputs(self, a, b):
if a.device != b.device:
raise ValueError(f"Inputs must be on the same device; got {a.device} for tensor A "
f"and {b.device} for tensor B")
if not get_accelerator().on_accelerator(a):
raise ValueError("Only GPU devices are supported for now")
# When autocast is enabled, torch.matmul autocasts to float16, so we do the same here
if torch.is_autocast_enabled():
a, b = a.half(), b.half()
elif a.dtype != b.dtype:
raise ValueError(f"Inputs must be the same dtype; got {a.dtype} for A and {b.dtype} for B")
mode, trans_a, trans_b = self.mode, self.trans_a, self.trans_b
if mode != 'sdd':
# One input is sparse
dense, dense_name, sparse, sparse_name = (a, 'A', b, 'B') if mode == 'dds' else (b, 'B', a, 'A')
dense_inner = dense.shape[self.dense_inner_dim]
if dense_inner != self.dense_inner_size:
raise ValueError(f"Expected tensor {dense_name} to have size {self.dense_inner_size} at dim "
f"{self.dense_inner_dim % dense.ndim}, got {dense_inner}.")
if sparse.shape[-len(self.sparse_shape):] != self.sparse_shape:
raise ValueError(f"Expected tensor with trailing dimensions of shape {self.sparse_shape} for argument "
f"{sparse_name}, got {sparse.shape}")
def add_extra_dims(x):
# Add extra leading singleton dimensions if needed
dims_needed = 4 - x.ndim
if dims_needed > 0:
singletons = [1] * dims_needed
x = x.view(*singletons, *x.shape)
elif dims_needed < 0:
raise ValueError("Tensors with more than 4 dimensions are not currently supported")
return x
# Pad shapes with leading singleton dimensions
a = add_extra_dims(a)
b = add_extra_dims(b)
return a, b
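# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# 'sdd' mode computes a block-sparse output from two dense inputs; here a causal
# (lower-triangular) block layout is used. The layout, tensor shapes and the
# fp16/CUDA requirement below are assumptions for demonstration only.
def _example_sdd_matmul():
    block = 16
    # 4 x 4 grid of 16 x 16 blocks -> dense size 64 x 64, keeping the lower triangle
    layout = torch.tril(torch.ones((1, 4, 4), dtype=torch.int64))
    sdd = MatMul(layout, block, 'sdd', trans_a=False, trans_b=True)
    a = torch.randn(1, 1, 64, 64, dtype=torch.half, device='cuda')
    b = torch.randn(1, 1, 64, 64, dtype=torch.half, device='cuda')
    # result holds one (block x block) tile per nonzero entry of the layout
    return sdd(a, b)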
| 32,948 | 39.181707 | 160 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/sparse_attention/bert_sparse_self_attention.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from torch import nn
from deepspeed.ops.sparse_attention import SparseSelfAttention, FixedSparsityConfig
class BertSparseSelfAttention(nn.Module):
"""Implements Sparse Self Attention layer of Bert model based on https://github.com/microsoft/DeepSpeedExamples/blob/master/bing_bert/nvidia/modelingpreln.py#L373
For more information please see, TODO DeepSpeed Sparse Transformer.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial.
"""
def __init__(
self,
config,
# SparsityConfig parameters needs to be set accordingly
sparsity_config=FixedSparsityConfig(num_heads=4)):
"""Initialize the bert sparse self attention layer.
Note) you can use any of the provided sparsity configs or simply add yours!
Arguments:
config: required: Bert model config
sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on FixedSparsityConfig class.
"""
super(BertSparseSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError("The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.sparse_self_attention = SparseSelfAttention(sparsity_config)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
"""Applies forward phase of bert sparse self attention
Arguments:
hidden_states: required: hidden_states tensor of the bert model
            attention_mask: required: a mask tensor of size (SequenceLength X SequenceLength); currently only 2D is supported
Return:
context_layer: a dense tensor containing attention context
"""
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
context_layer = self.sparse_self_attention(query_layer,
key_layer,
value_layer,
key_padding_mask=attention_mask)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size, )
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
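def _example_bert_sparse_self_attention():
    # Minimal construction sketch (hypothetical, not part of the original file). For
    # __init__, `config` only needs hidden_size and num_attention_heads; the values and
    # the SimpleNamespace stand-in below are assumptions for illustration. Running a
    # forward pass additionally requires a GPU build of the sparse attention kernels.
    from types import SimpleNamespace
    config = SimpleNamespace(hidden_size=768, num_attention_heads=4)
    return BertSparseSelfAttention(config)  # uses the default FixedSparsityConfig(num_heads=4)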
| 3,463 | 43.410256 | 166 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/adam/cpu_adam.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from cpuinfo import get_cpu_info
from deepspeed.utils import logger
from deepspeed.utils.logging import should_log_le
from deepspeed.ops.op_builder import CPUAdamBuilder
class DeepSpeedCPUAdam(torch.optim.Optimizer):
optimizer_id = 0
def __init__(self,
model_params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
adamw_mode=True,
fp32_optimizer_states=True):
"""Fast vectorized implementation of two variations of Adam optimizer on CPU:
* Adam: A Method for Stochastic Optimization: (https://arxiv.org/abs/1412.6980);
* AdamW: Fixing Weight Decay Regularization in Adam (https://arxiv.org/abs/1711.05101)
        DeepSpeed CPU Adam(W) provides between 5x and 7x speedup over torch.optim.Adam(W).
        In order to apply this optimizer, the model is required to have its master parameters (in FP32)
        reside in CPU memory.
To train on a heterogeneous system, such as coordinating CPU and GPU, DeepSpeed offers
the ZeRO-Offload technology which efficiently offloads the optimizer states into CPU memory,
with minimal impact on training throughput. DeepSpeedCPUAdam plays an important role to minimize
the overhead of the optimizer's latency on CPU. Please refer to ZeRO-Offload tutorial
(https://www.deepspeed.ai/tutorials/zero-offload/) for more information on how to enable this technology.
        When calling the step function, there are two options available: (1) update the optimizer's states, or (2) update
        the optimizer's states and copy the parameters back to GPU at the same time. We have seen that the second
        option can bring 30% higher throughput than doing the copy separately as in option one.
.. note::
We recommend using our `config
<https://www.deepspeed.ai/docs/config-json/#optimizer-parameters>`_
to allow :meth:`deepspeed.initialize` to build this optimizer
for you.
Arguments:
model_params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in DeepSpeed CPUAdam!
adamw_mode: select between Adam and AdamW implementations (default: AdamW)
            fp32_optimizer_states: creates momentum and variance in full precision regardless of
the precision of the parameters (default: True)
"""
default_args = dict(lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
bias_correction=bias_correction,
amsgrad=amsgrad)
super(DeepSpeedCPUAdam, self).__init__(model_params, default_args)
cpu_info = get_cpu_info()
self.cpu_vendor = cpu_info["vendor_id_raw"].lower() if "vendor_id_raw" in cpu_info else "unknown"
if "amd" in self.cpu_vendor:
for group_id, group in enumerate(self.param_groups):
for param_id, p in enumerate(group['params']):
if p.dtype == torch.half:
logger.warning("FP16 params for CPUAdam may not work on AMD CPUs")
break
else:
continue
break
self.opt_id = DeepSpeedCPUAdam.optimizer_id
DeepSpeedCPUAdam.optimizer_id = DeepSpeedCPUAdam.optimizer_id + 1
self.adam_w_mode = adamw_mode
self.fp32_optimizer_states = fp32_optimizer_states
self.ds_opt_adam = CPUAdamBuilder().load()
self.ds_opt_adam.create_adam(self.opt_id, lr, betas[0], betas[1], eps, weight_decay, adamw_mode,
should_log_le("info"))
def __del__(self):
# need to destroy the C++ object explicitly to avoid a memory leak when deepspeed.initialize
# is used multiple times in the same process (notebook or pytest worker)
self.ds_opt_adam.destroy_adam(self.opt_id)
def __setstate__(self, state):
super(DeepSpeedCPUAdam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def step(self, closure=None, fp16_param_groups=None):
"""Update the model parameters.
.. note::
This method will be called internally by ZeRO-Offload. DeepSpeed
users should still use ``engine.step()`` as shown in the
`Getting Started
<https://www.deepspeed.ai/getting-started/#training>`_ guide.
Args:
closure (callable, optional): closure to compute the loss.
Defaults to ``None``.
fp16_param_groups: FP16 GPU parameters to update. Performing the
copy here reduces communication time. Defaults to ``None``.
Returns:
loss: if ``closure`` is provided. Otherwise ``None``.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
# intended device for step
device = torch.device('cpu')
        # convert the fp16 params into groups of parameters
if type(fp16_param_groups) is list:
if type(fp16_param_groups[0]) is not list:
fp16_param_groups = [fp16_param_groups]
elif fp16_param_groups is not None:
fp16_param_groups = [[fp16_param_groups]]
for group_id, group in enumerate(self.param_groups):
for param_id, p in enumerate(group['params']):
if p.grad is None:
continue
assert p.device == device, f"CPUAdam param is on {p.device} and must be 'cpu', make " \
"sure you enabled 'offload_optimizer': 'cpu' in your ZeRO config."
state = self.state[p]
# State initialization
if len(state) == 0:
#print(f'group {group_id} param {param_id} = {p.numel()}')
state['step'] = 0
#use full precision by default unless self.fp32_optimizer_states is off
state_dtype = torch.float if self.fp32_optimizer_states else p.dtype
# gradient momentums
state['exp_avg'] = torch.zeros_like(p.data, dtype=state_dtype, device=device)
#memory_format=torch.preserve_format)
# gradient variances
state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=state_dtype, device=device)
#memory_format=torch.preserve_format)
state['step'] += 1
beta1, beta2 = group['betas']
if fp16_param_groups is not None:
self.ds_opt_adam.adam_update_copy(self.opt_id, state['step'], group['lr'], beta1, beta2,
group['eps'], group['weight_decay'], group['bias_correction'],
p.data, p.grad.data, state['exp_avg'], state['exp_avg_sq'],
fp16_param_groups[group_id][param_id].data)
else:
self.ds_opt_adam.adam_update(self.opt_id, state['step'], group['lr'], beta1, beta2, group['eps'],
group['weight_decay'], group['bias_correction'], p.data, p.grad.data,
state['exp_avg'], state['exp_avg_sq'])
return loss
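def _example_cpu_adam_step():
    # Minimal sketch (hypothetical, not part of the original file) of driving the
    # optimizer directly; in normal use deepspeed.initialize builds it from the JSON
    # config and engine.step() is called instead. Parameters must live on CPU and the
    # CPUAdam op must be buildable on this machine.
    model = torch.nn.Linear(16, 16)                  # FP32 master parameters on CPU
    optimizer = DeepSpeedCPUAdam(model.parameters(), lr=1e-3, weight_decay=0.01)
    model(torch.randn(4, 16)).sum().backward()
    optimizer.step()                                 # updates exp_avg / exp_avg_sq states on CPU
    optimizer.zero_grad()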
| 8,544 | 45.950549 | 118 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/adam/fused_adam.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Copyright NVIDIA/apex
This file is adapted from fused adam in NVIDIA/apex, commit 6bd01c4
"""
import torch
from .multi_tensor_apply import MultiTensorApply
multi_tensor_applier = MultiTensorApply(2048 * 32)
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import FusedAdamBuilder
class FusedAdam(torch.optim.Optimizer):
"""Implements Adam algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This version of fused Adam implements 2 fusions.
* Fusion of the Adam update's elementwise operations
* A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.
:class:`apex.optimizers.FusedAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``,
or ``torch.optim.Adam`` with ``adam_w_mode=False``::
opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
...
opt.step()
:class:`apex.optimizers.FusedAdam` may be used with or without Amp. If you wish to use :class:`FusedAdam` with Amp,
you may choose any ``opt_level``::
opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
        model, opt = amp.initialize(model, opt, opt_level="O0" or "O1" or "O2")
...
opt.step()
In general, ``opt_level="O1"`` is recommended.
.. warning::
A previous version of :class:`FusedAdam` allowed a number of additional arguments to ``step``. These additional arguments
are now deprecated and unnecessary.
    Adam was proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in FusedAdam!
adam_w_mode (boolean, optional): Apply L2 regularization or weight decay
            True for decoupled weight decay (also known as AdamW) (default: True)
        set_grad_none (bool, optional): whether to set grad to None when the zero_grad()
method is called. (default: True)
.. _Adam - A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
adam_w_mode=True,
weight_decay=0.,
amsgrad=False,
set_grad_none=True):
if amsgrad:
raise RuntimeError('FusedAdam does not support the AMSGrad variant.')
defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay)
super(FusedAdam, self).__init__(params, defaults)
self.adam_w_mode = 1 if adam_w_mode else 0
self.set_grad_none = set_grad_none
fused_adam_cuda = FusedAdamBuilder().load()
# Skip buffer
self._dummy_overflow_buf = get_accelerator().IntTensor([0])
self.multi_tensor_adam = fused_adam_cuda.multi_tensor_adam
def zero_grad(self):
if self.set_grad_none:
for group in self.param_groups:
for p in group['params']:
p.grad = None
else:
super(FusedAdam, self).zero_grad()
def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None, grad_scaler=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes.
"""
if any(p is not None for p in [grads, output_params, scale, grad_norms]):
raise RuntimeError(
'FusedAdam has been updated. Simply initialize it identically to torch.optim.Adam, and call step() with no arguments.'
)
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
if len(group['params']) == 0:
continue
bias_correction = 1 if group['bias_correction'] else 0
beta1, beta2 = group['betas']
# assume same step across group now to simplify things
            # per-parameter step can easily be supported by making it a tensor, or by passing a list into the kernel
if 'step' not in group:
group['step'] = 0
# create lists for multi-tensor apply
g_16, p_16, m_16, v_16 = [], [], [], []
g_bf, p_bf, m_bf, v_bf = [], [], [], []
g_32, p_32, m_32, v_32 = [], [], [], []
for p in group['params']:
if p.grad is None:
continue
if p.grad.data.is_sparse:
raise RuntimeError(
'FusedAdam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
                    # DeepSpeed ZeRO 3 processes one subgroup at a time, so we need to keep tracking the step count for each tensor separately.
                    # This is not an issue for ZeRO 1 & 2, since they apply a single optimization step to the whole param group at the same time.
                    # In order to keep backward compatibility with existing checkpoints, we use group['step'] to initialize state['step'] if it exists.
state['step'] = group.get('step', 0)
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if p.dtype == torch.float16:
g_16.append(p.grad.data)
p_16.append(p.data)
m_16.append(state['exp_avg'])
v_16.append(state['exp_avg_sq'])
elif p.dtype == torch.bfloat16:
g_bf.append(p.grad)
p_bf.append(p)
m_bf.append(state['exp_avg'])
v_bf.append(state['exp_avg_sq'])
elif p.dtype == torch.float32:
g_32.append(p.grad.data)
p_32.append(p.data)
m_32.append(state['exp_avg'])
v_32.append(state['exp_avg_sq'])
else:
                    raise RuntimeError('FusedAdam only supports fp16, bf16 and fp32.')
if len(g_16) > 0:
state['step'] += 1
multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_16, p_16, m_16, v_16],
group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
bias_correction, group['weight_decay'])
if len(g_bf) > 0:
state['step'] += 1
multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_bf, p_bf, m_bf, v_bf],
group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
bias_correction, group['weight_decay'])
if len(g_32) > 0:
state['step'] += 1
multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_32, p_32, m_32, v_32],
group['lr'], beta1, beta2, group['eps'], state['step'], self.adam_w_mode,
bias_correction, group['weight_decay'])
return loss
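def _example_fused_adam_step():
    # Minimal sketch (hypothetical, not part of the original file): FusedAdam as a
    # drop-in replacement for torch.optim.AdamW on accelerator-resident parameters.
    # Requires the fused Adam op to be buildable; sizes below are illustrative.
    device = get_accelerator().device_name()
    model = torch.nn.Linear(16, 16).to(device)
    optimizer = FusedAdam(model.parameters(), lr=1e-3, adam_w_mode=True, weight_decay=0.01)
    model(torch.randn(4, 16, device=device)).sum().backward()
    optimizer.step()       # one multi-tensor kernel launch per dtype bucket
    optimizer.zero_grad()  # sets grads to None because set_grad_none defaults to True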
| 8,767 | 43.734694 | 155 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/quantizer/quantizer.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.ops.op_builder import QuantizerBuilder
# Cuda modules will be imported if needed
quantizer_cuda_module = None
def ds_quantizer(input, groups=1, bit_num=8, sr=False, asym=False):
# Load cuda modules if needed
global quantizer_cuda_module
if quantizer_cuda_module is None:
quantizer_cuda_module = QuantizerBuilder().load()
if sr:
if asym:
quantize_func = quantizer_cuda_module.ds_sr_quantize_asym_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_sr_quantize_asym_fp32
else:
quantize_func = quantizer_cuda_module.ds_sr_quantize_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_sr_quantize_fp32
else:
if asym:
quantize_func = quantizer_cuda_module.ds_quantize_asym_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_quantize_asym_fp32
else:
quantize_func = quantizer_cuda_module.ds_quantize_fp16 if input.dtype == torch.half else quantizer_cuda_module.ds_quantize_fp32
return quantize_func(input, groups, bit_num)
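def _example_ds_quantizer():
    # Minimal sketch (hypothetical, not part of the original file): symmetric 8-bit
    # quantization of an fp16 tensor split into 4 quantization groups. The shape and
    # group count are illustrative assumptions; requires the quantizer CUDA op.
    x = torch.randn(4, 1024, dtype=torch.half, device='cuda')
    return ds_quantizer(x, groups=4, bit_num=8)  # defaults: no stochastic rounding, symmetric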
| 1,193 | 38.8 | 155 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/random_ltd/dropping_utils.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.ops.op_builder import RandomLTDBuilder
"""
Returns:
sampled_indices: [layers, batch_size, reserved_length]
new_mask: [batch_size, 1, reserved_length, reserved_length]
"""
random_ltd_module = None
def gpt_sample_tokens(reserved_length: int,
seq_length: int,
batch_size: int,
layers: int = 1,
device: str = 'cpu',
attn_mask: torch.Tensor = None):
prob_dist = torch.ones((layers * batch_size, seq_length), device=device)
sampled_indices = torch.multinomial(prob_dist, reserved_length)
sampled_indices = sampled_indices.reshape(layers, batch_size, reserved_length).to(torch.int32)
global random_ltd_module
if random_ltd_module is None:
random_ltd_module = RandomLTDBuilder().load()
sampled_indices = random_ltd_module.token_sort_(sampled_indices, seq_length)
    # Not certain the optimized kernel is actually better here, because it can break
    # alignment if the sequence length is not divisible by 16
# new_mask = random_ltd_module.mask_gather_gpt(attn_mask, reserved_length)
if attn_mask is not None:
new_mask = attn_mask[:, :, :reserved_length, :reserved_length]
else:
new_mask = None
return sampled_indices, new_mask
"""
Returns:
sampled_indices: [layers, batch_size, reserved_length]
new_mask: [layers, batch_size, 1, reserved_length, reserved_length]
"""
def bert_sample_tokens(reserved_length: int,
seq_length: int,
batch_size: int,
layers: int = 1,
device: str = 'cpu',
attn_mask: torch.Tensor = None):
assert attn_mask is not None
prob_dist = torch.ones((layers * batch_size, seq_length), device=device)
sampled_indices = torch.multinomial(prob_dist, reserved_length)
sampled_indices = sampled_indices.reshape(layers, batch_size, reserved_length).to(torch.int32)
global random_ltd_module
if random_ltd_module is None:
random_ltd_module = RandomLTDBuilder().load()
sampled_indices = random_ltd_module.token_sort_(sampled_indices, seq_length)
dtype = sampled_indices.dtype
sampled_indices = sampled_indices.to(torch.long)
new_mask = []
for l in range(layers):
tmp_mask_list = []
for i in range(batch_size):
mask_tmp = attn_mask[i:i + 1, :, sampled_indices[l][i], :]
tmp_mask_list.append(mask_tmp[:, :, :, sampled_indices[l][i]])
new_mask.append(torch.cat(tmp_mask_list, dim=0))
return sampled_indices.to(dtype), new_mask
class GatherTokens(torch.autograd.Function):
@staticmethod
def forward(ctx, activations: torch.Tensor, sorted_indices: torch.Tensor, batch_first: bool):
global random_ltd_module
if random_ltd_module is None:
random_ltd_module = RandomLTDBuilder().load()
ctx.save_for_backward(activations, sorted_indices)
ctx.batch_first = batch_first
return activations, random_ltd_module.token_gather(activations, sorted_indices, batch_first)
@staticmethod
def backward(ctx, a_gradients: torch.Tensor, g_gradients: torch.Tensor):
g_gradients = g_gradients.contiguous()
global random_ltd_module
if random_ltd_module is None:
random_ltd_module = RandomLTDBuilder().load()
activations, sorted_indices = ctx.saved_tensors
batch_first = ctx.batch_first
return random_ltd_module.token_scatter_(a_gradients, g_gradients, sorted_indices, batch_first), None, None
class ScatterTokens(torch.autograd.Function):
@staticmethod
def forward(ctx, all_activations: torch.Tensor, layer_activations: torch.Tensor, sorted_indices: torch.Tensor,
batch_first: bool):
global random_ltd_module
if random_ltd_module is None:
random_ltd_module = RandomLTDBuilder().load()
scatter_results = random_ltd_module.token_scatter_(all_activations.clone(), layer_activations, sorted_indices,
batch_first)
ctx.save_for_backward(sorted_indices)
ctx.batch_first = batch_first
return scatter_results
@staticmethod
def backward(ctx, out_gradients: torch.Tensor):
out_gradients = out_gradients.contiguous()
global random_ltd_module
if random_ltd_module is None:
random_ltd_module = RandomLTDBuilder().load()
sorted_indices, = ctx.saved_tensors
batch_first = ctx.batch_first
ret_val = random_ltd_module.token_gather(out_gradients, sorted_indices, batch_first)
return out_gradients, ret_val, None, None
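def _example_random_ltd_round_trip():
    # Minimal sketch (hypothetical, not part of the original file): sample the token
    # positions a layer keeps, gather them before the layer, then scatter the kept
    # tokens back into the full sequence. Shapes are illustrative assumptions and the
    # gather/scatter kernels require the RandomLTD op and a GPU device.
    batch, seq, reserved, hidden = 2, 128, 64, 32
    acts = torch.randn(batch, seq, hidden, device='cuda', dtype=torch.half)
    indices, _ = gpt_sample_tokens(reserved, seq, batch, layers=1, device='cuda')
    full, kept = GatherTokens.apply(acts, indices[0], True)       # batch_first=True
    restored = ScatterTokens.apply(full, kept, indices[0], True)  # same shape as acts
    return restored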
| 4,902 | 35.864662 | 118 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/adagrad/cpu_adagrad.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.ops.op_builder import CPUAdagradBuilder
from deepspeed.utils.logging import should_log_le
class DeepSpeedCPUAdagrad(torch.optim.Optimizer):
optimizer_id = 0
def __init__(self, model_params, lr=1e-2, eps=1e-10, weight_decay=0, amsgrad=False, fp32_optimizer_states=True):
default_args = dict(lr=lr, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
super(DeepSpeedCPUAdagrad, self).__init__(model_params, default_args)
self.opt_id = DeepSpeedCPUAdagrad.optimizer_id
DeepSpeedCPUAdagrad.optimizer_id = DeepSpeedCPUAdagrad.optimizer_id + 1
self.fp32_optimizer_states = fp32_optimizer_states
self.ds_opt_adagrad = CPUAdagradBuilder().load()
self.ds_opt_adagrad.create_adagrad(self.opt_id, lr, eps, weight_decay, should_log_le("info"))
def __del__(self):
# need to destroy the C++ object explicitly to avoid a memory leak when deepspeed.initialize
# is used multiple times in the same process (notebook or pytest worker)
self.ds_opt_adagrad.destroy_adagrad(self.opt_id)
def __setstate__(self, state):
super(DeepSpeedCPUAdagrad, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def step(self, closure=None, fp16_param_groups=None):
"""Update the model parameters.
.. note::
This method will be called internally by ZeRO-Offload. DeepSpeed
users should still use ``engine.step()`` as shown in the
`Getting Started
<https://www.deepspeed.ai/getting-started/#training>`_ guide.
Args:
closure (callable, optional): closure to compute the loss.
Defaults to ``None``.
fp16_param_groups: FP16 GPU parameters to update. Performing the
copy here reduces communication time. Defaults to ``None``.
Returns:
loss: if ``closure`` is provided. Otherwise ``None``.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
# intended device for step
device = torch.device('cpu')
for group_id, group in enumerate(self.param_groups):
for param_id, p in enumerate(group['params']):
if p.grad is None:
continue
assert p.device == device, f"CPUAdagrad param is on {p.device} and must be 'cpu', make " \
"sure you enabled 'offload_optimizer': 'cpu' in your ZeRO config."
state = self.state[p]
# State initialization
if len(state) == 0:
#print(f'group {group_id} param {param_id} = {p.numel()}')
state['step'] = 0
#use full precision by default unless self.fp32_optimizer_states is off
state_dtype = torch.float if self.fp32_optimizer_states else p.dtype
#memory_format=torch.preserve_format)
# gradient variances
state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=state_dtype, device='cpu')
#memory_format=torch.preserve_format)
state['step'] += 1
                if p.grad.is_sparse:
sparse_param = p.sparse_mask(p.grad)
sparse_exp_avg_sq = state['exp_avg_sq'].sparse_mask(p.grad)
self.ds_opt_adagrad.adagrad_update(self.opt_id, state['step'], group['lr'], group['eps'],
group['weight_decay'], sparse_param.values(), p.grad.values(),
sparse_exp_avg_sq.values())
p[sparse_param.indices()] = sparse_param.values()
state['exp_avg_sq'][sparse_exp_avg_sq.indices()] = sparse_exp_avg_sq.values()
if fp16_param_groups is not None:
fp16_param_groups[group_id][param_id][sparse_param.indices()] = sparse_param.values()
else:
if fp16_param_groups is not None:
self.ds_opt_adagrad.adagrad_update_copy(self.opt_id, state['step'], group['lr'], group['eps'],
group['weight_decay'], p.data, p.grad.data,
state['exp_avg_sq'],
fp16_param_groups[group_id][param_id].data)
else:
self.ds_opt_adagrad.adagrad_update(self.opt_id, state['step'], group['lr'], group['eps'],
group['weight_decay'], p.data, p.grad.data,
state['exp_avg_sq'])
return loss
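def _example_cpu_adagrad_step():
    # Minimal sketch (hypothetical, not part of the original file); in practice
    # ZeRO-Offload drives this optimizer internally via engine.step(). Parameters must
    # live on CPU and the CPUAdagrad op must be buildable on this machine.
    model = torch.nn.Linear(16, 16)
    optimizer = DeepSpeedCPUAdagrad(model.parameters(), lr=1e-2)
    model(torch.randn(4, 16)).sum().backward()
    optimizer.step()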
| 5,089 | 45.272727 | 118 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/transformer.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import json
import math
import torch
from torch import nn
from torch.autograd import Function
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import TransformerBuilder, StochasticTransformerBuilder
# Cuda modules will be imported if needed
transformer_cuda_module = None
stochastic_transformer_cuda_module = None
class TransformerConfig():
def __init__(self, batch_size, hidden_size, intermediate_size, heads, attn_dropout_ratio, hidden_dropout_ratio,
num_hidden_layers, initializer_range):
self.layer_id = -1
self.batch_size = batch_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.heads = heads
self.attn_dropout_ratio = attn_dropout_ratio
self.hidden_dropout_ratio = hidden_dropout_ratio
self.num_hidden_layers = num_hidden_layers
self.initializer_range = initializer_range
class DeepSpeedTransformerConfig(TransformerConfig):
"""Initialize the DeepSpeed Transformer Config.
Arguments:
batch_size: The maximum batch size used for running the kernel on each GPU
hidden_size: The hidden size of the transformer layer
intermediate_size: The intermediate size of the feed-forward part of transformer layer
heads: The number of heads in the self-attention of the transformer layer
attn_dropout_ratio: The ratio of dropout for the attention's output
hidden_dropout_ratio: The ratio of dropout for the transformer's output
num_hidden_layers: The number of transformer layers
initializer_range: BERT model's initializer range for initializing parameter data
            local_rank: Optional: The rank of the GPU running the transformer kernel; it is not required
                if the model has already set the current device, otherwise it needs to be set
                so that the transformer kernel can work on the right device
seed: The random seed for the dropout layers
fp16: Enable half-precision computation
pre_layer_norm: Select between Pre-LN or Post-LN transformer architecture
normalize_invertible: Optional: Enable invertible LayerNorm execution (dropping the input activation),
default is False
gelu_checkpoint: Optional: Enable checkpointing of Gelu activation output to save memory,
default is False
            adjust_init_range: Optional: Set to True (default) if the model should adjust the initial weight values of
                its self-attention output and layer output; False keeps the initializer_range unchanged.
See the adjustment below:
output_std = self.config.initializer_range / math.sqrt(2.0 * num_layers)
attn_dropout_checkpoint: Optional: Enable checkpointing of attention dropout to save memory,
default is False
stochastic_mode: Enable for high performance, please note that this flag has some level of
non-determinism and can produce different results on different runs. However, we have seen
that by enabling it, the pretraining tasks such as BERT are not affected and can obtain
                a high accuracy level. On the other hand, for downstream tasks such as fine-tuning, we recommend
                turning it off in order to be able to reproduce the same result through the regular kernel execution.
return_tuple: Enable if using the return_tuple interface style for sending out the forward results.
training: Enable for training rather than inference.
"""
def __init__(self,
batch_size=-1,
hidden_size=-1,
intermediate_size=-1,
heads=-1,
attn_dropout_ratio=-1,
hidden_dropout_ratio=-1,
num_hidden_layers=-1,
initializer_range=-1,
layer_norm_eps=1e-12,
local_rank=-1,
seed=-1,
fp16=False,
pre_layer_norm=True,
normalize_invertible=False,
gelu_checkpoint=False,
adjust_init_range=True,
attn_dropout_checkpoint=False,
stochastic_mode=False,
return_tuple=False,
training=True):
super(DeepSpeedTransformerConfig,
self).__init__(batch_size, hidden_size,
(intermediate_size if intermediate_size > 0 else 4 * hidden_size), heads,
attn_dropout_ratio, hidden_dropout_ratio, num_hidden_layers, initializer_range)
self.fp16 = fp16
self.pre_layer_norm = pre_layer_norm
self.local_rank = local_rank
self.seed = seed
self.normalize_invertible = normalize_invertible
self.gelu_checkpoint = gelu_checkpoint # True: if higher batch size is required
self.adjust_init_range = adjust_init_range
self.test_gemm = False
self.layer_norm_eps = layer_norm_eps
self.training = training
self.is_grad_enabled = True
self.attn_dropout_checkpoint = attn_dropout_checkpoint
self.stochastic_mode = stochastic_mode
self.return_tuple = return_tuple
@classmethod
def from_dict(cls, json_object):
config = DeepSpeedTransformerConfig()
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
with open(json_file, "r", encoding='utf-16') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
class DeepSpeedTransformerFunction(Function):
@staticmethod
def forward(ctx, input, input_mask, self, grads, layer_id, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw,
attn_nb, inter_w, inter_b, output_w, output_b, norm_w, norm_b, config):
cuda_module = stochastic_transformer_cuda_module if config.stochastic_mode else transformer_cuda_module
forward_func = cuda_module.forward_fp16 if config.fp16 else cuda_module.forward_fp32
inp_size = input.size()
if inp_size[1] % 16 != 0:
input = torch.cat(
(input,
torch.randn(
(inp_size[0], (16 - (inp_size[1] % 16)), inp_size[2]), device=input.device, dtype=input.dtype)),
1)
input_mask = torch.cat((input_mask, torch.ones((inp_size[0], input_mask.shape[1], input_mask.shape[2], \
(16 - (inp_size[1] % 16))), device=input_mask.device, dtype=input_mask.dtype) * -10000), 3)
(output, inp_norm, qkv_tf, soft_inp, ctx_bufB, attn_o_inp, add_res, ff1_inp, gelu_inp, ff2_inp,
attn_prob_dropout_mask, attn_output_dropout_mask, layer_output_dropout_mask, attn_layer_norm_var,
attn_layer_norm_mean, layer_norm_var, layer_norm_mean) = forward_func(
config.layer_id, input, input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w,
inter_b, output_w, output_b, norm_w, norm_b, config.training and config.is_grad_enabled,
config.pre_layer_norm, config.attn_dropout_checkpoint, config.normalize_invertible,
config.gelu_checkpoint)
# For testing only.
if grads is not None:
for i in [2]:
attn_qkvw.register_hook(lambda x, i=i, self=self: grads.append([
x[i * attn_ow.size(0):(i + 1) * attn_ow.size(0)], ("Q_W" if i == 0 else "K_W" if i == 1 else "V_W")
]))
for i in [2]:
attn_qkvb.register_hook(lambda x, i=i, self=self: grads.append([
x[i * attn_ow.size(0):(i + 1) * attn_ow.size(0)], ("Q_B" if i == 0 else "K_B" if i == 1 else "V_B")
]))
attn_ow.register_hook(lambda x, self=self: grads.append([x, "O_W"]))
attn_ob.register_hook(lambda x, self=self: grads.append([x, "O_B"]))
attn_nw.register_hook(lambda x, self=self: grads.append([x, "N2_W"]))
attn_nb.register_hook(lambda x, self=self: grads.append([x, "N2_B"]))
inter_w.register_hook(lambda x, self=self: grads.append([x, "int_W"]))
inter_b.register_hook(lambda x, self=self: grads.append([x, "int_B"]))
output_w.register_hook(lambda x, self=self: grads.append([x, "out_W"]))
output_b.register_hook(lambda x, self=self: grads.append([x, "out_B"]))
norm_w.register_hook(lambda x, self=self: grads.append([x, "norm_W"]))
norm_b.register_hook(lambda x, self=self: grads.append([x, "norm_B"]))
if config.is_grad_enabled and config.training:
if (config.pre_layer_norm and config.normalize_invertible):
ctx.save_for_backward(input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w,
inter_b, output_w, output_b, norm_w, norm_b)
else:
ctx.save_for_backward(output, input, input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw,
attn_nb, inter_w, inter_b, output_w, output_b, norm_w, norm_b)
ctx.config = config
if (config.pre_layer_norm or not config.normalize_invertible):
ctx.inp_norm = inp_norm
ctx.qkv_tf = qkv_tf
ctx.soft_inp = soft_inp
if not config.attn_dropout_checkpoint:
ctx.ctx_bufB = ctx_bufB
ctx.attn_o_inp = attn_o_inp
if not config.normalize_invertible:
ctx.add_res = add_res
ctx.attn_layer_norm_mean = attn_layer_norm_mean
ctx.layer_norm_mean = layer_norm_mean
ctx.ff1_inp = ff1_inp
if not config.gelu_checkpoint:
ctx.gelu_inp = gelu_inp
ctx.ff2_inp = ff2_inp
ctx.attn_prob_dropout_mask = attn_prob_dropout_mask
ctx.attn_output_dropout_mask = attn_output_dropout_mask
ctx.layer_output_dropout_mask = layer_output_dropout_mask
ctx.attn_layer_norm_var = attn_layer_norm_var
ctx.layer_norm_var = layer_norm_var
if inp_size[1] % 16 != 0:
output = torch.narrow(output, 1, 0, inp_size[1])
if config.return_tuple:
return (output, ) # outputs -> (output) : outputs[0] = output
else:
return output
@staticmethod
def backward(ctx, grad_output):
bsz = grad_output.shape[0]
grad_output_shape = grad_output.size()
if grad_output_shape[1] % 16 != 0:
grad_output = torch.cat((grad_output, torch.zeros((bsz, (16 - (grad_output_shape[1] % 16)), \
grad_output_shape[2]), device=grad_output.device, dtype=grad_output.dtype)), 1)
assert ctx.config.training
if (ctx.config.pre_layer_norm and ctx.config.normalize_invertible):
(input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w, inter_b, output_w,
output_b, norm_w, norm_b) = ctx.saved_tensors
else:
(output, input, input_mask, attn_qkvw, attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w, inter_b,
output_w, output_b, norm_w, norm_b) = ctx.saved_tensors
cuda_module = stochastic_transformer_cuda_module if ctx.config.stochastic_mode else transformer_cuda_module
backward_func = cuda_module.backward_fp16 if ctx.config.fp16 else cuda_module.backward_fp32
(grad_input, grad_attn_qkvw, grad_attn_qkvb, grad_attn_ow, grad_attn_ob, grad_attn_nw, grad_attn_nb,
grad_inter_w, grad_inter_b, grad_output_w, grad_output_b, grad_norm_w, grad_norm_b) = backward_func(
ctx.config.layer_id, grad_output,
(ctx.inp_norm if (ctx.config.pre_layer_norm and ctx.config.normalize_invertible) else output),
(ctx.inp_norm if (ctx.config.pre_layer_norm or not ctx.config.normalize_invertible) else input),
ctx.qkv_tf, ctx.soft_inp, (ctx.soft_inp if ctx.config.attn_dropout_checkpoint else ctx.ctx_bufB),
ctx.attn_o_inp, (ctx.ff1_inp if ctx.config.normalize_invertible else ctx.add_res), ctx.ff1_inp,
(ctx.ff2_inp if ctx.config.gelu_checkpoint else ctx.gelu_inp), ctx.ff2_inp, ctx.attn_prob_dropout_mask,
ctx.attn_output_dropout_mask, ctx.layer_output_dropout_mask, ctx.attn_layer_norm_var,
ctx.attn_layer_norm_mean, ctx.layer_norm_var, ctx.layer_norm_mean,
(ctx.inp_norm if
(ctx.config.pre_layer_norm and ctx.config.normalize_invertible) else input), input_mask, attn_qkvw,
attn_qkvb, attn_ow, attn_ob, attn_nw, attn_nb, inter_w, inter_b, output_w, output_b, norm_w, norm_b)
# This appears to be an effective way to release context memory
ctx.qkv_tf = None
ctx.soft_inp = None
ctx.ctx_bufB = None
ctx.gelu_inp = None
ctx.ff2_inp = None
ctx.attn_o_inp = None
ctx.ff1_inp = None
ctx.add_res = None
ctx.inp_norm = None
ctx.config = None
ctx.attn_layer_norm_mean = None
ctx.layer_norm_mean = None
ctx.attn_prob_dropout_mask = None
ctx.attn_output_dropout_mask = None
ctx.layer_output_dropout_mask = None
ctx.attn_layer_norm_var = None
ctx.layer_norm_var = None
if grad_output_shape[1] % 16 != 0:
grad_input = torch.narrow(grad_input, 1, 0, grad_output_shape[1])
return (grad_input, None, None, None, None, grad_attn_qkvw, grad_attn_qkvb, grad_attn_ow, grad_attn_ob,
grad_attn_nw, grad_attn_nb, grad_inter_w, grad_inter_b, grad_output_w, grad_output_b, grad_norm_w,
grad_norm_b, None)
class DeepSpeedTransformerLayer(nn.Module):
"""Initialize the DeepSpeed Transformer Layer.
Static variable:
layer_id: The layer-index counter starting from 0 and incrementing by 1 every time a layer object is instantiated,
e.g. if a model has 24 transformer layers, layer_id goes from 0 to 23.
Arguments:
config: An object of DeepSpeedTransformerConfig
initial_weights: Optional: Only used for unit test
initial_biases: Optional: Only used for unit test
"""
layer_id = 0
def __init__(self, config, initial_weights=None, initial_biases=None):
super(DeepSpeedTransformerLayer, self).__init__()
self.config = config
self.config.layer_id = DeepSpeedTransformerLayer.layer_id
DeepSpeedTransformerLayer.layer_id = DeepSpeedTransformerLayer.layer_id + 1
print("DeepSpeed Transformer config is ", self.config.__dict__)
if self.config.local_rank >= 0:
get_accelerator().set_device(self.config.local_rank)
if initial_weights is None and initial_biases is None:
self.attn_qkvw = nn.Parameter(torch.Tensor(self.config.hidden_size * 3, self.config.hidden_size))
self.attn_qkvb = nn.Parameter(torch.Tensor(self.config.hidden_size * 3))
self.attn_ow = nn.Parameter(torch.Tensor(self.config.hidden_size, self.config.hidden_size))
self.attn_ob = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.attn_nw = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.attn_nb = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.inter_w = nn.Parameter(torch.Tensor(self.config.intermediate_size, self.config.hidden_size))
self.inter_b = nn.Parameter(torch.Tensor(self.config.intermediate_size))
self.output_w = nn.Parameter(torch.Tensor(self.config.hidden_size, self.config.intermediate_size))
self.output_b = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.norm_w = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.norm_b = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.init_transformer_weights(self.config.adjust_init_range)
else:
# For testing only.
q = initial_weights[0].data
k = initial_weights[1].data
v = initial_weights[2].data
self.attn_qkvw = nn.Parameter(torch.cat((q, k, v)))
#self.attn_qkvw[i * self.config.hidden_size:(i + 1) * self.config.hidden_size] = \
# initial_weights[i].clone()
#torch.empty_like(initial_weights[i]).data.copy_(initial_weights[i].data)
self.attn_qkvb = nn.Parameter(torch.Tensor(self.config.hidden_size * 3))
self.attn_qkvb.data.zero_()
self.attn_ow = initial_weights[3]
self.attn_ob = initial_biases[3]
self.attn_nw = initial_weights[4]
self.attn_nb = initial_biases[4]
self.inter_w = initial_weights[5]
self.inter_b = initial_biases[5]
self.output_w = initial_weights[6]
self.output_b = initial_biases[6]
self.norm_w = initial_weights[7]
self.norm_b = initial_biases[7]
# Load cuda modules if needed
global transformer_cuda_module, stochastic_transformer_cuda_module
if transformer_cuda_module is None and not self.config.stochastic_mode:
transformer_cuda_module = TransformerBuilder().load()
if stochastic_transformer_cuda_module is None and self.config.stochastic_mode:
stochastic_transformer_cuda_module = StochasticTransformerBuilder().load()
# create the layer in cuda kernels.
cuda_module = stochastic_transformer_cuda_module if self.config.stochastic_mode else transformer_cuda_module
create_layer_func = cuda_module.create_transformer_layer_fp16 if self.config.fp16 else cuda_module.create_transformer_layer_fp32
create_layer_func(self.config.layer_id, self.config.batch_size, self.config.hidden_size, self.config.heads,
self.config.intermediate_size, self.config.attn_dropout_ratio,
self.config.hidden_dropout_ratio, self.config.layer_norm_eps, self.config.seed,
self.config.pre_layer_norm, self.config.test_gemm, self.config.attn_dropout_checkpoint,
self.config.normalize_invertible, self.config.gelu_checkpoint, self.config.stochastic_mode)
def init_transformer_weights(self, adjust_init_range=False):
num_layers = self.config.num_hidden_layers
output_std = self.config.initializer_range
if adjust_init_range and self.config.local_rank == 0:
print("Accounting for accumulation on the residual path")
output_std = self.config.initializer_range / math.sqrt(2.0 * num_layers)
self.attn_qkvw.data.normal_(mean=0.0, std=self.config.initializer_range)
self.attn_qkvb.data.zero_()
self.attn_ow.data.normal_(mean=0.0, std=output_std)
self.attn_ob.data.zero_()
self.attn_nw.data.fill_(1.0)
self.attn_nb.data.zero_()
self.inter_w.data.normal_(mean=0.0, std=self.config.initializer_range)
self.inter_b.data.zero_()
self.output_w.data.normal_(mean=0.0, std=output_std)
self.output_b.data.zero_()
self.norm_w.data.fill_(1.0)
self.norm_b.data.zero_()
def forward(self,
hidden_states,
attention_mask=None,
head_mask=None,
layer_head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
grads=None):
self.config.is_grad_enabled = torch.is_grad_enabled()
self.config.training = self.training
return DeepSpeedTransformerFunction.apply(hidden_states, attention_mask, self, grads, self.config.layer_id,
self.attn_qkvw, self.attn_qkvb, self.attn_ow, self.attn_ob,
self.attn_nw, self.attn_nb, self.inter_w, self.inter_b,
self.output_w, self.output_b, self.norm_w, self.norm_b, self.config)
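def _example_transformer_layer():
    # Minimal construction sketch (hypothetical, not part of the original file). The
    # sizes below are illustrative assumptions; instantiating the layer builds the
    # transformer CUDA kernels, so the op must be buildable and, with fp16=True, the
    # forward pass expects half-precision inputs.
    config = DeepSpeedTransformerConfig(batch_size=8,
                                        hidden_size=1024,
                                        intermediate_size=4096,
                                        heads=16,
                                        attn_dropout_ratio=0.1,
                                        hidden_dropout_ratio=0.1,
                                        num_hidden_layers=24,
                                        initializer_range=0.02,
                                        fp16=True,
                                        pre_layer_norm=True)
    return DeepSpeedTransformerLayer(config)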
| 20,600 | 48.881356 | 136 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/bias_add.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from typing import Optional
import torch
from deepspeed.ops.op_builder import SpatialInferenceBuilder
spatial_cuda_module = None
def nhwc_bias_add(activation: torch.Tensor,
bias: torch.Tensor,
other: Optional[torch.Tensor] = None,
other_bias: Optional[torch.Tensor] = None) -> torch.Tensor:
global spatial_cuda_module
if spatial_cuda_module is None:
spatial_cuda_module = SpatialInferenceBuilder().load()
if other is None:
return spatial_cuda_module.nhwc_bias_add(activation, bias)
elif other_bias is None:
return spatial_cuda_module.nhwc_bias_add_add(activation, bias, other)
else:
return spatial_cuda_module.nhwc_bias_add_bias_add(activation, bias, other, other_bias)
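def _example_nhwc_bias_add():
    # Minimal sketch (hypothetical, not part of the original file): fused bias add on an
    # NHWC activation with an optional residual added in the same kernel. The shapes are
    # illustrative assumptions; requires the spatial inference CUDA op.
    activation = torch.randn(1, 64, 64, 320, dtype=torch.half, device='cuda')  # NHWC layout
    bias = torch.randn(320, dtype=torch.half, device='cuda')
    residual = torch.randn_like(activation)
    return nhwc_bias_add(activation, bias, other=residual)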
| 876 | 31.481481 | 94 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/triton_ops.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Inspired by original Triton implementation:
https://github.com/openai/triton/blob/b244db06da24a87453a40ad35b085ee37dac3705/python/tutorials/06-fused-attention.py
"""
import torch
import triton
import triton.language as tl
@triton.jit
def _fwd_kernel(
Q,
K,
V,
sm_scale,
TMP,
Out,
stride_qz,
stride_qh,
stride_qm,
stride_qk,
stride_kz,
stride_kh,
stride_kn,
stride_kk,
stride_vz,
stride_vh,
stride_vk,
stride_vn,
stride_oz,
stride_oh,
stride_om,
stride_on,
Z,
H,
N_CTX,
BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
):
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_DMODEL)
off_q = off_hz * stride_qh + offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qk
off_k = off_hz * stride_kh + offs_n[:, None] * stride_kn + offs_d[None, :] * stride_kk
off_v = off_hz * stride_vh + offs_n[:, None] * stride_qm + offs_d[None, :] * stride_qk
# Initialize pointers to Q, K, V
q_ptrs = Q + off_q
k_ptrs = K + off_k
v_ptrs = V + off_v
# initialize pointer to m and l
t_ptrs = TMP + off_hz * N_CTX + offs_m
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# load q: it will stay in SRAM throughout
q = tl.load(q_ptrs)
# loop over k, v and update accumulator
for start_n in range(0, N_CTX, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(k_ptrs + start_n * stride_kn)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k, trans_b=True)
qk *= sm_scale
# -- compute m_ij, p, l_ij
m_ij = tl.max(qk, 1)
p = tl.exp(qk - m_ij[:, None])
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
m_i_new = tl.maximum(m_i, m_ij)
alpha = tl.exp(m_i - m_i_new)
beta = tl.exp(m_ij - m_i_new)
l_i_new = alpha * l_i + beta * l_ij
# -- update output accumulator --
# scale p
p_scale = beta / l_i_new
p = p * p_scale[:, None]
# scale acc
acc_scale = l_i / l_i_new * alpha
tl.store(t_ptrs, acc_scale)
acc_scale = tl.load(t_ptrs) # BUG: have to store and immediately load
acc = acc * acc_scale[:, None]
# update acc
v = tl.load(v_ptrs + start_n * stride_vk)
p = p.to(tl.float16)
acc += tl.dot(p, v)
# update m_i and l_i
l_i = l_i_new
m_i = m_i_new
# initialize pointers to output
offs_n = tl.arange(0, BLOCK_DMODEL)
off_o = off_hz * stride_oh + offs_m[:, None] * stride_om + offs_n[None, :] * stride_on
out_ptrs = Out + off_o
tl.store(out_ptrs, acc)
class triton_flash_attn(torch.nn.Module):
def __init__(self, ):
super(triton_flash_attn, self).__init__()
def forward(self, q, k, v, sm_scale, block_128=True):
BLOCK = 128 if block_128 else 64
# shape constraints
Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
o = torch.empty_like(q)
grid = (triton.cdiv(q.shape[2], BLOCK), q.shape[0] * q.shape[1])
tmp = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
num_warps = 4 if Lk <= 64 else 8
_fwd_kernel[grid](
q,
k,
v,
sm_scale,
tmp,
o,
q.stride(0),
q.stride(1),
q.stride(2),
q.stride(3),
k.stride(0),
k.stride(1),
k.stride(2),
k.stride(3),
v.stride(0),
v.stride(1),
v.stride(2),
v.stride(3),
o.stride(0),
o.stride(1),
o.stride(2),
o.stride(3),
k.shape[0],
k.shape[1],
k.shape[2],
BLOCK_M=BLOCK,
BLOCK_N=BLOCK,
BLOCK_DMODEL=Lk,
num_warps=num_warps,
num_stages=1,
)
return o
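def _example_triton_flash_attn():
    # Minimal sketch (hypothetical, not part of the original file): q/k/v laid out as
    # [batch, heads, seq, head_dim] in fp16. The sizes are illustrative assumptions; the
    # sequence length should be a multiple of the chosen block size (128 here).
    attn = triton_flash_attn()
    q = torch.randn(1, 16, 256, 64, dtype=torch.half, device='cuda')
    k, v = torch.randn_like(q), torch.randn_like(q)
    sm_scale = 1.0 / (q.shape[-1] ** 0.5)
    return attn(q, k, v, sm_scale, block_128=True)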
| 4,434 | 27.798701 | 117 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/ds_attention.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import math
import torch
import torch.nn as nn
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator
from .op_binding import LinearOp, VectorMatMulOp, SoftmaxContextOp, QKVGemmOp, SoftmaxOp
minus_inf = -10000.0
class DeepSpeedSelfAttention(nn.Module):
num_layers = 0
_qkv_buffers = []
def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count=1):
super(DeepSpeedSelfAttention, self).__init__()
self.config = config
data_type = self.config.dtype
data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype
self.config.layer_id = DeepSpeedSelfAttention.num_layers
DeepSpeedSelfAttention.num_layers = DeepSpeedSelfAttention.num_layers + 1
device = get_accelerator().current_device_name() #if config.bigscience_bloom else 'cpu'
if self.config.set_empty_params:
self.attn_qw = None
self.attn_qb = None
self.attn_kw = None
self.attn_kb = None
self.attn_vw = None
self.attn_vb = None
self.attn_qkvw = None
self.attn_qkvb = None
self.attn_ow = None
self.attn_ob = None
else:
qkv_size_per_partition = (self.config.hidden_size // self.config.mp_size) * 3
self.attn_qkvw = nn.Parameter(torch.empty(self.config.hidden_size,
qkv_size_per_partition,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_qkvb = nn.Parameter(torch.empty(qkv_size_per_partition, dtype=data_type_fp, device=device),
requires_grad=False)
out_size_per_partition = self.config.hidden_size // self.config.mp_size
self.attn_ow = nn.Parameter(torch.empty(out_size_per_partition,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_ob = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
self.num_attention_heads_per_partition = self.config.heads // self.config.mp_size
self.hidden_size_per_partition = self.config.hidden_size // self.config.mp_size
self.hidden_size_per_attention_head = self.config.hidden_size // self.config.heads
self.mp_group = mp_group
# used for quantization
self.q_scales = q_scales
self.q_groups = q_groups
self.merge_count = int(math.log2(merge_count))
self.norm_factor = math.sqrt(self.config.hidden_size // self.config.heads)
if not config.use_mup:
self.norm_factor = math.sqrt(self.norm_factor)
if self.config.scale_attn_by_inverse_layer_idx is True:
self.norm_factor *= math.sqrt(self.config.layer_id + 1)
# https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191
self.qkv_func = QKVGemmOp(config)
self.score_context_func = SoftmaxContextOp(config)
self.linear_func = LinearOp(config)
self.vector_matmul_func = VectorMatMulOp(config)
if len(DeepSpeedSelfAttention._qkv_buffers) == 0:
DeepSpeedSelfAttention._qkv_buffers = [
torch.empty(self.hidden_size_per_partition * 3,
self.config.hidden_size,
dtype=data_type_fp,
device=device),
torch.empty(self.hidden_size_per_partition * 3, dtype=data_type_fp, device=device)
]
def compute_attention(self, qkv_out, input_mask, layer_past, alibi):
if isinstance(qkv_out, list) or isinstance(qkv_out, tuple):
qkv_out = qkv_out[0]
no_masking = input_mask is None
if no_masking:
input_mask = torch.empty(1)
attn_key_value = self.score_context_func(
query_key_value=qkv_out,
attn_mask=((1 - input_mask).to(qkv_out.dtype) *
minus_inf) if input_mask.dtype == torch.int64 else input_mask,
heads=self.num_attention_heads_per_partition,
norm_factor=(1 / self.norm_factor if self.config.scale_attention else 1.0),
no_masking=no_masking,
layer_id=self.config.layer_id,
num_layers=DeepSpeedSelfAttention.num_layers,
alibi=alibi)
context_layer, key_layer, value_layer = attn_key_value
return context_layer, key_layer, value_layer
def _merge_qkv(self):
qvkw = DeepSpeedSelfAttention._qkv_buffers[0]
qvkw[:self.hidden_size_per_partition, :] = self.attn_qw # type: ignore
qvkw[self.hidden_size_per_partition:2 * self.hidden_size_per_partition, :] = self.attn_kw # type: ignore
qvkw[2 * self.hidden_size_per_partition:, :] = self.attn_vw # type: ignore
if self.attn_qb is not None:
qvkb = DeepSpeedSelfAttention._qkv_buffers[1]
qvkb[:self.hidden_size_per_partition] = self.attn_qb
qvkb[self.hidden_size_per_partition:2 * self.hidden_size_per_partition] = self.attn_kb # type: ignore
qvkb[2 * self.hidden_size_per_partition:] = self.attn_vb # type: ignore
return DeepSpeedSelfAttention._qkv_buffers
def forward(self,
input,
input_mask,
head_mask=None,
layer_past=None,
get_present=False,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
norm_w=None,
norm_b=None,
alibi=None):
if self.attn_qkvw is None:
self._attn_qkvw, self._attn_qkvb = self._merge_qkv()
else:
self._attn_qkvw = self.attn_qkvw
self._attn_qkvb = self.attn_qkvb
if not self.config.pre_layer_norm:
qkv_out = self.linear_func(input=input,
weight=self._attn_qkvw,
bias=self._attn_qkvb,
add_bias=self.attn_qkvb is not None,
do_flash_attn=False,
num_heads=self.num_attention_heads_per_partition,
num_layers=DeepSpeedSelfAttention.num_layers)
else:
qkv_out = self.qkv_func(input=input,
weight=self._attn_qkvw,
bias=self._attn_qkvb,
gamma=norm_w,
beta=norm_b)
context_layer, key_layer, value_layer = self.compute_attention(qkv_out=qkv_out,
input_mask=input_mask,
layer_past=layer_past,
alibi=alibi)
output = self.vector_matmul_func(input=context_layer, weight=self.attn_ow)
inp_norm = qkv_out[-1]
if self.config.mlp_after_attn and self.mp_group is not None and dist.get_world_size(group=self.mp_group) > 1:
dist.all_reduce(output, group=self.mp_group)
return (output, key_layer, value_layer, context_layer, inp_norm)
class BloomSelfAttention(DeepSpeedSelfAttention):
def __init__(self, *args, **kwargs):
super(BloomSelfAttention, self).__init__(*args, **kwargs)
self.softmax_func = SoftmaxOp(self.config)
    ########### This part is taken/modified from the HF modeling_bloom.py ################
# Reference: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py
def _transpose_for_context(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_layer_shape = x.size()[:-2] + \
(self.hidden_size_per_partition,)
return x.view(*new_x_layer_shape).contiguous()
def _split_tensor_along_last_dim(self, tensor, num_partitions, contiguous_split_chunks=True):
"""Split a tensor along its last dimension.
Args:
tensor: ([`torch.tensor`], *required*):
input tensor to split
num_partitions ([`int`], *required*):
number of partitions to split the tensor
            contiguous_split_chunks ([`bool`], *optional*, default=`True`):
If True, make each chunk contiguous in memory.
"""
# Get the size and dimension.
last_dim = tensor.dim() - 1
numerator, denominator = tensor.size()[last_dim], num_partitions
if not (numerator % denominator == 0):
raise ValueError(f"{numerator} is not divisible by {denominator}")
last_dim_size = numerator // denominator
# Split.
tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
# Note: torch.split does not create contiguous tensors by default.
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
def compute_attention(self, qkv_out, input_mask, layer_past, alibi):
if isinstance(qkv_out, list) or isinstance(qkv_out, tuple):
qkv_out = qkv_out[0]
no_masking = input_mask is None
if no_masking:
input_mask = torch.empty(1)
mixed_x_layer = qkv_out
alibi = alibi.to(get_accelerator().current_device_name())
head_dim = self.hidden_size_per_partition // self.num_attention_heads_per_partition
new_tensor_shape = mixed_x_layer.size()[:-1] + (self.num_attention_heads_per_partition, 3 * head_dim)
mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
query_layer, key_layer, value_layer = self._split_tensor_along_last_dim(mixed_x_layer, 3)
# [batch_size, head_dim, q_length, k_length]
output_size = (query_layer.size(0), query_layer.size(2), query_layer.size(1), key_layer.size(1))
# [batch_size, q_length, num_heads, head_dim] -> [q_length, batch_size * num_heads, head_dim]
query_layer = query_layer.transpose(1, 2).reshape(output_size[0] * output_size[1], output_size[2], -1)
# [batch_size, k_length, num_heads, head_dim] -> [k_length, batch_size * num_heads, head_dim]
key_layer = key_layer.transpose(1, 2).reshape(output_size[0] * output_size[1], output_size[3],
-1).transpose(-1, -2)
value_layer = value_layer.transpose(1, 2).reshape(output_size[0] * output_size[1], output_size[3], -1)
if layer_past is not None:
past_key, past_value = layer_past
# concatenate along seq_length dimension -> [batch_size, qk_length, num_heads, head_dim]
key_layer = torch.cat((past_key.type_as(key_layer), key_layer), dim=-1)
value_layer = torch.cat((past_value.type_as(value_layer), value_layer), dim=-2)
presents = (key_layer, value_layer)
# Raw attention scores. [batch_size * num_heads, q_length, k_length]
matmul_result = torch.matmul(query_layer, key_layer)
# change view to [batch_size, num_heads, q_length, k_length]
attention_scores = matmul_result.view(output_size[0], output_size[1], output_size[2], -1)
offset = dist.get_rank() * self.num_attention_heads_per_partition if dist.is_initialized() else 0
target_dtype = torch.float16 if self.config.dtype == torch.int8 else self.config.dtype
attention_probs = self.softmax_func(attn_scores=attention_scores,
attn_mask=((1 - input_mask).to(target_dtype) * minus_inf),
alibi=alibi,
triangular=(self.config.triangular_masking
and (attention_scores.shape[-2] > 1)),
recompute=False,
local_attention=False,
window_size=1,
async_op=False,
layer_scale=1 / (self.norm_factor * self.norm_factor),
head_offset=offset)
# change view [batch_size x num_heads, q_length, k_length]
attention_probs_reshaped = attention_probs.view(*matmul_result.shape)
# matmul: [batch_size * num_heads, q_length, head_dim]
context_layer = torch.bmm(attention_probs_reshaped, value_layer)
# change view [batch_size, num_heads, q_length, head_dim]
context_layer = context_layer.view(
context_layer.size(0) // self.num_attention_heads_per_partition, self.num_attention_heads_per_partition,
context_layer.size(1), context_layer.shape[-1])
context_layer = self._transpose_for_context(context_layer)
key_layer = presents[0]
value_layer = presents[1]
return context_layer, key_layer, value_layer
###################### End of HF modeling_bloom addition ########################
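def _example_split_along_last_dim():
    # Illustrative sketch (hypothetical, not part of the original file) of what
    # _split_tensor_along_last_dim does for a fused QKV tensor: a
    # [batch, seq, heads, 3 * head_dim] tensor splits into three
    # [batch, seq, heads, head_dim] chunks, one each for query, key and value.
    mixed = torch.randn(2, 16, 8, 3 * 64)
    q, k, v = torch.split(mixed, 64, dim=-1)
    return q.shape, k.shape, v.shape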
| 13,853 | 48.478571 | 121 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/moe_inference.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import json
import math
import torch
from torch.autograd import Function
# accelerator modules will be imported if needed
inference_module = None
specialized_mode = None
import torch.nn as nn
from .ds_attention import DeepSpeedSelfAttention
from .config import DeepSpeedInferenceConfig
from ....moe.sharded_moe import TopKGate
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
class DeepSpeedMoEInferenceConfig(DeepSpeedInferenceConfig):
"""Initialize the DeepSpeed Transformer Config.
Arguments:
hidden_size: The hidden size of the transformer layer
intermediate_size: The intermediate size of the feed-forward part of transformer layer
heads: The number of heads in the self-attention of the transformer layer
num_hidden_layers: The number of transformer layers
layer_norm_eps: The epsilon value for the layer norm
        local_rank: Optional: The rank of the GPU running the transformer kernel; it is not required
            if the model has already set the current device, otherwise it needs to be set
            so that the transformer kernel can work on the right device
mp_size (optional): This argument is mainly used to create the parameters on the kernel side
using model-parallel architecture. If the client model already takes care of this, there is no
need to pass this argument.
fp16: Enable half-precision computation
bf16: Enable bf16 floating point computation
pre_layer_norm: Select between Pre-LN or Post-LN transformer architecture
stochastic_mode: Enable for high performance; please note that this flag has some level of
non-determinism and can produce different results on different runs. However, we have seen
that by enabling it, pretraining tasks such as BERT are not affected and can obtain
a high accuracy level. On the other hand, for downstream tasks such as fine-tuning, we recommend
turning it off in order to be able to reproduce the same result through the regular kernel execution.
scale_attention: If true, both q and k are scaled by 1/sqrt(attention_heads) before attention computation.
return_tuple: if True, returns the transformer output as a tuple, otherwise returns as a tensor
"""
def __init__(self,
hidden_size=-1,
intermediate_size=-1,
heads=-1,
num_hidden_layers=-1,
layer_norm_eps=1e-12,
local_rank=-1,
mp_size=1,
fp16=False,
bf16=False,
q_int8=False,
pre_layer_norm=True,
stochastic_mode=False,
scale_attention=True,
triangular_masking=True,
local_attention=False,
window_size=256,
return_tuple=True,
moe_experts=1,
global_experts=1,
k=1,
capacity_factor=1.,
eval_capacity_factor=1.,
min_capacity=1,
noisy_gate_policy=None,
drop_tokens=True,
use_rts=False,
mlp_type='standard',
scale_attn_by_inverse_layer_idx=False):
# map the (fp16, bf16, q_int8) flags onto the dtype-based base config
data_type = torch.int8 if q_int8 else torch.float16 if fp16 else torch.bfloat16 if bf16 else torch.float32
super(DeepSpeedMoEInferenceConfig,
self).__init__(hidden_size, (intermediate_size if intermediate_size > 0 else 4 * hidden_size), heads,
num_hidden_layers, layer_norm_eps, local_rank, mp_size, data_type, pre_layer_norm,
stochastic_mode=stochastic_mode, scale_attention=scale_attention,
triangular_masking=triangular_masking, local_attention=local_attention,
window_size=window_size, return_tuple=return_tuple)
# keep the raw precision flags around for kernels that still branch on them
self.fp16 = fp16
self.bf16 = bf16
self.q_int8 = q_int8
self.moe_experts = moe_experts
self.k = k
self.capacity_factor = capacity_factor
self.eval_capacity_factor = eval_capacity_factor
self.min_capacity = min_capacity
self.noisy_gate_policy = noisy_gate_policy
self.drop_tokens = drop_tokens
self.use_rts = use_rts
self.global_experts = global_experts
self.mlp_type = mlp_type
self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
@classmethod
def from_dict(cls, json_object):
config = cls()
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
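# --- Illustrative sketch (editor's addition, hypothetical helper, not part of the upstream API) ---
# A minimal example of building the config above; the sizes are made up and only the
# pure-Python config path is exercised, so no CUDA kernels need to be compiled.
def _example_moe_inference_config():
    cfg = DeepSpeedMoEInferenceConfig(hidden_size=1024,
                                      heads=16,
                                      num_hidden_layers=24,
                                      fp16=True,
                                      moe_experts=4,
                                      global_experts=8,
                                      k=1)
    # intermediate_size falls back to 4 * hidden_size when it is not given explicitly
    assert cfg.intermediate_size == 4 * 1024
    # the same settings can be round-tripped through a plain dict (e.g. parsed from JSON)
    return DeepSpeedMoEInferenceConfig.from_dict(cfg.__dict__)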
class DeepSpeedMLPFunction(Function):
@staticmethod
def forward(ctx, input, inter_w, inter_b, config, output_b, output_w, q_scales, q_groups, merge_count, mp_group,
async_op):
if config.q_int8:
intermediate = inference_module.fused_gemm_gelu_int8(input, inter_w, inter_b, config.epsilon, q_scales[2],
(q_groups * (2**merge_count)), config.pre_layer_norm)
output = inference_module.vector_matmul_int8(intermediate, output_w, q_scales[3], q_groups, (merge_count))
else:
mlp_gemm_func = inference_module.fused_gemm_gelu_fp16 if config.fp16 else \
inference_module.fused_gemm_gelu_fp32
output = mlp_gemm_func(input, inter_w, inter_b, output_w, config.epsilon, config.pre_layer_norm, async_op)
if mp_group is not None and dist.get_world_size(group=mp_group) > 1:
dist.all_reduce(output, group=mp_group, async_op=async_op)
return output + output_b
@staticmethod
def backward(ctx, grad_output):
raise RuntimeError('You are running with DeepSpeed Inference mode. \
Please switch to Training mode for running backward!')
class DeepSpeedMoEMLP(nn.Module):
def __init__(self, config, q_scales=None, q_groups=1, merge_count=1, mlp_extra_grouping=False, mp_group=None):
super(DeepSpeedMoEMLP, self).__init__()
self.config = config
self.attn_nw = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.attn_nb = nn.Parameter(torch.Tensor(self.config.hidden_size))
interm_size = self.config.intermediate_size // (1 if mp_group is None else dist.get_world_size(group=mp_group))
self.inter_w = nn.Parameter(torch.Tensor(self.config.hidden_size, interm_size))
self.inter_b = nn.Parameter(torch.Tensor(interm_size))
self.output_w = nn.Parameter(torch.Tensor((interm_size), self.config.hidden_size))
self.output_b = nn.Parameter(torch.Tensor(self.config.hidden_size))
# used for quantization
self.q_scales = q_scales
self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups
self.merge_count = int(math.log2(merge_count))
self.mp_group = mp_group
def forward(self, input, async_op=False):
return DeepSpeedMLPFunction.apply(input, self.inter_w, self.inter_b, self.config, self.output_b, self.output_w,
self.q_scales, self.q_groups, self.merge_count, self.mp_group, async_op)
class DeepSpeedMoEInference(nn.Module):
"""Initialize the DeepSpeed MoE Transformer Layer.
Arguments:
layer_id: The layer index starting from 0, e.g. if the model has 24 transformer layers,
layer_id will be 0, 1, 2, ..., 23 as each layer object is instantiated
config: An object of DeepSpeedInferenceConfig
mp_group: Model parallelism group initialized on the modeling side.
quantize_scales: This argument groups all the layers' scales used for quantization
quantize_groups: Number of groups used for quantizing the model
merge_count: Shows the number of model-parallel checkpoints merged before running inference.
We use this argument to control the quantization scale for the model parameters if a bigger
quantize-grouping than 1 is used.
mlp_extra_grouping: This flag indicates that a 2x higher number of groups is used for the MLP part
of a Transformer layer. We use this feature for quantization to reduce the convergence impact
for specific downstream tasks.
"""
layer_id = 0
def __init__(self,
config,
mp_group=None,
ep_group=None,
expert_mp_group=None,
quantize_scales=None,
quantize_groups=1,
merge_count=1,
mlp_extra_grouping=False):
super(DeepSpeedMoEInference, self).__init__()
self.config = config
self.config.layer_id = DeepSpeedMoEInference.layer_id
global inference_module
global specialized_mode
if inference_module is None:
specialized_mode = False
# InferenceSpecializedBuilder is not among the DeepSpeed-provided builders yet, so we look it up by its builder name string
builder = get_accelerator().create_op_builder("InferenceSpecializedBuilder")
if builder is not None and builder.is_compatible():
inference_module = builder.load()
specialized_mode = True
else:
inference_module = InferenceBuilder().load()
self.config.specialized_mode = specialized_mode
assert self.config.dtype != torch.bfloat16, "DeepSpeed MoE Transformer Inference not yet tested for bfloat support"
DeepSpeedMoEInference.layer_id += 1
self.attention = DeepSpeedSelfAttention(self.config, mp_group, quantize_scales, quantize_groups, merge_count)
self.attn_nw = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.attn_nb = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.norm_w = nn.Parameter(torch.Tensor(self.config.hidden_size))
self.norm_b = nn.Parameter(torch.Tensor(self.config.hidden_size))
if config.mlp_type == 'residual':
self.res_mlp = DeepSpeedMoEMLP(config, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping,
mp_group)
self.res_coef = nn.Parameter(torch.Tensor(self.config.hidden_size, 2))
self.coef_func = inference_module.softmax_fp16 if self.config.dtype in [torch.float16, torch.int8] else \
inference_module.softmax_fp32
self.vector_matmul_func = inference_module.vector_matmul_fp16 if self.config.dtype == torch.float16 else \
inference_module.vector_matmul_fp32
config.mp_size = 1
self.mlp = nn.ModuleList(
DeepSpeedMoEMLP(config, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping, expert_mp_group)
for i in range(self.config.moe_experts))
self.moe_gate = TopKGate(self.config.hidden_size, self.config.global_experts, self.config.k,
self.config.capacity_factor, self.config.eval_capacity_factor,
self.config.min_capacity, self.config.noisy_gate_policy, self.config.drop_tokens,
self.config.use_rts)
self.ep_group = ep_group
self.mp_group = mp_group
self.expert_mp_group = expert_mp_group
print("DeepSpeed MoE Transformer Inference config is ", self.config.__dict__)
self.bias_residual_func = inference_module.bias_residual_fp16 if self.config.dtype in [torch.float16, torch.int8] else \
inference_module.bias_residual_fp32
self.ds_layernorm = inference_module.layer_norm_fp16 if self.config.dtype in [torch.float16, torch.int8] else \
inference_module.layer_norm_fp32
self.einsum_sec_sm_ecm = inference_module.einsum_sec_sm_ecm_fp16 if self.config.dtype in [torch.float16, torch.int8] else \
inference_module.einsum_sec_sm_ecm_fp32
def res_coef_func(self, inp, async_op):
inp = self.vector_matmul_func(inp, self.res_coef, async_op)
return self.coef_func(inp, torch.empty(1), False, False, False, 256, async_op)
def moe_gate_einsum(self, attention_output):
_, combined_weights, dispatch_mask, _ = self.moe_gate(
attention_output.view(-1, self.config.hidden_size),
None,
)
dispatched_attention = self.einsum_sec_sm_ecm(dispatch_mask.type_as(attention_output),
attention_output.view(-1, self.config.hidden_size))
return dispatched_attention, combined_weights
def expert_exec(self, dispatched_input):
dispatched_input = dispatched_input.reshape(self.config.global_experts // self.config.moe_experts,
self.config.moe_experts, -1, self.config.hidden_size)
chunks = dispatched_input.chunk(self.config.moe_experts, dim=1)
expert_outputs = torch.empty((
self.config.moe_experts,
chunks[0].shape[0],
) + chunks[0].shape[2:],
dtype=dispatched_input.dtype,
device=dispatched_input.device)
for chunk, expert in zip(chunks, range(len(self.mlp))):
expert_outputs[expert] = self.mlp[expert](chunk.view(-1, dispatched_input.shape[-2],
dispatched_input.shape[-1]))
return expert_outputs
def _alltoall(self, dispatched_attention):
if dist.get_world_size(group=self.ep_group) > 1:
dispatched_input = torch.empty_like(dispatched_attention)
dist.all_to_all_single(dispatched_input, dispatched_attention, group=self.ep_group)
return dispatched_input
else:
return dispatched_attention
def scale_expert_output(self, attention_output, expert_output, combined_weights):
combined_output = torch.matmul(
combined_weights.type_as(attention_output).reshape(combined_weights.shape[0], -1),
expert_output.reshape(-1, expert_output.shape[-1]))
return combined_output.reshape(attention_output.shape)
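# --- Illustrative sketch (editor's addition): the combine step above with explicit shapes ---
# Assuming S tokens, E experts, a capacity of C slots per expert and hidden size H, the gate
# produces combine weights of shape [S, E, C] and the experts produce outputs of shape [E, C, H].
# Flattening both reduces the weighted combination to a single [S, E*C] x [E*C, H] matmul.
def _example_combine_expert_outputs():
    S, E, C, H = 8, 4, 2, 16  # hypothetical sizes
    combined_weights = torch.rand(S, E, C)
    expert_output = torch.randn(E, C, H)
    combined = torch.matmul(combined_weights.reshape(S, -1), expert_output.reshape(-1, H))
    assert combined.shape == (S, H)
    return combined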
def forward(self,
input,
input_mask=None,
attention_mask=None,
head_mask=None,
layer_past=None,
get_key_value=False,
get_present=False,
encoder_output=None,
enc_dec_attn_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
output_attentions=False):
get_present = (get_present or get_key_value or use_cache)
input_mask = input_mask if attention_mask is None else attention_mask
input_type = input.dtype
if (self.config.dtype in [torch.float16, torch.int8]) and input_type == torch.float:
input = input.half()
with torch.no_grad():
attention_output = self.attention(input, input_mask, head_mask, layer_past, get_present,
encoder_hidden_states, encoder_attention_mask, output_attentions,
self.norm_w, self.norm_b)
if get_present:
attention_output, p_key, p_value = attention_output[0:3]
presents = (p_key, p_value)
elif output_attentions:
attention_output, _, _, context_output = attention_output[0:4]
else:
attention_output = attention_output[0]
residual_add = attention_output + self.attention.attn_ob
attention_output = self.ds_layernorm(residual_add, self.attn_nw, self.attn_nb, self.config.epsilon)
if self.config.mlp_type == 'residual':
res_mlp_out = self.res_mlp(attention_output, async_op=True)
res_coef_out = self.res_coef_func(attention_output, async_op=True)
if self.expert_mp_group is not None:
tensor_list = [
torch.empty_like(attention_output) for _ in range(dist.get_world_size(group=self.expert_mp_group))
]
tensor_list[dist.get_rank(group=self.expert_mp_group)] = attention_output
dist.all_gather(tensor_list, attention_output, group=self.expert_mp_group)
attention_output = torch.cat(tensor_list).contiguous()
############## MoE Gating + Experts ###############
dispatched_attention, combined_weights = self.moe_gate_einsum(attention_output)
dispatched_input = self._alltoall(dispatched_attention)
expert_outputs = self.expert_exec(dispatched_input)
expert_output = self._alltoall(expert_outputs)
output = self.scale_expert_output(attention_output, expert_output, combined_weights)
################################################
if self.expert_mp_group is not None:
output = output.split(output.shape[0] // dist.get_world_size(group=self.expert_mp_group),
dim=0)[dist.get_rank(group=self.expert_mp_group)]
if self.config.mlp_type == 'residual':
inference_module.moe_res_matmul(res_mlp_out, res_coef_out, output)
output = self.bias_residual_func(output, residual_add, torch.empty(1))
if not self.config.pre_layer_norm:
output = self.ds_layernorm(output, self.norm_w, self.norm_b, self.config.epsilon)
if input_type != output.dtype:
output = output.to(input_type)
if get_present:
output = (output, presents)
if self.config.return_tuple:
return output if type(output) is tuple else (output, )
else:
return output
| 18,458 | 49.434426 | 131 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/config.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import json
import torch
from deepspeed.utils.types import ActivationFuncType, NormType
class TransformerConfig():
def __init__(self, hidden_size, intermediate_size, heads, num_hidden_layers):
self.layer_id = -1
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.heads = heads
self.num_hidden_layers = num_hidden_layers
class DeepSpeedInferenceConfig(TransformerConfig):
"""Initialize the DeepSpeed Transformer Config.
Arguments:
hidden_size: The hidden size of the transformer layer
intermediate_size: The intermediate size of the feed-forward part of transformer layer
heads: The number of heads in the self-attention of the transformer layer
num_hidden_layers: The number of transformer layers
layer_norm_eps: The epsilon value for the layer norm
local_rank: Optional: The rank of the GPU running the transformer kernel. It is not required
if the model has already set the current device; otherwise, set it
so that the transformer kernel can work on the right device
mp_size (optional): This argument is mainly used to create the parameters on the kernel side
using model-parallel architecture. If the client model already takes care of this, there is no
need to pass this argument.
fp16: Enable half-precision computation
bf16: Enable bf16 floating point computation
pre_layer_norm: Select between Pre-LN or Post-LN transformer architecture
stochastic_mode: Enable for high performance; please note that this flag has some level of
non-determinism and can produce different results on different runs. However, we have seen
that by enabling it, pretraining tasks such as BERT are not affected and can obtain
a high accuracy level. On the other hand, for downstream tasks such as fine-tuning, we recommend
turning it off in order to be able to reproduce the same result through the regular kernel execution.
scale_attention: If true, both q and k are scaled by 1/sqrt(attention_heads) before attention computation.
return_tuple: if True, returns the transformer output as a tuple, otherwise returns as a tensor
bigscience_bloom: This flag is added temporarily for supporting the BLOOM-176B model architecture.
use_triton: Whether to use Triton kernels for inference.
"""
def __init__(self,
hidden_size=-1,
intermediate_size=-1,
heads=-1,
num_hidden_layers=-1,
layer_norm_eps=1e-12,
local_rank=-1,
mp_size=1,
dtype=torch.float16,
pre_layer_norm=True,
norm_type=NormType.LayerNorm,
stochastic_mode=False,
scale_attention=True,
triangular_masking=True,
local_attention=False,
window_size=256,
rotary_dim=-1,
rotate_half=False,
rotate_every_two=True,
return_tuple=True,
mlp_after_attn=True,
mlp_act_func_type=ActivationFuncType.GELU,
training_mp_size=1,
bigscience_bloom=False,
max_out_tokens=1024,
min_out_tokens=1,
enable_qkv_quantization=False,
use_mup=False,
scale_attn_by_inverse_layer_idx=False,
return_single_tuple=False,
set_empty_params=False,
transposed_mode=False,
use_triton=False,
triton_autotune=False):
super(DeepSpeedInferenceConfig,
self).__init__(hidden_size, (intermediate_size if intermediate_size > 0 else 4 * hidden_size), heads,
num_hidden_layers)
self.dtype = dtype
self.pre_layer_norm = pre_layer_norm
self.norm_type = norm_type
self.local_rank = local_rank
self.stochastic_mode = stochastic_mode
self.epsilon = layer_norm_eps
self.mp_size = mp_size
self.scale_attention = scale_attention
self.triangular_masking = triangular_masking
self.local_attention = local_attention
self.window_size = window_size
self.rotary_dim = rotary_dim
self.rotate_half = rotate_half
self.rotate_every_two = rotate_every_two
self.return_tuple = return_tuple
self.mlp_after_attn = mlp_after_attn
self.mlp_act_func_type = mlp_act_func_type
self.specialized_mode = False
self.training_mp_size = training_mp_size
self.bigscience_bloom = bigscience_bloom
self.max_out_tokens = max_out_tokens
self.min_out_tokens = min_out_tokens
self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
self.enable_qkv_quantization = enable_qkv_quantization
self.use_mup = use_mup
self.return_single_tuple = return_single_tuple
self.set_empty_params = set_empty_params
self.transposed_mode = transposed_mode
self.use_triton = use_triton
self.triton_autotune = triton_autotune
@classmethod
def from_dict(cls, json_object):
config = DeepSpeedInferenceConfig()
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
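# --- Illustrative sketch (editor's addition, hypothetical helper, not part of the upstream API) ---
# A minimal example of constructing the config above, either directly or from a JSON-style dict.
# All sizes are made up; only plain Python runs here, so no kernels are required.
def _example_inference_config():
    cfg = DeepSpeedInferenceConfig(hidden_size=2048,
                                   heads=16,
                                   num_hidden_layers=24,
                                   dtype=torch.float16,
                                   mp_size=1,
                                   max_out_tokens=2048)
    assert cfg.intermediate_size == 4 * 2048  # defaulted when intermediate_size <= 0
    # the same settings could come from json.loads(...) on a config file
    cfg_from_dict = DeepSpeedInferenceConfig.from_dict({"hidden_size": 2048, "heads": 16})
    return cfg, cfg_from_dict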
| 5,967 | 44.907692 | 118 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/ds_mlp.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import math
import torch
import torch.nn as nn
from deepspeed import comm as dist
from deepspeed.utils.types import GATED_ACTIVATION_TYPES
from deepspeed.accelerator import get_accelerator
from .op_binding import MLPGemmOp, VectorMatMulOp, GELUGemmOp, ResidualAddOp
class DeepSpeedMLP(nn.Module):
_inter_w_buffers = []
def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count=1, mlp_extra_grouping=False):
super(DeepSpeedMLP, self).__init__()
self.config = config
data_type = torch.half if self.config.dtype == torch.int8 else self.config.dtype
data_type_fp = data_type
device = get_accelerator().current_device_name()
proj_factor = 2 if self.config.mlp_act_func_type in GATED_ACTIVATION_TYPES else 1
self.config.intermediate_size = self.config.intermediate_size if self.config.intermediate_size > 0 else 4 * self.config.hidden_size
self.intm_w_sz_per_partition = self.config.intermediate_size * proj_factor // self.config.mp_size
self.intm_o_sz_per_partition = self.config.intermediate_size // self.config.mp_size
if self.config.set_empty_params:
self.attn_nw = None
self.attn_nb = None
self.inter_w = None
self.inter_b = None
self.inter_up_w = None
self.inter_up_b = None
self.inter_gate_w = None
self.inter_gate_b = None
self.output_w = None
self.output_b = None
else:
self.attn_nw = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
self.attn_nb = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
self.inter_w = nn.Parameter(torch.empty(self.config.hidden_size,
self.intm_w_sz_per_partition,
dtype=data_type,
device=device),
requires_grad=False)
self.inter_b = nn.Parameter(torch.empty(self.intm_w_sz_per_partition, dtype=data_type_fp, device=device),
requires_grad=False)
self.output_w = nn.Parameter(torch.empty(self.intm_o_sz_per_partition,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.output_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
# used for quantization
self.q_scales = q_scales
self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups
self.merge_count = int(math.log2(merge_count))
self.mp_group = mp_group
self.mlp_gemm_func = MLPGemmOp(config)
self.vector_matmul_func = VectorMatMulOp(config)
self.fused_gemm_gelu = GELUGemmOp(config)
self.residual_add_func = ResidualAddOp(config)
if len(DeepSpeedMLP._inter_w_buffers) == 0:
DeepSpeedMLP._inter_w_buffers = [
torch.empty(self.config.hidden_size, self.intm_w_sz_per_partition, dtype=data_type, device=device),
torch.empty(self.intm_w_sz_per_partition, dtype=data_type_fp, device=device)
]
def _merge_inter_w(self):
inter_w = DeepSpeedMLP._inter_w_buffers[0]
inter_w[:self.intm_w_sz_per_partition, :] = self.inter_up_w # type: ignore
inter_w[self.intm_w_sz_per_partition:, :] = self.inter_gate_w # type: ignore
if self.inter_up_b is not None:
inter_b = DeepSpeedMLP._inter_w_buffers[1]
inter_b[:self.intm_w_sz_per_partition] = self.inter_up_b # type: ignore
inter_b[self.intm_w_sz_per_partition:] = self.inter_gate_b # type: ignore
return DeepSpeedMLP._inter_w_buffers
def forward(self, input, residual, residual_norm, bias):
if self.inter_w is None:
self._inter_w, self._inter_b = self._merge_inter_w()
else:
self._inter_w = self.inter_w
self._inter_b = self.inter_b
residual_add = None
if self.attn_nw is None:
output = self.fused_gemm_gelu(input=residual_norm,
weight=self._inter_w,
bias=self._inter_b,
weight_out=self.output_w)
else:
output, residual_add = self.mlp_gemm_func(input=input,
residual=residual,
weight_interm=self._inter_w,
weight_out=self.output_w,
input_bias=bias,
bias=self._inter_b,
gamma=self.attn_nw,
beta=self.attn_nb)
residual = self.residual_add_func(hidden_state=output,
residual=residual,
add_bias=bias is not None,
attention_output=input,
attention_bias=bias if bias is not None else self.output_b,
final_bias=self.output_b,
residual_add=residual_add)
if self.mp_group is not None and dist.get_world_size(group=self.mp_group) > 1:
dist.all_reduce(residual, group=self.mp_group)
return residual
| 6,212 | 48.309524 | 139 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/diffusers_transformer_block.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import torch.nn as nn
from deepspeed import module_inject
from .diffusers_attention import DeepSpeedDiffusersAttention
from .bias_add import nhwc_bias_add
from .diffusers_2d_transformer import Diffusers2DTransformerConfig
from deepspeed.ops.op_builder import InferenceBuilder, SpatialInferenceBuilder
from deepspeed.utils.types import ActivationFuncType
# Ops will be loaded on demand
transformer_cuda_module = None
spatial_cuda_module = None
def load_transformer_module():
global transformer_cuda_module
if transformer_cuda_module is None:
transformer_cuda_module = InferenceBuilder().load()
return transformer_cuda_module
def load_spatial_module():
global spatial_cuda_module
if spatial_cuda_module is None:
spatial_cuda_module = SpatialInferenceBuilder().load()
return spatial_cuda_module
class DeepSpeedDiffusersTransformerBlock(nn.Module):
def __init__(self, equivalent_module: nn.Module, config: Diffusers2DTransformerConfig):
super(DeepSpeedDiffusersTransformerBlock, self).__init__()
self.quantizer = module_inject.GroupQuantizer(q_int8=config.int8_quantization)
# Ensure ops are built by the time we start running
self.config = config
self.ff1_w = self.quantizer.quantize(
nn.Parameter(equivalent_module.ff.net[0].proj.weight.data, requires_grad=False))
self.ff1_b = nn.Parameter(equivalent_module.ff.net[0].proj.bias.data, requires_grad=False)
self.ff2_w = self.quantizer.quantize(nn.Parameter(equivalent_module.ff.net[2].weight.data,
requires_grad=False))
self.ff2_b = nn.Parameter(equivalent_module.ff.net[2].bias.data, requires_grad=False)
self.norm1_g = nn.Parameter(equivalent_module.norm1.weight.data, requires_grad=False)
self.norm1_b = nn.Parameter(equivalent_module.norm1.bias.data, requires_grad=False)
self.norm1_eps = equivalent_module.norm1.eps
self.norm2_g = nn.Parameter(equivalent_module.norm2.weight.data, requires_grad=False)
self.norm2_b = nn.Parameter(equivalent_module.norm2.bias.data, requires_grad=False)
self.norm2_eps = equivalent_module.norm2.eps
self.norm3_g = nn.Parameter(equivalent_module.norm3.weight.data, requires_grad=False)
self.norm3_b = nn.Parameter(equivalent_module.norm3.bias.data, requires_grad=False)
self.norm3_eps = equivalent_module.norm3.eps
self.attn_1 = equivalent_module.attn1
self.attn_2 = equivalent_module.attn2
# Pull the bias in if we can
if isinstance(self.attn_1, DeepSpeedDiffusersAttention):
self.attn_1.do_out_bias = False
self.attn_1_bias = self.attn_1.attn_ob
else:
self.attn_1_bias = nn.Parameter(torch.zeros_like(self.norm2_g), requires_grad=False)
# Pull the bias in if we can
if isinstance(self.attn_2, DeepSpeedDiffusersAttention):
self.attn_2.do_out_bias = False
self.attn_2_bias = self.attn_2.attn_ob
else:
self.attn_2_bias = nn.Parameter(torch.zeros_like(self.norm3_g), requires_grad=False)
self.transformer_cuda_module = load_transformer_module()
load_spatial_module()
def forward(self, hidden_states, context=None, timestep=None, **kwargs):
# In diffusers v0.12.0, several new kwargs were added. Capture
# them with **kwargs to maintain backward compatibility.
# In diffusers v0.11.0, the kwarg was renamed from 'context' to 'encoder_hidden_states';
# handling both names lets us support older and newer versions of diffusers.
if "encoder_hidden_states" in kwargs and kwargs["encoder_hidden_states"] is not None:
context = kwargs["encoder_hidden_states"]
out_norm_1 = self.transformer_cuda_module.layer_norm(hidden_states, self.norm1_g, self.norm1_b, self.norm1_eps)
out_attn_1 = self.attn_1(out_norm_1)
out_norm_2, out_attn_1 = self.transformer_cuda_module.layer_norm_residual_store_pre_ln_res(
out_attn_1, self.attn_1_bias, hidden_states, self.norm2_g, self.norm2_b, self.norm2_eps)
out_attn_2 = self.attn_2(out_norm_2, context=context)
out_norm_3, out_attn_2 = self.transformer_cuda_module.layer_norm_residual_store_pre_ln_res(
out_attn_2, self.attn_2_bias, out_attn_1, self.norm3_g, self.norm3_b, self.norm3_eps)
out_ff1 = nn.functional.linear(out_norm_3, self.ff1_w)
out_geglu = self.transformer_cuda_module.gated_activation(out_ff1, self.ff1_b, ActivationFuncType.GATED_GELU)
out_ff2 = nn.functional.linear(out_geglu, self.ff2_w)
return nhwc_bias_add(out_ff2, self.ff2_b, other=out_attn_2)
| 4,857 | 45.266667 | 119 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/diffusers_attention.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import math
import torch
from torch.autograd import Function
import torch.nn as nn
from packaging import version as pkg_version
from deepspeed.utils.logging import log_dist
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
# Cuda modules will be imported if needed
inference_module = None
minus_inf = -10000.0
triton_flash_attn = None
def load_triton_flash_attn():
global triton_flash_attn
try:
import triton
except ImportError:
raise ImportError("Please install triton 2.0+ or `pip install deepspeed[sd]`")
if pkg_version.parse(triton.__version__) < pkg_version.parse("2.0"):
raise ImportError("Please install triton 2.0+ or `pip install deepspeed[sd]`")
from .triton_ops import triton_flash_attn
class DeepSpeedDiffusersAttentionFunction(Function):
@staticmethod
def forward(ctx, input, context, input_mask, config, attn_qkvw, attn_qw, attn_kw, attn_vw, attn_qkvb,
num_attention_heads_per_partition, norm_factor, hidden_size_per_partition, attn_ow, attn_ob,
do_out_bias, score_context_func, linear_func, triton_flash_attn_kernel):
def _transpose_for_context(x):
x = x.permute(0, 2, 1, 3)
new_x_layer_shape = x.size()[:-2] + \
(hidden_size_per_partition,)
return x.reshape(*new_x_layer_shape)
def _transpose_for_scores(x):
attention_head_size = x.shape[-1] // num_attention_heads_per_partition
new_x_shape = x.size()[:-1] + (num_attention_heads_per_partition, attention_head_size)
x = x.reshape(*new_x_shape)
x = x.permute(0, 2, 1, 3)
return x.contiguous()
def selfAttention_fp(input, context, input_mask):
if config.fp16 and input.dtype == torch.float32:
input = input.half()
head_size = input.shape[-1] // config.heads
do_flash_attn = (head_size <= 128)
scale = (1 / norm_factor) * (1 / norm_factor)
if do_flash_attn and context is None:
qkv_out = linear_func(input, attn_qkvw, attn_qkvb if attn_qkvb is not None else attn_qkvw, attn_qkvb
is not None, do_flash_attn, config.heads, False)
context_layer = triton_flash_attn_kernel(qkv_out[0], qkv_out[1], qkv_out[2], scale,
input.shape[-2] % 128 == 0)
context_layer = _transpose_for_context(context_layer[:, :, :, :head_size])
else:
do_flash_attn = False
if context is not None:
query = torch.matmul(input, attn_qw)
key = torch.matmul(context, attn_kw)
value = torch.matmul(context, attn_vw)
else:
qkv = torch.matmul(input, attn_qkvw)
query, key, value = qkv.chunk(3, dim=-1)
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
query, key, value = inference_module.pad_transform_fp16(query, key, value, config.heads, do_flash_attn)
attention_scores = (torch.matmul(query, key.transpose(-1, -2)) * scale).softmax(dim=-1)
context_layer = _transpose_for_context(torch.matmul(attention_scores, value))
output = linear_func(context_layer, attn_ow, attn_ob, do_out_bias, False, config.heads, False)
return output
output = selfAttention_fp(input, context, input_mask)
return output
@staticmethod
def backward(ctx, grad_output, grad_output1, grad_output2, grad_output3):
raise RuntimeError('You are running with DeepSpeed Inference mode. \
Please switch to Training mode for running backward!')
class DeepSpeedDiffusersAttention(nn.Module):
"""Initialize the DeepSpeed Transformer Layer.
Arguments:
layer_id: The layer index starting from 0, e.g. if the model has 24 transformer layers,
layer_id will be 0, 1, 2, ..., 23 as each layer object is instantiated
config: An object of DeepSpeedInferenceConfig
"""
layer_id = 0
def __init__(
self,
config,
):
super(DeepSpeedDiffusersAttention, self).__init__()
self.config = config
self.config.layer_id = DeepSpeedDiffusersAttention.layer_id
DeepSpeedDiffusersAttention.layer_id += 1
device = get_accelerator().current_device_name() if config.bigscience_bloom else 'cpu'
qkv_size_per_partition = (self.config.hidden_size // self.config.mp_size) * 3
data_type = self.config.dtype
data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype
global inference_module
if inference_module is None:
builder = InferenceBuilder()
inference_module = builder.load()
if DeepSpeedDiffusersAttention.layer_id == 1:
log_dist(f"DeepSpeed-Attention config: {self.config.__dict__}", [0])
self.attn_qkvw = nn.Parameter(torch.empty(self.config.hidden_size,
qkv_size_per_partition,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_kw = nn.Parameter(torch.empty(self.config.hidden_size,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_vw = nn.Parameter(torch.empty(self.config.hidden_size,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_qw = nn.Parameter(torch.empty(self.config.hidden_size,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_qkvb = nn.Parameter(torch.empty(qkv_size_per_partition, dtype=data_type_fp, device=device),
requires_grad=False)
out_size_per_partition = self.config.hidden_size // self.config.mp_size
self.attn_ow = nn.Parameter(torch.empty(out_size_per_partition,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_ob = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
self.do_out_bias = True
if triton_flash_attn is None:
load_triton_flash_attn()
self.triton_flash_attn_kernel = triton_flash_attn()
self.num_attention_heads_per_partition = self.config.heads // self.config.mp_size
self.hidden_size_per_partition = self.config.hidden_size // self.config.mp_size
self.hidden_size_per_attention_head = self.config.hidden_size // self.config.heads
self.norm_factor = math.sqrt(math.sqrt(self.config.hidden_size // self.config.heads))
if self.config.scale_attn_by_inverse_layer_idx is True:
self.norm_factor *= math.sqrt(self.config.layer_id + 1)
# https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191
if self.config.dtype in [torch.float16, torch.int8]:
self.score_context_func = inference_module.softmax_context_fp16
self.linear_func = inference_module.linear_layer_fp16
self.allocate_workspace = inference_module.allocate_workspace_fp16
else:
self.score_context_func = inference_module.softmax_context_fp32
self.linear_func = inference_module.linear_layer_fp32
self.allocate_workspace = inference_module.allocate_workspace_fp32
def forward(self, input, context=None, input_mask=None):
if self.config.layer_id == 0:
self.allocate_workspace(self.config.hidden_size, self.config.heads,
input.size()[1],
input.size()[0], DeepSpeedDiffusersAttention.layer_id, self.config.mp_size, False,
0, self.config.max_out_tokens, self.config.min_out_tokens)
output = DeepSpeedDiffusersAttentionFunction.apply(input, context, input_mask, self.config, self.attn_qkvw,
self.attn_qw, self.attn_kw, self.attn_vw, self.attn_qkvb,
self.num_attention_heads_per_partition, self.norm_factor,
self.hidden_size_per_partition, self.attn_ow, self.attn_ob,
self.do_out_bias, self.score_context_func, self.linear_func,
self.triton_flash_attn_kernel)
return output
| 9,830 | 48.903553 | 121 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/base.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from ..config import DeepSpeedInferenceConfig
from deepspeed.ops.op_builder import InferenceBuilder
class BaseOp(torch.nn.Module):
inference_module = None
def __init__(self, config: DeepSpeedInferenceConfig):
super(BaseOp, self).__init__()
self.config = config
if BaseOp.inference_module is None:
builder = InferenceBuilder()
BaseOp.inference_module = builder.load()
| 536 | 24.571429 | 57 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/softmax.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import torch.nn.functional as F
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
class SoftmaxOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(SoftmaxOp, self).__init__(config)
self.num_attention_heads_per_partition = config.heads // config.mp_size
try:
if self.config.dtype in [torch.float16, torch.int8]:
self.softmax_func = self.inference_module.softmax_fp16
elif self.config.dtype == torch.bfloat16:
self.softmax_func = self.inference_module.softmax_bf16
else:
self.softmax_func = self.inference_module.softmax_fp32
except AttributeError:
self.softmax_func = self.softmax_fallback
def softmax_fallback(self, attn_scores, attn_mask, alibi, triangular, recompute, local_attention, window_size,
async_op, layer_scale, head_offset, mp_size):
if os.environ.get('DS_KI_FALLBACK') == 'True':
alibi = alibi[head_offset:head_offset + self.num_attention_heads_per_partition]
input_dtype = attn_scores.dtype
if (triangular):
tri = ~torch.tril(torch.ones(attn_scores.size(), device=attn_scores.device)).to(bool)
attn_scores = torch.masked_fill(attn_scores * layer_scale, tri, torch.finfo(input_dtype).min)
if alibi is not None:
attn_scores += alibi
if attn_mask is not None:
# expand attn_mask from 2 dims to 4 dims by inserting two singleton dims in the middle
attn_mask = attn_mask[:, None, None, :]
attn_scores += attn_mask
output = F.softmax(attn_scores, dim=-1, dtype=torch.float32).to(input_dtype)
return output
else:
raise NotImplementedError
def forward(self, attn_scores: torch.Tensor, attn_mask: torch.Tensor, alibi: torch.Tensor, triangular: bool,
recompute: bool, local_attention: bool, window_size: int, async_op: bool, layer_scale: float,
head_offset: int):
output = self.softmax_func(attn_scores, attn_mask, alibi, triangular, recompute, local_attention, window_size,
async_op, layer_scale, head_offset, self.config.mp_size)
return output
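# --- Illustrative sketch (editor's addition): what the python fallback above computes ---
# The fused kernels combine scaling, causal masking and softmax; the fallback (enabled with the
# DS_KI_FALLBACK=True environment variable) reduces to the snippet below for the common
# triangular case with no alibi tensor and no attention mask. attn_scores is assumed to be a
# floating-point tensor of shape [batch * heads, q_len, k_len].
def _example_triangular_softmax(attn_scores: torch.Tensor, layer_scale: float = 1.0) -> torch.Tensor:
    tri = ~torch.tril(torch.ones(attn_scores.size(), device=attn_scores.device)).to(bool)
    masked = torch.masked_fill(attn_scores * layer_scale, tri, torch.finfo(attn_scores.dtype).min)
    return F.softmax(masked, dim=-1, dtype=torch.float32).to(attn_scores.dtype)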
| 2,460 | 44.574074 | 118 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import torch.nn.functional as F
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
import deepspeed
from deepspeed.utils.types import NormType
class QKVGemmOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(QKVGemmOp, self).__init__(config)
try:
if self.config.norm_type == NormType.LayerNorm:
if self.config.dtype in [torch.float16, torch.int8]:
if deepspeed.HAS_TRITON and self.config.use_triton and self.config.dtype == torch.float16:
from deepspeed.ops.transformer.inference.triton.ops import qkv_gemm_func as _triton_qkv_gemm_func
self.qkv_gemm_func = _triton_qkv_gemm_func
triton_autotune = config.triton_autotune and config.layer_id == 0
if triton_autotune:
__class__._triton_autotune(2, self.config.max_out_tokens, self.config.hidden_size)
else:
self.qkv_gemm_func = self.inference_module.qkv_gemm_fp16 # type: ignore
elif self.config.dtype == torch.bfloat16:
self.qkv_gemm_func = self.inference_module.qkv_gemm_bf16
else:
self.qkv_gemm_func = self.inference_module.qkv_gemm_fp32 # type: ignore
elif self.config.norm_type == NormType.RMSNorm:
if self.config.dtype in [torch.float16, torch.int8]:
self.qkv_gemm_func = self.inference_module.rms_qkv_gemm_fp16 # type: ignore
elif self.config.dtype == torch.bfloat16:
self.qkv_gemm_func = self.inference_module.rms_qkv_gemm_bf16
else:
self.qkv_gemm_func = self.inference_module.rms_qkv_gemm_fp32 # type: ignore
except AttributeError:
if self.config.norm_type == NormType.LayerNorm:
self.qkv_gemm_func = self.qkv_gemm_fallback
elif self.config.norm_type == NormType.RMSNorm:
self.qkv_gemm_func = self.rms_qkv_gemm_fallback
@staticmethod
def _triton_autotune(min_seqlen, max_seqlen, hidden_size, dtype=torch.float16):
from deepspeed.ops.transformer.inference.triton.matmul_ext import Fp16Matmul, matmul
seqlen = [(min_seqlen + i)
for i in range(0, max_seqlen - min_seqlen + Fp16Matmul._cache_stride + 1, Fp16Matmul._cache_stride)]
Fp16Matmul._read_autotune_table()
for N in seqlen:
A = torch.randn((N, hidden_size), dtype=dtype, device='cuda')
B = torch.randn((hidden_size, 3 * hidden_size), dtype=dtype, device='cuda')
matmul(A, B)
Fp16Matmul._update_autotune_table()
def qkv_gemm_fallback(self, input, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose):
if os.environ.get('DS_KI_FALLBACK') == 'True' and not transpose:
inp_norm = F.layer_norm(input, (input.shape[2], ), gamma, beta, eps)
tmp = torch.matmul(inp_norm, weight)
if add_bias:
tmp += bias
output = [tmp, inp_norm]
return output
else:
raise NotImplementedError
def rms_qkv_gemm_fallback(self, input, weight, q_scale, gamma, eps, q_int8, transpose):
raise NotImplementedError
def forward(self, input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, gamma: torch.Tensor,
beta: torch.Tensor):
add_bias = bias is not None
bias = bias if add_bias else torch.empty(1) # type: ignore
q_scale = weight.scale if hasattr(weight, 'scale') else torch.empty(1) # type: ignore
q_int8 = self.config.dtype == torch.int8
if self.config.norm_type == NormType.LayerNorm:
output, norm = self.qkv_gemm_func(input, weight, q_scale, bias, gamma, beta, self.config.epsilon, add_bias,
q_int8, self.config.transposed_mode)
else:
output, norm = self.qkv_gemm_func(input, weight, q_scale, gamma, self.config.epsilon, q_int8,
self.config.transposed_mode)
return output, norm
| 4,369 | 48.101124 | 121 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
import deepspeed
class GELUGemmOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(GELUGemmOp, self).__init__(config)
try:
if self.config.dtype in [torch.float16, torch.int8]:
if deepspeed.HAS_TRITON and self.config.use_triton and self.config.dtype == torch.float16:
from deepspeed.ops.transformer.inference.triton.ops import fused_gemm_gelu as _triton_fused_gemm_gelu
self.fused_gemm_gelu = _triton_fused_gemm_gelu # type: ignore
else:
self.fused_gemm_gelu = self.inference_module.fused_gemm_gelu_fp16 # type: ignore
elif self.config.dtype == torch.bfloat16:
self.fused_gemm_gelu = self.inference_module.fused_gemm_gelu_bf16 # type: ignore
else:
self.fused_gemm_gelu = self.inference_module.fused_gemm_gelu_fp32 # type: ignore
except AttributeError:
self.fused_gemm_gelu = self.gelu_gemm_fallback
def gelu_gemm_fallback(self, input, weight, scale, bias, out, out_scale, dtype, transpose):
raise NotImplementedError
def forward(self, input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, weight_out: torch.Tensor):
output = self.fused_gemm_gelu(
input,
weight,
weight.scale if hasattr(weight, 'scale') else torch.empty(1), # type: ignore
bias,
weight_out,
weight_out.scale if hasattr(weight_out, 'scale') else torch.empty(1), # type: ignore
self.config.dtype == torch.int8,
self.config.transposed_mode)
return output
| 1,867 | 39.608696 | 121 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/softmax_context.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed import comm as dist
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
class SoftmaxContextOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(SoftmaxContextOp, self).__init__(config)
try:
if self.config.dtype in [torch.float16, torch.int8]:
self.softmax_context_func = self.inference_module.softmax_context_fp16
elif self.config.dtype == torch.bfloat16:
self.softmax_context_func = self.inference_module.softmax_context_bf16
else:
self.softmax_context_func = self.inference_module.softmax_context_fp32
except AttributeError:
self.softmax_context_func = self.softmax_context_fallback
def softmax_context_fallback(self, query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads,
norm_factor, triangular_masking, local_attention, window_size, no_masking, layer_id,
num_layers, alibi):
raise NotImplementedError
def forward(self, query_key_value: torch.Tensor, attn_mask: torch.Tensor, heads: int, norm_factor: float,
no_masking: bool, layer_id: int, num_layers: int, alibi: torch.Tensor):
if alibi is not None:
batch_heads = query_key_value.shape[0] * heads
offset = dist.get_rank() * batch_heads if dist.is_initialized() else 0
alibi = alibi[offset:batch_heads + offset, :, :]
else:
alibi = torch.empty(1)
output = self.softmax_context_func(query_key_value, attn_mask, self.config.rotary_dim, self.config.rotate_half,
self.config.rotate_every_two, heads, norm_factor,
self.config.triangular_masking, self.config.local_attention,
self.config.window_size, no_masking, layer_id, num_layers, alibi)
return output
| 2,131 | 44.361702 | 119 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/linear.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
import deepspeed
class LinearOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(LinearOp, self).__init__(config)
try:
if self.config.dtype in [torch.float16, torch.int8]:
if deepspeed.HAS_TRITON and self.config.use_triton and self.config.dtype == torch.float16:
from deepspeed.ops.transformer.inference.triton.ops import linear_func as _triton_linear_func
self.linear_func = _triton_linear_func
triton_autotune = config.triton_autotune and config.layer_id == 0
if triton_autotune:
__class__._triton_autotune(2, self.config.max_out_tokens, self.config.hidden_size)
else:
self.linear_func = self.inference_module.linear_layer_fp16
elif self.config.dtype == torch.bfloat16:
self.linear_func = self.inference_module.linear_layer_bf16
else:
self.linear_func = self.inference_module.linear_layer_fp32
except AttributeError:
self.linear_func = self.linear_fallback
def linear_fallback(self, input, weight, bias, add_bias, do_flash_attn, num_heads, transpose):
raise NotImplementedError
def forward(self,
input: torch.Tensor,
weight: torch.Tensor,
bias: torch.Tensor,
add_bias: bool,
do_flash_attn: bool,
num_heads: int,
external_cache: bool = None,
num_layers: int = None):
qkv_out = self.linear_func(input, weight, bias, add_bias, do_flash_attn, num_heads,
self.config.transposed_mode)
return qkv_out
@staticmethod
def _triton_autotune(min_seqlen, max_seqlen, hidden_size, dtype=torch.float16):
from deepspeed.ops.transformer.inference.triton.matmul_ext import Fp16Matmul, matmul
seqlen = [(min_seqlen + i)
for i in range(0, max_seqlen - min_seqlen + Fp16Matmul._cache_stride + 1, Fp16Matmul._cache_stride)]
Fp16Matmul._read_autotune_table()
for N in seqlen:
A = torch.randn((N, hidden_size), dtype=dtype, device='cuda')
B = torch.randn((hidden_size, 3 * hidden_size), dtype=dtype, device='cuda')
matmul(A, B)
Fp16Matmul._update_autotune_table()
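# --- Illustrative sketch (editor's addition): the sequence-length grid used by the warm-up above ---
# The autotune sweep walks from min_seqlen up to (and slightly past) max_seqlen in steps of the
# matmul cache stride; the values below are hypothetical.
def _example_autotune_seqlen_grid(min_seqlen=2, max_seqlen=1024, cache_stride=256):
    seqlen = [min_seqlen + i for i in range(0, max_seqlen - min_seqlen + cache_stride + 1, cache_stride)]
    return seqlen  # -> [2, 258, 514, 770, 1026]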
| 2,683 | 43 | 118 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from typing import Optional
import os
import torch
import torch.nn.functional as F
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
from deepspeed.utils.types import NormType
class MLPGemmOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(MLPGemmOp, self).__init__(config)
try:
if self.config.norm_type == NormType.LayerNorm:
if self.config.dtype in [
torch.float16, torch.int8
]:  # the non-Triton CUDA kernel has higher performance for the MLP than mlp_gemm_func in triton.ops
self.mlp_gemm_func = self.inference_module.mlp_gemm_fp16 # type: ignore
elif self.config.dtype == torch.bfloat16:
self.mlp_gemm_func = self.inference_module.mlp_gemm_bf16
else:
self.mlp_gemm_func = self.inference_module.mlp_gemm_fp32 # type: ignore
elif self.config.norm_type == NormType.RMSNorm:
if self.config.dtype in [torch.float16, torch.int8]:
self.mlp_gemm_func = self.inference_module.rms_mlp_gemm_fp16 # type: ignore
elif self.config.dtype == torch.bfloat16:
self.mlp_gemm_func = self.inference_module.rms_mlp_gemm_bf16
else:
self.mlp_gemm_func = self.inference_module.rms_mlp_gemm_fp32 # type: ignore
except AttributeError:
if self.config.norm_type == NormType.LayerNorm:
self.mlp_gemm_func = self.mlp_gemm_fallback
elif self.config.norm_type == NormType.RMSNorm:
self.mlp_gemm_func = self.rms_mlp_gemm_fallback
def mlp_gemm_fallback(self, input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps,
pre_layer_norm, mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type,
transpose):
if os.environ.get('DS_KI_FALLBACK') == 'True' and mlp_after_attn and not transpose:
residual_add = F.layer_norm(input + residual + input_bias, (input.shape[2], ), gamma, beta,
self.config.epsilon)
tmp = torch.matmul(residual_add, weight_interm)
tmp = F.gelu(tmp + bias)
output = torch.matmul(tmp, weight_out)
return (output, residual_add)
else:
raise NotImplementedError
def rms_mlp_gemm_fallback(self, input, residual, weight_interm, weight_out, gamma, eps, interm_scale, out_scale,
dtype, mlp_act_func_type, transpose):
raise NotImplementedError
def forward(self,
input: torch.Tensor,
residual: torch.Tensor,
weight_interm: torch.Tensor,
weight_out: torch.Tensor,
input_bias: Optional[torch.Tensor] = None,
bias: Optional[torch.Tensor] = None,
gamma: Optional[torch.Tensor] = None,
beta: Optional[torch.Tensor] = None):
if self.config.norm_type == NormType.LayerNorm:
output, residual_add = self.mlp_gemm_func(
input,
residual,
input_bias,
weight_interm,
weight_out,
bias,
gamma,
beta,
self.config.epsilon,
self.config.pre_layer_norm,
self.config.mlp_after_attn,
weight_interm.scale if hasattr(weight_interm, 'scale') else torch.empty(1), # type: ignore
weight_out.scale if hasattr(weight_out, 'scale') else torch.empty(1), # type: ignore
self.config.dtype == torch.int8,
self.config.mlp_act_func_type,
self.config.transposed_mode)
else:
output, residual_add = self.mlp_gemm_func(
input,
residual,
weight_interm,
weight_out,
gamma,
self.config.epsilon,
weight_interm.scale if hasattr(weight_interm, 'scale') else torch.empty(1), # type: ignore
weight_out.scale if hasattr(weight_out, 'scale') else torch.empty(1), # type: ignore
self.config.dtype == torch.int8,
self.config.mlp_act_func_type,
self.config.transposed_mode)
return output, residual_add
| 4,605 | 44.60396 | 116 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/residual_add.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
from typing import Optional
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
class ResidualAddOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(ResidualAddOp, self).__init__(config)
try:
if self.config.dtype in [torch.float16, torch.int8]:
self.residual_add_func = self.inference_module.residual_add_bias_fp16
elif self.config.dtype == torch.bfloat16:
self.residual_add_func = self.inference_module.residual_add_bias_bf16
else:
self.residual_add_func = self.inference_module.residual_add_bias_fp32
except AttributeError:
self.residual_add_func = None
try:
self._vector_add = self.inference_module._vector_add
except AttributeError:
self._vector_add = None
def forward(self,
hidden_state: torch.Tensor,
residual: torch.Tensor,
add_bias: bool,
attention_output: Optional[torch.Tensor] = None,
residual_add: Optional[torch.Tensor] = None,
attention_bias: Optional[torch.Tensor] = None,
final_bias: Optional[torch.Tensor] = None):
if self.residual_add_func is not None:
if final_bias is None:
residual = self._vector_add(residual, hidden_state, 1.0 / self.config.mp_size)
else:
if not self.config.pre_layer_norm and residual_add is not None:
# only use residual_add if it is set and we are not using pre-layer-norm
residual = residual_add
self.residual_add_func(hidden_state, residual, attention_output, attention_bias, final_bias,
self.config.mp_size, self.config.mlp_after_attn, add_bias,
self.config.pre_layer_norm)
else:
# fallback
if os.environ.get('DS_KI_FALLBACK') == 'True' and self.config.mlp_after_attn:
if self.config.pre_layer_norm:
tmp = (residual.float() + attention_output.float() + attention_bias.float() +
final_bias.float()) / self.config.mp_size + hidden_state.float()
else:
tmp = residual.float() + hidden_state.float() + final_bias.float()
input_dtype = hidden_state.dtype
residual = tmp.to(input_dtype)
else:
raise NotImplementedError
return residual
| 2,708 | 40.676923 | 108 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
from ..config import DeepSpeedInferenceConfig
from .base import BaseOp
import deepspeed
class VectorMatMulOp(BaseOp):
def __init__(self, config: DeepSpeedInferenceConfig):
super(VectorMatMulOp, self).__init__(config)
try:
if self.config.dtype == torch.float16:
if deepspeed.HAS_TRITON and config.use_triton:
from deepspeed.ops.transformer.inference.triton.ops import vector_matmul_func as _triton_vector_matmul_func
self.vector_matmul_func = _triton_vector_matmul_func
triton_autotune = config.triton_autotune and config.layer_id == 0
if triton_autotune:
__class__._triton_autotune(2, self.config.max_out_tokens, self.config.hidden_size)
else:
self.vector_matmul_func = self.inference_module.vector_matmul_fp16
elif self.config.dtype == torch.int8:
self.vector_matmul_func = self.inference_module.vector_matmul_fp16
elif self.config.dtype == torch.bfloat16:
self.vector_matmul_func = self.inference_module.vector_matmul_bf16
else:
self.vector_matmul_func = self.inference_module.vector_matmul_fp32
except AttributeError:
self.vector_matmul_func = self.vector_matmul_fallback
def vector_matmul_fallback(self, input, weight, async_op, q_scale, q_int8, transpose):
if os.environ.get('DS_KI_FALLBACK') == 'True' and not transpose:
return torch.matmul(input, weight)
else:
raise NotImplementedError
def forward(self, input: torch.Tensor, weight: torch.Tensor, async_op: bool = False):
q_scale = weight.scale if hasattr(weight, 'scale') else torch.empty(1)
q_int8 = self.config.dtype == torch.int8
output = self.vector_matmul_func(input, weight, async_op, q_scale, q_int8, self.config.transposed_mode)
return output
@staticmethod
def _triton_autotune(min_seqlen, max_seqlen, hidden_size, dtype=torch.float16):
from deepspeed.ops.transformer.inference.triton.matmul_ext import Fp16Matmul, matmul
seqlen = [(min_seqlen + i)
for i in range(0, max_seqlen - min_seqlen + Fp16Matmul._cache_stride + 1, Fp16Matmul._cache_stride)]
Fp16Matmul._read_autotune_table()
for N in seqlen:
A = torch.randn((N, hidden_size), dtype=dtype, device='cuda')
B = torch.randn((hidden_size, hidden_size), dtype=dtype, device='cuda')
matmul(A, B)
Fp16Matmul._update_autotune_table()
| 2,750 | 45.627119 | 127 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/triton/softmax.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import triton
import triton.language as tl
'''
softmax
modified the triton kernel in
https://github.com/openai/triton/blob/34817ecc954a6f4ca7b4dfb352fdde1f8bd49ca5/python/tutorials/02-fused-softmax.py
'''
@triton.jit
def softmax_kernel(output_ptr, input_ptr, stride, n_cols, BLOCK_SIZE: tl.constexpr):
row_idx = tl.program_id(0)
row_start_ptr = input_ptr + row_idx * stride
col_offsets = tl.arange(0, BLOCK_SIZE)
input_ptrs = row_start_ptr + col_offsets
row = tl.load(input_ptrs, mask=col_offsets < n_cols, other=-float('inf')).to(tl.float32)
row_minus_max = row - tl.max(row, axis=0)
numerator = tl.exp(row_minus_max)
denominator = tl.sum(numerator, axis=0)
softmax_output = numerator / denominator
output_row_start_ptr = output_ptr + row_idx * stride
output_ptrs = output_row_start_ptr + col_offsets
tl.store(output_ptrs, softmax_output, mask=col_offsets < n_cols)
@triton.jit
def masked_softmax_kernel(output_ptr, input_ptr, stride, mask_ptr, mask_stride, n_cols, BLOCK_SIZE: tl.constexpr):
row_idx = tl.program_id(0)
row_start_ptr = input_ptr + row_idx * stride
col_offsets = tl.arange(0, BLOCK_SIZE)
input_ptrs = row_start_ptr + col_offsets
mask_ptrs = mask_ptr + col_offsets + row_idx * mask_stride # mask_stride is 0 for 1d mask
row = tl.load(input_ptrs, mask=col_offsets < n_cols, other=-float('inf')).to(tl.float32)
mask = tl.load(mask_ptrs, mask=col_offsets < n_cols, other=0).to(tl.float32)
row_minus_max = row - tl.max(row, axis=0)
row_minus_max = row_minus_max + mask
numerator = tl.exp(row_minus_max)
denominator = tl.sum(numerator, axis=0)
softmax_output = numerator / denominator
output_row_start_ptr = output_ptr + row_idx * stride
output_ptrs = output_row_start_ptr + col_offsets
tl.store(output_ptrs, softmax_output, mask=col_offsets < n_cols)
def softmax(input: torch.Tensor, mask: torch.Tensor = None, dim=-1) -> torch.Tensor:
assert input.is_contiguous()
assert (dim == -1) or (dim == len(input.shape) - 1), "Only dim=-1 is supported"
use_mask = False if mask is None else True
input_arg = input.view(-1, input.shape[-1])
n_rows, n_cols = input_arg.shape
BLOCK_SIZE = max(triton.next_power_of_2(n_cols), 2)
num_warps = 4
if BLOCK_SIZE >= 2048:
num_warps = 8
if BLOCK_SIZE >= 4096:
num_warps = 16
# Allocate output
output = torch.empty_like(input)
if use_mask:
assert mask.is_contiguous()
mask = mask.view(-1, mask.shape[-1])
mask_stride = mask.shape[-1] if mask.shape[-2] > 1 else 0
masked_softmax_kernel[(n_rows, )](
output,
input,
input_arg.stride(0),
mask,
mask_stride,
n_cols,
num_warps=num_warps,
BLOCK_SIZE=BLOCK_SIZE,
)
else:
softmax_kernel[(n_rows, )](
output,
input,
input_arg.stride(0),
n_cols,
num_warps=num_warps,
BLOCK_SIZE=BLOCK_SIZE,
)
return output
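# --- Illustrative sketch (editor's addition): calling the wrapper above ---
# The tensors are hypothetical and the kernel needs a Triton-capable GPU, so this only sketches
# the expected layout: softmax is taken over the last dimension, and the optional mask is an
# additive mask (e.g. large negative values at padded positions) with either a single row or one
# row per softmax row.
def _example_softmax_usage():
    scores = torch.randn(4, 16, 128, 128, dtype=torch.float16, device='cuda')  # [batch, heads, q_len, k_len]
    mask = torch.zeros(1, 128, dtype=torch.float16, device='cuda')  # one additive row, broadcast to every row
    return softmax(scores, mask)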
| 3,208 | 34.655556 | 115 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/triton/matmul_ext.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import triton
import os
from filelock import FileLock
import deepspeed.ops.transformer.inference.triton.triton_matmul_kernel as triton_matmul_kernel
import pickle
from io import open
import deepspeed
from pathlib import Path
import atexit
# -----------------------------------------------------------------------------
# util class/functions for triton
def _default_cache_dir():
return os.path.join(Path.home(), ".triton", "autotune")
def bias_add_activation(C, bias=None, activation=""):
if bias is not None:
C += bias
# activation
if activation == "relu":
        relu = torch.nn.ReLU()
C = relu(C)
elif activation == "leaky_relu":
leaky_relu = torch.nn.LeakyReLU(0.01)
C = leaky_relu(C)
elif activation == "gelu":
sigmoid = torch.nn.Sigmoid()
C = sigmoid(1.702 * C) * C
elif activation == "sigmoid":
sigmoid = torch.nn.Sigmoid()
C = sigmoid(C)
return C
class AutotuneCacheManager:
"""
Cache manager for autotune
"""
def __init__(self, key):
self.key = key
self.file_path = None
self.lock_path = None
# if caching is enabled, get the lock and bin path
self.cache_dir = os.environ.get('TRITON_CACHE_DIR', _default_cache_dir())
if self.cache_dir:
os.makedirs(self.cache_dir, exist_ok=True)
if self.cache_dir:
self.file_path = os.path.join(self.cache_dir, self.key + ".pickle")
self.lock_path = self.file_path + ".lock"
def has_file(self):
return self.file_path and os.path.exists(self.file_path)
def put(self, table):
if self.file_path:
assert self.lock_path is not None
with FileLock(self.lock_path):
with open(self.file_path + ".tmp", 'wb') as handle:
pickle.dump(table, handle)
os.rename(self.file_path + ".tmp", self.file_path)
def load(self):
if os.path.exists(self.file_path):
with open(self.file_path, 'rb') as handle:
loaded_dict = pickle.load(handle)
return loaded_dict
else:
return None
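def _demo_autotune_cache_roundtrip():
    # Usage sketch (illustrative; the cache key below is a made-up example):
    # persist a small kernel-config table and read it back from the autotune
    # cache directory (TRITON_CACHE_DIR or ~/.triton/autotune by default).
    manager = AutotuneCacheManager("example_kernel")
    manager.put({"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 32})
    return manager.load()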
# -----------------------------------------------------------------------------
# triton matmul class
class MatmulExt(torch.autograd.Function):
"""
a wrapper class that can call different triton matmul kernels depending on the input parameters
"""
@staticmethod
def forward(A, B, bias=None, activation="", use_triton=True, update_autotune_table=False):
"""
A: input, activation matrix A
B: input, weight matrix B
"""
matmul = None
quantize_activation = False
Batch = 0
if len(A.shape) == 3: # if A is 3d-tensor where batch index is given as 0-axis
assert A.is_contiguous(), "matrix A must be contiguous"
Batch, M, K = A.shape
A = A.view(-1, K)
# fp16 activation and fp16 weight matmul into fp16 output
matmul = fp16_matmul
C = matmul.forward(A, B, use_triton=use_triton, bias=bias, activation=activation)
if matmul and update_autotune_table:
matmul._update_autotune_table()
if Batch > 0:
C = C.view(Batch, M, -1)
return C
class TritonMatmul(torch.autograd.Function):
"""
triton matmul kernel superclass
"""
def __init__(self):
pass
@staticmethod
def _ref_forward(A, B, ref_dtype=torch.float32):
C = torch.matmul(A.type(ref_dtype), B.type(ref_dtype))
return C
@staticmethod
def _read_autotune_table(cache_key, triton_kernel):
cache_manager = AutotuneCacheManager(cache_key)
table = cache_manager.load()
if table:
triton_kernel.cache = table
@staticmethod
def _write_autotune_table(cache_key, triton_kernel):
cache_manager = AutotuneCacheManager(cache_key)
cache_manager.put(triton_kernel.cache)
@staticmethod
def _update_autotune_table(cache_key, triton_kernel):
cache_manager = AutotuneCacheManager(cache_key)
autotune_table = cache_manager.load()
if autotune_table is None:
autotune_table = dict()
autotune_table.update(triton_kernel.cache) # always overwrite with the new autotune results
cache_manager = AutotuneCacheManager(cache_key)
cache_manager.put(autotune_table)
@staticmethod
def forward(
A,
B,
ref_dtype=torch.float32, # fp32 only
bias=None,
activation=""):
C = torch.matmul(A.type(ref_dtype), B.type(ref_dtype))
C = bias_add_activation(C, bias, activation)
return C
class Fp16Matmul(TritonMatmul):
"""
fp16 matrix multiplication kernel
dtypes: fp16 x fp16 = fp16
"""
_2d_kernel = triton_matmul_kernel._fp_matmul
_4d_kernel = triton_matmul_kernel.matmul_4d_kernel
_cache_stride = 32
def __init__(self, read_cache=True):
super().__init__()
if read_cache:
__class__._read_autotune_table()
def skip_autotune(self):
__class__._2d_kernel.configs = [__class__._2d_kernel.configs[0]]
__class__._4d_kernel.configs = [__class__._4d_kernel.configs[0]]
@staticmethod
def forward(A, B, use_triton=True, bias=None, activation=""):
if use_triton:
device = A.device
# handle non-contiguous inputs if necessary
if A.stride(0) > 1 and A.stride(1) > 1:
A = A.contiguous()
if B.stride(0) > 1 and B.stride(1) > 1:
B = B.contiguous()
# checks constraints
assert A.shape[1] == B.shape[0], "incompatible dimensions"
M, K = A.shape
_, N = B.shape
# allocates output
C = torch.empty((M, N), device=device, dtype=A.dtype)
# accumulator types
ACC_TYPE = triton.language.float32 if A.dtype in [torch.float16, torch.bfloat16, torch.float32
] else triton.language.int32
# launch kernel
grid = lambda META: (triton.cdiv(M, META['BLOCK_M']) * triton.cdiv(N, META['BLOCK_N']), META['SPLIT_K'])
__class__._2d_kernel[grid](A,
B,
C,
M,
N,
K,
bias,
A.stride(0),
A.stride(1),
B.stride(0),
B.stride(1),
C.stride(0),
C.stride(1),
M // __class__._cache_stride,
N // __class__._cache_stride,
K // __class__._cache_stride,
GROUP_M=8,
ACC_TYPE=ACC_TYPE,
BIAS_ADD=(0 if bias is None else 1),
ACTIVATION=activation)
else:
C = torch.matmul(A, B)
return C
@staticmethod
def _matmul_4d(a, b):
assert a.shape[-1] == b.shape[-2], "incompatible dimensions"
assert a.is_contiguous(), "matrix A must be contiguous"
assert b.is_contiguous(), "matrix B must be contiguous"
B, H, M, K = a.shape
B, H, K, N = b.shape
assert K > 1, "inner-product dimension K should be larger than 1"
c = torch.empty((B, H, M, N), device=a.device, dtype=a.dtype)
grid = lambda META: (
triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),
H,
B,
)
__class__._4d_kernel[grid](
a,
b,
c,
M,
N,
K,
M // __class__._cache_stride,
N // __class__._cache_stride,
K // __class__._cache_stride,
a.stride(0),
a.stride(1),
a.stride(2),
a.stride(3),
b.stride(0),
b.stride(1),
b.stride(2),
b.stride(3),
c.stride(0),
c.stride(1),
c.stride(2),
c.stride(3),
scale=-1.0,
MASK=False,
)
return c
@staticmethod
def _score_4d_matmul(input, head_size, input_mask, scale=-1.0):
assert input.is_contiguous(), "matrix input must be contiguous"
batches = input.shape[0]
d_model = input.shape[-1] // 3
num_of_heads = d_model // head_size
q = input[:, :, :d_model]
k = input[:, :, d_model:d_model * 2]
q = q.view(batches, -1, num_of_heads, head_size)
k = k.view(batches, -1, num_of_heads, head_size)
# checks constraints
assert q.shape == k.shape, "incompatible dimensions"
B, M, H, K = q.shape
B, N, H, K = k.shape
assert K > 1, "inner-product dimension K should be larger than 1"
# allocates output
output = torch.empty((B, H, M, N), device=q.device, dtype=q.dtype)
grid = lambda META: (
triton.cdiv(M, META["BLOCK_SIZE_M"]) * triton.cdiv(N, META["BLOCK_SIZE_N"]),
H,
B,
)
__class__._4d_kernel[grid](
q,
k,
output,
M,
N,
K,
M // __class__._cache_stride,
N // __class__._cache_stride,
K // __class__._cache_stride,
q.stride(0),
q.stride(2),
q.stride(1),
q.stride(3),
k.stride(0),
k.stride(2),
k.stride(3),
k.stride(1),
output.stride(0),
output.stride(1),
output.stride(2),
output.stride(3),
scale=scale,
MASK=False,
)
return output
@staticmethod
def _context_4d_matmul(prob, input, head_size):
assert prob.is_contiguous(), "matrix prob must be contiguous"
assert input.is_contiguous(), "matrix input must be contiguous"
batches = input.shape[0]
d_model = input.shape[-1] // 3
num_of_heads = d_model // head_size
v = input[:, :, d_model * 2:]
v = v.view(batches, -1, num_of_heads, head_size)
# checks constraints
assert (prob.shape[0] == v.shape[0] and prob.shape[1] == v.shape[2] and prob.shape[2] == v.shape[1]
and prob.shape[3] == v.shape[1]), "incompatible dimensions"
B, H, M, K = prob.shape
B, K, H, N = v.shape
assert K > 1, "inner-product dimension K should be larger than 1"
# allocates output
output = torch.empty((B, M, H, N), device=v.device, dtype=v.dtype)
grid = lambda META: (
triton.cdiv(M, META["BLOCK_SIZE_M"]) * triton.cdiv(N, META["BLOCK_SIZE_N"]),
H,
B,
)
__class__._4d_kernel[grid](
prob,
v,
output,
M,
N,
K,
M // __class__._cache_stride,
N // __class__._cache_stride,
K // __class__._cache_stride,
prob.stride(0),
prob.stride(1),
prob.stride(2),
prob.stride(3),
v.stride(0),
v.stride(2),
v.stride(1),
v.stride(3),
# Here we also transpose the output when writing to memory.
output.stride(0),
output.stride(2),
output.stride(1),
output.stride(3),
scale=-1,
MASK=False,
)
return output.view(batches, -1, d_model)
@staticmethod
def _ref_forward(A, B, ref_dtype=torch.float32, bias=None, activation=""):
C = torch.matmul(A.type(ref_dtype), B.type(ref_dtype))
C = bias_add_activation(C, bias, activation)
return C
@staticmethod
def _check_parity(A,
B,
output_dtype,
SA=None,
SB=None,
qblock_size=None,
ref_dtype=torch.float32,
tol=0.01,
use_triton=True,
bias=None,
activation=""):
torch_output = __class__._ref_forward(A, B, ref_dtype=ref_dtype, bias=bias, activation=activation)
triton_output = __class__.forward(A, B, use_triton=use_triton, bias=bias, activation=activation)
assert triton.testing.allclose(triton_output.cpu().type(torch_output.dtype), torch_output.cpu(), tol=tol)
print(f"{__class__.__name__}: PASSed the parity check")
return triton_output, torch_output
@staticmethod
def _read_autotune_table():
TritonMatmul._read_autotune_table(__class__.__name__ + "_2d_kernel", __class__._2d_kernel)
TritonMatmul._read_autotune_table(__class__.__name__ + "_4d_kernel", __class__._4d_kernel)
@staticmethod
def _write_autotune_table():
TritonMatmul._write_autotune_table(__class__.__name__ + "_2d_kernel", __class__._2d_kernel)
TritonMatmul._write_autotune_table(__class__.__name__ + "_4d_kernel", __class__._4d_kernel)
@staticmethod
def _update_autotune_table():
TritonMatmul._update_autotune_table(__class__.__name__ + "_2d_kernel", __class__._2d_kernel)
TritonMatmul._update_autotune_table(__class__.__name__ + "_4d_kernel", __class__._4d_kernel)
# -----------------------------------------------------------------------------
# mapping
if deepspeed.HAS_TRITON:
fp16_matmul = Fp16Matmul()
matmul = MatmulExt.forward
matmul_4d = fp16_matmul._matmul_4d
score_4d_matmul = fp16_matmul._score_4d_matmul
context_4d_matmul = fp16_matmul._context_4d_matmul
else:
fp16_matmul = None
matmul = None
matmul_4d = None
score_4d_matmul = None
context_4d_matmul = None
@atexit.register
def matmul_ext_update_autotune_table():
if deepspeed.HAS_TRITON:
fp16_matmul._update_autotune_table()
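if __name__ == "__main__":
    # Usage sketch (illustrative; assumes a CUDA device and a Triton-enabled
    # DeepSpeed build): run the fp16 wrapper once and check it against torch.
    if deepspeed.HAS_TRITON:
        A = torch.randn(8, 64, dtype=torch.float16, device='cuda')
        B = torch.randn(64, 32, dtype=torch.float16, device='cuda')
        C = matmul(A, B)
        print("max abs diff:", (C - torch.matmul(A, B)).abs().max().item())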
| 14,630 | 31.878652 | 116 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/triton/gelu.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import triton
import triton.language as tl
from deepspeed.accelerator import get_accelerator
@triton.jit
def gelu_functor(x):
# Using approximation introduces greater parity errors.
# return tl.sigmoid(1.702 * x) * x
return x * 0.5 * (1.0 + tl.libdevice.erf(x / 1.41421356237))
@triton.jit
def gelu_kernel(x_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(x_ptr + offsets, mask=mask)
output = gelu_functor(x)
tl.store(output_ptr + offsets, output, mask=mask)
def gelu(activations: torch.Tensor) -> torch.Tensor:
assert activations.is_contiguous()
assert get_accelerator().on_accelerator(activations)
output = torch.empty_like(activations)
n_elements = output.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']), )
gelu_kernel[grid](activations, output, n_elements, BLOCK_SIZE=1024)
return output
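if __name__ == "__main__":
    # Usage sketch (illustrative; assumes a CUDA device with Triton available):
    # the kernel should closely match PyTorch's exact (erf-based) GELU.
    x = torch.randn(1024, dtype=torch.float16, device='cuda')
    y_triton = gelu(x)
    y_torch = torch.nn.functional.gelu(x)
    print("max abs diff:", (y_triton - y_torch).abs().max().item())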
| 1,157 | 28.692308 | 73 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/triton/mlp.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import math
import torch.nn as nn
from deepspeed.accelerator import get_accelerator
from deepspeed import comm as dist
from ..op_binding import MLPGemmOp, VectorMatMulOp, GELUGemmOp, ResidualAddOp
class TritonMLP(nn.Module):
def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count=1, mlp_extra_grouping=False):
super(TritonMLP, self).__init__()
self.config = config
data_type = self.config.dtype
data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype
device = get_accelerator().current_device_name()
self.attn_nw = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
self.attn_nb = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
intm_size_per_partition = self.config.intermediate_size // self.config.mp_size
self.inter_w = nn.Parameter(torch.empty(self.config.hidden_size,
intm_size_per_partition,
dtype=data_type,
device=device),
requires_grad=False)
self.inter_b = nn.Parameter(torch.empty(intm_size_per_partition, dtype=data_type_fp, device=device),
requires_grad=False)
self.output_w = nn.Parameter(torch.empty(intm_size_per_partition,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.output_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
# used for quantization
self.q_scales = q_scales
self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups
self.merge_count = int(math.log2(merge_count))
self.mp_group = mp_group
self.mlp_gemm_func = MLPGemmOp(config)
self.vector_matmul_func = VectorMatMulOp(config)
self.fused_gemm_gelu = GELUGemmOp(config)
self.residual_add_func = ResidualAddOp(config)
def forward(self, input, residual, residual_norm, bias):
residual_add = None
if self.attn_nw is None:
output = self.fused_gemm_gelu(input=residual_norm,
weight=self.inter_w,
bias=self.inter_b,
weight_out=self.output_w)
else:
output, residual_add = self.mlp_gemm_func(input=input,
residual=residual,
input_bias=bias,
weight_interm=self.inter_w,
weight_out=self.output_w,
bias=self.inter_b,
gamma=self.attn_nw,
beta=self.attn_nb)
residual = self.residual_add_func(hidden_state=output,
residual=residual,
attention_output=input,
attention_bias=bias if bias is not None else self.output_b,
final_bias=self.output_b,
add_bias=bias is not None,
residual_add=residual_add)
if self.mp_group is not None and dist.get_world_size(group=self.mp_group) > 1:
dist.all_reduce(residual, group=self.mp_group)
return residual
| 4,225 | 50.536585 | 114 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/triton/triton_matmul_kernel.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import triton
import triton.language as tl
from .gelu import gelu_functor
import torch
AUTOTUNE_TOP_K = 10
SKIP_AUTOTUNE = False
def _fp16_matmul_prune_config(configs, named_args, skip_autotune=SKIP_AUTOTUNE):
if skip_autotune:
configs = [configs[0]]
else:
configs = triton.ops.matmul_perf_model.early_config_prune(configs, named_args)
return configs
"""
fp16 matmul implementation is adapted from triton matmul:
https://github.com/openai/triton/blob/34817ecc954a6f4ca7b4dfb352fdde1f8bd49ca5/python/triton/ops/matmul.py
"""
@triton.autotune(
configs=[
# basic configs for compute-bound matmuls
triton.Config({
'BLOCK_M': 128,
'BLOCK_N': 256,
'BLOCK_K': 32,
'SPLIT_K': 1
}, num_stages=3, num_warps=8),
triton.Config({
'BLOCK_M': 256,
'BLOCK_N': 128,
'BLOCK_K': 32,
'SPLIT_K': 1
}, num_stages=3, num_warps=8),
triton.Config({
'BLOCK_M': 256,
'BLOCK_N': 64,
'BLOCK_K': 32,
'SPLIT_K': 1
}, num_stages=4, num_warps=4),
triton.Config({
'BLOCK_M': 64,
'BLOCK_N': 256,
'BLOCK_K': 32,
'SPLIT_K': 1
}, num_stages=4, num_warps=4),
triton.Config({
'BLOCK_M': 128,
'BLOCK_N': 128,
'BLOCK_K': 32,
'SPLIT_K': 1
}, num_stages=4, num_warps=4),
triton.Config({
'BLOCK_M': 128,
'BLOCK_N': 64,
'BLOCK_K': 32,
'SPLIT_K': 1
}, num_stages=4, num_warps=4),
triton.Config({
'BLOCK_M': 64,
'BLOCK_N': 128,
'BLOCK_K': 32,
'SPLIT_K': 1
}, num_stages=4, num_warps=4),
triton.Config({
'BLOCK_M': 128,
'BLOCK_N': 32,
'BLOCK_K': 32,
'SPLIT_K': 1
}, num_stages=4, num_warps=4),
triton.Config({
'BLOCK_M': 64,
'BLOCK_N': 32,
'BLOCK_K': 32,
'SPLIT_K': 1
}, num_stages=5, num_warps=2),
],
key=['CACHE_M', 'CACHE_N', 'CACHE_K'],
prune_configs_by={
'early_config_prune': _fp16_matmul_prune_config,
'perf_model': None,
'top_k': AUTOTUNE_TOP_K
},
)
@triton.heuristics({
'EVEN_K': lambda args: args['K'] % (args['BLOCK_K'] * args['SPLIT_K']) == 0,
})
@triton.jit
def _fp_matmul(
A,
B,
C,
M,
N,
K,
bias,
stride_am,
stride_ak,
stride_bk,
stride_bn,
stride_cm,
stride_cn,
CACHE_M,
CACHE_N,
CACHE_K,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
BLOCK_K: tl.constexpr,
GROUP_M: tl.constexpr,
SPLIT_K: tl.constexpr,
EVEN_K: tl.constexpr,
ACC_TYPE: tl.constexpr,
BIAS_ADD: tl.constexpr,
ACTIVATION: tl.constexpr,
):
# matrix multiplication
pid = tl.program_id(0)
pid_z = tl.program_id(1)
grid_m = (M + BLOCK_M - 1) // BLOCK_M
grid_n = (N + BLOCK_N - 1) // BLOCK_N
# re-order program ID for better L2 performance
width = GROUP_M * grid_n
group_id = pid // width
group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
pid_m = group_id * GROUP_M + (pid % group_size)
pid_n = (pid % width) // (group_size)
# do matrix multiplication
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
# pointers
A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
for k in range(K, 0, -BLOCK_K * SPLIT_K):
if EVEN_K:
a = tl.load(A)
b = tl.load(B)
else:
a = tl.load(A, mask=rk[None, :] < k, other=0.)
b = tl.load(B, mask=rk[:, None] < k, other=0.)
acc += tl.dot(a, b)
A += BLOCK_K * SPLIT_K * stride_ak
B += BLOCK_K * SPLIT_K * stride_bk
# bias addition
if BIAS_ADD:
bias_offset = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
bias_ptr = bias + bias_offset
b = tl.load(bias_ptr, mask=bias_offset < N)
acc = acc + b[None, :]
# activation
if ACTIVATION == "relu":
acc = tl.where(acc >= 0, acc, 0)
elif ACTIVATION == "leaky_relu":
acc = tl.where(acc >= 0, acc, 0.01 * acc)
elif ACTIVATION == "gelu":
#acc = tl.sigmoid(1.702 * acc) * acc
acc = gelu_functor(acc)
elif ACTIVATION == "sigmoid":
acc = tl.sigmoid(acc) # sigmoid
acc = acc.to(C.dtype.element_ty)
# rematerialize rm and rn to save registers
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
mask = (rm < M)[:, None] & (rn < N)[None, :]
# handles write-back with reduction-splitting
if SPLIT_K == 1:
tl.store(C, acc, mask=mask)
else:
tl.atomic_add(C, acc, mask=mask)
def matmul_4d_prune_config(configs, named_args, skip_autotune=SKIP_AUTOTUNE):
if skip_autotune:
configs = [configs[0]]
else:
device = torch.cuda.current_device() #ignore-cuda
capability = torch.cuda.get_device_capability() #ignore-cuda
# BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages
dtsize = named_args['a_ptr'].element_size()
dtype = named_args['a_ptr'].dtype
# make sure we have enough smem
pruned_configs = []
for config in configs:
kw = config.kwargs
BLOCK_M, BLOCK_N, BLOCK_K, num_stages = \
kw['BLOCK_SIZE_M'], kw['BLOCK_SIZE_N'], kw['BLOCK_SIZE_K'], config.num_stages
triton.compiler.init_cuda_utils()
max_shared_memory = triton.compiler.cuda_utils.get_device_properties(device)["max_shared_mem"]
required_shared_memory = (BLOCK_M + BLOCK_N) * BLOCK_K * num_stages * dtsize
if required_shared_memory <= max_shared_memory:
pruned_configs.append(config)
configs = pruned_configs
return configs
@triton.autotune(
configs=[
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 64,
"GROUP_SIZE_M": 8
},
num_stages=1, # this is mainly for unit test, to minimize the share memory usage
num_warps=8),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 32,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 32,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=5,
num_warps=2,
),
triton.Config(
{
"BLOCK_SIZE_M": 32,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=5,
num_warps=2,
),
],
key=['CACHE_M', 'CACHE_N', 'CACHE_K'],
prune_configs_by={
'early_config_prune': matmul_4d_prune_config,
'perf_model': None,
'top_k': AUTOTUNE_TOP_K
},
)
@triton.jit
def matmul_4d_kernel(
# Pointers to matrices
a_ptr,
b_ptr,
c_ptr,
# Matrix dimensions
M,
N,
K,
CACHE_M,
CACHE_N,
CACHE_K,
stride_ab,
stride_ah,
stride_am,
stride_ak,
stride_bb,
stride_bh,
stride_bk,
stride_bn,
stride_cb,
stride_ch,
stride_cm,
stride_cn,
scale,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
MASK: tl.constexpr,
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
pid = tl.program_id(axis=0)
head = tl.program_id(axis=1)
batch = tl.program_id(axis=2)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + (pid % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
if MASK:
if (pid_m + 1) * BLOCK_SIZE_M - 1 < pid_n * BLOCK_SIZE_N:
c = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=c_ptr.dtype.element_ty) - float("inf")
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = (c_ptr + batch * stride_cb + head * stride_ch + stride_cm * offs_cm[:, None] +
stride_cn * offs_cn[None, :])
tl.store(c_ptrs, c)
return
offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = (a_ptr + batch * stride_ab + head * stride_ah +
(offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak))
b_ptrs = (b_ptr + batch * stride_bb + head * stride_bh +
(offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn))
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, K, BLOCK_SIZE_K):
a_mask = (offs_am[:, None] < M) & (offs_k[None, :] + k < K)
b_mask = (offs_k[:, None] + k < K) & (offs_bn[None, :] < N)
a = tl.load(a_ptrs, mask=a_mask, other=0.)
b = tl.load(b_ptrs, mask=b_mask, other=0.)
accumulator += tl.dot(a, b)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
c = accumulator.to(c_ptr.dtype.element_ty)
if scale > 0:
c = c * scale.to(c_ptr.dtype.element_ty)
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
if MASK:
c += tl.where(offs_cm[:, None] >= offs_cn[None, :], 0, float("-inf"))
c_ptrs = (c_ptr + batch * stride_cb + head * stride_ch + stride_cm * offs_cm[:, None] +
stride_cn * offs_cn[None, :])
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
| 11,738 | 30.055556 | 106 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/triton/layer_norm.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import triton
import triton.language as tl
'''
layer-normalization
modified the triton kernel in
https://github.com/openai/triton/blob/34817ecc954a6f4ca7b4dfb352fdde1f8bd49ca5/python/tutorials/05-layer-norm.py
'''
@triton.jit
def layer_norm_kernel(
Out,
A,
Weight,
Bias,
stride,
N,
eps,
BLOCK_SIZE: tl.constexpr,
):
# position of elements processed by this program
row = tl.program_id(0)
Out += row * stride
A += row * stride
# compute mean
mean = 0
_mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(A + cols, mask=cols < N, other=0.0).to(tl.float32)
_mean += a
mean = tl.sum(_mean, axis=0) / N
# compute variance
_var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(A + cols, mask=cols < N, other=0.0).to(tl.float32)
a = tl.where(cols < N, a - mean, 0.0)
_var += a * a
var = tl.sum(_var, axis=0) / N
rstd = 1 / tl.sqrt(var + eps)
# multiply by weight and add bias
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
mask = cols < N
weight = tl.load(Weight + cols, mask=mask)
bias = tl.load(Bias + cols, mask=mask)
a = tl.load(A + cols, mask=mask, other=0.0).to(tl.float32)
a_hat = (a - mean) * rstd
out = a_hat * weight + bias
        # write-back
tl.store(Out + cols, out, mask=mask)
@triton.jit
def layer_norm_residual_kernel(
Out,
A,
Residual,
ln_input,
Weight,
Bias,
stride,
N,
eps,
BLOCK_SIZE: tl.constexpr,
):
# position of elements processed by this program
row = tl.program_id(0)
Out += row * stride
A += row * stride
Residual += row * stride
ln_input += row * stride
# compute mean
mean = 0
_mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(A + cols, mask=cols < N, other=0.0).to(tl.float32)
res = tl.load(Residual + cols, mask=cols < N, other=0.0).to(tl.float32)
a = a + res
tl.store(ln_input + cols, a, mask=cols < N)
_mean += a
mean = tl.sum(_mean, axis=0) / N
# compute variance
_var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(ln_input + cols, mask=cols < N, other=0.0).to(tl.float32)
a = tl.where(cols < N, a - mean, 0.0)
_var += a * a
var = tl.sum(_var, axis=0) / N
rstd = 1 / tl.sqrt(var + eps)
# multiply by weight and add bias
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
mask = cols < N
weight = tl.load(Weight + cols, mask=mask)
bias = tl.load(Bias + cols, mask=mask)
a = tl.load(ln_input + cols, mask=mask, other=0.0).to(tl.float32)
a_hat = (a - mean) * rstd
out = a_hat * weight + bias
# write-back
tl.store(Out + cols, out, mask=mask)
@triton.jit
def layer_norm_residual_bias_kernel(
Out,
A,
Residual,
InputBias,
ln_input,
Weight,
Bias,
stride,
N,
eps,
BLOCK_SIZE: tl.constexpr,
):
# position of elements processed by this program
row = tl.program_id(0)
Out += row * stride
A += row * stride
Residual += row * stride
ln_input += row * stride
# compute mean
mean = 0
_mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(A + cols, mask=cols < N, other=0.0).to(tl.float32)
res = tl.load(Residual + cols, mask=cols < N, other=0.0).to(tl.float32)
b = tl.load(InputBias + cols, mask=cols < N, other=0.0).to(tl.float32)
a = a + b + res
tl.store(ln_input + cols, a, mask=cols < N)
_mean += a
mean = tl.sum(_mean, axis=0) / N
# compute variance
_var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(ln_input + cols, mask=cols < N, other=0.0).to(tl.float32)
a = tl.where(cols < N, a - mean, 0.0)
_var += a * a
var = tl.sum(_var, axis=0) / N
rstd = 1 / tl.sqrt(var + eps)
# multiply by weight and add bias
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
mask = cols < N
weight = tl.load(Weight + cols, mask=mask)
bias = tl.load(Bias + cols, mask=mask)
a = tl.load(ln_input + cols, mask=mask, other=0.0).to(tl.float32)
a_hat = (a - mean) * rstd
out = a_hat * weight + bias
# write-back
tl.store(Out + cols, out, mask=mask)
def layer_norm(a, weight, bias, eps):
assert a.is_contiguous()
assert weight.is_contiguous()
assert bias.is_contiguous()
# allocate output
out = torch.empty_like(a)
# reshape input data into 2D tensor
a_arg = a.view(-1, a.shape[-1])
M, N = a_arg.shape
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // a.element_size()
BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
BLOCK_SIZE = max(BLOCK_SIZE, 128)
BLOCK_SIZE = min(BLOCK_SIZE, 4096)
BLOCK_SIZE = BLOCK_SIZE if N <= 4096 else 8192
# heuristics for number of warps
num_warps = min(max(BLOCK_SIZE // 256, 1), 8)
layer_norm_kernel[(M, )](
out,
a_arg,
weight,
bias,
a_arg.stride(0),
N,
eps,
BLOCK_SIZE=BLOCK_SIZE,
num_warps=num_warps,
)
return out
def layer_norm_residual(a, input_bias, residual, weight, bias, eps):
assert a.is_contiguous()
assert weight.is_contiguous()
assert bias.is_contiguous()
assert residual.is_contiguous()
# allocate output and scratch-pad for residual addition
out = torch.empty_like(a)
ln_input = torch.empty_like(a)
# reshape input data into 2D tensor
a_arg = a.view(-1, a.shape[-1])
residual = residual.view(-1, residual.shape[-1])
M, N = a_arg.shape
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // a.element_size()
BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
BLOCK_SIZE = max(BLOCK_SIZE, 128)
BLOCK_SIZE = min(BLOCK_SIZE, 4096)
BLOCK_SIZE = BLOCK_SIZE if N <= 4096 else 8192
# heuristics for number of warps
num_warps = min(max(BLOCK_SIZE // 256, 1), 8)
if input_bias is None:
layer_norm_residual_kernel[(M, )](
out,
a_arg,
residual,
ln_input,
weight,
bias,
a_arg.stride(0),
N,
eps,
BLOCK_SIZE=BLOCK_SIZE,
num_warps=num_warps,
)
else:
layer_norm_residual_bias_kernel[(M, )](
out,
a_arg,
residual,
input_bias,
ln_input,
weight,
bias,
a_arg.stride(0),
N,
eps,
BLOCK_SIZE=BLOCK_SIZE,
num_warps=num_warps,
)
return out
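if __name__ == "__main__":
    # Usage sketch (illustrative; assumes a CUDA device with Triton available):
    # check the fused layer norm against torch.nn.functional.layer_norm.
    hidden = 768
    x = torch.randn(4, hidden, dtype=torch.float16, device='cuda')
    weight = torch.randn(hidden, dtype=torch.float16, device='cuda')
    bias = torch.randn(hidden, dtype=torch.float16, device='cuda')
    y_triton = layer_norm(x, weight, bias, 1e-5)
    y_torch = torch.nn.functional.layer_norm(x, (hidden, ), weight, bias, 1e-5)
    print("max abs diff:", (y_triton - y_torch).abs().max().item())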
| 7,512 | 29.052 | 112 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/triton/residual_add.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import triton
import triton.language as tl
from deepspeed.accelerator import get_accelerator
@triton.jit
def residual_add_bias_kernel(
hidden_state_ptr,
residual_ptr,
attn_output_ptr,
hidden_state_size,
attn_bias_ptr,
final_bias_ptr,
bias_size,
output_ptr,
mp_size: tl.constexpr,
mlp_after_attn: tl.constexpr,
pre_attn_norm: tl.constexpr,
add_attn_bias: tl.constexpr,
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < hidden_state_size
bias_offsets = offsets % bias_size
bias_mask = bias_offsets < bias_size
tl_hidden_state = tl.load(hidden_state_ptr + offsets, mask=mask)
tl_residual = tl.load(residual_ptr + offsets, mask=mask)
tl_attn_output = tl.load(attn_output_ptr + offsets, mask=mask)
tl_attn_bias = tl.load(attn_bias_ptr + bias_offsets, mask=bias_mask)
tl_final_bias = tl.load(final_bias_ptr + bias_offsets, mask=bias_mask)
if mlp_after_attn:
if pre_attn_norm:
output = tl_hidden_state + (tl_residual + tl_final_bias + tl_attn_output + tl_attn_bias) / mp_size
else:
output = tl_hidden_state + tl_residual + tl_final_bias
else:
output = tl_hidden_state + tl_attn_output + (tl_residual + tl_final_bias) / mp_size
if add_attn_bias:
output += tl_attn_bias / mp_size
tl.store(output_ptr + offsets, output, mask=mask)
def residual_add_bias(hidden_state: torch.Tensor, residual: torch.Tensor, attn_output: torch.Tensor,
attn_bias: torch.Tensor, final_bias: torch.Tensor, mp_size: int, mlp_after_attn: bool,
add_attn_bias: bool, pre_attn_norm: bool):
# check that all tensors are on the same device
assert get_accelerator().on_accelerator(hidden_state) \
and get_accelerator().on_accelerator(residual) \
and get_accelerator().on_accelerator(attn_output) \
and get_accelerator().on_accelerator(attn_bias) \
and get_accelerator().on_accelerator(final_bias)
# check that all tensors have the same dtype
assert hidden_state.dtype == residual.dtype == attn_output.dtype \
== attn_bias.dtype == final_bias.dtype
# check that all tensors have the right shape
assert hidden_state.shape == residual.shape == attn_output.shape
assert attn_bias.shape == final_bias.shape
assert attn_bias.shape[0] == hidden_state.shape[2]
output = torch.empty_like(hidden_state)
hidden_state_size = output.numel()
bias_size = attn_bias.numel()
grid = lambda meta: (triton.cdiv(hidden_state_size, meta['BLOCK_SIZE']), )
residual_add_bias_kernel[grid](hidden_state, residual, attn_output, hidden_state_size,\
attn_bias, final_bias, bias_size, output, mp_size, mlp_after_attn, pre_attn_norm, \
add_attn_bias, \
BLOCK_SIZE=1024)
return output
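if __name__ == "__main__":
    # Usage sketch (illustrative; assumes a CUDA device with Triton available):
    # fuse one layer's residual connection with its attention/final biases.
    batch, seq, hidden = 2, 8, 64
    hidden_state = torch.randn(batch, seq, hidden, dtype=torch.float16, device='cuda')
    residual = torch.randn_like(hidden_state)
    attn_output = torch.randn_like(hidden_state)
    attn_bias = torch.randn(hidden, dtype=torch.float16, device='cuda')
    final_bias = torch.randn(hidden, dtype=torch.float16, device='cuda')
    out = residual_add_bias(hidden_state, residual, attn_output, attn_bias, final_bias,
                            mp_size=1, mlp_after_attn=True, add_attn_bias=True, pre_attn_norm=True)
    print(out.shape)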
| 3,118 | 34.044944 | 110 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/transformer/inference/triton/attention.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import math
import torch
import torch.nn as nn
from deepspeed.accelerator import get_accelerator
from deepspeed import comm as dist
from deepspeed.ops.transformer.inference.op_binding import LinearOp, VectorMatMulOp, SoftmaxContextOp, QKVGemmOp
from deepspeed.ops.transformer.inference.triton import (
softmax,
score_4d_matmul,
context_4d_matmul,
)
minus_inf = -10000.0
class TritonSelfAttention(nn.Module):
num_layers = 0
def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count=1, qkv_merging=False):
super(TritonSelfAttention, self).__init__()
self.config = config
data_type = self.config.dtype
data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype
assert data_type_fp == torch.half, "triton supports fp16 data_type_fp"
self.config.layer_id = TritonSelfAttention.num_layers
TritonSelfAttention.num_layers = TritonSelfAttention.num_layers + 1
device = get_accelerator().current_device_name() #if config.bigscience_bloom else 'cpu'
        assert config.mp_size == 1, "mp_size must be 1 when using triton attention (tensor parallelism is not yet supported)"
if self.config.set_empty_params:
self.attn_qw = None
self.attn_qb = None
self.attn_kw = None
self.attn_kb = None
self.attn_vw = None
self.attn_vb = None
self.attn_qkvw = None
self.attn_qkvb = None
self.attn_ow = None
self.attn_ob = None
else:
qkv_size_per_partition = (self.config.hidden_size // self.config.mp_size) * 3
self.attn_qkvw = nn.Parameter(torch.empty(self.config.hidden_size,
qkv_size_per_partition,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_qkvb = nn.Parameter(torch.empty(qkv_size_per_partition, dtype=data_type_fp, device=device),
requires_grad=False)
            # self-output weights
out_size_per_partition = self.config.hidden_size // self.config.mp_size
self.attn_ow = nn.Parameter(torch.empty(out_size_per_partition,
self.config.hidden_size,
dtype=data_type,
device=device),
requires_grad=False)
self.attn_ob = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
requires_grad=False)
self.num_attention_heads_per_partition = self.config.heads // self.config.mp_size
self.hidden_size_per_partition = self.config.hidden_size // self.config.mp_size
self.hidden_size_per_attention_head = self.config.hidden_size // self.config.heads
self.mp_group = mp_group
self.use_flash = False
# used for quantization
self.q_scales = q_scales
self.q_groups = q_groups
self.merge_count = int(math.log2(merge_count))
self.norm_factor = math.sqrt(self.config.hidden_size // self.config.heads)
if not config.use_mup:
self.norm_factor = math.sqrt(self.norm_factor)
if self.config.scale_attn_by_inverse_layer_idx is True:
self.norm_factor *= math.sqrt(self.config.layer_id + 1)
# https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191
triton_autotune = self.config.triton_autotune and self.config.layer_id == 0
self.qkv_func = QKVGemmOp(config)
self.score_context_func = SoftmaxContextOp(config)
self.linear_func = LinearOp(config)
self.vector_matmul_func = VectorMatMulOp(config)
self.hidden_size = config.hidden_size
self.head_size = config.hidden_size // config.heads
self.scale = (1 / self.norm_factor / self.norm_factor if self.config.scale_attention else 1.0
) # making it back to 1/sqrt(head_size)
self.triangular_masking = self.config.triangular_masking
# triton autotune table update for score/context matmul
if triton_autotune:
print(f"running triton autotune for attention")
__class__._triton_autotune(2, self.config.max_out_tokens, self.head_size, self.config.hidden_size,
self.triangular_masking, self.scale)
@staticmethod
def _triton_autotune(min_seqlen,
max_seqlen,
head_size,
hidden_size,
triangular_masking,
scale,
dtype=torch.float16):
from deepspeed.ops.transformer.inference.triton.matmul_ext import Fp16Matmul, score_4d_matmul, context_4d_matmul
seqlen = [(min_seqlen + i)
for i in range(0, max_seqlen - min_seqlen + Fp16Matmul._cache_stride + 1, Fp16Matmul._cache_stride)]
Fp16Matmul._read_autotune_table()
for N in seqlen:
qkv = torch.randn((1, N, 3 * hidden_size), dtype=dtype, device='cuda')
output = score_4d_matmul(qkv, head_size, triangular_masking, scale)
context_4d_matmul(output, qkv, head_size)
Fp16Matmul._update_autotune_table()
def ds_compute_attention(self, qkv_out, input_mask, layer_past, alibi):
if isinstance(qkv_out, list):
qkv_out = qkv_out[0]
no_masking = input_mask is None
if no_masking:
input_mask = torch.empty(1)
attn_key_value = self.score_context_func(
query_key_value=qkv_out,
attn_mask=((1 - input_mask).to(qkv_out.dtype) *
minus_inf) if input_mask.dtype == torch.int64 else input_mask,
heads=self.num_attention_heads_per_partition,
norm_factor=(1 / self.norm_factor if self.config.scale_attention else 1.0),
no_masking=no_masking,
layer_id=self.config.layer_id,
num_layers=TritonSelfAttention.num_layers,
alibi=alibi)
context_layer, key_layer, value_layer = attn_key_value
return context_layer, key_layer, value_layer
def forward(
self,
input,
input_mask,
head_mask=None,
layer_past=None,
get_present=False, # not used
encoder_hidden_states=None, # not used
encoder_attention_mask=None, # not used
            output_attentions=False,  # not used
norm_w=None,
norm_b=None,
alibi=None,
use_triton_attention=True):
if not self.config.pre_layer_norm:
qkv_out = self.linear_func(input=input,
weight=self.attn_qkvw,
bias=self.attn_qkvb,
add_bias=self.attn_qkvb is not None,
do_flash_attn=False,
num_heads=self.num_attention_heads_per_partition,
num_layers=TritonSelfAttention.num_layers)
qkv = qkv_out
else:
qkv_out = self.qkv_func(input=input,
weight=self.attn_qkvw,
bias=(self.attn_qkvb if self.attn_qkvb is not None else norm_b),
gamma=norm_w,
beta=norm_b)
qkv = qkv_out[0]
if use_triton_attention and (alibi is None):
context_layer = compute_attention(qkv=qkv,
input_mask=input_mask,
scale=self.scale,
layer_past=layer_past,
alibi=alibi,
head_size=self.head_size,
use_triton_flash=self.use_flash,
use_cuda_flash=False,
triangular=self.triangular_masking)
key_layer, value_layer = qkv[:, :, self.hidden_size:2 * self.hidden_size], qkv[:, :, 2 * self.hidden_size:]
else:
context_layer, key_layer, value_layer = self.ds_compute_attention(qkv_out=qkv_out,
input_mask=input_mask,
layer_past=layer_past,
alibi=alibi)
output = self.vector_matmul_func(input=context_layer, weight=self.attn_ow)
inp_norm = qkv_out[-1]
if self.config.mlp_after_attn and self.mp_group is not None and dist.get_world_size(group=self.mp_group) > 1:
dist.all_reduce(output, group=self.mp_group)
return (output, key_layer, value_layer, context_layer, inp_norm)
global inference_module
def compute_attention(qkv,
input_mask,
layer_past,
alibi,
scale,
head_size,
triangular=False,
use_cuda_flash=False,
use_triton_flash=False,
use_ds_attention=False):
if isinstance(qkv, list):
qkv = qkv[0]
#assert layer_past is None, "layer_past not supported in triton yet"
    assert alibi is None, "alibi is not yet supported in triton attention"
output = score_4d_matmul(qkv, head_size, triangular, scale)
if triangular:
output = softmax(output)
else:
output = softmax(output, input_mask)
output = context_4d_matmul(output, qkv, head_size)
return output
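if __name__ == "__main__":
    # Usage sketch (illustrative; assumes a CUDA device with Triton available):
    # run the fused score/softmax/context path on a random fused QKV tensor.
    batch, seq, heads, head_size = 1, 16, 4, 64
    hidden = heads * head_size
    qkv = torch.randn(batch, seq, 3 * hidden, dtype=torch.float16, device='cuda')
    context = compute_attention(qkv, input_mask=None, layer_past=None, alibi=None,
                                scale=1.0 / math.sqrt(head_size), head_size=head_size,
                                triangular=True)
    print(context.shape)  # expected: (batch, seq, hidden)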
| 10,335 | 43.93913 | 121 | py |
DeepSpeed | DeepSpeed-master/deepspeed/ops/lamb/fused_lamb.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Copyright NVIDIA/apex
This file is adapted from NVIDIA/apex/optimizer/fused_adam and implements the LAMB optimizer
"""
import types
import torch
from deepspeed.ops.op_builder import FusedLambBuilder
class FusedLamb(torch.optim.Optimizer):
"""Implements the LAMB algorithm. Currently GPU-only.
LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes.
https://arxiv.org/abs/1904.00962
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
bias_correction (bool, optional): bias correction (default: True)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
max_grad_norm (float, optional): value used to clip global grad norm
(default: 0.0)
max_coeff(float, optional): maximum value of the lamb coefficient (default: 10.0)
min_coeff(float, optional): minimum value of the lamb coefficient (default: 0.01)
amsgrad (boolean, optional): NOT SUPPORTED in FusedLamb!
"""
def __init__(self,
params,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
eps_inside_sqrt=False,
weight_decay=0.,
max_grad_norm=0.,
max_coeff=10.0,
min_coeff=0.01,
amsgrad=False):
self.fused_lamb_cuda = FusedLambBuilder().load()
if amsgrad:
raise RuntimeError('FusedLamb does not support the AMSGrad variant.')
defaults = dict(lr=lr,
bias_correction=bias_correction,
betas=betas,
eps=eps,
weight_decay=weight_decay,
max_grad_norm=max_grad_norm,
max_coeff=max_coeff,
min_coeff=min_coeff)
super(FusedLamb, self).__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
self.lamb_coeffs = []
def step(self, closure=None, grads=None, output_params=None, scale=1., grad_norms=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
output params (list of tensors, optional): A reduced precision copy
of the updated weights written out in addition to the regular
updated weights. Have to be of same type as gradients. (default: None)
scale (float, optional): factor to divide gradient tensor values
by before applying to weights. (default: 1)
"""
loss = None
if closure is not None:
loss = closure()
if grads is None:
grads_group = [None] * len(self.param_groups)
# backward compatibility
# assuming a list/generator of parameter means single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0]) != list:
grads_group = [grads]
else:
grads_group = grads
if output_params is None:
output_params_group = [None] * len(self.param_groups)
elif isinstance(output_params, types.GeneratorType):
output_params_group = [output_params]
elif type(output_params[0]) != list:
output_params_group = [output_params]
else:
output_params_group = output_params
if grad_norms is None:
grad_norms = [None] * len(self.param_groups)
#remove the previous coeffs
del self.lamb_coeffs[:]
for group, grads_this_group, output_params_this_group, grad_norm_group in zip(
self.param_groups, grads_group, output_params_group, grad_norms):
if grads_this_group is None:
grads_this_group = [None] * len(group['params'])
if output_params_this_group is None:
output_params_this_group = [None] * len(group['params'])
if grad_norm_group is None:
grad_norm_group = [None] * len(group['params'])
elif not isinstance(grad_norm_group, list):
grad_norm_group = [grad_norm_group]
bias_correction = 1 if group['bias_correction'] else 0
for p, grad, output_param, grad_norm in zip(group['params'], grads_this_group, output_params_this_group,
grad_norm_group):
# compute combined scale factor for this group
combined_scale = scale
if group['max_grad_norm'] > 0:
# norm is in fact norm*scale
clip = ((grad_norm / scale) + 1e-6) / group['max_grad_norm']
if clip > 1:
combined_scale = clip * scale
#note: p.grad should not ever be set for correct operation of mixed precision optimizer that sometimes sends None gradients
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('FusedLamb does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
max_coeff = group['max_coeff']
min_coeff = group['min_coeff']
state['step'] += 1
out_p = torch.tensor([], dtype=torch.float) if output_param is None else output_param
lamb_coeff = self.fused_lamb_cuda.lamb(p.data, out_p, exp_avg, exp_avg_sq, grad, group['lr'], beta1,
beta2, max_coeff, min_coeff, group['eps'], combined_scale,
state['step'], self.eps_mode, bias_correction,
group['weight_decay'])
self.lamb_coeffs.append(lamb_coeff)
return loss
def get_lamb_coeffs(self):
lamb_coeffs = [lamb_coeff.item() for lamb_coeff in self.lamb_coeffs]
return lamb_coeffs
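if __name__ == "__main__":
    # Usage sketch (illustrative; assumes a CUDA device and that the fused LAMB
    # op can be JIT-built or loaded on this system).
    model = torch.nn.Linear(16, 16).cuda()
    optimizer = FusedLamb(model.parameters(), lr=1e-3, weight_decay=0.01)
    loss = model(torch.randn(4, 16, device='cuda')).sum()
    loss.backward()
    optimizer.step()
    print("lamb coefficients:", optimizer.get_lamb_coeffs())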
| 7,815 | 43.662857 | 139 | py |
DeepSpeed | DeepSpeed-master/deepspeed/model_implementations/diffusers/vae.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from ..features.cuda_graph import CUDAGraph
class DSVAE(CUDAGraph, torch.nn.Module):
def __init__(self, vae, enable_cuda_graph=True):
super().__init__(enable_cuda_graph=enable_cuda_graph)
self.vae = vae
self.config = vae.config
self.device = self.vae.device
self.dtype = self.vae.dtype
self.vae.requires_grad_(requires_grad=False)
self.decoder_cuda_graph_created = False
self.encoder_cuda_graph_created = False
self.all_cuda_graph_created = False
def _graph_replay_decoder(self, *inputs, **kwargs):
for i in range(len(inputs)):
if torch.is_tensor(inputs[i]):
self.static_decoder_inputs[i].copy_(inputs[i])
for k in kwargs:
if torch.is_tensor(kwargs[k]):
self.static_decoder_kwargs[k].copy_(kwargs[k])
self._decoder_cuda_graph.replay()
return self.static_decoder_output
def _decode(self, x, return_dict=True):
return self.vae.decode(x, return_dict=return_dict)
def _create_cuda_graph_decoder(self, *inputs, **kwargs):
# warmup to create the workspace and cublas handle
cuda_stream = torch.cuda.Stream()
cuda_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(cuda_stream):
for i in range(3):
ret = self._decode(*inputs, **kwargs)
torch.cuda.current_stream().wait_stream(cuda_stream)
# create cuda_graph and assign static_inputs and static_outputs
self._decoder_cuda_graph = torch.cuda.CUDAGraph()
self.static_decoder_inputs = inputs
self.static_decoder_kwargs = kwargs
with torch.cuda.graph(self._decoder_cuda_graph):
self.static_decoder_output = self._decode(*self.static_decoder_inputs, **self.static_decoder_kwargs)
self.decoder_cuda_graph_created = True
def decode(self, *inputs, **kwargs):
if self.enable_cuda_graph:
if self.decoder_cuda_graph_created:
outputs = self._graph_replay_decoder(*inputs, **kwargs)
else:
self._create_cuda_graph_decoder(*inputs, **kwargs)
outputs = self._graph_replay_decoder(*inputs, **kwargs)
return outputs
else:
return self._decode(*inputs, **kwargs)
def _graph_replay_encoder(self, *inputs, **kwargs):
for i in range(len(inputs)):
if torch.is_tensor(inputs[i]):
self.static_encoder_inputs[i].copy_(inputs[i])
for k in kwargs:
if torch.is_tensor(kwargs[k]):
self.static_encoder_kwargs[k].copy_(kwargs[k])
self._encoder_cuda_graph.replay()
return self.static_encoder_output
def _encode(self, x, return_dict=True):
return self.vae.encode(x, return_dict=return_dict)
def _create_cuda_graph_encoder(self, *inputs, **kwargs):
# warmup to create the workspace and cublas handle
cuda_stream = torch.cuda.Stream()
cuda_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(cuda_stream):
for i in range(3):
ret = self._encode(*inputs, **kwargs)
torch.cuda.current_stream().wait_stream(cuda_stream)
# create cuda_graph and assign static_inputs and static_outputs
self._encoder_cuda_graph = torch.cuda.CUDAGraph()
self.static_encoder_inputs = inputs
self.static_encoder_kwargs = kwargs
with torch.cuda.graph(self._encoder_cuda_graph):
self.static_encoder_output = self._encode(*self.static_encoder_inputs, **self.static_encoder_kwargs)
self.encoder_cuda_graph_created = True
def encode(self, *inputs, **kwargs):
if self.enable_cuda_graph:
if self.encoder_cuda_graph_created:
outputs = self._graph_replay_encoder(*inputs, **kwargs)
else:
self._create_cuda_graph_encoder(*inputs, **kwargs)
outputs = self._graph_replay_encoder(*inputs, **kwargs)
return outputs
else:
return self._encode(*inputs, **kwargs)
def _graph_replay(self, *inputs, **kwargs):
for i in range(len(inputs)):
if torch.is_tensor(inputs[i]):
self.static_inputs[i].copy_(inputs[i])
for k in kwargs:
if torch.is_tensor(kwargs[k]):
self.static_kwargs[k].copy_(kwargs[k])
self._all_cuda_graph.replay()
return self.static_output
def forward(self, *inputs, **kwargs):
if self.enable_cuda_graph:
            if self.all_cuda_graph_created:
outputs = self._graph_replay(*inputs, **kwargs)
else:
self._create_cuda_graph(*inputs, **kwargs)
outputs = self._graph_replay(*inputs, **kwargs)
return outputs
else:
return self._forward(*inputs, **kwargs)
def _create_cuda_graph(self, *inputs, **kwargs):
# warmup to create the workspace and cublas handle
cuda_stream = torch.cuda.Stream()
cuda_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(cuda_stream):
for i in range(3):
ret = self._forward(*inputs, **kwargs)
torch.cuda.current_stream().wait_stream(cuda_stream)
# create cuda_graph and assign static_inputs and static_outputs
self._all_cuda_graph = torch.cuda.CUDAGraph()
self.static_inputs = inputs
self.static_kwargs = kwargs
with torch.cuda.graph(self._all_cuda_graph):
self.static_output = self._forward(*self.static_inputs, **self.static_kwargs)
self.all_cuda_graph_created = True
def _forward(self, sample, timestamp, encoder_hidden_states, return_dict=True):
return self.vae(sample, timestamp, encoder_hidden_states, return_dict)
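if __name__ == "__main__":
    # Usage sketch (illustrative; assumes the `diffusers` package, a CUDA device,
    # and the model id below, which is an assumption rather than a requirement).
    from diffusers import AutoencoderKL
    vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").half().to("cuda")
    ds_vae = DSVAE(vae, enable_cuda_graph=False)
    latents = torch.randn(1, 4, 64, 64, dtype=torch.float16, device="cuda")
    image = ds_vae.decode(latents).sample
    print(image.shape)  # expected: (1, 3, 512, 512)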
| 6,025 | 38.907285 | 112 | py |
DeepSpeed | DeepSpeed-master/deepspeed/model_implementations/diffusers/unet.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from ..features.cuda_graph import CUDAGraph
class DSUNet(CUDAGraph, torch.nn.Module):
def __init__(self, unet, enable_cuda_graph=True):
super().__init__(enable_cuda_graph=enable_cuda_graph)
self.unet = unet
# SD pipeline accesses this attribute
self.in_channels = unet.in_channels
self.device = self.unet.device
self.dtype = self.unet.dtype
self.config = self.unet.config
self.fwd_count = 0
self.unet.requires_grad_(requires_grad=False)
self.unet.to(memory_format=torch.channels_last)
self.cuda_graph_created = False
def _graph_replay(self, *inputs, **kwargs):
for i in range(len(inputs)):
if torch.is_tensor(inputs[i]):
self.static_inputs[i].copy_(inputs[i])
for k in kwargs:
if torch.is_tensor(kwargs[k]):
self.static_kwargs[k].copy_(kwargs[k])
self._cuda_graphs.replay()
return self.static_output
def forward(self, *inputs, **kwargs):
if self.enable_cuda_graph:
if self.cuda_graph_created:
outputs = self._graph_replay(*inputs, **kwargs)
else:
self._create_cuda_graph(*inputs, **kwargs)
outputs = self._graph_replay(*inputs, **kwargs)
return outputs
else:
return self._forward(*inputs, **kwargs)
def _create_cuda_graph(self, *inputs, **kwargs):
# warmup to create the workspace and cublas handle
cuda_stream = torch.cuda.Stream()
cuda_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(cuda_stream):
for i in range(3):
ret = self._forward(*inputs, **kwargs)
torch.cuda.current_stream().wait_stream(cuda_stream)
# create cuda_graph and assign static_inputs and static_outputs
self._cuda_graphs = torch.cuda.CUDAGraph()
self.static_inputs = inputs
self.static_kwargs = kwargs
with torch.cuda.graph(self._cuda_graphs):
self.static_output = self._forward(*self.static_inputs, **self.static_kwargs)
self.cuda_graph_created = True
def _forward(self, sample, timestamp, encoder_hidden_states, return_dict=True, cross_attention_kwargs=None):
if cross_attention_kwargs:
return self.unet(sample,
timestamp,
encoder_hidden_states,
return_dict,
cross_attention_kwargs=cross_attention_kwargs)
else:
return self.unet(sample, timestamp, encoder_hidden_states, return_dict)
| 2,792 | 36.743243 | 112 | py |
DeepSpeed | DeepSpeed-master/deepspeed/model_implementations/transformers/ds_transformer.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import torch.nn as nn
from deepspeed import comm as dist
from deepspeed.utils.logging import log_dist
from deepspeed.ops.transformer.inference.ds_mlp import DeepSpeedMLP
from deepspeed.ops.transformer.inference.ds_attention import DeepSpeedSelfAttention, BloomSelfAttention
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
import deepspeed
if deepspeed.HAS_TRITON:
from deepspeed.ops.transformer.inference.triton.mlp import TritonMLP
from deepspeed.ops.transformer.inference.triton.attention import TritonSelfAttention
inference_module = None
class DeepSpeedTransformerInference(nn.Module):
"""Initialize the DeepSpeed Transformer Layer.
Arguments:
layer_id: The layer index starting from 0, e.g. if model has 24 transformer layers,
layer_id will be 0,1,2...23 when each layer object is instantiated
config: An object of DeepSpeedInferenceConfig
mp_group: Model parallelism group initialized on the modeling side.
quantize_scales: This argument groups all the layers' scales used for quantization
quantize_groups: Number of groups used for quantizing the model
merge_count: Shows the number of model-parallel checkpoints merged before running inference.
We use this argument to control the quantization scale for the model parameters if a bigger
quantize-grouping than 1 is used.
mlp_extra_grouping: This flag is used to show a 2x higher number of groups used for the MLP part
of a Transformer layer. We use this feature for quantization to reduce the convergence impact
for specific downstream tasks.
"""
layer_id = 0
def __init__(self,
config,
mp_group=None,
quantize_scales=None,
quantize_groups=1,
merge_count=1,
mlp_extra_grouping=False):
super(DeepSpeedTransformerInference, self).__init__()
self.config = config
self.config.layer_id = DeepSpeedTransformerInference.layer_id
DeepSpeedTransformerInference.layer_id += 1
data_type = torch.half if self.config.dtype == torch.int8 else self.config.dtype
global inference_module
if inference_module is None:
builder = InferenceBuilder()
inference_module = builder.load()
if DeepSpeedTransformerInference.layer_id == 1:
log_dist(f"DeepSpeed-Inference config: {self.config.__dict__}", [0])
if deepspeed.HAS_TRITON and self.config.use_triton:
log_dist(f"Injecting Triton kernels ...", [0])
if self.config.bigscience_bloom:
self.attention = BloomSelfAttention(self.config, mp_group, quantize_scales, quantize_groups, merge_count)
assert not self.config.use_triton
else:
if deepspeed.HAS_TRITON and self.config.use_triton:
self.attention = TritonSelfAttention(self.config)
else:
self.attention = DeepSpeedSelfAttention(self.config, mp_group, quantize_scales, quantize_groups,
merge_count)
if deepspeed.HAS_TRITON and self.config.use_triton:
self.mlp = TritonMLP(self.config)
else:
self.mlp = DeepSpeedMLP(self.config, mp_group, quantize_scales, quantize_groups, merge_count,
mlp_extra_grouping)
device = get_accelerator().current_device_name() # if config.bigscience_bloom else 'cpu'
if self.config.set_empty_params:
self.norm_w = None
self.norm_b = None
else:
self.norm_w = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type, device=device),
requires_grad=False)
self.norm_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type, device=device),
requires_grad=False)
self.layer_past = None
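        # Workspace allocation kernels may be missing from older or partially built
        # inference modules; in that case fall back (see the except branch below) and
        # skip explicit workspace allocation.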
try:
if config.dtype == torch.float32:
self.allocate_workspace = inference_module.allocate_workspace_fp32
elif config.dtype == torch.bfloat16:
self.allocate_workspace = inference_module.allocate_workspace_bf16
else:
self.allocate_workspace = inference_module.allocate_workspace_fp32
self._alloc_workspace = True
except AttributeError:
self.allocate_workspace = None
self._alloc_workspace = False
@classmethod
def reset_cache(cls):
if inference_module is not None:
inference_module.reset_cache()
def forward(
self,
input=None,
input_mask=None,
attention_mask=None,
attn_mask=None,
head_mask=None,
layer_past=None,
get_key_value=False,
get_present=False,
encoder_output=None,
enc_dec_attn_mask=None,
x=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
alibi=None,
output_attentions=False,
# TODO(arashb): 'layer_head_mask' and 'past_key_value' are only added to satisfy the OPT models API.
# This needs to be redesigned later!
layer_head_mask=None,
past_key_value=None,
**kwargs):
if x is not None:
input = x
if "hidden_states" in kwargs:
input = kwargs["hidden_states"]
input_mask = (input_mask if attn_mask is None else attn_mask) if attention_mask is None else attention_mask
# Allocate memory only on first layer forward
if self.config.layer_id == 0 and self._alloc_workspace:
self.allocate_workspace(self.config.hidden_size, self.config.heads,
input.size()[1],
input.size()[0], DeepSpeedTransformerInference.layer_id, self.config.mp_size,
self.config.bigscience_bloom,
dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens,
self.config.min_out_tokens)
self._alloc_workspace = False
get_present = (get_present or get_key_value or use_cache)
input_mask = input_mask if attention_mask is None else attention_mask
# We set the prev key/value to None when there is a prompt
if input.shape[1] > 1:
self.layer_past = None
layer_past = layer_past if layer_past is not None else self.layer_past
head_mask = layer_head_mask if layer_head_mask is not None else head_mask
attn_mask = None
if isinstance(input, tuple):
attn_mask = input[1]
input = input[0]
input_type = input.dtype
if (self.config.dtype in [torch.float16, torch.bfloat16, torch.int8]) \
and input.dtype == torch.float:
            target_dtype = torch.half if self.config.dtype == torch.int8 else self.config.dtype
input = input.to(target_dtype)
with torch.no_grad():
attention_output, key, value, context_outputtn_ctx, inp_norm = \
self.attention(input,
input_mask,
head_mask,
layer_past,
get_present,
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
self.norm_w,
self.norm_b,
alibi)
presents = (key, value)
self.layer_past = presents if layer_past is None else None
output = self.mlp(attention_output, input, inp_norm, self.attention.attn_ob)
if not self.config.pre_layer_norm:
output = inference_module.layer_norm(output, self.norm_w, self.norm_b, self.config.epsilon)
output = output.to(input_type)
if get_present:
output = (output, presents)
if self.config.return_single_tuple:
return (output, )
elif self.config.return_tuple:
return output if type(output) is tuple else (output, attn_mask)
else:
return output
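# A minimal usage sketch (illustrative; in practice this layer is normally created
# for you by deepspeed.init_inference through policy injection). The import path and
# config fields shown here are assumptions for the example, not an authoritative API:
#
#   from deepspeed.ops.transformer.inference.config import DeepSpeedInferenceConfig
#   cfg = DeepSpeedInferenceConfig(hidden_size=1024, heads=16, dtype=torch.float16)
#   layer = DeepSpeedTransformerInference(cfg)
#   hidden_states = torch.randn(1, 128, 1024, dtype=torch.float16,
#                               device=get_accelerator().current_device_name())
#   output = layer(hidden_states)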
| 8,909 | 43.55 | 117 | py |
DeepSpeed | DeepSpeed-master/deepspeed/model_implementations/transformers/clip_encoder.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.accelerator import get_accelerator
from ..features.cuda_graph import CUDAGraph
class DSClipEncoder(CUDAGraph, torch.nn.Module):
def __init__(self, enc, enable_cuda_graph=False):
super().__init__(enable_cuda_graph=enable_cuda_graph)
enc.text_model._build_causal_attention_mask = self._build_causal_attention_mask
self.enc = enc
self.device = self.enc.device
self.dtype = self.enc.dtype
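        # Two graph slots are kept because the text encoder is typically invoked twice
        # per pipeline call (e.g. conditional and unconditional prompts in Stable
        # Diffusion); self.iter alternates between the two captured graphs.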
self.cuda_graph_created = [False, False]
self.static_inputs = [None, None]
self.static_kwargs = [None, None]
self.static_output = [None, None]
self._cuda_graphs = [None, None]
self.iter = 0
self.config = self.enc.config
def _build_causal_attention_mask(self, bsz, seq_len, dtype):
mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype, device=get_accelerator().current_device_name())
mask.fill_(torch.tensor(torch.finfo(dtype).min))
mask.triu_(1)
mask = mask.unsqueeze(1)
return mask
def _graph_replay(self, *inputs, **kwargs):
for i in range(len(inputs)):
if torch.is_tensor(inputs[i]):
self.static_inputs[self.iter][i].copy_(inputs[i])
for k in kwargs:
if torch.is_tensor(kwargs[k]):
self.static_kwargs[self.iter][k].copy_(kwargs[k])
self._cuda_graphs[self.iter].replay()
return self.static_output[self.iter]
def forward(self, *inputs, **kwargs):
if self.enable_cuda_graph:
if self.cuda_graph_created[self.iter]:
outputs = self._graph_replay(*inputs, **kwargs)
else:
self._create_cuda_graph(*inputs, **kwargs)
outputs = self._graph_replay(*inputs, **kwargs)
self.iter = (self.iter + 1) % 2
return outputs
else:
return self.enc(*inputs, **kwargs)
def _create_cuda_graph(self, *inputs, **kwargs):
# warmup to create the workspace and cublas handle
cuda_stream = torch.cuda.Stream()
cuda_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(cuda_stream):
for i in range(3):
ret = self._forward(*inputs, **kwargs)
torch.cuda.current_stream().wait_stream(cuda_stream)
# create cuda_graph and assign static_inputs and static_outputs
self._cuda_graphs[self.iter] = torch.cuda.CUDAGraph()
self.static_inputs[self.iter] = inputs
self.static_kwargs[self.iter] = kwargs
with torch.cuda.graph(self._cuda_graphs[self.iter]):
self.static_output[self.iter] = self._forward(*self.static_inputs[self.iter],
**self.static_kwargs[self.iter])
self.cuda_graph_created[self.iter] = True
def _forward(self, *inputs, **kwargs):
return self.enc(*inputs, **kwargs)
| 3,045 | 38.051282 | 110 | py |
DeepSpeed | DeepSpeed-master/deepspeed/model_implementations/transformers/ds_base.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch.nn as nn
class DeepSpeedTransformerBase(nn.Module):
def __init__(self):
pass
# this would be the new clean base class that will replace DeepSpeedTransformerInference.
# we currently don't know how this will look like but keeping it here as a placeholder.
| 388 | 23.3125 | 93 | py |
DeepSpeed | DeepSpeed-master/deepspeed/nebula/constants.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
#########################################
# nebula
#########################################
# Nebula. By default, this feature is not enabled.
# Users can configure it in ds_config.json as in the example below:
NEBULA_FORMAT = '''
nebula should be enabled as:
"session_params": {
"nebula": {
"enabled": true,
"persistent_storage_path": "/foo/bar",
"persistent_time_interval": 100,
"num_of_version_in_retention": 2,
"enable_nebula_load": true
}
}
'''
NEBULA = "nebula"
NEBULA_ENABLED = "enabled"
NEBULA_ENABLED_DEFAULT = False
# There are cases where a user wants to load a checkpoint saved by raw
# torch. Nebula cannot load a torch checkpoint directly because the two
# use different folder structures, even though the stored bytes are
# identical for torch and nebula saving.
# In that case, nebula load must be disabled so the raw torch load path
# is used: set NEBULA_ENABLE_NEBULA_LOAD to False and then load through
# the original DeepSpeed mechanism, i.e. set the value of "--load".
NEBULA_ENABLE_NEBULA_LOAD = "enable_nebula_load"
NEBULA_ENABLE_NEBULA_LOAD_DEFAULT = True
# When you want to resume from a previous checkpoint saved by nebula,
# set NEBULA_LOAD_PATH to the parent folder of that checkpoint.
# If NEBULA_LOAD_PATH is None, NEBULA_PERSISTENT_STORAGE_PATH
# is used as the default load path.
NEBULA_LOAD_PATH = "nebula_load_path"
NEBULA_LOAD_PATH_DEFAULT = None
# Nebula saves checkpoints under NEBULA_PERSISTENT_STORAGE_PATH
# asynchronously.
NEBULA_PERSISTENT_STORAGE_PATH = "persistent_storage_path"
NEBULA_PERSISTENT_STORAGE_PATH_DEFAULT = None
# Time interval to trigger the nebula persistence.
NEBULA_PERSISTENT_TIME_INTERVAL = "persistent_time_interval"
NEBULA_PERSISTENT_TIME_INTERVAL_DEFAULT = 100
# Number of checkpoint versions kept in memory. For example, with a value
# of 2, checkpoints 1 and 2 can both be resident; when checkpoint 3
# arrives, checkpoint 1 is evicted from memory provided it has already
# been persisted to disk.
NEBULA_NUM_OF_VERSION_IN_RETENTION = "num_of_version_in_retention"
NEBULA_NUM_OF_VERSION_IN_RETENTION_DEFAULT = 2
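# Illustrative sketch (not part of the DeepSpeed API; the helper name is hypothetical):
# how these constants map onto a parsed ds_config dictionary.
#
#   def read_nebula_config(ds_config: dict) -> dict:
#       nebula = ds_config.get("session_params", {}).get(NEBULA, {})
#       return {
#           NEBULA_ENABLED: nebula.get(NEBULA_ENABLED, NEBULA_ENABLED_DEFAULT),
#           NEBULA_ENABLE_NEBULA_LOAD: nebula.get(NEBULA_ENABLE_NEBULA_LOAD,
#                                                 NEBULA_ENABLE_NEBULA_LOAD_DEFAULT),
#           NEBULA_PERSISTENT_STORAGE_PATH: nebula.get(NEBULA_PERSISTENT_STORAGE_PATH,
#                                                      NEBULA_PERSISTENT_STORAGE_PATH_DEFAULT),
#           NEBULA_PERSISTENT_TIME_INTERVAL: nebula.get(NEBULA_PERSISTENT_TIME_INTERVAL,
#                                                       NEBULA_PERSISTENT_TIME_INTERVAL_DEFAULT),
#           NEBULA_NUM_OF_VERSION_IN_RETENTION: nebula.get(NEBULA_NUM_OF_VERSION_IN_RETENTION,
#                                                          NEBULA_NUM_OF_VERSION_IN_RETENTION_DEFAULT),
#       }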
# Nebula envs
NEBULA_EXPORT_ENVS = [
'DLTS_JOB_ID', 'DLTS_NUM_WORKER', 'NEBULA_PERSISTENT_STORAGE_PATH', 'NEBULA_PERSISTENT_TIME_INTERVAL',
'AML_RUN_ID', 'AZUREML_RUN_TOKEN', 'AZUREML_WORKSPACE_SCOPE', 'AZUREML_EXPERIMENT_SCOPE',
'AZUREML_RUN_HISTORY_SERVICE_ENDPOINT', 'AZUREML_RUN_ID', 'NEBULA_MEMORY_BUFFER_SIZE',
'AZUREML_PARAMETER_ITPJOB_NAME', 'FC_TASKROLE_NAME', 'FC_TASK_INDEX', 'MASTER_HOST', 'LOCAL_HOST',
'AZUREML_BLOB_ACCOUNT_NAME', 'AZUREML_BLOB_ACCOUNT_KEY'
]
# ITP env files
DLTS_POD_ENV_PATH = '/dlts-runtime/env/pod.env'
| 2,786 | 36.662162 | 106 | py |
DeepSpeed | DeepSpeed-master/accelerator/mps_accelerator.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from .abstract_accelerator import DeepSpeedAccelerator
# During the setup stage torch may not be installed; passing when the import
# fails still allows the op builder related API to be executed.
try:
import torch.mps
except ImportError:
pass
class MPS_Accelerator(DeepSpeedAccelerator):
def __init__(self):
self._name = "mps"
self._communication_backend_name = None
def is_synchronized_device(self):
return False
# Device APIs
def device_name(self, device_index=None):
if device_index == None:
return "mps"
return "mps:{}".format(device_index)
def device(self, device_index):
return torch.device("mps", index=0)
def set_device(self, device_index):
return
def current_device(self):
return torch.device("mps", index=0)
def current_device_name(self):
return "mps:0"
def device_count(self):
return 1
def synchronize(self, device_index=None):
return torch.mps.synchronize()
# RNG APIs
def random(self):
return torch.random
def set_rng_state(self, new_state, device_index=None):
return torch.mps.set_rng_state(new_state)
def get_rng_state(self, device_index=None):
return torch.mps.get_rng_state()
def manual_seed(self, seed):
return torch.mps.manual_seed(seed)
def manual_seed_all(self, seed):
return torch.mps.manual_seed(seed)
def seed(self):
return torch.mps.seed()
def initial_seed(self, seed):
return
def default_generator(self, device_index):
return
# Streams/Events
@property
def Stream(self):
return None
def stream(self, stream):
return None
def current_stream(self, device_index=None):
return None
def default_stream(self, device_index=None):
return None
@property
def Event(self):
return None
# Memory management
def empty_cache(self):
return torch.mps.empty_cache()
def memory_allocated(self, device_index=None):
return torch.mps.current_allocated_memory()
def max_memory_allocated(self, device_index=None):
return torch.mps.driver_allocated_memory()
def set_per_process_memory_fraction(self, fraction):
return torch.mps.set_per_process_memory_fraction(fraction)
def reset_max_memory_allocated(self, device_index=None):
return
def memory_cached(self, device_index=None):
return
def max_memory_cached(self, device_index=None):
return
def reset_max_memory_cached(self, device_index=None):
return
def memory_stats(self, device_index=None):
return
def reset_peak_memory_stats(self, device_index=None):
return
def memory_reserved(self, device_index=None):
return
def max_memory_reserved(self, device_index=None):
return
def total_memory(self, device_index=None):
return
# Data types
def is_bf16_supported(self):
return False
def is_fp16_supported(self):
return False
# Misc
def amp(self):
return
def is_available(self):
return hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
def range_push(self, msg):
return
def range_pop(self):
return
def lazy_call(self, callback):
return
def communication_backend_name(self):
return self._communication_backend_name
# Tensor operations
@property
def BFloat16Tensor(self):
return
@property
def ByteTensor(self):
return
@property
def DoubleTensor(self):
return
@property
def FloatTensor(self):
return
@property
def HalfTensor(self):
return
@property
def IntTensor(self):
return
@property
def LongTensor(self):
return
def pin_memory(self, tensor):
return tensor.pin_memory()
def on_accelerator(self, tensor):
device_str = str(tensor.device)
if device_str.startswith("mps"):
return True
else:
return False
def op_builder_dir(self):
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401
return "op_builder"
except ImportError:
return "deepspeed.ops.op_builder"
# create an instance of op builder, specified by class_name
def create_op_builder(self, op_name):
builder_class = self.get_op_builder(op_name)
if builder_class != None:
return builder_class()
return None
# return an op builder class, specified by class_name
def get_op_builder(self, class_name):
from deepspeed.ops.op_builder.cpu import NotImplementedBuilder
return NotImplementedBuilder
def build_extension(self):
from torch.utils.cpp_extension import BuildExtension
return BuildExtension
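# Illustrative usage sketch (assumes an Apple-silicon host and a torch build with MPS
# support): force this accelerator via the DS_ACCELERATOR override and allocate on it.
#
#   import os, torch
#   os.environ["DS_ACCELERATOR"] = "mps"
#   from deepspeed.accelerator import get_accelerator
#   acc = get_accelerator()
#   x = torch.ones(4, device=acc.current_device_name())   # lands on "mps:0"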
| 5,266 | 22.30531 | 102 | py |
DeepSpeed | DeepSpeed-master/accelerator/real_accelerator.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
try:
# Importing logger currently requires that torch is installed, hence the try...except
# TODO: Remove logger dependency on torch.
from deepspeed.utils import logger as accel_logger
except ImportError as e:
accel_logger = None
try:
from accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa1
except ImportError as e:
dsa1 = None
try:
from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa2
except ImportError as e:
dsa2 = None
ds_accelerator = None
def _validate_accelerator(accel_obj):
    # The abstract accelerator lives at a different path at build time
    # (accelerator.abstract_accelerator) and at run time
    # (deepspeed.accelerator.abstract_accelerator), and an extension may
    # import the run-time DeepSpeedAccelerator as its base class, so
    # accel_obj must be compared against both base classes.
    # If accel_obj is an instance of DeepSpeedAccelerator from either
    # accelerator.abstract_accelerator or
    # deepspeed.accelerator.abstract_accelerator, it is considered a
    # conforming object.
if not ((dsa1 != None and isinstance(accel_obj, dsa1)) or (dsa2 != None and isinstance(accel_obj, dsa2))):
raise AssertionError(f"{accel_obj.__class__.__name__} accelerator is not subclass of DeepSpeedAccelerator")
# TODO: turn off is_available test since this breaks tests
# assert accel_obj.is_available(), \
# f'{accel_obj.__class__.__name__} accelerator fails is_available() test'
def get_accelerator():
global ds_accelerator
if ds_accelerator is not None:
return ds_accelerator
accelerator_name = None
ds_set_method = None
# 1. Detect whether there is override of DeepSpeed accelerators from environment variable.
    # DS_ACCELERATOR = 'cuda'|'xpu'|'cpu'|'mps'
if "DS_ACCELERATOR" in os.environ.keys():
accelerator_name = os.environ["DS_ACCELERATOR"]
if accelerator_name == "xpu":
try:
from intel_extension_for_deepspeed import XPU_Accelerator # noqa: F401
except ImportError as e:
raise ValueError(
f"XPU_Accelerator requires intel_extension_for_deepspeed, which is not installed on this system.")
elif accelerator_name == "cpu":
try:
import intel_extension_for_pytorch # noqa: F401
except ImportError as e:
raise ValueError(
f"CPU_Accelerator requires intel_extension_for_pytorch, which is not installed on this system.")
elif accelerator_name == "cuda":
pass
elif accelerator_name == "mps":
try:
import torch.mps
# should use torch.mps.is_available() if it exists someday but this is used as proxy
torch.mps.current_allocated_memory()
except (RuntimeError, ImportError) as e:
raise ValueError(f"MPS_Accelerator requires torch.mps, which is not installed on this system.")
else:
raise ValueError(
                f'DS_ACCELERATOR must be one of "cuda", "cpu", "xpu", or "mps". Value "{accelerator_name}" is not supported')
ds_set_method = "override"
# 2. If no override, detect which accelerator to use automatically
if accelerator_name == None:
try:
from intel_extension_for_deepspeed import XPU_Accelerator # noqa: F401,F811
accelerator_name = "xpu"
except ImportError as e:
# We need a way to choose between CUDA_Accelerator and CPU_Accelerator
# Currently we detect whether intel_extension_for_pytorch is installed
# in the environment and use CPU_Accelerator if the answer is True.
# An alternative might be detect whether CUDA device is installed on
# the system but this comes with two pitfalls:
# 1. the system may not have torch pre-installed, so
# get_accelerator().is_available() may not work.
# 2. Some scenario like install on login node (without CUDA device)
# and run on compute node (with CUDA device) may cause mismatch
# between installation time and runtime.
try:
import intel_extension_for_pytorch # noqa: F401,F811
accelerator_name = "cpu"
except ImportError as e:
try:
import torch.mps
# should use torch.mps.is_available() if it exists someday but this is used as proxy
torch.mps.current_allocated_memory()
accelerator_name = "mps"
except (RuntimeError, ImportError) as e:
accelerator_name = "cuda"
ds_set_method = "auto detect"
# 3. Set ds_accelerator accordingly
if accelerator_name == "cuda":
from .cuda_accelerator import CUDA_Accelerator
ds_accelerator = CUDA_Accelerator()
elif accelerator_name == "cpu":
from .cpu_accelerator import CPU_Accelerator
ds_accelerator = CPU_Accelerator()
elif accelerator_name == "xpu":
# XPU_Accelerator is already imported in detection stage
ds_accelerator = XPU_Accelerator()
elif accelerator_name == "mps":
from .mps_accelerator import MPS_Accelerator
ds_accelerator = MPS_Accelerator()
_validate_accelerator(ds_accelerator)
if accel_logger is not None:
accel_logger.info(f"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})")
return ds_accelerator
def set_accelerator(accel_obj):
global ds_accelerator
_validate_accelerator(accel_obj)
if accel_logger is not None:
accel_logger.info(f"Setting ds_accelerator to {accel_obj._name} (model specified)")
ds_accelerator = accel_obj
"""
-----------[code] test_get.py -----------
from deepspeed.accelerator import get_accelerator
my_accelerator = get_accelerator()
logger.info(f'{my_accelerator._name=}')
logger.info(f'{my_accelerator._communication_backend_name=}')
logger.info(f'{my_accelerator.HalfTensor().device=}')
logger.info(f'{my_accelerator.total_memory()=}')
-----------[code] test_get.py -----------
---[output] python test_get.py---------
my_accelerator._name='cuda'
my_accelerator._communication_backend_name='nccl'
my_accelerator.HalfTensor().device=device(type='cuda', index=0)
my_accelerator.total_memory()=34089730048
---[output] python test_get.py---------
**************************************************************************
-----------[code] test_set.py -----------
from deepspeed.accelerator.cuda_accelerator import CUDA_Accelerator
cu_accel = CUDA_Accelerator()
logger.info(f'{id(cu_accel)=}')
from deepspeed.accelerator import set_accelerator, get_accelerator
set_accelerator(cu_accel)
my_accelerator = get_accelerator()
logger.info(f'{id(my_accelerator)=}')
logger.info(f'{my_accelerator._name=}')
logger.info(f'{my_accelerator._communication_backend_name=}')
logger.info(f'{my_accelerator.HalfTensor().device=}')
logger.info(f'{my_accelerator.total_memory()=}')
-----------[code] test_set.py -----------
---[output] python test_set.py---------
id(cu_accel)=139648165478304
my_accelerator=<deepspeed.accelerator.cuda_accelerator.CUDA_Accelerator object at 0x7f025f4bffa0>
my_accelerator._name='cuda'
my_accelerator._communication_backend_name='nccl'
my_accelerator.HalfTensor().device=device(type='cuda', index=0)
my_accelerator.total_memory()=34089730048
---[output] python test_set.py---------
"""
| 7,666 | 39.781915 | 119 | py |
DeepSpeed | DeepSpeed-master/accelerator/cpu_accelerator.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator
import oneccl_bindings_for_pytorch # noqa: F401
import psutil
import os
# accelerator for Intel CPU
class CPU_Accelerator(DeepSpeedAccelerator):
def __init__(self):
self._name = 'cpu'
self._communication_backend_name = 'ccl'
self.max_mem = psutil.Process().memory_info().rss
def is_synchronized_device(self):
return True
# Device APIs
def device_name(self, device_index=None):
return 'cpu'
def device(self, device_index=None):
return None
def set_device(self, device_index):
return
def current_device(self):
return os.environ.get('LOCAL_RANK', 0)
def current_device_name(self):
return 'cpu'
def device_count(self):
device_count = int(os.environ.get('LOCAL_SIZE', 0))
if device_count > 0:
return device_count
else:
from deepspeed.utils.numa import get_numa_cores
            # Count NUMA nodes to determine the number of cpu accelerators. On machines
            # with HBM in flat mode, the HBM sits on a separate NUMA node that has no
            # cores; ignore such core-less NUMA nodes.
numa_core_lists = get_numa_cores()
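            # get_numa_cores() returns one list of core ids per NUMA node, e.g.
            # (schematically) [[0, 1, ..., 31], [32, ..., 63], [], []] on a host whose
            # flat-mode HBM nodes expose no cores.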
numa_count = 0
for core_list in numa_core_lists:
if len(core_list) > 0:
numa_count += 1
return numa_count
def synchronize(self, device_index=None):
return
# RNG APIs
def random(self):
return torch.random
def set_rng_state(self, new_state, device_index=None):
if device_index == None:
return torch.set_rng_state(new_state)
return torch.set_rng_state(new_state, device_index)
def get_rng_state(self, device_index=None):
return torch.get_rng_state()
def manual_seed(self, seed):
return torch.manual_seed(seed)
def manual_seed_all(self, seed):
return torch.manual_seed(seed)
def initial_seed(self, seed):
return torch.initial_seed(seed)
def default_generator(self, device_index):
return torch.default_generator
# Streams/Events
@property
def Stream(self):
return None
def stream(self, stream):
from deepspeed.runtime.utils import noop_decorator
return noop_decorator
def current_stream(self, device_index=None):
return None
def default_stream(self, device_index=None):
return None
@property
def Event(self):
return None
# Memory management
def empty_cache(self):
return
def get_rss(self):
mem = psutil.Process().memory_info().rss
if mem > self.max_mem:
self.max_mem = mem
return mem
def reset_rss(self):
mem = psutil.Process().memory_info().rss
self.max_mem = mem
return mem
def memory_allocated(self, device_index=None):
return self.get_rss()
def max_memory_allocated(self, device_index=None):
self.get_rss()
return self.max_mem
def reset_max_memory_allocated(self, device_index=None):
self.reset_rss()
return
def memory_cached(self, device_index=None):
return self.get_rss()
def max_memory_cached(self, device_index=None):
self.get_rss()
return self.max_mem
def reset_max_memory_cached(self, device_index=None):
self.reset_rss()
return
def memory_stats(self, device_index=None):
return self.get_rss()
def reset_peak_memory_stats(self, device_index=None):
self.reset_rss()
return
def memory_reserved(self, device_index=None):
return self.get_rss()
def max_memory_reserved(self, device_index=None):
self.get_rss()
return self.max_mem
def total_memory(self, device_index=None):
return psutil.virtual_memory().total
# Misc
def amp(self):
return torch.cpu.amp
def is_available(self):
return True
def range_push(self, msg):
# TODO itt is currently not supported yet
# return torch.profiler.itt.range_push(msg)
return
def range_pop(self):
# TODO itt is currently not supported yet
# return torch.profiler.itt.range_pop()
return
def lazy_call(self, callback):
return callback()
def communication_backend_name(self):
return self._communication_backend_name
# Data types
def is_bf16_supported(self):
return True
def is_fp16_supported(self):
return True
# Tensor operations
@property
def BFloat16Tensor(self):
return torch.BFloat16Tensor
@property
def ByteTensor(self):
return torch.ByteTensor
@property
def DoubleTensor(self):
return torch.DoubleTensor
@property
def FloatTensor(self):
return torch.FloatTensor
@property
def HalfTensor(self):
return torch.HalfTensor
@property
def IntTensor(self):
return torch.IntTensor
@property
def LongTensor(self):
return torch.LongTensor
def pin_memory(self, tensor):
return tensor
def op_builder_dir(self):
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401
return "op_builder.cpu"
except ImportError:
return "deepspeed.ops.op_builder.cpu"
def on_accelerator(self, tensor):
device_str = str(tensor.device)
if device_str.startswith('cpu'):
return True
else:
return False
# create an instance of op builder and return, name specified by class_name
def create_op_builder(self, op_name):
builder_class = self.get_op_builder(op_name)
if builder_class != None:
return builder_class()
return None
# return an op builder class, name specified by class_name
def get_op_builder(self, class_name):
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401
from op_builder.cpu import CCLCommBuilder, NotImplementedBuilder
except ImportError:
from deepspeed.ops.op_builder.cpu import CCLCommBuilder, NotImplementedBuilder
if class_name == "CCLCommBuilder":
return CCLCommBuilder
else:
# return a NotImplementedBuilder to avoid get NoneType[Name] in unit tests
return NotImplementedBuilder
def build_extension(self):
from torch.utils.cpp_extension import BuildExtension
return BuildExtension
| 7,124 | 26.298851 | 102 | py |
DeepSpeed | DeepSpeed-master/accelerator/npu_accelerator.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .abstract_accelerator import DeepSpeedAccelerator
# During the setup stage torch may not be installed; passing when the import
# fails still allows the op builder related API to be executed.
try:
import torch.npu
except ImportError:
pass
class NPU_Accelerator(DeepSpeedAccelerator):
def __init__(self):
self._name = 'npu'
self._communication_backend_name = 'hccl'
def is_synchronized_device(self):
return False
# Device APIs
def device_name(self, device_index=None):
if device_index == None:
return 'npu'
return 'npu:{}'.format(device_index)
def device(self, device_index=None):
return torch.npu.device(device_index)
def set_device(self, device_index):
torch.npu.set_device(device_index)
def current_device(self):
return torch.npu.current_device()
def current_device_name(self):
return 'npu:{}'.format(torch.npu.current_device())
def device_count(self):
return torch.npu.device_count()
def synchronize(self, device_index=None):
return torch.npu.synchronize(device_index)
# RNG APIs
def random(self):
return torch.random
def set_rng_state(self, new_state, device_index=None):
if device_index is None:
return torch.npu.set_rng_state(new_state)
return torch.npu.set_rng_state(new_state, device_index)
def get_rng_state(self, device_index=None):
if device_index is None:
return torch.npu.get_rng_state()
return torch.npu.get_rng_state(device_index)
def manual_seed(self, seed):
return torch.npu.manual_seed(seed)
def manual_seed_all(self, seed):
return torch.npu.manual_seed_all(seed)
def initial_seed(self, seed):
return torch.npu.initial_seed(seed)
def default_generator(self, device_index):
return torch.npu.default_generators[device_index]
# Streams/Events
@property
def Stream(self):
return torch.npu.Stream
def stream(self, stream):
return torch.npu.stream(stream)
def current_stream(self, device_index=None):
return torch.npu.current_stream(device_index)
def default_stream(self, device_index=None):
return torch.npu.default_stream(device_index)
@property
def Event(self):
return torch.npu.Event
# Memory management
def empty_cache(self):
return torch.npu.empty_cache()
def memory_allocated(self, device_index=None):
return torch.npu.memory_allocated(device_index)
def max_memory_allocated(self, device_index=None):
return torch.npu.max_memory_allocated(device_index)
def reset_max_memory_allocated(self, device_index=None):
return torch.npu.reset_max_memory_allocated(device_index)
def memory_cached(self, device_index=None):
return torch.npu.memory_cached(device_index)
def max_memory_cached(self, device_index=None):
return torch.npu.max_memory_cached(device_index)
def reset_max_memory_cached(self, device_index=None):
return torch.npu.reset_max_memory_cached(device_index)
def memory_stats(self, device_index=None):
if hasattr(torch.npu, 'memory_stats'):
return torch.npu.memory_stats(device_index)
def reset_peak_memory_stats(self, device_index=None):
if hasattr(torch.npu, 'reset_peak_memory_stats'):
return torch.npu.reset_peak_memory_stats(device_index)
def memory_reserved(self, device_index=None):
if hasattr(torch.npu, 'memory_reserved'):
return torch.npu.memory_reserved(device_index)
def max_memory_reserved(self, device_index=None):
if hasattr(torch.npu, 'max_memory_reserved'):
return torch.npu.max_memory_reserved(device_index)
def total_memory(self, device_index=None):
return torch.npu.get_device_properties(device_index).total_memory
# Data types
def is_bf16_supported(self):
return torch.npu.is_bf16_supported()
def is_fp16_supported(self):
return True
# Misc
def amp(self):
if hasattr(torch.npu, 'amp'):
return torch.npu.amp
return None
def is_available(self):
return torch.npu.is_available()
def range_push(self, msg):
return
def range_pop(self):
return
def lazy_call(self, callback):
return torch.npu._lazy_call(callback)
def communication_backend_name(self):
return self._communication_backend_name
# Tensor operations
@property
def BFloat16Tensor(self):
return torch.npu.BFloat16Tensor
@property
def ByteTensor(self):
return torch.npu.ByteTensor
@property
def DoubleTensor(self):
return torch.npu.DoubleTensor
@property
def FloatTensor(self):
return torch.npu.FloatTensor
@property
def HalfTensor(self):
return torch.npu.HalfTensor
@property
def IntTensor(self):
return torch.npu.IntTensor
@property
def LongTensor(self):
return torch.npu.LongTensor
def pin_memory(self, tensor):
return tensor.pin_memory()
def on_accelerator(self, tensor):
device_str = str(tensor.device)
if device_str.startswith('npu:'):
return True
else:
return False
def op_builder_dir(self):
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401
return "op_builder.npu"
except ImportError:
return "deepspeed.ops.op_builder.npu"
# dict that holds class name <--> class type mapping i.e.
# 'AsyncIOBuilder': <class 'op_builder.async_io.AsyncIOBuilder'>
# this dict will be filled at init stage
class_dict = None
def _lazy_init_class_dict(self):
if self.class_dict != None:
return
else:
self.class_dict = {}
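            # NOTE: class_dict is currently left empty, so create_op_builder() and
            # get_op_builder() below return None until NPU-specific builders are
            # registered here.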
# create an instance of op builder and return, name specified by class_name
def create_op_builder(self, class_name):
self._lazy_init_class_dict()
if class_name in self.class_dict:
return self.class_dict[class_name]()
else:
return None
# return an op builder class, name specified by class_name
def get_op_builder(self, class_name):
self._lazy_init_class_dict()
if class_name in self.class_dict:
return self.class_dict[class_name]
else:
return None
def build_extension(self):
from torch.utils.cpp_extension import BuildExtension
return BuildExtension
| 6,893 | 27.487603 | 102 | py |
DeepSpeed | DeepSpeed-master/accelerator/cuda_accelerator.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import pkgutil
import importlib
from .abstract_accelerator import DeepSpeedAccelerator
# During the setup stage torch may not be installed; passing when the import
# fails still allows the op builder related API to be executed.
try:
import torch.cuda
except ImportError:
pass
class CUDA_Accelerator(DeepSpeedAccelerator):
def __init__(self):
self._name = 'cuda'
self._communication_backend_name = 'nccl'
def is_synchronized_device(self):
return False
# Device APIs
def device_name(self, device_index=None):
if device_index == None:
return 'cuda'
return 'cuda:{}'.format(device_index)
def device(self, device_index=None):
return torch.cuda.device(device_index)
def set_device(self, device_index):
torch.cuda.set_device(device_index)
def current_device(self):
return torch.cuda.current_device()
def current_device_name(self):
return 'cuda:{}'.format(torch.cuda.current_device())
def device_count(self):
return torch.cuda.device_count()
def synchronize(self, device_index=None):
return torch.cuda.synchronize(device_index)
# RNG APIs
def random(self):
return torch.random
def set_rng_state(self, new_state, device_index=None):
if device_index is None:
return torch.cuda.set_rng_state(new_state)
return torch.cuda.set_rng_state(new_state, device_index)
def get_rng_state(self, device_index=None):
if device_index is None:
return torch.cuda.get_rng_state()
return torch.cuda.get_rng_state(device_index)
def manual_seed(self, seed):
return torch.cuda.manual_seed(seed)
def manual_seed_all(self, seed):
return torch.cuda.manual_seed_all(seed)
def initial_seed(self, seed):
return torch.cuda.initial_seed(seed)
def default_generator(self, device_index):
return torch.cuda.default_generators[device_index]
# Streams/Events
@property
def Stream(self):
return torch.cuda.Stream
def stream(self, stream):
return torch.cuda.stream(stream)
def current_stream(self, device_index=None):
return torch.cuda.current_stream(device_index)
def default_stream(self, device_index=None):
return torch.cuda.default_stream(device_index)
@property
def Event(self):
return torch.cuda.Event
# Memory management
def empty_cache(self):
return torch.cuda.empty_cache()
def memory_allocated(self, device_index=None):
return torch.cuda.memory_allocated(device_index)
def max_memory_allocated(self, device_index=None):
return torch.cuda.max_memory_allocated(device_index)
def reset_max_memory_allocated(self, device_index=None):
return torch.cuda.reset_max_memory_allocated(device_index)
def memory_cached(self, device_index=None):
return torch.cuda.memory_cached(device_index)
def max_memory_cached(self, device_index=None):
return torch.cuda.max_memory_cached(device_index)
def reset_max_memory_cached(self, device_index=None):
return torch.cuda.reset_max_memory_cached(device_index)
def memory_stats(self, device_index=None):
if hasattr(torch.cuda, 'memory_stats'):
return torch.cuda.memory_stats(device_index)
def reset_peak_memory_stats(self, device_index=None):
if hasattr(torch.cuda, 'reset_peak_memory_stats'):
return torch.cuda.reset_peak_memory_stats(device_index)
def memory_reserved(self, device_index=None):
if hasattr(torch.cuda, 'memory_reserved'):
return torch.cuda.memory_reserved(device_index)
def max_memory_reserved(self, device_index=None):
if hasattr(torch.cuda, 'max_memory_reserved'):
return torch.cuda.max_memory_reserved(device_index)
def total_memory(self, device_index=None):
return torch.cuda.get_device_properties(device_index).total_memory
# Data types
def is_bf16_supported(self):
return torch.cuda.is_bf16_supported()
def is_fp16_supported(self):
major, _ = torch.cuda.get_device_capability()
if major >= 7:
return True
else:
return False
# Misc
def amp(self):
if hasattr(torch.cuda, 'amp'):
return torch.cuda.amp
return None
def is_available(self):
return torch.cuda.is_available()
def range_push(self, msg):
if hasattr(torch.cuda.nvtx, 'range_push'):
return torch.cuda.nvtx.range_push(msg)
def range_pop(self):
if hasattr(torch.cuda.nvtx, 'range_pop'):
return torch.cuda.nvtx.range_pop()
def lazy_call(self, callback):
return torch.cuda._lazy_call(callback)
def communication_backend_name(self):
return self._communication_backend_name
# Tensor operations
@property
def BFloat16Tensor(self):
return torch.cuda.BFloat16Tensor
@property
def ByteTensor(self):
return torch.cuda.ByteTensor
@property
def DoubleTensor(self):
return torch.cuda.DoubleTensor
@property
def FloatTensor(self):
return torch.cuda.FloatTensor
@property
def HalfTensor(self):
return torch.cuda.HalfTensor
@property
def IntTensor(self):
return torch.cuda.IntTensor
@property
def LongTensor(self):
return torch.cuda.LongTensor
def pin_memory(self, tensor):
return tensor.pin_memory()
def on_accelerator(self, tensor):
device_str = str(tensor.device)
if device_str.startswith('cuda:'):
return True
else:
return False
def op_builder_dir(self):
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401
return "op_builder"
except ImportError:
return "deepspeed.ops.op_builder"
# dict that holds class name <--> class type mapping i.e.
# 'AsyncIOBuilder': <class 'op_builder.async_io.AsyncIOBuilder'>
# this dict will be filled at init stage
class_dict = None
def _lazy_init_class_dict(self):
if self.class_dict != None:
return
else:
self.class_dict = {}
# begin initialize for create_op_builder()
# put all valid class name <--> class type mapping into class_dict
op_builder_dir = self.op_builder_dir()
op_builder_module = importlib.import_module(op_builder_dir)
for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(op_builder_module.__file__)]):
# avoid self references
if module_name != 'all_ops' and module_name != 'builder' and module_name != 'cpu':
module = importlib.import_module("{}.{}".format(op_builder_dir, module_name))
for member_name in module.__dir__():
if member_name.endswith(
'Builder'
) and member_name != "OpBuilder" and member_name != "CUDAOpBuilder" and member_name != "TorchCPUOpBuilder": # avoid abstract classes
if not member_name in self.class_dict:
self.class_dict[member_name] = getattr(module, member_name)
# end initialize for create_op_builder()
# create an instance of op builder and return, name specified by class_name
def create_op_builder(self, class_name):
self._lazy_init_class_dict()
if class_name in self.class_dict:
return self.class_dict[class_name]()
else:
return None
# return an op builder class, name specified by class_name
def get_op_builder(self, class_name):
self._lazy_init_class_dict()
if class_name in self.class_dict:
return self.class_dict[class_name]
else:
return None
def build_extension(self):
from torch.utils.cpp_extension import BuildExtension
return BuildExtension
| 8,389 | 30.423221 | 157 | py |
DeepSpeed | DeepSpeed-master/op_builder/transformer_inference.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .builder import CUDAOpBuilder, installed_cuda_version
class InferenceBuilder(CUDAOpBuilder):
BUILD_VAR = "DS_BUILD_TRANSFORMER_INFERENCE"
NAME = "transformer_inference"
def __init__(self, name=None):
name = self.NAME if name is None else name
super().__init__(name=name)
def absolute_name(self):
return f'deepspeed.ops.transformer.inference.{self.NAME}_op'
def is_compatible(self, verbose=True):
try:
import torch
except ImportError:
self.warning("Please install torch if trying to pre-compile inference kernels")
return False
cuda_okay = True
if not self.is_rocm_pytorch() and torch.cuda.is_available():
sys_cuda_major, _ = installed_cuda_version()
torch_cuda_major = int(torch.version.cuda.split('.')[0])
cuda_capability = torch.cuda.get_device_properties(0).major
if cuda_capability < 6:
self.warning("NVIDIA Inference is only supported on Pascal and newer architectures")
cuda_okay = False
if cuda_capability >= 8:
if torch_cuda_major < 11 or sys_cuda_major < 11:
self.warning("On Ampere and higher architectures please use CUDA 11+")
cuda_okay = False
return super().is_compatible(verbose) and cuda_okay
def filter_ccs(self, ccs):
ccs_retained = []
ccs_pruned = []
for cc in ccs:
if int(cc[0]) >= 6:
ccs_retained.append(cc)
else:
ccs_pruned.append(cc)
if len(ccs_pruned) > 0:
self.warning(f"Filtered compute capabilities {ccs_pruned}")
return ccs_retained
def sources(self):
return [
'csrc/transformer/inference/csrc/pt_binding.cpp',
'csrc/transformer/inference/csrc/gelu.cu',
'csrc/transformer/inference/csrc/relu.cu',
'csrc/transformer/inference/csrc/layer_norm.cu',
'csrc/transformer/inference/csrc/rms_norm.cu',
'csrc/transformer/inference/csrc/softmax.cu',
'csrc/transformer/inference/csrc/dequantize.cu',
'csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu',
'csrc/transformer/inference/csrc/transform.cu',
'csrc/transformer/inference/csrc/pointwise_ops.cu',
]
def extra_ldflags(self):
if not self.is_rocm_pytorch():
return ['-lcurand']
else:
return []
def include_paths(self):
return ['csrc/transformer/inference/includes', 'csrc/includes']
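# Illustrative sketch (one way to pre-check and JIT-load these kernels; the inference
# transformer module loads them via InferenceBuilder().load()):
#
#   builder = InferenceBuilder()
#   if builder.is_compatible():
#       inference_module = builder.load()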
| 2,745 | 35.613333 | 100 | py |
DeepSpeed | DeepSpeed-master/op_builder/cpu_adagrad.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
from .builder import TorchCPUOpBuilder
class CPUAdagradBuilder(TorchCPUOpBuilder):
BUILD_VAR = "DS_BUILD_CPU_ADAGRAD"
NAME = "cpu_adagrad"
def __init__(self):
super().__init__(name=self.NAME)
def absolute_name(self):
return f'deepspeed.ops.adagrad.{self.NAME}_op'
def sources(self):
if self.build_for_cpu:
return ['csrc/adagrad/cpu_adagrad.cpp']
return ['csrc/adagrad/cpu_adagrad.cpp', 'csrc/common/custom_cuda_kernel.cu']
def libraries_args(self):
args = super().libraries_args()
if self.build_for_cpu:
return args
if not self.is_rocm_pytorch():
args += ['curand']
return args
def include_paths(self):
import torch
if self.build_for_cpu:
CUDA_INCLUDE = []
elif not self.is_rocm_pytorch():
CUDA_INCLUDE = [os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include")]
else:
CUDA_INCLUDE = [
os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include"),
os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "rocrand"),
os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "hiprand"),
]
return ['csrc/includes'] + CUDA_INCLUDE
| 1,420 | 28.604167 | 89 | py |
DeepSpeed | DeepSpeed-master/op_builder/cpu_adam.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
from .builder import TorchCPUOpBuilder
class CPUAdamBuilder(TorchCPUOpBuilder):
BUILD_VAR = "DS_BUILD_CPU_ADAM"
NAME = "cpu_adam"
def __init__(self):
super().__init__(name=self.NAME)
def absolute_name(self):
return f'deepspeed.ops.adam.{self.NAME}_op'
def sources(self):
if self.build_for_cpu:
return ['csrc/adam/cpu_adam.cpp']
return ['csrc/adam/cpu_adam.cpp', 'csrc/common/custom_cuda_kernel.cu']
def libraries_args(self):
args = super().libraries_args()
if self.build_for_cpu:
return args
if not self.is_rocm_pytorch():
args += ['curand']
return args
def include_paths(self):
import torch
if self.build_for_cpu:
CUDA_INCLUDE = []
elif not self.is_rocm_pytorch():
CUDA_INCLUDE = [os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include")]
else:
CUDA_INCLUDE = [
os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include"),
os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "rocrand"),
os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "hiprand"),
]
return ['csrc/includes'] + CUDA_INCLUDE
| 1,397 | 27.530612 | 89 | py |
DeepSpeed | DeepSpeed-master/op_builder/transformer.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .builder import CUDAOpBuilder
class TransformerBuilder(CUDAOpBuilder):
BUILD_VAR = "DS_BUILD_TRANSFORMER"
NAME = "transformer"
def __init__(self, name=None):
name = self.NAME if name is None else name
super().__init__(name=name)
def absolute_name(self):
return f'deepspeed.ops.transformer.{self.NAME}_op'
def extra_ldflags(self):
if not self.is_rocm_pytorch():
return ['-lcurand']
else:
return []
def sources(self):
return [
'csrc/transformer/ds_transformer_cuda.cpp', 'csrc/transformer/cublas_wrappers.cu',
'csrc/transformer/transform_kernels.cu', 'csrc/transformer/gelu_kernels.cu',
'csrc/transformer/dropout_kernels.cu', 'csrc/transformer/normalize_kernels.cu',
'csrc/transformer/softmax_kernels.cu', 'csrc/transformer/general_kernels.cu'
]
def include_paths(self):
includes = ['csrc/includes']
if self.is_rocm_pytorch():
from torch.utils.cpp_extension import ROCM_HOME
includes += ['{}/hiprand/include'.format(ROCM_HOME), '{}/rocrand/include'.format(ROCM_HOME)]
return includes
| 1,294 | 31.375 | 104 | py |
DeepSpeed | DeepSpeed-master/op_builder/__init__.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import sys
import os
import pkgutil
import importlib
from .builder import get_default_compute_capabilities, OpBuilder
# Do not remove, required for abstract accelerator to detect if we have a deepspeed or 3p op_builder
__deepspeed__ = True
# List of all available op builders from deepspeed op_builder
try:
import deepspeed.ops.op_builder # noqa: F401
op_builder_dir = "deepspeed.ops.op_builder"
except ImportError:
op_builder_dir = "op_builder"
__op_builders__ = []
this_module = sys.modules[__name__]
def builder_closure(member_name):
if op_builder_dir == "op_builder":
        # at installation time the builder cannot be created because torch is not
        # installed, so return a closure instead
def _builder():
from deepspeed.accelerator import get_accelerator
builder = get_accelerator().create_op_builder(member_name)
return builder
return _builder
else:
# during runtime, return op builder class directly
from deepspeed.accelerator import get_accelerator
builder = get_accelerator().get_op_builder(member_name)
return builder
# reflect builder names and bind builder closures, so that e.g. 'TransformerBuilder()' creates the op builder for the current accelerator
for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(this_module.__file__)]):
if module_name != 'all_ops' and module_name != 'builder':
module = importlib.import_module(f".{module_name}", package=op_builder_dir)
for member_name in module.__dir__():
if member_name.endswith('Builder') and member_name != "OpBuilder" and member_name != "CUDAOpBuilder":
# assign builder name to variable with same name
# the following is equivalent to i.e. TransformerBuilder = "TransformerBuilder"
this_module.__dict__[member_name] = builder_closure(member_name)
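# Illustrative sketch: once the loop above has run, builder names resolve through the
# active accelerator, e.g.
#
#   from deepspeed.ops.op_builder import FusedAdamBuilder
#   fused_adam_module = FusedAdamBuilder().load()  # JIT-builds or loads the fused adam op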
| 1,990 | 35.87037 | 122 | py |
DeepSpeed | DeepSpeed-master/op_builder/fused_lamb.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .builder import CUDAOpBuilder
import sys
class FusedLambBuilder(CUDAOpBuilder):
BUILD_VAR = 'DS_BUILD_FUSED_LAMB'
NAME = "fused_lamb"
def __init__(self):
super().__init__(name=self.NAME)
def absolute_name(self):
return f'deepspeed.ops.lamb.{self.NAME}_op'
def sources(self):
return ['csrc/lamb/fused_lamb_cuda.cpp', 'csrc/lamb/fused_lamb_cuda_kernel.cu']
def include_paths(self):
return ['csrc/includes']
def cxx_args(self):
args = super().cxx_args()
return args + self.version_dependent_macros()
def nvcc_args(self):
nvcc_flags = ['-O3'] + self.version_dependent_macros()
if self.is_rocm_pytorch():
ROCM_MAJOR, ROCM_MINOR = self.installed_rocm_version()
nvcc_flags += ['-DROCM_VERSION_MAJOR=%s' % ROCM_MAJOR, '-DROCM_VERSION_MINOR=%s' % ROCM_MINOR]
else:
nvcc_flags.extend(
['-allow-unsupported-compiler' if sys.platform == "win32" else '', '-lineinfo', '--use_fast_math'] +
self.compute_capability_args())
return nvcc_flags
| 1,216 | 28.682927 | 116 | py |
DeepSpeed | DeepSpeed-master/op_builder/fused_adam.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .builder import CUDAOpBuilder
import sys
class FusedAdamBuilder(CUDAOpBuilder):
BUILD_VAR = "DS_BUILD_FUSED_ADAM"
NAME = "fused_adam"
def __init__(self):
super().__init__(name=self.NAME)
def absolute_name(self):
return f'deepspeed.ops.adam.{self.NAME}_op'
def sources(self):
return ['csrc/adam/fused_adam_frontend.cpp', 'csrc/adam/multi_tensor_adam.cu']
def include_paths(self):
return ['csrc/includes', 'csrc/adam']
def cxx_args(self):
args = super().cxx_args()
return args + self.version_dependent_macros()
def nvcc_args(self):
nvcc_flags = ['-O3'] + self.version_dependent_macros()
if not self.is_rocm_pytorch():
nvcc_flags.extend(
['-allow-unsupported-compiler' if sys.platform == "win32" else '', '-lineinfo', '--use_fast_math'] +
self.compute_capability_args())
return nvcc_flags
| 1,044 | 26.5 | 116 | py |
DeepSpeed | DeepSpeed-master/op_builder/random_ltd.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .builder import CUDAOpBuilder
class RandomLTDBuilder(CUDAOpBuilder):
BUILD_VAR = "DS_BUILD_RANDOM_LTD"
NAME = "random_ltd"
def __init__(self, name=None):
name = self.NAME if name is None else name
super().__init__(name=name)
def absolute_name(self):
return f'deepspeed.ops.{self.NAME}_op'
def extra_ldflags(self):
if not self.is_rocm_pytorch():
return ['-lcurand']
else:
return []
def sources(self):
return [
'csrc/random_ltd/pt_binding.cpp', 'csrc/random_ltd/gather_scatter.cu',
'csrc/random_ltd/slice_attn_masks.cu', 'csrc/random_ltd/token_sort.cu'
]
def include_paths(self):
includes = ['csrc/includes']
if self.is_rocm_pytorch():
from torch.utils.cpp_extension import ROCM_HOME
includes += ['{}/hiprand/include'.format(ROCM_HOME), '{}/rocrand/include'.format(ROCM_HOME)]
return includes
| 1,079 | 27.421053 | 104 | py |
DeepSpeed | DeepSpeed-master/op_builder/builder.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import sys
import time
import importlib
from pathlib import Path
import subprocess
import shlex
import shutil
import tempfile
import distutils.ccompiler
import distutils.log
import distutils.sysconfig
from distutils.errors import CompileError, LinkError
from abc import ABC, abstractmethod
from typing import List
YELLOW = '\033[93m'
END = '\033[0m'
WARNING = f"{YELLOW} [WARNING] {END}"
DEFAULT_TORCH_EXTENSION_PATH = "/tmp/torch_extensions"
DEFAULT_COMPUTE_CAPABILITIES = "6.0;6.1;7.0"
try:
import torch
except ImportError:
print(f"{WARNING} unable to import torch, please install it if you want to pre-compile any deepspeed ops.")
else:
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
def installed_cuda_version(name=""):
import torch.utils.cpp_extension
cuda_home = torch.utils.cpp_extension.CUDA_HOME
assert cuda_home is not None, "CUDA_HOME does not exist, unable to compile CUDA op(s)"
# Ensure there is not a cuda version mismatch between torch and nvcc compiler
output = subprocess.check_output([cuda_home + "/bin/nvcc", "-V"], universal_newlines=True)
output_split = output.split()
release_idx = output_split.index("release")
release = output_split[release_idx + 1].replace(',', '').split(".")
# Ignore patch versions, only look at major + minor
cuda_major, cuda_minor = release[:2]
return int(cuda_major), int(cuda_minor)
def get_default_compute_capabilities():
compute_caps = DEFAULT_COMPUTE_CAPABILITIES
import torch.utils.cpp_extension
if torch.utils.cpp_extension.CUDA_HOME is not None and installed_cuda_version()[0] >= 11:
if installed_cuda_version()[0] == 11 and installed_cuda_version()[1] == 0:
# Special treatment of CUDA 11.0 because compute_86 is not supported.
compute_caps += ";8.0"
else:
compute_caps += ";8.0;8.6"
return compute_caps
# list compatible minor CUDA versions - so that for example pytorch built with cuda-11.0 can be used
# to build deepspeed and system-wide installed cuda 11.2
cuda_minor_mismatch_ok = {
10: [
"10.0",
"10.1",
"10.2",
],
11: ["11.0", "11.1", "11.2", "11.3", "11.4", "11.5", "11.6", "11.7", "11.8"],
12: ["12.0", "12.1"],
}
def assert_no_cuda_mismatch(name=""):
cuda_major, cuda_minor = installed_cuda_version(name)
sys_cuda_version = f'{cuda_major}.{cuda_minor}'
torch_cuda_version = ".".join(torch.version.cuda.split('.')[:2])
# This is a show-stopping error, should probably not proceed past this
if sys_cuda_version != torch_cuda_version:
if (cuda_major in cuda_minor_mismatch_ok and sys_cuda_version in cuda_minor_mismatch_ok[cuda_major]
and torch_cuda_version in cuda_minor_mismatch_ok[cuda_major]):
print(f"Installed CUDA version {sys_cuda_version} does not match the "
f"version torch was compiled with {torch.version.cuda} "
"but since the APIs are compatible, accepting this combination")
return True
elif os.getenv("DS_SKIP_CUDA_CHECK", "0") == "1":
print(
f"{WARNING} DeepSpeed Op Builder: Installed CUDA version {sys_cuda_version} does not match the "
f"version torch was compiled with {torch.version.cuda}."
"Detected `DS_SKIP_CUDA_CHECK=1`: Allowing this combination of CUDA, but it may result in unexpected behavior."
)
return True
raise Exception(f">- DeepSpeed Op Builder: Installed CUDA version {sys_cuda_version} does not match the "
f"version torch was compiled with {torch.version.cuda}, unable to compile "
"cuda/cpp extensions without a matching cuda version.")
return True
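# Illustrative note: system CUDA 11.7 with a torch build compiled against CUDA 11.8 is
# accepted above (same major version, both listed in cuda_minor_mismatch_ok), while a
# major-version mismatch raises unless DS_SKIP_CUDA_CHECK=1 is exported.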
class OpBuilder(ABC):
_rocm_version = None
_is_rocm_pytorch = None
def __init__(self, name):
self.name = name
self.jit_mode = False
self.build_for_cpu = False
self.enable_bf16 = False
self.error_log = None
@abstractmethod
def absolute_name(self):
'''
Returns absolute build path for cases where the op is pre-installed, e.g., deepspeed.ops.adam.cpu_adam
will be installed as something like: deepspeed/ops/adam/cpu_adam.so
'''
pass
@abstractmethod
def sources(self):
'''
Returns list of source files for your op, relative to root of deepspeed package (i.e., DeepSpeed/deepspeed)
'''
pass
def hipify_extension(self):
pass
@staticmethod
def validate_torch_version(torch_info):
install_torch_version = torch_info['version']
current_torch_version = ".".join(torch.__version__.split('.')[:2])
if install_torch_version != current_torch_version:
raise RuntimeError("PyTorch version mismatch! DeepSpeed ops were compiled and installed "
"with a different version than what is being used at runtime. "
f"Please re-install DeepSpeed or switch torch versions. "
f"Install torch version={install_torch_version}, "
f"Runtime torch version={current_torch_version}")
@staticmethod
def validate_torch_op_version(torch_info):
if not OpBuilder.is_rocm_pytorch():
current_cuda_version = ".".join(torch.version.cuda.split('.')[:2])
install_cuda_version = torch_info['cuda_version']
if install_cuda_version != current_cuda_version:
raise RuntimeError("CUDA version mismatch! DeepSpeed ops were compiled and installed "
"with a different version than what is being used at runtime. "
f"Please re-install DeepSpeed or switch torch versions. "
f"Install CUDA version={install_cuda_version}, "
f"Runtime CUDA version={current_cuda_version}")
else:
current_hip_version = ".".join(torch.version.hip.split('.')[:2])
install_hip_version = torch_info['hip_version']
if install_hip_version != current_hip_version:
raise RuntimeError("HIP version mismatch! DeepSpeed ops were compiled and installed "
"with a different version than what is being used at runtime. "
f"Please re-install DeepSpeed or switch torch versions. "
f"Install HIP version={install_hip_version}, "
f"Runtime HIP version={current_hip_version}")
@staticmethod
def is_rocm_pytorch():
if OpBuilder._is_rocm_pytorch is not None:
return OpBuilder._is_rocm_pytorch
_is_rocm_pytorch = False
try:
import torch
except ImportError:
pass
else:
if TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 5):
_is_rocm_pytorch = hasattr(torch.version, 'hip') and torch.version.hip is not None
if _is_rocm_pytorch:
from torch.utils.cpp_extension import ROCM_HOME
_is_rocm_pytorch = ROCM_HOME is not None
OpBuilder._is_rocm_pytorch = _is_rocm_pytorch
return OpBuilder._is_rocm_pytorch
@staticmethod
def installed_rocm_version():
if OpBuilder._rocm_version:
return OpBuilder._rocm_version
ROCM_MAJOR = '0'
ROCM_MINOR = '0'
if OpBuilder.is_rocm_pytorch():
from torch.utils.cpp_extension import ROCM_HOME
rocm_ver_file = Path(ROCM_HOME).joinpath(".info/version-dev")
if rocm_ver_file.is_file():
with open(rocm_ver_file, 'r') as file:
ROCM_VERSION_DEV_RAW = file.read()
elif "rocm" in torch.__version__:
ROCM_VERSION_DEV_RAW = torch.__version__.split("rocm")[1]
else:
assert False, "Could not detect ROCm version"
assert ROCM_VERSION_DEV_RAW != "", "Could not detect ROCm version"
ROCM_MAJOR = ROCM_VERSION_DEV_RAW.split('.')[0]
ROCM_MINOR = ROCM_VERSION_DEV_RAW.split('.')[1]
OpBuilder._rocm_version = (int(ROCM_MAJOR), int(ROCM_MINOR))
return OpBuilder._rocm_version
def include_paths(self):
'''
Returns list of include paths, relative to root of deepspeed package (i.e., DeepSpeed/deepspeed)
'''
return []
def nvcc_args(self):
'''
Returns optional list of compiler flags to forward to nvcc when building CUDA sources
'''
return []
def cxx_args(self):
'''
Returns optional list of compiler flags to forward to the build
'''
return []
def is_compatible(self, verbose=True):
'''
Check if all non-python dependencies are satisfied to build this op
'''
return True
def extra_ldflags(self):
return []
def libraries_installed(self, libraries):
valid = False
check_cmd = 'dpkg -l'
for lib in libraries:
            result = subprocess.Popen(f'{check_cmd} {lib}', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
valid = valid or result.wait() == 0
return valid
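    # Hedged example: this check relies on dpkg, so it is only meaningful on Debian/Ubuntu-style
    # systems. A hypothetical call such as
    #   self.libraries_installed(['libaio-dev'])
    # returns True if `dpkg -l libaio-dev` exits with status 0 for at least one listed package.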
def has_function(self, funcname, libraries, verbose=False):
'''
Test for existence of a function within a tuple of libraries.
This is used as a smoke test to check whether a certain library is available.
As a test, this creates a simple C program that calls the specified function,
and then distutils is used to compile that program and link it with the specified libraries.
Returns True if both the compile and link are successful, False otherwise.
'''
tempdir = None # we create a temporary directory to hold various files
filestderr = None # handle to open file to which we redirect stderr
oldstderr = None # file descriptor for stderr
try:
# Echo compile and link commands that are used.
if verbose:
distutils.log.set_verbosity(1)
# Create a compiler object.
compiler = distutils.ccompiler.new_compiler(verbose=verbose)
# Configure compiler and linker to build according to Python install.
distutils.sysconfig.customize_compiler(compiler)
# Create a temporary directory to hold test files.
tempdir = tempfile.mkdtemp()
# Define a simple C program that calls the function in question
prog = "void %s(void); int main(int argc, char** argv) { %s(); return 0; }" % (funcname, funcname)
# Write the test program to a file.
filename = os.path.join(tempdir, 'test.c')
with open(filename, 'w') as f:
f.write(prog)
# Redirect stderr file descriptor to a file to silence compile/link warnings.
if not verbose:
filestderr = open(os.path.join(tempdir, 'stderr.txt'), 'w')
oldstderr = os.dup(sys.stderr.fileno())
os.dup2(filestderr.fileno(), sys.stderr.fileno())
# Workaround for behavior in distutils.ccompiler.CCompiler.object_filenames()
# Otherwise, a local directory will be used instead of tempdir
drive, driveless_filename = os.path.splitdrive(filename)
root_dir = driveless_filename[0] if os.path.isabs(driveless_filename) else ''
output_dir = os.path.join(drive, root_dir)
# Attempt to compile the C program into an object file.
cflags = shlex.split(os.environ.get('CFLAGS', ""))
objs = compiler.compile([filename], output_dir=output_dir, extra_preargs=self.strip_empty_entries(cflags))
# Attempt to link the object file into an executable.
# Be sure to tack on any libraries that have been specified.
ldflags = shlex.split(os.environ.get('LDFLAGS', ""))
compiler.link_executable(objs,
os.path.join(tempdir, 'a.out'),
extra_preargs=self.strip_empty_entries(ldflags),
libraries=libraries)
# Compile and link succeeded
return True
except CompileError:
return False
except LinkError:
return False
except:
return False
finally:
# Restore stderr file descriptor and close the stderr redirect file.
if oldstderr is not None:
os.dup2(oldstderr, sys.stderr.fileno())
if filestderr is not None:
filestderr.close()
# Delete the temporary directory holding the test program and stderr files.
if tempdir is not None:
shutil.rmtree(tempdir)
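    # Hedged usage sketch (library and symbol names are illustrative):
    #   self.has_function('io_submit', ('aio',))
    # returns True only if the generated test program both compiles and links against libaio.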
def strip_empty_entries(self, args):
'''
Drop any empty strings from the list of compile and link flags
'''
return [x for x in args if len(x) > 0]
def cpu_arch(self):
try:
from cpuinfo import get_cpu_info
except ImportError as e:
cpu_info = self._backup_cpuinfo()
if cpu_info is None:
return "-march=native"
try:
cpu_info = get_cpu_info()
except Exception as e:
self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), "
"falling back to `lscpu` to get this information.")
cpu_info = self._backup_cpuinfo()
if cpu_info is None:
return "-march=native"
if cpu_info['arch'].startswith('PPC_'):
# gcc does not provide -march on PowerPC, use -mcpu instead
return '-mcpu=native'
return '-march=native'
def is_cuda_enable(self):
try:
assert_no_cuda_mismatch(self.name)
return '-D__ENABLE_CUDA__'
except BaseException:
print(f"{WARNING} {self.name} cuda is missing or is incompatible with installed torch, "
"only cpu ops can be compiled!")
return '-D__DISABLE_CUDA__'
def _backup_cpuinfo(self):
# Construct cpu_info dict from lscpu that is similar to what py-cpuinfo provides
if not self.command_exists('lscpu'):
self.warning(f"{self.name} attempted to query 'lscpu' after failing to use py-cpuinfo "
"to detect the CPU architecture. 'lscpu' does not appear to exist on "
"your system, will fall back to use -march=native and non-vectorized execution.")
return None
result = subprocess.check_output('lscpu', shell=True)
result = result.decode('utf-8').strip().lower()
cpu_info = {}
cpu_info['arch'] = None
cpu_info['flags'] = ""
if 'genuineintel' in result or 'authenticamd' in result:
cpu_info['arch'] = 'X86_64'
if 'avx512' in result:
cpu_info['flags'] += 'avx512,'
elif 'avx512f' in result:
cpu_info['flags'] += 'avx512f,'
if 'avx2' in result:
cpu_info['flags'] += 'avx2'
elif 'ppc64le' in result:
cpu_info['arch'] = "PPC_"
return cpu_info
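    # Illustrative shape of the fallback dict (contents depend on the host CPU):
    #   {'arch': 'X86_64', 'flags': 'avx512,avx2'} or {'arch': 'PPC_', 'flags': ''}
    # A None return means lscpu is unavailable and callers fall back to scalar/native defaults.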
def simd_width(self):
try:
from cpuinfo import get_cpu_info
except ImportError as e:
cpu_info = self._backup_cpuinfo()
if cpu_info is None:
return '-D__SCALAR__'
try:
cpu_info = get_cpu_info()
except Exception as e:
self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), "
"falling back to `lscpu` to get this information.")
cpu_info = self._backup_cpuinfo()
if cpu_info is None:
return '-D__SCALAR__'
if cpu_info['arch'] == 'X86_64':
if 'avx512' in cpu_info['flags'] or 'avx512f' in cpu_info['flags']:
return '-D__AVX512__'
elif 'avx2' in cpu_info['flags']:
return '-D__AVX256__'
return '-D__SCALAR__'
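    # Summary of the mapping implemented above: AVX-512-capable x86_64 -> '-D__AVX512__',
    # AVX2-only x86_64 -> '-D__AVX256__', anything else (or detection failure) -> '-D__SCALAR__'.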
def command_exists(self, cmd):
if '|' in cmd:
cmds = cmd.split("|")
else:
cmds = [cmd]
valid = False
for cmd in cmds:
result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True)
valid = valid or result.wait() == 0
if not valid and len(cmds) > 1:
print(f"{WARNING} {self.name} requires one of the following commands '{cmds}', but it does not exist!")
elif not valid and len(cmds) == 1:
print(f"{WARNING} {self.name} requires the '{cmd}' command, but it does not exist!")
return valid
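    # Hedged example: a pipe-separated spec means "any one of these will do", e.g.
    #   self.command_exists('llvm-config|llvm-config-9')
    # succeeds if either command resolves via `type` in the current shell.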
def warning(self, msg):
self.error_log = f"{msg}"
print(f"{WARNING} {msg}")
def deepspeed_src_path(self, code_path):
if os.path.isabs(code_path):
return code_path
else:
return os.path.join(Path(__file__).parent.parent.absolute(), code_path)
def builder(self):
from torch.utils.cpp_extension import CppExtension
return CppExtension(name=self.absolute_name(),
sources=self.strip_empty_entries(self.sources()),
include_dirs=self.strip_empty_entries(self.include_paths()),
extra_compile_args={'cxx': self.strip_empty_entries(self.cxx_args())},
extra_link_args=self.strip_empty_entries(self.extra_ldflags()))
def load(self, verbose=True):
from deepspeed.git_version_info import installed_ops, torch_info
if installed_ops.get(self.name, False):
# Ensure the op we're about to load was compiled with the same
# torch/cuda versions we are currently using at runtime.
self.validate_torch_version(torch_info)
if torch.cuda.is_available() and isinstance(self, CUDAOpBuilder):
self.validate_torch_op_version(torch_info)
return importlib.import_module(self.absolute_name())
else:
return self.jit_load(verbose)
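    # Typical call path (sketch; the concrete builder name is just one example): a subclass such as
    #   UtilsBuilder().load()
    # returns the pre-compiled module when it was installed with matching torch/cuda versions,
    # otherwise it falls through to jit_load() below.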
def jit_load(self, verbose=True):
if not self.is_compatible(verbose):
raise RuntimeError(
f"Unable to JIT load the {self.name} op due to it not being compatible due to hardware/software issue. {self.error_log}"
)
try:
import ninja # noqa: F401
except ImportError:
raise RuntimeError(f"Unable to JIT load the {self.name} op due to ninja not being installed.")
if isinstance(self, CUDAOpBuilder) and not self.is_rocm_pytorch():
try:
assert_no_cuda_mismatch(self.name)
self.build_for_cpu = False
except BaseException:
self.build_for_cpu = True
self.jit_mode = True
from torch.utils.cpp_extension import load
start_build = time.time()
sources = [self.deepspeed_src_path(path) for path in self.sources()]
extra_include_paths = [self.deepspeed_src_path(path) for path in self.include_paths()]
        # Torch will try to apply whatever CCs are in the arch list at compile time. We have
        # already set the intended targets ourselves, so we know which will be needed at
        # runtime. This prevents CC collisions such as multiple __half implementations.
        # Stash the arch list so it can be reset after the build.
torch_arch_list = None
if "TORCH_CUDA_ARCH_LIST" in os.environ:
torch_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST")
os.environ["TORCH_CUDA_ARCH_LIST"] = ""
nvcc_args = self.strip_empty_entries(self.nvcc_args())
cxx_args = self.strip_empty_entries(self.cxx_args())
if isinstance(self, CUDAOpBuilder):
if not self.build_for_cpu and self.enable_bf16:
cxx_args.append("-DBF16_AVAILABLE")
nvcc_args.append("-DBF16_AVAILABLE")
op_module = load(name=self.name,
sources=self.strip_empty_entries(sources),
extra_include_paths=self.strip_empty_entries(extra_include_paths),
extra_cflags=cxx_args,
extra_cuda_cflags=nvcc_args,
extra_ldflags=self.strip_empty_entries(self.extra_ldflags()),
verbose=verbose)
build_duration = time.time() - start_build
if verbose:
print(f"Time to load {self.name} op: {build_duration} seconds")
# Reset arch list so we are not silently removing it for other possible use cases
if torch_arch_list:
os.environ["TORCH_CUDA_ARCH_LIST"] = torch_arch_list
return op_module
class CUDAOpBuilder(OpBuilder):
def compute_capability_args(self, cross_compile_archs=None):
"""
Returns nvcc compute capability compile flags.
1. `TORCH_CUDA_ARCH_LIST` takes priority over `cross_compile_archs`.
        2. If neither is set, the default compute capabilities will be used.
        3. Under `jit_mode`, the compute capabilities of all visible cards will be used, plus PTX.
Format:
- `TORCH_CUDA_ARCH_LIST` may use ; or whitespace separators. Examples:
TORCH_CUDA_ARCH_LIST="6.1;7.5;8.6" pip install ...
TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6+PTX" pip install ...
- `cross_compile_archs` uses ; separator.
"""
ccs = []
if self.jit_mode:
# Compile for underlying architectures since we know those at runtime
for i in range(torch.cuda.device_count()):
CC_MAJOR, CC_MINOR = torch.cuda.get_device_capability(i)
cc = f"{CC_MAJOR}.{CC_MINOR}"
if cc not in ccs:
ccs.append(cc)
ccs = sorted(ccs)
ccs[-1] += '+PTX'
else:
# Cross-compile mode, compile for various architectures
# env override takes priority
cross_compile_archs_env = os.environ.get('TORCH_CUDA_ARCH_LIST', None)
if cross_compile_archs_env is not None:
if cross_compile_archs is not None:
print(
f"{WARNING} env var `TORCH_CUDA_ARCH_LIST={cross_compile_archs_env}` overrides `cross_compile_archs={cross_compile_archs}`"
)
cross_compile_archs = cross_compile_archs_env.replace(' ', ';')
else:
if cross_compile_archs is None:
cross_compile_archs = get_default_compute_capabilities()
ccs = cross_compile_archs.split(';')
ccs = self.filter_ccs(ccs)
if len(ccs) == 0:
raise RuntimeError(
f"Unable to load {self.name} op due to no compute capabilities remaining after filtering")
args = []
self.enable_bf16 = True
for cc in ccs:
num = cc[0] + cc[2]
args.append(f'-gencode=arch=compute_{num},code=sm_{num}')
if cc.endswith('+PTX'):
args.append(f'-gencode=arch=compute_{num},code=compute_{num}')
if int(cc[0]) <= 7:
self.enable_bf16 = False
return args
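    # Worked example (architectures assumed): for ccs == ['8.0', '8.6+PTX'] the loop above returns
    #   ['-gencode=arch=compute_80,code=sm_80',
    #    '-gencode=arch=compute_86,code=sm_86',
    #    '-gencode=arch=compute_86,code=compute_86']
    # and leaves enable_bf16 True because no listed capability is <= 7.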
def filter_ccs(self, ccs: List[str]):
"""
Prune any compute capabilities that are not compatible with the builder. Should log
which CCs have been pruned.
"""
return ccs
def version_dependent_macros(self):
# Fix from apex that might be relevant for us as well, related to https://github.com/NVIDIA/apex/issues/456
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
version_ge_1_5 = ['-DVERSION_GE_1_5']
return version_ge_1_1 + version_ge_1_3 + version_ge_1_5
def is_compatible(self, verbose=True):
return super().is_compatible(verbose)
def builder(self):
try:
if not self.is_rocm_pytorch():
assert_no_cuda_mismatch(self.name)
self.build_for_cpu = False
except BaseException:
self.build_for_cpu = True
if self.build_for_cpu:
from torch.utils.cpp_extension import CppExtension as ExtensionBuilder
else:
from torch.utils.cpp_extension import CUDAExtension as ExtensionBuilder
compile_args = {'cxx': self.strip_empty_entries(self.cxx_args())} if self.build_for_cpu else \
{'cxx': self.strip_empty_entries(self.cxx_args()), \
'nvcc': self.strip_empty_entries(self.nvcc_args())}
if not self.build_for_cpu and self.enable_bf16:
compile_args['cxx'].append("-DBF16_AVAILABLE")
cuda_ext = ExtensionBuilder(name=self.absolute_name(),
sources=self.strip_empty_entries(self.sources()),
include_dirs=self.strip_empty_entries(self.include_paths()),
libraries=self.strip_empty_entries(self.libraries_args()),
extra_compile_args=compile_args,
extra_link_args=self.strip_empty_entries(self.extra_ldflags()))
if self.is_rocm_pytorch():
# hip converts paths to absolute, this converts back to relative
sources = cuda_ext.sources
curr_file = Path(__file__).parent.parent # ds root
for i in range(len(sources)):
src = Path(sources[i])
if src.is_absolute():
sources[i] = str(src.relative_to(curr_file))
else:
sources[i] = str(src)
cuda_ext.sources = sources
return cuda_ext
def hipify_extension(self):
if self.is_rocm_pytorch():
from torch.utils.hipify import hipify_python
hipify_python.hipify(
project_directory=os.getcwd(),
output_directory=os.getcwd(),
header_include_dirs=self.include_paths(),
includes=[os.path.join(os.getcwd(), '*')],
extra_files=[os.path.abspath(s) for s in self.sources()],
show_detailed=True,
is_pytorch_extension=True,
hipify_extra_files_only=True,
)
def cxx_args(self):
if sys.platform == "win32":
return ['-O2']
else:
return ['-O3', '-std=c++17', '-g', '-Wno-reorder']
def nvcc_args(self):
if self.build_for_cpu:
return []
args = ['-O3']
if self.is_rocm_pytorch():
ROCM_MAJOR, ROCM_MINOR = self.installed_rocm_version()
args += [
'-std=c++17', '-U__HIP_NO_HALF_OPERATORS__', '-U__HIP_NO_HALF_CONVERSIONS__',
'-U__HIP_NO_HALF2_OPERATORS__',
'-DROCM_VERSION_MAJOR=%s' % ROCM_MAJOR,
'-DROCM_VERSION_MINOR=%s' % ROCM_MINOR
]
else:
cuda_major, _ = installed_cuda_version()
args += [
'-allow-unsupported-compiler' if sys.platform == "win32" else '', '--use_fast_math',
'-std=c++17' if cuda_major > 10 else '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__',
'-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__'
]
if os.environ.get('DS_DEBUG_CUDA_BUILD', '0') == '1':
args.append('--ptxas-options=-v')
args += self.compute_capability_args()
return args
def libraries_args(self):
if self.build_for_cpu:
return []
if sys.platform == "win32":
return ['cublas', 'curand']
else:
return []
class TorchCPUOpBuilder(CUDAOpBuilder):
def extra_ldflags(self):
if self.build_for_cpu:
return ['-fopenmp']
if not self.is_rocm_pytorch():
return ['-lcurand']
return []
def cxx_args(self):
import torch
args = []
if not self.build_for_cpu:
if not self.is_rocm_pytorch():
CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.CUDA_HOME, "lib64")
else:
CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.ROCM_HOME, "lib")
args += super().cxx_args()
args += [
f'-L{CUDA_LIB64}',
'-lcudart',
'-lcublas',
'-g',
]
CPU_ARCH = self.cpu_arch()
SIMD_WIDTH = self.simd_width()
CUDA_ENABLE = self.is_cuda_enable()
args += [
CPU_ARCH,
'-fopenmp',
SIMD_WIDTH,
CUDA_ENABLE,
]
return args
| 29,737 | 39.459864 | 147 | py |
DeepSpeed | DeepSpeed-master/op_builder/spatial_inference.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .builder import CUDAOpBuilder, installed_cuda_version
class SpatialInferenceBuilder(CUDAOpBuilder):
BUILD_VAR = "DS_BUILD_SPATIAL_INFERENCE"
NAME = "spatial_inference"
def __init__(self, name=None):
name = self.NAME if name is None else name
super().__init__(name=name)
def absolute_name(self):
return f'deepspeed.ops.spatial.{self.NAME}_op'
def is_compatible(self, verbose=True):
try:
import torch
except ImportError:
self.warning("Please install torch if trying to pre-compile inference kernels")
return False
cuda_okay = True
if not self.is_rocm_pytorch() and torch.cuda.is_available():
sys_cuda_major, _ = installed_cuda_version()
torch_cuda_major = int(torch.version.cuda.split('.')[0])
cuda_capability = torch.cuda.get_device_properties(0).major
if cuda_capability >= 8:
if torch_cuda_major < 11 or sys_cuda_major < 11:
self.warning("On Ampere and higher architectures please use CUDA 11+")
cuda_okay = False
return super().is_compatible(verbose) and cuda_okay
def sources(self):
return [
'csrc/spatial/csrc/opt_bias_add.cu',
'csrc/spatial/csrc/pt_binding.cpp',
]
def include_paths(self):
return ['csrc/spatial/includes', 'csrc/includes']
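# Usage sketch (install-time flag taken from BUILD_VAR above; JIT path assumed from OpBuilder.load):
# setting DS_BUILD_SPATIAL_INFERENCE=1 pre-builds this op during `pip install .`, while
# SpatialInferenceBuilder().load() compiles it on first use if it was not pre-built.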
| 1,534 | 32.369565 | 91 | py |
DeepSpeed | DeepSpeed-master/op_builder/sparse_attn.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .builder import OpBuilder
try:
from packaging import version as pkg_version
except ImportError:
pkg_version = None
class SparseAttnBuilder(OpBuilder):
BUILD_VAR = "DS_BUILD_SPARSE_ATTN"
NAME = "sparse_attn"
def __init__(self):
super().__init__(name=self.NAME)
def absolute_name(self):
return f'deepspeed.ops.sparse_attention.{self.NAME}_op'
def sources(self):
return ['csrc/sparse_attention/utils.cpp']
def cxx_args(self):
return ['-O2', '-fopenmp']
def is_compatible(self, verbose=True):
# Check to see if llvm and cmake are installed since they are dependencies
#required_commands = ['llvm-config|llvm-config-9', 'cmake']
#command_status = list(map(self.command_exists, required_commands))
#deps_compatible = all(command_status)
if self.is_rocm_pytorch():
self.warning(f'{self.NAME} is not compatible with ROCM')
return False
try:
import torch
except ImportError:
self.warning(f"unable to import torch, please install it first")
return False
# torch-cpu will not have a cuda version
if torch.version.cuda is None:
cuda_compatible = False
self.warning(f"{self.NAME} cuda is not available from torch")
else:
major, minor = torch.version.cuda.split('.')[:2]
cuda_compatible = (int(major) == 10 and int(minor) >= 1) or (int(major) >= 11)
if not cuda_compatible:
self.warning(f"{self.NAME} requires CUDA version 10.1+")
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
torch_compatible = (TORCH_MAJOR == 1 and TORCH_MINOR >= 5)
if not torch_compatible:
self.warning(
f'{self.NAME} requires a torch version >= 1.5 and < 2.0 but detected {TORCH_MAJOR}.{TORCH_MINOR}')
try:
import triton
except ImportError:
# auto-install of triton is broken on some systems, reverting to manual install for now
# see this issue: https://github.com/microsoft/DeepSpeed/issues/1710
self.warning(f"please install triton==1.0.0 if you want to use sparse attention")
return False
if pkg_version:
installed_triton = pkg_version.parse(triton.__version__)
triton_mismatch = installed_triton != pkg_version.parse("1.0.0")
else:
installed_triton = triton.__version__
triton_mismatch = installed_triton != "1.0.0"
if triton_mismatch:
self.warning(f"using untested triton version ({installed_triton}), only 1.0.0 is known to be compatible")
return False
return super().is_compatible(verbose) and torch_compatible and cuda_compatible
| 2,994 | 35.084337 | 117 | py |
DeepSpeed | DeepSpeed-master/op_builder/cpu/builder.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401
from op_builder.builder import OpBuilder
except ImportError:
from deepspeed.ops.op_builder.builder import OpBuilder
class CPUOpBuilder(OpBuilder):
def builder(self):
from torch.utils.cpp_extension import CppExtension as ExtensionBuilder
compile_args = {'cxx': self.strip_empty_entries(self.cxx_args())}
cpp_ext = ExtensionBuilder(name=self.absolute_name(),
sources=self.strip_empty_entries(self.sources()),
include_dirs=self.strip_empty_entries(self.include_paths()),
libraries=self.strip_empty_entries(self.libraries_args()),
extra_compile_args=compile_args)
return cpp_ext
def cxx_args(self):
return ['-O3', '-g', '-Wno-reorder']
def libraries_args(self):
return []
| 1,224 | 34 | 95 | py |
DeepSpeed | DeepSpeed-master/scripts/check-torchcuda.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from __future__ import annotations
'''Copyright The Microsoft DeepSpeed Team'''
"""
Checks each file in sys.argv for the string "torch.cuda".
Modified from https://github.com/jlebar/pre-commit-hooks/blob/master/check_do_not_submit.py
"""
import subprocess
import sys
def err(s: str) -> None:
print(s, file=sys.stderr)
# There are many ways we could search for the string "torch.cuda", but `git
# grep --no-index` is nice because
# - it's very fast (as compared to iterating over the file in Python)
# - we can reasonably assume it's available on all machines
# - unlike plain grep, which is slower and has different flags on MacOS versus
# Linux, git grep is always the same.
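# Example invocation (file path hypothetical), as used from a pre-commit hook:
#   ./check-torchcuda.py deepspeed/runtime/engine.py
# Any match that is not annotated with '#ignore-cuda' causes a non-zero exit below.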
res = subprocess.run(
["git", "grep", "-Hn", "--no-index", "-e", r"torch\.cuda", "--and", "--not", "-e", "#ignore-cuda", *sys.argv[1:]],
capture_output=True,
)
if res.returncode == 0:
err('Error: The string "torch.cuda" was found.\nPlease replace all calls to torch.cuda with "get_accelerator()" and add the following import line:\n\n from deepspeed.accelerator import get_accelerator\n\nIf your code is mean to be cuda specific, please add the following comment in the line with torch.cuda:\n\n #ignore-cuda\n'
)
err(res.stdout.decode("utf-8"))
sys.exit(1)
elif res.returncode == 2:
err(f"Error invoking grep on {', '.join(sys.argv[1:])}:")
err(res.stderr.decode("utf-8"))
sys.exit(2)
res = subprocess.run(
["git", "grep", "-Hn", "--no-index", r"\.cuda()", *sys.argv[1:]],
capture_output=True,
)
if res.returncode == 0:
err('Error: The string ".cuda()" was found. This implies convert a tensor to cuda tensor. Please replace all calls to tensor.cuda() with "tensor.to(get_accelerator().device_name())" and add the following import line:\nfrom deepspeed.accelerator import get_accelerator'
)
err(res.stdout.decode("utf-8"))
sys.exit(1)
elif res.returncode == 2:
err(f"Error invoking grep on {', '.join(sys.argv[1:])}:")
err(res.stderr.decode("utf-8"))
sys.exit(2)
files = []
for file in sys.argv[1:]:
if not file.endswith(".cpp"):
files.append(file)
res = subprocess.run(
["git", "grep", "-Hn", "--no-index", r"\.is_cuda", *files],
capture_output=True,
)
if res.returncode == 0:
err('''
Error: The string ".is_cuda" was found. This implies checking if a tensor is a cuda tensor.
Please replace all calls to "tensor.is_cuda" with "get_accelerator().on_accelerator(tensor)",
and add the following import line:
'from deepspeed.accelerator import get_accelerator'
''')
err(res.stdout.decode("utf-8"))
sys.exit(1)
elif res.returncode == 2:
err(f"Error invoking grep on {', '.join(files)}:")
err(res.stderr.decode("utf-8"))
sys.exit(2)
| 2,895 | 36.128205 | 337 | py |
DeepSpeed | DeepSpeed-master/scripts/check-torchdist.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from __future__ import annotations
'''Copyright The Microsoft DeepSpeed Team'''
"""
Checks each file in sys.argv for the string "torch.distributed".
Modified from https://github.com/jlebar/pre-commit-hooks/blob/master/check_do_not_submit.py
"""
import subprocess
import sys
def err(s: str) -> None:
print(s, file=sys.stderr)
# There are many ways we could search for the string "torch.distributed", but `git
# grep --no-index` is nice because
# - it's very fast (as compared to iterating over the file in Python)
# - we can reasonably assume it's available on all machines
# - unlike plain grep, which is slower and has different flags on MacOS versus
# Linux, git grep is always the same.
res = subprocess.run(
["git", "grep", "-Hn", "--no-index", r"torch\.distributed", *sys.argv[1:]],
capture_output=True,
)
if res.returncode == 0:
err('Error: The string "torch.distributed" was found. Please replace all calls to torch.distributed with "deepspeed.comm"'
)
err(res.stdout.decode("utf-8"))
sys.exit(1)
elif res.returncode == 2:
err(f"Error invoking grep on {', '.join(sys.argv[1:])}:")
err(res.stderr.decode("utf-8"))
sys.exit(2)
| 1,306 | 30.878049 | 126 | py |
DeepSpeed | DeepSpeed-master/tests/conftest.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# tests directory-specific settings - this file is run automatically by pytest before any tests are run
import sys
import pytest
import os
from os.path import abspath, dirname, join
import torch
import warnings
# Set this environment variable for the T5 inference unittest(s) (e.g. google/t5-v1_1-small)
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python'
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
def pytest_configure(config):
config.option.color = "yes"
config.option.durations = 0
config.option.durations_min = 1
config.option.verbose = True
def pytest_addoption(parser):
parser.addoption("--torch_ver", default=None, type=str)
parser.addoption("--cuda_ver", default=None, type=str)
def validate_version(expected, found):
version_depth = expected.count('.') + 1
found = '.'.join(found.split('.')[:version_depth])
return found == expected
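# Illustrative behavior (version strings hypothetical): validate_version("1.13", "1.13.1+cu117")
# is True because only as many components as the expected value provides are compared, while
# validate_version("1.13", "2.0.1") is False.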
@pytest.fixture(scope="session", autouse=True)
def check_environment(pytestconfig):
expected_torch_version = pytestconfig.getoption("torch_ver")
expected_cuda_version = pytestconfig.getoption("cuda_ver")
if expected_torch_version is None:
warnings.warn(
"Running test without verifying torch version, please provide an expected torch version with --torch_ver")
elif not validate_version(expected_torch_version, torch.__version__):
pytest.exit(
f"expected torch version {expected_torch_version} did not match found torch version {torch.__version__}",
returncode=2)
if expected_cuda_version is None:
warnings.warn(
"Running test without verifying cuda version, please provide an expected cuda version with --cuda_ver")
elif not validate_version(expected_cuda_version, torch.version.cuda):
pytest.exit(
f"expected cuda version {expected_cuda_version} did not match found cuda version {torch.version.cuda}",
returncode=2)
# Override of pytest "runtest" for DistributedTest class
# This hook is run before the default pytest_runtest_call
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_call(item):
# We want to use our own launching function for distributed tests
if getattr(item.cls, "is_dist_test", False):
dist_test_class = item.cls()
dist_test_class(item._request)
item.runtest = lambda: True # Dummy function so test is not run twice
# We allow DistributedTest to reuse distributed environments. When the last
# test for a class is run, we want to make sure those distributed environments
# are destroyed.
def pytest_runtest_teardown(item, nextitem):
if getattr(item.cls, "reuse_dist_env", False) and not nextitem:
dist_test_class = item.cls()
for num_procs, pool in dist_test_class._pool_cache.items():
dist_test_class._close_pool(pool, num_procs, force=True)
@pytest.hookimpl(tryfirst=True)
def pytest_fixture_setup(fixturedef, request):
if getattr(fixturedef.func, "is_dist_fixture", False):
dist_fixture_class = fixturedef.func()
dist_fixture_class(request)
| 3,397 | 37.613636 | 118 | py |
DeepSpeed | DeepSpeed-master/tests/benchmarks/flatten_bench.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
#!/usr/bin/env python
# run the benchmark under timeit (-t), cProfile (-c), line_profiler (-l)
#
# usage:
# ./flatten_bench.py -t
# ./flatten_bench.py -c
# kernprof -l flatten_bench.py -l; python -m line_profiler flatten_bench.py.lprof
import argparse
import gc
import torch
from torch._utils import _flatten_dense_tensors
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import UtilsBuilder
from apex_C import flatten as flatten_apex
util_ops = UtilsBuilder().load()
flatten = util_ops.flatten
unflatten = util_ops.unflatten
torch.manual_seed(0)
# emulate a small typical model weights
x = [
torch.rand((512, 512)).to(get_accelerator().device_name()),
torch.rand((512, 1024)).to(get_accelerator().device_name()),
torch.rand((512, 30000)).to(get_accelerator().device_name())
]
t = x * 30
# warm up and check that the same output is produced
flat_py = _flatten_dense_tensors(t)
flat_cpp = flatten(t)
flat_apex = flatten_apex(t)
#numel = flat_cpp.numel()
assert torch.eq(flat_py, flat_cpp).all(), "both produce the same tensor"
assert torch.eq(flat_py, flat_apex).all(), "both produce the same tensor"
TIMES = 1000
# the programs being tested
def py():
for i in range(TIMES):
flat = _flatten_dense_tensors(t)
def cpp():
for i in range(TIMES):
flat = flatten(t)
def apex():
for i in range(TIMES):
flat = flatten_apex(t)
#### cProfile ####
import cProfile
def cprofileme():
print("--------------- cProfile -----------------")
print("py")
cProfile.run("py()", sort=-1)
gc.collect()
get_accelerator().empty_cache()
print("cpp")
cProfile.run("cpp()", sort=-1)
gc.collect()
get_accelerator().empty_cache()
print("apex")
cProfile.run("apex()", sort=-1)
gc.collect()
get_accelerator().empty_cache()
#### timeit ####
import timeit
def timeme():
print("--------------- timeit -----------------")
print(f'py ={timeit.Timer("py()", globals=globals()).timeit(number=1)}')
gc.collect()
get_accelerator().empty_cache()
print(f'cpp ={timeit.Timer("cpp()", globals=globals()).timeit(number=1)}')
gc.collect()
get_accelerator().empty_cache()
print(f'apex={timeit.Timer("apex()", globals=globals()).timeit(number=1)}')
gc.collect()
get_accelerator().empty_cache()
#### line_profiler ####
# this one requires a special way to be called
# pip install line_profiler
# kernprof -l flatten_bench.py -l; python -m line_profiler flatten_bench.py.lprof
def line_profileme():
print("--------------- line_profiler -----------------")
print("py")
profile(py)() # noqa: F821
gc.collect()
get_accelerator().empty_cache()
print("cpp")
profile(cpp)() # noqa: F821
gc.collect()
get_accelerator().empty_cache()
print("apex")
profile(apex)() # noqa: F821
gc.collect()
get_accelerator().empty_cache()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-l", action='store_true')
parser.add_argument("-c", action='store_true')
parser.add_argument("-t", action='store_true')
args = parser.parse_args()
if args.l:
line_profileme()
elif args.c:
cprofileme()
elif args.t:
timeme()
| 3,378 | 23.485507 | 82 | py |
DeepSpeed | DeepSpeed-master/tests/benchmarks/unflatten_bench.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
#!/usr/bin/env python
# run the benchmark under timeit (-t), cProfile (-c), line_profiler (-l)
#
# usage:
# ./unflatten_bench.py -t
# ./unflatten_bench.py -c
# kernprof -l unflatten_bench.py -l; python -m line_profiler unflatten_bench.py.lprof
import argparse
import gc
import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import UtilsBuilder
from apex_C import flatten as flatten_apex
from apex_C import unflatten as unflatten_apex
util_ops = UtilsBuilder().load()
flatten = util_ops.flatten
unflatten = util_ops.unflatten
torch.manual_seed(0)
# emulate a small typical model weights
x = [
torch.rand((512, 512)).to(get_accelerator().device_name()),
torch.rand((512, 1024)).to(get_accelerator().device_name()),
torch.rand((512, 30000)).to(get_accelerator().device_name())
]
unflat_t = x * 30
# warm up and check that the same output is produced
flat_py = _flatten_dense_tensors(unflat_t)
flat_cpp = flatten(unflat_t)
flat_apex = flatten_apex(unflat_t)
#numel = flat_cpp.numel()
assert torch.eq(flat_py, flat_cpp).all(), "both produce the same tensor"
assert torch.eq(flat_py, flat_apex).all(), "both produce the same tensor"
flat_t = flat_py
unflat_py = _unflatten_dense_tensors(flat_py, unflat_t)
for i in range(len(unflat_t)):
assert torch.eq(unflat_t[i], unflat_py[i]).all()
unflat_cpp = _unflatten_dense_tensors(flat_cpp, unflat_t)
for i in range(len(unflat_t)):
assert torch.eq(unflat_t[i], unflat_cpp[i]).all()
unflat_apex = _unflatten_dense_tensors(flat_apex, unflat_t)
for i in range(len(unflat_t)):
assert torch.eq(unflat_t[i], unflat_apex[i]).all()
# the programs being tested
def py():
for i in range(1000):
unflat = _unflatten_dense_tensors(flat_t, unflat_t)
def cpp():
for i in range(1000):
unflat = unflatten(flat_t, unflat_t)
def apex():
for i in range(1000):
unflat = unflatten_apex(flat_t, unflat_t)
#### cProfile ####
import cProfile
def cprofileme():
print("--------------- cProfile -----------------")
print("py")
cProfile.run("py()", sort=-1)
gc.collect()
get_accelerator().empty_cache()
print("cpp")
cProfile.run("cpp()", sort=-1)
gc.collect()
get_accelerator().empty_cache()
print("apex")
cProfile.run("apex()", sort=-1)
gc.collect()
get_accelerator().empty_cache()
#### timeit ####
import timeit
def timeme():
print("--------------- timeit -----------------")
print(f'py ={timeit.Timer("py()", globals=globals()).timeit(number=1)}')
gc.collect()
get_accelerator().empty_cache()
print(f'cpp ={timeit.Timer("cpp()", globals=globals()).timeit(number=1)}')
gc.collect()
get_accelerator().empty_cache()
print(f'apex={timeit.Timer("apex()", globals=globals()).timeit(number=1)}')
gc.collect()
get_accelerator().empty_cache()
#### line_profiler ####
# this one requires a special way to be called
# pip install line_profiler
# kernprof -l unflatten_bench.py -l; python -m line_profiler unflatten_bench.py.lprof
def line_profileme():
print("--------------- line_profier -----------------")
print("py")
profile(py)() # noqa: F821
gc.collect()
get_accelerator().empty_cache()
print("cpp")
profile(cpp)() # noqa: F821
gc.collect()
get_accelerator().empty_cache()
print("apex")
profile(apex)() # noqa: F821
gc.collect()
get_accelerator().empty_cache()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-l", action='store_true')
parser.add_argument("-c", action='store_true')
parser.add_argument("-t", action='store_true')
args = parser.parse_args()
if args.l:
line_profileme()
elif args.c:
cprofileme()
elif args.t:
timeme()
| 3,975 | 26.047619 | 86 | py |
DeepSpeed | DeepSpeed-master/tests/accelerator/test_ds_init.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import torch
import deepspeed
from deepspeed.accelerator import get_accelerator
class OneLayerNet(torch.nn.Module):
def __init__(self, D_in, D_out):
"""
        In the constructor we instantiate a single nn.Linear module and assign it as
        a member variable.
"""
super(OneLayerNet, self).__init__()
self.linear1 = torch.nn.Linear(D_in, D_out)
def forward(self, x):
"""
        In the forward function we accept a Tensor of input data and we must return
        a Tensor of output data. We can use Modules defined in the constructor as
        well as arbitrary operators on Tensors.
"""
h_relu = self.linear1(x).clamp(min=0)
y_pred = self.linear1(h_relu)
return y_pred
def test_literal_device():
model = OneLayerNet(128, 128)
os.environ['RANK'] = '0'
os.environ['WORLD_SIZE'] = '1'
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '8088'
os.environ['LOCAL_RANK'] = '0'
deepspeed.init_distributed(get_accelerator().communication_backend_name())
deepspeed.initialize(model=model, config='ds_config.json')
string = get_accelerator().device_name() #'xpu' or 'cuda'
string0 = get_accelerator().device_name(0) #'xpu:0' or 'cuda:0'
string1 = get_accelerator().device_name(1) #'xpu:1' or 'cuda:1'
assert string == 'xpu' or string == 'cuda'
assert string0 == 'xpu:0' or string0 == 'cuda:0'
assert string1 == 'xpu:1' or string1 == 'cuda:1'
| 1,610 | 31.877551 | 85 | py |
DeepSpeed | DeepSpeed-master/tests/hybrid_engine/hybrid_engine_test.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from transformers import AutoModelForCausalLM
import deepspeed
import argparse
from deepspeed.accelerator import get_accelerator
deepspeed.runtime.utils.see_memory_usage('pre test', force=True)
model = AutoModelForCausalLM.from_pretrained('facebook/opt-350M').half().to(get_accelerator().device_name())
parser = argparse.ArgumentParser()
parser = deepspeed.add_config_arguments(parser)
args = parser.parse_args()
deepspeed.runtime.utils.see_memory_usage('post test', force=True)
m, _, _, _ = deepspeed.initialize(model=model, args=args, enable_hybrid_engine=True)
m.eval()
input = torch.ones(1, 16, device=get_accelerator().device_name(), dtype=torch.long)
out = m(input)
m.train()
out = m(input)
print(out['logits'], out['logits'].norm())
| 831 | 25.83871 | 108 | py |