repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_profiler/gpt_utils.py | tests/test_fx/test_profiler/gpt_utils.py | import torch.nn as nn
from transformers import GPT2Config, GPT2LMHeadModel
class GPTLMModel(nn.Module):
def __init__(
self,
hidden_size=768,
num_layers=12,
num_attention_heads=12,
max_seq_len=1024,
vocab_size=50257,
checkpoint=False,
):
super().__init__()
self.checkpoint = checkpoint
self.model = GPT2LMHeadModel(
GPT2Config(
n_embd=hidden_size,
n_layer=num_layers,
n_head=num_attention_heads,
n_positions=max_seq_len,
n_ctx=max_seq_len,
vocab_size=vocab_size,
)
)
if checkpoint:
self.model.gradient_checkpointing_enable()
def forward(self, input_ids, attention_mask):
# Only return lm_logits
return self.model(input_ids=input_ids, attention_mask=attention_mask, use_cache=not self.checkpoint)[0]
class GPTLMLoss(nn.Module):
def __init__(self):
super().__init__()
self.loss_fn = nn.CrossEntropyLoss()
def forward(self, logits, labels):
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
return self.loss_fn(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
def gpt2_medium(checkpoint=False):
return GPTLMModel(hidden_size=1024, num_layers=24, num_attention_heads=16, checkpoint=checkpoint)
def gpt2_xl(checkpoint=False):
return GPTLMModel(hidden_size=1600, num_layers=48, num_attention_heads=32, checkpoint=checkpoint)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_profiler/test_profiler_meta_info_prop.py | tests/test_fx/test_profiler/test_profiler_meta_info_prop.py | from typing import Tuple
import torch
import torch.fx
import torchvision.models as tm
from gpt_utils import gpt2_medium
from torch.fx import symbolic_trace
from colossalai.fx.passes.meta_info_prop import MetaInfoProp
from colossalai.fx.profiler import calculate_fwd_out, calculate_fwd_tmp, is_compatible_with_meta, parameter_size
from colossalai.fx.tracer.tracer import ColoTracer
from colossalai.testing import clear_cache_before_run, run_on_environment_flag
if is_compatible_with_meta():
from colossalai.fx.profiler import MetaTensor
TM_BATCH_SIZE = 64
GPT_BATCH_SIZE = 8
NUM_STEPS = 5
def extract_forward_mem(gm: torch.fx.GraphModule):
node_size = 0
param_size = 0
for node in gm.graph.nodes:
node_size += calculate_fwd_tmp(node)
node_size += calculate_fwd_out(node)
param_size = parameter_size(gm)
return (node_size + param_size) / 1024**2, param_size / 1024**2
def extract_forward_flops(gm: torch.fx.GraphModule):
fwd_flop = 0
bwd_flop = 0
for node in gm.graph.nodes:
fwd_flop += node.meta.get("fwd_flop", 0)
bwd_flop += node.meta.get("bwd_flop", 0)
return fwd_flop, bwd_flop
def gen_tm_data(batch_size: int, shape: Tuple[int, int, int], device="cuda"):
data = torch.rand(batch_size, *shape, device=device)
label = torch.empty(batch_size, dtype=torch.long, device=device).random_(1000)
return data, label
def gen_gpt_data(batch_size, seq_len, vocab_size, device="cpu"):
input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device=device)
attention_mask = torch.ones_like(input_ids, device=device)
return input_ids, attention_mask
def run_tm_forward(gm: torch.fx.GraphModule):
torch.cuda.reset_peak_memory_stats()
forward_mem = -torch.cuda.memory_allocated(device="cuda:0") / 1024**2
param_mem = -torch.cuda.memory_allocated(device="cuda:0") / 1024**2
gm.cuda()
param_mem += torch.cuda.memory_allocated(device="cuda:0") / 1024**2
gm.train()
for n in range(NUM_STEPS):
torch.cuda.reset_peak_memory_stats()
data, _ = gen_tm_data(TM_BATCH_SIZE, (3, 224, 224))
# If we need to dive deep into the memory usage by
# inspecting `saved_tensor_hooks`
# =====================================================
# fwd_mem = 0
# cache = set()
# def pack(x):
# if isinstance(x, torch.Tensor):
# nonlocal fwd_mem, cache
# if x.data_ptr() not in cache:
# fwd_mem += activation_size(x)
# cache.add(x.data_ptr())
# return x
# def unpack(x):
# return x
#
# with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
# output = gm(data)
# print(f'Memory estimation by saved_tensor_hooks: {fwd_mem / 1024**2}')
# =====================================================
output = gm(data)
forward_mem += torch.cuda.memory_allocated(device="cuda:0") / 1024**2 / NUM_STEPS
del output
return forward_mem, param_mem
def run_gpt_forward(gm: torch.fx.GraphModule):
torch.cuda.reset_peak_memory_stats()
forward_mem = -torch.cuda.memory_allocated(device="cuda:0") / 1024**2
param_mem = -torch.cuda.memory_allocated(device="cuda:0") / 1024**2
gm.cuda()
param_mem += torch.cuda.memory_allocated(device="cuda:0") / 1024**2
for n in range(NUM_STEPS):
torch.cuda.reset_peak_memory_stats()
data, mask = gen_gpt_data(GPT_BATCH_SIZE, 1024, 50257, device="cuda:0")
# If we need to dive deep into the memory usage by
# inspecting `saved_tensor_hooks`
# =====================================================
# fwd_mem = 0
# cache = set()
# def pack(x):
# if isinstance(x, torch.Tensor):
# nonlocal fwd_mem, cache
# if x.data_ptr() not in cache:
# fwd_mem += activation_size(x)
# cache.add(x.data_ptr())
# return x
# def unpack(x):
# return x
#
# with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
# output = gm(data, mask)
# print(f'Memory estimation by saved_tensor_hooks: {fwd_mem / 1024**2}')
# =====================================================
output = gm(data, mask)
forward_mem += torch.cuda.memory_allocated(device="cuda:0") / 1024**2 / NUM_STEPS
del output
return forward_mem, param_mem
@run_on_environment_flag(name="FX_PROFILER")
@clear_cache_before_run()
def test_meta_info_prop():
for m in [
tm.alexnet,
tm.resnet18,
tm.resnet34,
tm.resnet50,
tm.resnet101,
tm.resnet152,
tm.densenet121,
tm.densenet161,
tm.densenet169,
tm.densenet201,
tm.convnext_tiny,
tm.convnext_small,
tm.convnext_base,
tm.convnext_large,
tm.wide_resnet50_2,
tm.wide_resnet101_2,
tm.regnet_x_16gf,
tm.mnasnet0_5,
tm.efficientnet_b0,
tm.shufflenet_v2_x0_5,
tm.shufflenet_v2_x1_0,
tm.shufflenet_v2_x1_5,
tm.shufflenet_v2_x2_0,
tm.mobilenet_v2,
tm.mobilenet_v3_small,
tm.mobilenet_v3_large,
tm.resnext50_32x4d,
tm.resnext101_32x8d,
tm.resnext101_64x4d,
tm.vit_b_16,
tm.vit_b_32,
tm.vit_h_14,
tm.vit_l_16,
tm.vit_l_32,
tm.vgg11,
tm.vgg11_bn,
tm.vgg13,
tm.vgg13_bn,
tm.vgg16,
tm.vgg16_bn,
tm.vgg19,
tm.vgg19_bn,
]:
model = m().cuda()
model.train()
data = MetaTensor(torch.rand(int(TM_BATCH_SIZE), 3, 224, 224, device="meta"), fake_device="cuda:0")
gm = symbolic_trace(model)
interp = MetaInfoProp(gm)
interp.propagate(data)
gm.cpu()
meta_forward_mem, meta_param_mem = extract_forward_mem(gm)
fwd_flop, bwd_flop = extract_forward_flops(gm)
concrete_forward_mem, concrete_param_mem = run_tm_forward(gm)
print(
f"|{m.__name__}|{meta_forward_mem:.3f} MB|{meta_param_mem:.3f} MB|{concrete_forward_mem:.3f} MB|{concrete_param_mem:.3f} MB|fwd_flop={fwd_flop / 1e9:.3f}GFLOPs|bwd_flop={bwd_flop / 1e9:.3f}GFLOPs|"
)
del model, gm
@run_on_environment_flag(name="FX_PROFILER")
@clear_cache_before_run()
def test_gpt_meta_info_prop():
for m in [gpt2_medium]:
model = m().cuda()
model.train()
data, mask = gen_gpt_data(GPT_BATCH_SIZE, 1024, 50257, device="meta")
graph = ColoTracer().trace(model, meta_args={"input_ids": data, "attention_mask": mask})
gm = torch.fx.GraphModule(model, graph)
interp = MetaInfoProp(gm)
interp.propagate(MetaTensor(data, fake_device="cuda:0"), MetaTensor(mask, fake_device="cuda:0"))
model.cpu()
fwd_flop, bwd_flop = extract_forward_flops(gm)
concrete_forward_mem, concrete_param_mem = run_gpt_forward(gm)
meta_forward_mem, meta_param_mem = extract_forward_mem(gm)
print(
f"|{m.__name__}|{meta_forward_mem:.3f} MB|{meta_param_mem:.3f} MB|{concrete_forward_mem:.3f} MB|{concrete_param_mem:.3f} MB|fwd_flop={fwd_flop / 1e9:.3f}GFLOPs|bwd_flop={bwd_flop / 1e9:.3f}GFLOPs|"
)
del model, gm
if __name__ == "__main__":
test_meta_info_prop()
test_gpt_meta_info_prop()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_pipeline/test_timm_model/timm_utils.py | tests/test_fx/test_pipeline/test_timm_model/timm_utils.py | import inspect
import random
import numpy as np
import torch
from torch.fx import GraphModule
from colossalai.fx import ColoTracer
from colossalai.fx.passes.adding_split_node_pass import balanced_split_pass, split_with_split_nodes_pass
MANUAL_SEED = 0
random.seed(MANUAL_SEED)
np.random.seed(MANUAL_SEED)
torch.manual_seed(MANUAL_SEED)
torch.backends.cudnn.deterministic = True
def split_model_and_compare_output(model, data, meta_args=None):
model.eval()
# get origin output and rng state
cpu_rng_state = torch.get_rng_state()
output = model(data)
# tracing model
tracer = ColoTracer()
try:
graph = tracer.trace(root=model, meta_args=meta_args)
except Exception as e:
raise RuntimeError(f"Failed to trace {model.__class__.__name__}, error: {e}")
gm = GraphModule(model, graph, model.__class__.__name__)
gm.recompile()
# apply transform passes
annotated_model = balanced_split_pass(gm, 2)
split_model, split_submodules = split_with_split_nodes_pass(annotated_model)
# get split model
model_part0 = list(split_model.children())[0]
model_part1 = list(split_model.children())[1]
# set rng state and compute output of split model
torch.set_rng_state(cpu_rng_state)
output_part0 = model_part0(data)
sig = inspect.signature(model_part1.forward)
if isinstance(output_part0, torch.Tensor):
output_part1 = model_part1(output_part0)
else:
if len(output_part0) > len(sig.parameters):
output_part0 = output_part0[: len(sig.parameters)]
output_part1 = model_part1(*output_part0)
assert output.equal(output_part1)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_pipeline/test_timm_model/test_timm.py | tests/test_fx/test_pipeline/test_timm_model/test_timm.py | import pytest
import timm.models as tm
import torch
from timm_utils import split_model_and_compare_output
@pytest.mark.skip("balance split v2 is not ready")
def test_timm_models_without_control_flow():
MODEL_LIST = [
tm.resnest.resnest50d,
tm.beit.beit_base_patch16_224,
tm.cait.cait_s24_224,
tm.convmixer.convmixer_768_32,
tm.efficientnet.efficientnetv2_m,
tm.resmlp_12_224,
tm.vision_transformer.vit_base_patch16_224,
tm.deit_base_distilled_patch16_224,
]
data = torch.rand(2, 3, 224, 224)
for model_cls in MODEL_LIST:
model = model_cls()
split_model_and_compare_output(model, data)
@pytest.mark.skip("balance split v2 is not ready")
def test_timm_models_with_control_flow():
torch.backends.cudnn.deterministic = True
MODEL_LIST_WITH_CONTROL_FLOW = [
tm.convnext.convnext_base,
tm.vgg.vgg11,
tm.dpn.dpn68,
tm.densenet.densenet121,
tm.rexnet.rexnet_100,
tm.swin_transformer.swin_base_patch4_window7_224,
]
data = torch.rand(2, 3, 224, 224)
meta_args = {"x": data.to("meta")}
for model_cls in MODEL_LIST_WITH_CONTROL_FLOW:
model = model_cls()
split_model_and_compare_output(model, data, meta_args)
if __name__ == "__main__":
test_timm_models_without_control_flow()
test_timm_models_with_control_flow()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_pipeline/test_hf_model/test_t5.py | tests/test_fx/test_pipeline/test_hf_model/test_t5.py | import pytest
import torch
import transformers
from hf_utils import split_model_and_compare_output
BATCH_SIZE = 1
SEQ_LENGHT = 16
@pytest.mark.skip("balance split v2 is not ready")
def test_t5():
MODEL_LIST = [
transformers.T5Model,
transformers.T5ForConditionalGeneration,
transformers.T5EncoderModel,
]
config = transformers.T5Config(vocab_size=100, d_model=128, num_layers=2)
def data_gen():
input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64)
decoder_input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64)
kwargs = dict(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
return kwargs
def data_gen_for_encoder_only():
input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64)
kwargs = dict(input_ids=input_ids)
return kwargs
for model_cls in MODEL_LIST:
model = model_cls(config=config)
if isinstance(model, transformers.T5EncoderModel):
data_gen_func = data_gen_for_encoder_only
else:
data_gen_func = data_gen
split_model_and_compare_output(model, data_gen_func)
if __name__ == "__main__":
test_t5()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_pipeline/test_hf_model/test_albert.py | tests/test_fx/test_pipeline/test_hf_model/test_albert.py | import pytest
import torch
import transformers
from hf_utils import split_model_and_compare_output
BATCH_SIZE = 2
SEQ_LENGHT = 16
@pytest.mark.skip("balance split v2 is not ready")
def test_single_sentence_albert():
MODEL_LIST = [
transformers.AlbertModel,
transformers.AlbertForPreTraining,
transformers.AlbertForMaskedLM,
transformers.AlbertForSequenceClassification,
transformers.AlbertForTokenClassification,
]
config = transformers.AlbertConfig(
vocab_size=100,
embedding_size=128,
hidden_size=128,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=256,
)
def data_gen():
input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64)
token_type_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64)
attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64)
meta_args = dict(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
return meta_args
for model_cls in MODEL_LIST:
model = model_cls(config=config)
split_model_and_compare_output(model, data_gen)
if __name__ == "__main__":
test_single_sentence_albert()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_pipeline/test_hf_model/hf_utils.py | tests/test_fx/test_pipeline/test_hf_model/hf_utils.py | import inspect
import random
import numpy as np
import torch
from torch.fx import GraphModule
from colossalai.fx import ColoTracer
from colossalai.fx.passes.adding_split_node_pass import balanced_split_pass, split_with_split_nodes_pass
MANUAL_SEED = 0
random.seed(MANUAL_SEED)
np.random.seed(MANUAL_SEED)
torch.manual_seed(MANUAL_SEED)
def split_model_and_compare_output(model, data_gen):
model.eval()
# generate input sample
kwargs = data_gen()
# get origin output and rng state
cpu_rng_state = torch.get_rng_state()
output = model(**kwargs)
# tracing model
tracer = ColoTracer()
try:
meta_args = {k: v.to("meta") for k, v in kwargs.items()}
graph = tracer.trace(root=model, meta_args=meta_args)
except Exception as e:
raise RuntimeError(f"Failed to trace {model.__class__.__name__}, error: {e}")
gm = GraphModule(model, graph, model.__class__.__name__)
gm.recompile()
# apply transform passes
annotated_model = balanced_split_pass(gm, 2)
split_model, split_submodules = split_with_split_nodes_pass(annotated_model)
# get split model
model_part0 = list(split_model.children())[0]
model_part1 = list(split_model.children())[1]
# set rng state and compute output of split model
torch.set_rng_state(cpu_rng_state)
output_part0 = model_part0(**kwargs)
sig = inspect.signature(model_part1.forward)
if isinstance(output_part0, torch.Tensor):
output_part1 = model_part1(output_part0)
else:
if len(output_part0) > len(sig.parameters):
output_part0 = output_part0[: len(sig.parameters)]
output_part1 = model_part1(*output_part0)
# get output tensor from HFOutput datastructure
if "logits" in output:
output_to_compare = output["logits"]
elif "prediction_logits" in output:
output_to_compare = output["prediction_logits"]
else:
output_to_compare = output["last_hidden_state"]
# compare output
if isinstance(output_part1, torch.Tensor):
assert output_to_compare.equal(output_part1)
elif isinstance(output_part1, (tuple, list)):
assert output_to_compare.equal(output_part1[0])
else:
assert False
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_pipeline/test_hf_model/test_opt.py | tests/test_fx/test_pipeline/test_hf_model/test_opt.py | import pytest
import torch
import transformers
from hf_utils import split_model_and_compare_output
BATCH_SIZE = 1
SEQ_LENGHT = 16
@pytest.mark.skip("balance split v2 is not ready")
def test_opt():
MODEL_LIST = [
transformers.OPTModel,
transformers.OPTForCausalLM,
]
config = transformers.OPTConfig(vocab_size=100, hidden_size=128, num_hidden_layers=4, num_attention_heads=4)
def data_gen():
input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64)
attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64)
kwargs = dict(input_ids=input_ids, attention_mask=attention_mask)
return kwargs
for model_cls in MODEL_LIST:
model = model_cls(config=config)
split_model_and_compare_output(model, data_gen)
if __name__ == "__main__":
test_opt()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_pipeline/test_hf_model/test_gpt.py | tests/test_fx/test_pipeline/test_hf_model/test_gpt.py | import pytest
import torch
import transformers
from hf_utils import split_model_and_compare_output
BATCH_SIZE = 64
SEQ_LENGHT = 16
NUM_EPOCHS = 2
NUM_CHUNKS = 1
@pytest.mark.skip("balance split v2 is not ready")
def test_gpt():
MODEL_LIST = [
transformers.GPT2Model,
transformers.GPT2LMHeadModel,
transformers.GPT2DoubleHeadsModel,
transformers.GPT2ForTokenClassification,
# transformers.GPT2ForSequenceClassification, # not supported yet
]
config = transformers.GPT2Config(n_position=64, n_layer=4, n_head=8)
def data_gen():
input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64)
token_type_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64)
attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64)
kwargs = dict(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
return kwargs
for model_cls in MODEL_LIST:
model = model_cls(config=config)
split_model_and_compare_output(model, data_gen)
if __name__ == "__main__":
test_gpt()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_pipeline/test_hf_model/test_bert.py | tests/test_fx/test_pipeline/test_hf_model/test_bert.py | import pytest
import torch
import transformers
from hf_utils import split_model_and_compare_output
BATCH_SIZE = 2
SEQ_LENGHT = 16
@pytest.mark.skip("balance split v2 is not ready")
def test_single_sentence_bert():
MODEL_LIST = [
transformers.BertModel,
transformers.BertForPreTraining,
transformers.BertLMHeadModel,
transformers.BertForMaskedLM,
transformers.BertForSequenceClassification,
transformers.BertForTokenClassification,
]
config = transformers.BertConfig(
vocab_size=100, hidden_size=128, num_hidden_layers=4, num_attention_heads=4, intermediate_size=256
)
def data_gen():
input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64)
token_type_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64)
attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64)
meta_args = dict(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
return meta_args
for model_cls in MODEL_LIST:
model = model_cls(config=config)
split_model_and_compare_output(model, data_gen)
if __name__ == "__main__":
test_single_sentence_bert()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_pipeline/test_torchvision/test_torchvision.py | tests/test_fx/test_pipeline/test_torchvision/test_torchvision.py | import inspect
import random
import numpy as np
import pytest
import torch
import torchvision
import torchvision.models as tm
from packaging import version
from torch.fx import GraphModule
from colossalai.fx import ColoTracer
from colossalai.fx.passes.adding_split_node_pass import balanced_split_pass, split_with_split_nodes_pass
MANUAL_SEED = 0
random.seed(MANUAL_SEED)
np.random.seed(MANUAL_SEED)
torch.manual_seed(MANUAL_SEED)
torch.backends.cudnn.deterministic = True
@pytest.mark.skip("balance split v2 is not ready")
def test_torchvision_models():
MODEL_LIST = [
tm.vgg11,
tm.resnet18,
tm.densenet121,
tm.mobilenet_v3_small,
tm.resnext50_32x4d,
tm.wide_resnet50_2,
tm.regnet_x_16gf,
tm.efficientnet_b0,
tm.mnasnet0_5,
]
if version.parse(torchvision.__version__) >= version.parse("0.12.0"):
MODEL_LIST.extend([tm.vit_b_16, tm.convnext_small])
tracer = ColoTracer()
data = torch.rand(2, 3, 224, 224)
for model_cls in MODEL_LIST:
model = model_cls()
model.eval()
cpu_rng_state = torch.get_rng_state()
output = model(data)
graph = tracer.trace(root=model)
gm = GraphModule(model, graph, model.__class__.__name__)
gm.recompile()
# apply transform passes
annotated_model = balanced_split_pass(gm, 2)
split_model, split_submodules = split_with_split_nodes_pass(annotated_model)
# get split model
model_part0 = list(split_model.children())[0]
model_part1 = list(split_model.children())[1]
# set rng state and compute output of split model
torch.set_rng_state(cpu_rng_state)
output_part0 = model_part0(data)
sig = inspect.signature(model_part1.forward)
if isinstance(output_part0, torch.Tensor):
output_part1 = model_part1(output_part0)
else:
if len(output_part0) > len(sig.parameters):
output_part0 = output_part0[: len(sig.parameters)]
output_part1 = model_part1(*output_part0)
assert output.equal(output_part1)
if __name__ == "__main__":
test_torchvision_models()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_pipeline/test_topo/topo_utils.py | tests/test_fx/test_pipeline/test_topo/topo_utils.py | import random
import numpy as np
import torch
from torch.fx import GraphModule
from colossalai.fx import ColoTracer
from colossalai.fx.passes.adding_split_node_pass import balanced_split_pass, split_with_split_nodes_pass
from colossalai.legacy.pipeline.middleware import Partition, Topo
from colossalai.legacy.pipeline.middleware.adaptor import get_fx_topology
MANUAL_SEED = 0
random.seed(MANUAL_SEED)
np.random.seed(MANUAL_SEED)
torch.manual_seed(MANUAL_SEED)
class MLP(torch.nn.Module):
def __init__(self, config={}):
super().__init__()
dim = config["dim"]
layers = config["layers"]
self.layers = torch.nn.ModuleList()
for _ in range(layers):
self.layers.append(torch.nn.Linear(dim, dim, bias=False))
def forward(self, x):
for layer in self.layers:
x = layer(x)
return x
def split_model_and_get_DAG(model, data_gen):
model.eval()
# generate input sample
kwargs = data_gen()
# tracing model
tracer = ColoTracer()
try:
meta_args = {k: v.to("meta") for k, v in kwargs.items()}
graph = tracer.trace(root=model, meta_args=meta_args)
except Exception as e:
raise RuntimeError(f"Failed to trace {model.__class__.__name__}, error: {e}")
gm = GraphModule(model, graph, model.__class__.__name__)
gm.recompile()
# apply transform passes
annotated_model = balanced_split_pass(gm, 2)
top_module, split_submodules = split_with_split_nodes_pass(annotated_model)
topo = get_fx_topology(top_module)
for submodule in split_submodules:
if isinstance(submodule, torch.fx.GraphModule):
setattr(submodule, "_topo", topo)
return top_module, split_submodules[0]._topo
def check_input(top_module, input_partition: Partition):
partition_output = input_partition.get_output_vals()
arg_pos = 0
for node in top_module.graph.nodes:
if node.op == "placeholder":
cur_checkee = partition_output[arg_pos]
to_partition_and_offset = cur_checkee.get()
assert len(to_partition_and_offset) == len(node.users.keys())
arg_pos += 1
assert arg_pos == len(partition_output)
def check_submod(top_module, part_id, mid_partition: Partition):
partition_input = mid_partition.get_input_vals()
partition_output = mid_partition.get_output_vals()
cnt = 1
cur_node = None
for node in top_module.graph.nodes:
if node.name.startswith("submod"):
cnt += 1
if cnt == part_id:
cur_node = node
break
assert len(partition_input) == len(cur_node.args)
assert len(partition_output) == len(cur_node.users)
def check_topo(top_module, topo: Topo):
input_partition = topo.get_input_partition()
mid_partitions = topo.get_mid_partitions()
check_input(top_module, input_partition)
for part_id, submod in mid_partitions.items():
check_submod(top_module, part_id, submod)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_pipeline/test_topo/test_topo.py | tests/test_fx/test_pipeline/test_topo/test_topo.py | import pytest
import torch
import transformers
from topo_utils import MLP, check_topo, split_model_and_get_DAG
BATCH_SIZE = 1
SEQ_LENGHT = 16
@pytest.mark.skip("ShapeProp is not compatible with PyTorch 1.11.0")
def test_opt():
MODEL_LIST = [
MLP,
transformers.OPTModel,
]
CONFIGS = [
{"dim": 10, "layers": 12},
transformers.OPTConfig(vocab_size=100, hidden_size=128, num_hidden_layers=4, num_attention_heads=4),
]
def data_gen_MLP():
x = torch.zeros((16, 10))
kwargs = dict(x=x)
return kwargs
def data_gen_OPT():
input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64)
attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64)
kwargs = dict(input_ids=input_ids, attention_mask=attention_mask)
return kwargs
DATAGEN = [
data_gen_MLP,
data_gen_OPT,
]
for i, model_cls in enumerate(MODEL_LIST):
model = model_cls(config=CONFIGS[i])
top_mod, topo = split_model_and_get_DAG(model, DATAGEN[i])
# print(f'{top_mod=}\n----\n{topo=}')
check_topo(top_mod, topo)
if __name__ == "__main__":
test_opt()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_control_flow.py | tests/test_fx/test_tracer/test_control_flow.py | import torch
import torch.nn as nn
from torch.fx import GraphModule
from colossalai.fx import ColoTracer as Tracer
from colossalai.testing import clear_cache_before_run
class ControlFlowModel(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(10, 10)
self.linear2 = nn.Linear(10, 10)
def forward(self, x, y):
x1 = self.linear1(x)
y1 = self.linear2(y)
if x1.dim() == 2:
return x1 + y1
else:
return x1 - y1
@clear_cache_before_run()
def test_control_flow():
model = ControlFlowModel()
tracer = Tracer()
graph_branch_true = tracer.trace(
model, meta_args={"x": torch.rand(4, 10, device="meta"), "y": torch.rand(4, 10, device="meta")}
)
graph_branch_false = tracer.trace(
model, meta_args={"x": torch.rand(10, device="meta"), "y": torch.rand(4, 10, device="meta")}
)
gm_branch_true = GraphModule(model, graph_branch_true, model.__class__.__name__)
gm_branch_false = GraphModule(model, graph_branch_false, model.__class__.__name__)
gm_branch_true.recompile()
gm_branch_false.recompile()
# test the true branch
x = torch.rand(4, 10)
y = torch.rand(4, 10)
assert torch.all(model(x, y) == gm_branch_true(x, y))
assert torch.all(gm_branch_false(x, y) != gm_branch_true(x, y))
# test the true branch
x = torch.rand(10)
y = torch.rand(4, 10)
assert torch.all(model(x, y) == gm_branch_false(x, y))
assert torch.all(gm_branch_false(x, y) != gm_branch_true(x, y))
if __name__ == "__main__":
test_control_flow()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_functional_conv.py | tests/test_fx/test_tracer/test_functional_conv.py | import torch
from torch.nn import functional as F
from colossalai.fx.tracer.meta_patch import patched_function
from colossalai.testing import clear_cache_before_run
@clear_cache_before_run()
def test_conv():
# test F.conv_1d
data_1d = torch.rand(3, 16, 10)
weight_1d = torch.rand(3, 16, 3)
out_1d = F.conv1d(data_1d, weight_1d)
patched_out_1d = patched_function.torch_nn_functional_conv1d(data_1d, weight_1d)
assert out_1d.shape == patched_out_1d.shape
# test F.conv_transpose1d
weight_1d = torch.transpose(weight_1d, 0, 1)
out_transpose_1d = F.conv_transpose1d(data_1d, weight_1d)
patched_out_transpose_1d = patched_function.torch_nn_functional_convtranspose1d(data_1d, weight_1d)
assert out_transpose_1d.shape == patched_out_transpose_1d.shape
# test F.conv2d
data_2d = torch.rand(3, 16, 10, 10)
weight_2d = torch.rand(3, 16, 3, 3)
out_2d = F.conv2d(data_2d, weight_2d)
patched_out_2d = patched_function.torch_nn_functional_conv2d(data_2d, weight_2d)
assert out_2d.shape == patched_out_2d.shape
# test F.conv_transpose2d
weight_2d = torch.transpose(weight_2d, 0, 1)
out_transpose_2d = F.conv_transpose2d(data_2d, weight_2d)
patched_out_transpose_2d = patched_function.torch_nn_functional_convtranspose2d(data_2d, weight_2d)
assert out_transpose_2d.shape == patched_out_transpose_2d.shape
# test F.conv3d
data_3d = torch.rand(3, 16, 10, 10, 10)
weight_3d = torch.rand(3, 16, 3, 3, 3)
out_3d = F.conv3d(data_3d, weight_3d)
patched_out_3d = patched_function.torch_nn_functional_conv3d(data_3d, weight_3d)
assert out_3d.shape == patched_out_3d.shape
# test F.conv_transpose3d
weight_3d = torch.transpose(weight_3d, 0, 1)
out_transpose_3d = F.conv_transpose3d(data_3d, weight_3d)
patched_out_transpose_3d = patched_function.torch_nn_functional_convtranspose3d(data_3d, weight_3d)
assert out_transpose_3d.shape == patched_out_transpose_3d.shape
if __name__ == "__main__":
test_conv()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_patched_module.py | tests/test_fx/test_tracer/test_patched_module.py | import torch
from colossalai.fx.tracer.meta_patch import patched_module
from colossalai.testing import clear_cache_before_run
def _run(data, module, patch_fn):
try:
if isinstance(data, dict):
output = patch_fn(module, **data)
if isinstance(data, tuple) or isinstance(data, list):
output = patch_fn(module, *data)
else:
output = patch_fn(module, data)
return output
except Exception as e:
return e
def _assert_output_shape(data, module, patch_fn, expect_exception, output_shape):
output = _run(data, module, patch_fn)
if expect_exception:
assert isinstance(output, AssertionError)
else:
assert not isinstance(output, Exception)
if isinstance(output, tuple):
for item, shape in zip(output, output_shape):
assert item.is_meta
assert item.shape == shape
else:
assert output.is_meta
assert output.shape == output_shape
@clear_cache_before_run()
def test_linear():
# test linear patch can produce the meta output with correct shape
data = torch.rand(2, 4, device="meta")
module = torch.nn.Linear(4, 2)
_assert_output_shape(data, module, patched_module.torch_nn_linear, False, torch.Size([2, 2]))
# test if the linear patch can catch exception when dimension does not match
data = torch.rand(2, 2, device="meta")
_assert_output_shape(data, module, patched_module.torch_nn_linear, True, None)
@clear_cache_before_run()
def test_rnn():
# test rnn patch can produce the meta output with correct shape
data = (torch.randn(5, 3, 10), torch.randn(2, 3, 20))
module = torch.nn.RNN(10, 20, 2)
output, hn = module(*data)
meta_data = (torch.randn(5, 3, 10).to("meta"), torch.randn(2, 3, 20).to("meta"))
_assert_output_shape(meta_data, module, patched_module.torch_nn_rnn, False, (output.shape, hn.shape))
# test if the rnn patch can catch exception when dimension does not match
data = (torch.randn(5, 3, 10), torch.randn(2, 3, 20))
module = torch.nn.RNN(10, 20, 2)
output, hn = module(*data)
meta_data = (torch.randn(5, 3, 1).to("meta"), torch.randn(2, 3, 20).to("meta"))
_assert_output_shape(meta_data, module, patched_module.torch_nn_rnn, True, None)
@clear_cache_before_run()
def test_embedding():
data = torch.rand(2, 4, device="meta")
# test layernorm
ln = torch.nn.LayerNorm(4)
_assert_output_shape(data, ln, patched_module.torch_nn_normalize, False, data.shape)
# test group norm
gn = torch.nn.GroupNorm(4, num_channels=8)
_assert_output_shape(data, gn, patched_module.torch_nn_normalize, False, data.shape)
# test batch norm 1d
bn1d = torch.nn.BatchNorm1d(4)
data = torch.rand(2, 4, device="meta")
_assert_output_shape(
data=data,
module=bn1d,
patch_fn=patched_module.torch_nn_normalize,
expect_exception=False,
output_shape=data.shape,
)
data = torch.rand(2, 4, device="meta")
_assert_output_shape(
data=data,
module=bn1d,
patch_fn=patched_module.torch_nn_normalize,
expect_exception=False,
output_shape=data.shape,
)
data = torch.rand(2, 3, 4, device="meta")
_assert_output_shape(
data=data,
module=bn1d,
patch_fn=patched_module.torch_nn_normalize,
expect_exception=False,
output_shape=data.shape,
)
data = torch.rand(1, 2, 3, 4, device="meta")
_assert_output_shape(
data=data, module=bn1d, patch_fn=patched_module.torch_nn_normalize, expect_exception=True, output_shape=None
)
# test batch norm 2d
bn2d = torch.nn.BatchNorm2d(4)
data = torch.rand(1, 2, 3, 4, device="meta")
_assert_output_shape(
data=data,
module=bn2d,
patch_fn=patched_module.torch_nn_normalize,
expect_exception=False,
output_shape=data.shape,
)
data = torch.rand(2, 3, 4, device="meta")
_assert_output_shape(
data=data, module=bn2d, patch_fn=patched_module.torch_nn_normalize, expect_exception=True, output_shape=None
)
# # test batch size 3d
bn3d = torch.nn.BatchNorm3d(4)
data = torch.rand(1, 1, 2, 3, 4, device="meta")
_assert_output_shape(
data=data,
module=bn3d,
patch_fn=patched_module.torch_nn_normalize,
expect_exception=False,
output_shape=data.shape,
)
data = torch.rand(1, 2, 3, 4, device="meta")
_assert_output_shape(
data=data, module=bn3d, patch_fn=patched_module.torch_nn_normalize, expect_exception=True, output_shape=None
)
@clear_cache_before_run()
def test_conv1d():
# test conv 1d
data = torch.rand(2, 3, 4)
conv1d = torch.nn.Conv1d(in_channels=3, out_channels=4, kernel_size=2)
materialized_output = conv1d(data)
meta_data = data.to("meta")
_assert_output_shape(
data=meta_data,
module=conv1d,
patch_fn=patched_module.torch_nn_conv1d,
expect_exception=False,
output_shape=materialized_output.shape,
)
conv1d = torch.nn.Conv1d(in_channels=3, out_channels=4, kernel_size=2, padding=1)
materialized_output = conv1d(data)
meta_data = data.to("meta")
_assert_output_shape(
data=meta_data,
module=conv1d,
patch_fn=patched_module.torch_nn_conv1d,
expect_exception=False,
output_shape=materialized_output.shape,
)
conv1d = torch.nn.Conv1d(
in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2, padding_mode="reflect"
)
materialized_output = conv1d(data)
meta_data = data.to("meta")
_assert_output_shape(
data=meta_data,
module=conv1d,
patch_fn=patched_module.torch_nn_conv1d,
expect_exception=False,
output_shape=materialized_output.shape,
)
def test_conv2d():
# test conv 2d
data = torch.rand(2, 3, 4, 4)
conv2d = torch.nn.Conv2d(in_channels=3, out_channels=4, kernel_size=2)
materialized_output = conv2d(data)
_assert_output_shape(
data=data,
module=conv2d,
patch_fn=patched_module.torch_nn_conv2d,
expect_exception=False,
output_shape=materialized_output.shape,
)
conv2d = torch.nn.Conv2d(in_channels=3, out_channels=4, kernel_size=2, padding=1)
materialized_output = conv2d(data)
_assert_output_shape(
data=data,
module=conv2d,
patch_fn=patched_module.torch_nn_conv2d,
expect_exception=False,
output_shape=materialized_output.shape,
)
conv2d = torch.nn.Conv2d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2)
materialized_output = conv2d(data)
_assert_output_shape(
data=data,
module=conv2d,
patch_fn=patched_module.torch_nn_conv2d,
expect_exception=False,
output_shape=materialized_output.shape,
)
conv2d = torch.nn.Conv2d(
in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2, padding_mode="reflect"
)
materialized_output = conv2d(data)
_assert_output_shape(
data=data,
module=conv2d,
patch_fn=patched_module.torch_nn_conv2d,
expect_exception=False,
output_shape=materialized_output.shape,
)
@clear_cache_before_run()
def test_conv3d():
# test conv 3d
data = torch.rand(2, 3, 4, 4, 4)
conv3d = torch.nn.Conv3d(in_channels=3, out_channels=4, kernel_size=2)
materialized_output = conv3d(data)
_assert_output_shape(
data=data,
module=conv3d,
patch_fn=patched_module.torch_nn_conv3d,
expect_exception=False,
output_shape=materialized_output.shape,
)
conv3d = torch.nn.Conv3d(in_channels=3, out_channels=4, kernel_size=2, padding=1)
materialized_output = conv3d(data)
_assert_output_shape(
data=data,
module=conv3d,
patch_fn=patched_module.torch_nn_conv3d,
expect_exception=False,
output_shape=materialized_output.shape,
)
conv3d = torch.nn.Conv3d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2)
materialized_output = conv3d(data)
_assert_output_shape(
data=data,
module=conv3d,
patch_fn=patched_module.torch_nn_conv3d,
expect_exception=False,
output_shape=materialized_output.shape,
)
conv3d = torch.nn.Conv3d(
in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2, padding_mode="reflect"
)
materialized_output = conv3d(data)
_assert_output_shape(
data=data,
module=conv3d,
patch_fn=patched_module.torch_nn_conv3d,
expect_exception=False,
output_shape=materialized_output.shape,
)
@clear_cache_before_run()
def test_conv_transpose1d():
# test conv transpose1d
data = torch.rand(2, 3, 4)
convtrans1d = torch.nn.ConvTranspose1d(in_channels=3, out_channels=4, kernel_size=2)
materialized_output = convtrans1d(data)
meta_data = data.to("meta")
_assert_output_shape(
data=meta_data,
module=convtrans1d,
patch_fn=patched_module.torch_nn_convtranspose1d,
expect_exception=False,
output_shape=materialized_output.shape,
)
convtrans1d = torch.nn.ConvTranspose1d(in_channels=3, out_channels=4, kernel_size=2, padding=1)
materialized_output = convtrans1d(data)
meta_data = data.to("meta")
_assert_output_shape(
data=meta_data,
module=convtrans1d,
patch_fn=patched_module.torch_nn_convtranspose1d,
expect_exception=False,
output_shape=materialized_output.shape,
)
@clear_cache_before_run()
def test_conv_transpose2d():
# test conv transpose2d
data = torch.rand(2, 3, 4, 4)
convtrans2d = torch.nn.ConvTranspose2d(in_channels=3, out_channels=4, kernel_size=2)
materialized_output = convtrans2d(data)
meta_data = data.to("meta")
_assert_output_shape(
data=meta_data,
module=convtrans2d,
patch_fn=patched_module.torch_nn_convtranspose2d,
expect_exception=False,
output_shape=materialized_output.shape,
)
convtrans2d = torch.nn.ConvTranspose2d(in_channels=3, out_channels=4, kernel_size=2, padding=1)
materialized_output = convtrans2d(data)
meta_data = data.to("meta")
_assert_output_shape(
data=meta_data,
module=convtrans2d,
patch_fn=patched_module.torch_nn_convtranspose2d,
expect_exception=False,
output_shape=materialized_output.shape,
)
@clear_cache_before_run()
def test_conv_transpose3d():
# test conv transpose2d
data = torch.rand(2, 3, 4, 4, 4)
convtrans3d = torch.nn.ConvTranspose3d(in_channels=3, out_channels=4, kernel_size=2)
materialized_output = convtrans3d(data)
meta_data = data.to("meta")
_assert_output_shape(
data=meta_data,
module=convtrans3d,
patch_fn=patched_module.torch_nn_convtranspose3d,
expect_exception=False,
output_shape=materialized_output.shape,
)
convtrans3d = torch.nn.ConvTranspose3d(in_channels=3, out_channels=4, kernel_size=2, padding=1)
materialized_output = convtrans3d(data)
meta_data = data.to("meta")
_assert_output_shape(
data=meta_data,
module=convtrans3d,
patch_fn=patched_module.torch_nn_convtranspose3d,
expect_exception=False,
output_shape=materialized_output.shape,
)
@clear_cache_before_run()
def test_pool1d():
combinations = [
[torch.nn.MaxPool1d, patched_module.torch_nn_maxpool1d],
[torch.nn.AvgPool1d, patched_module.torch_nn_avgpool1d],
]
for layer_cls, patch_func in combinations:
pooler = layer_cls(kernel_size=3)
data = torch.rand(2, 3, 4)
materialized_output = pooler(data)
_assert_output_shape(
data=data,
module=pooler,
patch_fn=patch_func,
expect_exception=False,
output_shape=materialized_output.shape,
)
data = torch.rand(2, 4)
materialized_output = pooler(data)
_assert_output_shape(
data=data,
module=pooler,
patch_fn=patch_func,
expect_exception=False,
output_shape=materialized_output.shape,
)
data = torch.rand(2, 3, 4, 4)
_assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=True, output_shape=None)
@clear_cache_before_run()
def test_pool2d():
combinations = [
[torch.nn.MaxPool2d, patched_module.torch_nn_maxpool2d],
[torch.nn.AvgPool2d, patched_module.torch_nn_avgpool2d],
]
for layer_cls, patch_func in combinations:
pooler = layer_cls(kernel_size=3)
# test max pool 3d
data = torch.rand(2, 3, 4, 4)
materialized_output = pooler(data)
_assert_output_shape(
data=data,
module=pooler,
patch_fn=patch_func,
expect_exception=False,
output_shape=materialized_output.shape,
)
# test max pool 3d
data = torch.rand(2, 4, 4)
materialized_output = pooler(data)
_assert_output_shape(
data=data,
module=pooler,
patch_fn=patch_func,
expect_exception=False,
output_shape=materialized_output.shape,
)
# test max pool 3d
data = torch.rand(2, 3, 4, 4, 4)
_assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=True, output_shape=None)
@clear_cache_before_run()
def test_pool3d():
combinations = [
[torch.nn.MaxPool3d, patched_module.torch_nn_maxpool3d],
[torch.nn.AvgPool3d, patched_module.torch_nn_avgpool3d],
]
for layer_cls, patch_func in combinations:
pooler = layer_cls(kernel_size=3)
# test max pool 3d
data = torch.rand(2, 3, 4, 4, 4)
materialized_output = pooler(data)
_assert_output_shape(
data=data,
module=pooler,
patch_fn=patch_func,
expect_exception=False,
output_shape=materialized_output.shape,
)
# test max pool 3d
data = torch.rand(2, 4, 4, 4)
materialized_output = pooler(data)
_assert_output_shape(
data=data,
module=pooler,
patch_fn=patch_func,
expect_exception=False,
output_shape=materialized_output.shape,
)
# test max pool 3d
data = torch.rand(2, 3, 4)
_assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=True, output_shape=None)
# adapative pooling is different from other pooling, so test it individually
@clear_cache_before_run()
def test_adaptive_pooling_1d():
pooler = torch.nn.AdaptiveAvgPool1d(output_size=3)
patch_func = patched_module.torch_nn_adapative_pooling_1d
data = torch.rand(3, 4)
output = pooler(data)
_assert_output_shape(
data=data, module=pooler, patch_fn=patch_func, expect_exception=False, output_shape=output.shape
)
data = torch.rand(2, 3, 4)
output = pooler(data)
_assert_output_shape(
data=data, module=pooler, patch_fn=patch_func, expect_exception=False, output_shape=output.shape
)
data = torch.rand(2, 3, 4, 5)
_assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=True, output_shape=None)
@clear_cache_before_run()
def test_adaptive_pooling_2d():
pooler = torch.nn.AdaptiveAvgPool2d(output_size=3)
patch_func = patched_module.torch_nn_adapative_pooling_2d
data = torch.rand(3, 4)
_assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=True, output_shape=None)
data = torch.rand(2, 3, 4)
output = pooler(data)
_assert_output_shape(
data=data, module=pooler, patch_fn=patch_func, expect_exception=False, output_shape=output.shape
)
data = torch.rand(2, 3, 4, 5)
output = pooler(data)
_assert_output_shape(
data=data, module=pooler, patch_fn=patch_func, expect_exception=False, output_shape=output.shape
)
@clear_cache_before_run()
def test_adaptive_pooling_3d():
pooler = torch.nn.AdaptiveAvgPool3d(output_size=3)
patch_func = patched_module.torch_nn_adapative_pooling_3d
data = torch.rand(3, 4, 5)
_assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=True, output_shape=None)
data = torch.rand(2, 3, 4, 5)
output = pooler(data)
_assert_output_shape(
data=data, module=pooler, patch_fn=patch_func, expect_exception=False, output_shape=output.shape
)
data = torch.rand(2, 3, 4, 5, 6)
output = pooler(data)
_assert_output_shape(
data=data, module=pooler, patch_fn=patch_func, expect_exception=False, output_shape=output.shape
)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_patched_op.py | tests/test_fx/test_tracer/test_patched_op.py | from functools import partial
import torch
from colossalai.fx.tracer.meta_patch import patched_function
from colossalai.testing import clear_cache_before_run
def _run(data, patch_fn):
try:
output = patch_fn(data)
return output
except Exception as e:
return e
def _assert_output_shape(data, patch_fn, expect_exception, output_shape):
output = _run(data, patch_fn)
if expect_exception:
assert isinstance(output, AssertionError)
else:
assert not isinstance(output, Exception)
assert output.is_meta
assert output.shape == output_shape
@clear_cache_before_run()
def test_repeat_interleave():
patch_fn = patched_function.torch_repeat_interleave
# examples from https://pytorch.org/docs/stable/generated/torch.repeat_interleave.html
data = torch.tensor([1, 2, 3])
materialized_output = torch.repeat_interleave(data, repeats=2)
repeat_interleave = partial(patch_fn, repeats=2)
meta_data = data.to("meta")
_assert_output_shape(
data=meta_data, patch_fn=repeat_interleave, expect_exception=False, output_shape=materialized_output.shape
)
data = torch.tensor([[1, 2], [3, 4]])
materialized_output = torch.repeat_interleave(data, repeats=3, dim=1)
repeat_interleave = partial(patch_fn, repeats=3, dim=1)
meta_data = data.to("meta")
_assert_output_shape(
data=meta_data, patch_fn=repeat_interleave, expect_exception=False, output_shape=materialized_output.shape
)
data = torch.tensor([[1, 2], [3, 4]])
materialized_output = torch.repeat_interleave(data, repeats=torch.tensor([1, 2]), dim=-1)
repeat_interleave = partial(patch_fn, repeats=torch.tensor([1, 2]), dim=-1)
meta_data = data.to("meta")
_assert_output_shape(
data=meta_data, patch_fn=repeat_interleave, expect_exception=False, output_shape=materialized_output.shape
)
data = torch.tensor([[1, 2], [3, 4]])
materialized_output = torch.repeat_interleave(data, repeats=torch.tensor([1, 2]), dim=0)
repeat_interleave = partial(patch_fn, repeats=[1, 2], dim=0)
meta_data = data.to("meta")
_assert_output_shape(
data=meta_data, patch_fn=repeat_interleave, expect_exception=True, output_shape=materialized_output.shape
)
@clear_cache_before_run()
def test_torch_max():
data = torch.rand(4, 3)
out = torch.max(data)
patched_out = patched_function.torch_max(data)
assert out.shape == patched_out.shape
data = torch.rand(4, 3, 2)
out, idx = torch.max(data, dim=1)
patched_out, patched_idx = patched_function.torch_max(data, dim=1)
assert out.shape == patched_out.shape
assert idx.shape == patched_idx.shape
data = torch.rand(4, 3, 2)
out, idx = torch.max(data, dim=1, keepdim=True)
patched_out, patched_idx = patched_function.torch_max(data, dim=1, keepdim=True)
assert out.shape == patched_out.shape
assert idx.shape == patched_idx.shape
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_bias_addition_module.py | tests/test_fx/test_tracer/test_bias_addition_module.py | import torch
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.testing import clear_cache_before_run
class LinearModel(torch.nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.linear = torch.nn.Linear(in_features, out_features)
def forward(self, x):
x = self.linear(x)
x = x * 2
return x
class ConvModel(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, bias=True):
super().__init__()
self.conv = torch.nn.Conv2d(
in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, bias=bias
)
def forward(self, x):
x = self.conv(x)
x = x * 2
return x
@clear_cache_before_run()
def test_linear_module():
model = LinearModel(3, 6)
tracer = ColoTracer()
# graph():
# %x : torch.Tensor [#users=1] = placeholder[target=x]
# %linear_weight : [#users=1] = get_attr[target=linear.weight]
# %linear_bias : [#users=1] = get_attr[target=linear.bias]
# %linear : [#users=1] = call_function[target=torch._C._nn.linear](args = (%x, %linear_weight), kwargs = {})
# %add : [#users=1] = call_function[target=operator.add](args = (%linear, %linear_bias), kwargs = {})
# %mul : [#users=1] = call_function[target=operator.mul](args = (%add, 2), kwargs = {})
# return mul
graph = tracer.trace(root=model, meta_args={"x": torch.rand(3, 3).to("meta")})
# def forward(self, x : torch.Tensor):
# linear_weight = self.linear.weight
# linear_bias = self.linear.bias
# linear = torch._C._nn.linear(x, linear_weight); x = linear_weight = None
# add = linear + linear_bias; linear = linear_bias = None
# mul = add * 2; add = None
# return mul
gm = ColoGraphModule(model, graph)
gm.recompile()
node_list = list(graph.nodes)
for node in node_list:
if node.op == "output":
continue
assert hasattr(node, "_meta_data")
weight_node = node_list[1]
bias_node = node_list[2]
linear_node = node_list[3]
add_node = node_list[4]
assert weight_node._meta_data.shape == (6, 3)
assert bias_node._meta_data.shape == (6,)
assert linear_node._meta_data.shape == (3, 6)
assert add_node._meta_data.shape == (3, 6)
@clear_cache_before_run()
def test_conv_module():
model = ConvModel(3, 6, 2)
tracer = ColoTracer()
# graph():
# %x : torch.Tensor [#users=1] = placeholder[target=x]
# %conv_weight : [#users=1] = get_attr[target=conv.weight]
# %conv_bias : [#users=1] = get_attr[target=conv.bias]
# %conv2d : [#users=1] = call_function[target=torch.conv2d](args = (%x, %conv_weight), kwargs = {})
# %view : [#users=1] = call_method[target=view](args = (%conv_bias, [1, -1, 1, 1]), kwargs = {})
# %add : [#users=1] = call_function[target=operator.add](args = (%conv2d, %view), kwargs = {})
# %mul : [#users=1] = call_function[target=operator.mul](args = (%add, 2), kwargs = {})
# return mul
graph = tracer.trace(root=model, meta_args={"x": torch.rand(4, 3, 64, 64).to("meta")})
# def forward(self, x : torch.Tensor):
# conv_weight = self.conv.weight
# conv_bias = self.conv.bias
# conv2d = torch.conv2d(x, conv_weight); x = conv_weight = None
# view = conv_bias.view([1, -1, 1, 1]); conv_bias = None
# add = conv2d + view; conv2d = view = None
# mul = add * 2; add = None
# return mul
gm = ColoGraphModule(model, graph)
gm.recompile()
node_list = list(graph.nodes)
for node in node_list:
if node.op == "output":
continue
assert hasattr(node, "_meta_data")
weight_node = node_list[1]
bias_node = node_list[2]
conv_node = node_list[3]
view_node = node_list[4]
add_node = node_list[5]
assert weight_node._meta_data.shape == (6, 3, 2, 2)
assert bias_node._meta_data.shape == (6,)
assert conv_node._meta_data.shape == (4, 6, 63, 63)
assert view_node._meta_data.shape == (6, 1, 1)
assert add_node._meta_data.shape == (4, 6, 63, 63)
if __name__ == "__main__":
test_linear_module()
test_conv_module()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_activation_checkpoint_annotation.py | tests/test_fx/test_tracer/test_activation_checkpoint_annotation.py | import torch
from torch.fx import GraphModule
from torch.utils.checkpoint import checkpoint
from colossalai.fx import ColoTracer
from colossalai.testing import clear_cache_before_run
class MLP(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear1 = torch.nn.Linear(4, 4)
self.linear2 = torch.nn.Linear(4, 4)
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
return x
# Simple module for demonstration
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.mlp_1 = MLP()
self.mlp_2 = MLP()
self.output = torch.nn.Linear(4, 4)
def forward(self, x):
x = checkpoint(self.mlp_1, x)
x = checkpoint(self.mlp_2, x)
x = self.output(x)
return x
@clear_cache_before_run()
def test_activation_checkpoint_annotation():
module = MyModule()
# test tracing with activation checkpoint
tracer = ColoTracer(trace_act_ckpt=True)
graph = tracer.trace(module)
gm = GraphModule(module, graph)
for node in gm.graph.nodes:
if node.name in ["mlp_1_linear1", "mlp_1_linear2"]:
assert node.meta.get("activation_checkpoint", -1) == 0
for node in gm.graph.nodes:
if node.name in ["mlp_2_linear1", "mlp_2_linear2"]:
assert node.meta.get("activation_checkpoint", -1) == 1
tracer = ColoTracer(trace_act_ckpt=False)
graph = tracer.trace(module)
gm = GraphModule(module, graph)
for node in gm.graph.nodes:
assert not hasattr(node, "activation_checkpoint")
if __name__ == "__main__":
test_activation_checkpoint_annotation()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_timm_model/test_timm_model.py | tests/test_fx/test_tracer/test_timm_model/test_timm_model.py | import pytest
import torch
from packaging import version
from colossalai._analyzer.fx import symbolic_trace
from colossalai.testing import clear_cache_before_run
from tests.kit.model_zoo import model_zoo
def trace_and_compare(model_cls, data, output_transform_fn, meta_args=None):
# trace
model = model_cls()
# convert to eval for inference
# it is important to set it to eval mode before tracing
# without this statement, the torch.nn.functional.batch_norm will always be in training mode
model.eval()
# TODO: support the following models
# 1. ConViT
# 2. NormFreeNet
# as they are not supported, let's skip them
if model.__class__.__name__ in ["ConViT", "NormFreeNet"]:
return
gm = symbolic_trace(model, meta_args=meta_args)
# run forward
with torch.no_grad():
fx_out = gm(**data)
non_fx_out = model(**data)
# compare output
transformed_fx_out = output_transform_fn(fx_out)
transformed_non_fx_out = output_transform_fn(non_fx_out)
assert len(transformed_fx_out) == len(transformed_non_fx_out)
for key in transformed_fx_out.keys():
fx_output_val = transformed_fx_out[key]
non_fx_output_val = transformed_non_fx_out[key]
assert torch.allclose(
fx_output_val, non_fx_output_val, atol=1e-5
), f"{model.__class__.__name__} has inconsistent outputs, {fx_output_val} vs {non_fx_output_val}"
# FIXME(ver217): timm/models/convit.py:71: in forward
# if self.rel_indices is None or self.rel_indices.shape[1] != N:
# torch/fx/proxy.py:284: in __bool__
# return self.tracer.to_bool(self)
# torch.fx.proxy.TraceError: symbolically traced variables cannot be used as inputs to control flow
@pytest.mark.skip("convit is not supported yet")
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse("1.12.0"), reason="torch version < 12")
@clear_cache_before_run()
def test_timm_models():
torch.backends.cudnn.deterministic = True
sub_model_zoo = model_zoo.get_sub_registry("timm")
for name, (model_fn, data_gen_fn, output_transform_fn, _, _, attribute) in sub_model_zoo.items():
data = data_gen_fn()
if attribute is not None and attribute.has_control_flow:
meta_args = {k: v.to("meta") for k, v in data.items()}
else:
meta_args = None
trace_and_compare(model_fn, data, output_transform_fn, meta_args)
if __name__ == "__main__":
test_timm_models()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_torchvision_model/test_torchvision_model.py | tests/test_fx/test_tracer/test_torchvision_model/test_torchvision_model.py | import torch
from colossalai._analyzer.fx import symbolic_trace
from colossalai.testing import clear_cache_before_run
from tests.kit.model_zoo import model_zoo
@clear_cache_before_run()
def test_torchvision_models():
torch.backends.cudnn.deterministic = True
tv_sub_registry = model_zoo.get_sub_registry("torchvision")
for name, (model_fn, data_gen_fn, output_transform_fn, _, model_attribute) in tv_sub_registry.items():
data = data_gen_fn()
if model_attribute is not None and model_attribute.has_stochastic_depth_prob:
model = model_fn(stochastic_depth_prob=0)
else:
model = model_fn()
gm = symbolic_trace(model)
model.eval()
gm.eval()
try:
with torch.no_grad():
fx_out = gm(**data)
non_fx_out = model(**data)
transformed_out = output_transform_fn(fx_out)
transformed_non_fx_out = output_transform_fn(non_fx_out)
assert len(transformed_out) == len(transformed_non_fx_out)
for key in transformed_out.keys():
fx_val = transformed_out[key]
non_fx_val = transformed_non_fx_out[key]
assert torch.allclose(
fx_val, non_fx_val
), f"{model.__class__.__name__} has inconsistent outputs, {fx_val} vs {non_fx_val}"
except Exception as e:
print(name, e)
if __name__ == "__main__":
test_torchvision_models()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_hf_model/test_hf_diffuser.py | tests/test_fx/test_tracer/test_hf_model/test_hf_diffuser.py | import pytest
import torch
from colossalai.fx import symbolic_trace
from colossalai.testing import clear_cache_before_run
from colossalai.testing.random import seed_all
from tests.kit.model_zoo import model_zoo
def assert_dict(da, db, assert_fn):
assert len(da) == len(db)
for k, v in da.items():
assert k in db
if not torch.is_tensor(v):
continue
u = db.get(k)
assert_fn(u, v)
def trace_and_compare(model_cls, data, output_fn):
model = model_cls()
model.eval()
concrete_args = {k: v for k, v in data.items() if not torch.is_tensor(v)}
meta_args = {k: v.to("meta") for k, v in data.items() if torch.is_tensor(v)}
gm = symbolic_trace(model, concrete_args=concrete_args, meta_args=meta_args)
# run forward
with torch.no_grad():
fx_out = gm(**data)
non_fx_out = model(**data)
# compare output
transformed_fx_out = output_fn(fx_out)
transformed_non_fx_out = output_fn(non_fx_out)
def assert_fn(ta, tb):
assert torch.equal(ta, tb)
assert_dict(transformed_fx_out, transformed_non_fx_out, assert_fn)
@pytest.mark.skip(reason="cannot pass this test yet")
@clear_cache_before_run()
def test_diffusers():
seed_all(9091, cuda_deterministic=True)
sub_model_zoo = model_zoo.get_sub_registry("diffusers")
for name, (model_fn, data_gen_fn, output_transform_fn, _, _, attribute) in sub_model_zoo.items():
data = data_gen_fn()
trace_and_compare(model_fn, data, output_transform_fn)
torch.cuda.synchronize()
print(f"{name:40s} √")
@clear_cache_before_run()
def test_torch_diffusers():
seed_all(65535, cuda_deterministic=True)
sub_model_zoo = model_zoo.get_sub_registry("diffusers")
for name, (model_fn, data_gen_fn, output_transform_fn, _, attribute) in sub_model_zoo.items():
data = data_gen_fn()
model = model_fn()
model(**data)
torch.cuda.synchronize()
print(f"{name:40s} √")
if __name__ == "__main__":
test_torch_diffusers()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_hf_model/hf_tracer_utils.py | tests/test_fx/test_tracer/test_hf_model/hf_tracer_utils.py | from typing import List
import torch
# from colossalai.fx import symbolic_trace
from colossalai._analyzer.fx import symbolic_trace
def trace_model_and_compare_output(model, data_gen, ignore_data: List[str] = None):
# must turn on eval mode to ensure the output is consistent
model.eval()
inputs = data_gen()
if ignore_data is not None:
# drop the ignore_data key
inputs = {k: v for k, v in inputs.items() if k not in ignore_data}
try:
meta_args = {k: v.to("meta") for k, v in inputs.items()}
gm = symbolic_trace(model, meta_args=meta_args)
except Exception as e:
raise RuntimeError(f"Failed to trace {model.__class__.__name__}, error: {e}")
# run forward
non_fx_out = model(**inputs)
fx_out = gm(**inputs)
# check output
for k in non_fx_out.keys():
if torch.is_tensor(fx_out[k]):
assert torch.equal(
fx_out[k], non_fx_out[k]
), f"{model.__class__.__name__} has incorrect output {k}, expect {non_fx_out[k]}, but got {fx_out[k]}"
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_hf_model/test_hf_bert.py | tests/test_fx/test_tracer/test_hf_model/test_hf_bert.py | import pytest
import torch
from hf_tracer_utils import trace_model_and_compare_output
from packaging import version
from colossalai.testing import clear_cache_before_run
from tests.kit.model_zoo import model_zoo
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse("1.12.0"), reason="torch version < 12")
@clear_cache_before_run()
def test_bert():
sub_registry = model_zoo.get_sub_registry("transformers_bert")
for name, (model_fn, data_gen_fn, _, _, _) in sub_registry.items():
model = model_fn()
if model.__class__.__name__ == "BertForQuestionAnswering":
continue
trace_model_and_compare_output(model, data_gen_fn, ignore_data=["labels", "next_sentence_label"])
if __name__ == "__main__":
test_bert()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_hf_model/test_hf_gpt.py | tests/test_fx/test_tracer/test_hf_model/test_hf_gpt.py | import pytest
import torch
from hf_tracer_utils import trace_model_and_compare_output
from packaging import version
from colossalai.testing import clear_cache_before_run
from tests.kit.model_zoo import model_zoo
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse("1.12.0"), reason="torch version < 12")
@clear_cache_before_run()
def test_gpt():
    """Trace every registered HF GPT variant and compare traced vs. eager outputs."""
    # TODO(ver217): support the following models
    # 1. "GPT2DoubleHeadsModel", "GPT2ForQuestionAnswering", "GPTJForQuestionAnswering"
    unsupported = ("GPT2DoubleHeadsModel", "GPT2ForQuestionAnswering", "GPTJForQuestionAnswering")
    for _, (model_fn, data_gen_fn, _, _, _) in model_zoo.get_sub_registry("transformers_gpt").items():
        model = model_fn()
        if model.__class__.__name__ in unsupported:
            continue
        trace_model_and_compare_output(model, data_gen_fn, ignore_data=["labels"])
if __name__ == "__main__":
    # allow running this test file directly without pytest
    test_gpt()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_hf_model/test_hf_t5.py | tests/test_fx/test_tracer/test_hf_model/test_hf_t5.py | import pytest
import torch
from hf_tracer_utils import trace_model_and_compare_output
from packaging import version
from colossalai.testing import clear_cache_before_run
from tests.kit.model_zoo import model_zoo
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse("1.12.0"), reason="torch version < 12")
@clear_cache_before_run()
def test_t5():
    """Trace every registered HF T5 variant and compare traced vs. eager outputs."""
    registry = model_zoo.get_sub_registry("transformers_t5")
    for entry_name, (model_fn, data_gen_fn, _, _, _) in registry.items():
        # cannot trace for loss function yet, so for the conditional-generation entry
        # reuse the plain T5 data generator, which does not produce labels
        if entry_name == "transformers_t5_for_conditional_generation":
            data_gen_fn = registry.get("transformers_t5")[1]
        trace_model_and_compare_output(model_fn(), data_gen_fn, ignore_data=["labels"])
if __name__ == "__main__":
    # allow running this test file directly without pytest
    test_t5()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_hf_model/test_hf_albert.py | tests/test_fx/test_tracer/test_hf_model/test_hf_albert.py | import pytest
import torch
from hf_tracer_utils import trace_model_and_compare_output
from packaging import version
from colossalai.testing import clear_cache_before_run
from tests.kit.model_zoo import model_zoo
# Nominal input dimensions for BERT-style data generation.
# NOTE(review): not referenced anywhere in this file -- possibly dead; confirm before removal.
BATCH_SIZE = 2
SEQ_LENGTH = 16
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse("1.12.0"), reason="torch version < 12")
@clear_cache_before_run()
def test_albert():
    """Trace every registered HF ALBERT variant and compare traced vs. eager outputs."""
    # TODO: support the following models
    # 1. "AlbertForPreTraining"
    unsupported = ("AlbertForPreTraining",)
    for _, (model_fn, data_gen_fn, _, _, _) in model_zoo.get_sub_registry("transformers_albert").items():
        model = model_fn()
        if model.__class__.__name__ in unsupported:
            continue
        trace_model_and_compare_output(model, data_gen_fn)
if __name__ == "__main__":
    # allow running this test file directly without pytest
    test_albert()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_hf_model/test_hf_opt.py | tests/test_fx/test_tracer/test_hf_model/test_hf_opt.py | import pytest
import torch
from hf_tracer_utils import trace_model_and_compare_output
from packaging import version
from colossalai.testing import clear_cache_before_run
from tests.kit.model_zoo import model_zoo
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse("1.12.0"), reason="torch version < 12")
@clear_cache_before_run()
def test_opt():
    """Trace every registered HF OPT variant and compare traced vs. eager outputs."""
    ignored = ["labels", "start_positions", "end_positions"]
    for _, (model_fn, data_gen_fn, _, _, _) in model_zoo.get_sub_registry("transformers_opt").items():
        trace_model_and_compare_output(model_fn(), data_gen_fn, ignore_data=ignored)
if __name__ == "__main__":
    # allow running this test file directly without pytest
    test_opt()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_torchrec_model/test_deepfm_model.py | tests/test_fx/test_tracer/test_torchrec_model/test_deepfm_model.py | import torch
from colossalai._analyzer.fx import symbolic_trace
from colossalai.testing import clear_cache_before_run
from tests.kit.model_zoo import model_zoo
# Nominal batch size / feature dimension for hand-built inputs.
# NOTE(review): not referenced anywhere in this file -- possibly dead; confirm before removal.
BATCH = 2
SHAPE = 10
def trace_and_compare(model_cls, data, output_transform_fn, meta_args=None):
    """Symbolically trace ``model_cls`` and verify the traced graph matches eager execution.

    Args:
        model_cls: zero-arg callable returning a fresh model instance.
        data: dict of keyword inputs fed to both eager and traced models.
        output_transform_fn: maps a raw model output to a dict of comparable entries.
        meta_args: optional meta-device args forwarded to ``symbolic_trace``.
    """
    # trace
    model = model_cls()

    # convert to eval for inference
    # it is important to set it to eval mode before tracing
    # without this statement, the torch.nn.functional.batch_norm will always be in training mode
    model.eval()

    gm = symbolic_trace(model, meta_args=meta_args)
    gm.eval()

    # run forward
    with torch.no_grad():
        fx_out = gm(**data)
        non_fx_out = model(**data)

    # compare output
    transformed_fx_out = output_transform_fn(fx_out)
    transformed_non_fx_out = output_transform_fn(non_fx_out)
    assert len(transformed_fx_out) == len(transformed_non_fx_out)
    if torch.is_tensor(fx_out):
        assert torch.allclose(
            fx_out, non_fx_out
        ), f"{model.__class__.__name__} has inconsistent outputs, {fx_out} vs {non_fx_out}"
    else:
        # non-tensor outputs expose their payload via .values() -- presumably a
        # torchrec KeyedTensor; confirm against the registered model outputs
        assert torch.allclose(
            fx_out.values(), non_fx_out.values()
        ), f"{model.__class__.__name__} has inconsistent outputs, {fx_out} vs {non_fx_out}"
    for key in transformed_fx_out.keys():
        fx_output_val = transformed_fx_out[key]
        non_fx_output_val = transformed_non_fx_out[key]
        if torch.is_tensor(fx_output_val):
            assert torch.allclose(
                fx_output_val, non_fx_output_val, atol=1e-5
            ), f"{model.__class__.__name__} has inconsistent outputs, {fx_output_val} vs {non_fx_output_val}"
        else:
            # BUGFIX: report the mismatching per-key values, not the whole outputs
            assert torch.allclose(
                fx_output_val.values(), non_fx_output_val.values()
            ), f"{model.__class__.__name__} has inconsistent outputs, {fx_output_val} vs {non_fx_output_val}"
@clear_cache_before_run()
def test_torchrec_deepfm_models():
    """Trace and compare every registered torchrec DeepFM model."""
    torch.backends.cudnn.deterministic = True
    registry = model_zoo.get_sub_registry(keyword="deepfm", allow_empty=True)
    for _, (model_fn, data_gen_fn, output_transform_fn, _, attribute) in registry.items():
        data = data_gen_fn()
        # models with control flow must be traced with meta args
        needs_meta = attribute is not None and attribute.has_control_flow
        meta_args = {k: v.to("meta") for k, v in data.items()} if needs_meta else None
        trace_and_compare(model_fn, data, output_transform_fn, meta_args)
if __name__ == "__main__":
    # allow running this test file directly without pytest
    test_torchrec_deepfm_models()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_torchrec_model/test_dlrm_model.py | tests/test_fx/test_tracer/test_torchrec_model/test_dlrm_model.py | import torch
from colossalai._analyzer.fx import symbolic_trace
from colossalai.testing import clear_cache_before_run
from tests.kit.model_zoo import model_zoo
# Nominal batch size / feature dimension for hand-built inputs.
# NOTE(review): not referenced anywhere in this file -- possibly dead; confirm before removal.
BATCH = 2
SHAPE = 10
def trace_and_compare(model_cls, data, output_transform_fn, meta_args=None):
    """Symbolically trace ``model_cls`` and verify the traced graph matches eager execution.

    Args:
        model_cls: zero-arg callable returning a fresh model instance.
        data: dict of keyword inputs fed to both eager and traced models.
        output_transform_fn: maps a raw model output to a dict of comparable entries.
        meta_args: optional meta-device args forwarded to ``symbolic_trace``.
    """
    # trace
    model = model_cls()

    # convert to eval for inference
    # it is important to set it to eval mode before tracing
    # without this statement, the torch.nn.functional.batch_norm will always be in training mode
    model.eval()

    gm = symbolic_trace(model, meta_args=meta_args)
    gm.eval()

    # run forward
    with torch.no_grad():
        fx_out = gm(**data)
        non_fx_out = model(**data)

    # compare output
    transformed_fx_out = output_transform_fn(fx_out)
    transformed_non_fx_out = output_transform_fn(non_fx_out)
    assert len(transformed_fx_out) == len(transformed_non_fx_out)
    if torch.is_tensor(fx_out):
        assert torch.allclose(
            fx_out, non_fx_out
        ), f"{model.__class__.__name__} has inconsistent outputs, {fx_out} vs {non_fx_out}"
    else:
        # non-tensor outputs expose their payload via .values() -- presumably a
        # torchrec KeyedTensor; confirm against the registered model outputs
        assert torch.allclose(
            fx_out.values(), non_fx_out.values()
        ), f"{model.__class__.__name__} has inconsistent outputs, {fx_out} vs {non_fx_out}"
    for key in transformed_fx_out.keys():
        fx_output_val = transformed_fx_out[key]
        non_fx_output_val = transformed_non_fx_out[key]
        if torch.is_tensor(fx_output_val):
            assert torch.allclose(
                fx_output_val, non_fx_output_val, atol=1e-5
            ), f"{model.__class__.__name__} has inconsistent outputs, {fx_output_val} vs {non_fx_output_val}"
        else:
            # BUGFIX: report the mismatching per-key values, not the whole outputs
            assert torch.allclose(
                fx_output_val.values(), non_fx_output_val.values()
            ), f"{model.__class__.__name__} has inconsistent outputs, {fx_output_val} vs {non_fx_output_val}"
@clear_cache_before_run()
def test_torchrec_dlrm_models():
    """Trace and compare every registered torchrec DLRM model.

    BUGFIX: this test previously fetched the "deepfm" registry (copy-paste from the
    DeepFM test); the "dlrm_interactionarch" skip below only makes sense for the
    DLRM registry entries.
    """
    torch.backends.cudnn.deterministic = True
    dlrm_models = model_zoo.get_sub_registry(keyword="dlrm", allow_empty=True)
    for name, (model_fn, data_gen_fn, output_transform_fn, _, attribute) in dlrm_models.items():
        data = data_gen_fn()
        # dlrm_interactionarch is not supported
        # TODO(FrankLeeeee): support this model
        if name == "dlrm_interactionarch":
            continue
        # models with control flow must be traced with meta args
        if attribute is not None and attribute.has_control_flow:
            meta_args = {k: v.to("meta") for k, v in data.items()}
        else:
            meta_args = None
        trace_and_compare(model_fn, data, output_transform_fn, meta_args)
if __name__ == "__main__":
    # allow running this test file directly without pytest
    test_torchrec_dlrm_models()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_torchaudio_model/test_torchaudio_model.py | tests/test_fx/test_tracer/test_torchaudio_model/test_torchaudio_model.py | import pytest
import torch
from torchaudio_utils import trace_and_compare
from colossalai.testing import clear_cache_before_run
from tests.kit.model_zoo import model_zoo
# We cannot handle the tensors constructed with constant during forward, such as ``torch.empty(0).to(device=Proxy.device)``
# TODO: We could handle this case by hijacking torch.Tensor.to function.
@pytest.mark.skip
@clear_cache_before_run()
def test_torchaudio_models():
    """Trace every registered torchaudio model and compare against eager execution.

    Currently skipped: tensors constructed from constants during forward
    (see the module-level comment above) are not handled by the tracer yet.
    """
    torch.backends.cudnn.deterministic = True
    # NOTE(review): entries here unpack six fields while sibling registry tests unpack
    # five -- presumably the torchaudio registry carries an extra field; confirm.
    for _, (model_fn, data_gen_fn, output_transform_fn, _, _, attribute) in model_zoo.get_sub_registry(
        "torchaudio"
    ).items():
        use_meta = attribute is not None and attribute.has_control_flow
        trace_and_compare(model_fn(), data_gen_fn, output_transform_fn, need_meta=use_meta)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_tracer/test_torchaudio_model/torchaudio_utils.py | tests/test_fx/test_tracer/test_torchaudio_model/torchaudio_utils.py | import torch
from colossalai._analyzer.fx import symbolic_trace
def trace_and_compare(model, data_gen, output_transform_fn, need_meta=False, need_concrete=False):
    """Symbolically trace ``model`` and assert traced outputs match eager outputs.

    Args:
        model: the model to trace (switched to eval mode here).
        data_gen: zero-arg callable producing the dict of keyword inputs.
        output_transform_fn: maps a raw output to a dict of comparable tensors.
        need_meta: trace with meta-device args (for models with control flow).
        need_concrete: trace with the real inputs as concrete args.
    """
    data = data_gen()
    concrete_args = data if need_concrete else {}
    meta_args = {k: v.to("meta") for k, v in data.items()} if need_meta else {}

    model.eval()
    gm = symbolic_trace(model, concrete_args=concrete_args, meta_args=meta_args)

    with torch.no_grad():
        eager_out = model(**data)
        traced_out = gm(**data)

    # compare each transformed output entry
    traced_map = output_transform_fn(traced_out)
    eager_map = output_transform_fn(eager_out)
    assert len(traced_map) == len(eager_map)
    for key, traced_val in traced_map.items():
        eager_val = eager_map[key]
        assert torch.allclose(
            traced_val, eager_val, atol=1e-5
        ), f"{model.__class__.__name__} has inconsistent outputs, {traced_val} vs {eager_val}"
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_meta/test_backward.py | tests/test_fx/test_meta/test_backward.py | import pytest
import timm.models as tmm
import torch
import torchvision.models as tm
from colossalai.fx._compatibility import is_compatible_with_meta
if is_compatible_with_meta():
from colossalai.fx.profiler import MetaTensor
from colossalai.testing import clear_cache_before_run
# torchvision model constructors exercised by the meta-tensor backward test below
tm_models = [
    tm.vgg11,
    tm.resnet18,
    tm.densenet121,
    tm.mobilenet_v3_small,
    tm.resnext50_32x4d,
    tm.wide_resnet50_2,
    tm.regnet_x_16gf,
    tm.mnasnet0_5,
    tm.efficientnet_b0,
]
# timm model constructors exercised by the meta-tensor backward test below
tmm_models = [
    tmm.resnest.resnest50d,
    tmm.beit.beit_base_patch16_224,
    tmm.cait.cait_s24_224,
    tmm.efficientnet.efficientnetv2_m,
    tmm.resmlp_12_224,
    tmm.vision_transformer.vit_base_patch16_224,
    tmm.deit_base_distilled_patch16_224,
    tmm.convnext.convnext_base,
    tmm.vgg.vgg11,
    tmm.dpn.dpn68,
    tmm.densenet.densenet121,
    tmm.rexnet.rexnet_100,
    tmm.swin_transformer.swin_base_patch4_window7_224,
]
@pytest.mark.skipif(not is_compatible_with_meta(), reason="torch version is lower than 1.12.0")
@clear_cache_before_run()
def test_torchvision_models():
    """Run a meta-tensor forward + backward through each torchvision model."""
    for model_fn in tm_models:
        net = model_fn()
        # meta tensors carry only shape/dtype, so the huge batch allocates no storage
        batch = torch.rand(100000, 3, 224, 224, device="meta")
        net(MetaTensor(batch, fake_device=torch.device("cpu"))).sum().backward()
@pytest.mark.skipif(not is_compatible_with_meta(), reason="torch version is lower than 1.12.0")
@clear_cache_before_run()
def test_timm_models():
    """Run a meta-tensor forward + backward through each timm model."""
    for model_fn in tmm_models:
        net = model_fn()
        # meta tensors carry only shape/dtype, so the huge batch allocates no storage
        batch = torch.rand(100000, 3, 224, 224, device="meta")
        net(MetaTensor(batch, fake_device=torch.device("cpu"))).sum().backward()
if __name__ == "__main__":
    # allow running this test file directly without pytest
    test_torchvision_models()
    test_timm_models()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_meta/test_aten.py | tests/test_fx/test_meta/test_aten.py | from typing import Any, Callable, Union
import pytest
import torch
import torch.nn as nn
from colossalai.fx._compatibility import is_compatible_with_meta
from colossalai.testing import clear_cache_before_run
if is_compatible_with_meta():
from colossalai.fx.profiler import MetaTensor
# Convenience handle to the raw ATen operator namespace.
# NOTE(review): not referenced elsewhere in this file -- confirm before removal.
aten = torch.ops.aten
# Maps (aten op name, requires_backward) to a list of (module, sample input) pairs
# whose meta-tensor execution must agree with real execution on shape/dtype/stride.
registered_meta = {
    ("aten.convolution.default", True): [  # (aten ops, requires_backward)
        (nn.Conv1d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2), torch.rand(2, 3, 4)),
        (nn.Conv2d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2), torch.rand(2, 3, 4, 4)),
        (nn.Conv3d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2), torch.rand(2, 3, 4, 4, 4)),
        (nn.ConvTranspose1d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2), torch.rand(2, 3, 4)),
        (
            nn.ConvTranspose2d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2),
            torch.rand(2, 3, 4, 4),
        ),
        (
            nn.ConvTranspose3d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2),
            torch.rand(2, 3, 4, 4, 4),
        ),
    ],
    ("aten.native_batch_norm.default", True): [
        (nn.BatchNorm1d(4), torch.rand(2, 4)),
        (nn.BatchNorm2d(4), torch.rand(1, 4, 4, 4)),
        (nn.BatchNorm3d(4), torch.rand(1, 4, 4, 4, 4)),
    ],
    ("aten.native_layer_norm.default", True): [
        (nn.LayerNorm(4), torch.rand(1, 2, 3, 4)),
    ],
    # pooling variants share the avg_pool meta kernels
    ("aten.avg_pool1d.default", True): [
        (nn.MaxPool1d(3, stride=2), torch.rand(4, 5, 5)),
        (nn.AvgPool1d(3, stride=2), torch.rand(4, 5, 5)),
        (nn.AdaptiveMaxPool1d(3), torch.rand(4, 5, 5)),
        (nn.AdaptiveAvgPool1d(3), torch.rand(4, 5, 5)),
    ],
    ("aten.avg_pool2d.default", True): [
        (nn.MaxPool2d((3, 2), stride=(2, 1)), torch.rand(2, 4, 5, 5)),
        (nn.AvgPool2d((3, 2), stride=(2, 1)), torch.rand(2, 4, 5, 5)),
        (nn.AdaptiveMaxPool2d((3, 2)), torch.rand(2, 4, 5, 5)),
        (nn.AdaptiveAvgPool2d((3, 2)), torch.rand(2, 4, 5, 5)),
    ],
    # element-wise activations grouped under the relu meta kernel
    ("aten.relu.default", True): [
        (nn.ReLU(), torch.rand(4, 3, 1, 2)),
        (nn.LeakyReLU(), torch.rand(4, 3, 1, 2)),
        (nn.SiLU(), torch.rand(4, 3, 1, 2)),
        (nn.GELU(), torch.rand(4, 3, 1, 2)),
        (nn.ELU(), torch.rand(4, 3, 1, 2)),
        (nn.Sigmoid(), torch.rand(4, 3, 1, 2)),
        (nn.Tanh(), torch.rand(4, 3, 1, 2)),
        (nn.Hardswish(), torch.rand(4, 3, 1, 2)),
    ],
}
def compare_all(tensor: torch.Tensor, meta_tensor: torch.Tensor) -> Any:
    """Assert that ``tensor`` and ``meta_tensor`` agree on shape, dtype and stride."""
    checks = (
        ("shape", tensor.shape, meta_tensor.shape),
        ("dtype", tensor.dtype, meta_tensor.dtype),
        ("stride", tensor.stride(), meta_tensor.stride()),
    )
    for attr, real_val, meta_val in checks:
        assert (
            real_val == meta_val
        ), f"the {attr} of tensor ({real_val}) and meta tensor ({meta_val}) does not match."
def run_and_compare(f: Union[nn.Module, Callable], x: torch.Tensor, requires_backward=False) -> Any:
    """Execute ``f`` on a real tensor and its MetaTensor twin and compare metadata.

    When ``requires_backward`` is set, also runs a backward pass and compares
    the gradient metadata of the two inputs.
    """
    x.requires_grad = requires_backward
    meta_x = MetaTensor(x)
    real_out = f(x)
    meta_out = f(meta_x)
    compare_all(real_out, meta_out)

    if requires_backward:
        real_out.sum().backward()
        meta_out.sum().backward()
        compare_all(x.grad, meta_x.grad)
@pytest.mark.skipif(not is_compatible_with_meta(), reason="torch version is lower than 1.12.0")
@clear_cache_before_run()
def test_meta_aten():
    """Check real/meta metadata agreement for every registered aten-op case."""
    for (_, requires_backward), cases in registered_meta.items():
        for module, sample in cases:
            run_and_compare(module, sample, requires_backward)
if __name__ == "__main__":
    # allow running this test file directly without pytest
    test_meta_aten()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_meta/test_meta_trace.py | tests/test_fx/test_meta/test_meta_trace.py | import pytest
import timm.models as tmm
import torch
import torchvision.models as tm
from colossalai.fx._compatibility import is_compatible_with_meta
if is_compatible_with_meta():
from colossalai.fx import meta_trace
from colossalai.testing import clear_cache_before_run
# torchvision model constructors exercised by the meta-trace test below
tm_models = [
    tm.vgg11,
    tm.resnet18,
    tm.densenet121,
    tm.mobilenet_v3_small,
    tm.resnext50_32x4d,
    tm.wide_resnet50_2,
    tm.regnet_x_16gf,
    tm.mnasnet0_5,
    tm.efficientnet_b0,
]
# timm model constructors exercised by the meta-trace test below
tmm_models = [
    tmm.resnest.resnest50d,
    tmm.beit.beit_base_patch16_224,
    tmm.cait.cait_s24_224,
    tmm.efficientnet.efficientnetv2_m,
    tmm.resmlp_12_224,
    tmm.vision_transformer.vit_base_patch16_224,
    tmm.deit_base_distilled_patch16_224,
    tmm.convnext.convnext_base,
    tmm.vgg.vgg11,
    tmm.dpn.dpn68,
    tmm.densenet.densenet121,
    tmm.rexnet.rexnet_100,
    tmm.swin_transformer.swin_base_patch4_window7_224,
]
@pytest.mark.skipif(not is_compatible_with_meta(), reason="torch version is lower than 1.12.0")
@clear_cache_before_run()
def test_torchvision_models_trace():
    """Meta-trace each torchvision model without executing real kernels."""
    for model_fn in tm_models:
        net = model_fn()
        sample = torch.rand(1000, 3, 224, 224, device="meta")
        meta_trace(net, torch.device("cpu"), sample)
@pytest.mark.skipif(not is_compatible_with_meta(), reason="torch version is lower than 1.12.0")
@clear_cache_before_run()
def test_timm_models_trace():
    """Meta-trace each timm model without executing real kernels."""
    for model_fn in tmm_models:
        net = model_fn()
        sample = torch.rand(1000, 3, 224, 224, device="meta")
        meta_trace(net, torch.device("cpu"), sample)
if __name__ == "__main__":
    # allow running this test file directly without pytest
    test_torchvision_models_trace()
    test_timm_models_trace()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_codegen/test_offload_codegen.py | tests/test_fx/test_codegen/test_offload_codegen.py | import copy
import pytest
import torch
from torch.fx import GraphModule
import colossalai
from colossalai.fx import ColoTracer
from colossalai.fx.graph_module import ColoGraphModule
from colossalai.legacy.core import global_context as gpc
from colossalai.testing import rerun_if_address_is_in_use, spawn
try:
    from colossalai.fx.codegen import ActivationCheckpointCodeGen

    with_codegen = True
# BUGFIX: narrowed from a bare `except:`, which would also swallow
# KeyboardInterrupt/SystemExit; only a failed import should trigger the fallback.
except ImportError:
    # fall back to older pytorch version
    from colossalai.fx.codegen import python_code_with_activation_checkpoint

    with_codegen = False
class MyNet(torch.nn.Module):
    """Seven stacked 4x4 linear layers used to exercise offload codegen.

    NOTE: the attribute names (linear0..linear6) double as FX node names that
    the codegen assertions below match on, so they must not be renamed.
    """

    def __init__(self) -> None:
        super().__init__()
        self.linear0 = torch.nn.Linear(4, 4)
        self.linear1 = torch.nn.Linear(4, 4)
        self.linear2 = torch.nn.Linear(4, 4)
        self.linear3 = torch.nn.Linear(4, 4)
        self.linear4 = torch.nn.Linear(4, 4)
        self.linear5 = torch.nn.Linear(4, 4)
        self.linear6 = torch.nn.Linear(4, 4)

    def forward(self, x):
        return self.linear6(
            self.linear5(self.linear4(self.linear3(self.linear2(self.linear1(self.linear0(x))))))
        )
def _is_all_gradient_close(m: torch.nn.Module, gm: GraphModule) -> bool:
for m_p, gm_p in zip(m.parameters(), gm.parameters()):
if not torch.allclose(m_p.grad, gm_p.grad):
return False
return True
def _test_fwd_and_bwd(model: torch.nn.Module, gm: ColoGraphModule, data: torch.Tensor):
    """Check that ``gm`` reproduces ``model``'s forward output and parameter gradients."""
    # forward pass must match exactly
    ref_out = model(data)
    traced_out = gm(data)
    assert torch.equal(ref_out, traced_out), "fx_out doesn't comply with original output"

    # backward pass: parameter gradients must match as well
    ref_out.sum().backward()
    traced_out.sum().backward()
    assert _is_all_gradient_close(model, gm), "gm doesn't have the same gradient as original one"
def _run_offload_codegen(rank, world_size, port):
    """Worker: trace MyNet, annotate activation offload/checkpoint metadata,
    and verify both the generated source and forward/backward equivalence.

    Args:
        rank: process rank assigned by ``spawn``.
        world_size: total number of spawned workers (1 in this test).
        port: TCP port for the distributed backend.
    """
    # launch colossalai to make sure we could execute colossalai.utils.checkpoint currently
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    # build model and input
    model = MyNet().cuda()
    data = torch.rand(4, 4).cuda()
    # trace the module and replace codegen
    tracer = ColoTracer(trace_act_ckpt=True)
    graph = tracer.trace(model)
    codegen = ActivationCheckpointCodeGen()
    graph.set_codegen(codegen)
    # annotate the activation offload part
    # also annotate the activation_checkpoint so we could test both types
    # of input offload
    # NOTE(review): the three-element lists look like [offload_region_id, offload_input,
    # offload_bar] -- confirm against ActivationCheckpointCodeGen before relying on this.
    for node in graph.nodes:
        if node.name == "linear0":
            node.meta["activation_offload"] = [0, True, False]
        if node.name == "linear1":
            node.meta["activation_offload"] = [0, True, False]
        if node.name == "linear2":
            node.meta["activation_offload"] = [1, True, True]
        if node.name == "linear4":
            node.meta["activation_offload"] = [2, False, True]
        if node.name == "linear5":
            node.meta["activation_checkpoint"] = [0]
            node.meta["activation_offload"] = True
    # deepcopy keeps the eager model's parameters independent for the comparison below
    gm = ColoGraphModule(copy.deepcopy(model), graph)
    gm.recompile()
    # assert we have all the components
    code = graph.python_code("self").src
    assert (
        "def pack_hook_input(self, x):" in code
        and "def unpack_hook(self, packed):" in code
        and "def pack_hook_no_input(self, x):" in code
        and "setattr(x, 'offload', True)" in code
        and "setattr(linear3, 'offload', False)" in code
        and "with torch.autograd.graph.saved_tensors_hooks(self.pack_hook_input, self.unpack_hook):" in code
        and "with torch.autograd.graph.save_on_cpu(pin_memory=True):" in code
        and "with torch.autograd.graph.saved_tensors_hooks(self.pack_hook_no_input, self.unpack_hook):" in code
        and "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0, True, linear4, use_reentrant=False)"
        in code
    )
    _test_fwd_and_bwd(model, gm, data)
    gpc.destroy()
@pytest.mark.skipif(not with_codegen, reason="torch version is lower than 1.12.0")
@rerun_if_address_is_in_use()
def test_act_ckpt_codegen():
    """Run the offload-codegen worker on a single process."""
    num_workers = 1
    spawn(_run_offload_codegen, num_workers)
def _run_offload_codegen_torch11(rank, world_size, port):
    """Worker for the legacy (pre-1.12) code path: same checks as
    ``_run_offload_codegen`` but patches the graph's code generation method
    instead of installing a codegen object.

    Args:
        rank: process rank assigned by ``spawn``.
        world_size: total number of spawned workers (1 in this test).
        port: TCP port for the distributed backend.
    """
    # launch colossalai to make sure we could execute colossalai.utils.checkpoint currently
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    # build model and input
    model = MyNet().cuda()
    data = torch.rand(4, 4).cuda()
    # trace the module and replace codegen
    tracer = ColoTracer(trace_act_ckpt=True)
    graph = tracer.trace(model)
    # replace a bound method of an object
    graph._python_code = python_code_with_activation_checkpoint.__get__(graph)
    # annotate the activation offload part
    # also annotate the activation_checkpoint so we could test both types
    # of input offload
    # NOTE(review): annotation layout mirrors _run_offload_codegen above -- confirm
    # the [region_id, ...] semantics against the codegen implementation.
    for node in graph.nodes:
        if node.name == "linear0":
            node.meta["activation_offload"] = [0, True, False]
        if node.name == "linear1":
            node.meta["activation_offload"] = [0, True, False]
        if node.name == "linear2":
            node.meta["activation_offload"] = [1, True, True]
        if node.name == "linear4":
            node.meta["activation_offload"] = [2, False, True]
        if node.name == "linear5":
            node.meta["activation_checkpoint"] = [0]
            node.meta["activation_offload"] = True
    gm = ColoGraphModule(copy.deepcopy(model), graph)
    gm.recompile()
    # assert we have all the components
    code = graph.python_code("self").src
    assert (
        "def pack_hook_input(self, x):" in code
        and "def unpack_hook(self, packed):" in code
        and "def pack_hook_no_input(self, x):" in code
        and "setattr(x, 'offload', True)" in code
        and "setattr(linear3, 'offload', False)" in code
        and "with torch.autograd.graph.saved_tensors_hooks(self.pack_hook_input, self.unpack_hook):" in code
        and "with torch.autograd.graph.save_on_cpu(pin_memory=True):" in code
        and "with torch.autograd.graph.saved_tensors_hooks(self.pack_hook_no_input, self.unpack_hook):" in code
        and "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0, True, linear4, use_reentrant=False)"
        in code
    )
    _test_fwd_and_bwd(model, gm, data)
    gpc.destroy()
@pytest.mark.skip(reason="currently torch11 ColoGraphModule is not implemented")
@rerun_if_address_is_in_use()
def test_act_ckpt_python_code_torch11():
    """Run the legacy (torch 1.11) offload-codegen worker (currently skipped)."""
    num_workers = 1
    spawn(_run_offload_codegen_torch11, num_workers)
if __name__ == "__main__":
    # BUGFIX: `_run_offload_codegen` requires (rank, world_size, port); calling it with
    # only a rank raised TypeError. Go through the spawning test entry point instead,
    # matching the other test files in this suite.
    test_act_ckpt_codegen()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_codegen/test_nested_activation_checkpoint_codegen.py | tests/test_fx/test_codegen/test_nested_activation_checkpoint_codegen.py | import pytest
import torch
import colossalai
from colossalai.fx import ColoTracer
from colossalai.fx.graph_module import ColoGraphModule
from colossalai.legacy.core import global_context as gpc
from colossalai.testing import rerun_if_address_is_in_use, spawn
try:
    from colossalai.fx.codegen import ActivationCheckpointCodeGen

    with_codegen = True
# BUGFIX: narrowed from a bare `except:`, which would also swallow
# KeyboardInterrupt/SystemExit; only a failed import should trigger the fallback.
except ImportError:
    # fall back to older pytorch version
    with_codegen = False
class MyModule(torch.nn.Module):
    """Six stacked 4x4 linear layers used to exercise nested checkpoint codegen.

    NOTE: attribute names (linear1..linear6) double as the FX node names that
    the nested-checkpoint annotations below are keyed on.
    """

    def __init__(self):
        super().__init__()
        self.linear1 = torch.nn.Linear(4, 4)
        self.linear2 = torch.nn.Linear(4, 4)
        self.linear3 = torch.nn.Linear(4, 4)
        self.linear4 = torch.nn.Linear(4, 4)
        self.linear5 = torch.nn.Linear(4, 4)
        self.linear6 = torch.nn.Linear(4, 4)

    def forward(self, x):
        out = self.linear1(x)
        out = self.linear2(out)
        out = self.linear3(out)
        out = self.linear4(out)
        out = self.linear5(out)
        out = self.linear6(out)
        return out
def _run_act_ckpt_codegen(rank, world_size, port):
    """Worker: trace MyModule, annotate nested activation-checkpoint regions,
    and verify both the generated checkpoint code and output equivalence.

    Args:
        rank: process rank assigned by ``spawn``.
        world_size: total number of spawned workers (1 in this test).
        port: TCP port for the distributed backend.
    """
    # launch colossalai to make sure we could execute colossalai.utils.checkpoint currently
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    # build model and run forward
    model = MyModule()
    data1 = torch.rand(4, 4)
    # copy model to cuda
    model = model.to(device="cuda")
    data1 = data1.to(device="cuda")
    non_fx_out = model(data1)
    # trace the module and replace codegen
    tracer = ColoTracer(trace_act_ckpt=True)
    graph = tracer.trace(model)
    codegen = ActivationCheckpointCodeGen()
    graph.set_codegen(codegen)
    # annotate nested checkpoint
    # NOTE(review): entries look like nested region indices [outer, mid, inner] with
    # None meaning "no region at that nesting level" -- confirm against the codegen.
    for node in graph.nodes:
        if node.name == "linear1":
            node.meta["activation_checkpoint"] = [0, 0, 0]
            continue
        if node.name == "linear2":
            node.meta["activation_checkpoint"] = [0, 0, None]
        if node.name == "linear3":
            node.meta["activation_checkpoint"] = [0, 0, 1]
        if node.name == "linear4":
            node.meta["activation_checkpoint"] = [0, 1, None]
        if node.name == "linear5":
            node.meta["activation_checkpoint"] = 1
    gm = ColoGraphModule(model, graph)
    gm.recompile()
    # assert checkpoint function will be generated and
    code = graph.python_code("self").src
    assert (
        "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0_0, False, x, use_reentrant=False)" in code
        and "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0_1, False, linear3, use_reentrant=False)"
        in code
        and "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0_0_0, False, x, use_reentrant=False)"
        in code
        and "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0_0_1, False, linear2, use_reentrant=False)"
        in code
        and "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0, False, x, use_reentrant=False)"
        in code
        and "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_1, False, linear4, use_reentrant=False)"
        in code
    )
    # recompile and verify the outputs are consistent
    fx_out = gm(data1)
    assert torch.equal(non_fx_out, fx_out)
    gpc.destroy()
@pytest.mark.skipif(not with_codegen, reason="torch version is lower than 1.12.0")
@rerun_if_address_is_in_use()
def test_act_ckpt_codegen():
    """Run the nested-checkpoint codegen worker on a single process.

    Adds ``rerun_if_address_is_in_use`` (already imported in this file) for
    consistency with the sibling codegen tests, so a stale port does not fail
    the test spuriously.
    """
    spawn(_run_act_ckpt_codegen, 1)
def _run_act_ckpt_python_code_torch11(rank, world_size, port):
    """Worker for the torch-1.11 code path.

    NOTE(review): despite the name, this still installs ActivationCheckpointCodeGen
    exactly like ``_run_act_ckpt_codegen`` -- it looks copy-pasted and the wrapping
    test is unconditionally skipped; confirm intent before enabling it.

    Args:
        rank: process rank assigned by ``spawn``.
        world_size: total number of spawned workers (1 in this test).
        port: TCP port for the distributed backend.
    """
    # launch colossalai to make sure we could execute colossalai.utils.checkpoint currently
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    # build model and run forward
    model = MyModule()
    data1 = torch.rand(4, 4)
    # copy model to cuda
    model = model.to(device="cuda")
    data1 = data1.to(device="cuda")
    non_fx_out = model(data1)
    # trace the module and replace codegen
    tracer = ColoTracer(trace_act_ckpt=True)
    graph = tracer.trace(model)
    codegen = ActivationCheckpointCodeGen()
    graph.set_codegen(codegen)
    # annotate nested checkpoint
    for node in graph.nodes:
        if node.name == "linear1":
            node.meta["activation_checkpoint"] = [0, 0, 0]
            continue
        if node.name == "linear2":
            node.meta["activation_checkpoint"] = [0, 0, None]
        if node.name == "linear3":
            node.meta["activation_checkpoint"] = [0, 0, 1]
        if node.name == "linear4":
            node.meta["activation_checkpoint"] = [0, 1, None]
        if node.name == "linear5":
            node.meta["activation_checkpoint"] = 1
    gm = ColoGraphModule(model, graph)
    gm.recompile()
    # assert checkpoint function will be generated and
    code = graph.python_code("self").src
    assert (
        "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0_0, False, x, use_reentrant=False)" in code
        and "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0_1, False, linear3, use_reentrant=False)"
        in code
        and "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0_0_0, False, x, use_reentrant=False)"
        in code
        and "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0_0_1, False, linear2, use_reentrant=False)"
        in code
        and "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0, False, x, use_reentrant=False)"
        in code
        and "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_1, False, linear4, use_reentrant=False)"
        in code
    )
    # recompile and verify the outputs are consistent
    fx_out = gm(data1)
    assert torch.equal(non_fx_out, fx_out)
    gpc.destroy()
@pytest.mark.skipif(with_codegen, reason="torch version is equal to or higher than 1.12.0")
@pytest.mark.skip(reason="currently torch11 ColoGraphModule is not done")
@rerun_if_address_is_in_use()
def test_act_ckpt_python_code_torch11():
    """Placeholder for the torch-1.11 nested-checkpoint path (always skipped for now)."""
    num_workers = 1
    spawn(_run_act_ckpt_python_code_torch11, num_workers)
if __name__ == "__main__":
    # BUGFIX: `_run_act_ckpt_codegen` requires (rank, world_size, port); invoking it
    # with only `rank` raised TypeError. Use the spawning test entry point instead.
    test_act_ckpt_codegen()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_codegen/test_activation_checkpoint_codegen.py | tests/test_fx/test_codegen/test_activation_checkpoint_codegen.py | import pytest
import torch
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
import colossalai
from colossalai.fx import ColoTracer
from colossalai.fx.graph_module import ColoGraphModule
from colossalai.legacy.core import global_context as gpc
from colossalai.testing import rerun_if_address_is_in_use, spawn
try:
    from colossalai.fx.codegen import ActivationCheckpointCodeGen

    with_codegen = True
# BUGFIX: narrowed from a bare `except:`, which would also swallow
# KeyboardInterrupt/SystemExit; only a failed import should trigger the fallback.
except ImportError:
    # fall back to older pytorch version
    from colossalai.fx.codegen import python_code_with_activation_checkpoint

    with_codegen = False
class MLP(torch.nn.Module):
    """Two parallel 4x4 linear heads applied to the same input.

    NOTE: attribute names (linear1/linear2) feed the FX node names checked below.
    """

    def __init__(self):
        super().__init__()
        self.linear1 = torch.nn.Linear(4, 4)
        self.linear2 = torch.nn.Linear(4, 4)

    def forward(self, x):
        first = self.linear1(x)
        second = self.linear2(x)
        return first, second
class relu(torch.nn.Module):
    """Module wrapper around an in-place ReLU.

    NOTE(review): the lowercase class name and the ``relu`` attribute feed into FX
    node names (e.g. "relu_relu") matched by the codegen test, so neither is renamed.
    """

    def __init__(self) -> None:
        super().__init__()
        self.relu = torch.nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(x)
class MyModule(torch.nn.Module):
    """Mixes checkpointed sub-blocks (modules and bound methods) with plain calls
    to exercise activation-checkpoint codegen.
    """

    def __init__(self):
        super().__init__()
        self.mlp1 = MLP()
        self.relu = relu()
        self.linear2 = torch.nn.Linear(4, 4)

    def ckpt2(self, x):
        # in-place relu inside a checkpointed region
        return F.relu(x, inplace=True)

    def ckpt3(self, x, y):
        return self.linear2(x) + self.linear2(y)

    def forward(self, x, y):
        # statement order matters: the in-place relu regions mutate x and y
        mlp_a, mlp_b = checkpoint(self.mlp1, x)
        relu_out = checkpoint(self.relu, x)
        ckpt2_out = checkpoint(self.ckpt2, y)
        ckpt3_out = checkpoint(self.ckpt3, y, ckpt2_out)
        plain_out = self.linear2(ckpt2_out)
        return mlp_a + mlp_b + relu_out + ckpt2_out + ckpt3_out + plain_out
def _run_act_ckpt_codegen(rank, world_size, port):
    """Spawned worker: trace MyModule with a checkpoint-aware tracer, install
    ActivationCheckpointCodeGen, and verify the generated source and outputs."""
    # launch colossalai to make sure we could execute colossalai.utils.checkpoint currently
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")

    # build model and run forward
    model = MyModule()
    data1 = torch.rand(4, 4)
    data2 = torch.rand(4, 4)

    # copy model to cuda
    model = model.to(device="cuda")
    data1 = data1.to(device="cuda")
    data2 = data2.to(device="cuda")

    # reference output from the untraced model
    non_fx_out = model(data1, data2)

    # trace the module and replace codegen
    tracer = ColoTracer(trace_act_ckpt=True)
    graph = tracer.trace(model)
    codegen = ActivationCheckpointCodeGen()
    graph.set_codegen(codegen)

    # check ops are annotated with ckpt
    # also annotate the selected node for offloading
    ckpt_nodes = ["mlp1_linear1", "mlp1_linear2", "relu_relu", "relu"]
    offload_starts = ["mlp1_linear1"]
    for node in graph.nodes:
        if node.name in ckpt_nodes:
            assert "activation_checkpoint" in node.meta

            # annotate the selected node for offload
            if node.name in offload_starts:
                node.meta["activation_offload"] = True

    gm = ColoGraphModule(model, graph)
    gm.recompile()

    # assert checkpoint function will be generated and
    # the offload option is correct
    code = graph.python_code("self").src
    assert (
        "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0, True, x, use_reentrant=False)" in code
        and "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_1, False, x, use_reentrant=False)"
        in code
        and "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_2, False, y, use_reentrant=False)"
        in code
        and "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_3, False, y, relu, use_reentrant=True)"
        in code
    )

    # recompile and verify the outputs are consistent
    fx_out = gm(data1, data2)
    assert torch.equal(non_fx_out, fx_out)

    gpc.destroy()
@pytest.mark.skipif(not with_codegen, reason="torch version is lower than 1.12.0")
@rerun_if_address_is_in_use()
def test_act_ckpt_codegen():
    """Run the codegen worker on a single process (requires the >=1.12 codegen)."""
    spawn(_run_act_ckpt_codegen, 1)
def _run_act_ckpt_python_code_torch11(rank, world_size, port):
    """Spawned worker for the pre-torch-1.12 path: patches ``graph._python_code``
    with the legacy checkpoint-aware code generator and verifies the output."""
    # launch colossalai to make sure we could execute colossalai.utils.checkpoint currently
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")

    # build model and run forward
    model = MyModule()
    data1 = torch.rand(4, 4)
    data2 = torch.rand(4, 4)

    # NOTE(review): only the data is moved to cuda here; unlike the codegen worker
    # above, the model stays on CPU — likely an oversight, but this test is skipped.
    data1 = data1.to(device="cuda")
    data2 = data2.to(device="cuda")

    non_fx_out = model(data1, data2)

    # trace the module and replace codegen
    tracer = ColoTracer(trace_act_ckpt=True)
    graph = tracer.trace(model)

    # replace a bound method of an object
    graph._python_code = python_code_with_activation_checkpoint.__get__(graph)

    # check ops are annotated with ckpt
    ckpt_nodes = ["mlp1_linear1", "mlp1_linear2", "relu_relu", "relu"]
    offload_starts = ["mlp1_linear1"]
    for node in graph.nodes:
        if node.name in ckpt_nodes:
            assert "activation_checkpoint" in node.meta

            # annotate the selected node for offload
            if node.name in offload_starts:
                node.meta["activation_offload"] = True

    gm = ColoGraphModule(model, graph)
    gm.recompile()

    # assert checkpoint function will be generated and
    # the offload option is correct
    code = graph.python_code("self").src
    assert (
        "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0, True, x, use_reentrant=False)" in code
        and "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_1, False, x, use_reentrant=False)"
        in code
        and "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_2, False, y, use_reentrant=False)"
        in code
        and "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_3, False, y, relu, use_reentrant=True)"
        in code
    )

    # recompile and verify the outputs are consistent
    fx_out = gm(data1, data2)
    assert torch.equal(non_fx_out, fx_out)

    gpc.destroy()
@pytest.mark.skipif(with_codegen, reason="torch version is equal to or higher than 1.12.0")
@pytest.mark.skip(reason="currently torch11 ColoGraphModule is not done")
@rerun_if_address_is_in_use()
def test_act_ckpt_python_code_torch11():
    """Run the legacy (pre-1.12) codegen worker on a single process."""
    spawn(_run_act_ckpt_python_code_torch11, 1)


if __name__ == "__main__":
    # Fix: _run_act_ckpt_codegen requires (rank, world_size, port); calling it
    # directly with only `rank` raised TypeError. Go through the spawn-based
    # test entry point instead, which supplies all three.
    test_act_ckpt_codegen()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_lazy/lazy_init_utils.py | tests/test_lazy/lazy_init_utils.py | import random
from copy import deepcopy
from typing import Any, Callable, Optional, Tuple
import numpy as np
import torch
from packaging import version
from colossalai.device.device_mesh import DeviceMesh
from colossalai.lazy.lazy_init import LazyInitContext, LazyTensor, _MyTensor
from colossalai.tensor.d_tensor import to_global
from colossalai.tensor.d_tensor.layout import Layout
from tests.kit.model_zoo.registry import ModelAttribute
# Lazy init is only exercised on torch >= 1.12.0 (see the skipif marks using this flag).
SUPPORT_LAZY = version.parse(torch.__version__) >= version.parse("1.12.0")

# model_fn, data_gen_fn, output_transform_fn, model_attr
# NOTE(review): check_lazy_init below unpacks 5 elements from an entry while this
# alias declares 4 — confirm which shape the model zoo actually registers.
TestingEntry = Tuple[Callable[[], torch.nn.Module], Callable[[], dict], Callable[[], dict], Optional[ModelAttribute]]
def set_seed(seed: int) -> None:
    """Seed the Python, NumPy and PyTorch RNGs with the same value."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
def assert_model_equal(m1: torch.nn.Module, m2: torch.nn.Module) -> None:
    """Assert two modules carry identical state dicts (same names, same values,
    same order) and matching ``requires_grad`` flags on their parameters."""
    s1, s2 = m1.state_dict(), m2.state_dict()
    assert len(s1) == len(s2), f"len {len(s1)} vs {len(s2)}"
    # state dicts are ordered, so a positional zip also checks key ordering
    for (n1, t1), (n2, t2) in zip(s1.items(), s2.items()):
        assert n1 == n2
        assert torch.equal(t1, t2), f"{n1} {t1} vs {t2}"
    # trainability must match parameter-by-parameter as well
    assert all(p1.requires_grad == p2.requires_grad for p1, p2 in zip(m1.parameters(), m2.parameters()))
def assert_forward_equal(
    m1: torch.nn.Module,
    m2: torch.nn.Module,
    data_gen_fn: Callable[[], dict],
    output_transform_fn: Callable[[Any], dict],
) -> None:
    """Assert both modules yield numerically close outputs on one generated batch."""
    data = data_gen_fn()
    # run both models in eval mode without tracking gradients
    m1.eval()
    m2.eval()
    with torch.no_grad():
        raw_out1 = m1(**data)
        raw_out2 = m2(**data)
    transformed1 = output_transform_fn(raw_out1)
    transformed2 = output_transform_fn(raw_out2)
    assert len(transformed1) == len(transformed2)
    for key in transformed1:
        out1, out2 = transformed1[key], transformed2[key]
        assert torch.allclose(
            out1, out2, atol=1e-5
        ), f"{m1.__class__.__name__} has inconsistent outputs, {out1} vs {out2}"
def check_lazy_init(
    entry: TestingEntry, seed: int = 42, verbose: bool = False, check_forward: bool = False, default_device: str = "cpu"
) -> None:
    """Build the entry's model eagerly and lazily, materialize the lazy copy
    (and a deepcopy of it), and assert all three agree.

    The ``_pre_op_fn`` hooks reseed the RNGs before each tensor op so the eager
    (_MyTensor) and deferred (LazyTensor) paths draw the same random values.
    """
    model_fn, data_gen_fn, output_transform_fn, _, model_attr = entry
    _MyTensor._pre_op_fn = lambda *args: set_seed(seed)
    LazyTensor._pre_op_fn = lambda *args: set_seed(seed)

    # eager reference model (materialized immediately via _MyTensor)
    ctx = LazyInitContext(tensor_cls=_MyTensor, default_device=default_device)
    with ctx:
        model = model_fn()

    # deferred model plus a deepcopy taken while still lazy
    ctx = LazyInitContext(default_device=default_device)
    with ctx:
        deferred_model = model_fn()
        copied_deferred_model = deepcopy(deferred_model)
    deferred_model = ctx.materialize(deferred_model, verbose=verbose)
    copied_deferred_model = ctx.materialize(copied_deferred_model, verbose=verbose)

    assert_model_equal(model, deferred_model)
    assert_model_equal(deferred_model, copied_deferred_model)
    if check_forward:
        assert_forward_equal(model, deferred_model, data_gen_fn, output_transform_fn)
        assert_forward_equal(deferred_model, copied_deferred_model, data_gen_fn, output_transform_fn)
    if verbose:
        print(f"{model.__class__.__name__} pass")
def assert_dist_model_equal(
    model: torch.nn.Module, distributed_model: torch.nn.Module, device_mesh: DeviceMesh, sharding_spec_dict: dict
) -> None:
    """Assert a plain model matches a distributed one, gathering sharded tensors
    (those listed in ``sharding_spec_dict``) to their global form first."""
    state = model.state_dict()
    distributed_state = distributed_model.state_dict()

    assert len(state) == len(distributed_state), f"len {len(state)} vs {len(distributed_state)}"

    for (n1, t1), (n2, t2) in zip(state.items(), distributed_state.items()):
        assert n1 == n2
        # comparison happens on GPU; to_global below also operates on device tensors
        t1 = t1.cuda()
        t2 = t2.cuda()
        if n2 in sharding_spec_dict:
            # reconstruct the full tensor from its local shard before comparing
            layout = Layout(device_mesh=device_mesh, sharding_spec=sharding_spec_dict[n2], global_shape=t1.shape)
            t2.dist_layout = layout
            t2 = to_global(t2)
        assert torch.equal(t1, t2), f"{n1} {t1} vs {t2}"
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_lazy/test_models.py | tests/test_lazy/test_models.py | import pytest
from lazy_init_utils import SUPPORT_LAZY, check_lazy_init
from tests.kit.model_zoo import COMMON_MODELS, IS_FAST_TEST, model_zoo
@pytest.mark.skipif(not SUPPORT_LAZY, reason="requires torch >= 1.12.0")
@pytest.mark.parametrize(
    "subset",
    (
        [COMMON_MODELS]
        if IS_FAST_TEST
        else ["torchvision", "diffusers", "timm", "transformers", "torchaudio", "deepfm", "dlrm"]
    ),
)
@pytest.mark.parametrize("default_device", ["cpu", "cuda"])
def test_models_lazy_init(subset, default_device):
    """Run check_lazy_init over every model-zoo entry in ``subset``, skipping
    families known to be incompatible with lazy init."""
    sub_model_zoo = model_zoo.get_sub_registry(subset, allow_empty=True)
    for name, entry in sub_model_zoo.items():
        # TODO(ver217): lazy init does not support weight norm, skip these models
        if name in (
            "torchaudio_wav2vec2_base",
            "torchaudio_hubert_base",
            "timm_beit",
            "timm_vision_transformer",
            "timm_deit",
            "timm_beitv2",
            "timm_deit3",
            "timm_convit",
            "timm_tnt_b_patch16_224",
        ) or name.startswith(
            ("transformers_vit", "transformers_blip2", "transformers_whisper", "transformers_deepseek")
        ):
            continue
        check_lazy_init(entry, verbose=True, default_device=default_device)


if __name__ == "__main__":
    test_models_lazy_init("transformers", "cpu")
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_lazy/test_ops.py | tests/test_lazy/test_ops.py | import copy
import pytest
import torch
import torch.nn as nn
from lazy_init_utils import SUPPORT_LAZY
from torch.nn import Parameter
from colossalai.lazy import LazyInitContext
@pytest.mark.skipif(not SUPPORT_LAZY, reason="requires torch >= 1.12.0")
def test_lazy_ops():
    """Exercise basic LazyTensor behavior: shape/device/requires_grad before and
    after materialization, in-place RNG ops, and deepcopy of a lazy module."""
    with LazyInitContext():
        x = torch.rand(2, 3)
        assert tuple(x.shape) == (2, 3)
        assert x.device.type == "cpu"
        # fix: this line was a bare expression (`x.requires_grad is False`) and
        # never actually asserted anything
        assert x.requires_grad is False
        y = x.cuda()
        assert tuple(y.shape) == (2, 3)
        assert y.device.type == "cuda"
        assert y.requires_grad is False
        # .cpu() on an already-cpu lazy tensor returns the same object
        assert x.cpu() is x
        p = Parameter(torch.empty(2, 3))
        assert tuple(p.shape) == (2, 3)
        assert p.device.type == "cpu"
        assert p.requires_grad is True
        assert isinstance(p, Parameter)

    # materialization must preserve shape, device, requires_grad and Parameter-ness
    x.materialize()
    assert tuple(x.shape) == (2, 3)
    assert x.device.type == "cpu"
    assert x.requires_grad is False
    y.materialize()
    assert tuple(y.shape) == (2, 3)
    assert y.device.type == "cuda"
    assert y.requires_grad is False
    p.materialize()
    assert tuple(p.shape) == (2, 3)
    assert p.device.type == "cpu"
    assert p.requires_grad is True
    assert isinstance(p, Parameter)

    # in-place RNG op on a lazy tensor
    with LazyInitContext():
        x = torch.empty(2, 3)
        x.uniform_()
    x.materialize()
    assert tuple(x.shape) == (2, 3)

    # a deepcopy taken while lazy must materialize to identical weights
    with LazyInitContext():
        model = nn.Linear(3, 4)
        model = model.cuda()
        model_copied = copy.deepcopy(model)
    LazyInitContext.materialize(model)
    assert model.weight.device.type == "cuda"
    assert model.bias.device.type == "cuda"
    LazyInitContext.materialize(model_copied)
    assert model_copied.weight.device.type == "cuda"
    assert model_copied.bias.device.type == "cuda"
    assert torch.equal(model.weight, model_copied.weight)
    assert torch.equal(model.bias, model_copied.bias)


if __name__ == "__main__":
    test_lazy_ops()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_lazy/test_from_pretrained.py | tests/test_lazy/test_from_pretrained.py | import os
from transformers import BertForPreTraining, LlamaForCausalLM
import colossalai.interface.pretrained as pretrained_utils
from colossalai.lazy import LazyInitContext
def test_lazy_from_pretrained():
    """Check that ``from_pretrained`` under LazyInitContext records the checkpoint
    path and yields parameters with the same shapes as an eager load."""
    # test from cached file, unsharded
    eager_model = BertForPreTraining.from_pretrained("prajjwal1/bert-tiny")
    with LazyInitContext():
        lazy_model = BertForPreTraining.from_pretrained("prajjwal1/bert-tiny")
    pretrained_path = pretrained_utils.get_pretrained_path(lazy_model)
    assert os.path.isfile(pretrained_path)
    for eager_param, lazy_param in zip(eager_model.parameters(), lazy_model.parameters()):
        assert eager_param.shape == lazy_param.shape

    # test from local file, sharded
    llama_path = os.environ["LLAMA_PATH"]
    eager_model = LlamaForCausalLM.from_pretrained(llama_path)
    with LazyInitContext():
        lazy_model = LlamaForCausalLM.from_pretrained(llama_path)
    pretrained_path = pretrained_utils.get_pretrained_path(lazy_model)
    assert os.path.isfile(pretrained_path)
    for eager_param, lazy_param in zip(eager_model.parameters(), lazy_model.parameters()):
        assert eager_param.shape == lazy_param.shape


if __name__ == "__main__":
    test_lazy_from_pretrained()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_checkpoint_io/test_safetensors_async_io.py | tests/test_checkpoint_io/test_safetensors_async_io.py | import tempfile
import pytest
import torch
from safetensors.torch import load_file
from colossalai.checkpoint_io.utils import create_pinned_state_dict
from colossalai.testing import check_state_dict_equal, clear_cache_before_run
from colossalai.utils import get_current_device
from colossalai.utils.safetensors import load_flat, move_and_save, save, save_nested
def gen_optim_state_dict():
    """Build a fake Adam-style optimizer state dict: three parameter states of
    1024x1024 moments plus one param group referencing parameter ids 0..61."""

    def _param_state():
        # per-parameter Adam state: step counter and both moment estimates
        return {
            "step": torch.tensor(1.0),
            "exp_avg": torch.rand((1024, 1024)),
            "exp_avg_sq": torch.rand((1024, 1024)),
        }

    return {
        "state": {index: _param_state() for index in range(3)},
        "param_groups": [
            {
                "lr": 0.001,
                "betas": (0.9, 0.999),
                "eps": 1e-08,
                "weight_decay": 0,
                "bias_correction": True,
                "params": list(range(62)),
            }
        ],
    }
def gen_model_state_dict():
    """Build a fake flat model state dict with three 1024x1024 random weights."""
    return {f"module.weight{index}": torch.rand((1024, 1024)) for index in range(3)}
@pytest.mark.parametrize("empty", [True, False])
@pytest.mark.parametrize("num_threads", [1, 4])
def test_create_pin(empty: bool, num_threads: int):
    """create_pinned_state_dict must pin every tensor and, when ``empty`` is
    False, preserve the original values; non-"state" entries (e.g. param_groups)
    must come through unchanged."""
    model_state_dict = gen_model_state_dict()
    model_state_dict_pinned = create_pinned_state_dict(model_state_dict, empty=empty, num_threads=num_threads)
    for k in model_state_dict.keys():
        assert model_state_dict_pinned[k].is_pinned()
        if not empty:
            assert torch.equal(model_state_dict_pinned[k], model_state_dict[k])

    optim_state_dict = gen_optim_state_dict()
    optim_state_dict_pinned = create_pinned_state_dict(optim_state_dict, empty=empty, num_threads=num_threads)
    for k in optim_state_dict.keys():
        if k == "state":
            # nested {param_id: {state_name: tensor}} structure
            for idx in optim_state_dict[k].keys():
                for kk in optim_state_dict[k][idx].keys():
                    assert optim_state_dict_pinned[k][idx][kk].is_pinned()
                    if not empty:
                        assert torch.equal(optim_state_dict_pinned[k][idx][kk], optim_state_dict[k][idx][kk])
        else:
            # non-tensor metadata (param_groups) is expected to be equal as-is
            assert optim_state_dict[k] == optim_state_dict_pinned[k]
@clear_cache_before_run()
def test_save_load():
    """Round-trip the async safetensors writers: nested (optimizer-style) dicts
    via save_nested/load_flat, flat model dicts via save/load_file, and the
    CUDA -> pinned-CPU move_and_save path."""
    with tempfile.TemporaryDirectory() as tempdir:
        optimizer_state_dict = gen_optim_state_dict()

        optimizer_saved_path = f"{tempdir}/save_optimizer.safetensors"
        f_writer = save_nested(optimizer_saved_path, optimizer_state_dict)
        # drain the async writer before reading the file back
        f_writer.sync_before_step()
        f_writer.synchronize()
        del f_writer
        load_state_dict = load_flat(optimizer_saved_path)
        check_state_dict_equal(load_state_dict, optimizer_state_dict)

        # same round trip for just the nested "state" sub-dict (shard-like case)
        optimizer_shard_saved_path = f"{tempdir}/save_optimizer_shard.safetensors"
        f_writer = save_nested(optimizer_shard_saved_path, optimizer_state_dict["state"])
        f_writer.sync_before_step()
        f_writer.synchronize()
        del f_writer
        load_state_dict_shard = load_flat(optimizer_shard_saved_path)
        check_state_dict_equal(load_state_dict_shard, optimizer_state_dict["state"])

        model_state_dict = gen_model_state_dict()
        model_saved_path = f"{tempdir}/save_model.safetensors"
        f_writer = save(model_saved_path, model_state_dict)
        f_writer.sync_before_step()
        f_writer.synchronize()
        del f_writer
        load_state_dict = load_file(model_saved_path)
        check_state_dict_equal(model_state_dict, load_state_dict)

        # device tensors staged through a pinned host copy before writing
        model_state_dict_cuda = {k: v.to(get_current_device()) for k, v in model_state_dict.items()}
        model_state_pinned = {k: v.pin_memory() for k, v in model_state_dict.items()}
        model_saved_path = f"{tempdir}/save_model_cuda.safetensors"
        f_writer = move_and_save(model_saved_path, model_state_dict_cuda, model_state_pinned)
        f_writer.sync_before_step()
        f_writer.synchronize()
        del f_writer
        load_state_dict = load_file(model_saved_path)
        check_state_dict_equal(model_state_dict, load_state_dict)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_checkpoint_io/test_general_checkpoint_io.py | tests/test_checkpoint_io/test_general_checkpoint_io.py | import tempfile
import pytest
import torch
from torch.optim import Adam
from torchvision.models import resnet18
from colossalai.checkpoint_io import GeneralCheckpointIO
from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
from colossalai.testing import check_state_dict_equal, clear_cache_before_run, parameterize
# ========
# Note:
# 1. because checkpoint IO can be quite slow when run against every model, we only test on resnet for now
# 2. we test both sharded and unsharded checkpoints
# 3. implement sharded checkpoint and test it
# ========
@clear_cache_before_run()
@parameterize("use_safetensors", [True, False])
@parameterize("use_async", [False, True])
def test_unsharded_checkpoint(use_safetensors: bool, use_async: bool):
    """Round-trip an unsharded model/optimizer/lr-scheduler checkpoint through
    GeneralCheckpointIO and verify the reloaded state matches."""
    # create a model and optimizer
    model = resnet18()
    optimizer = Adam(model.parameters(), lr=0.001)
    lr_scheduler = CosineAnnealingWarmupLR(optimizer, total_steps=10)

    # create test data sample
    x = torch.randn(1, 3, 224, 224)

    # run fwd and bwd so the optimizer/scheduler have non-trivial state to save
    y = model(x)
    loss = y.sum()
    loss.backward()
    optimizer.step()
    lr_scheduler.step()

    # create a temp file for checkpoint
    if use_async or use_safetensors:
        suffix = ".safetensors"
    else:
        suffix = ".bin"
    model_ckpt_tempfile = tempfile.NamedTemporaryFile(suffix=suffix)

    if use_async:
        optimizer_ckpt_tempfile = tempfile.NamedTemporaryFile(suffix=suffix)
    else:
        optimizer_ckpt_tempfile = tempfile.NamedTemporaryFile()
    lr_scheduler_ckpt_tempfile = tempfile.NamedTemporaryFile()

    # save the model, optimizer, lr_scheduler
    ckpt_io = GeneralCheckpointIO()
    ckpt_io.save_model(model, model_ckpt_tempfile.name, use_safetensors=use_safetensors, use_async=use_async)
    ckpt_io.save_optimizer(optimizer, optimizer_ckpt_tempfile.name, use_async=use_async)
    ckpt_io.save_lr_scheduler(lr_scheduler, lr_scheduler_ckpt_tempfile.name)

    # create new model
    new_model = resnet18()
    new_optimizer = Adam(new_model.parameters(), lr=0.001)
    # fix: the reloaded scheduler must drive the *new* optimizer; it was
    # previously constructed on the already-stepped original `optimizer`
    new_lr_scheduler = CosineAnnealingWarmupLR(new_optimizer, total_steps=10)

    # make sure async d2h copies and file writes have finished before loading back
    ckpt_io._sync_d2h()
    ckpt_io._sync_io()

    # load the model, optimizer, lr_scheduler
    ckpt_io.load_model(new_model, model_ckpt_tempfile.name)
    ckpt_io.load_optimizer(new_optimizer, optimizer_ckpt_tempfile.name)
    ckpt_io.load_lr_scheduler(new_lr_scheduler, lr_scheduler_ckpt_tempfile.name)

    # check for model and optimizer state dict recursively
    check_state_dict_equal(model.state_dict(), new_model.state_dict())
    check_state_dict_equal(optimizer.state_dict(), new_optimizer.state_dict())
@pytest.mark.parametrize("use_safetensors", [True, False])
@pytest.mark.parametrize("use_async", [False, True])
def test_sharded_model_checkpoint(use_safetensors: bool, use_async: bool):
    """Save a model sharded (directory) and the optimizer unsharded, reload
    both into fresh instances, and verify state dicts match."""
    # create a model and optimizer
    model = resnet18()
    optimizer = Adam(model.parameters(), lr=0.001)

    # create test data sample
    x = torch.randn(1, 3, 224, 224)

    # run fwd and bwd
    y = model(x)
    loss = y.sum()
    loss.backward()
    optimizer.step()

    model_ckpt_dir = tempfile.TemporaryDirectory()
    optimizer_ckpt_tempfile = tempfile.NamedTemporaryFile()

    # save the model and optimizer
    ckpt_io = GeneralCheckpointIO()
    # NOTE(review): the positional args (True, True, "", 10) presumably map to
    # gather/shard flags, a prefix and size_per_shard — confirm against
    # GeneralCheckpointIO.save_model's signature.
    ckpt_io.save_model(
        model, model_ckpt_dir.name, True, True, "", 10, use_safetensors=use_safetensors, use_async=use_async
    )
    ckpt_io.save_optimizer(optimizer, optimizer_ckpt_tempfile.name, shard=False)
    # wait for async writes to land before reading back
    ckpt_io._sync_d2h()
    ckpt_io._sync_io()

    # create new model
    new_model = resnet18()
    new_optimizer = Adam(new_model.parameters(), lr=0.001)

    ckpt_io.load_model(new_model, str(model_ckpt_dir.name), strict=True)
    ckpt_io.load_optimizer(new_optimizer, optimizer_ckpt_tempfile.name)

    # check for model and optimizer state dict recursively
    check_state_dict_equal(model.state_dict(), new_model.state_dict())
    check_state_dict_equal(optimizer.state_dict(), new_optimizer.state_dict())
@pytest.mark.parametrize("use_async", [False, True])
def test_sharded_optimizer_checkpoint(use_async: bool):
    """Save/load a sharded optimizer checkpoint twice: once after the initial
    step, then again after further training on the reloaded copy, verifying
    state equality each time."""
    # create a model and optimizer
    model = resnet18()
    optimizer = Adam(model.parameters(), lr=0.001)

    # create test data sample
    x = torch.randn(1, 3, 224, 224)

    # run fwd and bwd
    y = model(x)
    loss = y.sum()
    loss.backward()
    optimizer.step()

    # create temp directories for checkpoint
    model_ckpt_dir = tempfile.TemporaryDirectory()
    optimizer_ckpt_dir = tempfile.TemporaryDirectory()

    # save the model and optimizer
    ckpt_io = GeneralCheckpointIO()
    ckpt_io.save_model(model, model_ckpt_dir.name, True, True, "", 10, use_safetensors=False)
    ckpt_io.save_optimizer(optimizer, optimizer_ckpt_dir.name, shard=True, size_per_shard=10, use_async=use_async)
    # wait for async writes to land before reading back
    ckpt_io._sync_d2h()
    ckpt_io._sync_io()

    # create new model
    new_model = resnet18()
    new_optimizer = Adam(new_model.parameters(), lr=0.001)

    ckpt_io.load_model(new_model, str(model_ckpt_dir.name), strict=True)
    ckpt_io.load_optimizer(new_optimizer, str(optimizer_ckpt_dir.name))

    # check for model and optimizer state dict recursively
    check_state_dict_equal(model.state_dict(), new_model.state_dict())
    check_state_dict_equal(optimizer.state_dict(), new_optimizer.state_dict())

    # continue running fwd and bwd
    for _ in range(5):
        y = new_model(x)
        loss = y.sum()
        loss.backward()
        new_optimizer.step()

    # create temp directories for checkpoint
    model_ckpt_dir = tempfile.TemporaryDirectory()
    optimizer_ckpt_dir = tempfile.TemporaryDirectory()

    # save the newly got optimizer
    ckpt_io.save_model(new_model, model_ckpt_dir.name, True, True, "", 10, use_safetensors=False)
    ckpt_io.save_optimizer(new_optimizer, optimizer_ckpt_dir.name, shard=True, size_per_shard=10, use_async=use_async)
    ckpt_io._sync_d2h()
    ckpt_io._sync_io()

    # create another new model
    new_new_model = resnet18()
    new_new_optimizer = Adam(new_new_model.parameters(), lr=0.001)

    ckpt_io.load_model(new_new_model, str(model_ckpt_dir.name), strict=True)
    ckpt_io.load_optimizer(new_new_optimizer, str(optimizer_ckpt_dir.name))

    # check for model and optimizer state dict recursively
    check_state_dict_equal(new_model.state_dict(), new_new_model.state_dict())
    check_state_dict_equal(new_optimizer.state_dict(), new_new_optimizer.state_dict())
@pytest.mark.parametrize("use_async", [False, True])
def test_sharded_optimizer_multiple_param_groups(use_async: bool):
    """Same sharded-optimizer round trip, but with two param groups carrying
    different learning rates, to cover group metadata serialization."""
    # create a model and optimizer
    model = resnet18()
    optimizer = Adam(
        [{"params": model.layer1.parameters()}, {"params": model.layer2.parameters(), "lr": 0.002}], lr=0.001
    )

    # create test data sample
    x = torch.randn(1, 3, 224, 224)

    # run fwd and bwd
    y = model(x)
    loss = y.sum()
    loss.backward()
    optimizer.step()

    # create temp directories for checkpoint
    model_ckpt_dir = tempfile.TemporaryDirectory()
    optimizer_ckpt_dir = tempfile.TemporaryDirectory()

    # save the model and optimizer
    ckpt_io = GeneralCheckpointIO()
    ckpt_io.save_model(model, model_ckpt_dir.name, True, True, "", 10, use_safetensors=False)
    ckpt_io.save_optimizer(optimizer, optimizer_ckpt_dir.name, shard=True, size_per_shard=10, use_async=use_async)
    # wait for async writes to land before reading back
    ckpt_io._sync_d2h()
    ckpt_io._sync_io()

    # create new model with the same two-group optimizer layout
    new_model = resnet18()
    new_optimizer = Adam(
        [{"params": new_model.layer1.parameters()}, {"params": new_model.layer2.parameters(), "lr": 0.002}], lr=0.001
    )

    ckpt_io.load_model(new_model, str(model_ckpt_dir.name), strict=True)
    ckpt_io.load_optimizer(new_optimizer, str(optimizer_ckpt_dir.name))

    # check for model and optimizer state dict recursively
    check_state_dict_equal(model.state_dict(), new_model.state_dict())
    check_state_dict_equal(optimizer.state_dict(), new_optimizer.state_dict())
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_checkpoint_io/test_plugins_huggingface_compatibility.py | tests/test_checkpoint_io/test_plugins_huggingface_compatibility.py | import pytest
import torch
import torch.distributed as dist
from utils import shared_tempdir
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin, LowLevelZeroPlugin, TorchDDPPlugin
from colossalai.nn.optimizer import HybridAdam
from colossalai.testing import (
check_state_dict_equal,
clear_cache_before_run,
parameterize,
rerun_if_address_is_in_use,
spawn,
)
from tests.kit.model_zoo import model_zoo
@clear_cache_before_run()
@parameterize("model_name", ["transformers_llama_for_causal_lm"])
@parameterize("plugin_type", ["ddp", "zero", "gemini"])
def exam_from_pretrained(plugin_type: str, model_name: str, shard=True, size_per_shard=32):
    """Train one step under a booster plugin, save with booster.save_model, then
    reload via HuggingFace ``from_pretrained`` and check state-dict equality."""
    (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) = next(
        iter(model_zoo.get_sub_registry(model_name).values())
    )
    criterion = loss_fn

    # pick the plugin under test
    if plugin_type == "ddp":
        plugin = TorchDDPPlugin()
    elif plugin_type == "zero":
        plugin = LowLevelZeroPlugin(stage=2, max_norm=1.0, initial_scale=32)
    elif plugin_type == "gemini":
        plugin = GeminiPlugin(precision="fp16", initial_scale=32)
    else:
        raise ValueError(f"Plugin with type {plugin_type} is invalid, please check your argument.")

    booster = Booster(plugin=plugin)

    model = model_fn().cuda()
    # remember the HF class so the checkpoint can be reloaded via from_pretrained
    model_huggingface_cls = model.__class__
    optimizer = HybridAdam(model.parameters(), lr=0.001)
    model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)

    # one fwd/bwd/step so saved weights differ from the fresh init
    data = data_gen_fn()
    data = {k: v.to("cuda") if torch.is_tensor(v) or "Tensor" in v.__class__.__name__ else v for k, v in data.items()}
    output = model(**data)
    loss = criterion(output)
    booster.backward(loss, optimizer)
    optimizer.step()

    with shared_tempdir() as tempdir:
        model_ckpt_path = f"{tempdir}/model"
        booster.save_model(model, model_ckpt_path, shard=shard, size_per_shard=size_per_shard)
        dist.barrier()

        new_model = model_huggingface_cls.from_pretrained(model_ckpt_path)
        new_model = new_model.cuda()
        new_optimizer = HybridAdam(new_model.parameters(), lr=0.001)
        new_model, new_optimizer, criterion, _, _ = booster.boost(new_model, new_optimizer, criterion)

        if plugin_type == "gemini":
            # gemini shards parameters, so gather on every rank before comparing
            check_state_dict_equal(model.state_dict(only_rank_0=False), new_model.state_dict(only_rank_0=False))
        else:
            check_state_dict_equal(model.unwrap().state_dict(), new_model.unwrap().state_dict())
        dist.barrier()
def run_dist(rank, world_size, port):
    """Per-process entry: initialize the distributed env, then run the exam."""
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    exam_from_pretrained()
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [2])
@rerun_if_address_is_in_use()
def test_huggingface_compatibility(world_size):
    """Spawn `world_size` workers to run the from_pretrained compatibility exam."""
    spawn(run_dist, world_size)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_checkpoint_io/test_gemini_torch_compability.py | tests/test_checkpoint_io/test_gemini_torch_compability.py | import pytest
import torch
import torch.distributed as dist
from torch.optim import Adam
from utils import shared_tempdir
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin, TorchDDPPlugin
from colossalai.nn.optimizer import HybridAdam
from colossalai.testing import (
check_state_dict_equal,
clear_cache_before_run,
parameterize,
rerun_if_address_is_in_use,
spawn,
)
from tests.kit.model_zoo import model_zoo
@clear_cache_before_run()
@parameterize("shard", [False, True])
@parameterize("model_name", ["transformers_llama_for_causal_lm"])
def exam_torch_load_from_gemini(shard: bool, model_name: str):
    """Save a Gemini-trained checkpoint and reload it under plain TorchDDP/Adam,
    verifying model and optimizer state equality and that training continues."""
    (model_fn, data_gen_fn, output_transform_fn, _, _) = next(iter(model_zoo.get_sub_registry(model_name).values()))
    criterion = lambda x: x.mean()
    plugin = GeminiPlugin(precision="fp16", initial_scale=(2**14))
    booster = Booster(plugin=plugin)

    model = model_fn()
    optimizer = HybridAdam(model.parameters(), lr=0.001)
    model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)

    # one fwd/bwd/step so there is non-trivial optimizer state to checkpoint
    data = data_gen_fn()
    data = {k: v.to("cuda") if torch.is_tensor(v) or "Tensor" in v.__class__.__name__ else v for k, v in data.items()}
    output = model(**data)
    output = output_transform_fn(output)
    output_key = list(output.keys())[0]
    loss = criterion(output[output_key])

    booster.backward(loss, optimizer)
    optimizer.step()

    with shared_tempdir() as tempdir:
        model_ckpt_path = f"{tempdir}/model"
        optimizer_ckpt_path = f"{tempdir}/optimizer"
        booster.save_model(model, model_ckpt_path, shard=shard)
        booster.save_optimizer(optimizer, optimizer_ckpt_path, shard=shard)
        dist.barrier()

        new_model = model_fn()
        new_optimizer = Adam(new_model.parameters(), lr=0.001)
        new_plugin = TorchDDPPlugin()
        new_booster = Booster(plugin=new_plugin)
        new_model, new_optimizer, criterion, _, _ = new_booster.boost(new_model, new_optimizer, criterion)

        # Loading HybridAdam states to torch.Adam
        new_booster.load_model(new_model, model_ckpt_path, strict=True)

        # Add prefix to get aligned with pytorch parameter names.
        check_state_dict_equal(
            model.state_dict(only_rank_0=False, prefix="module.module."),
            new_model.state_dict(),
            ignore_device=False,
            ignore_dtype=True,
        )

        new_booster.load_optimizer(new_optimizer, optimizer_ckpt_path)
        check_state_dict_equal(optimizer.state_dict(only_rank_0=False), new_optimizer.state_dict(), ignore_device=False)

        # Check the new model/optimizer can successfully run.
        data = data_gen_fn()
        data = {
            k: v.to("cuda") if torch.is_tensor(v) or "Tensor" in v.__class__.__name__ else v for k, v in data.items()
        }
        output = new_model(**data)
        output = output_transform_fn(output)
        output_key = list(output.keys())[0]
        loss = criterion(output[output_key])
        new_booster.backward(loss, new_optimizer)
        new_optimizer.step()

        new_booster.save_model(new_model, model_ckpt_path, shard=shard)
        new_booster.save_optimizer(new_optimizer, optimizer_ckpt_path, shard=shard)
@clear_cache_before_run()
@parameterize("shard", [False, True])
@parameterize("model_name", ["transformers_gpt"])
def exam_gemini_load_from_torch(shard: bool, model_name: str):
    """Checkpoint a TorchDDP-trained model/optimizer and reload it under GeminiPlugin.

    Verifies that weights, optimizer states and the shared Adam hyperparameters
    survive the plugin switch, and that the reloaded pair can still run a
    training step and be re-saved.
    """
    model_fn, data_gen_fn, output_transform_fn, _, _ = next(iter(model_zoo.get_sub_registry(model_name).values()))

    def criterion(x):
        return x.mean()

    booster = Booster(plugin=TorchDDPPlugin())
    model = model_fn()
    optimizer = Adam(model.parameters(), lr=0.001)
    model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)

    def _cuda_batch():
        # Move tensor-like entries of a generated batch onto the GPU.
        batch = data_gen_fn()
        return {
            k: v.to("cuda") if torch.is_tensor(v) or "Tensor" in v.__class__.__name__ else v for k, v in batch.items()
        }

    # One optimization step so the checkpoint carries non-trivial state.
    data = _cuda_batch()
    output = output_transform_fn(model(**data))
    loss = criterion(output[list(output.keys())[0]])
    booster.backward(loss, optimizer)
    optimizer.step()

    with shared_tempdir() as tempdir:
        model_ckpt_path = f"{tempdir}/model"
        optimizer_ckpt_path = f"{tempdir}/optimizer"
        booster.save_model(model, model_ckpt_path, shard=shard)
        booster.save_optimizer(optimizer, optimizer_ckpt_path, shard=shard)
        dist.barrier()

        new_model = model_fn()
        new_optimizer = HybridAdam(new_model.parameters(), lr=0.001)
        new_booster = Booster(plugin=GeminiPlugin())
        new_model, new_optimizer, criterion, _, _ = new_booster.boost(new_model, new_optimizer, criterion)

        # Loading torch.Adam states to HybridAdam
        new_booster.load_model(new_model, model_ckpt_path, strict=True)
        # Add prefix to get aligned with pytorch parameter names.
        check_state_dict_equal(
            new_model.state_dict(only_rank_0=False, prefix="module.module."),
            model.state_dict(),
            ignore_device=False,
            ignore_dtype=True,
        )

        new_booster.load_optimizer(new_optimizer, optimizer_ckpt_path)
        old_state_dict = optimizer.state_dict()
        new_state_dict = new_optimizer.state_dict(only_rank_0=False)

        # Comparison of param_groups needs special care here,
        # since not all hyperparameters in Adam are used by HybridAdam
        hyperparameters_to_examine = ["params", "lr", "betas", "eps", "weight_decay"]
        for old_group, new_group in zip(old_state_dict["param_groups"], new_state_dict["param_groups"]):
            for k in hyperparameters_to_examine:
                assert (
                    k in old_group and k in new_group
                ), f"Old group's keys: {list(old_group.keys())}, New group's keys: {list(new_group.keys())}"
                assert old_group[k] == new_group[k]
        check_state_dict_equal(old_state_dict["state"], new_state_dict["state"], ignore_device=False)

        # Check the new model/optimizer can successfully run.
        data = _cuda_batch()
        output = output_transform_fn(new_model(**data))
        loss = criterion(output[list(output.keys())[0]])
        new_booster.backward(loss, new_optimizer)
        new_optimizer.step()
        new_booster.save_model(new_model, model_ckpt_path, shard=shard)
        new_booster.save_optimizer(new_optimizer, optimizer_ckpt_path, shard=shard)
def run_dist(rank, world_size, port):
    # Per-process entry point: set up the NCCL process group and run both
    # checkpoint-conversion directions.
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    exam_torch_load_from_gemini()
    exam_gemini_load_from_torch()


@pytest.mark.dist
@pytest.mark.parametrize("world_size", [2])
@rerun_if_address_is_in_use()
def test_gemini_ckpIO(world_size):
    """Spawn `world_size` workers and run the Gemini/Torch checkpoint compatibility checks."""
    spawn(run_dist, world_size)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_checkpoint_io/test_torch_ddp_checkpoint_io.py | tests/test_checkpoint_io/test_torch_ddp_checkpoint_io.py | import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import SGD
from torchvision.models import resnet18
from utils import shared_tempdir
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import TorchDDPPlugin
from colossalai.interface import OptimizerWrapper
from colossalai.testing import check_state_dict_equal, parameterize, rerun_if_address_is_in_use, spawn
@parameterize("shard", [False, True])
@parameterize("size_per_shard", [16, 128])
@parameterize("use_async", [False, True])
@parameterize("low_cpu_mem_mode", [False, True])
def check_torch_ddp_checkpointIO(shard: bool, size_per_shard: int, use_async: bool, low_cpu_mem_mode: bool):
    """Round-trip model/optimizer/lr-scheduler checkpoints through TorchDDPPlugin."""
    booster = Booster(plugin=TorchDDPPlugin())
    model = resnet18()

    def criterion(x):
        return x.mean()

    optimizer = SGD(model.parameters(), lr=0.001, momentum=0.5)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)
    model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion, lr_scheduler=scheduler)
    assert isinstance(model.module, DDP)
    assert isinstance(optimizer, OptimizerWrapper)

    # One optimization step so optimizer/scheduler states are non-trivial.
    x = torch.randn(4, 3, 224, 224).to("cuda")
    loss = criterion(model(x))
    booster.backward(loss, optimizer)
    optimizer.clip_grad_by_norm(1.0)
    optimizer.step()
    scheduler.step()

    with shared_tempdir() as tempdir:
        model_ckpt_path = f"{tempdir}/model"
        optimizer_ckpt_path = f"{tempdir}/optimizer"
        lr_scheduler_ckpt_path = f"{tempdir}/lr_scheduler"
        if not shard and use_async:
            # Unsharded async saving writes safetensors files.
            model_ckpt_path = f"{model_ckpt_path}.safetensors"
            optimizer_ckpt_path = f"{optimizer_ckpt_path}.safetensors"
        booster.save_model(model, model_ckpt_path, shard=shard, size_per_shard=size_per_shard, use_async=use_async)
        booster.save_optimizer(
            optimizer, optimizer_ckpt_path, shard=shard, size_per_shard=size_per_shard, use_async=use_async
        )
        booster.save_lr_scheduler(scheduler, lr_scheduler_ckpt_path)
        # Flush any pending async device-to-host copies and file writes
        # before other ranks try to read the files back.
        booster.checkpoint_io._sync_d2h()
        booster.checkpoint_io._sync_io()
        dist.barrier()

        new_model = resnet18()
        new_optimizer = SGD(new_model.parameters(), lr=0.001)
        new_scheduler = torch.optim.lr_scheduler.StepLR(new_optimizer, step_size=1, gamma=0.1)
        new_model, new_optimizer, _, _, new_scheduler = booster.boost(
            new_model, new_optimizer, lr_scheduler=new_scheduler
        )

        booster.load_model(new_model, model_ckpt_path, low_cpu_mem_mode=low_cpu_mem_mode)
        check_state_dict_equal(model.state_dict(), new_model.state_dict())
        booster.load_optimizer(new_optimizer, optimizer_ckpt_path, low_cpu_mem_mode=low_cpu_mem_mode)
        check_state_dict_equal(optimizer.state_dict(), new_optimizer.state_dict())
        booster.load_lr_scheduler(new_scheduler, lr_scheduler_ckpt_path)
        check_state_dict_equal(scheduler.state_dict(), new_scheduler.state_dict())
def run_dist(rank, world_size, port):
    # Per-process entry point for the spawned workers.
    colossalai.launch(rank=rank, world_size=world_size, port=port, host="localhost")
    check_torch_ddp_checkpointIO()


@rerun_if_address_is_in_use()
def test_torch_ddp_checkpointIO():
    """Run the TorchDDP checkpoint-IO checks on 2 processes."""
    spawn(run_dist, 2)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_checkpoint_io/utils.py | tests/test_checkpoint_io/utils.py | import tempfile
from contextlib import contextmanager, nullcontext
from typing import Iterator
import torch.distributed as dist
@contextmanager
def shared_tempdir() -> Iterator[str]:
    """
    A temporary directory that is shared across all processes.
    """
    # Only rank 0 actually creates (and later removes) the directory;
    # the remaining ranks just receive its path via broadcast.
    ctx_fn = tempfile.TemporaryDirectory if dist.get_rank() == 0 else nullcontext
    with ctx_fn() as tempdir:
        try:
            holder = [tempdir]
            dist.broadcast_object_list(holder, src=0)
            yield holder[0]  # every rank sees the rank-0 path
        finally:
            # Keep rank 0 from deleting the directory while peers still use it.
            dist.barrier()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_checkpoint_io/test_gemini_checkpoint_io.py | tests/test_checkpoint_io/test_gemini_checkpoint_io.py | import os
import pytest
import torch
import torch.distributed as dist
from transformers import LlamaForCausalLM
from utils import shared_tempdir
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin
from colossalai.lazy import LazyInitContext
from colossalai.nn.optimizer import HybridAdam
from colossalai.testing import (
check_state_dict_equal,
clear_cache_before_run,
parameterize,
rerun_if_address_is_in_use,
spawn,
)
from tests.kit.model_zoo import model_zoo
MODEL_PLACEMENT_CONFIGS = [
{"placement_policy": "static", "shard_param_frac": 0.5},
]
OPTIM_PLACEMENT_CONFIGS = [
{"placement_policy": "static", "shard_param_frac": 0.0, "offload_optim_frac": 0.5}, # zero2-offload-half
]
@clear_cache_before_run()
@parameterize("placement_config", MODEL_PLACEMENT_CONFIGS)
@parameterize("model_name", ["transformers_bert_for_sequence_classification"])
@parameterize("use_safetensors", [False, True])
@parameterize("tp_size", [1, 2])
@parameterize("zero_size", [2])
@parameterize("use_async", [False, True])
def exam_state_dict_with_origin(
    placement_config, model_name, use_safetensors: bool, tp_size: int, zero_size: int, use_async: bool
):
    """Save a Gemini-sharded BERT and reload it with plain HF `from_pretrained`."""
    from transformers import BertForSequenceClassification

    model_fn, data_gen_fn, output_transform_fn, _, _ = next(iter(model_zoo.get_sub_registry(model_name).values()))
    bert_model = model_fn()
    # Shardformer kernel optimizations are only switched on together with TP.
    use_shardformer_opts = tp_size > 1

    with shared_tempdir() as tempdir:
        pretrained_path = os.path.join(tempdir, "pretrained")
        bert_model.config.save_pretrained(save_directory=pretrained_path)

        extra_dp_size = dist.get_world_size() // (zero_size * tp_size)
        plugin = GeminiPlugin(
            **placement_config,
            tp_size=tp_size,
            enable_flash_attention=use_shardformer_opts,
            enable_fused_normalization=use_shardformer_opts,
            enable_jit_fused=use_shardformer_opts,
            extra_dp_size=extra_dp_size,
        )
        booster = Booster(plugin=plugin)
        bert_model, _, _, _, _ = booster.boost(bert_model)
        # Model size in MiB; used to force a multi-file sharded save below.
        model_size = sum(p.numel() * p.element_size() for p in bert_model.parameters()) / 1024**2

        booster.save_model(
            bert_model,
            pretrained_path,
            True,
            True,
            "",
            (model_size / 3),
            use_safetensors=use_safetensors,
            use_async=use_async,
        )
        booster.checkpoint_io._sync_d2h()
        booster.checkpoint_io._sync_io()
        dist.barrier()

        new_bert_model = BertForSequenceClassification.from_pretrained(pretrained_path)
        check_state_dict_equal(bert_model.state_dict(only_rank_0=False), new_bert_model.state_dict())
@clear_cache_before_run()
@parameterize("placement_config", OPTIM_PLACEMENT_CONFIGS)
@parameterize("shard", [True, False])
@parameterize("model_name", ["transformers_llama_for_causal_lm"])
@parameterize("size_per_shard", [32])
@parameterize("tp_size", [1, 2])
@parameterize("zero_size", [2])
@parameterize("use_async", [False, True])
@parameterize("low_cpu_mem_mode", [True, False])
def exam_state_dict(
    placement_config,
    shard: bool,
    model_name: str,
    size_per_shard: int,
    tp_size: int,
    zero_size: int,
    use_async: bool,
    low_cpu_mem_mode: bool,
):
    """Round-trip Gemini model/optimizer checkpoints and verify the reloaded pair still trains."""
    model_fn, data_gen_fn, output_transform_fn, _, _ = next(iter(model_zoo.get_sub_registry(model_name).values()))

    def criterion(x):
        return x.mean()

    # Shardformer kernel optimizations are only switched on together with TP.
    use_shardformer_opts = tp_size > 1
    extra_dp_size = dist.get_world_size() // (zero_size * tp_size)
    plugin = GeminiPlugin(
        **placement_config,
        precision="fp16",
        initial_scale=(2**14),
        tp_size=tp_size,
        extra_dp_size=extra_dp_size,
        enable_flash_attention=use_shardformer_opts,
        enable_fused_normalization=use_shardformer_opts,
        enable_jit_fused=use_shardformer_opts,
    )
    booster = Booster(plugin=plugin)

    model = model_fn()
    new_model = model_fn()
    optimizer = HybridAdam(model.parameters(), lr=0.001)
    model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)
    # Different lr on purpose: loading must overwrite it with the saved 0.1 below.
    new_optimizer = HybridAdam(new_model.parameters(), lr=0.01)
    new_model, new_optimizer, criterion, _, _ = booster.boost(new_model, new_optimizer, criterion)

    def _cuda_batch():
        # Move tensor-like entries of a generated batch onto the GPU.
        batch = data_gen_fn()
        return {
            k: v.to("cuda") if torch.is_tensor(v) or "Tensor" in v.__class__.__name__ else v for k, v in batch.items()
        }

    # One optimization step so the optimizer carries non-trivial state.
    data = _cuda_batch()
    output = output_transform_fn(model(**data))
    loss = criterion(output[list(output.keys())[0]])
    booster.backward(loss, optimizer)
    optimizer.step()
    # Edit the lr after stepping; loading must restore this edited value.
    for group in optimizer.param_groups:
        group["lr"] = 0.1

    with shared_tempdir() as tempdir:
        model_ckpt_path = f"{tempdir}/model"
        optimizer_ckpt_path = f"{tempdir}/optimizer"
        if not shard and use_async:
            model_ckpt_path = f"{model_ckpt_path}.safetensors"
            optimizer_ckpt_path = f"{optimizer_ckpt_path}.safetensors"
        booster.save_model(model, model_ckpt_path, shard=shard, size_per_shard=size_per_shard, use_async=use_async)
        booster.save_optimizer(
            optimizer, optimizer_ckpt_path, shard=shard, size_per_shard=size_per_shard, use_async=use_async
        )
        booster.checkpoint_io._sync_d2h()
        booster.checkpoint_io._sync_io()
        dist.barrier()

        booster.load_model(new_model, model_ckpt_path, low_cpu_mem_mode=low_cpu_mem_mode)
        check_state_dict_equal(
            model.state_dict(only_rank_0=False), new_model.state_dict(only_rank_0=False), ignore_dtype=True
        )
        booster.load_optimizer(new_optimizer, optimizer_ckpt_path, low_cpu_mem_mode=low_cpu_mem_mode)
        check_state_dict_equal(optimizer.state_dict(only_rank_0=False), new_optimizer.state_dict(only_rank_0=False))
        for group in new_optimizer.param_groups:
            assert group["lr"] == 0.1

        # Check the new model/optimizer can successfully run.
        data = _cuda_batch()
        output = output_transform_fn(new_model(**data))
        loss = criterion(output[list(output.keys())[0]])
        booster.backward(loss, new_optimizer)
        new_optimizer.step()

        with shared_tempdir() as new_tempdir:
            model_ckpt_path = f"{new_tempdir}/model"
            optimizer_ckpt_path = f"{new_tempdir}/optimizer"
            if not shard and use_async:
                model_ckpt_path = f"{model_ckpt_path}.safetensors"
                optimizer_ckpt_path = f"{optimizer_ckpt_path}.safetensors"
            booster.save_model(new_model, model_ckpt_path, shard=shard, use_async=use_async)
            booster.save_optimizer(new_optimizer, optimizer_ckpt_path, shard=shard, use_async=use_async)
            booster.checkpoint_io._sync_d2h()
            booster.checkpoint_io._sync_io()
def exam_lazy_from_pretrained():
    """Lazily init a LLaMA from $LLAMA_PATH, boost and save it, then compare with eager weights."""
    llama_path = os.environ["LLAMA_PATH"]
    booster = Booster(plugin=GeminiPlugin())
    orig_model = LlamaForCausalLM.from_pretrained(llama_path)
    # Halve the eager reference weights to match the saved checkpoint's dtype.
    orig_state_dict = {k: v.half() for k, v in orig_model.state_dict().items()}
    with LazyInitContext():
        model = LlamaForCausalLM.from_pretrained(llama_path)
    model, *_ = booster.boost(model)
    with shared_tempdir() as tempdir:
        save_path = os.path.join(tempdir, "model.pt")
        booster.save_model(model, save_path, shard=False)
        dist.barrier()
        state_dict = torch.load(save_path, map_location="cpu")
        check_state_dict_equal(state_dict, orig_state_dict, ignore_dtype=True)
def run_dist(rank, world_size, port):
    # Per-process entry point: launch the process group, then run all exams.
    # NOTE(review): exam_lazy_from_pretrained requires the LLAMA_PATH env var.
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    exam_state_dict()
    exam_state_dict_with_origin()
    exam_lazy_from_pretrained()


@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_gemini_ckpIO():
    """Run all Gemini checkpoint-IO exams on 4 processes."""
    spawn(run_dist, 4)


if __name__ == "__main__":
    test_gemini_ckpIO()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_checkpoint_io/test_hybrid_parallel_plugin_checkpoint_io.py | tests/test_checkpoint_io/test_hybrid_parallel_plugin_checkpoint_io.py | import pytest
import torch
import torch.distributed as dist
from packaging.version import Version
from torch.optim import Adam
from utils import shared_tempdir
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import HybridParallelPlugin
from colossalai.shardformer.layer.utils import Randomizer
from colossalai.tensor.d_tensor.api import clear_layout_converter
from colossalai.testing import (
assert_close_loose,
check_state_dict_equal,
clear_cache_before_run,
parameterize,
rerun_if_address_is_in_use,
spawn,
)
from tests.kit.model_zoo import model_zoo
# Parallelism configurations under test.  Older torch builds exercise the full
# matrix; on torch >= 2.0 only the pp=2 + zero1 config is kept (see TODO below).
if Version(torch.__version__) < Version("2.0.0"):
    TEST_CONFIGS = [
        {
            "tp_size": 4,
            "pp_size": 1,
            "precision": "fp32",
        },
        {"tp_size": 2, "pp_size": 2, "num_microbatches": 4, "precision": "fp16", "initial_scale": 1},
        {"tp_size": 2, "pp_size": 1, "zero_stage": 2, "precision": "fp16", "initial_scale": 1},
        {"tp_size": 1, "pp_size": 2, "num_microbatches": 4, "zero_stage": 1, "precision": "fp16", "initial_scale": 1},
    ]
else:
    TEST_CONFIGS = [
        # TODO(ver217): other configs lead to hang
        {"tp_size": 1, "pp_size": 2, "num_microbatches": 4, "zero_stage": 1, "precision": "fp16", "initial_scale": 1},
    ]
@parameterize("shard", [False, True])
@parameterize("model_name", ["transformers_llama_for_causal_lm"])
@parameterize("size_per_shard", [32])
@parameterize("test_config", TEST_CONFIGS)
@parameterize("use_async", [False, True])
@parameterize("low_cpu_mem_mode", [False, True])
@clear_cache_before_run()
def exam_state_dict(
    shard: bool, model_name: str, size_per_shard: int, test_config: dict, use_async: bool, low_cpu_mem_mode: bool
):
    """Round-trip HybridParallel checkpoints, then verify both pairs keep training in lockstep."""
    model_fn, data_gen_fn, output_transform_fn, loss_fn, _ = next(
        iter(model_zoo.get_sub_registry(model_name).values())
    )
    criterion = loss_fn
    plugin = HybridParallelPlugin(**test_config)
    booster = Booster(plugin=plugin)

    def _criterion(outputs, inputs):
        # Pipeline schedules invoke the criterion with (outputs, inputs).
        outputs = output_transform_fn(outputs)
        return criterion(outputs)

    def _preprocess_data(data):
        if booster.plugin.stage_manager is not None:
            # Pipeline parallelism consumes an iterator of microbatches;
            # replicate the sample along dim 0 to build a batch of 4.
            for k, v in data.items():
                if torch.is_tensor(v) or "Tensor" in v.__class__.__name__:
                    new_shape = [1] * v.dim()
                    new_shape[0] = 4
                    data[k] = v.to("cuda").repeat(*new_shape)
            return iter([data])
        else:
            return {k: v.cuda() for k, v in data.items()}

    model = model_fn().cuda()
    optimizer = Adam(model.parameters(), lr=1e-3)
    model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)

    # One optimization step before checkpointing.
    data = data_gen_fn()
    model.train()
    if booster.plugin.stage_manager is not None:
        booster.execute_pipeline(_preprocess_data(data), model, _criterion, optimizer, return_loss=True)
    else:
        loss = criterion(model(**_preprocess_data(data)))
        optimizer.backward(loss)
    optimizer.step()
    optimizer.zero_grad()

    with shared_tempdir() as tempdir:
        model_ckpt_path = f"{tempdir}/model"
        optimizer_ckpt_path = f"{tempdir}/optimizer"
        if not shard and use_async:
            # Unsharded async saving writes safetensors files.
            model_ckpt_path = f"{model_ckpt_path}.safetensors"
            optimizer_ckpt_path = f"{optimizer_ckpt_path}.safetensors"
        booster.save_model(model, model_ckpt_path, shard=shard, size_per_shard=size_per_shard, use_async=use_async)
        booster.save_optimizer(
            optimizer, optimizer_ckpt_path, shard=shard, size_per_shard=size_per_shard, use_async=use_async
        )
        booster.checkpoint_io._sync_d2h()
        booster.checkpoint_io._sync_io()
        dist.barrier()

        new_model = model_fn().cuda()
        new_optimizer = Adam(new_model.parameters(), lr=1e-3)
        new_model, new_optimizer, criterion, _, _ = booster.boost(new_model, new_optimizer, criterion)
        booster.load_model(new_model, model_ckpt_path, low_cpu_mem_mode=low_cpu_mem_mode)
        check_state_dict_equal(model.unwrap().state_dict(), new_model.unwrap().state_dict())
        booster.load_optimizer(new_optimizer, optimizer_ckpt_path, low_cpu_mem_mode=low_cpu_mem_mode)
        check_state_dict_equal(optimizer.unwrap().state_dict(), new_optimizer.unwrap().state_dict())
        dist.barrier()

        # Check whether the loaded model & optimizer works smoothly.
        model.train()
        new_model.train()
        data_for_shard = data_gen_fn()
        data_for_origin = data_gen_fn()
        if booster.plugin.stage_manager is not None:
            booster.execute_pipeline(_preprocess_data(data_for_shard), model, _criterion, optimizer, return_loss=True)
            booster.execute_pipeline(
                _preprocess_data(data_for_origin),
                new_model,
                _criterion,
                new_optimizer,
                return_loss=True,
            )
        else:
            old_model_loss = criterion(model(**_preprocess_data(data_for_shard)))
            optimizer.backward(old_model_loss)
            new_model_loss = criterion(new_model(**_preprocess_data(data_for_origin)))
            new_optimizer.backward(new_model_loss)
        optimizer.step()
        new_optimizer.step()

        # Check updated weights.
        for p1, p2 in zip(model.unwrap().parameters(), new_model.unwrap().parameters()):
            assert_close_loose(p1, p2, atol=5e-3, rtol=5e-3)

    dist.barrier()
    Randomizer.reset_index()
    clear_layout_converter()
def run_dist(rank, world_size, port):
    # Per-process entry point for the spawned workers.
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    exam_state_dict()


@pytest.mark.dist
@pytest.mark.parametrize("world_size", [4])
@rerun_if_address_is_in_use()
def test_hybrid_ckpIO(world_size):
    """Run the HybridParallel checkpoint-IO exam on `world_size` processes."""
    spawn(run_dist, world_size)


if __name__ == "__main__":
    test_hybrid_ckpIO(4)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_checkpoint_io/test_torch_fsdp_checkpoint_io.py | tests/test_checkpoint_io/test_torch_fsdp_checkpoint_io.py | import pytest
import torch
from packaging import version
from torch.optim import SGD
from torchvision.models import resnet18
from utils import shared_tempdir
import colossalai
from colossalai.booster import Booster
# FSDP only exists from torch 1.12 on; guard the imports so test collection
# does not fail on older builds (the test itself is skipped there).
if version.parse(torch.__version__) >= version.parse("1.12.0"):
    from colossalai.booster.plugin import TorchFSDPPlugin
    from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
def compare_nested_dict(dict1, dict2):
    """Return True if every entry of ``dict1`` is present and equal in ``dict2``.

    Recurses into nested dicts, compares lists element-wise (using
    ``torch.equal`` for tensor elements) and uses ``torch.equal`` for tensor
    values.  The check is intentionally one-sided: keys that only exist in
    ``dict2`` are ignored, matching the original behavior.

    Fixes over the previous version: mismatched container types and lists of
    different lengths now report inequality (``False``) instead of raising
    AssertionError/IndexError from inside a comparison predicate, and
    ``isinstance`` is used so dict/tensor subclasses are handled.
    """
    for key, val1 in dict1.items():
        if key not in dict2:
            return False
        if not _values_equal(val1, dict2[key]):
            return False
    return True


def _values_equal(val1, val2):
    # Equality for a single pair of (possibly nested) values.
    if isinstance(val1, dict):
        return isinstance(val2, dict) and compare_nested_dict(val1, val2)
    if isinstance(val1, list):
        if not isinstance(val2, list) or len(val1) != len(val2):
            return False
        return all(_values_equal(a, b) for a, b in zip(val1, val2))
    if isinstance(val1, torch.Tensor):
        # torch.equal requires both operands to be tensors of identical
        # shape/dtype/values.
        return isinstance(val2, torch.Tensor) and torch.equal(val1, val2)
    return val1 == val2
@parameterize("use_async", [False, True])
def check_torch_fsdp_ckpt(use_async: bool):
    """Exercise unsharded and sharded FSDP checkpoint save/load round-trips."""
    model = resnet18()
    booster = Booster(plugin=TorchFSDPPlugin())
    optimizer = SGD(model.parameters(), lr=0.001, momentum=0.9)

    def criterion(x):
        return x.mean()

    fsdp_model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)
    inputs = torch.randn(4, 3, 224, 224)
    outputs = None

    def run_model():
        # One forward/backward/step; keeps the latest outputs for loss checks.
        nonlocal outputs
        outputs = fsdp_model(inputs)
        optimizer.zero_grad()
        criterion(outputs).backward()
        optimizer.step()

    # ---- unsharded checkpoint round-trip ----
    with shared_tempdir() as tempdir:
        model_ckpt_path = f"{tempdir}/model"
        optim_ckpt_path = f"{tempdir}/optimizer"
        if use_async:
            model_ckpt_path = f"{model_ckpt_path}.safetensors"
            optim_ckpt_path = f"{optim_ckpt_path}.safetensors"

        run_model()
        booster.save_model(fsdp_model, model_ckpt_path, shard=False, use_async=use_async)
        booster.save_optimizer(optimizer, optim_ckpt_path, shard=False, use_async=use_async)
        booster.checkpoint_io._sync_d2h()
        booster.checkpoint_io._sync_io()

        full_msd = fsdp_model.state_dict()
        sharded_osd = optimizer.state_dict()
        import copy

        # Snapshot: optimizer.state_dict() aliases live tensors.
        sharded_osd = copy.deepcopy(sharded_osd)

        run_model()
        full_msd_updated = fsdp_model.state_dict()
        sharded_osd_updated = optimizer.state_dict()

        # Training must have changed both states...
        assert not compare_nested_dict(sharded_osd, sharded_osd_updated)
        assert not compare_nested_dict(full_msd_updated, full_msd)
        outputs_first = fsdp_model(inputs)
        assert criterion(outputs_first) != criterion(outputs)

        booster.load_model(fsdp_model, model_ckpt_path)
        booster.load_optimizer(optimizer, optim_ckpt_path)

        # ...and loading must have restored them.
        full_msd_restore = fsdp_model.state_dict()
        sharded_osd_restore = optimizer.state_dict()
        assert compare_nested_dict(sharded_osd, sharded_osd_restore)
        assert compare_nested_dict(full_msd_restore, full_msd)
        outputs_sec = fsdp_model(inputs)
        assert criterion(outputs_sec) == criterion(outputs)

    # ---- sharded checkpoint round-trip ----
    with shared_tempdir() as tempdir:
        model_ckpt_path = f"{tempdir}/model"
        optim_ckpt_path = f"{tempdir}/optimizer"

        run_model()
        booster.save_model(fsdp_model, model_ckpt_path, shard=True, use_async=use_async)
        booster.save_optimizer(optimizer, optim_ckpt_path, shard=True, use_async=use_async)
        booster.checkpoint_io._sync_d2h()
        booster.checkpoint_io._sync_io()

        full_msd = fsdp_model.unwrap().state_dict()
        full_osd = FSDP.full_optim_state_dict(optimizer.unwrap_model().unwrap(), optim=optimizer)
        import copy

        sharded_osd = copy.deepcopy(full_osd)

        run_model()
        full_msd_updated = fsdp_model.unwrap().state_dict()
        # NOTE: full_optim_state_dict is a collective; keep calling it on every
        # rank even though the result is only used via the loss checks below.
        full_osd_updated = FSDP.full_optim_state_dict(optimizer.unwrap_model().unwrap(), optim=optimizer)

        # cost much time led to timeout
        # assert not compare_nested_dict(full_osd_updated, sharded_osd)
        # assert not compare_nested_dict(full_msd_updated, full_msd)
        outputs_first = fsdp_model(inputs)
        assert criterion(outputs_first) != criterion(outputs)

        booster.load_model(fsdp_model, model_ckpt_path)
        booster.load_optimizer(optimizer, optim_ckpt_path)

        full_msd_restore = fsdp_model.unwrap().state_dict()
        sharded_osd_restore = FSDP.full_optim_state_dict(optimizer.unwrap_model().unwrap(), optim=optimizer)
        assert compare_nested_dict(sharded_osd, sharded_osd_restore)
        assert compare_nested_dict(full_msd_restore, full_msd)
        outputs_sec = fsdp_model(inputs)
        assert criterion(outputs_sec) == criterion(outputs)
def run_dist(rank, world_size, port):
    # init dist env
    colossalai.launch(rank=rank, world_size=world_size, port=port, host="localhost")
    check_torch_fsdp_ckpt()


@pytest.mark.skipif(version.parse(torch.__version__) < version.parse("1.12.0"), reason="requires torch1.12 or higher")
@rerun_if_address_is_in_use()
def test_torch_fsdp_ckpt():
    """Run the FSDP checkpoint-IO checks on 2 processes (torch >= 1.12 only)."""
    spawn(run_dist, 2)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_checkpoint_io/test_low_level_zero_checkpoint_io.py | tests/test_checkpoint_io/test_low_level_zero_checkpoint_io.py | from copy import deepcopy
from typing import Optional
import torch
import torch.distributed as dist
from peft import LoraConfig
from torchvision.models import resnet18
from utils import shared_tempdir
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import LowLevelZeroPlugin
from colossalai.nn.optimizer import HybridAdam
from colossalai.testing import (
check_state_dict_equal,
clear_cache_before_run,
parameterize,
rerun_if_address_is_in_use,
spawn,
)
from colossalai.zero import LowLevelZeroOptimizer
from tests.kit.model_zoo import model_zoo
# stage 1 and 2 process the optimizer/mode the same way
# only test 2 is fine
# stage 1 and 2 process the optimizer/mode the same way
# only test 2 is fine
@clear_cache_before_run()
@parameterize("stage", [2])
@parameterize("shard", [False, True])
@parameterize("offload", [False, True])
@parameterize("use_async", [False, True])
@parameterize("low_cpu_mem_mode", [False, True])
def check_low_level_zero_checkpointIO(stage: int, shard: bool, offload: bool, use_async: bool, low_cpu_mem_mode: bool):
    """Round-trip LowLevelZero checkpoints and verify master weights stay aligned with working params."""
    plugin = LowLevelZeroPlugin(stage=stage, max_norm=1.0, initial_scale=32, cpu_offload=offload)
    booster = Booster(plugin=plugin)
    model = resnet18()

    def criterion(x):
        return x.mean()

    optimizer = HybridAdam(model.parameters(), lr=0.001)
    model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)

    # One optimization step so the optimizer carries non-trivial state.
    x = torch.randn(1, 3, 224, 224, device="cuda")
    loss = criterion(model(x))
    booster.backward(loss, optimizer)
    optimizer.step()

    with shared_tempdir() as tempdir:
        model_ckpt_path = f"{tempdir}/model"
        optimizer_ckpt_path = f"{tempdir}/optimizer"
        # Unsharded checkpoints get an explicit extension; async ones use safetensors.
        if not shard:
            model_ckpt_path += ".safetensors" if use_async else ".pt"
            if use_async:
                optimizer_ckpt_path += ".safetensors"

        booster.save_model(
            model,
            model_ckpt_path,
            shard=shard,
            use_async=use_async,
        )
        # lr scheduler is tested in test_torch_ddp_checkpoint_io.py and low level zero does not change it, we can skip it here
        booster.save_optimizer(optimizer, optimizer_ckpt_path, shard=shard, use_async=use_async)
        booster.checkpoint_io._sync_d2h()
        booster.checkpoint_io._sync_io()
        dist.barrier()

        new_model = resnet18()
        new_optimizer = HybridAdam(new_model.parameters(), lr=0.001)
        new_model, new_optimizer, _, _, _ = booster.boost(new_model, new_optimizer)

        booster.load_model(new_model, model_ckpt_path, low_cpu_mem_mode=low_cpu_mem_mode)
        check_state_dict_equal(model.state_dict(), new_model.state_dict())

        # check master weight
        assert isinstance(new_optimizer, LowLevelZeroOptimizer)
        working_param_id_set = set(id(p) for p in new_model.parameters())
        for p_id, master_param in new_optimizer.working_to_master_param.items():
            assert p_id in working_param_id_set
            working_param = new_optimizer.master_to_working_param[id(master_param)]
            padding = new_optimizer.get_param_padding_size(working_param)
            padded_param = torch.nn.functional.pad(working_param.data.view(-1), (0, padding))
            working_shard = padded_param.chunk(dist.get_world_size())[dist.get_rank()]
            assert torch.equal(
                working_shard, master_param.data.view(-1).to(dtype=padded_param.dtype, device=padded_param.device)
            )

        booster.load_optimizer(new_optimizer, optimizer_ckpt_path, low_cpu_mem_mode=low_cpu_mem_mode)
        check_state_dict_equal(optimizer.optim.state_dict(), new_optimizer.optim.state_dict())
    torch.cuda.empty_cache()
def run_fn(stage, shard, offload, model_fn, data_gen_fn, output_transform_fn, lora_config=None) -> Optional[str]:
    """Run one LoRA checkpoint round-trip for a single model-zoo entry.

    Returns None on success.  NOTE(review): despite the ``Optional[str]``
    annotation, failures are currently re-raised instead of returned (the
    ``return repr(e)`` path is disabled below), so the caller's error
    bookkeeping never sees a string.
    """
    try:
        plugin = LowLevelZeroPlugin(stage=stage, max_norm=1.0, initial_scale=2**5, cpu_offload=offload)
        new_plugin = LowLevelZeroPlugin(stage=stage, max_norm=1.0, initial_scale=2**5, cpu_offload=offload)
        booster = Booster(plugin=plugin)
        new_booster = Booster(plugin=new_plugin)
        model = model_fn()
        optimizer = HybridAdam(model.parameters(), lr=1e-3)
        # Clone the model before LoRA is attached so the reload side starts clean.
        new_model = deepcopy(model)
        new_optimizer = HybridAdam(new_model.parameters(), lr=1e-3)
        model = booster.enable_lora(model, lora_config=lora_config)
        criterion = lambda x: x.mean()

        data = data_gen_fn()
        data = {
            k: v.to("cuda") if torch.is_tensor(v) or "Tensor" in v.__class__.__name__ else v for k, v in data.items()
        }

        model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)

        # One optimization step before checkpointing.
        output = output_transform_fn(model(**data))
        loss = criterion(output[list(output.keys())[0]])
        booster.backward(loss, optimizer)
        optimizer.step()

        with shared_tempdir() as tempdir:
            model_ckpt_path = f"{tempdir}/model"
            optimizer_ckpt_path = f"{tempdir}/optimizer"

            booster.save_lora_as_pretrained(model, model_ckpt_path)
            booster.save_optimizer(optimizer, optimizer_ckpt_path, shard=False)
            dist.barrier()

            new_model = new_booster.enable_lora(new_model, pretrained_dir=model_ckpt_path, lora_config=lora_config)
            new_model, new_optimizer, criterion, _, _ = new_booster.boost(new_model, new_optimizer, criterion)
            check_state_dict_equal(model.state_dict(), new_model.state_dict())

            # check master weight
            assert isinstance(new_optimizer, LowLevelZeroOptimizer)
            working_param_id_set = set(id(p) for p in new_model.parameters())
            for p_id, master_param in new_optimizer.working_to_master_param.items():
                assert p_id in working_param_id_set
                working_param = new_optimizer.master_to_working_param[id(master_param)]
                padding = new_optimizer.get_param_padding_size(working_param)
                padded_param = torch.nn.functional.pad(working_param.data.view(-1), (0, padding))
                working_shard = padded_param.chunk(dist.get_world_size())[dist.get_rank()]
                assert torch.equal(
                    working_shard, master_param.data.view(-1).to(dtype=padded_param.dtype, device=padded_param.device)
                )

            new_booster.load_optimizer(new_optimizer, optimizer_ckpt_path)
            check_state_dict_equal(optimizer.optim.state_dict(), new_optimizer.optim.state_dict())
    except Exception as e:
        # return repr(e)
        raise e
@clear_cache_before_run()
@parameterize("stage", [2])
@parameterize("shard", [True, False])
@parameterize("offload", [False, True])
@parameterize("model_name", ["transformers_llama"])
def check_low_level_zero_lora_checkpointIO(
    stage: int, shard: bool, offload: bool, model_name: str, early_stop: bool = True
):
    """Run LoRA checkpoint-IO round-trip checks over the llama sub-registry.

    Per-model errors returned by ``run_fn`` are collected; when ``early_stop``
    is set the loop aborts at the first failure. Rank 0 prints a summary and
    the final assert fails if any model failed.
    """
    passed_models = []
    failed_info = {}  # (model_name, error) pair
    sub_model_zoo = model_zoo.get_sub_registry(model_name)
    for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
        # only the plain llama entry is exercised here
        if name != "transformers_llama":
            continue
        task_type = None
        if name == "transformers_llama_for_causal_lm":
            task_type = "CAUSAL_LM"
        if name == "transformers_llama_for_sequence_classification":
            task_type = "SEQ_CLS"
        lora_config = LoraConfig(task_type=task_type, r=8, lora_alpha=32, lora_dropout=0.1)
        err = run_fn(stage, shard, offload, model_fn, data_gen_fn, output_transform_fn, lora_config)
        torch.cuda.empty_cache()
        if err is None:
            passed_models.append(name)
        else:
            failed_info[name] = err
            if early_stop:
                break
    if dist.get_rank() == 0:
        print(f"Passed models({len(passed_models)}): {passed_models}\n\n")
        print(f"Failed models({len(failed_info)}): {list(failed_info.keys())}\n\n")
    assert len(failed_info) == 0, "\n".join([f"{k}: {v}" for k, v in failed_info.items()])
def run_dist(rank, world_size, port):
    # Per-process entry point: init the distributed env, then run both suites.
    colossalai.launch(rank=rank, world_size=world_size, port=port, host="localhost")
    check_low_level_zero_checkpointIO()
    check_low_level_zero_lora_checkpointIO()
    torch.cuda.empty_cache()


@rerun_if_address_is_in_use()
@clear_cache_before_run()
def test_low_level_zero_checkpointIO():
    # Spawn 2 worker processes.
    spawn(run_dist, 2)


if __name__ == "__main__":
    test_low_level_zero_checkpointIO()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_moe/test_kernel.py | tests/test_moe/test_kernel.py | import os
import pytest
import torch
from colossalai.accelerator import get_accelerator
from colossalai.moe._operation import MoeCombine, MoeDispatch, moe_cumsum
# Fixture sizes for the MoE kernel tests.
NUM_EXPERTS = 4
BATCH_SIZE = 4
SEQ_LEN = 4
# Directory holding pre-dumped routing-result tensors (set via environment).
MOE_TENSOR_PATH = os.getenv("MOE_TENSOR_PATH")
def check_equal(tensor_a, tensor_b, atol=1e-06):
    """Assert two tensors are element-wise equal within an absolute tolerance.

    Uses ``rtol=0`` so the comparison is purely absolute.
    """
    # `torch.allclose` already returns a bool; comparing it with `is True`
    # was redundant and non-idiomatic.
    assert torch.allclose(tensor_a, tensor_b, rtol=0, atol=atol)
def run_moe_cumsum():
    """Check that the fused ``moe_cumsum`` kernel matches the plain PyTorch path."""
    test_mask = torch.tensor(
        [
            [0, 1, 0, 0],
            [1, 0, 0, 0],
            [0, 1, 0, 0],
            [1, 0, 0, 0],
        ],
        dtype=torch.int32,
    ).to("cuda")
    out_no_kernel = moe_cumsum(test_mask, use_kernel=False)
    out_kernel = moe_cumsum(test_mask, use_kernel=True)
    # (removed a leftover debug print of the two output dtypes)
    # the non-kernel path may return a different integer dtype; cast before comparing
    check_equal(out_no_kernel.to(torch.int32), out_kernel)
def run_moe_dispatch_combine_fwd_bwd(data_type=torch.float32, hidden_size=128, num_experts=4):
    """Compare the fused MoE dispatch/combine autograd ops against the plain
    matmul reference path, in both forward and backward.

    Routing results are loaded from pre-dumped tensors under ``MOE_TENSOR_PATH``;
    fp16 uses a looser tolerance (1e-2) than fp32.
    """
    tokens = torch.randn(
        BATCH_SIZE, hidden_size, dtype=data_type, device=get_accelerator().get_current_device(), requires_grad=True
    )
    # use kernel
    route_result_list_kernel = torch.load(f"{MOE_TENSOR_PATH}/True_4_{data_type}.pt")
    # dispatch
    dispatch_data_kernel = MoeDispatch.apply(tokens, *route_result_list_kernel[1:])
    dispatch_data_kernel = dispatch_data_kernel.reshape(num_experts, -1, hidden_size)
    # combine (identity "expert": feed the dispatched tokens straight back)
    expert_output = dispatch_data_kernel.reshape(-1, hidden_size)
    ans_kernel = MoeCombine.apply(expert_output, *route_result_list_kernel)
    # no kernel
    route_result_list_no_kernel = torch.load(f"{MOE_TENSOR_PATH}/False_2_{data_type}.pt")
    # dispatch via dense mask matmul
    sec_mask_f = route_result_list_no_kernel[1].type_as(tokens)
    dispatch_data_no_kernel = torch.matmul(sec_mask_f.permute(1, 2, 0), tokens)
    # combine via dense weight matmul
    combine_weights = route_result_list_no_kernel[0].type_as(tokens)
    combine_weights = combine_weights.view(combine_weights.shape[0], -1)
    expert_output = expert_output.view(-1, expert_output.shape[-1])
    ans_no_kernel = torch.matmul(combine_weights, expert_output)
    # check fwd
    if data_type == torch.float32:
        check_equal(dispatch_data_kernel.reshape(dispatch_data_no_kernel.shape), dispatch_data_no_kernel)
    else:
        check_equal(dispatch_data_kernel.reshape(dispatch_data_no_kernel.shape), dispatch_data_no_kernel, 1e-2)
    if data_type == torch.float32:
        check_equal(ans_kernel, ans_no_kernel)
    else:
        check_equal(ans_kernel, ans_no_kernel, 1e-2)
    # check bwd: backprop the same random grad through both paths and compare
    # the gradients accumulated on `tokens`
    out_shape = ans_kernel.shape
    grad = torch.randn(out_shape, device=get_accelerator().get_current_device())
    ans_kernel.backward(grad, retain_graph=True)
    grad_kernel = tokens.grad.data.clone()
    tokens.grad.zero_()
    ans_no_kernel.backward(grad)  # get gradient
    grad_no_kernel = tokens.grad.data.clone()
    tokens.grad.zero_()
    if data_type == torch.float32:
        check_equal(grad_no_kernel, grad_kernel)
    else:
        check_equal(grad_no_kernel, grad_kernel, 1e-2)
@pytest.mark.parametrize("data_type", [torch.float32, torch.float16])
def test_moe_kernel(data_type):
    # fixed seed so the random tokens/grads are reproducible across runs
    torch.manual_seed(1024)
    run_moe_cumsum()
    run_moe_dispatch_combine_fwd_bwd(data_type=data_type)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_moe/moe_utils.py | tests/test_moe/moe_utils.py | import os
import traceback
from contextlib import contextmanager
from time import sleep
from typing import Callable, List, Optional
import torch
import torch.distributed as dist
from torch.utils._pytree import tree_map
def assert_loose_close(a, b, dtype: torch.dtype = torch.float32, name=""):
    """Assert ``a`` and ``b`` are close under dtype-dependent tolerances (see ``loose_close``)."""
    assert loose_close(a, b, dtype), f"{name} not close {a.mean()} {b.mean()}"
def loose_close(a, b, dtype: torch.dtype = torch.float32):
    """Return True when ``a`` and ``b`` are allclose under tolerances chosen for ``dtype``.

    fp16 and bf16 get progressively looser tolerances; any other dtype must be fp32.
    """
    if dtype is torch.float16:
        rtol, atol = 5e-2, 5e-4
    elif dtype is torch.bfloat16:
        rtol, atol = 4e-3, 4e-3
    else:
        assert dtype is torch.float32
        rtol, atol = 1e-05, 1e-08
    lhs = a.detach().to(dtype)
    rhs = b.detach().to(dtype).to(lhs.device)
    return torch.allclose(lhs, rhs, rtol=rtol, atol=atol)
def check_model_equal(model1, model2, dtype):
    """Assert two models have identical parameter names and loosely equal values.

    ``dtype`` selects the tolerance used by ``assert_loose_close``.
    """
    assert set(model1.state_dict().keys()) == set(model2.state_dict().keys())
    # the enumerate index in the original was unused; plain zip is enough
    for (name, p1), p2 in zip(model1.named_parameters(), model2.parameters()):
        assert_loose_close(p1, p2, dtype, name=name)
@contextmanager
def distributed_debug_mode(num_stacks: int = 1, funcs_to_patch: Optional[List[Callable]] = None, enable=True):
    """Context manager that monkey-patches ``torch.distributed`` collectives so
    every call logs its rank, arguments and a slice of the call stack before
    executing.

    When enabled it asserts ``CUDA_LAUNCH_BLOCKING=1`` is set. Patches are
    always restored on exit, even if the body raises.
    """
    if enable:
        assert (
            os.environ.get("CUDA_LAUNCH_BLOCKING", "0") == "1"
        ), f"Expect CUDA_LAUNCH_BLOCKING=1, got {os.environ.get('CUDA_LAUNCH_BLOCKING', '0')}"
    if funcs_to_patch is None:
        # default: instrument all the common collectives
        funcs_to_patch = [
            dist.all_reduce,
            dist.all_reduce_coalesced,
            dist.all_gather,
            dist.all_gather_coalesced,
            dist.all_gather_into_tensor,
            dist.all_to_all,
            dist.all_to_all_single,
            dist.reduce_scatter,
        ]

    original_funcs = {}
    patched_funcs = {}

    def make_patched(func):
        # wrap one collective: log, then delegate to the original
        def patched_func(*args, **kwargs):
            stack = traceback.format_stack()

            def format_node(node):
                # render tensors as their shapes to keep the log readable
                if isinstance(node, torch.Tensor):
                    return f"{node.shape}"
                elif isinstance(node, list):
                    return f"[{', '.join([format_node(n) for n in node])}]"
                return str(node)

            args_str, kwargs_str = tree_map(format_node, (args, kwargs))
            en = len(stack) - 1
            st = max(0, en - num_stacks)
            # barrier + rank-staggered sleep so each rank's print comes out
            # separately instead of interleaved
            dist.barrier()
            sleep(0.001 * dist.get_rank())
            print(
                f"[Rank {dist.get_rank()}-{func.__name__}-{dist.get_process_group_ranks(kwargs.get('group', dist.group.WORLD))}]: Called from {''.join(stack[st:en])}args={args_str} kwargs={kwargs_str}\n"
            )
            dist.barrier()
            return func(*args, **kwargs)

        return patched_func

    if enable:
        for func in funcs_to_patch:
            original_funcs[func.__name__] = getattr(dist, func.__name__)
            patched_funcs[func.__name__] = make_patched(func)
            setattr(dist, func.__name__, patched_funcs[func.__name__])
    try:
        yield
    finally:
        # restore the originals even on exception
        for func_name, original_func in original_funcs.items():
            setattr(dist, func_name, original_func)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_moe/test_deepseek_layer.py | tests/test_moe/test_deepseek_layer.py | from copy import deepcopy
import pytest
import torch
import torch.distributed as dist
from torch.testing import assert_close
from transformers import AutoConfig, AutoModel
import colossalai
from colossalai.booster.plugin.moe_hybrid_parallel_plugin import MoeHybridParallelPlugin
from colossalai.shardformer.modeling.deepseek import EPDeepseekMoE
from colossalai.testing.utils import spawn
# toy layer dimensions for the deepseek MoE comparison
tokens, n_experts = 7, 4
hidden_size = 8
top_k = 2


def check_deepseek_moe_layer():
    """Compare ``EPDeepseekMoE`` against the native HF deepseek MoE block,
    forward and backward, on a tiny random config."""
    torch.cuda.set_device(dist.get_rank())
    plugin = MoeHybridParallelPlugin(
        precision="bf16",
        tp_size=1,
        pp_size=1,
        zero_stage=1,
        ep_size=dist.get_world_size(),
    )
    config = AutoConfig.from_pretrained(
        "deepseek-ai/deepseek-moe-16b-base",
        num_hidden_layers=1,
        n_routed_experts=n_experts,
        num_experts_per_tok=top_k,
        hidden_size=hidden_size,
        intermediate_size=hidden_size * 2,
        first_k_dense_replace=0,
        num_attention_heads=2,
        trust_remote_code=True,
    )
    torch.manual_seed(0)
    # get the moe layer in auto model
    orig_model = AutoModel.from_config(config, trust_remote_code=True).layers[0].mlp.cuda()
    x = torch.rand(1, tokens, hidden_size, requires_grad=True).cuda()
    orig_output = orig_model(x)
    model = deepcopy(orig_model)
    model = EPDeepseekMoE.from_native_module(
        model,
        ep_group=plugin.ep_group,
        moe_dp_group=plugin.moe_dp_group,
        tp_group=plugin.tp_group,
    )
    ep_output = model(x)
    assert_close(orig_output, ep_output)
    orig_loss = orig_output.mean()
    orig_loss.backward()
    ep_loss = ep_output.mean()
    ep_loss.backward()
    assert_close(orig_loss, ep_loss)
    name_to_p = {n: p for n, p in orig_model.named_parameters()}
    # compare grads only where the EP-sharded parameter actually has one
    for n, ep_p in model.named_parameters():
        p = name_to_p[n]
        if ep_p.grad is not None:
            assert_close(p.grad, ep_p.grad)
def run_dist(rank: int, world_size: int, port: int):
    # per-process entry point
    colossalai.launch(rank, world_size, "localhost", port)
    check_deepseek_moe_layer()


# skipped in CI: covered by the corresponding shardformer tests
@pytest.mark.skip("tested in corresponding sharderformer")
@pytest.mark.parametrize("world_size", [2])
def test_deepseek_moe_layer(world_size: int):
    spawn(run_dist, world_size)


if __name__ == "__main__":
    test_deepseek_moe_layer(2)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_moe/test_mixtral_layer.py | tests/test_moe/test_mixtral_layer.py | from copy import deepcopy
import pytest
import torch
import torch.distributed as dist
from torch.testing import assert_close
from transformers.models.mixtral.configuration_mixtral import MixtralConfig
from transformers.models.mixtral.modeling_mixtral import MixtralSparseMoeBlock
import colossalai
from colossalai.booster.plugin.moe_hybrid_parallel_plugin import MoeHybridParallelPlugin
from colossalai.shardformer.modeling.mixtral import EPMixtralSparseMoeBlock
from colossalai.testing.utils import spawn
# toy layer dimensions for the mixtral MoE comparison
tokens, n_experts = 7, 4
hidden_size = 8
top_k = 2


def check_mixtral_moe_layer():
    """Compare ``EPMixtralSparseMoeBlock`` against the native HF block,
    forward and backward, on a tiny random config."""
    torch.cuda.set_device(dist.get_rank())
    plugin = MoeHybridParallelPlugin(
        precision="bf16",
        tp_size=1,
        pp_size=1,
        zero_stage=1,
        ep_size=dist.get_world_size(),
    )
    config = MixtralConfig(
        hidden_size=hidden_size,
        intermediate_size=hidden_size * 2,
        num_local_experts=n_experts,
        num_experts_per_tok=top_k,
    )
    torch.manual_seed(0)
    orig_model = MixtralSparseMoeBlock(config).cuda()
    x = torch.rand(1, tokens, hidden_size, requires_grad=True).cuda()
    orig_output, orig_logits = orig_model(x)
    model = deepcopy(orig_model)
    model = EPMixtralSparseMoeBlock.from_native_module(
        model,
        ep_group=plugin.ep_group,
        tp_group=plugin.tp_group,
        moe_dp_group=plugin.moe_dp_group,
    )
    ep_output, ep_logits = model(x)
    # router logits and block output must both match the native module
    assert_close(orig_logits, ep_logits)
    assert_close(orig_output, ep_output)
    orig_loss = orig_output.mean()
    orig_loss.backward()
    ep_loss = ep_output.mean()
    ep_loss.backward()
    assert_close(orig_loss, ep_loss)
    name_to_p = {n: p for n, p in orig_model.named_parameters()}
    # compare grads only where the EP-sharded parameter actually has one
    for n, ep_p in model.named_parameters():
        p = name_to_p[n]
        if ep_p.grad is not None:
            assert_close(p.grad, ep_p.grad)
def run_dist(rank: int, world_size: int, port: int):
    # per-process entry point
    colossalai.launch(rank, world_size, "localhost", port)
    check_mixtral_moe_layer()


# skipped in CI: covered by the corresponding shardformer tests
@pytest.mark.skip("tested in corresponding sharderformer")
@pytest.mark.parametrize("world_size", [2])
def test_mixtral_moe_layer(world_size: int):
    spawn(run_dist, world_size)


if __name__ == "__main__":
    test_mixtral_moe_layer(2)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_moe/test_moe_ep_zero.py | tests/test_moe/test_moe_ep_zero.py | from copy import deepcopy
import pytest
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from transformers.models.mixtral.configuration_mixtral import MixtralConfig
from transformers.models.mixtral.modeling_mixtral import MixtralModel
import colossalai
from colossalai.booster.booster import Booster
from colossalai.booster.plugin.moe_hybrid_parallel_plugin import MoeHybridParallelPlugin
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
from colossalai.testing.random import seed_all
from tests.test_moe.moe_utils import assert_loose_close
# tiny Mixtral dimensions for the EP+ZeRO vs DDP comparison
NUM_BATCH = 4
NUM_TOK_PER_BATCH, NUM_EXPERTS = 7, 4
HIDDEN_SIZE_PER_HEAD = 4
NUM_HEADS = 2
TOP_K = 1


@parameterize("stage", [1])
@parameterize("ep_size", [2, 4])
def run_zero_with_original_model(stage: int, ep_size: int):
    """Train a tiny Mixtral for 2 steps under MoeHybridParallelPlugin (EP + ZeRO)
    and under plain torch DDP, checking outputs, gradients and updated
    parameters stay loosely equal."""
    dtype = torch.bfloat16
    rank = torch.distributed.get_rank()
    torch.cuda.set_device(dist.get_rank())
    plugin = MoeHybridParallelPlugin(
        pp_size=1, tp_size=1, ep_size=ep_size, zero_stage=stage, overlap_communication=False, initial_scale=1
    )
    booster = Booster(plugin=plugin)
    # same seed on every rank so both models start from identical weights
    seed_all(10086)
    config = MixtralConfig(
        hidden_size=HIDDEN_SIZE_PER_HEAD * NUM_HEADS,
        intermediate_size=HIDDEN_SIZE_PER_HEAD * NUM_HEADS * 2,
        num_hidden_layers=2,
        num_attention_heads=NUM_HEADS,
        num_key_value_heads=NUM_HEADS,
        num_local_experts=NUM_EXPERTS,
        num_experts_per_tok=TOP_K,
    )
    torch_model = MixtralModel(config).to(dtype).cuda()
    zero_model = deepcopy(torch_model).to(dtype)
    zero_optimizer = torch.optim.SGD(zero_model.parameters(), lr=1)
    zero_model, zero_optimizer, _, _, _ = booster.boost(zero_model, zero_optimizer)
    ddp_model = DDP(
        torch_model.cuda(),
        process_group=plugin.dp_group,
        find_unused_parameters=True,  # important for torch ddp, not all experts are routed
    ).cuda()
    ddp_optimizer = torch.optim.SGD(ddp_model.parameters(), lr=1)
    # create different input per rank
    seed_all(1453 + rank)
    ddp_model.train()
    zero_model.train()
    for _ in range(2):
        # zero-dp forward
        input_data = torch.rand(
            NUM_BATCH, NUM_TOK_PER_BATCH, HIDDEN_SIZE_PER_HEAD * NUM_HEADS, requires_grad=True
        ).cuda()
        zero_output = zero_model(inputs_embeds=input_data.to(dtype)).last_hidden_state.mean()
        # zero-dp backward
        zero_optimizer.backward(zero_output)
        # torch-ddp forward
        ddp_output = ddp_model(inputs_embeds=input_data.to(dtype)).last_hidden_state.mean()
        assert_loose_close(zero_output, ddp_output, dtype=dtype)
        # torch-ddp backward
        ddp_output.backward()
        # check grad
        name_to_p = {n: p for n, p in ddp_model.named_parameters()}
        for n, p in zero_model.named_parameters():
            zero_grad = zero_optimizer.get_param_grad(p)
            if name_to_p[n].grad is None:
                # DDP param got no grad this step; zero-fill so the later
                # parameter comparison still lines up
                name_to_p[n].grad = torch.zeros_like(name_to_p[n].data)
                continue
            assert_loose_close(zero_grad, name_to_p[n].grad, dtype=dtype, name=n)
        # zero-dp step
        zero_optimizer.step()
        # original model step
        ddp_optimizer.step()
        # check updated param
        for n, p in zero_model.named_parameters():
            assert_loose_close(p.data, name_to_p[n].data, dtype=dtype, name=n)
    print(f"{dist.get_rank()} test passed")
def run_dist(rank, world_size, port):
    # per-process entry point
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    run_zero_with_original_model()


# skipped in CI: covered by the corresponding shardformer tests
@pytest.mark.skip("tested in corresponding sharderformer")
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [4])
@rerun_if_address_is_in_use()
def test_moe_ep_zero(world_size):
    spawn(run_dist, world_size)


if __name__ == "__main__":
    test_moe_ep_zero(world_size=4)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_moe/test_moe_checkpoint.py | tests/test_moe/test_moe_checkpoint.py | import os
import tempfile
from contextlib import nullcontext
from copy import deepcopy
import pytest
import torch
import torch.distributed as dist
from torch.optim import SGD, Adam
from transformers.models.mixtral.configuration_mixtral import MixtralConfig
from transformers.models.mixtral.modeling_mixtral import MixtralForCausalLM
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin.moe_hybrid_parallel_plugin import MoeHybridParallelPlugin
from colossalai.testing import parameterize, spawn
from colossalai.testing.random import seed_all
from colossalai.testing.utils import spawn
from tests.test_moe.moe_utils import check_model_equal
# toy model dimensions for the MoE checkpoint round-trip test
tokens, n_experts = 7, 4
hidden_size = 8
top_k = 2
def get_optimizer_snapshot(optim):
    """Capture a deep copy of an optimizer's state and param groups.

    Parameters are represented by ``id()`` so the snapshot can be compared
    later without holding tensor references.
    """
    snapshot_state = {id(param): deepcopy(val) for param, val in optim.state.items()}
    snapshot_groups = []
    for group in optim.param_groups:
        rebuilt = {"params": [id(p) for p in group["params"]]}
        rebuilt.update((key, val) for key, val in group.items() if key != "params")
        snapshot_groups.append(rebuilt)
    return {
        "state": snapshot_state,
        "param_groups": snapshot_groups,
    }
def check_optimizer_snapshot_equal(snapshot1, snapshot2, param2name, moe_dp_group=None):
    """Assert two snapshots from ``get_optimizer_snapshot`` are identical.

    Param-group entries and non-tensor state must match exactly; mismatched
    tensor states are counted and reported in a single AssertionError.
    """
    assert len(snapshot1["param_groups"]) == len(snapshot2["param_groups"])
    for group1, group2 in zip(snapshot1["param_groups"], snapshot2["param_groups"]):
        assert set(group1.keys()) == set(group2.keys())
        for key in group1.keys():
            assert group1[key] == group2[key]
    # check state
    assert set(snapshot1["state"].keys()) == set(
        snapshot2["state"].keys()
    ), f"{snapshot1['state'].keys()}, {snapshot2['state'].keys()}"
    count = 0
    for pid, state1 in snapshot1["state"].items():
        state2 = snapshot2["state"][pid]
        assert set(state1.keys()) == set(state2.keys())
        for key, val1 in state1.items():
            val2 = state2[key]
            if isinstance(val1, torch.Tensor):
                if not torch.equal(val1, val2):
                    count += 1
            else:
                assert val1 == val2
    if count:
        raise AssertionError(f"A total of {count} optim states are not equal")
@parameterize(
    "test_config",
    [
        [
            MixtralConfig(
                hidden_size=hidden_size,
                intermediate_size=hidden_size * 2,
                num_local_experts=n_experts,
                num_experts_per_tok=top_k,
                num_attention_heads=2,
                num_key_value_heads=2,
                num_hidden_layers=2,
            ),
            MixtralForCausalLM,
        ],
    ],
)
def check_moe_checkpoint(test_config):
    """Round-trip a tiny Mixtral through booster save/load under EP + ZeRO + PP.

    Rank 0 owns the temp directory; its path is broadcast so all ranks write
    into the same location. Barriers keep the ranks in lockstep around each
    save/load step.
    """
    dtype, precision = torch.float16, "fp16"
    config, model_cls = test_config
    torch.cuda.set_device(dist.get_rank())
    context = tempfile.TemporaryDirectory() if dist.get_rank() == 0 else nullcontext()
    with context as f:
        if dist.get_rank() == 0:
            broadcast_objects = [f]  # any picklable object
        else:
            broadcast_objects = [None]
        dist.broadcast_object_list(broadcast_objects, src=0)
        input_ids = torch.randint(0, 100, (2, tokens)).cuda()
        orig_model = model_cls(config).cuda().to(dtype)
        seed_all(10086)
        model = deepcopy(orig_model)
        optimizer = SGD(model.parameters(), lr=1e-3)
        plugin = MoeHybridParallelPlugin(
            pp_size=2, ep_size=2, tp_size=1, microbatch_size=1, zero_stage=1, precision=precision
        )
        booster = Booster(plugin=plugin)
        model, optimizer, *_ = booster.boost(model=model, optimizer=optimizer)
        # initialize grads
        data_iter = iter(
            [{"input_ids": input_ids, "attention_mask": torch.ones_like(input_ids), "labels": input_ids.clone()}]
        )
        booster.execute_pipeline(
            data_iter,
            model,
            lambda outputs, inputs: outputs.loss,
            optimizer,
        )
        tmpdirname = broadcast_objects[0]
        model_dir = os.path.join(tmpdirname, "mixtral_model")
        hf_model_dir = os.path.join(tmpdirname, "mixtral_hf_model")
        optim_dir = os.path.join(tmpdirname, "mixtral_optim")
        booster.save_model(model, model_dir, shard=True)
        dist.barrier()
        if dist.get_rank() == 0:
            # reload via plain HF from_pretrained and compare against the original
            saved_model = model_cls.from_pretrained(model_dir).cuda().to(dtype)
            check_model_equal(orig_model, saved_model, dtype=dtype)
            saved_model.save_pretrained(hf_model_dir)
        dist.barrier()
        # check load model
        new_model = model_cls(config).cuda().to(dtype)
        new_optimizer = Adam(new_model.parameters(), lr=1e-3)
        new_model, new_optimizer, *_ = booster.boost(model=new_model, optimizer=new_optimizer)
        booster.load_model(new_model, hf_model_dir)
        check_model_equal(model, new_model, dtype=dtype)
        # check save optimizer (mutate lr first so the round-trip is non-trivial)
        optimizer.step()
        for group in optimizer.param_groups:
            group["lr"] = 0.1
        snapshot = get_optimizer_snapshot(optimizer.unwrap())
        booster.save_optimizer(optimizer, optim_dir, shard=True)
        dist.barrier()
        # reset optimizer state
        for state in optimizer.unwrap().state.values():
            for v in state.values():
                if isinstance(v, torch.Tensor):
                    v.zero_()
        booster.load_optimizer(optimizer, optim_dir)
        loaded_snapshot = get_optimizer_snapshot(optimizer.unwrap())
        check_optimizer_snapshot_equal(snapshot, loaded_snapshot, None, model)
        # Ensure rank 0 waits for all other ranks to finish
        dist.barrier()
def run_dist(rank: int, world_size: int, port: int):
    # per-process entry point
    colossalai.launch(rank, world_size, "localhost", port)
    check_moe_checkpoint()


# Test EP + ZeRO + PP
@pytest.mark.parametrize("world_size", [4])
def test_mixtral_moe_layer(world_size: int):
    spawn(run_dist, world_size)


if __name__ == "__main__":
    test_mixtral_moe_layer(4)
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_moe/test_moe_ep_tp.py | tests/test_moe/test_moe_ep_tp.py | from copy import deepcopy
import pytest
import torch
import torch.distributed as dist
from transformers.models.mixtral.configuration_mixtral import MixtralConfig
from transformers.models.mixtral.modeling_mixtral import MixtralModel
import colossalai
from colossalai.booster.booster import Booster
from colossalai.booster.plugin import HybridParallelPlugin
from colossalai.booster.plugin.moe_hybrid_parallel_plugin import MoeHybridParallelPlugin
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
from colossalai.testing.random import seed_all
from tests.test_moe.moe_utils import assert_loose_close
# tiny Mixtral dimensions for the EP+TP vs TP-only comparison
NUM_BATCH = 4
NUM_TOK_PER_BATCH, NUM_EXPERTS = 7, 4
HIDDEN_SIZE_PER_HEAD = 4
NUM_HEADS = 4
TOP_K = 2


@parameterize("stage", [1])
@parameterize("ep_size", [2])
def run_zero_with_original_model(stage: int, ep_size: int):
    """Train a tiny Mixtral under MoeHybridParallelPlugin (EP + TP + ZeRO) and
    under plain HybridParallelPlugin (TP + ZeRO), checking that outputs,
    gradients and updated parameters stay loosely equal.

    Shape-mismatched (sharded/sliced) MoE params are skipped — see TODOs.
    """
    tp_size = dist.get_world_size() // ep_size
    dtype = torch.bfloat16
    rank = torch.distributed.get_rank()
    torch.cuda.set_device(dist.get_rank())
    # same seed on every rank so both models start from identical weights
    seed_all(10086)
    config = MixtralConfig(
        hidden_size=HIDDEN_SIZE_PER_HEAD * NUM_HEADS,
        intermediate_size=HIDDEN_SIZE_PER_HEAD * NUM_HEADS * 2,
        num_hidden_layers=2,
        num_attention_heads=NUM_HEADS,
        num_key_value_heads=NUM_HEADS,
        num_local_experts=NUM_EXPERTS,
        num_experts_per_tok=TOP_K,
    )
    torch_model = MixtralModel(config).to(dtype).cuda()
    zero_model = deepcopy(torch_model).to(dtype)
    zero_optimizer = torch.optim.SGD(zero_model.parameters(), lr=1)
    moe_booster = Booster(
        plugin=MoeHybridParallelPlugin(
            tp_size=tp_size,
            moe_tp_size=tp_size,
            pp_size=1,
            ep_size=ep_size,
            zero_stage=stage,
            overlap_communication=False,
            initial_scale=1,
        )
    )
    zero_model, zero_optimizer, _, _, _ = moe_booster.boost(zero_model, zero_optimizer)
    hybird_booster = Booster(
        plugin=HybridParallelPlugin(
            tp_size=tp_size,
            pp_size=1,
            zero_stage=stage,
            overlap_communication=False,
            initial_scale=1,
        )
    )
    hybrid_model, hybrid_optimizer, _, _, _ = hybird_booster.boost(
        torch_model, torch.optim.SGD(torch_model.parameters(), lr=1)
    )
    # create different input per rank
    seed_all(1453 + rank)
    hybrid_model.train()
    zero_model.train()
    for _ in range(2):
        # zero-dp forward
        input_data = torch.rand(
            NUM_BATCH, NUM_TOK_PER_BATCH, HIDDEN_SIZE_PER_HEAD * NUM_HEADS, requires_grad=True
        ).cuda()
        zero_output = zero_model(inputs_embeds=input_data.to(dtype)).last_hidden_state.mean()
        # zero-dp backward
        zero_optimizer.backward(zero_output)
        # torch-ddp forward
        hybrid_output = hybrid_model(inputs_embeds=input_data.to(dtype)).last_hidden_state.mean()
        assert_loose_close(zero_output, hybrid_output, dtype=dtype)
        # torch-ddp backward
        hybrid_optimizer.backward(hybrid_output)
        # check grad
        name_to_p = {n: p for n, p in hybrid_model.named_parameters()}
        for n, p in zero_model.named_parameters():
            zero_grad = zero_optimizer.get_param_grad(p)
            if name_to_p[n].grad is None:
                name_to_p[n].grad = torch.zeros_like(name_to_p[n])
                continue
            if zero_grad.shape != name_to_p[n].grad.shape:  # TODO check sharded and sliced moe
                continue
            assert_loose_close(zero_grad, name_to_p[n].grad, dtype=dtype, name=n)
        # zero-dp step
        zero_optimizer.step()
        # original model step
        hybrid_optimizer.step()
        # check updated param
        for n, p in zero_model.named_parameters():
            if p.data.shape != name_to_p[n].data.shape:  # TODO check sharded and sliced moe
                continue
            assert_loose_close(p.data, name_to_p[n].data, dtype=dtype, name=n)
    print(f"{dist.get_rank()} test passed")
def run_dist(rank, world_size, port):
    # per-process entry point
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    run_zero_with_original_model()


# skipped in CI: covered by the corresponding shardformer tests
@pytest.mark.skip("tested in corresponding sharderformer")
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [4])
@rerun_if_address_is_in_use()
def test_moe_ep_tp(world_size):
    spawn(run_dist, world_size)


if __name__ == "__main__":
    test_moe_ep_tp(world_size=4)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_device/test_extract_alpha_beta.py | tests/test_device/test_extract_alpha_beta.py | import pytest
from colossalai.device import AlphaBetaProfiler
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
def check_extract_alpha_beta(rank, world_size, port, physical_devices):
    """Worker: profile the device mesh and sanity-check alpha/beta magnitudes."""
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    profiler = AlphaBetaProfiler(physical_devices)
    mesh_alpha, mesh_beta = profiler.extract_alpha_beta_for_device_mesh()
    # latencies (alpha) and inverse bandwidths (beta) must be positive and tiny
    for alpha in mesh_alpha:
        assert 0 < alpha < 1e-3
    for beta in mesh_beta:
        assert 0 < beta < 1e-10
# skipped: profiled latencies/bandwidths depend on the CI hardware
@pytest.mark.skip(reason="Skip because assertion may fail for CI devices")
@pytest.mark.dist
@parameterize("physical_devices", [[0, 1, 2, 3], [0, 3]])
@rerun_if_address_is_in_use()
def test_profile_alpha_beta(physical_devices):
    spawn(check_extract_alpha_beta, 4, physical_devices=physical_devices)


if __name__ == "__main__":
    test_profile_alpha_beta()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_device/test_device_mesh.py | tests/test_device/test_device_mesh.py | import pytest
import torch
import torch.distributed as dist
import colossalai
from colossalai.device.device_mesh import DeviceMesh
from colossalai.testing import rerun_if_address_is_in_use, spawn
def test_device_mesh():
    """Check global-rank <-> logical-coordinate mapping on a 4x4 mesh."""
    physical_mesh_id = torch.arange(0, 16)
    mesh_shape = (4, 4)
    # logical layout:
    # [[ 0,  1,  2,  3],
    #  [ 4,  5,  6,  7],
    #  [ 8,  9, 10, 11],
    #  [12, 13, 14, 15]]
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
    assert device_mesh.global_rank_to_local_rank(5) == [1, 1]
    assert device_mesh.global_rank_to_local_rank(11) == [2, 3]
    assert device_mesh.get_ranks_in_process_group(axis=1, global_rank=2) == [0, 1, 2, 3]
def check_1d_device_mesh():
    """Check a 1D DeviceMesh built from the global (world) process group.

    Such a mesh is marked initialized but carries no logical mesh id.
    """
    process_group = dist.GroupMember.WORLD
    device_mesh = DeviceMesh.from_process_group(process_group)

    # checks (the original asserted `is_initialized` twice; deduplicated)
    assert device_mesh.shape == [4]
    assert len(device_mesh.get_process_group_for_all_axes().keys()) == 1, "Expected 1 axis for the process group dict"
    assert device_mesh.get_process_group(axis=0) == process_group, "Expected world process group"
    assert device_mesh.is_initialized
    assert device_mesh.num_devices == 4
    assert device_mesh.logical_mesh_id is None
    assert device_mesh._is_init_from_process_group
def check_2d_device_mesh():
    """Check a 2x2 DeviceMesh built from explicit column/row process groups."""
    # create process groups for the 2D device mesh.
    # NOTE: dist.new_group is collective — every rank must create all four
    # groups, in the same order, including the ones it does not belong to.
    first_row_ranks = [0, 1]
    second_row_ranks = [2, 3]
    first_col_ranks = [0, 2]
    second_col_ranks = [1, 3]
    first_row_pg = dist.new_group(first_row_ranks, backend="nccl")
    second_row_pg = dist.new_group(second_row_ranks, backend="nccl")
    first_col_pg = dist.new_group(first_col_ranks, backend="nccl")
    second_col_pg = dist.new_group(second_col_ranks, backend="nccl")
    # pick this rank's own row/column groups
    current_rank = dist.get_rank()
    if current_rank in first_row_ranks:
        row_pg = first_row_pg
    else:
        row_pg = second_row_pg
    if current_rank in first_col_ranks:
        col_pg = first_col_pg
    else:
        col_pg = second_col_pg
    device_mesh = DeviceMesh.from_process_group([col_pg, row_pg])
    # checks
    assert device_mesh.shape == [2, 2]
    assert len(device_mesh.get_process_group_for_all_axes().keys()) == 2, "Expected 2 axes for the process group dict"
    assert device_mesh.get_process_group(axis=0) == col_pg, "Expected column process group"
    assert device_mesh.get_process_group(axis=1) == row_pg, "Expected row process group"
    assert device_mesh.num_devices == 4
    assert device_mesh.is_initialized
    assert device_mesh.logical_mesh_id is None
    assert device_mesh._is_init_from_process_group
def check_init_from_process_group(rank, world_size, port):
    """Worker: launch the distributed env, then run the mesh-from-process-group checks.

    The original worker only called ``colossalai.launch`` and never invoked
    ``check_1d_device_mesh``/``check_2d_device_mesh``, leaving those checks
    dead code — invoke them here so the spawned test actually verifies something.
    """
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    check_1d_device_mesh()
    check_2d_device_mesh()
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_device_mesh_from_process_group():
    # spawn 4 workers (the mesh checks assume world size 4)
    spawn(check_init_from_process_group, 4)


if __name__ == "__main__":
    test_device_mesh()
    test_device_mesh_from_process_group()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_device/test_alpha_beta.py | tests/test_device/test_alpha_beta.py | import pytest
from colossalai.device import AlphaBetaProfiler
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
def check_alpha_beta(rank, world_size, port, physical_devices):
    """Worker: profile pairwise alpha/beta and sanity-check their magnitudes."""
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    profiler = AlphaBetaProfiler(physical_devices)
    ab_dict = profiler.profile_ab()
    # each profiled pair must yield a tiny positive latency and inverse bandwidth
    for alpha, beta in ab_dict.values():
        assert 0 < alpha < 1e-4
        assert 0 < beta < 1e-10
# skipped: profiled latencies/bandwidths depend on the CI hardware
@pytest.mark.skip(reason="Skip because assertion fails for CI devices")
@pytest.mark.dist
@parameterize("physical_devices", [[0, 1, 2, 3], [0, 3]])
@rerun_if_address_is_in_use()
def test_profile_alpha_beta(physical_devices):
    spawn(check_alpha_beta, 4, physical_devices=physical_devices)


if __name__ == "__main__":
    test_profile_alpha_beta()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_device/test_init_logical_pg.py | tests/test_device/test_init_logical_pg.py | import pytest
import torch
import torch.distributed as dist
from torch.distributed import ReduceOp
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.testing import rerun_if_address_is_in_use, spawn
def check_layer(rank, world_size, port):
    """Worker: all-reduce a ones tensor over each mesh axis and check the sum."""
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    physical_mesh_id = torch.arange(0, 4)
    assert rank == dist.get_rank()
    # each axis group has 2 ranks, so summing ones gives 2 everywhere
    tensor_to_check = torch.tensor([2, 2, 2, 2]).cuda()
    mesh_shape = (2, 2)
    # logical layout:
    # [[0, 1],
    #  [2, 3]]
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    for axis in range(len(mesh_shape)):
        tensor = torch.ones(4).cuda()
        pg = device_mesh.get_process_group(axis=axis)
        dist.all_reduce(tensor, op=ReduceOp.SUM, group=pg)
        assert tensor.equal(tensor_to_check)
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_logical_pg():
    """Launch 4 processes and validate logical process-group construction."""
    num_workers = 4
    spawn(check_layer, num_workers)


if __name__ == "__main__":
    test_logical_pg()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_device/test_search_logical_device_mesh.py | tests/test_device/test_search_logical_device_mesh.py | import pytest
from colossalai.device import AlphaBetaProfiler
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
def check_alpha_beta(rank, world_size, port, physical_devices):
    """Search the best logical mesh for the given physical devices and verify it."""
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    best_logical_mesh = AlphaBetaProfiler(physical_devices).search_best_logical_mesh()

    # each known physical-device set has exactly one expected logical layout
    expected_meshes = {
        (0, 1, 2, 3): [[0, 1], [2, 3]],
        (0, 3): [[0, 3]],
    }
    expected = expected_meshes.get(tuple(physical_devices))
    if expected is not None:
        assert best_logical_mesh == expected
@pytest.mark.skip(reason="Skip because assertion may fail for CI devices")
@pytest.mark.dist
@parameterize("physical_devices", [[0, 1, 2, 3], [0, 3]])
@rerun_if_address_is_in_use()
def test_profile_alpha_beta(physical_devices):
    """Spawn 4 workers to run the logical-mesh search check."""
    num_workers = 4
    spawn(check_alpha_beta, num_workers, physical_devices=physical_devices)


if __name__ == "__main__":
    test_profile_alpha_beta()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_tensor/test_mix_gather.py | tests/test_tensor/test_mix_gather.py | import pytest
import torch
import torch.distributed as dist
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.tensor.shape_consistency import CollectiveCommPattern, CommSpec
from colossalai.tensor.sharding_spec import ShardingSpec
from colossalai.tensor.utils import mix_gather_simulator
from colossalai.testing import rerun_if_address_is_in_use, spawn
def check_mix_gather_S0S1(device_mesh, rank):
    """Mix-gather an [S0, S1]-sharded (8, 8) tensor in one collective and compare."""
    full_tensor = torch.arange(64).reshape((8, 8)).cuda()
    # forward dim 0 sharded on mesh axis 0, backward dim 1 on mesh axis 1
    gather_dim, logical_process_axes = mix_gather_simulator((0, [0]), (1, [1]))

    # local shard is a (4, 2) tile: row block by mesh row, column block by mesh column
    rows_per_shard, cols_per_shard = 4, 2
    row_offset = (rank // 4) * rows_per_shard
    col_offset = (rank % 4) * cols_per_shard
    local_shard = (
        full_tensor[row_offset : row_offset + rows_per_shard, col_offset : col_offset + cols_per_shard]
        .contiguous()
        .cuda()
    )

    # DistSpec: shard_sequence S0,S1 on a (2, 4) device mesh
    source_spec = ShardingSpec(device_mesh, full_tensor.shape, dim_partition_dict={0: [0], 1: [1]})
    comm_spec = CommSpec(
        CollectiveCommPattern.MIXGATHER_FWD_SPLIT_BWD,
        sharding_spec=source_spec,
        gather_dim=gather_dim,
        logical_process_axis=logical_process_axes,
        forward_only=True,
        mix_gather=True,
    )
    gathered = comm_spec.covert_spec_to_action(local_shard)
    assert gathered.equal(full_tensor)
def check_two_all_gather_S0S1(device_mesh, rank):
    """Recover an [S0, S1]-sharded tensor with two consecutive all-gathers."""
    width = 8
    full_tensor = torch.arange(width * width).reshape((width, width)).cuda()

    # local shard is (width/2, width/4): rows split by mesh axis 0, cols by axis 1
    rows, cols = width // 2, width // 4
    row_offset = (rank // 4) * rows
    col_offset = (rank % 4) * cols
    local_shard = full_tensor[row_offset : row_offset + rows, col_offset : col_offset + cols].contiguous().cuda()

    # step 1: all-gather rows (S0 -> R) along logical axis 0
    spec_s0s1 = ShardingSpec(device_mesh, full_tensor.shape, dim_partition_dict={0: [0], 1: [1]})
    gather_rows = CommSpec(
        CollectiveCommPattern.GATHER_FWD_SPLIT_BWD, spec_s0s1, gather_dim=0, logical_process_axis=0
    )
    partial = gather_rows.covert_spec_to_action(local_shard)

    # step 2: all-gather columns (S1 -> R) along logical axis 1
    spec_rs1 = ShardingSpec(device_mesh, full_tensor.shape, dim_partition_dict={1: [1]})
    gather_cols = CommSpec(
        CollectiveCommPattern.GATHER_FWD_SPLIT_BWD, spec_rs1, gather_dim=1, logical_process_axis=1
    )
    result = gather_cols.covert_spec_to_action(partial)

    assert result.equal(full_tensor)
def check_mix_gather_S1S0(device_mesh, rank):
    """Mix-gather an [S1, S0]-sharded (8, 8) tensor in a single collective and
    verify the result equals the full tensor.

    Layout: rows are sharded over mesh axis 1 (size 4) and columns over mesh
    axis 0 (size 2), so each rank holds a (2, 4) tile.
    """
    tensor_to_check = torch.arange(64).reshape((8, 8)).cuda()
    (f, b) = (0, 1)
    f_target_pair = (f, [1])
    b_target_pair = (b, [0])
    # the simulator decides the gather dim(s) and which logical axes to gather over
    gather_dim, logical_process_axes = mix_gather_simulator(f_target_pair, b_target_pair)
    tensor_slice = [2, 4]  # local tile shape (rows, cols)
    rank_slice = 4
    # row block indexed by position inside a mesh row, column block by mesh row id
    f_start = (rank % rank_slice) * tensor_slice[0]
    b_start = (rank // rank_slice) * tensor_slice[1]
    tensor_to_comm = (
        tensor_to_check[f_start : f_start + tensor_slice[0], b_start : b_start + tensor_slice[1]].contiguous().cuda()
    )
    dim_partition_dict = {0: [1], 1: [0]}
    # DistSpec:
    #     shard_sequence: S1,S0
    #     device_mesh_shape: (2, 4)
    source_spec = ShardingSpec(device_mesh, tensor_to_check.shape, dim_partition_dict=dim_partition_dict)
    comm_spec = CommSpec(
        CollectiveCommPattern.MIXGATHER_FWD_SPLIT_BWD,
        sharding_spec=source_spec,
        gather_dim=gather_dim,
        logical_process_axis=logical_process_axes,
        forward_only=True,
        mix_gather=True,
    )
    tensor_to_comm = comm_spec.covert_spec_to_action(tensor_to_comm)
    assert tensor_to_comm.equal(tensor_to_check)
def check_two_all_gather_S1S0(device_mesh, rank):
    """Recover an [S1, S0]-sharded (8, 8) tensor with two consecutive all-gathers:
    rows first (S1, over logical axis 1), then columns (S0, over logical axis 0).
    """
    tensor_width = 8
    tensor_to_check = torch.arange(int(tensor_width * tensor_width)).reshape((tensor_width, tensor_width)).cuda()
    tensor_slice = [tensor_width // 4, tensor_width // 2]  # local tile: (2, 4)
    rank_slice = 4
    # row block by position inside a mesh row, column block by mesh row id
    f_start = (rank % rank_slice) * tensor_slice[0]
    b_start = (rank // rank_slice) * tensor_slice[1]
    tensor_to_comm = (
        tensor_to_check[f_start : f_start + tensor_slice[0], b_start : b_start + tensor_slice[1]].contiguous().cuda()
    )
    dim_partition_dict = {0: [1], 1: [0]}
    # DistSpec:
    #     shard_sequence: S1,S0
    #     device_mesh_shape: (2, 4)
    sharding_spec = ShardingSpec(device_mesh, tensor_to_check.shape, dim_partition_dict=dim_partition_dict)
    # step 1 - CommSpec:(comm_pattern:allgather, gather_dim:0, logical_process_axis:1)
    comm_spec = CommSpec(
        CollectiveCommPattern.GATHER_FWD_SPLIT_BWD, sharding_spec, gather_dim=0, logical_process_axis=1
    )
    tensor_to_comm = comm_spec.covert_spec_to_action(tensor_to_comm)
    dim_partition_dict = {1: [0]}
    # DistSpec:
    #     shard_sequence: R,S0
    #     device_mesh_shape: (2, 4)
    sharding_spec = ShardingSpec(device_mesh, tensor_to_check.shape, dim_partition_dict=dim_partition_dict)
    # step 2 - CommSpec:(comm_pattern:allgather, gather_dim:1, logical_process_axis:0)
    comm_spec = CommSpec(
        CollectiveCommPattern.GATHER_FWD_SPLIT_BWD, sharding_spec, gather_dim=1, logical_process_axis=0
    )
    tensor_to_comm = comm_spec.covert_spec_to_action(tensor_to_comm)
    assert tensor_to_comm.equal(tensor_to_check)
def check_mix_gather_S01R(device_mesh, rank):
    """Mix-gather an [S01, R]-sharded tensor (rows split over both mesh axes)."""
    full_tensor = torch.arange(64).reshape((8, 8)).cuda()
    # forward dim 0 sharded over both mesh axes; backward dim unsharded
    gather_dim, logical_process_axes = mix_gather_simulator((0, [0, 1]), (1, []))

    # with 8 ranks and 8 rows, every rank owns exactly one row
    local_shard = full_tensor[rank : rank + 1, :].contiguous().cuda()

    # DistSpec: shard_sequence S01,R on a (2, 4) device mesh
    source_spec = ShardingSpec(device_mesh, full_tensor.shape, dim_partition_dict={0: [0, 1]})
    comm_spec = CommSpec(
        CollectiveCommPattern.MIXGATHER_FWD_SPLIT_BWD,
        sharding_spec=source_spec,
        gather_dim=gather_dim,
        logical_process_axis=logical_process_axes,
        forward_only=True,
        mix_gather=True,
    )
    gathered = comm_spec.covert_spec_to_action(local_shard)
    assert gathered.equal(full_tensor)
def check_two_all_gather_S01R(device_mesh, rank):
    """Recover an [S01, R]-sharded (8, 8) tensor with two consecutive all-gathers:
    S01 -> S0 over logical axis 1, then S0 -> R over logical axis 0 (both on dim 0).
    """
    tensor_width = 8
    tensor_to_check = torch.arange(int(tensor_width * tensor_width)).reshape((tensor_width, tensor_width)).cuda()
    rank_stride = tensor_width // 8  # one row per rank
    tensor_to_comm = tensor_to_check[rank : rank + rank_stride, :].contiguous().cuda()
    dim_partition_dict = {0: [0, 1]}
    # DistSpec:
    #     shard_sequence: S01, R
    #     device_mesh_shape: (2, 4)
    sharding_spec = ShardingSpec(device_mesh, tensor_to_check.shape, dim_partition_dict=dim_partition_dict)
    # CommSpec:(comm_pattern:allgather, gather_dim:0, logical_process_axis:1)
    # NOTE: the original comment said logical_process_axis:0, but the code gathers over axis 1 first
    comm_spec = CommSpec(
        CollectiveCommPattern.GATHER_FWD_SPLIT_BWD, sharding_spec, gather_dim=0, logical_process_axis=1
    )
    tensor_to_comm = comm_spec.covert_spec_to_action(tensor_to_comm)
    dim_partition_dict = {0: [0]}
    # DistSpec:
    #     shard_sequence: S0, R  (the original comment wrongly said S1 — {0: [0]} is S0)
    #     device_mesh_shape: (2, 4)
    sharding_spec = ShardingSpec(device_mesh, tensor_to_check.shape, dim_partition_dict=dim_partition_dict)
    # CommSpec:(comm_pattern:allgather, gather_dim:0, logical_process_axis:0)
    comm_spec = CommSpec(
        CollectiveCommPattern.GATHER_FWD_SPLIT_BWD, sharding_spec, gather_dim=0, logical_process_axis=0
    )
    tensor_to_comm = comm_spec.covert_spec_to_action(tensor_to_comm)
    assert tensor_to_comm.equal(tensor_to_check)
def check_mix_gather_RS01(device_mesh, rank):
    """Mix-gather an [R, S01]-sharded tensor (columns split over both mesh axes)."""
    full_tensor = torch.arange(64).reshape((8, 8)).cuda()
    # forward dim unsharded; backward dim 1 sharded over both mesh axes
    gather_dim, logical_process_axes = mix_gather_simulator((0, []), (1, [0, 1]))

    # with 8 ranks and 8 columns, every rank owns exactly one column
    local_shard = full_tensor[:, rank : rank + 1].contiguous().cuda()

    # DistSpec: shard_sequence R,S01 on a (2, 4) device mesh
    source_spec = ShardingSpec(device_mesh, full_tensor.shape, dim_partition_dict={1: [0, 1]})
    comm_spec = CommSpec(
        CollectiveCommPattern.MIXGATHER_FWD_SPLIT_BWD,
        sharding_spec=source_spec,
        gather_dim=gather_dim,
        logical_process_axis=logical_process_axes,
        forward_only=True,
        mix_gather=True,
    )
    gathered = comm_spec.covert_spec_to_action(local_shard)
    assert gathered.equal(full_tensor)
def check_two_all_gather_RS01(device_mesh, rank):
    """Recover an [R, S01]-sharded (8, 8) tensor with two consecutive all-gathers:
    S01 -> S0 over logical axis 1, then S0 -> R over logical axis 0 (both on dim 1).
    """
    tensor_width = 8
    tensor_to_check = torch.arange(int(tensor_width * tensor_width)).reshape((tensor_width, tensor_width)).cuda()
    rank_stride = tensor_width // 8  # one column per rank
    tensor_to_comm = tensor_to_check[:, rank : rank + rank_stride].contiguous().cuda()
    dim_partition_dict = {1: [0, 1]}
    # DistSpec:
    #     shard_sequence: R, S01
    #     device_mesh_shape: (2, 4)
    sharding_spec = ShardingSpec(device_mesh, tensor_to_check.shape, dim_partition_dict=dim_partition_dict)
    # CommSpec:(comm_pattern:allgather, gather_dim:1, logical_process_axis:1)
    # NOTE: the original comment said logical_process_axis:0, but the code gathers over axis 1 first
    comm_spec = CommSpec(
        CollectiveCommPattern.GATHER_FWD_SPLIT_BWD, sharding_spec, gather_dim=1, logical_process_axis=1
    )
    tensor_to_comm = comm_spec.covert_spec_to_action(tensor_to_comm)
    dim_partition_dict = {1: [0]}
    # DistSpec:
    #     shard_sequence: R, S0
    #     device_mesh_shape: (2, 4)
    sharding_spec = ShardingSpec(device_mesh, tensor_to_check.shape, dim_partition_dict=dim_partition_dict)
    # CommSpec:(comm_pattern:allgather, gather_dim:1, logical_process_axis:0)
    # NOTE: the original comment said logical_process_axis:1, but the code gathers over axis 0 here
    comm_spec = CommSpec(
        CollectiveCommPattern.GATHER_FWD_SPLIT_BWD, sharding_spec, gather_dim=1, logical_process_axis=0
    )
    tensor_to_comm = comm_spec.covert_spec_to_action(tensor_to_comm)
    assert tensor_to_comm.equal(tensor_to_check)
def check_comm(rank, world_size, port):
    """Init an 8-rank (2, 4) device mesh and run every mix-gather/all-gather check."""
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    assert rank == dist.get_rank()

    # physical ranks [[0, 1, 2, 3], [4, 5, 6, 7]] arranged as a 2x4 logical mesh
    device_mesh = DeviceMesh(torch.arange(0, 8), (2, 4), init_process_group=True, need_flatten=True)

    checks = (
        check_mix_gather_S0S1,
        check_two_all_gather_S0S1,
        check_mix_gather_S1S0,
        check_two_all_gather_S1S0,
        check_mix_gather_S01R,
        check_two_all_gather_S01R,
        check_mix_gather_RS01,
        check_two_all_gather_RS01,
    )
    for check in checks:
        check(device_mesh, rank)
@pytest.mark.skip(reason="Skip because the check functions assume 8 GPUS but CI only have 4 GPUs")
@rerun_if_address_is_in_use()
def test_mix_gather():
    """Spawn 8 ranks and run the full mix-gather test battery."""
    spawn(check_comm, 8)


if __name__ == "__main__":
    test_mix_gather()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_tensor/test_sharding_spec.py | tests/test_tensor/test_sharding_spec.py | import torch
from colossalai.device.device_mesh import DeviceMesh
from colossalai.tensor.sharding_spec import ShardingSpec
def test_sharding_spec():
    """A {0: [0, 1]} partition on a (4, 4) mesh must print as [S01, R, R]."""
    # physical ranks 0..15 arranged as a 4x4 logical mesh:
    # [[0, 1, 2, 3],
    #  [4, 5, 6, 7],
    #  [8, 9, 10, 11],
    #  [12, 13, 14, 15]]
    device_mesh = DeviceMesh(torch.arange(0, 16), (4, 4))
    entire_shape = torch.Size((16, 8, 6))
    # shard tensor dim 0 over both mesh axes; leave dims 1 and 2 replicated
    sharding_spec = ShardingSpec(device_mesh, entire_shape, {0: [0, 1]})
    assert str(sharding_spec.sharding_sequence) == "[S01, R, R]"


if __name__ == "__main__":
    test_sharding_spec()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_tensor/test_comm_spec_apply.py | tests/test_tensor/test_comm_spec_apply.py | import pytest
import torch
import torch.distributed as dist
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.tensor.shape_consistency import CollectiveCommPattern, CommSpec
from colossalai.tensor.sharding_spec import ShardingSpec
from colossalai.testing import rerun_if_address_is_in_use, spawn
def check_all_gather(device_mesh, rank):
    """All-gather a (2, 2) shard along mesh axis 1 and verify the (2, 4) result.

    Ranks 0/2 (mesh column 0) hold ones and ranks 1/3 hold zeros, so after the
    gather every rank holds [ones | zeros] concatenated along dim 1.
    """
    # tensor to comm
    if rank in (0, 2):
        sharded_tensor_to_comm = torch.ones(2, 2).cuda()
    else:
        sharded_tensor_to_comm = torch.zeros(2, 2).cuda()
    # tensor to check: full tensor expected after gathering along dim 1
    tensor_to_check = torch.cat((torch.ones(2, 2), torch.zeros(2, 2)), 1).cuda()
    dim_partition_dict = {1: [1]}
    # DistSpec:
    #     shard_sequence: R,S1
    #     device_mesh_shape: (2, 2)
    sharding_spec = ShardingSpec(device_mesh, tensor_to_check.shape, dim_partition_dict=dim_partition_dict)
    # CommSpec:(comm_pattern:allgather, gather_dim:1, logical_process_axis:1)
    comm_spec = CommSpec(
        CollectiveCommPattern.GATHER_FWD_SPLIT_BWD, sharding_spec, gather_dim=1, logical_process_axis=1
    )
    # fix: the original duplicated the assignment ("x = x = f(...)"); one suffices
    sharded_tensor_to_comm = comm_spec.covert_spec_to_action(sharded_tensor_to_comm)
    assert sharded_tensor_to_comm.equal(tensor_to_check)
def check_shard(device_mesh, rank):
    """Split a (2, 4) tensor along dim 1 over mesh axis 1 and verify each half."""
    left_half = torch.zeros(2, 2).cuda()
    right_half = torch.ones(2, 2).cuda()
    # tensor([[0., 0., 1., 1.],
    #         [0., 0., 1., 1.]])
    full_tensor = torch.cat((left_half, right_half), 1)

    # DistSpec: shard_sequence R,R on a (2, 2) device mesh
    sharding_spec = ShardingSpec(device_mesh, full_tensor.shape, dim_partition_dict={})
    # CommSpec:(comm_pattern:shard, shard_dim:1, logical_process_axis:1)
    comm_spec = CommSpec(CollectiveCommPattern.SPLIT_FWD_GATHER_BWD, sharding_spec, shard_dim=1, logical_process_axis=1)
    local_shard = comm_spec.covert_spec_to_action(full_tensor)

    # mesh column 0 (ranks 0, 2) keeps the left half; column 1 (ranks 1, 3) the right
    expected = left_half if rank in (0, 2) else right_half
    assert local_shard.equal(expected)
def check_all_to_all(device_mesh, rank):
    """All-to-all a (2, 2) shard over mesh axis 0: gather along dim 0 while
    re-sharding along dim 1, and verify the resulting (4, 1) local column.
    """
    # tensor to comm: mesh row 0 (ranks 0, 1) holds [[0, 1]] rows, row 1 holds [[2, 3]]
    if rank in (0, 1):
        sharded_tensor_0 = torch.zeros(2, 1)
        sharded_tensor_1 = torch.ones(2, 1)
        # tensor([[0., 1.],
        #         [0., 1.]])
        tensor_to_comm = torch.cat((sharded_tensor_0, sharded_tensor_1), 1).cuda()
    if rank in (2, 3):
        sharded_tensor_0 = torch.ones(2, 1) * 2
        sharded_tensor_1 = torch.ones(2, 1) * 3
        # tensor([[2., 3.],
        #         [2., 3.]])
        tensor_to_comm = torch.cat((sharded_tensor_0, sharded_tensor_1), 1).cuda()
    # expected local result after the exchange: each rank keeps one column of the
    # vertically stacked rows
    if rank in (0, 1):
        # tensor([[0.],
        #         [0.],
        #         [2.],
        #         [2.]])
        tensor_to_check = torch.tensor([[0], [0], [2], [2]], dtype=tensor_to_comm.dtype).cuda()
    if rank in (2, 3):
        # tensor([[1.],
        #         [1.],
        #         [3.],
        #         [3.]])
        tensor_to_check = torch.tensor([[1], [1], [3], [3]], dtype=tensor_to_comm.dtype).cuda()
    dim_partition_dict = {0: [0]}
    # DistSpec:
    #     shard_sequence: S0,R
    #     device_mesh_shape: (2, 2)
    sharding_spec = ShardingSpec(device_mesh, torch.Size((4, 2)), dim_partition_dict=dim_partition_dict)
    # CommSpec:(comm_pattern:all2all, gather_dim:0, shard_dim:1, logical_process_axis:0)
    # (the original comment said "shard" — this is the ALL2ALL pattern)
    comm_spec = CommSpec(
        CollectiveCommPattern.ALL2ALL_FWD_ALL2ALL_BWD, sharding_spec, gather_dim=0, shard_dim=1, logical_process_axis=0
    )
    tensor_to_comm = comm_spec.covert_spec_to_action(tensor_to_comm)
    assert tensor_to_comm.equal(tensor_to_check)
def check_all_reduce_fwd(device_mesh, rank):
    """All-reduce rank-valued tensors along mesh axis 0 and verify the group sums."""
    local_tensor = torch.ones(2, 2).cuda() * rank
    # axis-0 groups are {0, 2} (sum 0+2=2) and {1, 3} (sum 1+3=4)
    if rank in (0, 2):
        expected = torch.tensor([[2, 2], [2, 2]], dtype=local_tensor.dtype).cuda()
    else:
        expected = torch.tensor([[4, 4], [4, 4]], dtype=local_tensor.dtype).cuda()

    # DistSpec: shard_sequence R,R on a (2, 2) device mesh
    sharding_spec = ShardingSpec(device_mesh, local_tensor.shape, dim_partition_dict={})
    comm_spec = CommSpec(CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD, sharding_spec, logical_process_axis=0)
    reduced = comm_spec.covert_spec_to_action(local_tensor)
    assert reduced.equal(expected)
def check_all_reduce_bwd(device_mesh, rank):
    """IDENTITY_FWD_ALLREDUCE_BWD must leave the tensor unchanged in the forward pass."""
    local_tensor = torch.ones(2, 2).cuda() * rank
    expected = torch.ones(2, 2).cuda() * rank

    # DistSpec: shard_sequence R,R on a (2, 2) device mesh
    sharding_spec = ShardingSpec(device_mesh, local_tensor.shape, dim_partition_dict={})
    comm_spec = CommSpec(CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, sharding_spec, logical_process_axis=0)
    result = comm_spec.covert_spec_to_action(local_tensor)
    assert result.equal(expected)
def check_all_reduce_in_flatten_device_mesh(device_mesh, rank):
    """All-reduce over the flattened mesh (all 4 ranks): 0 + 1 + 2 + 3 = 6 everywhere."""
    local_tensor = torch.ones(2, 2).cuda() * rank
    expected = torch.tensor([[6, 6], [6, 6]], dtype=local_tensor.dtype).cuda()

    # DistSpec: shard_sequence R,R on a (2, 2) device mesh
    sharding_spec = ShardingSpec(device_mesh, local_tensor.shape, dim_partition_dict={})
    # CommSpec:(comm_pattern:all_reduce, logical_process_axis:[0, 1]) — both axes flattened
    comm_spec = CommSpec(CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD, sharding_spec, logical_process_axis=[0, 1])
    reduced = comm_spec.covert_spec_to_action(local_tensor)
    assert reduced.equal(expected)
def check_comm(rank, world_size, port):
    """Set up a (2, 2) device mesh and run every CommSpec collective check."""
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    assert rank == dist.get_rank()

    # physical ranks [[0, 1], [2, 3]] arranged as a 2x2 logical mesh
    device_mesh = DeviceMesh(torch.arange(0, 4), (2, 2), init_process_group=True)

    for check in (
        check_all_gather,  # all-gather
        check_shard,  # shard
        check_all_to_all,  # all-to-all
        check_all_reduce_fwd,  # all-reduce (forward)
        check_all_reduce_bwd,  # all-reduce (backward identity)
        check_all_reduce_in_flatten_device_mesh,  # all-reduce on the 1D flattened mesh
    ):
        check(device_mesh, rank)
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_comm_spec():
    """Spawn 4 ranks and exercise all CommSpec collective patterns."""
    num_workers = 4
    spawn(check_comm, num_workers)


if __name__ == "__main__":
    test_comm_spec()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_tensor/test_shape_consistency_apply.py | tests/test_tensor/test_shape_consistency_apply.py | import pytest
import torch
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.tensor.shape_consistency import ShapeConsistencyManager
from colossalai.tensor.sharding_spec import ShardingSpec
from colossalai.testing import rerun_if_address_is_in_use, spawn
def check_apply(rank, world_size, port):
    """End-to-end ShapeConsistencyManager.apply: convert a (4, 2) tensor from
    [S0, R] to [R, S0] on a (2, 2) mesh and verify both the data and the
    resulting sharding spec on the tensor.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    # [[0, 1,
    #  [2, 3]]
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    entire_shape = torch.Size((4, 2))
    shape_consistency_manager = ShapeConsistencyManager()
    dim_partition_source = {0: [0]}
    dim_partition_target = {1: [0]}
    # DistSpec:
    #     shard_sequence: S0,R
    #     device_mesh_shape: (2, 2)
    sharding_spec_source = ShardingSpec(device_mesh, entire_shape, dim_partition_source)
    # DistSpec:
    #     shard_sequence: R,S0
    #     device_mesh_shape: (2, 2)
    sharding_spec_target = ShardingSpec(device_mesh, entire_shape, dim_partition_target)
    # local shard before conversion: mesh row 0 holds [[0, 1]] rows, row 1 holds [[2, 3]]
    if rank in (0, 1):
        sharded_tensor_0 = torch.zeros(2, 1)
        sharded_tensor_1 = torch.ones(2, 1)
        # tensor([[0., 1.],
        #         [0., 1.]])
        tensor_to_comm = torch.cat((sharded_tensor_0, sharded_tensor_1), 1).cuda()
    if rank in (2, 3):
        sharded_tensor_0 = torch.ones(2, 1) * 2
        sharded_tensor_1 = torch.ones(2, 1) * 3
        # tensor([[2., 3.],
        #         [2., 3.]])
        tensor_to_comm = torch.cat((sharded_tensor_0, sharded_tensor_1), 1).cuda()
    # expected local shard after the [S0, R] -> [R, S0] conversion
    if rank in (0, 1):
        # tensor([[0.],
        #         [0.],
        #         [2.],
        #         [2.]])
        tensor_to_check = torch.tensor([[0], [0], [2], [2]], dtype=tensor_to_comm.dtype).cuda()
    if rank in (2, 3):
        # tensor([[1.],
        #         [1.],
        #         [3.],
        #         [3.]])
        tensor_to_check = torch.tensor([[1], [1], [3], [3]], dtype=tensor_to_comm.dtype).cuda()
    tensor_to_comm.sharding_spec = sharding_spec_source
    tensor_to_comm = shape_consistency_manager.apply(tensor_to_comm, sharding_spec_target)
    assert tensor_to_comm.equal(tensor_to_check)
    # apply() must also rewrite the tensor's sharding spec to match the target
    assert str(tensor_to_comm.sharding_spec.sharding_sequence) == str(sharding_spec_target.sharding_sequence)
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_apply():
    """Spawn 4 ranks and verify ShapeConsistencyManager.apply end to end."""
    num_workers = 4
    spawn(check_apply, num_workers)


if __name__ == "__main__":
    test_apply()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_tensor/test_shape_consistency.py | tests/test_tensor/test_shape_consistency.py | import torch
from colossalai.device.device_mesh import DeviceMesh
from colossalai.tensor.shape_consistency import CollectiveCommPattern, ShapeConsistencyManager
from colossalai.tensor.sharding_spec import ShardingSpec
# Shared fixtures for the tests below: a 4x4 logical device mesh over physical
# ranks 0..15, a (64, 32, 16) global tensor shape, and one manager instance.
physical_mesh_id = torch.arange(0, 16)
mesh_shape = (4, 4)
# [[0, 1, 2, 3],
#  [4, 5, 6, 7],
#  [8, 9, 10, 11],
#  [12, 13, 14, 15]]
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
entire_shape = torch.Size((64, 32, 16))
shape_consistency_manager = ShapeConsistencyManager()
def test_one_step_transform():
    """Each one-step neighbour generator (all-gather / all-to-all / shard) must
    include the expected sharding specs reachable from the source spec.
    """
    dim_partition_dict = {0: [0], 1: [1]}
    # DistSpec:
    #     shard_sequence: S0,S1,R
    #     device_mesh_shape: (4, 4)
    sharding_spec = ShardingSpec(device_mesh, entire_shape, dim_partition_dict)
    # all-gather candidates: releasing either sharded dim yields [R, S1, R] or [S0, R, R]
    rst_dict = shape_consistency_manager.get_all_all_gather_spec(
        sharding_spec, {"forward": 0, "backward": 0, "total": 0}
    )
    assert "[R, S1, R]" in [
        str(all_gather_sharding_spec.sharding_sequence) for all_gather_sharding_spec in rst_dict.keys()
    ]
    assert "[S0, R, R]" in [
        str(all_gather_sharding_spec.sharding_sequence) for all_gather_sharding_spec in rst_dict.keys()
    ]
    dim_partition_dict_all2all = {0: [0], 1: [1]}
    # DistSpec:
    #     shard_sequence: S0,S1,R
    #     device_mesh_shape: (4, 4)
    sharding_spec_all2all = ShardingSpec(device_mesh, entire_shape, dim_partition_dict_all2all)
    # all-to-all candidates: moving one shard axis yields the three specs asserted below
    rst_dict_all2all = shape_consistency_manager.get_all_all_to_all_spec(
        sharding_spec_all2all, {"forward": 0, "backward": 0, "total": 0}
    )
    assert "[S01, R, R]" in [
        str(all2all_sharding_spec.sharding_sequence) for all2all_sharding_spec in rst_dict_all2all.keys()
    ]
    assert "[R, S1, S0]" in [
        str(all2all_sharding_spec.sharding_sequence) for all2all_sharding_spec in rst_dict_all2all.keys()
    ]
    assert "[S0, R, S1]" in [
        str(all2all_sharding_spec.sharding_sequence) for all2all_sharding_spec in rst_dict_all2all.keys()
    ]
    dim_partition_shard = {0: [0]}
    # DistSpec:
    #     shard_sequence: S0,R,R
    #     device_mesh_shape: (4, 4)
    sharding_spec_shard = ShardingSpec(device_mesh, entire_shape, dim_partition_shard)
    # shard candidates: adding mesh axis 1 to any tensor dim yields the three specs below
    rst_dict_shard = shape_consistency_manager.get_all_shard_spec(
        sharding_spec_shard, {"forward": 0, "backward": 0, "total": 0}
    )
    assert "[S01, R, R]" in [
        str(shard_sharding_spec.sharding_sequence) for shard_sharding_spec in rst_dict_shard.keys()
    ]
    assert "[S0, S1, R]" in [
        str(shard_sharding_spec.sharding_sequence) for shard_sharding_spec in rst_dict_shard.keys()
    ]
    assert "[S0, R, S1]" in [
        str(shard_sharding_spec.sharding_sequence) for shard_sharding_spec in rst_dict_shard.keys()
    ]
def test_shape_consistency():
    """shape_consistency() must find the 3-step path [R, S01, R] -> [S01, R, R]
    (all-gather, all-to-all, shard) and cache it for reuse.
    """
    dim_partition_source = {1: [0, 1]}
    dim_partition_target = {0: [0, 1]}
    # DistSpec:
    #     shard_sequence: R,S01,R
    #     device_mesh_shape: (4, 4)
    sharding_spec_source = ShardingSpec(device_mesh, entire_shape, dim_partition_source)
    # DistSpec:
    #     shard_sequence: S01,R,R
    #     device_mesh_shape: (4, 4)
    sharding_spec_target = ShardingSpec(device_mesh, entire_shape, dim_partition_target)
    transform_path, comm_action_sequence, total_cost = shape_consistency_manager.shape_consistency(
        sharding_spec_source, sharding_spec_target
    )
    transform_path_str = "->".join([str(sharding_spec.sharding_sequence) for sharding_spec in transform_path])
    assert transform_path_str == "[R, S01, R]->[R, S0, R]->[S0, R, R]->[S01, R, R]"
    # step 1: all-gather(S01) -> S0 on tensor dim 1, over logical axis 1
    assert comm_action_sequence[0].comm_pattern == CollectiveCommPattern.GATHER_FWD_SPLIT_BWD
    assert comm_action_sequence[0].gather_dim == 1
    assert comm_action_sequence[0].logical_process_axis == 1
    # step 2: all-to-all [R, S0] -> [S0, R], over logical axis 0
    assert comm_action_sequence[1].comm_pattern == CollectiveCommPattern.ALL2ALL_FWD_ALL2ALL_BWD
    assert comm_action_sequence[1].gather_dim == 1
    assert comm_action_sequence[1].shard_dim == 0
    assert comm_action_sequence[1].logical_process_axis == 0
    # step 3: shard(S0) -> S01 on tensor dim 0, over logical axis 1
    assert comm_action_sequence[2].comm_pattern == CollectiveCommPattern.SPLIT_FWD_GATHER_BWD
    assert comm_action_sequence[2].shard_dim == 0
    assert comm_action_sequence[2].logical_process_axis == 1
    # the (source, target) pair must be cached with the same path and actions
    assert (
        shape_consistency_manager.cached_spec_pairs_transform_path[("[R, S01, R]", "[S01, R, R]")][0] == transform_path
    )
    assert (
        shape_consistency_manager.cached_spec_pairs_transform_path[("[R, S01, R]", "[S01, R, R]")][1]
        == comm_action_sequence
    )


if __name__ == "__main__":
    test_one_step_transform()
    test_shape_consistency()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_tensor/test_padded_tensor.py | tests/test_tensor/test_padded_tensor.py | import torch
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.tensor.d_tensor import ShardingSpec, distribute_tensor, is_distributed_tensor, to_global
from colossalai.tensor.padded_tensor import is_padded_tensor, to_padded_tensor, to_unpadded_tensor
from colossalai.testing import rerun_if_address_is_in_use, spawn
def check_padded_tensor(rank, world_size, port):
    """Round-trip a d-tensor through padding: pad dim 0 to 64 locally, check the
    padded/distributed flags survive clone()/detach(), then unpad and gather
    back to the (32, 64) global tensor.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    original_tensor = torch.rand(32, 64).to("cuda")
    # NOTE(review): torch.Tensor([0, 1, 2, 3]) builds a float tensor of rank ids,
    # unlike the torch.arange used in sibling tests — presumably DeviceMesh
    # accepts both; confirm before normalizing.
    device_mesh = DeviceMesh(torch.Tensor([0, 1, 2, 3]), (2, 2), init_process_group=True)
    # shard dim 0 over mesh axis 0 -> each mesh row holds a (16, 64) shard
    target_sharding_spec = ShardingSpec(dim_size=original_tensor.dim(), dim_partition_dict={0: [0]})
    d_tensor = distribute_tensor(original_tensor, device_mesh, target_sharding_spec)
    # pad the sharded dim 0 up to length 64; the dist layout must carry over
    padded_tensor = to_padded_tensor(d_tensor, current_length=64, padding_dim=0)
    assert padded_tensor.dist_layout == d_tensor.dist_layout
    # clone() must preserve both the padded and distributed markers
    tensor_copy = padded_tensor.clone()
    assert is_padded_tensor(tensor_copy)
    assert is_distributed_tensor(tensor_copy)
    # detach() must preserve them too
    tensor_detached = padded_tensor.detach()
    assert is_padded_tensor(tensor_detached)
    assert is_distributed_tensor(tensor_detached)
    # unpadding restores the sharded shape and keeps the tensor distributed
    unpadded_tensor = to_unpadded_tensor(padded_tensor)
    assert unpadded_tensor.shape == d_tensor.shape
    assert is_distributed_tensor(unpadded_tensor)
    # gathering back to a global tensor restores the original full shape
    global_tensor = to_global(unpadded_tensor)
    assert global_tensor.shape == original_tensor.shape
@rerun_if_address_is_in_use()
def test_padded_tensor():
    """Spawn 4 ranks and run the padded d-tensor round-trip check."""
    num_workers = 4
    spawn(check_padded_tensor, num_workers)


if __name__ == "__main__":
    test_padded_tensor()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_tensor/test_dtensor/test_dtensor_sharding_spec.py | tests/test_tensor/test_dtensor/test_dtensor_sharding_spec.py | import operator
from functools import reduce
from colossalai.tensor.d_tensor.sharding_spec import ALLGATHER_COST, SHARD_COST, STEP_PENALTY, ShardingSpec
def test_dtensor_sharding_spec():
    """spec_diff between [S01, R, R, R] and [R, S01, R, R] equals the sum of
    the per-dimension dim_diff costs."""
    dims = 4
    # dim 0 sharded over mesh axes 0 and 1
    spec_a = ShardingSpec(dims, dim_partition_dict={0: [0, 1]})
    assert str(spec_a.sharding_sequence) == "[S01, R, R, R]"
    # dim 1 sharded over mesh axes 0 and 1
    spec_b = ShardingSpec(dims, dim_partition_dict={1: [0, 1]})
    assert str(spec_b.sharding_sequence) == "[R, S01, R, R]"

    dims_a = list(spec_a.sharding_sequence)
    dims_b = list(spec_b.sharding_sequence)
    # S01 -> R costs two all-gathers plus a step penalty; R -> S01 two shards plus one
    assert dims_a[0].dim_diff(dims_b[0]) == ALLGATHER_COST + STEP_PENALTY + ALLGATHER_COST
    assert dims_a[1].dim_diff(dims_b[1]) == SHARD_COST + STEP_PENALTY + SHARD_COST
    assert dims_a[2].dim_diff(dims_b[2]) == 0
    assert dims_a[3].dim_diff(dims_b[3]) == 0

    # the whole-spec diff is exactly the elementwise sum of the dim diffs
    per_dim_costs = [dims_a[i].dim_diff(dims_b[i]) for i in range(dims)]
    assert spec_a.spec_diff(spec_b) == reduce(operator.add, per_dim_costs, 0)


if __name__ == "__main__":
    test_dtensor_sharding_spec()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_tensor/test_dtensor/test_comm_spec.py | tests/test_tensor/test_dtensor/test_comm_spec.py | import pytest
import torch
import torch.distributed as dist
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.tensor.d_tensor.comm_spec import CollectiveCommPattern, CommSpec
from colossalai.testing import rerun_if_address_is_in_use, spawn
def check_all_gather(process_groups_dict, rank):
    """All-gather along mesh axis 1 must concatenate the two column shards.

    Ranks (0, 2) hold ones and ranks (1, 3) hold zeros; gathering on dim 1
    must produce ``[ones(2,2) | zeros(2,2)]`` on every rank.
    """
    # Local shard held by this rank.
    if rank in (0, 2):
        sharded_tensor_to_comm = torch.ones(2, 2).cuda()
    else:
        sharded_tensor_to_comm = torch.zeros(2, 2).cuda()
    # Expected gathered result.
    tensor_to_check = torch.cat((torch.ones(2, 2), torch.zeros(2, 2)), 1).cuda()
    # CommSpec:(comm_pattern:allgather, gather_dim:1, logical_process_axis:1)
    comm_spec = CommSpec(
        CollectiveCommPattern.GATHER_FWD_SPLIT_BWD, process_groups_dict, gather_dim=1, logical_process_axis=1
    )
    # Fixed accidental duplicated assignment (`x = x = ...`) from the original.
    sharded_tensor_to_comm = comm_spec.covert_spec_to_action(sharded_tensor_to_comm)
    assert sharded_tensor_to_comm.equal(tensor_to_check)
def check_shard(process_groups_dict, rank):
    """Splitting along mesh axis 1 must leave each rank with its own column shard."""
    zeros_shard = torch.zeros(2, 2).cuda()
    ones_shard = torch.ones(2, 2).cuda()
    # Full tensor before sharding:
    # tensor([[0., 0., 1., 1.],
    #         [0., 0., 1., 1.]])
    full_tensor = torch.cat((zeros_shard, ones_shard), 1)
    # CommSpec:(comm_pattern:shard, shard_dim:1, logical_process_axis:1)
    comm_spec = CommSpec(
        CollectiveCommPattern.SPLIT_FWD_GATHER_BWD, process_groups_dict, shard_dim=1, logical_process_axis=1
    )
    result = comm_spec.covert_spec_to_action(full_tensor)
    # Ranks (0, 2) sit in the left mesh column, (1, 3) in the right one.
    if rank in (0, 2):
        assert result.equal(zeros_shard)
    if rank in (1, 3):
        assert result.equal(ones_shard)
def check_all_to_all(process_groups_dict, rank):
    """All-to-all over mesh axis 0: gather along dim 0 while scattering dim 1."""
    # Input held by this rank before the exchange.
    if rank in (0, 1):
        # tensor([[0., 1.],
        #         [0., 1.]])
        left, right = torch.zeros(2, 1), torch.ones(2, 1)
        tensor_to_comm = torch.cat((left, right), 1).cuda()
    if rank in (2, 3):
        # tensor([[2., 3.],
        #         [2., 3.]])
        left, right = torch.ones(2, 1) * 2, torch.ones(2, 1) * 3
        tensor_to_comm = torch.cat((left, right), 1).cuda()
    # Expected: each rank keeps a single column, stacked across peers on dim 0.
    if rank in (0, 1):
        # tensor([[0.], [0.], [2.], [2.]])
        tensor_to_check = torch.tensor([[0], [0], [2], [2]], dtype=tensor_to_comm.dtype).cuda()
    if rank in (2, 3):
        # tensor([[1.], [1.], [3.], [3.]])
        tensor_to_check = torch.tensor([[1], [1], [3], [3]], dtype=tensor_to_comm.dtype).cuda()
    # CommSpec:(comm_pattern:all2all, gather_dim:0, shard_dim:1, logical_process_axis:0)
    comm_spec = CommSpec(
        CollectiveCommPattern.ALL2ALL_FWD_ALL2ALL_BWD,
        process_groups_dict,
        gather_dim=0,
        shard_dim=1,
        logical_process_axis=0,
    )
    tensor_to_comm = comm_spec.covert_spec_to_action(tensor_to_comm)
    assert tensor_to_comm.equal(tensor_to_check)
def check_all_reduce_fwd(process_groups_dict, rank):
    """Forward all-reduce over mesh axis 0 sums the two ranks in each mesh column."""
    tensor_to_comm = torch.ones(2, 2).cuda() * rank
    # Mesh axis 0 pairs ranks (0, 2) and (1, 3); each pair sums its rank-valued inputs.
    if rank in (0, 2):
        # tensor([[2., 2.],
        #         [2., 2.]])
        expected = torch.tensor([[2, 2], [2, 2]], dtype=tensor_to_comm.dtype).cuda()
    if rank in (1, 3):
        # tensor([[4., 4.],
        #         [4., 4.]])
        expected = torch.tensor([[4, 4], [4, 4]], dtype=tensor_to_comm.dtype).cuda()
    comm_spec = CommSpec(CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD, process_groups_dict, logical_process_axis=0)
    tensor_to_comm = comm_spec.covert_spec_to_action(tensor_to_comm)
    assert tensor_to_comm.equal(expected)
def check_all_reduce_bwd(process_groups_dict, rank):
    """The backward-only all-reduce pattern must leave the forward tensor untouched."""
    tensor_to_comm = torch.ones(2, 2).cuda() * rank
    expected = torch.ones(2, 2).cuda() * rank
    comm_spec = CommSpec(CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, process_groups_dict, logical_process_axis=0)
    result = comm_spec.covert_spec_to_action(tensor_to_comm)
    assert result.equal(expected)
def check_comm(rank, world_size, port):
    """Worker entry: bring up a 2x2 device mesh and run every CommSpec check."""
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    physical_mesh_id = torch.arange(0, 4)
    assert rank == dist.get_rank()
    # Logical mesh:
    # [[0, 1],
    #  [2, 3]]
    device_mesh = DeviceMesh(physical_mesh_id, (2, 2), init_process_group=True)
    process_group_dict = device_mesh._process_group_dict[rank]
    # Exercise every collective pattern on the mesh, in the original order.
    for check in (check_all_gather, check_shard, check_all_to_all, check_all_reduce_fwd, check_all_reduce_bwd):
        check(process_group_dict, rank)
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_comm_spec():
    """Launch four worker processes and run the CommSpec checks on each."""
    spawn(check_comm, 4)
# Allow running the check directly without pytest.
if __name__ == "__main__":
    test_comm_spec()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_tensor/test_dtensor/test_layout_converter.py | tests/test_tensor/test_dtensor/test_layout_converter.py | import math
import pytest
import torch
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.tensor.d_tensor.comm_spec import CollectiveCommPattern
from colossalai.tensor.d_tensor.layout import Layout
from colossalai.tensor.d_tensor.layout_converter import LayoutConverter
from colossalai.tensor.d_tensor.sharding_spec import ShardingSpec
from colossalai.testing import rerun_if_address_is_in_use, spawn
# Shared fixtures for every check below: a 64x32x16 global tensor laid out on
# a 2x2 logical device mesh built from physical devices 0-3.
global_shape = torch.Size((64, 32, 16))
layout_converter = LayoutConverter()
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
def check_one_step_transform(rank, world_size, port):
    """Each single-step transform (all-gather / all-to-all / shard) must offer
    the expected candidate layouts.

    For a given source layout the converter enumerates every layout reachable
    with one collective; we assert the expected sharding sequences appear.
    The original rebuilt the candidate-string list once per assertion and
    iterated ``dict.keys()``; the list is now built once per transform and
    the dict is iterated directly.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    # Logical mesh:
    # [[0, 1],
    #  [2, 3]]
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)

    # Source: S0,S1,R on the 2x2 mesh. One all-gather can free either sharded dim.
    dim_partition_dict = {0: [0], 1: [1]}
    sharding_spec = ShardingSpec(dim_size=3, dim_partition_dict=dim_partition_dict)
    layout = Layout(device_mesh=device_mesh, sharding_spec=sharding_spec, global_shape=global_shape)
    rst_dict = layout_converter.all_gather_transform_layouts(layout)
    gather_seqs = [str(candidate.sharding_spec.sharding_sequence) for candidate in rst_dict]
    assert "[R, S1, R]" in gather_seqs
    assert "[S0, R, R]" in gather_seqs

    # Source: S0,S1,R. One all-to-all can move a shard between dimensions.
    dim_partition_dict_all2all = {0: [0], 1: [1]}
    sharding_spec_all2all = ShardingSpec(dim_size=3, dim_partition_dict=dim_partition_dict_all2all)
    layout_all2all = Layout(device_mesh=device_mesh, sharding_spec=sharding_spec_all2all, global_shape=global_shape)
    rst_dict_all2all = layout_converter.all_to_all_transform_layout(layout_all2all)
    all2all_seqs = [str(candidate.sharding_spec.sharding_sequence) for candidate in rst_dict_all2all]
    assert "[S01, R, R]" in all2all_seqs
    assert "[R, S1, S0]" in all2all_seqs
    assert "[S0, R, S1]" in all2all_seqs

    # Source: S0,R,R. One shard op can split a further dimension or deepen dim 0.
    dim_partition_shard = {0: [0]}
    sharding_spec_shard = ShardingSpec(dim_size=3, dim_partition_dict=dim_partition_shard)
    shard_layout = Layout(device_mesh=device_mesh, sharding_spec=sharding_spec_shard, global_shape=global_shape)
    rst_dict_shard = layout_converter.shard_transform_layout(shard_layout)
    # Distinct loop variable: the original shadowed `shard_layout` here.
    shard_seqs = [str(candidate.sharding_spec.sharding_sequence) for candidate in rst_dict_shard]
    assert "[S01, R, R]" in shard_seqs
    assert "[S0, S1, R]" in shard_seqs
    assert "[S0, R, S1]" in shard_seqs
def check_layout_converting(rank, world_size, port):
    """Verify the full conversion plan from [R, S01, R] to [S01, R, R].

    Checks the intermediate transform path, the concrete comm action chosen
    for each hop, caching of the solution, and the symmetry of the
    communication-cost estimate.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    dim_partition_source = {1: [0, 1]}
    dim_partition_target = {0: [0, 1]}
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    # DistSpec:
    #     shard_sequence: R,S01,R  (dim 1 split over both mesh axes)
    sharding_spec_source = ShardingSpec(dim_size=3, dim_partition_dict=dim_partition_source)
    source_layout = Layout(device_mesh=device_mesh, sharding_spec=sharding_spec_source, global_shape=global_shape)
    # DistSpec:
    #     shard_sequence: S01,R,R  (dim 0 split over both mesh axes)
    sharding_spec_target = ShardingSpec(dim_size=3, dim_partition_dict=dim_partition_target)
    target_layout = Layout(device_mesh=device_mesh, sharding_spec=sharding_spec_target, global_shape=global_shape)
    transform_path, comm_action_sequence = layout_converter.layout_converting(source_layout, target_layout)
    # check transform path
    transform_path_str = "->".join([str(layout.sharding_spec.sharding_sequence) for layout in transform_path])
    assert transform_path_str == "[R, S01, R]->[R, S0, R]->[S0, R, R]->[S01, R, R]"
    # check comm action sequence
    # all-gather(S01) -> S0
    assert comm_action_sequence[0].comm_pattern == CollectiveCommPattern.GATHER_FWD_SPLIT_BWD
    assert comm_action_sequence[0].gather_dim == 1
    assert comm_action_sequence[0].logical_process_axis == 1
    # all-to-all(R, S0) -> [S0, R]
    assert comm_action_sequence[1].comm_pattern == CollectiveCommPattern.ALL2ALL_FWD_ALL2ALL_BWD
    assert comm_action_sequence[1].gather_dim == 1
    assert comm_action_sequence[1].shard_dim == 0
    assert comm_action_sequence[1].logical_process_axis == 0
    # shard(S0) -> [S01]
    assert comm_action_sequence[2].comm_pattern == CollectiveCommPattern.SPLIT_FWD_GATHER_BWD
    assert comm_action_sequence[2].shard_dim == 0
    assert comm_action_sequence[2].logical_process_axis == 1
    # check the cached (source, target) -> solution mapping returns the same plan
    src_shape = source_layout.get_sharded_shape_per_device()
    dst_shape = target_layout.get_sharded_shape_per_device()
    assert (
        layout_converter.cached_solution[(("[R, S01, R]", src_shape), ("[S01, R, R]", dst_shape))][0] == transform_path
    )
    assert (
        layout_converter.cached_solution[(("[R, S01, R]", src_shape), ("[S01, R, R]", dst_shape))][1]
        == comm_action_sequence
    )
    # forward and backward costs are symmetric; the total is their sum
    comm_cost = layout_converter.get_total_comm_cost(source_layout, target_layout)
    assert comm_cost["forward"] == comm_cost["backward"]
    assert math.floor(comm_cost["total"]) == math.floor(comm_cost["forward"] + comm_cost["backward"])
def check_layout_converting_apply(rank, world_size, port):
    """Applying the converter must physically move shards from [R, S01, R] to [S01, R, R]."""
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    # Source layout: R,S01,R -- dim 1 split four ways across both mesh axes.
    source_spec = ShardingSpec(dim_size=3, dim_partition_dict={1: [0, 1]})
    source_layout = Layout(device_mesh=device_mesh, sharding_spec=source_spec, global_shape=global_shape)
    # Target layout: S01,R,R -- dim 0 split four ways instead.
    target_spec = ShardingSpec(dim_size=3, dim_partition_dict={0: [0, 1]})
    target_layout = Layout(device_mesh=device_mesh, sharding_spec=target_spec, global_shape=global_shape)
    original_tensor = torch.rand(global_shape).cuda()
    # This rank's shard under the source layout: 8 columns of dim 1 (32 / 4).
    tensor_to_apply = original_tensor.narrow(1, rank * 8, 8)
    # This rank's shard under the target layout: 16 rows of dim 0 (64 / 4).
    tensor_to_check = original_tensor.narrow(0, rank * 16, 16)
    converted_tensor = layout_converter.apply(tensor_to_apply, source_layout, target_layout)
    assert converted_tensor.equal(tensor_to_check)
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_layout_converter():
    """Run each layout-converter check in its own 4-process group."""
    for worker in (check_one_step_transform, check_layout_converting, check_layout_converting_apply):
        spawn(worker, 4)
# Allow running the check directly without pytest.
if __name__ == "__main__":
    test_layout_converter()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_tensor/test_dtensor/test_dtensor.py | tests/test_tensor/test_dtensor/test_dtensor.py | import torch
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.tensor.d_tensor import ShardingSpec, distribute_tensor, get_global_shape, redistribute, to_global
from colossalai.testing import rerun_if_address_is_in_use, spawn
class TestModel(torch.nn.Module):
    """Two stacked linear layers: in_features -> out_features -> in_features."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.linear_1 = torch.nn.Linear(in_features, out_features)
        self.linear_2 = torch.nn.Linear(out_features, in_features)

    def forward(self, x):
        # Feed the input through both layers in sequence.
        return self.linear_2(self.linear_1(x))
def check_dtensor(rank, world_size, port):
    """End-to-end DTensor check: distribute, run a model, redistribute, compare."""
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    test_model = TestModel(8, 8).to("cuda")
    original_tensor = torch.rand(4, 8).to("cuda")
    compare_output = test_model(original_tensor)
    device_mesh = DeviceMesh(torch.Tensor([0, 1, 2, 3]), (2, 2), init_process_group=True)

    # Shard dim 0 over mesh axis 0: ranks (0, 1) and (2, 3) each hold one half.
    target_sharding_spec = ShardingSpec(dim_size=original_tensor.dim(), dim_partition_dict={0: [0]})
    d_tensor = distribute_tensor(original_tensor, device_mesh, target_sharding_spec)
    assert get_global_shape(d_tensor) == original_tensor.shape
    assert d_tensor.dtype == original_tensor.dtype
    if rank in (0, 1):
        assert d_tensor.equal(original_tensor.narrow(0, 0, 2))
    elif rank in (2, 3):
        assert d_tensor.equal(original_tensor.narrow(0, 2, 2))
    else:
        raise ValueError(f"rank {rank} is not in the device mesh")
    assert to_global(d_tensor).equal(original_tensor)

    # Running the model on the shard must match the matching slice of the full output.
    output = test_model(d_tensor)
    if rank in (0, 1):
        assert output.equal(compare_output.narrow(0, 0, 2))
    elif rank in (2, 3):
        assert output.equal(compare_output.narrow(0, 2, 2))
    else:
        raise ValueError(f"rank {rank} is not in the device mesh")

    # Re-shard dim 0 over both mesh axes: every rank then holds exactly one row.
    new_sharding_spec = ShardingSpec(dim_size=original_tensor.dim(), dim_partition_dict={0: [0, 1]})
    d_tensor = redistribute(d_tensor, device_mesh, new_sharding_spec)
    if rank not in (0, 1, 2, 3):
        raise ValueError(f"rank {rank} is not in the device mesh")
    assert d_tensor.equal(original_tensor.narrow(0, rank, 1))

    # Distributing the full tensor directly must give the same one-row shard.
    dtensor_from_local = distribute_tensor(original_tensor, device_mesh, new_sharding_spec)
    if rank not in (0, 1, 2, 3):
        raise ValueError(f"rank {rank} is not in the device mesh")
    assert dtensor_from_local.equal(original_tensor.narrow(0, rank, 1))
@rerun_if_address_is_in_use()
def test_dtensor():
    """Spawn four processes and run the DTensor end-to-end check."""
    spawn(check_dtensor, 4)
# Allow running the check directly without pytest.
if __name__ == "__main__":
    test_dtensor()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_booster/test_accelerator.py | tests/test_booster/test_accelerator.py | import torch.nn as nn
from colossalai.booster.accelerator import Accelerator
from colossalai.testing import clear_cache_before_run, parameterize
@clear_cache_before_run()
@parameterize("device", ["cpu", "cuda"])
def test_accelerator(device):
    """A model configured by Accelerator must land on the requested device."""
    accelerator = Accelerator(device)
    model = accelerator.configure_model(nn.Linear(8, 8))
    # The parameters' device type reveals where the model actually lives.
    assert next(model.parameters()).device.type == device
    del model, accelerator
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_booster/test_plugin/test_torch_ddp_plugin.py | tests/test_booster/test_plugin/test_torch_ddp_plugin.py | from contextlib import nullcontext
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import SGD
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import TorchDDPPlugin
from colossalai.interface import OptimizerWrapper
from colossalai.testing import clear_cache_before_run, rerun_if_address_is_in_use, spawn
from tests.kit.model_zoo import COMMON_MODELS, IS_FAST_TEST, model_zoo
@clear_cache_before_run()
def run_fn(model_fn, data_gen_fn, output_transform_fn):
    """Boost one model-zoo entry with TorchDDPPlugin and run a full train step.

    Args:
        model_fn: zero-arg factory returning the model under test.
        data_gen_fn: zero-arg factory returning a kwargs dict of model inputs.
        output_transform_fn: maps the raw model output to a dict of tensors.
    """
    plugin = TorchDDPPlugin()
    booster = Booster(plugin=plugin)
    model = model_fn()
    optimizer = SGD(model.parameters(), lr=1e-3)
    criterion = lambda x: x.mean()
    data = data_gen_fn()
    # Move tensors (including subclasses whose class name contains "Tensor") to GPU.
    data = {k: v.to("cuda") if torch.is_tensor(v) or "Tensor" in v.__class__.__name__ else v for k, v in data.items()}
    model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)
    # DDP wrapping and optimizer wrapping are part of the plugin's contract.
    assert isinstance(model.module, DDP)
    assert isinstance(optimizer, OptimizerWrapper)
    output = model(**data)
    output = output_transform_fn(output)
    # First key of the transformed output; next(iter(...)) avoids building a list.
    output_key = next(iter(output))
    loss = criterion(output[output_key])
    booster.backward(loss, optimizer)
    optimizer.clip_grad_by_norm(1.0)
    optimizer.step()
def check_torch_ddp_plugin():
    """Run the DDP train-step smoke test over the model zoo (or its fast subset)."""
    registry = model_zoo.get_sub_registry(COMMON_MODELS) if IS_FAST_TEST else model_zoo
    for name, (model_fn, data_gen_fn, output_transform_fn, _, _) in registry.items():
        # Skip models known to be incompatible with this plugin.
        if name in ("dlrm_interactionarch", "transformers_mixtral") or name.startswith("simple_"):
            continue
        run_fn(model_fn, data_gen_fn, output_transform_fn)
        torch.cuda.empty_cache()
class DummyModel(nn.Module):
    """Single-parameter model: scales its input by one learnable scalar."""

    def __init__(self):
        super().__init__()
        self.weight = nn.Parameter(torch.rand(1))

    def forward(self, x):
        scaled = self.weight * x
        return scaled
def check_torch_ddp_no_sync():
    """Verify that ``booster.no_sync`` suppresses DDP gradient synchronization.

    Trains a one-parameter model on two batches: the first under ``no_sync``
    (grads must differ across ranks), the second normally (grads must agree).
    """
    plugin = TorchDDPPlugin()
    booster = Booster(plugin=plugin)
    model = DummyModel()
    criterion = lambda x: x.mean()
    optimizer = SGD(model.parameters(), lr=1e-3)
    # create a custom dataset with 0 to 10
    dataset = torch.arange(0, 10)
    train_dataloader = plugin.prepare_dataloader(dataset, batch_size=2)
    model, optimizer, criterion, train_dataloader, _ = booster.boost(
        model, optimizer, criterion, dataloader=train_dataloader
    )

    def fwd_bwd():
        # NOTE: closes over the loop variable `batch` assigned in the loop below
        # (late binding) -- it always sees the current batch.
        output = model(batch.cuda())
        loss = criterion(output)
        booster.backward(loss, optimizer)

    def get_grad_set_over_all_ranks():
        # Gather the scalar gradient from every rank and return the distinct values.
        for p in model.parameters():
            # grad shape is (1, )
            assert p.grad.shape == (1,)
            grad_list = [torch.empty_like(p.grad) for _ in range(dist.get_world_size())]
            dist.all_gather(grad_list, p.grad)
            # get grad set of all ranks
            grad_set = set([grad.item() for grad in grad_list])
            # as the model only has one parameter, we can return here
            return grad_set

    for i, batch in enumerate(train_dataloader):
        if i > 1:
            # only check the first two batches
            break
        # no_sync for the first batch, sync for the second batch
        ctx = booster.no_sync(model) if i == 0 else nullcontext()
        with ctx:
            fwd_bwd()
        grad_set = get_grad_set_over_all_ranks()
        # for the first batch, all ranks should have different grads;
        # for the second batch, as grad is synchronized, all ranks should have the same grads
        target_num_different_grad = dist.get_world_size() if i == 0 else 1
        assert len(grad_set) == target_num_different_grad
def run_dist(rank, world_size, port):
    """Per-process entry: initialize the distributed env, then run both DDP checks."""
    colossalai.launch(rank=rank, world_size=world_size, port=port, host="localhost")
    check_torch_ddp_plugin()
    check_torch_ddp_no_sync()
@rerun_if_address_is_in_use()
def test_torch_ddp_plugin():
    """Exercise the TorchDDP plugin checks on two spawned processes."""
    spawn(run_dist, 2)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_booster/test_plugin/test_3d_plugin.py | tests/test_booster/test_plugin/test_3d_plugin.py | import copy
from contextlib import nullcontext
from typing import Optional
import torch
import torch.distributed as dist
from torch.testing import assert_close
from torch.utils.data import Dataset
import colossalai
from colossalai.accelerator import get_accelerator
from colossalai.booster import Booster
from colossalai.booster.plugin import HybridParallelPlugin
from colossalai.fx import is_compatible_with_meta
from colossalai.lazy.lazy_init import LazyInitContext
from colossalai.nn.optimizer import HybridAdam
from colossalai.testing import clear_cache_before_run, parameterize, rerun_if_address_is_in_use, spawn
from colossalai.utils import set_seed
from tests.kit.model_zoo import model_zoo
class RandomDataset(Dataset):
    """Synthetic LM dataset: fixed random token ids with an all-ones attention mask."""

    def __init__(self, num_samples: int = 100, max_length: int = 512, vocab_size: int = 32000):
        self.num_samples = num_samples
        self.max_length = max_length
        # Fixed seed so every process generates identical data.
        set_seed(42)
        self.input_ids = torch.randint(
            0, vocab_size, (num_samples, max_length), device=get_accelerator().get_current_device()
        )
        self.attention_mask = torch.ones_like(self.input_ids)

    def __len__(self):
        return self.num_samples

    def __getitem__(self, idx):
        # Labels equal the inputs (the causal-LM shift is done inside the model).
        return {
            "input_ids": self.input_ids[idx],
            "attention_mask": self.attention_mask[idx],
            "labels": self.input_ids[idx],
        }
def move_to_cuda(batch):
    """Return a copy of *batch* with every tensor value moved to the GPU."""
    return {key: value.cuda() for key, value in batch.items()}
@clear_cache_before_run()
def run_fn(init_method, model_fn, data_gen_fn, output_transform_fn) -> Optional[str]:
    """Boost one model with the 3D hybrid plugin and run one pipeline step.

    Args:
        init_method: "lazy" to build the model under LazyInitContext, else eager.
        model_fn / data_gen_fn / output_transform_fn: model-zoo entry callables.

    Returns:
        ``None`` on success, otherwise ``repr`` of the raised exception so the
        caller can aggregate failures instead of aborting the whole sweep.
    """
    try:
        ctx = LazyInitContext() if init_method == "lazy" else nullcontext()
        plugin = HybridParallelPlugin(tp_size=2, pp_size=2, num_microbatches=4, precision="bf16")
        booster = Booster(plugin=plugin)
        with ctx:
            model = model_fn()
        optimizer = HybridAdam(model.parameters(), lr=1e-3)
        criterion = lambda x: x.mean()
        data = data_gen_fn()
        # Repeat the batch 4x so it can be split into the 4 pipeline microbatches.
        data = {
            k: v.to("cuda").repeat(4, 1) if torch.is_tensor(v) or "Tensor" in v.__class__.__name__ else v
            for k, v in data.items()
        }
        model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)
        data_iter = iter([data])

        def _criterion(outputs, inputs):
            outputs = output_transform_fn(outputs)
            # First key of the transformed output; next(iter(...)) avoids a list.
            output_key = next(iter(outputs))
            return criterion(outputs[output_key])

        booster.execute_pipeline(data_iter, model, _criterion, optimizer, return_loss=True)
        optimizer.step()
        grad_norm = optimizer.get_grad_norm()
        assert grad_norm is None or isinstance(grad_norm, float)
    except Exception as e:
        return repr(e)
@parameterize("init_method", ["none", "lazy"])
def check_3d_plugin(init_method: str = "none", early_stop: bool = True):
    """check hybrid plugin over model zoo

    Args:
        early_stop (bool, optional): Whether to stop when getting the first error. Defaults to True.
    """
    # Lazy init requires meta-tensor support; skip the lazy pass otherwise.
    if init_method == "lazy" and not is_compatible_with_meta():
        return
    passed_models = []
    failed_info = {}  # (model_name, error) pair
    # TODO(ver217): add more models
    registry = model_zoo.get_sub_registry("transformers_llama_for_causal_lm")
    for name, (model_fn, data_gen_fn, output_transform_fn, _, _) in registry.items():
        err = run_fn(init_method, model_fn, data_gen_fn, output_transform_fn)
        if err is None:
            passed_models.append(name)
        else:
            failed_info[name] = err
            if early_stop:
                break
    if dist.get_rank() == 0:
        print(f"Init method: {init_method}")
        print(f"Passed models({len(passed_models)}): {passed_models}\n\n")
        print(f"Failed models({len(failed_info)}): {list(failed_info.keys())}\n\n")
    assert len(failed_info) == 0, "\n".join([f"{k}: {v}" for k, v in failed_info.items()])
# Each config exercises a different (tp, pp, zero) combination with gradient
# accumulation; all use fp16 with a fixed initial loss scale.
@parameterize(
    "test_args",
    [
        {
            "batch_size": 8,
            "num_steps": 4,
            "tp": 2,
            "pp": 2,
            "pp_style": "1f1b",
            "num_model_chunks": 1,
            "num_microbatches": 4,
            "zero": 1,
            "precision": "fp16",
            "initial_scale": 1,
            "max_length": 512,
            "gradient_accumulation_step": 2,
        },
        {
            "batch_size": 8,
            "num_steps": 4,
            "tp": 2,
            "pp": 2,
            "pp_style": "1f1b",
            "num_model_chunks": 1,
            "num_microbatches": 4,
            "zero": 0,
            "precision": "fp16",
            "initial_scale": 1,
            "max_length": 512,
            "gradient_accumulation_step": 2,
        },
        {
            "batch_size": 8,
            "num_steps": 4,
            "tp": 1,
            "pp": 2,
            "pp_style": "1f1b",
            "num_model_chunks": 1,
            "num_microbatches": 4,
            "zero": 1,
            "precision": "fp16",
            "initial_scale": 1,
            "max_length": 512,
            "gradient_accumulation_step": 2,
        },
        {
            "batch_size": 1,
            "num_steps": 4,
            "tp": 2,
            "pp": 1,
            "pp_style": "1f1b",
            "num_model_chunks": 1,
            "num_microbatches": 1,
            "zero": 2,
            "precision": "fp16",
            "initial_scale": 1,
            "max_length": 512,
            "gradient_accumulation_step": 2,
        },
        {
            "batch_size": 1,
            "num_steps": 4,
            "tp": 2,
            "pp": 1,
            "pp_style": "1f1b",
            "num_model_chunks": 1,
            "num_microbatches": 1,
            "zero": 0,
            "precision": "fp16",
            "initial_scale": 1,
            "max_length": 512,
            "gradient_accumulation_step": 2,
        },
    ],
)
def run_grad_acc_test(test_args):
    """Train a GPT model with gradient accumulation under the hybrid plugin and
    check its parameters stay close to an unsharded reference model trained
    with the same accumulation schedule.
    """
    model_fn, *_ = next(iter(model_zoo.get_sub_registry("transformers_gpt_lm").values()))
    model = model_fn()
    optimizer = HybridAdam(model.parameters())
    # Unsharded reference copy trained in lockstep.
    origin_model = copy.deepcopy(model).cuda()
    origin_optimizer = HybridAdam(origin_model.parameters())
    plugin = HybridParallelPlugin(
        tp_size=test_args["tp"],
        pp_size=test_args["pp"],
        pp_style=test_args["pp_style"],
        zero_stage=test_args["zero"],
        num_model_chunks=test_args["num_model_chunks"],
        enable_fused_normalization=True,
        num_microbatches=test_args["num_microbatches"],
        precision=test_args["precision"],
    )
    booster = Booster(plugin=plugin)
    dataset = RandomDataset(
        num_samples=test_args["batch_size"] * test_args["num_steps"] * plugin.dp_size,
        max_length=test_args["max_length"],
        vocab_size=model.config.vocab_size,
    )
    dataloader = plugin.prepare_dataloader(dataset, batch_size=test_args["batch_size"], shuffle=True, drop_last=True)
    model, optimizer, _, dataloader, _ = booster.boost(model, optimizer, dataloader=dataloader)
    grad_accu_step = test_args["gradient_accumulation_step"]
    for step, batch in enumerate(dataloader):
        batch = move_to_cuda(batch)
        # train origin model
        origin_output = origin_model(**batch)
        origin_loss = origin_output[0] / grad_accu_step
        origin_loss.backward()
        # Skip grad sync on non-boundary steps (zero-2 does not support no_sync).
        if (step + 1) % grad_accu_step != 0 and test_args["zero"] != 2:
            ctx = booster.no_sync(model, optimizer)
        else:
            ctx = nullcontext()
        with ctx:
            if plugin.stage_manager is not None:
                # Pipeline path: feed the batch through the 1f1b schedule.
                batch = iter([batch])
                booster.execute_pipeline(
                    batch,
                    model,
                    criterion=lambda outputs, inputs: outputs[0] / grad_accu_step,
                    optimizer=optimizer,
                    return_loss=False,
                )
            else:
                outputs = model(**batch)
                loss = outputs[0] / grad_accu_step
                booster.backward(loss, optimizer)
        if (step + 1) % grad_accu_step == 0:
            # update origin model weight
            origin_optimizer.step()
            origin_optimizer.zero_grad()
            # update sharded model
            optimizer.step()
            optimizer.zero_grad()
    # tricky code here: shard the origin model in order to compare parameters in the same stage.
    origin_model, origin_optimizer, _, dataloader, _ = booster.boost(
        origin_model, origin_optimizer, dataloader=dataloader
    )
    for p1, p2 in zip(model.unwrap().parameters(), origin_model.unwrap().parameters()):
        assert_close(p1.to(p2.dtype), p2, atol=1e-2, rtol=1e-2)
def run_dist(rank, world_size, port, early_stop: bool = True):
    """Per-process entry: set up the distributed env, then run plugin and grad-acc checks."""
    colossalai.launch(rank=rank, world_size=world_size, port=port, host="localhost")
    check_3d_plugin(early_stop=early_stop)
    run_grad_acc_test()
@rerun_if_address_is_in_use()
def test_3d_plugin(early_stop: bool = True):
    """Run the 3D hybrid-parallel plugin checks on four processes."""
    spawn(run_dist, 4, early_stop=early_stop)
# Run directly (without pytest) and survey all failures instead of stopping early.
if __name__ == "__main__":
    test_3d_plugin(early_stop=False)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_booster/test_plugin/test_dp_plugin_base.py | tests/test_booster/test_plugin/test_dp_plugin_base.py | from typing import Callable, Dict, Iterator, List, Tuple, Union
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
from torch.utils.data import DataLoader, TensorDataset
import colossalai
from colossalai.booster.plugin.dp_plugin_base import DPPluginBase
from colossalai.checkpoint_io import CheckpointIO
from colossalai.interface import OptimizerWrapper
from colossalai.testing import rerun_if_address_is_in_use, spawn
class DPPluginWrapper(DPPluginBase):
    """This is a wrapper class for testing DP plugin initialization and dataloader creation.

    Every abstract hook is stubbed out (implicitly returns None); only the
    ``prepare_dataloader`` behavior inherited from ``DPPluginBase`` is under test.
    """

    def configure(
        self,
        model: nn.Module,
        optimizer: Optimizer,
        criterion: Callable = None,
        dataloader: DataLoader = None,
        lr_scheduler: LRScheduler = None,
    ) -> Tuple[Union[nn.Module, OptimizerWrapper, LRScheduler, DataLoader]]:
        pass

    def control_checkpoint_io(self) -> bool:
        pass

    def control_device(self) -> bool:
        pass

    def control_precision(self) -> bool:
        pass

    def get_checkpoint_io(self) -> CheckpointIO:
        pass

    def support_no_sync(self) -> bool:
        pass

    def supported_devices(self) -> List[str]:
        pass

    def supported_precisions(self) -> List[str]:
        pass

    def no_sync(self, model: nn.Module) -> Iterator[None]:
        pass

    def enable_lora(self, model: nn.Module, pretrained_dir: str, lora_config: Dict) -> nn.Module:
        pass

    def support_lora(self) -> bool:
        pass
def check_dataloader_sharding():
    """Ranks must receive different shards: rank 0's first batch != rank 1's."""
    plugin = DPPluginWrapper()
    # Dataset of the integers 0..9.
    dataset = TensorDataset(torch.arange(0, 10))
    train_dataloader = plugin.prepare_dataloader(dataset, batch_size=2)
    # First batch seen on this rank.
    batch = next(iter(train_dataloader))[0].cuda()
    is_rank_0 = dist.get_rank() == 0
    # Rank 0 keeps a pristine copy; other ranks let the broadcast write in place.
    batch_to_compare = batch.clone() if is_rank_0 else batch
    # Overwrite batch_to_compare with rank 1's batch on every rank.
    dist.broadcast(batch_to_compare, src=1)
    # Only rank 0 compares its own batch against rank 1's.
    if is_rank_0:
        assert not torch.equal(
            batch, batch_to_compare
        ), "Same number was found across ranks but expected it to be different"
def run_dist(rank, world_size, port):
    """Per-process entry: bring up the distributed env and check dataloader sharding."""
    colossalai.launch(rank=rank, world_size=world_size, port=port, host="localhost")
    check_dataloader_sharding()
@rerun_if_address_is_in_use()
def test_dp_plugin_dataloader():
    """Verify dataloader sharding across two spawned ranks."""
    spawn(run_dist, 2)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_booster/test_plugin/test_low_level_zero_plugin.py | tests/test_booster/test_plugin/test_low_level_zero_plugin.py | from typing import Optional
import torch
import torch.distributed as dist
from peft import LoraConfig
from torch.optim import Adam
import colossalai
from colossalai.accelerator import get_accelerator
from colossalai.booster import Booster
from colossalai.booster.plugin import LowLevelZeroPlugin
# from colossalai.nn.optimizer import HybridAdam
from colossalai.testing import clear_cache_before_run, parameterize, rerun_if_address_is_in_use, spawn
from tests.kit.model_zoo import COMMON_MODELS, IS_FAST_TEST, model_zoo
# These models are not compatible with AMP
_AMP_ERR_MODELS = ["timm_convit", "deepfm_interactionarch"]
# These models have no parameters
_LOW_LEVEL_ZERO_ERR_MODELS = ["dlrm_interactionarch"]
# These models hang during the run; to be fixed
_STUCK_MODELS = ["transformers_albert_for_multiple_choice"]
@clear_cache_before_run()
def run_fn(stage, model_fn, data_gen_fn, output_transform_fn, lora_config=None) -> Optional[str]:
    """Boost one model with LowLevelZeroPlugin and run a single train step.

    Args:
        stage: ZeRO stage to configure the plugin with.
        model_fn / data_gen_fn / output_transform_fn: model-zoo entry callables.
        lora_config: optional peft LoraConfig; when given, LoRA is enabled first.

    Returns:
        ``None`` on success, otherwise ``repr`` of the raised exception so the
        caller can aggregate failures instead of aborting the sweep.
    """
    device = get_accelerator().get_current_device()
    try:
        plugin = LowLevelZeroPlugin(stage=stage, max_norm=1.0, initial_scale=2**5)
        booster = Booster(plugin=plugin)
        model = model_fn()
        optimizer = Adam(model.parameters(), lr=1e-3)
        if lora_config is not None:
            model = booster.enable_lora(model, lora_config=lora_config)
        criterion = lambda x: x.mean()
        data = data_gen_fn()
        # Move tensors (including subclasses whose class name contains "Tensor") to the device.
        data = {
            k: v.to(device) if torch.is_tensor(v) or "Tensor" in v.__class__.__name__ else v for k, v in data.items()
        }
        model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)
        output = model(**data)
        output = output_transform_fn(output)
        # First key of the transformed output; next(iter(...)) avoids building a list.
        output_key = next(iter(output))
        loss = criterion(output[output_key])
        booster.backward(loss, optimizer)
        optimizer.step()
        grad_norm = optimizer.get_grad_norm()
        assert grad_norm is None or isinstance(grad_norm, float)
    except Exception as e:
        return repr(e)
@parameterize("stage", [2])
def check_low_level_zero_plugin(stage: int, early_stop: bool = True):
"""check low level zero plugin over model zoo
Args:
stage (int), stage of low level zero plugin
early_stop (bool, optional): Whether to stop when getting the first error. Defaults to True.
"""
passed_models = []
failed_info = {} # (model_name, error) pair
ignore_models = _AMP_ERR_MODELS + _LOW_LEVEL_ZERO_ERR_MODELS + _STUCK_MODELS
skipped_models = []
if IS_FAST_TEST:
registry = model_zoo.get_sub_registry(COMMON_MODELS)
else:
registry = model_zoo
for name, (model_fn, data_gen_fn, output_transform_fn, _, _) in registry.items():
# FIXME(ver217): fix these models
if name in ignore_models:
skipped_models.append(name)
continue
err = run_fn(stage, model_fn, data_gen_fn, output_transform_fn)
get_accelerator().empty_cache()
if err is None:
passed_models.append(name)
else:
failed_info[name] = err
if early_stop:
break
if dist.get_rank() == 0:
print(f"Passed models({len(passed_models)}): {passed_models}\n\n")
print(f"Failed models({len(failed_info)}): {list(failed_info.keys())}\n\n")
print(f"Skipped models({len(skipped_models)}): {skipped_models}\n\n")
assert len(failed_info) == 0, "\n".join([f"{k}: {v}" for k, v in failed_info.items()])
@parameterize("stage", [2])
@parameterize("model_name", ["transformers_llama"])
def check_low_level_zero_lora(stage, model_name, early_stop: bool = True):
passed_models = []
failed_info = {} # (model_name, error) pair
sub_model_zoo = model_zoo.get_sub_registry(model_name)
for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
task_type = None
if name == "transformers_llama_for_causal_lm":
task_type = "CAUSAL_LM"
if name == "transformers_llama_for_sequence_classification":
task_type = "SEQ_CLS"
lora_config = LoraConfig(task_type=task_type, r=8, lora_alpha=32, lora_dropout=0.1)
err = run_fn(stage, model_fn, data_gen_fn, output_transform_fn, lora_config)
torch.cuda.empty_cache()
if err is None:
passed_models.append(name)
else:
failed_info[name] = err
if early_stop:
break
if dist.get_rank() == 0:
print(f"Passed models({len(passed_models)}): {passed_models}\n\n")
print(f"Failed models({len(failed_info)}): {list(failed_info.keys())}\n\n")
assert len(failed_info) == 0, "\n".join([f"{k}: {v}" for k, v in failed_info.items()])
def run_dist(rank, world_size, port, early_stop: bool = True):
# init dist env
colossalai.launch(rank=rank, world_size=world_size, port=port, host="localhost")
check_low_level_zero_plugin(early_stop=early_stop)
check_low_level_zero_lora(early_stop=early_stop)
@rerun_if_address_is_in_use()
def test_low_level_zero_plugin(early_stop: bool = True):
spawn(run_dist, 2, early_stop=early_stop)
if __name__ == "__main__":
test_low_level_zero_plugin(early_stop=False)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_booster/test_plugin/test_torch_fsdp_plugin.py | tests/test_booster/test_plugin/test_torch_fsdp_plugin.py | import pytest
import torch
from packaging import version
from torch.optim import SGD
import colossalai
from colossalai.booster import Booster
if version.parse(torch.__version__) >= version.parse("1.12.0"):
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from colossalai.booster.plugin import TorchFSDPPlugin
from colossalai.interface import OptimizerWrapper
from colossalai.testing import clear_cache_before_run, rerun_if_address_is_in_use, spawn
from tests.kit.model_zoo import COMMON_MODELS, IS_FAST_TEST, model_zoo
# test basic fsdp function
@clear_cache_before_run()
def run_fn(model_fn, data_gen_fn, output_transform_fn):
plugin = TorchFSDPPlugin()
booster = Booster(plugin=plugin)
model = model_fn()
optimizer = SGD(model.parameters(), lr=1e-3)
criterion = lambda x: x.mean()
data = data_gen_fn()
data = {k: v.to("cuda") if torch.is_tensor(v) or "Tensor" in v.__class__.__name__ else v for k, v in data.items()}
model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)
assert isinstance(model.module, FSDP)
assert isinstance(optimizer, OptimizerWrapper)
output = model(**data)
output = output_transform_fn(output)
output_key = list(output.keys())[0]
loss = criterion(output[output_key])
booster.backward(loss, optimizer)
optimizer.clip_grad_by_norm(1.0)
optimizer.step()
del model
del optimizer
del criterion
del booster
del plugin
def check_torch_fsdp_plugin():
if IS_FAST_TEST:
registry = model_zoo.get_sub_registry(COMMON_MODELS)
else:
registry = model_zoo.get_sub_registry("transformers_gptj")
for name, (model_fn, data_gen_fn, output_transform_fn, _, _) in registry.items():
if any(
element in name
for element in [
"diffusers",
"deepfm_sparsearch",
"dlrm_interactionarch",
"torchvision_googlenet",
"torchvision_inception_v3",
]
):
continue
print(name)
run_fn(model_fn, data_gen_fn, output_transform_fn)
torch.cuda.empty_cache()
def run_dist(rank, world_size, port):
# init dist env
colossalai.launch(rank=rank, world_size=world_size, port=port, host="localhost")
check_torch_fsdp_plugin()
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse("1.12.0"), reason="requires torch1.12 or higher")
@rerun_if_address_is_in_use()
def test_torch_fsdp_plugin():
spawn(run_dist, 2)
if __name__ == "__main__":
test_torch_fsdp_plugin()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_booster/test_plugin/test_gemini_plugin.py | tests/test_booster/test_plugin/test_gemini_plugin.py | from contextlib import nullcontext
from typing import Optional
import torch
import torch.distributed as dist
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin
from colossalai.fx import is_compatible_with_meta
from colossalai.lazy.lazy_init import LazyInitContext
from colossalai.nn.optimizer import HybridAdam
from colossalai.tensor.colo_parameter import ColoParameter
from colossalai.testing import clear_cache_before_run, parameterize, rerun_if_address_is_in_use, spawn
from tests.kit.model_zoo import COMMON_MODELS, IS_FAST_TEST, model_zoo
@clear_cache_before_run()
def run_fn(init_method, model_fn, data_gen_fn, output_transform_fn, zero_size, tp_size) -> Optional[str]:
try:
if init_method == "lazy":
ctx = LazyInitContext()
else:
ctx = nullcontext()
extra_dp_size = dist.get_world_size() // (zero_size * tp_size)
enable_all_optimization = True if tp_size > 1 else False
plugin = GeminiPlugin(
max_norm=1.0,
initial_scale=2**5,
tp_size=tp_size,
extra_dp_size=extra_dp_size,
enable_all_optimization=enable_all_optimization,
)
booster = Booster(plugin=plugin)
with ctx:
model = model_fn()
optimizer = HybridAdam(model.parameters(), lr=1e-3)
criterion = lambda x: x.mean()
data = data_gen_fn()
data = {
k: v.to("cuda") if torch.is_tensor(v) or "Tensor" in v.__class__.__name__ else v for k, v in data.items()
}
model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)
for n, p in model.named_parameters():
assert isinstance(p, ColoParameter), f"{n} is not a ColoParameter"
output = model(**data)
output = output_transform_fn(output)
output_key = list(output.keys())[0]
loss = criterion(output[output_key])
booster.backward(loss, optimizer)
optimizer.step()
grad_norm = optimizer.get_grad_norm()
assert grad_norm is None or isinstance(grad_norm, float)
except NotImplementedError:
print(f"Tensor Parallelism policy for {model.__class__} is not implemented yet\n.")
except Exception as e:
# raise e
return repr(e)
# TODO(ver217): CI does not support lazy now
# @parameterize('init_method', ['lazy', 'none', 'colo'])
@parameterize("subset", [COMMON_MODELS] if IS_FAST_TEST else ["torchvision", "transformers", "diffusers"])
@parameterize("init_method", ["none"])
@parameterize("zero_size", [2])
@parameterize("tp_size", [2])
def check_gemini_plugin(
subset: str, init_method: str = "none", early_stop: bool = True, zero_size: int = 1, tp_size: int = 1
):
"""check gemini plugin over model zoo
Args:
early_stop (bool, optional): Whether to stop when getting the first error. Defaults to True.
"""
is_support_meta = is_compatible_with_meta()
if not is_support_meta and init_method == "lazy":
return
passed_models = []
failed_info = {} # (model_name, error) pair
for name, (model_fn, data_gen_fn, output_transform_fn, _, _) in model_zoo.get_sub_registry(subset).items():
# These models lead to CUDA error
if name in (
"diffusers_auto_encoder_kl",
"diffusers_vq_model",
"diffusers_unet2d_model",
"timm_resmlp",
"timm_gmixer_12_224",
"timm_gmlp_b16_224",
"timm_mixer_b16_224",
"timm_convnext",
"torchvision_convnext_base",
):
continue
# These models are not compatible with gemini
if name in [
"timm_convit",
"timm_dm_nfnet",
"torchvision_vit_b_16",
"transformers_t5",
"transformers_t5_for_conditional_generation",
"transformers_t5_encoder_model", # does not support apex rmsnorm
"transformers_chatglm",
"transformers_sam",
"transformers_vit",
"transformers_gpt_double_heads", # TODO check why does the model fail to run using Gemini
"transformers_falcon", # TODO check why falcon fails to run Gemini
"transformers_falcon_for_causal_lm",
"transformers_falcon_for_sequence_classification",
"transformers_falcon_for_token_classification",
"transformers_falcon_for_question_answering",
"transformers_gptj_lm", # lead to OOM when running in ci
"transformers_gptj_for_question_answering",
"transformers_gptj_for_sequence_classification",
]:
continue
if init_method == "lazy" and name in [
"timm_convmixer",
"timm_vision_transformer",
"timm_deit",
"timm_deit3",
"timm_inception_v3",
"timm_tnt_b_patch16_224",
"timm_rexnet",
"torchvision_densenet121",
"torchvision_efficientnet_b0",
"torchvision_mobilenet_v2",
"torchvision_mnasnet0_5",
"torchvision_regnet_x_16gf",
"torchvision_shufflenet_v2_x0_5",
"torchvision_efficientnet_v2_s",
]:
continue
# TODO debug blip2 when using tp, something wrong with shift_logits's shape
if "transformers_blip2" in name:
tp_size = 1
err = run_fn(init_method, model_fn, data_gen_fn, output_transform_fn, zero_size, tp_size)
if err is None:
passed_models.append(name)
else:
failed_info[name] = err
if early_stop:
break
if dist.get_rank() == 0:
print(f"Init method: {init_method}")
print(f"Passed models({len(passed_models)}): {passed_models}\n\n")
print(f"Failed models({len(failed_info)}): {list(failed_info.keys())}\n\n")
assert len(failed_info) == 0, "\n".join([f"{k}: {v}" for k, v in failed_info.items()])
def run_dist(rank, world_size, port, early_stop: bool = True):
# init dist env
colossalai.launch(rank=rank, world_size=world_size, port=port, host="localhost")
check_gemini_plugin(early_stop=early_stop)
@rerun_if_address_is_in_use()
def test_gemini_plugin(early_stop: bool = True):
spawn(run_dist, 4, early_stop=early_stop)
if __name__ == "__main__":
test_gemini_plugin(early_stop=False)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_booster/test_mixed_precision/test_fp16_torch.py | tests/test_booster/test_mixed_precision/test_fp16_torch.py | import torch
from torch.optim import Adam
import colossalai
from colossalai.booster.mixed_precision import FP16TorchMixedPrecision
from colossalai.testing import rerun_if_address_is_in_use, spawn
from tests.kit.model_zoo import model_zoo
def run_torch_amp(rank, world_size, port):
# init dist env
colossalai.launch(rank=rank, world_size=world_size, port=port, host="localhost")
sub_model_zoo = model_zoo.get_sub_registry("timm")
for name, (model_fn, data_gen_fn, output_transform_fn, _, _) in sub_model_zoo.items():
# dlrm_interactionarch has not parameters, so skip
if name == "dlrm_interactionarch":
continue
model = model_fn().cuda()
optimizer = Adam(model.parameters(), lr=1e-3)
criterion = lambda x: x.mean()
data = data_gen_fn()
data = {
k: v.to("cuda") if torch.is_tensor(v) or "Tensor" in v.__class__.__name__ else v for k, v in data.items()
}
mixed_precision = FP16TorchMixedPrecision()
model, optimizer, criterion = mixed_precision.configure(model, optimizer, criterion)
output = model(**data)
output = output_transform_fn(output)
output_key = list(output.keys())[0]
loss = criterion(output[output_key])
optimizer.backward(loss)
optimizer.clip_grad_by_norm(1.0)
optimizer.step()
del model, optimizer, criterion, data, output, mixed_precision
@rerun_if_address_is_in_use()
def test_torch_ddp_plugin():
spawn(run_torch_amp, 1)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_shardformer/test_with_torch_ddp.py | tests/test_shardformer/test_with_torch_ddp.py | from contextlib import nullcontext
import pytest
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
import colossalai
from colossalai.cluster import DistCoordinator
from colossalai.lazy import LazyInitContext
from colossalai.logging import disable_existing_loggers
from colossalai.shardformer import ShardConfig, ShardFormer
from colossalai.testing import clear_cache_before_run, parameterize, rerun_if_address_is_in_use, spawn
from tests.kit.model_zoo import model_zoo
@parameterize("lazy_init", [True, False])
def check_shardformer_with_ddp(lazy_init: bool):
sub_model_zoo = model_zoo.get_sub_registry("transformers_gpt", exclude="transformers_gptj")
# create shardformer
# ranks: [0, 1, 2, 3]
# tp ranks = [0, 1], [2, 3]
# dp ranks = [0, 2], [1, 3]
dp_process_group_1 = dist.new_group([0, 2])
dp_process_group_2 = dist.new_group([1, 3])
tp_process_group_1 = dist.new_group([0, 1])
tp_process_group_2 = dist.new_group([2, 3])
coordinator = DistCoordinator()
if coordinator.rank in [0, 1]:
tp_process_group = tp_process_group_1
else:
tp_process_group = tp_process_group_2
if coordinator.rank in [0, 2]:
dp_process_group = dp_process_group_1
else:
dp_process_group = dp_process_group_2
shard_config = ShardConfig(tensor_parallel_process_group=tp_process_group, enable_fused_normalization=True)
shardformer = ShardFormer(shard_config=shard_config)
ctx = LazyInitContext() if lazy_init else nullcontext()
for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
# create and shard model
with ctx:
model = model_fn().cuda()
sharded_model, _ = shardformer.optimize(model)
# add ddp
sharded_ddp_model = DDP(sharded_model, process_group=dp_process_group)
# prepare input
data = data_gen_fn()
data = {k: v.cuda() for k, v in data.items()}
# switch to train mode
sharded_ddp_model.train()
# run forward
output = sharded_ddp_model(**data)
loss = loss_fn(output)
# backward
loss.backward()
torch.cuda.empty_cache()
def run_dist(rank, world_size, port):
disable_existing_loggers()
colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
check_shardformer_with_ddp()
@pytest.mark.dist
@rerun_if_address_is_in_use()
@clear_cache_before_run()
def test_gpt2():
spawn(run_dist, 4)
if __name__ == "__main__":
test_gpt2()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_shardformer/test_flash_attention.py | tests/test_shardformer/test_flash_attention.py | import math
from copy import copy
import torch
from torch.testing import assert_close
from colossalai.kernel.kernel_loader import FlashAttentionLoader, FlashAttentionWithCustomMaskLoader
from colossalai.shardformer.layer import AttnMaskType, ColoAttention
from colossalai.shardformer.layer.attn import invert_mask
from colossalai.testing import clear_cache_before_run, parameterize
from colossalai.utils import get_current_device, set_seed
DTYPE = [torch.float16, torch.bfloat16]
B, N, S, D = 2, 8, 256, 32
TOL_MAP = {
torch.float16: {"atol": 5e-4, "rtol": 2e-3},
torch.bfloat16: {},
}
def attention_ref(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, attn_mask=None, dropout_p=0.0):
head_dim = q.size(-1)
attn_weights = torch.matmul(q, k.transpose(2, 3)) / math.sqrt(head_dim)
if attn_mask is not None:
attn_weights = attn_weights + attn_mask
attn_weights = torch.softmax(attn_weights, dim=-1, dtype=torch.float).to(q.dtype)
attn_weights = torch.dropout(attn_weights, p=dropout_p, train=True)
attn_output = torch.matmul(attn_weights, v)
return attn_output
def gen_padded_kwargs(dtype: torch.dtype):
padding_mask = torch.ones((B, S), dtype=torch.int, device=get_current_device())
padding_mask[0, : S // 4] = 0
return (
ColoAttention.prepare_attn_kwargs((B, 1, S, S), dtype, padding_mask.device, q_padding_mask=padding_mask),
padding_mask,
)
def gen_padded_causal_kwargs(dtype: torch.dtype):
padding_mask = torch.ones((B, S), dtype=torch.int, device=get_current_device())
padding_mask[0, S // 2 :] = 0
return (
ColoAttention.prepare_attn_kwargs(
(B, 1, S, S), dtype, padding_mask.device, q_padding_mask=padding_mask, is_causal=True
),
padding_mask,
)
def gen_causal_kwargs(dtype: torch.dtype):
return ColoAttention.prepare_attn_kwargs((B, 1, S, S), dtype, get_current_device(), is_causal=True), None
def gen_custom_kwargs(dtype: torch.dtype):
attn_mask = torch.ones((B, S, S), dtype=dtype, device=get_current_device())
attn_mask[0, : S // 2, S // 2 :] = 0
attn_mask[0, S // 2 :, : S // 2] = 0
attn_mask[1, :, S // 4 :] = 0
attn_mask = invert_mask(attn_mask).unsqueeze(1)
assert not torch.all(attn_mask != 0, dim=-1).any()
return {"attention_mask": attn_mask}, None
def post_process_kwargs_for_raw_attn(attn_kwargs: dict):
if "attention_mask_type" in attn_kwargs:
attn_kwargs = copy(attn_kwargs)
mask_type = attn_kwargs.pop("attention_mask_type")
attn_kwargs["is_causal"] = mask_type in (AttnMaskType.CAUSAL, AttnMaskType.PADDED_CAUSAL)
return attn_kwargs
def check_attn_func(dtype: torch.dtype, attn_func, attn_kwargs: dict, padding_mask=None):
tols = TOL_MAP[dtype]
q = torch.rand((B, N, S, D), dtype=dtype, device=get_current_device(), requires_grad=True)
k = torch.rand((B, N, S, D), dtype=dtype, device=get_current_device(), requires_grad=True)
v = torch.rand((B, N, S, D), dtype=dtype, device=get_current_device(), requires_grad=True)
q_flash = q.clone().detach().requires_grad_(True)
k_flash = k.clone().detach().requires_grad_(True)
v_flash = v.clone().detach().requires_grad_(True)
attn_mask = attn_kwargs.get("attention_mask", None)
ref_output = attention_ref(q, k, v, attn_mask)
output = attn_func(q_flash, k_flash, v_flash, **attn_kwargs)
if padding_mask is not None:
# [B, Sq] -> [B, 1, Sq, 1]
padding_mask = padding_mask[:, None, :, None].logical_not()
ref_output = ref_output.masked_fill(padding_mask, 0)
output = output.masked_fill(padding_mask, 0)
assert_close(output, ref_output, **tols)
output.mean().backward()
ref_output.mean().backward()
assert_close(q.grad, q_flash.grad, **tols)
assert_close(k.grad, k_flash.grad, **tols)
assert_close(v.grad, v_flash.grad, **tols)
@clear_cache_before_run()
@parameterize("dtype", DTYPE)
def test_flash_attn_func(dtype: torch.dtype):
torch.backends.cudnn.deterministic = True
set_seed(0)
# (func, name, need_postprocess)
avail_attn_funcs = [(ColoAttention.attention, "coloattn", False)]
avail_custom_mask_attn_funcs = [(ColoAttention.attention, "coloattn", False)]
avail_padding_mask_attn_funcs = [(ColoAttention.attention, "coloattn", False)]
for ext_cls in FlashAttentionLoader.REGISTRY:
ext = ext_cls()
if ext.is_available():
ext.assert_compatible()
avail_attn_funcs.append((ext.load(), ext.name, True))
for ext_cls in FlashAttentionWithCustomMaskLoader.REGISTRY:
ext = ext_cls()
if ext.is_available():
ext.assert_compatible()
avail_custom_mask_attn_funcs.append((ext.load(), ext.name, True))
test_sets = {
"none": (lambda dtype: ({}, None), avail_attn_funcs),
"padded": (gen_padded_kwargs, avail_padding_mask_attn_funcs),
"padded_causal": (gen_padded_causal_kwargs, avail_padding_mask_attn_funcs),
"causal": (gen_causal_kwargs, avail_attn_funcs),
"custom": (gen_custom_kwargs, avail_custom_mask_attn_funcs),
}
for mask_type, (gen_kwargs_func, attn_funcs) in test_sets.items():
attn_kwargs, padding_mask = gen_kwargs_func(dtype)
for attn_func, name, need_postprocess in attn_funcs:
print(f"{dtype}, {name}, {mask_type}")
if mask_type == "padded":
pass
if need_postprocess:
check_attn_func(dtype, attn_func, post_process_kwargs_for_raw_attn(attn_kwargs), padding_mask)
else:
check_attn_func(dtype, attn_func, attn_kwargs, padding_mask)
if __name__ == "__main__":
test_flash_attn_func()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_shardformer/__init__.py | tests/test_shardformer/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_shardformer/test_shard_utils.py | tests/test_shardformer/test_shard_utils.py | import torch
import torch.nn as nn
from colossalai.shardformer.shard.utils import set_tensors_to_none
class Net(nn.Module):
def __init__(self) -> None:
super().__init__()
self.layers = nn.Sequential(nn.Linear(1, 2), nn.Linear(2, 3))
self.out = nn.Linear(3, 1)
def test_release_layer():
orig_cuda_allocated = torch.cuda.memory_allocated()
model = Net().cuda()
set_tensors_to_none(model, exclude={model.layers[0]})
assert model.layers[1].weight is None
assert model.layers[1].bias is None
assert model.out.weight is None
assert model.out.bias is None
set_tensors_to_none(model)
assert model.layers[0].weight is None
assert model.layers[0].bias is None
assert len(list(model.parameters())) == 0
assert torch.cuda.memory_allocated() == orig_cuda_allocated
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_shardformer/test_layer/test_dropout.py | tests/test_shardformer/test_layer/test_dropout.py | import torch
import torch.distributed as dist
import torch.nn as nn
import colossalai
from colossalai.shardformer.layer import DropoutForParallelInput, DropoutForReplicatedInput
from colossalai.testing import assert_equal, assert_not_equal, rerun_if_address_is_in_use, spawn
def check_dropout_parallel_input():
dropout = nn.Dropout().cuda()
dropout_1d = DropoutForParallelInput.from_native_module(dropout, process_group=None)
# check computation correctness
x = torch.rand(4, 128).cuda()
# we set seed so that dropout will generate the same mask
torch.cuda.manual_seed(1024)
out = dropout(x)
# we set seed to simulate the same scenario
# but expect the dropout mask to be different
# due to the internal randomness control
torch.cuda.manual_seed(1024)
out_1d = dropout_1d(x)
# ensure out is the same across all ranks
world_size = dist.get_world_size()
out_all = [torch.empty_like(out) for _ in range(world_size)]
dist.all_gather(out_all, out)
for i in range(world_size):
assert_equal(out_all[i], out_all[0])
# ensure out_1d is different across ranks
out_1d_all = [torch.zeros_like(out_1d) for _ in range(world_size)]
dist.all_gather(out_1d_all, out_1d)
for i in range(1, world_size):
assert_not_equal(out_1d_all[i], out_1d_all[0])
def check_dropout_replicated_input():
dropout = nn.Dropout().cuda()
dropout_replica = DropoutForReplicatedInput.from_native_module(dropout, process_group=None)
# check computation correctness
x = torch.rand(4, 128).cuda()
out_1d = dropout_replica(x)
# ensure out_1d is different across ranks
world_size = dist.get_world_size()
out_1d_all = [torch.zeros_like(out_1d) for _ in range(world_size)]
dist.all_gather(out_1d_all, out_1d)
for i in range(1, world_size):
assert_equal(out_1d_all[i], out_1d_all[0])
def run_dist(rank, world_size, port):
colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
check_dropout_parallel_input()
check_dropout_replicated_input()
@rerun_if_address_is_in_use()
def test_dropout():
spawn(run_dist, nprocs=2)
if __name__ == "__main__":
test_dropout()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_shardformer/test_layer/test_dist_crossentropy.py | tests/test_shardformer/test_layer/test_dist_crossentropy.py | import pytest
import torch
import torch.nn.functional as F
import colossalai
from colossalai.logging import disable_existing_loggers
from colossalai.shardformer.layer import cross_entropy_1d
from colossalai.testing import rerun_if_address_is_in_use, spawn
CONFIG = dict(
parallel=dict(data=1, pipeline=1, tensor=dict(size=2, mode="1d")),
)
def check_dist_crossentropy(rank, world_size, port, ignore_index):
disable_existing_loggers()
colossalai.launch(rank=rank, world_size=world_size, port=port, host="localhost", backend="nccl")
# prepare data
pred = torch.randn(2, 4, 8, requires_grad=True).cuda()
labels = torch.randint(8, (2, 4)).cuda()
# set some label to -100 to test the ignore index
labels[0, -1] = ignore_index
org_pred = pred.view(-1, 8)
org_labels = labels.view(-1)
org_loss = F.cross_entropy(org_pred, org_labels)
pred.retain_grad()
org_loss.backward()
dist_pred = pred.clone().chunk(world_size, -1)[rank].detach()
dist_pred.requires_grad = True
dist_loss = cross_entropy_1d(dist_pred, labels, ignore_index=ignore_index)
dist_pred.retain_grad()
dist_loss.backward()
assert torch.allclose(
org_loss, dist_loss, atol=1e-5
), f"dist cross entropy loss is not equal to orgin loss\n{org_loss}\n{dist_loss}"
target_grad = torch.chunk(pred.grad, world_size, dim=-1)[rank]
assert torch.allclose(
target_grad, dist_pred.grad
), f"dist grad is not equal to orgin grad\n{target_grad}\n{dist_pred.grad}"
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_dist_crossentropy():
ignore_index = -100
spawn(check_dist_crossentropy, 2, ignore_index=ignore_index)
if __name__ == "__main__":
test_dist_crossentropy()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_shardformer/test_layer/test_vocab_parallel_embedding_1d.py | tests/test_shardformer/test_layer/test_vocab_parallel_embedding_1d.py | from contextlib import nullcontext
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.testing import assert_close
import colossalai
from colossalai.lazy import LazyInitContext
from colossalai.shardformer.layer import VocabParallelEmbedding1D
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
@parameterize("lazy_init", [False, True])
def check_vocab_embedding_1d(lazy_init: bool):
ctx = LazyInitContext() if lazy_init else nullcontext()
embedding = nn.Embedding(128, 32).to("cuda")
with ctx:
embedding_copy = nn.Embedding(128, 32).to("cuda")
dist_embedding_1d = VocabParallelEmbedding1D.from_native_module(embedding_copy, process_group=None)
assert dist_embedding_1d.weight.shape == torch.Size([64, 32])
assert dist_embedding_1d.num_embeddings == 128
assert dist_embedding_1d.embedding_dim == 32
assert embedding_copy.weight is dist_embedding_1d.weight
# ensure state dict is reversibly loadable
embedding.load_state_dict(dist_embedding_1d.state_dict())
dist_embedding_1d.load_state_dict(embedding.state_dict())
# check embedding correctness
x = torch.randint(0, 128, (4, 32)).to("cuda")
org_out = embedding(x)
dist_out = dist_embedding_1d(x)
assert_close(org_out, dist_out)
# check backward correctness
org_out.sum().backward()
dist_out.sum().backward()
rank = dist.get_rank()
target_grad = torch.chunk(embedding.weight.grad, 2, dim=0)[rank]
assert_close(target_grad, dist_embedding_1d.weight.grad)
def run_dist(rank, world_size, port):
colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
check_vocab_embedding_1d()
@rerun_if_address_is_in_use()
def test_vocab_embedding():
spawn(run_dist, nprocs=2)
if __name__ == "__main__":
test_vocab_embedding()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_shardformer/test_layer/test_gpt2_qkv_fused_linear_1d.py | tests/test_shardformer/test_layer/test_gpt2_qkv_fused_linear_1d.py | import os
from contextlib import nullcontext
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.testing import assert_close
import colossalai
from colossalai.lazy import LazyInitContext
from colossalai.pipeline.weight_grad_store import WeightGradStore
from colossalai.shardformer.layer import GPT2FusedLinearConv, GPT2FusedLinearConv1D_Col, GPT2FusedLinearConv1D_Row
from colossalai.shardformer.layer.qkv_fused_linear import split_fused_qkv_in_gpt2_style
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
# This code is copied from https://github.com/huggingface/transformers
os.environ["CUDA_DEVICE_MAX_CONNECTIONS"] = "1"
class Conv1D(nn.Module):
"""
1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
Basically works like a linear layer but the weights are transposed.
Args:
nf (`int`): The number of output features.
nx (`int`): The number of input features.
"""
def __init__(self, nf, nx):
super().__init__()
self.nf = nf
self.weight = nn.Parameter(torch.empty(nx, nf))
self.bias = nn.Parameter(torch.zeros(nf))
nn.init.normal_(self.weight, std=0.02)
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(size_out)
return x
def check_linear_conv_1d_col(lazy_init: bool, seq_parallel_mode: str):
ctx = LazyInitContext() if lazy_init else nullcontext()
linear = Conv1D(192, 48).cuda()
with ctx:
linear_copy = Conv1D(192, 48).cuda()
linear_conv_col = GPT2FusedLinearConv1D_Col.from_native_module(
linear_copy,
process_group=None,
gather_output=True,
seq_parallel_mode=seq_parallel_mode,
split_sizes=[64] * 3,
)
assert linear.weight.shape == torch.Size([48, 192])
assert linear.bias.shape == torch.Size([192])
assert linear_conv_col.weight.shape == torch.Size([48, 96])
assert linear_conv_col.bias.shape == torch.Size([96])
assert linear_copy.weight is linear_conv_col.weight
assert linear_copy.bias is linear_conv_col.bias
# ensure weights are reversibly loadable
linear_conv_col.load_state_dict(linear.state_dict())
linear.load_state_dict(linear_conv_col.state_dict())
# check computation correctness
x = torch.rand(1, 4, 48).cuda()
out = linear(x)
x_for_shard = (
x.expand_as(x.clone()) if seq_parallel_mode is None else torch.chunk(x.clone(), 2, dim=1)[dist.get_rank()]
)
gather_out = linear_conv_col(x_for_shard)
assert_close(out, gather_out)
# check backward correctness
out.sum().backward()
gather_out.sum().backward()
target_grad = split_fused_qkv_in_gpt2_style(linear.weight.grad, [64] * 3, None, True)
assert_close(target_grad, linear_conv_col.weight.grad)
def check_linear_conv_1d_row(lazy_init: bool, seq_parallel_mode: str):
    """Compare GPT2FusedLinearConv1D_Row against a plain Conv1D on 2 ranks.

    ``seq_parallel_mode`` is a mode string ("split_gather") or None (annotation
    was previously ``bool``, which did not match the actual values).
    """
    ctx = LazyInitContext() if lazy_init else nullcontext()
    linear = Conv1D(192, 48).cuda()
    with ctx:
        linear_copy = Conv1D(192, 48).cuda()
        linear_row = GPT2FusedLinearConv1D_Row.from_native_module(
            linear_copy, process_group=None, parallel_input=False, seq_parallel_mode=seq_parallel_mode
        )
    # input dimension (48) is split row-wise across 2 ranks
    assert linear.weight.shape == torch.Size([48, 192])
    assert linear_row.weight.shape == torch.Size([24, 192])
    assert linear_row.bias.shape == torch.Size([192])
    assert linear_copy.weight is linear_row.weight
    assert linear_copy.bias is linear_row.bias
    # ensure weights are reversibly loadable
    linear_row.load_state_dict(linear.state_dict())
    linear.load_state_dict(linear_row.state_dict())
    # check computation correctness
    x = torch.rand(1, 4, 48).cuda()
    out = linear(x)
    gather_out = linear_row(x)
    # with sequence parallelism the row-parallel output stays sequence-sharded
    target_out = out if seq_parallel_mode is None else torch.chunk(out.clone(), 2, dim=1)[dist.get_rank()]
    assert_close(target_out, gather_out)
    # check backward correctness
    out.sum().backward()
    gather_out.sum().backward()
    rank = dist.get_rank()
    target_grad = torch.chunk(linear.weight.grad, 2, dim=0)[rank]
    assert_close(target_grad, linear_row.weight.grad)
def check_linear_conv_1d_without_weight_grad_store(lazy_init: bool, seq_parallel_mode: str):
    """Check GPT2FusedLinearConv (no ZBV weight-grad deferral) against Conv1D.

    Fix: the previous version asserted ``assert_close(out.grad, gather_out.grad)``.
    ``out``/``gather_out`` are non-leaf tensors, so ``.grad`` is always None and
    the assertion was vacuous. The inputs are now separate leaf tensors with
    ``requires_grad`` so the input-gradient comparison is meaningful.
    """
    ctx = LazyInitContext() if lazy_init else nullcontext()
    linear = Conv1D(192, 48).cuda()
    with ctx:
        linear_copy = Conv1D(192, 48).cuda()
        linear_base = GPT2FusedLinearConv.from_native_module(linear_copy, seq_parallel_mode=seq_parallel_mode)
    assert linear.weight.shape == torch.Size([48, 192])
    assert linear_base.weight.shape == torch.Size([48, 192])
    assert linear_base.bias.shape == torch.Size([192])
    assert linear_copy.weight is linear_base.weight
    assert linear_copy.bias is linear_base.bias
    # ensure weights are reversibly loadable
    linear_base.load_state_dict(linear.state_dict())
    linear.load_state_dict(linear_base.state_dict())
    # check computation correctness: feed the same values through both modules
    # via two independent leaf tensors so input grads can be compared
    x = torch.rand(1, 4, 48).cuda()
    x_ref = x.clone().requires_grad_(True)
    x_base = x.clone().requires_grad_(True)
    out = linear(x_ref)
    gather_out = linear_base(x_base)
    assert_close(out, gather_out)
    # check backward correctness
    out.sum().backward()
    gather_out.sum().backward()
    # check the input gradients & weight gradients
    assert_close(x_ref.grad, x_base.grad)
    assert_close(linear.weight.grad, linear_base.weight.grad)
def check_linear_conv_1d_with_weight_grad_store(lazy_init: bool, seq_parallel_mode: str):
    """Check GPT2FusedLinearConv with use_zbv=True (weight-grad computation is
    deferred into WeightGradStore until flush/pop).

    Fix: the previous version asserted ``assert_close(out.grad, gather_out.grad)``.
    ``out``/``gather_out`` are non-leaf tensors, so ``.grad`` is always None and
    the assertion was vacuous. The inputs are now separate leaf tensors with
    ``requires_grad`` so the input-gradient comparison is meaningful.
    """
    ctx = LazyInitContext() if lazy_init else nullcontext()
    linear = Conv1D(192, 48).cuda()
    with ctx:
        linear_copy = Conv1D(192, 48).cuda()
        linear_base = GPT2FusedLinearConv.from_native_module(
            linear_copy, seq_parallel_mode=seq_parallel_mode, use_zbv=True
        )
    assert linear.weight.shape == torch.Size([48, 192])
    assert linear_base.weight.shape == torch.Size([48, 192])
    assert linear_base.bias.shape == torch.Size([192])
    assert linear_copy.weight is linear_base.weight
    assert linear_copy.bias is linear_base.bias
    # ensure weights are reversibly loadable
    linear_base.load_state_dict(linear.state_dict())
    linear.load_state_dict(linear_base.state_dict())
    # check computation correctness via two independent leaf inputs
    x = torch.rand(1, 4, 48).cuda()
    x_ref = x.clone().requires_grad_(True)
    x_base = x.clone().requires_grad_(True)
    out = linear(x_ref)
    gather_out = linear_base(x_base)
    assert_close(out, gather_out)
    # check backward correctness
    out.sum().backward()
    gather_out.sum().backward()
    WeightGradStore.flush(chunk=0)  # flush buffer to chunk 0 Queue
    WeightGradStore.pop(chunk=0)  # executes the deferred dw computation
    # check the input gradients & weight gradients
    assert_close(x_ref.grad, x_base.grad)
    assert_close(linear.weight.grad, linear_base.weight.grad)
@parameterize("lazy_init", [False, True])
@parameterize("seq_parallel_mode", ["split_gather", None])
def check_gpt2_qkv_fused_linear_1d(lazy_init: bool, seq_parallel_mode: str):
    """Run all GPT-2 fused linear-conv layer checks for one parameter combo."""
    check_linear_conv_1d_col(lazy_init, seq_parallel_mode)
    check_linear_conv_1d_row(lazy_init, seq_parallel_mode)
    # the grad-accum variants are exercised without sequence parallelism
    check_linear_conv_1d_without_weight_grad_store(lazy_init, None)
    check_linear_conv_1d_with_weight_grad_store(lazy_init, None)
def run_dist(rank, world_size, port):
    """Per-process entry point: bring up the NCCL group, then run the checks."""
    colossalai.launch(host="localhost", port=port, rank=rank, world_size=world_size, backend="nccl")
    # test for linear conv
    check_gpt2_qkv_fused_linear_1d()
@rerun_if_address_is_in_use()
def test_linearconv():
    """Spawn two workers and run the GPT-2 fused linear-conv layer tests."""
    world_size = 2
    spawn(run_dist, nprocs=world_size)
if __name__ == "__main__":
    # allow running this test file directly without pytest
    test_linearconv()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_shardformer/test_layer/test_ring_attn.py | tests/test_shardformer/test_layer/test_ring_attn.py | import torch
import torch.distributed as dist
import torch.nn.functional as F
from flash_attn import flash_attn_qkvpacked_func, flash_attn_varlen_qkvpacked_func
from torch.testing import assert_close
import colossalai
from colossalai.cluster import ProcessGroupMesh
from colossalai.shardformer.layer import AttnMaskType
from colossalai.shardformer.layer.attn import AttnMaskType, RingAttention
from colossalai.shardformer.layer.utils import split_batch_zigzag, split_varlen_zigzag
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
from colossalai.utils import get_current_device
@parameterize("seq_len", [4096])
@parameterize("bs", [2])
@parameterize("nheads", [5])
@parameterize("d", [128])
@parameterize("dtype", [torch.bfloat16, torch.float16])
def check_ring_attn(seq_len, bs, nheads, d, dtype, inner_ring_size):
    """Compare RingAttention (causal, zigzag-sharded) with flash-attn on a full batch.

    ``inner_ring_size``: passed through to RingAttention; None for a single ring,
    an int for the double-ring variant (see ``launch_double_ring``).
    """
    torch.cuda.manual_seed(2)
    device = get_current_device()
    sp_group = dist.group.WORLD
    dp_size, pp_size, tp_size = 1, 1, 1
    sp_size = dist.get_world_size()
    sp_axis = 2
    pg_mesh = ProcessGroupMesh(dp_size, pp_size, sp_size, tp_size)
    # Some outliers may seem large, but our errors are still lower than
    # than Megatron-LM context parallel's
    # (https://github.com/NVIDIA/TransformerEngine/blob/33a3d02f81c56e6f7b542c09bfa86657078d57fb/tests/pytorch/fused_attn/run_fused_attn_with_cp.py#L215)
    # and the original zigzag implementation's (https://github.com/zhuzilin/ring-flash-attention/tree/main)
    atol = rtol = 7e-3
    # Setup inputs
    qkv = torch.randn(bs, seq_len, 3, nheads, d, device=device, dtype=dtype, requires_grad=True)
    local_qkv = split_batch_zigzag(qkv, sp_group)
    q, k, v = local_qkv.unbind(dim=-3)
    q, k, v = [x.squeeze(2).detach().clone().transpose(1, 2) for x in (q, k, v)]  # (B, nHeads, Sq, D)
    q.requires_grad = k.requires_grad = v.requires_grad = True
    # Ring attention vs single GPU
    ring_out, ring_lse = RingAttention.attention(
        q,
        k,
        v,
        sp_axis,
        AttnMaskType.CAUSAL,
        return_softmax=True,
        inner_ring_size=inner_ring_size,
        pg_mesh=pg_mesh,
    )
    ring_out = ring_out.transpose(1, 2)
    out, lse, _ = flash_attn_qkvpacked_func(
        qkv, dropout_p=0.0, causal=True, window_size=(-1, -1), alibi_slopes=None, return_attn_probs=True
    )
    # Checkout out and softmax denominator
    local_out = split_batch_zigzag(out, sp_group)
    local_lse = split_batch_zigzag(lse, sp_group, seq_dim=-1)
    local_lse = local_lse.transpose(1, 2).contiguous().view(-1, ring_lse.shape[-1])  # (B, nHeads, Sq) -> (T, nHeads)
    assert_close(ring_lse, local_lse, atol=atol, rtol=rtol)
    assert_close(ring_out, local_out, atol=atol, rtol=rtol)
    # Check grads
    ring_out.sum().backward()
    out.sum().backward()
    ring_dq, ring_dk, ring_dv = [x.transpose(1, 2) for x in (q.grad, k.grad, v.grad)]
    dqkv = qkv.grad
    local_dqkv = split_batch_zigzag(dqkv, sp_group)
    assert_close(ring_dq, local_dqkv[:, :, 0], atol=atol, rtol=rtol)
    assert_close(ring_dk, local_dqkv[:, :, 1], atol=atol, rtol=rtol)
    assert_close(ring_dv, local_dqkv[:, :, 2], atol=atol, rtol=rtol)
    if dist.get_rank() == 0:
        print(
            f"sp_size {dist.get_world_size()}, inner ring size {dist.get_world_size(RingAttention.INNER_RING_GROUP)} passed."
        )
@parameterize("seqlen", [4096])
@parameterize("bs", [2])
@parameterize("nheads", [5])
@parameterize("d", [128])
@parameterize("dtype", [torch.bfloat16, torch.float16])
def check_packed_seq(seqlen, bs, nheads, d, dtype):
    """Compare RingAttention's varlen (packed-sequence) path against
    flash_attn_varlen_qkvpacked_func, including the zigzag splitting helper
    and the q/k/v gradients.
    """
    device = get_current_device()
    sp_group = dist.group.WORLD
    sp_size = dist.get_world_size()
    sp_axis = 2
    atol = rtol = 7e-3
    torch.cuda.manual_seed(2)
    # Prepare varlen attention mask: half the batch keeps 3/4 of the sequence,
    # and everything past seqlen//2 is padded out for all samples
    padding_mask = torch.ones((bs, seqlen), dtype=torch.int, device=device)
    padding_mask[: bs // 2, (seqlen // 4) * 3 :] = 0
    padding_mask[:, seqlen // 2 :] = 0
    input_embeds = torch.randn(bs, seqlen, nheads, d, device=device, dtype=dtype, requires_grad=True)
    # Forward
    # out = ColoAttention.attention(q, k, v, **mask_info)
    flat_input = input_embeds.view(-1, nheads, d)[padding_mask.flatten().nonzero().squeeze()]
    qkv = torch.stack([flat_input] * 3, dim=1)
    qkv.retain_grad()
    input_embeds, mask_info, _ = RingAttention.prepare_varlen_batch(padding_mask, sp_group, input_embeds)
    out, lse, _ = flash_attn_varlen_qkvpacked_func(
        qkv,
        mask_info["cu_seqlens"] * sp_size,
        mask_info["max_seqlen"] * sp_size,
        return_attn_probs=True,
        causal=True,
        # deterministic=True
    )
    # Test the splitting function
    local_input = split_varlen_zigzag(
        flat_input, mask_info["cu_seqlens"] * sp_size, sp_group, mask_info["max_seqlen"] * sp_size
    )
    assert (local_input == input_embeds.view(-1, nheads, d)[mask_info["valid_indices"]]).all()
    del local_input, flat_input
    q_ring, k_ring, v_ring = [input_embeds.clone().transpose(1, 2) for _ in range(3)]
    q_ring.retain_grad()
    k_ring.retain_grad()
    v_ring.retain_grad()
    ring_out, ring_lse = RingAttention.attention(
        q_ring,
        k_ring,
        v_ring,
        sp_axis,
        **mask_info,
        pad_output=False,
        return_softmax=True,
        pg_mesh=ProcessGroupMesh(1, 1, sp_size, 1),
        # deterministic=True
    )
    ring_out = ring_out.transpose(1, 2).reshape(-1, nheads, d)
    # Check output
    lse = lse.transpose(0, 1)
    out, lse = split_varlen_zigzag(
        [out, lse], mask_info["cu_seqlens"] * sp_size, sp_group, mask_info["max_seqlen"] * sp_size
    )
    assert_close(lse, ring_lse, atol=atol, rtol=rtol)
    assert_close(out, ring_out, atol=atol, rtol=rtol)
    # Check grads
    labels = torch.ones(out.shape[0], dtype=dtype, device=device)
    F.mse_loss(out.sum((-2, -1)), labels).backward()
    F.mse_loss(ring_out.sum((-2, -1)), labels[: ring_out.shape[0]]).backward()
    dq, dk, dv = [
        split_varlen_zigzag(
            qkv.grad[:, i], mask_info["cu_seqlens"] * sp_size, sp_group, mask_info["max_seqlen"] * sp_size
        )
        for i in range(3)
    ]
    dq_ring, dk_ring, dv_ring = [
        x.transpose(1, 2).reshape(-1, nheads, d)[mask_info["valid_indices"]]
        for x in (q_ring.grad, k_ring.grad, v_ring.grad)
    ]
    assert_close(dq, dq_ring, atol=atol, rtol=rtol)
    assert_close(dk, dk_ring, atol=atol, rtol=rtol)
    assert_close(dv, dv_ring, atol=atol, rtol=rtol)
def launch_single_ring(rank, world_size, port):
    """Worker entry for single-ring runs: varlen and full-batch ring attention."""
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port)
    check_packed_seq()
    check_ring_attn(inner_ring_size=None)
def launch_double_ring(rank, world_size, port):
    """Worker entry for double-ring runs (inner ring of 2 ranks)."""
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port)
    check_ring_attn(inner_ring_size=2)
@rerun_if_address_is_in_use()
@parameterize("world_size", [2])
def test_ring_attn(world_size):
    """Run the single-ring attention tests on ``world_size`` workers."""
    nprocs = world_size
    spawn(launch_single_ring, nprocs=nprocs)
@rerun_if_address_is_in_use()
@parameterize("world_size", [4])
def test_double_ring(world_size):
    """Run the double-ring attention tests on ``world_size`` workers."""
    nprocs = world_size
    spawn(launch_double_ring, nprocs=nprocs)
if __name__ == "__main__":
    # allow running this test file directly without pytest
    test_ring_attn()
    test_double_ring()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_shardformer/test_layer/test_embedding.py | tests/test_shardformer/test_layer/test_embedding.py | from contextlib import nullcontext
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.testing import assert_close
import colossalai
from colossalai.lazy import LazyInitContext
from colossalai.shardformer.layer import Embedding1D
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
@parameterize("lazy_init", [False, True])
def check_embedding_1d(lazy_init: bool):
    """Compare Embedding1D (embedding dim sharded over 2 ranks) with nn.Embedding."""
    ctx = LazyInitContext() if lazy_init else nullcontext()
    embedding = nn.Embedding(32, 128).cuda()
    with ctx:
        embedding_copy = nn.Embedding(32, 128).cuda()
        embedding_1d = Embedding1D.from_native_module(embedding_copy, process_group=None)
    # embedding dim 128 is split across 2 ranks -> 64 per rank
    assert embedding_1d.weight.shape == torch.Size([32, 64])
    assert embedding_1d.weight is embedding_copy.weight
    # ensure state dict is reversibly loadable
    embedding.load_state_dict(embedding_1d.state_dict())
    embedding_1d.load_state_dict(embedding.state_dict())
    # check computation correctness
    x = torch.randint(low=0, high=32, size=(4, 32)).cuda()
    out = embedding(x)
    gather_out = embedding_1d(x)
    assert_close(out, gather_out)
    # check backward correctness
    out.sum().backward()
    gather_out.sum().backward()
    rank = dist.get_rank()
    target_grad = torch.chunk(embedding.weight.grad, 2, dim=1)[rank]
    assert_close(target_grad, embedding_1d.weight.grad)
def run_dist(rank, world_size, port):
    """Per-process entry point for the Embedding1D tests."""
    colossalai.launch(host="localhost", port=port, rank=rank, world_size=world_size, backend="nccl")
    check_embedding_1d()
@rerun_if_address_is_in_use()
def test_embedding_1d():
    """Spawn two workers and run the Embedding1D checks."""
    world_size = 2
    spawn(run_dist, nprocs=world_size)
if __name__ == "__main__":
    # allow running this test file directly without pytest
    test_embedding_1d()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_shardformer/test_layer/test_layernorm.py | tests/test_shardformer/test_layer/test_layernorm.py | from contextlib import nullcontext
import torch
import torch.nn as nn
from torch.testing import assert_close
import colossalai
from colossalai.lazy import LazyInitContext
from colossalai.shardformer.layer import FusedLayerNorm
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
@parameterize("lazy_init", [False, True])
def check_layernorm(lazy_init: bool):
    """Compare FusedLayerNorm (weights shared with the wrapped module) with nn.LayerNorm."""
    ctx = LazyInitContext() if lazy_init else nullcontext()
    norm = nn.LayerNorm(128, 0.00001).cuda()
    with ctx:
        norm_copy = nn.LayerNorm(128, 0.00001).cuda()
        norm1d = FusedLayerNorm.from_native_module(norm_copy, process_group=None)
    # layernorm params are replicated, not sharded
    assert norm1d.weight.shape == torch.Size([128])
    assert norm_copy.weight is norm1d.weight
    assert norm_copy.bias is norm1d.bias
    # ensure state dict is reversibly loadable
    norm.load_state_dict(norm1d.state_dict())
    norm1d.load_state_dict(norm.state_dict())
    # check computation correctness
    x = torch.rand(4, 128).cuda()
    out = norm(x)
    gather_out = norm1d(x)
    assert_close(out, gather_out)
    # check backward correctness
    out.sum().backward()
    gather_out.sum().backward()
    assert_close(norm.weight.grad, norm1d.weight.grad)
def run_dist(rank, world_size, port):
    """Per-process entry point for the FusedLayerNorm tests."""
    colossalai.launch(host="localhost", port=port, rank=rank, world_size=world_size, backend="nccl")
    check_layernorm()
@rerun_if_address_is_in_use()
def test_layernorm():
    """Spawn two workers and run the FusedLayerNorm checks."""
    world_size = 2
    spawn(run_dist, nprocs=world_size)
if __name__ == "__main__":
    # allow running this test file directly without pytest
    test_layernorm()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_shardformer/test_layer/test_qkv_fused_linear_1d.py | tests/test_shardformer/test_layer/test_qkv_fused_linear_1d.py | import os
from contextlib import nullcontext
import torch
import torch.nn as nn
from torch.testing import assert_close
import colossalai
from colossalai.lazy import LazyInitContext
from colossalai.shardformer.layer import FusedLinear, FusedLinear1D_Col, FusedLinear1D_Row
from colossalai.shardformer.layer.qkv_fused_linear import split_fused_qkv_in_gpt2_style
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
# This code is copied from https://github.com/huggingface/transformers
# Serialize CUDA kernel launches across streams so distributed runs are deterministic.
os.environ["CUDA_DEVICE_MAX_CONNECTIONS"] = "1"
@parameterize("lazy_init", [False, True])
def check_linear_1d_col(lazy_init: bool):
    """Compare FusedLinear1D_Col (fused qkv split [32, 32, 16]) with nn.Linear."""
    ctx = LazyInitContext() if lazy_init else nullcontext()
    linear = nn.Linear(8, 80).cuda()
    with ctx:
        linear_copy = nn.Linear(8, 80).cuda()
        linear_col = FusedLinear1D_Col.from_native_module(
            linear_copy, process_group=None, gather_output=True, split_sizes=[32, 32, 16]
        )
    # 80 output features are split column-wise across 2 ranks -> 40 per rank
    assert linear.weight.shape == torch.Size([80, 8])
    assert linear.bias.shape == torch.Size([80])
    assert linear_col.weight.shape == torch.Size([40, 8])
    assert linear_col.bias.shape == torch.Size([40])
    assert linear_copy.weight is linear_col.weight
    assert linear_copy.bias is linear_col.bias
    # ensure weights are reversibly loadable
    linear_col.load_state_dict(linear.state_dict())
    linear.load_state_dict(linear_col.state_dict())
    # check computation correctness
    x = torch.rand(4, 8).cuda()
    out = linear(x)
    gather_out = linear_col(x)
    assert_close(out, gather_out)
    # check backward correctness
    out.sum().backward()
    gather_out.sum().backward()
    target_grad = split_fused_qkv_in_gpt2_style(linear.weight.grad, [32, 32, 16], None, False)
    assert_close(target_grad, linear_col.weight.grad)
@parameterize("lazy_init", [False, True])
def check_linear_1d_row(lazy_init: bool):
    """Compare FusedLinear1D_Row (fused qkv split [32, 32, 16]) with nn.Linear."""
    ctx = LazyInitContext() if lazy_init else nullcontext()
    linear = nn.Linear(80, 8).cuda()
    with ctx:
        linear_copy = nn.Linear(80, 8).cuda()
        linear_row = FusedLinear1D_Row.from_native_module(
            linear_copy, process_group=None, split_sizes=[32, 32, 16], parallel_input=False
        )
    # 80 input features are split row-wise across 2 ranks -> 40 per rank
    assert linear.weight.shape == torch.Size([8, 80])
    assert linear_row.weight.shape == torch.Size([8, 40])
    assert linear_row.bias.shape == torch.Size([8])
    assert linear_copy.weight is linear_row.weight
    assert linear_copy.bias is linear_row.bias
    # ensure weights are reversibly loadable
    linear_row.load_state_dict(linear.state_dict())
    linear.load_state_dict(linear_row.state_dict())
    # check computation correctness
    x = torch.rand(4, 80).cuda()
    out = linear(x)
    gather_out = linear_row(x)
    assert_close(out, gather_out)
    # check backward correctness
    out.sum().backward()
    gather_out.sum().backward()
    target_grad = split_fused_qkv_in_gpt2_style(linear.weight.grad, [32, 32, 16], None, True)
    assert_close(target_grad, linear_row.weight.grad)
@parameterize("lazy_init", [False, True])
def check_linear_1d_col_row(lazy_init: bool):
    """Chain FusedLinear1D_Col -> FusedLinear1D_Row and compare with two nn.Linear layers."""
    ctx = LazyInitContext() if lazy_init else nullcontext()
    linear1 = nn.Linear(8, 80).cuda()
    linear2 = nn.Linear(80, 8).cuda()
    with ctx:
        linear1_copy = nn.Linear(8, 80).cuda()
        linear2_copy = nn.Linear(80, 8).cuda()
        linear_col = FusedLinear1D_Col.from_native_module(linear1_copy, process_group=None, split_sizes=[32, 32, 16])
        linear_row = FusedLinear1D_Row.from_native_module(
            linear2_copy,
            process_group=None,
            split_sizes=[32, 32, 16],
        )
    # ensure weights are reversibly loadable
    linear_col.load_state_dict(linear1.state_dict())
    linear_row.load_state_dict(linear2.state_dict())
    # check computation correctness
    x = torch.rand(4, 8).cuda()
    target_out = linear2(linear1(x))
    out = linear_row(linear_col(x))
    assert_close(out, target_out)
    # check backward correctness
    target_out.sum().backward()
    out.sum().backward()
    target_grad1 = split_fused_qkv_in_gpt2_style(linear1.weight.grad, [32, 32, 16], None, False)
    assert_close(target_grad1, linear_col.weight.grad)
    target_grad2 = split_fused_qkv_in_gpt2_style(linear2.weight.grad, [32, 32, 16], None, True)
    assert_close(target_grad2, linear_row.weight.grad)
@parameterize("lazy_init", [False, True])
def check_linear_1d_base(lazy_init: bool):
    """Compare the unsharded FusedLinear wrapper with a plain nn.Linear."""
    ctx = LazyInitContext() if lazy_init else nullcontext()
    linear = nn.Linear(8, 80).cuda()
    with ctx:
        linear_copy = nn.Linear(8, 80).cuda()
        linear_base = FusedLinear.from_native_module(linear_copy)
    # no sharding: shapes are identical and params are shared
    assert linear.weight.shape == torch.Size([80, 8])
    assert linear.bias.shape == torch.Size([80])
    assert linear_base.weight.shape == torch.Size([80, 8])
    assert linear_base.bias.shape == torch.Size([80])
    assert linear_copy.weight is linear_base.weight
    assert linear_copy.bias is linear_base.bias
    # ensure weights are reversibly loadable
    linear_base.load_state_dict(linear.state_dict())
    linear.load_state_dict(linear_base.state_dict())
    # check computation correctness
    x = torch.rand(4, 8).cuda()
    out = linear(x)
    base_out = linear_base(x)
    assert_close(out, base_out)
    # check backward correctness
    out.sum().backward()
    base_out.sum().backward()
    assert_close(linear.weight.grad, linear_base.weight.grad)
def run_dist(rank, world_size, port):
    """Per-process entry point for the fused-linear 1D tests."""
    colossalai.launch(host="localhost", port=port, rank=rank, world_size=world_size, backend="nccl")
    check_linear_1d_col()
    check_linear_1d_row()
    check_linear_1d_col_row()
    check_linear_1d_base()
@rerun_if_address_is_in_use()
def test_linearconv():
    """Spawn two workers and run the FusedLinear1D checks."""
    world_size = 2
    spawn(run_dist, nprocs=world_size)
if __name__ == "__main__":
    # allow running this test file directly without pytest
    test_linearconv()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_shardformer/test_layer/test_sequence_parallel.py | tests/test_shardformer/test_layer/test_sequence_parallel.py | import copy
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.testing import assert_close
import colossalai
from colossalai.shardformer.layer import all_to_all_comm
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
class SequenceParallelAttention(torch.nn.Module):
    """Multi-head self-attention with optional all-to-all sequence parallelism.

    When ``enable_sequence_parallellism`` is True, the q/k/v projections are
    exchanged across the sequence-parallel group with all-to-all so that each
    rank attends over the full sequence with a subset of the heads; the
    attention output is exchanged back before the output projection.

    (Fixed over the previous version: the docstring documented a nonexistent
    ``local_attention`` parameter, the int parameters were annotated as
    ``torch.Tensor``, and ``attn`` computed two unused locals.)

    Args:
        heads_num (int): total number of attention heads.
        hidden_dim (int): hidden dimension; must be divisible by ``heads_num``.
        enable_sequence_parallellism (bool): enable all-to-all sequence parallelism.
        sequence_process_group (dist.ProcessGroup): sequence parallel process group.
        scatter_idx (int): scatter_idx for all2all comm (head dimension).
        gather_idx (int): gather_idx for all2all comm (sequence dimension).
    """

    def __init__(
        self,
        heads_num: int,
        hidden_dim: int,
        enable_sequence_parallellism: bool = False,
        sequence_process_group: dist.ProcessGroup = None,
        scatter_idx: int = 2,
        gather_idx: int = 1,
    ) -> None:
        super().__init__()
        self.spg = sequence_process_group
        self.scatter_idx = scatter_idx
        self.gather_idx = gather_idx
        self.heads_num = heads_num
        self.hidden_dim = hidden_dim
        assert hidden_dim % heads_num == 0
        self.head_dim = hidden_dim // heads_num
        self.enable_sequence_parallellism = enable_sequence_parallellism

        self.q = nn.Linear(hidden_dim, hidden_dim)
        self.k = nn.Linear(hidden_dim, hidden_dim)
        self.v = nn.Linear(hidden_dim, hidden_dim)
        self.out = nn.Linear(hidden_dim, hidden_dim)

    def attn(self, q, k, v):
        # Plain scaled dot-product attention (no mask, no dropout).
        scale = self.head_dim**0.5
        qk = torch.matmul(q, k.transpose(-2, -1)) / scale
        weights = F.softmax(qk, dim=-1)
        attention_score = torch.matmul(weights, v)
        return attention_score

    def forward(self, x) -> Tensor:
        bsz, q_len, _ = x.size()
        # With sequence parallelism, each rank holds q_len = full_seq / sp_size
        # tokens of every head before the all-to-all, and the full sequence of
        # heads_num / sp_size heads after it.
        seq_len = q_len * dist.get_world_size(self.spg) if self.enable_sequence_parallellism else q_len
        num_heads = (
            self.heads_num // dist.get_world_size(self.spg) if self.enable_sequence_parallellism else self.heads_num
        )
        # in shape : e.g., [s/p:h:]
        query_states = self.q(x)
        key_states = self.k(x)
        value_states = self.v(x)
        if self.enable_sequence_parallellism:
            query_states = all_to_all_comm(query_states, self.spg, self.scatter_idx, self.gather_idx)
            key_states = all_to_all_comm(key_states, self.spg, self.scatter_idx, self.gather_idx)
            value_states = all_to_all_comm(value_states, self.spg, self.scatter_idx, self.gather_idx)
        query_states = query_states.view(bsz, seq_len, num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, seq_len, num_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, seq_len, num_heads, self.head_dim).transpose(1, 2)
        # out shape : e.g., [s:h/p:]
        attn_score = self.attn(query_states, key_states, value_states)
        attn_score = attn_score.transpose(1, 2).contiguous()
        attn_score = attn_score.reshape(bsz, seq_len, num_heads * self.head_dim)
        if self.enable_sequence_parallellism:
            # exchange back: gather heads, scatter the sequence
            attn_score = all_to_all_comm(attn_score, self.spg, self.gather_idx, self.scatter_idx)
        # output e.g., [s/p::h]
        output = self.out(attn_score)
        return output
def seq_parallel_attn(seq_len, hidden_dim, head_num, batch_size):
    """Compare all-to-all sequence-parallel attention against single-device MHA.

    Each rank feeds its own sequence chunk to the sequence-parallel module; the
    forward outputs are all-gathered and the weight grads all-reduced before
    being compared against the full-sequence reference run.

    (Fixed over the previous version: removed four no-op self-assignments such
    as ``seq_len = seq_len`` at the top of the function.)
    """
    world_size = dist.get_world_size()
    x = torch.randn(batch_size, seq_len, hidden_dim).cuda()
    x_unshard = x.clone()
    x_unshard.requires_grad_(True)
    x_input = torch.chunk(x.clone(), world_size, dim=1)[dist.get_rank()]
    x_input.requires_grad_(True)

    # Multi-head Attention (full-sequence reference)
    mha = SequenceParallelAttention(head_num, hidden_dim).cuda()
    # Multi-head Attention forward
    mha_out = mha(x_unshard)

    # Sequence parallel Attention, initialized from the same weights
    sp_attn = SequenceParallelAttention(head_num, hidden_dim, True).cuda()
    sp_attn.load_state_dict(copy.deepcopy(mha.state_dict()))
    # Sequence parallel Attention forward
    dist_attn_out = sp_attn(x_input)

    # gather the output of sequence parallel attention
    out_list = [torch.empty_like(dist_attn_out) for _ in range(world_size)]
    dist.all_gather(out_list, dist_attn_out)
    seq_out = torch.cat(out_list, dim=1)

    # forward result check
    assert_close(seq_out, mha_out)

    # Multi-head Attention backward
    mha_out.sum().backward()
    q_grad = mha.q.weight.grad
    k_grad = mha.k.weight.grad
    v_grad = mha.v.weight.grad
    o_grad = mha.out.weight.grad
    x_grad = x_unshard.grad

    # Sequence parallel Attention backward
    dist_attn_out.sum().backward()
    q_grad_seq = sp_attn.q.weight.grad
    k_grad_seq = sp_attn.k.weight.grad
    v_grad_seq = sp_attn.v.weight.grad
    o_grad_seq = sp_attn.out.weight.grad
    x_grad_seq = x_input.grad
    # all_reduce the grad of sequence parallel attention weight
    dist.all_reduce(q_grad_seq)
    dist.all_reduce(k_grad_seq)
    dist.all_reduce(v_grad_seq)
    dist.all_reduce(o_grad_seq)
    # gather the grad of sequence parallel attention input
    x_grad_seq_list = [torch.empty_like(x_grad_seq) for _ in range(world_size)]
    dist.all_gather(x_grad_seq_list, x_grad_seq)
    x_grad_seq_gather = torch.cat(x_grad_seq_list, dim=1)

    # backward result check
    assert_close(q_grad_seq, q_grad)
    assert_close(k_grad_seq, k_grad)
    assert_close(v_grad_seq, v_grad, atol=1e-4, rtol=1e-4)
    assert_close(o_grad_seq, o_grad)
    assert_close(x_grad_seq_gather, x_grad)
@parameterize("seq_len", [128])
@parameterize("hidden_dim", [64])
@parameterize("head_num", [4])
@parameterize("batch_size", [1])
def run_seq_parallel_attn(seq_len, hidden_dim, head_num, batch_size):
    """Parameterized wrapper around ``seq_parallel_attn``."""
    seq_parallel_attn(seq_len, hidden_dim, head_num, batch_size)
def check_all2all_attn(rank, world_size, port):
    """Distributed worker: set up the NCCL group, then run the attention checks."""
    colossalai.launch(host="localhost", port=port, rank=rank, world_size=world_size, backend="nccl")
    run_seq_parallel_attn()
@rerun_if_address_is_in_use()
def test_all_to_all_attention():
    """Exercise all-to-all sequence-parallel attention on 4 workers."""
    world_size = 4
    spawn(check_all2all_attn, nprocs=world_size)
if __name__ == "__main__":
    # allow running this test file directly without pytest
    test_all_to_all_attention()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_shardformer/test_layer/test_linear_1d.py | tests/test_shardformer/test_layer/test_linear_1d.py | import os
from contextlib import nullcontext
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.testing import assert_close
import colossalai
from colossalai.lazy import LazyInitContext
from colossalai.pipeline.weight_grad_store import WeightGradStore
from colossalai.shardformer.layer import Linear1D_Col, Linear1D_Row, LinearWithGradAccum
from colossalai.tensor.d_tensor import is_distributed_tensor
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
# Serialize CUDA kernel launches across streams so distributed runs are deterministic.
os.environ["CUDA_DEVICE_MAX_CONNECTIONS"] = "1"
def check_linear_1d_col(lazy_init: bool, seq_parallel_mode: str, overlap: bool):
    """Compare Linear1D_Col against a plain nn.Linear on 2 ranks.

    ``seq_parallel_mode`` is a mode string ("split_gather") or None (annotation
    was previously ``bool``, which did not match the actual values).
    """
    ctx = LazyInitContext() if lazy_init else nullcontext()
    linear = nn.Linear(32, 128).cuda()
    with ctx:
        linear_copy = nn.Linear(32, 128).cuda()
        linear_col = Linear1D_Col.from_native_module(
            linear_copy, process_group=None, gather_output=True, seq_parallel_mode=seq_parallel_mode, overlap=overlap
        )
    # ensure that the parameters are distributed
    assert is_distributed_tensor(linear_col.weight)
    assert is_distributed_tensor(linear_col.bias)
    assert linear_copy.weight is linear_col.weight
    assert linear_copy.bias is linear_col.bias
    # ensure the shape is correct
    assert linear_col.weight.shape == torch.Size([64, 32])
    assert linear_col.bias.shape == torch.Size([64])
    # ensure state dict is reversibly loadable
    linear.load_state_dict(linear_col.state_dict())
    linear_col.load_state_dict(linear.state_dict())
    # check computation correctness
    # [batch_size, seq_len, hidden_size]
    x = torch.rand(2, 4, 32).cuda()
    x_for_unshard = x.expand_as(x.clone())
    x_for_unshard.requires_grad_(True)
    # with sequence parallelism each rank feeds its own sequence chunk
    x_for_shard = (
        x.expand_as(x.clone()) if seq_parallel_mode is None else torch.chunk(x.clone(), 2, dim=1)[dist.get_rank()]
    )
    x_for_shard.requires_grad_(True)
    out = linear(x_for_unshard)
    gather_out = linear_col(x_for_shard)
    assert_close(out, gather_out)
    # check backward correctness
    out.sum().backward()
    gather_out.sum().backward()
    rank = dist.get_rank()
    target_grad = torch.chunk(linear.weight.grad, 2, dim=0)[rank]
    assert_close(target_grad, linear_col.weight.grad)
    # check the input gradients
    assert x_for_shard.grad is not None
    assert x_for_unshard.grad is not None
    target_unshard_gard = (
        x_for_unshard.grad
        if seq_parallel_mode is None
        else torch.chunk(x_for_unshard.grad.clone(), 2, dim=1)[dist.get_rank()]
    )
    assert_close(target_unshard_gard, x_for_shard.grad)
def check_linear_1d_row(lazy_init: bool, seq_parallel_mode: str):
    """Compare Linear1D_Row against a plain nn.Linear on 2 ranks.

    ``seq_parallel_mode`` is a mode string ("split_gather") or None (annotation
    was previously ``bool``, which did not match the actual values).
    """
    ctx = LazyInitContext() if lazy_init else nullcontext()
    linear = nn.Linear(32, 128).cuda()
    with ctx:
        linear_copy = nn.Linear(32, 128).cuda()
        linear_row = Linear1D_Row.from_native_module(
            linear_copy, process_group=None, parallel_input=False, seq_parallel_mode=seq_parallel_mode
        )
    # input dimension (32) is split row-wise across 2 ranks -> 16 per rank
    assert linear_row.weight.shape == torch.Size([128, 16])
    assert linear_row.bias.shape == torch.Size([128])
    assert linear_copy.weight is linear_row.weight
    assert linear_copy.bias is linear_row.bias
    linear.load_state_dict(linear_row.state_dict())
    linear_row.load_state_dict(linear.state_dict())
    # check computation correctness
    # [batch_size, seq_len, hidden_size]
    x = torch.rand(2, 4, 32).cuda()
    x_for_unshard = x.expand_as(x.clone())
    x_for_unshard.requires_grad_(True)
    x_for_shard = x.expand_as(x.clone())
    x_for_shard.requires_grad_(True)
    # run forward
    out = linear(x_for_unshard)
    gather_out = linear_row(x_for_shard)
    # with sequence parallelism the row-parallel output stays sequence-sharded
    target_out = out if seq_parallel_mode is None else torch.chunk(out.clone(), 2, dim=1)[dist.get_rank()]
    assert_close(target_out, gather_out)
    # check backward correctness
    out.sum().backward()
    gather_out.sum().backward()
    rank = dist.get_rank()
    target_grad = torch.chunk(linear.weight.grad, 2, dim=1)[rank]
    assert_close(target_grad, linear_row.weight.grad)
    # check the input gradients
    assert x_for_shard.grad is not None
    assert x_for_unshard.grad is not None
    assert_close(x_for_unshard.grad, x_for_shard.grad)
def check_linear_without_weight_grad_store(lazy_init: bool, seq_parallel_mode: str):
    """Check LinearWithGradAccum (use_zbv=False: weight grads computed eagerly)."""
    ctx = LazyInitContext() if lazy_init else nullcontext()
    linear = nn.Linear(32, 128).cuda()
    with ctx:
        linear_copy = nn.Linear(32, 128).cuda()
        linear_base = LinearWithGradAccum.from_native_module(
            linear_copy, parallel_input=False, seq_parallel_mode=seq_parallel_mode, use_zbv=False
        )
    # no sharding: shapes identical, params shared with the wrapped module
    assert linear_base.weight.shape == torch.Size([128, 32])
    assert linear_base.bias.shape == torch.Size([128])
    assert linear_copy.weight is linear_base.weight
    assert linear_copy.bias is linear_base.bias
    linear.load_state_dict(linear_base.state_dict())
    linear_base.load_state_dict(linear.state_dict())
    # check computation correctness
    # [batch_size, seq_len, hidden_size]
    x = torch.rand(2, 4, 32).cuda()
    x_for_unshard = x.expand_as(x.clone())
    x_for_unshard.requires_grad_(True)
    x_for_shard = x.expand_as(x.clone())
    x_for_shard.requires_grad_(True)
    # run forward
    out = linear(x_for_unshard)
    gather_out = linear_base(x_for_shard)
    assert_close(out, gather_out)
    # check backward correctness
    out.sum().backward()
    gather_out.sum().backward()
    assert_close(linear.weight.grad, linear_base.weight.grad)
    # check the input gradients
    assert x_for_shard.grad is not None
    assert x_for_unshard.grad is not None
    assert_close(x_for_unshard.grad, x_for_shard.grad)
def check_linear_with_weight_grad_store(lazy_init: bool, seq_parallel_mode: bool):
    """With ``use_zbv=True`` (zero-bubble-V pipeline), the weight gradient is
    deferred: it must still be ``None`` right after ``backward()`` and only
    materialize once the queued dW computation is flushed and popped from
    ``WeightGradStore``. Input gradients are not deferred and must match a
    plain ``nn.Linear`` immediately.
    """
    ctx = LazyInitContext() if lazy_init else nullcontext()
    linear = nn.Linear(32, 128).cuda()
    with ctx:
        linear_copy = nn.Linear(32, 128).cuda()
        linear_base = LinearWithGradAccum.from_native_module(
            linear_copy, parallel_input=False, seq_parallel_mode=seq_parallel_mode, use_zbv=True
        )

    # the wrapper must share (not copy) the native module's parameters
    assert linear_base.weight.shape == torch.Size([128, 32])
    assert linear_base.bias.shape == torch.Size([128])
    assert linear_copy.weight is linear_base.weight
    assert linear_copy.bias is linear_base.bias

    # sync weights both ways so the reference and wrapped modules start identical
    linear.load_state_dict(linear_base.state_dict())
    linear_base.load_state_dict(linear.state_dict())

    # check computation correctness
    # [batch_size, seq_len, hidden_size]
    x = torch.rand(2, 4, 32).cuda()
    x_for_unshard = x.expand_as(x.clone())
    x_for_unshard.requires_grad_(True)
    x_for_shard = x.expand_as(x.clone())
    x_for_shard.requires_grad_(True)

    # run forward
    out = linear(x_for_unshard)
    gather_out = linear_base(x_for_shard)
    assert_close(out, gather_out)

    # check backward correctness
    out.sum().backward()
    gather_out.sum().backward()

    # Weight grad is None before we do WeightGradStore pop
    assert linear_base.weight.grad is None
    # after WeightGradStore pop (dw computation complete), we assert weight grad
    WeightGradStore.flush(chunk=0)  # flush buffer to chunk 0 Queue
    WeightGradStore.pop(chunk=0)
    assert_close(linear.weight.grad, linear_base.weight.grad)

    # check the input gradients
    assert x_for_shard.grad is not None
    assert x_for_unshard.grad is not None
    assert_close(x_for_unshard.grad, x_for_shard.grad)
def check_linear_col_plus_row(lazy_init: bool, seq_parallel_mode: bool, overlap: bool):
    """End-to-end check of a Linear1D_Col -> Linear1D_Row stack against the
    equivalent pair of plain ``nn.Linear`` layers: forward outputs, the
    column-sharded layer's weight grad, and input grads must all agree
    (sliced along the sequence dim when sequence parallelism is on).
    """
    rank = dist.get_rank()
    ctx = LazyInitContext() if lazy_init else nullcontext()
    linear_1 = nn.Linear(32, 128).cuda()
    linear_2 = nn.Linear(128, 32).cuda()
    with ctx:
        linear_1_copy = nn.Linear(32, 128).cuda()
        linear_2_copy = nn.Linear(128, 32).cuda()
        linear_col = Linear1D_Col.from_native_module(
            linear_1_copy, process_group=None, gather_output=False, seq_parallel_mode=seq_parallel_mode, overlap=overlap
        )
        linear_row = Linear1D_Row.from_native_module(
            linear_2_copy, process_group=None, parallel_input=True, seq_parallel_mode=seq_parallel_mode
        )

    # sync weights both ways so reference and sharded stacks start identical
    linear_1.load_state_dict(linear_col.state_dict())
    linear_col.load_state_dict(linear_1.state_dict())
    linear_2.load_state_dict(linear_row.state_dict())
    linear_row.load_state_dict(linear_2.state_dict())

    # check computation correctness
    # [batch_size, seq_len, hidden_size]
    x = torch.rand(2, 4, 32).cuda()
    full_input = x.expand_as(x.clone())
    full_input.requires_grad_(True)
    if seq_parallel_mode is None:
        shard_input = x.expand_as(x.clone())
    else:
        # sequence parallelism: each rank consumes its own slice of the seq dim
        shard_input = torch.chunk(x.clone(), 2, dim=1)[rank]
    shard_input.requires_grad_(True)

    # run forward through both stacks
    unshard_out = linear_2(linear_1(full_input))
    shard_out = linear_row(linear_col(shard_input))
    if seq_parallel_mode is None:
        target_out = unshard_out
    else:
        target_out = torch.chunk(unshard_out.clone(), 2, dim=1)[rank]
    assert_close(target_out, shard_out)

    # check backward correctness
    unshard_out.sum().backward()
    shard_out.sum().backward()

    # the column-parallel layer holds a row-chunk of linear_1's weight grad
    target_1_grad = torch.chunk(linear_1.weight.grad, 2, dim=0)[rank]
    assert_close(target_1_grad, linear_col.weight.grad)

    # check the input gradients (sliced for sequence parallelism)
    assert shard_input.grad is not None
    assert full_input.grad is not None
    if seq_parallel_mode is None:
        target_unshard_grad = full_input.grad
    else:
        target_unshard_grad = torch.chunk(full_input.grad.clone(), 2, dim=1)[rank]
    assert_close(target_unshard_grad, shard_input.grad)
@parameterize("lazy_init", [False, True])
@parameterize("seq_parallel_mode", [None, "split_gather"])
@parameterize("overlap", [True])
def run_dist_linear_test(lazy_init, seq_parallel_mode, overlap):
check_linear_1d_col(lazy_init, seq_parallel_mode, overlap)
check_linear_1d_row(lazy_init, seq_parallel_mode)
check_linear_col_plus_row(lazy_init, seq_parallel_mode, overlap)
check_linear_without_weight_grad_store(lazy_init, seq_parallel_mode)
check_linear_with_weight_grad_store(lazy_init, seq_parallel_mode)
def check_dist_linear(rank, world_size, port):
    """Per-process entry point: bring up the NCCL group, then run the full suite."""
    colossalai.launch(backend="nccl", host="localhost", port=port, rank=rank, world_size=world_size)
    run_dist_linear_test()
@rerun_if_address_is_in_use()
def test_linear():
    # Spawn two worker processes (tensor-parallel size 2) running check_dist_linear.
    spawn(check_dist_linear, nprocs=2)


if __name__ == "__main__":
    test_linear()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_shardformer/test_layer/test_dist_log_prob.py | tests/test_shardformer/test_layer/test_dist_log_prob.py | import pytest
import torch
import colossalai
from colossalai.logging import disable_existing_loggers
from colossalai.shardformer.layer import dist_log_prob_1d
from colossalai.testing import rerun_if_address_is_in_use, spawn
# Legacy-style parallel configuration: 1D tensor parallelism across 2 devices.
# NOTE(review): not referenced anywhere in the visible test code below —
# presumably kept for compatibility; confirm before removing.
CONFIG = dict(
    parallel=dict(data=1, pipeline=1, tensor=dict(size=2, mode="1d")),
)
def log_probs_from_logits(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    """
    Pick out, for every position, the log-probability of its target label.

    Args:
        logits (torch.Tensor): Raw scores with the class dimension last.
        labels (torch.Tensor): Integer class indices; same shape as ``logits``
            without the last dimension.

    Returns:
        torch.Tensor: Log-probabilities of the given labels, shaped like ``labels``.
    """
    all_logps = torch.log_softmax(logits, dim=-1)
    idx = labels.unsqueeze(-1)
    return torch.gather(all_logps, -1, idx).squeeze(-1)
def check_dist_log_prob(rank, world_size, port):
    """Verify ``dist_log_prob_1d`` against a single-device reference.

    Launched per-process by ``spawn``: computes log-probs on the full logits,
    then on this rank's vocab-sharded chunk via ``dist_log_prob_1d``, and
    checks that both the forward values and the input gradients agree.

    Fixes: the assertion messages used to say "orgin" and referred to
    "cross entropy" although this test checks log-probs.
    """
    disable_existing_loggers()
    colossalai.launch(rank=rank, world_size=world_size, port=port, host="localhost", backend="nccl")

    # prepare data: [batch=2, seq=4, vocab=8] logits with integer labels
    pred = torch.randn(2, 4, 8, requires_grad=True).cuda()
    labels = torch.randint(8, (2, 4)).cuda()

    # single-device reference forward/backward
    logprob = log_probs_from_logits(pred, labels)

    pred.retain_grad()
    logprob.mean().backward()

    # shard the vocab dimension across ranks and run the distributed op
    dist_pred = pred.clone().chunk(world_size, -1)[rank].detach()
    dist_pred.requires_grad = True
    dist_logprob = dist_log_prob_1d(dist_pred, labels)

    dist_pred.retain_grad()
    dist_logprob.squeeze(-1).mean().backward()

    assert torch.allclose(
        logprob, dist_logprob.squeeze(-1), atol=1e-5
    ), f"dist log prob is not equal to origin log prob\n{logprob}\n{dist_logprob.squeeze(-1)}"

    # this rank's slice of the reference input grad must match the dist grad
    pred_grad_partial = pred.grad.clone().chunk(world_size, -1)[rank].detach()
    assert torch.allclose(
        pred_grad_partial, dist_pred.grad
    ), f"dist grad is not equal to origin grad\n{pred.grad}\n{dist_pred.grad}"
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_dist_log_prob():
    # Two processes: the vocab dimension is sharded across 2 ranks.
    spawn(check_dist_log_prob, 2)


if __name__ == "__main__":
    test_dist_log_prob()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_shardformer/test_hybrid_parallel_grad_clip_norm/test_amp_optimizer.py | tests/test_shardformer/test_hybrid_parallel_grad_clip_norm/test_amp_optimizer.py | import pytest
import torch
from torch.nn.utils.clip_grad import clip_grad_norm_
import colossalai
from colossalai.logging import disable_existing_loggers
from colossalai.shardformer.layer.utils import Randomizer
from colossalai.tensor.d_tensor.api import clear_layout_converter
from colossalai.testing import clear_cache_before_run, parameterize, rerun_if_address_is_in_use, spawn
from tests.kit.model_zoo import model_zoo
from tests.test_shardformer.test_model._utils import (
build_model_from_hybrid_plugin,
check_all_grad_tensors,
check_loss,
check_output_hidden_state,
check_weight,
get_grad_tensors_for_check,
run_forward_backward_with_hybrid_plugin,
unwrap_model,
)
def check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, test_config):
    """Run one fwd/bwd step on a BERT model under the hybrid-parallel plugin
    (AMP optimizer path) and compare gradients, the clipped gradient norm,
    loss, last hidden state and post-step weights against the unsharded
    reference model.
    """
    org_model, org_optimizer, sharded_model, sharded_optimizer, criterion, booster = build_model_from_hybrid_plugin(
        model_fn, loss_fn, test_config
    )

    org_loss, org_output, sharded_loss, sharded_output = run_forward_backward_with_hybrid_plugin(
        org_model, sharded_model, sharded_optimizer, data_gen_fn, output_transform_fn, criterion, booster
    )

    stage_manager = booster.plugin.stage_manager
    tp_group = booster.plugin.tp_group

    bert = unwrap_model(org_model, "BertModel", "bert")
    sharded_bert = unwrap_model(sharded_model, "BertModel", "bert")

    # layers sharded along different dims under tensor parallelism
    col_layer_for_check = ["encoder.layer[0].output.dense"]
    row_layer_for_check = ["embeddings.word_embeddings", "encoder.layer[0].intermediate.dense"]

    # looser tolerances for reduced precision
    if test_config["precision"] == "fp32":
        atol, rtol = 1e-4, 1e-3
    elif test_config["precision"] == "fp16":
        atol, rtol = 5e-3, 5e-3
    else:
        atol, rtol = 2e-2, 2e-2

    # Check grads
    # Save gradient tensors for comparison between the original model and the sharded model.
    grads_to_check = {}
    if (stage_manager is None or stage_manager.is_first_stage()) and booster.plugin.zero_stage == 0:
        col_layer_grads = get_grad_tensors_for_check(
            bert, sharded_bert, col_layer_for_check, tp_group, atol=atol, rtol=rtol, dim=1, verbose=False
        )
        row_layer_grads = get_grad_tensors_for_check(
            bert, sharded_bert, row_layer_for_check, tp_group, atol=atol, rtol=rtol, dim=0, verbose=False
        )
        grads_to_check.update(col_layer_grads)
        grads_to_check.update(row_layer_grads)
    check_all_grad_tensors(grads_to_check)

    # Check gradient norm
    # Convert the gradient data of the working parameter to float and assign it to the master parameter's gradient
    # Note that this operation should have been done in the 'step' function, but it is performed here in advance for gradient norm calculation purposes.
    # Although it will be done again in the 'step' function, it does not affect correctness.
    for group in sharded_optimizer.optim.param_groups:
        for p in group["params"]:
            working_param = sharded_optimizer.master_to_working_map[p]
            if p is working_param:
                continue
            if working_param.grad is not None:
                p.grad = working_param.grad.data.float()
                working_param.grad = None
    # Create a list of parameter-gradient pairs containing working parameters and their gradients
    param_gradient_pairs = [
        (sharded_optimizer.master_to_working_map[p], p.grad)
        for group in sharded_optimizer.param_groups
        for p in group["params"]
        if p.grad is not None
    ]
    origin_norm = clip_grad_norm_(org_model.parameters(), test_config["max_norm"])
    # Calculate the gradient norm of the sharded optimizer
    device = origin_norm.device
    hybrid_norm = torch.tensor(sharded_optimizer._compute_grad_norm(param_gradient_pairs)).to(device)
    # If using fp16 precision, divide by the initial scale
    if test_config["precision"] == "fp16":
        hybrid_norm /= test_config["initial_scale"]
    # Assert that the gradient norm of the original model is close to the gradient norm of the hybrid model
    assert torch.allclose(
        origin_norm, hybrid_norm, atol=atol, rtol=rtol
    ), f"Original model grad norm is not equal to sharded model grad norm\n{origin_norm}\n{hybrid_norm}"

    # Optimizer executes step
    org_optimizer.step()
    sharded_optimizer.step()

    # Check last hidden state & loss
    if stage_manager is None or stage_manager.is_last_stage():
        if test_config["precision"] == "fp32":
            atol, rtol = 1e-5, 1e-3
        elif test_config["precision"] == "fp16":
            atol, rtol = 5e-3, 5e-3
        else:
            atol, rtol = 2e-2, 2e-2
        if org_model.__class__.__name__ == "BertModel":
            check_output_hidden_state(org_output, sharded_output, stage_manager, atol=atol, rtol=rtol)
        check_loss(org_loss, sharded_loss, atol=atol, rtol=rtol)

    # Check weights
    if test_config["precision"] == "fp32":
        atol, rtol = 5e-3, 1e-3
    else:
        atol, rtol = 5e-3, 5e-3
    if stage_manager is None or stage_manager.is_first_stage():
        check_weight(bert, sharded_bert, col_layer_for_check, tp_group, atol=atol, rtol=rtol, dim=1, verbose=False)

    torch.cuda.empty_cache()
@parameterize(
"test_config",
[
{
"tp_size": 2,
"pp_size": 2,
"num_microbatches": 4,
"enable_all_optimization": True,
"use_lazy_init": False,
"precision": "fp16",
"max_norm": 5,
"initial_scale": 1,
},
{
"tp_size": 2,
"pp_size": 2,
"num_microbatches": 4,
"enable_all_optimization": True,
"use_lazy_init": False,
"precision": "bf16",
"max_norm": 5,
},
],
)
def run_test(test_config):
sub_model_zoo = model_zoo.get_sub_registry("transformers_bert")
for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, test_config)
clear_layout_converter()
Randomizer.reset_index()
torch.cuda.empty_cache()
@parameterize(
"test_config",
[
{
"tp_size": 2,
"pp_size": 2,
"num_microbatches": 4,
"enable_all_optimization": True,
"use_lazy_init": False,
"precision": "bf16",
"max_norm": 5,
},
{
"tp_size": 2,
"pp_size": 2,
"num_microbatches": 4,
"enable_all_optimization": True,
"use_lazy_init": False,
"precision": "fp16",
"max_norm": 5,
"initial_scale": 1,
},
],
)
def run_3d_test(test_config):
sub_model_zoo = model_zoo.get_sub_registry("transformers_bert")
for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, test_config)
clear_layout_converter()
Randomizer.reset_index()
torch.cuda.empty_cache()
def check_grad_clip_norm(rank, world_size, port):
    """Worker entry for the 4-GPU case: set up the NCCL group, run 2-D configs."""
    disable_existing_loggers()
    colossalai.launch(backend="nccl", host="localhost", port=port, rank=rank, world_size=world_size)
    run_test()


def check_grad_clip_norm_3d(rank, world_size, port):
    """Worker entry for the 8-GPU case: set up the NCCL group, run 3-D configs."""
    disable_existing_loggers()
    colossalai.launch(backend="nccl", host="localhost", port=port, rank=rank, world_size=world_size)
    run_3d_test()
@pytest.mark.dist
@rerun_if_address_is_in_use()
@clear_cache_before_run()
def test_grad_clip_norm():
    # 4 processes: tp=2 x pp=2 configurations.
    spawn(check_grad_clip_norm, 4)


@pytest.mark.largedist
@rerun_if_address_is_in_use()
@clear_cache_before_run()
def test_grad_clip_norm_3d():
    # 8 processes: adds a data-parallel dimension on top of tp=2 x pp=2.
    spawn(check_grad_clip_norm_3d, 8)


if __name__ == "__main__":
    test_grad_clip_norm()
    test_grad_clip_norm_3d()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_shardformer/test_hybrid_parallel_grad_clip_norm/test_naive_optimizer.py | tests/test_shardformer/test_hybrid_parallel_grad_clip_norm/test_naive_optimizer.py | import pytest
import torch
from torch.nn.utils.clip_grad import clip_grad_norm_
import colossalai
from colossalai.logging import disable_existing_loggers
from colossalai.shardformer.layer.utils import Randomizer
from colossalai.tensor.d_tensor.api import clear_layout_converter
from colossalai.testing import clear_cache_before_run, parameterize, rerun_if_address_is_in_use, spawn
from tests.kit.model_zoo import model_zoo
from tests.test_shardformer.test_model._utils import (
build_model_from_hybrid_plugin,
check_all_grad_tensors,
check_loss,
check_output_hidden_state,
check_weight,
get_grad_tensors_for_check,
run_forward_backward_with_hybrid_plugin,
unwrap_model,
)
def check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, test_config):
    """Run one fwd/bwd step on a BERT model under the hybrid-parallel plugin
    (naive fp32 optimizer path) and compare gradients, the clipped gradient
    norm, loss, last hidden state and post-step weights against the unsharded
    reference model.

    Fixes: the grad-norm assertion message read "orgin origin model ... shard
    model" — corrected to "origin model ... sharded model".
    """
    org_model, org_optimizer, sharded_model, sharded_optimizer, criterion, booster = build_model_from_hybrid_plugin(
        model_fn, loss_fn, test_config
    )

    org_loss, org_output, sharded_loss, sharded_output = run_forward_backward_with_hybrid_plugin(
        org_model, sharded_model, sharded_optimizer, data_gen_fn, output_transform_fn, criterion, booster
    )

    stage_manager = booster.plugin.stage_manager
    tp_group = booster.plugin.tp_group

    bert = unwrap_model(org_model, "BertModel", "bert")
    sharded_bert = unwrap_model(sharded_model, "BertModel", "bert")

    # layers sharded along different dims under tensor parallelism
    col_layer_for_check = ["encoder.layer[0].output.dense"]
    row_layer_for_check = ["embeddings.word_embeddings", "encoder.layer[0].intermediate.dense"]

    # looser tolerances for reduced precision
    if test_config["precision"] == "fp32":
        atol, rtol = 1e-4, 1e-3
    elif test_config["precision"] == "fp16":
        atol, rtol = 5e-3, 5e-3
    else:
        atol, rtol = 2e-2, 2e-2

    # Check grads
    # Save gradient tensors for comparison between the original model and the sharded model.
    grads_to_check = {}
    if (stage_manager is None or stage_manager.is_first_stage()) and booster.plugin.zero_stage == 0:
        col_layer_grads = get_grad_tensors_for_check(
            bert, sharded_bert, col_layer_for_check, tp_group, atol=atol, rtol=rtol, dim=1, verbose=False
        )
        row_layer_grads = get_grad_tensors_for_check(
            bert, sharded_bert, row_layer_for_check, tp_group, atol=atol, rtol=rtol, dim=0, verbose=False
        )
        grads_to_check.update(col_layer_grads)
        grads_to_check.update(row_layer_grads)
    check_all_grad_tensors(grads_to_check)

    # Check grad norm
    param_gradient_pairs = [
        (p, p.grad) for group in sharded_optimizer.param_groups for p in group["params"] if p.grad is not None
    ]
    origin_norm = clip_grad_norm_(org_model.parameters(), test_config["max_norm"])
    device = origin_norm.device
    hybrid_norm = torch.tensor(sharded_optimizer._compute_grad_norm(param_gradient_pairs)).to(device)
    assert torch.allclose(
        origin_norm, hybrid_norm, atol=atol, rtol=rtol
    ), f"origin model grad norm is not equal to sharded model grad norm\n{origin_norm}\n{hybrid_norm}"

    # optimizer executes step
    org_optimizer.step()
    sharded_optimizer.step()

    # check last hidden state & loss
    if stage_manager is None or stage_manager.is_last_stage():
        if test_config["precision"] == "fp32":
            atol, rtol = 1e-5, 1e-3
        elif test_config["precision"] == "fp16":
            atol, rtol = 5e-3, 5e-3
        else:
            atol, rtol = 2e-2, 2e-2
        if org_model.__class__.__name__ == "BertModel":
            check_output_hidden_state(org_output, sharded_output, stage_manager, atol=atol, rtol=rtol)
        check_loss(org_loss, sharded_loss, atol=atol, rtol=rtol)

    # check weights
    if test_config["precision"] == "fp32":
        atol, rtol = 5e-3, 1e-3
    else:
        atol, rtol = 5e-3, 5e-3
    if stage_manager is None or stage_manager.is_first_stage():
        check_weight(bert, sharded_bert, col_layer_for_check, tp_group, atol=atol, rtol=rtol, dim=1, verbose=False)

    torch.cuda.empty_cache()
@parameterize(
"test_config",
[
{
"tp_size": 2,
"pp_size": 2,
"num_microbatches": 4,
"enable_all_optimization": False,
"use_lazy_init": False,
"precision": "fp32",
"max_norm": 5,
},
],
)
def run_test(test_config):
sub_model_zoo = model_zoo.get_sub_registry("transformers_bert")
for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, test_config)
clear_layout_converter()
Randomizer.reset_index()
torch.cuda.empty_cache()
@parameterize(
"test_config",
[
{
"tp_size": 2,
"pp_size": 2,
"num_microbatches": 4,
"enable_all_optimization": False,
"use_lazy_init": False,
"precision": "fp32",
"max_norm": 5,
},
],
)
def run_3d_test(test_config):
sub_model_zoo = model_zoo.get_sub_registry("transformers_bert")
for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, test_config)
clear_layout_converter()
Randomizer.reset_index()
torch.cuda.empty_cache()
def check_grad_clip_norm(rank, world_size, port):
    """Worker entry for the 4-GPU case: set up the NCCL group, run 2-D configs."""
    disable_existing_loggers()
    colossalai.launch(backend="nccl", host="localhost", port=port, rank=rank, world_size=world_size)
    run_test()


def check_grad_clip_norm_3d(rank, world_size, port):
    """Worker entry for the 8-GPU case: set up the NCCL group, run 3-D configs."""
    disable_existing_loggers()
    colossalai.launch(backend="nccl", host="localhost", port=port, rank=rank, world_size=world_size)
    run_3d_test()
@pytest.mark.dist
@rerun_if_address_is_in_use()
@clear_cache_before_run()
def test_grad_clip_norm():
    # 4 processes: tp=2 x pp=2 configuration.
    spawn(check_grad_clip_norm, 4)


@pytest.mark.largedist
@rerun_if_address_is_in_use()
@clear_cache_before_run()
def test_grad_clip_norm_3d():
    # 8 processes: adds a data-parallel dimension on top of tp=2 x pp=2.
    spawn(check_grad_clip_norm_3d, 8)


if __name__ == "__main__":
    test_grad_clip_norm()
    test_grad_clip_norm_3d()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_shardformer/test_hybrid_parallel_grad_clip_norm/test_zero_optimizer.py | tests/test_shardformer/test_hybrid_parallel_grad_clip_norm/test_zero_optimizer.py | import math
import pytest
import torch
import torch.distributed as dist
from torch.nn.utils.clip_grad import clip_grad_norm_
import colossalai
from colossalai.logging import disable_existing_loggers
from colossalai.shardformer.layer.utils import Randomizer
from colossalai.tensor.d_tensor.api import clear_layout_converter
from colossalai.testing import clear_cache_before_run, parameterize, rerun_if_address_is_in_use, spawn
from tests.kit.model_zoo import model_zoo
from tests.test_shardformer.test_model._utils import (
build_model_from_hybrid_plugin,
check_loss,
check_output_hidden_state,
check_weight,
run_forward_backward_with_hybrid_plugin,
unwrap_model,
)
def check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, test_config):
    """Run one fwd/bwd step on a BERT model under the hybrid-parallel plugin
    with ZeRO (stage 1/2) and compare the per-group gradient norms, loss,
    last hidden state and post-step weights against the unsharded reference.
    """
    org_model, org_optimizer, sharded_model, sharded_optimizer, criterion, booster = build_model_from_hybrid_plugin(
        model_fn, loss_fn, test_config
    )

    org_loss, org_output, sharded_loss, sharded_output = run_forward_backward_with_hybrid_plugin(
        org_model, sharded_model, sharded_optimizer, data_gen_fn, output_transform_fn, criterion, booster
    )

    stage_manager = booster.plugin.stage_manager
    tp_group = booster.plugin.tp_group
    dp_group = booster.plugin.dp_group

    bert = unwrap_model(org_model, "BertModel", "bert")
    sharded_bert = unwrap_model(sharded_model, "BertModel", "bert")
    col_layer_for_check = ["encoder.layer[0].output.dense"]

    # looser tolerances for reduced precision
    if test_config["precision"] == "fp32":
        atol, rtol = 1e-4, 1e-3
    elif test_config["precision"] == "fp16":
        atol, rtol = 5e-3, 5e-3
    else:
        atol, rtol = 2e-2, 2e-2

    dist.barrier()

    # Check gradient norm
    origin_norm = clip_grad_norm_(org_model.parameters(), test_config["max_norm"])

    # Calculate the gradient norm of the sharded optimizer
    device = origin_norm.device
    norm_groups = []
    for group_id in range(sharded_optimizer.num_param_groups):
        working_grads = sharded_optimizer.get_working_grads_by_group_id(group_id)
        norm_group = sharded_optimizer._compute_grad_norm(dp_group, gradients=working_grads)
        norm_groups.append(norm_group)
    # combine the per-group norms: total = sqrt(sum of squared group norms)
    total_norm = 0.0
    for norm in norm_groups:
        total_norm += norm**2.0
    hybrid_norm = torch.tensor(math.sqrt(total_norm)).to(device)

    # If using fp16 precision, divide by the initial scale
    if test_config["precision"] == "fp16":
        hybrid_norm /= test_config["initial_scale"]

    # Assert that the gradient norm of the original model is close to the gradient norm of the hybrid model
    assert torch.allclose(
        origin_norm, hybrid_norm, atol=atol, rtol=rtol
    ), f"Original model grad norm is not equal to sharded model grad norm\n{origin_norm}\n{hybrid_norm}"

    # optimizer executes step
    org_optimizer.step()
    sharded_optimizer.step()

    # check last hidden state & loss
    if stage_manager is None or stage_manager.is_last_stage():
        if test_config["precision"] == "fp32":
            atol, rtol = 1e-5, 1e-3
        elif test_config["precision"] == "fp16":
            atol, rtol = 5e-3, 5e-3
        else:
            atol, rtol = 2e-2, 2e-2
        if org_model.__class__.__name__ == "BertModel":
            check_output_hidden_state(org_output, sharded_output, stage_manager, atol=atol, rtol=rtol)
        check_loss(org_loss, sharded_loss, atol=atol, rtol=rtol)

    # check weights
    if test_config["precision"] == "fp32":
        atol, rtol = 5e-3, 1e-3
    else:
        atol, rtol = 5e-3, 5e-3
    if stage_manager is None or stage_manager.is_first_stage():
        check_weight(bert, sharded_bert, col_layer_for_check, tp_group, atol=atol, rtol=rtol, dim=1, verbose=False)

    torch.cuda.empty_cache()
@parameterize(
"test_config",
[
{
"tp_size": 1,
"pp_size": 2,
"num_microbatches": 4,
"zero_stage": 1,
"enable_all_optimization": False,
"use_lazy_init": False,
"precision": "fp16",
"max_norm": 5,
"initial_scale": 1,
},
{
"tp_size": 2,
"pp_size": 1,
"zero_stage": 2,
"enable_all_optimization": False,
"use_lazy_init": False,
"precision": "fp16",
"max_norm": 5,
"initial_scale": 1,
},
{
"tp_size": 2,
"pp_size": 1,
"zero_stage": 1,
"enable_all_optimization": False,
"use_lazy_init": False,
"precision": "bf16",
"max_norm": 5,
},
],
)
def run_test(test_config):
sub_model_zoo = model_zoo.get_sub_registry("transformers_bert")
for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, test_config)
clear_layout_converter()
Randomizer.reset_index()
torch.cuda.empty_cache()
@parameterize(
"test_config",
[
{
"tp_size": 2,
"pp_size": 2,
"num_microbatches": 4,
"zero_stage": 1,
"enable_all_optimization": True,
"use_lazy_init": False,
"precision": "bf16",
"max_norm": 5,
},
{
"tp_size": 2,
"pp_size": 2,
"num_microbatches": 4,
"zero_stage": 1,
"enable_all_optimization": True,
"use_lazy_init": False,
"precision": "fp16",
"max_norm": 5,
"initial_scale": 1,
},
],
)
def run_3d_test(test_config):
sub_model_zoo = model_zoo.get_sub_registry("transformers_bert")
for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
check_forward_backward(model_fn, data_gen_fn, output_transform_fn, loss_fn, test_config)
clear_layout_converter()
Randomizer.reset_index()
torch.cuda.empty_cache()
def check_grad_clip_norm(rank, world_size, port):
    """Worker entry for the 4-GPU case: set up the NCCL group, run the ZeRO configs."""
    disable_existing_loggers()
    colossalai.launch(backend="nccl", host="localhost", port=port, rank=rank, world_size=world_size)
    run_test()


def check_grad_clip_norm_3d(rank, world_size, port):
    """Worker entry for the 8-GPU case: set up the NCCL group, run the 3-D ZeRO configs."""
    disable_existing_loggers()
    colossalai.launch(backend="nccl", host="localhost", port=port, rank=rank, world_size=world_size)
    run_3d_test()
@pytest.mark.dist
@rerun_if_address_is_in_use()
@clear_cache_before_run()
def test_grad_clip_norm():
    # 4 processes: covers the tp/pp + ZeRO stage 1/2 configurations.
    spawn(check_grad_clip_norm, 4)


@pytest.mark.largedist
@rerun_if_address_is_in_use()
@clear_cache_before_run()
def test_grad_clip_norm_3d():
    # 8 processes: ZeRO stage 1 on top of tp=2 x pp=2.
    spawn(check_grad_clip_norm_3d, 8)


if __name__ == "__main__":
    test_grad_clip_norm()
    test_grad_clip_norm_3d()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.