repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_layers/test_2d/test_2d.py | tests/test_legacy/test_layers/test_2d/test_2d.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pytest
import torch
from checks_2d.check_layer_2d import (
check_classifier_given_embed_weight,
check_classifier_no_given_weight,
check_embed,
check_layernorm,
check_linear,
check_loss,
check_patch_embed,
check_vocab_parallel_classifier_given_embed_weight,
check_vocab_parallel_classifier_no_given_weight,
check_vocab_parallel_embed,
check_vocab_parallel_loss,
)
from checks_2d.check_operation_2d import check_AB, check_ABT, check_ATB
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import rerun_if_address_is_in_use, spawn
# Launch configuration: no pipeline parallelism, 4-way tensor parallelism in
# "2d" mode (i.e. a 2x2 process grid for the 2D parallel layers under test).
CONFIG = dict(
    parallel=dict(pipeline=dict(size=1), tensor=dict(size=4, mode="2d")),
)
def check_operations():
    """Run the 2D-parallel matmul operation checks: A@B, A@B^T and A^T@B."""
    check_AB()
    check_ABT()
    check_ATB()
def check_layer():
    """Run every 2D tensor-parallel layer check against its single-process reference."""
    check_linear()
    check_layernorm()
    check_embed()
    check_patch_embed()
    check_vocab_parallel_embed()
    check_classifier_no_given_weight()
    check_vocab_parallel_classifier_no_given_weight()
    check_classifier_given_embed_weight()
    check_vocab_parallel_classifier_given_embed_weight()
    check_loss()
    check_vocab_parallel_loss()
def check_layer_and_operation(rank, world_size, port):
    """Worker entry point spawned once per rank.

    Initializes the legacy distributed context over NCCL, then runs the layer
    checks and tears the context down.

    Args:
        rank: rank of this process in the spawned group.
        world_size: total number of processes (4 -> 2x2 tensor grid).
        port: free TCP port used for rendezvous on localhost.
    """
    disable_existing_loggers()
    # NOTE(review): CONFIG is defined above but not passed to launch() — verify
    # that this launch API picks the parallel config up some other way.
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    # Disable TF32 and force deterministic cuDNN so the distributed results can
    # be compared against the single-process reference within tight tolerances.
    torch.backends.cuda.matmul.allow_tf32 = False
    torch.backends.cudnn.allow_tf32 = False
    torch.backends.cudnn.deterministic = True
    # check_operations()
    check_layer()
    gpc.destroy()
    torch.cuda.empty_cache()
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_2d():
    """Spawn 4 worker processes and run all 2D tensor-parallel checks."""
    spawn(check_layer_and_operation, 4)
if __name__ == "__main__":
    test_2d()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_layers/test_2d/checks_2d/common.py | tests/test_legacy/test_layers/test_2d/checks_2d/common.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
# Shared dimensions for the 2D tensor-parallel tests.
DEPTH = 2  # side length of the 2D process grid (DEPTH x DEPTH = 4 ranks)
BATCH_SIZE = 8
SEQ_LENGTH = 8
HIDDEN_SIZE = 8
NUM_CLASSES = 8
VOCAB_SIZE = 16
IMG_SIZE = 16
def check_equal(A, B):
    """Assert that tensors *A* and *B* are element-wise close (rtol=1e-3, atol=1e-2)."""
    is_close = torch.allclose(A, B, rtol=1e-3, atol=1e-2)
    assert is_close
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_layers/test_2d/checks_2d/check_operation_2d.py | tests/test_legacy/test_layers/test_2d/checks_2d/check_operation_2d.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
from colossalai.accelerator import get_accelerator
from colossalai.legacy.context.parallel_mode import ParallelMode
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.nn.layer.parallel_2d._operation import Matmul_AB_2D, Matmul_ABT_2D, Matmul_ATB_2D
from colossalai.legacy.utils import print_rank_0
from .common import BATCH_SIZE, DEPTH, HIDDEN_SIZE, SEQ_LENGTH, check_equal
def check_AB():
    """Check forward/backward of Matmul_AB_2D (C = A @ B) against torch.matmul.

    Every rank broadcasts the full master tensors from rank 0, takes its own
    (i, j) shard, runs the distributed op, and compares its local shard of the
    result (and of the gradients) with the corresponding shard of a
    single-process reference computation.
    """
    data_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.DATA) else gpc.get_local_rank(ParallelMode.DATA)
    pipeline_parallel_rank = (
        0 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_local_rank(ParallelMode.PIPELINE)
    )
    pipeline_parallel_size = (
        1 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_world_size(ParallelMode.PIPELINE)
    )
    tensor_parallel_size = gpc.get_world_size(ParallelMode.TENSOR)
    dtype = torch.float
    # (i, j) position of this rank in the DEPTH x DEPTH grid.
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
    # Broadcast full A from rank 0, then slice out this rank's shard.
    A_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=get_accelerator().get_current_device())
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, DEPTH, dim=0)[i]
    A = torch.chunk(A, DEPTH, dim=-1)[j]
    A = A.clone()
    A.requires_grad = True
    B_shape = (HIDDEN_SIZE, 4 * HIDDEN_SIZE)
    B_master = torch.randn(B_shape, dtype=dtype, device=get_accelerator().get_current_device())
    torch.distributed.broadcast(B_master, src=0)
    B = torch.chunk(B_master, DEPTH, dim=0)[i]
    B = torch.chunk(B, DEPTH, dim=-1)[j]
    B = B.clone()
    B.requires_grad = True
    out_shape = (BATCH_SIZE // DEPTH, SEQ_LENGTH, 4 * HIDDEN_SIZE // DEPTH)
    out = Matmul_AB_2D.apply(
        A,
        B,
        DEPTH,
        out_shape,
        i,
        j,
        ParallelMode.PARALLEL_2D_ROW,
        ParallelMode.PARALLEL_2D_COL,
        data_parallel_rank,
        pipeline_parallel_rank,
        pipeline_parallel_size,
        tensor_parallel_size,
    )
    # Single-process reference on fresh leaf clones of the master tensors
    # (so the reference backward does not touch the distributed graph).
    A_master = A_master.clone()
    A_master.requires_grad = True
    B_master = B_master.clone()
    B_master.requires_grad = True
    C_master = torch.matmul(A_master, B_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[j]
    # check forward correctness
    check_equal(out, C)
    print_rank_0("AB forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=get_accelerator().get_current_device())
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[j]
    out.backward(grad)
    C_master.backward(grad_master)
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]
    A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[j]
    # check backward correctness
    check_equal(A_grad, A.grad)
    B_grad = B_master.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[i]
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[j]
    # check backward correctness
    check_equal(B_grad, B.grad)
    print_rank_0("AB backward: pass")
def check_ABT():
    """Check forward/backward of Matmul_ABT_2D (A = C @ B^T) against torch.matmul.

    Same scheme as check_AB: broadcast masters from rank 0, shard, run the
    distributed op, and compare local shards of output and gradients with a
    single-process reference.
    """
    data_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.DATA) else gpc.get_local_rank(ParallelMode.DATA)
    pipeline_parallel_rank = (
        0 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_local_rank(ParallelMode.PIPELINE)
    )
    pipeline_parallel_size = (
        1 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_world_size(ParallelMode.PIPELINE)
    )
    tensor_parallel_size = gpc.get_world_size(ParallelMode.TENSOR)
    dtype = torch.float
    device = get_accelerator().get_current_device()
    # (i, j) position of this rank in the DEPTH x DEPTH grid.
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
    C_shape = (BATCH_SIZE, SEQ_LENGTH, 4 * HIDDEN_SIZE)
    C_master = torch.randn(C_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(C_master, src=0)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[j]
    C = C.clone()
    C.requires_grad = True
    B_shape = (HIDDEN_SIZE, 4 * HIDDEN_SIZE)
    B_master = torch.randn(B_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(B_master, src=0)
    B = torch.chunk(B_master, DEPTH, dim=0)[i]
    B = torch.chunk(B, DEPTH, dim=-1)[j]
    B = B.clone()
    B.requires_grad = True
    out = Matmul_ABT_2D.apply(
        C,
        B,
        DEPTH,
        (BATCH_SIZE // DEPTH, SEQ_LENGTH, HIDDEN_SIZE // DEPTH),
        i,
        j,
        ParallelMode.PARALLEL_2D_ROW,
        ParallelMode.PARALLEL_2D_COL,
        data_parallel_rank,
        pipeline_parallel_rank,
        pipeline_parallel_size,
        tensor_parallel_size,
    )
    # Single-process reference on fresh leaf clones of the master tensors.
    C_master = C_master.clone()
    C_master.requires_grad = True
    B_master = B_master.clone()
    B_master.requires_grad = True
    A_master = torch.matmul(C_master, B_master.transpose(0, 1))
    A = torch.chunk(A_master, DEPTH, dim=0)[i]
    A = torch.chunk(A, DEPTH, dim=-1)[j]
    check_equal(out, A)
    print_rank_0("ABT forward: pass")
    grad_shape = A_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[j]
    # backward
    out.backward(grad)
    A_master.backward(grad_master)
    C_grad = C_master.grad
    C_grad = torch.chunk(C_grad, DEPTH, dim=0)[i]
    C_grad = torch.chunk(C_grad, DEPTH, dim=-1)[j]
    check_equal(C_grad, C.grad)
    B_grad = B_master.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[i]
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[j]
    check_equal(B_grad, B.grad)
    print_rank_0("ABT backward: pass")
def check_ATB():
    """Check forward/backward of Matmul_ATB_2D (B = A^T @ C) against torch.matmul.

    Same scheme as check_AB: broadcast masters from rank 0, shard, run the
    distributed op, and compare local shards of output and gradients with a
    single-process reference.
    """
    data_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.DATA) else gpc.get_local_rank(ParallelMode.DATA)
    pipeline_parallel_rank = (
        0 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_local_rank(ParallelMode.PIPELINE)
    )
    pipeline_parallel_size = (
        1 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_world_size(ParallelMode.PIPELINE)
    )
    tensor_parallel_size = gpc.get_world_size(ParallelMode.TENSOR)
    device = get_accelerator().get_current_device()
    dtype = torch.float
    # (i, j) position of this rank in the DEPTH x DEPTH grid.
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
    A_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, DEPTH, dim=0)[i]
    A = torch.chunk(A, DEPTH, dim=-1)[j]
    A = A.clone()
    A.requires_grad = True
    C_shape = (BATCH_SIZE, SEQ_LENGTH, 4 * HIDDEN_SIZE)
    C_master = torch.randn(C_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(C_master, src=0)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[j]
    C = C.clone()
    C.requires_grad = True
    out = Matmul_ATB_2D.apply(
        A,
        C,
        DEPTH,
        (HIDDEN_SIZE // DEPTH, 4 * HIDDEN_SIZE // DEPTH),
        i,
        j,
        ParallelMode.PARALLEL_2D_ROW,
        ParallelMode.PARALLEL_2D_COL,
        data_parallel_rank,
        pipeline_parallel_rank,
        pipeline_parallel_size,
        tensor_parallel_size,
    )
    # Single-process reference: flatten the leading dims, then A^T @ C.
    A_master = A_master.clone()
    A_master.requires_grad = True
    C_master = C_master.clone()
    C_master.requires_grad = True
    B_master = torch.matmul(
        A_master.view(-1, A_master.shape[-1]).transpose(0, 1), C_master.view(-1, C_master.shape[-1])
    )
    B = torch.chunk(B_master, DEPTH, dim=0)[i]
    B = torch.chunk(B, DEPTH, dim=-1)[j]
    check_equal(out, B)
    print_rank_0("ATB forward: pass")
    grad_shape = B_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[j]
    out.backward(grad)
    B_master.backward(grad_master)
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]
    A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[j]
    check_equal(A_grad, A.grad)
    C_grad = C_master.grad
    C_grad = torch.chunk(C_grad, DEPTH, dim=0)[i]
    C_grad = torch.chunk(C_grad, DEPTH, dim=-1)[j]
    check_equal(C_grad, C.grad)
    print_rank_0("ATB backward: pass")
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_layers/test_2d/checks_2d/check_layer_2d.py | tests/test_legacy/test_layers/test_2d/checks_2d/check_layer_2d.py | import torch
from colossalai.accelerator import get_accelerator
from colossalai.legacy.context.parallel_mode import ParallelMode
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.nn import (
Classifier2D,
CrossEntropyLoss2D,
Embedding2D,
LayerNorm2D,
Linear2D,
PatchEmbedding2D,
VanillaClassifier,
VanillaPatchEmbedding,
VocabParallelClassifier2D,
VocabParallelCrossEntropyLoss2D,
VocabParallelEmbedding2D,
)
from colossalai.legacy.utils import print_rank_0
from .common import BATCH_SIZE, DEPTH, HIDDEN_SIZE, IMG_SIZE, NUM_CLASSES, SEQ_LENGTH, VOCAB_SIZE, check_equal
def check_linear():
    """Check Linear2D forward/backward against a plain matmul + bias reference.

    Broadcasts master input/weight/bias from rank 0, copies this rank's shard
    into the layer, and compares local shards of the output and of every
    gradient with the single-process reference. The broadcast order must match
    on all ranks, so statements are not to be reordered.
    """
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    INPUT_SIZE = HIDDEN_SIZE
    OUTPUT_SIZE = HIDDEN_SIZE
    # (i, j) position of this rank in the DEPTH x DEPTH grid.
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
    layer = Linear2D(INPUT_SIZE, OUTPUT_SIZE)
    A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, DEPTH, dim=0)[i]
    A = torch.chunk(A, DEPTH, dim=-1)[j]
    A = A.clone()
    A.requires_grad = True
    W_shape = (INPUT_SIZE, OUTPUT_SIZE)
    W_master = torch.randn(W_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(W_master, src=0)
    W = torch.chunk(W_master, DEPTH, dim=0)[i]
    W = torch.chunk(W, DEPTH, dim=-1)[j]
    W = W.clone()
    W.requires_grad = True
    B_shape = OUTPUT_SIZE
    B_master = torch.randn(B_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(B_master, src=0)
    # 1-D bias is chunked twice along its only dim: first by row rank j, then by col rank i.
    B = torch.chunk(B_master, DEPTH, dim=-1)[j]
    B = torch.chunk(B, DEPTH, dim=-1)[i]
    B = B.clone()
    B.requires_grad = True
    layer.weight.data.copy_(W)
    layer.bias.data.copy_(B)
    out = layer(A)
    # Single-process reference on fresh leaf clones of the master tensors.
    A_master = A_master.clone()
    A_master.requires_grad = True
    W_master = W_master.clone()
    W_master.requires_grad = True
    B_master = B_master.clone()
    B_master.requires_grad = True
    C_master = torch.matmul(A_master, W_master) + B_master
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[j]
    check_equal(out, C)
    print_rank_0("linear forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=get_accelerator().get_current_device())
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[j]
    grad = grad.clone()
    out.backward(grad)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]
    A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[j]
    check_equal(A_grad, A.grad)
    W_grad = W_master.grad
    W_grad = torch.chunk(W_grad, DEPTH, dim=0)[i]
    W_grad = torch.chunk(W_grad, DEPTH, dim=-1)[j]
    check_equal(W_grad, layer.weight.grad)
    B_grad = B_master.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[j]
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[i]
    # if i == 0:
    check_equal(B_grad, layer.bias.grad)
    print_rank_0("linear backward: pass")
def check_layernorm():
    """Check LayerNorm2D forward/backward against a manual layer-norm reference.

    The reference normalizes over the last dim with mean/variance computed
    explicitly (no affine parameters are checked here).
    """
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    INPUT_SIZE = HIDDEN_SIZE
    EPS = 1e-12
    # (i, j) position of this rank in the DEPTH x DEPTH grid.
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
    layernorm = LayerNorm2D(INPUT_SIZE)
    A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, DEPTH, dim=0)[i]
    A = torch.chunk(A, DEPTH, dim=-1)[j]
    A = A.clone()
    A.requires_grad = True
    out = layernorm(A)
    # Manual reference: E[x], Var[x] = E[x^2] - E[x]^2, then (x - E) / sqrt(Var + EPS).
    A_master = A_master.clone()
    A_master.requires_grad = True
    E_master = torch.sum(A_master, dim=-1, keepdim=True)
    E_master /= INPUT_SIZE
    V_master = torch.sum(A_master * A_master, dim=-1, keepdim=True)
    V_master /= INPUT_SIZE
    V_master = V_master - E_master * E_master
    V_master = 1.0 / torch.sqrt(V_master + EPS)
    C_master = (A_master - E_master) * V_master
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[j]
    check_equal(out, C)
    print_rank_0("layer norm forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=get_accelerator().get_current_device())
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[j]
    out.backward(grad)
    C_master.backward(grad_master)
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]
    A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[j]
    check_equal(A_grad, A.grad)
    print_rank_0("layer norm backward: pass")
def check_embed():
    """Check Embedding2D forward/backward against torch.nn.Embedding.

    The embedding weight is sharded along the hidden dim only (chunked twice
    on dim=-1); token indices are replicated on all ranks.
    """
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    # (i, j) position of this rank in the DEPTH x DEPTH grid.
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
    embed = Embedding2D(VOCAB_SIZE, HIDDEN_SIZE)
    embed = embed.to(dtype).to(device)
    embed_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
    embed_master = embed_master.to(dtype).to(device)
    weight_master = embed_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=-1)[j]
    weight = torch.chunk(weight, DEPTH, dim=-1)[i]
    embed.weight.data.copy_(weight)
    A_shape = (BATCH_SIZE, SEQ_LENGTH)
    A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    out = embed(A)
    A_master = A_master.clone()
    C_master = embed_master(A_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[j]
    check_equal(out, C)
    print_rank_0("embed forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[j]
    grad = grad.clone()
    out.backward(grad)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    # Weight grad is sharded the same way the weight was.
    B_grad = embed_master.weight.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[j]
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[i]
    check_equal(B_grad, embed.weight.grad)
    print_rank_0("embed backward: pass")
def check_patch_embed():
    """Check PatchEmbedding2D forward/backward against VanillaPatchEmbedding.

    cls_token and pos_embed are initialized to ones on both layers so they
    start identical; projection weight/bias are broadcast from rank 0 and
    sharded along dim 0 into the distributed layer.
    """
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    # (i, j) position of this rank in the DEPTH x DEPTH grid.
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
    layer = PatchEmbedding2D(IMG_SIZE, 4, 3, HIDDEN_SIZE, dtype=dtype)
    torch.nn.init.ones_(layer.cls_token)
    torch.nn.init.ones_(layer.pos_embed)
    layer = layer.to(device)
    layer_master = VanillaPatchEmbedding(IMG_SIZE, 4, 3, HIDDEN_SIZE, dtype=dtype)
    torch.nn.init.ones_(layer_master.cls_token)
    torch.nn.init.ones_(layer_master.pos_embed)
    layer_master = layer_master.to(device)
    proj_weight_master = layer_master.weight.data
    torch.distributed.broadcast(proj_weight_master, src=0)
    proj_weight = torch.chunk(proj_weight_master, DEPTH, dim=0)[j]
    proj_weight = torch.chunk(proj_weight, DEPTH, dim=0)[i]
    layer.weight.data.copy_(proj_weight)
    proj_bias_master = layer_master.bias.data
    torch.distributed.broadcast(proj_bias_master, src=0)
    proj_bias = torch.chunk(proj_bias_master, DEPTH, dim=0)[j]
    proj_bias = torch.chunk(proj_bias, DEPTH, dim=0)[i]
    layer.bias.data.copy_(proj_bias)
    A_shape = (BATCH_SIZE, 3, IMG_SIZE, IMG_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    out = layer(A)
    A_master = A_master.clone()
    C_master = layer_master(A_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[j]
    check_equal(out, C)
    print_rank_0("patch embed forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[j]
    grad = grad.clone()
    out.backward(grad)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    # Each parameter grad is sharded the same way its parameter was copied in.
    cls_grad_master = layer_master.cls_token.grad
    cls_grad = torch.chunk(cls_grad_master, DEPTH, dim=-1)[j]
    cls_grad = torch.chunk(cls_grad, DEPTH, dim=-1)[i]
    check_equal(cls_grad, layer.cls_token.grad)
    pos_grad_master = layer_master.pos_embed.grad
    pos_grad = torch.chunk(pos_grad_master, DEPTH, dim=-1)[j]
    pos_grad = torch.chunk(pos_grad, DEPTH, dim=-1)[i]
    check_equal(pos_grad, layer.pos_embed.grad)
    B_grad = layer_master.weight.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[j]
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[i]
    check_equal(B_grad, layer.weight.grad)
    bias_grad = layer_master.bias.grad
    bias_grad = torch.chunk(bias_grad, DEPTH)[j]
    bias_grad = torch.chunk(bias_grad, DEPTH)[i]
    check_equal(bias_grad, layer.bias.grad)
    print_rank_0("patch embed backward: pass")
def check_vocab_parallel_embed():
    """Check VocabParallelEmbedding2D forward/backward against torch.nn.Embedding.

    Unlike Embedding2D, the weight here is sharded along BOTH the hidden dim
    (by row rank j) and the vocab dim (by col rank i).
    """
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    # (i, j) position of this rank in the DEPTH x DEPTH grid.
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
    embed = VocabParallelEmbedding2D(VOCAB_SIZE, HIDDEN_SIZE)
    embed = embed.to(dtype).to(device)
    embed_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
    embed_master = embed_master.to(dtype).to(device)
    weight_master = embed_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=-1)[j]
    weight = torch.chunk(weight, DEPTH, dim=0)[i]
    embed.weight.data.copy_(weight)
    A_shape = (BATCH_SIZE, SEQ_LENGTH)
    A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    out = embed(A)
    A_master = A_master.clone()
    C_master = embed_master(A_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[j]
    check_equal(out, C)
    print_rank_0("vocab parallel embed forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[j]
    grad = grad.clone()
    out.backward(grad)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    # Weight grad is sharded the same way the weight was.
    B_grad = embed_master.weight.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[j]
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[i]
    check_equal(B_grad, embed.weight.grad)
    print_rank_0("vocab parallel embed backward: pass")
def check_classifier_no_given_weight():
    """Check Classifier2D (own weight/bias) against A @ W^T + b.

    Inputs/weights are small random integers (randint) cast to float so the
    comparison tolerances are easy to meet. The classifier output is only
    sharded along the batch dim; the class dim stays whole (see the
    commented-out dim=-1 chunks below).
    """
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    INPUT_SIZE = HIDDEN_SIZE
    OUTPUT_SIZE = NUM_CLASSES
    # (i, j) position of this rank in the DEPTH x DEPTH grid.
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
    layer = Classifier2D(INPUT_SIZE, OUTPUT_SIZE)
    A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
    A_master = torch.randint(5, A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, DEPTH, dim=0)[i]
    A = torch.chunk(A, DEPTH, dim=-1)[j]
    A = A.clone()
    A.requires_grad = True
    W_shape = (OUTPUT_SIZE, INPUT_SIZE)
    W_master = torch.randint(5, W_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(W_master, src=0)
    W = torch.chunk(W_master, DEPTH, dim=-1)[j]
    W = torch.chunk(W, DEPTH, dim=-1)[i]
    W = W.clone()
    layer.weight.data.copy_(W)
    # W.requires_grad = True
    B_shape = (OUTPUT_SIZE,)
    B_master = torch.randint(5, B_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(B_master, src=0)
    # B = torch.chunk(B_master, DEPTH, dim=0)[j]
    # Bias is replicated (not sharded) on every rank.
    B = B_master.clone()
    layer.bias.data.copy_(B)
    out = layer(A)
    # Single-process reference on fresh leaf clones of the master tensors.
    A_master = A_master.clone()
    A_master.requires_grad = True
    W_master = W_master.clone()
    W_master.requires_grad = True
    B_master = B_master.clone()
    B_master.requires_grad = True
    C_master = torch.matmul(A_master, W_master.transpose(0, 1)) + B_master
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    # C = torch.chunk(C, DEPTH, dim=-1)[j]
    check_equal(out, C)
    print_rank_0("classifier (no given weight) forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=get_accelerator().get_current_device())
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    # grad = torch.chunk(grad, DEPTH, dim=-1)[j]
    grad = grad.clone()
    out.backward(grad)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]
    A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[j]
    check_equal(A_grad, A.grad)
    W_grad = W_master.grad
    W_grad = torch.chunk(W_grad, DEPTH, dim=-1)[j]
    W_grad = torch.chunk(W_grad, DEPTH, dim=-1)[i]
    check_equal(W_grad, layer.weight.grad)
    B_grad = B_master.grad
    # B_grad = torch.chunk(B_grad, DEPTH, dim=0)[j]
    # if i == 0:
    check_equal(B_grad, layer.bias.grad)
    print_rank_0("classifier (no given weight) backward: pass")
def check_vocab_parallel_classifier_no_given_weight():
    """Check VocabParallelClassifier2D (own weight/bias) against VanillaClassifier.

    Weight is sharded along vocab (dim 0, by col rank i) and hidden (dim -1,
    by row rank j); the 1-D bias is chunked twice along its only dim.
    """
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    # (i, j) position of this rank in the DEPTH x DEPTH grid.
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
    layer = VocabParallelClassifier2D(HIDDEN_SIZE, VOCAB_SIZE, bias=True)
    layer = layer.to(dtype).to(device)
    layer_master = VanillaClassifier(HIDDEN_SIZE, VOCAB_SIZE, bias=True)
    layer_master = layer_master.to(dtype).to(device)
    weight_master = layer_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=0)[i]
    weight = torch.chunk(weight, DEPTH, dim=-1)[j]
    layer.weight.data.copy_(weight)
    bias_master = layer_master.bias.data
    torch.distributed.broadcast(bias_master, src=0)
    bias = torch.chunk(bias_master, DEPTH)[j]
    bias = torch.chunk(bias, DEPTH)[i]
    layer.bias.data.copy_(bias)
    A_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, DEPTH, dim=0)[i]
    A = torch.chunk(A, DEPTH, dim=-1)[j]
    A = A.clone()
    A.requires_grad = True
    out = layer(A)
    A_master = A_master.clone()
    A_master.requires_grad = True
    C_master = layer_master(A_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[j]
    check_equal(out, C)
    print_rank_0("vocab parallel classifier (no given weight) forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[j]
    grad = grad.clone()
    out.backward(grad)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]
    A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[j]
    check_equal(A_grad, A.grad)
    # Parameter grads are sharded the same way the parameters were copied in.
    W_grad = layer_master.weight.grad
    W_grad = torch.chunk(W_grad, DEPTH, dim=0)[i]
    W_grad = torch.chunk(W_grad, DEPTH, dim=-1)[j]
    check_equal(W_grad, layer.weight.grad)
    B_grad = layer_master.bias.grad
    B_grad = torch.chunk(B_grad, DEPTH)[j]
    B_grad = torch.chunk(B_grad, DEPTH)[i]
    check_equal(B_grad, layer.bias.grad)
    print_rank_0("vocab parallel classifier (no given weight) backward: pass")
def check_classifier_given_embed_weight():
    """Check Classifier2D with a weight tied to an Embedding2D's weight.

    Builds embed -> classifier with shared weight on both the distributed and
    the reference side, then checks the forward output and the tied weight's
    gradient.
    """
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    # (i, j) position of this rank in the DEPTH x DEPTH grid.
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
    embed = Embedding2D(VOCAB_SIZE, HIDDEN_SIZE)
    embed = embed.to(dtype).to(device)
    embed_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
    embed_master = embed_master.to(dtype).to(device)
    weight_master = embed_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=-1)[j]
    weight = torch.chunk(weight, DEPTH, dim=-1)[i]
    embed.weight.data.copy_(weight)
    # Tie the classifier weight to the embedding weight (bias disabled).
    layer = Classifier2D(HIDDEN_SIZE, VOCAB_SIZE, weight=embed.weight, bias=False)
    layer = layer.to(dtype).to(device)
    layer_master = VanillaClassifier(HIDDEN_SIZE, VOCAB_SIZE, weight=embed_master.weight, bias=False)
    layer_master = layer_master.to(dtype).to(device)
    A_shape = (BATCH_SIZE, SEQ_LENGTH)
    A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    out = layer(embed(A))
    A_master = A_master.clone()
    C_master = layer_master(embed_master(A_master))
    # Output is sharded along the batch dim only.
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    check_equal(out, C)
    print_rank_0("classifier (given embed weight) forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = grad.clone()
    out.backward(grad)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    W_grad = embed_master.weight.grad
    W_grad = torch.chunk(W_grad, DEPTH, dim=-1)[j]
    W_grad = torch.chunk(W_grad, DEPTH, dim=-1)[i]
    check_equal(W_grad, embed.weight.grad)
    print_rank_0("classifier (given embed weight) backward: pass")
def check_vocab_parallel_classifier_given_embed_weight():
    """Check VocabParallelClassifier2D with a weight tied to VocabParallelEmbedding2D.

    Same tied-weight scheme as check_classifier_given_embed_weight, but the
    weight is sharded along both hidden (dim -1, by j) and vocab (dim 0, by i).
    """
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    # (i, j) position of this rank in the DEPTH x DEPTH grid.
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
    embed = VocabParallelEmbedding2D(VOCAB_SIZE, HIDDEN_SIZE)
    embed = embed.to(dtype).to(device)
    embed_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
    embed_master = embed_master.to(dtype).to(device)
    weight_master = embed_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=-1)[j]
    weight = torch.chunk(weight, DEPTH, dim=0)[i]
    embed.weight.data.copy_(weight)
    # Tie the classifier weight to the embedding weight (bias disabled).
    layer = VocabParallelClassifier2D(HIDDEN_SIZE, VOCAB_SIZE, weight=embed.weight, bias=False)
    layer = layer.to(dtype).to(device)
    layer_master = VanillaClassifier(HIDDEN_SIZE, VOCAB_SIZE, weight=embed_master.weight, bias=False)
    layer_master = layer_master.to(dtype).to(device)
    A_shape = (BATCH_SIZE, SEQ_LENGTH)
    A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    out = layer(embed(A))
    A_master = A_master.clone()
    C_master = layer_master(embed_master(A_master))
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[j]
    check_equal(out, C)
    print_rank_0("vocab parallel classifier (given embed weight) forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[j]
    grad = grad.clone()
    out.backward(grad)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    W_grad = embed_master.weight.grad
    W_grad = torch.chunk(W_grad, DEPTH, dim=-1)[j]
    W_grad = torch.chunk(W_grad, DEPTH, dim=0)[i]
    check_equal(W_grad, embed.weight.grad)
    print_rank_0("vocab parallel classifier (given embed weight) backward: pass")
def check_loss():
    """Check CrossEntropyLoss2D against torch.nn.CrossEntropyLoss.

    Logits are sharded along the batch dim only; targets are replicated.
    Forward losses and the input-logit gradients must match.
    """
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    # Return value discarded — likely leftover after removing an unused
    # variable; kept byte-identical (the call itself has no side effect here).
    gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
    criterion = CrossEntropyLoss2D()
    criterion_master = torch.nn.CrossEntropyLoss()
    out_shape = (BATCH_SIZE, NUM_CLASSES)
    out_master = torch.randn(out_shape, dtype=dtype, device=device)
    target_master = torch.randint(NUM_CLASSES, (BATCH_SIZE,), dtype=torch.long, device=device)
    torch.distributed.broadcast(out_master, src=0)
    torch.distributed.broadcast(target_master, src=0)
    out = torch.chunk(out_master, DEPTH, dim=0)[i]
    out = out.clone()
    out.requires_grad = True
    loss = criterion(out, target_master)
    out_master = out_master.clone()
    out_master.requires_grad = True
    loss_master = criterion_master(out_master, target_master)
    check_equal(loss, loss_master)
    print_rank_0("cross entropy loss forward: pass")
    loss.backward()
    loss_master.backward()
    out_grad = out_master.grad
    out_grad = torch.chunk(out_grad, DEPTH, dim=0)[i]
    check_equal(out_grad, out.grad)
    print_rank_0("cross entropy loss backward: pass")
def check_vocab_parallel_loss():
device = get_accelerator().get_current_device()
dtype = torch.float32
j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
criterion = VocabParallelCrossEntropyLoss2D()
criterion_master = torch.nn.CrossEntropyLoss()
out_shape = (BATCH_SIZE, NUM_CLASSES)
out_master = torch.randn(out_shape, dtype=dtype, device=device)
target_master = torch.randint(NUM_CLASSES, (BATCH_SIZE,), dtype=torch.long, device=device)
torch.distributed.broadcast(out_master, src=0)
torch.distributed.broadcast(target_master, src=0)
out = torch.chunk(out_master, DEPTH, dim=0)[i]
out = torch.chunk(out, DEPTH, dim=-1)[j]
out = out.clone()
out.requires_grad = True
loss = criterion(out, target_master)
out_master = out_master.clone()
out_master.requires_grad = True
loss_master = criterion_master(out_master, target_master)
check_equal(loss, loss_master)
print_rank_0("vocab parallel cross entropy loss forward: pass")
loss.backward()
loss_master.backward()
out_grad = out_master.grad
out_grad = torch.chunk(out_grad, DEPTH, dim=0)[i]
out_grad = torch.chunk(out_grad, DEPTH, dim=-1)[j]
check_equal(out_grad, out.grad)
print_rank_0("vocab parallel cross entropy loss backward: pass")
# def check_attention():
# device = get_accelerator().get_current_device()
# dtype = torch.float32
# INPUT_SIZE = HIDDEN_SIZE
# NUM_ATTENTION_HEADS = 2
# j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
# i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
# layer = TransformerSelfAttention2D(
# HIDDEN_SIZE,
# NUM_ATTENTION_HEADS,
# attention_dropout_prob=0.5,
# hidden_dropout_prob=0.5,
# )
# A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
# A_master = torch.randn(A_shape, dtype=dtype, device=device)
# torch.distributed.broadcast(A_master, src=0)
# A = torch.chunk(A_master, DEPTH, dim=0)[i]
# A = torch.chunk(A, DEPTH, dim=-1)[j]
# A = A.clone()
# A.requires_grad = True
# mask_shape = (BATCH_SIZE // DEPTH, NUM_ATTENTION_HEADS // DEPTH, SEQ_LENGTH, SEQ_LENGTH)
# attention_mask = torch.zeros(mask_shape, dtype=dtype, device=device)
# out = layer(A, attention_mask)
# assert out.shape == (BATCH_SIZE // DEPTH, SEQ_LENGTH, INPUT_SIZE // DEPTH)
# print_rank_0('self attention forward: pass')
# grad_shape = out.shape
# grad = torch.randn(grad_shape, dtype=dtype, device=device)
# out.backward(grad)
# assert A.grad.shape == A.shape
# print_rank_0('self attention backward: pass')
# def check_mlp():
# device = get_accelerator().get_current_device()
# dtype = torch.float32
# INPUT_SIZE = HIDDEN_SIZE
# j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
# i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
# layer = TransformerMLP2D(
# HIDDEN_SIZE,
# dropout_prob=0.5,
# act_func='gelu',
# )
# A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
# A_master = torch.randn(A_shape, dtype=dtype, device=device)
# torch.distributed.broadcast(A_master, src=0)
# A = torch.chunk(A_master, DEPTH, dim=0)[i]
# A = torch.chunk(A, DEPTH, dim=-1)[j]
# A = A.clone()
# A.requires_grad = True
# out = layer(A)
# assert out.shape == (BATCH_SIZE // DEPTH, SEQ_LENGTH, INPUT_SIZE // DEPTH)
# print_rank_0('mlp forward: pass')
# grad_shape = out.shape
# grad = torch.randn(grad_shape, dtype=dtype, device=device)
# out.backward(grad)
# assert A.grad.shape == A.shape
# print_rank_0('mlp backward: pass')
# def check_transformerlayer():
# device = get_accelerator().get_current_device()
# dtype = torch.float32
# INPUT_SIZE = HIDDEN_SIZE
# NUM_ATTENTION_HEADS = 2
# j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
# i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
# layer = TransformerLayer2D(HIDDEN_SIZE,
# NUM_ATTENTION_HEADS,
# act_func='gelu',
# attention_dropout_prob=0.5,
# hidden_dropout_prob=0.5)
# A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
# A_master = torch.randn(A_shape, dtype=dtype, device=device)
# torch.distributed.broadcast(A_master, src=0)
# A = torch.chunk(A_master, DEPTH, dim=0)[i]
# A = torch.chunk(A, DEPTH, dim=-1)[j]
# A = A.clone()
# A.requires_grad = True
# mask_shape = (BATCH_SIZE // DEPTH, NUM_ATTENTION_HEADS // DEPTH, SEQ_LENGTH, SEQ_LENGTH)
# attention_mask = torch.zeros(mask_shape, dtype=dtype, device=device)
# out = layer(A, attention_mask)
# assert out.shape == (BATCH_SIZE // DEPTH, SEQ_LENGTH, INPUT_SIZE // DEPTH)
# print_rank_0('transformerlayer forward: pass')
# grad_shape = out.shape
# grad = torch.randn(grad_shape, dtype=dtype, device=device)
# out.backward(grad)
# assert A.grad.shape == A.shape
# print_rank_0('transformerlayer backward: pass')
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_layers/test_2d/checks_2d/__init__.py | tests/test_legacy/test_layers/test_2d/checks_2d/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_layers/test_3d/test_3d.py | tests/test_legacy/test_layers/test_3d/test_3d.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pytest
import torch
from checks_3d.check_layer_3d import (
check_classifier_no_given_weight,
check_embed,
check_layernorm,
check_linear,
check_loss,
check_patch_embed,
check_vocab_parallel_classifier_given_embed_weight,
check_vocab_parallel_classifier_no_given_weight,
check_vocab_parallel_embed,
check_vocab_parallel_loss,
)
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import rerun_if_address_is_in_use, skip_if_not_enough_gpus, spawn
CONFIG = dict(
parallel=dict(
pipeline=1,
tensor=dict(mode="3d", size=8),
),
seed=42,
)
def check_layer():
check_linear()
check_layernorm()
check_classifier_no_given_weight()
check_vocab_parallel_classifier_no_given_weight()
check_vocab_parallel_classifier_given_embed_weight()
check_embed()
check_patch_embed()
check_vocab_parallel_embed()
check_loss()
check_vocab_parallel_loss()
def check_layer_and_operation(rank, world_size, port):
disable_existing_loggers()
launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
torch.backends.cudnn.deterministic = True
check_layer()
gpc.destroy()
torch.cuda.empty_cache()
@pytest.mark.dist
@skip_if_not_enough_gpus(min_gpus=8)
@rerun_if_address_is_in_use()
def test_3d():
spawn(check_layer_and_operation, 8)
if __name__ == "__main__":
test_3d()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_layers/test_3d/checks_3d/check_layer_3d.py | tests/test_legacy/test_layers/test_3d/checks_3d/check_layer_3d.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import time
import torch
from colossalai.accelerator import get_accelerator
from colossalai.legacy.constants import INPUT_GROUP_3D, OUTPUT_GROUP_3D, WEIGHT_GROUP_3D
from colossalai.legacy.core import global_context
from colossalai.legacy.nn import (
Classifier3D,
CrossEntropyLoss3D,
Embedding3D,
LayerNorm3D,
Linear3D,
PatchEmbedding3D,
VanillaClassifier,
VanillaPatchEmbedding,
VocabParallelClassifier3D,
VocabParallelCrossEntropyLoss3D,
VocabParallelEmbedding3D,
)
from colossalai.legacy.nn.layer.parallel_3d._utils import get_parallel_mode_from_env
from colossalai.legacy.utils import print_rank_0
from colossalai.logging import get_dist_logger
from .common import BATCH_SIZE, DEPTH, HIDDEN_SIZE, IMG_SIZE, NUM_CLASSES, SEQ_LENGTH, VOCAB_SIZE, check_equal
def check_linear():
rank = torch.distributed.get_rank()
logger = get_dist_logger()
device = get_accelerator().get_current_device()
INPUT_SIZE = HIDDEN_SIZE
OUTPUT_SIZE = 2 * HIDDEN_SIZE
input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
j = global_context.get_local_rank(input_parallel_mode)
i = global_context.get_local_rank(weight_parallel_mode)
k = global_context.get_local_rank(output_parallel_mode)
layer = Linear3D(INPUT_SIZE, OUTPUT_SIZE, bias=True)
layer = layer.to(device)
layer_master = torch.nn.Linear(INPUT_SIZE, OUTPUT_SIZE)
layer_master = layer_master.to(device)
weight_master = layer_master.weight.data.transpose(0, 1).contiguous()
torch.distributed.broadcast(weight_master, src=0)
weight = torch.chunk(weight_master, DEPTH, dim=0)[k]
weight = torch.chunk(weight, DEPTH, dim=-1)[j]
weight = torch.chunk(weight, DEPTH, dim=0)[i]
layer.weight.data.copy_(weight)
bias_master = layer_master.bias.data
torch.distributed.broadcast(bias_master, src=0)
bias = torch.chunk(bias_master, DEPTH)[j]
layer.bias.data.copy_(bias)
A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
A_master = torch.randn(A_shape, device=device)
torch.distributed.broadcast(A_master, src=0)
A = torch.chunk(A_master, DEPTH, dim=0)[i]
A = torch.chunk(A, DEPTH, dim=-1)[k]
A = torch.chunk(A, DEPTH, dim=0)[j]
A = A.clone()
A.requires_grad = True
fwd_start = time.time()
out = layer(A)
torch.cuda.synchronize()
fwd_end = time.time()
print_rank_0(
"linear forward: {0} --> {1} | {2:.3f} s".format(tuple(A.shape), tuple(out.shape), fwd_end - fwd_start), logger
)
A_master = A_master.clone()
A_master.requires_grad = True
C_master = layer_master(A_master)
C = torch.chunk(C_master, DEPTH, dim=0)[i]
C = torch.chunk(C, DEPTH, dim=-1)[j]
C = torch.chunk(C, DEPTH, dim=0)[k]
logger.info("Rank {} linear forward: {}".format(rank, check_equal(out, C)))
grad_shape = C_master.shape
grad_master = torch.randn(grad_shape, device=get_accelerator().get_current_device())
torch.distributed.broadcast(grad_master, src=0)
grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
grad = torch.chunk(grad, DEPTH, dim=-1)[j]
grad = torch.chunk(grad, DEPTH, dim=0)[k]
bwd_start = time.time()
out.backward(grad)
torch.cuda.synchronize()
bwd_end = time.time()
print_rank_0("linear backward: {:.3f} s".format(bwd_end - bwd_start), logger)
C_master.backward(grad_master)
A_grad = A_master.grad
A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]
A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k]
A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j]
logger.info("Rank {} linear backward (input_grad): {}".format(rank, check_equal(A_grad, A.grad)))
B_grad = layer_master.weight.grad.transpose(0, 1)
B_grad = torch.chunk(B_grad, DEPTH, dim=0)[k]
B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[j]
B_grad = torch.chunk(B_grad, DEPTH, dim=0)[i]
logger.info("Rank {} linear backward (weight_grad): {}".format(rank, check_equal(B_grad, layer.weight.grad)))
bias_grad = layer_master.bias.grad
bias_grad = torch.chunk(bias_grad, DEPTH)[j]
logger.info("Rank {} linear backward (bias_grad): {}".format(rank, check_equal(bias_grad, layer.bias.grad)))
return fwd_end - fwd_start, bwd_end - bwd_start
def check_layernorm():
rank = torch.distributed.get_rank()
logger = get_dist_logger()
device = get_accelerator().get_current_device()
INPUT_SIZE = HIDDEN_SIZE
input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
j = global_context.get_local_rank(input_parallel_mode)
i = global_context.get_local_rank(weight_parallel_mode)
k = global_context.get_local_rank(output_parallel_mode)
norm = LayerNorm3D(INPUT_SIZE, eps=1e-6)
norm = norm.to(device)
norm_master = torch.nn.LayerNorm(INPUT_SIZE, eps=1e-6)
norm_master = norm_master.to(device)
weight_master = norm_master.weight.data
torch.distributed.broadcast(weight_master, src=0)
weight = torch.chunk(weight_master, DEPTH)[k]
norm.weight.data.copy_(weight)
bias_master = norm_master.bias.data
torch.distributed.broadcast(bias_master, src=0)
bias = torch.chunk(bias_master, DEPTH)[k]
norm.bias.data.copy_(bias)
A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
A_master = torch.randn(A_shape, device=device)
torch.distributed.broadcast(A_master, src=0)
A = torch.chunk(A_master, DEPTH, dim=0)[i]
A = torch.chunk(A, DEPTH, dim=-1)[k]
A = torch.chunk(A, DEPTH, dim=0)[j]
A = A.clone()
A.requires_grad = True
fwd_start = time.time()
out = norm(A)
torch.cuda.synchronize()
fwd_end = time.time()
print_rank_0(
"layer norm forward: pass | {0} --> {1} | {2:.3f} s".format(
tuple(A.shape), tuple(out.shape), fwd_end - fwd_start
),
logger,
)
A_master = A_master.clone()
A_master.requires_grad = True
C_master = norm_master(A_master)
C = torch.chunk(C_master, DEPTH, dim=0)[i]
C = torch.chunk(C, DEPTH, dim=-1)[k]
C = torch.chunk(C, DEPTH, dim=0)[j]
logger.info("Rank {} layernorm forward: {}".format(rank, check_equal(out, C)))
grad_shape = C_master.shape
grad_master = torch.randn(grad_shape, device=device)
torch.distributed.broadcast(grad_master, src=0)
grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
grad = torch.chunk(grad, DEPTH, dim=-1)[k]
grad = torch.chunk(grad, DEPTH, dim=0)[j]
bwd_start = time.time()
out.backward(grad)
torch.cuda.synchronize()
bwd_end = time.time()
print_rank_0("layer norm backward: pass | {:.3f} s".format(bwd_end - bwd_start), logger)
C_master.backward(grad_master)
A_grad = A_master.grad
A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]
A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k]
A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j]
logger.info("Rank {} layernorm backward (input_grad): {}".format(rank, check_equal(A_grad, A.grad)))
bias_grad = norm_master.weight.grad
bias_grad = torch.chunk(bias_grad, DEPTH)[k]
logger.info("Rank {} layernorm backward (weight_grad): {}".format(rank, check_equal(bias_grad, norm.weight.grad)))
bias_grad = norm_master.bias.grad
bias_grad = torch.chunk(bias_grad, DEPTH)[k]
logger.info("Rank {} layernorm backward (bias_grad): {}".format(rank, check_equal(bias_grad, norm.bias.grad)))
return fwd_end - fwd_start, bwd_end - bwd_start
def check_classifier_no_given_weight():
rank = torch.distributed.get_rank()
logger = get_dist_logger()
device = get_accelerator().get_current_device()
INPUT_SIZE = HIDDEN_SIZE
input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
j = global_context.get_local_rank(input_parallel_mode)
i = global_context.get_local_rank(weight_parallel_mode)
k = global_context.get_local_rank(output_parallel_mode)
layer = Classifier3D(INPUT_SIZE, NUM_CLASSES, bias=True)
layer = layer.to(device)
layer_master = VanillaClassifier(INPUT_SIZE, NUM_CLASSES, bias=True)
layer_master = layer_master.to(device)
weight_master = layer_master.weight.data
torch.distributed.broadcast(weight_master, src=0)
weight = torch.chunk(weight_master, DEPTH, dim=-1)[k]
layer.weight.data.copy_(weight)
bias_master = layer_master.bias.data
torch.distributed.broadcast(bias_master, src=0)
layer.bias.data.copy_(bias_master)
A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
A_master = torch.randn(A_shape, device=device)
torch.distributed.broadcast(A_master, src=0)
A = torch.chunk(A_master, DEPTH, dim=0)[i]
A = torch.chunk(A, DEPTH, dim=-1)[k]
A = torch.chunk(A, DEPTH, dim=0)[j]
A = A.clone()
A.requires_grad = True
fwd_start = time.time()
out = layer(A)
torch.cuda.synchronize()
fwd_end = time.time()
print_rank_0(
"classifier (no given weight) forward: pass | {0} --> {1} | {2:.3f} s".format(
tuple(A.shape), tuple(out.shape), fwd_end - fwd_start
),
logger,
)
A_master = A_master.clone()
A_master.requires_grad = True
C_master = layer_master(A_master)
C = torch.chunk(C_master, DEPTH, dim=0)[i]
C = torch.chunk(C, DEPTH, dim=0)[j]
logger.info("Rank {} classifier (no given weight) forward: {}".format(rank, check_equal(out, C)))
grad_shape = C_master.shape
grad_master = torch.randn(grad_shape, device=get_accelerator().get_current_device())
torch.distributed.broadcast(grad_master, src=0)
grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
grad = torch.chunk(grad, DEPTH, dim=0)[j]
grad = grad.clone()
bwd_start = time.time()
out.backward(grad)
torch.cuda.synchronize()
bwd_end = time.time()
print_rank_0("classifier (no given weight) backward: pass | {:.3f} s".format(bwd_end - bwd_start), logger)
grad_master = grad_master.clone()
C_master.backward(grad_master)
A_grad = A_master.grad
A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]
A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k]
A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j]
logger.info(
"Rank {} classifier (no given weight) backward (input_grad): {}".format(rank, check_equal(A_grad, A.grad))
)
B_grad = layer_master.weight.grad
B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]
if j == k:
logger.info(
"Rank {} classifier (no given weight) backward (weight_grad): {}".format(
rank, check_equal(B_grad, layer.weight.grad)
)
)
else:
logger.info(
"Rank {} classifier (no given weight) backward (weight_grad): {}".format(rank, layer.weight.grad is None)
)
bias_grad = layer_master.bias.grad
logger.info(
"Rank {} classifier (no given weight) backward (bias_grad): {}".format(
rank, check_equal(bias_grad, layer.bias.grad)
)
)
return fwd_end - fwd_start, bwd_end - bwd_start
def check_vocab_parallel_classifier_no_given_weight():
rank = torch.distributed.get_rank()
logger = get_dist_logger()
device = get_accelerator().get_current_device()
INPUT_SIZE = HIDDEN_SIZE
input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
j = global_context.get_local_rank(input_parallel_mode)
i = global_context.get_local_rank(weight_parallel_mode)
k = global_context.get_local_rank(output_parallel_mode)
layer = VocabParallelClassifier3D(INPUT_SIZE, VOCAB_SIZE, bias=True)
layer = layer.to(device)
layer_master = VanillaClassifier(INPUT_SIZE, VOCAB_SIZE, bias=True)
layer_master = layer_master.to(device)
weight_master = layer_master.weight.data
torch.distributed.broadcast(weight_master, src=0)
weight = torch.chunk(weight_master, DEPTH, dim=0)[j]
weight = torch.chunk(weight, DEPTH, dim=0)[i]
weight = torch.chunk(weight, DEPTH, dim=-1)[k]
layer.weight.data.copy_(weight)
bias_master = layer_master.bias.data
torch.distributed.broadcast(bias_master, src=0)
bias = torch.chunk(bias_master, DEPTH)[j]
layer.bias.data.copy_(bias)
A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
A_master = torch.randn(A_shape, device=device)
torch.distributed.broadcast(A_master, src=0)
A = torch.chunk(A_master, DEPTH, dim=0)[i]
A = torch.chunk(A, DEPTH, dim=-1)[k]
A = torch.chunk(A, DEPTH, dim=0)[j]
A = A.clone()
A.requires_grad = True
fwd_start = time.time()
out = layer(A)
torch.cuda.synchronize()
fwd_end = time.time()
print_rank_0(
"vocab parallel classifier (no given weight) forward: pass | {0} --> {1} | {2:.3f} s".format(
tuple(A.shape), tuple(out.shape), fwd_end - fwd_start
),
logger,
)
A_master = A_master.clone()
A_master.requires_grad = True
C_master = layer_master(A_master)
C = torch.chunk(C_master, DEPTH, dim=0)[i]
C = torch.chunk(C, DEPTH, dim=-1)[j]
C = torch.chunk(C, DEPTH, dim=0)[k]
logger.info("Rank {} vocab parallel classifier (no given weight) forward: {}".format(rank, check_equal(out, C)))
grad_shape = C_master.shape
grad_master = torch.randn(grad_shape, device=device)
torch.distributed.broadcast(grad_master, src=0)
grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
grad = torch.chunk(grad, DEPTH, dim=-1)[j]
grad = torch.chunk(grad, DEPTH, dim=0)[k]
grad = grad.clone()
bwd_start = time.time()
out.backward(grad)
torch.cuda.synchronize()
bwd_end = time.time()
print_rank_0(
"vocab parallel classifier (no given weight) backward: pass | {:.3f} s".format(bwd_end - bwd_start), logger
)
grad_master = grad_master.clone()
C_master.backward(grad_master)
A_grad = A_master.grad
A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]
A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k]
A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j]
logger.info(
"Rank {} vocab parallel classifier (no given weight) backward (input_grad): {}".format(
rank, check_equal(A_grad, A.grad)
)
)
B_grad = layer_master.weight.grad
B_grad = torch.chunk(B_grad, DEPTH, dim=0)[j]
B_grad = torch.chunk(B_grad, DEPTH, dim=0)[i]
B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]
logger.info(
"Rank {} vocab parallel classifier (no given weight) backward (weight_grad): {}".format(
rank, check_equal(B_grad, layer.weight.grad)
)
)
bias_grad = layer_master.bias.grad
bias_grad = torch.chunk(bias_grad, DEPTH)[j]
logger.info(
"Rank {} vocab parallel classifier (no given weight) backward (bias_grad): {}".format(
rank, check_equal(bias_grad, layer.bias.grad)
)
)
return fwd_end - fwd_start, bwd_end - bwd_start
def check_classifier_given_embed_weight():
rank = torch.distributed.get_rank()
logger = get_dist_logger()
device = get_accelerator().get_current_device()
dtype = torch.float32
input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
j = global_context.get_local_rank(input_parallel_mode)
i = global_context.get_local_rank(weight_parallel_mode)
k = global_context.get_local_rank(output_parallel_mode)
embed = Embedding3D(VOCAB_SIZE, HIDDEN_SIZE)
embed = embed.to(dtype).to(device)
embed_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
embed_master = embed_master.to(dtype).to(device)
weight_master = embed_master.weight.data
torch.distributed.broadcast(weight_master, src=0)
weight = torch.chunk(weight_master, DEPTH, dim=-1)[k]
embed.weight.data.copy_(weight)
layer = Classifier3D(HIDDEN_SIZE, VOCAB_SIZE, weight=embed.weight, bias=False)
layer = layer.to(dtype).to(device)
layer_master = VanillaClassifier(HIDDEN_SIZE, VOCAB_SIZE, weight=embed_master.weight, bias=False)
layer_master = layer_master.to(dtype).to(device)
A_shape = (BATCH_SIZE, SEQ_LENGTH)
A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
torch.distributed.broadcast(A_master, src=0)
A = A_master.clone()
fwd_start = time.time()
out = layer(embed(A))
torch.cuda.synchronize()
fwd_end = time.time()
print_rank_0(
"classifier (given embed weight) forward: pass | {0} --> {1} | {2:.3f} s".format(
tuple(A.shape), tuple(out.shape), fwd_end - fwd_start
),
logger,
)
A_master = A_master.clone()
C_master = layer_master(embed_master(A_master))
C = torch.chunk(C_master, DEPTH, dim=0)[i]
C = torch.chunk(C, DEPTH, dim=0)[j]
logger.info("Rank {} classifier (given embed weight) forward: {}".format(rank, check_equal(out, C)))
grad_shape = C_master.shape
grad_master = torch.randn(grad_shape, dtype=dtype, device=get_accelerator().get_current_device())
torch.distributed.broadcast(grad_master, src=0)
grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
grad = torch.chunk(grad, DEPTH, dim=0)[j]
grad = grad.clone()
bwd_start = time.time()
out.backward(grad)
torch.cuda.synchronize()
bwd_end = time.time()
print_rank_0("classifier (given embed weight) backward: pass | {:.3f} s".format(bwd_end - bwd_start), logger)
grad_master = grad_master.clone()
C_master.backward(grad_master)
B_grad = embed_master.weight.grad
B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]
if j == k:
logger.info(
"Rank {} classifier (given embed weight) backward (weight_grad): {}".format(
rank, check_equal(B_grad, embed.weight.grad)
)
)
else:
logger.info(
"Rank {} classifier (given embed weight) backward (weight_grad): {}".format(rank, embed.weight.grad is None)
)
return fwd_end - fwd_start, bwd_end - bwd_start
def check_vocab_parallel_classifier_given_embed_weight():
rank = torch.distributed.get_rank()
logger = get_dist_logger()
device = get_accelerator().get_current_device()
input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
j = global_context.get_local_rank(input_parallel_mode)
i = global_context.get_local_rank(weight_parallel_mode)
k = global_context.get_local_rank(output_parallel_mode)
embed = VocabParallelEmbedding3D(VOCAB_SIZE, HIDDEN_SIZE)
embed = embed.to(device)
embed_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
embed_master = embed_master.to(device)
weight_master = embed_master.weight.data
torch.distributed.broadcast(weight_master, src=0)
weight = torch.chunk(weight_master, DEPTH, dim=0)[j]
weight = torch.chunk(weight, DEPTH, dim=0)[i]
weight = torch.chunk(weight, DEPTH, dim=-1)[k]
embed.weight.data.copy_(weight)
layer = VocabParallelClassifier3D(HIDDEN_SIZE, VOCAB_SIZE, weight=embed.weight, bias=False)
layer = layer.to(device)
layer_master = VanillaClassifier(HIDDEN_SIZE, VOCAB_SIZE, weight=embed_master.weight, bias=False)
layer_master = layer_master.to(device)
A_shape = (BATCH_SIZE, SEQ_LENGTH)
A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
torch.distributed.broadcast(A_master, src=0)
A = A_master.clone()
fwd_start = time.time()
out = layer(embed(A))
torch.cuda.synchronize()
fwd_end = time.time()
print_rank_0(
"vocab parallel classifier (given embed weight) forward: pass | {0} --> {1} | {2:.3f} s".format(
tuple(A.shape), tuple(out.shape), fwd_end - fwd_start
),
logger,
)
A_master = A_master.clone()
C_master = layer_master(embed_master(A_master))
C = torch.chunk(C_master, DEPTH, dim=0)[i]
C = torch.chunk(C, DEPTH, dim=-1)[j]
C = torch.chunk(C, DEPTH, dim=0)[k]
logger.info("Rank {} vocab parallel classifier (given embed weight) forward: {}".format(rank, check_equal(out, C)))
grad_shape = C_master.shape
grad_master = torch.randn(grad_shape, device=device)
torch.distributed.broadcast(grad_master, src=0)
grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
grad = torch.chunk(grad, DEPTH, dim=-1)[j]
grad = torch.chunk(grad, DEPTH, dim=0)[k]
grad = grad.clone()
bwd_start = time.time()
out.backward(grad)
torch.cuda.synchronize()
bwd_end = time.time()
print_rank_0(
"vocab parallel classifier (given embed weight) backward: pass | {:.3f} s".format(bwd_end - bwd_start), logger
)
grad_master = grad_master.clone()
C_master.backward(grad_master)
B_grad = embed_master.weight.grad
B_grad = torch.chunk(B_grad, DEPTH, dim=0)[j]
B_grad = torch.chunk(B_grad, DEPTH, dim=0)[i]
B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]
logger.info(
"Rank {} vocab parallel embed backward (weight_grad): {}".format(rank, check_equal(B_grad, embed.weight.grad))
)
return fwd_end - fwd_start, bwd_end - bwd_start
def check_patch_embed():
rank = torch.distributed.get_rank()
device = get_accelerator().get_current_device()
logger = get_dist_logger()
torch.float32
input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
j = global_context.get_local_rank(input_parallel_mode)
i = global_context.get_local_rank(weight_parallel_mode)
k = global_context.get_local_rank(output_parallel_mode)
layer = PatchEmbedding3D(IMG_SIZE, 4, 3, HIDDEN_SIZE)
torch.nn.init.ones_(layer.cls_token)
torch.nn.init.ones_(layer.pos_embed)
layer = layer.to(device)
layer_master = VanillaPatchEmbedding(IMG_SIZE, 4, 3, HIDDEN_SIZE)
torch.nn.init.ones_(layer_master.cls_token)
torch.nn.init.ones_(layer_master.pos_embed)
layer_master = layer_master.to(device)
proj_weight_master = layer_master.weight.data
torch.distributed.broadcast(proj_weight_master, src=0)
proj_weight = torch.chunk(proj_weight_master, DEPTH, dim=0)[k]
layer.weight.data.copy_(proj_weight)
proj_bias_master = layer_master.bias.data
torch.distributed.broadcast(proj_bias_master, src=0)
proj_bias = torch.chunk(proj_bias_master, DEPTH)[k]
layer.bias.data.copy_(proj_bias)
A_shape = (BATCH_SIZE, 3, IMG_SIZE, IMG_SIZE)
A_master = torch.randn(A_shape, device=device)
torch.distributed.broadcast(A_master, src=0)
A = A_master.clone()
fwd_start = time.time()
out = layer(A)
torch.cuda.synchronize()
fwd_end = time.time()
print_rank_0(
"patch embed forward: pass | {0} --> {1} | {2:.3f} s".format(
tuple(A.shape), tuple(out.shape), fwd_end - fwd_start
),
logger,
)
A_master = A_master.clone()
C_master = layer_master(A_master)
C = torch.chunk(C_master, DEPTH, dim=0)[i]
C = torch.chunk(C, DEPTH, dim=-1)[k]
C = torch.chunk(C, DEPTH, dim=0)[j]
logger.info("Rank {} patch embed forward: {}".format(rank, check_equal(out, C)))
grad_shape = C_master.shape
grad_master = torch.randn(grad_shape, device=device)
torch.distributed.broadcast(grad_master, src=0)
grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
grad = torch.chunk(grad, DEPTH, dim=-1)[k]
grad = torch.chunk(grad, DEPTH, dim=0)[j]
grad = grad.clone()
bwd_start = time.time()
out.backward(grad)
torch.cuda.synchronize()
bwd_end = time.time()
print_rank_0("patch embed backward: pass | {:.3f} s".format(bwd_end - bwd_start), logger)
grad_master = grad_master.clone()
C_master.backward(grad_master)
cls_grad_master = layer_master.cls_token.grad
cls_grad = torch.chunk(cls_grad_master, DEPTH, dim=-1)[k]
logger.info("Rank {} patch embed backward (cls_grad): {}".format(rank, check_equal(cls_grad, layer.cls_token.grad)))
pos_grad_master = layer_master.pos_embed.grad
pos_grad = torch.chunk(pos_grad_master, DEPTH, dim=-1)[k]
logger.info(
"Rank {} patch embed backward (pos_embed_grad): {}".format(rank, check_equal(pos_grad, layer.pos_embed.grad))
)
B_grad = layer_master.weight.grad
B_grad = torch.chunk(B_grad, DEPTH, dim=0)[k]
logger.info(
"Rank {} patch embed backward (proj_weight_grad): {}".format(rank, check_equal(B_grad, layer.weight.grad))
)
bias_grad = layer_master.bias.grad
bias_grad = torch.chunk(bias_grad, DEPTH)[k]
logger.info(
"Rank {} patch embed backward (proj_bias_grad): {}".format(rank, check_equal(bias_grad, layer.bias.grad))
)
return fwd_end - fwd_start, bwd_end - bwd_start
def check_embed():
rank = torch.distributed.get_rank()
device = get_accelerator().get_current_device()
logger = get_dist_logger()
torch.float32
input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
j = global_context.get_local_rank(input_parallel_mode)
i = global_context.get_local_rank(weight_parallel_mode)
k = global_context.get_local_rank(output_parallel_mode)
layer = Embedding3D(VOCAB_SIZE, HIDDEN_SIZE)
layer = layer.to(device)
layer_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
layer_master = layer_master.to(device)
weight_master = layer_master.weight.data
torch.distributed.broadcast(weight_master, src=0)
weight = torch.chunk(weight_master, DEPTH, dim=-1)[k]
layer.weight.data.copy_(weight)
A_shape = (BATCH_SIZE, SEQ_LENGTH)
A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
torch.distributed.broadcast(A_master, src=0)
A = A_master.clone()
fwd_start = time.time()
out = layer(A)
torch.cuda.synchronize()
fwd_end = time.time()
logger.info(
"embed forward: pass | {0} --> {1} | {2:.3f} s".format(tuple(A.shape), tuple(out.shape), fwd_end - fwd_start),
ranks=[0],
)
A_master = A_master.clone()
C_master = layer_master(A_master)
C = torch.chunk(C_master, DEPTH, dim=0)[i]
C = torch.chunk(C, DEPTH, dim=-1)[k]
C = torch.chunk(C, DEPTH, dim=0)[j]
logger.info("Rank {} embed forward: {}".format(rank, check_equal(out, C)))
grad_shape = C_master.shape
grad_master = torch.randn(grad_shape, device=device)
torch.distributed.broadcast(grad_master, src=0)
grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
grad = torch.chunk(grad, DEPTH, dim=-1)[k]
grad = torch.chunk(grad, DEPTH, dim=0)[j]
grad = grad.clone()
bwd_start = time.time()
out.backward(grad)
torch.cuda.synchronize()
bwd_end = time.time()
logger.info("embed backward: pass | {:.3f} s".format(bwd_end - bwd_start), ranks=[0])
grad_master = grad_master.clone()
C_master.backward(grad_master)
B_grad = layer_master.weight.grad
B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]
logger.info("Rank {} embed backward (weight_grad): {}".format(rank, check_equal(B_grad, layer.weight.grad)))
return fwd_end - fwd_start, bwd_end - bwd_start
def check_vocab_parallel_embed():
    """Check VocabParallelEmbedding3D forward/backward against torch.nn.Embedding.

    Fix: removed a leftover bare ``torch.float32`` expression statement, which
    had no effect (likely residue of a deleted ``dtype = torch.float32``).

    Returns:
        Tuple of (forward time, backward time) in seconds.
    """
    rank = torch.distributed.get_rank()
    device = get_accelerator().get_current_device()
    logger = get_dist_logger()
    input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
    weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
    output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
    j = global_context.get_local_rank(input_parallel_mode)
    i = global_context.get_local_rank(weight_parallel_mode)
    k = global_context.get_local_rank(output_parallel_mode)
    layer = VocabParallelEmbedding3D(VOCAB_SIZE, HIDDEN_SIZE)
    layer = layer.to(device)
    layer_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
    layer_master = layer_master.to(device)
    # Broadcast the master weight so all ranks agree, then copy this rank's
    # shard: vocab dim is chunked twice (input rank j, then weight rank i),
    # hidden dim once (output rank k).
    weight_master = layer_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=0)[j]
    weight = torch.chunk(weight, DEPTH, dim=0)[i]
    weight = torch.chunk(weight, DEPTH, dim=-1)[k]
    layer.weight.data.copy_(weight)
    A_shape = (BATCH_SIZE, SEQ_LENGTH)
    A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    fwd_start = time.time()
    out = layer(A)
    torch.cuda.synchronize()
    fwd_end = time.time()
    logger.info(
        "vocab parallel embed forward: pass | {0} --> {1} | {2:.3f} s".format(
            tuple(A.shape), tuple(out.shape), fwd_end - fwd_start
        ),
        ranks=[0],
    )
    # Reference forward on the full (unsharded) input, then slice the matching
    # shard of the reference output for comparison.
    A_master = A_master.clone()
    C_master = layer_master(A_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[k]
    C = torch.chunk(C, DEPTH, dim=0)[j]
    logger.info("Rank {} vocab parallel embed forward: {}".format(rank, check_equal(out, C)))
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[k]
    grad = torch.chunk(grad, DEPTH, dim=0)[j]
    grad = grad.clone()
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    logger.info("vocab parallel embed backward: pass | {:.3f} s".format(bwd_end - bwd_start), ranks=[0])
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    # Compare the local weight-gradient shard with the matching slice of the
    # reference gradient (same triple-chunk layout as the weight itself).
    B_grad = layer_master.weight.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[j]
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[i]
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]
    logger.info(
        "Rank {} vocab parallel embed backward (weight_grad): {}".format(rank, check_equal(B_grad, layer.weight.grad))
    )
    return fwd_end - fwd_start, bwd_end - bwd_start
def check_loss():
    """Check CrossEntropyLoss3D forward/backward against torch.nn.CrossEntropyLoss.

    Returns:
        Tuple of (forward time, backward time) in seconds.
    """
    rank = torch.distributed.get_rank()
    logger = get_dist_logger()
    device = get_accelerator().get_current_device()
    input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
    weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
    j = global_context.get_local_rank(input_parallel_mode)
    i = global_context.get_local_rank(weight_parallel_mode)
    criterion = CrossEntropyLoss3D()
    criterion_master = torch.nn.CrossEntropyLoss()
    out_shape = (BATCH_SIZE, NUM_CLASSES)
    out_master = torch.randn(out_shape, device=device)
    target_master = torch.randint(NUM_CLASSES, (BATCH_SIZE,), dtype=torch.long, device=device)
    # Broadcast so every rank sees identical logits/targets, then take this
    # rank's batch shard (chunked by weight rank i, then input rank j).
    torch.distributed.broadcast(out_master, src=0)
    torch.distributed.broadcast(target_master, src=0)
    out = torch.chunk(out_master, DEPTH, dim=0)[i]
    out = torch.chunk(out, DEPTH, dim=0)[j]
    out = out.clone()
    out.requires_grad = True
    fwd_start = time.time()
    loss = criterion(out, target_master)
    fwd_end = time.time()
    logger.info(
        "cross entropy loss forward: pass | {0} --> {1} | {2:.3f} s".format(
            tuple(out.shape), tuple(loss.shape), fwd_end - fwd_start
        ),
        ranks=[0],
    )
    # Reference loss on the full (unsharded) logits.
    out_master = out_master.clone()
    out_master.requires_grad = True
    loss_master = criterion_master(out_master, target_master)
    logger.info("Rank {} cross entropy loss forward: {}".format(rank, check_equal(loss, loss_master)))
    bwd_start = time.time()
    loss.backward()
    bwd_end = time.time()
    logger.info("cross entropy loss backward: pass | {:.3f} s".format(bwd_end - bwd_start), ranks=[0])
    loss_master.backward()
    # Compare the local gradient shard with the matching slice of the
    # reference gradient (same double-chunk layout as the input).
    out_grad = out_master.grad
    out_grad = torch.chunk(out_grad, DEPTH, dim=0)[i]
    out_grad = torch.chunk(out_grad, DEPTH, dim=0)[j]
    logger.info("Rank {} cross entropy loss backward: {}".format(rank, check_equal(out_grad, out.grad)))
    return fwd_end - fwd_start, bwd_end - bwd_start
def check_vocab_parallel_loss():
rank = torch.distributed.get_rank()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | true |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_layers/test_3d/checks_3d/common.py | tests/test_legacy/test_layers/test_3d/checks_3d/common.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
# Shared sizes for the 3-D tensor-parallel layer tests.
DEPTH = 2  # chunks per axis of the 3-D process grid
BATCH_SIZE = 8
SEQ_LENGTH = 8
HIDDEN_SIZE = 8
NUM_CLASSES = 8
NUM_BLOCKS = 2
IMG_SIZE = 16
VOCAB_SIZE = 16
def check_equal(A, B):
    """Assert that tensors ``A`` and ``B`` agree within rtol=1e-3, atol=1e-2.

    Raises AssertionError (showing both tensors) on mismatch; returns the
    boolean comparison result otherwise.
    """
    tensors_match = torch.allclose(A, B, rtol=1e-3, atol=1e-2)
    assert tensors_match, f"\nA = {A}\nB = {B}"
    return tensors_match
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_layers/test_3d/checks_3d/__init__.py | tests/test_legacy/test_layers/test_3d/checks_3d/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_layers/test_2p5d/test_2p5d.py | tests/test_legacy/test_layers/test_2p5d/test_2p5d.py | import pytest
import torch
from checks_2p5d.check_layer_2p5d import *
from checks_2p5d.check_operation_2p5d import check_AB, check_ABT, check_ATB
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import rerun_if_address_is_in_use, spawn
# Legacy parallel-context configuration: a single pipeline stage with 4-way
# tensor parallelism in "2.5d" mode (tesseract depth 1, i.e. a 2x2 grid).
CONFIG = dict(
    parallel=dict(
        pipeline=dict(size=1),
        tensor=dict(size=4, mode="2.5d", depth=1),
    ),
)
def check_operations():
    """Run the 2.5-D matmul operator checks (AB, AB^T, A^T B) in order."""
    for operation_check in (check_AB, check_ABT, check_ATB):
        operation_check()
def check_layer():
    """Run every 2.5-D layer/loss check, preserving the original order."""
    layer_checks = (
        check_linear,
        check_layernorm,
        check_embed,
        check_patch_embed,
        check_vocab_parallel_embed,
        check_classifier_no_given_weight,
        check_vocab_parallel_classifier_no_given_weight,
        check_classifier_given_embed_weight,
        check_vocab_parallel_classifier_given_embed_weight,
        check_loss,
        check_vocab_parallel_loss,
    )
    for layer_check in layer_checks:
        layer_check()
def check_layer_and_operation(rank, world_size, port):
    """Spawned worker: initialize the legacy 2.5-D context, run all checks, tear down."""
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    # Disable TF32 and force deterministic cuDNN so GPU results match the
    # fp32 references computed in the checks.
    torch.backends.cuda.matmul.allow_tf32 = False
    torch.backends.cudnn.allow_tf32 = False
    torch.backends.cudnn.deterministic = True
    check_operations()
    check_layer()
    gpc.destroy()
    torch.cuda.empty_cache()
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_2p5d():
    """Spawn 4 workers (2x2 tesseract, depth 1) and run the full 2.5-D suite."""
    spawn(check_layer_and_operation, 4)


if __name__ == "__main__":
    test_2p5d()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_layers/test_2p5d/checks_2p5d/common.py | tests/test_legacy/test_layers/test_2p5d/checks_2p5d/common.py | import torch
# Shared sizes for the 2.5-D tensor-parallel layer tests.
TESSERACT_DIM = 2  # side length of the 2-D process grid
TESSERACT_DEP = 2  # tesseract depth constant (NOTE(review): test config uses depth=1 — confirm intended)
BATCH_SIZE = 8
SEQ_LENGTH = 8
HIDDEN_SIZE = 8
NUM_CLASSES = 8
VOCAB_SIZE = 16
IMG_SIZE = 16
def check_equal(A, B):
    """Fail the test unless ``A`` and ``B`` agree within rtol=1e-5, atol=1e-2."""
    close_enough = torch.allclose(A, B, rtol=1e-5, atol=1e-2)
    assert close_enough
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_layers/test_2p5d/checks_2p5d/check_layer_2p5d.py | tests/test_legacy/test_layers/test_2p5d/checks_2p5d/check_layer_2p5d.py | import torch
from torch.nn import Parameter
from colossalai.accelerator import get_accelerator
from colossalai.legacy.context.parallel_mode import ParallelMode
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.nn import (
Classifier2p5D,
CrossEntropyLoss2p5D,
Embedding2p5D,
LayerNorm2p5D,
Linear2p5D,
PatchEmbedding2p5D,
VanillaClassifier,
VanillaPatchEmbedding,
VocabParallelClassifier2p5D,
VocabParallelCrossEntropyLoss2p5D,
VocabParallelEmbedding2p5D,
)
from colossalai.legacy.utils import print_rank_0
from .common import *
def check_linear():
    """Check Linear2p5D forward/backward against a dense matmul reference.

    Fix: removed a leftover bare ``layer.bias`` expression statement after the
    forward call; it was a no-op attribute access with no effect.
    """
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    INPUT_SIZE = HIDDEN_SIZE
    OUTPUT_SIZE = 2 * HIDDEN_SIZE
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
    gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)  # result unused; lookup kept from original
    layer = Linear2p5D(INPUT_SIZE, OUTPUT_SIZE, dtype=dtype, skip_bias_add=False)
    # Build identical master tensors on all ranks, then slice this rank's shard.
    A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, TESSERACT_DIM, dim=0)[i]
    A = torch.chunk(A, TESSERACT_DIM, dim=-1)[j]
    A = A.clone()
    A.requires_grad = True
    W_shape = (INPUT_SIZE, OUTPUT_SIZE)
    W_master = torch.randn(W_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(W_master, src=0)
    W = torch.chunk(W_master, TESSERACT_DIM, dim=0)[i]
    W = torch.chunk(W, TESSERACT_DIM, dim=-1)[j]
    W = W.clone()
    W.requires_grad = True
    B_shape = OUTPUT_SIZE
    B_master = torch.randn(B_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(B_master, src=0)
    B = torch.chunk(B_master, TESSERACT_DIM, dim=0)[j]
    B = B.clone()
    B.requires_grad = True
    layer.weight = Parameter(W)
    layer.bias = Parameter(B)
    out = layer(A)
    # Dense reference computed on the full (unsharded) tensors.
    A_master = A_master.clone()
    A_master.requires_grad = True
    W_master = W_master.clone()
    W_master.requires_grad = True
    B_master = B_master.clone()
    B_master.requires_grad = True
    C_master = torch.matmul(A_master, W_master) + B_master
    C = torch.chunk(C_master, TESSERACT_DIM, dim=0)[i]
    C = torch.chunk(C, TESSERACT_DIM, dim=-1)[j]
    check_equal(out, C)
    print_rank_0("linear forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=get_accelerator().get_current_device())
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, TESSERACT_DIM, dim=0)[i]
    grad = torch.chunk(grad, TESSERACT_DIM, dim=-1)[j]
    grad = grad.clone()
    out.backward(grad)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    # Each gradient shard must match the corresponding slice of the reference.
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=0)[i]
    A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=-1)[j]
    check_equal(A_grad, A.grad)
    W_grad = W_master.grad
    W_grad = torch.chunk(W_grad, TESSERACT_DIM, dim=0)[i]
    W_grad = torch.chunk(W_grad, TESSERACT_DIM, dim=-1)[j]
    check_equal(W_grad, layer.weight.grad)
    B_grad = B_master.grad
    B_grad = torch.chunk(B_grad, TESSERACT_DIM, dim=0)[j]
    # The bias shard is shared down each column; only column rank 0 compares it.
    if i == 0:
        check_equal(B_grad, layer.bias.grad)
    print_rank_0("linear backward: pass")
def check_layernorm():
    """Check LayerNorm2p5D against a manually computed layer norm over the hidden dim."""
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    INPUT_SIZE = HIDDEN_SIZE
    EPS = 1e-12
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
    gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)  # result unused; lookup kept from original
    layernorm = LayerNorm2p5D(INPUT_SIZE, dtype=dtype)
    A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, TESSERACT_DIM, dim=0)[i]
    A = torch.chunk(A, TESSERACT_DIM, dim=-1)[j]
    A = A.clone()
    A.requires_grad = True
    out = layernorm(A)
    # Reference: normalize with mean and variance computed over the hidden dim.
    A_master = A_master.clone()
    A_master.requires_grad = True
    E_master = torch.sum(A_master, dim=-1, keepdim=True)
    E_master /= INPUT_SIZE
    V_master = torch.sum(A_master * A_master, dim=-1, keepdim=True)
    V_master /= INPUT_SIZE
    V_master = V_master - E_master * E_master
    V_master = 1.0 / torch.sqrt(V_master + EPS)
    C_master = (A_master - E_master) * V_master
    C = torch.chunk(C_master, TESSERACT_DIM, dim=0)[i]
    C = torch.chunk(C, TESSERACT_DIM, dim=-1)[j]
    check_equal(out, C)
    print_rank_0("layer norm forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=get_accelerator().get_current_device())
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, TESSERACT_DIM, dim=0)[i]
    grad = torch.chunk(grad, TESSERACT_DIM, dim=-1)[j]
    out.backward(grad)
    C_master.backward(grad_master)
    # Input gradient shard must match the corresponding reference slice.
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=0)[i]
    A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=-1)[j]
    check_equal(A_grad, A.grad)
    print_rank_0("layer norm backward: pass")
def check_embed():
    """Check Embedding2p5D forward/backward against torch.nn.Embedding."""
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
    gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)  # result unused; lookup kept from original
    embed = Embedding2p5D(VOCAB_SIZE, HIDDEN_SIZE)
    embed = embed.to(dtype).to(device)
    embed_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
    embed_master = embed_master.to(dtype).to(device)
    weight_master = embed_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    # The embedding table is split twice along the hidden dim (row rank j,
    # then column rank i).
    weight = torch.chunk(weight_master, TESSERACT_DIM, dim=-1)[j]
    weight = torch.chunk(weight, TESSERACT_DIM, dim=-1)[i]
    embed.weight.data.copy_(weight)
    A_shape = (BATCH_SIZE, SEQ_LENGTH)
    A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    out = embed(A)
    A_master = A_master.clone()
    C_master = embed_master(A_master)
    C = torch.chunk(C_master, TESSERACT_DIM, dim=0)[i]
    C = torch.chunk(C, TESSERACT_DIM, dim=-1)[j]
    check_equal(out, C)
    print_rank_0("embed forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, TESSERACT_DIM, dim=0)[i]
    grad = torch.chunk(grad, TESSERACT_DIM, dim=-1)[j]
    grad = grad.clone()
    out.backward(grad)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    # Weight gradient shard uses the same double hidden-dim split as the weight.
    B_grad = embed_master.weight.grad
    B_grad = torch.chunk(B_grad, TESSERACT_DIM, dim=-1)[j]
    B_grad = torch.chunk(B_grad, TESSERACT_DIM, dim=-1)[i]
    check_equal(B_grad, embed.weight.grad)
    print_rank_0("embed backward: pass")
def check_patch_embed():
    """Check PatchEmbedding2p5D against VanillaPatchEmbedding on an image batch."""
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
    gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)  # result unused; lookup kept from original
    layer = PatchEmbedding2p5D(IMG_SIZE, 4, 3, HIDDEN_SIZE, dtype=dtype)
    # Fix cls_token/pos_embed to ones on both sides so only the projection differs.
    torch.nn.init.ones_(layer.cls_token)
    torch.nn.init.ones_(layer.pos_embed)
    layer = layer.to(device)
    layer_master = VanillaPatchEmbedding(IMG_SIZE, 4, 3, HIDDEN_SIZE, dtype=dtype)
    torch.nn.init.ones_(layer_master.cls_token)
    torch.nn.init.ones_(layer_master.pos_embed)
    layer_master = layer_master.to(device)
    proj_weight_master = layer_master.weight.data
    torch.distributed.broadcast(proj_weight_master, src=0)
    proj_weight = torch.chunk(proj_weight_master, TESSERACT_DIM, dim=0)[j]
    proj_weight = torch.chunk(proj_weight, TESSERACT_DIM, dim=0)[i]
    layer.weight.data.copy_(proj_weight)
    proj_bias_master = layer_master.bias.data
    torch.distributed.broadcast(proj_bias_master, src=0)
    proj_bias = torch.chunk(proj_bias_master, TESSERACT_DIM, dim=0)[j]
    proj_bias = torch.chunk(proj_bias, TESSERACT_DIM, dim=0)[i]
    layer.bias.data.copy_(proj_bias)
    A_shape = (BATCH_SIZE, 3, IMG_SIZE, IMG_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    out = layer(A)
    A_master = A_master.clone()
    C_master = layer_master(A_master)
    C = torch.chunk(C_master, TESSERACT_DIM, dim=0)[i]
    C = torch.chunk(C, TESSERACT_DIM, dim=-1)[j]
    check_equal(out, C)
    print_rank_0("patch embed forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, TESSERACT_DIM, dim=0)[i]
    grad = torch.chunk(grad, TESSERACT_DIM, dim=-1)[j]
    grad = grad.clone()
    out.backward(grad)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    # Each parameter's reference gradient is sliced the same way the
    # corresponding parameter value was split above.
    cls_grad_master = layer_master.cls_token.grad
    cls_grad = torch.chunk(cls_grad_master, TESSERACT_DIM, dim=-1)[j]
    cls_grad = torch.chunk(cls_grad, TESSERACT_DIM, dim=-1)[i]
    check_equal(cls_grad, layer.cls_token.grad)
    pos_grad_master = layer_master.pos_embed.grad
    pos_grad = torch.chunk(pos_grad_master, TESSERACT_DIM, dim=-1)[j]
    pos_grad = torch.chunk(pos_grad, TESSERACT_DIM, dim=-1)[i]
    check_equal(pos_grad, layer.pos_embed.grad)
    B_grad = layer_master.weight.grad
    B_grad = torch.chunk(B_grad, TESSERACT_DIM, dim=0)[j]
    B_grad = torch.chunk(B_grad, TESSERACT_DIM, dim=0)[i]
    check_equal(B_grad, layer.weight.grad)
    bias_grad = layer_master.bias.grad
    bias_grad = torch.chunk(bias_grad, TESSERACT_DIM)[j]
    bias_grad = torch.chunk(bias_grad, TESSERACT_DIM)[i]
    check_equal(bias_grad, layer.bias.grad)
    print_rank_0("patch embed backward: pass")
def check_vocab_parallel_embed():
    """Check VocabParallelEmbedding2p5D against torch.nn.Embedding."""
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
    gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)  # result unused; lookup kept from original
    embed = VocabParallelEmbedding2p5D(VOCAB_SIZE, HIDDEN_SIZE)
    embed = embed.to(dtype).to(device)
    embed_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
    embed_master = embed_master.to(dtype).to(device)
    weight_master = embed_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    # Hidden dim chunked by row rank j, vocab dim chunked by column rank i.
    weight = torch.chunk(weight_master, TESSERACT_DIM, dim=-1)[j]
    weight = torch.chunk(weight, TESSERACT_DIM, dim=0)[i]
    embed.weight.data.copy_(weight)
    A_shape = (BATCH_SIZE, SEQ_LENGTH)
    A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    out = embed(A)
    A_master = A_master.clone()
    C_master = embed_master(A_master)
    C = torch.chunk(C_master, TESSERACT_DIM, dim=0)[i]
    C = torch.chunk(C, TESSERACT_DIM, dim=-1)[j]
    check_equal(out, C)
    print_rank_0("vocab parallel embed forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, TESSERACT_DIM, dim=0)[i]
    grad = torch.chunk(grad, TESSERACT_DIM, dim=-1)[j]
    grad = grad.clone()
    out.backward(grad)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    # Weight-gradient shard uses the same split layout as the weight.
    B_grad = embed_master.weight.grad
    B_grad = torch.chunk(B_grad, TESSERACT_DIM, dim=-1)[j]
    B_grad = torch.chunk(B_grad, TESSERACT_DIM, dim=0)[i]
    check_equal(B_grad, embed.weight.grad)
    print_rank_0("vocab parallel embed backward: pass")
def check_classifier_no_given_weight():
    """Check Classifier2p5D (layer owns its weight/bias) against a manual matmul.

    The commented-out chunk lines below are kept from the original: this
    classifier's output/bias are not sliced along the row axis.
    """
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    INPUT_SIZE = HIDDEN_SIZE
    OUTPUT_SIZE = NUM_CLASSES
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
    layer = Classifier2p5D(INPUT_SIZE, OUTPUT_SIZE)
    A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
    # Small-integer inputs keep the fp32 reference exact.
    A_master = torch.randint(5, A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, TESSERACT_DIM, dim=0)[i]
    A = torch.chunk(A, TESSERACT_DIM, dim=-1)[j]
    A = A.clone()
    A.requires_grad = True
    W_shape = (OUTPUT_SIZE, INPUT_SIZE)
    W_master = torch.randint(5, W_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(W_master, src=0)
    # W = torch.chunk(W_master, TESSERACT_DIM, dim=-1)[j]
    W = torch.chunk(W_master, TESSERACT_DIM, dim=-1)[j]
    W = torch.chunk(W, TESSERACT_DIM, dim=-1)[i]
    W = W.clone()
    layer.weight.data.copy_(W)
    # W.requires_grad = True
    B_shape = (OUTPUT_SIZE,)
    B_master = torch.randint(5, B_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(B_master, src=0)
    # B = torch.chunk(B_master, TESSERACT_DIM, dim=0)[j]
    # The bias is replicated on every rank (no slicing).
    B = B_master.clone()
    layer.bias.data.copy_(B)
    out = layer(A)
    # Dense reference on the full tensors.
    A_master = A_master.clone()
    A_master.requires_grad = True
    W_master = W_master.clone()
    W_master.requires_grad = True
    B_master = B_master.clone()
    B_master.requires_grad = True
    C_master = torch.matmul(A_master, W_master.transpose(0, 1)) + B_master
    C = torch.chunk(C_master, TESSERACT_DIM, dim=0)[i]
    # C = torch.chunk(C, TESSERACT_DIM, dim=-1)[j]
    check_equal(out, C)
    print_rank_0("classifier (no given weight) forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=get_accelerator().get_current_device())
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, TESSERACT_DIM, dim=0)[i]
    # grad = torch.chunk(grad, TESSERACT_DIM, dim=-1)[j]
    grad = grad.clone()
    out.backward(grad)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=0)[i]
    A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=-1)[j]
    check_equal(A_grad, A.grad)
    W_grad = W_master.grad
    W_grad = torch.chunk(W_grad, TESSERACT_DIM, dim=-1)[j]
    W_grad = torch.chunk(W_grad, TESSERACT_DIM, dim=-1)[i]
    check_equal(W_grad, layer.weight.grad)
    B_grad = B_master.grad
    # B_grad = torch.chunk(B_grad, TESSERACT_DIM, dim=0)[j]
    # if i == 0:
    check_equal(B_grad, layer.bias.grad)
    print_rank_0("classifier (no given weight) backward: pass")
def check_vocab_parallel_classifier_no_given_weight():
    """Check VocabParallelClassifier2p5D (with bias) against VanillaClassifier."""
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
    gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)  # result unused; lookup kept from original
    layer = VocabParallelClassifier2p5D(HIDDEN_SIZE, VOCAB_SIZE, bias=True)
    layer = layer.to(dtype).to(device)
    layer_master = VanillaClassifier(HIDDEN_SIZE, VOCAB_SIZE, bias=True)
    layer_master = layer_master.to(dtype).to(device)
    weight_master = layer_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, TESSERACT_DIM, dim=0)[i]
    weight = torch.chunk(weight, TESSERACT_DIM, dim=-1)[j]
    layer.weight.data.copy_(weight)
    bias_master = layer_master.bias.data
    torch.distributed.broadcast(bias_master, src=0)
    bias = torch.chunk(bias_master, TESSERACT_DIM)[j]
    layer.bias.data.copy_(bias)
    A_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, TESSERACT_DIM, dim=0)[i]
    A = torch.chunk(A, TESSERACT_DIM, dim=-1)[j]
    A = A.clone()
    A.requires_grad = True
    out = layer(A)
    A_master = A_master.clone()
    A_master.requires_grad = True
    C_master = layer_master(A_master)
    C = torch.chunk(C_master, TESSERACT_DIM, dim=0)[i]
    C = torch.chunk(C, TESSERACT_DIM, dim=-1)[j]
    check_equal(out, C)
    print_rank_0("vocab parallel classifier (no given weight) forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, TESSERACT_DIM, dim=0)[i]
    grad = torch.chunk(grad, TESSERACT_DIM, dim=-1)[j]
    grad = grad.clone()
    out.backward(grad)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=0)[i]
    A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=-1)[j]
    check_equal(A_grad, A.grad)
    W_grad = layer_master.weight.grad
    W_grad = torch.chunk(W_grad, TESSERACT_DIM, dim=0)[i]
    W_grad = torch.chunk(W_grad, TESSERACT_DIM, dim=-1)[j]
    check_equal(W_grad, layer.weight.grad)
    B_grad = layer_master.bias.grad
    B_grad = torch.chunk(B_grad, TESSERACT_DIM)[j]
    # Bias shard is shared along the column axis; only column rank 0 compares it.
    if i == 0:
        check_equal(B_grad, layer.bias.grad)
    print_rank_0("vocab parallel classifier (no given weight) backward: pass")
def check_classifier_given_embed_weight():
    """Check Classifier2p5D sharing its weight with an Embedding2p5D (no bias)."""
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
    gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)  # result unused; lookup kept from original
    embed = Embedding2p5D(VOCAB_SIZE, HIDDEN_SIZE)
    embed = embed.to(dtype).to(device)
    embed_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
    embed_master = embed_master.to(dtype).to(device)
    weight_master = embed_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, TESSERACT_DIM, dim=-1)[j]
    weight = torch.chunk(weight, TESSERACT_DIM, dim=-1)[i]
    embed.weight.data.copy_(weight)
    # Tie the classifier weight to the embedding weight (weight sharing).
    layer = Classifier2p5D(HIDDEN_SIZE, VOCAB_SIZE, weight=embed.weight, bias=False)
    layer = layer.to(dtype).to(device)
    layer_master = VanillaClassifier(HIDDEN_SIZE, VOCAB_SIZE, weight=embed_master.weight, bias=False)
    layer_master = layer_master.to(dtype).to(device)
    A_shape = (BATCH_SIZE, SEQ_LENGTH)
    A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    out = layer(embed(A))
    A_master = A_master.clone()
    C_master = layer_master(embed_master(A_master))
    C = torch.chunk(C_master, TESSERACT_DIM, dim=0)[i]
    check_equal(out, C)
    print_rank_0("classifier (given embed weight) forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, TESSERACT_DIM, dim=0)[i]
    grad = grad.clone()
    out.backward(grad)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    # The shared weight accumulates gradients from both the embedding lookup
    # and the classifier matmul; compare the local shard.
    W_grad = embed_master.weight.grad
    W_grad = torch.chunk(W_grad, TESSERACT_DIM, dim=-1)[j]
    W_grad = torch.chunk(W_grad, TESSERACT_DIM, dim=-1)[i]
    check_equal(W_grad, embed.weight.grad)
    print_rank_0("classifier (given embed weight) backward: pass")
def check_vocab_parallel_classifier_given_embed_weight():
    """Check VocabParallelClassifier2p5D sharing its weight with VocabParallelEmbedding2p5D."""
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
    gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)  # result unused; lookup kept from original
    embed = VocabParallelEmbedding2p5D(VOCAB_SIZE, HIDDEN_SIZE)
    embed = embed.to(dtype).to(device)
    embed_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
    embed_master = embed_master.to(dtype).to(device)
    weight_master = embed_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, TESSERACT_DIM, dim=-1)[j]
    weight = torch.chunk(weight, TESSERACT_DIM, dim=0)[i]
    embed.weight.data.copy_(weight)
    # Tie the classifier weight to the embedding weight (weight sharing).
    layer = VocabParallelClassifier2p5D(HIDDEN_SIZE, VOCAB_SIZE, weight=embed.weight, bias=False)
    layer = layer.to(dtype).to(device)
    layer_master = VanillaClassifier(HIDDEN_SIZE, VOCAB_SIZE, weight=embed_master.weight, bias=False)
    layer_master = layer_master.to(dtype).to(device)
    A_shape = (BATCH_SIZE, SEQ_LENGTH)
    A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    out = layer(embed(A))
    A_master = A_master.clone()
    C_master = layer_master(embed_master(A_master))
    C = torch.chunk(C_master, TESSERACT_DIM, dim=0)[i]
    C = torch.chunk(C, TESSERACT_DIM, dim=-1)[j]
    check_equal(out, C)
    print_rank_0("vocab parallel classifier (given embed weight) forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, TESSERACT_DIM, dim=0)[i]
    grad = torch.chunk(grad, TESSERACT_DIM, dim=-1)[j]
    grad = grad.clone()
    out.backward(grad)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    # Compare the shared weight's local gradient shard against the reference.
    W_grad = embed_master.weight.grad
    W_grad = torch.chunk(W_grad, TESSERACT_DIM, dim=-1)[j]
    W_grad = torch.chunk(W_grad, TESSERACT_DIM, dim=0)[i]
    check_equal(W_grad, embed.weight.grad)
    print_rank_0("vocab parallel classifier (given embed weight) backward: pass")
def check_loss():
    """Check CrossEntropyLoss2p5D against torch.nn.CrossEntropyLoss."""
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
    gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)  # result unused; lookup kept from original
    gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)  # result unused; lookup kept from original
    criterion = CrossEntropyLoss2p5D()
    criterion_master = torch.nn.CrossEntropyLoss()
    out_shape = (BATCH_SIZE, NUM_CLASSES)
    out_master = torch.randn(out_shape, dtype=dtype, device=device)
    target_master = torch.randint(NUM_CLASSES, (BATCH_SIZE,), dtype=torch.long, device=device)
    # Broadcast so all ranks see the same logits/targets, then take this
    # rank's batch shard (column rank i).
    torch.distributed.broadcast(out_master, src=0)
    torch.distributed.broadcast(target_master, src=0)
    out = torch.chunk(out_master, TESSERACT_DIM, dim=0)[i]
    out = out.clone()
    out.requires_grad = True
    loss = criterion(out, target_master)
    # Reference loss on the full (unsharded) logits.
    out_master = out_master.clone()
    out_master.requires_grad = True
    loss_master = criterion_master(out_master, target_master)
    check_equal(loss, loss_master)
    print_rank_0("cross entropy loss forward: pass")
    loss.backward()
    loss_master.backward()
    out_grad = out_master.grad
    out_grad = torch.chunk(out_grad, TESSERACT_DIM, dim=0)[i]
    check_equal(out_grad, out.grad)
    print_rank_0("cross entropy loss backward: pass")
def check_vocab_parallel_loss():
    """Check VocabParallelCrossEntropyLoss2p5D (logits also split over classes)."""
    device = get_accelerator().get_current_device()
    dtype = torch.float32
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
    gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)  # result unused; lookup kept from original
    criterion = VocabParallelCrossEntropyLoss2p5D()
    criterion_master = torch.nn.CrossEntropyLoss()
    out_shape = (BATCH_SIZE, NUM_CLASSES)
    out_master = torch.randn(out_shape, dtype=dtype, device=device)
    target_master = torch.randint(NUM_CLASSES, (BATCH_SIZE,), dtype=torch.long, device=device)
    torch.distributed.broadcast(out_master, src=0)
    torch.distributed.broadcast(target_master, src=0)
    # Local shard: batch split by column rank i, classes by row rank j.
    out = torch.chunk(out_master, TESSERACT_DIM, dim=0)[i]
    out = torch.chunk(out, TESSERACT_DIM, dim=-1)[j]
    out = out.clone()
    out.requires_grad = True
    loss = criterion(out, target_master)
    # Reference loss on the full (unsharded) logits.
    out_master = out_master.clone()
    out_master.requires_grad = True
    loss_master = criterion_master(out_master, target_master)
    check_equal(loss, loss_master)
    print_rank_0("vocab parallel cross entropy loss forward: pass")
    loss.backward()
    loss_master.backward()
    out_grad = out_master.grad
    out_grad = torch.chunk(out_grad, TESSERACT_DIM, dim=0)[i]
    out_grad = torch.chunk(out_grad, TESSERACT_DIM, dim=-1)[j]
    check_equal(out_grad, out.grad)
    print_rank_0("vocab parallel cross entropy loss backward: pass")
# def check_attention():
# device = get_accelerator().get_current_device()
# dtype = torch.float32
# INPUT_SIZE = HIDDEN_SIZE
# NUM_ATTENTION_HEADS = 2
# i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
# j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
# k = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)
# layer = TransformerSelfAttention2p5D(
# HIDDEN_SIZE, NUM_ATTENTION_HEADS,
# attention_dropout_prob=0.5,
# hidden_dropout_prob=0.5,
# dtype=dtype,
# )
# A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
# A_master = torch.randn(A_shape, dtype=dtype, device=device)
# torch.distributed.broadcast(A_master, src=0)
# A = torch.chunk(A_master, TESSERACT_DIM, dim=0)[i]
# A = torch.chunk(A, TESSERACT_DIM, dim=-1)[j]
# A = A.clone()
# A.requires_grad = True
# mask_shape = (BATCH_SIZE // TESSERACT_DIM, NUM_ATTENTION_HEADS // TESSERACT_DIM, SEQ_LENGTH, SEQ_LENGTH)
# attention_mask = torch.zeros(mask_shape, dtype=dtype, device=device)
# out = layer(A, attention_mask)
# assert out.shape == (BATCH_SIZE // TESSERACT_DIM, SEQ_LENGTH, INPUT_SIZE // TESSERACT_DIM)
# print_rank_0('self attention forward: pass')
# grad_shape = out.shape
# grad = torch.randn(grad_shape, dtype=dtype, device=device)
# out.backward(grad)
# assert A.grad.shape == A.shape
# print_rank_0('self attention backward: pass')
# def check_mlp():
# device = get_accelerator().get_current_device()
# dtype = torch.float32
# INPUT_SIZE = HIDDEN_SIZE
# i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
# j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
# k = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)
# layer = TransformerMLP2p5D(
# HIDDEN_SIZE,
# mlp_ratio=1,
# dropout_prob=0.5,
# act_func='gelu',
# dtype=dtype,
# )
# A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
# A_master = torch.randn(A_shape, dtype=dtype, device=device)
# torch.distributed.broadcast(A_master, src=0)
# A = torch.chunk(A_master, TESSERACT_DIM, dim=0)[i]
# A = torch.chunk(A, TESSERACT_DIM, dim=-1)[j]
# A = A.clone()
# A.requires_grad = True
# out = layer(A)
# assert out.shape == (BATCH_SIZE // TESSERACT_DIM, SEQ_LENGTH, INPUT_SIZE // TESSERACT_DIM)
# print_rank_0('mlp forward: pass')
# grad_shape = out.shape
# grad = torch.randn(grad_shape, dtype=dtype, device=device)
# out.backward(grad)
# assert A.grad.shape == A.shape
# print_rank_0('mlp backward: pass')
# def check_transformerlayer():
# device = get_accelerator().get_current_device()
# dtype = torch.float32
# INPUT_SIZE = HIDDEN_SIZE
# NUM_ATTENTION_HEADS = 2
# i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
# j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
# k = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)
# layer = TransformerLayer2p5D(
# HIDDEN_SIZE,
# NUM_ATTENTION_HEADS,
# act_func='gelu',
# attention_dropout_prob=0.5,
# hidden_dropout_prob=0.5,
# dtype=dtype,
# )
# A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
# A_master = torch.randn(A_shape, dtype=dtype, device=device)
# torch.distributed.broadcast(A_master, src=0)
# A = torch.chunk(A_master, TESSERACT_DIM, dim=0)[i]
# A = torch.chunk(A, TESSERACT_DIM, dim=-1)[j]
# A = A.clone()
# A.requires_grad = True
# mask_shape = (BATCH_SIZE // TESSERACT_DIM, NUM_ATTENTION_HEADS // TESSERACT_DIM, SEQ_LENGTH, SEQ_LENGTH)
# attention_mask = torch.zeros(mask_shape, dtype=dtype, device=device)
# out = layer(A, attention_mask)
# assert out.shape == (BATCH_SIZE // TESSERACT_DIM, SEQ_LENGTH, INPUT_SIZE // TESSERACT_DIM)
# print_rank_0('transformerlayer forward: pass')
# grad_shape = out.shape
# grad = torch.randn(grad_shape, dtype=dtype, device=device)
# out.backward(grad)
# assert A.grad.shape == A.shape
# print_rank_0('transformerlayer backward: pass')
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_layers/test_2p5d/checks_2p5d/__init__.py | tests/test_legacy/test_layers/test_2p5d/checks_2p5d/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_layers/test_2p5d/checks_2p5d/check_operation_2p5d.py | tests/test_legacy/test_layers/test_2p5d/checks_2p5d/check_operation_2p5d.py | import torch
from colossalai.accelerator import get_accelerator
from colossalai.legacy.context import ParallelMode
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.nn.layer.parallel_2p5d._operation import Matmul_AB_2p5D, Matmul_ABT_2p5D, Matmul_ATB_2p5D
from colossalai.legacy.utils import print_rank_0
from .common import *
def check_AB():
    """Check Matmul_AB_2p5D (C = A @ B) forward and backward against a serial
    torch.matmul reference.

    Every rank builds identical master tensors (broadcast from rank 0),
    extracts its own (i, j) tile of the tesseract grid, runs the parallel
    matmul, and compares both the output tile and the gradient tiles with the
    matching slices of the serial reference computation.
    """
    # Fall back to rank 0 / size 1 when a parallel mode is not initialized.
    data_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.DATA) else gpc.get_local_rank(ParallelMode.DATA)
    pipeline_parallel_rank = (
        0 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_local_rank(ParallelMode.PIPELINE)
    )
    pipeline_parallel_size = (
        1 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_world_size(ParallelMode.PIPELINE)
    )
    tensor_parallel_size = gpc.get_world_size(ParallelMode.TENSOR)
    dtype = torch.float
    # (i, j, k): this rank's (column, row, depth) coordinates in the 2.5D grid.
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
    k = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)
    # Build A identically on all ranks, then keep only this rank's tile.
    A_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=get_accelerator().get_current_device())
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, TESSERACT_DIM, dim=0)[i]
    A = torch.chunk(A, TESSERACT_DIM, dim=-1)[j]
    A = A.clone()
    A.requires_grad = True
    # Same for the weight matrix B.
    B_shape = (HIDDEN_SIZE, 4 * HIDDEN_SIZE)
    B_master = torch.randn(B_shape, dtype=dtype, device=get_accelerator().get_current_device())
    torch.distributed.broadcast(B_master, src=0)
    B = torch.chunk(B_master, TESSERACT_DIM, dim=0)[i]
    B = torch.chunk(B, TESSERACT_DIM, dim=-1)[j]
    B = B.clone()
    B.requires_grad = True
    out_shape = (BATCH_SIZE // TESSERACT_DIM, SEQ_LENGTH, 4 * HIDDEN_SIZE // TESSERACT_DIM)
    out = Matmul_AB_2p5D.apply(
        A,
        B,
        TESSERACT_DIM,
        out_shape,
        i,
        j,
        k,
        ParallelMode.PARALLEL_2P5D_ROW,
        ParallelMode.PARALLEL_2P5D_COL,
        data_parallel_rank,
        pipeline_parallel_rank,
        pipeline_parallel_size,
        tensor_parallel_size,
    )
    # Serial reference on fresh leaf tensors so gradients accumulate on them.
    # (A stray no-op tuple expression that stood here was removed.)
    A_master = A_master.clone()
    A_master.requires_grad = True
    B_master = B_master.clone()
    B_master.requires_grad = True
    C_master = torch.matmul(A_master, B_master)
    C = torch.chunk(C_master, TESSERACT_DIM, dim=0)[i]
    C = torch.chunk(C, TESSERACT_DIM, dim=-1)[j]
    # check forward correctness
    check_equal(out, C)
    print_rank_0("AB forward: pass")
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=get_accelerator().get_current_device())
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, TESSERACT_DIM, dim=0)[i]
    grad = torch.chunk(grad, TESSERACT_DIM, dim=-1)[j]
    out.backward(grad)
    C_master.backward(grad_master)
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=0)[i]
    A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=-1)[j]
    # check backward correctness
    check_equal(A_grad, A.grad)
    B_grad = B_master.grad
    B_grad = torch.chunk(B_grad, TESSERACT_DIM, dim=0)[i]
    B_grad = torch.chunk(B_grad, TESSERACT_DIM, dim=-1)[j]
    # check backward correctness
    check_equal(B_grad, B.grad)
    print_rank_0("AB backward: pass")
def check_ABT():
    """Check Matmul_ABT_2p5D (A = C @ B^T) forward and backward against a
    serial torch.matmul reference, using the same broadcast-then-tile scheme
    as :func:`check_AB`.
    """
    # Fall back to rank 0 / size 1 when a parallel mode is not initialized.
    data_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.DATA) else gpc.get_local_rank(ParallelMode.DATA)
    pipeline_parallel_rank = (
        0 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_local_rank(ParallelMode.PIPELINE)
    )
    pipeline_parallel_size = (
        1 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_world_size(ParallelMode.PIPELINE)
    )
    tensor_parallel_size = gpc.get_world_size(ParallelMode.TENSOR)
    dtype = torch.float
    device = get_accelerator().get_current_device()
    # (i, j, k): this rank's (column, row, depth) coordinates in the 2.5D grid.
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
    k = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)
    # Build C identically on all ranks, then keep only this rank's tile.
    C_shape = (BATCH_SIZE, SEQ_LENGTH, 4 * HIDDEN_SIZE)
    C_master = torch.randn(C_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(C_master, src=0)
    C = torch.chunk(C_master, TESSERACT_DIM, dim=0)[i]
    C = torch.chunk(C, TESSERACT_DIM, dim=-1)[j]
    C = C.clone()
    C.requires_grad = True
    # Same for the weight matrix B.
    B_shape = (HIDDEN_SIZE, 4 * HIDDEN_SIZE)
    B_master = torch.randn(B_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(B_master, src=0)
    B = torch.chunk(B_master, TESSERACT_DIM, dim=0)[i]
    B = torch.chunk(B, TESSERACT_DIM, dim=-1)[j]
    B = B.clone()
    B.requires_grad = True
    out = Matmul_ABT_2p5D.apply(
        C,
        B,
        TESSERACT_DIM,
        (BATCH_SIZE // TESSERACT_DIM, SEQ_LENGTH, HIDDEN_SIZE // TESSERACT_DIM),
        i,
        j,
        k,
        ParallelMode.PARALLEL_2P5D_ROW,
        ParallelMode.PARALLEL_2P5D_COL,
        data_parallel_rank,
        pipeline_parallel_rank,
        pipeline_parallel_size,
        tensor_parallel_size,
    )
    # Serial reference on fresh leaf tensors so gradients accumulate on them.
    # (A stray no-op tuple expression that stood here was removed.)
    C_master = C_master.clone()
    C_master.requires_grad = True
    B_master = B_master.clone()
    B_master.requires_grad = True
    A_master = torch.matmul(C_master, B_master.transpose(0, 1))
    A = torch.chunk(A_master, TESSERACT_DIM, dim=0)[i]
    A = torch.chunk(A, TESSERACT_DIM, dim=-1)[j]
    check_equal(out, A)
    print_rank_0("ABT forward: pass")
    grad_shape = A_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, TESSERACT_DIM, dim=0)[i]
    grad = torch.chunk(grad, TESSERACT_DIM, dim=-1)[j]
    # backward
    out.backward(grad)
    A_master.backward(grad_master)
    C_grad = C_master.grad
    C_grad = torch.chunk(C_grad, TESSERACT_DIM, dim=0)[i]
    C_grad = torch.chunk(C_grad, TESSERACT_DIM, dim=-1)[j]
    check_equal(C_grad, C.grad)
    B_grad = B_master.grad
    B_grad = torch.chunk(B_grad, TESSERACT_DIM, dim=0)[i]
    B_grad = torch.chunk(B_grad, TESSERACT_DIM, dim=-1)[j]
    check_equal(B_grad, B.grad)
    print_rank_0("ABT backward: pass")
def check_ATB():
    """Check Matmul_ATB_2p5D (B = A^T @ C) forward and backward against a
    serial torch.matmul reference, using the same broadcast-then-tile scheme
    as :func:`check_AB`.
    """
    # Fall back to rank 0 / size 1 when a parallel mode is not initialized.
    data_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.DATA) else gpc.get_local_rank(ParallelMode.DATA)
    pipeline_parallel_rank = (
        0 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_local_rank(ParallelMode.PIPELINE)
    )
    pipeline_parallel_size = (
        1 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_world_size(ParallelMode.PIPELINE)
    )
    tensor_parallel_size = gpc.get_world_size(ParallelMode.TENSOR)
    device = get_accelerator().get_current_device()
    dtype = torch.float
    # (i, j, k): this rank's (column, row, depth) coordinates in the 2.5D grid.
    i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
    j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
    k = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)
    # Build A identically on all ranks, then keep only this rank's tile.
    A_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, TESSERACT_DIM, dim=0)[i]
    A = torch.chunk(A, TESSERACT_DIM, dim=-1)[j]
    A = A.clone()
    A.requires_grad = True
    # Same for the activation-gradient-like tensor C.
    C_shape = (BATCH_SIZE, SEQ_LENGTH, 4 * HIDDEN_SIZE)
    C_master = torch.randn(C_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(C_master, src=0)
    C = torch.chunk(C_master, TESSERACT_DIM, dim=0)[i]
    C = torch.chunk(C, TESSERACT_DIM, dim=-1)[j]
    C = C.clone()
    C.requires_grad = True
    out = Matmul_ATB_2p5D.apply(
        A,
        C,
        TESSERACT_DIM,
        (HIDDEN_SIZE // TESSERACT_DIM, 4 * HIDDEN_SIZE // TESSERACT_DIM),
        i,
        j,
        k,
        ParallelMode.PARALLEL_2P5D_ROW,
        ParallelMode.PARALLEL_2P5D_COL,
        data_parallel_rank,
        pipeline_parallel_rank,
        pipeline_parallel_size,
        tensor_parallel_size,
    )
    # Serial reference on fresh leaf tensors so gradients accumulate on them.
    # (A stray no-op tuple expression that stood here was removed.)
    A_master = A_master.clone()
    A_master.requires_grad = True
    C_master = C_master.clone()
    C_master.requires_grad = True
    B_master = torch.matmul(
        A_master.view(-1, A_master.shape[-1]).transpose(0, 1), C_master.view(-1, C_master.shape[-1])
    )
    B = torch.chunk(B_master, TESSERACT_DIM, dim=0)[i]
    B = torch.chunk(B, TESSERACT_DIM, dim=-1)[j]
    check_equal(out, B)
    print_rank_0("ATB forward: pass")
    grad_shape = B_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, TESSERACT_DIM, dim=0)[i]
    grad = torch.chunk(grad, TESSERACT_DIM, dim=-1)[j]
    out.backward(grad)
    B_master.backward(grad_master)
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=0)[i]
    A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=-1)[j]
    check_equal(A_grad, A.grad)
    C_grad = C_master.grad
    C_grad = torch.chunk(C_grad, TESSERACT_DIM, dim=0)[i]
    C_grad = torch.chunk(C_grad, TESSERACT_DIM, dim=-1)[j]
    check_equal(C_grad, C.grad)
    print_rank_0("ATB backward: pass")
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_moe/moe_utils.py | tests/test_legacy/test_moe/moe_utils.py | import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed import ProcessGroup
from colossalai.booster.plugin.low_level_zero_plugin import LowLevelZeroModel
from colossalai.legacy.engine.gradient_handler._base_gradient_handler import BaseGradientHandler
from colossalai.legacy.engine.gradient_handler.utils import bucket_allreduce
from colossalai.legacy.moe.manager import MOE_MANAGER
from colossalai.legacy.moe.utils import get_moe_epsize_param_dict
from colossalai.legacy.registry import GRADIENT_HANDLER
from colossalai.tensor.moe_tensor.api import get_ep_group, get_ep_size, set_moe_tensor_ep_group
def delete_moe_info(model):
    """Strip MoE expert-parallel metadata from *model*.

    Removes the ``ep_group`` attribute, if present, from every parameter,
    turning MoE-tagged tensors back into ordinary parameters.
    """
    for param in model.parameters():
        if hasattr(param, "ep_group"):
            delattr(param, "ep_group")
class MoeModel(nn.Module):
    """Tiny two-stage model used by the MoE tests.

    A bias-free linear embedding (4 -> 16) followed by multiplication with a
    plain weight matrix ``w1`` (16 -> 8). When an expert-parallel process
    group is supplied, ``w1`` is tagged as a MoE tensor of that group.
    """

    def __init__(self, ep_group: ProcessGroup = None):
        super().__init__()
        self.test_embed = nn.Linear(4, 16, bias=False)
        self.w1 = torch.nn.Parameter(torch.randn(16, 8))
        if ep_group:
            # Mark w1 as expert-parallel so EP/ZeRO logic treats it specially.
            set_moe_tensor_ep_group(self.w1, ep_group)

    def forward(self, x):
        hidden = self.test_embed(x)
        return torch.matmul(hidden, self.w1)
@GRADIENT_HANDLER.register_module
class MoeGradientHandler(BaseGradientHandler):
    """A helper class to handle all-reduce operations in a data parallel group and
    moe model parallel. A all-reduce collective communication will be operated in
    :func:`handle_gradient` among a data parallel group.
    For better performance, it bucketizes the gradients of all parameters that are
    the same type to improve the efficiency of communication.
    Args:
        model (Module): Model where the gradients accumulate.
        optimizer (Optimizer): Optimizer for updating the parameters.
    """
    def __init__(self, model, optimizer=None):
        super().__init__(model, optimizer)
    def handle_gradient(self):
        """A method running an all-reduce operation in a data parallel group.
        Then running an all-reduce operation for all parameters in experts
        across moe model parallel group
        """
        # No-op in single-process runs: there is nothing to synchronize.
        if dist.get_world_size() > 1:
            # Group parameters by their expert-parallel size.
            epsize_param_dict = get_moe_epsize_param_dict(self._model)
            # epsize is 1, indicating the params are replicated among processes in data parallelism
            # use the ParallelMode.DATA to get data parallel group
            # reduce gradients for all parameters in data parallelism
            if 1 in epsize_param_dict:
                bucket_allreduce(param_list=epsize_param_dict[1])
            for ep_size in epsize_param_dict:
                # Params with 1 < ep_size < world_size still have a nontrivial
                # MoE data-parallel group; all-reduce their grads within it.
                # ep_size == world_size means each expert lives on one rank only.
                if ep_size != 1 and ep_size != MOE_MANAGER.world_size:
                    bucket_allreduce(
                        param_list=epsize_param_dict[ep_size], group=MOE_MANAGER.parallel_info_dict[ep_size].dp_group
                    )
def assert_not_equal_in_group(tensor, process_group=None):
    """Assert that *tensor* differs between every pair of adjacent ranks.

    Gathers the tensor from all ranks of *process_group* and fails if any two
    consecutive ranks hold (allclose-)identical values. Requires an
    initialized torch.distributed process group.
    """
    # all gather tensors from different ranks
    world_size = dist.get_world_size(process_group)
    tensor_list = [torch.empty_like(tensor) for _ in range(world_size)]
    dist.all_gather(tensor_list, tensor, group=process_group)
    # check if they are equal one by one
    for i in range(world_size - 1):
        a = tensor_list[i]
        b = tensor_list[i + 1]
        assert not torch.allclose(a, b), (
            f"expected tensors on rank {i} and {i + 1} not to be equal " f"but they are, {a} vs {b}"
        )
def run_fwd_bwd(model, data, label, criterion, optimizer, enable_autocast=False):
    """Run one forward/backward step and return the forward output.

    When *criterion* is given, the loss is ``criterion(model(data), label)``;
    otherwise the model is assumed to compute its own loss from
    ``(data, label)``. ZeRO-wrapped models backprop through the optimizer;
    plain models use autograd directly.
    """
    model.train()
    with torch.cuda.amp.autocast(enabled=enable_autocast):
        if criterion:
            y = model(data)
            loss = criterion(y, label)
        else:
            # Fix: bind the output here too so the final `return y` does not
            # raise NameError on this path (the internally-computed loss is
            # what callers get back).
            y = loss = model(data, label)
        loss = loss.float()
    if isinstance(model, LowLevelZeroModel):
        optimizer.backward(loss)
    else:
        loss.backward()
    return y
def sync_local_from_ep(local_model, ep_model, assert_grad_flag: bool = False) -> None:
    """Sync the parameters of tp model from ep model

    Args:
        local_model (MoeModule): unsharded reference model.
        ep_model (MoeModule): expert-parallel model whose expert params are
            sharded across the EP group.
        assert_grad_flag: when True, compare params and grads instead of
            copying them into ``local_model``.
    """
    for (local_name, local_param), (ep_name, ep_param) in zip(
        local_model.named_parameters(), ep_model.named_parameters()
    ):
        if "experts" not in local_name:
            # Non-expert params are replicated: compare (or copy) directly.
            if assert_grad_flag:
                assert torch.allclose(local_param, ep_param), f"local_param: {local_param}, ep_param: {ep_param}"
                assert torch.allclose(local_param.grad, ep_param.grad)
            else:
                local_param.data.copy_(ep_param.data)
            continue
        # gather param from ep model
        param_list = [torch.zeros_like(ep_param) for _ in range(get_ep_size(ep_param))]
        dist.all_gather(param_list, ep_param, group=get_ep_group(ep_param))
        # Shards are concatenated along dim 0 (the expert dimension,
        # matching how experts are sliced elsewhere in these tests).
        all_param = torch.cat(param_list, dim=0)
        if assert_grad_flag:
            grad_list = [torch.zeros_like(ep_param) for _ in range(get_ep_size(ep_param))]
            dist.all_gather(grad_list, ep_param.grad, group=get_ep_group(ep_param))
            all_grad = torch.cat(grad_list, dim=0)
        if assert_grad_flag:
            assert torch.allclose(local_param, all_param)
            assert torch.allclose(local_param.grad, all_grad)
        else:
            local_param.data.copy_(all_param.data)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_moe/test_moe_hybrid_zero.py | tests/test_legacy/test_moe/test_moe_hybrid_zero.py | import pytest
import torch
import torch.distributed as dist
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import LowLevelZeroPlugin
from colossalai.booster.plugin.low_level_zero_plugin import LowLevelZeroModel
from colossalai.legacy.moe.manager import MOE_MANAGER
from colossalai.tensor.moe_tensor.api import is_moe_tensor
from colossalai.testing import rerun_if_address_is_in_use, spawn
from tests.test_moe.moe_utils import MoeModel
def run_fwd_bwd(model, data, label, criterion, optimizer, enable_autocast=False):
    """Run one forward/backward step and return the forward output.

    When *criterion* is given, the loss is ``criterion(model(data), label)``;
    otherwise the model is assumed to compute its own loss from
    ``(data, label)``. ZeRO-wrapped models backprop ``loss / 2`` through the
    optimizer (presumably to match an accumulation factor in this test —
    TODO confirm with the test author); plain models use autograd directly.
    """
    model.train()
    with torch.cuda.amp.autocast(enabled=enable_autocast):
        if criterion:
            y = model(data)
            loss = criterion(y, label)
        else:
            # Fix: bind the output here too so the final `return y` does not
            # raise NameError on this path.
            y = loss = model(data, label)
        loss = loss.float()
    if isinstance(model, LowLevelZeroModel):
        optimizer.backward(loss / 2)
    else:
        loss.backward()
    return y
def run_zero_optim_test(local_rank, world_size, stage=1):
    """Compare one optimizer step of a ZeRO-boosted EP MoE model against a
    plain (non-parallel) torch model initialized with the same weights.

    ``stage`` selects the ZeRO stage (1 or 2). Requires an initialized
    distributed environment with at least 2 ranks (EP size is fixed at 2).
    """
    criterion = torch.nn.CrossEntropyLoss()
    data = torch.randn(16, 4).cuda()
    label = torch.randint(0, 4, (16,)).cuda()
    # Reference model: no expert parallelism.
    MOE_MANAGER.__init__()
    MOE_MANAGER.setup(parallel=None)
    torch_model = MoeModel()
    torch_optimizer = torch.optim.Adam(torch_model.parameters())
    torch_model = torch_model.cuda()
    # Model under test: EP with ep_size 2, plus an extra MoE data-parallel group.
    MOE_MANAGER.__init__()
    MOE_MANAGER.setup(max_ep_size=2, use_ep_inside=False, parallel="EP")
    zero_model = MoeModel()
    extra_dp_group = MOE_MANAGER.parallel_info_dict[2].dp_group
    ep_rank = dist.get_rank(MOE_MANAGER.parallel_info_dict[2].ep_group)
    ep_size = MOE_MANAGER.parallel_info_dict[2].ep_size
    # Copy weights from the reference model; expert params take only this
    # rank's slice along the expert dimension (dim 0).
    for zero_param, torch_param in zip(zero_model.parameters(), torch_model.parameters()):
        if is_moe_tensor(zero_param):
            num_expert = torch_param.data.shape[0]
            zero_param.data.copy_(
                torch_param.data[ep_rank * (num_expert // ep_size) : (ep_rank + 1) * (num_expert // ep_size)]
                .detach()
                .clone()
            )
        else:
            zero_param.data.copy_(torch_param.data.detach().clone())
    zero_optimizer = torch.optim.Adam(zero_model.parameters())
    plugin = LowLevelZeroPlugin(stage=stage, precision="fp32")
    plugin.zero_optim_kwargs["moe_extra_dp_process_group"] = extra_dp_group
    booster = Booster(plugin=plugin)
    zero_model, zero_optimizer, _, _, _ = booster.boost(zero_model, zero_optimizer)
    # One step on each model, then compare parameters slice-for-slice.
    run_fwd_bwd(torch_model, data, label, criterion, None)
    torch_optimizer.step()
    run_fwd_bwd(zero_model, data, label, criterion, zero_optimizer)
    zero_optimizer.step()
    for (torch_name, torch_param), (zero_name, zero_param) in zip(
        torch_model.named_parameters(), zero_model.named_parameters()
    ):
        if is_moe_tensor(zero_param):
            # Slice the reference expert param down to this rank's shard.
            num_expert = torch_param.data.shape[0]
            torch_param.data = torch_param.data[
                ep_rank * (num_expert // ep_size) : (ep_rank + 1) * (num_expert // ep_size)
            ]
        assert torch.allclose(
            torch_param.data, zero_param.data, atol=1e-4
        ), f"{torch_name}\ntorch_param {torch_param.data}\nzero_param {zero_param.data}"
def run_dist(rank, world_size, port):
    # Worker entry point: initialize the NCCL backend, then exercise both
    # ZeRO stages within the same process group.
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    run_zero_optim_test(rank, world_size, stage=1)
    run_zero_optim_test(rank, world_size, stage=2)
@pytest.mark.skip(reason="moe need to be refactored")
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [4])
@rerun_if_address_is_in_use()
def test_moe_zero_optim(world_size):
    # Spawn `world_size` worker processes, each running `run_dist`.
    spawn(run_dist, world_size)
if __name__ == "__main__":
    test_moe_zero_optim(world_size=4)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_moe/test_grad_handler.py | tests/test_legacy/test_moe/test_grad_handler.py | import pytest
import torch
import torch.distributed as dist
import torch.nn as nn
import colossalai
from colossalai.accelerator import get_accelerator
from colossalai.legacy.moe.manager import MOE_MANAGER
# from colossalai.shardformer.layer.moe.layers import SparseMLP
from colossalai.testing import assert_equal_in_group, rerun_if_address_is_in_use, spawn
from tests.test_moe.moe_utils import MoeGradientHandler
BATCH_SIZE = 4
DIM = 16
def run_test(rank, world_size, port):
    """Spawned worker: build MoE layers with 1/2/4 experts and verify that
    expert weights — and, after the gradient handler runs, expert grads —
    are synchronized within each expert data-parallel group.

    NOTE(review): ``SparseMLP`` comes from the commented-out shardformer
    import at the top of this file, so this raises NameError if un-skipped.
    """
    colossalai.launch(
        rank=rank,
        world_size=world_size,
        host="localhost",
        port=port,
        backend="nccl",
    )
    MOE_MANAGER.setup(parallel="EP")  # MOE initialization
    num_experts_list = [1, 2, 4]
    layer_list = []
    for num_experts in num_experts_list:
        moe_layer = SparseMLP(
            hidden_size=DIM,
            intermediate_size=DIM * 4,
            num_experts=num_experts,
            router_top_k=1,
            router_noisy_policy="Jitter",
        )
        layer_list.append(moe_layer)
    model = nn.ModuleList(layer_list)
    model = model.to(get_accelerator().get_current_device())
    dist_dict = MOE_MANAGER.parallel_info_dict
    # Weights must start identical within each dp group (ep_size 1, 2, 4).
    assert_equal_in_group(layer_list[0].experts.wi.data, dist_dict[1].dp_group)
    assert_equal_in_group(layer_list[0].experts.wo.data, dist_dict[1].dp_group)
    assert_equal_in_group(layer_list[1].experts.wi.data, dist_dict[2].dp_group)
    assert_equal_in_group(layer_list[1].experts.wo.data, dist_dict[2].dp_group)
    assert_equal_in_group(layer_list[2].experts.wi.data, dist_dict[4].dp_group)
    assert_equal_in_group(layer_list[2].experts.wo.data, dist_dict[4].dp_group)
    # MoE model synchronization passed
    # NOTE(review): the second positional argument is `optimizer`; 0 is passed
    # here — presumably a placeholder since the handler never uses it. Confirm.
    grad_handler = MoeGradientHandler(model, 0)
    rank = dist.get_rank()
    # Different seed per rank so each rank produces different grads.
    torch.cuda.manual_seed(78 + rank)
    data = torch.randn(BATCH_SIZE, DIM, device=get_accelerator().get_current_device())
    grad = torch.randn_like(data)
    MOE_MANAGER.reset_loss()
    for layer in layer_list:
        data = layer(data)
    data.backward(grad)
    grad_handler.handle_gradient()
    # Grads must be identical within each dp group after the handler runs.
    assert_equal_in_group(layer_list[0].experts.wi.grad, dist_dict[1].dp_group)
    assert_equal_in_group(layer_list[0].experts.wo.grad, dist_dict[1].dp_group)
    assert_equal_in_group(layer_list[1].experts.wi.grad, dist_dict[2].dp_group)
    assert_equal_in_group(layer_list[1].experts.wo.grad, dist_dict[2].dp_group)
    assert_equal_in_group(layer_list[2].experts.wi.grad, dist_dict[4].dp_group)
    assert_equal_in_group(layer_list[2].experts.wo.grad, dist_dict[4].dp_group)
    # MoE grad handler test passed
@pytest.mark.skip(reason="moe need to be refactored")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_grad_handler():
    # Spawn 4 worker processes, each running `run_test`.
    spawn(run_test, 4)
if __name__ == "__main__":
    test_grad_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_moe/test_moe_load_balance.py | tests/test_legacy/test_moe/test_moe_load_balance.py | import pytest
import torch
import torch.distributed as dist
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import LowLevelZeroPlugin
from colossalai.booster.plugin.low_level_zero_plugin import LowLevelZeroModel
from colossalai.legacy.moe.manager import MOE_MANAGER
# from colossalai.shardformer.layer.moe import apply_load_balance
from colossalai.tensor.moe_tensor.api import is_moe_tensor
from colossalai.testing import rerun_if_address_is_in_use, spawn
from tests.test_moe.moe_utils import MoeGradientHandler, MoeModel
def split_ddp_grad(grad, world_size):
    """Flatten *grad*, zero-pad it to a multiple of *world_size*, and split
    it into ``world_size`` equal flat chunks (mirroring how DDP/ZeRO shards
    a flattened gradient).
    """
    with torch.no_grad():
        flat = grad.clone().detach().flatten()
        pad = (world_size - flat.numel() % world_size) % world_size
        if pad > 0:
            flat = torch.nn.functional.pad(flat, [0, pad])
        return flat.split(flat.numel() // world_size)
def run_fwd_bwd(model, data, label, criterion, optimizer, enable_autocast=False):
    """Run one forward/backward step and return the forward output.

    When *criterion* is given, the loss is ``criterion(model(data), label)``;
    otherwise the model is assumed to compute its own loss from
    ``(data, label)``. ZeRO-wrapped models backprop through the optimizer;
    plain models use autograd directly.
    """
    model.train()
    with torch.cuda.amp.autocast(enabled=enable_autocast):
        if criterion:
            y = model(data)
            loss = criterion(y, label)
        else:
            # Fix: bind the output here too so the final `return y` does not
            # raise NameError on this path.
            y = loss = model(data, label)
        loss = loss.float()
    if isinstance(model, LowLevelZeroModel):
        optimizer.backward(loss)
    else:
        loss.backward()
    return y
def run_zero_optim_test(local_rank, world_size, stage=1):
    """Compare a load-balanced ZeRO EP model with a plain EP torch model
    before and after ``apply_load_balance``.

    NOTE(review): ``apply_load_balance`` comes from the commented-out
    shardformer import at the top of this file, so this raises NameError
    if un-skipped.
    """
    criterion = torch.nn.CrossEntropyLoss()
    MOE_MANAGER.__init__()
    MOE_MANAGER.setup(
        parallel="EP",
    )
    zero_model = MoeModel(enable_load_balance=True)
    zero_optimizer = torch.optim.Adam(zero_model.parameters())
    plugin = LowLevelZeroPlugin(stage=stage, precision="bf16", verbose=True)
    booster = Booster(plugin=plugin)
    zero_model, zero_optimizer, _, _, _ = booster.boost(zero_model, zero_optimizer)
    # Reference model: same EP layout, no load balancing, weights copied
    # from the boosted model.
    MOE_MANAGER.__init__()
    MOE_MANAGER.setup(parallel="EP")
    torch_model = MoeModel()
    for zero_param, torch_param in zip(zero_model.parameters(), torch_model.parameters()):
        torch_param.data.copy_(zero_param.data)
    torch_optimizer = torch.optim.Adam(torch_model.parameters())
    torch_model = torch_model.cuda().bfloat16()
    grad_handler = MoeGradientHandler(torch_model)
    # run to update expert load
    # Per-rank scaling makes each rank feed different data magnitudes.
    data = torch.randn(16, 4).cuda().bfloat16() / 1000 / (local_rank + 1)
    label = torch.randint(0, 4, (16,)).cuda()
    # run torch model twice
    run_fwd_bwd(torch_model, data, label, criterion, None)
    grad_handler.handle_gradient()
    torch_optimizer.step()
    torch_optimizer.zero_grad()
    run_fwd_bwd(torch_model, data, label, criterion, None)
    grad_handler.handle_gradient()
    # get optim and load status in zero model
    run_fwd_bwd(zero_model, data, label, criterion, zero_optimizer)
    zero_optimizer.step()
    zero_optimizer.zero_grad()
    with torch.no_grad():
        origin_out = zero_model(data)
    # load balance
    apply_load_balance(zero_model, zero_optimizer)
    # run again to test
    zero_out = run_fwd_bwd(zero_model, data, label, criterion, zero_optimizer)
    # NOTE(review): the allclose result is discarded — an `assert` is
    # almost certainly missing here.
    torch.allclose(origin_out, zero_out)
    # assert optim
    torch_optimizer.step()
    torch_out = run_fwd_bwd(torch_model, data, label, criterion, None)
    zero_optimizer.step()
    zero_out = run_fwd_bwd(zero_model, data, label, criterion, zero_optimizer)
    assert torch.allclose(zero_out, torch_out, atol=3e-5), f"zero_out:{zero_out}\ntorch_out{torch_out}"
def run_hybrid_zero_optim_test(local_rank, world_size, stage=1):
    """Compare a load-balanced hybrid (EP + extra MoE-DP) ZeRO model against
    a plain non-parallel torch model.

    NOTE(review): ``apply_load_balance`` comes from the commented-out
    shardformer import at the top of this file, so this raises NameError
    if un-skipped.
    """
    criterion = torch.nn.CrossEntropyLoss()
    data = torch.randn(16, 4).cuda()
    label = torch.randint(0, 4, (16,)).cuda()
    # Reference model: no expert parallelism.
    MOE_MANAGER.__init__()
    MOE_MANAGER.setup(parallel=None)
    torch_model = MoeModel()
    torch_optimizer = torch.optim.Adam(torch_model.parameters())
    torch_model = torch_model.cuda()
    # Model under test: EP with ep_size 2 plus an extra MoE data-parallel group.
    MOE_MANAGER.__init__()
    MOE_MANAGER.setup(
        max_ep_size=2,
        use_ep_inside=False,
        parallel="EP",
    )
    zero_model = MoeModel(enable_load_balance=True)
    extra_dp_group = MOE_MANAGER.parallel_info_dict[2].dp_group
    ep_rank = dist.get_rank(MOE_MANAGER.parallel_info_dict[2].ep_group)
    ep_size = MOE_MANAGER.parallel_info_dict[2].ep_size
    # Copy weights from the reference model; expert params take only this
    # rank's slice along the expert dimension (dim 0).
    for zero_param, torch_param in zip(zero_model.parameters(), torch_model.parameters()):
        if is_moe_tensor(zero_param):
            num_expert = torch_param.data.shape[0]
            zero_param.data.copy_(
                torch_param.data[ep_rank * (num_expert // ep_size) : (ep_rank + 1) * (num_expert // ep_size)]
                .detach()
                .clone()
            )
        else:
            zero_param.data.copy_(torch_param.data.detach().clone())
    zero_optimizer = torch.optim.Adam(zero_model.parameters())
    plugin = LowLevelZeroPlugin(stage=stage, precision="fp32")
    plugin.zero_optim_kwargs["moe_extra_dp_process_group"] = extra_dp_group
    booster = Booster(plugin=plugin)
    zero_model, zero_optimizer, _, _, _ = booster.boost(zero_model, zero_optimizer)
    # run torch for twice
    run_fwd_bwd(torch_model, data, label, criterion, None)
    torch_optimizer.step()
    torch_optimizer.zero_grad()
    run_fwd_bwd(torch_model, data, label, criterion, None)
    torch_optimizer.step()
    # run zero
    run_fwd_bwd(zero_model, data, label, criterion, zero_optimizer)
    zero_optimizer.step()
    zero_optimizer.zero_grad()
    with torch.no_grad():
        origin_out = zero_model(data)
    # load balance
    apply_load_balance(zero_model, zero_optimizer)
    # assert out
    zero_out = run_fwd_bwd(zero_model, data, label, criterion, zero_optimizer)
    # NOTE(review): the allclose result is discarded — an `assert` is
    # almost certainly missing here.
    torch.allclose(origin_out, zero_out)
    # assert optim
    zero_optimizer.step()
    zero_out = run_fwd_bwd(zero_model, data, label, criterion, zero_optimizer)
    torch_out = run_fwd_bwd(torch_model, data, label, criterion, None)
    # TODO: high atol, check if bug exists
    assert torch.allclose(zero_out, torch_out, atol=8e-4), f"zero_out:{zero_out}\ntorch_out{torch_out}"
def run_dist(rank, world_size, port):
    # Worker entry point: initialize the NCCL backend, then run both ZeRO
    # stages of both test variants within the same process group.
    colossalai.launch(
        rank=rank,
        world_size=world_size,
        host="localhost",
        port=port,
        backend="nccl",
    )
    run_zero_optim_test(rank, world_size, stage=1)
    run_zero_optim_test(rank, world_size, stage=2)
    run_hybrid_zero_optim_test(rank, world_size, stage=1)
    run_hybrid_zero_optim_test(rank, world_size, stage=2)
@pytest.mark.skip(reason="moe need to be refactored")
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [4])
@rerun_if_address_is_in_use()
def test_moe_load_balance(world_size):
    # Spawn `world_size` worker processes, each running `run_dist`.
    spawn(run_dist, world_size)
if __name__ == "__main__":
    test_moe_load_balance(world_size=4)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_moe/test_moe_group.py | tests/test_legacy/test_moe/test_moe_group.py | import pytest
import torch.distributed as dist
import torch.nn as nn
import colossalai
from colossalai.accelerator import get_accelerator
from colossalai.legacy.moe.manager import MOE_MANAGER
from colossalai.legacy.moe.utils import sync_moe_model_param
# from colossalai.shardformer.layer.moe import MLPExperts
from colossalai.testing import assert_equal_in_group, rerun_if_address_is_in_use, spawn
HIDDEN_SIZE = 4
INTERMEDIATE_SIZE = 8
def run_moe_init(expert_parallel):
    """Check expert sharding and process-group layout for EP vs TP expert
    parallelism on a 2-rank group.

    NOTE(review): ``MLPExperts`` comes from the commented-out shardformer
    import at the top of this file, so this raises NameError if un-skipped.
    """
    MOE_MANAGER.__init__()
    MOE_MANAGER.setup(parallel=expert_parallel)
    expert_args = dict(
        hidden_size=HIDDEN_SIZE,
        intermediate_size=INTERMEDIATE_SIZE,
        expert_parallel=expert_parallel,
    )
    exp0 = MLPExperts(1, **expert_args)
    exp1 = MLPExperts(2, **expert_args)
    exp2 = MLPExperts(4, **expert_args)
    # EP shards experts across the 2 ranks; TP keeps every expert local.
    if expert_parallel == "EP":
        assert exp0.num_local_experts == 1
        assert exp1.num_local_experts == 1
        assert exp2.num_local_experts == 2
    else:
        assert exp0.num_local_experts == 1
        assert exp1.num_local_experts == 2
        assert exp2.num_local_experts == 4
    parallel_info_dict = MOE_MANAGER.parallel_info_dict
    rank = dist.get_rank()
    # group creation assert
    assert len(parallel_info_dict) == 2
    assert dist.get_rank(parallel_info_dict[2].ep_group) == rank % 2
    assert dist.get_rank(parallel_info_dict[1].ep_group) == 0
    assert dist.get_rank(parallel_info_dict[2].dp_group) == rank // 2
    assert dist.get_rank(parallel_info_dict[1].dp_group) == rank
    model = nn.ModuleList([exp0, exp1, exp2])
    model = model.to(get_accelerator().get_current_device())
    sync_moe_model_param(model)
    # MOE experts layout success when ep_size = 1
    assert_equal_in_group(exp0.wi.data, parallel_info_dict[1].dp_group)
    assert_equal_in_group(exp0.wo.data, parallel_info_dict[1].dp_group)
    # MOE experts layout success when ep_size = 2
    assert_equal_in_group(exp1.wi.data, parallel_info_dict[2].dp_group)
    assert_equal_in_group(exp1.wo.data, parallel_info_dict[2].dp_group)
def _run_test(rank, world_size, port, expert_parallel):
    # Worker entry point for `spawn`: set up the distributed environment,
    # then run the MoE initialization checks.
    colossalai.launch(
        rank=rank,
        world_size=world_size,
        host="localhost",
        port=port,
        backend="nccl",
    )
    run_moe_init(expert_parallel)
@pytest.mark.skip(reason="moe need to be refactored")
@pytest.mark.dist
@pytest.mark.parametrize("expert_parallel", ["EP", "TP"])
@rerun_if_address_is_in_use()
def test_moe_initialization(expert_parallel):
    # Two processes suffice: the checks cover ep_size in {1, 2}.
    spawn(_run_test, 2, expert_parallel=expert_parallel)
if __name__ == "__main__":
    test_moe_initialization("EP")
    test_moe_initialization("TP")
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_tensor/test_parameter.py | tests/test_legacy/test_tensor/test_parameter.py | import pytest
import torch
from common_utils import tensor_equal
import colossalai
from colossalai.tensor import ColoParameter, ColoTensor
from colossalai.testing import free_port
@pytest.mark.skip
def test_multiinheritance():
    """Check ColoParameter's multiple inheritance from ColoTensor and
    torch.nn.Parameter: default spec, deepcopy, repr, and __torch_function__.

    NOTE(review): only ``import colossalai`` appears at the top of this file,
    so ``colossalai.legacy.launch`` relies on the subpackage being imported
    as a side effect elsewhere — verify before un-skipping.
    """
    colossalai.legacy.launch(rank=0, world_size=1, host="localhost", port=free_port(), backend="nccl")
    colo_param = ColoParameter(None, requires_grad=True)
    # Default distribution spec should be replicated ("r").
    assert colo_param.dist_spec.placement.value == "r"
    assert isinstance(colo_param, ColoTensor)
    assert isinstance(colo_param, torch.nn.Parameter)
    # __deepcopy__ overload
    import copy
    colo_param2 = copy.deepcopy(colo_param)
    assert isinstance(colo_param2, ColoParameter)
    assert tensor_equal(colo_param.data, colo_param2.data)
    assert colo_param.requires_grad == colo_param2.requires_grad
    # __repr__ overload
    assert "ColoParameter" in str(colo_param)
    # __torch_function__
    clone_param = torch.clone(colo_param)
    assert isinstance(clone_param, ColoTensor)
if __name__ == "__main__":
    test_multiinheritance()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_tensor/common_utils/__init__.py | tests/test_legacy/test_tensor/common_utils/__init__.py | from ._utils import *
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_tensor/common_utils/_utils.py | tests/test_legacy/test_tensor/common_utils/_utils.py | import os
import random
import numpy as np
import torch
import torch.distributed as dist
from torch.testing import assert_close
from colossalai.legacy.context import ParallelMode
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.tensor import ComputePattern, ComputeSpec, ShardSpec
def set_seed(seed):
    """Seed every RNG the tests depend on and force deterministic cuDNN."""
    os.environ["PYTHONHASHSEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Trade kernel-selection speed for bitwise reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def check_equal(A, B):
    """Assert that tensors *A* and *B* are element-wise close (loose tolerance)."""
    # Fix: assert the boolean directly instead of the redundant `== True`.
    assert torch.allclose(A, B, rtol=1e-3, atol=1e-1)
def replace_parameter_add_grad(layer, weight=None, bias=None):
    """Swap in new weight/bias tensors on *layer* and mark them trainable."""
    if weight is not None:
        del layer.weight
        layer.weight = weight
        layer.weight.requires_grad = True
    if bias is not None:
        del layer.bias
        layer.bias = bias
        layer.bias.requires_grad = True
def broadcast_tensor_chunk(tensor, chunk_size=1, local_rank=0):
    """Broadcast *tensor* from rank 0, then return this rank's chunk along
    the last dimension.

    ``chunk_size`` is the number of chunks (i.e. the tensor-parallel world
    size), not the size of a chunk. The chunk is cloned so the result owns
    its own storage rather than viewing the broadcast buffer.
    """
    dist.broadcast(tensor, src=0)
    tensor_chunk = torch.chunk(tensor, chunk_size, dim=-1)[local_rank]
    return tensor_chunk.clone()
def tensor_equal(t_a: torch.Tensor, t_b: torch.Tensor, rtol: float = 1e-3, atol: float = 1e-1):
    """Check that *t_a* and *t_b* match within the given tolerances.

    Raises via ``torch.testing.assert_close`` on mismatch; returns ``True``
    otherwise so callers can write ``assert tensor_equal(a, b)``.
    """
    assert_close(t_a, t_b, rtol=rtol, atol=atol)
    return True
def tensor_shard_equal(
    tensor: torch.Tensor, shard: torch.Tensor, rank: int, world_size: int, rtol: float = 1e-3, atol: float = 1e-1
):
    """Compare a full tensor against a (possibly 1-D sharded) shard of it.

    If shapes match, the tensors are compared directly. Otherwise exactly one
    dimension may differ (1-D sharding); the full tensor is chunked along that
    dimension and the chunk at ``rank`` is compared against ``shard``.
    ``rank``/``world_size`` may be None, in which case they fall back to the
    1-D tensor-parallel group of the global context.
    Raises NotImplementedError for shards that differ in more than one dim.
    """
    assert tensor.ndim == shard.ndim
    if tensor.shape == shard.shape:
        return tensor_equal(tensor, shard, rtol, atol)
    else:
        # indices of the dimensions where the shapes disagree
        dims_not_eq = torch.nonzero(torch.tensor(tensor.shape) != torch.tensor(shard.shape))
        if dims_not_eq.numel() == 1:
            # 1D shard
            dim = dims_not_eq.item()
            if world_size is None:
                world_size = gpc.get_world_size(ParallelMode.PARALLEL_1D)
            if rank is None:
                rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)
            return tensor_equal(tensor.chunk(world_size, dim)[rank], shard, rtol, atol)
        else:
            raise NotImplementedError
def split_param_single_dim_tp1d(dim, param, pg):
    """Shard ``param`` along ``dim`` across the tensor-parallel ranks of ``pg``
    using the 1-D tensor-parallel compute pattern."""
    spec = (ShardSpec([dim], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D))
    # only rebind the process group if the param is not already tensor-parallel
    if param.process_group.tp_world_size() == 1:
        param.set_process_group(pg)
    param.set_tensor_spec(*spec)
def split_param_row_tp1d(param, pg):
    """Shard ``param`` row-wise (dim 0) across the tensor-parallel group."""
    split_param_single_dim_tp1d(0, param, pg)
def split_param_col_tp1d(param, pg):
    """Shard ``param`` column-wise (last dim) across the tensor-parallel group."""
    split_param_single_dim_tp1d(-1, param, pg)
def debug_print(ranks, *args):
    """Print ``args`` only on the listed ranks, then barrier so output from
    different ranks does not interleave with later work."""
    if dist.get_rank() in ranks:
        print(*args)
    dist.barrier()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_tensor/core/test_dist_spec_mgr.py | tests/test_legacy/test_tensor/core/test_dist_spec_mgr.py | import math
import pytest
import torch
import torch.distributed as dist
import colossalai
from colossalai.legacy.tensor import DistSpecManager, ProcessGroup, ReplicaSpec, ShardSpec
from colossalai.testing import rerun_if_address_is_in_use, spawn
def run():
    """Check DistSpecManager shard / gather / all-to-all round-trips.

    Shards a replicated 8x8 tensor row-wise, column-wise and as a 2-D grid
    (when world size is a perfect square) and verifies each shard matches the
    expected chunk and that gathering reconstructs the original tensor.
    """
    group = ProcessGroup(tp_degree=dist.get_world_size())
    rank = dist.get_rank()
    size = dist.get_world_size()
    # 2-D (matrix) sharding requires a perfect-square world size
    depth = int(math.sqrt(size))
    assert depth == math.sqrt(size)
    x = torch.rand(8, 8).cuda()
    old_dist_spec = ReplicaSpec()
    row_spec = ShardSpec([0], [size])
    col_spec = ShardSpec([-1], [size])
    mat_spec = ShardSpec([0, 1], [depth, depth])
    # replicated -> row shard, and gather back
    row_shard = DistSpecManager._shard_as(x, old_dist_spec, row_spec, group)
    assert torch.equal(x.chunk(size, 0)[rank], row_shard)
    assert torch.equal(x, DistSpecManager._gather(row_shard, row_spec, group))
    # row shard -> column shard via all-to-all, and gather back
    col_shard = DistSpecManager._all_to_all(row_shard, row_spec, col_spec, group)
    assert torch.equal(x.chunk(size, -1)[rank], col_shard)
    assert torch.equal(x, DistSpecManager._gather(col_shard, col_spec, group))
    # replicated -> 2-D grid shard, and gather back
    mat_shard = DistSpecManager._shard_as(x, old_dist_spec, mat_spec, group)
    assert torch.equal(x.chunk(depth, 0)[rank // depth].chunk(depth, 1)[rank % depth], mat_shard)
    assert torch.equal(x, DistSpecManager._gather(mat_shard, mat_spec, group))
def check_mem():
    """Verify that sharding frees GPU memory proportionally and gathering
    restores the original allocation size."""
    pg = ProcessGroup(tp_degree=dist.get_world_size())
    size = dist.get_world_size()
    # the test must start from a clean CUDA allocator state
    assert torch.cuda.memory_allocated() == 0
    x = torch.rand(32, 32).cuda()
    orig_mem = x.numel() * x.element_size()
    assert torch.cuda.memory_allocated() == orig_mem
    old_dist_spec = ReplicaSpec()
    row_spec = ShardSpec([0], [size])
    # after row-sharding only 1/size of the original memory should remain
    x.data = DistSpecManager._shard_as(x, old_dist_spec, row_spec, pg)
    assert x.size(0) == 32 // size and x.size(1) == 32
    assert torch.cuda.memory_allocated() == orig_mem // size
    # gathering should re-allocate the full tensor
    x.data = DistSpecManager._gather(x, row_spec, pg)
    assert torch.cuda.memory_allocated() == orig_mem
def run_dist(rank, world_size, port):
    """Per-process entry point: init the distributed env, then run the memory
    check (which requires a clean allocator) before the spec round-trip test."""
    colossalai.legacy.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    check_mem()
    run()
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 4])
@rerun_if_address_is_in_use()
def test_dist_spec_mgr(world_size):
    """Spawn ``world_size`` processes and run the DistSpecManager checks."""
    spawn(run_dist, world_size)
if __name__ == "__main__":
test_dist_spec_mgr(4)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_engine/test_engine.py | tests/test_legacy/test_engine/test_engine.py | import pytest
import torch
import colossalai
from colossalai.legacy.amp import AMP_TYPE
from colossalai.legacy.core import global_context as gpc
from colossalai.testing import DummyDataloader, parameterize, rerun_if_address_is_in_use, spawn
from tests.kit.model_zoo import model_zoo
CONFIG = dict(
parallel=dict(pipeline=dict(size=1), tensor=dict(size=1, mode=None)), fp16=dict(mode=None), clip_grad_norm=1.0
)
@parameterize("model_name", ["repeated_computed_layers", "resnet18", "repeated_computed_layers"])
@parameterize("amp_mode", [AMP_TYPE.APEX, AMP_TYPE.TORCH, AMP_TYPE.NAIVE, None])
def run_train(model_name, amp_mode):
    """Run one training step through the legacy engine for each combination of
    model-zoo model and AMP mode, checking the train loop does not crash."""
    # FIXME: test bert
    model_builder, data_gen_fn, *_ = next(iter(model_zoo.get_sub_registry(model_name).values()))
    train_dataloader = DummyDataloader(data_gen_fn)
    criterion = lambda x: x.sum()
    # mutate the global config so initialize() picks up the AMP mode under test
    gpc.config.fp16["mode"] = amp_mode
    model = model_builder()
    engine, train_dataloader, *args = colossalai.legacy.initialize(
        model=model,
        optimizer=torch.optim.Adam(model.parameters(), lr=1e-3),
        criterion=criterion,
        train_dataloader=train_dataloader,
    )
    try:
        engine.train()
        for data in train_dataloader:
            engine.zero_grad()
            data = {k: v.cuda() if isinstance(v, torch.Tensor) else v for k, v in data.items()}
            if criterion:
                output = engine(**data)
                loss = engine.criterion(output)
            else:
                loss = engine(**data)
            engine.backward(loss)
            engine.step()
            # a single step is enough for this smoke test
            break
    except IndexError:
        # if using apex amp, NetWithRepeatedlyComputedLayers will raise an index out of range issue
        # the following check fails in apex
        # if cached_x.grad_fn.next_functions[1][0].variable is not x:
        pass
def run_engine(rank, world_size, port):
    """Per-process entry point: launch the distributed env and run the
    parameterized training smoke test."""
    # init dist env
    colossalai.legacy.launch(
        config=CONFIG, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl"
    )
    run_train()
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_engine():
    """Run the legacy-engine smoke test on 2 processes."""
    spawn(run_engine, 2)
if __name__ == "__main__":
test_engine()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_engine/test_gradient_accumluation.py | tests/test_legacy/test_engine/test_gradient_accumluation.py | import os
from pathlib import Path
import pytest
import torch
import torch.nn as nn
from torch.optim import Adam
from torchvision import transforms
from torchvision.datasets import CIFAR10
from torchvision.models import resnet18
import colossalai
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.utils import get_dataloader
from colossalai.logging import get_dist_logger
from colossalai.testing import rerun_if_address_is_in_use, spawn
# Config
BATCH_SIZE = 2
NUM_CLASSES = 10
CONFIG = dict(
parallel=dict(pipeline=dict(size=1), tensor=dict(size=1, mode=None)), clip_grad_norm=1.0, gradient_accumulation=4
)
def run_no_pipeline(rank, world_size, port):
    """Verify gradient accumulation: with ``gradient_accumulation=4`` the
    parameters must stay unchanged for the first steps and only update once
    the accumulation window completes, while gradients keep accumulating."""
    # init dist env
    colossalai.legacy.launch(
        config=CONFIG, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl"
    )
    # build model
    model = resnet18(num_classes=10)
    # build dataloaders
    train_dataset = CIFAR10(
        root=Path(os.environ["DATA"]),
        download=True,
        transform=transforms.Compose(
            [transforms.ToTensor(), transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))]
        ),
    )
    train_dataloader = get_dataloader(
        dataset=train_dataset, shuffle=True, batch_size=BATCH_SIZE, pin_memory=True, drop_last=True
    )
    # build optimizer
    optimizer = Adam(model.parameters(), lr=0.001)
    criterion = nn.CrossEntropyLoss()
    engine, train_dataloader, *args = colossalai.legacy.initialize(
        model=model, optimizer=optimizer, criterion=criterion, train_dataloader=train_dataloader
    )
    get_dist_logger()
    rank = torch.distributed.get_rank()
    # track the first parameter's value and grad after every step
    param_track = []
    grad_track = []
    next(model.parameters()).retain_grad()
    engine.train()
    step = 0
    for img, label in train_dataloader:
        engine.zero_grad()
        img = img.cuda()
        label = label.cuda()
        output = engine(img)
        loss = engine.criterion(output, label)
        engine.backward(loss)
        engine.step()
        # check
        param_track.append(next(model.parameters())[0].clone())
        grad_track.append(next(model.parameters()).grad[0].clone())
        step += 1
        # stop exactly at the end of one accumulation window
        if step == CONFIG["gradient_accumulation"]:
            break
    assert not torch.all(grad_track[0] == grad_track[-1]), "grad should be different in different iterations"
    assert torch.all(param_track[0] == param_track[1]) and not torch.all(
        param_track[0] == param_track[-1]
    ), "param should be the same in the first few iterations and only changed in the last iteration"
    gpc.destroy()
    torch.cuda.empty_cache()
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_engine():
    """Run the gradient-accumulation check on 4 processes."""
    spawn(run_no_pipeline, 4)
if __name__ == "__main__":
test_engine()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_context/test_hybrid_parallel.py | tests/test_legacy/test_context/test_hybrid_parallel.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from pathlib import Path
import torch
from colossalai.legacy import launch
from colossalai.legacy.context import reset_seeds
from colossalai.legacy.context.parallel_mode import ParallelMode
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.global_variables import tensor_parallel_env as tp_env
from colossalai.testing import free_port, rerun_if_address_is_in_use, spawn
CONFIG_PATH_LIST = list(Path(__file__).parent.glob("configs/*.py"))
def check_data_parallel_rank(rank):
    """Verify the data-parallel local rank: consecutive model-parallel-sized
    blocks of global ranks form one DP group, and a rank's DP local rank is
    the index of the block it falls into."""
    global_world_size = gpc.get_world_size(ParallelMode.GLOBAL)
    mp_size = gpc.get_world_size(ParallelMode.MODEL)
    num_dp_groups = global_world_size // mp_size
    dp_local_rank = gpc.get_local_rank(ParallelMode.DATA)
    assert gpc.get_world_size(ParallelMode.DATA) == num_dp_groups
    for group_idx in range(num_dp_groups):
        ranks_in_dp_group = range(group_idx * mp_size, (group_idx + 1) * mp_size)
        if rank in ranks_in_dp_group:
            assert dp_local_rank == group_idx
def check_pipeline_parallel_rank(rank):
    """Verify the pipeline-stage rank: within one model-parallel group,
    consecutive tensor-parallel-sized blocks of ranks belong to successive
    pipeline stages."""
    mp_world_size = gpc.get_world_size(ParallelMode.MODEL)
    tp_world_size = gpc.get_world_size(ParallelMode.TENSOR)
    num_pipeline_stage = mp_world_size // tp_world_size
    pipeline_local_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
    for stage_idx in range(num_pipeline_stage):
        ranks_in_current_stage = range(stage_idx * tp_world_size, (stage_idx + 1) * tp_world_size)
        if rank in ranks_in_current_stage:
            assert stage_idx == pipeline_local_rank
def check_model_parallel_rank(rank):
    """Verify the model-parallel local rank is the global rank modulo the
    model-parallel group size."""
    mp_size = gpc.get_world_size(ParallelMode.MODEL)
    rank_within_mp_group = rank % mp_size
    mp_local_rank = gpc.get_local_rank(ParallelMode.MODEL)
    assert rank_within_mp_group == mp_local_rank
def check_tensor_parallel_rank(rank):
    """Dispatch to the mode-specific tensor-parallel rank check.

    Bug fix: the original compared the ``tensor_parallel_env`` object itself
    (``tp_env == "2.5d"``) instead of its ``mode`` attribute, so the 2.5d and
    3d branches could never run and those configs went unchecked.
    """
    if tp_env.mode == "2d":
        check_2d_tensor_parallel_rank(rank)
    elif tp_env.mode == "2.5d":
        check_2p5d_tensor_parallel_rank(rank)
    elif tp_env.mode == "3d":
        check_3d_tensor_parallel_rank(rank)
def get_tp_info():
    """Return (tp_local_rank, tp_world_size, num_tp_groups) for the current
    tensor-parallel configuration of the global context."""
    global_world_size = gpc.get_world_size(ParallelMode.GLOBAL)
    tp_world_size = gpc.get_world_size(ParallelMode.TENSOR)
    num_tp_groups = global_world_size // tp_world_size
    tp_local_rank = gpc.get_local_rank(ParallelMode.TENSOR)
    return tp_local_rank, tp_world_size, num_tp_groups
def check_2d_tensor_parallel_rank(rank):
    """Verify 2-D (SUMMA) tensor-parallel sub-ranks: within a TP group the
    local rank decomposes into (row, col) on a summa_dim x summa_dim grid."""
    tp_local_rank, tp_world_size, num_tp_groups = get_tp_info()
    for group_id in range(num_tp_groups):
        ranks_in_current_tp_group = range(group_id * tp_world_size, (group_id + 1) * tp_world_size)
        if rank in ranks_in_current_tp_group:
            col_local_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
            row_local_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
            assert col_local_rank == tp_local_rank // tp_env.summa_dim
            assert row_local_rank == tp_local_rank % tp_env.summa_dim
def check_2p5d_tensor_parallel_rank(rank):
    """Verify 2.5-D tensor-parallel sub-ranks (row / col / depth / xz) derived
    from the local TP rank and the summa/tesseract dimensions."""
    tp_local_rank, tp_world_size, num_tp_groups = get_tp_info()
    for group_id in range(num_tp_groups):
        ranks_in_current_tp_group = range(group_id * tp_world_size, (group_id + 1) * tp_world_size)
        if rank in ranks_in_current_tp_group:
            rp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
            cp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
            dp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)
            xp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_XZ)
            assert rp_rank == tp_local_rank % tp_env.summa_dim
            assert cp_rank == tp_local_rank // tp_env.tesseract_dim
            assert dp_rank == tp_local_rank // (tp_env.summa_dim**2)
            assert xp_rank == tp_local_rank // tp_env.summa_dim
def check_3d_tensor_parallel_rank(rank):
    """Verify 3-D tensor-parallel sub-ranks: input / weight / output ranks are
    the base-depth_3d digits of the local TP rank."""
    tp_local_rank, tp_world_size, num_tp_groups = get_tp_info()
    for group_id in range(num_tp_groups):
        ranks_in_current_tp_group = range(group_id * tp_world_size, (group_id + 1) * tp_world_size)
        if rank in ranks_in_current_tp_group:
            ip_rank = gpc.get_local_rank(ParallelMode.PARALLEL_3D_INPUT)
            wp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT)
            op_rank = gpc.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT)
            assert ip_rank == tp_local_rank % tp_env.depth_3d
            assert wp_rank == tp_local_rank // tp_env.depth_3d
            assert op_rank == tp_local_rank // (tp_env.depth_3d**2)
def init_context(config_path, rank, world_size, backend, port, host):
    """Launch a global context from one config file, run all rank-placement
    checks, then tear the context down so the next config can be tested."""
    dist_args = dict(
        config=config_path, rank=rank, world_size=world_size, backend=backend, port=port, host=host, verbose=True
    )
    launch(**dist_args)
    check_tensor_parallel_rank(rank)
    check_data_parallel_rank(rank)
    check_pipeline_parallel_rank(rank)
    check_model_parallel_rank(rank)
    gpc.destroy()
    torch.cuda.empty_cache()
def run_dist(rank, world_size, port, backend, port_list, host):
    """Per-process entry point: test every parallel config in turn, each on
    its own pre-allocated port, resetting RNG seed state between configs."""
    for config_path, current_port in zip(CONFIG_PATH_LIST, port_list):
        init_context(
            config_path=config_path, rank=rank, world_size=world_size, backend=backend, port=current_port, host=host
        )
        reset_seeds()
@rerun_if_address_is_in_use()
def test_context():
    """
    As no computation or communication is done, we can run this test on CPU.
    """
    world_size = 32
    # reserve one distinct free port per config so sequential launches in the
    # same processes never collide
    port_list = []
    for _ in range(len(CONFIG_PATH_LIST)):
        while True:
            port = free_port()
            if port not in port_list:
                port_list.append(port)
                break
    spawn(run_dist, world_size, backend="gloo", port_list=port_list, host="localhost")
if __name__ == "__main__":
test_context()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_context/configs/parallel_2p5d_init.py | tests/test_legacy/test_context/configs/parallel_2p5d_init.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
parallel = dict(pipeline=dict(size=2), tensor=dict(size=8, depth=2, mode="2.5d"))
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_context/configs/parallel_2d_init.py | tests/test_legacy/test_context/configs/parallel_2d_init.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
parallel = dict(pipeline=dict(size=2), tensor=dict(size=4, mode="2d"))
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_context/configs/parallel_3d_init.py | tests/test_legacy/test_context/configs/parallel_3d_init.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
parallel = dict(pipeline=dict(size=2), tensor=dict(size=8, mode="3d"))
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_data/test_deterministic_dataloader.py | tests/test_legacy/test_data/test_deterministic_dataloader.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
from pathlib import Path
import torch
import torch.distributed as dist
from torchvision import datasets, transforms
import colossalai
from colossalai.context import Config
from colossalai.legacy.context import ParallelMode
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.utils import get_dataloader
from colossalai.testing import rerun_if_address_is_in_use, spawn
CONFIG = Config(
dict(
train_data=dict(
dataset=dict(
type="CIFAR10",
root=Path(os.environ["DATA"]),
train=True,
download=True,
),
dataloader=dict(num_workers=2, batch_size=2, shuffle=True),
),
parallel=dict(
pipeline=dict(size=1),
tensor=dict(size=1, mode=None),
),
seed=1024,
)
)
def run_data_sampler(rank, world_size, port):
    """Check that WITHOUT a data-parallel sampler every rank draws the same
    first batch (the dataloader is not rank-aware)."""
    dist_args = dict(config=CONFIG, rank=rank, world_size=world_size, backend="gloo", port=port, host="localhost")
    colossalai.legacy.launch(**dist_args)

    # build dataset
    transform_pipeline = [transforms.ToTensor(), transforms.RandomCrop(size=32, padding=4)]
    transform_pipeline = transforms.Compose(transform_pipeline)
    dataset = datasets.CIFAR10(root=Path(os.environ["DATA"]), train=True, download=True, transform=transform_pipeline)

    # build dataloader
    dataloader = get_dataloader(dataset, batch_size=8, add_sampler=False)

    data_iter = iter(dataloader)
    # bug fix: DataLoader iterators have no `.next()` method on Python 3 /
    # modern PyTorch — use the builtin next() instead
    img, label = next(data_iter)
    img = img[0]

    # broadcast rank 0's first image so every rank can compare against it
    if gpc.get_local_rank(ParallelMode.DATA) != 0:
        img_to_compare = img.clone()
    else:
        img_to_compare = img
    dist.broadcast(img_to_compare, src=0, group=gpc.get_group(ParallelMode.DATA))

    if gpc.get_local_rank(ParallelMode.DATA) != 0:
        # this is without sampler
        # this should be false if data parallel sampler to given to the dataloader
        assert torch.equal(
            img, img_to_compare
        ), "Same image was distributed across ranks and expected it to be the same"
    torch.cuda.empty_cache()
@rerun_if_address_is_in_use()
def test_data_sampler():
    """Run the no-sampler determinism check on 4 processes."""
    spawn(run_data_sampler, 4)
if __name__ == "__main__":
test_data_sampler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_data/test_cifar10_dataset.py | tests/test_legacy/test_data/test_cifar10_dataset.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
from pathlib import Path
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
def test_cifar10_dataset():
    """Smoke test: CIFAR10 can be built and one batch fetched from a DataLoader."""
    # build transform
    transform_pipeline = [transforms.ToTensor()]
    transform_pipeline = transforms.Compose(transform_pipeline)

    # build dataset
    dataset = datasets.CIFAR10(root=Path(os.environ["DATA"]), train=True, download=True, transform=transform_pipeline)

    # build dataloader
    dataloader = DataLoader(dataset=dataset, batch_size=4, shuffle=True, num_workers=2)
    data_iter = iter(dataloader)
    # bug fix: `.next()` was removed from DataLoader iterators; use builtin next()
    img, label = next(data_iter)
if __name__ == "__main__":
test_cifar10_dataset()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_data/test_data_parallel_sampler.py | tests/test_legacy/test_data/test_data_parallel_sampler.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
from pathlib import Path
import torch
import torch.distributed as dist
from torchvision import datasets, transforms
import colossalai
from colossalai.context import Config
from colossalai.legacy.context import ParallelMode
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.utils import get_dataloader
from colossalai.testing import rerun_if_address_is_in_use, spawn
CONFIG = Config(
dict(
parallel=dict(
pipeline=dict(size=1),
tensor=dict(size=1, mode=None),
),
seed=1024,
)
)
def run_data_sampler(rank, world_size, port):
    """Check that WITH the data-parallel sampler each rank receives a
    different first batch."""
    dist_args = dict(config=CONFIG, rank=rank, world_size=world_size, backend="gloo", port=port, host="localhost")
    colossalai.legacy.launch(**dist_args)
    print("finished initialization")

    # build dataset
    transform_pipeline = [transforms.ToTensor()]
    transform_pipeline = transforms.Compose(transform_pipeline)
    dataset = datasets.CIFAR10(root=Path(os.environ["DATA"]), train=True, download=True, transform=transform_pipeline)

    # build dataloader
    dataloader = get_dataloader(dataset, batch_size=8, add_sampler=True)

    data_iter = iter(dataloader)
    # bug fix: DataLoader iterators have no `.next()` method on Python 3 /
    # modern PyTorch — use the builtin next() instead
    img, label = next(data_iter)
    img = img[0]

    # broadcast rank 0's first image so every rank can compare against it
    if gpc.get_local_rank(ParallelMode.DATA) != 0:
        img_to_compare = img.clone()
    else:
        img_to_compare = img
    dist.broadcast(img_to_compare, src=0, group=gpc.get_group(ParallelMode.DATA))

    if gpc.get_local_rank(ParallelMode.DATA) != 0:
        assert not torch.equal(
            img, img_to_compare
        ), "Same image was distributed across ranks but expected it to be different"
    torch.cuda.empty_cache()
@rerun_if_address_is_in_use()
def test_data_sampler():
    """Run the data-parallel-sampler check on 4 processes."""
    spawn(run_data_sampler, 4)
if __name__ == "__main__":
test_data_sampler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_amp/test_torch_fp16.py | tests/test_legacy/test_amp/test_torch_fp16.py | import copy
import pytest
import torch
import colossalai
from colossalai.legacy.amp import convert_to_apex_amp, convert_to_torch_amp
from colossalai.testing import assert_close_loose, clear_cache_before_run, rerun_if_address_is_in_use, spawn
from tests.kit.model_zoo import model_zoo
def run_torch_amp():
    """
    In this test, we compare the torch amp and apex amp implemented in colossalai
    """
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # create layer
    test_models = ["torchvision_resnet18", "custom_simple_net"]
    for test_name in test_models:
        model_builder, data_gen_fn, *_ = next(iter(model_zoo.get_sub_registry(test_name).values()))
        # create model
        torch_amp_model = model_builder().cuda()
        # deepcopy so both AMP variants start from identical weights
        apex_amp_model = copy.deepcopy(torch_amp_model)
        # create optimizer
        # we use SGD here, since the correctness of gradient clipping can't be tested with Adam
        torch_amp_optimizer = torch.optim.SGD(torch_amp_model.parameters(), lr=1e-3)
        apex_amp_optimizer = torch.optim.SGD(apex_amp_model.parameters(), lr=1e-3)
        # inject torch and apex amp
        torch_amp_config = dict(init_scale=128, enabled=True)
        torch_amp_model, torch_amp_optimizer, _ = convert_to_torch_amp(
            torch_amp_model, torch_amp_optimizer, amp_config=torch_amp_config
        )
        apex_amp_config = dict(opt_level="O1", loss_scale=128)
        apex_amp_model, apex_amp_optimizer = convert_to_apex_amp(apex_amp_model, apex_amp_optimizer, apex_amp_config)
        # create data
        data = data_gen_fn()
        data = {k: v.cuda() if isinstance(v, torch.Tensor) else v for k, v in data.items()}
        # forward pass
        torch_amp_output = torch_amp_model(**data)
        apex_amp_output = apex_amp_model(**data)
        assert_close_loose(torch_amp_output, apex_amp_output)
        for torch_amp_param, apex_amp_param in zip(torch_amp_model.parameters(), apex_amp_model.parameters()):
            assert_close_loose(torch_amp_param, apex_amp_param)
        # backward
        # use sum() to get big gradient
        torch_amp_optimizer.backward(torch_amp_output.sum())
        apex_amp_optimizer.backward(apex_amp_output.sum())
        # check grad
        # In apex amp, grad is not scaled before backward, but torch amp does
        for torch_amp_param, apex_amp_param in zip(torch_amp_model.parameters(), apex_amp_model.parameters()):
            assert_close_loose(torch_amp_param.grad, apex_amp_param.grad * apex_amp_config["loss_scale"])
        # clip gradient
        apex_amp_optimizer.clip_grad_norm(model=apex_amp_model, max_norm=1.0)
        torch_amp_optimizer.clip_grad_norm(model=torch_amp_model, max_norm=1.0)
        # step
        torch_amp_optimizer.step()
        apex_amp_optimizer.step()
        # check updated param and grad
        for torch_amp_param, apex_amp_param in zip(torch_amp_model.parameters(), apex_amp_model.parameters()):
            assert_close_loose(torch_amp_param.grad, apex_amp_param.grad)
            assert_close_loose(torch_amp_param, apex_amp_param)
def run_dist(rank, world_size, port):
    """Per-process entry point: init the distributed env and run the AMP comparison."""
    colossalai.legacy.launch(rank=rank, world_size=world_size, port=port, host="localhost")
    run_torch_amp()
@pytest.mark.dist
@rerun_if_address_is_in_use()
@clear_cache_before_run()
def test_torch_amp():
    """Run the torch-vs-apex AMP comparison on a single process."""
    spawn(run_dist, 1)
if __name__ == "__main__":
test_torch_amp()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_amp/test_naive_fp16.py | tests/test_legacy/test_amp/test_naive_fp16.py | import copy
import pytest
import torch
import colossalai
from colossalai.legacy.amp import convert_to_apex_amp, convert_to_naive_amp
from colossalai.testing import assert_close_loose, clear_cache_before_run, rerun_if_address_is_in_use, spawn
from tests.kit.model_zoo import model_zoo
def check_equal(a, b):
    """Assert that ``a`` and ``b`` match element-wise within a small tolerance,
    comparing in float32 so half-precision inputs are handled uniformly."""
    lhs = a.float()
    rhs = b.float()
    assert torch.allclose(lhs, rhs, rtol=1e-4, atol=1e-3), f"a = {a}, b = {b}"
def run_naive_amp():
    """
    In this test, we compare the naive fp16 optimizer implemented in colossalai
    and fp32 torch optimizer
    """
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # create layer
    test_models = ["custom_repeated_computed_layers", "custom_nested_model", "torchvision_resnet18"]
    for test_name in test_models:
        model_builder, data_gen_fn, *_ = next(iter(model_zoo.get_sub_registry(test_name).values()))
        # create model
        naive_amp_model = model_builder().cuda()
        # deepcopy so both AMP variants start from identical weights
        apex_amp_model = copy.deepcopy(naive_amp_model)
        # create optimizer
        # we use SGD here, since the correctness of gradient clipping can't be tested with Adam
        naive_amp_optimizer = torch.optim.SGD(naive_amp_model.parameters(), lr=1e-3)
        apex_amp_optimizer = torch.optim.SGD(apex_amp_model.parameters(), lr=1e-3)
        # inject naive and apex amp
        naive_amp_config = dict(initial_scale=128, clip_grad_norm=1.0)
        naive_amp_model, naive_amp_optimizer = convert_to_naive_amp(
            naive_amp_model, naive_amp_optimizer, naive_amp_config
        )
        apex_amp_config = dict(opt_level="O2", loss_scale=128, keep_batchnorm_fp32=False)
        apex_amp_model, apex_amp_optimizer = convert_to_apex_amp(apex_amp_model, apex_amp_optimizer, apex_amp_config)
        # create data
        data = data_gen_fn()
        data = {k: v.cuda() if isinstance(v, torch.Tensor) else v for k, v in data.items()}
        # forward pass
        naive_amp_output = naive_amp_model(**data)
        apex_amp_output = apex_amp_model(**data)
        assert_close_loose(naive_amp_output, apex_amp_output)
        # backward
        # use sum() to get big gradient
        naive_amp_optimizer.backward(naive_amp_output.sum())
        apex_amp_optimizer.backward(apex_amp_output.sum())
        # check grad
        for naive_amp_param, apex_amp_param in zip(naive_amp_model.parameters(), apex_amp_model.parameters()):
            assert_close_loose(naive_amp_param.grad, apex_amp_param.grad)
        # clip gradient
        # naive amp clips internally during step() via clip_grad_norm in its config
        apex_amp_optimizer.clip_grad_norm(model=apex_amp_model, max_norm=1.0)
        # step
        naive_amp_optimizer.step()
        apex_amp_optimizer.step()
        # check updated param
        for naive_amp_param, apex_amp_param in zip(naive_amp_model.parameters(), apex_amp_model.parameters()):
            assert_close_loose(naive_amp_param, apex_amp_param)
def run_dist(rank, world_size, port):
    """Per-process entry point: init the distributed env and run the naive-AMP comparison."""
    colossalai.legacy.launch(rank=rank, world_size=world_size, port=port, host="localhost")
    run_naive_amp()
@pytest.mark.dist
@rerun_if_address_is_in_use()
@clear_cache_before_run()
def test_naive_amp():
    """Run the naive-vs-apex AMP comparison on a single process."""
    spawn(run_dist, 1)
if __name__ == "__main__":
test_naive_amp()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_zero/test_commons.py | tests/test_legacy/test_zero/test_commons.py | import torch
import colossalai
from colossalai.legacy.zero.gemini.tensor_utils import colo_model_data_tensor_move, colo_model_data_tensor_move_inline
from colossalai.legacy.zero.sharded_param import ShardedTensor
from colossalai.testing import rerun_if_address_is_in_use, spawn
def run_tensor_move(rank, world_size, port):
    """Exercise colo_model_data_tensor_move(_inline) between plain tensors
    and ShardedTensors across CPU/CUDA devices."""
    # bug fix: forward the spawned process rank instead of hard-coding 0
    # (equivalent today only because the test runs with world_size=1)
    colossalai.legacy.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")

    # move a CUDA tensor's data into a plain CPU tensor
    src_t = torch.ones(2, 3).cuda()
    tgt_t = torch.zeros(2, 3)
    colo_model_data_tensor_move(src_t, tgt_t)
    # bug fix: tgt_t is a plain tensor here, so the failure message must not
    # dereference `.payload` (that would raise AttributeError on failure)
    assert torch.sum(tgt_t) == 6.0, f"{torch.sum(tgt_t)} vs. 6.0"

    # move a CPU tensor into a CUDA half tensor; the source is emptied
    src_t = torch.ones(2, 3)
    tgt_t = torch.zeros(2, 3).cuda().half()
    colo_model_data_tensor_move(src_t, tgt_t)
    # the src_t has been removed
    assert src_t.numel() == 0
    assert torch.sum(tgt_t) == 6.0, f"{torch.sum(tgt_t)} vs. 6.0"

    # ShardedTensor -> ShardedTensor move, then an inline device move back to CPU
    src_t = ShardedTensor(torch.ones(2, 3))
    tgt_t = ShardedTensor(torch.zeros(2, 3).cuda().half())
    colo_model_data_tensor_move(src_t, tgt_t)
    assert torch.sum(tgt_t.payload) == 6.0, f"{torch.sum(tgt_t.payload)} vs. 6.0"
    assert tgt_t.device.type == "cuda"
    colo_model_data_tensor_move_inline(tgt_t, torch.device("cpu"))
    assert tgt_t.device.type == "cpu"
@rerun_if_address_is_in_use()
def test_tensor_move():
    """Run the tensor-move checks on a single process."""
    spawn(run_tensor_move, 1)
if __name__ == "__main__":
test_tensor_move()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_utils/test_norm_gradient_clipping.py | tests/test_legacy/test_utils/test_norm_gradient_clipping.py | import pytest
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad_norm_
import colossalai
from colossalai.accelerator import get_accelerator
from colossalai.legacy.tensor import ColoTensorSpec, ProcessGroup, distspec
from colossalai.legacy.utils.common import clip_grad_norm
from colossalai.logging import disable_existing_loggers
from colossalai.tensor.colo_parameter import ColoParameter
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
def close(num: float, other: float, rtol: float = 1e-5, atol: float = 1e-8):
    """Return True when ``num`` equals ``other`` within a combined
    absolute (``atol``) and relative-to-``other`` (``rtol``) tolerance."""
    tolerance = atol + rtol * other
    return abs(num - other) <= tolerance
def shard_param(p: ColoParameter) -> None:
    """Shard ``p`` row-wise over its tensor-parallel group and replace its
    gradient with the matching local chunk."""
    pg = p.get_process_group()
    p._redistribute(distspec.ShardSpec([0], [pg.tp_world_size()]))
    # keep the grad consistent with the now-sharded parameter
    p.grad = p.grad.chunk(pg.tp_world_size(), 0)[pg.tp_local_rank()].clone().detach()
def check_grad_equal(p: Parameter, colo_p: ColoParameter) -> None:
    """Assert the gradient of a plain parameter matches its Colo counterpart,
    chunking the plain grad when the Colo parameter is sharded."""
    pg = colo_p.get_process_group()
    if p.shape != colo_p.shape:
        # colo_p is row-sharded: compare against the matching local chunk
        grad = p.grad.chunk(pg.tp_world_size(), 0)[pg.tp_local_rank()]
    else:
        grad = p.grad
    assert torch.allclose(grad, colo_p.grad), f"diff: {torch.abs(grad - colo_p.grad)}"
@parameterize("dtype", [torch.float])
@parameterize("device", ["mixed", "cuda", "cpu"])
@parameterize("norm_type", [2.0, 3.0, float("inf")])
def run_grad_clip_norm(world_size: int, dtype: torch.dtype, device: str, norm_type: float):
    """Compare colossalai's clip_grad_norm against torch's clip_grad_norm_ for
    identical parameters/gradients across device placements and norm types."""
    print(f"{world_size}, {dtype}, {device}, {norm_type}")
    cuda_device = get_accelerator().get_current_device()
    # choose the device for each of the 4 test parameters
    devices = [cuda_device] * 4
    if device == "cpu":
        devices = [torch.device("cpu")] * 4
    elif device == "mixed":
        devices = [cuda_device] * 2 + [torch.device("cpu")] * 2
    pg = ProcessGroup(tp_degree=world_size)
    params = [Parameter(torch.empty(4, 4, dtype=dtype, device=devices[i])) for i in range(4)]
    colo_params = [
        ColoParameter(torch.empty(4, 4, dtype=dtype, device=devices[i]), spec=ColoTensorSpec(pg)) for i in range(4)
    ]
    # give both parameter sets identical gradients
    for p, colo_p in zip(params, colo_params):
        grad = torch.rand_like(p)
        p.grad = grad
        colo_p.grad = grad.clone().detach()
    # shard two of the colo parameters to cover the tensor-parallel path
    shard_param(colo_params[0])
    shard_param(colo_params[2])
    torch_norm = clip_grad_norm_(params, 1.0, norm_type=norm_type)
    colo_norm = clip_grad_norm(colo_params, 1.0, norm_type=norm_type)
    assert close(torch_norm, colo_norm), f"diff: {abs(torch_norm-colo_norm)}"
    for p, colo_p in zip(params, colo_params):
        check_grad_equal(p, colo_p)
def run_dist(rank, world_size, port):
    """Per-process entry point: init the distributed env and run the
    parameterized gradient-clipping comparison."""
    disable_existing_loggers()
    colossalai.legacy.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    run_grad_clip_norm(world_size=world_size)
@pytest.mark.skip("this need to be updated")
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@rerun_if_address_is_in_use()
def test_zero_clip_grad(world_size: int):
    """Spawn ``world_size`` processes and run the gradient-clipping checks."""
    spawn(run_dist, world_size)
if __name__ == "__main__":
test_zero_clip_grad(2)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_utils/test_memory.py | tests/test_legacy/test_utils/test_memory.py | import pytest
import colossalai
from colossalai.accelerator import get_accelerator
from colossalai.legacy.utils.memory import colo_device_memory_capacity, colo_set_process_memory_fraction
from colossalai.testing import spawn
def _run_colo_set_process_memory_fraction_and_colo_device_memory_capacity():
    """Halving the process memory fraction must halve the reported device capacity."""
    full_capacity = colo_device_memory_capacity(get_accelerator().get_current_device())
    colo_set_process_memory_fraction(0.5)
    half_capacity = colo_device_memory_capacity(get_accelerator().get_current_device())
    assert half_capacity * 2 == full_capacity
def run_dist(rank, world_size, port):
    # Worker entry: initialize the legacy runtime, then exercise the
    # memory-fraction / device-capacity check on this rank.
    colossalai.legacy.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    _run_colo_set_process_memory_fraction_and_colo_device_memory_capacity()
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [3, 4])
def test_memory_utils(world_size):
    """Spawn `world_size` processes and verify capacity halves when the fraction is halved.

    NOTE(review): the __main__ guard runs this with world_size=2, which is not in
    the parametrized values — presumably a quick local-run shortcut; confirm.
    """
    spawn(run_dist, world_size)
if __name__ == "__main__":
test_memory_utils(world_size=2)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_utils/test_activation_checkpointing.py | tests/test_legacy/test_utils/test_activation_checkpointing.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
import torch.nn.functional as F
from colossalai.legacy.context.parallel_mode import ParallelMode
from colossalai.legacy.context.random import add_seed, reset_seeds, seed, set_mode
from colossalai.legacy.utils.activation_checkpoint import checkpoint
from colossalai.testing import clear_cache_before_run, parameterize
def forward(x, weight):
    """Matmul followed by dropout drawn from the DATA-parallel RNG state."""
    product = torch.matmul(x, weight)
    with seed(ParallelMode.DATA):
        dropped = F.dropout(product, p=0.4, training=True)
    return dropped
def forward_inplace_ckpt(x, weight, cpu_offload=False):
    """Same pipeline as forward_inplace, but the in-place ReLU runs under
    colossalai's activation checkpoint (non-reentrant)."""
    hidden = torch.matmul(x, weight)
    norm = torch.nn.BatchNorm1d(4, affine=False).to(device="cuda")
    hidden = norm(hidden)

    def _relu_inplace(t):
        return F.relu(t, inplace=True)

    return checkpoint(_relu_inplace, cpu_offload, hidden, use_reentrant=False)
def forward_inplace(x, weight):
    """Matmul -> BatchNorm1d (no affine) -> in-place ReLU, all on CUDA."""
    hidden = torch.matmul(x, weight)
    hidden = torch.nn.BatchNorm1d(4, affine=False).to(device="cuda")(hidden)
    return F.relu(hidden, inplace=True)
@clear_cache_before_run()
@parameterize("use_reentrant", [True, False])
@parameterize("cpu_offload", [True, False])
def test_activation_checkpointing(cpu_offload, use_reentrant):
    """Verify checkpointed forward/backward reproduces the plain run exactly.

    Captures the GLOBAL and DATA CUDA RNG states, runs the reference forward,
    restores the states, runs the checkpointed forward, and compares input
    gradients. When use_reentrant is False, repeats the check with an in-place
    ReLU inside the checkpointed region.
    """
    # as seed manager is singleton
    # if we don't reset seeds here,
    # other tests might affect this test
    reset_seeds()

    # We put initialization here to avoid change cuda rng state below
    inputs = torch.rand(2, 2, requires_grad=True, device="cuda")
    weight = torch.rand(2, 4, requires_grad=True, device="cuda")

    # Get a copy of input tensors
    inputs_ = torch.empty(2, 2, requires_grad=True, device="cuda")
    inputs_.data.copy_(inputs.data)
    weight_ = torch.empty(2, 4, requires_grad=True, device="cuda")
    weight_.data.copy_(weight.data)

    add_seed(ParallelMode.GLOBAL, 1024)
    add_seed(ParallelMode.DATA, 1026)
    set_mode(ParallelMode.GLOBAL)
    global_cuda_rng_state = torch.cuda.get_rng_state()
    set_mode(ParallelMode.DATA)
    data_parallel_cuda_rng_state = torch.cuda.get_rng_state()
    set_mode(ParallelMode.GLOBAL)

    # Reference run (no checkpointing).
    out = forward(inputs, weight)
    loss = out.sum()
    loss.backward()

    # Recover cuda rng states
    set_mode(ParallelMode.GLOBAL)
    torch.cuda.set_rng_state(global_cuda_rng_state)
    set_mode(ParallelMode.DATA)
    torch.cuda.set_rng_state(data_parallel_cuda_rng_state)
    set_mode(ParallelMode.GLOBAL)

    out = checkpoint(forward, cpu_offload, inputs_, weight_, use_reentrant=use_reentrant)
    loss = out.sum()
    loss.backward()

    assert torch.all(inputs.grad == inputs_.grad), "Gradient of the input does not match"
    torch.cuda.empty_cache()

    # Extra test for use_reentrant=False
    if not use_reentrant:  # idiomatic form of `use_reentrant == False`
        # Recover cuda rng states
        set_mode(ParallelMode.GLOBAL)
        torch.cuda.set_rng_state(global_cuda_rng_state)
        set_mode(ParallelMode.DATA)
        torch.cuda.set_rng_state(data_parallel_cuda_rng_state)
        set_mode(ParallelMode.GLOBAL)

        out = forward_inplace(inputs, weight)
        loss = out.sum()
        loss.backward()

        # Recover cuda rng states
        set_mode(ParallelMode.GLOBAL)
        torch.cuda.set_rng_state(global_cuda_rng_state)
        set_mode(ParallelMode.DATA)
        torch.cuda.set_rng_state(data_parallel_cuda_rng_state)
        set_mode(ParallelMode.GLOBAL)

        out = forward_inplace_ckpt(inputs_, weight_, cpu_offload=cpu_offload)
        loss = out.sum()
        loss.backward()

        assert torch.all(inputs.grad == inputs_.grad), "Gradient of the input does not match"
        torch.cuda.empty_cache()

    # as seed manager is singleton
    # if we don't reset seeds here,
    # other tests will fail if running together with this test
    # as other tests can't overwrite the seed set by this test
    reset_seeds()
if __name__ == "__main__":
test_activation_checkpointing(False, False)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_utils/test_checkpoint/test_checkpoint_2d.py | tests/test_legacy/test_utils/test_checkpoint/test_checkpoint_2d.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pprint
import pytest
import torch
import torch.nn as nn
import colossalai.legacy.nn as col_nn
from colossalai.legacy.context.parallel_mode import ParallelMode
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.initialize import launch
from colossalai.legacy.utils import is_using_pp
from colossalai.legacy.utils.checkpointing import gather_pipeline_parallel_state_dict, load_checkpoint, save_checkpoint
from colossalai.logging import disable_existing_loggers
from colossalai.testing import rerun_if_address_is_in_use, skip_if_not_enough_gpus, spawn
def build_pipeline(model):
    """Keep layers in this pipeline stage's [start, end) range; replace the rest with Identity."""
    from colossalai.legacy.pipeline.utils import partition_uniform

    pipeline_size = gpc.get_world_size(ParallelMode.PIPELINE)
    pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
    depth = len(model)
    start, end = partition_uniform(depth, pipeline_size, 1)[pipeline_rank][0]
    layers = [model[i] if start <= i < end else nn.Identity() for i in range(depth)]
    return nn.Sequential(*layers)
def check_equal(A, B):
    """Assert tensors A and B are elementwise close within loose tolerances."""
    is_close = torch.allclose(A, B, rtol=1e-3, atol=1e-2)
    assert is_close
def check_checkpoint_2d(rank, world_size, port):
    """Worker: save a plain nn.Sequential checkpoint, reload it into the
    equivalent 2D tensor-parallel (optionally pipelined) model, and verify the
    gathered state dict matches the original weights on rank 0.
    """
    # Intended topology for this test: pipeline=2, tensor=4 in "2d" mode.
    # (A former local `config` dict holding this was never passed to `launch`,
    # so it was removed as dead code and recorded here instead.)
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")

    m1 = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 4))
    sd1 = m1.state_dict()
    if gpc.get_global_rank() == 0:
        print(f"Rank {gpc.get_global_rank()}:\n{pprint.pformat(sd1)}\n")
    save_checkpoint("test.pt", 0, m1)

    m2 = nn.Sequential(col_nn.Linear(4, 8), col_nn.Linear(8, 4))
    if is_using_pp():
        m2 = build_pipeline(m2)
    load_checkpoint("test.pt", m2)
    sd2 = m2.state_dict()
    if is_using_pp() and gpc.get_local_rank(ParallelMode.TENSOR) == 0:
        sd2 = gather_pipeline_parallel_state_dict(sd2)
    print(f"Rank {gpc.get_global_rank()}:\n{pprint.pformat(sd2)}\n")

    if gpc.get_global_rank() == 0:
        for k, v in sd1.items():
            assert k in sd2
            check_equal(v, sd2[k].to(torch.device("cpu")))
@pytest.mark.dist
@pytest.mark.skip("takes too long")
@skip_if_not_enough_gpus(min_gpus=8)
@rerun_if_address_is_in_use()
def test_checkpoint_2d():
    """Spawn 8 processes to round-trip a checkpoint through a 2D-parallel model."""
    spawn(check_checkpoint_2d, 8)
if __name__ == "__main__":
test_checkpoint_2d()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_utils/test_checkpoint/test_checkpoint_2p5d.py | tests/test_legacy/test_utils/test_checkpoint/test_checkpoint_2p5d.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pprint
import pytest
import torch
import torch.nn as nn
import colossalai.legacy.nn as col_nn
from colossalai.legacy.context.parallel_mode import ParallelMode
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.initialize import launch
from colossalai.legacy.utils import is_using_pp
from colossalai.legacy.utils.checkpointing import gather_pipeline_parallel_state_dict, load_checkpoint, save_checkpoint
from colossalai.logging import disable_existing_loggers
from colossalai.testing import rerun_if_address_is_in_use, skip_if_not_enough_gpus, spawn
def build_pipeline(model):
    """Keep layers in this pipeline stage's [start, end) range; replace the rest with Identity."""
    from colossalai.legacy.pipeline.utils import partition_uniform

    pipeline_size = gpc.get_world_size(ParallelMode.PIPELINE)
    pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
    depth = len(model)
    start, end = partition_uniform(depth, pipeline_size, 1)[pipeline_rank][0]
    layers = [model[i] if start <= i < end else nn.Identity() for i in range(depth)]
    return nn.Sequential(*layers)
def check_equal(A, B):
    """Assert tensors A and B are elementwise close within loose tolerances."""
    is_close = torch.allclose(A, B, rtol=1e-3, atol=1e-2)
    assert is_close
def check_checkpoint_2p5d(rank, world_size, port):
    """Worker: save a plain nn.Sequential checkpoint, reload it into the
    equivalent 2.5D tensor-parallel (optionally pipelined) model, and verify the
    gathered state dict matches the original weights on rank 0.
    """
    # Intended topology for this test: pipeline=2, tensor=4 with depth=1 in "2.5d" mode.
    # (A former local `config` dict holding this was never passed to `launch`,
    # so it was removed as dead code and recorded here instead.)
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")

    m1 = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 4))
    sd1 = m1.state_dict()
    if gpc.get_global_rank() == 0:
        print(f"Rank {gpc.get_global_rank()}:\n{pprint.pformat(sd1)}\n")
    save_checkpoint("test.pt", 0, m1)

    m2 = nn.Sequential(col_nn.Linear(4, 8), col_nn.Linear(8, 4))
    if is_using_pp():
        m2 = build_pipeline(m2)
    load_checkpoint("test.pt", m2)
    sd2 = m2.state_dict()
    if is_using_pp() and gpc.get_local_rank(ParallelMode.TENSOR) == 0:
        sd2 = gather_pipeline_parallel_state_dict(sd2)
    print(f"Rank {gpc.get_global_rank()}:\n{pprint.pformat(sd2)}\n")

    if gpc.get_global_rank() == 0:
        for k, v in sd1.items():
            assert k in sd2
            check_equal(v, sd2[k].to(torch.device("cpu")))
@pytest.mark.dist
@pytest.mark.skip("takes too long")
@skip_if_not_enough_gpus(min_gpus=8)
@rerun_if_address_is_in_use()
def test_checkpoint_2p5d():
    """Spawn 8 processes to round-trip a checkpoint through a 2.5D-parallel model."""
    spawn(check_checkpoint_2p5d, 8)
if __name__ == "__main__":
test_checkpoint_2p5d()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_utils/test_checkpoint/test_checkpoint_3d.py | tests/test_legacy/test_utils/test_checkpoint/test_checkpoint_3d.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pprint
import pytest
import torch
import torch.nn as nn
import colossalai.legacy.nn as col_nn
from colossalai.legacy.context.parallel_mode import ParallelMode
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.initialize import launch
from colossalai.legacy.utils import is_using_pp
from colossalai.legacy.utils.checkpointing import gather_pipeline_parallel_state_dict, load_checkpoint, save_checkpoint
from colossalai.logging import disable_existing_loggers
from colossalai.testing import rerun_if_address_is_in_use, skip_if_not_enough_gpus, spawn
def build_pipeline(model):
    """Keep layers in this pipeline stage's [start, end) range; replace the rest with Identity."""
    from colossalai.legacy.pipeline.utils import partition_uniform

    pipeline_size = gpc.get_world_size(ParallelMode.PIPELINE)
    pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
    depth = len(model)
    start, end = partition_uniform(depth, pipeline_size, 1)[pipeline_rank][0]
    layers = [model[i] if start <= i < end else nn.Identity() for i in range(depth)]
    return nn.Sequential(*layers)
def check_equal(A, B):
    """Assert tensors A and B are elementwise close within loose tolerances."""
    is_close = torch.allclose(A, B, rtol=1e-3, atol=1e-2)
    assert is_close
def check_checkpoint_3d(rank, world_size, port):
    """Worker: save a plain nn.Sequential checkpoint, reload it into the
    equivalent 3D tensor-parallel model, and verify the gathered state dict
    matches the original weights on rank 0.
    """
    # Intended topology for this test: pipeline=1, tensor=8 in "3d" mode.
    # (A former local `config` dict holding this was never passed to `launch`,
    # so it was removed as dead code and recorded here instead.)
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")

    m1 = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 4))
    sd1 = m1.state_dict()
    if gpc.get_global_rank() == 0:
        print(f"Rank {gpc.get_global_rank()}:\n{pprint.pformat(sd1)}\n")
    save_checkpoint("test.pt", 0, m1)

    m2 = nn.Sequential(col_nn.Linear(4, 8), col_nn.Linear(8, 4))
    if is_using_pp():
        m2 = build_pipeline(m2)
    load_checkpoint("test.pt", m2)
    sd2 = m2.state_dict()
    if is_using_pp() and gpc.get_local_rank(ParallelMode.TENSOR) == 0:
        sd2 = gather_pipeline_parallel_state_dict(sd2)
    print(f"Rank {gpc.get_global_rank()}:\n{pprint.pformat(sd2)}\n")

    if gpc.get_global_rank() == 0:
        for k, v in sd1.items():
            assert k in sd2
            check_equal(v, sd2[k].to(torch.device("cpu")))
@pytest.mark.dist
@pytest.mark.skip("takes too long")
@skip_if_not_enough_gpus(min_gpus=8)
@rerun_if_address_is_in_use()
def test_checkpoint_3d():
    """Spawn 8 processes to round-trip a checkpoint through a 3D-parallel model."""
    spawn(check_checkpoint_3d, 8)
if __name__ == "__main__":
test_checkpoint_3d()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_legacy/test_utils/test_checkpoint/test_checkpoint_1d.py | tests/test_legacy/test_utils/test_checkpoint/test_checkpoint_1d.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pprint
import pytest
import torch
import torch.nn as nn
import colossalai.legacy.nn as col_nn
from colossalai.legacy.context.parallel_mode import ParallelMode
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.initialize import launch
from colossalai.legacy.utils import is_using_pp
from colossalai.legacy.utils.checkpointing import gather_pipeline_parallel_state_dict, load_checkpoint, save_checkpoint
from colossalai.logging import disable_existing_loggers
from colossalai.testing import rerun_if_address_is_in_use, skip_if_not_enough_gpus, spawn
def build_pipeline(model):
    """Keep layers in this pipeline stage's [start, end) range; replace the rest with Identity."""
    from colossalai.legacy.pipeline.utils import partition_uniform

    pipeline_size = gpc.get_world_size(ParallelMode.PIPELINE)
    pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
    depth = len(model)
    start, end = partition_uniform(depth, pipeline_size, 1)[pipeline_rank][0]
    layers = [model[i] if start <= i < end else nn.Identity() for i in range(depth)]
    return nn.Sequential(*layers)
def check_equal(A, B):
    """Assert tensors A and B are elementwise close within loose tolerances."""
    is_close = torch.allclose(A, B, rtol=1e-3, atol=1e-2)
    assert is_close
def check_checkpoint_1d(rank, world_size, port):
    """Worker: save a plain nn.Sequential checkpoint, reload it into the
    equivalent 1D tensor-parallel (optionally pipelined) model, and verify the
    gathered state dict matches the original weights on rank 0.
    """
    # Intended topology for this test: pipeline=2, tensor=4 in "1d" mode.
    # (A former local `config` dict holding this was never passed to `launch`,
    # so it was removed as dead code and recorded here instead.)
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")

    m1 = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 4))
    sd1 = m1.state_dict()
    if gpc.get_global_rank() == 0:
        print(f"Rank {gpc.get_global_rank()}:\n{pprint.pformat(sd1)}\n")
    save_checkpoint("test.pt", 0, m1)

    m2 = nn.Sequential(col_nn.Linear(4, 8), col_nn.Linear(8, 4))
    if is_using_pp():
        m2 = build_pipeline(m2)
    load_checkpoint("test.pt", m2)
    sd2 = m2.state_dict()
    if is_using_pp() and gpc.get_local_rank(ParallelMode.TENSOR) == 0:
        sd2 = gather_pipeline_parallel_state_dict(sd2)
    print(f"Rank {gpc.get_global_rank()}:\n{pprint.pformat(sd2)}\n")

    if gpc.get_global_rank() == 0:
        for k, v in sd1.items():
            assert k in sd2
            check_equal(v, sd2[k].to(torch.device("cpu")))
@pytest.mark.dist
@pytest.mark.skip("takes too long")
@skip_if_not_enough_gpus(min_gpus=8)
@rerun_if_address_is_in_use()
def test_checkpoint_1d():
    """Spawn 8 processes to round-trip a checkpoint through a 1D-parallel model."""
    spawn(check_checkpoint_1d, 8)
if __name__ == "__main__":
test_checkpoint_1d()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/__init__.py | tests/test_auto_parallel/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_ckpt_solvers/test_C_solver_consistency.py | tests/test_auto_parallel/test_ckpt_solvers/test_C_solver_consistency.py | import copy
import pytest
import torch
import torch.fx
import torchvision.models as tm
import colossalai
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.fx._compatibility import is_compatible_with_meta
# from colossalai.fx.passes.algorithms import solver_rotor
# from colossalai.fx.passes.algorithms.operation import Sequence
from colossalai.fx.passes.meta_info_prop import MetaInfoProp
from colossalai.legacy.core import global_context as gpc
from colossalai.testing import rerun_if_address_is_in_use, spawn
if is_compatible_with_meta():
from colossalai.fx.profiler.tensor import MetaTensor
try:
from colossalai.fx.codegen import ActivationCheckpointCodeGen
withcodegen = True
except:
withcodegen = False
def _run_C_solver_consistency_test(rank, world_size, port):
    """Check the C rotor solver produces the same opt table and operation
    sequence as the pure-Python solver on resnet50 and densenet121.

    NOTE(review): `solver_rotor` and `Sequence` come from imports that are
    commented out at the top of this file, so running this would raise
    NameError; the wrapping test is skipped pending a refactor.
    """
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")

    for M, mem_budget in [(tm.resnet50, 4000), (tm.densenet121, 8080)]:
        # trace the model with meta data
        model = M()
        data = torch.rand(128, 3, 224, 224, device="meta")
        tracer = ColoTracer()
        graph = tracer.trace(model, meta_args={"x": data})
        graph.set_codegen(ActivationCheckpointCodeGen())
        gm = ColoGraphModule(model, graph, model.__class__.__name__)
        if is_compatible_with_meta():
            data_meta = MetaTensor(data, fake_device=next(gm.parameters()).device)
            MetaInfoProp(gm).run(data_meta)

        # python solver
        gm = solver_rotor(gm, data_meta, mem_budget * 1024 * 1024, force_python=True)
        sequence_python: Sequence = copy.deepcopy(gm.__sequence__)
        opt_python = copy.deepcopy(gm.__opttable__)

        # C solver
        gm = solver_rotor(gm, data_meta, mem_budget * 1024 * 1024)
        sequence_C: Sequence = copy.deepcopy(gm.__sequence__)
        opt_C = copy.deepcopy(gm.__opttable__)

        # make sure the opt_tables are the same
        for m in range(len(opt_python)):
            for d in range(1, len(opt_python[0])):
                for i in range(len(opt_python[0]) - d):
                    assert (
                        opt_python[m][i][i + d] == opt_C[m][i][i + d]
                    ), f"item ({m}, {i}, {i + d}) is not consistent with python version!\npython version: {opt_python[m][i][i + d]}\nC version: {opt_C[m][i][i + d]}"

        sequence_python = sequence_python.list_operations()
        sequence_C = sequence_C.list_operations()

        # make sure the sequences are the same
        assert len(sequence_python) == len(sequence_C) and all(
            python_op.__repr__() == C_op.__repr__() for (python_op, C_op) in zip(sequence_python, sequence_C)
        )

    gpc.destroy()
@pytest.mark.skip("TODO(lyl): refactor all tests.")
@pytest.mark.skipif(not withcodegen, reason="torch version is less than 1.12.0")
@rerun_if_address_is_in_use()
def test_C_solver_consistency():
spawn(_run_C_solver_consistency_test, 1)
if __name__ == "__main__":
_run_C_solver_consistency_test(rank=0)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_ckpt_solvers/test_ckpt_torchvision.py | tests/test_auto_parallel/test_ckpt_solvers/test_ckpt_torchvision.py | import copy
import re
from typing import Callable
import pytest
import torch
import torchvision.models as tm
from torch.fx import GraphModule
import colossalai
from colossalai.fx import ColoTracer
from colossalai.fx._compatibility import is_compatible_with_meta
from colossalai.fx.graph_module import ColoGraphModule
# from colossalai.fx.passes.algorithms import chen_greedy, solver_rotor
from colossalai.fx.passes.meta_info_prop import MetaInfoProp
from colossalai.legacy.core import global_context as gpc
from colossalai.testing import rerun_if_address_is_in_use, spawn
if is_compatible_with_meta():
from colossalai.fx.profiler.tensor import MetaTensor
try:
from colossalai.fx.codegen import ActivationCheckpointCodeGen
with_codegen = True
except:
# fall back to older pytorch version
from colossalai.fx.codegen import python_code_with_activation_checkpoint
with_codegen = False
# SOLVERS = [chen_greedy, solver_rotor]
SOLVERS = []
def _is_activation_checkpoint_available(gm: GraphModule):
for n in gm.graph.nodes:
if hasattr(n, "activation_checkpoint") and getattr(n, "activation_checkpoint") is not None:
return True
def _is_all_gradient_close(m: torch.nn.Module, gm: GraphModule):
for m_p, gm_p in zip(m.parameters(), gm.parameters()):
if not torch.allclose(m_p.grad, gm_p.grad):
return False
return True
def _is_graph_linearized(gm: GraphModule):
code = gm.code
# find patterns like r' return output_1, output_2', which is not expected on a linearized graph
pattern = re.compile(r" return [a-zA-Z0-9_]+(, [a-zA-Z0-9_]+)+")
if pattern.findall(code):
return False
else:
return True
def check_backward_consistency(
    m: torch.nn.Module,
    gm: GraphModule,
    solver: Callable[[GraphModule], GraphModule],
    model_cls: Callable[[], torch.nn.Module],
):
    """Run one forward/backward on both `m` and the solver-rewritten `gm` with
    the same random CUDA input, then assert every parameter gradient matches.
    """
    criterion = torch.nn.MSELoss()
    m.cuda()
    data = torch.rand(2, 3, 32, 32).cuda()
    label = torch.rand(2, 5).cuda()
    loss = criterion(m(data), label)
    loss.backward()
    loss = criterion(gm(data), label)
    loss.backward()
    assert _is_all_gradient_close(m, gm), f"Solver {solver} did not work correctly in backward pass on {model_cls}"
def _run_ckpt_solver(rank, world_size, port):
    """Apply each checkpoint solver to densenet121 and verify the rewritten
    graph is linearized, annotated with checkpoints, and backward-consistent.

    NOTE(review): SOLVERS is currently an empty list (the solver imports above
    are commented out), so the loop body — including the reference to the
    undefined name `solver_rotor` — never executes.
    """
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    MODEL_LIST = [tm.densenet121]
    torch.backends.cudnn.deterministic = True
    tracer = ColoTracer(trace_act_ckpt=False)
    data = torch.rand(8, 3, 224, 224, device="meta")
    for solver in SOLVERS:
        for model_cls in MODEL_LIST:
            m = model_cls(num_classes=5)
            graph = tracer.trace(root=m)
            gm = ColoGraphModule(copy.deepcopy(m), graph, m.__class__.__name__)
            MetaInfoProp(gm.cuda()).run(MetaTensor(data).cuda())
            codegen = ActivationCheckpointCodeGen()
            gm.graph.set_codegen(codegen)
            if solver == solver_rotor:
                gm = solver(gm, data, mem_limit=500 * 1024 * 1024, mem_slots=500)
            else:
                gm = solver(gm)
            assert _is_graph_linearized(gm), f"Solver {solver} did not solve {model_cls} in a linearized manner."
            assert _is_activation_checkpoint_available(
                gm
            ), f"Solver {solver} did not annotate {model_cls} with any activation checkpoints"
            check_backward_consistency(m, gm, solver, model_cls)
    gpc.destroy()
@pytest.mark.skip("TODO(super-dainiu): refactor all tests.")
@pytest.mark.skipif(not with_codegen, reason="torch version is lower than 1.12.0")
@rerun_if_address_is_in_use()
def test_ckpt_solver():
spawn(_run_ckpt_solver, 1)
def _run_ckpt_solver_torch11(rank, world_size, port):
    """Torch<1.12 fallback variant of `_run_ckpt_solver`, using the legacy
    `python_code_with_activation_checkpoint` codegen hook instead.

    NOTE(review): SOLVERS is currently empty, so the loop body (and its
    reference to the undefined `solver_rotor`) never runs.
    """
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    MODEL_LIST = [tm.densenet121]
    torch.backends.cudnn.deterministic = True
    tracer = ColoTracer(trace_act_ckpt=False)
    data = torch.rand(8, 3, 32, 32, device="meta")
    for solver in SOLVERS:
        for model_cls in MODEL_LIST:
            m = model_cls(num_classes=5)
            graph = tracer.trace(root=m)
            gm = ColoGraphModule(copy.deepcopy(m), graph, m.__class__.__name__)
            MetaInfoProp(gm).run(data)
            gm.graph._python_code = python_code_with_activation_checkpoint.__get__(graph)
            if solver == solver_rotor:
                gm = solver(gm, data, mem_limit=500 * 1024 * 1024, mem_slots=500, force_python=True)
            else:
                gm = solver(gm)
            assert _is_graph_linearized(gm), f"Solver {solver} did not solve {model_cls} in a linearized manner."
            assert _is_activation_checkpoint_available(
                gm
            ), f"Solver {solver} did not annotate {model_cls} with any activation checkpoints"
            check_backward_consistency(m, gm, solver, model_cls)
    gpc.destroy()
@pytest.mark.skipif(with_codegen, reason="torch version is equal to or higher than 1.12.0")
@pytest.mark.skip(reason="currently torch11 ColoGraphModule is not done")
@rerun_if_address_is_in_use()
def test_ckpt_solver_torch11():
    """Single-process spawn of the torch<1.12 fallback checkpoint-solver check."""
    spawn(_run_ckpt_solver_torch11, 1)
if __name__ == "__main__":
_run_ckpt_solver(rank=0)
test_ckpt_solver()
test_ckpt_solver_torch11()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_ckpt_solvers/test_linearize.py | tests/test_auto_parallel/test_ckpt_solvers/test_linearize.py | import pytest
import torch
import torchvision.models as tm
from colossalai.fx import ColoTracer
from colossalai.fx._compatibility import is_compatible_with_meta
from colossalai.fx.graph_module import ColoGraphModule
# from colossalai.fx.passes.algorithms import linearize, solver_rotor
# from colossalai.fx.passes.algorithms.operation import (ForwardCheck, ForwardEnable, ForwardNograd, Loss)
from colossalai.fx.passes.meta_info_prop import MetaInfoProp
from colossalai.testing import clear_cache_before_run
if is_compatible_with_meta():
from colossalai.fx.profiler.tensor import MetaTensor
try:
from colossalai.fx.codegen import ActivationCheckpointCodeGen
with_codegen = True
except:
# fall back to older pytorch version
from colossalai.fx.codegen import python_code_with_activation_checkpoint
with_codegen = False
@pytest.mark.skip(reason="TODO: modify the logger")
@pytest.mark.skip("TODO(lyl): refactor all tests.")
@pytest.mark.skipif(not with_codegen, reason="torch version is lower than 1.12.0")
@clear_cache_before_run()
def test_linearize():
MODEL_DICT = {tm.resnet18: [2100, 3000], tm.densenet121: [8100, 17000]}
tracer = ColoTracer()
for M, budgets in MODEL_DICT.items():
for budget in budgets:
model = M()
graph = tracer.trace(model)
graph.set_codegen(ActivationCheckpointCodeGen())
gm = ColoGraphModule(model, graph, model.__class__.__name__)
MetaInfoProp(gm).run(MetaTensor(torch.rand(128, 3, 224, 224, device="meta"), fake_device="cpu"))
node_list = linearize(gm)
gm = solver_rotor(gm, data=torch.rand(128, 3, 224, 224, device="meta"), mem_limit=budget * 1024**2)
op_list = gm.__sequence__.list_operations()
loss_op = next(op for op in op_list if isinstance(op, Loss))
op_list = op_list[: op_list.index(loss_op)]
in_ckpt = False
ckpt_idx = 0
for idx, op in enumerate(op_list):
if in_ckpt:
if isinstance(op, ForwardNograd):
for n in node_list[idx]:
assert hasattr(n, "activation_checkpoint"), f"{n} is not annotated!"
assert (
n.activation_checkpoint[0] == ckpt_idx
), f"{n} ckpt_idx {n.activation_checkpoint[0]} wrong, should be {ckpt_idx}!"
continue
if isinstance(op, ForwardEnable):
for n in node_list[idx]:
assert getattr(n, "activation_checkpoint", None) == None, f"{n} should not be annotated!"
in_ckpt = False
ckpt_idx += 1
continue
if isinstance(op, ForwardCheck):
ckpt_idx += 1
for n in node_list[idx]:
assert hasattr(n, "activation_checkpoint"), f"{n} is not annotated!"
assert (
n.activation_checkpoint[0] == ckpt_idx
), f"{n} ckpt_idx {n.activation_checkpoint[0]} wrong, should be {ckpt_idx}!"
continue
else:
if isinstance(op, ForwardCheck):
in_ckpt = True
for n in node_list[idx]:
assert hasattr(n, "activation_checkpoint"), f"{n} is not annotated!"
assert (
n.activation_checkpoint[0] == ckpt_idx
), f"{n} ckpt_idx {n.activation_checkpoint[0]} wrong, should be {ckpt_idx}!"
del model
del gm
del node_list
@pytest.mark.skip("TODO(lyl): refactor all tests.")
@pytest.mark.skip(reason="torch11 meta tensor not implemented")
@pytest.mark.skipif(with_codegen, reason="torch version is equal to or higher than 1.12.0")
@clear_cache_before_run()
def test_linearize_torch11():
MODEL_DICT = {tm.resnet18: [2100, 3000], tm.densenet121: [8100, 17000]}
tracer = ColoTracer()
for M, budgets in MODEL_DICT.items():
for budget in budgets:
model = M()
graph = tracer.trace(model)
gm = ColoGraphModule(model, graph, model.__class__.__name__)
gm.graph._python_code = python_code_with_activation_checkpoint.__get__(graph)
node_list = linearize(gm)
gm = solver_rotor(gm, data=torch.rand(128, 3, 224, 224, device="meta"), mem_limit=budget * 1024**2)
op_list = gm.__sequence__.list_operations()
loss_op = next(op for op in op_list if isinstance(op, Loss))
op_list = op_list[: op_list.index(loss_op)]
in_ckpt = False
ckpt_idx = 0
for idx, op in enumerate(op_list):
if in_ckpt:
if isinstance(op, ForwardNograd):
for n in node_list[idx]:
assert hasattr(n, "activation_checkpoint"), f"{n} is not annotated!"
assert n.activation_checkpoint == ckpt_idx, f"{n} ckpt_idx wrong, should be {ckpt_idx}!"
continue
if isinstance(op, ForwardEnable):
for n in node_list[idx]:
assert getattr(n, "activation_checkpoint", None) == None, f"{n} should not be annotated!"
in_ckpt = False
ckpt_idx += 1
continue
if isinstance(op, ForwardCheck):
ckpt_idx += 1
for n in node_list[idx]:
assert hasattr(n, "activation_checkpoint"), f"{n} is not annotated!"
assert n.activation_checkpoint == ckpt_idx, f"{n} ckpt_idx wrong, should be {ckpt_idx}!"
continue
else:
if isinstance(op, ForwardCheck):
in_ckpt = True
for n in node_list[idx]:
assert hasattr(n, "activation_checkpoint"), f"{n} is not annotated!"
assert n.activation_checkpoint == ckpt_idx, f"{n} ckpt_idx wrong, should be {ckpt_idx}!"
del model
del gm
del node_list
if __name__ == "__main__":
test_linearize()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_offload/model_utils.py | tests/test_auto_parallel/test_offload/model_utils.py | import torch
import torch.nn as nn
from transformers import BertConfig, BertLMHeadModel, GPT2Config, GPT2LMHeadModel
# from tests.components_to_test.registry import non_distributed_component_funcs
class GPTLMModel(nn.Module):
    """Thin wrapper around HuggingFace GPT2LMHeadModel that returns only the lm logits."""

    def __init__(self, hidden_size=768, num_layers=12, num_attention_heads=12, max_seq_len=1024, vocab_size=50257):
        super().__init__()
        config = GPT2Config(
            n_embd=hidden_size,
            n_layer=num_layers,
            n_head=num_attention_heads,
            n_positions=max_seq_len,
            n_ctx=max_seq_len,
            vocab_size=vocab_size,
        )
        self.model = GPT2LMHeadModel(config)

    def forward(self, input_ids, attention_mask):
        # Index [0] of the model output is the lm_logits tensor.
        outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, use_cache=True)
        return outputs[0]
class LMLoss(nn.Module):
    """Shifted next-token cross-entropy loss for language modeling."""

    def __init__(self):
        super().__init__()
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, logits, labels):
        # Predict token t+1 from position t: drop the last logit and the first label.
        pred = logits[..., :-1, :].contiguous()
        target = labels[..., 1:].contiguous()
        # Flatten (batch, seq) into one dimension for CrossEntropyLoss.
        return self.loss_fn(pred.view(-1, pred.size(-1)), target.view(-1))
class BertLMModel(nn.Module):
    """Thin wrapper around HuggingFace BertLMHeadModel that returns only the lm logits."""

    def __init__(self, hidden_size=768, num_layers=12, num_attention_heads=32, vocab_size=30522):
        super().__init__()
        config = BertConfig(
            n_embd=hidden_size,
            num_hidden_layers=num_layers,
            hidden_size=hidden_size,
            num_attention_heads=num_attention_heads,
            max_position_embeddings=hidden_size,
            vocab_size=vocab_size,
        )
        self.model = BertLMHeadModel(config)

    def forward(self, input_ids, attention_mask):
        # Index [0] of the model output is the lm_logits tensor.
        outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, use_cache=True)
        return outputs[0]
# @non_distributed_component_funcs.register(name="bert_")
def get_bert_components():
    """Return (model_builder, data_gen) for a large BertLMModel test fixture."""
    vocab_size = 1024
    seq_len = 64
    batch_size = 64

    def bert_model_builder():
        # 8192-wide, 4-layer Bert sized for memory/offload experiments.
        return BertLMModel(hidden_size=8192, num_layers=4, num_attention_heads=32, vocab_size=vocab_size)

    def bert_data_gen(device="meta"):
        input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device=device)
        attention_mask = torch.ones_like(input_ids, device=device)
        return dict(input_ids=input_ids, attention_mask=attention_mask)

    return bert_model_builder, bert_data_gen
# @non_distributed_component_funcs.register(name="gpt2_")
def get_gpt2_components():
    """Return (model_builder, data_gen) for a large GPTLMModel test fixture."""
    vocab_size = 1024
    seq_len = 8
    batch_size = 64

    def gpt2_model_builder():
        # 8192-wide, 2-layer GPT-2 sized for memory/offload experiments.
        return GPTLMModel(hidden_size=8192, num_layers=2, num_attention_heads=32, vocab_size=vocab_size)

    def gpt2_data_gen(device="meta"):
        input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device=device)
        attention_mask = torch.ones_like(input_ids, device=device)
        return dict(input_ids=input_ids, attention_mask=attention_mask)

    return gpt2_model_builder, gpt2_data_gen
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_offload/test_solver.py | tests/test_auto_parallel/test_offload/test_solver.py | import pytest
import torch.fx
from torch.fx import GraphModule
from torch.utils._pytree import tree_map
from colossalai.auto_parallel.offload.region_manager import RegionManager
from colossalai.auto_parallel.offload.solver import NOT_NVML, SolverFactory
from colossalai.fx import ColoTracer, is_compatible_with_meta
from colossalai.fx.passes.meta_info_prop import MetaInfoProp
from colossalai.testing import clear_cache_before_run, parameterize
from tests.test_auto_parallel.test_offload.model_utils import *
@pytest.mark.skipif(NOT_NVML, reason="pynvml is not installed")
@clear_cache_before_run()
@parameterize("model_name", ["gpt2_", "bert_"])
@parameterize("memory_budget", [4000])
@parameterize("solver_name", ["syn", "asyn"])
def solver_test(model_name: str, memory_budget: float, solver_name: str):
    """Run the offload solver end-to-end and check it meets its memory budget.

    Traces the model with meta tensors, partitions the graph into offload
    regions, solves for an offload/prefetch plan under ``memory_budget`` (MB),
    then prints the per-region plan for forward and backward.
    """
    get_components_func = non_distributed_component_funcs.get_callable(model_name)
    model_builder, data_gen = get_components_func()
    data_args = data_gen(device="cpu")
    # Cast floating-point tensors to fp16; integer ids/masks pass through.
    wrap_fn = lambda x: x.to(dtype=torch.half) if isinstance(x, torch.Tensor) and torch.is_floating_point(x) else x
    data_args = tree_map(wrap_fn, data_args)
    model = model_builder()
    model.train()
    model = model.cpu().half()
    tracer = ColoTracer()
    assert is_compatible_with_meta()
    # Move the inputs to the meta device so tracing allocates no real memory.
    wrap_fn = lambda x: x.to("meta") if isinstance(x, torch.Tensor) else x
    meta_args = tree_map(wrap_fn, data_args)
    graph = tracer.trace(model, meta_args=meta_args)
    gm = GraphModule(model, graph, model.__class__.__name__)
    # Annotate the graph with shape/memory metadata the region manager relies on.
    interp = MetaInfoProp(gm)
    interp.propagate(*meta_args.values())
    region_manager = RegionManager(graph, solver_name=solver_name)
    region_manager._pre_process()
    region_list = region_manager.region_list
    solver_cls = SolverFactory.create(solver_name)
    # Budget was given in MB; the solver works in bytes.
    memory_budget = memory_budget * 1024 * 1024
    solver = solver_cls(region_list, memory_budget)
    solver._call_solver()
    # The chosen plan's peak memory must stay under the budget.
    assert solver.best_ts.peak_mem < memory_budget
    print("****************** execution plan *******************")
    for region in region_list:
        need_offload = region.need_offload
        to_prefetch = region.fwd_prefetch_region.r_id if region.fwd_prefetch_region is not None else None
        print(
            f"| {model_name} forward | region id: {region.r_id} | need_offload: {need_offload} | to_prefetch: {to_prefetch}"
        )
    # The backward pass visits regions in reverse order.
    for region in region_list.__reversed__():
        need_offload = region.need_offload
        to_prefetch = region.bwd_prefetch_region.r_id if region.bwd_prefetch_region is not None else None
        print(
            f"| {model_name} backward | region id: {region.r_id} | need_offload: {need_offload} | to_prefetch: {to_prefetch}"
        )


if __name__ == "__main__":
    solver_test()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_offload/test_perf.py | tests/test_auto_parallel/test_offload/test_perf.py | import time
import pytest
import torch
from torch.utils._pytree import tree_map
import colossalai
from colossalai.accelerator import get_accelerator
from colossalai.auto_parallel.offload.amp_optimizer import AMPOptimizer
from colossalai.auto_parallel.offload.mem_optimize import memory_optimize
from colossalai.auto_parallel.offload.solver import NOT_NVML
from colossalai.fx.profiler import parameter_size
from colossalai.legacy.zero.gemini.colo_init_context import ColoInitContext
from colossalai.nn.optimizer import HybridAdam
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
from colossalai.utils import set_seed
from colossalai.zero import zero_model_wrapper, zero_optim_wrapper
from tests.test_auto_parallel.test_offload.model_utils import *
# from tests.test_tensor.common_utils import set_seed
@parameterize("model_name", ["gpt2_"])
@parameterize("memory_budget", [5000])
@parameterize("solver_name", ["asyn"])
def exam_fwd_bwd(model_name: str, memory_budget: float, solver_name: str):
# build model
get_components_func = non_distributed_component_funcs.get_callable(model_name)
model_builder, data_gen = get_components_func()
label = torch.randint(
low=0,
high=128,
size=(
64,
8,
),
device=get_accelerator().get_current_device(),
)
criterion = LMLoss()
set_seed(42)
start_time = time.time()
model = model_builder()
model.train()
param_size = parameter_size(model) / 1024**2 / 2
init_time = time.time() - start_time
print(f"init_param_size={param_size:.3f} MB | init_model_time={init_time:.3f} s")
data_args = data_gen(device="cpu")
wrap_fn = lambda x: x.to(dtype=torch.half) if isinstance(x, torch.Tensor) and torch.is_floating_point(x) else x
data_args = tree_map(wrap_fn, data_args)
start_time = time.time()
model = memory_optimize(model, data_args, memory_budget * 1024 * 1024, solver_name)
solver_time = time.time() - start_time
print(f"solver_time={solver_time:.3f} s")
hybrid_optimizer = HybridAdam(model.model.parameters(), lr=1e-3)
optim = AMPOptimizer(hybrid_optimizer, model)
with ColoInitContext(device=torch.device("cpu")):
gemini_model = model_builder()
gemini_model.train()
hybrid_optimizer = HybridAdam(gemini_model.parameters(), lr=1e-3)
gemini_config = dict(
strict_ddp_mode=False,
device=torch.device("cpu"),
placement_policy="cpu",
pin_memory=True,
hidden_dim=8192,
search_range_m=128,
)
gemini_model = zero_model_wrapper(gemini_model, 3, gemini_config)
optim_config = dict(reduce_bucket_size=12 * 1024 * 1024, overlap_communication=True, verbose=True)
gemini_optim = zero_optim_wrapper(gemini_model, hybrid_optimizer, optim_config=optim_config)
torch.cuda.empty_cache()
torch.cuda.synchronize()
torch.cuda.reset_peak_memory_stats()
# test gemini
time_list = []
set_seed(42)
data_args = data_gen(device="cuda")
for step in range(10):
gemini_optim.zero_grad()
torch.cuda.synchronize()
start_time = time.time()
gemini_out = gemini_model(**data_args)
gemini_loss = criterion(gemini_out, label)
gemini_optim.backward(gemini_loss)
torch.cuda.synchronize()
time_list.append(time.time() - start_time)
gemini_optim.step()
torch.cuda.synchronize()
exec_time = sum(sorted(time_list)[:5]) / 5
runtime_peak_mem_alc = torch.cuda.max_memory_allocated() / 1024**2
runtime_peak_mem_res = torch.cuda.max_memory_reserved() / 1024**2
print(f"gemini | model_name: {model_name}")
print(
f"| exec_time={exec_time:.3f} s | param_size={param_size:.3f} MB "
f"| runtime_peak_mem_alc={runtime_peak_mem_alc:.3f} MB| runtime_peak_mem_res={runtime_peak_mem_res:.3f} MB|"
)
print(time_list)
del data_args
del gemini_model
del gemini_optim
del gemini_out
del gemini_loss
# test asyn offload
torch.cuda.empty_cache()
torch.cuda.synchronize()
torch.cuda.reset_peak_memory_stats()
time_list = []
set_seed(42)
data_args = data_gen(device="cuda")
data_args = tree_map(wrap_fn, data_args)
for step in range(10):
optim.zero_grad()
torch.cuda.synchronize()
start_time = time.time()
loss = criterion(model(**data_args), label)
optim.backward(loss)
torch.cuda.synchronize()
time_list.append(time.time() - start_time)
optim.step()
torch.cuda.synchronize()
exec_time = sum(sorted(time_list)[:5]) / 5
runtime_peak_mem_alc = torch.cuda.max_memory_allocated() / 1024**2
runtime_peak_mem_res = torch.cuda.max_memory_reserved() / 1024**2
print(f"solver_name: {solver_name} | model_name: {model_name}")
print(
f"| exec_time={exec_time:.3f} s | param_size={param_size:.3f} MB "
f"| runtime_peak_mem_alc={runtime_peak_mem_alc:.3f} MB| runtime_peak_mem_res={runtime_peak_mem_res:.3f} MB|"
)
print(time_list)
def run_dist(rank, world_size, port):
    """Per-process entry point: initialize the distributed env, then benchmark."""
    init_kwargs = dict(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    colossalai.launch(**init_kwargs)
    exam_fwd_bwd()
@pytest.mark.skip("this test failed")
@pytest.mark.skipif(NOT_NVML, reason="pynvml is not installed")
@rerun_if_address_is_in_use()
def test_perf():
spawn(run_dist, 1)
if __name__ == "__main__":
test_perf()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_bias_addition_forward.py | tests/test_auto_parallel/test_tensor_shard/test_bias_addition_forward.py | import pytest
import torch
# Auto-parallel codegen has optional dependencies; degrade gracefully when the
# import fails so the module stays importable and the tests below can be skipped.
try:
    from colossalai.auto_parallel.tensor_shard.initialize import initialize_model

    NO_CODEGEN = False
except Exception:  # was a bare `except:`; don't swallow KeyboardInterrupt/SystemExit
    NO_CODEGEN = True
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import assert_close, rerun_if_address_is_in_use, run_on_environment_flag, spawn
class LinearModel(torch.nn.Module):
    """A single linear layer whose output is scaled by two."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.linear = torch.nn.Linear(in_features, out_features)

    def forward(self, x):
        # The scale-by-2 gives the traced graph an extra elementwise node.
        return self.linear(x) * 2
class ConvModel(torch.nn.Module):
    """A single 2-D convolution whose output is scaled by two."""

    def __init__(self, in_channels, out_channels, kernel_size, bias=True):
        super().__init__()
        self.conv = torch.nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, bias=bias
        )

    def forward(self, x):
        # The scale-by-2 gives the traced graph an extra elementwise node.
        return self.conv(x) * 2
def check_linear_module(rank, world_size, port):
    """Check auto-parallel sharding of ``LinearModel`` against single-GPU output.

    Runs on 4 ranks arranged as a 2x2 device mesh; every rank must reproduce
    the reference output exactly.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = LinearModel(4, 8).cuda()
    input = torch.rand(4, 4).cuda()
    # Reference output computed before the model is transformed in place.
    output_compare = model(input)
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    # [[0, 1]
    #  [2, 3]]
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    # Meta-tensor sample: only shapes/dtypes are needed for tracing and solving.
    meta_args = {"x": torch.rand(4, 4).to("meta")}
    gm = initialize_model(model, meta_args=meta_args, device_mesh=device_mesh)
    output = gm(input)
    assert_close(output, output_compare)
def check_conv_module(rank, world_size, port):
    """Check auto-parallel sharding of ``ConvModel`` against single-GPU output.

    Runs on 4 ranks arranged as a 2x2 device mesh; every rank must reproduce
    the reference output exactly.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = ConvModel(3, 6, 2).cuda()
    input = torch.rand(4, 3, 64, 64).cuda()
    # Reference output computed before the model is transformed in place.
    output_compare = model(input)
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    # [[0, 1]
    #  [2, 3]]
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    # Meta-tensor sample: only shapes/dtypes are needed for tracing and solving.
    meta_args = {"x": torch.rand(4, 3, 64, 64).to("meta")}
    gm = initialize_model(model, meta_args=meta_args, device_mesh=device_mesh)
    output = gm(input)
    assert_close(output, output_compare)
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.skipif(NO_CODEGEN, reason="No codegen found")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_bias_addition_module():
    # Each check spawns its own group of 4 processes — one per mesh device.
    spawn(check_linear_module, 4)
    spawn(check_conv_module, 4)


if __name__ == "__main__":
    test_bias_addition_module()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_broadcast.py | tests/test_auto_parallel/test_tensor_shard/test_broadcast.py | import torch
from colossalai.auto_parallel.tensor_shard.utils import (
get_broadcast_shape,
is_broadcastable,
recover_sharding_spec_for_broadcast_shape,
)
from colossalai.device.device_mesh import DeviceMesh
from colossalai.tensor.sharding_spec import ShardingSpec
def test_is_broadcastable():
    """Shapes broadcast when trailing dims match or are 1/missing; others don't."""
    compatible = [((4, 4, 8), (1, 8)), ((4, 2, 8), (2, 8))]
    for lhs, rhs in compatible:
        assert is_broadcastable(torch.rand(*lhs).shape, torch.rand(*rhs).shape)
    # (4, 8) vs (..., 2, 8): the second-to-last dims (4 vs 2) conflict.
    assert not is_broadcastable(torch.rand(4, 2, 8).shape, torch.rand(4, 8).shape)
def test_get_broadcast_shape():
    """Broadcast shape follows numpy rules: align right, take the max per dim."""
    cases = [
        ((4, 4, 8), (1, 8), [4, 4, 8]),
        ((4, 2, 8), (2, 8), [4, 2, 8]),
        ((4, 2, 8), (8,), [4, 2, 8]),
    ]
    for lhs, rhs, expected in cases:
        assert get_broadcast_shape(torch.rand(*lhs).shape, torch.rand(*rhs).shape) == expected
def test_recover_sharding_spec_for_broadcast_shape():
    """Recover a physical sharding spec from one defined on the broadcast shape.

    x1 is (4, 1, 8) and broadcasts against (2, 8) to (4, 2, 8); a spec sharding
    logical dims 0 and 1 must map back to x1 with only dim 0 still sharded,
    since x1's dim 1 is a broadcast (size-1) dimension.
    """
    x1 = torch.rand(4, 1, 8)
    x2 = torch.rand(2, 8)
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    # [[0, 1]
    #  [2, 3]]
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
    broadcast_shape = get_broadcast_shape(x1.shape, x2.shape)
    # Shard logical dim 0 on mesh axis 0 and logical dim 1 on mesh axis 1.
    logical_sharding_spec_for_x1 = ShardingSpec(
        device_mesh=device_mesh, dim_partition_dict={0: [0], 1: [1]}, entire_shape=broadcast_shape
    )
    physical_sharding_spec_for_x1, removed_dims = recover_sharding_spec_for_broadcast_shape(
        logical_sharding_spec_for_x1, broadcast_shape, x1.shape
    )
    print(physical_sharding_spec_for_x1)
    assert physical_sharding_spec_for_x1.entire_shape == x1.shape
    # dim 1 for the physical tensor is of broadcast type MULTIPLE, so should ignore
    assert physical_sharding_spec_for_x1.dim_partition_dict == {0: [0]}
    assert physical_sharding_spec_for_x1.sharding_sequence == ["S0", "R", "R"]
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_solver_with_resnet_v2.py | tests/test_auto_parallel/test_tensor_shard/test_solver_with_resnet_v2.py | import torch
from torch.fx import GraphModule
from torchvision.models import resnet50
from colossalai._analyzer.fx.passes import shape_prop_pass
# from colossalai.fx.tracer.tracer import ColoTracer
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.constants import BATCHNORM_MODULE_OP
from colossalai.auto_parallel.tensor_shard.options import SolverOptions
from colossalai.auto_parallel.tensor_shard.solver import CostGraph, Solver, StrategiesConstructor
from colossalai.device.device_mesh import DeviceMesh
from colossalai.tensor.shape_consistency import ShapeConsistencyManager
from colossalai.testing import clear_cache_before_run, run_on_environment_flag
@run_on_environment_flag(name="AUTO_PARALLEL")
@clear_cache_before_run()
def test_cost_graph():
    """Solve a sharding strategy for ResNet-50 and print aggregate costs.

    Traces the model with meta tensors, builds per-node strategy candidates,
    runs the ILP solver, then sums compute/communication/memory costs of the
    chosen strategies (plus BatchNorm-only communication separately).
    """
    physical_mesh_id = torch.arange(0, 8)
    mesh_shape = (2, 4)
    # [[0, 1, 2, 3]
    #  [4, 5, 6, 7]]
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
    # Instantiated for its side effect of initializing the singleton manager.
    ShapeConsistencyManager()
    tracer = ColoTracer(bias_addition_split=True)
    model = resnet50(num_classes=100000)
    input_sample = {"x": torch.rand(128, 3, 224, 224).to("meta")}
    graph = tracer.trace(root=model, meta_args=input_sample)
    # Traced graph (abridged):
    #   x -> conv1 -> bn1 -> relu -> maxpool
    #     -> layer1..layer4 (conv/bn/relu blocks with residual `add`s)
    #     -> avgpool -> flatten -> fc
    gm = GraphModule(model, graph, model.__class__.__name__)
    shape_prop_pass(gm, *input_sample.values())
    gm.recompile()
    solver_options = SolverOptions()
    strategies_constructor = StrategiesConstructor(graph, device_mesh, solver_options)
    strategies_constructor.build_strategies_and_cost()
    cost_graph = CostGraph(strategies_constructor.leaf_strategies)
    cost_graph.simplify_graph()
    solver = Solver(gm.graph, strategies_constructor, cost_graph)
    ret = solver.call_solver_serialized_args()
    print(ret[0])
    print(solver.last_s_val)
    # last_s_val holds the chosen strategy index for each graph node, in order.
    strategies_list = solver.last_s_val

    computation_cost = 0
    communication_cost = 0
    communication_cost_bn = 0
    memory_cost = 0
    for index, node in enumerate(graph.nodes):
        if node.op == "call_module":
            submod = node.graph.owning_module.get_submodule(node.target)
            # Track BatchNorm communication separately: sync-BN traffic is of
            # particular interest for this solver.
            if type(submod) in BATCHNORM_MODULE_OP:
                communication_cost_bn += node.strategies_vector[strategies_list[index]].communication_cost.total
        print(node.name, node.strategies_vector[strategies_list[index]].name)
        computation_cost += node.strategies_vector[strategies_list[index]].compute_cost.total
        communication_cost += node.strategies_vector[strategies_list[index]].communication_cost.total
        node_memory_cost = node.strategies_vector[strategies_list[index]].memory_cost.total
        # Some nodes report (fwd, bwd) memory tuples; take the forward entry.
        if isinstance(node_memory_cost, tuple):
            node_memory_cost = node_memory_cost[0]
        memory_cost += node_memory_cost.activation + node_memory_cost.parameter

    print(f"computation cost is {computation_cost}")
    print(f"communication cost is {communication_cost}")
    print(f"memory cost is {memory_cost}")
    print(f"bn communication cost is {communication_cost_bn}")


if __name__ == "__main__":
    test_cost_graph()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/__init__.py | tests/test_auto_parallel/test_tensor_shard/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_compatibility_with_gemini.py | tests/test_auto_parallel/test_tensor_shard/test_compatibility_with_gemini.py | import copy
import pytest
import torch
# Auto-parallel codegen has optional dependencies; degrade gracefully when the
# import fails so the module stays importable and the tests below can be skipped.
try:
    from colossalai.auto_parallel.tensor_shard.initialize import initialize_model

    NO_CODEGEN = False
except Exception:  # was a bare `except:`; don't swallow KeyboardInterrupt/SystemExit
    NO_CODEGEN = True
from colossalai.accelerator import get_accelerator
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.nn.optimizer import HybridAdam
from colossalai.testing import assert_close, rerun_if_address_is_in_use, run_on_environment_flag, spawn
from colossalai.zero import zero_model_wrapper, zero_optim_wrapper
class MLP(torch.nn.Module):
    """Two bias-free linear layers with a 4x hidden expansion."""

    def __init__(self, in_features):
        super().__init__()
        hidden = 4 * in_features
        self.linear_1 = torch.nn.Linear(in_features, hidden, bias=False)
        self.linear_2 = torch.nn.Linear(hidden, in_features, bias=False)

    def forward(self, x):
        return self.linear_2(self.linear_1(x))
def check_auto_parallel_with_gemini(rank, world_size, port):
    """Check the auto-parallel TP solution composed with Gemini (ZeRO stage 3).

    Runs on 4 ranks as a 2x2 device mesh. Compares the sharded forward output
    and the optimizer state against a plain single-device fp16 reference.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = MLP(4).half().cuda()
    # Ranks {0,1} and {2,3} each see one half of the 8-row reference batch.
    if rank in [0, 1]:
        input = torch.arange(0, 16).reshape(4, 4).half().cuda()
    elif rank in [2, 3]:
        input = torch.arange(16, 32).reshape(4, 4).half().cuda()
    input_compare = torch.arange(0, 32).reshape(8, 4).half().cuda()
    output_compare = model(input_compare)
    loss_compare = output_compare.sum()
    loss_compare.backward()
    # Divide by 2: gradients are averaged over the 2 data-parallel replicas.
    grad_compare = copy.deepcopy(model.linear_1.weight.grad / 2)
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    # [[0, 1]
    #  [2, 3]]
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    meta_args = {"x": torch.rand(4, 4).half().to("meta")}
    gm, solution = initialize_model(
        model,
        meta_args=meta_args,
        device_mesh=device_mesh,
        return_solution=True,
        solver_preference="tp",
        shard_option="shard_last_axis",
    )
    # Only rank 0 prints the solver's chosen strategy combination.
    if rank == 0:
        msg = "| TP strategy combination chosen by auto-parallel solver |"
        msg_length = len(msg)
        print("=" * msg_length)
        print(msg)
        print("=" * msg_length)
        for strategy in solution:
            print(strategy)
        print("=" * msg_length)
    gemini_config = dict(
        strict_ddp_mode=False,
        device=get_accelerator().get_current_device(),
        placement_policy="cpu",
        pin_memory=True,
        search_range_m=128,
    )
    gm = zero_model_wrapper(gm, zero_stage=3, gemini_config=gemini_config)
    # betas=(0, 0) makes Adam's exp_avg equal the latest gradient after one
    # step, so optimizer state can be compared against the reference gradient.
    optimizer = HybridAdam(gm.parameters(), betas=(0, 0))
    optimizer = zero_optim_wrapper(gm, optimizer, initial_scale=1)
    output = gm(input)
    if rank in (0, 1):
        assert_close(output, output_compare.narrow(0, 0, 4))
    else:
        assert_close(output, output_compare.narrow(0, 4, 4))
    print(f"output on rank{rank} is correct")
    loss = output.sum()
    optimizer.zero_grad()
    optimizer.backward(loss)
    optimizer.step()
    # Each rank's flattened exp_avg must match its shard of the reference grad:
    # ranks {0,2} hold rows 0-7 and ranks {1,3} rows 8-15 of linear_1's grad.
    if rank in (0, 2):
        assert_close(list(optimizer.optim.state.values())[0]["exp_avg"].half(), grad_compare.narrow(0, 0, 8).flatten())
    if rank in (1, 3):
        assert_close(list(optimizer.optim.state.values())[0]["exp_avg"].half(), grad_compare.narrow(0, 8, 8).flatten())
    print(f"gradient on rank{rank} is correct")
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.skipif(NO_CODEGEN, reason="No codegen found")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_auto_parallel_with_gemini():
    # 4 processes: one per device in the 2x2 mesh.
    spawn(check_auto_parallel_with_gemini, 4)


if __name__ == "__main__":
    test_auto_parallel_with_gemini()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_liveness_analysis.py | tests/test_auto_parallel/test_tensor_shard/test_liveness_analysis.py | import pytest
import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.solver import GraphAnalyser
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.testing import clear_cache_before_run
class LinearModel(nn.Module):
    """linear -> in-place ReLU -> linear, plus an add of a second input."""

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(4, 4)
        self.relu = nn.ReLU(inplace=True)
        self.linear2 = nn.Linear(4, 4)

    def forward(self, x1, x2):
        # Doubling x1 first creates a fresh tensor, so the in-place ReLU
        # never mutates a caller-owned buffer.
        hidden = self.linear1(x1 * 2)
        hidden = self.relu(hidden)
        hidden = self.linear2(hidden)
        return hidden + x2
@pytest.mark.skip("meta tensor has some bugs in 1.11")
@clear_cache_before_run()
def test_liveness_analysis():
model = LinearModel()
tracer = ColoTracer(bias_addition_split=True)
meta_args = {"x1": torch.rand(4, 4, device="meta"), "x2": torch.rand(4, 4, device="meta")}
graph = tracer.trace(model, meta_args=meta_args)
gm = ColoGraphModule(root=model, graph=graph, class_name=model.__class__.__name__)
shape_prop_pass(gm, *meta_args.values())
graph_analyser = GraphAnalyser(gm)
liveness_list = graph_analyser.liveness_analysis()
stage_count = len(liveness_list)
# if a LiveStage is covered by another LiveStage, we just keep the larger one.
assert stage_count == 1
# a variable named `relu` must exist
# and this live var must have inplace = True
assert liveness_list[0].all_live_vars.exists("relu")
relu_var = liveness_list[0].all_live_vars.get("relu")
assert relu_var.is_inplace
# the unique vars must be fewer than the all vars since in-place ops exist
all_live_vars = liveness_list[0].all_live_vars
unique_live_vars = liveness_list[0].unique_live_vars
assert len(unique_live_vars) + 1 == len(all_live_vars)
if __name__ == "__main__":
test_liveness_analysis()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_compatibility_with_ddp.py | tests/test_auto_parallel/test_tensor_shard/test_compatibility_with_ddp.py | import copy
import pytest
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
# Auto-parallel codegen has optional dependencies; degrade gracefully when the
# import fails so the module stays importable and the tests below can be skipped.
try:
    from colossalai.auto_parallel.tensor_shard.initialize import initialize_model

    NO_CODEGEN = False
except Exception:  # was a bare `except:`; don't swallow KeyboardInterrupt/SystemExit
    NO_CODEGEN = True
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import assert_close, rerun_if_address_is_in_use, run_on_environment_flag, spawn
class MLP(torch.nn.Module):
    """Bias-free two-layer MLP: expand to 4x the width, then project back."""

    def __init__(self, in_features):
        super().__init__()
        expanded = in_features * 4
        self.linear_1 = torch.nn.Linear(in_features, expanded, bias=False)
        self.linear_2 = torch.nn.Linear(expanded, in_features, bias=False)

    def forward(self, x):
        return self.linear_2(self.linear_1(x))
def check_compatibility_with_ddp(rank, world_size, port):
    """Check the auto-parallel TP solution composed with PyTorch DDP.

    Runs on 4 ranks as a 2x2 device mesh. Compares the sharded forward output
    and the DDP-averaged gradients against a plain single-device reference.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = MLP(4).cuda()
    # Ranks {0,1} and {2,3} each see one half of the 8-row reference batch.
    if rank in [0, 1]:
        input = torch.arange(0, 16, dtype=torch.float).reshape(4, 4).cuda()
    elif rank in [2, 3]:
        input = torch.arange(16, 32, dtype=torch.float).reshape(4, 4).cuda()
    input_compare = torch.arange(0, 32, dtype=torch.float).reshape(8, 4).cuda()
    output_compare = model(input_compare)
    loss_compare = output_compare.sum()
    loss_compare.backward()
    # Divide by 2: DDP averages gradients over the 2 data-parallel replicas.
    grad_compare = copy.deepcopy(model.linear_1.weight.grad / 2)
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    # [[0, 1]
    #  [2, 3]]
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    meta_args = {"x": torch.rand(4, 4).to("meta")}
    gm, solution = initialize_model(
        model,
        meta_args=meta_args,
        device_mesh=device_mesh,
        return_solution=True,
        solver_preference="tp",
        shard_option="shard_last_axis",
    )
    msg = "| TP strategy combination chosen by auto-parallel solver |"
    msg_length = len(msg)
    # Only rank 0 prints the solver's chosen strategy combination.
    if rank == 0:
        print("=" * msg_length)
        print(msg)
        print("=" * msg_length)
        for strategy in solution:
            print(strategy)
        print("=" * msg_length)
    # Pick the process group along mesh dim 0 that contains this rank
    # (presumably the data-parallel axis — confirm against DeviceMesh docs).
    dp_process_group = None
    for ranks, process_group_handle in device_mesh.process_groups_dict[0]:
        if rank in ranks:
            dp_process_group = process_group_handle
    assert dp_process_group is not None
    gm = DDP(gm, process_group=dp_process_group)
    output = gm(input)
    if rank in (0, 1):
        assert_close(output, output_compare.narrow(0, 0, 4))
    else:
        assert_close(output, output_compare.narrow(0, 4, 4))
    print(f"output on rank{rank} is correct")
    loss = output.sum()
    loss.backward()
    # gm.module is the auto-parallel module; its .module is the original MLP.
    # Each rank holds one shard of linear_1's (16, 4) weight gradient:
    # ranks {0,2} rows 0-7, ranks {1,3} rows 8-15.
    if rank in (0, 2):
        assert_close(gm.module.module.linear_1.weight.grad, grad_compare.narrow(0, 0, 8))
    if rank in (1, 3):
        assert_close(gm.module.module.linear_1.weight.grad, grad_compare.narrow(0, 8, 8))
    print(f"gradient on rank{rank} is correct")
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.skipif(NO_CODEGEN, reason="No codegen found")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_compatibility_with_ddp():
    # 4 processes: one per device in the 2x2 mesh.
    spawn(check_compatibility_with_ddp, 4)


if __name__ == "__main__":
    test_compatibility_with_ddp()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_find_repeat_block.py | tests/test_auto_parallel/test_tensor_shard/test_find_repeat_block.py | from typing import Optional, Tuple
import torch
import torch.nn as nn
from torch.fx import GraphModule
from transformers.pytorch_utils import Conv1D
from colossalai._analyzer.fx.passes import shape_prop_pass
# from colossalai.fx.tracer.tracer import ColoTracer
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.utils.factory import find_repeat_blocks
from colossalai.testing import clear_cache_before_run, parameterize, run_on_environment_flag
# Model/graph dimensions shared by the repeat-block detection tests below.
NUM_REPEAT_BLOCKS = 4  # how many MLP blocks the test models stack
BATCH_SIZE = 1
SEQ_LENGTH = 32
HIDDEN_DIM = 384
class RepeatBlock(nn.Module):
    """GPT-style MLP block: Conv1D up-projection, ReLU, Conv1D down-projection."""

    def __init__(self, intermediate_size, hidden_size):
        super().__init__()
        self.c_fc = Conv1D(intermediate_size, hidden_size)
        self.c_proj = Conv1D(hidden_size, intermediate_size)
        self.act = torch.nn.ReLU()

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
        return self.c_proj(self.act(self.c_fc(hidden_states)))
class RepeatModel(nn.Module):
    """Stacks ``num_layers`` identical ``RepeatBlock`` layers sequentially."""

    def __init__(self, intermediate_size, hidden_size, num_layers):
        super().__init__()
        self.blocks = nn.ModuleList(RepeatBlock(intermediate_size, hidden_size) for _ in range(num_layers))

    def forward(self, x):
        out = x
        for layer in self.blocks:
            out = layer(out)
        return out
class NonRepeatBlock(nn.Module):
    """Like ``RepeatBlock``, but the hidden width shrinks with the layer index
    so that no two layers produce identical graph patterns."""

    def __init__(self, intermediate_size, hidden_size, layer_index):
        super().__init__()
        # Layer i uses intermediate_size // (i + 1), making every block unique.
        intermediate_size //= layer_index + 1
        self.c_fc = Conv1D(intermediate_size, hidden_size)
        self.c_proj = Conv1D(hidden_size, intermediate_size)
        self.act = torch.nn.ReLU()

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
        return self.c_proj(self.act(self.c_fc(hidden_states)))
class NonRepeatModel(nn.Module):
    """Stacks ``num_layers`` mutually different ``NonRepeatBlock`` layers."""

    def __init__(self, intermediate_size, hidden_size, num_layers):
        super().__init__()
        self.blocks = nn.ModuleList(
            NonRepeatBlock(intermediate_size, hidden_size, idx) for idx in range(num_layers)
        )

    def forward(self, x):
        out = x
        for layer in self.blocks:
            out = layer(out)
        return out
@run_on_environment_flag(name="AUTO_PARALLEL")
@clear_cache_before_run()
@parameterize("model_cls", [RepeatModel, NonRepeatModel])
def test_repeat_blocks(model_cls):
    """Check that ``find_repeat_blocks`` detects repeated sub-graphs.

    ``RepeatModel`` stacks identical blocks and must yield exactly
    ``NUM_REPEAT_BLOCKS`` matches; ``NonRepeatModel``'s blocks all differ
    and must yield none.
    """
    model = model_cls(4 * HIDDEN_DIM, HIDDEN_DIM, NUM_REPEAT_BLOCKS)
    tracer = ColoTracer(bias_addition_split=True)
    input_sample = {"x": torch.rand(BATCH_SIZE, SEQ_LENGTH, HIDDEN_DIM).to("meta")}
    graph = tracer.trace(root=model, meta_args=input_sample)
    gm = GraphModule(model, graph, model.__class__.__name__)
    shape_prop_pass(gm, *input_sample.values())
    gm.recompile()
    node_list = list(graph.nodes)
    root_module = graph.owning_module
    common_blocks = find_repeat_blocks(node_list, root_module, common_length_threshold=10)
    total_num_nodes = len(list(graph.nodes))
    # remove the input placeholder node and the output node
    num_repeat_nodes_per_block = (total_num_nodes - 2) // NUM_REPEAT_BLOCKS
    for common_block in common_blocks:
        print(common_block)
    if model_cls == RepeatModel:
        assert len(common_blocks) == NUM_REPEAT_BLOCKS
        assert len(common_blocks[0]) == num_repeat_nodes_per_block
    elif model_cls == NonRepeatModel:
        assert len(common_blocks) == 0


if __name__ == "__main__":
    test_repeat_blocks()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_checkpoint.py | tests/test_auto_parallel/test_tensor_shard/test_checkpoint.py | from typing import Optional, Tuple
import pytest
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from transformers.pytorch_utils import Conv1D
try:
from colossalai.auto_parallel.tensor_shard.initialize import initialize_model
NO_CODEGEN = False
except:
NO_CODEGEN = True
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import rerun_if_address_is_in_use, run_on_environment_flag, spawn
HIDDEN_SIZE = 16
class GPT2MLPWithCkpt(nn.Module):
def __init__(self, intermediate_size, hidden_size):
super().__init__()
embed_dim = hidden_size
self.c_fc = Conv1D(intermediate_size, embed_dim)
self.c_proj = Conv1D(embed_dim, intermediate_size)
self.act = torch.nn.ReLU()
def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
hidden_states = self.c_fc(hidden_states)
hidden_states = checkpoint(self.c_proj, hidden_states)
hidden_states = self.act(hidden_states)
return hidden_states
def check_act_ckpt(rank, world_size, port):
disable_existing_loggers()
launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
model = GPT2MLPWithCkpt(intermediate_size=4 * HIDDEN_SIZE, hidden_size=HIDDEN_SIZE)
torch.rand(1, 64, HIDDEN_SIZE)
input_sample = {
"hidden_states": torch.rand(1, 64, HIDDEN_SIZE).to("meta"),
}
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
# [[0, 1]
# [2, 3]]
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
gm = initialize_model(model, input_sample, device_mesh)
code = gm.module.graph.python_code("self").src
assert (
"runtime_comm_spec_apply_1 = colossalai_auto_parallel_passes_runtime_apply_pass_runtime_comm_spec_apply(linear_1, comm_actions_dict, 12, 'linear_1')"
in code
)
assert (
"view_3 = torch.utils.checkpoint.checkpoint(self.checkpoint_0, view_1, comm_actions_dict, use_reentrant=False)"
in code
)
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.skipif(NO_CODEGEN, reason="No codegen found")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_mlp_layer():
spawn(check_act_ckpt, 4)
if __name__ == "__main__":
test_mlp_layer()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_gpt/test_solver_with_gpt_module.py | tests/test_auto_parallel/test_tensor_shard/test_gpt/test_solver_with_gpt_module.py | import torch
import transformers
from torch.fx import GraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.options import SolverOptions
from colossalai.auto_parallel.tensor_shard.solver import CostGraph, Solver, StrategiesConstructor
from colossalai.device.device_mesh import DeviceMesh
from colossalai.tensor.shape_consistency import ShapeConsistencyManager
from colossalai.testing import clear_cache_before_run, parameterize
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from tests.test_auto_parallel.test_tensor_shard.test_gpt.gpt_modules import GPT2MLP, GPT2Attention, GPT2Block, GPT2Model
BATCH_SIZE = 1
SEQ_LENGTH = 32
HIDDEN_DIM = 384
@run_on_environment_flag(name="AUTO_PARALLEL")
@clear_cache_before_run()
@parameterize("model_cls", [GPT2Block, GPT2Attention, GPT2MLP, GPT2Model])
def test_self_attention_block(model_cls):
config = transformers.GPT2Config(n_position=64, n_layer=2, n_head=16, n_embd=HIDDEN_DIM)
if model_cls == GPT2MLP:
model = model_cls(intermediate_size=4 * config.hidden_size, config=config)
else:
model = model_cls(config=config)
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
# [[0, 1]
# [2, 3]]
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
ShapeConsistencyManager()
tracer = ColoTracer(bias_addition_split=True)
if model_cls == GPT2MLP:
input_sample = {
"hidden_states": torch.rand(BATCH_SIZE, SEQ_LENGTH, HIDDEN_DIM).to("meta"),
}
elif model_cls in (GPT2Attention, GPT2Block):
input_sample = {
"hidden_states": torch.rand(BATCH_SIZE, SEQ_LENGTH, HIDDEN_DIM).to("meta"),
"attention_mask": torch.rand(1, SEQ_LENGTH).to("meta"),
}
else:
input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
kwargs = dict(input_ids=input_ids, attention_mask=attention_mask)
input_sample = {k: v.to("meta") for k, v in kwargs.items()}
graph = tracer.trace(root=model, meta_args=input_sample)
gm = GraphModule(model, graph, model.__class__.__name__)
shape_prop_pass(gm, *input_sample.values())
print(gm.graph)
gm.recompile()
solver_options = SolverOptions()
strategies_constructor = StrategiesConstructor(graph, device_mesh, solver_options)
strategies_constructor.build_strategies_and_cost()
cost_graph = CostGraph(strategies_constructor.leaf_strategies)
cost_graph.simplify_graph()
solver = Solver(gm.graph, strategies_constructor, cost_graph, memory_budget=-1)
solver.call_solver_serialized_args()
strategies_list = solver.last_s_val
nodes = [strategies_vector.node for strategies_vector in strategies_constructor.leaf_strategies]
computation_cost = 0
communication_cost = 0
memory_cost = 0
for index, node in enumerate(nodes):
print(node.name, node.strategies_vector[strategies_list[index]].name)
computation_cost += node.strategies_vector[strategies_list[index]].compute_cost.total
communication_cost += node.strategies_vector[strategies_list[index]].communication_cost.total
node_memory_cost = node.strategies_vector[strategies_list[index]].memory_cost.total
if isinstance(node_memory_cost, tuple):
node_memory_cost = node_memory_cost[0]
memory_cost += node_memory_cost.activation + node_memory_cost.parameter
print(f"computation cost is {computation_cost}")
print(f"communication cost is {communication_cost}")
print(f"memory cost is {memory_cost}")
if __name__ == "__main__":
test_self_attention_block()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_gpt/test_runtime_with_gpt_modules.py | tests/test_auto_parallel/test_tensor_shard/test_gpt/test_runtime_with_gpt_modules.py | import copy
import random
from typing import Dict
import numpy as np
import pytest
import torch
import transformers
from torch.fx import GraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
# from colossalai.fx.tracer.tracer import ColoTracer
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
try:
from colossalai.auto_parallel.tensor_shard.initialize import (
ModuleWrapper,
build_strategy_constructor,
solve_solution,
transform_to_sharded_model,
)
NO_CODEGEN = False
except:
NO_CODEGEN = True
from colossalai.auto_parallel.tensor_shard.sharding_strategy import ShardingSpec
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.tensor.shape_consistency import to_global
from colossalai.testing import assert_close, assert_close_loose, parameterize, rerun_if_address_is_in_use, spawn
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from tests.test_auto_parallel.test_tensor_shard.test_gpt.gpt_modules import GPT2MLP, GPT2Attention, GPT2Block, GPT2Model
BATCH_SIZE = 1
SEQ_LENGTH = 32
HIDDEN_DIM = 768
seed = 128
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def _check_module_grad(
module: torch.nn.Module,
origin_param_dict: Dict[str, torch.Tensor],
best_sharding_spec_dict: Dict[str, ShardingSpec],
):
for name, param in module.named_parameters():
param_grad = param.grad
name = name.replace("module.", "")
origin_param_grad = origin_param_dict[name].grad
atoms = name.split(".")
new_name = "_".join(atoms)
if new_name in best_sharding_spec_dict:
param_sharding_spec = best_sharding_spec_dict[new_name]
grad_to_compare = copy.deepcopy(param_grad)
param_grad_global = to_global(grad_to_compare, param_sharding_spec)
try:
assert_close_loose(param_grad_global, origin_param_grad, rtol=1e-03, atol=1e-05)
except:
difference = param_grad_global - origin_param_grad
avg_diff = difference.abs().sum() / difference.numel()
assert avg_diff < 0.001
print(f"{name} param has {avg_diff} average difference")
def check_attention_layer(rank, model_cls, world_size, port):
disable_existing_loggers()
launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
config = transformers.GPT2Config(n_position=64, n_layer=2, n_head=16, n_embd=HIDDEN_DIM)
if model_cls == GPT2MLP:
model = model_cls(intermediate_size=4 * config.hidden_size, config=config).to("cuda")
else:
model = model_cls(config=config).to("cuda")
test_model = copy.deepcopy(model)
input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
token_type_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
hidden_states = torch.rand((BATCH_SIZE, SEQ_LENGTH, HIDDEN_DIM), dtype=torch.float32)
if model_cls == GPT2MLP:
input_sample = (hidden_states.to("cuda"),)
test_input_sample = copy.deepcopy(input_sample)
meta_input_sample = {
"hidden_states": hidden_states.to("meta"),
}
elif model_cls in (GPT2Attention, GPT2Block):
input_sample = (
hidden_states.to("cuda"),
attention_mask.to("cuda"),
)
test_input_sample = copy.deepcopy(input_sample)
meta_input_sample = {
"hidden_states": hidden_states.to("meta"),
"attention_mask": attention_mask.to("meta"),
}
else:
input_sample = (
input_ids.to("cuda"),
attention_mask.to("cuda"),
)
test_input_sample = copy.deepcopy(input_sample)
meta_input_sample = {
"input_ids": input_ids.to("meta"),
"attention_mask": attention_mask.to("meta"),
}
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
# [[0, 1]
# [2, 3]]
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
tracer = ColoTracer(bias_addition_split=True)
graph = tracer.trace(root=model, meta_args=meta_input_sample)
gm = GraphModule(model, graph, model.__class__.__name__)
shape_prop_pass(gm, *meta_input_sample.values())
gm.recompile()
strategies_constructor = build_strategy_constructor(graph, device_mesh, "standard", "replicated", "standard")
solution = solve_solution(gm, strategies_constructor, memory_budget=-1)
gm, sharding_spec_dicts = transform_to_sharded_model(
gm, meta_input_sample, solution, device_mesh, strategies_constructor
)
gm = ModuleWrapper(gm, *sharding_spec_dicts)
nodes = [strategies_vector.node for strategies_vector in strategies_constructor.leaf_strategies]
best_sharding_spec_dict = {}
for index, node in enumerate(nodes):
best_sharding_spec_dict[node.name] = node.sharding_spec
cuda_rng_state = torch.cuda.get_rng_state()
cpu_rng_state = torch.get_rng_state()
origin_output = test_model(*test_input_sample)
torch.cuda.set_rng_state(cuda_rng_state)
torch.set_rng_state(cpu_rng_state)
output = gm(*input_sample)
assert_close(output, origin_output, rtol=1e-03, atol=1e-03)
# *******************backward starting*******************
cuda_rng_state = torch.cuda.get_rng_state()
cpu_rng_state = torch.get_rng_state()
output.sum().backward()
torch.set_rng_state(cpu_rng_state)
torch.cuda.set_rng_state(cuda_rng_state)
origin_output.sum().backward()
origin_param_dict = dict(test_model.named_parameters())
if rank == 0:
print("*******************backward starting*******************")
_check_module_grad(gm, origin_param_dict, best_sharding_spec_dict)
if rank == 0:
print("*******************backward finished*******************")
# *******************backward finished*******************
# *******************strategy selected*******************
if rank == 0:
print("*******************strategy selected*******************")
nodes = [strategies_vector.node for strategies_vector in strategies_constructor.leaf_strategies]
computation_cost = 0
communication_cost = 0
memory_cost = 0
for index, node in enumerate(nodes):
print(node.name, node.strategies_vector[solution[index]].name)
computation_cost += node.strategies_vector[solution[index]].compute_cost.total
communication_cost += node.strategies_vector[solution[index]].communication_cost.total
node_memory_cost = node.strategies_vector[solution[index]].memory_cost.total
if isinstance(node_memory_cost, tuple):
node_memory_cost = node_memory_cost[0]
memory_cost += node_memory_cost.activation + node_memory_cost.parameter
print(f"computation cost is {computation_cost}")
print(f"communication cost is {communication_cost}")
print(f"memory cost is {memory_cost}")
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.skipif(NO_CODEGEN, reason="no codegen module")
@pytest.mark.dist
@parameterize("model_cls", [GPT2MLP, GPT2Block, GPT2Attention, GPT2Model])
@rerun_if_address_is_in_use()
def test_mlp_layer(model_cls):
spawn(check_attention_layer, 4, model_cls=model_cls)
if __name__ == "__main__":
test_mlp_layer()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_gpt/__init__.py | tests/test_auto_parallel/test_tensor_shard/test_gpt/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_gpt/gpt_modules.py | tests/test_auto_parallel/test_tensor_shard/test_gpt/gpt_modules.py | from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from transformers.activations import ACT2FN
from transformers.models.gpt2.modeling_gpt2 import BaseModelOutputWithPastAndCrossAttentions, GPT2PreTrainedModel
from transformers.pytorch_utils import Conv1D
class GPT2MLP(nn.Module):
def __init__(self, intermediate_size, config):
super().__init__()
embed_dim = config.hidden_size
self.c_fc = Conv1D(intermediate_size, embed_dim)
self.c_proj = Conv1D(embed_dim, intermediate_size)
self.act = ACT2FN[config.activation_function]
# We temporarily banned the Dropout layer because the rng state need
# to process to get the correct result.
# self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
# TODO: the rng state need to be fixed for distributed runtime
# hidden_states = self.dropout(hidden_states)
return hidden_states
# The reason Why we don't import GPT2Attention from transformers directly is that:
# 1. The tracer will not work correctly when we feed meta_args and concrete_args at same time,
# so we have to build the customized GPT2Attention class and remove the conditional branch manually.
# 2. The order of split and view op has been changed in the customized GPT2Attention class, the new
# order is same as megatron-lm gpt model.
class GPT2Attention(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
max_positions = config.max_position_embeddings
self.register_buffer(
"bias",
torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
1, 1, max_positions, max_positions
),
)
self.register_buffer("masked_bias", torch.tensor(-1e4))
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
self.split_size = self.embed_dim
self.scale_attn_weights = config.scale_attn_weights
# Layer-wise attention scaling, reordering, and upcasting
self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
self.layer_idx = layer_idx
self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def _attn(self, query, key, value, attention_mask=None, head_mask=None):
attn_weights = torch.matmul(query, key.transpose(-1, -2))
if self.scale_attn_weights:
attn_weights = attn_weights / (value.size(-1) ** 0.5)
# Layer-wise attention scaling
if self.scale_attn_by_inverse_layer_idx:
attn_weights = attn_weights / float(self.layer_idx + 1)
# if only "normal" attention layer implements causal mask
query_length, key_length = query.size(-2), key.size(-2)
causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].to(torch.bool)
attn_weights = torch.where(causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype))
if attention_mask is not None:
# Apply the attention mask
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
# Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
attn_weights = attn_weights.type(value.dtype)
# attn_weights = self.attn_dropout(attn_weights)
# Mask heads if we want to
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_output = torch.matmul(attn_weights, value)
return attn_output, attn_weights
def _split_heads(self, tensor, num_heads, attn_head_size):
new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
tensor = tensor.view(new_shape)
return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def _merge_heads(self, tensor, num_heads, attn_head_size):
tensor = tensor.permute(0, 2, 1, 3).contiguous()
new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
return tensor.view(new_shape)
def forward(
self,
hidden_states: Optional[Tuple[torch.FloatTensor]],
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
# query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
qkv = self.c_attn(hidden_states)
# query = self._split_heads(query, self.num_heads, self.head_dim)
# key = self._split_heads(key, self.num_heads, self.head_dim)
# value = self._split_heads(value, self.num_heads, self.head_dim)
query, key, value = self._split_heads(qkv, self.num_heads, 3 * self.head_dim).split(self.head_dim, dim=3)
(key, value)
attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
attn_output = self.c_proj(attn_output)
# attn_output = self.resid_dropout(attn_output)
return attn_output
class GPT2Block(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
hidden_size = config.hidden_size
inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.attn = GPT2Attention(config, layer_idx=layer_idx)
self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.mlp = GPT2MLP(inner_dim, config)
def forward(
self,
hidden_states: Optional[Tuple[torch.FloatTensor]],
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
residual = hidden_states
# %transformer_h_0_ln_1
hidden_states = self.ln_1(hidden_states)
attn_outputs = self.attn(
hidden_states,
attention_mask=attention_mask,
head_mask=head_mask,
)
# residual connection
hidden_states = attn_outputs + residual
residual = hidden_states
hidden_states = self.ln_2(hidden_states)
feed_forward_hidden_states = self.mlp(hidden_states)
# residual connection
hidden_states = residual + feed_forward_hidden_states
return hidden_states
class GPT2Model(GPT2PreTrainedModel):
_keys_to_ignore_on_load_missing = ["attn.masked_bias"]
def __init__(self, config):
super().__init__(config)
self.embed_dim = config.hidden_size
self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)])
self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
device = input_ids.device
past_length = 0
past_key_values = tuple([None] * len(self.h))
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
# GPT2Attention mask.
attention_mask = attention_mask.view(batch_size, -1)
attention_mask = attention_mask[:, None, None, :]
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# head_mask has shape n_layer x batch x n_heads x N x N
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
# add_2
hidden_states = inputs_embeds + position_embeds
# comment to run pipeline
# add_3
output_shape = input_shape + (hidden_states.size(-1),)
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
outputs = block(hidden_states, attention_mask=attention_mask, head_mask=head_mask[i])
hidden_states = outputs
hidden_states = self.ln_f(hidden_states)
# comment to run pipeline
hidden_states = hidden_states.view(output_shape)
return hidden_states
class GPT2LMHeadModel(GPT2PreTrainedModel):
_keys_to_ignore_on_load_missing = [r"attn.masked_bias", r"attn.bias", r"lm_head.weight"]
def __init__(self, config):
super().__init__(config)
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
# Model parallel
self.model_parallel = False
self.device_map = None
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
):
transformer_outputs = self.transformer(
input_ids=input_ids,
attention_mask=attention_mask,
)
lm_logits = self.lm_head(transformer_outputs)
return lm_logits
class GPTLMLoss(nn.Module):
def __init__(self):
super().__init__()
self.loss_fn = nn.CrossEntropyLoss()
def forward(self, logits, labels):
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
return self.loss_fn(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_conv_metainfo.py | tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_conv_metainfo.py | import pytest
import torch
import torch.nn as nn
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.testing.utils import rerun_if_address_is_in_use, spawn
from tests.test_auto_parallel.test_tensor_shard.test_metainfo.utils import mem_test_for_node_strategy
class ConvFunctionModule(nn.Module):
def __init__(self, in_channels=4, out_channels=64, kernel_size=3):
super().__init__()
self.conv_weight = nn.Parameter(torch.randn(out_channels, in_channels, kernel_size, kernel_size))
def forward(self, input):
return nn.functional.conv2d(input, self.conv_weight)
def _conv_module_mem_test(rank, world_size, port, bias):
"""This function is for conv memory test
Test and print real memory cost and estimated, this test will not be executed except with the tag AUTO_PARALLEL
Args:
Args:
rank: device rank
bias: indicate whether conv module need bias
world_size: number of devices
port: port for initializing process group
"""
disable_existing_loggers()
launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
model = nn.Sequential(nn.Conv2d(4, 64, 3, padding=1, bias=bias)).cuda()
input = torch.rand(4, 4, 64, 64).cuda()
input.requires_grad = True
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
# index of target node in computation graph
node_index = 1
# total number of target node strategies
strategy_number = 16
mem_test_for_node_strategy(
rank=rank,
model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=[input],
meta_arg_names=["input"],
)
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_conv_meta_concrete_info_match(bias=False):
spawn(_conv_module_mem_test, 4, bias=bias)
def _conv_function_mem_test(rank, world_size, port):
"""This function is for conv function memory test
Test and print real memory cost and estimated, this test will not be executed except with the tag AUTO_PARALLEL
Args:
rank: device rank
bias: indicate whether conv module need bias
world_size: number of devices
port: port for initializing process group
"""
disable_existing_loggers()
launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
model = ConvFunctionModule().cuda()
input = torch.rand(4, 4, 64, 64).cuda()
input.requires_grad = True
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
# index of target node in computation graph
node_index = 2
# total number of target node strategies
strategy_number = 16
mem_test_for_node_strategy(
rank=rank,
model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=[input],
meta_arg_names=["input"],
)
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_conv_function_concrete_info_match():
spawn(_conv_function_mem_test, 4)
if __name__ == "__main__":
# test_conv_meta_concrete_info_match()
test_conv_function_concrete_info_match()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_binary_elementwise_metainfo.py | tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_binary_elementwise_metainfo.py | import pytest
import torch
import torch.nn as nn
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.testing.utils import rerun_if_address_is_in_use, spawn
from tests.test_auto_parallel.test_tensor_shard.test_metainfo.utils import mem_test_for_node_strategy
class BinaryElementwiseOpModule(nn.Module):
def __init__(self, token=torch.add, shape=64) -> None:
super().__init__()
self.token = token
self.param = nn.Parameter(torch.rand(shape))
def forward(self, input):
return input + self.param
def _binary_elementwise_mem_test(rank, world_size, port):
"""This function is for binary elementwise ops memory test
Test and print real memory cost and estimated, this test will not be executed except with the tag AUTO_PARALLEL
Args:
rank: device rank
bias: indicate whether conv module need bias
world_size: number of devices
port: port for initializing process group
"""
disable_existing_loggers()
launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
model = BinaryElementwiseOpModule(token=torch.add, shape=1024).cuda()
input = torch.rand(32, 1024).cuda()
input.requires_grad = True
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
# index of target node in computation graph
node_index = 2
# total number of target node strategies
strategy_number = 9
mem_test_for_node_strategy(
rank=rank,
model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=[input],
meta_arg_names=["input"],
)
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_binary_elementwise_meta_concrete_info_match():
spawn(_binary_elementwise_mem_test, 4)
if __name__ == "__main__":
test_binary_elementwise_meta_concrete_info_match()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_linear_metainfo.py | tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_linear_metainfo.py | import pytest
import torch
import torch.nn as nn
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.testing.utils import rerun_if_address_is_in_use, spawn
from tests.test_auto_parallel.test_tensor_shard.test_metainfo.utils import mem_test_for_node_strategy
class MyModule(nn.Module):
def __init__(self, in_features=64, out_features=128):
super().__init__()
self.fc_weight = nn.Parameter(torch.randn(out_features, in_features))
def forward(self, input):
return nn.functional.linear(input, self.fc_weight)
def _linear_module_mem_test(rank, world_size, port):
"""This function is for linear memory test
Test and print real memory cost and estimated, this test will not be executed except with the tag AUTO_PARALLEL
Args:
rank: device rank
bias: indicate whether linear module need bias
world_size: number of devices
port: port for initializing process group
"""
disable_existing_loggers()
launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
model = nn.Sequential(nn.Linear(64, 128, bias=False)).cuda()
input = torch.rand(8, 8, 16, 64).cuda()
input.requires_grad = True
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
# memory test
mem_test_for_node_strategy(
rank=rank,
model=model,
device_mesh=device_mesh,
node_index=1,
strategy_number=13,
input_args=[input],
meta_arg_names=["input"],
)
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_linear_module_meta_concrete_info_match():
spawn(_linear_module_mem_test, 4)
def _linear_function_mem_test(rank, world_size, port):
    """Memory test for ``F.linear`` called as a function (via ``MyModule``).

    Compares real and estimated memory cost for every sharding strategy of the
    linear-function node; only executed when the AUTO_PARALLEL flag is set.

    Args:
        rank: device rank
        world_size: number of devices
        port: port for initializing process group
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = MyModule().cuda()
    input = torch.rand(8, 8, 16, 64).cuda()
    input.requires_grad = True
    # 2 x 2 logical mesh over 4 physical devices
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    # memory test: node 2 is the F.linear call in the traced graph; 24 strategies expected
    mem_test_for_node_strategy(
        rank=rank,
        model=model,
        device_mesh=device_mesh,
        node_index=2,
        strategy_number=24,
        input_args=[input],
        meta_arg_names=["input"],
    )
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_linear_function_meta_concrete_info_match():
    """Spawn 4 workers and compare estimated vs. real memory for the F.linear function node."""
    world_size = 4
    spawn(_linear_function_mem_test, world_size)
if __name__ == "__main__":
    # manual entry point (requires 4 CUDA devices and the AUTO_PARALLEL flag)
    # test_linear_module_meta_concrete_info_match()
    test_linear_function_meta_concrete_info_match()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_tensor_metainfo.py | tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_tensor_metainfo.py | import pytest
import torch
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType
from colossalai.testing.utils import clear_cache_before_run
from tests.test_auto_parallel.test_tensor_shard.test_metainfo.utils import print_results
if torch.__version__ >= "1.12.0":
from colossalai.auto_parallel.meta_profiler import meta_register
class SplitModule(nn.Module):
    """Module whose forward splits its input into chunks of 512 rows along dim 0."""

    def __init__(self) -> None:
        super().__init__()

    def forward(self, x):
        # functional form of x.split(512, dim=0); returns a tuple of views
        return torch.split(x, 512, dim=0)
@pytest.mark.skipif(torch.__version__ < "1.12.0", reason="need pytorch 1.12.0 or higher for aten level operations")
@clear_cache_before_run()
def test_tensor_meta_info():
    """Compare estimated vs. measured memory cost of ``torch.Tensor.split``.

    The estimate comes from the registered meta function; the measurement runs a
    real forward/backward pass on CUDA and reads allocator statistics.
    """
    meta_func = meta_register.get(torch.Tensor.split)
    # construct meta tensors (no real allocation happens on device="meta")
    input_tensor = torch.rand(1024, 1024, device="meta")
    output_tensor = input_tensor.split(512, dim=0)
    # construct operation data
    input_data = OperationData(
        name="input",
        data=input_tensor,
        type=OperationDataType.ARG,
        logical_shape=input_tensor.shape,
    )
    output_data = OperationData(
        name="output",
        data=output_tensor,
        type=OperationDataType.OUTPUT,
        logical_shape=input_tensor.shape,
    )
    # split size/dim argument; logical_shape is None because it is not a tensor
    split_info_data = OperationData(
        name="split_info",
        type=OperationDataType.ARG,
        data=0,
        logical_shape=None,
    )
    # construct args
    args = [input_data, output_data, split_info_data]
    kwargs = {"inplace": False}
    # estimated results
    compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out = meta_func(*args, **kwargs)
    # actual results
    model = SplitModule()
    input_real_tensor = torch.rand(1024, 1024).cuda()
    input_real_tensor.requires_grad = True
    # fwd: measure allocated/peak memory deltas around the forward pass
    torch.cuda.reset_peak_memory_stats()
    mem_stamp0 = torch.cuda.memory_allocated()
    output_real_tensor = model(input_real_tensor)
    fwd_allocated = torch.cuda.memory_allocated() - mem_stamp0
    fwd_peak = torch.cuda.max_memory_allocated() - mem_stamp0
    # bwd: split returns a tuple, so one upstream grad per output chunk is needed
    upstream_grad = [torch.rand_like(tensor) for tensor in output_real_tensor]
    torch.cuda.reset_peak_memory_stats()
    mem_stamp0 = torch.cuda.memory_allocated()
    torch.autograd.backward(output_real_tensor, upstream_grad)
    bwd_allocated = torch.cuda.memory_allocated() - mem_stamp0
    bwd_peak = torch.cuda.max_memory_allocated() - mem_stamp0
    print_results(
        [input_real_tensor],
        output_real_tensor,
        compute_cost,
        memory_cost,
        fwd_allocated,
        fwd_peak,
        bwd_allocated,
        bwd_peak,
    )
if __name__ == "__main__":
    # manual entry point for running the test outside pytest (requires CUDA)
    test_tensor_meta_info()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_embedding_metainfo.py | tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_embedding_metainfo.py | import pytest
import torch
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType
from colossalai.testing.utils import clear_cache_before_run
from tests.test_auto_parallel.test_tensor_shard.test_metainfo.utils import print_results
if torch.__version__ >= "1.12.0":
from colossalai.auto_parallel.meta_profiler import meta_register
@pytest.mark.skipif(torch.__version__ < "1.12.0", reason="need pytorch 1.12.0 or higher for aten level operations")
@clear_cache_before_run()
def test_embedding_meta_info():
    """Compare estimated vs. measured memory cost of ``torch.nn.Embedding``.

    The estimate comes from the registered meta function; the measurement runs a
    real forward/backward pass on CUDA and reads allocator statistics.
    """
    meta_func = meta_register.get(torch.nn.Embedding)
    # construct meta tensors (GPT-2 sized vocab: 50257 entries, hidden size 1024)
    input_tensor = torch.randint(0, 50256, (8, 1024), device="meta")
    weight_tensor = torch.rand(50257, 1024, device="meta")
    output_tensor = torch.rand(8, 1024, 1024, device="meta")
    # construct operation data
    input_data = OperationData(name="input", type=OperationDataType.ARG, data=input_tensor)
    weight_data = OperationData(name="weight", type=OperationDataType.PARAM, data=weight_tensor)
    output_data = OperationData(name="output", type=OperationDataType.OUTPUT, data=output_tensor)
    # construct args and kwargs
    args = [input_data, weight_data, output_data]
    kwargs = {"inplace": False}
    # estimated results
    compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out = meta_func(*args, **kwargs)
    # actual results
    input_real_tensor = torch.randint(0, 50256, (8, 1024), device="cuda")
    embedding_module = torch.nn.Embedding(50257, 1024).cuda()
    # fwd: measure allocated/peak memory deltas around the forward pass
    torch.cuda.reset_peak_memory_stats()
    mem_stamp0 = torch.cuda.memory_allocated()
    output_real_tensor = embedding_module(input_real_tensor)
    fwd_allocated = torch.cuda.memory_allocated() - mem_stamp0
    fwd_peak = torch.cuda.max_memory_allocated() - mem_stamp0
    # bwd: measure around the backward pass with a random upstream gradient
    upstream_grad = torch.rand_like(output_real_tensor)
    torch.cuda.reset_peak_memory_stats()
    mem_stamp0 = torch.cuda.memory_allocated()
    torch.autograd.backward(output_real_tensor, upstream_grad)
    bwd_allocated = torch.cuda.memory_allocated() - mem_stamp0
    bwd_peak = torch.cuda.max_memory_allocated() - mem_stamp0
    print_results(
        [input_real_tensor],
        [output_real_tensor],
        compute_cost,
        memory_cost,
        fwd_allocated,
        fwd_peak,
        bwd_allocated,
        bwd_peak,
    )
if __name__ == "__main__":
    # manual entry point for running the test outside pytest (requires CUDA)
    test_embedding_meta_info()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_metainfo/utils.py | tests/test_auto_parallel/test_tensor_shard/test_metainfo/utils.py | import copy
from pprint import pprint
from typing import Dict, List, Optional
import torch
from torch.fx import GraphModule
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes import shape_prop_pass
# from colossalai.fx.tracer.tracer import ColoTracer
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.passes.runtime_apply_pass import runtime_apply_pass
from colossalai.auto_parallel.passes.runtime_preparation_pass import runtime_preparation_pass
from colossalai.auto_parallel.tensor_shard.options import SolverOptions
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationDataType, TrainCycleItem
from colossalai.auto_parallel.tensor_shard.solver import StrategiesConstructor
from colossalai.device.device_mesh import DeviceMesh
if torch.__version__ >= "1.12.0":
from colossalai.auto_parallel.meta_profiler import ShardMetaInfo
def mem_test_for_node_strategy(
    rank: int,
    model: torch.nn.Module,
    device_mesh: DeviceMesh,
    node_index: int,
    strategy_number: int,
    input_args: List[torch.Tensor],
    meta_arg_names: List[str],
    input_kwargs: Optional[Dict[str, torch.Tensor]] = None,
):
    """Compare estimated vs. measured memory cost for every strategy of one graph node.

    For each strategy index the model is re-traced, sharded according to that
    strategy, run once forward and backward, and the real CUDA allocator numbers
    are printed next to the ``ShardMetaInfo`` estimates (rank 0 only).

    Args:
        rank: device rank of the current process
        model: model to trace and shard
        device_mesh: logical device mesh used for sharding
        node_index: index of the target node in the traced computation graph
        strategy_number: total number of sharding strategies of the target node
        input_args: positional input tensors
        meta_arg_names: meta-arg names matching ``input_args`` one-to-one
        input_kwargs: optional keyword input tensors (default: none)
    """
    # BUGFIX: the default used to be a mutable ``{}`` shared across all calls;
    # use the None sentinel instead to avoid accidental cross-call state.
    if input_kwargs is None:
        input_kwargs = {}
    for strategy_index in range(strategy_number):
        # We need to copy the model to avoid do backward more than once in same graph
        model_to_shard, args_to_shard, kwargs_to_shard = (
            copy.deepcopy(model),
            copy.deepcopy(input_args),
            copy.deepcopy(input_kwargs),
        )
        tracer = ColoTracer(bias_addition_split=True)
        # build meta args mirroring the shapes of the real inputs
        input_sample = {}
        for input_arg, meta_arg_name in zip(input_args, meta_arg_names):
            input_sample[meta_arg_name] = torch.rand(input_arg.shape).to("meta")
        for meta_kwarg_name, input_kwarg in input_kwargs.items():
            input_sample[meta_kwarg_name] = torch.rand(input_kwarg.shape).to("meta")
        graph = tracer.trace(root=model_to_shard, meta_args=input_sample)
        gm = ColoGraphModule(model_to_shard, graph, model_to_shard.__class__.__name__)
        shape_prop_pass(gm, *input_sample.values())
        gm.recompile()
        solver_options = SolverOptions()
        strategies_constructor = StrategiesConstructor(graph, device_mesh, solver_options)
        strategies_constructor.build_strategies_and_cost()
        target_node = list(graph.nodes)[node_index]
        # solution construction
        # construct the strategy for the target node: strategy 0 everywhere except
        # the node under test, which gets the strategy currently being measured
        solution_len = len(strategies_constructor.leaf_strategies)
        solution = [0] * solution_len
        solution[node_index] = strategy_index
        # construct the strategy for the output node so its sharding spec matches
        # the target node's output spec
        placeholder_strategy = list(graph.nodes)[-1].strategies_vector[0]
        output_key = next(
            key
            for key in target_node.strategies_vector[strategy_index].sharding_specs.keys()
            if key.type == OperationDataType.OUTPUT
        )
        placeholder_strategy.sharding_specs[output_key] = target_node.strategies_vector[strategy_index].sharding_specs[
            output_key
        ]
        gm, sharding_spec_dict, origin_spec_dict, comm_actions_dict = runtime_preparation_pass(
            gm, solution, device_mesh, strategies_constructor
        )
        gm = runtime_apply_pass(gm)
        gm.recompile()
        gm: GraphModule
        num_of_strategies = len(target_node.strategies_vector)
        if rank == 0:
            print("=======================")
            print(f"#strategy_index: {strategy_index + 1}/{num_of_strategies}")
            pprint(target_node.strategies_vector[strategy_index])
        # warmup pass (no grad) so lazy allocations don't pollute the measurement
        with torch.no_grad():
            output = gm(
                *args_to_shard,
                sharding_spec_convert_dict=sharding_spec_dict,
                origin_node_sharding_spec_dict=origin_spec_dict,
                comm_actions_dict=comm_actions_dict,
                **kwargs_to_shard,
            )
        del output
        # forward memory compare
        if rank == 0:
            torch.cuda.reset_peak_memory_stats()
            mem_stamp0 = torch.cuda.memory_allocated()
        output = gm(
            *args_to_shard,
            sharding_spec_convert_dict=sharding_spec_dict,
            origin_node_sharding_spec_dict=origin_spec_dict,
            comm_actions_dict=comm_actions_dict,
            **kwargs_to_shard,
        )
        if rank == 0:
            # print forward memory allocated and peak memory stats in kb
            print(
                f"forward memory allocated: {(torch.cuda.memory_allocated() - mem_stamp0) / 1024} kb, peak memory stats: {(torch.cuda.max_memory_allocated() - mem_stamp0) / 1024} kb"
            )
        # backward memory compare
        grad_tensors = torch.ones_like(output)
        torch.cuda.reset_peak_memory_stats()
        mem_stamp0 = torch.cuda.memory_allocated()
        torch.autograd.backward(output, grad_tensors)
        if rank == 0:
            # print backward memory allocated and peak memory stats in kb
            print(
                f"backward memory allocated: {(torch.cuda.memory_allocated() - mem_stamp0) / 1024} kb, peak memory stats: {(torch.cuda.max_memory_allocated() - mem_stamp0) / 1024} kb"
            )
            # estimated memory: modules need the submodule object, functions the target
            if target_node.op == "call_module":
                metainfo = ShardMetaInfo(
                    target_node.strategies_vector[strategy_index],
                    target_node.graph.owning_module.get_submodule(target_node.target),
                )
            else:
                metainfo = ShardMetaInfo(target_node.strategies_vector[strategy_index], target_node.target)
            print("estimated memory:")
            print(
                f"forward activation: {metainfo.memory_cost.fwd.activation / 1024} kb, forward param: {metainfo.memory_cost.fwd.parameter / 1024} kb"
            )
            print(
                f"forward temp: {metainfo.memory_cost.fwd.temp / 1024} kb, forward buffer: {metainfo.memory_cost.fwd.buffer / 1024} kb"
            )
            print(
                f"backward activation: {metainfo.memory_cost.bwd.activation / 1024} kb, backward param: {metainfo.memory_cost.bwd.parameter / 1024} kb"
            )
            print(
                f"backward temp: {metainfo.memory_cost.bwd.temp / 1024} kb, backward buffer: {metainfo.memory_cost.bwd.buffer / 1024} kb"
            )
            print("=======================")
def print_results(
    input: List[torch.Tensor],
    output: List[torch.Tensor],
    compute_cost: TrainCycleItem,
    memory_cost: TrainCycleItem,
    fwd_allocated,
    fwd_peak,
    bwd_allocated,
    bwd_peak,
):
    """Report estimated costs next to the measured allocator numbers.

    Args:
        input (List[torch.Tensor]): input tensors
        output (List[torch.Tensor]): output tensors
        compute_cost (TrainCycleItem): compute cost estimated by meta_func
        memory_cost (TrainCycleItem): memory cost estimated by meta_func
        fwd_allocated: real forward memory allocated
        fwd_peak: real forward peak memory stats
        bwd_allocated: real backward memory allocated
        bwd_peak: real backward peak memory stats
    """
    print("=====================")
    print(f"input shapes: {[tensor.shape for tensor in input]}")
    print(f"output shapes: {[tensor.shape for tensor in output]}")
    # --- estimated section ---
    print("Estimated Results")
    print("compute_cost:")
    print(f" fwd: {compute_cost.fwd}")
    print(f" bwd: {compute_cost.bwd}")
    print("memory_cost:")
    # identical four-line report for the forward and backward phase estimates
    for phase_name, phase_cost in (("fwd", memory_cost.fwd), ("bwd", memory_cost.bwd)):
        print(f" {phase_name} activation: {phase_cost.activation / 1024} KB")
        print(f" {phase_name} buffer: {phase_cost.buffer / 1024} KB")
        print(f" {phase_name} temp: {phase_cost.temp / 1024} KB")
        print(f" {phase_name} parameter: {phase_cost.parameter / 1024} KB")
    # --- measured section ---
    print("Actual Results")
    print("memory_cost:")
    for phase_name, allocated, peak in (("fwd", fwd_allocated, fwd_peak), ("bwd", bwd_allocated, bwd_peak)):
        print(f" {phase_name} allocated: {allocated / 1024} KB")
        print(f" {phase_name} peak: {peak / 1024} KB")
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_activation_metainfo.py | tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_activation_metainfo.py | import pytest
import torch
from colossalai.auto_parallel.meta_profiler import meta_register
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType
from colossalai.testing.utils import clear_cache_before_run, parameterize
from tests.test_auto_parallel.test_tensor_shard.test_metainfo.utils import print_results
@pytest.mark.skipif(torch.__version__ < "1.12.0", reason="need pytorch 1.12.0 or higher for aten level operations")
@clear_cache_before_run()
@parameterize(
    "func",
    [
        torch.nn.functional.softmax,
        torch.nn.functional.relu,
        torch.tanh,
        torch.nn.functional.dropout,
    ],
)
def test_activation_meta_info(func):
    """Compare estimated vs. measured memory cost of elementwise activation ops.

    Args:
        func: the activation callable under test (injected by ``@parameterize``)
    """
    meta_func = meta_register.get(func)
    # construct meta tensors
    input_tensor = torch.rand(256, 1024, device="meta")
    output_tensor = torch.rand(256, 1024, device="meta")
    # NOTE(review): the same dim arg is passed for every activation, not just
    # softmax — presumably the meta funcs ignore it when irrelevant; verify.
    softmax_dim = 0
    # construct operation data
    input_data = OperationData(name="input", type=OperationDataType.ARG, data=input_tensor)
    output_data = OperationData(name="output", type=OperationDataType.OUTPUT, data=output_tensor)
    softmax_dim_data = OperationData(name="softmax_dim", type=OperationDataType.ARG, data=softmax_dim)
    # construct args and kwargs
    args = [input_data, softmax_dim_data, output_data]
    kwargs = {"inplace": False}
    # estimated results
    compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out = meta_func(*args, **kwargs)
    # actual results
    input_real_tensor = torch.rand(256, 1024, device="cuda")
    input_real_tensor.requires_grad = True
    # fwd: measure allocated/peak memory deltas around the forward pass
    torch.cuda.reset_peak_memory_stats()
    mem_stamp0 = torch.cuda.memory_allocated()
    output_real_tensor = func(input_real_tensor)
    fwd_allocated = torch.cuda.memory_allocated() - mem_stamp0
    fwd_peak = torch.cuda.max_memory_allocated() - mem_stamp0
    # bwd: measure around the backward pass with a random upstream gradient
    upstream_grad = torch.rand_like(output_real_tensor)
    torch.cuda.reset_peak_memory_stats()
    mem_stamp0 = torch.cuda.memory_allocated()
    torch.autograd.backward(output_real_tensor, upstream_grad)
    bwd_allocated = torch.cuda.memory_allocated() - mem_stamp0
    bwd_peak = torch.cuda.max_memory_allocated() - mem_stamp0
    print_results(
        [input_real_tensor],
        [output_real_tensor],
        compute_cost,
        memory_cost,
        fwd_allocated,
        fwd_peak,
        bwd_allocated,
        bwd_peak,
    )
if __name__ == "__main__":
    # manual entry point; @parameterize supplies `func`, so no argument is passed here
    test_activation_meta_info()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_where_metainfo.py | tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_where_metainfo.py | import pytest
import torch
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, TrainCycleItem
from colossalai.testing.utils import clear_cache_before_run
from tests.test_auto_parallel.test_tensor_shard.test_metainfo.utils import print_results
if torch.__version__ >= "1.12.0":
from colossalai.auto_parallel.meta_profiler import meta_register
@pytest.mark.skipif(torch.__version__ < "1.12.0", reason="need pytorch 1.12.0 or higher for aten level operations")
@clear_cache_before_run()
def test_where_meta_info():
    """Compare estimated vs. measured memory cost of ``torch.where``.

    The condition broadcasts from (1, 1, 1024, 1024) and y is a scalar tensor,
    exercising the broadcasting path of the meta function.
    """
    meta_func = meta_register.get(torch.where)
    # construct meta tensors; the boolean mask is built on CPU first, then moved to meta
    condition_tensor = torch.rand(1, 1, 1024, 1024) > 0.5
    condition_tensor = condition_tensor.to(device="meta")
    x_tensor = torch.rand(8, 16, 1024, 1024, device="meta")
    y_tensor = torch.tensor(0, device="meta")
    # NOTE(review): output_tensor is a real CPU tensor, unlike the meta inputs —
    # presumably only its shape matters to the meta function; verify.
    output_tensor = torch.rand(8, 16, 1024, 1024)
    # construct operation data
    condition_data = OperationData(
        name="condition",
        data=condition_tensor,
        type=OperationDataType.ARG,
        logical_shape=condition_tensor.shape,
    )
    x_data = OperationData(
        name="x",
        data=x_tensor,
        type=OperationDataType.ARG,
        logical_shape=x_tensor.shape,
    )
    y_data = OperationData(
        name="y",
        data=y_tensor,
        type=OperationDataType.ARG,
        logical_shape=y_tensor.shape,
    )
    output_data = OperationData(
        name="output",
        data=output_tensor,
        type=OperationDataType.OUTPUT,
        logical_shape=output_tensor.shape,
    )
    # construct args and kwargs
    args = [condition_data, x_data, y_data, output_data]
    kwargs = {"inplace": False}
    # estimated results
    compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out = meta_func(*args, **kwargs)
    # actual results
    condition_real_tensor = torch.rand(1, 1, 1024, 1024) > 0.5
    condition_real_tensor = condition_real_tensor.to(device="cuda")
    x_real_tensor = torch.rand(8, 16, 1024, 1024, device="cuda")
    y_real_tensor = torch.tensor(0.0, device="cuda")
    x_real_tensor.requires_grad = True
    y_real_tensor.requires_grad = True
    # fwd: measure allocated/peak memory deltas around the forward pass
    torch.cuda.reset_peak_memory_stats()
    mem_stamp0 = torch.cuda.memory_allocated()
    output_real_tensor = torch.where(condition_real_tensor, x_real_tensor, y_real_tensor)
    fwd_allocated = torch.cuda.memory_allocated() - mem_stamp0
    fwd_peak = torch.cuda.max_memory_allocated() - mem_stamp0
    # bwd: measure around the backward pass with a random upstream gradient
    upstream_grad = torch.rand_like(output_real_tensor)
    torch.cuda.reset_peak_memory_stats()
    mem_stamp0 = torch.cuda.memory_allocated()
    torch.autograd.backward(output_real_tensor, upstream_grad)
    bwd_allocated = torch.cuda.memory_allocated() - mem_stamp0
    bwd_peak = torch.cuda.max_memory_allocated() - mem_stamp0
    compute_cost: TrainCycleItem
    memory_cost: TrainCycleItem
    print_results(
        [condition_real_tensor, x_real_tensor, y_real_tensor],
        [output_real_tensor],
        compute_cost,
        memory_cost,
        fwd_allocated,
        fwd_peak,
        bwd_allocated,
        bwd_peak,
    )
if __name__ == "__main__":
    # manual entry point for running the test outside pytest (requires CUDA)
    test_where_meta_info()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_pooling_metainfo.py | tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_pooling_metainfo.py | import pytest
import torch
import torch.nn as nn
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.testing.utils import rerun_if_address_is_in_use, spawn
from tests.test_auto_parallel.test_tensor_shard.test_metainfo.utils import mem_test_for_node_strategy
def _adaptiveavgpool_module_mem_test(rank, world_size, port):
    """Memory test for an ``nn.AdaptiveAvgPool2d`` module node.

    Compares real and estimated memory cost; only executed when the
    AUTO_PARALLEL environment flag is set.

    Args:
        rank: device rank
        world_size: number of devices
        port: port for initializing process group
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = nn.Sequential(nn.AdaptiveAvgPool2d((16, 16))).cuda()
    input = torch.rand(4, 128, 64, 64).cuda()
    input.requires_grad = True
    # 2 x 2 logical mesh over 4 physical devices
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    # index of target node in computation graph
    node_index = 1
    # total number of target strategies (adaptive pooling exposes a single one)
    strategy_number = 1
    mem_test_for_node_strategy(
        rank=rank,
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=[input],
        meta_arg_names=["input"],
    )
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_adaptiveavgpool_meta_concrete_info_match():
    """Spawn 4 workers and compare estimated vs. real memory for AdaptiveAvgPool2d."""
    world_size = 4
    spawn(_adaptiveavgpool_module_mem_test, world_size)
def _maxpool_module_mem_test(rank, world_size, port):
    """Memory test for an ``nn.MaxPool2d`` module node.

    Compares real and estimated memory cost; only executed when the
    AUTO_PARALLEL environment flag is set.

    Args:
        rank: device rank
        world_size: number of devices
        port: port for initializing process group
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = nn.Sequential(nn.MaxPool2d((16, 16))).cuda()
    input = torch.rand(4, 128, 64, 64).cuda()
    input.requires_grad = True
    # 2 x 2 logical mesh over 4 physical devices
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    # index of target node in computation graph
    node_index = 1
    # total number of target node strategies
    strategy_number = 9
    mem_test_for_node_strategy(
        rank=rank,
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=[input],
        meta_arg_names=["input"],
    )
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_maxpool_meta_concrete_info_match():
    """Spawn 4 workers and compare estimated vs. real memory for MaxPool2d."""
    world_size = 4
    spawn(_maxpool_module_mem_test, world_size)
if __name__ == "__main__":
    # manual entry points; each spawns 4 processes and requires the AUTO_PARALLEL flag
    test_adaptiveavgpool_meta_concrete_info_match()
    test_maxpool_meta_concrete_info_match()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_norm_metainfo.py | tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_norm_metainfo.py | import pytest
import torch
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, TrainCycleItem
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.testing.utils import parameterize, rerun_if_address_is_in_use, spawn
from tests.test_auto_parallel.test_tensor_shard.test_metainfo.utils import mem_test_for_node_strategy, print_results
if torch.__version__ >= "1.12.0":
from colossalai.auto_parallel.meta_profiler import meta_register
def _batchnorm_module_mem_test(rank, world_size, port):
    """Memory test for an ``nn.BatchNorm2d`` module node.

    Compares real and estimated memory cost; only executed when the
    AUTO_PARALLEL environment flag is set.

    Args:
        rank: device rank
        world_size: number of devices
        port: port for initializing process group
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = nn.Sequential(nn.BatchNorm2d(128)).cuda()
    input = torch.rand(4, 128, 64, 64).cuda()
    input.requires_grad = True
    # 2 x 2 logical mesh over 4 physical devices
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    # index of target node in computation graph
    node_index = 1
    # total number of target node strategies
    strategy_number = 9
    mem_test_for_node_strategy(
        rank=rank,
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=[input],
        meta_arg_names=["input"],
    )
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_batchnorm_meta_concrete_info_match():
    """Spawn 4 workers and compare estimated vs. real memory for BatchNorm2d."""
    world_size = 4
    spawn(_batchnorm_module_mem_test, world_size)
@pytest.mark.skipif(torch.__version__ < "1.12.0", reason="need pytorch 1.12.0 or higher for aten level operations")
@parameterize(
    "tensor_shape",
    [
        [256, 1024],
        [1024, 256],
    ],
)
def test_layernorm_meta_info(tensor_shape):
    """Compare estimated vs. measured memory cost of ``torch.nn.LayerNorm``.

    Args:
        tensor_shape: 2-D input shape; normalization runs over the last dim
            (injected by ``@parameterize``)
    """
    meta_func = meta_register.get(torch.nn.LayerNorm)
    # construct input
    input_tensor = torch.rand(*tensor_shape, device="meta")
    output_tensor = torch.rand(*tensor_shape, device="meta")
    # weight/bias have the size of the normalized (last) dimension
    weight_tensor = torch.rand(tensor_shape[1], device="meta")
    bias_tensor = torch.rand(tensor_shape[1], device="meta")
    # construct operation data
    input_data = OperationData(name="input", type=OperationDataType.ARG, data=input_tensor)
    output_data = OperationData(name="output", type=OperationDataType.OUTPUT, data=output_tensor)
    weight_data = OperationData(name="weight", type=OperationDataType.PARAM, data=weight_tensor)
    bias_data = OperationData(name="bias", type=OperationDataType.PARAM, data=bias_tensor)
    # construct args and kwargs
    args = [input_data, output_data, weight_data, bias_data]
    kwargs = {"inplace": False}
    # estimated results
    compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out = meta_func(*args, **kwargs)
    # actual results
    input_real_tensor = torch.rand(*tensor_shape, device="cuda:0")
    input_real_tensor.requires_grad = True
    ln_module = torch.nn.LayerNorm(tensor_shape[1]).cuda()
    # fwd: measure allocated/peak memory deltas around the forward pass
    torch.cuda.reset_peak_memory_stats()
    mem_stamp0 = torch.cuda.memory_allocated()
    output_real_tensor = ln_module(input_real_tensor)
    fwd_allocated = torch.cuda.memory_allocated() - mem_stamp0
    fwd_peak = torch.cuda.max_memory_allocated() - mem_stamp0
    # bwd: measure around the backward pass with a random upstream gradient
    upstream_grad = torch.rand_like(output_real_tensor)
    torch.cuda.reset_peak_memory_stats()
    mem_stamp0 = torch.cuda.memory_allocated()
    torch.autograd.backward(output_real_tensor, upstream_grad)
    bwd_allocated = torch.cuda.memory_allocated() - mem_stamp0
    bwd_peak = torch.cuda.max_memory_allocated() - mem_stamp0
    compute_cost: TrainCycleItem
    memory_cost: TrainCycleItem
    print_results(
        [input_real_tensor],
        [output_real_tensor],
        compute_cost,
        memory_cost,
        fwd_allocated,
        fwd_peak,
        bwd_allocated,
        bwd_peak,
    )
if __name__ == "__main__":
    # manual entry points; the batchnorm test needs 4 GPUs and the AUTO_PARALLEL flag
    test_batchnorm_meta_concrete_info_match()
    test_layernorm_meta_info()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_matmul_metainfo.py | tests/test_auto_parallel/test_tensor_shard/test_metainfo/test_matmul_metainfo.py | import pytest
import torch
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, TrainCycleItem
from colossalai.testing.utils import clear_cache_before_run, parameterize
from tests.test_auto_parallel.test_tensor_shard.test_metainfo.utils import print_results
if torch.__version__ >= "1.12.0":
from colossalai.auto_parallel.meta_profiler import meta_register
@pytest.mark.skipif(torch.__version__ < "1.12.0", reason="need pytorch 1.12.0 or higher for aten level operations")
@clear_cache_before_run()
@parameterize(
    "tensor_shapes",
    [
        [[128], [128]],  # dot product
        [[64, 128], [128]],  # mat-vec
        [[128], [128, 64]],  # vec-mat
        [[64, 64, 128], [128]],  # batched mat-vec
        [[128], [64, 128, 64]],  # vec-batched mat
        [[64, 128], [128, 192]],  # mat-mat
        [[64, 64, 128], [128, 192]],  # batched mat-mat
        [[64, 128], [64, 128, 192]],  # mat-batched mat
        [[64, 64, 128], [64, 128, 192]],  # batched mat-batched mat (matched batch dims)
        [[64, 1, 64, 128], [64, 128, 192]],  # batched mat-batched mat (unmatched batch dims)
    ],
)
def test_matmul_function_meta_info(tensor_shapes):
    """Compare estimated vs. measured memory cost of ``torch.matmul``.

    Covers every matmul dispatch case, from dot product through broadcasting
    batched matmul.

    Args:
        tensor_shapes: ``[input_shape, other_shape]`` pair (injected by
            ``@parameterize``)
    """
    meta_func = meta_register.get(torch.matmul)
    # construct meta tensors; the output shape is derived by actually running
    # matmul on the meta device
    input_tensor = torch.rand(*tensor_shapes[0], device="meta")
    other_tensor = torch.rand(*tensor_shapes[1], device="meta")
    output_tensor = torch.matmul(input_tensor, other_tensor)
    # construct operation data
    input_data = OperationData(
        name="input",
        data=input_tensor,
        type=OperationDataType.ARG,
        logical_shape=input_tensor.shape,
    )
    other_data = OperationData(
        name="other",
        data=other_tensor,
        type=OperationDataType.ARG,
        logical_shape=other_tensor.shape,
    )
    output_data = OperationData(
        name="output",
        data=output_tensor,
        type=OperationDataType.OUTPUT,
        logical_shape=output_tensor.shape,
    )
    # construct args and kwargs
    args = [input_data, other_data, output_data]
    kwargs = {"inplace": False}
    # estimated results
    compute_cost, memory_cost, fwd_in, fwd_buffer, fwd_out = meta_func(*args, **kwargs)
    # actual results
    input_real_tensor = torch.rand(*tensor_shapes[0], device="cuda:0")
    other_real_tensor = torch.rand(*tensor_shapes[1], device="cuda:0")
    input_real_tensor.requires_grad = True
    other_real_tensor.requires_grad = True
    # fwd: measure allocated/peak memory deltas around the forward pass
    torch.cuda.reset_peak_memory_stats()
    mem_stamp0 = torch.cuda.memory_allocated()
    output_real_tensor = torch.matmul(input_real_tensor, other_real_tensor)
    fwd_allocated = torch.cuda.memory_allocated() - mem_stamp0
    fwd_peak = torch.cuda.max_memory_allocated() - mem_stamp0
    # bwd: measure around the backward pass with a random upstream gradient
    upstream_grad = torch.rand_like(output_real_tensor)
    torch.cuda.reset_peak_memory_stats()
    mem_stamp0 = torch.cuda.memory_allocated()
    torch.autograd.backward(output_real_tensor, upstream_grad)
    bwd_allocated = torch.cuda.memory_allocated() - mem_stamp0
    bwd_peak = torch.cuda.max_memory_allocated() - mem_stamp0
    compute_cost: TrainCycleItem
    memory_cost: TrainCycleItem
    print_results(
        [input_real_tensor, other_real_tensor],
        [output_real_tensor],
        compute_cost,
        memory_cost,
        fwd_allocated,
        fwd_peak,
        bwd_allocated,
        bwd_peak,
    )
if __name__ == "__main__":
    # manual entry point; @parameterize iterates all shape pairs (requires CUDA)
    test_matmul_function_meta_info()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_layer_norm_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_layer_norm_handler.py | import pytest
import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler.layer_norm_handler import LayerNormModuleHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import rerun_if_address_is_in_use, spawn
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
def check_ln_module_handler(rank, world_size, port):
    """Per-rank worker that validates ``LayerNormModuleHandler``.

    Launches the distributed backend, numerically checks every generated
    sharding strategy of a single ``nn.LayerNorm(16)`` against the
    unsharded result, then re-traces the model and asserts the handler's
    operation-data mapping and the expected strategy names.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = nn.Sequential(nn.LayerNorm(16)).cuda()
    # 2x2 logical mesh over 4 physical devices.
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    input = torch.rand(4, 16).cuda()
    # the index of bn node in computation graph
    node_index = 1
    # the total number of ln strategies
    strategy_number = 4
    # construct input args
    input_args = [input]
    # construct meta arg names
    meta_arg_names = ["input"]
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=input_args,
        meta_arg_names=meta_arg_names,
    )
    # Re-trace the model symbolically so the handler can be inspected directly.
    tracer = ColoTracer(bias_addition_split=True)
    # graph():
    # %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
    # %_0 : [#users=1] = call_module[target=0](args = (%input_1,), kwargs = {})
    # return _0
    meta_args = {"input": torch.rand(4, 16).to("meta")}
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    ln_mod_node = list(graph.nodes)[1]
    strategies_vector = StrategiesVector(ln_mod_node)
    # build handler
    handler = LayerNormModuleHandler(node=ln_mod_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.logical_shape is not None
        assert op_data.data is not None
    assert mapping["input"].name == "input_1"
    assert mapping["input"].data.shape == torch.Size([4, 16])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([4, 16])
    assert mapping["other"].name == "weight"
    assert mapping["other"].data.shape == torch.Size([16])
    assert mapping["other"].type == OperationDataType.PARAM
    assert mapping["other"].logical_shape == torch.Size([16])
    assert mapping["bias"].name == "bias"
    assert mapping["bias"].data.shape == torch.Size([16])
    assert mapping["bias"].type == OperationDataType.PARAM
    assert mapping["bias"].logical_shape == torch.Size([16])
    assert mapping["output"].name == "_0"
    assert mapping["output"].data.shape == torch.Size([4, 16])
    assert mapping["output"].type == OperationDataType.OUTPUT
    strategies_vector = handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    # Only the batch dim may be sharded; the normalized dim stays replicated.
    # SR = SR x R
    assert "[S0, R] = [S0, R] x [R]" in strategy_name_list
    assert "[S1, R] = [S1, R] x [R]" in strategy_name_list
    # RR = RR x R
    assert "RR = RR x R" in strategy_name_list
    # S01R = S01R x R
    assert "[S01, R] = [S01, R] x [R]" in strategy_name_list
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_ln_module_handler():
    """Spawn 4 workers (one per device of the 2x2 mesh) for the LayerNorm check."""
    spawn(check_ln_module_handler, 4)
# Allow running this test file directly without pytest.
if __name__ == "__main__":
    test_ln_module_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_shard_option.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_shard_option.py | import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.options import ShardOption
from colossalai.auto_parallel.tensor_shard.sharding_strategy import StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.testing import clear_cache_before_run, run_on_environment_flag
class LinearModel(nn.Module):
    """Thin module that forwards straight to ``nn.functional.linear``.

    Used by the shard-option test so the linear op appears as a traceable
    function call in the computation graph.
    """

    def __init__(self):
        super().__init__()

    def forward(self, input, others, bias=None):
        # No state on the module: output depends only on the arguments.
        return nn.functional.linear(input, others, bias=bias)
def check_shard_option(shard_option):
    """Trace a functional-linear graph and assert the strategies that
    ``LinearFunctionHandler`` generates under the given ``ShardOption``.

    For ``SHARD_LAST_AXIS`` only last-axis strategies are expected and the
    function returns early; the other options additionally expect the full
    matmul strategy set checked below.
    """
    model = LinearModel().cuda()
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
    tracer = ColoTracer(bias_addition_split=True)
    meta_args = {"input": torch.rand(4, 4, 4, 16).to("meta"), "others": torch.rand(32, 16).to("meta")}
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    linear_func_node = list(graph.nodes)[2]
    strategies_vector = StrategiesVector(linear_func_node)
    # build handler
    handler = LinearFunctionHandler(
        node=linear_func_node, device_mesh=device_mesh, strategies_vector=strategies_vector, shard_option=shard_option
    )
    strategies_vector = handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    if shard_option == ShardOption.SHARD_LAST_AXIS:
        # In this mode only strategies sharding the last axis are produced.
        # RR = RS x SR
        assert "RR = RS1 x S1R" in strategy_name_list
        # RS= RR x RS
        assert "RS1 = RR x RS1" in strategy_name_list
        return
    # SS = SR x RS
    assert "S1S0 = S1R x RS0_0" in strategy_name_list
    assert "S0S1 = S0R x RS1_1" in strategy_name_list
    assert "S0S1 = S0R x RS1_2" in strategy_name_list
    assert "S0S1 = S0R x RS1_0" in strategy_name_list
    assert "S1S0 = S1R x RS0_1" in strategy_name_list
    assert "S1S0 = S1R x RS0_2" in strategy_name_list
    # SR = SS x SR
    assert "S0R = S0S1 x S1R_1" in strategy_name_list
    assert "S0R = S0S1 x S1R_2" in strategy_name_list
    assert "S1R = S1S0 x S0R_0" in strategy_name_list
    assert "S0R = S0S1 x S1R_0" in strategy_name_list
    assert "S1R = S1S0 x S0R_1" in strategy_name_list
    assert "S1R = S1S0 x S0R_2" in strategy_name_list
    # RS = RS x SS
    assert "RS0 = RS1 x S1S0" in strategy_name_list
    assert "RS1 = RS0 x S0S1" in strategy_name_list
    # S01R = S01R x RR
    assert "S01R = S01R x RR_0" in strategy_name_list
    assert "S01R = S01R x RR_1" in strategy_name_list
    assert "S01R = S01R x RR_2" in strategy_name_list
    # RR = RS01 x S01R
    assert "RR = RS01 x S01R" in strategy_name_list
    # RS01 = RR x RS01
    assert "RS01 = RR x RS01" in strategy_name_list
    if shard_option == ShardOption.SHARD:
        # RR = RS x SR
        assert "RR = RS0 x S0R" in strategy_name_list
        assert "RR = RS1 x S1R" in strategy_name_list
        # RS= RR x RS
        assert "RS0 = RR x RS0" in strategy_name_list
        assert "RS1 = RR x RS1" in strategy_name_list
    if shard_option == ShardOption.STANDARD:
        # RR = RS x SR
        assert "RR = RS0 x S0R" in strategy_name_list
        assert "RR = RS1 x S1R" in strategy_name_list
        # RS= RR x RS
        assert "RS0 = RR x RS0" in strategy_name_list
        assert "RS1 = RR x RS1" in strategy_name_list
        # RR = RR x RR
        assert "RR = RR x RR" in strategy_name_list
@run_on_environment_flag(name="AUTO_PARALLEL")
@clear_cache_before_run()
def test_shard_option():
    """Exercise strategy generation for each enabled ``ShardOption``.

    NOTE(review): only SHARD_LAST_AXIS is currently enabled; the full
    option list is kept commented out below.
    """
    # for shard_option in [ShardOption.STANDARD, ShardOption.SHARD, ShardOption.FULL_SHARD, ShardOption.SHARD_LAST_AXIS]:
    for shard_option in [ShardOption.SHARD_LAST_AXIS]:
        check_shard_option(shard_option)
# Allow running this test file directly without pytest.
if __name__ == "__main__":
    test_shard_option()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_unary_element_wise_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_unary_element_wise_handler.py | import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler.conv_handler import ConvFunctionHandler
from colossalai.auto_parallel.tensor_shard.node_handler.unary_elementwise_handler import UnaryElementwiseHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.testing import clear_cache_before_run, run_on_environment_flag
class ReLuModel(nn.Module):
    """conv2d followed by ReLU, used to test strategy propagation through
    a unary element-wise node."""

    def __init__(self):
        super().__init__()
        self.act = torch.nn.ReLU()

    def forward(self, input, other):
        # The convolution output feeds directly into the activation.
        return self.act(nn.functional.conv2d(input, other))
@run_on_environment_flag(name="AUTO_PARALLEL")
@clear_cache_before_run()
def test_elementwise_handler():
    """Check that ``UnaryElementwiseHandler`` follows its predecessor node.

    Traces conv2d -> ReLU, registers strategies on both nodes, then asserts
    the ReLU node's operation-data mapping and that the ReLU node produced
    exactly one strategy per conv strategy.
    """
    model = ReLuModel()
    tracer = ColoTracer(bias_addition_split=True)
    # graph():
    # %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
    # %other : torch.Tensor [#users=1] = placeholder[target=other]
    # %conv2d : [#users=1] = call_function[target=torch.conv2d](args = (%input_1, %other), kwargs = {})
    # %act : [#users=1] = call_module[target=act](args = (%conv2d,), kwargs = {})
    # return act
    meta_args = {
        "input": torch.rand(4, 4, 64, 64).to("meta"),
        "other": torch.rand(16, 4, 3, 3).to("meta"),
    }
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
    conv_mod_node = list(graph.nodes)[2]
    relu_mod_node = list(graph.nodes)[3]
    relu_strategies_vector = StrategiesVector(relu_mod_node)
    conv_strategies_vector = StrategiesVector(conv_mod_node)
    # build handler
    conv_handler = ConvFunctionHandler(
        node=conv_mod_node, device_mesh=device_mesh, strategies_vector=conv_strategies_vector
    )
    conv_handler.register_strategy(compute_resharding_cost=False)
    # The relu handler reads its predecessor's strategies from this attribute.
    setattr(conv_mod_node, "strategies_vector", conv_strategies_vector)
    relu_handler = UnaryElementwiseHandler(
        node=relu_mod_node, device_mesh=device_mesh, strategies_vector=relu_strategies_vector
    )
    relu_handler.register_strategy(compute_resharding_cost=False)
    # check operation data mapping
    mapping = relu_handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.data is not None
    assert mapping["input"].name == "conv2d"
    assert mapping["input"].data.is_meta
    assert mapping["input"].data.shape == torch.Size([4, 16, 62, 62])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([4, 16, 62, 62])
    assert mapping["output"].name == "act"
    assert mapping["output"].data.is_meta
    assert mapping["output"].data.shape == torch.Size([4, 16, 62, 62])
    assert mapping["output"].type == OperationDataType.OUTPUT
    # getitem is a following strategy handler, so the number of strategies is equal to the predecessor node.
    assert len(relu_strategies_vector) == len(conv_strategies_vector)
# Allow running this test file directly without pytest.
if __name__ == "__main__":
    test_elementwise_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_bias_linear_module_node.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_bias_linear_module_node.py | import pytest
import torch
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
OperationData,
OperationDataType,
ShardingStrategy,
StrategiesVector,
)
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import rerun_if_address_is_in_use, run_on_environment_flag, spawn
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
class LinearModule(torch.nn.Module):
    """Single ``nn.Linear`` layer; the model traced by the bias-module test."""

    def __init__(self, in_features, out_features, bias):
        super().__init__()
        self.linear = torch.nn.Linear(in_features, out_features, bias=bias)

    def forward(self, x):
        return self.linear(x)
def check_linear_module_handler(rank, world_size, port, bias):
    """Per-rank worker validating ``LinearFunctionHandler`` on a traced
    ``nn.Linear`` module.

    Numerically checks all 24 linear strategies against the unsharded
    result, then re-traces the model (with the bias addition split off)
    and asserts the handler's operation-data mapping, the expected
    strategy names, and sharding-spec consistency across
    input/weight/output.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = LinearModule(16, 32, bias=bias).cuda()
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    input = torch.rand(4, 4, 4, 16).cuda()
    # the index of linear node in computation graph
    node_index = 3
    # strategy number of linear node
    strategy_number = 24
    # construct input args
    input_args = [input]
    # construct meta arg names
    meta_arg_names = ["x"]
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=input_args,
        meta_arg_names=meta_arg_names,
        node_type="bias_module",
    )
    tracer = ColoTracer(bias_addition_split=True)
    meta_args = {"x": torch.rand(4, 4, 4, 16).to("meta")}
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    linear_mod_node = list(graph.nodes)[3]
    strategies_vector = StrategiesVector(linear_mod_node)
    # build handler
    handler = LinearFunctionHandler(node=linear_mod_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.logical_shape is not None
        assert op_data.data is not None
    # The 4-D input is treated logically as a (64, 16) matrix for the matmul.
    assert mapping["input"].name == "x"
    assert mapping["input"].data.shape == torch.Size([4, 4, 4, 16])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([64, 16])
    assert mapping["other"].name == "linear_weight"
    assert mapping["other"].data.shape == torch.Size([32, 16])
    assert mapping["other"].type == OperationDataType.PARAM
    assert mapping["other"].logical_shape == torch.Size([16, 32])
    # bias_addition_split=True moves the bias into a separate add node,
    # so it must not show up in the linear node's mapping.
    assert "bias" not in mapping
    assert mapping["output"].name == "linear"
    assert mapping["output"].data.shape == torch.Size([4, 4, 4, 32])
    assert mapping["output"].type == OperationDataType.OUTPUT
    strategies_vector = handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    # SS = SR x RS
    assert "S0S1 = S0R x RS1_0" in strategy_name_list
    assert "S0S1 = S0R x RS1_1" in strategy_name_list
    assert "S0S1 = S0R x RS1_2" in strategy_name_list
    assert "S1S0 = S1R x RS0_0" in strategy_name_list
    assert "S1S0 = S1R x RS0_1" in strategy_name_list
    assert "S1S0 = S1R x RS0_2" in strategy_name_list
    # SR = SS x SR
    assert "S0R = S0S1 x S1R_0" in strategy_name_list
    assert "S0R = S0S1 x S1R_1" in strategy_name_list
    assert "S0R = S0S1 x S1R_2" in strategy_name_list
    assert "S1R = S1S0 x S0R_0" in strategy_name_list
    assert "S1R = S1S0 x S0R_1" in strategy_name_list
    assert "S1R = S1S0 x S0R_2" in strategy_name_list
    # RS = RS x SS
    assert "RS0 = RS1 x S1S0" in strategy_name_list
    assert "RS1 = RS0 x S0S1" in strategy_name_list
    # RR = RS x SR
    assert "RR = RS0 x S0R" in strategy_name_list
    assert "RR = RS1 x S1R" in strategy_name_list
    # RS= RR x RS
    assert "RS0 = RR x RS0" in strategy_name_list
    assert "RS1 = RR x RS1" in strategy_name_list
    # S01R = S01R x RR
    assert "S01R = S01R x RR_0" in strategy_name_list
    assert "S01R = S01R x RR_1" in strategy_name_list
    assert "S01R = S01R x RR_2" in strategy_name_list
    # RR = RS01 x S01R
    assert "RR = RS01 x S01R" in strategy_name_list
    # RS01 = RR x RS01
    assert "RS01 = RR x RS01" in strategy_name_list
    # RR = RR x RR
    assert "RR = RR x RR" in strategy_name_list
    for strategy in strategies_vector:
        strategy: ShardingStrategy
        input_sharding_spec = strategy.get_sharding_spec_by_name("x")
        weight_sharding_spec = strategy.get_sharding_spec_by_name("linear_weight")
        output_sharding_spec = strategy.get_sharding_spec_by_name("linear")
        # make sure the sharding matches across different operation data
        assert input_sharding_spec.sharding_sequence[:-1] == output_sharding_spec.sharding_sequence[:-1]
        assert weight_sharding_spec.sharding_sequence[1] == input_sharding_spec.sharding_sequence[-1]
        assert weight_sharding_spec.sharding_sequence[0] == output_sharding_spec.sharding_sequence[-1]
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_linear_handler(bias=True):
    """Spawn 4 workers (one per device of the 2x2 mesh) to run the check.

    Bug fix: ``spawn`` was previously called without the worker count, so
    it fell back to its default while ``check_linear_module_handler``
    builds a 4-device mesh with ``world_size`` ranks. Pass 4 explicitly,
    matching every sibling node-handler test in this suite.
    """
    spawn(check_linear_module_handler, 4, bias=bias)
# Allow running this test file directly without pytest.
if __name__ == "__main__":
    test_linear_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_binary_elementwise_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_binary_elementwise_handler.py | import pytest
import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler import BinaryElementwiseHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import parameterize, rerun_if_address_is_in_use, run_on_environment_flag, spawn
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
def check_binary_elementwise_handler_with_tensor(rank, world_size, port, op, other_dim):
    """Per-rank worker for the tensor-vs-tensor binary elementwise case.

    ``other_dim`` sets the rank of the second operand (a 1-D operand
    broadcasts against the 2-D first operand). Numerically checks all 9
    strategies, then re-traces and asserts the handler's operation-data
    mapping, the strategy names, and per-strategy sharding consistency.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")

    class BinaryElementwiseOpModel(nn.Module):
        # Minimal wrapper that applies the binary op under test.
        def __init__(self, op):
            super().__init__()
            self.op = op

        def forward(self, x1, x2):
            out = self.op(x1, x2)
            return out

    model = BinaryElementwiseOpModel(op).cuda()
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    x1 = torch.rand(4, 4).cuda()
    x2 = torch.rand([4] * other_dim).cuda()
    # the index of binary-elementwise node in computation graph
    node_index = 2
    # strategy number of binary-elementwise node
    strategy_number = 9
    # construct input args
    input_args = [x1, x2]
    # construct meta arg names
    meta_arg_names = ["x1", "x2"]
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=input_args,
        meta_arg_names=meta_arg_names,
    )
    tracer = ColoTracer(bias_addition_split=True)
    meta_args = {"x1": torch.rand(4, 4).to("meta"), "x2": torch.rand([4] * other_dim).to("meta")}
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    op_node = list(graph.nodes)[2]
    strategies_vector = StrategiesVector(op_node)
    # build handler
    handler = BinaryElementwiseHandler(node=op_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.logical_shape is not None
        assert op_data.data is not None
    assert mapping["input"].name == "x1"
    assert mapping["input"].data.is_meta
    assert mapping["input"].data.shape == torch.Size([4, 4])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([4, 4])
    assert mapping["other"].name == "x2"
    assert mapping["other"].data.is_meta
    assert mapping["other"].data.shape == torch.Size([4] * other_dim)
    # The logical shape of the other operand is the broadcast (2-D) shape.
    assert mapping["other"].type == OperationDataType.ARG
    assert mapping["other"].logical_shape == torch.Size([4, 4])
    assert mapping["output"].name == str(op_node)
    assert mapping["output"].data.is_meta
    assert mapping["output"].data.shape == torch.Size([4, 4])
    assert mapping["output"].type == OperationDataType.OUTPUT
    assert mapping["output"].logical_shape == torch.Size([4, 4])
    strategies_vector = handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    # one strategy will be converted to different physical sharding spec
    assert len(strategy_name_list) == 9
    # check if the sharding strategy is correct
    assert "[S0, S1] = [S0, S1] <binary-elementwise-op> [S0, S1]" in strategy_name_list
    assert "[S1, S0] = [S1, S0] <binary-elementwise-op> [S1, S0]" in strategy_name_list
    assert "[S01, R] = [S01, R] <binary-elementwise-op> [S01, R]" in strategy_name_list
    assert "[R, S01] = [R, S01] <binary-elementwise-op> [R, S01]" in strategy_name_list
    assert "[S0, R] = [S0, R] <binary-elementwise-op> [S0, R]" in strategy_name_list
    assert "[R, S0] = [R, S0] <binary-elementwise-op> [R, S0]" in strategy_name_list
    assert "[S1, R] = [S1, R] <binary-elementwise-op> [S1, R]" in strategy_name_list
    assert "[R, S1] = [R, S1] <binary-elementwise-op> [R, S1]" in strategy_name_list
    assert "[R, R] = [R, R] <binary-elementwise-op> [R, R]" in strategy_name_list
    for strategy in strategies_vector:
        input_sharding_spec = strategy.get_sharding_spec_by_name("x1")
        other_sharding_spec = strategy.get_sharding_spec_by_name("x2")
        output_sharding_spec = strategy.get_sharding_spec_by_name(str(op_node))
        # make sure the sharding spec is the same for input and output
        assert input_sharding_spec.sharding_sequence == output_sharding_spec.sharding_sequence
        # since the dim of the other can change, we make sure at least its last dim sharding is the same
        if len(other_sharding_spec.sharding_sequence) == 2:
            assert input_sharding_spec.sharding_sequence == other_sharding_spec.sharding_sequence
        elif len(other_sharding_spec.sharding_sequence) == 1:
            assert input_sharding_spec.sharding_sequence[-1] == other_sharding_spec.sharding_sequence[-1]
class BEOpModelWithNodeConst(nn.Module):
    """Binary elementwise op between a tensor and a value produced by
    another graph node (``x1.dim()``)."""

    def __init__(self, op):
        super().__init__()
        self.op = op

    def forward(self, x1):
        # x1.dim() is computed at forward time, so it shows up as a node
        # in the traced graph rather than a baked-in constant.
        return self.op(x1, x1.dim())
class BEOpModelWithIntConst(nn.Module):
    """Binary elementwise op between a tensor and a plain Python int
    stored on the module (a true constant at trace time)."""

    def __init__(self, op, const):
        super().__init__()
        self.op = op
        self.const = const

    def forward(self, x1):
        return self.op(x1, self.const)
def check_binary_elementwise_handler_with_int(rank, world_size, port, op, other_dim, model_cls):
    """Per-rank worker for the tensor-vs-scalar binary elementwise case.

    ``model_cls`` selects whether the scalar operand comes from a graph
    node (``BEOpModelWithNodeConst``) or is a plain Python int
    (``BEOpModelWithIntConst``); the binary op then sits at a different
    node index in the traced graph.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    if model_cls == BEOpModelWithNodeConst:
        model = model_cls(op).cuda()
    else:
        # The int-const variant also takes the constant value.
        model = model_cls(op, other_dim).cuda()
    x1 = torch.rand(4, 4).cuda()
    # the index of binary-elementwise node in computation graph
    node_index = 1
    # strategy number of binary-elementwise node
    strategy_number = 9
    # construct input args
    input_args = [x1]
    # construct meta arg names
    meta_arg_names = ["x1"]
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=input_args,
        meta_arg_names=meta_arg_names,
    )
    tracer = ColoTracer(bias_addition_split=True)
    meta_args = {"x1": torch.rand(4, 4).to("meta")}
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    # The node-const model traces an extra node (the dim() call) before the op.
    if model_cls == BEOpModelWithNodeConst:
        op_node = list(graph.nodes)[2]
    else:
        op_node = list(graph.nodes)[1]
    strategies_vector = StrategiesVector(op_node)
    # build handler
    handler = BinaryElementwiseHandler(node=op_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    assert mapping["input"].name == "x1"
    assert mapping["input"].data.is_meta
    assert mapping["input"].data.shape == torch.Size([4, 4])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([4, 4])
    assert mapping["output"].name == str(op_node)
    assert mapping["output"].data.is_meta
    assert mapping["output"].data.shape == torch.Size([4, 4])
    assert mapping["output"].type == OperationDataType.OUTPUT
    assert mapping["output"].logical_shape == torch.Size([4, 4])
    strategies_vector = handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    # one strategy will be converted to different physical sharding spec
    assert len(strategy_name_list) == 9
    # check if the sharding strategy is correct
    assert "[S0, S1] = [S0, S1] <binary-elementwise-op> [S0, S1]" in strategy_name_list
    assert "[S1, S0] = [S1, S0] <binary-elementwise-op> [S1, S0]" in strategy_name_list
    assert "[S01, R] = [S01, R] <binary-elementwise-op> [S01, R]" in strategy_name_list
    assert "[R, S01] = [R, S01] <binary-elementwise-op> [R, S01]" in strategy_name_list
    assert "[S0, R] = [S0, R] <binary-elementwise-op> [S0, R]" in strategy_name_list
    assert "[R, S0] = [R, S0] <binary-elementwise-op> [R, S0]" in strategy_name_list
    assert "[S1, R] = [S1, R] <binary-elementwise-op> [S1, R]" in strategy_name_list
    assert "[R, S1] = [R, S1] <binary-elementwise-op> [R, S1]" in strategy_name_list
    assert "[R, R] = [R, R] <binary-elementwise-op> [R, R]" in strategy_name_list
    for strategy in strategies_vector:
        input_sharding_spec = strategy.get_sharding_spec_by_name("x1")
        output_sharding_spec = strategy.get_sharding_spec_by_name(str(op_node))
        # make sure the sharding spec is the same for input and output
        assert input_sharding_spec.sharding_sequence == output_sharding_spec.sharding_sequence
@run_on_environment_flag(name="AUTO_PARALLEL")
@parameterize("op", [torch.add])
@parameterize("other_dim", [1, 2])
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_binary_elementwise_handler_with_tensor(op, other_dim):
    """Spawn 4 workers to check the tensor-vs-tensor binary elementwise case."""
    spawn(
        check_binary_elementwise_handler_with_tensor,
        4,
        op=op,
        other_dim=other_dim,
    )
@run_on_environment_flag(name="AUTO_PARALLEL")
@parameterize("op", [torch.add])
@parameterize("other_dim", [1, 2])
@parameterize("model_cls", [BEOpModelWithNodeConst, BEOpModelWithIntConst])
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_binary_elementwise_handler_with_int(op, model_cls, other_dim):
    """Spawn 4 workers to check the tensor-vs-scalar binary elementwise case
    for both constant-source model variants."""
    spawn(
        check_binary_elementwise_handler_with_int,
        4,
        op=op,
        model_cls=model_cls,
        other_dim=other_dim,
    )
# Allow running this test file directly without pytest.
if __name__ == "__main__":
    test_binary_elementwise_handler_with_tensor()
    test_binary_elementwise_handler_with_int()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_sum_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_sum_handler.py | import pytest
import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler.linear_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.node_handler.sum_handler import SumHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import parameterize, rerun_if_address_is_in_use, run_on_environment_flag, spawn
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
class LinearSumModel(nn.Module):
    """Bias-free functional linear followed by ``torch.sum`` over the
    configured dimensions."""

    def __init__(self, sum_dims, keepdim):
        super().__init__()
        self.sum_dims = sum_dims
        self.keepdim = keepdim

    def forward(self, input, other):
        linear_out = nn.functional.linear(input, other, bias=None)
        if self.sum_dims is None:
            # No dims configured: call the dim-less overload.
            return torch.sum(linear_out, keepdim=self.keepdim)
        return torch.sum(linear_out, self.sum_dims, keepdim=self.keepdim)
def check_sum_handler(rank, world_size, port, sum_dims, keepdim):
    """Per-rank check of ``SumHandler`` on a 2x2 device mesh.

    Runs a numerical strategy test for the linear node, then traces the model
    and verifies that the sum node inherits one sharding strategy per
    predecessor (linear) strategy, with the expected strategy names.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = LinearSumModel(sum_dims=sum_dims, keepdim=keepdim).cuda()
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    input = torch.rand(8, 16, 64, 32).to("cuda")
    other = torch.rand(64, 32).to("cuda")
    # index of linear node in computation graph
    node_index = 2
    # total number of linear strategies
    strategy_number = 24
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=[input, other],
        meta_arg_names=["input", "other"],
        node_type="following",
    )
    tracer = ColoTracer(bias_addition_split=True)
    # graph():
    # %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
    # %other : torch.Tensor [#users=1] = placeholder[target=other]
    # %linear : [#users=1] = call_function[target=torch._C._nn.linear](args = (%input_1, %other), kwargs = {bias: None})
    # %sum_1 : [#users=1] = call_function[target=torch.sum](args = (%linear,), kwargs = {})
    # return sum_1
    meta_args = {
        "input": torch.rand(8, 16, 64, 32).to("meta"),
        "other": torch.rand(64, 32).to("meta"),
    }
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    # Node order per the traced graph above: [input_1, other, linear, sum_1].
    previous_mod_node = list(graph.nodes)[2]
    sum_node = list(graph.nodes)[3]
    sum_strategies_vector = StrategiesVector(sum_node)
    previous_strategies_vector = StrategiesVector(previous_mod_node)
    # build handler
    assert len(previous_strategies_vector) == 0
    linear_handler = LinearFunctionHandler(
        node=previous_mod_node, device_mesh=device_mesh, strategies_vector=previous_strategies_vector
    )
    linear_handler.register_strategy(compute_resharding_cost=False)
    # SumHandler reads the predecessor's strategies from the node attribute.
    setattr(previous_mod_node, "strategies_vector", previous_strategies_vector)
    sum_handler = SumHandler(node=sum_node, device_mesh=device_mesh, strategies_vector=sum_strategies_vector)
    sum_handler.register_strategy(compute_resharding_cost=False)
    # sum handler is a following strategy handler, so the number of strategies is equal to the predecessor node.
    assert len(sum_strategies_vector) == len(previous_strategies_vector)
    strategy_name_list = [strategy.name for strategy in sum_strategies_vector]
    # check operation data mapping
    mapping = sum_handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.data is not None
    assert mapping["input"].name == "linear"
    assert mapping["input"].data.is_meta
    assert mapping["input"].data.shape == torch.Size([8, 16, 64, 64])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([8, 16, 64, 64])
    assert mapping["output"].name == "sum_1"
    # Recompute the expected output shape with a plain torch.sum on CPU.
    sum_node_shape = torch.empty([8, 16, 64, 64]).sum(sum_dims, keepdim=keepdim).shape
    assert mapping["output"].logical_shape == sum_node_shape
    assert mapping["output"].type == OperationDataType.OUTPUT
    # check strategy name
    if sum_dims == (0, 2) and keepdim == False:
        assert "[R, R, R, R] -> [R, R]_0" in strategy_name_list
        assert "[R, S01, R, R] -> [S01, R]_1" in strategy_name_list
        assert "[R, R, R, R] -> [R, R]_2" in strategy_name_list
        assert "[R, R, R, R] -> [R, R]_3" in strategy_name_list
        assert "[R, R, R, S01] -> [R, S01]_4" in strategy_name_list
        assert "[R, R, R, S1] -> [R, S1]_5" in strategy_name_list
        assert "[R, R, R, S0] -> [R, S0]_6" in strategy_name_list
        assert "[R, R, R, R] -> [R, R]_7" in strategy_name_list
        assert "[R, R, R, R] -> [R, R]_8" in strategy_name_list
        assert "[R, R, R, S0] -> [R, S0]_9" in strategy_name_list
        assert "[R, R, R, S1] -> [R, S1]_10" in strategy_name_list
        assert "[R, R, R, S1] -> [R, S1]_11" in strategy_name_list
        assert "[R, S0, R, S1] -> [S0, S1]_12" in strategy_name_list
        assert "[R, R, R, S1] -> [R, S1]_13" in strategy_name_list
        assert "[R, R, R, S0] -> [R, S0]_14" in strategy_name_list
        assert "[R, S1, R, S0] -> [S1, S0]_15" in strategy_name_list
        assert "[R, R, R, S0] -> [R, S0]_16" in strategy_name_list
        assert "[R, R, R, R] -> [R, R]_17" in strategy_name_list
        assert "[R, S0, R, R] -> [S0, R]_18" in strategy_name_list
        assert "[R, R, R, R] -> [R, R]_19" in strategy_name_list
        assert "[R, R, R, R] -> [R, R]_20" in strategy_name_list
        assert "[R, S1, R, R] -> [S1, R]_21" in strategy_name_list
        assert "[R, R, R, R] -> [R, R]_22" in strategy_name_list
        assert "[R, R, R, R] -> [R, R]_23" in strategy_name_list
    if sum_dims == (0, 2) and keepdim == True:
        assert "[R, R, R, R] -> [R, R, R, R]_0" in strategy_name_list
        assert "[R, S01, R, R] -> [R, S01, R, R]_1" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_2" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_3" in strategy_name_list
        assert "[R, R, R, S01] -> [R, R, R, S01]_4" in strategy_name_list
        assert "[R, R, R, S1] -> [R, R, R, S1]_5" in strategy_name_list
        assert "[R, R, R, S0] -> [R, R, R, S0]_6" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_7" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_8" in strategy_name_list
        assert "[R, R, R, S0] -> [R, R, R, S0]_9" in strategy_name_list
        assert "[R, R, R, S1] -> [R, R, R, S1]_10" in strategy_name_list
        assert "[R, R, R, S1] -> [R, R, R, S1]_11" in strategy_name_list
        assert "[R, S0, R, S1] -> [R, S0, R, S1]_12" in strategy_name_list
        assert "[R, R, R, S1] -> [R, R, R, S1]_13" in strategy_name_list
        assert "[R, R, R, S0] -> [R, R, R, S0]_14" in strategy_name_list
        assert "[R, S1, R, S0] -> [R, S1, R, S0]_15" in strategy_name_list
        assert "[R, R, R, S0] -> [R, R, R, S0]_16" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_17" in strategy_name_list
        assert "[R, S0, R, R] -> [R, S0, R, R]_18" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_19" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_20" in strategy_name_list
        assert "[R, S1, R, R] -> [R, S1, R, R]_21" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_22" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_23" in strategy_name_list
    if sum_dims == 1 and keepdim == False:
        assert "[S01, R, R, R] -> [S01, R, R]_0" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R]_1" in strategy_name_list
        assert "[R, R, S01, R] -> [R, S01, R]_2" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R]_3" in strategy_name_list
        assert "[R, R, R, S01] -> [R, R, S01]_4" in strategy_name_list
        assert "[R, R, R, S1] -> [R, R, S1]_5" in strategy_name_list
        assert "[R, R, R, S0] -> [R, R, S0]_6" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R]_7" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R]_8" in strategy_name_list
        assert "[R, R, R, S0] -> [R, R, S0]_9" in strategy_name_list
        assert "[R, R, R, S1] -> [R, R, S1]_10" in strategy_name_list
        assert "[S0, R, R, S1] -> [S0, R, S1]_11" in strategy_name_list
        assert "[R, R, R, S1] -> [R, R, S1]_12" in strategy_name_list
        assert "[R, R, S0, S1] -> [R, S0, S1]_13" in strategy_name_list
        assert "[S1, R, R, S0] -> [S1, R, S0]_14" in strategy_name_list
        assert "[R, R, R, S0] -> [R, R, S0]_15" in strategy_name_list
        assert "[R, R, S1, S0] -> [R, S1, S0]_16" in strategy_name_list
        assert "[S0, R, R, R] -> [S0, R, R]_17" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R]_18" in strategy_name_list
        assert "[R, R, S0, R] -> [R, S0, R]_19" in strategy_name_list
        assert "[S1, R, R, R] -> [S1, R, R]_20" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R]_21" in strategy_name_list
        assert "[R, R, S1, R] -> [R, S1, R]_22" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R]_23" in strategy_name_list
    if sum_dims == 1 and keepdim == True:
        assert "[S01, R, R, R] -> [S01, R, R, R]_0" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_1" in strategy_name_list
        assert "[R, R, S01, R] -> [R, R, S01, R]_2" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_3" in strategy_name_list
        assert "[R, R, R, S01] -> [R, R, R, S01]_4" in strategy_name_list
        assert "[R, R, R, S1] -> [R, R, R, S1]_5" in strategy_name_list
        assert "[R, R, R, S0] -> [R, R, R, S0]_6" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_7" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_8" in strategy_name_list
        assert "[R, R, R, S0] -> [R, R, R, S0]_9" in strategy_name_list
        assert "[R, R, R, S1] -> [R, R, R, S1]_10" in strategy_name_list
        assert "[S0, R, R, S1] -> [S0, R, R, S1]_11" in strategy_name_list
        assert "[R, R, R, S1] -> [R, R, R, S1]_12" in strategy_name_list
        assert "[R, R, S0, S1] -> [R, R, S0, S1]_13" in strategy_name_list
        assert "[S1, R, R, S0] -> [S1, R, R, S0]_14" in strategy_name_list
        assert "[R, R, R, S0] -> [R, R, R, S0]_15" in strategy_name_list
        assert "[R, R, S1, S0] -> [R, R, S1, S0]_16" in strategy_name_list
        assert "[S0, R, R, R] -> [S0, R, R, R]_17" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_18" in strategy_name_list
        assert "[R, R, S0, R] -> [R, R, S0, R]_19" in strategy_name_list
        assert "[S1, R, R, R] -> [S1, R, R, R]_20" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_21" in strategy_name_list
        assert "[R, R, S1, R] -> [R, R, S1, R]_22" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_23" in strategy_name_list
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
@parameterize("sum_dims", [(0, 2), 1])
@parameterize("keepdim", [False, True])
def test_sum_handler(sum_dims, keepdim):
    # 4 processes to match the 2x2 device mesh built inside check_sum_handler.
    spawn(check_sum_handler, 4, sum_dims=sum_dims, keepdim=keepdim)
# Allow running this distributed test directly (outside pytest).
if __name__ == "__main__":
    test_sum_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_linear_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_linear_handler.py | import pytest
import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler import LinearFunctionHandler, LinearModuleHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
OperationData,
OperationDataType,
ShardingStrategy,
StrategiesVector,
)
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from colossalai.testing.utils import parameterize
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
def check_linear_module_handler(rank, world_size, port, bias, input_shape):
    """Per-rank check of ``LinearModuleHandler`` for ``nn.Linear(16, 32)``.

    Verifies the operation-data mapping (names, shapes, logical shapes), the
    generated strategy names, and that sharding specs agree across input,
    weight, bias and output for every strategy.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = nn.Sequential(nn.Linear(16, 32, bias=bias)).cuda()
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    input = torch.rand(input_shape).cuda()
    # the index of linear node in computation graph
    node_index = 1
    # strategy number of linear node
    if input_shape == (1, 4, 4, 16):
        strategy_number = 19
    else:
        strategy_number = 24
    # construct input args
    input_args = [input]
    # construct meta arg names
    meta_arg_names = ["input"]
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=input_args,
        meta_arg_names=meta_arg_names,
    )
    tracer = ColoTracer(bias_addition_split=True)
    # NOTE(review): sibling tests build meta_args with .to("meta"); this one
    # uses .cuda() — confirm whether a CUDA tensor here is intentional.
    meta_args = {"input": torch.rand(input_shape).cuda()}
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    linear_mod_node = list(graph.nodes)[1]
    strategies_vector = StrategiesVector(linear_mod_node)
    # build handler
    handler = LinearModuleHandler(node=linear_mod_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.logical_shape is not None
        assert op_data.data is not None
    assert mapping["input"].name == "input_1"
    assert mapping["input"].data.shape == torch.Size(input_shape)
    assert mapping["input"].type == OperationDataType.ARG
    # Logical shape flattens all leading dims into one (N, in_features) matrix.
    input_logical_shape = mapping["input"].data.view(-1, 16).shape
    assert mapping["input"].logical_shape == input_logical_shape
    assert mapping["other"].name == "weight"
    assert mapping["other"].data.shape == torch.Size([32, 16])
    assert mapping["other"].type == OperationDataType.PARAM
    # The weight's logical shape is transposed relative to nn.Linear storage.
    assert mapping["other"].logical_shape == torch.Size([16, 32])
    if bias:
        assert mapping["bias"].name == "bias"
        assert mapping["bias"].data.shape == torch.Size([32])
        assert mapping["bias"].type == OperationDataType.PARAM
        assert mapping["bias"].logical_shape == torch.Size([32])
    assert mapping["output"].name == "_0"
    output_shape = input_shape[:-1] + (32,)
    assert mapping["output"].data.shape == torch.Size(output_shape)
    assert mapping["output"].type == OperationDataType.OUTPUT
    output_logical_shape = mapping["output"].data.view(-1, 32).shape
    assert mapping["output"].logical_shape == torch.Size(output_logical_shape)
    strategies_vector = handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    # First dimension cannot be shard if input shape is (1, 4, 4, 16)
    if input_shape != (1, 4, 4, 16):
        assert "S1S0 = S1R x RS0_0" in strategy_name_list
        assert "S0S1 = S0R x RS1_0" in strategy_name_list
        assert "S1R = S1S0 x S0R_0" in strategy_name_list
        assert "S0R = S0S1 x S1R_0" in strategy_name_list
        assert "S01R = S01R x RR_0" in strategy_name_list
    # SS = SR x RS
    assert "S0S1 = S0R x RS1_1" in strategy_name_list
    assert "S0S1 = S0R x RS1_2" in strategy_name_list
    assert "S1S0 = S1R x RS0_1" in strategy_name_list
    assert "S1S0 = S1R x RS0_2" in strategy_name_list
    # SR = SS x SR
    assert "S0R = S0S1 x S1R_1" in strategy_name_list
    assert "S0R = S0S1 x S1R_2" in strategy_name_list
    assert "S1R = S1S0 x S0R_1" in strategy_name_list
    assert "S1R = S1S0 x S0R_2" in strategy_name_list
    # RS = RS x SS
    assert "RS0 = RS1 x S1S0" in strategy_name_list
    assert "RS1 = RS0 x S0S1" in strategy_name_list
    # RR = RS x SR
    assert "RR = RS0 x S0R" in strategy_name_list
    assert "RR = RS1 x S1R" in strategy_name_list
    # RS= RR x RS
    assert "RS0 = RR x RS0" in strategy_name_list
    assert "RS1 = RR x RS1" in strategy_name_list
    # S01R = S01R x RR
    assert "S01R = S01R x RR_1" in strategy_name_list
    assert "S01R = S01R x RR_2" in strategy_name_list
    # RR = RS01 x S01R
    assert "RR = RS01 x S01R" in strategy_name_list
    # RS01 = RR x RS01
    assert "RS01 = RR x RS01" in strategy_name_list
    # RR = RR x RR
    assert "RR = RR x RR" in strategy_name_list
    for strategy in strategies_vector:
        strategy: ShardingStrategy
        input_sharding_spec = strategy.get_sharding_spec_by_name("input_1")
        weight_sharding_spec = strategy.get_sharding_spec_by_name("weight")
        output_sharding_spec = strategy.get_sharding_spec_by_name("_0")
        if bias:
            bias_sharding_spec = strategy.get_sharding_spec_by_name("bias")
        # make sure the sharding matches across different operation data
        assert input_sharding_spec.sharding_sequence[:-1] == output_sharding_spec.sharding_sequence[:-1]
        assert weight_sharding_spec.sharding_sequence[1] == input_sharding_spec.sharding_sequence[-1]
        assert weight_sharding_spec.sharding_sequence[0] == output_sharding_spec.sharding_sequence[-1]
        if bias:
            assert bias_sharding_spec.sharding_sequence[-1] == output_sharding_spec.sharding_sequence[-1]
class LinearModel(nn.Module):
    """Thin functional wrapper around :func:`nn.functional.linear` for tracing."""

    def __init__(self):
        super().__init__()

    def forward(self, input, others, bias=None):
        return nn.functional.linear(input, others, bias=bias)
def check_linear_function_handler(rank, world_size, port, bias, input_shape):
    """Per-rank check of ``LinearFunctionHandler`` for ``F.linear``.

    Mirrors ``check_linear_module_handler`` but for the functional form:
    verifies the operation-data mapping, the generated strategy names, and
    the sharding-spec consistency across input / weight / bias / output.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = LinearModel().cuda()
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    input = torch.rand(input_shape).cuda()
    other = torch.rand(32, 16).cuda()
    # the index of linear node in computation graph
    node_index = 2
    # strategy number of linear node
    if input_shape == (1, 4, 4, 16):
        strategy_number = 19
    else:
        strategy_number = 24
    # construct input args
    input_args = [input, other]
    # construct meta arg names
    meta_arg_names = ["input", "others"]
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=input_args,
        meta_arg_names=meta_arg_names,
    )
    tracer = ColoTracer(bias_addition_split=True)
    meta_args = {"input": torch.rand(input_shape).to("meta"), "others": torch.rand(32, 16).to("meta")}
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    # With a bias argument the graph gains one extra placeholder before linear.
    if bias:
        linear_func_node = list(graph.nodes)[3]
    else:
        linear_func_node = list(graph.nodes)[2]
    strategies_vector = StrategiesVector(linear_func_node)
    # build handler
    handler = LinearFunctionHandler(node=linear_func_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    assert mapping["input"].name == "input_1"
    assert mapping["input"].data.shape == torch.Size(input_shape)
    assert mapping["input"].type == OperationDataType.ARG
    # Logical shape flattens all leading dims into one (N, in_features) matrix.
    input_logical_shape = mapping["input"].data.view(-1, 16).shape
    assert mapping["input"].logical_shape == torch.Size(input_logical_shape)
    assert mapping["other"].name == "others"
    assert mapping["other"].data.shape == torch.Size([32, 16])
    assert mapping["other"].type == OperationDataType.ARG
    # The weight's logical shape is transposed relative to its storage layout.
    assert mapping["other"].logical_shape == torch.Size([16, 32])
    if bias:
        assert mapping["bias"].name == "bias"
        assert mapping["bias"].data.shape == torch.Size([32])
        assert mapping["bias"].type == OperationDataType.ARG
        # Fixed copy-paste bug: previously re-checked mapping["other"] here
        # instead of the bias logical shape (cf. check_linear_module_handler).
        assert mapping["bias"].logical_shape == torch.Size([32])
    assert mapping["output"].name == "linear"
    output_shape = input_shape[:-1] + (32,)
    assert mapping["output"].data.shape == torch.Size(output_shape)
    assert mapping["output"].type == OperationDataType.OUTPUT
    output_logical_shape = mapping["output"].data.view(-1, 32).shape
    assert mapping["output"].logical_shape == torch.Size(output_logical_shape)
    strategies_vector = handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    # First dimension cannot be shard if input shape is (1, 4, 4, 16)
    if input_shape != (1, 4, 4, 16):
        assert "S1S0 = S1R x RS0_0" in strategy_name_list
        assert "S0S1 = S0R x RS1_0" in strategy_name_list
        assert "S1R = S1S0 x S0R_0" in strategy_name_list
        assert "S0R = S0S1 x S1R_0" in strategy_name_list
        assert "S01R = S01R x RR_0" in strategy_name_list
    # SS = SR x RS
    assert "S0S1 = S0R x RS1_1" in strategy_name_list
    assert "S0S1 = S0R x RS1_2" in strategy_name_list
    assert "S1S0 = S1R x RS0_1" in strategy_name_list
    assert "S1S0 = S1R x RS0_2" in strategy_name_list
    # SR = SS x SR
    assert "S0R = S0S1 x S1R_1" in strategy_name_list
    assert "S0R = S0S1 x S1R_2" in strategy_name_list
    assert "S1R = S1S0 x S0R_1" in strategy_name_list
    assert "S1R = S1S0 x S0R_2" in strategy_name_list
    # RS = RS x SS
    assert "RS0 = RS1 x S1S0" in strategy_name_list
    assert "RS1 = RS0 x S0S1" in strategy_name_list
    # RR = RS x SR
    assert "RR = RS0 x S0R" in strategy_name_list
    assert "RR = RS1 x S1R" in strategy_name_list
    # RS= RR x RS
    assert "RS0 = RR x RS0" in strategy_name_list
    assert "RS1 = RR x RS1" in strategy_name_list
    # S01R = S01R x RR
    assert "S01R = S01R x RR_1" in strategy_name_list
    assert "S01R = S01R x RR_2" in strategy_name_list
    # RR = RS01 x S01R
    assert "RR = RS01 x S01R" in strategy_name_list
    # RS01 = RR x RS01
    assert "RS01 = RR x RS01" in strategy_name_list
    # RR = RR x RR
    assert "RR = RR x RR" in strategy_name_list
    for strategy in strategies_vector:
        strategy: ShardingStrategy
        input_sharding_spec = strategy.get_sharding_spec_by_name("input_1")
        weight_sharding_spec = strategy.get_sharding_spec_by_name("others")
        output_sharding_spec = strategy.get_sharding_spec_by_name("linear")
        if bias:
            bias_sharding_spec = strategy.get_sharding_spec_by_name("bias")
        # make sure the sharding matches across different operation data
        assert input_sharding_spec.sharding_sequence[:-1] == output_sharding_spec.sharding_sequence[:-1]
        assert weight_sharding_spec.sharding_sequence[1] == input_sharding_spec.sharding_sequence[-1]
        assert weight_sharding_spec.sharding_sequence[0] == output_sharding_spec.sharding_sequence[-1]
        if bias:
            assert bias_sharding_spec.sharding_sequence[-1] == output_sharding_spec.sharding_sequence[-1]
@run_on_environment_flag(name="AUTO_PARALLEL")
@parameterize("input_shape", [(1, 4, 4, 16), (4, 4, 4, 16)])
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_linear_handler(input_shape, bias=False):
    # NOTE(review): ``bias`` is never parameterized, so the bias=True branches
    # of both checkers are not exercised here — confirm whether intentional.
    # 4 processes to match the 2x2 device mesh built inside each checker.
    spawn(
        check_linear_module_handler,
        4,
        bias=bias,
        input_shape=input_shape,
    )
    spawn(
        check_linear_function_handler,
        4,
        bias=bias,
        input_shape=input_shape,
    )
# Allow running this distributed test directly (outside pytest).
if __name__ == "__main__":
    test_linear_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_matmul_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_matmul_handler.py | import pytest
import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler.matmul_handler import (
MatMulHandler,
MatMulType,
_get_bmm_logical_shape,
get_matmul_type,
)
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
OperationData,
OperationDataType,
ShardingStrategy,
StrategiesVector,
)
from colossalai.device.device_mesh import DeviceMesh
from colossalai.testing.utils import clear_cache_before_run, parameterize
class MatMulModule(nn.Module):
    """Minimal module whose traced graph contains a single ``torch.matmul`` node."""

    def forward(self, x1, x2):
        product = torch.matmul(x1, x2)
        return product
@pytest.mark.skipif(torch.__version__ < "1.12.0", reason="need pytorch 1.12.0 or higher for aten level operations")
@clear_cache_before_run()
@parameterize(
    "tensor_shapes",
    [
        [[8], [8]],  # dot product
        [[4, 8], [8]],  # mat-vec product
        [[4, 8], [8, 16]],  # mat-mat product
        [[8], [8, 16]],  # mat-mat product
        [[8], [4, 8, 16]],  # batched mat-mat product with padding + broadcasting
        [[4, 8, 16], [16]],  # batched mat-mat product with padding + broadcasting
        [[4, 8, 16], [16, 32]],  # batched mat-mat product with broadcasting
        [[4, 8, 16], [1, 16, 32]],  # batched mat-mat product with broadcasting
        [[8, 16], [2, 4, 16, 32]],  # batched mat-mat product with broadcasting
        [[4, 8, 16], [2, 4, 16, 32]],  # batched mat-mat product with broadcasting
        [[1, 8, 16], [2, 4, 16, 32]],  # batched mat-mat product with broadcasting
        [[1, 4, 8, 16], [2, 4, 16, 32]],  # batched mat-mat product with broadcasting
        [[2, 1, 8, 16], [2, 4, 16, 32]],  # batched mat-mat product with broadcasting
        [[2, 4, 8, 16], [2, 4, 16, 32]],  # batched mat-mat product without broadcasting
    ],
)
def test_matmul_node_handler(tensor_shapes):
    """Check ``MatMulHandler`` operation-data mapping and strategy consistency
    for every ``torch.matmul`` shape category (DOT / MV / MM / BMM)."""
    input_shape, other_shape = tensor_shapes
    # get output shape
    x1 = torch.rand(*input_shape)
    x2 = torch.rand(*other_shape)
    output_shape = list(torch.matmul(x1, x2).shape)
    # get matmul type
    matmul_type = get_matmul_type(x1.dim(), x2.dim())
    model = MatMulModule()
    tracer = ColoTracer(bias_addition_split=True)
    meta_args = {"x1": x1.to("meta"), "x2": x2.to("meta")}
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    physical_mesh_id = torch.arange(0, 4)
    # Removed leftover debug ``print(graph)``.
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
    mod_node = list(graph.nodes)[2]
    strategies_vector = StrategiesVector(mod_node)
    # build handler
    handler = MatMulHandler(node=mod_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    for op_data in mapping.values():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.logical_shape is not None
        assert op_data.data is not None
    # Derive the logical (padded/broadcast) shapes the handler should expose.
    logical_input_shape = input_shape
    logical_other_shape = other_shape
    logical_output_shape = output_shape
    if matmul_type == MatMulType.MM and len(input_shape) == 1:
        # A 1D input to mat-mat product is logically padded to a row vector.
        logical_input_shape = [1] + input_shape
    elif matmul_type == MatMulType.BMM:
        logical_input_shape, logical_other_shape, logical_output_shape = _get_bmm_logical_shape(
            input_shape, other_shape, handler.transforms
        )
    # check input operation data
    assert mapping["input"].name == "x1"
    assert mapping["input"].data.is_meta
    assert mapping["input"].data.shape == torch.Size(input_shape)
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size(logical_input_shape)
    # check other operation data
    assert mapping["other"].name == "x2"
    assert mapping["other"].data.is_meta
    assert mapping["other"].data.shape == torch.Size(other_shape)
    assert mapping["other"].type == OperationDataType.ARG
    assert mapping["other"].logical_shape == torch.Size(logical_other_shape)
    # check output
    assert mapping["output"].name == "matmul"
    assert mapping["output"].data.is_meta
    assert mapping["output"].data.shape == torch.Size(output_shape)
    assert mapping["output"].type == OperationDataType.OUTPUT
    assert mapping["output"].logical_shape == torch.Size(logical_output_shape)
    strategies_vector = handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    # ensure there is no duplicate strategy
    if matmul_type != MatMulType.BMM:
        assert len(set(strategy_name_list)) == len(strategy_name_list), strategy_name_list
    for strategy in strategies_vector:
        strategy: ShardingStrategy
        input_sharding_spec = strategy.get_sharding_spec_by_name("x1")
        other_sharding_spec = strategy.get_sharding_spec_by_name("x2")
        output_sharding_spec = strategy.get_sharding_spec_by_name("matmul")
        if matmul_type == MatMulType.DOT:
            # dot product will produce a scaler
            # results should fulfill:
            # 1. the input and other operands have the same sharding spec
            # 2. the output has no sharding
            assert input_sharding_spec.sharding_sequence == other_sharding_spec.sharding_sequence
            assert len(output_sharding_spec.sharding_sequence) == 0
        elif matmul_type == MatMulType.MV:
            # matrix-vector product should fulfill
            # 1. the last dim of the input and other operands should have the same sharding
            # 2. the first dim of the input and other should have the same sharding
            # 3. the output should have only 1 dim
            assert input_sharding_spec.sharding_sequence[-1] == other_sharding_spec.sharding_sequence[-1]
            assert input_sharding_spec.sharding_sequence[0] == output_sharding_spec.sharding_sequence[0]
            assert len(output_sharding_spec.sharding_sequence) == 1
        elif matmul_type == MatMulType.MM:
            # matrix-matrix multiplication should fulfil
            # 1. if input is a 2D tensor, the 1st dim of input and output should have the same sharding
            # 2. the input's last dim and the first dim of the other should have the same sharding
            # 3. the last dim of the output and other should have the same sharding
            # 4. the input and output should have the same number of dims
            if len(input_shape) == 2:
                assert input_sharding_spec.sharding_sequence[0] == output_sharding_spec.sharding_sequence[0]
            assert input_sharding_spec.sharding_sequence[-1] == other_sharding_spec.sharding_sequence[0]
            assert output_sharding_spec.sharding_sequence[-1] == other_sharding_spec.sharding_sequence[-1]
            assert len(input_sharding_spec.sharding_sequence) == len(output_sharding_spec.sharding_sequence)
        elif matmul_type == MatMulType.BMM:
            # bmm should fulfil
            # 1. of the other tensor is not a 1d tensor, the last dim of other and output have the same sharding
            # 2. if the input has more than 2 dim, the second last dim of input and output have the same sharding
            # 3. if the other have more than 2 dim, the second last dim of other and the last dim of input should have the same sharding
            if len(other_shape) > 1:
                assert other_sharding_spec.sharding_sequence[-1] == output_sharding_spec.sharding_sequence[-1]
            if len(input_shape) > 1:
                if len(other_shape) == 1:
                    assert input_sharding_spec.sharding_sequence[-2] == output_sharding_spec.sharding_sequence[-1]
                else:
                    assert input_sharding_spec.sharding_sequence[-2] == output_sharding_spec.sharding_sequence[-2]
            if len(other_shape) > 2:
                assert other_sharding_spec.sharding_sequence[-2] == input_sharding_spec.sharding_sequence[-1]
# Allow running this test directly (outside pytest).
if __name__ == "__main__":
    test_matmul_node_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_view_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_view_handler.py | import pytest
import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler import ViewHandler
from colossalai.auto_parallel.tensor_shard.node_handler.conv_handler import ConvFunctionHandler
from colossalai.auto_parallel.tensor_shard.node_handler.linear_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
class ConvViewModel(nn.Module):
    """Bias-free 2D convolution followed by a ``view`` reshape to ``tgt_shape``."""

    def __init__(self, tgt_shape):
        super().__init__()
        self.tgt_shape = tgt_shape

    def forward(self, input, other):
        feature_map = nn.functional.conv2d(input, other, bias=None)
        return feature_map.view(*self.tgt_shape)
class LinearViewModel(nn.Module):
    """Bias-free linear projection followed by a ``view`` reshape to ``tgt_shape``."""

    def __init__(self, tgt_shape):
        super().__init__()
        self.tgt_shape = tgt_shape

    def forward(self, input, other):
        projected = nn.functional.linear(input, other, bias=None)
        return projected.view(*self.tgt_shape)
def check_view_handler(rank, tgt_shape, model_cls, world_size, port):
    """Distributed worker: validate ViewHandler strategies on a conv/linear + view graph.

    Launches a NCCL process group, numerically checks every sharding strategy of
    the predecessor (conv2d/linear) node, then traces the model, registers
    strategies on the predecessor, and asserts both the operation-data mapping
    and the exact strategy names produced by the ViewHandler.

    Args:
        rank: rank of this worker process.
        tgt_shape: target shape passed to ``view`` (drives the expected names).
        model_cls: ``ConvViewModel`` or ``LinearViewModel``.
        world_size: number of spawned processes (expected 4).
        port: master port for the process group.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = model_cls(tgt_shape).cuda()

    if model_cls.__name__ == "ConvViewModel":
        input = torch.rand(8, 8, 66, 66).to("cuda")
        other = torch.rand(16, 8, 3, 3).to("cuda")
        # index of conv node in computation graph
        node_index = 2
        # total number of conv strategies
        strategy_number = 16
    if model_cls.__name__ == "LinearViewModel":
        input = torch.rand(8, 16, 64, 32).to("cuda")
        other = torch.rand(64, 32).to("cuda")
        # index of linear node in computation graph
        node_index = 2
        # total number of linear strategies
        strategy_number = 23

    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)

    # numerically compare every sharding strategy of the predecessor node
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=[input, other],
        meta_arg_names=["input", "other"],
        node_type="following",
    )

    tracer = ColoTracer(bias_addition_split=True)
    if model_cls.__name__ == "ConvViewModel":
        # graph():
        #     %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
        #     %other : torch.Tensor [#users=1] = placeholder[target=other]
        #     %conv2d : [#users=1] = call_function[target=torch.conv2d](args = (%input_1, %other), kwargs = {})
        #     %view : [#users=1] = call_method[target=view](args = (%conv2d, 2, -1), kwargs = {})
        #     return view
        meta_args = {"input": torch.rand(8, 8, 66, 66).to("meta"), "other": torch.rand(16, 8, 3, 3).to("meta")}
        graph = tracer.trace(model, meta_args=meta_args)
    if model_cls.__name__ == "LinearViewModel":
        # graph():
        #     %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
        #     %other : torch.Tensor [#users=1] = placeholder[target=other]
        #     %linear : [#users=1] = call_function[target=torch._C._nn.linear](args = (%input_1, %other), kwargs = {bias: None})
        #     %view : [#users=1] = call_method[target=view](args = (%linear, 32, 4, 32, 32, 4), kwargs = {})
        #     return view
        meta_args = {
            "input": torch.rand(8, 16, 64, 32).to("meta"),
            "other": torch.rand(64, 32).to("meta"),
        }
        graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())

    # node 2 is the conv2d/linear node; node 3 is the view node
    previous_mod_node = list(graph.nodes)[2]
    view_node = list(graph.nodes)[3]
    view_strategies_vector = StrategiesVector(view_node)
    previous_strategies_vector = StrategiesVector(previous_mod_node)

    # build handler for the predecessor node first — the view handler follows it
    if model_cls.__name__ == "ConvViewModel":
        conv_handler = ConvFunctionHandler(
            node=previous_mod_node, device_mesh=device_mesh, strategies_vector=previous_strategies_vector
        )
        conv_handler.register_strategy(compute_resharding_cost=False)
        setattr(previous_mod_node, "strategies_vector", previous_strategies_vector)
    if model_cls.__name__ == "LinearViewModel":
        assert len(previous_strategies_vector) == 0
        linear_handler = LinearFunctionHandler(
            node=previous_mod_node, device_mesh=device_mesh, strategies_vector=previous_strategies_vector
        )
        linear_handler.register_strategy(compute_resharding_cost=False)
        setattr(previous_mod_node, "strategies_vector", previous_strategies_vector)

    view_handler = ViewHandler(node=view_node, device_mesh=device_mesh, strategies_vector=view_strategies_vector)
    view_handler.register_strategy(compute_resharding_cost=False)

    # check operation data mapping
    mapping = view_handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.data is not None

    if model_cls.__name__ == "ConvViewModel":
        assert mapping["input"].name == "conv2d"
    else:
        assert mapping["input"].name == "linear"
    assert mapping["input"].data.is_meta
    assert mapping["input"].data.shape == torch.Size([8, 16, 64, 64])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([8, 16, 64, 64])

    assert mapping["output"].name == "view"
    assert mapping["output"].data.is_meta
    assert mapping["output"].data.shape == torch.Size(tgt_shape)
    assert mapping["output"].type == OperationDataType.OUTPUT

    # reshape handler is a following strategy handler, so the number of strategies is equal to the predecessor node.
    assert len(view_strategies_vector) == len(previous_strategies_vector)
    strategy_name_list = [strategy.name for strategy in view_strategies_vector]
    if model_cls.__name__ == "ConvViewModel":
        if tgt_shape == (32, 4, 64, 16, 4):
            # shardings that split the flattened dims collapse to FULLY REPLICATED
            assert "[S0, S1, R, R] -> FULLY REPLICATED_0" in strategy_name_list
            assert "[S1, S0, R, R] -> FULLY REPLICATED_1" in strategy_name_list
            assert "[S0, R, R, R] -> [S0, R, R, R, R]_2" in strategy_name_list
            assert "[S1, R, R, R] -> [S1, R, R, R, R]_3" in strategy_name_list
            assert "[S0, R, R, R] -> [S0, R, R, R, R]_4" in strategy_name_list
            assert "[S1, R, R, R] -> [S1, R, R, R, R]_5" in strategy_name_list
            assert "[R, S1, R, R] -> FULLY REPLICATED_6" in strategy_name_list
            assert "[R, S0, R, R] -> FULLY REPLICATED_7" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R, R]_8" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R, R]_9" in strategy_name_list
            assert "[R, S0, R, R] -> FULLY REPLICATED_10" in strategy_name_list
            assert "[R, S1, R, R] -> FULLY REPLICATED_11" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R, R]_12" in strategy_name_list
            assert "[S01, R, R, R] -> [S01, R, R, R, R]_13" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R, R]_14" in strategy_name_list
            assert "[R, S01, R, R] -> FULLY REPLICATED_15" in strategy_name_list
        if tgt_shape == (8, 4, 4, 64, 16, 4):
            # leading batch dim preserved, so every sharding maps through
            assert "[S0, S1, R, R] -> [S0, S1, R, R, R, R]_0" in strategy_name_list
            assert "[S1, S0, R, R] -> [S1, S0, R, R, R, R]_1" in strategy_name_list
            assert "[S0, R, R, R] -> [S0, R, R, R, R, R]_2" in strategy_name_list
            assert "[S1, R, R, R] -> [S1, R, R, R, R, R]_3" in strategy_name_list
            assert "[S0, R, R, R] -> [S0, R, R, R, R, R]_4" in strategy_name_list
            assert "[S1, R, R, R] -> [S1, R, R, R, R, R]_5" in strategy_name_list
            assert "[R, S1, R, R] -> [R, S1, R, R, R, R]_6" in strategy_name_list
            assert "[R, S0, R, R] -> [R, S0, R, R, R, R]_7" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R, R, R]_8" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R, R, R]_9" in strategy_name_list
            assert "[R, S0, R, R] -> [R, S0, R, R, R, R]_10" in strategy_name_list
            assert "[R, S1, R, R] -> [R, S1, R, R, R, R]_11" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R, R, R]_12" in strategy_name_list
            assert "[S01, R, R, R] -> [S01, R, R, R, R, R]_13" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R, R, R]_14" in strategy_name_list
            assert "[R, S01, R, R] -> [R, S01, R, R, R, R]_15" in strategy_name_list
    if model_cls.__name__ == "LinearViewModel":
        if tgt_shape == (32, 4, 64, 16, 4):
            for strategy in strategy_name_list:
                print(strategy)
            # print(strategy_name_list)
            assert "[S0, R, R, S1] -> [S0, R, R, S1, R]_11" in strategy_name_list
            assert "[R, S0, R, S1] -> FULLY REPLICATED_12" in strategy_name_list
            assert "[R, R, S0, S1] -> [R, R, S0, S1, R]_13" in strategy_name_list
            assert "[S1, R, R, S0] -> [S1, R, R, S0, R]_14" in strategy_name_list
            assert "[R, S1, R, S0] -> FULLY REPLICATED_15" in strategy_name_list
            assert "[R, R, S1, S0] -> [R, R, S1, S0, R]_16" in strategy_name_list
            assert "[S0, R, R, R] -> [S0, R, R, R, R]_17" in strategy_name_list
            assert "[R, S0, R, R] -> FULLY REPLICATED_18" in strategy_name_list
            assert "[R, R, S0, R] -> [R, R, S0, R, R]_19" in strategy_name_list
            assert "[S1, R, R, R] -> [S1, R, R, R, R]_20" in strategy_name_list
            assert "[R, S1, R, R] -> FULLY REPLICATED_21" in strategy_name_list
            assert "[R, R, S1, R] -> [R, R, S1, R, R]_22" in strategy_name_list
            assert "[R, R, R, S1] -> [R, R, R, S1, R]_10" in strategy_name_list
            assert "[R, R, R, S0] -> [R, R, R, S0, R]_9" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R, R]_8" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R, R]_7" in strategy_name_list
            assert "[R, R, R, S0] -> [R, R, R, S0, R]_6" in strategy_name_list
            assert "[R, R, R, S1] -> [R, R, R, S1, R]_5" in strategy_name_list
            assert "[S01, R, R, R] -> [S01, R, R, R, R]_0" in strategy_name_list
            assert "[R, S01, R, R] -> FULLY REPLICATED_1" in strategy_name_list
            assert "[R, R, S01, R] -> [R, R, S01, R, R]_2" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R, R]_3" in strategy_name_list
            assert "[R, R, R, S01] -> [R, R, R, S01, R]_4" in strategy_name_list
        if tgt_shape == (8, 4, 4, 64, 16, 4):
            assert "[S0, R, R, S1] -> [S0, R, R, R, S1, R]_11" in strategy_name_list
            assert "[R, S0, R, S1] -> [R, S0, R, R, S1, R]_12" in strategy_name_list
            assert "[R, R, S0, S1] -> [R, R, R, S0, S1, R]_13" in strategy_name_list
            assert "[S1, R, R, S0] -> [S1, R, R, R, S0, R]_14" in strategy_name_list
            assert "[R, S1, R, S0] -> [R, S1, R, R, S0, R]_15" in strategy_name_list
            assert "[R, R, S1, S0] -> [R, R, R, S1, S0, R]_16" in strategy_name_list
            assert "[S0, R, R, R] -> [S0, R, R, R, R, R]_17" in strategy_name_list
            assert "[R, S0, R, R] -> [R, S0, R, R, R, R]_18" in strategy_name_list
            assert "[R, R, S0, R] -> [R, R, R, S0, R, R]_19" in strategy_name_list
            assert "[S1, R, R, R] -> [S1, R, R, R, R, R]_20" in strategy_name_list
            assert "[R, S1, R, R] -> [R, S1, R, R, R, R]_21" in strategy_name_list
            assert "[R, R, S1, R] -> [R, R, R, S1, R, R]_22" in strategy_name_list
            assert "[R, R, R, S1] -> [R, R, R, R, S1, R]_10" in strategy_name_list
            assert "[R, R, R, S0] -> [R, R, R, R, S0, R]_9" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R, R, R]_8" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R, R, R]_7" in strategy_name_list
            assert "[R, R, R, S0] -> [R, R, R, R, S0, R]_6" in strategy_name_list
            assert "[R, R, R, S1] -> [R, R, R, R, S1, R]_5" in strategy_name_list
            assert "[S01, R, R, R] -> [S01, R, R, R, R, R]_0" in strategy_name_list
            assert "[R, S01, R, R] -> [R, S01, R, R, R, R]_1" in strategy_name_list
            assert "[R, R, S01, R] -> [R, R, R, S01, R, R]_2" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R, R, R]_3" in strategy_name_list
            assert "[R, R, R, S01] -> [R, R, R, R, S01, R]_4" in strategy_name_list
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
@parameterize("tgt_shape", [(32, 4, 64, 16, 4), (8, 4, 4, 64, 16, 4)])
@parameterize("model_cls", [ConvViewModel, LinearViewModel])
def test_view_handler(tgt_shape, model_cls):
    """Spawn 4 workers and run the ViewHandler checks for each model/shape combo."""
    spawn(check_view_handler, 4, tgt_shape=tgt_shape, model_cls=model_cls)


# Script entry point: run the test directly (outside pytest).
if __name__ == "__main__":
    test_view_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_bmm_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_bmm_handler.py | import pytest
import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler import BMMFunctionHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import parameterize, rerun_if_address_is_in_use, run_on_environment_flag, spawn
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
class BMMTensorMethodModule(nn.Module):
    """Batched matrix multiplication expressed via the ``Tensor.bmm`` method."""

    def forward(self, x1, x2):
        product = x1.bmm(x2)
        return product
class BMMTorchFunctionModule(nn.Module):
    """Batched matrix multiplication expressed via the ``torch.bmm`` function."""

    def forward(self, x1, x2):
        product = torch.bmm(x1, x2)
        return product
def check_2d_device_mesh(rank, module, world_size, port):
    """Distributed worker: validate BMMFunctionHandler on a 2x2 device mesh.

    Numerically checks all 7 bmm strategies, then traces the module and asserts
    the operation-data mapping and strategy names for a 2D mesh.

    Args:
        rank: rank of this worker process.
        module: ``BMMTensorMethodModule`` or ``BMMTorchFunctionModule``.
        world_size: number of spawned processes (expected 4).
        port: master port for the process group.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = module().cuda()
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    x1 = torch.rand(4, 8, 16).cuda()
    x2 = torch.rand(4, 16, 8).cuda()
    # the index of bmm node in computation graph
    node_index = 2
    # strategy number of bmm node on 2d device mesh
    strategy_number = 7
    # construct input args
    input_args = [x1, x2]
    # construct meta arg names
    meta_arg_names = ["x1", "x2"]
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=input_args,
        meta_arg_names=meta_arg_names,
    )
    tracer = ColoTracer(bias_addition_split=True)
    meta_args = {"x1": torch.rand(4, 8, 16).to("meta"), "x2": torch.rand(4, 16, 8).to("meta")}
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    # node 2 is the bmm node
    linear_mod_node = list(graph.nodes)[2]
    strategies_vector = StrategiesVector(linear_mod_node)

    # build handler
    handler = BMMFunctionHandler(node=linear_mod_node, device_mesh=device_mesh, strategies_vector=strategies_vector)

    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.logical_shape is not None
        assert op_data.data is not None

    assert mapping["input"].name == "x1"
    assert mapping["input"].data.is_meta
    assert mapping["input"].data.shape == torch.Size([4, 8, 16])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([4, 8, 16])

    assert mapping["other"].name == "x2"
    assert mapping["other"].data.is_meta
    assert mapping["other"].data.shape == torch.Size([4, 16, 8])
    assert mapping["other"].type == OperationDataType.ARG
    assert mapping["other"].logical_shape == torch.Size([4, 16, 8])

    assert mapping["output"].name == "bmm"
    assert mapping["output"].data.is_meta
    assert mapping["output"].data.shape == torch.Size([4, 8, 8])
    assert mapping["output"].type == OperationDataType.OUTPUT

    strategies_vector = handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    # one batch dim
    assert "Sb0 = Sb0 x Sb0" not in strategy_name_list
    # two batch dim
    assert "Sb01 = Sb01 x Sb01" in strategy_name_list
    # SbSi = SbSi x Sb
    assert "Sb0Si1 = Sb0Si1 x Sb0" in strategy_name_list
    assert "Sb1Si0 = Sb1Si0 x Sb1" in strategy_name_list
    # SbSj = SbR x SbSj
    assert "Sb0Sj1 = Sb0R x Sb0Sj1" in strategy_name_list
    assert "Sb1Sj0 = Sb1R x Sb1Sj0" in strategy_name_list
    # SbR = SbSk x SbSk
    assert "Sb0R = Sb0Sk1 x Sb0Sk1" in strategy_name_list
    assert "Sb1R = Sb1Sk0 x Sb1Sk0" in strategy_name_list

    for strategy in strategies_vector:
        input_sharding_spec = strategy.get_sharding_spec_by_name("x1")
        other_sharding_spec = strategy.get_sharding_spec_by_name("x2")
        output_sharding_spec = strategy.get_sharding_spec_by_name("bmm")

        # make sure the sharding matches across different operation data
        assert input_sharding_spec.sharding_sequence[:-1] == output_sharding_spec.sharding_sequence[:-1]
        assert other_sharding_spec.sharding_sequence[1] == input_sharding_spec.sharding_sequence[-1]
        assert other_sharding_spec.sharding_sequence[-1] == output_sharding_spec.sharding_sequence[-1]
def check_1d_device_mesh(rank, module, world_size, port):
    """Distributed worker: validate BMMFunctionHandler on a 1x4 (1D) device mesh.

    On a 1D mesh only the single batch-sharded strategy should be produced.

    Args:
        rank: rank of this worker process.
        module: ``BMMTensorMethodModule`` or ``BMMTorchFunctionModule``.
        world_size: number of spawned processes (expected 4).
        port: master port for the process group.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = module().cuda()
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (1, 4)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    x1 = torch.rand(4, 8, 16).cuda()
    x2 = torch.rand(4, 16, 8).cuda()
    # the index of bmm node in computation graph
    node_index = 2
    # strategy number of bmm node on 1d device mesh
    strategy_number = 1
    # construct input args
    input_args = [x1, x2]
    # construct meta arg names
    meta_arg_names = ["x1", "x2"]
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=input_args,
        meta_arg_names=meta_arg_names,
    )
    tracer = ColoTracer(bias_addition_split=True)
    meta_args = {"x1": torch.rand(4, 8, 16).to("meta"), "x2": torch.rand(4, 16, 8).to("meta")}
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    # node 2 is the bmm node
    linear_mod_node = list(graph.nodes)[2]
    strategies_vector = StrategiesVector(linear_mod_node)

    # build handler
    handler = BMMFunctionHandler(node=linear_mod_node, device_mesh=device_mesh, strategies_vector=strategies_vector)

    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.logical_shape is not None
        assert op_data.data is not None

    assert mapping["input"].name == "x1"
    assert mapping["input"].data.is_meta
    assert mapping["input"].data.shape == torch.Size([4, 8, 16])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([4, 8, 16])

    assert mapping["other"].name == "x2"
    assert mapping["other"].data.is_meta
    assert mapping["other"].data.shape == torch.Size([4, 16, 8])
    assert mapping["other"].type == OperationDataType.ARG
    assert mapping["other"].logical_shape == torch.Size([4, 16, 8])

    assert mapping["output"].name == "bmm"
    assert mapping["output"].data.is_meta
    assert mapping["output"].data.shape == torch.Size([4, 8, 8])
    assert mapping["output"].type == OperationDataType.OUTPUT

    strategies_vector = handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    assert len(strategy_name_list) == 1
    # one batch dim
    assert "Sb0 = Sb0 x Sb0" in strategy_name_list

    for strategy in strategies_vector:
        input_sharding_spec = strategy.get_sharding_spec_by_name("x1")
        other_sharding_spec = strategy.get_sharding_spec_by_name("x2")
        output_sharding_spec = strategy.get_sharding_spec_by_name("bmm")

        # make sure the sharding matches across different operation data
        assert input_sharding_spec.sharding_sequence[:-1] == output_sharding_spec.sharding_sequence[:-1]
        assert other_sharding_spec.sharding_sequence[1] == input_sharding_spec.sharding_sequence[-1]
        assert other_sharding_spec.sharding_sequence[-1] == output_sharding_spec.sharding_sequence[-1]
@run_on_environment_flag(name="AUTO_PARALLEL")
@parameterize("module", [BMMTensorMethodModule, BMMTorchFunctionModule])
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_bmm_handler(module):
    """Spawn 4 workers and validate BMM handler strategies on 2D and 1D meshes.

    Bug fix: the ``@parameterize("module", ...)`` decorator was applied twice,
    which made the parameterized runs redundant (each module variant exercised
    multiple times); the duplicate has been removed.

    Args:
        module: ``BMMTensorMethodModule`` or ``BMMTorchFunctionModule``,
            supplied by ``@parameterize``.
    """
    spawn(check_2d_device_mesh, 4, module=module)
    spawn(check_1d_device_mesh, 4, module=module)


# Script entry point: run the test directly (outside pytest).
if __name__ == "__main__":
    test_bmm_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_placeholder_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_placeholder_handler.py | import pytest
import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler.placeholder_handler import PlaceholderHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.testing import clear_cache_before_run, parameterize
class PlaceholderModel(nn.Module):
    """Identity module: returns its input unchanged.

    Tracing it yields a graph consisting of a single placeholder node, which is
    exactly what the PlaceholderHandler test needs.
    """

    def __init__(self):
        super().__init__()

    def forward(self, input):
        # no computation — the traced graph is just the placeholder
        return input
@pytest.mark.skip("ShapeProp is not compatible with PyTorch 1.11.0")
@parameterize("placeholder_option", ["distributed", "replicated"])
@clear_cache_before_run()
def test_placeholder_handler(placeholder_option):
    """Check the single strategy PlaceholderHandler registers for each option.

    A "distributed" placeholder is expected to be sharded over the flattened
    mesh on dim 0 ([S01, R, R, R]); a "replicated" one stays fully replicated.

    Args:
        placeholder_option: "distributed" or "replicated", via ``@parameterize``.
    """
    model = PlaceholderModel()
    tracer = ColoTracer(bias_addition_split=True)
    # graph():
    #     %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
    #     return input_1
    meta_args = {
        "input": torch.rand(4, 4, 64, 64).to("meta"),
    }
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())

    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
    placeholder_node = list(graph.nodes)[0]
    placeholder_strategies_vector = StrategiesVector(placeholder_node)

    # build handler
    placeholder_handler = PlaceholderHandler(
        node=placeholder_node,
        device_mesh=device_mesh,
        strategies_vector=placeholder_strategies_vector,
        placeholder_option=placeholder_option,
    )
    placeholder_handler.register_strategy(compute_resharding_cost=False)

    # check operation data mapping
    mapping = placeholder_handler.get_operation_data_mapping()
    strategy = placeholder_strategies_vector[0]
    strategy_sharding_spec = strategy.get_sharding_spec_by_name(mapping["output"].name)
    if placeholder_option == "distributed":
        assert str(strategy_sharding_spec.sharding_sequence) == "[S01, R, R, R]"
    else:
        assert str(strategy_sharding_spec.sharding_sequence) == "[R, R, R, R]"

    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.data is not None

    assert mapping["output"].name == "input_1"
    assert mapping["output"].data.is_meta
    assert mapping["output"].data.shape == torch.Size((4, 4, 64, 64))
    assert mapping["output"].type == OperationDataType.OUTPUT

    strategy_name_list = [val.name for val in placeholder_handler.strategies_vector]
    if placeholder_option == "replicated":
        assert "Replica Placeholder" in strategy_name_list
    else:
        assert "Distributed Placeholder" in strategy_name_list


# Script entry point: run the test directly (outside pytest).
if __name__ == "__main__":
    test_placeholder_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_bias_linear_function_node.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_bias_linear_function_node.py | import pytest
import torch
import torch.nn.functional as F
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
OperationData,
OperationDataType,
ShardingStrategy,
StrategiesVector,
)
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import rerun_if_address_is_in_use, run_on_environment_flag, spawn
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
# (out_features, in_features) of the weight used by LinearModule below.
WEIGHT_SHAPE = (32, 16)
class LinearModule(torch.nn.Module):
    """Linear layer built from explicit weight/bias parameters via ``F.linear``.

    Keeping the weight and bias as bare parameters (rather than using
    ``nn.Linear``) makes the traced graph contain an explicit linear + add.
    """

    def __init__(self, weight_shape):
        super().__init__()
        # weight_shape is (out_features, in_features); bias covers out_features
        self.weight = torch.nn.Parameter(torch.rand(*weight_shape))
        self.bias = torch.nn.Parameter(torch.rand(weight_shape[0]))

    def forward(self, x):
        return F.linear(x, self.weight, bias=self.bias)
def check_linear_module_handler(rank, world_size, port):
    """Distributed worker: validate LinearFunctionHandler for a bias-split linear.

    The tracer splits ``F.linear(x, w, bias=b)`` into a bias-free linear node
    followed by an add; this checks the mapping (bias excluded from the linear
    node) and the 24 strategy names, then cross-checks sharding consistency.

    Args:
        rank: rank of this worker process.
        world_size: number of spawned processes (expected 4).
        port: master port for the process group.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = LinearModule(weight_shape=WEIGHT_SHAPE).cuda()
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)

    input = torch.rand(4, 4, 4, 16).cuda()
    # the index of linear node in computation graph
    node_index = 3
    # strategy number of linear node
    strategy_number = 24
    # construct input args
    input_args = [input]
    # construct meta arg names
    meta_arg_names = ["x"]
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=input_args,
        meta_arg_names=meta_arg_names,
        node_type="bias_module",
    )

    tracer = ColoTracer(bias_addition_split=True)
    # graph():
    #     %x : torch.Tensor [#users=1] = placeholder[target=x]
    #     %weight : [#users=1] = get_attr[target=weight]
    #     %bias : [#users=1] = get_attr[target=bias]
    #     %linear : [#users=1] = call_function[target=torch._C._nn.linear](args = (%x, %weight), kwargs = {})
    #     %add : [#users=1] = call_function[target=operator.add](args = (%linear, %bias), kwargs = {})
    #     return add
    meta_args = {"x": torch.rand(4, 4, 4, 16).to("meta")}
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())

    # node 3 is the bias-free linear node
    linear_mod_node = list(graph.nodes)[3]
    strategies_vector = StrategiesVector(linear_mod_node)

    # build handler
    handler = LinearFunctionHandler(node=linear_mod_node, device_mesh=device_mesh, strategies_vector=strategies_vector)

    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.logical_shape is not None
        assert op_data.data is not None

    assert mapping["input"].name == "x"
    assert mapping["input"].data.shape == torch.Size([4, 4, 4, 16])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([64, 16])

    assert mapping["other"].name == "weight"
    assert mapping["other"].data.shape == torch.Size([32, 16])
    assert mapping["other"].type == OperationDataType.PARAM
    assert mapping["other"].logical_shape == torch.Size([16, 32])

    # bias is split out into a separate add node, so it is not in the mapping
    assert "bias" not in mapping

    assert mapping["output"].name == "linear"
    assert mapping["output"].data.shape == torch.Size([4, 4, 4, 32])
    assert mapping["output"].type == OperationDataType.OUTPUT

    strategies_vector = handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    # SS = SR x RS
    assert "S0S1 = S0R x RS1_0" in strategy_name_list
    assert "S0S1 = S0R x RS1_1" in strategy_name_list
    assert "S0S1 = S0R x RS1_2" in strategy_name_list
    assert "S1S0 = S1R x RS0_0" in strategy_name_list
    assert "S1S0 = S1R x RS0_1" in strategy_name_list
    assert "S1S0 = S1R x RS0_2" in strategy_name_list
    # SR = SS x SR
    assert "S0R = S0S1 x S1R_0" in strategy_name_list
    assert "S0R = S0S1 x S1R_1" in strategy_name_list
    assert "S0R = S0S1 x S1R_2" in strategy_name_list
    assert "S1R = S1S0 x S0R_0" in strategy_name_list
    assert "S1R = S1S0 x S0R_1" in strategy_name_list
    assert "S1R = S1S0 x S0R_2" in strategy_name_list
    # RS = RS x SS
    assert "RS0 = RS1 x S1S0" in strategy_name_list
    assert "RS1 = RS0 x S0S1" in strategy_name_list
    # RR = RS x SR
    assert "RR = RS0 x S0R" in strategy_name_list
    assert "RR = RS1 x S1R" in strategy_name_list
    # RS= RR x RS
    assert "RS0 = RR x RS0" in strategy_name_list
    assert "RS1 = RR x RS1" in strategy_name_list
    # S01R = S01R x RR
    assert "S01R = S01R x RR_0" in strategy_name_list
    assert "S01R = S01R x RR_1" in strategy_name_list
    assert "S01R = S01R x RR_2" in strategy_name_list
    # RR = RS01 x S01R
    assert "RR = RS01 x S01R" in strategy_name_list
    # RS01 = RR x RS01
    assert "RS01 = RR x RS01" in strategy_name_list
    # RR = RR x RR
    assert "RR = RR x RR" in strategy_name_list

    for strategy in strategies_vector:
        strategy: ShardingStrategy
        input_sharding_spec = strategy.get_sharding_spec_by_name("x")
        weight_sharding_spec = strategy.get_sharding_spec_by_name("weight")
        output_sharding_spec = strategy.get_sharding_spec_by_name("linear")

        # make sure the sharding matches across different operation data
        assert input_sharding_spec.sharding_sequence[:-1] == output_sharding_spec.sharding_sequence[:-1]
        assert weight_sharding_spec.sharding_sequence[1] == input_sharding_spec.sharding_sequence[-1]
        assert weight_sharding_spec.sharding_sequence[0] == output_sharding_spec.sharding_sequence[-1]
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_linear_handler():
    """Spawn workers and run the bias-split linear handler checks."""
    spawn(check_linear_module_handler)


# Script entry point: run the test directly (outside pytest).
if __name__ == "__main__":
    test_linear_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_addbmm_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_addbmm_handler.py | import pytest
import torch
import torch.nn as nn
from colossalai.auto_parallel.tensor_shard.node_handler import BMMFunctionHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.fx import ColoGraphModule, ColoTracer
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import parameterize, rerun_if_address_is_in_use, run_on_environment_flag, spawn
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
class AddBMMTensorMethodModule(nn.Module):
    """``addbmm`` via the tensor method, optionally with alpha/beta kwargs.

    With kwargs the result is ``3 * bias + 2 * sum_b(x1[b] @ x2[b])``;
    otherwise it is the plain ``bias + sum_b(x1[b] @ x2[b])``.
    """

    def __init__(self, using_kwargs):
        super().__init__()
        self.using_kwargs = using_kwargs

    def forward(self, bias, x1, x2):
        if not self.using_kwargs:
            return bias.addbmm(x1, x2)
        return bias.addbmm(x1, x2, alpha=2, beta=3)
class AddBMMTorchFunctionModule(nn.Module):
    """``addbmm`` via the ``torch.addbmm`` function, optionally with alpha/beta.

    With kwargs the result is ``3 * bias + 2 * sum_b(x1[b] @ x2[b])``;
    otherwise it is the plain ``bias + sum_b(x1[b] @ x2[b])``.
    """

    def __init__(self, using_kwargs):
        super().__init__()
        self.using_kwargs = using_kwargs

    def forward(self, bias, x1, x2):
        if not self.using_kwargs:
            return torch.addbmm(bias, x1, x2)
        return torch.addbmm(bias, x1, x2, alpha=2, beta=3)
def check_2d_device_mesh(rank, world_size, port, module, bias_shape, using_kwargs):
    """Distributed worker: validate the bmm node inside a traced addbmm on a 2x2 mesh.

    The tracer decomposes addbmm into bmm + sum + add; the BMMFunctionHandler is
    applied to the bmm node and its 7 strategies are verified.

    Args:
        rank: rank of this worker process.
        world_size: number of spawned processes (expected 4).
        port: master port for the process group.
        module: ``AddBMMTensorMethodModule`` or ``AddBMMTorchFunctionModule``.
        bias_shape: shape of the bias tensor fed to addbmm.
        using_kwargs: whether the module passes alpha/beta kwargs.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = module(using_kwargs).cuda()
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    x1 = torch.rand(4, 8, 16).cuda()
    x2 = torch.rand(4, 16, 8).cuda()
    bias = torch.rand(bias_shape).cuda()
    # the index of addbmm node in computation graph
    node_index = 3
    # strategy number of addbmm node on 2d device mesh
    strategy_number = 7
    # construct input args
    input_args = [bias, x1, x2]
    # construct meta arg names
    meta_arg_names = ["bias", "x1", "x2"]
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=input_args,
        meta_arg_names=meta_arg_names,
    )
    tracer = ColoTracer()
    # graph():
    #     %bias : torch.Tensor [#users=1] = placeholder[target=bias]
    #     %x1 : torch.Tensor [#users=1] = placeholder[target=x1]
    #     %x2 : torch.Tensor [#users=1] = placeholder[target=x2]
    #     %bmm : [#users=1] = call_function[target=torch.bmm](args = (%x1, %x2), kwargs = {})
    #     %sum_1 : [#users=1] = call_function[target=torch.sum](args = (%bmm, 0), kwargs = {})
    #     %add : [#users=1] = call_function[target=operator.add](args = (%sum_1, %bias), kwargs = {})
    #     return add
    graph = tracer.trace(
        model,
        meta_args={
            "bias": torch.rand(*bias_shape).to("meta"),
            "x1": torch.rand(4, 8, 16).to("meta"),
            "x2": torch.rand(4, 16, 8).to("meta"),
        },
    )
    ColoGraphModule(model, graph)
    # node 3 is the bmm node produced by the addbmm decomposition
    bmm_mod_node = list(graph.nodes)[3]
    strategies_vector = StrategiesVector(bmm_mod_node)

    # build handler
    handler = BMMFunctionHandler(node=bmm_mod_node, device_mesh=device_mesh, strategies_vector=strategies_vector)

    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.logical_shape is not None
        assert op_data.data is not None

    assert mapping["input"].name == "x1"
    assert mapping["input"].data.is_meta
    assert mapping["input"].data.shape == torch.Size([4, 8, 16])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([4, 8, 16])

    assert mapping["other"].name == "x2"
    assert mapping["other"].data.is_meta
    assert mapping["other"].data.shape == torch.Size([4, 16, 8])
    assert mapping["other"].type == OperationDataType.ARG
    assert mapping["other"].logical_shape == torch.Size([4, 16, 8])

    assert mapping["output"].name == "bmm"
    assert mapping["output"].data.is_meta
    assert mapping["output"].data.shape == torch.Size([4, 8, 8])
    assert mapping["output"].type == OperationDataType.OUTPUT

    strategies_vector = handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    # NOTE(review): leftover debug output — prints every strategy name
    for name in strategy_name_list:
        print(name)
    # one batch dim
    assert "Sb0 = Sb0 x Sb0" not in strategy_name_list
    # two batch dim
    assert "Sb01 = Sb01 x Sb01" in strategy_name_list
    # SbSi = SbSi x Sb
    assert "Sb0Si1 = Sb0Si1 x Sb0" in strategy_name_list
    assert "Sb1Si0 = Sb1Si0 x Sb1" in strategy_name_list
    # SbSj = SbR x SbSj
    assert "Sb0Sj1 = Sb0R x Sb0Sj1" in strategy_name_list
    assert "Sb1Sj0 = Sb1R x Sb1Sj0" in strategy_name_list
    # SbR = SbSk x SbSk
    assert "Sb0R = Sb0Sk1 x Sb0Sk1" in strategy_name_list
    assert "Sb1R = Sb1Sk0 x Sb1Sk0" in strategy_name_list

    for strategy in strategies_vector:
        input_sharding_spec = strategy.get_sharding_spec_by_name("x1")
        other_sharding_spec = strategy.get_sharding_spec_by_name("x2")
        output_sharding_spec = strategy.get_sharding_spec_by_name("bmm")

        # make sure the sharding matches across different operation data
        assert input_sharding_spec.sharding_sequence[0] == output_sharding_spec.sharding_sequence[0]
        assert other_sharding_spec.sharding_sequence[1] == input_sharding_spec.sharding_sequence[-1]
        assert other_sharding_spec.sharding_sequence[-1] == output_sharding_spec.sharding_sequence[-1]
def check_1d_device_mesh(rank, module, bias_shape, using_kwargs, world_size, port):
    """Worker entry point: validate the BMM handler on a 1D (1, 4) device mesh.

    Spawned once per rank. Builds an AddBMM module, numerically checks the
    single strategy produced on a flat mesh, then re-traces the model on meta
    tensors and verifies the handler's operation-data mapping and the sharding
    specs of every registered strategy.

    NOTE(review): assumes 4 visible CUDA devices and an NCCL backend.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    physical_mesh_id = torch.arange(0, 4)
    # (1, 4) collapses the first mesh axis, i.e. effectively a 1D mesh.
    mesh_shape = (1, 4)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    model = module(using_kwargs).cuda()
    x1 = torch.rand(4, 8, 16).cuda()
    x2 = torch.rand(4, 16, 8).cuda()
    bias = torch.rand(bias_shape).cuda()
    # the index of the bmm node in the traced computation graph
    node_index = 3
    # strategy number of the bmm node on the 1D device mesh
    strategy_number = 1
    # construct input args
    input_args = [bias, x1, x2]
    # construct meta arg names
    meta_arg_names = ["bias", "x1", "x2"]
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=input_args,
        meta_arg_names=meta_arg_names,
    )
    tracer = ColoTracer()
    # graph():
    # %bias : torch.Tensor [#users=1] = placeholder[target=bias]
    # %x1 : torch.Tensor [#users=1] = placeholder[target=x1]
    # %x2 : torch.Tensor [#users=1] = placeholder[target=x2]
    # %bmm : [#users=1] = call_function[target=torch.bmm](args = (%x1, %x2), kwargs = {})
    # %sum_1 : [#users=1] = call_function[target=torch.sum](args = (%bmm, 0), kwargs = {})
    # %add : [#users=1] = call_function[target=operator.add](args = (%sum_1, %bias), kwargs = {})
    # return add
    graph = tracer.trace(
        model,
        meta_args={
            "bias": torch.rand(*bias_shape).to("meta"),
            "x1": torch.rand(4, 8, 16).to("meta"),
            "x2": torch.rand(4, 16, 8).to("meta"),
        },
    )
    ColoGraphModule(model, graph)
    bmm_mod_node = list(graph.nodes)[3]
    strategies_vector = StrategiesVector(bmm_mod_node)
    # build handler
    handler = BMMFunctionHandler(node=bmm_mod_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.logical_shape is not None
        assert op_data.data is not None
    assert mapping["input"].name == "x1"
    assert mapping["input"].data.is_meta
    assert mapping["input"].data.shape == torch.Size([4, 8, 16])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([4, 8, 16])
    assert mapping["other"].name == "x2"
    assert mapping["other"].data.is_meta
    assert mapping["other"].data.shape == torch.Size([4, 16, 8])
    assert mapping["other"].type == OperationDataType.ARG
    assert mapping["other"].logical_shape == torch.Size([4, 16, 8])
    assert mapping["output"].name == "bmm"
    assert mapping["output"].data.is_meta
    assert mapping["output"].data.shape == torch.Size([4, 8, 8])
    assert mapping["output"].type == OperationDataType.OUTPUT
    strategies_vector = handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    # only one batch-sharded strategy survives on the flat mesh
    assert len(strategy_name_list) == 1
    # one batch dim
    assert "Sb0 = Sb0 x Sb0" in strategy_name_list
    for strategy in strategies_vector:
        input_sharding_spec = strategy.get_sharding_spec_by_name("x1")
        other_sharding_spec = strategy.get_sharding_spec_by_name("x2")
        output_sharding_spec = strategy.get_sharding_spec_by_name("bmm")
        # make sure the sharding matches across different operation data
        assert input_sharding_spec.sharding_sequence[0] == output_sharding_spec.sharding_sequence[0]
        assert other_sharding_spec.sharding_sequence[1] == input_sharding_spec.sharding_sequence[-1]
        assert other_sharding_spec.sharding_sequence[-1] == output_sharding_spec.sharding_sequence[-1]
@pytest.mark.skip("skip due to bias cases not ready")
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@parameterize("module", [AddBMMTorchFunctionModule, AddBMMTensorMethodModule])
@parameterize("bias_shape", [[8], [1, 8], [8, 8]])
@parameterize("using_kwargs", [True, False])
@rerun_if_address_is_in_use()
def test_2d_device_mesh(module, bias_shape, using_kwargs):
    """Spawn 4 workers to check the AddBMM handler on a 2D device mesh."""
    spawn(
        check_2d_device_mesh,
        4,
        module=module,
        bias_shape=bias_shape,
        using_kwargs=using_kwargs,
    )
@pytest.mark.skip("skip due to bias cases not ready")
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@parameterize("module", [AddBMMTorchFunctionModule, AddBMMTensorMethodModule])
@parameterize("bias_shape", [[8], [1, 8], [8, 8]])
@parameterize("using_kwargs", [True, False])
@rerun_if_address_is_in_use()
def test_1d_device_mesh(module, bias_shape, using_kwargs):
    """Spawn 4 workers to check the AddBMM handler on a 1D (1, 4) device mesh."""
    spawn(
        check_1d_device_mesh,
        4,
        module=module,
        bias_shape=bias_shape,
        using_kwargs=using_kwargs,
    )
if __name__ == "__main__":
    # Manual entry point: run both parameterized tests directly, 1D first.
    for entry_point in (test_1d_device_mesh, test_2d_device_mesh):
        entry_point()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_addmm_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_addmm_handler.py | import pytest
import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
OperationDataType,
ShardingStrategy,
StrategiesVector,
)
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import parameterize, rerun_if_address_is_in_use, run_on_environment_flag, spawn
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
class AddmmModel(nn.Module):
    """Minimal module wrapping ``torch.addmm`` with fixed scaling factors.

    ``forward`` computes ``3 * input + 2 * (m1 @ m2)``.
    """

    def __init__(self):
        super().__init__()

    def forward(self, input, m1, m2):
        # addmm fuses the scaled bias add with the matrix product.
        return torch.addmm(input, m1, m2, beta=3, alpha=2)
class AddmmModel_with_param(nn.Module):
    """Variant of ``AddmmModel`` owning its weight and bias as parameters.

    ``forward`` computes ``3 * bias + 2 * (m1 @ weight)``.
    """

    def __init__(self, weight_shape, bias_shape):
        super().__init__()
        self.weight = torch.nn.Parameter(torch.rand(weight_shape))
        self.bias = torch.nn.Parameter(torch.rand(bias_shape))

    def forward(self, m1):
        # Same fixed beta/alpha as AddmmModel, but with owned parameters.
        return torch.addmm(self.bias, m1, self.weight, beta=3, alpha=2)
def check_addmm_function_handler(rank, world_size, port, input_shape, model_cls):
    """Worker entry point: validate LinearFunctionHandler on a traced addmm.

    With ``bias_addition_split=True`` the tracer decomposes ``torch.addmm``
    into transpose + linear + scaled adds; this checks the linear node's
    operation-data mapping and the full strategy set on a 2D (2, 2) mesh.

    NOTE(review): assumes 4 visible CUDA devices and an NCCL backend.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    if model_cls == AddmmModel:
        model = AddmmModel().cuda()
    else:
        model = AddmmModel_with_param(weight_shape=(8, 16), bias_shape=input_shape).cuda()
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    if model_cls == AddmmModel:
        input = torch.rand(input_shape).cuda()
        m1 = torch.rand(4, 8).cuda()
        m2 = torch.rand(8, 16).cuda()
        # construct input args
        input_args = [input, m1, m2]
        # construct meta arg names
        meta_arg_names = ["input", "m1", "m2"]
        meta_args_for_tracer = {}
        for meta_arg, input_arg in zip(meta_arg_names, input_args):
            meta_args_for_tracer[meta_arg] = input_arg.to("meta")
        # the index of addmm node in computation graph
        node_index = 4
        # strategy number of linear node
        strategy_number = 14
    else:
        m1 = torch.rand(4, 8).cuda()
        # construct input args
        input_args = [m1]
        # construct meta arg names
        meta_arg_names = ["m1"]
        # the index of addmm node in computation graph
        meta_args_for_tracer = {}
        for meta_arg, input_arg in zip(meta_arg_names, input_args):
            meta_args_for_tracer[meta_arg] = input_arg.to("meta")
        node_index = 4
        # strategy number of linear node
        strategy_number = 14
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=input_args,
        meta_arg_names=meta_arg_names,
        node_type="bias_module",
    )
    tracer = ColoTracer(bias_addition_split=True)
    # graph():
    # %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
    # %m1 : torch.Tensor [#users=1] = placeholder[target=m1]
    # %m2 : torch.Tensor [#users=1] = placeholder[target=m2]
    # %transpose : [#users=1] = call_function[target=torch.transpose](args = (%m2, 0, 1), kwargs = {})
    # %linear : [#users=1] = call_function[target=torch._C._nn.linear](args = (%m1, %transpose), kwargs = {})
    # %mul : [#users=1] = call_function[target=operator.mul](args = (%input_1, 3), kwargs = {})
    # %mul_1 : [#users=1] = call_function[target=operator.mul](args = (2, %linear), kwargs = {})
    # %add : [#users=1] = call_function[target=operator.add](args = (%mul_1, %mul), kwargs = {})
    # return add
    graph = tracer.trace(model, meta_args=meta_args_for_tracer)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args_for_tracer.values())
    # [input_1, m1, m2, addmm, output]
    node_list = list(graph.nodes)
    linear_node = node_list[4]
    strategies_vector = StrategiesVector(linear_node)
    # build handler
    handler = LinearFunctionHandler(node=linear_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
    handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    assert mapping["input"].name == "m1"
    assert mapping["input"].data.shape == torch.Size([4, 8])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([4, 8])
    assert mapping["other"].name == "transpose"
    assert mapping["other"].data.shape == torch.Size([16, 8])
    if model_cls == AddmmModel:
        assert mapping["other"].type == OperationDataType.ARG
    else:
        assert mapping["other"].type == OperationDataType.PARAM
    assert mapping["other"].logical_shape == torch.Size([8, 16])
    assert mapping["output"].name == "linear"
    assert mapping["output"].data.shape == torch.Size([4, 16])
    assert mapping["output"].type == OperationDataType.OUTPUT
    # SS = SR x RS
    assert "S0S1 = S0R x RS1_0" in strategy_name_list
    assert "S1S0 = S1R x RS0_0" in strategy_name_list
    # SR = SS x SR
    assert "S0R = S0S1 x S1R_0" in strategy_name_list
    assert "S1R = S1S0 x S0R_0" in strategy_name_list
    # RS = RS x SS
    assert "RS0 = RS1 x S1S0" in strategy_name_list
    assert "RS1 = RS0 x S0S1" in strategy_name_list
    # RR = RS x SR
    assert "RR = RS0 x S0R" in strategy_name_list
    assert "RR = RS1 x S1R" in strategy_name_list
    # RS= RR x RS
    assert "RS0 = RR x RS0" in strategy_name_list
    assert "RS1 = RR x RS1" in strategy_name_list
    # S01R = S01R x RR
    assert "S01R = S01R x RR_0" in strategy_name_list
    # RR = RS01 x S01R
    assert "RR = RS01 x S01R" in strategy_name_list
    # RS01 = RR x RS01
    assert "RS01 = RR x RS01" in strategy_name_list
    # RR = RR x RR
    assert "RR = RR x RR" in strategy_name_list
    for strategy in strategies_vector:
        strategy: ShardingStrategy
        input_sharding_spec = strategy.get_sharding_spec_by_name("m1")
        weight_sharding_spec = strategy.get_sharding_spec_by_name("transpose")
        output_sharding_spec = strategy.get_sharding_spec_by_name("linear")
        # make sure the sharding matches across different operation data
        assert input_sharding_spec.sharding_sequence[:-1] == output_sharding_spec.sharding_sequence[:-1]
        assert weight_sharding_spec.sharding_sequence[1] == input_sharding_spec.sharding_sequence[1]
        assert weight_sharding_spec.sharding_sequence[0] == output_sharding_spec.sharding_sequence[1]
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@parameterize("input_shape", [(16,), (4, 16)])
@parameterize("model_cls", [AddmmModel, AddmmModel_with_param])
@rerun_if_address_is_in_use()
def test_addmm_handler(input_shape, model_cls):
    """Spawn 4 workers and run the addmm handler checks per bias shape/model."""
    spawn(check_addmm_function_handler, 4, input_shape=input_shape, model_cls=model_cls)
# Allow running this test module directly without pytest.
if __name__ == "__main__":
    test_addmm_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_getitem_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_getitem_handler.py | import pytest
import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler.default_reshape_handler import DefaultReshapeHandler
from colossalai.auto_parallel.tensor_shard.node_handler.getitem_handler import GetItemHandler
from colossalai.auto_parallel.tensor_shard.node_handler.linear_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.node_handler.placeholder_handler import PlaceholderHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import clear_cache_before_run, parameterize, rerun_if_address_is_in_use, spawn
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
class GetItemFromTensorModel(nn.Module):
    """Applies a bias-free linear layer and then indexes the result.

    ``getitem_index`` may be an int, a tuple, or slice(s); it is applied
    verbatim to the linear output.
    """

    def __init__(self, getitem_index):
        super().__init__()
        self.getitem_index = getitem_index

    def forward(self, input, other):
        projected = nn.functional.linear(input, other, bias=None)
        return projected[self.getitem_index]
def check_getitem_from_tensor_handler(rank, getitem_index, world_size, port):
    """Worker entry point: validate GetItemHandler on a linear node's output.

    Builds a linear + getitem graph, numerically checks all linear strategies
    on a 2D (2, 2) mesh, then verifies the getitem node follows its
    predecessor strategy-for-strategy.

    NOTE(review): assumes 4 visible CUDA devices and an NCCL backend.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = GetItemFromTensorModel(getitem_index=getitem_index)
    input = torch.rand(8, 16, 64, 32).to("cuda")
    other = torch.rand(64, 32).to("cuda")
    # index of linear node in computation graph
    node_index = 2
    # total number of linear strategies
    strategy_number = 23
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=[input, other],
        meta_arg_names=["input", "other"],
        node_type="following",
    )
    tracer = ColoTracer(bias_addition_split=True)
    meta_args = {
        "input": torch.rand(8, 16, 64, 32).to("meta"),
        "other": torch.rand(64, 32).to("meta"),
    }
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *list(meta_args.values()))
    linear_mod_node = list(graph.nodes)[2]
    getitem_mod_node = list(graph.nodes)[3]
    getitem_strategies_vector = StrategiesVector(getitem_mod_node)
    linear_strategies_vector = StrategiesVector(linear_mod_node)
    # build handler; the linear handler must register first so the getitem
    # handler can follow its strategies
    linear_handler = LinearFunctionHandler(
        node=linear_mod_node, device_mesh=device_mesh, strategies_vector=linear_strategies_vector
    )
    linear_handler.register_strategy(compute_resharding_cost=False)
    setattr(linear_mod_node, "strategies_vector", linear_strategies_vector)
    getitem_handler = GetItemHandler(
        node=getitem_mod_node, device_mesh=device_mesh, strategies_vector=getitem_strategies_vector
    )
    getitem_handler.register_strategy(compute_resharding_cost=False)
    # check operation data mapping
    mapping = getitem_handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.data is not None
    # getitem is a following strategy handler, so the number of strategies is equal to the predecessor node.
    assert len(getitem_strategies_vector) == len(linear_strategies_vector)
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
@parameterize("getitem_index", [1, (1, 4), slice(0, 2), (slice(None), slice(None))])
def test_getitem_from_tensor_handler(getitem_index):
    """Spawn 4 workers to exercise GetItemHandler for each indexing pattern.

    Bug fix: the parameterized ``getitem_index`` was previously dropped, so
    ``check_getitem_from_tensor_handler`` (whose signature requires it) would
    fail with a TypeError; forward it as a keyword, as the sibling tests do.
    """
    spawn(check_getitem_from_tensor_handler, 4, getitem_index=getitem_index)
class GetItemFromTupleModel(nn.Module):
    """Splits its input into chunks of 2 along dim 0 and returns chunk #1."""

    def __init__(self):
        super().__init__()

    def forward(self, input):
        # torch.split returns a tuple; chunk 1 holds rows 2:4 of `input`.
        chunks = torch.split(input, 2, 0)
        return chunks[1]
@run_on_environment_flag(name="AUTO_PARALLEL")
@clear_cache_before_run()
def test_getitem_from_tuple_handler():
    """Trace split + getitem on meta tensors and check the handler mapping.

    Runs single-process (no launch/spawn): builds placeholder, split, and
    getitem handlers in dependency order, then verifies the getitem node's
    operation-data mapping and that it follows its predecessor's strategies.
    """
    model = GetItemFromTupleModel()
    tracer = ColoTracer()
    # graph():
    # %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
    # %split : [#users=1] = call_function[target=torch.functional.split](args = (%conv2d, 2), kwargs = {dim: 0})
    # %getitem : [#users=1] = call_function[target=operator.getitem](args = (%split, 1), kwargs = {})
    # return getitem
    meta_args = {
        "input": torch.rand(4, 4, 64, 64).to("meta"),
    }
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
    input_node = list(graph.nodes)[0]
    split_node = list(graph.nodes)[1]
    getitem_node = list(graph.nodes)[2]
    input_strategies_vector = StrategiesVector(input_node)
    getitem_strategies_vector = StrategiesVector(getitem_node)
    split_strategies_vector = StrategiesVector(split_node)
    # build handler (register order matters: each node must carry its
    # strategies_vector before its consumer registers)
    input_handler = PlaceholderHandler(
        node=input_node,
        device_mesh=device_mesh,
        strategies_vector=input_strategies_vector,
        placeholder_option="replicated",
    )
    input_handler.register_strategy(compute_resharding_cost=False)
    setattr(input_node, "strategies_vector", input_strategies_vector)
    split_handler = DefaultReshapeHandler(
        node=split_node, device_mesh=device_mesh, strategies_vector=split_strategies_vector
    )
    split_handler.register_strategy(compute_resharding_cost=False)
    setattr(split_node, "strategies_vector", split_strategies_vector)
    getitem_handler = GetItemHandler(
        node=getitem_node, device_mesh=device_mesh, strategies_vector=getitem_strategies_vector
    )
    getitem_handler.register_strategy(compute_resharding_cost=False)
    setattr(getitem_node, "strategies_vector", getitem_strategies_vector)
    # check operation data mapping
    mapping = getitem_handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.data is not None
    assert mapping["input"].name == "split"
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == (torch.Size([2, 4, 64, 64]), torch.Size([2, 4, 64, 64]))
    assert mapping["index"].name == "index"
    assert isinstance(mapping["index"].data, int)
    assert mapping["index"].type == OperationDataType.ARG
    assert mapping["output"].name == "getitem"
    assert mapping["output"].data.is_meta
    assert mapping["output"].data.shape == torch.Size([2, 4, 64, 64])
    assert mapping["output"].type == OperationDataType.OUTPUT
    # getitem is a following strategy handler, so the number of strategies is equal to the predecessor node.
    assert len(getitem_strategies_vector) == len(split_strategies_vector)
# Allow running this test module directly without pytest.
if __name__ == "__main__":
    test_getitem_from_tensor_handler()
    test_getitem_from_tuple_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_split_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_split_handler.py | import pytest
import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler import SplitHandler
from colossalai.auto_parallel.tensor_shard.node_handler.conv_handler import ConvFunctionHandler
from colossalai.auto_parallel.tensor_shard.node_handler.linear_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import parameterize, rerun_if_address_is_in_use, run_on_environment_flag, spawn
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
class ConvSplitModel(nn.Module):
    """Bias-free 2D convolution followed by ``Tensor.split``.

    Returns the tuple of chunks of size ``split_size`` along ``split_dim``.
    """

    def __init__(self, split_size, split_dim):
        super().__init__()
        self.split_size = split_size
        self.split_dim = split_dim

    def forward(self, input, other):
        feature_map = nn.functional.conv2d(input, other, bias=None)
        return feature_map.split(self.split_size, dim=self.split_dim)
class LinearSplitModel(nn.Module):
    """Bias-free linear projection followed by ``Tensor.split``.

    Returns the tuple of chunks of size ``split_size`` along ``split_dim``.
    """

    def __init__(self, split_size, split_dim):
        super().__init__()
        self.split_size = split_size
        self.split_dim = split_dim

    def forward(self, input, other):
        projected = nn.functional.linear(input, other, bias=None)
        return projected.split(self.split_size, dim=self.split_dim)
def check_split_handler(rank, world_size, port, split_size, split_dim, model_cls):
    """Worker entry point: validate SplitHandler behind a conv or linear node.

    Numerically checks every predecessor strategy on a 2D (2, 2) mesh, then
    re-traces the model on meta tensors and verifies the split node's
    operation-data mapping and the exact set of following-strategy names
    (which depend on ``split_dim`` because a split dimension cannot stay
    sharded).

    NOTE(review): assumes 4 visible CUDA devices and an NCCL backend.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = model_cls(split_size=split_size, split_dim=split_dim).cuda()
    if model_cls.__name__ == "ConvSplitModel":
        input = torch.rand(8, 8, 66, 66).to("cuda")
        other = torch.rand(16, 8, 3, 3).to("cuda")
        # index of conv node in computation graph
        node_index = 2
        # total number of conv strategies
        strategy_number = 16
    if model_cls.__name__ == "LinearSplitModel":
        input = torch.rand(8, 16, 64, 32).to("cuda")
        other = torch.rand(64, 32).to("cuda")
        # index of linear node in computation graph
        node_index = 2
        # total number of linear strategies
        strategy_number = 23
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=[input, other],
        meta_arg_names=["input", "other"],
        node_type="following",
    )
    tracer = ColoTracer(bias_addition_split=True)
    if model_cls.__name__ == "ConvSplitModel":
        # graph():
        # %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
        # %other : torch.Tensor [#users=1] = placeholder[target=other]
        # %conv2d : [#users=1] = call_function[target=torch.conv2d](args = (%input_1, %other), kwargs = {})
        # %split : [#users=1] = call_method[target=split](args = (%conv2d,), kwargs = {})
        # return split
        meta_args = {
            "input": torch.rand(8, 8, 66, 66).to("meta"),
            "other": torch.rand(16, 8, 3, 3).to("meta"),
        }
        graph = tracer.trace(model, meta_args=meta_args)
    if model_cls.__name__ == "LinearSplitModel":
        # graph():
        # %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
        # %other : torch.Tensor [#users=1] = placeholder[target=other]
        # %linear : [#users=1] = call_function[target=torch._C._nn.linear](args = (%input_1, %other), kwargs = {bias: None})
        # %split : [#users=1] = call_method[target=split](args = (%linear,), kwargs = {})
        # return split
        meta_args = {
            "input": torch.rand(8, 16, 64, 32).to("meta"),
            "other": torch.rand(64, 32).to("meta"),
        }
        graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    previous_mod_node = list(graph.nodes)[2]
    split_node = list(graph.nodes)[3]
    split_strategies_vector = StrategiesVector(split_node)
    previous_strategies_vector = StrategiesVector(previous_mod_node)
    # build handler for the predecessor first so the split handler can follow it
    if model_cls.__name__ == "ConvSplitModel":
        conv_handler = ConvFunctionHandler(
            node=previous_mod_node, device_mesh=device_mesh, strategies_vector=previous_strategies_vector
        )
        conv_handler.register_strategy(compute_resharding_cost=False)
        setattr(previous_mod_node, "strategies_vector", previous_strategies_vector)
    if model_cls.__name__ == "LinearSplitModel":
        assert len(previous_strategies_vector) == 0
        linear_handler = LinearFunctionHandler(
            node=previous_mod_node, device_mesh=device_mesh, strategies_vector=previous_strategies_vector
        )
        linear_handler.register_strategy(compute_resharding_cost=False)
        setattr(previous_mod_node, "strategies_vector", previous_strategies_vector)
    split_handler = SplitHandler(node=split_node, device_mesh=device_mesh, strategies_vector=split_strategies_vector)
    split_handler.register_strategy(compute_resharding_cost=False)
    # check operation data mapping
    mapping = split_handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.data is not None
    if model_cls.__name__ == "ConvSplitModel":
        assert mapping["input"].name == "conv2d"
    else:
        assert mapping["input"].name == "linear"
    assert mapping["input"].data.is_meta
    assert mapping["input"].data.shape == torch.Size([8, 16, 64, 64])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([8, 16, 64, 64])
    assert mapping["output"].name == "split"
    split_items = torch.empty([8, 16, 64, 64]).split(split_size, split_dim)
    assert mapping["output"].logical_shape == tuple([item.shape for item in split_items])
    assert mapping["output"].type == OperationDataType.OUTPUT
    # reshape handler is a following strategy handler, so the number of strategies is equal to the predecessor node.
    assert len(split_strategies_vector) == len(previous_strategies_vector)
    strategy_name_list = [strategy.name for strategy in split_strategies_vector]
    if model_cls.__name__ == "ConvSplitModel":
        if split_dim == 0:
            assert "[R, S1, R, R]_0" in strategy_name_list
            assert "[R, S0, R, R]_1" in strategy_name_list
            assert "[R, R, R, R]_2" in strategy_name_list
            assert "[R, R, R, R]_3" in strategy_name_list
            assert "[R, R, R, R]_4" in strategy_name_list
            assert "[R, R, R, R]_5" in strategy_name_list
            assert "[R, S1, R, R]_6" in strategy_name_list
            assert "[R, S0, R, R]_7" in strategy_name_list
            assert "[R, R, R, R]_8" in strategy_name_list
            assert "[R, R, R, R]_9" in strategy_name_list
            assert "[R, S0, R, R]_10" in strategy_name_list
            assert "[R, S1, R, R]_11" in strategy_name_list
            assert "[R, R, R, R]_12" in strategy_name_list
            assert "[R, R, R, R]_13" in strategy_name_list
            assert "[R, R, R, R]_14" in strategy_name_list
            assert "[R, S01, R, R]_15" in strategy_name_list
        if split_dim == 1:
            assert "[S0, R, R, R]_0" in strategy_name_list
            assert "[S1, R, R, R]_1" in strategy_name_list
            assert "[S0, R, R, R]_2" in strategy_name_list
            assert "[S1, R, R, R]_3" in strategy_name_list
            assert "[S0, R, R, R]_4" in strategy_name_list
            assert "[S1, R, R, R]_5" in strategy_name_list
            assert "[R, R, R, R]_6" in strategy_name_list
            assert "[R, R, R, R]_7" in strategy_name_list
            assert "[R, R, R, R]_8" in strategy_name_list
            assert "[R, R, R, R]_9" in strategy_name_list
            assert "[R, R, R, R]_10" in strategy_name_list
            assert "[R, R, R, R]_11" in strategy_name_list
            assert "[R, R, R, R]_12" in strategy_name_list
            assert "[S01, R, R, R]_13" in strategy_name_list
            assert "[R, R, R, R]_14" in strategy_name_list
            assert "[R, R, R, R]_15" in strategy_name_list
    if model_cls.__name__ == "LinearSplitModel":
        if split_dim == 0:
            assert "[R, R, R, S1]_11" in strategy_name_list
            assert "[R, S0, R, S1]_12" in strategy_name_list
            assert "[R, R, S0, S1]_13" in strategy_name_list
            assert "[R, R, R, S0]_14" in strategy_name_list
            assert "[R, S1, R, S0]_15" in strategy_name_list
            assert "[R, R, S1, S0]_16" in strategy_name_list
            assert "[R, R, R, R]_17" in strategy_name_list
            assert "[R, S0, R, R]_18" in strategy_name_list
            assert "[R, R, S0, R]_19" in strategy_name_list
            assert "[R, R, R, R]_20" in strategy_name_list
            assert "[R, S1, R, R]_21" in strategy_name_list
            assert "[R, R, S1, R]_22" in strategy_name_list
            assert "[R, R, R, S1]_10" in strategy_name_list
            assert "[R, R, R, S0]_9" in strategy_name_list
            assert "[R, R, R, R]_8" in strategy_name_list
            assert "[R, R, R, R]_7" in strategy_name_list
            assert "[R, R, R, S0]_6" in strategy_name_list
            assert "[R, R, R, S1]_5" in strategy_name_list
            assert "[R, R, R, R]_0" in strategy_name_list
            assert "[R, S01, R, R]_1" in strategy_name_list
            assert "[R, R, S01, R]_2" in strategy_name_list
            assert "[R, R, R, R]_3" in strategy_name_list
            assert "[R, R, R, S01]_4" in strategy_name_list
        if split_dim == 1:
            assert "[S0, R, R, S1]_11" in strategy_name_list
            assert "[R, R, R, S1]_12" in strategy_name_list
            assert "[R, R, S0, S1]_13" in strategy_name_list
            assert "[S1, R, R, S0]_14" in strategy_name_list
            assert "[R, R, R, S0]_15" in strategy_name_list
            assert "[R, R, S1, S0]_16" in strategy_name_list
            assert "[S0, R, R, R]_17" in strategy_name_list
            assert "[R, R, R, R]_18" in strategy_name_list
            assert "[R, R, S0, R]_19" in strategy_name_list
            assert "[S1, R, R, R]_20" in strategy_name_list
            assert "[R, R, R, R]_21" in strategy_name_list
            assert "[R, R, S1, R]_22" in strategy_name_list
            assert "[R, R, R, S1]_10" in strategy_name_list
            assert "[R, R, R, S0]_9" in strategy_name_list
            assert "[R, R, R, R]_8" in strategy_name_list
            assert "[R, R, R, R]_7" in strategy_name_list
            assert "[R, R, R, S0]_6" in strategy_name_list
            assert "[R, R, R, S1]_5" in strategy_name_list
            assert "[S01, R, R, R]_0" in strategy_name_list
            assert "[R, R, R, R]_1" in strategy_name_list
            assert "[R, R, S01, R]_2" in strategy_name_list
            assert "[R, R, R, R]_3" in strategy_name_list
            assert "[R, R, R, S01]_4" in strategy_name_list
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
@parameterize("split_size", [2])
@parameterize("split_dim", [0, 1, 2])
@parameterize("model_cls", [ConvSplitModel, LinearSplitModel])
def test_split_handler(split_size, split_dim, model_cls):
    """Spawn 4 workers and check SplitHandler per split dim and predecessor."""
    spawn(check_split_handler, 4, split_size=split_size, split_dim=split_dim, model_cls=model_cls)
# Allow running this test module directly without pytest.
if __name__ == "__main__":
    test_split_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_where_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_where_handler.py | import pytest
import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler.where_handler import WhereHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.testing import clear_cache_before_run
class ConvModel(nn.Module):
    """Thin wrapper around ``torch.where`` (class name kept from the test template)."""

    def __init__(self):
        super().__init__()

    def forward(self, condition, x, y):
        # Elementwise select: x where condition holds, else y.
        return torch.where(condition, x, y)
@pytest.mark.skip("ShapeProp is not compatible with PyTorch 1.11.0")
@clear_cache_before_run()
def test_where_handler():
    """Validate WhereHandler's operation-data mapping and strategy count.

    Traces ``ConvModel`` (a bare ``torch.where``) with broadcastable meta
    tensors on a 2x2 device mesh, then checks each operand's recorded
    name/shape/type, the broadcast logical shape (4, 4, 64, 64), and the total
    number of registered sharding strategies.
    """
    model = ConvModel()
    tracer = ColoTracer(bias_addition_split=True)
    # Expected traced graph:
    # graph():
    # %condition : torch.Tensor [#users=1] = placeholder[target=condition]
    # %x : torch.Tensor [#users=1] = placeholder[target=x]
    # %y : torch.Tensor [#users=1] = placeholder[target=y]
    # %where : [#users=1] = call_function[target=torch.where](args = (%condition, %x, %y), kwargs = {})
    # return where
    # ``x`` and ``y`` are deliberately broadcastable (dim-1 axes) so the handler
    # must compute a common logical shape.
    meta_args = {
        "condition": torch.rand(4, 4, 64, 64).to("meta"),
        "x": torch.rand(4, 1, 64, 64).to("meta"),
        "y": torch.rand(1, 4, 64, 64).to("meta"),
    }
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
    # Node 3 is the ``torch.where`` call (after the three placeholders).
    where_node = list(graph.nodes)[3]
    strategies_vector = StrategiesVector(where_node)
    # build handler
    handler = WhereHandler(node=where_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
    # check operation data mapping
    mapping, _ = handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.logical_shape is not None
        assert op_data.data is not None
    assert mapping["condition"].name == "condition"
    assert mapping["condition"].data.is_meta
    assert mapping["condition"].data.shape == torch.Size([4, 4, 64, 64])
    assert mapping["condition"].type == OperationDataType.ARG
    assert mapping["condition"].logical_shape == torch.Size([4, 4, 64, 64])
    assert mapping["x"].name == "x"
    assert mapping["x"].data.is_meta
    assert mapping["x"].data.shape == torch.Size([4, 1, 64, 64])
    assert mapping["x"].type == OperationDataType.ARG
    # logical shape is the broadcast shape, not the physical one
    assert mapping["x"].logical_shape == torch.Size([4, 4, 64, 64])
    assert mapping["y"].name == "y"
    assert mapping["y"].data.is_meta
    assert mapping["y"].data.shape == torch.Size([1, 4, 64, 64])
    assert mapping["y"].type == OperationDataType.ARG
    assert mapping["y"].logical_shape == torch.Size([4, 4, 64, 64])
    assert mapping["output"].name == "where"
    assert mapping["output"].data.is_meta
    assert mapping["output"].data.shape == torch.Size([4, 4, 64, 64])
    assert mapping["output"].type == OperationDataType.OUTPUT
    handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    # 4*3 + 4*3/2*2 + 1
    assert len(strategy_name_list) == 25
# Allow running this test file directly (without pytest) for quick debugging.
if __name__ == "__main__":
    test_where_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/utils.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/utils.py | import copy
from typing import Any, Dict, List, Optional
import torch
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.passes.runtime_apply_pass import runtime_apply_pass
from colossalai.auto_parallel.passes.runtime_preparation_pass import runtime_preparation_pass
from colossalai.auto_parallel.tensor_shard.options import SolverOptions
from colossalai.auto_parallel.tensor_shard.solver import StrategiesConstructor
from colossalai.auto_parallel.tensor_shard.solver.cost_graph import CostGraph
from colossalai.auto_parallel.tensor_shard.solver.solver import Solver
from colossalai.device.device_mesh import DeviceMesh
from colossalai.tensor.shape_consistency import to_global
from colossalai.testing.comparison import assert_close
def _build_model_to_compare(
model: torch.nn.Module,
input_args: List[torch.Tensor],
input_kwargs: Dict[str, torch.Tensor],
grad_dict: Dict[any, torch.Tensor],
):
model_to_compare = copy.deepcopy(model)
args_to_compare = []
kwargs_to_compare = {}
for arg_index, input_tensor in enumerate(input_args):
def wrapper(param, index):
def hook_fn(grad):
grad_dict[index] = grad
param.register_hook(hook_fn)
arg_to_compare = copy.deepcopy(input_tensor)
# only Tensors of floating point and complex dtype can require gradients
if arg_to_compare.dtype != torch.int64:
arg_to_compare.requires_grad = True
wrapper(arg_to_compare, arg_index)
args_to_compare.append(arg_to_compare)
for name, input_kwarg in input_kwargs.items():
def wrapper(param, name):
def hook_fn(grad):
grad_dict[name] = grad
param.register_hook(hook_fn)
kwarg_to_compare = copy.deepcopy(input_kwarg)
# only Tensors of floating point and complex dtype can require gradients
if kwarg_to_compare.dtype != torch.int64:
kwarg_to_compare.requires_grad = True
wrapper(kwarg_to_compare, name)
kwargs_to_compare[name] = kwarg_to_compare
return model_to_compare, args_to_compare, kwargs_to_compare
def numerical_test_for_node_strategy(
    model: torch.nn.Module,
    device_mesh: DeviceMesh,
    node_index: int,
    strategy_number: int,
    input_args: List[torch.Tensor],
    meta_arg_names: List[str],
    input_kwargs: Optional[Dict[str, torch.Tensor]] = None,
    node_type: str = "normal",
):
    """Sweep every strategy of one node and compare sharded vs. reference runs.

    For each strategy index the model is re-traced and solved with that
    strategy pinned on the node at ``node_index``; forward outputs, input
    gradients and (gathered) parameter gradients of the sharded run are then
    compared against a plain deep-copied model via ``assert_close_helper``.

    Args:
        model: module under test (deep-copied on every iteration).
        device_mesh: device mesh fed to the strategies constructor.
        node_index: index of the target node among the leaf strategies.
        strategy_number: number of strategies to sweep on the target node.
        input_args: positional input tensors.
        meta_arg_names: placeholder names matching ``input_args`` order.
        input_kwargs: optional keyword input tensors (default: none).
        node_type: "normal" pins only the target node; "following" also pins
            the successor node to the same index; any other value lets the
            solver choose the remaining nodes.
    """
    # Use a None sentinel instead of a mutable ``{}`` default, which would be
    # shared across calls.
    if input_kwargs is None:
        input_kwargs = {}
    for strategy_index in range(strategy_number):
        print(f"#strategy_index: {strategy_index}")
        # We need to copy the model to avoid do backward more than once in same graph
        grad_to_compare_dict = {}
        grad_to_shard_dict = {}
        model_to_compare, args_to_compare, kwargs_to_compare = _build_model_to_compare(
            model, input_args, input_kwargs, grad_to_compare_dict
        )
        model_to_shard, args_to_shard, kwargs_to_shard = _build_model_to_compare(
            model, input_args, input_kwargs, grad_to_shard_dict
        )
        # Trace with meta tensors of the same shapes/dtypes as the real inputs.
        tracer = ColoTracer(bias_addition_split=True)
        input_sample = {}
        for input_arg, meta_arg_name in zip(input_args, meta_arg_names):
            input_sample[meta_arg_name] = torch.empty(input_arg.shape, dtype=input_arg.dtype).to("meta")
        for meta_kwarg_name, input_kwarg in input_kwargs.items():
            input_sample[meta_kwarg_name] = torch.empty(input_kwarg.shape, dtype=input_kwarg.dtype).to("meta")
        graph = tracer.trace(root=model_to_shard, meta_args=input_sample)
        gm = ColoGraphModule(model_to_shard, graph, model_to_shard.__class__.__name__)
        shape_prop_pass(gm, *input_sample.values())
        solver_options = SolverOptions()
        strategies_constructor = StrategiesConstructor(graph, device_mesh, solver_options)
        strategies_constructor.build_strategies_and_cost()
        target_node = [strategies_vector.node for strategies_vector in strategies_constructor.leaf_strategies][
            node_index
        ]
        # Build a solution vector with the swept strategy pinned on the target.
        if node_type == "normal":
            solution_len = len(strategies_constructor.leaf_strategies)
            solution = [0] * solution_len
            solution[node_index] = strategy_index
        elif node_type == "following":
            solution_len = len(strategies_constructor.leaf_strategies)
            solution = [0] * solution_len
            solution[node_index] = strategy_index
            # The follower node mirrors the target's strategy index.
            solution[node_index + 1] = strategy_index
        else:
            node_vector = strategies_constructor.leaf_strategies[node_index]
            strategy_to_keep = node_vector[strategy_index]
            node_vector = [strategy_to_keep]
            # solution construction
            cost_graph = CostGraph(strategies_constructor.leaf_strategies)
            cost_graph.simplify_graph()
            solver = Solver(gm.graph, strategies_constructor, cost_graph, verbose=False)
            ret = solver.call_solver_serialized_args()
            solution = list(ret[0])
        gm, sharding_spec_dict, origin_spec_dict, comm_actions_dict = runtime_preparation_pass(
            gm, solution, device_mesh, strategies_constructor
        )
        gm = runtime_apply_pass(gm)
        gm.recompile()
        # forward result compare
        output = gm(
            *args_to_shard,
            sharding_spec_convert_dict=sharding_spec_dict,
            origin_node_sharding_spec_dict=origin_spec_dict,
            comm_actions_dict=comm_actions_dict,
            **kwargs_to_shard,
        )
        output_to_compare = model_to_compare(*args_to_compare, **kwargs_to_compare)
        assert_close_helper(output, output_to_compare, strategy_index=strategy_index, type="forward output")
        # backward result compare
        if isinstance(output, (tuple, list)):
            loss = output[0].sum()
            loss_to_compare = output_to_compare[0].sum()
        else:
            loss = output.sum()
            loss_to_compare = output_to_compare.sum()
        loss_to_compare.backward()
        loss.backward()
        for key in grad_to_shard_dict.keys():
            grad_to_shard = grad_to_shard_dict[key]
            grad_to_compare = grad_to_compare_dict[key]
            assert_close_helper(grad_to_shard, grad_to_compare, strategy_index=strategy_index, type="input grad")
        # extract the strategy used in this iter
        strategy_in_use = target_node.strategies_vector[strategy_index]
        param_to_shard_dict = dict(gm.named_parameters())
        param_to_compare_dict = dict(model_to_compare.named_parameters())
        for name in param_to_shard_dict.keys():
            param_name = name.split(".")[-1]
            if node_type == "normal":
                param_sharding_spec = strategy_in_use.get_sharding_spec_by_name(param_name)
            else:
                # For non-normal nodes the spec is read off the traced graph node
                # that owns the parameter.
                if "weight" in name:
                    param_sharding_spec = None
                    for node in list(graph.nodes):
                        if "weight" in node.name:
                            param_sharding_spec = node.sharding_spec
                elif "bias" in name:
                    param_sharding_spec = None
                    for node in list(graph.nodes):
                        if "bias" in node.name:
                            param_sharding_spec = node.sharding_spec
            assert param_sharding_spec is not None
            # Gather the sharded gradient to its global form before comparing.
            grad_sharded = param_to_shard_dict[name].grad
            grad_to_compare = param_to_compare_dict[name].grad
            global_grad = to_global(grad_sharded, param_sharding_spec)
            assert_close_helper(global_grad, grad_to_compare, strategy_index=strategy_index, type="param grad")
def assert_close_helper(
    first: torch.Tensor,
    second: torch.Tensor,
    rtol: float = 1e-2,
    atol: float = 1e-2,
    strategy_index: int = -1,
    type: str = "not defined",
):
    """Best-effort closeness check between two tensors (or sequences of tensors).

    A mismatch is printed together with ``strategy_index`` and the comparison
    ``type`` instead of raising, so the surrounding strategy sweep keeps
    running through all strategies.

    Args:
        first/second: tensors, or equal-length tuples/lists of tensors.
        rtol/atol: tolerances forwarded to ``assert_close``.
        strategy_index: index of the strategy being verified (for the report).
        type: human-readable label of what is being compared. (Shadows the
            ``type`` builtin; kept for backward compatibility with callers
            passing it by keyword.)
    """
    try:
        if isinstance(first, (tuple, list)):
            # Compare element-wise; zip ignores surplus elements, matching the
            # original behavior.
            for first_element, second_element in zip(first, second):
                assert_close(first_element, second_element, rtol=rtol, atol=atol)
        else:
            assert_close(first, second, rtol=rtol, atol=atol)
    except AssertionError:
        # Only swallow comparison failures. The previous bare ``except:`` also
        # hid unrelated bugs (TypeError, NameError, even KeyboardInterrupt)
        # behind this print.
        print(f"strategy index {strategy_index} encounter assert_close error on {type}")
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_norm_pooling_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_norm_pooling_handler.py | import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler.normal_pooling_handler import NormPoolingHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.testing import clear_cache_before_run, run_on_environment_flag
@run_on_environment_flag(name="AUTO_PARALLEL")
@clear_cache_before_run()
def test_norm_pool_handler():
    """Validate NormPoolingHandler on a ``MaxPool2d`` module.

    Traces a single max-pool layer with a meta input on a 2x2 device mesh,
    then checks the handler's operation-data mapping (input and pooled output
    shapes) and the number of registered sharding strategies.
    """
    model = nn.Sequential(nn.MaxPool2d(4, padding=1).to("meta"))
    tracer = ColoTracer(bias_addition_split=True)
    # Expected traced graph:
    # graph():
    # %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
    # %_0 : [#users=1] = call_module[target=0](args = (%input_1,), kwargs = {})
    # return _0
    meta_args = {"input": torch.rand(4, 4, 64, 64).to("meta")}
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
    # Node 1 is the pooling call_module (after the input placeholder).
    conv_mod_node = list(graph.nodes)[1]
    strategies_vector = StrategiesVector(conv_mod_node)
    # build handler
    handler = NormPoolingHandler(node=conv_mod_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.data is not None
    assert mapping["input"].name == "input_1"
    assert mapping["input"].data.is_meta
    assert mapping["input"].data.shape == torch.Size([4, 4, 64, 64])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([4, 4, 64, 64])
    assert mapping["output"].name == "_0"
    assert mapping["output"].data.is_meta
    # 64 -> 16 spatially: kernel 4, stride 4 (defaults to kernel), padding 1.
    assert mapping["output"].data.shape == torch.Size([4, 4, 16, 16])
    assert mapping["output"].type == OperationDataType.OUTPUT
    strategies_vector = handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    assert len(strategy_name_list) == 9
# Allow running this test file directly (without pytest) for quick debugging.
if __name__ == "__main__":
    test_norm_pool_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_permute_and_transpose_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_permute_and_transpose_handler.py | import pytest
import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler import PermuteHandler, TransposeHandler
from colossalai.auto_parallel.tensor_shard.node_handler.conv_handler import ConvFunctionHandler
from colossalai.auto_parallel.tensor_shard.node_handler.linear_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
class ConvReshapeModel(nn.Module):
    """Bias-free conv2d followed by a reshape op (``torch.permute`` or ``torch.transpose``)."""

    def __init__(self, reshape_dims, call_function):
        super().__init__()
        self.reshape_dims = reshape_dims
        self.call_function = call_function

    def forward(self, input, other):
        features = nn.functional.conv2d(input, other, bias=None)
        # ``torch.permute`` takes its dims as one tuple, while
        # ``torch.transpose`` takes two separate positional dim arguments.
        if self.call_function == torch.permute:
            return self.call_function(features, self.reshape_dims)
        return self.call_function(features, *self.reshape_dims)
class LinearReshapeModel(nn.Module):
    """Bias-free linear layer followed by a reshape op (``torch.permute`` or ``torch.transpose``)."""

    def __init__(self, reshape_dims, call_function):
        super().__init__()
        self.reshape_dims = reshape_dims
        self.call_function = call_function

    def forward(self, input, other):
        projected = nn.functional.linear(input, other, bias=None)
        # ``torch.permute`` takes its dims as one tuple, while
        # ``torch.transpose`` takes two separate positional dim arguments.
        if self.call_function == torch.permute:
            return self.call_function(projected, self.reshape_dims)
        return self.call_function(projected, *self.reshape_dims)
def check_view_handler(rank, world_size, port, call_function, reshape_dims, model_cls):
    """Worker for test_view_handler: validate Permute/Transpose handler strategies.

    Runs on one of 4 spawned ranks. First performs a numerical forward/backward
    comparison for every strategy of the node preceding the reshape op, then
    re-traces the model and checks the reshape handler's operation-data mapping
    and the exact set of generated sharding strategies.

    Args:
        rank, world_size, port: distributed launch parameters (NCCL backend).
        call_function: ``torch.permute`` or ``torch.transpose``.
        reshape_dims: pair ``(permute_dims, transpose_dims)``; the entry
            matching ``call_function`` is selected below.
        model_cls: ``ConvReshapeModel`` or ``LinearReshapeModel``.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    # Select the dims belonging to the chosen reshape function; from here on
    # ``reshape_dims`` is a flat tuple.
    if call_function == torch.permute:
        reshape_dims = reshape_dims[0]
    elif call_function == torch.transpose:
        reshape_dims = reshape_dims[1]
    model = model_cls(reshape_dims, call_function).cuda()
    if model_cls.__name__ == "ConvReshapeModel":
        input = torch.rand(8, 8, 66, 66).to("cuda")
        other = torch.rand(16, 8, 3, 3).to("cuda")
        # index of conv node in computation graph
        node_index = 2
        # total number of conv strategies
        strategy_number = 16
    if model_cls.__name__ == "LinearReshapeModel":
        input = torch.rand(8, 16, 64, 32).to("cuda")
        other = torch.rand(64, 32).to("cuda")
        # index of linear node in computation graph
        node_index = 2
        # total number of linear strategies
        strategy_number = 23
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=[input, other],
        meta_arg_names=["input", "other"],
        node_type="following",
    )
    tracer = ColoTracer(bias_addition_split=True)
    if model_cls.__name__ == "ConvReshapeModel":
        # graph():
        # %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
        # %other : torch.Tensor [#users=1] = placeholder[target=other]
        # %conv2d : [#users=1] = call_function[target=torch.conv2d](args = (%input_1, %other), kwargs = {bias: None})
        # %permute : [#users=1] = call_function[target=torch.permute](args = (%conv2d, (0, 2, 1, 3)), kwargs = {})
        # return permute
        meta_args = {
            "input": torch.rand(8, 8, 66, 66).to("meta"),
            "other": torch.rand(16, 8, 3, 3).to("meta"),
        }
        graph = tracer.trace(model, meta_args=meta_args)
    if model_cls.__name__ == "LinearReshapeModel":
        # graph():
        # %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
        # %other : torch.Tensor [#users=1] = placeholder[target=other]
        # %linear : [#users=1] = call_function[target=torch._C._nn.linear](args = (%input_1, %other), kwargs = {bias: None})
        # %permute : [#users=1] = call_method[target=view](args = (%linear, 32, 4, 32, 32, 4), kwargs = {})
        # return permute
        meta_args = {
            "input": torch.rand(8, 16, 64, 32).to("meta"),
            "other": torch.rand(64, 32).to("meta"),
        }
        graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    previous_mod_node = list(graph.nodes)[2]
    reshape_node = list(graph.nodes)[3]
    view_strategies_vector = StrategiesVector(reshape_node)
    previous_strategies_vector = StrategiesVector(previous_mod_node)
    # build handler
    if model_cls.__name__ == "ConvReshapeModel":
        conv_handler = ConvFunctionHandler(
            node=previous_mod_node, device_mesh=device_mesh, strategies_vector=previous_strategies_vector
        )
        conv_handler.register_strategy(compute_resharding_cost=False)
        setattr(previous_mod_node, "strategies_vector", previous_strategies_vector)
    if model_cls.__name__ == "LinearReshapeModel":
        assert len(previous_strategies_vector) == 0
        linear_handler = LinearFunctionHandler(
            node=previous_mod_node, device_mesh=device_mesh, strategies_vector=previous_strategies_vector
        )
        linear_handler.register_strategy(compute_resharding_cost=False)
        setattr(previous_mod_node, "strategies_vector", previous_strategies_vector)
    if call_function == torch.permute:
        reshape_handler = PermuteHandler(
            node=reshape_node, device_mesh=device_mesh, strategies_vector=view_strategies_vector
        )
    else:
        reshape_handler = TransposeHandler(
            node=reshape_node, device_mesh=device_mesh, strategies_vector=view_strategies_vector
        )
    reshape_handler.register_strategy(compute_resharding_cost=False)
    # check operation data mapping
    mapping = reshape_handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.data is not None
    if model_cls.__name__ == "ConvReshapeModel":
        assert mapping["input"].name == "conv2d"
    else:
        assert mapping["input"].name == "linear"
    assert mapping["input"].data.is_meta
    assert mapping["input"].data.shape == torch.Size([8, 16, 64, 64])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([8, 16, 64, 64])
    if call_function == torch.permute:
        assert mapping["output"].name == "permute"
        assert mapping["output"].data.is_meta
        assert mapping["output"].data.shape == torch.permute(torch.rand(8, 16, 64, 64), reshape_dims).shape
        assert mapping["output"].type == OperationDataType.OUTPUT
    else:
        assert mapping["output"].name == "transpose"
        assert mapping["output"].data.is_meta
        assert mapping["output"].data.shape == torch.transpose(torch.rand(8, 16, 64, 64), *reshape_dims).shape
        assert mapping["output"].type == OperationDataType.OUTPUT
    # reshape handler is a following strategy handler, so the number of strategies is equal to the predecessor node.
    assert len(view_strategies_vector) == len(previous_strategies_vector)
    strategy_name_list = [strategy.name for strategy in view_strategies_vector]
    if rank == 0:
        for name in strategy_name_list:
            print(name)
    if model_cls.__name__ == "ConvReshapeModel":
        if reshape_dims in ((0, 2, 1, 3), (1, 2)):
            assert "[S0, S1, R, R] -> [S0, R, S1, R]_0" in strategy_name_list
            assert "[S1, S0, R, R] -> [S1, R, S0, R]_1" in strategy_name_list
            assert "[S0, R, R, R] -> [S0, R, R, R]_2" in strategy_name_list
            assert "[S1, R, R, R] -> [S1, R, R, R]_3" in strategy_name_list
            assert "[S0, R, R, R] -> [S0, R, R, R]_4" in strategy_name_list
            assert "[S1, R, R, R] -> [S1, R, R, R]_5" in strategy_name_list
            assert "[R, S1, R, R] -> [R, R, S1, R]_6" in strategy_name_list
            assert "[R, S0, R, R] -> [R, R, S0, R]_7" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_8" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_9" in strategy_name_list
            assert "[R, S0, R, R] -> [R, R, S0, R]_10" in strategy_name_list
            assert "[R, S1, R, R] -> [R, R, S1, R]_11" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_12" in strategy_name_list
            assert "[S01, R, R, R] -> [S01, R, R, R]_13" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_14" in strategy_name_list
            assert "[R, S01, R, R] -> [R, R, S01, R]_15" in strategy_name_list
        if reshape_dims == (2, 0, 1, 3):
            assert "[S0, S1, R, R] -> [R, S0, S1, R]_0" in strategy_name_list
            assert "[S1, S0, R, R] -> [R, S1, S0, R]_1" in strategy_name_list
            assert "[S0, R, R, R] -> [R, S0, R, R]_2" in strategy_name_list
            assert "[S1, R, R, R] -> [R, S1, R, R]_3" in strategy_name_list
            assert "[S0, R, R, R] -> [R, S0, R, R]_4" in strategy_name_list
            assert "[S1, R, R, R] -> [R, S1, R, R]_5" in strategy_name_list
            assert "[R, S1, R, R] -> [R, R, S1, R]_6" in strategy_name_list
            assert "[R, S0, R, R] -> [R, R, S0, R]_7" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_8" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_9" in strategy_name_list
            assert "[R, S0, R, R] -> [R, R, S0, R]_10" in strategy_name_list
            assert "[R, S1, R, R] -> [R, R, S1, R]_11" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_12" in strategy_name_list
            assert "[S01, R, R, R] -> [R, S01, R, R]_13" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_14" in strategy_name_list
            assert "[R, S01, R, R] -> [R, R, S01, R]_15" in strategy_name_list
        if reshape_dims == (1, 3):
            assert "[S0, S1, R, R] -> [S0, R, R, S1]_0" in strategy_name_list
            assert "[S1, S0, R, R] -> [S1, R, R, S0]_1" in strategy_name_list
            assert "[S0, R, R, R] -> [S0, R, R, R]_2" in strategy_name_list
            assert "[S1, R, R, R] -> [S1, R, R, R]_3" in strategy_name_list
            assert "[S0, R, R, R] -> [S0, R, R, R]_4" in strategy_name_list
            assert "[S1, R, R, R] -> [S1, R, R, R]_5" in strategy_name_list
            assert "[R, S1, R, R] -> [R, R, R, S1]_6" in strategy_name_list
            assert "[R, S0, R, R] -> [R, R, R, S0]_7" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_8" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_9" in strategy_name_list
            assert "[R, S0, R, R] -> [R, R, R, S0]_10" in strategy_name_list
            assert "[R, S1, R, R] -> [R, R, R, S1]_11" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_12" in strategy_name_list
            assert "[S01, R, R, R] -> [S01, R, R, R]_13" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_14" in strategy_name_list
            assert "[R, S01, R, R] -> [R, R, R, S01]_15" in strategy_name_list
    if model_cls.__name__ == "LinearReshapeModel":
        # FIX: this previously read ``reshape_dims == ((0, 2, 1, 3), (1, 2))``,
        # which can never be true because ``reshape_dims`` was flattened to a
        # single tuple at the top of the function — so this whole assertion
        # block was silently dead. Use ``in``, mirroring the conv branch.
        if reshape_dims in ((0, 2, 1, 3), (1, 2)):
            assert "[S0, R, R, S1] -> [S0, R, R, S1]_11" in strategy_name_list
            assert "[R, S0, R, S1] -> [R, R, S0, S1]_12" in strategy_name_list
            assert "[R, R, S0, S1] -> [R, S0, R, S1]_13" in strategy_name_list
            assert "[S1, R, R, S0] -> [S1, R, R, S0]_14" in strategy_name_list
            assert "[R, S1, R, S0] -> [R, R, S1, S0]_15" in strategy_name_list
            assert "[R, R, S1, S0] -> [R, S1, R, S0]_16" in strategy_name_list
            assert "[S0, R, R, R] -> [S0, R, R, R]_17" in strategy_name_list
            assert "[R, S0, R, R] -> [R, R, S0, R]_18" in strategy_name_list
            assert "[R, R, S0, R] -> [R, S0, R, R]_19" in strategy_name_list
            assert "[S1, R, R, R] -> [S1, R, R, R]_20" in strategy_name_list
            assert "[R, S1, R, R] -> [R, R, S1, R]_21" in strategy_name_list
            assert "[R, R, S1, R] -> [R, S1, R, R]_22" in strategy_name_list
            assert "[R, R, R, S1] -> [R, R, R, S1]_10" in strategy_name_list
            assert "[R, R, R, S0] -> [R, R, R, S0]_9" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_8" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_7" in strategy_name_list
            assert "[R, R, R, S0] -> [R, R, R, S0]_6" in strategy_name_list
            assert "[R, R, R, S1] -> [R, R, R, S1]_5" in strategy_name_list
            assert "[S01, R, R, R] -> [S01, R, R, R]_0" in strategy_name_list
            assert "[R, S01, R, R] -> [R, R, S01, R]_1" in strategy_name_list
            assert "[R, R, S01, R] -> [R, S01, R, R]_2" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_3" in strategy_name_list
            assert "[R, R, R, S01] -> [R, R, R, S01]_4" in strategy_name_list
        if reshape_dims == (2, 0, 1, 3):
            assert "[S0, R, R, S1] -> [R, S0, R, S1]_11" in strategy_name_list
            assert "[R, S0, R, S1] -> [R, R, S0, S1]_12" in strategy_name_list
            assert "[R, R, S0, S1] -> [S0, R, R, S1]_13" in strategy_name_list
            assert "[S1, R, R, S0] -> [R, S1, R, S0]_14" in strategy_name_list
            assert "[R, S1, R, S0] -> [R, R, S1, S0]_15" in strategy_name_list
            assert "[R, R, S1, S0] -> [S1, R, R, S0]_16" in strategy_name_list
            assert "[S0, R, R, R] -> [R, S0, R, R]_17" in strategy_name_list
            assert "[R, S0, R, R] -> [R, R, S0, R]_18" in strategy_name_list
            assert "[R, R, S0, R] -> [S0, R, R, R]_19" in strategy_name_list
            assert "[S1, R, R, R] -> [R, S1, R, R]_20" in strategy_name_list
            assert "[R, S1, R, R] -> [R, R, S1, R]_21" in strategy_name_list
            assert "[R, R, S1, R] -> [S1, R, R, R]_22" in strategy_name_list
            assert "[R, R, R, S1] -> [R, R, R, S1]_10" in strategy_name_list
            assert "[R, R, R, S0] -> [R, R, R, S0]_9" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_8" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_7" in strategy_name_list
            assert "[R, R, R, S0] -> [R, R, R, S0]_6" in strategy_name_list
            assert "[R, R, R, S1] -> [R, R, R, S1]_5" in strategy_name_list
            assert "[S01, R, R, R] -> [R, S01, R, R]_0" in strategy_name_list
            assert "[R, S01, R, R] -> [R, R, S01, R]_1" in strategy_name_list
            assert "[R, R, S01, R] -> [S01, R, R, R]_2" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_3" in strategy_name_list
            assert "[R, R, R, S01] -> [R, R, R, S01]_4" in strategy_name_list
        if reshape_dims == (1, 3):
            assert "[S0, R, R, S1] -> [S0, S1, R, R]_11" in strategy_name_list
            assert "[R, S0, R, S1] -> [R, S1, R, S0]_12" in strategy_name_list
            assert "[R, R, S0, S1] -> [R, S1, S0, R]_13" in strategy_name_list
            assert "[S1, R, R, S0] -> [S1, S0, R, R]_14" in strategy_name_list
            assert "[R, S1, R, S0] -> [R, S0, R, S1]_15" in strategy_name_list
            assert "[R, R, S1, S0] -> [R, S0, S1, R]_16" in strategy_name_list
            assert "[S0, R, R, R] -> [S0, R, R, R]_17" in strategy_name_list
            assert "[R, S0, R, R] -> [R, R, R, S0]_18" in strategy_name_list
            assert "[R, R, S0, R] -> [R, R, S0, R]_19" in strategy_name_list
            assert "[S1, R, R, R] -> [S1, R, R, R]_20" in strategy_name_list
            assert "[R, S1, R, R] -> [R, R, R, S1]_21" in strategy_name_list
            assert "[R, R, S1, R] -> [R, R, S1, R]_22" in strategy_name_list
            assert "[R, R, R, S1] -> [R, S1, R, R]_10" in strategy_name_list
            assert "[R, R, R, S0] -> [R, S0, R, R]_9" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_8" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_7" in strategy_name_list
            assert "[R, R, R, S0] -> [R, S0, R, R]_6" in strategy_name_list
            assert "[R, R, R, S1] -> [R, S1, R, R]_5" in strategy_name_list
            assert "[S01, R, R, R] -> [S01, R, R, R]_0" in strategy_name_list
            assert "[R, S01, R, R] -> [R, R, R, S01]_1" in strategy_name_list
            assert "[R, R, S01, R] -> [R, R, S01, R]_2" in strategy_name_list
            assert "[R, R, R, R] -> [R, R, R, R]_3" in strategy_name_list
            assert "[R, R, R, S01] -> [R, S01, R, R]_4" in strategy_name_list
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
@parameterize("call_function", [torch.permute, torch.transpose])
@parameterize("reshape_dims", [((0, 2, 1, 3), (1, 2)), ((2, 0, 1, 3), (1, 3))])
@parameterize("model_cls", [ConvReshapeModel, LinearReshapeModel])
def test_view_handler(call_function, reshape_dims, model_cls):
    """Spawn a 4-process check_view_handler run for each parameter combination."""
    worker_kwargs = {
        "call_function": call_function,
        "reshape_dims": reshape_dims,
        "model_cls": model_cls,
    }
    spawn(check_view_handler, 4, **worker_kwargs)
# Allow running this test file directly (without pytest) for quick debugging.
if __name__ == "__main__":
    test_view_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_softmax_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_softmax_handler.py | import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler.linear_handler import LinearFunctionHandler
from colossalai.auto_parallel.tensor_shard.node_handler.softmax_handler import SoftmaxHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import parameterize, rerun_if_address_is_in_use, run_on_environment_flag, spawn
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
class LinearSplitModel(nn.Module):
    """Bias-free linear projection followed by a softmax along ``softmax_dim``."""

    def __init__(self, softmax_dim):
        super().__init__()
        self.softmax_dim = softmax_dim

    def forward(self, input, other):
        projected = F.linear(input, other, bias=None)
        return F.softmax(projected, self.softmax_dim)
def check_split_handler(rank, world_size, port, softmax_dim, model_cls):
    """Per-rank worker: numerically verifies every sharding strategy for the
    softmax node that follows a linear op, then re-traces the model and checks
    the ``SoftmaxHandler``'s operation-data mapping and strategy names.

    Args:
        rank: rank of this spawned process.
        world_size: total number of processes (expected 4 -> 2x2 mesh).
        port: free localhost port for the NCCL rendezvous.
        softmax_dim: dimension passed to ``F.softmax`` inside the model.
        model_cls: model class to instantiate (``LinearSplitModel``).

    NOTE(review): names such as ``check_split_handler``/``split_node`` look
    copy-pasted from a split-handler test; the node under test is a softmax.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = model_cls(softmax_dim=softmax_dim).cuda()
    input = torch.rand(8, 16, 64, 32).to("cuda")
    other = torch.rand(64, 32).to("cuda")
    # index of linear node in computation graph
    node_index = 2
    # total number of linear strategies
    strategy_number = 23
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    # 2x2 logical device mesh over 4 devices
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=[input, other],
        meta_arg_names=["input", "other"],
        node_type="following",
    )
    tracer = ColoTracer(bias_addition_split=True)
    # Traced graph (the last node is the softmax, not a "split"):
    # graph():
    #     %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
    #     %other : torch.Tensor [#users=1] = placeholder[target=other]
    #     %linear : [#users=1] = call_function[target=torch._C._nn.linear](args = (%input_1, %other), kwargs = {bias: None})
    #     %softmax : [#users=1] = call_function[target=torch.nn.functional.softmax](args = (%linear,), kwargs = {dim: softmax_dim})
    # return softmax
    meta_args = {
        "input": torch.rand(8, 16, 64, 32).to("meta"),
        "other": torch.rand(64, 32).to("meta"),
    }
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    # graph order: placeholder, placeholder, linear, softmax
    previous_mod_node = list(graph.nodes)[2]
    split_node = list(graph.nodes)[3]
    split_strategies_vector = StrategiesVector(split_node)
    previous_strategies_vector = StrategiesVector(previous_mod_node)

    # build handler: the linear node must register its strategies first,
    # because the softmax handler follows the predecessor's strategies.
    assert len(previous_strategies_vector) == 0
    linear_handler = LinearFunctionHandler(
        node=previous_mod_node, device_mesh=device_mesh, strategies_vector=previous_strategies_vector
    )
    linear_handler.register_strategy(compute_resharding_cost=False)
    setattr(previous_mod_node, "strategies_vector", previous_strategies_vector)
    softmax_handler = SoftmaxHandler(
        node=split_node, device_mesh=device_mesh, strategies_vector=split_strategies_vector
    )
    softmax_handler.register_strategy(compute_resharding_cost=False)

    # check operation data mapping
    mapping = softmax_handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.data is not None
    assert mapping["input"].name == "linear"
    assert mapping["input"].data.is_meta
    assert mapping["input"].data.shape == torch.Size([8, 16, 64, 64])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([8, 16, 64, 64])
    assert mapping["softmax_dim"].name == "softmax_dim"
    assert mapping["softmax_dim"].data == softmax_dim
    assert mapping["softmax_dim"].type == OperationDataType.ARG
    assert mapping["output"].name == "softmax"
    assert mapping["output"].data.shape == torch.Size([8, 16, 64, 64])
    assert mapping["output"].logical_shape == torch.Size([8, 16, 64, 64])
    assert mapping["output"].type == OperationDataType.OUTPUT

    # reshape handler is a following strategy handler, so the number of strategies is equal to the predecessor node.
    assert len(split_strategies_vector) == len(previous_strategies_vector)
    strategy_name_list = [strategy.name for strategy in split_strategies_vector]
    # Shardings on the softmax dim must be removed, hence the dim-specific lists.
    if softmax_dim == 0:
        assert "[R, R, R, S1] -> [R, R, R, S1]_11" in strategy_name_list
        assert "[R, S0, R, S1] -> [R, S0, R, S1]_12" in strategy_name_list
        assert "[R, R, S0, S1] -> [R, R, S0, S1]_13" in strategy_name_list
        assert "[R, R, R, S0] -> [R, R, R, S0]_14" in strategy_name_list
        assert "[R, S1, R, S0] -> [R, S1, R, S0]_15" in strategy_name_list
        assert "[R, R, S1, S0] -> [R, R, S1, S0]_16" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_17" in strategy_name_list
        assert "[R, S0, R, R] -> [R, S0, R, R]_18" in strategy_name_list
        assert "[R, R, S0, R] -> [R, R, S0, R]_19" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_20" in strategy_name_list
        assert "[R, S1, R, R] -> [R, S1, R, R]_21" in strategy_name_list
        assert "[R, R, S1, R] -> [R, R, S1, R]_22" in strategy_name_list
        assert "[R, R, R, S1] -> [R, R, R, S1]_10" in strategy_name_list
        assert "[R, R, R, S0] -> [R, R, R, S0]_9" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_8" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_7" in strategy_name_list
        assert "[R, R, R, S0] -> [R, R, R, S0]_6" in strategy_name_list
        assert "[R, R, R, S1] -> [R, R, R, S1]_5" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_0" in strategy_name_list
        assert "[R, S01, R, R] -> [R, S01, R, R]_1" in strategy_name_list
        assert "[R, R, S01, R] -> [R, R, S01, R]_2" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_3" in strategy_name_list
        assert "[R, R, R, S01] -> [R, R, R, S01]_4" in strategy_name_list
    if softmax_dim == 1:
        assert "[S0, R, R, S1] -> [S0, R, R, S1]_11" in strategy_name_list
        assert "[R, R, R, S1] -> [R, R, R, S1]_12" in strategy_name_list
        assert "[R, R, S0, S1] -> [R, R, S0, S1]_13" in strategy_name_list
        assert "[S1, R, R, S0] -> [S1, R, R, S0]_14" in strategy_name_list
        assert "[R, R, R, S0] -> [R, R, R, S0]_15" in strategy_name_list
        assert "[R, R, S1, S0] -> [R, R, S1, S0]_16" in strategy_name_list
        assert "[S0, R, R, R] -> [S0, R, R, R]_17" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_18" in strategy_name_list
        assert "[R, R, S0, R] -> [R, R, S0, R]_19" in strategy_name_list
        assert "[S1, R, R, R] -> [S1, R, R, R]_20" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_21" in strategy_name_list
        assert "[R, R, S1, R] -> [R, R, S1, R]_22" in strategy_name_list
        assert "[R, R, R, S1] -> [R, R, R, S1]_10" in strategy_name_list
        assert "[R, R, R, S0] -> [R, R, R, S0]_9" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_8" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_7" in strategy_name_list
        assert "[R, R, R, S0] -> [R, R, R, S0]_6" in strategy_name_list
        assert "[R, R, R, S1] -> [R, R, R, S1]_5" in strategy_name_list
        assert "[S01, R, R, R] -> [S01, R, R, R]_0" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_1" in strategy_name_list
        assert "[R, R, S01, R] -> [R, R, S01, R]_2" in strategy_name_list
        assert "[R, R, R, R] -> [R, R, R, R]_3" in strategy_name_list
        assert "[R, R, R, S01] -> [R, R, R, S01]_4" in strategy_name_list
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
@parameterize("softmax_dim", [0, 1, 2, 3])
@parameterize("model_cls", [LinearSplitModel])
def test_split_handler(softmax_dim, model_cls):
    """Run ``check_split_handler`` on 4 ranks for each parameterized case."""
    worker_kwargs = {"softmax_dim": softmax_dim, "model_cls": model_cls}
    spawn(check_split_handler, 4, **worker_kwargs)
if __name__ == "__main__":
    # Allow running this test directly without pytest.
    test_split_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_embedding_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_embedding_handler.py | import pytest
import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler.embedding_handler import (
EmbeddingFunctionHandler,
EmbeddingModuleHandler,
)
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import rerun_if_address_is_in_use, spawn
from colossalai.testing.pytest_wrapper import run_on_environment_flag
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
# Vocabulary size and embedding width shared by all embedding tests below.
NUM_EMBEDDINGS = 16
EMBEDDING_DIMS = 32
class EmbeddingModule(nn.Module):
    """Wraps a single ``nn.Embedding`` so the module-based handler can be traced."""

    def __init__(self, num_embeddings, embedding_dims):
        super().__init__()
        self.embedding = nn.Embedding(num_embeddings, embedding_dims)

    def forward(self, input):
        return self.embedding(input)
def check_embedding_module_handler(rank, world_size, port):
    """Per-rank worker: numerically checks all 19 sharding strategies for an
    ``nn.Embedding`` module node, then re-traces and verifies the
    ``EmbeddingModuleHandler``'s operation-data mapping and strategy names.

    Args:
        rank: rank of this spawned process.
        world_size: total number of processes (expected 4 -> 2x2 mesh).
        port: free localhost port for the NCCL rendezvous.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = EmbeddingModule(num_embeddings=NUM_EMBEDDINGS, embedding_dims=EMBEDDING_DIMS).cuda()
    # graph():
    #     %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
    #     %embedding : [#users=1] = call_module[target=embedding](args = (%input_1,), kwargs = {})
    # return embedding
    # random floats in [0, NUM_EMBEDDINGS) truncated to int64 token ids
    input = torch.rand(4, 16, 16) * NUM_EMBEDDINGS
    input = input.to(torch.int64).cuda()
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    # index of embedding node in computation graph
    node_index = 1
    # total number of embedding strategies
    strategy_number = 19
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=[input],
        meta_arg_names=["input"],
    )
    tracer = ColoTracer(bias_addition_split=True)
    meta_args = {"input": torch.randint(NUM_EMBEDDINGS, (4, 16, 16)).to("meta")}
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    embedding_node = list(graph.nodes)[1]
    strategies_vector = StrategiesVector(embedding_node)
    # build handler
    handler = EmbeddingModuleHandler(node=embedding_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.logical_shape is not None
        assert op_data.data is not None
    # input indices are logically flattened to 1D (4*16*16 == 1024)
    assert mapping["input"].name == "input_1"
    # assert mapping['input'].data.is_meta
    assert mapping["input"].data.shape == torch.Size([4, 16, 16])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([1024])
    assert mapping["other"].name == "weight"
    assert mapping["other"].data.shape == torch.Size([NUM_EMBEDDINGS, EMBEDDING_DIMS])
    assert mapping["other"].type == OperationDataType.PARAM
    assert mapping["other"].logical_shape == torch.Size([NUM_EMBEDDINGS, EMBEDDING_DIMS])
    assert mapping["output"].name == "embedding"
    assert mapping["output"].data.shape == torch.Size([4, 16, 16, EMBEDDING_DIMS])
    assert mapping["output"].type == OperationDataType.OUTPUT
    assert mapping["output"].logical_shape == torch.Size([1024, EMBEDDING_DIMS])
    strategies_vector = handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    # RR = RR x RR
    assert "RR = R x RR" in strategy_name_list
    # SR = SR x RR
    assert "S0R = S0 x RR_0" in strategy_name_list
    assert "S0R = S0 x RR_1" in strategy_name_list
    assert "S0R = S0 x RR_2" in strategy_name_list
    assert "S1R = S1 x RR_0" in strategy_name_list
    assert "S1R = S1 x RR_1" in strategy_name_list
    assert "S1R = S1 x RR_2" in strategy_name_list
    # SS = SR x RS
    assert "S0S1 = S0 x RS1_0" in strategy_name_list
    assert "S0S1 = S0 x RS1_1" in strategy_name_list
    assert "S0S1 = S0 x RS1_2" in strategy_name_list
    assert "S1S0 = S1 x RS0_0" in strategy_name_list
    assert "S1S0 = S1 x RS0_1" in strategy_name_list
    assert "S1S0 = S1 x RS0_2" in strategy_name_list
    # RS= RR x RS
    assert "RS0 = R x RS0" in strategy_name_list
    assert "RS1 = R x RS1" in strategy_name_list
    # S01R = S01R x RR
    assert "S01R = S01 x RR_0" in strategy_name_list
    assert "S01R = S01 x RR_1" in strategy_name_list
    assert "S01R = S01 x RR_2" in strategy_name_list
    # RS01 = RR x RS01
    assert "RS01 = R x RS01" in strategy_name_list
    for strategy in strategies_vector:
        input_sharding_spec = strategy.get_sharding_spec_by_name("input_1")
        weight_sharding_spec = strategy.get_sharding_spec_by_name("weight")
        output_sharding_spec = strategy.get_sharding_spec_by_name("embedding")
        # make sure the sharding matches across different operation data
        assert output_sharding_spec.sharding_sequence[-1] == weight_sharding_spec.sharding_sequence[-1]
        assert input_sharding_spec.sharding_sequence == output_sharding_spec.sharding_sequence[:-1]
class EmbeddingFunction(nn.Module):
    """Functional-embedding wrapper: looks ``input`` up in the ``others`` weight."""

    def __init__(self):
        super().__init__()

    def forward(self, input, others):
        return nn.functional.embedding(input, others)
def check_embedding_function_handler(rank, world_size, port):
    """Per-rank worker: numerically checks all 19 sharding strategies for an
    ``F.embedding`` call-function node, then re-traces and verifies the
    ``EmbeddingFunctionHandler``'s operation-data mapping and strategy names.

    Args:
        rank: rank of this spawned process.
        world_size: total number of processes (expected 4 -> 2x2 mesh).
        port: free localhost port for the NCCL rendezvous.
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = EmbeddingFunction().cuda()
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    # random floats in [0, NUM_EMBEDDINGS) truncated to int64 token ids
    input = torch.rand(4, 16, 16) * NUM_EMBEDDINGS
    input = input.to(torch.int64).cuda()
    others = torch.rand(NUM_EMBEDDINGS, EMBEDDING_DIMS).cuda()
    input_args = [input, others]
    meta_arg_names = ["input", "others"]
    input_kwargs = {}
    # total number of embedding strategies
    strategy_number = 19
    node_index = 2
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=input_args,
        meta_arg_names=meta_arg_names,
        input_kwargs=input_kwargs,
    )
    tracer = ColoTracer(bias_addition_split=True)
    # graph():
    #     %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
    #     %others : torch.Tensor [#users=1] = placeholder[target=others]
    #     %embedding : [#users=1] = call_function[target=torch.nn.functional.embedding](args = (%input_1, %others), kwargs = {padding_idx: None, max_norm: None, norm_type: 2.0, scale_grad_by_freq: False, sparse: False})
    # return embedding
    meta_args = {
        "input": torch.randint(NUM_EMBEDDINGS, (4, 16, 16)).to("meta"),
        "others": torch.rand(NUM_EMBEDDINGS, EMBEDDING_DIMS).to("meta"),
    }
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    embedding_node = list(graph.nodes)[2]
    strategies_vector = StrategiesVector(embedding_node)
    # build handler
    handler = EmbeddingFunctionHandler(
        node=embedding_node, device_mesh=device_mesh, strategies_vector=strategies_vector
    )
    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.logical_shape is not None
        assert op_data.data is not None
    # input indices are logically flattened to 1D (4*16*16 == 1024)
    assert mapping["input"].name == "input_1"
    assert mapping["input"].data.is_meta
    assert mapping["input"].data.shape == torch.Size([4, 16, 16])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([1024])
    assert mapping["other"].name == "others"
    assert mapping["other"].data.is_meta
    assert mapping["other"].data.shape == torch.Size([NUM_EMBEDDINGS, EMBEDDING_DIMS])
    assert mapping["other"].type == OperationDataType.ARG
    assert mapping["other"].logical_shape == torch.Size([NUM_EMBEDDINGS, EMBEDDING_DIMS])
    assert mapping["output"].name == "embedding"
    assert mapping["output"].data.is_meta
    assert mapping["output"].data.shape == torch.Size([4, 16, 16, EMBEDDING_DIMS])
    assert mapping["output"].type == OperationDataType.OUTPUT
    assert mapping["output"].logical_shape == torch.Size([1024, EMBEDDING_DIMS])
    handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    # RR = RR x RR
    assert "RR = R x RR" in strategy_name_list
    # SR = SR x RR
    assert "S0R = S0 x RR_0" in strategy_name_list
    assert "S0R = S0 x RR_1" in strategy_name_list
    assert "S0R = S0 x RR_2" in strategy_name_list
    assert "S1R = S1 x RR_0" in strategy_name_list
    assert "S1R = S1 x RR_1" in strategy_name_list
    assert "S1R = S1 x RR_2" in strategy_name_list
    # SS = SR x RS
    assert "S0S1 = S0 x RS1_0" in strategy_name_list
    assert "S0S1 = S0 x RS1_1" in strategy_name_list
    assert "S0S1 = S0 x RS1_2" in strategy_name_list
    assert "S1S0 = S1 x RS0_0" in strategy_name_list
    assert "S1S0 = S1 x RS0_1" in strategy_name_list
    assert "S1S0 = S1 x RS0_2" in strategy_name_list
    # RS= RR x RS
    assert "RS0 = R x RS0" in strategy_name_list
    assert "RS1 = R x RS1" in strategy_name_list
    # S01R = S01R x RR
    assert "S01R = S01 x RR_0" in strategy_name_list
    assert "S01R = S01 x RR_1" in strategy_name_list
    assert "S01R = S01 x RR_2" in strategy_name_list
    # RS01 = RR x RS01
    assert "RS01 = R x RS01" in strategy_name_list
    for strategy in strategies_vector:
        input_sharding_spec = strategy.get_sharding_spec_by_name("input_1")
        weight_sharding_spec = strategy.get_sharding_spec_by_name("others")
        output_sharding_spec = strategy.get_sharding_spec_by_name("embedding")
        # make sure the sharding matches across different operation data
        assert output_sharding_spec.sharding_sequence[-1] == weight_sharding_spec.sharding_sequence[-1]
        assert input_sharding_spec.sharding_sequence == output_sharding_spec.sharding_sequence[:-1]
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_embedding_module_handler():
    """Distributed 4-rank run of the module-based embedding handler check."""
    world_size = 4
    spawn(check_embedding_module_handler, world_size)
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_embedding_function_handler():
    """Distributed 4-rank run of the function-based embedding handler check."""
    world_size = 4
    spawn(check_embedding_function_handler, world_size)
if __name__ == "__main__":
    # Allow running these checks directly without pytest.
    test_embedding_module_handler()
    test_embedding_function_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/__init__.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_tensor_constructor.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_tensor_constructor.py | import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler.tensor_constructor_handler import TensorConstructorHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.testing import clear_cache_before_run, run_on_environment_flag
class TensorConstructorModel(nn.Module):
    """Adds ``torch.arange(len(x))`` to the input so that tracing produces a
    tensor-constructor (``torch.arange``) node in the graph."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        offsets = torch.arange(x.size()[0])
        return x + offsets
@run_on_environment_flag(name="AUTO_PARALLEL")
@clear_cache_before_run()
def test_where_handler():
    """Trace ``TensorConstructorModel`` and verify the
    ``TensorConstructorHandler``'s operation-data mapping and its single
    replica strategy for the ``torch.arange`` node.

    NOTE(review): misnamed — this exercises the tensor-constructor handler,
    not a where handler; the name looks copy-pasted from another test.
    Renaming would also require updating the ``__main__`` call below.
    """
    model = TensorConstructorModel()
    tracer = ColoTracer(bias_addition_split=True)
    # graph():
    #     %x : torch.Tensor [#users=2] = placeholder[target=x]
    #     %size : [#users=1] = call_method[target=size](args = (%x,), kwargs = {})
    #     %getitem : [#users=1] = call_function[target=operator.getitem](args = (%size, 0), kwargs = {})
    #     %arange : [#users=1] = call_function[target=torch.arange](args = (%getitem,), kwargs = {})
    #     %add : [#users=1] = call_function[target=operator.add](args = (%x, %arange), kwargs = {})
    # return add
    meta_args = {"x": torch.rand(10).to("meta")}
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    # 2x2 logical device mesh over 4 devices (no process group needed here)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
    # node 3 is the %arange constructor (see graph dump above)
    arange_node = list(graph.nodes)[3]
    strategies_vector = StrategiesVector(arange_node)
    # build handler
    handler = TensorConstructorHandler(node=arange_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.logical_shape is not None
        assert op_data.data is not None
    assert mapping["output"].name == "arange"
    assert mapping["output"].data.is_meta
    assert mapping["output"].data.shape == torch.Size([10])
    assert mapping["output"].type == OperationDataType.OUTPUT
    handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    # a constructed tensor is always replicated across the mesh
    assert "Replica Tensor Constructor" in strategy_name_list
if __name__ == "__main__":
    # Allow running this test directly without pytest.
    test_where_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_conv_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_conv_handler.py | import pytest
import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler.conv_handler import ConvFunctionHandler, ConvModuleHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import rerun_if_address_is_in_use, run_on_environment_flag, spawn
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
def check_conv_module_handler(rank, world_size, port, bias):
    """Per-rank worker: numerically checks all 16 sharding strategies for an
    ``nn.Conv2d`` module node, then re-traces and verifies the
    ``ConvModuleHandler``'s operation-data mapping and strategy names.

    Args:
        rank: rank of this spawned process.
        world_size: total number of processes (expected 4 -> 2x2 mesh).
        port: free localhost port for the NCCL rendezvous.
        bias: whether the conv layer carries a bias parameter (currently
            always False at the call site — see the test wrapper below).
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = nn.Sequential(nn.Conv2d(4, 16, 3, padding=1, bias=bias)).cuda()
    # graph():
    #     %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
    #     %_0 : [#users=1] = call_module[target=0](args = (%input_1,), kwargs = {})
    # return _0
    input = torch.rand(4, 4, 64, 64).cuda()
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    # index of conv node in computation graph
    node_index = 1
    # total number of conv strategies
    strategy_number = 16
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=[input],
        meta_arg_names=["input"],
    )
    tracer = ColoTracer(bias_addition_split=True)
    meta_args = {"input": torch.rand(4, 4, 64, 64).to("meta")}
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    conv_mod_node = list(graph.nodes)[1]
    strategies_vector = StrategiesVector(conv_mod_node)
    # build handler
    handler = ConvModuleHandler(node=conv_mod_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.logical_shape is not None
        assert op_data.data is not None
    assert mapping["input"].name == "input_1"
    # assert mapping['input'].data.is_meta
    assert mapping["input"].data.shape == torch.Size([4, 4, 64, 64])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([4, 4, 64, 64])
    # the weight's logical shape swaps in/out channels ([16,4,3,3] -> [4,16,3,3])
    assert mapping["other"].name == "weight"
    # assert mapping['other'].data.is_meta
    assert mapping["other"].data.shape == torch.Size([16, 4, 3, 3])
    assert mapping["other"].type == OperationDataType.PARAM
    assert mapping["other"].logical_shape == torch.Size([4, 16, 3, 3])
    if bias:
        assert mapping["bias"].name == "bias"
        # assert mapping['bias'].data.is_meta
        assert mapping["bias"].data.shape == torch.Size([16])
        assert mapping["bias"].type == OperationDataType.PARAM
        assert mapping["bias"].logical_shape == torch.Size([16])
    assert mapping["output"].name == "_0"
    # assert mapping['output'].data.is_meta
    assert mapping["output"].data.shape == torch.Size([4, 16, 64, 64])
    assert mapping["output"].type == OperationDataType.OUTPUT
    strategies_vector = handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    # SS = SR x RS
    assert "S0S1 = S0R x RS1" in strategy_name_list
    assert "S1S0 = S1R x RS0" in strategy_name_list
    # SR = SR x RR
    assert "S0R = S0R x RR" in strategy_name_list
    assert "S1R = S1R x RR" in strategy_name_list
    # SR = SS x SR
    assert "S0R = S0S1 x S1R" in strategy_name_list
    assert "S1R = S1S0 x S0R" in strategy_name_list
    # RS = RS x SS
    assert "RS0 = RS1 x S1S0" in strategy_name_list
    assert "RS1 = RS0 x S0S1" in strategy_name_list
    # RR = RS x SR
    assert "RR = RS0 x S0R" in strategy_name_list
    assert "RR = RS1 x S1R" in strategy_name_list
    # RS= RR x RS
    assert "RS0 = RR x RS0" in strategy_name_list
    assert "RS1 = RR x RS1" in strategy_name_list
    # RR = RR x RR
    assert "RR = RR x RR" in strategy_name_list
    # S01R = S01R x RR
    assert "S01R = S01R x RR" in strategy_name_list
    # RR = RS01 x S01R
    assert "RR = RS01 x S01R" in strategy_name_list
    # RS01 = RR x RS01
    assert "RS01 = RR x RS01" in strategy_name_list
    for strategy in strategies_vector:
        input_sharding_spec = strategy.get_sharding_spec_by_name("input_1")
        weight_sharding_spec = strategy.get_sharding_spec_by_name("weight")
        output_sharding_spec = strategy.get_sharding_spec_by_name("_0")
        if bias:
            bias_sharding_spec = strategy.get_sharding_spec_by_name("bias")
        # make sure the sharding matches across different operation data
        assert output_sharding_spec.sharding_sequence[1] == weight_sharding_spec.sharding_sequence[0]
        assert input_sharding_spec.sharding_sequence[0] == output_sharding_spec.sharding_sequence[0]
        assert input_sharding_spec.sharding_sequence[2:] == output_sharding_spec.sharding_sequence[2:]
        assert input_sharding_spec.sharding_sequence[1] == weight_sharding_spec.sharding_sequence[1]
        if bias:
            assert bias_sharding_spec.sharding_sequence[-1] == weight_sharding_spec.sharding_sequence[0]
            assert bias_sharding_spec.sharding_sequence[-1] == output_sharding_spec.sharding_sequence[1]
class ConvModel(nn.Module):
    """Functional ``conv2d`` wrapper (padding=1) for tracing the function handler."""

    def __init__(self):
        super().__init__()

    def forward(self, input, others, bias=None):
        return nn.functional.conv2d(input, others, bias=bias, padding=1)
def check_conv_function_handler(rank, world_size, port, bias):
    """Per-rank worker: numerically checks all 16 sharding strategies for an
    ``F.conv2d`` call-function node, then re-traces and verifies the
    ``ConvFunctionHandler``'s operation-data mapping and strategy names.

    Args:
        rank: rank of this spawned process.
        world_size: total number of processes (expected 4 -> 2x2 mesh).
        port: free localhost port for the NCCL rendezvous.
        bias: whether to pass a bias tensor (currently always False at the
            call site — see the test wrapper below).
    """
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    model = ConvModel().cuda()
    physical_mesh_id = torch.arange(0, 4)
    mesh_shape = (2, 2)
    device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
    input = torch.rand(4, 4, 64, 64).cuda()
    others = torch.rand(16, 4, 3, 3).cuda()
    input_args = [input, others]
    meta_arg_names = ["input", "others"]
    input_kwargs = {}
    # total number of conv strategies
    strategy_number = 16
    node_index = 2
    if bias:
        # a bias placeholder shifts the conv node one position later
        bias_tensor = torch.rand(16).cuda()
        input_kwargs["bias"] = bias_tensor
        node_index += 1
    numerical_test_for_node_strategy(
        model=model,
        device_mesh=device_mesh,
        node_index=node_index,
        strategy_number=strategy_number,
        input_args=input_args,
        meta_arg_names=meta_arg_names,
        input_kwargs=input_kwargs,
    )
    tracer = ColoTracer(bias_addition_split=True)
    # graph():
    #     %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
    #     %others : torch.Tensor [#users=1] = placeholder[target=others]
    #     %conv2d : [#users=1] = call_function[target=torch.conv2d](args = (%input_1, %others), kwargs = {})
    # return conv2d
    meta_args = {"input": torch.rand(4, 4, 64, 64).to("meta"), "others": torch.rand(16, 4, 3, 3).to("meta")}
    if bias:
        meta_args["bias"] = torch.rand(16).to("meta")
    graph = tracer.trace(model, meta_args=meta_args)
    gm = ColoGraphModule(model, graph)
    shape_prop_pass(gm, *meta_args.values())
    if bias:
        conv_mod_node = list(graph.nodes)[3]
    else:
        conv_mod_node = list(graph.nodes)[2]
    strategies_vector = StrategiesVector(conv_mod_node)
    # build handler
    handler = ConvFunctionHandler(node=conv_mod_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
    # check operation data mapping
    mapping = handler.get_operation_data_mapping()
    for name, op_data in mapping.items():
        op_data: OperationData
        # make sure they have valid values
        assert op_data.logical_shape is not None
        assert op_data.data is not None
    assert mapping["input"].name == "input_1"
    assert mapping["input"].data.is_meta
    assert mapping["input"].data.shape == torch.Size([4, 4, 64, 64])
    assert mapping["input"].type == OperationDataType.ARG
    assert mapping["input"].logical_shape == torch.Size([4, 4, 64, 64])
    # the weight's logical shape swaps in/out channels ([16,4,3,3] -> [4,16,3,3])
    assert mapping["other"].name == "others"
    assert mapping["other"].data.is_meta
    assert mapping["other"].data.shape == torch.Size([16, 4, 3, 3])
    assert mapping["other"].type == OperationDataType.ARG
    assert mapping["other"].logical_shape == torch.Size([4, 16, 3, 3])
    if bias:
        assert mapping["bias"].name == "bias"
        assert mapping["bias"].data.is_meta
        assert mapping["bias"].data.shape == torch.Size([16])
        assert mapping["bias"].type == OperationDataType.ARG
        assert mapping["bias"].logical_shape == torch.Size([16])
    assert mapping["output"].name == "conv2d"
    assert mapping["output"].data.is_meta
    assert mapping["output"].data.shape == torch.Size([4, 16, 64, 64])
    assert mapping["output"].type == OperationDataType.OUTPUT
    handler.register_strategy(compute_resharding_cost=False)
    strategy_name_list = [val.name for val in strategies_vector]
    # SS = SR x RS
    assert "S0S1 = S0R x RS1" in strategy_name_list
    assert "S1S0 = S1R x RS0" in strategy_name_list
    # SR = SR x RR
    assert "S0R = S0R x RR" in strategy_name_list
    assert "S1R = S1R x RR" in strategy_name_list
    # SR = SS x SR
    assert "S0R = S0S1 x S1R" in strategy_name_list
    assert "S1R = S1S0 x S0R" in strategy_name_list
    # RS = RS x SS
    assert "RS0 = RS1 x S1S0" in strategy_name_list
    assert "RS1 = RS0 x S0S1" in strategy_name_list
    # RR = RS x SR
    assert "RR = RS0 x S0R" in strategy_name_list
    assert "RR = RS1 x S1R" in strategy_name_list
    # RS= RR x RS
    assert "RS0 = RR x RS0" in strategy_name_list
    assert "RS1 = RR x RS1" in strategy_name_list
    # RR = RR x RR
    assert "RR = RR x RR" in strategy_name_list
    # S01R = S01R x RR
    assert "S01R = S01R x RR" in strategy_name_list
    # RR = RS01 x S01R
    assert "RR = RS01 x S01R" in strategy_name_list
    # RS01 = RR x RS01
    assert "RS01 = RR x RS01" in strategy_name_list
    for strategy in strategies_vector:
        input_sharding_spec = strategy.get_sharding_spec_by_name("input_1")
        weight_sharding_spec = strategy.get_sharding_spec_by_name("others")
        output_sharding_spec = strategy.get_sharding_spec_by_name("conv2d")
        if bias:
            bias_sharding_spec = strategy.get_sharding_spec_by_name("bias")
        # make sure the sharding matches across different operation data
        assert output_sharding_spec.sharding_sequence[1] == weight_sharding_spec.sharding_sequence[0]
        assert input_sharding_spec.sharding_sequence[0] == output_sharding_spec.sharding_sequence[0]
        assert input_sharding_spec.sharding_sequence[2:] == output_sharding_spec.sharding_sequence[2:]
        assert input_sharding_spec.sharding_sequence[1] == weight_sharding_spec.sharding_sequence[1]
        if bias:
            assert bias_sharding_spec.sharding_sequence[-1] == weight_sharding_spec.sharding_sequence[0]
            assert bias_sharding_spec.sharding_sequence[-1] == output_sharding_spec.sharding_sequence[1]
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
# We temporarily ban the bias option before doing bias add
# before all reduce communication may encounter correctness issue.
# @parameterize('bias', [True, False])
@rerun_if_address_is_in_use()
def test_conv_module_handler(bias=False):
    """4-rank distributed check of the Conv2d module handler (bias disabled)."""
    world_size = 4
    spawn(check_conv_module_handler, world_size, bias=bias)
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
# We temporarily ban the bias option before doing bias add
# before all reduce communication may encounter correctness issue.
# @parameterize('bias', [True, False])
@rerun_if_address_is_in_use()
def test_conv_function_handler(bias=False):
    """4-rank distributed check of the functional conv2d handler (bias disabled)."""
    world_size = 4
    spawn(check_conv_function_handler, world_size, bias=bias)
if __name__ == "__main__":
test_conv_module_handler()
test_conv_function_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_getattr_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_getattr_handler.py | import pytest
import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler.getattr_handler import GetattrHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.testing import clear_cache_before_run
class GetattrModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(4, 16, 3, padding=1, bias=False)
def forward(self, input):
weight = self.conv.weight
return weight
@pytest.mark.skip("ShapeProp is not compatible with PyTorch 1.11.0")
@clear_cache_before_run()
def test_getattr_handler():
model = GetattrModel()
tracer = ColoTracer(bias_addition_split=True)
# graph():
# %input_1 : torch.Tensor [#users=0] = placeholder[target=input]
# %conv_weight : [#users=1] = get_attr[target=conv.weight]
# return conv_weight
meta_args = {"input": torch.rand(4, 4, 64, 64).to("meta")}
graph = tracer.trace(model, meta_args=meta_args)
gm = ColoGraphModule(model, graph)
shape_prop_pass(gm, *meta_args.values())
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape)
getattr_node = list(graph.nodes)[1]
getattr_strategies_vector = StrategiesVector(getattr_node)
# build handler
getattr_handler = GetattrHandler(
node=getattr_node, device_mesh=device_mesh, strategies_vector=getattr_strategies_vector
)
getattr_handler.register_strategy(compute_resharding_cost=False)
# check operation data mapping
mapping = getattr_handler.get_operation_data_mapping()
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.data is not None
assert mapping["output"].name == "conv_weight"
assert mapping["output"].data.shape == torch.Size((16, 4, 3, 3))
assert mapping["output"].type == OperationDataType.OUTPUT
strategy_name_list = [val.name for val in getattr_handler.strategies_vector]
assert "get_attr [S0, S1, R, R]" in strategy_name_list
assert "get_attr [S1, S0, R, R]" in strategy_name_list
assert "get_attr [S01, R, R, R]" in strategy_name_list
assert "get_attr [R, S01, R, R]" in strategy_name_list
assert "get_attr [S0, R, R, R]" in strategy_name_list
assert "get_attr [R, S0, R, R]" in strategy_name_list
assert "get_attr [S1, R, R, R]" in strategy_name_list
assert "get_attr [R, S1, R, R]" in strategy_name_list
assert "get_attr [R, R, R, R]" in strategy_name_list
if __name__ == "__main__":
test_getattr_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_batch_norm_handler.py | tests/test_auto_parallel/test_tensor_shard/test_node_handler/test_batch_norm_handler.py | import pytest
import torch
import torch.nn as nn
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.tensor_shard.node_handler.batch_norm_handler import BatchNormModuleHandler
from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, StrategiesVector
from colossalai.device.device_mesh import DeviceMesh
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import rerun_if_address_is_in_use, run_on_environment_flag, spawn
from tests.test_auto_parallel.test_tensor_shard.test_node_handler.utils import numerical_test_for_node_strategy
def check_bn_module_handler(rank, world_size, port):
disable_existing_loggers()
launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
model = nn.Sequential(nn.BatchNorm2d(16)).cuda()
physical_mesh_id = torch.arange(0, 4)
mesh_shape = (2, 2)
device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
input = torch.rand(4, 16, 64, 64).cuda()
# the index of bn node in computation graph
node_index = 1
# the total number of bn strategies without sync bn mode
# TODO: add sync bn strategies after related passes ready
strategy_number = 4
numerical_test_for_node_strategy(
model=model,
device_mesh=device_mesh,
node_index=node_index,
strategy_number=strategy_number,
input_args=[input],
meta_arg_names=["input"],
)
tracer = ColoTracer(bias_addition_split=True)
# graph():
# %input_1 : torch.Tensor [#users=1] = placeholder[target=input]
# %_0 : [#users=1] = call_module[target=0](args = (%input_1,), kwargs = {})
# return _0
meta_args = {"input": torch.rand(4, 16, 64, 64).to("meta")}
graph = tracer.trace(model, meta_args=meta_args)
gm = ColoGraphModule(model, graph)
shape_prop_pass(gm, *meta_args.values())
bn_mod_node = list(graph.nodes)[1]
strategies_vector = StrategiesVector(bn_mod_node)
# build handler
handler = BatchNormModuleHandler(node=bn_mod_node, device_mesh=device_mesh, strategies_vector=strategies_vector)
# check operation data mapping
mapping = handler.get_operation_data_mapping()
for name, op_data in mapping.items():
op_data: OperationData
# make sure they have valid values
assert op_data.logical_shape is not None
assert op_data.data is not None
assert mapping["input"].name == "input_1"
assert mapping["input"].data.shape == torch.Size([4, 16, 64, 64])
assert mapping["input"].type == OperationDataType.ARG
assert mapping["input"].logical_shape == torch.Size([4, 16, 64, 64])
assert mapping["other"].name == "weight"
assert mapping["other"].data.shape == torch.Size([16])
assert mapping["other"].type == OperationDataType.PARAM
assert mapping["other"].logical_shape == torch.Size([16])
assert mapping["bias"].name == "bias"
assert mapping["bias"].data.shape == torch.Size([16])
assert mapping["bias"].type == OperationDataType.PARAM
assert mapping["bias"].logical_shape == torch.Size([16])
assert mapping["output"].name == "_0"
assert mapping["output"].data.shape == torch.Size([4, 16, 64, 64])
assert mapping["output"].type == OperationDataType.OUTPUT
strategies_vector = handler.register_strategy(compute_resharding_cost=False)
strategy_name_list = [val.name for val in strategies_vector]
# RS = RS x S
assert "RS0 = RS0 x S0" in strategy_name_list
assert "RS1 = RS1 x S1" in strategy_name_list
# RR = RR x R
assert "RR = RR x R" in strategy_name_list
# RS01 = RS01 x S01
assert "RS01 = RS01 x S01" in strategy_name_list
# temporarily skip the sync bn test
# TODO: test sync bn after the implicit runtime pass completed
# SR = SR x R WITH SYNC_BN
# assert 'S0R = S0R x R WITH SYNC_BN' in strategy_name_list
# assert 'S1R = S1R x R WITH SYNC_BN' in strategy_name_list
# SS = SS x S WITH SYNC_BN
# assert 'S0S1 = S0S1 x S1 WITH SYNC_BN' in strategy_name_list
# assert 'S1S0 = S1S0 x S0 WITH SYNC_BN' in strategy_name_list
# S01R = S01R x R WITH SYNC_BN
# assert 'S01R = S01R x R WITH SYNC_BN' in strategy_name_list
@run_on_environment_flag(name="AUTO_PARALLEL")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_bn_module_handler():
spawn(check_bn_module_handler, 4)
if __name__ == "__main__":
test_bn_module_handler()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.