code stringlengths 197 38.4k | apis list | extract_api stringlengths 137 20.3k |
|---|---|---|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from typing import Any, Optional, Tuple
import torch
import torch.distributed as dist
from colossalai.communication import all_gather, all_reduce, reduce_scatter
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from torch import Tensor
from torch.cuda.amp import custom_bwd, custom_fwd
class linear_3d(torch.autograd.Function):
    """3D tensor-parallel linear transform: ``Y = X W (+ b)``.

    Forward gathers the activation along the input group, multiplies by the
    local weight shard, and reduce-scatters the result along the output
    group; backward reverses these collectives, overlapping communication
    with the weight-gradient matmul where possible.
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any,
                input_: Tensor,
                weight: Tensor,
                bias: Optional[Tensor],
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = -1,
                output_dim: int = 0) -> Tensor:
        assert input_.shape[-1] == weight.shape[0], \
            'Invalid shapes: input = {}, weight = {}.'.format(input_.shape, weight.shape)
        ctx.use_bias = bias is not None
        # Gather the full activation along the input group before the matmul.
        input_ = all_gather(input_, input_dim, input_parallel_mode)
        input_ = torch.cat(input_, dim=input_dim)
        ctx.save_for_backward(input_, weight)
        output = torch.matmul(input_, weight)
        output = reduce_scatter(output, output_dim, output_parallel_mode)
        if bias is not None:
            output += bias
        # Stash groups/dims so backward can route its own collectives.
        ctx.input_parallel_mode = input_parallel_mode
        ctx.weight_parallel_mode = weight_parallel_mode
        ctx.output_parallel_mode = output_parallel_mode
        ctx.input_dim = input_dim
        ctx.weight_dim = weight_dim
        ctx.output_dim = output_dim
        return output

    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        input_, weight = ctx.saved_tensors
        # Fix: bias_grad must exist even when no bias was supplied; previously
        # it was only assigned under `if ctx.use_bias:`, so the final return
        # raised NameError for bias-free layers.
        bias_grad = None
        with torch.no_grad():
            output_grad = all_gather(output_grad, ctx.output_dim,
                                     ctx.output_parallel_mode)
            output_grad = torch.cat(output_grad, dim=ctx.output_dim)
            input_grad = torch.matmul(output_grad, weight.transpose(0, 1))
            # Launch the input-grad reduce-scatter asynchronously so it
            # overlaps with the weight-gradient matmul below.
            input_grad, input_op = reduce_scatter(input_grad, ctx.input_dim,
                                                   ctx.input_parallel_mode,
                                                   async_op=True)
            weight_grad = torch.matmul(
                input_.reshape(-1, input_.shape[-1]).transpose(0, 1),
                output_grad.reshape(-1, output_grad.shape[-1]))
            if ctx.use_bias:
                # Fold the bias gradient into the weight tensor as an extra
                # row so both are reduced by a single all-reduce.
                bias_grad = torch.sum(output_grad,
                                      dim=tuple(
                                          range(len(output_grad.shape))[:-1]))
                weight_grad = torch.cat([weight_grad, torch.unsqueeze(bias_grad, dim=0)])
            weight_grad, weight_op = all_reduce(weight_grad, ctx.weight_parallel_mode, async_op=True)
            input_op.wait()
            weight_op.wait()
            if ctx.use_bias:
                # Split the fused row back out after the reduction.
                bias_grad = weight_grad[-1]
                weight_grad = weight_grad[:-1]
        return input_grad, weight_grad, bias_grad, None, None, None, None, None, None
class layer_norm_3d(torch.autograd.Function):
    """3D tensor-parallel layer normalization with a learnable affine
    transform. The normalized (last) dimension is sharded across the
    output group, so mean/variance statistics are made global via
    all-reduce."""
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, input_: Tensor, weight: Tensor, bias: Tensor,
                normalized_shape: int, eps: float,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode) -> Tensor:
        """Return ``weight * (x - mean) / sqrt(var + eps) + bias`` with
        mean/var computed over the full (sharded) last dimension."""
        # mean = torch.sum(input_, dim=-1)
        # dist.all_reduce(mean, group=gpc.get_group(output_parallel_mode))
        # mean /= normalized_shape
        # mu = input_ - mean
        # var = torch.sum(torch.pow(mu, 2), dim=-1)
        # dist.all_reduce(var, group=gpc.get_group(output_parallel_mode))
        # var /= normalized_shape
        # std_dev = torch.sqrt(var + eps)
        # ctx.save_for_backward(input_, mu, std_dev, weight)
        # output = weight * mu / std_dev + bias
        # Global mean over the sharded last dim (local sum, then all-reduce).
        mean = all_reduce(torch.sum(input_, dim=-1, keepdim=True),
                          output_parallel_mode) / normalized_shape
        mu = input_ - mean
        # Global (biased) variance via the same reduction.
        var = all_reduce(torch.sum(mu**2, dim=-1, keepdim=True),
                         output_parallel_mode) / normalized_shape
        sigma = torch.sqrt(var + eps)
        # ranks_in_group = gpc.get_ranks_in_group(input_parallel_mode)
        # src_rank = ranks_in_group[gpc.get_local_rank(output_parallel_mode)]
        # transforms = torch.stack([weight, bias]).contiguous()
        # dist.broadcast(transforms,
        #                src=src_rank,
        #                group=gpc.get_group(input_parallel_mode))
        # transforms = all_gather(transforms, -1, weight_parallel_mode)
        # weight, bias = transforms[0], transforms[1]
        # Save centered input and std-dev for backward (weight needed for dz).
        ctx.save_for_backward(mu, sigma, weight)
        z = mu / sigma
        output = weight * z + bias
        # ctx.src_rank = src_rank
        ctx.normalized_shape = normalized_shape
        ctx.input_parallel_mode = input_parallel_mode
        ctx.weight_parallel_mode = weight_parallel_mode
        ctx.output_parallel_mode = output_parallel_mode
        return output
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """Standard layer-norm backward; all-reduces stand in for the
        reductions over the sharded last dimension."""
        mu, sigma, weight = ctx.saved_tensors
        with torch.no_grad():
            # d(bias) = dY and d(weight) = dY * normalized input, summed over
            # all non-feature dims, then all-reduced across both groups that
            # replicate the parameters.
            bias_grad, weight_grad = output_grad, output_grad * mu / sigma
            grads = torch.stack([bias_grad, weight_grad]).contiguous()
            grads = torch.sum(grads, dim=tuple(range(len(grads.shape))[1:-1]))
            grads = all_reduce(grads, ctx.weight_parallel_mode)
            grads = all_reduce(grads, ctx.input_parallel_mode)
            bias_grad, weight_grad = grads[0], grads[1]
            # grads = reduce_scatter(grads, -1, ctx.weight_parallel_mode)
            # dist.reduce(grads,
            #             dst=ctx.src_rank,
            #             group=gpc.get_group(ctx.input_parallel_mode))
            # if gpc.get_local_rank(
            #         ctx.input_parallel_mode) == gpc.get_local_rank(
            #             ctx.output_parallel_mode):
            #     bias_grad, weight_grad = grads[0], grads[1]
            # else:
            #     bias_grad, weight_grad = None, None
            # Input gradient: chain rule through z, the variance, and the
            # mean; each global reduction needs its own all-reduce.
            dz = output_grad * weight
            dvar = dz * mu * (-0.5) * sigma**(-3)
            dvar = all_reduce(torch.sum(dvar, dim=-1, keepdim=True), ctx.output_parallel_mode)
            dmean = dz * (-1 / sigma) + dvar * -2 * mu / ctx.normalized_shape
            dmean = all_reduce(torch.sum(dmean, dim=-1, keepdim=True), ctx.output_parallel_mode)
            input_grad = dz / sigma + dvar * 2 * mu / ctx.normalized_shape + dmean / ctx.normalized_shape
        return input_grad, weight_grad, bias_grad, None, None, None, None, None
class Matmul_AB_3D(torch.autograd.Function):
    """3D-parallel matrix multiplication computing :math:`C = AB`."""
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any,
                A: Tensor,
                B: Tensor,
                depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = -1,
                output_dim: int = 0) -> Tensor:
        # Partition layout: A is [m/q^2, n, k/q], B is [k/q, h/q^2] and the
        # result C is [m/q^2, n, h/q].
        ctx.save_for_backward(A, B)
        assert A.shape[-1] == B.shape[0], \
            'Invalid shapes: A={}, B={}.'.format(A.shape, B.shape)
        # Gather the full operands, multiply, then scatter the result back.
        gathered_A = all_gather(A, input_dim, input_parallel_mode)
        gathered_B = all_gather(B, weight_dim, weight_parallel_mode)
        full_C = torch.matmul(gathered_A, gathered_B)
        out = reduce_scatter(full_C, output_dim, output_parallel_mode)
        # Remember groups and partition dims so backward can route its own
        # matmuls through the matching parallel modes.
        ctx.depth = depth
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        ctx.A_dim = input_dim
        ctx.B_dim = weight_dim
        ctx.C_dim = output_dim
        return out
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # dA = dC @ B^T and dB = A^T @ dC, expressed with the sibling
        # transposed-matmul autograd functions.
        A, B = ctx.saved_tensors
        with torch.no_grad():
            A_grad = Matmul_ABT_3D.apply(output_grad, B, ctx.depth,
                                         ctx.C_group_parallel_mode,
                                         ctx.B_group_parallel_mode,
                                         ctx.A_group_parallel_mode, ctx.C_dim,
                                         ctx.B_dim, ctx.A_dim)
            B_grad = Matmul_ATB_3D.apply(A, output_grad, ctx.depth,
                                         ctx.A_group_parallel_mode,
                                         ctx.C_group_parallel_mode,
                                         ctx.B_group_parallel_mode, ctx.A_dim,
                                         ctx.C_dim, ctx.B_dim)
        return A_grad, B_grad, None, None, None, None, None, None, None
class Matmul_ABT_3D(torch.autograd.Function):
    """3D-parallel matrix multiplication computing :math:`C = AB^T`."""
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any,
                A: Tensor,
                B: Tensor,
                depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = -1,
                output_dim: int = 0) -> Tensor:
        # Partition layout: A is [m/q^2, n, h/q], B is [k/q, h/q^2] and the
        # result C is [m/q^2, n, k/q].
        ctx.save_for_backward(A, B)
        # Gather the operands, multiply by the transposed B, scatter back.
        gathered_A = all_gather(A, input_dim, input_parallel_mode)
        gathered_B = all_gather(B, weight_dim, weight_parallel_mode)
        full_C = torch.matmul(gathered_A, gathered_B.transpose(0, 1))
        out = reduce_scatter(full_C, output_dim, output_parallel_mode)
        # Remember groups and partition dims for backward.
        ctx.depth = depth
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        ctx.A_dim = input_dim
        ctx.B_dim = weight_dim
        ctx.C_dim = output_dim
        return out
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # dA = dC @ B and dB = dC^T @ A, routed through the sibling
        # autograd functions with the matching parallel modes.
        A, B = ctx.saved_tensors
        with torch.no_grad():
            A_grad = Matmul_AB_3D.apply(output_grad, B, ctx.depth,
                                        ctx.C_group_parallel_mode,
                                        ctx.B_group_parallel_mode,
                                        ctx.A_group_parallel_mode, ctx.C_dim,
                                        ctx.B_dim, ctx.A_dim)
            B_grad = Matmul_ATB_3D.apply(output_grad, A, ctx.depth,
                                         ctx.C_group_parallel_mode,
                                         ctx.A_group_parallel_mode,
                                         ctx.B_group_parallel_mode, ctx.C_dim,
                                         ctx.A_dim, ctx.B_dim)
        return A_grad, B_grad, None, None, None, None, None, None, None
class Matmul_ATB_3D(torch.autograd.Function):
    """3D-parallel matrix multiplication computing :math:`C = A^TB`."""
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any,
                A: Tensor,
                B: Tensor,
                depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = 0,
                output_dim: int = -1) -> Tensor:
        # Partition layout: A is [m/q^2, n, k/q], B is [m/q^2, n, h/q] and
        # the result C is [k/q, h/q^2].
        ctx.save_for_backward(A, B)
        # Gather both operands, flatten the leading (batch) dims so the
        # contraction runs over every row, then scatter the product back.
        gathered_A = all_gather(A, input_dim, input_parallel_mode)
        gathered_A = gathered_A.reshape(-1, A.shape[-1])
        gathered_B = all_gather(B, weight_dim, weight_parallel_mode)
        gathered_B = gathered_B.reshape(-1, B.shape[-1])
        full_C = torch.matmul(gathered_A.transpose(0, 1), gathered_B)
        out = reduce_scatter(full_C, output_dim, output_parallel_mode)
        # Remember groups and partition dims for backward.
        ctx.depth = depth
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        ctx.A_dim = input_dim
        ctx.B_dim = weight_dim
        ctx.C_dim = output_dim
        return out
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # dA = B @ dC^T and dB = A @ dC, routed through the sibling
        # autograd functions with the matching parallel modes.
        A, B = ctx.saved_tensors
        with torch.no_grad():
            A_grad = Matmul_ABT_3D.apply(B, output_grad, ctx.depth,
                                         ctx.B_group_parallel_mode,
                                         ctx.C_group_parallel_mode,
                                         ctx.A_group_parallel_mode, ctx.B_dim,
                                         ctx.C_dim, ctx.A_dim)
            B_grad = Matmul_AB_3D.apply(A, output_grad, ctx.depth,
                                        ctx.A_group_parallel_mode,
                                        ctx.C_group_parallel_mode,
                                        ctx.B_group_parallel_mode, ctx.A_dim,
                                        ctx.C_dim, ctx.B_dim)
        return A_grad, B_grad, None, None, None, None, None, None, None
class Add_3D(torch.autograd.Function):
    """Broadcast-add a bias vector under 3D parallelism: :math:`C = A + b`."""
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, input_: Tensor, bias: Tensor, depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode) -> Tensor:
        # input: [m/q^2, n, h/q]; bias: [h/q^2]
        # The rank owning the bias shard broadcasts it over the input group,
        # then the shards are gathered along the weight group to form the
        # full [h/q] slice that matches the input's feature dimension.
        group_ranks = gpc.get_ranks_in_group(input_parallel_mode)
        src_rank = group_ranks[gpc.get_local_rank(output_parallel_mode)]
        bias_copy = bias.clone()
        dist.broadcast(bias_copy,
                       src=src_rank,
                       group=gpc.get_group(input_parallel_mode))
        # [h/q]
        bias_copy = all_gather(bias_copy, -1, weight_parallel_mode)
        out = input_ + bias_copy
        ctx.depth = depth
        ctx.src_rank = src_rank
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        return out
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # output_grad: [m/q^2, n, h/q]. The bias gradient is the sum over
        # every non-feature dim, reduce-scattered along the weight group and
        # reduced back onto the rank that owns the bias shard.
        with torch.no_grad():
            summed = torch.sum(output_grad,
                               dim=tuple(range(len(output_grad.shape))[:-1]))
            bias_grad = reduce_scatter(summed, -1, ctx.B_group_parallel_mode)
            dist.reduce(bias_grad,
                        dst=ctx.src_rank,
                        group=gpc.get_group(ctx.A_group_parallel_mode))
            if gpc.get_local_rank(
                    ctx.A_group_parallel_mode) != gpc.get_local_rank(
                        ctx.C_group_parallel_mode):
                # Only the owning rank keeps a bias gradient.
                bias_grad = None
        return output_grad, bias_grad, None, None, None, None
class Mul_3D(torch.autograd.Function):
    """Element-wise scaling by a bias vector under 3D parallelism:
    :math:`C = A * b`."""
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, input_: Tensor, bias: Tensor, depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode) -> Tensor:
        # input: [m/q^2, n, h/q]; bias: [h/q^2]
        group_ranks = gpc.get_ranks_in_group(input_parallel_mode)
        src_rank = group_ranks[gpc.get_local_rank(output_parallel_mode)]
        # Broadcast the bias shard over the input group, then gather along
        # the weight group to obtain the full [h/q] slice.
        bias_copy = bias.clone()
        dist.broadcast(bias_copy,
                       src=src_rank,
                       group=gpc.get_group(input_parallel_mode))
        bias_copy = all_gather(bias_copy, -1, weight_parallel_mode)
        ctx.save_for_backward(input_, bias_copy)
        out = torch.mul(input_, bias_copy)
        ctx.depth = depth
        ctx.src_rank = src_rank
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        return out
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # output_grad: [m/q^2, n, h/q]
        with torch.no_grad():
            input_, bias = ctx.saved_tensors
            # dA = dC * b
            input_grad = torch.mul(output_grad, bias)
            # db = sum(dC * A) over non-feature dims, reduce-scattered along
            # the weight group and reduced onto the bias-owning rank.
            summed = torch.mul(output_grad, input_)
            summed = torch.sum(summed,
                              dim=tuple(range(len(output_grad.shape))[:-1]))
            bias_grad = reduce_scatter(summed, -1, ctx.B_group_parallel_mode)
            dist.reduce(bias_grad,
                        dst=ctx.src_rank,
                        group=gpc.get_group(ctx.A_group_parallel_mode))
            if gpc.get_local_rank(
                    ctx.A_group_parallel_mode) != gpc.get_local_rank(
                        ctx.C_group_parallel_mode):
                # Only the owning rank keeps a bias gradient.
                bias_grad = None
        return input_grad, bias_grad, None, None, None, None
class Sum_3D(torch.autograd.Function):
    """Sum the input tensor along ``dim``, all-reduced across a 3D
    parallel group so every rank holds the global sum."""
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any,
                input_: Tensor,
                dim: int,
                depth: int,
                parallel_mode: ParallelMode,
                keepdim: bool = False) -> Tensor:
        # input: [m/q^2, n, h/q] — local sum first, then reduce globally.
        out = torch.sum(input_, dim=dim, keepdim=keepdim)
        dist.all_reduce(out, group=gpc.get_group(parallel_mode))
        ctx.input_shape = input_.shape
        ctx.depth = depth
        ctx.group = parallel_mode
        ctx.dim = dim
        return out
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        with torch.no_grad():
            output_grad = output_grad.contiguous()
            dist.all_reduce(output_grad, group=gpc.get_group(ctx.group))
            # Re-insert the reduced dim (keepdim=False case) so the gradient
            # can be tiled back to the original input shape.
            if len(output_grad.shape) < len(ctx.input_shape):
                output_grad = torch.unsqueeze(output_grad, ctx.dim)
            dims = [1 for _ in range(len(output_grad.shape))]
            dims[ctx.dim] = ctx.input_shape[ctx.dim]
            input_grad = output_grad.repeat(tuple(dims))
        # Fix: forward takes exactly 5 inputs besides ctx (input_, dim,
        # depth, parallel_mode, keepdim), so exactly 5 gradients must be
        # returned — the previous extra trailing None made autograd raise
        # "returned an incorrect number of gradients" at runtime.
        return input_grad, None, None, None, None
class Reduce_3D(torch.autograd.Function):
    """All-reduce the input tensor across a parallel group; the gradient
    passes through unchanged since summation is linear."""
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, input_: Tensor, depth: int,
                parallel_mode: ParallelMode) -> Tensor:
        # Reduce in place across the group, then hand autograd a fresh copy
        # so the graph does not alias the mutated input buffer.
        group = gpc.get_group(parallel_mode)
        dist.all_reduce(input_, group=group)
        return input_.clone()
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # Identity gradient for the tensor; None for depth and parallel_mode.
        return output_grad, None, None
# class Slice_3D(torch.autograd.Function):
# """Slice input tensor
# """
# @staticmethod
# @custom_fwd(cast_inputs=torch.float16)
# def forward(ctx: Any, input_: Tensor, dim: int, depth: int,
# parallel_mode: ParallelMode) -> Tensor:
# rank = gpc.get_local_rank(parallel_mode)
# out = torch.chunk(input_, depth, dim=dim)[rank].contiguous()
# ctx.depth = depth
# ctx.parallel_mode = parallel_mode
# ctx.dim = dim
# ctx.input_shape = input_.shape
# return out
# @staticmethod
# @custom_bwd
# def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
# with torch.no_grad():
# input_grad = all_gather(output_grad, ctx.dim, ctx.parallel_mode)
# input_grad.reshape(ctx.input_shape)
# return input_grad, None, None, None
| [
"colossalai.core.global_context.get_local_rank",
"colossalai.communication.all_gather",
"colossalai.communication.reduce_scatter",
"colossalai.core.global_context.get_group",
"colossalai.communication.all_reduce",
"colossalai.core.global_context.get_ranks_in_group"
] | [((462, 499), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (472, 499), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((5920, 5957), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (5930, 5957), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((9720, 9757), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (9730, 9757), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((11983, 12020), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (11993, 12020), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((14145, 14182), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (14155, 14182), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((16390, 16427), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (16400, 16427), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((18313, 18350), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (18323, 18350), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((20498, 20535), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (20508, 20535), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((21761, 21798), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (21771, 21798), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((1110, 1160), 'colossalai.communication.all_gather', 'all_gather', (['input_', 'input_dim', 
'input_parallel_mode'], {}), '(input_, input_dim, input_parallel_mode)\n', (1120, 1160), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((1178, 1210), 'torch.cat', 'torch.cat', (['input_'], {'dim': 'input_dim'}), '(input_, dim=input_dim)\n', (1187, 1210), False, 'import torch\n'), ((1347, 1375), 'torch.matmul', 'torch.matmul', (['input_', 'weight'], {}), '(input_, weight)\n', (1359, 1375), False, 'import torch\n'), ((1393, 1449), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['output', 'output_dim', 'output_parallel_mode'], {}), '(output, output_dim, output_parallel_mode)\n', (1407, 1449), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((7051, 7072), 'torch.sqrt', 'torch.sqrt', (['(var + eps)'], {}), '(var + eps)\n', (7061, 7072), False, 'import torch\n'), ((10393, 10438), 'colossalai.communication.all_gather', 'all_gather', (['A', 'input_dim', 'input_parallel_mode'], {}), '(A, input_dim, input_parallel_mode)\n', (10403, 10438), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((10456, 10503), 'colossalai.communication.all_gather', 'all_gather', (['B', 'weight_dim', 'weight_parallel_mode'], {}), '(B, weight_dim, weight_parallel_mode)\n', (10466, 10503), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((10517, 10545), 'torch.matmul', 'torch.matmul', (['A_temp', 'B_temp'], {}), '(A_temp, B_temp)\n', (10529, 10545), False, 'import torch\n'), ((10560, 10611), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['C', 'output_dim', 'output_parallel_mode'], {}), '(C, output_dim, output_parallel_mode)\n', (10574, 10611), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((12544, 12589), 'colossalai.communication.all_gather', 'all_gather', (['A', 'input_dim', 'input_parallel_mode'], {}), '(A, input_dim, input_parallel_mode)\n', (12554, 12589), 
False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((12607, 12654), 'colossalai.communication.all_gather', 'all_gather', (['B', 'weight_dim', 'weight_parallel_mode'], {}), '(B, weight_dim, weight_parallel_mode)\n', (12617, 12654), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((12727, 12778), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['C', 'output_dim', 'output_parallel_mode'], {}), '(C, output_dim, output_parallel_mode)\n', (12741, 12778), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((14706, 14751), 'colossalai.communication.all_gather', 'all_gather', (['A', 'input_dim', 'input_parallel_mode'], {}), '(A, input_dim, input_parallel_mode)\n', (14716, 14751), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((14818, 14865), 'colossalai.communication.all_gather', 'all_gather', (['B', 'weight_dim', 'weight_parallel_mode'], {}), '(B, weight_dim, weight_parallel_mode)\n', (14828, 14865), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((14987, 15038), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['C', 'output_dim', 'output_parallel_mode'], {}), '(C, output_dim, output_parallel_mode)\n', (15001, 15038), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((16744, 16787), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (16766, 16787), True, 'from colossalai.core import global_context as gpc\n'), ((17069, 17116), 'colossalai.communication.all_gather', 'all_gather', (['bias_temp', '(-1)', 'weight_parallel_mode'], {}), '(bias_temp, -1, weight_parallel_mode)\n', (17079, 17116), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((18667, 18710), 
'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (18689, 18710), True, 'from colossalai.core import global_context as gpc\n'), ((19010, 19057), 'colossalai.communication.all_gather', 'all_gather', (['bias_temp', '(-1)', 'weight_parallel_mode'], {}), '(bias_temp, -1, weight_parallel_mode)\n', (19020, 19057), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((19147, 19175), 'torch.mul', 'torch.mul', (['input_', 'bias_temp'], {}), '(input_, bias_temp)\n', (19156, 19175), False, 'import torch\n'), ((20790, 20833), 'torch.sum', 'torch.sum', (['input_'], {'dim': 'dim', 'keepdim': 'keepdim'}), '(input_, dim=dim, keepdim=keepdim)\n', (20799, 20833), False, 'import torch\n'), ((2572, 2587), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2585, 2587), False, 'import torch\n'), ((4006, 4071), 'colossalai.communication.all_gather', 'all_gather', (['output_grad', 'ctx.output_dim', 'ctx.output_parallel_mode'], {}), '(output_grad, ctx.output_dim, ctx.output_parallel_mode)\n', (4016, 4071), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((4135, 4177), 'torch.cat', 'torch.cat', (['output_grad'], {'dim': 'ctx.output_dim'}), '(output_grad, dim=ctx.output_dim)\n', (4144, 4177), False, 'import torch\n'), ((4302, 4388), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['input_grad', 'ctx.input_dim', 'ctx.input_parallel_mode'], {'async_op': '(True)'}), '(input_grad, ctx.input_dim, ctx.input_parallel_mode, async_op\n =True)\n', (4316, 4388), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((5519, 5583), 'colossalai.communication.all_reduce', 'all_reduce', (['weight_grad', 'ctx.weight_parallel_mode'], {'async_op': '(True)'}), '(weight_grad, ctx.weight_parallel_mode, async_op=True)\n', (5529, 5583), False, 'from colossalai.communication import all_gather, 
all_reduce, reduce_scatter\n'), ((8102, 8117), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8115, 8117), False, 'import torch\n'), ((8364, 8407), 'colossalai.communication.all_reduce', 'all_reduce', (['grads', 'ctx.weight_parallel_mode'], {}), '(grads, ctx.weight_parallel_mode)\n', (8374, 8407), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((8428, 8470), 'colossalai.communication.all_reduce', 'all_reduce', (['grads', 'ctx.input_parallel_mode'], {}), '(grads, ctx.input_parallel_mode)\n', (8438, 8470), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((11073, 11088), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11086, 11088), False, 'import torch\n'), ((13240, 13255), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13253, 13255), False, 'import torch\n'), ((15500, 15515), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15513, 15515), False, 'import torch\n'), ((16822, 16862), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['output_parallel_mode'], {}), '(output_parallel_mode)\n', (16840, 16862), True, 'from colossalai.core import global_context as gpc\n'), ((17558, 17573), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17571, 17573), False, 'import torch\n'), ((17737, 17788), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['grad', '(-1)', 'ctx.B_group_parallel_mode'], {}), '(grad, -1, ctx.B_group_parallel_mode)\n', (17751, 17788), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((18745, 18785), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['output_parallel_mode'], {}), '(output_parallel_mode)\n', (18763, 18785), True, 'from colossalai.core import global_context as gpc\n'), ((19583, 19598), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19596, 19598), False, 'import torch\n'), ((19700, 19728), 'torch.mul', 'torch.mul', (['output_grad', 
'bias'], {}), '(output_grad, bias)\n', (19709, 19728), False, 'import torch\n'), ((19768, 19798), 'torch.mul', 'torch.mul', (['output_grad', 'input_'], {}), '(output_grad, input_)\n', (19777, 19798), False, 'import torch\n'), ((19934, 19985), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['grad', '(-1)', 'ctx.B_group_parallel_mode'], {}), '(grad, -1, ctx.B_group_parallel_mode)\n', (19948, 19985), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((21159, 21174), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (21172, 21174), False, 'import torch\n'), ((6768, 6807), 'torch.sum', 'torch.sum', (['input_'], {'dim': '(-1)', 'keepdim': '(True)'}), '(input_, dim=-1, keepdim=True)\n', (6777, 6807), False, 'import torch\n'), ((6928, 6968), 'torch.sum', 'torch.sum', (['(mu ** 2)'], {'dim': '(-1)', 'keepdim': '(True)'}), '(mu ** 2, dim=-1, keepdim=True)\n', (6937, 6968), False, 'import torch\n'), ((9166, 9203), 'torch.sum', 'torch.sum', (['dvar'], {'dim': '(-1)', 'keepdim': '(True)'}), '(dvar, dim=-1, keepdim=True)\n', (9175, 9203), False, 'import torch\n'), ((9340, 9378), 'torch.sum', 'torch.sum', (['dmean'], {'dim': '(-1)', 'keepdim': '(True)'}), '(dmean, dim=-1, keepdim=True)\n', (9349, 9378), False, 'import torch\n'), ((16997, 17031), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (17010, 17031), True, 'from colossalai.core import global_context as gpc\n'), ((17953, 17998), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (17971, 17998), True, 'from colossalai.core import global_context as gpc\n'), ((18023, 18068), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.C_group_parallel_mode'], {}), '(ctx.C_group_parallel_mode)\n', (18041, 18068), True, 'from colossalai.core import global_context as gpc\n'), ((18938, 18972), 
'colossalai.core.global_context.get_group', 'gpc.get_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (18951, 18972), True, 'from colossalai.core import global_context as gpc\n'), ((20150, 20195), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (20168, 20195), True, 'from colossalai.core import global_context as gpc\n'), ((20220, 20265), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.C_group_parallel_mode'], {}), '(ctx.C_group_parallel_mode)\n', (20238, 20265), True, 'from colossalai.core import global_context as gpc\n'), ((20869, 20897), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['parallel_mode'], {}), '(parallel_mode)\n', (20882, 20897), True, 'from colossalai.core import global_context as gpc\n'), ((21392, 21429), 'torch.unsqueeze', 'torch.unsqueeze', (['output_grad', 'ctx.dim'], {}), '(output_grad, ctx.dim)\n', (21407, 21429), False, 'import torch\n'), ((21947, 21975), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['parallel_mode'], {}), '(parallel_mode)\n', (21960, 21975), True, 'from colossalai.core import global_context as gpc\n'), ((8214, 8251), 'torch.stack', 'torch.stack', (['[bias_grad, weight_grad]'], {}), '([bias_grad, weight_grad])\n', (8225, 8251), False, 'import torch\n'), ((17896, 17936), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (17909, 17936), True, 'from colossalai.core import global_context as gpc\n'), ((20093, 20133), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (20106, 20133), True, 'from colossalai.core import global_context as gpc\n'), ((21274, 21298), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ctx.group'], {}), '(ctx.group)\n', (21287, 21298), True, 'from colossalai.core 
import global_context as gpc\n'), ((5445, 5478), 'torch.unsqueeze', 'torch.unsqueeze', (['bias_grad'], {'dim': '(0)'}), '(bias_grad, dim=0)\n', (5460, 5478), False, 'import torch\n')] |
import torch
from colossalai.tensor.op_wrapper import colo_op_impl
from colossalai.tensor import ColoTensor
@colo_op_impl(torch.nn.functional.cross_entropy)
def colo_cross_entropy(types, args=(), kwargs=None, pg=None):
    """Dispatch ``torch.nn.functional.cross_entropy`` for ColoTensor args.

    Unwraps ColoTensor inputs to plain torch tensors, runs the stock
    cross-entropy, and wraps the result back into a ColoTensor.
    """
    # Defaults so partially-supplied calls do not hit unbound locals.
    input_tensor = None
    target = None
    weight = None
    arg_num = len(args)
    if arg_num > 0:
        input_tensor = args[0]
    if arg_num > 1:
        target = args[1]
    if arg_num > 2:
        # Fix: weight is the third positional argument (args[2]); the old
        # code read args[3], which is cross_entropy's size_average slot.
        weight = args[2]
    # Fix: kwargs defaults to None, and `'x' in None` raises TypeError.
    kwargs = kwargs or {}
    if 'input' in kwargs:
        input_tensor = kwargs['input']
    if 'target' in kwargs:
        target = kwargs['target']
    if 'weight' in kwargs:
        weight = kwargs['weight']
    if isinstance(input_tensor, ColoTensor):
        input_tensor = input_tensor.torch_tensor()
    if isinstance(target, ColoTensor):
        target = target.torch_tensor()
    return ColoTensor.init_from_torch_tensor(
        torch.nn.functional.cross_entropy(input_tensor, target, weight))
| [
"colossalai.tensor.op_wrapper.colo_op_impl"
] | [((111, 158), 'colossalai.tensor.op_wrapper.colo_op_impl', 'colo_op_impl', (['torch.nn.functional.cross_entropy'], {}), '(torch.nn.functional.cross_entropy)\n', (123, 158), False, 'from colossalai.tensor.op_wrapper import colo_op_impl\n'), ((796, 859), 'torch.nn.functional.cross_entropy', 'torch.nn.functional.cross_entropy', (['input_tensor', 'target', 'weight'], {}), '(input_tensor, target, weight)\n', (829, 859), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
try:
import colossal_C
except:
print('Colossalai should be built with cuda extension to use the FP16 optimizer')
from torch.optim import Optimizer
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.logging import get_dist_logger
from colossalai.utils import (print_rank_0, copy_tensor_parallel_attributes,
clip_grad_norm_fp32, count_zeros_fp32, multi_tensor_applier)
def _zero_grad_group_helper(group, set_to_none):
"""Zero out the gradient for a group of parameters.
Note: copied from torch.optim.optimizer."""
for param in group:
if param.grad is not None:
if set_to_none:
param.grad = None
else:
if param.grad.grad_fn is not None:
param.grad.detach_()
else:
param.grad.requires_grad_(False)
param.grad.zero_()
def _multi_tensor_copy_this_to_that(this, that, overflow_buf=None):
"""Use multi-tensor-applier to copy values from one list to another.
We don't have a blfoat16 implementation so for now if the overflow_buf
is not provided, we default back to simple loop copy to be compatible
with bfloat16."""
if overflow_buf:
overflow_buf.fill_(0)
# Scaling with factor `1.0` is equivalent to copy.
multi_tensor_applier(colossal_C.multi_tensor_scale,
overflow_buf,
[this, that],
1.0)
else:
for this_, that_ in zip(this, that):
that_.copy_(this_)
class DynamicGradScaler:
def __init__(self,
initial_scale,
min_scale,
growth_factor,
backoff_factor,
growth_interval,
hysteresis,
max_scale: int = None):
""""Grad scaler with dynamic scale that gets adjusted
during training."""
assert initial_scale > 0.0
self._scale = torch.cuda.FloatTensor([initial_scale])
# Lower bound on the scale.
assert min_scale > 0.0
assert min_scale <= initial_scale
self.min_scale = torch.cuda.FloatTensor([min_scale])
# Growth and backoff factors for the scale.
assert growth_factor > 1.0
self.growth_factor = torch.cuda.FloatTensor([growth_factor])
assert backoff_factor < 1.0
assert backoff_factor > 0.0
self.backoff_factor = torch.cuda.FloatTensor([backoff_factor])
# Interval over which if we don't see any inf/nan,
# we will scale the grad scale by the growth factor.
assert growth_interval > 0
self.growth_interval = growth_interval
# Number of inf/nans we should see before scaling down
# the grad scale by the backoff factor.
assert hysteresis > 0
self.hysteresis = hysteresis
if max_scale is not None:
assert max_scale > 1 and initial_scale <= max_scale
self._max_scale = max_scale
# Trackers.
self._growth_tracker = 0
self._hysteresis_tracker = self.hysteresis
self._logger = get_dist_logger()
@property
def scale(self):
return self._scale
@property
def inv_scale(self):
return self._scale.double().reciprocal().float()
def update(self, found_inf):
# If we have an inf/nan, growth tracker is set to 0
# and hysterisis tracker is reduced by 1.
if found_inf:
self._growth_tracker = 0
self._hysteresis_tracker -= 1
# Now if we are out of hysteresis count, scale down the loss.
if self._hysteresis_tracker <= 0:
self._scale = torch.max(self._scale * self.backoff_factor,
self.min_scale)
self._logger.info(f'overflow occurs, loss scale is adjusted to {self._scale}', ranks=[0])
else:
# If there is no nan/inf, increment the growth tracker.
self._growth_tracker += 1
# If we have had enough consequitive intervals with no nan/inf:
if self._growth_tracker == self.growth_interval:
# Reset the tracker and hysteresis trackers,
self._growth_tracker = 0
self._hysteresis_tracker = self.hysteresis
# and scale up the loss scale.
if self._max_scale is not None and self._scale >= self._max_scale:
self._logger.info(
f'Current loss scale {self._scale} has reached the max scale {self._max_scale} allowed', ranks=[0])
else:
self._scale = self._scale * self.growth_factor
self._logger.info(f'no consecutive overflow, loss scale is adjusted to {self._scale}', ranks=[0])
def state_dict(self):
state_dict = {}
state_dict['max_scale'] = self._max_scale
state_dict['scale'] = self._scale
state_dict['growth_tracker'] = self._growth_tracker
state_dict['hysteresis_tracker'] = self._hysteresis_tracker
return state_dict
def load_state_dict(self, state_dict):
self._scale = state_dict['scale'].cuda(torch.cuda.current_device())
self._growth_tracker = state_dict['growth_tracker']
self._hysteresis_tracker = state_dict['hysteresis_tracker']
self._max_scale = state_dict['max_scale']
class FP16Optimizer(Optimizer):
"""Float16 optimizer for fp16 and bf16 data types.
:param optimizer: base optimizer such as Adam or SGD
:type optimizer: torch.optim.Optimizer
:param clip_grad: clip gradeints with this global L2 norm. Note that clipping is ignored if clip_grad == 0
:type param clip_grad: float
:param log_num_zeros_in_grad: return number of zeros in the gradients.
:type log_num_zeros_in_grad: bool
:param initial_scale: initial scale of gradient scaler
:type initial_scale: int
:param growth_factor: the growth rate of loss scale
:type growth_factor: int
:param backoff_factor: the decrease rate of loss scale
:type backoff_factor: float
:param hysterisis: delay shift in dynamic loss scaling
:type hysterisis: int
:param max_scale: maximum loss scale allowed
:type max_scale: int
"""
def __init__(self,
optimizer,
clip_grad=0,
log_num_zeros_in_grad=False,
initial_scale=2 ** 32,
min_scale=1,
growth_factor=2,
backoff_factor=0.5,
growth_interval=1000,
hysteresis=2,
max_scale: int = 2 ** 32):
# default args for compatibility
bf16 = False
params_have_main_grad = True
# have a defaults for compatibility with pytorch optim
self.defaults = optimizer.defaults
# log config
self._logger = get_dist_logger()
self._logger.info(f"\n========= FP16 Optimizer Config =========\n"
f"Optimizer: {optimizer.__class__.__name__}\n"
f"clip_grad = {clip_grad}\n"
f"log_num_zeros_in_grad = {log_num_zeros_in_grad}\n"
f"initial_scale = {initial_scale}\n"
f"min_scale = {min_scale}\n"
f"growth_factor = {growth_factor}\n"
f"backoff_factor = {backoff_factor}\n"
f"growth_interval = {growth_interval}\n"
f"hysteresis = {hysteresis}\n"
f"==========================================", ranks=[0])
"""Input optimizer is the base optimizer for example Adam."""
self.optimizer = optimizer
assert self.optimizer, 'no optimizer is provided.'
# Set gradient clipping and logging params.
self.clip_grad = clip_grad
self.log_num_zeros_in_grad = log_num_zeros_in_grad
self.params_have_main_grad = params_have_main_grad
self.bf16 = bf16
self.grad_scaler = DynamicGradScaler(
initial_scale=initial_scale,
min_scale=min_scale,
growth_factor=growth_factor,
backoff_factor=backoff_factor,
growth_interval=growth_interval,
hysteresis=hysteresis,
max_scale=max_scale
)
# None grad scaler is only supported for bf16.
if self.grad_scaler is None:
assert self.bf16, 'fp16 expects a grad scaler.'
# Tensor used to determine if a nan/if has happend.
# Any non-zero value indicates inf/nan.
# Note that we keep this for the cases that grad scaler is none.
# We still record nan/inf if we have a bfloat16 with a grad scaler.
if self.grad_scaler:
self.found_inf = torch.cuda.FloatTensor([0.0])
# Dummy tensor needed for apex multi-apply tensor.
# For bfloat, we don't have multi-tensor apply and for now
# we set it to none so the multi-tensor apply gets ignored.
if bf16:
self._dummy_overflow_buf = None
else:
self._dummy_overflow_buf = torch.cuda.IntTensor([0])
# In case grad scaler is not passed, define the unity scale.
if self.grad_scaler is None:
self._scale_one = torch.cuda.FloatTensor([1.0])
# ======================
# main parameter stuff
# ======================
# Three groups of parameters:
# float16_groups: original float16 parameters
# fp32_from_float16_groups: fp32 copy of float16 parameters
# fp32_from_fp32_groups: original fp32 parameters
self.float16_groups = []
self.fp32_from_float16_groups = []
self.fp32_from_fp32_groups = []
# For all the groups in the original optimizer:
for param_group in self.optimizer.param_groups:
float16_params_this_group = []
fp32_params_this_group = []
fp32_from_float16_params_this_group = []
# For all the parameters in this group:
for i, param in enumerate(param_group['params']):
if param.requires_grad:
# float16 params:
if param.type() in ['torch.cuda.HalfTensor',
'torch.cuda.BFloat16Tensor']:
float16_params_this_group.append(param)
# Create a copy
main_param = param.detach().clone().float()
# Copy tensor model parallel attributes.
copy_tensor_parallel_attributes(param, main_param)
# if hasattr(param, 'shared'):
# main_param.shared = param.shared
# Replace the optimizer params with the new fp32 copy.
param_group['params'][i] = main_param
fp32_from_float16_params_this_group.append(main_param)
# Reset existing state dict key to the new main param.
if param in self.optimizer.state:
self.optimizer.state[main_param] \
= self.optimizer.state.pop(param)
# fp32 params.
elif param.type() == 'torch.cuda.FloatTensor':
fp32_params_this_group.append(param)
param_group['params'][i] = param
else:
raise TypeError('Wrapped parameters must be one of '
'torch.cuda.FloatTensor, '
'torch.cuda.HalfTensor, or '
'torch.cuda.BFloat16Tensor. '
'Received {}'.format(param.type()))
self.float16_groups.append(float16_params_this_group)
self.fp32_from_float16_groups.append(
fp32_from_float16_params_this_group)
self.fp32_from_fp32_groups.append(fp32_params_this_group)
# Leverage state_dict() and load_state_dict() to
# recast preexisting per-param state tensors
self.optimizer.load_state_dict(self.optimizer.state_dict())
def zero_grad(self, set_to_none=False):
"""We only need to zero the model related parameters, i.e.,
float16_groups & fp32_from_fp32_groups."""
for group in self.float16_groups:
_zero_grad_group_helper(group, set_to_none)
for group in self.fp32_from_fp32_groups:
_zero_grad_group_helper(group, set_to_none)
def get_loss_scale(self):
if self.grad_scaler is None:
return self._scale_one
return self.grad_scaler.scale
def _copy_model_grads_to_main_grads(self):
# This only needs to be done for the float16 group.
for model_group, main_group in zip(self.float16_groups,
self.fp32_from_float16_groups):
for model_param, main_param in zip(model_group, main_group):
if self.params_have_main_grad:
main_param.grad = model_param.main_grad.float()
else:
if model_param.grad is not None:
main_param.grad = model_param.grad.float()
# For fp32 grads, we need to reset the grads to main grad.
if self.params_have_main_grad:
for model_group in self.fp32_from_fp32_groups:
for model_param in model_group:
model_param.grad = model_param.main_grad
def _unscale_main_grads_and_check_for_nan(self):
main_grads = []
# fp32 params fromm float16 ones.
for main_group in self.fp32_from_float16_groups:
for main_param in main_group:
if main_param.grad is not None:
main_grads.append(main_param.grad.data)
# Append fp32 parameters.
for main_group in self.fp32_from_fp32_groups:
for main_param in main_group:
if main_param.grad is not None:
main_grads.append(main_param.grad.data)
# Reset found inf.
self.found_inf.fill_(0.0)
# Unscale and set found inf/nan
torch._amp_foreach_non_finite_check_and_unscale_(
main_grads, self.found_inf, self.grad_scaler.inv_scale)
# Update across all model parallel instances.
torch.distributed.all_reduce(self.found_inf,
op=torch.distributed.ReduceOp.MAX,
group=gpc.get_group(ParallelMode.TENSOR))
# Check for nan.
found_inf_flag = (self.found_inf.item() > 0)
return found_inf_flag
def _get_model_and_main_params_data_float16(self):
model_data = []
main_data = []
for model_group, main_group in zip(self.float16_groups,
self.fp32_from_float16_groups):
for model_param, main_param in zip(model_group, main_group):
model_data.append(model_param.data)
main_data.append(main_param.data)
return model_data, main_data
def _copy_main_params_to_model_params(self):
# Only needed for the float16 params.
model_data, main_data = self._get_model_and_main_params_data_float16()
_multi_tensor_copy_this_to_that(this=main_data, that=model_data,
overflow_buf=self._dummy_overflow_buf)
def _copy_model_params_to_main_params(self):
# Only needed for the float16 params.
model_data, main_data = self._get_model_and_main_params_data_float16()
_multi_tensor_copy_this_to_that(this=model_data, that=main_data,
overflow_buf=self._dummy_overflow_buf)
def reload_model_params(self):
self._copy_model_params_to_main_params()
@torch.no_grad()
def step(self):
# Copy gradients from model params to main params.
self._copy_model_grads_to_main_grads()
# Do unscale, check for inf, and update grad scaler only for
# the case that grad scaler is provided.
if self.grad_scaler:
# Unscale and check for inf/nan.
found_inf_flag = self._unscale_main_grads_and_check_for_nan()
# We are done with scaling gradients
# so we can update the loss scale.
self.grad_scaler.update(found_inf_flag)
# If we found inf/nan, skip the update.
if found_inf_flag:
return False, None, None
# Clip the main gradients.
grad_norm = None
if self.clip_grad > 0.0:
grad_norm = self.clip_grad_norm(self.clip_grad)
# count the zeros in the grads
num_zeros_in_grad = self.count_zeros() if \
self.log_num_zeros_in_grad else None
# Step the optimizer.
self.optimizer.step()
# Update params from main params.
self._copy_main_params_to_model_params()
# Successful update.
return True, grad_norm, num_zeros_in_grad
def state_dict(self):
state_dict = {}
state_dict['optimizer'] = self.optimizer.state_dict()
if self.grad_scaler:
state_dict['grad_scaler'] = self.grad_scaler.state_dict()
state_dict['fp32_from_fp16_params'] = self.fp32_from_float16_groups
return state_dict
def load_state_dict(self, state_dict):
# Optimizer.
optimizer_key = 'optimizer'
if optimizer_key not in state_dict:
optimizer_key = 'optimizer_state_dict'
print_rank_0('***WARNING*** loading optimizer from '
'an old checkpoint ...')
self.optimizer.load_state_dict(state_dict[optimizer_key])
# Grad scaler.
if 'grad_scaler' not in state_dict:
print_rank_0('***WARNING*** found an old checkpoint, will not '
'load grad scaler ...')
else:
if self.grad_scaler:
self.grad_scaler.load_state_dict(state_dict['grad_scaler'])
else:
print_rank_0('***WARNING*** fould the grad scaler in the '
'checkpoint but it is None in the class. '
'Skipping loading grad scaler ...')
# Copy data for the main params.
fp32_from_float16_params_key = 'fp32_from_fp16_params'
if fp32_from_float16_params_key not in state_dict:
fp32_from_float16_params_key = 'fp32_from_fp16'
for current_group, saved_group in zip(
self.fp32_from_float16_groups,
state_dict[fp32_from_float16_params_key]):
for current_param, saved_param in zip(current_group, saved_group):
current_param.data.copy_(saved_param.data)
def get_parameters(self):
params = []
for param_group in self.optimizer.param_groups:
for param in param_group['params']:
params.append(param)
return params
def clip_grad_norm(self, clip_grad):
params = self.get_parameters()
return clip_grad_norm_fp32(params, clip_grad)
def count_zeros(self):
params = self.get_parameters()
return count_zeros_fp32(params)
def scale_loss(self, loss):
"""Simple scaling."""
return self.get_loss_scale() * loss
# Promote state so it can be retrieved or set via
# "optimizer_instance.state"
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via
# "optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups)
| [
"colossalai.utils.print_rank_0",
"colossalai.logging.get_dist_logger",
"colossalai.utils.clip_grad_norm_fp32",
"colossalai.core.global_context.get_group",
"colossalai.utils.copy_tensor_parallel_attributes",
"colossalai.utils.count_zeros_fp32",
"colossalai.utils.multi_tensor_applier"
] | [((16274, 16289), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16287, 16289), False, 'import torch\n'), ((1471, 1559), 'colossalai.utils.multi_tensor_applier', 'multi_tensor_applier', (['colossal_C.multi_tensor_scale', 'overflow_buf', '[this, that]', '(1.0)'], {}), '(colossal_C.multi_tensor_scale, overflow_buf, [this,\n that], 1.0)\n', (1491, 1559), False, 'from colossalai.utils import print_rank_0, copy_tensor_parallel_attributes, clip_grad_norm_fp32, count_zeros_fp32, multi_tensor_applier\n'), ((2156, 2195), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['[initial_scale]'], {}), '([initial_scale])\n', (2178, 2195), False, 'import torch\n'), ((2331, 2366), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['[min_scale]'], {}), '([min_scale])\n', (2353, 2366), False, 'import torch\n'), ((2483, 2522), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['[growth_factor]'], {}), '([growth_factor])\n', (2505, 2522), False, 'import torch\n'), ((2625, 2665), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['[backoff_factor]'], {}), '([backoff_factor])\n', (2647, 2665), False, 'import torch\n'), ((3309, 3326), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (3324, 3326), False, 'from colossalai.logging import get_dist_logger\n'), ((7111, 7128), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (7126, 7128), False, 'from colossalai.logging import get_dist_logger\n'), ((14590, 14698), 'torch._amp_foreach_non_finite_check_and_unscale_', 'torch._amp_foreach_non_finite_check_and_unscale_', (['main_grads', 'self.found_inf', 'self.grad_scaler.inv_scale'], {}), '(main_grads, self.found_inf,\n self.grad_scaler.inv_scale)\n', (14638, 14698), False, 'import torch\n'), ((19548, 19586), 'colossalai.utils.clip_grad_norm_fp32', 'clip_grad_norm_fp32', (['params', 'clip_grad'], {}), '(params, clip_grad)\n', (19567, 19586), False, 'from colossalai.utils import print_rank_0, copy_tensor_parallel_attributes, 
clip_grad_norm_fp32, count_zeros_fp32, multi_tensor_applier\n'), ((19669, 19693), 'colossalai.utils.count_zeros_fp32', 'count_zeros_fp32', (['params'], {}), '(params)\n', (19685, 19693), False, 'from colossalai.utils import print_rank_0, copy_tensor_parallel_attributes, clip_grad_norm_fp32, count_zeros_fp32, multi_tensor_applier\n'), ((5392, 5419), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (5417, 5419), False, 'import torch\n'), ((9057, 9086), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['[0.0]'], {}), '([0.0])\n', (9079, 9086), False, 'import torch\n'), ((9396, 9421), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['[0]'], {}), '([0])\n', (9416, 9421), False, 'import torch\n'), ((9559, 9588), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['[1.0]'], {}), '([1.0])\n', (9581, 9588), False, 'import torch\n'), ((18008, 18082), 'colossalai.utils.print_rank_0', 'print_rank_0', (['"""***WARNING*** loading optimizer from an old checkpoint ..."""'], {}), "('***WARNING*** loading optimizer from an old checkpoint ...')\n", (18020, 18082), False, 'from colossalai.utils import print_rank_0, copy_tensor_parallel_attributes, clip_grad_norm_fp32, count_zeros_fp32, multi_tensor_applier\n'), ((18257, 18346), 'colossalai.utils.print_rank_0', 'print_rank_0', (['"""***WARNING*** found an old checkpoint, will not load grad scaler ..."""'], {}), "(\n '***WARNING*** found an old checkpoint, will not load grad scaler ...')\n", (18269, 18346), False, 'from colossalai.utils import print_rank_0, copy_tensor_parallel_attributes, clip_grad_norm_fp32, count_zeros_fp32, multi_tensor_applier\n'), ((3883, 3943), 'torch.max', 'torch.max', (['(self._scale * self.backoff_factor)', 'self.min_scale'], {}), '(self._scale * self.backoff_factor, self.min_scale)\n', (3892, 3943), False, 'import torch\n'), ((14930, 14964), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (14943, 14964), 
True, 'from colossalai.core import global_context as gpc\n'), ((18527, 18668), 'colossalai.utils.print_rank_0', 'print_rank_0', (['"""***WARNING*** fould the grad scaler in the checkpoint but it is None in the class. Skipping loading grad scaler ..."""'], {}), "(\n '***WARNING*** fould the grad scaler in the checkpoint but it is None in the class. Skipping loading grad scaler ...'\n )\n", (18539, 18668), False, 'from colossalai.utils import print_rank_0, copy_tensor_parallel_attributes, clip_grad_norm_fp32, count_zeros_fp32, multi_tensor_applier\n'), ((10865, 10915), 'colossalai.utils.copy_tensor_parallel_attributes', 'copy_tensor_parallel_attributes', (['param', 'main_param'], {}), '(param, main_param)\n', (10896, 10915), False, 'from colossalai.utils import print_rank_0, copy_tensor_parallel_attributes, clip_grad_norm_fp32, count_zeros_fp32, multi_tensor_applier\n')] |
#Adapted from https://github.com/hpcaitech/ColossalAI-Examples/blob/main/language/gpt/model/gpt1d.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import math
from colossalai.utils.activation_checkpoint import checkpoint
import torch
from torch import nn as nn, Tensor
from colossalai.core import global_context as gpc
from colossalai.nn.layer.utils import divide, ACT2FN
from colossalai.utils import checkpoint
from colossalai.nn.layer import Linear1D_Col, Linear1D_Row
from colossalai.nn.layer.base_layer import ParallelLayer
from colossalai import kernel
from colossalai.kernel.cuda_native.scaled_softmax import AttnMaskType
from colossalai import nn as col_nn
class MLP1D(ParallelLayer):
def __init__(self,
in_features: int,
mlp_ratio: float,
act_func: str = 'gelu',
dropout_prob: float = 0.,
dtype=None,
checkpoint: bool = False,
skip_bias_add: bool = False,
):
super().__init__()
self.in_features = in_features
self.mlp_ratio = mlp_ratio
self.checkpoint = checkpoint
self.skip_bias_add = skip_bias_add
self.act = ACT2FN[act_func]
skip_dense_1_add_bias = False
# Project to mlp_ratio * h.
self.dense_1 = Linear1D_Col(
self.in_features,
int(self.mlp_ratio * self.in_features),
dtype=dtype,
gather_output=False,
skip_bias_add=skip_dense_1_add_bias,
)
# Project back to h.
self.dense_2 = Linear1D_Row(
int(self.mlp_ratio * self.in_features),
self.in_features,
dtype=dtype,
parallel_input=True,
)
self.dropout = col_nn.Dropout(dropout_prob)
def _forward(self, hidden_states: Tensor) -> Tensor:
intermediate_output = self.dense_1(hidden_states)
intermediate_output = self.act(intermediate_output)
output = self.dense_2(intermediate_output)
output = self.dropout(output)
return output
def _checkpoint_forward(self, hidden_states: Tensor) -> Tensor:
return checkpoint(self._forward, hidden_states)
def forward(self, hidden_states: Tensor) -> Tensor:
if self.checkpoint:
return self._checkpoint_forward(hidden_states)
else:
return self._forward(hidden_states)
class GenericSelfAttention1D(ParallelLayer):
def __init__(self,
hidden_size: int,
num_attention_heads: int,
attention_dropout_prob: float,
hidden_dropout_prob: float,
dtype=None,
checkpoint: bool = False,
max_position_embeddings=1024,
):
super().__init__()
self.hidden_size = hidden_size
self.attention_head_size = divide(hidden_size, num_attention_heads)
self.num_attention_heads_per_partition = divide(num_attention_heads, gpc.tensor_parallel_size)
self.hidden_size_per_partition = divide(hidden_size, gpc.tensor_parallel_size)
self.checkpoint = checkpoint
self.query_key_value = Linear1D_Col(
hidden_size,
3 * hidden_size,
dtype=dtype,
)
self.attention_dropout = col_nn.Dropout(attention_dropout_prob)
self.dense = Linear1D_Row(
hidden_size,
hidden_size,
dtype=dtype,
parallel_input=True,
)
self.dropout = col_nn.Dropout(hidden_dropout_prob)
def softmax_forward(self, attention_scores, attention_mask, query_layer, key_layer):
raise NotImplementedError
def _forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor:
query_key_value = self.query_key_value(hidden_states)
new_qkv_shape = query_key_value.shape[:-1] + \
(self.num_attention_heads_per_partition, 3 * self.attention_head_size)
query_key_value = query_key_value.view(new_qkv_shape)
query_key_value = query_key_value.permute((0, 2, 1, 3))
query_layer, key_layer, value_layer = torch.chunk(
query_key_value, 3, dim=-1)
attention_scores = torch.matmul(
query_layer, key_layer.transpose(-1, -2))
attention_scores = self.softmax_forward(attention_scores, attention_mask, query_layer, key_layer)
attention_scores = attention_scores.type(value_layer.dtype)
attention_probs = self.attention_dropout(attention_scores)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.transpose(1, 2)
new_context_layer_shape = context_layer.size()[
:-2] + (self.hidden_size_per_partition,)
context_layer = context_layer.reshape(new_context_layer_shape)
output = self.dense(context_layer)
output = self.dropout(output)
return output
def _checkpoint_forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor:
return checkpoint(self._forward, hidden_states, attention_mask)
def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor:
if self.checkpoint:
return self._checkpoint_forward(hidden_states, attention_mask)
else:
return self._forward(hidden_states, attention_mask)
class DeepNetSelfAttention1D(GenericSelfAttention1D):
def __init__(self, hidden_size: int, num_attention_heads: int, attention_dropout_prob: float, hidden_dropout_prob: float, dtype=None, checkpoint: bool = False, max_position_embeddings=1024):
super().__init__(hidden_size, num_attention_heads, attention_dropout_prob, hidden_dropout_prob,
dtype=dtype, checkpoint=checkpoint, max_position_embeddings=max_position_embeddings)
self.softmax = nn.Softmax(dim=-1)
max_positions = max_position_embeddings
self.register_buffer(
"bias",
torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
1, 1, max_positions, max_positions
),
)
self.register_buffer("masked_bias", torch.tensor(-1e4))
def softmax_forward(self, attention_scores, attention_mask, query_layer, key_layer):
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# causal mask
query_length, key_length = query_layer.size(-2), key_layer.size(-2)
causal_mask = self.bias[:, :, key_length - query_length: key_length, :key_length].bool()
attention_scores = torch.where(causal_mask, attention_scores, self.masked_bias.to(attention_scores))
if attention_mask is not None:
# Apply the attention mask
attention_scores = attention_scores + attention_mask
attention_scores = self.softmax(attention_scores)
return attention_scores
class FusedDeepNetSelfAttention1D(GenericSelfAttention1D):
def __init__(self, hidden_size: int, num_attention_heads: int, attention_dropout_prob: float, hidden_dropout_prob: float, dtype=None, checkpoint: bool = False, max_position_embeddings=1024):
super().__init__(hidden_size, num_attention_heads, attention_dropout_prob, hidden_dropout_prob,
dtype=dtype, checkpoint=checkpoint, max_position_embeddings=max_position_embeddings)
self.softmax = kernel.FusedScaleMaskSoftmax(input_in_fp16=True,
input_in_bf16=False,
attn_mask_type=AttnMaskType.causal,
scaled_masked_softmax_fusion=True,
mask_func=None,
softmax_in_fp32=True,
scale=math.sqrt(self.attention_head_size))
def softmax_forward(self, attention_scores, attention_mask, query_layer, key_layer):
return self.softmax(attention_scores, attention_mask)
class GenericDeepNetTransformerLayer1D(ParallelLayer):
def __init__(self,
hidden_size: int,
num_attention_heads: int,
act_func: str = 'gelu',
mlp_ratio: float = 4.0,
attention_dropout_prob: float = 0.,
hidden_dropout_prob: float = 0.,
dtype=None,
checkpoint: bool = False,
max_position_embeddings: int = 1024,
alpha: float = 1.0,
layer_norm_epsilon: float = 1e-5,
apply_post_layer_norm: bool = False,
attention=None,
layer_norm=None
):
super().__init__()
self.checkpoint = checkpoint
self.dtype = dtype
self.norm1 = layer_norm(hidden_size, eps=layer_norm_epsilon)
self.attention = attention(
hidden_size=hidden_size,
num_attention_heads=num_attention_heads,
attention_dropout_prob=attention_dropout_prob,
hidden_dropout_prob=hidden_dropout_prob,
dtype=dtype,
max_position_embeddings=max_position_embeddings,
checkpoint=False,
)
self.alpha = alpha
self.norm2 = layer_norm(hidden_size, eps=layer_norm_epsilon)
self.mlp = MLP1D(
in_features=hidden_size,
dropout_prob=hidden_dropout_prob,
act_func=act_func,
mlp_ratio=mlp_ratio,
dtype=dtype,
checkpoint=False,
)
def _forward(self, hidden_states, attention_mask) -> Tensor:
residual = hidden_states
attention_output = self.attention(hidden_states, attention_mask)
hidden_states = self.norm1(residual*self.alpha + attention_output)
residual = hidden_states
feed_forward_hidden_states = self.mlp(hidden_states)
hidden_states = self.norm2(residual*self.alpha + feed_forward_hidden_states)
output = (hidden_states, attention_mask)
return output
def forward(self, hidden_states, attention_mask):
if self.checkpoint:
return checkpoint(self._forward, hidden_states, attention_mask)
else:
return self._forward(hidden_states, attention_mask)
class DeepNetTransformerLayer1D(GenericDeepNetTransformerLayer1D):
def __init__(self, hidden_size: int, num_attention_heads: int, act_func: str = 'gelu',
mlp_ratio: float = 4, attention_dropout_prob: float = 0, hidden_dropout_prob: float = 0,
dtype=None, checkpoint: bool = False, max_position_embeddings: int = 1024,
layer_norm_epsilon: float = 0.00001, alpha: float = 1.0):
attention = DeepNetSelfAttention1D
layer_norm = nn.LayerNorm
super().__init__(hidden_size, num_attention_heads, act_func=act_func, mlp_ratio=mlp_ratio, attention_dropout_prob=attention_dropout_prob, hidden_dropout_prob=hidden_dropout_prob, dtype=dtype,
checkpoint=checkpoint, max_position_embeddings=max_position_embeddings, layer_norm_epsilon=layer_norm_epsilon, alpha=alpha, attention=attention, layer_norm=layer_norm)
class FusedDeepNetTransformerLayer1D(GenericDeepNetTransformerLayer1D):
def __init__(self, hidden_size: int, num_attention_heads: int, act_func: str = 'gelu',
mlp_ratio: float = 4, attention_dropout_prob: float = 0, hidden_dropout_prob: float = 0,
dtype=None, checkpoint: bool = False, max_position_embeddings: int = 1024,
layer_norm_epsilon: float = 0.00001, alpha: float = 1.0):
attention = FusedDeepNetSelfAttention1D
layer_norm = kernel.LayerNorm
super().__init__(hidden_size, num_attention_heads, act_func=act_func, mlp_ratio=mlp_ratio, attention_dropout_prob=attention_dropout_prob, hidden_dropout_prob=hidden_dropout_prob, dtype=dtype,
checkpoint=checkpoint, max_position_embeddings=max_position_embeddings, layer_norm_epsilon=layer_norm_epsilon, alpha=alpha, attention=attention, layer_norm=layer_norm)
| [
"colossalai.utils.checkpoint",
"colossalai.nn.layer.Linear1D_Col",
"colossalai.nn.Dropout",
"colossalai.nn.layer.Linear1D_Row",
"colossalai.nn.layer.utils.divide"
] | [((1840, 1868), 'colossalai.nn.Dropout', 'col_nn.Dropout', (['dropout_prob'], {}), '(dropout_prob)\n', (1854, 1868), True, 'from colossalai import nn as col_nn\n'), ((2252, 2292), 'colossalai.utils.checkpoint', 'checkpoint', (['self._forward', 'hidden_states'], {}), '(self._forward, hidden_states)\n', (2262, 2292), False, 'from colossalai.utils import checkpoint\n'), ((3001, 3041), 'colossalai.nn.layer.utils.divide', 'divide', (['hidden_size', 'num_attention_heads'], {}), '(hidden_size, num_attention_heads)\n', (3007, 3041), False, 'from colossalai.nn.layer.utils import divide, ACT2FN\n'), ((3092, 3145), 'colossalai.nn.layer.utils.divide', 'divide', (['num_attention_heads', 'gpc.tensor_parallel_size'], {}), '(num_attention_heads, gpc.tensor_parallel_size)\n', (3098, 3145), False, 'from colossalai.nn.layer.utils import divide, ACT2FN\n'), ((3188, 3233), 'colossalai.nn.layer.utils.divide', 'divide', (['hidden_size', 'gpc.tensor_parallel_size'], {}), '(hidden_size, gpc.tensor_parallel_size)\n', (3194, 3233), False, 'from colossalai.nn.layer.utils import divide, ACT2FN\n'), ((3304, 3359), 'colossalai.nn.layer.Linear1D_Col', 'Linear1D_Col', (['hidden_size', '(3 * hidden_size)'], {'dtype': 'dtype'}), '(hidden_size, 3 * hidden_size, dtype=dtype)\n', (3316, 3359), False, 'from colossalai.nn.layer import Linear1D_Col, Linear1D_Row\n'), ((3445, 3483), 'colossalai.nn.Dropout', 'col_nn.Dropout', (['attention_dropout_prob'], {}), '(attention_dropout_prob)\n', (3459, 3483), True, 'from colossalai import nn as col_nn\n'), ((3506, 3578), 'colossalai.nn.layer.Linear1D_Row', 'Linear1D_Row', (['hidden_size', 'hidden_size'], {'dtype': 'dtype', 'parallel_input': '(True)'}), '(hidden_size, hidden_size, dtype=dtype, parallel_input=True)\n', (3518, 3578), False, 'from colossalai.nn.layer import Linear1D_Col, Linear1D_Row\n'), ((3667, 3702), 'colossalai.nn.Dropout', 'col_nn.Dropout', (['hidden_dropout_prob'], {}), '(hidden_dropout_prob)\n', (3681, 3702), True, 'from colossalai import 
nn as col_nn\n'), ((4289, 4328), 'torch.chunk', 'torch.chunk', (['query_key_value', '(3)'], {'dim': '(-1)'}), '(query_key_value, 3, dim=-1)\n', (4300, 4328), False, 'import torch\n'), ((4719, 4761), 'torch.matmul', 'torch.matmul', (['attention_probs', 'value_layer'], {}), '(attention_probs, value_layer)\n', (4731, 4761), False, 'import torch\n'), ((5216, 5272), 'colossalai.utils.checkpoint', 'checkpoint', (['self._forward', 'hidden_states', 'attention_mask'], {}), '(self._forward, hidden_states, attention_mask)\n', (5226, 5272), False, 'from colossalai.utils import checkpoint\n'), ((6033, 6051), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (6043, 6051), True, 'from torch import nn as nn, Tensor\n'), ((6370, 6392), 'torch.tensor', 'torch.tensor', (['(-10000.0)'], {}), '(-10000.0)\n', (6382, 6392), False, 'import torch\n'), ((6529, 6564), 'math.sqrt', 'math.sqrt', (['self.attention_head_size'], {}), '(self.attention_head_size)\n', (6538, 6564), False, 'import math\n'), ((10520, 10576), 'colossalai.utils.checkpoint', 'checkpoint', (['self._forward', 'hidden_states', 'attention_mask'], {}), '(self._forward, hidden_states, attention_mask)\n', (10530, 10576), False, 'from colossalai.utils import checkpoint\n'), ((8114, 8149), 'math.sqrt', 'math.sqrt', (['self.attention_head_size'], {}), '(self.attention_head_size)\n', (8123, 8149), False, 'import math\n'), ((6177, 6238), 'torch.ones', 'torch.ones', (['(max_positions, max_positions)'], {'dtype': 'torch.uint8'}), '((max_positions, max_positions), dtype=torch.uint8)\n', (6187, 6238), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
from colossalai.constants import (DEPTH_3D, INPUT_GROUP_3D, OUTPUT_GROUP_3D,
WEIGHT_GROUP_3D)
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from torch import Tensor
def get_depth_from_env() -> int:
    """Read the 3D tensor-parallel depth from the environment.

    Returns:
        int: the positive depth stored under the ``DEPTH_3D`` variable.

    Raises:
        EnvironmentError: if ``DEPTH_3D`` is not set; chained to the
            original :class:`KeyError` so the failing lookup is visible.
    """
    try:
        depth = int(os.environ[DEPTH_3D])
        assert depth > 0, 'DEPTH must be greater than zero'
        return depth
    except KeyError as e:
        # Chain the cause instead of discarding it (original bound `e` but
        # never used it), so tracebacks show which env lookup failed.
        raise EnvironmentError(
            'DEPTH is not found in the current environment, '
            'please make sure that you have used the correct process group initializer'
        ) from e
def get_parallel_mode_from_env(group):
    """Resolve the :class:`ParallelMode` whose name is stored in env var ``group``."""
    mode_name = os.environ[group]
    return getattr(ParallelMode, mode_name)
def get_last_group(a, b):
    """Given two of the three 3D parallel groups, return the remaining one.

    Encodes each group as a letter and solves for the missing letter by
    character arithmetic; returns ``None`` for degenerate input (a == b).
    """
    letter_of = {
        ParallelMode.PARALLEL_3D_INPUT: 'A',
        ParallelMode.PARALLEL_3D_WEIGHT: 'B',
        ParallelMode.PARALLEL_3D_OUTPUT: 'C',
    }
    group_of = {
        'A': ParallelMode.PARALLEL_3D_INPUT,
        'B': ParallelMode.PARALLEL_3D_WEIGHT,
        'C': ParallelMode.PARALLEL_3D_OUTPUT,
    }
    missing = chr(ord('A') + ord('B') + ord('C')
                  - ord(letter_of[a]) - ord(letter_of[b]))
    # dict.get keeps the original behavior of implicitly returning None
    # when the arithmetic does not land on A/B/C.
    return group_of.get(missing)
def swap_in_out_group():
    """Exchange the environment entries for the 3D input and output groups."""
    input_group = os.environ[INPUT_GROUP_3D]
    output_group = os.environ[OUTPUT_GROUP_3D]
    os.environ[INPUT_GROUP_3D] = output_group
    os.environ[OUTPUT_GROUP_3D] = input_group
def dbg_check_shape(tensor: Tensor, shape: tuple):
    """Debug helper: print the shape on global rank 0 and assert it equals ``shape``."""
    if gpc.get_global_rank() == 0:
        print(tensor.shape)
    assert tensor.shape == shape, \
        '{} does not match {}'.format(tensor.shape, shape)
| [
"colossalai.core.global_context.get_global_rank"
] | [((1522, 1543), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (1541, 1543), True, 'from colossalai.core import global_context as gpc\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os.path as osp
import pytest
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.initialize import initialize
from colossalai.logging import get_global_dist_logger
# Synthetic workload dimensions for the pipeline-schedule test.
NUM_BATCH = 128
BATCH_SIZE = 32
SEQ_LENGTH = 128
HIDDEN_SIZE = 512
# Resolve the config path relative to this file so the test is cwd-independent.
DIR_PATH = osp.dirname(osp.realpath(__file__))
CONFIG_PATH = osp.join(DIR_PATH, '../configs/pipeline_vanilla_resnet.py')
@pytest.mark.skip("This test should be invoked using the test.sh provided")
@pytest.mark.dist
def test_schedule():
    """Run one pipelined train step end to end and log the loss on the last rank."""
    engine, train_dataloader, test_dataloader = initialize(CONFIG_PATH)
    logger = get_global_dist_logger()
    schedule = engine._schedule
    data_iter = iter(train_dataloader)
    output, label, loss = schedule.forward_backward_step(
        data_iter=data_iter,
        model=engine.model,
        optimizer=engine.optimizer,
        criterion=engine.criterion,
        forward_only=False
    )
    schedule.optimizer_step(engine.model, engine.optimizer)
    # Only the last pipeline stage holds the loss worth reporting.
    if gpc.is_last_rank(ParallelMode.PIPELINE):
        logger.info('losses: {}'.format(loss))
    gpc.destroy()
    logger.info('training finished')
# Allow running the test directly (outside pytest), e.g. via test.sh.
if __name__ == '__main__':
    test_schedule()
| [
"colossalai.initialize.initialize",
"colossalai.core.global_context.is_last_rank",
"colossalai.core.global_context.destroy",
"colossalai.logging.get_global_dist_logger"
] | [((411, 470), 'os.path.join', 'osp.join', (['DIR_PATH', '"""../configs/pipeline_vanilla_resnet.py"""'], {}), "(DIR_PATH, '../configs/pipeline_vanilla_resnet.py')\n", (419, 470), True, 'import os.path as osp\n'), ((474, 548), 'pytest.mark.skip', 'pytest.mark.skip', (['"""This test should be invoked using the test.sh provided"""'], {}), "('This test should be invoked using the test.sh provided')\n", (490, 548), False, 'import pytest\n'), ((373, 395), 'os.path.realpath', 'osp.realpath', (['__file__'], {}), '(__file__)\n', (385, 395), True, 'import os.path as osp\n'), ((636, 659), 'colossalai.initialize.initialize', 'initialize', (['CONFIG_PATH'], {}), '(CONFIG_PATH)\n', (646, 659), False, 'from colossalai.initialize import initialize\n'), ((673, 697), 'colossalai.logging.get_global_dist_logger', 'get_global_dist_logger', ([], {}), '()\n', (695, 697), False, 'from colossalai.logging import get_global_dist_logger\n'), ((1089, 1128), 'colossalai.core.global_context.is_last_rank', 'gpc.is_last_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (1105, 1128), True, 'from colossalai.core import global_context as gpc\n'), ((1182, 1195), 'colossalai.core.global_context.destroy', 'gpc.destroy', ([], {}), '()\n', (1193, 1195), True, 'from colossalai.core import global_context as gpc\n')] |
import torch
import torch.distributed as dist
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device
def send_tensor_meta(tensor, need_meta=True, next_rank=None):
    """Send a tensor's rank and shape ahead of the tensor itself.

    In p2p pipeline communication the receiver must know the incoming
    tensor's shape, so this sends (ndims, shape) first; it pairs with
    :func:`recv_tensor_meta`.

    :param tensor: Tensor to be sent
    :param need_meta: If False, nothing is sent
    :param next_rank: The rank of the next member in pipeline parallel group
    :type tensor: Tensor
    :type need_meta: bool, optional
    :type next_rank: int
    :return: False (meta no longer needs to be sent afterwards)
    :rtype: bool
    """
    if not need_meta:
        return False
    if next_rank is None:
        next_rank = gpc.get_next_global_rank(ParallelMode.PIPELINE)
    meta_kwargs = {'dtype': torch.long, 'device': get_current_device()}
    shape = tensor.size()
    ndims_tensor = torch.tensor(len(shape), **meta_kwargs)
    shape_tensor = torch.tensor(shape, **meta_kwargs)
    # Number of dims first, so the receiver knows how many ints to expect.
    dist.send(ndims_tensor, next_rank)
    dist.send(shape_tensor, next_rank)
    return False
def recv_tensor_meta(tensor_shape, prev_rank=None):
    """Receive a tensor's shape ahead of the tensor itself.

    Counterpart of :func:`send_tensor_meta`: if ``tensor_shape`` is not
    already known, receive (ndims, shape) from the previous pipeline stage.

    :param tensor_shape: The shape of the tensor to be received, or None
    :param prev_rank: The rank of the source of the tensor
    :type tensor_shape: torch.Size
    :type prev_rank: int, optional
    :return: The shape of the tensor to be received
    :rtype: torch.Size
    """
    if tensor_shape is not None:
        return tensor_shape
    if prev_rank is None:
        prev_rank = gpc.get_prev_global_rank(ParallelMode.PIPELINE)
    meta_kwargs = {'dtype': torch.long, 'device': get_current_device()}
    # First receive the number of dimensions, then the shape itself.
    ndims = torch.empty((), **meta_kwargs)
    dist.recv(ndims, prev_rank)
    shape_buf = torch.empty(ndims, **meta_kwargs)
    dist.recv(shape_buf, prev_rank)
    return torch.Size(shape_buf)
def split_tensor_into_1d_equal_chunks(tensor, new_buffer=False):
    """Break a tensor into equal 1D chunks and return this rank's chunk.

    When ``new_buffer`` is True the chunk is copied into a fresh tensor;
    otherwise a view into the flattened input is returned.
    """
    world_size = gpc.get_world_size(ParallelMode.PARALLEL_1D)
    rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)
    partition_size = torch.numel(tensor) // world_size
    start = rank * partition_size
    end = start + partition_size
    if not new_buffer:
        return tensor.view(-1)[start:end]
    data = torch.empty(partition_size, dtype=tensor.dtype,
                       device=torch.cuda.current_device(),
                       requires_grad=False)
    data.copy_(tensor.view(-1)[start:end])
    return data
def gather_split_1d_tensor(tensor):
    """Inverse of :func:`split_tensor_into_1d_equal_chunks`: gather every
    rank's chunk into one flat tensor."""
    world_size = gpc.get_world_size(ParallelMode.PARALLEL_1D)
    numel = torch.numel(tensor)
    gathered = torch.empty(world_size * numel, dtype=tensor.dtype,
                           device=torch.cuda.current_device(),
                           requires_grad=False)
    # Each rank's chunk lands in its own slice of the preallocated buffer.
    chunks = [gathered[r * numel:(r + 1) * numel] for r in range(world_size)]
    dist.all_gather(chunks, tensor, group=gpc.get_group(ParallelMode.PARALLEL_1D))
    return gathered
| [
"colossalai.core.global_context.get_prev_global_rank",
"colossalai.core.global_context.get_local_rank",
"colossalai.utils.get_current_device",
"colossalai.core.global_context.get_group",
"colossalai.core.global_context.get_world_size",
"colossalai.core.global_context.get_next_global_rank"
] | [((3203, 3247), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (3221, 3247), True, 'from colossalai.core import global_context as gpc\n'), ((3260, 3279), 'torch.numel', 'torch.numel', (['tensor'], {}), '(tensor)\n', (3271, 3279), False, 'import torch\n'), ((1208, 1240), 'torch.distributed.send', 'dist.send', (['send_ndims', 'next_rank'], {}), '(send_ndims, next_rank)\n', (1217, 1240), True, 'import torch.distributed as dist\n'), ((1249, 1281), 'torch.distributed.send', 'dist.send', (['send_shape', 'next_rank'], {}), '(send_shape, next_rank)\n', (1258, 1281), True, 'import torch.distributed as dist\n'), ((2160, 2192), 'torch.empty', 'torch.empty', (['()'], {}), '((), **tensor_kwargs)\n', (2171, 2192), False, 'import torch\n'), ((2201, 2233), 'torch.distributed.recv', 'dist.recv', (['recv_ndims', 'prev_rank'], {}), '(recv_ndims, prev_rank)\n', (2210, 2233), True, 'import torch.distributed as dist\n'), ((2255, 2295), 'torch.empty', 'torch.empty', (['recv_ndims'], {}), '(recv_ndims, **tensor_kwargs)\n', (2266, 2295), False, 'import torch\n'), ((2304, 2336), 'torch.distributed.recv', 'dist.recv', (['recv_shape', 'prev_rank'], {}), '(recv_shape, prev_rank)\n', (2313, 2336), True, 'import torch.distributed as dist\n'), ((2361, 2383), 'torch.Size', 'torch.Size', (['recv_shape'], {}), '(recv_shape)\n', (2371, 2383), False, 'import torch\n'), ((2544, 2563), 'torch.numel', 'torch.numel', (['tensor'], {}), '(tensor)\n', (2555, 2563), False, 'import torch\n'), ((2567, 2611), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (2585, 2611), True, 'from colossalai.core import global_context as gpc\n'), ((2647, 2691), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (2665, 2691), True, 'from colossalai.core 
import global_context as gpc\n'), ((935, 982), 'colossalai.core.global_context.get_next_global_rank', 'gpc.get_next_global_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (959, 982), True, 'from colossalai.core import global_context as gpc\n'), ((1040, 1060), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (1058, 1060), False, 'from colossalai.utils import get_current_device\n'), ((2011, 2058), 'colossalai.core.global_context.get_prev_global_rank', 'gpc.get_prev_global_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (2035, 2058), True, 'from colossalai.core import global_context as gpc\n'), ((2116, 2136), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (2134, 2136), False, 'from colossalai.utils import get_current_device\n'), ((3417, 3444), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (3442, 3444), False, 'import torch\n'), ((3608, 3647), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (3621, 3647), True, 'from colossalai.core import global_context as gpc\n'), ((2853, 2880), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (2878, 2880), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from typing import Any, Tuple
import torch
import torch.distributed as dist
from colossalai.communication import all_gather, reduce_scatter, scatter
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import empty_cache, get_current_device
from torch import Tensor
class Matmul_AB_3D(torch.autograd.Function):
    """3D tensor-parallel matrix multiplication :math:`C = AB`."""
    @staticmethod
    def forward(ctx: Any,
                A: Tensor,
                B: Tensor,
                depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = -1,
                output_dim: int = 0) -> Tensor:
        # A: [m/q^2, n, k/q]; B: [k/q, h/q^2]; C: [m/q^2, n, h/q]
        empty_cache()
        ctx.save_for_backward(A, B)
        assert A.shape[-1] == B.shape[0], \
            'Invalid shapes: A={}, B={}.'.format(A.shape, B.shape)
        # Gather the partitioned operands over their groups, multiply the
        # full blocks locally, then scatter the product over the output group.
        gathered_A = all_gather(A, input_dim, input_parallel_mode)
        gathered_B = all_gather(B, weight_dim, weight_parallel_mode)
        full_C = torch.matmul(gathered_A, gathered_B)
        out = reduce_scatter(full_C, output_dim, output_parallel_mode)
        # Record the group/dim layout so backward can route the gradients.
        ctx.depth = depth
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        ctx.A_dim = input_dim
        ctx.B_dim = weight_dim
        ctx.C_dim = output_dim
        return out
    @staticmethod
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        A, B = ctx.saved_tensors
        with torch.no_grad():
            # dA = dC @ B^T with the group roles rotated accordingly.
            A_grad = Matmul_ABT_3D.apply(output_grad, B, ctx.depth,
                                          ctx.C_group_parallel_mode,
                                          ctx.B_group_parallel_mode,
                                          ctx.A_group_parallel_mode, ctx.C_dim,
                                          ctx.B_dim, ctx.A_dim)
            # dB = A^T @ dC.
            B_grad = Matmul_ATB_3D.apply(A, output_grad, ctx.depth,
                                          ctx.A_group_parallel_mode,
                                          ctx.C_group_parallel_mode,
                                          ctx.B_group_parallel_mode, ctx.A_dim,
                                          ctx.C_dim, ctx.B_dim)
        return A_grad, B_grad, None, None, None, None, None, None, None
class Matmul_ABT_3D(torch.autograd.Function):
    """Matrix multiplication for :math:`C = AB^T`
    """
    @staticmethod
    def forward(ctx: Any,
                A: Tensor,
                B: Tensor,
                depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = -1,
                output_dim: int = 0) -> Tensor:
        # A: [m/q^2, n, h/q]
        # B: [k/q, h/q^2]
        # C: [m/q^2, n, k/q]
        empty_cache()
        ctx.save_for_backward(A, B)
        # Gather the full operands across their parallel groups, multiply
        # locally against B transposed, then scatter C over the output group.
        A_temp = all_gather(A, input_dim, input_parallel_mode)
        B_temp = all_gather(B, weight_dim, weight_parallel_mode)
        C = torch.matmul(A_temp, B_temp.transpose(0, 1))
        out = reduce_scatter(C, output_dim, output_parallel_mode)
        # Record the group/dim layout so backward can route gradients.
        ctx.depth = depth
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        ctx.A_dim = input_dim
        ctx.B_dim = weight_dim
        ctx.C_dim = output_dim
        return out
    @staticmethod
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        A, B = ctx.saved_tensors
        with torch.no_grad():
            # dA = dC @ B (group roles rotated), dB = dC^T @ A.
            A_grad = Matmul_AB_3D.apply(output_grad, B, ctx.depth,
                                         ctx.C_group_parallel_mode,
                                         ctx.B_group_parallel_mode,
                                         ctx.A_group_parallel_mode, ctx.C_dim,
                                         ctx.B_dim, ctx.A_dim)
            B_grad = Matmul_ATB_3D.apply(output_grad, A, ctx.depth,
                                          ctx.C_group_parallel_mode,
                                          ctx.A_group_parallel_mode,
                                          ctx.B_group_parallel_mode, ctx.C_dim,
                                          ctx.A_dim, ctx.B_dim)
        # Nine inputs to forward -> nine gradient slots; only A and B need grads.
        return A_grad, B_grad, None, None, None, None, None, None, None
class Matmul_ATB_3D(torch.autograd.Function):
    """Matrix multiplication for :math:`C = A^TB`
    """
    @staticmethod
    def forward(ctx: Any,
                A: Tensor,
                B: Tensor,
                depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = 0,
                output_dim: int = -1) -> Tensor:
        # A: [m/q^2, n, k/q]
        # B: [m/q^2, n, h/q]
        # C: [k/q, h/q^2]
        empty_cache()
        ctx.save_for_backward(A, B)
        # Gather the operands, flatten all leading dims so the transpose
        # contraction is a plain 2D matmul, then scatter C over the output group.
        A_temp = all_gather(A, input_dim, input_parallel_mode)
        A_temp = A_temp.reshape(-1, A.shape[-1])
        B_temp = all_gather(B, weight_dim, weight_parallel_mode)
        B_temp = B_temp.reshape(-1, B.shape[-1])
        C = torch.matmul(A_temp.transpose(0, 1), B_temp)
        out = reduce_scatter(C, output_dim, output_parallel_mode)
        # Record the group/dim layout so backward can route gradients.
        ctx.depth = depth
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        ctx.A_dim = input_dim
        ctx.B_dim = weight_dim
        ctx.C_dim = output_dim
        return out
    @staticmethod
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        A, B = ctx.saved_tensors
        with torch.no_grad():
            # dA = B @ dC^T, dB = A @ dC (group roles rotated accordingly).
            A_grad = Matmul_ABT_3D.apply(B, output_grad, ctx.depth,
                                          ctx.B_group_parallel_mode,
                                          ctx.C_group_parallel_mode,
                                          ctx.A_group_parallel_mode, ctx.B_dim,
                                          ctx.C_dim, ctx.A_dim)
            B_grad = Matmul_AB_3D.apply(A, output_grad, ctx.depth,
                                         ctx.A_group_parallel_mode,
                                         ctx.C_group_parallel_mode,
                                         ctx.B_group_parallel_mode, ctx.A_dim,
                                         ctx.C_dim, ctx.B_dim)
        # Nine inputs to forward -> nine gradient slots; only A and B need grads.
        return A_grad, B_grad, None, None, None, None, None, None, None
class Add_3D(torch.autograd.Function):
    """Matrix add bias: :math:`C = A + b`
    """
    @staticmethod
    def forward(ctx: Any, input_: Tensor, bias: Tensor, depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode) -> Tensor:
        # input: [m/q^2, n, h/q]
        # bias: [h/q^2]
        # Pick the source rank inside the input group indexed by this rank's
        # position in the output group, then broadcast the bias slice from it.
        ranks_in_group = gpc.get_ranks_in_group(input_parallel_mode)
        src_rank = ranks_in_group[gpc.get_local_rank(output_parallel_mode)]
        # Clone so the broadcast does not clobber the caller's parameter.
        bias_temp = bias.clone()
        dist.broadcast(bias_temp,
                       src=src_rank,
                       group=gpc.get_group(input_parallel_mode))
        # [h/q]
        bias_temp = all_gather(bias_temp, -1, weight_parallel_mode)
        out = input_ + bias_temp
        ctx.depth = depth
        ctx.src_rank = src_rank
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        return out
    @staticmethod
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # output_grad: [m/q^2, n, h/q]
        with torch.no_grad():
            # Bias grad: sum over all leading dims, then undo the forward
            # all_gather with a reduce_scatter and reduce back to src_rank.
            # [h/q]
            grad = torch.sum(output_grad,
                             dim=tuple(range(len(output_grad.shape))[:-1]))
            bias_grad = reduce_scatter(grad, -1, ctx.B_group_parallel_mode)
            dist.reduce(bias_grad,
                        dst=ctx.src_rank,
                        group=gpc.get_group(ctx.A_group_parallel_mode))
            # Only ranks whose input/output-group positions coincide hold a
            # valid bias gradient; all others report None.
            if gpc.get_local_rank(
                    ctx.A_group_parallel_mode) != gpc.get_local_rank(
                    ctx.C_group_parallel_mode):
                bias_grad = None
        return output_grad, bias_grad, None, None, None, None
class Mul_3D(torch.autograd.Function):
    """Matrix multiplication for :math:`C = A * b`
    """
    @staticmethod
    def forward(ctx: Any, input_: Tensor, bias: Tensor, depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode) -> Tensor:
        # input: [m/q^2, n, h/q]
        # bias: [h/q^2]
        # Same bias distribution scheme as Add_3D: broadcast from the rank in
        # the input group indexed by this rank's output-group position.
        ranks_in_group = gpc.get_ranks_in_group(input_parallel_mode)
        src_rank = ranks_in_group[gpc.get_local_rank(output_parallel_mode)]
        # [h/q^2]
        bias_temp = bias.clone()
        dist.broadcast(bias_temp,
                       src=src_rank,
                       group=gpc.get_group(input_parallel_mode))
        # [h/q]
        bias_temp = all_gather(bias_temp, -1, weight_parallel_mode)
        empty_cache()
        # Save the gathered bias (not the raw parameter): backward needs the
        # broadcast value to compute the elementwise product gradients.
        ctx.save_for_backward(input_, bias_temp)
        out = torch.mul(input_, bias_temp)
        ctx.depth = depth
        ctx.src_rank = src_rank
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        return out
    @staticmethod
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # output_grad: [m/q^2, n, h/q]
        with torch.no_grad():
            input_, bias = ctx.saved_tensors
            # Product rule: d(input) = dC * bias, d(bias) = sum(dC * input).
            # [m/q^2, n, h/q]
            input_grad = torch.mul(output_grad, bias)
            # [h/q]
            grad = torch.mul(output_grad, input_)
            grad = torch.sum(grad,
                             dim=tuple(range(len(output_grad.shape))[:-1]))
            bias_grad = reduce_scatter(grad, -1, ctx.B_group_parallel_mode)
            dist.reduce(bias_grad,
                        dst=ctx.src_rank,
                        group=gpc.get_group(ctx.A_group_parallel_mode))
            # Only ranks whose input/output-group positions coincide hold a
            # valid bias gradient; all others report None.
            if gpc.get_local_rank(
                    ctx.A_group_parallel_mode) != gpc.get_local_rank(
                    ctx.C_group_parallel_mode):
                bias_grad = None
        return input_grad, bias_grad, None, None, None, None
class Sum_3D(torch.autograd.Function):
    """Sum a tensor along ``dim`` and all-reduce the result over a parallel group."""
    @staticmethod
    def forward(ctx: Any,
                input_: Tensor,
                dim: int,
                depth: int,
                parallel_mode: ParallelMode,
                keepdim: bool = False) -> Tensor:
        # input: [m/q^2, n, h/q]
        out = torch.sum(input_, dim=dim, keepdim=keepdim)
        dist.all_reduce(out, group=gpc.get_group(parallel_mode))
        # Remember enough context to rebuild the input-shaped gradient.
        ctx.dim = dim
        ctx.depth = depth
        ctx.group = parallel_mode
        ctx.input_shape = input_.shape
        return out
    @staticmethod
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        with torch.no_grad():
            grad = output_grad.contiguous()
            dist.all_reduce(grad, group=gpc.get_group(ctx.group))
            # Re-insert the reduced axis if it was squeezed away, then tile
            # the gradient back out to the original input shape.
            if len(grad.shape) < len(ctx.input_shape):
                grad = torch.unsqueeze(grad, ctx.dim)
            repeats = [1] * len(grad.shape)
            repeats[ctx.dim] = ctx.input_shape[ctx.dim]
            input_grad = grad.repeat(tuple(repeats))
        return input_grad, None, None, None, None, None
class Reduce_3D(torch.autograd.Function):
    """All-reduce a tensor over a parallel group; gradients pass straight through."""
    @staticmethod
    def forward(ctx: Any, input_: Tensor, depth: int,
                parallel_mode: ParallelMode) -> Tensor:
        group = gpc.get_group(parallel_mode)
        dist.all_reduce(input_, group=group)
        return input_.clone()
    @staticmethod
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # Summation is linear, so the gradient flows through unchanged.
        return output_grad, None, None
class Slice_3D(torch.autograd.Function):
    """Keep only this rank's chunk of a tensor along ``dim``."""
    @staticmethod
    def forward(ctx: Any, input_: Tensor, dim: int, depth: int,
                parallel_mode: ParallelMode) -> Tensor:
        local_rank = gpc.get_local_rank(parallel_mode)
        chunks = torch.chunk(input_, depth, dim=dim)
        out = chunks[local_rank].contiguous()
        ctx.dim = dim
        ctx.depth = depth
        ctx.parallel_mode = parallel_mode
        ctx.input_shape = input_.shape
        return out
    @staticmethod
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        with torch.no_grad():
            input_grad = all_gather(output_grad, ctx.dim, ctx.parallel_mode)
            # NOTE(review): the reshape result is discarded (reshape is not
            # in-place) — matches the original code; confirm intent upstream.
            input_grad.reshape(ctx.input_shape)
        return input_grad, None, None, None
| [
"colossalai.utils.empty_cache",
"colossalai.core.global_context.get_local_rank",
"colossalai.communication.all_gather",
"colossalai.communication.reduce_scatter",
"colossalai.core.global_context.get_group",
"colossalai.core.global_context.get_ranks_in_group"
] | [((991, 1004), 'colossalai.utils.empty_cache', 'empty_cache', ([], {}), '()\n', (1002, 1004), False, 'from colossalai.utils import empty_cache, get_current_device\n'), ((1171, 1216), 'colossalai.communication.all_gather', 'all_gather', (['A', 'input_dim', 'input_parallel_mode'], {}), '(A, input_dim, input_parallel_mode)\n', (1181, 1216), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((1234, 1281), 'colossalai.communication.all_gather', 'all_gather', (['B', 'weight_dim', 'weight_parallel_mode'], {}), '(B, weight_dim, weight_parallel_mode)\n', (1244, 1281), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((1295, 1323), 'torch.matmul', 'torch.matmul', (['A_temp', 'B_temp'], {}), '(A_temp, B_temp)\n', (1307, 1323), False, 'import torch\n'), ((1338, 1389), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['C', 'output_dim', 'output_parallel_mode'], {}), '(C, output_dim, output_parallel_mode)\n', (1352, 1389), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((3217, 3230), 'colossalai.utils.empty_cache', 'empty_cache', ([], {}), '()\n', (3228, 3230), False, 'from colossalai.utils import empty_cache, get_current_device\n'), ((3285, 3330), 'colossalai.communication.all_gather', 'all_gather', (['A', 'input_dim', 'input_parallel_mode'], {}), '(A, input_dim, input_parallel_mode)\n', (3295, 3330), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((3348, 3395), 'colossalai.communication.all_gather', 'all_gather', (['B', 'weight_dim', 'weight_parallel_mode'], {}), '(B, weight_dim, weight_parallel_mode)\n', (3358, 3395), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((3468, 3519), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['C', 'output_dim', 'output_parallel_mode'], {}), '(C, output_dim, output_parallel_mode)\n', (3482, 3519), False, 'from 
colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((5342, 5355), 'colossalai.utils.empty_cache', 'empty_cache', ([], {}), '()\n', (5353, 5355), False, 'from colossalai.utils import empty_cache, get_current_device\n'), ((5410, 5455), 'colossalai.communication.all_gather', 'all_gather', (['A', 'input_dim', 'input_parallel_mode'], {}), '(A, input_dim, input_parallel_mode)\n', (5420, 5455), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((5522, 5569), 'colossalai.communication.all_gather', 'all_gather', (['B', 'weight_dim', 'weight_parallel_mode'], {}), '(B, weight_dim, weight_parallel_mode)\n', (5532, 5569), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((5691, 5742), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['C', 'output_dim', 'output_parallel_mode'], {}), '(C, output_dim, output_parallel_mode)\n', (5705, 5742), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((7389, 7432), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (7411, 7432), True, 'from colossalai.core import global_context as gpc\n'), ((7714, 7761), 'colossalai.communication.all_gather', 'all_gather', (['bias_temp', '(-1)', 'weight_parallel_mode'], {}), '(bias_temp, -1, weight_parallel_mode)\n', (7724, 7761), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((9245, 9288), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (9267, 9288), True, 'from colossalai.core import global_context as gpc\n'), ((9588, 9635), 'colossalai.communication.all_gather', 'all_gather', (['bias_temp', '(-1)', 'weight_parallel_mode'], {}), '(bias_temp, -1, weight_parallel_mode)\n', (9598, 9635), False, 'from colossalai.communication import all_gather, 
reduce_scatter, scatter\n'), ((9645, 9658), 'colossalai.utils.empty_cache', 'empty_cache', ([], {}), '()\n', (9656, 9658), False, 'from colossalai.utils import empty_cache, get_current_device\n'), ((9723, 9751), 'torch.mul', 'torch.mul', (['input_', 'bias_temp'], {}), '(input_, bias_temp)\n', (9732, 9751), False, 'import torch\n'), ((11299, 11342), 'torch.sum', 'torch.sum', (['input_'], {'dim': 'dim', 'keepdim': 'keepdim'}), '(input_, dim=dim, keepdim=keepdim)\n', (11308, 11342), False, 'import torch\n'), ((12816, 12849), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['parallel_mode'], {}), '(parallel_mode)\n', (12834, 12849), True, 'from colossalai.core import global_context as gpc\n'), ((1835, 1850), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1848, 1850), False, 'import torch\n'), ((3965, 3980), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3978, 3980), False, 'import torch\n'), ((6188, 6203), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6201, 6203), False, 'import torch\n'), ((7467, 7507), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['output_parallel_mode'], {}), '(output_parallel_mode)\n', (7485, 7507), True, 'from colossalai.core import global_context as gpc\n'), ((8187, 8202), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8200, 8202), False, 'import torch\n'), ((8366, 8417), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['grad', '(-1)', 'ctx.B_group_parallel_mode'], {}), '(grad, -1, ctx.B_group_parallel_mode)\n', (8380, 8417), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((9323, 9363), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['output_parallel_mode'], {}), '(output_parallel_mode)\n', (9341, 9363), True, 'from colossalai.core import global_context as gpc\n'), ((10143, 10158), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10156, 10158), False, 'import torch\n'), ((10260, 10288), 
'torch.mul', 'torch.mul', (['output_grad', 'bias'], {}), '(output_grad, bias)\n', (10269, 10288), False, 'import torch\n'), ((10328, 10358), 'torch.mul', 'torch.mul', (['output_grad', 'input_'], {}), '(output_grad, input_)\n', (10337, 10358), False, 'import torch\n'), ((10494, 10545), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['grad', '(-1)', 'ctx.B_group_parallel_mode'], {}), '(grad, -1, ctx.B_group_parallel_mode)\n', (10508, 10545), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((11652, 11667), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11665, 11667), False, 'import torch\n'), ((13172, 13187), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13185, 13187), False, 'import torch\n'), ((13214, 13265), 'colossalai.communication.all_gather', 'all_gather', (['output_grad', 'ctx.dim', 'ctx.parallel_mode'], {}), '(output_grad, ctx.dim, ctx.parallel_mode)\n', (13224, 13265), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((7642, 7676), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (7655, 7676), True, 'from colossalai.core import global_context as gpc\n'), ((8582, 8627), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (8600, 8627), True, 'from colossalai.core import global_context as gpc\n'), ((8648, 8693), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.C_group_parallel_mode'], {}), '(ctx.C_group_parallel_mode)\n', (8666, 8693), True, 'from colossalai.core import global_context as gpc\n'), ((9516, 9550), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (9529, 9550), True, 'from colossalai.core import global_context as gpc\n'), ((10710, 10755), 'colossalai.core.global_context.get_local_rank', 
'gpc.get_local_rank', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (10728, 10755), True, 'from colossalai.core import global_context as gpc\n'), ((10776, 10821), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.C_group_parallel_mode'], {}), '(ctx.C_group_parallel_mode)\n', (10794, 10821), True, 'from colossalai.core import global_context as gpc\n'), ((11378, 11406), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['parallel_mode'], {}), '(parallel_mode)\n', (11391, 11406), True, 'from colossalai.core import global_context as gpc\n'), ((11885, 11922), 'torch.unsqueeze', 'torch.unsqueeze', (['output_grad', 'ctx.dim'], {}), '(output_grad, ctx.dim)\n', (11900, 11922), False, 'import torch\n'), ((12397, 12425), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['parallel_mode'], {}), '(parallel_mode)\n', (12410, 12425), True, 'from colossalai.core import global_context as gpc\n'), ((8525, 8565), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (8538, 8565), True, 'from colossalai.core import global_context as gpc\n'), ((10653, 10693), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (10666, 10693), True, 'from colossalai.core import global_context as gpc\n'), ((11767, 11791), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ctx.group'], {}), '(ctx.group)\n', (11780, 11791), True, 'from colossalai.core import global_context as gpc\n'), ((12864, 12899), 'torch.chunk', 'torch.chunk', (['input_', 'depth'], {'dim': 'dim'}), '(input_, depth, dim=dim)\n', (12875, 12899), False, 'import torch\n')] |
import inspect
from pathlib import Path
from functools import partial
import torch
from torch.autograd.profiler import profile
import torch.distributed as dist
from torch.distributed import ReduceOp
from colossalai.utils import get_current_device
from .prof_utils import BaseProfiler, _format_time, _format_memory, _format_bandwidth
from typing import List, Optional
def _get_code_location(depth: int):
ret = []
length = min(len(inspect.stack()), depth + 1)
for i in range(3, length):
upper_frame = inspect.stack()[i]
function_name = inspect.stack()[i - 1].function
ret.append(upper_frame.filename)
ret.append('(')
ret.append(str(upper_frame.lineno))
ret.append('): ')
ret.append(function_name)
if i != length - 1:
ret.append('\n')
return ''.join(ret)
# Keep references to the original torch.distributed collectives so
# CommProfiler.enable() can monkey-patch them with profiling wrappers and
# CommProfiler.disable() can restore the originals.
torch_all_reduce = dist.all_reduce
torch_all_gather = dist.all_gather
torch_reduce_scatter = dist.reduce_scatter
torch_broadcast = dist.broadcast
torch_reduce = dist.reduce
class CommEvent(object):
    """Aggregated record of one kind of communication op on this rank:
    call count, communication volume, and CUDA time.
    """

    def __init__(self, count: int = 0, comm_vol: float = 0., cuda_time: int = 0):
        self.self_count = count
        self.self_comm_vol = comm_vol
        self.self_cuda_time = cuda_time

    def add(self, rhs):
        """Fold another event's totals into this one in place."""
        self.self_count = self.self_count + rhs.self_count
        self.self_comm_vol = self.self_comm_vol + rhs.self_comm_vol
        self.self_cuda_time = self.self_cuda_time + rhs.self_cuda_time
class CommProfiler(BaseProfiler):
    """Communication profiler. Records all communication events.

    Works by monkey-patching the ``torch.distributed`` collectives (see
    :meth:`enable`) with wrappers that time each call via
    ``torch.autograd.profiler`` and accumulate per-call-site statistics.
    """

    def __init__(self, depth: int = 0, total_count: int = 0, total_comm_vol: float = 0, total_cuda_time: int = 0):
        super().__init__(profiler_name="Collective_Communication", priority=0)
        # +3 skips the wrapper frames so reported code locations point at
        # user code (matches the frame offset inside _get_code_location)
        self.depth = 3 + depth
        self.total_count = total_count
        self.total_comm_vol = total_comm_vol
        self.total_cuda_time = total_cuda_time

        # maps code-location string -> CommEvent
        self.ops_record = dict()
        self.profiler = None
        self.pending_op = None
        self.pending_metadata = None
        # set when two dist ops overlap in time; results are then inaccurate
        self.warn_flag = False

    def reset(self):
        """Drop all accumulated statistics and any pending op state."""
        self.total_count = 0
        self.total_comm_vol = 0
        self.total_cuda_time = 0

        self.ops_record = dict()
        self.profiler = None
        self.pending_op = None
        self.pending_metadata = None
        self.warn_flag = False

    def enable(self):
        # replace the dist collectives with profiled wrappers bound to self
        dist.all_reduce = partial(all_reduce, profiler=self)
        dist.all_gather = partial(all_gather, profiler=self)
        dist.reduce_scatter = partial(reduce_scatter, profiler=self)
        dist.broadcast = partial(broadcast, profiler=self)
        dist.reduce = partial(reduce, profiler=self)

    def disable(self):
        # restore the original torch.distributed entry points
        dist.all_reduce = torch_all_reduce
        dist.all_gather = torch_all_gather
        dist.reduce_scatter = torch_reduce_scatter
        dist.broadcast = torch_broadcast
        dist.reduce = torch_reduce

    def to_tensorboard(self, writer):
        """Write the textual result into a TensorBoard text summary."""
        writer.add_text(tag="Collective Communication", text_string=self.result_str("\n\n"))

    def to_file(self, filename: Path):
        """Dump the textual result to ``filename``."""
        with open(filename, "w") as f:
            f.write(self.result_str())

    def show(self):
        """Print the textual result to stdout."""
        print(self.result_str())

    def result_str(self, sep: str = "\n"):
        """Render all recorded events as a formatted table string.

        :param sep: separator appended after every line
        """
        res = []

        def append(s: str = None):
            if s is not None:
                res.append(s)
            res.append(sep)

        if self.warn_flag:
            append("Warnning: there exists multiple communication operations in the same time. As a result, "
                   "the profiling result is not accurate.")

        if self.total_cuda_time == 0:
            return "No collective communication has been called yet!"

        append("Collective communication profiling result:")
        append("total cuda time: {}".format(_format_time(self.total_cuda_time)))
        append("average bandwidth: {}".format(_format_bandwidth(self.total_comm_vol, self.total_cuda_time)))
        append("total number of calls: {}".format(self.total_count))
        append("All events:")

        seperation = '-' * 74
        row_format = '{:^10}' + '{:^12}' * 2 + '{:^16}' + '{:^12}' * 2

        append(seperation)
        append(row_format.format('Location', 'GPU time', 'Percentage', 'Comm volume', 'Bandwidth', 'Num of calls'))
        append(seperation)

        # call sites sorted by descending CUDA time
        show_list = sorted(self.ops_record.items(), key=lambda kv: -kv[1].self_cuda_time)
        for location, event in show_list:
            append(location)
            append(
                row_format.format('', _format_time(event.self_cuda_time),
                                  '{:.1f}%'.format(event.self_cuda_time / self.total_cuda_time * 100.0),
                                  _format_memory(event.self_comm_vol),
                                  _format_bandwidth(event.self_comm_vol, event.self_cuda_time), event.self_count))

        append()
        return ''.join(res)

    @property
    def has_aync_op(self):
        # True while an async dist op is still in flight
        return self.pending_op is not None

    def activate_profiler(self, kn: str, vol: float):
        """Start a torch profiler run for the upcoming dist op.

        :param kn: NCCL kernel-name substring used to find the event later
        :param vol: communication volume of the op, in bytes
        """
        self.pending_metadata = (kn, _get_code_location(self.depth), vol)
        self.profiler = profile(enabled=True, use_cuda=True, use_cpu=True, use_kineto=True)
        self.profiler.__enter__()

    def close_profiler(self, group=None):
        """Stop the torch profiler and fold the finished op into the records."""
        assert self.profiler is not None, "There is no running dist op"
        kernel_name, code_location, vol = self.pending_metadata
        self.profiler.__exit__(None, None, None)

        # single-process groups do no real communication; skip them
        if self.profiler.enabled and dist.get_world_size(group) > 1:
            assert_flag = 0
            current_comm_event = None
            events = self.profiler.function_events
            for event in events:
                if kernel_name in event.name:
                    assert assert_flag == 0, "Multiple dist ops has been called "
                    current_comm_event = CommEvent(1, vol, event.self_cuda_time_total)
                    assert_flag += 1

            assert current_comm_event is not None, "dist op has not been found"

            # agree on the minimum recorded kernel time across ranks
            buffer = torch.tensor([current_comm_event.self_cuda_time], device=get_current_device())
            torch_all_reduce(buffer, op=ReduceOp.MIN, group=group)
            current_comm_event.self_cuda_time = buffer.item()

            self.total_count += current_comm_event.self_count
            self.total_comm_vol += current_comm_event.self_comm_vol
            self.total_cuda_time += current_comm_event.self_cuda_time
            if code_location in self.ops_record:
                self.ops_record[code_location].add(current_comm_event)
            else:
                self.ops_record[code_location] = current_comm_event

        self.profiler = None
        self.pending_op = None
        self.pending_metadata = None

    def wait_async_op(self):
        """Block on the pending async op, then close its profiler run."""
        if self.pending_op is not None:
            op = self.pending_op
            op.wait()
            self.close_profiler()
class CommHandler(object):
    """Dummy async handle: ``wait()`` blocks until the owning profiler
    has flushed the pending communication op.
    """

    def __init__(self, profiler: CommProfiler):
        super().__init__()
        self.prof = profiler

    def wait(self):
        """Wait for the profiler's pending async operation to finish."""
        self.prof.wait_async_op()
def async_check(profiler: CommProfiler):
    """If a previous async op is still pending on ``profiler``, mark the
    overlap warning and wait for that op before the next one starts."""
    if profiler.pending_op is None:
        return
    profiler.warn_flag = True
    profiler.wait_async_op()
def all_reduce(tensor: torch.Tensor,
               op: ReduceOp = ReduceOp.SUM,
               group=None,
               async_op: bool = False,
               profiler: CommProfiler = None) -> Optional[CommHandler]:
    """Profiled drop-in replacement for ``dist.all_reduce``."""
    async_check(profiler)

    world_size = dist.get_world_size(group)
    # a ring all-reduce moves 2*(N-1)/N of the tensor through each rank
    comm_vol = 2 * (world_size - 1) / world_size * tensor.element_size() * tensor.numel()
    profiler.activate_profiler("ncclKernel_AllReduce_", comm_vol)
    profiler.pending_op = torch_all_reduce(tensor, op, group, async_op)

    if async_op:
        return CommHandler(profiler)
    profiler.close_profiler(group)
def reduce_scatter(output: torch.Tensor,
                   input_list: List[torch.Tensor],
                   op: ReduceOp = ReduceOp.SUM,
                   group=None,
                   async_op: bool = False,
                   profiler: CommProfiler = None) -> Optional[CommHandler]:
    """Profiled drop-in replacement for ``dist.reduce_scatter``."""
    async_check(profiler)

    world_size = dist.get_world_size(group)
    total_bytes = 0
    for chunk in input_list:
        total_bytes += chunk.element_size() * chunk.numel()
    # a ring reduce-scatter moves (N-1)/N of the total payload per rank
    comm_vol = total_bytes * ((world_size - 1) / world_size)
    profiler.activate_profiler("ncclKernel_ReduceScatter_", comm_vol)
    profiler.pending_op = torch_reduce_scatter(output, input_list, op, group, async_op)

    if async_op:
        return CommHandler(profiler)
    profiler.close_profiler(group)
def all_gather(tensor_list: List[torch.Tensor],
               tensor: torch.Tensor,
               group=None,
               async_op: bool = False,
               profiler: CommProfiler = None) -> Optional[CommHandler]:
    """Profiled drop-in replacement for ``dist.all_gather``."""
    async_check(profiler)

    world_size = dist.get_world_size(group)
    total_bytes = 0
    for out in tensor_list:
        total_bytes += out.element_size() * out.numel()
    # a ring all-gather moves (N-1)/N of the gathered payload per rank
    comm_vol = total_bytes * ((world_size - 1) / world_size)
    profiler.activate_profiler("ncclKernel_AllGather_", comm_vol)
    profiler.pending_op = torch_all_gather(tensor_list, tensor, group, async_op)

    if async_op:
        return CommHandler(profiler)
    profiler.close_profiler(group)
def broadcast(tensor: torch.Tensor,
              src: int,
              group=None,
              async_op: bool = False,
              profiler: CommProfiler = None) -> Optional[CommHandler]:
    """Profiled drop-in replacement for ``dist.broadcast``."""
    async_check(profiler)

    # every rank receives the whole tensor once
    comm_vol = 1.0 * tensor.element_size() * tensor.numel()
    profiler.activate_profiler("ncclKernel_Broadcast_", comm_vol)
    profiler.pending_op = torch_broadcast(tensor, src, group, async_op)

    if not async_op:
        profiler.close_profiler(group)
        return None
    return CommHandler(profiler)
def reduce(tensor: torch.Tensor,
           dst: int,
           op: ReduceOp = ReduceOp.SUM,
           group=None,
           async_op: bool = False,
           profiler: CommProfiler = None) -> Optional[CommHandler]:
    """Profiled drop-in replacement for ``dist.reduce``."""
    async_check(profiler)

    # each rank contributes the whole tensor once
    comm_vol = 1.0 * tensor.element_size() * tensor.numel()
    profiler.activate_profiler("ncclKernel_Reduce_", comm_vol)
    profiler.pending_op = torch_reduce(tensor, dst, op, group, async_op)

    if not async_op:
        profiler.close_profiler(group)
        return None
    return CommHandler(profiler)
| [
"colossalai.utils.get_current_device"
] | [((7912, 7938), 'torch.distributed.get_world_size', 'dist.get_world_size', (['group'], {}), '(group)\n', (7931, 7938), True, 'import torch.distributed as dist\n'), ((8639, 8665), 'torch.distributed.get_world_size', 'dist.get_world_size', (['group'], {}), '(group)\n', (8658, 8665), True, 'import torch.distributed as dist\n'), ((9383, 9409), 'torch.distributed.get_world_size', 'dist.get_world_size', (['group'], {}), '(group)\n', (9402, 9409), True, 'import torch.distributed as dist\n'), ((2547, 2581), 'functools.partial', 'partial', (['all_reduce'], {'profiler': 'self'}), '(all_reduce, profiler=self)\n', (2554, 2581), False, 'from functools import partial\n'), ((2609, 2643), 'functools.partial', 'partial', (['all_gather'], {'profiler': 'self'}), '(all_gather, profiler=self)\n', (2616, 2643), False, 'from functools import partial\n'), ((2675, 2713), 'functools.partial', 'partial', (['reduce_scatter'], {'profiler': 'self'}), '(reduce_scatter, profiler=self)\n', (2682, 2713), False, 'from functools import partial\n'), ((2740, 2773), 'functools.partial', 'partial', (['broadcast'], {'profiler': 'self'}), '(broadcast, profiler=self)\n', (2747, 2773), False, 'from functools import partial\n'), ((2797, 2827), 'functools.partial', 'partial', (['reduce'], {'profiler': 'self'}), '(reduce, profiler=self)\n', (2804, 2827), False, 'from functools import partial\n'), ((5385, 5452), 'torch.autograd.profiler.profile', 'profile', ([], {'enabled': '(True)', 'use_cuda': '(True)', 'use_cpu': '(True)', 'use_kineto': '(True)'}), '(enabled=True, use_cuda=True, use_cpu=True, use_kineto=True)\n', (5392, 5452), False, 'from torch.autograd.profiler import profile\n'), ((453, 468), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (466, 468), False, 'import inspect\n'), ((537, 552), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (550, 552), False, 'import inspect\n'), ((581, 596), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (594, 596), False, 'import inspect\n'), ((5761, 
5787), 'torch.distributed.get_world_size', 'dist.get_world_size', (['group'], {}), '(group)\n', (5780, 5787), True, 'import torch.distributed as dist\n'), ((6367, 6387), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (6385, 6387), False, 'from colossalai.utils import get_current_device\n')] |
import contextlib
import os
import colossalai
import torch
from colossalai.core import global_context as gpc
from colossalai.engine.schedule import (InterleavedPipelineSchedule, PipelineSchedule)
from colossalai.logging import get_dist_logger
from colossalai.nn import CosineAnnealingWarmupLR
from colossalai.trainer import Trainer, hooks
from colossalai.utils import MultiTimer, get_dataloader
from colossalai.zero import zero3_model_context
from model_zoo.gpt import GPTLMLoss, gpt2_small, gpt2_medium, gpt2_large, gpt2_xl
from data import WebtextDataset
def train_gpt():
    """Train a GPT-2 medium model on the Webtext dataset with colossalai.

    Reads the config path from the CLI args (torchrun launch) and the
    dataset path from the ``DATA`` environment variable.
    """
    args = colossalai.get_default_parser().parse_args()
    # standard launch
    # colossalai.launch(config=args.config,
    #                   rank=args.rank,
    #                   world_size=args.world_size,
    #                   local_rank=args.local_rank,
    #                   host=args.host,
    #                   port=args.port)

    # launch from torchrun
    colossalai.launch_from_torch(config=args.config)

    logger = get_dist_logger()
    # only global rank 0 creates the log directory; every rank then logs to it
    if hasattr(gpc.config, 'LOG_PATH'):
        if gpc.get_global_rank() == 0:
            log_path = gpc.config.LOG_PATH
            if not os.path.exists(log_path):
                os.mkdir(log_path)
            logger.log_to_file(log_path)

    train_dataset = WebtextDataset(os.environ['DATA'], seq_len=gpc.config.SEQ_LENGTH)
    # per-process batch size = global batch size / data-parallel world size
    train_dataloader = get_dataloader(train_dataset,
                                    seed=42,
                                    batch_size=gpc.config.BATCH_SIZE // gpc.data_parallel_size,
                                    pin_memory=True,
                                    shuffle=True,
                                    drop_last=True)
    logger.info(f'Loaded {len(train_dataset)}/{len(train_dataloader)} samples/batches', ranks=[0])

    # zero3 under test
    # use_zero3 = hasattr(gpc.config, 'zero') and gpc.config.zero.level == 3
    # cm = zero3_model_context() if use_zero3 else contextlib.nullcontext()
    # with cm:
    #     model = gpc.config.model.pop('type')(**gpc.config.model)
    # checkpoint=True enables activation checkpointing to save memory
    model = gpt2_medium(vocab_size=gpc.config.VOCAB_SIZE,
                        max_position_embeddings=gpc.config.SEQ_LENGTH,
                        checkpoint=True)

    criterion = GPTLMLoss()

    optimizer = torch.optim.Adam(model.parameters(), lr=0.00015, weight_decay=1e-2)

    # optimizer steps per epoch, accounting for gradient accumulation
    steps_per_epoch = len(train_dataloader) // gpc.config.gradient_accumulation

    lr_scheduler = CosineAnnealingWarmupLR(optimizer=optimizer,
                                         total_steps=gpc.config.NUM_EPOCHS * steps_per_epoch,
                                         warmup_steps=gpc.config.WARMUP_EPOCHS * steps_per_epoch,
                                         eta_min=1e-5)

    # wraps model/optimizer/dataloader according to the parallel config
    engine, train_dataloader, _, lr_scheduler = colossalai.initialize(model=model,
                                                                      optimizer=optimizer,
                                                                      criterion=criterion,
                                                                      train_dataloader=train_dataloader,
                                                                      lr_scheduler=lr_scheduler)

    # pipeline under test
    # num_model_chunks = getattr(gpc.config.model, 'num_chunks', 1)
    # if num_model_chunks > 1:
    #     logger.info('Build InterleavedPipelineSchedule', ranks=[0])
    #     schedule = InterleavedPipelineSchedule(gpc.config.NUM_MICRO_BATCHES, num_model_chunks)
    # else:
    #     logger.info('Build PipelineSchedule', ranks=[0])
    #     schedule = PipelineSchedule(gpc.config.NUM_MICRO_BATCHES)

    timer = MultiTimer()

    trainer = Trainer(engine=engine, logger=logger, timer=timer)

    # LR is stepped per iteration (by_epoch=False) to match warmup_steps above
    hook_list = [
        hooks.LogMetricByEpochHook(logger=logger),
        hooks.LogMetricByStepHook(),
        hooks.LossHook(),
        hooks.ThroughputHook(),
        hooks.LRSchedulerHook(lr_scheduler=lr_scheduler, by_epoch=False),
        # hooks.TensorboardHook(log_dir='./tb_logs', ranks=[0]),
        # hooks.LogMemoryByEpochHook(logger),
        # hooks.LogTimingByEpochHook(timer, logger),
        # hooks.SaveCheckpointHook(checkpoint_dir='./ckpt')
    ]

    logger.info("Training start", ranks=[0])
    trainer.fit(train_dataloader=train_dataloader, epochs=gpc.config.NUM_EPOCHS, hooks=hook_list, display_progress=True)
# Entry point when the script is run directly (e.g. under torchrun).
if __name__ == '__main__':
    train_gpt()
| [
"colossalai.trainer.hooks.ThroughputHook",
"colossalai.trainer.Trainer",
"colossalai.trainer.hooks.LogMetricByStepHook",
"colossalai.trainer.hooks.LossHook",
"colossalai.logging.get_dist_logger",
"colossalai.initialize",
"colossalai.launch_from_torch",
"colossalai.utils.MultiTimer",
"colossalai.trai... | [((984, 1032), 'colossalai.launch_from_torch', 'colossalai.launch_from_torch', ([], {'config': 'args.config'}), '(config=args.config)\n', (1012, 1032), False, 'import colossalai\n'), ((1049, 1066), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (1064, 1066), False, 'from colossalai.logging import get_dist_logger\n'), ((1339, 1404), 'data.WebtextDataset', 'WebtextDataset', (["os.environ['DATA']"], {'seq_len': 'gpc.config.SEQ_LENGTH'}), "(os.environ['DATA'], seq_len=gpc.config.SEQ_LENGTH)\n", (1353, 1404), False, 'from data import WebtextDataset\n'), ((1429, 1578), 'colossalai.utils.get_dataloader', 'get_dataloader', (['train_dataset'], {'seed': '(42)', 'batch_size': '(gpc.config.BATCH_SIZE // gpc.data_parallel_size)', 'pin_memory': '(True)', 'shuffle': '(True)', 'drop_last': '(True)'}), '(train_dataset, seed=42, batch_size=gpc.config.BATCH_SIZE //\n gpc.data_parallel_size, pin_memory=True, shuffle=True, drop_last=True)\n', (1443, 1578), False, 'from colossalai.utils import MultiTimer, get_dataloader\n'), ((2150, 2264), 'model_zoo.gpt.gpt2_medium', 'gpt2_medium', ([], {'vocab_size': 'gpc.config.VOCAB_SIZE', 'max_position_embeddings': 'gpc.config.SEQ_LENGTH', 'checkpoint': '(True)'}), '(vocab_size=gpc.config.VOCAB_SIZE, max_position_embeddings=gpc.\n config.SEQ_LENGTH, checkpoint=True)\n', (2161, 2264), False, 'from model_zoo.gpt import GPTLMLoss, gpt2_small, gpt2_medium, gpt2_large, gpt2_xl\n'), ((2329, 2340), 'model_zoo.gpt.GPTLMLoss', 'GPTLMLoss', ([], {}), '()\n', (2338, 2340), False, 'from model_zoo.gpt import GPTLMLoss, gpt2_small, gpt2_medium, gpt2_large, gpt2_xl\n'), ((2533, 2711), 'colossalai.nn.CosineAnnealingWarmupLR', 'CosineAnnealingWarmupLR', ([], {'optimizer': 'optimizer', 'total_steps': '(gpc.config.NUM_EPOCHS * steps_per_epoch)', 'warmup_steps': '(gpc.config.WARMUP_EPOCHS * steps_per_epoch)', 'eta_min': '(1e-05)'}), '(optimizer=optimizer, total_steps=gpc.config.\n NUM_EPOCHS * steps_per_epoch, 
warmup_steps=gpc.config.WARMUP_EPOCHS *\n steps_per_epoch, eta_min=1e-05)\n', (2556, 2711), False, 'from colossalai.nn import CosineAnnealingWarmupLR\n'), ((2885, 3027), 'colossalai.initialize', 'colossalai.initialize', ([], {'model': 'model', 'optimizer': 'optimizer', 'criterion': 'criterion', 'train_dataloader': 'train_dataloader', 'lr_scheduler': 'lr_scheduler'}), '(model=model, optimizer=optimizer, criterion=criterion,\n train_dataloader=train_dataloader, lr_scheduler=lr_scheduler)\n', (2906, 3027), False, 'import colossalai\n'), ((3764, 3776), 'colossalai.utils.MultiTimer', 'MultiTimer', ([], {}), '()\n', (3774, 3776), False, 'from colossalai.utils import MultiTimer, get_dataloader\n'), ((3794, 3844), 'colossalai.trainer.Trainer', 'Trainer', ([], {'engine': 'engine', 'logger': 'logger', 'timer': 'timer'}), '(engine=engine, logger=logger, timer=timer)\n', (3801, 3844), False, 'from colossalai.trainer import Trainer, hooks\n'), ((3875, 3916), 'colossalai.trainer.hooks.LogMetricByEpochHook', 'hooks.LogMetricByEpochHook', ([], {'logger': 'logger'}), '(logger=logger)\n', (3901, 3916), False, 'from colossalai.trainer import Trainer, hooks\n'), ((3927, 3954), 'colossalai.trainer.hooks.LogMetricByStepHook', 'hooks.LogMetricByStepHook', ([], {}), '()\n', (3952, 3954), False, 'from colossalai.trainer import Trainer, hooks\n'), ((3965, 3981), 'colossalai.trainer.hooks.LossHook', 'hooks.LossHook', ([], {}), '()\n', (3979, 3981), False, 'from colossalai.trainer import Trainer, hooks\n'), ((3992, 4014), 'colossalai.trainer.hooks.ThroughputHook', 'hooks.ThroughputHook', ([], {}), '()\n', (4012, 4014), False, 'from colossalai.trainer import Trainer, hooks\n'), ((4025, 4089), 'colossalai.trainer.hooks.LRSchedulerHook', 'hooks.LRSchedulerHook', ([], {'lr_scheduler': 'lr_scheduler', 'by_epoch': '(False)'}), '(lr_scheduler=lr_scheduler, by_epoch=False)\n', (4046, 4089), False, 'from colossalai.trainer import Trainer, hooks\n'), ((607, 638), 'colossalai.get_default_parser', 
'colossalai.get_default_parser', ([], {}), '()\n', (636, 638), False, 'import colossalai\n'), ((1120, 1141), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (1139, 1141), True, 'from colossalai.core import global_context as gpc\n'), ((1212, 1236), 'os.path.exists', 'os.path.exists', (['log_path'], {}), '(log_path)\n', (1226, 1236), False, 'import os\n'), ((1255, 1273), 'os.mkdir', 'os.mkdir', (['log_path'], {}), '(log_path)\n', (1263, 1273), False, 'import os\n')] |
import click
from .utils import *
from .benchmark import run_benchmark
from colossalai.context import Config
__all__ = ['benchmark']
@click.command()
@click.option("-g", "--gpus", type=int, default=None, help="Total number of devices to use.")
@click.option("-b", "--batch_size", type=int, default=8, help="Batch size of the input tensor.")
@click.option("-s", "--seq_len", type=int, default=512, help="Sequence length of the input tensor.")
@click.option("-d", "--dimension", type=int, default=1024, help="Hidden dimension of the input tensor.")
@click.option("-w", "--warmup_steps", type=int, default=10, help="The number of warmup steps.")
@click.option("-p", "--profile_steps", type=int, default=50, help="The number of profiling steps.")
@click.option("-l", "--layers", type=int, default=2)
@click.option("-m",
              "--model",
              type=click.Choice(['mlp'], case_sensitive=False),
              default='mlp',
              help="Select the model to benchmark, currently only supports MLP")
def benchmark(gpus: int, batch_size: int, seq_len: int, dimension: int, warmup_steps: int, profile_steps: int,
              layers: int, model: str):
    # NOTE: no docstring on purpose -- click would surface it as the
    # command's --help text and change the CLI output.
    # locals() here captures exactly the CLI parameters above; keep this
    # call first so no other local variables leak into the config.
    args_dict = locals()
    args = Config(args_dict)
    run_benchmark(args)
| [
"colossalai.context.Config"
] | [((138, 153), 'click.command', 'click.command', ([], {}), '()\n', (151, 153), False, 'import click\n'), ((155, 252), 'click.option', 'click.option', (['"""-g"""', '"""--gpus"""'], {'type': 'int', 'default': 'None', 'help': '"""Total number of devices to use."""'}), "('-g', '--gpus', type=int, default=None, help=\n 'Total number of devices to use.')\n", (167, 252), False, 'import click\n'), ((249, 349), 'click.option', 'click.option', (['"""-b"""', '"""--batch_size"""'], {'type': 'int', 'default': '(8)', 'help': '"""Batch size of the input tensor."""'}), "('-b', '--batch_size', type=int, default=8, help=\n 'Batch size of the input tensor.')\n", (261, 349), False, 'import click\n'), ((346, 450), 'click.option', 'click.option', (['"""-s"""', '"""--seq_len"""'], {'type': 'int', 'default': '(512)', 'help': '"""Sequence length of the input tensor."""'}), "('-s', '--seq_len', type=int, default=512, help=\n 'Sequence length of the input tensor.')\n", (358, 450), False, 'import click\n'), ((447, 555), 'click.option', 'click.option', (['"""-d"""', '"""--dimension"""'], {'type': 'int', 'default': '(1024)', 'help': '"""Hidden dimension of the input tensor."""'}), "('-d', '--dimension', type=int, default=1024, help=\n 'Hidden dimension of the input tensor.')\n", (459, 555), False, 'import click\n'), ((552, 651), 'click.option', 'click.option', (['"""-w"""', '"""--warmup_steps"""'], {'type': 'int', 'default': '(10)', 'help': '"""The number of warmup steps."""'}), "('-w', '--warmup_steps', type=int, default=10, help=\n 'The number of warmup steps.')\n", (564, 651), False, 'import click\n'), ((648, 751), 'click.option', 'click.option', (['"""-p"""', '"""--profile_steps"""'], {'type': 'int', 'default': '(50)', 'help': '"""The number of profiling steps."""'}), "('-p', '--profile_steps', type=int, default=50, help=\n 'The number of profiling steps.')\n", (660, 751), False, 'import click\n'), ((748, 799), 'click.option', 'click.option', (['"""-l"""', '"""--layers"""'], {'type': 
'int', 'default': '(2)'}), "('-l', '--layers', type=int, default=2)\n", (760, 799), False, 'import click\n'), ((1206, 1223), 'colossalai.context.Config', 'Config', (['args_dict'], {}), '(args_dict)\n', (1212, 1223), False, 'from colossalai.context import Config\n'), ((864, 907), 'click.Choice', 'click.Choice', (["['mlp']"], {'case_sensitive': '(False)'}), "(['mlp'], case_sensitive=False)\n", (876, 907), False, 'import click\n')] |
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.logging import get_dist_logger
from colossalai.testing import parameterize
from colossalai.utils import free_port
from colossalai.context import MOE_CONTEXT
from colossalai.nn.layer import MoeModule
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)
from colossalai.testing import rerun_on_exception
from colossalai.utils import get_current_device
from tests.test_zero_data_parallel.common import CONFIG
class MoeModel(nn.Module):
    """Tiny test network: Linear(4->16) -> MoE block (8 Linear experts,
    residual path) -> Linear(16->4)."""

    def __init__(self):
        super().__init__()
        self.proj1 = nn.Linear(4, 16)
        self.moe = MoeModule(dim_model=16,
                             num_experts=8,
                             use_residual=True,
                             expert_cls=nn.Linear,
                             in_features=16,
                             out_features=16)
        self.proj2 = nn.Linear(16, 4)

    def forward(self, x):
        return self.proj2(self.moe(self.proj1(x)))
@parameterize("init_device_type", ['cpu', 'cuda'])
@parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy])
def run_moe_zero_init(init_device_type, shard_strategy_class):
    """Build an MoE model under ZeroInitContext and assert the expected
    sharding/replication/dtype/device properties of each parameter.

    :param init_device_type: 'cpu' or 'cuda' -- where params are placed
    :param shard_strategy_class: ZeRO shard strategy under test
    """
    logger = get_dist_logger("test_moe_zero_init")

    if init_device_type == 'cuda':
        init_device = torch.device(f"cuda:{get_current_device()}")
    elif init_device_type == 'cpu':
        init_device = torch.device("cpu")
    else:
        raise NotImplementedError("Unknown device found.")

    # accumulates the parameter count of the model built inside the context
    model_numel_tensor = torch.zeros(1, dtype=torch.int)
    with ZeroInitContext(target_device=init_device,
                         shard_strategy=shard_strategy_class(),
                         shard_param=True,
                         model_numel_tensor=model_numel_tensor,
                         rm_torch_payload_on_the_fly=False):
        model = MoeModel()

    for name, param in model.named_parameters():
        assert hasattr(param, 'colo_attr')

        # the weights in the gate should be fp32
        if 'gate' in name:
            assert param.colo_attr.sharded_data_tensor.dtype == torch.float32
        else:
            assert param.colo_attr.sharded_data_tensor.dtype == torch.half

        # the parameters in moe experts and its gate should not be sharded
        if ('experts' in name) or ('gate' in name) or ('residual_combine' in name):
            assert not param.colo_attr.sharded_data_tensor.is_sharded
        else:
            assert param.colo_attr.sharded_data_tensor.is_sharded

        # the parameters in moe experts is not replicated
        if 'experts' in name:
            assert not param.is_replicated
        else:
            assert param.is_replicated

        assert param.colo_attr.sharded_data_tensor.payload.device.type == init_device.type, \
            f'{param.colo_attr.sharded_data_tensor.payload.device.type} vs. {init_device.type}'
def _run_dist(rank, world_size, port):
    # per-worker entry: join the NCCL process group, seed the MoE context,
    # then run the ZeRO-init assertions
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    MOE_CONTEXT.setup(seed=42)
    run_moe_zero_init()
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [2, 4])
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_moe_zero_init(world_size):
    """Spawn ``world_size`` workers, each running the MoE ZeRO-init checks."""
    port = free_port()
    worker = partial(_run_dist, world_size=world_size, port=port)
    mp.spawn(worker, nprocs=world_size)
# Allow running this test file directly with a fixed world size.
if __name__ == '__main__':
    test_moe_zero_init(world_size=2)
| [
"colossalai.launch",
"colossalai.logging.get_dist_logger",
"colossalai.utils.free_port",
"colossalai.testing.rerun_on_exception",
"colossalai.utils.get_current_device",
"colossalai.context.MOE_CONTEXT.setup",
"colossalai.nn.layer.MoeModule",
"colossalai.testing.parameterize"
] | [((1178, 1227), 'colossalai.testing.parameterize', 'parameterize', (['"""init_device_type"""', "['cpu', 'cuda']"], {}), "('init_device_type', ['cpu', 'cuda'])\n", (1190, 1227), False, 'from colossalai.testing import parameterize\n'), ((1230, 1320), 'colossalai.testing.parameterize', 'parameterize', (['"""shard_strategy_class"""', '[TensorShardStrategy, BucketTensorShardStrategy]'], {}), "('shard_strategy_class', [TensorShardStrategy,\n BucketTensorShardStrategy])\n", (1242, 1320), False, 'from colossalai.testing import parameterize\n'), ((3433, 3478), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[2, 4]'], {}), "('world_size', [2, 4])\n", (3456, 3478), False, 'import pytest\n'), ((3481, 3584), 'colossalai.testing.rerun_on_exception', 'rerun_on_exception', ([], {'exception_type': 'mp.ProcessRaisedException', 'pattern': '""".*Address already in use.*"""'}), "(exception_type=mp.ProcessRaisedException, pattern=\n '.*Address already in use.*')\n", (3499, 3584), False, 'from colossalai.testing import rerun_on_exception\n'), ((1395, 1432), 'colossalai.logging.get_dist_logger', 'get_dist_logger', (['"""test_moe_zero_init"""'], {}), "('test_moe_zero_init')\n", (1410, 1432), False, 'from colossalai.logging import get_dist_logger\n'), ((1718, 1749), 'torch.zeros', 'torch.zeros', (['(1)'], {'dtype': 'torch.int'}), '(1, dtype=torch.int)\n', (1729, 1749), False, 'import torch\n'), ((3239, 3355), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'CONFIG', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=CONFIG, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (3256, 3355), False, 'import colossalai\n'), ((3356, 3382), 'colossalai.context.MOE_CONTEXT.setup', 'MOE_CONTEXT.setup', ([], {'seed': '(42)'}), '(seed=42)\n', (3373, 3382), False, 'from colossalai.context import MOE_CONTEXT\n'), ((3698, 3735), 
'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (3706, 3735), True, 'import torch.multiprocessing as mp\n'), ((771, 787), 'torch.nn.Linear', 'nn.Linear', (['(4)', '(16)'], {}), '(4, 16)\n', (780, 787), True, 'import torch.nn as nn\n'), ((906, 1011), 'colossalai.nn.layer.MoeModule', 'MoeModule', ([], {'dim_model': '(16)', 'num_experts': '(8)', 'use_residual': '(True)', 'expert_cls': 'expert_cls'}), '(dim_model=16, num_experts=8, use_residual=True, expert_cls=\n expert_cls, **expert_args_dict)\n', (915, 1011), False, 'from colossalai.nn.layer import MoeModule\n'), ((1029, 1045), 'torch.nn.Linear', 'nn.Linear', (['(16)', '(4)'], {}), '(16, 4)\n', (1038, 1045), True, 'import torch.nn as nn\n'), ((1599, 1618), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1611, 1618), False, 'import torch\n'), ((3680, 3691), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (3689, 3691), False, 'from colossalai.utils import free_port\n'), ((1515, 1535), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (1533, 1535), False, 'from colossalai.utils import get_current_device\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from colossalai.context import ParallelMode
from colossalai.registry import HOOKS
from colossalai.utils import is_no_pp_or_last_stage
from ._base_hook import BaseHook
from ..metric import Loss, Accuracy1D, Accuracy2D, Accuracy, Accuracy2p5D, Accuracy3D
class MetricHook(BaseHook):
    """Specialized hook classes for :class:`Metric`.

    Some help metric collectors initialize, reset and
    update their states. Others are used to display and
    record the metric.

    :param priority: Priority in the printing, hooks with small priority will be printed in front
    :type priority: int
    """

    def __init__(self,
                 priority: int,
                 ):
        super().__init__(priority)
        # metrics are computed only where outputs exist: on the last pipeline
        # stage, or everywhere when pipeline parallelism is off
        self._is_stage_to_compute = is_no_pp_or_last_stage()

    def _check_metric_states_initialization(self, trainer):
        # lazily create trainer.states['metrics'] = {'train': {}, 'test': {}}
        if 'metrics' not in trainer.states:
            self.init_runner_states(trainer, 'metrics', dict(train={}, test={}))
@HOOKS.register_module
class LossHook(MetricHook):
    """Hook that tracks training and test loss through :class:`Loss` metrics.

    :param priority: Priority in the printing, hooks with small priority will be printed in front
    :type priority: int, optional
    """

    def __init__(self, priority: int = 0):
        super().__init__(priority)

    def after_hook_is_attached(self, trainer):
        self._check_metric_states_initialization(trainer)
        if not self._is_stage_to_compute:
            return
        self.train_loss = Loss(epoch_only=False)
        self.test_loss = Loss(epoch_only=True)

        # register both metric calculators with the trainer state
        trainer.states['metrics']['train'][self.train_loss.__class__.__name__] = self.train_loss
        trainer.states['metrics']['test'][self.test_loss.__class__.__name__] = self.test_loss

    def before_train_epoch(self, trainer):
        if self._is_stage_to_compute:
            self.train_loss.reset()

    def after_train_iter(self, trainer, logits, label, loss):
        if self._is_stage_to_compute:
            self.train_loss.update(loss)

    def before_test_epoch(self, trainer):
        if self._is_stage_to_compute:
            self.test_loss.reset()

    def after_test_iter(self, trainer, logits, label, loss):
        if self._is_stage_to_compute:
            self.test_loss.update(loss)
@HOOKS.register_module
class Accuracy1DHook(MetricHook):
    """Hook that evaluates test accuracy with :class:`Accuracy1D`.
    It acts the same as :class:`AccuracyHook`.

    :param priority: Priority in the printing, hooks with small priority will be printed in front
    :type priority: int, optional
    """

    def __init__(self, priority: int = 10):
        super().__init__(priority)

    def after_hook_is_attached(self, trainer):
        self._check_metric_states_initialization(trainer)
        if not self._is_stage_to_compute:
            return
        self.metric = Accuracy1D(epoch_only=True)

        # register the metric under its class name
        trainer.states['metrics']['test'][self.metric.__class__.__name__] = self.metric

    def before_test(self, trainer):
        if self._is_stage_to_compute:
            self.metric.reset()

    def after_test_iter(self, trainer, logits, label, *args):
        if self._is_stage_to_compute:
            self.metric.update(logits, label)
@HOOKS.register_module
class Accuracy2DHook(MetricHook):
    """Hook that evaluates test accuracy with :class:`Accuracy2D`.
    It acts the same as :class:`AccuracyHook`.

    :param priority: Priority in the printing, hooks with small priority will be printed in front
    :type priority: int, optional
    """

    def __init__(self, priority: int = 0):
        super().__init__(priority)

    def after_hook_is_attached(self, trainer):
        self._check_metric_states_initialization(trainer)
        if not self._is_stage_to_compute:
            return
        self.metric = Accuracy2D(epoch_only=True)

        # register the metric under its class name
        trainer.states['metrics']['test'][self.metric.__class__.__name__] = self.metric

    def before_test(self, trainer):
        if self._is_stage_to_compute:
            self.metric.reset()

    def after_test_iter(self, trainer, logits, label, *args):
        if self._is_stage_to_compute:
            self.metric.update(logits, label)
@HOOKS.register_module
class Accuracy2p5DHook(MetricHook):
    """Hook that measures test accuracy with :class:`Accuracy2p5D`.

    :param priority: Priority in the printing, hooks with small priority will be printed in front
    :type priority: int, optional
    """

    def __init__(self, priority: int = 0):
        super().__init__(priority)

    def after_hook_is_attached(self, trainer):
        self._check_metric_states_initialization(trainer)
        if not self._is_stage_to_compute:
            return
        # create the metric and register it with the trainer's test metrics
        self.metric = Accuracy2p5D(epoch_only=True)
        trainer.states['metrics']['test'][type(self.metric).__name__] = self.metric

    def before_test(self, trainer):
        if self._is_stage_to_compute:
            self.metric.reset()

    def after_test_iter(self, trainer, logits, label, *args):
        if self._is_stage_to_compute:
            self.metric.update(logits, label)
@HOOKS.register_module
class Accuracy3DHook(MetricHook):
    """Hook that measures test accuracy with :class:`Accuracy3D`.

    :param priority: Priority in the printing, hooks with small priority will be printed in front
    :type priority: int
    """

    def __init__(self,
                 priority: int = 10):
        super().__init__(priority)

    def after_hook_is_attached(self, trainer):
        if not self._is_stage_to_compute:
            return
        # create the metric and register it with the trainer's test metrics
        self.metric = Accuracy3D(epoch_only=True)
        trainer.states['metrics']['test'][type(self.metric).__name__] = self.metric

    def before_test(self, trainer):
        if self._is_stage_to_compute:
            self.metric.reset()

    def after_test_iter(self, trainer, logits, label, *args):
        if self._is_stage_to_compute:
            self.metric.update(logits, label)
@HOOKS.register_module
class AccuracyHook(MetricHook):
    """Hook that measures test accuracy with :class:`Accuracy`.

    :param priority: Priority in the printing, hooks with small priority will be printed in front
    :type priority: int
    """

    def __init__(self, priority: int = 0):
        super().__init__(priority)

    def after_hook_is_attached(self, trainer):
        if not self._is_stage_to_compute:
            return
        # create the metric and register it with the trainer's test metrics
        self.metric = Accuracy(epoch_only=True)
        trainer.states['metrics']['test'][type(self.metric).__name__] = self.metric

    def before_test(self, trainer):
        if self._is_stage_to_compute:
            self.metric.reset()

    def after_test_iter(self, trainer, logits, label, *args):
        if self._is_stage_to_compute:
            self.metric.update(logits, label)
| [
"colossalai.utils.is_no_pp_or_last_stage"
] | [((881, 905), 'colossalai.utils.is_no_pp_or_last_stage', 'is_no_pp_or_last_stage', ([], {}), '()\n', (903, 905), False, 'from colossalai.utils import is_no_pp_or_last_stage\n')] |
from typing import Any, Tuple
import torch
import torch.distributed as dist
from colossalai.communication.collective import (all_gather, all_reduce, reduce_scatter)
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device
from torch import Tensor
from torch.cuda.amp import custom_bwd, custom_fwd
def get_parallel_group(parallel_mode: ParallelMode):
    """Look up the process group registered for ``parallel_mode``."""
    group = gpc.get_group(parallel_mode)
    return group
def get_global_rank():
    """Return this process's rank in the global (world) group."""
    rank = gpc.get_global_rank()
    return rank
def get_parallel_rank(parallel_mode: ParallelMode):
    """Return this process's local rank within the ``parallel_mode`` group."""
    local_rank = gpc.get_local_rank(parallel_mode)
    return local_rank
class _Classifier2p5D(torch.autograd.Function):
    """Autograd function for the 2.5D-parallel classifier head: ``C = A B^T (+ bias)``.

    Forward gathers the full weight over the column group, multiplies, and sums
    the partial products over the row group.
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(
        ctx: Any,
        A: Tensor,
        B: Tensor,
        bias,
        tesseract_dim: int,
        out_shape: Tuple[int, ...],
        row_rank: int,
        col_rank: int,
        row_parallel_mode: ParallelMode,
        col_parallel_mode: ParallelMode,
        data_parallel_rank: int,
        pipeline_parallel_rank: int,
        pipeline_parallel_size: int,
        tensor_parallel_size: int,
    ) -> Tensor:
        # Flatten leading dimensions so both operands are 2-D for the matmul.
        A_shape = A.shape
        A = A.reshape((-1, A_shape[-1]))
        B_shape = B.shape
        B = B.reshape((-1, B_shape[-1]))
        # Gather the full weight along the last dim across the column group.
        B_temp = all_gather(B, -1, col_parallel_mode)
        if ctx:
            ctx.save_for_backward(A, B_temp)
        C = torch.matmul(A, B_temp.transpose(0, 1))
        # Sum partial products over the row group so every rank holds full C.
        C = all_reduce(C, row_parallel_mode)
        # NOTE(review): this ctx access is not guarded by `if ctx:` like the
        # ones below — confirm ctx can actually never be falsy here.
        ctx.use_bias = bias is not None
        if bias is not None:
            C = C + bias
        out = C.reshape(out_shape)
        if ctx:
            # Stash metadata needed to shape/route the backward pass.
            ctx.tesseract_dim = tesseract_dim
            ctx.row_rank = row_rank
            ctx.col_rank = col_rank
            ctx.row_parallel_mode = row_parallel_mode
            ctx.col_parallel_mode = col_parallel_mode
            ctx.A_shape = A_shape
            ctx.B_shape = B_shape
            ctx.data_parallel_rank = data_parallel_rank
            ctx.pipeline_parallel_rank = pipeline_parallel_rank
            ctx.pipeline_parallel_size = pipeline_parallel_size
            ctx.tensor_parallel_size = tensor_parallel_size
        return out
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """Compute dA = dC @ B and dB = dC^T @ A, undoing forward collectives."""
        A, B = ctx.saved_tensors
        with torch.no_grad():
            A_grad = torch.matmul(output_grad, B)
            A_grad = A_grad.reshape(ctx.A_shape)
            B_grad = torch.matmul(output_grad.reshape(-1, output_grad.shape[-1]).transpose(0, 1), A)
            # Forward all-gathered B over the column group, so the weight grad
            # must be reduce-scattered back to each rank's shard.
            B_grad = reduce_scatter(B_grad, -1, ctx.col_parallel_mode)
            B_grad = B_grad.reshape(ctx.B_shape)
            if ctx.use_bias:
                # Sum over all non-feature dims, then reduce across the column group.
                bias_grad = torch.sum(output_grad, dim=tuple(range(output_grad.ndim - 1)))
                bias_grad = all_reduce(bias_grad, ctx.col_parallel_mode)
            else:
                bias_grad = None
        # One gradient (or None) per forward input, ctx excluded.
        return A_grad, B_grad, bias_grad, None, None, None, None, None, None, None, None, None, None
def classifier_2p5d(A: Tensor, B: Tensor, bias, tesseract_dim: int, out_shape: Tuple[int,
                                                                                     ...], row_rank: int, col_rank: int,
                    row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode, data_parallel_rank: int,
                    pipeline_parallel_rank: int, pipeline_parallel_size: int, tensor_parallel_size: int) -> Tensor:
    r"""Classifier for 2.5D tensor parallelism.

    Args:
        A (:class:`torch.tensor`): matrix :math:`A`.
        B (:class:`torch.tensor`): matrix :math:`B`.
        bias (:class:`torch.tensor`): matrix of bias.
        tesseract_dim (int): dimension of TESSERACT fo 2.5D parallelism.
        out_shape (:class:`torch.size`): shape of output tensor.
        row_rank (int): the rank of row.
        col_rank (int): the rank of column.
        row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
        col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
        data_parallel_rank (int): data parallel rank.
        pipeline_parallel_rank (int): pipeline parallel rank
        pipeline_parallel_size (int): pipeline parallel size.
        tensor_parallel_size (int): tensor parallel size.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    # Thin wrapper: forward everything to the autograd function.
    apply_args = (A, B, bias, tesseract_dim, out_shape, row_rank, col_rank, row_parallel_mode,
                  col_parallel_mode, data_parallel_rank, pipeline_parallel_rank, pipeline_parallel_size,
                  tensor_parallel_size)
    return _Classifier2p5D.apply(*apply_args)
class Matmul_AB_2p5D(torch.autograd.Function):
    r"""Matrix multiplication for :math:`C = AB`.
    Args:
        A (:class:`torch.tensor`): matrix :math:`A`.
        B (:class:`torch.tensor`): matrix :math:`B`.
        tesseract_dim (int): dimension of TESSERACT fo 2.5D parallelism.
        out_shape (:class:`torch.size`): shape of output tensor.
        row_rank (int): the rank of row.
        col_rank (int): the rank of column.
        dep_rank (int): the rank of depth.
        row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
        col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
        data_parallel_rank (int): data parallel rank.
        pipeline_parallel_rank (int): pipeline parallel rank
        pipeline_parallel_size (int): pipeline parallel size.
        tensor_parallel_size (int): tensor parallel size.
    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, A: Tensor, B: Tensor, tesseract_dim: int, out_shape: Tuple[int, ...], row_rank: int,
                col_rank: int, dep_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode,
                data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int,
                tensor_parallel_size: int) -> Tensor:
        # A: [b / dq, s, h / q] -> [(b * s) / dq, h / q]
        # B: [h / dq, s / q]
        # C: [b / dq, s, s / q] -> [(b * s) / dq, s / q]
        assert A.shape[-1] == B.shape[-2], \
            'Invalid shapes: A={}, B={} for AB.'.format(A.shape, B.shape)
        if ctx:
            ctx.save_for_backward(A, B)
        # Flatten to 2-D for the blocked matmul; original shapes are restored
        # at the end via out_shape and kept on ctx for backward.
        A_shape = A.shape
        A = A.reshape((-1, A_shape[-1]))
        B_shape = B.shape
        B = B.reshape((-1, B_shape[-1]))
        C_shape = (A.shape[0], B.shape[-1])
        C = torch.zeros(C_shape, dtype=A.dtype, device=get_current_device())
        # use circular buffer to store the communication tensor
        # 2 is enough for all cases
        A_list = [torch.empty_like(A) for _ in range(2)]
        B_list = [torch.empty_like(B) for _ in range(2)]
        row_group = gpc.get_group(row_parallel_mode)
        col_group = gpc.get_group(col_parallel_mode)
        # Global ranks of the first broadcast sources for this rank's row/column.
        src_a = \
            tesseract_dim * row_rank + tesseract_dim ** 2 * dep_rank + \
            data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
            pipeline_parallel_rank * tensor_parallel_size
        src_b = \
            col_rank + tesseract_dim ** 2 * dep_rank + \
            data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
            pipeline_parallel_rank * tensor_parallel_size
        opa = [None] * 2
        opb = [None] * 2
        A_list[0].copy_(A)
        B_list[0].copy_(B)
        # Kick off the first pair of asynchronous broadcasts.
        opa[0] = dist.broadcast(A_list[0], src=src_a, group=row_group, async_op=True)
        opb[0] = dist.broadcast(B_list[0], src=src_b, group=col_group, async_op=True)
        cur = 0
        for i in range(tesseract_dim):
            if i != tesseract_dim - 1:
                # Prefetch the next blocks while computing on the current ones.
                A_list[1 - cur].copy_(A)
                opa[1 - cur] = dist.broadcast(A_list[1 - cur], src=src_a + 1, group=row_group, async_op=True)
                B_list[1 - cur].copy_(B)
                opb[1 - cur] = dist.broadcast(B_list[1 - cur],
                                              src=src_b + tesseract_dim,
                                              group=col_group,
                                              async_op=True)
            # Wait for the current blocks to arrive before using them.
            if opa[cur] is not None:
                opa[cur].wait()
            if opb[cur] is not None:
                opb[cur].wait()
            # Accumulate this block's partial product into C.
            torch.addmm(C, A_list[cur], B_list[cur], out=C)
            cur = 1 - cur
            src_a += 1
            src_b += tesseract_dim
        out = C.reshape(out_shape)
        if ctx:
            # Stash metadata so backward can launch the transposed matmuls.
            ctx.tesseract_dim = tesseract_dim
            ctx.row_rank = row_rank
            ctx.col_rank = col_rank
            ctx.dep_rank = dep_rank
            ctx.row_parallel_mode = row_parallel_mode
            ctx.col_parallel_mode = col_parallel_mode
            ctx.A_shape = A_shape
            ctx.B_shape = B_shape
            ctx.data_parallel_rank = data_parallel_rank
            ctx.pipeline_parallel_rank = pipeline_parallel_rank
            ctx.pipeline_parallel_size = pipeline_parallel_size
            ctx.tensor_parallel_size = tensor_parallel_size
        return out
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """dA = dC @ B^T and dB = A^T @ dC, via the sibling 2.5D matmul kernels."""
        A, B = ctx.saved_tensors
        with torch.no_grad():
            A_grad = Matmul_ABT_2p5D.apply(output_grad, B, ctx.tesseract_dim, ctx.A_shape, ctx.row_rank, ctx.col_rank,
                                           ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode,
                                           ctx.data_parallel_rank, ctx.pipeline_parallel_rank,
                                           ctx.pipeline_parallel_size, ctx.tensor_parallel_size)
            B_grad = Matmul_ATB_2p5D.apply(A, output_grad, ctx.tesseract_dim, ctx.B_shape, ctx.row_rank, ctx.col_rank,
                                           ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode,
                                           ctx.data_parallel_rank, ctx.pipeline_parallel_rank,
                                           ctx.pipeline_parallel_size, ctx.tensor_parallel_size)
        return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None, None, None, None
class Matmul_ABT_2p5D(torch.autograd.Function):
    r"""Matrix multiplication for :math:`C = AB^T`.
    Args:
        A (:class:`torch.tensor`): matrix :math:`A`.
        B (:class:`torch.tensor`): matrix :math:`B`.
        tesseract_dim (int): dimension of TESSERACT fo 2.5D parallelism.
        out_shape (:class:`torch.size`): shape of output tensor.
        row_rank (int): the rank of row.
        col_rank (int): the rank of column.
        dep_rank (int): the rank of depth.
        row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
        col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
        data_parallel_rank (int): data parallel rank.
        pipeline_parallel_rank (int): pipeline parallel rank
        pipeline_parallel_size (int): pipeline parallel size.
        tensor_parallel_size (int): tensor parallel size.
    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, A: Tensor, B: Tensor, tesseract_dim: int, out_shape: Tuple[int, ...], row_rank: int,
                col_rank: int, dep_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode,
                data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int,
                tensor_parallel_size: int) -> Tensor:
        assert A.shape[-1] == B.shape[-1], \
            'Invalid shapes: A={}, B={} for ABT.'.format(A.shape, B.shape)
        if ctx:
            ctx.save_for_backward(A, B)
        # Flatten to 2-D; original shapes are kept on ctx for backward.
        A_shape = A.shape
        A = A.reshape((-1, A_shape[-1]))
        B_shape = B.shape
        B = B.reshape((-1, B_shape[-1]))
        C_shape = (A.shape[0], B.shape[0])
        C = torch.empty(C_shape, dtype=A.dtype, device=get_current_device())
        # use circular buffer to store the communication tensor
        # 2 is enough for all cases
        B_list = [torch.empty_like(B) for _ in range(2)]
        C_list = [torch.empty_like(C) for _ in range(2)]
        row_group = gpc.get_group(row_parallel_mode)
        col_group = gpc.get_group(col_parallel_mode)
        # Global ranks: src_b walks the column group (broadcast sources),
        # src_c walks the row group (reduce destinations).
        src_b = \
            col_rank + tesseract_dim ** 2 * dep_rank + \
            data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
            pipeline_parallel_rank * tensor_parallel_size
        src_c = \
            tesseract_dim * row_rank + tesseract_dim ** 2 * dep_rank + \
            data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
            pipeline_parallel_rank * tensor_parallel_size
        opb = [None] * 2
        opr = [None] * 2
        B_list[0].copy_(B)
        opb[0] = dist.broadcast(B_list[0], src=src_b, group=col_group, async_op=True)
        cur = 0
        for i in range(tesseract_dim):
            if i != tesseract_dim - 1:
                # Prefetch the next B block while working on the current one.
                B_list[1 - cur].copy_(B)
                opb[1 - cur] = dist.broadcast(B_list[1 - cur],
                                              src=src_b + tesseract_dim,
                                              group=col_group,
                                              async_op=True)
            if opr[cur] is not None:
                opr[cur].wait()
                # The reduce issued two iterations ago targeted this rank:
                # its buffer now holds this rank's final block — copy it out
                # before the buffer is reused.
                if i - 2 == col_rank:
                    C.copy_(C_list[cur])
            if opb[cur] is not None:
                opb[cur].wait()
            torch.matmul(A, B_list[cur].transpose(0, 1), out=C_list[cur])
            # Asynchronously reduce this partial result to its owner rank.
            opr[cur] = dist.reduce(C_list[cur], dst=src_c, group=row_group, async_op=True)
            cur = 1 - cur
            src_b += tesseract_dim
            src_c += 1
        for op in opr:
            op.wait()
        # Drain the last two in-flight reduces for the final two owner ranks.
        if tesseract_dim - 2 == col_rank:
            C.copy_(C_list[cur])
        if tesseract_dim - 1 == col_rank:
            C.copy_(C_list[1 - cur])
        out = C.reshape(out_shape)
        if ctx:
            # Stash metadata so backward can launch the paired matmuls.
            ctx.tesseract_dim = tesseract_dim
            ctx.row_rank = row_rank
            ctx.col_rank = col_rank
            ctx.dep_rank = dep_rank
            ctx.row_parallel_mode = row_parallel_mode
            ctx.col_parallel_mode = col_parallel_mode
            ctx.A_shape = A_shape
            ctx.B_shape = B_shape
            ctx.data_parallel_rank = data_parallel_rank
            ctx.pipeline_parallel_rank = pipeline_parallel_rank
            ctx.pipeline_parallel_size = pipeline_parallel_size
            ctx.tensor_parallel_size = tensor_parallel_size
        return out
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """dA = dC @ B and dB = dC^T @ A, via the sibling 2.5D matmul kernels."""
        A, B = ctx.saved_tensors
        with torch.no_grad():
            A_grad = Matmul_AB_2p5D.apply(output_grad, B, ctx.tesseract_dim, ctx.A_shape, ctx.row_rank, ctx.col_rank,
                                          ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode,
                                          ctx.data_parallel_rank, ctx.pipeline_parallel_rank,
                                          ctx.pipeline_parallel_size, ctx.tensor_parallel_size)
            B_grad = Matmul_ATB_2p5D.apply(output_grad, A, ctx.tesseract_dim, ctx.B_shape, ctx.row_rank, ctx.col_rank,
                                           ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode,
                                           ctx.data_parallel_rank, ctx.pipeline_parallel_rank,
                                           ctx.pipeline_parallel_size, ctx.tensor_parallel_size)
        return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None, None, None, None
class Matmul_ATB_2p5D(torch.autograd.Function):
    r"""Matrix multiplication for :math:`C = A^TB`
    Args:
        A (:class:`torch.tensor`): matrix :math:`A`.
        B (:class:`torch.tensor`): matrix :math:`B`.
        tesseract_dim (int): dimension of TESSERACT fo 2.5D parallelism.
        out_shape (:class:`torch.size`): shape of output tensor.
        row_rank (int): the rank of row.
        col_rank (int): the rank of column.
        dep_rank (int): the rank of depth.
        row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
        col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
        data_parallel_rank (int): data parallel rank.
        pipeline_parallel_rank (int): pipeline parallel rank
        pipeline_parallel_size (int): pipeline parallel size.
        tensor_parallel_size (int): tensor parallel size.
    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, A: Tensor, B: Tensor, tesseract_dim: int, out_shape: Tuple[int, ...], row_rank: int,
                col_rank: int, dep_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode,
                data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int,
                tensor_parallel_size: int):
        assert A.shape[-2] == B.shape[-2], \
            'Invalid shapes: A={}, B={} for ATB.'.format(A.shape, B.shape)
        if ctx:
            ctx.save_for_backward(A, B)
        # Flatten to 2-D; original shapes are kept on ctx for backward.
        A_shape = A.shape
        A = A.reshape((-1, A_shape[-1]))
        B_shape = B.shape
        B = B.reshape((-1, B_shape[-1]))
        C_shape = (A.shape[-1], B.shape[-1])
        C = torch.empty(C_shape, dtype=A.dtype, device=get_current_device())
        # use circular buffer to store the communication tensor
        # 2 is enough for all cases
        A_list = [torch.empty_like(A) for _ in range(2)]
        C_list = [torch.empty_like(C) for _ in range(2)]
        row_group = gpc.get_group(row_parallel_mode)
        col_group = gpc.get_group(col_parallel_mode)
        # Global ranks: src_a walks the row group (broadcast sources),
        # src_c walks the column group (reduce destinations).
        src_a = \
            tesseract_dim * row_rank + tesseract_dim ** 2 * dep_rank + \
            data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
            pipeline_parallel_rank * tensor_parallel_size
        src_c = \
            col_rank + tesseract_dim ** 2 * dep_rank + \
            data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
            pipeline_parallel_rank * tensor_parallel_size
        opa = [None] * 2
        opr = [None] * 2
        A_list[0].copy_(A)
        opa[0] = dist.broadcast(A_list[0], src=src_a, group=row_group, async_op=True)
        cur = 0
        for i in range(tesseract_dim):
            if i != tesseract_dim - 1:
                # Prefetch the next A block while working on the current one.
                A_list[1 - cur].copy_(A)
                opa[1 - cur] = dist.broadcast(A_list[1 - cur], src=src_a + 1, group=row_group, async_op=True)
            if opr[cur] is not None:
                opr[cur].wait()
                # The reduce issued two iterations ago targeted this rank:
                # copy its finished block out before the buffer is reused.
                if i - 2 == row_rank:
                    C.copy_(C_list[cur])
            if opa[cur] is not None:
                opa[cur].wait()
            torch.matmul(A_list[cur].transpose(0, 1), B, out=C_list[cur])
            # Asynchronously reduce this partial result to its owner rank.
            opr[cur] = dist.reduce(C_list[cur], dst=src_c, group=col_group, async_op=True)
            cur = 1 - cur
            src_a += 1
            src_c += tesseract_dim
        for op in opr:
            op.wait()
        # Drain the last two in-flight reduces for the final two owner ranks.
        if tesseract_dim - 2 == row_rank:
            C.copy_(C_list[cur])
        if tesseract_dim - 1 == row_rank:
            C.copy_(C_list[1 - cur])
        out = C.reshape(out_shape)
        if ctx:
            # Stash metadata so backward can launch the paired matmuls.
            ctx.tesseract_dim = tesseract_dim
            ctx.row_rank = row_rank
            ctx.col_rank = col_rank
            ctx.dep_rank = dep_rank
            ctx.row_parallel_mode = row_parallel_mode
            ctx.col_parallel_mode = col_parallel_mode
            ctx.A_shape = A_shape
            ctx.B_shape = B_shape
            ctx.data_parallel_rank = data_parallel_rank
            ctx.pipeline_parallel_rank = pipeline_parallel_rank
            ctx.pipeline_parallel_size = pipeline_parallel_size
            ctx.tensor_parallel_size = tensor_parallel_size
        return out
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """dA = B @ dC^T and dB = A @ dC, via the sibling 2.5D matmul kernels."""
        A, B = ctx.saved_tensors
        with torch.no_grad():
            A_grad = Matmul_ABT_2p5D.apply(B, output_grad, ctx.tesseract_dim, ctx.A_shape, ctx.row_rank, ctx.col_rank,
                                           ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode,
                                           ctx.data_parallel_rank, ctx.pipeline_parallel_rank,
                                           ctx.pipeline_parallel_size, ctx.tensor_parallel_size)
            B_grad = Matmul_AB_2p5D.apply(A, output_grad, ctx.tesseract_dim, ctx.B_shape, ctx.row_rank, ctx.col_rank,
                                          ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode,
                                          ctx.data_parallel_rank, ctx.pipeline_parallel_rank,
                                          ctx.pipeline_parallel_size, ctx.tensor_parallel_size)
        return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None, None, None, None
class _Add_Bias_2p5D(torch.autograd.Function):
    """Broadcasts the bias shard over the column group and (optionally) adds it.

    When ``skip_bias_add`` is True the broadcast bias itself is returned so the
    caller can fuse the add elsewhere; otherwise ``input + bias`` is returned.
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, input: Tensor, bias: Tensor, output_size_per_partition: int, tesseract_dim: int,
                row_rank: int, col_rank: int, dep_rank: int, col_parallel_mode: ParallelMode, skip_bias_add: bool,
                data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int,
                tensor_parallel_size: int) -> Tensor:
        # Only row 0 owns the real bias; other rows receive it via broadcast.
        if row_rank == 0:
            bias_temp = bias.clone()
        else:
            bias_temp = torch.zeros(output_size_per_partition, dtype=bias.dtype, device=get_current_device())
        # Global rank of the row-0 member of this column group.
        src_rank = \
            col_rank + dep_rank * tesseract_dim ** 2 + \
            data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
            pipeline_parallel_rank * tensor_parallel_size
        dist.broadcast(bias_temp, src=src_rank, group=get_parallel_group(col_parallel_mode))
        ctx.row_rank = row_rank
        ctx.col_rank = col_rank
        ctx.dep_rank = dep_rank
        ctx.tesseract_dim = tesseract_dim
        ctx.col_parallel_mode = col_parallel_mode
        # NOTE: ctx.bias stores the skip_bias_add flag, not the bias tensor.
        ctx.bias = skip_bias_add
        ctx.data_parallel_rank = data_parallel_rank
        ctx.pipeline_parallel_rank = pipeline_parallel_rank
        ctx.pipeline_parallel_size = pipeline_parallel_size
        ctx.tensor_parallel_size = tensor_parallel_size
        if skip_bias_add:
            return bias_temp
        else:
            output = input + bias_temp
            return output
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """Reduce the bias gradient back to the row-0 owner of each column group."""
        row_rank = ctx.row_rank
        col_rank = ctx.col_rank
        dep_rank = ctx.dep_rank
        tesseract_dim = ctx.tesseract_dim
        col_parallel_mode = ctx.col_parallel_mode
        data_parallel_rank = ctx.data_parallel_rank
        pipeline_parallel_rank = ctx.pipeline_parallel_rank
        pipeline_parallel_size = ctx.pipeline_parallel_size
        tensor_parallel_size = ctx.tensor_parallel_size
        # NOTE(review): the grad tuples below have more entries than the 13
        # forward inputs, and the branches differ in length; extra trailing
        # Nones are tolerated by autograd, but confirm the counts are intended.
        if ctx.bias:
            # skip_bias_add path: forward returned the bias, so output_grad
            # is the bias gradient; input gets no gradient.
            dst_rank = \
                col_rank + dep_rank * (tesseract_dim ** 2) + \
                data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
                pipeline_parallel_rank * tensor_parallel_size
            dist.reduce(output_grad, dst=dst_rank, group=get_parallel_group(col_parallel_mode))
            if row_rank == 0:
                return \
                    None, output_grad, None, None, None, None, None, None, \
                    None, None, None, None, None, None, None, None
            else:
                # Non-owner rows report a zero bias gradient.
                grad_tmp = torch.zeros_like(output_grad)
                return \
                    None, grad_tmp, None, None, None, None, None, None, \
                    None, None, None, None, None, None, None, None
        else:
            # Fused path: input gradient passes through; bias gradient is the
            # sum over all non-feature dims, reduced to the row-0 owner.
            reduce_dim = tuple(range(output_grad.ndim - 1))
            reduce = torch.sum(output_grad, dim=reduce_dim)
            dst_rank = \
                col_rank + dep_rank * (tesseract_dim ** 2) + \
                data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
                pipeline_parallel_rank * tensor_parallel_size
            dist.reduce(reduce, dst=dst_rank, group=get_parallel_group(col_parallel_mode))
            if row_rank == 0:
                return \
                    output_grad, reduce, None, None, None, None, None, None, None, \
                    None, None, None, None, None, None, None, None
            else:
                reduce_tmp = torch.zeros_like(reduce)
                return \
                    output_grad, reduce_tmp, None, None, None, None, None, None, \
                    None, None, None, None, None, None, None, None, None
def add_bias_2p5d(input: Tensor, bias: Tensor, output_size_per_partition: int, tesseract_dim: int, row_rank: int,
                  col_rank: int, dep_rank: int, col_parallel_mode: ParallelMode, skip_bias_add: bool,
                  data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int,
                  tensor_parallel_size: int) -> Tensor:
    r"""Matrix add bias: :math:`C = A + b`.

    Args:
        input (:class:`torch.tensor`): matrix :math:`A`.
        bias (:class:`torch.tensor`): matrix :math:`B`.
        tesseract_dim (int): dimension of TESSERACT fo 2.5D parallelism.
        output_size_per_partition (int): output size in each partition.
        row_rank (int): the rank of row.
        col_rank (int): the rank of column.
        dep_rank (int): the rank of depth.
        col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
        skip_bias_add (bool): If set to ``True``, it will skip bias add for linear layer,
            which is preserved for kernel fusion.
        data_parallel_rank (int): data parallel rank.
        pipeline_parallel_rank (int): pipeline parallel rank
        pipeline_parallel_size (int): pipeline parallel size.
        tensor_parallel_size (int): tensor parallel size.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    # Thin wrapper: forward everything to the autograd function.
    apply_args = (input, bias, output_size_per_partition, tesseract_dim, row_rank, col_rank, dep_rank,
                  col_parallel_mode, skip_bias_add, data_parallel_rank, pipeline_parallel_rank,
                  pipeline_parallel_size, tensor_parallel_size)
    return _Add_Bias_2p5D.apply(*apply_args)
class _Layernorm2p5D(torch.autograd.Function):
    r"""Layernorm.
    Args:
        input (:class:`torch.tensor`): input matrix.
        E_x (:class:`torch.tensor`): mean.
        Var_x (:class:`torch.tensor`): variance.
        hidden_size (int): hidden size.
        row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
    def forward(ctx: Any, input: Tensor, E_x: Tensor, Var_x: Tensor, hidden_size: int,
                row_parallel_mode: ParallelMode) -> Tensor:
        # Normalize: (x - E[x]) * 1/sqrt(Var[x] + eps).
        input = input - E_x
        # in here, input = x - E[x], Var_x = 1 / sqrt(Var[x] + eps)
        ctx.hidden_size = hidden_size
        output = input * Var_x
        ctx.save_for_backward(output, Var_x)
        ctx.row_parallel_mode = row_parallel_mode
        return output
    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad):
        """Standard layernorm backward, with the two row-sums all-reduced
        across the row group because the hidden dim is sharded."""
        row_parallel_mode = ctx.row_parallel_mode
        x, Var_x = ctx.saved_tensors
        # in here, Var_x = 1 / sqrt(Var[x] + eps), x = (x - E[x]) * Var_x
        with torch.no_grad():
            output_grad_sum = torch.sum(output_grad, dim=-1, keepdim=True)
            torch.distributed.all_reduce(output_grad_sum, group=get_parallel_group(row_parallel_mode))
            # Divide by the full hidden size (not the local shard width).
            output_grad_sum /= ctx.hidden_size
            output_grad_mul_x_sum = torch.sum(output_grad * x, dim=-1, keepdim=True)
            torch.distributed.all_reduce(output_grad_mul_x_sum, group=get_parallel_group(row_parallel_mode))
            output_grad_mul_x_sum /= ctx.hidden_size
            input_grad = output_grad.clone()
            input_grad -= x * output_grad_mul_x_sum
            input_grad -= output_grad_sum
            input_grad *= Var_x
        return input_grad, None, None, None, None, None, None
def layernorm_2p5d(input: Tensor, E_x: Tensor, Var_x: Tensor, hidden_size: int,
                   row_parallel_mode: ParallelMode) -> Tensor:
    r"""Layernorm.

    Args:
        input (:class:`torch.tensor`): input matrix.
        E_x (:class:`torch.tensor`): mean.
        Var_x (:class:`torch.tensor`): variance.
        hidden_size (int): hidden size.
        row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    # Thin wrapper around the autograd function.
    apply_args = (input, E_x, Var_x, hidden_size, row_parallel_mode)
    return _Layernorm2p5D.apply(*apply_args)
class _AllGatherTensor2p5D(torch.autograd.Function):
    """Differentiable all-gather over the column parallel group.

    Forward gathers along ``dim``; backward reduce-scatters the gradient back
    to each rank's shard.
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, inputs: Tensor, dim: int, col_parallel_mode: ParallelMode) -> Tensor:
        # Remember how to undo the gather in backward.
        ctx.dim = dim
        ctx.col_parallel_mode = col_parallel_mode
        return all_gather(inputs, dim, col_parallel_mode)
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        grad = reduce_scatter(output_grad, ctx.dim, ctx.col_parallel_mode)
        return grad.contiguous(), None, None
def all_gather_tensor_2p5d(inputs: Tensor, dim: int, col_parallel_mode: ParallelMode) -> Tensor:
    r"""all gather the weight of 2.5D parallelism.

    Args:
        inputs (:class:`torch.tensor`): input tensor.
        dim (int): dimension of all-gather.
        col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    # Thin wrapper around the autograd function.
    apply_args = (inputs, dim, col_parallel_mode)
    return _AllGatherTensor2p5D.apply(*apply_args)
class SplitFirst(torch.autograd.Function):
    r"""
    Args:
        inputs (:class:`torch.tensor`): input tensor.
        tesseract_dim (int): dimension of TESSERACT fo 2.5D parallelism
        col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, inputs: Tensor, tesseract_dim: int, col_parallel_mode: ParallelMode) -> Tensor:
        """Keep only this rank's chunk of dim 0; backward re-gathers the whole batch."""
        ctx.tesseract_dim = tesseract_dim
        # Full batch size is needed to size the gathered gradient buffer.
        ctx.batch_size = inputs.size(0)
        ctx.para_mode = col_parallel_mode
        row_rank = gpc.get_local_rank(col_parallel_mode)
        outputs = inputs.chunk(tesseract_dim, dim=0)[row_rank]
        return outputs
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # Rebuild the full-batch gradient by gathering every rank's chunk
        # directly into views of one preallocated buffer (no extra concat).
        grad_shape = (ctx.batch_size,) + output_grad.shape[1:]
        grad = torch.empty(grad_shape, dtype=output_grad.dtype, device=get_current_device())
        dist.all_gather(list(grad.chunk(ctx.tesseract_dim, dim=0)),
                        output_grad.contiguous(),
                        group=gpc.get_group(ctx.para_mode))
        return grad, None, None
def split_tensor_2p5d(input_: Tensor, dim: int = 0) -> Tensor:
    """Splits 2P5D tensor in specified dimension across cols.

    Args:
        input_ (:class:`torch.tensor`): Input tensor.
        dim (int): Specified dimension in which to split.

    Returns:
        :class:`torch.tensor`: The tensor has been split.
    """
    # Nothing to split when the dimension has at most one element.
    if input_.size(dim) <= 1:
        return input_
    world_size = gpc.get_world_size(ParallelMode.PARALLEL_2P5D_COL)
    local_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
    chunks = torch.chunk(input_, world_size, dim=dim)
    return chunks[local_rank].contiguous()
class _ReduceTensor2p5D(torch.autograd.Function):
    """All-reduce in forward; identity in backward (summation is linear)."""
    @staticmethod
    def forward(ctx, input_, parallel_mode):
        return all_reduce(input_, parallel_mode)
    @staticmethod
    def backward(ctx, output_grad):
        # Every rank already holds the same gradient — pass it through.
        return output_grad, None
def reduce_tensor_2p5d(input_: Tensor, parallel_mode: ParallelMode) -> Tensor:
    r"""All-reduce the input.

    Args:
        input_ (:class:`torch.tensor`): Input tensor.
        parallel_mode (:class:`colossalai.context.ParallelMode`): The parallel mode tensor used.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    # Thin wrapper around the autograd function.
    return _ReduceTensor2p5D.apply(input_, parallel_mode)
class _ReduceScatterTensor2p5D(torch.autograd.Function):
    """Reduce-scatter in forward; all-gather of the gradient in backward."""
    @staticmethod
    def forward(ctx, input_, dim, parallel_mode):
        # Remember how to invert the scatter in backward.
        ctx.dim = dim
        ctx.parallel_mode = parallel_mode
        return reduce_scatter(input_, dim, parallel_mode)
    @staticmethod
    def backward(ctx, output_grad):
        grad = all_gather(output_grad, ctx.dim, ctx.parallel_mode)
        return grad, None, None
def reduce_scatter_tensor_2p5d(input_: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
    r"""Reduce-scatter the input.

    Args:
        input_ (:class:`torch.tensor`): Input tensor.
        dim (int): Dimension to reduce.
        parallel_mode (:class:`colossalai.context.ParallelMode`): The parallel mode tensor used.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    # Thin wrapper around the autograd function.
    return _ReduceScatterTensor2p5D.apply(input_, dim, parallel_mode)
class _RreduceByBatch2p5D(torch.autograd.Function):
    """All-reduce over the 2.5D column group, optionally averaging by group size."""
    @staticmethod
    def symbolic(graph, input_, reduce_mean: bool = False):
        output = all_reduce(input_, ParallelMode.PARALLEL_2P5D_COL)
        if not reduce_mean:
            return output
        reduce_size = gpc.get_world_size(ParallelMode.PARALLEL_2P5D_COL)
        return output / reduce_size
    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
    def forward(ctx, input_, reduce_mean: bool = False):
        output = all_reduce(input_, ParallelMode.PARALLEL_2P5D_COL)
        ctx.reduce_mean = reduce_mean
        if not reduce_mean:
            # Clone so autograd does not see the collective's in-place result.
            return output.clone()
        reduce_size = gpc.get_world_size(ParallelMode.PARALLEL_2P5D_COL)
        ctx.reduce_size = reduce_size
        return output.clone() / reduce_size
    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad):
        # Mirror the forward scaling on the gradient.
        if ctx.reduce_mean:
            return output_grad / ctx.reduce_size, None
        return output_grad, None
def reduce_by_batch_2p5d(input_, reduce_mean: bool = False) -> Tensor:
    r"""All-reduce the input from the model parallel region.

    Args:
        input_ (:class:`torch.tensor`): input matrix.
        reduce_mean (bool, optional): If set to ``True``, the reduced sum is divided by the
            column-parallel world size. Defaults to ``False``.
    """
    output = _RreduceByBatch2p5D.apply(input_, reduce_mean)
    return output
| [
"colossalai.core.global_context.get_local_rank",
"colossalai.communication.collective.reduce_scatter",
"colossalai.communication.collective.all_gather",
"colossalai.core.global_context.get_group",
"colossalai.communication.collective.all_reduce",
"colossalai.core.global_context.get_world_size",
"colossa... | [((463, 491), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['parallel_mode'], {}), '(parallel_mode)\n', (476, 491), True, 'from colossalai.core import global_context as gpc\n'), ((528, 549), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (547, 549), True, 'from colossalai.core import global_context as gpc\n'), ((615, 648), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['parallel_mode'], {}), '(parallel_mode)\n', (633, 648), True, 'from colossalai.core import global_context as gpc\n'), ((722, 759), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (732, 759), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((6129, 6166), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (6139, 6166), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((11928, 11965), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (11938, 11965), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((17701, 17738), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (17711, 17738), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((22211, 22248), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (22221, 22248), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((28418, 28455), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float32'}), '(cast_inputs=torch.float32)\n', (28428, 28455), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((30719, 30756), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (30729, 30756), 
False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((32472, 32509), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (32482, 32509), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((36182, 36219), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float32'}), '(cast_inputs=torch.float32)\n', (36192, 36219), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((1406, 1442), 'colossalai.communication.collective.all_gather', 'all_gather', (['B', '(-1)', 'col_parallel_mode'], {}), '(B, -1, col_parallel_mode)\n', (1416, 1442), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce_scatter\n'), ((1570, 1602), 'colossalai.communication.collective.all_reduce', 'all_reduce', (['C', 'row_parallel_mode'], {}), '(C, row_parallel_mode)\n', (1580, 1602), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce_scatter\n'), ((7355, 7387), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['row_parallel_mode'], {}), '(row_parallel_mode)\n', (7368, 7387), True, 'from colossalai.core import global_context as gpc\n'), ((7408, 7440), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['col_parallel_mode'], {}), '(col_parallel_mode)\n', (7421, 7440), True, 'from colossalai.core import global_context as gpc\n'), ((8013, 8081), 'torch.distributed.broadcast', 'dist.broadcast', (['A_list[0]'], {'src': 'src_a', 'group': 'row_group', 'async_op': '(True)'}), '(A_list[0], src=src_a, group=row_group, async_op=True)\n', (8027, 8081), True, 'import torch.distributed as dist\n'), ((8099, 8167), 'torch.distributed.broadcast', 'dist.broadcast', (['B_list[0]'], {'src': 'src_b', 'group': 'col_group', 'async_op': '(True)'}), '(B_list[0], src=src_b, group=col_group, async_op=True)\n', (8113, 8167), True, 'import torch.distributed as dist\n'), ((13011, 13043), 
'colossalai.core.global_context.get_group', 'gpc.get_group', (['row_parallel_mode'], {}), '(row_parallel_mode)\n', (13024, 13043), True, 'from colossalai.core import global_context as gpc\n'), ((13064, 13096), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['col_parallel_mode'], {}), '(col_parallel_mode)\n', (13077, 13096), True, 'from colossalai.core import global_context as gpc\n'), ((13642, 13710), 'torch.distributed.broadcast', 'dist.broadcast', (['B_list[0]'], {'src': 'src_b', 'group': 'col_group', 'async_op': '(True)'}), '(B_list[0], src=src_b, group=col_group, async_op=True)\n', (13656, 13710), True, 'import torch.distributed as dist\n'), ((18776, 18808), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['row_parallel_mode'], {}), '(row_parallel_mode)\n', (18789, 18808), True, 'from colossalai.core import global_context as gpc\n'), ((18829, 18861), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['col_parallel_mode'], {}), '(col_parallel_mode)\n', (18842, 18861), True, 'from colossalai.core import global_context as gpc\n'), ((19407, 19475), 'torch.distributed.broadcast', 'dist.broadcast', (['A_list[0]'], {'src': 'src_a', 'group': 'row_group', 'async_op': '(True)'}), '(A_list[0], src=src_a, group=row_group, async_op=True)\n', (19421, 19475), True, 'import torch.distributed as dist\n'), ((30944, 30986), 'colossalai.communication.collective.all_gather', 'all_gather', (['inputs', 'dim', 'col_parallel_mode'], {}), '(inputs, dim, col_parallel_mode)\n', (30954, 30986), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce_scatter\n'), ((31131, 31190), 'colossalai.communication.collective.reduce_scatter', 'reduce_scatter', (['output_grad', 'ctx.dim', 'ctx.col_parallel_mode'], {}), '(output_grad, ctx.dim, ctx.col_parallel_mode)\n', (31145, 31190), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce_scatter\n'), ((32759, 32796), 
'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['col_parallel_mode'], {}), '(col_parallel_mode)\n', (32777, 32796), True, 'from colossalai.core import global_context as gpc\n'), ((34048, 34081), 'colossalai.communication.collective.all_reduce', 'all_reduce', (['input_', 'parallel_mode'], {}), '(input_, parallel_mode)\n', (34058, 34081), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce_scatter\n'), ((34960, 35002), 'colossalai.communication.collective.reduce_scatter', 'reduce_scatter', (['input_', 'dim', 'parallel_mode'], {}), '(input_, dim, parallel_mode)\n', (34974, 35002), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce_scatter\n'), ((35944, 35994), 'colossalai.communication.collective.all_reduce', 'all_reduce', (['input_', 'ParallelMode.PARALLEL_2P5D_COL'], {}), '(input_, ParallelMode.PARALLEL_2P5D_COL)\n', (35954, 35994), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce_scatter\n'), ((36294, 36344), 'colossalai.communication.collective.all_reduce', 'all_reduce', (['input_', 'ParallelMode.PARALLEL_2P5D_COL'], {}), '(input_, ParallelMode.PARALLEL_2P5D_COL)\n', (36304, 36344), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce_scatter\n'), ((2462, 2477), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2475, 2477), False, 'import torch\n'), ((2500, 2528), 'torch.matmul', 'torch.matmul', (['output_grad', 'B'], {}), '(output_grad, B)\n', (2512, 2528), False, 'import torch\n'), ((2700, 2749), 'colossalai.communication.collective.reduce_scatter', 'reduce_scatter', (['B_grad', '(-1)', 'ctx.col_parallel_mode'], {}), '(B_grad, -1, ctx.col_parallel_mode)\n', (2714, 2749), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce_scatter\n'), ((7238, 7257), 'torch.empty_like', 'torch.empty_like', (['A'], {}), '(A)\n', (7254, 7257), False, 'import torch\n'), ((7295, 
7314), 'torch.empty_like', 'torch.empty_like', (['B'], {}), '(B)\n', (7311, 7314), False, 'import torch\n'), ((8867, 8914), 'torch.addmm', 'torch.addmm', (['C', 'A_list[cur]', 'B_list[cur]'], {'out': 'C'}), '(C, A_list[cur], B_list[cur], out=C)\n', (8878, 8914), False, 'import torch\n'), ((9797, 9812), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9810, 9812), False, 'import torch\n'), ((12894, 12913), 'torch.empty_like', 'torch.empty_like', (['B'], {}), '(B)\n', (12910, 12913), False, 'import torch\n'), ((12951, 12970), 'torch.empty_like', 'torch.empty_like', (['C'], {}), '(C)\n', (12967, 12970), False, 'import torch\n'), ((14424, 14491), 'torch.distributed.reduce', 'dist.reduce', (['C_list[cur]'], {'dst': 'src_c', 'group': 'row_group', 'async_op': '(True)'}), '(C_list[cur], dst=src_c, group=row_group, async_op=True)\n', (14435, 14491), True, 'import torch.distributed as dist\n'), ((15575, 15590), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15588, 15590), False, 'import torch\n'), ((18659, 18678), 'torch.empty_like', 'torch.empty_like', (['A'], {}), '(A)\n', (18675, 18678), False, 'import torch\n'), ((18716, 18735), 'torch.empty_like', 'torch.empty_like', (['C'], {}), '(C)\n', (18732, 18735), False, 'import torch\n'), ((20039, 20106), 'torch.distributed.reduce', 'dist.reduce', (['C_list[cur]'], {'dst': 'src_c', 'group': 'col_group', 'async_op': '(True)'}), '(C_list[cur], dst=src_c, group=col_group, async_op=True)\n', (20050, 20106), True, 'import torch.distributed as dist\n'), ((21190, 21205), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (21203, 21205), False, 'import torch\n'), ((25120, 25158), 'torch.sum', 'torch.sum', (['output_grad'], {'dim': 'reduce_dim'}), '(output_grad, dim=reduce_dim)\n', (25129, 25158), False, 'import torch\n'), ((29130, 29145), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (29143, 29145), False, 'import torch\n'), ((29177, 29221), 'torch.sum', 'torch.sum', (['output_grad'], {'dim': '(-1)', 'keepdim': 
'(True)'}), '(output_grad, dim=-1, keepdim=True)\n', (29186, 29221), False, 'import torch\n'), ((29409, 29457), 'torch.sum', 'torch.sum', (['(output_grad * x)'], {'dim': '(-1)', 'keepdim': '(True)'}), '(output_grad * x, dim=-1, keepdim=True)\n', (29418, 29457), False, 'import torch\n'), ((35073, 35124), 'colossalai.communication.collective.all_gather', 'all_gather', (['output_grad', 'ctx.dim', 'ctx.parallel_mode'], {}), '(output_grad, ctx.dim, ctx.parallel_mode)\n', (35083, 35124), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce_scatter\n'), ((36045, 36095), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_2P5D_COL'], {}), '(ParallelMode.PARALLEL_2P5D_COL)\n', (36063, 36095), True, 'from colossalai.core import global_context as gpc\n'), ((36433, 36483), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_2P5D_COL'], {}), '(ParallelMode.PARALLEL_2P5D_COL)\n', (36451, 36483), True, 'from colossalai.core import global_context as gpc\n'), ((2948, 2992), 'colossalai.communication.collective.all_reduce', 'all_reduce', (['bias_grad', 'ctx.col_parallel_mode'], {}), '(bias_grad, ctx.col_parallel_mode)\n', (2958, 2992), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce_scatter\n'), ((7097, 7117), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (7115, 7117), False, 'from colossalai.utils import get_current_device\n'), ((8335, 8413), 'torch.distributed.broadcast', 'dist.broadcast', (['A_list[1 - cur]'], {'src': '(src_a + 1)', 'group': 'row_group', 'async_op': '(True)'}), '(A_list[1 - cur], src=src_a + 1, group=row_group, async_op=True)\n', (8349, 8413), True, 'import torch.distributed as dist\n'), ((8486, 8580), 'torch.distributed.broadcast', 'dist.broadcast', (['B_list[1 - cur]'], {'src': '(src_b + tesseract_dim)', 'group': 'col_group', 'async_op': '(True)'}), '(B_list[1 - cur], 
src=src_b + tesseract_dim, group=col_group,\n async_op=True)\n', (8500, 8580), True, 'import torch.distributed as dist\n'), ((12753, 12773), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (12771, 12773), False, 'from colossalai.utils import get_current_device\n'), ((13878, 13972), 'torch.distributed.broadcast', 'dist.broadcast', (['B_list[1 - cur]'], {'src': '(src_b + tesseract_dim)', 'group': 'col_group', 'async_op': '(True)'}), '(B_list[1 - cur], src=src_b + tesseract_dim, group=col_group,\n async_op=True)\n', (13892, 13972), True, 'import torch.distributed as dist\n'), ((18518, 18538), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (18536, 18538), False, 'from colossalai.utils import get_current_device\n'), ((19643, 19721), 'torch.distributed.broadcast', 'dist.broadcast', (['A_list[1 - cur]'], {'src': '(src_a + 1)', 'group': 'row_group', 'async_op': '(True)'}), '(A_list[1 - cur], src=src_a + 1, group=row_group, async_op=True)\n', (19657, 19721), True, 'import torch.distributed as dist\n'), ((24829, 24858), 'torch.zeros_like', 'torch.zeros_like', (['output_grad'], {}), '(output_grad)\n', (24845, 24858), False, 'import torch\n'), ((25741, 25765), 'torch.zeros_like', 'torch.zeros_like', (['reduce'], {}), '(reduce)\n', (25757, 25765), False, 'import torch\n'), ((33124, 33144), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (33142, 33144), False, 'from colossalai.utils import get_current_device\n'), ((33294, 33322), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ctx.para_mode'], {}), '(ctx.para_mode)\n', (33307, 33322), True, 'from colossalai.core import global_context as gpc\n'), ((33853, 33903), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_COL'], {}), '(ParallelMode.PARALLEL_2P5D_COL)\n', (33871, 33903), True, 'from colossalai.core import global_context as gpc\n'), ((22789, 22809), 
'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (22807, 22809), False, 'from colossalai.utils import get_current_device\n'), ((33769, 33819), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_2P5D_COL'], {}), '(ParallelMode.PARALLEL_2P5D_COL)\n', (33787, 33819), True, 'from colossalai.core import global_context as gpc\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from typing import Optional, Tuple
import torch
from colossalai.communication import (all_gather, all_reduce, broadcast, reduce, reduce_scatter)
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from torch import Tensor
from torch.cuda.amp import custom_bwd, custom_fwd
from ._utils import get_parallel_mode_from_env
from colossalai.constants import INPUT_GROUP_3D, WEIGHT_GROUP_3D
class _Linear3D(torch.autograd.Function):
    """3D-parallel linear transform: gathers the sharded operands, computes
    ``input_ @ weight`` (+ ``bias``), and reduce-scatters the product."""

    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx,
                input_: Tensor,
                weight: Tensor,
                bias: Optional[Tensor],
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = -1,
                output_dim: int = 0) -> Tensor:
        """Gather input/weight shards, multiply, and re-shard the result.

        The ``*_dim`` arguments name the dimension each tensor is sharded
        along within its respective parallel group.
        """
        ctx.use_bias = bias is not None
        # Assemble the full operands from their shards before the matmul.
        input_ = all_gather(input_, input_dim, input_parallel_mode)
        weight = all_gather(weight, weight_dim, weight_parallel_mode)
        ctx.save_for_backward(input_, weight)
        output = torch.matmul(input_, weight)
        # Shard the product along output_dim across the output group.
        output = reduce_scatter(output, output_dim, output_parallel_mode)
        if bias is not None:
            output += bias
        # Record the communication layout so backward can mirror it.
        ctx.input_parallel_mode = input_parallel_mode
        ctx.weight_parallel_mode = weight_parallel_mode
        ctx.output_parallel_mode = output_parallel_mode
        ctx.input_dim = input_dim
        ctx.weight_dim = weight_dim
        ctx.output_dim = output_dim
        return output

    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """Mirror of forward: gather the output gradient, form the three
        operand gradients, and re-shard them (reduce-scatters run async)."""
        input_, weight = ctx.saved_tensors
        with torch.no_grad():
            output_grad = all_gather(output_grad, ctx.output_dim, ctx.output_parallel_mode)
            async_ops = list()
            # d(input) = d(output) @ weight^T, scattered back along input_dim.
            input_grad = torch.matmul(output_grad, weight.transpose(0, 1))
            input_grad, op = reduce_scatter(input_grad, ctx.input_dim, ctx.input_parallel_mode, async_op=True)
            async_ops.append(op)
            # d(weight) = input^T @ d(output), with leading dims flattened.
            weight_grad = torch.matmul(
                input_.reshape(-1, input_.shape[-1]).transpose(0, 1), output_grad.reshape(-1, output_grad.shape[-1]))
            weight_grad, op = reduce_scatter(weight_grad, ctx.weight_dim, ctx.weight_parallel_mode, async_op=True)
            async_ops.append(op)
            if ctx.use_bias:
                # d(bias): sum d(output) over every dim except the last.
                bias_grad = torch.sum(output_grad, dim=tuple(range(len(output_grad.shape))[:-1]))
                bias_grad, op = all_reduce(bias_grad, ctx.weight_parallel_mode, async_op=True)
                async_ops.append(op)
            else:
                bias_grad = None
            # Wait for all async collectives before returning the grads.
            for op in async_ops:
                if op is not None:
                    op.wait()
            return input_grad, weight_grad, bias_grad, None, None, None, None, None, None
def linear_3d(input_: Tensor,
              weight: Tensor,
              bias: Optional[Tensor],
              input_parallel_mode: ParallelMode,
              weight_parallel_mode: ParallelMode,
              output_parallel_mode: ParallelMode,
              input_dim: int = 0,
              weight_dim: int = -1,
              output_dim: int = 0) -> Tensor:
    r"""Linear layer for 3D parallelism: ``input_ @ weight (+ bias)`` over sharded operands.

    Args:
        input_ (:class:`torch.tensor`): input matrix.
        weight (:class:`torch.tensor`): matrix of weight.
        bias (:class:`torch.tensor`): matrix of bias.
        input_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): input parallel mode.
        weight_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): weight parallel mode.
        output_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): output parallel mode.
        input_dim (int, optional): dimension of input, defaults to 0.
        weight_dim (int, optional): dimension of weight, defaults to -1.
        output_dim (int, optional): dimension of output, defaults to 0.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    return _Linear3D.apply(input_, weight, bias, input_parallel_mode, weight_parallel_mode,
                           output_parallel_mode, input_dim, weight_dim, output_dim)
class _Classifier3D(torch.autograd.Function):
    """3D-parallel classifier head: broadcasts the weight from the diagonal
    ranks, computes ``input_ @ weight^T`` (+ ``bias``), and all-reduces the
    result over the output group."""

    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx, input_: Tensor, weight: Tensor, bias: Optional[Tensor], input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode, output_parallel_mode: ParallelMode) -> Tensor:
        ctx.use_bias = bias is not None
        # Broadcast source: the rank inside the input group whose position
        # matches this process's local rank in the output group (the "diagonal").
        ranks_in_group = gpc.get_ranks_in_group(input_parallel_mode)
        src_rank = ranks_in_group[gpc.get_local_rank(output_parallel_mode)]
        weight = broadcast(weight, src_rank, input_parallel_mode)
        ctx.save_for_backward(input_, weight)
        output = torch.matmul(input_, weight.transpose(0, 1))
        output = all_reduce(output, output_parallel_mode)
        if bias is not None:
            output += bias
        # Record the layout for backward.
        ctx.src_rank = src_rank
        ctx.input_parallel_mode = input_parallel_mode
        ctx.weight_parallel_mode = weight_parallel_mode
        ctx.output_parallel_mode = output_parallel_mode
        return output

    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]:
        input_, weight = ctx.saved_tensors
        with torch.no_grad():
            async_ops = list()
            # d(weight) = d(output)^T @ input, folded back onto the broadcast source.
            weight_grad = torch.matmul(
                output_grad.reshape(-1, output_grad.shape[-1]).transpose(0, 1), input_.reshape(-1, input_.shape[-1]))
            weight_grad = reduce(weight_grad, ctx.src_rank, ctx.input_parallel_mode)
            # Only the diagonal ranks own the weight; the others return no grad.
            if gpc.get_local_rank(ctx.input_parallel_mode) == gpc.get_local_rank(ctx.output_parallel_mode):
                weight_grad, op = all_reduce(weight_grad, ctx.weight_parallel_mode, async_op=True)
                async_ops.append(op)
            else:
                weight_grad = None
            if ctx.use_bias:
                # d(bias): sum d(output) over every dim but the last, reduced
                # over both the input and weight groups.
                bias_grad = torch.sum(output_grad, dim=tuple(range(len(output_grad.shape))[:-1]))
                bias_grad = all_reduce(bias_grad, ctx.input_parallel_mode)
                bias_grad, op = all_reduce(bias_grad, ctx.weight_parallel_mode, async_op=True)
                async_ops.append(op)
            else:
                bias_grad = None
            input_grad = torch.matmul(output_grad, weight)
            for op in async_ops:
                if op is not None:
                    op.wait()
            # NOTE(review): forward takes 6 inputs but 9 grads are returned here;
            # the three trailing extra Nones look like copy-paste from _Linear3D — confirm.
            return input_grad, weight_grad, bias_grad, None, None, None, None, None, None
def classifier_3d(input_: Tensor, weight: Tensor, bias: Optional[Tensor], input_parallel_mode: ParallelMode,
                  weight_parallel_mode: ParallelMode, output_parallel_mode: ParallelMode) -> Tensor:
    r"""Classifier head for 3D tensor parallelism.

    Computes ``input_ @ weight^T (+ bias)`` with the weight broadcast from the
    diagonal ranks of the mesh.

    Args:
        input_ (:class:`torch.tensor`): input matrix.
        weight (:class:`torch.tensor`): matrix of weight.
        bias (:class:`torch.tensor`): matrix of bias.
        input_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): input parallel mode.
        weight_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): weight parallel mode.
        output_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): output parallel mode.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    output = _Classifier3D.apply(input_, weight, bias, input_parallel_mode, weight_parallel_mode, output_parallel_mode)
    return output
class _Layernorm3D(torch.autograd.Function):
    """3D-parallel layernorm over the last dimension; partial per-rank sums
    are all-reduced across the output group to form the global statistics."""

    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
    def forward(ctx, input_: Tensor, weight: Tensor, bias: Tensor, normalized_shape: int, eps: float,
                input_parallel_mode: ParallelMode, weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode) -> Tensor:
        # Global mean/variance: local sums over the (sharded) last dim are
        # all-reduced, then divided by the full feature count normalized_shape.
        mean = all_reduce(torch.sum(input_, dim=-1, keepdim=True), output_parallel_mode) / normalized_shape
        mu = input_ - mean
        var = all_reduce(torch.sum(mu**2, dim=-1, keepdim=True), output_parallel_mode) / normalized_shape
        sigma = torch.sqrt(var + eps)
        ctx.save_for_backward(mu, sigma, weight)
        z = mu / sigma
        output = weight * z + bias
        ctx.normalized_shape = normalized_shape
        ctx.input_parallel_mode = input_parallel_mode
        ctx.weight_parallel_mode = weight_parallel_mode
        ctx.output_parallel_mode = output_parallel_mode
        return output

    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]:
        mu, sigma, weight = ctx.saved_tensors
        with torch.no_grad():
            # weight/bias grads: stacked so one pair of all-reduces covers both.
            bias_grad, weight_grad = output_grad, output_grad * mu / sigma
            grads = torch.stack([bias_grad, weight_grad]).contiguous()
            grads = torch.sum(grads, dim=tuple(range(len(grads.shape))[1:-1]))
            grads = all_reduce(grads, ctx.weight_parallel_mode)
            grads = all_reduce(grads, ctx.input_parallel_mode)
            bias_grad, weight_grad = grads[0], grads[1]
            # Input grad: standard layernorm backward, with the per-feature
            # sums (dvar, dmean) all-reduced across the sharded last dim.
            dz = output_grad * weight
            dvar = dz * mu * (-0.5) * sigma**(-3)
            dvar = all_reduce(torch.sum(dvar, dim=-1, keepdim=True), ctx.output_parallel_mode)
            dmean = dz * (-1 / sigma) + dvar * -2 * mu / ctx.normalized_shape
            dmean = all_reduce(torch.sum(dmean, dim=-1, keepdim=True), ctx.output_parallel_mode)
            input_grad = dz / sigma + dvar * 2 * mu / \
                ctx.normalized_shape + dmean / ctx.normalized_shape
        return input_grad, weight_grad, bias_grad, None, None, None, None, None
def layernorm_3d(input_: Tensor, weight: Tensor, bias: Tensor, normalized_shape: int, eps: float,
                 input_parallel_mode: ParallelMode, weight_parallel_mode: ParallelMode,
                 output_parallel_mode: ParallelMode) -> Tensor:
    r"""Layernorm for 3D tensor parallelism.

    Args:
        input_ (:class:`torch.tensor`): input matrix.
        weight (:class:`torch.tensor`): matrix of weight.
        bias (:class:`torch.tensor`): matrix of bias.
        normalized_shape (int): input shape from an expected input of size.
            :math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1]
            \times \ldots \times \text{normalized_shape}[-1]]`
            If a single integer is used, it is treated as a singleton list, and this module will
            normalize over the last dimension which is expected to be of that specific size.
        eps (float): a value added to the denominator for numerical stability
        input_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): input parallel mode.
        weight_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): weight parallel mode.
        output_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): output parallel mode.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    output = _Layernorm3D.apply(input_, weight, bias, normalized_shape, eps,
                                input_parallel_mode, weight_parallel_mode, output_parallel_mode)
    return output
def split_tensor_3d(tensor: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
    r"""Splits 3D parallel tensor in specified dimension.

    Args:
        tensor (:class:`torch.tensor`): Input tensor.
        dim (int): Specified dimension in which to split.
        parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`, optional): Parallel mode.

    Returns:
        :class:`torch.tensor`: The tensor has been split.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    # Nothing to split when the dimension holds at most one element.
    if tensor.size(dim) <= 1:
        return tensor
    world_size = gpc.get_world_size(parallel_mode)
    rank = gpc.get_local_rank(parallel_mode)
    chunks = torch.chunk(tensor, world_size, dim=dim)
    return chunks[rank].contiguous()
def split_batch_3d(input_: Tensor,
                   dim: int = 0,
                   input_parallel_mode: ParallelMode = ParallelMode.PARALLEL_3D_INPUT,
                   weight_parallel_mode: ParallelMode = ParallelMode.PARALLEL_3D_WEIGHT) -> Tensor:
    r"""Splits 3D tensor in batch.

    Args:
        input_ (:class:`torch.tensor`): Input tensor.
        dim (int): Specified dimension in which to split.
        input_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`, optional): input parallel mode.
        weight_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`, optional): weight parallel mode.

    Returns:
        :class:`torch.tensor`: The tensor has been split.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    # Nothing to split when the dimension holds at most one element.
    if input_.size(dim) <= 1:
        return input_
    # NOTE(review): the two parallel-mode parameters accepted above are ignored —
    # both modes are unconditionally re-resolved from the environment here.
    # Confirm no caller relies on passing custom modes.
    weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
    input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
    # Split first across the weight group, then again across the input group.
    output = torch.chunk(input_, gpc.get_world_size(weight_parallel_mode),
                        dim=dim)[gpc.get_local_rank(weight_parallel_mode)].contiguous()
    output = torch.chunk(output, gpc.get_world_size(input_parallel_mode),
                        dim=dim)[gpc.get_local_rank(input_parallel_mode)].contiguous()
    return output
class _ReduceTensor3D(torch.autograd.Function):
    """All-reduce in the forward pass; identity for the gradient."""

    @staticmethod
    def forward(ctx, input_, parallel_mode):
        output = all_reduce(input_, parallel_mode)
        return output

    @staticmethod
    def backward(ctx, output_grad):
        # The gradient of a sum-reduce passes through unchanged.
        return output_grad, None
def reduce_tensor_3d(tensor: Tensor, parallel_mode: ParallelMode) -> Tensor:
    r"""All-reduce ``tensor`` across every rank of ``parallel_mode``.

    Args:
        tensor (:class:`torch.tensor`): Input tensor.
        parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): Parallel mode.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    output = _ReduceTensor3D.apply(tensor, parallel_mode)
    return output
class _AllGatherTensor3D(torch.autograd.Function):
    """All-gather along a dimension in forward; reduce-scatter the gradient in backward."""

    @staticmethod
    def forward(ctx, input_, dim, parallel_mode):
        # Keep the layout so backward can scatter the gradient the same way.
        ctx.dim = dim
        ctx.parallel_mode = parallel_mode
        return all_gather(input_, dim, parallel_mode)

    @staticmethod
    def backward(ctx, output_grad):
        grad = reduce_scatter(output_grad, ctx.dim, ctx.parallel_mode)
        return grad, None, None
def all_gather_tensor_3d(tensor: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
    r"""All-gather the input tensor along ``dim`` in the forward pass.

    The gradient is reduce-scattered back along the same dimension in the
    backward pass (see ``_AllGatherTensor3D``).

    Args:
        tensor (:class:`torch.tensor`): Input tensor.
        dim (int): Dimension to gather.
        parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): Parallel mode.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    return _AllGatherTensor3D.apply(tensor, dim, parallel_mode)
class _ReduceScatterTensor3D(torch.autograd.Function):
    """Reduce-scatter along a dimension in forward; all-gather the gradient in backward."""

    @staticmethod
    def forward(ctx, input_, dim, parallel_mode):
        # Remember the layout so backward can gather the gradient back.
        ctx.dim = dim
        ctx.parallel_mode = parallel_mode
        output = reduce_scatter(input_, dim, parallel_mode)
        return output

    @staticmethod
    def backward(ctx, output_grad):
        # dim and parallel_mode take no gradient.
        return all_gather(output_grad, ctx.dim, ctx.parallel_mode), None, None
def reduce_scatter_tensor_3d(tensor: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
    r"""Reduce ``tensor`` across ``parallel_mode`` and scatter the result along ``dim``.

    Args:
        tensor (:class:`torch.tensor`): Input tensor.
        dim (int): Dimension to scatter.
        parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): Parallel mode.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    output = _ReduceScatterTensor3D.apply(tensor, dim, parallel_mode)
    return output
class _ReduceByBatch3D(torch.autograd.Function):
    """Sum the input over both the input and weight parallel groups, optionally averaging."""

    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
    def forward(ctx,
                input_: Tensor,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                reduce_mean: bool = False) -> Tensor:
        # Two successive all-reduces cover the combined input x weight mesh.
        output = all_reduce(input_, input_parallel_mode)
        output = all_reduce(output, weight_parallel_mode)
        ctx.reduce_mean = reduce_mean
        if not reduce_mean:
            return output.clone()
        ctx.reduce_size = gpc.get_world_size(input_parallel_mode) * gpc.get_world_size(weight_parallel_mode)
        return output.clone() / ctx.reduce_size

    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # Averaging in forward rescales the gradient by the same factor.
        if ctx.reduce_mean:
            output_grad = output_grad / ctx.reduce_size
        return output_grad, None, None, None
def reduce_by_batch_3d(tensor: Tensor,
                       input_parallel_mode: ParallelMode,
                       weight_parallel_mode: ParallelMode,
                       reduce_mean: bool = False) -> Tensor:
    r"""All-reduce the input from the model parallel region.

    Args:
        tensor (:class:`torch.tensor`): input tensor to be reduced.
        input_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): input parallel mode.
        weight_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): weight parallel mode.
        reduce_mean (bool, optional): If set to ``True``, it will divide the output by
            (input parallel size * weight parallel size), default to False.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    # The first parameter was previously undocumented; it is the tensor being reduced.
    return _ReduceByBatch3D.apply(tensor, input_parallel_mode, weight_parallel_mode, reduce_mean)
class _BroadcastWeight3D_FromDiagonal(torch.autograd.Function):
    r"""Broadcast a weight within the input-parallel group from the rank that
    sits on the diagonal of the processor cube.

    Args:
        input_ (:class:`torch.tensor`): input matrix.
        input_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): input parallel mode.
        weight_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): weight parallel mode.
        output_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): output parallel mode.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """

    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx, input_: Tensor, input_parallel_mode: ParallelMode, weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode) -> Tensor:
        # The "diagonal" source: the rank in the input group whose position
        # matches this rank's local rank in the output group.
        group_ranks = gpc.get_ranks_in_group(input_parallel_mode)
        diag_rank = group_ranks[gpc.get_local_rank(output_parallel_mode)]
        result = broadcast(input_, diag_rank, input_parallel_mode)
        ctx.src_rank = diag_rank
        ctx.input_parallel_mode = input_parallel_mode
        ctx.weight_parallel_mode = weight_parallel_mode
        ctx.output_parallel_mode = output_parallel_mode
        return result

    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # Every rank participates in the reduce; only the diagonal source rank
        # keeps (and further all-reduces) the gradient.
        grad = reduce(output_grad, ctx.src_rank, ctx.input_parallel_mode)
        if gpc.get_local_rank(ctx.input_parallel_mode) == gpc.get_local_rank(ctx.output_parallel_mode):
            grad = all_reduce(grad, ctx.weight_parallel_mode)
            return grad, None, None, None
        return None, None, None, None
def broadcast_weight_3d_from_diagonal(tensor: Tensor, input_parallel_mode: ParallelMode,
                                      weight_parallel_mode: ParallelMode, output_parallel_mode: ParallelMode) -> Tensor:
    r"""Broadcast ``tensor`` within the input-parallel group from the diagonal rank.

    Args:
        tensor (:class:`torch.tensor`): matrix to broadcast.
        input_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): input parallel mode.
        weight_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): weight parallel mode.
        output_parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`): output parallel mode.
    """
    return _BroadcastWeight3D_FromDiagonal.apply(tensor, input_parallel_mode, weight_parallel_mode,
                                                 output_parallel_mode)
| [
"colossalai.core.global_context.get_local_rank",
"colossalai.communication.all_gather",
"colossalai.communication.reduce_scatter",
"colossalai.communication.broadcast",
"colossalai.core.global_context.get_world_size",
"colossalai.communication.all_reduce",
"colossalai.core.global_context.get_ranks_in_gr... | [((558, 595), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (568, 595), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((4642, 4679), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (4652, 4679), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((8154, 8191), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float32'}), '(cast_inputs=torch.float32)\n', (8164, 8191), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((17424, 17461), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float32'}), '(cast_inputs=torch.float32)\n', (17434, 17461), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((20131, 20168), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (20141, 20168), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((1056, 1106), 'colossalai.communication.all_gather', 'all_gather', (['input_', 'input_dim', 'input_parallel_mode'], {}), '(input_, input_dim, input_parallel_mode)\n', (1066, 1106), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((1124, 1176), 'colossalai.communication.all_gather', 'all_gather', (['weight', 'weight_dim', 'weight_parallel_mode'], {}), '(weight, weight_dim, weight_parallel_mode)\n', (1134, 1176), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((1241, 1269), 'torch.matmul', 'torch.matmul', (['input_', 'weight'], {}), '(input_, weight)\n', (1253, 1269), False, 'import torch\n'), ((1287, 1343), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['output', 'output_dim', 'output_parallel_mode'], {}), '(output, output_dim, output_parallel_mode)\n', 
(1301, 1343), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((4957, 5000), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (4979, 5000), True, 'from colossalai.core import global_context as gpc\n'), ((5094, 5142), 'colossalai.communication.broadcast', 'broadcast', (['weight', 'src_rank', 'input_parallel_mode'], {}), '(weight, src_rank, input_parallel_mode)\n', (5103, 5142), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((5269, 5309), 'colossalai.communication.all_reduce', 'all_reduce', (['output', 'output_parallel_mode'], {}), '(output, output_parallel_mode)\n', (5279, 5309), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((8701, 8722), 'torch.sqrt', 'torch.sqrt', (['(var + eps)'], {}), '(var + eps)\n', (8711, 8722), False, 'import torch\n'), ((14487, 14520), 'colossalai.communication.all_reduce', 'all_reduce', (['input_', 'parallel_mode'], {}), '(input_, parallel_mode)\n', (14497, 14520), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((15390, 15428), 'colossalai.communication.all_gather', 'all_gather', (['input_', 'dim', 'parallel_mode'], {}), '(input_, dim, parallel_mode)\n', (15400, 15428), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((15527, 15582), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['output_grad', 'ctx.dim', 'ctx.parallel_mode'], {}), '(output_grad, ctx.dim, ctx.parallel_mode)\n', (15541, 15582), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((16487, 16529), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['input_', 'dim', 'parallel_mode'], {}), '(input_, dim, 
parallel_mode)\n', (16501, 16529), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((16606, 16657), 'colossalai.communication.all_gather', 'all_gather', (['output_grad', 'ctx.dim', 'ctx.parallel_mode'], {}), '(output_grad, ctx.dim, ctx.parallel_mode)\n', (16616, 16657), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((17689, 17728), 'colossalai.communication.all_reduce', 'all_reduce', (['input_', 'input_parallel_mode'], {}), '(input_, input_parallel_mode)\n', (17699, 17728), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((17746, 17786), 'colossalai.communication.all_reduce', 'all_reduce', (['output', 'weight_parallel_mode'], {}), '(output, weight_parallel_mode)\n', (17756, 17786), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((20365, 20408), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (20387, 20408), True, 'from colossalai.core import global_context as gpc\n'), ((20502, 20550), 'colossalai.communication.broadcast', 'broadcast', (['input_', 'src_rank', 'input_parallel_mode'], {}), '(input_, src_rank, input_parallel_mode)\n', (20511, 20550), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((20893, 20951), 'colossalai.communication.reduce', 'reduce', (['output_grad', 'ctx.src_rank', 'ctx.input_parallel_mode'], {}), '(output_grad, ctx.src_rank, ctx.input_parallel_mode)\n', (20899, 20951), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((1853, 1868), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1866, 1868), False, 'import torch\n'), ((1896, 1961), 'colossalai.communication.all_gather', 'all_gather', 
(['output_grad', 'ctx.output_dim', 'ctx.output_parallel_mode'], {}), '(output_grad, ctx.output_dim, ctx.output_parallel_mode)\n', (1906, 1961), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((2099, 2185), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['input_grad', 'ctx.input_dim', 'ctx.input_parallel_mode'], {'async_op': '(True)'}), '(input_grad, ctx.input_dim, ctx.input_parallel_mode, async_op\n =True)\n', (2113, 2185), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((2403, 2491), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['weight_grad', 'ctx.weight_dim', 'ctx.weight_parallel_mode'], {'async_op': '(True)'}), '(weight_grad, ctx.weight_dim, ctx.weight_parallel_mode,\n async_op=True)\n', (2417, 2491), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((5035, 5075), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['output_parallel_mode'], {}), '(output_parallel_mode)\n', (5053, 5075), True, 'from colossalai.core import global_context as gpc\n'), ((5745, 5760), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5758, 5760), False, 'import torch\n'), ((5978, 6036), 'colossalai.communication.reduce', 'reduce', (['weight_grad', 'ctx.src_rank', 'ctx.input_parallel_mode'], {}), '(weight_grad, ctx.src_rank, ctx.input_parallel_mode)\n', (5984, 6036), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((6746, 6779), 'torch.matmul', 'torch.matmul', (['output_grad', 'weight'], {}), '(output_grad, weight)\n', (6758, 6779), False, 'import torch\n'), ((9230, 9245), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9243, 9245), False, 'import torch\n'), ((9492, 9535), 'colossalai.communication.all_reduce', 'all_reduce', (['grads', 'ctx.weight_parallel_mode'], {}), '(grads, 
ctx.weight_parallel_mode)\n', (9502, 9535), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((9556, 9598), 'colossalai.communication.all_reduce', 'all_reduce', (['grads', 'ctx.input_parallel_mode'], {}), '(grads, ctx.input_parallel_mode)\n', (9566, 9598), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((20443, 20483), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['output_parallel_mode'], {}), '(output_parallel_mode)\n', (20461, 20483), True, 'from colossalai.core import global_context as gpc\n'), ((20963, 21006), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.input_parallel_mode'], {}), '(ctx.input_parallel_mode)\n', (20981, 21006), True, 'from colossalai.core import global_context as gpc\n'), ((21010, 21054), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.output_parallel_mode'], {}), '(ctx.output_parallel_mode)\n', (21028, 21054), True, 'from colossalai.core import global_context as gpc\n'), ((21081, 21129), 'colossalai.communication.all_reduce', 'all_reduce', (['input_grad', 'ctx.weight_parallel_mode'], {}), '(input_grad, ctx.weight_parallel_mode)\n', (21091, 21129), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((2681, 2743), 'colossalai.communication.all_reduce', 'all_reduce', (['bias_grad', 'ctx.weight_parallel_mode'], {'async_op': '(True)'}), '(bias_grad, ctx.weight_parallel_mode, async_op=True)\n', (2691, 2743), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((6052, 6095), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.input_parallel_mode'], {}), '(ctx.input_parallel_mode)\n', (6070, 6095), True, 'from colossalai.core import global_context as gpc\n'), ((6099, 6143), 
'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.output_parallel_mode'], {}), '(ctx.output_parallel_mode)\n', (6117, 6143), True, 'from colossalai.core import global_context as gpc\n'), ((6179, 6243), 'colossalai.communication.all_reduce', 'all_reduce', (['weight_grad', 'ctx.weight_parallel_mode'], {'async_op': '(True)'}), '(weight_grad, ctx.weight_parallel_mode, async_op=True)\n', (6189, 6243), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((6490, 6536), 'colossalai.communication.all_reduce', 'all_reduce', (['bias_grad', 'ctx.input_parallel_mode'], {}), '(bias_grad, ctx.input_parallel_mode)\n', (6500, 6536), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((6569, 6631), 'colossalai.communication.all_reduce', 'all_reduce', (['bias_grad', 'ctx.weight_parallel_mode'], {'async_op': '(True)'}), '(bias_grad, ctx.weight_parallel_mode, async_op=True)\n', (6579, 6631), False, 'from colossalai.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter\n'), ((8470, 8509), 'torch.sum', 'torch.sum', (['input_'], {'dim': '(-1)', 'keepdim': '(True)'}), '(input_, dim=-1, keepdim=True)\n', (8479, 8509), False, 'import torch\n'), ((8604, 8644), 'torch.sum', 'torch.sum', (['(mu ** 2)'], {'dim': '(-1)', 'keepdim': '(True)'}), '(mu ** 2, dim=-1, keepdim=True)\n', (8613, 8644), False, 'import torch\n'), ((9774, 9811), 'torch.sum', 'torch.sum', (['dvar'], {'dim': '(-1)', 'keepdim': '(True)'}), '(dvar, dim=-1, keepdim=True)\n', (9783, 9811), False, 'import torch\n'), ((9948, 9986), 'torch.sum', 'torch.sum', (['dmean'], {'dim': '(-1)', 'keepdim': '(True)'}), '(dmean, dim=-1, keepdim=True)\n', (9957, 9986), False, 'import torch\n'), ((12779, 12812), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['parallel_mode'], {}), '(parallel_mode)\n', (12797, 12812), True, 'from colossalai.core import 
global_context as gpc\n'), ((14123, 14163), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['weight_parallel_mode'], {}), '(weight_parallel_mode)\n', (14141, 14163), True, 'from colossalai.core import global_context as gpc\n'), ((14286, 14325), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (14304, 14325), True, 'from colossalai.core import global_context as gpc\n'), ((17875, 17914), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (17893, 17914), True, 'from colossalai.core import global_context as gpc\n'), ((17917, 17957), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['weight_parallel_mode'], {}), '(weight_parallel_mode)\n', (17935, 17957), True, 'from colossalai.core import global_context as gpc\n'), ((9342, 9379), 'torch.stack', 'torch.stack', (['[bias_grad, weight_grad]'], {}), '([bias_grad, weight_grad])\n', (9353, 9379), False, 'import torch\n'), ((12710, 12743), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['parallel_mode'], {}), '(parallel_mode)\n', (12728, 12743), True, 'from colossalai.core import global_context as gpc\n'), ((14047, 14087), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['weight_parallel_mode'], {}), '(weight_parallel_mode)\n', (14065, 14087), True, 'from colossalai.core import global_context as gpc\n'), ((14211, 14250), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (14229, 14250), True, 'from colossalai.core import global_context as gpc\n')] |
import colossalai
import colossalai.nn as col_nn
import torch
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device, print_rank_0
# 2.5D tensor parallelism across 8 devices with tesseract depth 2;
# no data or pipeline parallelism.
CONFIG = {
    'parallel': {
        'data': 1,
        'pipeline': 1,
        'tensor': {'size': 8, 'mode': '2.5d', 'depth': 2},
    },
}
class MLP(torch.nn.Module):
    """Feed-forward block (dim -> 4*dim -> dim) built from ColossalAI parallel
    layers, with GELU activation and dropout; logs shapes on rank 0."""

    def __init__(self, dim: int = 256):
        super().__init__()
        hidden_dim = dim * 4
        self.dense_1 = col_nn.Linear(dim, hidden_dim)
        print_rank_0(f'Weight of the first linear layer: {self.dense_1.weight.shape}')
        self.activation = torch.nn.GELU()
        self.dense_2 = col_nn.Linear(hidden_dim, dim)
        print_rank_0(f'Weight of the second linear layer: {self.dense_2.weight.shape}')
        self.dropout = col_nn.Dropout(0.1)

    def forward(self, x):
        x = self.dense_1(x)
        print_rank_0(f'Output of the first linear layer: {x.shape}')
        x = self.activation(x)
        x = self.dense_2(x)
        print_rank_0(f'Output of the second linear layer: {x.shape}')
        return self.dropout(x)
def main():
    """Launch ColossalAI, build the MLP, and run one forward pass on a
    rank-partitioned random input."""
    colossalai.logging.disable_existing_loggers()
    parser = colossalai.get_default_parser()
    parser.add_argument('--from_torch', default=False, action='store_true')
    args = parser.parse_args()
    if args.from_torch:
        colossalai.launch_from_torch(config=CONFIG)
    else:
        # standard launch from explicit command-line arguments
        colossalai.launch(config=CONFIG,
                          rank=args.rank,
                          world_size=args.world_size,
                          local_rank=args.local_rank,
                          host=args.host,
                          port=args.port)
    model = MLP()
    x = torch.randn((16, 256), device=get_current_device())
    # partition input: make it identical on every rank, then take this rank's
    # chunk along the depth, column (dim 0) and row (last dim) directions
    torch.distributed.broadcast(x, src=0)
    x = torch.chunk(x, 2, dim=0)[gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)]
    x = torch.chunk(x, 2, dim=0)[gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)]
    x = torch.chunk(x, 2, dim=-1)[gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)]
    print_rank_0(f'Input: {x.shape}')
    x = model(x)


if __name__ == '__main__':
    main()
| [
"colossalai.nn.Linear",
"colossalai.utils.print_rank_0",
"colossalai.launch",
"colossalai.core.global_context.get_local_rank",
"colossalai.utils.get_current_device",
"colossalai.logging.disable_existing_loggers",
"colossalai.launch_from_torch",
"colossalai.nn.Dropout",
"colossalai.get_default_parser... | [((1192, 1237), 'colossalai.logging.disable_existing_loggers', 'colossalai.logging.disable_existing_loggers', ([], {}), '()\n', (1235, 1237), False, 'import colossalai\n'), ((1252, 1283), 'colossalai.get_default_parser', 'colossalai.get_default_parser', ([], {}), '()\n', (1281, 1283), False, 'import colossalai\n'), ((1898, 1935), 'torch.distributed.broadcast', 'torch.distributed.broadcast', (['x'], {'src': '(0)'}), '(x, src=0)\n', (1925, 1935), False, 'import torch\n'), ((2200, 2233), 'colossalai.utils.print_rank_0', 'print_rank_0', (['f"""Input: {x.shape}"""'], {}), "(f'Input: {x.shape}')\n", (2212, 2233), False, 'from colossalai.utils import get_current_device, print_rank_0\n'), ((501, 537), 'colossalai.nn.Linear', 'col_nn.Linear', (['dim', 'intermediate_dim'], {}), '(dim, intermediate_dim)\n', (514, 537), True, 'import colossalai.nn as col_nn\n'), ((547, 625), 'colossalai.utils.print_rank_0', 'print_rank_0', (['f"""Weight of the first linear layer: {self.dense_1.weight.shape}"""'], {}), "(f'Weight of the first linear layer: {self.dense_1.weight.shape}')\n", (559, 625), False, 'from colossalai.utils import get_current_device, print_rank_0\n'), ((653, 668), 'torch.nn.GELU', 'torch.nn.GELU', ([], {}), '()\n', (666, 668), False, 'import torch\n'), ((693, 729), 'colossalai.nn.Linear', 'col_nn.Linear', (['intermediate_dim', 'dim'], {}), '(intermediate_dim, dim)\n', (706, 729), True, 'import colossalai.nn as col_nn\n'), ((739, 818), 'colossalai.utils.print_rank_0', 'print_rank_0', (['f"""Weight of the second linear layer: {self.dense_2.weight.shape}"""'], {}), "(f'Weight of the second linear layer: {self.dense_2.weight.shape}')\n", (751, 818), False, 'from colossalai.utils import get_current_device, print_rank_0\n'), ((843, 862), 'colossalai.nn.Dropout', 'col_nn.Dropout', (['(0.1)'], {}), '(0.1)\n', (857, 862), True, 'import colossalai.nn as col_nn\n'), ((930, 990), 'colossalai.utils.print_rank_0', 'print_rank_0', (['f"""Output of 
the first linear layer: {x.shape}"""'], {}), "(f'Output of the first linear layer: {x.shape}')\n", (942, 990), False, 'from colossalai.utils import get_current_device, print_rank_0\n'), ((1061, 1122), 'colossalai.utils.print_rank_0', 'print_rank_0', (['f"""Output of the second linear layer: {x.shape}"""'], {}), "(f'Output of the second linear layer: {x.shape}')\n", (1073, 1122), False, 'from colossalai.utils import get_current_device, print_rank_0\n'), ((1427, 1470), 'colossalai.launch_from_torch', 'colossalai.launch_from_torch', ([], {'config': 'CONFIG'}), '(config=CONFIG)\n', (1455, 1470), False, 'import colossalai\n'), ((1518, 1658), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'CONFIG', 'rank': 'args.rank', 'world_size': 'args.world_size', 'local_rank': 'args.local_rank', 'host': 'args.host', 'port': 'args.port'}), '(config=CONFIG, rank=args.rank, world_size=args.world_size,\n local_rank=args.local_rank, host=args.host, port=args.port)\n', (1535, 1658), False, 'import colossalai\n'), ((1945, 1969), 'torch.chunk', 'torch.chunk', (['x', '(2)'], {'dim': '(0)'}), '(x, 2, dim=0)\n', (1956, 1969), False, 'import torch\n'), ((1970, 2020), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_DEP'], {}), '(ParallelMode.PARALLEL_2P5D_DEP)\n', (1988, 2020), True, 'from colossalai.core import global_context as gpc\n'), ((2031, 2055), 'torch.chunk', 'torch.chunk', (['x', '(2)'], {'dim': '(0)'}), '(x, 2, dim=0)\n', (2042, 2055), False, 'import torch\n'), ((2056, 2106), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_COL'], {}), '(ParallelMode.PARALLEL_2P5D_COL)\n', (2074, 2106), True, 'from colossalai.core import global_context as gpc\n'), ((2117, 2142), 'torch.chunk', 'torch.chunk', (['x', '(2)'], {'dim': '(-1)'}), '(x, 2, dim=-1)\n', (2128, 2142), False, 'import torch\n'), ((2143, 2193), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', 
(['ParallelMode.PARALLEL_2P5D_ROW'], {}), '(ParallelMode.PARALLEL_2P5D_ROW)\n', (2161, 2193), True, 'from colossalai.core import global_context as gpc\n'), ((1848, 1868), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (1866, 1868), False, 'from colossalai.utils import get_current_device, print_rank_0\n')] |
import math
import torch
from torch import Tensor
from torch.nn import Parameter, init as init
from colossalai.context import seed, ParallelMode
from colossalai.core import global_context as gpc
from colossalai.registry import LAYERS
from colossalai.utils import get_current_device
from ._operation import Matmul_AB_2p5D, Add_Bias_2p5D, _LayerNorm_2p5D
from ._utils import get_tesseract_dim_dep_from_env, assert_tesseract_initialization
from .._common_utils import divide, set_tensor_parallel_attribute_by_partition
from ..base_layer import ParallelLayer
@LAYERS.register_module
class Linear2p5D(ParallelLayer):
    """Linear layer for 2.5D parallelism

    The weight is sharded over a ``tesseract_dim x tesseract_dim`` process grid:
    each rank stores a ``[in_features/q, out_features/q]`` partition and the full
    product is computed collectively by ``Matmul_AB_2p5D``.

    :param in_features: size of each input sample
    :type in_features: int
    :param out_features: size of each output sample
    :type out_features: int
    :param bias: If set to ``False``, the layer will not learn an additive bias, defaults to True
    :type bias: bool, optional
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param skip_bias_add: If set to ``True``, ``forward`` returns ``(output, bias)``
        without fusing the bias addition, defaults to False
    :type skip_bias_add: bool, optional
    :param init_weight: weight init scheme, one of ``'torch'``, ``'jax'``, ``'zero'``
    :type init_weight: str, optional
    :param init_bias: bias init scheme, one of ``'torch'``, ``'jax'``, ``'zero'``
    :type init_bias: str, optional
    """
    def __init__(self,
                 in_features: int,
                 out_features: int,
                 bias: bool = True,
                 dtype=None,
                 skip_bias_add: bool = False,
                 init_weight='torch',
                 init_bias='torch'
                 ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.skip_bias_add = skip_bias_add
        # parallel setting: this rank's coordinates in the 2.5D tesseract
        assert_tesseract_initialization()
        self.row_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
        self.col_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
        self.dep_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)
        self.tesseract_dim, _ = get_tesseract_dim_dep_from_env()
        # partitioning dimension: both in- and out-features are split q ways
        self.input_size_per_partition = divide(in_features, self.tesseract_dim)
        self.hidden_size_per_partition = divide(
            out_features, self.tesseract_dim)
        # create weight, shape: [k/q, h/q]
        factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
        self.weight = Parameter(torch.empty(
            self.input_size_per_partition,
            self.hidden_size_per_partition,
            **factory_kwargs))
        # create bias, shape: [h/q]
        if bias:
            self.bias = Parameter(torch.empty(
                self.hidden_size_per_partition,
                **factory_kwargs))
        else:
            self.register_parameter('bias', None)
        # initialize parameters under the TENSOR-parallel seed context
        with seed(ParallelMode.TENSOR):
            self.reset_parameters(init_weight, init_bias)
        self._set_tensor_parallel_attributes()
    def _set_tensor_parallel_attributes(self):
        # mark weight (and bias, if any) with the tensor-parallel partition count
        num_partition = gpc.get_world_size(ParallelMode.TENSOR)
        set_tensor_parallel_attribute_by_partition(self.weight, num_partition)
        if self.bias is not None:
            set_tensor_parallel_attribute_by_partition(self.bias, num_partition)
    def reset_parameters(self, init_weight, init_bias) -> None:
        """Initialize weight and bias in place using the selected schemes."""
        assert init_weight in ('torch', 'jax', 'zero')
        assert init_bias in ('torch', 'jax', 'zero')
        # setting: fan sizes are the *full* (unpartitioned) feature dimensions
        fan_in, fan_out = self.in_features, self.out_features
        # init weight
        if init_weight == 'torch':
            # matches torch.nn.Linear's default kaiming-uniform recipe
            a = math.sqrt(5)
            nonlinearity = 'leaky_relu'
            std = init.calculate_gain(nonlinearity, a) / math.sqrt(fan_in)
            bound = math.sqrt(3.0) * std
            init.uniform_(self.weight, -bound, bound)
        elif init_weight == 'jax':
            # glorot/xavier-style uniform bound
            std = math.sqrt(2.0 / float(fan_in + fan_out))
            a = math.sqrt(3.0) * std
            init.uniform_(self.weight, -a, a)
        elif init_weight == 'zero':
            init.zeros_(self.weight)
        # init bias
        if self.bias is not None:
            if init_bias == 'torch':
                bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
                init.uniform_(self.bias, -bound, bound)
            elif init_bias == 'jax':
                init.normal_(self.bias, std=1e-6)
            elif init_bias == 'zero':
                init.zeros_(self.bias)
    def forward(self, x: Tensor) -> Tensor:
        """Apply the partitioned linear transform.

        Returns ``(output, bias)`` when ``skip_bias_add`` is set and a bias
        exists; otherwise returns the output tensor alone.
        """
        # NOTE(review): self.data_parallel_rank / pipeline_parallel_rank /
        # pipeline_parallel_size / tensor_parallel_size are read below but never
        # assigned in this class -- presumably provided by ParallelLayer or set
        # externally; verify before use.
        # input: [m/dq, n/q, k/q]
        # output: [m/dq, n/q, h/q]
        out_shape = x.shape[:-1] + (self.hidden_size_per_partition,)
        output = Matmul_AB_2p5D.apply(
            x,
            self.weight,
            self.tesseract_dim,
            out_shape,
            self.row_rank, self.col_rank, self.dep_rank,
            ParallelMode.PARALLEL_2P5D_ROW,
            ParallelMode.PARALLEL_2P5D_COL,
            self.data_parallel_rank,
            self.pipeline_parallel_rank,
            self.pipeline_parallel_size,
            self.tensor_parallel_size,
        )
        if self.bias is not None:
            if self.skip_bias_add:
                # return the bias separately so the caller can fuse it later
                bias = Add_Bias_2p5D.apply(
                    None,
                    self.bias,
                    self.hidden_size_per_partition,
                    self.tesseract_dim,
                    self.row_rank, self.col_rank, self.dep_rank,
                    ParallelMode.PARALLEL_2P5D_COL,
                    True,
                    self.data_parallel_rank,
                    self.pipeline_parallel_rank,
                    self.pipeline_parallel_size,
                    self.tensor_parallel_size
                )
                return output, bias
            else:
                output = Add_Bias_2p5D.apply(
                    output,
                    self.bias,
                    self.hidden_size_per_partition,
                    self.tesseract_dim,
                    self.row_rank, self.col_rank, self.dep_rank,
                    ParallelMode.PARALLEL_2P5D_COL,
                    False,
                    self.data_parallel_rank,
                    self.pipeline_parallel_rank,
                    self.pipeline_parallel_size,
                    self.tensor_parallel_size
                )
                return output
        else:
            return output
@LAYERS.register_module
class LayerNorm2p5D(ParallelLayer):
    r"""Layer Normalization for 2.5D parallelism

    ``gamma`` and ``beta`` are partitioned across the row dimension of the
    tesseract; normalization statistics are all-reduced over the row group.

    :param normalized_shape: input shape from an expected input
        of size. :math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1] \times \ldots \times \text{normalized_shape}[-1]]`
        If a single integer is used, it is treated as a singleton list, and this module will
        normalize over the last dimension which is expected to be of that specific size.
    :type normalized_shape: int
    :param eps: a value added to the denominator for numerical stability, defaults to 1e-05
    :type eps: float, optional
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    """
    def __init__(self,
                 normalized_shape: int,
                 eps: float = 1e-05,
                 dtype=None
                 ):
        super().__init__()
        # layer norm config
        self.normalized_shape = normalized_shape
        self.variance_epsilon = eps
        # parallel setting: this rank's coordinates in the 2.5D tesseract
        assert_tesseract_initialization()
        self.row_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
        self.col_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
        self.dep_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)
        self.tesseract_dim, _ = get_tesseract_dim_dep_from_env()
        # partitioning dimension: each rank keeps normalized_shape / q entries
        self.partitioned_partition = divide(
            normalized_shape, self.tesseract_dim)
        # create parameters (gamma = scale, beta = shift), each of partitioned size
        factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
        self.gamma = Parameter(torch.ones(
            self.partitioned_partition,
            **factory_kwargs))
        self.beta = Parameter(torch.zeros(
            self.partitioned_partition,
            **factory_kwargs))
        self._set_tensor_parallel_attribute()
    def _set_tensor_parallel_attribute(self):
        # mark gamma and beta with the tensor-parallel partition count
        num_partition = gpc.get_world_size(ParallelMode.TENSOR)
        set_tensor_parallel_attribute_by_partition(self.gamma, num_partition)
        set_tensor_parallel_attribute_by_partition(self.beta, num_partition)
    def forward(self, x: Tensor) -> Tensor:
        """Normalize ``x`` over the (row-partitioned) last dimension, then apply
        the partitioned affine transform ``beta + gamma * x_hat``."""
        # NOTE(review): self.data_parallel_rank / pipeline_parallel_rank /
        # pipeline_parallel_size / tensor_parallel_size are read below but never
        # assigned in this class -- presumably provided by ParallelLayer or set
        # externally; verify before use.
        # statistics are computed outside autograd; _LayerNorm_2p5D supplies the backward
        with torch.no_grad():
            E_x = torch.sum(x, dim=-1, keepdim=True)  # [b/q, s, 1]
            # sum partial results over the row group to cover the full last dim
            torch.distributed.all_reduce(
                E_x, group=gpc.get_group(ParallelMode.PARALLEL_2P5D_ROW))
            E_x /= self.normalized_shape
            # Var_x in the block below is the sum of input^2
            Var_x = torch.sum(x * x, dim=-1, keepdim=True)  # [b/q, s, 1]
            torch.distributed.all_reduce(
                Var_x, group=gpc.get_group(ParallelMode.PARALLEL_2P5D_ROW))
            Var_x /= self.normalized_shape
            Var_x = Var_x - E_x * E_x  # variance of x [b/q, s, 1]
            # this time 1/sqrt(Var_x + epsilon)
            Var_x = 1.0 / torch.sqrt(Var_x + self.variance_epsilon)
        output = _LayerNorm_2p5D.apply(x, E_x, Var_x, self.normalized_shape,
                                       ParallelMode.PARALLEL_2P5D_ROW)
        # materialize this rank's full bias/scale shards via Add_Bias_2p5D
        bias = Add_Bias_2p5D.apply(
            None, self.beta, self.partitioned_partition,
            self.tesseract_dim,
            self.row_rank, self.col_rank, self.dep_rank,
            ParallelMode.PARALLEL_2P5D_COL,
            True,
            self.data_parallel_rank,
            self.pipeline_parallel_rank,
            self.pipeline_parallel_size,
            self.tensor_parallel_size
        )
        scale = Add_Bias_2p5D.apply(
            None, self.gamma, self.partitioned_partition,
            self.tesseract_dim,
            self.row_rank, self.col_rank, self.dep_rank,
            ParallelMode.PARALLEL_2P5D_COL,
            True,
            self.data_parallel_rank,
            self.pipeline_parallel_rank,
            self.pipeline_parallel_size,
            self.tensor_parallel_size
        )
        # bias + scale * normalized_output
        output = torch.addcmul(bias, scale, output)
        return output
| [
"colossalai.core.global_context.get_local_rank",
"colossalai.utils.get_current_device",
"colossalai.core.global_context.get_world_size",
"colossalai.context.seed",
"colossalai.core.global_context.get_group"
] | [((1595, 1645), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_COL'], {}), '(ParallelMode.PARALLEL_2P5D_COL)\n', (1613, 1645), True, 'from colossalai.core import global_context as gpc\n'), ((1670, 1720), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_ROW'], {}), '(ParallelMode.PARALLEL_2P5D_ROW)\n', (1688, 1720), True, 'from colossalai.core import global_context as gpc\n'), ((1745, 1795), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_DEP'], {}), '(ParallelMode.PARALLEL_2P5D_DEP)\n', (1763, 1795), True, 'from colossalai.core import global_context as gpc\n'), ((2849, 2888), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (2867, 2888), True, 'from colossalai.core import global_context as gpc\n'), ((7328, 7378), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_COL'], {}), '(ParallelMode.PARALLEL_2P5D_COL)\n', (7346, 7378), True, 'from colossalai.core import global_context as gpc\n'), ((7403, 7453), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_ROW'], {}), '(ParallelMode.PARALLEL_2P5D_ROW)\n', (7421, 7453), True, 'from colossalai.core import global_context as gpc\n'), ((7478, 7528), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_DEP'], {}), '(ParallelMode.PARALLEL_2P5D_DEP)\n', (7496, 7528), True, 'from colossalai.core import global_context as gpc\n'), ((8178, 8217), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (8196, 8217), True, 'from colossalai.core import global_context as gpc\n'), ((10144, 10178), 'torch.addcmul', 'torch.addcmul', (['bias', 'scale', 'output'], {}), '(bias, scale, 
output)\n', (10157, 10178), False, 'import torch\n'), ((2150, 2170), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (2168, 2170), False, 'from colossalai.utils import get_current_device\n'), ((2220, 2316), 'torch.empty', 'torch.empty', (['self.input_size_per_partition', 'self.hidden_size_per_partition'], {}), '(self.input_size_per_partition, self.hidden_size_per_partition,\n **factory_kwargs)\n', (2231, 2316), False, 'import torch\n'), ((2645, 2670), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (2649, 2670), False, 'from colossalai.context import seed, ParallelMode\n'), ((3410, 3422), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (3419, 3422), False, 'import math\n'), ((3591, 3632), 'torch.nn.init.uniform_', 'init.uniform_', (['self.weight', '(-bound)', 'bound'], {}), '(self.weight, -bound, bound)\n', (3604, 3632), True, 'from torch.nn import Parameter, init as init\n'), ((7793, 7813), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (7811, 7813), False, 'from colossalai.utils import get_current_device\n'), ((7863, 7919), 'torch.ones', 'torch.ones', (['self.partitioned_partition'], {}), '(self.partitioned_partition, **factory_kwargs)\n', (7873, 7919), False, 'import torch\n'), ((7976, 8033), 'torch.zeros', 'torch.zeros', (['self.partitioned_partition'], {}), '(self.partitioned_partition, **factory_kwargs)\n', (7987, 8033), False, 'import torch\n'), ((8431, 8446), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8444, 8446), False, 'import torch\n'), ((8466, 8500), 'torch.sum', 'torch.sum', (['x'], {'dim': '(-1)', 'keepdim': '(True)'}), '(x, dim=-1, keepdim=True)\n', (8475, 8500), False, 'import torch\n'), ((8755, 8793), 'torch.sum', 'torch.sum', (['(x * x)'], {'dim': '(-1)', 'keepdim': '(True)'}), '(x * x, dim=-1, keepdim=True)\n', (8764, 8793), False, 'import torch\n'), ((2439, 2500), 'torch.empty', 'torch.empty', 
(['self.hidden_size_per_partition'], {}), '(self.hidden_size_per_partition, **factory_kwargs)\n', (2450, 2500), False, 'import torch\n'), ((3481, 3517), 'torch.nn.init.calculate_gain', 'init.calculate_gain', (['nonlinearity', 'a'], {}), '(nonlinearity, a)\n', (3500, 3517), True, 'from torch.nn import Parameter, init as init\n'), ((3520, 3537), 'math.sqrt', 'math.sqrt', (['fan_in'], {}), '(fan_in)\n', (3529, 3537), False, 'import math\n'), ((3558, 3572), 'math.sqrt', 'math.sqrt', (['(3.0)'], {}), '(3.0)\n', (3567, 3572), False, 'import math\n'), ((3776, 3809), 'torch.nn.init.uniform_', 'init.uniform_', (['self.weight', '(-a)', 'a'], {}), '(self.weight, -a, a)\n', (3789, 3809), True, 'from torch.nn import Parameter, init as init\n'), ((4058, 4097), 'torch.nn.init.uniform_', 'init.uniform_', (['self.bias', '(-bound)', 'bound'], {}), '(self.bias, -bound, bound)\n', (4071, 4097), True, 'from torch.nn import Parameter, init as init\n'), ((9112, 9153), 'torch.sqrt', 'torch.sqrt', (['(Var_x + self.variance_epsilon)'], {}), '(Var_x + self.variance_epsilon)\n', (9122, 9153), False, 'import torch\n'), ((3743, 3757), 'math.sqrt', 'math.sqrt', (['(3.0)'], {}), '(3.0)\n', (3752, 3757), False, 'import math\n'), ((3858, 3882), 'torch.nn.init.zeros_', 'init.zeros_', (['self.weight'], {}), '(self.weight)\n', (3869, 3882), True, 'from torch.nn import Parameter, init as init\n'), ((4151, 4185), 'torch.nn.init.normal_', 'init.normal_', (['self.bias'], {'std': '(1e-06)'}), '(self.bias, std=1e-06)\n', (4163, 4185), True, 'from torch.nn import Parameter, init as init\n'), ((8585, 8630), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.PARALLEL_2P5D_ROW'], {}), '(ParallelMode.PARALLEL_2P5D_ROW)\n', (8598, 8630), True, 'from colossalai.core import global_context as gpc\n'), ((8880, 8925), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.PARALLEL_2P5D_ROW'], {}), '(ParallelMode.PARALLEL_2P5D_ROW)\n', (8893, 8925), True, 'from 
colossalai.core import global_context as gpc\n'), ((4003, 4020), 'math.sqrt', 'math.sqrt', (['fan_in'], {}), '(fan_in)\n', (4012, 4020), False, 'import math\n'), ((4239, 4261), 'torch.nn.init.zeros_', 'init.zeros_', (['self.bias'], {}), '(self.bias)\n', (4250, 4261), True, 'from torch.nn import Parameter, init as init\n')] |
import copy
from functools import partial
import colossalai
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.utils import free_port
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)
from colossalai.zero.sharded_model import ShardedModelV2
from colossalai.zero.sharded_optim import ShardedOptimizerV2
from tests.components_to_test.registry import non_distributed_component_funcs
from torch.nn.parallel import DistributedDataParallel as DDP
from common import CONFIG, check_sharded_params_padding
def run_step(model, optimizer, data, label, criterion, enable_autocast=False):
model.train()
optimizer.zero_grad()
with torch.cuda.amp.autocast(enabled=enable_autocast):
if criterion:
y = model(data)
loss = criterion(y, label)
else:
loss = model(data, label)
loss = loss.float()
if isinstance(model, ShardedModelV2):
optimizer.backward(loss)
else:
loss.backward()
optimizer.step()
def run_dist(rank, world_size, port, cpu_offload, shard_strategy):
colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
test_models = ['repeated_computed_layers', 'resnet18', 'bert']
shard_strategy = shard_strategy()
for model_name in test_models:
get_components_func = non_distributed_component_funcs.get_callable(model_name)
model, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
model = model(checkpoint=True).cuda()
zero_model = ShardedModelV2(copy.deepcopy(model),
shard_strategy,
offload_config=dict(device='cpu') if cpu_offload else None)
if dist.get_world_size() > 1:
model = DDP(model)
lr = 1e-3
optim = optimizer_class(model.parameters(), lr=lr)
sharded_optim = ShardedOptimizerV2(zero_model,
optimizer_class,
cpu_offload=cpu_offload,
initial_scale=2**5,
lr=lr)
for i, (data, label) in enumerate(train_dataloader):
if i > 2:
break
data, label = data.cuda(), label.cuda()
run_step(model, optim, data, label, criterion, False)
run_step(zero_model, sharded_optim, data, label, criterion, False)
check_sharded_params_padding(model, zero_model, loose=True)
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@pytest.mark.parametrize("cpu_offload", [True, False])
@pytest.mark.parametrize("shard_strategy", [TensorShardStrategy, BucketTensorShardStrategy])
def test_sharded_optim_v2(world_size, cpu_offload, shard_strategy):
run_func = partial(run_dist,
world_size=world_size,
port=free_port(),
cpu_offload=cpu_offload,
shard_strategy=shard_strategy)
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_sharded_optim_v2(world_size=2, cpu_offload=True, shard_strategy=TensorShardStrategy) | [
"colossalai.launch",
"colossalai.utils.free_port",
"colossalai.zero.sharded_optim.ShardedOptimizerV2"
] | [((2680, 2725), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 2]'], {}), "('world_size', [1, 2])\n", (2703, 2725), False, 'import pytest\n'), ((2727, 2780), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cpu_offload"""', '[True, False]'], {}), "('cpu_offload', [True, False])\n", (2750, 2780), False, 'import pytest\n'), ((2782, 2877), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shard_strategy"""', '[TensorShardStrategy, BucketTensorShardStrategy]'], {}), "('shard_strategy', [TensorShardStrategy,\n BucketTensorShardStrategy])\n", (2805, 2877), False, 'import pytest\n'), ((1151, 1267), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'CONFIG', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=CONFIG, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (1168, 1267), False, 'import colossalai\n'), ((3168, 3205), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (3176, 3205), True, 'import torch.multiprocessing as mp\n'), ((732, 780), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {'enabled': 'enable_autocast'}), '(enabled=enable_autocast)\n', (755, 780), False, 'import torch\n'), ((1433, 1489), 'tests.components_to_test.registry.non_distributed_component_funcs.get_callable', 'non_distributed_component_funcs.get_callable', (['model_name'], {}), '(model_name)\n', (1477, 1489), False, 'from tests.components_to_test.registry import non_distributed_component_funcs\n'), ((2013, 2118), 'colossalai.zero.sharded_optim.ShardedOptimizerV2', 'ShardedOptimizerV2', (['zero_model', 'optimizer_class'], {'cpu_offload': 'cpu_offload', 'initial_scale': '(2 ** 5)', 'lr': 'lr'}), '(zero_model, optimizer_class, cpu_offload=cpu_offload,\n initial_scale=2 ** 5, lr=lr)\n', (2031, 2118), False, 'from colossalai.zero.sharded_optim 
import ShardedOptimizerV2\n'), ((1673, 1693), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (1686, 1693), False, 'import copy\n'), ((1854, 1875), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (1873, 1875), True, 'import torch.distributed as dist\n'), ((1901, 1911), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['model'], {}), '(model)\n', (1904, 1911), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((2599, 2658), 'common.check_sharded_params_padding', 'check_sharded_params_padding', (['model', 'zero_model'], {'loose': '(True)'}), '(model, zero_model, loose=True)\n', (2627, 2658), False, 'from common import CONFIG, check_sharded_params_padding\n'), ((3049, 3060), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (3058, 3060), False, 'from colossalai.utils import free_port\n')] |
import torch.nn.functional as F
from typing import Optional
from colossalai.tensor.op_wrapper import colo_op_impl
from colossalai.nn.layer.parallel_1d._utils import reduce_input, reduce_grad
from colossalai.tensor import ComputePattern, TensorSpec, ComputePattern, ParallelAction, ColoTensor, distspec
from colossalai.tensor.graph import GraphOpNode, GraphGlobalEnv
from colossalai.context import ParallelMode
from ._utils import GeneralTensor, convert_to_colo_tensor
def colo_linear_1Drow(input_tensor: ColoTensor, weight: ColoTensor, bias: Optional[ColoTensor]) -> ColoTensor:
# Input:S[1] x Weight:S[0] = Output:P
# All-Reduce(Output) + bias = res
# Input:S[1]
input_tensor = input_tensor.convert_to_dist_spec(
distspec.shard(weight.spec.get_process_group(), [-1], [weight.spec.get_process_group_size()]))
# Output:P
partial_output = F.linear(input_tensor, weight)
# Reduce(Output)
output = reduce_input(partial_output, ParallelMode.PARALLEL_1D)
# Bias
if bias is not None:
assert not bias.has_spec(), 'Invalid bias spec for 1Drow Linear op'
output = output + bias
output = ColoTensor.from_torch_tensor(output, spec=TensorSpec(distspec.replicate(weight.spec.get_process_group())))
return output
def colo_linear_1Dcol(input_tensor: ColoTensor, weight: ColoTensor, bias: Optional[ColoTensor]) -> ColoTensor:
# Input:B x Weight:S[1] + Bias:S[1] = Output:S[1]
# All-Gather(Output)
# Input:B
parallel_action = weight.spec.parallel_action
input_tensor = input_tensor.convert_to_dist_spec(distspec.replicate(weight.spec.get_process_group()))
input_parallel = reduce_grad(input_tensor, ParallelMode.PARALLEL_1D)
output_parallel = F.linear(input_parallel, weight, bias)
output = ColoTensor.from_torch_tensor(output_parallel,
spec=TensorSpec(
distspec.shard(weight.spec.get_process_group(), [-1],
[weight.spec.get_process_group_size()]),
ParallelAction(ComputePattern.TP1D)))
if parallel_action.gather_out:
# All-Gather(Output)
output = output.convert_to_dist_spec(distspec.replicate(weight.spec.get_process_group()))
return output
def colo_linear_1d(mode: str, input_tensor: ColoTensor, weight: ColoTensor, bias: Optional[ColoTensor]) -> ColoTensor:
assert mode in ('row', 'col')
funcs = {'row': colo_linear_1Drow, 'col': colo_linear_1Dcol}
return funcs[mode](input_tensor, weight, bias)
@colo_op_impl(F.linear)
def colo_linear(input_tensor: GeneralTensor, weight: GeneralTensor, bias: Optional[GeneralTensor] = None):
"""Handles ``__torch_function__`` dispatch for ``torch.nn.functional.linear``.
This method computes a linear.
"""
input_tensor, weight, bias = tuple(map(convert_to_colo_tensor, (input_tensor, weight, bias)))
# building the computing graph, inputs -> op
if GraphGlobalEnv().graph_building:
cur_op_node = GraphOpNode('linear', [weight, bias])
cur_op_node.add_prev_tensor(input_tensor)
# Add communication logic before and after linear call.
ret_tensor = None
if not weight.has_spec(): # No Model Parallel Applied
assert weight.spec.is_gathered(), 'Invalid weight spec for native Linear op'
assert bias is None or bias.spec.is_gathered(), 'Invalid bias spec for native Linear op'
ret_tensor = ColoTensor.from_torch_tensor(F.linear(input_tensor, weight, bias))
elif weight.spec.has_compute_pattern(ComputePattern.TP1D): # Single Model Parallel Applied
if weight.spec.is_1D_col() and (bias is None or bias.spec.is_gathered()):
mode = 'row'
elif weight.spec.is_1D_row() and (bias is None or bias.spec.is_1D_row() or bias.spec.is_1D_col()):
mode = 'col'
else:
raise NotImplementedError
ret_tensor = colo_linear_1d(mode, input_tensor, weight, bias)
else:
raise NotImplementedError
# building the computing graph, op -> output
if GraphGlobalEnv().graph_building:
cur_op_node.add_post_tensor(ret_tensor)
return ret_tensor
| [
"colossalai.tensor.ParallelAction",
"colossalai.tensor.graph.GraphGlobalEnv",
"colossalai.nn.layer.parallel_1d._utils.reduce_input",
"colossalai.tensor.op_wrapper.colo_op_impl",
"colossalai.tensor.graph.GraphOpNode",
"colossalai.nn.layer.parallel_1d._utils.reduce_grad"
] | [((2629, 2651), 'colossalai.tensor.op_wrapper.colo_op_impl', 'colo_op_impl', (['F.linear'], {}), '(F.linear)\n', (2641, 2651), False, 'from colossalai.tensor.op_wrapper import colo_op_impl\n'), ((872, 902), 'torch.nn.functional.linear', 'F.linear', (['input_tensor', 'weight'], {}), '(input_tensor, weight)\n', (880, 902), True, 'import torch.nn.functional as F\n'), ((937, 991), 'colossalai.nn.layer.parallel_1d._utils.reduce_input', 'reduce_input', (['partial_output', 'ParallelMode.PARALLEL_1D'], {}), '(partial_output, ParallelMode.PARALLEL_1D)\n', (949, 991), False, 'from colossalai.nn.layer.parallel_1d._utils import reduce_input, reduce_grad\n'), ((1657, 1708), 'colossalai.nn.layer.parallel_1d._utils.reduce_grad', 'reduce_grad', (['input_tensor', 'ParallelMode.PARALLEL_1D'], {}), '(input_tensor, ParallelMode.PARALLEL_1D)\n', (1668, 1708), False, 'from colossalai.nn.layer.parallel_1d._utils import reduce_input, reduce_grad\n'), ((1732, 1770), 'torch.nn.functional.linear', 'F.linear', (['input_parallel', 'weight', 'bias'], {}), '(input_parallel, weight, bias)\n', (1740, 1770), True, 'import torch.nn.functional as F\n'), ((3040, 3056), 'colossalai.tensor.graph.GraphGlobalEnv', 'GraphGlobalEnv', ([], {}), '()\n', (3054, 3056), False, 'from colossalai.tensor.graph import GraphOpNode, GraphGlobalEnv\n'), ((3095, 3132), 'colossalai.tensor.graph.GraphOpNode', 'GraphOpNode', (['"""linear"""', '[weight, bias]'], {}), "('linear', [weight, bias])\n", (3106, 3132), False, 'from colossalai.tensor.graph import GraphOpNode, GraphGlobalEnv\n'), ((4156, 4172), 'colossalai.tensor.graph.GraphGlobalEnv', 'GraphGlobalEnv', ([], {}), '()\n', (4170, 4172), False, 'from colossalai.tensor.graph import GraphOpNode, GraphGlobalEnv\n'), ((3558, 3594), 'torch.nn.functional.linear', 'F.linear', (['input_tensor', 'weight', 'bias'], {}), '(input_tensor, weight, bias)\n', (3566, 3594), True, 'import torch.nn.functional as F\n'), ((2137, 2172), 'colossalai.tensor.ParallelAction', 
'ParallelAction', (['ComputePattern.TP1D'], {}), '(ComputePattern.TP1D)\n', (2151, 2172), False, 'from colossalai.tensor import ComputePattern, TensorSpec, ComputePattern, ParallelAction, ColoTensor, distspec\n')] |
import torch
import torch.nn as nn
from torch.optim import Optimizer
from colossalai.amp.naive_amp import NaiveAMPModel
from colossalai.utils import is_no_pp_or_last_stage
from colossalai.core import global_context as gpc
from colossalai.context.parallel_mode import ParallelMode
from .zero_redundancy_optimizer_level_2 import ZeroRedundancyOptimizer_Level_2
from .zero_redundancy_optimizer_level_3 import ZeroRedundancyOptimizer_Level_3
def convert_to_zero(model: nn.Module,
optimizer: Optimizer,
level: int,
zero_config: dict):
"""
A helper function to integrate the model and optimizer with ZeRO optimizer and off-loading
:param model: Your model object
:type model: :class:`torch.nn.Module`
:param optimizer: Your optimizer object
:type optimizer: :class:`torch.optim.Optimizer`
:param level: Optimizer level, can be 2 or 3
:type level: int
:param zero_config: Configuration for zero
:type zero_config: dict
:return: (model, optimizer)
:rtype: Tuple
"""
import deepspeed
assert level == 2 or level == 3, 'Only ZERO Optimizer Level 2 and 3 are provided'
model = NaiveAMPModel(model, output_to_fp32=False)
if level == 2:
optimizer = ZeroRedundancyOptimizer_Level_2(init_optimizer=optimizer, **zero_config)
else:
optimizer = ZeroRedundancyOptimizer_Level_3(init_optimizer=optimizer, module=model, **zero_config)
return model, optimizer
def zero3_model_context(dtype=torch.half):
"""A context to enable massive model construction for training with
ZeRO-3. Models are automatically partitioned (or, sharded) across the
system and converted to half precision. Note that the config of ZeRO-3 will be loaded automatically from `gpc.config`.
Args:
dtype (``dtype``, optional): Can be used to change the data type of the parameters.
Supported options are ``torch.half`` and ``torch.float``. Defaults to ``torch.half``
This context accelerates model initialization and enables models that
are too large to allocate in their entirety in CPU memory. It has the
following effects:
#. allocates tensors to either GPU or CPU memory or NVMe
#. converts floating point tensors to half precision
#. immediately partitions tensors among the group of data-parallel devices
#. (*optional*) replaces ``torch.nn.functional.linear`` with a more
memory-efficient implementation
These modifications allow for models that exceed the size of local CPU/GPU
memory/NVMe, but fit within the total NVMe capacity (*i.e.*, aggregate CPU
or GPU memory or NVMe) across all nodes. Consider initializing a model with one
trillion parameters, whose weights occupy two terabytes (TB) in half
precision. The initial CPU allocation in full precision requires 4TB of
memory *per process*, and so a system with 8 GPUs per node would need 32TB of
CPU memory due to data-parallel redundancies. Instead, by immediately
partitioning tensors we remove the redundancies. The result is that
regardless of the number of GPUs, we still only require the original 4TB. This
allows for a linear increase in model size with the aggregate system memory.
For example, if a node has 1TB of memory and 8 GPUs, we could fit a trillion
parameter model with 4 nodes and 32 GPUs.
Important: If the fp16 weights of the model can't fit onto a single GPU memory
this feature must be used.
Examples
--------
#. Allocate a model and partition it among all processes:
.. code-block:: python
with zero3_model_context():
model = MyLargeModel()
"""
assert dtype == torch.half or dtype == torch.float, f'Invalid dtype, except torch.half or torch.float, got {dtype}'
import deepspeed
ds_config = {
"train_micro_batch_size_per_gpu": 1,
"gradient_accumulation_steps": 1,
"zero_optimization": {
"offload_param": getattr(gpc.config.zero, 'offload_param_config', None),
"offload_optimizer": getattr(gpc.config.zero, 'offload_optimizer_config'),
},
"aio": getattr(gpc.config.zero, 'aio_config', None)
}
remote_device = getattr(ds_config['zero_optimization']['offload_param'], 'device', None)
pin_memory = getattr(ds_config['zero_optimization']['offload_param'], 'pin_memory', False)
return deepspeed.zero.Init(data_parallel_group=gpc.get_group(ParallelMode.DATA),
remote_device=remote_device,
config_dict_or_path=ds_config,
pin_memory=pin_memory,
dtype=dtype)
__all__ = ['convert_to_zero', 'ZeroRedundancyOptimizer_Level_2',
'ZeroRedundancyOptimizer_Level_3', 'zero3_model_context']
| [
"colossalai.core.global_context.get_group",
"colossalai.amp.naive_amp.NaiveAMPModel"
] | [((1194, 1236), 'colossalai.amp.naive_amp.NaiveAMPModel', 'NaiveAMPModel', (['model'], {'output_to_fp32': '(False)'}), '(model, output_to_fp32=False)\n', (1207, 1236), False, 'from colossalai.amp.naive_amp import NaiveAMPModel\n'), ((4624, 4656), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (4637, 4656), True, 'from colossalai.core import global_context as gpc\n')] |
import functools
import torch
import types
from colossalai.utils.cuda import get_current_device
from colossalai.gemini.tensor_utils import colo_model_data_tensor_move_inline, colo_tensor_mem_usage
from colossalai.gemini.stateful_tensor import StatefulTensor, TensorState
from colossalai.gemini.tensor_placement_policy import TensorPlacementPolicy
from typing import List
class StatefulTensorMgr(object):
"""
Stateful Tensor Manager, inspired from PatrickStar
PatrickStar: Parallel Training of Pre-trained Models via Chunk-based Memory Management
https://arxiv.org/abs/2108.05818
"""
def __init__(self, tensor_placement_policy: TensorPlacementPolicy) -> None:
self._tensor_placement_policy: TensorPlacementPolicy = tensor_placement_policy
self._stateful_tensor_list: List[StatefulTensor] = []
self._compute_list: List[StatefulTensor] = []
self._compute_idx: int = -1
self._cpu_gpu_move_volume = 0
self._warmup = True
def register_stateful_tensor_list(self, tensor_list: List[StatefulTensor]) -> None:
assert self._stateful_tensor_list == [], "Can't register stateful tensors for manager twice"
self._stateful_tensor_list = tensor_list
for t in self._stateful_tensor_list:
assert isinstance(t, StatefulTensor)
t.trans_state = types.MethodType(functools.partial(self._trans_state, t.trans_state), t)
def start_iter(self):
pass
def finish_iter(self):
"""This function must be called when each iteration finishes
"""
self._warmup = False
self._compute_idx = -1
self._cpu_gpu_move_volume = 0
def adjust_layout(self) -> None:
""" Adjust the layout of statefuil tensor according to the information provided
by mem_stats_collector, which should belongs to a Sharded Model.
"""
# find stateful tensor in state COMPUTE
cuda_demand = StatefulTensor.GST_MGR.state_mem['cpu'][TensorState.COMPUTE]
move_to_cuda_tensor_list = []
hold_cuda_tensor_list = []
for tensor in self._stateful_tensor_list:
if tensor.state == TensorState.FREE:
continue
if tensor.device.type == 'cuda':
if tensor.state in [TensorState.HOLD, TensorState.HOLD_AFTER_BWD, TensorState.HOLD_AFTER_FWD]:
hold_cuda_tensor_list.append(tensor)
elif tensor.device.type == 'cpu':
if tensor.state == TensorState.COMPUTE:
move_to_cuda_tensor_list.append(tensor)
else:
raise RuntimeError
self._cpu_gpu_move_volume += self._tensor_placement_policy.evict_tensors(hold_cuda_tensor_list,
cuda_demand=cuda_demand,
warmup=self._warmup,
compute_list=self._compute_list,
compute_idx=self._compute_idx)
# move COMPUTE tensors to CUDA
self._cpu_gpu_move_volume += cuda_demand
for t in move_to_cuda_tensor_list:
colo_model_data_tensor_move_inline(t, get_current_device())
@property
def cpu_gpu_move_volume(self):
return self._cpu_gpu_move_volume
def _trans_state(self, trans_state_func, stateful_tensor, state):
trans_state_func(state)
if state == TensorState.COMPUTE:
self._compute_idx += 1
if self._warmup:
self._compute_list.append(stateful_tensor)
| [
"colossalai.utils.cuda.get_current_device"
] | [((1372, 1423), 'functools.partial', 'functools.partial', (['self._trans_state', 't.trans_state'], {}), '(self._trans_state, t.trans_state)\n', (1389, 1423), False, 'import functools\n'), ((3362, 3382), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (3380, 3382), False, 'from colossalai.utils.cuda import get_current_device\n')] |
from tests.components_to_test.registry import non_distributed_component_funcs
import colossalai
import pytest
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_port
from colossalai.utils import ColoInitContext
from functools import partial
def run_simple_net():
# A simple net with two stacked nn.Linear
get_components_func = non_distributed_component_funcs.get_callable('simple_net')
model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
with ColoInitContext(device=get_current_device()):
model = model_builder(checkpoint=True)
# we set the Specs for weight of each linear.
# model.proj1.weight.set_spec('1Drow')
# model.proj2.weight.set_spec('1Drow')
for i, (data, label) in enumerate(train_dataloader):
output = model(data)
if criterion:
loss = criterion(output, label)
else:
loss = output
print(loss.torch_tensor())
loss.backward()
if i > 5:
break
# TODO(jzy) check the results with col.nn.Linear?
def run_dist(rank, world_size, port):
config = dict(parallel=dict(tensor=dict(mode="1d", size=world_size),))
colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
run_simple_net()
@pytest.mark.skip
@pytest.mark.dist
@parameterize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_simple_net(world_size):
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_simple_net()
| [
"colossalai.launch",
"colossalai.utils.free_port",
"colossalai.utils.cuda.get_current_device",
"colossalai.testing.rerun_if_address_is_in_use",
"colossalai.testing.parameterize"
] | [((1523, 1557), 'colossalai.testing.parameterize', 'parameterize', (['"""world_size"""', '[1, 4]'], {}), "('world_size', [1, 4])\n", (1535, 1557), False, 'from colossalai.testing import parameterize, rerun_if_address_is_in_use\n'), ((1559, 1587), 'colossalai.testing.rerun_if_address_is_in_use', 'rerun_if_address_is_in_use', ([], {}), '()\n', (1585, 1587), False, 'from colossalai.testing import parameterize, rerun_if_address_is_in_use\n'), ((482, 540), 'tests.components_to_test.registry.non_distributed_component_funcs.get_callable', 'non_distributed_component_funcs.get_callable', (['"""simple_net"""'], {}), "('simple_net')\n", (526, 540), False, 'from tests.components_to_test.registry import non_distributed_component_funcs\n'), ((1351, 1467), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'config', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=config, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (1368, 1467), False, 'import colossalai\n'), ((1699, 1736), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (1707, 1736), True, 'import torch.multiprocessing as mp\n'), ((1682, 1693), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (1691, 1693), False, 'from colossalai.utils import free_port\n'), ((679, 699), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (697, 699), False, 'from colossalai.utils.cuda import get_current_device\n')] |
import torch
from .simple_model import MLP
from colossalai.utils import Timer, synchronize
from colossalai.core import global_context as gpc
from colossalai.context.parallel_mode import ParallelMode
from argparse import ArgumentParser
BATCH_SIZE = 8
SEQ_LENGTH = 120
HIDDEN_DIM = 1024
ITER_TIMES = 2000
def build_args_parser() -> ArgumentParser:
"""Helper function parsing the command line options."""
parser = ArgumentParser(description="colossal benchmark")
parser.add_argument("--num_gpus",
type=int,
default=-1,
help="Total number of devices to use.")
parser.add_argument("--bs",
type=int,
default=BATCH_SIZE,
help="Batch size of the input tensor.")
parser.add_argument("--seq_len",
type=int,
default=SEQ_LENGTH,
help="Sequence length of the input tensor.")
parser.add_argument("--hid_dim",
type=int,
default=HIDDEN_DIM,
help="Hidden dimension of the input tensor.")
parser.add_argument("--num_steps",
type=int,
default=ITER_TIMES,
help="The number of iteration times.")
return parser
def build_input_tensor(args):
return torch.rand(args.bs, args.seq_len, args.hid_dim)
def build_configs_helper(device_cnt: int):
config_dict = {}
if device_cnt < 2:
return config_dict
if device_cnt < 4:
config_dict["1d"] = dict(parallel=dict(tensor=dict(size=2, mode='1d')))
elif device_cnt < 8:
config_dict["1d"] = dict(parallel=dict(tensor=dict(size=4, mode='1d')))
config_dict["2d"] = dict(parallel=dict(tensor=dict(size=4, mode='2d')))
else:
config_dict["1d"] = dict(parallel=dict(tensor=dict(size=8, mode='1d')))
config_dict["2d"] = dict(parallel=dict(data=2, tensor=dict(size=4, mode='2d')))
config_dict["2p5d"] = dict(parallel=dict(tensor=dict(size=8, mode='2.5d', depth=2)))
config_dict["3d"] = dict(parallel=dict(tensor=dict(size=8, mode='3d')))
return config_dict
def build_configs(args):
total_device_cnt = torch.cuda.device_count()
if args.num_gpus == -1:
config_dict = build_configs_helper(total_device_cnt)
else:
valid_device_cnt = min(args.num_gpus, total_device_cnt)
config_dict = build_configs_helper(valid_device_cnt)
return config_dict
def profile_1d(input_tensor, config, args):
gpc.load_config(config)
gpc.init_parallel_groups()
assert gpc.is_initialized(ParallelMode.PARALLEL_1D)
model = MLP(args.hid_dim).cuda()
input_tensor = input_tensor.cuda()
torch.distributed.broadcast(input_tensor, src=0)
timer = Timer()
iter_times = args.num_steps
timer.start()
for i in range(iter_times):
input_tensor = model(input_tensor)
synchronize()
result_1d = timer.stop()
return result_1d
def profile_2d(input_tensor, config, args):
gpc.load_config(config)
gpc.init_parallel_groups()
assert gpc.is_initialized(ParallelMode.PARALLEL_2D_COL)
assert gpc.is_initialized(ParallelMode.PARALLEL_2D_ROW)
model = MLP(args.hid_dim).cuda()
input_tensor = input_tensor.cuda()
torch.distributed.broadcast(input_tensor, src=0)
input_tensor = torch.chunk(input_tensor, 2, dim=0)[gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)]
input_tensor = torch.chunk(input_tensor, 2, dim=-1)[gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)]
timer = Timer()
iter_times = args.num_steps
timer.start()
for i in range(iter_times):
input_tensor = model(input_tensor)
synchronize()
result_2d = timer.stop()
return result_2d
def profile_2p5d(input_tensor, config, args):
gpc.load_config(config)
gpc.init_parallel_groups()
assert gpc.is_initialized(ParallelMode.PARALLEL_2P5D_COL)
assert gpc.is_initialized(ParallelMode.PARALLEL_2P5D_ROW)
assert gpc.is_initialized(ParallelMode.PARALLEL_2P5D_DEP)
model = MLP(args.hid_dim).cuda()
input_tensor = input_tensor.cuda()
torch.distributed.broadcast(input_tensor, src=0)
input_tensor = torch.chunk(input_tensor, 2, dim=0)[gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)]
input_tensor = torch.chunk(input_tensor, 2, dim=0)[gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)]
input_tensor = torch.chunk(input_tensor, 2, dim=-1)[gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)]
timer = Timer()
iter_times = args.num_steps
timer.start()
for i in range(iter_times):
input_tensor = model(input_tensor)
synchronize()
result_2p5d = timer.stop()
return result_2p5d
def profile_3d(input_tensor, config, args):
    """Time ``args.num_steps`` MLP forward passes under 3-D tensor parallelism.

    The input is chunked across the WEIGHT and INPUT groups (batch dim) and
    the OUTPUT group (hidden dim) before timing.  Returns the elapsed time.
    """
    gpc.load_config(config)
    gpc.init_parallel_groups()
    assert gpc.is_initialized(ParallelMode.PARALLEL_3D_WEIGHT)
    assert gpc.is_initialized(ParallelMode.PARALLEL_3D_INPUT)
    assert gpc.is_initialized(ParallelMode.PARALLEL_3D_OUTPUT)
    net = MLP(args.hid_dim).cuda()
    x = input_tensor.cuda()
    torch.distributed.broadcast(x, src=0)
    x = torch.chunk(x, 2, dim=0)[gpc.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT)]
    x = torch.chunk(x, 2, dim=0)[gpc.get_local_rank(ParallelMode.PARALLEL_3D_INPUT)]
    x = torch.chunk(x, 2, dim=-1)[gpc.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT)]
    stopwatch = Timer()
    stopwatch.start()
    for _ in range(args.num_steps):
        x = net(x)
    synchronize()
    return stopwatch.stop()
| [
"colossalai.core.global_context.init_parallel_groups",
"colossalai.core.global_context.get_local_rank",
"colossalai.core.global_context.load_config",
"colossalai.utils.Timer",
"colossalai.utils.synchronize",
"colossalai.core.global_context.is_initialized"
] | [((422, 470), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""colossal benchmark"""'}), "(description='colossal benchmark')\n", (436, 470), False, 'from argparse import ArgumentParser\n'), ((1427, 1474), 'torch.rand', 'torch.rand', (['args.bs', 'args.seq_len', 'args.hid_dim'], {}), '(args.bs, args.seq_len, args.hid_dim)\n', (1437, 1474), False, 'import torch\n'), ((2312, 2337), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2335, 2337), False, 'import torch\n'), ((2634, 2657), 'colossalai.core.global_context.load_config', 'gpc.load_config', (['config'], {}), '(config)\n', (2649, 2657), True, 'from colossalai.core import global_context as gpc\n'), ((2662, 2688), 'colossalai.core.global_context.init_parallel_groups', 'gpc.init_parallel_groups', ([], {}), '()\n', (2686, 2688), True, 'from colossalai.core import global_context as gpc\n'), ((2700, 2744), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (2718, 2744), True, 'from colossalai.core import global_context as gpc\n'), ((2825, 2873), 'torch.distributed.broadcast', 'torch.distributed.broadcast', (['input_tensor'], {'src': '(0)'}), '(input_tensor, src=0)\n', (2852, 2873), False, 'import torch\n'), ((2886, 2893), 'colossalai.utils.Timer', 'Timer', ([], {}), '()\n', (2891, 2893), False, 'from colossalai.utils import Timer, synchronize\n'), ((3140, 3163), 'colossalai.core.global_context.load_config', 'gpc.load_config', (['config'], {}), '(config)\n', (3155, 3163), True, 'from colossalai.core import global_context as gpc\n'), ((3168, 3194), 'colossalai.core.global_context.init_parallel_groups', 'gpc.init_parallel_groups', ([], {}), '()\n', (3192, 3194), True, 'from colossalai.core import global_context as gpc\n'), ((3206, 3254), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.PARALLEL_2D_COL'], {}), '(ParallelMode.PARALLEL_2D_COL)\n', 
(3224, 3254), True, 'from colossalai.core import global_context as gpc\n'), ((3266, 3314), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.PARALLEL_2D_ROW'], {}), '(ParallelMode.PARALLEL_2D_ROW)\n', (3284, 3314), True, 'from colossalai.core import global_context as gpc\n'), ((3395, 3443), 'torch.distributed.broadcast', 'torch.distributed.broadcast', (['input_tensor'], {'src': '(0)'}), '(input_tensor, src=0)\n', (3422, 3443), False, 'import torch\n'), ((3667, 3674), 'colossalai.utils.Timer', 'Timer', ([], {}), '()\n', (3672, 3674), False, 'from colossalai.utils import Timer, synchronize\n'), ((3923, 3946), 'colossalai.core.global_context.load_config', 'gpc.load_config', (['config'], {}), '(config)\n', (3938, 3946), True, 'from colossalai.core import global_context as gpc\n'), ((3951, 3977), 'colossalai.core.global_context.init_parallel_groups', 'gpc.init_parallel_groups', ([], {}), '()\n', (3975, 3977), True, 'from colossalai.core import global_context as gpc\n'), ((3989, 4039), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.PARALLEL_2P5D_COL'], {}), '(ParallelMode.PARALLEL_2P5D_COL)\n', (4007, 4039), True, 'from colossalai.core import global_context as gpc\n'), ((4051, 4101), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.PARALLEL_2P5D_ROW'], {}), '(ParallelMode.PARALLEL_2P5D_ROW)\n', (4069, 4101), True, 'from colossalai.core import global_context as gpc\n'), ((4113, 4163), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.PARALLEL_2P5D_DEP'], {}), '(ParallelMode.PARALLEL_2P5D_DEP)\n', (4131, 4163), True, 'from colossalai.core import global_context as gpc\n'), ((4244, 4292), 'torch.distributed.broadcast', 'torch.distributed.broadcast', (['input_tensor'], {'src': '(0)'}), '(input_tensor, src=0)\n', (4271, 4292), False, 'import torch\n'), ((4627, 4634), 'colossalai.utils.Timer', 'Timer', ([], {}), '()\n', (4632, 
4634), False, 'from colossalai.utils import Timer, synchronize\n'), ((4885, 4908), 'colossalai.core.global_context.load_config', 'gpc.load_config', (['config'], {}), '(config)\n', (4900, 4908), True, 'from colossalai.core import global_context as gpc\n'), ((4913, 4939), 'colossalai.core.global_context.init_parallel_groups', 'gpc.init_parallel_groups', ([], {}), '()\n', (4937, 4939), True, 'from colossalai.core import global_context as gpc\n'), ((4951, 5002), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.PARALLEL_3D_WEIGHT'], {}), '(ParallelMode.PARALLEL_3D_WEIGHT)\n', (4969, 5002), True, 'from colossalai.core import global_context as gpc\n'), ((5014, 5064), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.PARALLEL_3D_INPUT'], {}), '(ParallelMode.PARALLEL_3D_INPUT)\n', (5032, 5064), True, 'from colossalai.core import global_context as gpc\n'), ((5076, 5127), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.PARALLEL_3D_OUTPUT'], {}), '(ParallelMode.PARALLEL_3D_OUTPUT)\n', (5094, 5127), True, 'from colossalai.core import global_context as gpc\n'), ((5208, 5256), 'torch.distributed.broadcast', 'torch.distributed.broadcast', (['input_tensor'], {'src': '(0)'}), '(input_tensor, src=0)\n', (5235, 5256), False, 'import torch\n'), ((5593, 5600), 'colossalai.utils.Timer', 'Timer', ([], {}), '()\n', (5598, 5600), False, 'from colossalai.utils import Timer, synchronize\n'), ((3027, 3040), 'colossalai.utils.synchronize', 'synchronize', ([], {}), '()\n', (3038, 3040), False, 'from colossalai.utils import Timer, synchronize\n'), ((3463, 3498), 'torch.chunk', 'torch.chunk', (['input_tensor', '(2)'], {'dim': '(0)'}), '(input_tensor, 2, dim=0)\n', (3474, 3498), False, 'import torch\n'), ((3499, 3547), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2D_COL'], {}), '(ParallelMode.PARALLEL_2D_COL)\n', (3517, 3547), True, 
'from colossalai.core import global_context as gpc\n'), ((3568, 3604), 'torch.chunk', 'torch.chunk', (['input_tensor', '(2)'], {'dim': '(-1)'}), '(input_tensor, 2, dim=-1)\n', (3579, 3604), False, 'import torch\n'), ((3605, 3653), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2D_ROW'], {}), '(ParallelMode.PARALLEL_2D_ROW)\n', (3623, 3653), True, 'from colossalai.core import global_context as gpc\n'), ((3808, 3821), 'colossalai.utils.synchronize', 'synchronize', ([], {}), '()\n', (3819, 3821), False, 'from colossalai.utils import Timer, synchronize\n'), ((4312, 4347), 'torch.chunk', 'torch.chunk', (['input_tensor', '(2)'], {'dim': '(0)'}), '(input_tensor, 2, dim=0)\n', (4323, 4347), False, 'import torch\n'), ((4348, 4398), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_DEP'], {}), '(ParallelMode.PARALLEL_2P5D_DEP)\n', (4366, 4398), True, 'from colossalai.core import global_context as gpc\n'), ((4419, 4454), 'torch.chunk', 'torch.chunk', (['input_tensor', '(2)'], {'dim': '(0)'}), '(input_tensor, 2, dim=0)\n', (4430, 4454), False, 'import torch\n'), ((4455, 4505), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_COL'], {}), '(ParallelMode.PARALLEL_2P5D_COL)\n', (4473, 4505), True, 'from colossalai.core import global_context as gpc\n'), ((4526, 4562), 'torch.chunk', 'torch.chunk', (['input_tensor', '(2)'], {'dim': '(-1)'}), '(input_tensor, 2, dim=-1)\n', (4537, 4562), False, 'import torch\n'), ((4563, 4613), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_ROW'], {}), '(ParallelMode.PARALLEL_2P5D_ROW)\n', (4581, 4613), True, 'from colossalai.core import global_context as gpc\n'), ((4768, 4781), 'colossalai.utils.synchronize', 'synchronize', ([], {}), '()\n', (4779, 4781), False, 'from colossalai.utils import Timer, synchronize\n'), ((5276, 5311), 'torch.chunk', 
'torch.chunk', (['input_tensor', '(2)'], {'dim': '(0)'}), '(input_tensor, 2, dim=0)\n', (5287, 5311), False, 'import torch\n'), ((5312, 5363), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_3D_WEIGHT'], {}), '(ParallelMode.PARALLEL_3D_WEIGHT)\n', (5330, 5363), True, 'from colossalai.core import global_context as gpc\n'), ((5384, 5419), 'torch.chunk', 'torch.chunk', (['input_tensor', '(2)'], {'dim': '(0)'}), '(input_tensor, 2, dim=0)\n', (5395, 5419), False, 'import torch\n'), ((5420, 5470), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_3D_INPUT'], {}), '(ParallelMode.PARALLEL_3D_INPUT)\n', (5438, 5470), True, 'from colossalai.core import global_context as gpc\n'), ((5491, 5527), 'torch.chunk', 'torch.chunk', (['input_tensor', '(2)'], {'dim': '(-1)'}), '(input_tensor, 2, dim=-1)\n', (5502, 5527), False, 'import torch\n'), ((5528, 5579), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_3D_OUTPUT'], {}), '(ParallelMode.PARALLEL_3D_OUTPUT)\n', (5546, 5579), True, 'from colossalai.core import global_context as gpc\n'), ((5734, 5747), 'colossalai.utils.synchronize', 'synchronize', ([], {}), '()\n', (5745, 5747), False, 'from colossalai.utils import Timer, synchronize\n')] |
from functools import partial
import pytest
import torch.nn as nn
import torch.multiprocessing as mp
import torch.distributed as dist
import colossalai
from colossalai.utils import free_port, get_current_device
from colossalai.nn.layer.moe import Experts
from colossalai.context.moe_context import MOE_CONTEXT
from colossalai.utils.moe import sync_moe_model_param
from colossalai.testing import assert_equal_in_group, rerun_if_address_is_in_use
D_MODEL = 4
D_FF = 8
CONFIG = dict()
def run_test(rank, port):
    """Worker entry point: verify MoE expert deployment and parameter sync.

    Launched on 4 processes.  Checks that ``Experts`` assigns the expected
    number of local experts for 1/2/4/8 total experts, that the ep/dp process
    groups have the expected local ranks, and that ``sync_moe_model_param``
    makes parameters equal across each data-parallel group.
    """
    world_size = 4
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    expert_module = nn.Linear
    expert_factor = dict(in_features=D_MODEL, out_features=D_FF, device=get_current_device())
    MOE_CONTEXT.setup(42)    # MOE environment initialization
    exp0 = Experts(expert_module, 1, **expert_factor)
    exp1 = Experts(expert_module, 2, **expert_factor)
    exp2 = Experts(expert_module, 4, **expert_factor)
    exp3 = Experts(expert_module, 8, **expert_factor)
    # With world_size 4: <= 4 total experts gives 1 local expert per rank,
    # 8 experts give 2 local experts per rank.
    assert exp0.num_local_experts == 1
    assert exp1.num_local_experts == 1
    assert exp2.num_local_experts == 1
    assert exp3.num_local_experts == 2
    # experts deployment passed

    parallel_info_dict = MOE_CONTEXT.parallel_info_dict
    rank = dist.get_rank()
    # Three distinct expert-parallel sizes were used: 1, 2 and 4.
    assert len(parallel_info_dict) == 3
    # Expert-parallel group ranks: ep_size 4 spans all ranks, ep_size 2 pairs
    # adjacent ranks, ep_size 1 is a singleton group.
    assert dist.get_rank(parallel_info_dict[4].ep_group) == rank
    assert dist.get_rank(parallel_info_dict[2].ep_group) == rank % 2
    assert dist.get_rank(parallel_info_dict[1].ep_group) == 0
    # Data-parallel groups are the complements of the ep groups.
    assert dist.get_rank(parallel_info_dict[4].dp_group) == 0
    assert dist.get_rank(parallel_info_dict[2].dp_group) == rank // 2
    assert dist.get_rank(parallel_info_dict[1].dp_group) == rank
    # group creation passed

    model = nn.ModuleList([exp0, exp1, exp2, exp3])
    model = model.to(get_current_device())
    sync_moe_model_param(model)
    assert_equal_in_group(exp0.experts[0].weight.data, parallel_info_dict[1].dp_group)
    assert_equal_in_group(exp0.experts[0].bias.data, parallel_info_dict[1].dp_group)
    # MOE experts layout success when ep_size = 1
    assert_equal_in_group(exp1.experts[0].weight.data, parallel_info_dict[2].dp_group)
    assert_equal_in_group(exp1.experts[0].bias.data, parallel_info_dict[2].dp_group)
    # MOE experts layout success when ep_size = 2
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_moe_initialization():
    """Spawn 4 worker processes and run the MoE initialization checks."""
    world_size = 4
    mp.spawn(partial(run_test, port=free_port()), nprocs=world_size)
# Allow running this test directly, outside of pytest.
if __name__ == '__main__':
    test_moe_initialization()
| [
"colossalai.launch",
"colossalai.utils.moe.sync_moe_model_param",
"colossalai.utils.free_port",
"colossalai.utils.get_current_device",
"colossalai.testing.rerun_if_address_is_in_use",
"colossalai.context.moe_context.MOE_CONTEXT.setup",
"colossalai.testing.assert_equal_in_group",
"colossalai.nn.layer.m... | [((2380, 2408), 'colossalai.testing.rerun_if_address_is_in_use', 'rerun_if_address_is_in_use', ([], {}), '()\n', (2406, 2408), False, 'from colossalai.testing import assert_equal_in_group, rerun_if_address_is_in_use\n'), ((534, 650), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'CONFIG', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=CONFIG, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (551, 650), False, 'import colossalai\n'), ((775, 796), 'colossalai.context.moe_context.MOE_CONTEXT.setup', 'MOE_CONTEXT.setup', (['(42)'], {}), '(42)\n', (792, 796), False, 'from colossalai.context.moe_context import MOE_CONTEXT\n'), ((844, 886), 'colossalai.nn.layer.moe.Experts', 'Experts', (['expert_module', '(1)'], {}), '(expert_module, 1, **expert_factor)\n', (851, 886), False, 'from colossalai.nn.layer.moe import Experts\n'), ((898, 940), 'colossalai.nn.layer.moe.Experts', 'Experts', (['expert_module', '(2)'], {}), '(expert_module, 2, **expert_factor)\n', (905, 940), False, 'from colossalai.nn.layer.moe import Experts\n'), ((952, 994), 'colossalai.nn.layer.moe.Experts', 'Experts', (['expert_module', '(4)'], {}), '(expert_module, 4, **expert_factor)\n', (959, 994), False, 'from colossalai.nn.layer.moe import Experts\n'), ((1006, 1048), 'colossalai.nn.layer.moe.Experts', 'Experts', (['expert_module', '(8)'], {}), '(expert_module, 8, **expert_factor)\n', (1013, 1048), False, 'from colossalai.nn.layer.moe import Experts\n'), ((1306, 1321), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1319, 1321), True, 'import torch.distributed as dist\n'), ((1798, 1837), 'torch.nn.ModuleList', 'nn.ModuleList', (['[exp0, exp1, exp2, exp3]'], {}), '([exp0, exp1, exp2, exp3])\n', (1811, 1837), True, 'import torch.nn as nn\n'), ((1885, 1912), 'colossalai.utils.moe.sync_moe_model_param', 'sync_moe_model_param', (['model'], 
{}), '(model)\n', (1905, 1912), False, 'from colossalai.utils.moe import sync_moe_model_param\n'), ((1918, 2005), 'colossalai.testing.assert_equal_in_group', 'assert_equal_in_group', (['exp0.experts[0].weight.data', 'parallel_info_dict[1].dp_group'], {}), '(exp0.experts[0].weight.data, parallel_info_dict[1].\n dp_group)\n', (1939, 2005), False, 'from colossalai.testing import assert_equal_in_group, rerun_if_address_is_in_use\n'), ((2005, 2090), 'colossalai.testing.assert_equal_in_group', 'assert_equal_in_group', (['exp0.experts[0].bias.data', 'parallel_info_dict[1].dp_group'], {}), '(exp0.experts[0].bias.data, parallel_info_dict[1].dp_group\n )\n', (2026, 2090), False, 'from colossalai.testing import assert_equal_in_group, rerun_if_address_is_in_use\n'), ((2141, 2228), 'colossalai.testing.assert_equal_in_group', 'assert_equal_in_group', (['exp1.experts[0].weight.data', 'parallel_info_dict[2].dp_group'], {}), '(exp1.experts[0].weight.data, parallel_info_dict[2].\n dp_group)\n', (2162, 2228), False, 'from colossalai.testing import assert_equal_in_group, rerun_if_address_is_in_use\n'), ((2228, 2313), 'colossalai.testing.assert_equal_in_group', 'assert_equal_in_group', (['exp1.experts[0].bias.data', 'parallel_info_dict[2].dp_group'], {}), '(exp1.experts[0].bias.data, parallel_info_dict[2].dp_group\n )\n', (2249, 2313), False, 'from colossalai.testing import assert_equal_in_group, rerun_if_address_is_in_use\n'), ((2514, 2551), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (2522, 2551), True, 'import torch.multiprocessing as mp\n'), ((1374, 1419), 'torch.distributed.get_rank', 'dist.get_rank', (['parallel_info_dict[4].ep_group'], {}), '(parallel_info_dict[4].ep_group)\n', (1387, 1419), True, 'import torch.distributed as dist\n'), ((1439, 1484), 'torch.distributed.get_rank', 'dist.get_rank', (['parallel_info_dict[2].ep_group'], {}), '(parallel_info_dict[2].ep_group)\n', (1452, 1484), True, 'import 
torch.distributed as dist\n'), ((1508, 1553), 'torch.distributed.get_rank', 'dist.get_rank', (['parallel_info_dict[1].ep_group'], {}), '(parallel_info_dict[1].ep_group)\n', (1521, 1553), True, 'import torch.distributed as dist\n'), ((1571, 1616), 'torch.distributed.get_rank', 'dist.get_rank', (['parallel_info_dict[4].dp_group'], {}), '(parallel_info_dict[4].dp_group)\n', (1584, 1616), True, 'import torch.distributed as dist\n'), ((1633, 1678), 'torch.distributed.get_rank', 'dist.get_rank', (['parallel_info_dict[2].dp_group'], {}), '(parallel_info_dict[2].dp_group)\n', (1646, 1678), True, 'import torch.distributed as dist\n'), ((1703, 1748), 'torch.distributed.get_rank', 'dist.get_rank', (['parallel_info_dict[1].dp_group'], {}), '(parallel_info_dict[1].dp_group)\n', (1716, 1748), True, 'import torch.distributed as dist\n'), ((1859, 1879), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (1877, 1879), False, 'from colossalai.utils import free_port, get_current_device\n'), ((748, 768), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (766, 768), False, 'from colossalai.utils import free_port, get_current_device\n'), ((2497, 2508), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (2506, 2508), False, 'from colossalai.utils import free_port, get_current_device\n')] |
from copy import copy
from colossalai.utils.cuda import get_current_device
from colossalai.utils.model.colo_init_context import ColoInitContext
import torch
from colossalai.context.parallel_mode import ParallelMode
from colossalai.tensor import ColoTensor, distspec
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn.functional as F
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.core import global_context as gpc
from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, register_colo_module, init_colo_module, check_colo_module
from _utils import tensor_equal, tensor_shard_equal, set_seed
from tests.components_to_test.registry import non_distributed_component_funcs
def run_model_with_spec(mode, model_name):
    """Train a sharded model for a few steps and compare against a sequential copy.

    ``model_name`` selects a registered test model ('bert' or 'simple_net');
    ``mode`` ('col'/'row') selects the 1-D sharding layout.  Rank 0 also runs
    an unsharded reference model and asserts that losses and (reassembled)
    parameters stay close after each backward pass.
    """
    get_components_func = non_distributed_component_funcs.get_callable(model_name)
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)
    set_seed(1)
    with ColoInitContext(device=get_current_device()):
        model = model_builder(checkpoint=False)
    if rank == 0:
        model_seq = model_builder(checkpoint=False)
        model_seq = model_seq.cuda()
        # Make two models have the same init params
        for p1, p2 in zip(model.parameters(), model_seq.parameters()):
            p2.data.copy_(p1.data)
    parallel_action = ParallelAction(ComputePattern.TP1D)
    # Not all layers in Bert can be mod by 4.
    # e.g. row shard for all layers is invalid because the first dim of some layer is the classification type size 2.
    if 'bert' == model_name:
        if 'col' == mode:
            init_colo_module(model.bert.embeddings, parallel_action, recursive=True, mode=mode)
            init_colo_module(model.bert.encoder, parallel_action, recursive=True, mode=mode)
            init_colo_module(model.classifier, parallel_action, recursive=True, mode='row')
        elif 'row' == mode:
            # Embeddings are always col-sharded in row mode (see note above about dims).
            init_colo_module(model.bert.embeddings, parallel_action, recursive=True, mode='col')
            init_colo_module(model.bert.encoder, parallel_action, recursive=True, mode=mode)
            init_colo_module(model.classifier, parallel_action, recursive=True, mode=mode)
    elif 'simple_net' == model_name:
        init_colo_module(model, parallel_action, recursive=True, mode=mode)
    model = model.cuda()
    for i, (data, label) in enumerate(train_dataloader):
        data = data.to(get_current_device())
        label = label.to(get_current_device())
        # All ranks must see identical batches for the comparison to hold.
        torch.distributed.broadcast(data, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))
        torch.distributed.broadcast(label, 0, group=gpc.get_group(ParallelMode.PARALLEL_1D))
        if criterion:
            output = model(data)
            loss = criterion(output, label)
        else:
            # Model computes its own loss when no criterion is provided.
            output = model(data, label)
            loss = output
        # For reference
        if rank == 0:
            if criterion:
                output_seq = model_seq(data)
                loss_seq = criterion(output_seq, label)
            else:
                output_seq = model_seq(data, label)
                loss_seq = output_seq
        if rank == 0:
            with torch.no_grad():
                assert torch.allclose(loss, loss_seq, rtol=1e-2)
        loss.backward()
        if rank == 0:
            loss_seq.backward()
            with torch.no_grad():
                # check param
                for p1, p2 in zip(model.parameters(), model_seq.parameters()):
                    if p1.size() == p2.size():
                        assert torch.allclose(p1, p2)
                    else:
                        # p1 is a shard of p2: take the rank-0 chunk of p2
                        # along the sharded dim and compare against that.
                        if p1.size(-1) < p2.size(-1): # col
                            world_size = p2.size(-1) // p1.size(-1)
                            split_p2 = torch.chunk(p2, world_size, dim=-1)[0]
                        elif p1.size(0) < p2.size(0): # row
                            world_size = p2.size(0) // p1.size(0)
                            split_p2 = torch.chunk(p2, world_size, dim=0)[0]
                        assert torch.allclose(p1, split_p2)
        # A few steps are enough for the equivalence check.
        if i > 3:
            break
def run_linear_with_spec(mode):
    """Shard a small Linear with ``mode`` and verify it matches a dense copy.

    Forward outputs must be equal; weight/bias gradients must match shard-wise.
    """
    with ColoInitContext(device=get_current_device()):
        sharded = torch.nn.Linear(4, 8)
    # Take the copy before sharding so it stays a reference implementation.
    dense = copy(sharded)
    spec_action = ParallelAction(ComputePattern.TP1D)
    init_colo_module(sharded, spec_action, recursive=True, mode=mode)
    inp = torch.rand(2, 4).cuda()
    out_sharded = sharded(inp)
    out_dense = dense(inp)
    assert tensor_equal(out_sharded, out_dense)
    grad = torch.rand_like(out_sharded)
    out_sharded.backward(grad)
    out_dense.backward(grad)
    assert tensor_shard_equal(sharded.weight.grad, dense.weight.grad)
    assert tensor_shard_equal(sharded.bias.grad, dense.bias.grad)
def run_check_shared_param():
    """Verify ``check_colo_module`` rejects inconsistent specs on shared params.

    ``model.cls.predictions.decoder`` (a Linear) shares its bias with
    ``model.cls.predictions``.  Row-sharding the whole model is consistent and
    must pass; then forcing the shared bias to a column spec while the decoder
    weight stays row-sharded must be flagged as 'incorrectly sharded'.
    """
    from transformers import BertForMaskedLM, BertConfig
    hidden_dim = 8
    num_head = 4
    sequence_length = 12
    num_layer = 2
    vocab_size = 24
    config = BertConfig(vocab_size=vocab_size,
                        hidden_size=hidden_dim,
                        intermediate_size=hidden_dim * 4,
                        num_attention_heads=num_head,
                        max_position_embeddings=sequence_length,
                        num_hidden_layers=num_layer,
                        hidden_dropout_prob=0.,
                        attention_probs_dropout_prob=0.)
    with ColoInitContext(lazy_memory_allocate=False, device=get_current_device()):
        model = BertForMaskedLM(config)
    model = model.cuda()
    parallel_action = ParallelAction(ComputePattern.TP1D)
    # model.cls.predictions.decoder and model.cls.predictions share the bias, so they should have the same spec
    assert len(model.cls.predictions.decoder.bias.shared_param_modules) == 2
    # They are all Linear, so both row is allowed. This should pass check.
    init_colo_module(model, parallel_action, recursive=True, mode='row')
    # This should be detected by check because you can not set weight as row while set bias as col.
    col_spec = TensorSpec(
        distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [0], [gpc.get_world_size(ParallelMode.PARALLEL_1D)]),
        ParallelAction(ComputePattern.TP1D))
    model.cls.predictions.bias.set_spec(col_spec)
    # Fix: the original try/except silently passed when NO exception was raised.
    # pytest.raises fails the test unless the expected error actually occurs.
    with pytest.raises(Exception, match='incorrectly sharded'):
        check_colo_module(model.cls.predictions.decoder, recursive=False)
def run_dist(rank, world_size, port):
    """Worker: launch with a 1-D tensor-parallel config and run linear checks."""
    tp_config = dict(parallel=dict(tensor=dict(mode="1d", size=world_size)))
    colossalai.launch(config=tp_config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    for shard_mode in ('col', 'row'):
        run_linear_with_spec(shard_mode)
def run_dist_model(rank, world_size, port):
    """Worker: launch with a 1-D tensor-parallel config and run model checks."""
    tp_config = dict(parallel=dict(tensor=dict(mode="1d", size=world_size)))
    colossalai.launch(config=tp_config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    # Every model is exercised in both sharding layouts.
    for name in ('simple_net', 'bert'):
        for shard_mode in ('col', 'row'):
            run_model_with_spec(shard_mode, name)
def run_dist_check(rank, world_size, port):
    """Worker: launch with a 1-D tensor-parallel config and run the shared-param check."""
    tp_config = dict(parallel=dict(tensor=dict(mode="1d", size=world_size)))
    colossalai.launch(config=tp_config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    run_check_shared_param()
@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_module_linear_1d(world_size):
    """Spawn ``world_size`` workers running the linear-spec checks."""
    mp.spawn(partial(run_dist, world_size=world_size, port=free_port()), nprocs=world_size)
@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_module_model(world_size):
    """Spawn ``world_size`` workers running the full-model spec checks."""
    mp.spawn(partial(run_dist_model, world_size=world_size, port=free_port()), nprocs=world_size)
@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 2])
@rerun_if_address_is_in_use()
def test_module_check(world_size):
    """Spawn ``world_size`` workers running the shared-parameter spec check."""
    mp.spawn(partial(run_dist_check, world_size=world_size, port=free_port()), nprocs=world_size)
if __name__ == '__main__':
test_module_check(2) | [
"colossalai.launch",
"colossalai.core.global_context.get_group",
"colossalai.core.global_context.get_local_rank",
"colossalai.core.global_context.get_world_size",
"colossalai.tensor.ParallelAction",
"colossalai.utils.free_port",
"colossalai.utils.cuda.get_current_device",
"colossalai.tensor.init_colo_... | [((7574, 7619), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 4]'], {}), "('world_size', [1, 4])\n", (7597, 7619), False, 'import pytest\n'), ((7621, 7649), 'colossalai.testing.rerun_if_address_is_in_use', 'rerun_if_address_is_in_use', ([], {}), '()\n', (7647, 7649), False, 'from colossalai.testing import rerun_if_address_is_in_use\n'), ((7825, 7870), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 4]'], {}), "('world_size', [1, 4])\n", (7848, 7870), False, 'import pytest\n'), ((7872, 7900), 'colossalai.testing.rerun_if_address_is_in_use', 'rerun_if_address_is_in_use', ([], {}), '()\n', (7898, 7900), False, 'from colossalai.testing import rerun_if_address_is_in_use\n'), ((8078, 8123), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 2]'], {}), "('world_size', [1, 2])\n", (8101, 8123), False, 'import pytest\n'), ((8125, 8153), 'colossalai.testing.rerun_if_address_is_in_use', 'rerun_if_address_is_in_use', ([], {}), '()\n', (8151, 8153), False, 'from colossalai.testing import rerun_if_address_is_in_use\n'), ((916, 972), 'tests.components_to_test.registry.non_distributed_component_funcs.get_callable', 'non_distributed_component_funcs.get_callable', (['model_name'], {}), '(model_name)\n', (960, 972), False, 'from tests.components_to_test.registry import non_distributed_component_funcs\n'), ((1089, 1133), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (1107, 1133), True, 'from colossalai.core import global_context as gpc\n'), ((1139, 1150), '_utils.set_seed', 'set_seed', (['(1)'], {}), '(1)\n', (1147, 1150), False, 'from _utils import tensor_equal, tensor_shard_equal, set_seed\n'), ((1548, 1583), 'colossalai.tensor.ParallelAction', 'ParallelAction', (['ComputePattern.TP1D'], {}), '(ComputePattern.TP1D)\n', (1562, 1583), False, 'from 
colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, register_colo_module, init_colo_module, check_colo_module\n'), ((4461, 4472), 'copy.copy', 'copy', (['model'], {}), '(model)\n', (4465, 4472), False, 'from copy import copy\n'), ((4500, 4535), 'colossalai.tensor.ParallelAction', 'ParallelAction', (['ComputePattern.TP1D'], {}), '(ComputePattern.TP1D)\n', (4514, 4535), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, register_colo_module, init_colo_module, check_colo_module\n'), ((4540, 4607), 'colossalai.tensor.init_colo_module', 'init_colo_module', (['model', 'parallel_action'], {'recursive': '(True)', 'mode': 'mode'}), '(model, parallel_action, recursive=True, mode=mode)\n', (4556, 4607), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, register_colo_module, init_colo_module, check_colo_module\n'), ((4705, 4732), '_utils.tensor_equal', 'tensor_equal', (['out', 'colo_out'], {}), '(out, colo_out)\n', (4717, 4732), False, 'from _utils import tensor_equal, tensor_shard_equal, set_seed\n'), ((4744, 4764), 'torch.rand_like', 'torch.rand_like', (['out'], {}), '(out)\n', (4759, 4764), False, 'import torch\n'), ((4827, 4889), '_utils.tensor_shard_equal', 'tensor_shard_equal', (['model.weight.grad', 'model_handy.weight.grad'], {}), '(model.weight.grad, model_handy.weight.grad)\n', (4845, 4889), False, 'from _utils import tensor_equal, tensor_shard_equal, set_seed\n'), ((4901, 4959), '_utils.tensor_shard_equal', 'tensor_shard_equal', (['model.bias.grad', 'model_handy.bias.grad'], {}), '(model.bias.grad, model_handy.bias.grad)\n', (4919, 4959), False, 'from _utils import tensor_equal, tensor_shard_equal, set_seed\n'), ((5161, 5425), 'transformers.BertConfig', 'BertConfig', ([], {'vocab_size': 'vocab_size', 'hidden_size': 'hidden_dim', 'intermediate_size': '(hidden_dim * 4)', 'num_attention_heads': 'num_head', 'max_position_embeddings': 
'sequence_length', 'num_hidden_layers': 'num_layer', 'hidden_dropout_prob': '(0.0)', 'attention_probs_dropout_prob': '(0.0)'}), '(vocab_size=vocab_size, hidden_size=hidden_dim, intermediate_size\n =hidden_dim * 4, num_attention_heads=num_head, max_position_embeddings=\n sequence_length, num_hidden_layers=num_layer, hidden_dropout_prob=0.0,\n attention_probs_dropout_prob=0.0)\n', (5171, 5425), False, 'from transformers import BertForMaskedLM, BertConfig\n'), ((5749, 5784), 'colossalai.tensor.ParallelAction', 'ParallelAction', (['ComputePattern.TP1D'], {}), '(ComputePattern.TP1D)\n', (5763, 5784), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, register_colo_module, init_colo_module, check_colo_module\n'), ((6053, 6121), 'colossalai.tensor.init_colo_module', 'init_colo_module', (['model', 'parallel_action'], {'recursive': '(True)', 'mode': '"""row"""'}), "(model, parallel_action, recursive=True, mode='row')\n", (6069, 6121), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, register_colo_module, init_colo_module, check_colo_module\n'), ((6737, 6853), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'config', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=config, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (6754, 6853), False, 'import colossalai\n'), ((7037, 7153), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'config', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=config, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (7054, 7153), False, 'import colossalai\n'), ((7413, 7529), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'config', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 
'port': 'port', 'backend': '"""nccl"""'}), "(config=config, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (7430, 7529), False, 'import colossalai\n'), ((7767, 7804), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (7775, 7804), True, 'import torch.multiprocessing as mp\n'), ((8020, 8057), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (8028, 8057), True, 'import torch.multiprocessing as mp\n'), ((8273, 8310), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (8281, 8310), True, 'import torch.multiprocessing as mp\n'), ((4420, 4441), 'torch.nn.Linear', 'torch.nn.Linear', (['(4)', '(8)'], {}), '(4, 8)\n', (4435, 4441), False, 'import torch\n'), ((5677, 5700), 'transformers.BertForMaskedLM', 'BertForMaskedLM', (['config'], {}), '(config)\n', (5692, 5700), False, 'from transformers import BertForMaskedLM, BertConfig\n'), ((6375, 6410), 'colossalai.tensor.ParallelAction', 'ParallelAction', (['ComputePattern.TP1D'], {}), '(ComputePattern.TP1D)\n', (6389, 6410), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, register_colo_module, init_colo_module, check_colo_module\n'), ((6479, 6544), 'colossalai.tensor.check_colo_module', 'check_colo_module', (['model.cls.predictions.decoder'], {'recursive': '(False)'}), '(model.cls.predictions.decoder, recursive=False)\n', (6496, 6544), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, register_colo_module, init_colo_module, check_colo_module\n'), ((1815, 1902), 'colossalai.tensor.init_colo_module', 'init_colo_module', (['model.bert.embeddings', 'parallel_action'], {'recursive': '(True)', 'mode': 'mode'}), '(model.bert.embeddings, parallel_action, recursive=True,\n mode=mode)\n', (1831, 
1902), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, register_colo_module, init_colo_module, check_colo_module\n'), ((1911, 1996), 'colossalai.tensor.init_colo_module', 'init_colo_module', (['model.bert.encoder', 'parallel_action'], {'recursive': '(True)', 'mode': 'mode'}), '(model.bert.encoder, parallel_action, recursive=True, mode=mode\n )\n', (1927, 1996), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, register_colo_module, init_colo_module, check_colo_module\n'), ((2004, 2083), 'colossalai.tensor.init_colo_module', 'init_colo_module', (['model.classifier', 'parallel_action'], {'recursive': '(True)', 'mode': '"""row"""'}), "(model.classifier, parallel_action, recursive=True, mode='row')\n", (2020, 2083), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, register_colo_module, init_colo_module, check_colo_module\n'), ((2438, 2505), 'colossalai.tensor.init_colo_module', 'init_colo_module', (['model', 'parallel_action'], {'recursive': '(True)', 'mode': 'mode'}), '(model, parallel_action, recursive=True, mode=mode)\n', (2454, 2505), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, register_colo_module, init_colo_module, check_colo_module\n'), ((2612, 2632), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (2630, 2632), False, 'from colossalai.utils.cuda import get_current_device\n'), ((2659, 2679), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (2677, 2679), False, 'from colossalai.utils.cuda import get_current_device\n'), ((4621, 4637), 'torch.rand', 'torch.rand', (['(2)', '(4)'], {}), '(2, 4)\n', (4631, 4637), False, 'import torch\n'), ((6272, 6311), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (6285, 6311), 
True, 'from colossalai.core import global_context as gpc\n'), ((7750, 7761), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (7759, 7761), False, 'from colossalai.utils import free_port\n'), ((8003, 8014), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (8012, 8014), False, 'from colossalai.utils import free_port\n'), ((8256, 8267), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (8265, 8267), False, 'from colossalai.utils import free_port\n'), ((1183, 1203), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (1201, 1203), False, 'from colossalai.utils.cuda import get_current_device\n'), ((2124, 2212), 'colossalai.tensor.init_colo_module', 'init_colo_module', (['model.bert.embeddings', 'parallel_action'], {'recursive': '(True)', 'mode': '"""col"""'}), "(model.bert.embeddings, parallel_action, recursive=True,\n mode='col')\n", (2140, 2212), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, register_colo_module, init_colo_module, check_colo_module\n'), ((2221, 2306), 'colossalai.tensor.init_colo_module', 'init_colo_module', (['model.bert.encoder', 'parallel_action'], {'recursive': '(True)', 'mode': 'mode'}), '(model.bert.encoder, parallel_action, recursive=True, mode=mode\n )\n', (2237, 2306), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, register_colo_module, init_colo_module, check_colo_module\n'), ((2314, 2392), 'colossalai.tensor.init_colo_module', 'init_colo_module', (['model.classifier', 'parallel_action'], {'recursive': '(True)', 'mode': 'mode'}), '(model.classifier, parallel_action, recursive=True, mode=mode)\n', (2330, 2392), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, register_colo_module, init_colo_module, check_colo_module\n'), ((2733, 2772), 'colossalai.core.global_context.get_group', 'gpc.get_group', 
(['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (2746, 2772), True, 'from colossalai.core import global_context as gpc\n'), ((2826, 2865), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (2839, 2865), True, 'from colossalai.core import global_context as gpc\n'), ((3369, 3384), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3382, 3384), False, 'import torch\n'), ((3409, 3450), 'torch.allclose', 'torch.allclose', (['loss', 'loss_seq'], {'rtol': '(0.01)'}), '(loss, loss_seq, rtol=0.01)\n', (3423, 3450), False, 'import torch\n'), ((3549, 3564), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3562, 3564), False, 'import torch\n'), ((4381, 4401), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (4399, 4401), False, 'from colossalai.utils.cuda import get_current_device\n'), ((5638, 5658), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (5656, 5658), False, 'from colossalai.utils.cuda import get_current_device\n'), ((6319, 6363), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (6337, 6363), True, 'from colossalai.core import global_context as gpc\n'), ((3753, 3775), 'torch.allclose', 'torch.allclose', (['p1', 'p2'], {}), '(p1, p2)\n', (3767, 3775), False, 'import torch\n'), ((4250, 4278), 'torch.allclose', 'torch.allclose', (['p1', 'split_p2'], {}), '(p1, split_p2)\n', (4264, 4278), False, 'import torch\n'), ((3972, 4007), 'torch.chunk', 'torch.chunk', (['p2', 'world_size'], {'dim': '(-1)'}), '(p2, world_size, dim=-1)\n', (3983, 4007), False, 'import torch\n'), ((4180, 4214), 'torch.chunk', 'torch.chunk', (['p2', 'world_size'], {'dim': '(0)'}), '(p2, world_size, dim=0)\n', (4191, 4214), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# adpated from torch.utils.data.DistributedSampler
import math
import random
import numpy as np
from typing import TypeVar, Iterator
import torch
from torch.utils.data import Sampler, Dataset, DataLoader
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.registry import DATA_SAMPLERS
T_co = TypeVar('T_co', covariant=True)  # covariant item type yielded by the sampler's iterator
@DATA_SAMPLERS.register_module
class DataParallelSampler(Sampler):
    """Sampler that restricts loading to this process's data-parallel shard.

    Each rank in the ``ParallelMode.DATA`` group iterates over a disjoint
    1/world_size slice of the dataset indices, so together the ranks cover
    the whole dataset.

    Args:
        dataset (:class:`torch.utils.data.Dataset`): The dataset to sample from.
        shuffle (bool, optional): Whether to shuffle the indices each epoch.
            Defaults to False.
        seed (int, optional): Base random seed used for shuffling. Defaults to 0.
        drop_last (bool, optional): If True, drop the tail of the data so that
            every rank receives the same number of samples without padding;
            otherwise indices are padded by repetition. Defaults to False.
    """

    def __init__(self,
                 dataset: Dataset,
                 shuffle: bool = False,
                 seed: int = 0,
                 drop_last: bool = False) -> None:
        self.dataset = dataset
        self.num_replicas = gpc.get_world_size(ParallelMode.DATA)
        self.rank = gpc.get_local_rank(ParallelMode.DATA)
        self.epoch = 0
        self.drop_last = drop_last

        dataset_len = len(self.dataset)    # type: ignore[arg-type]
        # When the dataset divides evenly there is nothing to drop; every rank
        # already receives an equal share.
        if self.drop_last and dataset_len % self.num_replicas != 0:
            # Round down to the nearest length that splits evenly so each rank
            # gets the same amount of data.
            self.num_samples = math.ceil((dataset_len - self.num_replicas) / self.num_replicas)
        else:
            self.num_samples = math.ceil(dataset_len / self.num_replicas)
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle
        self.seed = seed

    def __iter__(self) -> Iterator[T_co]:
        if self.shuffle:
            # Deterministic shuffle derived from (seed, epoch).
            generator = torch.Generator()
            generator.manual_seed(self.seed + self.epoch)
            index_list = torch.randperm(len(self.dataset), generator=generator).tolist()  # type: ignore[arg-type]
            # Advance the epoch here so the next pass reshuffles even if the
            # caller never invokes set_epoch() explicitly.
            self.epoch += 1
        else:
            index_list = list(range(len(self.dataset)))  # type: ignore[arg-type]

        if self.drop_last:
            # Truncate so the length divides evenly among the replicas.
            index_list = index_list[:self.total_size]
        else:
            # Pad by repeating indices until the length divides evenly.
            pad = self.total_size - len(index_list)
            if pad <= len(index_list):
                index_list += index_list[:pad]
            else:
                repeats = math.ceil(pad / len(index_list))
                index_list += (index_list * repeats)[:pad]
        assert len(index_list) == self.total_size

        # Each rank takes every num_replicas-th index starting at its rank.
        index_list = index_list[self.rank:self.total_size:self.num_replicas]
        assert len(index_list) == self.num_samples
        return iter(index_list)

    def __len__(self) -> int:
        return self.num_samples

    def set_epoch(self, epoch: int) -> None:
        r"""Set the epoch used to derive the shuffle seed.

        When :attr:`shuffle=True`, this ensures all replicas use a different
        random ordering for each epoch. Otherwise, the next iteration of this
        sampler will yield the same ordering.

        Args:
            epoch (int): Epoch number.
        """
        self.epoch = epoch
def get_dataloader(dataset,
                   shuffle=False,
                   seed=1024,
                   add_sampler=True,
                   drop_last=False,
                   pin_memory=False,
                   num_workers=0,
                   **kwargs):
    r"""Set up a deterministic dataloader (also configure seed workers, samplers and whether shuffle or not)

    Note:
        When pipeline parallel is enabled, shuffle cannot be True as it will result in mismatch between input data
        on the 1st stage and label on the last stage.

    Args:
        dataset (:class:`torch.utils.data.Dataset`): The dataset to be loaded.
        shuffle (bool, optional): Whether to shuffle the dataset. Defaults to False.
        seed (int, optional): Random worker seed for sampling, defaults to 1024.
        add_sampler: Whether to add ``DistributedDataParallelSampler`` to the dataset. Defaults to True.
        drop_last (bool, optional): Set to True to drop the last incomplete batch, if the dataset size
            is not divisible by the batch size. If False and the size of dataset is not divisible by
            the batch size, then the last batch will be smaller, defaults to False.
        pin_memory (bool, optional): Whether to pin memory address in CPU memory. Defaults to False.
        num_workers (int, optional): Number of worker threads for this dataloader. Defaults to 0.
        kwargs (dict): optional parameters for ``torch.utils.data.DataLoader``, more details could be found in
                `DataLoader <https://pytorch.org/docs/stable/_modules/torch/utils/data/dataloader.html#DataLoader>`_.

    Returns:
        :class:`torch.utils.data.DataLoader`: A DataLoader used for training or testing.
    """
    _kwargs = kwargs.copy()

    # Only attach the data-parallel sampler when distributed training is
    # actually initialized with more than one data-parallel rank.
    if add_sampler and gpc.is_initialized(ParallelMode.DATA) and gpc.get_world_size(ParallelMode.DATA) > 1:
        sampler = DataParallelSampler(dataset, shuffle=shuffle)
    else:
        sampler = None

    # Deterministic dataloader: every worker derives its RNG state from `seed`.
    def seed_worker(worker_id):
        worker_seed = seed
        np.random.seed(worker_seed)
        torch.manual_seed(worker_seed)
        random.seed(worker_seed)

    # `sampler` and `shuffle` are mutually exclusive in torch's DataLoader, so
    # route exactly one of them through the kwargs. This removes the previous
    # duplication of two near-identical DataLoader constructions.
    if sampler is None:
        _kwargs['shuffle'] = shuffle
    else:
        _kwargs['sampler'] = sampler

    return DataLoader(dataset,
                      worker_init_fn=seed_worker,
                      drop_last=drop_last,
                      pin_memory=pin_memory,
                      num_workers=num_workers,
                      **_kwargs)
| [
"colossalai.core.global_context.get_local_rank",
"colossalai.core.global_context.get_world_size",
"colossalai.core.global_context.is_initialized"
] | [((417, 448), 'typing.TypeVar', 'TypeVar', (['"""T_co"""'], {'covariant': '(True)'}), "('T_co', covariant=True)\n", (424, 448), False, 'from typing import TypeVar, Iterator\n'), ((1359, 1396), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (1377, 1396), True, 'from colossalai.core import global_context as gpc\n'), ((1417, 1454), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (1435, 1454), True, 'from colossalai.core import global_context as gpc\n'), ((6080, 6117), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (6098, 6117), True, 'from colossalai.core import global_context as gpc\n'), ((6361, 6388), 'numpy.random.seed', 'np.random.seed', (['worker_seed'], {}), '(worker_seed)\n', (6375, 6388), True, 'import numpy as np\n'), ((6397, 6427), 'torch.manual_seed', 'torch.manual_seed', (['worker_seed'], {}), '(worker_seed)\n', (6414, 6427), False, 'import torch\n'), ((6436, 6460), 'random.seed', 'random.seed', (['worker_seed'], {}), '(worker_seed)\n', (6447, 6460), False, 'import random\n'), ((6501, 6650), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'worker_init_fn': 'seed_worker', 'shuffle': 'shuffle', 'drop_last': 'drop_last', 'pin_memory': 'pin_memory', 'num_workers': 'num_workers'}), '(dataset, worker_init_fn=seed_worker, shuffle=shuffle, drop_last=\n drop_last, pin_memory=pin_memory, num_workers=num_workers, **_kwargs)\n', (6511, 6650), False, 'from torch.utils.data import Sampler, Dataset, DataLoader\n'), ((6827, 6976), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'sampler': 'sampler', 'worker_init_fn': 'seed_worker', 'drop_last': 'drop_last', 'pin_memory': 'pin_memory', 'num_workers': 'num_workers'}), '(dataset, sampler=sampler, worker_init_fn=seed_worker, drop_last=\n drop_last, 
pin_memory=pin_memory, num_workers=num_workers, **_kwargs)\n', (6837, 6976), False, 'from torch.utils.data import Sampler, Dataset, DataLoader\n'), ((2708, 2725), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (2723, 2725), False, 'import torch\n'), ((6122, 6159), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (6140, 6159), True, 'from colossalai.core import global_context as gpc\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from copy import deepcopy
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.logging import disable_existing_loggers, get_dist_logger
from colossalai.testing import parameterize
from colossalai.utils import free_port
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)
from colossalai.zero.sharded_param import ShardedParam, ShardedTensor
from colossalai.zero.sharded_param.sharded_param import ShardedParamV2
from tests.components_to_test.registry import non_distributed_component_funcs
from tests.test_zero_data_parallel.common import CONFIG, allclose
@parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy])
def run_shard_tensor_with_strategy(shard_strategy_class, world_size):
    """Shard then gather a ShardedTensor and check that its shape round-trips."""
    full_shape = [world_size * 2, 3]
    sharded = ShardedTensor(tensor=torch.randn(world_size * 2, 3))
    assert list(sharded.origin_shape) == full_shape
    assert list(sharded.shape) == full_shape

    strategy = shard_strategy_class()
    # Sharding flattens the payload and splits it across ranks.
    strategy.shard([sharded])
    assert list(sharded.shape) == [6], f"{list(sharded.shape)} vs 6"
    # Gathering must restore the original shape.
    strategy.gather([sharded])
    assert list(sharded.shape) == full_shape, f"{list(sharded.shape)} vs {full_shape}"
def _run_shard_tensor(rank, world_size, port):
    """Per-process entry point: bring up the distributed context, then run the test."""
    colossalai.launch(config=CONFIG,
                      rank=rank,
                      world_size=world_size,
                      host='localhost',
                      port=port,
                      backend='nccl')
    run_shard_tensor_with_strategy(world_size=world_size)
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
def test_shard_tensor(world_size):
    """Spawn `world_size` workers that exercise the shard strategies on a ShardedTensor."""
    port = free_port()
    mp.spawn(partial(_run_shard_tensor, world_size=world_size, port=port), nprocs=world_size)
def _run_shard_param_v2(rank, world_size, port):
    """Check that ShardedParamV2 keeps the payload and can drop the torch tensor."""
    colossalai.launch(config=CONFIG,
                      rank=rank,
                      world_size=world_size,
                      host='localhost',
                      port=port,
                      backend='nccl')

    param = torch.nn.Parameter(torch.randn(2, 3))
    param_ref = deepcopy(param)
    sparam = ShardedParamV2(param=param, process_group=None)

    allclose(sparam.data.payload, param_ref.data)

    # After removing the torch payload only a 1-element placeholder remains.
    sparam.remove_torch_payload()
    assert param.data.numel() == 1
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
def test_shard_param_v2(world_size):
    """Spawn workers that validate ShardedParamV2 payload handling."""
    port = free_port()
    mp.spawn(partial(_run_shard_param_v2, world_size=world_size, port=port), nprocs=world_size)
def _run_test_shard_param(rank, world_size, port):
    """Shard and gather every parameter of each registered test model, checking
    that the sharded payload has the expected per-rank element count."""
    colossalai.launch(config=CONFIG,
                      rank=rank,
                      world_size=world_size,
                      host='localhost',
                      port=port,
                      backend='nccl')

    param = torch.nn.Parameter(torch.randn(2, 3))
    param_ref = deepcopy(param)
    sparam = ShardedParamV2(param=param, process_group=None)
    print(sparam.data)
    print(param_ref.data)

    logger = get_dist_logger()
    cpu = torch.device('cpu')
    for get_components_func in non_distributed_component_funcs:
        model_builder, *_ = get_components_func()
        model = model_builder(checkpoint=True)
        # Attach col_attr so access to the parameter data is routed through the shard.
        for _, p in model.named_parameters():
            expected_numel = (p.numel() + world_size - 1) // world_size
            p.col_attr = ShardedParam(p)
            p.col_attr.shard()
            shard_payload = p.col_attr.payload(cpu)
            assert expected_numel == shard_payload.numel()
        for _, p in model.named_parameters():
            p.col_attr.gather()
            p.col_attr.payload(cpu)
    disable_existing_loggers([logger])
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
def test_shard_param(world_size):
    """Spawn workers that shard/gather parameters of the registered test models."""
    port = free_port()
    mp.spawn(partial(_run_test_shard_param, world_size=world_size, port=port), nprocs=world_size)
def _run_init_shard_param(rank, world_size, port):
    """Construct ShardedParam from a parameter and from a raw shape, both
    sharded and unsharded, and check the resulting payload shapes."""
    colossalai.launch(config=CONFIG,
                      rank=rank,
                      world_size=world_size,
                      host='localhost',
                      port=port,
                      backend='nccl')

    cuda = torch.device('cuda')
    cpu = torch.device('cpu')

    # From an existing parameter, sharded: each rank holds a single row.
    param = torch.nn.Parameter(data=torch.rand(world_size, 3))
    sparam = ShardedParam(param, None, True)
    payload = sparam.payload(cuda)
    assert list(payload.shape) == [3]
    del sparam

    param_shape = (world_size, 3)
    # From a raw shape, sharded.
    sparam = ShardedParam(param_shape, process_group=None, is_sharded=True, device=cpu)
    payload = sparam.payload(cuda)
    assert list(payload.shape) == [3]

    # From a raw shape, unsharded: the full shape is kept.
    sparam = ShardedParam(param_shape, process_group=None, is_sharded=False, device=cpu)
    payload = sparam.payload(cuda)
    assert list(payload.shape) == [world_size, 3]
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 4])
def test_init_shard_param(world_size):
    """Spawn workers that exercise the different ShardedParam constructors."""
    port = free_port()
    mp.spawn(partial(_run_init_shard_param, world_size=world_size, port=port), nprocs=world_size)
if __name__ == '__main__':
    # Direct invocation (outside pytest): run each spawn-based test with a
    # fixed world size.
    test_shard_tensor(2)
    test_shard_param(2)
    test_shard_param_v2(2)
    test_init_shard_param(4)
| [
"colossalai.zero.sharded_param.ShardedParam",
"colossalai.launch",
"colossalai.logging.get_dist_logger",
"colossalai.utils.free_port",
"colossalai.logging.disable_existing_loggers",
"colossalai.testing.parameterize",
"colossalai.zero.sharded_param.sharded_param.ShardedParamV2"
] | [((719, 809), 'colossalai.testing.parameterize', 'parameterize', (['"""shard_strategy_class"""', '[TensorShardStrategy, BucketTensorShardStrategy]'], {}), "('shard_strategy_class', [TensorShardStrategy,\n BucketTensorShardStrategy])\n", (731, 809), False, 'from colossalai.testing import parameterize\n'), ((1567, 1612), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 2]'], {}), "('world_size', [1, 2])\n", (1590, 1612), False, 'import pytest\n'), ((2228, 2273), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 2]'], {}), "('world_size', [1, 2])\n", (2251, 2273), False, 'import pytest\n'), ((3615, 3660), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 2]'], {}), "('world_size', [1, 2])\n", (3638, 3660), False, 'import pytest\n'), ((4701, 4746), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 4]'], {}), "('world_size', [1, 4])\n", (4724, 4746), False, 'import pytest\n'), ((1376, 1492), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'CONFIG', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=CONFIG, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (1393, 1492), False, 'import colossalai\n'), ((1735, 1772), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (1743, 1772), True, 'import torch.multiprocessing as mp\n'), ((1828, 1944), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'CONFIG', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=CONFIG, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (1845, 1944), False, 'import colossalai\n'), ((2007, 2022), 'copy.deepcopy', 'deepcopy', (['param'], {}), '(param)\n', (2015, 2022), False, 
'from copy import deepcopy\n'), ((2036, 2083), 'colossalai.zero.sharded_param.sharded_param.ShardedParamV2', 'ShardedParamV2', ([], {'param': 'param', 'process_group': 'None'}), '(param=param, process_group=None)\n', (2050, 2083), False, 'from colossalai.zero.sharded_param.sharded_param import ShardedParamV2\n'), ((2089, 2134), 'tests.test_zero_data_parallel.common.allclose', 'allclose', (['sparam.data.payload', 'param_ref.data'], {}), '(sparam.data.payload, param_ref.data)\n', (2097, 2134), False, 'from tests.test_zero_data_parallel.common import CONFIG, allclose\n'), ((2400, 2437), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (2408, 2437), True, 'import torch.multiprocessing as mp\n'), ((2495, 2611), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'CONFIG', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=CONFIG, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (2512, 2611), False, 'import colossalai\n'), ((2674, 2689), 'copy.deepcopy', 'deepcopy', (['param'], {}), '(param)\n', (2682, 2689), False, 'from copy import deepcopy\n'), ((2703, 2750), 'colossalai.zero.sharded_param.sharded_param.ShardedParamV2', 'ShardedParamV2', ([], {'param': 'param', 'process_group': 'None'}), '(param=param, process_group=None)\n', (2717, 2750), False, 'from colossalai.zero.sharded_param.sharded_param import ShardedParamV2\n'), ((2814, 2831), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (2829, 2831), False, 'from colossalai.logging import disable_existing_loggers, get_dist_logger\n'), ((3786, 3823), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (3794, 3823), True, 'import torch.multiprocessing as mp\n'), ((3881, 3997), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'CONFIG', 
'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=CONFIG, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (3898, 3997), False, 'import colossalai\n'), ((4069, 4100), 'colossalai.zero.sharded_param.ShardedParam', 'ShardedParam', (['param', 'None', '(True)'], {}), '(param, None, True)\n', (4081, 4100), False, 'from colossalai.zero.sharded_param import ShardedParam, ShardedTensor\n'), ((4877, 4914), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (4885, 4914), True, 'import torch.multiprocessing as mp\n'), ((1972, 1989), 'torch.randn', 'torch.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (1983, 1989), False, 'import torch\n'), ((2639, 2656), 'torch.randn', 'torch.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (2650, 2656), False, 'import torch\n'), ((3559, 3593), 'colossalai.logging.disable_existing_loggers', 'disable_existing_loggers', (['[logger]'], {}), '([logger])\n', (3583, 3593), False, 'from colossalai.logging import disable_existing_loggers, get_dist_logger\n'), ((4130, 4150), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4142, 4150), False, 'import torch\n'), ((4375, 4395), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4387, 4395), False, 'import torch\n'), ((4606, 4626), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4618, 4626), False, 'import torch\n'), ((905, 935), 'torch.randn', 'torch.randn', (['(world_size * 2)', '(3)'], {}), '(world_size * 2, 3)\n', (916, 935), False, 'import torch\n'), ((1718, 1729), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (1727, 1729), False, 'from colossalai.utils import free_port\n'), ((2383, 2394), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (2392, 2394), False, 'from colossalai.utils import free_port\n'), ((3217, 3236), 
'colossalai.zero.sharded_param.ShardedParam', 'ShardedParam', (['param'], {}), '(param)\n', (3229, 3236), False, 'from colossalai.zero.sharded_param import ShardedParam, ShardedTensor\n'), ((3769, 3780), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (3778, 3780), False, 'from colossalai.utils import free_port\n'), ((4029, 4054), 'torch.rand', 'torch.rand', (['world_size', '(3)'], {}), '(world_size, 3)\n', (4039, 4054), False, 'import torch\n'), ((4325, 4344), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4337, 4344), False, 'import torch\n'), ((4556, 4575), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4568, 4575), False, 'import torch\n'), ((4860, 4871), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (4869, 4871), False, 'from colossalai.utils import free_port\n'), ((3320, 3339), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3332, 3339), False, 'import torch\n'), ((3529, 3548), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3541, 3548), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device, synchronize
def ring_forward(tensor_send_next: torch.Tensor, parallel_mode: ParallelMode):
    """Sends a tensor to the next member and receives a tensor from the previous member.
    This function returns the received tensor from the previous member.

    Args:
        tensor_send_next: Tensor sent to next member
        parallel_mode: Parallel group mode used in this communication

    Returns:
        :class:`torch.Tensor`: The tensor received from the previous.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    rank = gpc.get_global_rank()

    # Receive buffer matching the outgoing tensor's shape and dtype.
    tensor_recv_prev = torch.empty(tensor_send_next.size(),
                                   requires_grad=True,
                                   device=get_current_device(),
                                   dtype=tensor_send_next.dtype)

    send_next_op = torch.distributed.P2POp(torch.distributed.isend,
                                           tensor_send_next,
                                           gpc.get_next_global_rank(parallel_mode))
    recv_prev_op = torch.distributed.P2POp(torch.distributed.irecv,
                                           tensor_recv_prev,
                                           gpc.get_prev_global_rank(parallel_mode))

    # Even ranks post (recv, send) while odd ranks post (send, recv) so that
    # the ring communication does not deadlock.
    if rank % 2 == 0:
        ops = [recv_prev_op, send_next_op]
    else:
        ops = [send_next_op, recv_prev_op]

    for req in torch.distributed.batch_isend_irecv(ops):
        req.wait()
    # To protect against race condition when using batch_isend_irecv().
    synchronize()
    return tensor_recv_prev
| [
"colossalai.core.global_context.get_prev_global_rank",
"colossalai.utils.get_current_device",
"colossalai.utils.synchronize",
"colossalai.core.global_context.get_global_rank",
"colossalai.core.global_context.get_next_global_rank"
] | [((1022, 1043), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (1041, 1043), True, 'from colossalai.core import global_context as gpc\n'), ((1746, 1786), 'torch.distributed.batch_isend_irecv', 'torch.distributed.batch_isend_irecv', (['ops'], {}), '(ops)\n', (1781, 1786), False, 'import torch\n'), ((1904, 1917), 'colossalai.utils.synchronize', 'synchronize', ([], {}), '()\n', (1915, 1917), False, 'from colossalai.utils import get_current_device, synchronize\n'), ((1406, 1445), 'colossalai.core.global_context.get_next_global_rank', 'gpc.get_next_global_rank', (['parallel_mode'], {}), '(parallel_mode)\n', (1430, 1445), True, 'from colossalai.core import global_context as gpc\n'), ((1609, 1648), 'colossalai.core.global_context.get_prev_global_rank', 'gpc.get_prev_global_rank', (['parallel_mode'], {}), '(parallel_mode)\n', (1633, 1648), True, 'from colossalai.core import global_context as gpc\n'), ((1191, 1211), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (1209, 1211), False, 'from colossalai.utils import get_current_device, synchronize\n')] |
import pytest
from colossalai.utils.model.colo_init_context import ColoInitContext
import torch
from colossalai.utils.cuda import get_current_device
@pytest.mark.skip
# FIXME(ver217): support lazy init
def test_lazy_init():
    """With lazy allocation, a parameter holds no payload until it is evaluated."""
    in_dim, out_dim = 4, 5
    with ColoInitContext(lazy_memory_allocate=True) as ctx:
        fc = torch.nn.Linear(in_dim, out_dim, bias=True)

    # lazy_memory_allocate=True: no payload has been materialized yet.
    assert fc.weight._torch_tensor.numel() == 0
    # Evaluating the lazy parameter materializes the full weight.
    fc.weight.torch_tensor()
    assert fc.weight._torch_tensor.numel() == in_dim * out_dim
@pytest.mark.skip
def test_device():
    """Lazily-allocated parameters materialize on the requested device."""
    in_dim, out_dim = 4, 5
    with ColoInitContext(lazy_memory_allocate=True, device=get_current_device()) as ctx:
        fc = torch.nn.Linear(in_dim, out_dim, bias=True)

    # Evaluate the lazy parameter, then check where it landed.
    fc.weight.torch_tensor()
    assert fc.weight.device == get_current_device()
if __name__ == '__main__':
    # Direct invocation (outside pytest): run the lazy-init checks.
    test_lazy_init()
    test_device()
| [
"colossalai.utils.model.colo_init_context.ColoInitContext",
"colossalai.utils.cuda.get_current_device"
] | [((269, 311), 'colossalai.utils.model.colo_init_context.ColoInitContext', 'ColoInitContext', ([], {'lazy_memory_allocate': '(True)'}), '(lazy_memory_allocate=True)\n', (284, 311), False, 'from colossalai.utils.model.colo_init_context import ColoInitContext\n'), ((333, 376), 'torch.nn.Linear', 'torch.nn.Linear', (['in_dim', 'out_dim'], {'bias': '(True)'}), '(in_dim, out_dim, bias=True)\n', (348, 376), False, 'import torch\n'), ((750, 793), 'torch.nn.Linear', 'torch.nn.Linear', (['in_dim', 'out_dim'], {'bias': '(True)'}), '(in_dim, out_dim, bias=True)\n', (765, 793), False, 'import torch\n'), ((884, 904), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (902, 904), False, 'from colossalai.utils.cuda import get_current_device\n'), ((707, 727), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (725, 727), False, 'from colossalai.utils.cuda import get_current_device\n')] |
import torch
from colossalai.context.parallel_mode import ParallelMode
from colossalai.tensor import ColoTensor
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_port
from colossalai.core import global_context as gpc
from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction
from _utils import check_equal, replace_parameter_add_grad, broadcast_tensor_chunk
def run_linear_tp1d_col_test():
    """Check a column-sharded (TP1DCol) Linear layer against an unsharded master copy.

    Runs an identical forward/backward on both layers and verifies that the
    sharded layer's output and its local gradient shards match the master's.
    """
    device = get_current_device()
    dtype = torch.float32
    world_size = gpc.get_world_size(ParallelMode.PARALLEL_1D)
    in_features = 4
    out_features = 8
    rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)

    # One full (master) layer plus one layer that will receive sharded tensors.
    layer_master = torch.nn.Linear(in_features, out_features)
    layer = torch.nn.Linear(in_features, out_features)

    # Draw input / weight / bias once and broadcast so every rank sees the same data.
    A_master = torch.randn((2, in_features), dtype=dtype, device=device)
    A = broadcast_tensor_chunk(A_master, chunk_size=1)
    A.requires_grad = True
    W_master = torch.randn((out_features, in_features), dtype=dtype, device=device)
    W = broadcast_tensor_chunk(W_master, chunk_size=1)
    W.requires_grad = True
    B_master = torch.randn((out_features,), dtype=dtype, device=device)
    B = broadcast_tensor_chunk(B_master, chunk_size=1)
    B.requires_grad = True

    # Wrap weight and bias in ColoTensors and attach a column-parallel spec.
    sharded_weight = ColoTensor.init_from_torch_tensor(W)
    sharded_bias = ColoTensor.init_from_torch_tensor(B)
    spec = TensorSpec([
        ParallelAction(priority=1,
                       compute_pattern=ComputePattern.TP1DCol_Linear,
                       parallel_mode=ParallelMode.PARALLEL_1D)
    ])
    sharded_weight.set_spec(spec)  # triggers resharding
    sharded_bias.set_spec(spec)
    replace_parameter_add_grad(layer, sharded_weight, sharded_bias)
    out = layer(A)

    replace_parameter_add_grad(layer_master, W_master, B_master)
    A_master.requires_grad = True
    C_master = layer_master(A_master)
    check_equal(out, C_master.clone())

    # Backward with the same broadcast upstream gradient on both layers.
    grad_master = torch.randn(C_master.shape, dtype=dtype, device=get_current_device())
    grad = broadcast_tensor_chunk(grad_master, chunk_size=1)
    out.backward(grad)
    C_master.backward(grad_master.clone())

    # Column sharding splits weight and bias along dim 0: compare local shards.
    check_equal(torch.chunk(W_master.grad, world_size, dim=0)[rank], layer.weight.grad)
    check_equal(torch.chunk(B_master.grad, world_size, dim=0)[rank], layer.bias.grad)
def run_linear_tp1d_row_test():
    """Check a row-sharded (TP1DRow) Linear layer against an unsharded master copy.

    Runs an identical forward/backward on both layers and verifies the sharded
    layer's output and gradients against the master's.
    """
    device = get_current_device()
    dtype = torch.float32
    world_size = gpc.get_world_size(ParallelMode.PARALLEL_1D)
    in_features = 4
    out_features = 5
    rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)

    # One full (master) layer plus one layer that will receive sharded tensors.
    layer_master = torch.nn.Linear(in_features, out_features)
    layer = torch.nn.Linear(in_features, out_features)

    # Draw input / weight / bias once and broadcast so every rank sees the same data.
    A_master = torch.randn((2, in_features), dtype=dtype, device=device)
    A = broadcast_tensor_chunk(A_master, chunk_size=1)
    A.requires_grad = True
    W_master = torch.randn((out_features, in_features), dtype=dtype, device=device)
    W = broadcast_tensor_chunk(W_master, chunk_size=1)
    W.requires_grad = True
    B_master = torch.randn((out_features,), dtype=dtype, device=device)
    B = broadcast_tensor_chunk(B_master, chunk_size=1)
    B.requires_grad = True

    # Row sharding splits only the weight; the bias stays replicated.
    sharded_weight = ColoTensor.init_from_torch_tensor(W)
    spec = TensorSpec([
        ParallelAction(priority=1,
                       compute_pattern=ComputePattern.TP1DRow_Linear,
                       parallel_mode=ParallelMode.PARALLEL_1D)
    ])
    sharded_weight.set_spec(spec=spec)  # triggers resharding
    sharded_bias = ColoTensor.init_from_torch_tensor(B)
    replace_parameter_add_grad(layer, sharded_weight, sharded_bias)
    out = layer(A)

    replace_parameter_add_grad(layer_master, W_master, B_master)
    A_master.requires_grad = True
    C_master = layer_master(A_master)
    check_equal(out, C_master.clone())

    # Backward with the same broadcast upstream gradient on both layers.
    grad_master = torch.randn(C_master.shape, dtype=dtype, device=get_current_device())
    grad = broadcast_tensor_chunk(grad_master, chunk_size=1)
    out.backward(grad)
    C_master.backward(grad_master.clone())

    # Weight gradient is chunked along the input dimension; the bias gradient
    # is replicated, so it is compared without chunking.
    check_equal(torch.chunk(W_master.grad, world_size, dim=-1)[rank], layer.weight.grad)
    check_equal(B_master.grad, layer.bias.grad)
def run_dist(rank, world_size, port):
    """Per-process entry point: set up a 1D tensor-parallel group, then run both tests."""
    config = {'parallel': {'tensor': {'mode': "1d", 'size': world_size}}}
    colossalai.launch(config=config,
                      rank=rank,
                      world_size=world_size,
                      host='localhost',
                      port=port,
                      backend='nccl')
    run_linear_tp1d_row_test()
    run_linear_tp1d_col_test()
@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_linear_1d(world_size):
    """Spawn one process per rank and run the 1D sharded-linear tests."""
    worker = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(worker, nprocs=world_size)
# Allow running this test module directly, outside of pytest.
if __name__ == '__main__':
    test_linear_1d()
| [
"colossalai.tensor.ParallelAction",
"colossalai.core.global_context.get_local_rank",
"colossalai.launch",
"colossalai.utils.free_port",
"colossalai.utils.cuda.get_current_device",
"colossalai.core.global_context.get_world_size",
"colossalai.tensor.TensorSpec",
"colossalai.tensor.ColoTensor.init_from_t... | [((5300, 5345), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 4]'], {}), "('world_size', [1, 4])\n", (5323, 5345), False, 'import pytest\n'), ((5347, 5375), 'colossalai.testing.rerun_if_address_is_in_use', 'rerun_if_address_is_in_use', ([], {}), '()\n', (5373, 5375), False, 'from colossalai.testing import rerun_if_address_is_in_use\n'), ((627, 647), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (645, 647), False, 'from colossalai.utils.cuda import get_current_device\n'), ((686, 730), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (704, 730), True, 'from colossalai.core import global_context as gpc\n'), ((790, 834), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (808, 834), True, 'from colossalai.core import global_context as gpc\n'), ((855, 897), 'torch.nn.Linear', 'torch.nn.Linear', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (870, 897), False, 'import torch\n'), ((910, 952), 'torch.nn.Linear', 'torch.nn.Linear', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (925, 952), False, 'import torch\n'), ((1000, 1048), 'torch.randn', 'torch.randn', (['A_shape'], {'dtype': 'dtype', 'device': 'device'}), '(A_shape, dtype=dtype, device=device)\n', (1011, 1048), False, 'import torch\n'), ((1057, 1103), '_utils.broadcast_tensor_chunk', 'broadcast_tensor_chunk', (['A_master'], {'chunk_size': '(1)'}), '(A_master, chunk_size=1)\n', (1079, 1103), False, 'from _utils import check_equal, replace_parameter_add_grad, broadcast_tensor_chunk\n'), ((1189, 1237), 'torch.randn', 'torch.randn', (['W_shape'], {'dtype': 'dtype', 'device': 'device'}), '(W_shape, dtype=dtype, device=device)\n', (1200, 1237), False, 'import torch\n'), ((1246, 
1292), '_utils.broadcast_tensor_chunk', 'broadcast_tensor_chunk', (['W_master'], {'chunk_size': '(1)'}), '(W_master, chunk_size=1)\n', (1268, 1292), False, 'from _utils import check_equal, replace_parameter_add_grad, broadcast_tensor_chunk\n'), ((1365, 1413), 'torch.randn', 'torch.randn', (['B_shape'], {'dtype': 'dtype', 'device': 'device'}), '(B_shape, dtype=dtype, device=device)\n', (1376, 1413), False, 'import torch\n'), ((1422, 1468), '_utils.broadcast_tensor_chunk', 'broadcast_tensor_chunk', (['B_master'], {'chunk_size': '(1)'}), '(B_master, chunk_size=1)\n', (1444, 1468), False, 'from _utils import check_equal, replace_parameter_add_grad, broadcast_tensor_chunk\n'), ((1572, 1608), 'colossalai.tensor.ColoTensor.init_from_torch_tensor', 'ColoTensor.init_from_torch_tensor', (['W'], {}), '(W)\n', (1605, 1608), False, 'from colossalai.tensor import ColoTensor\n'), ((1628, 1664), 'colossalai.tensor.ColoTensor.init_from_torch_tensor', 'ColoTensor.init_from_torch_tensor', (['B'], {}), '(B)\n', (1661, 1664), False, 'from colossalai.tensor import ColoTensor\n'), ((1833, 1865), 'colossalai.tensor.TensorSpec', 'TensorSpec', (['parallel_action_list'], {}), '(parallel_action_list)\n', (1843, 1865), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction\n'), ((1947, 2010), '_utils.replace_parameter_add_grad', 'replace_parameter_add_grad', (['layer', 'sharded_weight', 'sharded_bias'], {}), '(layer, sharded_weight, sharded_bias)\n', (1973, 2010), False, 'from _utils import check_equal, replace_parameter_add_grad, broadcast_tensor_chunk\n'), ((2035, 2095), '_utils.replace_parameter_add_grad', 'replace_parameter_add_grad', (['layer_master', 'W_master', 'B_master'], {}), '(layer_master, W_master, B_master)\n', (2061, 2095), False, 'from _utils import check_equal, replace_parameter_add_grad, broadcast_tensor_chunk\n'), ((2274, 2293), '_utils.check_equal', 'check_equal', (['out', 'C'], {}), '(out, C)\n', (2285, 2293), False, 'from _utils import 
check_equal, replace_parameter_add_grad, broadcast_tensor_chunk\n'), ((2422, 2471), '_utils.broadcast_tensor_chunk', 'broadcast_tensor_chunk', (['grad_master'], {'chunk_size': '(1)'}), '(grad_master, chunk_size=1)\n', (2444, 2471), False, 'from _utils import check_equal, replace_parameter_add_grad, broadcast_tensor_chunk\n'), ((2660, 2698), '_utils.check_equal', 'check_equal', (['W_grad', 'layer.weight.grad'], {}), '(W_grad, layer.weight.grad)\n', (2671, 2698), False, 'from _utils import check_equal, replace_parameter_add_grad, broadcast_tensor_chunk\n'), ((2790, 2826), '_utils.check_equal', 'check_equal', (['B_grad', 'layer.bias.grad'], {}), '(B_grad, layer.bias.grad)\n', (2801, 2826), False, 'from _utils import check_equal, replace_parameter_add_grad, broadcast_tensor_chunk\n'), ((2873, 2893), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (2891, 2893), False, 'from colossalai.utils.cuda import get_current_device\n'), ((2932, 2976), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (2950, 2976), True, 'from colossalai.core import global_context as gpc\n'), ((3036, 3080), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (3054, 3080), True, 'from colossalai.core import global_context as gpc\n'), ((3101, 3143), 'torch.nn.Linear', 'torch.nn.Linear', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (3116, 3143), False, 'import torch\n'), ((3156, 3198), 'torch.nn.Linear', 'torch.nn.Linear', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (3171, 3198), False, 'import torch\n'), ((3246, 3294), 'torch.randn', 'torch.randn', (['A_shape'], {'dtype': 'dtype', 'device': 'device'}), '(A_shape, dtype=dtype, device=device)\n', (3257, 3294), False, 'import torch\n'), ((3303, 3349), '_utils.broadcast_tensor_chunk', 
'broadcast_tensor_chunk', (['A_master'], {'chunk_size': '(1)'}), '(A_master, chunk_size=1)\n', (3325, 3349), False, 'from _utils import check_equal, replace_parameter_add_grad, broadcast_tensor_chunk\n'), ((3435, 3483), 'torch.randn', 'torch.randn', (['W_shape'], {'dtype': 'dtype', 'device': 'device'}), '(W_shape, dtype=dtype, device=device)\n', (3446, 3483), False, 'import torch\n'), ((3492, 3538), '_utils.broadcast_tensor_chunk', 'broadcast_tensor_chunk', (['W_master'], {'chunk_size': '(1)'}), '(W_master, chunk_size=1)\n', (3514, 3538), False, 'from _utils import check_equal, replace_parameter_add_grad, broadcast_tensor_chunk\n'), ((3611, 3659), 'torch.randn', 'torch.randn', (['B_shape'], {'dtype': 'dtype', 'device': 'device'}), '(B_shape, dtype=dtype, device=device)\n', (3622, 3659), False, 'import torch\n'), ((3668, 3714), '_utils.broadcast_tensor_chunk', 'broadcast_tensor_chunk', (['B_master'], {'chunk_size': '(1)'}), '(B_master, chunk_size=1)\n', (3690, 3714), False, 'from _utils import check_equal, replace_parameter_add_grad, broadcast_tensor_chunk\n'), ((3818, 3854), 'colossalai.tensor.ColoTensor.init_from_torch_tensor', 'ColoTensor.init_from_torch_tensor', (['W'], {}), '(W)\n', (3851, 3854), False, 'from colossalai.tensor import ColoTensor\n'), ((4023, 4055), 'colossalai.tensor.TensorSpec', 'TensorSpec', (['parallel_action_list'], {}), '(parallel_action_list)\n', (4033, 4055), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction\n'), ((4124, 4160), 'colossalai.tensor.ColoTensor.init_from_torch_tensor', 'ColoTensor.init_from_torch_tensor', (['B'], {}), '(B)\n', (4157, 4160), False, 'from colossalai.tensor import ColoTensor\n'), ((4165, 4228), '_utils.replace_parameter_add_grad', 'replace_parameter_add_grad', (['layer', 'sharded_weight', 'sharded_bias'], {}), '(layer, sharded_weight, sharded_bias)\n', (4191, 4228), False, 'from _utils import check_equal, replace_parameter_add_grad, broadcast_tensor_chunk\n'), ((4253, 4313), 
'_utils.replace_parameter_add_grad', 'replace_parameter_add_grad', (['layer_master', 'W_master', 'B_master'], {}), '(layer_master, W_master, B_master)\n', (4279, 4313), False, 'from _utils import check_equal, replace_parameter_add_grad, broadcast_tensor_chunk\n'), ((4492, 4511), '_utils.check_equal', 'check_equal', (['out', 'C'], {}), '(out, C)\n', (4503, 4511), False, 'from _utils import check_equal, replace_parameter_add_grad, broadcast_tensor_chunk\n'), ((4640, 4689), '_utils.broadcast_tensor_chunk', 'broadcast_tensor_chunk', (['grad_master'], {'chunk_size': '(1)'}), '(grad_master, chunk_size=1)\n', (4662, 4689), False, 'from _utils import check_equal, replace_parameter_add_grad, broadcast_tensor_chunk\n'), ((4879, 4917), '_utils.check_equal', 'check_equal', (['W_grad', 'layer.weight.grad'], {}), '(W_grad, layer.weight.grad)\n', (4890, 4917), False, 'from _utils import check_equal, replace_parameter_add_grad, broadcast_tensor_chunk\n'), ((4950, 4986), '_utils.check_equal', 'check_equal', (['B_grad', 'layer.bias.grad'], {}), '(B_grad, layer.bias.grad)\n', (4961, 4986), False, 'from _utils import check_equal, replace_parameter_add_grad, broadcast_tensor_chunk\n'), ((5106, 5222), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'config', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=config, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (5123, 5222), False, 'import colossalai\n'), ((5486, 5523), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (5494, 5523), True, 'import torch.multiprocessing as mp\n'), ((1702, 1819), 'colossalai.tensor.ParallelAction', 'ParallelAction', ([], {'priority': '(1)', 'compute_pattern': 'ComputePattern.TP1DCol_Linear', 'parallel_mode': 'ParallelMode.PARALLEL_1D'}), '(priority=1, compute_pattern=ComputePattern.TP1DCol_Linear,\n 
parallel_mode=ParallelMode.PARALLEL_1D)\n', (1716, 1819), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction\n'), ((2610, 2643), 'torch.chunk', 'torch.chunk', (['W_grad', 'DEPTH'], {'dim': '(0)'}), '(W_grad, DEPTH, dim=0)\n', (2621, 2643), False, 'import torch\n'), ((2740, 2773), 'torch.chunk', 'torch.chunk', (['B_grad', 'DEPTH'], {'dim': '(0)'}), '(B_grad, DEPTH, dim=0)\n', (2751, 2773), False, 'import torch\n'), ((3892, 4009), 'colossalai.tensor.ParallelAction', 'ParallelAction', ([], {'priority': '(1)', 'compute_pattern': 'ComputePattern.TP1DRow_Linear', 'parallel_mode': 'ParallelMode.PARALLEL_1D'}), '(priority=1, compute_pattern=ComputePattern.TP1DRow_Linear,\n parallel_mode=ParallelMode.PARALLEL_1D)\n', (3906, 4009), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction\n'), ((4828, 4862), 'torch.chunk', 'torch.chunk', (['W_grad', 'DEPTH'], {'dim': '(-1)'}), '(W_grad, DEPTH, dim=-1)\n', (4839, 4862), False, 'import torch\n'), ((2389, 2409), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (2407, 2409), False, 'from colossalai.utils.cuda import get_current_device\n'), ((4607, 4627), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (4625, 4627), False, 'from colossalai.utils.cuda import get_current_device\n'), ((5469, 5480), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (5478, 5480), False, 'from colossalai.utils import free_port\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from colossalai.context import ParallelContext, MoeContext
# Process-wide singleton contexts shared across the package: the general
# parallel context and the mixture-of-experts context.
global_context = ParallelContext.get_instance()
MOE_CONTEXT = MoeContext.get_instance()
| [
"colossalai.context.MoeContext.get_instance",
"colossalai.context.ParallelContext.get_instance"
] | [((126, 156), 'colossalai.context.ParallelContext.get_instance', 'ParallelContext.get_instance', ([], {}), '()\n', (154, 156), False, 'from colossalai.context import ParallelContext, MoeContext\n'), ((171, 196), 'colossalai.context.MoeContext.get_instance', 'MoeContext.get_instance', ([], {}), '()\n', (194, 196), False, 'from colossalai.context import ParallelContext, MoeContext\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
import os.path as osp
import torch
from tensorboardX import SummaryWriter
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.registry import HOOKS
from colossalai.trainer._trainer import Trainer
from colossalai.utils import get_global_multitimer, set_global_multitimer_status, report_memory_usage, is_dp_rank_0, \
is_tp_rank_0, is_no_pp_or_last_stage
from ._metric_hook import MetricHook
def _format_number(val):
if isinstance(val, float):
return f'{val:.5f}'
elif torch.is_floating_point(val):
return f'{val.item():.5f}'
return val
class EpochIntervalHook(MetricHook):
    """Base class for hooks that should only act every ``interval`` epochs."""

    def __init__(self, trainer: Trainer, interval: int = 1, priority: int = 1):
        super().__init__(trainer, priority)
        self._interval = interval

    def _is_epoch_to_log(self):
        """Return True when the current epoch index is a multiple of the interval."""
        return (self.trainer.cur_epoch % self._interval) == 0
@HOOKS.register_module
class LogMetricByEpochHook(EpochIntervalHook):
    """Specialized Hook to record the metric to log.

    :param trainer: Trainer attached with current hook
    :type trainer: Trainer
    :param interval: Recording interval
    :type interval: int, optional
    :param priority: Priority in the printing, hooks with small priority will be printed in front
    :type priority: int, optional
    """

    def __init__(self, trainer: Trainer, interval: int = 1, priority: int = 1) -> None:
        super().__init__(trainer=trainer, interval=interval, priority=priority)
        # Only one rank per data/tensor-parallel group (and only the last
        # pipeline stage, if any) actually emits the log line.
        self._is_rank_to_log = is_dp_rank_0() and is_tp_rank_0() and is_no_pp_or_last_stage()

    def _get_str(self, mode):
        """Render every metric of ``mode`` as one comma-separated 'name = value' string."""
        parts = [
            f'{metric_name} = {_format_number(metric_calculator.get_accumulated_value())}'
            for metric_name, metric_calculator in self.trainer.states['metrics'][mode].items()
        ]
        return ', '.join(parts)

    def after_train_epoch(self):
        if self._is_epoch_to_log():
            # Built on every rank before the rank gate — metric accumulation
            # may involve collective communication, so do not skip it.
            msg = self._get_str(mode='train')
            if self._is_rank_to_log:
                self.logger.info(
                    f'Training - Epoch {self.trainer.cur_epoch} - {self.__class__.__name__}: {msg}')

    def after_test_epoch(self):
        if self._is_epoch_to_log():
            msg = self._get_str(mode='test')
            if self._is_rank_to_log:
                self.logger.info(
                    f'Testing - Epoch {self.trainer.cur_epoch} - {self.__class__.__name__}: {msg}')
@HOOKS.register_module
class TensorboardHook(MetricHook):
    """Specialized Hook to record the metric to Tensorboard.

    :param trainer: Trainer attached with current hook
    :type trainer: Trainer
    :param log_dir: Directory of log
    :type log_dir: str, optional
    :param priority: Priority in the printing, hooks with small priority will be printed in front
    :type priority: int, optional
    """

    def __init__(self, trainer: Trainer, log_dir: str, priority: int = 1) -> None:
        super().__init__(trainer=trainer, priority=priority)
        self._is_rank_to_log = is_no_pp_or_last_stage()
        if self._is_rank_to_log:
            # Create a per-rank workspace and writer on the logging rank only.
            if gpc.is_initialized(ParallelMode.GLOBAL):
                rank = gpc.get_global_rank()
            else:
                rank = 0
            log_dir = osp.join(log_dir, f'rank_{rank}')
            if not osp.exists(log_dir):
                os.makedirs(log_dir)
            self.writer = SummaryWriter(
                log_dir=log_dir, filename_suffix=f'_rank_{rank}')

    def _write_step_values(self, mode):
        """Write the last-step value of every per-step metric of ``mode``."""
        for name, calc in self.trainer.states['metrics'][mode].items():
            if calc.epoch_only:
                continue
            # Fetched on every rank; only the logging rank writes it out.
            val = calc.get_last_step_value()
            if self._is_rank_to_log:
                self.writer.add_scalar(f'{name}/{mode}', val, self.trainer.cur_step)

    def _write_epoch_values(self, mode):
        """Write the accumulated value of every epoch-only metric of ``mode``."""
        for name, calc in self.trainer.states['metrics'][mode].items():
            if calc.epoch_only:
                val = calc.get_accumulated_value()
                if self._is_rank_to_log:
                    self.writer.add_scalar(f'{name}/{mode}', val, self.trainer.cur_step)

    def after_train_iter(self, *args):
        self._write_step_values('train')

    def after_test_iter(self, *args):
        self._write_step_values('test')

    def after_test_epoch(self):
        self._write_epoch_values('test')

    def after_train_epoch(self):
        self._write_epoch_values('train')
@HOOKS.register_module
class LogTimingByEpochHook(EpochIntervalHook):
    """Specialized Hook to write timing record to log.

    :param trainer: Trainer attached with current hook
    :type trainer: Trainer
    :param interval: Recording interval
    :type interval: int, optional
    :param priority: Priority in the printing, hooks with small priority will be printed in front
    :type priority: int, optional
    :param log_eval: Whether writes in evaluation
    :type log_eval: bool, optional
    """

    def __init__(self,
                 trainer: Trainer,
                 interval: int = 1,
                 priority: int = 1,
                 log_eval: bool = True) -> None:
        super().__init__(trainer=trainer, interval=interval, priority=priority)
        # Switch the global multi-timer on so that elapsed times get recorded.
        set_global_multitimer_status(True)
        self._global_timer = get_global_multitimer()
        self._log_eval = log_eval
        # Only one rank per data/tensor-parallel group writes the log.
        self._is_rank_to_log = is_dp_rank_0() and is_tp_rank_0()

    def _get_message(self):
        """Summarize every registered timer into one comma-separated line."""
        entries = []
        for timer_name, timer in self._global_timer:
            last_elapsed_time = timer.get_elapsed_time()
            if timer.has_history:
                history_mean = timer.get_history_mean()
                history_sum = timer.get_history_sum()
                entries.append(
                    f'{timer_name}: last elapsed time = {last_elapsed_time}, '
                    f'history sum = {history_sum}, history mean = {history_mean}')
            else:
                entries.append(
                    f'{timer_name}: last elapsed time = {last_elapsed_time}')
        return ', '.join(entries)

    def after_train_epoch(self):
        """Writes log after finishing a training epoch.
        """
        if self._is_epoch_to_log() and self._is_rank_to_log:
            msg = self._get_message()
            self.logger.info(
                f'Training - Epoch {self.trainer.cur_epoch} - {self.__class__.__name__}: {msg}')

    def after_test_epoch(self):
        """Writes log after finishing a testing epoch.
        """
        if self._is_epoch_to_log() and self._is_rank_to_log and self._log_eval:
            msg = self._get_message()
            self.logger.info(
                f'Testing - Epoch {self.trainer.cur_epoch} - {self.__class__.__name__}: {msg}')
@HOOKS.register_module
class LogMemoryByEpochHook(EpochIntervalHook):
    """Specialized Hook to write memory usage record to log.
    :param trainer: Trainer attached with current hook
    :type trainer: Trainer
    :param interval: Recording interval
    :type interval: int, optional
    :param priority: Priority in the printing, hooks with small priority will be printed in front
    :type priority: int, optional
    :param log_eval: Whether writes in evaluation
    :type log_eval: bool, optional
    """
    def __init__(self,
                 trainer: Trainer,
                 interval: int = 1,
                 priority: int = 1,
                 log_eval: bool = True
                 ) -> None:
        super().__init__(trainer=trainer, interval=interval, priority=priority)
        # NOTE(review): this hook only reports memory, yet it enables and grabs
        # the global multi-timer exactly like LogTimingByEpochHook does, and
        # self._global_timer is never used in this class — looks like a
        # copy-paste leftover. Confirm before removing: enabling the timer is a
        # global side effect that other code may depend on.
        set_global_multitimer_status(True)
        self._global_timer = get_global_multitimer()
        self._log_eval = log_eval
        # Only one rank per data/tensor-parallel group writes the report.
        self._is_rank_to_log = is_dp_rank_0() and is_tp_rank_0()
    def before_train(self):
        """Resets before training.
        """
        # cur_epoch is expected to satisfy the interval check here, producing a
        # baseline memory report before any training step runs.
        if self._is_epoch_to_log() and self._is_rank_to_log:
            report_memory_usage('before-train')
    def after_train_epoch(self):
        """Writes log after finishing a training epoch.
        """
        if self._is_epoch_to_log() and self._is_rank_to_log:
            report_memory_usage(
                f'After Train - Epoch {self.trainer.cur_epoch} - {self.__class__.__name__}')
    def after_test(self):
        """Reports after testing.
        """
        # Evaluation reporting can be switched off via log_eval.
        if self._is_epoch_to_log() and self._is_rank_to_log and self._log_eval:
            report_memory_usage(
                f'After Test - Epoch {self.trainer.cur_epoch} - {self.__class__.__name__}')
| [
"colossalai.utils.is_no_pp_or_last_stage",
"colossalai.utils.report_memory_usage",
"colossalai.utils.get_global_multitimer",
"colossalai.utils.is_tp_rank_0",
"colossalai.utils.is_dp_rank_0",
"colossalai.core.global_context.get_global_rank",
"colossalai.core.global_context.is_initialized",
"colossalai.... | [((607, 635), 'torch.is_floating_point', 'torch.is_floating_point', (['val'], {}), '(val)\n', (630, 635), False, 'import torch\n'), ((3134, 3158), 'colossalai.utils.is_no_pp_or_last_stage', 'is_no_pp_or_last_stage', ([], {}), '()\n', (3156, 3158), False, 'from colossalai.utils import get_global_multitimer, set_global_multitimer_status, report_memory_usage, is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage\n'), ((6124, 6158), 'colossalai.utils.set_global_multitimer_status', 'set_global_multitimer_status', (['(True)'], {}), '(True)\n', (6152, 6158), False, 'from colossalai.utils import get_global_multitimer, set_global_multitimer_status, report_memory_usage, is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage\n'), ((6188, 6211), 'colossalai.utils.get_global_multitimer', 'get_global_multitimer', ([], {}), '()\n', (6209, 6211), False, 'from colossalai.utils import get_global_multitimer, set_global_multitimer_status, report_memory_usage, is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage\n'), ((8447, 8481), 'colossalai.utils.set_global_multitimer_status', 'set_global_multitimer_status', (['(True)'], {}), '(True)\n', (8475, 8481), False, 'from colossalai.utils import get_global_multitimer, set_global_multitimer_status, report_memory_usage, is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage\n'), ((8511, 8534), 'colossalai.utils.get_global_multitimer', 'get_global_multitimer', ([], {}), '()\n', (8532, 8534), False, 'from colossalai.utils import get_global_multitimer, set_global_multitimer_status, report_memory_usage, is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage\n'), ((1599, 1613), 'colossalai.utils.is_dp_rank_0', 'is_dp_rank_0', ([], {}), '()\n', (1611, 1613), False, 'from colossalai.utils import get_global_multitimer, set_global_multitimer_status, report_memory_usage, is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage\n'), ((1618, 1632), 'colossalai.utils.is_tp_rank_0', 'is_tp_rank_0', ([], {}), '()\n', (1630, 1632), False, 'from colossalai.utils 
import get_global_multitimer, set_global_multitimer_status, report_memory_usage, is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage\n'), ((1637, 1661), 'colossalai.utils.is_no_pp_or_last_stage', 'is_no_pp_or_last_stage', ([], {}), '()\n', (1659, 1661), False, 'from colossalai.utils import get_global_multitimer, set_global_multitimer_status, report_memory_usage, is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage\n'), ((3256, 3295), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.GLOBAL'], {}), '(ParallelMode.GLOBAL)\n', (3274, 3295), True, 'from colossalai.core import global_context as gpc\n'), ((3408, 3441), 'os.path.join', 'osp.join', (['log_dir', 'f"""rank_{rank}"""'], {}), "(log_dir, f'rank_{rank}')\n", (3416, 3441), True, 'import os.path as osp\n'), ((3578, 3641), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'log_dir', 'filename_suffix': 'f"""_rank_{rank}"""'}), "(log_dir=log_dir, filename_suffix=f'_rank_{rank}')\n", (3591, 3641), False, 'from tensorboardX import SummaryWriter\n'), ((6277, 6291), 'colossalai.utils.is_dp_rank_0', 'is_dp_rank_0', ([], {}), '()\n', (6289, 6291), False, 'from colossalai.utils import get_global_multitimer, set_global_multitimer_status, report_memory_usage, is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage\n'), ((6296, 6310), 'colossalai.utils.is_tp_rank_0', 'is_tp_rank_0', ([], {}), '()\n', (6308, 6310), False, 'from colossalai.utils import get_global_multitimer, set_global_multitimer_status, report_memory_usage, is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage\n'), ((8600, 8614), 'colossalai.utils.is_dp_rank_0', 'is_dp_rank_0', ([], {}), '()\n', (8612, 8614), False, 'from colossalai.utils import get_global_multitimer, set_global_multitimer_status, report_memory_usage, is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage\n'), ((8619, 8633), 'colossalai.utils.is_tp_rank_0', 'is_tp_rank_0', ([], {}), '()\n', (8631, 8633), False, 'from colossalai.utils import 
get_global_multitimer, set_global_multitimer_status, report_memory_usage, is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage\n'), ((8783, 8818), 'colossalai.utils.report_memory_usage', 'report_memory_usage', (['"""before-train"""'], {}), "('before-train')\n", (8802, 8818), False, 'from colossalai.utils import get_global_multitimer, set_global_multitimer_status, report_memory_usage, is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage\n'), ((8994, 9100), 'colossalai.utils.report_memory_usage', 'report_memory_usage', (['f"""After Train - Epoch {self.trainer.cur_epoch} - {self.__class__.__name__}"""'], {}), "(\n f'After Train - Epoch {self.trainer.cur_epoch} - {self.__class__.__name__}'\n )\n", (9013, 9100), False, 'from colossalai.utils import get_global_multitimer, set_global_multitimer_status, report_memory_usage, is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage\n'), ((9273, 9373), 'colossalai.utils.report_memory_usage', 'report_memory_usage', (['f"""After Test - Epoch {self.trainer.cur_epoch} - {self.__class__.__name__}"""'], {}), "(\n f'After Test - Epoch {self.trainer.cur_epoch} - {self.__class__.__name__}')\n", (9292, 9373), False, 'from colossalai.utils import get_global_multitimer, set_global_multitimer_status, report_memory_usage, is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage\n'), ((3320, 3341), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (3339, 3341), True, 'from colossalai.core import global_context as gpc\n'), ((3493, 3512), 'os.path.exists', 'osp.exists', (['log_dir'], {}), '(log_dir)\n', (3503, 3512), True, 'import os.path as osp\n'), ((3530, 3550), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (3541, 3550), False, 'import os\n')] |
import os
from pathlib import Path
import colossalai
import torch
from colossalai.core import global_context as gpc
from colossalai.logging import get_dist_logger
from colossalai.nn.lr_scheduler import LinearWarmupLR
from colossalai.trainer import Trainer, hooks
from colossalai.utils import get_dataloader
from timm.models import vit_base_patch16_224
from torchvision import datasets, transforms
class Gray2RGB:
    """Convert all images (rgb or grayscale) to rgb."""

    def __call__(self, img):
        """Expand a single-channel image to three identical channels.

        Args:
            img: Tensor — assumed (C, H, W), channel axis at dim -3; TODO confirm.
        Returns:
            Tensor: a 3-channel image; an already-RGB input is returned unchanged.
        """
        return img.repeat(3, 1, 1) if img.size(dim=-3) == 1 else img
def main():
    """Train a timm ViT-B/16 on Caltech-101 with a Colossal-AI engine.

    Reads the Colossal-AI config path from CLI args, the dataset root from
    the ``DATA`` environment variable, and ``BATCH_SIZE``/``NUM_EPOCHS``
    from the launched config (``gpc.config``). Requires CUDA.
    """
    # initialize distributed setting
    parser = colossalai.get_default_parser()
    args = parser.parse_args()
    # launch from torch
    colossalai.launch_from_torch(config=args.config)
    # launch from slurm batch job
    # colossalai.launch_from_slurm(config=args.config,
    #                              host=args.host,
    #                              port=args.port,
    #                              backend=args.backend
    #                              )
    # get logger (rank-aware; ranks=[0] restricts output to rank 0)
    logger = get_dist_logger()
    logger.info("initialized distributed environment", ranks=[0])
    # build model
    model = vit_base_patch16_224(drop_rate=0.1)
    # build dataloader; Gray2RGB is needed because Caltech-101 mixes
    # grayscale and RGB images
    train_dataset = datasets.Caltech101(
        root=Path(os.environ['DATA']),
        download=True,
        transform=transforms.Compose([
            transforms.Resize(256),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            Gray2RGB(),
            transforms.Normalize([0.5, 0.5, 0.5],
                                 [0.5, 0.5, 0.5])
        ]))
    train_dataloader = get_dataloader(dataset=train_dataset,
                                   shuffle=True,
                                   batch_size=gpc.config.BATCH_SIZE,
                                   num_workers=1,
                                   pin_memory=True,
                                   )
    # build optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, weight_decay=0.1)
    # build loss
    criterion = torch.nn.CrossEntropyLoss()
    # lr scheduler: linear warmup over the first step, then decay
    lr_scheduler = LinearWarmupLR(optimizer, warmup_steps=1, total_steps=gpc.config.NUM_EPOCHS)
    # wrap everything into a Colossal-AI engine (handles AMP/ZeRO per config)
    engine, train_dataloader, _, _ = colossalai.initialize(
        model, optimizer, criterion, train_dataloader,
    )
    logger.info("initialized colossalai components", ranks=[0])
    # manual training loop (no Trainer); one scheduler step per epoch
    engine.train()
    for epoch in range(gpc.config.NUM_EPOCHS):
        for img, label in train_dataloader:
            img = img.cuda()
            label = label.cuda()
            engine.zero_grad()
            output = engine(img)
            loss = engine.criterion(output, label)
            engine.backward(loss)
            engine.step()
        lr_scheduler.step()
        # NOTE: logs the loss of the last batch only, not an epoch average
        logger.info('epoch: {}, loss: {}'.format(epoch, loss.item()))
# Standard script entry point guard.
if __name__ == '__main__':
    main()
| [
"colossalai.logging.get_dist_logger",
"colossalai.initialize",
"colossalai.utils.get_dataloader",
"colossalai.launch_from_torch",
"colossalai.nn.lr_scheduler.LinearWarmupLR",
"colossalai.get_default_parser"
] | [((774, 805), 'colossalai.get_default_parser', 'colossalai.get_default_parser', ([], {}), '()\n', (803, 805), False, 'import colossalai\n'), ((866, 914), 'colossalai.launch_from_torch', 'colossalai.launch_from_torch', ([], {'config': 'args.config'}), '(config=args.config)\n', (894, 914), False, 'import colossalai\n'), ((1231, 1248), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (1246, 1248), False, 'from colossalai.logging import get_dist_logger\n'), ((1346, 1381), 'timm.models.vit_base_patch16_224', 'vit_base_patch16_224', ([], {'drop_rate': '(0.1)'}), '(drop_rate=0.1)\n', (1366, 1381), False, 'from timm.models import vit_base_patch16_224\n'), ((1873, 1995), 'colossalai.utils.get_dataloader', 'get_dataloader', ([], {'dataset': 'train_dataset', 'shuffle': '(True)', 'batch_size': 'gpc.config.BATCH_SIZE', 'num_workers': '(1)', 'pin_memory': '(True)'}), '(dataset=train_dataset, shuffle=True, batch_size=gpc.config.\n BATCH_SIZE, num_workers=1, pin_memory=True)\n', (1887, 1995), False, 'from colossalai.utils import get_dataloader\n'), ((2319, 2346), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (2344, 2346), False, 'import torch\n'), ((2386, 2462), 'colossalai.nn.lr_scheduler.LinearWarmupLR', 'LinearWarmupLR', (['optimizer'], {'warmup_steps': '(1)', 'total_steps': 'gpc.config.NUM_EPOCHS'}), '(optimizer, warmup_steps=1, total_steps=gpc.config.NUM_EPOCHS)\n', (2400, 2462), False, 'from colossalai.nn.lr_scheduler import LinearWarmupLR\n'), ((2501, 2569), 'colossalai.initialize', 'colossalai.initialize', (['model', 'optimizer', 'criterion', 'train_dataloader'], {}), '(model, optimizer, criterion, train_dataloader)\n', (2522, 2569), False, 'import colossalai\n'), ((1460, 1484), 'pathlib.Path', 'Path', (["os.environ['DATA']"], {}), "(os.environ['DATA'])\n", (1464, 1484), False, 'from pathlib import Path\n'), ((1560, 1582), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (1577, 
1582), False, 'from torchvision import datasets, transforms\n'), ((1596, 1629), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (1624, 1629), False, 'from torchvision import datasets, transforms\n'), ((1643, 1676), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1674, 1676), False, 'from torchvision import datasets, transforms\n'), ((1690, 1711), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1709, 1711), False, 'from torchvision import datasets, transforms\n'), ((1749, 1803), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.5, 0.5, 0.5]', '[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n', (1769, 1803), False, 'from torchvision import datasets, transforms\n')] |
import math
from typing import Callable
import torch
from colossalai import nn as col_nn
from colossalai.builder.pipeline import partition_uniform
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.logging import get_dist_logger
from colossalai.nn.layer.utils import CheckpointModule, divide
from colossalai.nn.layer.wrapper import PipelineSharedModuleWrapper
from colossalai.registry import LAYERS, LOSSES, MODELS
from colossalai.utils import get_current_device
from torch import dtype, nn
__all__ = [
'GPT', 'GPTLMLoss', 'gpt2_small', 'gpt2_medium', 'gpt2_large', 'gpt2_xl', 'gpt2_8B', 'gpt2_xl_pipeline',
'gpt2_8B_pipeline', 'gpt3', 'gpt3_pipeline'
]
@LAYERS.register_module
class GPTEmbedding(nn.Module):
    """GPT input embedding: token + learned position (+ optional token-type) embeddings.

    The summed embeddings are passed through dropout before being returned.

    :param embedding_dim: hidden size of the model
    :param vocab_size: size of the token vocabulary
    :param max_position_embeddings: maximum supported sequence length
    :param num_tokentypes: number of token-type ids; 0 disables token-type embeddings
    :param padding_idx: vocabulary index whose embedding stays zero, if given
    :param dropout: dropout probability applied to the summed embeddings
    :param dtype: parameter dtype
    """
    def __init__(self,
                 embedding_dim: int,
                 vocab_size: int,
                 max_position_embeddings: int,
                 num_tokentypes: int = 0,
                 padding_idx: int = None,
                 dropout: float = 0.,
                 dtype: dtype = None) -> None:
        super().__init__()
        self.word_embeddings = col_nn.Embedding(vocab_size, embedding_dim, padding_idx=padding_idx, dtype=dtype)
        self.position_embeddings = col_nn.Embedding(max_position_embeddings, embedding_dim, dtype=dtype)
        if num_tokentypes > 0:
            self.tokentype_embeddings = col_nn.Embedding(num_tokentypes, embedding_dim, dtype=dtype)
        else:
            self.tokentype_embeddings = None
        self.dropout = col_nn.Dropout(dropout)
    @property
    def word_embedding_weight(self):
        # Exposed so the LM head can tie its classifier weight to this matrix.
        return self.word_embeddings.weight
    def forward(self, input_ids, position_ids=None, tokentype_ids=None):
        # input_ids is indexed as (batch, seq): dim 1 is the sequence length.
        seq_length = input_ids.size(1)
        if position_ids is None:
            # Default positions 0..seq_length-1, broadcast over the batch.
            position_ids = torch.arange(seq_length, dtype=torch.long, device=get_current_device()).unsqueeze(0)
        x = self.word_embeddings(input_ids) + self.position_embeddings(position_ids)
        # Token-type embeddings are only added when both the table and ids exist.
        if self.tokentype_embeddings is not None and tokentype_ids is not None:
            x = x + self.tokentype_embeddings(tokentype_ids)
        x = self.dropout(x)
        return x
@LAYERS.register_module
class GPTSelfAttention(nn.Module):
    """Causal multi-head self-attention used by GPT blocks.

    Q, K and V are produced by a single fused linear projection of size
    ``3 * dim``. The causal mask is either applied by the fused CUDA
    scale-mask-softmax kernel (``fuse_scale_mask_softmax=True``) or built
    explicitly with ``torch.tril`` in the eager path.

    :param dim: hidden size
    :param num_heads: number of attention heads (must divide ``dim``)
    :param attention_dropout: dropout on the attention probabilities
    :param dropout: dropout on the output projection
    :param bias: whether the QKV projection has a bias
    :param fuse_scale_mask_softmax: use the fused CUDA softmax kernel
    :param dtype: parameter dtype
    """
    def __init__(self,
                 dim: int,
                 num_heads: int,
                 attention_dropout: float,
                 dropout: float,
                 bias: bool = True,
                 fuse_scale_mask_softmax: bool = False,
                 dtype: dtype = None) -> None:
        super().__init__()
        self.fuse_scale_mask_softmax = fuse_scale_mask_softmax
        self.attention_head_size = divide(dim, num_heads)
        # Fused projection producing Q, K and V in one matmul.
        self.query_key_value = col_nn.Linear(dim, 3 * dim, dtype=dtype, bias=bias)
        if fuse_scale_mask_softmax:
            # Imported lazily: the fused kernel needs the CUDA extension.
            from colossalai.kernel import FusedScaleMaskSoftmax
            from colossalai.kernel.cuda_native.scaled_softmax import \
                AttnMaskType
            self.softmax = FusedScaleMaskSoftmax(input_in_fp16=True,
                                                 input_in_bf16=False,
                                                 attn_mask_type=AttnMaskType.causal,
                                                 scaled_masked_softmax_fusion=True,
                                                 mask_func=None,
                                                 softmax_in_fp32=True,
                                                 scale=math.sqrt(self.attention_head_size))
        else:
            self.softmax = nn.Softmax(dim=-1)
        self.attention_dropout = col_nn.Dropout(attention_dropout)
        self.dense = col_nn.Linear(dim, dim, dtype=dtype, bias=True)
        self.dropout = col_nn.Dropout(dropout)
    def forward(self, x, attention_mask=None):
        qkv = self.query_key_value(x)
        # Head count is recomputed from the (possibly tensor-parallel-sharded)
        # local projection width rather than taken from the constructor.
        all_head_size = qkv.shape[-1] // 3
        num_attention_heads = divide(all_head_size, self.attention_head_size)
        # (batch, seq, 3*dim) -> (batch, heads, seq, 3*head_size)
        new_qkv_shape = qkv.shape[:-1] + \
            (num_attention_heads, 3 * self.attention_head_size)
        qkv = qkv.view(new_qkv_shape)
        qkv = qkv.permute((0, 2, 1, 3))
        q, k, v = torch.chunk(qkv, 3, dim=-1)
        # Raw attention scores: q @ k^T.
        x = torch.matmul(q, k.transpose(-1, -2))
        if self.fuse_scale_mask_softmax:
            # Fused kernel handles scaling and the causal mask internally.
            x = self.softmax(x, attention_mask)
        else:
            x = x / math.sqrt(self.attention_head_size)
            # causal mask: positions may only attend to earlier positions
            q_len, k_len = q.size(-2), k.size(-2)
            causal_mask = torch.tril(torch.ones((q_len, k_len), dtype=torch.uint8,
                                                  device=get_current_device())).view(1, 1, q_len, k_len).bool()
            # Masked positions are set to -1e4 (large negative, fp16-safe).
            x = torch.where(causal_mask, x, torch.tensor(-1e4, dtype=x.dtype, device=get_current_device()))
            if attention_mask is not None:
                x = x + attention_mask
            x = self.softmax(x)
        x = self.attention_dropout(x)
        x = torch.matmul(x, v)
        # (batch, heads, seq, head_size) -> (batch, seq, all_head_size)
        x = x.transpose(1, 2)
        new_context_layer_shape = x.size()[:-2] + (all_head_size,)
        x = x.reshape(new_context_layer_shape)
        x = self.dense(x)
        x = self.dropout(x)
        return x
@LAYERS.register_module
class GPTMLP(nn.Module):
    """Position-wise feed-forward block of a GPT layer.

    Projects the hidden state up to ``int(dim * mlp_ratio)``, applies the
    activation, projects back down to ``dim``, and applies dropout.

    :param dim: hidden size
    :param mlp_ratio: expansion ratio of the intermediate projection
    :param activation: activation callable applied between the projections
    :param dropout: dropout probability on the output
    :param dtype: parameter dtype
    :param bias: whether the linear layers carry a bias
    """
    def __init__(self,
                 dim: int,
                 mlp_ratio: float,
                 activation: Callable,
                 dropout: float,
                 dtype: dtype = None,
                 bias: bool = True):
        super().__init__()
        hidden_dim = int(dim * mlp_ratio)
        self.dense_1 = col_nn.Linear(dim, hidden_dim, dtype=dtype, bias=bias)
        self.activation = activation
        self.dense_2 = col_nn.Linear(hidden_dim, dim, dtype=dtype, bias=bias)
        self.dropout = col_nn.Dropout(dropout)
    def forward(self, x):
        """Apply up-projection, activation, down-projection and dropout."""
        return self.dropout(self.dense_2(self.activation(self.dense_1(x))))
@LAYERS.register_module
class GPTBlock(CheckpointModule):
    """One transformer layer: LayerNorm + causal self-attention + MLP.

    Supports both pre-LayerNorm (default, ``apply_post_layernorm=False``,
    residual taken *before* the norm) and post-LayerNorm (residual taken
    *after* the norm). Inherits activation checkpointing / offload behavior
    from ``CheckpointModule``, which dispatches to ``_forward``.
    """
    def __init__(self,
                 dim: int,
                 num_heads: int,
                 mlp_ratio: float,
                 activation: Callable,
                 attention_dropout: float = 0.,
                 dropout: float = 0.,
                 layernorm_epsilon: float = 1e-5,
                 dtype: dtype = None,
                 bias: bool = True,
                 apply_post_layernorm: bool = False,
                 fuse_scale_mask_softmax: bool = False,
                 checkpoint: bool = False,
                 activation_offload: bool = False):
        super().__init__(checkpoint, activation_offload)
        self.apply_post_layernorm = apply_post_layernorm
        self.norm1 = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)
        self.attn = GPTSelfAttention(dim=dim,
                                    num_heads=num_heads,
                                    attention_dropout=attention_dropout,
                                    dropout=dropout,
                                    bias=bias,
                                    fuse_scale_mask_softmax=fuse_scale_mask_softmax,
                                    dtype=dtype)
        self.norm2 = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)
        self.mlp = GPTMLP(dim=dim, mlp_ratio=mlp_ratio, activation=activation, dropout=dropout, dtype=dtype, bias=bias)
    def _forward(self, x, attention_mask=None):
        # Pre-LN: the residual branches off before norm1; post-LN: after.
        if not self.apply_post_layernorm:
            residual = x
        x = self.norm1(x)
        if self.apply_post_layernorm:
            residual = x
        x = residual + self.attn(x, attention_mask)
        # Same pre/post-LN choice for the MLP sub-layer.
        if not self.apply_post_layernorm:
            residual = x
        x = self.norm2(x)
        if self.apply_post_layernorm:
            residual = x
        x = residual + self.mlp(x)
        # The mask is returned unchanged so blocks can be chained in pipelines.
        return x, attention_mask
@LAYERS.register_module
class GPTLMHead(nn.Module):
    """Language-model head projecting hidden states to vocabulary logits.

    When ``word_embeeding_weight`` is supplied, the classifier reuses that
    matrix (weight tying with the input embedding).

    :param dim: hidden size
    :param vocab_size: size of the output vocabulary
    :param word_embeeding_weight: optional embedding weight to tie to
    :param bias: whether the classifier has a bias
    :param dtype: parameter dtype
    """
    def __init__(self,
                 dim: int,
                 vocab_size: int,
                 word_embeeding_weight: nn.Parameter = None,
                 bias: bool = False,
                 dtype: dtype = None) -> None:
        super().__init__()
        self.dense = col_nn.Classifier(dim, vocab_size, word_embeeding_weight, bias=bias, dtype=dtype)
    @property
    def weight(self):
        """The (possibly tied) classifier weight matrix."""
        return self.dense.weight
    def forward(self, x):
        """Return vocabulary logits for hidden states ``x``."""
        return self.dense(x)
@LOSSES.register_module
class GPTLMLoss(nn.Module):
    """Autoregressive language-modeling loss (next-token cross-entropy)."""
    def __init__(self):
        super().__init__()
        self.loss = col_nn.CrossEntropyLoss()
    def forward(self, logits, labels):
        """Shift logits/labels by one position and compute cross-entropy.

        Token ``i`` predicts token ``i + 1``, so the last logit and the
        first label are dropped before flattening.
        """
        preds = logits[..., :-1, :].contiguous()
        targets = labels[..., 1:].contiguous()
        return self.loss(preds.view(-1, preds.size(-1)), targets.view(-1))
@MODELS.register_module
class GPT(nn.Module):
    """Full (non-pipelined) GPT model: embedding, ``depth`` transformer
    blocks, a final LayerNorm, and a weight-tied LM head.

    ``forward`` maps token ids of shape (batch, seq) to vocabulary logits.
    The optional 2D ``attention_mask`` (1 = attend, 0 = mask) is expanded
    to an additive 4D mask before entering the blocks.
    """
    def __init__(self,
                 vocab_size: int = 50304,
                 max_position_embeddings: int = 1024,
                 dim: int = 768,
                 num_heads: int = 12,
                 depth: int = 12,
                 mlp_ratio: float = 4.0,
                 dropout: float = 0.1,
                 embedding_dropout: float = 0.1,
                 attention_dropout: float = 0.1,
                 layernorm_epsilon: float = 1e-5,
                 activation: Callable = nn.functional.gelu,
                 padding_idx: int = None,
                 dtype: dtype = None,
                 bias: bool = True,
                 apply_post_layernorm: bool = False,
                 fuse_scale_mask_softmax: bool = False,
                 checkpoint: bool = False,
                 activation_offload: bool = False) -> None:
        super().__init__()
        self.embed = GPTEmbedding(embedding_dim=dim,
                                  vocab_size=vocab_size,
                                  max_position_embeddings=max_position_embeddings,
                                  padding_idx=padding_idx,
                                  dropout=embedding_dropout,
                                  dtype=dtype)
        self.blocks = nn.ModuleList([
            GPTBlock(
                dim=dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                activation=activation,
                attention_dropout=attention_dropout,
                dropout=dropout,
                layernorm_epsilon=layernorm_epsilon,
                dtype=dtype,
                bias=bias,
                apply_post_layernorm=apply_post_layernorm,
                fuse_scale_mask_softmax=fuse_scale_mask_softmax,
                checkpoint=checkpoint,
                activation_offload=activation_offload
            ) for _ in range(depth)
        ])
        self.norm = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)
        # The head ties its classifier weight to the word embedding matrix.
        self.head = GPTLMHead(dim=dim,
                              vocab_size=vocab_size,
                              word_embeeding_weight=self.embed.word_embedding_weight,
                              dtype=dtype)
    def forward(self, input_ids, attention_mask=None):
        x = self.embed(input_ids)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # Adapted from huggingface
        if attention_mask is not None:
            batch_size = input_ids.shape[0]
            attention_mask = attention_mask.view(batch_size, -1)
            attention_mask = col_nn.partition_batch(attention_mask)
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attention_mask = attention_mask.to(dtype=x.dtype)    # fp16 compatibility
            # 1 -> 0.0 (keep), 0 -> -10000.0 (mask out), applied additively.
            attention_mask = (1.0 - attention_mask) * -10000.0
        for block in self.blocks:
            x, attention_mask = block(x, attention_mask)
        x = self.head(self.norm(x))
        return x
class PipelineGPT(nn.Module):
    """One pipeline-parallel stage of GPT.

    A stage always owns ``depth`` transformer blocks; the ``first`` stage
    additionally owns the input embedding and the ``last`` stage owns the
    final LayerNorm and LM head. ``forward`` consumes ``input_ids`` on the
    first stage and the previous stage's hidden states ``x`` otherwise.
    """
    def __init__(self,
                 vocab_size: int = 50304,
                 max_position_embeddings: int = 1024,
                 dim: int = 768,
                 num_heads: int = 12,
                 depth: int = 12,
                 mlp_ratio: float = 4.0,
                 dropout: float = 0.1,
                 embedding_dropout: float = 0.1,
                 attention_dropout: float = 0.1,
                 layernorm_epsilon: float = 1e-5,
                 activation: Callable = nn.functional.gelu,
                 padding_idx: int = None,
                 dtype: dtype = None,
                 bias: bool = True,
                 apply_post_layernorm: bool = False,
                 fuse_scale_mask_softmax: bool = False,
                 checkpoint: bool = False,
                 first: bool = False,
                 last: bool = False):
        super().__init__()
        self.checkpoint = checkpoint
        self.first = first
        self.last = last
        if first:
            # Only the first stage embeds token ids.
            self.embed = GPTEmbedding(embedding_dim=dim,
                                      vocab_size=vocab_size,
                                      max_position_embeddings=max_position_embeddings,
                                      padding_idx=padding_idx,
                                      dropout=embedding_dropout,
                                      dtype=dtype)
        self.blocks = nn.ModuleList([
            GPTBlock(
                dim=dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                activation=activation,
                attention_dropout=attention_dropout,
                dropout=dropout,
                layernorm_epsilon=layernorm_epsilon,
                dtype=dtype,
                bias=bias,
                apply_post_layernorm=apply_post_layernorm,
                fuse_scale_mask_softmax=fuse_scale_mask_softmax,
                checkpoint=checkpoint,
            ) for _ in range(depth)
        ])
        if self.last:
            # Only the last stage normalizes and projects to vocab logits.
            self.norm = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)
            self.head = GPTLMHead(dim=dim, vocab_size=vocab_size, dtype=dtype)
    def forward(self, x=None, input_ids=None, attention_mask=None):
        if self.first:
            x = self.embed(input_ids)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # Adapted from huggingface
        if attention_mask is not None:
            # The batch size comes from whichever input this stage received.
            if self.first:
                batch_size = input_ids.shape[0]
            else:
                batch_size = x.shape[0]
            attention_mask = attention_mask.view(batch_size, -1)
            attention_mask = col_nn.partition_batch(attention_mask)
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attention_mask = attention_mask.to(dtype=x.dtype)    # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * -10000.0
        for block in self.blocks:
            x, attention_mask = block(x, attention_mask)
        if self.last:
            x = self.head(self.norm(x))
        return x
def _create_gpt_model(**model_kwargs):
    """Instantiate a (non-pipelined) GPT model from keyword config."""
    return GPT(**model_kwargs)
def _create_gpt_pipeline_model(depth=48, num_chunks=1, layer_partitions=None, **model_kwargs):
    """Build the pipeline-parallel GPT chunk(s) owned by the current rank.

    :param depth: total number of transformer layers across the pipeline
    :param num_chunks: model chunks per pipeline rank (interleaved schedule)
    :param layer_partitions: explicit (start, end) layer ranges for this
        rank; a uniform partition is computed when None
    :return: a single ``PipelineGPT`` stage, or an ``nn.ModuleList`` of
        stages when this rank holds multiple chunks
    """
    logger = get_dist_logger()
    pipeline_size = gpc.get_world_size(ParallelMode.PIPELINE)
    pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
    rank = gpc.get_global_rank()
    # First and last stages share the embedding / LM-head weight.
    wrapper = PipelineSharedModuleWrapper([0, pipeline_size - 1])
    parts = partition_uniform(depth, pipeline_size,
                              num_chunks)[pipeline_rank] if layer_partitions is None else layer_partitions
    models = []
    for start, end in parts:
        # Flag the boundary stages so PipelineGPT builds embed/head modules.
        model_kwargs['first'] = start == 0
        model_kwargs['last'] = end == depth
        model_kwargs['depth'] = end - start
        chunk = PipelineGPT(**model_kwargs).to(get_current_device())
        if start == 0:
            wrapper.register_parameter(chunk.embed.word_embedding_weight)
        elif end == depth:
            wrapper.register_parameter(chunk.head.weight)
        models.append(chunk)
        logger.info(f'==> Rank {rank} built layer {start}-{end} / total {depth}')
    if len(models) == 1:
        model = models[0]
    else:
        model = nn.ModuleList(models)
    return model
@MODELS.register_module
def gpt2_small(**kwargs):
    """Build a GPT with the small configuration (768 dim, 12 layers, 12 heads)."""
    config = dict(dim=768, depth=12, num_heads=12, **kwargs)
    return _create_gpt_model(**config)
@MODELS.register_module
def gpt2_medium(**kwargs):
    """Build a GPT with the medium configuration (1024 dim, 24 layers, 8 heads)."""
    config = dict(dim=1024, depth=24, num_heads=8, **kwargs)
    return _create_gpt_model(**config)
@MODELS.register_module
def gpt2_large(**kwargs):
    """Build a GPT with the large configuration (1536 dim, 36 layers, 12 heads)."""
    config = dict(dim=1536, depth=36, num_heads=12, **kwargs)
    return _create_gpt_model(**config)
@MODELS.register_module
def gpt2_xl(**kwargs):
    """Build a GPT with the XL configuration (1600 dim, 48 layers, 16 heads)."""
    config = dict(dim=1600, depth=48, num_heads=16, **kwargs)
    return _create_gpt_model(**config)
@MODELS.register_module
def gpt2_8B(**kwargs):
    """Build an 8B-parameter GPT (3072 dim, 72 layers, 24 heads)."""
    config = dict(dim=3072, depth=72, num_heads=24, **kwargs)
    return _create_gpt_model(**config)
@MODELS.register_module
def gpt2_xl_pipeline(**kwargs):
    """Build the pipeline-parallel GPT XL variant (1600 dim, 48 layers, 20 heads)."""
    config = dict(dim=1600, depth=48, num_heads=20, **kwargs)
    return _create_gpt_pipeline_model(**config)
@MODELS.register_module
def gpt2_8B_pipeline(**kwargs):
    """Build the pipeline-parallel 8B GPT (3072 dim, 72 layers, 24 heads)."""
    config = dict(dim=3072, depth=72, num_heads=24, **kwargs)
    return _create_gpt_pipeline_model(**config)
@MODELS.register_module
def gpt3(**kwargs):
    """Build a GPT-3-scale model (12288 dim, 96 layers, 96 heads)."""
    config = dict(dim=12288, depth=96, num_heads=96, **kwargs)
    return _create_gpt_model(**config)
@MODELS.register_module
def gpt3_pipeline(**kwargs):
    """Build the pipeline-parallel GPT-3-scale model (12288 dim, 96 layers, 96 heads)."""
    config = dict(dim=12288, depth=96, num_heads=96, **kwargs)
    return _create_gpt_pipeline_model(**config)
| [
"colossalai.utils.get_current_device",
"colossalai.nn.Linear",
"colossalai.core.global_context.get_local_rank",
"colossalai.logging.get_dist_logger",
"colossalai.nn.Classifier",
"colossalai.core.global_context.get_world_size",
"colossalai.nn.LayerNorm",
"colossalai.builder.pipeline.partition_uniform",... | [((15943, 15960), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (15958, 15960), False, 'from colossalai.logging import get_dist_logger\n'), ((15982, 16023), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (16000, 16023), True, 'from colossalai.core import global_context as gpc\n'), ((16045, 16086), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (16063, 16086), True, 'from colossalai.core import global_context as gpc\n'), ((16099, 16120), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (16118, 16120), True, 'from colossalai.core import global_context as gpc\n'), ((16136, 16187), 'colossalai.nn.layer.wrapper.PipelineSharedModuleWrapper', 'PipelineSharedModuleWrapper', (['[0, pipeline_size - 1]'], {}), '([0, pipeline_size - 1])\n', (16163, 16187), False, 'from colossalai.nn.layer.wrapper import PipelineSharedModuleWrapper\n'), ((1182, 1268), 'colossalai.nn.Embedding', 'col_nn.Embedding', (['vocab_size', 'embedding_dim'], {'padding_idx': 'padding_idx', 'dtype': 'dtype'}), '(vocab_size, embedding_dim, padding_idx=padding_idx, dtype=\n dtype)\n', (1198, 1268), True, 'from colossalai import nn as col_nn\n'), ((1300, 1369), 'colossalai.nn.Embedding', 'col_nn.Embedding', (['max_position_embeddings', 'embedding_dim'], {'dtype': 'dtype'}), '(max_position_embeddings, embedding_dim, dtype=dtype)\n', (1316, 1369), True, 'from colossalai import nn as col_nn\n'), ((1589, 1612), 'colossalai.nn.Dropout', 'col_nn.Dropout', (['dropout'], {}), '(dropout)\n', (1603, 1612), True, 'from colossalai import nn as col_nn\n'), ((2754, 2776), 'colossalai.nn.layer.utils.divide', 'divide', (['dim', 'num_heads'], {}), '(dim, num_heads)\n', (2760, 2776), False, 'from colossalai.nn.layer.utils import 
CheckpointModule, divide\n'), ((2809, 2860), 'colossalai.nn.Linear', 'col_nn.Linear', (['dim', '(3 * dim)'], {'dtype': 'dtype', 'bias': 'bias'}), '(dim, 3 * dim, dtype=dtype, bias=bias)\n', (2822, 2860), True, 'from colossalai import nn as col_nn\n'), ((3704, 3737), 'colossalai.nn.Dropout', 'col_nn.Dropout', (['attention_dropout'], {}), '(attention_dropout)\n', (3718, 3737), True, 'from colossalai import nn as col_nn\n'), ((3760, 3807), 'colossalai.nn.Linear', 'col_nn.Linear', (['dim', 'dim'], {'dtype': 'dtype', 'bias': '(True)'}), '(dim, dim, dtype=dtype, bias=True)\n', (3773, 3807), True, 'from colossalai import nn as col_nn\n'), ((3832, 3855), 'colossalai.nn.Dropout', 'col_nn.Dropout', (['dropout'], {}), '(dropout)\n', (3846, 3855), True, 'from colossalai import nn as col_nn\n'), ((4020, 4067), 'colossalai.nn.layer.utils.divide', 'divide', (['all_head_size', 'self.attention_head_size'], {}), '(all_head_size, self.attention_head_size)\n', (4026, 4067), False, 'from colossalai.nn.layer.utils import CheckpointModule, divide\n'), ((4276, 4303), 'torch.chunk', 'torch.chunk', (['qkv', '(3)'], {'dim': '(-1)'}), '(qkv, 3, dim=-1)\n', (4287, 4303), False, 'import torch\n'), ((5076, 5094), 'torch.matmul', 'torch.matmul', (['x', 'v'], {}), '(x, v)\n', (5088, 5094), False, 'import torch\n'), ((5717, 5777), 'colossalai.nn.Linear', 'col_nn.Linear', (['dim', 'intermediate_dim'], {'dtype': 'dtype', 'bias': 'bias'}), '(dim, intermediate_dim, dtype=dtype, bias=bias)\n', (5730, 5777), True, 'from colossalai import nn as col_nn\n'), ((5840, 5900), 'colossalai.nn.Linear', 'col_nn.Linear', (['intermediate_dim', 'dim'], {'dtype': 'dtype', 'bias': 'bias'}), '(intermediate_dim, dim, dtype=dtype, bias=bias)\n', (5853, 5900), True, 'from colossalai import nn as col_nn\n'), ((5925, 5948), 'colossalai.nn.Dropout', 'col_nn.Dropout', (['dropout'], {}), '(dropout)\n', (5939, 5948), True, 'from colossalai import nn as col_nn\n'), ((6904, 6978), 'colossalai.nn.LayerNorm', 'col_nn.LayerNorm', 
([], {'normalized_shape': 'dim', 'eps': 'layernorm_epsilon', 'dtype': 'dtype'}), '(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)\n', (6920, 6978), True, 'from colossalai import nn as col_nn\n'), ((7424, 7498), 'colossalai.nn.LayerNorm', 'col_nn.LayerNorm', ([], {'normalized_shape': 'dim', 'eps': 'layernorm_epsilon', 'dtype': 'dtype'}), '(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)\n', (7440, 7498), True, 'from colossalai import nn as col_nn\n'), ((8465, 8551), 'colossalai.nn.Classifier', 'col_nn.Classifier', (['dim', 'vocab_size', 'word_embeeding_weight'], {'bias': 'bias', 'dtype': 'dtype'}), '(dim, vocab_size, word_embeeding_weight, bias=bias, dtype=\n dtype)\n', (8482, 8551), True, 'from colossalai import nn as col_nn\n'), ((8829, 8854), 'colossalai.nn.CrossEntropyLoss', 'col_nn.CrossEntropyLoss', ([], {}), '()\n', (8852, 8854), True, 'from colossalai import nn as col_nn\n'), ((11136, 11210), 'colossalai.nn.LayerNorm', 'col_nn.LayerNorm', ([], {'normalized_shape': 'dim', 'eps': 'layernorm_epsilon', 'dtype': 'dtype'}), '(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)\n', (11152, 11210), True, 'from colossalai import nn as col_nn\n'), ((16980, 17001), 'torch.nn.ModuleList', 'nn.ModuleList', (['models'], {}), '(models)\n', (16993, 17001), False, 'from torch import dtype, nn\n'), ((1443, 1503), 'colossalai.nn.Embedding', 'col_nn.Embedding', (['num_tokentypes', 'embedding_dim'], {'dtype': 'dtype'}), '(num_tokentypes, embedding_dim, dtype=dtype)\n', (1459, 1503), True, 'from colossalai import nn as col_nn\n'), ((3651, 3669), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (3661, 3669), False, 'from torch import dtype, nn\n'), ((11959, 11997), 'colossalai.nn.partition_batch', 'col_nn.partition_batch', (['attention_mask'], {}), '(attention_mask)\n', (11981, 11997), True, 'from colossalai import nn as col_nn\n'), ((14483, 14557), 'colossalai.nn.LayerNorm', 'col_nn.LayerNorm', ([], {'normalized_shape': 'dim', 
'eps': 'layernorm_epsilon', 'dtype': 'dtype'}), '(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)\n', (14499, 14557), True, 'from colossalai import nn as col_nn\n'), ((15292, 15330), 'colossalai.nn.partition_batch', 'col_nn.partition_batch', (['attention_mask'], {}), '(attention_mask)\n', (15314, 15330), True, 'from colossalai import nn as col_nn\n'), ((16201, 16252), 'colossalai.builder.pipeline.partition_uniform', 'partition_uniform', (['depth', 'pipeline_size', 'num_chunks'], {}), '(depth, pipeline_size, num_chunks)\n', (16218, 16252), False, 'from colossalai.builder.pipeline import partition_uniform\n'), ((16578, 16598), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (16596, 16598), False, 'from colossalai.utils import get_current_device\n'), ((4485, 4520), 'math.sqrt', 'math.sqrt', (['self.attention_head_size'], {}), '(self.attention_head_size)\n', (4494, 4520), False, 'import math\n'), ((3571, 3606), 'math.sqrt', 'math.sqrt', (['self.attention_head_size'], {}), '(self.attention_head_size)\n', (3580, 3606), False, 'import math\n'), ((4880, 4900), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (4898, 4900), False, 'from colossalai.utils import get_current_device\n'), ((1940, 1960), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (1958, 1960), False, 'from colossalai.utils import get_current_device\n'), ((4739, 4759), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (4757, 4759), False, 'from colossalai.utils import get_current_device\n')] |
from colossalai.context.parallel_mode import ParallelMode
import torch
from colossalai.engine.ophooks import BaseOpHook
from colossalai.registry import OPHOOKS
from colossalai.logging import get_dist_logger
from colossalai.core import global_context as gpc
from colossalai.utils.memory_tracer import AsyncMemoryMonitor
import math
@OPHOOKS.register_module
class MemTracerOpHook(BaseOpHook):
    """
    Collect GPU memory usage information around forward/backward execution.

    Sampling runs in a background ``AsyncMemoryMonitor``; after ``warmup``
    iterations the sampling interval is re-tuned from the measured average
    iteration time, and stats are periodically dumped to a pickle file.

    :param warmup: This parameter indicates how many iterations to truncate before profiling, defaults to 50
    :type warmup: int
    :param refreshrate: This parameter decides the frequency of write file, defaults to 10
    :type refreshrate: int
    :param data_prefix: The prefix of the stats data file, defaults to "memstats"
    :type data_prefix: string
    """
    def __init__(self, warmup: int = 50, refreshrate: int = 10, data_prefix: str = "memstats"):
        super().__init__()
        self.async_mem_monitor = AsyncMemoryMonitor()
        self._curiter = 0
        self._logger = get_dist_logger()
        self._count = 0
        self._warmup = warmup
        self._refreshrate = refreshrate
        self._data_prefix = data_prefix
        # in distributed environment, tag dump files with the global rank
        if gpc.is_initialized(ParallelMode.GLOBAL):
            self._rank = gpc.get_global_rank()
        else:
            self._rank = 0
    def _isvalid(self, module) -> bool:
        # Only trace modules that are in training mode.
        assert isinstance(module, torch.nn.Module)
        return module.training
    def _resample(self):
        # calculate the average iteration time over the warmup window
        total_time = (self.async_mem_monitor.time_stamps[-1] - self.async_mem_monitor.time_stamps[0])
        avg_it_time = total_time / self.warmup
        self._logger.debug(f"total time for {self.warmup} iterations is {total_time}s")
        # adjust the sampling power: faster iterations -> finer interval
        power: int = round(-math.log(avg_it_time, 10)) + 1
        self._logger.debug(f"the power is {power}")
        self.async_mem_monitor.set_interval(power)
    @property
    def refreshrate(self) -> int:
        # Number of valid iterations between stats-file refreshes.
        return self._refreshrate
    @property
    def warmup(self) -> int:
        # Iterations to skip before profiling begins.
        return self._warmup
    @property
    def curiter(self) -> int:
        # Current iteration counter (incremented in post_iter).
        return self._curiter
    @property
    def valid_iter(self) -> int:
        # Iterations elapsed since warmup finished.
        return self.curiter - self.warmup
    def pre_fwd_exec(self, module: torch.nn.Module, *args):
        # Close the previous sampling window and open a new one per module.
        if self._isvalid(module):
            self.async_mem_monitor.finish()
            self.async_mem_monitor.start()
    def post_fwd_exec(self, module: torch.nn.Module, *args):
        if self._isvalid(module):
            self.async_mem_monitor.finish()
    def pre_bwd_exec(self, module: torch.nn.Module, input, output):
        if self._isvalid(module):
            self.async_mem_monitor.finish()
            self.async_mem_monitor.start()
    def post_bwd_exec(self, module: torch.nn.Module, input):
        if self._isvalid(module):
            self.async_mem_monitor.finish()
    def pre_iter(self):
        pass
    def post_iter(self):
        self.async_mem_monitor.finish()
        # in the warmup stage: collect nothing, just count iterations
        if self.curiter < self.warmup:
            pass
        # adjust the sampling rate exactly once, when warmup ends
        elif self.curiter == self.warmup:
            # use adaptive sample rate
            self._resample()
        # record data to log file
        else:
            # every `refreshrate` times, refresh the file
            if self.valid_iter != 0 and self.valid_iter % self.refreshrate == 0:
                # output file info
                self._logger.info(f"dump a memory statistics as pickle to {self._data_prefix}-{self._rank}.pkl")
                self.save_results()
                self._count += 1
                self._logger.debug(f"data file has been refreshed {self._count} times")
        # finish a iteration
        self._curiter += 1
    def save_results(self):
        # Dump collected stats to "<prefix>-<rank>.pkl".
        datafile = f"{self._data_prefix}-{self._rank}.pkl"
self.async_mem_monitor.save(datafile) | [
"colossalai.logging.get_dist_logger",
"colossalai.utils.memory_tracer.AsyncMemoryMonitor",
"colossalai.core.global_context.get_global_rank",
"colossalai.core.global_context.is_initialized"
] | [((970, 990), 'colossalai.utils.memory_tracer.AsyncMemoryMonitor', 'AsyncMemoryMonitor', ([], {}), '()\n', (988, 990), False, 'from colossalai.utils.memory_tracer import AsyncMemoryMonitor\n'), ((1040, 1057), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (1055, 1057), False, 'from colossalai.logging import get_dist_logger\n'), ((1240, 1279), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.GLOBAL'], {}), '(ParallelMode.GLOBAL)\n', (1258, 1279), True, 'from colossalai.core import global_context as gpc\n'), ((1306, 1327), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (1325, 1327), True, 'from colossalai.core import global_context as gpc\n'), ((1866, 1891), 'math.log', 'math.log', (['avg_it_time', '(10)'], {}), '(avg_it_time, 10)\n', (1874, 1891), False, 'import math\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
import colossalai
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.core import global_context as gpc
from colossalai.testing import rerun_on_exception
from colossalai.utils import free_port
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.sharded_model.utils import col_model_deepcopy
from colossalai.zero.sharded_optim._utils import has_inf_or_nan
from tests.components_to_test.registry import non_distributed_component_funcs
from torch.nn.parallel import DistributedDataParallel as DDP
from common import (MP_PARALLEL_CONFIG, ZERO_PARALLEL_CONFIG, check_params, check_sharded_model_params)
def run_dist(rank, world_size, port, parallel_config):
colossalai.launch(config=parallel_config,
rank=rank,
world_size=world_size,
host='localhost',
port=port,
backend='nccl')
test_models = ['repeated_computed_layers', 'resnet18', 'bert']
for model_name in test_models:
get_components_func = non_distributed_component_funcs.get_callable(model_name)
model_builder, train_dataloader, _, optimizer_class, criterion = get_components_func()
with ZeroInitContext(target_device=torch.cuda.current_device(),
shard_strategy=gpc.config.zero.model_config.shard_strategy,
shard_param=True):
colo_model = model_builder(checkpoint=True)
colo_optimizer = optimizer_class(colo_model.parameters(), lr=1e-3)
engine, train_dataloader, _, _ = colossalai.initialize(colo_model,
optimizer=colo_optimizer,
criterion=criterion,
train_dataloader=train_dataloader)
torch_model = model_builder(checkpoint=True).half()
col_model_deepcopy(engine.model, torch_model)
torch_model = torch_model.cuda().float()
engine.train()
torch_optimizer = optimizer_class(torch_model.parameters(), lr=1e-3)
if dist.get_world_size() > 1:
torch_model = DDP(torch_model)
i = 0
for data, label in train_dataloader:
if i > 4:
break
data, label = data.cuda(), label.cuda()
engine.zero_grad()
torch_optimizer.zero_grad()
if criterion:
output = engine(data)
loss = engine.criterion(output, label)
torch_output = torch_model(data)
torch_loss = engine.criterion(torch_output, label)
else:
loss = engine(data, label)
torch_loss = torch_model(data, label)
engine.backward(loss)
engine.step()
torch_loss.backward()
for param in torch_model.parameters():
if param.grad is not None:
assert not has_inf_or_nan(param.grad)
torch_optimizer.step()
i += 1
if parallel_config == MP_PARALLEL_CONFIG:
check_params(torch_model, colo_model, loose=True)
elif parallel_config == ZERO_PARALLEL_CONFIG:
check_sharded_model_params(torch_model, colo_model, loose=True)
# FIXME: enable this test in next PR
@pytest.mark.skip
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [2, 4])
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_mp_engine(world_size):
run_func = partial(run_dist, world_size=world_size, port=free_port(), parallel_config=MP_PARALLEL_CONFIG)
mp.spawn(run_func, nprocs=world_size)
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_zero_engine(world_size):
run_func = partial(run_dist, world_size=world_size, port=free_port(), parallel_config=ZERO_PARALLEL_CONFIG)
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_zero_engine(world_size=4)
| [
"colossalai.zero.sharded_optim._utils.has_inf_or_nan",
"colossalai.launch",
"colossalai.initialize",
"colossalai.utils.free_port",
"colossalai.testing.rerun_on_exception",
"colossalai.zero.sharded_model.utils.col_model_deepcopy"
] | [((3576, 3621), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[2, 4]'], {}), "('world_size', [2, 4])\n", (3599, 3621), False, 'import pytest\n'), ((3623, 3726), 'colossalai.testing.rerun_on_exception', 'rerun_on_exception', ([], {'exception_type': 'mp.ProcessRaisedException', 'pattern': '""".*Address already in use.*"""'}), "(exception_type=mp.ProcessRaisedException, pattern=\n '.*Address already in use.*')\n", (3641, 3726), False, 'from colossalai.testing import rerun_on_exception\n'), ((3927, 3972), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 2]'], {}), "('world_size', [1, 2])\n", (3950, 3972), False, 'import pytest\n'), ((3974, 4077), 'colossalai.testing.rerun_on_exception', 'rerun_on_exception', ([], {'exception_type': 'mp.ProcessRaisedException', 'pattern': '""".*Address already in use.*"""'}), "(exception_type=mp.ProcessRaisedException, pattern=\n '.*Address already in use.*')\n", (3992, 4077), False, 'from colossalai.testing import rerun_on_exception\n'), ((821, 945), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'parallel_config', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=parallel_config, rank=rank, world_size=world_size,\n host='localhost', port=port, backend='nccl')\n", (838, 945), False, 'import colossalai\n'), ((3868, 3905), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (3876, 3905), True, 'import torch.multiprocessing as mp\n'), ((4223, 4260), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (4231, 4260), True, 'import torch.multiprocessing as mp\n'), ((1185, 1241), 'tests.components_to_test.registry.non_distributed_component_funcs.get_callable', 'non_distributed_component_funcs.get_callable', (['model_name'], {}), '(model_name)\n', (1229, 1241), 
False, 'from tests.components_to_test.registry import non_distributed_component_funcs\n'), ((1719, 1839), 'colossalai.initialize', 'colossalai.initialize', (['colo_model'], {'optimizer': 'colo_optimizer', 'criterion': 'criterion', 'train_dataloader': 'train_dataloader'}), '(colo_model, optimizer=colo_optimizer, criterion=\n criterion, train_dataloader=train_dataloader)\n', (1740, 1839), False, 'import colossalai\n'), ((2092, 2137), 'colossalai.zero.sharded_model.utils.col_model_deepcopy', 'col_model_deepcopy', (['engine.model', 'torch_model'], {}), '(engine.model, torch_model)\n', (2110, 2137), False, 'from colossalai.zero.sharded_model.utils import col_model_deepcopy\n'), ((2300, 2321), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2319, 2321), True, 'import torch.distributed as dist\n'), ((2353, 2369), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['torch_model'], {}), '(torch_model)\n', (2356, 2369), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((3318, 3367), 'common.check_params', 'check_params', (['torch_model', 'colo_model'], {'loose': '(True)'}), '(torch_model, colo_model, loose=True)\n', (3330, 3367), False, 'from common import MP_PARALLEL_CONFIG, ZERO_PARALLEL_CONFIG, check_params, check_sharded_model_params\n'), ((3815, 3826), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (3824, 3826), False, 'from colossalai.utils import free_port\n'), ((4168, 4179), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (4177, 4179), False, 'from colossalai.utils import free_port\n'), ((3434, 3497), 'common.check_sharded_model_params', 'check_sharded_model_params', (['torch_model', 'colo_model'], {'loose': '(True)'}), '(torch_model, colo_model, loose=True)\n', (3460, 3497), False, 'from common import MP_PARALLEL_CONFIG, ZERO_PARALLEL_CONFIG, check_params, check_sharded_model_params\n'), ((1380, 1407), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (1405, 
1407), False, 'import torch\n'), ((3173, 3199), 'colossalai.zero.sharded_optim._utils.has_inf_or_nan', 'has_inf_or_nan', (['param.grad'], {}), '(param.grad)\n', (3187, 3199), False, 'from colossalai.zero.sharded_optim._utils import has_inf_or_nan\n')] |
import torch
import colossalai
import pytest
import torch.multiprocessing as mp
from colossalai.utils.cuda import get_current_device
from colossalai.gemini.memory_tracer import MemStatsCollector
from colossalai.gemini.memory_tracer import GLOBAL_MODEL_DATA_TRACER
from colossalai.utils.memory import colo_set_process_memory_fraction
from colossalai.zero.sharded_param.sharded_param import ShardedParamV2
from colossalai.gemini.stateful_tensor import TensorState
from colossalai.utils import free_port
from colossalai.testing import rerun_if_address_is_in_use
from torch.nn.parameter import Parameter
from typing import List
from functools import partial
from colossalai.gemini import StatefulTensorMgr
from colossalai.gemini.tensor_placement_policy import AutoTensorPlacementPolicy
class Net(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
# each parameter is 128 MB
self.p0 = Parameter(torch.empty(1024, 1024, 32))
self.p1 = Parameter(torch.empty(1024, 1024, 32))
self.p2 = Parameter(torch.empty(1024, 1024, 32))
def limit_cuda_memory(memory_in_g: float):
cuda_capacity = torch.cuda.get_device_properties(get_current_device()).total_memory
fraction = (memory_in_g * 1024**3) / cuda_capacity
colo_set_process_memory_fraction(fraction)
def run_stm():
# warmup phase use 20% CUDA memory to store params
# only 2 params can be on CUDA
limit_cuda_memory(1.26)
model = Net()
for p in model.parameters():
p.colo_attr = ShardedParamV2(p, set_data_none=True)
GLOBAL_MODEL_DATA_TRACER.register_model(model)
mem_collector = MemStatsCollector()
tensor_placement_policy = AutoTensorPlacementPolicy(mem_stats_collector=mem_collector)
stateful_tensor_mgr = StatefulTensorMgr(tensor_placement_policy)
for p in model.parameters():
stateful_tensor_mgr.register_stateful_param(p.colo_attr)
mem_collector.start_collection()
# Compute order: 0 1 2 0 1
# warmup
# use naive eviction strategy
apply_adjust(model, model.p0, [model.p0], stateful_tensor_mgr)
mem_collector.sample_model_data()
mem_collector.sample_overall_data()
apply_adjust(model, model.p1, [model.p0, model.p1], stateful_tensor_mgr)
mem_collector.sample_model_data()
mem_collector.sample_overall_data()
apply_adjust(model, model.p2, [model.p1, model.p2], stateful_tensor_mgr)
mem_collector.sample_model_data()
mem_collector.sample_overall_data()
apply_adjust(model, model.p0, [model.p0, model.p2], stateful_tensor_mgr)
mem_collector.sample_model_data()
mem_collector.sample_overall_data()
apply_adjust(model, model.p1, [model.p1, model.p2], stateful_tensor_mgr)
mem_collector.sample_model_data()
mem_collector.finish_collection()
stateful_tensor_mgr.reset()
# warmup done
# only 2 params can be on CUDA
limit_cuda_memory(0.26 / tensor_placement_policy._steady_cuda_cap_ratio)
# use OPT-like eviction strategy
apply_adjust(model, model.p0, [model.p0, model.p1], stateful_tensor_mgr)
apply_adjust(model, model.p1, [model.p0, model.p1], stateful_tensor_mgr)
apply_adjust(model, model.p2, [model.p0, model.p2], stateful_tensor_mgr)
apply_adjust(model, model.p0, [model.p0, model.p2], stateful_tensor_mgr)
apply_adjust(model, model.p1, [model.p1, model.p2], stateful_tensor_mgr)
def apply_adjust(model: torch.nn.Module, compute_param: Parameter, cuda_param_after_adjust: List[Parameter],
stateful_tensor_mgr: StatefulTensorMgr):
compute_param.colo_attr._sharded_data_tensor.trans_state(TensorState.COMPUTE)
for p in model.parameters():
if p is not compute_param and p.colo_attr._sharded_data_tensor.state != TensorState.HOLD:
p.colo_attr._sharded_data_tensor.trans_state(TensorState.HOLD)
stateful_tensor_mgr.adjust_layout()
print_stats(model)
device = torch.device(torch.cuda.current_device())
cuda_param_after_adjust = [hash(p) for p in cuda_param_after_adjust]
for n, p in model.named_parameters():
if hash(p) in cuda_param_after_adjust:
assert p.colo_attr._sharded_data_tensor.device == device, f'{n} {p.colo_attr._sharded_data_tensor.device} vs {device}'
else:
assert p.colo_attr._sharded_data_tensor.device == torch.device('cpu')
def print_stats(model: torch.nn.Module):
msgs = []
for n, p in model.named_parameters():
msgs.append(f'{n}: {p.colo_attr._sharded_data_tensor.state}({p.colo_attr._sharded_data_tensor.device})')
print(f'[ {", ".join(msgs)} ]')
def run_dist(rank, world_size, port):
colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
run_stm()
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_stateful_tensor_manager(world_size=1):
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
# this unit test can pass if available CUDA memory >= 1.5G
test_stateful_tensor_manager()
| [
"colossalai.gemini.tensor_placement_policy.AutoTensorPlacementPolicy",
"colossalai.gemini.memory_tracer.GLOBAL_MODEL_DATA_TRACER.register_model",
"colossalai.launch",
"colossalai.gemini.StatefulTensorMgr",
"colossalai.utils.free_port",
"colossalai.utils.cuda.get_current_device",
"colossalai.gemini.memor... | [((4772, 4800), 'colossalai.testing.rerun_if_address_is_in_use', 'rerun_if_address_is_in_use', ([], {}), '()\n', (4798, 4800), False, 'from colossalai.testing import rerun_if_address_is_in_use\n'), ((1271, 1313), 'colossalai.utils.memory.colo_set_process_memory_fraction', 'colo_set_process_memory_fraction', (['fraction'], {}), '(fraction)\n', (1303, 1313), False, 'from colossalai.utils.memory import colo_set_process_memory_fraction\n'), ((1564, 1610), 'colossalai.gemini.memory_tracer.GLOBAL_MODEL_DATA_TRACER.register_model', 'GLOBAL_MODEL_DATA_TRACER.register_model', (['model'], {}), '(model)\n', (1603, 1610), False, 'from colossalai.gemini.memory_tracer import GLOBAL_MODEL_DATA_TRACER\n'), ((1631, 1650), 'colossalai.gemini.memory_tracer.MemStatsCollector', 'MemStatsCollector', ([], {}), '()\n', (1648, 1650), False, 'from colossalai.gemini.memory_tracer import MemStatsCollector\n'), ((1681, 1741), 'colossalai.gemini.tensor_placement_policy.AutoTensorPlacementPolicy', 'AutoTensorPlacementPolicy', ([], {'mem_stats_collector': 'mem_collector'}), '(mem_stats_collector=mem_collector)\n', (1706, 1741), False, 'from colossalai.gemini.tensor_placement_policy import AutoTensorPlacementPolicy\n'), ((1768, 1810), 'colossalai.gemini.StatefulTensorMgr', 'StatefulTensorMgr', (['tensor_placement_policy'], {}), '(tensor_placement_policy)\n', (1785, 1810), False, 'from colossalai.gemini import StatefulTensorMgr\n'), ((4629, 4741), 'colossalai.launch', 'colossalai.launch', ([], {'config': '{}', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config={}, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (4646, 4741), False, 'import colossalai\n'), ((4927, 4964), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (4935, 4964), True, 'import torch.multiprocessing as mp\n'), ((1522, 
1559), 'colossalai.zero.sharded_param.sharded_param.ShardedParamV2', 'ShardedParamV2', (['p'], {'set_data_none': '(True)'}), '(p, set_data_none=True)\n', (1536, 1559), False, 'from colossalai.zero.sharded_param.sharded_param import ShardedParamV2\n'), ((3919, 3946), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (3944, 3946), False, 'import torch\n'), ((936, 963), 'torch.empty', 'torch.empty', (['(1024)', '(1024)', '(32)'], {}), '(1024, 1024, 32)\n', (947, 963), False, 'import torch\n'), ((993, 1020), 'torch.empty', 'torch.empty', (['(1024)', '(1024)', '(32)'], {}), '(1024, 1024, 32)\n', (1004, 1020), False, 'import torch\n'), ((1050, 1077), 'torch.empty', 'torch.empty', (['(1024)', '(1024)', '(32)'], {}), '(1024, 1024, 32)\n', (1061, 1077), False, 'import torch\n'), ((1177, 1197), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (1195, 1197), False, 'from colossalai.utils.cuda import get_current_device\n'), ((4910, 4921), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (4919, 4921), False, 'from colossalai.utils import free_port\n'), ((4317, 4336), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4329, 4336), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
import torch.nn as nn
import torch.distributed as dist
from torch import Tensor
from typing import Any
from torch.optim import Optimizer
from torch.distributed import ReduceOp
from colossalai.core import global_context as gpc
from colossalai.context import ParallelMode
from colossalai.nn.optimizer import ColossalaiOptimizer
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from ._fp16_optimizer import FP16Optimizer
class NaiveAMPOptimizer(ColossalaiOptimizer):
"""A wrapper class for optimizer to cast all parameters to fp16
:param optim: A normal optimizer like Adam or SGD
:param args: Args used to initialize FP16 optimizer
:param kwargs: Kwargs used to initialize FP16 optimizer
:type optim: torch.optim.Optimizer
"""
def __init__(self, optim: Optimizer, *args, **kwargs):
optim = FP16Optimizer(optimizer=optim, *args, **kwargs)
super().__init__(optim)
def backward(self, loss: Tensor):
"""Backward with gradient scaler
:param loss: loss computed by a loss function
:type loss: torch.Tensor
"""
loss = self.optim.scale_loss(loss)
loss.backward()
def step(self):
return self.optim.step()
def clip_grad_norm(self, model: nn.Module, max_norm: float):
pass
class NaiveAMPModel(nn.Module):
"""A wrapper class for model to cast the model into fp16 and
automatically cast the input and output
"""
def __init__(self,
model: nn.Module,
output_to_fp32: bool = True,
parallel_mode: ParallelMode = ParallelMode.DATA,
sync_buffer: bool = True):
super().__init__()
self.model = model.half()
self._output_to_fp32 = output_to_fp32
self._sync_buf = sync_buffer
if gpc.is_initialized(parallel_mode) and gpc.get_world_size(parallel_mode) > 1:
self._process_group = gpc.get_group(parallel_mode)
self._world_size = gpc.get_world_size(parallel_mode)
else:
self._process_group = None
self._world_size = 1
self._sync_buf = False
self._first_eval_run = False
@property
def sync_buffer(self):
return self._sync_buf
@sync_buffer.setter
def sync_buffer(self, state: bool):
self._sync_buf = state
def _convert_to_fp16(self, input_: Any):
if isinstance(input_, Tensor) and input_.dtype == torch.float32:
input_ = input_.half()
return input_
def _convert_to_fp32(self, input_: Any):
if isinstance(input_, Tensor) and input_.dtype == torch.float16:
input_ = input_.float()
return input_
def _reduce_module_buffer(self):
"""
All-reduce the buffers (e.g. running stats of batch normalization) across
data parallel ranks so that all the ranks will produce consistent results
when given the same input
"""
buf_list = []
# find valid buffers
for buf in self.model.buffers():
if buf is not None:
buf_list.append(buf)
# reduce buffers across data parallel ranks
if buf_list:
coalesced_buf = _flatten_dense_tensors(buf_list)
coalesced_buf.div_(self._world_size)
dist.all_reduce(coalesced_buf, op=ReduceOp.SUM, group=self._process_group)
unflattened_buf_list = _unflatten_dense_tensors(coalesced_buf, buf_list)
for old, new in zip(buf_list, unflattened_buf_list):
old.copy_(new)
def eval(self):
self.model.eval()
# we only sync buffer in the first eval iteration
# so that future eval iterations can be done without communication
self._first_eval_run = True
def forward(self, *args, **kwargs):
# reduce buffers after forward will lead to error
# as we cannot change the variables needed for gradient computation after forward
# so we sync buffer before forward
if (self.training or self._first_eval_run) and self._sync_buf:
with torch.no_grad():
self._reduce_module_buffer()
if self._first_eval_run:
self._first_eval_run = False
if args:
args = [self._convert_to_fp16(arg) for arg in args]
if kwargs:
for k, v in kwargs.items():
kwargs[k] = self._convert_to_fp16(v)
out = self.model(*args, **kwargs)
if self._output_to_fp32:
if isinstance(out, Tensor):
out = self._convert_to_fp32(out)
elif isinstance(out, (tuple, list)):
out = [self._convert_to_fp32(val) for val in out]
return out
| [
"colossalai.core.global_context.get_group",
"colossalai.core.global_context.get_world_size",
"colossalai.core.global_context.is_initialized"
] | [((1898, 1931), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['parallel_mode'], {}), '(parallel_mode)\n', (1916, 1931), True, 'from colossalai.core import global_context as gpc\n'), ((2009, 2037), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['parallel_mode'], {}), '(parallel_mode)\n', (2022, 2037), True, 'from colossalai.core import global_context as gpc\n'), ((2069, 2102), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['parallel_mode'], {}), '(parallel_mode)\n', (2087, 2102), True, 'from colossalai.core import global_context as gpc\n'), ((3306, 3338), 'torch._utils._flatten_dense_tensors', '_flatten_dense_tensors', (['buf_list'], {}), '(buf_list)\n', (3328, 3338), False, 'from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors\n'), ((3400, 3474), 'torch.distributed.all_reduce', 'dist.all_reduce', (['coalesced_buf'], {'op': 'ReduceOp.SUM', 'group': 'self._process_group'}), '(coalesced_buf, op=ReduceOp.SUM, group=self._process_group)\n', (3415, 3474), True, 'import torch.distributed as dist\n'), ((3510, 3559), 'torch._utils._unflatten_dense_tensors', '_unflatten_dense_tensors', (['coalesced_buf', 'buf_list'], {}), '(coalesced_buf, buf_list)\n', (3534, 3559), False, 'from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors\n'), ((1936, 1969), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['parallel_mode'], {}), '(parallel_mode)\n', (1954, 1969), True, 'from colossalai.core import global_context as gpc\n'), ((4193, 4208), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4206, 4208), False, 'import torch\n')] |
from .base_benchmarker import Benchmaker
from colossalai.context import Config
import torch
from colossalai.zero import ShardedModel, ShardedOptimizer
class ColossalaiBenchmarker(Benchmaker):
def __init__(self, config_path, optim_class, model, criterion):
config = Config.from_file(config_path)
optimizer = optim_class(model.parameters(), **config.optimizer)
if config.stage in [1, 2]:
model = model.half().cuda()
optimizer = ShardedOptimizer(optimizer=optimizer, **config.zero.optimizer)
elif config.stage == 3:
if hasattr(config.zero.model, 'offload_config'):
model = model.cpu()
model = ShardedModel(module=model, **config.zero.model)
if hasattr(config, 'autocast'):
self.autocast = True
else:
self.autocast = False
else:
raise ValueError('invalid stage number')
super().__init__(config, optimizer, model, criterion)
def run_iter(self, data, label):
if self.config.stage in [1, 2]:
self._run_iter_stage_1_2(data, label)
elif self.config.stage == 3:
self._run_iter_stage_3(data, label)
def _run_iter_stage_1_2(self, data, label):
data = [val.half() if val.dtype == torch.float else val for val in data]
self.optimizer.zero_grad()
# forward
out = self.model(*data)
loss = self.criterion(out, *label)
# backward
self.optimizer.backward(loss)
self.optimizer.sync_grad()
self.optimizer.step()
def _run_iter_stage_3(self, data, label):
self.optimizer.zero_grad()
self.model.zero_grad(set_to_none=True)
with torch.cuda.amp.autocast(enabled=self.autocast):
out = self.model(*data)
if torch.is_tensor(out):
out = [out]
loss = self.criterion(*out, *label)
loss.backward()
self.optimizer.step()
| [
"colossalai.zero.ShardedOptimizer",
"colossalai.zero.ShardedModel",
"colossalai.context.Config.from_file"
] | [((280, 309), 'colossalai.context.Config.from_file', 'Config.from_file', (['config_path'], {}), '(config_path)\n', (296, 309), False, 'from colossalai.context import Config\n'), ((481, 543), 'colossalai.zero.ShardedOptimizer', 'ShardedOptimizer', ([], {'optimizer': 'optimizer'}), '(optimizer=optimizer, **config.zero.optimizer)\n', (497, 543), False, 'from colossalai.zero import ShardedModel, ShardedOptimizer\n'), ((1746, 1792), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {'enabled': 'self.autocast'}), '(enabled=self.autocast)\n', (1769, 1792), False, 'import torch\n'), ((1845, 1865), 'torch.is_tensor', 'torch.is_tensor', (['out'], {}), '(out)\n', (1860, 1865), False, 'import torch\n'), ((693, 740), 'colossalai.zero.ShardedModel', 'ShardedModel', ([], {'module': 'model'}), '(module=model, **config.zero.model)\n', (705, 740), False, 'from colossalai.zero import ShardedModel, ShardedOptimizer\n')] |
import torch
import torch.nn as nn
from torch.optim.adam import Adam
from torch.optim import AdamW
from colossalai.nn.optimizer.hybrid_adam import HybridAdam
from colossalai.testing import parameterize
RE = 1024
@parameterize('adamw', [False, True])
@parameterize('device', ['cpu', 'cuda:0'])
@parameterize('p_dtype', [torch.float])
@parameterize('g_dtype', [torch.float, torch.half])
def test_adam(adamw, device, p_dtype, g_dtype):
rng_state = torch.get_rng_state()
p = nn.Parameter(torch.rand(64).to(device, p_dtype))
torch.set_rng_state(rng_state)
p_copy = nn.Parameter(torch.rand(64).to(device).float())
if adamw:
optim = HybridAdam([p], lr=1e-3, adamw_mode=True)
torch_optim = AdamW([p_copy], lr=1e-3)
else:
optim = HybridAdam([p], lr=1e-3)
torch_optim = Adam([p_copy], lr=1e-3)
print(f"adaw mode {adamw}, device {device}, p_dtype {p_dtype}, g_dtype {g_dtype}")
for i in range(RE):
p.grad = torch.rand(64).to(device, p_dtype)
p_copy.grad = p.grad.clone().float()
p.grad.data = p.grad.data.to(g_dtype)
optim.step()
torch_optim.step()
if torch.isnan(p.data).any() or torch.isnan(p_copy.data).any():
continue
assert torch.allclose(p.data, p_copy.data, 1e-4, 1e-2), \
f"adaw mode {adamw}, device {device}, p_dtype {p_dtype}, g_dtype {g_dtype}"
| [
"colossalai.nn.optimizer.hybrid_adam.HybridAdam",
"colossalai.testing.parameterize"
] | [((217, 253), 'colossalai.testing.parameterize', 'parameterize', (['"""adamw"""', '[False, True]'], {}), "('adamw', [False, True])\n", (229, 253), False, 'from colossalai.testing import parameterize\n'), ((255, 296), 'colossalai.testing.parameterize', 'parameterize', (['"""device"""', "['cpu', 'cuda:0']"], {}), "('device', ['cpu', 'cuda:0'])\n", (267, 296), False, 'from colossalai.testing import parameterize\n'), ((298, 336), 'colossalai.testing.parameterize', 'parameterize', (['"""p_dtype"""', '[torch.float]'], {}), "('p_dtype', [torch.float])\n", (310, 336), False, 'from colossalai.testing import parameterize\n'), ((338, 388), 'colossalai.testing.parameterize', 'parameterize', (['"""g_dtype"""', '[torch.float, torch.half]'], {}), "('g_dtype', [torch.float, torch.half])\n", (350, 388), False, 'from colossalai.testing import parameterize\n'), ((453, 474), 'torch.get_rng_state', 'torch.get_rng_state', ([], {}), '()\n', (472, 474), False, 'import torch\n'), ((536, 566), 'torch.set_rng_state', 'torch.set_rng_state', (['rng_state'], {}), '(rng_state)\n', (555, 566), False, 'import torch\n'), ((660, 702), 'colossalai.nn.optimizer.hybrid_adam.HybridAdam', 'HybridAdam', (['[p]'], {'lr': '(0.001)', 'adamw_mode': '(True)'}), '([p], lr=0.001, adamw_mode=True)\n', (670, 702), False, 'from colossalai.nn.optimizer.hybrid_adam import HybridAdam\n'), ((724, 749), 'torch.optim.AdamW', 'AdamW', (['[p_copy]'], {'lr': '(0.001)'}), '([p_copy], lr=0.001)\n', (729, 749), False, 'from torch.optim import AdamW\n'), ((775, 800), 'colossalai.nn.optimizer.hybrid_adam.HybridAdam', 'HybridAdam', (['[p]'], {'lr': '(0.001)'}), '([p], lr=0.001)\n', (785, 800), False, 'from colossalai.nn.optimizer.hybrid_adam import HybridAdam\n'), ((822, 846), 'torch.optim.adam.Adam', 'Adam', (['[p_copy]'], {'lr': '(0.001)'}), '([p_copy], lr=0.001)\n', (826, 846), False, 'from torch.optim.adam import Adam\n'), ((1259, 1308), 'torch.allclose', 'torch.allclose', (['p.data', 'p_copy.data', '(0.0001)', '(0.01)'], 
{}), '(p.data, p_copy.data, 0.0001, 0.01)\n', (1273, 1308), False, 'import torch\n'), ((496, 510), 'torch.rand', 'torch.rand', (['(64)'], {}), '(64)\n', (506, 510), False, 'import torch\n'), ((975, 989), 'torch.rand', 'torch.rand', (['(64)'], {}), '(64)\n', (985, 989), False, 'import torch\n'), ((1162, 1181), 'torch.isnan', 'torch.isnan', (['p.data'], {}), '(p.data)\n', (1173, 1181), False, 'import torch\n'), ((1191, 1215), 'torch.isnan', 'torch.isnan', (['p_copy.data'], {}), '(p_copy.data)\n', (1202, 1215), False, 'import torch\n'), ((593, 607), 'torch.rand', 'torch.rand', (['(64)'], {}), '(64)\n', (603, 607), False, 'import torch\n')] |
import math
import time
import torch
from colossalai.utils import MultiTimer
from colossalai.context import ParallelMode, Config
from typing import List, Dict, Tuple, Callable
def get_time_stamp() -> int:
"""
Return the time stamp for profiling.
Returns:
time_stamp (int): the time given by time.time()
"""
torch.cuda.synchronize()
time_stamp = time.time()
return time_stamp
def get_memory_states() -> Tuple[float]:
"""
Return the memory statistics.
Returns:
max_allocated (float): the allocated CUDA memory
max_cached (float): the cached CUDA memory
"""
max_allocated = torch.cuda.max_memory_allocated() / (1024**3)
max_cached = torch.cuda.max_memory_reserved() / (1024**3)
torch.cuda.reset_peak_memory_stats()
torch.cuda.empty_cache()
return max_allocated, max_cached
def find_all_configs(device_cnt: int) -> List[Dict]:
"""
Find all possible configurations for tensor parallelism
Args:
device_cnt (int): the number of devices
Returns:
config_list (List[Dict]): a list of configurations
"""
def _is_square(num):
# 2D parallel should be implemented with at least 2 devices.
if num <= 1:
return False
return math.floor(math.sqrt(num))**2 == num
def _is_cube(num):
# 3D parallel should be implemented with at least 2 devices.
if num <= 1:
return False
return math.floor(num**(1. / 3.))**3 == num
config_list = []
# add non-parallel config
config = dict(parallel=dict(tensor=dict(size=device_cnt, mode=None)))
config_list.append(config)
# add 1D config
config = dict(parallel=dict(tensor=dict(size=device_cnt, mode='1d')))
config_list.append(config)
# add 2D config only if device_cnt is a square
if _is_square(device_cnt):
config = dict(parallel=dict(tensor=dict(size=device_cnt, mode='2d')))
config_list.append(config)
# check for 2.5D
# iterate over depth
for depth in range(1, device_cnt):
if device_cnt % depth == 0 and _is_square(device_cnt // depth):
config = dict(parallel=dict(tensor=dict(size=device_cnt, mode='2.5d', depth=depth)))
config_list.append(config)
# check for 3D if device_cnt is a cube
if _is_cube(device_cnt):
config = dict(parallel=dict(tensor=dict(size=device_cnt, mode='3d')))
config_list.append(config)
config_list = [Config(cfg) for cfg in config_list]
return config_list
def profile_model(model: torch.nn.Module, warmup_steps: int, profile_steps: int, data_func: Callable,
timer: MultiTimer) -> Tuple[float]:
"""
Profile the forward and backward of a model
Args:
model (torch.nn.Module): a PyTorch model
warmup_steps (int): the number of steps for warmup
profile_steps (int): the number of steps for profiling
data_func (Callable): a function to generate random data
timer (colossalai.utils.Multitimer): a timer instance for time recording
Returns:
fwd_time (float): the average forward time taken by forward pass in second
bwd_time (float): the average backward time taken by forward pass in second
max_allocated (float): the maximum GPU memory allocated in GB
max_cached (float): the maximum GPU memory cached in GB
"""
def _run_step(data):
timer.start('forward')
out = model(data)
timer.stop('forward', keep_in_history=True)
timer.start('backward')
out.mean().backward()
timer.stop('backward', keep_in_history=True)
data_list = [data_func() for _ in range(warmup_steps)]
for data in data_list:
_run_step(data)
timer.reset('forward')
timer.reset('backward')
for _ in range(profile_steps):
data = data_func()
_run_step(data)
max_allocated, max_cached = get_memory_states()
fwd_time = timer.get_timer('forward').get_history_mean()
bwd_time = timer.get_timer('backward').get_history_mean()
return fwd_time, bwd_time, max_allocated, max_cached
def get_batch_data(dim: int, batch_size: int, seq_length: int, mode: ParallelMode) -> torch.Tensor:
"""
Return a random data of shape (batch_size, seq_length, dim) for profiling.
Args:
dim (int): hidden size
batch_size (int): the number of data samples
seq_length (int): the number of tokens
mode (ParallelMode): Colossal-AI ParallelMode enum
Returns:
data (torch.Tensor): random data
"""
if mode in ['2d', '2.5d']:
batch_size = batch_size // 2
dim = dim // 2
elif mode == '3d':
batch_size = batch_size // 4
dim = dim // 2
data = torch.rand(batch_size, seq_length, dim).cuda()
return data
| [
"colossalai.context.Config"
] | [((340, 364), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (362, 364), False, 'import torch\n'), ((382, 393), 'time.time', 'time.time', ([], {}), '()\n', (391, 393), False, 'import time\n'), ((767, 803), 'torch.cuda.reset_peak_memory_stats', 'torch.cuda.reset_peak_memory_stats', ([], {}), '()\n', (801, 803), False, 'import torch\n'), ((808, 832), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (830, 832), False, 'import torch\n'), ((655, 688), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', ([], {}), '()\n', (686, 688), False, 'import torch\n'), ((718, 750), 'torch.cuda.max_memory_reserved', 'torch.cuda.max_memory_reserved', ([], {}), '()\n', (748, 750), False, 'import torch\n'), ((2497, 2508), 'colossalai.context.Config', 'Config', (['cfg'], {}), '(cfg)\n', (2503, 2508), False, 'from colossalai.context import ParallelMode, Config\n'), ((4801, 4840), 'torch.rand', 'torch.rand', (['batch_size', 'seq_length', 'dim'], {}), '(batch_size, seq_length, dim)\n', (4811, 4840), False, 'import torch\n'), ((1480, 1510), 'math.floor', 'math.floor', (['(num ** (1.0 / 3.0))'], {}), '(num ** (1.0 / 3.0))\n', (1490, 1510), False, 'import math\n'), ((1300, 1314), 'math.sqrt', 'math.sqrt', (['num'], {}), '(num)\n', (1309, 1314), False, 'import math\n')] |
import math
from typing import Callable
import torch
import torch.nn as nn
import torch.nn.functional as F
from colossalai.communication import broadcast
from colossalai.context import ParallelMode, seed
from colossalai.core import global_context as gpc
from colossalai.global_variables import tensor_parallel_env as env
from colossalai.nn import init as init
from colossalai.registry import LAYERS
from colossalai.utils.cuda import get_current_device
from torch import Tensor
from torch.nn import Parameter
from ..base_layer import ParallelLayer
from ..utils import divide, set_tensor_parallel_attribute_by_partition, to_2tuple
from ._operation import (add_bias_2p5d, Matmul_AB_2p5D, Matmul_ABT_2p5D, all_gather_tensor_2p5d, classifier_2p5d,
layernorm_2p5d, reduce_scatter_tensor_2p5d, split_tensor_2p5d)
from ._utils import assert_tesseract_initialization, get_tesseract_dim_dep_from_env
@LAYERS.register_module
class Linear2p5D(ParallelLayer):
    """
    Linear layer for 2.5D parallelism.

    The full ``[in_features, out_features]`` weight is held as one
    ``[k/q, h/q]`` tile per rank of the tesseract grid (``q = tesseract_dim``)
    and the matmul is performed cooperatively by ``Matmul_AB_2p5D``.

    :param in_features: size of each input sample
    :type in_features: int
    :param out_features: size of each output sample
    :type out_features: int
    :param bias: If set to ``False``, the layer will not learn an additive bias, defaults to True
    :type bias: bool, optional
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param skip_bias_add: If set to ``True``, ``forward`` returns ``(output, bias)`` without
        adding the bias itself, letting the caller fuse the addition, defaults to False
    :type skip_bias_add: bool, optional
    :param weight_initializer: The initializer of weight, defaults to kaiming uniform initializer
    :type weight_initializer: typing.Callable, optional
    :param bias_initializer: The initializer of bias, defaults to xavier uniform initializer
    :type bias_initializer: typing.Callable, optional
    """
    def __init__(self,
                 in_features: int,
                 out_features: int,
                 bias: bool = True,
                 dtype: torch.dtype = None,
                 skip_bias_add: bool = False,
                 weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
                 bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1)):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.skip_bias_add = skip_bias_add
        # parallel setting: this rank's coordinates in the tesseract grid
        assert_tesseract_initialization()
        self.row_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
        self.col_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
        self.dep_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)
        self.tesseract_dim, _ = get_tesseract_dim_dep_from_env()
        # partitioning dimension: both in- and out-features are split q-ways
        self.input_size_per_partition = divide(in_features, self.tesseract_dim)
        self.hidden_size_per_partition = divide(out_features, self.tesseract_dim)
        # create weight, shape: [k/q, h/q]
        factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
        self.weight = Parameter(
            torch.empty(self.input_size_per_partition, self.hidden_size_per_partition, **factory_kwargs))
        # create bias, shape: [h/q]
        if bias:
            self.bias = Parameter(torch.empty(self.hidden_size_per_partition, **factory_kwargs))
        else:
            self.register_parameter('bias', None)
        # initialize parameters (inside the tensor-parallel seed context)
        with seed(ParallelMode.TENSOR):
            self.reset_parameters(weight_initializer, bias_initializer)
        self._set_tensor_parallel_attributes()
    def _set_tensor_parallel_attributes(self):
        # weight is split q^2-ways (both axes); bias only q-ways (one axis)
        set_tensor_parallel_attribute_by_partition(self.weight, self.tesseract_dim**2)
        if self.bias is not None:
            set_tensor_parallel_attribute_by_partition(self.bias, self.tesseract_dim)
    def reset_parameters(self, weight_initializer, bias_initializer) -> None:
        """Initialize weight and (optional) bias using the *full* fan-in/fan-out."""
        fan_in, fan_out = self.in_features, self.out_features
        weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
        if self.bias is not None:
            bias_initializer(self.bias, fan_in=fan_in)
    def forward(self, x: Tensor) -> Tensor:
        """Apply the partitioned linear transform.

        Returns ``(output, bias)`` when ``skip_bias_add`` is set and a bias
        exists; otherwise returns just the output tensor.
        """
        # input: [m/dq, n/q, k/q]
        # output: [m/dq, n/q, h/q]
        out_shape = x.shape[:-1] + (self.hidden_size_per_partition, )
        output = Matmul_AB_2p5D.apply(
            x,
            self.weight,
            self.tesseract_dim,
            out_shape,
            self.row_rank,
            self.col_rank,
            self.dep_rank,
            ParallelMode.PARALLEL_2P5D_ROW,
            ParallelMode.PARALLEL_2P5D_COL,
            self.data_parallel_rank,
            self.pipeline_parallel_rank,
            self.pipeline_parallel_size,
            self.tensor_parallel_size,
        )
        if self.bias is not None:
            if self.skip_bias_add:
                # hand the bias back to the caller instead of adding it here
                bias = add_bias_2p5d(None, self.bias, self.hidden_size_per_partition, self.tesseract_dim, self.row_rank,
                                     self.col_rank, self.dep_rank, ParallelMode.PARALLEL_2P5D_COL, True,
                                     self.data_parallel_rank, self.pipeline_parallel_rank, self.pipeline_parallel_size,
                                     self.tensor_parallel_size)
                return output, bias
            else:
                output = add_bias_2p5d(output, self.bias, self.hidden_size_per_partition, self.tesseract_dim,
                                       self.row_rank, self.col_rank, self.dep_rank, ParallelMode.PARALLEL_2P5D_COL,
                                       False, self.data_parallel_rank, self.pipeline_parallel_rank,
                                       self.pipeline_parallel_size, self.tensor_parallel_size)
                return output
        else:
            return output
@LAYERS.register_module
class LayerNorm2p5D(ParallelLayer):
    r"""
    Layer Normalization for 2.5D parallelism.

    The hidden dimension is sharded across the row parallel group, so the
    mean/variance statistics are completed by all-reducing partial sums.

    :param normalized_shape: input shape from an expected input
        of size. :math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1] \times \ldots \times \text{normalized_shape}[-1]]`
        If a single integer is used, it is treated as a singleton list, and this module will
        normalize over the last dimension which is expected to be of that specific size.
    :type normalized_shape: int
    :param eps: a value added to the denominator for numerical stability, defaults to 1e-05
    :type eps: float, optional
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    """
    def __init__(self, normalized_shape: int, eps: float = 1e-05, dtype=None):
        super().__init__()
        # layer norm config
        self.normalized_shape = normalized_shape
        self.variance_epsilon = eps
        # parallel setting: this rank's coordinates in the tesseract grid
        assert_tesseract_initialization()
        self.row_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
        self.col_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
        self.dep_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)
        self.tesseract_dim, _ = get_tesseract_dim_dep_from_env()
        # partitioning dimension: per-rank slice of the normalized (hidden) dim
        self.partitioned_partition = divide(normalized_shape, self.tesseract_dim)
        # create parameters: gamma (scale) starts at 1, beta (shift) at 0
        factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
        self.gamma = Parameter(torch.ones(self.partitioned_partition, **factory_kwargs))
        self.beta = Parameter(torch.zeros(self.partitioned_partition, **factory_kwargs))
        self._set_tensor_parallel_attribute()
    def _set_tensor_parallel_attribute(self):
        # gamma/beta are 1-D and split only q-ways
        set_tensor_parallel_attribute_by_partition(self.gamma, self.tesseract_dim)
        set_tensor_parallel_attribute_by_partition(self.beta, self.tesseract_dim)
    def forward(self, x: Tensor) -> Tensor:
        """Normalize ``x`` over the distributed hidden dim, then apply gamma/beta."""
        with torch.no_grad():
            E_x = torch.sum(x, dim=-1, keepdim=True)  # [b/q, s, 1]
            # partial sums across the row group add up to the full-hidden sum
            torch.distributed.all_reduce(E_x, group=gpc.get_group(ParallelMode.PARALLEL_2P5D_ROW))
            E_x /= self.normalized_shape
            # Var_x in the block below is the sum of input^2
            Var_x = torch.sum(x * x, dim=-1, keepdim=True)  # [b/q, s, 1]
            torch.distributed.all_reduce(Var_x, group=gpc.get_group(ParallelMode.PARALLEL_2P5D_ROW))
            Var_x /= self.normalized_shape
            Var_x = Var_x - E_x * E_x  # variance of x [b/q, s, 1]
            # this time 1/sqrt(Var_x + epsilon)
            Var_x = 1.0 / torch.sqrt(Var_x + self.variance_epsilon)
        output = layernorm_2p5d(x, E_x, Var_x, self.normalized_shape, ParallelMode.PARALLEL_2P5D_ROW)
        bias = add_bias_2p5d(None, self.beta, self.partitioned_partition, self.tesseract_dim, self.row_rank,
                             self.col_rank, self.dep_rank, ParallelMode.PARALLEL_2P5D_COL, True,
                             self.data_parallel_rank, self.pipeline_parallel_rank, self.pipeline_parallel_size,
                             self.tensor_parallel_size)
        scale = add_bias_2p5d(None, self.gamma, self.partitioned_partition, self.tesseract_dim, self.row_rank,
                              self.col_rank, self.dep_rank, ParallelMode.PARALLEL_2P5D_COL, True,
                              self.data_parallel_rank, self.pipeline_parallel_rank, self.pipeline_parallel_size,
                              self.tensor_parallel_size)
        # output = beta + gamma * normalized(x)
        output = torch.addcmul(bias, scale, output)
        return output
@LAYERS.register_module
class PatchEmbedding2p5D(ParallelLayer):
    """
    2D Image to Patch Embedding for 2.5D parallelism.

    :param img_size: image size
    :type img_size: int
    :param patch_size: patch size
    :type patch_size: int
    :param in_chans: number of channels of input image
    :type in_chans: int
    :param embed_size: size of embedding
    :type embed_size: int
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param flatten: whether to flatten output tensor, defaults to True
    :type flatten: bool, optional
    :param weight_initializer: The initializer of weight, defaults to kaiming uniform initializer
    :type weight_initializer: typing.Callable, optional
    :param bias_initializer: The initializer of bias, defaults to xavier uniform initializer
    :type bias_initializer: typing.Callable, optional
    :param position_embed_initializer: The initializer of position embedding, defaults to zero
    :type position_embed_initializer: typing.Callable, optional
    """
    def __init__(self,
                 img_size: int,
                 patch_size: int,
                 in_chans: int,
                 embed_size: int,
                 flatten: bool = True,
                 dtype: torch.dtype = None,
                 weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
                 bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
                 position_embed_initializer: Callable = init.zeros_()):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        assert_tesseract_initialization()
        self.tesseract_dim, self.tesseract_dep = get_tesseract_dim_dep_from_env()
        self.img_size = img_size
        self.patch_size = patch_size
        # number of patches along (H, W) after tiling the image
        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.flatten = flatten
        self.embed_size = embed_size
        # the embedding dim is split q^2-ways across the tesseract grid
        self.embed_size_per_partition = embed_size // self.tesseract_dim**2
        # all parameters are created inside the tensor-parallel seed context
        with seed(ParallelMode.TENSOR):
            self.weight = Parameter(
                torch.empty((self.embed_size_per_partition, in_chans, *self.patch_size),
                            device=get_current_device(),
                            dtype=dtype))
            self.bias = Parameter(torch.empty(self.embed_size_per_partition, device=get_current_device(), dtype=dtype))
            self.cls_token = Parameter(
                torch.zeros((1, 1, self.embed_size_per_partition), device=get_current_device(), dtype=dtype))
            self.pos_embed = Parameter(
                torch.zeros((1, self.num_patches + 1, self.embed_size_per_partition),
                            device=get_current_device(),
                            dtype=dtype))
        self.reset_parameters(weight_initializer, bias_initializer, position_embed_initializer)
        self._set_tensor_parallel_attribute()
    def _set_tensor_parallel_attribute(self):
        # all four parameters carry a q^2-way partition of the embed dim
        set_tensor_parallel_attribute_by_partition(self.weight, self.tesseract_dim**2)
        set_tensor_parallel_attribute_by_partition(self.bias, self.tesseract_dim**2)
        set_tensor_parallel_attribute_by_partition(self.cls_token, self.tesseract_dim**2)
        set_tensor_parallel_attribute_by_partition(self.pos_embed, self.tesseract_dim**2)
    def reset_parameters(self, weight_initializer, bias_initializer, position_embed_initializer):
        """Initialize all parameters inside the tensor-parallel seed context."""
        with seed(ParallelMode.TENSOR):
            # fan_in comes from the local weight shard; fan_out is the full embed size
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
            fan_out = self.embed_size
            weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
            bias_initializer(self.bias, fan_in=fan_in)
            position_embed_initializer(self.pos_embed)
    def forward(self, input_: Tensor) -> Tensor:
        """Convert an image batch to patch embeddings with cls token and positions."""
        input_ = split_tensor_2p5d(input_, 0)
        B, C, H, W = input_.shape
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        # gather full parameters before the convolution / concatenation
        weight = all_gather_tensor_2p5d(self.weight, 0, ParallelMode.PARALLEL_2P5D_COL)
        bias = all_gather_tensor_2p5d(self.bias, 0, ParallelMode.PARALLEL_2P5D_COL)
        output = F.conv2d(input_, weight, bias, stride=self.patch_size)
        if self.flatten:
            output = output.flatten(2).transpose(1, 2)  # BCHW -> BNC
        cls_token = all_gather_tensor_2p5d(self.cls_token, -1, ParallelMode.PARALLEL_2P5D_COL)
        pos_embed = all_gather_tensor_2p5d(self.pos_embed, -1, ParallelMode.PARALLEL_2P5D_COL)
        cls_token = cls_token.expand(output.shape[0], -1, -1)
        output = torch.cat((cls_token, output), dim=1)
        output = output + pos_embed
        return output
@LAYERS.register_module
class Embedding2p5D(ParallelLayer):
    """
    Token embedding for 2.5D parallelism.

    The embedding table is partitioned along the embedding dimension across
    the ``tesseract_dim**2`` ranks; ``forward`` gathers the shards back
    before performing the lookup.

    :param num_embeddings: number of embeddings
    :type num_embeddings: int
    :param embedding_dim: dimension of embedding
    :type embedding_dim: int
    :param padding_idx: index of padding, defaults to None
    :type padding_idx: int, optional
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param weight_initializer: The initializer of weight, defaults to normal initializer
    :type weight_initializer: typing.Callable, optional
    :param args: Args used in F.embedding
    :param kwargs: Kwargs used in F.embedding
    """
    def __init__(self,
                 num_embeddings: int,
                 embedding_dim: int,
                 padding_idx: int = None,
                 dtype: torch.dtype = None,
                 weight_initializer: Callable = init.normal_(),
                 *args,
                 **kwargs):
        super().__init__()
        assert_tesseract_initialization()
        self.tesseract_dim, self.tesseract_dep = get_tesseract_dim_dep_from_env()
        self.num_embeddings = num_embeddings
        self.embed_dim = embedding_dim
        self.padding_idx = padding_idx
        self.embed_args = args
        self.embed_kwargs = kwargs
        # each rank stores a 1/(q^2) slice of the embedding dimension
        shard_dim = embedding_dim // self.tesseract_dim**2
        self.weight = Parameter(
            torch.empty((num_embeddings, shard_dim), device=get_current_device(), dtype=dtype))
        self.reset_parameters(weight_initializer)
        self._set_tensor_parallel_attributes()
    def _set_tensor_parallel_attributes(self):
        # the weight carries a q^2-way partition along its embedding axis
        set_tensor_parallel_attribute_by_partition(self.weight, self.tesseract_dim**2)
    def reset_parameters(self, weight_initializer) -> None:
        """Initialize the local weight shard inside the tensor-parallel seed context."""
        with seed(ParallelMode.TENSOR):
            weight_initializer(self.weight, fan_in=self.num_embeddings, fan_out=self.embed_dim)
            self._fill_padding_idx_with_zero()
    def _fill_padding_idx_with_zero(self) -> None:
        # zero out the padding row, if one was configured
        if self.padding_idx is None:
            return
        with torch.no_grad():
            self.weight[self.padding_idx].fill_(0)
    def forward(self, input_: Tensor) -> Tensor:
        """Look up embeddings for the given token indices."""
        tokens = split_tensor_2p5d(input_, 0)
        full_weight = all_gather_tensor_2p5d(self.weight, -1, ParallelMode.PARALLEL_2P5D_COL)
        return F.embedding(tokens, full_weight, self.padding_idx, *self.embed_args, **self.embed_kwargs)
@LAYERS.register_module
class VocabParallelEmbedding2p5D(torch.nn.Module):
    """Embedding parallelized in the vocabulary dimension.

    Each rank owns a contiguous slice ``[vocab_start_index, vocab_end_index)``
    of the vocabulary; out-of-slice tokens are masked to zero and the partial
    results are combined with a reduce-scatter.

    :param num_embeddings: number of embeddings
    :type num_embeddings: int
    :param embedding_dim: dimension of embedding
    :type embedding_dim: int
    :param padding_idx: index of padding, defaults to None
    :type padding_idx: int, optional
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param weight_initializer: The initializer of weight, defaults to normal initializer
    :type weight_initializer: typing.Callable, optional
    :param args: Args used in F.embedding
    :param kwargs: Kwargs used in F.embedding
    """
    def __init__(self,
                 num_embeddings: int,
                 embedding_dim: int,
                 padding_idx: int = None,
                 dtype: torch.dtype = None,
                 weight_initializer: Callable = init.normal_(),
                 *args,
                 **kwargs):
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embed_dim = embedding_dim
        self.padding_idx = padding_idx
        self.embed_args = args
        self.embed_kwargs = kwargs
        assert_tesseract_initialization()
        self.tesseract_dim, self.tesseract_dep = get_tesseract_dim_dep_from_env()
        # both the vocabulary and the embedding dim are split q-ways
        self.num_embeddings_per_partition = divide(self.num_embeddings, self.tesseract_dim)
        self.embed_dim_per_partition = divide(self.embed_dim, self.tesseract_dim)
        # this rank's half-open vocabulary slice [start, end)
        tensor_parallel_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
        self.vocab_start_index = tensor_parallel_rank * self.num_embeddings_per_partition
        self.vocab_end_index = self.vocab_start_index + self.num_embeddings_per_partition
        self.weight = Parameter(
            torch.empty((self.num_embeddings_per_partition, self.embed_dim_per_partition),
                        device=get_current_device(),
                        dtype=dtype))
        self.reset_parameters(weight_initializer)
        self._set_tensor_parallel_attributes()
        env.vocab_parallel = True
    def _set_tensor_parallel_attributes(self):
        # q-way vocab split times q-way embed split = q^2-way partition
        set_tensor_parallel_attribute_by_partition(self.weight, self.tesseract_dim**2)
    def reset_parameters(self, weight_initializer) -> None:
        """Initialize the local weight shard inside the tensor-parallel seed context."""
        with seed(ParallelMode.TENSOR):
            fan_in, fan_out = self.num_embeddings, self.embed_dim
            weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
            self._fill_padding_idx_with_zero()
    def _fill_padding_idx_with_zero(self) -> None:
        # zero out the padding row, if one was configured
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)
    def forward(self, input_: Tensor) -> Tensor:
        """Look up embeddings; tokens outside this rank's vocab slice contribute zeros."""
        # Build the mask.
        input_mask = (input_ < self.vocab_start_index) | (input_ >= self.vocab_end_index)
        # Mask the input: shift into local-table coordinates, clamp misses to row 0.
        masked_input = input_.clone() - self.vocab_start_index
        masked_input[input_mask] = 0
        # NOTE(review): padding_idx is passed unshifted while the indices were
        # shifted by vocab_start_index — verify this is intended on ranks > 0.
        output_parallel = F.embedding(masked_input, self.weight, self.padding_idx, *self.embed_args,
                                    **self.embed_kwargs)
        # Mask the output embedding.
        output_parallel[input_mask, :] = 0.
        # Reduce across all the model parallel GPUs.
        output = reduce_scatter_tensor_2p5d(output_parallel, 0, ParallelMode.PARALLEL_2P5D_COL)
        return output
@LAYERS.register_module
class Classifier2p5D(ParallelLayer):
    """
    Classifier for 2.5D parallelism.

    :param in_features: size of each input sample
    :type in_features: int
    :param num_classes: number of classes
    :type num_classes: int
    :param weight: weight of the classifier; when given, the layer shares it and
        does not register its own, defaults to None
    :type weight: torch.nn.Parameter, optional
    :param bias: If set to ``False``, the layer will not learn an additive bias, defaults to True
    :type bias: bool, optional
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param weight_initializer: The initializer of weight, defaults to kaiming uniform initializer
    :type weight_initializer: typing.Callable, optional
    :param bias_initializer: The initializer of bias, defaults to xavier uniform initializer
    :type bias_initializer: typing.Callable, optional
    """
    def __init__(self,
                 in_features: int,
                 num_classes: int,
                 weight: Parameter = None,
                 bias: bool = True,
                 dtype: torch.dtype = None,
                 weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
                 bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1)):
        super().__init__()
        self.in_features = in_features
        self.num_classes = num_classes
        assert_tesseract_initialization()
        self.row_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
        self.col_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
        self.dep_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)
        self.tesseract_dim, self.tesseract_dep = get_tesseract_dim_dep_from_env()
        # partitioning dimension: only in_features is split (q^2-ways)
        self.input_size_per_partition = divide(self.in_features, self.tesseract_dim**2)
        if weight is not None:
            # shared weight supplied by the caller (e.g. tied embeddings)
            self.weight = weight
            self.has_weight = False
        else:
            self.weight = Parameter(
                torch.empty(self.num_classes, self.input_size_per_partition, device=get_current_device(), dtype=dtype))
            self.has_weight = True
        # bias holds the full num_classes vector (not partitioned)
        if bias:
            self.bias = Parameter(torch.zeros(self.num_classes, device=get_current_device(), dtype=dtype))
        else:
            self.bias = None
        self.reset_parameters(weight_initializer, bias_initializer)
        self._set_tensor_parallel_attributes()
    def _set_tensor_parallel_attributes(self):
        # only mark a weight this layer actually owns
        if self.has_weight:
            set_tensor_parallel_attribute_by_partition(self.weight, self.tesseract_dim**2)
    def reset_parameters(self, weight_initializer, bias_initializer) -> None:
        """Initialize parameters and broadcast the bias so all ranks agree on it."""
        with seed(ParallelMode.TENSOR):
            fan_in, fan_out = self.in_features, self.num_classes
            col_src_rank = gpc.get_ranks_in_group(ParallelMode.PARALLEL_2P5D_COL)[0]
            row_src_rank = gpc.get_ranks_in_group(ParallelMode.PARALLEL_2P5D_ROW)[0]
            if self.has_weight:
                weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
            if self.bias is not None:
                bias_initializer(self.bias, fan_in=fan_in)
                # replicate the bias from the first rank of each group
                broadcast(self.bias, col_src_rank, ParallelMode.PARALLEL_2P5D_COL)
                broadcast(self.bias, row_src_rank, ParallelMode.PARALLEL_2P5D_ROW)
    def forward(self, input_: Tensor) -> Tensor:
        """Project the hidden states onto the class logits."""
        out_shape = input_.shape[:-1] + (self.num_classes, )
        return classifier_2p5d(input_, self.weight, self.bias, self.tesseract_dim, out_shape, self.row_rank,
                               self.col_rank, ParallelMode.PARALLEL_2P5D_ROW, ParallelMode.PARALLEL_2P5D_COL,
                               self.data_parallel_rank, self.pipeline_parallel_rank, self.pipeline_parallel_size,
                               self.tensor_parallel_size)
@LAYERS.register_module
class VocabParallelClassifier2p5D(ParallelLayer):
    """
    Vocab parallel classifier layer for 2.5D parallelism.

    Unlike :class:`Classifier2p5D`, the class (vocab) dimension is partitioned
    as well, so the bias is also sharded.

    :param in_features: size of each input sample
    :type in_features: int
    :param num_classes: number of classes
    :type num_classes: int
    :param weight: weight of the classifier; when given, the layer shares it and
        does not register its own, defaults to None
    :type weight: torch.nn.Parameter, optional
    :param bias: If set to ``False``, the layer will not learn an additive bias, defaults to ``True``
    :type bias: bool, optional
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param weight_initializer: The initializer of weight, defaults to kaiming uniform initializer
    :type weight_initializer: typing.Callable, optional
    :param bias_initializer: The initializer of bias, defaults to xavier uniform initializer
    :type bias_initializer: typing.Callable, optional
    """
    def __init__(self,
                 in_features: int,
                 num_classes: int,
                 weight: Parameter = None,
                 bias: bool = True,
                 dtype: torch.dtype = None,
                 weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
                 bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1)):
        super().__init__()
        self.in_features = in_features
        self.num_classes = num_classes
        # parallel setting: this rank's coordinates in the tesseract grid
        assert_tesseract_initialization()
        self.row_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
        self.col_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
        self.dep_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)
        self.tesseract_dim, _ = get_tesseract_dim_dep_from_env()
        # partitioning dimension: both in_features and num_classes split q-ways
        self.input_size_per_partition = divide(in_features, self.tesseract_dim)
        self.hidden_size_per_partition = divide(num_classes, self.tesseract_dim)
        # create weight, shape: [k/q, h/q]
        factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
        if weight is not None:
            # shared weight supplied by the caller (e.g. tied embeddings)
            self.weight = weight
            self.has_weight = False
        else:
            self.weight = Parameter(
                torch.empty(self.hidden_size_per_partition, self.input_size_per_partition, **factory_kwargs))
            self.has_weight = True
        # create bias, shape: [h/q]
        if bias:
            self.bias = Parameter(torch.empty(self.hidden_size_per_partition, **factory_kwargs))
        else:
            self.bias = None
        # initialize parameters (inside the tensor-parallel seed context)
        with seed(ParallelMode.TENSOR):
            self.reset_parameters(weight_initializer, bias_initializer)
        self._set_tensor_parallel_attributes()
        env.vocab_parallel = True
    def _set_tensor_parallel_attributes(self):
        # weight is split q^2-ways (both axes); bias only q-ways (one axis)
        if self.has_weight:
            set_tensor_parallel_attribute_by_partition(self.weight, self.tesseract_dim**2)
        if self.bias is not None:
            set_tensor_parallel_attribute_by_partition(self.bias, self.tesseract_dim)
    def reset_parameters(self, weight_initializer, bias_initializer) -> None:
        """Initialize owned parameters using the *full* fan-in/fan-out."""
        fan_in, fan_out = self.in_features, self.num_classes
        if self.has_weight:
            weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
        if self.bias is not None:
            bias_initializer(self.bias, fan_in=fan_in)
    def forward(self, x: Tensor) -> Tensor:
        """Project hidden states onto the (partitioned) class logits."""
        # input: [m/dq, n/q, k/q]
        # output: [m/dq, n/q, h/q]
        out_shape = x.shape[:-1] + (self.hidden_size_per_partition, )
        output = Matmul_ABT_2p5D.apply(
            x,
            self.weight,
            self.tesseract_dim,
            out_shape,
            self.row_rank,
            self.col_rank,
            self.dep_rank,
            ParallelMode.PARALLEL_2P5D_ROW,
            ParallelMode.PARALLEL_2P5D_COL,
            self.data_parallel_rank,
            self.pipeline_parallel_rank,
            self.pipeline_parallel_size,
            self.tensor_parallel_size,
        )
        if self.bias is not None:
            output = add_bias_2p5d(output, self.bias, self.hidden_size_per_partition, self.tesseract_dim, self.row_rank,
                                   self.col_rank, self.dep_rank, ParallelMode.PARALLEL_2P5D_COL, False,
                                   self.data_parallel_rank, self.pipeline_parallel_rank, self.pipeline_parallel_size,
                                   self.tensor_parallel_size)
        return output
| [
"colossalai.core.global_context.get_group",
"colossalai.core.global_context.get_ranks_in_group",
"colossalai.nn.init.zeros_",
"colossalai.core.global_context.get_local_rank",
"colossalai.context.seed",
"colossalai.utils.cuda.get_current_device",
"colossalai.nn.init.xavier_uniform_",
"colossalai.commun... | [((2068, 2102), 'colossalai.nn.init.xavier_uniform_', 'init.xavier_uniform_', ([], {'a': '(1)', 'scale': '(1)'}), '(a=1, scale=1)\n', (2088, 2102), True, 'from colossalai.nn import init as init\n'), ((2350, 2400), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_COL'], {}), '(ParallelMode.PARALLEL_2P5D_COL)\n', (2368, 2400), True, 'from colossalai.core import global_context as gpc\n'), ((2425, 2475), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_ROW'], {}), '(ParallelMode.PARALLEL_2P5D_ROW)\n', (2443, 2475), True, 'from colossalai.core import global_context as gpc\n'), ((2500, 2550), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_DEP'], {}), '(ParallelMode.PARALLEL_2P5D_DEP)\n', (2518, 2550), True, 'from colossalai.core import global_context as gpc\n'), ((6799, 6849), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_COL'], {}), '(ParallelMode.PARALLEL_2P5D_COL)\n', (6817, 6849), True, 'from colossalai.core import global_context as gpc\n'), ((6874, 6924), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_ROW'], {}), '(ParallelMode.PARALLEL_2P5D_ROW)\n', (6892, 6924), True, 'from colossalai.core import global_context as gpc\n'), ((6949, 6999), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_DEP'], {}), '(ParallelMode.PARALLEL_2P5D_DEP)\n', (6967, 6999), True, 'from colossalai.core import global_context as gpc\n'), ((9347, 9381), 'torch.addcmul', 'torch.addcmul', (['bias', 'scale', 'output'], {}), '(bias, scale, output)\n', (9360, 9381), False, 'import torch\n'), ((10814, 10848), 'colossalai.nn.init.xavier_uniform_', 'init.xavier_uniform_', ([], {'a': '(1)', 'scale': '(1)'}), '(a=1, scale=1)\n', (10834, 10848), True, 'from 
colossalai.nn import init as init\n'), ((10906, 10919), 'colossalai.nn.init.zeros_', 'init.zeros_', ([], {}), '()\n', (10917, 10919), True, 'from colossalai.nn import init as init\n'), ((13754, 13808), 'torch.nn.functional.conv2d', 'F.conv2d', (['input_', 'weight', 'bias'], {'stride': 'self.patch_size'}), '(input_, weight, bias, stride=self.patch_size)\n', (13762, 13808), True, 'import torch.nn.functional as F\n'), ((14174, 14211), 'torch.cat', 'torch.cat', (['(cls_token, output)'], {'dim': '(1)'}), '((cls_token, output), dim=1)\n', (14183, 14211), False, 'import torch\n'), ((15200, 15214), 'colossalai.nn.init.normal_', 'init.normal_', ([], {}), '()\n', (15212, 15214), True, 'from colossalai.nn import init as init\n'), ((16736, 16825), 'torch.nn.functional.embedding', 'F.embedding', (['input_', 'weight', 'self.padding_idx', '*self.embed_args'], {}), '(input_, weight, self.padding_idx, *self.embed_args, **self.\n embed_kwargs)\n', (16747, 16825), True, 'import torch.nn.functional as F\n'), ((17804, 17818), 'colossalai.nn.init.normal_', 'init.normal_', ([], {}), '()\n', (17816, 17818), True, 'from colossalai.nn import init as init\n'), ((18418, 18468), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_COL'], {}), '(ParallelMode.PARALLEL_2P5D_COL)\n', (18436, 18468), True, 'from colossalai.core import global_context as gpc\n'), ((19923, 20022), 'torch.nn.functional.embedding', 'F.embedding', (['masked_input', 'self.weight', 'self.padding_idx', '*self.embed_args'], {}), '(masked_input, self.weight, self.padding_idx, *self.embed_args,\n **self.embed_kwargs)\n', (19934, 20022), True, 'import torch.nn.functional as F\n'), ((21557, 21591), 'colossalai.nn.init.xavier_uniform_', 'init.xavier_uniform_', ([], {'a': '(1)', 'scale': '(1)'}), '(a=1, scale=1)\n', (21577, 21591), True, 'from colossalai.nn import init as init\n'), ((21765, 21815), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', 
(['ParallelMode.PARALLEL_2P5D_COL'], {}), '(ParallelMode.PARALLEL_2P5D_COL)\n', (21783, 21815), True, 'from colossalai.core import global_context as gpc\n'), ((21840, 21890), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_ROW'], {}), '(ParallelMode.PARALLEL_2P5D_ROW)\n', (21858, 21890), True, 'from colossalai.core import global_context as gpc\n'), ((21915, 21965), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_DEP'], {}), '(ParallelMode.PARALLEL_2P5D_DEP)\n', (21933, 21965), True, 'from colossalai.core import global_context as gpc\n'), ((25446, 25480), 'colossalai.nn.init.xavier_uniform_', 'init.xavier_uniform_', ([], {'a': '(1)', 'scale': '(1)'}), '(a=1, scale=1)\n', (25466, 25480), True, 'from colossalai.nn import init as init\n'), ((25683, 25733), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_COL'], {}), '(ParallelMode.PARALLEL_2P5D_COL)\n', (25701, 25733), True, 'from colossalai.core import global_context as gpc\n'), ((25758, 25808), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_ROW'], {}), '(ParallelMode.PARALLEL_2P5D_ROW)\n', (25776, 25808), True, 'from colossalai.core import global_context as gpc\n'), ((25833, 25883), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2P5D_DEP'], {}), '(ParallelMode.PARALLEL_2P5D_DEP)\n', (25851, 25883), True, 'from colossalai.core import global_context as gpc\n'), ((2892, 2912), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (2910, 2912), False, 'from colossalai.utils.cuda import get_current_device\n'), ((2975, 3071), 'torch.empty', 'torch.empty', (['self.input_size_per_partition', 'self.hidden_size_per_partition'], {}), '(self.input_size_per_partition, self.hidden_size_per_partition,\n **factory_kwargs)\n', (2986, 3071), False, 
'import torch\n'), ((3330, 3355), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (3334, 3355), False, 'from colossalai.context import ParallelMode, seed\n'), ((7251, 7271), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (7269, 7271), False, 'from colossalai.utils.cuda import get_current_device\n'), ((7321, 7377), 'torch.ones', 'torch.ones', (['self.partitioned_partition'], {}), '(self.partitioned_partition, **factory_kwargs)\n', (7331, 7377), False, 'import torch\n'), ((7409, 7466), 'torch.zeros', 'torch.zeros', (['self.partitioned_partition'], {}), '(self.partitioned_partition, **factory_kwargs)\n', (7420, 7466), False, 'import torch\n'), ((7785, 7800), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7798, 7800), False, 'import torch\n'), ((7820, 7854), 'torch.sum', 'torch.sum', (['x'], {'dim': '(-1)', 'keepdim': '(True)'}), '(x, dim=-1, keepdim=True)\n', (7829, 7854), False, 'import torch\n'), ((8092, 8130), 'torch.sum', 'torch.sum', (['(x * x)'], {'dim': '(-1)', 'keepdim': '(True)'}), '(x * x, dim=-1, keepdim=True)\n', (8101, 8130), False, 'import torch\n'), ((11535, 11560), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (11539, 11560), False, 'from colossalai.context import ParallelMode, seed\n'), ((12937, 12962), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (12941, 12962), False, 'from colossalai.context import ParallelMode, seed\n'), ((12988, 13038), 'torch.nn.init._calculate_fan_in_and_fan_out', 'nn.init._calculate_fan_in_and_fan_out', (['self.weight'], {}), '(self.weight)\n', (13025, 13038), True, 'import torch.nn as nn\n'), ((16134, 16159), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (16138, 16159), False, 'from colossalai.context import ParallelMode, seed\n'), ((19206, 19231), 'colossalai.context.seed', 'seed', 
(['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (19210, 19231), False, 'from colossalai.context import ParallelMode, seed\n'), ((23019, 23044), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (23023, 23044), False, 'from colossalai.context import ParallelMode, seed\n'), ((26224, 26244), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (26242, 26244), False, 'from colossalai.utils.cuda import get_current_device\n'), ((26797, 26822), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (26801, 26822), False, 'from colossalai.context import ParallelMode, seed\n'), ((2007, 2019), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (2016, 2019), False, 'import math\n'), ((3157, 3218), 'torch.empty', 'torch.empty', (['self.hidden_size_per_partition'], {}), '(self.hidden_size_per_partition, **factory_kwargs)\n', (3168, 3218), False, 'import torch\n'), ((8432, 8473), 'torch.sqrt', 'torch.sqrt', (['(Var_x + self.variance_epsilon)'], {}), '(Var_x + self.variance_epsilon)\n', (8442, 8473), False, 'import torch\n'), ((10753, 10765), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (10762, 10765), False, 'import math\n'), ((16460, 16475), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16473, 16475), False, 'import torch\n'), ((19532, 19547), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19545, 19547), False, 'import torch\n'), ((21496, 21508), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (21505, 21508), False, 'import math\n'), ((23138, 23192), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['ParallelMode.PARALLEL_2P5D_COL'], {}), '(ParallelMode.PARALLEL_2P5D_COL)\n', (23160, 23192), True, 'from colossalai.core import global_context as gpc\n'), ((23223, 23277), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['ParallelMode.PARALLEL_2P5D_ROW'], {}), 
'(ParallelMode.PARALLEL_2P5D_ROW)\n', (23245, 23277), True, 'from colossalai.core import global_context as gpc\n'), ((23508, 23574), 'colossalai.communication.broadcast', 'broadcast', (['self.bias', 'col_src_rank', 'ParallelMode.PARALLEL_2P5D_COL'], {}), '(self.bias, col_src_rank, ParallelMode.PARALLEL_2P5D_COL)\n', (23517, 23574), False, 'from colossalai.communication import broadcast\n'), ((23591, 23657), 'colossalai.communication.broadcast', 'broadcast', (['self.bias', 'row_src_rank', 'ParallelMode.PARALLEL_2P5D_ROW'], {}), '(self.bias, row_src_rank, ParallelMode.PARALLEL_2P5D_ROW)\n', (23600, 23657), False, 'from colossalai.communication import broadcast\n'), ((25385, 25397), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (25394, 25397), False, 'import math\n'), ((26429, 26525), 'torch.empty', 'torch.empty', (['self.hidden_size_per_partition', 'self.input_size_per_partition'], {}), '(self.hidden_size_per_partition, self.input_size_per_partition,\n **factory_kwargs)\n', (26440, 26525), False, 'import torch\n'), ((26645, 26706), 'torch.empty', 'torch.empty', (['self.hidden_size_per_partition'], {}), '(self.hidden_size_per_partition, **factory_kwargs)\n', (26656, 26706), False, 'import torch\n'), ((7922, 7967), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.PARALLEL_2P5D_ROW'], {}), '(ParallelMode.PARALLEL_2P5D_ROW)\n', (7935, 7967), True, 'from colossalai.core import global_context as gpc\n'), ((8200, 8245), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.PARALLEL_2P5D_ROW'], {}), '(ParallelMode.PARALLEL_2P5D_ROW)\n', (8213, 8245), True, 'from colossalai.core import global_context as gpc\n'), ((15791, 15811), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (15809, 15811), False, 'from colossalai.utils.cuda import get_current_device\n'), ((18805, 18825), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (18823, 18825), False, 'from 
colossalai.utils.cuda import get_current_device\n'), ((11723, 11743), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (11741, 11743), False, 'from colossalai.utils.cuda import get_current_device\n'), ((11871, 11891), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (11889, 11891), False, 'from colossalai.utils.cuda import get_current_device\n'), ((12022, 12042), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (12040, 12042), False, 'from colossalai.utils.cuda import get_current_device\n'), ((12219, 12239), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (12237, 12239), False, 'from colossalai.utils.cuda import get_current_device\n'), ((22406, 22426), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (22424, 22426), False, 'from colossalai.utils.cuda import get_current_device\n'), ((22565, 22585), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (22583, 22585), False, 'from colossalai.utils.cuda import get_current_device\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from typing import List, Tuple, Union
import torch
import torch.distributed as dist
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device
from functools import reduce
import operator
from .utils import split_tensor_into_1d_equal_chunks, gather_split_1d_tensor
TensorShape = Union[torch.Size, List[int], Tuple[int]]
def _get_tensor_shape(tensor_shape: TensorShape, chunk_tensor: bool = False) -> Tuple[TensorShape, bool]:
    """Resolve the shape actually used on the wire for a p2p transfer.

    When ``chunk_tensor`` is set, the tensor is flattened and divided evenly
    across the tensor-parallel group; if the element count is not divisible
    by the group size, chunking is abandoned and the original shape is kept.

    Args:
        tensor_shape (:class:`torch.Size`): shape of the tensor to communicate.
        chunk_tensor (bool, optional): whether to chunk the tensor, defaults to False.

    Returns:
        Tuple[Union[:class:`torch.Size`, List[int], Tuple[int]], bool]: the shape
        (or per-rank element count) to communicate, and whether chunking is applied.
    """
    if not chunk_tensor:
        return tensor_shape, False
    numel = reduce(operator.mul, tensor_shape, 1)
    world_size = gpc.get_world_size(ParallelMode.TENSOR)
    if numel % world_size != 0:
        # uneven split across the tensor-parallel group: fall back to no chunking
        return tensor_shape, False
    return numel // world_size, True
def create_recv_buffer_with_shapes(recv_shapes, dtype, scatter_gather_tensors):
    """Allocate receive buffer(s) matching ``recv_shapes``.

    Args:
        recv_shapes: a single :class:`torch.Size` or a list of them.
        dtype (torch.dtype): dtype of the allocated buffer(s).
        scatter_gather_tensors (bool): whether the scatter/gather optimization
            (1D chunking) is enabled.

    Returns:
        the buffer (or list of buffers) and whether the received data is a chunk.
    """

    def _make_buffer(shape):
        # Purpose: allocate one empty buffer for a single logical shape.
        chunk_shape, split = _get_tensor_shape(shape, scatter_gather_tensors)
        buf = torch.empty(chunk_shape, requires_grad=True, device=get_current_device(), dtype=dtype)
        return buf, split

    if isinstance(recv_shapes, torch.Size):
        return _make_buffer(recv_shapes)
    buffers = []
    for shape in recv_shapes:
        buf, recv_split = _make_buffer(shape)
        buffers.append(buf)
    return buffers, recv_split
def process_object_to_send(object_send, scatter_gather_tensors):
    """Optionally split tensor(s) into flat 1D chunks before a p2p send.

    Args:
        object_send (Union[torch.Tensor, List[torch.Tensor]]): tensor or list
            of tensors about to be sent.
        scatter_gather_tensors (bool): whether the scatter/gather optimization
            is enabled.

    Returns:
        the (possibly chunked) tensor, or the list with chunked entries.
    """
    if isinstance(object_send, torch.Tensor):
        send_split = _get_tensor_shape(object_send.shape, scatter_gather_tensors)[1]
        if send_split:
            object_send = split_tensor_into_1d_equal_chunks(object_send)
        return object_send
    # Fix: write the split result back into the list. The original loop only
    # rebound the loop variable, so list entries were never actually chunked
    # even though the matching receive buffers expect chunks.
    for index, tensor_send in enumerate(object_send):
        send_split = _get_tensor_shape(tensor_send.shape, scatter_gather_tensors)[1]
        if send_split:
            object_send[index] = split_tensor_into_1d_equal_chunks(tensor_send)
    return object_send
def filling_ops_queue(obj, comm_op, comm_rank, ops_queue):
    """Append a :class:`torch.distributed.P2POp` descriptor to ``ops_queue``
    for ``obj`` (one op if it is a tensor, one per element otherwise).

    Args:
        obj: a tensor or an iterable of tensors to communicate.
        comm_op: the p2p primitive, e.g. ``dist.isend`` or ``dist.irecv``.
        comm_rank (int): the peer rank for the operation.
        ops_queue (list): queue the op descriptors are appended to.
    """
    tensors = [obj] if isinstance(obj, torch.Tensor) else obj
    for tensor_to_comm in tensors:
        ops_queue.append(dist.P2POp(comm_op, tensor_to_comm, comm_rank))
def _communicate(object_send_next: Union[torch.Tensor, List[torch.Tensor]] = None,
                 object_send_prev: Union[torch.Tensor, List[torch.Tensor]] = None,
                 recv_prev: bool = False,
                 recv_next: bool = False,
                 recv_prev_shape: Union[torch.Size, List[torch.Size]] = None,
                 recv_next_shape: Union[torch.Size, List[torch.Size]] = None,
                 prev_rank: int = None,
                 next_rank: int = None,
                 dtype: torch.dtype = None,
                 scatter_gather_tensors: bool = False) -> Tuple[Union[torch.Tensor, List[torch.Tensor]]]:
    """
    Adapted from megatron.p2p_communication.
    Communicate tensors between pipeline stages. Used as a helper in the other
    communication methods of the pipeline schedule.

    Args:
        object_send_next (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): tensor to send to next rank (no tensor sent if
                          set to None).
        object_send_prev (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): tensor to send to prev rank (no tensor sent if
                          set to None).
        recv_prev (bool): whether a tensor should be received from the previous rank.
        recv_next (bool): whether a tensor should be received from the next rank.
        recv_prev_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): shape(s) expected from the previous stage.
        recv_next_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): shape(s) expected from the next stage.
        prev_rank (int): rank of the previous pipeline stage, defaults to None (resolved from gpc).
        next_rank (int): rank of the next pipeline stage, defaults to None (resolved from gpc).
        dtype (torch.dtype): data type of intermediate buffers, defaults to None.
        scatter_gather_tensors (bool): whether to scatter and gather tensors between pipeline stages, defaults to False.

    Returns:
        Tuple[Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]]: (tensor_recv_prev, tensor_recv_next)
    """
    # Create placeholder tensors for receive in forward and backward directions
    # if needed.
    tensor_recv_prev = None
    tensor_recv_next = None
    if recv_prev:
        assert recv_prev_shape is not None
        tensor_recv_prev, recv_prev_split = create_recv_buffer_with_shapes(recv_prev_shape, dtype,
                                                                           scatter_gather_tensors)
    if recv_next:
        assert recv_next_shape is not None
        tensor_recv_next, recv_next_split = create_recv_buffer_with_shapes(recv_next_shape, dtype,
                                                                           scatter_gather_tensors)
    # Resolve peer ranks lazily; only needed when actually communicating that way.
    if object_send_prev is not None or recv_prev:
        if prev_rank is None:
            prev_rank = gpc.get_prev_global_rank(ParallelMode.PIPELINE)
    if object_send_next is not None or recv_next:
        if next_rank is None:
            next_rank = gpc.get_next_global_rank(ParallelMode.PIPELINE)
    if object_send_prev is not None:
        object_send_prev = process_object_to_send(object_send_prev, scatter_gather_tensors)
    if object_send_next is not None:
        object_send_next = process_object_to_send(object_send_next, scatter_gather_tensors)
    ops = []
    if object_send_prev is not None:
        filling_ops_queue(object_send_prev, dist.isend, prev_rank, ops)
    if tensor_recv_prev is not None:
        filling_ops_queue(tensor_recv_prev, dist.irecv, prev_rank, ops)
    if tensor_recv_next is not None:
        filling_ops_queue(tensor_recv_next, dist.irecv, next_rank, ops)
    if object_send_next is not None:
        filling_ops_queue(object_send_next, dist.isend, next_rank, ops)
    if len(ops) > 0:
        reqs = dist.batch_isend_irecv(ops)
        for req in reqs:
            req.wait()
        # To protect against race condition when using batch_isend_irecv().
        torch.cuda.synchronize()
    if recv_prev and recv_prev_split:
        if isinstance(tensor_recv_prev, torch.Tensor):
            tensor_recv_prev = gather_split_1d_tensor(tensor_recv_prev).view(recv_prev_shape).requires_grad_()
        else:
            # Fix: store gathered tensors back into the list. The original loop
            # rebound the loop variable, discarding every gathered result.
            for index, tensor_shape in enumerate(recv_prev_shape):
                tensor_recv_prev[index] = \
                    gather_split_1d_tensor(tensor_recv_prev[index]).view(tensor_shape).requires_grad_()
    if recv_next and recv_next_split:
        if isinstance(tensor_recv_next, torch.Tensor):
            tensor_recv_next = gather_split_1d_tensor(tensor_recv_next).view(recv_next_shape).requires_grad_()
        else:
            # Fix: same list write-back issue as above.
            for index, tensor_shape in enumerate(recv_next_shape):
                tensor_recv_next[index] = \
                    gather_split_1d_tensor(tensor_recv_next[index]).view(tensor_shape).requires_grad_()
    return tensor_recv_prev, tensor_recv_next
def recv_forward(input_tensor_shape,
                 prev_rank=None,
                 dtype=torch.float,
                 scatter_gather_tensors=False) -> Union[torch.Tensor, List[torch.Tensor]]:
    """Receive the forward output of the previous pipeline stage as this
    stage's input tensor.

    Args:
        input_tensor_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): shape(s) of the tensor(s) to receive.
        prev_rank (int, optional): rank of the sending stage.
        dtype (torch.dtype, optional): dtype of the receive buffer.
        scatter_gather_tensors (bool, optional): enable the scatter/gather optimization.

    Returns:
        Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: the received input, or None on the first stage.
    """
    if gpc.is_pipeline_first_stage():
        # the first stage has no upstream stage to receive from
        return None
    input_tensor, _ = _communicate(recv_prev=True,
                                   recv_prev_shape=input_tensor_shape,
                                   prev_rank=prev_rank,
                                   dtype=dtype,
                                   scatter_gather_tensors=scatter_gather_tensors)
    return input_tensor
def recv_backward(output_grad_shape,
                  next_rank=None,
                  dtype=torch.float,
                  scatter_gather_tensors=False) -> Union[torch.Tensor, List[torch.Tensor]]:
    """Receive the gradient of this stage's output from the next pipeline
    stage.

    Args:
        output_grad_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): shape(s) of the gradient tensor(s) to receive.
        next_rank (int, optional): rank of the sending stage.
        dtype (torch.dtype, optional): dtype of the receive buffer.
        scatter_gather_tensors (bool, optional): enable the scatter/gather optimization.

    Returns:
        Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: the received gradient, or None on the last stage.
    """
    if gpc.is_pipeline_last_stage():
        # nothing downstream to receive gradients from
        return None
    _, output_tensor_grad = _communicate(recv_next=True,
                                          recv_next_shape=output_grad_shape,
                                          next_rank=next_rank,
                                          dtype=dtype,
                                          scatter_gather_tensors=scatter_gather_tensors)
    return output_tensor_grad
def send_forward(output_tensor, next_rank=None, scatter_gather_tensors=False) -> None:
    """Send this stage's output tensor to the next pipeline stage.

    Args:
        output_tensor (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): tensor(s) to send.
        next_rank (int, optional): rank of the receiving stage.
        scatter_gather_tensors (bool, optional): enable the scatter/gather optimization.
    """
    if gpc.is_pipeline_last_stage():
        # the last stage has no downstream consumer
        return
    _communicate(object_send_next=output_tensor,
                 next_rank=next_rank,
                 scatter_gather_tensors=scatter_gather_tensors)
def send_backward(input_tensor_grad, prev_rank=None, scatter_gather_tensors=False) -> None:
    """Send the gradient w.r.t. this stage's input to the previous pipeline
    stage.

    Args:
        input_tensor_grad (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): gradient tensor(s) to send.
        prev_rank (int, optional): rank of the receiving stage.
        scatter_gather_tensors (bool, optional): enable the scatter/gather optimization.
    """
    if gpc.is_pipeline_first_stage():
        # the first stage has no upstream stage to send gradients to
        return
    _communicate(object_send_prev=input_tensor_grad,
                 prev_rank=prev_rank,
                 scatter_gather_tensors=scatter_gather_tensors)
def send_forward_recv_backward(output_tensor,
                               output_grad_shape,
                               recv_next=True,
                               next_rank=None,
                               dtype=torch.float,
                               scatter_gather_tensors=False) -> Union[torch.Tensor, List[torch.Tensor]]:
    """Batched exchange with the next pipeline stage: send this stage's output
    forward while receiving the corresponding output gradient.

    Args:
        output_tensor (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): tensor(s) to send.
        output_grad_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): shape(s) of the gradient to receive.
        recv_next (bool, optional): whether to receive from the next stage.
        next_rank (int, optional): rank of the peer stage.
        dtype (torch.dtype, optional): dtype of the receive buffer.
        scatter_gather_tensors (bool, optional): enable the scatter/gather optimization.

    Returns:
        Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: the received gradient, or None on the last stage.
    """
    if gpc.is_pipeline_last_stage():
        return None
    result = _communicate(object_send_next=output_tensor,
                          recv_next=recv_next,
                          recv_next_shape=output_grad_shape,
                          next_rank=next_rank,
                          dtype=dtype,
                          scatter_gather_tensors=scatter_gather_tensors)
    # _communicate returns (tensor_recv_prev, tensor_recv_next)
    return result[1]
def send_backward_recv_forward(input_tensor_grad,
                               input_tensor_shape,
                               recv_prev=True,
                               prev_rank=None,
                               dtype=torch.float,
                               scatter_gather_tensors=False) -> Union[torch.Tensor, List[torch.Tensor]]:
    """Batched exchange with the previous pipeline stage: send the input
    gradient backward while receiving the next input tensor.

    Args:
        input_tensor_grad (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): gradient tensor(s) to send.
        input_tensor_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): shape(s) of the input to receive.
        recv_prev (bool, optional): whether to receive from the previous stage.
        prev_rank (int, optional): rank of the peer stage.
        dtype (torch.dtype, optional): dtype of the receive buffer.
        scatter_gather_tensors (bool, optional): enable the scatter/gather optimization.

    Returns:
        Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: the received input, or None on the first stage.
    """
    if gpc.is_pipeline_first_stage():
        return None
    result = _communicate(object_send_prev=input_tensor_grad,
                          recv_prev=recv_prev,
                          recv_prev_shape=input_tensor_shape,
                          prev_rank=prev_rank,
                          dtype=dtype,
                          scatter_gather_tensors=scatter_gather_tensors)
    # _communicate returns (tensor_recv_prev, tensor_recv_next)
    return result[0]
def send_forward_recv_forward(output_tensor,
                              input_tensor_shape,
                              recv_prev=True,
                              prev_rank=None,
                              next_rank=None,
                              dtype=torch.float,
                              scatter_gather_tensors=False) -> Union[torch.Tensor, List[torch.Tensor]]:
    """Batched operation: send this stage's output to the next stage while
    receiving a new input tensor from the previous stage.

    Args:
        output_tensor (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): tensor(s) to send forward.
        input_tensor_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): shape(s) of the input to receive.
        recv_prev (bool, optional): whether to receive from the previous stage.
        prev_rank (int, optional): rank of the previous stage.
        next_rank (int, optional): rank of the next stage.
        dtype (torch.dtype, optional): dtype of the receive buffer.
        scatter_gather_tensors (bool, optional): enable the scatter/gather optimization.

    Returns:
        Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: the received input tensor.
    """
    result = _communicate(object_send_next=output_tensor,
                          recv_prev=recv_prev,
                          recv_prev_shape=input_tensor_shape,
                          prev_rank=prev_rank,
                          next_rank=next_rank,
                          dtype=dtype,
                          scatter_gather_tensors=scatter_gather_tensors)
    # _communicate returns (tensor_recv_prev, tensor_recv_next)
    return result[0]
def send_backward_recv_backward(input_tensor_grad,
                                output_grad_shape,
                                recv_next=True,
                                prev_rank=None,
                                next_rank=None,
                                dtype=torch.float,
                                scatter_gather_tensors=False) -> Union[torch.Tensor, List[torch.Tensor]]:
    """Batched operation: send the input gradient to the previous stage while
    receiving the output gradient from the next stage.

    Args:
        input_tensor_grad (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): gradient tensor(s) to send backward.
        output_grad_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): shape(s) of the gradient to receive.
        recv_next (bool, optional): whether to receive from the next stage.
        prev_rank (int, optional): rank of the previous stage.
        next_rank (int, optional): rank of the next stage.
        dtype (torch.dtype, optional): dtype of the receive buffer.
        scatter_gather_tensors (bool, optional): enable the scatter/gather optimization.

    Returns:
        Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: the received output gradient.
    """
    result = _communicate(object_send_prev=input_tensor_grad,
                          recv_next=recv_next,
                          recv_next_shape=output_grad_shape,
                          prev_rank=prev_rank,
                          next_rank=next_rank,
                          dtype=dtype,
                          scatter_gather_tensors=scatter_gather_tensors)
    # _communicate returns (tensor_recv_prev, tensor_recv_next)
    return result[1]
def send_forward_backward_recv_forward_backward(
        output_tensor,
        input_tensor_grad,
        input_tensor_shape,
        output_grad_shape,
        recv_prev=True,
        recv_next=True,
        prev_rank=None,
        next_rank=None,
        dtype=torch.float,
        scatter_gather_tensors=False) -> Tuple[Union[torch.Tensor, List[torch.Tensor]]]:
    """Fully batched exchange with both neighbors: send the output forward and
    the input gradient backward, while receiving the next input from the
    previous stage and the output gradient from the next stage.

    Args:
        output_tensor (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): tensor(s) sent to the next stage.
        input_tensor_grad (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): tensor(s) sent to the previous stage.
        input_tensor_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): shape(s) received from the previous stage.
        output_grad_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): shape(s) received from the next stage.
        recv_prev (bool, optional): whether to receive from the previous stage.
        recv_next (bool, optional): whether to receive from the next stage.
        prev_rank (int, optional): rank of the previous stage.
        next_rank (int, optional): rank of the next stage.
        dtype (torch.dtype, optional): dtype of the receive buffers.
        scatter_gather_tensors (bool, optional): enable the scatter/gather optimization.

    Returns:
        Tuple: (received input tensor, received output gradient tensor).
    """
    # _communicate already returns (tensor_recv_prev, tensor_recv_next),
    # which is exactly this function's contract.
    return _communicate(object_send_next=output_tensor,
                        object_send_prev=input_tensor_grad,
                        recv_prev=recv_prev,
                        recv_next=recv_next,
                        recv_prev_shape=input_tensor_shape,
                        recv_next_shape=output_grad_shape,
                        prev_rank=prev_rank,
                        next_rank=next_rank,
                        dtype=dtype,
                        scatter_gather_tensors=scatter_gather_tensors)
| [
"colossalai.core.global_context.get_prev_global_rank",
"colossalai.core.global_context.is_pipeline_last_stage",
"colossalai.core.global_context.is_pipeline_first_stage",
"colossalai.utils.get_current_device",
"colossalai.core.global_context.get_world_size",
"colossalai.core.global_context.get_next_global_... | [((7203, 7227), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (7225, 7227), False, 'import torch\n'), ((8731, 8760), 'colossalai.core.global_context.is_pipeline_first_stage', 'gpc.is_pipeline_first_stage', ([], {}), '()\n', (8758, 8760), True, 'from colossalai.core import global_context as gpc\n'), ((9810, 9838), 'colossalai.core.global_context.is_pipeline_last_stage', 'gpc.is_pipeline_last_stage', ([], {}), '()\n', (9836, 9838), True, 'from colossalai.core import global_context as gpc\n'), ((12270, 12298), 'colossalai.core.global_context.is_pipeline_last_stage', 'gpc.is_pipeline_last_stage', ([], {}), '()\n', (12296, 12298), True, 'from colossalai.core import global_context as gpc\n'), ((13726, 13755), 'colossalai.core.global_context.is_pipeline_first_stage', 'gpc.is_pipeline_first_stage', ([], {}), '()\n', (13753, 13755), True, 'from colossalai.core import global_context as gpc\n'), ((1011, 1048), 'functools.reduce', 'reduce', (['operator.mul', 'tensor_shape', '(1)'], {}), '(operator.mul, tensor_shape, 1)\n', (1017, 1048), False, 'from functools import reduce\n'), ((1086, 1125), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (1104, 1125), True, 'from colossalai.core import global_context as gpc\n'), ((2853, 2888), 'torch.distributed.P2POp', 'dist.P2POp', (['comm_op', 'obj', 'comm_rank'], {}), '(comm_op, obj, comm_rank)\n', (2863, 2888), True, 'import torch.distributed as dist\n'), ((7051, 7078), 'torch.distributed.batch_isend_irecv', 'dist.batch_isend_irecv', (['ops'], {}), '(ops)\n', (7073, 7078), True, 'import torch.distributed as dist\n'), ((10629, 10657), 'colossalai.core.global_context.is_pipeline_last_stage', 'gpc.is_pipeline_last_stage', ([], {}), '()\n', (10655, 10657), True, 'from colossalai.core import global_context as gpc\n'), ((11152, 11181), 
'colossalai.core.global_context.is_pipeline_first_stage', 'gpc.is_pipeline_first_stage', ([], {}), '()\n', (11179, 11181), True, 'from colossalai.core import global_context as gpc\n'), ((2994, 3040), 'torch.distributed.P2POp', 'dist.P2POp', (['comm_op', 'tensor_to_comm', 'comm_rank'], {}), '(comm_op, tensor_to_comm, comm_rank)\n', (3004, 3040), True, 'import torch.distributed as dist\n'), ((6100, 6147), 'colossalai.core.global_context.get_prev_global_rank', 'gpc.get_prev_global_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (6124, 6147), True, 'from colossalai.core import global_context as gpc\n'), ((6253, 6300), 'colossalai.core.global_context.get_next_global_rank', 'gpc.get_next_global_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (6277, 6300), True, 'from colossalai.core import global_context as gpc\n'), ((1761, 1781), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (1779, 1781), False, 'from colossalai.utils import get_current_device\n'), ((2063, 2083), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (2081, 2083), False, 'from colossalai.utils import get_current_device\n')] |
import torch.nn as nn
from colossalai.context import ParallelMode, seed
from ..parallel_1d import *
from ..utils import get_tensor_parallel_mode
from ._utils import ColossalaiModule
class Dropout(ColossalaiModule):
    """Dropout layer of colossalai.

    Picks the 1D-parallel dropout implementation when the tensor-parallel mode
    is ``'1d'``, otherwise falls back to :class:`torch.nn.Dropout`.

    Args:
        p (float, optional): probability of an element to be zeroed, defaults 0.5.
        inplace (bool, optional): whether to do dropout in-place, default to be False.
    """

    def __init__(self, p: float = 0.5, inplace: bool = False) -> None:
        tensor_parallel = get_tensor_parallel_mode()
        drop = Dropout1D(p, inplace) if tensor_parallel == "1d" else nn.Dropout(p, inplace)
        super().__init__(drop, tensor_parallel=tensor_parallel)

    def forward(self, *args):
        if self.tensor_parallel not in [None, '1d']:
            # other tensor-parallel modes need a synchronized RNG state
            with seed(ParallelMode.TENSOR):
                return self._forward_func(*args)
        return self._forward_func(*args)
| [
"colossalai.context.seed"
] | [((697, 719), 'torch.nn.Dropout', 'nn.Dropout', (['p', 'inplace'], {}), '(p, inplace)\n', (707, 719), True, 'import torch.nn as nn\n'), ((947, 972), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (951, 972), False, 'from colossalai.context import ParallelMode, seed\n')] |
from typing import Tuple, Optional
import torch
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from fastfold.model.fastnn import MSAStack, OutProductMean, PairStack
from fastfold.distributed.comm_async import All_to_All_Async, All_to_All_Async_Opp
from fastfold.distributed.comm import gather, scatter
class EvoformerBlock(nn.Module):
    """One fastnn Evoformer block that runs the MSA stack, the outer-product
    communication, and the pair stack under tensor parallelism.

    The first block pads and scatters the MSA/pair representations across the
    tensor-parallel group; the last block gathers them back and strips the
    padding, so intermediate blocks operate on partitioned tensors.
    """

    def __init__(self, c_m: int, c_z: int, first_block: bool, last_block: bool):
        # c_m: MSA channel dim; c_z: pair channel dim.
        # first_block/last_block flag whether this block must scatter inputs /
        # gather outputs (see forward).
        super(EvoformerBlock, self).__init__()
        self.first_block = first_block
        self.last_block = last_block
        self.msa_stack = MSAStack(c_m, c_z, p_drop=0.15)
        self.communication = OutProductMean(n_feat=c_m, n_feat_out=c_z, n_feat_proj=32)
        self.pair_stack = PairStack(d_pair=c_z)

    def forward(
        self,
        m: torch.Tensor,
        z: torch.Tensor,
        msa_mask: torch.Tensor,
        pair_mask: torch.Tensor,
        chunk_size: Optional[int] = None,
        _mask_trans: bool = True,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # NOTE: chunk_size and _mask_trans are accepted for interface
        # compatibility with the original OpenFold block; they are unused here.
        dap_size = gpc.get_world_size(ParallelMode.TENSOR)
        seq_length = pair_mask.size(-1)
        # Pad the sequence dim up to the NEXT multiple of dap_size; the +1
        # means padding_size is always > 0, which the negative slicing in the
        # last-block branch relies on.
        padding_size = (int(seq_length / dap_size) + 1) * dap_size - seq_length
        if self.first_block:
            # Add a leading batch dim, pad the sequence axes, and partition
            # across the tensor-parallel group along dim 1.
            m = m.unsqueeze(0)
            z = z.unsqueeze(0)
            m = torch.nn.functional.pad(m, (0, 0, 0, padding_size))
            z = torch.nn.functional.pad(z, (0, 0, 0, padding_size, 0, padding_size))
            m = scatter(m, dim=1)
            z = scatter(z, dim=1)
        # Masks arrive unbatched every call, so they are padded in every block.
        msa_mask = msa_mask.unsqueeze(0)
        pair_mask = pair_mask.unsqueeze(0)
        msa_mask = torch.nn.functional.pad(msa_mask, (0, padding_size))
        pair_mask = torch.nn.functional.pad(pair_mask, (0, padding_size, 0, padding_size))
        m = self.msa_stack(m, z, msa_mask)
        z = z + self.communication(m, msa_mask)
        # Asynchronous all-to-all: start the redistribution of m (dims 1<->2)
        # and overlap it with the pair stack, then complete it.
        m, work = All_to_All_Async.apply(m, 1, 2)
        z = self.pair_stack(z, pair_mask)
        m = All_to_All_Async_Opp.apply(m, work, 1, 2)
        if self.last_block:
            # Reassemble full tensors, drop the batch dim and the padding.
            m = m.squeeze(0)
            z = z.squeeze(0)
            m = gather(m, dim=0)
            z = gather(z, dim=0)
            m = m[:, :-padding_size, :]
            z = z[:-padding_size, :-padding_size, :]
        return m, z
def copy_layernorm(model_fast, model_ori):
    """Copy LayerNorm affine parameters (weight, then bias) from the original
    module into the fastnn module, in place."""
    for attr in ("weight", "bias"):
        getattr(model_fast, attr).copy_(getattr(model_ori, attr))
def copy_linear(model_fast, model_ori):
    """Copy linear-layer weights (and the bias, when the fast module keeps
    one, as flagged by its ``use_bias`` attribute) in place."""
    model_fast.weight.copy_(model_ori.weight)
    if not model_fast.use_bias:
        return
    model_fast.bias.copy_(model_ori.bias)
def copy_qkv_linear(model_fast, ori_q, ori_k, ori_v):
    """Fuse the separate Q/K/V projection weights into the fast module's
    single fused QKV weight (concatenated along the output dim)."""
    fused = torch.cat((ori_q.weight, ori_k.weight, ori_v.weight), dim=0)
    model_fast.weight.copy_(fused)
def copy_attention(model_fast, model_ori):
    """Copy attention parameters (fused QKV, gating projection, output
    projection, and the output bias when present) from the original attention
    module into the fastnn attention module, in place.

    Some fastnn attention variants do not define ``gating_bias``; in that case
    the copy is skipped with a notice.
    """
    copy_qkv_linear(model_fast.to_qkv, model_ori.linear_q, model_ori.linear_k, model_ori.linear_v)
    copy_linear(model_fast.gating_linear, model_ori.linear_g)
    copy_linear(model_fast.o_linear, model_ori.linear_o)
    # Fix: the original used a bare `except:`, which also swallows
    # KeyboardInterrupt/SystemExit. Per the message, only a missing
    # `gating_bias` attribute is expected here.
    try:
        model_fast.gating_bias.copy_(model_ori.linear_g.bias)
    except AttributeError:
        print("no gating_bias need copy")
def copy_left_right(model_fast, ori_left, ori_right):
    """Fuse the left/right projection parameters into the fast module's single
    fused layer (weights and biases concatenated along the output dim)."""
    for attr in ("weight", "bias"):
        fused = torch.cat((getattr(ori_left, attr), getattr(ori_right, attr)), dim=0)
        getattr(model_fast, attr).copy_(fused)
def copy_transition(model_fast, model_ori):
    """Copy a transition (norm + two-layer MLP) block's parameters from the
    original module into the fastnn module, in place."""
    copy_layernorm(model_fast.norm, model_ori.layer_norm)
    copy_linear(model_fast.linear1, model_ori.linear_1)
    copy_linear(model_fast.linear2, model_ori.linear_2)
def copy_triangle(model_fast, model_ori):
    """Copy a triangle-multiplication block's parameters from the original
    module into the fastnn module, in place.

    The fastnn module fuses the a/b ("left"/"right") projections and gates,
    so those pairs are concatenated via ``copy_left_right``.
    """
    copy_layernorm(model_fast.layernorm1, model_ori.layer_norm_in)
    copy_layernorm(model_fast.layernorm2, model_ori.layer_norm_out)
    copy_linear(model_fast.output_gate, model_ori.linear_g)
    copy_linear(model_fast.output_projection, model_ori.linear_z)
    # separate output bias tensor on the fast module
    model_fast.output_bias.copy_(model_ori.linear_z.bias)
    copy_left_right(model_fast.left_right_projection, model_ori.linear_a_p, model_ori.linear_b_p)
    copy_left_right(model_fast.left_right_gate, model_ori.linear_a_g, model_ori.linear_b_g)
def copy_triangle_att(model_fast, model_ori):
    """Copy a triangle-attention block's parameters from the original module
    into the fastnn module, in place."""
    copy_layernorm(model_fast.layernorm1, model_ori.layer_norm)
    copy_linear(model_fast.linear_b, model_ori.linear)
    copy_attention(model_fast.attention, model_ori.mha)
    # separate output bias tensor on the fast module
    model_fast.out_bias.copy_(model_ori.mha.linear_o.bias)
def copy_para(block_fast, block_ori):
    """Copy every parameter of one original Evoformer block into the matching
    fastnn block, in place.

    Called by ``inject_fastnn`` inside a ``torch.no_grad()`` context (the
    ``copy_`` calls on parameters require it).
    """
    # msa_stack
    # MSARowAttentionWithPairBias
    copy_layernorm(block_fast.msa_stack.MSARowAttentionWithPairBias.layernormM,
                   block_ori.msa_att_row.layer_norm_m)
    copy_layernorm(block_fast.msa_stack.MSARowAttentionWithPairBias.layernormZ,
                   block_ori.msa_att_row.layer_norm_z)
    copy_attention(block_fast.msa_stack.MSARowAttentionWithPairBias.attention,
                   block_ori.msa_att_row.mha)
    block_fast.msa_stack.MSARowAttentionWithPairBias.linear_b_weights.copy_(
        block_ori.msa_att_row.linear_z.weight)
    block_fast.msa_stack.MSARowAttentionWithPairBias.out_bias.copy_(
        block_ori.msa_att_row.mha.linear_o.bias)
    # MSAColumnAttention
    copy_layernorm(block_fast.msa_stack.MSAColumnAttention.layernormM,
                   block_ori.msa_att_col._msa_att.layer_norm_m)
    copy_attention(block_fast.msa_stack.MSAColumnAttention.attention,
                   block_ori.msa_att_col._msa_att.mha)
    # MSATransition
    copy_transition(block_fast.msa_stack.MSATransition, block_ori.core.msa_transition)
    # communication (outer product mean)
    copy_layernorm(block_fast.communication.layernormM,
                   block_ori.core.outer_product_mean.layer_norm)
    copy_linear(block_fast.communication.linear_a, block_ori.core.outer_product_mean.linear_1)
    copy_linear(block_fast.communication.linear_b, block_ori.core.outer_product_mean.linear_2)
    copy_linear(block_fast.communication.o_linear, block_ori.core.outer_product_mean.linear_out)
    # pair_stack
    # TriangleMultiplicationOutgoing
    copy_triangle(block_fast.pair_stack.TriangleMultiplicationOutgoing, block_ori.core.tri_mul_out)
    # TriangleMultiplicationIncoming
    copy_triangle(block_fast.pair_stack.TriangleMultiplicationIncoming, block_ori.core.tri_mul_in)
    # TriangleAttentionStartingNode
    copy_triangle_att(block_fast.pair_stack.TriangleAttentionStartingNode,
                      block_ori.core.tri_att_start)
    # TriangleAttentionEndingNode
    copy_triangle_att(block_fast.pair_stack.TriangleAttentionEndingNode, block_ori.core.tri_att_end)
    # PairTransition
    copy_transition(block_fast.pair_stack.PairTransition, block_ori.core.pair_transition)
def inject_fastnn(model):
    """Replace the Evoformer blocks of ``model`` with fastnn EvoformerBlock
    instances carrying the same parameters.

    Args:
        model: a model exposing ``model.evoformer.blocks``.

    Returns:
        the same model, with its Evoformer blocks swapped in place.
    """
    with torch.no_grad():
        num_blocks = len(model.evoformer.blocks)
        fast_blocks = nn.ModuleList()
        for idx, src_block in enumerate(model.evoformer.blocks):
            fast_block = EvoformerBlock(c_m=src_block.msa_att_row.c_in,
                                         c_z=src_block.msa_att_row.c_z,
                                         first_block=(idx == 0),
                                         last_block=(idx == num_blocks - 1))
            # transfer all weights from the original block
            copy_para(fast_block, src_block)
            fast_blocks.append(fast_block)
        model.evoformer.blocks = fast_blocks
    return model
| [
"colossalai.core.global_context.get_world_size"
] | [((655, 686), 'fastfold.model.fastnn.MSAStack', 'MSAStack', (['c_m', 'c_z'], {'p_drop': '(0.15)'}), '(c_m, c_z, p_drop=0.15)\n', (663, 686), False, 'from fastfold.model.fastnn import MSAStack, OutProductMean, PairStack\n'), ((716, 774), 'fastfold.model.fastnn.OutProductMean', 'OutProductMean', ([], {'n_feat': 'c_m', 'n_feat_out': 'c_z', 'n_feat_proj': '(32)'}), '(n_feat=c_m, n_feat_out=c_z, n_feat_proj=32)\n', (730, 774), False, 'from fastfold.model.fastnn import MSAStack, OutProductMean, PairStack\n'), ((801, 822), 'fastfold.model.fastnn.PairStack', 'PairStack', ([], {'d_pair': 'c_z'}), '(d_pair=c_z)\n', (810, 822), False, 'from fastfold.model.fastnn import MSAStack, OutProductMean, PairStack\n'), ((1110, 1149), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (1128, 1149), True, 'from colossalai.core import global_context as gpc\n'), ((1691, 1743), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['msa_mask', '(0, padding_size)'], {}), '(msa_mask, (0, padding_size))\n', (1714, 1743), False, 'import torch\n'), ((1764, 1834), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['pair_mask', '(0, padding_size, 0, padding_size)'], {}), '(pair_mask, (0, padding_size, 0, padding_size))\n', (1787, 1834), False, 'import torch\n'), ((1946, 1977), 'fastfold.distributed.comm_async.All_to_All_Async.apply', 'All_to_All_Async.apply', (['m', '(1)', '(2)'], {}), '(m, 1, 2)\n', (1968, 1977), False, 'from fastfold.distributed.comm_async import All_to_All_Async, All_to_All_Async_Opp\n'), ((2032, 2073), 'fastfold.distributed.comm_async.All_to_All_Async_Opp.apply', 'All_to_All_Async_Opp.apply', (['m', 'work', '(1)', '(2)'], {}), '(m, work, 1, 2)\n', (2058, 2073), False, 'from fastfold.distributed.comm_async import All_to_All_Async, All_to_All_Async_Opp\n'), ((2722, 2782), 'torch.cat', 'torch.cat', (['(ori_q.weight, ori_k.weight, ori_v.weight)'], {'dim': '(0)'}), '((ori_q.weight, ori_k.weight, 
ori_v.weight), dim=0)\n', (2731, 2782), False, 'import torch\n'), ((3257, 3310), 'torch.cat', 'torch.cat', (['(ori_left.weight, ori_right.weight)'], {'dim': '(0)'}), '((ori_left.weight, ori_right.weight), dim=0)\n', (3266, 3310), False, 'import torch\n'), ((3338, 3387), 'torch.cat', 'torch.cat', (['(ori_left.bias, ori_right.bias)'], {'dim': '(0)'}), '((ori_left.bias, ori_right.bias), dim=0)\n', (3347, 3387), False, 'import torch\n'), ((6681, 6696), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6694, 6696), False, 'import torch\n'), ((6724, 6739), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (6737, 6739), True, 'import torch.nn as nn\n'), ((1380, 1431), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['m', '(0, 0, 0, padding_size)'], {}), '(m, (0, 0, 0, padding_size))\n', (1403, 1431), False, 'import torch\n'), ((1448, 1516), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['z', '(0, 0, 0, padding_size, 0, padding_size)'], {}), '(z, (0, 0, 0, padding_size, 0, padding_size))\n', (1471, 1516), False, 'import torch\n'), ((1534, 1551), 'fastfold.distributed.comm.scatter', 'scatter', (['m'], {'dim': '(1)'}), '(m, dim=1)\n', (1541, 1551), False, 'from fastfold.distributed.comm import gather, scatter\n'), ((1568, 1585), 'fastfold.distributed.comm.scatter', 'scatter', (['z'], {'dim': '(1)'}), '(z, dim=1)\n', (1575, 1585), False, 'from fastfold.distributed.comm import gather, scatter\n'), ((2178, 2194), 'fastfold.distributed.comm.gather', 'gather', (['m'], {'dim': '(0)'}), '(m, dim=0)\n', (2184, 2194), False, 'from fastfold.distributed.comm import gather, scatter\n'), ((2211, 2227), 'fastfold.distributed.comm.gather', 'gather', (['z'], {'dim': '(0)'}), '(z, dim=0)\n', (2217, 2227), False, 'from fastfold.distributed.comm import gather, scatter\n')] |
from functools import partial
import pytest
import torch
import torch.nn as nn
import torch.multiprocessing as mp
import colossalai
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import free_port, get_current_device
from colossalai.nn.layer.moe import Top2Router, MoeLayer
from colossalai.global_variables import moe_env
BATCH_SIZE = 32
NUM_EXPERTS = 4
CONFIG = dict(parallel=dict(moe=dict(size=4)))
def check_equal(A, B, atol=1e-06):
    """Assert that tensors *A* and *B* agree element-wise within *atol* (no relative tolerance)."""
    is_close = torch.allclose(A, B, rtol=0, atol=atol)
    assert is_close is True
def run_routing(rank, world_size, port, rs=2, hidden_size=128, data_type=torch.float32):
    """Check that MoeLayer's CUDA-kernel path matches its plain python path.

    Runs one forward/backward pass with ``layer.cuda_mode`` disabled (reference),
    then the same pass with it enabled, and asserts that the outputs, the token
    gradients and the gate-weight gradients agree within a dtype-dependent
    tolerance.

    :param rank: rank of the current process
    :param world_size: number of processes participating in the test
    :param port: free TCP port used to bootstrap the process group
    :param rs: base random seed (offset by the local rank)
    :param hidden_size: token embedding size
    :param data_type: dtype under test, ``torch.float32`` or ``torch.float16``
    """
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    # Disable TF32 so the reference matmuls are numerically comparable.
    torch.backends.cuda.matmul.allow_tf32 = False
    local_rank = gpc.get_local_rank(ParallelMode.GLOBAL)
    # Offset the seed per rank so every process works on distinct tokens.
    torch.manual_seed(rs + local_rank)
    moe_env.reset_loss()
    tokens = torch.randn(BATCH_SIZE, hidden_size, dtype=data_type, device=get_current_device(), requires_grad=True)

    router = Top2Router(1)
    layer = MoeLayer(hidden_size, NUM_EXPERTS, router, nn.Identity())
    if data_type == torch.float16:
        layer = layer.half()

    # Reference pass: plain python implementation.
    layer.cuda_mode = False
    old_out = layer(tokens)
    grad = torch.randn(old_out.shape, device=get_current_device())
    old_out.backward(grad)
    o_tk_grad = tokens.grad.data.clone()
    o_gt_grad = layer.gate.weight.grad.data.clone()
    tokens.grad.zero_()
    layer.gate.weight.grad.zero_()

    # Pass under test: fused CUDA kernels.
    layer.cuda_mode = True
    new_out = layer(tokens)
    if data_type == torch.float32:
        check_equal(old_out, new_out)
    else:
        check_equal(old_out, new_out, 1e-2)

    new_out.backward(grad)
    n_tk_grad = tokens.grad.data.clone()
    n_gt_grad = layer.gate.weight.grad.data.clone()
    if data_type == torch.float32:
        check_equal(o_tk_grad, n_tk_grad)
    else:
        # BUG FIX: the original compared o_tk_grad against itself here, which
        # made the fp16 token-gradient check pass unconditionally.
        check_equal(o_tk_grad, n_tk_grad, 1e-2)

    if data_type == torch.float32:
        check_equal(o_gt_grad, n_gt_grad, 5e-05)
    else:
        check_equal(o_gt_grad, n_gt_grad, 2e-01)
@pytest.mark.skip(reason="Should be activated for detailed tests")
@pytest.mark.parametrize("rs", [2, 42, 60])
@pytest.mark.parametrize("hidden_size", [128, 256, 512, 768, 1024, 2048])
@pytest.mark.parametrize("data_type", [torch.float32, torch.float16])
def test_moe_top2(rs, hidden_size, data_type):
    """Spawn one worker per rank and run the top-2 routing consistency check."""
    world_size = 4
    run_func = partial(
        run_routing,
        world_size=world_size,
        port=free_port(),
        rs=rs,
        hidden_size=hidden_size,
        data_type=data_type,
    )
    mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
    # Manual entry point: run a single configuration (seed 2, hidden size 256, fp16).
    test_moe_top2(2, 256, torch.float16)
| [
"colossalai.core.global_context.get_local_rank",
"colossalai.launch",
"colossalai.global_variables.moe_env.reset_loss",
"colossalai.utils.free_port",
"colossalai.nn.layer.moe.Top2Router",
"colossalai.utils.get_current_device"
] | [((2658, 2723), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Should be activated for detailed tests"""'}), "(reason='Should be activated for detailed tests')\n", (2674, 2723), False, 'import pytest\n'), ((2726, 2768), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""rs"""', '[2, 42, 60]'], {}), "('rs', [2, 42, 60])\n", (2749, 2768), False, 'import pytest\n'), ((2771, 2843), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""hidden_size"""', '[128, 256, 512, 768, 1024, 2048]'], {}), "('hidden_size', [128, 256, 512, 768, 1024, 2048])\n", (2794, 2843), False, 'import pytest\n'), ((2846, 2914), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data_type"""', '[torch.float32, torch.float16]'], {}), "('data_type', [torch.float32, torch.float16])\n", (2869, 2914), False, 'import pytest\n'), ((683, 799), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'CONFIG', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=CONFIG, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (700, 799), False, 'import colossalai\n'), ((910, 949), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.GLOBAL'], {}), '(ParallelMode.GLOBAL)\n', (928, 949), True, 'from colossalai.core import global_context as gpc\n'), ((955, 989), 'torch.manual_seed', 'torch.manual_seed', (['(rs + local_rank)'], {}), '(rs + local_rank)\n', (972, 989), False, 'import torch\n'), ((995, 1015), 'colossalai.global_variables.moe_env.reset_loss', 'moe_env.reset_loss', ([], {}), '()\n', (1013, 1015), False, 'from colossalai.global_variables import moe_env\n'), ((1182, 1195), 'colossalai.nn.layer.moe.Top2Router', 'Top2Router', (['(1)'], {}), '(1)\n', (1192, 1195), False, 'from colossalai.nn.layer.moe import Top2Router, MoeLayer\n'), ((3239, 3276), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), 
'(run_func, nprocs=world_size)\n', (3247, 3276), True, 'import torch.multiprocessing as mp\n'), ((536, 575), 'torch.allclose', 'torch.allclose', (['A', 'B'], {'rtol': '(0)', 'atol': 'atol'}), '(A, B, rtol=0, atol=atol)\n', (550, 575), False, 'import torch\n'), ((1252, 1265), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (1263, 1265), True, 'import torch.nn as nn\n'), ((1091, 1111), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (1109, 1111), False, 'from colossalai.utils import free_port, get_current_device\n'), ((1496, 1516), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (1514, 1516), False, 'from colossalai.utils import free_port, get_current_device\n'), ((3096, 3107), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (3105, 3107), False, 'from colossalai.utils import free_port, get_current_device\n')] |
from colossalai.context.parallel_mode import ParallelMode
from colossalai.logging import get_dist_logger, disable_existing_loggers
import colossalai
import os
from colossalai.core import global_context as gpc
from colossalai.utils.timer import MultiTimer
from colossalai.zero.init_ctx import ZeroInitContext
import colossalai.utils as utils
from colossalai.trainer import hooks, Trainer
from colossalai import nn as col_nn
from colossalai.nn import LinearWarmupLR
from colossalai.pipeline.pipelinable import PipelinableContext
import torch.nn as nn
from dataset.webtext import WebtextDataset
import contextlib
from titans.loss.lm_loss import GPTLMLoss
from colossalai.utils import is_using_pp
def main():
    """Build a (optionally pipeline-parallel) GPT model on Webtext and train it.

    Reads every hyper-parameter from the colossalai config object; launches the
    distributed environment either from torchrun (``--from_torch``) or SLURM.
    """
    parser = colossalai.get_default_parser()
    parser.add_argument('--from_torch', default=False, action='store_true')
    args = parser.parse_args()

    disable_existing_loggers()
    # Bootstrap the process group from the chosen launcher.
    if args.from_torch:
        colossalai.launch_from_torch(config=args.config)
    else:
        colossalai.launch_from_slurm(config=args.config, host=args.host, port=29500, seed=42)
    logger = get_dist_logger()

    logger.info('Build data loader', ranks=[0])
    train_ds = WebtextDataset(os.environ['DATA'], seq_len=gpc.config.SEQ_LEN)
    train_dataloader = utils.get_dataloader(train_ds,
                                          seed=42,
                                          batch_size=gpc.config.BATCH_SIZE,
                                          pin_memory=True,
                                          shuffle=True,
                                          drop_last=True)

    logger.info('Build model', ranks=[0])
    use_pipeline = is_using_pp()
    use_interleaved = hasattr(gpc.config.model, 'num_chunks')
    num_chunks = getattr(gpc.config.model, 'num_chunks', 1)
    use_zero3 = hasattr(gpc.config, 'zero') and gpc.config.zero.level == 3

    if not use_pipeline:
        init_ctx = contextlib.nullcontext()
        if use_zero3:
            init_ctx = ZeroInitContext(target_device=torch.cuda.current_device(),
                                       shard_strategy=gpc.config.zero.model_config.shard_strategy,
                                       shard_param=True)
        with init_ctx:
            model = gpc.config.model.pop('type')(**gpc.config.model)
    else:
        pipelinable = PipelinableContext()
        with pipelinable:
            model = gpc.config.model.pop('type')(**gpc.config.model)

        def mask_function(attention_mask=None):
            # Reshape the per-micro-batch attention mask into the additive
            # form consumed by the transformer blocks.
            if attention_mask is not None:
                batch_size = gpc.config.BATCH_SIZE // gpc.config.NUM_MICRO_BATCHES
                attention_mask = attention_mask.view(batch_size, -1)
                attention_mask = col_nn.partition_batch(attention_mask)
                attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
                attention_mask = (1.0 - attention_mask) * -10000.0
            return attention_mask

        # deepnet_small execution sequence: the mask is recomputed at the
        # front of the stage that starts at block 6.
        exec_seq = (['embed', mask_function]
                    + [f'blocks.{i}' for i in range(6)]
                    + [(mask_function, "front")]
                    + [f'blocks.{i}' for i in range(6, 12)]
                    + ['norm', 'head'])
        pipelinable.to_layer_list(exec_seq)

        init_ctx = contextlib.nullcontext()
        # (lyl)TODO: Zero context and pipelinable context should be integrated into one context.
        if use_zero3:
            init_ctx = ZeroInitContext(target_device=torch.cuda.current_device(),
                                       shard_strategy=gpc.config.zero.model_config.shard_strategy,
                                       shard_param=True)
        with init_ctx:
            model = pipelinable.partition(num_chunks, gpc.pipeline_parallel_size,
                                          gpc.get_local_rank(ParallelMode.PIPELINE))

    criterion = getattr(gpc.config, 'loss_fn', None)
    criterion = criterion.type() if criterion is not None else GPTLMLoss()

    logger.info('Build optimizer', ranks=[0])
    optimizer = gpc.config.optimizer.pop('type')(model.parameters(), **gpc.config.optimizer)
    lr_scheduler = LinearWarmupLR(optimizer, total_steps=gpc.config.NUM_EPOCHS, warmup_steps=5)

    engine, train_dataloader, _, lr_scheduler = colossalai.initialize(model,
                                                                  optimizer,
                                                                  criterion,
                                                                  train_dataloader=train_dataloader,
                                                                  lr_scheduler=lr_scheduler)

    grad_accum = getattr(gpc.config, "gradient_accumulation", 1)
    global_batch_size = gpc.config.BATCH_SIZE * gpc.get_world_size(ParallelMode.DATA) * grad_accum
    logger.info(f'Init done, global batch size = {global_batch_size}', ranks=[0])

    timer = MultiTimer()
    trainer = Trainer(engine=engine, logger=logger, timer=timer)
    hook_list = [
        hooks.LossHook(),
        hooks.LRSchedulerHook(lr_scheduler=lr_scheduler, by_epoch=True),
        hooks.LogMetricByEpochHook(logger),
        hooks.ThroughputHook(),
        hooks.LogMetricByStepHook(),
    ]
    trainer.fit(train_dataloader=train_dataloader,
                epochs=gpc.config.NUM_EPOCHS,
                test_interval=1,
                hooks=hook_list,
                display_progress=True,
                return_output_label=False)
if __name__ == '__main__':
    # Script entry point.
    main()
| [
"colossalai.nn.LinearWarmupLR",
"colossalai.trainer.hooks.ThroughputHook",
"colossalai.trainer.Trainer",
"colossalai.trainer.hooks.LogMetricByStepHook",
"colossalai.core.global_context.get_local_rank",
"colossalai.trainer.hooks.LossHook",
"colossalai.logging.get_dist_logger",
"colossalai.initialize",
... | [((740, 771), 'colossalai.get_default_parser', 'colossalai.get_default_parser', ([], {}), '()\n', (769, 771), False, 'import colossalai\n'), ((886, 912), 'colossalai.logging.disable_existing_loggers', 'disable_existing_loggers', ([], {}), '()\n', (910, 912), False, 'from colossalai.logging import get_dist_logger, disable_existing_loggers\n'), ((1118, 1135), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (1133, 1135), False, 'from colossalai.logging import get_dist_logger, disable_existing_loggers\n'), ((1203, 1265), 'dataset.webtext.WebtextDataset', 'WebtextDataset', (["os.environ['DATA']"], {'seq_len': 'gpc.config.SEQ_LEN'}), "(os.environ['DATA'], seq_len=gpc.config.SEQ_LEN)\n", (1217, 1265), False, 'from dataset.webtext import WebtextDataset\n'), ((1290, 1414), 'colossalai.utils.get_dataloader', 'utils.get_dataloader', (['train_ds'], {'seed': '(42)', 'batch_size': 'gpc.config.BATCH_SIZE', 'pin_memory': '(True)', 'shuffle': '(True)', 'drop_last': '(True)'}), '(train_ds, seed=42, batch_size=gpc.config.BATCH_SIZE,\n pin_memory=True, shuffle=True, drop_last=True)\n', (1310, 1414), True, 'import colossalai.utils as utils\n'), ((1701, 1714), 'colossalai.utils.is_using_pp', 'is_using_pp', ([], {}), '()\n', (1712, 1714), False, 'from colossalai.utils import is_using_pp\n'), ((4217, 4293), 'colossalai.nn.LinearWarmupLR', 'LinearWarmupLR', (['optimizer'], {'total_steps': 'gpc.config.NUM_EPOCHS', 'warmup_steps': '(5)'}), '(optimizer, total_steps=gpc.config.NUM_EPOCHS, warmup_steps=5)\n', (4231, 4293), False, 'from colossalai.nn import LinearWarmupLR\n'), ((4345, 4462), 'colossalai.initialize', 'colossalai.initialize', (['model', 'optimizer', 'criterion'], {'train_dataloader': 'train_dataloader', 'lr_scheduler': 'lr_scheduler'}), '(model, optimizer, criterion, train_dataloader=\n train_dataloader, lr_scheduler=lr_scheduler)\n', (4366, 4462), False, 'import colossalai\n'), ((4989, 5001), 'colossalai.utils.timer.MultiTimer', 'MultiTimer', ([], 
{}), '()\n', (4999, 5001), False, 'from colossalai.utils.timer import MultiTimer\n'), ((5019, 5070), 'colossalai.trainer.Trainer', 'Trainer', ([], {'engine': 'engine', 'logger': 'logger', 'timer': 'timier'}), '(engine=engine, logger=logger, timer=timier)\n', (5026, 5070), False, 'from colossalai.trainer import hooks, Trainer\n'), ((947, 995), 'colossalai.launch_from_torch', 'colossalai.launch_from_torch', ([], {'config': 'args.config'}), '(config=args.config)\n', (975, 995), False, 'import colossalai\n'), ((1016, 1105), 'colossalai.launch_from_slurm', 'colossalai.launch_from_slurm', ([], {'config': 'args.config', 'host': 'args.host', 'port': '(29500)', 'seed': '(42)'}), '(config=args.config, host=args.host, port=29500,\n seed=42)\n', (1044, 1105), False, 'import colossalai\n'), ((1956, 1980), 'contextlib.nullcontext', 'contextlib.nullcontext', ([], {}), '()\n', (1978, 1980), False, 'import contextlib\n'), ((2353, 2373), 'colossalai.pipeline.pipelinable.PipelinableContext', 'PipelinableContext', ([], {}), '()\n', (2371, 2373), False, 'from colossalai.pipeline.pipelinable import PipelinableContext\n'), ((3323, 3347), 'contextlib.nullcontext', 'contextlib.nullcontext', ([], {}), '()\n', (3345, 3347), False, 'import contextlib\n'), ((4040, 4051), 'titans.loss.lm_loss.GPTLMLoss', 'GPTLMLoss', ([], {}), '()\n', (4049, 4051), False, 'from titans.loss.lm_loss import GPTLMLoss\n'), ((4118, 4150), 'colossalai.core.global_context.config.optimizer.pop', 'gpc.config.optimizer.pop', (['"""type"""'], {}), "('type')\n", (4142, 4150), True, 'from colossalai.core import global_context as gpc\n'), ((5101, 5117), 'colossalai.trainer.hooks.LossHook', 'hooks.LossHook', ([], {}), '()\n', (5115, 5117), False, 'from colossalai.trainer import hooks, Trainer\n'), ((5128, 5191), 'colossalai.trainer.hooks.LRSchedulerHook', 'hooks.LRSchedulerHook', ([], {'lr_scheduler': 'lr_scheduler', 'by_epoch': '(True)'}), '(lr_scheduler=lr_scheduler, by_epoch=True)\n', (5149, 5191), False, 'from 
colossalai.trainer import hooks, Trainer\n'), ((5202, 5236), 'colossalai.trainer.hooks.LogMetricByEpochHook', 'hooks.LogMetricByEpochHook', (['logger'], {}), '(logger)\n', (5228, 5236), False, 'from colossalai.trainer import hooks, Trainer\n'), ((5247, 5269), 'colossalai.trainer.hooks.ThroughputHook', 'hooks.ThroughputHook', ([], {}), '()\n', (5267, 5269), False, 'from colossalai.trainer import hooks, Trainer\n'), ((5280, 5307), 'colossalai.trainer.hooks.LogMetricByStepHook', 'hooks.LogMetricByStepHook', ([], {}), '()\n', (5305, 5307), False, 'from colossalai.trainer import hooks, Trainer\n'), ((4802, 4839), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (4820, 4839), True, 'from colossalai.core import global_context as gpc\n'), ((2270, 2298), 'colossalai.core.global_context.config.model.pop', 'gpc.config.model.pop', (['"""type"""'], {}), "('type')\n", (2290, 2298), True, 'from colossalai.core import global_context as gpc\n'), ((2422, 2450), 'colossalai.core.global_context.config.model.pop', 'gpc.config.model.pop', (['"""type"""'], {}), "('type')\n", (2442, 2450), True, 'from colossalai.core import global_context as gpc\n'), ((2754, 2792), 'colossalai.nn.partition_batch', 'col_nn.partition_batch', (['attention_mask'], {}), '(attention_mask)\n', (2776, 2792), True, 'from colossalai import nn as col_nn\n'), ((3840, 3881), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (3858, 3881), True, 'from colossalai.core import global_context as gpc\n')] |
import math
import os
from typing import Tuple, Optional
import torch
import torch.distributed as dist
from colossalai.constants import (INPUT_GROUP_3D, OUTPUT_GROUP_3D,
WEIGHT_GROUP_3D)
from colossalai.context import ParallelMode, seed
from colossalai.core import global_context as gpc
from colossalai.registry import LAYERS
from colossalai.nn.init import init_bias_, init_weight_
from colossalai.utils import checkpoint, get_current_device
from torch import Tensor, dtype, nn
from .._common_utils import ACT2FN, divide, set_tensor_parallel_attribute_by_size, to_2tuple
from ._utils import get_depth_from_env, get_parallel_mode_from_env, get_last_group
from .layers import Linear3D
@LAYERS.register_module
class ViTPatchEmbedding3D(nn.Module):
    """ 3D Image to Patch Embedding

    Splits the input image into non-overlapping patches with a strided
    convolution, prepends a class token and adds a learnable position
    embedding. The embedding dimension is sharded across the 3D
    tensor-parallel depth (each rank holds ``embed_size / depth`` channels),
    and the batch is split along both the weight and the input parallel
    groups in :meth:`forward`.

    :param img_size: image size
    :type img_size: int
    :param patch_size: patch size
    :type patch_size: int
    :param in_chans: number of channels of input image
    :type in_chans: int
    :param embed_size: full (un-partitioned) dimension of embedding
    :type embed_size: int
    :param drop_prob: dropout probability
    :type drop_prob: float
    :param flatten: whether to flatten output tensor, defaults to True
    :type flatten: bool, optional
    :param init_method: parameter init scheme, 'torch' or 'jax', defaults to 'torch'
    :type init_method: str, optional
    """
    def __init__(self,
                 img_size: int,
                 patch_size: int,
                 in_chans: int,
                 embed_size: int,
                 drop_prob: float,
                 flatten: bool = True,
                 init_method: str = 'torch'):
        super().__init__()
        # Resolve the 3D tensor-parallel process groups from the environment.
        self.depth = get_depth_from_env()
        self.input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
        self.weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
        self.output_parallel_mode = get_last_group(self.input_parallel_mode,
                                                  self.weight_parallel_mode)
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.grid_size = (img_size[0] // patch_size[0],
                          img_size[1] // patch_size[1])
        self.in_chans = in_chans
        self.embed_size = embed_size
        # Each rank only materializes 1/depth of the embedding dimension.
        self.embed_size_per_partition = divide(self.embed_size, self.depth)
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.flatten = flatten
        self.init_weight = 'torch'
        self.init_bias = 'torch'
        if init_method == 'jax':
            self.init_weight = 'jax_embed'
            self.init_bias = 'zero'
        # Patch projection: a conv whose kernel equals its stride, i.e. one
        # output position per non-overlapping patch.
        self.proj = nn.Conv2d(self.in_chans,
                              self.embed_size_per_partition,
                              kernel_size=patch_size,
                              stride=patch_size)
        self.cls_token = nn.Parameter(
            torch.zeros(1, 1, self.embed_size_per_partition))
        self.pos_embed = nn.Parameter(
            torch.zeros(1, self.num_patches + 1,
                        self.embed_size_per_partition))
        self.pos_drop = nn.Dropout(drop_prob)
        self.reset_parameters(self.init_weight, self.init_bias)
        self._set_tensor_parallel_attributes()

    def _set_tensor_parallel_attributes(self):
        # Register the tensor-parallel size attribute on every sharded
        # parameter (sizes as chosen by the original author).
        set_tensor_parallel_attribute_by_size(self.proj.weight, self.in_chans * self.embed_size * self.num_patches)
        set_tensor_parallel_attribute_by_size(self.proj.bias, self.embed_size)
        set_tensor_parallel_attribute_by_size(self.cls_token, 1 * 1 * self.embed_size)
        set_tensor_parallel_attribute_by_size(self.pos_embed, 1 * (self.num_patches + 1) * self.embed_size)

    def reset_parameters(self, init_weight, init_bias):
        """Initialize parameters, then replicate them across the parallel groups.

        The broadcasts below run in a fixed order (weight group first, then
        input group) — every rank must execute the same sequence of
        collectives for this to work.
        """
        fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.proj.weight)
        # std = math.sqrt(1.0 / fan_in)
        # nn.init.trunc_normal_(self.proj.weight, std=std / .87962566103423978)
        # nn.init.zeros_(self.proj.bias)
        if init_weight != 'torch':
            init_weight_(self.proj.weight, fan_in, init_method=init_weight)
            init_bias_(self.pos_embed, fan_in, init_method=init_weight)
        if init_bias != 'torch':
            init_bias_(self.proj.bias, fan_in, init_method=init_bias)
        self.to(get_current_device())
        # Broadcast from the first rank of the weight group so the projection
        # parameters are identical on every rank of that group.
        weight_src_rank = gpc.get_ranks_in_group(self.weight_parallel_mode)[0]
        dist.broadcast(self.proj.weight,
                       src=weight_src_rank,
                       group=gpc.get_group(self.weight_parallel_mode))
        dist.broadcast(self.proj.bias,
                       src=weight_src_rank,
                       group=gpc.get_group(self.weight_parallel_mode))
        # Repeat along the input group.
        input_src_rank = gpc.get_ranks_in_group(self.input_parallel_mode)[0]
        dist.broadcast(self.proj.weight,
                       src=input_src_rank,
                       group=gpc.get_group(self.input_parallel_mode))
        dist.broadcast(self.proj.bias,
                       src=input_src_rank,
                       group=gpc.get_group(self.input_parallel_mode))
        # Gradients of the replicated parameters must be all-reduced so the
        # replicas stay in sync after each optimizer step.
        self.proj.weight.register_hook(self._sync_grad_hook)
        self.proj.bias.register_hook(self._sync_grad_hook)
        self.cls_token.register_hook(self._sync_grad_hook)
        self.pos_embed.register_hook(self._sync_grad_hook)

    def _sync_grad_hook(self, grad) -> None:
        # Sum gradients over both parallel groups (in-place all-reduce).
        dist.all_reduce(grad, group=gpc.get_group(self.input_parallel_mode))
        dist.all_reduce(grad, group=gpc.get_group(self.weight_parallel_mode))
        return grad

    def forward(self, x: Tensor) -> Tensor:
        # split a partition from inputs: take this rank's 1/depth slice of
        # the batch along the weight group, then again along the input group.
        x = torch.chunk(x, self.depth, dim=0)[gpc.get_local_rank(
            self.weight_parallel_mode)].contiguous()
        x = torch.chunk(x, self.depth, dim=0)[gpc.get_local_rank(
            self.input_parallel_mode)].contiguous()
        B, C, H, W = x.shape
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x)
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC
        # add cls token & pos embedding
        # [b/q^2,s,h/q] --> [b/q^2, 1+s, h/q]
        cls_token = self.cls_token.expand(x.shape[0], -1, -1)
        x = torch.cat((cls_token, x), dim=1)
        # Dropout runs inside the TENSOR seed context (parallel RNG handling).
        with seed(ParallelMode.TENSOR):
            x = self.pos_drop(x + self.pos_embed)
        return x
@LAYERS.register_module
class ViTSelfAttention3D(nn.Module):
    """Multi-head self-attention layer for the 3D parallel Vision Transformer.

    The attention heads are partitioned over the 3D tensor-parallel depth:
    each rank computes ``num_attention_heads / depth`` heads, while the head
    size is derived from the full head count.

    :param hidden_size: hidden size
    :type hidden_size: int
    :param num_attention_heads: total number of attention heads (before partitioning)
    :type num_attention_heads: int
    :param attention_probs_dropout_prob: dropout probability for attention scores
    :type attention_probs_dropout_prob: float
    :param hidden_dropout_prob: dropout probability for the output projection
    :type hidden_dropout_prob: float
    :param dtype: dtype of parameters, defaults to None
    :type dtype: dtype, optional
    :param bias: whether to add bias, defaults to True
    :type bias: bool, optional
    :param checkpoint: whether to use activation checkpointing, defaults to False
    :type checkpoint: bool, optional
    :param init_method: init scheme, 'torch' or 'jax', defaults to 'torch'
    :type init_method: str, optional
    """
    def __init__(self,
                 hidden_size: int,
                 num_attention_heads: int,
                 attention_probs_dropout_prob: float,
                 hidden_dropout_prob: float,
                 dtype: dtype = None,
                 bias: bool = True,
                 checkpoint: bool = False,
                 init_method: str = 'torch'):
        super().__init__()
        self.depth = get_depth_from_env()
        # self.input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
        # self.weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
        # self.output_parallel_mode = get_last_group(self.input_parallel_mode,
        #                                            self.weight_parallel_mode)
        self.hidden_size = hidden_size
        # Heads are split across the depth; head size uses the full head count.
        self.num_attention_heads = divide(num_attention_heads, self.depth)
        self.attention_head_size = divide(hidden_size, num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.checkpoint = checkpoint
        self.init_weight = 'torch'
        self.init_bias = 'torch'
        if init_method == 'jax':
            self.init_weight = 'jax'
            self.init_bias = 'zero'
        # Fused projection: queries, keys and values in one matmul.
        self.query_key_value = Linear3D(self.hidden_size,
                                        3 * self.hidden_size,
                                        # self.input_parallel_mode,
                                        # self.weight_parallel_mode,
                                        dtype=dtype,
                                        bias=bias,
                                        init_weight=self.init_weight,
                                        init_bias=self.init_bias)
        self.attention_dropout = nn.Dropout(attention_probs_dropout_prob)
        self.dense = Linear3D(self.hidden_size,
                              self.hidden_size,
                              # self.output_parallel_mode,
                              # self.weight_parallel_mode,
                              dtype=dtype,
                              bias=bias,
                              init_weight=self.init_weight,
                              init_bias=self.init_bias)
        self.dropout = nn.Dropout(hidden_dropout_prob)
        self.softmax = nn.Softmax(dim=-1)

    # def groups_for_next_layer(self) -> Tuple[ParallelMode, ParallelMode]:
    #     return self.input_parallel_mode, self.weight_parallel_mode

    def _forward(self, hidden_states: Tensor) -> Tensor:
        """Scaled dot-product attention over this rank's shard of heads."""
        query_key_value = self.query_key_value(hidden_states)
        # [b, s, 3h'] -> [b, s, heads, 3*head_size] -> [b, heads, s, 3*head_size]
        new_qkv_shape = query_key_value.shape[:-1] + \
            (self.num_attention_heads, 3 * self.attention_head_size)
        query_key_value = query_key_value.view(new_qkv_shape)
        query_key_value = query_key_value.permute((0, 2, 1, 3))
        # Split the fused projection back into q, k, v along the last dim.
        query_layer, key_layer, value_layer = torch.chunk(query_key_value,
                                                          3,
                                                          dim=-1)
        attention_scores = torch.matmul(query_layer,
                                        key_layer.transpose(-1, -2))
        # Standard 1/sqrt(d_head) scaling before the softmax.
        attention_scores = attention_scores / math.sqrt(
            self.attention_head_size)
        attention_probs = self.softmax(attention_scores)
        # Dropout runs inside the TENSOR seed context (parallel RNG handling).
        with seed(ParallelMode.TENSOR):
            attention_probs = self.attention_dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        # [b, heads, s, head_size] -> [b, s, heads*head_size]
        context_layer = context_layer.transpose(1, 2)
        new_context_layer_shape = context_layer.size()[:-2] + (
            self.all_head_size, )
        context_layer = context_layer.reshape(new_context_layer_shape)
        output = self.dense(context_layer)
        with seed(ParallelMode.TENSOR):
            output = self.dropout(output)
        return output

    def _checkpoint_forward(self, hidden_states: Tensor) -> Tensor:
        # Trade compute for memory: recompute activations during backward.
        return checkpoint(self._forward, hidden_states)

    def forward(self, hidden_states: Tensor) -> Tensor:
        if self.checkpoint:
            return self._checkpoint_forward(hidden_states)
        else:
            return self._forward(hidden_states)
@LAYERS.register_module
class ViTMLP3D(nn.Module):
    """Position-wise feed-forward (MLP) block for the 3D parallel ViT.

    Expands the hidden dimension by ``mlp_ratio`` through a 3D-parallel
    linear layer, applies the chosen activation, projects back down and
    finishes with dropout.

    :param hidden_size: hidden size
    :type hidden_size: int
    :param mlp_ratio: hidden size of MLP divided by embedding dim
    :type mlp_ratio: int
    :param hidden_dropout_prob: dropout probability for hidden layers
    :type hidden_dropout_prob: float
    :param hidden_act: activation function for hidden layers
    :type hidden_act: str
    :param dtype: dtype of parameters, defaults to None
    :type dtype: dtype, optional
    :param bias: whether to add bias, defaults to True
    :type bias: bool, optional
    :param checkpoint: whether to use activation checkpointing, defaults to False
    :type checkpoint: bool, optional
    :param init_method: init scheme passed through to both linear layers
    :type init_method: str, optional
    """
    def __init__(self,
                 hidden_size: int,
                 mlp_ratio: int,
                 hidden_dropout_prob: float,
                 hidden_act: str = 'gelu',
                 dtype: dtype = None,
                 bias: bool = True,
                 checkpoint: bool = False,
                 init_method: str = 'torch'):
        super().__init__()
        self.hidden_size = hidden_size
        self.mlp_ratio = mlp_ratio
        self.checkpoint = checkpoint
        # Unlike the attention block, the init method is forwarded verbatim.
        self.init_weight = init_method
        self.init_bias = init_method
        inner_size = self.mlp_ratio * self.hidden_size
        self.dense_1 = Linear3D(self.hidden_size,
                                 inner_size,
                                 dtype=dtype,
                                 bias=bias,
                                 init_weight=self.init_weight,
                                 init_bias=self.init_bias)
        self.activation_func = ACT2FN[hidden_act]
        self.dense_2 = Linear3D(inner_size,
                                 self.hidden_size,
                                 dtype=dtype,
                                 bias=bias,
                                 init_weight=self.init_weight,
                                 init_bias=self.init_bias)
        self.dropout = nn.Dropout(hidden_dropout_prob)

    def _forward(self, hidden_states: Tensor) -> Tensor:
        """Expand -> activate -> project -> dropout."""
        out = self.dense_1(hidden_states)
        out = self.activation_func(out)
        out = self.dense_2(out)
        # Dropout runs inside the TENSOR seed context (parallel RNG handling).
        with seed(ParallelMode.TENSOR):
            out = self.dropout(out)
        return out

    def _checkpoint_forward(self, hidden_states: Tensor) -> Tensor:
        # Trade compute for memory: recompute activations during backward.
        return checkpoint(self._forward, hidden_states)

    def forward(self, hidden_states: Tensor) -> Tensor:
        if not self.checkpoint:
            return self._forward(hidden_states)
        return self._checkpoint_forward(hidden_states)
@LAYERS.register_module
class ViTHead3D(nn.Module):
    """Classification head for the 3D parallel Vision Transformer.

    Takes the class-token representation (position 0 of the sequence) and
    maps it to class logits through a single 3D-parallel linear layer.

    :param in_features: size of input tensor
    :type in_features: int
    :param num_classes: number of classes
    :type num_classes: int
    :param dtype: dtype of parameters, defaults to None
    :type dtype: dtype, optional
    :param bias: whether to add bias, defaults to True
    :type bias: bool, optional
    :param init_method: init scheme, 'torch' or 'jax', defaults to 'torch'
    :type init_method: str, optional
    """
    def __init__(self,
                 in_features: int,
                 num_classes: int,
                 dtype: dtype = None,
                 bias: bool = True,
                 init_method: str = 'torch'):
        super().__init__()
        self.in_features = in_features
        self.num_classes = num_classes
        # 'jax' init zeroes both weight and bias of the final classifier.
        if init_method == 'jax':
            self.init_weight = 'zero'
            self.init_bias = 'zero'
        else:
            self.init_weight = 'torch'
            self.init_bias = 'torch'
        self.linear = Linear3D(self.in_features,
                               self.num_classes,
                               dtype=dtype,
                               bias=bias,
                               init_weight=self.init_weight,
                               init_bias=self.init_bias)

    def forward(self, x: Tensor) -> Tensor:
        # Classify on the class token only: [b, s, h] -> [b, h] -> [b, c].
        cls_repr = x[:, 0]
        return self.linear(cls_repr)

    def extra_repr(self):
        return 'in_features={}, num_classes={}'.format(self.in_features,
                                                       self.num_classes)
| [
"colossalai.utils.checkpoint",
"colossalai.core.global_context.get_local_rank",
"colossalai.utils.get_current_device",
"colossalai.nn.init.init_weight_",
"colossalai.nn.init.init_bias_",
"colossalai.context.seed",
"colossalai.core.global_context.get_group",
"colossalai.core.global_context.get_ranks_in... | [((2634, 2737), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.in_chans', 'self.embed_size_per_partition'], {'kernel_size': 'patch_size', 'stride': 'patch_size'}), '(self.in_chans, self.embed_size_per_partition, kernel_size=\n patch_size, stride=patch_size)\n', (2643, 2737), False, 'from torch import Tensor, dtype, nn\n'), ((3093, 3114), 'torch.nn.Dropout', 'nn.Dropout', (['drop_prob'], {}), '(drop_prob)\n', (3103, 3114), False, 'from torch import Tensor, dtype, nn\n'), ((3742, 3797), 'torch.nn.init._calculate_fan_in_and_fan_out', 'nn.init._calculate_fan_in_and_fan_out', (['self.proj.weight'], {}), '(self.proj.weight)\n', (3779, 3797), False, 'from torch import Tensor, dtype, nn\n'), ((6308, 6340), 'torch.cat', 'torch.cat', (['(cls_token, x)'], {'dim': '(1)'}), '((cls_token, x), dim=1)\n', (6317, 6340), False, 'import torch\n'), ((9188, 9228), 'torch.nn.Dropout', 'nn.Dropout', (['attention_probs_dropout_prob'], {}), '(attention_probs_dropout_prob)\n', (9198, 9228), False, 'from torch import Tensor, dtype, nn\n'), ((9670, 9701), 'torch.nn.Dropout', 'nn.Dropout', (['hidden_dropout_prob'], {}), '(hidden_dropout_prob)\n', (9680, 9701), False, 'from torch import Tensor, dtype, nn\n'), ((9725, 9743), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (9735, 9743), False, 'from torch import Tensor, dtype, nn\n'), ((10306, 10345), 'torch.chunk', 'torch.chunk', (['query_key_value', '(3)'], {'dim': '(-1)'}), '(query_key_value, 3, dim=-1)\n', (10317, 10345), False, 'import torch\n'), ((10872, 10914), 'torch.matmul', 'torch.matmul', (['attention_probs', 'value_layer'], {}), '(attention_probs, value_layer)\n', (10884, 10914), False, 'import torch\n'), ((11371, 11411), 'colossalai.utils.checkpoint', 'checkpoint', (['self._forward', 'hidden_states'], {}), '(self._forward, hidden_states)\n', (11381, 11411), False, 'from colossalai.utils import checkpoint, get_current_device\n'), ((14381, 14412), 'torch.nn.Dropout', 
'nn.Dropout', (['hidden_dropout_prob'], {}), '(hidden_dropout_prob)\n', (14391, 14412), False, 'from torch import Tensor, dtype, nn\n'), ((14986, 15026), 'colossalai.utils.checkpoint', 'checkpoint', (['self._forward', 'hidden_states'], {}), '(self._forward, hidden_states)\n', (14996, 15026), False, 'from colossalai.utils import checkpoint, get_current_device\n'), ((2875, 2923), 'torch.zeros', 'torch.zeros', (['(1)', '(1)', 'self.embed_size_per_partition'], {}), '(1, 1, self.embed_size_per_partition)\n', (2886, 2923), False, 'import torch\n'), ((2976, 3043), 'torch.zeros', 'torch.zeros', (['(1)', '(self.num_patches + 1)', 'self.embed_size_per_partition'], {}), '(1, self.num_patches + 1, self.embed_size_per_partition)\n', (2987, 3043), False, 'import torch\n'), ((4006, 4069), 'colossalai.nn.init.init_weight_', 'init_weight_', (['self.proj.weight', 'fan_in'], {'init_method': 'init_weight'}), '(self.proj.weight, fan_in, init_method=init_weight)\n', (4018, 4069), False, 'from colossalai.nn.init import init_bias_, init_weight_\n'), ((4082, 4141), 'colossalai.nn.init.init_bias_', 'init_bias_', (['self.pos_embed', 'fan_in'], {'init_method': 'init_weight'}), '(self.pos_embed, fan_in, init_method=init_weight)\n', (4092, 4141), False, 'from colossalai.nn.init import init_bias_, init_weight_\n'), ((4187, 4244), 'colossalai.nn.init.init_bias_', 'init_bias_', (['self.proj.bias', 'fan_in'], {'init_method': 'init_bias'}), '(self.proj.bias, fan_in, init_method=init_bias)\n', (4197, 4244), False, 'from colossalai.nn.init import init_bias_, init_weight_\n'), ((4262, 4282), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (4280, 4282), False, 'from colossalai.utils import checkpoint, get_current_device\n'), ((4310, 4359), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['self.weight_parallel_mode'], {}), '(self.weight_parallel_mode)\n', (4332, 4359), True, 'from colossalai.core import global_context as gpc\n'), ((4698, 
4746), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['self.input_parallel_mode'], {}), '(self.input_parallel_mode)\n', (4720, 4746), True, 'from colossalai.core import global_context as gpc\n'), ((6355, 6380), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (6359, 6380), False, 'from colossalai.context import ParallelMode, seed\n'), ((10631, 10666), 'math.sqrt', 'math.sqrt', (['self.attention_head_size'], {}), '(self.attention_head_size)\n', (10640, 10666), False, 'import math\n'), ((10750, 10775), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (10754, 10775), False, 'from colossalai.context import ParallelMode, seed\n'), ((11195, 11220), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (11199, 11220), False, 'from colossalai.context import ParallelMode, seed\n'), ((14811, 14836), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (14815, 14836), False, 'from colossalai.context import ParallelMode, seed\n'), ((4477, 4517), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['self.weight_parallel_mode'], {}), '(self.weight_parallel_mode)\n', (4490, 4517), True, 'from colossalai.core import global_context as gpc\n'), ((4631, 4671), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['self.weight_parallel_mode'], {}), '(self.weight_parallel_mode)\n', (4644, 4671), True, 'from colossalai.core import global_context as gpc\n'), ((4863, 4902), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['self.input_parallel_mode'], {}), '(self.input_parallel_mode)\n', (4876, 4902), True, 'from colossalai.core import global_context as gpc\n'), ((5015, 5054), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['self.input_parallel_mode'], {}), '(self.input_parallel_mode)\n', (5028, 5054), True, 'from colossalai.core import 
global_context as gpc\n'), ((5377, 5416), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['self.input_parallel_mode'], {}), '(self.input_parallel_mode)\n', (5390, 5416), True, 'from colossalai.core import global_context as gpc\n'), ((5454, 5494), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['self.weight_parallel_mode'], {}), '(self.weight_parallel_mode)\n', (5467, 5494), True, 'from colossalai.core import global_context as gpc\n'), ((5613, 5646), 'torch.chunk', 'torch.chunk', (['x', 'self.depth'], {'dim': '(0)'}), '(x, self.depth, dim=0)\n', (5624, 5646), False, 'import torch\n'), ((5647, 5692), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['self.weight_parallel_mode'], {}), '(self.weight_parallel_mode)\n', (5665, 5692), True, 'from colossalai.core import global_context as gpc\n'), ((5732, 5765), 'torch.chunk', 'torch.chunk', (['x', 'self.depth'], {'dim': '(0)'}), '(x, self.depth, dim=0)\n', (5743, 5765), False, 'import torch\n'), ((5766, 5810), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['self.input_parallel_mode'], {}), '(self.input_parallel_mode)\n', (5784, 5810), True, 'from colossalai.core import global_context as gpc\n')] |
from pathlib import Path
from colossalai.logging import get_dist_logger
import colossalai
import torch
import os
from colossalai.core import global_context as gpc
from colossalai.utils import get_dataloader, MultiTimer
from colossalai.trainer import Trainer, hooks
from colossalai.nn.metric import Accuracy
from torchvision import transforms
from colossalai.nn.lr_scheduler import CosineAnnealingLR
from torchvision.datasets import CIFAR10
from resnet import ResNet18
from tqdm import tqdm
from titans.utils import barrier_context
DATA_ROOT = Path(os.environ.get('DATA', './data'))
def main():
    """Train ResNet-18 on CIFAR-10 with Colossal-AI.

    Two execution paths are supported:
      * a hand-written epoch loop (default), and
      * the Colossal-AI ``Trainer`` abstraction (``--use_trainer``).
    Distributed state (world size, rank, config) is set up by
    ``launch_from_torch`` from torchrun-provided environment variables.
    """
    parser = colossalai.get_default_parser()
    parser.add_argument('--use_trainer', action='store_true', help='whether to use trainer')
    args = parser.parse_args()
    # Initialize the distributed environment; hyper-parameters come from config.py.
    colossalai.launch_from_torch(config='./config.py')
    logger = get_dist_logger()
    # build resnet
    model = ResNet18()
    # barrier_context serializes the dataset download so only one process
    # fetches CIFAR-10 while the others wait.
    with barrier_context():
        # build dataloaders
        train_dataset = CIFAR10(root=DATA_ROOT,
                                download=True,
                                transform=transforms.Compose([
                                    transforms.RandomCrop(size=32, padding=4),
                                    transforms.RandomHorizontalFlip(),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010]),
                                ]))
        test_dataset = CIFAR10(root=DATA_ROOT,
                               train=False,
                               transform=transforms.Compose([
                                   transforms.ToTensor(),
                                   transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010]),
                               ]))
    train_dataloader = get_dataloader(
        dataset=train_dataset,
        shuffle=True,
        batch_size=gpc.config.BATCH_SIZE,
        pin_memory=True,
    )
    test_dataloader = get_dataloader(
        dataset=test_dataset,
        add_sampler=False,
        batch_size=gpc.config.BATCH_SIZE,
        pin_memory=True,
    )
    # build criterion
    criterion = torch.nn.CrossEntropyLoss()
    # optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    # lr_scheduler
    lr_scheduler = CosineAnnealingLR(optimizer, total_steps=gpc.config.NUM_EPOCHS)
    # colossalai.initialize wraps everything into an Engine and may replace
    # the dataloaders with parallel-aware versions.
    engine, train_dataloader, test_dataloader, _ = colossalai.initialize(
        model,
        optimizer,
        criterion,
        train_dataloader,
        test_dataloader,
    )
    if not args.use_trainer:
        for epoch in range(gpc.config.NUM_EPOCHS):
            engine.train()
            # Only rank 0 draws a progress bar, to avoid duplicated output.
            if gpc.get_global_rank() == 0:
                train_dl = tqdm(train_dataloader)
            else:
                train_dl = train_dataloader
            for img, label in train_dl:
                img = img.cuda()
                label = label.cuda()
                engine.zero_grad()
                output = engine(img)
                train_loss = engine.criterion(output, label)
                engine.backward(train_loss)
                engine.step()
            # Scheduler steps once per epoch, after the optimizer steps.
            lr_scheduler.step()
            engine.eval()
            correct = 0
            total = 0
            for img, label in test_dataloader:
                img = img.cuda()
                label = label.cuda()
                with torch.no_grad():
                    output = engine(img)
                    test_loss = engine.criterion(output, label)
                pred = torch.argmax(output, dim=-1)
                correct += torch.sum(pred == label)
                total += img.size(0)
            # train_loss/test_loss hold the values from the last batch of each loop.
            logger.info(
                f"Epoch {epoch} - train loss: {train_loss:.5}, test loss: {test_loss:.5}, acc: {correct / total:.5}, lr: {lr_scheduler.get_last_lr()[0]:.5g}",
                ranks=[0])
    else:
        # build a timer to measure time
        timer = MultiTimer()
        # create a trainer object
        trainer = Trainer(engine=engine, timer=timer, logger=logger)
        # define the hooks to attach to the trainer
        hook_list = [
            hooks.LossHook(),
            hooks.LRSchedulerHook(lr_scheduler=lr_scheduler, by_epoch=True),
            hooks.AccuracyHook(accuracy_func=Accuracy()),
            hooks.LogMetricByEpochHook(logger),
            hooks.LogMemoryByEpochHook(logger),
            hooks.LogTimingByEpochHook(timer, logger),
            # you can uncomment these lines if you wish to use them
            # hooks.TensorboardHook(log_dir='./tb_logs', ranks=[0]),
            # hooks.SaveCheckpointHook(checkpoint_dir='./ckpt')
        ]
        # start training
        trainer.fit(train_dataloader=train_dataloader,
                    epochs=gpc.config.NUM_EPOCHS,
                    test_dataloader=test_dataloader,
                    test_interval=1,
                    hooks=hook_list,
                    display_progress=True)
# Run training only when this file is executed as a script.
if __name__ == '__main__':
    main()
| [
"colossalai.trainer.Trainer",
"colossalai.trainer.hooks.LogTimingByEpochHook",
"colossalai.trainer.hooks.LossHook",
"colossalai.logging.get_dist_logger",
"colossalai.initialize",
"colossalai.launch_from_torch",
"colossalai.trainer.hooks.LogMemoryByEpochHook",
"colossalai.utils.MultiTimer",
"colossal... | [((549, 581), 'os.environ.get', 'os.environ.get', (['"""DATA"""', '"""./data"""'], {}), "('DATA', './data')\n", (563, 581), False, 'import os\n'), ((610, 641), 'colossalai.get_default_parser', 'colossalai.get_default_parser', ([], {}), '()\n', (639, 641), False, 'import colossalai\n'), ((771, 821), 'colossalai.launch_from_torch', 'colossalai.launch_from_torch', ([], {'config': '"""./config.py"""'}), "(config='./config.py')\n", (799, 821), False, 'import colossalai\n'), ((836, 853), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (851, 853), False, 'from colossalai.logging import get_dist_logger\n'), ((886, 896), 'resnet.ResNet18', 'ResNet18', ([], {}), '()\n', (894, 896), False, 'from resnet import ResNet18\n'), ((1841, 1948), 'colossalai.utils.get_dataloader', 'get_dataloader', ([], {'dataset': 'train_dataset', 'shuffle': '(True)', 'batch_size': 'gpc.config.BATCH_SIZE', 'pin_memory': '(True)'}), '(dataset=train_dataset, shuffle=True, batch_size=gpc.config.\n BATCH_SIZE, pin_memory=True)\n', (1855, 1948), False, 'from colossalai.utils import get_dataloader, MultiTimer\n'), ((2006, 2117), 'colossalai.utils.get_dataloader', 'get_dataloader', ([], {'dataset': 'test_dataset', 'add_sampler': '(False)', 'batch_size': 'gpc.config.BATCH_SIZE', 'pin_memory': '(True)'}), '(dataset=test_dataset, add_sampler=False, batch_size=gpc.\n config.BATCH_SIZE, pin_memory=True)\n', (2020, 2117), False, 'from colossalai.utils import get_dataloader, MultiTimer\n'), ((2191, 2218), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (2216, 2218), False, 'import torch\n'), ((2368, 2431), 'colossalai.nn.lr_scheduler.CosineAnnealingLR', 'CosineAnnealingLR', (['optimizer'], {'total_steps': 'gpc.config.NUM_EPOCHS'}), '(optimizer, total_steps=gpc.config.NUM_EPOCHS)\n', (2385, 2431), False, 'from colossalai.nn.lr_scheduler import CosineAnnealingLR\n'), ((2484, 2573), 'colossalai.initialize', 'colossalai.initialize', (['model', 
'optimizer', 'criterion', 'train_dataloader', 'test_dataloader'], {}), '(model, optimizer, criterion, train_dataloader,\n test_dataloader)\n', (2505, 2573), False, 'import colossalai\n'), ((907, 924), 'titans.utils.barrier_context', 'barrier_context', ([], {}), '()\n', (922, 924), False, 'from titans.utils import barrier_context\n'), ((3983, 3995), 'colossalai.utils.MultiTimer', 'MultiTimer', ([], {}), '()\n', (3993, 3995), False, 'from colossalai.utils import get_dataloader, MultiTimer\n'), ((4049, 4099), 'colossalai.trainer.Trainer', 'Trainer', ([], {'engine': 'engine', 'timer': 'timer', 'logger': 'logger'}), '(engine=engine, timer=timer, logger=logger)\n', (4056, 4099), False, 'from colossalai.trainer import Trainer, hooks\n'), ((4187, 4203), 'colossalai.trainer.hooks.LossHook', 'hooks.LossHook', ([], {}), '()\n', (4201, 4203), False, 'from colossalai.trainer import Trainer, hooks\n'), ((4217, 4280), 'colossalai.trainer.hooks.LRSchedulerHook', 'hooks.LRSchedulerHook', ([], {'lr_scheduler': 'lr_scheduler', 'by_epoch': '(True)'}), '(lr_scheduler=lr_scheduler, by_epoch=True)\n', (4238, 4280), False, 'from colossalai.trainer import Trainer, hooks\n'), ((4352, 4386), 'colossalai.trainer.hooks.LogMetricByEpochHook', 'hooks.LogMetricByEpochHook', (['logger'], {}), '(logger)\n', (4378, 4386), False, 'from colossalai.trainer import Trainer, hooks\n'), ((4400, 4434), 'colossalai.trainer.hooks.LogMemoryByEpochHook', 'hooks.LogMemoryByEpochHook', (['logger'], {}), '(logger)\n', (4426, 4434), False, 'from colossalai.trainer import Trainer, hooks\n'), ((4448, 4489), 'colossalai.trainer.hooks.LogTimingByEpochHook', 'hooks.LogTimingByEpochHook', (['timer', 'logger'], {}), '(timer, logger)\n', (4474, 4489), False, 'from colossalai.trainer import Trainer, hooks\n'), ((2740, 2761), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (2759, 2761), True, 'from colossalai.core import global_context as gpc\n'), ((2795, 2817), 'tqdm.tqdm', 'tqdm', 
(['train_dataloader'], {}), '(train_dataloader)\n', (2799, 2817), False, 'from tqdm import tqdm\n'), ((3587, 3615), 'torch.argmax', 'torch.argmax', (['output'], {'dim': '(-1)'}), '(output, dim=-1)\n', (3599, 3615), False, 'import torch\n'), ((3643, 3667), 'torch.sum', 'torch.sum', (['(pred == label)'], {}), '(pred == label)\n', (3652, 3667), False, 'import torch\n'), ((1649, 1670), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1668, 1670), False, 'from torchvision import transforms\n'), ((1703, 1788), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.4914, 0.4822, 0.4465]', 'std': '[0.2023, 0.1994, 0.201]'}), '(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201]\n )\n', (1723, 1788), False, 'from torchvision import transforms\n'), ((3442, 3457), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3455, 3457), False, 'import torch\n'), ((4327, 4337), 'colossalai.nn.metric.Accuracy', 'Accuracy', ([], {}), '()\n', (4335, 4337), False, 'from colossalai.nn.metric import Accuracy\n'), ((1148, 1189), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', ([], {'size': '(32)', 'padding': '(4)'}), '(size=32, padding=4)\n', (1169, 1189), False, 'from torchvision import transforms\n'), ((1227, 1260), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1258, 1260), False, 'from torchvision import transforms\n'), ((1298, 1319), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1317, 1319), False, 'from torchvision import transforms\n'), ((1357, 1442), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.4914, 0.4822, 0.4465]', 'std': '[0.2023, 0.1994, 0.201]'}), '(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201]\n )\n', (1377, 1442), False, 'from torchvision import transforms\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pprint
from functools import partial
import colossalai.nn as col_nn
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.utils import free_port, is_using_pp
from colossalai.utils.checkpointing import gather_pipeline_parallel_state_dict, load_checkpoint, save_checkpoint
from colossalai.testing import rerun_on_exception
def build_pipeline(model):
    """Slice *model* into the uniform pipeline partition owned by this rank.

    Layers outside the local ``[start, end)`` range are replaced with
    ``nn.Identity`` so layer indices stay aligned across pipeline stages.
    """
    from colossalai.builder.pipeline import partition_uniform

    num_stages = gpc.get_world_size(ParallelMode.PIPELINE)
    stage_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
    num_layers = len(model)
    start, end = partition_uniform(num_layers, num_stages, 1)[stage_rank][0]
    kept = [model[idx] if start <= idx < end else nn.Identity() for idx in range(num_layers)]
    return nn.Sequential(*kept)
def check_equal(A, B):
    """Assert that A and B are element-wise close (rtol=1e-3, atol=1e-2)."""
    are_close = torch.allclose(A, B, rtol=1e-3, atol=1e-2)
    assert are_close
def check_checkpoint_1d(rank, world_size, port):
    """Worker body: save a checkpoint from a plain torch model and reload it
    into a 1D tensor-parallel (and possibly pipeline-partitioned) model,
    then verify the gathered state dict matches what was saved."""
    # 2-stage pipeline x 4-way 1D tensor parallelism => expects world_size == 8.
    config = dict(parallel=dict(pipeline=dict(size=2), tensor=dict(size=4, mode="1d")),)
    disable_existing_loggers()
    launch(config=config, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    # Reference model built from plain torch layers; only rank 0 saves it.
    m1 = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 4))
    sd1 = m1.state_dict()
    if gpc.get_global_rank() == 0:
        print(f"Rank {gpc.get_global_rank()}:\n{pprint.pformat(sd1)}\n")
        save_checkpoint("test.pt", 0, m1)
    # Parallel model built from colossalai layers, pipeline-split if enabled.
    m2 = nn.Sequential(col_nn.Linear(4, 8), col_nn.Linear(8, 4))
    if is_using_pp():
        m2 = build_pipeline(m2)
    load_checkpoint("test.pt", m2)
    sd2 = m2.state_dict()
    # Only the tensor-parallel rank-0 processes gather pipeline shards.
    if is_using_pp() and gpc.get_local_rank(ParallelMode.TENSOR) == 0:
        sd2 = gather_pipeline_parallel_state_dict(sd2)
    print(f"Rank {gpc.get_global_rank()}:\n{pprint.pformat(sd2)}\n")
    if gpc.get_global_rank() == 0:
        # The reloaded (gathered) state dict must match the one that was saved.
        for k, v in sd1.items():
            assert k in sd2
            check_equal(v, sd2[k].to(torch.device("cpu")))
@pytest.mark.dist
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_checkpoint_1d():
    """Spawn 8 workers and exercise 1D-parallel checkpoint save/load."""
    world_size = 8
    worker = partial(check_checkpoint_1d, world_size=world_size, port=free_port())
    mp.spawn(worker, nprocs=world_size)
# Allow running this distributed test directly as a script.
if __name__ == "__main__":
    test_checkpoint_1d()
| [
"colossalai.testing.rerun_on_exception",
"colossalai.nn.Linear",
"colossalai.core.global_context.get_local_rank",
"colossalai.core.global_context.get_world_size",
"colossalai.logging.disable_existing_loggers",
"colossalai.utils.checkpointing.load_checkpoint",
"colossalai.builder.pipeline.partition_unifo... | [((2359, 2462), 'colossalai.testing.rerun_on_exception', 'rerun_on_exception', ([], {'exception_type': 'mp.ProcessRaisedException', 'pattern': '""".*Address already in use.*"""'}), "(exception_type=mp.ProcessRaisedException, pattern=\n '.*Address already in use.*')\n", (2377, 2462), False, 'from colossalai.testing import rerun_on_exception\n'), ((764, 805), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (782, 805), True, 'from colossalai.core import global_context as gpc\n'), ((827, 868), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (845, 868), True, 'from colossalai.core import global_context as gpc\n'), ((1223, 1266), 'torch.allclose', 'torch.allclose', (['A', 'B'], {'rtol': '(0.001)', 'atol': '(0.01)'}), '(A, B, rtol=0.001, atol=0.01)\n', (1237, 1266), False, 'import torch\n'), ((1417, 1443), 'colossalai.logging.disable_existing_loggers', 'disable_existing_loggers', ([], {}), '()\n', (1441, 1443), False, 'from colossalai.logging import disable_existing_loggers\n'), ((1449, 1553), 'colossalai.initialize.launch', 'launch', ([], {'config': 'config', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=config, rank=rank, world_size=world_size, host='localhost',\n port=port, backend='nccl')\n", (1455, 1553), False, 'from colossalai.initialize import launch\n'), ((1752, 1785), 'colossalai.utils.checkpointing.save_checkpoint', 'save_checkpoint', (['"""test.pt"""', '(0)', 'm1'], {}), "('test.pt', 0, m1)\n", (1767, 1785), False, 'from colossalai.utils.checkpointing import gather_pipeline_parallel_state_dict, load_checkpoint, save_checkpoint\n'), ((1862, 1875), 'colossalai.utils.is_using_pp', 'is_using_pp', ([], {}), '()\n', (1873, 1875), False, 'from colossalai.utils import 
free_port, is_using_pp\n'), ((1917, 1947), 'colossalai.utils.checkpointing.load_checkpoint', 'load_checkpoint', (['"""test.pt"""', 'm2'], {}), "('test.pt', m2)\n", (1932, 1947), False, 'from colossalai.utils.checkpointing import gather_pipeline_parallel_state_dict, load_checkpoint, save_checkpoint\n'), ((2596, 2633), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (2604, 2633), True, 'import torch.multiprocessing as mp\n'), ((1576, 1591), 'torch.nn.Linear', 'nn.Linear', (['(4)', '(8)'], {}), '(4, 8)\n', (1585, 1591), True, 'import torch.nn as nn\n'), ((1593, 1608), 'torch.nn.Linear', 'nn.Linear', (['(8)', '(4)'], {}), '(8, 4)\n', (1602, 1608), True, 'import torch.nn as nn\n'), ((1645, 1666), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (1664, 1666), True, 'from colossalai.core import global_context as gpc\n'), ((1812, 1831), 'colossalai.nn.Linear', 'col_nn.Linear', (['(4)', '(8)'], {}), '(4, 8)\n', (1825, 1831), True, 'import colossalai.nn as col_nn\n'), ((1833, 1852), 'colossalai.nn.Linear', 'col_nn.Linear', (['(8)', '(4)'], {}), '(8, 4)\n', (1846, 1852), True, 'import colossalai.nn as col_nn\n'), ((1983, 1996), 'colossalai.utils.is_using_pp', 'is_using_pp', ([], {}), '()\n', (1994, 1996), False, 'from colossalai.utils import free_port, is_using_pp\n'), ((2062, 2102), 'colossalai.utils.checkpointing.gather_pipeline_parallel_state_dict', 'gather_pipeline_parallel_state_dict', (['sd2'], {}), '(sd2)\n', (2097, 2102), False, 'from colossalai.utils.checkpointing import gather_pipeline_parallel_state_dict, load_checkpoint, save_checkpoint\n'), ((2183, 2204), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (2202, 2204), True, 'from colossalai.core import global_context as gpc\n'), ((911, 953), 'colossalai.builder.pipeline.partition_uniform', 'partition_uniform', (['depth', 'pipeline_size', '(1)'], {}), '(depth, 
pipeline_size, 1)\n', (928, 953), False, 'from colossalai.builder.pipeline import partition_uniform\n'), ((2001, 2040), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (2019, 2040), True, 'from colossalai.core import global_context as gpc\n'), ((2578, 2589), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (2587, 2589), False, 'from colossalai.utils import free_port, is_using_pp\n'), ((1126, 1139), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (1137, 1139), True, 'import torch.nn as nn\n'), ((2122, 2143), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (2141, 2143), True, 'from colossalai.core import global_context as gpc\n'), ((2148, 2167), 'pprint.pformat', 'pprint.pformat', (['sd2'], {}), '(sd2)\n', (2162, 2167), False, 'import pprint\n'), ((1696, 1717), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (1715, 1717), True, 'from colossalai.core import global_context as gpc\n'), ((1722, 1741), 'pprint.pformat', 'pprint.pformat', (['sd1'], {}), '(sd1)\n', (1736, 1741), False, 'import pprint\n'), ((2312, 2331), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2324, 2331), False, 'import torch\n')] |
import os
import os.path as osp
import re
from typing import Tuple
from pathlib import Path
import torch
from colossalai.context import Config
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
__all__ = [
'get_checkpoint_path', 'get_latest_checkpoint_path', 'get_latest_checkpoint_pattern', 'save_checkpoint',
'load_checkpoint'
]
def unwrap_config(config: Config):
    """Recursively convert a Config (and any nested mappings) into plain dicts."""
    return {key: unwrap_config(value) if isinstance(value, dict) else value
            for key, value in config.items()}
def _get_ranks_name():
    """Build a per-rank tag like 'tp{t}-pp{p}' from the current parallel ranks.

    A rank defaults to 0 when the corresponding parallel mode is not initialized.
    """
    tp_rank = gpc.get_local_rank(ParallelMode.TENSOR) if gpc.is_initialized(ParallelMode.TENSOR) else 0
    pp_rank = gpc.get_local_rank(ParallelMode.PIPELINE) if gpc.is_initialized(ParallelMode.PIPELINE) else 0
    return f'tp{tp_rank}-pp{pp_rank}'
def _get_standard_checkpoint_filename(epoch: int, suffix: str = ''):
    """Compose the canonical checkpoint file name for *epoch* on this rank."""
    return 'epoch{}-{}{}.pt'.format(epoch, _get_ranks_name(), suffix)
def get_checkpoint_path(checkpoint_dir: str, epoch: int, suffix: str = ''):
    """Build the full checkpoint path for a given directory, epoch and suffix.

    Useful both when writing a new checkpoint and when locating an old one.

    Args:
        checkpoint_dir (str): Directory where checkpoints are stored.
        epoch (int): Epoch number encoded into the file name.
        suffix (str, optional): Extra tag appended to the file name, defaults to ''.

    Returns:
        str: The generated checkpoint path.
    """
    return os.path.join(checkpoint_dir, _get_standard_checkpoint_filename(epoch, suffix))
def _ensure_directory_exists(filename: str):
# ensure the directory exists
dirpath = os.path.dirname(filename)
if not os.path.exists(dirpath):
Path(dirpath).mkdir(parents=True, exist_ok=True)
def get_latest_checkpoint_pattern(suffix: str = ''):
    """Compile the regular expression matching this rank's checkpoint file names.

    Args:
        suffix (str, optional): Extra tag expected in the file name, defaults to ''.

    Returns:
        The compiled pattern; its first capture group is the epoch number.
    """
    return re.compile(r'epoch(\d+)-{}{}\.pt'.format(_get_ranks_name(), suffix))
def get_latest_checkpoint_path(checkpoint_dir: str, suffix: str = ''):
    """Retrieve the path of the newest checkpoint for this rank in a directory.

    Useful when resuming training and the epoch number is unknown.

    Args:
        checkpoint_dir (str): Directory where checkpoints are stored.
        suffix (str, optional): Extra tag expected in the file name, defaults to ''.

    Returns:
        str: Path of the checkpoint with the highest epoch number.

    Raises:
        FileNotFoundError: If no checkpoint matching this rank's pattern exists.
    """
    ckpt_pattern = get_latest_checkpoint_pattern(suffix=suffix)
    assert osp.isdir(checkpoint_dir), f'{checkpoint_dir} is not a directory'

    last_epoch = -1
    for filename in os.listdir(checkpoint_dir):
        match = ckpt_pattern.match(filename)
        if match:
            # Read the epoch straight from the regex capture group instead of
            # string-munging the whole match (lstrip('epoch') strips a *set*
            # of characters and is fragile).
            last_epoch = max(last_epoch, int(match.group(1)))

    if last_epoch == -1:
        ranks_name = _get_ranks_name()
        raise FileNotFoundError(f"Cannot find the latest checkpoint file for {ranks_name} in {checkpoint_dir}")
    target_file = _get_standard_checkpoint_filename(last_epoch, suffix=suffix)
    return osp.join(checkpoint_dir, target_file)
def save_checkpoint(checkpoint_path: str,
                    epoch: int,
                    model: torch.nn.Module,
                    optimizer: torch.optim.Optimizer,
                    lr_scheduler: torch.optim.lr_scheduler._LRScheduler = None,
                    **kwargs):
    """Serialize the full training state into a single checkpoint file.

    Works for both :class:`colossalai.nn.BaseModel` and a plain
    :class:`torch.nn.Module`.

    Args:
        checkpoint_path (str): Destination file for the checkpoint.
        epoch (int): Number of epochs trained so far; stored under ``'epoch'``.
        model (:class:`torch.nn.Module`): Model whose parameters are saved.
        optimizer (:class:`torch.optim.Optimizer`): Optimizer whose state is saved.
        lr_scheduler: Optional LR scheduler; its state is saved when provided.
        kwargs (dict): Any additional entries to store in the checkpoint.
    """
    # Parallel-wrapped models may expose a dedicated state-dict hook; fall
    # back to the normal torch state_dict otherwise.
    state_dict_fn = getattr(model, 'state_dict_for_save_checkpoint', model.state_dict)
    checkpoint = {'epoch': epoch, 'model': state_dict_fn(), 'optimizer': optimizer.state_dict(), **kwargs}
    if lr_scheduler is not None:
        checkpoint['lr_scheduler'] = lr_scheduler.state_dict()
    _ensure_directory_exists(checkpoint_path)
    torch.save(checkpoint, checkpoint_path)
def load_checkpoint(checkpoint_path: str,
                    model: torch.nn.Module,
                    optimizer: torch.optim.Optimizer,
                    lr_scheduler: torch.optim.lr_scheduler._LRScheduler = None,
                    finetune: bool = False,
                    strict: bool = True) -> Tuple:
    """Load a checkpoint file into the given training components.

    When *finetune* is False the whole training state (model, optimizer and,
    if present, lr_scheduler) is restored so training can resume. When
    *finetune* is True only the model weights/buffers are loaded and the
    reported epoch is 0. With *strict* True the checkpoint's model keys must
    exactly match the model's ``state_dict()`` keys.

    Args:
        checkpoint_path (str): Path of the checkpoint file to read.
        model (:class:`torch.nn.Module`): Model to restore parameters into.
        optimizer (:class:`torch.optim.Optimizer`): Optimizer to restore.
        lr_scheduler: Optional LR scheduler to restore, defaults to None.
        finetune (bool, optional): Load only model weights when True.
        strict (bool, optional): Enforce exact state-dict key matching.

    Returns:
        Tuple(int, dict): The epoch number stored in the checkpoint (0 when
        finetuning) and the remaining, not-yet-consumed checkpoint entries.

    Raises:
        ValueError: If a required entry is missing from the checkpoint.
    """
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    try:
        # When finetuning we ignore the stored epoch and start from 0.
        last_epoch = 0 if finetune else checkpoint.pop('epoch')
        model.load_state_dict(checkpoint.pop('model'), strict=strict)
    except KeyError:
        raise ValueError('Checkpoint is corrupted')
    if not finetune:
        try:
            optimizer.load_state_dict(checkpoint.pop('optimizer'))
        except KeyError:
            raise ValueError('Checkpoint is corrupted')
        if lr_scheduler is not None and 'lr_scheduler' in checkpoint:
            lr_scheduler.load_state_dict(checkpoint.pop('lr_scheduler'))
    return last_epoch, checkpoint
| [
"colossalai.core.global_context.get_local_rank",
"colossalai.core.global_context.is_initialized"
] | [((769, 808), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (787, 808), True, 'from colossalai.core import global_context as gpc\n'), ((928, 969), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (946, 969), True, 'from colossalai.core import global_context as gpc\n'), ((1982, 2025), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'ckpt_filename'], {}), '(checkpoint_dir, ckpt_filename)\n', (1994, 2025), False, 'import os\n'), ((2121, 2146), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (2136, 2146), False, 'import os\n'), ((2680, 2699), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (2690, 2699), False, 'import re\n'), ((3499, 3524), 'os.path.isdir', 'osp.isdir', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (3508, 3524), True, 'import os.path as osp\n'), ((3586, 3612), 'os.listdir', 'os.listdir', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (3596, 3612), False, 'import os\n'), ((5817, 5856), 'torch.save', 'torch.save', (['checkpoint', 'checkpoint_path'], {}), '(checkpoint, checkpoint_path)\n', (5827, 5856), False, 'import torch\n'), ((7762, 7809), 'torch.load', 'torch.load', (['checkpoint_path'], {'map_location': '"""cpu"""'}), "(checkpoint_path, map_location='cpu')\n", (7772, 7809), False, 'import torch\n'), ((834, 873), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (852, 873), True, 'from colossalai.core import global_context as gpc\n'), ((995, 1036), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (1013, 1036), True, 'from colossalai.core import global_context as gpc\n'), ((2158, 2181), 'os.path.exists', 'os.path.exists', (['dirpath'], {}), '(dirpath)\n', (2172, 2181), False, 
'import os\n'), ((4091, 4128), 'os.path.join', 'osp.join', (['checkpoint_dir', 'target_file'], {}), '(checkpoint_dir, target_file)\n', (4099, 4128), True, 'import os.path as osp\n'), ((2191, 2204), 'pathlib.Path', 'Path', (['dirpath'], {}), '(dirpath)\n', (2195, 2204), False, 'from pathlib import Path\n')] |
from typing import Tuple
import torch
import torch.distributed as dist
from torch import Tensor
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from .core import ensure_divisibility
def divide(numerator, denominator):
    """Exact integer division; ensure_divisibility raises if there is a remainder."""
    ensure_divisibility(numerator, denominator)
    quotient = numerator // denominator
    return quotient
def _reduce(tensor: Tensor) -> Tensor:
    """Sum *tensor* across every rank of the tensor-parallel group (in place)."""
    if gpc.get_world_size(ParallelMode.TENSOR) == 1:
        # Single-rank group: nothing to reduce.
        return tensor
    group = gpc.get_group(ParallelMode.TENSOR)
    dist.all_reduce(tensor, op=dist.ReduceOp.SUM, group=group, async_op=False)
    return tensor
def _split(tensor: Tensor, dim: int = -1) -> Tensor:
    """Return this rank's contiguous shard of *tensor* along *dim*."""
    world_size = gpc.get_world_size(ParallelMode.TENSOR)
    if world_size == 1:
        return tensor
    # divide() asserts the dimension is evenly divisible by the world size.
    shard_size = divide(tensor.shape[dim], world_size)
    shards = torch.split(tensor, shard_size, dim=dim)
    local_rank = gpc.get_local_rank(ParallelMode.TENSOR)
    return shards[local_rank].contiguous()
def _gather(tensor: Tensor, dim: int = -1) -> Tensor:
if gpc.get_world_size(ParallelMode.TENSOR) == 1:
return tensor
if dim == 1:
output_shape = list(tensor.shape)
output_shape[1] *= gpc.get_world_size(ParallelMode.TENSOR)
output = torch.empty(output_shape, dtype=tensor.dtype, device=tensor.device)
tensor_list = output.chunk(gpc.get_world_size(ParallelMode.TENSOR), dim=1)
dist.all_gather(list(tensor_list),
tensor,
group=gpc.get_group(ParallelMode.TENSOR),
async_op=False)
else:
tensor_list = [
torch.empty_like(tensor) for _ in range(gpc.get_world_size(ParallelMode.TENSOR))
]
dist.all_gather(tensor_list,
tensor,
group=gpc.get_group(ParallelMode.TENSOR),
async_op=False)
output = torch.cat(tensor_list, dim=dim)
return output
def copy(input: Tensor) -> Tensor:
if torch.is_grad_enabled() and input.requires_grad:
input = Copy.apply(input)
return input
class Copy(torch.autograd.Function):
@staticmethod
def forward(ctx: "Copy", input: Tensor) -> Tensor:
return input
@staticmethod
def backward(ctx: "Copy", grad_output: Tensor) -> Tensor:
return _reduce(grad_output)
def scatter(input: Tensor, dim: int = -1) -> Tensor:
if torch.is_grad_enabled() and input.requires_grad:
input = Scatter.apply(input, dim)
else:
input = _split(input, dim=dim)
return input
class Scatter(torch.autograd.Function):
@staticmethod
def forward(ctx: "Scatter", input: Tensor, dim: int = -1) -> Tensor:
ctx.save_for_backward(torch.tensor([dim]))
return _split(input, dim=dim)
@staticmethod
def backward(ctx: "Scatter", grad_output: Tensor) -> Tuple[Tensor]:
dim, = ctx.saved_tensors
return _gather(grad_output, dim=int(dim)), None
def reduce(input: Tensor) -> Tensor:
if torch.is_grad_enabled() and input.requires_grad:
input = Reduce.apply(input)
else:
input = _reduce(input)
return input
class Reduce(torch.autograd.Function):
@staticmethod
def forward(ctx: "Reduce", input: Tensor) -> Tensor:
return _reduce(input)
@staticmethod
def backward(ctx: "Reduce", grad_output: Tensor) -> Tensor:
return grad_output
def gather(input: Tensor, dim: int = -1) -> Tensor:
if torch.is_grad_enabled() and input.requires_grad:
input = Gather.apply(input, dim)
else:
input = _gather(input, dim=dim)
return input
class Gather(torch.autograd.Function):
@staticmethod
def forward(ctx: "Gather", input: Tensor, dim: int = -1) -> Tensor:
ctx.save_for_backward(torch.tensor([dim]))
return _gather(input, dim=dim)
@staticmethod
def backward(ctx: "Gather", grad_output: Tensor) -> Tuple[Tensor]:
dim, = ctx.saved_tensors
return _split(grad_output, dim=int(dim)), None
def _all_to_all(tensor: Tensor, in_dim: int = -1, out_dim: int = -1) -> Tensor:
if gpc.get_world_size(ParallelMode.TENSOR) == 1:
return tensor
split_size = divide(tensor.shape[in_dim], gpc.get_world_size(ParallelMode.TENSOR))
input_tensor_list = torch.split(tensor, split_size, dim=in_dim)
input_tensor_list = [tensor_.contiguous() for tensor_ in input_tensor_list]
if out_dim == 1:
output_shape = list(input_tensor_list[0].shape)
output_shape[1] *= gpc.get_world_size(ParallelMode.TENSOR)
output = torch.empty(output_shape, dtype=tensor.dtype, device=tensor.device)
output_tensor_list = output.chunk(gpc.get_world_size(ParallelMode.TENSOR), dim=1)
dist.all_to_all(list(output_tensor_list),
input_tensor_list,
group=gpc.get_group(ParallelMode.TENSOR),
async_op=False)
else:
output_tensor_list = [torch.ones_like(tensor_) for tensor_ in input_tensor_list]
dist.all_to_all(output_tensor_list,
input_tensor_list,
group=gpc.get_group(ParallelMode.TENSOR),
async_op=False)
output = torch.cat(output_tensor_list, dim=out_dim)
return output
def col_to_row(input_: Tensor) -> Tensor:
if torch.is_grad_enabled() and input_.requires_grad:
input_ = All_to_All.apply(input_, 1, 2)
else:
input_ = _all_to_all(input_, in_dim=1, out_dim=2)
return input_
def row_to_col(input_: Tensor) -> Tensor:
if torch.is_grad_enabled() and input_.requires_grad:
input_ = All_to_All.apply(input_, 2, 1)
else:
input_ = _all_to_all(input_, in_dim=2, out_dim=1)
return input_
class All_to_All(torch.autograd.Function):
@staticmethod
def forward(ctx: "All_to_All", input_: Tensor, in_dim: int = -1, out_dim: int = -1) -> Tensor:
ctx.save_for_backward(torch.tensor([in_dim, out_dim]))
return _all_to_all(input_, in_dim=in_dim, out_dim=out_dim)
@staticmethod
def backward(ctx: "All_to_All", grad_output: Tensor) -> Tuple[Tensor]:
saved_tensors = ctx.saved_tensors[0]
return _all_to_all(grad_output, in_dim=int(saved_tensors[1]),
out_dim=int(saved_tensors[0])), None, None
| [
"colossalai.core.global_context.get_local_rank",
"colossalai.core.global_context.get_group",
"colossalai.core.global_context.get_world_size"
] | [((904, 944), 'torch.split', 'torch.split', (['tensor', 'split_size'], {'dim': 'dim'}), '(tensor, split_size, dim=dim)\n', (915, 944), False, 'import torch\n'), ((4381, 4424), 'torch.split', 'torch.split', (['tensor', 'split_size'], {'dim': 'in_dim'}), '(tensor, split_size, dim=in_dim)\n', (4392, 4424), False, 'import torch\n'), ((415, 454), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (433, 454), True, 'from colossalai.core import global_context as gpc\n'), ((733, 772), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (751, 772), True, 'from colossalai.core import global_context as gpc\n'), ((845, 884), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (863, 884), True, 'from colossalai.core import global_context as gpc\n'), ((1107, 1146), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (1125, 1146), True, 'from colossalai.core import global_context as gpc\n'), ((1262, 1301), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (1280, 1301), True, 'from colossalai.core import global_context as gpc\n'), ((1319, 1386), 'torch.empty', 'torch.empty', (['output_shape'], {'dtype': 'tensor.dtype', 'device': 'tensor.device'}), '(output_shape, dtype=tensor.dtype, device=tensor.device)\n', (1330, 1386), False, 'import torch\n'), ((1980, 2011), 'torch.cat', 'torch.cat', (['tensor_list'], {'dim': 'dim'}), '(tensor_list, dim=dim)\n', (1989, 2011), False, 'import torch\n'), ((2075, 2098), 'torch.is_grad_enabled', 'torch.is_grad_enabled', ([], {}), '()\n', (2096, 2098), False, 'import torch\n'), ((2488, 2511), 'torch.is_grad_enabled', 'torch.is_grad_enabled', ([], {}), '()\n', 
(2509, 2511), False, 'import torch\n'), ((3094, 3117), 'torch.is_grad_enabled', 'torch.is_grad_enabled', ([], {}), '()\n', (3115, 3117), False, 'import torch\n'), ((3555, 3578), 'torch.is_grad_enabled', 'torch.is_grad_enabled', ([], {}), '()\n', (3576, 3578), False, 'import torch\n'), ((4201, 4240), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (4219, 4240), True, 'from colossalai.core import global_context as gpc\n'), ((4316, 4355), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (4334, 4355), True, 'from colossalai.core import global_context as gpc\n'), ((4610, 4649), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (4628, 4649), True, 'from colossalai.core import global_context as gpc\n'), ((4667, 4734), 'torch.empty', 'torch.empty', (['output_shape'], {'dtype': 'tensor.dtype', 'device': 'tensor.device'}), '(output_shape, dtype=tensor.dtype, device=tensor.device)\n', (4678, 4734), False, 'import torch\n'), ((5335, 5377), 'torch.cat', 'torch.cat', (['output_tensor_list'], {'dim': 'out_dim'}), '(output_tensor_list, dim=out_dim)\n', (5344, 5377), False, 'import torch\n'), ((5448, 5471), 'torch.is_grad_enabled', 'torch.is_grad_enabled', ([], {}), '()\n', (5469, 5471), False, 'import torch\n'), ((5683, 5706), 'torch.is_grad_enabled', 'torch.is_grad_enabled', ([], {}), '()\n', (5704, 5706), False, 'import torch\n'), ((580, 614), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (593, 614), True, 'from colossalai.core import global_context as gpc\n'), ((1422, 1461), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (1440, 1461), True, 'from colossalai.core import global_context as 
gpc\n'), ((1697, 1721), 'torch.empty_like', 'torch.empty_like', (['tensor'], {}), '(tensor)\n', (1713, 1721), False, 'import torch\n'), ((2809, 2828), 'torch.tensor', 'torch.tensor', (['[dim]'], {}), '([dim])\n', (2821, 2828), False, 'import torch\n'), ((3874, 3893), 'torch.tensor', 'torch.tensor', (['[dim]'], {}), '([dim])\n', (3886, 3893), False, 'import torch\n'), ((4777, 4816), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (4795, 4816), True, 'from colossalai.core import global_context as gpc\n'), ((5064, 5088), 'torch.ones_like', 'torch.ones_like', (['tensor_'], {}), '(tensor_)\n', (5079, 5088), False, 'import torch\n'), ((6060, 6091), 'torch.tensor', 'torch.tensor', (['[in_dim, out_dim]'], {}), '([in_dim, out_dim])\n', (6072, 6091), False, 'import torch\n'), ((971, 1010), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (989, 1010), True, 'from colossalai.core import global_context as gpc\n'), ((1575, 1609), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (1588, 1609), True, 'from colossalai.core import global_context as gpc\n'), ((1887, 1921), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (1900, 1921), True, 'from colossalai.core import global_context as gpc\n'), ((4948, 4982), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (4961, 4982), True, 'from colossalai.core import global_context as gpc\n'), ((5241, 5275), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (5254, 5275), True, 'from colossalai.core import global_context as gpc\n'), ((1737, 1776), 'colossalai.core.global_context.get_world_size', 
'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (1755, 1776), True, 'from colossalai.core import global_context as gpc\n')] |
from colossalai.context.parallel_context import ParallelContext
from colossalai.core import global_context as gpc
from colossalai.logging import get_dist_logger
from colossalai.context import ParallelMode
from .datasets.data_samplers import build_pretraining_data_loader
from .datasets.builder import build_train_valid_test_datasets
import torch
def cyclic_iter(iter):
while True:
for x in iter:
yield x
def build_train_valid_test_data_iterators(train_iters,
global_batch_size,
eval_interval,
eval_iters,
dataloader_type='single',
**kwargs
):
(train_dataloader, valid_dataloader, test_dataloader) = (None, None, None)
logger = get_dist_logger()
logger.info('> building train, validation, and test datasets ...', ranks=[0])
# Backward compatibility, assume fixed batch size.
# if iteration > 0 and consumed_train_samples == 0:
# assert train_samples is None, \
# 'only backward compatibility support for iteration-based training'
# consumed_train_samples = iteration * global_batch_size
# if iteration > 0 and consumed_valid_samples == 0:
# if train_samples is None:
# consumed_valid_samples = (iteration // eval_interval) * \
# eval_iters * global_batch_size
# Data loader only on rank 0 of each model parallel group.
if not gpc.is_initialized(ParallelMode.TENSOR) or gpc.get_local_rank(ParallelMode.TENSOR) == 0:
# Number of train/valid/test samples.
train_samples = train_iters * global_batch_size
eval_iters_ = (train_iters // eval_interval + 1) * eval_iters
test_iters = eval_iters
train_val_test_num_samples = [train_samples,
eval_iters_ * global_batch_size,
test_iters * global_batch_size]
logger.info(' > datasets target sizes (minimum size):')
logger.info(' train: {}'.format(train_val_test_num_samples[0]), ranks=[0])
logger.info(' validation: {}'.format(train_val_test_num_samples[1]), ranks=[0])
logger.info(' test: {}'.format(train_val_test_num_samples[2]), ranks=[0])
# Build the datasets.
train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
train_valid_test_num_samples=train_val_test_num_samples, **kwargs)
# Build dataloaders.
dp_size = gpc.get_world_size(ParallelMode.DATA)
train_dataloader = build_pretraining_data_loader(
train_ds, consumed_samples=0, micro_batch_size=global_batch_size//dp_size)
valid_dataloader = build_pretraining_data_loader(
valid_ds, consumed_samples=0, micro_batch_size=global_batch_size//dp_size)
test_dataloader = build_pretraining_data_loader(test_ds, 0, micro_batch_size=global_batch_size//dp_size)
# Flags to know if we need to do training/validation/testing.
do_train = train_dataloader is not None and train_iters > 0
do_valid = valid_dataloader is not None and eval_iters > 0
do_test = test_dataloader is not None and eval_iters > 0
# Need to broadcast num_tokens and num_type_tokens.
flags = torch.cuda.LongTensor(
[int(do_train), int(do_valid), int(do_test)])
else:
flags = torch.cuda.LongTensor([0, 0, 0])
# Broadcast num tokens.
torch.distributed.broadcast(flags,
gpc.get_ranks_in_group(ParallelMode.TENSOR)[0],
group=gpc.get_group(ParallelMode.TENSOR))
# Build iterators.
dl_type = dataloader_type
assert dl_type in ['single', 'cyclic']
if train_dataloader is not None:
train_data_iterator = iter(train_dataloader) if dl_type == 'single' \
else iter(cyclic_iter(train_dataloader))
else:
train_data_iterator = None
if valid_dataloader is not None:
valid_data_iterator = iter(valid_dataloader) if dl_type == 'single' \
else iter(cyclic_iter(valid_dataloader))
else:
valid_data_iterator = None
if test_dataloader is not None:
test_data_iterator = iter(test_dataloader) if dl_type == 'single' \
else iter(cyclic_iter(test_dataloader))
else:
test_data_iterator = None
return train_data_iterator, valid_data_iterator, test_data_iterator
| [
"colossalai.core.global_context.get_local_rank",
"colossalai.logging.get_dist_logger",
"colossalai.core.global_context.get_group",
"colossalai.core.global_context.get_world_size",
"colossalai.core.global_context.is_initialized",
"colossalai.core.global_context.get_ranks_in_group"
] | [((916, 933), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (931, 933), False, 'from colossalai.logging import get_dist_logger\n'), ((2658, 2695), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (2676, 2695), True, 'from colossalai.core import global_context as gpc\n'), ((3553, 3585), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (3574, 3585), False, 'import torch\n'), ((1604, 1643), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (1622, 1643), True, 'from colossalai.core import global_context as gpc\n'), ((1647, 1686), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (1665, 1686), True, 'from colossalai.core import global_context as gpc\n'), ((3686, 3729), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (3708, 3729), True, 'from colossalai.core import global_context as gpc\n'), ((3772, 3806), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (3785, 3806), True, 'from colossalai.core import global_context as gpc\n')] |
import pytest
import colossalai
from colossalai.utils.cuda import get_current_device
from colossalai.zero.sharded_param import (StatefulTensor, colo_tensor_mem_usage, colo_model_data_tensor_move,
colo_model_data_tensor_move_inline, colo_model_data_move_to_cpu,
colo_model_tensor_clone)
from colossalai.utils import free_port
from colossalai.testing import rerun_if_address_is_in_use
import torch
from functools import partial
import torch.multiprocessing as mp
def _run_colo_tensor_mem_usage():
for i in range(1):
if i == 1:
t1 = StatefulTensor(torch.randn(2, 2))
t2 = StatefulTensor(torch.randn(4, 4))
c1, g1 = colo_tensor_mem_usage(t1)
c2, g2 = colo_tensor_mem_usage(t2)
assert c1 * 4 == c2
assert g1 * 4 == g2
else:
t1 = torch.randn(2, 2)
t2 = torch.randn(4, 4)
c1, g1 = colo_tensor_mem_usage(t1)
c2, g2 = colo_tensor_mem_usage(t2)
assert c1 * 4 == c2
assert g1 * 4 == g2
def _run_colo_model_data_tensor_move_inline():
for t in [StatefulTensor(torch.randn(2, 3)), torch.randn(2, 3)]:
colo_model_data_tensor_move_inline(t, get_current_device())
assert t.device == get_current_device()
def _run_colo_model_data_tensor_move():
for t in [(StatefulTensor(torch.ones(2, 3)), StatefulTensor(torch.zeros(2, 3).to(get_current_device()))),
(torch.ones(2, 3), torch.zeros(2, 3).to(get_current_device()))]:
cpu_t, cuda_t = t
colo_model_data_tensor_move(cpu_t, cuda_t)
assert cuda_t.device == get_current_device()
def _run_colo_model_data_move_to_cpu():
for t in [StatefulTensor(torch.randn(2, 2)), torch.randn(4, 4)]:
colo_model_data_move_to_cpu(t)
assert t.device == torch.device("cpu")
def _run_colo_model_tensor_clone():
for t in [
StatefulTensor(torch.randn(2, 2).cuda(torch.cuda.current_device())),
torch.randn(4, 4).cuda(torch.cuda.current_device())
]:
if issubclass(type(t), StatefulTensor):
assert t.payload.device == get_current_device()
else:
assert t.device == get_current_device()
p = colo_model_tensor_clone(t, get_current_device())
assert p.device == get_current_device()
for i in range(2):
for j in range(2):
if issubclass(type(t), StatefulTensor):
assert t.payload.device == p.device
assert t.payload[i][j] == p[i][j]
else:
assert t.device == p.device
assert t[i][j] == p[i][j]
def run_dist(rank, world_size, port):
colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
_run_colo_tensor_mem_usage()
_run_colo_model_data_tensor_move_inline()
_run_colo_model_data_tensor_move()
_run_colo_model_data_move_to_cpu()
_run_colo_model_tensor_clone()
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [4, 5])
@rerun_if_address_is_in_use()
def test_zero_tensor_utils(world_size):
run_func = partial(run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_zero_tensor_utils(world_size=2)
| [
"colossalai.launch",
"colossalai.zero.sharded_param.colo_model_data_tensor_move",
"colossalai.zero.sharded_param.colo_tensor_mem_usage",
"colossalai.utils.free_port",
"colossalai.utils.cuda.get_current_device",
"colossalai.zero.sharded_param.colo_model_data_move_to_cpu",
"colossalai.testing.rerun_if_add... | [((3116, 3161), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[4, 5]'], {}), "('world_size', [4, 5])\n", (3139, 3161), False, 'import pytest\n'), ((3163, 3191), 'colossalai.testing.rerun_if_address_is_in_use', 'rerun_if_address_is_in_use', ([], {}), '()\n', (3189, 3191), False, 'from colossalai.testing import rerun_if_address_is_in_use\n'), ((2794, 2906), 'colossalai.launch', 'colossalai.launch', ([], {'config': '{}', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config={}, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (2811, 2906), False, 'import colossalai\n'), ((3310, 3347), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (3318, 3347), True, 'import torch.multiprocessing as mp\n'), ((1228, 1245), 'torch.randn', 'torch.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (1239, 1245), False, 'import torch\n'), ((1629, 1671), 'colossalai.zero.sharded_param.colo_model_data_tensor_move', 'colo_model_data_tensor_move', (['cpu_t', 'cuda_t'], {}), '(cpu_t, cuda_t)\n', (1656, 1671), False, 'from colossalai.zero.sharded_param import StatefulTensor, colo_tensor_mem_usage, colo_model_data_tensor_move, colo_model_data_tensor_move_inline, colo_model_data_move_to_cpu, colo_model_tensor_clone\n'), ((1816, 1833), 'torch.randn', 'torch.randn', (['(4)', '(4)'], {}), '(4, 4)\n', (1827, 1833), False, 'import torch\n'), ((1844, 1874), 'colossalai.zero.sharded_param.colo_model_data_move_to_cpu', 'colo_model_data_move_to_cpu', (['t'], {}), '(t)\n', (1871, 1874), False, 'from colossalai.zero.sharded_param import StatefulTensor, colo_tensor_mem_usage, colo_model_data_tensor_move, colo_model_data_tensor_move_inline, colo_model_data_move_to_cpu, colo_model_tensor_clone\n'), ((751, 776), 'colossalai.zero.sharded_param.colo_tensor_mem_usage', 
'colo_tensor_mem_usage', (['t1'], {}), '(t1)\n', (772, 776), False, 'from colossalai.zero.sharded_param import StatefulTensor, colo_tensor_mem_usage, colo_model_data_tensor_move, colo_model_data_tensor_move_inline, colo_model_data_move_to_cpu, colo_model_tensor_clone\n'), ((798, 823), 'colossalai.zero.sharded_param.colo_tensor_mem_usage', 'colo_tensor_mem_usage', (['t2'], {}), '(t2)\n', (819, 823), False, 'from colossalai.zero.sharded_param import StatefulTensor, colo_tensor_mem_usage, colo_model_data_tensor_move, colo_model_data_tensor_move_inline, colo_model_data_move_to_cpu, colo_model_tensor_clone\n'), ((919, 936), 'torch.randn', 'torch.randn', (['(2)', '(2)'], {}), '(2, 2)\n', (930, 936), False, 'import torch\n'), ((954, 971), 'torch.randn', 'torch.randn', (['(4)', '(4)'], {}), '(4, 4)\n', (965, 971), False, 'import torch\n'), ((993, 1018), 'colossalai.zero.sharded_param.colo_tensor_mem_usage', 'colo_tensor_mem_usage', (['t1'], {}), '(t1)\n', (1014, 1018), False, 'from colossalai.zero.sharded_param import StatefulTensor, colo_tensor_mem_usage, colo_model_data_tensor_move, colo_model_data_tensor_move_inline, colo_model_data_move_to_cpu, colo_model_tensor_clone\n'), ((1040, 1065), 'colossalai.zero.sharded_param.colo_tensor_mem_usage', 'colo_tensor_mem_usage', (['t2'], {}), '(t2)\n', (1061, 1065), False, 'from colossalai.zero.sharded_param import StatefulTensor, colo_tensor_mem_usage, colo_model_data_tensor_move, colo_model_data_tensor_move_inline, colo_model_data_move_to_cpu, colo_model_tensor_clone\n'), ((1208, 1225), 'torch.randn', 'torch.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (1219, 1225), False, 'import torch\n'), ((1294, 1314), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (1312, 1314), False, 'from colossalai.utils.cuda import get_current_device\n'), ((1343, 1363), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (1361, 1363), False, 'from colossalai.utils.cuda import 
get_current_device\n'), ((1531, 1547), 'torch.ones', 'torch.ones', (['(2)', '(3)'], {}), '(2, 3)\n', (1541, 1547), False, 'import torch\n'), ((1704, 1724), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (1722, 1724), False, 'from colossalai.utils.cuda import get_current_device\n'), ((1796, 1813), 'torch.randn', 'torch.randn', (['(2)', '(2)'], {}), '(2, 2)\n', (1807, 1813), False, 'import torch\n'), ((1902, 1921), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1914, 1921), False, 'import torch\n'), ((2091, 2118), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (2116, 2118), False, 'import torch\n'), ((2340, 2360), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (2358, 2360), False, 'from colossalai.utils.cuda import get_current_device\n'), ((2389, 2409), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (2407, 2409), False, 'from colossalai.utils.cuda import get_current_device\n'), ((3293, 3304), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (3302, 3304), False, 'from colossalai.utils import free_port\n'), ((660, 677), 'torch.randn', 'torch.randn', (['(2)', '(2)'], {}), '(2, 2)\n', (671, 677), False, 'import torch\n'), ((711, 728), 'torch.randn', 'torch.randn', (['(4)', '(4)'], {}), '(4, 4)\n', (722, 728), False, 'import torch\n'), ((1436, 1452), 'torch.ones', 'torch.ones', (['(2)', '(3)'], {}), '(2, 3)\n', (1446, 1452), False, 'import torch\n'), ((1570, 1590), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (1588, 1590), False, 'from colossalai.utils.cuda import get_current_device\n'), ((2025, 2052), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (2050, 2052), False, 'import torch\n'), ((2068, 2085), 'torch.randn', 'torch.randn', (['(4)', '(4)'], {}), '(4, 4)\n', (2079, 2085), False, 'import torch\n'), ((2214, 2234), 
'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (2232, 2234), False, 'from colossalai.utils.cuda import get_current_device\n'), ((2280, 2300), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (2298, 2300), False, 'from colossalai.utils.cuda import get_current_device\n'), ((1491, 1511), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (1509, 1511), False, 'from colossalai.utils.cuda import get_current_device\n'), ((1549, 1566), 'torch.zeros', 'torch.zeros', (['(2)', '(3)'], {}), '(2, 3)\n', (1560, 1566), False, 'import torch\n'), ((2002, 2019), 'torch.randn', 'torch.randn', (['(2)', '(2)'], {}), '(2, 2)\n', (2013, 2019), False, 'import torch\n'), ((1470, 1487), 'torch.zeros', 'torch.zeros', (['(2)', '(3)'], {}), '(2, 3)\n', (1481, 1487), False, 'import torch\n')] |
# referenced from Megatron and used to testify communication
import os.path as osp
import pytest
import torch
from torch.utils.data import DataLoader
from colossalai.builder import ModelInitializer, build_dataset, build_optimizer, build_loss
from colossalai.communication import p2p as p2p_communication
from colossalai.communication.utils import send_tensor_meta, recv_tensor_meta
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.initialize import initialize
from colossalai.utils import print_rank_0, get_current_device
NUM_BATCH = 128
NUM_MICRO = 6
def get_num_microbatches():
return NUM_MICRO
def to_cuda(data):
if isinstance(data, (tuple, list)):
data = data[0].to(get_current_device())
else:
data = data.to(get_current_device())
return data
def step_func(loss):
def _step_func(input_tensor, model):
output = model(input_tensor)
if isinstance(output, (tuple, list)):
if len(output) > 1:
raise NotImplementedError("Multiple output!!!")
else:
output = output[0]
return output, loss
return _step_func
def forward_step(forward_step_func, data_iterator, model, input_tensor, losses_reduced):
"""Forward step for passed-in model.
If first stage, input tensor is obtained from data_iterator, otherwise
passed-in input_tensor is used.
Returns output tensor."""
if input_tensor is None:
data, label = data_iterator.next()
input_tensor = to_cuda(data)
output_tensor, loss_func = forward_step_func(input_tensor, model)
if gpc.is_last_rank(ParallelMode.PIPELINE):
data, label = data_iterator.next()
label = to_cuda(label)
output_tensor = loss_func(output_tensor, label) / get_num_microbatches()
losses_reduced.append(output_tensor)
return output_tensor
def backward_step(optimizer, input_tensor, output_tensor, output_tensor_grad):
"""Backward step through passed-in output tensor.
If last stage, output_tensor_grad is None, otherwise gradient of loss
with respect to stage's output tensor.
Returns gradient of loss with respect to input tensor (None if first
stage)."""
# Retain the grad on the input_tensor.
if input_tensor is not None:
input_tensor.retain_grad()
# Backward pass.
torch.autograd.backward(output_tensor, grad_tensors=output_tensor_grad)
# Collect the grad of the input_tensor.
input_tensor_grad = None
if input_tensor is not None:
input_tensor_grad = input_tensor.grad
return input_tensor_grad
def forward_backward_pipelining_without_interleaving(forward_step_func, data_iterator,
model, optimizer, forward_only):
"""Run non-interleaved 1F1B schedule, with communication between pipeline
stages.
Returns dictionary with losses if the last stage, empty dict otherwise."""
# Compute number of warmup microbatches.
num_microbatches = get_num_microbatches()
num_warmup_microbatches = \
(gpc.get_world_size(ParallelMode.PIPELINE) -
gpc.get_local_rank(ParallelMode.PIPELINE) - 1)
num_warmup_microbatches = min(
num_warmup_microbatches,
num_microbatches)
num_microbatches_remaining = \
num_microbatches - num_warmup_microbatches
# Input, output tensors only need to be saved when doing backward passes
input_tensors = None
output_tensors = None
if not forward_only:
input_tensors = []
output_tensors = []
losses_reduced = []
# Used for tensor meta information communication
ft_shape = None
bt_shape = None
fs_checker = True
# Run warmup forward passes.
for i in range(num_warmup_microbatches):
if not gpc.is_first_rank(ParallelMode.PIPELINE):
ft_shape = recv_tensor_meta(ft_shape)
input_tensor = p2p_communication.recv_forward(ft_shape)
output_tensor = forward_step(forward_step_func, data_iterator, model,
input_tensor, losses_reduced)
if not gpc.is_last_rank(ParallelMode.PIPELINE):
bt_shape = output_tensor.shape
fs_checker = send_tensor_meta(output_tensor, fs_checker)
p2p_communication.send_forward(output_tensor)
if not forward_only:
input_tensors.append(input_tensor)
output_tensors.append(output_tensor)
# Before running 1F1B, need to receive first forward tensor.
# If all microbatches are run in warmup / cooldown phase, then no need to
# receive this tensor here.
if num_microbatches_remaining > 0:
if not gpc.is_first_rank(ParallelMode.PIPELINE):
ft_shape = recv_tensor_meta(ft_shape)
input_tensor = p2p_communication.recv_forward(ft_shape)
# Run 1F1B in steady state.
for i in range(num_microbatches_remaining):
last_iteration = (i == (num_microbatches_remaining - 1))
output_tensor = forward_step(forward_step_func, data_iterator, model,
input_tensor, losses_reduced)
if forward_only:
p2p_communication.send_forward(output_tensor)
if not last_iteration:
input_tensor = p2p_communication.recv_forward(ft_shape)
else:
output_tensor_grad = \
p2p_communication.send_forward_recv_backward(output_tensor, bt_shape)
# Add input_tensor and output_tensor to end of list.
input_tensors.append(input_tensor)
output_tensors.append(output_tensor)
# Pop input_tensor and output_tensor from the start of the list for
# the backward pass.
input_tensor = input_tensors.pop(0)
output_tensor = output_tensors.pop(0)
input_tensor_grad = \
backward_step(optimizer, input_tensor, output_tensor,
output_tensor_grad)
if last_iteration:
input_tensor = None
p2p_communication.send_backward(input_tensor_grad)
else:
input_tensor = \
p2p_communication.send_backward_recv_forward(input_tensor_grad, ft_shape)
# Run cooldown backward passes.
if not forward_only:
for i in range(num_warmup_microbatches):
input_tensor = input_tensors.pop(0)
output_tensor = output_tensors.pop(0)
output_tensor_grad = p2p_communication.recv_backward(bt_shape)
input_tensor_grad = \
backward_step(optimizer, input_tensor, output_tensor,
output_tensor_grad)
p2p_communication.send_backward(input_tensor_grad)
return losses_reduced
DIR_PATH = osp.dirname(osp.realpath(__file__))
CONFIG_PATH = osp.join(DIR_PATH, '../configs/pipeline_vanilla_vit.py')
@pytest.mark.skip(reason="This is only for debugging purpose, please ignore this test")
@pytest.mark.dist
def test_schedule():
    """Smoke-test the non-interleaved 1F1B pipeline schedule on a vanilla ViT."""
    initialize(CONFIG_PATH)

    # model construction
    vit = ModelInitializer(gpc.config.model, 1).model_initialize()
    print_rank_0('model is created')

    # fixed seed so every process draws the same sampling order
    torch.manual_seed(1331)
    train_set = build_dataset(gpc.config.data.dataset)
    train_loader = DataLoader(dataset=train_set, **gpc.config.data.dataloader)
    print_rank_0('train data is created')

    # optimizer and loss from the config
    optimizer = build_optimizer(gpc.config.optimizer, vit)
    criterion = build_loss(gpc.config.loss)
    print_rank_0('optim and loss is created')

    forward_backward_pipelining_without_interleaving(
        step_func(criterion), iter(train_loader), vit, optimizer, False)

    gpc.destroy()
    print_rank_0('training finished')
# Allow running this test directly for debugging (bypasses pytest collection).
if __name__ == '__main__':
    test_schedule()
| [
"colossalai.communication.utils.recv_tensor_meta",
"colossalai.core.global_context.is_last_rank",
"colossalai.utils.get_current_device",
"colossalai.core.global_context.get_local_rank",
"colossalai.communication.p2p.send_backward",
"colossalai.core.global_context.get_world_size",
"colossalai.communicati... | [((6926, 6982), 'os.path.join', 'osp.join', (['DIR_PATH', '"""../configs/pipeline_vanilla_vit.py"""'], {}), "(DIR_PATH, '../configs/pipeline_vanilla_vit.py')\n", (6934, 6982), True, 'import os.path as osp\n'), ((6986, 7077), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""This is only for debugging purpose, please ignore this test"""'}), "(reason=\n 'This is only for debugging purpose, please ignore this test')\n", (7002, 7077), False, 'import pytest\n'), ((1669, 1708), 'colossalai.core.global_context.is_last_rank', 'gpc.is_last_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (1685, 1708), True, 'from colossalai.core import global_context as gpc\n'), ((2414, 2485), 'torch.autograd.backward', 'torch.autograd.backward', (['output_tensor'], {'grad_tensors': 'output_tensor_grad'}), '(output_tensor, grad_tensors=output_tensor_grad)\n', (2437, 2485), False, 'import torch\n'), ((6888, 6910), 'os.path.realpath', 'osp.realpath', (['__file__'], {}), '(__file__)\n', (6900, 6910), True, 'import os.path as osp\n'), ((7116, 7139), 'colossalai.initialize.initialize', 'initialize', (['CONFIG_PATH'], {}), '(CONFIG_PATH)\n', (7126, 7139), False, 'from colossalai.initialize import initialize\n'), ((7232, 7264), 'colossalai.utils.print_rank_0', 'print_rank_0', (['"""model is created"""'], {}), "('model is created')\n", (7244, 7264), False, 'from colossalai.utils import print_rank_0, get_current_device\n'), ((7314, 7337), 'torch.manual_seed', 'torch.manual_seed', (['(1331)'], {}), '(1331)\n', (7331, 7337), False, 'import torch\n'), ((7353, 7391), 'colossalai.builder.build_dataset', 'build_dataset', (['gpc.config.data.dataset'], {}), '(gpc.config.data.dataset)\n', (7366, 7391), False, 'from colossalai.builder import ModelInitializer, build_dataset, build_optimizer, build_loss\n'), ((7409, 7466), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'dataset'}), '(dataset=dataset, **gpc.config.data.dataloader)\n', 
(7419, 7466), False, 'from torch.utils.data import DataLoader\n'), ((7471, 7508), 'colossalai.utils.print_rank_0', 'print_rank_0', (['"""train data is created"""'], {}), "('train data is created')\n", (7483, 7508), False, 'from colossalai.utils import print_rank_0, get_current_device\n'), ((7553, 7597), 'colossalai.builder.build_optimizer', 'build_optimizer', (['gpc.config.optimizer', 'model'], {}), '(gpc.config.optimizer, model)\n', (7568, 7597), False, 'from colossalai.builder import ModelInitializer, build_dataset, build_optimizer, build_loss\n'), ((7609, 7636), 'colossalai.builder.build_loss', 'build_loss', (['gpc.config.loss'], {}), '(gpc.config.loss)\n', (7619, 7636), False, 'from colossalai.builder import ModelInitializer, build_dataset, build_optimizer, build_loss\n'), ((7641, 7682), 'colossalai.utils.print_rank_0', 'print_rank_0', (['"""optim and loss is created"""'], {}), "('optim and loss is created')\n", (7653, 7682), False, 'from colossalai.utils import print_rank_0, get_current_device\n'), ((7844, 7857), 'colossalai.core.global_context.destroy', 'gpc.destroy', ([], {}), '()\n', (7855, 7857), True, 'from colossalai.core import global_context as gpc\n'), ((7862, 7895), 'colossalai.utils.print_rank_0', 'print_rank_0', (['"""training finished"""'], {}), "('training finished')\n", (7874, 7895), False, 'from colossalai.utils import print_rank_0, get_current_device\n'), ((3984, 4024), 'colossalai.communication.p2p.recv_forward', 'p2p_communication.recv_forward', (['ft_shape'], {}), '(ft_shape)\n', (4014, 4024), True, 'from colossalai.communication import p2p as p2p_communication\n'), ((4346, 4391), 'colossalai.communication.p2p.send_forward', 'p2p_communication.send_forward', (['output_tensor'], {}), '(output_tensor)\n', (4376, 4391), True, 'from colossalai.communication import p2p as p2p_communication\n'), ((4863, 4903), 'colossalai.communication.p2p.recv_forward', 'p2p_communication.recv_forward', (['ft_shape'], {}), '(ft_shape)\n', (4893, 4903), True, 
'from colossalai.communication import p2p as p2p_communication\n'), ((768, 788), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (786, 788), False, 'from colossalai.utils import print_rank_0, get_current_device\n'), ((823, 843), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (841, 843), False, 'from colossalai.utils import print_rank_0, get_current_device\n'), ((3146, 3187), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (3164, 3187), True, 'from colossalai.core import global_context as gpc\n'), ((3199, 3240), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (3217, 3240), True, 'from colossalai.core import global_context as gpc\n'), ((3869, 3909), 'colossalai.core.global_context.is_first_rank', 'gpc.is_first_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (3886, 3909), True, 'from colossalai.core import global_context as gpc\n'), ((3934, 3960), 'colossalai.communication.utils.recv_tensor_meta', 'recv_tensor_meta', (['ft_shape'], {}), '(ft_shape)\n', (3950, 3960), False, 'from colossalai.communication.utils import send_tensor_meta, recv_tensor_meta\n'), ((4185, 4224), 'colossalai.core.global_context.is_last_rank', 'gpc.is_last_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (4201, 4224), True, 'from colossalai.core import global_context as gpc\n'), ((4294, 4337), 'colossalai.communication.utils.send_tensor_meta', 'send_tensor_meta', (['output_tensor', 'fs_checker'], {}), '(output_tensor, fs_checker)\n', (4310, 4337), False, 'from colossalai.communication.utils import send_tensor_meta, recv_tensor_meta\n'), ((4748, 4788), 'colossalai.core.global_context.is_first_rank', 'gpc.is_first_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (4765, 4788), True, 'from colossalai.core 
import global_context as gpc\n'), ((4813, 4839), 'colossalai.communication.utils.recv_tensor_meta', 'recv_tensor_meta', (['ft_shape'], {}), '(ft_shape)\n', (4829, 4839), False, 'from colossalai.communication.utils import send_tensor_meta, recv_tensor_meta\n'), ((5233, 5278), 'colossalai.communication.p2p.send_forward', 'p2p_communication.send_forward', (['output_tensor'], {}), '(output_tensor)\n', (5263, 5278), True, 'from colossalai.communication import p2p as p2p_communication\n'), ((5453, 5522), 'colossalai.communication.p2p.send_forward_recv_backward', 'p2p_communication.send_forward_recv_backward', (['output_tensor', 'bt_shape'], {}), '(output_tensor, bt_shape)\n', (5497, 5522), True, 'from colossalai.communication import p2p as p2p_communication\n'), ((6575, 6616), 'colossalai.communication.p2p.recv_backward', 'p2p_communication.recv_backward', (['bt_shape'], {}), '(bt_shape)\n', (6606, 6616), True, 'from colossalai.communication import p2p as p2p_communication\n'), ((6785, 6835), 'colossalai.communication.p2p.send_backward', 'p2p_communication.send_backward', (['input_tensor_grad'], {}), '(input_tensor_grad)\n', (6816, 6835), True, 'from colossalai.communication import p2p as p2p_communication\n'), ((7171, 7208), 'colossalai.builder.ModelInitializer', 'ModelInitializer', (['gpc.config.model', '(1)'], {}), '(gpc.config.model, 1)\n', (7187, 7208), False, 'from colossalai.builder import ModelInitializer, build_dataset, build_optimizer, build_loss\n'), ((5346, 5386), 'colossalai.communication.p2p.recv_forward', 'p2p_communication.recv_forward', (['ft_shape'], {}), '(ft_shape)\n', (5376, 5386), True, 'from colossalai.communication import p2p as p2p_communication\n'), ((6136, 6186), 'colossalai.communication.p2p.send_backward', 'p2p_communication.send_backward', (['input_tensor_grad'], {}), '(input_tensor_grad)\n', (6167, 6186), True, 'from colossalai.communication import p2p as p2p_communication\n'), ((6258, 6331), 
'colossalai.communication.p2p.send_backward_recv_forward', 'p2p_communication.send_backward_recv_forward', (['input_tensor_grad', 'ft_shape'], {}), '(input_tensor_grad, ft_shape)\n', (6302, 6331), True, 'from colossalai.communication import p2p as p2p_communication\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import math
from colossalai import context
import torch
from torch import nn as nn, Tensor, distributed as dist
from torch.nn.init import _calculate_fan_in_and_fan_out
from colossalai.context import seed, ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn.layer._common_utils import divide, ACT2FN
from colossalai.registry import LAYERS
from colossalai.utils import checkpoint
from colossalai.utils import get_current_device
from .layers import Linear1D_Col, Linear1D_Row
from ..base_layer import ParallelLayer
from .._common_utils import to_2tuple
from ..fused_bias_gelu import bias_gelu_impl
@LAYERS.register_module
class ViTMLP1D(ParallelLayer):
    """1D tensor-parallel MLP block for Vision Transformer.

    :param in_features: size of each input sample
    :type in_features: int
    :param mlp_ratio: hidden size of MLP divided by embedding dim
    :type mlp_ratio: int
    :param act_func: activation function, defaults to 'gelu'
    :type act_func: str, optional
    :param dropout_prob: dropout probability, defaults to 0.
    :type dropout_prob: float, optional
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param checkpoint: whether to checkpoint the layer, defaults to False
    :type checkpoint: bool, optional
    """

    def __init__(self,
                 in_features: int,
                 mlp_ratio: int,
                 act_func: str = 'gelu',
                 dropout_prob: float = 0.,
                 dtype=None,
                 checkpoint: bool = False,
                 skip_bias_add: bool = False,
                 weight_init='torch'):
        super().__init__()

        self.in_features = in_features
        self.mlp_ratio = mlp_ratio
        self.checkpoint = checkpoint
        self.skip_bias_add = skip_bias_add
        assert weight_init in ('torch', 'jax')

        # The fused gelu kernel consumes the bias itself, so the first dense
        # layer must hand the bias through instead of adding it.
        use_fused = act_func == 'fused_gelu'
        self.act = bias_gelu_impl if use_fused else ACT2FN[act_func]

        hidden_features = int(self.mlp_ratio * self.in_features)

        # h -> mlp_ratio * h, column-parallel
        self.dense_1 = Linear1D_Col(
            self.in_features,
            hidden_features,
            dtype=dtype,
            gather_output=False,
            skip_bias_add=use_fused,
            init_weight=weight_init,
            init_bias=weight_init)

        # mlp_ratio * h -> h, row-parallel
        self.dense_2 = Linear1D_Row(
            hidden_features,
            self.in_features,
            dtype=dtype,
            parallel_input=True,
            init_weight=weight_init,
            init_bias=weight_init)

        self.dropout = nn.Dropout(dropout_prob)

    def _forward(self, hidden_states: Tensor) -> Tensor:
        if self.act is bias_gelu_impl:
            intermediate, bias = self.dense_1(hidden_states)
            intermediate = self.act(intermediate, bias)
        else:
            intermediate = self.act(self.dense_1(hidden_states))

        with seed(ParallelMode.TENSOR):
            intermediate = self.dropout(intermediate)
        out = self.dense_2(intermediate)
        return self.dropout(out)

    def _checkpoint_forward(self, hidden_states: Tensor) -> Tensor:
        return checkpoint(self._forward, hidden_states)

    def forward(self, hidden_states: Tensor) -> Tensor:
        if self.checkpoint:
            return self._checkpoint_forward(hidden_states)
        return self._forward(hidden_states)
@LAYERS.register_module
class ViTSelfAttention1D(ParallelLayer):
    """Multi-head self-attention for 1D tensor-parallel Vision Transformer.

    :param hidden_size: hidden size
    :type hidden_size: int
    :param num_attention_heads: number of attention heads
    :type num_attention_heads: int
    :param attention_dropout_prob: dropout probability for attention layers
    :type attention_dropout_prob: float
    :param hidden_dropout_prob: dropout probability for hidden layers
    :type hidden_dropout_prob: float
    :param dtype: dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param checkpoint: whether to checkpoint the layer, defaults to False
    :type checkpoint: bool, optional
    """

    def __init__(self,
                 hidden_size: int,
                 num_attention_heads: int,
                 attention_dropout_prob: float,
                 hidden_dropout_prob: float,
                 dtype=None,
                 checkpoint: bool = False,
                 weight_init='torch'):
        super().__init__()

        self.hidden_size = hidden_size
        self.attention_head_size = divide(hidden_size, num_attention_heads)
        self.num_attention_heads_per_partition = divide(num_attention_heads, gpc.tensor_parallel_size)
        self.hidden_size_per_partition = divide(hidden_size, gpc.tensor_parallel_size)
        self.checkpoint = checkpoint

        assert weight_init in ('torch', 'jax')
        init_bias = 'zero' if weight_init == 'jax' else weight_init

        # fused QKV projection, column-parallel
        self.query_key_value = Linear1D_Col(
            hidden_size,
            3 * hidden_size,
            dtype=dtype,
            init_weight=weight_init,
            init_bias=init_bias)
        self.attention_dropout = nn.Dropout(attention_dropout_prob)
        # output projection, row-parallel
        self.dense = Linear1D_Row(
            hidden_size,
            hidden_size,
            dtype=dtype,
            parallel_input=True,
            init_weight=weight_init,
            init_bias=init_bias)
        self.dropout = nn.Dropout(hidden_dropout_prob)
        self.softmax = nn.Softmax(dim=-1)

    def _forward(self, hidden_states: Tensor) -> Tensor:
        qkv = self.query_key_value(hidden_states)
        # split the last dim into (heads_per_partition, 3 * head_size) and
        # move the head dim to position 1
        qkv = qkv.view(qkv.shape[:-1] + (self.num_attention_heads_per_partition,
                                         3 * self.attention_head_size))
        qkv = qkv.permute((0, 2, 1, 3))
        query_layer, key_layer, value_layer = torch.chunk(qkv, 3, dim=-1)

        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        attention_probs = self.softmax(attention_scores)
        with seed(ParallelMode.TENSOR):
            attention_probs = self.attention_dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer).transpose(1, 2)
        # merge the head and head-size dims back into the partitioned hidden size
        context_layer = context_layer.reshape(
            context_layer.size()[:-2] + (self.hidden_size_per_partition,))

        output = self.dense(context_layer)
        return self.dropout(output)

    def _checkpoint_forward(self, hidden_states: Tensor) -> Tensor:
        return checkpoint(self._forward, hidden_states)

    def forward(self, hidden_states: Tensor) -> Tensor:
        if self.checkpoint:
            return self._checkpoint_forward(hidden_states)
        return self._forward(hidden_states)
@LAYERS.register_module
class ViTHead1D(ParallelLayer):
    """Classification head for 1D parallel Vision Transformer.

    :param hidden_size: hidden size
    :type hidden_size: int
    :param num_classes: number of classes
    :type num_classes: int
    :param dtype: dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    """

    def __init__(self,
                 hidden_size,
                 num_classes,
                 dtype=None,
                 weight_init='torch'):
        super().__init__()
        assert weight_init in ('torch', 'jax')
        # jax-style init zeroes the final projection
        if weight_init == 'jax':
            init_weight = init_bias = 'zero'
        else:
            init_weight = init_bias = weight_init

        self.linear = Linear1D_Col(
            hidden_size,
            num_classes,
            dtype=dtype,
            gather_output=True,
            init_weight=init_weight,
            init_bias=init_bias)

    def forward(self, x: Tensor) -> Tensor:
        # classify from the first (cls) token only
        return self.linear(x[:, 0])
@LAYERS.register_module
class ViTHead(ParallelLayer):
    """Non-partitioned classification head for 1D parallel Vision Transformer.

    :param hidden_size: hidden size
    :type hidden_size: int
    :param num_classes: number of classes
    :type num_classes: int
    :param dtype: dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    """

    def __init__(self,
                 hidden_size,
                 num_classes,
                 dtype=None,
                 ):
        super().__init__()
        self.linear = nn.Linear(hidden_size, num_classes, dtype=dtype)
        self._broadcast_linear_params()

    def _broadcast_linear_params(self) -> None:
        # keep the replicated head identical on every 1D-parallel rank
        self.to(get_current_device())
        src = gpc.get_ranks_in_group(ParallelMode.PARALLEL_1D)[0]
        group = gpc.get_group(ParallelMode.PARALLEL_1D)
        for param in (self.linear.weight, self.linear.bias):
            dist.broadcast(param, src=src, group=group)

    def forward(self, x: Tensor) -> Tensor:
        # classify from the first (cls) token only
        return self.linear(x[:, 0])
@LAYERS.register_module
class ViTPatchEmbedding1D(ParallelLayer):
    """2D image to patch embedding via a strided convolution.

    :param img_size: iamge size
    :type img_size: int
    :param patch_size: patch size
    :type patch_size: int
    :param embed_dim: dimension of embedding
    :type embed_dim: int
    :param in_chans: number of channels of input image, defaults to 3
    :type in_chans: int, optional
    :param flatten: whether to flatten output tensor, defaults to True
    :type flatten: bool, optional
    """

    def __init__(self,
                 img_size,
                 patch_size,
                 embed_dim,
                 in_chans=3,
                 flatten=True,
                 weight_init='torch'):
        super().__init__()
        self.img_size = to_2tuple(img_size)
        self.patch_size = to_2tuple(patch_size)
        self.grid_size = (self.img_size[0] // self.patch_size[0],
                          self.img_size[1] // self.patch_size[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.flatten = flatten
        self.embed_dim = embed_dim

        # non-overlapping patches: kernel == stride == patch size
        self.proj = nn.Conv2d(in_chans,
                              self.embed_dim,
                              kernel_size=self.patch_size,
                              stride=self.patch_size)

        if weight_init == 'jax':
            # fan-in scaled truncated-normal init for the projection weight
            fan_in, _ = _calculate_fan_in_and_fan_out(self.proj.weight)
            std = math.sqrt(1.0 / fan_in)
            nn.init.trunc_normal_(self.proj.weight, std=std / .87962566103423978)
            nn.init.zeros_(self.proj.bias)

        # sync replicated parameters across the 1D parallel group
        self._broadcast_conv_params()

    def _broadcast_conv_params(self) -> None:
        self.to(get_current_device())
        src = gpc.get_ranks_in_group(ParallelMode.PARALLEL_1D)[0]
        group = gpc.get_group(ParallelMode.PARALLEL_1D)
        dist.broadcast(self.proj.weight, src=src, group=group)
        dist.broadcast(self.proj.bias, src=src, group=group)

    def forward(self, x: Tensor) -> Tensor:
        B, C, H, W = x.shape
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x)
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC
        return x
@LAYERS.register_module
class ViTTokenFuser1D(ParallelLayer):
    """Prepend the cls token and add position embeddings to the patch tokens.

    :param img_size: image size
    :type img_size: int
    :param patch_size: patch size
    :type patch_size: int
    :param embed_dim: dimension of embedding
    :type embed_dim: int
    :param drop_rate: dropout probability, defaults to 0.
    :type drop_rate: float, optional
    """

    def __init__(self,
                 img_size,
                 patch_size,
                 embed_dim,
                 drop_rate=0.
                 ):
        super().__init__()
        self.img_size = to_2tuple(img_size)
        self.patch_size = to_2tuple(patch_size)
        self.grid_size = (self.img_size[0] // self.patch_size[0],
                          self.img_size[1] // self.patch_size[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.embed_dim = embed_dim

        self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
        self.pos_embed = nn.Parameter(torch.empty(1, self.num_patches + 1, self.embed_dim))
        nn.init.trunc_normal_(self.pos_embed, std=.02)

        # move to cuda before broadcast, then share the same position
        # embedding with every rank of the tensor-parallel group
        self.to(get_current_device())
        dist.broadcast(self.pos_embed,
                       src=gpc.get_ranks_in_group(ParallelMode.TENSOR)[0],
                       group=gpc.get_group(ParallelMode.TENSOR))

        self.pos_drop = nn.Dropout(p=drop_rate)

    def forward(self, x: Tensor) -> Tensor:
        tokens = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
        return self.pos_drop(tokens + self.pos_embed).contiguous()
| [
"colossalai.utils.get_current_device",
"colossalai.core.global_context.get_group",
"colossalai.core.global_context.get_ranks_in_group",
"colossalai.nn.layer._common_utils.divide",
"colossalai.utils.checkpoint",
"colossalai.context.seed"
] | [((2805, 2829), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_prob'], {}), '(dropout_prob)\n', (2815, 2829), True, 'from torch import nn as nn, Tensor, distributed as dist\n'), ((3509, 3549), 'colossalai.utils.checkpoint', 'checkpoint', (['self._forward', 'hidden_states'], {}), '(self._forward, hidden_states)\n', (3519, 3549), False, 'from colossalai.utils import checkpoint\n'), ((4906, 4946), 'colossalai.nn.layer._common_utils.divide', 'divide', (['hidden_size', 'num_attention_heads'], {}), '(hidden_size, num_attention_heads)\n', (4912, 4946), False, 'from colossalai.nn.layer._common_utils import divide, ACT2FN\n'), ((4996, 5049), 'colossalai.nn.layer._common_utils.divide', 'divide', (['num_attention_heads', 'gpc.tensor_parallel_size'], {}), '(num_attention_heads, gpc.tensor_parallel_size)\n', (5002, 5049), False, 'from colossalai.nn.layer._common_utils import divide, ACT2FN\n'), ((5091, 5136), 'colossalai.nn.layer._common_utils.divide', 'divide', (['hidden_size', 'gpc.tensor_parallel_size'], {}), '(hidden_size, gpc.tensor_parallel_size)\n', (5097, 5136), False, 'from colossalai.nn.layer._common_utils import divide, ACT2FN\n'), ((5573, 5607), 'torch.nn.Dropout', 'nn.Dropout', (['attention_dropout_prob'], {}), '(attention_dropout_prob)\n', (5583, 5607), True, 'from torch import nn as nn, Tensor, distributed as dist\n'), ((5841, 5872), 'torch.nn.Dropout', 'nn.Dropout', (['hidden_dropout_prob'], {}), '(hidden_dropout_prob)\n', (5851, 5872), True, 'from torch import nn as nn, Tensor, distributed as dist\n'), ((5896, 5914), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (5906, 5914), True, 'from torch import nn as nn, Tensor, distributed as dist\n'), ((6345, 6384), 'torch.chunk', 'torch.chunk', (['query_key_value', '(3)'], {'dim': '(-1)'}), '(query_key_value, 3, dim=-1)\n', (6356, 6384), False, 'import torch\n'), ((6784, 6826), 'torch.matmul', 'torch.matmul', (['attention_probs', 'value_layer'], {}), '(attention_probs, value_layer)\n', (6796, 
6826), False, 'import torch\n'), ((7249, 7289), 'colossalai.utils.checkpoint', 'checkpoint', (['self._forward', 'hidden_states'], {}), '(self._forward, hidden_states)\n', (7259, 7289), False, 'from colossalai.utils import checkpoint\n'), ((9136, 9184), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'num_classes'], {'dtype': 'dtype'}), '(hidden_size, num_classes, dtype=dtype)\n', (9145, 9184), True, 'from torch import nn as nn, Tensor, distributed as dist\n'), ((9374, 9422), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (9396, 9422), True, 'from colossalai.core import global_context as gpc\n'), ((10944, 11022), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_chans', 'self.embed_dim'], {'kernel_size': 'patch_size', 'stride': 'patch_size'}), '(in_chans, self.embed_dim, kernel_size=patch_size, stride=patch_size)\n', (10953, 11022), True, 'from torch import nn as nn, Tensor, distributed as dist\n'), ((11572, 11620), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (11594, 11620), True, 'from colossalai.core import global_context as gpc\n'), ((13402, 13449), 'torch.nn.init.trunc_normal_', 'nn.init.trunc_normal_', (['self.pos_embed'], {'std': '(0.02)'}), '(self.pos_embed, std=0.02)\n', (13423, 13449), True, 'from torch import nn as nn, Tensor, distributed as dist\n'), ((13731, 13754), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'drop_rate'}), '(p=drop_rate)\n', (13741, 13754), True, 'from torch import nn as nn, Tensor, distributed as dist\n'), ((13874, 13906), 'torch.cat', 'torch.cat', (['(cls_token, x)'], {'dim': '(1)'}), '((cls_token, x), dim=1)\n', (13883, 13906), False, 'import torch\n'), ((3219, 3244), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (3223, 3244), False, 'from colossalai.context import seed, ParallelMode\n'), ((6554, 
6589), 'math.sqrt', 'math.sqrt', (['self.attention_head_size'], {}), '(self.attention_head_size)\n', (6563, 6589), False, 'import math\n'), ((6662, 6687), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (6666, 6687), False, 'from colossalai.context import seed, ParallelMode\n'), ((9336, 9356), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (9354, 9356), False, 'from colossalai.utils import get_current_device\n'), ((11202, 11249), 'torch.nn.init._calculate_fan_in_and_fan_out', '_calculate_fan_in_and_fan_out', (['self.proj.weight'], {}), '(self.proj.weight)\n', (11231, 11249), False, 'from torch.nn.init import _calculate_fan_in_and_fan_out\n'), ((11268, 11291), 'math.sqrt', 'math.sqrt', (['(1.0 / fan_in)'], {}), '(1.0 / fan_in)\n', (11277, 11291), False, 'import math\n'), ((11304, 11373), 'torch.nn.init.trunc_normal_', 'nn.init.trunc_normal_', (['self.proj.weight'], {'std': '(std / 0.8796256610342398)'}), '(self.proj.weight, std=std / 0.8796256610342398)\n', (11325, 11373), True, 'from torch import nn as nn, Tensor, distributed as dist\n'), ((11386, 11416), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['self.proj.bias'], {}), '(self.proj.bias)\n', (11400, 11416), True, 'from torch import nn as nn, Tensor, distributed as dist\n'), ((11534, 11554), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (11552, 11554), False, 'from colossalai.utils import get_current_device\n'), ((13241, 13274), 'torch.zeros', 'torch.zeros', (['(1)', '(1)', 'self.embed_dim'], {}), '(1, 1, self.embed_dim)\n', (13252, 13274), False, 'import torch\n'), ((13327, 13379), 'torch.empty', 'torch.empty', (['(1)', '(self.num_patches + 1)', 'self.embed_dim'], {}), '(1, self.num_patches + 1, self.embed_dim)\n', (13338, 13379), False, 'import torch\n'), ((13506, 13526), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (13524, 13526), False, 'from colossalai.utils import 
get_current_device\n'), ((9510, 9549), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (9523, 9549), True, 'from colossalai.core import global_context as gpc\n'), ((9635, 9674), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (9648, 9674), True, 'from colossalai.core import global_context as gpc\n'), ((11706, 11745), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (11719, 11745), True, 'from colossalai.core import global_context as gpc\n'), ((11829, 11868), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (11842, 11868), True, 'from colossalai.core import global_context as gpc\n'), ((13671, 13705), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (13684, 13705), True, 'from colossalai.core import global_context as gpc\n'), ((13594, 13637), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (13616, 13637), True, 'from colossalai.core import global_context as gpc\n')] |
from colossalai.context.parallel_mode import ParallelMode
import torch
import torch.nn as nn
import inspect
from .layers import Embedding, BertLayer, BertDualHead, PreProcessor, VocabEmbedding
from .layers.init_method import init_normal, output_init_normal
from colossalai.core import global_context as gpc
from colossalai.context import ParallelMode
from colossalai.kernel import LayerNorm
from colossalai.nn.layer.wrapper import PipelineSharedModuleWrapper
from colossalai.logging import get_dist_logger
from colossalai.builder.pipeline import partition_uniform
class BertForPretrain(nn.Module):
    """BERT pretraining model with sequence parallelism.

    The maximum sequence length is split evenly across the ranks of the
    SEQUENCE parallel group, so every rank processes a sub-sequence of
    length ``max_sequence_length // seq_parallel_size``.

    :param vocab_size: size of the vocabulary
    :param hidden_size: hidden dimension of the transformer
    :param max_sequence_length: full (un-split) sequence length
    :param num_attention_heads: number of attention heads per layer
    :param num_layers: number of transformer layers
    :param add_binary_head: whether to add the binary classification head
    :param is_naive_fp16: naive-fp16 flag forwarded to the transformer layers
    :param num_tokentypes: number of token-type embeddings (forced to 0 when
        ``add_binary_head`` is False), defaults to 2
    :param dropout_prob: dropout probability, defaults to 0.1
    :param mlp_ratio: MLP hidden size over embedding dim, defaults to 4
    :param init_std: std used for weight initialization, defaults to 0.02
    :param convert_fp16_to_fp32_in_softmax: forwarded to the transformer
        layers, defaults to False
    """

    def __init__(self,
                 vocab_size,
                 hidden_size,
                 max_sequence_length,
                 num_attention_heads,
                 num_layers,
                 add_binary_head,
                 is_naive_fp16,
                 num_tokentypes=2,
                 dropout_prob=0.1,
                 mlp_ratio=4,
                 init_std=0.02,
                 convert_fp16_to_fp32_in_softmax=False,
                 ):
        super().__init__()
        self.seq_parallel_size = gpc.get_world_size(ParallelMode.SEQUENCE)
        assert max_sequence_length % self.seq_parallel_size == 0, 'sequence length is not divisible by the sequence parallel size'
        self.sub_seq_length = max_sequence_length // self.seq_parallel_size
        self.init_std = init_std
        self.num_layers = num_layers

        # no binary head -> no token-type embeddings
        if not add_binary_head:
            num_tokentypes = 0

        self.preprocessor = PreProcessor(self.sub_seq_length)
        self.embedding = Embedding(hidden_size=hidden_size,
                                   vocab_size=vocab_size,
                                   max_sequence_length=max_sequence_length,
                                   embedding_dropout_prob=dropout_prob,
                                   num_tokentypes=num_tokentypes)
        self.bert_layers = nn.ModuleList()
        for i in range(num_layers):
            bert_layer = BertLayer(layer_number=i + 1,
                                   hidden_size=hidden_size,
                                   num_attention_heads=num_attention_heads,
                                   attention_dropout=dropout_prob,
                                   mlp_ratio=mlp_ratio,
                                   hidden_dropout=dropout_prob,
                                   convert_fp16_to_fp32_in_softmax=convert_fp16_to_fp32_in_softmax,
                                   is_naive_fp16=is_naive_fp16)
            self.bert_layers.append(bert_layer)

        self.layer_norm = LayerNorm(hidden_size)
        # LM head size comes from the word embedding table (weight tying)
        self.head = BertDualHead(hidden_size,
                                 self.embedding.word_embedding_weight.size(0),
                                 add_binary_head=add_binary_head)
        self.reset_parameters()

    def _init_normal(self, tensor):
        # normal init with the configured std (see layers.init_method.init_normal)
        init_normal(tensor, sigma=self.init_std)

    def _output_init_normal(self, tensor):
        # output-layer init scaled by num_layers (see layers.init_method.output_init_normal)
        output_init_normal(tensor, sigma=self.init_std, num_layers=self.num_layers)

    def reset_parameters(self):
        """(Re-)initialize all trainable weights in place."""
        # initialize embedding
        self._init_normal(self.embedding.word_embedding_weight)
        self._init_normal(self.embedding.position_embeddings.weight)
        if self.embedding.tokentype_embeddings:
            self._init_normal(self.embedding.tokentype_embeddings.weight)

        # initialize bert layer
        for layer in self.bert_layers:
            # initialize self attention
            self._init_normal(layer.self_attention.query_key_value.weight)
            self._output_init_normal(layer.self_attention.dense.weight)
            self._init_normal(layer.mlp.dense_h_to_4h.weight)
            self._output_init_normal(layer.mlp.dense_4h_to_h.weight)

        # initializer head
        self._init_normal(self.head.lm_head.dense.weight)
        if self.head.binary_head is not None:
            self._init_normal(self.head.binary_head.pooler.dense.weight)
            self._init_normal(self.head.binary_head.dense.weight)

    def forward(self, input_ids, attention_masks, tokentype_ids, lm_labels):
        """Run the BERT stack and return the pretraining head outputs.

        Shapes (from the original inline comments):
        - input_ids / tokentype_ids: [batch_size, sub_seq_len]
        - attention_masks (in): [batch_size, seq_len]
        - attention_masks (after preprocessing): [batch_size, 1, sub_seq_len, seq_len]
        """
        pos_ids, attention_masks = self.preprocessor(input_ids, attention_masks)

        hidden_states = self.embedding(input_ids, pos_ids, tokentype_ids)

        # [batch_size, sub_seq_len, hidden_size] -> [sub_seq_len, batch_size, hidden_size]
        hidden_states = hidden_states.transpose(0, 1).contiguous()

        # fix: the index previously bound via `enumerate` was never used
        for layer in self.bert_layers:
            hidden_states = layer(hidden_states, attention_masks)

        hidden_states = hidden_states.transpose(0, 1).contiguous()
        output = self.layer_norm(hidden_states)

        # output: [sub_seq_len, batch_size, hidden_size]
        # word_embedding: [vocab_size, hidden_size]
        return self.head(output, self.embedding.word_embedding_weight, lm_labels)
class PipelineBertForPretrain(nn.Module):
    """BERT pre-training model split into pipeline stages.

    Only the first stage owns the input embedding; only the last stage owns
    the output word embedding, the final LayerNorm and the dual head.  Each
    stage holds the transformer layers in ``[start_idx, end_idx)``.
    """

    def __init__(self,
                 vocab_size,
                 hidden_size,
                 max_sequence_length,
                 num_attention_heads,
                 num_layers,
                 add_binary_head,
                 is_naive_fp16,
                 num_tokentypes=2,
                 dropout_prob=0.1,
                 mlp_ratio=4,
                 init_std=0.02,
                 convert_fp16_to_fp32_in_softmax=False,
                 first_stage=True,
                 last_stage=True,
                 start_idx=None,
                 end_idx=None):
        super().__init__()
        self.seq_parallel_size = gpc.get_world_size(ParallelMode.SEQUENCE)
        assert max_sequence_length % self.seq_parallel_size == 0, \
            'sequence length is not divisible by the sequence parallel size'
        self.sub_seq_length = max_sequence_length // self.seq_parallel_size
        self.init_std = init_std
        self.num_layers = num_layers
        if not add_binary_head:
            num_tokentypes = 0
        self.first_stage = first_stage
        self.last_stage = last_stage
        self.preprocessor = PreProcessor(self.sub_seq_length)
        if self.first_stage:
            self.embedding = Embedding(hidden_size=hidden_size,
                                       vocab_size=vocab_size,
                                       max_sequence_length=max_sequence_length,
                                       embedding_dropout_prob=dropout_prob,
                                       num_tokentypes=num_tokentypes)
        # transformer layers owned by this stage
        self.bert_layers = nn.ModuleList()
        if start_idx is None and end_idx is None:
            start_idx, end_idx = 0, num_layers
        for layer_id in range(start_idx, end_idx):
            self.bert_layers.append(
                BertLayer(layer_number=layer_id + 1,
                          hidden_size=hidden_size,
                          num_attention_heads=num_attention_heads,
                          attention_dropout=dropout_prob,
                          mlp_ratio=mlp_ratio,
                          hidden_dropout=dropout_prob,
                          convert_fp16_to_fp32_in_softmax=convert_fp16_to_fp32_in_softmax,
                          is_naive_fp16=is_naive_fp16))
        if self.last_stage:
            self.word_embeddings = VocabEmbedding(vocab_size, hidden_size)
            self.layer_norm = LayerNorm(hidden_size)
            self.head = BertDualHead(hidden_size, vocab_size,
                                     add_binary_head=add_binary_head)
        self.reset_parameters()

    def _init_normal(self, tensor):
        """Standard normal init with std = ``init_std``."""
        init_normal(tensor, sigma=self.init_std)

    def _output_init_normal(self, tensor):
        """Depth-scaled normal init for output projections."""
        output_init_normal(tensor, sigma=self.init_std, num_layers=self.num_layers)

    def reset_parameters(self):
        """Re-initialize only the parameters owned by this pipeline stage."""
        if self.first_stage:
            self._init_normal(self.embedding.word_embedding_weight)
            self._init_normal(self.embedding.position_embeddings.weight)
            if self.embedding.tokentype_embeddings:
                self._init_normal(self.embedding.tokentype_embeddings.weight)
        for layer in self.bert_layers:
            self._init_normal(layer.self_attention.query_key_value.weight)
            self._output_init_normal(layer.self_attention.dense.weight)
            self._init_normal(layer.mlp.dense_h_to_4h.weight)
            self._output_init_normal(layer.mlp.dense_4h_to_h.weight)
        if self.last_stage:
            self._init_normal(self.head.lm_head.dense.weight)
            if self.head.binary_head is not None:
                self._init_normal(self.head.binary_head.pooler.dense.weight)
                self._init_normal(self.head.binary_head.dense.weight)

    def forward(self, input_ids, attention_masks, tokentype_ids, lm_labels):
        """Forward for one pipeline stage.

        On non-first stages ``input_ids`` carries the hidden states emitted
        by the previous stage; on non-last stages the hidden states are
        returned unchanged for the next stage.
        """
        if self.first_stage:
            pos_ids, attention_masks = self.preprocessor(input_ids, attention_masks)
        else:
            _, attention_masks = self.preprocessor(None, attention_masks)
        if self.first_stage:
            hidden_states = self.embedding(input_ids, pos_ids, tokentype_ids)
            # [batch, sub_seq, hidden] -> [sub_seq, batch, hidden]
            hidden_states = hidden_states.transpose(0, 1).contiguous()
        else:
            hidden_states = input_ids
        for layer in self.bert_layers:
            hidden_states = layer(hidden_states, attention_masks)
        if self.last_stage:
            hidden_states = hidden_states.transpose(0, 1).contiguous()
            output = self.layer_norm(hidden_states)
            output = self.head(output, self.word_embeddings.weight, lm_labels)
        else:
            output = hidden_states
        return output
def _filter_kwargs(func, kwargs):
sig = inspect.signature(func)
return {k: v for k, v in kwargs.items() if k in sig.parameters}
def build_pipeline_bert(num_layers, num_chunks, device=torch.device('cuda'), **kwargs):
    """Build the pipeline-parallel BERT chunk(s) owned by this rank.

    Layers are partitioned uniformly across pipeline stages; the first and
    last stages share the word-embedding weights via the shared-module
    wrapper.  Returns a single module when this rank holds one chunk, else
    an ``nn.ModuleList`` of chunks.
    """
    logger = get_dist_logger()
    pipeline_size = gpc.get_world_size(ParallelMode.PIPELINE)
    pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
    rank = gpc.get_global_rank()
    # first and last stages tie their word embeddings together
    wrapper = PipelineSharedModuleWrapper([0, pipeline_size - 1])
    parts = partition_uniform(num_layers, pipeline_size, num_chunks)[pipeline_rank]
    models = []
    for start, end in parts:
        kwargs.update(num_layers=num_layers,
                      start_idx=start,
                      end_idx=end,
                      first_stage=start == 0,
                      last_stage=end == num_layers)
        logger.info(f'Rank{rank} build layer {start}-{end}, {end-start}/{num_layers} layers')
        chunk = PipelineBertForPretrain(**_filter_kwargs(PipelineBertForPretrain.__init__, kwargs)).to(device)
        if start == 0:
            wrapper.register_module(chunk.embedding.word_embeddings)
        elif end == num_layers:
            wrapper.register_module(chunk.word_embeddings)
        models.append(chunk)
    return models[0] if len(models) == 1 else nn.ModuleList(models)
| [
"colossalai.core.global_context.get_local_rank",
"colossalai.logging.get_dist_logger",
"colossalai.nn.layer.wrapper.PipelineSharedModuleWrapper",
"colossalai.core.global_context.get_world_size",
"colossalai.core.global_context.get_global_rank",
"colossalai.kernel.LayerNorm",
"colossalai.builder.pipeline... | [((10836, 10859), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (10853, 10859), False, 'import inspect\n'), ((10985, 11005), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (10997, 11005), False, 'import torch\n'), ((11031, 11048), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (11046, 11048), False, 'from colossalai.logging import get_dist_logger\n'), ((11069, 11110), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (11087, 11110), True, 'from colossalai.core import global_context as gpc\n'), ((11131, 11172), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (11149, 11172), True, 'from colossalai.core import global_context as gpc\n'), ((11184, 11205), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (11203, 11205), True, 'from colossalai.core import global_context as gpc\n'), ((11220, 11271), 'colossalai.nn.layer.wrapper.PipelineSharedModuleWrapper', 'PipelineSharedModuleWrapper', (['[0, pipeline_size - 1]'], {}), '([0, pipeline_size - 1])\n', (11247, 11271), False, 'from colossalai.nn.layer.wrapper import PipelineSharedModuleWrapper\n'), ((1122, 1163), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.SEQUENCE'], {}), '(ParallelMode.SEQUENCE)\n', (1140, 1163), True, 'from colossalai.core import global_context as gpc\n'), ((1927, 1942), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1940, 1942), True, 'import torch.nn as nn\n'), ((2631, 2653), 'colossalai.kernel.LayerNorm', 'LayerNorm', (['hidden_size'], {}), '(hidden_size)\n', (2640, 2653), False, 'from colossalai.kernel import LayerNorm\n'), ((5917, 5958), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.SEQUENCE'], 
{}), '(ParallelMode.SEQUENCE)\n', (5935, 5958), True, 'from colossalai.core import global_context as gpc\n'), ((6879, 6894), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (6892, 6894), True, 'import torch.nn as nn\n'), ((11284, 11340), 'colossalai.builder.pipeline.partition_uniform', 'partition_uniform', (['num_layers', 'pipeline_size', 'num_chunks'], {}), '(num_layers, pipeline_size, num_chunks)\n', (11301, 11340), False, 'from colossalai.builder.pipeline import partition_uniform\n'), ((12097, 12118), 'torch.nn.ModuleList', 'nn.ModuleList', (['models'], {}), '(models)\n', (12110, 12118), True, 'import torch.nn as nn\n'), ((7808, 7830), 'colossalai.kernel.LayerNorm', 'LayerNorm', (['hidden_size'], {}), '(hidden_size)\n', (7817, 7830), False, 'from colossalai.kernel import LayerNorm\n')] |
import torch
from typing import Union
from colossalai.tensor.op_wrapper import colo_op_impl
from colossalai.nn.layer.parallel_1d._utils import split_forward_gather_backward, reduce_input, reduce_grad
from colossalai.nn.layer.utils import divide
from colossalai.core import global_context as gpc
from colossalai.tensor import ComputePattern, TensorSpec, ComputePattern, ParallelAction, ColoTensor
from colossalai.tensor.graph import GraphOpNode, GraphGlobalEnv
from colossalai.tensor import dist_spec
def colo_addmm_1Drow(input_tensor: ColoTensor, mat1: ColoTensor, mat2: ColoTensor, beta: Union[int, float],
                     alpha: Union[int, float]) -> ColoTensor:
    """addmm with ``mat2`` row-sharded (TP1DRow).

    mat1:S[1] x mat2:S[0] yields a partial product that is all-reduced
    across the tensor-parallel group; the result is replicated.
    """
    parallel_action = mat2.spec.get_action_by_compute_pattern(ComputePattern.TP1DRow)
    # shard mat1 along its last dim so it lines up with mat2's row shards
    pg = mat2.spec.get_process_group()
    mat1.to_dist_spec(dist_spec.shard(pg, [-1], [pg.size()]))
    partial_output = torch.mm(mat1.torch_tensor(), mat2.torch_tensor())
    # sum the partial products: Output:P -> Output
    total_output = reduce_input(partial_output, parallel_action.parallel_mode)
    assert not input_tensor.has_spec(), 'Invalid input spec for 1Drow addmm op'
    result = beta * input_tensor.torch_tensor() + alpha * total_output
    return ColoTensor.init_from_torch_tensor(
        result, spec=TensorSpec(dist_spec.replicate(mat2.spec.get_process_group())))
def colo_addmm_1Dcol(input_tensor: ColoTensor, mat1: ColoTensor, mat2: ColoTensor, beta: Union[int, float],
                     alpha: Union[int, float]) -> ColoTensor:
    """addmm with ``mat2`` column-sharded (TP1DCol).

    mat1:B x mat2:S[1] + input:S[1] gives Output:S[1]; the output is
    all-gathered to a replicated tensor when ``gather_out`` is set.
    """
    parallel_action = mat2.spec.get_action_by_compute_pattern(ComputePattern.TP1DCol)
    mat1.to_dist_spec(dist_spec.replicate(mat2.spec.get_process_group()))
    # reduce_grad inserts the backward all-reduce for the replicated mat1
    mat1_torch_tensor = reduce_grad(mat1.torch_tensor(), parallel_action.parallel_mode)
    output_parallel = torch.addmm(input_tensor.torch_tensor(),
                                  mat1_torch_tensor,
                                  mat2.torch_tensor(),
                                  beta=beta,
                                  alpha=alpha)
    output_spec = TensorSpec(
        dist_spec.shard(mat2.spec.get_process_group(), [-1], [mat2.spec.get_process_group().size()]),
        [ParallelAction(priority=1, parallel_mode=parallel_action.parallel_mode)])
    output = ColoTensor.init_from_torch_tensor(output_parallel, spec=output_spec)
    if parallel_action.gather_out:
        # All-Gather(Output) back to a replicated tensor
        output.to_dist_spec(dist_spec.replicate(mat2.spec.get_process_group()))
    return output
@colo_op_impl(torch.addmm)
def colo_addmm(types, args, kwargs, pg):
    """Handles ``__torch_function__`` dispatch for ``torch.addmm``.

    Computes ``beta * input + alpha * (mat1 @ mat2)``, routing to the
    1D-row / 1D-column tensor-parallel kernels when ``mat2`` carries a
    distributed spec, and to plain ``torch.addmm`` otherwise.
    """
    input_tensor, mat1, mat2 = args[:3]
    to_colo_tensor = lambda t: t if isinstance(t, ColoTensor) else ColoTensor.init_from_torch_tensor(t)
    input_tensor = to_colo_tensor(input_tensor)
    mat2 = to_colo_tensor(mat2)
    beta = kwargs.get('beta', 1) if kwargs else 1
    alpha = kwargs.get('alpha', 1) if kwargs else 1
    # Add communication logic before and after linear call.
    ret_tensor = None
    if not mat2.has_spec():    # No Model Parallel Applied
        assert not input_tensor.has_spec(), 'Invalid input spec for native addmm op'
        # BUG FIX: the original called torch.addbmm, which is the *batched*
        # op and expects 3-D operands; this dispatcher handles the 2-D
        # torch.addmm, so the fallback must call torch.addmm.
        ret_tensor = ColoTensor.init_from_torch_tensor(
            torch.addmm(input_tensor.torch_tensor(), mat1, mat2.torch_tensor(), beta=beta, alpha=alpha))
    elif mat2.spec.num_action == 1:    # Single Model Parallel Applied
        spec = TensorSpec(dist_spec.replicate(mat2.spec.get_process_group()))
        mat1 = args[1] if isinstance(args[1], ColoTensor) else ColoTensor.init_from_torch_tensor(args[1], spec=spec)
        compute_patterns = mat2.spec.compute_patterns
        if ComputePattern.TP1DRow in compute_patterns:
            ret_tensor = colo_addmm_1Drow(input_tensor, mat1, mat2, beta, alpha)
        elif ComputePattern.TP1DCol in compute_patterns:
            ret_tensor = colo_addmm_1Dcol(input_tensor, mat1, mat2, beta, alpha)
        else:
            raise NotImplementedError
    else:
        raise NotImplementedError
    return ret_tensor
| [
"colossalai.tensor.ColoTensor.init_from_torch_tensor",
"colossalai.tensor.op_wrapper.colo_op_impl",
"colossalai.tensor.ParallelAction",
"colossalai.nn.layer.parallel_1d._utils.reduce_input"
] | [((2694, 2719), 'colossalai.tensor.op_wrapper.colo_op_impl', 'colo_op_impl', (['torch.addmm'], {}), '(torch.addmm)\n', (2706, 2719), False, 'from colossalai.tensor.op_wrapper import colo_op_impl\n'), ((1090, 1149), 'colossalai.nn.layer.parallel_1d._utils.reduce_input', 'reduce_input', (['partial_output', 'parallel_action.parallel_mode'], {}), '(partial_output, parallel_action.parallel_mode)\n', (1102, 1149), False, 'from colossalai.nn.layer.parallel_1d._utils import split_forward_gather_backward, reduce_input, reduce_grad\n'), ((2460, 2528), 'colossalai.tensor.ColoTensor.init_from_torch_tensor', 'ColoTensor.init_from_torch_tensor', (['output_parallel'], {'spec': 'output_spec'}), '(output_parallel, spec=output_spec)\n', (2493, 2528), False, 'from colossalai.tensor import ComputePattern, TensorSpec, ComputePattern, ParallelAction, ColoTensor\n'), ((2373, 2444), 'colossalai.tensor.ParallelAction', 'ParallelAction', ([], {'priority': '(1)', 'parallel_mode': 'parallel_action.parallel_mode'}), '(priority=1, parallel_mode=parallel_action.parallel_mode)\n', (2387, 2444), False, 'from colossalai.tensor import ComputePattern, TensorSpec, ComputePattern, ParallelAction, ColoTensor\n'), ((2994, 3030), 'colossalai.tensor.ColoTensor.init_from_torch_tensor', 'ColoTensor.init_from_torch_tensor', (['t'], {}), '(t)\n', (3027, 3030), False, 'from colossalai.tensor import ComputePattern, TensorSpec, ComputePattern, ParallelAction, ColoTensor\n'), ((4020, 4073), 'colossalai.tensor.ColoTensor.init_from_torch_tensor', 'ColoTensor.init_from_torch_tensor', (['args[1]'], {'spec': 'spec'}), '(args[1], spec=spec)\n', (4053, 4073), False, 'from colossalai.tensor import ComputePattern, TensorSpec, ComputePattern, ParallelAction, ColoTensor\n')] |
from typing import Any, Optional, Tuple
import torch
import torch.distributed as dist
from colossalai.communication.collective import (all_gather, all_reduce, reduce, reduce_scatter)
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device
from torch import Tensor
from torch.cuda.amp import custom_bwd, custom_fwd
from colossalai.global_variables import tensor_parallel_env as env
def matmul_2d(
    a,
    b,
    summa_dim,
    out_shape,
    row_rank=None,
    col_rank=None,
    row_parallel_mode=ParallelMode.PARALLEL_2D_ROW,
    col_parallel_mode=ParallelMode.PARALLEL_2D_COL,
):
    r"""Matrix multiplication for 2D parallelism.

    Args:
        a (:class:`torch.tensor`): matrix :math:`A`.
        b (:class:`torch.tensor`): matrix :math:`B`.
        summa_dim (int): dimension of SUMMA fo 2D parallelism.
        out_shape (:class:`torch.size`): shape of output tensor.
        row_rank (int, optional): the rank of row, defaults to None.
        col_rank (int, optional): the rank of column, defaults to None.
        row_parallel_mode (:class:`colossalai.context.ParallelMode`, optional):
            row parallel mode, defaults to ParallelMode.PARALLEL_2D_ROW.
        col_parallel_mode (:class:`colossalai.context.ParallelMode`, optional):
            column parallel mode, defaults to ParallelMode.PARALLEL_2D_COL.

    Returns:
        :class:`torch.tensor`: :math:`C = AB`.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    if row_rank is None:
        row_rank = gpc.get_local_rank(col_parallel_mode)
    if col_rank is None:
        col_rank = gpc.get_local_rank(row_parallel_mode)
    # fall back to rank 0 / size 1 when the DATA / PIPELINE groups are absent
    data_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.DATA) else gpc.get_local_rank(ParallelMode.DATA)
    pipeline_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_local_rank(
        ParallelMode.PIPELINE)
    pipeline_parallel_size = 1 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_world_size(
        ParallelMode.PIPELINE)
    tensor_parallel_size = summa_dim**2
    # BUG FIX: the original instantiated the autograd.Function subclass
    # (Matmul_AB_2D(...)), which never runs its static forward(); custom
    # autograd Functions must be invoked through .apply().
    return Matmul_AB_2D.apply(a, b, summa_dim, out_shape, row_rank, col_rank, row_parallel_mode, col_parallel_mode,
                              data_parallel_rank, pipeline_parallel_rank, pipeline_parallel_size,
                              tensor_parallel_size)
class _Classifier2D(torch.autograd.Function):
# 2D tensor-parallel classifier: computes C = A @ B^T (+ bias).
# B is re-assembled with an all-gather over the column group and the
# partial products are summed with an all-reduce over the row group.
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(
ctx: Any,
A: Tensor,
B: Tensor,
bias: Optional[Tensor],
summa_dim: int,
out_shape: Tuple[int, ...],
row_rank: int,
col_rank: int,
row_parallel_mode: ParallelMode,
col_parallel_mode: ParallelMode,
data_parallel_rank: int,
pipeline_parallel_rank: int,
pipeline_parallel_size: int,
tensor_parallel_size: int,
) -> Tensor:
# flatten leading dims so both operands are 2-D for matmul
A_shape = A.shape
A = A.reshape((-1, A_shape[-1]))
B_shape = B.shape
B = B.reshape((-1, B_shape[-1]))
# gather the full classifier weight along the column group
B_temp = all_gather(B, -1, col_parallel_mode)
if ctx:
ctx.save_for_backward(A, B_temp)
C = torch.matmul(A, B_temp.transpose(0, 1))
# sum partial products across the row group
C = all_reduce(C, row_parallel_mode)
ctx.use_bias = bias is not None
if bias is not None:
C = C + bias
out = C.reshape(out_shape)
if ctx:
# stash everything backward() needs
ctx.summa_dim = summa_dim
ctx.row_rank = row_rank
ctx.col_rank = col_rank
ctx.row_parallel_mode = row_parallel_mode
ctx.col_parallel_mode = col_parallel_mode
ctx.A_shape = A_shape
ctx.B_shape = B_shape
ctx.data_parallel_rank = data_parallel_rank
ctx.pipeline_parallel_rank = pipeline_parallel_rank
ctx.pipeline_parallel_size = pipeline_parallel_size
ctx.tensor_parallel_size = tensor_parallel_size
return out
@staticmethod
@custom_bwd
def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
A, B = ctx.saved_tensors
with torch.no_grad():
# dA = dC @ B (B was saved already gathered/flattened)
A_grad = torch.matmul(output_grad, B)
A_grad = A_grad.reshape(ctx.A_shape)
# dB = dC^T @ A, then scattered back to this rank's shard of B
B_grad = torch.matmul(output_grad.reshape(-1, output_grad.shape[-1]).transpose(0, 1), A)
B_grad = reduce_scatter(B_grad, -1, ctx.col_parallel_mode)
B_grad = B_grad.reshape(ctx.B_shape)
if ctx.use_bias:
bias_grad = torch.sum(output_grad, dim=tuple(range(output_grad.ndim - 1)))
bias_grad = all_reduce(bias_grad, ctx.col_parallel_mode)
else:
bias_grad = None
# one grad slot per forward() argument; non-tensor args get None
return A_grad, B_grad, bias_grad, None, None, None, None, None, None, None, None, None, None
def classifier_2d(A: Tensor, B: Tensor, bias: Optional[Tensor], summa_dim: int, out_shape: Tuple[int, ...],
                  row_rank: int, col_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode,
                  data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int,
                  tensor_parallel_size: int) -> Tensor:
    r"""2D parallel classifier: computes ``A @ B^T (+ bias)`` under SUMMA 2D tensor parallelism.

    Args:
        A (:class:`torch.tensor`): matrix :math:`A`.
        B (:class:`torch.tensor`): matrix :math:`B` (classifier weight).
        bias (:class:`torch.tensor`, optional): bias vector, or None.
        summa_dim (int): dimension of SUMMA for 2D parallelism.
        out_shape (:class:`torch.size`): shape of the output tensor.
        row_rank (int): the rank of row.
        col_rank (int): the rank of column.
        row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
        col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
        data_parallel_rank (int): data parallel rank.
        pipeline_parallel_rank (int): pipeline parallel rank.
        pipeline_parallel_size (int): pipeline parallel size.
        tensor_parallel_size (int): tensor parallel size.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    # thin wrapper over the autograd Function
    return _Classifier2D.apply(A, B, bias, summa_dim, out_shape, row_rank, col_rank, row_parallel_mode,
                               col_parallel_mode, data_parallel_rank, pipeline_parallel_rank,
                               pipeline_parallel_size, tensor_parallel_size)
class Matmul_AB_2D(torch.autograd.Function):
r"""Matrix multiplication for :math:`C = AB`.
Args:
A (:class:`torch.tensor`): matrix :math:`A`.
B (:class:`torch.tensor`): matrix :math:`B`.
summa_dim (int): dimension of SUMMA fo 2D parallelism.
out_shape (:class:`torch.size`): shape of output tensor.
row_rank (int, optional): the rank of row, defaults to None.
col_rank (int, optional): the rank of column, defaults to None.
row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
data_parallel_rank (int): data parallel rank.
pipeline_parallel_rank (int): pipeline parallel rank
pipeline_parallel_size (int): pipeline parallel size.
tensor_parallel_size (int): tensor parallel size.
Note:
The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
"""
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(
ctx: Any,
A: Tensor,
B: Tensor,
summa_dim: int,
out_shape: Tuple[int, ...],
row_rank: int,
col_rank: int,
row_parallel_mode: ParallelMode,
col_parallel_mode: ParallelMode,
data_parallel_rank: int,
pipeline_parallel_rank: int,
pipeline_parallel_size: int,
tensor_parallel_size: int,
) -> Tensor:
# A: [b / q, s, h / q] -> [(b * s) / q, h / q]
# B: [h / q, s / q]
# C: [b / q, s, s / q] -> [(b * s) / q, s / q]
assert A.shape[-1] == B.shape[-2], \
'Invalid shapes: A={}, B={} for AB.'.format(A.shape, B.shape)
if ctx:
ctx.save_for_backward(A, B)
A_shape = A.shape
A = A.reshape((-1, A_shape[-1]))
B_shape = B.shape
B = B.reshape((-1, B_shape[-1]))
C_shape = (A.shape[0], B.shape[-1])
# C accumulates one partial product per SUMMA step, so start from zeros
C = torch.zeros(C_shape, dtype=A.dtype, device=get_current_device())
# use circular buffer to store the communication tensor
# 2 is enough for all cases
A_list = [torch.empty_like(A) for _ in range(2)]
B_list = [torch.empty_like(B) for _ in range(2)]
row_group = gpc.get_group(row_parallel_mode)
col_group = gpc.get_group(col_parallel_mode)
# global rank of the first broadcast source: A comes from this row
# (offset by row), B from this column (offset by summa_dim per step)
src_a = summa_dim * row_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
pipeline_parallel_rank * tensor_parallel_size
src_b = col_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
pipeline_parallel_rank * tensor_parallel_size
opa = [None] * 2
opb = [None] * 2
A_list[0].copy_(A)
B_list[0].copy_(B)
# double buffering: prefetch step 0, then overlap each step's broadcast
# with the previous step's local addmm
opa[0] = dist.broadcast(A_list[0], src=src_a, group=row_group, async_op=True)
opb[0] = dist.broadcast(B_list[0], src=src_b, group=col_group, async_op=True)
cur = 0
for i in range(summa_dim):
if i != summa_dim - 1:
A_list[1 - cur].copy_(A)
opa[1 - cur] = dist.broadcast(A_list[1 - cur], src=src_a + 1, group=row_group, async_op=True)
B_list[1 - cur].copy_(B)
opb[1 - cur] = dist.broadcast(B_list[1 - cur], src=src_b + summa_dim, group=col_group, async_op=True)
if opa[cur] is not None:
opa[cur].wait()
if opb[cur] is not None:
opb[cur].wait()
# C += A_i @ B_i for this step (addmm with out=C accumulates in place)
torch.addmm(C, A_list[cur], B_list[cur], out=C)
cur = 1 - cur
src_a += 1
src_b += summa_dim
out = C.reshape(out_shape)
if ctx:
# stash metadata for backward()
ctx.summa_dim = summa_dim
ctx.row_rank = row_rank
ctx.col_rank = col_rank
ctx.row_parallel_mode = row_parallel_mode
ctx.col_parallel_mode = col_parallel_mode
ctx.A_shape = A_shape
ctx.B_shape = B_shape
ctx.data_parallel_rank = data_parallel_rank
ctx.pipeline_parallel_rank = pipeline_parallel_rank
ctx.pipeline_parallel_size = pipeline_parallel_size
ctx.tensor_parallel_size = tensor_parallel_size
return out
@staticmethod
@custom_bwd
def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
A, B = ctx.saved_tensors
with torch.no_grad():
# dA = dC @ B^T and dB = A^T @ dC, via the matching 2D kernels
A_grad = Matmul_ABT_2D.apply(output_grad, B, ctx.summa_dim, ctx.A_shape, ctx.row_rank, ctx.col_rank,
ctx.row_parallel_mode, ctx.col_parallel_mode, ctx.data_parallel_rank,
ctx.pipeline_parallel_rank, ctx.pipeline_parallel_size,
ctx.tensor_parallel_size)
B_grad = Matmul_ATB_2D.apply(A, output_grad, ctx.summa_dim, ctx.B_shape, ctx.row_rank, ctx.col_rank,
ctx.row_parallel_mode, ctx.col_parallel_mode, ctx.data_parallel_rank,
ctx.pipeline_parallel_rank, ctx.pipeline_parallel_size,
ctx.tensor_parallel_size)
return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None
class Matmul_ABT_2D(torch.autograd.Function):
r"""Matrix multiplication for :math:`C = AB^T`
Args:
A (:class:`torch.tensor`): matrix :math:`A`.
B (:class:`torch.tensor`): matrix :math:`B`.
summa_dim (int): dimension of SUMMA fo 2D parallelism.
out_shape (:class:`torch.size`): shape of output tensor.
row_rank (int, optional): the rank of row, defaults to None.
col_rank (int, optional): the rank of column, defaults to None.
row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
column parallel mode, defaults to ParallelMode.PARALLEL_2D_COL.
data_parallel_rank (int): data parallel rank.
pipeline_parallel_rank (int): pipeline parallel rank
pipeline_parallel_size (int): pipeline parallel size.
tensor_parallel_size (int): tensor parallel size.
Note:
The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
"""
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(
ctx: Any,
A: Tensor,
B: Tensor,
summa_dim: int,
out_shape: Tuple[int, ...],
row_rank: int,
col_rank: int,
row_parallel_mode: ParallelMode,
col_parallel_mode: ParallelMode,
data_parallel_rank: int,
pipeline_parallel_rank: int,
pipeline_parallel_size: int,
tensor_parallel_size: int,
) -> Tensor:
assert A.shape[-1] == B.shape[-1], \
'Invalid shapes: A={}, B={} for ABT.'.format(A.shape, B.shape)
if ctx:
ctx.save_for_backward(A, B)
A_shape = A.shape
A = A.reshape((-1, A_shape[-1]))
B_shape = B.shape
B = B.reshape((-1, B_shape[-1]))
C_shape = (A.shape[0], B.shape[0])
C = torch.empty(C_shape, dtype=A.dtype, device=get_current_device())
# use circular buffer to store the communication tensor
# 2 is enough for all cases
B_list = [torch.empty_like(B) for _ in range(2)]
C_list = [torch.empty_like(C) for _ in range(2)]
row_group = gpc.get_group(row_parallel_mode)
col_group = gpc.get_group(col_parallel_mode)
# src_b: source of the per-step B broadcast (walks down the column);
# src_c: destination of the per-step partial-C reduce (walks along the row)
src_b = col_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
pipeline_parallel_rank * tensor_parallel_size
src_c = summa_dim * row_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
pipeline_parallel_rank * tensor_parallel_size
opb = [None] * 2
opr = [None] * 2
B_list[0].copy_(B)
opb[0] = dist.broadcast(B_list[0], src=src_b, group=col_group, async_op=True)
cur = 0
for i in range(summa_dim):
if i != summa_dim - 1:
B_list[1 - cur].copy_(B)
opb[1 - cur] = dist.broadcast(B_list[1 - cur], src=src_b + summa_dim, group=col_group, async_op=True)
if opr[cur] is not None:
opr[cur].wait()
# the reduce issued at step col_rank (destined for this rank) has
# completed by step col_rank + 2; copy its result out of the buffer
if i - 2 == col_rank:
C.copy_(C_list[cur])
if opb[cur] is not None:
opb[cur].wait()
torch.matmul(A, B_list[cur].transpose(0, 1), out=C_list[cur])
# asynchronously reduce this step's partial product to its owner
opr[cur] = dist.reduce(C_list[cur], dst=src_c, group=row_group, async_op=True)
cur = 1 - cur
src_b += summa_dim
src_c += 1
for op in opr:
op.wait()
# drain the last two pipeline slots for ranks whose reduce finished late
if summa_dim - 2 == col_rank:
C.copy_(C_list[cur])
if summa_dim - 1 == col_rank:
C.copy_(C_list[1 - cur])
out = C.reshape(out_shape)
if ctx:
# stash metadata for backward()
ctx.summa_dim = summa_dim
ctx.row_rank = row_rank
ctx.col_rank = col_rank
ctx.row_parallel_mode = row_parallel_mode
ctx.col_parallel_mode = col_parallel_mode
ctx.A_shape = A_shape
ctx.B_shape = B_shape
ctx.data_parallel_rank = data_parallel_rank
ctx.pipeline_parallel_rank = pipeline_parallel_rank
ctx.pipeline_parallel_size = pipeline_parallel_size
ctx.tensor_parallel_size = tensor_parallel_size
return out
@staticmethod
@custom_bwd
def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
A, B = ctx.saved_tensors
with torch.no_grad():
# dA = dC @ B and dB = dC^T @ A, via the matching 2D kernels
A_grad = Matmul_AB_2D.apply(output_grad, B, ctx.summa_dim, ctx.A_shape, ctx.row_rank, ctx.col_rank,
ctx.row_parallel_mode, ctx.col_parallel_mode, ctx.data_parallel_rank,
ctx.pipeline_parallel_rank, ctx.pipeline_parallel_size,
ctx.tensor_parallel_size)
B_grad = Matmul_ATB_2D.apply(output_grad, A, ctx.summa_dim, ctx.B_shape, ctx.row_rank, ctx.col_rank,
ctx.row_parallel_mode, ctx.col_parallel_mode, ctx.data_parallel_rank,
ctx.pipeline_parallel_rank, ctx.pipeline_parallel_size,
ctx.tensor_parallel_size)
return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None
class Matmul_ATB_2D(torch.autograd.Function):
r"""Matrix multiplication for :math:`C = A^TB`.
Args:
A (:class:`torch.tensor`): matrix :math:`A`.
B (:class:`torch.tensor`): matrix :math:`B`.
summa_dim (int): dimension of SUMMA fo 2D parallelism.
out_shape (:class:`torch.size`): shape of output tensor.
row_rank (int, optional): the rank of row, defaults to None.
col_rank (int, optional): the rank of column, defaults to None.
row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
data_parallel_rank (int): data parallel rank.
pipeline_parallel_rank (int): pipeline parallel rank
pipeline_parallel_size (int): pipeline parallel size.
tensor_parallel_size (int): tensor parallel size.
Note:
The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
"""
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(
ctx: Any,
A: Tensor,
B: Tensor,
summa_dim: int,
out_shape: Tuple[int, ...],
row_rank: int,
col_rank: int,
row_parallel_mode: ParallelMode,
col_parallel_mode: ParallelMode,
data_parallel_rank: int,
pipeline_parallel_rank: int,
pipeline_parallel_size: int,
tensor_parallel_size: int,
) -> Tensor:
assert A.shape[-2] == B.shape[-2], \
'Invalid shapes: A={}, B={} for ATB.'.format(A.shape, B.shape)
if ctx:
ctx.save_for_backward(A, B)
A_shape = A.shape
A = A.reshape((-1, A_shape[-1]))
B_shape = B.shape
B = B.reshape((-1, B_shape[-1]))
C_shape = (A.shape[-1], B.shape[-1])
C = torch.empty(C_shape, dtype=A.dtype, device=get_current_device())
# use circular buffer to store the communication tensor
# 2 is enough for all cases
A_list = [torch.empty_like(A) for _ in range(2)]
C_list = [torch.empty_like(C) for _ in range(2)]
row_group = gpc.get_group(row_parallel_mode)
col_group = gpc.get_group(col_parallel_mode)
src_a = summa_dim * row_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
pipeline_parallel_rank * tensor_parallel_size
src_c = col_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
pipeline_parallel_rank * tensor_parallel_size
opa = [None] * 2
opr = [None] * 2
A_list[0].copy_(A)
opa[0] = dist.broadcast(A_list[0], src=src_a, group=row_group, async_op=True)
cur = 0
for i in range(summa_dim):
if i != summa_dim - 1:
A_list[1 - cur].copy_(A)
opa[1 - cur] = dist.broadcast(A_list[1 - cur], src=src_a + 1, group=row_group, async_op=True)
if opr[cur] is not None:
opr[cur].wait()
if i - 2 == row_rank:
C.copy_(C_list[cur])
if opa[cur] is not None:
opa[cur].wait()
torch.matmul(A_list[cur].transpose(0, 1), B, out=C_list[cur])
opr[cur] = dist.reduce(C_list[cur], dst=src_c, group=col_group, async_op=True)
cur = 1 - cur
src_a += 1
src_c += summa_dim
for op in opr:
op.wait()
if summa_dim - 2 == row_rank:
C.copy_(C_list[cur])
if summa_dim - 1 == row_rank:
C.copy_(C_list[1 - cur])
out = C.reshape(out_shape)
if ctx:
ctx.summa_dim = summa_dim
ctx.row_rank = row_rank
ctx.col_rank = col_rank
ctx.row_parallel_mode = row_parallel_mode
ctx.col_parallel_mode = col_parallel_mode
ctx.A_shape = A_shape
ctx.B_shape = B_shape
ctx.data_parallel_rank = data_parallel_rank
ctx.pipeline_parallel_rank = pipeline_parallel_rank
ctx.pipeline_parallel_size = pipeline_parallel_size
ctx.tensor_parallel_size = tensor_parallel_size
return out
@staticmethod
@custom_bwd
def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
    """Backward of the 2D-parallel matmul whose operands were saved in forward.

    ``output_grad`` is dL/dC. Gradients for A and B are computed by reusing
    the sibling 2D matmul autograd functions with the SUMMA mesh/rank
    metadata stashed on ``ctx``; the target shapes come from ``ctx.A_shape``
    and ``ctx.B_shape``. The trailing ``None`` entries are the (non-existent)
    gradients of forward's non-tensor arguments.
    """
    A, B = ctx.saved_tensors
    # The apply() calls below are used as plain kernels; no_grad keeps them
    # from being recorded on the autograd tape a second time.
    with torch.no_grad():
        # dL/dA, shaped per ctx.A_shape.
        A_grad = Matmul_ABT_2D.apply(B, output_grad, ctx.summa_dim, ctx.A_shape, ctx.row_rank, ctx.col_rank,
                                     ctx.row_parallel_mode, ctx.col_parallel_mode, ctx.data_parallel_rank,
                                     ctx.pipeline_parallel_rank, ctx.pipeline_parallel_size,
                                     ctx.tensor_parallel_size)
        # dL/dB, shaped per ctx.B_shape.
        B_grad = Matmul_AB_2D.apply(A, output_grad, ctx.summa_dim, ctx.B_shape, ctx.row_rank, ctx.col_rank,
                                    ctx.row_parallel_mode, ctx.col_parallel_mode, ctx.data_parallel_rank,
                                    ctx.pipeline_parallel_rank, ctx.pipeline_parallel_size,
                                    ctx.tensor_parallel_size)
    return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None
class _Add_Bias_2D(torch.autograd.Function):
    """Add a column-sharded bias in 2D tensor parallelism.

    Forward gathers the bias shards along the column-parallel group. When
    ``skip_bias_add`` is set, the gathered bias itself is returned so the
    caller can fuse the addition elsewhere.
    """

    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(
        ctx: Any,
        input_: Tensor,
        bias: Tensor,
        output_size_per_partition: int,
        row_rank: int,
        col_rank: int,
        row_parallel_mode: ParallelMode,
        col_parallel_mode: ParallelMode,
        skip_bias_add: bool,
        data_parallel_rank: int,
        pipeline_parallel_rank: int,
        pipeline_parallel_size: int,
        tensor_parallel_size: int,
    ) -> Tensor:
        # Reassemble the full bias vector from its column-parallel shards.
        full_bias = all_gather(bias, -1, col_parallel_mode)

        # Stash mesh metadata for backward.
        ctx.row_rank = row_rank
        ctx.col_rank = col_rank
        ctx.row_parallel_mode = row_parallel_mode
        ctx.col_parallel_mode = col_parallel_mode
        ctx.bias = skip_bias_add
        ctx.data_parallel_rank = data_parallel_rank
        ctx.pipeline_parallel_rank = pipeline_parallel_rank
        ctx.pipeline_parallel_size = pipeline_parallel_size
        ctx.tensor_parallel_size = tensor_parallel_size

        if skip_bias_add:
            return full_bias
        return input_ + full_bias

    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        col_parallel_mode = ctx.col_parallel_mode
        tail = (None,) * 10    # grads of the 10 non-tensor forward arguments
        if ctx.bias:
            # Forward returned only the gathered bias: no input gradient;
            # the bias grad is the output grad scattered back to its shards.
            bias_grad = reduce_scatter(output_grad, -1, col_parallel_mode)
            return (None, bias_grad) + tail
        # Sum over every dim except the last to reduce onto the bias shape,
        # then scatter the shards back to their owners.
        summed = torch.sum(output_grad, dim=tuple(range(output_grad.ndim - 1)))
        bias_grad = reduce_scatter(summed, -1, col_parallel_mode)
        return (output_grad, bias_grad) + tail
def add_bias_2d(input_: Tensor, bias: Tensor, output_size_per_partition: int, row_rank: int, col_rank: int,
                row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode, skip_bias_add: bool,
                data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int,
                tensor_parallel_size: int) -> Tensor:
    r"""Matrix add bias: :math:`C = A + b`.

    Args:
        input_ (:class:`torch.tensor`): matrix :math:`A`.
        bias (:class:`torch.tensor`): matrix :math:`b`.
        output_size_per_partition (int): size of output per partition.
        row_rank (int): the rank of row.
        col_rank (int): the rank of column.
        row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
        col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
        skip_bias_add (bool):
            If set to ``True``, it will skip bias add for linear layer, which is preserved for kernel fusion.
        data_parallel_rank (int): data parallel rank.
        pipeline_parallel_rank (int): pipeline parallel rank.
        pipeline_parallel_size (int): pipeline parallel size.
        tensor_parallel_size (int): tensor parallel size.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    args = (input_, bias, output_size_per_partition, row_rank, col_rank, row_parallel_mode,
            col_parallel_mode, skip_bias_add, data_parallel_rank, pipeline_parallel_rank,
            pipeline_parallel_size, tensor_parallel_size)
    return _Add_Bias_2D.apply(*args)
class _Layernorm_2D(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float32)
def forward(ctx: Any, input_: Tensor, E_x: Tensor, Var_x: Tensor, hidden_size: int, row_parallel_mode: ParallelMode,
col_parallel_mode: ParallelMode) -> Tensor:
input_ = input_ - E_x
# in here, input = x - E[x], Var_x = 1 / sqrt(Var[x] + eps)
ctx.normalized_shape = hidden_size
output = input_ * Var_x
ctx.save_for_backward(output, Var_x)
ctx.row_parallel_mode = row_parallel_mode
ctx.col_parallel_mode = col_parallel_mode
return output
@staticmethod
@custom_bwd
def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
row_parallel_mode = ctx.row_parallel_mode
col_parallel_mode = ctx.col_parallel_mode
x, Var_x = ctx.saved_tensors
# in here, Var_x = 1 / sqrt(Var[x] + eps), x = (x - E[x]) * Var_x
output_grad_sum = torch.sum(output_grad, dim=-1, keepdim=True)
torch.distributed.all_reduce(output_grad_sum, group=gpc.get_group(row_parallel_mode))
output_grad_sum /= ctx.normalized_shape
output_grad_mul_x_sum = torch.sum(output_grad * x, dim=-1, keepdim=True)
torch.distributed.all_reduce(output_grad_mul_x_sum, group=gpc.get_group(row_parallel_mode))
output_grad_mul_x_sum /= ctx.normalized_shape
input_grad = output_grad.clone()
input_grad -= x * output_grad_mul_x_sum
input_grad -= output_grad_sum
input_grad *= Var_x
return input_grad, None, None, None, None, None
def layernorm_2d(input_: Tensor, E_x: Tensor, Var_x: Tensor, hidden_size: int, row_parallel_mode: ParallelMode,
                 col_parallel_mode: ParallelMode) -> Tensor:
    r"""Layernorm for 2D parallelism.

    Args:
        input_ (:class:`torch.tensor`): input matrix.
        E_x (:class:`torch.tensor`): precomputed mean.
        Var_x (:class:`torch.tensor`): precomputed ``1 / sqrt(Var[x] + eps)`` term.
        hidden_size (int): hidden size.
        row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
        col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    args = (input_, E_x, Var_x, hidden_size, row_parallel_mode, col_parallel_mode)
    return _Layernorm_2D.apply(*args)
class _AllGatherTensor2D(torch.autograd.Function):
    """All-gather in forward, reduce-scatter of the gradient in backward."""

    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, inputs: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
        # Remember where to scatter the gradient back.
        ctx.dim = dim
        ctx.parallel_mode = parallel_mode
        return all_gather(inputs, dim, parallel_mode)

    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        input_grad = reduce_scatter(output_grad, ctx.dim, ctx.parallel_mode)
        return input_grad.contiguous(), None, None
def all_gather_tensor_2d(tensor: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
    r"""All gather the tensor of 2D parallelism.

    Args:
        tensor (:class:`torch.tensor`): Input tensor.
        dim (int): Dimension to gather along.
        parallel_mode (:class:`colossalai.context.ParallelMode`): The parallel mode tensor used.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    args = (tensor, dim, parallel_mode)
    return _AllGatherTensor2D.apply(*args)
def split_tensor_2d(input_: Tensor, dim: int = 0) -> Tensor:
    """Split a 2D-parallel tensor along ``dim`` across the column group.

    Args:
        input_ (:class:`torch.tensor`): Input tensor.
        dim (int): Specified dimension in which to split.

    Returns:
        :class:`torch.tensor`: this rank's contiguous chunk, or the input
        unchanged when the dimension is too small to split.
    """
    # Nothing to split when the dimension holds at most one element.
    if input_.size(dim) <= 1:
        return input_
    world_size = gpc.get_world_size(ParallelMode.PARALLEL_2D_COL)
    rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL)
    chunks = torch.chunk(input_, world_size, dim=dim)
    return chunks[rank].contiguous()
class _ReduceTensor2D(torch.autograd.Function):
@staticmethod
def forward(ctx, input_, parallel_mode):
return all_reduce(input_, parallel_mode)
@staticmethod
def backward(ctx, output_grad):
return output_grad, None
def reduce_tensor_2d(input_: Tensor, parallel_mode: ParallelMode) -> Tensor:
    r"""All-reduce the input.

    Args:
        input_ (:class:`torch.tensor`): Input tensor.
        parallel_mode (:class:`colossalai.context.ParallelMode`): The parallel mode tensor used.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    args = (input_, parallel_mode)
    return _ReduceTensor2D.apply(*args)
class _ReduceScatterTensor2D(torch.autograd.Function):
    """Reduce-scatter in forward, all-gather of the gradient in backward."""

    @staticmethod
    def forward(ctx, input_, dim, parallel_mode):
        # Remember where to gather the gradient back.
        ctx.dim = dim
        ctx.parallel_mode = parallel_mode
        return reduce_scatter(input_, dim, parallel_mode)

    @staticmethod
    def backward(ctx, output_grad):
        input_grad = all_gather(output_grad, ctx.dim, ctx.parallel_mode)
        return input_grad, None, None
def reduce_scatter_tensor_2d(tensor: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
    r"""Reduce-scatter the input.

    Args:
        tensor (:class:`torch.tensor`): Input tensor.
        dim (int): Dimension to reduce along.
        parallel_mode (:class:`colossalai.context.ParallelMode`): The parallel mode tensor used.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    args = (tensor, dim, parallel_mode)
    return _ReduceScatterTensor2D.apply(*args)
class _ReduceByBatch2D(torch.autograd.Function):
    """All-reduce over the 2D column group, optionally averaging the result."""

    @staticmethod
    def symbolic(graph, input_, reduce_mean: bool = False):
        total = all_reduce(input_, ParallelMode.PARALLEL_2D_COL)
        if reduce_mean:
            return total / gpc.get_world_size(ParallelMode.PARALLEL_2D_COL)
        return total

    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
    def forward(ctx, input_, reduce_mean: bool = False):
        total = all_reduce(input_, ParallelMode.PARALLEL_2D_COL)
        ctx.reduce_mean = reduce_mean
        if not reduce_mean:
            return total.clone()
        # Average over the column-parallel group; keep the divisor for backward.
        group_size = gpc.get_world_size(ParallelMode.PARALLEL_2D_COL)
        ctx.reduce_size = group_size
        return total.clone() / group_size

    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad):
        if ctx.reduce_mean:
            return output_grad / ctx.reduce_size, None
        return output_grad, None
def reduce_by_batch_2d(input_, reduce_mean: bool = False) -> Tensor:
    r"""All-reduce the input from the model parallel region.

    Args:
        input_ (:class:`torch.tensor`): input matrix.
        reduce_mean (bool, optional):
            If set to ``True``, it will divide the output by column parallel size, default to False.
    """
    return _ReduceByBatch2D.apply(input_, reduce_mean) | [
"colossalai.core.global_context.get_local_rank",
"colossalai.communication.collective.reduce_scatter",
"colossalai.communication.collective.all_gather",
"colossalai.core.global_context.get_group",
"colossalai.communication.collective.all_reduce",
"colossalai.core.global_context.get_world_size",
"colossa... | [((2634, 2671), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (2644, 2671), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((7989, 8026), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (7999, 8026), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((13519, 13556), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (13529, 13556), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((18947, 18984), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (18957, 18984), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((23260, 23297), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (23270, 23297), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((26912, 26949), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float32'}), '(cast_inputs=torch.float32)\n', (26922, 26949), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((29453, 29490), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (29463, 29490), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((33410, 33447), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float32'}), '(cast_inputs=torch.float32)\n', (33420, 33447), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((1791, 1828), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['col_parallel_mode'], {}), '(col_parallel_mode)\n', (1809, 1828), True, 'from colossalai.core import global_context as gpc\n'), ((1873, 1910), 'colossalai.core.global_context.get_local_rank', 
'gpc.get_local_rank', (['row_parallel_mode'], {}), '(row_parallel_mode)\n', (1891, 1910), True, 'from colossalai.core import global_context as gpc\n'), ((1989, 2026), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (2007, 2026), True, 'from colossalai.core import global_context as gpc\n'), ((2112, 2153), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (2130, 2153), True, 'from colossalai.core import global_context as gpc\n'), ((2248, 2289), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (2266, 2289), True, 'from colossalai.core import global_context as gpc\n'), ((3276, 3312), 'colossalai.communication.collective.all_gather', 'all_gather', (['B', '(-1)', 'col_parallel_mode'], {}), '(B, -1, col_parallel_mode)\n', (3286, 3312), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce, reduce_scatter\n'), ((3440, 3472), 'colossalai.communication.collective.all_reduce', 'all_reduce', (['C', 'row_parallel_mode'], {}), '(C, row_parallel_mode)\n', (3450, 3472), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce, reduce_scatter\n'), ((9254, 9286), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['row_parallel_mode'], {}), '(row_parallel_mode)\n', (9267, 9286), True, 'from colossalai.core import global_context as gpc\n'), ((9307, 9339), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['col_parallel_mode'], {}), '(col_parallel_mode)\n', (9320, 9339), True, 'from colossalai.core import global_context as gpc\n'), ((9796, 9864), 'torch.distributed.broadcast', 'dist.broadcast', (['A_list[0]'], {'src': 'src_a', 'group': 'row_group', 'async_op': '(True)'}), '(A_list[0], src=src_a, group=row_group, async_op=True)\n', (9810, 9864), True, 'import 
torch.distributed as dist\n'), ((9882, 9950), 'torch.distributed.broadcast', 'dist.broadcast', (['B_list[0]'], {'src': 'src_b', 'group': 'col_group', 'async_op': '(True)'}), '(B_list[0], src=src_b, group=col_group, async_op=True)\n', (9896, 9950), True, 'import torch.distributed as dist\n'), ((14646, 14678), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['row_parallel_mode'], {}), '(row_parallel_mode)\n', (14659, 14678), True, 'from colossalai.core import global_context as gpc\n'), ((14699, 14731), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['col_parallel_mode'], {}), '(col_parallel_mode)\n', (14712, 14731), True, 'from colossalai.core import global_context as gpc\n'), ((15161, 15229), 'torch.distributed.broadcast', 'dist.broadcast', (['B_list[0]'], {'src': 'src_b', 'group': 'col_group', 'async_op': '(True)'}), '(B_list[0], src=src_b, group=col_group, async_op=True)\n', (15175, 15229), True, 'import torch.distributed as dist\n'), ((20076, 20108), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['row_parallel_mode'], {}), '(row_parallel_mode)\n', (20089, 20108), True, 'from colossalai.core import global_context as gpc\n'), ((20129, 20161), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['col_parallel_mode'], {}), '(col_parallel_mode)\n', (20142, 20161), True, 'from colossalai.core import global_context as gpc\n'), ((20591, 20659), 'torch.distributed.broadcast', 'dist.broadcast', (['A_list[0]'], {'src': 'src_a', 'group': 'row_group', 'async_op': '(True)'}), '(A_list[0], src=src_a, group=row_group, async_op=True)\n', (20605, 20659), True, 'import torch.distributed as dist\n'), ((23755, 23794), 'colossalai.communication.collective.all_gather', 'all_gather', (['bias', '(-1)', 'col_parallel_mode'], {}), '(bias, -1, col_parallel_mode)\n', (23765, 23794), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce, reduce_scatter\n'), ((27814, 27858), 'torch.sum', 'torch.sum', 
(['output_grad'], {'dim': '(-1)', 'keepdim': '(True)'}), '(output_grad, dim=-1, keepdim=True)\n', (27823, 27858), False, 'import torch\n'), ((28034, 28082), 'torch.sum', 'torch.sum', (['(output_grad * x)'], {'dim': '(-1)', 'keepdim': '(True)'}), '(output_grad * x, dim=-1, keepdim=True)\n', (28043, 28082), False, 'import torch\n'), ((29666, 29704), 'colossalai.communication.collective.all_gather', 'all_gather', (['inputs', 'dim', 'parallel_mode'], {}), '(inputs, dim, parallel_mode)\n', (29676, 29704), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce, reduce_scatter\n'), ((29849, 29904), 'colossalai.communication.collective.reduce_scatter', 'reduce_scatter', (['output_grad', 'ctx.dim', 'ctx.parallel_mode'], {}), '(output_grad, ctx.dim, ctx.parallel_mode)\n', (29863, 29904), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce, reduce_scatter\n'), ((31293, 31326), 'colossalai.communication.collective.all_reduce', 'all_reduce', (['input_', 'parallel_mode'], {}), '(input_, parallel_mode)\n', (31303, 31326), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce, reduce_scatter\n'), ((32199, 32241), 'colossalai.communication.collective.reduce_scatter', 'reduce_scatter', (['input_', 'dim', 'parallel_mode'], {}), '(input_, dim, parallel_mode)\n', (32213, 32241), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce, reduce_scatter\n'), ((33176, 33224), 'colossalai.communication.collective.all_reduce', 'all_reduce', (['input_', 'ParallelMode.PARALLEL_2D_COL'], {}), '(input_, ParallelMode.PARALLEL_2D_COL)\n', (33186, 33224), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce, reduce_scatter\n'), ((33522, 33570), 'colossalai.communication.collective.all_reduce', 'all_reduce', (['input_', 'ParallelMode.PARALLEL_2D_COL'], {}), '(input_, ParallelMode.PARALLEL_2D_COL)\n', (33532, 33570), False, 'from 
colossalai.communication.collective import all_gather, all_reduce, reduce, reduce_scatter\n'), ((1946, 1983), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (1964, 1983), True, 'from colossalai.core import global_context as gpc\n'), ((2065, 2106), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (2083, 2106), True, 'from colossalai.core import global_context as gpc\n'), ((2201, 2242), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (2219, 2242), True, 'from colossalai.core import global_context as gpc\n'), ((4324, 4339), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4337, 4339), False, 'import torch\n'), ((4362, 4390), 'torch.matmul', 'torch.matmul', (['output_grad', 'B'], {}), '(output_grad, B)\n', (4374, 4390), False, 'import torch\n'), ((4562, 4611), 'colossalai.communication.collective.reduce_scatter', 'reduce_scatter', (['B_grad', '(-1)', 'ctx.col_parallel_mode'], {}), '(B_grad, -1, ctx.col_parallel_mode)\n', (4576, 4611), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce, reduce_scatter\n'), ((9137, 9156), 'torch.empty_like', 'torch.empty_like', (['A'], {}), '(A)\n', (9153, 9156), False, 'import torch\n'), ((9194, 9213), 'torch.empty_like', 'torch.empty_like', (['B'], {}), '(B)\n', (9210, 9213), False, 'import torch\n'), ((10500, 10547), 'torch.addmm', 'torch.addmm', (['C', 'A_list[cur]', 'B_list[cur]'], {'out': 'C'}), '(C, A_list[cur], B_list[cur], out=C)\n', (10511, 10547), False, 'import torch\n'), ((11382, 11397), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11395, 11397), False, 'import torch\n'), ((14529, 14548), 'torch.empty_like', 'torch.empty_like', (['B'], {}), '(B)\n', (14545, 14548), False, 'import torch\n'), ((14586, 14605), 'torch.empty_like', 
'torch.empty_like', (['C'], {}), '(C)\n', (14602, 14605), False, 'import torch\n'), ((15793, 15860), 'torch.distributed.reduce', 'dist.reduce', (['C_list[cur]'], {'dst': 'src_c', 'group': 'row_group', 'async_op': '(True)'}), '(C_list[cur], dst=src_c, group=row_group, async_op=True)\n', (15804, 15860), True, 'import torch.distributed as dist\n'), ((16889, 16904), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16902, 16904), False, 'import torch\n'), ((19959, 19978), 'torch.empty_like', 'torch.empty_like', (['A'], {}), '(A)\n', (19975, 19978), False, 'import torch\n'), ((20016, 20035), 'torch.empty_like', 'torch.empty_like', (['C'], {}), '(C)\n', (20032, 20035), False, 'import torch\n'), ((21215, 21282), 'torch.distributed.reduce', 'dist.reduce', (['C_list[cur]'], {'dst': 'src_c', 'group': 'col_group', 'async_op': '(True)'}), '(C_list[cur], dst=src_c, group=col_group, async_op=True)\n', (21226, 21282), True, 'import torch.distributed as dist\n'), ((22311, 22326), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22324, 22326), False, 'import torch\n'), ((24554, 24604), 'colossalai.communication.collective.reduce_scatter', 'reduce_scatter', (['output_grad', '(-1)', 'col_parallel_mode'], {}), '(output_grad, -1, col_parallel_mode)\n', (24568, 24604), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce, reduce_scatter\n'), ((24790, 24828), 'torch.sum', 'torch.sum', (['output_grad'], {'dim': 'reduce_dim'}), '(output_grad, dim=reduce_dim)\n', (24799, 24828), False, 'import torch\n'), ((24848, 24893), 'colossalai.communication.collective.reduce_scatter', 'reduce_scatter', (['reduce', '(-1)', 'col_parallel_mode'], {}), '(reduce, -1, col_parallel_mode)\n', (24862, 24893), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce, reduce_scatter\n'), ((32312, 32363), 'colossalai.communication.collective.all_gather', 'all_gather', (['output_grad', 'ctx.dim', 'ctx.parallel_mode'], {}), '(output_grad, 
ctx.dim, ctx.parallel_mode)\n', (32322, 32363), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce, reduce_scatter\n'), ((33275, 33323), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_2D_COL'], {}), '(ParallelMode.PARALLEL_2D_COL)\n', (33293, 33323), True, 'from colossalai.core import global_context as gpc\n'), ((33659, 33707), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_2D_COL'], {}), '(ParallelMode.PARALLEL_2D_COL)\n', (33677, 33707), True, 'from colossalai.core import global_context as gpc\n'), ((4809, 4853), 'colossalai.communication.collective.all_reduce', 'all_reduce', (['bias_grad', 'ctx.col_parallel_mode'], {}), '(bias_grad, ctx.col_parallel_mode)\n', (4819, 4853), False, 'from colossalai.communication.collective import all_gather, all_reduce, reduce, reduce_scatter\n'), ((8996, 9016), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (9014, 9016), False, 'from colossalai.utils import get_current_device\n'), ((10110, 10188), 'torch.distributed.broadcast', 'dist.broadcast', (['A_list[1 - cur]'], {'src': '(src_a + 1)', 'group': 'row_group', 'async_op': '(True)'}), '(A_list[1 - cur], src=src_a + 1, group=row_group, async_op=True)\n', (10124, 10188), True, 'import torch.distributed as dist\n'), ((10261, 10351), 'torch.distributed.broadcast', 'dist.broadcast', (['B_list[1 - cur]'], {'src': '(src_b + summa_dim)', 'group': 'col_group', 'async_op': '(True)'}), '(B_list[1 - cur], src=src_b + summa_dim, group=col_group,\n async_op=True)\n', (10275, 10351), True, 'import torch.distributed as dist\n'), ((14388, 14408), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (14406, 14408), False, 'from colossalai.utils import get_current_device\n'), ((15389, 15479), 'torch.distributed.broadcast', 'dist.broadcast', (['B_list[1 - cur]'], {'src': '(src_b + summa_dim)', 'group': 
'col_group', 'async_op': '(True)'}), '(B_list[1 - cur], src=src_b + summa_dim, group=col_group,\n async_op=True)\n', (15403, 15479), True, 'import torch.distributed as dist\n'), ((19818, 19838), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (19836, 19838), False, 'from colossalai.utils import get_current_device\n'), ((20819, 20897), 'torch.distributed.broadcast', 'dist.broadcast', (['A_list[1 - cur]'], {'src': '(src_a + 1)', 'group': 'row_group', 'async_op': '(True)'}), '(A_list[1 - cur], src=src_a + 1, group=row_group, async_op=True)\n', (20833, 20897), True, 'import torch.distributed as dist\n'), ((27919, 27951), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['row_parallel_mode'], {}), '(row_parallel_mode)\n', (27932, 27951), True, 'from colossalai.core import global_context as gpc\n'), ((28149, 28181), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['row_parallel_mode'], {}), '(row_parallel_mode)\n', (28162, 28181), True, 'from colossalai.core import global_context as gpc\n'), ((31102, 31150), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2D_COL'], {}), '(ParallelMode.PARALLEL_2D_COL)\n', (31120, 31150), True, 'from colossalai.core import global_context as gpc\n'), ((31020, 31068), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_2D_COL'], {}), '(ParallelMode.PARALLEL_2D_COL)\n', (31038, 31068), True, 'from colossalai.core import global_context as gpc\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn.layer.parallel_1d._utils import vocab_range_from_per_partition_vocab_size
class _VocabParallelCrossEntropy_1D(torch.autograd.Function):
    """Cross entropy over vocab-parallel logits in 1D tensor parallelism.

    Each rank holds a contiguous shard of the vocabulary dimension; the loss
    is assembled via all-reduces over the 1D parallel group.
    NOTE(review): forward modifies ``vocab_parallel_logits`` in place
    (max-subtraction and exp) — callers must not reuse the input afterwards.
    """

    @staticmethod
    def forward(ctx, vocab_parallel_logits, target):
        # Maximum value along vocab dimension across all GPUs.
        logits_max = torch.max(vocab_parallel_logits, dim=-1)[0]
        torch.distributed.all_reduce(logits_max,
                                     op=torch.distributed.ReduceOp.MAX,
                                     group=gpc.get_group(ParallelMode.PARALLEL_1D))
        # Subtract the maximum value (numerical stability; in place).
        vocab_parallel_logits.sub_(logits_max.unsqueeze(dim=-1))
        # Get the partition's vocab indices.
        partition_vocab_size = vocab_parallel_logits.size()[-1]
        rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)
        world_size = gpc.get_world_size(ParallelMode.PARALLEL_1D)
        vocab_start_index, vocab_end_index = vocab_range_from_per_partition_vocab_size(
            partition_vocab_size, rank, world_size)
        # Create a mask of valid vocab ids (1 means it needs to be masked):
        # targets outside this rank's shard are handled by other ranks.
        target_mask = (target < vocab_start_index) | (target >= vocab_end_index)
        masked_target = target.clone() - vocab_start_index
        masked_target[target_mask] = 0
        # Get predicted-logits = logits[target].
        # For simplicity, we convert logits to a 2-D tensor with size
        # [*, partition-vocab-size] and target to a 1-D tensor of size [*].
        logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size)
        masked_target_1d = masked_target.view(-1)
        arange_1d = torch.arange(start=0, end=logits_2d.size()[0],
                                 device=logits_2d.device)
        predicted_logits_1d = logits_2d[arange_1d, masked_target_1d]
        predicted_logits_1d = predicted_logits_1d.clone().contiguous()
        predicted_logits = predicted_logits_1d.view_as(target)
        # Zero out positions whose target lives on another rank; the true
        # value arrives through the SUM all-reduce below.
        predicted_logits[target_mask] = 0.0
        # All reduce is needed to get the chunks from other GPUs.
        torch.distributed.all_reduce(predicted_logits,
                                     op=torch.distributed.ReduceOp.SUM,
                                     group=gpc.get_group(ParallelMode.PARALLEL_1D))
        # Sum of exponential of logits along vocab dimension across all GPUs.
        # exp_logits aliases the input: torch.exp writes into it in place.
        exp_logits = vocab_parallel_logits
        torch.exp(vocab_parallel_logits, out=exp_logits)
        sum_exp_logits = exp_logits.sum(dim=-1)
        torch.distributed.all_reduce(sum_exp_logits,
                                     op=torch.distributed.ReduceOp.SUM,
                                     group=gpc.get_group(ParallelMode.PARALLEL_1D))
        # Loss = log(sum(exp(logits))) - predicted-logit.
        loss = torch.log(sum_exp_logits) - predicted_logits
        # Store softmax, target-mask and masked-target for backward pass.
        exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
        ctx.save_for_backward(exp_logits, target_mask, masked_target_1d)
        return loss

    @staticmethod
    def backward(ctx, grad_output):
        # Retrieve tensors from the forward path.
        softmax, target_mask, masked_target_1d = ctx.saved_tensors
        # All the inputs have softmax as their gradient.
        grad_input = softmax
        # For simplicity, work with the 2D gradient.
        partition_vocab_size = softmax.size()[-1]
        grad_2d = grad_input.view(-1, partition_vocab_size)
        # Subtract 1 at the true class (softmax - one_hot), but only where
        # the target belongs to this rank's vocab shard (mask == 0).
        arange_1d = torch.arange(start=0, end=grad_2d.size()[0],
                                 device=grad_2d.device)
        grad_2d[arange_1d, masked_target_1d] -= (
            1.0 - target_mask.view(-1).float())
        # Finally elementwise multiplication with the output gradients.
        grad_input.mul_(grad_output.unsqueeze(dim=-1))
        return grad_input, None
class LmLoss1D(_Loss):
    """Masked language-modeling loss over 1D vocab-parallel logits."""

    def forward(self, lm_logits, lm_labels, loss_mask):
        # Per-token cross entropy, then average over unmasked positions only.
        token_loss = _VocabParallelCrossEntropy_1D.apply(lm_logits, lm_labels)
        flat_mask = loss_mask.reshape(-1)
        return torch.sum(token_loss.view(-1) * flat_mask) / flat_mask.sum()
class SopLoss1D(_Loss):
    """Sentence-order-prediction loss: cross entropy over two classes."""

    def forward(self, sop_logits, sentence_order):
        # Flatten to (N, 2) logits / (N,) targets; label -1 is ignored.
        flat_logits = sop_logits.view(-1, 2).float()
        flat_targets = sentence_order.view(-1)
        return F.cross_entropy(flat_logits, flat_targets, ignore_index=-1)
class BERTDualHeadLoss(_Loss):
    """Combined BERT pre-training loss: masked-LM loss plus sentence-order loss.

    Holds an :class:`LmLoss1D` and a :class:`SopLoss1D` submodule and returns
    the sum of their outputs.
    """

    def __init__(self):
        # Bug fix: nn.Module.__init__ must run before any submodule is
        # assigned, otherwise Module.__setattr__ raises
        # "cannot assign module before Module.__init__() call".
        super().__init__()
        self.lm_loss = LmLoss1D()
        self.sop_loss = SopLoss1D()

    def forward(self, lm_logits, sop_logits, lm_labels, loss_mask, sentence_order):
        """Return lm_loss(lm_logits, lm_labels, loss_mask) + sop_loss(sop_logits, sentence_order)."""
        lm_loss = self.lm_loss(lm_logits, lm_labels, loss_mask)
        sop_loss = self.sop_loss(sop_logits, sentence_order)
        return lm_loss + sop_loss
| [
"colossalai.core.global_context.get_local_rank",
"colossalai.core.global_context.get_group",
"colossalai.core.global_context.get_world_size",
"colossalai.nn.layer.parallel_1d._utils.vocab_range_from_per_partition_vocab_size"
] | [((1033, 1077), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (1051, 1077), True, 'from colossalai.core import global_context as gpc\n'), ((1099, 1143), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (1117, 1143), True, 'from colossalai.core import global_context as gpc\n'), ((1189, 1274), 'colossalai.nn.layer.parallel_1d._utils.vocab_range_from_per_partition_vocab_size', 'vocab_range_from_per_partition_vocab_size', (['partition_vocab_size', 'rank', 'world_size'], {}), '(partition_vocab_size, rank,\n world_size)\n', (1230, 1274), False, 'from colossalai.nn.layer.parallel_1d._utils import vocab_range_from_per_partition_vocab_size\n'), ((2638, 2686), 'torch.exp', 'torch.exp', (['vocab_parallel_logits'], {'out': 'exp_logits'}), '(vocab_parallel_logits, out=exp_logits)\n', (2647, 2686), False, 'import torch\n'), ((556, 596), 'torch.max', 'torch.max', (['vocab_parallel_logits'], {'dim': '(-1)'}), '(vocab_parallel_logits, dim=-1)\n', (565, 596), False, 'import torch\n'), ((3018, 3043), 'torch.log', 'torch.log', (['sum_exp_logits'], {}), '(sum_exp_logits)\n', (3027, 3043), False, 'import torch\n'), ((764, 803), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (777, 803), True, 'from colossalai.core import global_context as gpc\n'), ((2467, 2506), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (2480, 2506), True, 'from colossalai.core import global_context as gpc\n'), ((2903, 2942), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (2916, 2942), True, 'from colossalai.core import global_context as gpc\n')] |
import torch
import colossalai
import colossalai.nn as col_nn
class MLP(torch.nn.Module):
def __init__(self, dim: int = 256):
super().__init__()
intermediate_dim = dim * 4
self.dense_1 = col_nn.Linear(dim, intermediate_dim)
self.activation = torch.nn.GELU()
self.dense_2 = col_nn.Linear(intermediate_dim, dim)
self.dropout = col_nn.Dropout(0.1)
def forward(self, x):
x = self.dense_1(x)
x = self.activation(x)
x = self.dense_2(x)
x = self.dropout(x)
return x
| [
"colossalai.nn.Linear",
"colossalai.nn.Dropout"
] | [((216, 252), 'colossalai.nn.Linear', 'col_nn.Linear', (['dim', 'intermediate_dim'], {}), '(dim, intermediate_dim)\n', (229, 252), True, 'import colossalai.nn as col_nn\n'), ((279, 294), 'torch.nn.GELU', 'torch.nn.GELU', ([], {}), '()\n', (292, 294), False, 'import torch\n'), ((318, 354), 'colossalai.nn.Linear', 'col_nn.Linear', (['intermediate_dim', 'dim'], {}), '(intermediate_dim, dim)\n', (331, 354), True, 'import colossalai.nn as col_nn\n'), ((378, 397), 'colossalai.nn.Dropout', 'col_nn.Dropout', (['(0.1)'], {}), '(0.1)\n', (392, 397), True, 'import colossalai.nn as col_nn\n')] |
#!/usr/bin/env python
import torch.distributed as dist
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from colossalai.core import global_context as gpc
from colossalai.registry import GRADIENT_HANDLER
from ._base_gradient_handler import BaseGradientHandler
from ...context.parallel_mode import ParallelMode
@GRADIENT_HANDLER.register_module
class DataParallelGradientHandler(BaseGradientHandler):
"""A helper class to handle all-reduce operations in a data parallel group.
A all-reduce collective communication will be operated in
:func:`handle_gradient` among a data parallel group.
For better performance, it bucketizes the gradients of all parameters that are
the same type to improve the efficiency of communication.
"""
def handle_gradient(self):
"""A method running a all-reduce operation in a data parallel group.
"""
# TODO: add memory buffer
if gpc.data_parallel_size > 1:
# bucketize and all-reduce
buckets = {}
# Pack the buckets.
for param in self._model.parameters():
if param.requires_grad and param.grad is not None:
tp = param.data.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(param)
# param.main_grad = param.grad
# For each bucket, all-reduce and copy all-reduced grads.
for tp in buckets:
bucket = buckets[tp]
grads = [param.grad.data for param in bucket]
coalesced = _flatten_dense_tensors(grads)
coalesced /= gpc.get_world_size(ParallelMode.DATA)
dist.all_reduce(
coalesced, group=gpc.get_group(ParallelMode.DATA))
for buf, synced in zip(grads, _unflatten_dense_tensors(
coalesced, grads)):
buf.copy_(synced)
| [
"colossalai.core.global_context.get_world_size",
"colossalai.core.global_context.get_group"
] | [((1642, 1671), 'torch._utils._flatten_dense_tensors', '_flatten_dense_tensors', (['grads'], {}), '(grads)\n', (1664, 1671), False, 'from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors\n'), ((1701, 1738), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (1719, 1738), True, 'from colossalai.core import global_context as gpc\n'), ((1890, 1932), 'torch._utils._unflatten_dense_tensors', '_unflatten_dense_tensors', (['coalesced', 'grads'], {}), '(coalesced, grads)\n', (1914, 1932), False, 'from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors\n'), ((1810, 1842), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (1823, 1842), True, 'from colossalai.core import global_context as gpc\n')] |
import sys
import os
import torch
import argparse
from DataSets import create_datasets, create_dataloader
from Utils.eval import eval_model
from Utils.tools import analysis_dataset
from Utils.ddp_tools import init_env, save_model, copy_model, DDP_SummaryWriter
from Models.Backbone import create_backbone
from Models.Loss import create_class_loss
from Models.Optimizer import create_optimizer
from Models.Scheduler import create_scheduler
from torchinfo import summary
import colossalai
cur_path = os.path.abspath(os.path.dirname(__file__))
if __name__ == "__main__":
"""分类任务"""
parser = colossalai.get_default_parser()
parser.add_argument("--config_file", help="训练配置", default="./Config/config.py")
# 初始化环境
ckpt_path, cfg, tb_writer, logger = init_env(parser.parse_args().config_file)
# 数据集
dataset = analysis_dataset(cfg.Txt)
tb_writer.add_dataset_info(dataset)
train_set = create_datasets(
dataset=dataset["train"], size=cfg.Size, process=cfg.Process, use_augment=True
)
val_set = create_datasets(
dataset=dataset["val"], size=cfg.Size, process=cfg.Process
)
train_dataloader = create_dataloader(cfg.Batch, train_set, cfg.Sampler)
val_dataloader = create_dataloader(cfg.Batch, val_set)
# 模型
model = create_backbone(cfg.Backbone, num_classes=len(dataset["all_labels"]))
model.info = {"task": "class", "all_labels": dataset["all_labels"]}
cp_model = copy_model(model)
tb_writer.add_model_info(model, cfg.Size)
# 损失函数
criterion = create_class_loss(cfg.Loss)
# 优化器
optimizer = create_optimizer(cfg.Optimizer, model.parameters(), lr=cfg.LR)
# 学习率调度器
lr_scheduler = create_scheduler(cfg.Scheduler, cfg.Epochs, optimizer)
# colossalai封装
engine, train_dataloader, val_dataloader, _ = colossalai.initialize(
model,
optimizer,
criterion,
train_dataloader,
val_dataloader,
)
best_acc = 0.0
for epoch in range(cfg.Epochs):
engine.train()
logger.info(f"Starting {epoch} / {cfg.Epochs}", ranks=[0])
for batch_idx, (imgs, labels) in enumerate(train_dataloader):
imgs, labels = imgs.cuda(), labels.cuda()
engine.zero_grad()
output = engine(imgs)
loss = engine.criterion(output, labels)
engine.backward(loss)
engine.step()
if batch_idx % 100 == 0:
iter_num = int(batch_idx + epoch * len(train_dataloader))
tb_writer.add_scalar("Train/loss", loss.item(), iter_num)
# 验证集评估
engine.eval()
acc = eval_model(engine, val_dataloader).Overall_ACC
if best_acc <= acc:
best_acc = acc
save_model(engine.model, cp_model, ckpt_path + cfg.Backbone + "_best.pt")
# 可视化
tb_writer.add_augment_imgs(epoch, imgs, labels, dataset["all_labels"])
tb_writer.add_scalar("Train/lr", lr_scheduler.get_last_lr()[0], epoch)
tb_writer.add_scalar("Val/acc", acc, epoch)
lr_scheduler.step()
save_model(engine.model, cp_model, ckpt_path + cfg.Backbone + "_last.pt")
tb_writer.close()
| [
"colossalai.initialize",
"colossalai.get_default_parser"
] | [((515, 540), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (530, 540), False, 'import os\n'), ((598, 629), 'colossalai.get_default_parser', 'colossalai.get_default_parser', ([], {}), '()\n', (627, 629), False, 'import colossalai\n'), ((834, 859), 'Utils.tools.analysis_dataset', 'analysis_dataset', (['cfg.Txt'], {}), '(cfg.Txt)\n', (850, 859), False, 'from Utils.tools import analysis_dataset\n'), ((916, 1016), 'DataSets.create_datasets', 'create_datasets', ([], {'dataset': "dataset['train']", 'size': 'cfg.Size', 'process': 'cfg.Process', 'use_augment': '(True)'}), "(dataset=dataset['train'], size=cfg.Size, process=cfg.\n Process, use_augment=True)\n", (931, 1016), False, 'from DataSets import create_datasets, create_dataloader\n'), ((1040, 1115), 'DataSets.create_datasets', 'create_datasets', ([], {'dataset': "dataset['val']", 'size': 'cfg.Size', 'process': 'cfg.Process'}), "(dataset=dataset['val'], size=cfg.Size, process=cfg.Process)\n", (1055, 1115), False, 'from DataSets import create_datasets, create_dataloader\n'), ((1153, 1205), 'DataSets.create_dataloader', 'create_dataloader', (['cfg.Batch', 'train_set', 'cfg.Sampler'], {}), '(cfg.Batch, train_set, cfg.Sampler)\n', (1170, 1205), False, 'from DataSets import create_datasets, create_dataloader\n'), ((1227, 1264), 'DataSets.create_dataloader', 'create_dataloader', (['cfg.Batch', 'val_set'], {}), '(cfg.Batch, val_set)\n', (1244, 1264), False, 'from DataSets import create_datasets, create_dataloader\n'), ((1444, 1461), 'Utils.ddp_tools.copy_model', 'copy_model', (['model'], {}), '(model)\n', (1454, 1461), False, 'from Utils.ddp_tools import init_env, save_model, copy_model, DDP_SummaryWriter\n'), ((1536, 1563), 'Models.Loss.create_class_loss', 'create_class_loss', (['cfg.Loss'], {}), '(cfg.Loss)\n', (1553, 1563), False, 'from Models.Loss import create_class_loss\n'), ((1687, 1741), 'Models.Scheduler.create_scheduler', 'create_scheduler', (['cfg.Scheduler', 'cfg.Epochs', 
'optimizer'], {}), '(cfg.Scheduler, cfg.Epochs, optimizer)\n', (1703, 1741), False, 'from Models.Scheduler import create_scheduler\n'), ((1812, 1900), 'colossalai.initialize', 'colossalai.initialize', (['model', 'optimizer', 'criterion', 'train_dataloader', 'val_dataloader'], {}), '(model, optimizer, criterion, train_dataloader,\n val_dataloader)\n', (1833, 1900), False, 'import colossalai\n'), ((3075, 3148), 'Utils.ddp_tools.save_model', 'save_model', (['engine.model', 'cp_model', "(ckpt_path + cfg.Backbone + '_last.pt')"], {}), "(engine.model, cp_model, ckpt_path + cfg.Backbone + '_last.pt')\n", (3085, 3148), False, 'from Utils.ddp_tools import init_env, save_model, copy_model, DDP_SummaryWriter\n'), ((2630, 2664), 'Utils.eval.eval_model', 'eval_model', (['engine', 'val_dataloader'], {}), '(engine, val_dataloader)\n', (2640, 2664), False, 'from Utils.eval import eval_model\n'), ((2744, 2817), 'Utils.ddp_tools.save_model', 'save_model', (['engine.model', 'cp_model', "(ckpt_path + cfg.Backbone + '_best.pt')"], {}), "(engine.model, cp_model, ckpt_path + cfg.Backbone + '_best.pt')\n", (2754, 2817), False, 'from Utils.ddp_tools import init_env, save_model, copy_model, DDP_SummaryWriter\n')] |
import pytest
import colossalai
from colossalai.context.parallel_mode import ParallelMode
import torch.multiprocessing as mp
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_port
from colossalai.utils import ColoInitContext
from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, distspec
from colossalai.core import global_context as gpc
from functools import partial
from _utils import tensor_equal, tensor_shard_equal, set_seed
from tests.components_to_test.registry import non_distributed_component_funcs
from torch.nn.parallel import DistributedDataParallel as DDP
from colossalai.nn.parallel import ColoDDP
def init_1d_row_spec(model):
spec = TensorSpec(
distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [0], [gpc.get_world_size(ParallelMode.PARALLEL_1D)]),
ParallelAction(ComputePattern.TP1D))
with DistSpecManager.no_grad():
for n, p in model.named_parameters():
if 'weight' in n and 'ln' not in n:
p.set_spec(spec)
def init_1d_col_spec(model):
spec = TensorSpec(
distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [-1], [gpc.get_world_size(ParallelMode.PARALLEL_1D)]),
ParallelAction(ComputePattern.TP1D))
with DistSpecManager.no_grad():
for n, p in model.named_parameters():
if 'ln' not in n and ('weight' in n or 'bias' in n):
p.set_spec(spec)
def check_param_equal(model, torch_model):
for p, torch_p in zip(model.parameters(), torch_model.parameters()):
assert tensor_shard_equal(torch_p, p)
def check_grad_equal(model, torch_model):
for p, torch_p in zip(model.parameters(), torch_model.parameters()):
assert tensor_shard_equal(torch_p.grad, p.grad)
def run_gpt(init_spec_func, use_ddp):
get_components_func = non_distributed_component_funcs.get_callable('gpt2')
model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
with ColoInitContext(device=get_current_device()):
model = model_builder()
model = model.cuda()
torch_model = model_builder().cuda()
if use_ddp:
model = ColoDDP(model)
torch_model = DDP(torch_model,
device_ids=[gpc.get_global_rank()],
process_group=gpc.get_group(ParallelMode.DATA))
for torch_p, p in zip(torch_model.parameters(), model.parameters()):
torch_p.data.copy_(p)
init_spec_func(model)
check_param_equal(model, torch_model)
model.train()
torch_model.train()
set_seed(gpc.get_local_rank(ParallelMode.DATA))
for i, (input_ids, attn_mask) in enumerate(train_dataloader):
logits = model(input_ids, attn_mask)
torch_logits = torch_model(input_ids, attn_mask)
assert tensor_equal(torch_logits, logits)
loss = criterion(logits, input_ids)
torch_loss = criterion(torch_logits, input_ids)
if use_ddp:
model.backward(loss)
else:
loss.backward()
torch_loss.backward()
check_grad_equal(model, torch_model)
if i > 0:
break
def run_dist(rank, world_size, port, use_ddp):
if use_ddp and world_size == 1:
return
tp_world_size = world_size // 2 if use_ddp else world_size
config = dict(parallel=dict(tensor=dict(mode="1d", size=tp_world_size),))
colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
run_gpt(init_1d_row_spec, use_ddp)
run_gpt(init_1d_col_spec, use_ddp)
@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
@pytest.mark.parametrize('use_ddp', [False, True])
@rerun_if_address_is_in_use()
def test_gpt(world_size, use_ddp):
run_func = partial(run_dist, world_size=world_size, port=free_port(), use_ddp=use_ddp)
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_gpt(4)
| [
"colossalai.tensor.ParallelAction",
"colossalai.nn.parallel.ColoDDP",
"colossalai.launch",
"colossalai.core.global_context.get_local_rank",
"colossalai.utils.free_port",
"colossalai.utils.cuda.get_current_device",
"colossalai.core.global_context.get_group",
"colossalai.core.global_context.get_world_si... | [((3704, 3749), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 4]'], {}), "('world_size', [1, 4])\n", (3727, 3749), False, 'import pytest\n'), ((3751, 3800), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_ddp"""', '[False, True]'], {}), "('use_ddp', [False, True])\n", (3774, 3800), False, 'import pytest\n'), ((3802, 3830), 'colossalai.testing.rerun_if_address_is_in_use', 'rerun_if_address_is_in_use', ([], {}), '()\n', (3828, 3830), False, 'from colossalai.testing import rerun_if_address_is_in_use\n'), ((1925, 1977), 'tests.components_to_test.registry.non_distributed_component_funcs.get_callable', 'non_distributed_component_funcs.get_callable', (['"""gpt2"""'], {}), "('gpt2')\n", (1969, 1977), False, 'from tests.components_to_test.registry import non_distributed_component_funcs\n'), ((3493, 3609), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'config', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=config, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (3510, 3609), False, 'import colossalai\n'), ((3961, 3998), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (3969, 3998), True, 'import torch.multiprocessing as mp\n'), ((924, 959), 'colossalai.tensor.ParallelAction', 'ParallelAction', (['ComputePattern.TP1D'], {}), '(ComputePattern.TP1D)\n', (938, 959), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, distspec\n'), ((970, 995), 'colossalai.tensor.DistSpecManager.no_grad', 'DistSpecManager.no_grad', ([], {}), '()\n', (993, 995), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, distspec\n'), ((1305, 1340), 'colossalai.tensor.ParallelAction', 'ParallelAction', 
(['ComputePattern.TP1D'], {}), '(ComputePattern.TP1D)\n', (1319, 1340), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, distspec\n'), ((1351, 1376), 'colossalai.tensor.DistSpecManager.no_grad', 'DistSpecManager.no_grad', ([], {}), '()\n', (1374, 1376), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, distspec\n'), ((1655, 1685), '_utils.tensor_shard_equal', 'tensor_shard_equal', (['torch_p', 'p'], {}), '(torch_p, p)\n', (1673, 1685), False, 'from _utils import tensor_equal, tensor_shard_equal, set_seed\n'), ((1818, 1858), '_utils.tensor_shard_equal', 'tensor_shard_equal', (['torch_p.grad', 'p.grad'], {}), '(torch_p.grad, p.grad)\n', (1836, 1858), False, 'from _utils import tensor_equal, tensor_shard_equal, set_seed\n'), ((2269, 2283), 'colossalai.nn.parallel.ColoDDP', 'ColoDDP', (['model'], {}), '(model)\n', (2276, 2283), False, 'from colossalai.nn.parallel import ColoDDP\n'), ((2685, 2722), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (2703, 2722), True, 'from colossalai.core import global_context as gpc\n'), ((2907, 2941), '_utils.tensor_equal', 'tensor_equal', (['torch_logits', 'logits'], {}), '(torch_logits, logits)\n', (2919, 2941), False, 'from _utils import tensor_equal, tensor_shard_equal, set_seed\n'), ((821, 860), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (834, 860), True, 'from colossalai.core import global_context as gpc\n'), ((1201, 1240), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (1214, 1240), True, 'from colossalai.core import global_context as gpc\n'), ((3927, 3938), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (3936, 3938), False, 'from colossalai.utils import free_port\n'), ((868, 
912), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (886, 912), True, 'from colossalai.core import global_context as gpc\n'), ((1249, 1293), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (1267, 1293), True, 'from colossalai.core import global_context as gpc\n'), ((2116, 2136), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (2134, 2136), False, 'from colossalai.utils.cuda import get_current_device\n'), ((2425, 2457), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (2438, 2457), True, 'from colossalai.core import global_context as gpc\n'), ((2361, 2382), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (2380, 2382), True, 'from colossalai.core import global_context as gpc\n')] |
from functools import partial
import torch
import torch.distributed as dist
import torch.nn as nn
from colossalai.logging import get_dist_logger
from colossalai.utils import checkpoint
from colossalai.zero.sharded_model import ShardedModelV2
LOGGER = get_dist_logger()
CONFIG = dict(fp16=dict(mode=None,),
zero=dict(level=3,
verbose=False,
offload_optimizer_config=dict(device='cpu', pin_memory=True, buffer_count=5, fast_init=False),
offload_param_config=dict(device='cpu',
pin_memory=True,
buffer_count=5,
buffer_size=1e8,
max_in_cpu=1e9)),
parallel=dict(pipeline=dict(size=1), tensor=dict(size=1, mode=None)))
def run_fwd_bwd(model, data, label, criterion, enable_autocast=False):
model.train()
with torch.cuda.amp.autocast(enabled=enable_autocast):
if criterion:
y = model(data)
loss = criterion(y, label)
else:
loss = model(data, label)
loss = loss.float()
if isinstance(model, ShardedModelV2):
model.backward(loss)
else:
loss.backward()
def checkpoint_wrapper(module, enable=True):
if enable:
module.forward = partial(checkpoint, module.forward)
return module
def allclose(tensor_a: torch.Tensor, tensor_b: torch.Tensor, loose=False) -> bool:
if loose:
return torch.allclose(tensor_a, tensor_b, atol=1e-2, rtol=1e-3)
return torch.allclose(tensor_a, tensor_b)
def check_grads(model, zero_model, loose=False):
for p, zero_p in zip(model.parameters(), zero_model.parameters()):
zero_grad = zero_p.grad.clone().to(p.device)
grad = p.grad.float()
assert grad.dtype == zero_grad.dtype
assert allclose(grad, zero_grad, loose=loose)
def check_params(model, zero_model, loose=False):
for p, zero_p in zip(model.parameters(), zero_model.parameters()):
zero_p = zero_p.clone().to(p.device)
assert p.dtype == zero_p.dtype
assert allclose(p, zero_p, loose=loose)
def check_grads_padding(model, zero_model, loose=False):
rank = dist.get_rank()
for p, zero_p in zip(model.parameters(), zero_model.parameters()):
zero_grad = zero_p.grad.clone().to(p.device)
chunks = torch.flatten(p.grad).chunk(dist.get_world_size())
if rank >= len(chunks):
continue
grad = chunks[rank].float()
if zero_grad.size(0) > grad.size(0):
zero_grad = zero_grad[:grad.size(0)]
assert grad.dtype == zero_grad.dtype
assert allclose(grad, zero_grad, loose=loose), f'diff: {grad - zero_grad}'
def check_params_padding(model, zero_model, loose=False):
rank = dist.get_rank()
for p, zero_p in zip(model.parameters(), zero_model.parameters()):
zero_p = zero_p.clone().to(p.device)
chunks = torch.flatten(p).chunk(dist.get_world_size())
if rank >= len(chunks):
continue
p = chunks[rank]
if zero_p.size(0) > p.size(0):
zero_p = zero_p[:p.size(0)]
assert p.dtype == zero_p.dtype
assert allclose(p, zero_p, loose=loose)
def check_sharded_params_padding(model, zero_model, loose=False):
rank = dist.get_rank()
for p, zero_p in zip(model.parameters(), zero_model.parameters()):
zero_p = zero_p.col_attr.data.payload.to(p.device).float()
chunks = torch.flatten(p).chunk(dist.get_world_size())
if rank >= len(chunks):
continue
p = chunks[rank].float()
if zero_p.size(0) > p.size(0):
zero_p = zero_p[:p.size(0)]
assert p.dtype == zero_p.dtype
assert allclose(p, zero_p, loose=loose), f'{p} vs {zero_p}'
| [
"colossalai.logging.get_dist_logger"
] | [((254, 271), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (269, 271), False, 'from colossalai.logging import get_dist_logger\n'), ((1664, 1698), 'torch.allclose', 'torch.allclose', (['tensor_a', 'tensor_b'], {}), '(tensor_a, tensor_b)\n', (1678, 1698), False, 'import torch\n'), ((2328, 2343), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2341, 2343), True, 'import torch.distributed as dist\n'), ((2918, 2933), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2931, 2933), True, 'import torch.distributed as dist\n'), ((3436, 3451), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (3449, 3451), True, 'import torch.distributed as dist\n'), ((1017, 1065), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {'enabled': 'enable_autocast'}), '(enabled=enable_autocast)\n', (1040, 1065), False, 'import torch\n'), ((1428, 1463), 'functools.partial', 'partial', (['checkpoint', 'module.forward'], {}), '(checkpoint, module.forward)\n', (1435, 1463), False, 'from functools import partial\n'), ((1596, 1653), 'torch.allclose', 'torch.allclose', (['tensor_a', 'tensor_b'], {'atol': '(0.01)', 'rtol': '(0.001)'}), '(tensor_a, tensor_b, atol=0.01, rtol=0.001)\n', (1610, 1653), False, 'import torch\n'), ((2513, 2534), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2532, 2534), True, 'import torch.distributed as dist\n'), ((3090, 3111), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (3109, 3111), True, 'import torch.distributed as dist\n'), ((3630, 3651), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (3649, 3651), True, 'import torch.distributed as dist\n'), ((2485, 2506), 'torch.flatten', 'torch.flatten', (['p.grad'], {}), '(p.grad)\n', (2498, 2506), False, 'import torch\n'), ((3067, 3083), 'torch.flatten', 'torch.flatten', (['p'], {}), '(p)\n', (3080, 3083), False, 'import torch\n'), ((3607, 3623), 
'torch.flatten', 'torch.flatten', (['p'], {}), '(p)\n', (3620, 3623), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from .amp_type import AMP_TYPE
from colossalai.context import Config
import torch.nn as nn
from torch.optim import Optimizer
from torch.nn.modules.loss import _Loss
from .torch_amp import convert_to_torch_amp
from .apex_amp import convert_to_apex_amp
from .naive_amp import convert_to_naive_amp
def convert_to_amp(model: nn.Module, optimizer: Optimizer, criterion: _Loss, mode: AMP_TYPE, amp_config: Config = None):
"""A helper function to wrap training components with Torch AMP modules
:param model: your model object
:type model: :class:`torch.nn.Module`
:param optimizer: your optimizer object
:type optimizer: :class:`torch.optim.Optimizer`
:param criterion: your loss function object
:type criterion: :class:`torch.nn.modules.loss._Loss`
:param mode: amp mode
:type mode: :class:`colossalai.amp.AMP_TYPE`
:param amp_config: configuration for different amp modes
:type amp_config: :class:`colossalai.context.Config` or dict
:return: (model, optimizer, criterion)
:rtype: Tuple
"""
assert isinstance(mode, AMP_TYPE), \
f'expected the argument mode be AMP_TYPE, but got {type(mode)}'
if amp_config is None:
amp_config = Config()
if mode == AMP_TYPE.TORCH:
model, optimizer, criterion = convert_to_torch_amp(model, optimizer, criterion, amp_config)
elif mode == AMP_TYPE.APEX:
model, optimizer = convert_to_apex_amp(model, optimizer, amp_config)
elif mode == AMP_TYPE.NAIVE:
model, optimizer = convert_to_naive_amp(model, optimizer, amp_config)
return model, optimizer, criterion
| [
"colossalai.context.Config"
] | [((1257, 1265), 'colossalai.context.Config', 'Config', ([], {}), '()\n', (1263, 1265), False, 'from colossalai.context import Config\n')] |
from typing import Optional
from colossalai.utils import get_current_device
from torch import nn
from ... import init as init
from ..parallel_1d import *
from ..parallel_2d import *
from ..parallel_2p5d import *
from ..parallel_3d import *
from ..utils import get_tensor_parallel_mode
from ..vanilla import *
_parallel_layernorm = {'2d': LayerNorm2D, '2.5d': LayerNorm2p5D, '3d': LayerNorm3D}
class LayerNorm(nn.Module):
r"""
Layer Normalization for colossalai
:param normalized_shape: input shape from an expected input
of size. :math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1] \times \ldots \times \text{normalized_shape}[-1]]`
If a single integer is used, it is treated as a singleton list, and this module will
normalize over the last dimension which is expected to be of that specific size.
:type normalized_shape: int
:param eps: a value added to the denominator for numerical stability, defaults to 1e-05
:type eps: float, optional
:param dtype: The dtype of parameters, defaults to None
:type dtype: torch.dtype, optional
"""
def __init__(self, normalized_shape: int, eps=1e-05, dtype=None) -> None:
super().__init__()
tensor_parallel = get_tensor_parallel_mode()
if tensor_parallel in ['None', '1d']:
self.norm = nn.LayerNorm(normalized_shape, eps=eps, device=get_current_device(), dtype=dtype)
else:
self.norm = _parallel_layernorm[tensor_parallel](normalized_shape, eps=eps, dtype=dtype)
@property
def weight(self):
return self.norm.weight
@property
def bias(self):
return self.norm.bias
def forward(self, *args):
return self.norm(*args)
| [
"colossalai.utils.get_current_device"
] | [((1438, 1458), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (1456, 1458), False, 'from colossalai.utils import get_current_device\n')] |
from colossalai.utils.memory_tracer.memstats_collector import MemStatsCollector
from colossalai.utils.memory_tracer.model_data_memtracer import ModelDataTracer
import torch
def test_mem_collector():
collector = MemStatsCollector()
collector.start_collection()
a = torch.randn(10).cuda()
# sampling at time 0
collector.sample_memstats()
m_a = torch.randn(10).cuda()
ModelDataTracer().add_tensor(m_a)
b = torch.randn(10).cuda()
# sampling at time 1
collector.sample_memstats()
a = b
# sampling at time 2
collector.sample_memstats()
collector.finish_collection()
collector.reset()
# do nothing after collection, just advance sampling cnter
collector.sample_memstats()
collector.sample_memstats()
cuda_use, overall_use = collector.fetch_memstats()
print(cuda_use, overall_use)
print(collector._model_data_cuda)
print(collector._overall_cuda)
if __name__ == '__main__':
test_mem_collector()
| [
"colossalai.utils.memory_tracer.model_data_memtracer.ModelDataTracer",
"colossalai.utils.memory_tracer.memstats_collector.MemStatsCollector"
] | [((217, 236), 'colossalai.utils.memory_tracer.memstats_collector.MemStatsCollector', 'MemStatsCollector', ([], {}), '()\n', (234, 236), False, 'from colossalai.utils.memory_tracer.memstats_collector import MemStatsCollector\n'), ((280, 295), 'torch.randn', 'torch.randn', (['(10)'], {}), '(10)\n', (291, 295), False, 'import torch\n'), ((372, 387), 'torch.randn', 'torch.randn', (['(10)'], {}), '(10)\n', (383, 387), False, 'import torch\n'), ((399, 416), 'colossalai.utils.memory_tracer.model_data_memtracer.ModelDataTracer', 'ModelDataTracer', ([], {}), '()\n', (414, 416), False, 'from colossalai.utils.memory_tracer.model_data_memtracer import ModelDataTracer\n'), ((441, 456), 'torch.randn', 'torch.randn', (['(10)'], {}), '(10)\n', (452, 456), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from typing import Union, List
from colossalai import engine
from colossalai.context.parallel_mode import ParallelMode
import torch
from torch import Tensor
from torch.utils.data import DataLoader
from tqdm import tqdm
from colossalai.core import global_context as gpc
from colossalai.engine import Engine
from colossalai.engine.schedule import NonPipelineSchedule, BaseSchedule
from colossalai.logging import DistributedLogger
from colossalai.utils import MultiTimer
from colossalai.utils import is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage
from .hooks import BaseHook
class Trainer:
"""This a class tending for easy deployments of users' training and evaluation instead of
writing their own scripts. It is similar with ``ignite.engine`` and ``keras.engine``, but is
called `Trainer`.
:param engine: Engine responsible for the process function
:type engine: :class:`Engine`
:param schedule: Schedule responsible for forward and backward steps
:type schedule: :class:`BaseSchedule`, optional
:param timer: Timer used to monitor the whole training
:type timer: :class:`MultiTimer`, optional
:param logger: Logger used to record the whole training
:type logger: :class:`colossalai.logging.DistributedLogger`, optional
"""
def __init__(self,
engine: Engine,
schedule: BaseSchedule = None,
timer: MultiTimer = None,
logger: DistributedLogger = None):
# training-ralated params
self._engine = engine
self._max_epochs = 0
self._cur_epoch = 0
self._max_steps = 0
self._cur_step = 0
self._steps_per_epoch = 0
# misc params
self._logger = logger
self._verbose = logger is not None
# hooks can store states in this dict, and could be consumed by other hooks
self.states = dict()
# build hooks
self.hooks = list()
# multi-timer for time benchmarking
self._timer = timer
# set schedule which specifies the training iteration for the engine
if schedule is None:
schedule = NonPipelineSchedule()
if gpc.is_initialized(ParallelMode.PIPELINE) and gpc.get_world_size(ParallelMode.PIPELINE) > 1:
assert not isinstance(schedule, NonPipelineSchedule), \
'NonPipelineSchedule cannot be used for pipeline parallel training, please use PipelineSchedule instead.'
self._schedule = schedule
self._schedule.pre_processing(engine)
@property
def cur_epoch(self):
"""Returns the index of the current epoch.
"""
return self._cur_epoch
@cur_epoch.setter
def cur_epoch(self, epoch: int):
"""Set how many epochs have been processed.
"""
# allow setter for training resumption
self._cur_epoch = epoch
@property
def cur_step(self):
"""Returns how many iteration steps have been processed.
"""
return self._cur_step
@property
def max_epochs(self):
return self._max_epochs
@property
def max_steps(self):
return self._max_steps
@property
def steps_per_epoch(self):
return self._steps_per_epoch
@property
def engine(self):
return self._engine
@property
def schedule(self):
return self._schedule
def _set_current_step(self, epoch: int):
"""Sets current step number.
:param epoch: Step number to be set
:type epoch: int
"""
self._cur_step = epoch * self._steps_per_epoch
def _call_timer(self, action: str, item: str, *args, **kwargs) -> None:
"""Call timer funciton with a given timer name.
:param action: Function to be called on timer
:type action: str
:param item: Name of the timer
:type item: str
:param args: args used for action function
:param kwargs: kwargs used for action function
"""
if self._timer is not None:
getattr(self._timer, action)(item, *args, **kwargs)
def _reset_states(self) -> None:
"""Clear trainer states
"""
self.states = dict()
def _call_hooks(self, func, output=None):
"""Calls specific hooks in the current time point.
:param func: A string represents the time point
:param output: Output of the model after running a iteration or None in any other time points
:type func: str
:type output: optional
"""
# Only after iter hook will receive output
for hook in self.hooks:
if output is None:
getattr(hook, func)(self)
else:
getattr(hook, func)(self, *output)
@staticmethod
def _should_display_progress(display_progress: bool):
""" Only display progress on DP rank 0, TP rank 0 and PP last rank
"""
return display_progress and is_dp_rank_0() and is_tp_rank_0() and is_no_pp_or_last_stage()
    def _train_epoch(self,
                     train_dataloader: DataLoader,
                     epoch: int = None,
                     display_progress: bool = False,
                     return_output_label: bool = True):
        """Run one full training epoch over ``train_dataloader``.

        :param train_dataloader: DataLoader providing training batches
        :param epoch: Epoch index, used only for the progress-bar label
        :param display_progress: Whether to wrap the loop in a tqdm bar
        :param return_output_label: Forwarded to the schedule; controls whether
            model output and label are returned to the after-iteration hooks
        """
        # set training state
        self._engine.train()
        data_iter = iter(train_dataloader)
        progress = range(self._steps_per_epoch)
        if display_progress:
            if epoch is None:
                progress = tqdm(progress, desc='[Train]')
            else:
                progress = tqdm(progress, desc=f'[Epoch {epoch} / Train]')
        self._call_hooks('before_train_epoch')
        self._call_timer(action='start', item='Train-epoch')
        for i in progress:
            self._call_hooks('before_train_iter')
            self._call_timer(action='start', item='Train-step')
            # run 1 training step
            self.engine.zero_grad()
            logits, label, loss = self.schedule.forward_backward_step(
                self.engine, data_iter, forward_only=False, return_loss=True, return_output_label=return_output_label)
            self.engine.step()
            self._call_timer(action='stop', item='Train-step', keep_in_history=True)
            self._call_hooks('after_train_iter', output=(logits, label, loss))
            self._cur_step += 1
            if display_progress:
                if 'step_metrics' in self.states:
                    # metric hooks publish per-step numbers through self.states
                    progress.set_postfix(**self.states['step_metrics'])
            # stop when max iter is reached
            if self._exceed_max_step():
                break
        self._call_timer(action='stop', item='Train-epoch', keep_in_history=True)
        self._call_hooks('after_train_epoch')
        self._call_timer(action='reset', item='Train-epoch')
    def _eval(self,
              test_dataloader: DataLoader,
              epoch: int = None,
              display_progress: bool = False,
              return_output_label: bool = True):
        """Run one evaluation pass over ``test_dataloader`` under ``no_grad``.

        :param test_dataloader: DataLoader providing evaluation batches
        :param epoch: Epoch index, used only for the progress-bar label
        :param display_progress: Whether to wrap the loop in a tqdm bar
        :param return_output_label: Forwarded to the schedule; controls whether
            model output and label are returned to the after-iteration hooks
        """
        # switch engine status
        self._engine.eval()
        data_iter = iter(test_dataloader)
        num_steps = len(test_dataloader)
        self._call_hooks('before_test')
        # prepare progress bar
        progress = range(num_steps)
        if display_progress:
            desc = 'Evaluation'
            if epoch is not None:
                desc = '[Epoch %d / Test]' % epoch
            progress = tqdm(progress, desc=desc)
        self._call_hooks('before_test_epoch')
        self._call_timer(action='start', item='Test-epoch')
        # evaluation never needs gradients
        with torch.no_grad():
            for _ in progress:
                self._call_hooks('before_test_iter')
                self._call_timer(action='start', item='Test-step')
                logits, label, loss = self.schedule.forward_backward_step(
                    self.engine, data_iter, forward_only=True, return_loss=True, return_output_label=return_output_label)
                self._call_timer(action='stop', item='Test-step', keep_in_history=True)
                self._call_hooks('after_test_iter',
                                 output=(logits, label, loss))
                if display_progress:
                    if 'step_metrics' in self.states:
                        # metric hooks publish per-step numbers through self.states
                        progress.set_postfix(**self.states['step_metrics'])
        self._call_timer(action='stop', item='Test-epoch', keep_in_history=True)
        self._call_hooks('after_test_epoch')
        self._call_hooks('after_test')
        self._call_timer(action='reset', item='Test-step')
        self._call_timer(action='reset', item='Test-epoch')
def _exceed_max_step(self):
return self._max_steps is not None and self._cur_step >= self._max_steps
    def fit(self,
            train_dataloader: DataLoader,
            epochs: int,
            max_steps: int = None,
            test_dataloader: DataLoader = None,
            test_interval: int = 1,
            hooks: List[BaseHook] = None,
            display_progress: bool = False,
            return_output_label: bool = True,
            ):
        """Trains the model to fit training data.

        :param train_dataloader: DataLoader in training
        :param epochs: Maximum number of epoches
        :param max_steps: Maximum number of running iterations
        :param test_dataloader: DataLoader in testing
        :param test_interval: Interval of testing
        :param hooks: A list of hooks used in training
        :param display_progress: If True, the training progress will be printed
        :param return_output_label: If True, the output of model and the label will be returned
        :type train_dataloader: DataLoader
        :type epochs: int
        :type max_steps: int, optional
        :type test_dataloader: DataLoader, optional
        :type test_interval: int, optional
        :type hooks: list, optional
        :type display_progress: bool, optional
        :type return_output_label: bool, optional
        """
        # set epochs and steps, consider gradient accumulation
        self._steps_per_epoch = len(train_dataloader)
        self._max_steps = max_steps
        self._max_epochs = epochs
        # check if testing is required
        should_test = False
        if test_dataloader is not None:
            should_test = True
        display_progress = self._should_display_progress(display_progress)
        # reset hooks
        self._reset_states()
        if hooks is not None:
            assert isinstance(hooks, list), f'expected argument hooks be to list, but got {type(hooks)}'
        else:
            hooks = []
        self.hooks = hooks
        # hooks with a lower priority value run first
        self.hooks.sort(key=lambda hook: hook.priority)
        if self._verbose:
            for hook in self.hooks:
                self._logger.info(
                    f'Using {hook.__class__.__name__} for training, priority = {hook.priority}', ranks=[0])
            self._logger.info("Lower value means higher priority for calling hook function", ranks=[0])
        self._call_hooks('after_hook_is_attached')
        # start train
        self._engine.train()
        self._call_hooks('before_train')
        # recover step value if resuming training
        last_epoch = self._cur_epoch
        if self.cur_epoch != 0:
            # cur_epoch was restored from a checkpoint: realign the step counter
            self._set_current_step(last_epoch)
        for epoch in range(last_epoch, epochs):
            # train for one epoch
            self._train_epoch(
                train_dataloader=train_dataloader,
                epoch=epoch,
                display_progress=display_progress,
                return_output_label=return_output_label
            )
            # start eval
            if should_test and epoch % test_interval == 0:
                self._eval(test_dataloader=test_dataloader,
                           display_progress=display_progress,
                           epoch=epoch,
                           return_output_label=return_output_label
                           )
            self._cur_epoch += 1
            # check for termination
            if self._exceed_max_step():
                self._logger.info(
                    f"Max number of steps {max_steps} has been reached, training is stopped automatically",
                    ranks=[0])
                break
        self._call_hooks('after_train')
        self._call_timer('reset', 'Train-epoch')
def evaluate(self,
test_dataloader: DataLoader,
hooks: List[BaseHook] = None,
display_progress: bool = False,
return_output_label: bool = True):
"""Evaluates the model with testing data.
:param test_dataloader: DataLoader in testing
:param hooks: A list of hooks used in evaluation
:param display_progress: If True, the evaluation progress will be printed
:param return_output_label: If True, the output of model and the label will be returned
:type test_dataloader: DataLoader
:type hooks: list, optional
:type display_progress: bool, optional
:type return_output_label: bool
"""
# set display
display_progress = self._should_display_progress(display_progress)
# reset hooks
self._reset_states()
if hooks is not None:
assert isinstance(hooks, list), f'expected argument hooks be to list, but got {type(hooks)}'
else:
hooks = []
self.hooks = hooks
self.hooks.sort(key=lambda hook: hook.priority)
if self._verbose:
for hook in self.hooks:
self._logger.info(
f'Using {hook.__class__.__name__} for training, priority = {hook.priority}', ranks=[0])
self._logger.info("Lower value means higher priority for calling hook function", ranks=[0])
self._call_hooks('after_hook_is_attached')
# eval
self._eval(test_dataloader=test_dataloader,
display_progress=display_progress,
return_output_label=return_output_label
)
def predict(self, data: Union[Tensor, List[Tensor]]):
"""Uses trained model to make a prediction for a tensor or a tensor list.
:param data: Data as the input
:type data: Union[Tensor, List[Tensor]
:return: The output of model as the prediction
:rtype: Tensor
"""
# predict without labels
if isinstance(data, (list, tuple)):
assert isinstance(data[0], Tensor)
else:
assert isinstance(data, Tensor)
self._engine.eval()
# prepare a list of (data, label) to make it iterable
# for compatibility with schedule
simple_dataloader = [(data, None)]
data_iter = iter(simple_dataloader)
output, _, _ = self.schedule.forward_backward_step(
self.engine, data_iter, forward_only=True, return_loss=False)
return output
| [
"colossalai.utils.is_no_pp_or_last_stage",
"colossalai.engine.schedule.NonPipelineSchedule",
"colossalai.utils.is_tp_rank_0",
"colossalai.utils.is_dp_rank_0",
"colossalai.core.global_context.get_world_size",
"colossalai.core.global_context.is_initialized"
] | [((2204, 2225), 'colossalai.engine.schedule.NonPipelineSchedule', 'NonPipelineSchedule', ([], {}), '()\n', (2223, 2225), False, 'from colossalai.engine.schedule import NonPipelineSchedule, BaseSchedule\n'), ((2237, 2278), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (2255, 2278), True, 'from colossalai.core import global_context as gpc\n'), ((5028, 5042), 'colossalai.utils.is_dp_rank_0', 'is_dp_rank_0', ([], {}), '()\n', (5040, 5042), False, 'from colossalai.utils import is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage\n'), ((5047, 5061), 'colossalai.utils.is_tp_rank_0', 'is_tp_rank_0', ([], {}), '()\n', (5059, 5061), False, 'from colossalai.utils import is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage\n'), ((5066, 5090), 'colossalai.utils.is_no_pp_or_last_stage', 'is_no_pp_or_last_stage', ([], {}), '()\n', (5088, 5090), False, 'from colossalai.utils import is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage\n'), ((7482, 7507), 'tqdm.tqdm', 'tqdm', (['progress'], {'desc': 'desc'}), '(progress, desc=desc)\n', (7486, 7507), False, 'from tqdm import tqdm\n'), ((7628, 7643), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7641, 7643), False, 'import torch\n'), ((2283, 2324), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (2301, 2324), True, 'from colossalai.core import global_context as gpc\n'), ((5554, 5584), 'tqdm.tqdm', 'tqdm', (['progress'], {'desc': '"""[Train]"""'}), "(progress, desc='[Train]')\n", (5558, 5584), False, 'from tqdm import tqdm\n'), ((5630, 5677), 'tqdm.tqdm', 'tqdm', (['progress'], {'desc': 'f"""[Epoch {epoch} / Train]"""'}), "(progress, desc=f'[Epoch {epoch} / Train]')\n", (5634, 5677), False, 'from tqdm import tqdm\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
import torch.distributed as dist
from torch.distributed import ReduceOp
from torch import Tensor
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device
def all_gather(tensor: Tensor, dim: int, parallel_mode: ParallelMode, async_op: bool = False) -> Tensor:
    """Gathers all tensors from the parallel group and concatenates them in a
    specific dimension.

    :param tensor: Tensor to be gathered
    :param dim: The dimension concatenating in
    :param parallel_mode: Parallel group mode used in this communication
    :param async_op: Whether the collective is launched asynchronously
    :type tensor: :class:`torch.Tensor`
    :type dim: int
    :type parallel_mode: :class:`colossalai.context.ParallelMode`
    :type async_op: bool
    :return: The tensor generated by all-gather, or ``(tensor, work)`` when
        ``async_op`` is True
    :rtype: :class:`torch.Tensor`
    """
    depth = gpc.get_world_size(parallel_mode)
    if depth == 1:
        # single-rank group: nothing to gather, return the input tensor.
        # (Previously this path produced ``[tensor]`` — a Python list — which
        # crashed in the ``torch.transpose`` call below.)
        out = tensor
        work = None
    else:
        shape = list(tensor.shape)
        # communicate along dim 0: swap the target dim to the front and widen it
        shape[0], shape[dim] = shape[dim], shape[0]
        shape[0] *= depth
        out = torch.empty(shape, dtype=tensor.dtype, device=get_current_device())
        temp = list(torch.chunk(out, depth, dim=0))
        work = dist.all_gather(tensor_list=temp,
                               tensor=tensor.transpose(0, dim).contiguous(),
                               group=gpc.get_group(parallel_mode),
                               async_op=async_op)
        # restore the original dimension order (view, no copy)
        out = torch.transpose(out, 0, dim)
    if async_op:
        return out, work
    else:
        return out
def reduce_scatter(tensor: Tensor,
                   dim: int,
                   parallel_mode: ParallelMode,
                   op: ReduceOp = ReduceOp.SUM,
                   async_op: bool = False) -> Tensor:
    """Reduces all tensors then scatters it in a specific dimension to all
    members in the parallel group.

    :param tensor: Tensor to be reduced and scattered
    :param dim: The dimension scattering in
    :param parallel_mode: Parallel group mode used in this communication
    :param op: Reduction operation applied before scattering
    :param async_op: Whether the collective is launched asynchronously
    :type tensor: :class:`torch.Tensor`
    :type dim: int
    :type parallel_mode: :class:`colossalai.context.ParallelMode`
    :return: The tensor generated by reduce-scatter, or ``(out, work)`` when
        ``async_op`` is True
    :rtype: :class:`Tensor`
    """
    depth = gpc.get_world_size(parallel_mode)
    if depth == 1:
        # single-rank group: nothing to reduce or scatter
        return (tensor, None) if async_op else tensor
    # split the input along ``dim``; each rank receives one reduced chunk
    chunks = [chunk.contiguous() for chunk in torch.chunk(tensor, depth, dim=dim)]
    out = torch.empty(chunks[0].shape, dtype=tensor.dtype, device=get_current_device())
    work = dist.reduce_scatter(output=out,
                               input_list=chunks,
                               op=op,
                               group=gpc.get_group(parallel_mode),
                               async_op=async_op)
    return (out, work) if async_op else out
def all_reduce(tensor: Tensor,
               parallel_mode: ParallelMode,
               op: ReduceOp = ReduceOp.SUM,
               async_op: bool = False) -> Tensor:
    """All-reduce ``tensor`` across every rank of ``parallel_mode``.

    Returns ``(tensor, work)`` when ``async_op`` is set, otherwise the tensor.
    """
    work = None
    if gpc.get_world_size(parallel_mode) > 1:
        # NOTE(review): ``contiguous()`` returns a copy for non-contiguous
        # input, so the reduced data would not land in ``tensor`` then —
        # presumably callers always pass contiguous tensors; confirm.
        work = dist.all_reduce(tensor.contiguous(), op=op, group=gpc.get_group(parallel_mode), async_op=async_op)
    if async_op:
        return tensor, work
    else:
        return tensor
def broadcast(tensor: Tensor, src: int, parallel_mode: ParallelMode, async_op: bool = False):
    """Broadcast ``tensor`` from rank ``src`` to every rank of the group.

    Returns ``(tensor, work)`` when ``async_op`` is set, otherwise the tensor.
    """
    work = None
    if gpc.get_world_size(parallel_mode) > 1:
        # NOTE(review): ``contiguous()`` may copy non-contiguous input, in
        # which case receiving ranks would not see the broadcast data in
        # ``tensor`` — presumably inputs are always contiguous; confirm.
        work = dist.broadcast(tensor.contiguous(), src=src, group=gpc.get_group(parallel_mode), async_op=async_op)
    if async_op:
        return tensor, work
    else:
        return tensor
def reduce(tensor: Tensor, dst: int, parallel_mode: ParallelMode, op: ReduceOp = ReduceOp.SUM, async_op: bool = False):
    """Reduce ``tensor`` from every rank of the group onto rank ``dst``.

    Returns ``(tensor, work)`` when ``async_op`` is set, otherwise the tensor.
    """
    work = None
    if gpc.get_world_size(parallel_mode) > 1:
        # NOTE(review): same contiguity caveat as ``all_reduce`` above —
        # a non-contiguous input would receive no result; confirm callers.
        work = dist.reduce(tensor.contiguous(), dst=dst, op=op, group=gpc.get_group(parallel_mode), async_op=async_op)
    if async_op:
        return tensor, work
    else:
        return tensor
| [
"colossalai.core.global_context.get_group",
"colossalai.utils.get_current_device",
"colossalai.core.global_context.get_world_size"
] | [((905, 938), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['parallel_mode'], {}), '(parallel_mode)\n', (923, 938), True, 'from colossalai.core import global_context as gpc\n'), ((2343, 2376), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['parallel_mode'], {}), '(parallel_mode)\n', (2361, 2376), True, 'from colossalai.core import global_context as gpc\n'), ((3145, 3178), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['parallel_mode'], {}), '(parallel_mode)\n', (3163, 3178), True, 'from colossalai.core import global_context as gpc\n'), ((3527, 3560), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['parallel_mode'], {}), '(parallel_mode)\n', (3545, 3560), True, 'from colossalai.core import global_context as gpc\n'), ((3936, 3969), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['parallel_mode'], {}), '(parallel_mode)\n', (3954, 3969), True, 'from colossalai.core import global_context as gpc\n'), ((1515, 1543), 'torch.transpose', 'torch.transpose', (['out', '(0)', 'dim'], {}), '(out, 0, dim)\n', (1530, 1543), False, 'import torch\n'), ((1226, 1256), 'torch.chunk', 'torch.chunk', (['out', 'depth'], {'dim': '(0)'}), '(out, depth, dim=0)\n', (1237, 1256), False, 'import torch\n'), ((1184, 1204), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (1202, 1204), False, 'from colossalai.utils import get_current_device\n'), ((1421, 1449), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['parallel_mode'], {}), '(parallel_mode)\n', (1434, 1449), True, 'from colossalai.core import global_context as gpc\n'), ((2497, 2532), 'torch.chunk', 'torch.chunk', (['tensor', 'depth'], {'dim': 'dim'}), '(tensor, depth, dim=dim)\n', (2508, 2532), False, 'import torch\n'), ((2603, 2623), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (2621, 2623), False, 'from colossalai.utils import 
get_current_device\n'), ((2807, 2835), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['parallel_mode'], {}), '(parallel_mode)\n', (2820, 2835), True, 'from colossalai.core import global_context as gpc\n'), ((3293, 3321), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['parallel_mode'], {}), '(parallel_mode)\n', (3306, 3321), True, 'from colossalai.core import global_context as gpc\n'), ((3676, 3704), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['parallel_mode'], {}), '(parallel_mode)\n', (3689, 3704), True, 'from colossalai.core import global_context as gpc\n'), ((4089, 4117), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['parallel_mode'], {}), '(parallel_mode)\n', (4102, 4117), True, 'from colossalai.core import global_context as gpc\n')] |
import torch
import torch.distributed as dist
from colossalai.initialize import parse_args
from colossalai.utils import get_current_device
# Minimal NCCL all-reduce smoke test: every rank creates a random tensor,
# the group sums them element-wise, and each rank prints the summed result.
ARGS = parse_args()
size = ARGS.world_size
rank = ARGS.local_rank
# TCP rendezvous endpoint used to initialise the process group
init_method = f'tcp://{ARGS.host}:{ARGS.port}'
dist.init_process_group(backend='nccl', rank=rank, world_size=size, init_method=init_method)
print('Rank {} / {}'.format(dist.get_rank(), dist.get_world_size()))
SIZE = 8
tensor = torch.randn(SIZE)
# move the tensor onto this rank's device before the collective call
tensor = tensor.to(get_current_device())
# in-place sum across all ranks
dist.all_reduce(tensor)
print('Rank {0}: {1}'.format(rank, tensor.detach().cpu().numpy().tolist()))
| [
"colossalai.initialize.parse_args",
"colossalai.utils.get_current_device"
] | [((148, 160), 'colossalai.initialize.parse_args', 'parse_args', ([], {}), '()\n', (158, 160), False, 'from colossalai.initialize import parse_args\n'), ((255, 351), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': '"""nccl"""', 'rank': 'rank', 'world_size': 'size', 'init_method': 'init_method'}), "(backend='nccl', rank=rank, world_size=size,\n init_method=init_method)\n", (278, 351), True, 'import torch.distributed as dist\n'), ((436, 453), 'torch.randn', 'torch.randn', (['SIZE'], {}), '(SIZE)\n', (447, 453), False, 'import torch\n'), ((495, 518), 'torch.distributed.all_reduce', 'dist.all_reduce', (['tensor'], {}), '(tensor)\n', (510, 518), True, 'import torch.distributed as dist\n'), ((473, 493), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (491, 493), False, 'from colossalai.utils import get_current_device\n'), ((376, 391), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (389, 391), True, 'import torch.distributed as dist\n'), ((393, 414), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (412, 414), True, 'import torch.distributed as dist\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import copy
from functools import partial
import colossalai
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.nn.optimizer import CPUAdam
from colossalai.utils import free_port
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)
from colossalai.zero.sharded_model import ShardedModelV2
from colossalai.zero.sharded_optim import ShardedOptimizerV2
from tests.components_to_test.registry import non_distributed_component_funcs
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import Adam
from common import CONFIG, check_sharded_params_padding
def run_step(model, optimizer, data, label, criterion, enable_autocast=False):
    """Execute one optimisation step: forward, loss, backward, update.

    ``ShardedModelV2`` instances route the backward pass through the sharded
    optimizer; plain models use the usual ``loss.backward()``.
    """
    model.train()
    optimizer.zero_grad()
    with torch.cuda.amp.autocast(enabled=enable_autocast):
        output = model(data)
        loss = criterion(output, label).float()
    if isinstance(model, ShardedModelV2):
        optimizer.backward(loss)
    else:
        loss.backward()
    optimizer.step()
def run_step_no_criterion(model, optimizer, data, label, enable_autocast=False):
    """One optimisation step for models that compute their loss internally."""
    model.train()
    optimizer.zero_grad()
    with torch.cuda.amp.autocast(enabled=enable_autocast):
        loss = model(data, label)
    # sharded models must backprop through their sharded optimizer
    if isinstance(model, ShardedModelV2):
        optimizer.backward(loss)
    else:
        loss.backward()
    optimizer.step()
def run_dist(rank, world_size, port, shard_strategy):
    """Worker entry point: compare ShardedModelV2 + ShardedOptimizerV2 (CPU
    offload) against a plain DDP/Adam baseline on several reference models.
    """
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    test_models = ['repeated_computed_layers', 'resnet18', 'bert']
    shard_strategy = shard_strategy()
    for model_name in test_models:
        get_components_func = non_distributed_component_funcs.get_callable(model_name)
        model, train_dataloader, test_dataloader, optimizer, criterion = get_components_func()
        model = model(checkpoint=True).cuda()
        # sharded copy of the same weights, offloaded to CPU
        zero_model = ShardedModelV2(copy.deepcopy(model), shard_strategy, offload_config={'device': 'cpu'})
        if dist.get_world_size() > 1:
            model = DDP(model)
        optim = Adam(model.parameters(), lr=1e-3)
        sharded_optim = ShardedOptimizerV2(zero_model, CPUAdam, initial_scale=2**5, cpu_offload=True, lr=1e-3)
        # run a few identical steps on both models and compare parameters
        for i, (data, label) in enumerate(train_dataloader):
            if i > 2:
                break
            data, label = data.cuda(), label.cuda()
            if criterion is None:
                run_step_no_criterion(model, optim, data, label, False)
                run_step_no_criterion(zero_model, sharded_optim, data, label, False)
            else:
                run_step(model, optim, data, label, criterion, False)
                run_step(zero_model, sharded_optim, data, label, criterion, False)
            check_sharded_params_padding(model, zero_model, loose=True)
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@pytest.mark.parametrize("shard_strategy", [TensorShardStrategy, BucketTensorShardStrategy])
def test_sharded_optim_v2(world_size, shard_strategy):
    """Spawn ``world_size`` worker processes running the sharded-optim check."""
    worker = partial(run_dist, world_size=world_size, port=free_port(), shard_strategy=shard_strategy)
    mp.spawn(worker, nprocs=world_size)


if __name__ == '__main__':
    test_sharded_optim_v2(world_size=2, shard_strategy=TensorShardStrategy)
| [
"colossalai.launch",
"colossalai.utils.free_port",
"colossalai.zero.sharded_optim.ShardedOptimizerV2"
] | [((2957, 3002), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 2]'], {}), "('world_size', [1, 2])\n", (2980, 3002), False, 'import pytest\n'), ((3004, 3099), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shard_strategy"""', '[TensorShardStrategy, BucketTensorShardStrategy]'], {}), "('shard_strategy', [TensorShardStrategy,\n BucketTensorShardStrategy])\n", (3027, 3099), False, 'import pytest\n'), ((1527, 1643), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'CONFIG', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=CONFIG, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (1544, 1643), False, 'import colossalai\n'), ((3260, 3297), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (3268, 3297), True, 'import torch.multiprocessing as mp\n'), ((854, 902), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {'enabled': 'enable_autocast'}), '(enabled=enable_autocast)\n', (877, 902), False, 'import torch\n'), ((1253, 1301), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {'enabled': 'enable_autocast'}), '(enabled=enable_autocast)\n', (1276, 1301), False, 'import torch\n'), ((1809, 1865), 'tests.components_to_test.registry.non_distributed_component_funcs.get_callable', 'non_distributed_component_funcs.get_callable', (['model_name'], {}), '(model_name)\n', (1853, 1865), False, 'from tests.components_to_test.registry import non_distributed_component_funcs\n'), ((2258, 2352), 'colossalai.zero.sharded_optim.ShardedOptimizerV2', 'ShardedOptimizerV2', (['zero_model', 'CPUAdam'], {'initial_scale': '(2 ** 5)', 'cpu_offload': '(True)', 'lr': '(0.001)'}), '(zero_model, CPUAdam, initial_scale=2 ** 5, cpu_offload=\n True, lr=0.001)\n', (2276, 2352), False, 'from colossalai.zero.sharded_optim import ShardedOptimizerV2\n'), 
((2043, 2063), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (2056, 2063), False, 'import copy\n'), ((2126, 2147), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2145, 2147), True, 'import torch.distributed as dist\n'), ((2173, 2183), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['model'], {}), '(model)\n', (2176, 2183), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((2876, 2935), 'common.check_sharded_params_padding', 'check_sharded_params_padding', (['model', 'zero_model'], {'loose': '(True)'}), '(model, zero_model, loose=True)\n', (2904, 2935), False, 'from common import CONFIG, check_sharded_params_padding\n'), ((3212, 3223), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (3221, 3223), False, 'from colossalai.utils import free_port\n')] |
import pytest
import colossalai
from colossalai.context.parallel_mode import ParallelMode
import torch.multiprocessing as mp
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_port
from colossalai.utils import ColoInitContext
from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, distspec, ColoParameter, ChunkManager
from colossalai.core import global_context as gpc
from functools import partial
from _utils import tensor_equal, tensor_shard_equal, set_seed
from tests.components_to_test.registry import non_distributed_component_funcs
from torch.nn.parallel import DistributedDataParallel as DDP
from colossalai.nn.parallel import ColoDDP, ColoDDPV2
from colossalai.testing import parameterize
def check_param_equal(model, torch_model):
    """Assert every materialised parameter of the sharded model matches the reference."""
    for p, torch_p in zip(model.parameters(), torch_model.parameters()):
        # parameters whose storage was released by sharding are skipped
        if p.storage().size() == 0:
            continue
        assert tensor_equal(torch_p, p.float()), f'{torch_p} vs {p}'
def check_grad_equal(model, torch_model):
    """Assert gradients agree wherever the sharded model holds a gradient."""
    for p, torch_p in zip(model.parameters(), torch_model.parameters()):
        if p.grad is None:
            continue
        assert tensor_equal(torch_p.grad, p.grad.float())
@parameterize('use_chunk', [False, True])
@parameterize('use_zero', [False, True])
def run_gpt(use_chunk, use_zero):
    """Compare a half-precision ColoDDPV2 GPT-2 against a fp32 torch DDP copy:
    parameters must match after init and gradients after one backward pass.
    """
    set_seed(42)
    get_components_func = non_distributed_component_funcs.get_callable('gpt2')
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    with ColoInitContext(device=get_current_device()):
        model = model_builder(checkpoint=True)
    model = model.cuda()
    torch_model = model_builder().cuda()
    # copy the Colo-initialised weights into the reference model
    for torch_p, p in zip(torch_model.parameters(), model.parameters()):
        torch_p.data.copy_(p)
    model = model.half()
    # chunked parameter storage is optional; 38 MiB chunks when enabled
    chunk_size = 38 * 1024**2 if use_chunk else None
    chunk_manager = ChunkManager(chunk_size, enable_distributed_storage=use_zero)
    model = ColoDDPV2(model, chunk_manager)
    torch_model = DDP(torch_model, device_ids=[gpc.get_global_rank()], process_group=gpc.get_group(ParallelMode.DATA))
    print(chunk_manager)
    check_param_equal(model, torch_model)
    model.train()
    torch_model.train()
    # per-rank seed so both models see the same data-parallel shard
    set_seed(gpc.get_local_rank(ParallelMode.DATA))
    for i, (input_ids, attn_mask) in enumerate(train_dataloader):
        logits = model(input_ids, attn_mask)
        torch_logits = torch_model(input_ids, attn_mask)
        assert tensor_equal(torch_logits, logits.float())
        loss = criterion(logits, input_ids)
        torch_loss = criterion(torch_logits, input_ids)
        model.backward(loss)
        torch_loss.backward()
        check_grad_equal(model, torch_model)
        # one batch is enough for the comparison
        break
def run_dist(rank, world_size, port):
    """Per-process entry point: bring up the distributed env, then run the GPT check."""
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    run_gpt()
@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_gpt(world_size):
    """Spawn ``world_size`` processes for the distributed ColoDDPV2 GPT check."""
    worker = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(worker, nprocs=world_size)


if __name__ == '__main__':
    test_gpt(4)
| [
"colossalai.testing.rerun_if_address_is_in_use",
"colossalai.testing.parameterize",
"colossalai.launch",
"colossalai.nn.parallel.ColoDDPV2",
"colossalai.utils.free_port",
"colossalai.utils.cuda.get_current_device",
"colossalai.tensor.ChunkManager",
"colossalai.core.global_context.get_global_rank",
"... | [((1267, 1307), 'colossalai.testing.parameterize', 'parameterize', (['"""use_chunk"""', '[False, True]'], {}), "('use_chunk', [False, True])\n", (1279, 1307), False, 'from colossalai.testing import parameterize\n'), ((1309, 1348), 'colossalai.testing.parameterize', 'parameterize', (['"""use_zero"""', '[False, True]'], {}), "('use_zero', [False, True])\n", (1321, 1348), False, 'from colossalai.testing import parameterize\n'), ((2971, 3016), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 4]'], {}), "('world_size', [1, 4])\n", (2994, 3016), False, 'import pytest\n'), ((3018, 3046), 'colossalai.testing.rerun_if_address_is_in_use', 'rerun_if_address_is_in_use', ([], {}), '()\n', (3044, 3046), False, 'from colossalai.testing import rerun_if_address_is_in_use\n'), ((1387, 1399), '_utils.set_seed', 'set_seed', (['(42)'], {}), '(42)\n', (1395, 1399), False, 'from _utils import tensor_equal, tensor_shard_equal, set_seed\n'), ((1426, 1478), 'tests.components_to_test.registry.non_distributed_component_funcs.get_callable', 'non_distributed_component_funcs.get_callable', (['"""gpt2"""'], {}), "('gpt2')\n", (1470, 1478), False, 'from tests.components_to_test.registry import non_distributed_component_funcs\n'), ((1954, 2015), 'colossalai.tensor.ChunkManager', 'ChunkManager', (['chunk_size'], {'enable_distributed_storage': 'use_zero'}), '(chunk_size, enable_distributed_storage=use_zero)\n', (1966, 2015), False, 'from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager, distspec, ColoParameter, ChunkManager\n'), ((2028, 2059), 'colossalai.nn.parallel.ColoDDPV2', 'ColoDDPV2', (['model', 'chunk_manager'], {}), '(model, chunk_manager)\n', (2037, 2059), False, 'from colossalai.nn.parallel import ColoDDP, ColoDDPV2\n'), ((2828, 2940), 'colossalai.launch', 'colossalai.launch', ([], {'config': '{}', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), 
"(config={}, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (2845, 2940), False, 'import colossalai\n'), ((3151, 3188), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (3159, 3188), True, 'import torch.multiprocessing as mp\n'), ((2301, 2338), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (2319, 2338), True, 'from colossalai.core import global_context as gpc\n'), ((2145, 2177), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (2158, 2177), True, 'from colossalai.core import global_context as gpc\n'), ((3134, 3145), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (3143, 3145), False, 'from colossalai.utils import free_port\n'), ((1617, 1637), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (1635, 1637), False, 'from colossalai.utils.cuda import get_current_device\n'), ((2107, 2128), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (2126, 2128), True, 'from colossalai.core import global_context as gpc\n')] |
import os
from abc import ABC, abstractmethod
import torch
import torch.distributed as dist
from colossalai.communication import all_gather
from colossalai.constants import (INPUT_GROUP_3D, OUTPUT_GROUP_3D,
WEIGHT_GROUP_3D)
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn.layer._parallel_utilities import _gather
from colossalai.nn.layer.parallel_3d._utils import (get_last_group,
get_parallel_mode_from_env)
from colossalai.utils import get_current_device
class Metric(ABC):
    """Base class for metric collectors.

    A concrete subclass accumulates one quantity (loss, accuracy, ...)
    during training or evaluation.  It is driven by a corresponding
    ``MetricHook`` that calls :meth:`update` each step and :meth:`reset`
    at epoch boundaries, so always pair a metric with its hook.

    :param epoch_only: whether the metric is only read for the full epoch
    :type epoch_only: bool
    """

    def __init__(self, epoch_only: bool):
        # remember whether this metric is only meaningful once per epoch
        self._epoch_only = epoch_only

    @property
    def epoch_only(self):
        """Whether the metric is only read for the full epoch."""
        return self._epoch_only

    @abstractmethod
    def reset(self) -> None:
        """Clear accumulated state; called at the start of each epoch."""
        ...

    @abstractmethod
    def update(self, *args, **kwargs) -> None:
        """Fold one batch's output into the accumulated state."""
        ...

    @abstractmethod
    def get_last_step_value(self):
        """Return the metric value of the most recent iteration."""
        ...

    @abstractmethod
    def get_accumulated_value(self):
        """Return the metric computed over all accumulated state.

        :return: the actual quantity of interest
        :rtype: Any
        """
        ...

    @staticmethod
    @abstractmethod
    def is_better(a, b) -> bool:
        """Return ``True`` if value ``a`` is better than value ``b``.

        :rtype: bool
        """
        ...
class Loss(Metric):
    """A metric collector for loss.

    :param epoch_only: whether the metric is only read for the full epoch
    :type epoch_only: bool
    """

    def __init__(self, epoch_only):
        super().__init__(epoch_only=epoch_only)
        # loss of the most recent step and running sum across steps
        self.last_step_loss = torch.zeros(1, device=get_current_device())
        self.accum_loss = torch.zeros(1, device=get_current_device())
        self.count = 0

    def reset(self) -> None:
        """Sets :attr:`last_step_loss`, :attr:`accum_loss` and the step count to zero.
        """
        self.last_step_loss.zero_()
        self.accum_loss.zero_()
        self.count = 0

    def update(self, loss) -> None:
        """Updates :attr:`last_step_loss` and :attr:`accum_loss` with current loss.

        :param loss: current loss tensor produced by the criterion
        """
        # detach so the metric never holds onto the autograd graph
        loss_ = loss.detach()
        self.last_step_loss.copy_(loss_)
        self.accum_loss.add_(loss_)
        self.count += 1

    def get_accumulated_value(self):
        """Returns the loss averaged over all recorded steps.

        When the data-parallel group is initialized, per-rank sums are
        all-reduced and averaged over the group first.
        """
        if gpc.is_initialized(ParallelMode.DATA):
            dist.all_reduce(self.accum_loss,
                            op=dist.ReduceOp.SUM,
                            group=gpc.get_group(ParallelMode.DATA))
            self.accum_loss.div_(gpc.get_world_size(ParallelMode.DATA))
        self.accum_loss.div_(self.count)
        return self.accum_loss.item()

    def get_last_step_value(self):
        """Returns :attr:`last_step_loss` (a 1-element tensor).
        """
        return self.last_step_loss

    @staticmethod
    def is_better(a, b):
        # fix: declared static to match the abstract base; instance calls
        # previously misbound `self` as `a`.  Lower loss is better.
        return a < b
class LearningRate(Metric):
    """A metric collector that tracks the current learning rate.

    :param epoch_only: whether the metric is only read for the full epoch
    :type epoch_only: bool
    :param initial_lr: learning rate reported before the first :meth:`update`,
        defaults to 0.
    :type initial_lr: float, optional
    """

    def __init__(self, epoch_only: bool, initial_lr: float = 0.):
        super().__init__(epoch_only=epoch_only)
        # fix: honour the caller-provided initial value (was hard-coded 0.)
        self.lr = initial_lr

    def reset(self) -> None:
        pass

    def update(self, lr) -> None:
        self.lr = lr

    def get_last_step_value(self):
        return self.lr

    def get_accumulated_value(self):
        return self.lr

    @staticmethod
    def is_better(a, b) -> bool:
        # a learning rate has no notion of "better"; kept abstract-compatible
        pass
class Accuracy(Metric):
    """A metric collector for accuracy. It only works for classification
    tasks.

    :param epoch_only: whether the metric is only read for the full epoch
    :type epoch_only: bool
    """

    def __init__(self, epoch_only: bool):
        super().__init__(epoch_only=epoch_only)
        # per-step and accumulated sample / correct-prediction counters
        self.last_step_sum = torch.zeros(1, device=get_current_device())
        self.last_step_correct = torch.zeros(1, device=get_current_device())
        self.accumulated_sum = torch.zeros(1, device=get_current_device())
        self.accumulated_correct = torch.zeros(1, device=get_current_device())

    def reset(self) -> None:
        self.last_step_sum.zero_()
        self.last_step_correct.zero_()
        self.accumulated_sum.zero_()
        self.accumulated_correct.zero_()

    def update(self, logits, label) -> None:
        """Updates last-step and accumulated counters with current logits
        and labels.

        :param logits: the logits output of the model
        :param label: the labels of the input data
        """
        if isinstance(logits, (list, tuple)):
            logits = logits[0]
        if isinstance(label, (list, tuple)):
            label = label[0]
        # update
        preds = torch.argmax(logits, dim=-1)
        correct = torch.sum(label == preds)
        self.last_step_sum.fill_(label.size(0))
        self.last_step_correct.fill_(correct)
        self.accumulated_sum += self.last_step_sum
        self.accumulated_correct += self.last_step_correct

    def get_last_step_value(self):
        """Returns the last-step accuracy reduced over the data-parallel group."""
        dist.all_reduce(self.last_step_sum,
                        group=gpc.get_group(ParallelMode.DATA))
        dist.all_reduce(self.last_step_correct,
                        group=gpc.get_group(ParallelMode.DATA))
        # fix: accuracy is correct / total; the ratio was inverted
        # (compare get_accumulated_value below)
        return (self.last_step_correct / self.last_step_sum).item()

    def get_accumulated_value(self):
        dist.all_reduce(self.accumulated_sum,
                        group=gpc.get_group(ParallelMode.DATA))
        dist.all_reduce(self.accumulated_correct,
                        group=gpc.get_group(ParallelMode.DATA))
        return (self.accumulated_correct / self.accumulated_sum).item()

    @staticmethod
    def is_better(a, b) -> bool:
        # fix: declared static to match the abstract base; higher is better
        return a > b
class Accuracy2D(Accuracy):
    """Accuracy collector for classification models using 2D tensor
    parallelism.  Same contract as :class:`Accuracy`, but the partitioned
    logits are gathered along both 2D parallel dimensions first.

    :param epoch_only: whether the metric is only read for the full epoch
    :type epoch_only: bool
    """

    def __init__(self, epoch_only: bool):
        super().__init__(epoch_only=epoch_only)

    def update(self, logits, label) -> None:
        if isinstance(logits, (list, tuple)):
            logits = logits[0]
        if isinstance(label, (list, tuple)):
            label = label[0]
        # reassemble the full logits tensor from row- and column-parallel shards
        logits = _gather(logits, ParallelMode.PARALLEL_2D_ROW, 1)
        logits = _gather(logits, ParallelMode.PARALLEL_2D_COL, 0)
        predictions = torch.argmax(logits, dim=-1)
        num_correct = torch.sum(label == predictions)
        self.last_step_sum.fill_(label.size(0))
        self.last_step_correct.fill_(num_correct)
        self.accumulated_sum += self.last_step_sum
        self.accumulated_correct += self.last_step_correct
class Accuracy1D(Accuracy):
    """Accuracy collector for classification models using 1D tensor
    parallelism.  Same contract as :class:`Accuracy`, but the
    class-dimension shards of the logits are gathered first.

    :param epoch_only: whether the metric is only read for the full epoch
    :type epoch_only: bool
    """

    def __init__(self, epoch_only: bool):
        super().__init__(epoch_only=epoch_only)

    def update(self, logits, label) -> None:
        if isinstance(logits, (list, tuple)):
            logits = logits[0]
        if isinstance(label, (list, tuple)):
            label = label[0]
        # reassemble the full logits tensor from the 1D-parallel shards
        logits = _gather(logits, ParallelMode.PARALLEL_1D, 1)
        predictions = torch.argmax(logits, dim=-1)
        num_correct = torch.sum(label == predictions)
        self.last_step_sum.fill_(label.size(0))
        self.last_step_correct.fill_(num_correct)
        self.accumulated_sum += self.last_step_sum
        self.accumulated_correct += self.last_step_correct
class Accuracy2p5D(Accuracy):
    """Accuracy collector for classification models using 2.5D tensor
    parallelism.  Same contract as :class:`Accuracy`, but the partitioned
    logits are gathered along the row, column and depth dimensions first.

    :param epoch_only: whether the metric is only read for the full epoch
    :type epoch_only: bool
    """

    def __init__(self, epoch_only: bool):
        super().__init__(epoch_only=epoch_only)

    def update(self, logits, label) -> None:
        if isinstance(logits, (list, tuple)):
            logits = logits[0]
        if isinstance(label, (list, tuple)):
            label = label[0]
        # gather partitioned logits along row, column and depth dimensions
        logits = _gather(logits, ParallelMode.PARALLEL_2P5D_ROW, 1)
        logits = _gather(
            logits,
            ParallelMode.PARALLEL_2P5D_COL,
            0,
        )
        logits = _gather(
            logits,
            ParallelMode.PARALLEL_2P5D_DEP,
            0,
        )
        # update
        preds = torch.argmax(logits, dim=-1)
        correct = torch.sum(label == preds)
        self.last_step_sum.fill_(label.size(0))
        self.last_step_correct.fill_(correct)
        self.accumulated_sum += self.last_step_sum
        self.accumulated_correct += self.last_step_correct

    @staticmethod
    def is_better(a, b) -> bool:
        # fix: declared static to match the abstract base; higher is better
        return a > b
class Accuracy3D(Accuracy):
    """Accuracy collector for classification models using 3D tensor
    parallelism.  Same contract as :class:`Accuracy`: the target is sliced
    to match the local logits partition, the logits are gathered along the
    output-parallel dimension, and the correct-prediction count is reduced
    over the input- and weight-parallel groups.

    :param epoch_only: whether the metric is only read for the full epoch
    :type epoch_only: bool
    """

    def __init__(self, epoch_only):
        super().__init__(epoch_only=epoch_only)
        self.depth = int(os.environ['DEPTH_3D'])
        self.input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
        self.weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
        self.output_parallel_mode = get_last_group(self.input_parallel_mode,
                                                   self.weight_parallel_mode)

    def update(self, logits, target):
        if isinstance(logits, (list, tuple)):
            logits = logits[0]
        if isinstance(target, (list, tuple)):
            target = target[0]
        batch_size = target.size(0)
        # slice the replicated target to the chunk owned by this rank:
        # first along the weight-parallel rank, then the input-parallel rank
        input_rank = gpc.get_local_rank(self.input_parallel_mode)
        weight_rank = gpc.get_local_rank(self.weight_parallel_mode)
        target = torch.chunk(target, self.depth, dim=0)[weight_rank]
        target = torch.chunk(target, self.depth, dim=0)[input_rank]
        # reassemble full logits along the output-parallel dimension
        logits = torch.cat(all_gather(logits, -1, self.output_parallel_mode), dim=-1)
        correct = torch.sum(torch.argmax(logits, dim=-1) == target)
        dist.all_reduce(correct, group=gpc.get_group(self.input_parallel_mode))
        dist.all_reduce(correct,
                        group=gpc.get_group(self.weight_parallel_mode))
        self.last_step_sum.fill_(batch_size)
        self.last_step_correct.fill_(correct)
        self.accumulated_sum += self.last_step_sum
        self.accumulated_correct += self.last_step_correct
| [
"colossalai.core.global_context.get_world_size",
"colossalai.communication.all_gather",
"colossalai.nn.layer._parallel_utilities._gather",
"colossalai.nn.layer.parallel_3d._utils.get_parallel_mode_from_env",
"colossalai.core.global_context.is_initialized",
"colossalai.utils.get_current_device",
"colossa... | [((3577, 3614), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (3595, 3614), True, 'from colossalai.core import global_context as gpc\n'), ((6054, 6082), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (6066, 6082), False, 'import torch\n'), ((6102, 6127), 'torch.sum', 'torch.sum', (['(label == preds)'], {}), '(label == preds)\n', (6111, 6127), False, 'import torch\n'), ((7693, 7741), 'colossalai.nn.layer._parallel_utilities._gather', '_gather', (['logits', 'ParallelMode.PARALLEL_2D_ROW', '(1)'], {}), '(logits, ParallelMode.PARALLEL_2D_ROW, 1)\n', (7700, 7741), False, 'from colossalai.nn.layer._parallel_utilities import _gather\n'), ((7760, 7808), 'colossalai.nn.layer._parallel_utilities._gather', '_gather', (['logits', 'ParallelMode.PARALLEL_2D_COL', '(0)'], {}), '(logits, ParallelMode.PARALLEL_2D_COL, 0)\n', (7767, 7808), False, 'from colossalai.nn.layer._parallel_utilities import _gather\n'), ((7895, 7923), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (7907, 7923), False, 'import torch\n'), ((7943, 7968), 'torch.sum', 'torch.sum', (['(label == preds)'], {}), '(label == preds)\n', (7952, 7968), False, 'import torch\n'), ((8806, 8850), 'colossalai.nn.layer._parallel_utilities._gather', '_gather', (['logits', 'ParallelMode.PARALLEL_1D', '(1)'], {}), '(logits, ParallelMode.PARALLEL_1D, 1)\n', (8813, 8850), False, 'from colossalai.nn.layer._parallel_utilities import _gather\n'), ((8938, 8966), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (8950, 8966), False, 'import torch\n'), ((8986, 9011), 'torch.sum', 'torch.sum', (['(label == preds)'], {}), '(label == preds)\n', (8995, 9011), False, 'import torch\n'), ((9570, 9620), 'colossalai.nn.layer._parallel_utilities._gather', '_gather', (['logits', 'ParallelMode.PARALLEL_2P5D_ROW', '(1)'], {}), '(logits, 
ParallelMode.PARALLEL_2P5D_ROW, 1)\n', (9577, 9620), False, 'from colossalai.nn.layer._parallel_utilities import _gather\n'), ((9639, 9689), 'colossalai.nn.layer._parallel_utilities._gather', '_gather', (['logits', 'ParallelMode.PARALLEL_2P5D_COL', '(0)'], {}), '(logits, ParallelMode.PARALLEL_2P5D_COL, 0)\n', (9646, 9689), False, 'from colossalai.nn.layer._parallel_utilities import _gather\n'), ((9759, 9809), 'colossalai.nn.layer._parallel_utilities._gather', '_gather', (['logits', 'ParallelMode.PARALLEL_2P5D_DEP', '(0)'], {}), '(logits, ParallelMode.PARALLEL_2P5D_DEP, 0)\n', (9766, 9809), False, 'from colossalai.nn.layer._parallel_utilities import _gather\n'), ((9896, 9924), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (9908, 9924), False, 'import torch\n'), ((9944, 9969), 'torch.sum', 'torch.sum', (['(label == preds)'], {}), '(label == preds)\n', (9953, 9969), False, 'import torch\n'), ((11119, 11161), 'colossalai.nn.layer.parallel_3d._utils.get_parallel_mode_from_env', 'get_parallel_mode_from_env', (['INPUT_GROUP_3D'], {}), '(INPUT_GROUP_3D)\n', (11145, 11161), False, 'from colossalai.nn.layer.parallel_3d._utils import get_last_group, get_parallel_mode_from_env\n'), ((11199, 11242), 'colossalai.nn.layer.parallel_3d._utils.get_parallel_mode_from_env', 'get_parallel_mode_from_env', (['WEIGHT_GROUP_3D'], {}), '(WEIGHT_GROUP_3D)\n', (11225, 11242), False, 'from colossalai.nn.layer.parallel_3d._utils import get_last_group, get_parallel_mode_from_env\n'), ((11280, 11347), 'colossalai.nn.layer.parallel_3d._utils.get_last_group', 'get_last_group', (['self.input_parallel_mode', 'self.weight_parallel_mode'], {}), '(self.input_parallel_mode, self.weight_parallel_mode)\n', (11294, 11347), False, 'from colossalai.nn.layer.parallel_3d._utils import get_last_group, get_parallel_mode_from_env\n'), ((11653, 11697), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['self.input_parallel_mode'], {}), 
'(self.input_parallel_mode)\n', (11671, 11697), True, 'from colossalai.core import global_context as gpc\n'), ((11711, 11756), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['self.weight_parallel_mode'], {}), '(self.weight_parallel_mode)\n', (11729, 11756), True, 'from colossalai.core import global_context as gpc\n'), ((11897, 11946), 'colossalai.communication.all_gather', 'all_gather', (['logits', '(-1)', 'self.output_parallel_mode'], {}), '(logits, -1, self.output_parallel_mode)\n', (11907, 11946), False, 'from colossalai.communication import all_gather\n'), ((11965, 11990), 'torch.cat', 'torch.cat', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (11974, 11990), False, 'import torch\n'), ((12013, 12041), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (12025, 12041), False, 'import torch\n'), ((12061, 12092), 'torch.sum', 'torch.sum', (['(prediction == target)'], {}), '(prediction == target)\n', (12070, 12092), False, 'import torch\n'), ((11775, 11813), 'torch.chunk', 'torch.chunk', (['target', 'self.depth'], {'dim': '(0)'}), '(target, self.depth, dim=0)\n', (11786, 11813), False, 'import torch\n'), ((11835, 11873), 'torch.chunk', 'torch.chunk', (['target', 'self.depth'], {'dim': '(0)'}), '(target, self.depth, dim=0)\n', (11846, 11873), False, 'import torch\n'), ((2728, 2748), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (2746, 2748), False, 'from colossalai.utils import get_current_device\n'), ((2799, 2819), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (2817, 2819), False, 'from colossalai.utils import get_current_device\n'), ((3816, 3853), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (3834, 3853), True, 'from colossalai.core import global_context as gpc\n'), ((5098, 5118), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), 
'()\n', (5116, 5118), False, 'from colossalai.utils import get_current_device\n'), ((5176, 5196), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (5194, 5196), False, 'from colossalai.utils import get_current_device\n'), ((5252, 5272), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (5270, 5272), False, 'from colossalai.utils import get_current_device\n'), ((5332, 5352), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (5350, 5352), False, 'from colossalai.utils import get_current_device\n'), ((6450, 6482), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (6463, 6482), True, 'from colossalai.core import global_context as gpc\n'), ((6564, 6596), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (6577, 6596), True, 'from colossalai.core import global_context as gpc\n'), ((6785, 6817), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (6798, 6817), True, 'from colossalai.core import global_context as gpc\n'), ((6901, 6933), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (6914, 6933), True, 'from colossalai.core import global_context as gpc\n'), ((12135, 12174), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['self.input_parallel_mode'], {}), '(self.input_parallel_mode)\n', (12148, 12174), True, 'from colossalai.core import global_context as gpc\n'), ((12241, 12281), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['self.weight_parallel_mode'], {}), '(self.weight_parallel_mode)\n', (12254, 12281), True, 'from colossalai.core import global_context as gpc\n'), ((3748, 3780), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', 
(3761, 3780), True, 'from colossalai.core import global_context as gpc\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch import Tensor
from torch.nn.parameter import Parameter
from typing import Tuple
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.registry import LAYERS
from colossalai.utils import get_current_device
from .._common_utils import divide
from .._parallel_utilities import reduce_grad, reduce_input, gather_forward_split_backward, \
split_forward_gather_backward
from ..base_layer import ParallelLayer
class Linear1D_Col(ParallelLayer):
    """Linear layer with column parallelism.

    Computes :math:`Y = XA + b` where the weight :math:`A` is partitioned
    along its second (output) dimension across the 1D tensor-parallel
    group, i.e. :math:`A = [A_1, ..., A_p]`.

    :param in_features: first dimension of matrix A
    :type in_features: int
    :param output_size: second dimension of matrix A
    :type output_size: int
    :param bias: if True, add bias, defaults to True
    :type bias: bool, optional
    :param dtype: the dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param gather_output: if True, all-gather the output so :math:`Y` is
        available on every GPU; otherwise each GPU keeps its own partition
        :math:`Y_i = XA_i`, defaults to False
    :type gather_output: bool, optional
    """

    def __init__(self,
                 in_features: int,
                 output_size: int,
                 bias: bool = True,
                 dtype: torch.dtype = None,
                 gather_output: bool = False):
        super().__init__()
        self.input_size = in_features
        self.output_size = output_size
        self.gather_output = gather_output
        self.skip_bias_add = not bias
        # each rank owns output_size / world_size columns of the weight
        world_size = gpc.get_world_size(ParallelMode.PARALLEL_1D)
        self.output_size_per_partition = divide(output_size, world_size)
        factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
        self.weight = Parameter(
            torch.empty(self.output_size_per_partition, self.input_size,
                        **factory_kwargs))
        if not bias:
            self.register_parameter('bias', None)
        else:
            self.bias = Parameter(
                torch.empty(self.output_size_per_partition, **factory_kwargs))
            # bias always starts at zero
            with torch.no_grad():
                self.bias.zero_()

    def forward(self, input_: Tensor) -> Tuple[Tensor, Tensor]:
        """Apply the column-parallel linear transformation.

        :param input_: input tensor, replicated across the parallel group
        :return: the (optionally gathered) output, plus the bias when
            ``skip_bias_add`` is set
        """
        # identity in the forward pass, all-reduce of gradients in backward
        input_parallel = reduce_grad(input_, ParallelMode.PARALLEL_1D)
        bias = None if self.skip_bias_add else self.bias
        output_parallel = F.linear(input_parallel, self.weight, bias)
        if self.gather_output:
            # collect the output partitions from all ranks
            output = gather_forward_split_backward(
                output_parallel, ParallelMode.PARALLEL_1D, dim=-1)
        else:
            output = output_parallel
        if self.skip_bias_add:
            return output, self.bias
        return output
@LAYERS.register_module
class Linear1D_Row(ParallelLayer):
    """Linear layer with row parallelism.

    The weight matrix is partitioned along its last (input) dimension
    across the 1D tensor-parallel group; the partial products are summed
    with an all-reduce.

    :param in_features: size of each input sample
    :type in_features: int
    :param out_features: size of each output sample
    :type out_features: int
    :param bias: if set to ``False``, the layer will not learn an additive
        bias, defaults to True
    :type bias: bool, optional
    :param dtype: the dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param parallel_input: if set to ``True``, the input is assumed to be
        already split across ranks and is used as-is, defaults to False
    :type parallel_input: bool, optional
    """

    def __init__(self,
                 in_features: int,
                 out_features: int,
                 bias: bool = True,
                 dtype: torch.dtype = None,
                 parallel_input: bool = False
                 ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.parallel_input = parallel_input
        self.skip_bias_add = not bias
        # each rank owns in_features / world_size rows of the weight
        world_size = gpc.get_world_size(ParallelMode.PARALLEL_1D)
        self.input_size_per_partition = divide(in_features, world_size)
        factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
        self.weight = Parameter(
            torch.empty(self.out_features, self.input_size_per_partition,
                        **factory_kwargs))
        if not bias:
            self.register_parameter('bias', None)
        else:
            self.bias = Parameter(
                torch.empty(self.out_features, **factory_kwargs))
            # bias always starts at zero
            with torch.no_grad():
                self.bias.zero_()

    def reset_parameters(self) -> None:
        """Re-initialize the weight partition with Xavier-normal values."""
        init.xavier_normal_(self.weight)

    def forward(self, input_: Tensor) -> Tensor:
        """Apply the row-parallel linear transformation."""
        if not self.parallel_input:
            # split the replicated input along its last dimension
            input_ = split_forward_gather_backward(
                input_, ParallelMode.PARALLEL_1D, dim=-1)
        partial_output = F.linear(input_, self.weight)
        # sum the partial products across the tensor-parallel group
        output = reduce_input(partial_output, ParallelMode.PARALLEL_1D)
        if not self.skip_bias_add:
            output = output + self.bias
        return output
| [
"colossalai.core.global_context.get_world_size",
"colossalai.utils.get_current_device"
] | [((1930, 1974), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (1948, 1974), True, 'from colossalai.core import global_context as gpc\n'), ((2926, 2969), 'torch.nn.functional.linear', 'F.linear', (['input_parallel', 'self.weight', 'bias'], {}), '(input_parallel, self.weight, bias)\n', (2934, 2969), True, 'import torch.nn.functional as F\n'), ((4520, 4564), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (4538, 4564), True, 'from colossalai.core import global_context as gpc\n'), ((5289, 5321), 'torch.nn.init.xavier_normal_', 'init.xavier_normal_', (['self.weight'], {}), '(self.weight)\n', (5308, 5321), True, 'import torch.nn.init as init\n'), ((5621, 5650), 'torch.nn.functional.linear', 'F.linear', (['input_', 'self.weight'], {}), '(input_, self.weight)\n', (5629, 5650), True, 'import torch.nn.functional as F\n'), ((2136, 2156), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (2154, 2156), False, 'from colossalai.utils import get_current_device\n'), ((2206, 2284), 'torch.empty', 'torch.empty', (['self.output_size_per_partition', 'self.input_size'], {}), '(self.output_size_per_partition, self.input_size, **factory_kwargs)\n', (2217, 2284), False, 'import torch\n'), ((4725, 4745), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (4743, 4745), False, 'from colossalai.utils import get_current_device\n'), ((4795, 4874), 'torch.empty', 'torch.empty', (['self.out_features', 'self.input_size_per_partition'], {}), '(self.out_features, self.input_size_per_partition, **factory_kwargs)\n', (4806, 4874), False, 'import torch\n'), ((2363, 2424), 'torch.empty', 'torch.empty', (['self.output_size_per_partition'], {}), '(self.output_size_per_partition, **factory_kwargs)\n', (2374, 2424), False, 'import torch\n'), ((2522, 2537), 
'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2535, 2537), False, 'import torch\n'), ((4965, 5013), 'torch.empty', 'torch.empty', (['self.out_features'], {}), '(self.out_features, **factory_kwargs)\n', (4976, 5013), False, 'import torch\n'), ((5125, 5140), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5138, 5140), False, 'import torch\n')] |
from cmath import e
import json
import os
import transformers
import torch
import numpy as np
from data import get_train_features, gen_tensor_dataset, convert_examples_to_features
from metrics import compute_metrics
from tqdm import trange, tqdm
from colossalai.core import global_context as gpc
from torch.utils.data import SequentialSampler, DataLoader
from colossalai.nn.optimizer import FusedAdam
from colossalai.nn.lr_scheduler import LinearWarmupLR
from colossalai.utils import get_dataloader
from transformers import BertForSequenceClassification
__all__ = [
'get_model', 'get_optimizer', 'get_lr_scheduler', 'get_train_dataloader', 'run_train', 'get_eval_dataloader',
'run_eval'
]
def get_model(config_file, num_labels, use_hf_pretrain=False):
    """Build a BERT sequence-classification model.

    :param config_file: path to a BERT config JSON file
    :param num_labels: number of classification labels
    :param use_hf_pretrain: if True, load HuggingFace ``bert-base-uncased``
        pretrained weights instead of random initialization, defaults to False
    :return: a ``BertForSequenceClassification`` instance
    """
    config = transformers.BertConfig.from_json_file(config_file)
    config.num_labels = num_labels
    if use_hf_pretrain:
        return BertForSequenceClassification.from_pretrained('bert-base-uncased', config=config)
    return transformers.BertForSequenceClassification(config=config)
def get_optimizer(model, lr):
    """Build a FusedAdam optimizer with weight decay disabled for
    LayerNorm parameters and biases.

    :param model: the model whose named parameters are optimized
    :param lr: learning rate
    :return: a configured ``FusedAdam`` instance
    """
    no_decay = ('bias', 'LayerNorm.weight', 'LayerNorm.bias')
    decay_params, no_decay_params = [], []
    for name, param in model.named_parameters():
        if any(marker in name for marker in no_decay):
            no_decay_params.append(param)
        else:
            decay_params.append(param)
    param_groups = [
        {'params': decay_params, 'weight_decay': 0.01},
        {'params': no_decay_params, 'weight_decay': 0.0},
    ]
    return FusedAdam(params=param_groups, lr=lr, bias_correction=False)
def get_lr_scheduler(optimizer, total_steps, warmup_ratio):
    """Build a linear-warmup LR schedule.

    :param optimizer: the optimizer to schedule
    :param total_steps: total number of training steps
    :param warmup_ratio: fraction of ``total_steps`` spent warming up
    :return: a ``LinearWarmupLR`` scheduler
    """
    return LinearWarmupLR(optimizer,
                          total_steps=total_steps,
                          warmup_steps=int(total_steps * warmup_ratio))
def get_train_dataloader(args, tokenizer, processor, logger):
    """Build the training dataloader.

    Converts the raw training examples into tensor features, then wraps
    them in a shuffled dataloader backed by a distributed sampler.

    :param args: parsed CLI arguments (data_dir, vocab_file, max_seq_length,
        do_lower_case, train_batch_size)
    :param tokenizer: tokenizer used for feature conversion
    :param processor: task-specific example processor
    :param logger: distributed logger (rank 0 prints)
    :return: the training dataloader
    """
    features = get_train_features(
        args.data_dir,
        args.vocab_file,
        args.max_seq_length,
        args.do_lower_case,
        tokenizer,
        processor,
    )
    dataset = gen_tensor_dataset(features)
    dataloader = get_dataloader(dataset=dataset,
                                shuffle=True,
                                add_sampler=True,
                                batch_size=args.train_batch_size)
    logger.info(f"Num examples = {len(features)}", ranks=[0])
    return dataloader
def run_train(args, engine, train_dataloader, lr_scheduler, logger):
    """Run the training loop, log summary stats and checkpoint the model.

    :param args: parsed CLI arguments (num_train_epochs, max_steps,
        skip_checkpoint, output_dir, ...)
    :param engine: ColossalAI engine wrapping model, criterion and optimizer
    :param train_dataloader: dataloader yielding
        (input_ids, input_mask, segment_ids, label_ids) batches
    :param lr_scheduler: LR scheduler stepped once per optimizer step
    :param logger: distributed logger (only rank 0 prints)
    """
    results = {}
    # prep
    global_step = 0
    num_train_steps = 0
    train_loss = 0
    num_train_examples = 0
    engine.train()
    for _ in trange(int(args.num_train_epochs), desc="Epoch"):
        # loss/step counters restart every epoch; num_train_examples and
        # global_step keep accumulating across epochs
        train_loss, num_train_steps = 0, 0
        for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
            # stop if max steps exceeded
            # NOTE(review): this only breaks the inner loop, so the epoch
            # loop continues to iterate — confirm that is intended
            if args.max_steps > 0 and global_step > args.max_steps:
                break
            engine.zero_grad()
            # forward
            batch = tuple(t.cuda() for t in batch)
            input_ids, input_mask, segment_ids, label_ids = batch
            outputs = engine(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
            logits = outputs['logits']
            loss = engine.criterion(logits, label_ids)
            engine.backward(loss)
            # optimizer step, then LR schedule step
            engine.step()
            lr_scheduler.step()
            train_loss += loss.item()
            num_train_examples += input_ids.size(0)
            num_train_steps += 1
            global_step += 1
    # average over the last epoch's steps only (counters reset per epoch)
    train_loss = train_loss / num_train_steps
    results.update({
        'global_step': global_step,
        'train:loss': train_loss,
        'train:num_samples_per_gpu': num_train_examples,
        'train:num_steps': num_train_steps,
    })
    logger.info(results, ranks=[0])
    model_to_save = engine.model
    state_dict = model_to_save.state_dict()
    # only global rank 0 writes the checkpoint
    if gpc.get_global_rank() == 0 and not args.skip_checkpoint:
        torch.save(
            {"model": state_dict},
            args.output_dir.joinpath('glue_weights.pth'),
        )
        # unwrap nested wrappers (.module / .model) until an object with a
        # `config` attribute is found, then save the config beside the weights
        while True:
            if hasattr(model_to_save, 'config'):
                with open(args.output_dir.joinpath('bert_config.json'), 'w') as f:
                    f.write(model_to_save.config.to_json_string())
                break
            else:
                if hasattr(model_to_save, 'module'):
                    model_to_save = model_to_save.module
                elif hasattr(model_to_save, 'model'):
                    model_to_save = model_to_save.model
                else:
                    raise AttributeError(
                        f"Cannot find attribute config or inner module in {model_to_save.__class__.__name__}")
    # keep the other ranks waiting until the checkpoint is fully written
    torch.distributed.barrier()
def get_eval_dataloader(args, tokenizer, processor, logger):
    """Build the evaluation dataloader over the dev set.

    :param args: parsed CLI arguments (data_dir, max_seq_length,
        eval_batch_size)
    :param tokenizer: tokenizer used for feature conversion
    :param processor: task-specific example processor
    :param logger: distributed logger (rank 0 prints)
    :return: (dataloader, dev examples, label -> index map)
    """
    eval_examples = processor.get_dev_examples(args.data_dir)
    eval_features, label_map = convert_examples_to_features(
        eval_examples,
        processor.get_labels(),
        args.max_seq_length,
        tokenizer,
    )
    logger.info(f"Num examples = {len(eval_examples)}", ranks=[0])
    eval_data = gen_tensor_dataset(eval_features)
    # evaluation walks the full dataset in order, without shuffling
    eval_dataloader = DataLoader(eval_data,
                                 sampler=SequentialSampler(eval_data),
                                 batch_size=args.eval_batch_size)
    return eval_dataloader, eval_examples, label_map
def dump_predictions(path, label_map, preds, examples):
    """Write ``{example guid: predicted label name}`` to *path* as JSON.

    :param path: destination file path
    :param label_map: label name -> index mapping
    :param preds: predicted label index per example, aligned with *examples*
    :param examples: examples exposing a ``guid`` attribute
    """
    # invert the label map so indices resolve back to label names
    index_to_label = {index: name for name, index in label_map.items()}
    predictions = {}
    for position, example in enumerate(examples):
        predictions[example.guid] = index_to_label[preds[position]]
    with open(path, "w") as out_file:
        json.dump(predictions, out_file)
def run_eval(args, engine, eval_dataloader, eval_examples, num_labels, label_map, logger):
    """Run evaluation, report forward-pass latency stats and, optionally,
    task metrics and a predictions dump.

    :param args: parsed CLI arguments (eval, predict, eval_batch_size,
        output_dir, task_name, ...)
    :param engine: ColossalAI engine wrapping model and criterion
    :param eval_dataloader: sequential dataloader over the dev set
    :param eval_examples: raw dev examples (needed for prediction dumping)
    :param num_labels: number of classification labels
    :param label_map: label name -> index mapping
    :param logger: distributed logger (only rank 0 prints)
    """
    results = {}
    engine.eval()
    preds = None
    out_label_ids = None
    eval_loss = 0
    nb_eval_steps, nb_eval_examples = 0, 0
    # one (start, end) CUDA event pair per batch to time the forward pass
    cuda_events = [(torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True))
                   for _ in range(len(eval_dataloader))]
    for i, batch_data in tqdm(
            enumerate(eval_dataloader),
            desc="Evaluating",
    ):
        input_ids, input_mask, segment_ids, label_ids = batch_data
        input_ids = input_ids.cuda()
        input_mask = input_mask.cuda()
        segment_ids = segment_ids.cuda()
        label_ids = label_ids.cuda()
        with torch.no_grad():
            # events bracket only the forward pass, not the data transfer
            cuda_events[i][0].record()
            outputs = engine(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
            logits = outputs['logits']
            cuda_events[i][1].record()
        if args.eval:
            eval_loss += engine.criterion(
                logits.view(-1, num_labels),
                label_ids.view(-1),
            ).mean().item()
        nb_eval_steps += 1
        nb_eval_examples += input_ids.size(0)
        # accumulate logits and labels on the host for metric computation
        if preds is None:
            preds = logits.detach().cpu().numpy()
            out_label_ids = label_ids.detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(
                out_label_ids,
                label_ids.detach().cpu().numpy(),
                axis=0,
            )
    # wait for all recorded events before reading elapsed times
    torch.cuda.synchronize()
    eval_latencies = [event_start.elapsed_time(event_end) for event_start, event_end in cuda_events]
    eval_latencies = list(sorted(eval_latencies))
    def infer_latency_sli(threshold):
        # per-batch latency at the given quantile of the sorted latencies
        index = int(len(eval_latencies) * threshold) - 1
        index = min(max(index, 0), len(eval_latencies) - 1)
        return eval_latencies[index]
    # elapsed_time is in milliseconds, hence the /1000 for samples/second
    eval_throughput = (args.eval_batch_size / (np.mean(eval_latencies) / 1000))
    results.update({
        'eval:num_samples_per_gpu': nb_eval_examples,
        'eval:num_steps': nb_eval_steps,
        'infer:latency(ms):50%': infer_latency_sli(0.5),
        'infer:latency(ms):90%': infer_latency_sli(0.9),
        'infer:latency(ms):95%': infer_latency_sli(0.95),
        'infer:latency(ms):99%': infer_latency_sli(0.99),
        'infer:latency(ms):100%': infer_latency_sli(1.0),
        'infer:latency(ms):avg': np.mean(eval_latencies),
        'infer:latency(ms):std': np.std(eval_latencies),
        'infer:latency(ms):sum': np.sum(eval_latencies),
        'infer:throughput(samples/s):avg': eval_throughput,
    })
    # collapse accumulated logits to predicted class indices
    preds = np.argmax(preds, axis=1)
    if args.predict:
        dump_predictions(
            os.path.join(args.output_dir, 'predictions.json'),
            label_map,
            preds,
            eval_examples,
        )
    if args.eval:
        results['eval:loss'] = eval_loss / nb_eval_steps
        eval_result = compute_metrics(args.task_name, preds, out_label_ids)
        results.update(eval_result)
    logger.info(results, ranks=[0])
| [
"colossalai.nn.lr_scheduler.LinearWarmupLR",
"colossalai.nn.optimizer.FusedAdam",
"colossalai.utils.get_dataloader",
"colossalai.core.global_context.get_global_rank"
] | [((778, 829), 'transformers.BertConfig.from_json_file', 'transformers.BertConfig.from_json_file', (['config_file'], {}), '(config_file)\n', (816, 829), False, 'import transformers\n'), ((1644, 1720), 'colossalai.nn.optimizer.FusedAdam', 'FusedAdam', ([], {'params': 'optimizer_grouped_parameters', 'lr': 'lr', 'bias_correction': '(False)'}), '(params=optimizer_grouped_parameters, lr=lr, bias_correction=False)\n', (1653, 1720), False, 'from colossalai.nn.optimizer import FusedAdam\n'), ((1874, 1951), 'colossalai.nn.lr_scheduler.LinearWarmupLR', 'LinearWarmupLR', (['optimizer'], {'total_steps': 'total_steps', 'warmup_steps': 'warmup_steps'}), '(optimizer, total_steps=total_steps, warmup_steps=warmup_steps)\n', (1888, 1951), False, 'from colossalai.nn.lr_scheduler import LinearWarmupLR\n'), ((2081, 2198), 'data.get_train_features', 'get_train_features', (['args.data_dir', 'args.vocab_file', 'args.max_seq_length', 'args.do_lower_case', 'tokenizer', 'processor'], {}), '(args.data_dir, args.vocab_file, args.max_seq_length,\n args.do_lower_case, tokenizer, processor)\n', (2099, 2198), False, 'from data import get_train_features, gen_tensor_dataset, convert_examples_to_features\n'), ((2291, 2325), 'data.gen_tensor_dataset', 'gen_tensor_dataset', (['train_features'], {}), '(train_features)\n', (2309, 2325), False, 'from data import get_train_features, gen_tensor_dataset, convert_examples_to_features\n'), ((2349, 2453), 'colossalai.utils.get_dataloader', 'get_dataloader', ([], {'dataset': 'train_data', 'shuffle': '(True)', 'add_sampler': '(True)', 'batch_size': 'args.train_batch_size'}), '(dataset=train_data, shuffle=True, add_sampler=True,\n batch_size=args.train_batch_size)\n', (2363, 2453), False, 'from colossalai.utils import get_dataloader\n'), ((5080, 5107), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (5105, 5107), False, 'import torch\n'), ((5510, 5543), 'data.gen_tensor_dataset', 'gen_tensor_dataset', (['eval_features'], {}), 
'(eval_features)\n', (5528, 5543), False, 'from data import get_train_features, gen_tensor_dataset, convert_examples_to_features\n'), ((5599, 5627), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['eval_data'], {}), '(eval_data)\n', (5616, 5627), False, 'from torch.utils.data import SequentialSampler, DataLoader\n'), ((5650, 5726), 'torch.utils.data.DataLoader', 'DataLoader', (['eval_data'], {'sampler': 'eval_sampler', 'batch_size': 'args.eval_batch_size'}), '(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n', (5660, 5726), False, 'from torch.utils.data import SequentialSampler, DataLoader\n'), ((7740, 7764), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (7762, 7764), False, 'import torch\n'), ((8847, 8871), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (8856, 8871), True, 'import numpy as np\n'), ((906, 992), 'transformers.BertForSequenceClassification.from_pretrained', 'BertForSequenceClassification.from_pretrained', (['"""bert-base-uncased"""'], {'config': 'config'}), "('bert-base-uncased', config=\n config)\n", (951, 992), False, 'from transformers import BertForSequenceClassification\n'), ((1014, 1071), 'transformers.BertForSequenceClassification', 'transformers.BertForSequenceClassification', ([], {'config': 'config'}), '(config=config)\n', (1056, 1071), False, 'import transformers\n'), ((6084, 6114), 'json.dump', 'json.dump', (['predictions', 'writer'], {}), '(predictions, writer)\n', (6093, 6114), False, 'import json\n'), ((9160, 9213), 'metrics.compute_metrics', 'compute_metrics', (['args.task_name', 'preds', 'out_label_ids'], {}), '(args.task_name, preds, out_label_ids)\n', (9175, 9213), False, 'from metrics import compute_metrics\n'), ((3016, 3056), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {'desc': '"""Iteration"""'}), "(train_dataloader, desc='Iteration')\n", (3020, 3056), False, 'from tqdm import trange, tqdm\n'), ((4195, 4216), 
'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (4214, 4216), True, 'from colossalai.core import global_context as gpc\n'), ((6367, 6403), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (6383, 6403), False, 'import torch\n'), ((6405, 6441), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (6421, 6441), False, 'import torch\n'), ((6844, 6859), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6857, 6859), False, 'import torch\n'), ((8157, 8180), 'numpy.mean', 'np.mean', (['eval_latencies'], {}), '(eval_latencies)\n', (8164, 8180), True, 'import numpy as np\n'), ((8628, 8651), 'numpy.mean', 'np.mean', (['eval_latencies'], {}), '(eval_latencies)\n', (8635, 8651), True, 'import numpy as np\n'), ((8686, 8708), 'numpy.std', 'np.std', (['eval_latencies'], {}), '(eval_latencies)\n', (8692, 8708), True, 'import numpy as np\n'), ((8743, 8765), 'numpy.sum', 'np.sum', (['eval_latencies'], {}), '(eval_latencies)\n', (8749, 8765), True, 'import numpy as np\n'), ((8932, 8981), 'os.path.join', 'os.path.join', (['args.output_dir', '"""predictions.json"""'], {}), "(args.output_dir, 'predictions.json')\n", (8944, 8981), False, 'import os\n')] |
import torch
import torch.distributed as dist
from colossalai.core import global_context as gpc
from colossalai.context import ParallelMode
from functools import partial
from colossalai.zero.utils.zero_hook_v2 import ZeROHookV2
from colossalai.tensor.chunk import ChunkManager, TensorState
from colossalai.tensor.param_op_hook import use_param_op_hooks
__all__ = ['ColoDDP', 'ColoDDPV2']
def free_storage(data: torch.Tensor) -> None:
    """Release the memory behind *data* by shrinking its storage to zero bytes."""
    storage = data.storage()
    if storage.size() == 0:
        return
    # Resizing a storage affects every tensor that shares it, so require that
    # this tensor is the storage's sole occupant (it must start at offset 0).
    assert data.storage_offset() == 0
    storage.resize_(0)
class ColoDDP(torch.nn.Module):
    """Data-parallel module wrapper that all-reduces gradients on a side CUDA stream.

    A backward hook is registered on every trainable parameter; the hook divides
    the gradient by the data-parallel world size, all-reduces it on
    ``self.comm_stream`` and stashes the result on ``p._saved_grad``.  The hook
    returns an emptied placeholder tensor, so ``p.grad`` stays cheap until
    ``backward`` copies the saved gradients back.
    """
    def __init__(self, module: torch.nn.Module) -> None:
        super().__init__()
        self.module = module
        # Dedicated stream for gradient all-reduce, separate from compute.
        self.comm_stream: torch.cuda.Stream = torch.cuda.Stream()
        self.dp_world_size = gpc.get_world_size(ParallelMode.DATA)
        for p in module.parameters():
            if p.requires_grad:
                # Fires as soon as p's gradient is produced during backward.
                p.register_hook(partial(self.grad_handle, p))
    def parameters(self, recurse: bool = True):
        # Delegate to the wrapped module so the wrapper stays transparent.
        return self.module.parameters(recurse)
    def named_parameters(self, prefix: str = '', recurse: bool = True):
        return self.module.named_parameters(prefix, recurse)
    def forward(self, *args, **kwargs):
        self.module.zero_grad(set_to_none=True)
        return self.module(*args, **kwargs)
    def backward(self, loss: torch.Tensor):
        """Backprop ``loss``, then expose the reduced gradients via ``p.grad``."""
        loss.backward()
        # Wait for the asynchronous all-reduces before reading the results.
        torch.cuda.current_stream().wait_stream(self.comm_stream)
        for p in self.module.parameters():
            p.grad = p._saved_grad
    def grad_handle(self, p, grad):
        # Return an emptied tensor to autograd; the real (reduced) gradient is
        # kept on p._saved_grad instead of p.grad.
        empty_grad = torch.empty_like(grad)
        free_storage(empty_grad)
        if self.dp_world_size > 1:
            # Pre-divide so the all-reduce sum yields the mean gradient.
            grad = grad / self.dp_world_size
            self.comm_stream.wait_stream(torch.cuda.current_stream())
            with torch.cuda.stream(self.comm_stream):
                dist.all_reduce(grad, group=gpc.get_group(ParallelMode.DATA))
                ColoDDP._save_grad(p, grad)
                # Keep the tensor alive until the comm stream is done with it.
                grad.record_stream(self.comm_stream)
        else:
            ColoDDP._save_grad(p, grad)
        return empty_grad
    @staticmethod
    def _save_grad(p, grad):
        # Accumulate if a saved gradient already exists (gradient accumulation).
        if hasattr(p, '_saved_grad'):
            p._saved_grad.add_(grad)
        else:
            p._saved_grad = grad
    def zero_grad(self, set_to_none: bool = False) -> None:
        """Clear both the autograd grads and the saved (reduced) grads."""
        self.module.zero_grad(set_to_none=True)
        for p in self.module.parameters():
            if getattr(p, '_saved_grad', None) is not None:
                if set_to_none:
                    p._saved_grad = None
                else:
                    if p._saved_grad.grad_fn is not None:
                        # Saved grad is attached to a graph; detach in place first.
                        p._saved_grad.detach_()
                    else:
                        p._saved_grad.requires_grad_(False)
                    p._saved_grad.zero_()
class ColoDDPV2(ColoDDP):
    """Chunk-based (ZeRO-style) variant of :class:`ColoDDP`.

    Parameters must be fp16 and are registered into chunks managed by a
    ``ChunkManager``; gradients are reduced chunk-by-chunk rather than
    per-tensor, and an fp32 copy of every parameter is kept in
    ``self.fp32_params``.
    """
    def __init__(self, module: torch.nn.Module, chunk_manager: ChunkManager) -> None:
        super().__init__(module)
        self.chunk_manager = chunk_manager
        self.param_op_hook = ZeROHookV2(chunk_manager)
        self.fp32_params = []
        # Number of reduced chunks that contained inf/nan (fp16 overflow).
        self.overflow_counter = 0
        # TODO: get param order and filter unused params
        for p in module.parameters():
            assert p.dtype == torch.half
            fp32_p = p.float().detach()
            self.chunk_manager.append_tensor(p, 'fp16_param')
            self.chunk_manager.append_tensor(fp32_p, 'fp32_param')
            self.fp32_params.append(fp32_p)
    def forward(self, *args, **kwargs):
        self.module.zero_grad(set_to_none=True)
        # Param-op hooks let the chunk manager gather/release chunks on demand.
        with use_param_op_hooks(self.param_op_hook):
            outputs = self.module(*args, **kwargs)
        self.chunk_manager.exec_lazy_release()
        return outputs
    def _post_backward(self):
        self.chunk_manager.exec_lazy_release()
        for p in self.module.parameters():
            if self.chunk_manager.is_chunk_free(p) or not p.requires_grad:
                p.grad = None
            else:
                # NOTE(review): p.data appears to be reused as the gradient
                # buffer after chunk reduction, hence the alias — confirm
                # against ChunkManager semantics.
                p.grad = p.data
    def backward(self, loss: torch.Tensor):
        with self.param_op_hook.switch_to_backward(), use_param_op_hooks(self.param_op_hook):
            loss.backward()
        self._post_backward()
    def backward_by_grad(self, tensor, grad):
        """Backprop starting from ``tensor`` with an externally supplied ``grad``."""
        with self.param_op_hook.switch_to_backward(), use_param_op_hooks(self.param_op_hook):
            torch.autograd.backward(tensor, grad)
        self._post_backward()
    def grad_handle(self, p, grad):
        # Overrides ColoDDP.grad_handle: the reduction goes through the chunk
        # manager instead of a dedicated CUDA stream.
        empty_grad = torch.empty_like(grad)
        free_storage(empty_grad)
        with torch._C.DisableTorchFunction():
            self.chunk_manager.trans_tensor_state(p, TensorState.READY_FOR_REDUCE)
            if self.dp_world_size > 1:
                # Pre-divide so the reduction yields the mean gradient.
                grad = grad / self.dp_world_size
            self.chunk_manager.copy_tensor_to_chunk_slice(p, grad)
            chunk = self.chunk_manager.get_chunk(p)
            reduced = self.chunk_manager.reduce_chunk(p)
            self.chunk_manager.release_chunk(p)
            if reduced and not chunk.is_free:
                self.overflow_counter += chunk.has_inf_or_nan
        return empty_grad
    def zero_grad(self, set_to_none: bool = False) -> None:
        self.module.zero_grad(set_to_none=True)
| [
"colossalai.core.global_context.get_world_size",
"colossalai.tensor.param_op_hook.use_param_op_hooks",
"colossalai.zero.utils.zero_hook_v2.ZeROHookV2",
"colossalai.core.global_context.get_group"
] | [((919, 938), 'torch.cuda.Stream', 'torch.cuda.Stream', ([], {}), '()\n', (936, 938), False, 'import torch\n'), ((968, 1005), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (986, 1005), True, 'from colossalai.core import global_context as gpc\n'), ((1772, 1794), 'torch.empty_like', 'torch.empty_like', (['grad'], {}), '(grad)\n', (1788, 1794), False, 'import torch\n'), ((3214, 3239), 'colossalai.zero.utils.zero_hook_v2.ZeROHookV2', 'ZeROHookV2', (['chunk_manager'], {}), '(chunk_manager)\n', (3224, 3239), False, 'from colossalai.zero.utils.zero_hook_v2 import ZeROHookV2\n'), ((4668, 4690), 'torch.empty_like', 'torch.empty_like', (['grad'], {}), '(grad)\n', (4684, 4690), False, 'import torch\n'), ((3755, 3793), 'colossalai.tensor.param_op_hook.use_param_op_hooks', 'use_param_op_hooks', (['self.param_op_hook'], {}), '(self.param_op_hook)\n', (3773, 3793), False, 'from colossalai.tensor.param_op_hook import use_param_op_hooks\n'), ((4291, 4329), 'colossalai.tensor.param_op_hook.use_param_op_hooks', 'use_param_op_hooks', (['self.param_op_hook'], {}), '(self.param_op_hook)\n', (4309, 4329), False, 'from colossalai.tensor.param_op_hook import use_param_op_hooks\n'), ((4490, 4528), 'colossalai.tensor.param_op_hook.use_param_op_hooks', 'use_param_op_hooks', (['self.param_op_hook'], {}), '(self.param_op_hook)\n', (4508, 4528), False, 'from colossalai.tensor.param_op_hook import use_param_op_hooks\n'), ((4542, 4579), 'torch.autograd.backward', 'torch.autograd.backward', (['tensor', 'grad'], {}), '(tensor, grad)\n', (4565, 4579), False, 'import torch\n'), ((4737, 4768), 'torch._C.DisableTorchFunction', 'torch._C.DisableTorchFunction', ([], {}), '()\n', (4766, 4768), False, 'import torch\n'), ((1578, 1605), 'torch.cuda.current_stream', 'torch.cuda.current_stream', ([], {}), '()\n', (1603, 1605), False, 'import torch\n'), ((1949, 1976), 'torch.cuda.current_stream', 'torch.cuda.current_stream', ([], 
{}), '()\n', (1974, 1976), False, 'import torch\n'), ((1995, 2030), 'torch.cuda.stream', 'torch.cuda.stream', (['self.comm_stream'], {}), '(self.comm_stream)\n', (2012, 2030), False, 'import torch\n'), ((1108, 1136), 'functools.partial', 'partial', (['self.grad_handle', 'p'], {}), '(self.grad_handle, p)\n', (1115, 1136), False, 'from functools import partial\n'), ((2076, 2108), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (2089, 2108), True, 'from colossalai.core import global_context as gpc\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import colossalai
import logging
from pathlib import Path
from typing import Union, List
import inspect
from colossalai.context.parallel_mode import ParallelMode
# Prefer rich's coloured, tracebacks-aware handler when it is installed;
# otherwise fall back to the stock logging stream handler.
try:
    from rich.logging import RichHandler
    _FORMAT = 'colossalai - %(name)s - %(levelname)s: %(message)s'
    logging.basicConfig(level=logging.INFO,
                        format=_FORMAT,
                        handlers=[RichHandler(show_path=False, markup=True, rich_tracebacks=True)])
except ImportError:
    # rich is an optional dependency.
    _FORMAT = 'colossalai - %(name)s - %(levelname)s: %(message)s'
    logging.basicConfig(level=logging.INFO, format=_FORMAT)
class DistributedLogger:
    """This is a distributed event logger class essentially based on :class:`logging`.
    Args:
        name (str): The name of the logger.
    Note:
        The parallel_mode used in ``info``, ``warning``, ``debug`` and ``error``
        should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    # Registry of every logger created so far, keyed by name (one per name).
    __instances = dict()
    @staticmethod
    def get_instance(name: str):
        """Get the unique single logger instance based on name.
        Args:
            name (str): The name of the logger.
        Returns:
            DistributedLogger: A DistributedLogger object
        """
        if name in DistributedLogger.__instances:
            return DistributedLogger.__instances[name]
        else:
            # __init__ registers the new instance in __instances.
            logger = DistributedLogger(name=name)
            return logger
    def __init__(self, name):
        """Create and register a logger; prefer :meth:`get_instance` to avoid duplicates."""
        if name in DistributedLogger.__instances:
            raise Exception(
                'Logger with the same name has been created, you should use colossalai.logging.get_dist_logger')
        else:
            self._name = name
            self._logger = logging.getLogger(name)
            DistributedLogger.__instances[name] = self
    @staticmethod
    def __get_call_info():
        """Return ``(file, line, function)`` of the code that called a logging method."""
        stack = inspect.stack()
        # stack[1] gives previous function ('info' in our case)
        # stack[2] gives before previous function and so on
        fn = stack[2][1]
        ln = stack[2][2]
        func = stack[2][3]
        return fn, ln, func
    @staticmethod
    def _check_valid_logging_level(level: str):
        # Only the four level names below are accepted (case-sensitive).
        assert level in ['INFO', 'DEBUG', 'WARNING', 'ERROR'], 'found invalid logging level'
    def set_level(self, level: str) -> None:
        """Set the logging level
        Args:
            level (str): Can only be INFO, DEBUG, WARNING and ERROR.
        """
        self._check_valid_logging_level(level)
        self._logger.setLevel(getattr(logging, level))
    def log_to_file(self, path: Union[str, Path], mode: str = 'a', level: str = 'INFO', suffix: str = None) -> None:
        """Save the logs to file
        Args:
            path (A string or pathlib.Path object): The file to save the log.
            mode (str): The mode to write log into the file.
            level (str): Can only be INFO, DEBUG, WARNING and ERROR.
            suffix (str): The suffix string of log's name.
        """
        assert isinstance(path, (str, Path)), \
            f'expected argument path to be type str or Path, but got {type(path)}'
        self._check_valid_logging_level(level)
        if isinstance(path, str):
            path = Path(path)
        # create log directory
        # NOTE(review): ``path`` is always treated as a directory (mkdir + a
        # per-rank file name appended below) — confirm callers pass a directory.
        path.mkdir(parents=True, exist_ok=True)
        # set the default file name if path is a directory
        if not colossalai.core.global_context.is_initialized(ParallelMode.GLOBAL):
            # Before distributed init there is no rank yet; default to 0.
            rank = 0
        else:
            rank = colossalai.core.global_context.get_global_rank()
        if suffix is not None:
            log_file_name = f'rank_{rank}_{suffix}.log'
        else:
            log_file_name = f'rank_{rank}.log'
        path = path.joinpath(log_file_name)
        # add file handler
        file_handler = logging.FileHandler(path, mode)
        file_handler.setLevel(getattr(logging, level))
        formatter = logging.Formatter(_FORMAT)
        file_handler.setFormatter(formatter)
        self._logger.addHandler(file_handler)
    def _log(self,
             level,
             message: str,
             parallel_mode: ParallelMode = ParallelMode.GLOBAL,
             ranks: List[int] = None) -> None:
        # Emit only on the requested ranks; all ranks when ``ranks`` is None.
        if ranks is None:
            getattr(self._logger, level)(message)
        else:
            local_rank = colossalai.core.global_context.get_local_rank(parallel_mode)
            if local_rank in ranks:
                getattr(self._logger, level)(message)
    def info(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: List[int] = None) -> None:
        """Log an info message.
        Args:
            message (str): The message to be logged.
            parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`):
                The parallel mode used for logging. Defaults to ParallelMode.GLOBAL.
            ranks (List[int]): List of parallel ranks.
        """
        # Log the call site (file:line func) first, then the message itself.
        message_prefix = "{}:{} {}".format(*self.__get_call_info())
        self._log('info', message_prefix, parallel_mode, ranks)
        self._log('info', message, parallel_mode, ranks)
    def warning(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: List[int] = None) -> None:
        """Log a warning message.
        Args:
            message (str): The message to be logged.
            parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`):
                The parallel mode used for logging. Defaults to ParallelMode.GLOBAL.
            ranks (List[int]): List of parallel ranks.
        """
        message_prefix = "{}:{} {}".format(*self.__get_call_info())
        self._log('warning', message_prefix, parallel_mode, ranks)
        self._log('warning', message, parallel_mode, ranks)
    def debug(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: List[int] = None) -> None:
        """Log a debug message.
        Args:
            message (str): The message to be logged.
            parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`):
                The parallel mode used for logging. Defaults to ParallelMode.GLOBAL.
            ranks (List[int]): List of parallel ranks.
        """
        message_prefix = "{}:{} {}".format(*self.__get_call_info())
        self._log('debug', message_prefix, parallel_mode, ranks)
        self._log('debug', message, parallel_mode, ranks)
    def error(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: List[int] = None) -> None:
        """Log an error message.
        Args:
            message (str): The message to be logged.
            parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`):
                The parallel mode used for logging. Defaults to ParallelMode.GLOBAL.
            ranks (List[int]): List of parallel ranks.
        """
        message_prefix = "{}:{} {}".format(*self.__get_call_info())
        self._log('error', message_prefix, parallel_mode, ranks)
        self._log('error', message, parallel_mode, ranks)
| [
"colossalai.core.global_context.is_initialized",
"colossalai.core.global_context.get_global_rank",
"colossalai.core.global_context.get_local_rank"
] | [((601, 656), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '_FORMAT'}), '(level=logging.INFO, format=_FORMAT)\n', (620, 656), False, 'import logging\n'), ((2074, 2089), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (2087, 2089), False, 'import inspect\n'), ((4017, 4048), 'logging.FileHandler', 'logging.FileHandler', (['path', 'mode'], {}), '(path, mode)\n', (4036, 4048), False, 'import logging\n'), ((4124, 4150), 'logging.Formatter', 'logging.Formatter', (['_FORMAT'], {}), '(_FORMAT)\n', (4141, 4150), False, 'import logging\n'), ((1933, 1956), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (1950, 1956), False, 'import logging\n'), ((3436, 3446), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (3440, 3446), False, 'from pathlib import Path\n'), ((3602, 3668), 'colossalai.core.global_context.is_initialized', 'colossalai.core.global_context.is_initialized', (['ParallelMode.GLOBAL'], {}), '(ParallelMode.GLOBAL)\n', (3647, 3668), False, 'import colossalai\n'), ((3724, 3772), 'colossalai.core.global_context.get_global_rank', 'colossalai.core.global_context.get_global_rank', ([], {}), '()\n', (3770, 3772), False, 'import colossalai\n'), ((4535, 4595), 'colossalai.core.global_context.get_local_rank', 'colossalai.core.global_context.get_local_rank', (['parallel_mode'], {}), '(parallel_mode)\n', (4580, 4595), False, 'import colossalai\n'), ((444, 507), 'rich.logging.RichHandler', 'RichHandler', ([], {'show_path': '(False)', 'markup': '(True)', 'rich_tracebacks': '(True)'}), '(show_path=False, markup=True, rich_tracebacks=True)\n', (455, 507), False, 'from rich.logging import RichHandler\n')] |
import torch
import torch.nn as nn
from colossalai.core import global_context as gpc
from colossalai.context import ParallelMode
from colossalai.logging import get_dist_logger
import torch.nn.functional as F
import torch.distributed as dist
from .cross_entropy import vocab_cross_entropy
class BertLoss(nn.Module):
    """Combine the masked-LM loss with the sentence-order-prediction loss.

    The token-level LM loss is averaged over the positions selected by
    ``loss_mask``, reduced across the sequence-parallel group, and — when
    ``sop_logits`` is provided — the SOP cross-entropy is scaled by the group
    size before being added.
    """

    def forward(self, lm_loss, sop_logits, loss_mask, sentence_order):
        mask = loss_mask.float()
        # Mean LM loss over the masked-in tokens only.
        token_loss = torch.sum(lm_loss.float().view(-1) * mask.reshape(-1)) / mask.sum()
        # Combine the partial losses across sequence-parallel ranks (in place).
        torch.distributed.all_reduce(
            token_loss,
            group=gpc.get_group(ParallelMode.SEQUENCE)
        )
        if sop_logits is None:
            return token_loss
        sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(),
                                   sentence_order.view(-1),
                                   ignore_index=-1).float()
        return token_loss + sop_loss * gpc.get_world_size(ParallelMode.SEQUENCE)
| [
"colossalai.core.global_context.get_world_size",
"colossalai.core.global_context.get_group"
] | [((762, 798), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.SEQUENCE'], {}), '(ParallelMode.SEQUENCE)\n', (775, 798), True, 'from colossalai.core import global_context as gpc\n'), ((1116, 1157), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.SEQUENCE'], {}), '(ParallelMode.SEQUENCE)\n', (1134, 1157), True, 'from colossalai.core import global_context as gpc\n')] |
from copy import deepcopy
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize
from colossalai.utils import free_port
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)
from colossalai.zero.sharded_param import ShardedTensor
from colossalai.zero.sharded_param.sharded_param import ShardedParamV2
from colossalai.testing import rerun_on_exception
from tests.test_zero_data_parallel.common import CONFIG, allclose
@parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy])
def run_shard_tensor_with_strategy(shard_strategy_class, world_size):
    """Shard a tensor with the given strategy and check shapes round-trip through gather."""
    full_shape = [world_size * 2, 3]
    t = ShardedTensor(tensor=torch.randn(*full_shape))
    assert list(t.origin_shape) == full_shape
    assert list(t.shape) == full_shape
    strategy = shard_strategy_class()
    # test shard strategy
    strategy.shard([t])
    assert list(t.shape) == [6], f"{list(t.shape)} vs 6"
    strategy.gather([t])
    assert list(t.shape) == full_shape, f"{list(t.shape)} vs {full_shape}"
def _run_shard_tensor(rank, world_size, port):
    # Worker entry point: join the process group, then exercise every strategy.
    colossalai.launch(config=CONFIG, host='localhost', port=port, rank=rank, world_size=world_size, backend='nccl')
    run_shard_tensor_with_strategy(world_size=world_size)
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_shard_tensor(world_size):
    """Spawn one process per rank and run the shard/gather round-trip."""
    mp.spawn(partial(_run_shard_tensor, world_size=world_size, port=free_port()), nprocs=world_size)
def _run_shard_param_v2(rank, world_size, port):
    """Check that ShardedParamV2 keeps the payload intact and can free the torch tensor."""
    colossalai.launch(config=CONFIG, host='localhost', port=port, rank=rank, world_size=world_size, backend='nccl')
    original = torch.nn.Parameter(torch.randn(2, 3))
    reference = deepcopy(original)
    sharded = ShardedParamV2(param=original, process_group=None)
    # The sharded payload must match the source parameter exactly.
    allclose(sharded.sharded_data_tensor.payload, reference.data)
    sharded.remove_torch_payload()
    # After removal only a one-element placeholder remains on the parameter.
    assert original.data.numel() == 1
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_shard_param_v2(world_size):
    """Spawn one process per rank and exercise ShardedParamV2."""
    mp.spawn(partial(_run_shard_param_v2, world_size=world_size, port=free_port()), nprocs=world_size)
if __name__ == '__main__':
    # Allow running both distributed tests directly (2 ranks) without pytest.
    test_shard_tensor(2)
    test_shard_param_v2(2)
| [
"colossalai.testing.parameterize",
"colossalai.launch",
"colossalai.testing.rerun_on_exception",
"colossalai.utils.free_port",
"colossalai.zero.sharded_param.sharded_param.ShardedParamV2"
] | [((555, 645), 'colossalai.testing.parameterize', 'parameterize', (['"""shard_strategy_class"""', '[TensorShardStrategy, BucketTensorShardStrategy]'], {}), "('shard_strategy_class', [TensorShardStrategy,\n BucketTensorShardStrategy])\n", (567, 645), False, 'from colossalai.testing import parameterize\n'), ((1403, 1448), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 2]'], {}), "('world_size', [1, 2])\n", (1426, 1448), False, 'import pytest\n'), ((1450, 1553), 'colossalai.testing.rerun_on_exception', 'rerun_on_exception', ([], {'exception_type': 'mp.ProcessRaisedException', 'pattern': '""".*Address already in use.*"""'}), "(exception_type=mp.ProcessRaisedException, pattern=\n '.*Address already in use.*')\n", (1468, 1553), False, 'from colossalai.testing import rerun_on_exception\n'), ((2179, 2224), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 2]'], {}), "('world_size', [1, 2])\n", (2202, 2224), False, 'import pytest\n'), ((2226, 2329), 'colossalai.testing.rerun_on_exception', 'rerun_on_exception', ([], {'exception_type': 'mp.ProcessRaisedException', 'pattern': '""".*Address already in use.*"""'}), "(exception_type=mp.ProcessRaisedException, pattern=\n '.*Address already in use.*')\n", (2244, 2329), False, 'from colossalai.testing import rerun_on_exception\n'), ((1212, 1328), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'CONFIG', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=CONFIG, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (1229, 1328), False, 'import colossalai\n'), ((1671, 1708), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (1679, 1708), True, 'import torch.multiprocessing as mp\n'), ((1764, 1880), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'CONFIG', 'rank': 'rank', 
'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=CONFIG, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (1781, 1880), False, 'import colossalai\n'), ((1943, 1958), 'copy.deepcopy', 'deepcopy', (['param'], {}), '(param)\n', (1951, 1958), False, 'from copy import deepcopy\n'), ((1972, 2019), 'colossalai.zero.sharded_param.sharded_param.ShardedParamV2', 'ShardedParamV2', ([], {'param': 'param', 'process_group': 'None'}), '(param=param, process_group=None)\n', (1986, 2019), False, 'from colossalai.zero.sharded_param.sharded_param import ShardedParamV2\n'), ((2025, 2085), 'tests.test_zero_data_parallel.common.allclose', 'allclose', (['sparam.sharded_data_tensor.payload', 'param_ref.data'], {}), '(sparam.sharded_data_tensor.payload, param_ref.data)\n', (2033, 2085), False, 'from tests.test_zero_data_parallel.common import CONFIG, allclose\n'), ((2451, 2488), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (2459, 2488), True, 'import torch.multiprocessing as mp\n'), ((1908, 1925), 'torch.randn', 'torch.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (1919, 1925), False, 'import torch\n'), ((741, 771), 'torch.randn', 'torch.randn', (['(world_size * 2)', '(3)'], {}), '(world_size * 2, 3)\n', (752, 771), False, 'import torch\n'), ((1654, 1665), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (1663, 1665), False, 'from colossalai.utils import free_port\n'), ((2434, 2445), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (2443, 2445), False, 'from colossalai.utils import free_port\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
from pathlib import Path
import pytest
import torch.autograd
import colossalai
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.engine import Engine
from colossalai.logging import get_global_dist_logger
from colossalai.nn.layer._parallel_utilities import _gather
# 'LEVEL' selects which config file is used: configs/vit_2d_zero<level>.py.
level = os.environ['LEVEL']
CONFIG_PATH = Path(__file__).parent.parent.joinpath(f'configs/vit_2d_zero{level}.py')
def eval(engine):  # NOTE(review): shadows the builtin `eval`; kept for caller compatibility
    """Run one evaluation pass; return (num_correct, num_samples, mean_loss)."""
    engine.eval()
    loss_total = 0
    num_correct = 0
    num_samples = 0
    num_steps = engine.schedule.num_steps
    for _ in range(num_steps):
        output, label, loss = engine.step()
        loss_total += loss.detach().cpu().numpy()
        # Reassemble the 2D-parallel output: gather along rows, then columns.
        logits = _gather(
            output[0],
            ParallelMode.PARALLEL_2D_ROW,
            1
        )
        logits = _gather(
            logits,
            ParallelMode.PARALLEL_2D_COL,
            0,
        )
        predictions = torch.argmax(logits, dim=-1)
        num_correct += torch.sum(label[0] == predictions)
        num_samples += label[0].size(0)
    return num_correct, num_samples, loss_total / num_steps
def train(engine):
    """Run one training epoch; return the mean loss over all schedule steps."""
    engine.train()
    running_loss = 0
    num_steps = engine.schedule.num_steps
    for _ in range(num_steps):
        _, _, loss = engine.step()
        running_loss += loss.detach().cpu().numpy()
    return running_loss / num_steps
@pytest.mark.dist
@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus")
def test_2d_parallel_vision_transformer():
    """Train a ViT with 2D tensor parallelism, evaluating every other epoch."""
    # init dist
    (model, train_dataloader, test_dataloader, criterion, optimizer, schedule,
     lr_scheduler) = colossalai.initialize(CONFIG_PATH)
    logger = get_global_dist_logger()
    engine = Engine(model=model,
                    optimizer=optimizer,
                    criterion=criterion,
                    train_dataloader=train_dataloader,
                    test_dataloader=test_dataloader,
                    lr_scheduler=lr_scheduler,
                    schedule=schedule)
    logger.info('start training')
    for epoch in range(gpc.config.num_epochs):
        train_loss = train(engine)
        logger.info(f'epoch {epoch} - train loss: {train_loss}')
        if epoch % 2 == 0:
            correct_sum, total_sum, eval_loss = eval(engine)
            logger.info(
                f'epoch {epoch} - eval loss: {eval_loss}, total: {total_sum}, '
                f'correct: {correct_sum}, acc: {correct_sum / total_sum}')
if __name__ == '__main__':
    # Allow invoking the multi-GPU test directly as a script.
    test_2d_parallel_vision_transformer()
| [
"colossalai.nn.layer._parallel_utilities._gather",
"colossalai.initialize",
"colossalai.logging.get_global_dist_logger",
"colossalai.engine.Engine"
] | [((1573, 1688), 'pytest.mark.skip', 'pytest.mark.skip', (['"""This test should be invoked by test.sh in the same folder as it runs on multiple gpus"""'], {}), "(\n 'This test should be invoked by test.sh in the same folder as it runs on multiple gpus'\n )\n", (1589, 1688), False, 'import pytest\n'), ((1831, 1865), 'colossalai.initialize', 'colossalai.initialize', (['CONFIG_PATH'], {}), '(CONFIG_PATH)\n', (1852, 1865), False, 'import colossalai\n'), ((1888, 1912), 'colossalai.logging.get_global_dist_logger', 'get_global_dist_logger', ([], {}), '()\n', (1910, 1912), False, 'from colossalai.logging import get_global_dist_logger\n'), ((1927, 2112), 'colossalai.engine.Engine', 'Engine', ([], {'model': 'model', 'train_dataloader': 'train_dataloader', 'test_dataloader': 'test_dataloader', 'criterion': 'criterion', 'optimizer': 'optimizer', 'lr_scheduler': 'lr_scheduler', 'schedule': 'schedule'}), '(model=model, train_dataloader=train_dataloader, test_dataloader=\n test_dataloader, criterion=criterion, optimizer=optimizer, lr_scheduler\n =lr_scheduler, schedule=schedule)\n', (1933, 2112), False, 'from colossalai.engine import Engine\n'), ((781, 832), 'colossalai.nn.layer._parallel_utilities._gather', '_gather', (['output[0]', 'ParallelMode.PARALLEL_2D_ROW', '(1)'], {}), '(output[0], ParallelMode.PARALLEL_2D_ROW, 1)\n', (788, 832), False, 'from colossalai.nn.layer._parallel_utilities import _gather\n'), ((896, 944), 'colossalai.nn.layer._parallel_utilities._gather', '_gather', (['output', 'ParallelMode.PARALLEL_2D_COL', '(0)'], {}), '(output, ParallelMode.PARALLEL_2D_COL, 0)\n', (903, 944), False, 'from colossalai.nn.layer._parallel_utilities import _gather\n'), ((442, 456), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (446, 456), False, 'from pathlib import Path\n')] |
from typing import List, Optional
import torch
import torch.distributed as dist
from colossalai.utils import get_current_device
from colossalai.zero.sharded_param.sharded_tensor import ShardedTensor
from torch._utils import _flatten_dense_tensors as flatten
from .tensor_shard_strategy import TensorShardStrategy
class BucketTensorShardStrategy(TensorShardStrategy):
    """Use the same shard scheme as `TensorShardStrategy`'s, but it gathers tensors of a sub-module together,
    which will fully utilize network bandwidth.
    It is especially useful when sub-module contains bias,
    since we cannot utilize network bandwidth well if we only gather a bias tensor (bias is usually small).
    """

    def gather(self, tensor_list: List[ShardedTensor], process_group: Optional[dist.ProcessGroup] = None):
        """Gather the full payloads of all sharded tensors in ``tensor_list`` with a single
        flattened all-gather, then split the result back into per-tensor payloads.

        Tensors that are not sharded are ignored. After this call every previously sharded
        tensor holds its full payload and has ``is_sharded`` set to ``False``.
        """
        # Only sharded tensors need gathering; drop the rest up front.
        tensor_list: List[ShardedTensor] = [t for t in tensor_list if t.is_sharded]
        if len(tensor_list) == 0:
            return
        target_device = tensor_list[0].device
        dtype = tensor_list[0].dtype
        buffer_list: List[torch.Tensor] = []
        tensor_numels = [t.payload.numel() for t in tensor_list]
        buffer_size = sum(tensor_numels)
        world_size = dist.get_world_size(process_group)
        rank = dist.get_rank(process_group)
        for i in range(world_size):
            if i == rank:
                # This rank contributes its own shards, flattened into one contiguous CUDA buffer.
                buffer_list.append(flatten([t.payload for t in tensor_list]).cuda(get_current_device()))
                # Release payload here, to decrease peak memory usage
                for t in tensor_list:
                    t.reset_payload(None)
            else:
                # Placeholder buffer that all_gather will fill with rank i's shards.
                buffer_list.append(torch.zeros(buffer_size, dtype=dtype, device=get_current_device()))
        dist.all_gather(buffer_list, buffer_list[rank], group=process_group)
        # Move to target device before splitting buffer
        # Ensure we utilize maximum PCIE bandwidth
        buffer_list = [buffer.to(target_device) for buffer in buffer_list]
        offset = 0
        for i, t in enumerate(tensor_list):
            # Collect tensor i's slice from every rank's buffer, concatenate, truncate
            # to the original element count, and restore the original shape.
            gathered_payload = [buffer[offset:offset + tensor_numels[i]] for buffer in buffer_list]
            gathered_payload = torch.cat(gathered_payload)[:t.origin_numel].view(t.origin_shape)
            t.reset_payload(gathered_payload)
            t.is_sharded = False
            offset += tensor_numels[i]
| [
"colossalai.utils.get_current_device"
] | [((1208, 1242), 'torch.distributed.get_world_size', 'dist.get_world_size', (['process_group'], {}), '(process_group)\n', (1227, 1242), True, 'import torch.distributed as dist\n'), ((1258, 1286), 'torch.distributed.get_rank', 'dist.get_rank', (['process_group'], {}), '(process_group)\n', (1271, 1286), True, 'import torch.distributed as dist\n'), ((1733, 1801), 'torch.distributed.all_gather', 'dist.all_gather', (['buffer_list', 'buffer_list[rank]'], {'group': 'process_group'}), '(buffer_list, buffer_list[rank], group=process_group)\n', (1748, 1801), True, 'import torch.distributed as dist\n'), ((1431, 1451), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (1449, 1451), False, 'from colossalai.utils import get_current_device\n'), ((2178, 2205), 'torch.cat', 'torch.cat', (['gathered_payload'], {}), '(gathered_payload)\n', (2187, 2205), False, 'import torch\n'), ((1384, 1425), 'torch._utils._flatten_dense_tensors', 'flatten', (['[t.payload for t in tensor_list]'], {}), '([t.payload for t in tensor_list])\n', (1391, 1425), True, 'from torch._utils import _flatten_dense_tensors as flatten\n'), ((1702, 1722), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (1720, 1722), False, 'from colossalai.utils import get_current_device\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os.path as osp
from colossalai.registry import HOOKS
from colossalai.trainer import Trainer
from colossalai.trainer.hooks import BaseHook
from colossalai.utils import is_dp_rank_0
from colossalai.utils.checkpointing import get_latest_checkpoint_path, get_checkpoint_path
from colossalai.utils.checkpointing import save_checkpoint, load_checkpoint
from ._lr_scheduler_hook import LRSchedulerHook
@HOOKS.register_module
class SaveCheckpointHook(BaseHook):
    """Saves the model by interval in training process.

    :param trainer: Trainer attached with current hook
    :param interval: Saving interval (in epochs)
    :param checkpoint_dir: Directory of saving checkpoint
    :param suffix: Saving suffix of the file
    :param priority: Priority in the printing, hooks with small priority will be printed in front
    :type trainer: Trainer
    :type interval: int, optional
    :type checkpoint_dir: str, optional
    :type suffix: str, optional
    :type priority: int, optional
    """

    def __init__(self,
                 trainer: Trainer,
                 interval: int = 1,
                 checkpoint_dir: str = None,
                 suffix: str = '',
                 priority: int = 10):
        super().__init__(trainer=trainer, priority=priority)
        assert isinstance(trainer, Trainer), \
            f'SaveCheckpointHook expects a Trainer, got {type(trainer)}'
        self.interval = interval
        self.checkpoint_dir = checkpoint_dir
        self.suffix = suffix

        # get lr scheduler from the LRSchedulerHook before train
        self._lr_scheduler = None

    def before_train(self):
        """Looks up the LR scheduler (if any) from a sibling LRSchedulerHook so its
        state can be included in saved checkpoints.
        """
        # check if lr scheduler is present in LRSchedulerHook
        for hook in self.trainer.hooks:
            if isinstance(hook, LRSchedulerHook):
                self._lr_scheduler = hook.lr_scheduler
                break

    def after_train_epoch(self):
        """Saves the model after a training epoch.
        """
        # save by interval
        if self.trainer.cur_epoch % self.interval == 0:
            # only gpus with data parallel rank equals to 0 write to the disk
            if is_dp_rank_0():
                save_path = get_checkpoint_path(self.checkpoint_dir,
                                                self.trainer.cur_epoch,
                                                suffix=self.suffix)
                # Persist model, optimizer, and (optional) scheduler state together.
                save_checkpoint(save_path,
                                self.trainer.cur_epoch,
                                self.trainer.engine.model,
                                self.trainer.engine.optimizer,
                                self._lr_scheduler)
                self.logger.info(
                    f'checkpoint for epoch {self.trainer.cur_epoch} is saved to {self.checkpoint_dir}')
@HOOKS.register_module
class LoadCheckpointHook(BaseHook):
    """Loads the model before training process.

    :param trainer: Trainer attached with current hook
    :param checkpoint_dir: Directory of saving checkpoint
    :param epoch: Epoch number to be set (-1 loads the latest checkpoint)
    :param finetune: Whether allows to load a part of the model
    :param strict: Whether loads a model that has the same shape of parameters
    :param priority: Priority in the printing, hooks with small priority will be printed in front
    :type trainer: Trainer
    :type checkpoint_dir: str, optional
    :type epoch: int, optional
    :type finetune: bool, optional
    :type strict: bool, optional
    :type priority: int, optional
    """

    def __init__(self,
                 trainer: Trainer = None,
                 checkpoint_dir: str = None,
                 epoch: int = -1,
                 finetune: bool = False,
                 strict: bool = False,
                 suffix: str = '',
                 priority: int = 0) -> None:
        super().__init__(trainer=trainer, priority=priority)
        assert isinstance(trainer, Trainer), \
            f'LoadLatestCheckpointHook excepts a Trainer, got {type(trainer)}'
        self.epoch = epoch
        self.checkpoint_dir = checkpoint_dir
        self.finetune = finetune
        self.suffix = suffix
        self.strict = strict

    def before_train(self):
        """Loads parameters to the model before training.
        """
        # check if lr scheduler is present in LRSchedulerHook
        lr_scheduler = None
        for hook in self.trainer.hooks:
            if isinstance(hook, LRSchedulerHook):
                lr_scheduler = hook.lr_scheduler
                break

        # use latest checkpoint if epoch = -1
        if self.epoch == -1:
            path = get_latest_checkpoint_path(self.checkpoint_dir, suffix=self.suffix)
        else:
            path = get_checkpoint_path(self.checkpoint_dir, epoch=self.epoch, suffix=self.suffix)
        if osp.exists(path):
            last_epoch, _ = load_checkpoint(path,
                                            self.trainer.engine.model,
                                            self.trainer.engine.optimizer,
                                            lr_scheduler,
                                            finetune=self.finetune,
                                            strict=self.strict)
            # When fine-tuning, restart the epoch counter; otherwise resume where
            # the checkpoint left off.
            if self.finetune:
                self.trainer.cur_epoch = 0
            else:
                self.trainer.cur_epoch = last_epoch

            self.logger.info(
                f'loaded checkpoint from {path}')
        else:
            raise FileNotFoundError(f'checkpoint is not found at {path}')
| [
"colossalai.utils.checkpointing.save_checkpoint",
"colossalai.utils.checkpointing.get_latest_checkpoint_path",
"colossalai.utils.is_dp_rank_0",
"colossalai.utils.checkpointing.get_checkpoint_path",
"colossalai.utils.checkpointing.load_checkpoint"
] | [((4816, 4832), 'os.path.exists', 'osp.exists', (['path'], {}), '(path)\n', (4826, 4832), True, 'import os.path as osp\n'), ((2172, 2186), 'colossalai.utils.is_dp_rank_0', 'is_dp_rank_0', ([], {}), '()\n', (2184, 2186), False, 'from colossalai.utils import is_dp_rank_0\n'), ((4624, 4691), 'colossalai.utils.checkpointing.get_latest_checkpoint_path', 'get_latest_checkpoint_path', (['self.checkpoint_dir'], {'suffix': 'self.suffix'}), '(self.checkpoint_dir, suffix=self.suffix)\n', (4650, 4691), False, 'from colossalai.utils.checkpointing import get_latest_checkpoint_path, get_checkpoint_path\n'), ((4725, 4803), 'colossalai.utils.checkpointing.get_checkpoint_path', 'get_checkpoint_path', (['self.checkpoint_dir'], {'epoch': 'self.epoch', 'suffix': 'self.suffix'}), '(self.checkpoint_dir, epoch=self.epoch, suffix=self.suffix)\n', (4744, 4803), False, 'from colossalai.utils.checkpointing import get_latest_checkpoint_path, get_checkpoint_path\n'), ((4862, 5004), 'colossalai.utils.checkpointing.load_checkpoint', 'load_checkpoint', (['path', 'self.trainer.engine.model', 'self.trainer.engine.optimizer', 'lr_scheduler'], {'finetune': 'self.finetune', 'strict': 'self.strict'}), '(path, self.trainer.engine.model, self.trainer.engine.\n optimizer, lr_scheduler, finetune=self.finetune, strict=self.strict)\n', (4877, 5004), False, 'from colossalai.utils.checkpointing import save_checkpoint, load_checkpoint\n'), ((2216, 2305), 'colossalai.utils.checkpointing.get_checkpoint_path', 'get_checkpoint_path', (['self.checkpoint_dir', 'self.trainer.cur_epoch'], {'suffix': 'self.suffix'}), '(self.checkpoint_dir, self.trainer.cur_epoch, suffix=\n self.suffix)\n', (2235, 2305), False, 'from colossalai.utils.checkpointing import get_latest_checkpoint_path, get_checkpoint_path\n'), ((2414, 2547), 'colossalai.utils.checkpointing.save_checkpoint', 'save_checkpoint', (['save_path', 'self.trainer.cur_epoch', 'self.trainer.engine.model', 'self.trainer.engine.optimizer', 'self._lr_scheduler'], {}), 
'(save_path, self.trainer.cur_epoch, self.trainer.engine.\n model, self.trainer.engine.optimizer, self._lr_scheduler)\n', (2429, 2547), False, 'from colossalai.utils.checkpointing import save_checkpoint, load_checkpoint\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
import colossalai
import torch
import torchvision
from colossalai.builder import *
from colossalai.core import global_context as gpc
from colossalai.logging import get_dist_logger
from colossalai.nn import Accuracy, CrossEntropyLoss
from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
from colossalai.trainer import Trainer, hooks
from colossalai.utils import MultiTimer, get_dataloader
from model_zoo.vit import vit_lite_depth7_patch4_32
from torchvision import transforms
# Root directory of the CIFAR10 data, supplied via the DATA environment variable.
DATASET_PATH = str(os.environ['DATA'])
def build_cifar(batch_size):
    """Build CIFAR10 train/test dataloaders with the standard augmentation recipe.

    :param batch_size: per-process batch size for both loaders
    :return: (train_dataloader, test_dataloader)
    """
    # Normalization is identical for both splits, so build it once.
    normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.AutoAugment(policy=transforms.AutoAugmentPolicy.CIFAR10),
        transforms.ToTensor(),
        normalize,
    ])
    test_transform = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        normalize,
    ])

    train_set = torchvision.datasets.CIFAR10(root=DATASET_PATH,
                                              train=True,
                                              download=True,
                                              transform=train_transform)
    test_set = torchvision.datasets.CIFAR10(root=DATASET_PATH, train=False, transform=test_transform)

    train_loader = get_dataloader(dataset=train_set,
                                  shuffle=True,
                                  batch_size=batch_size,
                                  num_workers=4,
                                  pin_memory=True)
    test_loader = get_dataloader(dataset=test_set, batch_size=batch_size, num_workers=4, pin_memory=True)
    return train_loader, test_loader
def train_cifar():
    """Train ViT-Lite (depth 7, patch size 4, 32x32 input) on CIFAR10 with colossalai."""
    args = colossalai.get_default_parser().parse_args()
    # standard launch
    # colossalai.launch(config=args.config,
    #                   rank=args.rank,
    #                   world_size=args.world_size,
    #                   local_rank=args.local_rank,
    #                   host=args.host,
    #                   port=args.port)

    # launch from torchrun
    colossalai.launch_from_torch(config=args.config)

    logger = get_dist_logger()
    # Optionally mirror logs to a file; only the global rank-0 process creates the dir.
    if hasattr(gpc.config, 'LOG_PATH'):
        if gpc.get_global_rank() == 0:
            log_path = gpc.config.LOG_PATH
            if not os.path.exists(log_path):
                os.mkdir(log_path)
            logger.log_to_file(log_path)

    model = vit_lite_depth7_patch4_32()

    # The configured global batch size is split evenly across data-parallel ranks.
    train_dataloader, test_dataloader = build_cifar(gpc.config.BATCH_SIZE // gpc.data_parallel_size)

    criterion = CrossEntropyLoss(label_smoothing=0.1)
    optimizer = torch.optim.AdamW(model.parameters(), lr=gpc.config.LEARNING_RATE, weight_decay=gpc.config.WEIGHT_DECAY)

    # Scheduler is stepped per batch (by_epoch=False below), so total/warmup are in steps.
    steps_per_epoch = len(train_dataloader)
    lr_scheduler = CosineAnnealingWarmupLR(optimizer=optimizer,
                                         total_steps=gpc.config.NUM_EPOCHS * steps_per_epoch,
                                         warmup_steps=gpc.config.WARMUP_EPOCHS * steps_per_epoch)

    engine, train_dataloader, test_dataloader, lr_scheduler = colossalai.initialize(model=model,
                                                                         optimizer=optimizer,
                                                                         criterion=criterion,
                                                                         train_dataloader=train_dataloader,
                                                                         test_dataloader=test_dataloader,
                                                                         lr_scheduler=lr_scheduler)

    logger.info("Engine is built", ranks=[0])

    timer = MultiTimer()

    trainer = Trainer(engine=engine, logger=logger, timer=timer)
    logger.info("Trainer is built", ranks=[0])

    hook_list = [
        hooks.LogMetricByEpochHook(logger=logger),
        hooks.LogMetricByStepHook(),
        # hooks.LogTimingByEpochHook(timer=timer, logger=logger),
        # hooks.LogMemoryByEpochHook(logger=logger),
        hooks.AccuracyHook(accuracy_func=Accuracy()),
        hooks.LossHook(),
        hooks.ThroughputHook(),
        hooks.LRSchedulerHook(lr_scheduler=lr_scheduler, by_epoch=False)
    ]

    logger.info("Train start", ranks=[0])
    trainer.fit(train_dataloader=train_dataloader,
                test_dataloader=test_dataloader,
                epochs=gpc.config.NUM_EPOCHS,
                hooks=hook_list,
                display_progress=True,
                test_interval=1)


if __name__ == '__main__':
    train_cifar()
| [
"colossalai.trainer.hooks.LogMetricByEpochHook",
"colossalai.get_default_parser",
"colossalai.nn.lr_scheduler.CosineAnnealingWarmupLR",
"colossalai.trainer.hooks.LogMetricByStepHook",
"colossalai.utils.MultiTimer",
"colossalai.logging.get_dist_logger",
"colossalai.launch_from_torch",
"colossalai.utils... | [((1116, 1221), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': 'DATASET_PATH', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), '(root=DATASET_PATH, train=True, download=True,\n transform=transform_train)\n', (1144, 1221), False, 'import torchvision\n'), ((1384, 1475), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': 'DATASET_PATH', 'train': '(False)', 'transform': 'transform_test'}), '(root=DATASET_PATH, train=False, transform=\n transform_test)\n', (1412, 1475), False, 'import torchvision\n'), ((1494, 1604), 'colossalai.utils.get_dataloader', 'get_dataloader', ([], {'dataset': 'train_dataset', 'shuffle': '(True)', 'batch_size': 'batch_size', 'num_workers': '(4)', 'pin_memory': '(True)'}), '(dataset=train_dataset, shuffle=True, batch_size=batch_size,\n num_workers=4, pin_memory=True)\n', (1508, 1604), False, 'from colossalai.utils import MultiTimer, get_dataloader\n'), ((1775, 1870), 'colossalai.utils.get_dataloader', 'get_dataloader', ([], {'dataset': 'test_dataset', 'batch_size': 'batch_size', 'num_workers': '(4)', 'pin_memory': '(True)'}), '(dataset=test_dataset, batch_size=batch_size, num_workers=4,\n pin_memory=True)\n', (1789, 1870), False, 'from colossalai.utils import MultiTimer, get_dataloader\n'), ((2311, 2359), 'colossalai.launch_from_torch', 'colossalai.launch_from_torch', ([], {'config': 'args.config'}), '(config=args.config)\n', (2339, 2359), False, 'import colossalai\n'), ((2374, 2391), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (2389, 2391), False, 'from colossalai.logging import get_dist_logger\n'), ((2648, 2675), 'model_zoo.vit.vit_lite_depth7_patch4_32', 'vit_lite_depth7_patch4_32', ([], {}), '()\n', (2673, 2675), False, 'from model_zoo.vit import vit_lite_depth7_patch4_32\n'), ((2795, 2832), 'colossalai.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {'label_smoothing': '(0.1)'}), '(label_smoothing=0.1)\n', 
(2811, 2832), False, 'from colossalai.nn import Accuracy, CrossEntropyLoss\n'), ((3020, 3183), 'colossalai.nn.lr_scheduler.CosineAnnealingWarmupLR', 'CosineAnnealingWarmupLR', ([], {'optimizer': 'optimizer', 'total_steps': '(gpc.config.NUM_EPOCHS * steps_per_epoch)', 'warmup_steps': '(gpc.config.WARMUP_EPOCHS * steps_per_epoch)'}), '(optimizer=optimizer, total_steps=gpc.config.\n NUM_EPOCHS * steps_per_epoch, warmup_steps=gpc.config.WARMUP_EPOCHS *\n steps_per_epoch)\n', (3043, 3183), False, 'from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR\n'), ((3324, 3503), 'colossalai.initialize', 'colossalai.initialize', ([], {'model': 'model', 'optimizer': 'optimizer', 'criterion': 'criterion', 'train_dataloader': 'train_dataloader', 'test_dataloader': 'test_dataloader', 'lr_scheduler': 'lr_scheduler'}), '(model=model, optimizer=optimizer, criterion=criterion,\n train_dataloader=train_dataloader, test_dataloader=test_dataloader,\n lr_scheduler=lr_scheduler)\n', (3345, 3503), False, 'import colossalai\n'), ((3976, 3988), 'colossalai.utils.MultiTimer', 'MultiTimer', ([], {}), '()\n', (3986, 3988), False, 'from colossalai.utils import MultiTimer, get_dataloader\n'), ((4004, 4054), 'colossalai.trainer.Trainer', 'Trainer', ([], {'engine': 'engine', 'logger': 'logger', 'timer': 'timer'}), '(engine=engine, logger=logger, timer=timer)\n', (4011, 4054), False, 'from colossalai.trainer import Trainer, hooks\n'), ((4129, 4170), 'colossalai.trainer.hooks.LogMetricByEpochHook', 'hooks.LogMetricByEpochHook', ([], {'logger': 'logger'}), '(logger=logger)\n', (4155, 4170), False, 'from colossalai.trainer import Trainer, hooks\n'), ((4180, 4207), 'colossalai.trainer.hooks.LogMetricByStepHook', 'hooks.LogMetricByStepHook', ([], {}), '()\n', (4205, 4207), False, 'from colossalai.trainer import Trainer, hooks\n'), ((4390, 4406), 'colossalai.trainer.hooks.LossHook', 'hooks.LossHook', ([], {}), '()\n', (4404, 4406), False, 'from colossalai.trainer import Trainer, hooks\n'), ((4416, 
4438), 'colossalai.trainer.hooks.ThroughputHook', 'hooks.ThroughputHook', ([], {}), '()\n', (4436, 4438), False, 'from colossalai.trainer import Trainer, hooks\n'), ((4448, 4512), 'colossalai.trainer.hooks.LRSchedulerHook', 'hooks.LRSchedulerHook', ([], {'lr_scheduler': 'lr_scheduler', 'by_epoch': '(False)'}), '(lr_scheduler=lr_scheduler, by_epoch=False)\n', (4469, 4512), False, 'from colossalai.trainer import Trainer, hooks\n'), ((667, 703), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (688, 703), False, 'from torchvision import transforms\n'), ((713, 780), 'torchvision.transforms.AutoAugment', 'transforms.AutoAugment', ([], {'policy': 'transforms.AutoAugmentPolicy.CIFAR10'}), '(policy=transforms.AutoAugmentPolicy.CIFAR10)\n', (735, 780), False, 'from torchvision import transforms\n'), ((790, 811), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (809, 811), False, 'from torchvision import transforms\n'), ((821, 892), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (841, 892), False, 'from torchvision import transforms\n'), ((952, 973), 'torchvision.transforms.Resize', 'transforms.Resize', (['(32)'], {}), '(32)\n', (969, 973), False, 'from torchvision import transforms\n'), ((983, 1004), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1002, 1004), False, 'from torchvision import transforms\n'), ((1014, 1085), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (1034, 1085), False, 'from torchvision import transforms\n'), ((1944, 1975), 'colossalai.get_default_parser', 'colossalai.get_default_parser', ([], {}), '()\n', (1973, 1975), False, 'import colossalai\n'), ((2443, 2464), 
'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (2462, 2464), True, 'from colossalai.core import global_context as gpc\n'), ((2533, 2557), 'os.path.exists', 'os.path.exists', (['log_path'], {}), '(log_path)\n', (2547, 2557), False, 'import os\n'), ((2575, 2593), 'os.mkdir', 'os.mkdir', (['log_path'], {}), '(log_path)\n', (2583, 2593), False, 'import os\n'), ((4369, 4379), 'colossalai.nn.Accuracy', 'Accuracy', ([], {}), '()\n', (4377, 4379), False, 'from colossalai.nn import Accuracy, CrossEntropyLoss\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch.distributed as dist
from torchvision.datasets import Caltech101
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.registry import DATASETS
from .base_dataset import BaseDataset
@DATASETS.register_module
class Caltech101Dataset(BaseDataset):
    """`Caltech 101 <http://www.vision.caltech.edu/Image_Datasets/Caltech101/>`_ Dataset.

    :param transform_pipeline: A list of functions' config, which takes in an PIL image
        and returns a transformed version
    :type transform_pipeline: list
    """

    def __init__(self, transform_pipeline: list, *args, **kwargs):
        super().__init__(transform_pipeline)
        # In a distributed run, let global rank 0 fetch the data first while the
        # other ranks wait at a barrier; rank 0 then releases them at the second
        # barrier once the dataset files exist.
        distributed = gpc.is_initialized(ParallelMode.GLOBAL)
        if distributed and gpc.get_global_rank() != 0:
            dist.barrier()
        self._dataset = Caltech101(
            transform=self._transform_pipeline, *args, **kwargs)
        if distributed and gpc.get_global_rank() == 0:
            dist.barrier()

    def __len__(self):
        return len(self._dataset)

    def __getitem__(self, item):
        """Fetch one sample.

        :param item: Index
        :type item: int
        :return: ((image,), (target,)) where the type of target specified by target_type.
        :rtype: tuple
        """
        image, target = self._dataset[item]
        return (image,), (target,)
| [
"colossalai.core.global_context.get_global_rank",
"colossalai.core.global_context.is_initialized"
] | [((881, 944), 'torchvision.datasets.Caltech101', 'Caltech101', (['*args'], {'transform': 'self._transform_pipeline'}), '(*args, transform=self._transform_pipeline, **kwargs)\n', (891, 944), False, 'from torchvision.datasets import Caltech101\n'), ((758, 797), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.GLOBAL'], {}), '(ParallelMode.GLOBAL)\n', (776, 797), True, 'from colossalai.core import global_context as gpc\n'), ((842, 856), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (854, 856), True, 'import torch.distributed as dist\n'), ((969, 1008), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.GLOBAL'], {}), '(ParallelMode.GLOBAL)\n', (987, 1008), True, 'from colossalai.core import global_context as gpc\n'), ((1053, 1067), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (1065, 1067), True, 'import torch.distributed as dist\n'), ((802, 823), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (821, 823), True, 'from colossalai.core import global_context as gpc\n'), ((1013, 1034), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (1032, 1034), True, 'from colossalai.core import global_context as gpc\n')] |
import math
from typing import Callable
from colossalai.utils import get_current_device
from torch import dtype, nn
from ... import init as init
from ..parallel_1d import Embedding1D, PatchEmbedding1D, VocabParallelEmbedding1D
from ..parallel_2d import Embedding2D, PatchEmbedding2D, VocabParallelEmbedding2D
from ..parallel_2p5d import Embedding2p5D, PatchEmbedding2p5D, VocabParallelEmbedding2p5D
from ..parallel_3d import Embedding3D, PatchEmbedding3D, VocabParallelEmbedding3D
from ..utils import get_tensor_parallel_mode
from ..vanilla import VanillaPatchEmbedding
from ._utils import ColossalaiModule
# Embedding implementations keyed by tensor-parallel mode.
_parallel_embedding = {
    '1d': Embedding1D,
    '2d': Embedding2D,
    '2.5d': Embedding2p5D,
    '3d': Embedding3D,
}

# Vocab-parallel variants, selected when the vocabulary exceeds the limit
# passed to `Embedding` (`vocab_parallel_limit`).
_vocab_parallel_embedding = {
    '1d': VocabParallelEmbedding1D,
    '2d': VocabParallelEmbedding2D,
    '2.5d': VocabParallelEmbedding2p5D,
    '3d': VocabParallelEmbedding3D
}

# Patch-embedding implementations; the `None` key covers the no-parallelism case.
_parallel_patchembedding = {
    None: VanillaPatchEmbedding,
    '1d': PatchEmbedding1D,
    '2d': PatchEmbedding2D,
    '2.5d': PatchEmbedding2p5D,
    '3d': PatchEmbedding3D
}
class Embedding(ColossalaiModule):
    r"""Embedding for colossalai, dispatching to the implementation that matches
    the current tensor-parallel mode.

    Args:
        num_embeddings (int): number of embeddings.
        embedding_dim (int): dimension of embedding.
        padding_idx (int, optional): If specified, the entries at padding_idx do not contribute
            to the gradient, so the embedding vector at padding_idx stays fixed. Defaults to None.
        dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
        weight_initializer (:class:`typing.Callable`, optional):
            The initializer of weight, defaults to normal initializer.
        vocab_parallel_limit (int, optional): vocabularies larger than this use the
            vocab-parallel implementation. Defaults to 2048.

    Extra ``args``/``kwargs`` are forwarded to the underlying embedding and may include
    ``max_norm``, ``norm_type``, ``scale_grad_by_freq`` and ``sparse``; see
    `Embedding <https://pytorch.org/docs/stable/generated/torch.nn.functional.embedding.html#torch.nn.functional.embedding>`_.
    More details about ``initializer`` please refer to
    `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_
    """

    def __init__(self,
                 num_embeddings: int,
                 embedding_dim: int,
                 padding_idx: int = None,
                 dtype: dtype = None,
                 weight_initializer: Callable = init.normal_(),
                 vocab_parallel_limit: int = 2048,
                 *args,
                 **kwargs) -> None:
        parallel_mode = get_tensor_parallel_mode()
        if parallel_mode is None:
            # No tensor parallelism: plain torch embedding, moved to the target
            # dtype/device and initialized explicitly.
            embed = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx, *args,
                                 **kwargs).to(dtype).to(get_current_device())
            weight_initializer(embed.weight, fan_in=num_embeddings, fan_out=embedding_dim)
        else:
            # Small vocabularies replicate the vocab dimension; large ones shard it.
            if num_embeddings <= vocab_parallel_limit:
                embed_cls = _parallel_embedding[parallel_mode]
            else:
                embed_cls = _vocab_parallel_embedding[parallel_mode]
            embed = embed_cls(
                num_embeddings,
                embedding_dim,
                padding_idx=padding_idx,
                dtype=dtype,
                weight_initializer=weight_initializer,
                *args,
                **kwargs,
            )
        super().__init__(embed)
class PatchEmbedding(ColossalaiModule):
    """2D Image to Patch Embedding, dispatched by the current tensor-parallel mode.

    Args:
        img_size (int): image size.
        patch_size (int): patch size.
        in_chans (int): number of channels of input image.
        embed_size (int): size of embedding.
        dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
        flatten (bool, optional): whether to flatten output tensor, defaults to True.
        weight_initializer (:class:`typing.Callable`, optional):
            The initializer of weight, defaults to kaiming uniform initializer.
        bias_initializer (:class:`typing.Callable`, optional):
            The initializer of bias, defaults to xavier uniform initializer.
        position_embed_initializer (:class:`typing.Callable`, optional):
            The initializer of position embedding, defaults to zeros initializer.

    More details about ``initializer`` please refer to
    `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
    """

    def __init__(
        self,
        img_size: int,
        patch_size: int,
        in_chans: int,
        embed_size: int,
        dtype: dtype = None,
        flatten: bool = True,
        weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
        bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
        position_embed_initializer: Callable = init.zeros_()
    ) -> None:
        # Pick the implementation for the active mode (None -> vanilla, no parallelism).
        embed_cls = _parallel_patchembedding[get_tensor_parallel_mode()]
        embed = embed_cls(
            img_size,
            patch_size,
            in_chans,
            embed_size,
            dtype=dtype,
            flatten=flatten,
            weight_initializer=weight_initializer,
            bias_initializer=bias_initializer,
            position_embed_initializer=position_embed_initializer,
        )
        super().__init__(embed)
| [
"colossalai.utils.get_current_device"
] | [((3478, 3498), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (3496, 3498), False, 'from colossalai.utils import get_current_device\n'), ((5654, 5666), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (5663, 5666), False, 'import math\n'), ((3345, 3434), 'torch.nn.Embedding', 'nn.Embedding', (['num_embeddings', 'embedding_dim', '*args'], {'padding_idx': 'padding_idx'}), '(num_embeddings, embedding_dim, *args, padding_idx=padding_idx,\n **kwargs)\n', (3357, 3434), False, 'from torch import dtype, nn\n')] |
import torch.distributed as dist
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from colossalai.core import global_context as gpc
from colossalai.registry import GRADIENT_HANDLER
from colossalai.global_variables import moe_env
from ._base_gradient_handler import BaseGradientHandler
from ...context.parallel_mode import ParallelMode
@GRADIENT_HANDLER.register_module
class MoeGradientHandler(BaseGradientHandler):
    """A helper class to handle all-reduce operations in a data parallel group and
    moe tensor parallel. A all-reduce collective communication will be operated in
    :func:`handle_gradient` among a data parallel group.
    For better performance, it bucketizes the gradients of all parameters that are
    the same type to improve the efficiency of communication.
    """

    def handle_gradient(self):
        """A method running an all-reduce operation in a data parallel group.
        Then running an all-reduce operation for all parameters in experts
        across moe tensor parallel group
        """
        moe_data = moe_env.data_parallel_size
        global_data = gpc.data_parallel_size

        if global_data > 1:
            # bucketize and all-reduce
            buckets = {}
            # Pack the buckets: group non-MoE parameters (those without a
            # `moe_param` attribute) that have gradients, keyed by tensor type.
            for param in self._model.parameters():
                if param.requires_grad and \
                        param.grad is not None and \
                        not hasattr(param, 'moe_param'):
                    tp = param.data.type()
                    if tp not in buckets:
                        buckets[tp] = []
                    buckets[tp].append(param)
                    # param.main_grad = param.grad

            # For each bucket, all-reduce and copy all-reduced grads.
            for tp in buckets:
                bucket = buckets[tp]
                grads = [param.grad.data for param in bucket]
                # Flatten the whole bucket so it moves in a single collective.
                coalesced = _flatten_dense_tensors(grads)
                # Pre-divide by world size so the sum-reduce yields an average.
                coalesced /= gpc.get_world_size(ParallelMode.DATA)
                dist.all_reduce(
                    coalesced, group=gpc.get_group(ParallelMode.DATA))
                for buf, synced in zip(grads, _unflatten_dense_tensors(
                        coalesced, grads)):
                    buf.copy_(synced)

        if global_data > 1:
            # MoE expert gradients are averaged over the MOE_DATA group instead.
            # NOTE(review): this branch is also gated on `global_data > 1`; confirm
            # it is intended to be skipped when only the MoE group has >1 member.
            for param in self._model.parameters():
                if not param.requires_grad or param.grad is None:
                    continue
                if moe_data > 1 and hasattr(param, 'moe_param'):
                    param.grad.data /= moe_data
                    dist.all_reduce(param.grad.data,
                                    group=gpc.get_group(ParallelMode.MOE_DATA))
| [
"colossalai.core.global_context.get_world_size",
"colossalai.core.global_context.get_group"
] | [((1975, 2004), 'torch._utils._flatten_dense_tensors', '_flatten_dense_tensors', (['grads'], {}), '(grads)\n', (1997, 2004), False, 'from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors\n'), ((2035, 2072), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (2053, 2072), True, 'from colossalai.core import global_context as gpc\n'), ((2228, 2270), 'torch._utils._unflatten_dense_tensors', '_unflatten_dense_tensors', (['coalesced', 'grads'], {}), '(coalesced, grads)\n', (2252, 2270), False, 'from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors\n'), ((2147, 2179), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (2160, 2179), True, 'from colossalai.core import global_context as gpc\n'), ((2730, 2766), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.MOE_DATA'], {}), '(ParallelMode.MOE_DATA)\n', (2743, 2766), True, 'from colossalai.core import global_context as gpc\n')] |
from colossalai.utils import get_current_device
from torch import nn
from colossalai import kernel
from ... import init as init
from ..parallel_1d import *
from ..parallel_2d import *
from ..parallel_2p5d import *
from ..parallel_3d import *
from ..utils import get_tensor_parallel_mode
from ..vanilla import *
# Maps the active tensor-parallel mode name to the matching LayerNorm implementation.
_parallel_layernorm = {
    '1d': kernel.LayerNorm,
    '2d': LayerNorm2D,
    '2.5d': LayerNorm2p5D,
    '3d': LayerNorm3D
}
class LayerNorm(nn.Module):
    r"""Layer Normalization for colossalai.

    Delegates to ``torch.nn.LayerNorm`` when no tensor parallelism is active,
    otherwise to the parallel implementation registered for the current mode.

    Args:
        normalized_shape (int): input shape from an expected input of size.
            If a single integer is used, it is treated as a singleton list, and this
            module will normalize over the last dimension, which is expected to be
            of that specific size.
        eps (float, optional): a value added to the denominator for numerical stability, defaults to 1e-05
        dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
    """

    def __init__(self, normalized_shape: int, eps=1e-05, dtype=None) -> None:
        super().__init__()
        parallel_mode = get_tensor_parallel_mode()
        if parallel_mode is not None:
            # Use the implementation registered for this tensor-parallel mode.
            self.norm = _parallel_layernorm[parallel_mode](normalized_shape, eps=eps, dtype=dtype)
        else:
            # Plain PyTorch layer norm, cast to the requested dtype on the current device.
            vanilla_norm = nn.LayerNorm(normalized_shape, eps=eps)
            self.norm = vanilla_norm.to(dtype).to(get_current_device())

    @property
    def weight(self):
        """Affine scale parameter of the wrapped norm."""
        return self.norm.weight

    @property
    def bias(self):
        """Affine shift parameter of the wrapped norm."""
        return self.norm.bias

    def forward(self, *args):
        return self.norm(*args)
| [
"colossalai.utils.get_current_device"
] | [((1445, 1465), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (1463, 1465), False, 'from colossalai.utils import get_current_device\n'), ((1392, 1431), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['normalized_shape'], {'eps': 'eps'}), '(normalized_shape, eps=eps)\n', (1404, 1431), False, 'from torch import nn\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from copy import deepcopy
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.utils import free_port
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)
from colossalai.zero.sharded_model import ShardedModelV2
from tests.components_to_test.registry import non_distributed_component_funcs
from common import CONFIG
def run_dist(rank, world_size, port, shard_strategy):
    """Worker: launch colossalai, then check each model's state dict is reproduced by its sharded copy."""
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    shard_strategy = shard_strategy()
    for model_name in ['repeated_computed_layers', 'resnet18']:
        get_components_func = non_distributed_component_funcs.get_callable(model_name)
        model_builder, train_dataloader, test_dataloader, optimizer, criterion = get_components_func()
        reference = model_builder().half().cuda()
        zero_model = ShardedModelV2(deepcopy(reference), shard_strategy)
        sharded_state = zero_model.state_dict()
        # Every entry of the reference state dict must round-trip exactly.
        for name, tensor in reference.state_dict().items():
            assert torch.equal(tensor, sharded_state[name])
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
@pytest.mark.parametrize("shard_strategy", [TensorShardStrategy, BucketTensorShardStrategy])
def test_zero_state_dict(world_size, shard_strategy):
    """Spawn one process per rank and verify sharded state-dict reconstruction."""
    worker = partial(run_dist, world_size=world_size, port=free_port(), shard_strategy=shard_strategy)
    mp.spawn(worker, nprocs=world_size)


if __name__ == '__main__':
    test_zero_state_dict(2, TensorShardStrategy)
| [
"colossalai.launch",
"colossalai.utils.free_port"
] | [((1288, 1333), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 2]'], {}), "('world_size', [1, 2])\n", (1311, 1333), False, 'import pytest\n'), ((1335, 1430), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shard_strategy"""', '[TensorShardStrategy, BucketTensorShardStrategy]'], {}), "('shard_strategy', [TensorShardStrategy,\n BucketTensorShardStrategy])\n", (1358, 1430), False, 'import pytest\n'), ((536, 652), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'CONFIG', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=CONFIG, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (553, 652), False, 'import colossalai\n'), ((1590, 1627), 'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (1598, 1627), True, 'import torch.multiprocessing as mp\n'), ((810, 866), 'tests.components_to_test.registry.non_distributed_component_funcs.get_callable', 'non_distributed_component_funcs.get_callable', (['model_name'], {}), '(model_name)\n', (854, 866), False, 'from tests.components_to_test.registry import non_distributed_component_funcs\n'), ((1074, 1089), 'copy.deepcopy', 'deepcopy', (['model'], {}), '(model)\n', (1082, 1089), False, 'from copy import deepcopy\n'), ((1228, 1266), 'torch.equal', 'torch.equal', (['val', 'zero_state_dict[key]'], {}), '(val, zero_state_dict[key])\n', (1239, 1266), False, 'import torch\n'), ((1542, 1553), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (1551, 1553), False, 'from colossalai.utils import free_port\n')] |
import os
from typing import Callable, List, Optional, Type, Union
import torch
import torch.nn as nn
import colossalai
import colossalai.nn as col_nn
from colossalai.core import global_context as gpc
from colossalai.logging import disable_existing_loggers, get_dist_logger
from colossalai.trainer import Trainer, hooks
from colossalai.utils import MultiTimer, get_dataloader
from colossalai.context import ParallelMode
from colossalai.utils.model.pipelinable import PipelinableContext
from titans.dataloader.cifar10 import build_cifar
from torchvision.models import resnet50
from torchvision.models.resnet import BasicBlock, Bottleneck, conv1x1
# Train
# Hyper-parameters for this pipeline-parallel ResNet-50 smoke-test run.
BATCH_SIZE = 64    # samples per batch fed to the dataloaders
NUM_EPOCHS = 2     # short run — this script validates plumbing, not accuracy
NUM_CHUNKS = 1     # model chunks per pipeline stage when partitioning
# 4 micro-batches per step, 2-stage pipeline parallelism.
CONFIG = dict(NUM_MICRO_BATCHES=4, parallel=dict(pipeline=2))
def train():
    """Train ResNet-50 on CIFAR-10 with 2-stage pipeline parallelism."""
    disable_existing_loggers()
    args = colossalai.get_default_parser().parse_args()
    colossalai.launch_from_torch(backend=args.backend, config=CONFIG)
    logger = get_dist_logger()

    # Trace the model so it can be partitioned across pipeline stages.
    pipelinable = PipelinableContext()
    with pipelinable:
        model = resnet50()

    exec_seq = [
        'conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4', 'avgpool',
        (lambda x: torch.flatten(x, 1), "behind"), 'fc'
    ]
    pipelinable.to_layer_list(exec_seq)
    model = pipelinable.partition(NUM_CHUNKS, gpc.pipeline_parallel_size, gpc.get_local_rank(ParallelMode.PIPELINE))

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

    data_root = os.environ.get('DATA', './data')
    train_dataloader, test_dataloader = build_cifar(BATCH_SIZE, data_root, padding=4, crop=32, resize=32)
    lr_scheduler = col_nn.lr_scheduler.LinearWarmupLR(optimizer, NUM_EPOCHS, warmup_steps=1)

    engine, train_dataloader, test_dataloader, lr_scheduler = colossalai.initialize(
        model, optimizer, criterion, train_dataloader, test_dataloader, lr_scheduler)

    trainer = Trainer(engine=engine, timer=MultiTimer(), logger=logger)
    hook_list = [
        hooks.LossHook(),
        hooks.AccuracyHook(col_nn.metric.Accuracy()),
        hooks.LogMetricByEpochHook(logger),
        hooks.LRSchedulerHook(lr_scheduler, by_epoch=True),
    ]
    trainer.fit(train_dataloader=train_dataloader,
                epochs=NUM_EPOCHS,
                test_dataloader=test_dataloader,
                test_interval=1,
                hooks=hook_list,
                display_progress=True)


if __name__ == '__main__':
    train()
| [
"colossalai.trainer.hooks.LogMetricByEpochHook",
"colossalai.get_default_parser",
"colossalai.utils.model.pipelinable.PipelinableContext",
"colossalai.nn.lr_scheduler.LinearWarmupLR",
"colossalai.nn.metric.Accuracy",
"colossalai.utils.MultiTimer",
"colossalai.logging.get_dist_logger",
"colossalai.core... | [((785, 811), 'colossalai.logging.disable_existing_loggers', 'disable_existing_loggers', ([], {}), '()\n', (809, 811), False, 'from colossalai.logging import disable_existing_loggers, get_dist_logger\n'), ((825, 856), 'colossalai.get_default_parser', 'colossalai.get_default_parser', ([], {}), '()\n', (854, 856), False, 'import colossalai\n'), ((892, 957), 'colossalai.launch_from_torch', 'colossalai.launch_from_torch', ([], {'backend': 'args.backend', 'config': 'CONFIG'}), '(backend=args.backend, config=CONFIG)\n', (920, 957), False, 'import colossalai\n'), ((971, 988), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (986, 988), False, 'from colossalai.logging import disable_existing_loggers, get_dist_logger\n'), ((1007, 1027), 'colossalai.utils.model.pipelinable.PipelinableContext', 'PipelinableContext', ([], {}), '()\n', (1025, 1027), False, 'from colossalai.utils.model.pipelinable import PipelinableContext\n'), ((1466, 1487), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1485, 1487), True, 'import torch.nn as nn\n'), ((1602, 1634), 'os.environ.get', 'os.environ.get', (['"""DATA"""', '"""./data"""'], {}), "('DATA', './data')\n", (1616, 1634), False, 'import os\n'), ((1675, 1735), 'titans.dataloader.cifar10.build_cifar', 'build_cifar', (['BATCH_SIZE', 'root'], {'padding': '(4)', 'crop': '(32)', 'resize': '(32)'}), '(BATCH_SIZE, root, padding=4, crop=32, resize=32)\n', (1686, 1735), False, 'from titans.dataloader.cifar10 import build_cifar\n'), ((1756, 1829), 'colossalai.nn.lr_scheduler.LinearWarmupLR', 'col_nn.lr_scheduler.LinearWarmupLR', (['optimizer', 'NUM_EPOCHS'], {'warmup_steps': '(1)'}), '(optimizer, NUM_EPOCHS, warmup_steps=1)\n', (1790, 1829), True, 'import colossalai.nn as col_nn\n'), ((1892, 1995), 'colossalai.initialize', 'colossalai.initialize', (['model', 'optimizer', 'criterion', 'train_dataloader', 'test_dataloader', 'lr_scheduler'], {}), '(model, optimizer, criterion, 
train_dataloader,\n test_dataloader, lr_scheduler)\n', (1913, 1995), False, 'import colossalai\n'), ((2172, 2184), 'colossalai.utils.MultiTimer', 'MultiTimer', ([], {}), '()\n', (2182, 2184), False, 'from colossalai.utils import MultiTimer, get_dataloader\n'), ((2200, 2250), 'colossalai.trainer.Trainer', 'Trainer', ([], {'engine': 'engine', 'timer': 'timer', 'logger': 'logger'}), '(engine=engine, timer=timer, logger=logger)\n', (2207, 2250), False, 'from colossalai.trainer import Trainer, hooks\n'), ((1085, 1095), 'torchvision.models.resnet50', 'resnet50', ([], {}), '()\n', (1093, 1095), False, 'from torchvision.models import resnet50\n'), ((1384, 1425), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (1402, 1425), True, 'from colossalai.core import global_context as gpc\n'), ((2278, 2294), 'colossalai.trainer.hooks.LossHook', 'hooks.LossHook', ([], {}), '()\n', (2292, 2294), False, 'from colossalai.trainer import Trainer, hooks\n'), ((2358, 2392), 'colossalai.trainer.hooks.LogMetricByEpochHook', 'hooks.LogMetricByEpochHook', (['logger'], {}), '(logger)\n', (2384, 2392), False, 'from colossalai.trainer import Trainer, hooks\n'), ((2402, 2452), 'colossalai.trainer.hooks.LRSchedulerHook', 'hooks.LRSchedulerHook', (['lr_scheduler'], {'by_epoch': '(True)'}), '(lr_scheduler, by_epoch=True)\n', (2423, 2452), False, 'from colossalai.trainer import Trainer, hooks\n'), ((2323, 2347), 'colossalai.nn.metric.Accuracy', 'col_nn.metric.Accuracy', ([], {}), '()\n', (2345, 2347), True, 'import colossalai.nn as col_nn\n'), ((1227, 1246), 'torch.flatten', 'torch.flatten', (['x', '(1)'], {}), '(x, 1)\n', (1240, 1246), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from pathlib import Path
import pytest
import torch
from colossalai.builder import build_model
from colossalai.context import Config
CONFIG_PATH = Path(__file__).parent.joinpath('configs/vanilla_vit.py')
@pytest.mark.cpu
def test_with_vanilla_vit_config():
    """Build the vanilla ViT from its config and run one forward/backward smoke pass."""
    cfg = Config.from_file(CONFIG_PATH)
    model = build_model(cfg.model)
    model.build_from_cfg()
    dummy_input = torch.randn(1, 3, cfg.IMG_SIZE, cfg.IMG_SIZE)
    loss = model(dummy_input).mean()
    loss.backward()


if __name__ == '__main__':
    test_with_vanilla_vit_config()
| [
"colossalai.builder.build_model",
"colossalai.context.Config.from_file"
] | [((324, 353), 'colossalai.context.Config.from_file', 'Config.from_file', (['CONFIG_PATH'], {}), '(CONFIG_PATH)\n', (340, 353), False, 'from colossalai.context import Config\n'), ((366, 391), 'colossalai.builder.build_model', 'build_model', (['config.model'], {}), '(config.model)\n', (377, 391), False, 'from colossalai.builder import build_model\n'), ((430, 481), 'torch.randn', 'torch.randn', (['(1)', '(3)', 'config.IMG_SIZE', 'config.IMG_SIZE'], {}), '(1, 3, config.IMG_SIZE, config.IMG_SIZE)\n', (441, 481), False, 'import torch\n'), ((199, 213), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (203, 213), False, 'from pathlib import Path\n')] |
import torch.nn as nn
from torch.optim import Optimizer
from colossalai.utils import is_no_pp_or_last_stage
from .naive_amp import NaiveAMPOptimizer, NaiveAMPModel
def convert_to_naive_amp(model: nn.Module,
                         optimizer: Optimizer,
                         amp_config):
    """A helper function to wrap training components with naive AMP modules.

    :param model: your model object
    :type model: :class:`torch.nn.Module`
    :param optimizer: your optimizer object
    :type optimizer: :class:`torch.optim.Optimizer`
    :param amp_config: configuration for naive mode amp
    :type amp_config: :class:`colossalai.context.Config` or dict
    :return: (model, optimizer)
    :rtype: Tuple
    """
    # Only the last pipeline stage (or a run without pipelining) converts outputs back to fp32.
    output_to_fp32 = is_no_pp_or_last_stage()
    model = NaiveAMPModel(model, output_to_fp32=output_to_fp32)
    optimizer = NaiveAMPOptimizer(optimizer, **amp_config)
    return model, optimizer


__all__ = ['convert_to_naive_amp', 'NaiveAMPOptimizer']
| [
"colossalai.utils.is_no_pp_or_last_stage"
] | [((732, 756), 'colossalai.utils.is_no_pp_or_last_stage', 'is_no_pp_or_last_stage', ([], {}), '()\n', (754, 756), False, 'from colossalai.utils import is_no_pp_or_last_stage\n')] |
import torch
from colossalai.tensor import ColoTensor
from numpy import allclose
def test_tensor_indexing():
    """Slicing a ColoTensor must match slicing the underlying torch tensor."""
    reference = torch.randn(2, 3)
    wrapped = ColoTensor.init_from_torch_tensor(reference)
    assert allclose(reference[:, 1], wrapped[:, 1].torch_tensor())
def test_lazy_init_tensor():
    """A lazily created ColoTensor holds no storage until it is materialized."""
    lazy_tensor = ColoTensor(2, 3, dtype=torch.float32, requires_grad=True)
    # Before materialization the backing torch tensor is empty.
    assert lazy_tensor._torch_tensor.numel() == 0
    # The logical element count is 2 x 3 = 6, and materializing matches it.
    assert lazy_tensor.numel() == 6
    assert lazy_tensor.torch_tensor().numel() == 6
def test_wrapped_tensor_func():
    """Torch functions routed through ColoTensor keep the expected return kinds."""
    reference = torch.randn(4, 5)
    wrapped = ColoTensor.init_from_torch_tensor(reference.clone())

    # Non-callable attribute access is forwarded unchanged.
    assert wrapped.is_cuda == reference.is_cuda

    # TODO I don't find out a tensor function which returns None.

    # A function returning a single torch.Tensor is re-wrapped as a ColoTensor.
    wrapped_abs = wrapped.abs()
    assert isinstance(wrapped_abs, ColoTensor)
    assert torch.equal(wrapped_abs.torch_tensor(), reference.abs())

    # A function returning a non-tensor value is passed through as-is.
    assert wrapped.dim() == reference.dim()

    # A function returning several tensors wraps each of them.
    first_half, second_half = wrapped.split(2)
    assert isinstance(first_half, ColoTensor)
    assert isinstance(second_half, ColoTensor)
def test_operand():
    """Arithmetic operators on ColoTensor match plain torch arithmetic."""
    reference = torch.randn(4, 5)
    wrapped = ColoTensor.init_from_torch_tensor(reference.clone())
    expected = reference + reference
    actual = wrapped + wrapped
    assert torch.allclose(expected, actual)
| [
"colossalai.tensor.ColoTensor",
"colossalai.tensor.ColoTensor.init_from_torch_tensor"
] | [((125, 142), 'torch.randn', 'torch.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (136, 142), False, 'import torch\n'), ((156, 198), 'colossalai.tensor.ColoTensor.init_from_torch_tensor', 'ColoTensor.init_from_torch_tensor', (['torch_t'], {}), '(torch_t)\n', (189, 198), False, 'from colossalai.tensor import ColoTensor\n'), ((307, 364), 'colossalai.tensor.ColoTensor', 'ColoTensor', (['(2)', '(3)'], {'dtype': 'torch.float32', 'requires_grad': '(True)'}), '(2, 3, dtype=torch.float32, requires_grad=True)\n', (317, 364), False, 'from colossalai.tensor import ColoTensor\n'), ((520, 537), 'torch.randn', 'torch.randn', (['(4)', '(5)'], {}), '(4, 5)\n', (531, 537), False, 'import torch\n'), ((1110, 1127), 'torch.randn', 'torch.randn', (['(4)', '(5)'], {}), '(4, 5)\n', (1121, 1127), False, 'import torch\n'), ((1245, 1277), 'torch.allclose', 'torch.allclose', (['t_ref_res', 't_res'], {}), '(t_ref_res, t_res)\n', (1259, 1277), False, 'import torch\n')] |
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Union, List
from colossalai.core import global_context as gpc
# copied from high version pytorch to support low version
def _format_time(time_us):
"""Defines how to format time in FunctionEvent"""
US_IN_SECOND = 1000.0 * 1000.0
US_IN_MS = 1000.0
if time_us >= US_IN_SECOND:
return '{:.3f}s'.format(time_us / US_IN_SECOND)
if time_us >= US_IN_MS:
return '{:.3f}ms'.format(time_us / US_IN_MS)
return '{:.3f}us'.format(time_us)
# copied from high version pytorch to support low version
def _format_memory(nbytes):
"""Returns a formatted memory size string"""
KB = 1024
MB = 1024 * KB
GB = 1024 * MB
if (abs(nbytes) >= GB):
return '{:.2f} GB'.format(nbytes * 1.0 / GB)
elif (abs(nbytes) >= MB):
return '{:.2f} MB'.format(nbytes * 1.0 / MB)
elif (abs(nbytes) >= KB):
return '{:.2f} KB'.format(nbytes * 1.0 / KB)
else:
return str(nbytes) + ' B'
def _format_bandwidth(volme: float or int, time_us: int):
sec_div_mb = (1000.0 / 1024.0)**2
mb_per_sec = volme / time_us * sec_div_mb
if mb_per_sec >= 1024.0:
return '{:.3f} GB/s'.format(mb_per_sec / 1024.0)
else:
return '{:.3f} MB/s'.format(mb_per_sec)
class BaseProfiler(ABC):
    """Abstract interface for a profiler driven by :class:`ProfilerContext`."""

    def __init__(self, profiler_name: str, priority: int):
        # Lower `priority` values run earlier: ProfilerContext sorts ascending.
        self.name = profiler_name
        self.priority = priority

    @abstractmethod
    def enable(self):
        """Begin collecting profiling data."""
        pass

    @abstractmethod
    def disable(self):
        """Stop collecting profiling data."""
        pass

    @abstractmethod
    def to_tensorboard(self, writer):
        """Write collected results to a tensorboard writer."""
        pass

    @abstractmethod
    def to_file(self, filename: Path):
        """Write collected results to ``filename``."""
        pass

    @abstractmethod
    def show(self):
        """Display collected results."""
        pass
class ProfilerContext(object):
    """Context manager that drives a group of profilers together.

    Profilers are enabled on entry and disabled on exit, in ascending
    ``priority`` order.

    Usage:
    ::

        ```python
        world_size = 4
        inputs = torch.randn(10, 10, dtype=torch.float32, device=get_current_device())
        outputs = torch.empty(world_size, 10, 10, dtype=torch.float32, device=get_current_device())
        outputs_list = list(torch.chunk(outputs, chunks=world_size, dim=0))

        cc_prof = CommProfiler()

        with ProfilerContext([cc_prof]) as prof:
            op = dist.all_reduce(inputs, async_op=True)
            dist.all_gather(outputs_list, inputs)
            op.wait()
            dist.reduce_scatter(inputs, outputs_list)
            dist.broadcast(inputs, 0)
            dist.reduce(inputs, 0)

        prof.show()
        ```
    """

    def __init__(self, profilers: List[BaseProfiler] = None, enable: bool = True):
        self.enable = enable
        # Deterministic execution order: lowest priority value first.
        self.profilers = sorted(profilers, key=lambda prof: prof.priority)

    def __enter__(self):
        if self.enable:
            for profiler in self.profilers:
                profiler.enable()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.enable:
            for profiler in self.profilers:
                profiler.disable()

    def to_tensorboard(self, writer):
        from torch.utils.tensorboard import SummaryWriter
        assert isinstance(writer, SummaryWriter), \
            f'torch.utils.tensorboard.SummaryWriter is required, but found {type(writer)}.'
        for profiler in self.profilers:
            profiler.to_tensorboard(writer)

    def to_file(self, log_dir: Union[str, Path]):
        if isinstance(log_dir, str):
            log_dir = Path(log_dir)
        if not log_dir.exists():
            log_dir.mkdir(parents=True, exist_ok=True)
        # One log file per profiler, tagged with this process's global rank.
        rank = gpc.get_global_rank()
        for profiler in self.profilers:
            profiler.to_file(log_dir.joinpath(f'{profiler.name}_rank_{rank}.log'))

    def show(self):
        for profiler in self.profilers:
            profiler.show()
| [
"colossalai.core.global_context.get_global_rank"
] | [((3634, 3647), 'pathlib.Path', 'Path', (['log_dir'], {}), '(log_dir)\n', (3638, 3647), False, 'from pathlib import Path\n'), ((3838, 3859), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (3857, 3859), True, 'from colossalai.core import global_context as gpc\n')] |
import torch
import os
import pickle
from colossalai.logging import get_dist_logger
from colossalai.core import global_context as gpc
from torch.utils.data import TensorDataset
from processors import convert_examples_to_features
def gen_tensor_dataset(features):
    """Pack a list of feature objects into a :class:`TensorDataset`.

    Each feature must expose ``input_ids``, ``input_mask``, ``segment_ids``
    and ``label_id``; the four resulting long tensors share their first dim.
    """
    def as_long_tensor(column):
        # All four columns are integer-valued, so a single dtype suffices.
        return torch.tensor(column, dtype=torch.long)

    return TensorDataset(
        as_long_tensor([f.input_ids for f in features]),
        as_long_tensor([f.input_mask for f in features]),
        as_long_tensor([f.segment_ids for f in features]),
        as_long_tensor([f.label_id for f in features]),
    )
def get_train_features(data_dir, vocab_file, max_seq_length, do_lower_case, tokenizer, processor):
    """Load pre-processed training features from cache, or build and cache them.

    The cache file name encodes the vocab file, max sequence length and casing
    so a stale cache is not reused across configurations.

    :param data_dir: directory holding the raw data and the cache file
    :param vocab_file: vocabulary file name (part of the cache key)
    :param max_seq_length: maximum tokenized sequence length (part of the cache key)
    :param do_lower_case: whether the tokenizer lower-cases (part of the cache key)
    :param tokenizer: tokenizer used when features must be rebuilt
    :param processor: dataset processor providing examples and labels
    :return: list of training features
    """
    cached_train_features_file = os.path.join(
        data_dir,
        '{0}_{1}_{2}'.format(
            vocab_file,
            str(max_seq_length),
            str(do_lower_case),
        ),
    )
    logger = get_dist_logger()
    try:
        with open(cached_train_features_file, "rb") as reader:
            train_features = pickle.load(reader)
        logger.info("Loaded pre-processed features from {}".format(cached_train_features_file))
    except Exception:
        # Fixed: was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit. Any failure to read the cache falls back to a rebuild.
        logger.info("Did not find pre-processed features from {}".format(cached_train_features_file))
        train_examples = processor.get_train_examples(data_dir)
        train_features, _ = convert_examples_to_features(
            train_examples,
            processor.get_labels(),
            max_seq_length,
            tokenizer,
        )
        # Only the global rank-0 process writes the cache to avoid races.
        if gpc.get_global_rank() == 0:
            logger.info(" Saving train features into cached file %s", cached_train_features_file)
            with open(cached_train_features_file, "wb") as writer:
                pickle.dump(train_features, writer)
    return train_features
| [
"colossalai.core.global_context.get_global_rank",
"colossalai.logging.get_dist_logger"
] | [((285, 348), 'torch.tensor', 'torch.tensor', (['[f.input_ids for f in features]'], {'dtype': 'torch.long'}), '([f.input_ids for f in features], dtype=torch.long)\n', (297, 348), False, 'import torch\n'), ((393, 457), 'torch.tensor', 'torch.tensor', (['[f.input_mask for f in features]'], {'dtype': 'torch.long'}), '([f.input_mask for f in features], dtype=torch.long)\n', (405, 457), False, 'import torch\n'), ((503, 568), 'torch.tensor', 'torch.tensor', (['[f.segment_ids for f in features]'], {'dtype': 'torch.long'}), '([f.segment_ids for f in features], dtype=torch.long)\n', (515, 568), False, 'import torch\n'), ((612, 674), 'torch.tensor', 'torch.tensor', (['[f.label_id for f in features]'], {'dtype': 'torch.long'}), '([f.label_id for f in features], dtype=torch.long)\n', (624, 674), False, 'import torch\n'), ((709, 785), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_mask', 'all_segment_ids', 'all_label_ids'], {}), '(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n', (722, 785), False, 'from torch.utils.data import TensorDataset\n'), ((1167, 1184), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (1182, 1184), False, 'from colossalai.logging import get_dist_logger\n'), ((1286, 1305), 'pickle.load', 'pickle.load', (['reader'], {}), '(reader)\n', (1297, 1305), False, 'import pickle\n'), ((1774, 1795), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (1793, 1795), True, 'from colossalai.core import global_context as gpc\n'), ((1984, 2019), 'pickle.dump', 'pickle.dump', (['train_features', 'writer'], {}), '(train_features, writer)\n', (1995, 2019), False, 'import pickle\n')] |
from functools import partial
import torch
import torch.distributed as dist
from colossalai.logging import get_dist_logger
from colossalai.utils import checkpoint
from colossalai.zero.shard_utils import TensorShardStrategy
from colossalai.zero.sharded_model import ShardedModelV2
# Shared logger for the zero tests in this package.
LOGGER = get_dist_logger('zero_test')

# Baseline config: no fp16, 1-stage pipeline, 2-way tensor parallelism.
MP_PARALLEL_CONFIG = dict(fp16=dict(mode=None,), parallel=dict(pipeline=dict(size=1), tensor=dict(size=2, mode=None)))

# `model_config` entries consumed via ZERO_PARALLEL_CONFIG below.
_ZERO_MODEL_CONFIG = dict(reduce_scatter_bucket_size_mb=25,
                          fp32_reduce_scatter=False,
                          offload_config=None,
                          gradient_predivide_factor=1.0,
                          use_memory_tracer=False,
                          shard_strategy=TensorShardStrategy())

# `optimizer_config` entries (loss-scaling schedule) consumed via ZERO_PARALLEL_CONFIG below.
_ZERO_OPTIMIZER_CONFIG = dict(cpu_offload=False,
                              initial_scale=2**5,
                              min_scale=1,
                              growth_factor=2,
                              backoff_factor=0.5,
                              growth_interval=1000,
                              hysteresis=2,
                              max_scale=2**32)

# Zero config without pipeline / tensor parallelism.
ZERO_PARALLEL_CONFIG = dict(fp16=dict(mode=None,),
                            zero=dict(
                                model_config=_ZERO_MODEL_CONFIG,
                                optimizer_config=_ZERO_OPTIMIZER_CONFIG,
                            ),
                            parallel=dict(pipeline=dict(size=1), tensor=dict(size=1, mode=None)))

# Level-3 zero config offloading optimizer state and parameters to CPU.
CONFIG = dict(fp16=dict(mode=None,),
              zero=dict(level=3,
                        verbose=False,
                        offload_optimizer_config=dict(device='cpu', pin_memory=True, buffer_count=5, fast_init=False),
                        offload_param_config=dict(device='cpu',
                                                  pin_memory=True,
                                                  buffer_count=5,
                                                  buffer_size=1e8,
                                                  max_in_cpu=1e9)),
              parallel=dict(pipeline=dict(size=1), tensor=dict(size=1, mode=None)))
def run_fwd_bwd(model, data, label, criterion, enable_autocast=False):
    """Run one forward/backward pass, dispatching backward through the sharded model if needed."""
    model.train()
    with torch.cuda.amp.autocast(enabled=enable_autocast):
        if not criterion:
            # No criterion: the model computes its own loss from (data, label).
            loss = model(data, label)
        else:
            prediction = model(data)
            loss = criterion(prediction, label)
        loss = loss.float()
    if isinstance(model, ShardedModelV2):
        model.backward(loss)
    else:
        loss.backward()
def checkpoint_wrapper(module, enable=True):
    """Optionally wrap ``module.forward`` with activation checkpointing; returns the module."""
    if not enable:
        return module
    module.forward = partial(checkpoint, module.forward)
    return module
def allclose(tensor_a: torch.Tensor, tensor_b: torch.Tensor, loose=False) -> bool:
    """Elementwise closeness check; ``loose=True`` relaxes tolerances to atol=1e-2, rtol=1e-3."""
    if not loose:
        return torch.allclose(tensor_a, tensor_b)
    return torch.allclose(tensor_a, tensor_b, atol=1e-2, rtol=1e-3)
def check_grads(model, zero_model, loose=False):
    """Compare gradients of a reference model against its zero-sharded copy."""
    for param, zero_param in zip(model.parameters(), zero_model.parameters()):
        zero_grad = zero_param.grad.clone().to(param.device)
        grad = param.grad.float()
        assert grad.dtype == zero_grad.dtype
        assert allclose(grad, zero_grad, loose=loose)
def check_params(model, zero_model, loose=False):
    """Compare parameters of a reference model against its zero-sharded copy."""
    for param, zero_param in zip(model.parameters(), zero_model.parameters()):
        zero_param = zero_param.clone().to(param.device)
        assert allclose(param.float(), zero_param.float(), loose=loose), \
            f"diff {param.float() - zero_param.float()}"
def check_grads_padding(model, zero_model, loose=False):
    """Compare this rank's flattened gradient chunk against the sharded model's grad."""
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    for param, zero_param in zip(model.parameters(), zero_model.parameters()):
        zero_grad = zero_param.grad.clone().to(param.device)
        chunks = torch.flatten(param.grad).chunk(world_size)
        if rank >= len(chunks):
            continue
        grad = chunks[rank].float()
        # The sharded grad may be zero-padded; trim it to the reference chunk length.
        if zero_grad.size(0) > grad.size(0):
            zero_grad = zero_grad[:grad.size(0)]
        assert grad.dtype == zero_grad.dtype
        assert allclose(grad, zero_grad, loose=loose), f'diff: {grad - zero_grad}'
def check_params_padding(model, zero_model, loose=False):
    """Compare this rank's flattened parameter chunk against the (possibly padded) sharded parameter."""
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    for param, zero_param in zip(model.parameters(), zero_model.parameters()):
        zero_param = zero_param.clone().to(param.device)
        chunks = torch.flatten(param).chunk(world_size)
        if rank >= len(chunks):
            continue
        chunk = chunks[rank]
        # Trim padding added by the sharding scheme.
        if zero_param.size(0) > chunk.size(0):
            zero_param = zero_param[:chunk.size(0)]
        assert chunk.dtype == zero_param.dtype
        assert allclose(chunk, zero_param, loose=loose)
def check_sharded_params_padding(model, zero_model, loose=False):
    """Compare this rank's parameter chunk against the sharded model's stored payload."""
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    for param, zero_param in zip(model.parameters(), zero_model.parameters()):
        payload = zero_param.col_attr.data.payload.to(param.device).float()
        chunks = torch.flatten(param).chunk(world_size)
        if rank >= len(chunks):
            continue
        chunk = chunks[rank].float()
        # Trim padding added by the sharding scheme.
        if payload.size(0) > chunk.size(0):
            payload = payload[:chunk.size(0)]
        assert chunk.dtype == payload.dtype
        assert allclose(chunk, payload, loose=loose), f'{chunk} vs {payload}'
| [
"colossalai.zero.shard_utils.TensorShardStrategy",
"colossalai.logging.get_dist_logger"
] | [((291, 319), 'colossalai.logging.get_dist_logger', 'get_dist_logger', (['"""zero_test"""'], {}), "('zero_test')\n", (306, 319), False, 'from colossalai.logging import get_dist_logger\n'), ((2906, 2940), 'torch.allclose', 'torch.allclose', (['tensor_a', 'tensor_b'], {}), '(tensor_a, tensor_b)\n', (2920, 2940), False, 'import torch\n'), ((3626, 3641), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (3639, 3641), True, 'import torch.distributed as dist\n'), ((4216, 4231), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (4229, 4231), True, 'import torch.distributed as dist\n'), ((4734, 4749), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (4747, 4749), True, 'import torch.distributed as dist\n'), ((750, 771), 'colossalai.zero.shard_utils.TensorShardStrategy', 'TensorShardStrategy', ([], {}), '()\n', (769, 771), False, 'from colossalai.zero.shard_utils import TensorShardStrategy\n'), ((2259, 2307), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {'enabled': 'enable_autocast'}), '(enabled=enable_autocast)\n', (2282, 2307), False, 'import torch\n'), ((2670, 2705), 'functools.partial', 'partial', (['checkpoint', 'module.forward'], {}), '(checkpoint, module.forward)\n', (2677, 2705), False, 'from functools import partial\n'), ((2838, 2895), 'torch.allclose', 'torch.allclose', (['tensor_a', 'tensor_b'], {'atol': '(0.01)', 'rtol': '(0.001)'}), '(tensor_a, tensor_b, atol=0.01, rtol=0.001)\n', (2852, 2895), False, 'import torch\n'), ((3811, 3832), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (3830, 3832), True, 'import torch.distributed as dist\n'), ((4388, 4409), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (4407, 4409), True, 'import torch.distributed as dist\n'), ((4928, 4949), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (4947, 4949), True, 'import torch.distributed as dist\n'), ((3783, 3804), 'torch.flatten', 
'torch.flatten', (['p.grad'], {}), '(p.grad)\n', (3796, 3804), False, 'import torch\n'), ((4365, 4381), 'torch.flatten', 'torch.flatten', (['p'], {}), '(p)\n', (4378, 4381), False, 'import torch\n'), ((4905, 4921), 'torch.flatten', 'torch.flatten', (['p'], {}), '(p)\n', (4918, 4921), False, 'import torch\n')] |
from pathlib import Path
from colossalai.logging import get_dist_logger
import colossalai
import torch
import os
from colossalai.core import global_context as gpc
from colossalai.utils import get_dataloader
from torchvision import transforms
from colossalai.nn.lr_scheduler import CosineAnnealingLR
from torchvision.datasets import CIFAR10
from torchvision.models import resnet34
from tqdm import tqdm
def main():
    """Train ResNet-34 on CIFAR-10 for a few iterations as an engine smoke test."""
    colossalai.launch_from_torch(config='./config.py')
    logger = get_dist_logger()

    model = resnet34(num_classes=10)

    # CIFAR-10 channel statistics used for input normalization.
    normalize = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                                     std=[0.2023, 0.1994, 0.2010])
    train_dataset = CIFAR10(
        root=Path(os.environ['DATA']),
        download=True,
        transform=transforms.Compose([
            transforms.RandomCrop(size=32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    test_dataset = CIFAR10(
        root=Path(os.environ['DATA']),
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))

    train_dataloader = get_dataloader(dataset=train_dataset,
                                      shuffle=True,
                                      batch_size=gpc.config.BATCH_SIZE,
                                      num_workers=1,
                                      pin_memory=True)
    test_dataloader = get_dataloader(dataset=test_dataset,
                                     add_sampler=False,
                                     batch_size=gpc.config.BATCH_SIZE,
                                     num_workers=1,
                                     pin_memory=True)

    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    lr_scheduler = CosineAnnealingLR(optimizer, total_steps=gpc.config.NUM_EPOCHS)

    engine, train_dataloader, test_dataloader, _ = colossalai.initialize(
        model, optimizer, criterion, train_dataloader, test_dataloader)

    # Run a handful of iterations to verify gradient accumulation plumbing.
    engine.train()
    for step, (img, label) in enumerate(train_dataloader):
        img, label = img.cuda(), label.cuda()
        engine.zero_grad()
        output = engine(img)
        train_loss = engine.criterion(output, label)
        engine.backward(train_loss)
        engine.step()
        lr_scheduler.step()
        logger.info(f'iteration {step}, loss: {train_loss}')
        # only run for 4 iterations
        if step == 3:
            break


if __name__ == '__main__':
    main()
| [
"colossalai.utils.get_dataloader",
"colossalai.launch_from_torch",
"colossalai.nn.lr_scheduler.CosineAnnealingLR",
"colossalai.initialize",
"colossalai.logging.get_dist_logger"
] | [((420, 470), 'colossalai.launch_from_torch', 'colossalai.launch_from_torch', ([], {'config': '"""./config.py"""'}), "(config='./config.py')\n", (448, 470), False, 'import colossalai\n'), ((485, 502), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (500, 502), False, 'from colossalai.logging import get_dist_logger\n'), ((535, 559), 'torchvision.models.resnet34', 'resnet34', ([], {'num_classes': '(10)'}), '(num_classes=10)\n', (543, 559), False, 'from torchvision.models import resnet34\n'), ((1381, 1503), 'colossalai.utils.get_dataloader', 'get_dataloader', ([], {'dataset': 'train_dataset', 'shuffle': '(True)', 'batch_size': 'gpc.config.BATCH_SIZE', 'num_workers': '(1)', 'pin_memory': '(True)'}), '(dataset=train_dataset, shuffle=True, batch_size=gpc.config.\n BATCH_SIZE, num_workers=1, pin_memory=True)\n', (1395, 1503), False, 'from colossalai.utils import get_dataloader\n'), ((1714, 1840), 'colossalai.utils.get_dataloader', 'get_dataloader', ([], {'dataset': 'test_dataset', 'add_sampler': '(False)', 'batch_size': 'gpc.config.BATCH_SIZE', 'num_workers': '(1)', 'pin_memory': '(True)'}), '(dataset=test_dataset, add_sampler=False, batch_size=gpc.\n config.BATCH_SIZE, num_workers=1, pin_memory=True)\n', (1728, 1840), False, 'from colossalai.utils import get_dataloader\n'), ((2062, 2089), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (2087, 2089), False, 'import torch\n'), ((2239, 2302), 'colossalai.nn.lr_scheduler.CosineAnnealingLR', 'CosineAnnealingLR', (['optimizer'], {'total_steps': 'gpc.config.NUM_EPOCHS'}), '(optimizer, total_steps=gpc.config.NUM_EPOCHS)\n', (2256, 2302), False, 'from colossalai.nn.lr_scheduler import CosineAnnealingLR\n'), ((2355, 2444), 'colossalai.initialize', 'colossalai.initialize', (['model', 'optimizer', 'criterion', 'train_dataloader', 'test_dataloader'], {}), '(model, optimizer, criterion, train_dataloader,\n test_dataloader)\n', (2376, 2444), False, 'import colossalai\n'), 
((627, 651), 'pathlib.Path', 'Path', (["os.environ['DATA']"], {}), "(os.environ['DATA'])\n", (631, 651), False, 'from pathlib import Path\n'), ((1069, 1093), 'pathlib.Path', 'Path', (["os.environ['DATA']"], {}), "(os.environ['DATA'])\n", (1073, 1093), False, 'from pathlib import Path\n'), ((744, 785), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', ([], {'size': '(32)', 'padding': '(4)'}), '(size=32, padding=4)\n', (765, 785), False, 'from torchvision import transforms\n'), ((803, 836), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (834, 836), False, 'from torchvision import transforms\n'), ((854, 875), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (873, 875), False, 'from torchvision import transforms\n'), ((893, 978), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.4914, 0.4822, 0.4465]', 'std': '[0.2023, 0.1994, 0.201]'}), '(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201]\n )\n', (913, 978), False, 'from torchvision import transforms\n'), ((1184, 1205), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1203, 1205), False, 'from torchvision import transforms\n'), ((1223, 1308), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.4914, 0.4822, 0.4465]', 'std': '[0.2023, 0.1994, 0.201]'}), '(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201]\n )\n', (1243, 1308), False, 'from torchvision import transforms\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import logging
from pathlib import Path
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
# Shared log-record layout: logger name, timestamp, level, then the message.
# Used both for the root logging config below and for the file handlers
# attached by DistributedLogger.
_FORMAT = 'colossalai - %(name)s - %(asctime)s %(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=_FORMAT)
class DistributedLogger:
    """A distributed event logger built on top of the standard :mod:`logging` package.

    Records can optionally be restricted to a subset of parallel ranks so that
    only chosen processes emit a given message.

    :param name: The name of the logger
    :type name: str
    :param level: The threshold for the logger. Logging messages which are less severe than `level`
        will be ignored
    :type level: str
    :param root_path: The root path where logs are stored
    :type root_path: str, optional
    :param mode: The mode that the file is opened in. Defaults to 'a'
    :type mode: str, optional
    """
    def __init__(self, name, level='INFO', root_path: str = None, mode='a'):
        self._logger = logging.getLogger(name)
        self._logger.setLevel(getattr(logging, level))
        if root_path is not None:
            log_dir = Path(root_path)
            # Make sure the target directory exists before opening the log file.
            log_dir.mkdir(parents=True, exist_ok=True)
            handler = logging.FileHandler(log_dir.joinpath(f'{name}.log'), mode)
            handler.setLevel(getattr(logging, level))
            handler.setFormatter(logging.Formatter(_FORMAT))
            self._logger.addHandler(handler)
    def _log(self, level, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: list = None):
        # Without a rank filter every process logs; otherwise only the listed
        # local ranks of `parallel_mode` do. The rank lookup is skipped entirely
        # when no filter is given.
        should_log = ranks is None or gpc.get_local_rank(parallel_mode) in ranks
        if should_log:
            getattr(self._logger, level)(message)
    def info(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: list = None):
        """Stores an info log message.

        :param message: The message to be logged
        :type message: str
        :param parallel_mode: The parallel mode used for logging. Defaults to ParallelMode.GLOBAL
        :type parallel_mode: :class:`colossalai.context.parallel_mode.ParallelMode`
        :param ranks: List of parallel ranks
        :type ranks: list
        """
        self._log('info', message, parallel_mode, ranks)
    def warning(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: list = None):
        """Stores a warning log message.

        :param message: The message to be logged
        :type message: str
        :param parallel_mode: The parallel mode used for logging. Defaults to ParallelMode.GLOBAL
        :type parallel_mode: :class:`colossalai.context.parallel_mode.ParallelMode`
        :param ranks: List of parallel ranks
        :type ranks: list
        """
        self._log('warning', message, parallel_mode, ranks)
    def debug(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: list = None):
        """Stores a debug log message.

        :param message: The message to be logged
        :type message: str
        :param parallel_mode: The parallel mode used for logging. Defaults to ParallelMode.GLOBAL
        :type parallel_mode: :class:`colossalai.context.parallel_mode.ParallelMode`
        :param ranks: List of parallel ranks
        :type ranks: list
        """
        self._log('debug', message, parallel_mode, ranks)
    def error(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: list = None):
        """Stores an error log message.

        :param message: The message to be logged
        :type message: str
        :param parallel_mode: The parallel mode used for logging. Defaults to ParallelMode.GLOBAL
        :type parallel_mode: :class:`colossalai.context.parallel_mode.ParallelMode`
        :param ranks: List of parallel ranks
        :type ranks: list
        """
        self._log('error', message, parallel_mode, ranks)
| [
"colossalai.core.global_context.get_local_rank"
] | [((274, 329), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '_FORMAT'}), '(level=logging.INFO, format=_FORMAT)\n', (293, 329), False, 'import logging\n'), ((952, 975), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (969, 975), False, 'import logging\n'), ((1094, 1109), 'pathlib.Path', 'Path', (['root_path'], {}), '(root_path)\n', (1098, 1109), False, 'from pathlib import Path\n'), ((1299, 1334), 'logging.FileHandler', 'logging.FileHandler', (['log_path', 'mode'], {}), '(log_path, mode)\n', (1318, 1334), False, 'import logging\n'), ((1418, 1444), 'logging.Formatter', 'logging.Formatter', (['_FORMAT'], {}), '(_FORMAT)\n', (1435, 1444), False, 'import logging\n'), ((1772, 1805), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['parallel_mode'], {}), '(parallel_mode)\n', (1790, 1805), True, 'from colossalai.core import global_context as gpc\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import math
from typing import Callable, Tuple
import torch
import torch.nn.functional as F
from colossalai.communication import broadcast
from colossalai.context import ParallelMode, seed
from colossalai.core import global_context as gpc
from colossalai.global_variables import tensor_parallel_env as env
from colossalai.nn import init as init
from colossalai.registry import LAYERS
from colossalai.utils.cuda import get_current_device
from torch import Tensor
from torch.nn.parameter import Parameter
from ..base_layer import ParallelLayer
from ..utils import divide, set_tensor_parallel_attribute_by_partition
from ._utils import (gather_forward_split_backward, get_parallel_input, reduce_grad, reduce_input, set_parallel_input,
split_forward_gather_backward)
@LAYERS.register_module
class Linear1D(torch.nn.Module):
    """
    Linear layer for 1D parallelism.

    Depending on whether the incoming activation is already split across the
    1D tensor-parallel group (as reported by ``get_parallel_input()``), this
    wrapper builds either a row-parallel or a column-parallel linear layer
    and delegates everything to it.

    :param in_features: size of each input sample
    :type in_features: int
    :param out_features: size of each output sample
    :type out_features: int
    :param bias: If set to ``False``, the layer will not learn an additive bias, defaults to True
    :type bias: bool, optional
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param gather_output: If true, the column-parallel layer all-gathers its output, defaults to False
    :type gather_output: bool, optional
    :param skip_bias_add: If set to ``True``, it will skip bias add for linear layer,
        which is preserved for kernel fusion, defaults to False
    :type skip_bias_add: bool, optional
    :param weight_initializer: The initializer of weight, defaults to kaiming uniform initializer
    :type weight_initializer: typing.Callable, optional
    :param bias_initializer: The initializer of bias, defaults to xavier uniform initializer
    :type bias_initializer: typing.Callable, optional
    """
    def __init__(self,
                 in_features: int,
                 out_features: int,
                 bias: bool = True,
                 dtype: torch.dtype = None,
                 gather_output: bool = False,
                 skip_bias_add: bool = False,
                 weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
                 bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1)):
        super().__init__()
        parallel_input = get_parallel_input()
        # Options common to both parallel flavours.
        shared_kwargs = dict(bias=bias,
                             dtype=dtype,
                             skip_bias_add=skip_bias_add,
                             weight_initializer=weight_initializer,
                             bias_initializer=bias_initializer)
        if parallel_input:
            # Input is already split along the feature dimension -> row parallelism.
            self.layer = Linear1D_Row(in_features,
                                     out_features,
                                     parallel_input=parallel_input,
                                     **shared_kwargs)
        else:
            # Full input on every rank -> column parallelism.
            self.layer = Linear1D_Col(in_features,
                                     out_features,
                                     gather_output=gather_output,
                                     **shared_kwargs)
    @property
    def weight(self):
        # Expose the wrapped layer's parameters for external access.
        return self.layer.weight
    @property
    def bias(self):
        return self.layer.bias
    def forward(self, input_: Tensor) -> Tensor:
        return self.layer(input_)
@LAYERS.register_module
class Classifier1D(ParallelLayer):
    """RowLinear with given weight
    Classifier of 1D parallelism: the weight is partitioned along the input
    feature dimension, each rank computes a partial product and the results
    are summed across the 1D parallel group.
    :param in_features: size of input features
    :type in_features: int
    :param num_classes: number of classes in the dataset
    :type num_classes: int
    :param weight: weight of the classifier, defaults to None (a new partitioned weight is allocated)
    :type weight: torch.nn.Parameter, optional
    :param bias: If set to ``False``, the layer will not learn an additive bias, defaults to ``True``
    :type bias: bool, optional
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param weight_initializer: The initializer of weight, defaults to kaiming uniform initializer
    :type weight_initializer: typing.Callable, optional
    :param bias_initializer: The initializer of bias, defaults to xavier uniform initializer
    :type bias_initializer: typing.Callable, optional
    """
    def __init__(self,
                 in_features: int,
                 num_classes: int,
                 weight: Parameter = None,
                 bias: bool = True,
                 dtype: torch.dtype = None,
                 weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
                 bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1)):
        super().__init__()
        self.in_features = in_features
        self.num_classes = num_classes
        self.parallel_input = get_parallel_input()
        # Divide the weight matrix along the last dimension.
        self.input_size_per_partition = divide(in_features, gpc.tensor_parallel_size)
        # Parameters.
        # Initialize weight.
        factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
        if weight is not None:
            # Reuse an externally supplied weight (e.g. weight tying);
            # it is not re-initialized or tagged as tensor-parallel here.
            self.weight = weight
            self.has_weight = False
        else:
            # Each rank holds a (num_classes, in_features / world_size) slice.
            self.weight = Parameter(torch.empty(self.num_classes, self.input_size_per_partition, **factory_kwargs))
            self.has_weight = True
        if bias:
            # The bias is kept in full (unpartitioned) on every rank.
            self.bias = Parameter(torch.empty(self.num_classes, **factory_kwargs))
        else:
            self.bias = None
        # Run initialization under the tensor-parallel seed context.
        with seed(ParallelMode.TENSOR):
            self.reset_parameters(weight_initializer, bias_initializer)
        self._set_tensor_parallel_attributes()
        set_parallel_input(False)
        env.vocab_parallel = False
    def reset_parameters(self, weight_initializer, bias_initializer) -> None:
        # Fan-in/fan-out use the *global* (unpartitioned) dimensions.
        fan_in, fan_out = self.in_features, self.num_classes
        if self.has_weight:
            weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
        if self.bias is not None:
            bias_initializer(self.bias, fan_in=fan_in)
            # Broadcast from the first rank of the group so every rank holds
            # an identical copy of the bias.
            broadcast(self.bias, gpc.get_ranks_in_group(ParallelMode.PARALLEL_1D)[0], ParallelMode.PARALLEL_1D)
    def _set_tensor_parallel_attributes(self):
        # Only a locally allocated weight is tagged as tensor-parallel.
        if self.has_weight:
            num_partition = gpc.get_world_size(ParallelMode.TENSOR)
            set_tensor_parallel_attribute_by_partition(self.weight, num_partition)
    def forward(self, input_: Tensor) -> Tensor:
        # Set up backprop all-reduce.
        if self.parallel_input:
            # Input is already split along the feature dimension.
            input_ = input_
        else:
            input_ = split_forward_gather_backward(input_, ParallelMode.PARALLEL_1D, dim=-1)
        # Partial product on this rank, then sum the partial results across
        # the group; the (replicated) bias is added after the reduction.
        output_parallel = F.linear(input_, self.weight)
        output = reduce_input(output_parallel, ParallelMode.PARALLEL_1D)
        if self.bias is not None:
            output = output + self.bias
        return output
@LAYERS.register_module
class VocabParallelClassifier1D(ParallelLayer):
    """ColLinear with given weight
    Classifier of 1D parallelism: the weight (and bias) are partitioned along
    the class/vocabulary dimension, so every rank produces logits for its own
    slice of the classes.
    :param in_features: size of input features
    :type in_features: int
    :param num_classes: number of classes in the dataset
    :type num_classes: int
    :param weight: weight of the classifier, defaults to None (a new partitioned weight is allocated)
    :type weight: torch.nn.Parameter, optional
    :param bias: If set to ``False``, the layer will not learn an additive bias, defaults to ``True``
    :type bias: bool, optional
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param weight_initializer: The initializer of weight, defaults to kaiming uniform initializer
    :type weight_initializer: typing.Callable, optional
    :param bias_initializer: The initializer of bias, defaults to xavier uniform initializer
    :type bias_initializer: typing.Callable, optional
    """
    def __init__(self,
                 in_features: int,
                 num_classes: int,
                 weight: Parameter = None,
                 bias: bool = True,
                 dtype: torch.dtype = None,
                 weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
                 bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1)):
        super().__init__()
        self.in_features = in_features
        self.num_classes = num_classes
        self.parallel_input = get_parallel_input()
        # Divide the weight matrix along the last dimension.
        self.num_classes_per_partition = divide(num_classes, gpc.tensor_parallel_size)
        # Parameters.
        # Initialize weight.
        factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
        if weight is not None:
            # Reuse an externally supplied weight (e.g. tied with an embedding);
            # it is not re-initialized or tagged as tensor-parallel here.
            self.weight = weight
            self.has_weight = False
        else:
            # Each rank holds a (num_classes / world_size, in_features) slice.
            self.weight = Parameter(torch.empty(self.num_classes_per_partition, self.in_features, **factory_kwargs))
            self.has_weight = True
        if bias:
            # Unlike Classifier1D, the bias is partitioned along the class
            # dimension too, so no broadcast is needed after initialization.
            self.bias = Parameter(torch.empty(self.num_classes_per_partition, **factory_kwargs))
        else:
            self.bias = None
        # Run initialization under the tensor-parallel seed context.
        with seed(ParallelMode.TENSOR):
            self.reset_parameters(weight_initializer, bias_initializer)
        self._set_tensor_parallel_attributes()
        set_parallel_input(False)
        env.vocab_parallel = True
    def reset_parameters(self, weight_initializer, bias_initializer) -> None:
        # Fan-in/fan-out use the *global* (unpartitioned) dimensions.
        fan_in, fan_out = self.in_features, self.num_classes
        if self.has_weight:
            weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
        if self.bias is not None:
            bias_initializer(self.bias, fan_in=fan_in)
    def _set_tensor_parallel_attributes(self):
        num_partition = gpc.get_world_size(ParallelMode.TENSOR)
        if self.has_weight:
            set_tensor_parallel_attribute_by_partition(self.weight, num_partition)
        if self.bias is not None:
            set_tensor_parallel_attribute_by_partition(self.bias, num_partition)
    def forward(self, input_: Tensor) -> Tensor:
        # Set up backprop all-reduce.
        input_parallel = reduce_grad(input_, ParallelMode.PARALLEL_1D)
        # Matrix multiply.
        # Output holds only this rank's slice of the classes (column parallel).
        output = F.linear(input_parallel, self.weight, self.bias)
        return output
@LAYERS.register_module
class Linear1D_Col(ParallelLayer):
    """Linear layer with column parallelism.
    The linear layer is defined as :math:`Y = XA + b`. A is parallelized along
    its second dimension as :math:`A = [A_1, ..., A_p]`.
    :param in_features: first dimension of matrix A.
    :type in_features: int
    :param out_features: second dimension of matrix A.
    :type out_features: int
    :param bias: If set to ``False``, the layer will not learn an additive bias, defaults to ``True``
    :type bias: bool, optional
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param gather_output: If true, call all-gather on output and make Y available
                          to all GPUs, otherwise, every GPU will have its output
                          which is :math:`Y_i = XA_i`, defaults to False
    :type gather_output: bool, optional
    :param skip_bias_add: If set to ``True``, it will skip bias add for linear layer,
        which is preserved for kernel fusion, defaults to False
    :type skip_bias_add: bool, optional
    :param weight_initializer: The initializer of weight, defaults to kaiming uniform initializer
    :type weight_initializer: typing.Callable, optional
    :param bias_initializer: The initializer of bias, defaults to xavier uniform initializer
    :type bias_initializer: typing.Callable, optional
    """
    def __init__(self,
                 in_features: int,
                 out_features: int,
                 bias: bool = True,
                 dtype: torch.dtype = None,
                 gather_output: bool = False,
                 skip_bias_add: bool = False,
                 weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
                 bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1)):
        super().__init__()
        # Keep input parameters
        self.in_features = in_features
        self.out_features = out_features
        self.gather_output = gather_output
        self.skip_bias_add = skip_bias_add
        if skip_bias_add and not bias:
            raise ValueError('cannot skip bias addition if bias is None')
        # Output features are split across the tensor-parallel group.
        self.out_features_per_partition = divide(out_features, gpc.tensor_parallel_size)
        # Parameters.
        # Initialize weight.
        factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
        # Each rank holds a (out_features / world_size, in_features) slice.
        self.weight = Parameter(torch.empty(self.out_features_per_partition, self.in_features, **factory_kwargs))
        if bias:
            # The bias is partitioned along the output dimension as well.
            self.bias = Parameter(torch.empty(self.out_features_per_partition, **factory_kwargs))
        else:
            self.bias = None
        # Run initialization under the tensor-parallel seed context.
        with seed(ParallelMode.TENSOR):
            self.reset_parameters(weight_initializer, bias_initializer)
        self._set_tensor_parallel_attributes()
        set_parallel_input(True)
    def reset_parameters(self, weight_initializer, bias_initializer) -> None:
        # Fan-in/fan-out use the *global* (unpartitioned) dimensions.
        fan_in, fan_out = self.in_features, self.out_features
        weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
        if self.bias is not None:
            bias_initializer(self.bias, fan_in=fan_in)
    def _set_tensor_parallel_attributes(self):
        num_partition = gpc.get_world_size(ParallelMode.TENSOR)
        set_tensor_parallel_attribute_by_partition(self.weight, num_partition)
        if self.bias is not None:
            set_tensor_parallel_attribute_by_partition(self.bias, num_partition)
    def forward(self, input_: Tensor) -> Tuple[Tensor, Tensor]:
        # NOTE: returns a plain Tensor unless skip_bias_add is set, in which
        # case a (output, bias) tuple is returned for later kernel fusion.
        # Set up backprop all-reduce.
        input_parallel = reduce_grad(input_, ParallelMode.PARALLEL_1D)
        # Matrix multiply.
        bias = self.bias if not self.skip_bias_add else None
        output_parallel = F.linear(input_parallel, self.weight, bias)
        if self.gather_output:
            # All-gather across the partitions.
            output = gather_forward_split_backward(output_parallel, ParallelMode.PARALLEL_1D, dim=-1)
        else:
            output = output_parallel
        if self.skip_bias_add:
            return output, self.bias
        else:
            return output
@LAYERS.register_module
class Linear1D_Row(ParallelLayer):
    """ Linear layer with row parallelism
    :param in_features: size of each input sample
    :type in_features: int
    :param out_features: size of each output sample
    :type out_features: int
    :param bias: If set to ``False``, the layer will not learn an additive bias, defaults to ``True``
    :type bias: bool, optional
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param parallel_input: If set to ``True``, it's assumed that the input is split, defaults to True
    :type parallel_input: bool, optional
    :param skip_bias_add: If set to ``True``, it will skip bias add for linear layer,
        which is preserved for kernel fusion, defaults to False
    :type skip_bias_add: bool, optional
    :param weight_initializer: The initializer of weight, defaults to kaiming uniform initializer
    :type weight_initializer: typing.Callable, optional
    :param bias_initializer: The initializer of bias, defaults to xavier uniform initializer
    :type bias_initializer: typing.Callable, optional
    """
    def __init__(self,
                 in_features: int,
                 out_features: int,
                 bias: bool = True,
                 dtype: torch.dtype = None,
                 parallel_input: bool = True,
                 skip_bias_add: bool = False,
                 weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
                 bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1)):
        super().__init__()
        # Keep input parameters
        self.in_features = in_features
        self.out_features = out_features
        self.parallel_input = parallel_input
        self.skip_bias_add = skip_bias_add
        if skip_bias_add and not bias:
            raise ValueError('cannot skip bias addition if bias is None')
        # Divide the weight matrix along the last dimension.
        self.input_size_per_partition = divide(in_features, gpc.tensor_parallel_size)
        # Parameters.
        # Initialize weight.
        factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
        # Each rank holds a (out_features, in_features / world_size) slice.
        self.weight = Parameter(torch.empty(self.out_features, self.input_size_per_partition, **factory_kwargs))
        if bias:
            # The bias is kept in full (unpartitioned) on every rank.
            self.bias = Parameter(torch.empty(self.out_features, **factory_kwargs))
        else:
            self.bias = None
        # Run initialization under the tensor-parallel seed context.
        with seed(ParallelMode.TENSOR):
            self.reset_parameters(weight_initializer, bias_initializer)
        self._set_tensor_parallel_attributes()
        set_parallel_input(False)
    def reset_parameters(self, weight_initializer, bias_initializer) -> None:
        # Fan-in/fan-out use the *global* (unpartitioned) dimensions.
        fan_in, fan_out = self.in_features, self.out_features
        weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
        if self.bias is not None:
            bias_initializer(self.bias, fan_in=fan_in)
            # Broadcast from the first rank of the group so every rank holds
            # an identical copy of the bias.
            broadcast(self.bias, gpc.get_ranks_in_group(ParallelMode.PARALLEL_1D)[0], ParallelMode.PARALLEL_1D)
    def _set_tensor_parallel_attributes(self):
        num_partition = gpc.get_world_size(ParallelMode.TENSOR)
        set_tensor_parallel_attribute_by_partition(self.weight, num_partition)
    def forward(self, input_: Tensor) -> Tensor:
        # NOTE: returns (output, bias) instead of a single Tensor when
        # skip_bias_add is set, for later kernel fusion.
        # Set up backprop all-reduce.
        if self.parallel_input:
            # Input is already split along the feature dimension.
            input_ = input_
        else:
            input_ = split_forward_gather_backward(input_, ParallelMode.PARALLEL_1D, dim=-1)
        # Partial product on this rank, then sum across the parallel group.
        output_parallel = F.linear(input_, self.weight)
        output = reduce_input(output_parallel, ParallelMode.PARALLEL_1D)
        if not self.skip_bias_add:
            if self.bias is not None:
                output = output + self.bias
            return output
        else:
            return output, self.bias
@LAYERS.register_module
class Embedding1D(ParallelLayer):
    """
    Embedding for 1D parallelism.

    The embedding table is split along the embedding dimension, so every rank
    stores ``embedding_dim / tensor_parallel_size`` columns of the full table
    and the forward pass gathers the columns back together.

    :param num_embeddings: number of embeddings
    :type num_embeddings: int
    :param embedding_dim: dimension of embedding
    :type embedding_dim: int
    :param padding_idx: index of padding, defaults to None
    :type padding_idx: int, optional
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param weight_initializer: The initializer of weight, defaults to normal initializer
    :type weight_initializer: typing.Callable, optional
    :param args: Args used in F.embedding
    :param kwargs: Kwargs used in F.embedding
    """
    def __init__(self,
                 num_embeddings: int,
                 embedding_dim: int,
                 padding_idx: int = None,
                 dtype: torch.dtype = None,
                 weight_initializer: Callable = init.normal_(),
                 *args,
                 **kwargs):
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embed_dim = embedding_dim
        self.padding_idx = padding_idx
        self.embed_args = args
        self.embed_kwargs = kwargs
        # Each rank only stores its slice of the embedding dimension.
        partition_dim = divide(embedding_dim, gpc.tensor_parallel_size)
        self.weight = Parameter(
            torch.empty((num_embeddings, partition_dim), device=get_current_device(), dtype=dtype))
        self.reset_parameters(weight_initializer)
        self._set_tensor_parallel_attributes()
        set_parallel_input(False)
    def _set_tensor_parallel_attributes(self):
        set_tensor_parallel_attribute_by_partition(self.weight, gpc.tensor_parallel_size)
    def reset_parameters(self, weight_initializer) -> None:
        # Initialize under the tensor-parallel seed context; fan-in/fan-out
        # use the *global* (unpartitioned) dimensions.
        with seed(ParallelMode.TENSOR):
            weight_initializer(self.weight, fan_in=self.num_embeddings, fan_out=self.embed_dim)
            self._fill_padding_idx_with_zero()
    def _fill_padding_idx_with_zero(self) -> None:
        if self.padding_idx is None:
            return
        with torch.no_grad():
            self.weight[self.padding_idx].fill_(0)
    def forward(self, input_: Tensor) -> Tensor:
        # Look up this rank's columns, then gather the full embedding
        # dimension from all ranks.
        partial_output = F.embedding(input_, self.weight, self.padding_idx, *self.embed_args, **self.embed_kwargs)
        return gather_forward_split_backward(partial_output, ParallelMode.PARALLEL_1D, dim=-1)
@LAYERS.register_module
class VocabParallelEmbedding1D(torch.nn.Module):
    """Embedding parallelized in the vocabulary dimension.
    Each rank holds a contiguous range of the vocabulary; out-of-range tokens
    are masked locally and the per-rank lookups are summed across the group.
    :param num_embeddings: number of embeddings
    :type num_embeddings: int
    :param embedding_dim: dimension of embedding
    :type embedding_dim: int
    :param padding_idx: index of padding, defaults to None
    :type padding_idx: int, optional
    :param dtype: The dtype of parameters, defaults to None
    :type dtype: torch.dtype, optional
    :param weight_initializer: The initializer of weight, defaults to normal initializer
    :type weight_initializer: typing.Callable, optional
    :param args: Args used in F.embedding
    :param kwargs: Kwargs used in F.embedding
    """
    def __init__(self,
                 num_embeddings: int,
                 embedding_dim: int,
                 padding_idx: int = None,
                 dtype: torch.dtype = None,
                 weight_initializer: Callable = init.normal_(),
                 *args,
                 **kwargs):
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embed_dim = embedding_dim
        self.padding_idx = padding_idx
        self.embed_args = args
        self.embed_kwargs = kwargs
        tensor_parallel_size = gpc.get_world_size(ParallelMode.PARALLEL_1D)
        tensor_parallel_rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)
        # This rank owns vocabulary ids in [vocab_start_index, vocab_end_index).
        self.num_embeddings_per_partition = divide(num_embeddings, tensor_parallel_size)
        self.vocab_start_index = tensor_parallel_rank * self.num_embeddings_per_partition
        self.vocab_end_index = self.vocab_start_index + self.num_embeddings_per_partition
        self.weight = Parameter(
            torch.empty((self.num_embeddings_per_partition, self.embed_dim), device=get_current_device(), dtype=dtype))
        self.reset_parameters(weight_initializer)
        self._set_tensor_parallel_attributes()
        set_parallel_input(False)
        env.vocab_parallel = True
    def _set_tensor_parallel_attributes(self):
        set_tensor_parallel_attribute_by_partition(self.weight, gpc.tensor_parallel_size)
    def reset_parameters(self, weight_initializer) -> None:
        # Initialize under the tensor-parallel seed context; fan-in/fan-out
        # use the *global* (unpartitioned) dimensions.
        with seed(ParallelMode.TENSOR):
            fan_in, fan_out = self.num_embeddings, self.embed_dim
            weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
            self._fill_padding_idx_with_zero()
    def _fill_padding_idx_with_zero(self) -> None:
        # Only the rank whose vocabulary range contains padding_idx holds
        # (and zeroes) that row.
        if self.padding_idx is not None and \
                self.padding_idx >= self.vocab_start_index and self.padding_idx < self.vocab_end_index:
            with torch.no_grad():
                self.weight[self.padding_idx - self.vocab_start_index].fill_(0)
    def forward(self, input_: Tensor) -> Tensor:
        # Build the mask.
        # Tokens outside this rank's vocabulary range contribute zeros.
        input_mask = (input_ < self.vocab_start_index) | (input_ >= self.vocab_end_index)
        # Mask the input.
        # Shift ids into the local table's index space; masked ids are
        # clamped to 0 so the lookup stays in bounds.
        masked_input = input_.clone() - self.vocab_start_index
        masked_input[input_mask] = 0
        output_parallel = F.embedding(masked_input, self.weight, self.padding_idx, *self.embed_args,
                                     **self.embed_kwargs)
        # Mask the output embedding.
        output_parallel[input_mask, :] = 0.
        # Reduce across all the model parallel GPUs.
        # Exactly one rank contributes a non-zero row per token, so the sum
        # reconstructs the full lookup.
        output = reduce_input(output_parallel, ParallelMode.PARALLEL_1D)
        return output
@LAYERS.register_module
class Dropout1D(ParallelLayer):
    """
    Dropout layer of 1D parallelism.

    When the input is split across the 1D parallel group, the dropout mask is
    drawn under the tensor-parallel seed context.

    :param p: dropout rate, defaults to 0.5
    :type p: float, optional
    :param inplace: If set to ``True``, will do this operation in-place, defaults to ``False``
    :type inplace: bool, optional
    """
    def __init__(self, p: float = 0.5, inplace: bool = False):
        super().__init__()
        self.parallel_input = get_parallel_input()
        self.p = p
        self.inplace = inplace
    def forward(self, input_: Tensor) -> Tensor:
        if not self.parallel_input:
            return F.dropout(input_, self.p, self.training, self.inplace)
        with seed(ParallelMode.TENSOR):
            return F.dropout(input_, self.p, self.training, self.inplace)
| [
"colossalai.nn.init.normal_",
"colossalai.core.global_context.get_world_size",
"colossalai.core.global_context.get_ranks_in_group",
"colossalai.nn.init.xavier_uniform_",
"colossalai.utils.cuda.get_current_device",
"colossalai.context.seed",
"colossalai.core.global_context.get_local_rank"
] | [((2221, 2255), 'colossalai.nn.init.xavier_uniform_', 'init.xavier_uniform_', ([], {'a': '(1)', 'scale': '(1)'}), '(a=1, scale=1)\n', (2241, 2255), True, 'from colossalai.nn import init as init\n'), ((4862, 4896), 'colossalai.nn.init.xavier_uniform_', 'init.xavier_uniform_', ([], {'a': '(1)', 'scale': '(1)'}), '(a=1, scale=1)\n', (4882, 4896), True, 'from colossalai.nn import init as init\n'), ((6919, 6948), 'torch.nn.functional.linear', 'F.linear', (['input_', 'self.weight'], {}), '(input_, self.weight)\n', (6927, 6948), True, 'import torch.nn.functional as F\n'), ((8421, 8455), 'colossalai.nn.init.xavier_uniform_', 'init.xavier_uniform_', ([], {'a': '(1)', 'scale': '(1)'}), '(a=1, scale=1)\n', (8441, 8455), True, 'from colossalai.nn import init as init\n'), ((9944, 9983), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (9962, 9983), True, 'from colossalai.core import global_context as gpc\n'), ((10413, 10461), 'torch.nn.functional.linear', 'F.linear', (['input_parallel', 'self.weight', 'self.bias'], {}), '(input_parallel, self.weight, self.bias)\n', (10421, 10461), True, 'import torch.nn.functional as F\n'), ((12281, 12315), 'colossalai.nn.init.xavier_uniform_', 'init.xavier_uniform_', ([], {'a': '(1)', 'scale': '(1)'}), '(a=1, scale=1)\n', (12301, 12315), True, 'from colossalai.nn import init as init\n'), ((13713, 13752), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (13731, 13752), True, 'from colossalai.core import global_context as gpc\n'), ((14236, 14279), 'torch.nn.functional.linear', 'F.linear', (['input_parallel', 'self.weight', 'bias'], {}), '(input_parallel, self.weight, bias)\n', (14244, 14279), True, 'import torch.nn.functional as F\n'), ((16158, 16192), 'colossalai.nn.init.xavier_uniform_', 'init.xavier_uniform_', ([], {'a': '(1)', 'scale': '(1)'}), '(a=1, scale=1)\n', (16178, 16192), 
True, 'from colossalai.nn import init as init\n'), ((17748, 17787), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (17766, 17787), True, 'from colossalai.core import global_context as gpc\n'), ((18149, 18178), 'torch.nn.functional.linear', 'F.linear', (['input_', 'self.weight'], {}), '(input_, self.weight)\n', (18157, 18178), True, 'import torch.nn.functional as F\n'), ((19373, 19387), 'colossalai.nn.init.normal_', 'init.normal_', ([], {}), '()\n', (19385, 19387), True, 'from colossalai.nn import init as init\n'), ((20704, 20798), 'torch.nn.functional.embedding', 'F.embedding', (['input_', 'self.weight', 'self.padding_idx', '*self.embed_args'], {}), '(input_, self.weight, self.padding_idx, *self.embed_args, **self\n .embed_kwargs)\n', (20715, 20798), True, 'import torch.nn.functional as F\n'), ((21875, 21889), 'colossalai.nn.init.normal_', 'init.normal_', ([], {}), '()\n', (21887, 21889), True, 'from colossalai.nn import init as init\n'), ((22191, 22235), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (22209, 22235), True, 'from colossalai.core import global_context as gpc\n'), ((22267, 22311), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (22285, 22311), True, 'from colossalai.core import global_context as gpc\n'), ((23960, 24059), 'torch.nn.functional.embedding', 'F.embedding', (['masked_input', 'self.weight', 'self.padding_idx', '*self.embed_args'], {}), '(masked_input, self.weight, self.padding_idx, *self.embed_args,\n **self.embed_kwargs)\n', (23971, 24059), True, 'import torch.nn.functional as F\n'), ((5291, 5311), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (5309, 5311), False, 'from colossalai.utils.cuda import get_current_device\n'), ((5750, 5775), 
'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (5754, 5775), False, 'from colossalai.context import ParallelMode, seed\n'), ((6514, 6553), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (6532, 6553), True, 'from colossalai.core import global_context as gpc\n'), ((8851, 8871), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (8869, 8871), False, 'from colossalai.utils.cuda import get_current_device\n'), ((9325, 9350), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (9329, 9350), False, 'from colossalai.context import ParallelMode, seed\n'), ((12836, 12856), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (12854, 12856), False, 'from colossalai.utils.cuda import get_current_device\n'), ((12906, 12991), 'torch.empty', 'torch.empty', (['self.out_features_per_partition', 'self.in_features'], {}), '(self.out_features_per_partition, self.in_features, **factory_kwargs\n )\n', (12917, 12991), False, 'import torch\n'), ((13160, 13185), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (13164, 13185), False, 'from colossalai.context import ParallelMode, seed\n'), ((16773, 16793), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (16791, 16793), False, 'from colossalai.utils.cuda import get_current_device\n'), ((16843, 16922), 'torch.empty', 'torch.empty', (['self.out_features', 'self.input_size_per_partition'], {}), '(self.out_features, self.input_size_per_partition, **factory_kwargs)\n', (16854, 16922), False, 'import torch\n'), ((17082, 17107), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (17086, 17107), False, 'from colossalai.context import ParallelMode, seed\n'), ((20229, 20254), 'colossalai.context.seed', 
'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (20233, 20254), False, 'from colossalai.context import ParallelMode, seed\n'), ((23113, 23138), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (23117, 23138), False, 'from colossalai.context import ParallelMode, seed\n'), ((25070, 25124), 'torch.nn.functional.dropout', 'F.dropout', (['input_', 'self.p', 'self.training', 'self.inplace'], {}), '(input_, self.p, self.training, self.inplace)\n', (25079, 25124), True, 'import torch.nn.functional as F\n'), ((2160, 2172), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (2169, 2172), False, 'import math\n'), ((4801, 4813), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (4810, 4813), False, 'import math\n'), ((5479, 5557), 'torch.empty', 'torch.empty', (['self.num_classes', 'self.input_size_per_partition'], {}), '(self.num_classes, self.input_size_per_partition, **factory_kwargs)\n', (5490, 5557), False, 'import torch\n'), ((5645, 5692), 'torch.empty', 'torch.empty', (['self.num_classes'], {}), '(self.num_classes, **factory_kwargs)\n', (5656, 5692), False, 'import torch\n'), ((8360, 8372), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (8369, 8372), False, 'import math\n'), ((9039, 9118), 'torch.empty', 'torch.empty', (['self.num_classes_per_partition', 'self.in_features'], {}), '(self.num_classes_per_partition, self.in_features, **factory_kwargs)\n', (9050, 9118), False, 'import torch\n'), ((9206, 9267), 'torch.empty', 'torch.empty', (['self.num_classes_per_partition'], {}), '(self.num_classes_per_partition, **factory_kwargs)\n', (9217, 9267), False, 'import torch\n'), ((12220, 12232), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (12229, 12232), False, 'import math\n'), ((13040, 13102), 'torch.empty', 'torch.empty', (['self.out_features_per_partition'], {}), '(self.out_features_per_partition, **factory_kwargs)\n', (13051, 13102), False, 'import torch\n'), ((16097, 16109), 'math.sqrt', 'math.sqrt', 
(['(5)'], {}), '(5)\n', (16106, 16109), False, 'import math\n'), ((16976, 17024), 'torch.empty', 'torch.empty', (['self.out_features'], {}), '(self.out_features, **factory_kwargs)\n', (16987, 17024), False, 'import torch\n'), ((20555, 20570), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (20568, 20570), False, 'import torch\n'), ((23544, 23559), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (23557, 23559), False, 'import torch\n'), ((24928, 24953), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (24932, 24953), False, 'from colossalai.context import ParallelMode, seed\n'), ((24980, 25034), 'torch.nn.functional.dropout', 'F.dropout', (['input_', 'self.p', 'self.training', 'self.inplace'], {}), '(input_, self.p, self.training, self.inplace)\n', (24989, 25034), True, 'import torch.nn.functional as F\n'), ((6331, 6379), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (6353, 6379), True, 'from colossalai.core import global_context as gpc\n'), ((17597, 17645), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (17619, 17645), True, 'from colossalai.core import global_context as gpc\n'), ((19849, 19869), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (19867, 19869), False, 'from colossalai.utils.cuda import get_current_device\n'), ((22699, 22719), 'colossalai.utils.cuda.get_current_device', 'get_current_device', ([], {}), '()\n', (22717, 22719), False, 'from colossalai.utils.cuda import get_current_device\n')] |
from pathlib import Path
from colossalai.logging import get_dist_logger
import colossalai
import torch
import os
from colossalai.core import global_context as gpc
from colossalai.utils import get_dataloader, MultiTimer
from torchvision import transforms
from colossalai.trainer import hooks, Trainer
from torchvision.datasets import CIFAR10
from torchvision.models import resnet34
from colossalai.nn import CosineAnnealingLR
from tqdm import tqdm
def main():
    """Entry point: train ResNet-34 on CIFAR-10 with the Colossal-AI Trainer API."""
    colossalai.launch_from_torch(config='./config.py',
                                 host='localhost',
                                 port=29500)
    logger = get_dist_logger()

    # ResNet-34 backbone with a 10-way classifier head for CIFAR-10.
    model = resnet34(num_classes=10)

    # Per-channel CIFAR-10 statistics; the identical (stateless) normalization
    # transform is reused by both the training and the evaluation pipeline.
    normalize = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                                 std=[0.2023, 0.1994, 0.2010])
    data_root = Path(os.environ['DATA'])

    # Training split with standard CIFAR-10 augmentation.
    train_dataset = CIFAR10(
        root=data_root,
        download=True,
        transform=transforms.Compose([
            transforms.RandomCrop(size=32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]),
    )
    # Held-out split: no augmentation, only tensor conversion + normalization.
    test_dataset = CIFAR10(
        root=data_root,
        train=False,
        transform=transforms.Compose([transforms.ToTensor(), normalize]),
    )

    train_dataloader = get_dataloader(dataset=train_dataset,
                                    shuffle=True,
                                    batch_size=gpc.config.BATCH_SIZE,
                                    num_workers=1,
                                    pin_memory=True)
    test_dataloader = get_dataloader(dataset=test_dataset,
                                   add_sampler=False,
                                   batch_size=gpc.config.BATCH_SIZE,
                                   num_workers=1,
                                   pin_memory=True)

    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=0.1,
                                momentum=0.9,
                                weight_decay=5e-4)
    lr_scheduler = CosineAnnealingLR(optimizer, total_steps=gpc.config.NUM_EPOCHS)

    # Wrap model/optimizer/criterion/dataloaders into a Colossal-AI engine;
    # behavior (AMP, gradient handling, ...) is driven by ./config.py.
    engine, train_dataloader, test_dataloader, _ = colossalai.initialize(
        model,
        optimizer,
        criterion,
        train_dataloader,
        test_dataloader,
    )

    # Timer instance shared with the per-epoch timing hook below.
    timer = MultiTimer()
    trainer = Trainer(engine=engine, timer=timer, logger=logger)

    # Hooks provide loss tracking, LR scheduling, accuracy evaluation and
    # per-epoch metric/memory/timing logging.
    hook_list = [
        hooks.LossHook(),
        hooks.LRSchedulerHook(lr_scheduler=lr_scheduler, by_epoch=True),
        hooks.AccuracyHook(),
        hooks.LogMetricByEpochHook(logger),
        hooks.LogMemoryByEpochHook(logger),
        hooks.LogTimingByEpochHook(timer, logger),
    ]

    trainer.fit(train_dataloader=train_dataloader,
                epochs=gpc.config.NUM_EPOCHS,
                test_dataloader=test_dataloader,
                test_interval=1,
                hooks=hook_list,
                display_progress=True)
# Run the training entry point only when executed as a script,
# not when this module is imported.
if __name__ == '__main__':
    main()
| [
"colossalai.trainer.hooks.LogMemoryByEpochHook",
"colossalai.trainer.hooks.LogMetricByEpochHook",
"colossalai.trainer.hooks.LogTimingByEpochHook",
"colossalai.trainer.hooks.AccuracyHook",
"colossalai.utils.MultiTimer",
"colossalai.logging.get_dist_logger",
"colossalai.utils.get_dataloader",
"colossala... | [((465, 550), 'colossalai.launch_from_torch', 'colossalai.launch_from_torch', ([], {'config': '"""./config.py"""', 'host': '"""localhost"""', 'port': '(29500)'}), "(config='./config.py', host='localhost', port=29500\n )\n", (493, 550), False, 'import colossalai\n'), ((626, 643), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (641, 643), False, 'from colossalai.logging import get_dist_logger\n'), ((676, 700), 'torchvision.models.resnet34', 'resnet34', ([], {'num_classes': '(10)'}), '(num_classes=10)\n', (684, 700), False, 'from torchvision.models import resnet34\n'), ((1522, 1644), 'colossalai.utils.get_dataloader', 'get_dataloader', ([], {'dataset': 'train_dataset', 'shuffle': '(True)', 'batch_size': 'gpc.config.BATCH_SIZE', 'num_workers': '(1)', 'pin_memory': '(True)'}), '(dataset=train_dataset, shuffle=True, batch_size=gpc.config.\n BATCH_SIZE, num_workers=1, pin_memory=True)\n', (1536, 1644), False, 'from colossalai.utils import get_dataloader, MultiTimer\n'), ((1855, 1981), 'colossalai.utils.get_dataloader', 'get_dataloader', ([], {'dataset': 'test_dataset', 'add_sampler': '(False)', 'batch_size': 'gpc.config.BATCH_SIZE', 'num_workers': '(1)', 'pin_memory': '(True)'}), '(dataset=test_dataset, add_sampler=False, batch_size=gpc.\n config.BATCH_SIZE, num_workers=1, pin_memory=True)\n', (1869, 1981), False, 'from colossalai.utils import get_dataloader, MultiTimer\n'), ((2203, 2230), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (2228, 2230), False, 'import torch\n'), ((2380, 2443), 'colossalai.nn.CosineAnnealingLR', 'CosineAnnealingLR', (['optimizer'], {'total_steps': 'gpc.config.NUM_EPOCHS'}), '(optimizer, total_steps=gpc.config.NUM_EPOCHS)\n', (2397, 2443), False, 'from colossalai.nn import CosineAnnealingLR\n'), ((2496, 2585), 'colossalai.initialize', 'colossalai.initialize', (['model', 'optimizer', 'criterion', 'train_dataloader', 'test_dataloader'], {}), '(model, optimizer, criterion, 
train_dataloader,\n test_dataloader)\n', (2517, 2585), False, 'import colossalai\n'), ((2997, 3009), 'colossalai.utils.MultiTimer', 'MultiTimer', ([], {}), '()\n', (3007, 3009), False, 'from colossalai.utils import get_dataloader, MultiTimer\n'), ((3055, 3105), 'colossalai.trainer.Trainer', 'Trainer', ([], {'engine': 'engine', 'timer': 'timer', 'logger': 'logger'}), '(engine=engine, timer=timer, logger=logger)\n', (3062, 3105), False, 'from colossalai.trainer import hooks, Trainer\n'), ((3211, 3227), 'colossalai.trainer.hooks.LossHook', 'hooks.LossHook', ([], {}), '()\n', (3225, 3227), False, 'from colossalai.trainer import hooks, Trainer\n'), ((3237, 3300), 'colossalai.trainer.hooks.LRSchedulerHook', 'hooks.LRSchedulerHook', ([], {'lr_scheduler': 'lr_scheduler', 'by_epoch': '(True)'}), '(lr_scheduler=lr_scheduler, by_epoch=True)\n', (3258, 3300), False, 'from colossalai.trainer import hooks, Trainer\n'), ((3310, 3330), 'colossalai.trainer.hooks.AccuracyHook', 'hooks.AccuracyHook', ([], {}), '()\n', (3328, 3330), False, 'from colossalai.trainer import hooks, Trainer\n'), ((3340, 3374), 'colossalai.trainer.hooks.LogMetricByEpochHook', 'hooks.LogMetricByEpochHook', (['logger'], {}), '(logger)\n', (3366, 3374), False, 'from colossalai.trainer import hooks, Trainer\n'), ((3384, 3418), 'colossalai.trainer.hooks.LogMemoryByEpochHook', 'hooks.LogMemoryByEpochHook', (['logger'], {}), '(logger)\n', (3410, 3418), False, 'from colossalai.trainer import hooks, Trainer\n'), ((3428, 3469), 'colossalai.trainer.hooks.LogTimingByEpochHook', 'hooks.LogTimingByEpochHook', (['timer', 'logger'], {}), '(timer, logger)\n', (3454, 3469), False, 'from colossalai.trainer import hooks, Trainer\n'), ((768, 792), 'pathlib.Path', 'Path', (["os.environ['DATA']"], {}), "(os.environ['DATA'])\n", (772, 792), False, 'from pathlib import Path\n'), ((1210, 1234), 'pathlib.Path', 'Path', (["os.environ['DATA']"], {}), "(os.environ['DATA'])\n", (1214, 1234), False, 'from pathlib import Path\n'), ((885, 
926), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', ([], {'size': '(32)', 'padding': '(4)'}), '(size=32, padding=4)\n', (906, 926), False, 'from torchvision import transforms\n'), ((944, 977), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (975, 977), False, 'from torchvision import transforms\n'), ((995, 1016), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1014, 1016), False, 'from torchvision import transforms\n'), ((1034, 1119), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.4914, 0.4822, 0.4465]', 'std': '[0.2023, 0.1994, 0.201]'}), '(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201]\n )\n', (1054, 1119), False, 'from torchvision import transforms\n'), ((1325, 1346), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1344, 1346), False, 'from torchvision import transforms\n'), ((1364, 1449), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.4914, 0.4822, 0.4465]', 'std': '[0.2023, 0.1994, 0.201]'}), '(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201]\n )\n', (1384, 1449), False, 'from torchvision import transforms\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.