code stringlengths 197 38.4k | apis sequence | extract_api stringlengths 137 20.3k |
|---|---|---|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from typing import Any, Optional, Tuple
import torch
import torch.distributed as dist
from colossalai.communication import all_gather, all_reduce, reduce_scatter
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from torch import Tensor
from torch.cuda.amp import custom_bwd, custom_fwd
class linear_3d(torch.autograd.Function):
    """3D tensor-parallel linear transform: ``output = input_ @ weight (+ bias)``.

    Forward: all-gather the input shard along ``input_dim`` over
    ``input_parallel_mode``, matmul with the local weight shard, then
    reduce-scatter the product along ``output_dim`` over
    ``output_parallel_mode``.
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any,
                input_: Tensor,
                weight: Tensor,
                bias: Optional[Tensor],
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = -1,
                output_dim: int = 0) -> Tensor:
        """Return this rank's output shard; saves tensors and parallel-mode
        bookkeeping on ``ctx`` for the backward pass."""
        assert input_.shape[-1] == weight.shape[0], \
            'Invalid shapes: input = {}, weight = {}.'.format(input_.shape, weight.shape)
        ctx.use_bias = bias is not None
        # Assemble the full input along input_dim across the input group.
        input_ = all_gather(input_, input_dim, input_parallel_mode)
        input_ = torch.cat(input_, dim=input_dim)
        ctx.save_for_backward(input_, weight)
        output = torch.matmul(input_, weight)
        output = reduce_scatter(output, output_dim, output_parallel_mode)
        if bias is not None:
            output += bias
        ctx.input_parallel_mode = input_parallel_mode
        ctx.weight_parallel_mode = weight_parallel_mode
        ctx.output_parallel_mode = output_parallel_mode
        ctx.input_dim = input_dim
        ctx.weight_dim = weight_dim
        ctx.output_dim = output_dim
        return output

    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """Grads w.r.t. input, weight and bias (``None`` for non-tensor args).

        The input-grad reduce-scatter and the weight-grad all-reduce are
        issued asynchronously and overlapped with local work; the bias grad
        rides along as an extra row of the weight grad so a single
        all-reduce covers both.
        """
        input_, weight = ctx.saved_tensors
        # Fix: without this default, the return below raised NameError
        # whenever forward was called with bias=None (ctx.use_bias False).
        bias_grad = None
        with torch.no_grad():
            output_grad = all_gather(output_grad, ctx.output_dim,
                                     ctx.output_parallel_mode)
            output_grad = torch.cat(output_grad, dim=ctx.output_dim)
            input_grad = torch.matmul(output_grad, weight.transpose(0, 1))
            input_grad, input_op = reduce_scatter(input_grad, ctx.input_dim,
                                                  ctx.input_parallel_mode,
                                                  async_op=True)
            weight_grad = torch.matmul(
                input_.reshape(-1, input_.shape[-1]).transpose(0, 1),
                output_grad.reshape(-1, output_grad.shape[-1]))
            if ctx.use_bias:
                # Sum over every non-feature dim, then append the bias grad
                # as one extra row so one all-reduce handles both grads.
                bias_grad = torch.sum(output_grad,
                                      dim=tuple(range(len(output_grad.shape))[:-1]))
                weight_grad = torch.cat([weight_grad, torch.unsqueeze(bias_grad, dim=0)])
            weight_grad, weight_op = all_reduce(weight_grad, ctx.weight_parallel_mode,
                                                async_op=True)
            input_op.wait()
            weight_op.wait()
            if ctx.use_bias:
                # Split the fused result back into weight and bias grads.
                bias_grad = weight_grad[-1]
                weight_grad = weight_grad[:-1]
        return input_grad, weight_grad, bias_grad, None, None, None, None, None, None
class layer_norm_3d(torch.autograd.Function):
    """3D-parallel LayerNorm over the last dimension, whose statistics are
    accumulated across ``output_parallel_mode`` (the feature dim is sharded
    over that group)."""
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, input_: Tensor, weight: Tensor, bias: Tensor,
                normalized_shape: int, eps: float,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode) -> Tensor:
        """Compute ``weight * (x - mean) / sqrt(var + eps) + bias``.

        Partial sums for mean and variance are all-reduced over
        ``output_parallel_mode`` before dividing by the full
        ``normalized_shape``.
        """
        # mean = torch.sum(input_, dim=-1)
        # dist.all_reduce(mean, group=gpc.get_group(output_parallel_mode))
        # mean /= normalized_shape
        # mu = input_ - mean
        # var = torch.sum(torch.pow(mu, 2), dim=-1)
        # dist.all_reduce(var, group=gpc.get_group(output_parallel_mode))
        # var /= normalized_shape
        # std_dev = torch.sqrt(var + eps)
        # ctx.save_for_backward(input_, mu, std_dev, weight)
        # output = weight * mu / std_dev + bias
        mean = all_reduce(torch.sum(input_, dim=-1, keepdim=True),
                          output_parallel_mode) / normalized_shape
        mu = input_ - mean
        var = all_reduce(torch.sum(mu**2, dim=-1, keepdim=True),
                         output_parallel_mode) / normalized_shape
        sigma = torch.sqrt(var + eps)
        # ranks_in_group = gpc.get_ranks_in_group(input_parallel_mode)
        # src_rank = ranks_in_group[gpc.get_local_rank(output_parallel_mode)]
        # transforms = torch.stack([weight, bias]).contiguous()
        # dist.broadcast(transforms,
        #                src=src_rank,
        #                group=gpc.get_group(input_parallel_mode))
        # transforms = all_gather(transforms, -1, weight_parallel_mode)
        # weight, bias = transforms[0], transforms[1]
        # Save normalized inputs and std-dev for the backward pass.
        ctx.save_for_backward(mu, sigma, weight)
        z = mu / sigma
        output = weight * z + bias
        # ctx.src_rank = src_rank
        ctx.normalized_shape = normalized_shape
        ctx.input_parallel_mode = input_parallel_mode
        ctx.weight_parallel_mode = weight_parallel_mode
        ctx.output_parallel_mode = output_parallel_mode
        return output
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """Backward of the distributed LayerNorm.

        weight/bias grads are summed over the non-feature dims and all-reduced
        over both the weight and input groups; the input grad follows the
        standard LayerNorm chain rule, with the d(var) and d(mean) partial
        sums all-reduced over the output group.
        """
        mu, sigma, weight = ctx.saved_tensors
        with torch.no_grad():
            bias_grad, weight_grad = output_grad, output_grad * mu / sigma
            # Stack so one collective covers both grads.
            grads = torch.stack([bias_grad, weight_grad]).contiguous()
            grads = torch.sum(grads, dim=tuple(range(len(grads.shape))[1:-1]))
            grads = all_reduce(grads, ctx.weight_parallel_mode)
            grads = all_reduce(grads, ctx.input_parallel_mode)
            bias_grad, weight_grad = grads[0], grads[1]
            # grads = reduce_scatter(grads, -1, ctx.weight_parallel_mode)
            # dist.reduce(grads,
            #             dst=ctx.src_rank,
            #             group=gpc.get_group(ctx.input_parallel_mode))
            # if gpc.get_local_rank(
            #         ctx.input_parallel_mode) == gpc.get_local_rank(
            #             ctx.output_parallel_mode):
            #     bias_grad, weight_grad = grads[0], grads[1]
            # else:
            #     bias_grad, weight_grad = None, None
            dz = output_grad * weight
            dvar = dz * mu * (-0.5) * sigma**(-3)
            dvar = all_reduce(torch.sum(dvar, dim=-1, keepdim=True), ctx.output_parallel_mode)
            dmean = dz * (-1 / sigma) + dvar * -2 * mu / ctx.normalized_shape
            dmean = all_reduce(torch.sum(dmean, dim=-1, keepdim=True), ctx.output_parallel_mode)
            input_grad = dz / sigma + dvar * 2 * mu / ctx.normalized_shape + dmean / ctx.normalized_shape
        return input_grad, weight_grad, bias_grad, None, None, None, None, None
class Matmul_AB_3D(torch.autograd.Function):
    """Matrix multiplication for :math:`C = AB`
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any,
                A: Tensor,
                B: Tensor,
                depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = -1,
                output_dim: int = 0) -> Tensor:
        """Gather A over the input group and B over the weight group, multiply
        locally, then reduce-scatter C over the output group.

        Shard shapes (q = cube depth):
        A: [m/q^2, n, k/q], B: [k/q, h/q^2] -> C: [m/q^2, n, h/q]
        """
        # A: [m/q^2, n, k/q]
        # B: [k/q, h/q^2]
        # C: [m/q^2, n, h/q]
        ctx.save_for_backward(A, B)
        assert A.shape[-1] == B.shape[0], \
            'Invalid shapes: A={}, B={}.'.format(A.shape, B.shape)
        A_temp = all_gather(A, input_dim, input_parallel_mode)
        B_temp = all_gather(B, weight_dim, weight_parallel_mode)
        C = torch.matmul(A_temp, B_temp)
        out = reduce_scatter(C, output_dim, output_parallel_mode)
        # Remember which group/dim each operand used, for backward.
        ctx.depth = depth
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        ctx.A_dim = input_dim
        ctx.B_dim = weight_dim
        ctx.C_dim = output_dim
        return out
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """dA = dC @ B^T and dB = A^T @ dC, expressed via the sibling 3D
        matmul Functions with the operand groups/dims permuted to match."""
        A, B = ctx.saved_tensors
        with torch.no_grad():
            A_grad = Matmul_ABT_3D.apply(output_grad, B, ctx.depth,
                                         ctx.C_group_parallel_mode,
                                         ctx.B_group_parallel_mode,
                                         ctx.A_group_parallel_mode, ctx.C_dim,
                                         ctx.B_dim, ctx.A_dim)
            B_grad = Matmul_ATB_3D.apply(A, output_grad, ctx.depth,
                                         ctx.A_group_parallel_mode,
                                         ctx.C_group_parallel_mode,
                                         ctx.B_group_parallel_mode, ctx.A_dim,
                                         ctx.C_dim, ctx.B_dim)
        return A_grad, B_grad, None, None, None, None, None, None, None
class Matmul_ABT_3D(torch.autograd.Function):
    """Matrix multiplication for :math:`C = AB^T`
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any,
                A: Tensor,
                B: Tensor,
                depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = -1,
                output_dim: int = 0) -> Tensor:
        """Gather A and B over their groups, compute ``A @ B.T`` locally, then
        reduce-scatter the result over the output group.

        Shard shapes (q = cube depth):
        A: [m/q^2, n, h/q], B: [k/q, h/q^2] -> C: [m/q^2, n, k/q]
        """
        # A: [m/q^2, n, h/q]
        # B: [k/q, h/q^2]
        # C: [m/q^2, n, k/q]
        ctx.save_for_backward(A, B)
        A_temp = all_gather(A, input_dim, input_parallel_mode)
        B_temp = all_gather(B, weight_dim, weight_parallel_mode)
        C = torch.matmul(A_temp, B_temp.transpose(0, 1))
        out = reduce_scatter(C, output_dim, output_parallel_mode)
        # Remember which group/dim each operand used, for backward.
        ctx.depth = depth
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        ctx.A_dim = input_dim
        ctx.B_dim = weight_dim
        ctx.C_dim = output_dim
        return out
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """dA = dC @ B and dB = dC^T @ A, expressed via the sibling 3D matmul
        Functions with the operand groups/dims permuted to match."""
        A, B = ctx.saved_tensors
        with torch.no_grad():
            A_grad = Matmul_AB_3D.apply(output_grad, B, ctx.depth,
                                        ctx.C_group_parallel_mode,
                                        ctx.B_group_parallel_mode,
                                        ctx.A_group_parallel_mode, ctx.C_dim,
                                        ctx.B_dim, ctx.A_dim)
            B_grad = Matmul_ATB_3D.apply(output_grad, A, ctx.depth,
                                         ctx.C_group_parallel_mode,
                                         ctx.A_group_parallel_mode,
                                         ctx.B_group_parallel_mode, ctx.C_dim,
                                         ctx.A_dim, ctx.B_dim)
        return A_grad, B_grad, None, None, None, None, None, None, None
class Matmul_ATB_3D(torch.autograd.Function):
    """Matrix multiplication for :math:`C = A^TB`
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any,
                A: Tensor,
                B: Tensor,
                depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = 0,
                output_dim: int = -1) -> Tensor:
        """Gather A and B over their groups, flatten each to 2D, compute
        ``A.T @ B`` locally, then reduce-scatter the result over the output
        group.

        Shard shapes (q = cube depth):
        A: [m/q^2, n, k/q], B: [m/q^2, n, h/q] -> C: [k/q, h/q^2]
        """
        # A: [m/q^2, n, k/q]
        # B: [m/q^2, n, h/q]
        # C: [k/q, h/q^2]
        ctx.save_for_backward(A, B)
        A_temp = all_gather(A, input_dim, input_parallel_mode)
        # Collapse all leading dims so the transpose-matmul is a plain 2D op.
        A_temp = A_temp.reshape(-1, A.shape[-1])
        B_temp = all_gather(B, weight_dim, weight_parallel_mode)
        B_temp = B_temp.reshape(-1, B.shape[-1])
        C = torch.matmul(A_temp.transpose(0, 1), B_temp)
        out = reduce_scatter(C, output_dim, output_parallel_mode)
        # Remember which group/dim each operand used, for backward.
        ctx.depth = depth
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        ctx.A_dim = input_dim
        ctx.B_dim = weight_dim
        ctx.C_dim = output_dim
        return out
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """dA = B @ dC^T and dB = A @ dC, expressed via the sibling 3D matmul
        Functions with the operand groups/dims permuted to match."""
        A, B = ctx.saved_tensors
        with torch.no_grad():
            A_grad = Matmul_ABT_3D.apply(B, output_grad, ctx.depth,
                                         ctx.B_group_parallel_mode,
                                         ctx.C_group_parallel_mode,
                                         ctx.A_group_parallel_mode, ctx.B_dim,
                                         ctx.C_dim, ctx.A_dim)
            B_grad = Matmul_AB_3D.apply(A, output_grad, ctx.depth,
                                        ctx.A_group_parallel_mode,
                                        ctx.C_group_parallel_mode,
                                        ctx.B_group_parallel_mode, ctx.A_dim,
                                        ctx.C_dim, ctx.B_dim)
        return A_grad, B_grad, None, None, None, None, None, None, None
class Add_3D(torch.autograd.Function):
    """Matrix add bias: :math:`C = A + b`
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, input_: Tensor, bias: Tensor, depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode) -> Tensor:
        """Broadcast the bias shard within the input group from the owning
        rank, gather it to a full [h/q] slice over the weight group, and add
        it to the input."""
        # input: [m/q^2, n, h/q]
        # bias: [h/q^2]
        ranks_in_group = gpc.get_ranks_in_group(input_parallel_mode)
        src_rank = ranks_in_group[gpc.get_local_rank(output_parallel_mode)]
        # Clone so the in-place broadcast does not clobber the caller's bias.
        bias_temp = bias.clone()
        dist.broadcast(bias_temp,
                       src=src_rank,
                       group=gpc.get_group(input_parallel_mode))
        # [h/q]
        bias_temp = all_gather(bias_temp, -1, weight_parallel_mode)
        out = input_ + bias_temp
        ctx.depth = depth
        ctx.src_rank = src_rank
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        return out
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """Input grad passes through unchanged; the bias grad is summed over
        the leading dims, reduce-scattered over the weight group, then
        reduced onto the rank that owns the bias shard (other ranks return
        None for it)."""
        # output_grad: [m/q^2, n, h/q]
        with torch.no_grad():
            # [h/q]
            grad = torch.sum(output_grad,
                             dim=tuple(range(len(output_grad.shape))[:-1]))
            bias_grad = reduce_scatter(grad, -1, ctx.B_group_parallel_mode)
            dist.reduce(bias_grad,
                        dst=ctx.src_rank,
                        group=gpc.get_group(ctx.A_group_parallel_mode))
            # Only the source rank of the forward broadcast keeps a bias grad.
            if gpc.get_local_rank(
                    ctx.A_group_parallel_mode) != gpc.get_local_rank(
                        ctx.C_group_parallel_mode):
                bias_grad = None
        return output_grad, bias_grad, None, None, None, None
class Mul_3D(torch.autograd.Function):
    """Matrix multiplication for :math:`C = A * b`
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, input_: Tensor, bias: Tensor, depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode) -> Tensor:
        """Broadcast the bias shard within the input group from the owning
        rank, gather it to a full [h/q] slice over the weight group, and
        multiply elementwise with the input."""
        # input: [m/q^2, n, h/q]
        # bias: [h/q^2]
        ranks_in_group = gpc.get_ranks_in_group(input_parallel_mode)
        src_rank = ranks_in_group[gpc.get_local_rank(output_parallel_mode)]
        # [h/q^2]
        # Clone so the in-place broadcast does not clobber the caller's bias.
        bias_temp = bias.clone()
        dist.broadcast(bias_temp,
                       src=src_rank,
                       group=gpc.get_group(input_parallel_mode))
        # [h/q]
        bias_temp = all_gather(bias_temp, -1, weight_parallel_mode)
        # empty_cache()
        ctx.save_for_backward(input_, bias_temp)
        out = torch.mul(input_, bias_temp)
        ctx.depth = depth
        ctx.src_rank = src_rank
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        return out
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """Product rule: d(input) = grad * bias, d(bias) = sum(grad * input);
        the bias grad is reduce-scattered over the weight group and reduced
        onto the shard-owning rank (other ranks return None for it)."""
        # output_grad: [m/q^2, n, h/q]
        with torch.no_grad():
            input_, bias = ctx.saved_tensors
            # [m/q^2, n, h/q]
            input_grad = torch.mul(output_grad, bias)
            # [h/q]
            grad = torch.mul(output_grad, input_)
            grad = torch.sum(grad,
                             dim=tuple(range(len(output_grad.shape))[:-1]))
            bias_grad = reduce_scatter(grad, -1, ctx.B_group_parallel_mode)
            dist.reduce(bias_grad,
                        dst=ctx.src_rank,
                        group=gpc.get_group(ctx.A_group_parallel_mode))
            # Only the source rank of the forward broadcast keeps a bias grad.
            if gpc.get_local_rank(
                    ctx.A_group_parallel_mode) != gpc.get_local_rank(
                        ctx.C_group_parallel_mode):
                bias_grad = None
        return input_grad, bias_grad, None, None, None, None
class Sum_3D(torch.autograd.Function):
    """Compute the sum of input tensors
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any,
                input_: Tensor,
                dim: int,
                depth: int,
                parallel_mode: ParallelMode,
                keepdim: bool = False) -> Tensor:
        """Sum locally along ``dim`` and all-reduce the partial sums across
        ``parallel_mode``."""
        # input: [m/q^2, n, h/q]
        out = torch.sum(input_, dim=dim, keepdim=keepdim)
        dist.all_reduce(out, group=gpc.get_group(parallel_mode))
        # Keep the original shape so backward can expand the grad back.
        ctx.input_shape = input_.shape
        ctx.depth = depth
        ctx.group = parallel_mode
        ctx.dim = dim
        return out
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        """All-reduce the incoming grad across the group, then expand it back
        to the input shape by repeating along the summed dim."""
        with torch.no_grad():
            output_grad = output_grad.contiguous()
            dist.all_reduce(output_grad, group=gpc.get_group(ctx.group))
            # Restore the reduced dim if forward was called with keepdim=False.
            if len(output_grad.shape) < len(ctx.input_shape):
                output_grad = torch.unsqueeze(output_grad, ctx.dim)
            dims = [1 for _ in range(len(output_grad.shape))]
            dims[ctx.dim] = ctx.input_shape[ctx.dim]
            input_grad = output_grad.repeat(tuple(dims))
        return input_grad, None, None, None, None, None
class Reduce_3D(torch.autograd.Function):
    """All-reduce (sum) the input across a parallel group.

    The backward pass is the identity: every rank already holds the full
    reduced gradient, so it flows through unchanged.
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, input_: Tensor, depth: int,
                parallel_mode: ParallelMode) -> Tensor:
        group = gpc.get_group(parallel_mode)
        # all_reduce works in place; return a clone so autograd does not
        # see the input tensor mutated.
        dist.all_reduce(input_, group=group)
        out = input_.clone()
        return out
    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # Grads only for the tensor arg; None for depth and parallel_mode.
        grad_tuple = (output_grad, None, None)
        return grad_tuple
# class Slice_3D(torch.autograd.Function):
# """Slice input tensor
# """
# @staticmethod
# @custom_fwd(cast_inputs=torch.float16)
# def forward(ctx: Any, input_: Tensor, dim: int, depth: int,
# parallel_mode: ParallelMode) -> Tensor:
# rank = gpc.get_local_rank(parallel_mode)
# out = torch.chunk(input_, depth, dim=dim)[rank].contiguous()
# ctx.depth = depth
# ctx.parallel_mode = parallel_mode
# ctx.dim = dim
# ctx.input_shape = input_.shape
# return out
# @staticmethod
# @custom_bwd
# def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
# with torch.no_grad():
# input_grad = all_gather(output_grad, ctx.dim, ctx.parallel_mode)
# input_grad.reshape(ctx.input_shape)
# return input_grad, None, None, None
| [
"colossalai.core.global_context.get_local_rank",
"colossalai.communication.all_gather",
"colossalai.communication.reduce_scatter",
"colossalai.core.global_context.get_group",
"colossalai.communication.all_reduce",
"colossalai.core.global_context.get_ranks_in_group"
] | [((462, 499), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (472, 499), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((5920, 5957), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (5930, 5957), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((9720, 9757), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (9730, 9757), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((11983, 12020), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (11993, 12020), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((14145, 14182), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (14155, 14182), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((16390, 16427), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (16400, 16427), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((18313, 18350), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (18323, 18350), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((20498, 20535), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (20508, 20535), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((21761, 21798), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (21771, 21798), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((1110, 1160), 'colossalai.communication.all_gather', 'all_gather', (['input_', 'input_dim', 
'input_parallel_mode'], {}), '(input_, input_dim, input_parallel_mode)\n', (1120, 1160), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((1178, 1210), 'torch.cat', 'torch.cat', (['input_'], {'dim': 'input_dim'}), '(input_, dim=input_dim)\n', (1187, 1210), False, 'import torch\n'), ((1347, 1375), 'torch.matmul', 'torch.matmul', (['input_', 'weight'], {}), '(input_, weight)\n', (1359, 1375), False, 'import torch\n'), ((1393, 1449), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['output', 'output_dim', 'output_parallel_mode'], {}), '(output, output_dim, output_parallel_mode)\n', (1407, 1449), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((7051, 7072), 'torch.sqrt', 'torch.sqrt', (['(var + eps)'], {}), '(var + eps)\n', (7061, 7072), False, 'import torch\n'), ((10393, 10438), 'colossalai.communication.all_gather', 'all_gather', (['A', 'input_dim', 'input_parallel_mode'], {}), '(A, input_dim, input_parallel_mode)\n', (10403, 10438), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((10456, 10503), 'colossalai.communication.all_gather', 'all_gather', (['B', 'weight_dim', 'weight_parallel_mode'], {}), '(B, weight_dim, weight_parallel_mode)\n', (10466, 10503), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((10517, 10545), 'torch.matmul', 'torch.matmul', (['A_temp', 'B_temp'], {}), '(A_temp, B_temp)\n', (10529, 10545), False, 'import torch\n'), ((10560, 10611), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['C', 'output_dim', 'output_parallel_mode'], {}), '(C, output_dim, output_parallel_mode)\n', (10574, 10611), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((12544, 12589), 'colossalai.communication.all_gather', 'all_gather', (['A', 'input_dim', 'input_parallel_mode'], {}), '(A, input_dim, input_parallel_mode)\n', (12554, 12589), 
False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((12607, 12654), 'colossalai.communication.all_gather', 'all_gather', (['B', 'weight_dim', 'weight_parallel_mode'], {}), '(B, weight_dim, weight_parallel_mode)\n', (12617, 12654), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((12727, 12778), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['C', 'output_dim', 'output_parallel_mode'], {}), '(C, output_dim, output_parallel_mode)\n', (12741, 12778), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((14706, 14751), 'colossalai.communication.all_gather', 'all_gather', (['A', 'input_dim', 'input_parallel_mode'], {}), '(A, input_dim, input_parallel_mode)\n', (14716, 14751), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((14818, 14865), 'colossalai.communication.all_gather', 'all_gather', (['B', 'weight_dim', 'weight_parallel_mode'], {}), '(B, weight_dim, weight_parallel_mode)\n', (14828, 14865), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((14987, 15038), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['C', 'output_dim', 'output_parallel_mode'], {}), '(C, output_dim, output_parallel_mode)\n', (15001, 15038), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((16744, 16787), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (16766, 16787), True, 'from colossalai.core import global_context as gpc\n'), ((17069, 17116), 'colossalai.communication.all_gather', 'all_gather', (['bias_temp', '(-1)', 'weight_parallel_mode'], {}), '(bias_temp, -1, weight_parallel_mode)\n', (17079, 17116), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((18667, 18710), 
'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (18689, 18710), True, 'from colossalai.core import global_context as gpc\n'), ((19010, 19057), 'colossalai.communication.all_gather', 'all_gather', (['bias_temp', '(-1)', 'weight_parallel_mode'], {}), '(bias_temp, -1, weight_parallel_mode)\n', (19020, 19057), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((19147, 19175), 'torch.mul', 'torch.mul', (['input_', 'bias_temp'], {}), '(input_, bias_temp)\n', (19156, 19175), False, 'import torch\n'), ((20790, 20833), 'torch.sum', 'torch.sum', (['input_'], {'dim': 'dim', 'keepdim': 'keepdim'}), '(input_, dim=dim, keepdim=keepdim)\n', (20799, 20833), False, 'import torch\n'), ((2572, 2587), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2585, 2587), False, 'import torch\n'), ((4006, 4071), 'colossalai.communication.all_gather', 'all_gather', (['output_grad', 'ctx.output_dim', 'ctx.output_parallel_mode'], {}), '(output_grad, ctx.output_dim, ctx.output_parallel_mode)\n', (4016, 4071), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((4135, 4177), 'torch.cat', 'torch.cat', (['output_grad'], {'dim': 'ctx.output_dim'}), '(output_grad, dim=ctx.output_dim)\n', (4144, 4177), False, 'import torch\n'), ((4302, 4388), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['input_grad', 'ctx.input_dim', 'ctx.input_parallel_mode'], {'async_op': '(True)'}), '(input_grad, ctx.input_dim, ctx.input_parallel_mode, async_op\n =True)\n', (4316, 4388), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((5519, 5583), 'colossalai.communication.all_reduce', 'all_reduce', (['weight_grad', 'ctx.weight_parallel_mode'], {'async_op': '(True)'}), '(weight_grad, ctx.weight_parallel_mode, async_op=True)\n', (5529, 5583), False, 'from colossalai.communication import all_gather, 
all_reduce, reduce_scatter\n'), ((8102, 8117), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8115, 8117), False, 'import torch\n'), ((8364, 8407), 'colossalai.communication.all_reduce', 'all_reduce', (['grads', 'ctx.weight_parallel_mode'], {}), '(grads, ctx.weight_parallel_mode)\n', (8374, 8407), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((8428, 8470), 'colossalai.communication.all_reduce', 'all_reduce', (['grads', 'ctx.input_parallel_mode'], {}), '(grads, ctx.input_parallel_mode)\n', (8438, 8470), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((11073, 11088), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11086, 11088), False, 'import torch\n'), ((13240, 13255), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13253, 13255), False, 'import torch\n'), ((15500, 15515), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15513, 15515), False, 'import torch\n'), ((16822, 16862), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['output_parallel_mode'], {}), '(output_parallel_mode)\n', (16840, 16862), True, 'from colossalai.core import global_context as gpc\n'), ((17558, 17573), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17571, 17573), False, 'import torch\n'), ((17737, 17788), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['grad', '(-1)', 'ctx.B_group_parallel_mode'], {}), '(grad, -1, ctx.B_group_parallel_mode)\n', (17751, 17788), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((18745, 18785), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['output_parallel_mode'], {}), '(output_parallel_mode)\n', (18763, 18785), True, 'from colossalai.core import global_context as gpc\n'), ((19583, 19598), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19596, 19598), False, 'import torch\n'), ((19700, 19728), 'torch.mul', 'torch.mul', (['output_grad', 
'bias'], {}), '(output_grad, bias)\n', (19709, 19728), False, 'import torch\n'), ((19768, 19798), 'torch.mul', 'torch.mul', (['output_grad', 'input_'], {}), '(output_grad, input_)\n', (19777, 19798), False, 'import torch\n'), ((19934, 19985), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['grad', '(-1)', 'ctx.B_group_parallel_mode'], {}), '(grad, -1, ctx.B_group_parallel_mode)\n', (19948, 19985), False, 'from colossalai.communication import all_gather, all_reduce, reduce_scatter\n'), ((21159, 21174), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (21172, 21174), False, 'import torch\n'), ((6768, 6807), 'torch.sum', 'torch.sum', (['input_'], {'dim': '(-1)', 'keepdim': '(True)'}), '(input_, dim=-1, keepdim=True)\n', (6777, 6807), False, 'import torch\n'), ((6928, 6968), 'torch.sum', 'torch.sum', (['(mu ** 2)'], {'dim': '(-1)', 'keepdim': '(True)'}), '(mu ** 2, dim=-1, keepdim=True)\n', (6937, 6968), False, 'import torch\n'), ((9166, 9203), 'torch.sum', 'torch.sum', (['dvar'], {'dim': '(-1)', 'keepdim': '(True)'}), '(dvar, dim=-1, keepdim=True)\n', (9175, 9203), False, 'import torch\n'), ((9340, 9378), 'torch.sum', 'torch.sum', (['dmean'], {'dim': '(-1)', 'keepdim': '(True)'}), '(dmean, dim=-1, keepdim=True)\n', (9349, 9378), False, 'import torch\n'), ((16997, 17031), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (17010, 17031), True, 'from colossalai.core import global_context as gpc\n'), ((17953, 17998), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (17971, 17998), True, 'from colossalai.core import global_context as gpc\n'), ((18023, 18068), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.C_group_parallel_mode'], {}), '(ctx.C_group_parallel_mode)\n', (18041, 18068), True, 'from colossalai.core import global_context as gpc\n'), ((18938, 18972), 
'colossalai.core.global_context.get_group', 'gpc.get_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (18951, 18972), True, 'from colossalai.core import global_context as gpc\n'), ((20150, 20195), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (20168, 20195), True, 'from colossalai.core import global_context as gpc\n'), ((20220, 20265), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.C_group_parallel_mode'], {}), '(ctx.C_group_parallel_mode)\n', (20238, 20265), True, 'from colossalai.core import global_context as gpc\n'), ((20869, 20897), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['parallel_mode'], {}), '(parallel_mode)\n', (20882, 20897), True, 'from colossalai.core import global_context as gpc\n'), ((21392, 21429), 'torch.unsqueeze', 'torch.unsqueeze', (['output_grad', 'ctx.dim'], {}), '(output_grad, ctx.dim)\n', (21407, 21429), False, 'import torch\n'), ((21947, 21975), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['parallel_mode'], {}), '(parallel_mode)\n', (21960, 21975), True, 'from colossalai.core import global_context as gpc\n'), ((8214, 8251), 'torch.stack', 'torch.stack', (['[bias_grad, weight_grad]'], {}), '([bias_grad, weight_grad])\n', (8225, 8251), False, 'import torch\n'), ((17896, 17936), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (17909, 17936), True, 'from colossalai.core import global_context as gpc\n'), ((20093, 20133), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (20106, 20133), True, 'from colossalai.core import global_context as gpc\n'), ((21274, 21298), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ctx.group'], {}), '(ctx.group)\n', (21287, 21298), True, 'from colossalai.core 
import global_context as gpc\n'), ((5445, 5478), 'torch.unsqueeze', 'torch.unsqueeze', (['bias_grad'], {'dim': '(0)'}), '(bias_grad, dim=0)\n', (5460, 5478), False, 'import torch\n')] |
import torch
from colossalai.tensor.op_wrapper import colo_op_impl
from colossalai.tensor import ColoTensor
@colo_op_impl(torch.nn.functional.cross_entropy)
def colo_cross_entropy(types, args=(), kwargs=None, pg=None):
    """Dispatch ``torch.nn.functional.cross_entropy`` for ColoTensor inputs.

    Extracts ``input``/``target``/``weight`` from positional or keyword
    arguments, unwraps any ColoTensor into its underlying torch tensor,
    runs the regular cross-entropy, and wraps the result back.
    """
    # Fix: kwargs defaults to None; membership tests below would raise
    # TypeError without this guard.
    if kwargs is None:
        kwargs = {}
    # Fix: initialize so the call below never sees unbound names when an
    # argument is supplied neither positionally nor by keyword.
    input_tensor = None
    target = None
    weight = None
    arg_num = len(args)
    if arg_num > 0:
        input_tensor = args[0]
    if arg_num > 1:
        target = args[1]
    if arg_num > 2:
        # Fix: was args[3] — an off-by-one that raised IndexError whenever
        # weight was passed as the third positional argument.
        weight = args[2]
    if 'input' in kwargs:
        input_tensor = kwargs['input']
    if 'target' in kwargs:
        target = kwargs['target']
    if 'weight' in kwargs:
        weight = kwargs['weight']
    if isinstance(input_tensor, ColoTensor):
        input_tensor = input_tensor.torch_tensor()
    if isinstance(target, ColoTensor):
        target = target.torch_tensor()
    return ColoTensor.init_from_torch_tensor(
        torch.nn.functional.cross_entropy(input_tensor, target, weight))
| [
"colossalai.tensor.op_wrapper.colo_op_impl"
] | [((111, 158), 'colossalai.tensor.op_wrapper.colo_op_impl', 'colo_op_impl', (['torch.nn.functional.cross_entropy'], {}), '(torch.nn.functional.cross_entropy)\n', (123, 158), False, 'from colossalai.tensor.op_wrapper import colo_op_impl\n'), ((796, 859), 'torch.nn.functional.cross_entropy', 'torch.nn.functional.cross_entropy', (['input_tensor', 'target', 'weight'], {}), '(input_tensor, target, weight)\n', (829, 859), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
try:
import colossal_C
except:
print('Colossalai should be built with cuda extension to use the FP16 optimizer')
from torch.optim import Optimizer
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.logging import get_dist_logger
from colossalai.utils import (print_rank_0, copy_tensor_parallel_attributes,
clip_grad_norm_fp32, count_zeros_fp32, multi_tensor_applier)
def _zero_grad_group_helper(group, set_to_none):
"""Zero out the gradient for a group of parameters.
Note: copied from torch.optim.optimizer."""
for param in group:
if param.grad is not None:
if set_to_none:
param.grad = None
else:
if param.grad.grad_fn is not None:
param.grad.detach_()
else:
param.grad.requires_grad_(False)
param.grad.zero_()
def _multi_tensor_copy_this_to_that(this, that, overflow_buf=None):
"""Use multi-tensor-applier to copy values from one list to another.
We don't have a blfoat16 implementation so for now if the overflow_buf
is not provided, we default back to simple loop copy to be compatible
with bfloat16."""
if overflow_buf:
overflow_buf.fill_(0)
# Scaling with factor `1.0` is equivalent to copy.
multi_tensor_applier(colossal_C.multi_tensor_scale,
overflow_buf,
[this, that],
1.0)
else:
for this_, that_ in zip(this, that):
that_.copy_(this_)
class DynamicGradScaler:
    """Gradient scaler with a dynamic loss scale adjusted during training.

    The scale is multiplied by ``backoff_factor`` after ``hysteresis``
    overflow steps, and by ``growth_factor`` after ``growth_interval``
    consecutive overflow-free steps, clamped to ``[min_scale, max_scale]``.
    """

    def __init__(self,
                 initial_scale,
                 min_scale,
                 growth_factor,
                 backoff_factor,
                 growth_interval,
                 hysteresis,
                 max_scale: int = None):
        """
        :param initial_scale: starting loss scale (> 0)
        :param min_scale: lower bound on the scale (0 < min_scale <= initial_scale)
        :param growth_factor: multiplier used when growing the scale (> 1)
        :param backoff_factor: multiplier used on overflow (0 < backoff_factor < 1)
        :param growth_interval: overflow-free steps required before growing
        :param hysteresis: overflow steps tolerated before backing off
        :param max_scale: optional upper bound on the scale
        """
        assert initial_scale > 0.0
        self._scale = torch.cuda.FloatTensor([initial_scale])
        # Lower bound on the scale.
        assert min_scale > 0.0
        assert min_scale <= initial_scale
        self.min_scale = torch.cuda.FloatTensor([min_scale])
        # Growth and backoff factors for the scale.
        assert growth_factor > 1.0
        self.growth_factor = torch.cuda.FloatTensor([growth_factor])
        assert backoff_factor < 1.0
        assert backoff_factor > 0.0
        self.backoff_factor = torch.cuda.FloatTensor([backoff_factor])
        # Interval over which if we don't see any inf/nan,
        # we will scale the grad scale by the growth factor.
        assert growth_interval > 0
        self.growth_interval = growth_interval
        # Number of inf/nans we should see before scaling down
        # the grad scale by the backoff factor.
        assert hysteresis > 0
        self.hysteresis = hysteresis
        if max_scale is not None:
            assert max_scale > 1 and initial_scale <= max_scale
        # BUGFIX: always define _max_scale. Previously it was assigned only
        # when max_scale was not None, so update() and state_dict() raised
        # AttributeError for scalers constructed without a max_scale.
        self._max_scale = max_scale
        # Trackers.
        self._growth_tracker = 0
        self._hysteresis_tracker = self.hysteresis
        self._logger = get_dist_logger()

    @property
    def scale(self):
        """Current loss scale (1-element CUDA float tensor)."""
        return self._scale

    @property
    def inv_scale(self):
        """Reciprocal of the current scale, computed in double precision."""
        return self._scale.double().reciprocal().float()

    def update(self, found_inf):
        """Update the scale given whether an inf/nan was seen this step."""
        # If we have an inf/nan, growth tracker is set to 0
        # and hysterisis tracker is reduced by 1.
        if found_inf:
            self._growth_tracker = 0
            self._hysteresis_tracker -= 1
            # Now if we are out of hysteresis count, scale down the loss.
            if self._hysteresis_tracker <= 0:
                self._scale = torch.max(self._scale * self.backoff_factor,
                                        self.min_scale)
                self._logger.info(f'overflow occurs, loss scale is adjusted to {self._scale}', ranks=[0])
        else:
            # If there is no nan/inf, increment the growth tracker.
            self._growth_tracker += 1
            # If we have had enough consequitive intervals with no nan/inf:
            if self._growth_tracker == self.growth_interval:
                # Reset the tracker and hysteresis trackers,
                self._growth_tracker = 0
                self._hysteresis_tracker = self.hysteresis
                # and scale up the loss scale.
                if self._max_scale is not None and self._scale >= self._max_scale:
                    self._logger.info(
                        f'Current loss scale {self._scale} has reached the max scale {self._max_scale} allowed', ranks=[0])
                else:
                    self._scale = self._scale * self.growth_factor
                    self._logger.info(f'no consecutive overflow, loss scale is adjusted to {self._scale}', ranks=[0])

    def state_dict(self):
        """Serialize the scaler state for checkpointing."""
        state_dict = {}
        state_dict['max_scale'] = self._max_scale
        state_dict['scale'] = self._scale
        state_dict['growth_tracker'] = self._growth_tracker
        state_dict['hysteresis_tracker'] = self._hysteresis_tracker
        return state_dict

    def load_state_dict(self, state_dict):
        """Restore scaler state produced by :meth:`state_dict`."""
        self._scale = state_dict['scale'].cuda(torch.cuda.current_device())
        self._growth_tracker = state_dict['growth_tracker']
        self._hysteresis_tracker = state_dict['hysteresis_tracker']
        self._max_scale = state_dict['max_scale']
class FP16Optimizer(Optimizer):
    """Float16 optimizer for fp16 and bf16 data types.

    Wraps a base optimizer, keeping an fp32 master copy of every fp16/bf16
    parameter.  Each step unscales the gradients, checks for inf/nan across
    the tensor-parallel group, steps the base optimizer on the fp32 copies,
    and copies the results back into the fp16 model parameters.

    :param optimizer: base optimizer such as Adam or SGD
    :type optimizer: torch.optim.Optimizer
    :param clip_grad: clip gradients with this global L2 norm. Note that clipping is ignored if clip_grad == 0
    :type clip_grad: float
    :param log_num_zeros_in_grad: return number of zeros in the gradients.
    :type log_num_zeros_in_grad: bool
    :param initial_scale: initial scale of gradient scaler
    :type initial_scale: int
    :param growth_factor: the growth rate of loss scale
    :type growth_factor: int
    :param backoff_factor: the decrease rate of loss scale
    :type backoff_factor: float
    :param hysteresis: delay shift in dynamic loss scaling
    :type hysteresis: int
    :param max_scale: maximum loss scale allowed
    :type max_scale: int
    """

    def __init__(self,
                 optimizer,
                 clip_grad=0,
                 log_num_zeros_in_grad=False,
                 initial_scale=2 ** 32,
                 min_scale=1,
                 growth_factor=2,
                 backoff_factor=0.5,
                 growth_interval=1000,
                 hysteresis=2,
                 max_scale: int = 2 ** 32):
        # default args for compatibility
        bf16 = False
        params_have_main_grad = True
        # have a defaults for compatibility with pytorch optim
        self.defaults = optimizer.defaults
        # log config
        self._logger = get_dist_logger()
        self._logger.info(f"\n========= FP16 Optimizer Config =========\n"
                          f"Optimizer: {optimizer.__class__.__name__}\n"
                          f"clip_grad = {clip_grad}\n"
                          f"log_num_zeros_in_grad = {log_num_zeros_in_grad}\n"
                          f"initial_scale = {initial_scale}\n"
                          f"min_scale = {min_scale}\n"
                          f"growth_factor = {growth_factor}\n"
                          f"backoff_factor = {backoff_factor}\n"
                          f"growth_interval = {growth_interval}\n"
                          f"hysteresis = {hysteresis}\n"
                          f"==========================================", ranks=[0])
        """Input optimizer is the base optimizer for example Adam."""
        self.optimizer = optimizer
        assert self.optimizer, 'no optimizer is provided.'
        # Set gradient clipping and logging params.
        self.clip_grad = clip_grad
        self.log_num_zeros_in_grad = log_num_zeros_in_grad
        self.params_have_main_grad = params_have_main_grad
        self.bf16 = bf16
        self.grad_scaler = DynamicGradScaler(
            initial_scale=initial_scale,
            min_scale=min_scale,
            growth_factor=growth_factor,
            backoff_factor=backoff_factor,
            growth_interval=growth_interval,
            hysteresis=hysteresis,
            max_scale=max_scale
        )
        # None grad scaler is only supported for bf16.
        if self.grad_scaler is None:
            assert self.bf16, 'fp16 expects a grad scaler.'
        # Tensor used to determine if a nan/if has happend.
        # Any non-zero value indicates inf/nan.
        # Note that we keep this for the cases that grad scaler is none.
        # We still record nan/inf if we have a bfloat16 with a grad scaler.
        if self.grad_scaler:
            self.found_inf = torch.cuda.FloatTensor([0.0])
        # Dummy tensor needed for apex multi-apply tensor.
        # For bfloat, we don't have multi-tensor apply and for now
        # we set it to none so the multi-tensor apply gets ignored.
        if bf16:
            self._dummy_overflow_buf = None
        else:
            self._dummy_overflow_buf = torch.cuda.IntTensor([0])
        # In case grad scaler is not passed, define the unity scale.
        if self.grad_scaler is None:
            self._scale_one = torch.cuda.FloatTensor([1.0])
        # ======================
        # main parameter stuff
        # ======================
        # Three groups of parameters:
        #   float16_groups: original float16 parameters
        #   fp32_from_float16_groups: fp32 copy of float16 parameters
        #   fp32_from_fp32_groups: original fp32 parameters
        self.float16_groups = []
        self.fp32_from_float16_groups = []
        self.fp32_from_fp32_groups = []
        # For all the groups in the original optimizer:
        for param_group in self.optimizer.param_groups:
            float16_params_this_group = []
            fp32_params_this_group = []
            fp32_from_float16_params_this_group = []
            # For all the parameters in this group:
            for i, param in enumerate(param_group['params']):
                if param.requires_grad:
                    # float16 params:
                    if param.type() in ['torch.cuda.HalfTensor',
                                        'torch.cuda.BFloat16Tensor']:
                        float16_params_this_group.append(param)
                        # Create a copy
                        main_param = param.detach().clone().float()
                        # Copy tensor model parallel attributes.
                        copy_tensor_parallel_attributes(param, main_param)
                        # if hasattr(param, 'shared'):
                        #     main_param.shared = param.shared
                        # Replace the optimizer params with the new fp32 copy.
                        param_group['params'][i] = main_param
                        fp32_from_float16_params_this_group.append(main_param)
                        # Reset existing state dict key to the new main param.
                        if param in self.optimizer.state:
                            self.optimizer.state[main_param] \
                                = self.optimizer.state.pop(param)
                    # fp32 params.
                    elif param.type() == 'torch.cuda.FloatTensor':
                        fp32_params_this_group.append(param)
                        param_group['params'][i] = param
                    else:
                        raise TypeError('Wrapped parameters must be one of '
                                        'torch.cuda.FloatTensor, '
                                        'torch.cuda.HalfTensor, or '
                                        'torch.cuda.BFloat16Tensor. '
                                        'Received {}'.format(param.type()))
            self.float16_groups.append(float16_params_this_group)
            self.fp32_from_float16_groups.append(
                fp32_from_float16_params_this_group)
            self.fp32_from_fp32_groups.append(fp32_params_this_group)
        # Leverage state_dict() and load_state_dict() to
        # recast preexisting per-param state tensors
        self.optimizer.load_state_dict(self.optimizer.state_dict())

    def zero_grad(self, set_to_none=False):
        """We only need to zero the model related parameters, i.e.,
        float16_groups & fp32_from_fp32_groups."""
        for group in self.float16_groups:
            _zero_grad_group_helper(group, set_to_none)
        for group in self.fp32_from_fp32_groups:
            _zero_grad_group_helper(group, set_to_none)

    def get_loss_scale(self):
        """Return the current loss scale (a unity tensor if no grad scaler)."""
        if self.grad_scaler is None:
            return self._scale_one
        return self.grad_scaler.scale

    def _copy_model_grads_to_main_grads(self):
        """Copy fp16 model gradients into the fp32 master-parameter grads.

        NOTE(review): with params_have_main_grad=True this reads
        ``model_param.main_grad`` — assumes that attribute is populated
        elsewhere (e.g. by a DDP/grad-accumulation hook); confirm against
        the training loop.
        """
        # This only needs to be done for the float16 group.
        for model_group, main_group in zip(self.float16_groups,
                                           self.fp32_from_float16_groups):
            for model_param, main_param in zip(model_group, main_group):
                if self.params_have_main_grad:
                    main_param.grad = model_param.main_grad.float()
                else:
                    if model_param.grad is not None:
                        main_param.grad = model_param.grad.float()
        # For fp32 grads, we need to reset the grads to main grad.
        if self.params_have_main_grad:
            for model_group in self.fp32_from_fp32_groups:
                for model_param in model_group:
                    model_param.grad = model_param.main_grad

    def _unscale_main_grads_and_check_for_nan(self):
        """Unscale all master gradients in place and detect inf/nan.

        Returns True if any rank in the tensor-parallel group saw an
        inf/nan (the flag is all-reduced with MAX across the group).
        """
        main_grads = []
        # fp32 params fromm float16 ones.
        for main_group in self.fp32_from_float16_groups:
            for main_param in main_group:
                if main_param.grad is not None:
                    main_grads.append(main_param.grad.data)
        # Append fp32 parameters.
        for main_group in self.fp32_from_fp32_groups:
            for main_param in main_group:
                if main_param.grad is not None:
                    main_grads.append(main_param.grad.data)
        # Reset found inf.
        self.found_inf.fill_(0.0)
        # Unscale and set found inf/nan
        torch._amp_foreach_non_finite_check_and_unscale_(
            main_grads, self.found_inf, self.grad_scaler.inv_scale)
        # Update across all model parallel instances.
        torch.distributed.all_reduce(self.found_inf,
                                     op=torch.distributed.ReduceOp.MAX,
                                     group=gpc.get_group(ParallelMode.TENSOR))
        # Check for nan.
        found_inf_flag = (self.found_inf.item() > 0)
        return found_inf_flag

    def _get_model_and_main_params_data_float16(self):
        """Return parallel lists of (fp16 model data, fp32 master data)."""
        model_data = []
        main_data = []
        for model_group, main_group in zip(self.float16_groups,
                                           self.fp32_from_float16_groups):
            for model_param, main_param in zip(model_group, main_group):
                model_data.append(model_param.data)
                main_data.append(main_param.data)
        return model_data, main_data

    def _copy_main_params_to_model_params(self):
        """Copy updated fp32 master params back into the fp16 model params."""
        # Only needed for the float16 params.
        model_data, main_data = self._get_model_and_main_params_data_float16()
        _multi_tensor_copy_this_to_that(this=main_data, that=model_data,
                                        overflow_buf=self._dummy_overflow_buf)

    def _copy_model_params_to_main_params(self):
        """Copy fp16 model params into the fp32 master copies (reload)."""
        # Only needed for the float16 params.
        model_data, main_data = self._get_model_and_main_params_data_float16()
        _multi_tensor_copy_this_to_that(this=model_data, that=main_data,
                                        overflow_buf=self._dummy_overflow_buf)

    def reload_model_params(self):
        """Re-sync the fp32 master copies from the current model params."""
        self._copy_model_params_to_main_params()

    @torch.no_grad()
    def step(self):
        """Run one optimizer step.

        Returns a tuple ``(update_successful, grad_norm, num_zeros_in_grad)``;
        the step is skipped (False, None, None) when an inf/nan is detected.
        """
        # Copy gradients from model params to main params.
        self._copy_model_grads_to_main_grads()
        # Do unscale, check for inf, and update grad scaler only for
        # the case that grad scaler is provided.
        if self.grad_scaler:
            # Unscale and check for inf/nan.
            found_inf_flag = self._unscale_main_grads_and_check_for_nan()
            # We are done with scaling gradients
            # so we can update the loss scale.
            self.grad_scaler.update(found_inf_flag)
            # If we found inf/nan, skip the update.
            if found_inf_flag:
                return False, None, None
        # Clip the main gradients.
        grad_norm = None
        if self.clip_grad > 0.0:
            grad_norm = self.clip_grad_norm(self.clip_grad)
        # count the zeros in the grads
        num_zeros_in_grad = self.count_zeros() if \
            self.log_num_zeros_in_grad else None
        # Step the optimizer.
        self.optimizer.step()
        # Update params from main params.
        self._copy_main_params_to_model_params()
        # Successful update.
        return True, grad_norm, num_zeros_in_grad

    def state_dict(self):
        """Serialize base optimizer, grad scaler, and fp32 master params."""
        state_dict = {}
        state_dict['optimizer'] = self.optimizer.state_dict()
        if self.grad_scaler:
            state_dict['grad_scaler'] = self.grad_scaler.state_dict()
        state_dict['fp32_from_fp16_params'] = self.fp32_from_float16_groups
        return state_dict

    def load_state_dict(self, state_dict):
        """Restore state produced by :meth:`state_dict`.

        Tolerates older checkpoint layouts (different key names, missing
        grad-scaler entry) with a warning instead of failing.
        """
        # Optimizer.
        optimizer_key = 'optimizer'
        if optimizer_key not in state_dict:
            optimizer_key = 'optimizer_state_dict'
            print_rank_0('***WARNING*** loading optimizer from '
                         'an old checkpoint ...')
        self.optimizer.load_state_dict(state_dict[optimizer_key])
        # Grad scaler.
        if 'grad_scaler' not in state_dict:
            print_rank_0('***WARNING*** found an old checkpoint, will not '
                         'load grad scaler ...')
        else:
            if self.grad_scaler:
                self.grad_scaler.load_state_dict(state_dict['grad_scaler'])
            else:
                print_rank_0('***WARNING*** fould the grad scaler in the '
                             'checkpoint but it is None in the class. '
                             'Skipping loading grad scaler ...')
        # Copy data for the main params.
        fp32_from_float16_params_key = 'fp32_from_fp16_params'
        if fp32_from_float16_params_key not in state_dict:
            fp32_from_float16_params_key = 'fp32_from_fp16'
        for current_group, saved_group in zip(
                self.fp32_from_float16_groups,
                state_dict[fp32_from_float16_params_key]):
            for current_param, saved_param in zip(current_group, saved_group):
                current_param.data.copy_(saved_param.data)

    def get_parameters(self):
        """Return a flat list of all params managed by the base optimizer."""
        params = []
        for param_group in self.optimizer.param_groups:
            for param in param_group['params']:
                params.append(param)
        return params

    def clip_grad_norm(self, clip_grad):
        """Clip all managed gradients to the given global L2 norm."""
        params = self.get_parameters()
        return clip_grad_norm_fp32(params, clip_grad)

    def count_zeros(self):
        """Count zero-valued gradient entries across all managed params."""
        params = self.get_parameters()
        return count_zeros_fp32(params)

    def scale_loss(self, loss):
        """Simple scaling."""
        return self.get_loss_scale() * loss

    # Promote state so it can be retrieved or set via
    # "optimizer_instance.state"
    def _get_state(self):
        return self.optimizer.state

    def _set_state(self, value):
        self.optimizer.state = value

    state = property(_get_state, _set_state)

    # Promote param_groups so it can be retrieved or set via
    # "optimizer_instance.param_groups"
    # (for example, to adjust the learning rate)
    def _get_param_groups(self):
        return self.optimizer.param_groups

    def _set_param_groups(self, value):
        self.optimizer.param_groups = value

    param_groups = property(_get_param_groups, _set_param_groups)
| [
"colossalai.utils.print_rank_0",
"colossalai.logging.get_dist_logger",
"colossalai.utils.clip_grad_norm_fp32",
"colossalai.core.global_context.get_group",
"colossalai.utils.copy_tensor_parallel_attributes",
"colossalai.utils.count_zeros_fp32",
"colossalai.utils.multi_tensor_applier"
] | [((16274, 16289), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16287, 16289), False, 'import torch\n'), ((1471, 1559), 'colossalai.utils.multi_tensor_applier', 'multi_tensor_applier', (['colossal_C.multi_tensor_scale', 'overflow_buf', '[this, that]', '(1.0)'], {}), '(colossal_C.multi_tensor_scale, overflow_buf, [this,\n that], 1.0)\n', (1491, 1559), False, 'from colossalai.utils import print_rank_0, copy_tensor_parallel_attributes, clip_grad_norm_fp32, count_zeros_fp32, multi_tensor_applier\n'), ((2156, 2195), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['[initial_scale]'], {}), '([initial_scale])\n', (2178, 2195), False, 'import torch\n'), ((2331, 2366), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['[min_scale]'], {}), '([min_scale])\n', (2353, 2366), False, 'import torch\n'), ((2483, 2522), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['[growth_factor]'], {}), '([growth_factor])\n', (2505, 2522), False, 'import torch\n'), ((2625, 2665), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['[backoff_factor]'], {}), '([backoff_factor])\n', (2647, 2665), False, 'import torch\n'), ((3309, 3326), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (3324, 3326), False, 'from colossalai.logging import get_dist_logger\n'), ((7111, 7128), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (7126, 7128), False, 'from colossalai.logging import get_dist_logger\n'), ((14590, 14698), 'torch._amp_foreach_non_finite_check_and_unscale_', 'torch._amp_foreach_non_finite_check_and_unscale_', (['main_grads', 'self.found_inf', 'self.grad_scaler.inv_scale'], {}), '(main_grads, self.found_inf,\n self.grad_scaler.inv_scale)\n', (14638, 14698), False, 'import torch\n'), ((19548, 19586), 'colossalai.utils.clip_grad_norm_fp32', 'clip_grad_norm_fp32', (['params', 'clip_grad'], {}), '(params, clip_grad)\n', (19567, 19586), False, 'from colossalai.utils import print_rank_0, copy_tensor_parallel_attributes, 
clip_grad_norm_fp32, count_zeros_fp32, multi_tensor_applier\n'), ((19669, 19693), 'colossalai.utils.count_zeros_fp32', 'count_zeros_fp32', (['params'], {}), '(params)\n', (19685, 19693), False, 'from colossalai.utils import print_rank_0, copy_tensor_parallel_attributes, clip_grad_norm_fp32, count_zeros_fp32, multi_tensor_applier\n'), ((5392, 5419), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (5417, 5419), False, 'import torch\n'), ((9057, 9086), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['[0.0]'], {}), '([0.0])\n', (9079, 9086), False, 'import torch\n'), ((9396, 9421), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['[0]'], {}), '([0])\n', (9416, 9421), False, 'import torch\n'), ((9559, 9588), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['[1.0]'], {}), '([1.0])\n', (9581, 9588), False, 'import torch\n'), ((18008, 18082), 'colossalai.utils.print_rank_0', 'print_rank_0', (['"""***WARNING*** loading optimizer from an old checkpoint ..."""'], {}), "('***WARNING*** loading optimizer from an old checkpoint ...')\n", (18020, 18082), False, 'from colossalai.utils import print_rank_0, copy_tensor_parallel_attributes, clip_grad_norm_fp32, count_zeros_fp32, multi_tensor_applier\n'), ((18257, 18346), 'colossalai.utils.print_rank_0', 'print_rank_0', (['"""***WARNING*** found an old checkpoint, will not load grad scaler ..."""'], {}), "(\n '***WARNING*** found an old checkpoint, will not load grad scaler ...')\n", (18269, 18346), False, 'from colossalai.utils import print_rank_0, copy_tensor_parallel_attributes, clip_grad_norm_fp32, count_zeros_fp32, multi_tensor_applier\n'), ((3883, 3943), 'torch.max', 'torch.max', (['(self._scale * self.backoff_factor)', 'self.min_scale'], {}), '(self._scale * self.backoff_factor, self.min_scale)\n', (3892, 3943), False, 'import torch\n'), ((14930, 14964), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (14943, 14964), 
True, 'from colossalai.core import global_context as gpc\n'), ((18527, 18668), 'colossalai.utils.print_rank_0', 'print_rank_0', (['"""***WARNING*** fould the grad scaler in the checkpoint but it is None in the class. Skipping loading grad scaler ..."""'], {}), "(\n '***WARNING*** fould the grad scaler in the checkpoint but it is None in the class. Skipping loading grad scaler ...'\n )\n", (18539, 18668), False, 'from colossalai.utils import print_rank_0, copy_tensor_parallel_attributes, clip_grad_norm_fp32, count_zeros_fp32, multi_tensor_applier\n'), ((10865, 10915), 'colossalai.utils.copy_tensor_parallel_attributes', 'copy_tensor_parallel_attributes', (['param', 'main_param'], {}), '(param, main_param)\n', (10896, 10915), False, 'from colossalai.utils import print_rank_0, copy_tensor_parallel_attributes, clip_grad_norm_fp32, count_zeros_fp32, multi_tensor_applier\n')] |
#Adapted from https://github.com/hpcaitech/ColossalAI-Examples/blob/main/language/gpt/model/gpt1d.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import math
from colossalai.utils.activation_checkpoint import checkpoint
import torch
from torch import nn as nn, Tensor
from colossalai.core import global_context as gpc
from colossalai.nn.layer.utils import divide, ACT2FN
from colossalai.utils import checkpoint
from colossalai.nn.layer import Linear1D_Col, Linear1D_Row
from colossalai.nn.layer.base_layer import ParallelLayer
from colossalai import kernel
from colossalai.kernel.cuda_native.scaled_softmax import AttnMaskType
from colossalai import nn as col_nn
class MLP1D(ParallelLayer):
    """Two-layer feed-forward block with 1D tensor parallelism.

    Projects the hidden dimension up by ``mlp_ratio`` with a column-parallel
    linear layer, applies the activation, projects back down with a
    row-parallel linear layer, and applies dropout.  The forward pass may
    optionally be activation-checkpointed.
    """

    def __init__(self,
                 in_features: int,
                 mlp_ratio: float,
                 act_func: str = 'gelu',
                 dropout_prob: float = 0.,
                 dtype=None,
                 checkpoint: bool = False,
                 skip_bias_add: bool = False,
                 ):
        super().__init__()
        self.in_features = in_features
        self.mlp_ratio = mlp_ratio
        self.checkpoint = checkpoint
        self.skip_bias_add = skip_bias_add
        self.act = ACT2FN[act_func]
        intermediate_features = int(self.mlp_ratio * self.in_features)
        # Column-parallel up-projection; keep the output split across ranks.
        self.dense_1 = Linear1D_Col(
            self.in_features,
            intermediate_features,
            dtype=dtype,
            gather_output=False,
            skip_bias_add=False,
        )
        # Row-parallel down-projection back to the model dimension.
        self.dense_2 = Linear1D_Row(
            intermediate_features,
            self.in_features,
            dtype=dtype,
            parallel_input=True,
        )
        self.dropout = col_nn.Dropout(dropout_prob)

    def _forward(self, hidden_states: Tensor) -> Tensor:
        hidden = self.act(self.dense_1(hidden_states))
        return self.dropout(self.dense_2(hidden))

    def _checkpoint_forward(self, hidden_states: Tensor) -> Tensor:
        # Recompute activations in the backward pass instead of storing them.
        return checkpoint(self._forward, hidden_states)

    def forward(self, hidden_states: Tensor) -> Tensor:
        if self.checkpoint:
            return self._checkpoint_forward(hidden_states)
        return self._forward(hidden_states)
class GenericSelfAttention1D(ParallelLayer):
    """Multi-head self-attention with 1D tensor parallelism.

    The attention heads are partitioned across the tensor-parallel group;
    subclasses supply the masking/softmax step via ``softmax_forward``.
    """

    def __init__(self,
                 hidden_size: int,
                 num_attention_heads: int,
                 attention_dropout_prob: float,
                 hidden_dropout_prob: float,
                 dtype=None,
                 checkpoint: bool = False,
                 max_position_embeddings=1024,
                 ):
        super().__init__()
        self.hidden_size = hidden_size
        # Per-head size and per-rank partition sizes.
        self.attention_head_size = divide(hidden_size, num_attention_heads)
        self.num_attention_heads_per_partition = divide(num_attention_heads, gpc.tensor_parallel_size)
        self.hidden_size_per_partition = divide(hidden_size, gpc.tensor_parallel_size)
        self.checkpoint = checkpoint
        # Fused QKV projection, column-parallel (output stays split by rank).
        self.query_key_value = Linear1D_Col(
            hidden_size,
            3 * hidden_size,
            dtype=dtype,
        )
        self.attention_dropout = col_nn.Dropout(attention_dropout_prob)
        # Output projection, row-parallel (consumes the split input).
        self.dense = Linear1D_Row(
            hidden_size,
            hidden_size,
            dtype=dtype,
            parallel_input=True,
        )
        self.dropout = col_nn.Dropout(hidden_dropout_prob)

    def softmax_forward(self, attention_scores, attention_mask, query_layer, key_layer):
        """Mask and normalize the raw attention scores (subclass hook)."""
        raise NotImplementedError

    def _forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor:
        # assumes hidden_states is (batch, seq, hidden) — TODO confirm
        query_key_value = self.query_key_value(hidden_states)
        # Split the fused projection into per-head chunks:
        # (..., heads_per_rank, 3 * head_size), then move heads before seq.
        new_qkv_shape = query_key_value.shape[:-1] + \
            (self.num_attention_heads_per_partition, 3 * self.attention_head_size)
        query_key_value = query_key_value.view(new_qkv_shape)
        query_key_value = query_key_value.permute((0, 2, 1, 3))
        # q, k, v each get one third of the last dimension.
        query_layer, key_layer, value_layer = torch.chunk(
            query_key_value, 3, dim=-1)
        # Raw scores: q @ k^T over the head dimension.
        attention_scores = torch.matmul(
            query_layer, key_layer.transpose(-1, -2))
        attention_scores = self.softmax_forward(attention_scores, attention_mask, query_layer, key_layer)
        attention_scores = attention_scores.type(value_layer.dtype)
        attention_probs = self.attention_dropout(attention_scores)
        context_layer = torch.matmul(attention_probs, value_layer)
        # Move heads back after seq and merge (heads, head_size) into the
        # per-rank hidden dimension.
        context_layer = context_layer.transpose(1, 2)
        new_context_layer_shape = context_layer.size()[
            :-2] + (self.hidden_size_per_partition,)
        context_layer = context_layer.reshape(new_context_layer_shape)
        output = self.dense(context_layer)
        output = self.dropout(output)
        return output

    def _checkpoint_forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor:
        # Recompute activations in the backward pass instead of storing them.
        return checkpoint(self._forward, hidden_states, attention_mask)

    def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor:
        if self.checkpoint:
            return self._checkpoint_forward(hidden_states, attention_mask)
        else:
            return self._forward(hidden_states, attention_mask)
class DeepNetSelfAttention1D(GenericSelfAttention1D):
    """Self-attention with an explicit (non-fused) scaled, causally masked
    softmax implemented in plain PyTorch ops."""

    def __init__(self, hidden_size: int, num_attention_heads: int, attention_dropout_prob: float, hidden_dropout_prob: float, dtype=None, checkpoint: bool = False, max_position_embeddings=1024):
        super().__init__(hidden_size, num_attention_heads, attention_dropout_prob, hidden_dropout_prob,
                         dtype=dtype, checkpoint=checkpoint, max_position_embeddings=max_position_embeddings)
        self.softmax = nn.Softmax(dim=-1)
        max_positions = max_position_embeddings
        # Lower-triangular causal mask, shaped (1, 1, max_pos, max_pos) so it
        # broadcasts over (batch, heads, q_len, k_len) score tensors.
        self.register_buffer(
            "bias",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
                1, 1, max_positions, max_positions
            ),
        )
        # Large negative value used to blank out masked positions before softmax.
        self.register_buffer("masked_bias", torch.tensor(-1e4))

    def softmax_forward(self, attention_scores, attention_mask, query_layer, key_layer):
        """Scale scores by 1/sqrt(head_size), apply the causal mask (and the
        optional additive attention_mask), then softmax over the key axis."""
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # causal mask
        query_length, key_length = query_layer.size(-2), key_layer.size(-2)
        # Slice the precomputed triangle so queries can only attend to keys
        # at the same or earlier positions.
        causal_mask = self.bias[:, :, key_length - query_length: key_length, :key_length].bool()
        attention_scores = torch.where(causal_mask, attention_scores, self.masked_bias.to(attention_scores))
        if attention_mask is not None:
            # Apply the attention mask
            attention_scores = attention_scores + attention_mask
        attention_scores = self.softmax(attention_scores)
        return attention_scores
class FusedDeepNetSelfAttention1D(GenericSelfAttention1D):
    """Self-attention variant that delegates scaling, causal masking and
    softmax to the fused ``FusedScaleMaskSoftmax`` CUDA kernel."""

    def __init__(self, hidden_size: int, num_attention_heads: int, attention_dropout_prob: float,
                 hidden_dropout_prob: float, dtype=None, checkpoint: bool = False,
                 max_position_embeddings=1024):
        super().__init__(hidden_size, num_attention_heads, attention_dropout_prob, hidden_dropout_prob,
                         dtype=dtype, checkpoint=checkpoint, max_position_embeddings=max_position_embeddings)
        softmax_scale = math.sqrt(self.attention_head_size)
        self.softmax = kernel.FusedScaleMaskSoftmax(input_in_fp16=True,
                                                     input_in_bf16=False,
                                                     attn_mask_type=AttnMaskType.causal,
                                                     scaled_masked_softmax_fusion=True,
                                                     mask_func=None,
                                                     softmax_in_fp32=True,
                                                     scale=softmax_scale)

    def softmax_forward(self, attention_scores, attention_mask, query_layer, key_layer):
        """Apply the fused masked softmax; q/k are unused by the fused path."""
        return self.softmax(attention_scores, attention_mask)
class GenericDeepNetTransformerLayer1D(ParallelLayer):
    """One DeepNet-style transformer layer with 1D tensor parallelism.

    Each sub-layer (attention, MLP) is combined with its input as
    ``norm(x * alpha + sublayer(x))`` — the DeepNet residual scaling.
    Subclasses inject the concrete attention implementation and layer-norm
    class through the ``attention`` and ``layer_norm`` arguments.
    """

    def __init__(self,
                 hidden_size: int,
                 num_attention_heads: int,
                 act_func: str = 'gelu',
                 mlp_ratio: float = 4.0,
                 attention_dropout_prob: float = 0.,
                 hidden_dropout_prob: float = 0.,
                 dtype=None,
                 checkpoint: bool = False,
                 max_position_embeddings: int = 1024,
                 alpha: float = 1.0,
                 layer_norm_epsilon: float = 1e-5,
                 apply_post_layer_norm: bool = False,
                 attention=None,
                 layer_norm=None
                 ):
        super().__init__()
        self.checkpoint = checkpoint
        self.dtype = dtype
        # Submodules are created in this order on purpose: it fixes the
        # parameter registration order (state_dict / optimizer layout).
        self.norm1 = layer_norm(hidden_size, eps=layer_norm_epsilon)
        # Sub-layers never checkpoint themselves; checkpointing, when
        # enabled, wraps the whole layer in forward().
        self.attention = attention(
            hidden_size=hidden_size,
            num_attention_heads=num_attention_heads,
            attention_dropout_prob=attention_dropout_prob,
            hidden_dropout_prob=hidden_dropout_prob,
            dtype=dtype,
            max_position_embeddings=max_position_embeddings,
            checkpoint=False,
        )
        self.alpha = alpha
        self.norm2 = layer_norm(hidden_size, eps=layer_norm_epsilon)
        self.mlp = MLP1D(
            in_features=hidden_size,
            dropout_prob=hidden_dropout_prob,
            act_func=act_func,
            mlp_ratio=mlp_ratio,
            dtype=dtype,
            checkpoint=False,
        )

    def _forward(self, hidden_states, attention_mask) -> Tensor:
        attn_out = self.attention(hidden_states, attention_mask)
        hidden_states = self.norm1(hidden_states * self.alpha + attn_out)
        mlp_out = self.mlp(hidden_states)
        hidden_states = self.norm2(hidden_states * self.alpha + mlp_out)
        return (hidden_states, attention_mask)

    def forward(self, hidden_states, attention_mask):
        if self.checkpoint:
            return checkpoint(self._forward, hidden_states, attention_mask)
        return self._forward(hidden_states, attention_mask)
class DeepNetTransformerLayer1D(GenericDeepNetTransformerLayer1D):
    """DeepNet layer using the plain (non-fused) causal attention together
    with ``torch.nn.LayerNorm``."""

    def __init__(self, hidden_size: int, num_attention_heads: int, act_func: str = 'gelu',
                 mlp_ratio: float = 4, attention_dropout_prob: float = 0, hidden_dropout_prob: float = 0,
                 dtype=None, checkpoint: bool = False, max_position_embeddings: int = 1024,
                 layer_norm_epsilon: float = 0.00001, alpha: float = 1.0):
        super().__init__(hidden_size,
                         num_attention_heads,
                         act_func=act_func,
                         mlp_ratio=mlp_ratio,
                         attention_dropout_prob=attention_dropout_prob,
                         hidden_dropout_prob=hidden_dropout_prob,
                         dtype=dtype,
                         checkpoint=checkpoint,
                         max_position_embeddings=max_position_embeddings,
                         layer_norm_epsilon=layer_norm_epsilon,
                         alpha=alpha,
                         attention=DeepNetSelfAttention1D,
                         layer_norm=nn.LayerNorm)
class FusedDeepNetTransformerLayer1D(GenericDeepNetTransformerLayer1D):
    """DeepNet layer using the fused-softmax attention together with the
    ColossalAI fused ``kernel.LayerNorm``."""

    def __init__(self, hidden_size: int, num_attention_heads: int, act_func: str = 'gelu',
                 mlp_ratio: float = 4, attention_dropout_prob: float = 0, hidden_dropout_prob: float = 0,
                 dtype=None, checkpoint: bool = False, max_position_embeddings: int = 1024,
                 layer_norm_epsilon: float = 0.00001, alpha: float = 1.0):
        super().__init__(hidden_size,
                         num_attention_heads,
                         act_func=act_func,
                         mlp_ratio=mlp_ratio,
                         attention_dropout_prob=attention_dropout_prob,
                         hidden_dropout_prob=hidden_dropout_prob,
                         dtype=dtype,
                         checkpoint=checkpoint,
                         max_position_embeddings=max_position_embeddings,
                         layer_norm_epsilon=layer_norm_epsilon,
                         alpha=alpha,
                         attention=FusedDeepNetSelfAttention1D,
                         layer_norm=kernel.LayerNorm)
| [
"colossalai.utils.checkpoint",
"colossalai.nn.layer.Linear1D_Col",
"colossalai.nn.Dropout",
"colossalai.nn.layer.Linear1D_Row",
"colossalai.nn.layer.utils.divide"
] | [((1840, 1868), 'colossalai.nn.Dropout', 'col_nn.Dropout', (['dropout_prob'], {}), '(dropout_prob)\n', (1854, 1868), True, 'from colossalai import nn as col_nn\n'), ((2252, 2292), 'colossalai.utils.checkpoint', 'checkpoint', (['self._forward', 'hidden_states'], {}), '(self._forward, hidden_states)\n', (2262, 2292), False, 'from colossalai.utils import checkpoint\n'), ((3001, 3041), 'colossalai.nn.layer.utils.divide', 'divide', (['hidden_size', 'num_attention_heads'], {}), '(hidden_size, num_attention_heads)\n', (3007, 3041), False, 'from colossalai.nn.layer.utils import divide, ACT2FN\n'), ((3092, 3145), 'colossalai.nn.layer.utils.divide', 'divide', (['num_attention_heads', 'gpc.tensor_parallel_size'], {}), '(num_attention_heads, gpc.tensor_parallel_size)\n', (3098, 3145), False, 'from colossalai.nn.layer.utils import divide, ACT2FN\n'), ((3188, 3233), 'colossalai.nn.layer.utils.divide', 'divide', (['hidden_size', 'gpc.tensor_parallel_size'], {}), '(hidden_size, gpc.tensor_parallel_size)\n', (3194, 3233), False, 'from colossalai.nn.layer.utils import divide, ACT2FN\n'), ((3304, 3359), 'colossalai.nn.layer.Linear1D_Col', 'Linear1D_Col', (['hidden_size', '(3 * hidden_size)'], {'dtype': 'dtype'}), '(hidden_size, 3 * hidden_size, dtype=dtype)\n', (3316, 3359), False, 'from colossalai.nn.layer import Linear1D_Col, Linear1D_Row\n'), ((3445, 3483), 'colossalai.nn.Dropout', 'col_nn.Dropout', (['attention_dropout_prob'], {}), '(attention_dropout_prob)\n', (3459, 3483), True, 'from colossalai import nn as col_nn\n'), ((3506, 3578), 'colossalai.nn.layer.Linear1D_Row', 'Linear1D_Row', (['hidden_size', 'hidden_size'], {'dtype': 'dtype', 'parallel_input': '(True)'}), '(hidden_size, hidden_size, dtype=dtype, parallel_input=True)\n', (3518, 3578), False, 'from colossalai.nn.layer import Linear1D_Col, Linear1D_Row\n'), ((3667, 3702), 'colossalai.nn.Dropout', 'col_nn.Dropout', (['hidden_dropout_prob'], {}), '(hidden_dropout_prob)\n', (3681, 3702), True, 'from colossalai import 
nn as col_nn\n'), ((4289, 4328), 'torch.chunk', 'torch.chunk', (['query_key_value', '(3)'], {'dim': '(-1)'}), '(query_key_value, 3, dim=-1)\n', (4300, 4328), False, 'import torch\n'), ((4719, 4761), 'torch.matmul', 'torch.matmul', (['attention_probs', 'value_layer'], {}), '(attention_probs, value_layer)\n', (4731, 4761), False, 'import torch\n'), ((5216, 5272), 'colossalai.utils.checkpoint', 'checkpoint', (['self._forward', 'hidden_states', 'attention_mask'], {}), '(self._forward, hidden_states, attention_mask)\n', (5226, 5272), False, 'from colossalai.utils import checkpoint\n'), ((6033, 6051), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (6043, 6051), True, 'from torch import nn as nn, Tensor\n'), ((6370, 6392), 'torch.tensor', 'torch.tensor', (['(-10000.0)'], {}), '(-10000.0)\n', (6382, 6392), False, 'import torch\n'), ((6529, 6564), 'math.sqrt', 'math.sqrt', (['self.attention_head_size'], {}), '(self.attention_head_size)\n', (6538, 6564), False, 'import math\n'), ((10520, 10576), 'colossalai.utils.checkpoint', 'checkpoint', (['self._forward', 'hidden_states', 'attention_mask'], {}), '(self._forward, hidden_states, attention_mask)\n', (10530, 10576), False, 'from colossalai.utils import checkpoint\n'), ((8114, 8149), 'math.sqrt', 'math.sqrt', (['self.attention_head_size'], {}), '(self.attention_head_size)\n', (8123, 8149), False, 'import math\n'), ((6177, 6238), 'torch.ones', 'torch.ones', (['(max_positions, max_positions)'], {'dtype': 'torch.uint8'}), '((max_positions, max_positions), dtype=torch.uint8)\n', (6187, 6238), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
from colossalai.constants import (DEPTH_3D, INPUT_GROUP_3D, OUTPUT_GROUP_3D,
WEIGHT_GROUP_3D)
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from torch import Tensor
def get_depth_from_env() -> int:
    """Read the 3D tensor-parallel depth from the environment.

    Returns:
        int: the positive depth stored under the ``DEPTH_3D`` key.

    Raises:
        EnvironmentError: if ``DEPTH_3D`` is not set; the original
            :class:`KeyError` is chained as the cause.
        AssertionError: if the stored depth is not greater than zero.
    """
    try:
        depth = int(os.environ[DEPTH_3D])
        assert depth > 0, 'DEPTH must be greater than zero'
        return depth
    except KeyError as e:
        # Chain the KeyError so the original failure is visible in tracebacks.
        raise EnvironmentError(
            'DEPTH is not found in the current environment, '
            'please make sure that you have used the correct process group initializer'
        ) from e
def get_parallel_mode_from_env(group):
    """Return the :class:`ParallelMode` whose name is stored in the
    environment variable named by ``group``."""
    mode_name = os.environ[group]
    return getattr(ParallelMode, mode_name)
def get_last_group(a, b):
    """Given two of the three 3D parallel modes, return the remaining one."""
    letter_to_mode = {
        'A': ParallelMode.PARALLEL_3D_INPUT,
        'B': ParallelMode.PARALLEL_3D_WEIGHT,
        'C': ParallelMode.PARALLEL_3D_OUTPUT,
    }
    mode_to_letter = {mode: letter for letter, mode in letter_to_mode.items()}
    # Exactly one letter of {A, B, C} is left after removing the two given modes.
    (remaining,) = set('ABC') - {mode_to_letter[a], mode_to_letter[b]}
    return letter_to_mode[remaining]
def swap_in_out_group():
    """Exchange the group names stored for the 3D input and output groups."""
    saved_input = os.environ[INPUT_GROUP_3D]
    os.environ[INPUT_GROUP_3D] = os.environ[OUTPUT_GROUP_3D]
    os.environ[OUTPUT_GROUP_3D] = saved_input
def dbg_check_shape(tensor: Tensor, shape: tuple):
    """Debug helper: print the tensor's shape on global rank 0 and assert
    that it matches the expected ``shape``."""
    if gpc.get_global_rank() == 0:
        print(tensor.shape)
    assert tensor.shape == shape, \
        '{} does not match {}'.format(tensor.shape, shape)
| [
"colossalai.core.global_context.get_global_rank"
] | [((1522, 1543), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (1541, 1543), True, 'from colossalai.core import global_context as gpc\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os.path as osp
import pytest
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.initialize import initialize
from colossalai.logging import get_global_dist_logger
# Synthetic workload dimensions for the pipeline schedule test below.
NUM_BATCH = 128
BATCH_SIZE = 32
SEQ_LENGTH = 128
HIDDEN_SIZE = 512
# Resolve the pipeline config path relative to this test file.
DIR_PATH = osp.dirname(osp.realpath(__file__))
CONFIG_PATH = osp.join(DIR_PATH, '../configs/pipeline_vanilla_resnet.py')
@pytest.mark.skip("This test should be invoked using the test.sh provided")
@pytest.mark.dist
def test_schedule():
    """Run one forward/backward step of the pipeline schedule end to end,
    then apply the optimizer step and tear the context down."""
    engine, train_dataloader, test_dataloader = initialize(CONFIG_PATH)
    logger = get_global_dist_logger()
    schedule = engine._schedule
    output, label, loss = schedule.forward_backward_step(
        data_iter=iter(train_dataloader),
        model=engine.model,
        optimizer=engine.optimizer,
        criterion=engine.criterion,
        forward_only=False
    )
    schedule.optimizer_step(engine.model, engine.optimizer)
    # Only the last pipeline stage holds the loss.
    if gpc.is_last_rank(ParallelMode.PIPELINE):
        logger.info('losses: {}'.format(loss))
    gpc.destroy()
    logger.info('training finished')
# Allow running this test file directly without pytest.
if __name__ == '__main__':
    test_schedule()
| [
"colossalai.initialize.initialize",
"colossalai.core.global_context.is_last_rank",
"colossalai.core.global_context.destroy",
"colossalai.logging.get_global_dist_logger"
] | [((411, 470), 'os.path.join', 'osp.join', (['DIR_PATH', '"""../configs/pipeline_vanilla_resnet.py"""'], {}), "(DIR_PATH, '../configs/pipeline_vanilla_resnet.py')\n", (419, 470), True, 'import os.path as osp\n'), ((474, 548), 'pytest.mark.skip', 'pytest.mark.skip', (['"""This test should be invoked using the test.sh provided"""'], {}), "('This test should be invoked using the test.sh provided')\n", (490, 548), False, 'import pytest\n'), ((373, 395), 'os.path.realpath', 'osp.realpath', (['__file__'], {}), '(__file__)\n', (385, 395), True, 'import os.path as osp\n'), ((636, 659), 'colossalai.initialize.initialize', 'initialize', (['CONFIG_PATH'], {}), '(CONFIG_PATH)\n', (646, 659), False, 'from colossalai.initialize import initialize\n'), ((673, 697), 'colossalai.logging.get_global_dist_logger', 'get_global_dist_logger', ([], {}), '()\n', (695, 697), False, 'from colossalai.logging import get_global_dist_logger\n'), ((1089, 1128), 'colossalai.core.global_context.is_last_rank', 'gpc.is_last_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (1105, 1128), True, 'from colossalai.core import global_context as gpc\n'), ((1182, 1195), 'colossalai.core.global_context.destroy', 'gpc.destroy', ([], {}), '()\n', (1193, 1195), True, 'from colossalai.core import global_context as gpc\n')] |
import torch
import torch.distributed as dist
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device
def send_tensor_meta(tensor, need_meta=True, next_rank=None):
    """Sends tensor meta information before sending a specific tensor.
    Since the recipient must know the shape of the tensor in p2p communications,
    meta information of the tensor should be sent before communications. This function
    synchronizes with :func:`recv_tensor_meta`.
    :param tensor: Tensor to be sent
    :param need_meta: If False, meta information won't be sent
    :param next_rank: The rank of the next member in pipeline parallel group
    :type tensor: Tensor
    :type need_meta: bool, optional
    :type next_rank: int
    :return: False
    :rtype: bool
    """
    if not need_meta:
        return False
    if next_rank is None:
        next_rank = gpc.get_next_global_rank(ParallelMode.PIPELINE)
    meta_kwargs = {'dtype': torch.long, 'device': get_current_device()}
    shape_tensor = torch.tensor(tensor.size(), **meta_kwargs)
    ndims_tensor = torch.tensor(len(tensor.size()), **meta_kwargs)
    # The receiver reads the number of dims first, then the shape itself.
    dist.send(ndims_tensor, next_rank)
    dist.send(shape_tensor, next_rank)
    return False
def recv_tensor_meta(tensor_shape, prev_rank=None):
    """Receives tensor meta information before receiving a specific tensor.
    Since the recipient must know the shape of the tensor in p2p communications,
    meta information of the tensor should be received before communications. This function
    synchronizes with :func:`send_tensor_meta`.
    :param tensor_shape: The shape of the tensor to be received
    :param prev_rank: The rank of the source of the tensor
    :type tensor_shape: torch.Size
    :type prev_rank: int, optional
    :return: The shape of the tensor to be received
    :rtype: torch.Size
    """
    # If the shape is already known there is nothing to receive.
    if tensor_shape is not None:
        return tensor_shape
    if prev_rank is None:
        prev_rank = gpc.get_prev_global_rank(ParallelMode.PIPELINE)
    meta_kwargs = {'dtype': torch.long, 'device': get_current_device()}
    # Number of dims arrives first, then the shape itself.
    ndims_tensor = torch.empty((), **meta_kwargs)
    dist.recv(ndims_tensor, prev_rank)
    shape_tensor = torch.empty(ndims_tensor, **meta_kwargs)
    dist.recv(shape_tensor, prev_rank)
    return torch.Size(shape_tensor)
def split_tensor_into_1d_equal_chunks(tensor, new_buffer=False):
    """Break a tensor into equal 1D chunks."""
    world_size = gpc.get_world_size(ParallelMode.PARALLEL_1D)
    chunk_numel = torch.numel(tensor) // world_size
    begin = chunk_numel * gpc.get_local_rank(ParallelMode.PARALLEL_1D)
    end = begin + chunk_numel
    if not new_buffer:
        # Return a view into the flattened input; no copy is made.
        return tensor.view(-1)[begin:end]
    chunk = torch.empty(chunk_numel, dtype=tensor.dtype,
                        device=torch.cuda.current_device(),
                        requires_grad=False)
    chunk.copy_(tensor.view(-1)[begin:end])
    return chunk
def gather_split_1d_tensor(tensor):
    """Opposite of :func:`split_tensor_into_1d_equal_chunks`: gather the 1D
    chunks from all ranks of the 1D tensor-parallel group into one buffer."""
    world_size = gpc.get_world_size(ParallelMode.PARALLEL_1D)
    chunk_numel = torch.numel(tensor)
    gathered = torch.empty(world_size * chunk_numel, dtype=tensor.dtype,
                           device=torch.cuda.current_device(),
                           requires_grad=False)
    # Each rank's chunk lands in its own contiguous slice of the buffer.
    slices = [gathered[r * chunk_numel:(r + 1) * chunk_numel]
              for r in range(world_size)]
    dist.all_gather(slices, tensor, group=gpc.get_group(ParallelMode.PARALLEL_1D))
    return gathered
| [
"colossalai.core.global_context.get_prev_global_rank",
"colossalai.core.global_context.get_local_rank",
"colossalai.utils.get_current_device",
"colossalai.core.global_context.get_group",
"colossalai.core.global_context.get_world_size",
"colossalai.core.global_context.get_next_global_rank"
] | [((3203, 3247), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (3221, 3247), True, 'from colossalai.core import global_context as gpc\n'), ((3260, 3279), 'torch.numel', 'torch.numel', (['tensor'], {}), '(tensor)\n', (3271, 3279), False, 'import torch\n'), ((1208, 1240), 'torch.distributed.send', 'dist.send', (['send_ndims', 'next_rank'], {}), '(send_ndims, next_rank)\n', (1217, 1240), True, 'import torch.distributed as dist\n'), ((1249, 1281), 'torch.distributed.send', 'dist.send', (['send_shape', 'next_rank'], {}), '(send_shape, next_rank)\n', (1258, 1281), True, 'import torch.distributed as dist\n'), ((2160, 2192), 'torch.empty', 'torch.empty', (['()'], {}), '((), **tensor_kwargs)\n', (2171, 2192), False, 'import torch\n'), ((2201, 2233), 'torch.distributed.recv', 'dist.recv', (['recv_ndims', 'prev_rank'], {}), '(recv_ndims, prev_rank)\n', (2210, 2233), True, 'import torch.distributed as dist\n'), ((2255, 2295), 'torch.empty', 'torch.empty', (['recv_ndims'], {}), '(recv_ndims, **tensor_kwargs)\n', (2266, 2295), False, 'import torch\n'), ((2304, 2336), 'torch.distributed.recv', 'dist.recv', (['recv_shape', 'prev_rank'], {}), '(recv_shape, prev_rank)\n', (2313, 2336), True, 'import torch.distributed as dist\n'), ((2361, 2383), 'torch.Size', 'torch.Size', (['recv_shape'], {}), '(recv_shape)\n', (2371, 2383), False, 'import torch\n'), ((2544, 2563), 'torch.numel', 'torch.numel', (['tensor'], {}), '(tensor)\n', (2555, 2563), False, 'import torch\n'), ((2567, 2611), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (2585, 2611), True, 'from colossalai.core import global_context as gpc\n'), ((2647, 2691), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (2665, 2691), True, 'from colossalai.core 
import global_context as gpc\n'), ((935, 982), 'colossalai.core.global_context.get_next_global_rank', 'gpc.get_next_global_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (959, 982), True, 'from colossalai.core import global_context as gpc\n'), ((1040, 1060), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (1058, 1060), False, 'from colossalai.utils import get_current_device\n'), ((2011, 2058), 'colossalai.core.global_context.get_prev_global_rank', 'gpc.get_prev_global_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (2035, 2058), True, 'from colossalai.core import global_context as gpc\n'), ((2116, 2136), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (2134, 2136), False, 'from colossalai.utils import get_current_device\n'), ((3417, 3444), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (3442, 3444), False, 'import torch\n'), ((3608, 3647), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARALLEL_1D)\n', (3621, 3647), True, 'from colossalai.core import global_context as gpc\n'), ((2853, 2880), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (2878, 2880), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from typing import Any, Tuple
import torch
import torch.distributed as dist
from colossalai.communication import all_gather, reduce_scatter, scatter
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import empty_cache, get_current_device
from torch import Tensor
class Matmul_AB_3D(torch.autograd.Function):
    """Matrix multiplication for :math:`C = AB` under 3D tensor parallelism.
    Each rank holds a shard of A and B; full operands are rebuilt with
    all-gather, multiplied locally, and the product is re-sharded with
    reduce-scatter along the output dimension.
    """
    @staticmethod
    def forward(ctx: Any,
                A: Tensor,
                B: Tensor,
                depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = -1,
                output_dim: int = 0) -> Tensor:
        # A: [m/q^2, n, k/q]
        # B: [k/q, h/q^2]
        # C: [m/q^2, n, h/q]
        empty_cache()
        ctx.save_for_backward(A, B)
        assert A.shape[-1] == B.shape[0], \
            'Invalid shapes: A={}, B={}.'.format(A.shape, B.shape)
        # Rebuild the full operands from their shards for the local matmul.
        A_temp = all_gather(A, input_dim, input_parallel_mode)
        B_temp = all_gather(B, weight_dim, weight_parallel_mode)
        C = torch.matmul(A_temp, B_temp)
        # Re-shard the result along the output dimension.
        out = reduce_scatter(C, output_dim, output_parallel_mode)
        # Record the groups/dims so backward can route gradients symmetrically.
        ctx.depth = depth
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        ctx.A_dim = input_dim
        ctx.B_dim = weight_dim
        ctx.C_dim = output_dim
        return out
    @staticmethod
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        A, B = ctx.saved_tensors
        with torch.no_grad():
            # dA = dC @ B^T, evaluated with the same distributed layout.
            A_grad = Matmul_ABT_3D.apply(output_grad, B, ctx.depth,
                                         ctx.C_group_parallel_mode,
                                         ctx.B_group_parallel_mode,
                                         ctx.A_group_parallel_mode, ctx.C_dim,
                                         ctx.B_dim, ctx.A_dim)
            # dB = A^T @ dC.
            B_grad = Matmul_ATB_3D.apply(A, output_grad, ctx.depth,
                                         ctx.A_group_parallel_mode,
                                         ctx.C_group_parallel_mode,
                                         ctx.B_group_parallel_mode, ctx.A_dim,
                                         ctx.C_dim, ctx.B_dim)
        # One gradient slot per forward argument; non-tensor args get None.
        return A_grad, B_grad, None, None, None, None, None, None, None
class Matmul_ABT_3D(torch.autograd.Function):
    """Matrix multiplication for :math:`C = AB^T` under 3D tensor parallelism.
    Operands are rebuilt with all-gather, B is transposed for the local
    matmul, and the product is re-sharded with reduce-scatter.
    """
    @staticmethod
    def forward(ctx: Any,
                A: Tensor,
                B: Tensor,
                depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = -1,
                output_dim: int = 0) -> Tensor:
        # A: [m/q^2, n, h/q]
        # B: [k/q, h/q^2]
        # C: [m/q^2, n, k/q]
        empty_cache()
        ctx.save_for_backward(A, B)
        # Rebuild the full operands from their shards.
        A_temp = all_gather(A, input_dim, input_parallel_mode)
        B_temp = all_gather(B, weight_dim, weight_parallel_mode)
        # Transpose B locally: C = A @ B^T.
        C = torch.matmul(A_temp, B_temp.transpose(0, 1))
        out = reduce_scatter(C, output_dim, output_parallel_mode)
        # Record the groups/dims so backward can route gradients symmetrically.
        ctx.depth = depth
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        ctx.A_dim = input_dim
        ctx.B_dim = weight_dim
        ctx.C_dim = output_dim
        return out
    @staticmethod
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        A, B = ctx.saved_tensors
        with torch.no_grad():
            # dA = dC @ B.
            A_grad = Matmul_AB_3D.apply(output_grad, B, ctx.depth,
                                        ctx.C_group_parallel_mode,
                                        ctx.B_group_parallel_mode,
                                        ctx.A_group_parallel_mode, ctx.C_dim,
                                        ctx.B_dim, ctx.A_dim)
            # dB = dC^T @ A.
            B_grad = Matmul_ATB_3D.apply(output_grad, A, ctx.depth,
                                         ctx.C_group_parallel_mode,
                                         ctx.A_group_parallel_mode,
                                         ctx.B_group_parallel_mode, ctx.C_dim,
                                         ctx.A_dim, ctx.B_dim)
        # One gradient slot per forward argument; non-tensor args get None.
        return A_grad, B_grad, None, None, None, None, None, None, None
class Matmul_ATB_3D(torch.autograd.Function):
    """Matrix multiplication for :math:`C = A^TB` under 3D tensor parallelism.
    Operands are rebuilt with all-gather, flattened to 2D, A is transposed
    for the local matmul, and the product is re-sharded with reduce-scatter.
    """
    @staticmethod
    def forward(ctx: Any,
                A: Tensor,
                B: Tensor,
                depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode,
                input_dim: int = 0,
                weight_dim: int = 0,
                output_dim: int = -1) -> Tensor:
        # A: [m/q^2, n, k/q]
        # B: [m/q^2, n, h/q]
        # C: [k/q, h/q^2]
        empty_cache()
        ctx.save_for_backward(A, B)
        # Rebuild full operands, then flatten leading dims for a 2D matmul.
        A_temp = all_gather(A, input_dim, input_parallel_mode)
        A_temp = A_temp.reshape(-1, A.shape[-1])
        B_temp = all_gather(B, weight_dim, weight_parallel_mode)
        B_temp = B_temp.reshape(-1, B.shape[-1])
        # C = A^T @ B.
        C = torch.matmul(A_temp.transpose(0, 1), B_temp)
        out = reduce_scatter(C, output_dim, output_parallel_mode)
        # Record the groups/dims so backward can route gradients symmetrically.
        ctx.depth = depth
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        ctx.A_dim = input_dim
        ctx.B_dim = weight_dim
        ctx.C_dim = output_dim
        return out
    @staticmethod
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        A, B = ctx.saved_tensors
        with torch.no_grad():
            # dA = B @ dC^T.
            A_grad = Matmul_ABT_3D.apply(B, output_grad, ctx.depth,
                                         ctx.B_group_parallel_mode,
                                         ctx.C_group_parallel_mode,
                                         ctx.A_group_parallel_mode, ctx.B_dim,
                                         ctx.C_dim, ctx.A_dim)
            # dB = A @ dC.
            B_grad = Matmul_AB_3D.apply(A, output_grad, ctx.depth,
                                        ctx.A_group_parallel_mode,
                                        ctx.C_group_parallel_mode,
                                        ctx.B_group_parallel_mode, ctx.A_dim,
                                        ctx.C_dim, ctx.B_dim)
        # One gradient slot per forward argument; non-tensor args get None.
        return A_grad, B_grad, None, None, None, None, None, None, None
class Add_3D(torch.autograd.Function):
    """Matrix add bias: :math:`C = A + b` under 3D tensor parallelism.
    The bias shard is broadcast within the input group, then all-gathered
    along the weight group before being added to the input.
    """
    @staticmethod
    def forward(ctx: Any, input_: Tensor, bias: Tensor, depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode) -> Tensor:
        # input: [m/q^2, n, h/q]
        # bias: [h/q^2]
        # Pick the source rank that owns the bias shard for this output rank.
        ranks_in_group = gpc.get_ranks_in_group(input_parallel_mode)
        src_rank = ranks_in_group[gpc.get_local_rank(output_parallel_mode)]
        # Clone before broadcast so the caller's bias tensor is not mutated.
        bias_temp = bias.clone()
        dist.broadcast(bias_temp,
                       src=src_rank,
                       group=gpc.get_group(input_parallel_mode))
        # [h/q]
        bias_temp = all_gather(bias_temp, -1, weight_parallel_mode)
        out = input_ + bias_temp
        ctx.depth = depth
        ctx.src_rank = src_rank
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        return out
    @staticmethod
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # output_grad: [m/q^2, n, h/q]
        with torch.no_grad():
            # [h/q] -- sum over all non-feature dimensions.
            grad = torch.sum(output_grad,
                             dim=tuple(range(len(output_grad.shape))[:-1]))
            bias_grad = reduce_scatter(grad, -1, ctx.B_group_parallel_mode)
            # Accumulate the bias gradient onto the rank that owns the shard.
            dist.reduce(bias_grad,
                        dst=ctx.src_rank,
                        group=gpc.get_group(ctx.A_group_parallel_mode))
            # Only the owning rank keeps a bias gradient; others return None.
            if gpc.get_local_rank(
                    ctx.A_group_parallel_mode) != gpc.get_local_rank(
                        ctx.C_group_parallel_mode):
                bias_grad = None
        # Addition is linear, so the input gradient passes through unchanged.
        return output_grad, bias_grad, None, None, None, None
class Mul_3D(torch.autograd.Function):
    """Matrix multiplication for :math:`C = A * b` (elementwise scale by a
    sharded bias) under 3D tensor parallelism. The bias shard is broadcast
    within the input group, then all-gathered along the weight group.
    """
    @staticmethod
    def forward(ctx: Any, input_: Tensor, bias: Tensor, depth: int,
                input_parallel_mode: ParallelMode,
                weight_parallel_mode: ParallelMode,
                output_parallel_mode: ParallelMode) -> Tensor:
        # input: [m/q^2, n, h/q]
        # bias: [h/q^2]
        # Pick the source rank that owns the bias shard for this output rank.
        ranks_in_group = gpc.get_ranks_in_group(input_parallel_mode)
        src_rank = ranks_in_group[gpc.get_local_rank(output_parallel_mode)]
        # [h/q^2]
        # Clone before broadcast so the caller's bias tensor is not mutated.
        bias_temp = bias.clone()
        dist.broadcast(bias_temp,
                       src=src_rank,
                       group=gpc.get_group(input_parallel_mode))
        # [h/q]
        bias_temp = all_gather(bias_temp, -1, weight_parallel_mode)
        empty_cache()
        # Both factors are needed in backward (product rule).
        ctx.save_for_backward(input_, bias_temp)
        out = torch.mul(input_, bias_temp)
        ctx.depth = depth
        ctx.src_rank = src_rank
        ctx.A_group_parallel_mode = input_parallel_mode
        ctx.B_group_parallel_mode = weight_parallel_mode
        ctx.C_group_parallel_mode = output_parallel_mode
        return out
    @staticmethod
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # output_grad: [m/q^2, n, h/q]
        with torch.no_grad():
            input_, bias = ctx.saved_tensors
            # [m/q^2, n, h/q] -- d(input) = dC * bias
            input_grad = torch.mul(output_grad, bias)
            # [h/q] -- d(bias) = sum over non-feature dims of dC * input
            grad = torch.mul(output_grad, input_)
            grad = torch.sum(grad,
                             dim=tuple(range(len(output_grad.shape))[:-1]))
            bias_grad = reduce_scatter(grad, -1, ctx.B_group_parallel_mode)
            # Accumulate the bias gradient onto the rank that owns the shard.
            dist.reduce(bias_grad,
                        dst=ctx.src_rank,
                        group=gpc.get_group(ctx.A_group_parallel_mode))
            # Only the owning rank keeps a bias gradient; others return None.
            if gpc.get_local_rank(
                    ctx.A_group_parallel_mode) != gpc.get_local_rank(
                        ctx.C_group_parallel_mode):
                bias_grad = None
        return input_grad, bias_grad, None, None, None, None
class Sum_3D(torch.autograd.Function):
    """Sum a tensor along ``dim`` and all-reduce the result across the
    given parallel group."""
    @staticmethod
    def forward(ctx: Any,
                input_: Tensor,
                dim: int,
                depth: int,
                parallel_mode: ParallelMode,
                keepdim: bool = False) -> Tensor:
        # input: [m/q^2, n, h/q]
        summed = torch.sum(input_, dim=dim, keepdim=keepdim)
        dist.all_reduce(summed, group=gpc.get_group(parallel_mode))
        # Remember the original shape so backward can expand the gradient.
        ctx.input_shape = input_.shape
        ctx.depth = depth
        ctx.group = parallel_mode
        ctx.dim = dim
        return summed
    @staticmethod
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        with torch.no_grad():
            grad = output_grad.contiguous()
            dist.all_reduce(grad, group=gpc.get_group(ctx.group))
            # Restore the reduced dimension if it was squeezed out.
            if len(grad.shape) < len(ctx.input_shape):
                grad = torch.unsqueeze(grad, ctx.dim)
            repeats = [1] * len(grad.shape)
            repeats[ctx.dim] = ctx.input_shape[ctx.dim]
            input_grad = grad.repeat(tuple(repeats))
        return input_grad, None, None, None, None, None
class Reduce_3D(torch.autograd.Function):
    """All-reduce the input tensor across the given parallel group."""
    @staticmethod
    def forward(ctx: Any, input_: Tensor, depth: int,
                parallel_mode: ParallelMode) -> Tensor:
        group = gpc.get_group(parallel_mode)
        dist.all_reduce(input_, group=group)
        return input_.clone()
    @staticmethod
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # Summation is linear, so the gradient passes through unchanged.
        return output_grad, None, None
class Slice_3D(torch.autograd.Function):
    """Slice out this rank's chunk of the input tensor along ``dim``."""
    @staticmethod
    def forward(ctx: Any, input_: Tensor, dim: int, depth: int,
                parallel_mode: ParallelMode) -> Tensor:
        rank = gpc.get_local_rank(parallel_mode)
        out = torch.chunk(input_, depth, dim=dim)[rank].contiguous()
        # Remember the layout so backward can reassemble the full gradient.
        ctx.depth = depth
        ctx.parallel_mode = parallel_mode
        ctx.dim = dim
        ctx.input_shape = input_.shape
        return out
    @staticmethod
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        with torch.no_grad():
            input_grad = all_gather(output_grad, ctx.dim, ctx.parallel_mode)
            # BUG FIX: Tensor.reshape returns a new tensor; the original code
            # discarded its result, leaving input_grad un-reshaped.
            input_grad = input_grad.reshape(ctx.input_shape)
        return input_grad, None, None, None
| [
"colossalai.utils.empty_cache",
"colossalai.core.global_context.get_local_rank",
"colossalai.communication.all_gather",
"colossalai.communication.reduce_scatter",
"colossalai.core.global_context.get_group",
"colossalai.core.global_context.get_ranks_in_group"
] | [((991, 1004), 'colossalai.utils.empty_cache', 'empty_cache', ([], {}), '()\n', (1002, 1004), False, 'from colossalai.utils import empty_cache, get_current_device\n'), ((1171, 1216), 'colossalai.communication.all_gather', 'all_gather', (['A', 'input_dim', 'input_parallel_mode'], {}), '(A, input_dim, input_parallel_mode)\n', (1181, 1216), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((1234, 1281), 'colossalai.communication.all_gather', 'all_gather', (['B', 'weight_dim', 'weight_parallel_mode'], {}), '(B, weight_dim, weight_parallel_mode)\n', (1244, 1281), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((1295, 1323), 'torch.matmul', 'torch.matmul', (['A_temp', 'B_temp'], {}), '(A_temp, B_temp)\n', (1307, 1323), False, 'import torch\n'), ((1338, 1389), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['C', 'output_dim', 'output_parallel_mode'], {}), '(C, output_dim, output_parallel_mode)\n', (1352, 1389), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((3217, 3230), 'colossalai.utils.empty_cache', 'empty_cache', ([], {}), '()\n', (3228, 3230), False, 'from colossalai.utils import empty_cache, get_current_device\n'), ((3285, 3330), 'colossalai.communication.all_gather', 'all_gather', (['A', 'input_dim', 'input_parallel_mode'], {}), '(A, input_dim, input_parallel_mode)\n', (3295, 3330), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((3348, 3395), 'colossalai.communication.all_gather', 'all_gather', (['B', 'weight_dim', 'weight_parallel_mode'], {}), '(B, weight_dim, weight_parallel_mode)\n', (3358, 3395), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((3468, 3519), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['C', 'output_dim', 'output_parallel_mode'], {}), '(C, output_dim, output_parallel_mode)\n', (3482, 3519), False, 'from 
colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((5342, 5355), 'colossalai.utils.empty_cache', 'empty_cache', ([], {}), '()\n', (5353, 5355), False, 'from colossalai.utils import empty_cache, get_current_device\n'), ((5410, 5455), 'colossalai.communication.all_gather', 'all_gather', (['A', 'input_dim', 'input_parallel_mode'], {}), '(A, input_dim, input_parallel_mode)\n', (5420, 5455), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((5522, 5569), 'colossalai.communication.all_gather', 'all_gather', (['B', 'weight_dim', 'weight_parallel_mode'], {}), '(B, weight_dim, weight_parallel_mode)\n', (5532, 5569), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((5691, 5742), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['C', 'output_dim', 'output_parallel_mode'], {}), '(C, output_dim, output_parallel_mode)\n', (5705, 5742), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((7389, 7432), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (7411, 7432), True, 'from colossalai.core import global_context as gpc\n'), ((7714, 7761), 'colossalai.communication.all_gather', 'all_gather', (['bias_temp', '(-1)', 'weight_parallel_mode'], {}), '(bias_temp, -1, weight_parallel_mode)\n', (7724, 7761), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((9245, 9288), 'colossalai.core.global_context.get_ranks_in_group', 'gpc.get_ranks_in_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (9267, 9288), True, 'from colossalai.core import global_context as gpc\n'), ((9588, 9635), 'colossalai.communication.all_gather', 'all_gather', (['bias_temp', '(-1)', 'weight_parallel_mode'], {}), '(bias_temp, -1, weight_parallel_mode)\n', (9598, 9635), False, 'from colossalai.communication import all_gather, 
reduce_scatter, scatter\n'), ((9645, 9658), 'colossalai.utils.empty_cache', 'empty_cache', ([], {}), '()\n', (9656, 9658), False, 'from colossalai.utils import empty_cache, get_current_device\n'), ((9723, 9751), 'torch.mul', 'torch.mul', (['input_', 'bias_temp'], {}), '(input_, bias_temp)\n', (9732, 9751), False, 'import torch\n'), ((11299, 11342), 'torch.sum', 'torch.sum', (['input_'], {'dim': 'dim', 'keepdim': 'keepdim'}), '(input_, dim=dim, keepdim=keepdim)\n', (11308, 11342), False, 'import torch\n'), ((12816, 12849), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['parallel_mode'], {}), '(parallel_mode)\n', (12834, 12849), True, 'from colossalai.core import global_context as gpc\n'), ((1835, 1850), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1848, 1850), False, 'import torch\n'), ((3965, 3980), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3978, 3980), False, 'import torch\n'), ((6188, 6203), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6201, 6203), False, 'import torch\n'), ((7467, 7507), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['output_parallel_mode'], {}), '(output_parallel_mode)\n', (7485, 7507), True, 'from colossalai.core import global_context as gpc\n'), ((8187, 8202), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8200, 8202), False, 'import torch\n'), ((8366, 8417), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['grad', '(-1)', 'ctx.B_group_parallel_mode'], {}), '(grad, -1, ctx.B_group_parallel_mode)\n', (8380, 8417), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((9323, 9363), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['output_parallel_mode'], {}), '(output_parallel_mode)\n', (9341, 9363), True, 'from colossalai.core import global_context as gpc\n'), ((10143, 10158), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10156, 10158), False, 'import torch\n'), ((10260, 10288), 
'torch.mul', 'torch.mul', (['output_grad', 'bias'], {}), '(output_grad, bias)\n', (10269, 10288), False, 'import torch\n'), ((10328, 10358), 'torch.mul', 'torch.mul', (['output_grad', 'input_'], {}), '(output_grad, input_)\n', (10337, 10358), False, 'import torch\n'), ((10494, 10545), 'colossalai.communication.reduce_scatter', 'reduce_scatter', (['grad', '(-1)', 'ctx.B_group_parallel_mode'], {}), '(grad, -1, ctx.B_group_parallel_mode)\n', (10508, 10545), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((11652, 11667), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11665, 11667), False, 'import torch\n'), ((13172, 13187), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13185, 13187), False, 'import torch\n'), ((13214, 13265), 'colossalai.communication.all_gather', 'all_gather', (['output_grad', 'ctx.dim', 'ctx.parallel_mode'], {}), '(output_grad, ctx.dim, ctx.parallel_mode)\n', (13224, 13265), False, 'from colossalai.communication import all_gather, reduce_scatter, scatter\n'), ((7642, 7676), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (7655, 7676), True, 'from colossalai.core import global_context as gpc\n'), ((8582, 8627), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (8600, 8627), True, 'from colossalai.core import global_context as gpc\n'), ((8648, 8693), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.C_group_parallel_mode'], {}), '(ctx.C_group_parallel_mode)\n', (8666, 8693), True, 'from colossalai.core import global_context as gpc\n'), ((9516, 9550), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['input_parallel_mode'], {}), '(input_parallel_mode)\n', (9529, 9550), True, 'from colossalai.core import global_context as gpc\n'), ((10710, 10755), 'colossalai.core.global_context.get_local_rank', 
'gpc.get_local_rank', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (10728, 10755), True, 'from colossalai.core import global_context as gpc\n'), ((10776, 10821), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ctx.C_group_parallel_mode'], {}), '(ctx.C_group_parallel_mode)\n', (10794, 10821), True, 'from colossalai.core import global_context as gpc\n'), ((11378, 11406), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['parallel_mode'], {}), '(parallel_mode)\n', (11391, 11406), True, 'from colossalai.core import global_context as gpc\n'), ((11885, 11922), 'torch.unsqueeze', 'torch.unsqueeze', (['output_grad', 'ctx.dim'], {}), '(output_grad, ctx.dim)\n', (11900, 11922), False, 'import torch\n'), ((12397, 12425), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['parallel_mode'], {}), '(parallel_mode)\n', (12410, 12425), True, 'from colossalai.core import global_context as gpc\n'), ((8525, 8565), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (8538, 8565), True, 'from colossalai.core import global_context as gpc\n'), ((10653, 10693), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ctx.A_group_parallel_mode'], {}), '(ctx.A_group_parallel_mode)\n', (10666, 10693), True, 'from colossalai.core import global_context as gpc\n'), ((11767, 11791), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ctx.group'], {}), '(ctx.group)\n', (11780, 11791), True, 'from colossalai.core import global_context as gpc\n'), ((12864, 12899), 'torch.chunk', 'torch.chunk', (['input_', 'depth'], {'dim': 'dim'}), '(input_, depth, dim=dim)\n', (12875, 12899), False, 'import torch\n')] |
import inspect
from pathlib import Path
from functools import partial
import torch
from torch.autograd.profiler import profile
import torch.distributed as dist
from torch.distributed import ReduceOp
from colossalai.utils import get_current_device
from .prof_utils import BaseProfiler, _format_time, _format_memory, _format_bandwidth
from typing import List, Optional
def _get_code_location(depth: int):
ret = []
length = min(len(inspect.stack()), depth + 1)
for i in range(3, length):
upper_frame = inspect.stack()[i]
function_name = inspect.stack()[i - 1].function
ret.append(upper_frame.filename)
ret.append('(')
ret.append(str(upper_frame.lineno))
ret.append('): ')
ret.append(function_name)
if i != length - 1:
ret.append('\n')
return ''.join(ret)
# Keep references to the original torch.distributed collectives so that
# CommProfiler.disable() can restore them after enable() monkey-patches
# them with profiling wrappers.
torch_all_reduce = dist.all_reduce
torch_all_gather = dist.all_gather
torch_reduce_scatter = dist.reduce_scatter
torch_broadcast = dist.broadcast
torch_reduce = dist.reduce
class CommEvent(object):
    """Communication event record.

    Accumulates, for one communication site, the number of collective
    calls, the communication volume and the CUDA time spent.
    """

    def __init__(self, count: int = 0, comm_vol: float = 0., cuda_time: int = 0):
        self.self_count = count
        self.self_comm_vol = comm_vol
        self.self_cuda_time = cuda_time

    def add(self, rhs):
        """Fold another event's totals into this one in place."""
        self.self_count = self.self_count + rhs.self_count
        self.self_comm_vol = self.self_comm_vol + rhs.self_comm_vol
        self.self_cuda_time = self.self_cuda_time + rhs.self_cuda_time
class CommProfiler(BaseProfiler):
"""Communication profiler. Records all communication events.
"""
def __init__(self, depth: int = 0, total_count: int = 0, total_comm_vol: float = 0, total_cuda_time: int = 0):
super().__init__(profiler_name="Collective_Communication", priority=0)
self.depth = 3 + depth
self.total_count = total_count
self.total_comm_vol = total_comm_vol
self.total_cuda_time = total_cuda_time
self.ops_record = dict()
self.profiler = None
self.pending_op = None
self.pending_metadata = None
self.warn_flag = False
def reset(self):
self.total_count = 0
self.total_comm_vol = 0
self.total_cuda_time = 0
self.ops_record = dict()
self.profiler = None
self.pending_op = None
self.pending_metadata = None
self.warn_flag = False
def enable(self):
dist.all_reduce = partial(all_reduce, profiler=self)
dist.all_gather = partial(all_gather, profiler=self)
dist.reduce_scatter = partial(reduce_scatter, profiler=self)
dist.broadcast = partial(broadcast, profiler=self)
dist.reduce = partial(reduce, profiler=self)
def disable(self):
dist.all_reduce = torch_all_reduce
dist.all_gather = torch_all_gather
dist.reduce_scatter = torch_reduce_scatter
dist.broadcast = torch_broadcast
dist.reduce = torch_reduce
def to_tensorboard(self, writer):
writer.add_text(tag="Collective Communication", text_string=self.result_str("\n\n"))
def to_file(self, filename: Path):
with open(filename, "w") as f:
f.write(self.result_str())
def show(self):
print(self.result_str())
def result_str(self, sep: str = "\n"):
res = []
def append(s: str = None):
if s is not None:
res.append(s)
res.append(sep)
if self.warn_flag:
append("Warnning: there exists multiple communication operations in the same time. As a result, "
"the profiling result is not accurate.")
if self.total_cuda_time == 0:
return "No collective communication has been called yet!"
append("Collective communication profiling result:")
append("total cuda time: {}".format(_format_time(self.total_cuda_time)))
append("average bandwidth: {}".format(_format_bandwidth(self.total_comm_vol, self.total_cuda_time)))
append("total number of calls: {}".format(self.total_count))
append("All events:")
seperation = '-' * 74
row_format = '{:^10}' + '{:^12}' * 2 + '{:^16}' + '{:^12}' * 2
append(seperation)
append(row_format.format('Location', 'GPU time', 'Percentage', 'Comm volume', 'Bandwidth', 'Num of calls'))
append(seperation)
show_list = sorted(self.ops_record.items(), key=lambda kv: -kv[1].self_cuda_time)
for location, event in show_list:
append(location)
append(
row_format.format('', _format_time(event.self_cuda_time),
'{:.1f}%'.format(event.self_cuda_time / self.total_cuda_time * 100.0),
_format_memory(event.self_comm_vol),
_format_bandwidth(event.self_comm_vol, event.self_cuda_time), event.self_count))
append()
return ''.join(res)
@property
def has_aync_op(self):
return self.pending_op is not None
def activate_profiler(self, kn: str, vol: float):
self.pending_metadata = (kn, _get_code_location(self.depth), vol)
self.profiler = profile(enabled=True, use_cuda=True, use_cpu=True, use_kineto=True)
self.profiler.__enter__()
def close_profiler(self, group=None):
assert self.profiler is not None, "There is no running dist op"
kernel_name, code_location, vol = self.pending_metadata
self.profiler.__exit__(None, None, None)
if self.profiler.enabled and dist.get_world_size(group) > 1:
assert_flag = 0
current_comm_event = None
events = self.profiler.function_events
for event in events:
if kernel_name in event.name:
assert assert_flag == 0, "Multiple dist ops has been called "
current_comm_event = CommEvent(1, vol, event.self_cuda_time_total)
assert_flag += 1
assert current_comm_event is not None, "dist op has not been found"
buffer = torch.tensor([current_comm_event.self_cuda_time], device=get_current_device())
torch_all_reduce(buffer, op=ReduceOp.MIN, group=group)
current_comm_event.self_cuda_time = buffer.item()
self.total_count += current_comm_event.self_count
self.total_comm_vol += current_comm_event.self_comm_vol
self.total_cuda_time += current_comm_event.self_cuda_time
if code_location in self.ops_record:
self.ops_record[code_location].add(current_comm_event)
else:
self.ops_record[code_location] = current_comm_event
self.profiler = None
self.pending_op = None
self.pending_metadata = None
def wait_async_op(self):
if self.pending_op is not None:
op = self.pending_op
op.wait()
self.close_profiler()
class CommHandler(object):
"""Communication handler. A dummy handler to wait aync operations.
"""
def __init__(self, profiler: CommProfiler):
super().__init__()
self.prof = profiler
def wait(self):
self.prof.wait_async_op()
def async_check(profiler: CommProfiler):
if profiler.pending_op is not None:
profiler.warn_flag = True
profiler.wait_async_op()
def all_reduce(tensor: torch.Tensor,
op: ReduceOp = ReduceOp.SUM,
group=None,
async_op: bool = False,
profiler: CommProfiler = None) -> Optional[CommHandler]:
async_check(profiler)
comm_size = dist.get_world_size(group)
correction = 2 * (comm_size - 1) / comm_size
comm_vol = correction * tensor.element_size() * tensor.numel()
profiler.activate_profiler("ncclKernel_AllReduce_", comm_vol)
profiler.pending_op = torch_all_reduce(tensor, op, group, async_op)
if async_op:
return CommHandler(profiler)
profiler.close_profiler(group)
def reduce_scatter(output: torch.Tensor,
input_list: List[torch.Tensor],
op: ReduceOp = ReduceOp.SUM,
group=None,
async_op: bool = False,
profiler: CommProfiler = None) -> Optional[CommHandler]:
async_check(profiler)
comm_size = dist.get_world_size(group)
correction = (comm_size - 1) / comm_size
comm_vol = 0
for tensor in input_list:
comm_vol += tensor.element_size() * tensor.numel()
comm_vol *= correction
profiler.activate_profiler("ncclKernel_ReduceScatter_", comm_vol)
profiler.pending_op = torch_reduce_scatter(output, input_list, op, group, async_op)
if async_op:
return CommHandler(profiler)
profiler.close_profiler(group)
def all_gather(tensor_list: List[torch.Tensor],
tensor: torch.Tensor,
group=None,
async_op: bool = False,
profiler: CommProfiler = None) -> Optional[CommHandler]:
async_check(profiler)
comm_size = dist.get_world_size(group)
correction = (comm_size - 1) / comm_size
comm_vol = 0
for ten in tensor_list:
comm_vol += ten.element_size() * ten.numel()
comm_vol *= correction
profiler.activate_profiler("ncclKernel_AllGather_", comm_vol)
profiler.pending_op = torch_all_gather(tensor_list, tensor, group, async_op)
if async_op:
return CommHandler(profiler)
profiler.close_profiler(group)
def broadcast(tensor: torch.Tensor,
src: int,
group=None,
async_op: bool = False,
profiler: CommProfiler = None) -> Optional[CommHandler]:
async_check(profiler)
comm_vol = 1.0 * tensor.element_size() * tensor.numel()
profiler.activate_profiler("ncclKernel_Broadcast_", comm_vol)
profiler.pending_op = torch_broadcast(tensor, src, group, async_op)
if async_op:
return CommHandler(profiler)
profiler.close_profiler(group)
def reduce(tensor: torch.Tensor,
dst: int,
op: ReduceOp = ReduceOp.SUM,
group=None,
async_op: bool = False,
profiler: CommProfiler = None) -> Optional[CommHandler]:
async_check(profiler)
comm_vol = 1.0 * tensor.element_size() * tensor.numel()
profiler.activate_profiler("ncclKernel_Reduce_", comm_vol)
profiler.pending_op = torch_reduce(tensor, dst, op, group, async_op)
if async_op:
return CommHandler(profiler)
profiler.close_profiler(group)
| [
"colossalai.utils.get_current_device"
] | [((7912, 7938), 'torch.distributed.get_world_size', 'dist.get_world_size', (['group'], {}), '(group)\n', (7931, 7938), True, 'import torch.distributed as dist\n'), ((8639, 8665), 'torch.distributed.get_world_size', 'dist.get_world_size', (['group'], {}), '(group)\n', (8658, 8665), True, 'import torch.distributed as dist\n'), ((9383, 9409), 'torch.distributed.get_world_size', 'dist.get_world_size', (['group'], {}), '(group)\n', (9402, 9409), True, 'import torch.distributed as dist\n'), ((2547, 2581), 'functools.partial', 'partial', (['all_reduce'], {'profiler': 'self'}), '(all_reduce, profiler=self)\n', (2554, 2581), False, 'from functools import partial\n'), ((2609, 2643), 'functools.partial', 'partial', (['all_gather'], {'profiler': 'self'}), '(all_gather, profiler=self)\n', (2616, 2643), False, 'from functools import partial\n'), ((2675, 2713), 'functools.partial', 'partial', (['reduce_scatter'], {'profiler': 'self'}), '(reduce_scatter, profiler=self)\n', (2682, 2713), False, 'from functools import partial\n'), ((2740, 2773), 'functools.partial', 'partial', (['broadcast'], {'profiler': 'self'}), '(broadcast, profiler=self)\n', (2747, 2773), False, 'from functools import partial\n'), ((2797, 2827), 'functools.partial', 'partial', (['reduce'], {'profiler': 'self'}), '(reduce, profiler=self)\n', (2804, 2827), False, 'from functools import partial\n'), ((5385, 5452), 'torch.autograd.profiler.profile', 'profile', ([], {'enabled': '(True)', 'use_cuda': '(True)', 'use_cpu': '(True)', 'use_kineto': '(True)'}), '(enabled=True, use_cuda=True, use_cpu=True, use_kineto=True)\n', (5392, 5452), False, 'from torch.autograd.profiler import profile\n'), ((453, 468), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (466, 468), False, 'import inspect\n'), ((537, 552), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (550, 552), False, 'import inspect\n'), ((581, 596), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (594, 596), False, 'import inspect\n'), ((5761, 
5787), 'torch.distributed.get_world_size', 'dist.get_world_size', (['group'], {}), '(group)\n', (5780, 5787), True, 'import torch.distributed as dist\n'), ((6367, 6387), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (6385, 6387), False, 'from colossalai.utils import get_current_device\n')] |
import contextlib
import os
import colossalai
import torch
from colossalai.core import global_context as gpc
from colossalai.engine.schedule import (InterleavedPipelineSchedule, PipelineSchedule)
from colossalai.logging import get_dist_logger
from colossalai.nn import CosineAnnealingWarmupLR
from colossalai.trainer import Trainer, hooks
from colossalai.utils import MultiTimer, get_dataloader
from colossalai.zero import zero3_model_context
from model_zoo.gpt import GPTLMLoss, gpt2_small, gpt2_medium, gpt2_large, gpt2_xl
from data import WebtextDataset
def train_gpt():
args = colossalai.get_default_parser().parse_args()
# standard launch
# colossalai.launch(config=args.config,
# rank=args.rank,
# world_size=args.world_size,
# local_rank=args.local_rank,
# host=args.host,
# port=args.port)
# launch from torchrun
colossalai.launch_from_torch(config=args.config)
logger = get_dist_logger()
if hasattr(gpc.config, 'LOG_PATH'):
if gpc.get_global_rank() == 0:
log_path = gpc.config.LOG_PATH
if not os.path.exists(log_path):
os.mkdir(log_path)
logger.log_to_file(log_path)
train_dataset = WebtextDataset(os.environ['DATA'], seq_len=gpc.config.SEQ_LENGTH)
train_dataloader = get_dataloader(train_dataset,
seed=42,
batch_size=gpc.config.BATCH_SIZE // gpc.data_parallel_size,
pin_memory=True,
shuffle=True,
drop_last=True)
logger.info(f'Loaded {len(train_dataset)}/{len(train_dataloader)} samples/batches', ranks=[0])
# zero3 under test
# use_zero3 = hasattr(gpc.config, 'zero') and gpc.config.zero.level == 3
# cm = zero3_model_context() if use_zero3 else contextlib.nullcontext()
# with cm:
# model = gpc.config.model.pop('type')(**gpc.config.model)
model = gpt2_medium(vocab_size=gpc.config.VOCAB_SIZE,
max_position_embeddings=gpc.config.SEQ_LENGTH,
checkpoint=True)
criterion = GPTLMLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.00015, weight_decay=1e-2)
steps_per_epoch = len(train_dataloader) // gpc.config.gradient_accumulation
lr_scheduler = CosineAnnealingWarmupLR(optimizer=optimizer,
total_steps=gpc.config.NUM_EPOCHS * steps_per_epoch,
warmup_steps=gpc.config.WARMUP_EPOCHS * steps_per_epoch,
eta_min=1e-5)
engine, train_dataloader, _, lr_scheduler = colossalai.initialize(model=model,
optimizer=optimizer,
criterion=criterion,
train_dataloader=train_dataloader,
lr_scheduler=lr_scheduler)
# pipeline under test
# num_model_chunks = getattr(gpc.config.model, 'num_chunks', 1)
# if num_model_chunks > 1:
# logger.info('Build InterleavedPipelineSchedule', ranks=[0])
# schedule = InterleavedPipelineSchedule(gpc.config.NUM_MICRO_BATCHES, num_model_chunks)
# else:
# logger.info('Build PipelineSchedule', ranks=[0])
# schedule = PipelineSchedule(gpc.config.NUM_MICRO_BATCHES)
timer = MultiTimer()
trainer = Trainer(engine=engine, logger=logger, timer=timer)
hook_list = [
hooks.LogMetricByEpochHook(logger=logger),
hooks.LogMetricByStepHook(),
hooks.LossHook(),
hooks.ThroughputHook(),
hooks.LRSchedulerHook(lr_scheduler=lr_scheduler, by_epoch=False),
# hooks.TensorboardHook(log_dir='./tb_logs', ranks=[0]),
# hooks.LogMemoryByEpochHook(logger),
# hooks.LogTimingByEpochHook(timer, logger),
# hooks.SaveCheckpointHook(checkpoint_dir='./ckpt')
]
logger.info("Training start", ranks=[0])
trainer.fit(train_dataloader=train_dataloader, epochs=gpc.config.NUM_EPOCHS, hooks=hook_list, display_progress=True)
if __name__ == '__main__':
train_gpt()
| [
"colossalai.trainer.hooks.ThroughputHook",
"colossalai.trainer.Trainer",
"colossalai.trainer.hooks.LogMetricByStepHook",
"colossalai.trainer.hooks.LossHook",
"colossalai.logging.get_dist_logger",
"colossalai.initialize",
"colossalai.launch_from_torch",
"colossalai.utils.MultiTimer",
"colossalai.trai... | [((984, 1032), 'colossalai.launch_from_torch', 'colossalai.launch_from_torch', ([], {'config': 'args.config'}), '(config=args.config)\n', (1012, 1032), False, 'import colossalai\n'), ((1049, 1066), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (1064, 1066), False, 'from colossalai.logging import get_dist_logger\n'), ((1339, 1404), 'data.WebtextDataset', 'WebtextDataset', (["os.environ['DATA']"], {'seq_len': 'gpc.config.SEQ_LENGTH'}), "(os.environ['DATA'], seq_len=gpc.config.SEQ_LENGTH)\n", (1353, 1404), False, 'from data import WebtextDataset\n'), ((1429, 1578), 'colossalai.utils.get_dataloader', 'get_dataloader', (['train_dataset'], {'seed': '(42)', 'batch_size': '(gpc.config.BATCH_SIZE // gpc.data_parallel_size)', 'pin_memory': '(True)', 'shuffle': '(True)', 'drop_last': '(True)'}), '(train_dataset, seed=42, batch_size=gpc.config.BATCH_SIZE //\n gpc.data_parallel_size, pin_memory=True, shuffle=True, drop_last=True)\n', (1443, 1578), False, 'from colossalai.utils import MultiTimer, get_dataloader\n'), ((2150, 2264), 'model_zoo.gpt.gpt2_medium', 'gpt2_medium', ([], {'vocab_size': 'gpc.config.VOCAB_SIZE', 'max_position_embeddings': 'gpc.config.SEQ_LENGTH', 'checkpoint': '(True)'}), '(vocab_size=gpc.config.VOCAB_SIZE, max_position_embeddings=gpc.\n config.SEQ_LENGTH, checkpoint=True)\n', (2161, 2264), False, 'from model_zoo.gpt import GPTLMLoss, gpt2_small, gpt2_medium, gpt2_large, gpt2_xl\n'), ((2329, 2340), 'model_zoo.gpt.GPTLMLoss', 'GPTLMLoss', ([], {}), '()\n', (2338, 2340), False, 'from model_zoo.gpt import GPTLMLoss, gpt2_small, gpt2_medium, gpt2_large, gpt2_xl\n'), ((2533, 2711), 'colossalai.nn.CosineAnnealingWarmupLR', 'CosineAnnealingWarmupLR', ([], {'optimizer': 'optimizer', 'total_steps': '(gpc.config.NUM_EPOCHS * steps_per_epoch)', 'warmup_steps': '(gpc.config.WARMUP_EPOCHS * steps_per_epoch)', 'eta_min': '(1e-05)'}), '(optimizer=optimizer, total_steps=gpc.config.\n NUM_EPOCHS * steps_per_epoch, 
warmup_steps=gpc.config.WARMUP_EPOCHS *\n steps_per_epoch, eta_min=1e-05)\n', (2556, 2711), False, 'from colossalai.nn import CosineAnnealingWarmupLR\n'), ((2885, 3027), 'colossalai.initialize', 'colossalai.initialize', ([], {'model': 'model', 'optimizer': 'optimizer', 'criterion': 'criterion', 'train_dataloader': 'train_dataloader', 'lr_scheduler': 'lr_scheduler'}), '(model=model, optimizer=optimizer, criterion=criterion,\n train_dataloader=train_dataloader, lr_scheduler=lr_scheduler)\n', (2906, 3027), False, 'import colossalai\n'), ((3764, 3776), 'colossalai.utils.MultiTimer', 'MultiTimer', ([], {}), '()\n', (3774, 3776), False, 'from colossalai.utils import MultiTimer, get_dataloader\n'), ((3794, 3844), 'colossalai.trainer.Trainer', 'Trainer', ([], {'engine': 'engine', 'logger': 'logger', 'timer': 'timer'}), '(engine=engine, logger=logger, timer=timer)\n', (3801, 3844), False, 'from colossalai.trainer import Trainer, hooks\n'), ((3875, 3916), 'colossalai.trainer.hooks.LogMetricByEpochHook', 'hooks.LogMetricByEpochHook', ([], {'logger': 'logger'}), '(logger=logger)\n', (3901, 3916), False, 'from colossalai.trainer import Trainer, hooks\n'), ((3927, 3954), 'colossalai.trainer.hooks.LogMetricByStepHook', 'hooks.LogMetricByStepHook', ([], {}), '()\n', (3952, 3954), False, 'from colossalai.trainer import Trainer, hooks\n'), ((3965, 3981), 'colossalai.trainer.hooks.LossHook', 'hooks.LossHook', ([], {}), '()\n', (3979, 3981), False, 'from colossalai.trainer import Trainer, hooks\n'), ((3992, 4014), 'colossalai.trainer.hooks.ThroughputHook', 'hooks.ThroughputHook', ([], {}), '()\n', (4012, 4014), False, 'from colossalai.trainer import Trainer, hooks\n'), ((4025, 4089), 'colossalai.trainer.hooks.LRSchedulerHook', 'hooks.LRSchedulerHook', ([], {'lr_scheduler': 'lr_scheduler', 'by_epoch': '(False)'}), '(lr_scheduler=lr_scheduler, by_epoch=False)\n', (4046, 4089), False, 'from colossalai.trainer import Trainer, hooks\n'), ((607, 638), 'colossalai.get_default_parser', 
'colossalai.get_default_parser', ([], {}), '()\n', (636, 638), False, 'import colossalai\n'), ((1120, 1141), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([], {}), '()\n', (1139, 1141), True, 'from colossalai.core import global_context as gpc\n'), ((1212, 1236), 'os.path.exists', 'os.path.exists', (['log_path'], {}), '(log_path)\n', (1226, 1236), False, 'import os\n'), ((1255, 1273), 'os.mkdir', 'os.mkdir', (['log_path'], {}), '(log_path)\n', (1263, 1273), False, 'import os\n')] |
import click
from .utils import *
from .benchmark import run_benchmark
from colossalai.context import Config
__all__ = ['benchmark']
@click.command()
@click.option("-g", "--gpus", type=int, default=None, help="Total number of devices to use.")
@click.option("-b", "--batch_size", type=int, default=8, help="Batch size of the input tensor.")
@click.option("-s", "--seq_len", type=int, default=512, help="Sequence length of the input tensor.")
@click.option("-d", "--dimension", type=int, default=1024, help="Hidden dimension of the input tensor.")
@click.option("-w", "--warmup_steps", type=int, default=10, help="The number of warmup steps.")
@click.option("-p", "--profile_steps", type=int, default=50, help="The number of profiling steps.")
@click.option("-l", "--layers", type=int, default=2)
@click.option("-m",
"--model",
type=click.Choice(['mlp'], case_sensitive=False),
default='mlp',
help="Select the model to benchmark, currently only supports MLP")
def benchmark(gpus: int, batch_size: int, seq_len: int, dimension: int, warmup_steps: int, profile_steps: int,
layers: int, model: str):
args_dict = locals()
args = Config(args_dict)
run_benchmark(args)
| [
"colossalai.context.Config"
] | [((138, 153), 'click.command', 'click.command', ([], {}), '()\n', (151, 153), False, 'import click\n'), ((155, 252), 'click.option', 'click.option', (['"""-g"""', '"""--gpus"""'], {'type': 'int', 'default': 'None', 'help': '"""Total number of devices to use."""'}), "('-g', '--gpus', type=int, default=None, help=\n 'Total number of devices to use.')\n", (167, 252), False, 'import click\n'), ((249, 349), 'click.option', 'click.option', (['"""-b"""', '"""--batch_size"""'], {'type': 'int', 'default': '(8)', 'help': '"""Batch size of the input tensor."""'}), "('-b', '--batch_size', type=int, default=8, help=\n 'Batch size of the input tensor.')\n", (261, 349), False, 'import click\n'), ((346, 450), 'click.option', 'click.option', (['"""-s"""', '"""--seq_len"""'], {'type': 'int', 'default': '(512)', 'help': '"""Sequence length of the input tensor."""'}), "('-s', '--seq_len', type=int, default=512, help=\n 'Sequence length of the input tensor.')\n", (358, 450), False, 'import click\n'), ((447, 555), 'click.option', 'click.option', (['"""-d"""', '"""--dimension"""'], {'type': 'int', 'default': '(1024)', 'help': '"""Hidden dimension of the input tensor."""'}), "('-d', '--dimension', type=int, default=1024, help=\n 'Hidden dimension of the input tensor.')\n", (459, 555), False, 'import click\n'), ((552, 651), 'click.option', 'click.option', (['"""-w"""', '"""--warmup_steps"""'], {'type': 'int', 'default': '(10)', 'help': '"""The number of warmup steps."""'}), "('-w', '--warmup_steps', type=int, default=10, help=\n 'The number of warmup steps.')\n", (564, 651), False, 'import click\n'), ((648, 751), 'click.option', 'click.option', (['"""-p"""', '"""--profile_steps"""'], {'type': 'int', 'default': '(50)', 'help': '"""The number of profiling steps."""'}), "('-p', '--profile_steps', type=int, default=50, help=\n 'The number of profiling steps.')\n", (660, 751), False, 'import click\n'), ((748, 799), 'click.option', 'click.option', (['"""-l"""', '"""--layers"""'], {'type': 
'int', 'default': '(2)'}), "('-l', '--layers', type=int, default=2)\n", (760, 799), False, 'import click\n'), ((1206, 1223), 'colossalai.context.Config', 'Config', (['args_dict'], {}), '(args_dict)\n', (1212, 1223), False, 'from colossalai.context import Config\n'), ((864, 907), 'click.Choice', 'click.Choice', (["['mlp']"], {'case_sensitive': '(False)'}), "(['mlp'], case_sensitive=False)\n", (876, 907), False, 'import click\n')] |
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.logging import get_dist_logger
from colossalai.testing import parameterize
from colossalai.utils import free_port
from colossalai.context import MOE_CONTEXT
from colossalai.nn.layer import MoeModule
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)
from colossalai.testing import rerun_on_exception
from colossalai.utils import get_current_device
from tests.test_zero_data_parallel.common import CONFIG
class MoeModel(nn.Module):
def __init__(self):
super().__init__()
self.proj1 = nn.Linear(4, 16)
expert_cls = nn.Linear
expert_args_dict = dict(in_features=16, out_features=16)
self.moe = MoeModule(dim_model=16, num_experts=8, use_residual=True, expert_cls=expert_cls, **expert_args_dict)
self.proj2 = nn.Linear(16, 4)
def forward(self, x):
x = self.proj1(x)
x = self.moe(x)
x = self.proj2(x)
return x
@parameterize("init_device_type", ['cpu', 'cuda'])
@parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy])
def run_moe_zero_init(init_device_type, shard_strategy_class):
logger = get_dist_logger("test_moe_zero_init")
if init_device_type == 'cuda':
init_device = torch.device(f"cuda:{get_current_device()}")
elif init_device_type == 'cpu':
init_device = torch.device("cpu")
else:
raise NotImplementedError("Unknown device found.")
model_numel_tensor = torch.zeros(1, dtype=torch.int)
with ZeroInitContext(target_device=init_device,
shard_strategy=shard_strategy_class(),
shard_param=True,
model_numel_tensor=model_numel_tensor,
rm_torch_payload_on_the_fly=False):
model = MoeModel()
for name, param in model.named_parameters():
assert hasattr(param, 'colo_attr')
# the weights in the gate should be fp32
if 'gate' in name:
assert param.colo_attr.sharded_data_tensor.dtype == torch.float32
else:
assert param.colo_attr.sharded_data_tensor.dtype == torch.half
# the parameters in moe experts and its gate should not be sharded
if ('experts' in name) or ('gate' in name) or ('residual_combine' in name):
assert not param.colo_attr.sharded_data_tensor.is_sharded
else:
assert param.colo_attr.sharded_data_tensor.is_sharded
# the parameters in moe experts is not replicated
if 'experts' in name:
assert not param.is_replicated
else:
assert param.is_replicated
assert param.colo_attr.sharded_data_tensor.payload.device.type == init_device.type, \
f'{param.colo_attr.sharded_data_tensor.payload.device.type} vs. {init_device.type}'
def _run_dist(rank, world_size, port):
colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
MOE_CONTEXT.setup(seed=42)
run_moe_zero_init()
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [2, 4])
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_moe_zero_init(world_size):
run_func = partial(_run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_moe_zero_init(world_size=2)
| [
"colossalai.launch",
"colossalai.logging.get_dist_logger",
"colossalai.utils.free_port",
"colossalai.testing.rerun_on_exception",
"colossalai.utils.get_current_device",
"colossalai.context.MOE_CONTEXT.setup",
"colossalai.nn.layer.MoeModule",
"colossalai.testing.parameterize"
] | [((1178, 1227), 'colossalai.testing.parameterize', 'parameterize', (['"""init_device_type"""', "['cpu', 'cuda']"], {}), "('init_device_type', ['cpu', 'cuda'])\n", (1190, 1227), False, 'from colossalai.testing import parameterize\n'), ((1230, 1320), 'colossalai.testing.parameterize', 'parameterize', (['"""shard_strategy_class"""', '[TensorShardStrategy, BucketTensorShardStrategy]'], {}), "('shard_strategy_class', [TensorShardStrategy,\n BucketTensorShardStrategy])\n", (1242, 1320), False, 'from colossalai.testing import parameterize\n'), ((3433, 3478), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[2, 4]'], {}), "('world_size', [2, 4])\n", (3456, 3478), False, 'import pytest\n'), ((3481, 3584), 'colossalai.testing.rerun_on_exception', 'rerun_on_exception', ([], {'exception_type': 'mp.ProcessRaisedException', 'pattern': '""".*Address already in use.*"""'}), "(exception_type=mp.ProcessRaisedException, pattern=\n '.*Address already in use.*')\n", (3499, 3584), False, 'from colossalai.testing import rerun_on_exception\n'), ((1395, 1432), 'colossalai.logging.get_dist_logger', 'get_dist_logger', (['"""test_moe_zero_init"""'], {}), "('test_moe_zero_init')\n", (1410, 1432), False, 'from colossalai.logging import get_dist_logger\n'), ((1718, 1749), 'torch.zeros', 'torch.zeros', (['(1)'], {'dtype': 'torch.int'}), '(1, dtype=torch.int)\n', (1729, 1749), False, 'import torch\n'), ((3239, 3355), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'CONFIG', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=CONFIG, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (3256, 3355), False, 'import colossalai\n'), ((3356, 3382), 'colossalai.context.MOE_CONTEXT.setup', 'MOE_CONTEXT.setup', ([], {'seed': '(42)'}), '(seed=42)\n', (3373, 3382), False, 'from colossalai.context import MOE_CONTEXT\n'), ((3698, 3735), 
'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (3706, 3735), True, 'import torch.multiprocessing as mp\n'), ((771, 787), 'torch.nn.Linear', 'nn.Linear', (['(4)', '(16)'], {}), '(4, 16)\n', (780, 787), True, 'import torch.nn as nn\n'), ((906, 1011), 'colossalai.nn.layer.MoeModule', 'MoeModule', ([], {'dim_model': '(16)', 'num_experts': '(8)', 'use_residual': '(True)', 'expert_cls': 'expert_cls'}), '(dim_model=16, num_experts=8, use_residual=True, expert_cls=\n expert_cls, **expert_args_dict)\n', (915, 1011), False, 'from colossalai.nn.layer import MoeModule\n'), ((1029, 1045), 'torch.nn.Linear', 'nn.Linear', (['(16)', '(4)'], {}), '(16, 4)\n', (1038, 1045), True, 'import torch.nn as nn\n'), ((1599, 1618), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1611, 1618), False, 'import torch\n'), ((3680, 3691), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (3689, 3691), False, 'from colossalai.utils import free_port\n'), ((1515, 1535), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (1533, 1535), False, 'from colossalai.utils import get_current_device\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from colossalai.context import ParallelMode
from colossalai.registry import HOOKS
from colossalai.utils import is_no_pp_or_last_stage
from ._base_hook import BaseHook
from ..metric import Loss, Accuracy1D, Accuracy2D, Accuracy, Accuracy2p5D, Accuracy3D
class MetricHook(BaseHook):
"""Specialized hook classes for :class:`Metric`.
Some help metric collectors initialize, reset and
update their states. Others are used to display and
record the metric.
:param trainer: Trainer attached with current hook
:param priority: Priority in the printing, hooks with small priority will be printed in front
:type trainer: Trainer
:type priority: int
"""
def __init__(self,
priority: int,
):
super().__init__(priority)
self._is_stage_to_compute = is_no_pp_or_last_stage()
def _check_metric_states_initialization(self, trainer):
if 'metrics' not in trainer.states:
self.init_runner_states(trainer, 'metrics', dict(train={}, test={}))
@HOOKS.register_module
class LossHook(MetricHook):
"""Specialized hook class for :class:`Loss`.
:param trainer: Trainer attached with current hook
:param priority: Priority in the printing, hooks with small priority will be printed in front
:type trainer: Trainer
:type priority: int, optional
"""
def __init__(self, priority: int = 0):
super().__init__(priority)
def after_hook_is_attached(self, trainer):
self._check_metric_states_initialization(trainer)
if self._is_stage_to_compute:
self.train_loss = Loss(epoch_only=False)
self.test_loss = Loss(epoch_only=True)
# register the metric calculator
trainer.states['metrics']['train'][
self.train_loss.__class__.__name__] = self.train_loss
trainer.states['metrics']['test'][
self.test_loss.__class__.__name__] = self.test_loss
def before_train_epoch(self, trainer):
if self._is_stage_to_compute:
self.train_loss.reset()
def after_train_iter(self, trainer, logits, label, loss):
if self._is_stage_to_compute:
self.train_loss.update(loss)
def before_test_epoch(self, trainer):
if self._is_stage_to_compute:
self.test_loss.reset()
def after_test_iter(self, trainer, logits, label, loss):
if self._is_stage_to_compute:
self.test_loss.update(loss)
@HOOKS.register_module
class Accuracy1DHook(MetricHook):
"""Specialized hook class for :class:`Accuracy1D`.
It acts the same as :class:`AccuracyHook`.
:param trainer: Trainer attached with current hook
:param priority: Priority in the printing, hooks with small priority will be printed in front
:type trainer: Trainer
:type priority: int, optional
"""
def __init__(self, priority: int = 10):
super().__init__(priority)
def after_hook_is_attached(self, trainer):
self._check_metric_states_initialization(trainer)
if self._is_stage_to_compute:
self.metric = Accuracy1D(epoch_only=True)
# register the metric
trainer.states['metrics']['test'][
self.metric.__class__.__name__] = self.metric
def before_test(self, trainer):
if self._is_stage_to_compute:
self.metric.reset()
def after_test_iter(self, trainer, logits, label, *args):
if self._is_stage_to_compute:
self.metric.update(logits, label)
@HOOKS.register_module
class Accuracy2DHook(MetricHook):
    """Hook that tracks test accuracy under 2D tensor parallelism.

    Behaves exactly like :class:`AccuracyHook`, but is backed by the
    :class:`Accuracy2D` metric.

    :param priority: Priority in the printing, hooks with small priority
        will be printed in front
    :type priority: int, optional
    """
    def __init__(self, priority: int = 0):
        super().__init__(priority)
    def after_hook_is_attached(self, trainer):
        # Make sure trainer.states['metrics'] exists before registering.
        self._check_metric_states_initialization(trainer)
        if not self._is_stage_to_compute:
            return
        # Build the epoch-level metric and register it under its class name.
        self.metric = Accuracy2D(epoch_only=True)
        metric_name = self.metric.__class__.__name__
        trainer.states['metrics']['test'][metric_name] = self.metric
    def before_test(self, trainer):
        """Reset the accumulated accuracy before a test run."""
        if not self._is_stage_to_compute:
            return
        self.metric.reset()
    def after_test_iter(self, trainer, logits, label, *args):
        """Accumulate accuracy from one test batch."""
        if not self._is_stage_to_compute:
            return
        self.metric.update(logits, label)
@HOOKS.register_module
class Accuracy2p5DHook(MetricHook):
    """Specialized hook class for :class:`Accuracy2p5D`.
    It acts the same as :class:`AccuracyHook`.
    :param priority: Priority in the printing, hooks with small priority will be printed in front
    :type priority: int, optional
    """
    def __init__(self, priority: int = 0):
        super().__init__(priority)
    def after_hook_is_attached(self, trainer):
        # Verify trainer.states['metrics'] has been initialized before registering.
        self._check_metric_states_initialization(trainer)
        if self._is_stage_to_compute:
            self.metric = Accuracy2p5D(epoch_only=True)
            # register the metric
            trainer.states['metrics']['test'][
                self.metric.__class__.__name__] = self.metric
    def before_test(self, trainer):
        # Reset the accumulated accuracy before each test run.
        if self._is_stage_to_compute:
            self.metric.reset()
    def after_test_iter(self, trainer, logits, label, *args):
        # Accumulate accuracy from this batch's outputs.
        if self._is_stage_to_compute:
            self.metric.update(logits, label)
@HOOKS.register_module
class Accuracy3DHook(MetricHook):
    """Specialized hook class for :class:`Accuracy3D`.
    It acts the same as :class:`AccuracyHook`.
    :param priority: Priority in the printing, hooks with small priority will be printed in front
    :type priority: int
    """
    def __init__(self,
                 priority: int = 10):
        super().__init__(priority)
    def after_hook_is_attached(self, trainer):
        # Consistency fix: the 1D/2D/2p5D hooks verify that
        # trainer.states['metrics'] has been initialized before touching it;
        # this hook was missing the same guard (inherited from MetricHook).
        self._check_metric_states_initialization(trainer)
        if self._is_stage_to_compute:
            self.metric = Accuracy3D(epoch_only=True)
            # register the metric under its class name
            trainer.states['metrics']['test'][
                self.metric.__class__.__name__] = self.metric
    def before_test(self, trainer):
        # Reset the accumulated accuracy before each test run.
        if self._is_stage_to_compute:
            self.metric.reset()
    def after_test_iter(self, trainer, logits, label, *args):
        # Accumulate accuracy from this batch's outputs.
        if self._is_stage_to_compute:
            self.metric.update(logits, label)
@HOOKS.register_module
class AccuracyHook(MetricHook):
    """Specialized hook class for :class:`Accuracy`.
    :param priority: Priority in the printing, hooks with small priority will be printed in front
    :type trainer: Trainer
    :type priority: int
    """
    def __init__(self, priority: int = 0):
        super().__init__(priority)
    def after_hook_is_attached(self, trainer):
        # Consistency fix: the 1D/2D/2p5D hooks verify that
        # trainer.states['metrics'] has been initialized before touching it;
        # this hook was missing the same guard (inherited from MetricHook).
        self._check_metric_states_initialization(trainer)
        if self._is_stage_to_compute:
            self.metric = Accuracy(epoch_only=True)
            # register the metric under its class name
            trainer.states['metrics']['test'][
                self.metric.__class__.__name__] = self.metric
    def before_test(self, trainer):
        # Reset the accumulated accuracy before each test run.
        if self._is_stage_to_compute:
            self.metric.reset()
    def after_test_iter(self, trainer, logits, label, *args):
        # Accumulate accuracy from this batch's outputs.
        if self._is_stage_to_compute:
            self.metric.update(logits, label)
| [
"colossalai.utils.is_no_pp_or_last_stage"
] | [((881, 905), 'colossalai.utils.is_no_pp_or_last_stage', 'is_no_pp_or_last_stage', ([], {}), '()\n', (903, 905), False, 'from colossalai.utils import is_no_pp_or_last_stage\n')] |
End of preview. Expand
in Data Studio
README.md exists but content is empty.
- Downloads last month
- 7