python_code
from torch.optim.lr_scheduler import OneCycleLR as _OneCycleLR

from colossalai.registry import LR_SCHEDULERS


@LR_SCHEDULERS.register_module
class OneCycleLR(_OneCycleLR):
    r"""Sets the learning rate of each parameter group according to the 1cycle
    learning rate policy. The 1cycle policy anneals the learning ...
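For orientation, a minimal usage sketch of the underlying torch.optim.lr_scheduler.OneCycleLR that this class wraps (the model, optimizer, and step counts below are hypothetical):

import torch
from torch.optim.lr_scheduler import OneCycleLR

# Hypothetical setup: a tiny model trained for 10 epochs of 100 steps each.
model = torch.nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = OneCycleLR(optimizer, max_lr=0.1, epochs=10, steps_per_epoch=100)

for _ in range(10 * 100):
    optimizer.step()    # update parameters first ...
    scheduler.step()    # ... then anneal the lr along the 1cycle curve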
from typing import List

from torch.optim.lr_scheduler import MultiStepLR as _MultiStepLR

from colossalai.registry import LR_SCHEDULERS

from .delayed import WarmupScheduler


@LR_SCHEDULERS.register_module
class MultiStepLR(_MultiStepLR):
    """Decays the learning rate of each parameter group by gamma once the nu...

from .cosine import CosineAnnealingLR, CosineAnnealingWarmupLR, FlatAnnealingLR, FlatAnnealingWarmupLR
from .linear import LinearWarmupLR
from .multistep import MultiStepLR, MultiStepWarmupLR
from .onecycle import OneCycleLR
from .poly import PolynomialLR, PolynomialWarmupLR
from .torch import LambdaLR, MultiplicativeL...

from torch.optim.lr_scheduler import _LRScheduler

from colossalai.registry import LR_SCHEDULERS

from .delayed import WarmupScheduler


@LR_SCHEDULERS.register_module
class PolynomialLR(_LRScheduler):
    """Polynomial learning rate scheduler.

    Args:
        optimizer (:class:`torch.optim.Optimizer`): Wrapped optim...
from torch.optim.lr_scheduler import _LRScheduler


class _enable_get_lr_call:

    def __init__(self, o):
        self.o = o

    def __enter__(self):
        self.o._get_lr_called_within_step = True
        return self

    def __exit__(self, type, value, traceback):
        self.o._get_lr_called_within_step = False
        ...
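PyTorch's _LRScheduler.step() wraps its call to get_lr() in exactly this kind of guard, so get_lr() can warn when it is invoked outside of step(). A simplified sketch of that call pattern (not the full torch implementation):

class _SchedulerSketch:

    def __init__(self):
        self._get_lr_called_within_step = False
        self.base_lr = 0.1

    def get_lr(self):
        if not self._get_lr_called_within_step:
            print('warning: use get_last_lr() instead of get_lr() outside step()')
        return [self.base_lr]

    def step(self):
        # The context manager flips the flag only for the duration of step().
        with _enable_get_lr_call(self):
            return self.get_lr()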
from torch.optim.lr_scheduler import LambdaLR as _LambdaLR
from torch.optim.lr_scheduler import MultiplicativeLR as _MultiplicativeLR
from torch.optim.lr_scheduler import StepLR as _StepLR
from torch.optim.lr_scheduler import ExponentialLR as _ExponentialLR

from colossalai.registry import LR_SCHEDULERS


@LR_SCHEDULER...

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch.nn as nn

from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc


class ParallelLayer(nn.Module):

    def __init__(self):
        super().__init__()
        self.data_parallel_rank = 0 if not gpc.is_initialized(Parall...

from .colossalai_layer import *
from .parallel_1d import *
from .parallel_2d import *
from .parallel_2p5d import *
from .parallel_3d import *
from .parallel_sequence import *
from .moe import *
from .utils import *
from .vanilla import *
from .wrapper import *

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch
from torch import distributed as dist

from colossalai.communication import ring_forward
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn.layer.parallel_sequence._utils import _cal...

from ._operation import RingQK, RingAV
from .layers import TransformerSelfAttentionRing

__all__ = ['TransformerSelfAttentionRing', 'RingAV', 'RingQK']

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import math

import colossalai
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter

from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn.layer.parallel...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-


def _calc_incoming_device_range(i, rank, world_size, sub_seq_length):
    device_of_incoming_k = (rank - i - 1) % world_size
    start_idx = sub_seq_length * device_of_incoming_k
    end_idx = sub_seq_length * (device_of_incoming_k + 1)
    return start_idx, end_idx


d...
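A quick sanity check of the index arithmetic above, with hypothetical values (4 ranks, sub-sequence length 128): on rank 0, ring step i = 0 receives the K block owned by rank (0 - 0 - 1) % 4 = 3.

start, end = _calc_incoming_device_range(i=0, rank=0, world_size=4, sub_seq_length=128)
assert (start, end) == (384, 512)    # rank 3 owns the slice [384, 512)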
import torch.nn as nn
import torch.distributed as dist
from typing import List, Tuple, Union

from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc


class PipelineSharedModuleWrapper:

    def __init__(self, pipeline_ranks: Union[List[int], Tuple[int]]) -> None:
        assert len...

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch.nn as nn

from colossalai.builder import build_layer
from colossalai.registry import LAYERS


@LAYERS.register_module
class LambdaWrapper(nn.Module):
    """Wrap a function to nn.Module, which takes a config of layers and can fully access them.

    Args:
        ...

from .lambda_wrapper import LambdaWrapper
from .pipeline_wrapper import PipelineSharedModuleWrapper

__all__ = ['LambdaWrapper', 'PipelineSharedModuleWrapper']

from typing import Any, Tuple

import torch
import torch.distributed as dist
from colossalai.communication.collective import (all_gather, all_reduce, reduce_scatter)
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_devic...

from ._operation import reduce_by_batch_2p5d, split_batch_2p5d
from .layers import (Classifier2p5D, Embedding2p5D, LayerNorm2p5D, Linear2p5D, PatchEmbedding2p5D,
                     VocabParallelClassifier2p5D, VocabParallelEmbedding2p5D)

__all__ = [
    'split_batch_2p5d', 'reduce_by_batch_2p5d', 'Linear2p5D', 'Laye...

import math
from collections import OrderedDict
from typing import Callable

import torch
import torch.nn as nn
import torch.nn.functional as F
from colossalai.communication import broadcast
from colossalai.context import ParallelMode, seed
from colossalai.core import global_context as gpc
from colossalai.global_variab...
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.global_variables import tensor_parallel_env as env


def get_tesseract_dim_dep_from_env():
    try:
        tesseract_dim = env.tesseract_dim
        tesseract_dep = env.tesseract_dep
        asse...

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from typing import Optional, Tuple

import torch
from colossalai.communication import (all_gather, all_reduce, broadcast, reduce, reduce_scatter)
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from torch import...

from ._operation import reduce_by_batch_3d, split_batch_3d, split_tensor_3d
from .layers import (Classifier3D, Embedding3D, LayerNorm3D, Linear3D, PatchEmbedding3D,
                     VocabParallelClassifier3D, VocabParallelEmbedding3D)

__all__ = [
    'reduce_by_batch_3d', 'split_tensor_3d', 'split_batch_3d', 'Line...

import math
from collections import OrderedDict
from typing import Callable

import torch
import torch.nn as nn
import torch.nn.functional as F
from colossalai.communication import all_reduce, broadcast
from colossalai.constants import INPUT_GROUP_3D, WEIGHT_GROUP_3D
from colossalai.context import ParallelMode, seed
fr...

from colossalai.constants import INPUT_GROUP_3D, WEIGHT_GROUP_3D, OUTPUT_GROUP_3D
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.global_variables import tensor_parallel_env as env
from torch import Tensor


def get_depth_from_env() -> int:
    ...

from .common import (ACT2FN, CheckpointModule, _ntuple, divide, get_tensor_parallel_mode,
                     set_tensor_parallel_attribute_by_partition, set_tensor_parallel_attribute_by_size, to_2tuple)

__all__ = [
    'CheckpointModule', 'divide', 'ACT2FN', 'set_tensor_parallel_attribute_by_size',
    'set_tensor_p...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import collections.abc
from itertools import repeat

import numpy as np
import torch
from colossalai.constants import IS_TENSOR_PARALLEL, NUM_PARTITIONS
from colossalai.global_variables import tensor_parallel_env as env
from colossalai.utils import checkpoint
from torch ...
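This module exports _ntuple and to_2tuple, whose bodies are truncated here. A minimal sketch of the conventional implementation of this helper (as used in timm and similar codebases), assuming the same semantics:

import collections.abc
from itertools import repeat


def _ntuple(n):
    # Repeat a scalar n times, but pass through values that are already iterable.
    def parse(x):
        if isinstance(x, collections.abc.Iterable):
            return x
        return tuple(repeat(x, n))

    return parse


to_2tuple = _ntuple(2)

assert to_2tuple(7) == (7, 7)          # scalar is broadcast
assert to_2tuple((3, 5)) == (3, 5)     # tuple passes through unchanged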
import torch

try:
    import fused_mix_prec_layer_norm_cuda
except ImportError:
    fused_mix_prec_layer_norm_cuda = None


class FusedLayerNormAffineFunction1D(torch.autograd.Function):
    r"""Layernorm

    Args:
        input: input matrix.
        weight: weight matrix.
        bias: bias matrix.
        normalized_shape: in...
from .layers import (Classifier1D, Dropout1D, Embedding1D, LayerNorm1D, Linear1D, Linear1D_Col, Linear1D_Row,
                     PatchEmbedding1D, VocabParallelClassifier1D, VocabParallelEmbedding1D)

__all__ = [
    'Linear1D', 'Linear1D_Col', 'Linear1D_Row', 'Embedding1D', 'Dropout1D', 'Classifier1D', 'VocabParalle...

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import math
from collections import OrderedDict
from typing import Callable, Tuple

import torch
import torch.nn.functional as F
from colossalai.communication import broadcast
from colossalai.context import ParallelMode, seed
from colossalai.core import global_context as...

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch
import torch.distributed as dist
from colossalai.core import global_context as gpc
from colossalai.global_variables import tensor_parallel_env as env

from ..utils import divide


def set_parallel_input(input_parallel: bool):
    env.parallel_input_1d = inpu...

from typing import Any, Optional, Tuple

import torch
import torch.distributed as dist
from colossalai.communication.collective import (all_gather, all_reduce, reduce, reduce_scatter)
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import...

from ._operation import reduce_by_batch_2d, split_batch_2d
from .layers import (Classifier2D, Embedding2D, LayerNorm2D, Linear2D, PatchEmbedding2D,
                     VocabParallelClassifier2D, VocabParallelEmbedding2D)

__all__ = [
    'split_batch_2d', 'reduce_by_batch_2d', 'Linear2D', 'LayerNorm2D', 'Classifier2D'...

import math
from collections import OrderedDict
from typing import Callable

import torch
import torch.nn as nn
import torch.nn.functional as F
from colossalai.communication import broadcast
from colossalai.context import ParallelMode, seed
from colossalai.core import global_context as gpc
from colossalai.global_variab...
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.global_variables import tensor_parallel_env as env


def get_summa_dim_from_env() -> int:
    try:
        summa_dim = env.summa_dim
        assert summa_dim > 0, 'SUMMA_DIM must be larger than ze...

import math
from typing import Callable

from colossalai.utils import get_current_device
from torch import dtype, nn

from ... import init as init
from ..parallel_1d import Embedding1D, PatchEmbedding1D, VocabParallelEmbedding1D
from ..parallel_2d import Embedding2D, PatchEmbedding2D, VocabParallelEmbedding2D
from ..pa...

import math
import inspect
from typing import Callable

from colossalai.utils import get_current_device
from torch import dtype, nn

from ... import init as init
from ..parallel_1d import *
from ..parallel_2d import *
from ..parallel_2p5d import *
from ..parallel_3d import *
from ..utils import get_tensor_parallel_mode...

from ._utils import partition_batch
from .dropout import Dropout
from .embedding import Embedding, PatchEmbedding
from .linear import Classifier, Linear
from .normalization import LayerNorm

__all__ = ['Linear', 'Classifier', 'Embedding', 'PatchEmbedding', 'LayerNorm', 'Dropout', 'partition_batch']

import torch.nn as nn

from colossalai.context import ParallelMode, seed

from ..parallel_1d import *
from ..utils import get_tensor_parallel_mode
from ._utils import ColossalaiModule


class Dropout(ColossalaiModule):
    """Dropout layer of colossalai.

    Args:
        p (float, optional): probability of an element ...

from colossalai.utils import get_current_device
from torch import nn

from ..parallel_1d import LayerNorm1D
from ..parallel_2d import LayerNorm2D
from ..parallel_2p5d import LayerNorm2p5D
from ..parallel_3d import LayerNorm3D
from ..utils import get_tensor_parallel_mode
from ..vanilla import VanillaLayerNorm
from ._uti...
import torch.nn as nn
from torch import Tensor

from ..parallel_2d._operation import split_batch_2d
from ..parallel_2p5d._operation import split_batch_2p5d
from ..parallel_3d._operation import split_batch_3d
from ..utils import get_tensor_parallel_mode

_parallel_split_batch = {'2d': split_batch_2d, '2.5d': split_batch...
from .layers import (DropPath, VanillaClassifier, VanillaLayerNorm, VanillaPatchEmbedding, WrappedDropout,
                     WrappedDropPath)

__all__ = [
    "VanillaLayerNorm", "VanillaPatchEmbedding", "VanillaClassifier", "DropPath", "WrappedDropout", "WrappedDropPath"
]

import math
from typing import Callable

import torch
import torch.nn.functional as F
from colossalai.context import seed
from colossalai.nn import init as init
from colossalai.registry import LAYERS
from colossalai.utils.cuda import get_current_device
from torch import Tensor
from torch import nn as nn

from ..utils i...

import torch
import torch.distributed as dist
from torch import Tensor
from typing import Any, Tuple, Optional
from torch.distributed import ProcessGroup

COL_MOE_KERNEL_FLAG = False
try:
    import colossal_moe_cuda

    COL_MOE_KERNEL_FLAG = True
except ImportError:
    print("If you want to activate cuda mode for Mo...

from .experts import Experts, FFNExperts, TPExperts
from .layers import MoeLayer, Top1Router, Top2Router, MoeModule
from .utils import NormalNoiseGenerator, UniformNoiseGenerator, build_ffn_experts

__all__ = [
    'Experts', 'FFNExperts', 'TPExperts', 'Top1Router', 'Top2Router', 'MoeLayer', 'NormalNoiseGenerator', ...

import torch
import torch.nn.functional as F
from colossalai.utils import get_current_device
from colossalai.context.moe_context import MOE_CONTEXT
from .experts import FFNExperts, TPExperts


class ForceFP32Parameter(torch.nn.Parameter):

    def half(self, memory_format=None):
        return self.data


class NormalN...

import math

import torch
import torch.nn as nn
from colossalai.context import ParallelMode, seed
from colossalai.utils import get_current_device
from colossalai.context.moe_context import MOE_CONTEXT
from colossalai.zero.init_ctx import no_shard_zero_decrator
from typing import Type


class MoeExperts(nn.Module):
    ...

import functools
import math

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from colossalai.context.moe_context import MOE_CONTEXT
from colossalai.utils import get_current_device
from ._operation import COL_MOE_KERNEL_FLAG, AllToAll, AllGather, ReduceScatter, MoeDis...
from .model_from_config import ModelFromConfig

__all__ = ['ModelFromConfig']

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from abc import ABC, abstractmethod

import torch.nn as nn
from colossalai.builder import build_layer


class ModelFromConfig(nn.Module, ABC):

    def __init__(self):
        super(ModelFromConfig, self).__init__()
        self.layers = nn.ModuleList()
        self.lay...

from torch import nn

from ._utils import calc_acc
from .accuracy_2d import Accuracy2D
from .accuracy_2p5d import Accuracy2p5D
from .accuracy_3d import Accuracy3D
from colossalai.nn.layer.utils import get_tensor_parallel_mode

_parallel_accuracy = {
    '2d': Accuracy2D,
    '2.5d': Accuracy2p5D,
    '3d': Accuracy3D,
    ...

import torch
from colossalai.nn.layer.parallel_2d import reduce_by_batch_2d, split_batch_2d
from torch import nn

from ._utils import calc_acc


class Accuracy2D(nn.Module):
    """Accuracy for 2D parallelism
    """

    def __init__(self):
        super().__init__()

    def forward(self, logits, targets):
        """...

import torch
from colossalai.constants import INPUT_GROUP_3D, WEIGHT_GROUP_3D
from colossalai.nn.layer.parallel_3d import reduce_by_batch_3d, split_tensor_3d
from colossalai.nn.layer.parallel_3d._utils import get_parallel_mode_from_env
from torch import nn

from ._utils import calc_acc


class Accuracy3D(nn.Module):
    ...

import torch
from colossalai.nn.layer.parallel_2p5d import reduce_by_batch_2p5d, split_batch_2p5d
from torch import nn

from ._utils import calc_acc


class Accuracy2p5D(nn.Module):
    """Accuracy for 2p5D parallelism
    """

    def __init__(self):
        super().__init__()

    def forward(self, logits, targets):
    ...
import torch


def calc_acc(logits, targets):
    preds = torch.argmax(logits, dim=-1)
    correct = torch.sum(targets == preds)
    return correct
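Note that calc_acc returns the raw count of correct predictions, not a ratio; callers divide by the (possibly all-reduced) batch size. A quick check:

logits = torch.tensor([[2.0, 0.5], [0.1, 1.2], [0.3, 0.2]])
targets = torch.tensor([0, 1, 1])

# argmax picks predictions [0, 1, 0], so 2 of the 3 targets match.
assert calc_acc(logits, targets).item() == 2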
import torch
import gc
import psutil
from collections import namedtuple

from colossalai.context.parallel_mode import ParallelMode
from colossalai.utils import get_current_device
from colossalai.core import global_context as gpc
from colossalai.logging import ge...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import time
from typing import Tuple

from .cuda import synchronize


class Timer:
    """A timer object which helps to log the execution times, and provides different tools to assess the times.
    """

    def __init__(self):
        self._started = False
        self._s...

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch
from torch.utils.checkpoint import check_backward_validity, detach_variable

from colossalai.context.random import get_states, get_current_mode, set_seed_states, set_mode, sync_states
from .cuda import get_current_device


def copy_to_device(obj, device):
    ...
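This module re-implements activation checkpointing on top of ColossalAI's seed-state tracking; for orientation, the stock PyTorch equivalent (without the parallel-RNG bookkeeping) looks like this:

import torch
from torch.utils.checkpoint import checkpoint

block = torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.ReLU())
x = torch.randn(4, 16, requires_grad=True)

# Activations inside `block` are dropped during the forward pass and
# recomputed during backward, trading compute for memory.
y = checkpoint(block, x)
y.sum().backward()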
from .cuda import empty_cache, get_current_device, set_to_cuda, synchronize
from .activation_checkpoint import checkpoint
from .checkpointing import load_checkpoint, save_checkpoint
from .common import (clip_grad_norm_fp32, conditional_context, copy_tensor_parallel_attributes, count_zeros_fp32,
                     ens...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import os
import random
import socket
from pathlib import Path
from typing import Callable, List, Union
import functools

import torch
from torch._six import inf
from torch.nn.parameter import Parameter

try:
    import colossal_C
except ImportError:
    pass

from contextlib import c...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch


def set_to_cuda(models):
    """Send model to gpu.

    :param models: nn.module or a list of module
    """
    if isinstance(models, list) and len(models) > 1:
        ret = []
        for model in models:
            ret.append(model.to(get_current_devi...
import torch.nn as nn
import torch.distributed as dist
from colossalai.core import global_context as gpc
from colossalai.context.moe_context import MOE_CONTEXT
from colossalai.context import ParallelMode
from .common import is_using_ddp
from typing import Dict, List


def get_moe_epsize_param_dict(model: nn.Module) -> ...

from collections import OrderedDict
from itertools import chain

import torch
import torch.distributed as dist

from colossalai.communication.collective import scatter_object_list
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc

try:
    from torch.nn.modules.mo...

from colossalai.context.singleton_meta import SingletonMeta
import torch
from typing import Tuple, Optional
from colossalai.logging import DistributedLogger


def colo_model_optimizer_usage(optim) -> Tuple[int, int]:
    """Trace the optimizer memory usage.

    Args:
        optim (ShardedOptimV2): an instance of Shard...

from .memory_monitor import AsyncMemoryMonitor, SyncCudaMemoryMonitor
from .memstats_collector import MemStatsCollector

__all__ = ['AsyncMemoryMonitor', 'SyncCudaMemoryMonitor', 'MemStatsCollector']

from abc import abstractmethod
from concurrent.futures import ThreadPoolExecutor
from time import sleep, time
import json

import torch

from colossalai.utils.memory import colo_device_memory_used
from colossalai.utils import get_current_device


class MemoryMonitor:
    """Base class for all types of memory monitor.
    ...

from colossalai.utils.memory_tracer.model_data_memtracer import GLOBAL_MODEL_DATA_TRACER
from colossalai.utils.memory import colo_device_memory_used
from colossalai.utils.memory_tracer import SyncCudaMemoryMonitor
import torch
import time
from typing import List


class MemStatsCollector:
    """
    A Memory statistic...

from .multi_tensor_apply import MultiTensorApply

multi_tensor_applier = MultiTensorApply(2048 * 32)
# modified from https://github.com/NVIDIA/apex/blob/master/apex/multi_tensor_apply/multi_tensor_apply.py


class MultiTensorApply(object):
    """
    Apply an operation to a list of tensors efficiently.

    Args:
        chunk_size (int): Size of a chunk.
    """

    available = False
    warned = False

    def __i...
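apex's multi_tensor_applier, which this class mirrors, is called with a fused kernel, an int32 no-op flag buffer, and a list of tensor lists. A usage sketch under the assumption that the colossal_C extension exposes a multi_tensor_scale kernel with apex's signature:

import torch

try:
    import colossal_C    # assumption: built with apex-compatible fused kernels
    applier = MultiTensorApply(2048 * 32)
    overflow_buf = torch.zeros(1, dtype=torch.int, device='cuda')
    grads = [torch.randn(1024, device='cuda') for _ in range(4)]
    outs = [torch.empty_like(g) for g in grads]
    # Scales every gradient by 0.5 with one kernel launch per chunk.
    applier(colossal_C.multi_tensor_scale, overflow_buf, [grads, outs], 0.5)
except ImportError:
    pass    # kernels not built; callers fall back to a per-tensor loop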
import torch.nn as nn
from typing import Iterable, List
from colossalai.engine import BaseGradientHandler
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler

from ._gradient_accumulation import GradAccumDataloader, GradAccumOptimizer, GradAccumLrSchedulerByStep, GradAcc...
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch.nn as nn
from torch import Tensor
from typing import Iterable, Any
from colossalai.nn.optimizer import ColossalaiOptimizer
from torch.nn.parallel.distributed import DistributedDataParallel
from torch.optim import Optimizer
from torch.optim.lr_scheduler impor...

#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from abc import ABC, abstractmethod


class BaseSampler(ABC):

    def __init__(self, dataset, batch_size):
        self.dataset = dataset
        self.batch_size = batch_size

    @abstractmethod
    def __len__(self):
        pass

    @abstractmethod
    def __iter__(...

from .base_sampler import BaseSampler
from .data_parallel_sampler import DataParallelSampler, get_dataloader

__all__ = ['BaseSampler', 'DataParallelSampler', 'get_dataloader']
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

# adapted from torch.utils.data.DistributedSampler

import math
import random
import numpy as np
from typing import TypeVar, Iterator

import torch
from torch.utils.data import Sampler, Dataset, DataLoader

from colossalai.context.parallel_mode import ParallelMode
from co...
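A typical call to the get_dataloader helper defined in this module, assuming an initialized ColossalAI data-parallel context (extra keyword arguments such as batch_size are forwarded to the underlying DataLoader):

import torch
from torch.utils.data import TensorDataset

dataset = TensorDataset(torch.randn(256, 8), torch.randint(0, 2, (256,)))
# get_dataloader attaches a DataParallelSampler so that each
# data-parallel rank iterates over a distinct shard of the dataset.
train_loader = get_dataloader(dataset, shuffle=True, batch_size=32)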
import gc
import inspect
import torch
import torch.nn as nn
from typing import Optional
from collections import defaultdict

LINE_WIDTH = 108
LINE = '-' * LINE_WIDTH + '\n'


class TensorDetector():

    def __init__(self,
                 show_info: bool = True,
                 log: str = None,
                 inclu...
from .tensor_detector import TensorDetector
from pathlib import Path

from torch.autograd.profiler import profile

from .prof_utils import BaseProfiler, _format_time, _format_memory, _format_bandwidth
from typing import List


def _get_size(dtype: str):
    if dtype == "fp16":
        return 2
    elif dtype == "fp32":
        return 4
    else:
        raise NotI...

import inspect
from pathlib import Path
from functools import partial
import torch
from torch.autograd.profiler import profile
import torch.distributed as dist
from torch.distributed import ReduceOp
from colossalai.utils import get_current_device
from .prof_utils import BaseProfiler, _format_time, _format_memory, _form...

from .comm_profiler import CommProfiler
from .pcie_profiler import PcieProfiler
from .prof_utils import ProfilerContext, BaseProfiler
from .mem_profiler import MemProfiler

__all__ = ['BaseProfiler', 'CommProfiler', 'PcieProfiler', 'MemProfiler', 'ProfilerContext']
from pathlib import Path
from typing import Union

from colossalai.engine import Engine
from torch.utils.tensorboard import SummaryWriter
from colossalai.engine.ophooks import MemTracerOpHook
from colossalai.utils.profiler import BaseProfiler


class MemProfiler(BaseProfiler):
    """Wrapper of MemOpHook, used to show GP...
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Union, List

from colossalai.core import global_context as gpc


# copied from a newer PyTorch version to support older versions
def _format_time(time_us):
    """Defines how to format time in FunctionEvent"""
    US_IN_SECOND = 1000.0 * 1000.0
    ...
from .comparison import assert_equal, assert_not_equal, assert_close, assert_close_loose, assert_equal_in_group
from .utils import parameterize, rerun_on_exception, rerun_if_address_is_in_use

__all__ = [
    'assert_equal', 'assert_not_equal', 'assert_close', 'assert_close_loose', 'assert_equal_in_group', 'parameteriz...

import torch
import torch.distributed as dist
from torch import Tensor
from torch.distributed import ProcessGroup


def assert_equal(a: Tensor, b: Tensor):
    assert torch.all(a == b), f'expected a and b to be equal but they are not, {a} vs {b}'


def assert_not_equal(a: Tensor, b: Tensor):
    assert not torch.all(a =...
import re
import torch
from typing import Callable, List, Any
from functools import partial
from inspect import signature
from packaging import version


def parameterize(argument: str, values: List[Any]) -> Callable:
    """
    This function simulates the same behavior as pytest.mark.parametrize. As we want...
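Unlike pytest.mark.parametrize, the expansion happens when the decorated function is called, so stacked decorators run the function once per value combination. A usage sketch:

import torch

@parameterize('size', [8, 16])
@parameterize('bias', [True, False])
def check_linear(size, bias):
    # Runs for (8, True), (8, False), (16, True), (16, False).
    layer = torch.nn.Linear(size, size, bias=bias)
    assert layer.weight.shape == (size, size)

check_linear()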
from typing import Tuple

import torch
import torch.nn as nn
from colossalai.logging import get_dist_logger
from colossalai.zero.sharded_model.sharded_model_v2 import ShardedModelV2
from colossalai.zero.sharded_optim.sharded_optim_v2 import ShardedOptimizerV2


def convert_to_zero_v2(model: nn.Module, optimizer: torch....

from .init_context import ZeroInitContext, no_shard_zero_context, no_shard_zero_decrator

__all__ = ['ZeroInitContext', 'no_shard_zero_context', 'no_shard_zero_decrator']

import contextlib
import functools
from typing import Optional

import torch
import torch.nn as nn
import torch.distributed as dist
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.context.singleton_meta import SingletonMeta
from colossalai.logg...

from .sharded_optim_v2 import ShardedOptimizerV2

__all__ = ['ShardedOptimizerV2']

from enum import Enum
from os import stat
from typing import Dict, Optional, Tuple

import torch
import torch.distributed as dist
import torch.nn as nn
from colossalai.amp.naive_amp.grad_scaler import DynamicGradScaler
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context ...
import math

import torch
from torch._six import inf
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors

from colossalai.core import global_context as gpc
from colossalai.context import ParallelMode
from colossalai.utils import is_model_parallel_parameter
import torch.distributed as dist


def flat...
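The two torch._utils helpers imported above pack a list of tensors into one contiguous buffer and recover same-shaped views of it, which is how ZeRO-style optimizers reduce many small gradients with a single collective. A quick demonstration:

import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors

a, b = torch.randn(3, 4), torch.randn(5)

flat = _flatten_dense_tensors([a, b])             # one 17-element buffer
views = _unflatten_dense_tensors(flat, [a, b])    # views shaped like a and b

assert flat.numel() == a.numel() + b.numel()
assert torch.equal(views[0], a) and torch.equal(views[1], b)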
from typing import Optional

import torch
import torch.distributed as dist
from colossalai.registry import OPHOOKS
from colossalai.utils import get_current_device
from colossalai.utils.memory_tracer.memstats_collector import MemStatsCollector
from colossalai.zero.shard_utils import BaseShardStrategy

from colossalai.z...

from .zero_hook import ZeroHook

__all__ = ['ZeroHook']

import torch
from colossalai.zero.sharded_param.tensorful_state import StatefulTensor, TensorState
from typing import Optional


class ShardedTensor(StatefulTensor):

    def __init__(self, tensor: torch.Tensor, state: TensorState = TensorState.HOLD) -> None:
        r"""
        A tensor sharded in multiple processes....

import torch
from colossalai.zero.sharded_param import ShardedTensor
from typing import List, Optional, Tuple
from colossalai.zero.sharded_param.tensor_utils import colo_tensor_mem_usage
from .tensorful_state import StatefulTensor, TensorState

EMPTY_TENSOR_DICT = {}


def get_empty_tensor(device: tor...

from colossalai.zero.sharded_param.sharded_tensor import ShardedTensor
from colossalai.zero.sharded_param.sharded_param import ShardedParamV2
from colossalai.zero.sharded_param.tensor_utils import (colo_model_data_tensor_move, colo_model_data_tensor_move_inline,
                                                        c...

import torch
from colossalai.zero.sharded_param.tensorful_state import StatefulTensor
from typing import Union, Tuple


def colo_tensor_mem_usage(tensor: Union[torch.Tensor, StatefulTensor]) -> Tuple[int, int]:
    if issubclass(type(tensor), StatefulTensor):
        t = tensor.payload
    elif isinstance(tensor, torch...
from enum import Enum
from typing import Optional

import torch


class TensorState(Enum):
    FREE = 0
    HOLD = 1
    HOLD_AFTER_FWD = 2
    HOLD_AFTER_BWD = 3
    COMPUTE = 4


class StatefulTensor(object):
    """A structure that stores a torch tensor together with its labeled state.
    Inspired by the paper:
    PatrickStar: Pa...
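The enum encodes where a tensor sits in the forward/backward lifecycle: HOLD while resident, COMPUTE while an operator is using it, HOLD_AFTER_FWD / HOLD_AFTER_BWD between phases, and FREE once the payload may be evicted. A construction sketch, assuming the (tensor, state) constructor that ShardedTensor above passes through:

import torch

# Hypothetical usage: a freshly materialized tensor starts in HOLD.
t = StatefulTensor(torch.randn(4, 4), TensorState.HOLD)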
from typing import List, Optional

import torch
import torch.distributed as dist
from colossalai.utils import get_current_device
from colossalai.zero.sharded_param.sharded_tensor import ShardedTensor
from torch._utils import _flatten_dense_tensors as flatten

from .tensor_shard_strategy import TensorShardStrategy


cla...

from .base_shard_strategy import BaseShardStrategy
from .bucket_tensor_shard_strategy import BucketTensorShardStrategy
from .tensor_shard_strategy import TensorShardStrategy

__all__ = ['BaseShardStrategy', 'TensorShardStrategy', 'BucketTensorShardStrategy']
from abc import ABC, abstractmethod
from typing import List, Optional

import torch.distributed as dist
from colossalai.zero.sharded_param.sharded_tensor import ShardedTensor


class BaseShardStrategy(ABC):

    def __init__(self) -> None:
        """Abstract shard strategy. Used to shard tensors on multiple GPUs.
    ...
import torch
import torch.nn.functional as F
from typing import Tuple


def get_shard(tensor: torch.Tensor, rank: int, world_size: int) -> Tuple[torch.Tensor, int]:
    """Return the local shard of a full tensor."""
    # Shard using torch.chunk to match all-gather/reduce-scatter.
    chunks = list(torch.flatten(tensor...
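The body is truncated above, but the chunking arithmetic is worth spelling out: torch.chunk splits the flattened tensor into ceil(n / world_size)-sized pieces, and the final rank's shard is padded up to that size so every rank holds the same number of elements (assuming the standard pad-to-equal-chunks behavior). With hypothetical sizes:

import torch

full = torch.arange(10, dtype=torch.float32)
shard, pad = get_shard(full, rank=3, world_size=4)

# 10 elements chunk into sizes [3, 3, 3, 1]; the last shard is padded by 2.
assert shard.numel() == 3 and pad == 2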
from typing import List, Optional

import torch
import torch.distributed as dist
from colossalai.utils import get_current_device
from colossalai.zero.sharded_param.tensor_utils import colo_model_data_tensor_move_inline
from colossalai.zero.shard_utils import BaseShardStrategy
from colossalai.zero.shard_utils.commons im...