| code | apis | extract_api |
|---|---|---|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from colossalai.logging import get_dist_logger
from colossalai.registry import HOOKS
from colossalai.trainer.hooks import BaseHook
from colossalai.utils.checkpointing import save_checkpoint
from ._lr_scheduler_hook import LRSchedulerHook
@HOOKS.register_module
class S... | [
"colossalai.utils.checkpointing.save_checkpoint",
"colossalai.logging.get_dist_logger"
] | [((1160, 1177), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (1175, 1177), False, 'from colossalai.logging import get_dist_logger\n'), ((1721, 1849), 'colossalai.utils.checkpointing.save_checkpoint', 'save_checkpoint', (['self.checkpoint_dir', 'trainer.cur_epoch', 'trainer.engine.model', '... |
import pytest
import colossalai
from colossalai.utils.cuda import get_current_device
from colossalai.utils.memory import colo_set_process_memory_fraction, colo_device_memory_capacity
from colossalai.utils import free_port
from functools import partial
import torch.multiprocessing as mp
def _run_colo_set_process_mem... | [
"colossalai.launch",
"colossalai.utils.memory.colo_set_process_memory_fraction",
"colossalai.utils.free_port",
"colossalai.utils.cuda.get_current_device"
] | [((813, 858), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[4, 5]'], {}), "('world_size', [4, 5])\n", (836, 858), False, 'import pytest\n'), ((434, 471), 'colossalai.utils.memory.colo_set_process_memory_fraction', 'colo_set_process_memory_fraction', (['(0.5)'], {}), '(0.5)\n', (466, 471)... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.initialize import launch, get_default_parser
from checks_3d.check_layer_3d import *
from checks_3d.check_operation_3d import *
from colossalai.logging import get_dist_logger
from functools impo... | [
"colossalai.initialize.launch",
"colossalai.logging.get_dist_logger"
] | [((596, 613), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (611, 613), False, 'from colossalai.logging import get_dist_logger\n'), ((1527, 1632), 'colossalai.initialize.launch', 'launch', ([], {'config': 'CONFIG', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port... |
import colossalai
import torch
import pytest
import torch.nn as nn
import torch.multiprocessing as mp
from colossalai.tensor import ColoTensor
from colossalai.tensor import dist_spec
from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, DistSpecManager
from colossalai.context import ParallelMode
fro... | [
"colossalai.testing.rerun_if_address_is_in_use",
"colossalai.tensor.ParallelAction",
"colossalai.core.global_context.get_world_size",
"colossalai.launch",
"colossalai.tensor.DistSpecManager.no_grad",
"colossalai.utils.free_port",
"colossalai.core.global_context.get_local_rank",
"colossalai.core.global... | [((3540, 3585), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 4]'], {}), "('world_size', [1, 4])\n", (3563, 3585), False, 'import pytest\n'), ((3587, 3615), 'colossalai.testing.rerun_if_address_is_in_use', 'rerun_if_address_is_in_use', ([], {}), '()\n', (3613, 3615), False, 'from colo... |
import torch
import torch.distributed as dist
from torch.distributed import distributed_c10d
from colossalai.gemini.tensor.stateful_tensor import StatefulTensorV2
def _convert_tensor(tensor: torch.Tensor) -> StatefulTensorV2:
if not tensor.is_contiguous():
raise ValueError('input tensor is not a contiguo... | [
"colossalai.gemini.tensor.stateful_tensor.StatefulTensorV2"
] | [((343, 367), 'colossalai.gemini.tensor.stateful_tensor.StatefulTensorV2', 'StatefulTensorV2', (['tensor'], {}), '(tensor)\n', (359, 367), False, 'from colossalai.gemini.tensor.stateful_tensor import StatefulTensorV2\n')] |
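This row's visible body and its extract entry pin the helper down almost completely; below is a hedged completion, assuming the truncated error message simply finishes the contiguity complaint.

```python
# Hedged completion of the truncated helper above; the tail of the error
# message is an assumption, the rest follows the visible code and the
# recorded StatefulTensorV2(tensor) call.
import torch
from colossalai.gemini.tensor.stateful_tensor import StatefulTensorV2

def _convert_tensor(tensor: torch.Tensor) -> StatefulTensorV2:
    if not tensor.is_contiguous():
        raise ValueError('input tensor is not a contiguous tensor')
    return StatefulTensorV2(tensor)
```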
import torch
from colossalai.zero.sharded_param import ShardedParamV2
from colossalai.utils import get_current_device
from typing import List
class BucketizedTensorCopy(object):
def __init__(
self,
chunk_size: int,
):
r"""
torch.nn.Parameter CPU (fp32) -> ShardedParam GPU (fp... | [
"colossalai.utils.get_current_device"
] | [((580, 601), 'torch.device', 'torch.device', (['"""cpu:0"""'], {}), "('cpu:0')\n", (592, 601), False, 'import torch\n'), ((798, 818), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (816, 818), False, 'from colossalai.utils import get_current_device\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
import torch.distributed as dist
from torch import Tensor
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device
def all_gather(tensor: Tensor, dim: int,
... | [
"colossalai.core.global_context.get_world_size",
"colossalai.core.global_context.get_group"
] | [((873, 906), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['parallel_mode'], {}), '(parallel_mode)\n', (891, 906), True, 'from colossalai.core import global_context as gpc\n'), ((2259, 2292), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['parallel_mode'], {}), '(pa... |
import os
import colossalai
import torch
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.logging import disable_existing_loggers, get_dist_logger
from colossalai.utils import get_dataloader
from colossalai.nn.lr_scheduler import LinearWarmupLR
from... | [
"colossalai.launch_from_torch",
"colossalai.logging.disable_existing_loggers",
"colossalai.nn.metric.Accuracy",
"colossalai.trainer.hooks.LogMetricByEpochHook",
"colossalai.trainer.Trainer",
"colossalai.initialize",
"colossalai.nn.lr_scheduler.LinearWarmupLR",
"colossalai.trainer.hooks.LossHook",
"c... | [((573, 604), 'colossalai.get_default_parser', 'colossalai.get_default_parser', ([], {}), '()\n', (602, 604), False, 'import colossalai\n'), ((642, 668), 'colossalai.logging.disable_existing_loggers', 'disable_existing_loggers', ([], {}), '()\n', (666, 668), False, 'from colossalai.logging import disable_existing_logge... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from copy import deepcopy
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.utils import free_port
from colossalai.zero.shard_utils.tensor_shard_strategy import \
TensorShardStrategy
from co... | [
"colossalai.launch",
"colossalai.zero.shard_utils.tensor_shard_strategy.TensorShardStrategy",
"colossalai.utils.free_port"
] | [((519, 635), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'CONFIG', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=CONFIG, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (536, 635), False,... |
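This row's extract records the full `colossalai.launch(...)` call, and the same spawn-and-launch harness recurs throughout the dataset: `partial` binds the world size and a free port, `mp.spawn` forks one process per rank, and each rank calls `launch`. A minimal sketch of that pattern, with `CONFIG` and `world_size` as placeholders:

```python
# Sketch of the spawn/launch test harness seen across these rows; CONFIG and
# world_size are placeholders, the launch arguments mirror the recorded call.
from functools import partial

import colossalai
import torch.multiprocessing as mp
from colossalai.utils import free_port

CONFIG = dict()

def run_dist(rank, world_size, port):
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size,
                      host='localhost', port=port, backend='nccl')

def test_dist(world_size=4):
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)
```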
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from colossalai.initialize import init_dist
from colossalai.logging import get_global_dist_logger
from test_layer import *
CONFIG = dict(
parallel=dict(
pipeline=1,
tensor=dict(mode='sequence', size=4)
)
)
def check_layer():
check_selfatten... | [
"colossalai.logging.get_global_dist_logger",
"colossalai.initialize.init_dist"
] | [((367, 384), 'colossalai.initialize.init_dist', 'init_dist', (['CONFIG'], {}), '(CONFIG)\n', (376, 384), False, 'from colossalai.initialize import init_dist\n'), ((398, 422), 'colossalai.logging.get_global_dist_logger', 'get_global_dist_logger', ([], {}), '()\n', (420, 422), False, 'from colossalai.logging import get_... |
import torch.nn as nn
from colossalai.context import ParallelMode, seed
from ..parallel_1d import *
from ..utils import get_tensor_parallel_mode
class Dropout(nn.Module):
"""
Dropout layer of Colossal-AI.
:param p: dropout rate, defaults to 0.5
:type p: float, optional
:param inplace:... | [
"colossalai.context.seed"
] | [((728, 750), 'torch.nn.Dropout', 'nn.Dropout', (['p', 'inplace'], {}), '(p, inplace)\n', (738, 750), True, 'import torch.nn as nn\n'), ((904, 929), 'colossalai.context.seed', 'seed', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (908, 929), False, 'from colossalai.context import ParallelMode, seed\n')] |
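The extract data records both `nn.Dropout(p, inplace)` and `seed(ParallelMode.TENSOR)`, which suggests the layer wraps a plain dropout in the tensor-parallel seed context. A hedged sketch of that pattern; the `forward` body is an assumption:

```python
# Hedged sketch: apply dropout under the tensor-parallel RNG context so that
# all tensor-parallel replicas draw the same mask.
import torch.nn as nn
from colossalai.context import ParallelMode, seed

class Dropout(nn.Module):
    def __init__(self, p: float = 0.5, inplace: bool = False):
        super().__init__()
        self.drop = nn.Dropout(p, inplace)

    def forward(self, x):
        with seed(ParallelMode.TENSOR):
            return self.drop(x)
```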
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize
from colossalai.utils import free_port
from colossalai.utils.cuda import get_current_device
from colossalai.utils.me... | [
"colossalai.testing.parameterize",
"colossalai.launch",
"colossalai.utils.free_port",
"colossalai.utils.cuda.get_current_device"
] | [((739, 829), 'colossalai.testing.parameterize', 'parameterize', (['"""shard_strategy_class"""', '[TensorShardStrategy, BucketTensorShardStrategy]'], {}), "('shard_strategy_class', [TensorShardStrategy,\n BucketTensorShardStrategy])\n", (751, 829), False, 'from colossalai.testing import parameterize\n'), ((2161, 220... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
try:
import apex.amp as apex_amp
except ImportError:
print('apex is required for mixed precision training')
try:
import torch.cuda.amp as torch_amp
except ImportError:
print('PyTorch amp is not supported with the current PyTorch version')
from colossalai.context import Parallel... | [
"colossalai.core.global_context.get_world_size",
"colossalai.core.global_context.is_initialized"
] | [((1614, 1653), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (1632, 1653), True, 'from colossalai.core import global_context as gpc\n'), ((1658, 1697), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMo... |
import torch
from typing import Union, Optional
from colossalai.tensor import ColoTensor
GeneralTensor = Union[ColoTensor, torch.Tensor]
Number = Union[int, float]
def convert_to_colo_tensor(tensor: Optional[GeneralTensor]) -> Optional[ColoTensor]:
if tensor is not None and not isinstance(tensor, ColoTensor):
... | [
"colossalai.tensor.ColoTensor.from_torch_tensor"
] | [((335, 371), 'colossalai.tensor.ColoTensor.from_torch_tensor', 'ColoTensor.from_torch_tensor', (['tensor'], {}), '(tensor)\n', (363, 371), False, 'from colossalai.tensor import ColoTensor\n')] |
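The visible guard plus the recorded `ColoTensor.from_torch_tensor(tensor)` call determine the whole function; a hedged completion:

```python
# Hedged completion of convert_to_colo_tensor from the visible guard and the
# recorded from_torch_tensor call.
from typing import Optional, Union

import torch
from colossalai.tensor import ColoTensor

GeneralTensor = Union[ColoTensor, torch.Tensor]

def convert_to_colo_tensor(tensor: Optional[GeneralTensor]) -> Optional[ColoTensor]:
    if tensor is not None and not isinstance(tensor, ColoTensor):
        tensor = ColoTensor.from_torch_tensor(tensor)
    return tensor
```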
from colossalai.context.parallel_mode import ParallelMode
import math
import torch.nn as nn
import torch
from .deepnet_configs import DeepNetTransformerLayer1D, FusedDeepNetTransformerLayer1D
from .embed import HiddenParallelEmbedding, HiddenParallelLMHead1D, VocabParallelEmbedding, VocabParallelLMHead1D
from col... | [
"colossalai.core.global_context.get_world_size",
"colossalai.kernel.LayerNorm",
"colossalai.nn.layer.wrapper.PipelineSharedModuleWrapper",
"colossalai.core.global_context.get_global_rank",
"colossalai.builder.pipeline.partition_uniform",
"colossalai.logging.get_dist_logger",
"colossalai.core.global_cont... | [((6450, 6473), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (6467, 6473), False, 'import inspect\n'), ((6630, 6650), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (6642, 6650), False, 'import torch\n'), ((6677, 6694), 'colossalai.logging.get_dist_logger', 'get_dist_logger... |
import os
import copy
import torch
import colossalai
from colossalai.core import global_context as gpc
from colossalai.logging import get_dist_logger
from colossalai.trainer import Trainer, hooks
from colossalai.nn.lr_scheduler import LinearWarmupLR
from torch.nn.modules.loss import *
from torch.nn.modules.loss import... | [
"colossalai.trainer.hooks.TensorboardHook",
"colossalai.launch_from_openmpi",
"colossalai.trainer.hooks.SaveCheckpointHook",
"colossalai.launch_from_slurm",
"colossalai.nn.lr_scheduler.LinearWarmupLR",
"colossalai.trainer.hooks.LogMetricByStepHook",
"colossalai.context.config.Config.from_file",
"colos... | [((1438, 1460), 'copy.deepcopy', 'copy.deepcopy', (['_config'], {}), '(_config)\n', (1451, 1460), False, 'import copy\n'), ((1493, 1516), 'colossalai.core.global_context.load_config', 'gpc.load_config', (['config'], {}), '(config)\n', (1508, 1516), True, 'from colossalai.core import global_context as gpc\n'), ((2294, 2... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.communication import (recv_backward, recv_forward, recv_obj_meta, send_backward,
send_backw... | [
"colossalai.communication.send_backward_recv_forward",
"colossalai.core.global_context.is_first_rank",
"colossalai.core.global_context.get_next_global_rank",
"colossalai.communication.send_backward",
"colossalai.communication.send_forward",
"colossalai.core.global_context.is_last_rank",
"colossalai.util... | [((3700, 3803), 'colossalai.testing.rerun_on_exception', 'rerun_on_exception', ([], {'exception_type': 'mp.ProcessRaisedException', 'pattern': '""".*Address already in use.*"""'}), "(exception_type=mp.ProcessRaisedException, pattern=\n '.*Address already in use.*')\n", (3718, 3803), False, 'from colossalai.testing i... |
import torch
import torch.distributed as dist
from .parallel_mode import ParallelMode
from typing import Tuple
def _check_sanity():
from colossalai.core import global_context as gpc
if gpc.tensor_parallel_size > 1 or gpc.pipeline_parallel_size > 1:
raise NotImplementedError("Moe is not compat... | [
"colossalai.core.global_context.config.get",
"colossalai.core.global_context.get_group"
] | [((1573, 1588), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1586, 1588), True, 'import torch.distributed as dist\n'), ((3364, 3389), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3387, 3389), False, 'import torch\n'), ((3456, 3477), 'torch.distributed.get_world_size', 'd... |
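The visible lines show the MoE sanity check refusing to coexist with tensor or pipeline parallelism; a hedged reconstruction, where the cut-off end of the error message is assumed:

```python
# Hedged reconstruction of the visible sanity check; the tail of the error
# message is an assumption.
def _check_sanity():
    from colossalai.core import global_context as gpc
    if gpc.tensor_parallel_size > 1 or gpc.pipeline_parallel_size > 1:
        raise NotImplementedError(
            'Moe is not compatible with tensor or pipeline parallelism at present')
```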
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda.amp import autocast
from colossalai.global_variables import moe_env
from colossalai.context import ParallelMode, seed
from colossalai.utils import get_current_device
from ._operation import AllToAll
class Norma... | [
"colossalai.global_variables.moe_env.add_loss",
"colossalai.context.seed",
"colossalai.utils.get_current_device"
] | [((1881, 1931), 'torch.chunk', 'torch.chunk', (['inputs', 'self.num_local_experts'], {'dim': '(0)'}), '(inputs, self.num_local_experts, dim=0)\n', (1892, 1931), False, 'import torch\n'), ((2099, 2130), 'torch.cat', 'torch.cat', (['expert_output'], {'dim': '(0)'}), '(expert_output, dim=0)\n', (2108, 2130), False, 'impor... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.core import global_context as gpc
from colossalai.initialize import launch, get_default_parser
from checks_2d.check_layer_2d import check_linear, check_layernorm, check_attention, check_mlp, c... | [
"colossalai.core.global_context.destroy",
"colossalai.initialize.launch"
] | [((623, 633), 'checks_2d.check_operation_2d.check_AB', 'check_AB', ([], {}), '()\n', (631, 633), False, 'from checks_2d.check_operation_2d import check_AB, check_ABT, check_ATB\n'), ((638, 649), 'checks_2d.check_operation_2d.check_ABT', 'check_ABT', ([], {}), '()\n', (647, 649), False, 'from checks_2d.check_operation_2... |
import torch
import torch.distributed as dist
from torch.nn import Parameter
import time
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn import Linear1D_Col, Linear1D_Row
from colossalai.utils import get_current_device, print_rank_0
... | [
"colossalai.utils.print_rank_0",
"colossalai.nn.Linear1D_Col",
"colossalai.nn.Linear1D_Row",
"colossalai.utils.get_current_device",
"colossalai.core.global_context.get_local_rank"
] | [((462, 482), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (480, 482), False, 'from colossalai.utils import get_current_device, print_rank_0\n'), ((586, 630), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_1D'], {}), '(ParallelMode.PARAL... |
from colossalai.utils.commons.singleton_meta import SingletonMeta
from colossalai.utils.memory_tracer.commons import col_tensor_mem_usage
import torch
class ModelDataTracer(metaclass=SingletonMeta):
"""
A singleton to trace model data usage during runtime.
We have to trigger our API (trace_tensor, detach_... | [
"colossalai.utils.memory_tracer.commons.col_tensor_mem_usage"
] | [((705, 728), 'colossalai.utils.memory_tracer.commons.col_tensor_mem_usage', 'col_tensor_mem_usage', (['t'], {}), '(t)\n', (725, 728), False, 'from colossalai.utils.memory_tracer.commons import col_tensor_mem_usage\n'), ((938, 961), 'colossalai.utils.memory_tracer.commons.col_tensor_mem_usage', 'col_tensor_mem_usage', ... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from copy import deepcopy
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.zero.in... | [
"colossalai.testing.rerun_if_address_is_in_use",
"colossalai.testing.parameterize",
"colossalai.launch",
"colossalai.zero.sharded_model.ShardedModelV2",
"colossalai.utils.free_port",
"colossalai.zero.sharded_model.utils.col_model_deepcopy"
] | [((671, 761), 'colossalai.testing.parameterize', 'parameterize', (['"""shard_strategy_class"""', '[TensorShardStrategy, BucketTensorShardStrategy]'], {}), "('shard_strategy_class', [TensorShardStrategy,\n BucketTensorShardStrategy])\n", (683, 761), False, 'from colossalai.testing import parameterize, rerun_if_addres... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
import torch.distributed as dist
from torch.distributed import ReduceOp
from torch import Tensor
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
def all_gather(tensor: Tensor,
dim: int,
... | [
"colossalai.core.global_context.get_cpu_group",
"colossalai.core.global_context.get_group",
"colossalai.core.global_context.get_world_size"
] | [((1446, 1479), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['parallel_mode'], {}), '(parallel_mode)\n', (1464, 1479), True, 'from colossalai.core import global_context as gpc\n'), ((3817, 3850), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['parallel_mode'], {}), ... |
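This row and the earlier `all_gather` near the top of the section share a standard shape: look up the group's world size, gather one chunk per rank, then concatenate along `dim`. A hedged sketch of that pattern built from the recorded `gpc` calls, not the library's exact implementation:

```python
# Hedged sketch of a group all-gather; the real colossalai implementation
# may differ in buffering and async details.
import torch
import torch.distributed as dist
from torch import Tensor
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc

def all_gather_sketch(tensor: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
    world_size = gpc.get_world_size(parallel_mode)
    if world_size == 1:
        return tensor
    tensor = tensor.contiguous()
    chunks = [torch.empty_like(tensor) for _ in range(world_size)]
    dist.all_gather(chunks, tensor, group=gpc.get_group(parallel_mode))
    return torch.cat(chunks, dim=dim)
```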
import colossalai
from colossalai.core import global_context as gpc
from colossalai.logging import get_dist_logger
from colossalai.trainer import Trainer, hooks
from colossalai.utils import get_dataloader, MultiTimer
from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
from colossalai.engine.schedule import N... | [
"colossalai.trainer.hooks.TensorboardHook",
"colossalai.launch_from_torch",
"colossalai.utils.get_dataloader",
"colossalai.utils.MultiTimer",
"colossalai.trainer.hooks.LogMetricByEpochHook",
"colossalai.initialize",
"colossalai.trainer.hooks.SaveCheckpointHook",
"colossalai.trainer.hooks.LRSchedulerHo... | [((573, 590), 'augmentation.SimCLRTransform', 'SimCLRTransform', ([], {}), '()\n', (588, 590), False, 'from augmentation import SimCLRTransform\n'), ((611, 698), 'torchvision.datasets.CIFAR10', 'CIFAR10', ([], {'root': 'gpc.config.dataset.root', 'transform': 'augment', 'train': '(True)', 'download': '(True)'}), '(root=... |
from typing import List, Optional
import torch
import torch.distributed as dist
from colossalai.utils import get_current_device
from colossalai.zero.shard_utils import BaseShardStrategy
from colossalai.zero.sharded_model._zero3_utils import get_shard
from colossalai.zero.sharded_param.sharded_tensor import ShardedTens... | [
"colossalai.utils.get_current_device"
] | [((1426, 1460), 'torch.distributed.get_world_size', 'dist.get_world_size', (['process_group'], {}), '(process_group)\n', (1445, 1460), True, 'import torch.distributed as dist\n'), ((1476, 1504), 'torch.distributed.get_rank', 'dist.get_rank', (['process_group'], {}), '(process_group)\n', (1489, 1504), True, 'import torc... |
from common.utils import CONFIG, print_log
from torch.cuda import max_memory_allocated, reset_peak_memory_stats
from torch.distributed import get_rank
def init_w_col(builder):
import colossalai
from colossalai.core import global_context as gpc
from colossalai.logging import disable_existing_logger... | [
"colossalai.launch_from_torch",
"colossalai.logging.disable_existing_loggers",
"colossalai.zero.sharded_model.ShardedModelV2",
"colossalai.zero.sharded_model.ShardedModel",
"colossalai.zero.shard_utils.TensorShardStrategy",
"colossalai.core.global_context.config.zero.pop"
] | [((645, 671), 'colossalai.logging.disable_existing_loggers', 'disable_existing_loggers', ([], {}), '()\n', (669, 671), False, 'from colossalai.logging import disable_existing_loggers\n'), ((677, 720), 'colossalai.launch_from_torch', 'colossalai.launch_from_torch', ([], {'config': 'CONFIG'}), '(config=CONFIG)\n', (705, ... |
import colossalai
import click
import torch.multiprocessing as mp
from functools import partial
from typing import List, Dict
from colossalai.context import Config
from colossalai.context.random import reset_seeds
from colossalai.core import global_context as gpc
from colossalai.logging import disable_existing_logger... | [
"colossalai.cli.benchmark.utils.find_all_configs",
"colossalai.core.global_context.destroy",
"colossalai.logging.disable_existing_loggers",
"colossalai.launch",
"colossalai.context.random.reset_seeds",
"colossalai.core.global_context.get_global_rank",
"colossalai.utils.free_port",
"colossalai.utils.Mu... | [((846, 891), 'click.echo', 'click.echo', (['"""=== Benchmarking Parameters ==="""'], {}), "('=== Benchmarking Parameters ===')\n", (856, 891), False, 'import click\n'), ((958, 972), 'click.echo', 'click.echo', (['""""""'], {}), "('')\n", (968, 972), False, 'import click\n'), ((992, 1019), 'colossalai.cli.benchmark.uti... |
from enum import Enum
from typing import Dict, Optional
import torch
import torch.distributed as dist
import torch.nn as nn
from colossalai.amp.naive_amp._fp16_optimizer import DynamicGradScaler
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.... | [
"colossalai.zero.sharded_model._zero3_utils.cast_tensor_to_fp32",
"colossalai.amp.naive_amp._fp16_optimizer.DynamicGradScaler",
"colossalai.core.global_context.get_group"
] | [((2323, 2540), 'colossalai.amp.naive_amp._fp16_optimizer.DynamicGradScaler', 'DynamicGradScaler', ([], {'initial_scale': 'initial_scale', 'min_scale': 'min_scale', 'growth_factor': 'growth_factor', 'backoff_factor': 'backoff_factor', 'growth_interval': 'growth_interval', 'hysteresis': 'hysteresis', 'max_scale': 'max_s... |
import math
from typing import Callable, Optional
from colossalai.utils import get_current_device
from torch import dtype, nn
from ... import init as init
from ..parallel_1d import *
from ..parallel_2d import *
from ..parallel_2p5d import *
from ..parallel_3d import *
from ..utils import get_tensor_paralle... | [
"colossalai.utils.get_current_device"
] | [((4070, 4082), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (4079, 4082), False, 'import math\n'), ((1981, 2001), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (1999, 2001), False, 'from colossalai.utils import get_current_device\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.initialize import launch
from colossalai.logging import get_dist_logger
from checks_seq.check_layer_seq import *
from functools import partial
from colossalai.utils import free_port
CONFIG = ... | [
"colossalai.logging.get_dist_logger",
"colossalai.utils.free_port",
"colossalai.initialize.launch"
] | [((535, 639), 'colossalai.initialize.launch', 'launch', ([], {'config': 'CONFIG', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=CONFIG, rank=rank, world_size=world_size, host='localhost',\n port=port, backend='nccl')\n", (541, 639), False, ... |
from typing import Any, Tuple
import torch
import torch.distributed as dist
from torch import Tensor
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device
from torch.cuda.amp import custom_bwd, custom_fwd
def matmu... | [
"colossalai.core.global_context.get_local_rank",
"colossalai.core.global_context.get_group",
"colossalai.core.global_context.get_world_size",
"colossalai.utils.get_current_device",
"colossalai.core.global_context.is_initialized"
] | [((2401, 2438), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs=torch.float16)\n', (2411, 2438), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((7233, 7270), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float16'}), '(cast_inputs... |
import torch
import gc
import psutil
from collections import namedtuple
from colossalai.context.parallel_mode import ParallelMode
from colossalai.utils import get_current_device
from colossalai.core import global_context as gpc
from colossalai.context.parallel_mode import ParallelMode
from colossalai.logging import ge... | [
"colossalai.logging.get_dist_logger",
"colossalai.utils.get_current_device",
"colossalai.core.global_context.is_initialized"
] | [((713, 786), 'collections.namedtuple', 'namedtuple', (['"""ps_mem_info"""', "['total', 'free', 'cached', 'buffers', 'used']"], {}), "('ps_mem_info', ['total', 'free', 'cached', 'buffers', 'used'])\n", (723, 786), False, 'from collections import namedtuple\n'), ((2383, 2422), 'colossalai.core.global_context.is_initiali... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from abc import ABC, abstractmethod
import torch
from torch import Tensor
from typing import Iterable, Union, List, Callable
from .._base_engine import Engine
from colossalai.logging import get_dist_logger
from colossalai.utils import get_current_device
class BaseSch... | [
"colossalai.utils.get_current_device",
"colossalai.logging.get_dist_logger"
] | [((741, 758), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (756, 758), False, 'from colossalai.logging import get_dist_logger\n'), ((883, 907), 'torch.is_tensor', 'torch.is_tensor', (['element'], {}), '(element)\n', (898, 907), False, 'import torch\n'), ((1334, 1355), 'torch.is_tensor', 't... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
from functools import partial
from pathlib import Path
import pytest
import torch.cuda
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
import colossalai
from colossalai.builder import build_dataset
f... | [
"colossalai.builder.build_dataset",
"colossalai.core.global_context.set_seed",
"colossalai.core.global_context.get_local_rank",
"colossalai.core.global_context.get_group",
"colossalai.init_dist"
] | [((1314, 1347), 'colossalai.init_dist', 'colossalai.init_dist', ([], {}), '(**dist_args)\n', (1334, 1347), False, 'import colossalai\n'), ((1352, 1366), 'colossalai.core.global_context.set_seed', 'gpc.set_seed', ([], {}), '()\n', (1364, 1366), True, 'from colossalai.core import global_context as gpc\n'), ((1420, 1464),... |
from typing import Optional
import torch
import torch.distributed as dist
from colossalai.registry import OPHOOKS
from colossalai.utils import get_current_device
from colossalai.utils.memory_tracer.memstats_collector import MemStatsCollector
from colossalai.zero.shard_utils import BaseShardStrategy
from colossalai.zer... | [
"colossalai.utils.get_current_device",
"colossalai.utils.memory_utils.utils.colo_model_data_tensor_move_inline"
] | [((1559, 1657), 'colossalai.utils.memory_utils.utils.colo_model_data_tensor_move_inline', 'colo_model_data_tensor_move_inline', (['param.colo_attr.sharded_data_tensor', 'self.computing_device'], {}), '(param.colo_attr.sharded_data_tensor,\n self.computing_device)\n', (1593, 1657), False, 'from colossalai.utils.memor... |
from colossalai.core import global_context as gpc
from colossalai.registry import GRADIENT_HANDLER
from ._base_gradient_handler import BaseGradientHandler
from ...context.parallel_mode import ParallelMode
from .utils import bucket_allreduce
@GRADIENT_HANDLER.register_module
class SequenceParallelGradientHandler(BaseG... | [
"colossalai.core.global_context.get_world_size",
"colossalai.core.global_context.get_group"
] | [((966, 1010), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.SEQUENCE_DP'], {}), '(ParallelMode.SEQUENCE_DP)\n', (984, 1010), True, 'from colossalai.core import global_context as gpc\n'), ((1088, 1127), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['ParallelMode... |
from tests.components_to_test.registry import non_distributed_component_funcs
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils.cuda import get_current_device
from colossalai.utils import free_po... | [
"colossalai.tensor.named_params_with_colotensor",
"colossalai.tensor.ParallelAction",
"colossalai.testing.parameterize",
"colossalai.core.global_context.get_local_rank",
"colossalai.core.global_context.get_group",
"colossalai.utils.cuda.get_current_device",
"colossalai.tensor.TensorSpec",
"colossalai.... | [((7791, 7825), 'colossalai.testing.parameterize', 'parameterize', (['"""world_size"""', '[1, 4]'], {}), "('world_size', [1, 4])\n", (7803, 7825), False, 'from colossalai.testing import parameterize, rerun_if_address_is_in_use\n'), ((7827, 7855), 'colossalai.testing.rerun_if_address_is_in_use', 'rerun_if_address_is_in_... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.core import global_context as gpc
from colossalai.initialize import launch, get_default_parser
from functools import partial
from checks_1d.check_layer_1d import *
CONFIG = dict(
... | [
"colossalai.core.global_context.destroy",
"colossalai.initialize.launch"
] | [((505, 610), 'colossalai.initialize.launch', 'launch', ([], {'config': 'CONFIG', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': '(29920)', 'backend': '"""nccl"""'}), "(config=CONFIG, rank=rank, world_size=world_size, host='localhost',\n port=29920, backend='nccl')\n", (511, 610), Fal... |
from enum import Enum
from os import stat
from typing import Dict, Optional, Tuple
import torch
import torch.distributed as dist
import torch.nn as nn
from colossalai.amp.naive_amp.grad_scaler import DynamicGradScaler
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context ... | [
"colossalai.logging.get_dist_logger",
"colossalai.core.global_context.get_group",
"colossalai.amp.naive_amp.grad_scaler.DynamicGradScaler",
"colossalai.zero.sharded_optim._utils.has_inf_or_nan",
"colossalai.utils.memory_tracer.model_data_memtracer.GLOBAL_MODEL_DATA_TRACER.register_optimizer",
"colossalai.... | [((5553, 5770), 'colossalai.amp.naive_amp.grad_scaler.DynamicGradScaler', 'DynamicGradScaler', ([], {'initial_scale': 'initial_scale', 'min_scale': 'min_scale', 'growth_factor': 'growth_factor', 'backoff_factor': 'backoff_factor', 'growth_interval': 'growth_interval', 'hysteresis': 'hysteresis', 'max_scale': 'max_scale... |
from .op_wrapper import _COLOSSAL_OPS
from copy import copy
import torch
from colossalai.tensor import TensorSpec
from .const import TensorType
from colossalai.tensor import distspec
from colossalai.tensor.dist_spec_mgr import DistSpecManager
from colossalai.tensor.distspec import _DistSpec
from torch.overrides import ... | [
"colossalai.tensor.dist_spec_mgr.DistSpecManager.handle_trans_spec",
"colossalai.tensor.distspec.replicate",
"colossalai.tensor.dist_spec_mgr.DistSpecManager.no_grad"
] | [((1214, 1272), 'torch.Tensor._make_subclass', 'torch.Tensor._make_subclass', (['cls', 'data', 'data.requires_grad'], {}), '(cls, data, data.requires_grad)\n', (1241, 1272), False, 'import torch\n'), ((1400, 1410), 'copy.copy', 'copy', (['spec'], {}), '(spec)\n', (1404, 1410), False, 'from copy import copy\n'), ((1625,... |
import torch
import torch.nn.functional as F
from colossalai.utils import get_current_device
from colossalai.core import MOE_CONTEXT
from .experts import FFNExperts, TPExperts
class NormalNoiseGenerator:
"""Generates a random noisy mask for logtis tensor.
All noise is generated from a normal distri... | [
"colossalai.utils.get_current_device"
] | [((2093, 2117), 'torch.nn.functional.softmax', 'F.softmax', (['sm_input', 'dim'], {}), '(sm_input, dim)\n', (2102, 2117), True, 'import torch.nn.functional as F\n'), ((596, 616), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (614, 616), False, 'from colossalai.utils import get_current_d... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import math
import time
import numpy as np
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context
from colossalai.logging import get_dist_logger
from colossalai.registry import LAYERS, LOSSES
from colossalai.utils import get... | [
"colossalai.registry.LAYERS.get_module",
"colossalai.logging.get_dist_logger",
"colossalai.core.global_context.get_local_rank",
"colossalai.registry.LOSSES.get_module",
"colossalai.nn.layer.parallel_3d._utils.get_parallel_mode_from_env",
"colossalai.utils.get_current_device"
] | [((608, 625), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (623, 625), False, 'from colossalai.logging import get_dist_logger\n'), ((639, 659), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (657, 659), False, 'from colossalai.utils import get_current_device... |
import torch
from colossalai.tensor.op_wrapper import colo_op_impl
from colossalai.nn.layer.parallel_1d._utils import split_forward_gather_backward, reduce_input, reduce_grad
from colossalai.nn.layer.utils import divide
from colossalai.core import global_context as gpc
from packaging import version
from colossalai.tens... | [
"colossalai.tensor.ColoTensor.init_from_torch_tensor",
"colossalai.tensor.graph.GraphGlobalEnv",
"colossalai.tensor.ParallelAction",
"colossalai.nn.layer.parallel_1d._utils.reduce_input",
"colossalai.tensor.op_wrapper.colo_op_impl",
"colossalai.tensor.graph.GraphOpNode"
] | [((2495, 2535), 'colossalai.tensor.op_wrapper.colo_op_impl', 'colo_op_impl', (['torch.nn.functional.linear'], {}), '(torch.nn.functional.linear)\n', (2507, 2535), False, 'from colossalai.tensor.op_wrapper import colo_op_impl\n'), ((1051, 1110), 'colossalai.nn.layer.parallel_1d._utils.reduce_input', 'reduce_input', (['p... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os.path as osp
import pytest
from colossalai.core import global_context as gpc
from colossalai.initialize import initialize
from colossalai.logging import get_global_dist_logger
NUM_BATCH = 128
BATCH_SIZE = 32
SEQ_LENGTH = 128
HIDDEN_SIZE = 512
DIR_PATH = osp... | [
"colossalai.core.global_context.destroy",
"colossalai.initialize.initialize",
"colossalai.logging.get_global_dist_logger"
] | [((367, 426), 'os.path.join', 'osp.join', (['DIR_PATH', '"""../configs/pipeline_vanilla_resnet.py"""'], {}), "(DIR_PATH, '../configs/pipeline_vanilla_resnet.py')\n", (375, 426), True, 'import os.path as osp\n'), ((430, 504), 'pytest.mark.skip', 'pytest.mark.skip', (['"""This test should be invoked using the test.sh pro... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
from pathlib import Path
import pytest
from torchvision import transforms
from torch.utils.data import DataLoader
from colossalai.builder import build_dataset, build_transform
from colossalai.context import Config
TRAIN_DATA = dict(
dataset=dict(
... | [
"colossalai.builder.build_dataset",
"colossalai.context.Config",
"colossalai.builder.build_transform"
] | [((727, 745), 'colossalai.context.Config', 'Config', (['TRAIN_DATA'], {}), '(TRAIN_DATA)\n', (733, 745), False, 'from colossalai.context import Config\n'), ((985, 1023), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_pipeline'], {}), '(transform_pipeline)\n', (1003, 1023), False, 'from torchvision... |
import os
import sys
from pathlib import Path
import colossalai
import torch
from colossalai.core import global_context as gpc
from colossalai.logging import get_dist_logger
from colossalai.nn import CosineAnnealingLR
from colossalai.nn.metric import Accuracy
from colossalai.trainer import Trainer, hooks
from colossala... | [
"colossalai.launch",
"colossalai.trainer.hooks.LRSchedulerHook",
"colossalai.trainer.hooks.TensorboardHook",
"colossalai.trainer.hooks.SaveCheckpointHook",
"colossalai.logging.get_dist_logger",
"colossalai.trainer.hooks.LossHook",
"colossalai.trainer.hooks.LogMemoryByEpochHook",
"colossalai.trainer.Tr... | [((1004, 1031), 'os.path.join', 'os.path.join', (['DATA', '"""train"""'], {}), "(DATA, 'train')\n", (1016, 1031), False, 'import os\n'), ((1046, 1078), 'os.path.join', 'os.path.join', (['DATA', '"""trainannot"""'], {}), "(DATA, 'trainannot')\n", (1058, 1078), False, 'import os\n'), ((1093, 1119), 'os.path.join', 'os.pa... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from abc import ABC
from torch.utils.data import Dataset
from torchvision.transforms import transforms
from colossalai.builder import build_transform
class BaseDataset(Dataset, ABC):
def __init__(self, transform_pipeline: list):
transform_list = [build_t... | [
"colossalai.builder.build_transform"
] | [((385, 419), 'torchvision.transforms.transforms.Compose', 'transforms.Compose', (['transform_list'], {}), '(transform_list)\n', (403, 419), False, 'from torchvision.transforms import transforms\n'), ((313, 333), 'colossalai.builder.build_transform', 'build_transform', (['cfg'], {}), '(cfg)\n', (328, 333), False, 'from... |
import pytest
from colossalai.utils.cuda import get_current_device
from colossalai.utils.memory_tracer.model_data_memtracer import GLOBAL_MODEL_DATA_TRACER
from colossalai.utils.memory_utils.utils import colo_model_data_tensor_move, colo_model_data_tensor_move_inline
from colossalai.zero.sharded_param import ShardedTe... | [
"colossalai.utils.memory_tracer.model_data_memtracer.GLOBAL_MODEL_DATA_TRACER.close",
"colossalai.utils.free_port",
"colossalai.utils.memory_tracer.model_data_memtracer.GLOBAL_MODEL_DATA_TRACER.start",
"colossalai.utils.memory_utils.utils.colo_model_data_tensor_move",
"colossalai.utils.cuda.get_current_devi... | [((2228, 2273), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 4]'], {}), "('world_size', [1, 4])\n", (2251, 2273), False, 'import pytest\n'), ((570, 602), 'colossalai.utils.memory_tracer.model_data_memtracer.GLOBAL_MODEL_DATA_TRACER.start', 'GLOBAL_MODEL_DATA_TRACER.start', ([], {}), ... |
import torch
from colossalai.gemini.tensor import stateful_op_impl
from colossalai.gemini.tensor.stateful_tensor import StatefulTensorV2
@stateful_op_impl(torch.mean)
def stateful_mean(types, args=(), kwargs=None, pg=None):
stateful_tensor = args[0]
return torch.mean(stateful_tensor.torch_tensor())
def regi... | [
"colossalai.gemini.tensor.stateful_op_impl"
] | [((140, 168), 'colossalai.gemini.tensor.stateful_op_impl', 'stateful_op_impl', (['torch.mean'], {}), '(torch.mean)\n', (156, 168), False, 'from colossalai.gemini.tensor import stateful_op_impl\n'), ((351, 371), 'colossalai.gemini.tensor.stateful_op_impl', 'stateful_op_impl', (['op'], {}), '(op)\n', (367, 371), False, '... |
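The `(351, 371)` extract entry records a second `stateful_op_impl(op)` call, suggesting the truncated `def regi...` registers a batch of ops the same way `torch.mean` is handled above. A hedged sketch of such a registration loop; the concrete op list is illustrative:

```python
# Hedged sketch: register a family of reductions through the same decorator
# used for torch.mean above; the op list is an assumption.
import torch
from colossalai.gemini.tensor import stateful_op_impl

def register_reduction(op):
    @stateful_op_impl(op)
    def reduction(types, args=(), kwargs=None, pg=None):
        stateful_tensor = args[0]
        return op(stateful_tensor.torch_tensor())

for op in (torch.sum, torch.max, torch.min):
    register_reduction(op)
```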
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch.nn as nn
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
class ParallelLayer(nn.Module):
def __init__(self):
super().__init__()
self.data_parallel_rank = 0 if not gpc.is_initialized(Parall... | [
"colossalai.core.global_context.get_world_size",
"colossalai.core.global_context.is_initialized",
"colossalai.core.global_context.get_local_rank"
] | [((338, 375), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.DATA'], {}), '(ParallelMode.DATA)\n', (356, 375), True, 'from colossalai.core import global_context as gpc\n'), ((475, 512), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.DATA'],... |
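The visible constructor and the recorded `gpc` calls imply a defensive pattern: default to rank 0 and world size 1 whenever the data-parallel group is not initialized. A hedged reconstruction:

```python
# Hedged reconstruction of the ParallelLayer constructor from the recorded
# is_initialized / get_local_rank / get_world_size calls.
import torch.nn as nn
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc

class ParallelLayer(nn.Module):
    def __init__(self):
        super().__init__()
        initialized = gpc.is_initialized(ParallelMode.DATA)
        self.data_parallel_rank = gpc.get_local_rank(ParallelMode.DATA) if initialized else 0
        self.data_parallel_size = gpc.get_world_size(ParallelMode.DATA) if initialized else 1
```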
import torch
import torch.nn as nn
from .registry import non_distributed_component_funcs
from transformers import GPT2Config, GPT2LMHeadModel
from .utils.dummy_data_generator import DummyDataGenerator
from colossalai.utils.cuda import get_current_device
class DummyDataLoader(DummyDataGenerator):
vocab_size = 5030... | [
"colossalai.utils.cuda.get_current_device"
] | [((627, 653), 'torch.ones_like', 'torch.ones_like', (['input_ids'], {}), '(input_ids)\n', (642, 653), False, 'import torch\n'), ((2044, 2065), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2063, 2065), True, 'import torch.nn as nn\n'), ((1077, 1280), 'transformers.GPT2Config', 'GPT2Config', ([]... |
from functools import partial
import colossalai
from colossalai.utils.cuda import get_current_device
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.amp import convert_to_apex_amp
from colossalai.nn.optimizer import CPUAdam
from colossalai.testing import p... | [
"colossalai.testing.parameterize",
"colossalai.zero.sharded_model.utils.col_model_deepcopy",
"colossalai.zero.sharded_optim.ShardedOptimizerV2",
"colossalai.amp.convert_to_apex_amp",
"colossalai.testing.rerun_on_exception",
"colossalai.zero.sharded_optim._utils.has_inf_or_nan",
"colossalai.launch",
"c... | [((1460, 1502), 'colossalai.testing.parameterize', 'parameterize', (['"""cpu_offload"""', '[True, False]'], {}), "('cpu_offload', [True, False])\n", (1472, 1502), False, 'from colossalai.testing import parameterize, rerun_on_exception\n'), ((1504, 1546), 'colossalai.testing.parameterize', 'parameterize', (['"""use_cpua... |
from torch import Tensor
from colossalai.builder import build_lr_scheduler
from colossalai.registry import HOOKS
from ._metric_hook import MetricHook
from .._trainer import Trainer
from ..metric import LearningRate
@HOOKS.register_module
class LRSchedulerHook(MetricHook):
"""Build LR scheduler
:param traine... | [
"colossalai.builder.build_lr_scheduler"
] | [((1460, 1522), 'colossalai.builder.build_lr_scheduler', 'build_lr_scheduler', (['lr_scheduler_cfg', 'trainer.engine.optimizer'], {}), '(lr_scheduler_cfg, trainer.engine.optimizer)\n', (1478, 1522), False, 'from colossalai.builder import build_lr_scheduler\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import inspect
from typing import Callable, List, Tuple, Union
import colossalai.communication as comm
import torch.cuda
from colossalai.amp.naive_amp import NaiveAMPModel
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_conte... | [
"colossalai.core.global_context.is_first_rank",
"colossalai.communication.send_forward",
"colossalai.communication.send_backward",
"colossalai.communication.send_forward_recv_backward",
"colossalai.core.global_context.get_local_rank",
"colossalai.core.global_context.set_virtual_pipeline_parallel_size",
... | [((734, 775), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (752, 775), True, 'from colossalai.core import global_context as gpc\n'), ((993, 1030), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMod... |
from typing import List, Optional
import torch
import torch.distributed as dist
from colossalai.zero.shard_utils import BaseShardStrategy
from colossalai.zero.sharded_model._zero3_utils import get_shard
from colossalai.zero.sharded_param.sharded_tensor import ShardedTensor
from colossalai.utils import get_current_devi... | [
"colossalai.utils.get_current_device",
"colossalai.zero.sharded_model._zero3_utils.get_shard"
] | [((858, 912), 'colossalai.zero.sharded_model._zero3_utils.get_shard', 'get_shard', (['t.payload', 'self.local_rank', 'self.world_size'], {}), '(t.payload, self.local_rank, self.world_size)\n', (867, 912), False, 'from colossalai.zero.sharded_model._zero3_utils import get_shard\n'), ((1464, 1581), 'torch.distributed.all... |
# referenced from Megatron and used to test communication
import colossalai
import os
import os.path as osp
import pytest
import torch
import torch.multiprocessing as mp
import model
from colossalai.builder import PipelineModelInitializer
from colossalai.communication import p2p as p2p_communication
from colossala... | [
"colossalai.core.global_context.destroy",
"colossalai.utils.print_rank_0",
"colossalai.utils.get_dataloader",
"colossalai.builder.PipelineModelInitializer",
"colossalai.initialize",
"colossalai.engine.schedule.PipelineSchedule",
"colossalai.initialize.launch"
] | [((893, 933), 'os.path.join', 'osp.join', (['DIR_PATH', '"""./resnet_config.py"""'], {}), "(DIR_PATH, './resnet_config.py')\n", (901, 933), True, 'import os.path as osp\n'), ((855, 877), 'os.path.realpath', 'osp.realpath', (['__file__'], {}), '(__file__)\n', (867, 877), True, 'import os.path as osp\n'), ((976, 1087), '... |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless re... | [
"colossalai.logging.get_dist_logger",
"colossalai.core.global_context.get_group",
"colossalai.core.global_context.is_initialized"
] | [((3432, 3449), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (3447, 3449), False, 'from colossalai.logging import get_dist_logger\n'), ((5920, 5946), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['[1]'], {}), '([1])\n', (5941, 5946), False, 'import torch\n'), ((6035, 6076), 'colossal... |
import colossalai
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from data import build_train_valid_test_data_iterators
from data.tokenizer import initialize_tokenizer, get_padded_vocab_size
from data.bert_helper import get_batch_for_sequence_parallel, Sequen... | [
"colossalai.core.global_context.is_first_rank",
"colossalai.core.global_context.get_ranks_in_group",
"colossalai.logging.get_dist_logger",
"colossalai.utils.MultiTimer",
"colossalai.core.global_context.config.fp16.get",
"colossalai.launch_from_torch",
"colossalai.core.global_context.is_initialized",
"... | [((960, 1000), 'colossalai.core.global_context.is_first_rank', 'gpc.is_first_rank', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (977, 1000), True, 'from colossalai.core import global_context as gpc\n'), ((1458, 1535), 'colossalai.launch_from_torch', 'colossalai.launch_from_torch', ([], {'config': '"""... |
import torch
import torch.distributed as dist
from colossalai.zero.sharded_param import ShardedTensor
from typing import Optional, Tuple
class ShardedParamV2(object):
def __init__(self,
param: torch.nn.Parameter,
process_group: Optional[dist.ProcessGroup] = None,
... | [
"colossalai.zero.sharded_param.ShardedTensor"
] | [((406, 446), 'colossalai.zero.sharded_param.ShardedTensor', 'ShardedTensor', (['param.data', 'process_group'], {}), '(param.data, process_group)\n', (419, 446), False, 'from colossalai.zero.sharded_param import ShardedTensor\n'), ((1360, 1425), 'torch.empty', 'torch.empty', (['[]'], {'dtype': 'self.param.dtype', 'devi... |
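The recorded calls sketch the constructor's job: wrap the parameter payload in a `ShardedTensor`, then replace `param.data` with an empty placeholder so the original storage can be freed. A hedged sketch; attribute names beyond what the extract data shows are assumptions:

```python
# Hedged sketch of the ShardedParamV2 constructor; field names not visible in
# the extract data are assumptions.
from typing import Optional

import torch
import torch.distributed as dist
from colossalai.zero.sharded_param import ShardedTensor

class ShardedParamV2Sketch:
    def __init__(self, param: torch.nn.Parameter,
                 process_group: Optional[dist.ProcessGroup] = None):
        self.param = param
        self.sharded_data_tensor = ShardedTensor(param.data, process_group)
        # release the original payload; consumers read the sharded tensor instead
        self.param.data = torch.empty([], dtype=self.param.dtype, device=self.param.device)
```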
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from pathlib import Path
import pytest
import torch.autograd
import colossalai
from colossalai.builder import build_lr_scheduler
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.logging import g... | [
"colossalai.builder.build_lr_scheduler",
"colossalai.initialize",
"colossalai.logging.get_global_dist_logger",
"colossalai.nn.layer._parallel_utilities._gather"
] | [((1714, 1829), 'pytest.mark.skip', 'pytest.mark.skip', (['"""This test should be invoked by test.sh in the same folder as it runs on multiple gpus"""'], {}), "(\n 'This test should be invoked by test.sh in the same folder as it runs on multiple gpus'\n )\n", (1730, 1829), False, 'import pytest\n'), ((1927, 1961)... |
import glob
import os
import colossalai
import torch
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.logging import disable_existing_loggers, get_dist_logger
from colossalai.nn.lr_scheduler import LinearWarmupLR
from colossalai.trainer import Trainer, hooks... | [
"colossalai.trainer.hooks.LogMetricByEpochHook",
"colossalai.logging.disable_existing_loggers",
"colossalai.initialize",
"colossalai.nn.lr_scheduler.LinearWarmupLR",
"colossalai.logging.get_dist_logger",
"colossalai.launch_from_torch",
"colossalai.core.global_context.get_local_rank",
"colossalai.train... | [((584, 613), 'os.path.join', 'os.path.join', (['root', '"""train/*"""'], {}), "(root, 'train/*')\n", (596, 613), False, 'import os\n'), ((634, 673), 'os.path.join', 'os.path.join', (['root', '"""idx_files/train/*"""'], {}), "(root, 'idx_files/train/*')\n", (646, 673), False, 'import os\n'), ((1280, 1314), 'os.path.joi... |
import pytest
from colossalai.core import global_context as gpc
from colossalai.initialize import init_dist
from test_layer import check_linear, check_layernorm, check_attention, check_mlp, check_transformerlayer
from test_operation import check_AB, check_ABT, check_ATB
CONFIG = dict(
parallel=dict(
pipel... | [
"colossalai.core.global_context.destroy",
"colossalai.core.global_context.set_seed",
"colossalai.initialize.init_dist"
] | [((621, 736), 'pytest.mark.skip', 'pytest.mark.skip', (['"""This test should be invoked by test.sh in the same folder as it runs on multiple gpus"""'], {}), "(\n 'This test should be invoked by test.sh in the same folder as it runs on multiple gpus'\n )\n", (637, 736), False, 'import pytest\n'), ((428, 438), 'tes... |
import torch
from colossalai.utils import get_current_device
def col_cuda_memory_capacity():
"""
Get the CUDA memory capacity of the current device.
"""
return torch.cuda.get_device_properties(get_current_device()).total_memory
| [
"colossalai.utils.get_current_device"
] | [((205, 225), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (223, 225), False, 'from colossalai.utils import get_current_device\n')] |
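This row is one of the few snippets captured in full. An illustrative use of the helper, which requires a CUDA device:

```python
# Illustrative only: report the current device's capacity in GiB.
capacity_gib = col_cuda_memory_capacity() / (1024 ** 3)
print(f'cuda capacity: {capacity_gib:.2f} GiB')
```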
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
from torch import distributed as dist
from colossalai.communication import ring_forward
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn.layer.parallel_sequence._utils import _cal... | [
"colossalai.core.global_context.get_local_rank",
"colossalai.nn.layer.parallel_sequence._utils._calc_current_device_range",
"colossalai.nn.layer.parallel_sequence._utils._calc_incoming_device_range",
"colossalai.core.global_context.get_group",
"colossalai.core.global_context.get_world_size",
"colossalai.u... | [((1254, 1295), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.SEQUENCE'], {}), '(ParallelMode.SEQUENCE)\n', (1272, 1295), True, 'from colossalai.core import global_context as gpc\n'), ((1323, 1364), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['Parall... |
import colossalai
import torch
import os
from pathlib import Path
from colossalai.logging import get_dist_logger
from colossalai.core import global_context as gpc
from colossalai.utils import get_dataloader
from torchvision import transforms
from torchvision.datasets import CIFAR10
from torchvision.models import resne... | [
"colossalai.initialize",
"colossalai.logging.get_dist_logger",
"colossalai.launch_from_torch",
"colossalai.utils.get_dataloader"
] | [((383, 433), 'colossalai.launch_from_torch', 'colossalai.launch_from_torch', ([], {'config': '"""./config.py"""'}), "(config='./config.py')\n", (411, 433), False, 'import colossalai\n'), ((448, 465), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (463, 465), False, 'from colossalai.logging ... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import argparse
import pprint
import random
from pathlib import Path
from typing import Callable, Iterable, Optional, Union
from typing import Tuple
import numpy as np
import torch
from torch.utils.data import DataLoader
from colossalai.engine import AMP_TYPE, NoPipeli... | [
"colossalai.nn.DataParallelSampler",
"colossalai.logging.init_global_dist_logger",
"colossalai.logging.get_global_dist_logger",
"colossalai.engine.NoPipelineSchedule"
] | [((1200, 1225), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1223, 1225), False, 'import argparse\n'), ((4571, 4596), 'colossalai.logging.init_global_dist_logger', 'init_global_dist_logger', ([], {}), '()\n', (4594, 4596), False, 'from colossalai.logging import get_global_dist_logger, init_g... |
import inspect
import torch.nn as nn
from torch.optim import Optimizer
from colossalai.utils import is_no_pp_or_last_stage
from .naive_amp import NaiveAMPOptimizer, NaiveAMPModel
from .grad_scaler import DynamicGradScaler, ConstantGradScaler
def convert_to_naive_amp(model: nn.Module, optimizer: Optimizer, amp_config)... | [
"colossalai.utils.is_no_pp_or_last_stage"
] | [((1435, 1475), 'inspect.signature', 'inspect.signature', (['scaler_class.__init__'], {}), '(scaler_class.__init__)\n', (1452, 1475), False, 'import inspect\n'), ((1070, 1096), 'torch.nn.ModuleList', 'nn.ModuleList', (['module_list'], {}), '(module_list)\n', (1083, 1096), True, 'import torch.nn as nn\n'), ((1132, 1156)... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import gc
import psutil
import torch
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.logging import get_global_dist_logger
def bytes_to_GB(val, decimal=2):
'''A byte-to-Gigabyte converter... | [
"colossalai.logging.get_global_dist_logger",
"colossalai.core.global_context.is_initialized"
] | [((971, 983), 'gc.collect', 'gc.collect', ([], {}), '()\n', (981, 983), False, 'import gc\n'), ((999, 1022), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (1020, 1022), False, 'import psutil\n'), ((745, 784), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.GL... |
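The converter's body is cut off, but its name and signature make the intent clear; a hedged completion assuming base-1024 units:

```python
# Hedged completion of bytes_to_GB; base-1024 gigabytes assumed.
def bytes_to_GB(val, decimal=2):
    """A byte-to-Gigabyte converter."""
    return round(val / (1024 ** 3), decimal)
```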
import torch
import torch.nn as nn
from colossalai.nn import CheckpointModule
from .utils.dummy_data_generator import DummyDataGenerator
from .registry import non_distributed_component_funcs
from colossalai.utils.cuda import get_current_device
class SimpleNet(CheckpointModule):
"""
In this no-leaf module, it ... | [
"colossalai.utils.cuda.get_current_device"
] | [((1268, 1295), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (1293, 1295), False, 'import torch\n'), ((495, 510), 'torch.nn.Linear', 'nn.Linear', (['(4)', '(8)'], {}), '(4, 8)\n', (504, 510), True, 'import torch.nn as nn\n'), ((530, 545), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['(8)'], {}... |
"""
File operations: during distributed parallel training, only the rank=0 process may write to files.
"""
import os
import torch
import copy
import random
import time
import numpy as np
from collections import OrderedDict, Counter
from torch.utils.tensorboard import SummaryWriter
from torchinfo import summary
from Utils.tools import convert_vis
from bobotools.torch_tools import Torch... | [
"colossalai.logging.get_dist_logger",
"colossalai.launch_from_torch",
"colossalai.core.global_context.get_global_rank"
] | [((470, 495), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (485, 495), False, 'import os\n'), ((742, 759), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (753, 759), False, 'import random\n'), ((764, 784), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (778, 7... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch.nn as nn
from colossalai.registry import LAYERS
from .conv import conv1x1
@LAYERS.register_module
class ResLayer(nn.Module):
def __init__(self,
block_type: str,
norm_layer_type: str,
inplanes: int,
... | [
"colossalai.registry.LAYERS.get_module"
] | [((620, 649), 'colossalai.registry.LAYERS.get_module', 'LAYERS.get_module', (['block_type'], {}), '(block_type)\n', (637, 649), False, 'from colossalai.registry import LAYERS\n'), ((676, 710), 'colossalai.registry.LAYERS.get_module', 'LAYERS.get_module', (['norm_layer_type'], {}), '(norm_layer_type)\n', (693, 710), Fal... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from typing import Optional
from typing import Union, List
import torch
from torch import Tensor
from torch.utils.data import DataLoader
from tqdm import tqdm
from colossalai.builder import build_hooks
from colossalai.checkpointing import save_checkpoint, load_checkpoi... | [
"colossalai.utils.is_dp_rank_0",
"colossalai.checkpointing.get_checkpoint_path",
"colossalai.utils.is_no_pp_or_last_stage",
"colossalai.utils.is_tp_rank_0",
"colossalai.logging.get_global_dist_logger",
"colossalai.utils.get_global_multitimer",
"colossalai.builder.build_hooks"
] | [((1685, 1709), 'colossalai.logging.get_global_dist_logger', 'get_global_dist_logger', ([], {}), '()\n', (1707, 1709), False, 'from colossalai.logging import get_global_dist_logger\n'), ((2354, 2377), 'colossalai.utils.get_global_multitimer', 'get_global_multitimer', ([], {}), '()\n', (2375, 2377), False, 'from colossa... |
import os
import colossalai
import torch
from colossalai.core import global_context as gpc
from colossalai.engine.schedule import InterleavedPipelineSchedule
from colossalai.logging import disable_existing_loggers, get_dist_logger
from colossalai.nn import CosineAnnealingWarmupLR
from colossalai.trainer import... | [
"colossalai.logging.disable_existing_loggers",
"colossalai.trainer.hooks.ThroughputHook",
"colossalai.launch",
"colossalai.trainer.hooks.LRSchedulerHook",
"colossalai.nn.CosineAnnealingWarmupLR",
"colossalai.logging.get_dist_logger",
"colossalai.trainer.hooks.LossHook",
"colossalai.trainer.Trainer",
... | [((505, 531), 'colossalai.logging.disable_existing_loggers', 'disable_existing_loggers', ([], {}), '()\n', (529, 531), False, 'from colossalai.logging import disable_existing_loggers, get_dist_logger\n'), ((546, 577), 'colossalai.get_default_parser', 'colossalai.get_default_parser', ([], {}), '()\n', (575, 577), False,... |
import contextlib
import os
import colossalai
import colossalai.utils as utils
import torch
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.engine.schedule import (InterleavedPipelineSchedule,
... | [
"colossalai.logging.disable_existing_loggers",
"colossalai.trainer.hooks.ThroughputHook",
"colossalai.nn.LinearWarmupLR",
"colossalai.utils.timer.MultiTimer",
"colossalai.trainer.hooks.LRSchedulerHook",
"colossalai.core.global_context.config.optimizer.pop",
"colossalai.logging.get_dist_logger",
"colos... | [((761, 792), 'colossalai.get_default_parser', 'colossalai.get_default_parser', ([], {}), '()\n', (790, 792), False, 'import colossalai\n'), ((904, 930), 'colossalai.logging.disable_existing_loggers', 'disable_existing_loggers', ([], {}), '()\n', (928, 930), False, 'from colossalai.logging import disable_existing_logge... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import colossalai
from colossalai.core import global_context as gpc
from colossalai.logging import get_global_dist_logger
from colossalai.trainer import Trainer
def run_trainer():
engine, train_dataloader, test_dataloader = colossalai.initialize()
logger = get_... | [
"colossalai.trainer.Trainer",
"colossalai.initialize",
"colossalai.logging.get_global_dist_logger"
] | [((279, 302), 'colossalai.initialize', 'colossalai.initialize', ([], {}), '()\n', (300, 302), False, 'import colossalai\n'), ((316, 340), 'colossalai.logging.get_global_dist_logger', 'get_global_dist_logger', ([], {}), '()\n', (338, 340), False, 'from colossalai.logging import get_global_dist_logger\n'), ((441, 477), '... |
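The three return values of `colossalai.initialize()` and the logger call are visible in the row; a sketch of the full training entry point, assuming the `Trainer` constructor and `fit()` keywords (this row uses the older top-level API, so the kwargs below are guesses):

```python
import colossalai
from colossalai.logging import get_global_dist_logger
from colossalai.trainer import Trainer

def run_trainer():
    # both calls and the three-tuple unpacking appear in the extract
    engine, train_dataloader, test_dataloader = colossalai.initialize()
    logger = get_global_dist_logger()
    trainer = Trainer(engine=engine, verbose=True)   # kwargs assumed
    trainer.fit(train_dataloader=train_dataloader,   # fit() signature assumed
                test_dataloader=test_dataloader,
                epochs=10,
                display_progress=True)

if __name__ == '__main__':
    run_trainer()
```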
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from .amp_type import AMP_TYPE
from colossalai.context import Config
import torch.nn as nn
from torch.optim import Optimizer
from torch.nn.modules.loss import _Loss
from .torch_amp import convert_to_torch_amp
from .apex_amp import convert_to_apex_amp
from .naive_amp impo... | [
"colossalai.context.Config"
] | [((1910, 1918), 'colossalai.context.Config', 'Config', ([], {}), '()\n', (1916, 1918), False, 'from colossalai.context import Config\n')] |
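Only the bare `Config()` call is proven by the extract; the row's imports (`convert_to_torch_amp`, `convert_to_apex_amp`, `AMP_TYPE`) suggest a mode dispatcher. A sketch under that assumption — the absolute import paths mirror the relative ones shown, and the converter signatures are assumed:

```python
import torch.nn as nn
from torch.optim import Optimizer
from torch.nn.modules.loss import _Loss
from colossalai.context import Config
from colossalai.amp.amp_type import AMP_TYPE                  # paths assumed from
from colossalai.amp.torch_amp import convert_to_torch_amp     # the row's relative
from colossalai.amp.apex_amp import convert_to_apex_amp       # imports

def convert_to_amp(model: nn.Module, optimizer: Optimizer, criterion: _Loss,
                   mode: AMP_TYPE, amp_config: Config = None):
    """Dispatcher sketch; only the Config() default is certain."""
    if amp_config is None:
        amp_config = Config()
    if mode == AMP_TYPE.TORCH:
        model, optimizer, criterion = convert_to_torch_amp(
            model, optimizer, criterion, amp_config)
    elif mode == AMP_TYPE.APEX:
        model, optimizer = convert_to_apex_amp(model, optimizer, amp_config)
    return model, optimizer, criterion
```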
from colossalai.nn.optimizer import HybridAdam
from colossalai.zero.shard_utils import TensorShardStrategy
from titans.model.gpt import gpt2_small
BATCH_SIZE = 2
NUM_EPOCHS = 60
SEQ_LEN = 1024
zero = dict(
model_config=dict(
tensor_placement_policy='cpu',
shard_strategy=TensorShardStrategy(),
... | [
"colossalai.zero.shard_utils.TensorShardStrategy"
] | [((294, 315), 'colossalai.zero.shard_utils.TensorShardStrategy', 'TensorShardStrategy', ([], {}), '()\n', (313, 315), False, 'from colossalai.zero.shard_utils import TensorShardStrategy\n')] |
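A plausible completion of the truncated ZeRO config file: the constants, the `model_config` fields, and the `TensorShardStrategy()` call are all visible in the row; everything after that (the optimizer section and its values) is an assumption paired with the row's `HybridAdam` import:

```python
from colossalai.nn.optimizer import HybridAdam
from colossalai.zero.shard_utils import TensorShardStrategy

BATCH_SIZE = 2
NUM_EPOCHS = 60
SEQ_LEN = 1024

zero = dict(
    model_config=dict(
        tensor_placement_policy='cpu',
        shard_strategy=TensorShardStrategy(),
        # further model_config fields are elided in the row; none invented here
    ),
)

optimizer = dict(   # section assumed; the row only imports HybridAdam
    type=HybridAdam,
    lr=0.00015,     # hypothetical value
)
```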
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
from abc import ABC, abstractmethod
from colossalai.logging import get_dist_logger
from torch import Tensor
from typing import Dict
__all__ = ['BaseGradScaler']
class BaseGradScaler(ABC):
"""A base class for the gradient scaler.
Args:
ini... | [
"colossalai.logging.get_dist_logger"
] | [((536, 575), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['[initial_scale]'], {}), '([initial_scale])\n', (558, 575), False, 'import torch\n'), ((662, 679), 'colossalai.logging.get_dist_logger', 'get_dist_logger', ([], {}), '()\n', (677, 679), False, 'from colossalai.logging import get_dist_logger\n')] |
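A sketch of the abstract scaler: the CUDA tensor holding the scale and the conditional logger are the two calls the extract records; the property and abstract `update` hook are filled in as assumptions consistent with the `DynamicGradScaler`/`ConstantGradScaler` subclasses named elsewhere in this document:

```python
import torch
from abc import ABC, abstractmethod
from colossalai.logging import get_dist_logger

class BaseGradScaler(ABC):
    """Base class for gradient scalers; only the two extract calls are certain."""

    def __init__(self, initial_scale: float, verbose: bool):
        assert initial_scale > 0
        self._scale = torch.cuda.FloatTensor([initial_scale])
        self._verbose = verbose
        if self._verbose:
            self._logger = get_dist_logger()

    @property
    def scale(self) -> torch.Tensor:
        return self._scale

    @abstractmethod
    def update(self, overflow: bool) -> None:
        # subclasses decide whether/how the scale changes after each step
        ...
```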
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os.path as osp
import pytest
import torch
from torch.utils.data import DataLoader
import colossalai
from colossalai.builder import build_dataset, build_loss, build_data_sampler, build_model
from colossalai.core import global_context
from colossalai.engine.gradie... | [
"colossalai.builder.build_data_sampler",
"colossalai.builder.build_model",
"colossalai.nn.optimizer.ZeroRedundancyOptimizer_Level_2",
"colossalai.core.global_context.config.train_data.dataloader.pop",
"colossalai.utils.print_rank_0",
"colossalai.engine.gradient_handler.DataParallelGradientHandler",
"col... | [((609, 640), 'os.path.join', 'osp.join', (['DIR_PATH', '"""config.py"""'], {}), "(DIR_PATH, 'config.py')\n", (617, 640), True, 'import os.path as osp\n'), ((5477, 5564), 'pytest.mark.skip', 'pytest.mark.skip', (['"""This test should be invoked manually using the script provided"""'], {}), "(\n 'This test should be ... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os.path as osp
import torch.distributed as dist
from colossalai.checkpointing import get_latest_checkpoint_path, get_checkpoint_path
from colossalai.registry import HOOKS
from colossalai.trainer.hooks import BaseHook
from colossalai.trainer import Trainer
from c... | [
"colossalai.utils.is_dp_rank_0",
"colossalai.checkpointing.get_checkpoint_path",
"colossalai.checkpointing.get_latest_checkpoint_path"
] | [((3668, 3684), 'os.path.exists', 'osp.exists', (['path'], {}), '(path)\n', (3678, 3684), True, 'import os.path as osp\n'), ((4052, 4073), 'torch.distributed.is_initialized', 'dist.is_initialized', ([], {}), '()\n', (4071, 4073), True, 'import torch.distributed as dist\n'), ((1717, 1731), 'colossalai.utils.is_dp_rank_0... |
import torch
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn.layer.parallel_2p5d._operation import Matmul_AB_2p5D, Matmul_ABT_2p5D, \
Matmul_ATB_2p5D
from colossalai.utils import get_current_device
from colossalai.utils import print_rank_0
from commo... | [
"colossalai.utils.print_rank_0",
"colossalai.core.global_context.is_initialized",
"colossalai.nn.layer.parallel_2p5d._operation.Matmul_ATB_2p5D.apply",
"colossalai.core.global_context.get_local_rank",
"colossalai.core.global_context.get_world_size",
"colossalai.utils.get_current_device",
"colossalai.nn.... | [((763, 802), 'colossalai.core.global_context.get_world_size', 'gpc.get_world_size', (['ParallelMode.TENSOR'], {}), '(ParallelMode.TENSOR)\n', (781, 802), True, 'from colossalai.core import global_context as gpc\n'), ((840, 890), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PAR... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
import colossalai
from colossalai.zero.sharded_model.param_manager import Zero3ParameterManager
from colossalai.core import global_context as gpc
from colossalai.context.paralle... | [
"colossalai.core.global_context.init_parallel_groups",
"colossalai.core.global_context.get_group",
"colossalai.launch",
"colossalai.utils.free_port"
] | [((953, 1001), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[1, 2, 4]'], {}), "('world_size', [1, 2, 4])\n", (976, 1001), False, 'import pytest\n'), ((469, 585), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'CONFIG', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""lo... |
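This row's extract gives the complete multi-process test scaffold almost verbatim; a runnable sketch where only `CONFIG`'s contents and the test-body comment are stand-ins:

```python
from functools import partial
import pytest
import torch.multiprocessing as mp
import colossalai
from colossalai.utils import free_port

CONFIG = dict()  # contents elided in the row; empty dict is a stand-in

def run_dist(rank, world_size, port):
    # the launch() kwargs below appear verbatim in the extract
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size,
                      host='localhost', port=port, backend='nccl')
    # per-rank Zero3ParameterManager checks would go here

@pytest.mark.parametrize('world_size', [1, 2, 4])
def test_zero_param_manager(world_size):  # test name assumed
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)
```

The `free_port()`/`partial`/`mp.spawn` combination is the recurring pattern across the distributed test rows in this dataset.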
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from abc import ABC, abstractmethod
import torch.nn as nn
from colossalai.builder import build_layer
class BaseModel(nn.Module, ABC):
def __init__(self):
super(BaseModel, self).__init__()
self.layers = nn.ModuleList()
self.layers_cfg = []... | [
"colossalai.builder.build_layer"
] | [((276, 291), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (289, 291), True, 'import torch.nn as nn\n'), ((766, 782), 'colossalai.builder.build_layer', 'build_layer', (['cfg'], {}), '(cfg)\n', (777, 782), False, 'from colossalai.builder import build_layer\n')] |
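Both calls in this row's extract (`nn.ModuleList()` and `build_layer(cfg)`) fit a config-driven model base; a sketch where the builder method's name and slicing are assumptions:

```python
from abc import ABC
import torch.nn as nn
from colossalai.builder import build_layer

class BaseModel(nn.Module, ABC):
    def __init__(self):
        super().__init__()
        self.layers = nn.ModuleList()
        self.layers_cfg = []

    def build_from_cfg(self, start=0, end=None):
        # method name assumed; the extract only proves the build_layer(cfg) call
        for cfg in self.layers_cfg[start:end]:
            self.layers.append(build_layer(cfg))
```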
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import random
import socket
from typing import List, Union
import torch
from torch._six import inf
from torch.nn.parameter import Parameter
try:
import colossal_C
except:
pass
from contextlib import contextmanager
import torch.distributed as dist
from colossal... | [
"colossalai.core.global_context.is_first_rank",
"colossalai.core.global_context.is_last_rank",
"colossalai.core.global_context.set_virtual_pipeline_parallel_rank",
"colossalai.core.global_context.get_ranks_in_group",
"colossalai.global_variables.tensor_parallel_env.load",
"colossalai.core.global_context.g... | [((10600, 10641), 'colossalai.core.global_context.is_initialized', 'gpc.is_initialized', (['ParallelMode.PIPELINE'], {}), '(ParallelMode.PIPELINE)\n', (10618, 10641), True, 'from colossalai.core import global_context as gpc\n'), ((984, 1005), 'colossalai.core.global_context.get_global_rank', 'gpc.get_global_rank', ([],... |
import torch
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.registry import LOSSES
from torch.cuda.amp import custom_bwd, custom_fwd
from torch.nn.modules.loss import _Loss
class _VocabParallelCrossEntropy1D(torch.autograd.Function):
@stati... | [
"colossalai.core.global_context.get_local_rank",
"colossalai.core.global_context.get_group"
] | [((334, 371), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float32'}), '(cast_inputs=torch.float32)\n', (344, 371), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((1001, 1045), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_1D']... |
import torch
import torch.distributed as dist
from colossalai.constants import INPUT_GROUP_3D, WEIGHT_GROUP_3D, OUTPUT_GROUP_3D
from colossalai.core import global_context as gpc
from colossalai.nn.layer.parallel_3d import reduce_by_batch_3d, split_tensor_3d
from colossalai.nn.layer.parallel_3d._utils import get_paralle... | [
"colossalai.nn.layer.parallel_3d.split_tensor_3d",
"colossalai.nn.layer.parallel_3d.reduce_by_batch_3d",
"colossalai.core.global_context.get_local_rank",
"colossalai.core.global_context.get_group",
"colossalai.nn.layer.parallel_3d._utils.get_parallel_mode_from_env",
"colossalai.utils.get_current_device"
] | [((2566, 2603), 'torch.cuda.amp.custom_fwd', 'custom_fwd', ([], {'cast_inputs': 'torch.float32'}), '(cast_inputs=torch.float32)\n', (2576, 2603), False, 'from torch.cuda.amp import custom_bwd, custom_fwd\n'), ((1388, 1430), 'colossalai.nn.layer.parallel_3d._utils.get_parallel_mode_from_env', 'get_parallel_mode_from_env... |
import torch
import torch.distributed as dist
from torch import Tensor
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from typing import Any, Tuple
class AllToAll(torch.autograd.Function):
"""Dispatches input tensor [e, c, h] to all experts by all_to_all_s... | [
"colossalai.core.global_context.get_group"
] | [((653, 677), 'torch.empty_like', 'torch.empty_like', (['inputs'], {}), '(inputs)\n', (669, 677), False, 'import torch\n'), ((764, 792), 'colossalai.core.global_context.get_group', 'gpc.get_group', (['parallel_mode'], {}), '(parallel_mode)\n', (777, 792), True, 'from colossalai.core import global_context as gpc\n')] |
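The row's docstring names an all-to-all dispatch of an `[e, c, h]` tensor to experts, and the extract shows `torch.empty_like(inputs)` plus `gpc.get_group(parallel_mode)`; a sketch that wires those into `dist.all_to_all_single`, with the contiguity guard and backward rule reconstructed as assumptions:

```python
import torch
import torch.distributed as dist
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc

class AllToAll(torch.autograd.Function):
    """Dispatches a tensor to all experts via all_to_all_single; the backward
    pass is simply another all-to-all (reconstruction)."""

    @staticmethod
    def forward(ctx, inputs: torch.Tensor, parallel_mode: ParallelMode) -> torch.Tensor:
        ctx.parallel_mode = parallel_mode
        if not inputs.is_contiguous():
            inputs = inputs.contiguous()
        output = torch.empty_like(inputs)  # call recorded in the extract
        dist.all_to_all_single(output, inputs, group=gpc.get_group(parallel_mode))
        return output

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor):
        # gradient of an all-to-all is the reverse all-to-all; None for parallel_mode
        return AllToAll.apply(grad_output, ctx.parallel_mode), None
```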
import torch
from torch.cuda import max_memory_allocated, reset_peak_memory_stats
from torch.distributed import get_rank
from zero.common.utils import (CONFIG, get_gpu_memory_mb, get_model_size, print_log)
def init_w_col(builder):
import colossalai
from colossalai.core import global_context as gpc
... | [
"colossalai.logging.disable_existing_loggers",
"colossalai.core.global_context.config.zero.pop",
"colossalai.zero.shard_utils.TensorShardStrategy",
"colossalai.launch_from_torch",
"colossalai.zero.sharded_model.ShardedModelV2",
"colossalai.zero.sharded_model.ShardedModel"
] | [((728, 754), 'colossalai.logging.disable_existing_loggers', 'disable_existing_loggers', ([], {}), '()\n', (752, 754), False, 'from colossalai.logging import disable_existing_loggers\n'), ((760, 803), 'colossalai.launch_from_torch', 'colossalai.launch_from_torch', ([], {'config': 'CONFIG'}), '(config=CONFIG)\n', (788, ... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import math
from typing import Callable
import torch
import torch.nn as nn
import torch.nn.functional as F
from colossalai.communication import all_reduce, broadcast
from colossalai.constants import INPUT_GROUP_3D, WEIGHT_GROUP_3D
from colossalai.context import ParallelM... | [
"colossalai.core.global_context.get_ranks_in_group",
"colossalai.nn.init.ones_",
"colossalai.nn.init.zeros_",
"colossalai.nn.init.xavier_uniform_",
"colossalai.communication.all_reduce",
"colossalai.nn.init.normal_",
"colossalai.context.seed",
"colossalai.utils.get_current_device",
"colossalai.commu... | [((4224, 4258), 'colossalai.nn.init.xavier_uniform_', 'init.xavier_uniform_', ([], {'a': '(1)', 'scale': '(1)'}), '(a=1, scale=1)\n', (4244, 4258), True, 'from colossalai.nn import init as init\n'), ((7905, 7939), 'colossalai.nn.init.xavier_uniform_', 'init.xavier_uniform_', ([], {'a': '(1)', 'scale': '(1)'}), '(a=1, s... |
import torch
from torch import nn
from colossalai import nn as col_nn
from colossalai.context import ParallelMode
from colossalai.registry import MODELS
__all__ = [
'VisionTransformer3D',
'vit_tiny_3d_patch4_32',
'vit_tiny_3d_patch16_224',
'vit_tiny_3d_patch16_384',
'vit_small_3d_patch16_224',
... | [
"colossalai.nn.ViTSelfAttention3D",
"colossalai.nn.ViTPatchEmbedding3D",
"colossalai.nn.VanillaViTDropPath",
"colossalai.nn.ViTMLP3D",
"colossalai.nn.LayerNorm3D",
"colossalai.nn.ViTHead3D"
] | [((979, 1083), 'colossalai.nn.LayerNorm3D', 'col_nn.LayerNorm3D', (['dim', 'ParallelMode.PARALLEL_3D_INPUT', 'ParallelMode.PARALLEL_3D_WEIGHT'], {'eps': '(1e-06)'}), '(dim, ParallelMode.PARALLEL_3D_INPUT, ParallelMode.\n PARALLEL_3D_WEIGHT, eps=1e-06)\n', (997, 1083), True, 'from colossalai import nn as col_nn\n'), ... |
import os
import colossalai
import colossalai.utils as utils
import torch
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.logging import disable_existing_loggers, get_dist_logger
from colossalai.nn import LinearWarmupLR
f... | [
"colossalai.logging.disable_existing_loggers",
"colossalai.trainer.hooks.ThroughputHook",
"colossalai.nn.LinearWarmupLR",
"colossalai.trainer.hooks.LRSchedulerHook",
"colossalai.logging.get_dist_logger",
"colossalai.trainer.hooks.LossHook",
"colossalai.trainer.hooks.LogMemoryByEpochHook",
"colossalai.... | [((1008, 1039), 'colossalai.get_default_parser', 'colossalai.get_default_parser', ([], {}), '()\n', (1037, 1039), False, 'import colossalai\n'), ((1150, 1176), 'colossalai.logging.disable_existing_loggers', 'disable_existing_loggers', ([], {}), '()\n', (1174, 1176), False, 'from colossalai.logging import disable_existi... |
import torch
from zero.common.utils import CONFIG, print_log
from torch.cuda import max_memory_allocated, reset_peak_memory_stats
from torch.distributed import get_rank
def init_w_col(builder):
import colossalai
from colossalai.core import global_context as gpc
from colossalai.nn.optimizer import... | [
"colossalai.zero.shard_utils.BucketTensorShardStrategy",
"colossalai.initialize",
"colossalai.launch_from_torch",
"colossalai.utils.memory_utils.utils.colo_set_process_memory_fraction"
] | [((553, 590), 'colossalai.utils.memory_utils.utils.colo_set_process_memory_fraction', 'colo_set_process_memory_fraction', (['(0.2)'], {}), '(0.2)\n', (585, 590), False, 'from colossalai.utils.memory_utils.utils import colo_set_process_memory_fraction\n'), ((598, 641), 'colossalai.launch_from_torch', 'colossalai.launch_... |
import torch
import torch.distributed as dist
from colossalai.zero.sharded_param import ShardedTensor
from typing import Optional, Tuple
from colossalai.utils.memory_utils.utils import colo_tensor_mem_usage
from .tensorful_state import StatefulTensor, TensorState
class ShardedParamV2(object):
def __init__(self, ... | [
"colossalai.zero.sharded_param.ShardedTensor",
"colossalai.utils.memory_utils.utils.colo_tensor_mem_usage"
] | [((431, 456), 'colossalai.zero.sharded_param.ShardedTensor', 'ShardedTensor', (['param.data'], {}), '(param.data)\n', (444, 456), False, 'from colossalai.zero.sharded_param import ShardedTensor\n'), ((1215, 1280), 'torch.empty', 'torch.empty', (['[]'], {'dtype': 'self.param.dtype', 'device': 'self.param.device'}), '([]... |
from concurrent.futures import ThreadPoolExecutor
from time import sleep, time
import pickle
import torch
from colossalai.utils.memory_utils.utils import colo_device_memory_used
from colossalai.utils import get_current_device
class AsyncMemoryMonitor:
"""
An Async Memory Monitor runing during computing. Sam... | [
"colossalai.utils.get_current_device"
] | [((1323, 1343), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (1341, 1343), False, 'from colossalai.utils import get_current_device\n'), ((1452, 1515), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(1)', 'initializer': '_set_cuda_device'}), '(max_w... |
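The extract pins down the worker setup — a single-thread `ThreadPoolExecutor` whose initializer binds the CUDA device from `get_current_device()` — while the sampling loop below is reconstructed; the `power`-based interval and the start/finish protocol are assumptions:

```python
from concurrent.futures import ThreadPoolExecutor
from time import sleep
import torch
from colossalai.utils import get_current_device

class AsyncMemoryMonitor:
    """Samples CUDA memory on a worker thread while a compute step runs."""

    def __init__(self, power: int = 10):
        self.keep_measuring = False

        def _set_cuda_device():
            torch.cuda.set_device(get_current_device())

        # executor construction appears verbatim in the extract
        self.executor = ThreadPoolExecutor(max_workers=1, initializer=_set_cuda_device)
        self.interval = 1 / (10 ** power)  # sampling period assumed

    def start(self):
        self.keep_measuring = True
        self.monitor_thread = self.executor.submit(self._measure_usage)

    def finish(self) -> int:
        self.keep_measuring = False
        return self.monitor_thread.result()

    def _measure_usage(self) -> int:
        max_usage = 0
        while self.keep_measuring:
            max_usage = max(max_usage, torch.cuda.memory_allocated())
            sleep(self.interval)
        return max_usage
```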
import transformers
import torch
import lddl
import logging
from colossalai.nn.lr_scheduler import LinearWarmupLR
from titans.dataloader.bert import get_bert_pretrain_data_loader
__all__ = ['get_model', 'get_optimizer', 'get_lr_scheduler', 'get_dataloader_for_pretraining']
def get_model(config_file):
config = tr... | [
"colossalai.nn.lr_scheduler.LinearWarmupLR"
] | [((318, 369), 'transformers.BertConfig.from_json_file', 'transformers.BertConfig.from_json_file', (['config_file'], {}), '(config_file)\n', (356, 369), False, 'import transformers\n'), ((382, 421), 'transformers.BertForPreTraining', 'transformers.BertForPreTraining', (['config'], {}), '(config)\n', (413, 421), False, '... |
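Both `transformers` calls in `get_model` appear verbatim in the extract; the scheduler helper below pairs them with the `LinearWarmupLR` the row imports, with parameter names assumed:

```python
import transformers
from colossalai.nn.lr_scheduler import LinearWarmupLR

def get_model(config_file):
    # both calls appear verbatim in the extract
    config = transformers.BertConfig.from_json_file(config_file)
    model = transformers.BertForPreTraining(config)
    return model

def get_lr_scheduler(optimizer, total_steps, warmup_steps):
    # keyword names assumed for the imported scheduler
    return LinearWarmupLR(optimizer, total_steps=total_steps,
                          warmup_steps=warmup_steps)
```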
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import math
import os
from typing import Tuple
import torch
import torch.distributed as dist
import torch.nn as nn
from colossalai.constants import (INPUT_GROUP_3D, OUTPUT_GROUP_3D,
WEIGHT_GROUP_3D)
from colossalai.context import Parall... | [
"colossalai.core.global_context.get_ranks_in_group",
"colossalai.nn.init.init_weight_",
"colossalai.nn.init.init_bias_",
"colossalai.core.global_context.get_group",
"colossalai.utils.get_current_device"
] | [((2425, 2447), 'torch.nn.init.zeros_', 'init.zeros_', (['self.bias'], {}), '(self.bias)\n', (2436, 2447), True, 'from torch.nn import init as init\n'), ((2456, 2479), 'torch.nn.init.ones_', 'init.ones_', (['self.weight'], {}), '(self.weight)\n', (2466, 2479), True, 'from torch.nn import init as init\n'), ((6402, 6469)... |
import torch
import torch.distributed as dist
from torch.nn.modules.loss import _Loss
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn.layer.parallel_2d._utils import assert_summa_initialization, get_summa_dim_from_env
from colossalai.regist... | [
"colossalai.nn.layer.parallel_2d._utils.get_summa_dim_from_env",
"colossalai.core.global_context.get_local_rank",
"colossalai.core.global_context.get_group",
"colossalai.nn.layer.parallel_2d._utils.assert_summa_initialization",
"colossalai.utils.get_current_device"
] | [((1155, 1203), 'colossalai.core.global_context.get_local_rank', 'gpc.get_local_rank', (['ParallelMode.PARALLEL_2D_ROW'], {}), '(ParallelMode.PARALLEL_2D_ROW)\n', (1173, 1203), True, 'from colossalai.core import global_context as gpc\n'), ((1779, 1796), 'torch.exp', 'torch.exp', (['logits'], {}), '(logits)\n', (1788, 1... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.core import global_context as gpc
from colossalai.initialize import launch
from colossalai.utils import free_port
from checks_3d.check_layer_3d import *
CONFIG =... | [
"colossalai.core.global_context.destroy",
"colossalai.utils.free_port",
"colossalai.initialize.launch"
] | [((613, 717), 'colossalai.initialize.launch', 'launch', ([], {'config': 'CONFIG', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=CONFIG, rank=rank, world_size=world_size, host='localhost',\n port=port, backend='nccl')\n", (619, 717), False, ... |
from functools import partial
import colossalai
from colossalai.utils.cuda import get_current_device
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.amp import convert_to_apex_amp
from colossalai.nn.optimizer import CPUAdam
from colossalai.testing import parameterize, rerun_on_ex... | [
"colossalai.utils.free_port",
"colossalai.context.MOE_CONTEXT.reset_loss",
"colossalai.testing.parameterize",
"colossalai.zero.sharded_model.utils.col_model_deepcopy",
"colossalai.zero.sharded_optim.ShardedOptimizerV2",
"colossalai.amp.convert_to_apex_amp",
"colossalai.launch",
"colossalai.testing.rer... | [((1697, 1739), 'colossalai.testing.parameterize', 'parameterize', (['"""cpu_offload"""', '[True, False]'], {}), "('cpu_offload', [True, False])\n", (1709, 1739), False, 'from colossalai.testing import parameterize, rerun_on_exception\n'), ((1742, 1784), 'colossalai.testing.parameterize', 'parameterize', (['"""use_cpua... |