#!/usr/bin/env python3
"""Setup script"""
from pathlib import Path
import re
import os
import setuptools
if __name__ == "__main__":
# Read metadata from version.py
with Path("autofaiss/version.py").open(encoding="utf-8") as file:
metadata = dict(re.findall(r'__([a-z]+)__\s*=\s*"([^"]+)"', file.read(... |
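# A hypothetical sketch of how such metadata is commonly passed on to setuptools
# (assumption: the truncated setup() call is not shown above and may differ).
    setuptools.setup(
        name="autofaiss",
        version=metadata["version"],
        author=metadata["author"],
        packages=setuptools.find_packages(),
    )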
"""Check version and git tag script."""
from pathlib import Path
import re
import sys
import subprocess
if __name__ == "__main__":
# Read package version
with Path("autofaiss/version.py").open(encoding="utf-8") as file:
metadata = dict(re.findall(r'__([a-z]+)__\s*=\s*"([^"]+)"', file.read()))
... |
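# A hypothetical sketch of the kind of check such a release script often performs
# (assumption: the exact tag format and failure condition are not visible here).
    latest_tag = subprocess.check_output(["git", "describe", "--tags", "--abbrev=0"], text=True).strip()
    if latest_tag.lstrip("v") == metadata["version"]:
        print(f"Version {metadata['version']} is already tagged; bump autofaiss/version.py first")
        sys.exit(1)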
"""Test version."""
from autofaiss import version
def test_version():
"""Test version."""
assert len(version.__version__.split(".")) == 3
assert isinstance(version.__author__, str)
|
""" test utils functions """
# pylint: disable= invalid-name
import numpy as np
import pytest
from autofaiss.utils.array_functions import multi_array_split
def test_multi_array_split():
"""test multi_array_split fct number 1"""
assert len(list(multi_array_split([np.zeros((123, 2)), np.zeros((123, 5))], 41))... |
import numpy as np
from autofaiss import build_index, tune_index, score_index
def test_scoring_tuning():
embs = np.ones((100, 512), "float32")
index, index_infos = build_index(embs, save_on_disk=False)
index = tune_index(index, index_infos["index_key"], save_on_disk=False)
infos = score_index(index, e... |
import logging
import faiss
import numpy as np
import pytest
from autofaiss.external.optimize import (
get_min_param_value_for_best_neighbors_coverage,
get_optimal_hyperparameters,
get_optimal_index_keys_v2,
)
from autofaiss.external.quantize import build_index
from autofaiss.indices.index_factory import i... |
import logging
import os
import py
import random
from tempfile import TemporaryDirectory, NamedTemporaryFile
from typing import Tuple, List
import faiss
import numpy as np
import pandas as pd
import pyarrow.parquet as pq
import pytest
from numpy.testing import assert_array_equal
LOGGER = logging.getLogger(__name__)
... |
import numpy as np
from autofaiss import build_index
def test_np_quantize():
embs = np.ones((100, 512), "float32")
index, _ = build_index(embs, save_on_disk=False)
_, I = index.search(embs, 1)
assert I[0][0] == 0
|
from autofaiss.external.build import estimate_memory_required_for_index_creation
#
# def test_estimate_memory_required_for_index_creation():
# needed_memory, _ = estimate_memory_required_for_index_creation(
# nb_vectors=4_000_000_000,
# vec_dim=512,
# index_key="OPQ4_28,IVF131072_HNSW32,PQ4... |
""" Test that the memory efficient flat index give same results as the faiss flat index """
import time
import faiss
import numpy as np
import pytest
from autofaiss.indices.memory_efficient_flat_index import MemEfficientFlatIndex
@pytest.fixture(name="prod_emb")
def fixture_prod_emb():
"""generate random datab... |
from autofaiss.indices.distributed import _batch_loader
def test_batch_loader():
for input_size in range(2, 500):
for output_size in range(1, input_size):
batches = list(_batch_loader(nb_batches=output_size, total_size=input_size))
# test output size is expected
assert ... |
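# A minimal sketch of a batch loader with the contract this test appears to check
# (assumption: _batch_loader yields (batch_id, start, end) ranges; the real
# implementation in autofaiss.indices.distributed may differ).
def batch_loader_sketch(nb_batches: int, total_size: int):
    base, extra = divmod(total_size, nb_batches)
    start = 0
    for batch_id in range(nb_batches):
        end = start + base + (1 if batch_id < extra else 0)  # spread the remainder
        yield batch_id, start, end
        start = end

batches = list(batch_loader_sketch(nb_batches=3, total_size=10))
assert len(batches) == 3 and batches[-1][2] == 10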
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If ex... |
"""
An example of running autofaiss with pyspark to produce N indices.
You need to install pyspark before using the following example.
"""
from typing import Dict
import faiss
import numpy as np
from autofaiss import build_index
# It is recommended to create a Spark session before calling build_index,
# otherwise, a spark se... |
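# A hypothetical sketch of producing N indices (assumption: parameter names such as
# distributed="pyspark" and nb_indices_to_keep follow the autofaiss documentation,
# and every path below is a placeholder).
index, index_infos = build_index(
    embeddings="hdfs://root/path/to/embeddings",
    distributed="pyspark",
    nb_indices_to_keep=10,  # ask autofaiss to keep 10 smaller indices instead of one
    index_path="hdfs://root/path/to/knn.index",
    index_infos_path="hdfs://root/path/to/infos.json",
)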
"""
Given a partitioned dataset of embeddings, create an index per partition
"""
import os
from autofaiss import build_partitioned_indexes
from pyspark.sql import SparkSession # pylint: disable=import-outside-toplevel
def create_spark_session():
# PEX file packaging your Python environment and accessible on ya... |
import faiss
import numpy as np
from autofaiss import build_index
embeddings = np.float32(np.random.rand(5000, 100))
# Example on how to build a memory-mapped index and load it from disk
_, index_infos = build_index(
embeddings,
save_on_disk=True,
should_be_memory_mappable=True,
index_path="my_index_f... |
from autofaiss import build_index
import numpy as np
embeddings = np.float32(np.random.rand(100, 512))
index, index_infos = build_index(embeddings, save_on_disk=False)
_, I = index.search(embeddings, 1)
print(I)
|
import numpy as np
from autofaiss import build_index, tune_index, score_index
embs = np.float32(np.random.rand(100, 512))
index, index_infos = build_index(embs, save_on_disk=False)
index = tune_index(index, index_infos["index_key"], save_on_disk=False)
infos = score_index(index, embs, save_on_disk=False)
|
"""
An example of running autofaiss with pyspark.
You need to install pyspark before using the following example.
"""
from autofaiss import build_index
# It is recommended to create a Spark session before calling build_index;
# otherwise, autofaiss will create one with minimal configuration.
index, index... |
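# A hypothetical sketch of how the truncated call might continue (assumption:
# distributed="pyspark" is the documented switch for running on Spark; the paths
# are placeholders).
index, index_infos = build_index(
    embeddings="hdfs://root/path/to/embeddings",
    distributed="pyspark",
    index_path="hdfs://root/path/to/knn.index",
    index_infos_path="hdfs://root/path/to/infos.json",
)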
from autofaiss import build_index
build_index(
embeddings="embeddings",
index_path="knn.index",
index_infos_path="infos.json",
max_index_memory_usage="4G",
current_memory_available="5G",
)
|
# pylint: disable=all
__version__ = "2.15.5"
__author__ = "Criteo"
MAJOR = __version__.split(".")[0]
MINOR = __version__.split(".")[1]
PATCH = __version__.split(".")[2]
|
# pylint: disable=unused-import,missing-docstring
from autofaiss.external.quantize import build_index, score_index, tune_index, build_partitioned_indexes
from autofaiss.version import __author__, __version__
|
""" function to compute different kind of recalls """
from typing import List, Optional
import faiss
import numpy as np
def r_recall_at_r_single(
query: np.ndarray,
ground_truth: np.ndarray,
other_index: faiss.Index,
r_max: int = 40,
eval_item_ids: Optional[np.ndarray] = None,
) -> List[int]:
... |
""" function to compute the reconstruction error """
from typing import Optional
import numpy as np
import faiss
def reconstruction_error(before, after, avg_norm_before: Optional[float] = None) -> float:
"""Computes the average reconstruction error"""
diff = np.mean(np.linalg.norm(after - before, axis=1))
... |
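# A minimal sketch of how the normalisation could conclude (assumption: the error
# is the mean L2 difference divided by the mean L2 norm of the original vectors).
def reconstruction_error_sketch(before, after, avg_norm_before=None):
    diff = np.mean(np.linalg.norm(after - before, axis=1))
    if avg_norm_before is None:
        avg_norm_before = np.mean(np.linalg.norm(before, axis=1))
    return diff / avg_norm_before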
""" functions to compare different indices """
import time
import numpy as np
from matplotlib import pyplot as plt
from tqdm import tqdm as tq
from autofaiss.indices.index_utils import format_speed_ms_per_query, get_index_size, speed_test_ms_per_query
from autofaiss.metrics.recalls import r_recall_at_r_single, one_r... |
# pylint: disable=unused-import,missing-docstring
|
""" Common functions to build an index """
import logging
from typing import Dict, Optional, Tuple, Union, Callable, Any
import uuid
import re
import os
import tempfile
import fsspec
import faiss
import pandas as pd
from embedding_reader import EmbeddingReader
from autofaiss.external.optimize import optimize_and_mea... |
""" functions that fixe faiss index_factory function """
# pylint: disable=invalid-name
import re
from typing import Optional
import faiss
def index_factory(d: int, index_key: str, metric_type: int, ef_construction: Optional[int] = None):
"""
    custom index_factory that fixes some issues of
faiss.index_fact... |
""" useful functions to apply on an index """
import os
import time
from functools import partial
from itertools import chain, repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Dict, Optional, Union, List, Tuple
import logging
from f... |
# pylint: disable=unused-import,missing-docstring
|
"""
Building the index with pyspark.
"""
import math
import multiprocessing
import os
import logging
from tempfile import TemporaryDirectory
import tempfile
from typing import Dict, Optional, Iterator, Tuple, Callable, Any, Union, List
from functools import partial
from multiprocessing.pool import ThreadPool
import f... |
""" function related to search on indices """
from typing import Iterable, Tuple
import numpy as np
def knn_query(index, query, ksearch: int) -> Iterable[Tuple[Tuple[int, int], float]]:
"""Do a knn search and return a list of the closest items and the associated distance"""
dist, ind = index.search(np.expa... |
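# A minimal sketch of how such a helper might be completed (assumption: only
# faiss's index.search is relied on; the real return format may differ).
def knn_query_sketch(index, query: np.ndarray, ksearch: int):
    """Search the ksearch nearest neighbours of a single query vector and
    return (item_id, distance) pairs."""
    dist, ind = index.search(np.expand_dims(query, 0), ksearch)  # shapes (1, ksearch)
    return list(zip(ind[0], dist[0]))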
""" This file contain a class describing a memory efficient flat index """
import heapq
from typing import List, Optional, Tuple
from embedding_reader import EmbeddingReader
import faiss
import numpy as np
from tqdm import trange
from autofaiss.indices.faiss_index_wrapper import FaissIndexWrapper
class MemEfficie... |
""" This file contains a wrapper class to create Faiss-like indices """
from abc import ABC, abstractmethod
import faiss
import numpy as np
class FaissIndexWrapper(ABC):
"""
    This abstract class describes a Faiss-like index.
    This wrapper makes it possible to reuse benchmarking functions written for
... |
"""Index training"""
from typing import Union, NamedTuple, Optional, List
import logging
import multiprocessing
import faiss
from embedding_reader import EmbeddingReader
from autofaiss.external.metadata import IndexMetadata
from autofaiss.external.optimize import check_if_index_needs_training, get_optimal_train_size... |
""" function to cast variables in others """
import re
from math import floor
from typing import Union
import faiss
def cast_memory_to_bytes(memory_string: str) -> float:
"""
    Parse a memory string and return the number of bytes
>>> cast_memory_to_bytes("16B")
16
>>> cast_memory_to_bytes("16G")... |
""" Various optimization algorithms """
from typing import Callable
# pylint: disable=invalid-name
def discrete_binary_search(is_ok: Callable[[int], bool], n: int) -> int:
"""
Binary search in a function domain
Parameters
----------
is_ok : bool
Boolean monotone function defined on range(... |
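# A minimal sketch of a binary search over a monotone boolean predicate
# (assumption: it returns the smallest i in range(n) for which is_ok(i) is True).
def discrete_binary_search_sketch(is_ok, n: int) -> int:
    lo, hi = 0, n
    while lo < hi:
        mid = (lo + hi) // 2
        if is_ok(mid):
            hi = mid
        else:
            lo = mid + 1
    return lo

assert discrete_binary_search_sketch(lambda i: i >= 7, 100) == 7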
# pylint: disable=unused-import,missing-docstring
|
""" useful functions t apply on numpy arrays """
import numpy as np
def sanitize(x):
return np.ascontiguousarray(x, dtype="float32")
def multi_array_split(array_list, nb_chunk):
total_length = len(array_list[0])
chunk_size = (total_length - 1) // nb_chunk + 1
assert all(len(x) == total_length for x... |
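# A minimal sketch of how the splitting could continue (assumption: each chunk is
# the corresponding slice of every array in array_list, yielded as a tuple).
def multi_array_split_sketch(array_list, nb_chunk):
    total_length = len(array_list[0])
    chunk_size = (total_length - 1) // nb_chunk + 1
    assert all(len(x) == total_length for x in array_list)
    for start in range(0, total_length, chunk_size):
        yield tuple(x[start:start + chunk_size] for x in array_list)

# 123 rows split into chunks of ceil(123 / 41) = 3 gives 41 chunks,
# consistent with the parameters used in the test above.
assert len(list(multi_array_split_sketch([np.zeros((123, 2)), np.zeros((123, 5))], 41))) == 41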
"""path"""
import os
import fsspec
def make_path_absolute(path: str) -> str:
fs, p = fsspec.core.url_to_fs(path, use_listings_cache=False)
if fs.protocol == "file":
return os.path.abspath(p)
return path
def extract_partition_name_from_path(path: str) -> str:
"""Extract partition name from p... |
""" Useful decorators for fast debuging """
import functools
import time
import logging
from contextlib import ContextDecorator
from datetime import datetime
from typing import Optional
logger = logging.getLogger("autofaiss")
class Timeit(ContextDecorator):
"""Timing class, used as a context manager"""
def... |
""" gather functions necessary to build an index """
import logging
from typing import Dict, Optional, Tuple, Union, Callable, Any, List
import faiss
import pandas as pd
from embedding_reader import EmbeddingReader
from autofaiss.external.metadata import IndexMetadata
from autofaiss.external.optimize import check_if... |
""" Functions to find optimal index parameters """
import json
import logging
import re
from functools import partial, reduce
from math import floor, log2, sqrt
from operator import mul
from typing import Callable, List, Optional, TypeVar
import faiss
import fsspec
import numpy as np
from autofaiss.external.metadata i... |
"""
Index metadata for Faiss indices.
"""
import re
from enum import Enum
from math import ceil
from autofaiss.utils.cast import cast_bytes_to_memory_string
from autofaiss.external.descriptions import (
INDEX_DESCRIPTION_BLOCKS,
IndexBlock,
TUNABLE_PARAMETERS_DESCRIPTION_BLOCKS,
TunableParam,
)
clas... |
""" main file to create an index from the the begining """
import json
import logging
import logging.config
import multiprocessing
import os
import tempfile
from typing import Dict, List, Optional, Tuple, Union
from embedding_reader import EmbeddingReader
import faiss
import fire
import fsspec
import numpy as np
fro... |
""" Functions to compute metrics on an index """
import fsspec
from typing import Dict, Union, Optional
import numpy as np
import faiss
from embedding_reader import EmbeddingReader
from autofaiss.indices.index_utils import get_index_size, search_speed_test
from autofaiss.indices.memory_efficient_flat_index import Me... |
"""
File that contains the descriptions of the different index features.
"""
from enum import Enum
class IndexBlock(Enum):
FLAT = 0
IVF = 1
IVF_HNSW = 2
HNSW = 3
PQ = 4
OPQ = 5
PAD = 6
class TunableParam(Enum):
EFSEARCH = 0
NPROBE = 1
HT = 2
INDEX_DESCRIPTION_BLOCKS = {
... |
import torch
from torch import einsum, nn
import torch.nn.functional as F
from einops import rearrange
# rotary positional embedding
# https://arxiv.org/abs/2104.09864
class RotaryEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, ... |
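# A minimal sketch of a rotary positional embedding module (assumption: the original
# class continues roughly like this; see https://arxiv.org/abs/2104.09864).
class RotaryEmbeddingSketch(nn.Module):
    def __init__(self, dim):
        super().__init__()
        inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)

    def forward(self, max_seq_len, device):
        seq = torch.arange(max_seq_len, device=device, dtype=self.inv_freq.dtype)
        freqs = einsum("i,j->ij", seq, self.inv_freq)  # (seq_len, dim / 2)
        return torch.cat((freqs, freqs), dim=-1)        # (seq_len, dim)

def rotate_half_sketch(x):
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

def apply_rotary_pos_emb_sketch(pos, t):
    # rotate query / key features by the positional angles
    return (t * pos.cos()) + (rotate_half_sketch(t) * pos.sin())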
import os
import subprocess
import sys
from setuptools import find_packages, setup
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
build_cuda_ext = True
ext_modules = []
if '--no_cuda_ext' in sys.argv:
sys.argv.remove('--no_cuda_ext')
build_c... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
from pathlib import Path
import pytest
import torch
import torch.multiprocessing as mp
from colossalai import launch
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
parallel = dict(
pipeline=dict(size=2),
tensor=dict(
size=4,
mode='2d'
)
)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
parallel = dict(
pipeline=dict(size=2),
tensor=dict(
size=8,
mode='3d'
)
)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
parallel = dict(
pipeline=dict(size=2),
tensor=dict(
size=8,
depth=2,
mode='2.5d'
)
)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
import colossalai
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.tes... |
from functools import partial
import colossalai
from colossalai.utils.cuda import get_current_device
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.nn.optimizer import HybridAdam
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_por... |
import pytest
import colossalai
from colossalai.utils.cuda import get_current_device
from colossalai.zero.sharded_param import (StatefulTensor, colo_tensor_mem_usage, colo_model_data_tensor_move,
colo_model_data_tensor_move_inline, colo_model_data_move_to_cpu,
... |
from copy import deepcopy
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, Tens... |
from functools import partial
import colossalai
from colossalai.utils.cuda import get_current_device
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.amp import convert_to_apex_amp
from colossalai.nn.optimizer import CPUAdam
from colossalai.testing import p... |
import torch
import colossalai
import pytest
import torch.multiprocessing as mp
from colossalai.utils.cuda import get_current_device
from colossalai.utils.memory_tracer import MemStatsCollector
from colossalai.utils.memory_tracer.model_data_memtracer import GLOBAL_MODEL_DATA_TRACER
from colossalai.utils.memory import c... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
import colossalai
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.core import global_context as gpc
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.uti... |
from functools import partial
import torch
import torch.distributed as dist
from colossalai.logging import get_dist_logger
from colossalai.utils import checkpoint
from colossalai.zero.shard_utils import TensorShardStrategy
from colossalai.zero.sharded_model import ShardedModelV2
LOGGER = get_dist_logger('zero_test')
... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.logging import get_dist_logger
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
fr... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.zero.init_ctx import ZeroInitCont... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from copy import deepcopy
from functools import partial
import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.zero.in... |
import torch
import colossalai
import pytest
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
from colossalai.utils.cuda import get_current_device
from colossalai.utils.memory import colo_device_memory_capacity, colo_set_process_memory_fraction
from colossalai.zero.init_ctx impor... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import copy
import colossalai
from colossalai.zero.sharded_model.sharded_model_v2 import ShardedModelV2
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.logging import disable_existing_l... |
import os
from functools import partial
from pathlib import Path
import colossalai
from colossalai.testing.utils import rerun_if_address_is_in_use
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.core import global_context as gpc
from colossalai.logging import get_dis... |
from colossalai.zero.sharded_param.tensor_utils import colo_model_data_tensor_move, colo_model_data_tensor_move_inline
from colossalai.utils import free_port
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.zero.sharded_param import ShardedTensor
import colossalai
import torch
import torch.mu... |
import pytest
import colossalai
from colossalai.utils.cuda import get_current_device
from colossalai.utils.memory import colo_set_process_memory_fraction, colo_device_memory_capacity
from colossalai.utils import free_port
from functools import partial
import torch.multiprocessing as mp
def _run_colo_set_process_mem... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pytest
import torch
import torch.nn.functional as F
from colossalai.context.parallel_mode import ParallelMode
from colossalai.context.random import add_seed, seed, set_mode, reset_seeds
from colossalai.utils import checkpoint
def forward(x, weight):
out = to... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pprint
from functools import partial
import colossalai.nn as col_nn
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
fro... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pprint
from functools import partial
import colossalai.nn as col_nn
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
fro... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pprint
from functools import partial
import colossalai.nn as col_nn
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
fro... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pprint
from functools import partial
import colossalai.nn as col_nn
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
fro... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
from pathlib import Path
import pytest
from torchvision import transforms
from torch.utils.data import DataLoader
from colossalai.builder import build_dataset, build_transform
from colossalai.context import Config
TRAIN_DATA = dict(
dataset=dict(
... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
from functools import partial
from pathlib import Path
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
import colossalai
from colossalai.builder import build_dataset, build... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
from functools import partial
from pathlib import Path
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torchvision import transforms
from torch.utils.data import DataLoader
import colossalai
from colossalai.... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.core import global_context as gpc
from colossalai.logging import disable_existing_loggers
from colossalai.initialize import launch
from colossalai.utils import fr... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
DEPTH = 4
BATCH_SIZE = 8
SEQ_LENGTH = 8
IMG_SIZE = 16
HIDDEN_SIZE = 8
NUM_CLASSES = 8
VOCAB_SIZE = 16
def check_equal(A, B):
    assert torch.allclose(A, B, rtol=1e-3, atol=1e-1)
|
import torch
import torch.distributed as dist
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.global_variables import tensor_parallel_env as env
from colossalai.nn import (Classifier1D, Embedding1D, Linear1D_Col, Linear1D_Row, VanillaClassifier... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.core import global_context as gpc
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.utils import fr... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
DEPTH = 2
BATCH_SIZE = 8
SEQ_LENGTH = 8
HIDDEN_SIZE = 8
NUM_CLASSES = 8
VOCAB_SIZE = 16
IMG_SIZE = 16
def check_equal(A, B):
assert torch.allclose(A, B, rtol=1e-3, atol=1e-2)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn.layer.parallel_2d._operation import Matmul_AB_2D, Matmul_ABT_2D, Matmul_ATB_2D
from colossalai.utils import get_current_device
fro... |
import torch
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn import (Classifier2D, CrossEntropyLoss2D, Embedding2D, LayerNorm2D, Linear2D, PatchEmbedding2D,
VanillaClassifier, VanillaPatchEmbedding, VocabParallelCl... |
import colossalai
import colossalai.nn as col_nn
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import pytest
from colossalai.core import global_context as gpc
from colossalai.context import ParallelMode
from colossalai.testing import rerun_if_address_is_in_use
from functools import p... |
import torch
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn import TransformerSelfAttentionRing
from colossalai.utils import get_current_device
def check_selfattention():
WORLD_SIZE = gpc.get_world_size(ParallelMode.SEQUENCE)
SUB_SEQ_LENGTH = ... |
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.core import global_context as gpc
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.utils import free_port
from colossalai.testing import rerun_if_a... |
import torch
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn import (Classifier2p5D, CrossEntropyLoss2p5D, Embedding2p5D, LayerNorm2p5D, Linear2p5D,
PatchEmbedding2p5D, VanillaClassifier, VanillaPatchEmbedding, Voc... |
import torch
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn.layer.parallel_2p5d._operation import Matmul_AB_2p5D, Matmul_ABT_2p5D, \
Matmul_ATB_2p5D
from colossalai.utils import get_current_device
from colossalai.utils import print_rank_0
from .comm... |
import torch
TESSERACT_DIM = 2
TESSERACT_DEP = 2
BATCH_SIZE = 8
SEQ_LENGTH = 8
HIDDEN_SIZE = 8
NUM_CLASSES = 8
VOCAB_SIZE = 16
IMG_SIZE = 16
def check_equal(A, B):
assert torch.allclose(A, B, rtol=1e-5, atol=1e-2) |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from functools import partial
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.core import global_context as gpc
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.utils import fre... |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
DEPTH = 2
BATCH_SIZE = 8
SEQ_LENGTH = 8
HIDDEN_SIZE = 8
NUM_CLASSES = 8
NUM_BLOCKS = 2
IMG_SIZE = 16
VOCAB_SIZE = 16
def check_equal(A, B):
eq = torch.allclose(A, B, rtol=1e-3, atol=1e-2)
assert eq
return eq
|