repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
psf/black | https://github.com/psf/black/blob/c3cc5a95d4f72e6ccc27ebae23344fce8cc70786/tests/data/gitignore_used_on_multiple_sources/dir1/a.py | tests/data/gitignore_used_on_multiple_sources/dir1/a.py | python | MIT | c3cc5a95d4f72e6ccc27ebae23344fce8cc70786 | 2026-01-04T14:40:23.735327Z | false | |
psf/black | https://github.com/psf/black/blob/c3cc5a95d4f72e6ccc27ebae23344fce8cc70786/action/main.py | action/main.py | import os
import re
import shlex
import shutil
import sys
from pathlib import Path
from subprocess import PIPE, STDOUT, run
ACTION_PATH = Path(os.environ["GITHUB_ACTION_PATH"])
ENV_PATH = ACTION_PATH / ".black-env"
ENV_BIN = ENV_PATH / ("Scripts" if sys.platform == "win32" else "bin")
OPTIONS = os.getenv("INPUT_OPTIONS", default="")
SRC = os.getenv("INPUT_SRC", default="")
JUPYTER = os.getenv("INPUT_JUPYTER") == "true"
BLACK_ARGS = os.getenv("INPUT_BLACK_ARGS", default="")
VERSION = os.getenv("INPUT_VERSION", default="")
USE_PYPROJECT = os.getenv("INPUT_USE_PYPROJECT") == "true"
OUTPUT_FILE = os.getenv("OUTPUT_FILE", default="")
BLACK_VERSION_RE = re.compile(r"^black([^A-Z0-9._-]+.*)$", re.IGNORECASE)
EXTRAS_RE = re.compile(r"\[.*\]")
EXPORT_SUBST_FAIL_RE = re.compile(r"\$Format:.*\$")
def determine_version_specifier() -> str:
"""Determine the version of Black to install.
The version can be specified either via the `with.version` input or via the
pyproject.toml file if `with.use_pyproject` is set to `true`.
"""
if USE_PYPROJECT and VERSION:
print(
"::error::'with.version' and 'with.use_pyproject' inputs are "
"mutually exclusive.",
file=sys.stderr,
flush=True,
)
sys.exit(1)
if USE_PYPROJECT:
return read_version_specifier_from_pyproject()
elif VERSION and VERSION[0] in "0123456789":
return f"=={VERSION}"
else:
return VERSION
def read_version_specifier_from_pyproject() -> str:
if sys.version_info < (3, 11):
print(
"::error::'with.use_pyproject' input requires Python 3.11 or later.",
file=sys.stderr,
flush=True,
)
sys.exit(1)
import tomllib # type: ignore[import-not-found,unreachable]
try:
with Path("pyproject.toml").open("rb") as fp:
pyproject = tomllib.load(fp)
except FileNotFoundError:
print(
"::error::'with.use_pyproject' input requires a pyproject.toml file.",
file=sys.stderr,
flush=True,
)
sys.exit(1)
version = pyproject.get("tool", {}).get("black", {}).get("required-version")
if version is not None:
# Match the two supported usages of `required-version`:
if "." in version:
return f"=={version}"
else:
return f"~={version}.0"
arrays = [
*pyproject.get("dependency-groups", {}).values(),
pyproject.get("project", {}).get("dependencies"),
*pyproject.get("project", {}).get("optional-dependencies", {}).values(),
]
for array in arrays:
version = find_black_version_in_array(array)
if version is not None:
break
if version is None:
print(
"::error::'black' dependency missing from pyproject.toml.",
file=sys.stderr,
flush=True,
)
sys.exit(1)
return version
def find_black_version_in_array(array: object) -> str | None:
if not isinstance(array, list):
return None
try:
for item in array:
# Rudimentary PEP 508 parsing.
item = item.split(";")[0]
item = EXTRAS_RE.sub("", item).strip()
if item == "black":
print(
"::error::Version specifier missing for 'black' dependency in "
"pyproject.toml.",
file=sys.stderr,
flush=True,
)
sys.exit(1)
elif m := BLACK_VERSION_RE.match(item):
return m.group(1).strip()
except TypeError:
pass
return None
run([sys.executable, "-m", "venv", str(ENV_PATH)], check=True)
version_specifier = determine_version_specifier()
if JUPYTER:
extra_deps = "[colorama,jupyter]"
else:
extra_deps = "[colorama]"
if version_specifier:
req = f"black{extra_deps}{version_specifier}"
else:
describe_name = ""
with open(ACTION_PATH / ".git_archival.txt", encoding="utf-8") as fp:
for line in fp:
if line.startswith("describe-name: "):
describe_name = line[len("describe-name: ") :].rstrip()
break
if not describe_name:
print("::error::Failed to detect action version.", file=sys.stderr, flush=True)
sys.exit(1)
# expected format is one of:
# - 23.1.0
# - 23.1.0-51-g448bba7
# - $Format:%(describe:tags=true,match=*[0-9]*)$ (if export-subst fails)
if (
describe_name.count("-") < 2
and EXPORT_SUBST_FAIL_RE.match(describe_name) is None
):
# the action's commit matches a tag exactly, install exact version from PyPI
req = f"black{extra_deps}=={describe_name}"
else:
# the action's commit does not match any tag, install from the local git repo
req = f".{extra_deps}"
print(f"Installing {req}...", flush=True)
pip_proc = run(
[str(ENV_BIN / "python"), "-m", "pip", "install", req],
stdout=PIPE,
stderr=STDOUT,
encoding="utf-8",
cwd=ACTION_PATH,
)
if pip_proc.returncode:
print(pip_proc.stdout)
print("::error::Failed to install Black.", file=sys.stderr, flush=True)
sys.exit(pip_proc.returncode)
base_cmd = [str(ENV_BIN / "black")]
if BLACK_ARGS:
# TODO: remove after a while since this is deprecated in favour of SRC + OPTIONS.
proc = run(
[*base_cmd, *shlex.split(BLACK_ARGS)],
stdout=PIPE,
stderr=STDOUT,
encoding="utf-8",
)
else:
proc = run(
[*base_cmd, *shlex.split(OPTIONS), *shlex.split(SRC)],
stdout=PIPE,
stderr=STDOUT,
encoding="utf-8",
)
shutil.rmtree(ENV_PATH, ignore_errors=True)
# Write output to file if specified
if OUTPUT_FILE:
try:
with open(OUTPUT_FILE, "w", encoding="utf-8") as f:
f.write(proc.stdout)
print(f"Black output written to {OUTPUT_FILE}")
except Exception as e:
print(f"::error::Failed to write output to {OUTPUT_FILE}: {e}", file=sys.stderr)
sys.exit(1)
print(proc.stdout)
sys.exit(proc.returncode)
| python | MIT | c3cc5a95d4f72e6ccc27ebae23344fce8cc70786 | 2026-01-04T14:40:23.735327Z | false |
psf/black | https://github.com/psf/black/blob/c3cc5a95d4f72e6ccc27ebae23344fce8cc70786/profiling/list_huge.py | profiling/list_huge.py | config = some.Structure(
value=set([u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
| python | MIT | c3cc5a95d4f72e6ccc27ebae23344fce8cc70786 | 2026-01-04T14:40:23.735327Z | true |
psf/black | https://github.com/psf/black/blob/c3cc5a95d4f72e6ccc27ebae23344fce8cc70786/profiling/mix_big.py | profiling/mix_big.py | config = some.Structure(
globalMap = {
103310322020340: [100000031211103,101042000320420,100100001202021,112320301100420,110101024402203,112001202000203,112101112010031,102130400200010,100401014300441,103000401422033],
110040120003212: [114413100031332,102101001412002,100210000032130,214000110100040,103031420121210,112114222301010,110133330100020,100001001203011,102210220202130,102200120234012],
244402003102200: [110212012114431,100001140020000,100012101223021,110031301200114,114002020044120,100021004302202,102202200240222,114102010220042,102021301441201,104103102103201],
122013242003223: [100014100100001,102100004130301,111120004100414,101034024000101,100021424301033,102003004003400,103340410140122,100102114100420,111012202111021,100103144302200],
1120010021223330: [110332202020000,104120130021200,112421004012141,111100220022101,100021104201130,102224410201003,110030021010001,101300401002320,112001321113132,101110434020010],
214100003030021: [102122000214201,100242141004122,102024240221040,110320011200230,100011114300334,102303004110022,100110201042101,110134201140010,112101044000202,100040024340013],
1000220132200020: [102231130213210,103214010102022,102000402041014,100043324210140,100023024211011,102404021403141,100010004313001,100003201021001,100122020011232,100121031014040],
1200041022422001: [100021300101110,103010301402112,100011401031000,100020034100101,100122214300222,103134021420204,102042210220100,100103200021130,103204214043011,103020320102002],
1000232101122040: [110011414240022,102202310203222,100042001020203,102002320220202,100010044300003,114130210231020,103301410110110,112114324040000,102124031023100,100204104320231],
110002000000001: [110010000111020,102011041320000,114240012324120,100022010022031,102014140200013,101240110302200,100031204311023,101232001021020,100012121040101,111243002222100],
200211022142211: [102231130213210,103214010102022,100043324210140,102324014112020,102022004012201,100023024211011,111130210020000,102404021403141,100003201021001,100122020011232],
114121130201204: [100040031020241,101100104000222,102324141000002,101010234004042,102041224010012,100411400030100,100002000102100,114340102322021,112033124022002,102120221120200],
104101002123040: [100032320000112,103140011442322,100312120141341,111144020011120,112142204020032,110044031204131,114012010020012,100001420412010,100102110020102,103022000123142],
241101103112120: [110333404433042,112240002043300,110021100132240,103104221443021,101100304000101,100202210410011,114402212310021,102411011111011,103020141414101,100113001040420],
1004020030320422: [100022214203010,110310011234031,110123111300000,100020031041304,102120004000101,102020002000420,110100302010110,100422020030044,101220100303222,100002220411002],
1041010100001110: [111124004101012,114103220220104,102120004001310,102430121110340,101021204000200,112141104020201,101200111020111,110001200422320,103210114021430,114223242332100],
121103011132000: [110021104200001,110441140102024,111140124100100,110310020040401,100002024303323,110030002130201,102103320232241,110302000400221,110111001302000,110210304220000],
1042022101004111: [110021104200001,111140124100100,110031301200114,102103320232241,110302000400221,110111001302000,110210304220000,102033124000111,111000212204213,110010324410130],
102220010222003: [114003212210013,110302012103114,103023101400002,110010120440200,110024140022040,100002100100320,110100034200300,100401201010220,103200024021204,102100204000234],
1442130103121040: [114413100031332,112121020204344,102211030203010,100210000032130,214000110100040,103031420121210,112114222301010,100001001203011,114214404142034,102210220202130],
110204013021024: [110301402000211,103030201402004,110420104221220,103200004030020,100320014310021,102000110221110,110240214432004,102122141142000,103041204010000,114432234113040],
100001440133234: [102111040210242,114110000002430,102010011412143,100424220032043,114013422300032,102021004001004,100131414303044,100201010110320,100000221030033,110101101120302],
114002033000020: [100001121002141,102000040201003,101132131221321,100004440004031,102002011033030,110310300003402,110220022132003,101001000302103,110044112100220,100010120031210],
112123022000131: [100304201000221,100112021020320,103010001422020,102300124023122,114221004100020,100002100124101,102304100210334,100403210010142,112003100241220,102204304000304],
1143001230040001: [101033124003122,100313144100202,103201004024412,114014222302011,103011020110211,110203420111140,111003412201430,101411214000203,110222422042203],
1002403200000231: [100122004300410,102140014001101,102000104012232,112400202020003,102000224000011,114122134140002,102101010202004,114400202310210,100042030201341,112233332034000],
1003123041101031: [110043132100300,102220120203200,100002004201122,110132031134031,1320303202002000,1321200010022312,110130330012140,114102134002330,140403400240000,100003230110201],
242220101103400: [111130112100210,102202200240222,110131044200121,100100004102234,110223332120231,100001024301322,114013044102010,114220114140122,101023021034304,110200210110130],
121030040400021: [110011011212100,101134030011000,103220211421421,110123230124040,131000001020102,112200412033030,110420001000202,100102110402220,110412310030001,114012022302022],
100104011300401: [110320414420310,110100100101041,100420124100211,103010211222002,101040120320230,110013021300010,101043234000412,101220000040002,100422001002123,114010010010002],
1004003211220032: [102400100230000,101100300302102,114120420223002,100002210201212,102302020212101,112410424012010,100030004104210,103030201434002,110232110004010,103021201430000],
223011102020040: [110012420131001,100132040031022,100020310000200,100220004030002,110020002003334,110014000442004,101441204020001,102014201411102,103231320100042,104121201242032],
100200110422111: [103100221440010,114003344011001,100021101000000,102140000212141,101014201030204,101203001010111,102424114020143,100031201030102,101041001003300,114012444020220],
134300111030221: [112100124023402,101020044011003,100002414210010,102010042000041,102242204040102,100021014100124,100130244302034,100122040012210,100014010104404,101020104000001],
114001000330100: [401142101000022,400114001102410,102100000012031,431232000323104,1012100023110102,232023302143031,120200122422404,1040100003200241,111414004440203,1020220210340010],
211222012120321: [112000112000031,100121214300000,102123214020000,102022124012030,114002320022312],
1122000200402140: [102401141113031,103042420143020,102304314110202,110210400002012,113032012204012,112310002020302,100204311000400,100403012200201,112002111121013,114211100001224],
332000024200013: [102120010220042,110103312142034,102210210312302,101120100320100,114140014100000,102110004002301,100130020001030,112022100213011,100101231202322,111210020001013],
120041442412123: [102133011413110,111001200041102,101223020300040,102034324000220,100210000032400,101230020303000,111340130010314,110200422121211,110214220002020,112220414010040],
1011011320300100: [100102024301030,100111134302041,100112234000041,110004244214343,101002101020003,102214021120301,114221224100022,101330210310300,112003021111000,102012141134211],
220020021000101: [100301001000202,104101112102403,100023121223031,114201432320014,120000002023011,102133120200123,101014020301201,102000031130401,101010111002141,114123124143310],
1024011342000021: [102001001130100,111204203012002,124020002020003,122222120003214,133332002141010,144000013213001,124010030100142,112310202021010,110014020020011,100140044020011],
1131011002310011: [102230040200031,110122001010214,114043140010022,102101204000010,110022300420031,100100401040001,114230230000123,100222024320003,103323001400013,114013012300240],
1142024120224002: [102203344011410,100021324100000,102103430210003,100012014300120,102414014022212,102012220241003,101004411032102,101430211012120,100204021000012,103242044020102],
1100004410231120: [110013202003320,101031241000010,102120231343224,110030332100203,100314114322101,114404232310120,103100034001310,114002202210331,100031301020100,110111032140220],
1440220030001122: [114010330030011,103021220104200,101010020320000,112000210211020,100010324210003,101000000343443,110002400011111,100402132200000,111100300024000,103144040104204],
121414301110004: [110144240011020,110413401240204,112000244001222,114441114123013,103220211421421,114000012334102,101000014012442,100312401002102,111022210021013,103110001420121],
130004004220120: [111240340004003,102021024000010,111101222244030,112011012004300,102300010242330,102000401120420,102004012043122,114011102210402,100120001014040,114300100000041],
1013303000401020: [101000024004442,100002100000100,110130030020000],
220041302111231: [100002014200331,100034244210020,102012004000003,100411000030110,102041201121230,103011014042120,100000030120242,102110400210023,101012204221200,111212422222300],
1000031031200013: [101302134233230,100000130010033,101030404212000,114102224000201,100022021041122,100020001042002,100013011020311,100120041020012,102012204000242,114143024003322],
1000303301100221: [111333104124202,101000304014100,100040011023101,110301030010140,104100002101431,101123232010002,114421242204443,110100222001100,103102000121300,110010331230210],
1410211342331412: [111002100000102,114021010040140,114222302321101,102101024002121,110014024202014,110220130100010,100020011030330,102404221402210,110203022032320,101222014230110],
1411020122202044: [100141401021003,102010000231120,101000400320211,101001010300214,103010020142023,110132002212113,110010100040100,102101002002002,111020302232033,110224030114311],
101010042001102: [101221020040220,100103104302044,101041030320401,102141212000200,101203121020430,102020004000014,100000211023014,114144014122041,100201111002224,101410304041000],
204204011102020: [100212030414010,101400004022210,102031021441200,101200020303202,102301324112020,111340300010010,102013224003020,103013020123142,102240344041020,102140202001100],
240000420312002: [110002004410202,102103114000024,102240221000001,112041002002124,114000024101102,140343100002103,400200234320200,100020124204330,100001424102041,100100021040230],
1030000001422430: [102343230224321,103211200100400,102112231020231,100022004300020,102320000240102,100042144200000,102030304001101,100020420121003,103020004011414,100001104301200],
1104234221030010: [110000322130121,101023001002020,111300222202234,100200001210021,103204230111030,104130020011020,101114422122000,102001314013400,114110414140400,111201100011141],
121341130003000: [111102004100101,102021100220101,114000040010421,112042110220004,100000214240410,100433201004014,102301102004413,102003000220111,102010100204023,102414040230400],
100101220022002: [100010024102110,101041200320012,114303400002201,110204211000331,112121014003422,114430102311021,100240444100022,103004411424400,111014002140322],
1023303420422001: [100043104240004,110002034200042,100001240100033,114100304002030,102100001340122,112030010234104,103414101212410,100123021223100,112302011102312,101020030343002],
101114102124321: [110403244220202,103113014002100,110120400402324,100402340010310,112010020211000,100102200000211,103030201240100,102300210210222,114100332340213,111031030024010],
1422302020100030: [114020000030002,114031000022030,100201211003004,102014002000401,103103241421322,114121012340044,102000400240203,102104304023201,103310300140324,100002224244002],
1121202400231120: [101211201202134,103120104030100,100004000100101,102020030220200,110031404423144,110003434410403,111110000014401,100000204312321,101004000304012,110300121220201],
1042001221000013: [114032104100141,101213114040141,102210101002412,111140100011101,110122421241103,112001144002010,101013030304101,100012011022000,102000004000013,102021241324040],
1102433033042110: [110104112010103,111102004100101,100122004300410,102202414041044,102140014001101,102000104012232,102021100220101,114443330001442,100230120001003,114000040010421],
100103201011144: [110102401201204,102400100230000,100212030414010,101200020303202,114120420223002,102013224003020,100002210201212,103013020123142,102302020212101,114303340000001],
240343043023011: [110120144200000,114022412330004,101200221022044,110241112020204,100002004104004,102100224000210,102310140240012,100014204201000,102103321411004,100400001001300],
1020301032424304: [101302134233230,100000130010033,101030404212000,114102224000201,100431000032001,100020001042002,100013011020311,102103331400000,100120041020012,100020001041231],
114200022220040: [100014100100001,102100004130301,111120004100414,101034024000101,101243024011000,103340410140122,100010021221221,111012202111021,100103144302200,101414100300221],
142041243300010: [102102002002242,101130104022002,101230331020012,100004244210201,102420124024204,122312222240401,102041014011340,110200130004300,100140101012000,101400000302141],
1002203102111022: [100434000032200,110004020020022,114032412303041,112000301122111,102020130212402,100010001020000,200000020021022,114321212322303,112302112002211,114202002333330],
1110102121041413: [101012310301211,103112200123144,114242304004011,102302200241241,110001420021023,110201040003402,112301421130130,110020012100302,114412202320010,110021030030202],
114020200220012: [102010114022011,103340041403223,114002200000421,101020321002002,114302210010001,114030004104220,100104004301000,102443211401140,102301041000014,100001111201214],
202011212123200: [102203301123001,103210400110122,101112020300011,114104302341000,100400201010000,102244400201444,101010001000121,102304000220032,102002131132000,100000031024222],
201032103003132: [110212041114210,113210300100002,112404024011000,102131034000220,111212124130140,101002014013010,103402020120010,112110100230023,112003044002004,103020200102200],
120222032001012: [110121100004131,111400214442024,111122200021102,101041100300001,102140011440001,101011220342010,110200004221020,103114211441300,110000222010004,100114000101041],
1100013040001020: [100003031030034,101001044211204,102100010233022,102120400212001,114313022302101,103001324000110,100002014200021,102300401001010,103212320212011,111120034400000],
1001031021300022: [102000020220000,101020104000040,114412142320040,100003044100101,114012402332300,102220211122102,101010110302010,100032121020101,100013224313301,100012244240030],
124000010122242: [100032320000112,103140011442322,100312120141341,111144020011120,112142204020032,110044031204131,114012010020012,100001420412010,100102110020102,102002204001140],
1121211020204132: [110020301320101,102014001130210,110401041140300,110002302201420,102202001120002,110200010003201,102421004041011,102240040202421,101001110300111,100130004300103],
121320202004301: [111014240041102,101100004200110,112021221130424,112200041103002,110400040402041,112001011112202,100112004304314,100232041001122,100223030001010,100104231000300],
1024242230240244: [110322104424202,101202421002031,102102220312102,103004204021310,112220102043102,110012310020200,102030130241300,103030120100220,100232224340041,112400002011010],
1002100010130110: [101400001011042,100002011020211,100100110010010,111110002213142,100002131030310,111102214100400,103220201402330,102321000221140,103113124030200,102110300311400],
200010402222410: [101210320301302,102100221340112,100114104101001,114002244103320,101023221000031,101400014040400,102012034000402,114221004002212,102100122002001,101000011021414],
112300020202111: [103141411420221,122102400312000,110002100110002,1340024302404122,100002001043010,113110330121030,410020013004430,1002300040412102,1210020204140003,123211320000102],
100102203210034: [102023021100022,111200302222011,112040241120204,111000022200000,100010011232324,110220030000133,110000330430311,101211221014003,103111230120100,102221220200021],
1021001032000012: [102020010203200,100011144312020,102011204001010,102001410221023,110130201302200,103041021430301,101100440320434,114000402211404,101000100302003,110000030430422],
1031102424140120: [100011010414200,111121102240240,102002121101110,102403100202003,110000100041101,100400000010033,100101211001320,101141020321000,103224101400400,102000002043020],
102001021434201: [110131122012210,114010200040441,110032014420232,100000344100100,111304022202211,102302011002003,102011021121200,100012441030002,110222042022111,103131004002200],
220100132400104: [1010400230221020,111320012221132,102302144110440,114140004123122,102143202000400,111020002202333,101321311004010,102110241342210,114122302311011,100002320411400],
1110121023020002: [100022041001002,111240340004003,111101222244030,114032424122040,112011012004300,101021401003220,100301020420101,102002202000000,100022024100041,102010410213111],
1042030001044121: [101230040301234,110033032100000,102303014024221,100100224304110,100400432200321,100020200012311,114140144122230,101400201102014,100000020004004,101040040303102],
1100100224104011: [111130112100210,102202200240222,110131044200121,100100004102234,101023014004000,110223332120231,100001024301322,114013044102010,114220114140122,110200210110130],
212002042300000: [100030220100110,110011202100142,112400102013231,114012002212100,100002114314022,114203110001031,112030122000211,111004012200210,100413131013301,110001102103432],
221001240043040: [100420021010013,112101302302202,102212011120200,102344111104233,100432104320110,114021014020000,102121321340000,114200214002024,100111221011100,100023104101340],
1040021200420124: [100042111220111,112220100202000,100042041220010,100403211010100,103240301400123,114102210214041,100021300103041,101010204000242,111334004121414,114101000213410],
1101110030041022: [112013300240132,114303044030300,102340044111034,100110021021010,120003030010112,114200212320001,101020240313001,102330220242002,121400042034110,100033201030040],
1030000302042203: [102110231441040,100113214302002,110000002133140,101233341022410,103021000144000,100100124100031,100040204240000,114100012313002,114440440000034,120240000020201],
1403121230220020: [110200400000020,100324140141402,100002204340200,110000410001034,111140000000000,103102224034002,110022002120332,111110230014001,110020041310021,110001001200323],
203110414022010: [111240220002100,110030344201413,101001014001021,100103224301304,114000100020200,102102100231000,114040202330200,100410331002222,100032204311200,102202111110404],
133032004200001: [110314100012011,111020422203010,103121124002320,102240300202212,114140422203401,110210420000001,102210331401030,101102104000100,102031010200122,111104202112001],
1120000102422014: [102140211342044,110020301220411,110100200004404,110131142010001,101032441003040,112004400210201,110004102230402,112021004020000,101012004001204,110420001344420],
1000030222010034: [100320031001240,102104320220212,102100200232044,102434104021201,102300004022020,103003301240122,100210024101140,112220011101022,101000130304001,103020024043010],
300210002300114: [112100021100243,101123001100112,112003002001002,111000200012220,102120100230203,100111124101112,112001010233001,103313021401232,110430034224001,114110200221240],
1421020010140012: [110010002014100,112030001120220,114231034004040,100100301202103,110301021331320,112001100232420,114020244023001,110342014420100,110200402220240,100114200402104],
300310000040102: [110301222101110,102023301102400,102012204010044,104120110010402,111010214402000,110401440001204,110204310112224,110303104423010,110340022023321,114010200012200],
101010002031100: [110324020012330,100001134302201,102100011402012,100020221031000,102032220220223,114042004020130,114030400021223,102022021100102,103301304031211,102000004013440],
1032221241200404: [104120011242011,110020002104100,102000301101102,111122200021102,114032234033303,100000020201032,112011204004112,100003000002034,110102022000200,102200034040443],
1131211404001020: [110200100002031,101420310301020,100020330100100,1002443410030003,111103224103001,100011231042030,102100141413100,102110021342213,112002310211210,100010031020102],
132013120010104: [111300232202103,102301401002404,110122120100022,102012004010000,102400100220000,102001104012021,100242444300104,102002104014323,100401241010122,100010010102122],
130000310202012: [114200044114004,102202010204130,100220101204311,100002000102100,102100430233212,103003210141014,110421222011222,100201000410010,112110202014400,100400030013200],
1111023010042112: [100000201220420,114000030021040,101002011020001,113141112413242,102021111130104,110401030101001,102201214011331,100013011043231,102031114002042,112302012020342],
121010001111310: [101400220300033,100012021032040,102001130224032,100012021040320,102130224023220,102100401143124,100010001030140,102110101410144,114142204121324,112102421102022],
1113001011001223: [110202400003213,114112014140000,110001012101334,114200124000134,100001114302424,101222134011002,100003011231420,103230231200010,100431031014023,100120120401041],
142034000000112: [100310304310342,100010041031020,100042400001023,112402000200101,100020201221000,101010020301243,102030001103000,100040021031201,111010130022401,112140214000020],
1012401424032042: [102401121113302,111201210000103,100101101010100,101320040302031,114010102300010,102321044112101,102000014002000,101034014011332,101203011023020,100021231020200],
1103400010414230: [102010114022011,103340041403223,114302210010001,114030004104220,102443211401140,100000221020014,102003224000022,100010031222001,103001114001210,100001111201214],
1400000301203010: [114122134140002,103012011434022,101032010342004,102322330243202,103101411424112,100100114103001,102134131144121,114030222300123,102102301021403,114103402201140],
214024214401412: [102220242000201,103010021410203,101000304214111,102120040200200,103202220100114,111100132103413,112203220204012,100113040003021,100010204200010,102410134022300],
1221011020200210: [110041030411301,110001041213031,101110044200100,102411000200000,110414300030001,100030020011022,114100122342002,111202012222402,110033041200104,102000221442214],
1000120020110040: [110200411140130,110114120120100,103124210100201,111014200024034,102301002004242,112020014000340,110021404214210,101003014210212,100130220400031,100210024300001],
201001342101001: [102001444000222,101400134043222,100010400013202,114340312301134,114310242320310,102110021401100,100113201011032,102230000204000,103102044031002,100011421002120],
1000000133120020: [110230000000002,110220301202400,101003140322000,110401001002400,110210024220020,114201142324042,100401424300314,102213401111223,100040120000031,102002104011244],
101400102142302: [100004211221210,110201001212001,110200134240420,110403221000002,101201304200320,114022042302201,112002320212232,100002210413202,103140021401211,102404042001400],
100203202010202: [101000114000402,100043100123004,102014014000100,112212042024100,112410322024122,102300110221100,110002332102000,112001111111212,100020104300430,112041020221402],
1011401013024410: [114444244144110,114201004110120,114033234032101,100412114304000,103001020120202,101004411032102,101000020302400,102021014012210,102244221114031,112400000200011],
131012202321232: [110041030411301,110001041213031,103301210142043,100002024200142,101110044200100,102411000200000,110414300030001,100030020011022,100400014321101,114100122342002],
100303010210441: [102304034110400,100043010120010,102101124001310,110030110122402,102430300201034,100004121030301,102143432000102,101031001004201,114001110033210,110120011014412],
1111011030001020: [100032041021210,110001041213031,101110044200100,110414300030001,100030020011022,111202012222402,102000221442214,102040041320010,102402104022000,103203401400132],
1420144022031000: [101240220300022,102200130202201,100002010014040,100022000410041,103020000102011,103001031420203,102301324112020,101040421033204,103130011441212,102240344041020],
104012440012200: [103000001432331,110131110401120,103000004011102,100120041014034,112111110422120,102130300200301,100121100002240,110224032100120,101104220322100,100001001201301],
120311002000121: [101220014012231,100104024303320,114422004110130,112210132031001,102010310213001,103244320114100,103422430124013,100121424231222],
1002120002022122: [101201220300012,101232104042101,101220404012304,101000304000400,102020204010341,112020100202034,100140410020232,101221004040204,100412414304143,102110404001401],
1040331220001000: [102041310200042,110301030010140,104100002101431,110221000111302,101010001002000,110022140120012,111330020002213,102011440201423,114011212320331,100002204310140],
142030323441430: [110020120023302,100043120003103,110002000410040,100433001014104,100000004102121,101022411030002,102000021442024,100030314100004,100404130020000,110400302114103],
1020000001100400: [110001202000032,100024204211200,114200004110244,114230412332011,101210101010023,114422114140221,100001211042341,114032104123010,114303122321110,114330204034200],
1001130242231402: [100042320001030,110024001010110,100100014300230,102043104010014,100001004300242,111120044100110,102200201110443,114240314000100,101002411030000,102002324011002],
1014202010302102: [114200212322001,100401210020002,112210322040121,100402042200000,100010111022102,102000224000000,100002104302110,101022440300020,101402010301000,100000220111021],
144100013100013: [114020200020222,114330002323002,110213022100021,102202000240014,114424424144210,101224321014333,102100121342000,102204321003224,111222002224302,114100104030132],
101213002101214: [114014104033302,102120200221403,110323110104120,102110314001121,101020104000300,114003402300102,111202104100014,112402102021111,103344224020241,101002114000424],
1012442110402240: [104310001300102,114021132202032,112012004004324,103001314001021,114122220031004,103040024041111,101004004213014,111124002211203,112034420220403,101211231023001],
1002210200000101: [101232314010241,100301300121100,110042400130000,103020124010320,100021300101201,110230022122031,101000444214021,112102112301130,100414004300100,112102232300222],
102400210400022: [110010334200041,100001024102001,321114010100340,102322101000200,102110100322014,110413131000201,100000100000432,101020021114310,114200104110001,100010314202003],
1043003020000104: [114101024120020,102004011440212,100000001021013,100011204300012,100101234330123,110024000432310,102012101443110,110032220022103,102102104000102,101210031201114],
1243002022101103: [110203120112002,110000002100100,102221410200012,110213401202110,102300104113403,102213224110010,102300004112300,102420024020120,100040020001022,102141140210200],
1421204200010112: [100140101044224,101120004023340,103040031403031,101030014001004,102012421412000,100010044300131,111010130002222,110200300000010,110144102001321,100020304102100],
1003013101233010: [102400111402142,100040240004024,100010000002400,141030301403400,100000024100211,103224200113121,110140101300004,120020211120020,110230010010142,101002041000124],
101432210141203: [100310120141002,102324014112020,100301421002002,102404021403141,103022021431010,103200020111014,100010004313001,103104200210410,100002200410030,100121031014040],
1104003002031130: [102420020200202,1023413000203201],
112011130023123: [101020014011102],
1102310021242142: [100022041001002,111240340004003,102021024000010,111101222244030,114032424122040,112011012004300,101021401003220,102000401120420,102010410213111,102004012043122],
1002120410241114: [100222000144103,101240000310400,114022002211402,102311020240221,110211000004002,114101402200030,100000034200300,100010124300001,114200022322000,103133001441320],
1022312211002000: [102123140310122,110101214402023,110400014224001,101410030330122,111000144410222,100040001022000,100222404342030,112204022020043,112320100202120,110222241300101],
1402011002013421: [101043042401310,104001222102120,111412020003222,101113012101010,111212022200012,110023132100402,102022102010211,110231202040203,112112410232001,100021220002020],
1010121032032211: [100002024340032,110000011220401,102112340210300,114100214000010,100000104201200,103000304010211,100010000002132,114100042311312,101000014001020,103014141222013],
1020030002101022: [102040431121110,100400002201113,114111100021420,112024212301420,100300310140241,100240134323001,102111024000402,102001011100100,114402104142102,103312241402001],
1010240311230422: [110200400000020,110203131211112,102301004110004,110021100023210,110402420402001,103122000100403,110000112100402,100002204340200,114103412201420,111140000000000],
1401004204000003: [102102011412000,114204030210041,110033101044200,114440120000030,101002004014401,100400224323100,102112112004300,102010014001001,102000001440004,110321100402021],
244331203200002: [101221422120021,100001000002210,100400320033201,100004130002301,101210300302302,102020131413423,110020204200342,100000220011133,102144012001113,114431110001011],
1130301120301001: [100401140034220,114002002300222,101242004043300,101013111000000,102102131340404,102420041111041,112004312310301,100040121030000,102000404003020,103111020223101],
1002000403000204: [103343014022202,102003104011000,102220330202001,100022200120040,103002020110334,101021134012211,102140200310400,110010002103002,100220034341000,110200002121242],
221400100111330: [102031100200300,114102134122020,110024102010002,103010034004303,102023032000000,102101440201242,103000204003200,100021044343014,111433000002144,110001002002424],
112133300441020: [110120101220000,101040014002010,112001001133213,102000144014000,124204212002003,102311101002242,102110210210004,100000100412410,100012441220104,111032320020122],
133313202104310: [110013334202122,103330104030103,111200002220003,112043421121002,102212030201100,100111401011110,102010341440030,100001120100002,114001000024011,101021024012041],
100143040212102: [100422314300200,101210241204043,100123011004113,100420001002101,101134224001001,101021204000200,102321234111310,101200111020111,102000024000002,111122222240232],
1432344120130401: [101001040340004,101013004002011,112022334022101,101011324004222,101410401010030,103210241422022,103240001403000,102202204040220,102121010212140,110020124200230],
1100030203241342: [100440104303400,102122000214201,102220241100202,102024240221040,110320011200230,100342024100204,101422034042201,100011114300334,102300001002140,110040122000002],
1011102003042024: [104110021244203,102224101113104,102103022002001,102102224000012,102100102001032,100142431013000,101000031001011,101100001102230,101010130321200,114004322330012],
300210041431432: [103114001441122,100422004320024,114211110023303,111000302230100,114044144020400,103001101422012,4000113102314022,100022204104123,1400100000042343,1002411322210040],
241241343121111: [102021001132040,100032201024101,102301004110004,110402420402001,110000112100402,110021021210300,110404000000012,102000101120123,110221022101104,110023100402320],
1100142404020120: [101210144014403,102111420233012,110000020022324,100020231201012,102210424042300,110220032120121,102130240221020,100101101020002,114442102312001,100030201024402],
110001043032202: [114240012324120,101022011003022,101240101000020,100003101003001,102313100240202,102000014002101,102110422004402,102100411340400,101300414221100,114020210020424],
1100103200140000: [110013202003320,101031241000010,102120231343224,110030332100203,114404232310120,103100034001310,114002202210331,100031301020100,110111032140220,100020010200022],
212420003003124: [103000001432331,110131110401120,112111110422120,100121100002240,110011010001020,100001004340202,100220100432201,101000140200002,110140001201040,110001041310202],
| python | MIT | c3cc5a95d4f72e6ccc27ebae23344fce8cc70786 | 2026-01-04T14:40:23.735327Z | true |
psf/black | https://github.com/psf/black/blob/c3cc5a95d4f72e6ccc27ebae23344fce8cc70786/profiling/mix_huge.py | profiling/mix_huge.py | config = some.Structure(
globalMap = {
103310322020340: [100000031211103,101042000320420,100100001202021,112320301100420,110101024402203,112001202000203,112101112010031,102130400200010,100401014300441,103000401422033],
110040120003212: [114413100031332,102101001412002,100210000032130,214000110100040,103031420121210,112114222301010,110133330100020,100001001203011,102210220202130,102200120234012],
244402003102200: [110212012114431,100001140020000,100012101223021,110031301200114,114002020044120,100021004302202,102202200240222,114102010220042,102021301441201,104103102103201],
122013242003223: [100014100100001,102100004130301,111120004100414,101034024000101,100021424301033,102003004003400,103340410140122,100102114100420,111012202111021,100103144302200],
1120010021223330: [110332202020000,104120130021200,112421004012141,111100220022101,100021104201130,102224410201003,110030021010001,101300401002320,112001321113132,101110434020010],
214100003030021: [102122000214201,100242141004122,102024240221040,110320011200230,100011114300334,102303004110022,100110201042101,110134201140010,112101044000202,100040024340013],
1000220132200020: [102231130213210,103214010102022,102000402041014,100043324210140,100023024211011,102404021403141,100010004313001,100003201021001,100122020011232,100121031014040],
1200041022422001: [100021300101110,103010301402112,100011401031000,100020034100101,100122214300222,103134021420204,102042210220100,100103200021130,103204214043011,103020320102002],
1000232101122040: [110011414240022,102202310203222,100042001020203,102002320220202,100010044300003,114130210231020,103301410110110,112114324040000,102124031023100,100204104320231],
110002000000001: [110010000111020,102011041320000,114240012324120,100022010022031,102014140200013,101240110302200,100031204311023,101232001021020,100012121040101,111243002222100],
200211022142211: [102231130213210,103214010102022,100043324210140,102324014112020,102022004012201,100023024211011,111130210020000,102404021403141,100003201021001,100122020011232],
114121130201204: [100040031020241,101100104000222,102324141000002,101010234004042,102041224010012,100411400030100,100002000102100,114340102322021,112033124022002,102120221120200],
104101002123040: [100032320000112,103140011442322,100312120141341,111144020011120,112142204020032,110044031204131,114012010020012,100001420412010,100102110020102,103022000123142],
241101103112120: [110333404433042,112240002043300,110021100132240,103104221443021,101100304000101,100202210410011,114402212310021,102411011111011,103020141414101,100113001040420],
1004020030320422: [100022214203010,110310011234031,110123111300000,100020031041304,102120004000101,102020002000420,110100302010110,100422020030044,101220100303222,100002220411002],
1041010100001110: [111124004101012,114103220220104,102120004001310,102430121110340,101021204000200,112141104020201,101200111020111,110001200422320,103210114021430,114223242332100],
121103011132000: [110021104200001,110441140102024,111140124100100,110310020040401,100002024303323,110030002130201,102103320232241,110302000400221,110111001302000,110210304220000],
1042022101004111: [110021104200001,111140124100100,110031301200114,102103320232241,110302000400221,110111001302000,110210304220000,102033124000111,111000212204213,110010324410130],
102220010222003: [114003212210013,110302012103114,103023101400002,110010120440200,110024140022040,100002100100320,110100034200300,100401201010220,103200024021204,102100204000234],
1442130103121040: [114413100031332,112121020204344,102211030203010,100210000032130,214000110100040,103031420121210,112114222301010,100001001203011,114214404142034,102210220202130],
110204013021024: [110301402000211,103030201402004,110420104221220,103200004030020,100320014310021,102000110221110,110240214432004,102122141142000,103041204010000,114432234113040],
100001440133234: [102111040210242,114110000002430,102010011412143,100424220032043,114013422300032,102021004001004,100131414303044,100201010110320,100000221030033,110101101120302],
114002033000020: [100001121002141,102000040201003,101132131221321,100004440004031,102002011033030,110310300003402,110220022132003,101001000302103,110044112100220,100010120031210],
112123022000131: [100304201000221,100112021020320,103010001422020,102300124023122,114221004100020,100002100124101,102304100210334,100403210010142,112003100241220,102204304000304],
1143001230040001: [101033124003122,100313144100202,103201004024412,114014222302011,103011020110211,110203420111140,111003412201430,101411214000203,110222422042203],
1002403200000231: [100122004300410,102140014001101,102000104012232,112400202020003,102000224000011,114122134140002,102101010202004,114400202310210,100042030201341,112233332034000],
1003123041101031: [110043132100300,102220120203200,100002004201122,110132031134031,1320303202002000,1321200010022312,110130330012140,114102134002330,140403400240000,100003230110201],
242220101103400: [111130112100210,102202200240222,110131044200121,100100004102234,110223332120231,100001024301322,114013044102010,114220114140122,101023021034304,110200210110130],
121030040400021: [110011011212100,101134030011000,103220211421421,110123230124040,131000001020102,112200412033030,110420001000202,100102110402220,110412310030001,114012022302022],
100104011300401: [110320414420310,110100100101041,100420124100211,103010211222002,101040120320230,110013021300010,101043234000412,101220000040002,100422001002123,114010010010002],
1004003211220032: [102400100230000,101100300302102,114120420223002,100002210201212,102302020212101,112410424012010,100030004104210,103030201434002,110232110004010,103021201430000],
223011102020040: [110012420131001,100132040031022,100020310000200,100220004030002,110020002003334,110014000442004,101441204020001,102014201411102,103231320100042,104121201242032],
100200110422111: [103100221440010,114003344011001,100021101000000,102140000212141,101014201030204,101203001010111,102424114020143,100031201030102,101041001003300,114012444020220],
134300111030221: [112100124023402,101020044011003,100002414210010,102010042000041,102242204040102,100021014100124,100130244302034,100122040012210,100014010104404,101020104000001],
114001000330100: [401142101000022,400114001102410,102100000012031,431232000323104,1012100023110102,232023302143031,120200122422404,1040100003200241,111414004440203,1020220210340010],
211222012120321: [112000112000031,100121214300000,102123214020000,102022124012030,114002320022312],
1122000200402140: [102401141113031,103042420143020,102304314110202,110210400002012,113032012204012,112310002020302,100204311000400,100403012200201,112002111121013,114211100001224],
332000024200013: [102120010220042,110103312142034,102210210312302,101120100320100,114140014100000,102110004002301,100130020001030,112022100213011,100101231202322,111210020001013],
120041442412123: [102133011413110,111001200041102,101223020300040,102034324000220,100210000032400,101230020303000,111340130010314,110200422121211,110214220002020,112220414010040],
1011011320300100: [100102024301030,100111134302041,100112234000041,110004244214343,101002101020003,102214021120301,114221224100022,101330210310300,112003021111000,102012141134211],
220020021000101: [100301001000202,104101112102403,100023121223031,114201432320014,120000002023011,102133120200123,101014020301201,102000031130401,101010111002141,114123124143310],
1024011342000021: [102001001130100,111204203012002,124020002020003,122222120003214,133332002141010,144000013213001,124010030100142,112310202021010,110014020020011,100140044020011],
1131011002310011: [102230040200031,110122001010214,114043140010022,102101204000010,110022300420031,100100401040001,114230230000123,100222024320003,103323001400013,114013012300240],
1142024120224002: [102203344011410,100021324100000,102103430210003,100012014300120,102414014022212,102012220241003,101004411032102,101430211012120,100204021000012,103242044020102],
1100004410231120: [110013202003320,101031241000010,102120231343224,110030332100203,100314114322101,114404232310120,103100034001310,114002202210331,100031301020100,110111032140220],
1440220030001122: [114010330030011,103021220104200,101010020320000,112000210211020,100010324210003,101000000343443,110002400011111,100402132200000,111100300024000,103144040104204],
121414301110004: [110144240011020,110413401240204,112000244001222,114441114123013,103220211421421,114000012334102,101000014012442,100312401002102,111022210021013,103110001420121],
130004004220120: [111240340004003,102021024000010,111101222244030,112011012004300,102300010242330,102000401120420,102004012043122,114011102210402,100120001014040,114300100000041],
1013303000401020: [101000024004442,100002100000100,110130030020000],
220041302111231: [100002014200331,100034244210020,102012004000003,100411000030110,102041201121230,103011014042120,100000030120242,102110400210023,101012204221200,111212422222300],
1000031031200013: [101302134233230,100000130010033,101030404212000,114102224000201,100022021041122,100020001042002,100013011020311,100120041020012,102012204000242,114143024003322],
1000303301100221: [111333104124202,101000304014100,100040011023101,110301030010140,104100002101431,101123232010002,114421242204443,110100222001100,103102000121300,110010331230210],
1410211342331412: [111002100000102,114021010040140,114222302321101,102101024002121,110014024202014,110220130100010,100020011030330,102404221402210,110203022032320,101222014230110],
1411020122202044: [100141401021003,102010000231120,101000400320211,101001010300214,103010020142023,110132002212113,110010100040100,102101002002002,111020302232033,110224030114311],
101010042001102: [101221020040220,100103104302044,101041030320401,102141212000200,101203121020430,102020004000014,100000211023014,114144014122041,100201111002224,101410304041000],
204204011102020: [100212030414010,101400004022210,102031021441200,101200020303202,102301324112020,111340300010010,102013224003020,103013020123142,102240344041020,102140202001100],
240000420312002: [110002004410202,102103114000024,102240221000001,112041002002124,114000024101102,140343100002103,400200234320200,100020124204330,100001424102041,100100021040230],
1030000001422430: [102343230224321,103211200100400,102112231020231,100022004300020,102320000240102,100042144200000,102030304001101,100020420121003,103020004011414,100001104301200],
1104234221030010: [110000322130121,101023001002020,111300222202234,100200001210021,103204230111030,104130020011020,101114422122000,102001314013400,114110414140400,111201100011141],
121341130003000: [111102004100101,102021100220101,114000040010421,112042110220004,100000214240410,100433201004014,102301102004413,102003000220111,102010100204023,102414040230400],
100101220022002: [100010024102110,101041200320012,114303400002201,110204211000331,112121014003422,114430102311021,100240444100022,103004411424400,111014002140322],
1023303420422001: [100043104240004,110002034200042,100001240100033,114100304002030,102100001340122,112030010234104,103414101212410,100123021223100,112302011102312,101020030343002],
101114102124321: [110403244220202,103113014002100,110120400402324,100402340010310,112010020211000,100102200000211,103030201240100,102300210210222,114100332340213,111031030024010],
1422302020100030: [114020000030002,114031000022030,100201211003004,102014002000401,103103241421322,114121012340044,102000400240203,102104304023201,103310300140324,100002224244002],
1121202400231120: [101211201202134,103120104030100,100004000100101,102020030220200,110031404423144,110003434410403,111110000014401,100000204312321,101004000304012,110300121220201],
1042001221000013: [114032104100141,101213114040141,102210101002412,111140100011101,110122421241103,112001144002010,101013030304101,100012011022000,102000004000013,102021241324040],
1102433033042110: [110104112010103,111102004100101,100122004300410,102202414041044,102140014001101,102000104012232,102021100220101,114443330001442,100230120001003,114000040010421],
100103201011144: [110102401201204,102400100230000,100212030414010,101200020303202,114120420223002,102013224003020,100002210201212,103013020123142,102302020212101,114303340000001],
240343043023011: [110120144200000,114022412330004,101200221022044,110241112020204,100002004104004,102100224000210,102310140240012,100014204201000,102103321411004,100400001001300],
1020301032424304: [101302134233230,100000130010033,101030404212000,114102224000201,100431000032001,100020001042002,100013011020311,102103331400000,100120041020012,100020001041231],
114200022220040: [100014100100001,102100004130301,111120004100414,101034024000101,101243024011000,103340410140122,100010021221221,111012202111021,100103144302200,101414100300221],
142041243300010: [102102002002242,101130104022002,101230331020012,100004244210201,102420124024204,122312222240401,102041014011340,110200130004300,100140101012000,101400000302141],
1002203102111022: [100434000032200,110004020020022,114032412303041,112000301122111,102020130212402,100010001020000,200000020021022,114321212322303,112302112002211,114202002333330],
1110102121041413: [101012310301211,103112200123144,114242304004011,102302200241241,110001420021023,110201040003402,112301421130130,110020012100302,114412202320010,110021030030202],
114020200220012: [102010114022011,103340041403223,114002200000421,101020321002002,114302210010001,114030004104220,100104004301000,102443211401140,102301041000014,100001111201214],
202011212123200: [102203301123001,103210400110122,101112020300011,114104302341000,100400201010000,102244400201444,101010001000121,102304000220032,102002131132000,100000031024222],
201032103003132: [110212041114210,113210300100002,112404024011000,102131034000220,111212124130140,101002014013010,103402020120010,112110100230023,112003044002004,103020200102200],
120222032001012: [110121100004131,111400214442024,111122200021102,101041100300001,102140011440001,101011220342010,110200004221020,103114211441300,110000222010004,100114000101041],
1100013040001020: [100003031030034,101001044211204,102100010233022,102120400212001,114313022302101,103001324000110,100002014200021,102300401001010,103212320212011,111120034400000],
1001031021300022: [102000020220000,101020104000040,114412142320040,100003044100101,114012402332300,102220211122102,101010110302010,100032121020101,100013224313301,100012244240030],
124000010122242: [100032320000112,103140011442322,100312120141341,111144020011120,112142204020032,110044031204131,114012010020012,100001420412010,100102110020102,102002204001140],
1121211020204132: [110020301320101,102014001130210,110401041140300,110002302201420,102202001120002,110200010003201,102421004041011,102240040202421,101001110300111,100130004300103],
121320202004301: [111014240041102,101100004200110,112021221130424,112200041103002,110400040402041,112001011112202,100112004304314,100232041001122,100223030001010,100104231000300],
1024242230240244: [110322104424202,101202421002031,102102220312102,103004204021310,112220102043102,110012310020200,102030130241300,103030120100220,100232224340041,112400002011010],
1002100010130110: [101400001011042,100002011020211,100100110010010,111110002213142,100002131030310,111102214100400,103220201402330,102321000221140,103113124030200,102110300311400],
200010402222410: [101210320301302,102100221340112,100114104101001,114002244103320,101023221000031,101400014040400,102012034000402,114221004002212,102100122002001,101000011021414],
112300020202111: [103141411420221,122102400312000,110002100110002,1340024302404122,100002001043010,113110330121030,410020013004430,1002300040412102,1210020204140003,123211320000102],
100102203210034: [102023021100022,111200302222011,112040241120204,111000022200000,100010011232324,110220030000133,110000330430311,101211221014003,103111230120100,102221220200021],
1021001032000012: [102020010203200,100011144312020,102011204001010,102001410221023,110130201302200,103041021430301,101100440320434,114000402211404,101000100302003,110000030430422],
1031102424140120: [100011010414200,111121102240240,102002121101110,102403100202003,110000100041101,100400000010033,100101211001320,101141020321000,103224101400400,102000002043020],
102001021434201: [110131122012210,114010200040441,110032014420232,100000344100100,111304022202211,102302011002003,102011021121200,100012441030002,110222042022111,103131004002200],
220100132400104: [1010400230221020,111320012221132,102302144110440,114140004123122,102143202000400,111020002202333,101321311004010,102110241342210,114122302311011,100002320411400],
1110121023020002: [100022041001002,111240340004003,111101222244030,114032424122040,112011012004300,101021401003220,100301020420101,102002202000000,100022024100041,102010410213111],
1042030001044121: [101230040301234,110033032100000,102303014024221,100100224304110,100400432200321,100020200012311,114140144122230,101400201102014,100000020004004,101040040303102],
1100100224104011: [111130112100210,102202200240222,110131044200121,100100004102234,101023014004000,110223332120231,100001024301322,114013044102010,114220114140122,110200210110130],
212002042300000: [100030220100110,110011202100142,112400102013231,114012002212100,100002114314022,114203110001031,112030122000211,111004012200210,100413131013301,110001102103432],
221001240043040: [100420021010013,112101302302202,102212011120200,102344111104233,100432104320110,114021014020000,102121321340000,114200214002024,100111221011100,100023104101340],
1040021200420124: [100042111220111,112220100202000,100042041220010,100403211010100,103240301400123,114102210214041,100021300103041,101010204000242,111334004121414,114101000213410],
1101110030041022: [112013300240132,114303044030300,102340044111034,100110021021010,120003030010112,114200212320001,101020240313001,102330220242002,121400042034110,100033201030040],
1030000302042203: [102110231441040,100113214302002,110000002133140,101233341022410,103021000144000,100100124100031,100040204240000,114100012313002,114440440000034,120240000020201],
1403121230220020: [110200400000020,100324140141402,100002204340200,110000410001034,111140000000000,103102224034002,110022002120332,111110230014001,110020041310021,110001001200323],
203110414022010: [111240220002100,110030344201413,101001014001021,100103224301304,114000100020200,102102100231000,114040202330200,100410331002222,100032204311200,102202111110404],
133032004200001: [110314100012011,111020422203010,103121124002320,102240300202212,114140422203401,110210420000001,102210331401030,101102104000100,102031010200122,111104202112001],
1120000102422014: [102140211342044,110020301220411,110100200004404,110131142010001,101032441003040,112004400210201,110004102230402,112021004020000,101012004001204,110420001344420],
1000030222010034: [100320031001240,102104320220212,102100200232044,102434104021201,102300004022020,103003301240122,100210024101140,112220011101022,101000130304001,103020024043010],
300210002300114: [112100021100243,101123001100112,112003002001002,111000200012220,102120100230203,100111124101112,112001010233001,103313021401232,110430034224001,114110200221240],
1421020010140012: [110010002014100,112030001120220,114231034004040,100100301202103,110301021331320,112001100232420,114020244023001,110342014420100,110200402220240,100114200402104],
300310000040102: [110301222101110,102023301102400,102012204010044,104120110010402,111010214402000,110401440001204,110204310112224,110303104423010,110340022023321,114010200012200],
101010002031100: [110324020012330,100001134302201,102100011402012,100020221031000,102032220220223,114042004020130,114030400021223,102022021100102,103301304031211,102000004013440],
1032221241200404: [104120011242011,110020002104100,102000301101102,111122200021102,114032234033303,100000020201032,112011204004112,100003000002034,110102022000200,102200034040443],
1131211404001020: [110200100002031,101420310301020,100020330100100,1002443410030003,111103224103001,100011231042030,102100141413100,102110021342213,112002310211210,100010031020102],
132013120010104: [111300232202103,102301401002404,110122120100022,102012004010000,102400100220000,102001104012021,100242444300104,102002104014323,100401241010122,100010010102122],
130000310202012: [114200044114004,102202010204130,100220101204311,100002000102100,102100430233212,103003210141014,110421222011222,100201000410010,112110202014400,100400030013200],
1111023010042112: [100000201220420,114000030021040,101002011020001,113141112413242,102021111130104,110401030101001,102201214011331,100013011043231,102031114002042,112302012020342],
121010001111310: [101400220300033,100012021032040,102001130224032,100012021040320,102130224023220,102100401143124,100010001030140,102110101410144,114142204121324,112102421102022],
1113001011001223: [110202400003213,114112014140000,110001012101334,114200124000134,100001114302424,101222134011002,100003011231420,103230231200010,100431031014023,100120120401041],
142034000000112: [100310304310342,100010041031020,100042400001023,112402000200101,100020201221000,101010020301243,102030001103000,100040021031201,111010130022401,112140214000020],
1012401424032042: [102401121113302,111201210000103,100101101010100,101320040302031,114010102300010,102321044112101,102000014002000,101034014011332,101203011023020,100021231020200],
1103400010414230: [102010114022011,103340041403223,114302210010001,114030004104220,102443211401140,100000221020014,102003224000022,100010031222001,103001114001210,100001111201214],
1400000301203010: [114122134140002,103012011434022,101032010342004,102322330243202,103101411424112,100100114103001,102134131144121,114030222300123,102102301021403,114103402201140],
214024214401412: [102220242000201,103010021410203,101000304214111,102120040200200,103202220100114,111100132103413,112203220204012,100113040003021,100010204200010,102410134022300],
1221011020200210: [110041030411301,110001041213031,101110044200100,102411000200000,110414300030001,100030020011022,114100122342002,111202012222402,110033041200104,102000221442214],
1000120020110040: [110200411140130,110114120120100,103124210100201,111014200024034,102301002004242,112020014000340,110021404214210,101003014210212,100130220400031,100210024300001],
201001342101001: [102001444000222,101400134043222,100010400013202,114340312301134,114310242320310,102110021401100,100113201011032,102230000204000,103102044031002,100011421002120],
1000000133120020: [110230000000002,110220301202400,101003140322000,110401001002400,110210024220020,114201142324042,100401424300314,102213401111223,100040120000031,102002104011244],
101400102142302: [100004211221210,110201001212001,110200134240420,110403221000002,101201304200320,114022042302201,112002320212232,100002210413202,103140021401211,102404042001400],
100203202010202: [101000114000402,100043100123004,102014014000100,112212042024100,112410322024122,102300110221100,110002332102000,112001111111212,100020104300430,112041020221402],
1011401013024410: [114444244144110,114201004110120,114033234032101,100412114304000,103001020120202,101004411032102,101000020302400,102021014012210,102244221114031,112400000200011],
131012202321232: [110041030411301,110001041213031,103301210142043,100002024200142,101110044200100,102411000200000,110414300030001,100030020011022,100400014321101,114100122342002],
100303010210441: [102304034110400,100043010120010,102101124001310,110030110122402,102430300201034,100004121030301,102143432000102,101031001004201,114001110033210,110120011014412],
1111011030001020: [100032041021210,110001041213031,101110044200100,110414300030001,100030020011022,111202012222402,102000221442214,102040041320010,102402104022000,103203401400132],
1420144022031000: [101240220300022,102200130202201,100002010014040,100022000410041,103020000102011,103001031420203,102301324112020,101040421033204,103130011441212,102240344041020],
104012440012200: [103000001432331,110131110401120,103000004011102,100120041014034,112111110422120,102130300200301,100121100002240,110224032100120,101104220322100,100001001201301],
120311002000121: [101220014012231,100104024303320,114422004110130,112210132031001,102010310213001,103244320114100,103422430124013,100121424231222],
1002120002022122: [101201220300012,101232104042101,101220404012304,101000304000400,102020204010341,112020100202034,100140410020232,101221004040204,100412414304143,102110404001401],
1040331220001000: [102041310200042,110301030010140,104100002101431,110221000111302,101010001002000,110022140120012,111330020002213,102011440201423,114011212320331,100002204310140],
142030323441430: [110020120023302,100043120003103,110002000410040,100433001014104,100000004102121,101022411030002,102000021442024,100030314100004,100404130020000,110400302114103],
1020000001100400: [110001202000032,100024204211200,114200004110244,114230412332011,101210101010023,114422114140221,100001211042341,114032104123010,114303122321110,114330204034200],
1001130242231402: [100042320001030,110024001010110,100100014300230,102043104010014,100001004300242,111120044100110,102200201110443,114240314000100,101002411030000,102002324011002],
1014202010302102: [114200212322001,100401210020002,112210322040121,100402042200000,100010111022102,102000224000000,100002104302110,101022440300020,101402010301000,100000220111021],
144100013100013: [114020200020222,114330002323002,110213022100021,102202000240014,114424424144210,101224321014333,102100121342000,102204321003224,111222002224302,114100104030132],
101213002101214: [114014104033302,102120200221403,110323110104120,102110314001121,101020104000300,114003402300102,111202104100014,112402102021111,103344224020241,101002114000424],
1012442110402240: [104310001300102,114021132202032,112012004004324,103001314001021,114122220031004,103040024041111,101004004213014,111124002211203,112034420220403,101211231023001],
1002210200000101: [101232314010241,100301300121100,110042400130000,103020124010320,100021300101201,110230022122031,101000444214021,112102112301130,100414004300100,112102232300222],
102400210400022: [110010334200041,100001024102001,321114010100340,102322101000200,102110100322014,110413131000201,100000100000432,101020021114310,114200104110001,100010314202003],
1043003020000104: [114101024120020,102004011440212,100000001021013,100011204300012,100101234330123,110024000432310,102012101443110,110032220022103,102102104000102,101210031201114],
1243002022101103: [110203120112002,110000002100100,102221410200012,110213401202110,102300104113403,102213224110010,102300004112300,102420024020120,100040020001022,102141140210200],
1421204200010112: [100140101044224,101120004023340,103040031403031,101030014001004,102012421412000,100010044300131,111010130002222,110200300000010,110144102001321,100020304102100],
1003013101233010: [102400111402142,100040240004024,100010000002400,141030301403400,100000024100211,103224200113121,110140101300004,120020211120020,110230010010142,101002041000124],
101432210141203: [100310120141002,102324014112020,100301421002002,102404021403141,103022021431010,103200020111014,100010004313001,103104200210410,100002200410030,100121031014040],
1104003002031130: [102420020200202,1023413000203201],
112011130023123: [101020014011102],
1102310021242142: [100022041001002,111240340004003,102021024000010,111101222244030,114032424122040,112011012004300,101021401003220,102000401120420,102010410213111,102004012043122],
1002120410241114: [100222000144103,101240000310400,114022002211402,102311020240221,110211000004002,114101402200030,100000034200300,100010124300001,114200022322000,103133001441320],
1022312211002000: [102123140310122,110101214402023,110400014224001,101410030330122,111000144410222,100040001022000,100222404342030,112204022020043,112320100202120,110222241300101],
1402011002013421: [101043042401310,104001222102120,111412020003222,101113012101010,111212022200012,110023132100402,102022102010211,110231202040203,112112410232001,100021220002020],
1010121032032211: [100002024340032,110000011220401,102112340210300,114100214000010,100000104201200,103000304010211,100010000002132,114100042311312,101000014001020,103014141222013],
1020030002101022: [102040431121110,100400002201113,114111100021420,112024212301420,100300310140241,100240134323001,102111024000402,102001011100100,114402104142102,103312241402001],
1010240311230422: [110200400000020,110203131211112,102301004110004,110021100023210,110402420402001,103122000100403,110000112100402,100002204340200,114103412201420,111140000000000],
1401004204000003: [102102011412000,114204030210041,110033101044200,114440120000030,101002004014401,100400224323100,102112112004300,102010014001001,102000001440004,110321100402021],
244331203200002: [101221422120021,100001000002210,100400320033201,100004130002301,101210300302302,102020131413423,110020204200342,100000220011133,102144012001113,114431110001011],
1130301120301001: [100401140034220,114002002300222,101242004043300,101013111000000,102102131340404,102420041111041,112004312310301,100040121030000,102000404003020,103111020223101],
1002000403000204: [103343014022202,102003104011000,102220330202001,100022200120040,103002020110334,101021134012211,102140200310400,110010002103002,100220034341000,110200002121242],
221400100111330: [102031100200300,114102134122020,110024102010002,103010034004303,102023032000000,102101440201242,103000204003200,100021044343014,111433000002144,110001002002424],
112133300441020: [110120101220000,101040014002010,112001001133213,102000144014000,124204212002003,102311101002242,102110210210004,100000100412410,100012441220104,111032320020122],
133313202104310: [110013334202122,103330104030103,111200002220003,112043421121002,102212030201100,100111401011110,102010341440030,100001120100002,114001000024011,101021024012041],
100143040212102: [100422314300200,101210241204043,100123011004113,100420001002101,101134224001001,101021204000200,102321234111310,101200111020111,102000024000002,111122222240232],
1432344120130401: [101001040340004,101013004002011,112022334022101,101011324004222,101410401010030,103210241422022,103240001403000,102202204040220,102121010212140,110020124200230],
1100030203241342: [100440104303400,102122000214201,102220241100202,102024240221040,110320011200230,100342024100204,101422034042201,100011114300334,102300001002140,110040122000002],
1011102003042024: [104110021244203,102224101113104,102103022002001,102102224000012,102100102001032,100142431013000,101000031001011,101100001102230,101010130321200,114004322330012],
300210041431432: [103114001441122,100422004320024,114211110023303,111000302230100,114044144020400,103001101422012,4000113102314022,100022204104123,1400100000042343,1002411322210040],
241241343121111: [102021001132040,100032201024101,102301004110004,110402420402001,110000112100402,110021021210300,110404000000012,102000101120123,110221022101104,110023100402320],
1100142404020120: [101210144014403,102111420233012,110000020022324,100020231201012,102210424042300,110220032120121,102130240221020,100101101020002,114442102312001,100030201024402],
110001043032202: [114240012324120,101022011003022,101240101000020,100003101003001,102313100240202,102000014002101,102110422004402,102100411340400,101300414221100,114020210020424],
1100103200140000: [110013202003320,101031241000010,102120231343224,110030332100203,114404232310120,103100034001310,114002202210331,100031301020100,110111032140220,100020010200022],
212420003003124: [103000001432331,110131110401120,112111110422120,100121100002240,110011010001020,100001004340202,100220100432201,101000140200002,110140001201040,110001041310202],
| python | MIT | c3cc5a95d4f72e6ccc27ebae23344fce8cc70786 | 2026-01-04T14:40:23.735327Z | true |
psf/black | https://github.com/psf/black/blob/c3cc5a95d4f72e6ccc27ebae23344fce8cc70786/profiling/list_big.py | profiling/list_big.py | config = some.Structure(
value=set([u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
u'some_rather_long_text_value',
| python | MIT | c3cc5a95d4f72e6ccc27ebae23344fce8cc70786 | 2026-01-04T14:40:23.735327Z | true |
psf/black | https://github.com/psf/black/blob/c3cc5a95d4f72e6ccc27ebae23344fce8cc70786/profiling/dict_big.py | profiling/dict_big.py | config = some.Structure(
some_mapping={
"00501": "AB890X",
"00544": "AB890X",
"01001": "AB889X",
"01002": "AB889X",
"01003": "AB889X",
"01004": "AB889X",
"01005": "AB889X",
"01007": "AB889X",
"01008": "AB889X",
"01009": "AB889X",
"01010": "AB889X",
"01011": "AB889X",
"01012": "AB889X",
"01013": "AB889X",
"01014": "AB889X",
"01020": "AB889X",
"01021": "AB889X",
"01022": "AB889X",
"01026": "AB889X",
"01027": "AB889X",
"01028": "AB889X",
"01029": "AB889X",
"01030": "AB889X",
"01031": "AB889X",
"01032": "AB889X",
"01033": "AB889X",
"01034": "AB889X",
"01035": "AB889X",
"01036": "AB889X",
"01037": "AB889X",
"01038": "AB889X",
"01039": "AB889X",
"01040": "AB889X",
"01041": "AB889X",
"01050": "AB889X",
"01053": "AB889X",
"01054": "AB889X",
"01056": "AB889X",
"01057": "AB889X",
"01059": "AB889X",
"01060": "AB889X",
"01061": "AB889X",
"01062": "AB889X",
"01063": "AB889X",
"01066": "AB889X",
"01068": "AB889X",
"01069": "AB889X",
"01070": "AB889X",
"01071": "AB889X",
"01072": "AB889X",
"01073": "AB889X",
"01074": "AB889X",
"01075": "AB889X",
"01077": "AB889X",
"01079": "AB889X",
"01080": "AB889X",
"01081": "AB889X",
"01082": "AB889X",
"01083": "AB889X",
"01084": "AB889X",
"01085": "AB889X",
"01086": "AB889X",
"01088": "AB889X",
"01089": "AB889X",
"01090": "AB889X",
"01092": "AB889X",
"01093": "AB889X",
"01094": "AB889X",
"01095": "AB889X",
"01096": "AB889X",
"01097": "AB889X",
"01098": "AB889X",
"01101": "AB889X",
"01102": "AB889X",
"01103": "AB889X",
"01104": "AB889X",
"01105": "AB889X",
"01106": "AB889X",
"01107": "AB889X",
"01108": "AB889X",
"01109": "AB889X",
"01111": "AB889X",
"01115": "AB889X",
"01116": "AB889X",
"01118": "AB889X",
"01119": "AB889X",
"01128": "AB889X",
"01129": "AB889X",
"01133": "AB889X",
"01138": "AB889X",
"01139": "AB889X",
"01144": "AB889X",
"01151": "AB889X",
"01152": "AB889X",
"01195": "AB889X",
"01199": "AB889X",
"01201": "AB890X",
"01202": "AB889X",
"01203": "AB889X",
"01220": "AB890X",
"01222": "AB889X",
"01223": "AB889X",
"01224": "AB889X",
"01225": "AB889X",
"01226": "AB889X",
"01227": "AB889X",
"01229": "AB889X",
"01230": "AB889X",
"01235": "AB889X",
"01236": "AB889X",
"01237": "AB889X",
"01238": "AB889X",
"01240": "AB889X",
"01242": "AB889X",
"01243": "AB889X",
"01244": "AB889X",
"01245": "AB889X",
"01247": "AB890X",
"01252": "AB889X",
"01253": "AB889X",
"01254": "AB889X",
"01255": "AB889X",
"01256": "AB889X",
"01257": "AB890X",
"01258": "AB889X",
"01259": "AB889X",
"01260": "AB889X",
"01262": "AB889X",
"01263": "AB889X",
"01264": "AB889X",
"01266": "AB889X",
"01267": "AB890X",
"01270": "AB889X",
"01301": "AB889X",
"01302": "AB889X",
"01330": "AB889X",
"01331": "AB889X",
"01337": "AB889X",
"01338": "AB889X",
"01339": "AB889X",
"01340": "AB889X",
"01341": "AB889X",
"01342": "AB889X",
"01343": "AB889X",
"01344": "AB889X",
"01346": "AB889X",
"01347": "AB889X",
"01349": "AB889X",
"01350": "AB889X",
"01351": "AB889X",
"01354": "AB889X",
"01355": "AB889X",
"01360": "AB889X",
"01364": "AB889X",
"01366": "AB889X",
"01367": "AB890X",
"01368": "AB889X",
"01370": "AB889X",
"01373": "AB889X",
"01375": "AB889X",
"01376": "AB889X",
"01378": "AB889X",
"01379": "AB889X",
"01380": "AB889X",
"01420": "AB889X",
"01430": "AB889X",
"01431": "AB889X",
"01432": "AB889X",
"01434": "AB889X",
"01436": "AB889X",
"01438": "AB889X",
"01440": "AB889X",
"01441": "AB889X",
"01450": "AB889X",
"01451": "AB889X",
"01452": "AB889X",
"01453": "AB889X",
"01460": "AB889X",
"01462": "AB889X",
"01463": "AB889X",
"01464": "AB889X",
"01467": "AB889X",
"01468": "AB889X",
"01469": "AB889X",
"01470": "AB889X",
"01471": "AB889X",
"01472": "AB889X",
"01473": "AB889X",
"01474": "AB889X",
"01475": "AB889X",
"01477": "AB889X",
"01501": "AB889X",
"01503": "AB889X",
"01504": "AB889X",
"01505": "AB889X",
"01506": "AB889X",
"01507": "AB889X",
"01508": "AB889X",
"01509": "AB889X",
"01510": "AB889X",
"01515": "AB889X",
"01516": "AB889X",
"01517": "AB889X",
"01518": "AB889X",
"01519": "AB889X",
"01520": "AB889X",
"01521": "AB889X",
"01522": "AB889X",
"01523": "AB889X",
"01524": "AB889X",
"01525": "AB889X",
"01526": "AB889X",
"01527": "AB889X",
"01529": "AB889X",
"01531": "AB889X",
"01532": "AB889X",
"01534": "AB889X",
"01535": "AB889X",
"01536": "AB889X",
"01537": "AB889X",
"01538": "AB889X",
"01540": "AB889X",
"01541": "AB889X",
"01542": "AB889X",
"01543": "AB889X",
"01545": "AB889X",
"01546": "AB889X",
"01550": "AB889X",
"01560": "AB889X",
"01561": "AB889X",
"01562": "AB889X",
"01564": "AB889X",
"01566": "AB889X",
"01568": "AB889X",
"01569": "AB889X",
"01570": "AB889X",
"01571": "AB889X",
"01580": "AB889X",
"01581": "AB889X",
"01582": "AB889X",
"01583": "AB889X",
"01585": "AB889X",
"01586": "AB889X",
"01588": "AB889X",
"01590": "AB889X",
"01601": "AB889X",
"01602": "AB889X",
"01603": "AB889X",
"01604": "AB889X",
"01605": "AB889X",
"01606": "AB889X",
"01607": "AB889X",
"01608": "AB889X",
"01609": "AB889X",
"01610": "AB889X",
"01611": "AB889X",
"01612": "AB889X",
"01613": "AB889X",
"01614": "AB889X",
"01615": "AB889X",
"01653": "AB889X",
"01654": "AB889X",
"01655": "AB889X",
"01701": "AB889X",
"01702": "AB889X",
"01703": "AB889X",
"01704": "AB889X",
"01705": "AB889X",
"01718": "AB889X",
"01719": "AB889X",
"01720": "AB889X",
"01721": "AB889X",
"01730": "AB889X",
"01731": "AB889X",
"01740": "AB889X",
"01741": "AB889X",
"01742": "AB889X",
"01745": "AB889X",
"01746": "AB889X",
"01747": "AB889X",
"01748": "AB889X",
"01749": "AB889X",
"01752": "AB889X",
"01754": "AB889X",
"01756": "AB889X",
"01757": "AB889X",
"01760": "AB889X",
"01770": "AB889X",
"01772": "AB889X",
"01773": "AB889X",
"01775": "AB889X",
"01776": "AB889X",
"01778": "AB889X",
"01784": "AB889X",
"01801": "AB889X",
"01803": "AB889X",
"01805": "AB889X",
"01806": "AB889X",
"01807": "AB889X",
"01808": "AB889X",
"01810": "AB889X",
"01812": "AB889X",
"01813": "AB889X",
"01815": "AB889X",
"01821": "AB889X",
"01822": "AB889X",
"01824": "AB889X",
"01826": "AB889X",
"01827": "AB889X",
"01830": "AB889X",
"01831": "AB889X",
"01832": "AB889X",
"01833": "AB889X",
"01834": "AB889X",
"01835": "AB889X",
"01840": "AB889X",
"01841": "AB889X",
"01842": "AB889X",
"01843": "AB889X",
"01844": "AB889X",
"01845": "AB889X",
"01850": "AB889X",
"01851": "AB889X",
"01852": "AB889X",
"01853": "AB889X",
"01854": "AB889X",
"01860": "AB889X",
"01862": "AB889X",
"01863": "AB889X",
"01864": "AB889X",
"01865": "AB889X",
"01866": "AB889X",
"01867": "AB889X",
"01876": "AB889X",
"01879": "AB889X",
"01880": "AB889X",
"01885": "AB889X",
"01886": "AB889X",
"01887": "AB889X",
"01888": "AB889X",
"01889": "AB889X",
"01890": "AB889X",
"01899": "AB889X",
"01901": "AB889X",
"01902": "AB889X",
"01903": "AB889X",
"01904": "AB889X",
"01905": "AB889X",
"01906": "AB889X",
"01907": "AB889X",
"01908": "AB889X",
"01910": "AB889X",
"01913": "AB889X",
"01915": "AB889X",
"01921": "AB889X",
"01922": "AB889X",
"01923": "AB889X",
"01929": "AB889X",
"01930": "AB889X",
"01931": "AB889X",
"01936": "AB889X",
"01937": "AB889X",
"01938": "AB889X",
"01940": "AB889X",
"01944": "AB889X",
"01945": "AB889X",
"01949": "AB889X",
"01950": "AB889X",
"01951": "AB889X",
"01952": "AB889X",
"01960": "AB889X",
"01961": "AB889X",
"01965": "AB889X",
"01966": "AB889X",
"01969": "AB889X",
"01970": "AB889X",
"01971": "AB889X",
"01982": "AB889X",
"01983": "AB889X",
"01984": "AB889X",
"01985": "AB889X",
"02018": "AB889X",
"02019": "AB889X",
"02020": "AB889X",
"02021": "AB889X",
"02025": "AB889X",
"02026": "AB889X",
"02027": "AB889X",
"02030": "AB889X",
"02031": "AB889X",
"02032": "AB889X",
"02035": "AB889X",
"02038": "AB889X",
"02040": "AB889X",
"02041": "AB889X",
"02043": "AB889X",
"02044": "AB889X",
"02045": "AB889X",
"02047": "AB889X",
"02048": "AB889X",
"02050": "AB889X",
"02051": "AB889X",
"02052": "AB889X",
"02053": "AB889X",
"02054": "AB889X",
"02055": "AB889X",
"02056": "AB889X",
"02059": "AB889X",
"02060": "AB889X",
"02061": "AB889X",
"02062": "AB889X",
"02065": "AB889X",
"02066": "AB889X",
"02067": "AB889X",
"02070": "AB889X",
"02071": "AB889X",
"02072": "AB889X",
"02081": "AB889X",
"02090": "AB889X",
"02093": "AB889X",
"02108": "AB889X",
"02109": "AB889X",
"02110": "AB889X",
"02111": "AB889X",
"02112": "AB889X",
"02113": "AB889X",
"02114": "AB889X",
"02115": "AB889X",
"02116": "AB889X",
"02117": "AB889X",
"02118": "AB889X",
"02119": "AB889X",
"02120": "AB889X",
"02121": "AB889X",
"02122": "AB889X",
"02123": "AB889X",
"02124": "AB889X",
"02125": "AB889X",
"02126": "AB889X",
"02127": "AB889X",
"02128": "AB889X",
"02129": "AB889X",
"02130": "AB889X",
"02131": "AB889X",
"02132": "AB889X",
"02133": "AB889X",
"02134": "AB889X",
"02135": "AB889X",
"02136": "AB889X",
"02137": "AB889X",
"02138": "AB889X",
"02139": "AB889X",
"02140": "AB889X",
"02141": "AB889X",
"02142": "AB889X",
"02143": "AB889X",
"02144": "AB889X",
"02145": "AB889X",
"02148": "AB889X",
"02149": "AB889X",
"02150": "AB889X",
"02151": "AB889X",
"02152": "AB889X",
"02153": "AB889X",
"02155": "AB889X",
"02156": "AB889X",
"02163": "AB889X",
"02169": "AB889X",
"02170": "AB889X",
"02171": "AB889X",
"02176": "AB889X",
"02180": "AB889X",
"02184": "AB889X",
"02185": "AB889X",
"02186": "AB889X",
"02187": "AB889X",
"02188": "AB889X",
"02189": "AB889X",
"02190": "AB889X",
"02191": "AB889X",
"02196": "AB889X",
"02199": "AB889X",
"02201": "AB889X",
"02203": "AB889X",
"02204": "AB889X",
"02205": "AB889X",
"02206": "AB889X",
"02207": "AB889X",
"02210": "AB889X",
"02211": "AB889X",
"02212": "AB889X",
"02215": "AB889X",
"02216": "AB889X",
"02217": "AB889X",
"02222": "AB889X",
"02228": "AB889X",
"02238": "AB889X",
"02239": "AB889X",
"02241": "AB889X",
"02266": "AB889X",
"02269": "AB889X",
"02283": "AB889X",
"02284": "AB889X",
"02293": "AB889X",
"02295": "AB889X",
"02297": "AB889X",
"02298": "AB889X",
"02301": "AB889X",
"02302": "AB889X",
"02303": "AB889X",
"02304": "AB889X",
"02305": "AB889X",
"02322": "AB889X",
"02324": "AB889X",
"02325": "AB889X",
"02327": "AB889X",
"02330": "AB889X",
"02331": "AB889X",
"02332": "AB889X",
"02333": "AB889X",
"02334": "AB889X",
"02337": "AB889X",
"02338": "AB889X",
"02339": "AB889X",
"02340": "AB889X",
"02341": "AB889X",
"02343": "AB889X",
"02344": "AB889X",
"02345": "AB889X",
"02346": "AB889X",
"02347": "AB889X",
"02348": "AB889X",
"02349": "AB889X",
"02350": "AB889X",
"02351": "AB889X",
"02355": "AB889X",
"02356": "AB889X",
"02357": "AB889X",
"02358": "AB889X",
"02359": "AB889X",
"02360": "AB889X",
"02361": "AB889X",
"02362": "AB889X",
"02364": "AB889X",
"02366": "AB889X",
"02367": "AB889X",
"02368": "AB889X",
"02370": "AB889X",
"02375": "AB889X",
"02379": "AB889X",
"02381": "AB889X",
"02382": "AB889X",
"02420": "AB889X",
"02421": "AB889X",
"02445": "AB889X",
"02446": "AB889X",
"02447": "AB889X",
"02451": "AB889X",
"02452": "AB889X",
"02453": "AB889X",
"02454": "AB889X",
"02455": "AB889X",
"02456": "AB889X",
"02457": "AB889X",
"02458": "AB890X",
"02459": "AB889X",
"02460": "AB889X",
"02461": "AB889X",
"02462": "AB889X",
"02464": "AB889X",
"02465": "AB889X",
"02466": "AB889X",
"02467": "AB889X",
"02468": "AB889X",
"02471": "AB889X",
"02472": "AB889X",
"02474": "AB889X",
"02475": "AB889X",
"02476": "AB889X",
"02477": "AB889X",
"02478": "AB889X",
"02479": "AB889X",
"02481": "AB889X",
"02482": "AB889X",
"02492": "AB889X",
"02493": "AB889X",
"02494": "AB889X",
"02495": "AB889X",
"02532": "AB889X",
"02534": "AB889X",
"02535": "AB889X",
"02536": "AB889X",
"02537": "AB889X",
"02538": "AB889X",
"02539": "AB889X",
"02540": "AB889X",
"02541": "AB889X",
"02542": "AB889X",
"02543": "AB889X",
"02552": "AB889X",
"02553": "AB889X",
"02554": "AB889X",
"02556": "AB889X",
"02557": "AB889X",
"02558": "AB889X",
"02559": "AB889X",
"02561": "AB889X",
"02562": "AB889X",
"02563": "AB889X",
"02564": "AB889X",
"02565": "AB889X",
"02568": "AB889X",
"02571": "AB889X",
"02573": "AB889X",
"02574": "AB889X",
"02575": "AB889X",
"02576": "AB889X",
"02584": "AB889X",
"02601": "AB889X",
"02630": "AB889X",
"02631": "AB889X",
"02632": "AB889X",
"02633": "AB889X",
"02634": "AB889X",
"02635": "AB889X",
"02636": "AB889X",
"02637": "AB889X",
"02638": "AB889X",
"02639": "AB889X",
"02641": "AB889X",
"02642": "AB889X",
"02643": "AB889X",
"02644": "AB889X",
"02645": "AB889X",
"02646": "AB889X",
"02647": "AB889X",
"02648": "AB889X",
"02649": "AB889X",
"02650": "AB889X",
"02651": "AB889X",
"02652": "AB889X",
"02653": "AB889X",
"02655": "AB889X",
"02657": "AB889X",
"02659": "AB889X",
"02660": "AB889X",
"02661": "AB889X",
"02662": "AB889X",
"02663": "AB889X",
"02664": "AB889X",
"02666": "AB889X",
"02667": "AB889X",
"02668": "AB889X",
"02669": "AB889X",
"02670": "AB889X",
"02671": "AB889X",
"02672": "AB889X",
"02673": "AB889X",
"02675": "AB889X",
"02702": "AB889X",
"02703": "AB889X",
"02712": "AB889X",
"02713": "AB889X",
"02714": "AB889X",
"02715": "AB889X",
"02717": "AB889X",
"02718": "AB889X",
"02719": "AB889X",
"02720": "AB889X",
"02721": "AB889X",
"02722": "AB889X",
"02723": "AB889X",
"02724": "AB889X",
"02725": "AB889X",
"02726": "AB889X",
"02738": "AB889X",
"02739": "AB889X",
"02740": "AB889X",
"02741": "AB889X",
"02742": "AB889X",
"02743": "AB889X",
"02744": "AB889X",
"02745": "AB889X",
"02746": "AB889X",
"02747": "AB889X",
"02748": "AB889X",
"02760": "AB889X",
"02761": "AB889X",
"02762": "AB889X",
"02763": "AB889X",
"02764": "AB889X",
"02766": "AB889X",
"02767": "AB889X",
"02768": "AB889X",
"02769": "AB889X",
"02770": "AB889X",
"02771": "AB889X",
"02777": "AB889X",
"02779": "AB889X",
"02780": "AB889X",
"02783": "AB889X",
"02790": "AB889X",
"02791": "AB889X",
"02801": "AB889X",
"02802": "AB889X",
"02804": "AB889X",
"02806": "AB889X",
"02807": "AB889X",
"02808": "AB889X",
"02809": "AB889X",
"02812": "AB889X",
"02813": "AB889X",
"02814": "AB889X",
"02815": "AB889X",
"02816": "AB889X",
"02817": "AB889X",
"02818": "AB889X",
"02822": "AB889X",
"02823": "AB889X",
"02824": "AB889X",
"02825": "AB889X",
"02826": "AB889X",
"02827": "AB889X",
"02828": "AB889X",
"02829": "AB889X",
"02830": "AB889X",
"02831": "AB889X",
"02832": "AB889X",
"02833": "AB889X",
"02835": "AB889X",
"02836": "AB889X",
"02837": "AB889X",
"02838": "AB889X",
"02839": "AB889X",
"02840": "AB889X",
"02841": "AB889X",
"02842": "AB889X",
"02852": "AB889X",
"02854": "AB889X",
"02857": "AB889X",
"02858": "AB889X",
"02859": "AB889X",
"02860": "AB889X",
"02861": "AB889X",
"02862": "AB889X",
"02863": "AB889X",
"02864": "AB889X",
"02865": "AB889X",
"02871": "AB889X",
"02872": "AB889X",
"02873": "AB889X",
"02874": "AB889X",
"02875": "AB889X",
"02876": "AB889X",
"02877": "AB889X",
"02878": "AB889X",
"02879": "AB889X",
"02880": "AB889X",
"02881": "AB889X",
"02882": "AB889X",
"02883": "AB889X",
"02885": "AB889X",
"02886": "AB889X",
"02887": "AB889X",
"02888": "AB889X",
"02889": "AB889X",
"02891": "AB889X",
"02892": "AB889X",
"02893": "AB889X",
"02894": "AB889X",
"02895": "AB889X",
"02896": "AB889X",
"02898": "AB889X",
"02901": "AB889X",
"02902": "AB889X",
"02903": "AB889X",
"02904": "AB889X",
"02905": "AB889X",
"02906": "AB889X",
"02907": "AB889X",
"02908": "AB889X",
"02909": "AB889X",
"02910": "AB889X",
"02911": "AB889X",
"02912": "AB889X",
"02914": "AB889X",
"02915": "AB889X",
"02916": "AB889X",
"02917": "AB889X",
"02918": "AB889X",
"02919": "AB889X",
"02920": "AB889X",
"02921": "AB889X",
"02940": "AB889X",
"03031": "AB889X",
"03032": "AB889X",
"03033": "AB889X",
"03034": "AB889X",
"03036": "AB889X",
"03037": "AB889X",
"03038": "AB889X",
"03040": "AB889X",
"03041": "AB889X",
"03042": "AB889X",
"03043": "AB889X",
"03044": "AB889X",
"03045": "AB889X",
"03046": "AB889X",
"03047": "AB889X",
"03048": "AB889X",
"03049": "AB889X",
"03051": "AB889X",
"03052": "AB889X",
"03053": "AB889X",
"03054": "AB889X",
"03055": "AB889X",
"03057": "AB889X",
"03060": "AB889X",
"03061": "AB889X",
"03062": "AB889X",
"03063": "AB889X",
"03064": "AB889X",
"03070": "AB889X",
"03071": "AB889X",
"03073": "AB889X",
"03076": "AB889X",
"03077": "AB889X",
"03079": "AB889X",
"03082": "AB889X",
"03084": "AB889X",
"03086": "AB889X",
"03087": "AB889X",
"03101": "AB889X",
"03102": "AB889X",
"03103": "AB889X",
"03104": "AB889X",
"03105": "AB889X",
"03106": "AB889X",
"03107": "AB889X",
"03108": "AB889X",
"03109": "AB889X",
"03110": "AB889X",
"03111": "AB889X",
"03215": "AB889X",
"03216": "AB889X",
"03217": "AB889X",
"03218": "AB889X",
"03220": "AB889X",
"03221": "AB889X",
"03222": "AB889X",
"03223": "AB889X",
"03224": "AB889X",
"03225": "AB889X",
"03226": "AB889X",
"03227": "AB889X",
"03229": "AB889X",
"03230": "AB889X",
"03231": "AB889X",
"03233": "AB889X",
"03234": "AB889X",
"03235": "AB889X",
"03237": "AB889X",
"03238": "AB889X",
"03240": "AB889X",
"03241": "AB889X",
"03242": "AB889X",
"03243": "AB889X",
"03244": "AB889X",
"03245": "AB889X",
"03246": "AB889X",
"03247": "AB889X",
"03249": "AB889X",
"03251": "AB889X",
"03252": "AB889X",
"03253": "AB889X",
"03254": "AB889X",
"03255": "AB889X",
"03256": "AB889X",
"03257": "AB889X",
"03258": "AB889X",
"03259": "AB889X",
"03260": "AB889X",
"03261": "AB889X",
"03262": "AB889X",
"03263": "AB889X",
"03264": "AB889X",
"03266": "AB889X",
"03268": "AB889X",
"03269": "AB889X",
"03272": "AB889X",
"03273": "AB889X",
"03274": "AB889X",
"03275": "AB889X",
"03276": "AB889X",
"03278": "AB889X",
"03279": "AB889X",
"03280": "AB889X",
"03281": "AB889X",
"03282": "AB889X",
"03284": "AB889X",
"03287": "AB889X",
"03289": "AB889X",
"03290": "AB889X",
"03291": "AB889X",
"03293": "AB889X",
"03298": "AB889X",
"03299": "AB889X",
"03301": "AB889X",
"03302": "AB889X",
"03303": "AB889X",
"03304": "AB889X",
"03305": "AB889X",
"03307": "AB889X",
"03431": "AB889X",
"03435": "AB889X",
"03440": "AB889X",
"03441": "AB889X",
"03442": "AB889X",
"03443": "AB889X",
"03444": "AB889X",
"03445": "AB889X",
"03446": "AB889X",
"03447": "AB889X",
"03448": "AB889X",
"03449": "AB889X",
"03450": "AB889X",
"03451": "AB889X",
"03452": "AB889X",
"03455": "AB889X",
"03456": "AB889X",
"03457": "AB889X",
"03458": "AB889X",
"03461": "AB889X",
"03462": "AB889X",
"03464": "AB889X",
"03465": "AB889X",
"03466": "AB889X",
"03467": "AB889X",
"03468": "AB889X",
"03469": "AB889X",
"03470": "AB889X",
"03561": "AB889X",
"03570": "AB889X",
"03574": "AB889X",
"03575": "AB889X",
"03576": "AB889X",
"03579": "AB889X",
"03580": "AB889X",
"03581": "AB889X",
"03582": "AB889X",
"03583": "AB889X",
"03584": "AB889X",
"03585": "AB889X",
"03586": "AB889X",
"03588": "AB889X",
"03589": "AB889X",
"03590": "AB889X",
"03592": "AB889X",
"03593": "AB889X",
"03595": "AB889X",
"03597": "AB889X",
"03598": "AB889X",
"03601": "AB889X",
"03602": "AB889X",
"03603": "AB889X",
"03604": "AB889X",
"03605": "AB889X",
"03607": "AB889X",
"03608": "AB889X",
"03609": "AB889X",
"03740": "AB889X",
"03741": "AB889X",
"03743": "AB889X",
"03745": "AB889X",
"03746": "AB889X",
"03748": "AB889X",
"03749": "AB889X",
"03750": "AB889X",
"03751": "AB889X",
"03752": "AB889X",
"03753": "AB889X",
"03754": "AB889X",
"03755": "AB889X",
"03756": "AB889X",
"03765": "AB889X",
"03766": "AB889X",
"03768": "AB889X",
"03769": "AB889X",
"03770": "AB889X",
"03771": "AB889X",
"03773": "AB889X",
"03774": "AB889X",
"03777": "AB889X",
"03779": "AB889X",
"03780": "AB889X",
"03781": "AB889X",
"03782": "AB889X",
"03784": "AB889X",
"03785": "AB889X",
"03801": "AB889X",
"03802": "AB889X",
"03803": "AB889X",
"03804": "AB889X",
"03805": "AB889X",
"03809": "AB889X",
"03810": "AB889X",
"03811": "AB889X",
"03812": "AB889X",
"03813": "AB889X",
"03814": "AB889X",
"03815": "AB889X",
"03816": "AB889X",
"03817": "AB889X",
"03818": "AB889X",
"03819": "AB889X",
"03820": "AB889X",
"03821": "AB889X",
"03822": "AB889X",
"03823": "AB889X",
"03824": "AB889X",
"03825": "AB889X",
"03826": "AB889X",
"03827": "AB889X",
"03830": "AB889X",
"03832": "AB889X",
"03833": "AB889X",
"03835": "AB889X",
"03836": "AB889X",
"03837": "AB889X",
"03838": "AB889X",
"03839": "AB889X",
"03840": "AB889X",
"03841": "AB889X",
"03842": "AB889X",
"03843": "AB889X",
"03844": "AB889X",
"03845": "AB889X",
"03846": "AB889X",
"03847": "AB889X",
"03848": "AB889X",
"03849": "AB889X",
"03850": "AB889X",
"03851": "AB889X",
"03852": "AB889X",
"03853": "AB889X",
"03854": "AB889X",
"03855": "AB889X",
"03856": "AB889X",
"03857": "AB889X",
"03858": "AB889X",
"03859": "AB889X",
"03860": "AB889X",
"03861": "AB889X",
"03862": "AB889X",
"03864": "AB889X",
"03865": "AB889X",
"03866": "AB889X",
"03867": "AB889X",
"03868": "AB889X",
"03869": "AB889X",
"03870": "AB889X",
"03871": "AB889X",
"03872": "AB889X",
"03873": "AB889X",
"03874": "AB889X",
"03875": "AB889X",
"03878": "AB889X",
"03882": "AB889X",
"03883": "AB889X",
"03884": "AB889X",
"03885": "AB889X",
"03886": "AB889X",
"03887": "AB889X",
"03890": "AB889X",
"03894": "AB889X",
"03896": "AB889X",
"03897": "AB889X",
"03901": "AB889X",
"03902": "AB889X",
"03903": "AB889X",
"03904": "AB889X",
"03905": "AB889X",
"03906": "AB889X",
"03907": "AB889X",
"03908": "AB889X",
"03909": "AB889X",
"03910": "AB889X",
"03911": "AB889X",
"04001": "AB889X",
"04002": "AB889X",
"04003": "AB889X",
"04004": "AB889X",
"04005": "AB889X",
"04006": "AB889X",
"04007": "AB889X",
"04008": "AB889X",
"04009": "AB889X",
"04010": "AB889X",
"04011": "AB889X",
"04013": "AB889X",
"04014": "AB889X",
"04015": "AB889X",
"04016": "AB889X",
"04017": "AB889X",
"04019": "AB889X",
"04020": "AB889X",
"04021": "AB889X",
"04022": "AB889X",
"04024": "AB889X",
"04027": "AB889X",
"04028": "AB889X",
"04029": "AB889X",
"04030": "AB889X",
"04032": "AB889X",
"04033": "AB889X",
"04034": "AB889X",
"04037": "AB889X",
"04038": "AB889X",
"04039": "AB889X",
"04040": "AB889X",
"04041": "AB889X",
"04042": "AB889X",
"04043": "AB889X",
"04046": "AB889X",
"04047": "AB889X",
"04048": "AB889X",
"04049": "AB889X",
"04050": "AB889X",
"04051": "AB889X",
"04054": "AB889X",
"04055": "AB889X",
"04056": "AB889X",
"04057": "AB889X",
"04061": "AB889X",
"04062": "AB889X",
"04063": "AB889X",
"04064": "AB889X",
"04066": "AB889X",
"04068": "AB889X",
"04069": "AB889X",
"04070": "AB889X",
"04071": "AB889X",
"04072": "AB889X",
"04073": "AB889X",
"04074": "AB889X",
"04075": "AB889X",
"04076": "AB889X",
"04077": "AB889X",
"04078": "AB889X",
"04079": "AB889X",
"04082": "AB889X",
"04083": "AB889X",
"04084": "AB889X",
"04085": "AB889X",
"04086": "AB889X",
"04087": "AB889X",
"04088": "AB889X",
"04090": "AB889X",
"04091": "AB889X",
"04092": "AB889X",
"04093": "AB889X",
"04094": "AB889X",
"04095": "AB889X",
"04096": "AB889X",
"04097": "AB889X",
"04098": "AB889X",
"04101": "AB889X",
"04102": "AB889X",
"04103": "AB889X",
"04104": "AB889X",
"04105": "AB889X",
"04106": "AB889X",
"04107": "AB889X",
"04108": "AB889X",
"04109": "AB889X",
"04110": "AB889X",
"04112": "AB889X",
"04116": "AB889X",
"04122": "AB889X",
"04123": "AB889X",
"04124": "AB889X",
"04210": "AB889X",
"04211": "AB889X",
"04212": "AB889X",
"04216": "AB889X",
"04217": "AB889X",
"04219": "AB889X",
"04220": "AB889X",
"04221": "AB889X",
"04222": "AB889X",
"04223": "AB889X",
"04224": "AB889X",
"04225": "AB889X",
"04226": "AB889X",
"04227": "AB889X",
"04228": "AB889X",
"04230": "AB889X",
"04231": "AB889X",
"04234": "AB889X",
"04236": "AB889X",
"04237": "AB889X",
"04238": "AB889X",
"04239": "AB889X",
"04240": "AB889X",
"04241": "AB889X",
"04243": "AB889X",
"04250": "AB889X",
"04252": "AB889X",
"04253": "AB889X",
"04254": "AB889X",
"04255": "AB889X",
"04256": "AB889X",
| python | MIT | c3cc5a95d4f72e6ccc27ebae23344fce8cc70786 | 2026-01-04T14:40:23.735327Z | true |
psf/black | https://github.com/psf/black/blob/c3cc5a95d4f72e6ccc27ebae23344fce8cc70786/profiling/dict_huge.py | profiling/dict_huge.py | config = some.Structure(
some_mapping={
"00501": "AB890X",
"00544": "AB890X",
"01001": "AB889X",
"01002": "AB889X",
"01003": "AB889X",
"01004": "AB889X",
"01005": "AB889X",
"01007": "AB889X",
"01008": "AB889X",
"01009": "AB889X",
"01010": "AB889X",
"01011": "AB889X",
"01012": "AB889X",
"01013": "AB889X",
"01014": "AB889X",
"01020": "AB889X",
"01021": "AB889X",
"01022": "AB889X",
"01026": "AB889X",
"01027": "AB889X",
"01028": "AB889X",
"01029": "AB889X",
"01030": "AB889X",
"01031": "AB889X",
"01032": "AB889X",
"01033": "AB889X",
"01034": "AB889X",
"01035": "AB889X",
"01036": "AB889X",
"01037": "AB889X",
"01038": "AB889X",
"01039": "AB889X",
"01040": "AB889X",
"01041": "AB889X",
"01050": "AB889X",
"01053": "AB889X",
"01054": "AB889X",
"01056": "AB889X",
"01057": "AB889X",
"01059": "AB889X",
"01060": "AB889X",
"01061": "AB889X",
"01062": "AB889X",
"01063": "AB889X",
"01066": "AB889X",
"01068": "AB889X",
"01069": "AB889X",
"01070": "AB889X",
"01071": "AB889X",
"01072": "AB889X",
"01073": "AB889X",
"01074": "AB889X",
"01075": "AB889X",
"01077": "AB889X",
"01079": "AB889X",
"01080": "AB889X",
"01081": "AB889X",
"01082": "AB889X",
"01083": "AB889X",
"01084": "AB889X",
"01085": "AB889X",
"01086": "AB889X",
"01088": "AB889X",
"01089": "AB889X",
"01090": "AB889X",
"01092": "AB889X",
"01093": "AB889X",
"01094": "AB889X",
"01095": "AB889X",
"01096": "AB889X",
"01097": "AB889X",
"01098": "AB889X",
"01101": "AB889X",
"01102": "AB889X",
"01103": "AB889X",
"01104": "AB889X",
"01105": "AB889X",
"01106": "AB889X",
"01107": "AB889X",
"01108": "AB889X",
"01109": "AB889X",
"01111": "AB889X",
"01115": "AB889X",
"01116": "AB889X",
"01118": "AB889X",
"01119": "AB889X",
"01128": "AB889X",
"01129": "AB889X",
"01133": "AB889X",
"01138": "AB889X",
"01139": "AB889X",
"01144": "AB889X",
"01151": "AB889X",
"01152": "AB889X",
"01195": "AB889X",
"01199": "AB889X",
"01201": "AB890X",
"01202": "AB889X",
"01203": "AB889X",
"01220": "AB890X",
"01222": "AB889X",
"01223": "AB889X",
"01224": "AB889X",
"01225": "AB889X",
"01226": "AB889X",
"01227": "AB889X",
"01229": "AB889X",
"01230": "AB889X",
"01235": "AB889X",
"01236": "AB889X",
"01237": "AB889X",
"01238": "AB889X",
"01240": "AB889X",
"01242": "AB889X",
"01243": "AB889X",
"01244": "AB889X",
"01245": "AB889X",
"01247": "AB890X",
"01252": "AB889X",
"01253": "AB889X",
"01254": "AB889X",
"01255": "AB889X",
"01256": "AB889X",
"01257": "AB890X",
"01258": "AB889X",
"01259": "AB889X",
"01260": "AB889X",
"01262": "AB889X",
"01263": "AB889X",
"01264": "AB889X",
"01266": "AB889X",
"01267": "AB890X",
"01270": "AB889X",
"01301": "AB889X",
"01302": "AB889X",
"01330": "AB889X",
"01331": "AB889X",
"01337": "AB889X",
"01338": "AB889X",
"01339": "AB889X",
"01340": "AB889X",
"01341": "AB889X",
"01342": "AB889X",
"01343": "AB889X",
"01344": "AB889X",
"01346": "AB889X",
"01347": "AB889X",
"01349": "AB889X",
"01350": "AB889X",
"01351": "AB889X",
"01354": "AB889X",
"01355": "AB889X",
"01360": "AB889X",
"01364": "AB889X",
"01366": "AB889X",
"01367": "AB890X",
"01368": "AB889X",
"01370": "AB889X",
"01373": "AB889X",
"01375": "AB889X",
"01376": "AB889X",
"01378": "AB889X",
"01379": "AB889X",
"01380": "AB889X",
"01420": "AB889X",
"01430": "AB889X",
"01431": "AB889X",
"01432": "AB889X",
"01434": "AB889X",
"01436": "AB889X",
"01438": "AB889X",
"01440": "AB889X",
"01441": "AB889X",
"01450": "AB889X",
"01451": "AB889X",
"01452": "AB889X",
"01453": "AB889X",
"01460": "AB889X",
"01462": "AB889X",
"01463": "AB889X",
"01464": "AB889X",
"01467": "AB889X",
"01468": "AB889X",
"01469": "AB889X",
"01470": "AB889X",
"01471": "AB889X",
"01472": "AB889X",
"01473": "AB889X",
"01474": "AB889X",
"01475": "AB889X",
"01477": "AB889X",
"01501": "AB889X",
"01503": "AB889X",
"01504": "AB889X",
"01505": "AB889X",
"01506": "AB889X",
"01507": "AB889X",
"01508": "AB889X",
"01509": "AB889X",
"01510": "AB889X",
"01515": "AB889X",
"01516": "AB889X",
"01517": "AB889X",
"01518": "AB889X",
"01519": "AB889X",
"01520": "AB889X",
"01521": "AB889X",
"01522": "AB889X",
"01523": "AB889X",
"01524": "AB889X",
"01525": "AB889X",
"01526": "AB889X",
"01527": "AB889X",
"01529": "AB889X",
"01531": "AB889X",
"01532": "AB889X",
"01534": "AB889X",
"01535": "AB889X",
"01536": "AB889X",
"01537": "AB889X",
"01538": "AB889X",
"01540": "AB889X",
"01541": "AB889X",
"01542": "AB889X",
"01543": "AB889X",
"01545": "AB889X",
"01546": "AB889X",
"01550": "AB889X",
"01560": "AB889X",
"01561": "AB889X",
"01562": "AB889X",
"01564": "AB889X",
"01566": "AB889X",
"01568": "AB889X",
"01569": "AB889X",
"01570": "AB889X",
"01571": "AB889X",
"01580": "AB889X",
"01581": "AB889X",
"01582": "AB889X",
"01583": "AB889X",
"01585": "AB889X",
"01586": "AB889X",
"01588": "AB889X",
"01590": "AB889X",
"01601": "AB889X",
"01602": "AB889X",
"01603": "AB889X",
"01604": "AB889X",
"01605": "AB889X",
"01606": "AB889X",
"01607": "AB889X",
"01608": "AB889X",
"01609": "AB889X",
"01610": "AB889X",
"01611": "AB889X",
"01612": "AB889X",
"01613": "AB889X",
"01614": "AB889X",
"01615": "AB889X",
"01653": "AB889X",
"01654": "AB889X",
"01655": "AB889X",
"01701": "AB889X",
"01702": "AB889X",
"01703": "AB889X",
"01704": "AB889X",
"01705": "AB889X",
"01718": "AB889X",
"01719": "AB889X",
"01720": "AB889X",
"01721": "AB889X",
"01730": "AB889X",
"01731": "AB889X",
"01740": "AB889X",
"01741": "AB889X",
"01742": "AB889X",
"01745": "AB889X",
"01746": "AB889X",
"01747": "AB889X",
"01748": "AB889X",
"01749": "AB889X",
"01752": "AB889X",
"01754": "AB889X",
"01756": "AB889X",
"01757": "AB889X",
"01760": "AB889X",
"01770": "AB889X",
"01772": "AB889X",
"01773": "AB889X",
"01775": "AB889X",
"01776": "AB889X",
"01778": "AB889X",
"01784": "AB889X",
"01801": "AB889X",
"01803": "AB889X",
"01805": "AB889X",
"01806": "AB889X",
"01807": "AB889X",
"01808": "AB889X",
"01810": "AB889X",
"01812": "AB889X",
"01813": "AB889X",
"01815": "AB889X",
"01821": "AB889X",
"01822": "AB889X",
"01824": "AB889X",
"01826": "AB889X",
"01827": "AB889X",
"01830": "AB889X",
"01831": "AB889X",
"01832": "AB889X",
"01833": "AB889X",
"01834": "AB889X",
"01835": "AB889X",
"01840": "AB889X",
"01841": "AB889X",
"01842": "AB889X",
"01843": "AB889X",
"01844": "AB889X",
"01845": "AB889X",
"01850": "AB889X",
"01851": "AB889X",
"01852": "AB889X",
"01853": "AB889X",
"01854": "AB889X",
"01860": "AB889X",
"01862": "AB889X",
"01863": "AB889X",
"01864": "AB889X",
"01865": "AB889X",
"01866": "AB889X",
"01867": "AB889X",
"01876": "AB889X",
"01879": "AB889X",
"01880": "AB889X",
"01885": "AB889X",
"01886": "AB889X",
"01887": "AB889X",
"01888": "AB889X",
"01889": "AB889X",
"01890": "AB889X",
"01899": "AB889X",
"01901": "AB889X",
"01902": "AB889X",
"01903": "AB889X",
"01904": "AB889X",
"01905": "AB889X",
"01906": "AB889X",
"01907": "AB889X",
"01908": "AB889X",
"01910": "AB889X",
"01913": "AB889X",
"01915": "AB889X",
"01921": "AB889X",
"01922": "AB889X",
"01923": "AB889X",
"01929": "AB889X",
"01930": "AB889X",
"01931": "AB889X",
"01936": "AB889X",
"01937": "AB889X",
"01938": "AB889X",
"01940": "AB889X",
"01944": "AB889X",
"01945": "AB889X",
"01949": "AB889X",
"01950": "AB889X",
"01951": "AB889X",
"01952": "AB889X",
"01960": "AB889X",
"01961": "AB889X",
"01965": "AB889X",
"01966": "AB889X",
"01969": "AB889X",
"01970": "AB889X",
"01971": "AB889X",
"01982": "AB889X",
"01983": "AB889X",
"01984": "AB889X",
"01985": "AB889X",
"02018": "AB889X",
"02019": "AB889X",
"02020": "AB889X",
"02021": "AB889X",
"02025": "AB889X",
"02026": "AB889X",
"02027": "AB889X",
"02030": "AB889X",
"02031": "AB889X",
"02032": "AB889X",
"02035": "AB889X",
"02038": "AB889X",
"02040": "AB889X",
"02041": "AB889X",
"02043": "AB889X",
"02044": "AB889X",
"02045": "AB889X",
"02047": "AB889X",
"02048": "AB889X",
"02050": "AB889X",
"02051": "AB889X",
"02052": "AB889X",
"02053": "AB889X",
"02054": "AB889X",
"02055": "AB889X",
"02056": "AB889X",
"02059": "AB889X",
"02060": "AB889X",
"02061": "AB889X",
"02062": "AB889X",
"02065": "AB889X",
"02066": "AB889X",
"02067": "AB889X",
"02070": "AB889X",
"02071": "AB889X",
"02072": "AB889X",
"02081": "AB889X",
"02090": "AB889X",
"02093": "AB889X",
"02108": "AB889X",
"02109": "AB889X",
"02110": "AB889X",
"02111": "AB889X",
"02112": "AB889X",
"02113": "AB889X",
"02114": "AB889X",
"02115": "AB889X",
"02116": "AB889X",
"02117": "AB889X",
"02118": "AB889X",
"02119": "AB889X",
"02120": "AB889X",
"02121": "AB889X",
"02122": "AB889X",
"02123": "AB889X",
"02124": "AB889X",
"02125": "AB889X",
"02126": "AB889X",
"02127": "AB889X",
"02128": "AB889X",
"02129": "AB889X",
"02130": "AB889X",
"02131": "AB889X",
"02132": "AB889X",
"02133": "AB889X",
"02134": "AB889X",
"02135": "AB889X",
"02136": "AB889X",
"02137": "AB889X",
"02138": "AB889X",
"02139": "AB889X",
"02140": "AB889X",
"02141": "AB889X",
"02142": "AB889X",
"02143": "AB889X",
"02144": "AB889X",
"02145": "AB889X",
"02148": "AB889X",
"02149": "AB889X",
"02150": "AB889X",
"02151": "AB889X",
"02152": "AB889X",
"02153": "AB889X",
"02155": "AB889X",
"02156": "AB889X",
"02163": "AB889X",
"02169": "AB889X",
"02170": "AB889X",
"02171": "AB889X",
"02176": "AB889X",
"02180": "AB889X",
"02184": "AB889X",
"02185": "AB889X",
"02186": "AB889X",
"02187": "AB889X",
"02188": "AB889X",
"02189": "AB889X",
"02190": "AB889X",
"02191": "AB889X",
"02196": "AB889X",
"02199": "AB889X",
"02201": "AB889X",
"02203": "AB889X",
"02204": "AB889X",
"02205": "AB889X",
"02206": "AB889X",
"02207": "AB889X",
"02210": "AB889X",
"02211": "AB889X",
"02212": "AB889X",
"02215": "AB889X",
"02216": "AB889X",
"02217": "AB889X",
"02222": "AB889X",
"02228": "AB889X",
"02238": "AB889X",
"02239": "AB889X",
"02241": "AB889X",
"02266": "AB889X",
"02269": "AB889X",
"02283": "AB889X",
"02284": "AB889X",
"02293": "AB889X",
"02295": "AB889X",
"02297": "AB889X",
"02298": "AB889X",
"02301": "AB889X",
"02302": "AB889X",
"02303": "AB889X",
"02304": "AB889X",
"02305": "AB889X",
"02322": "AB889X",
"02324": "AB889X",
"02325": "AB889X",
"02327": "AB889X",
"02330": "AB889X",
"02331": "AB889X",
"02332": "AB889X",
"02333": "AB889X",
"02334": "AB889X",
"02337": "AB889X",
"02338": "AB889X",
"02339": "AB889X",
"02340": "AB889X",
"02341": "AB889X",
"02343": "AB889X",
"02344": "AB889X",
"02345": "AB889X",
"02346": "AB889X",
"02347": "AB889X",
"02348": "AB889X",
"02349": "AB889X",
"02350": "AB889X",
"02351": "AB889X",
"02355": "AB889X",
"02356": "AB889X",
"02357": "AB889X",
"02358": "AB889X",
"02359": "AB889X",
"02360": "AB889X",
"02361": "AB889X",
"02362": "AB889X",
"02364": "AB889X",
"02366": "AB889X",
"02367": "AB889X",
"02368": "AB889X",
"02370": "AB889X",
"02375": "AB889X",
"02379": "AB889X",
"02381": "AB889X",
"02382": "AB889X",
"02420": "AB889X",
"02421": "AB889X",
"02445": "AB889X",
"02446": "AB889X",
"02447": "AB889X",
"02451": "AB889X",
"02452": "AB889X",
"02453": "AB889X",
"02454": "AB889X",
"02455": "AB889X",
"02456": "AB889X",
"02457": "AB889X",
"02458": "AB890X",
"02459": "AB889X",
"02460": "AB889X",
"02461": "AB889X",
"02462": "AB889X",
"02464": "AB889X",
"02465": "AB889X",
"02466": "AB889X",
"02467": "AB889X",
"02468": "AB889X",
"02471": "AB889X",
"02472": "AB889X",
"02474": "AB889X",
"02475": "AB889X",
"02476": "AB889X",
"02477": "AB889X",
"02478": "AB889X",
"02479": "AB889X",
"02481": "AB889X",
"02482": "AB889X",
"02492": "AB889X",
"02493": "AB889X",
"02494": "AB889X",
"02495": "AB889X",
"02532": "AB889X",
"02534": "AB889X",
"02535": "AB889X",
"02536": "AB889X",
"02537": "AB889X",
"02538": "AB889X",
"02539": "AB889X",
"02540": "AB889X",
"02541": "AB889X",
"02542": "AB889X",
"02543": "AB889X",
"02552": "AB889X",
"02553": "AB889X",
"02554": "AB889X",
"02556": "AB889X",
"02557": "AB889X",
"02558": "AB889X",
"02559": "AB889X",
"02561": "AB889X",
"02562": "AB889X",
"02563": "AB889X",
"02564": "AB889X",
"02565": "AB889X",
"02568": "AB889X",
"02571": "AB889X",
"02573": "AB889X",
"02574": "AB889X",
"02575": "AB889X",
"02576": "AB889X",
"02584": "AB889X",
"02601": "AB889X",
"02630": "AB889X",
"02631": "AB889X",
"02632": "AB889X",
"02633": "AB889X",
"02634": "AB889X",
"02635": "AB889X",
"02636": "AB889X",
"02637": "AB889X",
"02638": "AB889X",
"02639": "AB889X",
"02641": "AB889X",
"02642": "AB889X",
"02643": "AB889X",
"02644": "AB889X",
"02645": "AB889X",
"02646": "AB889X",
"02647": "AB889X",
"02648": "AB889X",
"02649": "AB889X",
"02650": "AB889X",
"02651": "AB889X",
"02652": "AB889X",
"02653": "AB889X",
"02655": "AB889X",
"02657": "AB889X",
"02659": "AB889X",
"02660": "AB889X",
"02661": "AB889X",
"02662": "AB889X",
"02663": "AB889X",
"02664": "AB889X",
"02666": "AB889X",
"02667": "AB889X",
"02668": "AB889X",
"02669": "AB889X",
"02670": "AB889X",
"02671": "AB889X",
"02672": "AB889X",
"02673": "AB889X",
"02675": "AB889X",
"02702": "AB889X",
"02703": "AB889X",
"02712": "AB889X",
"02713": "AB889X",
"02714": "AB889X",
"02715": "AB889X",
"02717": "AB889X",
"02718": "AB889X",
"02719": "AB889X",
"02720": "AB889X",
"02721": "AB889X",
"02722": "AB889X",
"02723": "AB889X",
"02724": "AB889X",
"02725": "AB889X",
"02726": "AB889X",
"02738": "AB889X",
"02739": "AB889X",
"02740": "AB889X",
"02741": "AB889X",
"02742": "AB889X",
"02743": "AB889X",
"02744": "AB889X",
"02745": "AB889X",
"02746": "AB889X",
"02747": "AB889X",
"02748": "AB889X",
"02760": "AB889X",
"02761": "AB889X",
"02762": "AB889X",
"02763": "AB889X",
"02764": "AB889X",
"02766": "AB889X",
"02767": "AB889X",
"02768": "AB889X",
"02769": "AB889X",
"02770": "AB889X",
"02771": "AB889X",
"02777": "AB889X",
"02779": "AB889X",
"02780": "AB889X",
"02783": "AB889X",
"02790": "AB889X",
"02791": "AB889X",
"02801": "AB889X",
"02802": "AB889X",
"02804": "AB889X",
"02806": "AB889X",
"02807": "AB889X",
"02808": "AB889X",
"02809": "AB889X",
"02812": "AB889X",
"02813": "AB889X",
"02814": "AB889X",
"02815": "AB889X",
"02816": "AB889X",
"02817": "AB889X",
"02818": "AB889X",
"02822": "AB889X",
"02823": "AB889X",
"02824": "AB889X",
"02825": "AB889X",
"02826": "AB889X",
"02827": "AB889X",
"02828": "AB889X",
"02829": "AB889X",
"02830": "AB889X",
"02831": "AB889X",
"02832": "AB889X",
"02833": "AB889X",
"02835": "AB889X",
"02836": "AB889X",
"02837": "AB889X",
"02838": "AB889X",
"02839": "AB889X",
"02840": "AB889X",
"02841": "AB889X",
"02842": "AB889X",
"02852": "AB889X",
"02854": "AB889X",
"02857": "AB889X",
"02858": "AB889X",
"02859": "AB889X",
"02860": "AB889X",
"02861": "AB889X",
"02862": "AB889X",
"02863": "AB889X",
"02864": "AB889X",
"02865": "AB889X",
"02871": "AB889X",
"02872": "AB889X",
"02873": "AB889X",
"02874": "AB889X",
"02875": "AB889X",
"02876": "AB889X",
"02877": "AB889X",
"02878": "AB889X",
"02879": "AB889X",
"02880": "AB889X",
"02881": "AB889X",
"02882": "AB889X",
"02883": "AB889X",
"02885": "AB889X",
"02886": "AB889X",
"02887": "AB889X",
"02888": "AB889X",
"02889": "AB889X",
"02891": "AB889X",
"02892": "AB889X",
"02893": "AB889X",
"02894": "AB889X",
"02895": "AB889X",
"02896": "AB889X",
"02898": "AB889X",
"02901": "AB889X",
"02902": "AB889X",
"02903": "AB889X",
"02904": "AB889X",
"02905": "AB889X",
"02906": "AB889X",
"02907": "AB889X",
"02908": "AB889X",
"02909": "AB889X",
"02910": "AB889X",
"02911": "AB889X",
"02912": "AB889X",
"02914": "AB889X",
"02915": "AB889X",
"02916": "AB889X",
"02917": "AB889X",
"02918": "AB889X",
"02919": "AB889X",
"02920": "AB889X",
"02921": "AB889X",
"02940": "AB889X",
"03031": "AB889X",
"03032": "AB889X",
"03033": "AB889X",
"03034": "AB889X",
"03036": "AB889X",
"03037": "AB889X",
"03038": "AB889X",
"03040": "AB889X",
"03041": "AB889X",
"03042": "AB889X",
"03043": "AB889X",
"03044": "AB889X",
"03045": "AB889X",
"03046": "AB889X",
"03047": "AB889X",
"03048": "AB889X",
"03049": "AB889X",
"03051": "AB889X",
"03052": "AB889X",
"03053": "AB889X",
"03054": "AB889X",
"03055": "AB889X",
"03057": "AB889X",
"03060": "AB889X",
"03061": "AB889X",
"03062": "AB889X",
"03063": "AB889X",
"03064": "AB889X",
"03070": "AB889X",
"03071": "AB889X",
"03073": "AB889X",
"03076": "AB889X",
"03077": "AB889X",
"03079": "AB889X",
"03082": "AB889X",
"03084": "AB889X",
"03086": "AB889X",
"03087": "AB889X",
"03101": "AB889X",
"03102": "AB889X",
"03103": "AB889X",
"03104": "AB889X",
"03105": "AB889X",
"03106": "AB889X",
"03107": "AB889X",
"03108": "AB889X",
"03109": "AB889X",
"03110": "AB889X",
"03111": "AB889X",
"03215": "AB889X",
"03216": "AB889X",
"03217": "AB889X",
"03218": "AB889X",
"03220": "AB889X",
"03221": "AB889X",
"03222": "AB889X",
"03223": "AB889X",
"03224": "AB889X",
"03225": "AB889X",
"03226": "AB889X",
"03227": "AB889X",
"03229": "AB889X",
"03230": "AB889X",
"03231": "AB889X",
"03233": "AB889X",
"03234": "AB889X",
"03235": "AB889X",
"03237": "AB889X",
"03238": "AB889X",
"03240": "AB889X",
"03241": "AB889X",
"03242": "AB889X",
"03243": "AB889X",
"03244": "AB889X",
"03245": "AB889X",
"03246": "AB889X",
"03247": "AB889X",
"03249": "AB889X",
"03251": "AB889X",
"03252": "AB889X",
"03253": "AB889X",
"03254": "AB889X",
"03255": "AB889X",
"03256": "AB889X",
"03257": "AB889X",
"03258": "AB889X",
"03259": "AB889X",
"03260": "AB889X",
"03261": "AB889X",
"03262": "AB889X",
"03263": "AB889X",
"03264": "AB889X",
"03266": "AB889X",
"03268": "AB889X",
"03269": "AB889X",
"03272": "AB889X",
"03273": "AB889X",
"03274": "AB889X",
"03275": "AB889X",
"03276": "AB889X",
"03278": "AB889X",
"03279": "AB889X",
"03280": "AB889X",
"03281": "AB889X",
"03282": "AB889X",
"03284": "AB889X",
"03287": "AB889X",
"03289": "AB889X",
"03290": "AB889X",
"03291": "AB889X",
"03293": "AB889X",
"03298": "AB889X",
"03299": "AB889X",
"03301": "AB889X",
"03302": "AB889X",
"03303": "AB889X",
"03304": "AB889X",
"03305": "AB889X",
"03307": "AB889X",
"03431": "AB889X",
"03435": "AB889X",
"03440": "AB889X",
"03441": "AB889X",
"03442": "AB889X",
"03443": "AB889X",
"03444": "AB889X",
"03445": "AB889X",
"03446": "AB889X",
"03447": "AB889X",
"03448": "AB889X",
"03449": "AB889X",
"03450": "AB889X",
"03451": "AB889X",
"03452": "AB889X",
"03455": "AB889X",
"03456": "AB889X",
"03457": "AB889X",
"03458": "AB889X",
"03461": "AB889X",
"03462": "AB889X",
"03464": "AB889X",
"03465": "AB889X",
"03466": "AB889X",
"03467": "AB889X",
"03468": "AB889X",
"03469": "AB889X",
"03470": "AB889X",
"03561": "AB889X",
"03570": "AB889X",
"03574": "AB889X",
"03575": "AB889X",
"03576": "AB889X",
"03579": "AB889X",
"03580": "AB889X",
"03581": "AB889X",
"03582": "AB889X",
"03583": "AB889X",
"03584": "AB889X",
"03585": "AB889X",
"03586": "AB889X",
"03588": "AB889X",
"03589": "AB889X",
"03590": "AB889X",
"03592": "AB889X",
"03593": "AB889X",
"03595": "AB889X",
"03597": "AB889X",
"03598": "AB889X",
"03601": "AB889X",
"03602": "AB889X",
"03603": "AB889X",
"03604": "AB889X",
"03605": "AB889X",
"03607": "AB889X",
"03608": "AB889X",
"03609": "AB889X",
"03740": "AB889X",
"03741": "AB889X",
"03743": "AB889X",
"03745": "AB889X",
"03746": "AB889X",
"03748": "AB889X",
"03749": "AB889X",
"03750": "AB889X",
"03751": "AB889X",
"03752": "AB889X",
"03753": "AB889X",
"03754": "AB889X",
"03755": "AB889X",
"03756": "AB889X",
"03765": "AB889X",
"03766": "AB889X",
"03768": "AB889X",
"03769": "AB889X",
"03770": "AB889X",
"03771": "AB889X",
"03773": "AB889X",
"03774": "AB889X",
"03777": "AB889X",
"03779": "AB889X",
"03780": "AB889X",
"03781": "AB889X",
"03782": "AB889X",
"03784": "AB889X",
"03785": "AB889X",
"03801": "AB889X",
"03802": "AB889X",
"03803": "AB889X",
"03804": "AB889X",
"03805": "AB889X",
"03809": "AB889X",
"03810": "AB889X",
"03811": "AB889X",
"03812": "AB889X",
"03813": "AB889X",
"03814": "AB889X",
"03815": "AB889X",
"03816": "AB889X",
"03817": "AB889X",
"03818": "AB889X",
"03819": "AB889X",
"03820": "AB889X",
"03821": "AB889X",
"03822": "AB889X",
"03823": "AB889X",
"03824": "AB889X",
"03825": "AB889X",
"03826": "AB889X",
"03827": "AB889X",
"03830": "AB889X",
"03832": "AB889X",
"03833": "AB889X",
"03835": "AB889X",
"03836": "AB889X",
"03837": "AB889X",
"03838": "AB889X",
"03839": "AB889X",
"03840": "AB889X",
"03841": "AB889X",
"03842": "AB889X",
"03843": "AB889X",
"03844": "AB889X",
"03845": "AB889X",
"03846": "AB889X",
"03847": "AB889X",
"03848": "AB889X",
"03849": "AB889X",
"03850": "AB889X",
"03851": "AB889X",
"03852": "AB889X",
"03853": "AB889X",
"03854": "AB889X",
"03855": "AB889X",
"03856": "AB889X",
"03857": "AB889X",
"03858": "AB889X",
"03859": "AB889X",
"03860": "AB889X",
"03861": "AB889X",
"03862": "AB889X",
"03864": "AB889X",
"03865": "AB889X",
"03866": "AB889X",
"03867": "AB889X",
"03868": "AB889X",
"03869": "AB889X",
"03870": "AB889X",
"03871": "AB889X",
"03872": "AB889X",
"03873": "AB889X",
"03874": "AB889X",
"03875": "AB889X",
"03878": "AB889X",
"03882": "AB889X",
"03883": "AB889X",
"03884": "AB889X",
"03885": "AB889X",
"03886": "AB889X",
"03887": "AB889X",
"03890": "AB889X",
"03894": "AB889X",
"03896": "AB889X",
"03897": "AB889X",
"03901": "AB889X",
"03902": "AB889X",
"03903": "AB889X",
"03904": "AB889X",
"03905": "AB889X",
"03906": "AB889X",
"03907": "AB889X",
"03908": "AB889X",
"03909": "AB889X",
"03910": "AB889X",
"03911": "AB889X",
"04001": "AB889X",
"04002": "AB889X",
"04003": "AB889X",
"04004": "AB889X",
"04005": "AB889X",
"04006": "AB889X",
"04007": "AB889X",
"04008": "AB889X",
"04009": "AB889X",
"04010": "AB889X",
"04011": "AB889X",
"04013": "AB889X",
"04014": "AB889X",
"04015": "AB889X",
"04016": "AB889X",
"04017": "AB889X",
"04019": "AB889X",
"04020": "AB889X",
"04021": "AB889X",
"04022": "AB889X",
"04024": "AB889X",
"04027": "AB889X",
"04028": "AB889X",
"04029": "AB889X",
"04030": "AB889X",
"04032": "AB889X",
"04033": "AB889X",
"04034": "AB889X",
"04037": "AB889X",
"04038": "AB889X",
"04039": "AB889X",
"04040": "AB889X",
"04041": "AB889X",
"04042": "AB889X",
"04043": "AB889X",
"04046": "AB889X",
"04047": "AB889X",
"04048": "AB889X",
"04049": "AB889X",
"04050": "AB889X",
"04051": "AB889X",
"04054": "AB889X",
"04055": "AB889X",
"04056": "AB889X",
"04057": "AB889X",
"04061": "AB889X",
"04062": "AB889X",
"04063": "AB889X",
"04064": "AB889X",
"04066": "AB889X",
"04068": "AB889X",
"04069": "AB889X",
"04070": "AB889X",
"04071": "AB889X",
"04072": "AB889X",
"04073": "AB889X",
"04074": "AB889X",
"04075": "AB889X",
"04076": "AB889X",
"04077": "AB889X",
"04078": "AB889X",
"04079": "AB889X",
"04082": "AB889X",
"04083": "AB889X",
"04084": "AB889X",
"04085": "AB889X",
"04086": "AB889X",
"04087": "AB889X",
"04088": "AB889X",
"04090": "AB889X",
"04091": "AB889X",
"04092": "AB889X",
"04093": "AB889X",
"04094": "AB889X",
"04095": "AB889X",
"04096": "AB889X",
"04097": "AB889X",
"04098": "AB889X",
"04101": "AB889X",
"04102": "AB889X",
"04103": "AB889X",
"04104": "AB889X",
"04105": "AB889X",
"04106": "AB889X",
"04107": "AB889X",
"04108": "AB889X",
"04109": "AB889X",
"04110": "AB889X",
"04112": "AB889X",
"04116": "AB889X",
"04122": "AB889X",
"04123": "AB889X",
"04124": "AB889X",
"04210": "AB889X",
"04211": "AB889X",
"04212": "AB889X",
"04216": "AB889X",
"04217": "AB889X",
"04219": "AB889X",
"04220": "AB889X",
"04221": "AB889X",
"04222": "AB889X",
"04223": "AB889X",
"04224": "AB889X",
"04225": "AB889X",
"04226": "AB889X",
"04227": "AB889X",
"04228": "AB889X",
"04230": "AB889X",
"04231": "AB889X",
"04234": "AB889X",
"04236": "AB889X",
"04237": "AB889X",
"04238": "AB889X",
"04239": "AB889X",
"04240": "AB889X",
"04241": "AB889X",
"04243": "AB889X",
"04250": "AB889X",
"04252": "AB889X",
"04253": "AB889X",
"04254": "AB889X",
"04255": "AB889X",
"04256": "AB889X",
| python | MIT | c3cc5a95d4f72e6ccc27ebae23344fce8cc70786 | 2026-01-04T14:40:23.735327Z | true |
psf/black | https://github.com/psf/black/blob/c3cc5a95d4f72e6ccc27ebae23344fce8cc70786/profiling/mix_small.py | profiling/mix_small.py | config = some.Structure(
globalMap = {
103310322020340: [100000031211103,101042000320420,100100001202021,112320301100420,110101024402203,112001202000203,112101112010031,102130400200010,100401014300441,103000401422033],
110040120003212: [114413100031332,102101001412002,100210000032130,214000110100040,103031420121210,112114222301010,110133330100020,100001001203011,102210220202130,102200120234012],
244402003102200: [110212012114431,100001140020000,100012101223021,110031301200114,114002020044120,100021004302202,102202200240222,114102010220042,102021301441201,104103102103201],
122013242003223: [100014100100001,102100004130301,111120004100414,101034024000101,100021424301033,102003004003400,103340410140122,100102114100420,111012202111021,100103144302200],
1120010021223330: [110332202020000,104120130021200,112421004012141,111100220022101,100021104201130,102224410201003,110030021010001,101300401002320,112001321113132,101110434020010],
214100003030021: [102122000214201,100242141004122,102024240221040,110320011200230,100011114300334,102303004110022,100110201042101,110134201140010,112101044000202,100040024340013],
1000220132200020: [102231130213210,103214010102022,102000402041014,100043324210140,100023024211011,102404021403141,100010004313001,100003201021001,100122020011232,100121031014040],
1200041022422001: [100021300101110,103010301402112,100011401031000,100020034100101,100122214300222,103134021420204,102042210220100,100103200021130,103204214043011,103020320102002],
1000232101122040: [110011414240022,102202310203222,100042001020203,102002320220202,100010044300003,114130210231020,103301410110110,112114324040000,102124031023100,100204104320231],
110002000000001: [110010000111020,102011041320000,114240012324120,100022010022031,102014140200013,101240110302200,100031204311023,101232001021020,100012121040101,111243002222100],
200211022142211: [102231130213210,103214010102022,100043324210140,102324014112020,102022004012201,100023024211011,111130210020000,102404021403141,100003201021001,100122020011232],
114121130201204: [100040031020241,101100104000222,102324141000002,101010234004042,102041224010012,100411400030100,100002000102100,114340102322021,112033124022002,102120221120200],
104101002123040: [100032320000112,103140011442322,100312120141341,111144020011120,112142204020032,110044031204131,114012010020012,100001420412010,100102110020102,103022000123142],
241101103112120: [110333404433042,112240002043300,110021100132240,103104221443021,101100304000101,100202210410011,114402212310021,102411011111011,103020141414101,100113001040420],
1004020030320422: [100022214203010,110310011234031,110123111300000,100020031041304,102120004000101,102020002000420,110100302010110,100422020030044,101220100303222,100002220411002],
1041010100001110: [111124004101012,114103220220104,102120004001310,102430121110340,101021204000200,112141104020201,101200111020111,110001200422320,103210114021430,114223242332100],
121103011132000: [110021104200001,110441140102024,111140124100100,110310020040401,100002024303323,110030002130201,102103320232241,110302000400221,110111001302000,110210304220000],
1042022101004111: [110021104200001,111140124100100,110031301200114,102103320232241,110302000400221,110111001302000,110210304220000,102033124000111,111000212204213,110010324410130],
102220010222003: [114003212210013,110302012103114,103023101400002,110010120440200,110024140022040,100002100100320,110100034200300,100401201010220,103200024021204,102100204000234],
1442130103121040: [114413100031332,112121020204344,102211030203010,100210000032130,214000110100040,103031420121210,112114222301010,100001001203011,114214404142034,102210220202130],
110204013021024: [110301402000211,103030201402004,110420104221220,103200004030020,100320014310021,102000110221110,110240214432004,102122141142000,103041204010000,114432234113040],
100001440133234: [102111040210242,114110000002430,102010011412143,100424220032043,114013422300032,102021004001004,100131414303044,100201010110320,100000221030033,110101101120302],
114002033000020: [100001121002141,102000040201003,101132131221321,100004440004031,102002011033030,110310300003402,110220022132003,101001000302103,110044112100220,100010120031210],
112123022000131: [100304201000221,100112021020320,103010001422020,102300124023122,114221004100020,100002100124101,102304100210334,100403210010142,112003100241220,102204304000304],
1143001230040001: [101033124003122,100313144100202,103201004024412,114014222302011,103011020110211,110203420111140,111003412201430,101411214000203,110222422042203],
1002403200000231: [100122004300410,102140014001101,102000104012232,112400202020003,102000224000011,114122134140002,102101010202004,114400202310210,100042030201341,112233332034000],
1003123041101031: [110043132100300,102220120203200,100002004201122,110132031134031,1320303202002000,1321200010022312,110130330012140,114102134002330,140403400240000,100003230110201],
242220101103400: [111130112100210,102202200240222,110131044200121,100100004102234,110223332120231,100001024301322,114013044102010,114220114140122,101023021034304,110200210110130],
121030040400021: [110011011212100,101134030011000,103220211421421,110123230124040,131000001020102,112200412033030,110420001000202,100102110402220,110412310030001,114012022302022],
100104011300401: [110320414420310,110100100101041,100420124100211,103010211222002,101040120320230,110013021300010,101043234000412,101220000040002,100422001002123,114010010010002],
1004003211220032: [102400100230000,101100300302102,114120420223002,100002210201212,102302020212101,112410424012010,100030004104210,103030201434002,110232110004010,103021201430000],
223011102020040: [110012420131001,100132040031022,100020310000200,100220004030002,110020002003334,110014000442004,101441204020001,102014201411102,103231320100042,104121201242032],
100200110422111: [103100221440010,114003344011001,100021101000000,102140000212141,101014201030204,101203001010111,102424114020143,100031201030102,101041001003300,114012444020220],
134300111030221: [112100124023402,101020044011003,100002414210010,102010042000041,102242204040102,100021014100124,100130244302034,100122040012210,100014010104404,101020104000001],
114001000330100: [401142101000022,400114001102410,102100000012031,431232000323104,1012100023110102,232023302143031,120200122422404,1040100003200241,111414004440203,1020220210340010],
211222012120321: [112000112000031,100121214300000,102123214020000,102022124012030,114002320022312],
1122000200402140: [102401141113031,103042420143020,102304314110202,110210400002012,113032012204012,112310002020302,100204311000400,100403012200201,112002111121013,114211100001224],
332000024200013: [102120010220042,110103312142034,102210210312302,101120100320100,114140014100000,102110004002301,100130020001030,112022100213011,100101231202322,111210020001013],
120041442412123: [102133011413110,111001200041102,101223020300040,102034324000220,100210000032400,101230020303000,111340130010314,110200422121211,110214220002020,112220414010040],
1011011320300100: [100102024301030,100111134302041,100112234000041,110004244214343,101002101020003,102214021120301,114221224100022,101330210310300,112003021111000,102012141134211],
220020021000101: [100301001000202,104101112102403,100023121223031,114201432320014,120000002023011,102133120200123,101014020301201,102000031130401,101010111002141,114123124143310],
1024011342000021: [102001001130100,111204203012002,124020002020003,122222120003214,133332002141010,144000013213001,124010030100142,112310202021010,110014020020011,100140044020011],
1131011002310011: [102230040200031,110122001010214,114043140010022,102101204000010,110022300420031,100100401040001,114230230000123,100222024320003,103323001400013,114013012300240],
1142024120224002: [102203344011410,100021324100000,102103430210003,100012014300120,102414014022212,102012220241003,101004411032102,101430211012120,100204021000012,103242044020102],
1100004410231120: [110013202003320,101031241000010,102120231343224,110030332100203,100314114322101,114404232310120,103100034001310,114002202210331,100031301020100,110111032140220],
1440220030001122: [114010330030011,103021220104200,101010020320000,112000210211020,100010324210003,101000000343443,110002400011111,100402132200000,111100300024000,103144040104204],
121414301110004: [110144240011020,110413401240204,112000244001222,114441114123013,103220211421421,114000012334102,101000014012442,100312401002102,111022210021013,103110001420121],
130004004220120: [111240340004003,102021024000010,111101222244030,112011012004300,102300010242330,102000401120420,102004012043122,114011102210402,100120001014040,114300100000041],
1013303000401020: [101000024004442,100002100000100,110130030020000],
220041302111231: [100002014200331,100034244210020,102012004000003,100411000030110,102041201121230,103011014042120,100000030120242,102110400210023,101012204221200,111212422222300],
1000031031200013: [101302134233230,100000130010033,101030404212000,114102224000201,100022021041122,100020001042002,100013011020311,100120041020012,102012204000242,114143024003322],
1000303301100221: [111333104124202,101000304014100,100040011023101,110301030010140,104100002101431,101123232010002,114421242204443,110100222001100,103102000121300,110010331230210],
1410211342331412: [111002100000102,114021010040140,114222302321101,102101024002121,110014024202014,110220130100010,100020011030330,102404221402210,110203022032320,101222014230110],
1411020122202044: [100141401021003,102010000231120,101000400320211,101001010300214,103010020142023,110132002212113,110010100040100,102101002002002,111020302232033,110224030114311],
101010042001102: [101221020040220,100103104302044,101041030320401,102141212000200,101203121020430,102020004000014,100000211023014,114144014122041,100201111002224,101410304041000],
204204011102020: [100212030414010,101400004022210,102031021441200,101200020303202,102301324112020,111340300010010,102013224003020,103013020123142,102240344041020,102140202001100],
240000420312002: [110002004410202,102103114000024,102240221000001,112041002002124,114000024101102,140343100002103,400200234320200,100020124204330,100001424102041,100100021040230],
1030000001422430: [102343230224321,103211200100400,102112231020231,100022004300020,102320000240102,100042144200000,102030304001101,100020420121003,103020004011414,100001104301200],
1104234221030010: [110000322130121,101023001002020,111300222202234,100200001210021,103204230111030,104130020011020,101114422122000,102001314013400,114110414140400,111201100011141],
121341130003000: [111102004100101,102021100220101,114000040010421,112042110220004,100000214240410,100433201004014,102301102004413,102003000220111,102010100204023,102414040230400],
100101220022002: [100010024102110,101041200320012,114303400002201,110204211000331,112121014003422,114430102311021,100240444100022,103004411424400,111014002140322],
1023303420422001: [100043104240004,110002034200042,100001240100033,114100304002030,102100001340122,112030010234104,103414101212410,100123021223100,112302011102312,101020030343002],
101114102124321: [110403244220202,103113014002100,110120400402324,100402340010310,112010020211000,100102200000211,103030201240100,102300210210222,114100332340213,111031030024010],
1422302020100030: [114020000030002,114031000022030,100201211003004,102014002000401,103103241421322,114121012340044,102000400240203,102104304023201,103310300140324,100002224244002],
1121202400231120: [101211201202134,103120104030100,100004000100101,102020030220200,110031404423144,110003434410403,111110000014401,100000204312321,101004000304012,110300121220201],
1042001221000013: [114032104100141,101213114040141,102210101002412,111140100011101,110122421241103,112001144002010,101013030304101,100012011022000,102000004000013,102021241324040],
1102433033042110: [110104112010103,111102004100101,100122004300410,102202414041044,102140014001101,102000104012232,102021100220101,114443330001442,100230120001003,114000040010421],
100103201011144: [110102401201204,102400100230000,100212030414010,101200020303202,114120420223002,102013224003020,100002210201212,103013020123142,102302020212101,114303340000001],
240343043023011: [110120144200000,114022412330004,101200221022044,110241112020204,100002004104004,102100224000210,102310140240012,100014204201000,102103321411004,100400001001300],
1020301032424304: [101302134233230,100000130010033,101030404212000,114102224000201,100431000032001,100020001042002,100013011020311,102103331400000,100120041020012,100020001041231],
114200022220040: [100014100100001,102100004130301,111120004100414,101034024000101,101243024011000,103340410140122,100010021221221,111012202111021,100103144302200,101414100300221],
142041243300010: [102102002002242,101130104022002,101230331020012,100004244210201,102420124024204,122312222240401,102041014011340,110200130004300,100140101012000,101400000302141],
1002203102111022: [100434000032200,110004020020022,114032412303041,112000301122111,102020130212402,100010001020000,200000020021022,114321212322303,112302112002211,114202002333330],
1110102121041413: [101012310301211,103112200123144,114242304004011,102302200241241,110001420021023,110201040003402,112301421130130,110020012100302,114412202320010,110021030030202],
114020200220012: [102010114022011,103340041403223,114002200000421,101020321002002,114302210010001,114030004104220,100104004301000,102443211401140,102301041000014,100001111201214],
202011212123200: [102203301123001,103210400110122,101112020300011,114104302341000,100400201010000,102244400201444,101010001000121,102304000220032,102002131132000,100000031024222],
201032103003132: [110212041114210,113210300100002,112404024011000,102131034000220,111212124130140,101002014013010,103402020120010,112110100230023,112003044002004,103020200102200],
120222032001012: [110121100004131,111400214442024,111122200021102,101041100300001,102140011440001,101011220342010,110200004221020,103114211441300,110000222010004,100114000101041],
1100013040001020: [100003031030034,101001044211204,102100010233022,102120400212001,114313022302101,103001324000110,100002014200021,102300401001010,103212320212011,111120034400000],
1001031021300022: [102000020220000,101020104000040,114412142320040,100003044100101,114012402332300,102220211122102,101010110302010,100032121020101,100013224313301,100012244240030],
124000010122242: [100032320000112,103140011442322,100312120141341,111144020011120,112142204020032,110044031204131,114012010020012,100001420412010,100102110020102,102002204001140],
1121211020204132: [110020301320101,102014001130210,110401041140300,110002302201420,102202001120002,110200010003201,102421004041011,102240040202421,101001110300111,100130004300103],
121320202004301: [111014240041102,101100004200110,112021221130424,112200041103002,110400040402041,112001011112202,100112004304314,100232041001122,100223030001010,100104231000300],
1024242230240244: [110322104424202,101202421002031,102102220312102,103004204021310,112220102043102,110012310020200,102030130241300,103030120100220,100232224340041,112400002011010],
1002100010130110: [101400001011042,100002011020211,100100110010010,111110002213142,100002131030310,111102214100400,103220201402330,102321000221140,103113124030200,102110300311400],
200010402222410: [101210320301302,102100221340112,100114104101001,114002244103320,101023221000031,101400014040400,102012034000402,114221004002212,102100122002001,101000011021414],
112300020202111: [103141411420221,122102400312000,110002100110002,1340024302404122,100002001043010,113110330121030,410020013004430,1002300040412102,1210020204140003,123211320000102],
100102203210034: [102023021100022,111200302222011,112040241120204,111000022200000,100010011232324,110220030000133,110000330430311,101211221014003,103111230120100,102221220200021],
1021001032000012: [102020010203200,100011144312020,102011204001010,102001410221023,110130201302200,103041021430301,101100440320434,114000402211404,101000100302003,110000030430422],
1031102424140120: [100011010414200,111121102240240,102002121101110,102403100202003,110000100041101,100400000010033,100101211001320,101141020321000,103224101400400,102000002043020],
102001021434201: [110131122012210,114010200040441,110032014420232,100000344100100,111304022202211,102302011002003,102011021121200,100012441030002,110222042022111,103131004002200],
220100132400104: [1010400230221020,111320012221132,102302144110440,114140004123122,102143202000400,111020002202333,101321311004010,102110241342210,114122302311011,100002320411400],
1110121023020002: [100022041001002,111240340004003,111101222244030,114032424122040,112011012004300,101021401003220,100301020420101,102002202000000,100022024100041,102010410213111],
1042030001044121: [101230040301234,110033032100000,102303014024221,100100224304110,100400432200321,100020200012311,114140144122230,101400201102014,100000020004004,101040040303102],
1100100224104011: [111130112100210,102202200240222,110131044200121,100100004102234,101023014004000,110223332120231,100001024301322,114013044102010,114220114140122,110200210110130],
212002042300000: [100030220100110,110011202100142,112400102013231,114012002212100,100002114314022,114203110001031,112030122000211,111004012200210,100413131013301,110001102103432],
221001240043040: [100420021010013,112101302302202,102212011120200,102344111104233,100432104320110,114021014020000,102121321340000,114200214002024,100111221011100,100023104101340],
1040021200420124: [100042111220111,112220100202000,100042041220010,100403211010100,103240301400123,114102210214041,100021300103041,101010204000242,111334004121414,114101000213410],
},
)
| python | MIT | c3cc5a95d4f72e6ccc27ebae23344fce8cc70786 | 2026-01-04T14:40:23.735327Z | false |
psf/black | https://github.com/psf/black/blob/c3cc5a95d4f72e6ccc27ebae23344fce8cc70786/docs/conf.py | docs/conf.py | #
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import re
import string
from importlib.metadata import version
from pathlib import Path
from sphinx.application import Sphinx
CURRENT_DIR = Path(__file__).parent
def make_pypi_svg(version: str) -> None:
template: Path = CURRENT_DIR / "_static" / "pypi_template.svg"
target: Path = CURRENT_DIR / "_static" / "pypi.svg"
with open(str(template), encoding="utf8") as f:
svg: str = string.Template(f.read()).substitute(version=version)
with open(str(target), "w", encoding="utf8") as f:
f.write(svg)
def replace_pr_numbers_with_links(content: str) -> str:
"""Replaces all PR numbers with the corresponding GitHub link."""
return re.sub(r"#(\d+)", r"[#\1](https://github.com/psf/black/pull/\1)", content)
def handle_include_read(
app: Sphinx,
relative_path: Path,
parent_docname: str,
content: list[str],
) -> None:
"""Handler for the include-read sphinx event."""
if parent_docname == "change_log":
content[0] = replace_pr_numbers_with_links(content[0])
def setup(app: Sphinx) -> None:
"""Sets up a minimal sphinx extension."""
app.connect("include-read", handle_include_read)
# Necessary so Click doesn't hit an encode error when called by
# sphinxcontrib-programoutput on Windows.
os.putenv("pythonioencoding", "utf-8")
# -- Project information -----------------------------------------------------
project = "Black"
copyright = "2018-Present, Łukasz Langa and contributors to Black"
author = "Łukasz Langa and contributors to Black"
# Autopopulate version
# The version, including alpha/beta/rc tags, but not commit hash and datestamps
release = version("black").split("+")[0]
# The short X.Y version.
version = release
for sp in "abcfr":
version = version.split(sp)[0]
make_pypi_svg(release)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "4.4"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"myst_parser",
"sphinxcontrib.programoutput",
"sphinx_copybutton",
]
# If you need extensions of a certain version or higher, list them here.
needs_extensions = {"myst_parser": "0.13.7"}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# We need headers to be linkable to so ask MyST-Parser to autogenerate anchor IDs for
# headers up to and including level 3.
myst_heading_anchors = 3
# Prettier support formatting some MyST syntax but not all, so let's disable the
# unsupported yet still enabled by default ones.
myst_disable_syntax = [
"colon_fence",
"myst_block_break",
"myst_line_comment",
"math_block",
]
# Optional MyST Syntaxes
myst_enable_extensions = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "furo"
html_logo = "_static/logo2-readme.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "blackdoc"
# -- Options for LaTeX output ------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [(
master_doc,
"black.tex",
"Documentation for Black",
"Łukasz Langa and contributors to Black",
"manual",
)]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "black", "Documentation for Black", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [(
master_doc,
"Black",
"Documentation for Black",
author,
"Black",
"The uncompromising Python code formatter",
"Miscellaneous",
)]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
autodoc_member_order = "bysource"
# -- sphinx-copybutton configuration ----------------------------------------
copybutton_prompt_text = (
r">>> |\.\.\. |> |\$ |\# | In \[\d*\]: | {2,5}\.\.\.: | {5,8}: "
)
copybutton_prompt_is_regexp = True
copybutton_remove_prompts = True
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"<name>": ("https://docs.python.org/3/", None)}
| python | MIT | c3cc5a95d4f72e6ccc27ebae23344fce8cc70786 | 2026-01-04T14:40:23.735327Z | false |
zai-org/ChatGLM-6B | https://github.com/zai-org/ChatGLM-6B/blob/401bf3a8a7dd8a26fba189551dccfc61a7079b4e/web_demo_old.py | web_demo_old.py | from transformers import AutoModel, AutoTokenizer
import gradio as gr
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
model = model.eval()
MAX_TURNS = 20
MAX_BOXES = MAX_TURNS * 2
def predict(input, max_length, top_p, temperature, history=None):
if history is None:
history = []
for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p,
temperature=temperature):
updates = []
for query, response in history:
updates.append(gr.update(visible=True, value="用户:" + query))
updates.append(gr.update(visible=True, value="ChatGLM-6B:" + response))
if len(updates) < MAX_BOXES:
updates = updates + [gr.Textbox.update(visible=False)] * (MAX_BOXES - len(updates))
yield [history] + updates
with gr.Blocks() as demo:
state = gr.State([])
text_boxes = []
for i in range(MAX_BOXES):
if i % 2 == 0:
text_boxes.append(gr.Markdown(visible=False, label="提问:"))
else:
text_boxes.append(gr.Markdown(visible=False, label="回复:"))
with gr.Row():
with gr.Column(scale=4):
txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter", lines=11).style(
container=False)
with gr.Column(scale=1):
max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True)
top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
button = gr.Button("Generate")
button.click(predict, [txt, max_length, top_p, temperature, state], [state] + text_boxes)
demo.queue().launch(share=False, inbrowser=True)
| python | Apache-2.0 | 401bf3a8a7dd8a26fba189551dccfc61a7079b4e | 2026-01-04T14:40:23.749869Z | false |
zai-org/ChatGLM-6B | https://github.com/zai-org/ChatGLM-6B/blob/401bf3a8a7dd8a26fba189551dccfc61a7079b4e/api.py | api.py | from fastapi import FastAPI, Request
from transformers import AutoTokenizer, AutoModel
import uvicorn, json, datetime
import torch
DEVICE = "cuda"
DEVICE_ID = "0"
CUDA_DEVICE = f"{DEVICE}:{DEVICE_ID}" if DEVICE_ID else DEVICE
def torch_gc():
if torch.cuda.is_available():
with torch.cuda.device(CUDA_DEVICE):
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
app = FastAPI()
@app.post("/")
async def create_item(request: Request):
global model, tokenizer
json_post_raw = await request.json()
json_post = json.dumps(json_post_raw)
json_post_list = json.loads(json_post)
prompt = json_post_list.get('prompt')
history = json_post_list.get('history')
max_length = json_post_list.get('max_length')
top_p = json_post_list.get('top_p')
temperature = json_post_list.get('temperature')
response, history = model.chat(tokenizer,
prompt,
history=history,
max_length=max_length if max_length else 2048,
top_p=top_p if top_p else 0.7,
temperature=temperature if temperature else 0.95)
now = datetime.datetime.now()
time = now.strftime("%Y-%m-%d %H:%M:%S")
answer = {
"response": response,
"history": history,
"status": 200,
"time": time
}
log = "[" + time + "] " + '", prompt:"' + prompt + '", response:"' + repr(response) + '"'
print(log)
torch_gc()
return answer
if __name__ == '__main__':
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
model.eval()
uvicorn.run(app, host='0.0.0.0', port=8000, workers=1)
| python | Apache-2.0 | 401bf3a8a7dd8a26fba189551dccfc61a7079b4e | 2026-01-04T14:40:23.749869Z | false |
zai-org/ChatGLM-6B | https://github.com/zai-org/ChatGLM-6B/blob/401bf3a8a7dd8a26fba189551dccfc61a7079b4e/web_demo2.py | web_demo2.py | from transformers import AutoModel, AutoTokenizer
import streamlit as st
from streamlit_chat import message
st.set_page_config(
page_title="ChatGLM-6b 演示",
page_icon=":robot:"
)
@st.cache_resource
def get_model():
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
model = model.eval()
return tokenizer, model
MAX_TURNS = 20
MAX_BOXES = MAX_TURNS * 2
def predict(input, max_length, top_p, temperature, history=None):
tokenizer, model = get_model()
if history is None:
history = []
with container:
if len(history) > 0:
if len(history)>MAX_BOXES:
history = history[-MAX_TURNS:]
for i, (query, response) in enumerate(history):
message(query, avatar_style="big-smile", key=str(i) + "_user")
message(response, avatar_style="bottts", key=str(i))
message(input, avatar_style="big-smile", key=str(len(history)) + "_user")
st.write("AI正在回复:")
with st.empty():
for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p,
temperature=temperature):
query, response = history[-1]
st.write(response)
return history
container = st.container()
# create a prompt text for the text generation
prompt_text = st.text_area(label="用户命令输入",
height = 100,
placeholder="请在这儿输入您的命令")
max_length = st.sidebar.slider(
'max_length', 0, 4096, 2048, step=1
)
top_p = st.sidebar.slider(
'top_p', 0.0, 1.0, 0.6, step=0.01
)
temperature = st.sidebar.slider(
'temperature', 0.0, 1.0, 0.95, step=0.01
)
if 'state' not in st.session_state:
st.session_state['state'] = []
if st.button("发送", key="predict"):
with st.spinner("AI正在思考,请稍等........"):
# text generation
st.session_state["state"] = predict(prompt_text, max_length, top_p, temperature, st.session_state["state"])
| python | Apache-2.0 | 401bf3a8a7dd8a26fba189551dccfc61a7079b4e | 2026-01-04T14:40:23.749869Z | false |
zai-org/ChatGLM-6B | https://github.com/zai-org/ChatGLM-6B/blob/401bf3a8a7dd8a26fba189551dccfc61a7079b4e/web_demo_vision.py | web_demo_vision.py | from transformers import AutoModel, AutoTokenizer
import gradio as gr
import mdtex2html
tokenizer = AutoTokenizer.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True).half().cuda()
model = model.eval()
"""Override Chatbot.postprocess"""
def postprocess(self, y):
if y is None:
return []
for i, (message, response) in enumerate(y):
y[i] = (
None if message is None else mdtex2html.convert((message)),
None if response is None else mdtex2html.convert(response),
)
return y
gr.Chatbot.postprocess = postprocess
def parse_text(text):
"""copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
lines = text.split("\n")
lines = [line for line in lines if line != ""]
count = 0
for i, line in enumerate(lines):
if "```" in line:
count += 1
items = line.split('`')
if count % 2 == 1:
lines[i] = f'<pre><code class="language-{items[-1]}">'
else:
lines[i] = f'<br></code></pre>'
else:
if i > 0:
if count % 2 == 1:
line = line.replace("`", "\`")
line = line.replace("<", "<")
line = line.replace(">", ">")
line = line.replace(" ", " ")
line = line.replace("*", "*")
line = line.replace("_", "_")
line = line.replace("-", "-")
line = line.replace(".", ".")
line = line.replace("!", "!")
line = line.replace("(", "(")
line = line.replace(")", ")")
line = line.replace("$", "$")
lines[i] = "<br>"+line
text = "".join(lines)
return text
def predict(input, image_path, chatbot, max_length, top_p, temperature, history):
if image_path is None:
return [(input, "图片为空!请重新上传图片并重试。")]
chatbot.append((parse_text(input), ""))
for response, history in model.stream_chat(tokenizer, image_path, input, history, max_length=max_length, top_p=top_p,
temperature=temperature):
chatbot[-1] = (parse_text(input), parse_text(response))
yield chatbot, history
def predict_new_image(image_path, chatbot, max_length, top_p, temperature):
input, history = "描述这张图片。", []
chatbot.append((parse_text(input), ""))
for response, history in model.stream_chat(tokenizer, image_path, input, history, max_length=max_length,
top_p=top_p,
temperature=temperature):
chatbot[-1] = (parse_text(input), parse_text(response))
yield chatbot, history
def reset_user_input():
return gr.update(value='')
def reset_state():
return None, [], []
with gr.Blocks() as demo:
gr.HTML("""<h1 align="center">VisualGLM</h1>""")
image_path = gr.Image(type="filepath", label="Image Prompt", value=None)
chatbot = gr.Chatbot()
with gr.Row():
with gr.Column(scale=4):
with gr.Column(scale=12):
user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style(
container=False)
with gr.Column(min_width=32, scale=1):
submitBtn = gr.Button("Submit", variant="primary")
with gr.Column(scale=1):
emptyBtn = gr.Button("Clear History")
max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True)
top_p = gr.Slider(0, 1, value=0.4, step=0.01, label="Top P", interactive=True)
temperature = gr.Slider(0, 1, value=0.8, step=0.01, label="Temperature", interactive=True)
history = gr.State([])
submitBtn.click(predict, [user_input, image_path, chatbot, max_length, top_p, temperature, history], [chatbot, history],
show_progress=True)
image_path.upload(predict_new_image, [image_path, chatbot, max_length, top_p, temperature], [chatbot, history],
show_progress=True)
image_path.clear(reset_state, outputs=[image_path, chatbot, history], show_progress=True)
submitBtn.click(reset_user_input, [], [user_input])
emptyBtn.click(reset_state, outputs=[image_path, chatbot, history], show_progress=True)
demo.queue().launch(share=False, inbrowser=True)
| python | Apache-2.0 | 401bf3a8a7dd8a26fba189551dccfc61a7079b4e | 2026-01-04T14:40:23.749869Z | false |
zai-org/ChatGLM-6B | https://github.com/zai-org/ChatGLM-6B/blob/401bf3a8a7dd8a26fba189551dccfc61a7079b4e/cli_demo_vision.py | cli_demo_vision.py | import os
import platform
import signal
import sys
from transformers import AutoTokenizer, AutoModel
import readline
tokenizer = AutoTokenizer.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True).half().cuda()
model = model.eval()
os_name = platform.system()
clear_command = 'cls' if os_name == 'Windows' else 'clear'
stop_stream = False
def build_prompt(history, prefix):
prompt = prefix
for query, response in history:
prompt += f"\n\n用户:{query}"
prompt += f"\n\nChatGLM-6B:{response}"
return prompt
def signal_handler(signal, frame):
global stop_stream
stop_stream = True
def main():
global stop_stream
while True:
history = []
prefix = "欢迎使用 VisualGLM-6B 模型,输入图片路径和内容即可进行对话,clear 清空对话历史,stop 终止程序"
print(prefix)
image_path = input("\n请输入图片路径:")
if image_path == "stop":
break
prefix = prefix + "\n" + image_path
query = "描述这张图片。"
while True:
count = 0
for response, history in model.stream_chat(tokenizer, image_path, query, history=history):
if stop_stream:
stop_stream = False
break
else:
count += 1
if count % 8 == 0:
os.system(clear_command)
print(build_prompt(history, prefix), flush=True)
signal.signal(signal.SIGINT, signal_handler)
os.system(clear_command)
print(build_prompt(history, prefix), flush=True)
query = input("\n用户:")
if query.strip() == "clear":
break
if query.strip() == "stop":
sys.exit(0)
if __name__ == "__main__":
main()
| python | Apache-2.0 | 401bf3a8a7dd8a26fba189551dccfc61a7079b4e | 2026-01-04T14:40:23.749869Z | false |
zai-org/ChatGLM-6B | https://github.com/zai-org/ChatGLM-6B/blob/401bf3a8a7dd8a26fba189551dccfc61a7079b4e/utils.py | utils.py | import os
from typing import Dict, Tuple, Union, Optional
from torch.nn import Module
from transformers import AutoModel
def auto_configure_device_map(num_gpus: int) -> Dict[str, int]:
# transformer.word_embeddings 占用1层
# transformer.final_layernorm 和 lm_head 占用1层
# transformer.layers 占用 28 层
# 总共30层分配到num_gpus张卡上
num_trans_layers = 28
per_gpu_layers = 30 / num_gpus
# bugfix: 在linux中调用torch.embedding传入的weight,input不在同一device上,导致RuntimeError
# windows下 model.device 会被设置成 transformer.word_embeddings.device
# linux下 model.device 会被设置成 lm_head.device
# 在调用chat或者stream_chat时,input_ids会被放到model.device上
# 如果transformer.word_embeddings.device和model.device不同,则会导致RuntimeError
# 因此这里将transformer.word_embeddings,transformer.final_layernorm,lm_head都放到第一张卡上
device_map = {'transformer.word_embeddings': 0,
'transformer.final_layernorm': 0, 'lm_head': 0}
used = 2
gpu_target = 0
for i in range(num_trans_layers):
if used >= per_gpu_layers:
gpu_target += 1
used = 0
assert gpu_target < num_gpus
device_map[f'transformer.layers.{i}'] = gpu_target
used += 1
return device_map
def load_model_on_gpus(checkpoint_path: Union[str, os.PathLike], num_gpus: int = 2,
device_map: Optional[Dict[str, int]] = None, **kwargs) -> Module:
if num_gpus < 2 and device_map is None:
model = AutoModel.from_pretrained(checkpoint_path, trust_remote_code=True, **kwargs).half().cuda()
else:
from accelerate import dispatch_model
model = AutoModel.from_pretrained(checkpoint_path, trust_remote_code=True, **kwargs).half()
if device_map is None:
device_map = auto_configure_device_map(num_gpus)
model = dispatch_model(model, device_map=device_map)
return model
| python | Apache-2.0 | 401bf3a8a7dd8a26fba189551dccfc61a7079b4e | 2026-01-04T14:40:23.749869Z | false |
zai-org/ChatGLM-6B | https://github.com/zai-org/ChatGLM-6B/blob/401bf3a8a7dd8a26fba189551dccfc61a7079b4e/web_demo.py | web_demo.py | from transformers import AutoModel, AutoTokenizer
import gradio as gr
import mdtex2html
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
model = model.eval()
"""Override Chatbot.postprocess"""
def postprocess(self, y):
if y is None:
return []
for i, (message, response) in enumerate(y):
y[i] = (
None if message is None else mdtex2html.convert((message)),
None if response is None else mdtex2html.convert(response),
)
return y
gr.Chatbot.postprocess = postprocess
def parse_text(text):
"""copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
lines = text.split("\n")
lines = [line for line in lines if line != ""]
count = 0
for i, line in enumerate(lines):
if "```" in line:
count += 1
items = line.split('`')
if count % 2 == 1:
lines[i] = f'<pre><code class="language-{items[-1]}">'
else:
lines[i] = f'<br></code></pre>'
else:
if i > 0:
if count % 2 == 1:
line = line.replace("`", "\`")
line = line.replace("<", "<")
line = line.replace(">", ">")
line = line.replace(" ", " ")
line = line.replace("*", "*")
line = line.replace("_", "_")
line = line.replace("-", "-")
line = line.replace(".", ".")
line = line.replace("!", "!")
line = line.replace("(", "(")
line = line.replace(")", ")")
line = line.replace("$", "$")
lines[i] = "<br>"+line
text = "".join(lines)
return text
def predict(input, chatbot, max_length, top_p, temperature, history):
chatbot.append((parse_text(input), ""))
for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p,
temperature=temperature):
chatbot[-1] = (parse_text(input), parse_text(response))
yield chatbot, history
def reset_user_input():
return gr.update(value='')
def reset_state():
return [], []
with gr.Blocks() as demo:
gr.HTML("""<h1 align="center">ChatGLM</h1>""")
chatbot = gr.Chatbot()
with gr.Row():
with gr.Column(scale=4):
with gr.Column(scale=12):
user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style(
container=False)
with gr.Column(min_width=32, scale=1):
submitBtn = gr.Button("Submit", variant="primary")
with gr.Column(scale=1):
emptyBtn = gr.Button("Clear History")
max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True)
top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
history = gr.State([])
submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history],
show_progress=True)
submitBtn.click(reset_user_input, [], [user_input])
emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True)
demo.queue().launch(share=False, inbrowser=True)
| python | Apache-2.0 | 401bf3a8a7dd8a26fba189551dccfc61a7079b4e | 2026-01-04T14:40:23.749869Z | false |
zai-org/ChatGLM-6B | https://github.com/zai-org/ChatGLM-6B/blob/401bf3a8a7dd8a26fba189551dccfc61a7079b4e/cli_demo.py | cli_demo.py | import os
import platform
import signal
from transformers import AutoTokenizer, AutoModel
import readline
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
model = model.eval()
os_name = platform.system()
clear_command = 'cls' if os_name == 'Windows' else 'clear'
stop_stream = False
def build_prompt(history):
prompt = "欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序"
for query, response in history:
prompt += f"\n\n用户:{query}"
prompt += f"\n\nChatGLM-6B:{response}"
return prompt
def signal_handler(signal, frame):
global stop_stream
stop_stream = True
def main():
history = []
global stop_stream
print("欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序")
while True:
query = input("\n用户:")
if query.strip() == "stop":
break
if query.strip() == "clear":
history = []
os.system(clear_command)
print("欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序")
continue
count = 0
for response, history in model.stream_chat(tokenizer, query, history=history):
if stop_stream:
stop_stream = False
break
else:
count += 1
if count % 8 == 0:
os.system(clear_command)
print(build_prompt(history), flush=True)
signal.signal(signal.SIGINT, signal_handler)
os.system(clear_command)
print(build_prompt(history), flush=True)
if __name__ == "__main__":
main()
| python | Apache-2.0 | 401bf3a8a7dd8a26fba189551dccfc61a7079b4e | 2026-01-04T14:40:23.749869Z | false |
zai-org/ChatGLM-6B | https://github.com/zai-org/ChatGLM-6B/blob/401bf3a8a7dd8a26fba189551dccfc61a7079b4e/ptuning/trainer_seq2seq.py | ptuning/trainer_seq2seq.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers.deepspeed import is_deepspeed_zero3_enabled
from trainer import Trainer
from transformers.trainer_utils import PredictionOutput
from transformers.utils import logging
logger = logging.get_logger(__name__)
class Seq2SeqTrainer(Trainer):
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
**gen_kwargs
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init `compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (`Dataset`, *optional*):
Pass a dataset if you wish to override `self.eval_dataset`. If it is an [`~datasets.Dataset`], columns
not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__`
method.
ignore_keys (`List[str]`, *optional*):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is `"eval"` (default)
max_length (`int`, *optional*):
The maximum target length to use when predicting with the generate method.
num_beams (`int`, *optional*):
Number of beams for beam search that will be used when predicting with the generate method. 1 means no
beam search.
gen_kwargs:
Additional `generate` specific kwargs.
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
gen_kwargs = gen_kwargs.copy()
if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None:
gen_kwargs["max_length"] = self.args.generation_max_length
gen_kwargs["num_beams"] = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
)
self._gen_kwargs = gen_kwargs
return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
def predict(
self,
test_dataset: Dataset,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "test",
**gen_kwargs
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in `evaluate()`.
Args:
test_dataset (`Dataset`):
Dataset to run the predictions on. If it is a [`~datasets.Dataset`], columns not accepted by the
`model.forward()` method are automatically removed. Has to implement the method `__len__`
ignore_keys (`List[str]`, *optional*):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is `"eval"` (default)
max_length (`int`, *optional*):
The maximum target length to use when predicting with the generate method.
num_beams (`int`, *optional*):
Number of beams for beam search that will be used when predicting with the generate method. 1 means no
beam search.
gen_kwargs:
Additional `generate` specific kwargs.
<Tip>
If your predictions or labels have different sequence lengths (for instance because you're doing dynamic
padding in a token classification task) the predictions will be padded (on the right) to allow for
concatenation into one array. The padding index is -100.
</Tip>
Returns: *NamedTuple* A namedtuple with the following keys:
- predictions (`np.ndarray`): The predictions on `test_dataset`.
- label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).
- metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained
labels).
"""
gen_kwargs = gen_kwargs.copy()
if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None:
gen_kwargs["max_length"] = self.args.generation_max_length
gen_kwargs["num_beams"] = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
)
self._gen_kwargs = gen_kwargs
return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on `model` using `inputs`.
Subclass and override to inject custom behavior.
Args:
model (`nn.Module`):
The model to evaluate.
inputs (`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument `labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (`bool`):
Whether or not to return the loss only.
Return:
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
labels (each being optional).
"""
if not self.args.predict_with_generate or prediction_loss_only:
return super().prediction_step(
model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys
)
has_labels = "labels" in inputs
inputs = self._prepare_inputs(inputs)
# XXX: adapt synced_gpus for fairscale as well
gen_kwargs = self._gen_kwargs.copy()
if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None:
gen_kwargs["max_length"] = self.model.config.max_length
gen_kwargs["num_beams"] = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.model.config.num_beams
)
default_synced_gpus = True if is_deepspeed_zero3_enabled() else False
gen_kwargs["synced_gpus"] = (
gen_kwargs["synced_gpus"] if gen_kwargs.get("synced_gpus") is not None else default_synced_gpus
)
if "attention_mask" in inputs:
gen_kwargs["attention_mask"] = inputs.get("attention_mask", None)
if "position_ids" in inputs:
gen_kwargs["position_ids"] = inputs.get("position_ids", None)
if "global_attention_mask" in inputs:
gen_kwargs["global_attention_mask"] = inputs.get("global_attention_mask", None)
# prepare generation inputs
# some encoder-decoder models can have varying encoder's and thus
# varying model input names
if hasattr(self.model, "encoder") and self.model.encoder.main_input_name != self.model.main_input_name:
generation_inputs = inputs[self.model.encoder.main_input_name]
else:
generation_inputs = inputs[self.model.main_input_name]
gen_kwargs["input_ids"] = generation_inputs
generated_tokens = self.model.generate(**gen_kwargs)
generated_tokens = generated_tokens[:, generation_inputs.size()[-1]:]
# in case the batch is shorter than max length, the output should be padded
if gen_kwargs.get("max_length") is not None and generated_tokens.shape[-1] < gen_kwargs["max_length"]:
generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])
elif gen_kwargs.get("max_new_tokens") is not None and generated_tokens.shape[-1] < (
gen_kwargs["max_new_tokens"] + 1
):
generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_new_tokens"] + 1)
loss = None
if self.args.prediction_loss_only:
return (loss, None, None)
if has_labels:
labels = inputs["labels"]
if gen_kwargs.get("max_length") is not None and labels.shape[-1] < gen_kwargs["max_length"]:
labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])
elif gen_kwargs.get("max_new_tokens") is not None and labels.shape[-1] < (
gen_kwargs["max_new_tokens"] + 1
):
labels = self._pad_tensors_to_max_len(labels, (gen_kwargs["max_new_tokens"] + 1))
else:
labels = None
return (loss, generated_tokens, labels)
def _pad_tensors_to_max_len(self, tensor, max_length):
if self.tokenizer is not None and hasattr(self.tokenizer, "pad_token_id"):
# If PAD token is not defined at least EOS token has to be defined
pad_token_id = (
self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else self.tokenizer.eos_token_id
)
else:
if self.model.config.pad_token_id is not None:
pad_token_id = self.model.config.pad_token_id
else:
raise ValueError("Pad_token_id must be set in the configuration of the model, in order to pad tensors")
padded_tensor = pad_token_id * torch.ones(
(tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
)
padded_tensor[:, : tensor.shape[-1]] = tensor
return padded_tensor
| python | Apache-2.0 | 401bf3a8a7dd8a26fba189551dccfc61a7079b4e | 2026-01-04T14:40:23.749869Z | false |
zai-org/ChatGLM-6B | https://github.com/zai-org/ChatGLM-6B/blob/401bf3a8a7dd8a26fba189551dccfc61a7079b4e/ptuning/arguments.py | ptuning/arguments.py | from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
ptuning_checkpoint: str = field(
default=None, metadata={"help": "Path to p-tuning v2 checkpoints"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
},
)
resize_position_embeddings: Optional[bool] = field(
default=None,
metadata={
"help": (
"Whether to automatically resize the position embeddings if `max_source_length` exceeds "
"the model's position embeddings."
)
},
)
quantization_bit: Optional[int] = field(
default=None
)
pre_seq_len: Optional[int] = field(
default=None
)
prefix_projection: bool = field(
default=False
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
lang: Optional[str] = field(default=None, metadata={"help": "Language id for summarization."})
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
prompt_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
)
response_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the summaries (for summarization)."},
)
history_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the history of chat."},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={
"help": (
"An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)."
)
},
)
test_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_source_length: Optional[int] = field(
default=1024,
metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
max_target_length: Optional[int] = field(
default=128,
metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
val_max_target_length: Optional[int] = field(
default=None,
metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": (
"Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
},
)
num_beams: Optional[int] = field(
default=None,
metadata={
"help": (
"Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
"which is used during ``evaluate`` and ``predict``."
)
},
)
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={
"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
},
)
source_prefix: Optional[str] = field(
default="", metadata={"help": "A prefix to add before every source text (useful for T5 models)."}
)
forced_bos_token: Optional[str] = field(
default=None,
metadata={
"help": (
"The token to force as the first generated token after the decoder_start_token_id."
"Useful for multilingual models like mBART where the first generated token"
"needs to be the target language token (Usually it is the target language token)"
)
},
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None and self.test_file is None:
raise ValueError("Need either a dataset name or a training/validation/test file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if self.val_max_target_length is None:
self.val_max_target_length = self.max_target_length
| python | Apache-2.0 | 401bf3a8a7dd8a26fba189551dccfc61a7079b4e | 2026-01-04T14:40:23.749869Z | false |
zai-org/ChatGLM-6B | https://github.com/zai-org/ChatGLM-6B/blob/401bf3a8a7dd8a26fba189551dccfc61a7079b4e/ptuning/main.py | ptuning/main.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for sequence to sequence.
"""
# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.
import logging
import os
import sys
import json
import numpy as np
from datasets import load_dataset
import jieba
from rouge_chinese import Rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
import torch
import transformers
from transformers import (
AutoConfig,
AutoModel,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainingArguments,
set_seed,
)
from trainer_seq2seq import Seq2SeqTrainer
from arguments import ModelArguments, DataTrainingArguments
logger = logging.getLogger(__name__)
def main():
    """CLI entry point: fine-tune / evaluate / predict a ChatGLM-style seq2seq
    model, optionally with P-tuning v2 prefix encoding and weight quantization.

    Parses ``ModelArguments``, ``DataTrainingArguments`` and
    ``Seq2SeqTrainingArguments`` either from the command line or from a single
    JSON file passed as the only argument, then runs the requested
    train/eval/predict phases. Returns an (empty) results dict.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    # datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    # FIX: the two f-string halves were previously concatenated without a
    # separator, gluing "n_gpu: N" and "distributed training: ..." together.
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Load dataset.
    # NOTE(review): only file-based loading is supported here — if none of the
    # train/validation/test files is given, `extension` is never bound and the
    # load_dataset call below raises NameError. Confirm whether dataset_name
    # loading was intentionally dropped from this script.
    data_files = {}
    if data_args.train_file is not None:
        data_files["train"] = data_args.train_file
        extension = data_args.train_file.split(".")[-1]
    if data_args.validation_file is not None:
        data_files["validation"] = data_args.validation_file
        extension = data_args.validation_file.split(".")[-1]
    if data_args.test_file is not None:
        data_files["test"] = data_args.test_file
        extension = data_args.test_file.split(".")[-1]

    raw_datasets = load_dataset(
        extension,
        data_files=data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Load pretrained model and tokenizer
    config = AutoConfig.from_pretrained(model_args.model_name_or_path, trust_remote_code=True)
    config.pre_seq_len = model_args.pre_seq_len
    config.prefix_projection = model_args.prefix_projection

    tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, trust_remote_code=True)

    if model_args.ptuning_checkpoint is not None:
        # Evaluation path: load the base model, then overlay only the prefix
        # encoder weights stored in the p-tuning checkpoint.
        model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True)
        prefix_state_dict = torch.load(os.path.join(model_args.ptuning_checkpoint, "pytorch_model.bin"))
        new_prefix_state_dict = {}
        for k, v in prefix_state_dict.items():
            if k.startswith("transformer.prefix_encoder."):
                # Strip the module-path prefix so keys match the prefix
                # encoder submodule's own state dict.
                new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
        model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
    else:
        model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True)

    if model_args.quantization_bit is not None:
        print(f"Quantized to {model_args.quantization_bit} bit")
        model = model.quantize(model_args.quantization_bit)
    if model_args.pre_seq_len is not None:
        # P-tuning v2: backbone in fp16, only the prefix encoder kept in fp32
        # (it is the part that receives gradients).
        model = model.half()
        model.transformer.prefix_encoder.float()
    else:
        # Finetune: full model in fp32.
        model = model.float()

    prefix = data_args.source_prefix if data_args.source_prefix is not None else ""

    # Preprocessing the datasets.
    # We need to tokenize inputs and targets.
    if training_args.do_train:
        column_names = raw_datasets["train"].column_names
    elif training_args.do_eval:
        column_names = raw_datasets["validation"].column_names
    elif training_args.do_predict:
        column_names = raw_datasets["test"].column_names
    else:
        logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
        return

    # Get the column names for input/target.
    prompt_column = data_args.prompt_column
    response_column = data_args.response_column
    history_column = data_args.history_column

    # Temporarily set max_target_length for training.
    max_target_length = data_args.max_target_length

    def preprocess_function_eval(examples):
        # Build ChatGLM-style "[Round i]\n问:...\n答:..." prompts (multi-turn
        # history flattened into one string) and tokenize prompts and targets
        # separately; targets become the labels.
        inputs, targets = [], []
        for i in range(len(examples[prompt_column])):
            if examples[prompt_column][i] and examples[response_column][i]:
                query = examples[prompt_column][i]
                if history_column is None or len(examples[history_column][i]) == 0:
                    prompt = query
                else:
                    prompt = ""
                    history = examples[history_column][i]
                    for turn_idx, (old_query, response) in enumerate(history):
                        prompt += "[Round {}]\n问:{}\n答:{}\n".format(turn_idx, old_query, response)
                    prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
                inputs.append(prompt)
                targets.append(examples[response_column][i])

        inputs = [prefix + inp for inp in inputs]
        model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, truncation=True, padding=True)
        labels = tokenizer(text_target=targets, max_length=max_target_length, truncation=True)

        if data_args.ignore_pad_token_for_loss:
            # Replace pad ids in the labels by -100 so the loss ignores them.
            labels["input_ids"] = [
                [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
            ]
        model_inputs["labels"] = labels["input_ids"]

        return model_inputs

    def preprocess_function_train(examples):
        # Concatenate prompt + answer into a single decoder sequence padded to
        # max_source_length + max_target_length; label positions covering the
        # prompt/context are masked with -100 so only the answer is trained on.
        max_seq_length = data_args.max_source_length + data_args.max_target_length

        model_inputs = {
            "input_ids": [],
            "labels": [],
        }
        for i in range(len(examples[prompt_column])):
            if examples[prompt_column][i] and examples[response_column][i]:
                query, answer = examples[prompt_column][i], examples[response_column][i]

                if history_column is None:
                    prompt = query
                else:
                    prompt = ""
                    history = examples[history_column][i]
                    for turn_idx, (old_query, response) in enumerate(history):
                        prompt += "[Round {}]\n问:{}\n答:{}\n".format(turn_idx, old_query, response)
                    prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)

                prompt = prefix + prompt
                a_ids = tokenizer.encode(text=prompt, add_special_tokens=False)
                b_ids = tokenizer.encode(text=answer, add_special_tokens=False)

                # Truncate to leave room for the special tokens added by
                # build_inputs_with_special_tokens below (1 on the source
                # side, 2 on the target side).
                if len(a_ids) > data_args.max_source_length - 1:
                    a_ids = a_ids[: data_args.max_source_length - 1]

                if len(b_ids) > data_args.max_target_length - 2:
                    b_ids = b_ids[: data_args.max_target_length - 2]

                input_ids = tokenizer.build_inputs_with_special_tokens(a_ids, b_ids)

                # Everything before the BOS token is context; mask its label
                # positions with -100 (mask_position + 1 == context_length).
                context_length = input_ids.index(tokenizer.bos_token_id)
                mask_position = context_length - 1
                labels = [-100] * context_length + input_ids[mask_position+1:]

                pad_len = max_seq_length - len(input_ids)
                input_ids = input_ids + [tokenizer.pad_token_id] * pad_len
                labels = labels + [tokenizer.pad_token_id] * pad_len
                if data_args.ignore_pad_token_for_loss:
                    labels = [(l if l != tokenizer.pad_token_id else -100) for l in labels]

                model_inputs["input_ids"].append(input_ids)
                model_inputs["labels"].append(labels)

        return model_inputs

    def print_dataset_example(example):
        # Debug helper: show one tokenized example both as ids and decoded text.
        print("input_ids",example["input_ids"])
        print("inputs", tokenizer.decode(example["input_ids"]))
        print("label_ids", example["labels"])
        print("labels", tokenizer.decode(example["labels"]))

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function_train,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                remove_columns=column_names,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        print_dataset_example(train_dataset[0])

    if training_args.do_eval:
        # Validation uses its own (possibly longer) target length budget.
        max_target_length = data_args.val_max_target_length
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function_eval,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                remove_columns=column_names,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )
        print_dataset_example(eval_dataset[0])

    if training_args.do_predict:
        max_target_length = data_args.val_max_target_length
        if "test" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function_eval,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                remove_columns=column_names,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )
        print_dataset_example(predict_dataset[0])

    # Data collator: no extra padding here — training examples were already
    # padded to a fixed length in preprocess_function_train.
    label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
    data_collator = DataCollatorForSeq2Seq(
        tokenizer,
        model=model,
        label_pad_token_id=label_pad_token_id,
        pad_to_multiple_of=None,
        padding=False
    )

    # Metric
    def compute_metrics(eval_preds):
        # Chinese ROUGE-1/2/L (jieba word segmentation) and BLEU-4 between
        # decoded predictions and labels; returns the mean of each score.
        preds, labels = eval_preds
        if isinstance(preds, tuple):
            preds = preds[0]
        decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
        if data_args.ignore_pad_token_for_loss:
            # Replace -100 in the labels as we can't decode them.
            labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

        score_dict = {
            "rouge-1": [],
            "rouge-2": [],
            "rouge-l": [],
            "bleu-4": []
        }
        for pred, label in zip(decoded_preds, decoded_labels):
            hypothesis = list(jieba.cut(pred))
            reference = list(jieba.cut(label))
            rouge = Rouge()
            scores = rouge.get_scores(' '.join(hypothesis), ' '.join(reference))
            result = scores[0]

            for k, v in result.items():
                score_dict[k].append(round(v["f"] * 100, 4))
            # BLEU-4 is computed on character lists, not jieba tokens.
            bleu_score = sentence_bleu([list(label)], list(pred), smoothing_function=SmoothingFunction().method3)
            score_dict["bleu-4"].append(round(bleu_score * 100, 4))

        for k, v in score_dict.items():
            score_dict[k] = float(np.mean(v))
        return score_dict

    # Override the decoding parameters of Seq2SeqTrainer
    training_args.generation_max_length = (
        training_args.generation_max_length
        if training_args.generation_max_length is not None
        else data_args.val_max_target_length
    )
    training_args.generation_num_beams = (
        data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams
    )
    # Initialize our Trainer
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics if training_args.predict_with_generate else None,
        save_prefixencoder=model_args.pre_seq_len is not None
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        # elif last_checkpoint is not None:
        #     checkpoint = last_checkpoint
        model.gradient_checkpointing_enable()
        model.enable_input_require_grads()
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        # trainer.save_model()  # Saves the tokenizer too for easy upload

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    # Generation length budget: source + target (+1 for an extra special token).
    max_seq_length = data_args.max_source_length + data_args.max_target_length + 1
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="eval", do_sample=True, top_p=0.7, max_length=max_seq_length, temperature=0.95)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        predict_results = trainer.predict(predict_dataset, metric_key_prefix="predict", max_length=max_seq_length, do_sample=True, top_p=0.7, temperature=0.95)
        metrics = predict_results.metrics
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        if trainer.is_world_process_zero():
            if training_args.predict_with_generate:
                predictions = tokenizer.batch_decode(
                    predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                predictions = [pred.strip() for pred in predictions]
                labels = tokenizer.batch_decode(
                    predict_results.label_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                labels = [label.strip() for label in labels]
                # Write one JSON object per line: {"labels": ..., "predict": ...}.
                output_prediction_file = os.path.join(training_args.output_dir, "generated_predictions.txt")
                with open(output_prediction_file, "w", encoding="utf-8") as writer:
                    for p, l in zip(predictions, labels):
                        res = json.dumps({"labels": l, "predict": p}, ensure_ascii=False)
                        writer.write(f"{res}\n")
    return results
def _mp_fn(index):
    """TPU entry point for `xla_spawn`; `index` is the process ordinal (unused)."""
    # For xla_spawn (TPUs)
    main()
# Standard CLI entry point.
if __name__ == "__main__":
    main()
| python | Apache-2.0 | 401bf3a8a7dd8a26fba189551dccfc61a7079b4e | 2026-01-04T14:40:23.749869Z | false |
zai-org/ChatGLM-6B | https://github.com/zai-org/ChatGLM-6B/blob/401bf3a8a7dd8a26fba189551dccfc61a7079b4e/ptuning/trainer.py | ptuning/trainer.py | # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task.
"""
import contextlib
import functools
import glob
import inspect
import math
import os
import random
import re
import shutil
import sys
import time
import warnings
from collections.abc import Mapping
from distutils.util import strtobool
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from tqdm.auto import tqdm
# Integrations must be imported before ML frameworks:
# isort: off
from transformers.integrations import (
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
# isort: on
import numpy as np
import torch
import torch.distributed as dist
from huggingface_hub import Repository, create_repo
from packaging import version
from torch import nn
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from transformers import __version__
from transformers.configuration_utils import PretrainedConfig
from transformers.data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from transformers.debug_utils import DebugOption, DebugUnderflowOverflow
from transformers.deepspeed import deepspeed_init, is_deepspeed_zero3_enabled
from transformers.dependency_versions_check import dep_version_check
from transformers.modelcard import TrainingSummary
from transformers.modeling_utils import PreTrainedModel, load_sharded_checkpoint, unwrap_model
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.optimization import Adafactor, get_scheduler
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_10, is_torch_less_than_1_11
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from transformers.trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
IterableDatasetShard,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
ShardSampler,
distributed_broadcast_scalars,
distributed_concat,
find_batch_size,
get_module_class_from_name,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_truncate,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from transformers.trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalLoopOutput,
EvalPrediction,
FSDPOption,
HPSearchBackend,
HubStrategy,
IntervalStrategy,
PredictionOutput,
RemoveColumnsCollator,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
enable_full_determinism,
find_executable_batch_size,
get_last_checkpoint,
has_length,
number_of_arguments,
seed_worker,
set_seed,
speed_metrics,
)
from transformers.training_args import OptimizerNames, ParallelMode, TrainingArguments
from transformers.utils import (
CONFIG_NAME,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
can_return_loss,
find_labels,
get_full_repo_name,
is_accelerate_available,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_ipex_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_torch_compile_available,
is_torch_neuroncore_available,
is_torch_tpu_available,
logging,
)
from transformers.utils.generic import ContextManagers
# Whether torch's native CPU AMP is available (torch >= 1.10).
_is_native_cpu_amp_available = is_torch_greater_or_equal_than_1_10

# Default callback set; the progress callback is swapped for the notebook
# variant when running inside Jupyter.
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
    from transformers.utils.notebook import NotebookProgressCallback
    DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback

# Optional-dependency imports: each backend is imported only when available.
if is_apex_available():
    from apex import amp
if is_datasets_available():
    import datasets
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
    import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
    dep_version_check("fairscale")
    import fairscale
    from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
    from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
    from fairscale.nn.wrap import auto_wrap
    from fairscale.optim import OSS
    from fairscale.optim.grad_scaler import ShardedGradScaler
if is_sagemaker_mp_enabled():
    import smdistributed.modelparallel.torch as smp
    from smdistributed.modelparallel import __version__ as SMP_VERSION
    # SageMaker MP changed several APIs at 1.10; code paths branch on this flag.
    IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse("1.10")
    from transformers.trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat
else:
    IS_SAGEMAKER_MP_POST_1_10 = False
# skip_first_batches is only provided by accelerate >= 0.16; None otherwise.
skip_first_batches = None
if is_accelerate_available():
    from accelerate import __version__ as accelerate_version
    if version.parse(accelerate_version) >= version.parse("0.16"):
        from accelerate import skip_first_batches
# Import optuna for type checking only (avoids a hard runtime dependency).
if TYPE_CHECKING:
    import optuna
logger = logging.get_logger(__name__)
# Name of the files used for checkpointing
TRAINING_ARGS_NAME = "training_args.bin"
TRAINER_STATE_NAME = "trainer_state.json"
OPTIMIZER_NAME = "optimizer.pt"
SCHEDULER_NAME = "scheduler.pt"
SCALER_NAME = "scaler.pt"
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model ([`PreTrainedModel`] or `torch.nn.Module`, *optional*):
The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed.
<Tip>
[`Trainer`] is optimized to work with the [`PreTrainedModel`] provided by the library. You can still use
your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers
models.
</Tip>
args ([`TrainingArguments`], *optional*):
The arguments to tweak for training. Will default to a basic instance of [`TrainingArguments`] with the
`output_dir` set to a directory named *tmp_trainer* in the current directory if not provided.
data_collator (`DataCollator`, *optional*):
The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will
default to [`default_data_collator`] if no `tokenizer` is provided, an instance of
[`DataCollatorWithPadding`] otherwise.
train_dataset (`torch.utils.data.Dataset` or `torch.utils.data.IterableDataset`, *optional*):
The dataset to use for training. If it is a [`~datasets.Dataset`], columns not accepted by the
`model.forward()` method are automatically removed.
Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a
distributed fashion, your iterable dataset should either use a internal attribute `generator` that is a
`torch.Generator` for the randomization that must be identical on all processes (and the Trainer will
manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally
sets the seed of the RNGs used.
eval_dataset (Union[`torch.utils.data.Dataset`, Dict[str, `torch.utils.data.Dataset`]), *optional*):
The dataset to use for evaluation. If it is a [`~datasets.Dataset`], columns not accepted by the
`model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each
dataset prepending the dictionary key to the metric name.
tokenizer ([`PreTrainedTokenizerBase`], *optional*):
The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (`Callable[[], PreTrainedModel]`, *optional*):
A function that instantiates the model to be used. If provided, each call to [`~Trainer.train`] will start
from a new instance of the model as given by this function.
The function may have zero argument, or a single one containing the optuna/Ray Tune/SigOpt trial object, to
be able to choose different architectures according to hyper parameters (such as layer count, sizes of
inner layers, dropout probabilities etc).
compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*):
The function that will be used to compute metrics at evaluation. Must take a [`EvalPrediction`] and return
a dictionary string to metric values.
callbacks (List of [`TrainerCallback`], *optional*):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in [here](callback).
If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method.
optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your model
and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`.
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*):
A function that preprocess the logits right before caching them at each evaluation step. Must take two
tensors, the logits and the labels, and return the logits once processed as desired. The modifications made
by this function will be reflected in the predictions received by `compute_metrics`.
Note that the labels (second parameter) will be `None` if the dataset does not have them.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a [`PreTrainedModel`]
subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`,
the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. If the inner
model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to `False` if model parallel or deepspeed is used, or if the default
`TrainingArguments.place_model_on_device` is overridden to return `False` .
- **is_in_train** -- Whether or not a model is currently running `train` (e.g. when `evaluate` is called while
in `train`)
"""
from transformers.trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
def __init__(
self,
model: Union[PreTrainedModel, nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,
tokenizer: Optional[PreTrainedTokenizerBase] = None,
model_init: Optional[Callable[[], PreTrainedModel]] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
save_prefixencoder: bool = False,
):
self.save_prefixencoder = save_prefixencoder
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
# Seed must be set before instantiating the model when using model
enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
self.is_in_train = False
# memory metrics - must set up as early as possible
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# set the correct log level depending on the node
log_level = args.get_process_log_level()
logging.set_verbosity(log_level)
# force device and distributed setup init explicitly
args._setup_devices
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. `model_init` will"
" overwrite your model when calling the `train` method. This will become a fatal error in the next"
" release.",
FutureWarning,
)
self.model_init = model_init
if model.__class__.__name__ in MODEL_MAPPING_NAMES:
raise ValueError(
f"The model you have picked ({model.__class__.__name__}) cannot be used as is for training: it only "
"computes hidden states and does not accept any labels. You should choose a model with a head "
"suitable for your task like any of the `AutoModelForXxx` listed at "
"https://huggingface.co/docs/transformers/model_doc/auto."
)
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
# At this stage the model is already loaded
if getattr(model, "is_loaded_in_8bit", False):
if getattr(model, "_is_int8_training_enabled", False):
logger.info(
"The model is loaded in 8-bit precision. To train this model you need to add additional modules"
" inside the model such as adapters using `peft` library and freeze the model weights. Please"
" check "
" the examples in https://github.com/huggingface/peft for more details."
)
else:
raise ValueError(
"The model you want to train is loaded in 8-bit precision. if you want to fine-tune an 8-bit"
" model, please make sure that you have installed `bitsandbytes>=0.37.0`. "
)
# Setup Sharded DDP training
self.sharded_ddp = None
if len(args.sharded_ddp) > 0:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if len(args.fsdp) > 0:
raise ValueError(
"Using --sharded_ddp xxx together with --fsdp is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
raise ImportError(
"Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
)
elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.SIMPLE
elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
self.fsdp = None
if len(args.fsdp) > 0:
if args.deepspeed:
raise ValueError(
"Using --fsdp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if not args.fsdp_config["xla"] and args.local_rank == -1:
raise ValueError("Using fsdp only works in distributed training.")
# dep_version_check("torch>=1.12.0")
# Would have to update setup.py with torch>=1.12.0
# which isn't ideally given that it will force people not using FSDP to also use torch>=1.12.0
# below is the current alternative.
if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.12.0"):
raise ValueError("FSDP requires PyTorch >= 1.12.0")
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, ShardingStrategy
if FSDPOption.FULL_SHARD in args.fsdp:
self.fsdp = ShardingStrategy.FULL_SHARD
elif FSDPOption.SHARD_GRAD_OP in args.fsdp:
self.fsdp = ShardingStrategy.SHARD_GRAD_OP
elif FSDPOption.NO_SHARD in args.fsdp:
self.fsdp = ShardingStrategy.NO_SHARD
self.backward_prefetch = BackwardPrefetch.BACKWARD_PRE
if "backward_prefetch" in self.args.fsdp_config and "backward_pos" not in self.backward_prefetch:
self.backward_prefetch = BackwardPrefetch.BACKWARD_POST
self.forword_prefetch = False
if self.args.fsdp_config.get("forword_prefect", False):
self.forword_prefetch = True
self.limit_all_gathers = False
if self.args.fsdp_config.get("limit_all_gathers", False):
self.limit_all_gathers = True
# one place to sort out whether to place the model on device or not
# postpone switching model to cuda when:
# 1. MP - since we are trying to fit a much bigger than 1 gpu model
# 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
# and we only use deepspeed for training at the moment
# 3. full bf16 or fp16 eval - since the model needs to be cast to the right dtype first
# 4. Sharded DDP - same as MP
# 5. FSDP - same as MP
self.place_model_on_device = args.place_model_on_device
if (
self.is_model_parallel
or args.deepspeed
or ((args.fp16_full_eval or args.bf16_full_eval) and not args.do_train)
or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
or (self.fsdp is not None)
):
self.place_model_on_device = False
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
if self.place_model_on_device and not getattr(model, "is_loaded_in_8bit", False):
self._move_model_to_device(model, args.device)
# Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
if self.is_model_parallel:
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.preprocess_logits_for_metrics = preprocess_logits_for_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument. "
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
if is_torch_tpu_available() and self.optimizer is not None:
for param in self.model.parameters():
model_device = param.device
break
for param_group in self.optimizer.param_groups:
if len(param_group["params"]) > 0:
optimizer_device = param_group["params"][0].device
break
if model_device != optimizer_device:
raise ValueError(
"The model and the optimizer parameters are not on the same device, which probably means you"
" created an optimizer around your model **before** putting on the device and passing it to the"
" `Trainer`. Make sure the lines `import torch_xla.core.xla_model as xm` and"
" `model.to(xm.xla_device())` is performed before the optimizer creation in your script."
)
if ((self.sharded_ddp is not None) or args.deepspeed or (self.fsdp is not None)) and (
self.optimizer is not None or self.lr_scheduler is not None
):
raise RuntimeError(
"Passing `optimizers` is not allowed if Fairscale, Deepspeed or PyTorch FSDP is enabled."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create clone of distant repo and output directory if needed
if self.args.push_to_hub:
self.init_git_repo(at_init=True)
# In case of pull, we need to make sure every process has the latest.
if is_torch_tpu_available():
xm.rendezvous("init git repo")
elif args.local_rank != -1:
dist.barrier()
if self.args.should_save:
os.makedirs(self.args.output_dir, exist_ok=True)
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
if train_dataset is not None and not has_length(train_dataset) and args.max_steps <= 0:
raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
if (
train_dataset is not None
and isinstance(train_dataset, torch.utils.data.IterableDataset)
and args.group_by_length
):
raise ValueError("the `--group_by_length` option is only available for `Dataset`, not `IterableDataset")
self._signature_columns = None
# Mixed precision setup
self.use_apex = False
self.use_cuda_amp = False
self.use_cpu_amp = False
# Mixed precision setup for SageMaker Model Parallel
if is_sagemaker_mp_enabled():
# BF16 + model parallelism in SageMaker: currently not supported, raise an error
if args.bf16:
raise ValueError("SageMaker Model Parallelism does not support BF16 yet. Please use FP16 instead ")
if IS_SAGEMAKER_MP_POST_1_10:
# When there's mismatch between SMP config and trainer argument, use SMP config as truth
if args.fp16 != smp.state.cfg.fp16:
logger.warning(
f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16},"
f"but FP16 provided in trainer argument is {args.fp16},"
f"setting to {smp.state.cfg.fp16}"
)
args.fp16 = smp.state.cfg.fp16
else:
# smp < 1.10 does not support fp16 in trainer.
if hasattr(smp.state.cfg, "fp16"):
logger.warning(
f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, "
"but SageMaker Model Parallelism < 1.10 does not support FP16 in trainer."
)
if args.fp16 or args.bf16:
if args.half_precision_backend == "auto":
if args.device == torch.device("cpu"):
if args.fp16:
raise ValueError("Tried to use `fp16` but it is not supported on cpu")
elif _is_native_cpu_amp_available:
args.half_precision_backend = "cpu_amp"
else:
raise ValueError("Tried to use cpu amp but native cpu amp is not available")
else:
args.half_precision_backend = "cuda_amp"
logger.info(f"Using {args.half_precision_backend} half precision backend")
self.do_grad_scaling = False
if (args.fp16 or args.bf16) and not (args.deepspeed or is_sagemaker_mp_enabled() or is_torch_tpu_available()):
# deepspeed and SageMaker Model Parallel manage their own half precision
if args.half_precision_backend == "cuda_amp":
self.use_cuda_amp = True
self.amp_dtype = torch.float16 if args.fp16 else torch.bfloat16
# bf16 does not need grad scaling
self.do_grad_scaling = self.amp_dtype == torch.float16
if self.do_grad_scaling:
if self.sharded_ddp is not None:
self.scaler = ShardedGradScaler()
elif self.fsdp is not None:
from torch.distributed.fsdp.sharded_grad_scaler import (
ShardedGradScaler as FSDPShardedGradScaler,
)
self.scaler = FSDPShardedGradScaler()
elif is_torch_tpu_available():
from torch_xla.amp import GradScaler
self.scaler = GradScaler()
else:
self.scaler = torch.cuda.amp.GradScaler()
elif args.half_precision_backend == "cpu_amp":
self.use_cpu_amp = True
self.amp_dtype = torch.bfloat16
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to"
" https://www.github.com/nvidia/apex."
)
self.use_apex = True
# FP16 + model parallelism in SageMaker: gradient clipping does not work for now so we raise a helpful error.
if (
is_sagemaker_mp_enabled()
and self.use_cuda_amp
and args.max_grad_norm is not None
and args.max_grad_norm > 0
):
raise ValueError(
"SageMaker Model Parallelism in mixed precision mode does not support gradient clipping yet. Pass "
"along 'max_grad_norm': 0 in your hyperparameters."
)
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState(
is_local_process_zero=self.is_local_process_zero(),
is_world_process_zero=self.is_world_process_zero(),
)
self.control = TrainerControl()
# Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then
# returned to 0 every time flos need to be logged
self.current_flos = 0
self.hp_search_backend = None
self.use_tune_checkpoints = False
default_label_names = find_labels(self.model.__class__)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.can_return_loss = can_return_loss(self.model.__class__)
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
# Internal variables to keep track of the original batch size
self._train_batch_size = args.train_batch_size
# very last
self._memory_tracker.stop_and_update_metrics()
# torch.compile
if args.torch_compile and not is_torch_compile_available():
raise RuntimeError("Using torch.compile requires PyTorch 2.0 or higher.")
def add_callback(self, callback):
"""
Add a callback to the current list of [`~transformer.TrainerCallback`].
Args:
callback (`type` or [`~transformer.TrainerCallback`]):
A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the
first case, will instantiate a member of that class.
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
Remove a callback from the current list of [`~transformer.TrainerCallback`] and returns it.
If the callback is not found, returns `None` (and no error is raised).
Args:
callback (`type` or [`~transformer.TrainerCallback`]):
A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the
first case, will pop the first member of that class found in the list of callbacks.
Returns:
[`~transformer.TrainerCallback`]: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
| python | Apache-2.0 | 401bf3a8a7dd8a26fba189551dccfc61a7079b4e | 2026-01-04T14:40:23.749869Z | true |
zai-org/ChatGLM-6B | https://github.com/zai-org/ChatGLM-6B/blob/401bf3a8a7dd8a26fba189551dccfc61a7079b4e/ptuning/web_demo.py | ptuning/web_demo.py | import os, sys
import gradio as gr
import mdtex2html
import torch
import transformers
from transformers import (
AutoConfig,
AutoModel,
AutoTokenizer,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainingArguments,
set_seed,
)
from arguments import ModelArguments, DataTrainingArguments
# Populated by main() after loading; read by predict() when streaming responses.
model = None
tokenizer = None
"""Override Chatbot.postprocess"""
def postprocess(self, y):
    """Render chat history as HTML via mdtex2html (replaces gr.Chatbot.postprocess).

    Mutates and returns `y`, converting each (message, response) pair to HTML;
    `None` entries are passed through unchanged.
    """
    if y is None:
        return []
    for idx, (query, reply) in enumerate(y):
        rendered_query = None if query is None else mdtex2html.convert(query)
        rendered_reply = None if reply is None else mdtex2html.convert(reply)
        y[idx] = (rendered_query, rendered_reply)
    return y
# Monkey-patch Gradio's Chatbot so history entries are rendered through mdtex2html.
gr.Chatbot.postprocess = postprocess
def parse_text(text):
    """Convert chat text to display HTML (adapted from
    https://github.com/GaiZhenbiao/ChuanhuChatGPT/).

    Fenced ``` blocks become <pre><code class="language-..."> sections. Inside a
    code fence, markdown/HTML-sensitive characters are escaped to HTML entities
    so they render literally. Lines after the first are prefixed with <br>.

    NOTE: the entity escapes below were corrupted into no-op replaces (e.g.
    ``replace("<", "<")``) by HTML-entity decoding of the source; this restores
    the upstream ``&lt;``/``&gt;``/... targets.
    """
    lines = [line for line in text.split("\n") if line != ""]
    count = 0
    for i, line in enumerate(lines):
        if "```" in line:
            count += 1
            items = line.split("`")
            if count % 2 == 1:
                # Opening fence: whatever follows the backticks is the language tag.
                lines[i] = f'<pre><code class="language-{items[-1]}">'
            else:
                # Closing fence.
                lines[i] = "<br></code></pre>"
        elif i > 0:
            if count % 2 == 1:
                # Inside a code block: escape so the browser shows the characters
                # verbatim instead of interpreting them as markup.
                line = line.replace("`", "\\`")
                line = line.replace("<", "&lt;")
                line = line.replace(">", "&gt;")
                line = line.replace(" ", "&nbsp;")
                line = line.replace("*", "&ast;")
                line = line.replace("_", "&lowbar;")
                line = line.replace("-", "&#45;")
                line = line.replace(".", "&#46;")
                line = line.replace("!", "&#33;")
                line = line.replace("(", "&#40;")
                line = line.replace(")", "&#41;")
                line = line.replace("$", "&#36;")
            lines[i] = "<br>" + line
    return "".join(lines)
def predict(input, chatbot, max_length, top_p, temperature, history):
    # Generator used by the Submit button: echo the user turn immediately,
    # then rewrite the last chatbot entry as partial responses stream in from
    # model.stream_chat, yielding after each update so the UI refreshes.
    # NOTE: `input` shadows the builtin; kept as-is because Gradio binds by position.
    chatbot.append((parse_text(input), ""))
    for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p,
                                               temperature=temperature):
        chatbot[-1] = (parse_text(input), parse_text(response))

        yield chatbot, history
def reset_user_input():
    # Clear the input textbox after a message has been submitted.
    return gr.update(value='')
def reset_state():
    """Clear both the visible chat log and the model-side history."""
    cleared_chatbot, cleared_history = [], []
    return cleared_chatbot, cleared_history
# Build the Gradio UI: chat display on top, input column (left) and
# generation-parameter sliders (right) underneath.
with gr.Blocks() as demo:
    gr.HTML("""<h1 align="center">ChatGLM</h1>""")

    chatbot = gr.Chatbot()
    with gr.Row():
        with gr.Column(scale=4):
            with gr.Column(scale=12):
                user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style(
                    container=False)
            with gr.Column(min_width=32, scale=1):
                submitBtn = gr.Button("Submit", variant="primary")
        with gr.Column(scale=1):
            emptyBtn = gr.Button("Clear History")
            max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True)
            top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
            temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)

    # Conversation history held in Gradio session state, threaded through predict().
    history = gr.State([])

    # Submit streams the model reply, then clears the textbox.
    submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history],
                    show_progress=True)
    submitBtn.click(reset_user_input, [], [user_input])

    emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True)
def main():
    """Parse model arguments, load the (optionally P-tuned/quantized) model, and launch the UI."""
    global model, tokenizer

    # Only ModelArguments are needed for inference.
    parser = HfArgumentParser((
        ModelArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))[0]
    else:
        model_args = parser.parse_args_into_dataclasses()[0]

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.model_name_or_path, trust_remote_code=True)
    config = AutoConfig.from_pretrained(
        model_args.model_name_or_path, trust_remote_code=True)

    config.pre_seq_len = model_args.pre_seq_len
    config.prefix_projection = model_args.prefix_projection

    if model_args.ptuning_checkpoint is not None:
        # Load the base model, then overwrite only the prefix-encoder weights
        # from the P-tuning checkpoint.
        print(f"Loading prefix_encoder weight from {model_args.ptuning_checkpoint}")
        model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True)
        prefix_state_dict = torch.load(os.path.join(model_args.ptuning_checkpoint, "pytorch_model.bin"))
        new_prefix_state_dict = {}
        for k, v in prefix_state_dict.items():
            if k.startswith("transformer.prefix_encoder."):
                # Strip the module path so keys match prefix_encoder's own state dict.
                new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
        model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
    else:
        model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True)

    if model_args.quantization_bit is not None:
        print(f"Quantized to {model_args.quantization_bit} bit")
        model = model.quantize(model_args.quantization_bit)

    if model_args.pre_seq_len is not None:
        # P-tuning v2: model weights in fp16 on GPU, prefix encoder kept in fp32.
        model = model.half().cuda()
        model.transformer.prefix_encoder.float().cuda()

    model = model.eval()
    demo.queue().launch(share=False, inbrowser=True)
if __name__ == "__main__":
main() | python | Apache-2.0 | 401bf3a8a7dd8a26fba189551dccfc61a7079b4e | 2026-01-04T14:40:23.749869Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/setup.py | setup.py | import os
import sys
from typing import List
from setuptools import find_packages, setup
# torch is optional at setup time; C++/CUDA extensions can only be built when it
# is importable, so record its availability instead of failing the install.
try:
    import torch  # noqa
    from torch.utils.cpp_extension import BuildExtension

    TORCH_AVAILABLE = True
except ImportError:
    TORCH_AVAILABLE = False

THIS_DIR = os.path.dirname(os.path.abspath(__file__))
# BUILD_EXT=1 in the environment opts in to ahead-of-time extension builds.
BUILD_EXT = int(os.environ.get("BUILD_EXT", "0")) == 1

# we do not support windows currently
if sys.platform == "win32":
    raise RuntimeError("Windows is not supported yet. Please try again within the Windows Subsystem for Linux (WSL).")
def fetch_requirements(path) -> List[str]:
    """
    Read a pip requirements file.

    Args:
        path (str): the path to the requirements file.

    Returns:
        The whitespace-stripped lines of the file.
    """
    with open(path, "r") as requirements_file:
        return [line.strip() for line in requirements_file]
def fetch_readme() -> str:
    """
    Read the README.md file in the current directory.

    Returns:
        The full text of the README file.
    """
    with open("README.md", encoding="utf-8") as readme_file:
        contents = readme_file.read()
    return contents
def get_version() -> str:
    """
    This function reads the version.txt and generates the colossalai/version.py file.

    Side effect: overwrites colossalai/version.py (relative to this setup.py)
    with a single ``__version__ = '<version>'`` line.

    Returns:
        The library version stored in version.txt.
    """
    # Resolve paths relative to this setup.py so the build works from any cwd.
    setup_file_path = os.path.abspath(__file__)
    project_path = os.path.dirname(setup_file_path)
    version_txt_path = os.path.join(project_path, "version.txt")
    version_py_path = os.path.join(project_path, "colossalai/version.py")

    with open(version_txt_path) as f:
        version = f.read().strip()

    # write version into version.py
    with open(version_py_path, "w") as f:
        f.write(f"__version__ = '{version}'\n")
    return version
# Ahead-of-time extension build, opted into via BUILD_EXT=1.
if BUILD_EXT:
    if not TORCH_AVAILABLE:
        raise ModuleNotFoundError(
            "[extension] PyTorch is not found while BUILD_EXT=1. You need to install PyTorch first in order to build CUDA extensions"
        )

    from extensions import ALL_EXTENSIONS

    op_names = []
    ext_modules = []

    # Collect every extension that supports AOT compilation and is buildable
    # in the current environment.
    for ext_cls in ALL_EXTENSIONS:
        ext = ext_cls()
        if ext.support_aot and ext.is_available():
            ext.assert_compatible()
            op_names.append(ext.name)
            ext_modules.append(ext.build_aot())

    # show log
    if len(ext_modules) == 0:
        raise RuntimeError("[extension] Could not find any kernel compatible with the current environment.")
    else:
        op_name_list = ", ".join(op_names)
        # NOTE(review): prints as "...extensionsop1, op2" — likely missing a
        # ": " separator in the message; left untouched here.
        print(f"[extension] Building extensions{op_name_list}")
else:
    # JIT-only install: no extensions compiled at setup time.
    ext_modules = []
version = get_version()
package_name = "colossalai"

setup(
    name=package_name,
    version=version,
    packages=find_packages(
        # NOTE: "tests" was listed twice in the original exclude tuple; the
        # duplicate has been removed (find_packages treats it identically).
        exclude=(
            "extensions",
            "benchmark",
            "docker",
            "tests",
            "docs",
            "examples",
            "scripts",
            "requirements",
            "*.egg-info",
        ),
    ),
    description="An integrated large-scale model training system with efficient parallelization techniques",
    long_description=fetch_readme(),
    long_description_content_type="text/markdown",
    license="Apache Software License 2.0",
    url="https://www.colossalai.org",
    project_urls={
        "Forum": "https://github.com/hpcaitech/ColossalAI/discussions",
        "Bug Tracker": "https://github.com/hpcaitech/ColossalAI/issues",
        "Examples": "https://github.com/hpcaitech/ColossalAI-Examples",
        "Documentation": "http://colossalai.readthedocs.io",
        "Github": "https://github.com/hpcaitech/ColossalAI",
    },
    ext_modules=ext_modules,
    # BuildExtension is only needed when there are AOT extensions to compile.
    cmdclass={"build_ext": BuildExtension} if ext_modules else {},
    install_requires=fetch_requirements("requirements/requirements.txt"),
    entry_points="""
        [console_scripts]
        colossalai=colossalai.cli:cli
    """,
    python_requires=">=3.6",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Environment :: GPU :: NVIDIA CUDA",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: System :: Distributed Computing",
    ],
    # Ship kernel sources so JIT builds work from an installed wheel.
    package_data={
        "colossalai": [
            "kernel/extensions/csrc/**/*",
            "kernel/extensions/pybind/**/*",
        ]
    },
)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/.github/workflows/scripts/generate_leaderboard_and_send_to_lark.py | .github/workflows/scripts/generate_leaderboard_and_send_to_lark.py | import os
from datetime import datetime, timedelta
from typing import Any, Dict, List
import matplotlib.pyplot as plt
import pytz
import requests
import seaborn
from requests_toolbelt import MultipartEncoder
class Counter(dict):
    """
    A dict-based tally mapping an item (e.g. a GitHub username) to the number
    of times it has been recorded.

    NOTE: the original docstring incorrectly described a contributor dataclass;
    this class is a counter. The name shadows ``collections.Counter`` but is
    kept for backward compatibility with existing callers.
    """

    def record(self, item: str):
        """Increment the count for `item`, starting at 1 on first sight."""
        self[item] = self.get(item, 0) + 1

    def to_sorted_list(self):
        """Return (item, count) pairs sorted by count, descending."""
        return sorted(self.items(), key=lambda pair: pair[1], reverse=True)
def get_utc_time_one_week_ago():
    """
    Return the naive UTC datetime exactly 7 days before now.
    """
    return datetime.utcnow() - timedelta(days=7)
def datetime2str(dt):
    """
    Format a datetime as a YYYY-MM-DDTHH:MM:SSZ string.
    """
    return f"{dt:%Y-%m-%dT%H:%M:%SZ}"
def str2datetime(string):
    """
    Parse a YYYY-MM-DDTHH:MM:SSZ string into a naive datetime.
    """
    timestamp_format = "%Y-%m-%dT%H:%M:%SZ"
    return datetime.strptime(string, timestamp_format)
def plot_bar_chart(x: List[Any], y: List[Any], xlabel: str, ylabel: str, title: str, output_path: str) -> None:
    """
    This function is a utility to plot the bar charts.

    Renders a seaborn bar plot of `x` against `y`, labels the axes, and writes
    the figure to `output_path` at 1200 dpi. Operates on matplotlib's global
    current figure (hence the plt.clf() first).
    """
    plt.clf()  # start from a clean figure; pyplot state is global
    seaborn.color_palette()
    fig = seaborn.barplot(x=x, y=y)
    fig.set(xlabel=xlabel, ylabel=ylabel, title=title)
    seaborn.despine()  # remove top/right spines for a cleaner look
    plt.tight_layout()
    plt.savefig(output_path, dpi=1200)
def get_organization_repositories(github_token, organization_name) -> List[str]:
    """
    Retrieve the public repositories under the organization.

    Returns the repository names from the first page of results.
    """
    url = f"https://api.github.com/orgs/{organization_name}/repos?type=public"

    # prepare header
    request_headers = {
        "Authorization": f"Bearer {github_token}",
        "Accept": "application/vnd.github+json",
        "X-GitHub-Api-Version": "2022-11-28",
    }
    payload = requests.get(url, headers=request_headers).json()
    return [repo["name"] for repo in payload]
def get_issue_pull_request_comments(github_token: str, org_name: str, repo_name: str, since: str) -> Dict[str, int]:
    """
    Retrieve the issue/PR comments made by our members in the last 7 days.

    Args:
        github_token (str): GitHub access token for API calls
        org_name (str): the GitHub organization name
        repo_name (str): the repository to scan
        since (str): the path parameter required by GitHub Restful APIs, in the format of YYYY-MM-DDTHH:MM:SSZ

    Returns:
        Mapping from member login to the number of qualifying comments.
    """
    # prepare header
    headers = {
        "Authorization": f"Bearer {github_token}",
        "Accept": "application/vnd.github+json",
        "X-GitHub-Api-Version": "2022-11-28",
    }

    user_engagement_count = {}

    # do pagination to the API
    page = 1
    while True:
        comment_api = f"https://api.github.com/repos/{org_name}/{repo_name}/issues/comments?since={since}&page={page}"
        comment_response = requests.get(comment_api, headers=headers).json()

        if len(comment_response) == 0:
            # empty page marks the end of pagination
            break
        else:
            for item in comment_response:
                comment_author_relationship = item["author_association"]
                if comment_author_relationship != "MEMBER":
                    # if the comment is not made by our member
                    # we don't count this comment towards user engagement
                    continue

                # fetch the parent issue/PR to inspect who created it
                issue_id = item["issue_url"].split("/")[-1]
                issue_api = f"https://api.github.com/repos/{org_name}/{repo_name}/issues/{issue_id}"
                issue_response = requests.get(issue_api, headers=headers).json()
                issue_author_relationship = issue_response["author_association"]

                if issue_author_relationship != "MEMBER":
                    # this means that the issue/PR is not created by our own people
                    # any comments in this issue/PR by our member will be counted towards the leaderboard
                    member_name = item["user"]["login"]

                    if member_name in user_engagement_count:
                        user_engagement_count[member_name] += 1
                    else:
                        user_engagement_count[member_name] = 1
        page += 1
    return user_engagement_count
def get_discussion_comments(github_token: str, org_name: str, repo_name: str, since: datetime) -> Dict[str, int]:
    """
    Retrieve the discussion comments made by our members in the last 7 days.
    This is only available via the GitHub GraphQL API.

    Args:
        github_token (str): GitHub access token for API calls
        since (datetime): the query parameter to determine whether the comment is made this week
            (note: a datetime object, unlike the `since` string taken by
            get_issue_pull_request_comments — the caller passes start_datetime here)

    Returns:
        Mapping from member login to the number of qualifying comments + replies.
    """

    # use graphql to get the discussions updated in the last 7 days
    def _generate_discussion_query(num, cursor: str = None):
        # Build a paginated discussions query; `cursor` is the GraphQL `after` cursor.
        if cursor is None:
            offset_str = ""
        else:
            offset_str = f', after: "{cursor}"'
        query = f"""
        {{
            repository(owner: "{org_name}", name: "{repo_name}"){{
                discussions(first: {num} {offset_str}){{
                    edges {{
                        cursor
                        node{{
                            title
                            author{{
                                login
                            }}
                            number
                            authorAssociation
                            updatedAt
                        }}
                    }}
                }}
            }}
        }}
        """
        return query

    def _generate_comment_reply_count_for_discussion(discussion_number, num, cursor: str = None):
        # here we assume that each comment will not have more than 100 replies for simplicity
        # otherwise, we have to go through pagination for both comment and reply
        if cursor is None:
            offset_str = ""
        else:
            # comments are paginated backwards (`last` + `before`)
            offset_str = f', before: "{cursor}"'
        query = f"""
        {{
            repository(owner: "{org_name}", name: "{repo_name}"){{
                discussion(number: {discussion_number}){{
                    title
                    comments(last: {num} {offset_str}){{
                        edges{{
                            cursor
                            node {{
                                author{{
                                    login
                                }}
                                updatedAt
                                authorAssociation
                                replies (last: 100) {{
                                    edges {{
                                        node {{
                                            author {{
                                                login
                                            }}
                                            updatedAt
                                            authorAssociation
                                        }}
                                    }}
                                }}
                            }}
                        }}
                    }}
                }}
            }}
        }}
        """
        return query

    # a utility function to make call to Github GraphQL API
    def _call_graphql_api(query):
        headers = {"Authorization": f"Bearer {github_token}"}
        json_data = {"query": query}
        response = requests.post("https://api.github.com/graphql", json=json_data, headers=headers)
        data = response.json()
        return data

    # get the discussion numbers updated in the last 7 days
    discussion_numbers = []
    num_per_request = 10
    cursor = None
    while True:
        query = _generate_discussion_query(num_per_request, cursor)
        data = _call_graphql_api(query)

        found_discussion_out_of_time_range = False

        edges = data["data"]["repository"]["discussions"]["edges"]
        if len(edges) == 0:
            break
        else:
            # keep the discussion whose author is not a member
            for edge in edges:
                # print the discussion title
                discussion = edge["node"]
                discussion_updated_at = str2datetime(discussion["updatedAt"])

                # check if the updatedAt is within the last 7 days
                # if yes, add it to discussion_numbers
                if discussion_updated_at > since:
                    if discussion["authorAssociation"] != "MEMBER":
                        discussion_numbers.append(discussion["number"])
                else:
                    # NOTE(review): early stop assumes discussions arrive sorted by
                    # recency — confirm against the API's default ordering.
                    found_discussion_out_of_time_range = True

        if found_discussion_out_of_time_range:
            break
        else:
            # update cursor
            cursor = edges[-1]["cursor"]

    # get the discussion comments and replies made by our member
    user_engagement_count = {}
    for discussion_number in discussion_numbers:
        cursor = None
        num_per_request = 10

        while True:
            query = _generate_comment_reply_count_for_discussion(discussion_number, num_per_request, cursor)
            data = _call_graphql_api(query)

            # get the comments
            edges = data["data"]["repository"]["discussion"]["comments"]["edges"]

            # update the cursor
            if len(edges) == 0:
                break
            else:
                # update cursor for pagination
                cursor = edges[-1]["cursor"]

                for edge in edges:
                    comment = edge["node"]
                    if comment["authorAssociation"] == "MEMBER":
                        # check if the updatedAt is within the last 7 days
                        # if yes, add it to user_engagement_count
                        comment_updated_at = datetime.strptime(comment["updatedAt"], "%Y-%m-%dT%H:%M:%SZ")
                        if comment_updated_at > since:
                            member_name = comment["author"]["login"]
                            if member_name in user_engagement_count:
                                user_engagement_count[member_name] += 1
                            else:
                                user_engagement_count[member_name] = 1

                    # get the replies
                    reply_edges = comment["replies"]["edges"]
                    if len(reply_edges) == 0:
                        continue
                    else:
                        for reply_edge in reply_edges:
                            reply = reply_edge["node"]
                            if reply["authorAssociation"] == "MEMBER":
                                # check if the updatedAt is within the last 7 days
                                # if yes, count it towards user engagement
                                reply_updated_at = datetime.strptime(reply["updatedAt"], "%Y-%m-%dT%H:%M:%SZ")
                                if reply_updated_at > since:
                                    member_name = reply["author"]["login"]
                                    if member_name in user_engagement_count:
                                        user_engagement_count[member_name] += 1
                                    else:
                                        user_engagement_count[member_name] = 1
    return user_engagement_count
def generate_user_engagement_leaderboard_image(
    github_token: str, org_name: str, repo_list: List[str], output_path: str
) -> bool:
    """
    Generate the user engagement leaderboard image for stats within the last 7 days

    Args:
        github_token (str): GitHub access token for API calls
        org_name (str): the GitHub organization to scan
        repo_list (List[str]): repositories whose comments are counted
        output_path (str): the path to save the image

    Returns:
        True if there was any engagement to plot (image written), False otherwise.
    """
    # request to the Github API to get the users who have replied the most in the last 7 days
    start_datetime = get_utc_time_one_week_ago()
    start_datetime_str = datetime2str(start_datetime)

    # get the issue/PR comments and discussion comment count
    total_engagement_count = {}

    def _update_count(counter):
        # merge a per-repo {user: count} mapping into the running total
        for name, count in counter.items():
            if name in total_engagement_count:
                total_engagement_count[name] += count
            else:
                total_engagement_count[name] = count

    for repo_name in repo_list:
        # fixed: the original message printed "{repo_name}/{repo_name}"
        print(f"Fetching user engagement count for {org_name}/{repo_name}")
        issue_pr_engagement_count = get_issue_pull_request_comments(
            github_token=github_token, org_name=org_name, repo_name=repo_name, since=start_datetime_str
        )
        discussion_engagement_count = get_discussion_comments(
            github_token=github_token, org_name=org_name, repo_name=repo_name, since=start_datetime
        )

        # update the total engagement count
        _update_count(issue_pr_engagement_count)
        _update_count(discussion_engagement_count)

    # prepare the data for plotting
    x = []
    y = []
    if len(total_engagement_count) > 0:
        ranking = []
        for name, count in total_engagement_count.items():
            ranking.append((name, count))
        ranking.sort(key=lambda x: x[1], reverse=True)

        for name, count in ranking:
            x.append(count)
            y.append(name)

        # plot the leaderboard
        xlabel = f"Number of Comments made (since {start_datetime_str})"
        ylabel = "Member"
        title = "Active User Engagement Leaderboard"
        plot_bar_chart(x, y, xlabel=xlabel, ylabel=ylabel, title=title, output_path=output_path)
        return True
    else:
        return False
def generate_contributor_leaderboard_image(github_token, org_name, repo_list, output_path) -> bool:
    """
    Generate the contributor leaderboard image for stats within the last 7 days

    Args:
        github_token (str): GitHub access token for API calls
        org_name (str): the GitHub organization to scan
        repo_list (List[str]): repositories whose merged PRs are counted
        output_path (str): the path to save the image

    Returns:
        True if the image was generated (at least one merged PR found), else False.
    """
    # request to the Github API to get the users who have contributed in the last 7 days
    headers = {
        "Authorization": f"Bearer {github_token}",
        "Accept": "application/vnd.github+json",
        "X-GitHub-Api-Version": "2022-11-28",
    }

    counter = Counter()
    start_datetime = get_utc_time_one_week_ago()

    def _get_url(org_name, repo_name, page):
        # closed PRs, 50 per page
        return f"https://api.github.com/repos/{org_name}/{repo_name}/pulls?per_page=50&page={page}&state=closed"

    def _iterate_by_page(org_name, repo_name):
        page = 1
        stop = False

        while not stop:
            print(f"Fetching pull request data for {org_name}/{repo_name} - page{page}")
            url = _get_url(org_name, repo_name, page)

            # NOTE(review): this retry loop has no attempt cap — it spins
            # forever if the API keeps returning a non-list payload.
            while True:
                response = requests.get(url, headers=headers).json()

                if isinstance(response, list):
                    # sometimes the Github API returns nothing
                    # request again if the response is not a list
                    break
                print("Empty response, request again...")

            if len(response) == 0:
                # if the response is empty, stop
                stop = True
                break

            # count the pull request and author from response
            for pr_data in response:
                merged_at = pr_data["merged_at"]
                author = pr_data["user"]["login"]

                if merged_at is None:
                    # closed-but-unmerged PRs do not count
                    continue

                merge_datetime = str2datetime(merged_at)

                if merge_datetime < start_datetime:
                    # if we found a pull request that is merged before the start_datetime
                    # we stop
                    stop = True
                    break
                else:
                    # record the author
                    counter.record(author)

            # next page
            page += 1

    for repo_name in repo_list:
        _iterate_by_page(org_name, repo_name)

    # convert unix timestamp to Beijing datetime
    bj_start_datetime = datetime.fromtimestamp(start_datetime.timestamp(), tz=pytz.timezone("Asia/Shanghai"))
    bj_start_datetime_str = datetime2str(bj_start_datetime)

    contribution_list = counter.to_sorted_list()

    # split (author, count) pairs into the two plotting series
    author_list = [x[0] for x in contribution_list]
    num_commit_list = [x[1] for x in contribution_list]

    # plot
    if len(author_list) > 0:
        xlabel = f"Number of Pull Requests (since {bj_start_datetime_str})"
        ylabel = "Contributor"
        title = "Active Contributor Leaderboard"
        plot_bar_chart(num_commit_list, author_list, xlabel=xlabel, ylabel=ylabel, title=title, output_path=output_path)
        return True
    else:
        return False
def upload_image_to_lark(lark_tenant_token: str, image_path: str) -> str:
    """
    Upload an image to Lark and return its image key.

    Args:
        lark_tenant_token (str): Lark tenant access token
        image_path (str): the path to the image to be uploaded

    Returns:
        str: the image key assigned by Lark, used to reference the image in messages.
    """
    url = "https://open.feishu.cn/open-apis/im/v1/images"
    # Open the file in a context manager so the handle is always closed;
    # the original passed an open() result into the form and leaked it.
    with open(image_path, "rb") as image_file:
        form = {"image_type": "message", "image": image_file}
        multi_form = MultipartEncoder(form)
        headers = {
            "Authorization": f"Bearer {lark_tenant_token}",
            # MultipartEncoder supplies the boundary-carrying content type
            "Content-Type": multi_form.content_type,
        }
        response = requests.request("POST", url, headers=headers, data=multi_form).json()
    return response["data"]["image_key"]
def generate_lark_tenant_access_token(app_id: str, app_secret: str) -> str:
    """
    Fetch a tenant access token from the Lark open API.

    Args:
        app_id (str): Lark app id
        app_secret (str): Lark app secret
    """
    endpoint = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal"
    payload = {"app_id": app_id, "app_secret": app_secret}
    resp = requests.post(endpoint, json=payload)
    return resp.json()["tenant_access_token"]
def send_image_to_lark(image_key: str, webhook_url: str) -> None:
    """
    Post an image message to a Lark webhook.

    Args:
        image_key (str): the image key returned by Lark
        webhook_url (str): the webhook url to send the image
    """
    payload = {"msg_type": "image", "content": {"image_key": image_key}}
    requests.post(webhook_url, json=payload)
def send_message_to_lark(message: str, webhook_url: str):
    """
    Post a plain-text message to a Lark webhook.

    Args:
        message (str): the message to be sent
        webhook_url (str): the webhook url to send the message
    """
    payload = {"msg_type": "text", "content": {"text": message}}
    requests.post(webhook_url, json=payload)
if __name__ == "__main__":
    GITHUB_TOKEN = os.environ["GITHUB_TOKEN"]
    CONTRIBUTOR_IMAGE_PATH = "contributor_leaderboard.png"
    USER_ENGAGEMENT_IMAGE_PATH = "engagement_leaderboard.png"
    ORG_NAME = "hpcaitech"

    # get all open source repositories
    REPO_LIST = get_organization_repositories(GITHUB_TOKEN, ORG_NAME)

    # generate images; each call reports whether an image was actually produced
    contrib_success = generate_contributor_leaderboard_image(GITHUB_TOKEN, ORG_NAME, REPO_LIST, CONTRIBUTOR_IMAGE_PATH)
    engagement_success = generate_user_engagement_leaderboard_image(
        GITHUB_TOKEN, ORG_NAME, REPO_LIST, USER_ENGAGEMENT_IMAGE_PATH
    )

    # upload images only when they exist; the original uploaded unconditionally
    # and crashed on the missing file when no data was available for the week
    APP_ID = os.environ["LARK_APP_ID"]
    APP_SECRET = os.environ["LARK_APP_SECRET"]
    LARK_TENANT_TOKEN = generate_lark_tenant_access_token(app_id=APP_ID, app_secret=APP_SECRET)
    contributor_image_key = upload_image_to_lark(LARK_TENANT_TOKEN, CONTRIBUTOR_IMAGE_PATH) if contrib_success else None
    user_engagement_image_key = (
        upload_image_to_lark(LARK_TENANT_TOKEN, USER_ENGAGEMENT_IMAGE_PATH) if engagement_success else None
    )

    # send the weekly announcement message to lark
    LARK_WEBHOOK_URL = os.environ["LARK_WEBHOOK_URL"]
    message = """本周的社区榜单出炉啦!
1. 开发贡献者榜单
2. 用户互动榜单
注:
- 开发贡献者测评标准为:本周由公司成员与社区在所有开源仓库提交的Pull Request次数
- 用户互动榜单测评标准为:本周由公司成员在非成员在所有开源仓库创建的issue/PR/discussion中回复的次数
"""
    send_message_to_lark(message, LARK_WEBHOOK_URL)

    # send contributor image to lark (or a fallback message when no data)
    if contrib_success:
        send_image_to_lark(contributor_image_key, LARK_WEBHOOK_URL)
    else:
        send_message_to_lark("本周没有成员贡献PR,无榜单图片生成。", LARK_WEBHOOK_URL)

    # send user engagement image to lark (or a fallback message when no data)
    if engagement_success:
        send_image_to_lark(user_engagement_image_key, LARK_WEBHOOK_URL)
    else:
        send_message_to_lark("本周没有成员互动,无榜单图片生成。", LARK_WEBHOOK_URL)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/.github/workflows/scripts/generate_release_draft.py | .github/workflows/scripts/generate_release_draft.py | #!/usr/bin/env python
# coding: utf-8
import argparse
import os
import re
import requests
# GitHub REST endpoints for the ColossalAI repository (commits and tags).
COMMIT_API = "https://api.github.com/repos/hpcaitech/ColossalAI/commits"
TAGS_API = "https://api.github.com/repos/hpcaitech/ColossalAI/tags"
def parse_args():
    """Parse the CLI options for the release-draft generator."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--out", type=str, required=True, help="output path for the release draft")
    arg_parser.add_argument("--version", type=str, required=True, help="current version to release")
    return arg_parser.parse_args()
def get_latest_tag_commit(headers=None):
    """Return (commit_sha, tag_name) of the most recent tag on GitHub."""
    payload = requests.get(url=TAGS_API, headers=headers).json()
    # the tags API returns newest-first, so index 0 is the latest tag
    latest = payload[0]
    return latest["commit"]["sha"], latest["name"]
def get_commit_info(commit_hash, headers=None):
    """Fetch the GitHub commit object for a single commit hash."""
    endpoint = f"{COMMIT_API}/{commit_hash}"
    return requests.get(url=endpoint, headers=headers).json()
def get_all_commit_info(since, headers=None):
    """Collect every commit newer than `since`, following GitHub pagination."""
    results = []
    page = 1
    while True:
        api = f"{COMMIT_API}?since={since}&per_page=100&page={page}"
        data = requests.get(url=api, headers=headers).json()
        # an empty page marks the end of the listing
        if not data:
            break
        results.extend(data)
        page += 1
    return results
def collate_release_info(commit_info_list):
    """
    Group commit messages by their `[tag]` prefix.

    Args:
        commit_info_list (list): commit objects as returned by the GitHub commits API.

    Returns:
        dict: maps the capitalized tag (e.g. "Feature") to a list of
        (message, author_name, author_url) tuples. Commits without a
        `[tag]` in the message are skipped.
    """
    results = dict()
    # fixed: was accidentally written as `pattern = pattern = r"\[.*\]"`
    pattern = r"\[.*\]"

    for commit_info in commit_info_list:
        author = commit_info["commit"]["author"]["name"]
        try:
            author_url = commit_info["author"]["url"]
        except (KeyError, TypeError):
            # the top-level "author" field can be None when the commit email
            # is not linked to a GitHub account (was a bare `except:`)
            author_url = None
        msg = commit_info["commit"]["message"]
        match = re.search(pattern, msg)

        if match:
            tag = match.group().lstrip("[").rstrip("]").capitalize()
            results.setdefault(tag, []).append((msg, author, author_url))
    return results
def generate_release_post_markdown(current_version, last_version, release_info):
    """
    Render the release draft as a list of markdown fragments.

    Args:
        current_version (str): the version being released (head of the compare range).
        last_version (str): the previous release tag (base of the compare range).
        release_info (dict): tag -> [(message, author, author_url), ...] as
            produced by `collate_release_info`.

    Returns:
        list: markdown fragments to be concatenated into the release post.
    """
    text = []

    # add highlights
    text.append("## What's Changed \n\n")

    # add one section per tag
    for section, entries in release_info.items():
        text.append(f"### {section} \n")

        for msg, author, author_url in entries:
            # only keep the first line of the commit message
            msg = msg.split("\n")[0]

            if author_url:
                item = f"{msg} by [{author}]({author_url})\n"
            else:
                item = f"{msg} by {author}\n"
            text.append(f"- {item}")

        text.append("\n")

    # GitHub compare links are <base>...<head>; the original rendered the
    # range backwards as current...last
    text.append(
        f"**Full Changelog**: https://github.com/hpcaitech/ColossalAI/compare/{last_version}...{current_version}"
    )

    return text
if __name__ == "__main__":
    args = parse_args()
    # a raw token in the Authorization header is accepted by the GitHub API
    token = os.environ["GITHUB_API_TOKEN"]
    headers = {"Authorization": token}

    # get previous release tag and the date of its commit
    last_release_commit, last_version = get_latest_tag_commit(headers)
    last_release_commit_info = get_commit_info(last_release_commit, headers=headers)
    last_release_date = last_release_commit_info["commit"]["author"]["date"]

    # get the commits since last release
    commit_info = get_all_commit_info(since=last_release_date, headers=headers)
    commit_info = commit_info[:-1]  # remove the release commit

    # collate into markdown grouped by [tag]
    release_info = collate_release_info(commit_info)
    markdown_text = generate_release_post_markdown(args.version, last_version, release_info)

    # write the draft fragments into the output file
    with open(args.out, "w") as f:
        for line in markdown_text:
            f.write(line)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/.github/workflows/scripts/check_doc_i18n.py | .github/workflows/scripts/check_doc_i18n.py | import argparse
import os
def compare_dirs(dir1, dir2):
    """Recursively check that dir1 and dir2 contain the same tree of names.

    Only entry names and their file/directory kinds are compared — never file
    contents. Prints a message for the first mismatching pair found and
    returns False; returns True when the trees match.
    """
    # Both roots must exist before anything can be compared.
    if not (os.path.exists(dir1) and os.path.exists(dir2)):
        return False

    entries1 = os.listdir(dir1)
    entries2 = os.listdir(dir2)

    # A differing entry count means the trees cannot match.
    if len(entries1) != len(entries2):
        return False

    for entry in entries1:
        path1 = os.path.join(dir1, entry)
        path2 = os.path.join(dir2, entry)

        # Every entry of dir1 must also exist under dir2.
        if not os.path.exists(path2):
            print(f"Found mismatch: {path1}, {path2}")
            return False

        if os.path.isdir(path1) and os.path.isdir(path2):
            # Both sides are directories: recurse into them.
            if not compare_dirs(path1, path2):
                print(f"Found mismatch: {path1}, {path2}")
                return False
        elif os.path.isfile(path1) and os.path.isfile(path2):
            # Both sides are plain files: names already match, nothing to do.
            continue
        else:
            # One side is a file while the other is a directory (or neither).
            print(f"Found mismatch: {path1}, {path2}")
            return False

    # All entries matched, so the directories are considered the same.
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--directory", help="The directory where the multi-language source files are kept.")
    args = parser.parse_args()

    # each sub-folder of the given directory holds one language's docs
    i18n_folders = os.listdir(args.directory)
    i18n_folders = [os.path.join(args.directory, val) for val in i18n_folders]

    # compare every language folder against the first one
    if len(i18n_folders) > 1:
        for i in range(1, len(i18n_folders)):
            dir1 = i18n_folders[0]
            dir2 = i18n_folders[i]
            print(f"comparing {dir1} vs {dir2}")
            match = compare_dirs(i18n_folders[0], i18n_folders[i])

            if not match:
                print(
                    f"{dir1} and {dir2} don't match, please ensure that your documentation is available in different languages"
                )
            else:
                print(f"{dir1} and {dir2} match")
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/.github/workflows/scripts/update_setup_for_nightly.py | .github/workflows/scripts/update_setup_for_nightly.py | from datetime import datetime
def open_setup_file():
    """Read setup.py from the current directory and return its lines."""
    with open("setup.py", "r") as setup_file:
        return setup_file.readlines()
def replace_nightly_package_info(file_lines):
    """Rewrite setup.py lines so the package builds as a dated nightly.

    The version becomes today's date (YYYY.MM.DD) and the package name becomes
    "colossalai-nightly". Mutates `file_lines` in place and returns it.
    """
    nightly_version = datetime.today().strftime("%Y.%m.%d")
    nightly_name = "colossalai-nightly"

    for idx, line in enumerate(file_lines):
        if "version = get_version()" in line:
            file_lines[idx] = f'version = "{nightly_version}"\n'
        if 'package_name = "colossalai"' in line:
            file_lines[idx] = f'package_name = "{nightly_name}"\n'
    return file_lines
def write_setup_file(file_lines):
    """Overwrite setup.py in the current directory with the given lines."""
    with open("setup.py", "w") as setup_file:
        setup_file.writelines(file_lines)
def main():
    """Rewrite setup.py in place with nightly package metadata."""
    write_setup_file(replace_nightly_package_info(open_setup_file()))
# Script entry point: only rewrite setup.py when executed directly.
if __name__ == "__main__":
    main()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/.github/workflows/scripts/send_message_to_lark.py | .github/workflows/scripts/send_message_to_lark.py | import argparse
import requests
def parse_args():
    """Parse the message text and webhook url from the command line."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-m", "--message", type=str)
    arg_parser.add_argument("-u", "--url", type=str)
    return arg_parser.parse_args()
def send_message_to_lark(message, webhook_url):
    """Post a plain-text message to a Lark webhook."""
    payload = {"msg_type": "text", "content": {"text": message}}
    requests.post(webhook_url, json=payload)
# Script entry point: forward the CLI message to the given Lark webhook.
if __name__ == "__main__":
    args = parse_args()
    send_message_to_lark(args.message, args.url)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/.github/workflows/scripts/example_checks/detect_changed_example.py | .github/workflows/scripts/example_checks/detect_changed_example.py | import argparse
def main():
    """Print the set of example sub-folders touched by the changed files.

    The expected layout is examples/<area>/<application>/<file>, so any
    changed path with at least four components under `examples` maps to the
    "<area>/<application>" folder that needs re-checking.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--fileNameList", type=str, help="The list of changed files")
    args = parser.parse_args()

    folder_need_check = set()
    for loc in args.fileNameList.split(":"):
        parts = loc.split("/")
        # keep only paths that are at least examples/<area>/<application>/<file>
        if parts[0] == "examples" and len(parts) >= 4:
            folder_need_check.add("/".join(parts[1:3]))

    # Output the result using print. Then the shell can get the values.
    print(list(folder_need_check))
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/.github/workflows/scripts/example_checks/check_example_weekly.py | .github/workflows/scripts/example_checks/check_example_weekly.py | import os
def show_files(path, all_files):
    """Recursively append every file path under `path` to `all_files`.

    Directories are descended into; files are collected with their full
    joined path. Returns the (mutated) `all_files` list.
    """
    for entry in os.listdir(path):
        full_path = os.path.join(path, entry)
        if os.path.isdir(full_path):
            # descend into sub-directories
            show_files(full_path, all_files)
        else:
            all_files.append(full_path)
    return all_files
def join(input_list, sep=None):
    """Join `input_list` with `sep`; any falsy separator falls back to a space."""
    separator = sep or " "
    return separator.join(input_list)
def main():
    """Print every examples/<area>/<application> folder that contains files."""
    all_loc = []
    for file_loc in show_files("examples/", []):
        parts = file_loc.split("/")
        # must have two sub-folder levels after the examples folder:
        # examples/images/vit is acceptable, examples/images/README.md and
        # examples/requirements.txt are not
        if len(parts) >= 4:
            re_loc = "/".join(parts[1:3])
            if re_loc not in all_loc:
                all_loc.append(re_loc)
    print(all_loc)
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/.github/workflows/scripts/example_checks/check_dispatch_inputs.py | .github/workflows/scripts/example_checks/check_dispatch_inputs.py | import argparse
import os
def check_inputs(input_list):
    """Return True only when every name in `input_list` exists under examples/."""
    # all() short-circuits on the first missing path, like the original loop
    return all(os.path.exists(os.path.join("examples", path)) for path in input_list)
def main():
    """Validate the comma-separated folder list passed via -f/--fileNameList."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--fileNameList", type=str, help="List of file names")
    args = parser.parse_args()

    name_list = args.fileNameList.split(",")
    # the calling workflow greps stdout for these exact tokens
    print("success" if check_inputs(name_list) else "failure")
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/cuda_extension.py | extensions/cuda_extension.py | import os
import time
from abc import abstractmethod
from pathlib import Path
from typing import List
from .base_extension import _Extension
from .cpp_extension import _CppExtension
from .utils import check_pytorch_version, check_system_pytorch_cuda_match, set_cuda_arch_list
__all__ = ["_CudaExtension"]
# Some constants for installation checks:
# minimum PyTorch release (1.10) required to build/load the CUDA extensions.
MIN_PYTORCH_VERSION_MAJOR = 1
MIN_PYTORCH_VERSION_MINOR = 10
class _CudaExtension(_CppExtension):
    """Base class for kernels compiled with nvcc against the CUDA toolkit.

    Subclasses provide the source files and may extend the nvcc flags; this
    class supplies the CUDA-specific availability/compatibility checks and the
    AOT/JIT build logic.
    """

    @abstractmethod
    def nvcc_flags(self) -> List[str]:
        """
        This function should return a list of nvcc compilation flags for extensions.
        """
        return ["-DCOLOSSAL_WITH_CUDA"]

    def is_available(self) -> bool:
        """Return True when this CUDA extension can be built on this machine."""
        # cuda extension can only be built if cuda is available
        try:
            import torch

            # torch.cuda.is_available requires a device to exist, allow building with cuda extension on build nodes without a device
            # but where cuda is actually available.
            cuda_available = torch.cuda.is_available() or bool(os.environ.get("FORCE_CUDA", 0))
        except:
            # NOTE(review): bare except — any failure here (including a missing
            # torch install) is treated as "CUDA unavailable"
            cuda_available = False
        return cuda_available

    def assert_compatible(self) -> None:
        """Raise if CUDA_HOME is unset or the CUDA/PyTorch versions are incompatible."""
        from torch.utils.cpp_extension import CUDA_HOME

        if not CUDA_HOME:
            raise AssertionError(
                "[extension] CUDA_HOME is not found. You need to export CUDA_HOME environment variable or install CUDA Toolkit first in order to build/load CUDA extensions"
            )
        check_system_pytorch_cuda_match(CUDA_HOME)
        check_pytorch_version(MIN_PYTORCH_VERSION_MAJOR, MIN_PYTORCH_VERSION_MINOR)

    def get_cuda_home_include(self):
        """
        Return the include path inside the cuda home (CUDA_HOME/include).
        """
        from torch.utils.cpp_extension import CUDA_HOME

        if CUDA_HOME is None:
            raise RuntimeError("CUDA_HOME is None, please set CUDA_HOME to compile C++/CUDA kernels in ColossalAI.")
        cuda_include = os.path.join(CUDA_HOME, "include")
        return cuda_include

    def include_dirs(self) -> List[str]:
        """
        This function should return a list of include files for extensions.
        """
        # the C++ include dirs plus the CUDA toolkit headers
        return super().include_dirs() + [self.get_cuda_home_include()]

    def build_jit(self) -> "Callable":
        """Compile (or reuse a cached build of) the kernel at runtime and return it.

        Note: the annotation previously said None, but this method returns the
        loaded kernel module.
        """
        from torch.utils.cpp_extension import CUDA_HOME, load

        set_cuda_arch_list(CUDA_HOME)

        # get build dir
        build_directory = _Extension.get_jit_extension_folder_path()
        build_directory = Path(build_directory)
        build_directory.mkdir(parents=True, exist_ok=True)

        # check if the kernel has been built before (a cached .so exists)
        compiled_before = False
        kernel_file_path = build_directory.joinpath(f"{self.name}.so")
        if kernel_file_path.exists():
            compiled_before = True

        # load the kernel
        if compiled_before:
            print(f"[extension] Loading the JIT-built {self.name} kernel during runtime now")
        else:
            print(f"[extension] Compiling the JIT {self.name} kernel during runtime now")

        build_start = time.time()
        op_kernel = load(
            name=self.name,
            sources=self.strip_empty_entries(self.sources_files()),
            extra_include_paths=self.strip_empty_entries(self.include_dirs()),
            extra_cflags=self.cxx_flags(),
            extra_cuda_cflags=self.nvcc_flags(),
            extra_ldflags=[],
            build_directory=str(build_directory),
        )
        build_duration = time.time() - build_start

        if compiled_before:
            print(f"[extension] Time taken to load {self.name} op: {build_duration} seconds")
        else:
            print(f"[extension] Time taken to compile {self.name} op: {build_duration} seconds")

        return op_kernel

    def build_aot(self) -> "CUDAExtension":
        """Return a torch CUDAExtension for ahead-of-time (setup.py) builds."""
        from torch.utils.cpp_extension import CUDA_HOME, CUDAExtension

        set_cuda_arch_list(CUDA_HOME)
        return CUDAExtension(
            name=self.prebuilt_import_path,
            sources=self.strip_empty_entries(self.sources_files()),
            include_dirs=self.strip_empty_entries(self.include_dirs()),
            extra_compile_args={
                "cxx": self.strip_empty_entries(self.cxx_flags()),
                "nvcc": self.strip_empty_entries(self.nvcc_flags()),
            },
        )
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/cpp_extension.py | extensions/cpp_extension.py | import importlib
import os
import time
from abc import abstractmethod
from pathlib import Path
from typing import List
from .base_extension import _Extension
__all__ = ["_CppExtension"]
class _CppExtension(_Extension):
    """Base class for kernels compiled from C++ sources.

    Supports both ahead-of-time builds (as `colossalai._C.<name>`) and
    just-in-time compilation into a shared cache directory.
    """

    def __init__(self, name: str, priority: int = 1):
        super().__init__(name, support_aot=True, support_jit=True, priority=priority)

        # we store the op as an attribute to avoid repeated building and loading
        self.cached_op = None

        # build-related variables: where the AOT-built module lives and which
        # version macros the sources are compiled with
        self.prebuilt_module_path = "colossalai._C"
        self.prebuilt_import_path = f"{self.prebuilt_module_path}.{self.name}"
        self.version_dependent_macros = ["-DVERSION_GE_1_1", "-DVERSION_GE_1_3", "-DVERSION_GE_1_5"]

    def csrc_abs_path(self, path):
        # absolute path of a file under extensions/csrc
        return os.path.join(self.relative_to_abs_path("csrc"), path)

    def pybind_abs_path(self, path):
        # absolute path of a file under extensions/pybind
        return os.path.join(self.relative_to_abs_path("pybind"), path)

    def relative_to_abs_path(self, code_path: str) -> str:
        """
        This function takes in a path relative to the colossalai root directory and return the absolute path.
        """

        # get the current file path
        # iteratively check the parent directory
        # if the parent directory is "extensions", then the current file path is the root directory
        # otherwise, the current file path is inside the root directory
        current_file_path = Path(__file__)
        while True:
            if current_file_path.name == "extensions":
                break
            else:
                current_file_path = current_file_path.parent
        extension_module_path = current_file_path
        code_abs_path = extension_module_path.joinpath(code_path)
        return str(code_abs_path)

    # shared helpers usable by all subclasses
    def strip_empty_entries(self, args):
        """
        Drop any empty strings from the list of compile and link flags
        """
        return [x for x in args if len(x) > 0]

    def import_op(self):
        """
        This function will import the op module by its string name.
        """
        return importlib.import_module(self.prebuilt_import_path)

    def build_aot(self) -> "CppExtension":
        """Return a torch CppExtension for ahead-of-time (setup.py) builds."""
        from torch.utils.cpp_extension import CppExtension

        return CppExtension(
            name=self.prebuilt_import_path,
            sources=self.strip_empty_entries(self.sources_files()),
            include_dirs=self.strip_empty_entries(self.include_dirs()),
            extra_compile_args=self.strip_empty_entries(self.cxx_flags()),
        )

    def build_jit(self) -> "Callable":
        """Compile (or reuse a cached build of) the kernel at runtime and return it.

        Note: the annotation previously said None, but this method returns the
        loaded kernel module.
        """
        from torch.utils.cpp_extension import load

        build_directory = _Extension.get_jit_extension_folder_path()
        build_directory = Path(build_directory)
        build_directory.mkdir(parents=True, exist_ok=True)

        # check if the kernel has been built before (a cached .so exists)
        compiled_before = False
        kernel_file_path = build_directory.joinpath(f"{self.name}.so")
        if kernel_file_path.exists():
            compiled_before = True

        # load the kernel
        if compiled_before:
            print(f"[extension] Loading the JIT-built {self.name} kernel during runtime now")
        else:
            print(f"[extension] Compiling the JIT {self.name} kernel during runtime now")

        build_start = time.time()
        op_kernel = load(
            name=self.name,
            sources=self.strip_empty_entries(self.sources_files()),
            extra_include_paths=self.strip_empty_entries(self.include_dirs()),
            extra_cflags=self.cxx_flags(),
            extra_ldflags=[],
            build_directory=str(build_directory),
        )
        build_duration = time.time() - build_start

        if compiled_before:
            print(f"[extension] Time taken to load {self.name} op: {build_duration} seconds")
        else:
            print(f"[extension] Time taken to compile {self.name} op: {build_duration} seconds")

        return op_kernel

    # functions that subclasses must override begin here
    @abstractmethod
    def sources_files(self) -> List[str]:
        """
        This function should return a list of source files for extensions.
        """

    @abstractmethod
    def include_dirs(self) -> List[str]:
        """
        This function should return a list of include files for extensions.
        """
        return [self.csrc_abs_path("")]

    @abstractmethod
    def cxx_flags(self) -> List[str]:
        """
        This function should return a list of cxx compilation flags for extensions.
        """

    def load(self):
        """Return the kernel, preferring the pre-built module over a JIT build."""
        try:
            op_kernel = self.import_op()
        except (ImportError, ModuleNotFoundError):
            # if import error occurs, it means that the kernel is not pre-built
            # so we build it jit
            op_kernel = self.build_jit()
        return op_kernel
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/base_extension.py | extensions/base_extension.py | import hashlib
import os
from abc import ABC, abstractmethod
from typing import Callable, Union
__all__ = ["_Extension"]
class _Extension(ABC):
    """Abstract base for a compiled kernel extension.

    An extension may support ahead-of-time (AOT) compilation via setup.py,
    just-in-time (JIT) compilation at runtime, or both. `priority` ranks
    competing implementations of the same kernel (higher wins).
    """

    def __init__(self, name: str, support_aot: bool, support_jit: bool, priority: int = 1):
        # unique kernel name; also used for the cached .so file name
        self._name = name
        self._support_aot = support_aot
        self._support_jit = support_jit
        self.priority = priority

    @property
    def name(self):
        """The unique name of this kernel extension."""
        return self._name

    @property
    def support_aot(self):
        """Whether this extension can be compiled ahead of time (setup.py build)."""
        return self._support_aot

    @property
    def support_jit(self):
        """Whether this extension can be compiled just-in-time at runtime."""
        return self._support_jit

    @staticmethod
    def get_jit_extension_folder_path():
        """
        Kernels which are compiled during runtime will be stored in the same cache folder for reuse.

        The folder is in the path ~/.cache/colossalai/torch_extensions/<cache-folder>.
        The name of the <cache-folder> follows a common format:
            torch<torch_version_major>.<torch_version_minor>_<device_name><device_version>-<hash>

        The <hash> suffix is the hash value of the path of the `colossalai` file.
        """
        import torch

        import colossalai
        from colossalai.accelerator import get_accelerator

        # get torch version (major.minor only — patch differences share a cache)
        torch_version_major = torch.__version__.split(".")[0]
        torch_version_minor = torch.__version__.split(".")[1]

        # get device name and version from the active accelerator backend
        device_name = get_accelerator().name
        device_version = get_accelerator().get_version()

        # use the hash of colossalai's install path so different installs
        # (e.g. different virtualenvs) do not share stale builds
        hash_suffix = hashlib.sha256(colossalai.__file__.encode()).hexdigest()

        # concat all components into the cache directory path
        home_directory = os.path.expanduser("~")
        extension_directory = f".cache/colossalai/torch_extensions/torch{torch_version_major}.{torch_version_minor}_{device_name}-{device_version}-{hash_suffix}"
        cache_directory = os.path.join(home_directory, extension_directory)
        return cache_directory

    @abstractmethod
    def is_available(self) -> bool:
        """
        Check if the hardware required by the kernel is available.
        """

    @abstractmethod
    def assert_compatible(self) -> None:
        """
        Check if the hardware required by the kernel is compatible.
        """

    @abstractmethod
    def build_aot(self) -> Union["CppExtension", "CUDAExtension"]:
        """Return a setuptools extension object for an ahead-of-time build."""
        pass

    @abstractmethod
    def build_jit(self) -> Callable:
        """Compile the kernel at runtime and return the loaded module/callable."""
        pass

    @abstractmethod
    def load(self) -> Callable:
        """Return the kernel, building it first if necessary."""
        pass
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/utils.py | extensions/utils.py | import os
import re
import subprocess
import warnings
from typing import List
def print_rank_0(message: str) -> None:
    """
    Print on only one process to avoid spamming.
    """
    main_rank = True
    try:
        import torch.distributed as dist

        # when a process group is initialized, only rank 0 prints
        if dist.is_initialized():
            main_rank = dist.get_rank() == 0
    except ImportError:
        # torch not installed: behave like a single-process run
        main_rank = True

    if main_rank:
        print(message)
def get_cuda_version_in_pytorch() -> List[int]:
    """
    This function returns the CUDA version in the PyTorch build.

    Returns:
        The CUDA version required by PyTorch, in the form of tuple (major, minor).

    Raises:
        ValueError: if torch was built without CUDA (torch.version.cuda is None).
    """
    import torch

    try:
        torch_cuda_major = torch.version.cuda.split(".")[0]
        torch_cuda_minor = torch.version.cuda.split(".")[1]
    except (AttributeError, IndexError) as err:
        # torch.version.cuda is None on CPU-only builds (AttributeError on .split);
        # narrowed from a bare `except:` and chained so the cause is not lost
        raise ValueError(
            "[extension] Cannot retrieve the CUDA version in the PyTorch binary given by torch.version.cuda"
        ) from err
    return torch_cuda_major, torch_cuda_minor
def get_cuda_bare_metal_version(cuda_dir) -> List[int]:
    """
    Get the System CUDA version from nvcc.

    Args:
        cuda_dir (str): the directory for CUDA Toolkit.

    Returns:
        The CUDA version required by PyTorch, in the form of tuple (major, minor).

    Raises:
        ValueError: if cuda_dir is None or the nvcc output cannot be parsed.
        FileNotFoundError: if nvcc is not found under cuda_dir.
    """
    # validate cuda_dir before building any path from it — the original joined
    # the path first, which raised an opaque TypeError when cuda_dir was None
    if cuda_dir is None:
        raise ValueError(
            f"[extension] The argument cuda_dir is None, but expected to be a string. Please make sure your have exported the environment variable CUDA_HOME correctly."
        )
    nvcc_path = os.path.join(cuda_dir, "bin/nvcc")

    # check for nvcc path
    if not os.path.exists(nvcc_path):
        raise FileNotFoundError(
            f"[extension] The nvcc compiler is not found in {nvcc_path}, please make sure you have set the correct value for CUDA_HOME."
        )

    # parse the `nvcc -V` output to obtain the system cuda version
    raw_output = ""  # pre-bind so the error message below is always well-defined
    try:
        raw_output = subprocess.check_output([nvcc_path, "-V"], universal_newlines=True)
        output = raw_output.split()
        release_idx = output.index("release") + 1
        release = output[release_idx].split(".")
        bare_metal_major = release[0]
        bare_metal_minor = release[1][0]
    except (subprocess.CalledProcessError, ValueError, IndexError) as err:
        # narrowed from a bare `except:`; the original could also hit
        # UnboundLocalError on raw_output when check_output itself failed
        raise ValueError(
            f"[extension] Failed to parse the nvcc output to obtain the system CUDA bare metal version. The output for 'nvcc -V' is \n{raw_output}"
        ) from err
    return bare_metal_major, bare_metal_minor
def check_system_pytorch_cuda_match(cuda_dir):
    """Verify the system CUDA (from nvcc) matches the CUDA torch was built with.

    Raises on a major-version mismatch, warns on a minor-version mismatch,
    and returns True when compilation may proceed.
    """
    sys_major, sys_minor = get_cuda_bare_metal_version(cuda_dir)
    torch_major, torch_minor = get_cuda_version_in_pytorch()

    # a differing major version is a hard error: the binaries are incompatible
    if sys_major != torch_major:
        raise Exception(
            f"[extension] Failed to build PyTorch extension because the detected CUDA version ({sys_major}.{sys_minor}) "
            f"mismatches the version that was used to compile PyTorch ({torch_major}.{torch_minor})."
            "Please make sure you have set the CUDA_HOME correctly and installed the correct PyTorch in https://pytorch.org/get-started/locally/ ."
        )

    # a differing minor version is tolerated since the APIs stay compatible
    if sys_minor != torch_minor:
        warnings.warn(
            f"[extension] The CUDA version on the system ({sys_major}.{sys_minor}) does not match with the version ({torch_major}.{torch_minor}) torch was compiled with. "
            "The mismatch is found in the minor version. As the APIs are compatible, we will allow compilation to proceed. "
            "If you encounter any issue when using the built kernel, please try to build it again with fully matched CUDA versions"
        )
    return True
def get_pytorch_version() -> List[int]:
    """
    This functions finds the PyTorch version.

    Returns:
        A tuple of integers in the form of (major, minor, patch).
    """
    import torch

    # drop any local build suffix such as "+cu118" before splitting
    version_parts = torch.__version__.split("+")[0].split(".")
    major = int(version_parts[0])
    minor = int(version_parts[1])
    # NOTE(review): the patch component is parsed base-16, presumably to
    # tolerate suffixes such as "0a0" in pre-release builds — confirm
    patch = int(version_parts[2], 16)
    return major, minor, patch
def check_pytorch_version(min_major_version, min_minor_version) -> bool:
    """
    Compare the current PyTorch version with the minimum required version.

    Args:
        min_major_version (int): the minimum major version of PyTorch required
        min_minor_version (int): the minimum minor version of PyTorch required

    Raises:
        RuntimeError: if the installed PyTorch is older than the required version.
    """
    torch_major, torch_minor, _ = get_pytorch_version()

    # reject anything strictly older than (min_major, min_minor)
    too_old = torch_major < min_major_version or (
        torch_major == min_major_version and torch_minor < min_minor_version
    )
    if too_old:
        raise RuntimeError(
            f"[extension] Colossal-AI requires Pytorch {min_major_version}.{min_minor_version} or newer.\n"
            "The latest stable release can be obtained from https://pytorch.org/get-started/locally/"
        )
def check_cuda_availability():
    """
    Check if CUDA is available on the system.

    Returns:
        A boolean value. True if CUDA is available and False otherwise.
    """
    # torch is imported lazily so this module can load without torch installed
    import torch

    return torch.cuda.is_available()
def set_cuda_arch_list(cuda_dir):
    """
    This function sets the PyTorch TORCH_CUDA_ARCH_LIST variable for ahead-of-time extension compilation.
    Ahead-of-time compilation occurs when BUILD_EXT=1 is set when running 'pip install'.

    Returns True when a GPU is visible (nothing to do), False after setting a
    cross-compilation default arch list.
    """
    # with a visible GPU, torch picks the arch itself — nothing to configure
    if check_cuda_availability():
        return True

    # no GPU visible: assume cross-compilation and warn about the defaults
    warnings.warn(
        "\n[extension] PyTorch did not find available GPUs on this system.\n"
        "If your intention is to cross-compile, this is not an error.\n"
        "By default, Colossal-AI will cross-compile for \n"
        "1. Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
        "2. Volta (compute capability 7.0)\n"
        "3. Turing (compute capability 7.5),\n"
        "4. Ampere (compute capability 8.0, 8.6)if the CUDA version is >= 11.0\n"
        "\nIf you wish to cross-compile for a single specific architecture,\n"
        'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n'
    )

    # respect an explicit user-provided arch list
    if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
        bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
        arch_list = ["6.0", "6.1", "6.2", "7.0", "7.5"]
        if int(bare_metal_major) == 11:
            if int(bare_metal_minor) == 0:
                # CUDA 11.0 supports Ampere 8.0 only
                arch_list.append("8.0")
            else:
                # CUDA 11.1+ also supports 8.6
                arch_list += ["8.0", "8.6"]
        os.environ["TORCH_CUDA_ARCH_LIST"] = ";".join(arch_list)
    return False
def get_cuda_cc_flag() -> List[str]:
    """
    This function produces the cc flags for your GPU arch

    Returns:
        The CUDA cc flags for compilation.
    """
    # only import torch when needed
    # this is to avoid importing torch when building on a machine without torch pre-installed
    # one case is to build wheel for pypi release
    import torch

    max_arch = "".join(str(i) for i in torch.cuda.get_device_capability())
    cc_flag = []
    for arch in torch.cuda.get_arch_list():
        found = re.search(r"sm_(\d+)", arch)
        if not found:
            continue
        arch_cap = found[1]
        # keep architectures from Pascal (sm_60) up to the local device's capability
        if 60 <= int(arch_cap) <= int(max_arch):
            cc_flag.extend(["-gencode", f"arch=compute_{arch_cap},code={arch}"])
    return cc_flag
def append_nvcc_threads(nvcc_extra_args: List[str]) -> List[str]:
    """
    This function appends the threads flag to your nvcc args.

    Returns:
        The nvcc compilation flags including the threads flag.
    """
    from torch.utils.cpp_extension import CUDA_HOME

    bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
    # `--threads` is supported from nvcc 11.2 onwards. Compare (major, minor)
    # as a tuple: the original `major >= 11 and minor >= 2` wrongly rejected
    # versions such as 12.0 and 12.1.
    if (int(bare_metal_major), int(bare_metal_minor)) >= (11, 2):
        return nvcc_extra_args + ["--threads", "4"]
    return nvcc_extra_args
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/__init__.py | extensions/__init__.py | from .pybind.cpu_adam import CpuAdamArmExtension, CpuAdamX86Extension
from .pybind.flash_attention import (
FlashAttentionDaoCudaExtension,
FlashAttentionNpuExtension,
FlashAttentionSdpaCudaExtension,
)
from .pybind.inference import InferenceOpsCudaExtension
from .pybind.layernorm import LayerNormCudaExtension
from .pybind.moe import MoeCudaExtension
from .pybind.optimizer import FusedOptimizerCudaExtension
from .pybind.softmax import ScaledMaskedSoftmaxCudaExtension, ScaledUpperTriangleMaskedSoftmaxCudaExtension
# Every extension class shipped with ColossalAI.  Consumers (kernel loaders)
# typically filter this list by hardware availability and priority.
ALL_EXTENSIONS = [
    CpuAdamArmExtension,
    CpuAdamX86Extension,
    LayerNormCudaExtension,
    MoeCudaExtension,
    FusedOptimizerCudaExtension,
    InferenceOpsCudaExtension,
    ScaledMaskedSoftmaxCudaExtension,
    ScaledUpperTriangleMaskedSoftmaxCudaExtension,
    FlashAttentionDaoCudaExtension,
    FlashAttentionSdpaCudaExtension,
    FlashAttentionNpuExtension,
]

# Public API of this package.
__all__ = [
    "CpuAdamArmExtension",
    "CpuAdamX86Extension",
    "LayerNormCudaExtension",
    "MoeCudaExtension",
    "FusedOptimizerCudaExtension",
    "InferenceOpsCudaExtension",
    "ScaledMaskedSoftmaxCudaExtension",
    "ScaledUpperTriangleMaskedSoftmaxCudaExtension",
    "FlashAttentionDaoCudaExtension",
    "FlashAttentionSdpaCudaExtension",
    "FlashAttentionNpuExtension",
]
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/triton_extension.py | extensions/triton_extension.py | from .base_extension import _Extension
__all__ = ["_TritonExtension"]
class _TritonExtension(_Extension):
    """Base class for extensions implemented as Triton JIT kernels.

    Triton kernels are compiled just-in-time, so these extensions report
    ``support_aot=False`` / ``support_jit=True``.
    """

    def __init__(self, name: str, priority: int = 1):
        super().__init__(name, support_aot=False, support_jit=True, priority=priority)

    def is_hardware_compatible(self) -> bool:
        """Return True when a usable CUDA device is present."""
        # A Triton extension can only be built if CUDA is available.  Catch
        # Exception rather than using a bare ``except`` so that
        # KeyboardInterrupt/SystemExit are not swallowed; ImportError (torch
        # missing) is still handled as "not compatible".
        try:
            import torch

            cuda_available = torch.cuda.is_available()
        except Exception:
            cuda_available = False
        return cuda_available

    def load(self):
        """JIT-build and return the kernel."""
        return self.build_jit()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/csrc/__init__.py | extensions/csrc/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/__init__.py | extensions/pybind/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/softmax/scaled_masked_softmax_cuda.py | extensions/pybind/softmax/scaled_masked_softmax_cuda.py | from ...cuda_extension import _CudaExtension
from ...utils import append_nvcc_threads
class ScaledMaskedSoftmaxCudaExtension(_CudaExtension):
    """CUDA extension providing the scaled masked softmax kernel."""

    def __init__(self):
        super().__init__(name="scaled_masked_softmax_cuda")

    def sources_files(self):
        # CUDA kernel source plus its pybind11 wrapper.
        kernel_sources = [self.csrc_abs_path("kernel/cuda/scaled_masked_softmax_kernel.cu")]
        binding_sources = [self.pybind_abs_path("softmax/scaled_masked_softmax.cpp")]
        return kernel_sources + binding_sources

    def cxx_flags(self):
        return ["-O3"] + self.version_dependent_macros

    def nvcc_flags(self):
        extra_cuda_flags = [
            "-std=c++14",
            "-std=c++17",
            "-U__CUDA_NO_HALF_OPERATORS__",
            "-U__CUDA_NO_HALF_CONVERSIONS__",
            "-U__CUDA_NO_HALF2_OPERATORS__",
            "-DTHRUST_IGNORE_CUB_VERSION_CHECK",
        ]
        base_flags = ["-O3", "--use_fast_math"]
        all_flags = base_flags + self.version_dependent_macros + extra_cuda_flags + super().nvcc_flags()
        return append_nvcc_threads(all_flags)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/softmax/__init__.py | extensions/pybind/softmax/__init__.py | from .scaled_masked_softmax_cuda import ScaledMaskedSoftmaxCudaExtension
from .scaled_upper_triangle_masked_softmax_cuda import ScaledUpperTriangleMaskedSoftmaxCudaExtension
# Public API of the softmax extension subpackage.
__all__ = ["ScaledMaskedSoftmaxCudaExtension", "ScaledUpperTriangleMaskedSoftmaxCudaExtension"]
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/softmax/scaled_upper_triangle_masked_softmax_cuda.py | extensions/pybind/softmax/scaled_upper_triangle_masked_softmax_cuda.py | from ...cuda_extension import _CudaExtension
from ...utils import append_nvcc_threads, get_cuda_cc_flag
class ScaledUpperTriangleMaskedSoftmaxCudaExtension(_CudaExtension):
    """CUDA extension for the scaled upper-triangular (causal) masked softmax kernel."""

    def __init__(self):
        super().__init__(name="scaled_upper_triangle_masked_softmax_cuda")

    def sources_files(self):
        # CUDA kernel source plus its pybind11 wrapper.
        kernel = self.csrc_abs_path("kernel/cuda/scaled_upper_triang_masked_softmax_kernel.cu")
        binding = self.pybind_abs_path("softmax/scaled_upper_triang_masked_softmax.cpp")
        return [kernel, binding]

    def cxx_flags(self):
        return ["-O3"] + self.version_dependent_macros

    def nvcc_flags(self):
        extra_cuda_flags = [
            "-U__CUDA_NO_HALF_OPERATORS__",
            "-U__CUDA_NO_HALF_CONVERSIONS__",
            "--expt-relaxed-constexpr",
            "--expt-extended-lambda",
            *get_cuda_cc_flag(),
        ]
        all_flags = ["-O3", "--use_fast_math"] + extra_cuda_flags + super().nvcc_flags()
        return append_nvcc_threads(all_flags)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/cpu_adam/cpu_adam_x86.py | extensions/pybind/cpu_adam/cpu_adam_x86.py | import platform
from ...cuda_extension import _CudaExtension
from ...utils import append_nvcc_threads
class CpuAdamX86Extension(_CudaExtension):
    """Fused CPU Adam kernel targeting x86_64 hosts (built with CUDA support)."""

    def __init__(self):
        super().__init__(name="cpu_adam_x86")

    def is_available(self) -> bool:
        # x86_64 hosts only, and the usual CUDA checks must also pass.
        return platform.machine() == "x86_64" and super().is_available()

    def assert_compatible(self) -> None:
        arch = platform.machine()
        assert (
            arch == "x86_64"
        ), f"[extension] The {self.name} kernel requires the CPU architecture to be x86_64 but got {arch}"
        super().assert_compatible()

    # necessary 4 functions
    def sources_files(self):
        return [self.csrc_abs_path("kernel/x86/cpu_adam.cpp")]

    def cxx_flags(self):
        extra_cxx_flags = [
            "-std=c++14",
            "-std=c++17",
            "-lcudart",
            "-lcublas",
            "-g",
            "-Wno-reorder",
            "-fopenmp",
            "-march=native",
        ]
        return ["-O3"] + self.version_dependent_macros + extra_cxx_flags

    def nvcc_flags(self):
        extra_cuda_flags = [
            "-std=c++14",
            "-std=c++17",
            "-U__CUDA_NO_HALF_OPERATORS__",
            "-U__CUDA_NO_HALF_CONVERSIONS__",
            "-U__CUDA_NO_HALF2_OPERATORS__",
            "-DTHRUST_IGNORE_CUB_VERSION_CHECK",
        ]
        all_flags = ["-O3", "--use_fast_math"] + self.version_dependent_macros + extra_cuda_flags + super().nvcc_flags()
        return append_nvcc_threads(all_flags)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/cpu_adam/__init__.py | extensions/pybind/cpu_adam/__init__.py | from .cpu_adam_arm import CpuAdamArmExtension
from .cpu_adam_x86 import CpuAdamX86Extension
# Public API of the cpu_adam extension subpackage.
__all__ = ["CpuAdamArmExtension", "CpuAdamX86Extension"]
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/cpu_adam/cpu_adam_arm.py | extensions/pybind/cpu_adam/cpu_adam_arm.py | import platform
from typing import List
from ...cpp_extension import _CppExtension
class CpuAdamArmExtension(_CppExtension):
    """CPU Adam kernel built for aarch64 (ARM) hosts; pure C++, no CUDA."""

    def __init__(self):
        super().__init__(name="cpu_adam_arm")

    def is_available(self) -> bool:
        # only arm allowed
        return platform.machine() == "aarch64"

    def assert_compatible(self) -> None:
        arch = platform.machine()
        assert (
            arch == "aarch64"
        ), f"[extension] The {self.name} kernel requires the CPU architecture to be aarch64 but got {arch}"

    # necessary 4 functions
    def sources_files(self):
        return [self.csrc_abs_path("kernel/arm/cpu_adam_arm.cpp")]

    def include_dirs(self) -> List[str]:
        # No extra include dirs beyond the base class defaults.
        return super().include_dirs()

    def cxx_flags(self):
        extra_cxx_flags = [
            "-std=c++14",
            "-std=c++17",
            "-g",
            "-Wno-reorder",
            "-fopenmp",
        ]
        return ["-O3"] + self.version_dependent_macros + extra_cxx_flags

    def nvcc_flags(self):
        # A pure C++ extension needs no nvcc flags.
        return []
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/layernorm/layernorm_cuda.py | extensions/pybind/layernorm/layernorm_cuda.py | from ...cuda_extension import _CudaExtension
from ...utils import append_nvcc_threads, get_cuda_cc_flag
class LayerNormCudaExtension(_CudaExtension):
    """CUDA extension for the fused layer-norm kernel."""

    def __init__(self):
        super().__init__(name="layernorm_cuda")

    def sources_files(self):
        # CUDA kernel source plus its pybind11 wrapper.
        kernel = self.csrc_abs_path("kernel/cuda/layer_norm_kernel.cu")
        binding = self.pybind_abs_path("layernorm/layer_norm.cpp")
        return [kernel, binding]

    def include_dirs(self):
        return [self.get_cuda_home_include(), self.csrc_abs_path("")]

    def cxx_flags(self):
        return ["-O3"] + self.version_dependent_macros

    def nvcc_flags(self):
        extra_cuda_flags = ["-maxrregcount=50", *get_cuda_cc_flag()]
        all_flags = ["-O3", "--use_fast_math"] + extra_cuda_flags + self.version_dependent_macros + super().nvcc_flags()
        return append_nvcc_threads(all_flags)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/layernorm/__init__.py | extensions/pybind/layernorm/__init__.py | from .layernorm_cuda import LayerNormCudaExtension
# Public API of the layernorm extension subpackage.
__all__ = ["LayerNormCudaExtension"]
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/flash_attention/flash_attention_sdpa_cuda.py | extensions/pybind/flash_attention/flash_attention_sdpa_cuda.py | from ...base_extension import _Extension
class FlashAttentionSdpaCudaExtension(_Extension):
    """Flash attention backend backed by torch's scaled_dot_product_attention.

    Requires no compilation: :meth:`load` simply returns a thin wrapper
    around ``torch.nn.functional.scaled_dot_product_attention``.
    """

    def __init__(self):
        super().__init__(name="flash_attention_sdpa_cuda", support_aot=False, support_jit=False)

    def is_available(self) -> bool:
        """Return True when torch is importable and CUDA is available."""
        # Catch Exception rather than using a bare ``except`` so that
        # KeyboardInterrupt/SystemExit are not swallowed; ImportError (torch
        # missing) is still treated as "unavailable".
        try:
            import torch

            cuda_available = torch.cuda.is_available()
        except Exception:
            cuda_available = False
        return cuda_available

    def assert_compatible(self) -> bool:
        # No extra compatibility constraints beyond is_available().
        pass

    def build_aot(self) -> None:
        raise NotImplementedError("Flash attention SDPA does not require ahead-of-time compilation.")

    def build_jit(self) -> None:
        raise NotImplementedError("Flash attention SDPA does not require just-in-time compilation.")

    def load(self):
        """Return a ``flash_attention(q, k, v, ...)`` callable wrapping SDPA."""
        from typing import Optional

        import torch

        def flash_attention(
            q: torch.Tensor,
            k: torch.Tensor,
            v: torch.Tensor,
            dropout_p: float = 0.0,
            scale: Optional[float] = None,
            attention_mask: Optional[torch.Tensor] = None,
            is_causal: bool = False,
            cu_seqlens_q: Optional[torch.Tensor] = None,
            cu_seqlens_kv: Optional[torch.Tensor] = None,
            max_seqlen_q: Optional[int] = None,
            max_seqlen_kv: Optional[int] = None,
            q_indices: Optional[torch.Tensor] = None,
            kv_indices: Optional[torch.Tensor] = None,
        ):
            # NOTE(review): is_causal and the varlen arguments are accepted for
            # interface parity with the other backends but are not forwarded to
            # SDPA — presumably causality is encoded in attention_mask by the
            # caller; confirm against call sites.
            return torch.nn.functional.scaled_dot_product_attention(
                q,
                k,
                v,
                attn_mask=attention_mask,
                dropout_p=dropout_p,
                scale=scale,
            )

        return flash_attention
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/flash_attention/flash_attention_dao_cuda.py | extensions/pybind/flash_attention/flash_attention_dao_cuda.py | from ...base_extension import _Extension
class FlashAttentionDaoCudaExtension(_Extension):
    """Flash attention backend backed by the third-party flash-attn library (Dao et al.)."""

    def __init__(self):
        # Highest priority among the flash-attention backends.
        super().__init__(name="flash_attention_dao_cuda", support_aot=False, support_jit=False, priority=10)

    def is_available(self) -> bool:
        """Return True when flash-attn is importable and CUDA is available."""
        # Catch Exception rather than using a bare ``except`` so that
        # KeyboardInterrupt/SystemExit are not swallowed; ImportError
        # (flash-attn or torch missing) is still treated as "unavailable".
        try:
            import torch
            from flash_attn import flash_attn_func, flash_attn_varlen_kvpacked_func  # noqa
            from flash_attn.bert_padding import index_first_axis, pad_input  # noqa

            cuda_available = torch.cuda.is_available()
        except Exception:
            cuda_available = False
        return cuda_available

    def assert_compatible(self) -> bool:
        # No extra compatibility constraints beyond is_available().
        pass

    def build_aot(self) -> None:
        raise NotImplementedError(
            "We rely on the third-party flash-attn library for flash attention (https://github.com/Dao-AILab/flash-attention). Please install flash-attn via 'pip install flash-attn --no-build-isolation'."
        )

    def build_jit(self) -> None:
        raise NotImplementedError(
            "We rely on the third-party flash-attn library for flash attention (https://github.com/Dao-AILab/flash-attention). Please install flash-attn via 'pip install flash-attn --no-build-isolation'"
        )

    def load(self):
        """Return a ``flash_attention(q, k, v, ...)`` callable wrapping flash-attn."""
        from typing import Optional

        import torch
        from einops import rearrange
        from flash_attn import flash_attn_func, flash_attn_varlen_kvpacked_func
        from flash_attn.bert_padding import index_first_axis, pad_input

        def _unpad_input(hidden_states: torch.Tensor, indices: torch.Tensor):
            # Flatten batch/seq dims and keep only the rows selected by `indices`.
            return index_first_axis(rearrange(hidden_states, "b s ... -> (b s) ..."), indices)

        def flash_attention(
            q: torch.Tensor,
            k: torch.Tensor,
            v: torch.Tensor,
            dropout_p: float = 0.0,
            scale: Optional[float] = None,
            attention_mask: Optional[torch.Tensor] = None,
            is_causal: bool = False,
            cu_seqlens_q: Optional[torch.Tensor] = None,
            cu_seqlens_kv: Optional[torch.Tensor] = None,
            max_seqlen_q: Optional[int] = None,
            max_seqlen_kv: Optional[int] = None,
            q_indices: Optional[torch.Tensor] = None,
            kv_indices: Optional[torch.Tensor] = None,
        ):
            # [B, H, S, D] -> [B, S, H, D]
            q = q.transpose(1, 2)
            k = k.transpose(1, 2)
            v = v.transpose(1, 2)
            b, s_q = q.shape[:2]
            if cu_seqlens_q is not None:
                # padded / padded causal
                # unpad input: [B, S, H, D] -> [T, H, D]
                q = _unpad_input(q, q_indices)
                kv = _unpad_input(torch.stack(tensors=(k, v), dim=2), kv_indices)
                attn_output = flash_attn_varlen_kvpacked_func(
                    q,
                    kv,
                    cu_seqlens_q,
                    cu_seqlens_kv,
                    max_seqlen_q,
                    max_seqlen_kv,
                    dropout_p=dropout_p,
                    softmax_scale=scale,
                    causal=is_causal,
                )
                # pad output: [T, H, D] -> [B, S, H, D]
                attn_output = pad_input(attn_output, q_indices, b, s_q)
            else:
                # causal / no attn mask
                attn_output = flash_attn_func(
                    q,
                    k,
                    v,
                    dropout_p=dropout_p,
                    softmax_scale=scale,
                    causal=is_causal,
                )
            # [B, S, H, D] -> [B, H, S, D]
            return attn_output.transpose(1, 2)

        return flash_attention
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/flash_attention/flash_attention_npu.py | extensions/pybind/flash_attention/flash_attention_npu.py | import math
from ...base_extension import _Extension
class FlashAttentionNpuExtension(_Extension):
    """Flash attention backend backed by Ascend NPU's torch_npu fused kernel."""

    def __init__(self):
        super().__init__(name="flash_attention_npu", support_aot=False, support_jit=False)

    def is_available(self) -> bool:
        """Return True when torch_npu exposes ``npu_fusion_attention``."""
        # Catch Exception rather than using a bare ``except`` so that
        # KeyboardInterrupt/SystemExit are not swallowed; ImportError
        # (torch_npu missing) is still treated as "unavailable".
        try:
            import torch_npu

            return hasattr(torch_npu, "npu_fusion_attention")
        except Exception:
            return False

    def assert_compatible(self) -> bool:
        # No extra compatibility constraints beyond is_available().
        pass

    def build_aot(self) -> None:
        raise NotImplementedError(
            "Flash Attention NPU does not require ahead-of-time compilation. Please use it by installing torch_npu."
        )

    def build_jit(self) -> None:
        raise NotImplementedError(
            "Flash Attention NPU does not require just-in-time compilation. Please use it by installing torch_npu."
        )

    def load(self):
        """Return a ``flash_attention(q, k, v, ...)`` callable wrapping npu_fusion_attention."""
        from typing import Optional

        import torch
        import torch_npu

        def flash_attention(
            q: torch.Tensor,
            k: torch.Tensor,
            v: torch.Tensor,
            dropout_p: float = 0.0,
            scale: Optional[float] = None,
            attention_mask: Optional[torch.Tensor] = None,
            is_causal: bool = False,
            cu_seqlens_q: Optional[torch.Tensor] = None,
            cu_seqlens_kv: Optional[torch.Tensor] = None,
            max_seqlen_q: Optional[int] = None,
            max_seqlen_kv: Optional[int] = None,
            q_indices: Optional[torch.Tensor] = None,
            kv_indices: Optional[torch.Tensor] = None,
        ):
            if scale is None:
                scale = 1.0 / math.sqrt(q.size(-1))
            num_heads = q.size(1)
            # NOTE(review): attention_mask.bool() raises if attention_mask is
            # None — callers apparently always pass a mask; confirm against
            # call sites before relying on a None mask here.
            return torch_npu.npu_fusion_attention(
                q,
                k,
                v,
                num_heads,
                "BNSD",
                atten_mask=attention_mask.bool(),
                scale=scale,
                keep_prob=1 - dropout_p,
            )[0]

        return flash_attention
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/flash_attention/__init__.py | extensions/pybind/flash_attention/__init__.py | from .flash_attention_dao_cuda import FlashAttentionDaoCudaExtension
from .flash_attention_npu import FlashAttentionNpuExtension
from .flash_attention_sdpa_cuda import FlashAttentionSdpaCudaExtension
try:
    # TODO: remove this after updating openmoe example
    import flash_attention  # noqa

    HAS_FLASH_ATTN = True
except Exception:
    # Catch Exception rather than using a bare ``except`` so interpreter-exit
    # signals are not swallowed; ImportError is the expected failure here.
    HAS_FLASH_ATTN = False

# Public API of the flash-attention extension subpackage.
__all__ = ["FlashAttentionDaoCudaExtension", "FlashAttentionSdpaCudaExtension", "FlashAttentionNpuExtension"]
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/inference/inference_ops_cuda.py | extensions/pybind/inference/inference_ops_cuda.py | from ...cuda_extension import _CudaExtension
from ...utils import get_cuda_cc_flag
class InferenceOpsCudaExtension(_CudaExtension):
    """CUDA extension bundling the inference-serving kernels."""

    def __init__(self):
        super().__init__(name="inference_ops_cuda")

    def sources_files(self):
        kernel_files = [
            "kernel/cuda/decode_kv_cache_memcpy_kernel.cu",
            "kernel/cuda/context_kv_cache_memcpy_kernel.cu",
            "kernel/cuda/fused_rotary_emb_and_cache_kernel.cu",
            "kernel/cuda/activation_kernel.cu",
            "kernel/cuda/rms_layernorm_kernel.cu",
            "kernel/cuda/get_cos_and_sin_kernel.cu",
            "kernel/cuda/flash_decoding_attention_kernel.cu",
            "kernel/cuda/convert_fp8_kernel.cu",
        ]
        # Kernel sources plus the pybind11 wrapper.
        sources = [self.csrc_abs_path(name) for name in kernel_files]
        sources.append(self.pybind_abs_path("inference/inference.cpp"))
        return sources

    def cxx_flags(self):
        version_dependent_macros = ["-DVERSION_GE_1_1", "-DVERSION_GE_1_3", "-DVERSION_GE_1_5"]
        return ["-O3"] + version_dependent_macros

    def nvcc_flags(self):
        extra_cuda_flags = ["-lineinfo", *get_cuda_cc_flag()]
        return ["-O3", "--use_fast_math"] + extra_cuda_flags + super().nvcc_flags()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/inference/__init__.py | extensions/pybind/inference/__init__.py | from .inference_ops_cuda import InferenceOpsCudaExtension
__all__ = ["InferenceOpsCudaExtension"]
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/optimizer/fused_optimizer_cuda.py | extensions/pybind/optimizer/fused_optimizer_cuda.py | from ...cuda_extension import _CudaExtension
from ...utils import get_cuda_cc_flag
class FusedOptimizerCudaExtension(_CudaExtension):
    """CUDA extension bundling the fused multi-tensor optimizer kernels."""

    def __init__(self):
        super().__init__(name="fused_optim_cuda")

    def sources_files(self):
        kernel_files = [
            "kernel/cuda/multi_tensor_sgd_kernel.cu",
            "kernel/cuda/multi_tensor_scale_kernel.cu",
            "kernel/cuda/multi_tensor_adam_kernel.cu",
            "kernel/cuda/multi_tensor_l2norm_kernel.cu",
            "kernel/cuda/multi_tensor_lamb_kernel.cu",
        ]
        # Kernel sources plus the pybind11 wrapper.
        sources = [self.csrc_abs_path(name) for name in kernel_files]
        sources.append(self.pybind_abs_path("optimizer/optimizer.cpp"))
        return sources

    def cxx_flags(self):
        version_dependent_macros = ["-DVERSION_GE_1_1", "-DVERSION_GE_1_3", "-DVERSION_GE_1_5"]
        return ["-O3"] + version_dependent_macros

    def nvcc_flags(self):
        extra_cuda_flags = ["-lineinfo", *get_cuda_cc_flag()]
        return ["-O3", "--use_fast_math"] + extra_cuda_flags + super().nvcc_flags()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/optimizer/__init__.py | extensions/pybind/optimizer/__init__.py | from .fused_optimizer_cuda import FusedOptimizerCudaExtension
# Public API of the optimizer extension subpackage.
__all__ = ["FusedOptimizerCudaExtension"]
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/moe/moe_cuda.py | extensions/pybind/moe/moe_cuda.py | from ...cuda_extension import _CudaExtension
from ...utils import append_nvcc_threads, get_cuda_cc_flag
class MoeCudaExtension(_CudaExtension):
    """CUDA extension for the mixture-of-experts (MoE) kernel."""

    def __init__(self):
        super().__init__(name="moe_cuda")

    def sources_files(self):
        # CUDA kernel source plus its pybind11 wrapper.
        kernel = self.csrc_abs_path("kernel/cuda/moe_kernel.cu")
        binding = self.pybind_abs_path("moe/moe.cpp")
        return [kernel, binding]

    def cxx_flags(self):
        return ["-O3"] + self.version_dependent_macros

    def nvcc_flags(self):
        extra_cuda_flags = [
            "-U__CUDA_NO_HALF_OPERATORS__",
            "-U__CUDA_NO_HALF_CONVERSIONS__",
            "--expt-relaxed-constexpr",
            "--expt-extended-lambda",
            *get_cuda_cc_flag(),
        ]
        all_flags = ["-O3", "--use_fast_math"] + extra_cuda_flags + super().nvcc_flags()
        return append_nvcc_threads(all_flags)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/extensions/pybind/moe/__init__.py | extensions/pybind/moe/__init__.py | from .moe_cuda import MoeCudaExtension
# Public API of the moe extension subpackage.
__all__ = ["MoeCudaExtension"]
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/conftest.py | tests/conftest.py | import gc
from colossalai.accelerator import get_accelerator
def pytest_runtest_setup(item):
    """Pytest hook run before each test: free accelerator caches and GC.

    Releases cached accelerator (e.g. CUDA) memory and collects Python
    garbage so every test starts from a clean memory state.
    """
    get_accelerator().empty_cache()
    gc.collect()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/__init__.py | tests/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_optimizer/test_nvme.py | tests/test_optimizer/test_nvme.py | import pytest
import torch
from colossalai.nn.optimizer import CPUAdam, HybridAdam
from colossalai.testing import clear_cache_before_run, parameterize
from tests.kit.model_zoo import model_zoo
def move_some_params_to_cuda(model, torch_model):
    """Move a subset of parameters (embed, ln1) of both models to CUDA.

    The previous code assigned ``model``'s CUDA weights into
    ``torch_model`` (copy-paste bug), making the two models share storage
    for those parameters — which made the later equality check vacuous for
    them.  Each model now moves its own weights.
    """
    model.embed.weight.data = model.embed.weight.cuda()
    torch_model.embed.weight.data = torch_model.embed.weight.cuda()
    model.ln1.weight.data = model.ln1.weight.cuda()
    torch_model.ln1.weight.data = torch_model.ln1.weight.cuda()
def check_params_equal(model, torch_model):
    """Assert every corresponding parameter pair of the two models is close (atol=1e-3)."""
    for param, ref_param in zip(model.parameters(), torch_model.parameters()):
        assert torch.allclose(param, ref_param, atol=1e-3), f"diff: {torch.abs(param - ref_param)}"
# TODO Something wrong with ci when running this test.
@pytest.mark.skip(reason="skip because of something wrong with CI")
@clear_cache_before_run()
@parameterize("nvme_offload_fraction", [0.0, 0.5, 1.0])
@parameterize("nvme_offload_dir", ["./offload", None])
@parameterize("adam_cls", [CPUAdam, HybridAdam])
def test_nvme_adam(nvme_offload_fraction, nvme_offload_dir, adam_cls):
    """Check that an NVMe-offloaded Adam variant matches torch.optim.Adam over a few steps."""
    model_builder, data_gen_fn, *_ = next(iter(model_zoo.get_sub_registry("custom_simple_net").values()))
    model = model_builder()
    torch_model = model_builder()
    move_some_params_to_cuda(model, torch_model)

    optimizer = adam_cls(
        model.parameters(), lr=0.1, nvme_offload_fraction=nvme_offload_fraction, nvme_offload_dir=nvme_offload_dir
    )
    torch_optimizer = torch.optim.Adam(torch_model.parameters(), lr=0.1)

    # Sync the initial weights and give both models identical random grads.
    with torch.no_grad():
        for p, torch_p in zip(model.parameters(), torch_model.parameters()):
            torch_p.copy_(p)
            p.grad = torch.rand_like(p)
            torch_p.grad = p.grad

    for _ in range(3):
        optimizer.step()
        torch_optimizer.step()
        check_params_equal(model, torch_model)


if __name__ == "__main__":
    test_nvme_adam(0.5, "./offload", CPUAdam)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_optimizer/test_adam_optim.py | tests/test_optimizer/test_adam_optim.py | from copy import deepcopy
from typing import Type, Union
import pytest
import torch
import torch.nn as nn
from torch.optim import Adam, AdamW
from colossalai.nn.optimizer import CPUAdam, FusedAdam, HybridAdam
from tests.kit.model_zoo import model_zoo
from tests.test_optimizer._utils import force_assign_grad, setup_param_groups
# (optimizer class, device) combinations under test: FusedAdam is CUDA-only,
# while CPUAdam/HybridAdam run on either device.
_ALLOWED_OPTIM_DEVICES = [
    (FusedAdam, torch.device("cuda:0")),
    (CPUAdam, torch.device("cpu")),
    (CPUAdam, torch.device("cuda:0")),
    (HybridAdam, torch.device("cpu")),
    (HybridAdam, torch.device("cuda:0")),
]

# (param dtype, grad dtype) pairs under test.
_ALLOWED_P_G_TYPES = [
    (torch.float, torch.float),  # pure fp32
    (torch.float, torch.half),  # fp16 amp
    (torch.float, torch.bfloat16),  # bfloat16 amp
]

# Number of optimizer steps compared per configuration.
N_STEPS = 3
def set_grad(model: nn.Module, torch_model: nn.Module, g_dtype: torch.dtype) -> None:
    """Give each corresponding parameter pair identical random gradients.

    The tested model's grads are cast to ``g_dtype`` via force_assign_grad.
    """
    for param, torch_param in zip(model.parameters(), torch_model.parameters()):
        torch_param.grad = torch.rand_like(torch_param)
        # avoid inconsistent grad and param dtype error
        force_assign_grad(param, g_dtype, torch_param.grad)
@pytest.mark.parametrize("optim_cls, device", _ALLOWED_OPTIM_DEVICES)
@pytest.mark.parametrize("adamw", [False, True])
@pytest.mark.parametrize("p_dtype, g_dtype", _ALLOWED_P_G_TYPES)
def test_adam_optim_on_bert(
    optim_cls: Union[Type[FusedAdam], Type[CPUAdam], Type[HybridAdam]],
    device: torch.device,
    adamw: bool,
    p_dtype: torch.dtype,
    g_dtype: torch.dtype,
) -> None:
    """Compare a ColossalAI Adam variant against torch Adam/AdamW on a BERT model."""
    model_fn, *_ = next(iter(model_zoo.get_sub_registry("transformers_bert_for_sequence_classification").values()))
    torch_model = model_fn().to(device)
    model = deepcopy(torch_model).to(p_dtype)

    lr, beta1, beta2, eps = 1e-3, 0.9, 0.999, 1e-8
    torch_optim_cls = AdamW if adamw else Adam
    torch_optim = torch_optim_cls(setup_param_groups(torch_model), lr=lr, betas=(beta1, beta2), eps=eps)
    optim = optim_cls(setup_param_groups(model), lr=lr, betas=(beta1, beta2), eps=eps, adamw_mode=adamw)

    # Loosen tolerances for reduced-precision runs.
    rtol, atol = 1e-5, 1e-5
    if p_dtype is torch.float16 or g_dtype is torch.float16:
        rtol, atol = 2e-3, 2e-3
    if p_dtype is torch.bfloat16 or g_dtype is torch.bfloat16:
        rtol, atol = 4e-3, 4e-3

    for _ in range(N_STEPS):
        set_grad(model, torch_model, g_dtype)
        torch_optim.step()
        optim.step()
        torch_optim.zero_grad()
        optim.zero_grad()
        for p, torch_p in zip(model.parameters(), torch_model.parameters()):
            # if overflow, the weight won't be updated. so there will be no nan in p
            assert not torch.isnan(p).any()
            assert torch.allclose(p.float(), torch_p, rtol=rtol, atol=atol)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_optimizer/test_adam_kernel.py | tests/test_optimizer/test_adam_kernel.py | # This test checks adam kernels
# Baseline is pure fp32 torch adam optimizer
import math
from abc import abstractmethod
from typing import Type
import pytest
import torch
from torch import Tensor
from colossalai.accelerator import get_accelerator
from colossalai.utils import multi_tensor_applier
# (param dtype, grad dtype) pairs accepted by the fused CUDA Adam kernel.
_FUSED_ALLOWED_P_G_TYPES = [
    (torch.float, torch.half),
    (torch.float, torch.float),
    (torch.half, torch.half),
    (torch.float, torch.bfloat16),
    (torch.bfloat16, torch.bfloat16),
]

# (param dtype, grad dtype) pairs accepted by the CPU Adam kernel.
_CPU_ALLOWED_P_G_TYPES = [
    (torch.float, torch.half),
    (torch.float, torch.float),
    (torch.half, torch.half),
]
class AdamKernel:
    """Base class for the Adam kernel wrappers under test.

    Stores the shared hyper-parameters; subclasses implement :meth:`update`
    to apply a single Adam/AdamW step to one parameter tensor in place.
    """

    def __init__(self, lr: float, beta1: float, beta2: float, eps: float, weight_decay: float, use_adamw: bool) -> None:
        self.lr, self.beta1, self.beta2 = lr, beta1, beta2
        self.eps, self.weight_decay, self.use_adamw = eps, weight_decay, use_adamw

    @abstractmethod
    def update(self, step: int, param: Tensor, grad: Tensor, exp_avg: Tensor, exp_avg_sq: Tensor):
        """Apply one optimizer step in place to param/exp_avg/exp_avg_sq."""
class TorchAdamKernel(AdamKernel):
    """Pure-PyTorch reference implementation of an Adam/AdamW step (the fp32 baseline)."""

    def update(self, step: int, param: Tensor, grad: Tensor, exp_avg: Tensor, exp_avg_sq: Tensor):
        correction1 = 1 - self.beta1**step
        correction2 = 1 - self.beta2**step

        if self.weight_decay != 0:
            if self.use_adamw:
                # AdamW: decoupled weight decay applied directly to the weights.
                param.mul_(1 - self.lr * self.weight_decay)
            else:
                # Classic Adam: L2 penalty folded into the gradient.
                grad = grad.add(param, alpha=self.weight_decay)

        # Decay the first and second moment running average coefficient
        exp_avg.mul_(self.beta1).add_(grad, alpha=1 - self.beta1)
        exp_avg_sq.mul_(self.beta2).addcmul_(grad, grad, value=1 - self.beta2)

        denom = (exp_avg_sq.sqrt() / math.sqrt(correction2)).add_(self.eps)
        param.addcdiv_(exp_avg, denom, value=-(self.lr / correction1))
class FusedAdamKernel(AdamKernel):
    # Adam step backed by ColossalAI's fused CUDA multi-tensor kernel.
    def __init__(self, lr: float, beta1: float, beta2: float, eps: float, weight_decay: float, use_adamw: bool) -> None:
        """Load the fused optimizer kernel and allocate the overflow buffer."""
        super().__init__(lr, beta1, beta2, eps, weight_decay, use_adamw)
        from colossalai.kernel.kernel_loader import FusedOptimizerLoader

        fused_optim = FusedOptimizerLoader().load()
        self.fused_adam = fused_optim.multi_tensor_adam
        # Overflow-flag buffer required by the multi-tensor applier.
        self.dummy_overflow_buf = torch.tensor([0], dtype=torch.int, device=get_accelerator().get_current_device())

    def update(self, step: int, param: Tensor, grad: Tensor, exp_avg: Tensor, exp_avg_sq: Tensor):
        """Apply one fused Adam/AdamW step in place to a single-tensor group."""
        # Positional args follow the multi_tensor_adam C++ signature:
        # lr, beta1, beta2, eps, step, adamw_mode, bias_correction(True),
        # weight_decay, div_scale(-1 = disabled).
        multi_tensor_applier(
            self.fused_adam,
            self.dummy_overflow_buf,
            [[grad], [param], [exp_avg], [exp_avg_sq]],
            self.lr,
            self.beta1,
            self.beta2,
            self.eps,
            step,
            self.use_adamw,
            True,
            self.weight_decay,
            -1,
        )
class CPUAdamKernel(AdamKernel):
    # Adam step backed by ColossalAI's C++ CPU Adam kernel.
    def __init__(self, lr: float, beta1: float, beta2: float, eps: float, weight_decay: float, use_adamw: bool) -> None:
        """Load the CPU Adam kernel and construct the underlying C++ optimizer."""
        super().__init__(lr, beta1, beta2, eps, weight_decay, use_adamw)
        from colossalai.kernel.kernel_loader import CPUAdamLoader

        cpu_optim = CPUAdamLoader().load()
        self.cpu_adam_op = cpu_optim.CPUAdamOptimizer(lr, beta1, beta2, eps, weight_decay, use_adamw)

    def update(self, step: int, param: Tensor, grad: Tensor, exp_avg: Tensor, exp_avg_sq: Tensor):
        """Apply one CPU Adam/AdamW step in place; the C++ op works on flat views."""
        # Positional args follow the CPUAdamOptimizer.step C++ signature:
        # step, lr, beta1, beta2, eps, weight_decay, bias_correction(True),
        # flattened tensors, div_scale(-1 = disabled).
        self.cpu_adam_op.step(
            step,
            self.lr,
            self.beta1,
            self.beta2,
            self.eps,
            self.weight_decay,
            True,
            param.view(-1),
            grad.view(-1),
            exp_avg.view(-1),
            exp_avg_sq.view(-1),
            -1,
        )
def check_adam_kernel(
    kernel: Type[AdamKernel],
    adamw: bool,
    weight_decay: float,
    p_dtype: torch.dtype,
    g_dtype: torch.dtype,
    device: torch.device,
    n_steps: int,
    rtol: float,
    atol: float,
):
    """Run n_steps of `kernel` against the fp32 torch baseline and compare params.

    fp32 "master" state is updated by TorchAdamKernel while the (possibly
    low-precision) copies are updated by the kernel under test; after every
    step the copies must match the master within rtol/atol.
    """
    lr, beta1, beta2, eps = 1e-3, 0.9, 0.999, 1e-8
    baseline = TorchAdamKernel(lr, beta1, beta2, eps, weight_decay, adamw)
    tested = kernel(lr, beta1, beta2, eps, weight_decay, adamw)

    # fp32 master tensors, driven by the baseline kernel.
    master_p = torch.rand(64, device=device)
    master_g = torch.rand_like(master_p)
    master_exp_avg = torch.zeros_like(master_p)
    master_exp_avg_sq = torch.zeros_like(master_p)

    # Copies in the requested dtypes, driven by the kernel under test.
    p = master_p.clone().to(p_dtype)
    g = master_g.clone().to(g_dtype)
    exp_avg = master_exp_avg.clone().to(p_dtype)
    exp_avg_sq = master_exp_avg_sq.clone().to(p_dtype)

    for step in range(1, 1 + n_steps):
        baseline.update(step, master_p, master_g, master_exp_avg, master_exp_avg_sq)
        tested.update(step, p, g, exp_avg, exp_avg_sq)
        # if overflow, the weight won't be updated. so there will be no nan in p
        assert not torch.isnan(p).any()
        assert torch.allclose(master_p, p.float(), rtol=rtol, atol=atol)
@pytest.mark.parametrize("adamw", [False, True])
@pytest.mark.parametrize("weight_decay", [0.0, 0.1])
@pytest.mark.parametrize("p_dtype, g_dtype", _FUSED_ALLOWED_P_G_TYPES)
def test_fused_adam_kernel(adamw, weight_decay, p_dtype, g_dtype):
    """FusedAdamKernel must track the fp32 torch baseline on the accelerator."""
    # Loosen tolerances for reduced-precision dtypes (the allowed pairs never
    # mix fp16 with bf16, so branch order is immaterial).
    if p_dtype is torch.bfloat16 or g_dtype is torch.bfloat16:
        rtol, atol = 4e-3, 4e-3
    elif p_dtype is torch.float16 or g_dtype is torch.float16:
        rtol, atol = 1e-3, 1e-3
    else:
        rtol, atol = 1e-5, 1e-8
    check_adam_kernel(
        FusedAdamKernel, adamw, weight_decay, p_dtype, g_dtype, get_accelerator().get_current_device(), 3, rtol, atol
    )
@pytest.mark.parametrize("adamw", [False, True])
@pytest.mark.parametrize("weight_decay", [0.0, 0.1])
@pytest.mark.parametrize("p_dtype, g_dtype", _CPU_ALLOWED_P_G_TYPES)
def test_cpu_adam_kernel(adamw, weight_decay, p_dtype, g_dtype):
    """The CPU C++ kernel must track the PyTorch reference within dtype-dependent tolerances."""
    if p_dtype is torch.float16 or g_dtype is torch.float16:
        # Half precision needs looser tolerances.
        rtol = atol = 1e-3
    else:
        rtol, atol = 1e-5, 1e-8
    check_adam_kernel(CPUAdamKernel, adamw, weight_decay, p_dtype, g_dtype, torch.device("cpu"), 3, rtol, atol)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_optimizer/test_dist_galore.py | tests/test_optimizer/test_dist_galore.py | """Usage(requires 4 GPUs): python test_dist_galore.py"""
import pytest
import torch
import torch.distributed as dist
from torch.testing import assert_close
import colossalai
from colossalai.cluster import DistCoordinator, ProcessGroupMesh
from colossalai.logging import disable_existing_loggers
from colossalai.nn.optimizer import DistGaloreAwamW, GaLoreAdamW8bit
from colossalai.nn.optimizer.galore import get_galore_param_groups
from colossalai.tensor.d_tensor import get_shard_dim_1d, is_distributed_tensor
from colossalai.tensor.d_tensor.api import clear_layout_converter
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
from colossalai.testing.random import seed_all
from colossalai.zero import LowLevelZeroOptimizer
from tests.kit.model_zoo import model_zoo
from tests.test_optimizer._utils import check_optim_states, run_bert_test, set_dist_grad
# (param dtype, grad dtype) combinations exercised by the GaLore tests.
_ALLOWED_P_G_TYPES = [
    (torch.float, torch.float),  # pure fp32
    (torch.half, torch.half),  # fp16 amp
    (torch.bfloat16, torch.bfloat16),  # bfloat16 amp
]
# Identifiers for Tensor Parallel linear layers
_IN_DIM = 32
_HID_DIM = 128
_N_STEP = 3
_SEED = 0
# Set once per process inside check_dist_galore after the process group is launched.
coordinator = None
# Shared optimizer hyperparameters for both reference and distributed optimizers.
lr = 1e-2
beta1, beta2 = 0.9, 0.999
eps = 1e-8
decay = 1e-3
# Model factories pulled from the test model zoo.
Net, data_gen, *_ = next(iter(model_zoo.get_sub_registry("simple_mlp").values()))
TPNet, *_ = next(iter(model_zoo.get_sub_registry("simple_tp_mlp").values()))
# Doesn't support ZeRO for now
test_config = [
    {
        "tp_size": 1,
        "num_microbatches": 4,
        "zero_stage": 0,
        "precision": "bf16",
    },
    {
        "tp_size": 2,
        "num_microbatches": 4,
        "zero_stage": 0,
        "precision": "bf16",
    },
    {
        "tp_size": 4,
        "num_microbatches": 4,
        "zero_stage": 0,
        "precision": "bf16",
    },
]
def assert_grad_close(tp_model, torch_model, tp_group):
    """Assert every TP-sharded grad, once gathered, matches the reference model's grad.

    Args:
        tp_model: tensor-parallel model whose params may be distributed tensors.
        torch_model: unsharded reference model with identical parameter order.
        tp_group: process group over which sharded grads are all-gathered.

    Raises:
        AssertionError: if a gathered grad is all-zero or mismatches the reference.
    """
    tp_size = dist.get_world_size(tp_group)
    # Check equal grads
    for p, torch_p in zip(tp_model.parameters(), torch_model.parameters()):
        grads = p.grad
        if is_distributed_tensor(p):
            # Reassemble the full grad from the 1-D shard layout before comparing.
            split_dim = get_shard_dim_1d(p)
            all_grads = [torch.empty_like(grads) for _ in range(tp_size)]
            dist.all_gather(all_grads, grads.contiguous(), group=tp_group)
            all_grads = torch.cat(all_grads, dim=split_dim)
        else:
            all_grads = grads
        try:
            # Guard against the degenerate "everything is zero" false pass.
            assert (all_grads != 0).any()
            assert_close(all_grads, torch_p.grad)
        except Exception:
            print(f"Before gather: {grads.shape}, after: {all_grads.shape}")
            raise  # bare raise re-raises the active exception with its original traceback
def assert_distributed_close(tp_model, torch_model, rtol, atol, tp_group):
    """Assert each TP-sharded parameter matches this rank's shard of the reference model.

    Raises:
        AssertionError: on NaN weights or a shard mismatch beyond (rtol, atol).
    """
    rank = dist.get_rank(tp_group)
    tp_size = dist.get_world_size(tp_group)
    for (name, p), torch_p in zip(tp_model.named_parameters(), torch_model.parameters()):
        # if overflow, the weight won't be updated. so there will be no nan in p
        assert not torch.isnan(p).any()
        try:
            if is_distributed_tensor(p):
                # Compare against the slice of the reference param this rank owns.
                split_dim = get_shard_dim_1d(p)
                torch_p = torch_p.chunk(tp_size, dim=split_dim)[rank]
            assert_close(p, torch_p, rtol=rtol, atol=atol)
        except AssertionError:
            print(f"grad mismatch in {name}")
            raise  # bare raise re-raises with the original traceback
def force_assign_grad(p, g_dtype, grad=None):
"""avoid inconsistent grad and param dtype error"""
orig_p = p.data
p.data = torch.randn_like(p, device=orig_p.device, dtype=g_dtype) if grad == None else grad
p.grad = p.data
p.data = orig_p
@parameterize("p_g_dtype", _ALLOWED_P_G_TYPES)
@parameterize("tp_zero_size", [(4, 1), (1, 4), (2, 2)])
def run_dist_galore_basic(p_g_dtype: tuple[torch.dtype, torch.dtype], tp_zero_size: tuple[int, int]) -> None:
    """Test without forward: inject synthetic grads and compare optimizer steps.

    Runs DistGaloreAwamW against the single-device GaLoreAdamW8bit reference for
    `_N_STEP` steps over every (param dtype, grad dtype) and (tp, zero) combination.
    """
    p_dtype, g_dtype = p_g_dtype
    tp_size, zero_size = tp_zero_size
    # Set distributed groups
    rank = dist.get_rank()
    clear_layout_converter()  # Ensure correct sharding
    proc_mesh = ProcessGroupMesh(tp_size, zero_size)
    tp_group = proc_mesh.get_group_along_axis(0)
    dp_group = proc_mesh.get_group_along_axis(1)
    dist.get_rank(tp_group)  # NOTE(review): result unused — looks like a leftover; confirm it can be dropped
    seed_all(_SEED)  # Fix model init
    torch_model = Net(in_dim=_IN_DIM, hid_dim=_HID_DIM, dtype=p_dtype).to(rank)
    tp_model = TPNet(torch_model.fc0, torch_model.fc1, torch_model.fc2, tp_group, dtype=p_dtype).to(rank)
    # Both models must start from identical weights (zero tolerance).
    assert_distributed_close(tp_model, torch_model, rtol=0, atol=0, tp_group=tp_group)
    # Set up optimizers
    torch_optim = GaLoreAdamW8bit(
        get_galore_param_groups(torch_model, decay, rank=8),
        lr=lr,
        betas=(beta1, beta2),
        eps=eps,
        percentile_clipping=101,  # >100 presumably disables clipping — verify against bitsandbytes docs
        block_wise=False,
        min_8bit_size=1e10,  # Disable quantization
    )
    optim = DistGaloreAwamW(
        get_galore_param_groups(tp_model, decay, rank=8),
        lr=lr,
        betas=(beta1, beta2),
        eps=eps,
        percentile_clipping=101,
        block_wise=False,
        min_8bit_size=1e10,
    )
    optim.setup_distributed(tp_group, dp_group)
    # Tolerances are loosened for reduced-precision dtypes.
    rtol, atol = 8e-7, 8e-7
    if p_dtype is torch.float16 or g_dtype is torch.float16:
        rtol, atol = 1e-6, 1e-6
    if p_dtype is torch.bfloat16 or g_dtype is torch.bfloat16:
        rtol, atol = 2e-6, 2e-6
    for i in range(_N_STEP):
        seed_all(_SEED + i)  # NOTE: having only one manual_seed above doesn't work?
        set_dist_grad(tp_model, torch_model, g_dtype, tp_group)
        try:
            torch_optim.step()
            optim.step()
            assert_grad_close(tp_model, torch_model, tp_group)
            torch_optim.zero_grad()
            optim.zero_grad()
            assert_distributed_close(tp_model, torch_model, rtol, atol, tp_group)
            check_optim_states(torch_optim, optim)
        except Exception as e:
            # Report the failing configuration before propagating.
            coordinator.print_on_master(f"step {i}: p_g_dtype: {p_g_dtype}, tp_zero_size: {tp_zero_size}")
            raise e
@parameterize("p_g_dtype", _ALLOWED_P_G_TYPES)
@parameterize("tp_zero_size", [(4, 1), (2, 2), (1, 4)])
def run_dist_galore_fwd_bwd(p_g_dtype: tuple[torch.dtype, torch.dtype], tp_zero_size: tuple[int, int]) -> None:
    """Full forward/backward test: run one fwd+bwd+step and compare against the reference."""
    p_dtype, g_dtype = p_g_dtype
    tp_size, zero_size = tp_zero_size
    # Set distributed groups
    rank = dist.get_rank()
    proc_mesh = ProcessGroupMesh(tp_size, zero_size)
    tp_group = proc_mesh.get_group_along_axis(0)
    dp_group = proc_mesh.get_group_along_axis(1)
    dist.get_rank(tp_group)  # NOTE(review): result unused — looks like a leftover; confirm it can be dropped
    seed_all(_SEED)
    clear_layout_converter()  # Ensure correct sharding
    torch_model = Net(_IN_DIM, _HID_DIM, dtype=p_dtype).to(rank)
    tp_model = TPNet(torch_model.fc0, torch_model.fc1, torch_model.fc2, tp_group, dtype=p_dtype).to(rank)
    # Both models must start from identical weights (zero tolerance).
    assert_distributed_close(tp_model, torch_model, rtol=0, atol=0, tp_group=tp_group)
    # Set up optimizers
    torch_optim = GaLoreAdamW8bit(
        get_galore_param_groups(torch_model, decay, rank=8),
        lr=lr,
        betas=(beta1, beta2),
        eps=eps,
        percentile_clipping=101,  # >100 presumably disables clipping — verify against bitsandbytes docs
        block_wise=False,
        min_8bit_size=1e10,  # disable 8-bit quantization
    )
    optim = DistGaloreAwamW(
        get_galore_param_groups(tp_model, decay, rank=8),
        lr=lr,
        betas=(beta1, beta2),
        eps=eps,
        percentile_clipping=101,
        block_wise=False,
        min_8bit_size=1e10,
    )
    # Setup distributed optimizer
    if zero_size > 1:
        # Wrap with ZeRO and hand the master->working param map to the inner optimizer.
        optim = LowLevelZeroOptimizer(
            optim,
            overlap_communication=True,
            initial_scale=128,
            partition_grad=True,
            dp_process_group=dp_group,
            verbose=True,
        )
        shard_to_param = optim.get_master_to_working_map()
        optim.optim.setup_distributed(
            tp_group, dp_group, shard_to_param, padding_map=optim.get_param_padding_map(), is_zero=True
        )
    else:
        optim.setup_distributed(tp_group)
    # Tolerances are loosened for reduced-precision dtypes.
    rtol, atol = 8e-7, 8e-7
    if p_dtype is torch.float16 or g_dtype is torch.float16:
        rtol, atol = 1e-6, 1e-6
    if p_dtype is torch.bfloat16 or g_dtype is torch.bfloat16:
        rtol, atol = 2e-6, 2e-6
    seed_all(_SEED)  # NOTE: having only one manual_seed above doesn't work?
    x = data_gen().cuda().to(dtype=p_dtype)
    out_tp = tp_model(x)
    out = torch_model(x)
    try:
        assert_close(out, out_tp, rtol=rtol, atol=atol)
    except Exception as e:
        coordinator.print_on_master(f"p_g_dtype: {p_g_dtype}, tp_zero_size: {tp_zero_size}")
        raise e
    if zero_size > 1:
        # The ZeRO wrapper owns the backward pass so it can reduce-scatter grads.
        optim.backward(out_tp.sum())
        out.sum().backward()
    else:
        out_tp.sum().backward()
        out.sum().backward()
    torch_optim.step()
    optim.step()
    torch_optim.zero_grad()
    optim.zero_grad()
    try:
        assert_distributed_close(tp_model, torch_model, rtol, atol, tp_group)
        # Unwrap the ZeRO wrapper (if any) before comparing optimizer states.
        check_optim_states(getattr(torch_optim, "optim", torch_optim), getattr(optim, "optim", optim))
    except Exception as e:
        coordinator.print_on_master(f"p_g_dtype: {p_g_dtype}, tp_zero_size: {tp_zero_size}")
        raise e
def check_dist_galore(rank, world_size, port):
    """Per-process entry: init the NCCL process group, then run the (partly disabled) GaLore suites."""
    disable_existing_loggers()
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    global coordinator
    coordinator = DistCoordinator()
    # run_dist_galore_basic()
    # coordinator.print_on_master("Basic backward tests passed")
    coordinator.print_on_master("Skipping forward-backward tests due to SVD instability")
    # run_dist_galore_fwd_bwd()
    # coordinator.print_on_master("Forward-backward tests passed")
    coordinator.print_on_master(
        "Running bert tests, which are expected to produce minor errors due to instability in SVD convergence. \
        For example, a 1e-9 grad diff causes drastic difference in SVD output."
    )
    for config in test_config:
        try:
            run_bert_test(test_config=config, optim_class=GaLoreAdamW8bit, sharded_optim_class=GaLoreAdamW8bit)
        except Exception as e:
            # Best-effort: report failures but keep the remaining configs running.
            print(e)
    dist.barrier()
    print(f"rank {rank} tests passed :)")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_dist_galore():
    """Entry point: spawn 4 worker processes (requires 4 GPUs)."""
    spawn(check_dist_galore, nprocs=4)
if __name__ == "__main__":
    test_dist_galore()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_optimizer/test_lr_scheduler.py | tests/test_optimizer/test_lr_scheduler.py | import torch.nn as nn
from torch.optim import Adam
from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
def test_lr_scheduler_save_load():
    """A scheduler restored via load_state_dict must report an identical state_dict."""
    optimizer = Adam(nn.Linear(10, 10).parameters(), lr=1e-3)
    source = CosineAnnealingWarmupLR(optimizer, total_steps=5, warmup_steps=2)
    target = CosineAnnealingWarmupLR(optimizer, total_steps=5, warmup_steps=2)
    # Advance the source scheduler through its full schedule.
    steps_remaining = 5
    while steps_remaining:
        source.step()
        steps_remaining -= 1
    snapshot = source.state_dict()
    target.load_state_dict(snapshot)
    assert snapshot == target.state_dict()
if __name__ == "__main__":
    test_lr_scheduler_save_load()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_optimizer/test_dist_came.py | tests/test_optimizer/test_dist_came.py | import pytest
import torch
import torch.distributed as dist
from torch.testing import assert_close
import colossalai
from colossalai.cluster import ProcessGroupMesh
from colossalai.logging import disable_existing_loggers
from colossalai.nn.optimizer.came import CAME
from colossalai.nn.optimizer.distributed_came import DistributedCAME
from colossalai.shardformer.layer._operation import _gather
from colossalai.shardformer.layer.utils import Randomizer
from colossalai.tensor.d_tensor import get_sharding_spec, is_distributed_tensor
from colossalai.tensor.d_tensor.api import clear_layout_converter
from colossalai.tensor.d_tensor.sharding_spec import DimSpec
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
from colossalai.testing.random import seed_all
from colossalai.zero import LowLevelZeroOptimizer
from tests.kit.model_zoo import model_zoo
from tests.test_optimizer._utils import (
check_dist_grad,
check_dist_optim_state,
check_dist_param,
check_optim_states,
set_master_param_to_shard_param,
setup_param_groups,
)
from tests.test_shardformer.test_model._utils import (
build_model_from_hybrid_plugin,
build_model_from_low_level_zero_plugin,
run_forward_backward_with_hybrid_plugin,
run_forward_backward_with_low_level_zero_plugin,
unwrap_model,
)
# Dimensions of the toy MLP used by the CAME tests.
IN_DIM = 128
HID_DIM = 128
# Dim-0 sharding spec used to identify TP-sharded tensors.
_TP_SPEC = DimSpec([0])
_SEED = 0
# Model factories pulled from the test model zoo.
Net, data_gen, *_ = next(iter(model_zoo.get_sub_registry("simple_mlp").values()))
TPNet, *_ = next(iter(model_zoo.get_sub_registry("simple_tp_mlp").values()))
def correctness_verify(tensor1: torch.Tensor, tensor2: torch.Tensor, dtype: torch.dtype = torch.float32):
    """Assert `tensor1` and `tensor2` are close, with tolerances chosen per dtype.

    Unknown dtypes fall back to ``assert_close``'s defaults (rtol/atol of None).
    """
    # (rtol, atol) per supported dtype.
    tolerances = {
        torch.float32: (5e-04, 5e-04),
        torch.float16: (5e-2, 5e-4),
        torch.bfloat16: (4e-3, 4e-3),
    }
    rtol, atol = tolerances.get(dtype, (None, None))
    assert_close(tensor1, tensor2, rtol=rtol, atol=atol)
@parameterize("dtype", [torch.float32])  # torch.float32, torch.float16, torch.bfloat16
@parameterize("tp_zero_size", [(2, 2), (4, 1), (1, 4)])  # (4, 1), (1, 4)
def exam_dist_came_base(dtype: torch.dtype, tp_zero_size: tuple[int, int]):
    """Compare DistributedCAME against single-device CAME over one fwd/bwd/step cycle."""
    tp_size, zero_size = tp_zero_size
    use_zero = True if zero_size > 1 else False
    local_rank = dist.get_rank()
    clear_layout_converter()
    proc_mesh = ProcessGroupMesh(tp_size, zero_size)
    tp_group, dp_group = proc_mesh.get_group_along_axis(0), proc_mesh.get_group_along_axis(1)
    torch.set_default_dtype(dtype)
    # set_seed(42)
    # ==============================
    # Model Init
    # ==============================
    base_model = Net(in_dim=IN_DIM, hid_dim=HID_DIM, dtype=dtype).to(local_rank)
    # tp_model = TPModel(copy.deepcopy(base_model.linear1), copy.deepcopy(base_model.linear2), tp_group).to(local_rank)
    tp_model = TPNet(fc1=base_model.fc1, fc2=base_model.fc2, tp_group=tp_group, dtype=dtype)
    base_param_group = setup_param_groups(base_model)
    tp_param_group = setup_param_groups(tp_model)
    # tp_param_group_, tp_shard_spec, tp_param_shape = setup_flatten_param_groups_sharding_spec_shape(tp_model)
    # ==============================
    # Optimizer Init
    # ==============================
    base_optim = CAME(base_param_group, lr=1e-3)
    dist_optim = DistributedCAME(tp_param_group, lr=1e-3)
    # Setup distributed optimizer
    if zero_size > 1:
        # Wrap with ZeRO and hand the master->working param map to the inner optimizer.
        dist_optim = LowLevelZeroOptimizer(
            dist_optim,
            overlap_communication=True,
            initial_scale=128,
            partition_grad=True,
            dp_process_group=dp_group,
            verbose=True,
        )
        shard_to_param = dist_optim.master_to_working_param  # {id(): param tensor} but flattened
        dist_optim.optim.setup_distributed(
            tp_group=tp_group,
            dp_group=dp_group,
            shard_to_working_param=shard_to_param,
            use_zero=use_zero,
        )
    else:
        # Without ZeRO the master params map to themselves.
        shard_to_param = set_master_param_to_shard_param(tp_param_group)
        dist_optim.setup_distributed(
            tp_group=tp_group,
            dp_group=dp_group,
            shard_to_working_param=shard_to_param,
            use_zero=use_zero,
        )
    # ==============================
    # Correctness Verify
    # ==============================
    seed_all(1024)
    x = torch.randn(HID_DIM, IN_DIM, device=local_rank)
    out = base_model(x)
    out_tp = tp_model(x)
    if zero_size > 1:
        # The ZeRO wrapper owns the backward pass so it can reduce-scatter grads.
        dist_optim.backward(out_tp.sum())
        out.sum().backward()
    else:
        out_tp.sum().backward()
        out.sum().backward()
    base_optim.step()
    dist_optim.step()
    base_optim.zero_grad()
    dist_optim.zero_grad()
    base_params = base_model.parameters()
    tp_params = tp_model.parameters()
    # Gather each TP-sharded parameter back to full size before comparing.
    for p, tp_p in zip(base_params, tp_params):
        param_is_distributed = is_distributed_tensor(tp_p)
        if param_is_distributed:
            shard_spec = get_sharding_spec(tp_p)
            if len(shard_spec.sharding_sequence) >= 2:
                # Col Parallel
                if shard_spec.sharding_sequence[0] == "R":
                    tp_p = _gather(input_=tp_p, dim=-1, process_group=tp_group)  # gather
                # ROW Parallel
                if shard_spec.sharding_sequence[-1] == "R":
                    tp_p = _gather(input_=tp_p, dim=0, process_group=tp_group)  # gather
            else:
                # TP bias
                tp_p = _gather(input_=tp_p, dim=-1, process_group=tp_group)  # gather
        else:
            # No TP bias
            pass
        correctness_verify(p.data, tp_p.data, dtype)
    clear_layout_converter()
    Randomizer.reset_index()
    torch.cuda.empty_cache()
    print(f"Fwd/Bwd Test Passed")
@parameterize(
    "test_config",
    [
        {
            "stage": 1,
            "precision": "bf16",
        },
        {
            "stage": 2,
            "precision": "bf16",
        },
    ],
)
def exam_bert_test_on_lowlevelzero_plugin(test_config):
    """Run BERT through LowLevelZeroPlugin with CAME vs DistributedCAME and compare weights/grads/loss."""
    sub_model_zoo = model_zoo.get_sub_registry("transformers_bert")
    test_config["use_lazy_init"] = False
    test_config["initial_scale"] = 2**10
    # check weights
    if test_config["precision"] == "bf16":
        atol, rtol = 5e-4, 5e-4
    else:
        atol, rtol = 5e-4, 5e-4
    # test_config["initial_scale"] = 1
    model_list = [
        "transformers_bert",
    ]
    clear_layout_converter()
    torch.set_default_dtype(torch.bfloat16)
    seed_all(_SEED)
    for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
        if name in model_list:
            (
                org_model,
                org_optimizer,
                sharded_model,
                sharded_optimizer,
                criterion,
                booster,
            ) = build_model_from_low_level_zero_plugin(model_fn, loss_fn, test_config, CAME, DistributedCAME)
            org_loss, org_output, sharded_loss, sharded_output = run_forward_backward_with_low_level_zero_plugin(
                org_model, sharded_model, sharded_optimizer, data_gen_fn, output_transform_fn, criterion, booster
            )
            # assert same output
            # assert_close(org_output, org_output, atol=atol, rtol=rtol)
            weight_layer_for_check = [
                "bert.encoder.layer.1.intermediate.dense",
                # TODO: error in layer:
                # "bert.encoder.layer.0.output.dense",
                # "bert.encoder.layer.1.output.dense",
            ]
            # assert same weight before step; pass
            check_dist_param(org_model, sharded_model, weight_layer_for_check, atol, rtol)
            # assert loss; pass
            assert_close(org_loss, sharded_loss)
            # assert same grad before step
            # TODO: err here; backward produces different grads; only transformers_bert passes
            check_dist_grad(sharded_optimizer, org_model, sharded_model, weight_layer_for_check, atol, rtol)
            org_optimizer.step()
            sharded_optimizer.step()
            # assert same weight after step
            check_dist_param(org_model, sharded_model, weight_layer_for_check, atol, rtol)
            check_optim_states(org_optimizer, sharded_optimizer.optim)
    Randomizer.reset_index()
    torch.cuda.empty_cache()
    print(f"LowLevelZeroPlugin + Bert Model Zoo Test Passed")
@parameterize(
    "test_config",
    [
        {
            "tp_size": 1,
            "num_microbatches": 4,
            "zero_stage": 2,
            "precision": "bf16",
        },
        {
            "tp_size": 2,
            "num_microbatches": 4,
            "zero_stage": 2,
            "precision": "bf16",
        },
        {
            "tp_size": 4,
            "num_microbatches": 4,
            "zero_stage": 2,
            "precision": "bf16",
        },
        {
            "tp_size": 2,
            "num_microbatches": 4,
            "zero_stage": 1,
            "precision": "bf16",
        },
        {
            "tp_size": 4,
            "num_microbatches": 4,
            "zero_stage": 0,
            "precision": "bf16",
        },
    ],
)
def exam_bert_test_on_hybrid_plugin(test_config):
    """Run BERT through HybridParallelPlugin with CAME on both sides and compare post-step weights/states."""
    sub_model_zoo = model_zoo.get_sub_registry("transformers_bert")
    test_config["use_lazy_init"] = False
    test_config["pp_size"] = 1  # Do NOT test Pipeline Parallel
    test_config["initial_scale"] = 2**16  # avoid overflow
    model_list = [
        "transformers_bert",
    ]
    # pass "transformers_bert",
    clear_layout_converter()
    torch.set_default_dtype(torch.bfloat16)
    # check weights
    if test_config["precision"] == "bf16":
        atol, rtol = 5e-3, 5e-3
    else:
        atol, rtol = 5e-3, 5e-3
    for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
        if name in model_list:
            (
                org_model,
                org_optimizer,
                sharded_model,
                sharded_optimizer,
                criterion,
                booster,
            ) = build_model_from_hybrid_plugin(model_fn, loss_fn, test_config, CAME, CAME)
            org_loss, org_output, sharded_loss, sharded_output = run_forward_backward_with_hybrid_plugin(
                org_model, sharded_model, sharded_optimizer, data_gen_fn, output_transform_fn, criterion, booster
            )
            stage_manager = booster.plugin.stage_manager
            booster.plugin.tp_group  # NOTE(review): result unused — looks like a leftover; confirm it can be dropped
            bert = unwrap_model(org_model, "BertModel", "bert")
            sharded_bert = unwrap_model(sharded_model, "BertModel", "bert")
            # TODO: model
            # "encoder.layer.0.output.dense.weight", "encoder.layer.1.output.dense.weight" not match
            # "encoder.layer[0].output.dense", "encoder.layer[1].output.dense" not match
            weight_layer_for_check = ["embeddings.word_embeddings"]  # [30522, 128]
            # # assert same weight before step; all pass
            # check_dist_param(org_model, sharded_model, weight_layer_for_check, atol, rtol)
            # # assert loss; all pass
            # assert_close(org_loss, sharded_loss)
            # # assert same grad before step; all pass
            # check_dist_grad(org_model, sharded_model, weight_layer_for_check, atol, rtol)
            org_optimizer.step()
            sharded_optimizer.step()
            if stage_manager is None or stage_manager.is_first_stage(ignore_chunk=True):
                check_dist_param(bert, sharded_bert, weight_layer_for_check, atol, rtol)
                # check_weight(bert, sharded_bert, weight_layer_for_check, tp_group, atol=atol, rtol=rtol, dim=1)
            # check optim states
            check_dist_optim_state(org_optimizer, sharded_optimizer.optim)
    Randomizer.reset_index()
    torch.cuda.empty_cache()
    print(f"HybridParallelPlugin + Bert Model Zoo Test Passed")
def run_dist(rank, world_size, port):
    """Per-process entry: init the NCCL process group, then run the CAME test suites."""
    disable_existing_loggers()
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    exam_bert_test_on_lowlevelzero_plugin()  # err in TODO layer
    exam_bert_test_on_hybrid_plugin()  # pass
    exam_dist_came_base()  # pass
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_dist_came():
    """Entry point: spawn 4 worker processes (requires 4 GPUs)."""
    spawn(run_dist, nprocs=4)
if __name__ == "__main__":
    test_dist_came()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_optimizer/test_dist_lamb.py | tests/test_optimizer/test_dist_lamb.py | import pytest
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.testing import assert_close
import colossalai
from colossalai.cluster import DistCoordinator, ProcessGroupMesh
from colossalai.logging import disable_existing_loggers
from colossalai.nn.optimizer import DistributedLamb, Lamb
from colossalai.tensor.d_tensor import get_shard_dim_1d, is_distributed_tensor
from colossalai.tensor.d_tensor.api import clear_layout_converter
from colossalai.testing import clear_cache_before_run, parameterize, rerun_if_address_is_in_use, spawn
from colossalai.testing.random import seed_all
from colossalai.zero import LowLevelZeroOptimizer
from tests.kit.model_zoo import model_zoo
from tests.test_optimizer._utils import check_optim_states, force_assign_grad, run_bert_test, setup_param_groups
# (param dtype, grad dtype) combinations exercised by the Lamb tests.
_ALLOWED_P_G_TYPES = [
    (torch.float, torch.float),  # pure fp32
    (torch.float, torch.bfloat16),  # bfloat16 amp
]
# Toy-model dimensions, step count and RNG seed.
_IN_DIM = 32
_HID_DIM = 128
_N_STEP = 3
_SEED = 1024
# Set once per process inside check_dist_lamb after the process group is launched.
coordinator = None
# Model factories pulled from the test model zoo.
Net, data_gen, *_ = next(iter(model_zoo.get_sub_registry("simple_mlp").values()))
TPNet, *_ = next(iter(model_zoo.get_sub_registry("simple_tp_mlp").values()))
def assert_distributed_close(tp_model, torch_model, rtol, atol, tp_group):
    """Assert each TP-sharded parameter matches this rank's shard of the reference model.

    Raises:
        AssertionError: on NaN weights or a shard mismatch beyond (rtol, atol).
    """
    rank = dist.get_rank(tp_group)
    tp_size = dist.get_world_size(tp_group)
    for (name, p), torch_p in zip(tp_model.named_parameters(), torch_model.parameters()):
        # if overflow, the weight won't be updated. so there will be no nan in p
        assert not torch.isnan(p).any()
        try:
            if is_distributed_tensor(p):
                # Compare against the slice of the reference param this rank owns.
                split_dim = get_shard_dim_1d(p)
                torch_p = torch_p.chunk(tp_size, dim=split_dim)[rank]
            assert_close(p.float(), torch_p, rtol=rtol, atol=atol)
        except AssertionError:
            print(f"grad mismatch in {name}")
            raise  # bare raise re-raises with the original traceback
def set_dist_grad(
    dist_module: nn.Module,
    torch_model: nn.Module,
    g_dtype: torch.dtype,
    group: dist.ProcessGroup,
) -> None:
    """
    Set grads chunks for Tensor Parallel or ZeRO DP.
    We do not need a separate treatment for ZeRO,
    as the LowLevelOptimizer takes care of reduce-scattering grads.
    """
    rank = dist.get_rank(group)
    world_size = dist.get_world_size(group)
    for p, torch_p in zip(dist_module.parameters(), torch_model.parameters()):
        if torch_p.grad is None:
            # avoid inconsistent grad and param dtype error
            force_assign_grad(torch_p, g_dtype)
        else:
            # Accumulate fresh noise so successive calls produce different grads.
            torch_p.grad += torch.randn_like(torch_p, device=torch_p.device, dtype=g_dtype)
        if p.grad is None:
            force_assign_grad(p, g_dtype)
        if is_distributed_tensor(p):
            split_dim = get_shard_dim_1d(p)
            # Add grads only to the correctly split chunk
            force_assign_grad(p, g_dtype, torch_p.grad.chunk(world_size, dim=split_dim)[rank])
            # assert_close(p.grad, torch_p.grad.chunk(world_size, dim=split_dim)[rank])
        else:
            force_assign_grad(p, g_dtype, torch_p.grad)
@parameterize("p_g_dtype", _ALLOWED_P_G_TYPES)
@parameterize("bias_correction", [False, True])
@parameterize("tp_zero_size", [(1, 4), (4, 1), (2, 2)])
@clear_cache_before_run()
def run_dist_lamb_basic(
    bias_correction: bool, p_g_dtype: tuple[torch.dtype, torch.dtype], tp_zero_size: tuple[int, int]
) -> None:
    """Test without forward: inject synthetic grads and compare DistributedLamb against Lamb."""
    p_dtype, g_dtype = p_g_dtype
    tp_size, zero_size = tp_zero_size
    # Set distributed groups
    rank = dist.get_rank()
    clear_layout_converter()  # Ensure correct sharding
    proc_mesh = ProcessGroupMesh(tp_size, zero_size)
    tp_group = proc_mesh.get_group_along_axis(0)
    tp_rank = dist.get_rank(tp_group)
    seed_all(_SEED)  # Fix model init
    torch_model = Net(in_dim=_IN_DIM, hid_dim=_HID_DIM, identity=True).to(rank)
    tp_model = TPNet(torch_model.fc0, torch_model.fc1, torch_model.fc2, tp_group).to(rank)
    # Ensure equal weight init
    assert_close(
        torch_model.fc1.weight[tp_rank * _HID_DIM // tp_size : (tp_rank + 1) * _HID_DIM // tp_size],
        tp_model.fc1.weight,
    )
    assert_close(
        torch_model.fc2.weight[:, tp_rank * _HID_DIM // tp_size : (tp_rank + 1) * _HID_DIM // tp_size],
        tp_model.fc2.weight,
    )
    # Set up optimizers
    lr = 1e-3
    beta1, beta2 = 0.9, 0.999
    eps = 1e-8
    torch_optim = Lamb(
        setup_param_groups(torch_model), lr=lr, betas=(beta1, beta2), eps=eps, bias_correction=bias_correction
    )
    optim = DistributedLamb(
        setup_param_groups(tp_model),
        lr=lr,
        betas=(beta1, beta2),
        eps=eps,
        bias_correction=bias_correction,
    )
    optim.setup_distributed(tp_group)
    # Tolerances are loosened for reduced-precision dtypes.
    rtol, atol = 8e-7, 8e-7
    if p_dtype is torch.float16 or g_dtype is torch.float16:
        rtol, atol = 1e-6, 1e-6
    if p_dtype is torch.bfloat16 or g_dtype is torch.bfloat16:
        rtol, atol = 2e-6, 2e-6
    for i in range(_N_STEP):
        seed_all(_SEED + i)  # NOTE: having only one manual_seed above doesn't work?
        set_dist_grad(tp_model, torch_model, g_dtype, tp_group)
        torch_optim.step()
        optim.step()
        torch_optim.zero_grad()
        optim.zero_grad()
        try:
            assert_distributed_close(tp_model, torch_model, rtol, atol, tp_group)
        except Exception as e:
            # Report the failing configuration before propagating.
            coordinator.print_on_master(
                f"step {i + 1}: bias_correction: {bias_correction}, p_g_dtype: {p_g_dtype}, tp_zero_size: {tp_zero_size}"
            )
            raise e
@parameterize("p_g_dtype", _ALLOWED_P_G_TYPES)
@parameterize("bias_correction", [False, True])
@parameterize("tp_zero_size", [(2, 2), (4, 1), (1, 4)])
@clear_cache_before_run()
def run_dist_lamb_fwd_bwd(
    bias_correction: bool, p_g_dtype: tuple[torch.dtype, torch.dtype], tp_zero_size: tuple[int, int]
) -> None:
    """Full forward/backward test: one fwd+bwd+step comparing DistributedLamb to Lamb."""
    p_dtype, g_dtype = p_g_dtype
    tp_size, zero_size = tp_zero_size
    # Set distributed groups
    rank = dist.get_rank()
    proc_mesh = ProcessGroupMesh(tp_size, zero_size)
    tp_group = proc_mesh.get_group_along_axis(0)
    dp_group = proc_mesh.get_group_along_axis(1)
    tp_rank = dist.get_rank(tp_group)
    seed_all(_SEED)
    clear_layout_converter()  # Ensure correct sharding
    torch_model = Net(_IN_DIM, _HID_DIM).to(rank)
    tp_model = TPNet(torch_model.fc0, torch_model.fc1, torch_model.fc2, tp_group).to(rank)
    # Ensure both models start from identical weights (compare this rank's shard).
    assert_close(
        torch_model.fc1.weight[tp_rank * _HID_DIM // tp_size : (tp_rank + 1) * _HID_DIM // tp_size],
        tp_model.fc1.weight,
    )
    assert_close(
        torch_model.fc2.weight[:, tp_rank * _HID_DIM // tp_size : (tp_rank + 1) * _HID_DIM // tp_size],
        tp_model.fc2.weight,
    )
    # Set up optimizers
    lr = 1e-3
    beta1, beta2 = 0.9, 0.999
    eps = 1e-8
    torch_optim = Lamb(
        setup_param_groups(torch_model), lr=lr, betas=(beta1, beta2), eps=eps, bias_correction=bias_correction
    )
    optim = DistributedLamb(
        setup_param_groups(tp_model),
        lr=lr,
        betas=(beta1, beta2),
        eps=eps,
        bias_correction=bias_correction,
    )
    # Setup distributed optimizer
    if zero_size > 1:
        # Wrap with ZeRO and hand the master->working param map to the inner optimizer.
        optim = LowLevelZeroOptimizer(
            optim,
            overlap_communication=True,
            initial_scale=128,
            partition_grad=True,
            dp_process_group=dp_group,
            verbose=True,
        )
        shard_to_param = optim.master_to_working_param
        optim.optim.setup_distributed(tp_group, dp_group, shard_to_param, is_zero=True)
    else:
        optim.setup_distributed(tp_group)
    # Tolerances are loosened for reduced-precision dtypes.
    rtol, atol = 8e-7, 8e-7
    if p_dtype is torch.float16 or g_dtype is torch.float16:
        rtol, atol = 1e-6, 1e-6
    if p_dtype is torch.bfloat16 or g_dtype is torch.bfloat16:
        rtol, atol = 2e-6, 2e-6
    seed_all(_SEED)  # NOTE: having only one manual_seed above doesn't work?
    x = data_gen()
    x = x.cuda().to(dtype=p_dtype)
    out_tp = tp_model(x)
    out = torch_model(x)
    try:
        assert_close(out, out_tp, rtol=rtol, atol=atol)
    except Exception as e:
        coordinator.print_on_master(
            f"bias_correction: {bias_correction}, p_g_dtype: {p_g_dtype}, tp_zero_size: {tp_zero_size}"
        )
        raise e
    if zero_size > 1:
        # The ZeRO wrapper owns the backward pass so it can reduce-scatter grads.
        optim.backward(out_tp.sum())
        out.sum().backward()
    else:
        out_tp.sum().backward()
        out.sum().backward()
    torch_optim.step()
    optim.step()
    torch_optim.zero_grad()
    optim.zero_grad()
    try:
        assert_distributed_close(tp_model, torch_model, rtol, atol, tp_group)
        # Unwrap the ZeRO wrapper (if any) before comparing optimizer states.
        check_optim_states(getattr(torch_optim, "optim", torch_optim), getattr(optim, "optim", optim))
    except Exception as e:
        coordinator.print_on_master(
            f"bias_correction: {bias_correction}, p_g_dtype: {p_g_dtype}, tp_zero_size: {tp_zero_size}"
        )
        raise e
def check_dist_lamb(rank, world_size, port):
    """Per-process entry: init the NCCL process group, then run all Lamb test suites."""
    disable_existing_loggers()
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    global coordinator
    coordinator = DistCoordinator()
    run_dist_lamb_basic()
    coordinator.print_on_master("Basic tests passed")
    run_dist_lamb_fwd_bwd()
    coordinator.print_on_master("Forward-backward tests passed")
    run_bert_test(optim_class=Lamb, sharded_optim_class=Lamb)
    print(f"rank {rank} tests passed :)")
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_dist_lamb():
    """Entry point: spawn 4 worker processes (requires 4 GPUs)."""
    spawn(check_dist_lamb, nprocs=4)
if __name__ == "__main__":
    test_dist_lamb()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_optimizer/_utils.py | tests/test_optimizer/_utils.py | import torch
import torch.distributed as dist
import torch.nn as nn
from torch.testing import assert_close
import colossalai
from colossalai.shardformer.layer.utils import Randomizer
from colossalai.tensor.d_tensor import get_layout, get_sharding_spec, is_distributed_tensor
from colossalai.tensor.d_tensor.api import clear_layout_converter
from colossalai.tensor.d_tensor.sharding_spec import DimSpec
from colossalai.testing import parameterize, spawn
from tests.kit.model_zoo import model_zoo
from tests.test_shardformer.test_model._utils import (
build_model_from_hybrid_plugin,
check_weight,
run_forward_backward_with_hybrid_plugin,
unwrap_model,
)
def force_assign_grad(p, g_dtype, grad=None):
    """Bypass inconsistent grad and param dtype error when assigning grad.

    PyTorch requires `p.grad` to match `p.data`'s dtype at assignment time, so the
    param data is temporarily swapped to `g_dtype`, the grad attached (a fresh
    random tensor when `grad` is omitted, otherwise a clone cast to `g_dtype`),
    and the original data restored.
    """
    orig_p = p.data
    # `is None` instead of `== None`: `==` on a tensor is element-wise comparison,
    # not an identity test, so the original spelling was fragile.
    p.data = torch.randn_like(p, device=orig_p.device, dtype=g_dtype) if grad is None else grad.clone().to(g_dtype)
    p.grad = p.data
    p.data = orig_p
def setup_param_groups(model: nn.Module) -> list:
    """Split `model`'s parameters into a weight-decayed group and a decay-free group.

    Biases and LayerNorm weights are exempt from weight decay, mirroring the
    common transformer fine-tuning recipe.
    """
    no_decay = ["bias", "LayerNorm.weight"]

    def _is_exempt(param_name: str) -> bool:
        # Exempt when any marker substring appears in the parameter's name.
        return any(marker in param_name for marker in no_decay)

    decayed, exempt = [], []
    for param_name, param in model.named_parameters():
        (exempt if _is_exempt(param_name) else decayed).append(param)
    return [
        {"params": decayed, "weight_decay": 0.1},
        {"params": exempt, "weight_decay": 0.0},
    ]
# setup flatten param groups, sharding spec and shape; (For dist Adafactor and CAME)
def setup_flatten_param_groups_sharding_spec_shape(model: nn.Module) -> tuple:
    """Flatten every parameter and record its sharding spec and global shape.

    Returns:
        A tuple ``(flatten_params, sharding_spec, param_shape)`` where both dicts
        are keyed by ``id(flatten param)``; non-distributed params get a ``None``
        spec and their local shape.
    """
    flatten_optimizer_grouped_parameters = []
    sharding_spec = {}  # {id(flatten param): get_sharding_spec(p)}
    param_shape = {}  # {id(flatten param): get_layout(p).global_shape}
    for n, p in model.named_parameters():
        # flatten_p = copy.deepcopy(p).flatten()
        flatten_p = nn.Parameter(p.clone().flatten().requires_grad_(True))
        flatten_optimizer_grouped_parameters.append(flatten_p)
        if is_distributed_tensor(p):
            sharding_spec[id(flatten_p)] = get_sharding_spec(p)
            param_shape[id(flatten_p)] = get_layout(p).global_shape
        else:
            sharding_spec[id(flatten_p)] = None
            param_shape[id(flatten_p)] = p.shape
    return flatten_optimizer_grouped_parameters, sharding_spec, param_shape
def set_master_param_to_shard_param(master_param_list) -> dict:
    """Build an identity mapping ``id(param) -> param`` (master == shard here)."""
    return {id(param): param for param in master_param_list}
def set_dist_grad(
    dist_module: nn.Module,
    torch_model: nn.Module,
    g_dtype: torch.dtype,
    group: dist.ProcessGroup,
    tp_spec: DimSpec,
) -> None:
    """
    Set split grads for Tensor Parallel or ZeRO DP.
    We do not need a separate treatment for ZeRO,
    as the wrapper takes care of reduce-scattering grads.
    """
    rank = dist.get_rank(group)
    world_size = dist.get_world_size(group)
    # Walk both models in lockstep; assumes identical parameter ordering.
    for p, torch_p in zip(dist_module.parameters(), torch_model.parameters()):
        if torch_p.grad is None:
            torch_p.grad = torch.zeros_like(torch_p)

        is_distributed = hasattr(p, "dist_layout")
        if is_distributed:
            # Locate the tensor-parallel-sharded dim, then fill only this
            # rank's chunk of the full reference grad with random values.
            sharding = p.dist_layout.sharding_spec.sharding_sequence
            split_dim = sharding.index(tp_spec)
            # NOTE(review): ``split(world_size, ...)`` makes chunks of SIZE
            # world_size (not world_size chunks); confirm the sharded dims
            # here are sized so that ``[rank]`` indexes the intended chunk.
            shape = torch_p.split(world_size, dim=split_dim)[rank].shape

            indices = torch.arange(shape[split_dim] * rank, shape[split_dim] * (rank + 1))
            # Generate grads only for the correctly split chunk
            torch_p.grad.index_add_(split_dim, indices, torch.randn(shape, device=torch_p.device, dtype=g_dtype))
        else:
            # Replicated parameter: add a full-shape random grad.
            shape = torch_p.shape
            torch_p.grad += torch.randn(shape, device=torch_p.device, dtype=g_dtype)

        # Copy the reference grad onto the dist param, casting to g_dtype.
        force_assign_grad(p, g_dtype, grad=torch_p.grad)
def check_optim_states(org_optim, sharded_optim):
    """Assert every optimizer-state tensor matches between the two optimizers."""
    for param_group in org_optim.param_groups:
        for param in param_group["params"]:
            reference = org_optim.state[param]
            candidate = sharded_optim.state[param]
            for state_name in candidate:
                assert_close(reference[state_name], candidate[state_name], rtol=1e-5, atol=1e-5)
def check_bert_fwd_bwd(
    model_fn, data_gen_fn, output_transform_fn, loss_fn, test_config, optim_class, sharded_optim_class
):
    """Run one forward/backward/step on a BERT model under the hybrid plugin
    and compare weights and optimizer states against the unsharded reference."""
    org_model, org_optimizer, sharded_model, sharded_optimizer, criterion, booster = build_model_from_hybrid_plugin(
        model_fn, loss_fn, test_config, optim_class, sharded_optim_class
    )

    org_loss, org_output, sharded_loss, sharded_output = run_forward_backward_with_hybrid_plugin(
        org_model, sharded_model, sharded_optimizer, data_gen_fn, output_transform_fn, criterion, booster
    )

    stage_manager = booster.plugin.stage_manager
    tp_group = booster.plugin.tp_group

    bert = unwrap_model(org_model, "BertModel", "bert")
    sharded_bert = unwrap_model(sharded_model, "BertModel", "bert")

    # Two representative dense layers are enough to catch divergence.
    weight_layer_for_check = ["encoder.layer[0].output.dense", "encoder.layer[1].output.dense"]

    # optimizer executes step
    org_optimizer.step()
    sharded_optimizer.step()

    # check weights; bf16 gets a tighter rtol than fp16/fp32
    if test_config["precision"] == "bf16":
        atol, rtol = 5e-4, 1e-4
    else:
        atol, rtol = 5e-4, 5e-4
    # With pipeline parallel only the first stage holds the checked layers.
    if stage_manager is None or stage_manager.is_first_stage(ignore_chunk=True):
        check_weight(bert, sharded_bert, weight_layer_for_check, tp_group, atol=atol, rtol=rtol, dim=1)
    # check optim states
    check_optim_states(org_optimizer, sharded_optimizer.optim)
    torch.cuda.empty_cache()
# TP size x ZeRO stage x precision grid exercised by the BERT comparison.
@parameterize(
    "test_config",
    [
        {
            "tp_size": 1,
            "num_microbatches": 4,
            "zero_stage": 2,
            "precision": "bf16",
        },
        {
            "tp_size": 2,
            "num_microbatches": 4,
            "zero_stage": 2,
            "precision": "bf16",
        },
        {
            "tp_size": 4,
            "num_microbatches": 4,
            "zero_stage": 2,
            "precision": "bf16",
        },
        {
            "tp_size": 1,
            "num_microbatches": 4,
            "zero_stage": 2,
            "precision": "fp16",
        },
        {
            "tp_size": 2,
            "num_microbatches": 4,
            "zero_stage": 2,
            "precision": "fp16",
        },
        {
            "tp_size": 4,
            "num_microbatches": 4,
            "zero_stage": 2,
            "precision": "fp16",
        },
        {
            "tp_size": 2,
            "num_microbatches": 4,
            "zero_stage": 1,
            "precision": "bf16",
        },
        {
            "tp_size": 2,
            "num_microbatches": 4,
            "zero_stage": 0,
            "precision": "bf16",
        },
    ],
)
def run_bert_test(test_config, optim_class, sharded_optim_class):
    """Only call this if you've initialized distributed backend and spawned processes"""
    sub_model_zoo = model_zoo.get_sub_registry("transformers_bert")
    test_config["use_lazy_init"] = False
    test_config["pp_size"] = 1  # Do NOT test Pipeline Parallel
    test_config["initial_scale"] = 2**15  # avoid overflow
    target_models = [
        "transformers_bert",
    ]
    for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
        if name in target_models:
            check_bert_fwd_bwd(
                model_fn, data_gen_fn, output_transform_fn, loss_fn, test_config, optim_class, sharded_optim_class
            )
    # Reset global tracing/randomizer state so later tests start clean.
    clear_layout_converter()
    Randomizer.reset_index()
    torch.cuda.empty_cache()
def _run_bert_test(rank, world_size, port, optim_class, sharded_optim_class):
    """Per-process entry: initialize the distributed backend, then run the suite."""
    launch_kwargs = dict(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    colossalai.launch(**launch_kwargs)
    run_bert_test(optim_class, sharded_optim_class)
def check_optim_on_bert(optim_class, sharded_optim_class):
    """Spawn 4 ranks and compare ``optim_class`` vs ``sharded_optim_class`` on BERT."""
    spawn(_run_bert_test, 4, optim_class, sharded_optim_class)
def check_dist_optim_state(org_optimizer, sharded_optimizer):
    """Compare factored optimizer states (Adafactor/CAME-style row statistics)
    between the reference optimizer and the distributed one, re-chunking the
    reference state down to this rank's shard before comparing."""
    torch.set_default_dtype(torch.bfloat16)
    for group, tp_group in zip(org_optimizer.param_groups, sharded_optimizer.param_groups):
        for p, tp in zip(group["params"], tp_group["params"]):
            p_state = org_optimizer.state[p]
            tp_state = sharded_optimizer.state[tp]
            # TODO "exp_avg_sq_col", "exp_avg_sq_row", "exp_avg_sq"
            for key in ["exp_avg_sq_row"]:
                if key in tp_state.keys() and type(tp_state[key]) is torch.Tensor:
                    tp_is_dtensor = sharded_optimizer.param_is_dtensor_dict[id(tp)]
                    shard_spec = sharded_optimizer.shard_spec_dict[id(tp)]
                    use_zero = sharded_optimizer.use_zero
                    tp_optim_state = tp_state[key]
                    state = p_state[key]
                    dp_size, tp_size = (
                        sharded_optimizer.dp_size,
                        sharded_optimizer.tp_size,
                    )
                    # we start init model with first tensor parallel then zero;
                    # So, we gather model with first zero then tensor parallel
                    if tp_is_dtensor:
                        # col parallel
                        if shard_spec.sharding_sequence[0] == "R":
                            if use_zero:
                                # sq_row need gather alone dp group
                                # sq_col don't need gather alone dp group
                                if key == "exp_avg_sq_row":
                                    state = state.chunk(dp_size, dim=-1)[dist.get_rank(sharded_optimizer.dp_group)]
                            # gather from tp group
                            # sq_row don need gather alone tp group
                            # sq_col need gather alone tp group
                            if key == "exp_avg_sq_col":
                                state = state.chunk(tp_size, dim=-1)[dist.get_rank(sharded_optimizer.tp_group)]
                        # row parallel
                        elif shard_spec.sharding_sequence[-1] == "R":
                            # TODO: this case may cause shape mismatch @duanjunwen
                            if use_zero and key == "exp_avg_sq_row" and state.shape[0] // tp_size % dp_size == 0:
                                # sq_row need gather alone dp group
                                # sq_col don't need gather alone dp group
                                state = state.chunk(dp_size, dim=-1)[dist.get_rank(sharded_optimizer.dp_group)]
                            # gather from tp group
                            # sq_row need gather alone tp group
                            if key == "exp_avg_sq_row":
                                state = state.chunk(tp_size, dim=-1)[dist.get_rank(sharded_optimizer.tp_group)]
                            # sq_col don't need gather alone dp group
                            if key == "exp_avg_sq_col":
                                pass
                        else:
                            # Unrecognized sharding layout: skip the check entirely.
                            return
                    else:
                        if use_zero:
                            # sq_row need gather alone dp group
                            if key == "exp_avg_sq_row":
                                # row residule; no gather
                                if state.shape[0] % dp_size != 0:
                                    pass
                                else:
                                    state = state.chunk(dp_size, dim=-1)[dist.get_rank(sharded_optimizer.dp_group)]
                            # sq_col don't need gather alone dp group
                            if key == "exp_avg_sq_col":
                                tp_optim_state = tp_optim_state.div_(dp_size)
                                # need a div;
                    # Align dtypes before comparing (reference may differ).
                    if state.dtype != tp_optim_state.dtype:
                        tp_optim_state = tp_optim_state.type(state.dtype)
                    # TODO: some sharding checks are currently buggy, but the state values should match
                    # @duanjunwen
                    if state.shape != tp_optim_state.shape:
                        return
                    assert_close(state, tp_optim_state, atol=5e-4, rtol=1.6e-2)
def check_dist_param(org_model, sharded_model, weight_layer_for_check, atol, rtol):
    """Elementwise-compare the listed named parameters of the two models."""
    param_pairs = zip(org_model.named_parameters(), sharded_model.named_parameters())
    for (org_name, org_param), (_sharded_name, sharded_param) in param_pairs:
        if org_name not in weight_layer_for_check:
            continue
        assert_close(org_param, sharded_param, atol=atol, rtol=rtol)
def check_dist_grad(sharded_optimizer, org_model, sharded_model, weight_layer_for_check, atol, rtol):
    """Compare reference grads against the ZeRO-partitioned grads of the listed params."""
    for (org_name, org_param), (sharded_name, sharded_param) in zip(
        org_model.named_parameters(), sharded_model.named_parameters()
    ):
        if org_name in weight_layer_for_check:
            org_grad = org_param.grad
            group_id = dist.get_rank(sharded_optimizer.optim.dp_group)
            dist_grad = sharded_optimizer.get_partitioned_gradients_by_param_id(group_id, id(sharded_param))

            # dist_grad concat then reshape to org_grad shape
            if dist_grad:
                # NOTE(review): when this rank holds no partition the check is
                # silently skipped — confirm that is the intended behavior.
                dist_grad = torch.cat([t for t in dist_grad], 0).view(org_grad.shape)
                assert_close(org_grad, dist_grad, atol=atol, rtol=rtol)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_optimizer/test_dist_adafactor.py | tests/test_optimizer/test_dist_adafactor.py | import pytest
import torch
import torch.distributed as dist
from torch import nn
from torch.testing import assert_close
import colossalai
from colossalai.cluster import ProcessGroupMesh
from colossalai.logging import disable_existing_loggers
from colossalai.nn.optimizer.adafactor import Adafactor
from colossalai.nn.optimizer.distributed_adafactor import DistributedAdaFactor
from colossalai.shardformer.layer import Linear1D_Col, Linear1D_Row
from colossalai.shardformer.layer.utils import Randomizer
from colossalai.tensor.d_tensor import (
distribute_tensor,
get_device_mesh,
get_sharding_spec,
is_distributed_tensor,
shard_colwise,
shard_rowwise,
)
from colossalai.tensor.d_tensor.api import clear_layout_converter
from colossalai.tensor.d_tensor.sharding_spec import DimSpec
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
from colossalai.utils import set_seed
from colossalai.zero import LowLevelZeroOptimizer
from tests.kit.model_zoo import model_zoo
from tests.test_optimizer._utils import (
check_dist_optim_state,
check_dist_param,
check_optim_states,
set_master_param_to_shard_param,
setup_param_groups,
)
from tests.test_shardformer.test_model._utils import (
build_model_from_hybrid_plugin,
build_model_from_low_level_zero_plugin,
check_weight,
run_forward_backward_with_hybrid_plugin,
run_forward_backward_with_low_level_zero_plugin,
unwrap_model,
)
# Toy dimensions for the single-linear base case and the MLP fixtures below.
IN_DIM = 4
HID_DIM = 4
# Dim spec marking the tensor-parallel-sharded axis.
_TP_SPEC = DimSpec([0])

# Model-zoo fixtures: a simple MLP and its tensor-parallel counterpart.
Net, data_gen, *_ = next(iter(model_zoo.get_sub_registry("simple_mlp").values()))
TPNet, *_ = next(iter(model_zoo.get_sub_registry("simple_tp_mlp").values()))
def correctness_verify(tensor1: torch.Tensor, tensor2: torch.Tensor, dtype: torch.dtype = torch.float32):
    """Assert the two tensors match within per-dtype tolerances."""
    tolerances = {
        torch.float32: (5e-04, 5e-04),
        torch.float16: (5e-2, 5e-4),
        torch.bfloat16: (4e-3, 4e-3),
    }
    # Unknown dtypes fall back to assert_close's own defaults (None, None).
    rtol, atol = tolerances.get(dtype, (None, None))
    assert_close(tensor1, tensor2, rtol=rtol, atol=atol)
class MlpModel(nn.Module):
    """Two-layer MLP (IN_DIM -> HID_DIM -> IN_DIM) with no activation."""

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(IN_DIM, HID_DIM)
        self.linear2 = nn.Linear(HID_DIM, IN_DIM)

    def forward(self, x):
        hidden = self.linear1(x)
        return self.linear2(hidden)
class TPModel(nn.Module):
    """Tensor-parallel twin of ``MlpModel``: column-parallel then row-parallel linear."""

    def __init__(self, linear1, linear2, tp_group=None):
        super().__init__()
        # The col-parallel output stays sharded (gather_output=False) and is fed
        # directly into the row-parallel layer (parallel_input=True).
        self.linear1 = Linear1D_Col.from_native_module(
            linear1, process_group=tp_group, gather_output=False, overlap=True
        )
        self.linear2 = Linear1D_Row.from_native_module(linear2, process_group=tp_group, parallel_input=True)

    def forward(self, x):
        partial = self.linear1(x)
        return self.linear2(partial)
@parameterize("dtype", [torch.float32, torch.float16, torch.bfloat16]) # torch.float32, torch.float16, torch.bfloat16
@parameterize("tp_zero_size", [(4, 1)])
def exam_dist_adafactor_base(dtype: torch.dtype, tp_zero_size: tuple[int, int]):
tp_size, zero_size = tp_zero_size
local_rank = dist.get_rank()
use_zero = True if zero_size > 1 else False
proc_mesh = ProcessGroupMesh(tp_size, zero_size)
tp_group, dp_group = proc_mesh.get_group_along_axis(0), proc_mesh.get_group_along_axis(1)
torch.set_default_dtype(dtype)
set_seed(42)
# ==============================
# Base Case
# ==============================
H, W = IN_DIM, HID_DIM
model_col = nn.Linear(H, W).to(local_rank) # Col parallel weight
weight, bias = model_col.weight, model_col.bias
# ==============================
# Col Parallel
# ==============================
weight_col_shard = shard_colwise(weight.clone(), tp_group)
weight_col_shard_shard_spec = get_sharding_spec(weight_col_shard) # Shard spec
weight_col_shard_flatten = nn.Parameter(weight_col_shard.clone().flatten().requires_grad_(True))
bias_col_flatten = nn.Parameter(bias.clone().flatten().requires_grad_(True))
# ==============================
# Row Parallel
# ==============================
weight_row_shard = shard_rowwise(weight.clone(), tp_group)
weight_row_shard_shard_spec = get_sharding_spec(weight_row_shard) # Shard spec
weight_row_shard_flatten = nn.Parameter(
weight_row_shard.clone().flatten().requires_grad_(True)
) # flatten input(not dtensor) to optimizer
bias_row_flatten = nn.Parameter(bias.clone().flatten().requires_grad_(True))
# ==============================
# Init Optimizer
# ==============================
# base
optimizer_base = Adafactor([weight, bias])
cp_dist_optim = DistributedAdaFactor([weight_col_shard_flatten, bias_col_flatten])
rp_dist_optim = DistributedAdaFactor([weight_row_shard_flatten, bias_row_flatten])
shard_to_param_cp = set_master_param_to_shard_param([weight_col_shard_flatten, bias_col_flatten])
cp_dist_optim.setup_distributed(
tp_group=tp_group,
dp_group=dp_group,
shard_to_working_param=shard_to_param_cp,
use_zero=use_zero,
)
shard_to_param_rp = set_master_param_to_shard_param([weight_row_shard_flatten, bias_row_flatten])
rp_dist_optim.setup_distributed(
tp_group=tp_group,
dp_group=dp_group,
shard_to_working_param=shard_to_param_rp,
use_zero=use_zero,
)
N_STEPS = 1
for _ in range(N_STEPS):
# base step
optimizer_base.zero_grad()
weight.grad = torch.rand_like(weight)
bias.grad = torch.rand_like(bias)
optimizer_base.step()
# col parallel step
cp_dist_optim.zero_grad()
weight_col_shard_flatten.grad = (
distribute_tensor(weight.grad, get_device_mesh(weight_col_shard), weight_col_shard_shard_spec)
.clone()
.flatten()
)
bias_col_flatten.grad = bias.grad.clone().flatten()
cp_dist_optim.step()
# row parallel step
rp_dist_optim.zero_grad()
weight_row_shard_flatten.grad = (
distribute_tensor(weight.grad, get_device_mesh(weight_row_shard), weight_row_shard_shard_spec)
.clone()
.flatten()
)
bias_row_flatten.grad = bias.grad.clone().flatten()
rp_dist_optim.step()
weight_row_chunk = weight.t().reshape(-1, W).chunk(tp_size, dim=-1)[dist.get_rank(tp_group)].flatten()
weight_col_chunk = weight.reshape(-1, H).chunk(tp_size, dim=-1)[dist.get_rank(tp_group)].flatten()
# verify
correctness_verify(weight_col_chunk, weight_col_shard_flatten, dtype)
correctness_verify(weight_row_chunk, weight_row_shard_flatten, dtype)
print(f"Base Test Passed")
@parameterize("dtype", [torch.float16]) # torch.float32, torch.float16, torch.bfloat16
@parameterize("tp_zero_size", [(1, 4)]) # (2, 2), (4, 1), (1, 4)
def exam_dist_adafactor_zero(dtype: torch.dtype, tp_zero_size: tuple[int, int]):
tp_size, zero_size = tp_zero_size
use_zero = True if zero_size > 1 else False
local_rank = dist.get_rank()
clear_layout_converter()
proc_mesh = ProcessGroupMesh(tp_size, zero_size)
tp_group, dp_group = proc_mesh.get_group_along_axis(0), proc_mesh.get_group_along_axis(1)
torch.set_default_dtype(dtype)
set_seed(42)
# ==============================
# Model Init
# ==============================
# base_model = MlpModel().to(local_rank)
# tp_model = TPModel(copy.deepcopy(base_model.linear1), copy.deepcopy(base_model.linear2), tp_group).to(local_rank)
base_model = Net(in_dim=IN_DIM, hid_dim=HID_DIM, dtype=dtype).to(local_rank)
# Must specify dtype; TPNet init seem to run out of set_default_dtype scope
tp_model = TPNet(fc1=base_model.fc1, fc2=base_model.fc2, tp_group=tp_group, dtype=dtype)
base_param_group = setup_param_groups(base_model)
tp_param_group = setup_param_groups(tp_model)
# tp_param_group_, tp_shard_spec, tp_param_shape = setup_flatten_param_groups_sharding_spec_shape(tp_model)
# ==============================
# Optimizer Init
# ==============================
base_optim = Adafactor(base_param_group)
dist_optim = DistributedAdaFactor(tp_param_group)
# Setup distributed optimizer
if zero_size > 1:
base_optim = LowLevelZeroOptimizer(
base_optim,
overlap_communication=True,
initial_scale=128,
partition_grad=True,
dp_process_group=dp_group,
verbose=True,
)
dist_optim = LowLevelZeroOptimizer(
dist_optim,
overlap_communication=True,
initial_scale=128,
partition_grad=True,
dp_process_group=dp_group,
verbose=True,
)
shard_to_param = dist_optim.master_to_working_param # {id(): param tensor} but flattened
dist_optim.optim.setup_distributed(
tp_group=tp_group,
dp_group=dp_group,
shard_to_working_param=shard_to_param,
use_zero=use_zero,
)
else:
shard_to_param = set_master_param_to_shard_param(tp_param_group)
dist_optim.setup_distributed(
tp_group=tp_group,
dp_group=dp_group,
shard_to_working_param=shard_to_param,
use_zero=use_zero,
)
# ==============================
# Correctness Verify
# ==============================
x = torch.randn(IN_DIM, HID_DIM, device=local_rank)
out = base_model(x)
out_tp = tp_model(x)
if zero_size > 1:
dist_optim.backward(out_tp.sum())
base_optim.backward(out.sum())
else:
out_tp.sum().backward()
out.sum().backward()
base_optim.step()
dist_optim.step()
base_optim.zero_grad()
dist_optim.zero_grad()
base_params = base_model.parameters()
tp_params = tp_model.parameters()
for p, tp_p in zip(base_params, tp_params):
param_is_distributed = is_distributed_tensor(tp_p)
if param_is_distributed:
shard_spec = get_sharding_spec(tp_p)
if len(shard_spec.sharding_sequence) >= 2:
# Col Parallel
if shard_spec.sharding_sequence[0] == "R":
p = p.chunk(tp_size, dim=-1)[dist.get_rank(tp_group)]
# ROW Parallel
if shard_spec.sharding_sequence[-1] == "R":
p = p.chunk(tp_size, dim=0)[dist.get_rank(tp_group)]
else:
# TP bias
p = p.chunk(tp_size, dim=-1)[dist.get_rank(tp_group)]
correctness_verify(p, tp_p, dtype)
clear_layout_converter()
Randomizer.reset_index()
torch.cuda.empty_cache()
print(f"Zero Test Passed")
@parameterize(
    "test_config",
    [
        {
            "stage": 1,
            "precision": "bf16",
        },
        {
            "stage": 2,
            "precision": "bf16",
        },
    ],
)
def exam_bert_test_on_lowlevelzero_plugin(test_config):
    """Run BERT fwd/bwd/step under the LowLevelZero plugin with Adafactor on
    both sides, then check parameters and optimizer states agree.

    Fix vs. the original: the weight check listed ``layer.0`` twice; the
    second entry now checks ``layer.1`` so two distinct layers are covered.
    """
    sub_model_zoo = model_zoo.get_sub_registry("transformers_bert")
    model_list = [
        "transformers_bert",
    ]
    clear_layout_converter()
    torch.set_default_dtype(torch.bfloat16)
    for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
        if name in model_list:
            (
                org_model,
                org_optimizer,
                sharded_model,
                sharded_optimizer,
                criterion,
                booster,
            ) = build_model_from_low_level_zero_plugin(model_fn, loss_fn, test_config, Adafactor, Adafactor)

            org_loss, org_output, sharded_loss, sharded_output = run_forward_backward_with_low_level_zero_plugin(
                org_model, sharded_model, sharded_optimizer, data_gen_fn, output_transform_fn, criterion, booster
            )

            # LowLevelZero not need warp
            # bert = unwrap_model(org_model, "BertModel", "bert")
            # sharded_bert = unwrap_model(sharded_model, "BertModel", "bert")
            weight_layer_for_check = [
                "bert.encoder.layer.0.output.dense.weight",
                "bert.encoder.layer.1.output.dense.weight",
            ]

            org_optimizer.step()
            sharded_optimizer.step()

            # check weights — same tolerance for every supported precision today
            atol, rtol = 5e-4, 5e-4

            check_dist_param(org_model, sharded_model, weight_layer_for_check, atol, rtol)
            check_optim_states(org_optimizer, sharded_optimizer.optim)

    Randomizer.reset_index()
    torch.cuda.empty_cache()
    print("Bert Model Zoo Test Passed")
@parameterize(
    "test_config",
    [
        {
            "tp_size": 1,
            "num_microbatches": 4,
            "zero_stage": 2,
            "precision": "bf16",
        },
        {
            "tp_size": 2,
            "num_microbatches": 4,
            "zero_stage": 2,
            "precision": "bf16",
        },
        {
            "tp_size": 4,
            "num_microbatches": 4,
            "zero_stage": 2,
            "precision": "bf16",
        },
        {
            "tp_size": 2,
            "num_microbatches": 4,
            "zero_stage": 1,
            "precision": "bf16",
        },
        # @duanjunwen TODO: fix this test case. Currently params are sharded but are not dtensor here, throwing an error.
        # Probably due to HybridParallelAMPOptimizer replacing some master params ?
        # {
        #     "tp_size": 4,
        #     "num_microbatches": 4,
        #     "zero_stage": 0,
        #     "precision": "bf16",
        # },
    ],
)
def exam_bert_test_on_hybrid_plugin(test_config):
    """Run BERT fwd/bwd/step under the hybrid plugin with Adafactor vs
    DistributedAdaFactor, then compare weights and optimizer states."""
    sub_model_zoo = model_zoo.get_sub_registry("transformers_bert")
    test_config["use_lazy_init"] = False
    test_config["pp_size"] = 1  # Do NOT test Pipeline Parallel
    test_config["initial_scale"] = 2**16  # avoid overflow
    model_list = [
        "transformers_bert",
    ]
    clear_layout_converter()
    torch.set_default_dtype(torch.bfloat16)
    for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
        if name in model_list:
            (
                org_model,
                org_optimizer,
                sharded_model,
                sharded_optimizer,
                criterion,
                booster,
            ) = build_model_from_hybrid_plugin(model_fn, loss_fn, test_config, Adafactor, DistributedAdaFactor)

            org_loss, org_output, sharded_loss, sharded_output = run_forward_backward_with_hybrid_plugin(
                org_model, sharded_model, sharded_optimizer, data_gen_fn, output_transform_fn, criterion, booster
            )

            stage_manager = booster.plugin.stage_manager
            tp_group = booster.plugin.tp_group

            bert = unwrap_model(org_model, "BertModel", "bert")
            sharded_bert = unwrap_model(sharded_model, "BertModel", "bert")
            weight_layer_for_check = ["encoder.layer[0].output.dense", "encoder.layer[1].output.dense"]

            org_optimizer.step()
            sharded_optimizer.step()

            # check weights
            if test_config["precision"] == "bf16":
                atol, rtol = 5e-4, 5e-4
            else:
                atol, rtol = 5e-4, 5e-4
            # Only the first PP stage holds the layers under inspection.
            if stage_manager is None or stage_manager.is_first_stage(ignore_chunk=True):
                check_weight(bert, sharded_bert, weight_layer_for_check, tp_group, atol=atol, rtol=rtol, dim=1)
            # check optim states
            check_dist_optim_state(org_optimizer, sharded_optimizer.optim)

    clear_layout_converter()
    Randomizer.reset_index()
    torch.cuda.empty_cache()
    print(f"Bert Model Zoo Test Passed")
def run_dist(rank, world_size, port):
    """Per-process entry: initialize colossalai, then run every exam in sequence."""
    disable_existing_loggers()
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    for exam in (
        exam_dist_adafactor_base,
        exam_dist_adafactor_zero,
        exam_bert_test_on_lowlevelzero_plugin,
        exam_bert_test_on_hybrid_plugin,
    ):
        exam()
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_dist_adafactor():
    """Entry point: spawn 4 ranks and run all distributed-Adafactor exams."""
    spawn(run_dist, nprocs=4)


if __name__ == "__main__":
    # Allow running this test directly without pytest.
    test_dist_adafactor()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_analyzer/__init__.py | tests/test_analyzer/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_analyzer/test_fx/test_symbolic_profile.py | tests/test_analyzer/test_fx/test_symbolic_profile.py | import pytest
import torch
from packaging import version
from colossalai.testing.utils import clear_cache_before_run, parameterize
from tests.test_analyzer.test_fx.zoo import tm_models, tmm_models
try:
from colossalai._analyzer._subclasses import MetaTensorMode
from colossalai._analyzer.fx import symbolic_profile, symbolic_trace
except:
pass
def _check_gm_validity(gm: torch.fx.GraphModule):
for node in gm.graph.nodes:
assert len(node.meta["info"].global_ctx), f"In {gm.__class__.__name__}, {node} has empty global context."
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse("1.12.0"), reason="torch version < 12")
@clear_cache_before_run()
@parameterize("m", tm_models)
def test_torchvision_profile(m, verbose=False, bias_addition_split=False):
    """Symbolically trace and profile one torchvision model on meta tensors.

    Args:
        m: model constructor from ``tm_models``.
        verbose: forwarded to ``symbolic_profile``.
        bias_addition_split: whether tracing splits bias additions into extra nodes.
    """
    # Construct model and input under MetaTensorMode so no real memory is used.
    # NOTE(review): with-block scope reconstructed (construction only) — the
    # original file's indentation was lost; confirm against upstream.
    with MetaTensorMode():
        model = m()
        data = torch.rand(8, 3, 224, 224)
    meta_args = {
        "x": data,
    }
    gm = symbolic_trace(model, meta_args=meta_args, bias_addition_split=bias_addition_split)
    symbolic_profile(gm, data, verbose=verbose)
    _check_gm_validity(gm)
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse("1.12.0"), reason="torch version < 12")
@clear_cache_before_run()
@parameterize("m", tmm_models)
def test_timm_profile(m, verbose=False, bias_addition_split=False):
    """Symbolically trace and profile one timm model on meta tensors.

    Args:
        m: model constructor from ``tmm_models``.
        verbose: forwarded to ``symbolic_profile``.
        bias_addition_split: whether tracing splits bias additions into extra nodes.
    """
    # NOTE(review): with-block scope reconstructed (construction only) — the
    # original file's indentation was lost; confirm against upstream.
    with MetaTensorMode():
        model = m()
        data = torch.rand(8, 3, 224, 224)
    meta_args = {
        "x": data,
    }
    gm = symbolic_trace(model, meta_args=meta_args, bias_addition_split=bias_addition_split)
    symbolic_profile(gm, data, verbose=verbose)
    _check_gm_validity(gm)
if __name__ == "__main__":
test_torchvision_profile()
test_timm_profile()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_analyzer/test_fx/test_nested_ckpt.py | tests/test_analyzer/test_fx/test_nested_ckpt.py | import pytest
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from colossalai.testing import clear_cache_before_run
try:
from colossalai._analyzer.fx import symbolic_trace
except:
pass
class MyModule(nn.Module):
    """Toy network exercising nested ``torch.utils.checkpoint`` regions.

    forward(x) computes a(x) + 2*b(x) + c(x) + d(x) + e(x), with the additions
    organized into nested checkpointed sub-calls.
    """

    def __init__(self):
        super().__init__()
        # Five identically-shaped linear layers, attributes "a" .. "e".
        for layer_name in "abcde":
            setattr(self, layer_name, nn.Linear(10, 10))

    def checkpoint_0(self, x):
        left = checkpoint(self.checkpoint_0_0, x)
        right = checkpoint(self.checkpoint_0_1, x)
        return left + right + self.e(x)

    def checkpoint_0_0(self, x):
        return checkpoint(self.checkpoint_0_0_0, x) + checkpoint(self.checkpoint_0_0_1, x)

    def checkpoint_0_0_0(self, x):
        # One non-reentrant checkpoint mixed in deliberately.
        inner = checkpoint(self.checkpoint_0_0_0_0, x, use_reentrant=False)
        return self.a(x) + inner

    def checkpoint_0_0_0_0(self, x):
        return self.b(x)

    def checkpoint_0_0_1(self, x):
        return self.b(x) + self.c(x)

    def checkpoint_0_1(self, x):
        return self.d(x)

    def forward(self, x):
        return checkpoint(self.checkpoint_0, x)
@pytest.mark.skipif(torch.__version__ < "1.12.0", reason="torch version < 12")
@clear_cache_before_run()
def test_nested_ckpt():
    """Tracing must preserve outputs and surface every checkpoint_* method."""
    model = MyModule()
    x = torch.rand(10, 10)
    gm = symbolic_trace(model, meta_args={"x": x}, trace_act_ckpt=True)
    assert torch.allclose(gm(x), model(x)), "The traced model should generate the same output as the original model."
    ckpt_names = [attr for attr in dir(model) if attr.startswith("checkpoint")]
    for ckpt_def in ckpt_names:
        assert ckpt_def in gm.code, f"Checkpoint {ckpt_def} should be in the traced code.\n Traced code = {gm.code}"
if __name__ == "__main__":
test_nested_ckpt()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_analyzer/test_fx/test_mod_dir.py | tests/test_analyzer/test_fx/test_mod_dir.py | import pytest
import torch
from colossalai.testing import clear_cache_before_run, parameterize
try:
from colossalai._analyzer.fx import symbolic_trace
except:
pass
class LinearModel(torch.nn.Module):
    """Thin wrapper around a single ``torch.nn.Linear``."""

    def __init__(self, in_features, out_features, bias):
        super().__init__()
        self.linear = torch.nn.Linear(in_features, out_features, bias=bias)

    def forward(self, x):
        return self.linear(x)
class ConvModel(torch.nn.Module):
    """Grouped strided/dilated conv followed by a matching transposed conv."""

    def __init__(self, in_channel, out_channels, kernel_size, bias) -> None:
        super().__init__()
        # Both layers share the same conv hyper-parameters.
        shared = dict(bias=bias, padding=1, stride=2, dilation=2, groups=3)
        self.conv = torch.nn.Conv2d(in_channel, out_channels, kernel_size, **shared)
        self.conv_transpose = torch.nn.ConvTranspose2d(out_channels, out_channels, kernel_size, **shared)

    def forward(self, x):
        return self.conv_transpose(self.conv(x))
class AModel(torch.nn.Module):
    """Applies linear_1 once per leading-dim entry, then linear_2 and conv."""

    def __init__(self, bias) -> None:
        super().__init__()
        self.linear_1 = LinearModel(3, 3, bias)
        self.linear_2 = LinearModel(3, 3, bias)
        self.conv = ConvModel(3, 6, 3, bias)

    def forward(self, x):
        repeats = x.shape[0]
        for _ in range(repeats):
            x = self.linear_1(x)
        x = self.linear_2(x)
        return self.conv(x)
@pytest.mark.skipif(torch.__version__ < "1.12.0", reason="torch version < 12")
@clear_cache_before_run()
@parameterize("bias", [True, False])
@parameterize("bias_addition_split", [True, False])
@parameterize("shape", [(3, 3, 3), (3, 3, 3, 3)])
def test_mod_dir(bias, bias_addition_split, shape):
    """Every traced node must record a non-empty module path (``mod_dir``)."""
    model = AModel(bias=bias)
    sample = torch.rand(shape)
    gm = symbolic_trace(model, meta_args={"x": sample}, bias_addition_split=bias_addition_split)
    for node in gm.graph.nodes:
        assert len(node.meta["info"].mod_dir), f"{node} should have non-trivial ``mod_dir``."
        print(node, node.meta["info"].mod_dir)
if __name__ == "__main__":
test_mod_dir(bias=True, bias_addition_split=True, shape=(3, 3, 3))
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_analyzer/test_fx/test_bias_addition.py | tests/test_analyzer/test_fx/test_bias_addition.py | import pytest
import torch
from packaging import version
from torch.utils.checkpoint import checkpoint
from colossalai.testing.utils import clear_cache_before_run, parameterize
try:
from colossalai._analyzer.fx import symbolic_trace
except:
pass
class LinearModel(torch.nn.Module):
    """Single fully-connected layer, optionally biased."""

    def __init__(self, in_features, out_features, bias):
        super().__init__()
        self.linear = torch.nn.Linear(in_features, out_features, bias=bias)

    def forward(self, x):
        out = self.linear(x)
        return out
class ConvModel(torch.nn.Module):
    """Selects between a grouped conv (select=0) and its transposed twin."""

    def __init__(self, in_channel, out_channels, kernel_size, bias) -> None:
        super().__init__()
        shared = dict(bias=bias, padding=1, stride=2, dilation=2, groups=3)
        self.conv = torch.nn.Conv2d(in_channel, out_channels, kernel_size, **shared)
        self.conv_transpose = torch.nn.ConvTranspose2d(in_channel, out_channels, kernel_size, **shared)

    def forward(self, x, select=0):
        layer = self.conv if select == 0 else self.conv_transpose
        return layer(x)
class SiuModel(torch.nn.Module):
    """Linear stage followed by a checkpointed conv whose direction depends on
    the truthiness of ``select``."""

    def __init__(self, bias) -> None:
        super().__init__()
        self.linear = LinearModel(3, 3, bias)
        self.conv = ConvModel(3, 6, 3, bias)

    def forward(self, x, select=None):
        # Fix: the original used a tensor default argument (created once at
        # class-definition time and shared across calls). ``None`` sentinel
        # keeps the call signature and behavior backward compatible.
        if select is None:
            select = torch.tensor([0.0])
        x = self.linear(x)
        if select:
            x = checkpoint(self.conv, x, 0)
        else:
            x = checkpoint(self.conv, x, 1)
        return x
class AddmmModel(torch.nn.Module):
    """Computes ``beta * x + alpha * (x @ x)`` via ``torch.addmm``."""

    def __init__(self, alpha, beta) -> None:
        super().__init__()
        self.alpha = alpha
        self.beta = beta

    def forward(self, x):
        return torch.addmm(x, x, x, alpha=self.alpha, beta=self.beta)
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse("1.12.0"), reason="torch version < 12")
@clear_cache_before_run()
@parameterize("bias", [True, False])
@parameterize("bias_addition_split", [True, False])
@parameterize("shape", [(3, 3, 3), (3, 3, 3, 3)])
@parameterize("select", [torch.Tensor([0]), torch.Tensor([1])])
def test_siu_model(bias, bias_addition_split, shape, select):
    """Traced SiuModel must match eager output; '+' appears iff bias is split out."""
    model = SiuModel(bias=bias)
    sample = torch.rand(shape)
    gm = symbolic_trace(
        model,
        meta_args={"x": sample},
        concrete_args={"select": select},
        trace_act_ckpt=True,
        bias_addition_split=bias_addition_split,
    )
    assert torch.allclose(model(sample, select), gm(sample)), "original model and traced model should be the same!"
    expect_split = bias and bias_addition_split
    if expect_split:
        assert "+" in gm.code, "bias addition should be split!"
    else:
        assert "+" not in gm.code, "bias addition should not be split!"
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse("1.12.0"), reason="torch version < 12")
@parameterize("alpha", [1, 2])
@parameterize("beta", [1, 2])
@parameterize("bias_addition_split", [True, False])
@parameterize("shape", [(3, 3), (5, 5)])
def test_addmm_model(alpha, beta, bias_addition_split, shape):
    """addmm splitting: '*' appears only when a non-trivial alpha/beta scale is split out."""
    model = AddmmModel(alpha=alpha, beta=beta)
    sample = torch.rand(shape)
    gm = symbolic_trace(model, meta_args={"x": sample}, trace_act_ckpt=True, bias_addition_split=bias_addition_split)
    assert torch.allclose(model(sample), gm(sample)), "original model and traced model should be the same!"
    trivial_scale = alpha == 1 and beta == 1
    if trivial_scale or not bias_addition_split:
        assert "*" not in gm.code, "bias addition should not be split!"
    elif bias_addition_split:
        assert "+" in gm.code, "bias addition should be split!"
if __name__ == "__main__":
    # `parameterize` runs every registered argument combination, so the
    # functions are called without arguments here.
    test_siu_model()
    test_addmm_model()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_analyzer/test_fx/zoo.py | tests/test_analyzer/test_fx/zoo.py | import timm.models as tmm
import torchvision.models as tm
# input shape: (batch_size, 3, 224, 224)
# torchvision model constructors used as tracing/profiling fixtures; entries
# commented out are known-bad cases for torch.fx tracing.
tm_models = [
    tm.alexnet,
    tm.convnext_base,
    tm.densenet121,
    # tm.efficientnet_v2_s,
    # tm.googlenet, # output bad case
    # tm.inception_v3, # bad case
    tm.mobilenet_v2,
    tm.mobilenet_v3_small,
    tm.mnasnet0_5,
    tm.resnet18,
    tm.regnet_x_16gf,
    tm.resnext50_32x4d,
    tm.shufflenet_v2_x0_5,
    tm.squeezenet1_0,
    # tm.swin_s, # fx bad case
    tm.vgg11,
    tm.vit_b_16,
    tm.wide_resnet50_2,
]

# timm model constructors used as fixtures; commented entries are known bad
# cases or require pretrained weights.
tmm_models = [
    tmm.beit_base_patch16_224,
    tmm.beitv2_base_patch16_224,
    tmm.cait_s24_224,
    tmm.coat_lite_mini,
    tmm.convit_base,
    tmm.deit3_base_patch16_224,
    tmm.dm_nfnet_f0,
    tmm.eca_nfnet_l0,
    tmm.efficientformer_l1,
    # tmm.ese_vovnet19b_dw,
    tmm.gmixer_12_224,
    tmm.gmlp_b16_224,
    # tmm.hardcorenas_a,
    tmm.hrnet_w18_small,
    tmm.inception_v3,
    tmm.mixer_b16_224,
    tmm.nf_ecaresnet101,
    tmm.nf_regnet_b0,
    # tmm.pit_b_224, # pretrained only
    # tmm.regnetv_040,
    # tmm.skresnet18,
    # tmm.swin_base_patch4_window7_224, # fx bad case
    # tmm.tnt_b_patch16_224, # bad case
    tmm.vgg11,
    tmm.vit_base_patch16_18x2_224,
    tmm.wide_resnet50_2,
]
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_analyzer/test_fx/test_shape_prop.py | tests/test_analyzer/test_fx/test_shape_prop.py | import pytest
import torch
from packaging import version
from colossalai.testing.utils import clear_cache_before_run, parameterize
from tests.test_analyzer.test_fx.zoo import tm_models, tmm_models
try:
    from colossalai._analyzer._subclasses import MetaTensorMode
    from colossalai._analyzer.fx import symbolic_trace
    from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
    from colossalai._analyzer.fx.symbolic_profile import register_shape_impl

    @register_shape_impl(torch.nn.functional.linear)
    def linear_impl(*args, **kwargs):
        """Shape-inference override for ``F.linear``; delegates to the real op."""
        assert True
        return torch.nn.functional.linear(*args, **kwargs)

except Exception:
    # colossalai (or a compatible torch) may be unavailable; the tests below
    # are then skipped by their version guards. Use `except Exception` rather
    # than a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
    pass
def _check_gm_validity(gm: torch.fx.GraphModule):
    """Assert every traced node recorded output metadata, and input metadata
    for node kinds that consume values (modules, functions, methods)."""
    needs_inputs = ("call_module", "call_function", "call_method")
    for node in gm.graph.nodes:
        assert node.meta["info"].outputs, f"In {gm.__class__.__name__}, {node} has no output shape."
        # Only value-consuming ops (which can apply to params) must carry inputs.
        if node.op in needs_inputs:
            assert hasattr(node.meta["info"], "inputs"), f"In {gm.__class__.__name__}, {node} has no input shape."
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse("1.12.0"), reason="torch version < 12")
@clear_cache_before_run()
@parameterize("m", tm_models)
def test_torchvision_shape_prop(m):
    """Trace each torchvision model, run shape propagation, and check metadata."""
    with MetaTensorMode():
        # Instantiate the model and input under the meta mode to avoid real allocation.
        model = m()
        data = torch.rand(100, 3, 224, 224)
    meta_args = {
        "x": data,
    }
    gm = symbolic_trace(model, meta_args=meta_args)
    shape_prop_pass(gm, data)
    _check_gm_validity(gm)
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse("1.12.0"), reason="torch version < 12")
@clear_cache_before_run()
@parameterize("m", tmm_models)
def test_timm_shape_prop(m):
    """Trace each timm model, run shape propagation, and check node metadata."""
    with MetaTensorMode():
        # Instantiate the model and input under the meta mode to avoid real allocation.
        model = m()
        data = torch.rand(100, 3, 224, 224)
    meta_args = {
        "x": data,
    }
    gm = symbolic_trace(model, meta_args=meta_args)
    shape_prop_pass(gm, data)
    _check_gm_validity(gm)
if __name__ == "__main__":
    # `parameterize` iterates every registered model fixture when called bare.
    test_torchvision_shape_prop()
    test_timm_shape_prop()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_analyzer/test_fx/__init__.py | tests/test_analyzer/test_fx/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_analyzer/test_subclasses/test_meta_mode.py | tests/test_analyzer/test_subclasses/test_meta_mode.py | import pytest
import torch
import torchvision.models as tm
from packaging import version
from colossalai.testing import clear_cache_before_run, parameterize
try:
    from colossalai._analyzer._subclasses import MetaTensorMode
except Exception:
    # colossalai may be unavailable; guarded tests below are skipped by their
    # version checks. `except Exception` (not a bare `except:`) keeps
    # KeyboardInterrupt/SystemExit working.
    pass
from tests.test_analyzer.test_fx.zoo import tm_models, tmm_models
def compare_all(tensor: torch.Tensor, meta_tensor: torch.Tensor):
    """Assert that shape, dtype and stride agree between a real tensor and its meta counterpart."""
    checks = [
        ("shape", tensor.shape, meta_tensor.shape),
        ("dtype", tensor.dtype, meta_tensor.dtype),
        ("stride", tensor.stride(), meta_tensor.stride()),
    ]
    for name, real, meta in checks:
        assert real == meta, f"the {name} of tensor ({real}) and meta tensor ({meta}) does not match."
def run_and_compare(model):
    """Run `model` on a real tensor and on a meta tensor; compare outputs and grads."""
    x = torch.rand(2, 3, 224, 224, requires_grad=True)
    x_out = model(x)
    with MetaTensorMode():
        meta_x = torch.rand(2, 3, 224, 224, requires_grad=True)
        meta_out = model(meta_x)
    compare_all(x_out, meta_out)
    # Backward through both paths; gradients must agree in shape/dtype/stride too.
    x_out.sum().backward()
    meta_out.sum().backward()
    compare_all(x.grad, meta_x.grad)
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse("1.12.0"), reason="torch version < 12")
@clear_cache_before_run()
@parameterize("m", tm_models + tmm_models)
def test_meta_mode_shape(m):
    """MetaTensorMode must reproduce real shapes/dtypes/strides for every zoo model."""
    run_and_compare(m())
if __name__ == "__main__":
    # NOTE(review): the function is wrapped by `parameterize("m", ...)`; passing
    # tm.resnet18 positionally relies on the wrapper forwarding extra args — verify.
    test_meta_mode_shape(tm.resnet18)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_analyzer/test_subclasses/test_aten.py | tests/test_analyzer/test_subclasses/test_aten.py | from typing import Any, Callable, Union
import pytest
import torch
import torch.nn as nn
from colossalai.testing import clear_cache_before_run
try:
    from colossalai._analyzer._subclasses import MetaTensor
except Exception:
    # colossalai may be unavailable; the guarded test below is skipped by its
    # version check. `except Exception` (not a bare `except:`) keeps
    # KeyboardInterrupt/SystemExit working.
    pass
aten = torch.ops.aten

# Maps (aten op name, requires_backward) -> list of (module, example input)
# pairs whose meta-tensor execution is compared against eager execution.
registered_meta = {
    ("aten.convolution.default", True): [  # (aten ops, requires_backward)
        (nn.Conv1d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2), torch.rand(2, 3, 4)),
        (nn.Conv2d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2), torch.rand(2, 3, 4, 4)),
        (nn.Conv3d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2), torch.rand(2, 3, 4, 4, 4)),
        (nn.ConvTranspose1d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2), torch.rand(2, 3, 4)),
        (
            nn.ConvTranspose2d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2),
            torch.rand(2, 3, 4, 4),
        ),
        (
            nn.ConvTranspose3d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2),
            torch.rand(2, 3, 4, 4, 4),
        ),
    ],
    ("aten.native_batch_norm.default", True): [
        (nn.BatchNorm1d(4), torch.rand(2, 4)),
        (nn.BatchNorm2d(4), torch.rand(1, 4, 4, 4)),
        (nn.BatchNorm3d(4), torch.rand(1, 4, 4, 4, 4)),
    ],
    ("aten.native_layer_norm.default", True): [
        (nn.LayerNorm(4), torch.rand(1, 2, 3, 4)),
    ],
    ("aten.avg_pool1d.default", True): [
        (nn.MaxPool1d(3, stride=2), torch.rand(4, 5, 5)),
        (nn.AvgPool1d(3, stride=2), torch.rand(4, 5, 5)),
        (nn.AdaptiveMaxPool1d(3), torch.rand(4, 5, 5)),
        (nn.AdaptiveAvgPool1d(3), torch.rand(4, 5, 5)),
    ],
    ("aten.avg_pool2d.default", True): [
        (nn.MaxPool2d((3, 2), stride=(2, 1)), torch.rand(2, 4, 5, 5)),
        (nn.AvgPool2d((3, 2), stride=(2, 1)), torch.rand(2, 4, 5, 5)),
        (nn.AdaptiveMaxPool2d((3, 2)), torch.rand(2, 4, 5, 5)),
        (nn.AdaptiveAvgPool2d((3, 2)), torch.rand(2, 4, 5, 5)),
    ],
    ("aten.relu.default", True): [
        (nn.ReLU(), torch.rand(4, 3, 1, 2)),
        (nn.LeakyReLU(), torch.rand(4, 3, 1, 2)),
        (nn.SiLU(), torch.rand(4, 3, 1, 2)),
        (nn.GELU(), torch.rand(4, 3, 1, 2)),
        (nn.ELU(), torch.rand(4, 3, 1, 2)),
        (nn.Sigmoid(), torch.rand(4, 3, 1, 2)),
        (nn.Tanh(), torch.rand(4, 3, 1, 2)),
        (nn.Hardswish(), torch.rand(4, 3, 1, 2)),
    ],
}
def compare_all(tensor: torch.Tensor, meta_tensor: torch.Tensor) -> Any:
    """Assert that shape, dtype and stride agree between a real tensor and its meta counterpart."""
    checks = [
        ("shape", tensor.shape, meta_tensor.shape),
        ("dtype", tensor.dtype, meta_tensor.dtype),
        ("stride", tensor.stride(), meta_tensor.stride()),
    ]
    for name, real, meta in checks:
        assert real == meta, f"the {name} of tensor ({real}) and meta tensor ({meta}) does not match."
def run_and_compare(f: Union[nn.Module, Callable], x: torch.Tensor, requires_backward=False) -> Any:
    """Run `f` on a real tensor and on its MetaTensor wrapper; compare outputs
    (and gradients when `requires_backward` is set)."""
    x.requires_grad = requires_backward
    meta_x = MetaTensor(x)
    x_out, meta_out = f(x), f(meta_x)
    compare_all(x_out, meta_out)
    if requires_backward:
        x_out.sum().backward()
        meta_out.sum().backward()
        compare_all(x.grad, meta_x.grad)
# BUG FIX: the original guard compared version STRINGS lexicographically
# (`torch.__version__ < "1.12.0"`), which mis-orders e.g. "1.9.0" > "1.12.0".
# Sibling test files use packaging's `version.parse`; that package is not
# imported here, so compare integer (major, minor) tuples instead.
# The "+local" suffix (e.g. "2.1.0+cu118") is stripped before splitting.
@pytest.mark.skipif(
    tuple(map(int, torch.__version__.split("+")[0].split(".")[:2])) < (1, 12),
    reason="torch version < 12",
)
@clear_cache_before_run()
def test_meta_aten():
    """Run every registered (op, input) pair through MetaTensor and compare with eager."""
    for (aten_op, requires_backward), v in registered_meta.items():
        for f, x in v:
            run_and_compare(f, x, requires_backward)
if __name__ == "__main__":
    # Run the full registered_meta sweep directly (no pytest needed).
    test_meta_aten()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_analyzer/test_subclasses/test_flop_tensor.py | tests/test_analyzer/test_subclasses/test_flop_tensor.py | import pytest
import torch
import torch.nn.functional as F
import torchvision.models as tm
from packaging import version
from tests.test_analyzer.test_fx.zoo import tm_models, tmm_models
try:
    from colossalai._analyzer._subclasses import MetaTensorMode, flop_count
except Exception:
    # colossalai may be unavailable; guarded tests below are skipped by their
    # version checks. `except Exception` (not a bare `except:`) keeps
    # KeyboardInterrupt/SystemExit working.
    pass
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse("1.12.0"), reason="torch version < 12")
@pytest.mark.parametrize("m", tm_models + tmm_models)
def test_flop_count_module(m):
    """Forward and backward FLOP counts of each zoo model must be positive."""
    x = torch.rand(2, 3, 224, 224)
    with MetaTensorMode():  # save time for testing
        module = m()
    rs_fwd, rs_bwd = flop_count(module, x, verbose=True)
    assert rs_fwd > 0, f"fwd flop count of {m.__name__} is {rs_fwd}"
    assert rs_bwd > 0, f"bwd flop count of {m.__name__} is {rs_bwd}"
# Function-call patterns that are awkward for flop counting: in-place ops,
# kwargs-heavy pooling, and the three-tensor `torch.where`.
odd_cases = [
    (F.relu, (torch.rand(2, 3, 224, 224, requires_grad=True),), {"inplace": True}),
    (
        F.max_pool2d,
        (torch.rand(2, 3, 224, 224, requires_grad=True),),
        {"kernel_size": 3, "stride": 2, "padding": 1, "dilation": 2},
    ),
    (
        torch.where,
        (
            torch.rand(2, 3, 224, 224) > 0.5,
            torch.rand(2, 3, 224, 224, requires_grad=True),
            torch.rand(2, 3, 224, 224, requires_grad=True),
        ),
        {},
    ),
]
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse("1.12.0"), reason="torch version < 12")
@pytest.mark.parametrize("func, args, kwargs", odd_cases)
def test_flop_count_function(func, args, kwargs):
    """Forward/backward FLOP counts of each odd-case function call must be positive."""
    rs_fwd, rs_bwd = flop_count(func, *args, **kwargs, verbose=True)
    assert rs_fwd > 0, f"fwd flop count of {func.__name__} is {rs_fwd}"
    assert rs_bwd > 0, f"bwd flop count of {func.__name__} is {rs_bwd}"
if __name__ == "__main__":
    # Smoke-run one model and one odd case directly (no pytest needed).
    test_flop_count_module(tm.resnet18)
    test_flop_count_function(F.relu, (torch.rand(2, 3, 224, 224, requires_grad=True),), {"inplace": True})
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_analyzer/test_subclasses/__init__.py | tests/test_analyzer/test_subclasses/__init__.py | python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false | |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_cluster/test_device_mesh_manager.py | tests/test_cluster/test_device_mesh_manager.py | from colossalai.cluster.device_mesh_manager import DeviceMeshInfo, DeviceMeshManager
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.testing import rerun_if_address_is_in_use, spawn
def check_device_mesh_manager(rank, world_size, port):
    """Per-process check: create a device mesh with an explicit shape and
    verify its shape and logical mesh layout."""
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    device_mesh_manager = DeviceMeshManager()
    # TODO(ver217): this test is strictly relies on hardware, temporary skip it
    # device_mesh_info_auto = DeviceMeshInfo(physical_ids=[0, 1, 2, 3],)
    # device_mesh_auto = device_mesh_manager.create_device_mesh('0', device_mesh_info_auto)
    # assert device_mesh_auto.shape == (2, 2)
    # assert device_mesh_auto._logical_mesh_id.tolist() == [[0, 1], [2, 3]]
    device_mesh_info_with_shape = DeviceMeshInfo(
        physical_ids=[0, 1, 2, 3],
        mesh_shape=(2, 2),
    )
    device_mesh_with_shape = device_mesh_manager.create_device_mesh("1", device_mesh_info_with_shape)
    # 4 physical devices arranged as a 2x2 logical mesh.
    assert device_mesh_with_shape.shape == (2, 2)
    assert device_mesh_with_shape._logical_mesh_id.tolist() == [[0, 1], [2, 3]]
@rerun_if_address_is_in_use()
def test_device_mesh_manager():
    """Spawn 4 processes and run the device-mesh-manager checks on each."""
    spawn(check_device_mesh_manager, 4)
if __name__ == "__main__":
    test_device_mesh_manager()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_cluster/test_process_group_mesh.py | tests/test_cluster/test_process_group_mesh.py | import pytest
import torch.distributed as dist
import colossalai
from colossalai.cluster import ProcessGroupMesh
from colossalai.testing import spawn
def check_process_group_mesh_with_cases():
    """Validate ProcessGroupMesh on 4 ranks (dp=1, pp=2, tp=2) against hand-computed tables."""
    DP_DIM, PP_DIM, TP_DIM = 0, 1, 2
    DP_SIZE, PP_SIZE, TP_SIZE = 1, 2, 2
    # Expected (dp, pp, tp) coordinate of each global rank.
    RANK_TO_COORDINATE = {
        0: (0, 0, 0),
        1: (0, 0, 1),
        2: (0, 1, 0),
        3: (0, 1, 1),
    }
    # Expected per-rank group membership for each axis / axis combination.
    TP_RANKS_IN_GROUP = {
        0: [0, 1],
        1: [0, 1],
        2: [2, 3],
        3: [2, 3],
    }
    PP_RANKS_IN_GROUP = {
        0: [0, 2],
        1: [1, 3],
        2: [0, 2],
        3: [1, 3],
    }
    DP_RANKS_IN_GROUP = {
        0: [0],
        1: [1],
        2: [2],
        3: [3],
    }
    TPxPP_RANKS_IN_GROUP = {
        0: [0, 1, 2, 3],
        1: [0, 1, 2, 3],
        2: [0, 1, 2, 3],
        3: [0, 1, 2, 3],
    }
    DPxTP_RANKS_IN_GROUP = {
        0: [0, 1],
        1: [0, 1],
        2: [2, 3],
        3: [2, 3],
    }
    # Per-axis index subsets used to build a *partial* tp-pp group per rank.
    TPxPP_PARTIAL_INDICES = {
        0: [[0, 1], [0]],
        1: [[1], [0, 1]],
        2: [[0], [0, 1]],
        3: [[0, 1], [1]],
    }
    TPxPP_RANKS_IN_GROUP_PARTIAL = {
        0: [0, 1],
        1: [1, 3],
        2: [0, 2],
        3: [2, 3],
    }
    pg_mesh = ProcessGroupMesh(DP_SIZE, PP_SIZE, TP_SIZE)
    rank = dist.get_rank()
    assert rank == pg_mesh.rank
    # check world size
    assert pg_mesh.size(TP_DIM) == 2
    assert pg_mesh.size(PP_DIM) == 2
    assert pg_mesh.size(DP_DIM) == 1
    # check coordinate
    assert pg_mesh.coordinate(TP_DIM) == RANK_TO_COORDINATE[rank][TP_DIM]
    assert pg_mesh.coordinate(PP_DIM) == RANK_TO_COORDINATE[rank][PP_DIM]
    assert pg_mesh.coordinate(DP_DIM) == RANK_TO_COORDINATE[rank][DP_DIM]
    # check ranks in group
    tp_group = pg_mesh.get_group_along_axis(TP_DIM)
    assert pg_mesh.get_ranks_in_group(tp_group) == TP_RANKS_IN_GROUP[rank]
    pp_group = pg_mesh.get_group_along_axis(PP_DIM)
    assert pg_mesh.get_ranks_in_group(pp_group) == PP_RANKS_IN_GROUP[rank]
    dp_group = pg_mesh.get_group_along_axis(DP_DIM)
    assert pg_mesh.get_ranks_in_group(dp_group) == DP_RANKS_IN_GROUP[rank]
    # multi-axis (fused) groups
    dpxtp_group = pg_mesh.create_group_along_axis([DP_DIM, TP_DIM])
    assert pg_mesh.get_ranks_in_group(dpxtp_group) == DPxTP_RANKS_IN_GROUP[rank]
    tpxpp_group = pg_mesh.create_group_along_axis([TP_DIM, PP_DIM])
    assert pg_mesh.get_ranks_in_group(tpxpp_group) == TPxPP_RANKS_IN_GROUP[rank]
    tpxpp_group_partial = pg_mesh.create_group_along_axis([TP_DIM, PP_DIM], TPxPP_PARTIAL_INDICES[rank])
    assert pg_mesh.get_ranks_in_group(tpxpp_group_partial) == TPxPP_RANKS_IN_GROUP_PARTIAL[rank]
    # check prev rank: build the neighbor coordinate one step back along the axis
    if RANK_TO_COORDINATE[rank][TP_DIM] != 0:
        prev_coord = (
            RANK_TO_COORDINATE[rank][:TP_DIM]
            + (RANK_TO_COORDINATE[rank][TP_DIM] - 1,)
            + RANK_TO_COORDINATE[rank][TP_DIM + 1 :]
        )
        prev_rank = TP_RANKS_IN_GROUP[rank][TP_RANKS_IN_GROUP[rank].index(rank) - 1]
        assert pg_mesh.ravel(prev_coord, pg_mesh.shape) == prev_rank
    if RANK_TO_COORDINATE[rank][PP_DIM] != 0:
        prev_coord = (
            RANK_TO_COORDINATE[rank][:PP_DIM]
            + (RANK_TO_COORDINATE[rank][PP_DIM] - 1,)
            + RANK_TO_COORDINATE[rank][PP_DIM + 1 :]
        )
        prev_rank = PP_RANKS_IN_GROUP[rank][PP_RANKS_IN_GROUP[rank].index(rank) - 1]
        assert pg_mesh.ravel(prev_coord, pg_mesh.shape) == prev_rank
    # check next rank
    if RANK_TO_COORDINATE[rank][TP_DIM] != TP_SIZE - 1:
        next_coord = (
            RANK_TO_COORDINATE[rank][:TP_DIM]
            + (RANK_TO_COORDINATE[rank][TP_DIM] + 1,)
            + RANK_TO_COORDINATE[rank][TP_DIM + 1 :]
        )
        next_rank = TP_RANKS_IN_GROUP[rank][TP_RANKS_IN_GROUP[rank].index(rank) + 1]
        assert pg_mesh.ravel(next_coord, pg_mesh.shape) == next_rank
    if RANK_TO_COORDINATE[rank][PP_DIM] != PP_SIZE - 1:
        next_coord = (
            RANK_TO_COORDINATE[rank][:PP_DIM]
            + (RANK_TO_COORDINATE[rank][PP_DIM] + 1,)
            + RANK_TO_COORDINATE[rank][PP_DIM + 1 :]
        )
        next_rank = PP_RANKS_IN_GROUP[rank][PP_RANKS_IN_GROUP[rank].index(rank) + 1]
        assert pg_mesh.ravel(next_coord, pg_mesh.shape) == next_rank
def run_dist(rank, world_size, port):
    # Per-process entry point used by `spawn`; sets up the default process group.
    colossalai.launch(
        rank=rank,
        world_size=world_size,
        port=port,
        host="localhost",
    )
    check_process_group_mesh_with_cases()
@pytest.mark.dist
def test_process_group_mesh():
    """Spawn 4 processes and run the process-group-mesh checks on each."""
    spawn(run_dist, 4)
if __name__ == "__main__":
    test_process_group_mesh()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_lora/test_lora.py | tests/test_lora/test_lora.py | import copy
import os
from itertools import product
import torch
from peft import LoraConfig
from torch import distributed as dist
from torch.optim import AdamW
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import HybridParallelPlugin, LowLevelZeroPlugin, TorchDDPPlugin
from colossalai.booster.plugin.hybrid_parallel_plugin import HybridParallelModule
from colossalai.testing import check_state_dict_equal, clear_cache_before_run, rerun_if_address_is_in_use, spawn
from tests.kit.model_zoo import model_zoo
from tests.test_checkpoint_io.utils import shared_tempdir
@clear_cache_before_run()
def check_fwd_bwd(model_fn, data_gen_fn, output_transform_fn, loss_fn, task_type):
    """Round-trip a LoRA adapter checkpoint through each plugin and run one
    forward/backward/step, checking checkpoint size, state-dict equality, and
    which parameters change."""
    model = model_fn()
    lora_config = LoraConfig(task_type=task_type, r=8, lora_alpha=32, lora_dropout=0.1)
    test_plugins = [TorchDDPPlugin(), LowLevelZeroPlugin(), HybridParallelPlugin(tp_size=1, pp_size=1)]
    test_configs = [
        {
            "lora_config": lora_config,
            "quantize": False,
        },
        {
            "lora_config": lora_config,
            "quantize": True,
        },
    ]
    for plugin, test_config in product(test_plugins, test_configs):
        # checkpoint loaded model
        model_save = model_fn()
        model_load = copy.deepcopy(model_save)
        # NOTE(review): the optimizer is built over `model` (a separate instance
        # created above), not `model_save`, and before `enable_lora` adds the
        # LoRA parameters — so `optimizer.step()` likely cannot update
        # `model_save` unless `booster.boost` rebinds parameters. Verify intent.
        optimizer = AdamW(model.parameters(), lr=0.001)
        criterion = loss_fn
        booster = Booster(plugin=plugin)
        model_save = booster.enable_lora(model_save, **test_config)
        model_save, optimizer, criterion, _, _ = booster.boost(model_save, optimizer, criterion)
        with shared_tempdir() as tempdir:
            lora_ckpt_path = os.path.join(tempdir, "ckpt")
            booster.save_lora_as_pretrained(model_save, lora_ckpt_path)
            dist.barrier()
            # The Lora checkpoint should be small in size
            checkpoint_size_mb = os.path.getsize(os.path.join(lora_ckpt_path, "adapter_model.bin")) / (1024 * 1024)
            assert checkpoint_size_mb < 1
            model_load = booster.enable_lora(model_load, pretrained_dir=lora_ckpt_path, **test_config)
            model_load, _, _, _, _ = booster.boost(model_load)
            check_state_dict_equal(model_save.state_dict(), model_load.state_dict())
        # test fwd bwd correctness
        test_model = model_load
        if isinstance(model_load, HybridParallelModule):
            model_load = model_load.module.module
        model_copy = copy.deepcopy(model_load)
        data = data_gen_fn()
        data = {
            k: v.to("cuda") if torch.is_tensor(v) or "Tensor" in v.__class__.__name__ else v for k, v in data.items()
        }
        output = test_model(**data)
        output = output_transform_fn(output)
        loss = criterion(output)
        booster.backward(loss, optimizer)
        optimizer.clip_grad_by_norm(1.0)
        optimizer.step()
        for (n1, p1), (n2, p2) in zip(test_model.named_parameters(), model_copy.named_parameters()):
            if "lora_" in n1:
                # lora modules require gradients, thus updated
                assert p1.requires_grad
                # NOTE(review): `assert_close` returns None when the tensors match
                # and raises when they differ, so `assert not assert_close(...)`
                # passes exactly when the LoRA params did NOT change — the
                # opposite of the comment's intent — and raises if they did.
                # Verify what this check is meant to enforce before changing it.
                assert not torch.testing.assert_close(p1.to(p2.device).to(p2.dtype), p2, atol=5e-3, rtol=5e-3)
            else:
                if not p1.requires_grad:
                    torch.testing.assert_close(p1.to(p2.device).to(p2.dtype), p2, atol=5e-3, rtol=5e-3)
def run_lora_test():
    """Run the LoRA fwd/bwd check on every llama fixture, mapping the registry
    name to the matching peft task type (None for other fixtures)."""
    sub_model_zoo = model_zoo.get_sub_registry("transformers_llama")
    for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
        task_type = None
        if name == "transformers_llama_for_causal_lm":
            task_type = "CAUSAL_LM"
        if name == "transformers_llama_for_sequence_classification":
            task_type = "SEQ_CLS"
        check_fwd_bwd(model_fn, data_gen_fn, output_transform_fn, loss_fn, task_type)
def run_dist(rank, world_size, port):
    # Per-process entry point used by `spawn`; initializes NCCL then runs the suite.
    colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
    run_lora_test()
@rerun_if_address_is_in_use()
def test_torch_ddp_lora():
    """Spawn 2 processes and run the LoRA tests under each plugin."""
    spawn(run_dist, 2)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_pipeline/test_stage_manager.py | tests/test_pipeline/test_stage_manager.py | import pytest
import torch.distributed as dist
import colossalai
from colossalai.cluster import ProcessGroupMesh
from colossalai.pipeline.stage_manager import PipelineStageManager
from colossalai.testing import rerun_if_address_is_in_use, spawn
def check_stage_manager():
    """Validate PipelineStageManager on 4 ranks (dp=2, pp=2) against hand-computed tables."""
    DP_DIM, PP_DIM = 0, 1
    DP_SIZE, PP_SIZE = 2, 2
    # Expected (dp, pp) coordinate of each global rank.
    RANK_TO_COORDINATE = {
        0: (0, 0),
        1: (0, 1),
        2: (1, 0),
        3: (1, 1),
    }
    # Expected pipeline-group membership per rank.
    PP_RANKS_IN_GROUP = {
        0: [0, 1],
        1: [0, 1],
        2: [2, 3],
        3: [2, 3],
    }
    pg_mesh = ProcessGroupMesh(DP_SIZE, PP_SIZE)
    stage_manager = PipelineStageManager(pg_mesh, PP_DIM)
    rank = dist.get_rank()
    # check stage info
    assert stage_manager.num_stages == PP_SIZE
    assert stage_manager.stage == RANK_TO_COORDINATE[rank][PP_DIM]
    # check is_first_stage
    ranks_in_group = PP_RANKS_IN_GROUP[rank]
    is_first_stage = ranks_in_group.index(rank) == 0
    assert stage_manager.is_first_stage() == is_first_stage
    # check is_last_stage
    is_last_stage = ranks_in_group.index(rank) == len(ranks_in_group) - 1
    assert stage_manager.is_last_stage() == is_last_stage
    # check prev rank
    if not is_first_stage:
        prev_rank = ranks_in_group[ranks_in_group.index(rank) - 1]
        assert stage_manager.get_prev_rank() == prev_rank
    # check next rank
    if not is_last_stage:
        next_rank = ranks_in_group[ranks_in_group.index(rank) + 1]
        assert stage_manager.get_next_rank() == next_rank
    # check p2p groups: each adjacent stage pair should share a usable group
    for prev, cur in zip(ranks_in_group[:-1], ranks_in_group[1:]):
        if rank in [prev, cur]:
            group = stage_manager.get_p2p_process_group()
            dist.barrier(group=group)
    # check stage groups on a flat 4-stage mesh
    pg_mesh = ProcessGroupMesh(4)
    stage_manager = PipelineStageManager(pg_mesh, 0)
    group = stage_manager.init_process_group_by_stages([0, 2])
    if rank in [0, 2]:
        dist.barrier(group=group)
def run_dist(rank, world_size, port):
    # Per-process entry point used by `spawn`.
    colossalai.launch(rank=rank, world_size=world_size, port=port, host="localhost")
    check_stage_manager()
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_pipeline_stage_manager():
    """Spawn 4 processes and run the stage-manager checks on each."""
    spawn(run_dist, 4)
if __name__ == "__main__":
    test_pipeline_stage_manager()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_pipeline/test_p2p_communication.py | tests/test_pipeline/test_p2p_communication.py | import pytest
import torch
import torch.distributed as dist
import colossalai
from colossalai.accelerator import get_accelerator
from colossalai.cluster import ProcessGroupMesh
from colossalai.pipeline.p2p import PipelineP2PCommunication, create_send_metadata
from colossalai.pipeline.stage_manager import PipelineStageManager
from colossalai.testing import rerun_if_address_is_in_use, spawn
# The p2p protocol below is written for exactly two pipeline stages.
WORLD_SIZE = 2


def check_p2p_communication():
    """Exercise every PipelineP2PCommunication primitive between ranks 0 and 1.

    The two ranks execute mirrored send/recv sequences, so the call order on
    each side must match its peer exactly.
    """
    pg_mesh = ProcessGroupMesh(WORLD_SIZE)
    stage_manager = PipelineStageManager(pg_mesh, 0)
    p2p = PipelineP2PCommunication(stage_manager, overlap_p2p=False)
    rank = dist.get_rank()
    tensor = torch.ones(1, device=get_accelerator().get_current_device())
    # Payloads of different shapes: str, tensor, list, dict.
    data = [
        "tensor",
        tensor,
        [tensor],
        {"tensor": tensor},
    ]
    # Phase 1: forward sends from rank 0, plus fused send_forward_recv_backward.
    if rank == 0:
        for obj in data:
            p2p.send_forward(obj)
        for i in range(len(data)):
            recv_obj, _ = p2p.send_forward_recv_backward(data[i], send_first=False)
            assert recv_obj == data[-(i + 1)]
    elif rank == 1:
        for obj in data:
            recv_obj, _ = p2p.recv_forward()
            assert recv_obj == obj
        for i in range(len(data)):
            p2p.send_backward(data[-(i + 1)])
            recv_obj, _ = p2p.recv_forward()
            assert recv_obj == data[i]
    # Phase 2: backward sends from rank 1, plus fused send_backward_recv_forward.
    if rank == 1:
        for obj in data:
            p2p.send_backward(obj)
        for i in range(len(data)):
            recv_obj, _ = p2p.send_backward_recv_forward(data[i], send_first=True)
            assert recv_obj == data[-(i + 1)]
    elif rank == 0:
        for obj in data:
            recv_obj, _ = p2p.recv_backward()
            assert recv_obj == obj
        for i in range(len(data)):
            recv_obj, _ = p2p.send_forward_recv_backward(data[-(i + 1)], send_first=False)
            assert recv_obj == data[i]
    # Phase 3: metadata-free communication with precomputed metadata.
    if rank == 0:
        recv_obj, _ = p2p.send_forward_recv_backward(
            tensor,
            send_metadata=False,
            metadata_recv=create_send_metadata(tensor),
        )
        assert recv_obj == tensor
    elif rank == 1:
        recv_obj, _ = p2p.recv_forward(metadata_recv=create_send_metadata(tensor))
        assert recv_obj == tensor
        p2p.send_backward(tensor, send_metadata=False)
def run_dist(rank, world_size, port):
    # Per-process entry point used by `spawn`.
    colossalai.launch(rank=rank, world_size=world_size, port=port, host="localhost")
    check_p2p_communication()
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_pipeline_p2p():
    """Spawn WORLD_SIZE processes and run the p2p communication checks."""
    spawn(run_dist, WORLD_SIZE)
if __name__ == "__main__":
    test_pipeline_p2p()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_pipeline/test_schedule/test_zerobubble_pp.py | tests/test_pipeline/test_schedule/test_zerobubble_pp.py | from contextlib import nullcontext
from copy import deepcopy
from functools import partial
from typing import Tuple
import pytest
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.testing import assert_close
from transformers.models.llama.configuration_llama import LlamaConfig
from transformers.models.llama.modeling_llama import LlamaModel
from transformers.models.mixtral.configuration_mixtral import MixtralConfig
from transformers.models.mixtral.modeling_mixtral import MixtralModel
import colossalai
from colossalai.booster.booster import Booster
from colossalai.booster.plugin.moe_hybrid_parallel_plugin import HybridParallelPlugin, MoeHybridParallelPlugin
from colossalai.cluster import ProcessGroupMesh
from colossalai.interface import OptimizerWrapper
from colossalai.logging import disable_existing_loggers
from colossalai.pipeline.schedule.v_schedule import PipelineGraph, ScheduledNode
from colossalai.pipeline.schedule.zero_bubble_pp import ZeroBubbleVPipeScheduler
from colossalai.pipeline.stage_manager import PipelineStageManager
from colossalai.shardformer.layer.utils import Randomizer
from colossalai.tensor.d_tensor.api import clear_layout_converter
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
from colossalai.testing.random import seed_all
from tests.test_moe.moe_utils import assert_loose_close
# Tiny model/batch hyper-parameters so the pipeline-schedule tests stay fast.
NUM_BATCH = 8
NUM_TOK_PER_BATCH, NUM_EXPERTS = 4, 4
NUM_LAYERS = 8
HIDDEN_SIZE_PER_HEAD = 4
NUM_HEADS = 4
TOP_K = 1
class MlpModel(nn.Module):
    """Stack of bias-free Linear layers used as a pipeline-parallel test fixture."""

    def __init__(
        self,
        in_dim,
        out_dim,
        num_layers,
        stage_index=None,
        stage_mgr: PipelineStageManager = None,
    ):
        super().__init__()
        self.layers = nn.Sequential(*[nn.Linear(in_dim, out_dim, bias=None) for _ in range(num_layers)])

    def forward(
        self,
        data: torch.Tensor = None,
        hidden_states: torch.Tensor = None,
        stage_index=None,
        stage_mgr: PipelineStageManager = None,
        model_chunk_id: int = None,
    ):
        """Run the whole stack (no stage manager) or only this stage's layer slice.

        With a stage manager, the first chunk of the first stage consumes
        ``data``; all other stages consume ``hidden_states``. Intermediate
        stages return a dict so the scheduler can route the activation.
        """
        if stage_mgr is None:
            # Non-pipelined reference path: apply every layer to `data`.
            hidden_states = data
            for layer in self.layers:
                hidden_states = layer(hidden_states)
            return hidden_states
        else:
            # Set not used layer to None
            held_layers = self.layers[stage_index[0] : stage_index[1]]
            # fwd end
            if stage_mgr.is_first_stage() and stage_mgr.model_chunk_id == 1:
                return held_layers(hidden_states)
            # fwd start
            elif stage_mgr.is_first_stage() and stage_mgr.model_chunk_id == 0:
                return {"hidden_states": held_layers(data)}
            # fwd middle
            else:
                return {"hidden_states": held_layers(hidden_states)}

    def no_sync(self):
        # Mimic DDP's `no_sync` API with a do-nothing context manager.
        return nullcontext()
def assert_optim_param_groups(optim_base_param_groups, optim_pp_param_groups):
    """Check that two optimizer param-group dicts agree on every hyper-parameter.

    Entries are compared pairwise in iteration order; the "params" entry is
    skipped (the parameter lists naturally differ between the two setups).
    """
    pairs = zip(optim_base_param_groups.items(), optim_pp_param_groups.items())
    for (base_key, base_val), (pp_key, pp_val) in pairs:
        if base_key != pp_key or base_key == "params":
            continue
        assert base_val == pp_val
def get_model_numel(model: torch.nn.Module) -> Tuple[int, int]:
    """Return ``(total, trainable)`` parameter element counts for ``model``."""
    total = sum(p.numel() for p in model.parameters())
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    return total, trainable
def _stage_schedule(stage: int, num_microbatch: int) -> list:
    """Build one pipeline stage's node list for the manual zero-bubble V-schedule.

    For every microbatch the stage executes, in order:
    fwd chunk 0, fwd chunk 1, bwd chunk 1 (B then W), bwd chunk 0 (B then W),
    each phase wrapped in its matching RECV_*/SEND_* communication nodes.
    """
    phases = (
        (0, ("RECV_FORWARD", "F", "SEND_FORWARD")),
        (1, ("RECV_FORWARD", "F", "SEND_FORWARD")),
        (1, ("RECV_BACKWARD", "B", "W", "SEND_BACKWARD")),
        (0, ("RECV_BACKWARD", "B", "W", "SEND_BACKWARD")),
    )
    return [
        ScheduledNode(type=node_type, chunk=chunk, stage=stage, minibatch=mb)
        for mb in range(num_microbatch)
        for chunk, node_types in phases
        for node_type in node_types
    ]


# 1) Test manual v_schedule with multiple microbatch
@parameterize(
    "test_config",
    [
        {
            "batch_size": 8,
            "tp_size": 1,
            "pp_size": 4,
            "num_microbatches": 4,
            "zero_stage": 1,
            "precision": "bf16",
            "num_model_chunk": 2,
        },
    ],
)
def run_fwd_bwd_iter_input(test_config):
    """Run one fwd/bwd + optimizer step through ZeroBubbleVPipeScheduler with a
    manually constructed V-schedule and check weights/grads against a plain
    single-process run of the same model.

    The schedule is generated per stage by ``_stage_schedule`` instead of the
    previous 400-line literal table; this also fixes the table's copy-paste slip
    where stage 1's chunk-0 SEND_BACKWARD nodes were tagged ``stage=0``, and
    generalizes the schedule to the configured pp_size / num_microbatches.
    """
    # init dist
    rank = dist.get_rank()
    pp_size = test_config["pp_size"]
    pg_mesh = ProcessGroupMesh(pp_size)
    num_microbatch = test_config["num_microbatches"]
    num_model_chunk = test_config["num_model_chunk"]

    # stage manager with interleaving enabled (two chunks per stage, V-shaped)
    stage_manager = PipelineStageManager(
        pg_mesh, pipeline_axis=0, enable_interleave=True, num_model_chunks=num_model_chunk
    )

    # schedule list: one node list per stage
    zbv_schedule = [_stage_schedule(stage, num_microbatch) for stage in range(pp_size)]

    scheduler = ZeroBubbleVPipeScheduler(
        schedule=zbv_schedule,  # hint: send whole schedule or local schedule only ?
        stage_manager=stage_manager,
        num_model_chunks=pp_size,
        num_microbatch=num_microbatch,
        overlap_p2p=False,
    )

    # loss func
    def criterion(x, *args, **kwargs):
        return (x * x).mean()

    # init model and input
    batch_size = 4
    num_layers = 8
    in_dim = out_dim = 8
    print(f"Before init Model: {torch.cuda.memory_allocated()/1024**3 :.3f} GB on device {stage_manager.get_rank()};")
    model = MlpModel(in_dim=in_dim, out_dim=out_dim, num_layers=num_layers).to(rank)
    data_iter = [torch.rand(batch_size, in_dim, out_dim, requires_grad=True).to(rank)]
    input_base = [t.clone() for t in data_iter]
    model_base = deepcopy(model)

    # V-shaped layer placement: rank r holds layers r and (num_layers - 1 - r);
    # any rank beyond 3 falls back to the innermost pair (3, 4), matching the
    # original explicit if/elif/else chain.
    first_layer = min(rank, 3)
    held_layer_ids = (first_layer, num_layers - 1 - first_layer)
    local_chunk = torch.nn.ModuleList().to(rank)
    for idx, sub_model in enumerate(model.layers):
        if idx in held_layer_ids:
            local_chunk.append(sub_model)

    # init optimizer
    optimizer_base = torch.optim.SGD(model_base.parameters(), lr=1e-5)
    optimizer_pp = OptimizerWrapper(torch.optim.SGD(local_chunk.parameters(), lr=1e-5))

    print(
        f"After init Model & input: {torch.cuda.memory_allocated()/1024**3 :.3f} GB on device {stage_manager.get_rank()};"
    )
    torch.cuda.synchronize()

    # pipelined fwd/bwd + step on the local chunk
    scheduler.forward_backward_step(
        model_chunk=local_chunk,
        data_iter=iter(data_iter),
        criterion=criterion,
        optimizer=optimizer_pp,
        return_loss=True,
        return_outputs=True,
    )
    optimizer_pp.step()

    ##########################
    # Fwd bwd for base
    ##########################
    output_base = model_base(input_base[0])
    loss_base = criterion(output_base)
    loss_base.backward()
    optimizer_base.step()
    print(f"After base fwd & bwd: {torch.cuda.memory_allocated()/1024**3 :.3f} GB;")

    ##########################
    # assert weight
    ##########################
    # local_chunk preserves ascending layer order, so pair it with held_layer_ids.
    for local_idx, base_idx in enumerate(held_layer_ids):
        assert_close(local_chunk[local_idx].weight, model_base.layers[base_idx].weight)
        assert_close(local_chunk[local_idx].weight.grad, model_base.layers[base_idx].weight.grad)
# 2) add optimizer base 1)
@parameterize(
"test_config",
[
{
"batch_size": 8,
"tp_size": 1,
"pp_size": 4,
"num_microbatches": 4,
"zero_stage": 1,
"precision": "bf16",
"num_model_chunk": 2,
},
{
"batch_size": 8,
"tp_size": 1,
"pp_size": 4,
"num_microbatches": 8,
"zero_stage": 1,
"precision": "bf16",
"num_model_chunk": 2,
},
],
)
def run_fwd_bwd_vschedule_with_optim(test_config):
# init dist
rank = dist.get_rank()
pp_size = test_config["pp_size"]
pg_mesh = ProcessGroupMesh(pp_size)
num_microbatch = test_config["num_microbatches"]
num_model_chunk = test_config["num_model_chunk"]
# stage_manager
stage_manager = PipelineStageManager(
pg_mesh, pipeline_axis=0, enable_interleave=True, num_model_chunks=num_model_chunk, use_zbv=True
)
h, a, s = 4096, 32, 1024
mem_f = 34 * h + 5 * a * s
mem_w = -32 * h
mem_b = -mem_w - mem_f
graph = PipelineGraph(
n_stage=pp_size,
n_micro=num_microbatch,
f_cost=1,
b_cost=1,
w_cost=1,
c_cost=1,
f_mem=mem_f,
b_mem=mem_b,
w_mem=mem_w,
# max_mem=mem_f * (p * 2 + m_offset),
)
zbv_schedule = graph.get_v_schedule()
scheduler = ZeroBubbleVPipeScheduler(
schedule=zbv_schedule, # hint: send whole schedule or local schedule only ?
stage_manager=stage_manager,
num_model_chunks=num_model_chunk,
num_microbatch=num_microbatch,
overlap_p2p=False,
)
# init loss func
def criterion(x, *args, **kwargs):
x = x["hidden_states"]
return (x * x).mean()
def criterion_base(x, *args, **kwargs):
return (x * x).mean()
# init model and input
batch_size = test_config["batch_size"]
num_layers = 8
assert num_layers % num_model_chunk == 0, f"Model with {num_layers} layer can not dist on {num_model_chunk} chunk"
in_dim = out_dim = 1024
before_init_memory = torch.cuda.memory_allocated() / 1024**3
print(f"Before init Model: {before_init_memory :.3f} GB on device {stage_manager.get_rank()};")
model = MlpModel(in_dim=in_dim, out_dim=out_dim, num_layers=num_layers).to(rank)
data_iter = {"data": torch.rand(batch_size, in_dim, out_dim, requires_grad=True).to(rank)}
input_base = {k: v.clone() for k, v in data_iter.items()}
model_base = deepcopy(model)
model_pp = deepcopy(model)
layers_per_stage = stage_manager.distribute_layers(len(model.layers))
stage_manager.stage_indices = stage_manager.get_stage_index(layers_per_stage)
model_pp._forward = model_pp.forward
model_pp.forward = partial(model_pp._forward, stage_mgr=stage_manager)
# init optimizer
optimizer_base = torch.optim.SGD(model_base.parameters(), momentum=0.1, lr=1e-5)
optimizer_pp = OptimizerWrapper(torch.optim.SGD(model_pp.parameters(), momentum=0.1, lr=1e-5))
after_init_memory = torch.cuda.memory_allocated() / 1024**3
print(f"After init Model & input: {after_init_memory :.5f} GB on device {stage_manager.get_rank()};")
torch.cuda.synchronize()
result = scheduler.forward_backward_step(
model_chunk=model_pp,
data_iter=iter([data_iter]),
criterion=criterion,
optimizer=optimizer_pp,
return_loss=True,
return_outputs=True,
)
optimizer_pp.step()
after_pp_step_memory = torch.cuda.memory_allocated() / 1024**3
# assert memory
if rank != 0:
# w.grad: hid_dim * hid_dim * microbatch * 4(fp32) * 2 (2 layer in each stage) / 1024**3
# output: hid_dim * hid_dim * microbatch * 4(fp32) / 1024**3
# optim: state hid_dim * hid_dim * 4(fp32) * 2 (2 layer in each stage) / 1024**3
print(
f" num_microbatch {num_microbatch} rank {rank}: {(after_pp_step_memory - after_init_memory)} <= {(in_dim * in_dim * 4 * 5 * batch_size / 1024**3)}"
)
assert (after_pp_step_memory - after_init_memory) <= (in_dim * in_dim * 4 * 5 * batch_size / 1024**3)
else:
# rank0 will also hold output;
print(
f" num_microbatch {num_microbatch} rank {rank}: {round((after_pp_step_memory - after_init_memory), 5)} <= {round((in_dim * in_dim * 4 * 5 * batch_size / 1024**3 + batch_size * in_dim * in_dim * 4 / 1024**3), 5)}"
)
assert round((after_pp_step_memory - after_init_memory), 5) <= round(
(in_dim * in_dim * 4 * 5 * batch_size / 1024**3 + batch_size * in_dim * in_dim * 4 / 1024**3), 5
)
##########################
# Fwd bwd for base
##########################
# fwd & bwd
# output_base = model_base(input_base["data"])
output_base = model_base.forward(data=input_base["data"])
loss_base = criterion_base(output_base)
loss_base.backward()
optimizer_base.step()
##########################
# assert loss & output
##########################
# only chunk 1 stage 0 hold loss and output
if rank == 0:
assert_close(result["loss"], loss_base)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | true |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_pipeline/test_schedule/test_interleaved.py | tests/test_pipeline/test_schedule/test_interleaved.py | import copy
from functools import partial
from types import MethodType
import pytest
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.testing import assert_close
import colossalai
from colossalai.cluster import ProcessGroupMesh
from colossalai.interface import OptimizerWrapper
from colossalai.pipeline.schedule.interleaved_pp import InterleavedSchedule
from colossalai.pipeline.stage_manager import PipelineStageManager
from colossalai.testing import rerun_if_address_is_in_use, spawn
from colossalai.testing.random import seed_all
# Toy model sizing for this file: NUM_LAYER linear layers of width DIM.
NUM_LAYER = 8
DIM = 4
class MlpModel(nn.Module):
    """NUM_LAYER equally-sized Linear layers applied in sequence."""

    def __init__(self):
        super().__init__()
        self.layers = nn.ModuleList(nn.Linear(DIM, DIM) for _ in range(NUM_LAYER))

    def forward(self, x):
        out = x
        for block in self.layers:
            out = block(out)
        return out
def pp_linear_fwd(
    forward,
    data: torch.Tensor = None,
    input_obj: torch.Tensor = None,
    stage_mgr: PipelineStageManager = None,
    model_chunk_id: int = None,
):
    """Adapter that lets the interleaved schedule drive a single Linear layer.

    Under the given model-chunk context: the first stage consumes the raw
    ``data`` batch, the last stage returns a bare tensor, and every other stage
    forwards ``input_obj``, re-wrapped in a dict so the schedule can route it on.
    """
    with stage_mgr.switch_model_chunk_id(model_chunk_id):
        if stage_mgr.is_first_stage():
            return {"input_obj": forward(data)}
        if stage_mgr.is_last_stage():
            return forward(input_obj)
        return {"input_obj": forward(input_obj)}
def run_pp(
    rank: int,
    world_size: int,
    port: int,
    num_microbatch: int,
    batch_size: int,
    num_model_chunk: int,
):
    """
    This test is to examine the correctness of interleaved 1F1B, compared with torch.
    Be aware it contains some hardcodes.

    One process runs per pipeline stage; each stage keeps every
    ``world_size``-th layer, i.e. ``num_model_chunk`` layers per rank.
    """
    colossalai.launch(rank=rank, world_size=world_size, port=port, host="localhost")

    # create model (same seed on all ranks, so every rank builds identical weights)
    seed_all(1453)
    torch_model = MlpModel().cuda()
    pp_model = copy.deepcopy(torch_model).cuda()

    pg_mesh = ProcessGroupMesh(world_size)
    stage_manager = PipelineStageManager(
        pg_mesh, pipeline_axis=0, enable_interleave=True, num_model_chunks=num_model_chunk
    )
    schedule = InterleavedSchedule(
        stage_manager=stage_manager,
        num_model_chunks=num_model_chunk,
        num_microbatch=num_microbatch,
    )

    # Interleaved placement: rank r keeps layers r, r + world_size, r + 2*world_size, ...
    # Each kept layer's forward is wrapped so the schedule can feed/collect dicts.
    sharded_model = torch.nn.ModuleList()
    for idx, sub_model in enumerate(pp_model.layers):
        if idx % world_size == rank:
            sub_model._forward = sub_model.forward
            sub_model.forward = MethodType(
                partial(pp_linear_fwd, stage_mgr=stage_manager, model_chunk_id=len(sharded_model)),
                sub_model._forward,
            )
            sharded_model.append(sub_model.cuda())
    assert len(sharded_model) == num_model_chunk, "num_model_chunk is not correct"

    # create optimizer
    torch_optimizer = torch.optim.SGD(torch_model.parameters(), lr=1e-5)
    pp_optimizer = OptimizerWrapper(torch.optim.SGD(sharded_model.parameters(), lr=1e-5))

    # create data (all-reduce so every rank ends up with the identical batch)
    seed_all(115)
    input_list = [torch.rand(batch_size, DIM).cuda()]
    dist.all_reduce(input_list[0])

    def criterion(x, *args, **kwargs):
        return (x * x).mean()

    # forward and backward: reference run on the full model, then the pipelined run
    torch_output = torch_model(input_list[0])
    torch_loss = criterion(torch_output)
    torch_loss.backward()
    pp_ret = schedule.forward_backward_step(sharded_model, iter(input_list), criterion, pp_optimizer, return_loss=True)

    # check loss (only the true last stage holds it)
    if stage_manager.is_last_stage(ignore_chunk=True):
        assert_close(torch_loss, pp_ret["loss"])

    # check gradients of the layers this rank owns against the reference model
    for i in range(num_model_chunk):
        idx = world_size * i + rank
        assert_close(torch_model.layers[idx].weight.grad, sharded_model[i].weight.grad)
        assert_close(torch_model.layers[idx].bias.grad, sharded_model[i].bias.grad)

    # step
    torch_optimizer.step()
    pp_optimizer.step()
    pp_optimizer.zero_grad()

    # check updated param
    for i in range(num_model_chunk):
        idx = world_size * i + rank
        assert_close(torch_model.layers[idx].weight, sharded_model[i].weight)
        assert_close(torch_model.layers[idx].bias, sharded_model[i].bias)

    # forward only: after zero_grad + a no-grad schedule pass, grads must stay absent/zero
    with torch.no_grad():
        torch_output = torch_model(input_list[0])
        torch_loss = criterion(torch_output)

        pp_ret = schedule.forward_backward_step(
            sharded_model, iter(input_list), criterion, pp_optimizer, return_loss=True
        )
        if stage_manager.is_last_stage(ignore_chunk=True):
            assert_close(torch_loss, pp_ret["loss"])

        for layer in sharded_model:
            if layer.weight.grad is None:
                # never back-propagated: bias grad must be absent too
                assert layer.weight.grad is None and layer.bias.grad is None
            else:
                # zero_grad above zeroed them; forward-only must not repopulate
                assert_close(layer.weight.grad, torch.zeros_like(layer.weight.grad))
                assert_close(layer.bias.grad, torch.zeros_like(layer.bias.grad))
@pytest.mark.dist
@pytest.mark.parametrize("num_microbatch", [4, 12])
@pytest.mark.parametrize("batch_size", [12])
@pytest.mark.parametrize("num_model_chunk", [2, 4])
@rerun_if_address_is_in_use()
def test_pp(num_microbatch: int, batch_size: int, num_model_chunk: int):
    """Spawn one process per pipeline stage and run the interleaved-1F1B check."""
    # Layers must divide evenly across chunks for the round-robin placement in run_pp.
    assert NUM_LAYER % num_model_chunk == 0
    spawn(
        run_pp,
        # each rank hosts num_model_chunk layers, so world size = layers / chunks
        nprocs=NUM_LAYER // num_model_chunk,
        num_microbatch=num_microbatch,
        batch_size=batch_size,
        num_model_chunk=num_model_chunk,
    )
if __name__ == "__main__":
    # Manual entry point for running this file directly (outside pytest) with one fixed config.
    test_pp(num_microbatch=4, batch_size=4, num_model_chunk=4)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_pipeline/test_schedule/test_pipeline_schedule_utils.py | tests/test_pipeline/test_schedule/test_pipeline_schedule_utils.py | import torch
from colossalai.pipeline.schedule._utils import get_batch_size, get_micro_batch, merge_batch
def test_get_batch_size():
    """get_batch_size reads the leading dim of the first tensor found in a (nested) batch."""
    sample = torch.rand(2, 3)
    wrapped_batches = (
        sample,
        [sample],
        (1, sample),
        {"tensor": sample},
        {"dummy": [1], "tensor": sample},
        {"tensor": [sample]},
    )
    for batch in wrapped_batches:
        assert get_batch_size(batch) == 2
def test_get_micro_batch():
    """get_micro_batch slices micro-batch *i* out of tensors, lists and dicts alike."""
    x = torch.rand(2, 1)
    y = torch.rand(2, 3)
    for i in range(2):
        window = slice(i, i + 1)

        sliced = get_micro_batch(x, i, 1)
        assert torch.equal(sliced, x[window])

        pair = get_micro_batch([x, y], i, 1)
        assert torch.equal(pair[0], x[window])
        assert torch.equal(pair[1], y[window])

        named = get_micro_batch({"x": x, "y": y}, i, 1)
        assert torch.equal(named["x"], x[window])
        assert torch.equal(named["y"], y[window])
def test_merge_batch():
    """merge_batch re-concatenates micro-batches for tensors, sequences and dicts."""
    x = torch.rand(2, 1)
    y = torch.rand(2, 3)
    x_parts = (x[0:1], x[1:2])
    y_parts = (y[0:1], y[1:2])

    assert torch.equal(merge_batch(list(x_parts)), x)

    merged_seq = merge_batch([[x_parts[0], y_parts[0]], [x_parts[1], y_parts[1]]])
    assert torch.equal(merged_seq[0], x)
    assert torch.equal(merged_seq[1], y)

    merged_map = merge_batch([{"x": x_parts[0], "y": y_parts[0]}, {"x": x_parts[1], "y": y_parts[1]}])
    assert torch.equal(merged_map["x"], x)
    assert torch.equal(merged_map["y"], y)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_pipeline/test_schedule/test_oneF_oneB.py | tests/test_pipeline/test_schedule/test_oneF_oneB.py | import copy
from functools import partial
from types import MethodType
import pytest
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.testing import assert_close
import colossalai
from colossalai.cluster import ProcessGroupMesh
from colossalai.interface import OptimizerWrapper
from colossalai.pipeline.schedule.one_f_one_b import OneForwardOneBackwardSchedule
from colossalai.pipeline.stage_manager import PipelineStageManager
from colossalai.testing import rerun_if_address_is_in_use, spawn
from colossalai.testing.random import seed_all
# Toy model sizing for this file: NUM_LAYER linear layers of width DIM.
DIM = 8
NUM_LAYER = 8
class MlpModel(nn.Module):
    """A plain stack of NUM_LAYER width-DIM Linear layers (reference model for 1F1B)."""

    def __init__(self):
        super().__init__()
        self.layers = nn.ModuleList(nn.Linear(DIM, DIM) for _ in range(NUM_LAYER))

    def forward(self, x):
        activation = x
        for linear in self.layers:
            activation = linear(activation)
        return activation
def pp_linear_fwd(
    forward,
    data: torch.Tensor = None,
    input_obj: torch.Tensor = None,
    stage_mgr: PipelineStageManager = None,
):
    """Adapter that lets the 1F1B schedule drive a sharded model's forward.

    The first stage consumes the raw ``data`` batch, the last stage returns a
    bare tensor, and middle stages forward ``input_obj``, re-wrapped in a dict
    so the schedule can route it to the next stage.
    """
    if stage_mgr.is_first_stage():
        return {"input_obj": forward(data)}
    if stage_mgr.is_last_stage():
        return forward(input_obj)
    return {"input_obj": forward(input_obj)}
def examine_pp(num_microbatch: int, batch_size: int):
    """Compare the 1F1B pipeline schedule against a plain torch run.

    Shards an ``NUM_LAYER``-layer MLP evenly over the pipeline ranks, runs a
    forward/backward through ``OneForwardOneBackwardSchedule`` and checks the
    loss, the per-layer gradients, the updated parameters, and a final
    forward-only pass against the unsharded reference model.

    Be aware it contains some hardcodes (lr=1, even layer split).
    """
    world_size = dist.get_world_size()
    seed_all(1453)

    # create models: pp_model is a deep copy so both start from identical weights
    torch_model = MlpModel().cuda()
    pp_model = copy.deepcopy(torch_model).cuda()

    pg_mesh = ProcessGroupMesh(world_size)
    stage_manager = PipelineStageManager(pg_mesh, pipeline_axis=0)
    schedule = OneForwardOneBackwardSchedule(stage_manager, num_microbatches=num_microbatch)

    # each rank keeps a contiguous block of NUM_LAYER // world_size layers
    rank = dist.get_rank()
    sharded_model = torch.nn.ModuleList()
    num_local_layer = NUM_LAYER // world_size
    for idx, sub_model in enumerate(pp_model.layers):
        if idx // num_local_layer == rank:
            sharded_model.append(sub_model.cuda())
    assert len(sharded_model) == num_local_layer

    def custom_fwd(self, x):
        # sequential forward over this rank's local layers
        for layer in self._modules.values():
            x = layer(x)
        return x

    # bind the local forward, then wrap it so it speaks the pipeline I/O
    # convention the schedule expects (see pp_linear_fwd)
    sharded_model._forward = MethodType(custom_fwd, sharded_model)
    sharded_model.forward = MethodType(
        partial(
            pp_linear_fwd,
            stage_mgr=stage_manager,
        ),
        sharded_model._forward,
    )

    # create optimizers; lr=1 makes the SGD update exactly subtract the gradient
    torch_optimizer = torch.optim.SGD(torch_model.parameters(), lr=1)
    pp_optimizer = OptimizerWrapper(torch.optim.SGD(sharded_model.parameters(), lr=1))

    # create input; all-reduce so every rank sees the identical batch
    seed_all(1453)
    input_list = [torch.rand(batch_size, DIM).cuda()]
    dist.all_reduce(input_list[0])

    def criterion(x, *args, **kwargs):
        # simple differentiable scalar loss; extra args from the schedule are ignored
        return (x * x).mean()

    # forward and backward on the unsharded reference
    torch_output = torch_model(input_list[0])
    torch_loss = criterion(torch_output)
    torch_loss.backward()
    pp_ret = schedule.forward_backward_step(sharded_model, iter(input_list), criterion, pp_optimizer, return_loss=True)

    # check loss (only the last stage holds the final loss)
    if stage_manager.is_last_stage():
        assert_close(torch_loss, pp_ret["loss"])

    # check gradients of this rank's local layers
    for i in range(len(sharded_model)):
        idx = rank * num_local_layer + i
        assert_close(torch_model.layers[idx].weight.grad, sharded_model[i].weight.grad)
        assert_close(torch_model.layers[idx].bias.grad, sharded_model[i].bias.grad)

    # step both optimizers
    torch_optimizer.step()
    pp_optimizer.step()
    pp_optimizer.zero_grad()

    # check updated parameters
    for i in range(len(sharded_model)):
        idx = rank * num_local_layer + i
        assert_close(torch_model.layers[idx].weight, sharded_model[i].weight)
        assert_close(torch_model.layers[idx].bias, sharded_model[i].bias)

    # forward only: loss must still match and no gradient may be (re)accumulated
    with torch.no_grad():
        torch_output = torch_model(input_list[0])
        torch_loss = criterion(torch_output)
        pp_ret = schedule.forward_backward_step(
            sharded_model, iter(input_list), criterion, pp_optimizer, return_loss=True
        )
    if stage_manager.is_last_stage():
        assert_close(torch_loss, pp_ret["loss"])
    for layer in sharded_model:
        if layer.weight.grad is None:
            # grads never populated on this layer at all
            assert layer.bias.grad is None
        else:
            # populated earlier; must be exactly zero after zero_grad
            assert_close(layer.weight.grad, torch.zeros_like(layer.weight.grad))
            assert_close(layer.bias.grad, torch.zeros_like(layer.bias.grad))
def run_dist(
    rank: int,
    world_size: int,
    port: int,
    num_microbatch: int,
    batch_size: int,
):
    """Per-process entry point: bring up the distributed runtime, then run
    the 1F1B correctness check."""
    launch_kwargs = dict(rank=rank, world_size=world_size, port=port, host="localhost")
    colossalai.launch(**launch_kwargs)
    examine_pp(num_microbatch, batch_size)
@pytest.mark.dist
@pytest.mark.parametrize("num_microbatch", [4, 6])
@pytest.mark.parametrize("batch_size", [12])
@pytest.mark.parametrize("world_size", [2, 4])
@rerun_if_address_is_in_use()
def test_pp(num_microbatch: int, batch_size: int, world_size: int):
    """Spawn ``world_size`` ranks and verify the 1F1B schedule end to end."""
    # even layer split over stages is a precondition of examine_pp
    assert NUM_LAYER % world_size == 0
    spawn(run_dist, world_size, num_microbatch=num_microbatch, batch_size=batch_size)
if __name__ == "__main__":
    # direct invocation without pytest: runs a single hand-picked parametrization
    test_pp(num_microbatch=4, batch_size=4, world_size=4)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_pipeline/test_pipeline_utils/test_t5_pipeline_utils.py | tests/test_pipeline/test_pipeline_utils/test_t5_pipeline_utils.py | import random
from colossalai.pipeline.stage_manager import PipelineStageManager
from colossalai.shardformer.policies.t5 import T5BasePolicy
from colossalai.shardformer.shard.shard_config import ShardConfig
class _ShardConfig(ShardConfig):
    # Test stub: override __post_init__ with a no-op so the config dataclass
    # can be built standalone (the parent's __post_init__ presumably performs
    # runtime setup that is unavailable in a unit test — see ShardConfig).
    def __post_init__(self):
        pass
class _PipelineStageManager(PipelineStageManager):
    # Test stub: skips PipelineStageManager.__init__ (no process groups in a
    # unit test) and hand-sets only the attributes the policy code reads.
    def __init__(self):
        self.is_interleave = False
        self.num_layers_per_stage = None
        self.num_model_chunks = 1
        self.use_zbv = False

    @property
    def num_stages(self):
        # deliberately randomized: the tests below pass num_stages explicitly,
        # so any accidental dependence on this property would show up as flakiness
        return random.randint(5, 10)
def test_t5_pipeline_distribution():
    """distribute_t5_layers must place the decoder at the expected stage."""
    cases = {
        "num_encoder_layers": [2, 1, 3, 2, 3, 2, 10, 5],
        "num_decoder_layers": [2, 8, 0, 2, 1, 5, 6, 22],
        "num_stages": [2, 2, 2, 4, 4, 4, 8, 8],
        "decoder_starting_stage": [1, 1, 2, 2, 3, 1, 5, 2],
    }

    policy = T5BasePolicy()
    policy.set_shard_config(_ShardConfig(pipeline_stage_manager=_PipelineStageManager()))

    for enc, dec, stages, expected in zip(
        cases["num_encoder_layers"],
        cases["num_decoder_layers"],
        cases["num_stages"],
        cases["decoder_starting_stage"],
    ):
        _, decoder_starting_stage = policy.distribute_t5_layers(enc, dec, stages)
        assert decoder_starting_stage == expected
def test_t5_pipeline_layers():
    """distribute_t5_layers + get_t5_stage_index must yield the expected
    per-stage layer index ranges."""
    num_test_cases = 4
    test_dict = {
        "num_encoder_layers": [2, 3, 2, 4],
        "num_decoder_layers": [2, 0, 2, 8],
        "num_stages": [2, 2, 4, 4],
        "layers_per_stage": [
            [[0, 2], [0, 2]],
            [[0, 1], [1, 3]],
            [[0, 1], [1, 2], [0, 1], [1, 2]],
            [[0, 4], [0, 3], [3, 6], [6, 8]],
        ],
    }

    # Build the policy once instead of once per case: the distribution test
    # above and the whisper twin of this test both reuse a single policy
    # across cases, so rebuilding it in the loop was redundant.
    stage_manager = _PipelineStageManager()
    shard_config = _ShardConfig(pipeline_stage_manager=stage_manager)
    policy = T5BasePolicy()
    policy.set_shard_config(shard_config)

    for i in range(num_test_cases):
        layers_per_stage, decoder_starting_stage = policy.distribute_t5_layers(
            test_dict["num_encoder_layers"][i],
            test_dict["num_decoder_layers"][i],
            test_dict["num_stages"][i],
        )
        for stage in range(test_dict["num_stages"][i]):
            start_idx, end_idx = test_dict["layers_per_stage"][i][stage]
            predicted_start, predicted_end = policy.get_t5_stage_index(layers_per_stage, stage, decoder_starting_stage)
            assert start_idx == predicted_start
            assert end_idx == predicted_end
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_pipeline/test_pipeline_utils/test_whisper_pipeline_utils.py | tests/test_pipeline/test_pipeline_utils/test_whisper_pipeline_utils.py | import random
from colossalai.pipeline.stage_manager import PipelineStageManager
from colossalai.shardformer.policies.whisper import WhisperPolicy
from colossalai.shardformer.shard.shard_config import ShardConfig
class _ShardConfig(ShardConfig):
    # Test stub: override __post_init__ with a no-op so the config dataclass
    # can be built standalone (the parent's __post_init__ presumably performs
    # runtime setup that is unavailable in a unit test — see ShardConfig).
    def __post_init__(self):
        pass
class _PipelineStageManager(PipelineStageManager):
    # Test stub: skips PipelineStageManager.__init__ (no process groups in a
    # unit test) and hand-sets only the attributes the policy code reads.
    def __init__(self):
        self.is_interleave = False
        self.num_layers_per_stage = None
        self.num_model_chunks = 1
        self.use_zbv = False

    @property
    def num_stages(self):
        # deliberately randomized: the tests below pass num_stages explicitly,
        # so any accidental dependence on this property would show up as flakiness
        return random.randint(5, 10)
def test_whisper_pipeline_distribution():
    """distribute_whisper_layers must place the decoder at the expected stage."""
    cases = {
        "num_encoder_layers": [2, 1, 3, 2, 3, 2, 10, 5],
        "num_decoder_layers": [2, 8, 0, 2, 1, 5, 6, 22],
        "num_stages": [2, 2, 2, 4, 4, 4, 8, 8],
        "decoder_starting_stage": [1, 1, 2, 2, 3, 1, 5, 2],
    }

    policy = WhisperPolicy()
    policy.set_shard_config(_ShardConfig(pipeline_stage_manager=_PipelineStageManager()))

    for enc, dec, stages, expected in zip(
        cases["num_encoder_layers"],
        cases["num_decoder_layers"],
        cases["num_stages"],
        cases["decoder_starting_stage"],
    ):
        _, decoder_starting_stage = policy.distribute_whisper_layers(enc, dec, stages)
        assert decoder_starting_stage == expected
def test_whisper_pipeline_layers():
    """distribute_whisper_layers + get_whisper_stage_index must yield the
    expected per-stage layer index ranges."""
    cases = {
        "num_encoder_layers": [2, 3, 2, 4],
        "num_decoder_layers": [2, 0, 2, 8],
        "num_stages": [2, 2, 4, 4],
        "layers_per_stage": [
            [[0, 2], [0, 2]],
            [[0, 1], [1, 3]],
            [[0, 1], [1, 2], [0, 1], [1, 2]],
            [[0, 4], [0, 3], [3, 6], [6, 8]],
        ],
    }

    policy = WhisperPolicy()
    policy.set_shard_config(_ShardConfig(pipeline_stage_manager=_PipelineStageManager()))

    for enc, dec, stages, expected_ranges in zip(
        cases["num_encoder_layers"],
        cases["num_decoder_layers"],
        cases["num_stages"],
        cases["layers_per_stage"],
    ):
        layers_per_stage, decoder_starting_stage = policy.distribute_whisper_layers(enc, dec, stages)
        for stage in range(stages):
            expected_start, expected_end = expected_ranges[stage]
            got_start, got_end = policy.get_whisper_stage_index(
                layers_per_stage, stage, decoder_starting_stage
            )
            assert got_start == expected_start
            assert got_end == expected_end
if __name__ == "__main__":
    # direct invocation without pytest
    test_whisper_pipeline_distribution()
    test_whisper_pipeline_layers()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_config/sample_config.py | tests/test_config/sample_config.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Sample training-data config consumed by Config.from_file in the loader test.
# Each ``type`` key presumably names a registered class to instantiate with the
# remaining keys as kwargs — TODO(review): confirm against the config builder.
train_data = dict(
    dataset=dict(
        type="CIFAR10Dataset",
        root="/path/to/data",  # placeholder path, never touched by the test
        download=True,
        transform_pipeline=[
            dict(type="RandomResizedCrop", size=224),
            dict(type="RandomHorizontalFlip"),
            dict(type="ToTensor"),
            dict(type="Normalize", mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
        ],
    ),
    dataloader=dict(
        batch_size=64,
        pin_memory=True,
        num_workers=4,
        sampler=dict(
            type="DataParallelSampler",
            shuffle=True,
        ),
    ),
)
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_config/test_load_config.py | tests/test_config/test_load_config.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from pathlib import Path
from colossalai.context.config import Config
def test_load_config():
    """Config.from_file must expose nested dict entries as attributes."""
    sample = Path(__file__).parent.joinpath("sample_config.py")
    config = Config.from_file(sample)

    assert config.train_data, "cannot access train data as attribute"
    assert config.train_data.dataset, "cannot access grandchild attribute"
    first_transform = config.train_data.dataset.transform_pipeline[0]
    assert isinstance(
        first_transform, dict
    ), f"expected attribute transform_pipeline elements to be a dict, but found {type(config.train_data.dataset.transform_pipeline)}"
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_comm_size_compute.py | tests/test_fx/test_comm_size_compute.py | import torch
from torch.fx import symbolic_trace
from colossalai.fx._compatibility import is_compatible_with_meta
from colossalai.fx.passes.adding_split_node_pass import split_with_split_nodes_pass, uniform_split_pass
from colossalai.fx.passes.meta_info_prop import MetaInfoProp
from colossalai.fx.passes.utils import get_comm_size
from colossalai.testing import clear_cache_before_run
# Resolve meta-tensor compatibility once at import time; the MetaTensor import
# is guarded because it only exists when the installed torch supports it.
is_compatible = is_compatible_with_meta()
if is_compatible:
    from colossalai.fx.profiler import MetaTensor
# model width, probe batch size, and number of pipeline partitions for the split
MODEL_DIM = 16
BATCH_SIZE = 8
PIPELINE_SIZE = 2
class MLP(torch.nn.Module):
    """Four equal-width linear layers applied back to back; a minimal model
    for exercising the pipeline split passes."""

    def __init__(self, dim: int):
        super().__init__()
        # construction order is fixed: it determines the RNG stream used to
        # initialize each layer's parameters
        self.linear1 = torch.nn.Linear(dim, dim)
        self.linear2 = torch.nn.Linear(dim, dim)
        self.linear3 = torch.nn.Linear(dim, dim)
        self.linear4 = torch.nn.Linear(dim, dim)

    def forward(self, x):
        return self.linear4(self.linear3(self.linear2(self.linear1(x))))
@clear_cache_before_run()
def test_comm_size_compute():
    """After a uniform 2-way split of the MLP, the tensor crossing the
    partition boundary is (BATCH_SIZE, MODEL_DIM) = 8 * 16 = 128 elements."""
    model = MLP(MODEL_DIM)
    input_sample = torch.rand(BATCH_SIZE, MODEL_DIM, device="meta")
    gm = symbolic_trace(model)
    if is_compatible:
        input_sample = MetaTensor(input_sample, fake_device=next(gm.parameters()).device)
    MetaInfoProp(gm).run(input_sample)

    annotated_model = uniform_split_pass(gm, PIPELINE_SIZE)
    split_model, _ = split_with_split_nodes_pass(annotated_model)

    first_part, second_part = list(split_model.children())
    assert get_comm_size(first_part, second_part) == 128
if __name__ == "__main__":
    # direct invocation without pytest
    test_comm_size_compute()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_coloproxy.py | tests/test_fx/test_coloproxy.py | import torch
import torch.nn as nn
from torch.fx import GraphModule
from colossalai.fx.proxy import ColoProxy
from colossalai.fx.tracer.tracer import ColoTracer
from colossalai.testing import clear_cache_before_run
class Conv1D(nn.Module):
    """GPT-2 style "1D convolution": a linear layer whose weight is stored
    transposed, i.e. with shape (in_features, out_features)."""

    def __init__(self, nf, nx):
        super().__init__()
        self.nf = nf
        # init order (empty then normal_) is kept so the RNG stream matches
        # the original implementation
        w = torch.empty(nx, nf)
        nn.init.normal_(w, std=0.02)
        self.weight = nn.Parameter(w)
        self.bias = nn.Parameter(torch.zeros(nf))

    def forward(self, x):
        out_shape = x.shape[:-1] + (self.nf,)
        flat = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        return flat.view(out_shape)
@clear_cache_before_run()
def test_coloproxy():
    """ColoProxy should expose tensor-like introspection (len, shape, dim,
    dtype, size) backed by its ``meta_data`` tensor."""
    tracer = ColoTracer()
    model = Conv1D(3, 3)
    input_sample = {"x": torch.rand(3, 3).to("meta")}

    graph = tracer.trace(root=model, meta_args=input_sample)
    gm = GraphModule(model, graph, model.__class__.__name__)
    gm.recompile()

    first_node = list(gm.graph.nodes)[0]
    proxy = ColoProxy(node=first_node, tracer=tracer)
    proxy.meta_data = torch.empty(4, 2, device="meta")

    assert len(proxy) == 4
    assert proxy.shape[0] == 4
    assert proxy.shape[1] == 2
    assert proxy.dim() == 2
    assert proxy.dtype == torch.float32
    assert proxy.size(0) == 4
if __name__ == "__main__":
    # direct invocation without pytest
    test_coloproxy()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_parallel_1d.py | tests/test_fx/test_parallel_1d.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import pytest
import torch
from torch.fx import symbolic_trace
from colossalai.fx.passes import column_shard_linear_pass
from colossalai.initialize import launch
from colossalai.legacy.core import global_context as gpc
from colossalai.logging import disable_existing_loggers
from colossalai.testing import clear_cache_before_run, rerun_if_address_is_in_use, spawn
class MLP(torch.nn.Module):
    """Four chained square linear layers; the model whose linears get
    column-sharded in this test."""

    def __init__(self, dim: int):
        super().__init__()
        # construction order is fixed: it determines the parameter-init RNG stream
        self.linear1 = torch.nn.Linear(dim, dim)
        self.linear2 = torch.nn.Linear(dim, dim)
        self.linear3 = torch.nn.Linear(dim, dim)
        self.linear4 = torch.nn.Linear(dim, dim)

    def forward(self, x):
        for layer in (self.linear1, self.linear2, self.linear3, self.linear4):
            x = layer(x)
        return x
# legacy 1D tensor-parallel config (size 2); not referenced anywhere in this
# file — TODO(review): confirm whether any external runner still consumes it
CONFIG = dict(parallel=dict(tensor=dict(mode="1d", size=2)))
def check_layer(rank, world_size, port):
    """Per-process body: column-shard the MLP's linears and verify the
    sharded graph reproduces the unsharded output."""
    disable_existing_loggers()
    launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")

    data = torch.rand(2, 16).cuda()
    model = MLP(16).cuda()
    traced = symbolic_trace(model)

    expected = model(data)
    sharded_gm = column_shard_linear_pass(traced)
    actual = sharded_gm(data)
    assert expected.equal(actual)

    gpc.destroy()
    torch.cuda.empty_cache()
@pytest.mark.dist
@clear_cache_before_run()
@rerun_if_address_is_in_use()
def test_1d():
    # spawn two ranks; each runs check_layer (requires CUDA + the nccl backend)
    spawn(check_layer, 2)
if __name__ == "__main__":
    # direct invocation without pytest
    test_1d()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_meta_info_prop.py | tests/test_fx/test_meta_info_prop.py | import torch
from torch.fx import symbolic_trace
from colossalai.fx._compatibility import is_compatible_with_meta
from colossalai.fx.passes.meta_info_prop import MetaInfoProp, TensorMetadata
from colossalai.testing import clear_cache_before_run
# MetaTensor only exists when the installed torch supports colossalai's meta profiler
if is_compatible_with_meta():
    from colossalai.fx.profiler import MetaTensor
# probe shapes: a (BATCH_SIZE, DIM_IN) input through a Linear(DIM_IN, DIM_OUT)
BATCH_SIZE = 2
DIM_IN = 4
DIM_OUT = 16
def meta_check(meta_info_spec: TensorMetadata, orig_tensor: torch.Tensor):
    """Assert that the recorded metadata matches the real tensor's shape,
    dtype, stride and element count."""
    assert orig_tensor.shape == meta_info_spec.shape
    assert orig_tensor.dtype == meta_info_spec.dtype
    assert orig_tensor.stride() == meta_info_spec.stride
    assert orig_tensor.numel() == meta_info_spec.numel
@clear_cache_before_run()
def test_meta_info_prop():
    """MetaInfoProp must attach correct tensor_meta to the placeholder and
    output nodes of a traced Linear."""
    model = torch.nn.Linear(DIM_IN, DIM_OUT)
    input_sample = torch.rand(BATCH_SIZE, DIM_IN, device="meta")
    if is_compatible_with_meta():
        input_sample = MetaTensor(input_sample, fake_device="cpu")
    orig_output = model(input_sample)

    gm = symbolic_trace(model)
    MetaInfoProp(gm).run(input_sample)

    # map each node kind we care about to the tensor its metadata must match
    expectations = {"placeholder": input_sample, "output": orig_output}
    for node in gm.graph.nodes:
        reference = expectations.get(node.op)
        if reference is not None:
            meta_check(node.meta["tensor_meta"], reference)
if __name__ == "__main__":
    # direct invocation without pytest
    test_meta_info_prop()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_pipeline_passes.py | tests/test_fx/test_pipeline_passes.py | import torch
from torch.fx import symbolic_trace
from colossalai.fx.passes.adding_split_node_pass import (
balanced_split_pass,
balanced_split_pass_v2,
split_with_split_nodes_pass,
uniform_split_pass,
)
from colossalai.testing import clear_cache_before_run
# model width, probe batch size, and number of pipeline partitions for the splits
MODEL_DIM = 16
BATCH_SIZE = 8
PIPELINE_SIZE = 2
class MLP(torch.nn.Module):
    """Four equal-width linear layers applied in sequence; the model used to
    exercise the pipeline split passes."""

    def __init__(self, dim: int):
        super().__init__()
        # construction order is fixed: it determines the parameter-init RNG stream
        self.linear1 = torch.nn.Linear(dim, dim)
        self.linear2 = torch.nn.Linear(dim, dim)
        self.linear3 = torch.nn.Linear(dim, dim)
        self.linear4 = torch.nn.Linear(dim, dim)

    def forward(self, x):
        out = x
        for layer in [self.linear1, self.linear2, self.linear3, self.linear4]:
            out = layer(out)
        return out
def pipeline_pass_test_helper(model, data, pass_func):
    """Split a traced copy of ``model`` with ``pass_func`` and check that the
    split model's output matches the original model's output."""
    expected = model(data)
    traced = symbolic_trace(model)
    annotated = pass_func(traced, PIPELINE_SIZE)
    split_model, _ = split_with_split_nodes_pass(annotated)
    assert split_model(data).equal(expected)
@clear_cache_before_run()
def test_pipeline_passes():
    """Each split pass must preserve the model's numerical output."""
    model = MLP(MODEL_DIM)
    data = torch.rand(BATCH_SIZE, MODEL_DIM)
    for split_pass in (balanced_split_pass, balanced_split_pass_v2, uniform_split_pass):
        pipeline_pass_test_helper(model, data, split_pass)
if __name__ == "__main__":
    # direct invocation without pytest
    test_pipeline_passes()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
hpcaitech/ColossalAI | https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/tests/test_fx/test_graph_manipulation.py | tests/test_fx/test_graph_manipulation.py | import torch
from colossalai.fx import ColoTracer
from colossalai.fx.passes.utils import assign_bfs_level_to_nodes, get_leaf, get_top
from colossalai.testing import clear_cache_before_run
class MLP(torch.nn.Module):
    """Branched five-linear model: linear1 -> linear3 -> linear5 forms a deep
    branch and linear2 -> linear4 a shallow one, both fed from the input.
    Used to exercise BFS-level assignment and leaf/top detection."""

    def __init__(self, dim: int):
        super().__init__()
        self.linear1 = torch.nn.Linear(dim, dim)
        self.linear2 = torch.nn.Linear(dim, dim)
        self.linear3 = torch.nn.Linear(dim, dim)
        self.linear4 = torch.nn.Linear(dim, dim)
        self.linear5 = torch.nn.Linear(dim, dim)

    def forward(self, x):
        # call order is significant: the test unpacks graph nodes positionally
        branch_a = self.linear1(x)
        branch_b = self.linear2(x)
        deep_a = self.linear3(branch_a)
        out_b = self.linear4(branch_b)
        out_a = self.linear5(deep_a)
        return out_b, out_a
@clear_cache_before_run()
def test_graph_manipulation():
    """Check get_leaf, get_top and assign_bfs_level_to_nodes on the branched MLP."""
    tracer = ColoTracer()
    graph = tracer.trace(MLP(4))

    # positional node order: placeholder, the five linears, output
    x, l1, l2, l3, l4, l5, output = list(graph.nodes)
    leaf_nodes = set(get_leaf(graph))
    top_nodes = set(get_top(graph))
    expected_levels = {x: None, l1: 0, l2: 0, l3: 1, l4: 1, l5: 2, output: None}

    assign_bfs_level_to_nodes(graph)

    assert leaf_nodes == {l4, l5}
    assert top_nodes == {l1, l2}
    for node in graph.nodes:
        if node.op in ("placeholder", "output"):
            # input/output nodes are excluded from BFS levelling
            assert not hasattr(node, "bfs_level")
        else:
            assert node.bfs_level == expected_levels[node]
if __name__ == "__main__":
    # direct invocation without pytest
    test_graph_manipulation()
| python | Apache-2.0 | b1915d2889543949eb5b610241f1515c73df5059 | 2026-01-04T14:40:19.002665Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.