repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/engine/protocol.py | vllm/engine/protocol.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from abc import ABC, abstractmethod
from collections.abc import AsyncGenerator, Iterable, Mapping
from typing import Any
from vllm.config import ModelConfig, VllmConfig
from vllm.inputs.data import PromptType
from vllm.lora.request import LoRARequest
from vllm.outputs import PoolingRequestOutput, RequestOutput
from vllm.plugins.io_processors import IOProcessor
from vllm.pooling_params import PoolingParams
from vllm.sampling_params import SamplingParams
from vllm.tasks import SupportedTask
from vllm.tokenizers import TokenizerLike
from vllm.v1.engine import EngineCoreRequest
from vllm.v1.engine.input_processor import InputProcessor
class EngineClient(ABC):
    """Protocol class for Clients to Engine"""

    # Concrete clients are expected to expose these configuration and
    # processing objects.
    vllm_config: VllmConfig
    model_config: ModelConfig
    input_processor: InputProcessor
    io_processor: IOProcessor | None

    # Whether the engine's background loop is currently alive.
    @property
    @abstractmethod
    def is_running(self) -> bool: ...

    # Whether the engine has been stopped.
    @property
    @abstractmethod
    def is_stopped(self) -> bool: ...

    # Whether the engine is in an errored state.
    @property
    @abstractmethod
    def errored(self) -> bool: ...

    # The exception surfaced to callers once the engine is unusable.
    @property
    @abstractmethod
    def dead_error(self) -> BaseException: ...

    @abstractmethod
    def generate(
        self,
        prompt: EngineCoreRequest | PromptType,
        sampling_params: SamplingParams,
        request_id: str,
        *,
        prompt_text: str | None = None,
        lora_request: LoRARequest | None = None,
        tokenization_kwargs: dict[str, Any] | None = None,
        trace_headers: Mapping[str, str] | None = None,
        priority: int = 0,
        data_parallel_rank: int | None = None,
    ) -> AsyncGenerator[RequestOutput, None]:
        """Generate outputs for a request.

        Args:
            prompt: The prompt (or an already-built ``EngineCoreRequest``).
            sampling_params: Sampling parameters for this request.
            request_id: Unique id identifying the request.
            prompt_text: Optional raw prompt text accompanying the request.
            lora_request: Optional LoRA adapter to apply.
            tokenization_kwargs: Extra kwargs forwarded to tokenization.
            trace_headers: Optional tracing headers to propagate.
            priority: Scheduling priority of the request (default 0).
            data_parallel_rank: Optional data-parallel rank to route to.
        """
        ...

    @abstractmethod
    def encode(
        self,
        prompt: PromptType,
        pooling_params: PoolingParams,
        request_id: str,
        lora_request: LoRARequest | None = None,
        trace_headers: Mapping[str, str] | None = None,
        priority: int = 0,
        truncate_prompt_tokens: int | None = None,
        tokenization_kwargs: dict[str, Any] | None = None,
    ) -> AsyncGenerator[PoolingRequestOutput, None]:
        """Generate outputs for a request from a pooling model.

        NOTE: truncate_prompt_tokens is deprecated in v0.14.
        TODO: Remove this argument in v0.15.
        """
        ...

    @abstractmethod
    async def abort(self, request_id: str | Iterable[str]) -> None:
        """Abort a request.

        Args:
            request_id: The unique id of the request,
                or an iterable of such ids.
        """
        ...

    @abstractmethod
    async def get_tokenizer(self) -> TokenizerLike:
        """Get the tokenizer"""
        ...

    # Whether request tracing is enabled on the engine.
    @abstractmethod
    async def is_tracing_enabled(self) -> bool: ...

    # Trigger emission of the engine's periodic statistics.
    @abstractmethod
    async def do_log_stats(self) -> None: ...

    @abstractmethod
    async def check_health(self) -> None:
        """Raise if unhealthy"""
        ...

    @abstractmethod
    async def start_profile(self) -> None:
        """Start profiling the engine"""
        ...

    @abstractmethod
    async def stop_profile(self) -> None:
        """Stop profiling the engine"""
        ...

    @abstractmethod
    async def reset_mm_cache(self) -> None:
        """Reset the multi-modal cache"""
        ...

    @abstractmethod
    async def reset_prefix_cache(
        self, reset_running_requests: bool = False, reset_connector: bool = False
    ) -> bool:
        """Reset the prefix cache and optionally any configured connector cache"""
        ...

    @abstractmethod
    async def sleep(self, level: int = 1) -> None:
        """Sleep the engine"""
        ...

    @abstractmethod
    async def wake_up(self, tags: list[str] | None = None) -> None:
        """Wake up the engine"""
        ...

    @abstractmethod
    async def is_sleeping(self) -> bool:
        """Check whether the engine is sleeping"""
        ...

    @abstractmethod
    async def add_lora(self, lora_request: LoRARequest) -> bool:
        """Load a new LoRA adapter into the engine for future requests."""
        ...

    @abstractmethod
    async def pause_generation(
        self,
        *,
        wait_for_inflight_requests: bool = False,
        clear_cache: bool = True,
    ) -> None:
        """Pause new generation/encoding requests.

        Args:
            wait_for_inflight_requests: When ``True`` waits for in-flight requests
                to finish before pausing. When ``False`` (default), aborts in-flight
                requests immediately.
            clear_cache: Whether to clear KV and prefix caches after draining.
        """
        ...

    @abstractmethod
    async def resume_generation(self) -> None:
        """Resume accepting generation/encoding requests."""
        ...

    @abstractmethod
    async def is_paused(self) -> bool:
        """Return whether the engine is currently paused."""
        ...

    # The methods below are optional capabilities: the defaults raise
    # NotImplementedError and subclasses override them where supported.
    async def scale_elastic_ep(
        self, new_data_parallel_size: int, drain_timeout: int = 300
    ) -> None:
        """Scale the engine"""
        raise NotImplementedError

    async def collective_rpc(
        self,
        method: str,
        timeout: float | None = None,
        args: tuple = (),
        kwargs: dict | None = None,
    ):
        """Perform a collective RPC call to the given path."""
        raise NotImplementedError

    async def get_supported_tasks(self) -> tuple[SupportedTask, ...]:
        """Get supported tasks"""
        raise NotImplementedError
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/engine/async_llm_engine.py | vllm/engine/async_llm_engine.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.v1.engine.async_llm import AsyncLLM

# Backwards-compatibility alias: the legacy ``AsyncLLMEngine`` name now
# resolves to the V1 ``AsyncLLM`` implementation.
AsyncLLMEngine = AsyncLLM  # type: ignore
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/logging_utils/lazy.py | vllm/logging_utils/lazy.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
from typing import Any
class lazy:
    """Defer evaluation of a zero-argument callable until it is rendered.

    Intended for expensive log arguments: the wrapped factory only runs
    when the logging framework actually formats the message.
    """

    __slots__ = ("_factory",)

    def __init__(self, factory: Callable[[], Any]) -> None:
        self._factory = factory

    def __str__(self) -> str:
        # Evaluate on demand, then stringify whatever the factory produced.
        produced = self._factory()
        return str(produced)

    def __repr__(self) -> str:
        # Same rendering as str(); repr carries no extra detail here.
        return self.__str__()
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/logging_utils/formatter.py | vllm/logging_utils/formatter.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import logging
from pathlib import Path
from vllm import envs
class NewLineFormatter(logging.Formatter):
    """Adds logging prefix to newlines to align multi-line messages."""

    def __init__(self, fmt, datefmt=None, style="%"):
        super().__init__(fmt, datefmt, style)
        # Relative-path display is only enabled in DEBUG mode since it costs
        # a Path.resolve() per record.
        self.use_relpath = envs.VLLM_LOGGING_LEVEL == "DEBUG"
        if self.use_relpath:
            # Repository root: three levels up from vllm/logging_utils/formatter.py.
            self.root_dir = Path(__file__).resolve().parent.parent.parent

    def format(self, record):
        def shrink_path(relpath: Path) -> str:
            """
            Shortens a file path for logging display:
            - Removes leading 'vllm' folder if present.
            - If path starts with 'v1',
              keeps the first two and last two levels,
              collapsing the middle as '...'.
            - Otherwise, keeps the first and last two levels,
              collapsing the middle as '...'.
            - If the path is short, returns it as-is.
            - Examples:
              vllm/model_executor/layers/quantization/utils/fp8_utils.py ->
              model_executor/.../quantization/utils/fp8_utils.py
              vllm/model_executor/layers/quantization/awq.py ->
              model_executor/layers/quantization/awq.py
              vllm/v1/attention/backends/mla/common.py ->
              v1/attention/backends/mla/common.py
            Args:
                relpath (Path): The relative path to be shortened.
            Returns:
                str: The shortened path string for display.
            """
            parts = list(relpath.parts)
            new_parts = []
            # Drop the leading package folder.
            if parts and parts[0] == "vllm":
                parts = parts[1:]
            if parts and parts[0] == "v1":
                # Keep two leading levels for v1 paths, one otherwise.
                new_parts += parts[:2]
                parts = parts[2:]
            elif parts:
                new_parts += parts[:1]
                parts = parts[1:]
            if len(parts) > 2:
                # Collapse the middle of a deep path, keeping the tail.
                new_parts += ["..."] + parts[-2:]
            else:
                new_parts += parts
            return "/".join(new_parts)

        if self.use_relpath:
            abs_path = getattr(record, "pathname", None)
            if abs_path:
                try:
                    relpath = Path(abs_path).resolve().relative_to(self.root_dir)
                except Exception:
                    # Record originates outside the repo root (e.g. from
                    # site-packages); fall back to the bare filename.
                    relpath = Path(record.filename)
            else:
                relpath = Path(record.filename)
            record.fileinfo = shrink_path(relpath)
        else:
            record.fileinfo = record.filename
        msg = super().format(record)
        if record.message != "":
            # Everything before the message text is the static prefix;
            # re-insert it after each newline so continuation lines align.
            parts = msg.split(record.message)
            msg = msg.replace("\n", "\r\n" + parts[0])
        return msg
class ColoredFormatter(NewLineFormatter):
    """Adds ANSI color codes to log levels for terminal output.

    Static elements (timestamp and file info) are tinted grey by rewriting
    the format string once at construction time; the level name is colored
    per record because its color depends on severity.
    """

    # ANSI color codes, keyed by level name.
    COLORS = {
        "DEBUG": "\033[37m",  # White
        "INFO": "\033[32m",  # Green
        "WARNING": "\033[33m",  # Yellow
        "ERROR": "\033[31m",  # Red
        "CRITICAL": "\033[35m",  # Magenta
    }
    GREY = "\033[90m"  # Grey for timestamp and file info
    RESET = "\033[0m"

    def __init__(self, fmt, datefmt=None, style="%"):
        if fmt:
            # Wrap each static token in grey once, up front.
            for token in ("%(asctime)s", "[%(fileinfo)s:%(lineno)d]"):
                fmt = fmt.replace(token, f"{self.GREY}{token}{self.RESET}")
        super().__init__(fmt, datefmt, style)

    def format(self, record):
        saved_levelname = record.levelname
        color_code = self.COLORS.get(saved_levelname)
        if color_code is not None:
            record.levelname = f"{color_code}{saved_levelname}{self.RESET}"
        try:
            return super().format(record)
        finally:
            # Records can be shared between handlers; always restore the
            # uncolored level name.
            record.levelname = saved_levelname
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/logging_utils/__init__.py | vllm/logging_utils/__init__.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.logging_utils.formatter import ColoredFormatter, NewLineFormatter
from vllm.logging_utils.lazy import lazy
from vllm.logging_utils.log_time import logtime
__all__ = [
"NewLineFormatter",
"ColoredFormatter",
"lazy",
"logtime",
]
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/logging_utils/dump_input.py | vllm/logging_utils/dump_input.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import contextlib
import enum
import json
import torch
from vllm.config import VllmConfig
from vllm.logger import init_logger
from vllm.v1.core.sched.output import SchedulerOutput
from vllm.v1.metrics.stats import SchedulerStats
from vllm.version import __version__ as VLLM_VERSION
logger = init_logger(__name__)
def prepare_object_to_dump(obj) -> str:
    """Recursively render ``obj`` as a compact, JSON-ish debug string.

    Tensors are summarized by metadata only (shape/device/dtype) so no
    activation or weight data leaks into logs and dumping works even after
    a CUDA runtime crash.
    """
    if isinstance(obj, str):
        return f"'{obj}'"  # Quote strings to set them apart from scalars
    elif isinstance(obj, dict):
        # Join a generator (the original used a *set* comprehension, which
        # made key ordering nondeterministic under hash randomization);
        # this preserves the dict's insertion order.
        dict_str = ", ".join(
            f"{str(k)}: {prepare_object_to_dump(v)}" for k, v in obj.items()
        )
        return f"{{{dict_str}}}"
    elif isinstance(obj, (list, tuple)):
        return f"[{', '.join(prepare_object_to_dump(v) for v in obj)}]"
    elif isinstance(obj, set):
        return f"[{', '.join(prepare_object_to_dump(v) for v in obj)}]"
    elif isinstance(obj, enum.Enum):
        return repr(obj)
    elif isinstance(obj, torch.Tensor):
        # We only print the 'draft' of the tensor to not expose sensitive data
        # and to get some metadata in case of CUDA runtime crashed
        return f"Tensor(shape={obj.shape}, device={obj.device},dtype={obj.dtype})"
    elif hasattr(obj, "anon_repr"):
        return obj.anon_repr()
    elif hasattr(obj, "__dict__"):
        items = obj.__dict__.items()
        dict_str = ", ".join(
            f"{str(k)}={prepare_object_to_dump(v)}" for k, v in items
        )
        return f"{type(obj).__name__}({dict_str})"
    else:
        # Hacky way to make sure we can serialize the object in JSON format
        try:
            return json.dumps(obj)
        except (TypeError, OverflowError):
            return repr(obj)
def dump_engine_exception(
    config: VllmConfig,
    scheduler_output: SchedulerOutput,
    scheduler_stats: SchedulerStats | None,
):
    """Best-effort dump of engine state for debugging; never raises."""
    try:
        _dump_engine_exception(config, scheduler_output, scheduler_stats)
    except Exception:
        # Swallow everything: emitting extra diagnostics must never mask
        # the original engine failure.
        pass
def _dump_engine_exception(
    config: VllmConfig,
    scheduler_output: SchedulerOutput,
    scheduler_stats: SchedulerStats | None,
):
    """Log config, scheduler output, and stats for post-mortem debugging."""
    logger.error(
        "Dumping input data for V1 LLM engine (v%s) with config: %s, ",
        VLLM_VERSION,
        config,
    )
    try:
        # prepare_object_to_dump renders nested objects while hiding tensor
        # contents (only shape/device/dtype metadata is emitted).
        dump_obj = prepare_object_to_dump(scheduler_output)
        logger.error("Dumping scheduler output for model execution: %s", dump_obj)
        if scheduler_stats:
            logger.error("Dumping scheduler stats: %s", scheduler_stats)
    except Exception:
        logger.exception("Error preparing object to dump")
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/logging_utils/log_time.py | vllm/logging_utils/log_time.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Provides a timeslice logging decorator
"""
import functools
import time
def logtime(logger, msg=None):
    """Decorator factory that debug-logs how long each call takes.

    Always place it beneath other decorators so it measures the function
    itself rather than another wrapper.
    """

    def _inner(func):
        @functools.wraps(func)
        def _wrapper(*args, **kwargs):
            started = time.perf_counter()
            outcome = func(*args, **kwargs)
            duration = time.perf_counter() - started
            if msg is None:
                label = f"Function '{func.__module__}.{func.__qualname__}'"
            else:
                label = msg
            logger.debug("%s: Elapsed time %.7f secs", label, duration)
            return outcome

        return _wrapper

    return _inner
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/tokenizers/deepseek_v32_encoding.py | vllm/tokenizers/deepseek_v32_encoding.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# copy from https://huggingface.co/deepseek-ai/DeepSeek-V3.2/blob/main/encoding/encoding_dsv32.py
import copy
import json
from typing import Any
import regex as re
# flake8: noqa: E501
# System-prompt template describing the DSML tool-calling protocol to the
# model.  Placeholders are filled in by render_tools().
TOOLS_SYSTEM_TEMPLATE = """## Tools
You have access to a set of tools you can use to answer the user's question.
You can invoke functions by writing a "<{dsml_token}function_calls>" block like the following as part of your reply to the user:
<{dsml_token}function_calls>
<{dsml_token}invoke name="$FUNCTION_NAME">
<{dsml_token}parameter name="$PARAMETER_NAME" string="true|false">$PARAMETER_VALUE</{dsml_token}parameter>
...
</{dsml_token}invoke>
<{dsml_token}invoke name="$FUNCTION_NAME2">
...
</{dsml_token}invoke>
</{dsml_token}function_calls>
String and scalar parameters should be specified as is without any escaping or quotes, while lists and objects should use JSON format. The "string" attribute should be set to "true" for string type parameters and "false" for other types (numbers, booleans, arrays, objects).
If the thinking_mode is enabled, then after function results you should strongly consider outputting a thinking block. Here is an example:
<{dsml_token}function_calls>
...
</{dsml_token}function_calls>
<function_results>
...
</function_results>
{thinking_start_token}...thinking about results{thinking_end_token}
Here are the functions available in JSONSchema format:
<functions>
{tool_schemas}
</functions>
"""

# Special tokens of the DeepSeek-V3.2 chat format.
bos_token: str = "<|begin▁of▁sentence|>"
eos_token: str = "<|end▁of▁sentence|>"
thinking_start_token: str = "<think>"
thinking_end_token: str = "</think>"
# Tag-namespace marker used by DSML (DeepSeek markup) elements.
dsml_token: str = "|DSML|"

# Per-role message templates used by render_message().
system_msg_template: str = "{content}"
user_msg_template: str = "<|User|>{content}<|Assistant|>"
assistant_msg_template: str = "{reasoning}{content}{tool_calls}<|end▁of▁sentence|>"
thinking_template = "{reasoning_content}"
response_format_template: str = "## Response Format:\n\nYou MUST strictly adhere to the following schema to reply:\n{schema}"
tool_call_template: str = (
    '<{dsml_token}invoke name="{name}">\n{arguments}\n</{dsml_token}invoke>'
)
tool_calls_template = (
    "<{dsml_token}function_calls>\n{tool_calls}\n</{dsml_token}function_calls>"
)
tool_output_template: str = "\n<result>{content}</result>"
def to_json(value: Any) -> str:
    """Serialize ``value`` to JSON.

    Prefers raw (non-escaped) unicode output; if that fails for any reason,
    retries with ASCII escaping enabled.
    """
    try:
        serialized = json.dumps(value, ensure_ascii=False)
    except Exception:
        serialized = json.dumps(value, ensure_ascii=True)
    return serialized
def tools_from_openai_format(tools):
    """Strip the OpenAI ``{"type": ..., "function": ...}`` wrapper from tools."""
    extracted = []
    for tool in tools:
        extracted.append(tool["function"])
    return extracted
def tool_calls_from_openai_format(tool_calls):
    """Convert OpenAI-style tool calls to internal ``{name, arguments}`` dicts."""
    converted = []
    for call in tool_calls:
        fn = call["function"]
        converted.append({"name": fn["name"], "arguments": fn["arguments"]})
    return converted
def tool_calls_to_openai_format(tool_calls):
    """Wrap internal ``{name, arguments}`` dicts in OpenAI tool-call structure."""
    wrapped = []
    for call in tool_calls:
        wrapped.append(
            {
                "type": "function",
                "function": {
                    "name": call["name"],
                    "arguments": call["arguments"],
                },
            }
        )
    return wrapped
def encode_arguments_to_dsml(tool_call: dict[str, str]) -> str:
    """Render a tool call's arguments as DSML ``<parameter>`` elements.

    String values are emitted verbatim with ``string="true"``; all other
    values are JSON-encoded with ``string="false"``.
    """
    template = """<{dsml_token}parameter name="{key}" string="{is_str}">{value}</{dsml_token}parameter>"""
    raw_args = tool_call["arguments"]
    if isinstance(raw_args, str):
        # Arguments may arrive pre-serialized; normalize to a dict first.
        raw_args = json.loads(raw_args)
    rendered = []
    for key, val in raw_args.items():
        val_is_string = isinstance(val, str)
        rendered.append(
            template.format(
                dsml_token=dsml_token,
                key=key,
                is_str="true" if val_is_string else "false",
                value=val if val_is_string else to_json(val),
            )
        )
    return "\n".join(rendered)
def decode_dsml_to_arguments(
    tool_name: str, tool_args: dict[str, tuple[str, str]]
) -> dict[str, str]:
    """Reassemble DSML parameters into a JSON arguments string.

    ``tool_args`` maps parameter name to ``(value, is_string)`` where
    ``is_string`` is the literal ``"true"``/``"false"`` attribute text.
    Non-string values are assumed to already be valid JSON fragments.
    """
    pairs = []
    for key, (value, is_str) in tool_args.items():
        encoded = to_json(value) if is_str == "true" else value
        pairs.append(f"{to_json(key)}: {encoded}")
    arguments_json = "{" + ", ".join(pairs) + "}"
    return dict(name=tool_name, arguments=arguments_json)
def render_tools(tools: list[dict[str, str | dict[str, Any]]]) -> str:
    """Render the tools section of the system prompt from tool JSON schemas."""
    schemas = "\n".join(to_json(tool) for tool in tools)
    return TOOLS_SYSTEM_TEMPLATE.format(
        tool_schemas=schemas,
        dsml_token=dsml_token,
        thinking_start_token=thinking_start_token,
        thinking_end_token=thinking_end_token,
    )
def find_last_user_index(messages: list[dict[str, Any]]) -> int:
    """Index of the last user/developer message, or -1 when there is none."""
    for idx in reversed(range(len(messages))):
        if messages[idx].get("role") in ("user", "developer"):
            return idx
    return -1
def render_message(
    index: int, messages: list[dict[str, Any]], thinking_mode: str
) -> str:
    """Render ``messages[index]`` into its prompt-string fragment.

    The full ``messages`` list is needed (not just the one message) because
    tool outputs must be matched to the preceding assistant's tool calls,
    and the thinking start/end token choice depends on the position of the
    last user/developer message.  Raises AssertionError on malformed input.
    """
    assert 0 <= index < len(messages)
    assert thinking_mode in ["chat", "thinking"], (
        f"Invalid thinking_mode `{thinking_mode}`"
    )
    prompt = ""
    msg = messages[index]
    last_user_idx = find_last_user_index(messages)
    role = msg.get("role")
    content = msg.get("content")
    tools = msg.get("tools")
    response_format = msg.get("response_format")
    tool_calls = msg.get("tool_calls")
    # Accept either field name for the reasoning text.
    reasoning_content = msg.get("reasoning") or msg.get("reasoning_content")
    if tools:
        tools = tools_from_openai_format(tools)
    if tool_calls:
        tool_calls = tool_calls_from_openai_format(tool_calls)
    if role == "system":
        prompt += system_msg_template.format(content=content or "")
        if tools:
            prompt += "\n\n" + render_tools(tools)
        if response_format:
            prompt += "\n\n" + response_format_template.format(
                schema=to_json(response_format)
            )
    elif role == "developer":
        assert content, f"Invalid message for role `{role}`: {msg}"
        # Developer messages are rendered as a user turn that embeds the
        # tools / response-format sections plus the actual user text.
        content_developer = ""
        if tools:
            content_developer += "\n\n" + render_tools(tools)
        if response_format:
            content_developer += "\n\n" + response_format_template.format(
                schema=to_json(response_format)
            )
        content_developer += "\n\n# The user's message is: {}".format(content)
        prompt += user_msg_template.format(content=content_developer)
        # Open a thinking block only after the *last* user turn in
        # thinking mode; otherwise emit a closed (empty) one.
        if index == last_user_idx and thinking_mode == "thinking":
            prompt += thinking_start_token
        else:
            prompt += thinking_end_token
    elif role == "user":
        prompt += user_msg_template.format(content=content)
        if index == last_user_idx and thinking_mode == "thinking":
            prompt += thinking_start_token
        else:
            prompt += thinking_end_token
    elif role == "tool":
        # Walk backwards over the run of tool messages to find the
        # assistant message that issued the calls.
        prev_assistant_idx = index - 1
        assistant_msg = messages[prev_assistant_idx]
        while prev_assistant_idx >= 0 and assistant_msg.get("role") == "tool":
            prev_assistant_idx -= 1
            assistant_msg = messages[prev_assistant_idx]
        assert (
            index == 0
            or prev_assistant_idx >= 0
            and assistant_msg.get("role") == "assistant"
        ), f"Invalid messages at {index}:\n{assistant_msg}"
        # 1-based position of this output within the assistant's calls.
        tool_call_order = index - prev_assistant_idx
        assistant_tool_calls = assistant_msg.get("tool_calls")
        assert assistant_tool_calls and len(assistant_tool_calls) >= tool_call_order, (
            "No tool calls but found tool output"
        )
        if tool_call_order == 1:
            prompt += "\n\n<function_results>"
        prompt += tool_output_template.format(content=content)
        if tool_call_order == len(assistant_tool_calls):
            # Last result closes the block and reopens/closes thinking.
            prompt += "\n</function_results>"
            if index >= last_user_idx and thinking_mode == "thinking":
                prompt += "\n\n" + thinking_start_token
            else:
                prompt += "\n\n" + thinking_end_token
    elif role == "assistant":
        prev_assistant_idx = index
        thinking_part = ""
        tool_calls_content = ""
        if tool_calls:
            tool_calls = [
                tool_call_template.format(
                    dsml_token=dsml_token,
                    name=tool_call.get("name"),
                    arguments=encode_arguments_to_dsml(tool_call),
                )
                for tool_call in tool_calls
            ]
            tool_calls_content += "\n\n" + tool_calls_template.format(
                dsml_token=dsml_token, tool_calls="\n".join(tool_calls)
            )
        summary_content = content or ""
        # Only assistant turns after the last user message keep their
        # reasoning text; earlier ones are rendered without it.
        if thinking_mode == "thinking" and index > last_user_idx:
            assert reasoning_content or tool_calls, (
                f"ThinkingMode: {thinking_mode}, invalid message without reasoning_content/tool_calls `{msg}` after last user message"
            )
            thinking_part = (
                thinking_template.format(reasoning_content=reasoning_content or "")
                + thinking_end_token
            )
        prompt += assistant_msg_template.format(
            reasoning=thinking_part,
            content=summary_content,
            tool_calls=tool_calls_content,
        )
    else:
        raise NotImplementedError(f"Unknown role: {role}")
    return prompt
def drop_thinking_messages(
    messages: list[dict[str, Any]], last_user_idx: int | None = None
) -> list[dict[str, Any]]:
    """Strip reasoning fields from assistant turns before the last user turn.

    Messages at or after the last user/developer turn are kept verbatim, as
    are user/system/tool messages.  Earlier assistant messages are
    shallow-copied with ``reasoning``/``reasoning_content`` removed.
    """
    if last_user_idx is None:
        last_user_idx = find_last_user_index(messages)
    result: list[dict[str, Any]] = []
    for idx, msg in enumerate(messages):
        role = msg.get("role")
        if role == "assistant" and idx < last_user_idx:
            stripped = copy.copy(msg)
            stripped.pop("reasoning_content", None)
            stripped.pop("reasoning", None)
            result.append(stripped)
        elif role in ("user", "system", "tool") or idx >= last_user_idx:
            result.append(msg)
    return result
def encode_messages(
    messages: list[dict[str, Any]],
    thinking_mode: str,
    context: list[dict[str, Any]] | None = None,
    drop_thinking: bool = True,
    add_default_bos_token: bool = True,
) -> str:
    """Render ``messages`` (appended to optional ``context``) into a prompt.

    Only the trailing ``messages`` are rendered, but rendering sees the
    full history.  The BOS token is prepended only when there is no prior
    context and ``add_default_bos_token`` is set.
    """
    prior = context if context else []
    full_messages = prior + messages
    if thinking_mode == "thinking" and drop_thinking:
        full_messages = drop_thinking_messages(full_messages)
    parts: list[str] = []
    if add_default_bos_token and len(prior) == 0:
        parts.append(bos_token)
    offset = len(prior)
    for idx in range(len(messages)):
        parts.append(
            render_message(idx + offset, full_messages, thinking_mode=thinking_mode)
        )
    return "".join(parts)
def _read_until_stop(
index: int, text: str, stop: list[str]
) -> tuple[int, str, None | str]:
min_pos = len(text)
matched_stop = None
for s in stop:
pos = text.find(s, index)
if pos != -1 and pos < min_pos:
min_pos = pos
matched_stop = s
if matched_stop:
content = text[index:min_pos]
return min_pos + len(matched_stop), content, matched_stop
else:
content = text[index:]
return len(text), content, None
def parse_tool_calls(index: int, text: str):
    """Parse a DSML ``function_calls`` block starting at ``index``.

    Returns ``(index, stop_token, tool_calls)`` where ``index`` points just
    past the closing ``function_calls`` tag and each tool call is a
    ``{name, arguments}`` dict.  Raises AssertionError on malformed input;
    no error recovery is attempted.
    """
    tool_calls: list[dict[str, Any]] = []
    stop_token = None
    tool_calls_end_token = f"</{dsml_token}function_calls>"
    while index < len(text):
        # Consume the ">\n" that terminates the previous tag, stopping at the
        # next invoke element or the end of the function_calls block.
        index, _, stop_token = _read_until_stop(
            index, text, [f"<{dsml_token}invoke", tool_calls_end_token]
        )
        assert _ == ">\n", "Tool call format error"
        if stop_token == tool_calls_end_token:
            break
        assert stop_token is not None, "Missing special token"
        index, tool_name_content, stop_token = _read_until_stop(
            index, text, [f"<{dsml_token}parameter", f"</{dsml_token}invoke"]
        )
        p_tool_name = re.findall(
            r'^\s*name="(.*?)">\n$', tool_name_content, flags=re.DOTALL
        )
        assert len(p_tool_name) == 1, "Tool name format error"
        tool_name = p_tool_name[0]
        # Parameter name -> (raw value, "true"/"false" string attribute).
        tool_args: dict[str, tuple[str, str]] = {}
        while stop_token == f"<{dsml_token}parameter":
            index, param_content, stop_token = _read_until_stop(
                index, text, [f"/{dsml_token}parameter"]
            )
            param_kv = re.findall(
                r'^ name="(.*?)" string="(true|false)">(.*?)<$',
                param_content,
                flags=re.DOTALL,
            )
            assert len(param_kv) == 1, "Parameter format error"
            param_name, string, param_value = param_kv[0]
            assert param_name not in tool_args, "Duplicate parameter name"
            tool_args[param_name] = (param_value, string)
            index, content, stop_token = _read_until_stop(
                index, text, [f"<{dsml_token}parameter", f"</{dsml_token}invoke"]
            )
            assert content == ">\n", "Parameter format error"
        tool_call = decode_dsml_to_arguments(tool_name=tool_name, tool_args=tool_args)
        tool_calls.append(tool_call)
    return index, stop_token, tool_calls
# NOTE: This function is designed to parse only correctly
# formatted string and will not attempt to correct malformed output
# that may be generated by the model.
def parse_message_from_completion_text(text: str, thinking_mode: str):
    """Parse a raw completion string into an assistant message dict.

    Splits ``text`` into reasoning content (when ``thinking_mode`` is
    "thinking"), summary content, and any DSML tool calls, returning them
    in OpenAI-style message form.  Raises AssertionError on malformed text.
    """
    summary_content, reasoning_content, tool_calls = "", "", []
    index, stop_token = 0, None
    tool_calls_start_token = f"\n\n<{dsml_token}function_calls"
    is_thinking, is_tool_calling = thinking_mode == "thinking", False
    if is_thinking:
        # Reasoning runs from the start of the text up to the thinking end
        # token; a tool-calls block before it is a format violation.
        index, content_delta, stop_token = _read_until_stop(
            index, text, [thinking_end_token, tool_calls_start_token]
        )
        reasoning_content = content_delta
        assert stop_token == thinking_end_token, "Invalid thinking format"
    index, content_delta, stop_token = _read_until_stop(
        index, text, [eos_token, tool_calls_start_token]
    )
    summary_content = content_delta
    if stop_token == tool_calls_start_token:
        is_tool_calling = True
    else:
        assert stop_token == eos_token, "Invalid summary format"
    if is_tool_calling:
        index, stop_token, tool_calls = parse_tool_calls(index, text)
        index, tool_ends_text, stop_token = _read_until_stop(index, text, [eos_token])
        assert not tool_ends_text, "Unexpected content after tool calls"
    assert len(text) == index and stop_token in [eos_token, None], (
        "Unexpected content at end"
    )
    # Special tokens must never leak into the parsed content fields.
    for sp_token in [
        bos_token,
        eos_token,
        thinking_start_token,
        thinking_end_token,
        dsml_token,
    ]:
        assert sp_token not in summary_content and sp_token not in reasoning_content, (
            "Unexpected special token in content"
        )
    return {
        "role": "assistant",
        "content": summary_content,
        "reasoning_content": reasoning_content,
        # Both field names are populated for caller compatibility.
        "reasoning": reasoning_content,
        "tool_calls": tool_calls_to_openai_format(tool_calls),
    }
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/tokenizers/registry.py | vllm/tokenizers/registry.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import importlib.util
from dataclasses import dataclass, field
from functools import lru_cache
from pathlib import Path
from typing import TYPE_CHECKING
import huggingface_hub
from typing_extensions import TypeVar, assert_never
import vllm.envs as envs
from vllm.logger import init_logger
from vllm.transformers_utils.gguf_utils import (
check_gguf_file,
get_gguf_file_path_from_hf,
is_gguf,
is_remote_gguf,
split_remote_gguf,
)
from vllm.transformers_utils.repo_utils import list_filtered_repo_files
from vllm.utils.import_utils import resolve_obj_by_qualname
from .protocol import TokenizerLike
if TYPE_CHECKING:
from vllm.config.model import ModelConfig, RunnerType
logger = init_logger(__name__)
# Built-in tokenizer modes: mode -> (module name under vllm.tokenizers,
# class name within that module).
_VLLM_TOKENIZERS = {
    "deepseek_v32": ("deepseek_v32", "DeepseekV32Tokenizer"),
    "hf": ("hf", "CachedHfTokenizer"),
    "mistral": ("mistral", "MistralTokenizer"),
}
@dataclass
class _TokenizerRegistry:
    """Maps a tokenizer mode to the (module, class name) implementing it."""

    # Tokenizer mode -> (tokenizer module, tokenizer class)
    tokenizers: dict[str, tuple[str, str]] = field(default_factory=dict)

    def register(self, tokenizer_mode: str, module: str, class_name: str) -> None:
        """Register (or override, with a warning) a tokenizer class for a mode."""
        if tokenizer_mode in self.tokenizers:
            logger.warning(
                "%s.%s is already registered for tokenizer_mode=%r. "
                "It is overwritten by the new one.",
                module,
                class_name,
                tokenizer_mode,
            )
        self.tokenizers[tokenizer_mode] = (module, class_name)

    def load_tokenizer_cls(self, tokenizer_mode: str) -> type[TokenizerLike]:
        """Resolve and import the tokenizer class registered for ``tokenizer_mode``."""
        entry = self.tokenizers.get(tokenizer_mode)
        if entry is None:
            raise ValueError(f"No tokenizer registered for {tokenizer_mode=!r}.")
        module, class_name = entry
        logger.debug_once(f"Loading {class_name} for {tokenizer_mode=!r}")
        return resolve_obj_by_qualname(f"{module}.{class_name}")

    def load_tokenizer(self, tokenizer_mode: str, *args, **kwargs) -> TokenizerLike:
        """Instantiate the registered tokenizer via its ``from_pretrained``."""
        tokenizer_cls = self.load_tokenizer_cls(tokenizer_mode)
        return tokenizer_cls.from_pretrained(*args, **kwargs)
# Process-wide registry, pre-populated with vLLM's built-in tokenizer modes.
TokenizerRegistry = _TokenizerRegistry(
    {
        mode: (f"vllm.tokenizers.{mod_relname}", cls_name)
        for mode, (mod_relname, cls_name) in _VLLM_TOKENIZERS.items()
    }
)
def resolve_tokenizer_args(
    tokenizer_name: str | Path,
    *args,
    runner_type: "RunnerType" = "generate",
    tokenizer_mode: str = "auto",
    **kwargs,
):
    """Normalize the tokenizer name, mode, and kwargs before loading.

    Handles optional ModelScope downloads, GGUF file paths, per-runner
    truncation-side defaults, and resolution of the "auto"/"slow" modes to a
    concrete registered mode.  Returns
    ``(tokenizer_mode, tokenizer_name, args, kwargs)``.
    """
    revision: str | None = kwargs.get("revision")
    download_dir: str | None = kwargs.get("download_dir")
    if envs.VLLM_USE_MODELSCOPE:
        # download model from ModelScope hub,
        # lazy import so that modelscope is not required for normal use.
        from modelscope.hub.snapshot_download import snapshot_download

        # avoid circular import
        from vllm.model_executor.model_loader.weight_utils import get_lock

        # Only set the tokenizer here, model will be downloaded on the workers.
        if not Path(tokenizer_name).exists():
            # Use file lock to prevent multiple processes from
            # downloading the same file at the same time.
            with get_lock(tokenizer_name, download_dir):
                tokenizer_path = snapshot_download(
                    model_id=str(tokenizer_name),
                    cache_dir=download_dir,
                    revision=revision,
                    local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
                    # Ignore weights - we only need the tokenizer.
                    ignore_file_pattern=[".*.pt", ".*.safetensors", ".*.bin"],
                )
            tokenizer_name = tokenizer_path

    # Separate model folder from file path for GGUF models
    if is_gguf(tokenizer_name):
        if check_gguf_file(tokenizer_name):
            kwargs["gguf_file"] = Path(tokenizer_name).name
            tokenizer_name = Path(tokenizer_name).parent
        elif is_remote_gguf(tokenizer_name):
            tokenizer_name, quant_type = split_remote_gguf(tokenizer_name)
            # Get the HuggingFace Hub path for the GGUF file
            gguf_file = get_gguf_file_path_from_hf(
                tokenizer_name,
                quant_type,
                revision=revision,
            )
            kwargs["gguf_file"] = gguf_file

    # Generation/draft runners truncate from the left by default; pooling
    # runners from the right.
    if "truncation_side" not in kwargs:
        if runner_type == "generate" or runner_type == "draft":
            kwargs["truncation_side"] = "left"
        elif runner_type == "pooling":
            kwargs["truncation_side"] = "right"
        else:
            assert_never(runner_type)

    if tokenizer_mode == "slow":
        if kwargs.get("use_fast", False):
            raise ValueError("Cannot use the fast tokenizer in slow tokenizer mode.")
        tokenizer_mode = "hf"
        kwargs["use_fast"] = False

    # Try to use official Mistral tokenizer if possible
    if tokenizer_mode == "auto" and importlib.util.find_spec("mistral_common"):
        allow_patterns = ["tekken.json", "tokenizer.model.v*"]
        files_list = list_filtered_repo_files(
            model_name_or_path=str(tokenizer_name),
            allow_patterns=allow_patterns,
            revision=revision,
        )
        if len(files_list) > 0:
            tokenizer_mode = "mistral"

    # Fallback to HF tokenizer
    if tokenizer_mode == "auto":
        tokenizer_mode = "hf"

    return tokenizer_mode, tokenizer_name, args, kwargs
# Memoized variant; resolution is deterministic for a given argument set.
cached_resolve_tokenizer_args = lru_cache(resolve_tokenizer_args)


def tokenizer_args_from_config(config: "ModelConfig", **kwargs):
    """Resolve tokenizer args from the tokenizer-related fields of ``config``."""
    return cached_resolve_tokenizer_args(
        config.tokenizer,
        runner_type=config.runner_type,
        tokenizer_mode=config.tokenizer_mode,
        revision=config.tokenizer_revision,
        trust_remote_code=config.trust_remote_code,
        **kwargs,
    )
_T = TypeVar("_T", bound=TokenizerLike, default=TokenizerLike)
def get_tokenizer(
tokenizer_name: str | Path,
*args,
tokenizer_cls: type[_T] = TokenizerLike, # type: ignore[assignment]
trust_remote_code: bool = False,
revision: str | None = None,
download_dir: str | None = None,
**kwargs,
) -> _T:
"""Gets a tokenizer for the given model name via HuggingFace or ModelScope."""
tokenizer_mode, tokenizer_name, args, kwargs = cached_resolve_tokenizer_args(
tokenizer_name,
*args,
trust_remote_code=trust_remote_code,
revision=revision,
download_dir=download_dir,
**kwargs,
)
if tokenizer_cls == TokenizerLike:
tokenizer_cls_ = TokenizerRegistry.load_tokenizer_cls(tokenizer_mode)
else:
tokenizer_cls_ = tokenizer_cls
tokenizer = tokenizer_cls_.from_pretrained(tokenizer_name, *args, **kwargs)
if not tokenizer.is_fast:
logger.warning(
"Using a slow tokenizer. This might cause a significant "
"slowdown. Consider using a fast tokenizer instead."
)
return tokenizer # type: ignore
cached_get_tokenizer = lru_cache(get_tokenizer)
def cached_tokenizer_from_config(model_config: "ModelConfig", **kwargs):
if model_config.skip_tokenizer_init:
return None
return cached_get_tokenizer(
model_config.tokenizer,
runner_type=model_config.runner_type,
tokenizer_mode=model_config.tokenizer_mode,
revision=model_config.tokenizer_revision,
trust_remote_code=model_config.trust_remote_code,
**kwargs,
)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/tokenizers/detokenizer_utils.py | vllm/tokenizers/detokenizer_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.tokenizers import TokenizerLike
def _replace_none_with_empty(tokens: list[str | None]):
for i, token in enumerate(tokens):
if token is None:
tokens[i] = ""
def _convert_tokens_to_string_with_added_encoders(
tokenizer: TokenizerLike,
output_tokens: list[str],
skip_special_tokens: bool,
spaces_between_special_tokens: bool,
) -> str:
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/tokenization_utils.py#L921
# NOTE(woosuk): The following code is slow because it runs a for loop over
# the output_tokens. In Python, running a for loop over a list can be slow
# even when the loop body is very simple.
# Performance improvements: avoid repeated attribute and function lookups;
# localize frequently used objects;
sub_texts: list[str] = []
current_sub_text: list[str] = []
convert_tokens_to_string = tokenizer.convert_tokens_to_string
added_vocab_set = set(tokenizer.get_added_vocab())
all_special_tokens = (
set(tokenizer.all_special_tokens) if skip_special_tokens else ()
)
for token in output_tokens:
# Use precomputed set for skip-special check
if token in all_special_tokens:
continue
if token in added_vocab_set:
if current_sub_text:
sub_texts.append(convert_tokens_to_string(current_sub_text))
current_sub_text.clear()
sub_texts.append(token)
else:
current_sub_text.append(token)
if current_sub_text:
sub_texts.append(convert_tokens_to_string(current_sub_text))
if spaces_between_special_tokens:
return " ".join(sub_texts)
return "".join(sub_texts)
# 5 is an arbitrary value that should work for all
# tokenizers (bigger = more conservative).
INITIAL_INCREMENTAL_DETOKENIZATION_OFFSET = 5
def convert_prompt_ids_to_tokens(
tokenizer: TokenizerLike,
prompt_ids: list[int],
skip_special_tokens: bool = False,
) -> tuple[list[str], int, int]:
"""Converts the prompt ids to tokens and returns the tokens and offsets
for incremental detokenization.
Note that not all tokens are converted to strings. Only the tokens that
are necessary for incremental detokenization are converted to strings.
"""
# We do not need to convert the whole prompt to tokens.
# Offset a little more in case we have special tokens.
new_tokens = tokenizer.convert_ids_to_tokens(
prompt_ids[-INITIAL_INCREMENTAL_DETOKENIZATION_OFFSET - 2 :],
skip_special_tokens=skip_special_tokens,
)
read_offset = len(new_tokens)
prefix_offset = max(read_offset - INITIAL_INCREMENTAL_DETOKENIZATION_OFFSET, 0)
# This is required to guard against out-of-vocab prompt token ids
_replace_none_with_empty(new_tokens) # type: ignore[arg-type]
return new_tokens, prefix_offset, read_offset
def convert_ids_list_to_tokens(
tokenizer: TokenizerLike,
token_ids: list[int],
) -> list[str]:
"""Detokenize the input ids individually.
Args:
tokenizer: tokenizer used by model under test
token_ids: convert these tokens (Python list form)
Returns:
Python list of token string representations
"""
token_str_lst = []
for token_id in token_ids:
# use default skip_special_tokens.
token_str = tokenizer.decode([token_id])
if token_str is None:
token_str = ""
token_str_lst.append(token_str)
return token_str_lst
# Based on
# https://github.com/huggingface/text-generation-inference/blob/v0.9.4/server/text_generation_server/models/model.py#L62C9-L62C15
# under Apache 2.0 license
def detokenize_incrementally(
tokenizer: TokenizerLike,
all_input_ids: list[int],
prev_tokens: list[str] | None,
prefix_offset: int,
read_offset: int,
skip_special_tokens: bool = False,
spaces_between_special_tokens: bool = True,
) -> tuple[list[str], str, int, int]:
"""Detokenizes the input ids incrementally and returns the new tokens
and the new text.
If `prev_tokens` is None, this function will convert the input ids to
tokens and return the tokens and the new text. Otherwise, it will return the
new tokens and the new text.
This function will also return the new prefix offset and the new read
offset to be used in the next iteration.
The offsets are necessary to defeat cleanup algorithms in the decode which
decide to add a space or not depending on the surrounding ids.
Args:
tokenizer: The tokenizer to use.
all_input_ids: The input ids. The last id is the new token id.
prev_tokens: The previous tokens. If None, this function will convert
the input ids to tokens and return the tokens and the new text.
prefix_offset: The prefix offset.
read_offset: The read offset.
skip_special_tokens: Whether to skip special tokens.
spaces_between_special_tokens: Whether to add spaces between special
tokens.
"""
new_token_id = all_input_ids[-1]
# This is the first iteration for this sequence
is_first_iter = prev_tokens is None
if is_first_iter:
(prev_tokens, prefix_offset, read_offset) = convert_prompt_ids_to_tokens(
tokenizer, all_input_ids[:-1], skip_special_tokens=skip_special_tokens
)
assert prev_tokens is not None
# If the new token id is out of bounds, return an empty string.
if 0 <= new_token_id < len(tokenizer):
# Put new_token_id in a list so skip_special_tokens is respected
new_tokens = tokenizer.convert_ids_to_tokens(
[new_token_id], skip_special_tokens=skip_special_tokens
)
if isinstance(new_tokens, str):
new_tokens = [new_tokens]
else:
new_tokens = [""]
output_tokens = prev_tokens + new_tokens
# If this is the first iteration, return all tokens.
if is_first_iter:
new_tokens = output_tokens
# The prefix text is necessary only to defeat cleanup algorithms in
# the decode which decide to add a space or not depending on the
# surrounding ids.
if tokenizer.is_fast or not tokenizer.get_added_vocab():
prefix_text = tokenizer.convert_tokens_to_string(
output_tokens[prefix_offset:read_offset]
)
new_text = tokenizer.convert_tokens_to_string(output_tokens[prefix_offset:])
else:
prefix_text = _convert_tokens_to_string_with_added_encoders(
tokenizer,
output_tokens[prefix_offset:read_offset],
skip_special_tokens=skip_special_tokens,
spaces_between_special_tokens=spaces_between_special_tokens,
)
new_text = _convert_tokens_to_string_with_added_encoders(
tokenizer,
output_tokens[prefix_offset:],
skip_special_tokens=skip_special_tokens,
spaces_between_special_tokens=spaces_between_special_tokens,
)
if len(new_text) <= len(prefix_text) or new_text.endswith("�"):
# utf-8 char at the end means it's a potential unfinished byte sequence
# from byte fallback tokenization.
# If it's in the middle, it's probably a real invalid id generated
# by the model
return new_tokens, "", prefix_offset, read_offset
new_text = new_text[len(prefix_text) :]
return new_tokens, new_text, read_offset, len(output_tokens)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/tokenizers/hf.py | vllm/tokenizers/hf.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import contextlib
import copy
from pathlib import Path
from typing import TypeAlias
from transformers import AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast
from vllm.transformers_utils.config import get_sentence_transformer_tokenizer_config
from .protocol import TokenizerLike
HfTokenizer: TypeAlias = PreTrainedTokenizer | PreTrainedTokenizerFast
def get_cached_tokenizer(tokenizer: HfTokenizer) -> HfTokenizer:
"""
By default, transformers will recompute multiple tokenizer properties
each time they are called, leading to a significant slowdown.
This proxy caches these properties for faster access.
"""
cached_tokenizer = copy.copy(tokenizer)
tokenizer_all_special_ids = tokenizer.all_special_ids
tokenizer_all_special_tokens = tokenizer.all_special_tokens
tokenizer_vocab = tokenizer.get_vocab()
tokenizer_len = len(tokenizer)
max_token_id = max(tokenizer_vocab.values())
# Some tokenizers (e.g., QwenTokenizer) have special tokens that
# are added and included in the implementation of the vocab_size
# property, but not in get_vocab(); if there is an implementation
# of vocab size, we should take the greater value.
if hasattr(tokenizer, "vocab_size"):
with contextlib.suppress(NotImplementedError):
max_token_id = max(max_token_id, tokenizer.vocab_size)
class CachedTokenizer(tokenizer.__class__): # type: ignore
@property
def all_special_ids(self) -> list[int]:
return tokenizer_all_special_ids
@property
def all_special_tokens(self) -> list[str]:
return tokenizer_all_special_tokens
@property
def max_token_id(self) -> int:
return max_token_id
def get_vocab(self) -> dict[str, int]:
return tokenizer_vocab
def __len__(self) -> int:
return tokenizer_len
def __reduce__(self):
return get_cached_tokenizer, (tokenizer,)
CachedTokenizer.__name__ = f"Cached{tokenizer.__class__.__name__}"
cached_tokenizer.__class__ = CachedTokenizer
return cached_tokenizer
class CachedHfTokenizer(TokenizerLike):
@classmethod
def from_pretrained(
cls,
path_or_repo_id: str | Path,
*args,
trust_remote_code: bool = False,
revision: str | None = None,
download_dir: str | None = None,
**kwargs,
) -> HfTokenizer:
try:
tokenizer = AutoTokenizer.from_pretrained(
path_or_repo_id,
*args,
trust_remote_code=trust_remote_code,
revision=revision,
cache_dir=download_dir,
**kwargs,
)
except ValueError as e:
# If the error pertains to the tokenizer class not existing or not
# currently being imported,
# suggest using the --trust-remote-code flag.
if not trust_remote_code and (
"does not exist or is not currently imported." in str(e)
or "requires you to execute the tokenizer file" in str(e)
):
err_msg = (
"Failed to load the tokenizer. If the tokenizer "
"is a custom tokenizer not yet available in the "
"HuggingFace transformers library, consider "
"setting `trust_remote_code=True` in LLM or using "
"the `--trust-remote-code` flag in the CLI."
)
raise RuntimeError(err_msg) from e
else:
raise e
# The special_tokens in tokenizer should also be
# controlled by do_lower_case in encoder_config
encoder_config = get_sentence_transformer_tokenizer_config(
path_or_repo_id, revision
)
if isinstance(encoder_config, dict) and encoder_config.get(
"do_lower_case", False
):
special_tokens_map = {
k: v.lower() for k, v in tokenizer.special_tokens_map.items()
}
tokenizer.add_special_tokens(special_tokens_map)
return get_cached_tokenizer(tokenizer)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/tokenizers/__init__.py | vllm/tokenizers/__init__.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from .protocol import TokenizerLike
from .registry import (
TokenizerRegistry,
cached_get_tokenizer,
cached_tokenizer_from_config,
get_tokenizer,
)
__all__ = [
"TokenizerLike",
"TokenizerRegistry",
"cached_get_tokenizer",
"get_tokenizer",
"cached_tokenizer_from_config",
]
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/tokenizers/protocol.py | vllm/tokenizers/protocol.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from pathlib import Path
from typing import TYPE_CHECKING, Any, Protocol
if TYPE_CHECKING:
from transformers import BatchEncoding
from vllm.entrypoints.chat_utils import ChatCompletionMessageParam
class TokenizerLike(Protocol):
@classmethod
def from_pretrained(
cls,
path_or_repo_id: str | Path,
*args,
trust_remote_code: bool = False,
revision: str | None = None,
download_dir: str | None = None,
**kwargs,
) -> "TokenizerLike":
raise NotImplementedError
def num_special_tokens_to_add(self) -> int:
raise NotImplementedError
@property
def all_special_tokens(self) -> list[str]:
raise NotImplementedError
@property
def all_special_ids(self) -> list[int]:
raise NotImplementedError
@property
def bos_token_id(self) -> int:
raise NotImplementedError
@property
def eos_token_id(self) -> int:
raise NotImplementedError
@property
def pad_token_id(self) -> int:
raise NotImplementedError
@property
def is_fast(self) -> bool:
raise NotImplementedError
@property
def vocab_size(self) -> int:
raise NotImplementedError
@property
def max_token_id(self) -> int:
raise NotImplementedError
@property
def truncation_side(self) -> str:
raise NotImplementedError
def __hash__(self) -> int:
return hash(id(self))
def __len__(self) -> int:
return self.vocab_size
def __call__(
self,
text: str | list[str],
text_pair: str | None = None,
add_special_tokens: bool = True,
truncation: bool = False,
max_length: int | None = None,
) -> "BatchEncoding":
raise NotImplementedError
def get_vocab(self) -> dict[str, int]:
raise NotImplementedError
def get_added_vocab(self) -> dict[str, int]:
raise NotImplementedError
def encode(
self,
text: str,
truncation: bool | None = None,
max_length: int | None = None,
add_special_tokens: bool = True,
) -> list[int]:
raise NotImplementedError
def apply_chat_template(
self,
messages: list["ChatCompletionMessageParam"],
tools: list[dict[str, Any]] | None = None,
**kwargs,
) -> str | list[int]:
raise NotImplementedError
def convert_tokens_to_string(self, tokens: list[str]) -> str:
raise NotImplementedError
def decode(self, ids: list[int] | int, skip_special_tokens: bool = False) -> str:
raise NotImplementedError
def convert_ids_to_tokens(
self,
ids: list[int],
skip_special_tokens: bool = False,
) -> list[str]:
raise NotImplementedError
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/tokenizers/deepseek_v32.py | vllm/tokenizers/deepseek_v32.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from pathlib import Path
from typing import Any
from transformers import BatchEncoding
from vllm.entrypoints.chat_utils import ChatCompletionMessageParam
from .deepseek_v32_encoding import encode_messages
from .hf import CachedHfTokenizer
from .protocol import TokenizerLike
class DeepseekV32Tokenizer(CachedHfTokenizer):
@classmethod
def from_pretrained(
cls,
path_or_repo_id: str | Path,
*args,
trust_remote_code: bool = False,
revision: str | None = None,
download_dir: str | None = None,
**kwargs,
) -> "TokenizerLike":
tokenizer = super().from_pretrained(
path_or_repo_id,
*args,
trust_remote_code=trust_remote_code,
revision=revision,
download_dir=download_dir,
**kwargs,
)
return DeepseekV32Tokenizer(tokenizer)
def __init__(self, tokenizer: TokenizerLike) -> None:
super().__init__()
self.tokenizer = tokenizer
self.name_or_path = getattr(tokenizer, "name_or_path", "")
self._added_vocab = self.tokenizer.get_added_vocab()
self._added_vocab_size = len(self._added_vocab)
def apply_chat_template(
self,
messages: list["ChatCompletionMessageParam"],
tools: list[dict[str, Any]] | None = None,
**kwargs,
) -> str | list[int]:
thinking = kwargs.get("thinking", False)
enable_thinking = kwargs.get("enable_thinking", False)
thinking = thinking or enable_thinking
thinking_mode = "thinking"
if not thinking:
thinking_mode = "chat"
conversation = kwargs.get("conversation", messages)
messages = conversation.copy()
if tools is not None and len(tools) > 0:
messages.insert(0, {"role": "system"})
messages[0]["tools"] = tools # type: ignore[typeddict-unknown-key]
# Historical reasoning content is dropped when a new user message is introduced
drop_thinking = messages[-1]["role"] == "user"
encode_config = dict(thinking_mode=thinking_mode, drop_thinking=drop_thinking)
prompt_str = encode_messages(messages, **encode_config) # type: ignore
if kwargs.get("tokenize", True):
tokenizer_kwargs = {
k: kwargs[k] for k in ("truncation", "max_length") if k in kwargs
}
return self.encode(
prompt_str,
add_special_tokens=False,
**tokenizer_kwargs,
)
return prompt_str
def num_special_tokens_to_add(self) -> int:
return len(self.encode(""))
@property
def all_special_tokens(self) -> list[str]:
return self.tokenizer.all_special_tokens
@property
def all_special_ids(self) -> list[int]:
return self.tokenizer.all_special_ids
@property
def bos_token_id(self) -> int:
return self.tokenizer.bos_token_id
@property
def eos_token_id(self) -> int:
return self.tokenizer.eos_token_id
@property
def pad_token_id(self) -> int:
return self.tokenizer.pad_token_id
@property
def is_fast(self) -> bool:
return self.tokenizer.is_fast
@property
def vocab_size(self) -> int:
return self.tokenizer.vocab_size
@property
def max_token_id(self) -> int:
return self.tokenizer.max_token_id
@property
def truncation_side(self) -> str:
return self.tokenizer.truncation_side
def __hash__(self) -> int:
return hash(id(self))
def __len__(self) -> int:
# </think> is an added token in DeepseekV32 tokenizer
return self.vocab_size + self._added_vocab_size
def __call__(
self,
text: str | list[str],
text_pair: str | None = None,
add_special_tokens: bool = True,
truncation: bool = False,
max_length: int | None = None,
) -> "BatchEncoding":
return self.tokenizer(
text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
truncation=truncation,
max_length=max_length,
)
def get_vocab(self) -> dict[str, int]:
return self.tokenizer.get_vocab()
def get_added_vocab(self) -> dict[str, int]:
return self._added_vocab.copy()
def encode(
self,
text: str,
truncation: bool | None = None,
max_length: int | None = None,
add_special_tokens: bool = True,
) -> list[int]:
return self.tokenizer.encode(
text,
truncation=truncation,
max_length=max_length,
add_special_tokens=add_special_tokens,
)
def convert_tokens_to_string(self, tokens: list[str]) -> str:
return self.tokenizer.convert_tokens_to_string(tokens)
def decode(self, ids: list[int] | int, skip_special_tokens: bool = False) -> str:
return self.tokenizer.decode(ids, skip_special_tokens=skip_special_tokens)
def convert_ids_to_tokens(
self,
ids: list[int],
skip_special_tokens: bool = False,
) -> list[str]:
return self.tokenizer.convert_ids_to_tokens(
ids, skip_special_tokens=skip_special_tokens
)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/tokenizers/mistral.py | vllm/tokenizers/mistral.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from pathlib import Path
from typing import TYPE_CHECKING, Any, cast
from mistral_common.protocol.instruct.request import (
ChatCompletionRequest as MistralChatCompletionRequest,
)
from mistral_common.protocol.instruct.tool_calls import Function, Tool
from mistral_common.protocol.instruct.validator import ValidationMode
from mistral_common.tokens.tokenizers.base import (
SpecialTokenPolicy,
SpecialTokens,
)
from mistral_common.tokens.tokenizers.instruct import InstructTokenizerV13
from mistral_common.tokens.tokenizers.sentencepiece import (
SentencePieceTokenizer,
)
from mistral_common.tokens.tokenizers.tekken import Tekkenizer
from vllm.entrypoints.chat_utils import ChatCompletionMessageParam
from vllm.entrypoints.openai.protocol import ChatCompletionRequest
from vllm.logger import init_logger
from .protocol import TokenizerLike
if TYPE_CHECKING:
from transformers import BatchEncoding
try:
# Transformers v5
from transformers.tokenization_mistral_common import MistralCommonBackend
except ImportError:
# Transformers v4
from transformers.tokenization_mistral_common import (
MistralCommonTokenizer as MistralCommonBackend,
)
logger = init_logger(__name__)
def maybe_serialize_tool_calls(request: "MistralChatCompletionRequest"):
# SEE: https://github.com/vllm-project/vllm/pull/9951
# Credits go to: @gcalmettes
# NOTE: There is currently a bug in pydantic where attributes
# declared as iterables are replaced in in the instances by
# pydantic-core ValidatorIterator instance. In particular, this
# affects tool_calls defined in ChatCompletionAssistantMessageParam
# model:
# see:
# - https://github.com/pydantic/pydantic/issues/9467
# As a result, tool_calls from assistant messages are never
# deserialized in the request object if the tool_calls iterator is
# not consumed. This affect messages passed to the MistralTokenizer
# since no chat template is applied and therefore the tools_calls
# iterator is not directly consumed.
# Issue is tracked on Pydantic side, with resolution planned for
# v2.11 release. In the meantime, the official workaround is to
# consume the iterator so the tool_calls are correctly deserialized
# in the OpenAI ChatCompletionAssistantMessageParam object
# https://github.com/pydantic/pydantic/issues/9467#issuecomment-2442097291 # noqa: E501
# Official Pydantic Issues:
# - https://github.com/pydantic/pydantic/issues/9541
# TODO: remove when pydantic v2.11 is released
for i, message in enumerate(request.messages):
if message.get("role") == "assistant":
tool_calls_validator = message.get("tool_calls", ().__iter__())
validated_tool_calls = []
while True:
try:
tool_call = next(tool_calls_validator) # type: ignore
validated_tool_calls.append(tool_call)
except StopIteration:
break
request.messages[i]["tool_calls"] = validated_tool_calls
def truncate_tool_call_ids(request: "MistralChatCompletionRequest"):
"""Truncates tool call IDs for Mistral's ID requirements."""
for i, message in enumerate(request.messages):
if message.get("role") == "assistant":
tool_calls = message.get("tool_calls", [])
for tool_call in tool_calls:
if len(tool_call["id"]) > 9:
logger.warning(
"Truncating tool call ID: %s to %s",
tool_call["id"],
tool_call["id"][-9:],
)
tool_call["id"] = tool_call["id"][-9:]
request.messages[i]["tool_calls"] = tool_calls
elif message.get("role") in {"tool_results", "tool"}:
if "tool_call_id" in message:
tool_call_id = message["tool_call_id"]
if len(tool_call_id) > 9:
logger.warning(
"Truncating tool_call_id: %s to %s",
tool_call_id,
tool_call_id[-9:],
)
tool_call_id = tool_call_id[-9:]
request.messages[i]["tool_call_id"] = tool_call_id
def _prepare_apply_chat_template_tools_and_messages(
messages: list["ChatCompletionMessageParam"],
tools: list[dict[str, Any]] | None = None,
continue_final_message: bool = False,
add_generation_prompt: bool = False,
) -> tuple[list["ChatCompletionMessageParam"], list[dict[str, Any]] | None]:
if add_generation_prompt and continue_final_message:
raise ValueError(
"Cannot set both `add_generation_prompt` and "
"`continue_final_message` to True."
)
last_message = cast(dict[str, Any], messages[-1])
# add_generation_prompt is directly handled by the tokenizer but we
# check if the user is trying to use it with a final assistant message
# which is probably not what they want.
# If add_generation_prompt is False, we don't need to check anything.
if add_generation_prompt and last_message["role"] == "assistant":
raise ValueError(
"Cannot set `add_generation_prompt` to True when "
"the last message is from the assistant. Consider "
"using `continue_final_message` instead."
)
if continue_final_message and last_message["role"] != "assistant":
raise ValueError(
"Cannot set `continue_final_message` to True when "
"the last message is not from the assistant."
)
# mistral-common requires AssistantMessage content to be string [1].
#
# [1]: https://github.com/mistralai/mistral-common/blob/f4a06998b75ed78bbf5aaf569590b772ea26c9f6/src/mistral_common/protocol/instruct/messages.py#L80
for message in messages:
# Remove reasoning as unsupported by Mistral
_ = message.pop("reasoning", None) # type: ignore
# The Mistral client, in comparison to the OpenAI client, requires the
# "parameters" dict and the "description" string to be present
# even if they are empty.
if tools:
for function in [
tool["function"] for tool in tools if tool["type"] == "function"
]:
if function.get("parameters") is None:
function["parameters"] = {}
if function.get("description") is None:
function["description"] = ""
# We filter not supported arguments to avoid throwing an error.
# TODO(juliendenize): remove this once OpenAI API is better supported by
# `mistral-common`.
tools_fields = set(Tool.model_fields.keys())
function_fields = set(Function.model_fields.keys())
for tool in tools:
tool_keys = list(tool.keys())
for tool_key in tool_keys:
if tool_key not in tools_fields:
tool.pop(tool_key)
logger.warning_once(
f"'{tool_key}' is not supported by mistral-common for tools. "
"It has been poped from the tool definition."
)
if tool["type"] == "function":
function_keys = list(tool["function"].keys())
for function_key in function_keys:
if function_key not in function_fields:
tool["function"].pop(function_key)
logger.warning_once(
f"'{function_key}' is not supported by mistral-common "
"for function tools. It has been poped from the "
"function definition."
)
else:
raise ValueError("mistral-common only supports function tools.")
return messages, tools
def validate_request_params(request: "ChatCompletionRequest"):
if request.chat_template is not None or request.chat_template_kwargs is not None:
raise ValueError("chat_template is not supported for Mistral tokenizers.")
def _tekken_token_to_id(tokenizer: "Tekkenizer", t: str | bytes) -> int:
assert isinstance(tokenizer, Tekkenizer), type(tokenizer)
t_bytes = t.encode("utf-8") if not isinstance(t, bytes) else t
shift = tokenizer.num_special_tokens
try:
return shift + tokenizer._tekken_token2id_nospecial[t_bytes]
except KeyError:
t_str = t_bytes.decode("utf-8")
if t_str in tokenizer._special_tokens_reverse_vocab:
return tokenizer._special_tokens_reverse_vocab[t_str]
logger.warning(
"Failed to convert token %s to id, replacing with <unk>", t_bytes
)
return tokenizer.unk_id
class MistralTokenizer(TokenizerLike):
@classmethod
def from_pretrained(
cls,
path_or_repo_id: str | Path,
*args,
trust_remote_code: bool = False,
revision: str | None = None,
download_dir: str | None = None,
**kwargs,
) -> "MistralTokenizer":
try:
# Transformers v5
from transformers.tokenization_mistral_common import MistralCommonBackend
except ImportError:
# Transformers v4
from transformers.tokenization_mistral_common import (
MistralCommonTokenizer as MistralCommonBackend,
)
tokenizer = MistralCommonBackend.from_pretrained(
path_or_repo_id,
*args,
mode=ValidationMode.test,
cache_dir=download_dir,
revision="main" if revision is None else revision,
**kwargs,
)
return cls(tokenizer)
def __init__(self, tokenizer: "MistralCommonBackend") -> None:
super().__init__()
self.transformers_tokenizer = tokenizer
self.mistral = tokenizer.tokenizer
self.instruct = self.mistral.instruct_tokenizer
self.tokenizer = self.instruct.tokenizer
mode = self.mistral._chat_completion_request_validator._mode
if mode != ValidationMode.test:
raise ValueError(
"Mistral tokenizer must be in test mode. Make sure to "
"set `mode='ValidationMode.test'` when creating the "
"Mistral tokenizer."
)
_mistral_version_str = str(self.tokenizer.version.value)
self.version: int = int(_mistral_version_str.split("v")[-1])
self.is_tekken = isinstance(self.tokenizer, Tekkenizer)
self.is_spm = isinstance(self.tokenizer, SentencePieceTokenizer)
if not (self.is_tekken or self.is_spm):
raise TypeError(f"Unsupported tokenizer: {type(self.tokenizer)}")
# Reverse order to ensure that the lowest token id is kept.
self._vocab_dict = {
self.convert_ids_to_tokens([i], skip_special_tokens=False)[0]: i
for i in range(self.vocab_size - 1, -1, -1)
}
# Sort the dict for convenience
self._vocab_dict = dict(sorted(self._vocab_dict.items(), key=lambda x: x[1]))
# Vocab sorted by token id.
self._vocab = self.tokenizer.vocab()
self._max_token_id = self.vocab_size - 1
# Cache special tokens for faster access.
self._special_token_ids = self._get_special_token_ids()
self._special_token_ids_set = set(self._special_token_ids)
self._special_tokens = self._get_special_tokens(self._special_token_ids)
self._special_tokens_set = set(self._special_tokens)
def _get_special_token_ids(self) -> list[int]:
return [i for i in range(len(self._vocab)) if self.tokenizer.is_special(i)]
def _get_special_tokens(self, all_special_ids: list[int]) -> list[str]:
return [
self.tokenizer.decode([i], special_token_policy=SpecialTokenPolicy.KEEP)
for i in all_special_ids
]
def num_special_tokens_to_add(self) -> int:
return len(self.encode(""))
# the following attributes are set to fit vLLM's design and are used
# by the structured output backends.
@property
def all_special_tokens(self) -> list[str]:
return self._special_tokens
@property
def all_special_ids(self) -> list[int]:
return self._special_token_ids
@property
def bos_token_id(self) -> int:
return self.tokenizer.bos_id
@property
def eos_token_id(self) -> int:
return self.tokenizer.eos_id
@property
def pad_token_id(self) -> int:
return self.tokenizer.pad_id
@property
def is_fast(self) -> bool:
return True
@property
def vocab_size(self) -> int:
return self.transformers_tokenizer.vocab_size
@property
def max_token_id(self) -> int:
return self._max_token_id
@property
def truncation_side(self) -> str:
return self.transformers_tokenizer.truncation_side
def _is_special_token_id(self, token_id: int) -> bool:
return token_id in self._special_token_ids_set
def __hash__(self) -> int:
return hash(id(self))
def __len__(self) -> int:
return self.vocab_size
def __call__(
self,
text: str | list[str],
text_pair: str | None = None,
add_special_tokens: bool = True,
truncation: bool = False,
max_length: int | None = None,
) -> "BatchEncoding":
if text_pair is not None:
raise ValueError(
"`text_pair` is not supported by `MistralTokenizer.__call__`."
)
encoded = self.transformers_tokenizer(
text=text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
truncation=truncation,
max_length=max_length,
)
# TODO(juliendenize): once https://github.com/huggingface/transformers/pull/41962
# is in, revert to only call self.transformers_tokenizer(...).
# Hack to fix wrongly added eos token, when fix will be supported the condition
# below will be False even before the revert is done.
if encoded["input_ids"] and encoded["input_ids"][-1] == self.eos_token_id:
encoded["input_ids"].pop(-1)
if attention_mask := encoded.get("attention_mask"):
attention_mask.pop(-1)
return encoded
    @property
    def vocab(self) -> list[str]:
        # Token strings indexed by id; precomputed at construction.
        return self._vocab
    def get_vocab(self) -> dict[str, int]:
        # token string -> id mapping; precomputed at construction.
        return self._vocab_dict
    def get_added_vocab(self) -> dict[str, int]:
        # Mistral tokenizers have no added vocabulary
        return {}
def encode(
self,
text: str,
truncation: bool | None = None,
max_length: int | None = None,
add_special_tokens: bool = True,
) -> list[int]:
# TODO(juliendenize): once https://github.com/huggingface/transformers/pull/41962
# is in, directly call self.transformers_tokenizer.encode(...).
encoded = self.tokenizer.encode(text, bos=add_special_tokens, eos=False)
if truncation is not False and max_length is not None:
return encoded[:max_length]
else:
return encoded
def apply_chat_template(
self,
messages: list["ChatCompletionMessageParam"],
tools: list[dict[str, Any]] | None = None,
**kwargs,
) -> list[int]:
add_generation_prompt = kwargs.pop("add_generation_prompt", False)
continue_final_message = kwargs.get("continue_final_message", False)
tokenize = kwargs.get("tokenize", True)
padding = kwargs.get("padding", False)
truncation = kwargs.get("truncation", False)
max_length = kwargs.get("max_length")
messages, tools = _prepare_apply_chat_template_tools_and_messages(
messages, tools, continue_final_message, add_generation_prompt
)
return self.transformers_tokenizer.apply_chat_template(
conversation=messages,
tools=tools,
continue_final_message=continue_final_message,
tokenize=tokenize,
padding=padding,
truncation=truncation,
max_length=max_length,
return_tensors=None,
return_dict=False,
)
def decode(self, ids: list[int] | int, skip_special_tokens: bool = False) -> str:
# TODO(juliendenize): once https://github.com/huggingface/transformers/pull/41962
# is in, directly call self.transformers_tokenizer.decode(...).
if isinstance(ids, int):
ids = [ids]
return self.transformers_tokenizer.decode(
ids, skip_special_tokens=skip_special_tokens
)
    def batch_decode(
        self, ids: list[list[int]] | list[int], skip_special_tokens: bool = False
    ) -> str:
        """Decode a batch of token-id sequences via the HF tokenizer."""
        return self.transformers_tokenizer.batch_decode(
            ids, skip_special_tokens=skip_special_tokens
        )
    def convert_tokens_to_string(self, tokens: list[str]) -> str:
        """Join token strings into text, dropping special tokens except
        those that must survive decoding (currently only the tool-calls
        marker).

        Two paths: Tekken tokenizers filter then re-decode (byte tokens need
        a full round trip); SentencePiece tokenizers decode runs of regular
        tokens and splice the kept special tokens between them verbatim.
        """
        to_decode_special_tokens = {SpecialTokens.tool_calls}
        if self.is_tekken:
            assert isinstance(self.tokenizer, Tekkenizer), type(self.tokenizer)
            # Drop every special token except the ones we want to keep.
            tokens = [
                t
                for t in tokens
                if (t in to_decode_special_tokens or t not in self._special_tokens_set)
            ]
            if any(isinstance(t, bytes) for t in tokens):
                # we need to encode and decode all tokens again
                ids = [_tekken_token_to_id(self.tokenizer, t) for t in tokens]
                # We filtered unwanted special tokens before
                # so we can decode the rest.
                decoded = self.tokenizer.decode(ids, SpecialTokenPolicy.KEEP)
            else:
                decoded = "".join(tokens)
        else:
            # make sure certain special tokens like Tool calls are
            # not decoded
            assert isinstance(self.tokenizer, SentencePieceTokenizer), type(
                self.tokenizer
            )
            # Buffer consecutive regular tokens, flush them through the
            # tokenizer whenever a kept special token interrupts the run.
            regular_tokens: list[str] = []
            decoded_list: list[str] = []
            decoded = ""
            for token in tokens:
                if token in to_decode_special_tokens:
                    if regular_tokens:
                        decoded_list.append(
                            self.tokenizer.decode(
                                regular_tokens, SpecialTokenPolicy.IGNORE
                            )
                        )
                        regular_tokens = []
                    decoded_list.append(token)
                else:
                    regular_tokens.append(token)
            # Flush the trailing run of regular tokens, if any.
            if regular_tokens:
                decoded_list.append(
                    self.tokenizer.decode(regular_tokens, SpecialTokenPolicy.IGNORE)
                )
            decoded = "".join(decoded_list)
        return decoded
    def convert_ids_to_tokens(
        self,
        ids: list[int],
        skip_special_tokens: bool = False,
    ) -> list[str]:
        """Map token ids to token strings.

        When `skip_special_tokens` is True, special tokens are dropped except
        the tool-calls marker and (on v13+ instruct tokenizers) the think
        markers, which structured-output/reasoning handling still needs.
        """
        if not skip_special_tokens:
            return [self.tokenizer.id_to_piece(token_id) for token_id in ids]
        # Special ids that survive skipping.
        non_skip_special_tokens_ids = {
            self.tokenizer.get_control_token(SpecialTokens.tool_calls),
        }
        if isinstance(self.instruct, InstructTokenizerV13):
            if self.instruct.BEGIN_THINK:
                non_skip_special_tokens_ids.add(self.instruct.BEGIN_THINK)
            if self.instruct.END_THINK:
                non_skip_special_tokens_ids.add(self.instruct.END_THINK)
        ids_kept = [
            i
            for i in ids
            if i in non_skip_special_tokens_ids or not self._is_special_token_id(i)
        ]
        # We filtered unwanted special tokens so we can decode the rest.
        tokens = [self.tokenizer.id_to_piece(token_id) for token_id in ids_kept]
        if any("�" in t for t in tokens) and self.is_tekken:
            # if a decoded token contains the replacement character, then the
            # token has an incomplete UTF-8 character so we must use bytes
            # See: https://github.com/vllm-project/vllm/pull/8640
            # https://github.com/vllm-project/vllm/pull/9625
            # if underlying tokenizer is sentencepiece, we just add "�".
            # We filtered unwanted special tokens so we can decode the rest.
            tokens = [
                self.tokenizer.id_to_byte_piece(token_id, SpecialTokenPolicy.KEEP)
                if token_id not in self._special_token_ids_set
                else self.tokenizer.decode([token_id], SpecialTokenPolicy.KEEP)
                for token_id in ids_kept
            ]
        return tokens
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/peft_helper.py | vllm/lora/peft_helper.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Adapted from: https://github.com/huggingface/peft/blob/main/src/peft/tuners/lora/config.py
import json
import math
import os
from dataclasses import MISSING, dataclass, field, fields
from typing import Literal
from vllm.config.lora import LoRAConfig
from vllm.logger import init_logger
from vllm.model_executor.model_loader.tensorizer import TensorizerConfig
logger = init_logger(__name__)
@dataclass
class PEFTHelper:
    """
    A helper class for PEFT configurations, specifically designed for LoRA.
    This class handles configuration validation, compatibility checks for
    various LoRA implementations.
    """

    # Required fields
    r: int
    lora_alpha: int
    target_modules: list[str] | str

    bias: Literal["none"] = field(default="none")
    modules_to_save: list[str] | None = field(default=None)
    # True to use Rank-Stabilized LoRA (rsLoRA, see: https://arxiv.org/abs/2312.03732)
    use_rslora: bool = field(default=False)
    # True to use Weight-Decomposed Low-Rank Adaptation (DoRA, see: https://arxiv.org/abs/2402.09353)
    use_dora: bool = field(default=False)
    # Extra vllm field, start with 'vllm_' to avoid conflict
    vllm_lora_scaling_factor: float = field(default=1.0)
    # Fix: the default used to be `False`, which is neither an int nor None
    # for this `int | None` field; `None` correctly means "not provided".
    # (Both are falsy, so truthiness-based callers behave the same.)
    vllm_max_position_embeddings: int | None = field(default=None)

    def _validate_features(self) -> list[str]:
        """
        Check if there are any unsupported LoRA features.

        Returns a list of human-readable error messages (empty if none).
        """
        error_msg = []
        if self.modules_to_save:
            error_msg.append("vLLM only supports modules_to_save being None.")
        if self.use_dora:
            error_msg.append("vLLM does not yet support DoRA.")
        return error_msg

    def __post_init__(self):
        # Derive the LoRA scaling factor: rsLoRA scales by alpha/sqrt(r),
        # classic LoRA by alpha/r.
        if self.use_rslora:
            logger.info_once("Loading LoRA weights trained with rsLoRA.")
            self.vllm_lora_scaling_factor = self.lora_alpha / math.sqrt(self.r)
        else:
            self.vllm_lora_scaling_factor = self.lora_alpha / self.r

    @classmethod
    def from_dict(cls, config_dict: dict) -> "PEFTHelper":
        """Build a PEFTHelper from an adapter_config.json-style dict.

        Unknown keys are ignored; missing required keys raise ValueError.
        """
        # Get all field information from the class
        class_fields = {f.name: f for f in fields(cls)}
        # Check for required fields
        required_fields = {
            name
            for name, f in class_fields.items()
            if f.default is MISSING and f.default_factory is MISSING
        }
        # Identify any missing required fields
        missing_fields = required_fields - set(config_dict.keys())
        if missing_fields:
            raise ValueError(f"Missing required configuration fields: {missing_fields}")
        # Filter out fields that aren't defined in the class
        filtered_dict = {k: v for k, v in config_dict.items() if k in class_fields}
        return cls(**filtered_dict)

    @classmethod
    def from_local_dir(
        cls,
        lora_path: str,
        max_position_embeddings: int | None,
        tensorizer_config_dict: dict | None = None,
    ) -> "PEFTHelper":
        """Load adapter_config.json from a local dir (or tensorizer stream).

        `max_position_embeddings` is threaded through into the helper as
        `vllm_max_position_embeddings`.
        """
        lora_config_path = os.path.join(lora_path, "adapter_config.json")
        if tensorizer_config_dict:
            tensorizer_config = TensorizerConfig(**tensorizer_config_dict)
            tensorizer_args = tensorizer_config._construct_tensorizer_args()
            from tensorizer.stream_io import open_stream

            # Config lives next to the serialized tensors, not at lora_path.
            lora_config_path = os.path.join(
                tensorizer_config.tensorizer_dir, "adapter_config.json"
            )
            with open_stream(
                lora_config_path, mode="rb", **tensorizer_args.stream_kwargs
            ) as f:
                config = json.load(f)
            logger.info(
                "Successfully deserialized LoRA config from %s",
                tensorizer_config.tensorizer_dir,
            )
        else:
            with open(lora_config_path) as f:
                config = json.load(f)
        config["vllm_max_position_embeddings"] = max_position_embeddings
        return cls.from_dict(config)

    def validate_legal(self, lora_config: LoRAConfig) -> None:
        """
        Validates the LoRA configuration settings against application
        constraints and requirements.

        Raises:
            ValueError: with all collected problems joined in one message.
        """
        error_msg = self._validate_features()
        if self.r > lora_config.max_lora_rank:
            error_msg.append(
                f"LoRA rank {self.r} is greater than max_lora_rank"
                f" {lora_config.max_lora_rank}."
            )
        if self.bias != "none":
            error_msg.append("Adapter bias is not supported.")
        if error_msg:
            raise ValueError(f"{' '.join(error_msg)}")
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/worker_manager.py | vllm/lora/worker_manager.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from contextlib import contextmanager
from typing import Any, Literal
import torch
from vllm.config import VllmConfig
from vllm.logger import init_logger
from vllm.lora.lora_model import LoRAModel
from vllm.lora.model_manager import (
LoRAModelManager,
LRUCacheLoRAModelManager,
create_lora_manager,
)
from vllm.lora.peft_helper import PEFTHelper
from vllm.lora.request import LoRARequest
from vllm.lora.utils import get_adapter_absolute_path
logger = init_logger(__name__)
class WorkerLoRAManager:
    """WorkerLoRAManager that manages LoRA models on the worker side.
    Every request, the requested LoRAs will be loaded (unless they are already
    loaded), and every other LoRA will be unloaded."""
    # Adapter-manager class instantiated in create_lora_manager(); subclasses
    # override this to change the eviction policy (see LRUCacheWorkerLoRAManager).
    _manager_cls: type[LoRAModelManager] = LoRAModelManager
    def __init__(
        self,
        vllm_config: VllmConfig,
        device: torch.device,
        embedding_modules: dict[str, str],
        lora_model_cls: type[LoRAModel] = LoRAModel,
    ):
        self._lora_model_cls = lora_model_cls
        self.embedding_modules = embedding_modules
        # Tri-state: False -> dummy caching disabled, None -> cache armed but
        # empty, LoRAModel -> cached dummy to clone (see dummy_lora_cache()).
        self._cached_dummy_lora: None | Literal[False] | LoRAModel = False
        self.max_num_seqs = vllm_config.scheduler_config.max_num_seqs
        self.max_num_batched_tokens = (
            vllm_config.scheduler_config.max_num_batched_tokens
        )
        self.vocab_size = vllm_config.model_config.get_vocab_size()
        self.lora_config = vllm_config.lora_config
        # Use get_text_config() in case of multimodal models
        text_config = vllm_config.model_config.hf_config.get_text_config()
        self.max_position_embeddings = text_config.max_position_embeddings
        self.device = device
        # Lazily initialized by create_lora_manager.
        self._adapter_manager: LoRAModelManager
    @contextmanager
    def dummy_lora_cache(self):
        """Use this context manager to reuse the dummy lora model
        to avoid creating it repeatedly."""
        self._cached_dummy_lora = None
        yield
        self._cached_dummy_lora = False
    @property
    def is_enabled(self) -> bool:
        # LoRA support is always on for this worker-side manager.
        return True
    def create_lora_manager(
        self,
        model: torch.nn.Module,
        vllm_config: VllmConfig | None = None,
    ) -> Any:
        """Create the adapter manager for `model` and return the model it
        manages (possibly wrapped with LoRA-capable layers)."""
        lora_manager = create_lora_manager(
            model,
            max_num_seqs=self.max_num_seqs,
            max_num_batched_tokens=self.max_num_batched_tokens,
            vocab_size=self.vocab_size,
            lora_config=self.lora_config,
            device=self.device,
            lora_manager_cls=self._manager_cls,
            vllm_config=vllm_config,
        )
        self._adapter_manager = lora_manager
        return lora_manager.model
    def _load_adapter(self, lora_request: LoRARequest) -> LoRAModel:
        """Load the adapter checkpoint for `lora_request` onto CPU, after
        validating it against the model's LoRA-capable modules and the
        engine's LoRA config.

        Raises:
            ValueError: if no adapter can be found at the requested path.
        """
        try:
            supported_lora_modules = self._adapter_manager.supported_lora_modules
            packed_modules_mapping = self._adapter_manager.packed_modules_mapping
            # Expand packed modules (e.g. fused qkv) into their constituent
            # sub-modules so checkpoint tensor names can be matched.
            expected_lora_lst: list[str] = []
            for module in supported_lora_modules:
                if module in packed_modules_mapping:
                    expected_lora_lst.extend(packed_modules_mapping[module])
                else:
                    expected_lora_lst.append(module)
                if module == "experts":
                    expected_lora_lst.append(module)
            expected_lora_modules = set(expected_lora_lst)
            lora_path = get_adapter_absolute_path(lora_request.lora_path)
            peft_helper = PEFTHelper.from_local_dir(
                lora_path,
                self.max_position_embeddings,
                lora_request.tensorizer_config_dict,
            )
            # Validates the LoRA configuration against requirements before
            # loading weights, throwing an exception if validation fails.
            peft_helper.validate_legal(self.lora_config)
            # For some models like Qwen2VL, we need to use hf_to_vllm_mapper
            # to ensure correct loading of lora weights.
            model = self._adapter_manager.model
            hf_to_vllm_mapper = getattr(model, "hf_to_vllm_mapper", None)
            lora = self._lora_model_cls.from_local_checkpoint(
                lora_path,
                expected_lora_modules,
                peft_helper=peft_helper,
                lora_model_id=lora_request.lora_int_id,
                device="cpu",
                dtype=self.lora_config.lora_dtype,
                model_vocab_size=self.vocab_size,
                tensorizer_config_dict=lora_request.tensorizer_config_dict,
                weights_mapper=hf_to_vllm_mapper,
            )
        except FileNotFoundError as e:
            # FileNotFoundError should be raised if both
            # - No adapter found to download from huggingface (or in
            # offline mode)
            # - No local adapter files found at `lora_request.lora_path`
            # For NotFoundError
            raise ValueError(
                f"Loading lora {lora_request.lora_name} failed: No adapter "
                f"found for {lora_request.lora_path}"
            ) from e
        except Exception as e:
            # For BadRequestError
            raise e
        return lora
    def add_dummy_lora(self, lora_request: LoRARequest, rank: int) -> bool:
        """Add a zero-weight placeholder adapter (used for profiling/warmup).

        Returns False if an adapter with this id is already registered.
        """
        if lora_request.lora_int_id in self.list_adapters():
            return False
        if isinstance(self._cached_dummy_lora, LoRAModel):
            # Reuse the cached dummy; clone only rebinds the id and shares
            # the underlying tensors.
            dummy_lora = self._cached_dummy_lora.clone(lora_request.lora_int_id)
        else:
            dummy_lora = self._adapter_manager.create_dummy_lora(
                lora_request.lora_int_id, rank, self.embedding_modules
            )
            # None means caching is armed (inside dummy_lora_cache()).
            if self._cached_dummy_lora is None:
                self._cached_dummy_lora = dummy_lora
        return self._adapter_manager.add_adapter(dummy_lora)
    def pin_adapter(self, adapter_id: int) -> bool:
        return self._adapter_manager.pin_adapter(adapter_id)
    def set_active_adapters(self, requests: set[Any], mapping: Any | None) -> None:
        """Load exactly the requested adapters, then apply the token mapping."""
        self._apply_adapters(requests)
        if mapping is not None:
            self._adapter_manager.set_adapter_mapping(mapping)
    def supports_tower_connector_lora(self) -> bool:
        return (
            self._adapter_manager.supports_mm
            and self._adapter_manager.supports_tower_connector_lora
        )
    def _apply_adapters(self, adapter_requests: set[Any]) -> None:
        """Synchronize loaded adapters with `adapter_requests`: unload the
        ones no longer requested, load the new ones.

        Raises:
            RuntimeError: if more adapters are requested than GPU slots.
        """
        existing_adapters = self.list_adapters()
        models_map = {
            adapter_request.adapter_id: adapter_request
            for adapter_request in adapter_requests
            if adapter_request
        }
        if len(models_map) > self._adapter_manager.adapter_slots:
            raise RuntimeError(
                f"Number of requested models ({len(models_map)}) is greater "
                "than the number of GPU model slots "
                f"({self._adapter_manager.adapter_slots})."
            )
        requested_ids = set(models_map)
        for adapter_id in existing_adapters - requested_ids:
            self.remove_adapter(adapter_id)
        for adapter_id in requested_ids - existing_adapters:
            self.add_adapter(models_map[adapter_id])
    def add_adapter(self, adapter_request: Any) -> bool:
        """Load, register, and activate a new adapter; no-op if already known."""
        if adapter_request.adapter_id in self.list_adapters():
            return False
        loaded_adapter = self._load_adapter(adapter_request)
        loaded = self._adapter_manager.add_adapter(loaded_adapter)
        self._adapter_manager.activate_adapter(loaded_adapter.id)
        return loaded
    def remove_adapter(self, adapter_id: int) -> bool:
        return self._adapter_manager.remove_adapter(adapter_id)
    def remove_all_adapters(self):
        self._adapter_manager.remove_all_adapters()
    def list_adapters(self) -> set[int]:
        return set(self._adapter_manager.list_adapters())
class LRUCacheWorkerLoRAManager(WorkerLoRAManager):
    """WorkerLoRAManager that manages LoRA models on the worker side.
    Uses an LRU Cache. Every request, the requested LoRAs will be loaded
    (unless they are already loaded) and least recently used LoRAs will
    be unloaded if the cache is above capacity."""
    _manager_cls: type[LRUCacheLoRAModelManager] = LRUCacheLoRAModelManager
    def create_lora_manager(
        self,
        model: torch.nn.Module,
        vllm_config: VllmConfig | None = None,
    ) -> Any:
        """Create the LRU-cache adapter manager for `model` and return the
        model it manages."""
        lora_manager = create_lora_manager(
            model,
            lora_manager_cls=self._manager_cls,
            max_num_seqs=self.max_num_seqs,
            vocab_size=self.vocab_size,
            lora_config=self.lora_config,
            device=self.device,
            max_num_batched_tokens=self.max_num_batched_tokens,
            vllm_config=vllm_config,
        )
        self._adapter_manager = lora_manager
        return lora_manager.model
    def _apply_adapters(self, lora_requests: set[LoRARequest]) -> None:
        """Load/touch the requested adapters; unlike the base class, nothing
        is explicitly unloaded here — eviction is left to the LRU cache.

        Raises:
            RuntimeError: if more adapters are requested than GPU LoRA slots.
        """
        loras_map = {
            lora_request.lora_int_id: lora_request
            for lora_request in lora_requests
            if lora_request
        }
        if len(loras_map) > self._adapter_manager.lora_slots:
            raise RuntimeError(
                f"Number of requested LoRAs ({len(loras_map)}) is greater "
                "than the number of GPU LoRA slots "
                f"({self._adapter_manager.lora_slots})."
            )
        for lora in loras_map.values():
            self.add_adapter(lora)
    def add_adapter(self, lora_request: LoRARequest) -> bool:
        """Load (or touch) an adapter and activate it, evicting the LRU
        entry first if the cache would exceed capacity."""
        # Note that this method is not thread-safe. It may be invoked multiple
        # times for the same adapter when using multiple API servers.
        # This is ok because it's currently only called from
        # the single-threaded core engine loop.
        if lora_request.lora_int_id not in self.list_adapters():
            # Load the new adapter first to ensure it is actually valid, before
            # evicting any existing adapters.
            # This may cause the # of loaded lora adapters to very temporarily
            # exceed `--max-cpu-loras`.
            lora = self._load_adapter(lora_request)
            # Loading succeeded, now check if we will exceed cache capacity and
            # evict if the oldest adapter if so
            if len(self._adapter_manager) + 1 > self._adapter_manager.capacity:
                assert isinstance(self._adapter_manager, LRUCacheLoRAModelManager)
                self._adapter_manager.remove_oldest_adapter()
            # Then add the new adapter to the cache
            loaded = self._adapter_manager.add_adapter(lora)
        else:
            # If the lora is already loaded, just touch it to
            # update its position in the caches
            loaded = (
                self._adapter_manager.get_adapter(lora_request.lora_int_id) is not None
            )
        self._adapter_manager.activate_adapter(lora_request.lora_int_id)
        return loaded
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/resolver.py | vllm/lora/resolver.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from abc import ABC, abstractmethod
from collections.abc import Set
from dataclasses import dataclass, field
from vllm.logger import init_logger
from vllm.lora.request import LoRARequest
logger = init_logger(__name__)
class LoRAResolver(ABC):
    """Base class for LoRA adapter resolvers.
    This class defines the interface for resolving and fetching LoRA adapters.
    Implementations of this class should handle the logic for locating and
    downloading LoRA adapters from various sources (e.g. S3, cloud storage,
    etc.).
    """
    @abstractmethod
    async def resolve_lora(
        self, base_model_name: str, lora_name: str
    ) -> LoRARequest | None:
        """Abstract method to resolve and fetch a LoRA model adapter.
        Implements logic to locate and download LoRA adapter based on the name.
        Implementations might fetch from a blob storage or other sources.
        Args:
            base_model_name: The name/identifier of the base model to resolve.
            lora_name: The name/identifier of the LoRA model to resolve.
        Returns:
            Optional[LoRARequest]: The resolved LoRA model information, or None
            if the LoRA model cannot be found.
        """
        # Interface only — concrete resolvers are registered via
        # LoRAResolverRegistry.register_resolver().
        pass
@dataclass
class _LoRAResolverRegistry:
    """Name-keyed registry of LoRAResolver instances."""

    resolvers: dict[str, LoRAResolver] = field(default_factory=dict)

    def get_supported_resolvers(self) -> Set[str]:
        """Names of every resolver currently registered."""
        return self.resolvers.keys()

    def register_resolver(
        self,
        resolver_name: str,
        resolver: LoRAResolver,
    ) -> None:
        """Register `resolver` under `resolver_name`.

        An existing registration under the same name is overwritten, with a
        warning.
        """
        already_registered = resolver_name in self.resolvers
        if already_registered:
            logger.warning(
                "LoRA resolver %s is already registered, and will be "
                "overwritten by the new resolver instance %s.",
                resolver_name,
                resolver,
            )
        self.resolvers[resolver_name] = resolver

    def get_resolver(self, resolver_name: str) -> LoRAResolver:
        """Look up a registered resolver by name.

        Raises:
            KeyError: If the resolver is not found in the registry.
        """
        try:
            return self.resolvers[resolver_name]
        except KeyError:
            raise KeyError(
                f"LoRA resolver '{resolver_name}' not found. "
                f"Available resolvers: {list(self.resolvers.keys())}"
            ) from None


# Module-level singleton used by callers to register/look up resolvers.
LoRAResolverRegistry = _LoRAResolverRegistry()
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/lora_model.py | vllm/lora/lora_model.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
import safetensors
import torch
from vllm.logger import init_logger
from vllm.lora.lora_weights import LoRALayerWeights
from vllm.lora.peft_helper import PEFTHelper
from vllm.lora.utils import (
get_lora_id,
is_base_embeddding_weights,
parse_fine_tuned_lora_name,
)
from vllm.model_executor.model_loader.tensorizer import TensorizerConfig
from vllm.model_executor.models.utils import WeightsMapper
from vllm.utils.platform_utils import is_pin_memory_available
logger = init_logger(__name__)
class LoRAModel:
    """A LoRA fine-tuned model."""
    def __init__(
        self,
        lora_model_id: int,
        rank: int,
        loras: dict[str, LoRALayerWeights],
    ) -> None:
        """
        Args:
            lora_model_id: The integer id for the lora model.
            rank: lora rank.
            loras: module name -> weights for lora-replaced layers.
        """
        self.id = lora_model_id
        assert lora_model_id > 0, (
            f"a valid lora id should be greater than 0, got {self.id}"
        )
        self.rank = rank
        self.loras: dict[str, LoRALayerWeights] = loras
    def clone(self, lora_model_id: int) -> "LoRAModel":
        """Return a copy of the object with different ids.
        Will share the underlying tensors."""
        return self.__class__(
            lora_model_id,
            rank=self.rank,
            loras=self.loras.copy(),
        )
    def get_lora(self, module_name: str) -> LoRALayerWeights | None:
        """Get LoRA for a given module by name"""
        return self.loras.get(module_name, None)
    def check_lora_name(self, lora_name: str) -> bool:
        # Membership test on the replaced-module names.
        return lora_name in self.loras
    @classmethod
    def from_lora_tensors(
        cls,
        lora_model_id: int,
        tensors: dict[str, torch.Tensor],
        peft_helper: PEFTHelper,
        device: str = "cuda",
        dtype: torch.dtype | None = None,
        model_vocab_size: int | None = None,
        weights_mapper: WeightsMapper | None = None,
    ) -> "LoRAModel":
        """Create a LoRAModel from a dictionary of tensors.

        Tensor names are parsed into (module, lora_a/lora_b); base embedding
        weights are skipped. Raises RuntimeError if an embedding LoRA's
        vocab dimension disagrees with `model_vocab_size`.
        """
        # Pin CPU tensors so later host->device copies can be async.
        pin_memory = str(device) == "cpu" and is_pin_memory_available()
        loras: dict[str, LoRALayerWeights] = {}
        for tensor_name, tensor in tensors.items():
            if is_base_embeddding_weights(tensor_name):
                continue
            module_name, is_lora_a = parse_fine_tuned_lora_name(
                tensor_name, weights_mapper
            )
            if module_name not in loras:
                loras[module_name] = LoRALayerWeights.from_config(
                    module_name, peft_helper
                )
            if is_lora_a:
                if (
                    "lora_embedding_A" in tensor_name
                    and model_vocab_size is not None
                    and model_vocab_size != tensor.shape[1]
                ):
                    raise RuntimeError(
                        f"The embedding LoRA size({tensor.shape[1]}) must be consistent"
                        f" with the base model's vocabulary size({model_vocab_size})."
                    )
                loras[module_name].lora_a = tensor.to(device=device, dtype=dtype)
                if pin_memory:
                    loras[module_name].lora_a = loras[module_name].lora_a.pin_memory()
            else:
                loras[module_name].lora_b = tensor.to(device=device, dtype=dtype)
                if pin_memory:
                    loras[module_name].lora_b = loras[module_name].lora_b.pin_memory()
        return cls(lora_model_id, peft_helper.r, loras)
    @classmethod
    def from_local_checkpoint(
        cls,
        lora_dir: str,
        expected_lora_modules: set[str],
        peft_helper: PEFTHelper,
        *,
        lora_model_id: int | None = None,
        device: str = "cuda",
        dtype: torch.dtype | None = None,
        model_vocab_size: int | None = None,
        weights_mapper: WeightsMapper | None = None,
        tensorizer_config_dict: dict | None = None,
    ) -> "LoRAModel":
        """Create a LoRAModel from a local checkpoint.
        Args:
            lora_dir: The local path that has lora data.
            expected_lora_modules: Name of modules that are expected to be
                replaced by lora.
            peft_helper: Loaded lora configuration information.
            lora_model_id: LoRA model id. If not given, automatically set by
                a global counter.
            device: Device where the lora model is loaded.
            dtype: dtype of the lora model weights.
        Returns:
            Loaded LoRA Model.
        """
        # Supported checkpoint layouts, tried in order below:
        # tensorizer stream, safetensors, then torch .bin/.pt.
        lora_tensor_path = os.path.join(lora_dir, "adapter_model.safetensors")
        lora_bin_file_path = os.path.join(lora_dir, "adapter_model.bin")
        lora_pt_file_path = os.path.join(lora_dir, "adapter_model.pt")
        tensors: dict[str, torch.Tensor] = {}
        unexpected_modules: list[list[str] | str] = []
        def check_unexpected_modules(modules: dict):
            # Raise if the checkpoint targets any module this model cannot
            # LoRA-replace.
            for lora_module in modules.keys():  # noqa
                if is_base_embeddding_weights(lora_module):
                    continue
                # Handle PEFT file format where experts.base_layer is the
                # gate_up_proj and experts is the down_proj
                if "base_layer" in lora_module:
                    continue
                module_name, _ = parse_fine_tuned_lora_name(lora_module, weights_mapper)
                # Case for expert lora weights
                if ".experts" in module_name:
                    expert_idx = module_name.find(".experts")
                    expert_suffix = module_name[expert_idx + 1 :]
                    if expert_suffix not in expected_lora_modules:
                        unexpected_modules.append(module_name)
                elif module_name.rsplit(".", 1)[-1] not in expected_lora_modules:
                    unexpected_modules.append(module_name)
            if unexpected_modules:
                raise ValueError(
                    f"While loading {lora_dir}, expected"
                    f" target modules in {expected_lora_modules}"
                    f" but received {unexpected_modules}."
                    f" Please verify that the loaded LoRA module is correct"
                )
        if tensorizer_config_dict:
            from tensorizer import TensorDeserializer
            tensorizer_config = TensorizerConfig(**tensorizer_config_dict)
            lora_tensor_path = os.path.join(
                tensorizer_config.tensorizer_dir, "adapter_model.tensors"
            )
            tensorizer_args = tensorizer_config._construct_tensorizer_args()
            tensors = TensorDeserializer(
                lora_tensor_path,
                dtype=tensorizer_config.dtype,
                **tensorizer_args.deserialization_kwargs,
            )
            check_unexpected_modules(tensors)
        elif os.path.isfile(lora_tensor_path):
            # Find unexpected modules.
            # Use safetensor key as a source of truth to find expected modules.
            # in peft if you have target_modules A, B, C and C does not exist
            # in the model it won’t error and model will be trained with A, B
            # loraified. C won’t exist in the safetensor but it will exist in
            # the target_modules of the adapter_config.json.
            unexpected_modules = []
            with safetensors.safe_open(lora_tensor_path, framework="pt") as f:  # type: ignore
                # Load tensors if there are only expected modules.
                check_unexpected_modules(f)
                for module in f.keys():  # noqa
                    tensors[module] = f.get_tensor(module)
        elif os.path.isfile(lora_bin_file_path) or os.path.isfile(lora_pt_file_path):
            lora_file_path = (
                lora_bin_file_path
                if os.path.isfile(lora_bin_file_path)
                else lora_pt_file_path
            )
            tensors = torch.load(lora_file_path, map_location=device, weights_only=True)
            check_unexpected_modules(tensors)
        else:
            raise ValueError(f"{lora_dir} doesn't contain tensors")
        return cls.from_lora_tensors(
            lora_model_id=get_lora_id() if lora_model_id is None else lora_model_id,
            tensors=tensors,
            peft_helper=peft_helper,
            device=device,
            dtype=dtype,
            model_vocab_size=model_vocab_size,
            weights_mapper=weights_mapper,
        )
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/request.py | vllm/lora/request.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import msgspec
class LoRARequest(
    msgspec.Struct,
    omit_defaults=True,  # type: ignore[call-arg]
    array_like=True,
):  # type: ignore[call-arg]
    """
    Request for a LoRA adapter.
    lora_int_id must be globally unique for a given adapter.
    This is currently not enforced in vLLM.
    """

    lora_name: str
    lora_int_id: int
    lora_path: str = ""
    base_model_name: str | None = msgspec.field(default=None)
    tensorizer_config_dict: dict | None = None

    def __post_init__(self):
        """Validate the request after msgspec construction.

        Raises:
            ValueError: if ``lora_int_id`` is not positive or ``lora_path``
                is empty.
        """
        if self.lora_int_id < 1:
            raise ValueError(f"id must be > 0, got {self.lora_int_id}")
        # Fix: this was `assert self.lora_path, ...`, which is stripped under
        # `python -O`; raise ValueError so the check always runs and matches
        # the error type of the id validation above.
        if not self.lora_path:
            raise ValueError("lora_path cannot be empty")

    @property
    def adapter_id(self):
        # Generic alias used by the adapter-manager code paths.
        return self.lora_int_id

    @property
    def name(self):
        return self.lora_name

    @property
    def path(self):
        return self.lora_path

    def __eq__(self, value: object) -> bool:
        """
        Overrides the equality method to compare LoRARequest
        instances based on lora_name. This allows for identification
        and comparison lora adapter across engines.
        """
        return isinstance(value, self.__class__) and self.lora_name == value.lora_name

    def __hash__(self) -> int:
        """
        Overrides the hash method to hash LoRARequest instances
        based on lora_name. This ensures that LoRARequest instances
        can be used in hash-based collections such as sets and dictionaries,
        identified by their names across engines.
        """
        return hash(self.lora_name)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/utils.py | vllm/lora/utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
from typing import TYPE_CHECKING, Optional
import huggingface_hub
from huggingface_hub.utils import (
EntryNotFoundError,
HfHubHTTPError,
HFValidationError,
RepositoryNotFoundError,
)
from torch import nn
from transformers import PretrainedConfig
from vllm.config.lora import LoRAConfig
from vllm.logger import init_logger
# being imported for _all_lora_classes below
from vllm.lora.layers import (
BaseLayerWithLoRA,
ColumnParallelLinearWithLoRA,
ColumnParallelLinearWithShardedLoRA,
FusedMoE3DWithLoRA,
FusedMoEWithLoRA,
LogitsProcessorWithLoRA,
MergedColumnParallelLinearWithLoRA,
MergedColumnParallelLinearWithShardedLoRA,
MergedQKVParallelLinearWithLoRA,
MergedQKVParallelLinearWithShardedLoRA,
QKVParallelLinearWithLoRA,
QKVParallelLinearWithShardedLoRA,
ReplicatedLinearWithLoRA,
RowParallelLinearWithLoRA,
RowParallelLinearWithShardedLoRA,
VocabParallelEmbeddingWithLoRA,
)
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.linear import LinearBase
from vllm.model_executor.utils import get_moe_expert_mapping, get_packed_modules_mapping
if TYPE_CHECKING:
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead
from vllm.model_executor.models.utils import WeightsMapper
logger = init_logger(__name__)
# Monotonically increasing counter backing get_lora_id().
_GLOBAL_LORA_ID = 0


def get_lora_id():
    """Return the next globally unique LoRA id (1, 2, 3, ...).

    Plain global increment — no locking, so not thread-safe.
    """
    global _GLOBAL_LORA_ID
    next_id = _GLOBAL_LORA_ID + 1
    _GLOBAL_LORA_ID = next_id
    return next_id
# All LoRA-capable layer wrappers that `from_layer` may substitute for a base
# layer. Order is irrelevant (it's a set): each class declares its own
# applicability via its `can_replace_layer` check.
_all_lora_classes: set[type[BaseLayerWithLoRA]] = {
    VocabParallelEmbeddingWithLoRA,
    ColumnParallelLinearWithLoRA,
    MergedColumnParallelLinearWithLoRA,
    QKVParallelLinearWithLoRA,
    MergedQKVParallelLinearWithLoRA,
    RowParallelLinearWithLoRA,
    ReplicatedLinearWithLoRA,
    LogitsProcessorWithLoRA,
    ColumnParallelLinearWithShardedLoRA,
    QKVParallelLinearWithShardedLoRA,
    MergedColumnParallelLinearWithShardedLoRA,
    MergedQKVParallelLinearWithShardedLoRA,
    RowParallelLinearWithShardedLoRA,
    FusedMoEWithLoRA,
    FusedMoE3DWithLoRA,
}
def is_moe_model(model: nn.Module) -> bool:
    """Return True (logging once) if `model` contains any FusedMoE layer."""
    has_fused_moe = any(isinstance(m, FusedMoE) for m in model.modules())
    if has_fused_moe:
        logger.info_once("MoE model detected. Using fused MoE LoRA implementation.")
    return has_fused_moe
def from_layer(
    layer: nn.Module,
    max_loras: int,
    lora_config: LoRAConfig,
    packed_modules_list: list,
    model_config: PretrainedConfig | None = None,
) -> nn.Module:
    """Wrap `layer` with its LoRA-capable counterpart, if one exists.

    Scans the registered LoRA layer classes and returns the first wrapper
    whose `can_replace_layer` accepts this layer (with LoRA weight buffers
    allocated for up to `max_loras` adapters); otherwise returns the layer
    unchanged.
    """
    # kwargs are passed by name so they can be easily accessed in decorators.
    replace_kwargs = dict(
        source_layer=layer,
        lora_config=lora_config,
        packed_modules_list=packed_modules_list,
        model_config=model_config,
    )
    for lora_cls in _all_lora_classes:
        if lora_cls.can_replace_layer(**replace_kwargs):
            wrapped_layer = lora_cls(layer)
            wrapped_layer.create_lora_weights(max_loras, lora_config, model_config)
            return wrapped_layer
    return layer
def from_layer_logits_processor(
layer: "LogitsProcessor",
lm_head: "ParallelLMHead",
max_loras: int,
lora_config: LoRAConfig,
model_config: PretrainedConfig | None = None,
) -> LogitsProcessorWithLoRA:
ret = LogitsProcessorWithLoRA(
layer,
lm_head.embedding_dim,
lm_head.weight.dtype,
lm_head.weight.device,
lm_head.get_sharded_to_full_mapping(),
)
ret.create_lora_weights(max_loras, lora_config, model_config)
return ret
def replace_submodule(
model: nn.Module, module_name: str, new_module: nn.Module
) -> nn.Module:
"""Replace a submodule in a model with a new module."""
parent = model.get_submodule(".".join(module_name.split(".")[:-1]))
target_name = module_name.split(".")[-1]
setattr(parent, target_name, new_module)
return new_module
def parse_fine_tuned_lora_name(
name: str, weights_mapper: Optional["WeightsMapper"] = None
) -> tuple[str, bool]:
"""Parse the name of lora weights.
args:
name: the name of the fine-tuned LoRA, e.g.
base_model.model.dense1.weight
weights_mapper: maps the name of weight, e.g.
`model.` -> `language_model.model.`,
return:
tuple(module_name, is_lora_a):
module_name: the name of the module, e.g. model.dense1,
is_lora_a whether the tensor is lora_a or lora_b.
"""
# LoRA weight qualified name usually starts with `base_model.model.`,
# so we remove the prefix `base_model.model.` to make the following
# mapping correctly.
if name.startswith("base_model.model."):
name = name.replace("base_model.model.", "")
name = weights_mapper._map_name(name) if weights_mapper else name
# recover the prefix `base_model.model.`
name = "base_model.model." + name
else:
name = weights_mapper._map_name(name) if weights_mapper else name
# In some situations, we may not start with `base_model.model.`.
# If we don't (e.g., ibm-granite/granite-speech-3.3-8b),
# we should keep the prefix intact.
start_index = 2 if name.startswith("base_model.model.") else 0
parts = name.split(".")
if parts[-1] == "weight" and (parts[-2] == "lora_A" or parts[-2] == "lora_B"):
new_name = ".".join(parts[start_index:-2])
return new_name, parts[-2] == "lora_A"
if parts[-1] == "lora_embedding_A" or parts[-1] == "lora_embedding_B":
new_name = ".".join(parts[start_index:-1])
return new_name, parts[-1] == "lora_embedding_A"
raise ValueError(f"{name} is unsupported LoRA weight")
def is_base_embeddding_weights(name: str) -> bool:
# hardcoded subfixes for input & output embedding weights
embedding_suffixes = (
".embed_tokens.base_layer.weight",
".lm_head.base_layer.weight",
)
return name.endswith(embedding_suffixes)
def get_supported_lora_modules(model: nn.Module) -> list[str]:
"""
In vLLM, all linear layers support LoRA.
"""
supported_lora_modules: set[str] = set()
for name, module in model.named_modules():
# get the embedding modules if the module's embedding_modules
# is not empty.
embedding_modules = getattr(module, "embedding_modules", None)
if embedding_modules is not None:
for name in embedding_modules:
supported_lora_modules.add(name)
# get all the linear subfixes.
if isinstance(module, (LinearBase,)):
supported_lora_modules.add(name.split(".")[-1])
if isinstance(module, (FusedMoE,)):
supported_lora_modules.add(name.split(".")[-1])
return list(supported_lora_modules)
def get_adapter_absolute_path(lora_path: str) -> str:
"""
Resolves the given lora_path to an absolute local path.
If the lora_path is identified as a Hugging Face model identifier,
it will download the model and return the local snapshot path.
Otherwise, it treats the lora_path as a local file path and
converts it to an absolute path.
Parameters:
lora_path (str): The path to the lora model, which can be an absolute path,
a relative path, or a Hugging Face model identifier.
Returns:
str: The resolved absolute local path to the lora model.
"""
# Check if the path is an absolute path. Return it no matter exists or not.
if os.path.isabs(lora_path):
return lora_path
# If the path starts with ~, expand the user home directory.
if lora_path.startswith("~"):
return os.path.expanduser(lora_path)
# Check if the expanded relative path exists locally.
if os.path.exists(lora_path):
return os.path.abspath(lora_path)
# If the path does not exist locally, assume it's a Hugging Face repo.
try:
local_snapshot_path = huggingface_hub.snapshot_download(repo_id=lora_path)
except (
HfHubHTTPError,
RepositoryNotFoundError,
EntryNotFoundError,
HFValidationError,
):
# Handle errors that may occur during the download
# Return original path instead of throwing error here
logger.exception("Error downloading the HuggingFace model")
return lora_path
return local_snapshot_path
def process_packed_modules_mapping(model: nn.Module) -> dict[str, list[str]]:
if is_moe_model(model):
if moe_packed_mapping := get_moe_expert_mapping(model):
# This method generates and returns a dictionary mapping packed module
# names to lists of their corresponding submodule names. It includes
# both static mappings and dynamic mappings for expert layers, where
# the expert indices are expanded based on the configured number
# of routed experts.
packed_modules_mapping = get_packed_modules_mapping(model)
if not model.is_3d_moe_weight:
# 3D MoE LoRA does not need `packed_modules_mapping`
packed_modules_mapping["experts"] = [
weight_name.rstrip(".")
for _, weight_name, _, _ in moe_packed_mapping
]
return packed_modules_mapping
else:
raise AttributeError(
"To support LoRA for MoE model, "
"'get_expert_mapping' must be implemented"
)
else:
return get_packed_modules_mapping(model)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/model_manager.py | vllm/lora/model_manager.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import math
from collections.abc import Callable
from typing import TypeVar
import regex as re
import torch
from torch import nn
from vllm.config import VllmConfig
from vllm.config.lora import LoRAConfig, ModelConfig
from vllm.logger import init_logger
from vllm.lora.layers import (
BaseLayerWithLoRA,
FusedMoE3DWithLoRA,
LoRAMapping,
LoRAMappingType,
)
from vllm.lora.lora_model import LoRAModel
from vllm.lora.lora_weights import LoRALayerWeights, PackedLoRALayerWeights
from vllm.lora.punica_wrapper import PunicaWrapperBase, get_punica_wrapper
from vllm.lora.utils import (
from_layer,
from_layer_logits_processor,
get_supported_lora_modules,
is_moe_model,
process_packed_modules_mapping,
replace_submodule,
)
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.models import SupportsLoRA, supports_multimodal
from vllm.model_executor.models.interfaces import is_pooling_model
from vllm.model_executor.models.module_mapping import MultiModelKeys
from vllm.model_executor.models.utils import PPMissingLayer
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.utils.cache import LRUCache
from vllm.utils.platform_utils import is_pin_memory_available
from vllm.v1.worker.utils import MultiModalBudget
logger = init_logger(__name__)
T = TypeVar("T")
DEFAULT_LANGUAGE_WRAPPER_KEY = "language_model"
class AdapterLRUCache(LRUCache[int, T]):
def __init__(self, capacity: int, deactivate_fn: Callable[[int], object]):
super().__init__(capacity)
self.deactivate_fn = deactivate_fn
def _on_remove(self, key: int, value: T | None):
logger.debug("Removing adapter int id: %d", key)
self.deactivate_fn(key)
return super()._on_remove(key, value)
class LoRAModelManager:
"""A manager that manages multiple LoRA-fine-tuned models."""
def __init__(
self,
model: SupportsLoRA,
max_num_seqs: int,
max_num_batched_tokens: int,
vocab_size: int,
lora_config: LoRAConfig,
device: torch.device,
vllm_config: VllmConfig | None = None,
):
"""Create a LoRAModelManager and adapter for a given model.
Args:
model: the model to be adapted.
max_num_seqs: the maximum number of sequences model can run in a
single batch.
max_num_batched_tokens: the maximum number of tokens model can run
in a single batch.
vocab_size: the vocab size of the model.
lora_config: the LoRA configuration.
"""
self.model: SupportsLoRA = model
self.supported_lora_modules = get_supported_lora_modules(self.model)
assert self.supported_lora_modules, (
f"No supported LoRA modules found in {self.model.__class__.__name__}."
)
self._registered_adapters: dict[int, LoRAModel] = {}
# Dict instead of a set for compatibility with LRUCache.
self._active_adapters: dict[int, None] = {}
self.adapter_type = "LoRA"
self.lora_config = lora_config
self.device = device
self.max_num_seqs = max_num_seqs
assert self.capacity >= self.lora_slots
self.max_num_batched_tokens = math.ceil(max_num_batched_tokens / 8) * 8
self.lora_index_to_id: list[int | None] = [None] * self.lora_slots
self.vocab_size = vocab_size
self.packed_modules_mapping = process_packed_modules_mapping(self.model)
self.is_pooling_model = is_pooling_model(self.model)
self.packed_modules: dict[str, list[str]] = {}
self.modules: dict[str, BaseLayerWithLoRA] = {}
# Dict instead of a set for compatibility with LRUCache.
self._last_mapping: LoRAMapping | None = None
self._is_3d_moe_model = is_moe_model(self.model) and self.model.is_3d_moe_weight
self._init_punica_wrapper(max_num_batched_tokens, vllm_config)
self._create_lora_modules()
self.model.lora_manager = self
def _init_punica_wrapper(
self, max_num_batched_tokens: int, vllm_config: VllmConfig
) -> None:
# Used to indicate whether the model is a multimodal model
self.supports_mm: bool = (
supports_multimodal(self.model)
# In case the model only supports LoRA for
# text modules (e.g. ChatGLM)
and hasattr(self.model, "get_mm_mapping")
)
self.punica_wrapper_mapping: dict[str, PunicaWrapperBase] = {}
if self.supports_mm:
self._maybe_init_mm(vllm_config, max_num_batched_tokens)
else:
llm_punica_wrapper = get_punica_wrapper(
max_num_batched_tokens,
max_batches=self.max_num_seqs,
device=self.device,
lora_config=self.lora_config,
)
self.punica_wrapper_mapping[DEFAULT_LANGUAGE_WRAPPER_KEY] = (
llm_punica_wrapper
)
def _maybe_init_mm(self, vllm_config: VllmConfig, max_num_batched_tokens) -> None:
self.supports_tower_connector_lora = False
model_config: ModelConfig = vllm_config.model_config
self.mm_mapping: MultiModelKeys = self.model.get_mm_mapping()
# Only one language model can be included in the model.
assert len(self.mm_mapping.language_model) == 1
# Language model punica wrapper
llm_punica_wrapper = get_punica_wrapper(
max_num_batched_tokens,
max_batches=self.max_num_seqs,
device=self.device,
lora_config=self.lora_config,
)
lm_prefix = self.mm_mapping.language_model[0]
self.punica_wrapper_mapping[lm_prefix] = llm_punica_wrapper
if self.lora_config.enable_tower_connector_lora:
self.mm_processor_info = MULTIMODAL_REGISTRY.create_processor(
model_config
).info
self.supports_tower_connector_lora = self.supports_mm and hasattr(
self.model, "get_num_mm_encoder_tokens"
)
if not self.supports_tower_connector_lora:
return
logger.warning(
"LoRA for the tower and connector of multimodal models is "
"experimental and may contain bugs. Please report any related issues on "
"GitHub if you encounter them."
)
mm_budget = MultiModalBudget(
model_config,
vllm_config.scheduler_config,
MULTIMODAL_REGISTRY,
)
limit_per_prompt: int = max(
self.mm_processor_info.get_allowed_mm_limits().values()
)
num_encoder_tokens = self.model.get_num_mm_encoder_tokens(
mm_budget.get_encoder_budget()
)
# Tower wrappers
tower_punica_wrapper = get_punica_wrapper(
num_encoder_tokens,
max_batches=self.max_num_seqs * limit_per_prompt,
device=self.device,
lora_config=self.lora_config,
)
for prefix in self.mm_mapping.tower_model:
self.punica_wrapper_mapping[prefix] = tower_punica_wrapper
# Use wrapper for connector if present.
if self.mm_mapping.connector:
if hasattr(self.model, "get_num_mm_connector_tokens"):
connector_tokens = self.model.get_num_mm_connector_tokens(
num_encoder_tokens
)
connector_punica_wrapper = get_punica_wrapper(
connector_tokens,
max_batches=self.max_num_seqs * limit_per_prompt,
device=self.device,
lora_config=self.lora_config,
)
for prefix in self.mm_mapping.connector:
self.punica_wrapper_mapping[prefix] = connector_punica_wrapper
else:
logger.warning_once(
"Connector LoRA support disabled: model does not implement "
"get_num_mm_connector_tokens(). This method is required to "
"determine the connector's token budget for LoRA operations."
)
def __len__(self) -> int:
return len(self._registered_adapters)
@property
def capacity(self) -> int:
return self.lora_config.max_cpu_loras
@property
def lora_slots(self) -> int:
return self.lora_config.max_loras
@property
def adapter_slots(self) -> int:
return self.lora_slots
def activate_adapter(
self,
lora_id: int,
) -> bool:
"""Move LoRA into a GPU buffer to be used in the forward pass."""
if lora_id in self._active_adapters:
return False
first_free_slot = next(
(
(i, lora_id)
for i, lora_id in enumerate(self.lora_index_to_id)
if lora_id is None
),
None,
)
if first_free_slot is None:
raise ValueError("No free lora slots")
index, _ = first_free_slot
self._active_adapters[lora_id] = None
lora_model = self._registered_adapters[lora_id]
logger.debug(
"Activating LoRA. int id: %d, slot index: %d", lora_model.id, index
)
self.lora_index_to_id[index] = lora_model.id
for module_name, module in self.modules.items():
module_lora = self._get_lora_layer_weights(lora_model, module_name)
if not module_lora:
module.reset_lora(index)
continue
# Note (gnovack) - If MOE lora weights are not split into
# num_experts chunks, we split them here
if isinstance(module, FusedMoE3DWithLoRA) and torch.is_tensor(
module_lora.lora_a
):
# Handle PEFT file format where experts.base_layer is the
# gate_up_proj and experts is the down_proj
gate_up_proj_lora = self._get_lora_layer_weights(
lora_model, module_name + ".base_layer"
)
down_proj_lora = module_lora
# FIXME Edge case where LoRA is not added to gate_up_proj
# or down_proj
assert gate_up_proj_lora is not None
assert down_proj_lora is not None
if self._is_3d_moe_model:
module_lora.lora_a = [
gate_up_proj_lora.lora_a,
down_proj_lora.lora_a,
]
module_lora.lora_b = [
gate_up_proj_lora.lora_b,
down_proj_lora.lora_b,
]
else:
# Some 3D MoE models haven't added the `is_3d_moe_weight`
# attribute yet, so fallback here
num_experts = module_lora.lora_a.shape[0] // module_lora.rank
gate_proj_a = gate_up_proj_lora.lora_a.chunk(num_experts, dim=0)
up_proj_a = gate_up_proj_lora.lora_a.chunk(num_experts, dim=0)
gate_proj_b = gate_up_proj_lora.lora_b[::2, ...].chunk(
num_experts, dim=-1
)
up_proj_b = gate_up_proj_lora.lora_b[1::2, ...].chunk(
num_experts, dim=-1
)
down_proj_a = down_proj_lora.lora_a.chunk(num_experts, dim=0)
down_proj_b = down_proj_lora.lora_b.chunk(num_experts, dim=-1)
lora_a = []
lora_b = []
for i in range(num_experts):
lora_a.append(gate_proj_a[i])
lora_a.append(down_proj_a[i])
lora_a.append(up_proj_a[i])
lora_b.append(gate_proj_b[i])
lora_b.append(down_proj_b[i])
lora_b.append(up_proj_b[i])
module_lora.lora_a = lora_a
module_lora.lora_b = lora_b
module.set_lora(
index,
module_lora.lora_a,
module_lora.lora_b,
)
return True
def _deactivate_adapter(self, lora_id: int):
try:
index = self.lora_index_to_id.index(lora_id)
self.lora_index_to_id[index] = None
except ValueError:
pass
def _add_adapter(self, lora: LoRAModel):
self._create_merged_loras_inplace(lora)
self._registered_adapters[lora.id] = lora
def pin_adapter(self, lora_id: int) -> bool:
"""Pin a LoRAModel in the manager cache."""
raise NotImplementedError(
"Pinning is not supported in LoRAModelManager. "
"Use LRUCacheLoRAModelManager for pinning"
) # type: ignore
def _set_adapter_mapping(self, mapping: LoRAMapping) -> None:
# Default to the main language model wrapper
if not (self.supports_mm and self.supports_tower_connector_lora):
target_prefix = (
self.mm_mapping.language_model[0]
if self.supports_mm
else DEFAULT_LANGUAGE_WRAPPER_KEY
)
elif mapping.type == LoRAMappingType.TOWER and self.mm_mapping.tower_model:
target_prefix = self.mm_mapping.tower_model[0]
elif mapping.type == LoRAMappingType.CONNECTOR and self.mm_mapping.connector:
target_prefix = self.mm_mapping.connector[0]
else:
target_prefix = self.mm_mapping.language_model[0]
punica_wrapper = self._get_punica_wrapper(target_prefix)
assert punica_wrapper is not None
punica_wrapper.update_metadata(
mapping,
self.lora_index_to_id,
self.lora_slots + 1,
self.vocab_size,
)
def remove_all_adapters(self):
"""Remove all LoRAModels from the manager."""
self._registered_adapters.clear()
self.lora_index_to_id = [None] * self.lora_slots
self._active_adapters.clear()
def _create_lora_modules(self):
def _parent_module(module_name: str) -> str:
# module name is a dot separated name.
# for example:
# - given an input 'x.y.z' return 'x.y'
# - given an input 'x' return ''
return module_name.rpartition(".")[0]
for module_name, module in self.model.named_modules(remove_duplicate=False):
if isinstance(module, PPMissingLayer):
continue
if not self._match_target_modules(module_name):
continue
punica_wrapper = self._get_punica_wrapper(module_name)
if punica_wrapper is None:
logger.warning(
"Regarding %s, vLLM currently only supports adding LoRA to"
" language model, %s will be ignored.",
self.model.__class__.__name__,
module_name,
)
continue
parts = module_name.split(".")[-1]
packed_moduled_lst = self.packed_modules_mapping.get(parts, [])
if isinstance(module, FusedMoE):
# packed_moduled_lst is used here to just determine whether to
# instantiate FusedMoE3DWithLoRA or FusedMoEWithLoRA, and the
# difference between these two LoRA layers is whether the
# LoRA weights of w1 and w3 have already been fused on disk.
packed_moduled_lst = ["w13"] if self._is_3d_moe_model else ["w1", "w3"]
new_module = replace_submodule(
self.model,
module_name,
from_layer(
module,
self.lora_slots,
self.lora_config,
packed_moduled_lst,
self.model.config,
),
)
# (yard1): TODO make this more robust
if "lm_head" in module_name:
logits_processor_module_name = "logits_processor"
parent_module = _parent_module(module_name)
if parent_module:
logits_processor_module_name = (
f"{parent_module}.{logits_processor_module_name}"
)
logits_processor_module = self.model.get_submodule(
logits_processor_module_name
)
new_module = replace_submodule(
self.model,
logits_processor_module_name,
from_layer_logits_processor(
logits_processor_module,
module,
self.lora_slots,
self.lora_config,
self.model.config,
),
)
# In some models, especially multimodal ones, layers with the same
# name may have different types, such as nn.Linear and
# ReplicatedLinear. The nn.Linear layers cannot be replaced with
# LoRA layers, leading to assertion error. The following check
# aims to prevent this error
if self.supports_mm and not isinstance(new_module, BaseLayerWithLoRA):
continue
self.register_module(module_name, new_module)
self._register_packed_modules(module_name)
# All lora layers share the same punica_wrapper based on reference.
new_module.set_mapping(punica_wrapper)
def register_module(self, module_name: str, module: "BaseLayerWithLoRA"):
assert isinstance(module, BaseLayerWithLoRA), (
f"Module {module_name} must be a BaseLayerWithLoRA instance, "
f"got {type(module)}"
)
self.modules[module_name] = module
def create_dummy_lora(
self,
lora_id: int,
rank: int,
embedding_modules: dict[str, str] | None = None,
) -> LoRAModel:
"""Create zero-initialized LoRAModel for warmup."""
model = LoRAModel(lora_id, rank, {})
for module_name, module in self.model.named_modules():
if (
not self._match_target_modules(module_name)
or not isinstance(module, BaseLayerWithLoRA)
or self._get_punica_wrapper(module_name) is None
):
continue
parts = module_name.split(".")
if module_name not in self.packed_modules:
assert embedding_modules is not None
if parts[-1] in embedding_modules:
input_dim = (
module.base_layer.org_vocab_size
if hasattr(module.base_layer, "org_vocab_size")
else module.base_layer.weight.shape[1]
)
output_dim = (
module.base_layer.embedding_dim
if hasattr(module.base_layer, "embedding_dim")
else module.base_layer.weight.shape[0]
)
lora = LoRALayerWeights.create_dummy_lora_weights(
module_name,
input_dim,
output_dim,
rank,
module.lora_a_stacked[0].dtype,
"cpu",
)
model.loras[module_name] = lora
elif module.__class__.__name__ == "FusedMoE3DWithLoRA":
# Case for 3D moe model
# w2
lora = LoRALayerWeights.create_dummy_lora_weights(
module_name,
module.w2_input_size,
module.w2_output_size,
rank * module.w2_lora_a_stacked[0].shape[1], # rank*num_experts
module.w2_lora_a_stacked[0].dtype,
"cpu",
)
model.loras[module_name] = lora
# w13
lora = LoRALayerWeights.create_dummy_lora_weights(
module_name,
module.w13_input_size,
module.w13_output_size,
rank
* module.w13_lora_a_stacked[0].shape[1], # rank*num_experts
module.w13_lora_a_stacked[0].dtype,
"cpu",
)
model.loras[module_name + ".base_layer"] = lora
else:
lora = LoRALayerWeights.create_dummy_lora_weights(
module_name,
module.lora_a_stacked[0].shape[-1],
module.lora_b_stacked[0].shape[-2],
rank,
module.lora_a_stacked[0].dtype,
"cpu",
)
model.loras[module_name] = lora
else:
parts = module_name.split(".")
replacements = self.packed_modules_mapping[parts[-1]]
subloras: list[LoRALayerWeights | None] = []
for i, r in enumerate(replacements):
lora = LoRALayerWeights.create_dummy_lora_weights(
module_name + "." + r,
module.lora_a_stacked[i].shape[-1],
module.lora_b_stacked[i].shape[-2],
rank,
module.lora_a_stacked[i].dtype,
"cpu",
)
subloras.append(lora)
if module.__class__.__name__ == "FusedMoEWithLoRA":
lora = PackedLoRALayerWeights.pack_moe(subloras, module_name)
else:
lora = PackedLoRALayerWeights.pack(subloras)
model.loras[module_name] = lora
return model
def _match_target_modules(self, module_name: str):
return any(
re.match(
r".*\.{target_module}$".format(target_module=target_module), module_name
)
or target_module == module_name
for target_module in self.supported_lora_modules
)
def _get_punica_wrapper(self, module_name: str) -> PunicaWrapperBase | None:
"""
Determine whether this module supports LoRA and which wrapper to use.
"""
# For language model (early return)
if not self.supports_mm:
return self.punica_wrapper_mapping[DEFAULT_LANGUAGE_WRAPPER_KEY]
# For multimodal model
# NOTE Sort by prefix length (descending) to match the longest prefix first
# e.g., 'visual.merger' should match 'visual.merger' instead of 'visual.'
for prefix in sorted(self.punica_wrapper_mapping.keys(), key=len, reverse=True):
if module_name.startswith(prefix):
return self.punica_wrapper_mapping[prefix]
return None
def _register_packed_modules(self, module_full_name: str) -> None:
parts = module_full_name.split(".")
module_name = parts[-1]
replacements = self.packed_modules_mapping.get(module_name, [])
# When replacements is less than or equal to 1, it indicates that this
# module is not a packed module.
if len(replacements) <= 1:
return
prefix = ".".join(parts[:-1])
self.packed_modules[module_full_name] = [
prefix + "." + r if prefix else r for r in replacements
]
def _create_merged_loras_inplace(self, lora_model: LoRAModel) -> None:
for module_name, new_module_names in self.packed_modules.items():
replacement_loras: list[LoRALayerWeights | None] = []
replaced_module: set[str] = set()
has_replacement = False
for r in new_module_names:
lora = self._get_lora_layer_weights(lora_model, r)
replacement_loras.append(lora)
if lora:
has_replacement = True
replaced_module.add(r)
if not has_replacement:
continue
for i in range(len(replacement_loras)):
if replacement_loras[i]:
continue
replacement_loras[i] = None
# HACK Temporary solution for the pool model.
if self.is_pooling_model and not lora_model.check_lora_name(module_name):
replaced_module_name = module_name.replace("model.", "")
if lora_model.check_lora_name(module_name):
module_name = replaced_module_name
if module_name.endswith(".experts"):
lora_model.loras[module_name] = PackedLoRALayerWeights.pack_moe(
replacement_loras, module_name
)
else:
lora_model.loras[module_name] = PackedLoRALayerWeights.pack(
replacement_loras
)
# Remove the modules that have been replaced.
for module in replaced_module:
lora_model.loras.pop(module, None)
for lora in lora_model.loras.values():
lora.optimize()
first_lora: LoRALayerWeights = next(iter(lora_model.loras.values()))
assert first_lora.lora_a is not None
if isinstance(first_lora.lora_a, list):
lora_device = next(iter(first_lora.lora_a))
else:
lora_device = first_lora.lora_a.device
# Execute pin_memory after LoRA weight merging, mainly because:
# 1. Some MoE models have a large number of LoRA weights. If we
# perform # pin_memory immediately after loading weights, the
# overhead is significant.
# 2. The weight packing above (e.g., pack_moe) may invalidate the
# pin_memory allocation, so we execute it after packing.
pin_memory = str(lora_device) == "cpu" and is_pin_memory_available()
if pin_memory:
for lora in lora_model.loras.values():
if isinstance(lora.lora_a, list):
for index in range(len(lora.lora_a)):
if lora.lora_a[index] is None:
continue
lora.lora_a[index] = lora.lora_a[index].pin_memory()
lora.lora_b[index] = lora.lora_b[index].pin_memory()
else:
lora.lora_a = lora.lora_a.pin_memory()
lora.lora_b = lora.lora_b.pin_memory()
def _get_lora_layer_weights(
self, lora_model: LoRAModel, module_name: str
) -> LoRALayerWeights | None:
org_module_name = module_name
if self.is_pooling_model and not lora_model.check_lora_name(module_name):
# If it's a pool model, and the layer name is not found,
# remove the prefix 'model.' and search again.
module_name = module_name.replace("model.", "")
if lora_model.check_lora_name(module_name):
org_module_name = module_name
logger.info_once(
"For the pool model, successfully loaded the LoRA weights "
"after removing the prefix 'model.'."
)
return lora_model.get_lora(org_module_name)
def deactivate_adapter(self, adapter_id: int) -> bool:
if adapter_id not in self._active_adapters:
return False
self._deactivate_adapter(adapter_id)
self._active_adapters.pop(adapter_id, None)
return True
def add_adapter(self, adapter: LoRAModel) -> bool:
logger.debug("Adding lora. Model id: %d, int id: %d", adapter.id, adapter.id)
if adapter.id in self._registered_adapters:
return False
if len(self._registered_adapters) >= self.capacity:
raise RuntimeError("No free adapter slots.")
self._add_adapter(adapter)
return True
def set_adapter_mapping(self, mapping: LoRAMapping) -> None:
if self._last_mapping != mapping:
self._set_adapter_mapping(mapping)
self._last_mapping = mapping
def remove_adapter(self, adapter_id: int) -> bool:
self.deactivate_adapter(adapter_id)
if adapter_id not in self._registered_adapters:
return False
self._registered_adapters.pop(adapter_id, None)
return True
def list_adapters(self) -> dict[int, LoRAModel]:
return dict(self._registered_adapters)
def get_adapter(self, adapter_id: int) -> LoRAModel | None:
return self._registered_adapters.get(adapter_id)
class LoRALRUCache(AdapterLRUCache[LoRAModel]):
def __init__(self, capacity: int, deactivate_lora_fn: Callable[[int], bool]):
super().__init__(capacity, deactivate_lora_fn)
class LRUCacheLoRAModelManager(LoRAModelManager):
"""A model manager that manages multiple LoRAs with LRU cache."""
def __init__(
self,
model: nn.Module,
max_num_seqs: int,
max_num_batched_tokens: int,
vocab_size: int,
lora_config: LoRAConfig,
device: torch.device,
vllm_config: VllmConfig | None = None,
):
super().__init__(
model,
max_num_seqs,
max_num_batched_tokens,
vocab_size,
lora_config,
device,
vllm_config,
)
self._registered_adapters: LoRALRUCache = LoRALRUCache(
self.capacity, self.deactivate_adapter
)
self._active_adapters: LoRALRUCache = LoRALRUCache(
self.lora_slots, self._deactivate_adapter
)
def list_adapters(self) -> dict[int, LoRAModel]:
"""List all registered LoRAModels."""
return dict(self._registered_adapters.cache)
def add_adapter(self, lora: LoRAModel) -> bool:
"""Add a LoRAModel to the manager."""
logger.debug("Adding lora. Model id: %d, int id: %d", lora.id, lora.id)
if lora.id not in self._registered_adapters:
self._add_adapter(lora)
was_added = True
else:
# We always touch to update the LRU cache order
self._registered_adapters.touch(lora.id)
was_added = False
return was_added
def activate_adapter(
self,
lora_id: int,
) -> bool:
if (
lora_id not in self._active_adapters
and len(self._active_adapters) >= self.lora_slots
):
self._active_adapters.remove_oldest()
result = super().activate_adapter(lora_id)
# We always touch to update the LRU cache order
self._active_adapters.touch(lora_id)
return result
def remove_oldest_adapter(self) -> bool:
if len(self._registered_adapters) > 0:
self._registered_adapters.remove_oldest()
return True
return False
def pin_adapter(self, lora_id: int) -> bool:
"""Pin a LoRAModel in the manager cache."""
self._pin_lora_in_cpu_cache(lora_id)
self._pin_lora_in_gpu_cache(lora_id)
return True
def _pin_lora_in_cpu_cache(self, lora_id: int):
try:
self._registered_adapters.pin(lora_id)
except ValueError as err:
raise ValueError(
f"Pinning failed. LoRA {lora_id} is not registered."
) from err
def _pin_lora_in_gpu_cache(self, lora_id: int):
if lora_id not in self._active_adapters:
# move lora to gpu if not already active
self.activate_adapter(lora_id)
self._active_adapters.pin(lora_id)
def create_lora_manager(
model: nn.Module,
max_num_seqs: int,
max_num_batched_tokens: int,
vocab_size: int,
lora_config: LoRAConfig,
vllm_config: VllmConfig,
device: torch.device,
lora_manager_cls: type[LoRAModelManager] = LoRAModelManager,
**kwargs,
) -> LoRAModelManager:
"""Create a LoRA adapter for a given model."""
if not isinstance(model, SupportsLoRA):
raise ValueError(f"Model {type(model)} is not supported for LoRA.")
lora_manager = lora_manager_cls(
model=model,
max_num_seqs=max_num_seqs,
max_num_batched_tokens=max_num_batched_tokens,
vocab_size=vocab_size,
lora_config=lora_config,
vllm_config=vllm_config,
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | true |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/__init__.py | vllm/lora/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/lora_weights.py | vllm/lora/lora_weights.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Sequence as GenericSequence
from typing import Optional
import torch
import torch.types
from vllm.lora.peft_helper import PEFTHelper
from vllm.utils.platform_utils import is_pin_memory_available
class LoRALayerWeights:
    """LoRA weights for a layer composed of two low rank matrices.

    The effective delta for the layer is ``lora_b @ lora_a * scaling``,
    where ``scaling`` defaults to the conventional ``lora_alpha / rank``.
    """

    def __init__(
        self,
        module_name: str,
        rank: int,
        lora_alpha: int,
        lora_a: torch.Tensor,
        lora_b: torch.Tensor,
        scaling: float | None = None,
    ) -> None:
        self.module_name = module_name
        self.rank = rank
        self.lora_alpha = lora_alpha
        self.lora_a = lora_a
        self.lora_b = lora_b
        # Fall back to the standard LoRA scaling when none is supplied.
        self.scaling = (self.lora_alpha / self.rank) if scaling is None else scaling

    def optimize(self) -> "LoRALayerWeights":
        """Optimize the LoRA by merging the scaling into lora_b."""
        if self.scaling != 1:
            self.lora_b *= self.scaling
            self.scaling = 1
        return self

    @property
    def input_dim(self) -> int:
        # lora_a is (rank, input_dim).
        return self.lora_a.shape[1]

    @property
    def output_dim(self) -> int:
        # lora_b is (output_dim, rank).
        return self.lora_b.shape[0]

    @property
    def is_packed(self) -> bool:
        return False

    @classmethod
    def from_config(
        cls,
        module_name: str,
        peft_helper: PEFTHelper,
    ) -> "LoRALayerWeights":
        """Build a weight-less holder from a PEFT config.

        lora_a and lora_b are set to None for config-based construction;
        the caller is expected to fill them in later.
        """
        return cls(
            module_name,
            peft_helper.r,
            peft_helper.lora_alpha,
            None,
            None,
            peft_helper.vllm_lora_scaling_factor,
        )

    @classmethod
    def create_dummy_lora_weights(
        cls,
        module_name: str,
        input_dim: int,
        output_dim: int,
        rank: int,
        dtype: torch.dtype,
        device: torch.types.Device,
    ) -> "LoRALayerWeights":
        """Allocate zero-filled lora_a/lora_b of the appropriate shapes."""
        # Only pin host memory for CPU tensors, and only when supported.
        pin_memory = str(device) == "cpu" and is_pin_memory_available()

        def _zeros(shape: list[int]) -> torch.Tensor:
            return torch.zeros(
                shape, dtype=dtype, device=device, pin_memory=pin_memory
            )

        return cls(
            module_name,
            rank=rank,
            lora_alpha=1,
            lora_a=_zeros([rank, input_dim]),
            lora_b=_zeros([output_dim, rank]),
        )
class PackedLoRALayerWeights(LoRALayerWeights):
    """LoRA used for packed layers (eg. qkv_proj).

    Unlike the base class, ``lora_a``/``lora_b`` are per-sub-module lists
    (``None`` marks a sub-module without a LoRA), and ``lora_alphas`` /
    ``scaling`` are the matching per-sub-module lists.
    """

    def __init__(
        self,
        module_name: str,
        rank: int,
        lora_alphas: list[int | None],
        lora_a: list[torch.Tensor | None],
        lora_b: list[torch.Tensor | None],
        scaling: list[float] | None = None,
    ) -> None:
        super().__init__(
            module_name=module_name,
            rank=rank,
            # The scalar lora_alpha of the base class is unused for packed
            # weights; the per-slice values live in self.lora_alphas.
            lora_alpha=0,
            lora_a=lora_a,
            lora_b=lora_b,
            scaling=scaling,  # type: ignore
        )
        self.lora_alphas = lora_alphas
        if scaling is None:
            # Same alpha/rank formula as the base class, applied per slice.
            self.scaling = [  # type: ignore
                lora_alpha / self.rank  # type: ignore # noqa
                for lora_alpha in self.lora_alphas
            ]

    @classmethod
    def pack(
        cls, loras: GenericSequence[Optional["LoRALayerWeights"]]
    ) -> "PackedLoRALayerWeights":
        """Pack a list of LoRAs into a single LoRA.

        If LoRA is None, it signifies that the submodule does not have a LoRA.
        """
        first_lora = next(lora for lora in loras if lora is not None)
        # optimize() folds each LoRA's scaling into its lora_b, which is why
        # the packed scaling below can be set to 1 for present slices.
        for lora in loras:
            if lora is None:
                continue
            lora.optimize()
        # NOTE(review): rank/module_name are taken from the first non-None
        # LoRA; presumably all slices share them — not verified here.
        rank = first_lora.rank
        module_name = first_lora.module_name
        obj = cls(
            module_name,
            rank,
            [lora.lora_alpha if lora is not None else None for lora in loras],
            [lora.lora_a if lora is not None else None for lora in loras],
            [lora.lora_b if lora is not None else None for lora in loras],
            scaling=[
                1 if lora is not None else None  # type: ignore
                for lora in loras
            ],
        )
        return obj

    @classmethod
    def pack_moe(
        cls, loras: GenericSequence[Optional["LoRALayerWeights"]], module_name: str
    ) -> "PackedLoRALayerWeights":
        """Pack per-expert (w1, w2, w3) LoRAs into one stacked LoRA.

        ``loras`` is laid out as [w1_e0, w2_e0, w3_e0, w1_e1, ...]; the
        resulting lora_a/lora_b entries gain a leading num_experts dimension.
        Every entry must be non-None.
        """
        first_lora = next(lora for lora in loras if lora is not None)
        assert first_lora is not None
        # rank/alpha are taken from the first LoRA and assumed uniform.
        rank = first_lora.rank
        lora_alpha = first_lora.lora_alpha
        assert len(loras) % 3 == 0
        w1_lora_a_lst = []
        w2_lora_a_lst = []
        w3_lora_a_lst = []
        w1_lora_b_lst = []
        w2_lora_b_lst = []
        w3_lora_b_lst = []
        # TODO: Consider the case where some experts don't have LoRA added.
        for eid in range(len(loras) // 3):
            w1_lora = loras[eid * 3]
            w2_lora = loras[eid * 3 + 1]
            w3_lora = loras[eid * 3 + 2]
            assert w1_lora is not None
            assert w2_lora is not None
            assert w3_lora is not None
            w1_lora_a_lst.append(w1_lora.lora_a)
            w2_lora_a_lst.append(w2_lora.lora_a)
            w3_lora_a_lst.append(w3_lora.lora_a)
            w1_lora_b_lst.append(w1_lora.lora_b)
            w2_lora_b_lst.append(w2_lora.lora_b)
            w3_lora_b_lst.append(w3_lora.lora_b)
        w1_lora_a = torch.stack(w1_lora_a_lst, dim=0)  # (num_experts,rank,input_size)
        w2_lora_a = torch.stack(w2_lora_a_lst, dim=0)
        w3_lora_a = torch.stack(w3_lora_a_lst, dim=0)
        w1_lora_b = torch.stack(w1_lora_b_lst, dim=0)  # (num_experts,output_size,rank)
        w2_lora_b = torch.stack(w2_lora_b_lst, dim=0)
        w3_lora_b = torch.stack(w3_lora_b_lst, dim=0)
        obj = cls(
            module_name,
            rank,
            [lora_alpha, lora_alpha, lora_alpha],
            [w1_lora_a, w2_lora_a, w3_lora_a],
            [w1_lora_b, w2_lora_b, w3_lora_b],
        )
        return obj

    def optimize(self) -> "PackedLoRALayerWeights":
        """Optimize the LoRA by merging the scaling into lora_b."""
        for i in range(len(self.lora_b)):
            # Skip slices that are already folded or absent.
            if self.scaling[i] == 1 or self.lora_b[i] is None:  # type: ignore
                continue
            self.lora_b[i] *= self.scaling[i]  # type: ignore
            self.scaling[i] = 1  # type: ignore
        return self

    @property
    def input_dim(self) -> int:
        # Ambiguous for packed weights (one dim per slice).
        raise NotImplementedError()

    @property
    def output_dim(self) -> int:
        # Ambiguous for packed weights (one dim per slice).
        raise NotImplementedError()

    @property
    def is_packed(self) -> bool:
        return True
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/punica_wrapper/punica_base.py | vllm/lora/punica_wrapper/punica_base.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Based on:
Chen, L., Ye, Z., Wu, Y., Zhuo, D., Ceze, L., & Krishnamurthy, A. (2023).
Punica: Multi-Tenant LoRA Serving.
https://arxiv.org/abs/2310.18547
"""
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
import torch
from .utils import compute_meta, convert_mapping
if TYPE_CHECKING:
# avoid circuit import
from vllm.lora.layers import LoRAMapping
class PunicaWrapperABC(ABC):
    """
    PunicaWrapper ABC.

    Declares the interface every backend-specific punica wrapper must
    implement: metadata bookkeeping plus the shrink/expand LoRA GEMMs.
    """

    @abstractmethod
    def update_metadata(
        self,
        mapping: "LoRAMapping",
        lora_index_to_id: list[int | None],
        max_loras: int,
        vocab_size: int,
        **kwargs,
    ) -> None:
        """
        Update the lora-related metadata
        """
        raise NotImplementedError

    @abstractmethod
    def add_shrink(
        self,
        y: tuple[torch.Tensor, ...] | torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        scale: float,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Performs GEMM for multiple slices of lora_a.
        """
        raise NotImplementedError

    @abstractmethod
    def add_expand(
        self,
        y: torch.Tensor,
        x: tuple[torch.Tensor, ...] | torch.Tensor,
        lora_b_stacked: tuple[torch.Tensor, ...],
        output_slices: tuple[int, ...],
        offset_start: int = 0,
        add_inputs=True,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Performs GEMM for multiple slices of lora_b.
        """
        raise NotImplementedError

    @abstractmethod
    def add_lora_embedding(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_b_stacked: torch.Tensor,
        add_inputs: bool = True,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Applies lora specifically for VocabParallelEmbeddingWithLoRA,
        and this layer only requires the expand operation.
        """
        raise NotImplementedError

    @abstractmethod
    def add_lora_linear(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        lora_b_stacked: tuple[torch.Tensor, ...],
        scale: float,
        output_slices: tuple[int, ...],
        *,
        buffer: tuple[torch.Tensor, ...] | None = None,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Applicable to linear-related lora.
        """
        raise NotImplementedError

    @abstractmethod
    def add_lora_logits(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: torch.Tensor,
        lora_b_stacked: torch.Tensor,
        scale,
        *,
        buffer: torch.Tensor | None = None,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Applies lora specifically for LogitsProcessorWithLoRA.
        """
        raise NotImplementedError
class PunicaWrapperBase(PunicaWrapperABC):
    """
    PunicaWrapperBase is designed to manage and provide metadata for the punica
    kernel. The main function is to maintain the state information for
    Multi-LoRA, and to provide the interface for the punica.
    """

    def __init__(
        self,
        max_num_batched_tokens: int,
        max_batches: int,
        device: torch.device | str,
        **kwargs,
    ):
        # Worst-case-sized persistent buffers; only a prefix of each is
        # valid for the current batch (lengths tracked in self.indices_len).
        self._token_lora_indices = torch.empty(
            max_num_batched_tokens, dtype=torch.long, device=device
        )
        self._sampler_indices = torch.empty(
            max_num_batched_tokens, dtype=torch.long, device=device
        )
        self._sampler_indices_padded = torch.empty(
            max_num_batched_tokens, dtype=torch.long, device=device
        )
        self._embeddings_indices = torch.empty(
            2, max_num_batched_tokens, dtype=torch.long, device=device
        )
        # 4 is the number of indices tensors.
        # base_indices, sampler_indices, sampler_indices_padded,
        # embeddings_indices
        self.indices_len: list[int | None] = [None] * 4
        # these attributes are the information required for sgmv kernel
        self._seq_start_locs = torch.empty(max_batches, dtype=torch.long, device=device)
        self._seq_lengths = torch.empty(max_batches, dtype=torch.long, device=device)
        self._lora_indices_per_batch = torch.empty(
            max_batches, dtype=torch.long, device=device
        )
        self.device: torch.device = device
        # Scalars describing the current batch; refreshed by
        # _update_prefill_metadata().  batch_size == -1 means "not set yet".
        self.max_length: int = 0
        self.token_nums: int = 0
        self.batch_size: int = -1
        self.is_prefill = False
        self.no_lora = False

    def _update_base_metadata(
        self,
        mapping: "LoRAMapping",
        lora_index_to_id: list[int | None],
        max_loras: int,
        vocab_size: int,
    ):
        # NOTE We have removed lora extra vocab support for now. So we set
        # extra_vocab_size always to 0, and extra_vocab_size will be removed.
        extra_vocab_size = 0
        (
            base_indices,
            sampler_indices,
            sampler_indices_padded,
            embeddings_indices,
            indices_len,
        ) = convert_mapping(
            mapping,
            lora_index_to_id,
            max_loras,
            vocab_size,
            extra_vocab_size,
            self.device,
        )
        # Copy into the prefix of each persistent buffer; the valid lengths
        # are recorded in self.indices_len below.
        self._token_lora_indices[: base_indices.shape[0]].copy_(base_indices)
        self._sampler_indices[: sampler_indices.shape[0]].copy_(sampler_indices)
        self._sampler_indices_padded[: sampler_indices_padded.shape[0]].copy_(
            sampler_indices_padded
        )
        self._embeddings_indices[
            : embeddings_indices.shape[0], : embeddings_indices.shape[1]
        ].copy_(embeddings_indices)
        self.indices_len[:] = indices_len

    def _update_prefill_metadata(self, token_lora_tensor: torch.Tensor) -> None:
        # Derive the sgmv kernel metadata from per-token lora indices and
        # stash it in the prefix of the per-batch buffers.
        (
            b_seq_start_tensor,
            seq_length_tensor,
            lora_indices_tensor,
            batch_size,
            max_length,
            token_nums,
            no_lora,
        ) = compute_meta(token_lora_tensor)
        self._seq_start_locs[: b_seq_start_tensor.shape[0]].copy_(b_seq_start_tensor)
        self._seq_lengths[: seq_length_tensor.shape[0]].copy_(seq_length_tensor)
        self._lora_indices_per_batch[: lora_indices_tensor.shape[0]].copy_(
            lora_indices_tensor
        )
        self.batch_size = batch_size
        self.max_length = max_length
        self.token_nums = token_nums
        self.no_lora = no_lora

    @property
    def prefill_metadata(
        self,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, int, int]:
        """
        This property provides a convenient way to access the necessary
        metadata for prefill-related kernel computations.
        1. seq_start_locs: Tensor of sequence start positions.
        2. seq_lengths: Tensor of sequence lengths.
        3. lora_indices_per_batch: Tensor of lora indices, and an index of
            -1 means no lora should be applied.
        4. batch_size: Batch size after clustering identical lora indices.
        5. max_length: The maximum sequence length in the batch.
        6. token_nums: The token numbers in the batch.
        """
        return (
            self._seq_start_locs[: self.batch_size],
            self._seq_lengths[: self.batch_size],
            self._lora_indices_per_batch[: self.batch_size],
            self.batch_size,
            self.max_length,
            self.token_nums,
        )

    @property
    def token_lora_indices(self) -> torch.Tensor:
        """
        This property provides the lora indices corresponding to each token
        in the batch. An index of -1 means no lora should be applied.
        """
        token_lora_len = self.indices_len[0]
        return self._token_lora_indices[:token_lora_len]

    @property
    def sampler_indices(self) -> torch.Tensor:
        """
        This property is used to access the lora indices specifically for
        LogitsProcessorWithLoRA.
        """
        sampler_indices_len = self.indices_len[1]
        return self._sampler_indices[:sampler_indices_len]

    @property
    def sampler_indices_padded(self) -> torch.Tensor:
        """
        This property provides access to padded sampler indices.
        """
        indices_padded_len = self.indices_len[2]
        return self._sampler_indices_padded[:indices_padded_len]

    @property
    def embeddings_indices(self) -> torch.Tensor:
        """
        This property provides access to the indices used for lora embeddings,
        specifically for VocabParallelEmbeddingWithLoRA.
        """
        embeddings_indices_len = self.indices_len[3]
        return self._embeddings_indices[:, :embeddings_indices_len]

    def update_metadata(
        self,
        mapping: "LoRAMapping",
        lora_index_to_id: list[int | None],
        max_loras: int,
        vocab_size: int,
        **kwargs,
    ):
        """Refresh all lora metadata for a new batch described by `mapping`."""
        self._update_base_metadata(mapping, lora_index_to_id, max_loras, vocab_size)
        if mapping.is_prefill:
            # Update metadata required for prefill-related operators.
            self._update_prefill_metadata(self.token_lora_indices)
            self.is_prefill = True
        else:
            self.is_prefill = False

    @abstractmethod
    def add_shrink(
        self,
        y: tuple[torch.Tensor, ...] | torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        scale: float,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Performs GEMM for multiple slices of lora_a.

        Semantics:
        for i in range(len(lora_a_stacked)):
            y[i] += (x @ lora_a_stacked[i]) * scale

        Args:
            y (Union[tuple[torch.Tensor, ...], torch.Tensor]): Output tensors
            x (torch.Tensor): Input tensor
            lora_a_stacked (tuple[torch.Tensor, ...]): lora_a's weights
            scale (float): Scaling factor for the operation
        """
        # TODO: implement it based on torch ops
        raise NotImplementedError

    @abstractmethod
    def add_expand(
        self,
        y: torch.Tensor,
        x: tuple[torch.Tensor, ...] | torch.Tensor,
        lora_b_stacked: tuple[torch.Tensor, ...],
        output_slices: tuple[int, ...],
        offset_start: int = 0,
        add_inputs=True,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Performs GEMM for multiple slices of lora_b.

        Semantics:
        offset = offset_start
        for i in range(len(lora_b_stacked)):
            slice = output_slices[i]
            y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i]
            offset += slice

        Args:
            y (torch.Tensor): Output tensor.
            x (Union[tuple[torch.Tensor, ...], torch.Tensor]): Input tensors
            lora_b_stacked (tuple[torch.Tensor, ...]): lora_b's weight
            output_slices (tuple[int, ...]): Every slice's size
            offset_start (int): The starting position of y, defaults to 0
            add_inputs (bool): Defaults to True.
        """
        # TODO: implement it based on torch ops
        raise NotImplementedError

    @abstractmethod
    def add_lora_embedding(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_b_stacked: torch.Tensor,
        add_inputs: bool = True,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Applies lora specifically for VocabParallelEmbeddingWithLoRA.
        and this layer only requires the expand operation.

        Semantics:
        y += x @ lora_b_stacked

        Args:
            y (torch.Tensor): Output tensor.
            x (torch.Tensor): Input tensor.
            lora_b_stacked (torch.Tensor): lora_b's weights.
            add_inputs (bool): Default to True.
        """
        # TODO: implement it based on torch ops
        raise NotImplementedError

    @abstractmethod
    def add_lora_linear(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        lora_b_stacked: tuple[torch.Tensor, ...],
        scale: float,
        output_slices: tuple[int, ...],
        *,
        buffer: tuple[torch.Tensor, ...] | None = None,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Applicable to linear-related lora.

        Semantics:
        for i in range(len(lora_a_stacked)):
            y[i] += (
                x[i].unsqueeze(0)
                @ lora_a_stacked[indices[i], layer_idx, :, :]
                @ lora_b_stacked[indices[i], layer_idx, :, :]
                * scale
            ).squeeze(0)

        Args:
            y (torch.Tensor): Output tensor. Will be changed in-place.
            x (torch.Tensor): Input tensor
            lora_a_stacked (tuple[torch.Tensor, ...]): lora_a's weight.
            lora_b_stacked (tuple[torch.Tensor, ...]): lora_b's weight.
            scale (float): Scaling factor.
            output_slices (tuple[int, ...]): Every slice's size.
            buffer (Optional[tuple[torch.Tensor, ...]]): Defaults to None.
        """
        # TODO: implement it based on torch ops
        raise NotImplementedError

    @abstractmethod
    def add_lora_logits(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: torch.Tensor,
        lora_b_stacked: torch.Tensor,
        scale,
        *,
        buffer: torch.Tensor | None = None,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Applies lora specifically for LogitsProcessorWithLoRA.

        Semantics:
        buffer = (x @ lora_a_stacked) * scale
        y += buffer @ lora_b_stacked

        Args:
            y (torch.Tensor): Output tensor.
            x (torch.Tensor): Input tensor.
            lora_a_stacked (torch.Tensor): lora_a's weights.
            lora_b_stacked (torch.Tensor):lora_b's weights.
            scale (float): Scaling factor.
            buffer (Optional[torch.Tensor]):Default to None.
        """
        # TODO: implement it based on torch ops
        raise NotImplementedError

    def moe_lora_align_block_size(
        self,
        topk_ids: torch.Tensor,
        num_tokens: int,
        block_size: int,
        num_experts: int,
        max_loras: int,
        adapter_enabled: torch.Tensor,
        expert_map: torch.Tensor | None = None,
        pad_sorted_ids: bool = False,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Aligns tokens and experts into block-sized chunks for LoRA-based
        mixture-of-experts (MoE) execution.

        Optional hook: backends without MoE LoRA support leave this
        unimplemented.
        """
        # TODO: implement it based on torch ops
        raise NotImplementedError

    def add_lora_fused_moe(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        lora_b_stacked: tuple[torch.Tensor, ...],
        topk_weights: torch.Tensor,
        sorted_token_ids: torch.Tensor,
        expert_ids: torch.Tensor,
        num_tokens_post_padded: torch.Tensor,
        max_lora_rank: int,
        top_k_num: int,
        shrink_config,
        expand_config,
        adapter_enabled: torch.Tensor,
        mul_routed_weight=False,
        fully_sharded: bool = False,
        offset: int = 0,
    ):
        """
        Performs a fused forward computation for LoRA of
        Mixture-of-Experts (MoE) layer.

        Optional hook: backends without MoE LoRA support leave this
        unimplemented.
        """
        # TODO: implement it based on torch ops
        raise NotImplementedError
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/punica_wrapper/punica_xpu.py | vllm/lora/punica_wrapper/punica_xpu.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Based on:
Chen, L., Ye, Z., Wu, Y., Zhuo, D., Ceze, L., & Krishnamurthy, A. (2023).
Punica: Multi-Tenant LoRA Serving.
https://arxiv.org/abs/2310.18547
"""
from typing import final
import torch
from vllm.lora.layers import LoRAMapping
from vllm.lora.ops.ipex_ops import bgmv_expand, bgmv_expand_slice, bgmv_shrink
from .punica_base import PunicaWrapperBase
@final
class PunicaWrapperXPU(PunicaWrapperBase):
    """
    PunicaWrapperXPU is designed to manage and provide metadata for the punica
    kernel. The main function is to maintain the state information for
    Multi-LoRA, and to provide the interface for the punica ipex kernel.
    """

    def __init__(
        self,
        max_num_batched_tokens: int,
        max_batches: int,
        device: torch.device | str,
        **kwargs,
    ):
        PunicaWrapperBase.__init__(self, max_num_batched_tokens, max_batches, device)

        # Mark the batch-dependent dimensions as dynamic so torch.compile
        # does not recompile for every batch size.
        torch._dynamo.mark_dynamic(self._token_lora_indices, 0)
        torch._dynamo.mark_dynamic(self._embeddings_indices, 1)
        torch._dynamo.mark_dynamic(self._sampler_indices_padded, 0)

    def update_metadata(
        self,
        mapping: LoRAMapping,
        lora_index_to_id: list[int | None],
        max_loras: int,
        vocab_size: int,
        **kwargs,
    ):
        # The XPU path uses only bgmv kernels, so the sgmv prefill metadata
        # computed by the base class is not needed here.
        self.is_prefill = mapping.is_prefill
        self._update_base_metadata(mapping, lora_index_to_id, max_loras, vocab_size)

    def _get_token_lora_indices(self, x: torch.Tensor) -> torch.IntTensor:
        # Per-token lora indices for the first x.size(0) tokens (a view).
        return torch.narrow(self._token_lora_indices, 0, 0, x.size(0))

    def _apply_shrink(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        w_t_all: torch.Tensor,
        scale: float,
    ):
        # y is written in place by the ipex kernel.
        bgmv_shrink(x, w_t_all, y, self._get_token_lora_indices(x), scale)

    def _apply_expand(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        w_t_all: torch.Tensor,
        y_offset: int,
        y_slice_size: int,
        add_inputs: bool,
    ):
        # Expand one slice into y[:, y_offset:y_offset + y_slice_size].
        token_lora_indices = self._get_token_lora_indices(x)
        bgmv_expand_slice(
            x, w_t_all, y, token_lora_indices, y_offset, y_slice_size, add_inputs
        )

    def add_shrink(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        scale: float,
        **kwargs,
    ):
        """
        Performs GEMM for multiple slices of lora_a.

        Semantics:
        for i in range(len(lora_a_stacked)):
            y[i] += (x @ lora_a_stacked[i]) * scale

        Args:
            y (torch.Tensor): Output tensors; a stacked
                (num_slices, num_tokens, rank) buffer, one slice per entry
                of lora_a_stacked.
            x (torch.Tensor): Input tensor
            lora_a_stacked (tuple[torch.Tensor, ...]): lora_a's weights
            scale (float): Scaling factor for the operation
        """
        x = x.view(-1, x.shape[-1])
        for slice_idx in range(len(lora_a_stacked)):
            self._apply_shrink(y[slice_idx], x, lora_a_stacked[slice_idx], scale)

    def add_expand(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_b_stacked: tuple[torch.Tensor, ...],
        output_slices: tuple[int, ...],
        offset_start: int = 0,
        add_inputs=True,
        **kwargs,
    ) -> None:
        """
        Performs GEMM for multiple slices of lora_b.

        Semantics:
        for i in range(len(lora_b_stacked)):
            slice = output_slices[i]
            y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i]
            offset += slice

        Args:
            y (torch.Tensor): Output tensor.
            x (torch.Tensor): Input tensors, stacked
                (num_slices, num_tokens, rank).
            lora_b_stacked (tuple[torch.Tensor, ...]): lora_b's weight
            output_slices (tuple[int, ...]): Every slice's size
            add_inputs (bool): Defaults to True.
        """
        y_org = y
        y = y.view(-1, y.shape[-1])
        assert x.ndim == 3
        assert x.size(0) == len(output_slices)
        # TODO fuse these kernels
        for slice_idx in range(len(lora_b_stacked)):
            self._apply_expand(
                y,
                x[slice_idx],
                lora_b_stacked[slice_idx],
                offset_start,
                output_slices[slice_idx],
                add_inputs=add_inputs,
            )
            offset_start += output_slices[slice_idx]
        # NOTE(review): the result of view_as is discarded; since y is a view
        # of y_org, all writes above already landed in y_org.
        y.view_as(y_org)

    def add_lora_embedding(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_b_stacked: torch.Tensor,
        add_inputs: bool = True,
        **kwargs,
    ) -> None:
        """
        Applies lora specifically for VocabParallelEmbeddingWithLoRA.

        Semantics:
        y += x @ lora_b_stacked

        Args:
            y (torch.Tensor): Output tensor.
            x (torch.Tensor): Input tensor.
            lora_b_stacked (torch.Tensor): lora_b's weights.
            add_inputs (bool): Default to True.
        """
        token_lora_indices = self._get_token_lora_indices(x)
        bgmv_expand(x, lora_b_stacked, y, token_lora_indices, add_inputs)

    def add_lora_linear(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        lora_b_stacked: tuple[torch.Tensor, ...],
        scale: float,
        output_slices: tuple[int, ...],
        *,
        buffer: torch.Tensor | None = None,
        **kwargs,
    ) -> None:
        """
        Applicable to linear-related lora.

        Semantics:
        for i in range(len(lora_a_stacked)):
            y[i] += (
                x[i].unsqueeze(0)
                @ lora_a_stacked[indices[i], layer_idx, :, :]
                @ lora_b_stacked[indices[i], layer_idx, :, :]
                * scale
            ).squeeze(0)

        Args:
            y (torch.Tensor): Output tensor. Will be changed in-place.
            x (torch.Tensor): Input tensor
            lora_a_stacked (tuple[torch.Tensor, ...]): lora_a's weight.
            lora_b_stacked (tuple[torch.Tensor, ...]): lora_b's weight.
            scale (float): Scaling factor.
            output_slices (tuple[int, ...]): Every slice's size.
            buffer (Optional[torch.Tensor]): Defaults to None.
        """

        assert len(lora_a_stacked) == len(lora_b_stacked) == len(output_slices)
        if buffer is None:
            r = lora_b_stacked[0].size(-1)
            # We set the buffer to be float32 by default, refer to:
            # https://github.com/triton-lang/triton/issues/1387
            buffer = torch.zeros(  # type: ignore
                (len(output_slices), x.size(0), r),
                dtype=torch.float32,
                device=x.device,
            )
        self.add_shrink(
            buffer,  # type: ignore
            x,
            lora_a_stacked,
            scale,
            **kwargs,
        )
        self.add_expand(
            y,
            buffer,  # type: ignore
            lora_b_stacked,
            output_slices,
            add_inputs=True,
            **kwargs,
        )

    @property
    def sampler_indices_padded(self) -> torch.Tensor:
        """
        This property provides access to padded sampler indices.

        Unlike the base class, the full buffer is returned rather than a
        length-limited prefix.
        """
        return self._sampler_indices_padded[:]

    def add_lora_logits(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: torch.Tensor,
        lora_b_stacked: torch.Tensor,
        scale,
        *,
        buffer: torch.Tensor | None = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        Applies lora specifically for LogitsProcessorWithLoRA.

        Semantics:
        buffer = (x @ lora_a_stacked) * scale
        y += buffer @ lora_b_stacked

        Args:
            y (torch.Tensor): Output tensor.
            x (torch.Tensor): Input tensor.
            lora_a_stacked (torch.Tensor): lora_a's weights.
            lora_b_stacked (torch.Tensor): lora_b's weights.
            scale (float): Scaling factor.
            buffer (Optional[torch.Tensor]): Default to None.

        Returns:
            y, reshaped back to its original shape (same storage as the
            input y).
        """
        y_org = y
        y = y.view(-1, y.shape[-1])
        x = x.view(-1, x.shape[-1])
        r = lora_b_stacked.size(-1)
        if buffer is None:
            # We set the buffer to be float32 by default, refer to:
            # https://github.com/triton-lang/triton/issues/1387
            buffer = torch.zeros((x.size(0), r), dtype=torch.float32, device=x.device)

        sampler_indices = torch.narrow(self._sampler_indices, 0, 0, x.size(0))
        bgmv_shrink(x, lora_a_stacked, buffer, sampler_indices, scale)
        bgmv_expand(buffer, lora_b_stacked, y, sampler_indices, add_inputs=True)
        return y.view_as(y_org)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/punica_wrapper/punica_tpu.py | vllm/lora/punica_wrapper/punica_tpu.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import math
from typing import TYPE_CHECKING
import torch
import torch.nn.functional as F
import torch_xla
from vllm.lora.ops.xla_ops import bgmv_expand, bgmv_expand_slice, bgmv_shrink
from vllm.lora.punica_wrapper.utils import convert_mapping
if TYPE_CHECKING:
# avoid circuit import
from vllm.lora.layers import LoRAMapping
from .punica_base import PunicaWrapperBase
class PunicaWrapperTPU(PunicaWrapperBase):
    """
    PunicaWrapperTPU is designed to manage and provide metadata for the punica
    kernel. The main function is to maintain the state information for
    Multi-LoRA, and to provide the interface for the pytorch punica ops.
    """

    def __init__(
        self,
        max_num_batched_tokens: int,
        max_batches: int,
        device: torch.device | str,
        **kwargs,
    ):
        PunicaWrapperBase.__init__(self, max_num_batched_tokens, max_batches, device)

        # PunicaWrapperBase defines some tensors with dtype=torch.int64, which
        # isn't supported by the TPU. So convert those tensors to int32.
        # Not all of them are used by the TPU so only convert the useful ones.
        self._token_lora_indices = self._token_lora_indices.to(dtype=torch.int32)
        self._sampler_indices = self._sampler_indices.to(dtype=torch.int32)
        self._sampler_indices_padded = self._sampler_indices_padded.to(
            dtype=torch.int32
        )

        # Allow XLA to reuse these buffers' memory for outputs (donation),
        # and mark batch-dependent dims dynamic to limit recompilation.
        torch.ops.xla.dynamo_set_buffer_donor_(self._token_lora_indices, True)
        torch.ops.xla.dynamo_set_buffer_donor_(self._sampler_indices, True)
        torch.ops.xla.dynamo_set_buffer_donor_(self._sampler_indices_padded, True)
        torch.ops.xla.dynamo_set_buffer_donor_(self._embeddings_indices, True)
        torch.ops.xla.dynamo_set_buffer_donor_(self._lora_indices_per_batch, True)

        torch._dynamo.mark_dynamic(self._token_lora_indices, 0)
        torch._dynamo.mark_dynamic(self._embeddings_indices, 1)
        torch._dynamo.mark_dynamic(self._sampler_indices_padded, 0)

    def _get_token_lora_indices(self, x: torch.Tensor) -> torch.IntTensor:
        # Per-token lora indices for the first x.size(0) tokens (a view).
        return torch.narrow(self._token_lora_indices, 0, 0, x.size(0))

    @property
    def embeddings_indices(self) -> torch.Tensor:
        """
        This property provides access to the indices used for lora embeddings,
        specifically for VocabParallelEmbeddingWithLoRA.

        Unlike the base class, the full (padded) buffer is returned to keep
        shapes static for XLA.
        """
        return self._embeddings_indices[:]

    @property
    def sampler_indices_padded(self) -> torch.Tensor:
        """
        This property provides access to padded sampler indices.

        Unlike the base class, the full (padded) buffer is returned to keep
        shapes static for XLA.
        """
        return self._sampler_indices_padded[:]

    def shrink(
        self,
        x: torch.Tensor,
        w_t_all: torch.Tensor,
        scale: float,
    ):
        # XLA bgmv_shrink returns a fresh tensor instead of writing in place.
        return bgmv_shrink(x, w_t_all, self._get_token_lora_indices(x), scale)

    def expand(
        self, y: torch.Tensor, x: torch.Tensor, w_t_all: torch.Tensor, add_inputs: bool
    ):
        return bgmv_expand(x, w_t_all, y, self._get_token_lora_indices(x), add_inputs)

    def expand_slice(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        w_t_all: torch.Tensor,
        y_offset: int,
        y_slice_size: int,
        add_inputs: bool,
    ) -> torch.Tensor:
        # Expand one slice into y[:, y_offset:y_offset + y_slice_size] and
        # return the (functionally updated) tensor.
        return bgmv_expand_slice(
            x,
            w_t_all,
            y,
            self._get_token_lora_indices(x),
            y_offset,
            y_slice_size,
            add_inputs,
        )

    def add_shrink(
        self,
        y: tuple[torch.Tensor, ...] | torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        scale: float,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Performs GEMM for multiple slices of lora_a.

        Semantics:
        for i in range(len(lora_a_stacked)):
            y[i] += (x @ lora_a_stacked[i]) * scale

        Args:
            y (Union[tuple[torch.Tensor, ...], torch.Tensor]): Output tensors
            x (torch.Tensor): Input tensor
            lora_a_stacked (tuple[torch.Tensor, ...]): lora_a's weights
            scale (float): Scaling factor for the operation

        Returns:
            The updated y buffer (also mutated slice-by-slice above).
        """
        # Donate y's buffer to XLA so the update need not keep both copies.
        torch.ops.xla.dynamo_set_buffer_donor_(y, True)

        x = x.view(-1, x.shape[-1])
        for slice_idx in range(len(lora_a_stacked)):
            lora_s = lora_a_stacked[slice_idx]
            y_s = self.shrink(x, lora_s, scale)
            y[slice_idx, :, :] = y_s  # type: ignore[index]
        return y

    def add_expand(
        self,
        y: torch.Tensor,
        x: tuple[torch.Tensor, ...] | torch.Tensor,
        lora_b_stacked: tuple[torch.Tensor, ...],
        output_slices: tuple[int, ...],
        offset_start: int = 0,
        add_inputs=True,
        **kwargs,
    ) -> torch.Tensor:
        """
        Performs GEMM for multiple slices of lora_b.

        Semantics:
        for i in range(len(lora_b_stacked)):
            slice = output_slices[i]
            y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i]
            offset += slice

        Args:
            y (torch.Tensor): Output tensor.
            x (Union[tuple[torch.Tensor, ...], torch.Tensor]): Input tensors
            lora_b_stacked (tuple[torch.Tensor, ...]): lora_b's weight
            output_slices (tuple[int, ...]): Every slice's size
            add_inputs (bool): Defaults to True.

        NOTE(review): offset_start is accepted but slices are laid out from
        offset_left = 0 — confirm callers never pass a non-zero offset_start.
        """
        y_org = y
        y = y.view(-1, y.shape[-1])
        offset_left = 0

        for slice_idx in range(len(lora_b_stacked)):
            y = self.expand_slice(
                y,
                x[slice_idx],
                lora_b_stacked[slice_idx],
                offset_left,
                output_slices[slice_idx],
                add_inputs=add_inputs,
            )
            offset_left += output_slices[slice_idx]
        return y.view_as(y_org)

    def add_lora_embedding(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_b_stacked: torch.Tensor,
        add_inputs: bool = True,
        **kwargs,
    ) -> torch.Tensor:
        """
        Applies lora specifically for VocabParallelEmbeddingWithLoRA.

        Semantics:
        y += x @ lora_b_stacked

        Args:
            y (torch.Tensor): Output tensor.
            x (torch.Tensor): Input tensor.
            lora_b_stacked (torch.Tensor): lora_b's weights.
            add_inputs (bool): Default to True.
        """

        # Embedding layer only needs the expand op
        return self.expand(y, x, lora_b_stacked, add_inputs)

    def add_lora_linear(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        lora_b_stacked: tuple[torch.Tensor, ...],
        scale: float,
        output_slices: tuple[int, ...],
        *,
        buffer: tuple[torch.Tensor, ...] | None = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        Applicable to linear-related lora.

        Semantics:
        for i in range(len(lora_a_stacked)):
            y[i] += (
                x[i].unsqueeze(0)
                @ lora_a_stacked[indices[i], layer_idx, :, :]
                @ lora_b_stacked[indices[i], layer_idx, :, :]
                * scale
            ).squeeze(0)

        Args:
            y (torch.Tensor): Output tensor. Will not be changed in-place.
            x (torch.Tensor): Input tensor (T, E)
            lora_a_stacked (tuple[torch.Tensor, ...]): lora_a's weight.
            lora_b_stacked (tuple[torch.Tensor, ...]): lora_b's weight.
            scale (float): Scaling factor.
            output_slices (tuple[int, ...]): Every slice's size.
            buffer (Optional[tuple[torch.Tensor, ...]]): Defaults to None.
        """

        assert len(lora_a_stacked) == len(lora_b_stacked) == len(output_slices)

        if buffer is None:
            r = lora_b_stacked[0].size(-1)
            T = x.size(0)
            # Intermediate shrink output: one (T, r) slice per output slice.
            buffer = torch.zeros(
                (len(output_slices), T, r),
                dtype=x.dtype,
                device=x.device,
            )
        buffer = self.add_shrink(buffer, x, lora_a_stacked, scale, **kwargs)
        return self.add_expand(
            y, buffer, lora_b_stacked, output_slices, add_inputs=True, **kwargs
        )

    def add_lora_logits(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: torch.Tensor,
        lora_b_stacked: torch.Tensor,
        scale,
        *,
        buffer: torch.Tensor | None = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        Applies lora specifically for LogitsProcessorWithLoRA.

        Semantics:
        buffer = (x @ lora_a_stacked) * scale
        y += buffer @ lora_b_stacked

        Args:
            y (torch.Tensor): Output tensor.
            x (torch.Tensor): Input tensor.
            lora_a_stacked (torch.Tensor): lora_a's weights.
            lora_b_stacked (torch.Tensor):lora_b's weights.
            scale (float): Scaling factor.
            buffer (Optional[torch.Tensor]): NOTE(review): a passed-in buffer
                is ignored here — bgmv_shrink always produces a fresh one.
        """
        y_org = y
        y = y.view(-1, y.shape[-1])
        x = x.view(-1, x.shape[-1])

        sampler_indices = torch.narrow(self._sampler_indices, 0, 0, x.size(0))
        buffer = bgmv_shrink(x, lora_a_stacked, sampler_indices, scale)
        y = bgmv_expand(buffer, lora_b_stacked, y, sampler_indices, add_inputs=True)
        return y.view_as(y_org)

    # This performs the same tensor ops as the base method, except it does them
    # on the CPU then transfers the results to the TPU
    def _update_base_metadata(
        self,
        mapping: "LoRAMapping",
        lora_index_to_id: list[int | None],
        max_loras: int,
        vocab_size: int,
    ):
        # Make sure we don't accidentally collect outside operations
        torch_xla.sync()

        # Pad the prompt mapping to avoid running into recompiles on the TPU
        # TODO: Should this happen inside mapping internally? If so how can we
        # avoid having backend specific LoRAMapping classes?
        mapping.prompt_mapping = self._pad_prompt_mapping(mapping.prompt_mapping)

        (
            base_indices,
            sampler_indices,
            sampler_indices_padded,
            embeddings_indices,
            indices_len,
        ) = convert_mapping(
            mapping,
            lora_index_to_id,
            max_loras,
            vocab_size,
            0,  # extra_vocab_size
            "cpu",
        )
        # Pad each result to the full buffer shape (static shapes for XLA),
        # then move it to the TPU in one transfer.
        self._token_lora_indices = self._pad_to_shape(
            base_indices, self._token_lora_indices.shape, dims=1
        ).to(self.device)
        self._sampler_indices = self._pad_to_shape(
            sampler_indices, self._sampler_indices.shape, dims=1
        ).to(self.device)
        self._sampler_indices_padded = self._pad_to_shape(
            sampler_indices_padded, self._sampler_indices_padded.shape, dims=1
        ).to(self.device)
        self._embeddings_indices = self._pad_to_shape(
            embeddings_indices, self._embeddings_indices.shape, dims=2
        ).to(self.device)
        self.indices_len[:] = indices_len

    def _update_prefill_metadata(self, token_lora_tensor: torch.Tensor) -> None:
        # The TPU path treats the whole step as a single batch.
        self.batch_size = 1
        self._lora_indices_per_batch[: self.batch_size] = token_lora_tensor[
            : self.batch_size
        ]

    def _pad_prompt_mapping(self, prompt_mapping: tuple[int, ...]) -> tuple[int, ...]:
        """Pad the mapping to the next power of two (min 8) with -1 entries."""
        num_reqs = len(prompt_mapping)

        # From vllm/v1/worker/tpu_model_runner:51, but need to avoid a circular
        # import
        MIN_NUM_SEQS = 8

        # NOTE(review): math.log2 raises for num_reqs == 0 — presumably an
        # empty mapping never reaches this point; confirm against callers.
        padded_num_reqs = max(2 ** math.ceil(math.log2(num_reqs)), MIN_NUM_SEQS)
        pad_len = padded_num_reqs - num_reqs

        padding = [-1] * pad_len
        return tuple(list(prompt_mapping) + padding)

    def _pad_to_shape(self, src, target_shape, dims=1):
        """Zero-pad src up to target_shape (1-D or 2-D) and cast to int32."""
        if dims == 1:
            pad_len = target_shape[0] - src.shape[0]
            return F.pad(src, (0, pad_len), value=0).to(torch.int32)
        else:
            pad_rows = target_shape[0] - src.shape[0]
            pad_cols = target_shape[1] - src.shape[1]
            return F.pad(src, (0, pad_cols, 0, pad_rows), value=0).to(torch.int32)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/punica_wrapper/punica_cpu.py | vllm/lora/punica_wrapper/punica_cpu.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
import torch
from vllm.lora.ops.torch_ops import (
bgmv_expand,
bgmv_expand_slice,
bgmv_shrink,
sgmv_expand,
sgmv_expand_slice,
sgmv_shrink,
)
from .punica_base import PunicaWrapperBase
# The platforms that are compatible with the PyTorch-native implementation can
# inherit this class
class PunicaWrapperCPU(PunicaWrapperBase):
    """
    PunicaWrapperCPU is designed to manage and provide metadata for the punica
    kernel. The main function is to maintain the state information for
    Multi-LoRA, and to provide the interface for the pytorch punica ops.

    Prefill batches dispatch to the sgmv_* ops (segment-grouped), decode
    batches to the bgmv_* ops (per-token); ``self.is_prefill`` selects the
    path in the ``_apply_*`` helpers below.
    """

    def __init__(
        self,
        max_num_batched_tokens: int,
        max_batches: int,
        device: torch.device | str,
        **kwargs,
    ):
        # All metadata buffers live in the base class; this backend adds no
        # state of its own.
        PunicaWrapperBase.__init__(self, max_num_batched_tokens, max_batches, device)

    def _shrink_prefill(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        w_t_all: torch.Tensor,
        scale: float,
    ):
        # Prefill-stage shrink (x @ lora_a, scaled) via the sgmv op.
        # No LoRA request, so return directly
        if self.no_lora:
            return
        sgmv_shrink(
            x,
            w_t_all,
            y,
            *self.prefill_metadata,
            scale,
        )

    def _shrink_decode(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        w_t_all: torch.Tensor,
        scale: float,
    ):
        # Decode-stage shrink via the per-token bgmv op.
        bgmv_shrink(x, w_t_all, y, self.token_lora_indices, scale)

    def _expand_prefill(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        w_t_all: torch.Tensor,
        add_inputs: bool,
    ):
        # Prefill-stage expand (buffer @ lora_b) via the sgmv op.
        # No LoRA request, so return directly
        if self.no_lora:
            return
        sgmv_expand(
            x,
            w_t_all,
            y,
            *self.prefill_metadata,
            add_inputs,
        )

    def _expand_decode(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        w_t_all: torch.Tensor,
        add_inputs: bool,
    ):
        # Decode-stage expand via the per-token bgmv op.
        bgmv_expand(x, w_t_all, y, self.token_lora_indices, add_inputs)

    def _expand_slice_prefill(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        w_t_all: torch.Tensor,
        y_offset: int,
        y_slice_size: int,
        add_inputs: bool,
    ):
        # Prefill-stage expand into a column slice of y.
        # No LoRA request, so return directly
        if self.no_lora:
            return
        sgmv_expand_slice(
            x,
            w_t_all,
            y,
            *self.prefill_metadata,
            y_offset,
            y_slice_size,
            add_inputs,
        )

    def _expand_slice_decode(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        w_t_all: torch.Tensor,
        y_offset: int,
        y_slice_size: int,
        add_inputs: bool,
    ):
        # Decode-stage expand into a column slice of y.
        bgmv_expand_slice(
            x, w_t_all, y, self.token_lora_indices, y_offset, y_slice_size, add_inputs
        )

    def _apply_expand(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        w_t_all: torch.Tensor,
        y_offset: int,
        y_slice_size: int,
        add_inputs: bool = True,
    ):
        """
        Perform the ` y[:,y_offset:y_offset+y_slice_size]+=x@w_t_all`
        computation, which is suitable for the
        GEMM of lora'b.
        """
        expand_slice_fun: Callable = (
            self._expand_slice_prefill if self.is_prefill else self._expand_slice_decode
        )
        expand_slice_fun(y, x, w_t_all, y_offset, y_slice_size, add_inputs)

    def _apply_shrink(
        self, y: torch.Tensor, x: torch.Tensor, w_t_all: torch.Tensor, scale: float
    ):
        """
        Perform the ` y+=x@w_t_all` computation, which is suitable for the
        GEMM of lora'a.
        When `is_prefill is` true, it indicates that it is currently the
        prefill stage, and the `_shrink_prefill` function should be called.
        Otherwise, it is the decode stage, and the _shrink_decode function
        should be called.
        """
        y_org = y
        y = y.view(-1, y.shape[-1])
        shrink_fun: Callable = (
            self._shrink_prefill if self.is_prefill else self._shrink_decode
        )
        shrink_fun(y, x, w_t_all, scale)
        y = y.view_as(y_org)

    def add_shrink(
        self,
        y: tuple[torch.Tensor, ...] | torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        scale: float,
        **kwargs,
    ):
        """
        Performs GEMM for multiple slices of lora_a.
        When `is_prefill is` true, it indicates that it is currently the
        prefill stage, and the `_shrink_prefill` function should be called.
        Otherwise, it is the decode stage, and the _shrink_decode function
        should be called.

        Semantics:
            for i in range(len(lora_a_stacked)):
                y[i] += (x @ lora_a_stacked[i]) * scale

        Args:
            y (Union[tuple[torch.Tensor, ...], torch.Tensor]): Output tensors
            x (torch.Tensor): Input tensor
            lora_a_stacked (tuple[torch.Tensor, ...]): lora_a's weights
            scale (float): Scaling factor for the operation
        """

        x = x.view(-1, x.shape[-1])
        # TODO fuse these kernels
        for slice_idx in range(len(lora_a_stacked)):
            self._apply_shrink(y[slice_idx], x, lora_a_stacked[slice_idx], scale)

    def add_expand(
        self,
        y: torch.Tensor,
        x: tuple[torch.Tensor, ...] | torch.Tensor,
        lora_b_stacked: tuple[torch.Tensor, ...],
        output_slices: tuple[int, ...],
        offset_start: int = 0,
        add_inputs=True,
        **kwargs,
    ) -> None:
        """
        Performs GEMM for multiple slices of lora_b.

        Semantics:
            for i in range(len(lora_b_stacked)):
                slice = output_slices[i]
                y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i]
                offset += slice

        Args:
            y (torch.Tensor): Output tensor.
            x (Union[tuple[torch.Tensor, ...], torch.Tensor]): Input tensors
            lora_b_stacked (tuple[torch.Tensor, ...]): lora_b's weight
            output_slices (tuple[int, ...]): Every slice's size
            offset_start (int): Column offset of the first slice in ``y``.
            add_inputs (bool): Defaults to True.
        """
        y_org = y
        y = y.view(-1, y.shape[-1])
        offset_left = offset_start

        for slice_idx in range(len(lora_b_stacked)):
            self._apply_expand(
                y,
                x[slice_idx],
                lora_b_stacked[slice_idx],
                offset_left,
                output_slices[slice_idx],
                add_inputs=add_inputs,
            )
            offset_left += output_slices[slice_idx]
        y = y.view_as(y_org)

    def add_lora_embedding(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_b_stacked: torch.Tensor,
        add_inputs: bool = True,
        **kwargs,
    ) -> None:
        """
        Applies lora specifically for VocabParallelEmbeddingWithLoRA.

        Semantics:
            y += x @ lora_b_stacked

        Args:
            y (torch.Tensor): Output tensor.
            x (torch.Tensor): Input tensor.
            lora_b_stacked (torch.Tensor): lora_b's weights.
            add_inputs (bool): Default to True.
        """

        # Embedding layer only need expand op
        expand_fun: Callable = (
            self._expand_prefill if self.is_prefill else self._expand_decode
        )
        expand_fun(y, x, lora_b_stacked, add_inputs)

    def add_lora_linear(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        lora_b_stacked: tuple[torch.Tensor, ...],
        scale: float,
        output_slices: tuple[int, ...],
        *,
        buffer: tuple[torch.Tensor, ...] | None = None,
        **kwargs,
    ) -> None:
        """
        Applicable to linear-related lora.

        Semantics:
            for i in range(len(lora_a_stacked)):
                y[i] += (
                    x[i].unsqueeze(0)
                    @ lora_a_stacked[indices[i], layer_idx, :, :]
                    @ lora_b_stacked[indices[i], layer_idx, :, :]
                    * scale
                ).squeeze(0)

        Args:
            y (torch.Tensor): Output tensor. Will be changed in-place.
            x (torch.Tensor): Input tensor
            lora_a_stacked (tuple[torch.Tensor, ...]): lora_a's weight.
            lora_b_stacked (tuple[torch.Tensor, ...]): lora_b's weight.
            scale (float): Scaling factor.
            output_slices (tuple[int, ...]): Every slice's size.
            buffer (Optional[tuple[torch.Tensor, ...]]): Defaults to None.
        """

        assert len(lora_a_stacked) == len(lora_b_stacked) == len(output_slices)
        if buffer is None:
            r = lora_b_stacked[0].size(-1)
            # We set the buffer to be float32 by default, consistent with the
            # triton op
            buffer = tuple(
                torch.zeros((x.size(0), r), dtype=torch.float32, device=x.device)
                for _ in range(len(output_slices))
            )
        self.add_shrink(buffer, x, lora_a_stacked, scale, **kwargs)
        self.add_expand(
            y, buffer, lora_b_stacked, output_slices, add_inputs=True, **kwargs
        )

    def add_lora_logits(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: torch.Tensor,
        lora_b_stacked: torch.Tensor,
        scale,
        *,
        buffer: torch.Tensor | None = None,
        **kwargs,
    ) -> None:
        """
        Applies lora specifically for LogitsProcessorWithLoRA.

        Semantics:
            buffer = (x @ lora_a_stacked) * scale
            y += buffer @ lora_b_stacked

        Args:
            y (torch.Tensor): Output tensor.
            x (torch.Tensor): Input tensor.
            lora_a_stacked (torch.Tensor): lora_a's weights.
            lora_b_stacked (torch.Tensor): lora_b's weights.
            scale (float): Scaling factor.
            buffer (Optional[torch.Tensor]): Defaults to None.
        """
        y_org = y
        y = y.view(-1, y.shape[-1])
        x = x.view(-1, x.shape[-1])
        r = lora_b_stacked.size(-1)
        if buffer is None:
            # We set the buffer to be float32 by default, consistent with the
            # triton op
            buffer = torch.zeros((x.size(0), r), dtype=torch.float32, device=x.device)
        # LogitsProcessorWithLoRA always using bgmv.
        bgmv_shrink(x, lora_a_stacked, buffer, self.sampler_indices, scale)
        bgmv_expand(buffer, lora_b_stacked, y, self.sampler_indices, add_inputs=True)
        y = y.view_as(y_org)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/punica_wrapper/utils.py | vllm/lora/punica_wrapper/utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import TYPE_CHECKING
import torch
if TYPE_CHECKING:
# avoid circuit import
from vllm.lora.layers import LoRAMapping
def compute_meta(
    token_lora_tensor: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, int, int, bool]:
    """
    Get the information required for the sgmv kernel. With the features:
    1. If consecutive requests in the batch use the same LoRA, this function
    will combine them into a single request, improving sgmv kernel inference
    performance.
    2. At the beginning of each prefill stage inference, recalculations are
    needed based on the input, but only once.
    """
    # Collapse consecutive tokens with the same LoRA index into runs.
    lora_ids, run_lengths = torch.unique_consecutive(
        token_lora_tensor, return_counts=True
    )
    # Exclusive prefix-sum of run lengths gives each segment's start offset.
    seg_starts = torch.zeros_like(run_lengths)
    seg_starts[1:].copy_(torch.cumsum(run_lengths, dim=0)[:-1])
    longest = run_lengths.max().item()
    total_tokens = run_lengths.sum().item()
    num_segments = lora_ids.size(0)
    # -1 means no lora should be applied. A single run of -1 means the whole
    # step needs no LoRA, so the prefill stage can skip launching the triton
    # kernel, which can improve performance.
    no_lora = bool(num_segments == 1 and lora_ids == -1)
    return (
        seg_starts,
        run_lengths,
        lora_ids,
        num_segments,
        longest,
        total_tokens,
        no_lora,
    )
# TODO see if this can be vectorized
def convert_mapping(
    mapping: "LoRAMapping",
    lora_index_to_id: list[int | None],
    max_loras: int,
    vocab_size: int,
    extra_vocab_size: int,
    device: torch.device,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, list[int]]:
    """Converts LoRAMapping to index tensors.

    Args:
        mapping: LoRAMapping mapping rows in a batch to LoRA ids.
        lora_index_to_id: List mapping LoRA ids to LoRA indices.
        max_loras: Maximum number of LoRAs.
        vocab_size: Model vocab size.
        extra_vocab_size: Extra vocab size each LoRA can have.
        device: Device on which the returned index tensors are created.

    Returns:
        A tuple of tensors:
            base_indices: Tensor of shape [batch_size] mapping batch rows to
                LoRA indices.
            sampler_indices: Tensor of shape [batch_size] mapping requests to
                LoRA indices for sampler. For generation, this will be the
                same as base_indices. For prefill, this will map requests
                to LoRA indices.
            sampler_indices_padded: Tensor of shape [batch_size] mapping
                requests to LoRA indices for sampler with padding.
                Same as sampler_indices, but -1 is replaced with
                max_loras.
            embeddings_indices: Tensor of shape [2, batch_size] mapping
                requests to embedding indices. First row is for embeddings
                added by the LoRAs, second row is for the LoRA.lora_a
                embeddings.
            indices_len: List of lengths of the above tensors. It contains
                (base_indices, sampler_indices, sampler_indices_padded,
                embeddings_indices).
    """
    index_mapping_indices: list[int] = list(mapping.index_mapping).copy()
    embedding_indices = index_mapping_indices.copy()
    lora_indices = index_mapping_indices.copy()
    # LoRA ids > 0 resolve to their slot index; id <= 0 means "no LoRA" (-1).
    prompt_mapping: list[int] = [
        lora_index_to_id.index(x) if x > 0 else -1 for x in mapping.prompt_mapping
    ]
    lora_idx = None
    for i in range(len(index_mapping_indices)):
        # TODO index can be slow. optimize
        lora_idx = (
            lora_index_to_id.index(index_mapping_indices[i])
            if index_mapping_indices[i] > 0
            else -1
        )
        # Embedding lookups use slot 0 for "no LoRA" rows; the compute path
        # (lora_indices) keeps -1 as the sentinel.
        embedding_indices[i] = lora_idx if index_mapping_indices[i] > 0 else 0
        lora_indices[i] = lora_idx

    indices_list: list[list[int] | torch.Tensor] = [
        index_mapping_indices,
        lora_indices,
        embedding_indices,
    ]
    indices = torch.tensor(indices_list, dtype=torch.long, device=device)
    prompt_mapping_tensor = torch.tensor(
        prompt_mapping, dtype=torch.long, device=device
    )
    # Row 0 offsets into the LoRA-added vocab, row 1 into the combined
    # base+extra vocab (for the lora_a embeddings).
    embeddings_indices = torch.stack(
        [
            indices[2] * extra_vocab_size,
            indices[2] * (vocab_size + extra_vocab_size),
        ]
    )
    embeddings_indices = torch.where(
        embeddings_indices == -1, max_loras - 1, embeddings_indices
    )
    base_indices = indices[1]
    sampler_indices = prompt_mapping_tensor
    sampler_indices_padded = sampler_indices.clone()
    # Map -1 ("no LoRA") to the last slot, then linearize so each entry is a
    # unique flat index: position + slot * batch_len.
    sampler_indices_padded = torch.where(
        sampler_indices_padded == -1, max_loras - 1, sampler_indices_padded
    )
    sampler_indices_padded = torch.arange(
        0, len(sampler_indices_padded), device=device, dtype=torch.long
    ) + (sampler_indices_padded * len(sampler_indices_padded))
    # Contain length of indices tensors. Used to index into each tensor.
    indices_len = [
        base_indices.shape[-1],
        sampler_indices.shape[-1],
        sampler_indices_padded.shape[-1],
        embeddings_indices.shape[-1],
    ]

    return (
        base_indices,
        sampler_indices,
        sampler_indices_padded,
        embeddings_indices,
        indices_len,
    )
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/punica_wrapper/__init__.py | vllm/lora/punica_wrapper/__init__.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.lora.punica_wrapper.punica_base import PunicaWrapperBase
from vllm.lora.punica_wrapper.punica_selector import get_punica_wrapper
__all__ = [
"PunicaWrapperBase",
"get_punica_wrapper",
]
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/punica_wrapper/punica_gpu.py | vllm/lora/punica_wrapper/punica_gpu.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Based on:
Chen, L., Ye, Z., Wu, Y., Zhuo, D., Ceze, L., & Krishnamurthy, A. (2023).
Punica: Multi-Tenant LoRA Serving.
https://arxiv.org/abs/2310.18547
"""
from typing import final
import torch
from vllm.lora.layers import LoRAMapping
from vllm.triton_utils import HAS_TRITON, triton
from vllm.utils.math_utils import round_up
if HAS_TRITON:
from vllm.lora.ops.triton_ops import (
LoRAKernelMeta,
fused_moe_lora,
lora_expand,
lora_shrink,
)
from vllm import _custom_ops as ops
from .punica_base import PunicaWrapperBase
@final
class PunicaWrapperGPU(PunicaWrapperBase):
    """
    PunicaWrapperGPU is designed to manage and provide metadata for the punica
    kernel. The main function is to maintain the state information for
    Multi-LoRA, and to provide the interface for the punica triton kernel.
    """

    def __init__(
        self,
        max_num_batched_tokens: int,
        max_batches: int,
        device: torch.device | str,
        **kwargs,
    ):
        PunicaWrapperBase.__init__(self, max_num_batched_tokens, max_batches, device)

        self.lora_config = kwargs["lora_config"]
        self.max_loras = self.lora_config.max_loras
        # Kernel metadata keyed by per-token LoRA indices.
        self.token_mapping_meta = LoRAKernelMeta.make(
            self.max_loras, max_num_batched_tokens, device=device
        )

        # When speculative decoding is enabled, max_num_samples is
        # max_batches * (num_speculative_decoding_tokens + 1).
        # This line can be optimized by replacing max_num_batched_tokens
        # to max_batches * (num_speculative_decoding_tokens + 1).
        self.prompt_mapping_meta = LoRAKernelMeta.make(
            self.max_loras, max_num_batched_tokens, device=device
        )

    def update_metadata(
        self,
        mapping: LoRAMapping,
        lora_index_to_id: list[int | None],
        max_loras: int,
        vocab_size: int,
        **kwargs,
    ):
        """Refresh all LoRA mapping state for a new batch."""
        self.is_prefill = mapping.is_prefill
        self._update_base_metadata(mapping, lora_index_to_id, max_loras, vocab_size)

        # Prepare cuda kernel metadata tensors
        self.token_mapping_meta.prepare_tensors(self.token_lora_indices)
        self.prompt_mapping_meta.prepare_tensors(self.sampler_indices)

    def add_shrink(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        scale: float,
        **kwargs,
    ):
        """
        Performs GEMM for multiple slices of lora_a.

        Semantics:
            for i in range(len(lora_a_stacked)):
                y[i] += (x @ lora_a_stacked[i]) * scale

        Args:
            y (torch.Tensor): Output tensors
            x (torch.Tensor): Input tensor
            lora_a_stacked (tuple[torch.Tensor, ...]): lora_a's weights
            scale (float): Scaling factor for the operation
        """

        x = x.view(-1, x.shape[-1])
        lora_shrink(
            x,
            lora_a_stacked,
            y,
            *self.token_mapping_meta.meta_args(x.size(0)),
            scale,
        )

    def add_expand(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_b_stacked: tuple[torch.Tensor, ...],
        output_slices: tuple[int, ...],
        offset_start: int = 0,
        add_inputs=True,
        **kwargs,
    ) -> None:
        """
        Performs GEMM for multiple slices of lora_b.

        Semantics:
            for i in range(len(lora_b_stacked)):
                slice = output_slices[i]
                y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i]
                offset += slice

        Args:
            y (torch.Tensor): Output tensor.
            x (torch.Tensor): Input tensors
            lora_b_stacked (tuple[torch.Tensor, ...]): lora_b's weight
            output_slices (tuple[int, ...]): Every slice's size
            offset_start (int): Column offset of the first slice in ``y``.
            add_inputs (bool): Defaults to True.
        """
        y_org = y
        y = y.view(-1, y.shape[-1])
        assert x.ndim == 3
        assert x.size(0) == len(output_slices)
        num_tokens = x.size(1)  # first dimension is the num slices
        # NOTE(review): the ``add_inputs`` parameter is not forwarded — the
        # kernel call hard-codes add_inputs=True. Confirm every caller relies
        # on the accumulate-into-y behavior before changing this.
        lora_expand(
            x,
            lora_b_stacked,
            y,
            *self.token_mapping_meta.meta_args(num_tokens),
            offset_start=offset_start,
            add_inputs=True,
        )

        y = y.view_as(y_org)

    def add_lora_embedding(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_b_stacked: torch.Tensor,
        add_inputs: bool = True,
        **kwargs,
    ) -> None:
        """
        Applies lora specifically for VocabParallelEmbeddingWithLoRA.

        Semantics:
            y += x @ lora_b_stacked

        Args:
            y (torch.Tensor): Output tensor.
            x (torch.Tensor): Input tensor.
            lora_b_stacked (torch.Tensor): lora_b's weights.
            add_inputs (bool): Default to True.
        """
        # Embeddings only need the expand op, applied as a single slice.
        lora_expand(
            x.unsqueeze(dim=0),
            (lora_b_stacked,),
            y,
            *self.token_mapping_meta.meta_args(x.size(0)),
            offset_start=0,
            add_inputs=add_inputs,
        )

    def add_lora_linear(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        lora_b_stacked: tuple[torch.Tensor, ...],
        scale: float,
        output_slices: tuple[int, ...],
        *,
        buffer: torch.Tensor | None = None,
        **kwargs,
    ) -> None:
        """
        Applicable to linear-related lora.

        Semantics:
            for i in range(len(lora_a_stacked)):
                y[i] += (
                    x[i].unsqueeze(0)
                    @ lora_a_stacked[indices[i], layer_idx, :, :]
                    @ lora_b_stacked[indices[i], layer_idx, :, :]
                    * scale
                ).squeeze(0)

        Args:
            y (torch.Tensor): Output tensor. Will be changed in-place.
            x (torch.Tensor): Input tensor
            lora_a_stacked (tuple[torch.Tensor, ...]): lora_a's weight.
            lora_b_stacked (tuple[torch.Tensor, ...]): lora_b's weight.
            scale (float): Scaling factor.
            output_slices (tuple[int, ...]): Every slice's size.
            buffer (Optional[torch.Tensor]): Defaults to None.
        """

        assert len(lora_a_stacked) == len(lora_b_stacked) == len(output_slices)

        assert buffer is None, (
            "To minimize overhead, the buffer should be created by "
            ".add_lora_linear() instead of being passed in."
        )
        r = lora_b_stacked[0].size(-1)
        # We set the buffer to be float32 by default, refer to:
        # https://github.com/triton-lang/triton/issues/1387
        # Note: buffer is zeroed inside the shrink op
        buffer = torch.empty(
            (len(output_slices), x.size(0), r), dtype=torch.float32, device=x.device
        )
        self.add_shrink(
            buffer,  # type: ignore
            x,
            lora_a_stacked,
            scale,
            **kwargs,
        )
        self.add_expand(
            y,
            buffer,  # type: ignore
            lora_b_stacked,
            output_slices,
            add_inputs=True,
            **kwargs,
        )

    def add_lora_logits(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: torch.Tensor,
        lora_b_stacked: torch.Tensor,
        scale,
        *,
        buffer: torch.Tensor | None = None,
        **kwargs,
    ) -> None:
        """
        Applies lora specifically for LogitsProcessorWithLoRA.

        Semantics:
            buffer = (x @ lora_a_stacked) * scale
            y += buffer @ lora_b_stacked

        Args:
            y (torch.Tensor): Output tensor.
            x (torch.Tensor): Input tensor.
            lora_a_stacked (torch.Tensor): lora_a's weights.
            lora_b_stacked (torch.Tensor): lora_b's weights.
            scale (float): Scaling factor.
            buffer (Optional[torch.Tensor]): Default to None.
        """
        y_org = y
        y = y.view(-1, y.shape[-1])
        x = x.view(-1, x.shape[-1])
        r = lora_b_stacked.size(-1)

        # NOTE(review): this message mentions .add_lora_linear(); it looks
        # copy-pasted from that method and should probably say
        # .add_lora_logits().
        assert buffer is None, (
            "To minimize overhead, the buffer should be created by "
            ".add_lora_linear() instead of being passed in."
        )
        # We set the buffer to be float32 by default, refer to:
        # https://github.com/triton-lang/triton/issues/1387
        # Note: buffer is zeroed inside the shrink op
        buffer = torch.empty((x.size(0), r), dtype=torch.float32, device=x.device)

        # Logits use the prompt-level (per-request) mapping, not the
        # per-token mapping.
        lora_shrink(
            x,
            [lora_a_stacked],
            buffer.unsqueeze(dim=0),
            *self.prompt_mapping_meta.meta_args(x.size(0)),
            scale,
        )
        lora_expand(
            buffer.unsqueeze(dim=0),
            [lora_b_stacked],
            y,
            *self.prompt_mapping_meta.meta_args(buffer.size(0)),
            add_inputs=True,
        )

        y = y.view_as(y_org)

    def moe_lora_align_block_size(
        self,
        topk_ids: torch.Tensor,
        num_tokens: int,
        block_size: int,
        num_experts: int,
        max_loras: int,
        adapter_enabled: torch.Tensor,
        expert_map: torch.Tensor | None = None,
        pad_sorted_ids: bool = False,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Aligns tokens and experts into block-sized chunks for LoRA-based
        mixture-of-experts (MoE) execution.
        """
        # Worst case: every expert's token count needs (block_size - 1)
        # padding entries.
        max_num_tokens_padded = topk_ids.numel() + num_experts * (block_size - 1)
        if pad_sorted_ids:
            max_num_tokens_padded = round_up(max_num_tokens_padded, block_size)
        sorted_ids = torch.empty(
            (max_loras * max_num_tokens_padded,),
            dtype=torch.int32,
            device=topk_ids.device,
        )
        max_num_m_blocks = triton.cdiv(max_num_tokens_padded, block_size)
        # Expert ids must be set default to -1 to prevent a blank block
        # NOTE(review): torch.empty leaves this uninitialized despite the
        # comment above — presumably the custom op fills every slot; confirm.
        expert_ids = torch.empty(
            (max_loras * max_num_m_blocks,),
            dtype=torch.int32,
            device=topk_ids.device,
        )
        num_tokens_post_pad = torch.empty(
            (max_loras), dtype=torch.int32, device=topk_ids.device
        )

        (token_lora_mapping, _, _, _, lora_ids, _) = self.token_mapping_meta.meta_args(
            num_tokens
        )

        ops.moe_lora_align_block_size(
            topk_ids,
            token_lora_mapping,
            num_experts,
            block_size,
            max_loras,
            max_num_tokens_padded,
            max_num_m_blocks,
            sorted_ids,
            expert_ids,
            num_tokens_post_pad,
            adapter_enabled,
            lora_ids,
        )
        if expert_map is not None:
            expert_ids = expert_map[expert_ids]
        return sorted_ids, expert_ids, num_tokens_post_pad

    def add_lora_fused_moe(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        lora_b_stacked: tuple[torch.Tensor, ...],
        topk_weights: torch.Tensor,
        sorted_token_ids: torch.Tensor,
        expert_ids: torch.Tensor,
        num_tokens_post_padded: torch.Tensor,
        max_lora_rank: int,
        top_k_num: int,
        shrink_config,
        expand_config,
        adapter_enabled: torch.Tensor,
        mul_routed_weight=False,
        fully_sharded: bool = False,
        offset: int = 0,
    ):
        """
        Performs a fused forward computation for LoRA of Mixture-of-Experts (MoE) layer.
        """
        (_, _, _, _, lora_ids, _) = self.token_mapping_meta.meta_args(x.size(0))
        # Tuning knobs fall back to fixed defaults when absent from the
        # provided shrink/expand configs.
        fused_moe_lora(
            y,
            x,
            lora_a_stacked,
            lora_b_stacked,
            topk_weights,
            sorted_token_ids,
            expert_ids,
            num_tokens_post_padded,
            max_lora_rank,
            top_k_num,
            lora_ids,
            adapter_enabled,
            shrink_config.get("BLOCK_SIZE_M", 64),
            shrink_config.get("BLOCK_SIZE_N", 64),
            shrink_config.get("BLOCK_SIZE_K", 32),
            shrink_config.get("GROUP_SIZE_M", 8),
            shrink_config.get("NUM_WARPS", 4),
            shrink_config.get("NUM_STAGES", 3),
            shrink_config.get("SPLIT_K", 1),
            expand_config.get("BLOCK_SIZE_M", 64),
            expand_config.get("BLOCK_SIZE_N", 64),
            expand_config.get("BLOCK_SIZE_K", 32),
            expand_config.get("GROUP_SIZE_M", 8),
            expand_config.get("NUM_WARPS", 4),
            expand_config.get("NUM_STAGES", 3),
            expand_config.get("SPLIT_K", 1),
            mul_routed_weight,
            fully_sharded,
            offset,
        )
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/punica_wrapper/punica_selector.py | vllm/lora/punica_wrapper/punica_selector.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.logger import init_logger
from vllm.platforms import current_platform
from vllm.utils.import_utils import resolve_obj_by_qualname
from .punica_base import PunicaWrapperBase
logger = init_logger(__name__)
def get_punica_wrapper(*args, **kwargs) -> PunicaWrapperBase:
    """Instantiate the Punica wrapper selected by the current platform.

    The platform supplies a fully-qualified class name, which is resolved
    and constructed with the given arguments.
    """
    qualname = current_platform.get_punica_wrapper()
    wrapper_cls = resolve_obj_by_qualname(qualname)
    wrapper = wrapper_cls(*args, **kwargs)
    assert wrapper is not None, (
        "the punica_wrapper_qualname(" + qualname + ") is wrong."
    )
    logger.info_once("Using %s.", qualname.rsplit(".", 1)[1])
    return wrapper
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/layers/row_parallel_linear.py | vllm/lora/layers/row_parallel_linear.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
import torch.nn as nn
from transformers import PretrainedConfig
from vllm.config.lora import LoRAConfig
from vllm.distributed import (
split_tensor_along_last_dim,
tensor_model_parallel_all_reduce,
)
from vllm.model_executor.layers.linear import RowParallelLinear
from vllm.platforms import current_platform
from .base_linear import BaseLinearLayerWithLoRA
from .utils import _fully_sharded_can_replace, _not_fully_sharded_can_replace
class RowParallelLinearWithLoRA(BaseLinearLayerWithLoRA):
    """LoRA wrapper around a ``RowParallelLinear`` base layer.

    The base layer shards its input dimension across tensor-parallel ranks
    (``input_size_per_partition``), so LoRA A is sliced along its input dim
    to match, while LoRA B is left whole.
    """

    def __init__(self, base_layer: RowParallelLinear) -> None:
        super().__init__(base_layer)

        # reset input_size
        self.input_size = self.base_layer.input_size_per_partition
        self.output_size = self.base_layer.output_size
        # There is only one LoRA layer.
        self.n_slices = 1

    def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor:
        # Take this rank's contiguous shard of LoRA A's input (column) dim.
        shard_size = self.input_size
        start_idx = self.tp_rank * shard_size
        end_idx = (self.tp_rank + 1) * shard_size
        lora_a = lora_a[:, start_idx:end_idx]
        return lora_a

    def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor:
        # LoRA B is not sharded in this variant.
        return lora_b

    def forward(
        self, input_: torch.Tensor
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor | None]:
        """Forward of RowParallelLinear

        Args:
            input_: tensor whose last dimension is `input_size`. If
                    `input_is_parallel` is set, then the last dimension
                    is `input_size // tp_size`.

        Returns:
            - output
            - bias
        """
        # set up backprop all-reduce.
        if self.base_layer.input_is_parallel:
            input_parallel = input_
        else:
            # TODO: simplify code below
            splitted_input = split_tensor_along_last_dim(
                input_, num_partitions=self.tp_size
            )
            input_parallel = splitted_input[self.tp_rank].contiguous()

        # Matrix multiply.
        # Only rank 0 passes the bias into apply(), presumably so the bias is
        # not accumulated tp_size times by the all-reduce below.
        bias_ = (
            None
            if (self.tp_rank > 0 or self.base_layer.skip_bias_add)
            else self.base_layer.bias
        )
        output_parallel = self.apply(input_parallel, bias_)
        if self.base_layer.reduce_results and self.tp_size > 1:
            output = tensor_model_parallel_all_reduce(output_parallel)
        else:
            output = output_parallel

        output_bias = self.base_layer.bias if self.base_layer.skip_bias_add else None

        if not self.base_layer.return_bias:
            return output
        return output, output_bias

    @classmethod
    @_not_fully_sharded_can_replace
    def can_replace_layer(
        cls,
        source_layer: nn.Module,
        lora_config: LoRAConfig,
        packed_modules_list: list,
        model_config: PretrainedConfig | None = None,
    ) -> bool:
        # Exact-type match: subclasses get their own dedicated wrappers.
        return type(source_layer) is RowParallelLinear
# The following layer is based on the tensor parallelism strategy given in
# Y. Sheng et al., S-LoRA: Serving Thousands of Concurrent LoRA Adapters. 2023,
# https://arxiv.org/abs/2311.03285.


class RowParallelLinearWithShardedLoRA(RowParallelLinearWithLoRA):
    """
    Differs from RowParallelLinearWithLoRA by slicing the
    LoRA B's also.

    Based on S-LoRA, slicing happens along the output dim.
    This yields a combined partial sum from the row parallel base
    layer and column partitioned output from the LoRA.
    """

    def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor:
        # Take this rank's contiguous shard of LoRA B's output (row) dim.
        shard_size = self.lora_b_stacked[0].shape[2]
        start_idx = self.tp_rank * shard_size
        end_idx = (self.tp_rank + 1) * shard_size
        lora_b = lora_b[start_idx:end_idx, :]
        return lora_b

    def apply(self, x: torch.Tensor, bias: torch.Tensor | None = None) -> torch.Tensor:
        """Base-layer matmul plus the sharded LoRA correction.

        The shrink result is all-reduced across ranks (LoRA A is sharded
        along the input dim), then each rank expands into its own column
        slice of the output partial sum.
        """
        output = self.base_layer.quant_method.apply(self.base_layer, x, bias)

        x = x.view(-1, x.shape[-1])
        output, out_orig_shape = output.view(-1, output.shape[-1]), output.shape

        buffer = torch.zeros(
            (self.n_slices, x.shape[0], self.lora_a_stacked[0].shape[2]),
            dtype=torch.float32,
            device=x.device,
        )

        shrunk_buffer: torch.Tensor | None = self.punica_wrapper.add_shrink(
            buffer, x, self.lora_a_stacked, 1.0
        )
        if not current_platform.can_update_inplace():
            buffer = shrunk_buffer

        if self.tp_size > 1:
            buffer = tensor_model_parallel_all_reduce(buffer)

        # following S-LoRA, allows the fusing of all_gather and all_reduce
        # by adding the column partitioned lora output to a slice of output
        # tensor, which is a partial sum due to row parallel. All that
        # remains is a standard all_reduce. User should be aware though that
        # the output is not the same as a normal row_parallel, it should be
        # reduced before being used
        # NOTE offset are based on the rank.
        shard_size = self.lora_b_stacked[0].shape[2]
        offset_start = self.tp_rank * shard_size
        # Fix: the keyword is ``add_inputs``. The previous misspelling
        # ``add_input`` was silently swallowed by add_expand's ``**kwargs``
        # and the real parameter only worked because its default is True.
        lora_output: torch.Tensor | None = self.punica_wrapper.add_expand(
            output,
            buffer,
            self.lora_b_stacked,
            self.output_slices,
            offset_start=offset_start,
            add_inputs=True,
        )

        if not current_platform.can_update_inplace():
            output = lora_output

        output = output.view(*out_orig_shape)
        return output

    @classmethod
    @_fully_sharded_can_replace
    def can_replace_layer(
        cls,
        source_layer: nn.Module,
        lora_config: LoRAConfig,
        packed_modules_list: list,
        model_config: PretrainedConfig | None = None,
    ) -> bool:
        # specifying kwargs so they can be easily accessed in decorator
        return super().can_replace_layer(
            source_layer=source_layer,
            lora_config=lora_config,
            packed_modules_list=packed_modules_list,
            model_config=model_config,
            decorate=False,
        )
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/layers/logits_processor.py | vllm/lora/layers/logits_processor.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
import torch.nn as nn
from transformers import PretrainedConfig
from vllm.config.lora import LoRAConfig
from vllm.distributed import (
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.vocab_parallel_embedding import VocabParallelEmbedding
from vllm.platforms import current_platform
from .base import BaseLayerWithLoRA
class LogitsProcessorWithLoRA(BaseLayerWithLoRA):
    """
    LoRA wrapper for LogitsProcessor, with extra logic to handle the
    application of the LoRA adapter and added LoRA vocabulary.

    Args:
        base_layer: LogitsProcessor layer
        hidden_size: hidden size of the model
        dtype: data type of the model
        device: device of the model
        sharded_to_full_mapping: index mapping from sharded vocab to full vocab
            received from base_layer.get_sharded_to_full_mapping(). If None,
            no reindexing will be done.
    """

    def __init__(
        self,
        base_layer: LogitsProcessor,
        hidden_size: int,
        dtype: torch.dtype,
        device: torch.device,
        sharded_to_full_mapping: list[int] | None,
    ) -> None:
        super().__init__()
        self.base_layer = base_layer
        self.hidden_size = hidden_size
        self.dtype = dtype
        self.device = device
        self.tp_size = get_tensor_model_parallel_world_size()
        self.tp_rank = get_tensor_model_parallel_rank()
        self.sharded_to_full_mapping = sharded_to_full_mapping

    # The properties below delegate to the wrapped LogitsProcessor so this
    # wrapper is a drop-in replacement for it.
    @property
    def logits_as_input(self):
        return self.base_layer.logits_as_input

    @property
    def vocab_size(self):
        return self.base_layer.vocab_size

    @property
    def scale(self):
        return self.base_layer.scale

    @property
    def soft_cap(self):
        return self.base_layer.soft_cap

    @property
    def use_all_gather(self):
        return self.base_layer.use_all_gather

    @property
    def org_vocab_size(self):
        return self.base_layer.org_vocab_size

    @property
    def include_gpu_probs_tensor(self):
        return self.base_layer.include_gpu_probs_tensor

    @property
    def should_modify_greedy_probs_inplace(self):
        return self.base_layer.should_modify_greedy_probs_inplace

    def create_lora_weights(
        self,
        max_loras: int,
        lora_config: LoRAConfig,
        model_config: PretrainedConfig | None = None,
    ) -> None:
        """Allocate the stacked LoRA A/B buffers for up to ``max_loras``
        adapter slots, plus the optional sharded->full vocab index map."""
        # TODO: Verify if this condition can be further relaxed
        # BUGFIX: the previous chained comparison
        # `32000 < vocab_size > 257024` was equivalent to
        # `vocab_size > 257024` only, so vocab sizes below the documented
        # lower bound slipped through. Enforce the full range and fix the
        # self-contradictory message (`32000 >= vocab_size <= 257024`).
        if not 32000 <= self.base_layer.vocab_size <= 257024:
            raise ValueError(
                "When using LoRA, vocab size must be 32000 <= vocab_size <= 257024"
            )
        self.lora_a_stacked = torch.zeros(
            (
                max_loras,
                1,
                lora_config.max_lora_rank,
                self.hidden_size,
            ),
            dtype=lora_config.lora_dtype,
            device=self.device,
        )
        self.lora_b_stacked = torch.zeros(
            (
                max_loras,
                1,
                self.base_layer.vocab_size,
                lora_config.max_lora_rank,
            ),
            dtype=lora_config.lora_dtype,
            device=self.device,
        )
        if self.sharded_to_full_mapping is not None:
            self.sharded_to_full_mapping_gpu = torch.tensor(
                self.sharded_to_full_mapping, device=self.device, dtype=torch.long
            )
        else:
            self.sharded_to_full_mapping_gpu = None

    def reset_lora(self, index: int):
        # Zero out adapter slot `index` so it contributes nothing.
        self.lora_a_stacked[index] = 0
        self.lora_b_stacked[index] = 0

    def set_lora(
        self,
        index: int,
        lora_a: torch.Tensor | list[torch.Tensor],
        lora_b: torch.Tensor | list[torch.Tensor],
    ):
        """Copy a single adapter's A/B weights into stacked slot ``index``."""
        assert isinstance(lora_a, torch.Tensor)
        assert isinstance(lora_b, torch.Tensor)
        self.reset_lora(index)
        self.lora_a_stacked[index, 0, : lora_a.shape[0], : lora_a.shape[1]].copy_(
            lora_a, non_blocking=True
        )
        self.lora_b_stacked[index, 0, : lora_b.shape[0], : lora_b.shape[1]].copy_(
            lora_b, non_blocking=True
        )

    def _get_logits(
        self,
        hidden_states: torch.Tensor,
        lm_head: VocabParallelEmbedding,
        embedding_bias: torch.Tensor | None = None,
    ) -> torch.Tensor | None:
        """Compute logits, apply the LoRA delta, and strip vocab padding.

        Returns None on non-driver TP ranks (when the gather yields None).
        """
        # Get the logits for the next tokens.
        logits = lm_head.quant_method.apply(lm_head, hidden_states)
        if embedding_bias is not None:
            logits += embedding_bias

        # Gather logits for TP
        logits = self.base_layer._gather_logits(logits)

        if logits is None:
            return None

        if self.sharded_to_full_mapping_gpu is not None:
            # Reindex full logits tensor to ensure 1:1 mapping between
            # index and token_id
            # Example for:
            #   org_vocab_size = 4
            #   added_vocab_size = 2
            #   pad_to_size = 8
            #   tp_size = 2

            # indices:  [0, 1, 2,  3, 4, 5, 6,  7]
            # token_id: [0, 1, 4, -1, 2, 3, 5, -1]

            # Therefore, the mapping is expected to be:
            # [0, 1, 4, 6, 2, 3, 5, 7] so that when we reindex,
            # we get:
            # indices:  [0, 1, 2, 3, 4, 5,  6,  7]
            # token_id: [0, 1, 2, 3, 4, 5, -1, -1]
            logits = logits[:, self.sharded_to_full_mapping_gpu]

        lora_output: torch.Tensor | None = self.punica_wrapper.add_lora_logits(
            logits, hidden_states, self.lora_a_stacked, self.lora_b_stacked, 1.0
        )

        # Platforms that cannot update in place return the result instead.
        if not current_platform.can_update_inplace():
            logits = lora_output

        # Remove paddings in vocab (if any).
        logits = logits[:, : self.base_layer.vocab_size]
        return logits

    def forward(self, *args, **kwargs):
        # Run the wrapped class's forward with `self` as the instance so it
        # calls back into our overridden _get_logits.
        return type(self.base_layer).forward(self, *args, **kwargs)

    @classmethod
    def can_replace_layer(
        cls,
        source_layer: nn.Module,
        lora_config: LoRAConfig,
        packed_modules_list: list,
        model_config: PretrainedConfig | None = None,
    ) -> bool:
        # Special handling for the LogitsProcessor.
        return False
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/layers/base_linear.py | vllm/lora/layers/base_linear.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
from transformers import PretrainedConfig
from vllm.config.lora import LoRAConfig
from vllm.distributed.utils import divide
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
LinearBase,
ReplicatedLinear,
RowParallelLinear,
)
from vllm.platforms import current_platform
from .base import BaseLayerWithLoRA
from .utils import _get_lora_device
class BaseLinearLayerWithLoRA(BaseLayerWithLoRA):
    """Shared LoRA plumbing for the Replicated/Column/Row parallel linear
    wrappers: stacked A/B buffer allocation, per-slot loading/reset, and the
    punica-based apply path."""

    def __init__(self, base_layer: LinearBase):
        super().__init__()
        self.base_layer = base_layer
        self.input_size = self.base_layer.input_size
        # Ensure tp_size and tp_rank consistency with the base_layer.
        self.tp_size = self.base_layer.tp_size
        self.tp_rank = self.base_layer.tp_rank
        self.device = _get_lora_device(self.base_layer)
        # Declared here; populated by subclasses / create_lora_weights.
        self.output_slices: tuple[int, ...]
        self.output_size: int
        self.n_slices: int

    def create_lora_weights(
        self,
        max_loras: int,
        lora_config: LoRAConfig,
        model_config: PretrainedConfig | None = None,
    ) -> None:
        """Allocate zeroed stacked LoRA A/B buffers for ``max_loras`` slots.

        With fully sharded LoRA, the A rank dim (ColumnParallel) or the B
        output dim (RowParallel) is divided across TP ranks.
        """
        self.lora_config = lora_config

        if isinstance(self.base_layer, ReplicatedLinear):
            lora_a_out_size = lora_config.max_lora_rank
            lora_b_out_size = self.output_size

        elif isinstance(self.base_layer, ColumnParallelLinear):
            lora_a_out_size = (
                lora_config.max_lora_rank
                if not lora_config.fully_sharded_loras
                else divide(lora_config.max_lora_rank, self.tp_size)
            )
            lora_b_out_size = self.output_size

        elif isinstance(self.base_layer, RowParallelLinear):
            lora_a_out_size = lora_config.max_lora_rank
            lora_b_out_size = (
                self.output_size
                if not lora_config.fully_sharded_loras
                else divide(self.output_size, self.tp_size)
            )
        else:
            raise NotImplementedError

        self.lora_a_stacked = tuple(
            torch.zeros(
                max_loras,
                1,
                lora_a_out_size,
                self.input_size,
                dtype=lora_config.lora_dtype,
                device=self.device,
            )
            for _ in range(self.n_slices)
        )
        self.lora_b_stacked = tuple(
            torch.zeros(
                max_loras,
                1,
                lora_b_out_size,
                lora_config.max_lora_rank,
                dtype=lora_config.lora_dtype,
                device=self.device,
            )
            for _ in range(self.n_slices)
        )
        self.output_slices = (self.lora_b_stacked[0].shape[2],)

    def reset_lora(self, index: int):
        # Zero out adapter slot `index` in every slice.
        for s_index in range(self.n_slices):
            self.lora_a_stacked[s_index][index] = 0
            self.lora_b_stacked[s_index][index] = 0

    def set_lora(
        self,
        index: int,
        lora_a: torch.Tensor | list[torch.Tensor],
        lora_b: torch.Tensor | list[torch.Tensor],
    ):
        """Load one adapter's (possibly TP-sliced) A/B weights into slot
        ``index``."""
        # Except for QKVParallelLinearWithLoRA and
        # MergedColumnParallelLinearWithLoRA, all other linear LoRA layers
        # store weights in a tuple of size 1. These two layers will
        # override this function.
        assert isinstance(lora_a, torch.Tensor)
        assert isinstance(lora_b, torch.Tensor)
        assert (
            len(self.lora_a_stacked) == len(self.lora_b_stacked) == self.n_slices == 1
        )

        self.reset_lora(index)
        if self.tp_size > 1:
            lora_a = self.slice_lora_a(lora_a)
            lora_b = self.slice_lora_b(lora_b)

        self.lora_a_stacked[0][index, 0, : lora_a.shape[0], : lora_a.shape[1]].copy_(
            lora_a, non_blocking=True
        )
        self.lora_b_stacked[0][index, 0, : lora_b.shape[0], : lora_b.shape[1]].copy_(
            lora_b, non_blocking=True
        )

    def apply(self, x: torch.Tensor, bias: torch.Tensor | None = None) -> torch.Tensor:
        """Base-layer GEMM plus the LoRA delta (added in place when the
        platform supports it)."""
        output = self.base_layer.quant_method.apply(self.base_layer, x, bias)

        original_shape = output.shape if output.ndim == 3 else None
        # In transformers backend, x and output have extra batch dimension like
        # (1, seq_len, hidden_dim), while punica expects (seq_len, hidden_dim),
        # therefore we need to flatten the batch dimensions.
        if x.ndim == 3 and output.ndim == 3:
            output = output.flatten(0, 1)
            x = x.flatten(0, 1)

        lora_output: torch.Tensor | None = self.punica_wrapper.add_lora_linear(
            output, x, self.lora_a_stacked, self.lora_b_stacked, 1.0, self.output_slices
        )
        # Platforms that cannot update in place return the result instead.
        if not current_platform.can_update_inplace():
            output = lora_output

        # Reshape the flattened output back to its original shape,
        # as some MM encoders cannot handle flattened inputs.
        if original_shape is not None:
            output = output.reshape(original_shape)

        return output

    @property
    def weight(self) -> torch.Tensor:
        """The base layer's weight tensor, whatever the quantization scheme
        calls it."""
        # unquantizedLinear
        if hasattr(self.base_layer, "weight"):
            return self.base_layer.weight
        # Compressed Tensor
        elif hasattr(self.base_layer, "weight_packed"):
            return self.base_layer.weight_packed
        # GPTQ/AWQ
        elif hasattr(self.base_layer, "qweight"):
            return self.base_layer.qweight
        # marlin
        elif hasattr(self.base_layer, "B"):
            return self.base_layer.B
        # HQQ marlin
        elif hasattr(self.base_layer, "W_q"):
            return self.base_layer.W_q
        else:
            raise ValueError(f"Unsupported base layer: {self.base_layer}")

    @property
    def bias(self) -> torch.Tensor | None:
        # The base layer's bias, or None when it has none.
        if hasattr(self.base_layer, "bias"):
            return self.base_layer.bias
        else:
            return None
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/layers/utils.py | vllm/lora/layers/utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import dataclass
from enum import Enum
import torch
import torch.nn as nn
class LoRAMappingType(Enum):
    """Which model component a LoRA mapping targets (language model,
    multimodal tower, or connector)."""

    LANGUAGE = 1
    TOWER = 2
    CONNECTOR = 3
@dataclass
class LoRAMapping:
    """Batch-level mapping from tokens/prompts to LoRA adapter indices."""

    # Per-token adapter index.
    index_mapping: tuple[int, ...]
    # Per-prompt (sequence-level) adapter index.
    prompt_mapping: tuple[int, ...]
    is_prefill: bool = False
    type: LoRAMappingType = LoRAMappingType.LANGUAGE

    def __post_init__(self):
        # Coerce to tuples so the mapping is immutable even when callers
        # pass lists.
        self.index_mapping = tuple(self.index_mapping)
        self.prompt_mapping = tuple(self.prompt_mapping)
def _get_lora_device(base_layer: nn.Module) -> torch.device:
# code borrowed from https://github.com/fmmoret/vllm/blob/fm-support-lora-on-quantized-models/vllm/lora/layers.py#L34
"""Returns the device for where to place the LoRA tensors."""
# unquantizedLinear
if hasattr(base_layer, "weight"):
return base_layer.weight.device
# Compressed Tensor
elif hasattr(base_layer, "weight_packed"):
return base_layer.weight_packed.device
# GPTQ/AWQ
elif hasattr(base_layer, "qweight"):
return base_layer.qweight.device
# HQQ marlin
elif hasattr(base_layer, "W_q"):
return base_layer.W_q.device
# MoE layer
elif hasattr(base_layer, "w2_weight"):
return base_layer.w2_weight.device
# MoE Compressed Tensor
elif hasattr(base_layer, "w2_weight_packed"):
return base_layer.w2_weight_packed.device
# MoE GPTQ/AWQ/GGUF
elif hasattr(base_layer, "w2_qweight"):
return base_layer.w2_qweight.device
else:
raise ValueError(f"Unsupported base layer: {base_layer}")
def _not_fully_sharded_can_replace(can_replace):
"""
decorator which adds the condition of not using fully sharded loras
intended to wrap can_replace_layer()
"""
def dec(*args, **kwargs):
decorate = kwargs.pop("decorate") if "decorate" in kwargs else True
condition = not kwargs["lora_config"].fully_sharded_loras if decorate else True
return can_replace(*args, **kwargs) and condition
return dec
def _fully_sharded_can_replace(can_replace):
"""
decorator which adds the condition of fully sharded loras
intended to wrap can_replace_layer()
"""
def dec(*args, **kwargs):
return (
can_replace(*args, **kwargs) and kwargs["lora_config"].fully_sharded_loras
)
return dec
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/layers/column_parallel_linear.py | vllm/lora/layers/column_parallel_linear.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
import torch.nn as nn
from transformers import PretrainedConfig
from vllm.config.lora import LoRAConfig
from vllm.distributed import tensor_model_parallel_all_gather
from vllm.distributed.utils import divide
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
MergedColumnParallelLinear,
QKVParallelLinear,
)
from vllm.platforms import current_platform
from .base_linear import BaseLinearLayerWithLoRA
from .utils import _fully_sharded_can_replace, _not_fully_sharded_can_replace
def _mcp_apply(x, bias, layer: "ColumnParallelLinearWithLoRA"):
    """
    For `ColumnParallelLinearWithLoRA` or classes that inherit from
    `ColumnParallelLinearWithLoRA`, they share the same `apply` logic.

    Pipeline: base GEMM -> per-rank LoRA-A shrink into a float32 buffer ->
    TP all-gather of the shrunk buffers -> LoRA-B expand added onto the
    base output (in place where the platform supports it).
    """
    assert (
        layer.n_slices
        == len(layer.lora_a_stacked)
        == len(layer.lora_b_stacked)
        == len(layer.output_slices)
    )
    # Base (non-LoRA) projection first; the LoRA delta is added on top.
    output = layer.base_layer.quant_method.apply(layer.base_layer, x, bias)

    # Flatten leading batch dims to the 2D shape the punica kernels expect;
    # the original output shape is restored at the end.
    x = x.view(-1, x.shape[-1])
    output, out_orig_shape = output.view(-1, output.shape[-1]), output.shape

    # Since communication is needed, the buffer is directly initialized as a
    # tensor rather than a tuple of tensor.
    buffers = torch.zeros(
        (layer.n_slices, x.shape[0], layer.lora_a_stacked[0].shape[2]),
        dtype=torch.float32,
        device=x.device,
    )

    shrunk_buffers: torch.Tensor | None = layer.punica_wrapper.add_shrink(
        buffers, x, layer.lora_a_stacked, 1.0
    )

    # Platforms that cannot update in place return the result instead.
    if not current_platform.can_update_inplace():
        buffers = shrunk_buffers

    buffers = tensor_model_parallel_all_gather(buffers)

    lora_output: torch.Tensor | None = layer.punica_wrapper.add_expand(
        output,
        buffers,
        layer.lora_b_stacked,
        layer.output_slices,
        offset_start=0,
        add_input=True,
    )

    if not current_platform.can_update_inplace():
        output = lora_output

    output = output.view(*out_orig_shape)
    # now have column partitioned and packed output
    return output
class ColumnParallelLinearWithLoRA(BaseLinearLayerWithLoRA):
    """
    LoRA on top of ColumnParallelLinear layer.
    LoRA B is sliced for tensor parallelism.
    There are two types for the `base_layer`:
    1. ColumnParallelLinear, e.g.`dense_h_to_4h` in `FalconForCausalLM`.
    2. MergedColumnParallelLinear, e.g.`gate_up_proj` in `Phi3ForCausalLM`.
    """

    def __init__(self, base_layer: ColumnParallelLinear) -> None:
        super().__init__(base_layer)
        # The base_layer type is ColumnParallelLinear or
        # MergedColumnParallelLinear, their weight sharding logic is
        # inconsistent when TP is greater than 1.
        self.is_merged_col_linear = type(base_layer) is MergedColumnParallelLinear
        self.output_size = self.base_layer.output_size_per_partition
        # There is only one LoRA layer
        self.n_slices = 1

    def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor:
        # LoRA A is replicated across TP ranks for this layer type.
        return lora_a

    def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor:
        """Return this rank's shard of LoRA B (rows along the output dim)."""
        # Applicable to cases where the base_layer is
        # MergedColumnParallelLinear.
        if self.is_merged_col_linear:
            # The two packed halves are sharded independently: take this
            # rank's chunk from each half, then re-pack them.
            shard_size = self.output_size // 2
            offset = lora_b.shape[0] // 2
            left_weight = lora_b[
                self.tp_rank * shard_size : (self.tp_rank + 1) * shard_size, :
            ]
            right_weight = lora_b[
                offset + self.tp_rank * shard_size : offset
                + (self.tp_rank + 1) * shard_size,
                :,
            ]
            lora_b = torch.cat([left_weight, right_weight], dim=0)
        # Applicable to cases where the base_layer is
        # ColumnParallelLinear.
        else:
            shard_size = self.output_size
            start_idx = self.tp_rank * shard_size
            end_idx = (self.tp_rank + 1) * shard_size
            lora_b = lora_b[start_idx:end_idx, :]
        return lora_b

    def forward(
        self, input_: torch.Tensor
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor | None]:
        """Forward of ColumnParallelLinear

        Args:
            input_: Tensor whose last dimension is `input_size`.

        Returns:
            - output
            - bias
        """
        bias = self.base_layer.bias if not self.base_layer.skip_bias_add else None

        # Matrix multiply.
        output_parallel = self.apply(input_, bias)
        if self.base_layer.gather_output and self.tp_size > 1:
            # All-gather across the partitions.
            output = tensor_model_parallel_all_gather(output_parallel)
        else:
            output = output_parallel

        if not self.base_layer.return_bias:
            return output

        output_bias = self.base_layer.bias if self.base_layer.skip_bias_add else None
        return output, output_bias

    @classmethod
    @_not_fully_sharded_can_replace
    def can_replace_layer(
        cls,
        source_layer: nn.Module,
        lora_config: LoRAConfig,
        packed_modules_list: list,
        model_config: PretrainedConfig | None = None,
    ) -> bool:
        """Replace a plain ColumnParallelLinear, or a
        MergedColumnParallelLinear that packs a single module."""
        return type(source_layer) is ColumnParallelLinear or (
            type(source_layer) is MergedColumnParallelLinear
            and len(packed_modules_list) == 1
        )
class MergedColumnParallelLinearWithLoRA(ColumnParallelLinearWithLoRA):
    """ColumnParallelLinear layer that is composed of 2 sublayers (slices)
    packed together (e.g. gate_proj + up_proj -> gate_up_proj).

    This means we have 2 LoRAs, each applied to one half of the layer.

    Both slices must have the same size.
    """

    def __init__(
        self, base_layer: MergedColumnParallelLinear | QKVParallelLinear
    ) -> None:
        super().__init__(base_layer)
        # There are two LoRA layers
        # the output_sizes in MergedColumnParallelLinear is not sharded by tp
        # we need to divide it by the tp_size to get correct slices size
        output_sizes = self.base_layer.output_sizes
        self.output_slices = tuple(
            divide(output_size, self.tp_size) for output_size in output_sizes
        )
        self.n_slices = len(self.output_slices)
        # Every slice of this layer uses this rank's shard id.
        self.output_ids = (self.tp_rank,) * self.n_slices

    def create_lora_weights(
        self,
        max_loras: int,
        lora_config: LoRAConfig,
        model_config: PretrainedConfig | None = None,
    ) -> None:
        """
        The main reason for overriding this function is to enhance code
        maintainability.
        """
        self.lora_config = lora_config

        # With fully sharded LoRA, A's rank dim is split across TP ranks.
        lora_a_output_size_per_partition = (
            lora_config.max_lora_rank
            if not lora_config.fully_sharded_loras
            else divide(lora_config.max_lora_rank, self.tp_size)
        )

        self.lora_a_stacked = tuple(
            torch.zeros(
                max_loras,
                1,
                lora_a_output_size_per_partition,
                self.input_size,
                dtype=lora_config.lora_dtype,
                device=self.device,
            )
            for _ in range(self.n_slices)
        )
        self.lora_b_stacked = tuple(
            torch.zeros(
                max_loras,
                1,
                output_size,
                lora_config.max_lora_rank,
                dtype=lora_config.lora_dtype,
                device=self.device,
            )
            for output_size in self.output_slices
        )

    def slice_lora_a(
        self, lora_a: list[torch.Tensor | None]
    ) -> list[torch.Tensor | None]:
        # LoRA A is not sharded in the non-fully-sharded case.
        return lora_a

    def slice_lora_b(
        self, lora_b: list[torch.Tensor | None]
    ) -> list[torch.Tensor | None]:
        """Take this rank's shard from each sub-lora's B matrix; entries may
        be None when a sub-lora is absent."""
        sliced_lora_b = [None] * self.n_slices
        for i, (shard_id, shard_size) in enumerate(
            zip(self.output_ids, self.output_slices)
        ):
            if (lora_b_i := lora_b[i]) is not None:
                sliced_lora_b[i] = lora_b_i[
                    shard_size * shard_id : shard_size * (shard_id + 1), :
                ]
        return sliced_lora_b

    def set_lora(
        self,
        index: int,
        lora_a: torch.Tensor | list[torch.Tensor],
        lora_b: torch.Tensor | list[torch.Tensor],
    ):
        """Copy every sub-lora's (possibly TP-sliced) weights into slot
        ``index``; missing (None) sub-loras leave their slice zeroed."""
        self.reset_lora(index)

        if self.tp_size > 1:
            lora_a = self.slice_lora_a(lora_a)
            lora_b = self.slice_lora_b(lora_b)

        for i in range(self.n_slices):
            if (lora_a_i := lora_a[i]) is not None:
                self.lora_a_stacked[i][
                    index, 0, : lora_a_i.shape[0], : lora_a_i.shape[1]
                ].copy_(lora_a_i, non_blocking=True)
            if (lora_b_i := lora_b[i]) is not None:
                self.lora_b_stacked[i][
                    index, 0, : lora_b_i.shape[0], : lora_b_i.shape[1]
                ].copy_(lora_b_i, non_blocking=True)

    @classmethod
    @_not_fully_sharded_can_replace
    def can_replace_layer(
        cls,
        source_layer: nn.Module,
        lora_config: LoRAConfig,
        packed_modules_list: list,
        model_config: PretrainedConfig | None = None,
    ) -> bool:
        return (
            type(source_layer) is MergedColumnParallelLinear
            and len(packed_modules_list) == 2
        )
class QKVParallelLinearWithLoRA(ColumnParallelLinearWithLoRA):
    """
    ColumnParallelLinear layer that is specifically designed for
    qkv_proj. Certain models, such as chatglm3 and baichuan-7b,
    only contains a single LoRA within their qkv_proj layer.

    During inference with Tensor Parallel, the weights of lora_b
    must be accurately partitioned according to the respective ranks.

    Q slice may have different shape than K and V slices (which both have
    the same shape).
    """

    def __init__(self, base_layer: QKVParallelLinear) -> None:
        super().__init__(base_layer)
        # Per-rank and total widths of the Q and KV projections, in
        # elements of the packed output dim.
        self.q_proj_total_size = (
            self.base_layer.total_num_heads * self.base_layer.head_size
        )
        self.q_proj_shard_size = self.base_layer.num_heads * self.base_layer.head_size
        self.kv_proj_shard_size = (
            self.base_layer.num_kv_heads * self.base_layer.head_size
        )
        self.kv_proj_total_size = (
            self.base_layer.total_num_kv_heads * self.base_layer.head_size
        )
        # There is only one LoRA layer
        self.n_slices = 1

    def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor:
        """Cut this rank's Q, K and V shards out of the packed lora_b and
        re-pack them in Q/K/V order."""
        self.q_shard_id = self.tp_rank
        # num_kv_head_replicas > 1 maps several ranks to one KV shard.
        self.kv_shard_id = self.tp_rank // self.base_layer.num_kv_head_replicas
        lora_b_q = lora_b[
            self.q_proj_shard_size * self.q_shard_id : self.q_proj_shard_size
            * (self.q_shard_id + 1),
            :,
        ]
        k_offset = self.q_proj_total_size
        lora_b_k = lora_b[
            k_offset + self.kv_proj_shard_size * self.kv_shard_id : k_offset
            + self.kv_proj_shard_size * (self.kv_shard_id + 1),
            :,
        ]
        v_offset = k_offset + self.kv_proj_total_size
        lora_b_v = lora_b[
            v_offset + self.kv_proj_shard_size * self.kv_shard_id : v_offset
            + self.kv_proj_shard_size * (self.kv_shard_id + 1),
            :,
        ]
        lora_b = torch.cat([lora_b_q, lora_b_k, lora_b_v], dim=0)
        return lora_b

    @classmethod
    @_not_fully_sharded_can_replace
    def can_replace_layer(
        cls,
        source_layer: nn.Module,
        lora_config: LoRAConfig,
        packed_modules_list: list,
        model_config: PretrainedConfig | None = None,
    ) -> bool:
        return type(source_layer) is QKVParallelLinear and len(packed_modules_list) == 1
class MergedQKVParallelLinearWithLoRA(MergedColumnParallelLinearWithLoRA):
    """MergedColumnParallelLinear layer that is composed of 3 sublayers (slices)
    packed together in qkv proj fashion
    (q_proj + k_proj + v_proj -> qkv_proj).

    This means we have 3 LoRAs, each applied to one slice of the layer.

    Q slice may have different shape than K and V slices (which both have
    the same shape).
    """

    def __init__(self, base_layer: QKVParallelLinear) -> None:
        super().__init__(base_layer)
        # There are three LoRA layer.
        self.n_slices = len(self.base_layer.output_sizes)
        # Per-rank widths of the Q and KV projections.
        self.q_proj_shard_size = self.base_layer.num_heads * self.base_layer.head_size
        self.kv_proj_shard_size = (
            self.base_layer.num_kv_heads * self.base_layer.head_size
        )
        self.q_shard_id = self.tp_rank
        # num_kv_head_replicas > 1 maps several ranks to one KV shard.
        self.kv_shard_id = self.tp_rank // self.base_layer.num_kv_head_replicas

        # Unlike the merged-column case, Q and KV slices can have different
        # widths and different shard ids.
        self.output_slices = (
            self.q_proj_shard_size,
            self.kv_proj_shard_size,
            self.kv_proj_shard_size,
        )
        self.output_ids = (
            self.q_shard_id,
            self.kv_shard_id,
            self.kv_shard_id,
        )

    def create_lora_weights(
        self,
        max_loras: int,
        lora_config: LoRAConfig,
        model_config: PretrainedConfig | None = None,
    ) -> None:
        """
        The main reason for overloading this function is to handle inconsistent
        weight dimensions in qkv lora.
        """
        super().create_lora_weights(max_loras, lora_config, model_config)

    @classmethod
    @_not_fully_sharded_can_replace
    def can_replace_layer(
        cls,
        source_layer: nn.Module,
        lora_config: LoRAConfig,
        packed_modules_list: list,
        model_config: PretrainedConfig | None = None,
    ) -> bool:
        return type(source_layer) is QKVParallelLinear and len(packed_modules_list) == 3
# These following layers are based on the tensor parallelism strategy given in
# Y. Sheng et al., S-LoRA: Serving Thousands of Concurrent LoRA Adapters. 2023,
# https://arxiv.org/abs/2311.03285.
class ColumnParallelLinearWithShardedLoRA(ColumnParallelLinearWithLoRA):
    """
    Fully sharded variant of `ColumnParallelLinearWithLoRA`: LoRA A is
    sharded across TP ranks in addition to LoRA B.

    Following S-LoRA, the split happens along the rank dimension.
    """

    # After the `lora_a` GEMM, an all-gather runs across TP ranks, so the
    # only requirement on the `lora_a` sharding is that it lines up with
    # that gather; `lora_b` keeps the base class's column sharding.
    def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor:
        rank_shard = self.lora_a_stacked[0].shape[2]
        begin = self.tp_rank * rank_shard
        return lora_a[begin : begin + rank_shard, :]

    def apply(self, x: torch.Tensor, bias: torch.Tensor | None = None) -> torch.Tensor:
        return _mcp_apply(x, bias, self)

    @classmethod
    @_fully_sharded_can_replace
    def can_replace_layer(
        cls,
        source_layer: nn.Module,
        lora_config: LoRAConfig,
        packed_modules_list: list,
        model_config: PretrainedConfig | None = None,
    ) -> bool:
        # Kwargs are spelled out so the decorator can inspect them;
        # decorate=False skips the parent's not-fully-sharded condition.
        return super().can_replace_layer(
            source_layer=source_layer,
            lora_config=lora_config,
            packed_modules_list=packed_modules_list,
            model_config=model_config,
            decorate=False,
        )
class MergedColumnParallelLinearWithShardedLoRA(MergedColumnParallelLinearWithLoRA):
    """
    Fully sharded variant of `MergedColumnParallelLinearWithLoRA`: the
    LoRA A matrices are sharded across TP ranks as well.

    Following S-LoRA, the split happens along the rank dimension.
    """

    def slice_lora_a(
        self, lora_a: list[torch.Tensor | None]
    ) -> list[torch.Tensor | None]:
        # lora_a holds one (possibly None) sub-lora per packed slice; take
        # this rank's chunk of the rank dim from each present sub-lora.
        rank_shard = self.lora_a_stacked[0].shape[2]
        begin = self.tp_rank * rank_shard
        return [
            None if sub_lora is None else sub_lora[begin : begin + rank_shard, :]
            for sub_lora in lora_a
        ]

    def apply(self, x: torch.Tensor, bias: torch.Tensor | None = None) -> torch.Tensor:
        return _mcp_apply(x, bias, self)

    @classmethod
    @_fully_sharded_can_replace
    def can_replace_layer(
        cls,
        source_layer: nn.Module,
        lora_config: LoRAConfig,
        packed_modules_list: list,
        model_config: PretrainedConfig | None = None,
    ) -> bool:
        # Kwargs are spelled out so the decorator can inspect them;
        # decorate=False skips the parent's not-fully-sharded condition.
        return super().can_replace_layer(
            source_layer=source_layer,
            lora_config=lora_config,
            packed_modules_list=packed_modules_list,
            model_config=model_config,
            decorate=False,
        )
class QKVParallelLinearWithShardedLoRA(QKVParallelLinearWithLoRA):
    """
    Fully sharded variant of `QKVParallelLinearWithLoRA`: LoRA A is
    sharded across TP ranks as well.

    Following S-LoRA, the split happens along the rank dimension.
    """

    def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor:
        # Keep only this rank's chunk of the LoRA rank dimension.
        rank_shard = self.lora_a_stacked[0].shape[2]
        begin = self.tp_rank * rank_shard
        return lora_a[begin : begin + rank_shard, :]

    def apply(self, x: torch.Tensor, bias: torch.Tensor | None = None) -> torch.Tensor:
        return _mcp_apply(x, bias, self)

    @classmethod
    @_fully_sharded_can_replace
    def can_replace_layer(
        cls,
        source_layer: nn.Module,
        lora_config: LoRAConfig,
        packed_modules_list: list,
        model_config: PretrainedConfig | None = None,
    ) -> bool:
        # Kwargs are spelled out so the decorator can inspect them;
        # decorate=False skips the parent's not-fully-sharded condition.
        return super().can_replace_layer(
            source_layer=source_layer,
            lora_config=lora_config,
            packed_modules_list=packed_modules_list,
            model_config=model_config,
            decorate=False,
        )
class MergedQKVParallelLinearWithShardedLoRA(MergedQKVParallelLinearWithLoRA):
    """
    Fully sharded variant of `MergedQKVParallelLinearWithLoRA`: the LoRA A
    matrices are sharded across TP ranks as well.

    Following S-LoRA, the split happens along the rank dimension.
    """

    def slice_lora_a(
        self, lora_a: list[torch.Tensor | None]
    ) -> list[torch.Tensor | None]:
        # Three sub-loras (q, k, v); each may be None and each has its own
        # rank-shard width taken from the matching stacked buffer.
        sliced: list[torch.Tensor | None] = []
        for sub_lora, stacked in zip(lora_a, self.lora_a_stacked):
            if sub_lora is None:
                sliced.append(None)
            else:
                rank_shard = stacked.shape[2]
                begin = self.tp_rank * rank_shard
                sliced.append(sub_lora[begin : begin + rank_shard, :])
        return sliced

    def apply(self, x: torch.Tensor, bias: torch.Tensor | None = None) -> torch.Tensor:
        return _mcp_apply(x, bias, self)

    @classmethod
    @_fully_sharded_can_replace
    def can_replace_layer(
        cls,
        source_layer: nn.Module,
        lora_config: LoRAConfig,
        packed_modules_list: list,
        model_config: PretrainedConfig | None = None,
    ) -> bool:
        # Kwargs are spelled out so the decorator can inspect them;
        # decorate=False skips the parent's not-fully-sharded condition.
        return super().can_replace_layer(
            source_layer=source_layer,
            lora_config=lora_config,
            packed_modules_list=packed_modules_list,
            model_config=model_config,
            decorate=False,
        )
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/layers/vocal_parallel_embedding.py | vllm/lora/layers/vocal_parallel_embedding.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PretrainedConfig
from vllm.config.lora import LoRAConfig
from vllm.model_executor.layers.vocab_parallel_embedding import VocabParallelEmbedding
from vllm.platforms import current_platform
from .base import BaseLayerWithLoRA
class VocabParallelEmbeddingWithLoRA(BaseLayerWithLoRA):
def __init__(self, base_layer: VocabParallelEmbedding) -> None:
super().__init__()
self.base_layer = base_layer
self.embeddings_slice: tuple[int, int] | None
self.embeddings_weights: torch.Tensor | None
def create_lora_weights(
self,
max_loras: int,
lora_config: LoRAConfig,
model_config: PretrainedConfig | None = None,
) -> None:
if self.base_layer.num_added_embeddings_per_partition > 0:
# We can start adding lora weights
self.embeddings_weights = self.base_layer.weight.data[
self.base_layer.num_org_embeddings_per_partition : self.base_layer.num_org_embeddings_per_partition # noqa: E501
+ self.base_layer.num_added_embeddings_per_partition
]
self.embeddings_slice = (
self.base_layer.shard_indices.added_vocab_start_index
- self.base_layer.org_vocab_size,
self.base_layer.shard_indices.added_vocab_end_index
- self.base_layer.org_vocab_size,
)
self.base_layer.weight.data[
self.base_layer.num_org_embeddings_per_partition :
].fill_(0)
else:
self.embeddings_slice = None
self.embeddings_weights = None
self.lora_a_stacked = torch.zeros(
(
max_loras,
self.base_layer.org_vocab_size,
lora_config.max_lora_rank,
),
dtype=lora_config.lora_dtype,
device=self.base_layer.weight.device,
)
self.lora_b_stacked = torch.zeros(
(
max_loras,
1,
self.base_layer.embedding_dim,
lora_config.max_lora_rank,
),
dtype=lora_config.lora_dtype,
device=self.base_layer.weight.device,
)
self.lora_a_stacked_2d = self.lora_a_stacked.view(
self.lora_a_stacked.shape[0] * self.lora_a_stacked.shape[1],
self.lora_a_stacked.shape[2],
)
def reset_lora(self, index: int):
self.lora_a_stacked[index] = 0
self.lora_b_stacked[index] = 0
def set_lora(
self,
index: int,
lora_a: torch.Tensor | list[torch.Tensor],
lora_b: torch.Tensor | list[torch.Tensor],
):
assert isinstance(lora_a, torch.Tensor)
assert isinstance(lora_b, torch.Tensor)
self.reset_lora(index)
# NOTE self.lora_a_stacked is row-major, and lora_a is col-major,
# so we need transpose here
self.lora_a_stacked[index, : lora_a.shape[1], : lora_a.shape[0]].copy_(
lora_a.T, non_blocking=True
)
self.lora_b_stacked[index, 0, : lora_b.shape[0], : lora_b.shape[1]].copy_(
lora_b, non_blocking=True
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# NB: Don't use torch.narrow here. torch.narrow triggers some
# Dynamic Shape specialization in torch.compile
num_tokens = x.shape[0]
indices_1 = self.punica_wrapper._embeddings_indices[1][:num_tokens]
full_lora_a_embeddings = F.embedding(
x + indices_1,
self.lora_a_stacked_2d,
)
full_output = self.base_layer.forward(x)
full_output_org = full_output
if full_output.ndim == 3:
full_output = full_output.view(
full_output.shape[0] * full_output.shape[1], -1
)
if full_lora_a_embeddings.ndim == 3:
full_lora_a_embeddings = full_lora_a_embeddings.view(
full_lora_a_embeddings.shape[0] * full_lora_a_embeddings.shape[1],
-1,
)
lora_output: torch.Tensor | None = self.punica_wrapper.add_lora_embedding(
full_output, full_lora_a_embeddings, self.lora_b_stacked, add_input=True
)
if not current_platform.can_update_inplace():
full_output = lora_output
return full_output.view_as(full_output_org)
@classmethod
def can_replace_layer(
cls,
source_layer: nn.Module,
lora_config: LoRAConfig,
packed_modules_list: list,
model_config: PretrainedConfig | None = None,
) -> bool:
return type(source_layer) is VocabParallelEmbedding
    @property
    def weight(self):
        # Pass-through to the wrapped base layer's embedding weight.
        return self.base_layer.weight
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/layers/__init__.py | vllm/lora/layers/__init__.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.lora.layers.base import BaseLayerWithLoRA
from vllm.lora.layers.column_parallel_linear import (
ColumnParallelLinearWithLoRA,
ColumnParallelLinearWithShardedLoRA,
MergedColumnParallelLinearWithLoRA,
MergedColumnParallelLinearWithShardedLoRA,
MergedQKVParallelLinearWithLoRA,
MergedQKVParallelLinearWithShardedLoRA,
QKVParallelLinearWithLoRA,
QKVParallelLinearWithShardedLoRA,
)
from vllm.lora.layers.fused_moe import FusedMoE3DWithLoRA, FusedMoEWithLoRA
from vllm.lora.layers.logits_processor import LogitsProcessorWithLoRA
from vllm.lora.layers.replicated_linear import ReplicatedLinearWithLoRA
from vllm.lora.layers.row_parallel_linear import (
RowParallelLinearWithLoRA,
RowParallelLinearWithShardedLoRA,
)
from vllm.lora.layers.utils import LoRAMapping, LoRAMappingType
from vllm.lora.layers.vocal_parallel_embedding import VocabParallelEmbeddingWithLoRA
__all__ = [
"BaseLayerWithLoRA",
"VocabParallelEmbeddingWithLoRA",
"LogitsProcessorWithLoRA",
"ColumnParallelLinearWithLoRA",
"ColumnParallelLinearWithShardedLoRA",
"MergedColumnParallelLinearWithLoRA",
"MergedColumnParallelLinearWithShardedLoRA",
"MergedQKVParallelLinearWithLoRA",
"MergedQKVParallelLinearWithShardedLoRA",
"QKVParallelLinearWithLoRA",
"QKVParallelLinearWithShardedLoRA",
"RowParallelLinearWithLoRA",
"RowParallelLinearWithShardedLoRA",
"ReplicatedLinearWithLoRA",
"LoRAMapping",
"LoRAMappingType",
"FusedMoEWithLoRA",
"FusedMoE3DWithLoRA",
]
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/layers/base.py | vllm/lora/layers/base.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import TYPE_CHECKING
import torch
import torch.nn as nn
from transformers import PretrainedConfig
from vllm.config.lora import LoRAConfig
if TYPE_CHECKING:
from vllm.lora.punica_wrapper import PunicaWrapperBase
class BaseLayerWithLoRA(nn.Module):
    """Interface that every LoRA-wrapped layer implements.

    Concrete subclasses wrap a base layer and hold stacked LoRA A/B
    buffers for up to `max_loras` adapters. The methods below define the
    lifecycle (create -> set/reset) plus optional tensor-parallel slicing
    hooks; bodies here are intentionally empty stubs.
    """

    def slice_lora_a(
        self, lora_a: torch.Tensor | list[torch.Tensor | None]
    ) -> torch.Tensor | list[torch.Tensor | None]:
        """Slice lora a if splitting for tensor parallelism."""
        ...

    def slice_lora_b(
        self, lora_b: torch.Tensor | list[torch.Tensor | None]
    ) -> torch.Tensor | list[torch.Tensor | None]:
        """Slice lora b if splitting with tensor parallelism."""
        ...

    def create_lora_weights(
        self,
        max_loras: int,
        lora_config: LoRAConfig,
        model_config: PretrainedConfig | None = None,
    ) -> None:
        """Initializes lora matrices (zeroed buffers for `max_loras` slots)."""
        ...

    def reset_lora(self, index: int):
        """Resets the lora weights at index back to 0."""
        ...

    def set_lora(
        self,
        index: int,
        lora_a: torch.Tensor | list[torch.Tensor],
        lora_b: torch.Tensor | list[torch.Tensor],
    ):
        """Overwrites lora tensors at index."""
        ...

    def set_mapping(
        self,
        punica_wrapper,
    ):
        # Late-bound: the punica wrapper provides the batched LoRA kernels
        # that subclasses' forward paths dispatch to.
        self.punica_wrapper: PunicaWrapperBase = punica_wrapper

    @classmethod
    def can_replace_layer(
        cls,
        source_layer: nn.Module,
        lora_config: LoRAConfig,
        packed_modules_list: list,
        model_config: PretrainedConfig | None = None,
    ) -> bool:
        """Returns True if the layer can be replaced by this LoRA layer."""
        raise NotImplementedError
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/layers/replicated_linear.py | vllm/lora/layers/replicated_linear.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
import torch.nn as nn
from transformers import PretrainedConfig
from vllm.config.lora import LoRAConfig
from vllm.model_executor.layers.linear import ReplicatedLinear
from .base_linear import BaseLinearLayerWithLoRA
class ReplicatedLinearWithLoRA(BaseLinearLayerWithLoRA):
    """LoRA wrapper for ReplicatedLinear.

    The base weight is present in full on every rank, so the LoRA A/B
    matrices never need tensor-parallel slicing.
    """

    def __init__(self, base_layer: ReplicatedLinear) -> None:
        super().__init__(base_layer)
        self.output_size = self.base_layer.output_size
        # To ensure interface compatibility, set to 1 always.
        self.n_slices = 1

    def forward(
        self, input_: torch.Tensor
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor | None]:
        """Forward of ReplicatedLinearWithLoRA

        Args:
            input_: Tensor whose last dimension is `input_size`.

        Returns:
            - output
            - bias
        """
        base = self.base_layer
        # With skip_bias_add the bias is handed back to the caller rather
        # than folded into the matmul.
        fused_bias = None if base.skip_bias_add else base.bias
        output = self.apply(input_, fused_bias)
        deferred_bias = base.bias if base.skip_bias_add else None
        if not base.return_bias:
            return output
        return output, deferred_bias

    # ReplicatedLinear should always be replaced, regardless of the fully
    # sharded LoRAs setting, because it is, by definition, copied per GPU.
    @classmethod
    def can_replace_layer(
        cls,
        source_layer: nn.Module,
        lora_config: LoRAConfig,
        packed_modules_list: list,
        model_config: PretrainedConfig | None = None,
    ) -> bool:
        return type(source_layer) is ReplicatedLinear

    def slice_lora_a(
        self, lora_a: torch.Tensor | list[torch.Tensor | None]
    ) -> torch.Tensor | list[torch.Tensor | None]:
        """No-op: replicated layers keep the full LoRA A matrix."""
        return lora_a

    def slice_lora_b(
        self, lora_b: torch.Tensor | list[torch.Tensor | None]
    ) -> torch.Tensor | list[torch.Tensor | None]:
        """No-op: replicated layers keep the full LoRA B matrix."""
        return lora_b
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/layers/fused_moe.py | vllm/lora/layers/fused_moe.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import functools
import torch
import torch.nn as nn
from transformers import PretrainedConfig
from vllm import envs
from vllm.config.lora import LoRAConfig
from vllm.distributed.parallel_state import (
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
)
from vllm.distributed.utils import divide
from vllm.lora.layers.base import BaseLayerWithLoRA
from vllm.lora.ops.triton_ops.utils import get_lora_op_configs
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.fused_moe.config import (
_get_config_dtype_str,
)
from vllm.model_executor.layers.fused_moe.fused_marlin_moe import (
MarlinExperts,
)
from vllm.model_executor.layers.fused_moe.fused_moe import (
TritonExperts,
try_get_optimal_moe_config,
)
from vllm.model_executor.layers.fused_moe.fused_moe_modular_method import (
FusedMoEModularMethod,
)
from vllm.model_executor.layers.fused_moe.gpt_oss_triton_kernels_moe import (
UnfusedOAITritonExperts,
)
from vllm.model_executor.layers.fused_moe.modular_kernel import (
FusedMoEModularKernel,
)
from vllm.model_executor.layers.fused_moe.prepare_finalize import (
MoEPrepareAndFinalizeNoEP,
)
from .utils import _get_lora_device
class FusedMoEWithLoRA(BaseLayerWithLoRA):
    """LoRA support for FusedMoE, injected via modular-kernel hooks.

    Rather than wrapping the forward pass, this class decorates the
    modular kernel's `forward`, `activation` and `moe_sum` callbacks so
    the LoRA shrink/expand products for w13 and w2 run between the base
    MoE GEMMs. State is passed between the hooks through the shared
    `moe_state_dict` closure.
    """

    def __init__(self, base_layer: FusedMoE) -> None:
        super().__init__()
        self.base_layer = base_layer
        assert not self.base_layer.use_ep, (
            "EP support for Fused MoE LoRA is not implemented yet."
        )
        self.tp_size = get_tensor_model_parallel_world_size()
        self.tp_rank = get_tensor_model_parallel_rank()
        self.device = _get_lora_device(base_layer)
        # w1 (gate) and w3 (up) LoRA weights are kept as two slices.
        self._w13_slices = 2
        self._inject_lora_into_fused_moe()

    def _normalize_keys(self, config: dict[str, int | None]) -> dict[str, int | None]:
        """Upper-case kernel-config keys (e.g. block_m -> BLOCK_SIZE_M)."""
        normalized_config = {}
        for key, value in config.items():
            if key.islower():
                if key.startswith("block_"):
                    normalized_key = "BLOCK_SIZE_" + key.split("_")[-1].upper()
                else:
                    normalized_key = key.upper()
            else:
                normalized_key = key
            normalized_config[normalized_key] = value
        return normalized_config

    def _get_lora_moe_configs(
        self,
        op_prefix: str,
        num_loras: int,
        rank: int,
        num_slices: int,
        M: int,
        layer: FusedMoE,
        top_k: int,
        config_dtype: str,
    ):
        """Pick (shrink, expand) kernel configs for the MoE LoRA ops.

        Tuned configs are used when VLLM_TUNED_CONFIG_FOLDER is set;
        otherwise this falls back to the base MoE heuristic config.
        """
        if envs.VLLM_TUNED_CONFIG_FOLDER:
            hidden_size = layer.hidden_size
            intermediate_size = layer.intermediate_size_per_partition
            shrink_config = get_lora_op_configs(
                op_type=f"fused_moe_lora_{op_prefix}_shrink",
                max_loras=num_loras,
                batch=M,
                hidden_size=hidden_size,
                rank=rank,
                num_slices=num_slices,
                moe_intermediate_size=intermediate_size,
            )
            expand_config = get_lora_op_configs(
                op_type=f"fused_moe_lora_{op_prefix}_expand",
                max_loras=num_loras,
                batch=M,
                hidden_size=hidden_size,  # lora_a_stacked.shape[-1],
                rank=rank,
                num_slices=num_slices,
                moe_intermediate_size=intermediate_size,  # lora_b_stacked.shape[-2],
            )
        else:  # fall back to the default config
            get_config_func = functools.partial(
                try_get_optimal_moe_config,
                layer.w13_weight.size(),
                layer.w2_weight.size(),
                top_k,
                config_dtype,
                block_shape=layer.quant_method.moe_quant_config.block_shape,
            )
            shrink_config = get_config_func(M)
            expand_config = get_config_func(M)
        # The LoRA kernels expect upper-case keys regardless of origin.
        shrink_config = self._normalize_keys(shrink_config)
        expand_config = self._normalize_keys(expand_config)
        return shrink_config, expand_config

    def _inject_lora_into_fused_moe(self):
        """Wrap the modular kernel's hooks with LoRA-aware decorators.

        `fwd_decorator` captures the routing tensors, `act_decorator`
        applies the w13 LoRA before activation, and `moe_sum_decorator`
        applies the w2 LoRA before the final reduction.
        """
        # Shared closure state handed from one decorated hook to the next.
        moe_state_dict = {}
        top_k = self.base_layer.top_k
        self.base_layer.ensure_moe_quant_config_init()
        quant_config = self.base_layer.quant_method.moe_quant_config
        prepare_finalize = MoEPrepareAndFinalizeNoEP()
        m_fused_moe_fn = FusedMoEModularKernel(
            prepare_finalize,
            self.base_layer.quant_method.select_gemm_impl(
                prepare_finalize, self.base_layer
            ),
            self.base_layer.shared_experts,
            getattr(self.base_layer, "shared_experts_stream", None),
        )
        # Only these expert implementations are known to work with the
        # decorators below.
        if quant_config.use_mxfp4_w4a16:
            assert isinstance(
                m_fused_moe_fn.fused_experts, (MarlinExperts, UnfusedOAITritonExperts)
            )
        else:
            assert isinstance(
                m_fused_moe_fn.fused_experts, (MarlinExperts, TritonExperts)
            )

        def fwd_decorator(layer, func):
            # Stash the routing inputs so later hooks can reuse them.
            def wrapper(*args, **kwargs):
                moe_state_dict["hidden_states"] = kwargs["hidden_states"]
                moe_state_dict["topk_ids"] = kwargs["topk_ids"]
                moe_state_dict["topk_weights"] = kwargs["topk_weights"]
                moe_state_dict["expert_map"] = kwargs["expert_map"]
                moe_state_dict["apply_router_weight_on_input"] = kwargs[
                    "apply_router_weight_on_input"
                ]
                result = func(*args, **kwargs)
                return result

            return wrapper

        def act_decorator(layer, func):
            # Add the w13 LoRA product into the pre-activation buffer.
            def wrapper(*args, **kwargs):
                _, output, input = args
                hidden_states = moe_state_dict["hidden_states"]
                topk_weights = moe_state_dict["topk_weights"]
                curr_topk_ids = moe_state_dict["topk_ids"]
                expert_map = moe_state_dict["expert_map"]
                config_dtype = _get_config_dtype_str(
                    dtype=hidden_states.dtype,
                    use_fp8_w8a8=False,
                    use_int8_w8a16=False,
                    use_int4_w4a16=False,
                )
                CHUNK_SIZE = envs.VLLM_FUSED_MOE_CHUNK_SIZE
                num_tokens = hidden_states.size(0)
                M = min(num_tokens, CHUNK_SIZE)
                max_lora_rank = self.w13_lora_a_stacked[0].shape[-2]
                shrink_config, expand_config = self._get_lora_moe_configs(
                    op_prefix="w13",
                    num_loras=self.max_loras,
                    rank=max_lora_rank,
                    num_slices=self._w13_slices,
                    M=M,
                    layer=layer,
                    top_k=top_k,
                    config_dtype=config_dtype,
                )
                # get the block size of m from customized config or default config
                (
                    sorted_token_ids_lora,
                    expert_ids_lora,
                    num_tokens_post_padded_lora,
                ) = self.punica_wrapper.moe_lora_align_block_size(
                    curr_topk_ids,
                    num_tokens,
                    shrink_config["BLOCK_SIZE_M"],
                    self.base_layer.local_num_experts,
                    self.max_loras,
                    self.adapter_enabled,
                    expert_map,
                )
                # Cache the alignment results for moe_sum_decorator below.
                moe_state_dict["sorted_token_ids_lora"] = sorted_token_ids_lora
                moe_state_dict["expert_ids_lora"] = expert_ids_lora
                moe_state_dict["num_tokens_post_padded_lora"] = (
                    num_tokens_post_padded_lora
                )
                expert_ids_lora = expert_ids_lora.view(self.max_loras, -1)
                sorted_token_ids_lora = sorted_token_ids_lora.view(self.max_loras, -1)
                self.punica_wrapper.add_lora_fused_moe(
                    input.view(-1, top_k, input.shape[-1]),
                    hidden_states,
                    self.w13_lora_a_stacked,
                    self.w13_lora_b_stacked,
                    topk_weights,
                    sorted_token_ids_lora,
                    expert_ids_lora,
                    num_tokens_post_padded_lora,
                    max_lora_rank,
                    top_k,
                    shrink_config,  ## pass the shrink config
                    expand_config,  ## pass the expand config
                    self.adapter_enabled,
                    fully_sharded=self.fully_sharded,
                )
                result = func(*args, **kwargs)
                moe_state_dict["intermediate_cache2"] = output
                return result

            return wrapper

        def moe_sum_decorator(layer, func):
            # Add the w2 LoRA product before the final expert reduction.
            def wrapper(*args, **kwargs):
                hidden_states = moe_state_dict["hidden_states"]
                topk_weights = moe_state_dict["topk_weights"]
                config_dtype = _get_config_dtype_str(
                    dtype=hidden_states.dtype,
                    use_fp8_w8a8=False,
                    use_int8_w8a16=False,
                    use_int4_w4a16=False,
                )
                CHUNK_SIZE = envs.VLLM_FUSED_MOE_CHUNK_SIZE
                num_tokens = hidden_states.size(0)
                M = min(num_tokens, CHUNK_SIZE)
                max_lora_rank = self.w2_lora_a_stacked[0].shape[-2]
                shrink_config, expand_config = self._get_lora_moe_configs(
                    op_prefix="w2",
                    num_loras=self.max_loras,
                    rank=max_lora_rank,
                    num_slices=1,
                    M=M,
                    layer=layer,
                    top_k=top_k,
                    config_dtype=config_dtype,
                )
                sorted_token_ids_lora = moe_state_dict["sorted_token_ids_lora"]
                expert_ids_lora = moe_state_dict["expert_ids_lora"]
                num_tokens_post_padded_lora = moe_state_dict[
                    "num_tokens_post_padded_lora"
                ]
                expert_ids_lora = expert_ids_lora.view(self.max_loras, -1)
                sorted_token_ids_lora = sorted_token_ids_lora.view(self.max_loras, -1)
                intermediate_cache2 = moe_state_dict["intermediate_cache2"]
                intermediate_cache3 = args[0]
                shard_size_w2 = divide(self.base_layer.hidden_size, self.tp_size)
                self.punica_wrapper.add_lora_fused_moe(
                    intermediate_cache3,
                    intermediate_cache2,
                    self.w2_lora_a_stacked,
                    self.w2_lora_b_stacked,
                    topk_weights,
                    sorted_token_ids_lora,
                    expert_ids_lora,
                    num_tokens_post_padded_lora,
                    max_lora_rank,
                    top_k,
                    shrink_config,  ## pass the shrink config
                    expand_config,  ## pass the expand config
                    self.adapter_enabled,
                    True,
                    fully_sharded=self.fully_sharded,
                    offset=shard_size_w2 * self.tp_rank if self.fully_sharded else 0,
                )
                result = func(*args, **kwargs)
                return result

            return wrapper

        fused_experts = m_fused_moe_fn.fused_experts
        m_fused_moe_fn.forward = fwd_decorator(self.base_layer, m_fused_moe_fn.forward)
        fused_experts.activation = act_decorator(
            self.base_layer, fused_experts.activation
        )
        fused_experts.moe_sum = moe_sum_decorator(
            self.base_layer, fused_experts.moe_sum
        )
        self.base_layer.quant_method = FusedMoEModularMethod(
            self.base_layer.quant_method, m_fused_moe_fn
        )

    def _create_lora_a_weights(
        self,
        max_loras: int,
        lora_config: LoRAConfig,
    ):
        """Allocate zeroed LoRA-A buffers for w13 (per slice) and w2."""
        # Shape: (max_loras, num_experts, rank, hidden); with fully-sharded
        # LoRA the rank dim is split across TP ranks.
        self.w13_lora_a_stacked: tuple[torch.Tensor, ...] = tuple(
            torch.zeros(
                (
                    max_loras,
                    self.base_layer.local_num_experts,
                    lora_config.max_lora_rank
                    if not self.fully_sharded
                    else divide(lora_config.max_lora_rank, self.tp_size),
                    self.base_layer.hidden_size,
                ),
                dtype=lora_config.lora_dtype,
                device=self.device,
            )
            for _ in range(self._w13_slices)
        )
        self.w2_lora_a_stacked: tuple[torch.Tensor, ...] = (
            torch.zeros(
                (
                    max_loras,
                    self.base_layer.local_num_experts,
                    lora_config.max_lora_rank,
                    self.base_layer.intermediate_size_per_partition,
                ),
                dtype=lora_config.lora_dtype,
                device=self.device,
            ),
        )

    def _create_lora_b_weights(self, max_loras: int, lora_config: LoRAConfig):
        """Allocate zeroed LoRA-B buffers for w13 (per slice) and w2."""
        self.w13_lora_b_stacked: tuple[torch.Tensor, ...] = tuple(
            torch.zeros(
                (
                    max_loras,
                    self.base_layer.local_num_experts,
                    self.base_layer.intermediate_size_per_partition,
                    lora_config.max_lora_rank,
                ),
                dtype=lora_config.lora_dtype,
                device=self.device,
            )
            for _ in range(self._w13_slices)
        )
        # With fully-sharded LoRA the w2-B output dim is split across ranks.
        self.w2_lora_b_stacked: tuple[torch.Tensor, ...] = (
            torch.zeros(
                (
                    max_loras,
                    self.base_layer.local_num_experts,
                    self.base_layer.hidden_size
                    if not self.fully_sharded
                    else divide(self.base_layer.hidden_size, self.tp_size),
                    lora_config.max_lora_rank,
                ),
                dtype=lora_config.lora_dtype,
                device=self.device,
            ),
        )

    def create_lora_weights(
        self,
        max_loras: int,
        lora_config: LoRAConfig,
        model_config: PretrainedConfig | None = None,
    ) -> None:
        """Initializes lora matrices."""
        self.max_loras = lora_config.max_loras
        self.fully_sharded = lora_config.fully_sharded_loras
        # Per-slot enable flags (one extra slot for "no adapter").
        self.adapter_enabled = torch.tensor(
            [0] * (max_loras + 1), dtype=torch.int, device=self.device
        )
        self._create_lora_a_weights(max_loras, lora_config)
        self._create_lora_b_weights(max_loras, lora_config)
        # They will be used by 'LoRALayerWeights.create_dummy_lora_weights'
        # to create a dummy LoRA weights.
        # TODO Optimize this section
        self.lora_a_stacked = []
        self.lora_b_stacked = []
        for lora_id in range(max_loras):
            for experts_id in range(self.base_layer.local_num_experts):
                # gate_proj,down_proj,up_proj
                self.lora_a_stacked.append(
                    self.w13_lora_a_stacked[0][lora_id][experts_id]
                )
                self.lora_a_stacked.append(
                    self.w2_lora_a_stacked[0][lora_id][experts_id]
                )
                self.lora_b_stacked.append(
                    self.w13_lora_b_stacked[0][lora_id][experts_id]
                )
                self.lora_b_stacked.append(
                    self.w2_lora_b_stacked[0][lora_id][experts_id]
                )
                self.lora_a_stacked.append(
                    self.w13_lora_a_stacked[1][lora_id][experts_id]
                )
                self.lora_b_stacked.append(
                    self.w13_lora_b_stacked[1][lora_id][experts_id]
                )

    def _slice_w13_a(self, w13_lora_a: torch.Tensor) -> torch.Tensor:
        """
        Applies to FusedMoEWithLoRA and FusedMoE3DWithLoRA
        """
        if self.tp_size == 1 or not self.fully_sharded:
            return w13_lora_a
        # w13_lora_a shape (num_experts,rank,input_size)
        current_lora_rank = w13_lora_a.shape[1]
        assert current_lora_rank % self.tp_size == 0
        # Based on S-LoRA, we slice W13/W1/W3 A along the rank dim.
        sliced_rank = current_lora_rank // self.tp_size
        start_idx = self.tp_rank * sliced_rank
        end_idx = (self.tp_rank + 1) * sliced_rank
        return w13_lora_a[:, start_idx:end_idx, :]

    def _slice_w13_b(self, w13_lora_b: torch.Tensor):
        """Take this rank's shard of the w13 LoRA-B output dim."""
        if self.tp_size == 1:
            return w13_lora_b
        # w13_lora_b shape (num_experts,output_size,rank)
        shard_size = self.base_layer.intermediate_size_per_partition
        start_idx = self.tp_rank * shard_size
        end_idx = (self.tp_rank + 1) * shard_size
        return w13_lora_b[:, start_idx:end_idx, :]

    def _slice_w2_a(self, w2_lora_a: torch.Tensor) -> torch.Tensor:
        """
        Applies to FusedMoEWithLoRA and FusedMoE3DWithLoRA
        """
        if self.tp_size == 1:
            return w2_lora_a
        # w2_lora_a shape (num_experts,rank,input_size)
        shard_size = self.base_layer.intermediate_size_per_partition
        start_idx = self.tp_rank * shard_size
        end_idx = (self.tp_rank + 1) * shard_size
        return w2_lora_a[:, :, start_idx:end_idx]

    def _slice_w2_b(self, w2_lora_b: torch.Tensor) -> torch.Tensor:
        """
        Applies to FusedMoEWithLoRA and FusedMoE3DWithLoRA
        """
        if self.tp_size == 1 or not self.fully_sharded:
            return w2_lora_b
        # Based on S-LoRA, we slice W2 B along the hidden_size dim.
        # w2_lora_b shape (num_experts,output_size,rank)
        current_lora_size = w2_lora_b.shape[1]
        sliced_size = current_lora_size // self.tp_size
        start_idx = self.tp_rank * sliced_size
        end_idx = (self.tp_rank + 1) * sliced_size
        return w2_lora_b[:, start_idx:end_idx, :]

    def reset_lora(self, index: int):
        """Resets the lora weights at index back to 0."""
        for pos in range(self._w13_slices):
            self.w13_lora_a_stacked[pos][index] = 0
            self.w13_lora_b_stacked[pos][index] = 0
        self.w2_lora_a_stacked[0][index] = 0
        self.w2_lora_b_stacked[0][index] = 0
        self.adapter_enabled[index] = 0

    def set_lora(
        self,
        index: int,
        lora_a: torch.Tensor | list[torch.Tensor],
        lora_b: torch.Tensor | list[torch.Tensor],
    ):
        """Overwrites lora tensors at index.

        Expects per-projection lists ordered (w1/gate, w2/down, w3/up);
        each tensor is sliced for TP before being copied into its slot.
        """
        # Make mypy happy
        assert isinstance(lora_a, list)
        assert isinstance(lora_b, list)
        self.reset_lora(index)
        self.adapter_enabled[index] = 1
        num_experts = self.w13_lora_a_stacked[0].shape[1]
        w1_lora_a, w2_lora_a, w3_lora_a = lora_a
        w1_lora_b, w2_lora_b, w3_lora_b = lora_b
        assert (
            num_experts
            == w1_lora_a.shape[0]
            == w2_lora_a.shape[0]
            == w3_lora_a.shape[0]
        )
        slliced_w1_lora_a = self._slice_w13_a(w1_lora_a)
        slliced_w1_lora_b = self._slice_w13_b(w1_lora_b)
        slliced_w3_lora_a = self._slice_w13_a(w3_lora_a)
        slliced_w3_lora_b = self._slice_w13_b(w3_lora_b)
        sliced_w2_lora_a = self._slice_w2_a(w2_lora_a)
        sliced_w2_lora_b = self._slice_w2_b(w2_lora_b)
        self.w13_lora_a_stacked[0][
            index, :, : slliced_w1_lora_a.shape[1], : slliced_w1_lora_a.shape[2]
        ].copy_(slliced_w1_lora_a, non_blocking=True)
        self.w13_lora_a_stacked[1][
            index, :, : slliced_w3_lora_a.shape[1], : slliced_w3_lora_a.shape[2]
        ].copy_(slliced_w3_lora_a, non_blocking=True)
        self.w13_lora_b_stacked[0][
            index, :, : slliced_w1_lora_b.shape[1], : slliced_w1_lora_b.shape[2]
        ].copy_(slliced_w1_lora_b, non_blocking=True)
        self.w13_lora_b_stacked[1][
            index, :, : slliced_w3_lora_b.shape[1], : slliced_w3_lora_b.shape[2]
        ].copy_(slliced_w3_lora_b, non_blocking=True)
        self.w2_lora_a_stacked[0][
            index, :, : sliced_w2_lora_a.shape[1], : sliced_w2_lora_a.shape[2]
        ].copy_(sliced_w2_lora_a, non_blocking=True)
        self.w2_lora_b_stacked[0][
            index, :, : sliced_w2_lora_b.shape[1], : sliced_w2_lora_b.shape[2]
        ].copy_(sliced_w2_lora_b, non_blocking=True)

    def forward(self, *args, **kwargs):
        # LoRA is applied inside the decorated modular-kernel hooks, so the
        # base forward is delegated to unchanged.
        return self.base_layer.forward(*args, **kwargs)

    def maybe_all_reduce_tensor_model_parallel(self, *args, **kwargs):
        return self.base_layer.maybe_all_reduce_tensor_model_parallel(*args, **kwargs)

    @property
    def _shared_experts(self):
        return self.base_layer._shared_experts

    @property
    def quant_method(self):
        return self.base_layer.quant_method

    @property
    def is_internal_router(self) -> bool:
        return self.base_layer.is_internal_router

    @classmethod
    def can_replace_layer(
        cls,
        source_layer: nn.Module,
        lora_config: LoRAConfig,
        packed_modules_list: list,
        model_config: PretrainedConfig | None = None,
    ) -> bool:
        """Returns True if the layer can be replaced by this LoRA layer."""
        # source_layer is FusedMoE or SharedFusedMoE
        return isinstance(source_layer, FusedMoE) and len(packed_modules_list) == 2
class FusedMoE3DWithLoRA(FusedMoEWithLoRA):
    """FusedMoE LoRA variant for checkpoints that fuse w1/w3 into one
    3D w13 tensor (a single packed module instead of two)."""

    def __init__(self, base_layer):
        super().__init__(base_layer)
        # Single fused w13 slice; the closures created by the parent's
        # injection read this attribute at call time.
        self._w13_slices = 1

    def _create_lora_b_weights(self, max_loras, lora_config):
        """Allocate LoRA-B buffers; w13 output dim is 2x intermediate
        because gate and up projections are fused."""
        self.w13_lora_b_stacked: tuple[torch.Tensor] = tuple(
            torch.zeros(
                (
                    max_loras,
                    self.base_layer.local_num_experts,
                    self.base_layer.intermediate_size_per_partition * 2,
                    lora_config.max_lora_rank,
                ),
                dtype=lora_config.lora_dtype,
                device=self.device,
            )
            for _ in range(self._w13_slices)
        )
        self.w2_lora_b_stacked: tuple[torch.Tensor] = (
            torch.zeros(
                (
                    max_loras,
                    self.base_layer.local_num_experts,
                    self.base_layer.hidden_size
                    if not self.fully_sharded
                    else divide(self.base_layer.hidden_size, self.tp_size),
                    lora_config.max_lora_rank,
                ),
                dtype=lora_config.lora_dtype,
                device=self.device,
            ),
        )

    def create_lora_weights(
        self,
        max_loras: int,
        lora_config: LoRAConfig,
        model_config: PretrainedConfig | None = None,
    ) -> None:
        """Initializes lora matrices."""
        assert isinstance(model_config, PretrainedConfig)
        # Architecture name drives the interleaved-w13 handling below.
        self._base_model = model_config.architectures[0]
        self.max_loras = lora_config.max_loras
        self.fully_sharded = lora_config.fully_sharded_loras
        self.adapter_enabled = torch.tensor(
            [0] * (max_loras + 1), dtype=torch.int, device=self.device
        )
        self._create_lora_a_weights(max_loras, lora_config)
        self._create_lora_b_weights(max_loras, lora_config)

    def _slice_w13_b(self, w13_lora_b: torch.Tensor):
        """Shard the fused w13 LoRA-B, keeping gate/up halves aligned."""
        if self.tp_size == 1:
            return w13_lora_b
        # w13_lora_b shape (num_experts,output_size,rank)
        shard_size = self.base_layer.intermediate_size_per_partition
        start_idx = self.tp_rank * shard_size
        end_idx = (self.tp_rank + 1) * shard_size
        # HACK: Currently, only GPT-OSS is in interleaved order
        if self._base_model == "GptOssForCausalLM":
            # For models like GPT-OSS, the weights of w1 (gate_proj) and w3 (up_proj)
            # in the interleaved order, and corresponding LoRA need to be processed.
            w1_lora_b = w13_lora_b[:, ::2, :]
            w3_lora_b = w13_lora_b[:, 1::2, :]
            sliced_w1_lora_b = w1_lora_b[:, start_idx:end_idx, :]
            sliced_w3_lora_b = w3_lora_b[:, start_idx:end_idx, :]
            return torch.stack([sliced_w1_lora_b, sliced_w3_lora_b], dim=2).flatten(
                1, 2
            )
        else:
            slice_size = w13_lora_b.shape[1] // 2
            w1_lora_b = w13_lora_b[:, :slice_size, :]
            w3_lora_b = w13_lora_b[:, slice_size:, :]
            sliced_w1_lora_b = w1_lora_b[:, start_idx:end_idx, :]
            sliced_w3_lora_b = w3_lora_b[:, start_idx:end_idx, :]
            return torch.cat([sliced_w1_lora_b, sliced_w3_lora_b], dim=1)

    def set_lora(
        self,
        index: int,
        lora_a: torch.Tensor | list[torch.Tensor],
        lora_b: torch.Tensor | list[torch.Tensor],
    ):
        """Overwrites lora tensors at index.

        Expects two-element lists (w13, w2). Per the inline comments, B
        matrices arrive as (output_size, rank, num_experts) and are
        permuted to (num_experts, output_size, rank) before slicing.
        """
        # Make mypy happy
        assert isinstance(lora_a, list)
        assert isinstance(lora_b, list)
        assert len(lora_a) == len(lora_b) == 2
        self.reset_lora(index)
        self.adapter_enabled[index] = 1
        num_experts = self.w13_lora_a_stacked[0].shape[1]
        w13_lora_a, w2_lora_a = lora_a
        w13_lora_b, w2_lora_b = lora_b
        # (num_experts,rank,input_size)
        w13_lora_a = w13_lora_a.reshape(num_experts, -1, w13_lora_a.shape[-1])
        w2_lora_a = w2_lora_a.reshape(num_experts, -1, w2_lora_a.shape[-1])
        # (output_size,rank,num_experts)
        w13_lora_b = w13_lora_b.reshape(w13_lora_b.shape[0], -1, num_experts)
        w2_lora_b = w2_lora_b.reshape(w2_lora_b.shape[0], -1, num_experts)
        # (num_experts,output_size,rank)
        w13_lora_b = w13_lora_b.permute(2, 0, 1)
        w2_lora_b = w2_lora_b.permute(2, 0, 1)
        sliced_w13_lora_a = self._slice_w13_a(w13_lora_a)
        sliced_w13_lora_b = self._slice_w13_b(w13_lora_b)
        sliced_w2_lora_a = self._slice_w2_a(w2_lora_a)
        sliced_w2_lora_b = self._slice_w2_b(w2_lora_b)
        self.w13_lora_a_stacked[0][
            index, :, : sliced_w13_lora_a.shape[1], : sliced_w13_lora_a.shape[2]
        ].copy_(sliced_w13_lora_a, non_blocking=True)
        self.w2_lora_a_stacked[0][
            index, :, : sliced_w2_lora_a.shape[1], : sliced_w2_lora_a.shape[2]
        ].copy_(sliced_w2_lora_a, non_blocking=True)
        self.w13_lora_b_stacked[0][
            index, :, : sliced_w13_lora_b.shape[1], : sliced_w13_lora_b.shape[2]
        ].copy_(sliced_w13_lora_b, non_blocking=True)
        self.w2_lora_b_stacked[0][
            index, :, : sliced_w2_lora_b.shape[1], : sliced_w2_lora_b.shape[2]
        ].copy_(sliced_w2_lora_b, non_blocking=True)

    @property
    def w13_input_size(self):
        """
        Full size
        """
        return self.w13_lora_a_stacked[0].shape[-1]

    @property
    def w13_output_size(self):
        """
        Full size
        """
        return self.w13_lora_b_stacked[0].shape[-2] * self.tp_size

    @property
    def w2_input_size(self):
        """
        Full size
        """
        return self.w2_lora_a_stacked[0].shape[-1] * self.tp_size

    @property
    def w2_output_size(self):
        """
        Full size
        """
        return self.w2_lora_a_stacked[0].shape[-2]

    @classmethod
    def can_replace_layer(
        cls,
        source_layer: nn.Module,
        lora_config: LoRAConfig,
        packed_modules_list: list,
        model_config: PretrainedConfig | None = None,
    ) -> bool:
        """Returns True if the layer can be replaced by this LoRA layer."""
        # source_layer is FusedMoE or SharedFusedMoE
        return isinstance(source_layer, FusedMoE) and len(packed_modules_list) == 1
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/ops/__init__.py | vllm/lora/ops/__init__.py | python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false | |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/ops/torch_ops/lora_ops.py | vllm/lora/ops/torch_ops/lora_ops.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
def sgmv_expand(
    inputs: torch.Tensor,
    lora_b_weights: torch.Tensor,
    output_tensor: torch.Tensor,
    b_seq_start_loc: torch.Tensor,
    seq_len_tensor: torch.Tensor,
    lora_indices_tensor: torch.Tensor,
    batches: int,
    max_seq_length: int,
    token_nums: int,
    add_inputs: bool = False,
):
    """Reference (pure PyTorch) SGMV expand.

    Broadcasts each sequence's LoRA id to all of its tokens via
    repeat_interleave, then defers to the batched BGMV expand path.
    """
    per_token_lora_ids = torch.repeat_interleave(lora_indices_tensor, seq_len_tensor)
    bgmv_expand(inputs, lora_b_weights, output_tensor, per_token_lora_ids, add_inputs)
def bgmv_expand(
    inputs: torch.Tensor,
    lora_b_weights: torch.Tensor,
    output_tensor: torch.Tensor,
    lora_indices_tensor: torch.Tensor,
    add_inputs: bool = True,
):
    """Reference (pure PyTorch) BGMV expand: per-token y (+)= x @ B^T.

    Each token selects its LoRA B matrix via `lora_indices_tensor`; the
    result is added to (or written over) `output_tensor` in place.
    """
    selected = lora_b_weights[lora_indices_tensor].to(dtype=output_tensor.dtype)
    # A stacked layout with a singleton slice dim comes in as 4D.
    if selected.dim() == 4:
        selected = selected.squeeze(dim=1)
    x = inputs.to(dtype=output_tensor.dtype)
    prod = torch.einsum("bi, boi -> bo", x, selected)
    # A single-row product against a multi-row output broadcasts.
    if prod.shape[0] == 1 and output_tensor.shape[0] != 1:
        row_limit = 1
    else:
        row_limit = output_tensor.shape[0]
    # LoRA adapter and model may add different amounts of padding to output
    width = min(prod.shape[1], output_tensor.shape[1])
    if add_inputs:
        output_tensor[:, :width] += prod[:row_limit, :width]
    else:
        output_tensor[:, :width] = prod[:row_limit, :width]
def sgmv_shrink(
    inputs: torch.Tensor,
    lora_a_weights: torch.Tensor,
    output_tensor: torch.Tensor,
    b_seq_start_loc: torch.Tensor,
    seq_len_tensor: torch.Tensor,
    lora_indices_tensor: torch.Tensor,
    batches: int,
    max_seq_length: int,
    token_nums: int,
    scaling: float,
):
    """Reference (pure PyTorch) SGMV shrink.

    Expands per-sequence LoRA ids to per-token ids, then reuses the
    batched BGMV shrink implementation.
    """
    per_token_lora_ids = torch.repeat_interleave(lora_indices_tensor, seq_len_tensor)
    bgmv_shrink(inputs, lora_a_weights, output_tensor, per_token_lora_ids, scaling)
def bgmv_shrink(
    inputs: torch.Tensor,
    lora_b_weights: torch.Tensor,
    output_tensor: torch.Tensor,
    lora_indices_tensor: torch.Tensor,
    scaling: float = 1.0,
):
    """Reference (pure PyTorch) BGMV shrink: y = scaling * (x @ A^T).

    Each token selects its LoRA matrix via `lora_indices_tensor`; the
    scaled product overwrites the leading columns of `output_tensor`.
    """
    selected = lora_b_weights[lora_indices_tensor].to(dtype=output_tensor.dtype)
    # A stacked layout with a singleton slice dim comes in as 4D.
    if selected.dim() == 4:
        selected = selected.squeeze(dim=1)
    x = inputs.to(dtype=output_tensor.dtype)
    prod = torch.einsum("bi, boi -> bo", x, selected)
    output_tensor[:, : prod.shape[1]] = scaling * prod[:]
def sgmv_expand_slice(
    inputs: torch.Tensor,
    lora_b_weights: torch.Tensor,
    output_tensor: torch.Tensor,
    b_seq_start_loc: torch.Tensor,
    seq_len_tensor: torch.Tensor,
    lora_indices_tensor: torch.Tensor,
    batches: int,
    max_seq_length: int,
    token_nums: int,
    slice_offset: int,
    slice_size: int,
    add_inputs: bool = False,
):
    """Reference (pure PyTorch) SGMV expand into an output column slice.

    Expands per-sequence LoRA ids to per-token ids, then reuses the
    batched BGMV expand-slice implementation.
    """
    per_token_lora_ids = torch.repeat_interleave(lora_indices_tensor, seq_len_tensor)
    bgmv_expand_slice(
        inputs,
        lora_b_weights,
        output_tensor,
        per_token_lora_ids,
        slice_offset=slice_offset,
        slice_size=slice_size,
        add_inputs=add_inputs,
    )
def bgmv_expand_slice(
    inputs: torch.Tensor,
    lora_b_weights: torch.Tensor,
    output_tensor: torch.Tensor,
    lora_indices_tensor: torch.Tensor,
    slice_offset: int,
    slice_size: int,
    add_inputs: bool = True,
):
    """Reference (pure PyTorch) BGMV expand writing into
    output_tensor[:, slice_offset : slice_offset + slice_size]."""
    selected = lora_b_weights[lora_indices_tensor].to(dtype=output_tensor.dtype)
    x = inputs.to(dtype=output_tensor.dtype)
    # A stacked layout with a singleton slice dim comes in as 4D.
    if selected.dim() == 4:
        selected = selected.squeeze(dim=1)
    prod = torch.einsum("bi, boi -> bo", x, selected)
    dest = output_tensor[:, slice_offset : slice_offset + slice_size]
    if add_inputs:
        dest += prod[:]
    else:
        dest[...] = prod[:]
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/ops/torch_ops/__init__.py | vllm/lora/ops/torch_ops/__init__.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.lora.ops.torch_ops.lora_ops import (
bgmv_expand, # noqa: F401
bgmv_expand_slice,
bgmv_shrink,
sgmv_expand,
sgmv_expand_slice,
sgmv_shrink,
)
__all__ = [
"bgmv_expand",
"bgmv_expand_slice",
"bgmv_shrink",
"sgmv_expand",
"sgmv_expand_slice",
"sgmv_shrink",
]
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/ops/ipex_ops/lora_ops.py | vllm/lora/ops/ipex_ops/lora_ops.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
from vllm.logger import init_logger
logger = init_logger(__name__)
try:
import intel_extension_for_pytorch as ipex
except ImportError as e:
raise e
def bgmv_shrink(
inputs: torch.Tensor,
lora_a_weights: torch.Tensor,
output_tensor: torch.Tensor,
lora_indices_tensor: torch.Tensor,
scaling: float = 1.0,
) -> None:
ipex.llm.functional.bgmv_shrink(
inputs, lora_a_weights, output_tensor, lora_indices_tensor, scaling
)
def bgmv_expand(
inputs: torch.Tensor,
lora_b_weights: torch.Tensor,
output_tensor: torch.Tensor,
lora_indices_tensor: torch.Tensor,
add_inputs: bool = True,
) -> None:
ipex.llm.functional.bgmv_expand(
inputs, lora_b_weights, output_tensor, lora_indices_tensor, add_inputs
)
def bgmv_expand_slice(
inputs: torch.Tensor,
lora_b_weights: torch.Tensor,
output_tensor: torch.Tensor,
lora_indices_tensor: torch.Tensor,
slice_offset: int,
slice_size: int,
add_inputs: bool = True,
) -> None:
ipex.llm.functional.bgmv_expand_slice(
inputs,
lora_b_weights,
output_tensor,
lora_indices_tensor,
slice_offset,
slice_size,
add_inputs,
)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/ops/ipex_ops/__init__.py | vllm/lora/ops/ipex_ops/__init__.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.lora.ops.ipex_ops.lora_ops import bgmv_expand, bgmv_expand_slice, bgmv_shrink
__all__ = ["bgmv_expand", "bgmv_expand_slice", "bgmv_shrink"]
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/ops/triton_ops/lora_kernel_metadata.py | vllm/lora/ops/triton_ops/lora_kernel_metadata.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
LoRA kernels metadata preparation utilities.
"""
from dataclasses import dataclass
import torch
@dataclass
class LoRAKernelMeta:
token_lora_mapping: torch.Tensor
token_indices_sorted_by_lora_ids: torch.Tensor
active_lora_ids: torch.Tensor
num_tokens_per_lora: torch.Tensor
lora_token_start_loc: torch.Tensor
# The V1 architecture uses the traced torch.compile graphs to execute
# a forward pass. Things to note about this process,
# 1. The tracing infers all python scalar datatype objects into a constant
# value.
# 2. The tracing cannot handle dynamic control flow. (dynamic control flow
# is an experimental feature in pytorch)
# 3. The internals of torch.ops functions are not traced.
# We disguise the "no_lora" flag as a cpu tensor and leverage point number 3
# to early exit from inside the lora_expand / lora_shrink torch operation.
no_lora_flag_cpu: torch.Tensor
@staticmethod
def make(
max_loras: int, max_num_tokens: int, device: torch.device | str
) -> "LoRAKernelMeta":
token_lora_mapping = torch.empty(
max_num_tokens, dtype=torch.int32, device=device
)
token_indices_sorted_by_lora_ids = torch.empty(
max_num_tokens, dtype=torch.int32, device=device
)
# +1 because "no-lora" is also a possibility
# example: let max_loras be 3, active_lora_ids of [-1, 0, 2, 1]
# is a possibility.
active_lora_ids = torch.empty(max_loras + 1, dtype=torch.int32, device=device)
# using running example, [3, 10, 5, 2] is a possibility.
num_tokens_per_lora = torch.zeros(
max_loras + 1, dtype=torch.int32, device=device
)
# +2 for this because, the first index is always 0.
# using running example, lora_token_start_loc
# is [0, 3, 13, 18, 20].
lora_token_start_loc = torch.zeros(
max_loras + 2, dtype=torch.int32, device=device
)
no_lora_flag_cpu = torch.tensor([False], dtype=torch.bool, device="cpu")
return LoRAKernelMeta(
token_lora_mapping=token_lora_mapping,
token_indices_sorted_by_lora_ids=token_indices_sorted_by_lora_ids,
active_lora_ids=active_lora_ids,
num_tokens_per_lora=num_tokens_per_lora,
lora_token_start_loc=lora_token_start_loc,
no_lora_flag_cpu=no_lora_flag_cpu,
)
def _reset(self):
self.active_lora_ids.fill_(-1)
self.num_tokens_per_lora.fill_(0)
self.lora_token_start_loc.fill_(0)
self.no_lora_flag_cpu.fill_(False)
def prepare_tensors(self, token_lora_mapping: torch.Tensor) -> None:
"""
Prepare kernel metadata tensors for the current forward pass.
Args:
token_lora_mapping (torch.Tensor): Tensor containing lora indices
for each input token.
"""
self._reset()
# Check and record no-lora case.
no_lora = torch.all(token_lora_mapping == -1)
self.no_lora_flag_cpu[0] = no_lora
if no_lora:
# Early exit. LoRA kernels will not be run.
return
num_tokens = token_lora_mapping.size(0)
# copy token lora mapping
self.token_lora_mapping[:num_tokens].copy_(
token_lora_mapping, non_blocking=True
)
# token_indices_sorted_by_lora_ids
_, token_indices_sorted_by_lora_ids = torch.sort(
token_lora_mapping, stable=True
)
# start gpu transfer
self.token_indices_sorted_by_lora_ids[:num_tokens].copy_(
token_indices_sorted_by_lora_ids, non_blocking=True
)
# active_lora_ids, num_tokens_per_lora
lora_ids, num_tokens_per_lora = torch.unique(
token_lora_mapping, sorted=True, return_counts=True
)
self.active_lora_ids[: lora_ids.size(0)].copy_(lora_ids, non_blocking=True)
self.num_tokens_per_lora[: num_tokens_per_lora.size(0)].copy_(
num_tokens_per_lora, non_blocking=True
)
# lora_token_start_loc
lora_token_start_loc = torch.cumsum(num_tokens_per_lora, dim=0)
self.lora_token_start_loc[1 : 1 + lora_token_start_loc.size(0)].copy_(
lora_token_start_loc, non_blocking=True
)
def meta_args(
self, token_nums: int
) -> tuple[
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
]:
"""
This function returns the kernel metadata required for the current
forward pass execution of the kernel. The function returns all the
metadata required by the kernel, in order, as a tuple, so it can be
unpacked directly during the lora_shrink/lora_expand function call.
Args:
token_nums (int): Number of input tokens in the current forward
pass of the kernel.
"""
return (
self.token_lora_mapping[:token_nums],
self.token_indices_sorted_by_lora_ids[:token_nums],
self.num_tokens_per_lora,
self.lora_token_start_loc,
self.active_lora_ids,
self.no_lora_flag_cpu,
)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/ops/triton_ops/kernel_utils.py | vllm/lora/ops/triton_ops/kernel_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Utilities for Punica kernel construction.
"""
from vllm.triton_utils import tl, triton
@triton.jit
def mm_k(
a_ptr,
b_ptr,
ak_stride,
bk_stride,
offset_k,
K: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
BLOCK_K: tl.constexpr,
EVEN_K: tl.constexpr,
SPLIT_K: tl.constexpr,
CAST_TYPE: tl.constexpr,
b_dtype: tl.constexpr,
USE_GDC: tl.constexpr,
base_k,
):
"""
Given a_ptr and b_ptr, that identify the rows of A (m x k) and columns of
B (k x n), iterate, through the K dimension to compute the partial/complete
matrix block product.
If SPLIT_K == 1, the output m x n product is complete.
If SPLIT_K > 1, the thread block computes partial outputs. The partial
outputs are then atomically summed in the caller code.
Args:
a_ptr: Array of pointers, identifying rows of A
b_ptr: Array of pointers, identifying columns of B
ak_stride: K dimension stride of the A matrix
bk_stride: K dimension stride of the B matrix
K: Length of the K dimension
BLOCK_M: M dimension of the output block m x n
BLOCK_N: N dimension of the output block m x n
BLOCK_K: K dimension atom
EVEN_K: True if the blocks of A and B can be loaded without any
masking.
SPLIT_K: Parameter signifying parallelism in the K dimension.
CAST_TYPE: if True, cast the values from the A matrix to the B
matrix dtype.
b_dtype: datatype of the B matrix
USE_GDC: Whether to use PDL. True indicates use.
base_k: Base offset along K dimension for current SPLIT_K group
"""
accumulator = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
# Step size along K for each iteration
STEP_K = BLOCK_K * SPLIT_K
# Total number of iterations (compile-time constant)
num_iters = tl.cdiv(K, STEP_K)
for k in range(num_iters):
# Current iteration's global K offset
iter_k = k * STEP_K + base_k
# Check if this iteration is completely valid (no masking needed)
block_end = iter_k + BLOCK_K
if EVEN_K:
# K is divisible by BLOCK_K, no masking ever needed
# pre-fetch lora weight
tiled_b = tl.load(b_ptr)
if USE_GDC:
tl.extra.cuda.gdc_wait()
tiled_a = tl.load(a_ptr)
if CAST_TYPE:
tiled_a = tiled_a.to(b_dtype)
accumulator += tl.dot(tiled_a, tiled_b)
else:
# Check if we need element-wise masking
if iter_k >= K:
# Entire block out of range, skip
pass
elif block_end <= K:
# Entire block in range, no masking needed (fast path)
tiled_b = tl.load(b_ptr)
if USE_GDC:
tl.extra.cuda.gdc_wait()
tiled_a = tl.load(a_ptr)
if CAST_TYPE:
tiled_a = tiled_a.to(b_dtype)
accumulator += tl.dot(tiled_a, tiled_b)
else:
# Partial block, need masking (only last iteration)
k_offsets = tl.arange(0, BLOCK_K)
mask = iter_k + k_offsets < K
tiled_b = tl.load(b_ptr, mask=mask[:, None], other=0.0)
if USE_GDC:
tl.extra.cuda.gdc_wait()
tiled_a = tl.load(a_ptr, mask=mask[None, :], other=0.0)
if CAST_TYPE:
tiled_a = tiled_a.to(b_dtype)
accumulator += tl.dot(tiled_a, tiled_b)
a_ptr += STEP_K * ak_stride
b_ptr += STEP_K * bk_stride
return accumulator
@triton.jit
def do_expand_kernel(
pid_n,
lora_index,
slice_id,
input_ptr,
lora_ptr,
out_ptr,
N,
K,
M_LEN,
ram, # array identifying the rows of Input ptr to operate on
slice_start_loc,
# input ptr strides
input_d0_stride,
input_d1_stride,
input_d2_stride,
# lora ptr strides
ls_d0_ptr,
ls_d1_ptr,
ls_d2_ptr,
# out ptr strides
output_d0_stride,
output_d1_stride,
# constants
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
BLOCK_K: tl.constexpr,
SAME_STRIDE: tl.constexpr,
SLICE_NUM: tl.constexpr,
EVEN_K: tl.constexpr,
CAST_TYPE: tl.constexpr,
ADD_INPUTS: tl.constexpr,
USE_GDC: tl.constexpr,
):
"""
Given an array of integers that identifies the rows of A, ram,
a lora index that identifies which LoRA to use from lora_ptr, lora_index,
a slice_id that identifies the input/output slice,
compute the matrix product and store in the appropriate output location.
Given that this is an expand kernel, we don't perform any split-K reduction
as the K dimension is assumed to be small.
"""
# ls_d*_ptr can be either an integer or a pointer
if SAME_STRIDE:
# integer
cur_lora_d0_stride = ls_d0_ptr
cur_lora_d1_stride = ls_d1_ptr
cur_lora_d2_stride = ls_d2_ptr
else:
# pointer
cur_lora_d0_stride = tl.load(ls_d0_ptr + slice_id)
cur_lora_d1_stride = tl.load(ls_d1_ptr + slice_id)
cur_lora_d2_stride = tl.load(ls_d2_ptr + slice_id)
# Identify the input_ptr and lora_ptr from slice_id.
if SLICE_NUM == 1:
cur_input_ptr = input_ptr
cur_lora_ptr = lora_ptr
else:
cur_input_ptr = input_ptr + slice_id * input_d0_stride
cur_lora_ptr = tl.load(lora_ptr + slice_id).to(
tl.pointer_type(out_ptr.dtype.element_ty)
)
# Identify the column indices of B to process.
offset_n = tl.arange(0, BLOCK_N) + pid_n * BLOCK_N
rbn = tl.max_contiguous(tl.multiple_of(offset_n % N, BLOCK_N), BLOCK_N)
# Identify A and B block pointers
offset_k = tl.arange(0, BLOCK_K)
a_ptr = (
cur_input_ptr
+ ram[:, None] * input_d1_stride
+ offset_k[None, :] * input_d2_stride
)
b_ptr = (
cur_lora_ptr
+ cur_lora_d0_stride * lora_index
+ offset_k[:, None] * cur_lora_d2_stride
+ rbn[None, :] * cur_lora_d1_stride
)
# Compute the block matrix product.
SPLIT_K = 1
accumulator = mm_k(
a_ptr,
b_ptr,
input_d2_stride,
cur_lora_d2_stride,
offset_k,
K,
BLOCK_M,
BLOCK_N,
BLOCK_K,
EVEN_K,
SPLIT_K,
CAST_TYPE,
cur_lora_ptr.dtype.element_ty,
USE_GDC,
base_k=0,
)
tiled_c = accumulator.to(cur_lora_ptr.dtype.element_ty)
if SLICE_NUM == 1:
cur_slice_start = slice_start_loc
else:
cur_slice_start = tl.load(slice_start_loc + slice_id)
# Identify the C output pointers to store the results of the accumulator.
offset_cn = tl.arange(0, BLOCK_N) + pid_n * BLOCK_N + cur_slice_start
offset_cm = tl.arange(0, BLOCK_M)
c_ptr = (
out_ptr
+ ram[:, None] * output_d0_stride
+ offset_cn[None, :] * output_d1_stride
)
c_mask = (offset_cm[:, None] < M_LEN) & (offset_cn[None, :] < (cur_slice_start + N))
if ADD_INPUTS:
tiled_out = tl.load(c_ptr, mask=c_mask)
tiled_c += tiled_out
tl.store(c_ptr, tiled_c, mask=c_mask)
@triton.jit
def do_shrink_kernel(
pid_n,
pid_sk,
slice_id,
lora_index,
input_ptr,
lora_ptr,
out_ptr,
N,
K,
M_LEN,
ram,
# input strides
input_d0_stride,
input_d1_stride,
# lora strides
lora_d0_stride,
lora_d1_stride,
lora_d2_stride,
# output strides
output_d0_stride,
output_d1_stride,
output_d2_stride,
scaling,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
BLOCK_K: tl.constexpr,
EVEN_K: tl.constexpr,
SPLIT_K: tl.constexpr,
SLICE_NUM: tl.constexpr,
USE_GDC: tl.constexpr,
):
"""
Given an array of integers that identifies the rows of A, ram,
a lora index that identifies which LoRA to use from lora_ptr, lora_index,
a slice_id that identifies the input/output slice, compute the
matrix product and store in the appropriate output location.
"""
# Identify the lora_ptr from slice_id.
if SLICE_NUM == 1:
# current lora ptr
cur_lora_ptr = lora_ptr
else:
# current lora ptr
cur_lora_ptr = tl.load(lora_ptr + slice_id).to(
tl.pointer_type(input_ptr.dtype.element_ty)
)
# Identify the column indices of B to process.
offset_n = tl.arange(0, BLOCK_N) + pid_n * BLOCK_N
rbn = tl.max_contiguous(tl.multiple_of(offset_n % N, BLOCK_N), BLOCK_N)
# Identify A and B block pointers
offset_k = pid_sk * BLOCK_K + tl.arange(0, BLOCK_K)
a_ptr = (
input_ptr + ram[:, None] * input_d0_stride + offset_k[None, :] * input_d1_stride
)
b_ptr = (
cur_lora_ptr
+ lora_d0_stride * lora_index
+ rbn[None, :] * lora_d1_stride
+ offset_k[:, None] * lora_d2_stride
)
# Compute partial/complete block matrix product.
accumulator = mm_k(
a_ptr,
b_ptr,
input_d1_stride,
lora_d2_stride,
offset_k,
K,
BLOCK_M,
BLOCK_N,
BLOCK_K,
EVEN_K,
SPLIT_K,
False,
cur_lora_ptr.dtype.element_ty,
False, # USE_GDC is always False in shrink kernel
base_k=pid_sk * BLOCK_K,
)
# GDC launch dependents hints the runtime system to launch dependent kernels.
if USE_GDC:
tl.extra.cuda.gdc_launch_dependents()
# Identify the C output pointers to store the results of the accumulator.
offset_cn = tl.arange(0, BLOCK_N) + pid_n * BLOCK_N
offset_cm = tl.arange(0, BLOCK_M)
cur_out_ptr = out_ptr if SLICE_NUM == 1 else out_ptr + slice_id * output_d0_stride
c_ptr = (
cur_out_ptr
+ ram[:, None] * output_d1_stride
+ offset_cn[None, :] * output_d2_stride
)
c_mask = (offset_cm[:, None] < M_LEN) & (offset_cn[None, :] < N)
accumulator *= scaling
# handles write-back with reduction-splitting
if SPLIT_K == 1:
tl.store(c_ptr, accumulator, mask=c_mask)
else:
tl.atomic_add(c_ptr, accumulator, mask=c_mask, sem="relaxed")
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/ops/triton_ops/lora_expand_op.py | vllm/lora/ops/triton_ops/lora_expand_op.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Based on:
Chen, L., Ye, Z., Wu, Y., Zhuo, D., Ceze, L., & Krishnamurthy, A. (2023).
Punica: Multi-Tenant LoRA Serving.
https://arxiv.org/abs/2310.18547
"""
import torch
from vllm.lora.ops.triton_ops.kernel_utils import do_expand_kernel
from vllm.lora.ops.triton_ops.utils import _get_lora_b_ptr, get_lora_op_configs
from vllm.triton_utils import tl, triton
from vllm.utils.torch_utils import direct_register_custom_op
from .utils import supports_pdl
@triton.jit
def _lora_expand_kernel(
input_ptr,
lora_ptr,
out_ptr,
M,
N,
K,
token_indices_sorted_by_lora_ids,
num_tokens_per_lora,
lora_token_start_loc,
lora_ids,
slice_start_loc,
input_d0_stride,
input_d1_stride,
input_d2_stride, # 1
ls_d0_ptr,
ls_d1_ptr,
ls_d2_ptr, # 1
output_d0_stride,
output_d1_stride, # 1
output_hs_ptr,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
BLOCK_K: tl.constexpr,
EVEN_K: tl.constexpr,
ADD_INPUTS: tl.constexpr,
CAST_TYPE: tl.constexpr,
SLICE_NUM: tl.constexpr,
SAME_STRIDE: tl.constexpr,
USE_GDC: tl.constexpr,
launch_pdl: tl.constexpr,
):
cta_n_num = tl.cdiv(N, BLOCK_N)
cta_m_num = tl.cdiv(M, BLOCK_M)
pid_mn = tl.program_id(axis=0)
pid_m = pid_mn % cta_m_num
pid_n = (pid_mn // cta_m_num) % cta_n_num
slice_id = tl.program_id(axis=1)
lora_idx = tl.program_id(axis=2)
lora_id = tl.load(lora_ids + lora_idx)
if lora_id == -1:
# Early exit for the no-lora case.
return
lora_m_size = tl.load(num_tokens_per_lora + lora_idx)
cta_m_offset = pid_m * BLOCK_M
if cta_m_offset >= lora_m_size:
# Early exit CTA.
return
# When the output dimensions of each slice are the same,cur_n=N, otherwise
# cur_n=tl.load(output_hs_ptr + slice_id), this situation exists in GQA's
# qkv linear.
curr_N = N if SAME_STRIDE else tl.load(output_hs_ptr + slice_id)
if pid_n * BLOCK_N >= curr_N:
# Early exit CTA.
return
# num rows this CTA should process.
cta_m_len = min(BLOCK_M, lora_m_size - cta_m_offset)
# Identify all rows that this CTA should process.
lora_m_indices_start = tl.load(lora_token_start_loc + lora_idx)
cta_lora_seq_indices = (
token_indices_sorted_by_lora_ids + lora_m_indices_start + cta_m_offset
)
# Load all relevant row indices.
offset_m = tl.arange(0, BLOCK_M) % cta_m_len
ram = tl.load(cta_lora_seq_indices + offset_m)
do_expand_kernel(
pid_n,
lora_id,
slice_id,
input_ptr,
lora_ptr,
out_ptr,
curr_N,
K,
cta_m_len,
ram, # array identifying the rows of Input ptr to operate on
slice_start_loc,
# input ptr strides
input_d0_stride,
input_d1_stride,
input_d2_stride,
# lora ptr strides
ls_d0_ptr,
ls_d1_ptr,
ls_d2_ptr,
# out ptr strides
output_d0_stride,
output_d1_stride,
# constants
BLOCK_M,
BLOCK_N,
BLOCK_K,
SAME_STRIDE,
SLICE_NUM,
EVEN_K,
CAST_TYPE,
ADD_INPUTS,
USE_GDC,
)
@torch.inference_mode()
def _lora_expand(
inputs: torch.Tensor, # shape [num_slices, num_tokens, lora_rank]
lora_b_weights: list[torch.Tensor], # shape [num_lora, hidden_size, lora_rank]
output_tensor: torch.Tensor, # shape [num_tokens, hidden_size * num_slices]
token_lora_mapping: torch.Tensor, # shape [num_tokens]
token_indices_sorted_by_lora_ids: torch.Tensor, # shape [num_tokens]
num_tokens_per_lora: torch.Tensor, # shape [max-loras + 1]
lora_token_start_loc: torch.Tensor, # shape [max-loras + 2]
lora_ids: torch.Tensor, # shape [max-loras + 1]
no_lora_flag_cpu: torch.Tensor, # shape [1]
offset_start: int = 0,
add_inputs: bool = False,
) -> None:
"""
Args:
inputs (torch.Tensor): input tensor
lora_b_weights (list[torch.Tensor]): lora'b weight
output_tensor (torch.Tensor): output tensor
token_lora_mapping (torch.Tensor): A tensor mapping each input token
to the lora-id related to that token. A value of -1 indicates that
LoRA doesn't apply to that token.
token_indices_sorted_by_lora_ids (torch.Tensor): Row/Token indices from
the A matrix grouped by LoRA IDs.
num_tokens_per_lora (torch.Tensor): num_tokens_per_lora[i] is the number
of tokens that are to be processed by LoRA ID lora_ids[i]
lora_token_start_loc (torch.Tensor): A cumulative sum of
num_tokens_per_lora. lora_token_start_loc[0] is always 0 so that
lora_token_start_loc[i], along with num_tokens_per_lora[i]
identifies the region in token_indices_sorted_by_lora_ids that
LoRA lora_ids[i] should process.
lora_ids (torch.Tensor): LoRA ids to process.
no_lora_flag_cpu (torch.Tensor): A CPU tensor of size 1, that indicates
if there are any requests that require LoRA.
offset_start (int, optional): Offset start for output_tensor.
Defaults to 0.
add_inputs (bool, optional): Whether to add the input tensor to the
output tensor. Defaults to False.
"""
assert no_lora_flag_cpu.numel() == 1
if no_lora_flag_cpu.item():
# None of the inputs require LoRA.
return
assert inputs.dtype in [torch.float16, torch.bfloat16, torch.float32]
for weight in lora_b_weights:
assert weight.dtype in [torch.float16, torch.bfloat16]
assert inputs.size(0) == len(lora_b_weights)
assert output_tensor.is_contiguous()
# metadata sanity check.
M = inputs.size(1)
assert token_lora_mapping.size(0) == M
assert token_lora_mapping.size(0) == token_indices_sorted_by_lora_ids.size(0)
assert lora_ids.size(0) == num_tokens_per_lora.size(0)
assert lora_token_start_loc.size(0) == lora_ids.size(0) + 1
(
slice_start_tensor,
lora_ptr_tensor,
lora_strides_d0_tensor,
lora_strides_d1_tensor,
lora_strides_d2_tensor,
hidden_sizes_tensor,
same_stride,
MAX_N,
) = _get_lora_b_ptr(lora_b_weights, offset_start, inputs.device)
K = lora_b_weights[0].shape[-1] # K= rank
ADD_INPUTS = add_inputs
MAX_LORAS = lora_ids.size(0)
CAST_TYPE = False
NUM_SLICES = len(lora_b_weights)
# Triton kernel configs.
kernel_config = get_lora_op_configs(
op_type="expand",
max_loras=MAX_LORAS,
batch=M,
hidden_size=MAX_N,
rank=K,
num_slices=NUM_SLICES,
add_inputs=add_inputs,
)
BLOCK_M = kernel_config["block_m"]
BLOCK_N = kernel_config["block_n"]
BLOCK_K = kernel_config["block_k"]
NUM_WARPS = kernel_config["num_warps"]
NUM_CTAS = kernel_config["num_ctas"]
NUM_STAGES = kernel_config["num_stages"]
EVEN_K = K % BLOCK_K == 0 # type: ignore
if inputs.dtype == torch.float32 and lora_b_weights[0].dtype in [
torch.float16,
torch.bfloat16,
]:
CAST_TYPE = True
# TODO (varun): This grid formulation maximizes parallelization at the
# cost of wasteful thread block launch when only a few input tokens require
# LoRA. This might not be the best in all cases.
grid = (
triton.cdiv(M, BLOCK_M) * triton.cdiv(MAX_N, BLOCK_N),
NUM_SLICES,
# Each LoRA receives its own set of thread blocks for output
# computation. If some LoRA doesn't have any tokens to process, its
# thread blocks simply exit.
MAX_LORAS,
)
use_gdc = supports_pdl(inputs.device)
_lora_expand_kernel[grid](
inputs,
lora_ptr_tensor,
output_tensor,
M,
MAX_N,
K,
token_indices_sorted_by_lora_ids,
num_tokens_per_lora,
lora_token_start_loc,
lora_ids,
slice_start_tensor,
inputs.stride(0),
inputs.stride(1),
inputs.stride(2),
lora_strides_d0_tensor,
lora_strides_d1_tensor,
lora_strides_d2_tensor,
output_tensor.stride(0),
output_tensor.stride(1),
hidden_sizes_tensor,
BLOCK_M,
BLOCK_N,
BLOCK_K,
EVEN_K,
ADD_INPUTS,
CAST_TYPE,
NUM_SLICES,
same_stride,
use_gdc,
num_warps=NUM_WARPS,
num_ctas=NUM_CTAS,
num_stages=NUM_STAGES,
launch_pdl=use_gdc,
)
return
def _lora_expand_fake(
inputs: torch.Tensor,
lora_b_weights: list[torch.Tensor],
output_tensor: torch.Tensor,
token_lora_mapping: torch.Tensor,
token_indices_sorted_by_lora_ids: torch.Tensor,
num_tokens_per_lora: torch.Tensor,
lora_token_start_loc: torch.Tensor,
lora_ids: torch.Tensor,
no_lora_flag_cpu: torch.Tensor,
offset_start: int = 0,
add_inputs: bool = False,
) -> None:
return
try:
direct_register_custom_op(
op_name="lora_expand",
op_func=_lora_expand,
mutates_args=["output_tensor"],
fake_impl=_lora_expand_fake,
)
lora_expand = torch.ops.vllm.lora_expand
except AttributeError:
lora_expand = _lora_expand
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/ops/triton_ops/fused_moe_lora_op.py | vllm/lora/ops/triton_ops/fused_moe_lora_op.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
from vllm.distributed import (
tensor_model_parallel_all_gather,
tensor_model_parallel_all_reduce,
)
from vllm.triton_utils import tl, triton
from vllm.utils.torch_utils import direct_register_custom_op
from .utils import supports_pdl
_LORA_PTR_DICT: dict[tuple[int, ...], torch.tensor] = {}
def _get_ptr(lora_weights: list[torch.Tensor], device: torch.device):
"""
`_LORA_PTR_DICT` collects the required information during `profile_run`,
After this, it remains constant and subsequent usage is through LUT.
Refer to:
https://github.com/triton-lang/triton/blob/release/3.1.x/python/tutorials/08-grouped-gemm.py
"""
key = tuple(lora_weight.data_ptr() for lora_weight in lora_weights)
if (ptr_tensor := _LORA_PTR_DICT.get(key)) is not None:
return ptr_tensor
tensor_ptrs = []
for lora_weight in lora_weights:
tensor_ptrs.append(lora_weight.data_ptr())
ptr_tensor = torch.tensor(tensor_ptrs, device=device, dtype=torch.uint64)
_LORA_PTR_DICT[key] = ptr_tensor
return _LORA_PTR_DICT.get(key)
@triton.jit(
do_not_specialize=[
"num_valid_tokens",
"EM",
"stride_tl",
"stride_el",
"slice_a_size",
"slice_c_size",
]
)
def _fused_moe_lora_kernel(
a_ptr,
b_ptr,
c_ptr,
topk_weights_ptr,
sorted_token_ids_ptr,
expert_ids_ptr,
num_tokens_post_padded_ptr,
# Matrix dimensions
N,
K,
EM,
num_valid_tokens,
num_experts,
lora_ids,
adapter_enabled,
# The stride variables represent how much to increase the ptr by when
# moving by 1 element in a particular dimension. E.g. `stride_am` is
# how much to increase `a_ptr` by to get the element one row down
# (A has M rows).
stride_am,
stride_ak,
stride_bl,
stride_be,
stride_bk,
stride_bn,
stride_cm,
stride_cn,
stride_tl,
stride_el,
slice_a_size,
slice_c_size,
# Meta-parameters
num_slice_a: tl.constexpr,
num_slice_c: tl.constexpr,
top_k: tl.constexpr,
MUL_ROUTED_WEIGHT: tl.constexpr,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
SPLIT_K: tl.constexpr,
USE_GDC: tl.constexpr,
launch_pdl: tl.constexpr,
IS_PRIMARY: tl.constexpr,
):
pid = tl.program_id(axis=0)
slice_id = tl.program_id(axis=1)
lora_idx = tl.program_id(axis=2)
lora_id = tl.load(lora_ids + lora_idx)
if lora_id == -1:
# Early exit for the no-lora case.
return
moe_enabled = tl.load(adapter_enabled + lora_id)
if moe_enabled == 0:
# Early exit for the no moe lora case.
return
max_loras = tl.num_programs(axis=2)
grid_k = tl.cdiv(K, BLOCK_SIZE_K * SPLIT_K)
# calculate pid_m,pid_n
pid_sk = pid % SPLIT_K
pid_m_n = pid // SPLIT_K
num_pid_m = tl.cdiv(EM, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid_m_n // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + ((pid_m_n % num_pid_in_group) % group_size_m)
pid_n = (pid_m_n % num_pid_in_group) // group_size_m
num_tokens_post_padded = tl.load(num_tokens_post_padded_ptr + lora_id)
if pid_m * BLOCK_SIZE_M >= num_tokens_post_padded:
return
# get the expert_id to process curr shard
ind = lora_id * stride_el + pid_m
expert_id = tl.load(expert_ids_ptr + ind, ind < max_loras * stride_el, -1)
if expert_id == -1:
return
# get a_ptr,b_ptr,c_ptr
cur_a_ptr = a_ptr + (slice_id % num_slice_a) * slice_a_size
cur_b_ptr = tl.load(b_ptr + slice_id).to(tl.pointer_type(c_ptr.dtype.element_ty))
cur_c_ptr = c_ptr + (slice_id % num_slice_c) * slice_c_size
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N).to(tl.int64)) % N
offs_k = pid_sk * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K)
offs_token_id = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M).to(tl.int64)
token_ind = stride_tl * lora_id + offs_token_id
offs_token = tl.load(
sorted_token_ids_ptr + token_ind, token_ind < max_loras * stride_tl, 0
)
token_mask = offs_token < num_valid_tokens
# get a_ptrs,b_ptrs
a_ptrs = cur_a_ptr + (
offs_token[:, None] // top_k * stride_am + offs_k[None, :] * stride_ak
)
b_ptrs = (
cur_b_ptr
+ lora_id * stride_bl
+ expert_id * stride_be
+ offs_k[:, None] * stride_bk
+ offs_bn[None, :] * stride_bn
)
if USE_GDC and IS_PRIMARY:
# GDC launch dependents hints the runtime system to launch dependent kernels.
tl.extra.cuda.gdc_launch_dependents()
# accumulator
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
# GDC wait waits for ALL programs in the prior kernel to complete
# before continuing.
if USE_GDC and not IS_PRIMARY:
tl.extra.cuda.gdc_wait()
for k in range(0, grid_k):
k_remaining = K - k * (BLOCK_SIZE_K * SPLIT_K)
# pre-fetch lora weight
b = tl.load(b_ptrs, mask=offs_k[:, None] < k_remaining, other=0.0)
a = tl.load(
a_ptrs,
mask=token_mask[:, None] & (offs_k[None, :] < k_remaining),
other=0.0,
)
accumulator += tl.dot(a, b)
# Advance the ptrs to the next K block.
a_ptrs += BLOCK_SIZE_K * SPLIT_K * stride_ak
b_ptrs += BLOCK_SIZE_K * SPLIT_K * stride_bk
if MUL_ROUTED_WEIGHT:
moe_weight = tl.load(topk_weights_ptr + offs_token, mask=token_mask, other=0)
accumulator = accumulator * moe_weight[:, None]
accumulator = accumulator.to(c_ptr.dtype.element_ty)
# Write back the block of the output
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = cur_c_ptr + stride_cm * offs_token[:, None] + stride_cn * offs_cn[None, :]
c_mask = token_mask[:, None] & (offs_cn[None, :] < N)
if SPLIT_K == 1:
tl.store(c_ptrs, accumulator, mask=c_mask)
else:
tl.atomic_add(c_ptrs, accumulator, mask=c_mask, sem="relaxed")
@torch.inference_mode()
def _fused_moe_lora_shrink(
a_intermediate_cache1: torch.Tensor,
# (num_slices, num_tokens, top_k_num, max_lora_rank)
qcurr_hidden_states: torch.Tensor, # (num_tokens, K,)
lora_a_stacked: list[
torch.Tensor
], # [(max_loras, num_experts, max_lora_rank, K,),...]
topk_weights: torch.Tensor, # (num_tokens, top_k_num)
sorted_token_ids: torch.Tensor, # (max_loras, _)
expert_ids: torch.Tensor, # (max_loras, _ ,)
num_tokens_post_padded: torch.Tensor, # (max_loras, )
top_k_num: int,
lora_ids: torch.Tensor,
adapter_enabled: torch.Tensor,
## adding for kernel
device: torch.device,
N: int,
M: int,
EM: int,
K: int,
num_tokens: int,
num_experts: int,
num_slices: int,
block_size_m: int,
block_size_n: int,
block_size_k: int,
group_size_m: int,
num_warps: int,
num_stages: int,
split_k: int,
mul_routed_weight: bool = False,
) -> None:
w1_lora_a_stacked = lora_a_stacked[0]
use_gdc = supports_pdl(qcurr_hidden_states.device)
shrink_config = {
"BLOCK_SIZE_M": block_size_m,
"BLOCK_SIZE_N": block_size_n,
"BLOCK_SIZE_K": block_size_k,
"GROUP_SIZE_M": group_size_m,
"num_warps": num_warps,
"num_stages": num_stages,
"SPLIT_K": split_k,
"USE_GDC": use_gdc,
"launch_pdl": use_gdc, # triton kernel metadata
}
b_ptr = _get_ptr(lora_a_stacked, device)
grid = lambda META: (
split_k
* triton.cdiv(EM, META["BLOCK_SIZE_M"])
* triton.cdiv(N, META["BLOCK_SIZE_N"]),
len(lora_a_stacked),
lora_a_stacked[0].shape[0],
)
_fused_moe_lora_kernel[grid](
qcurr_hidden_states,
b_ptr,
a_intermediate_cache1,
topk_weights,
sorted_token_ids,
expert_ids,
num_tokens_post_padded,
N,
K,
EM,
num_tokens,
num_experts,
lora_ids,
adapter_enabled,
qcurr_hidden_states.stride(0),
qcurr_hidden_states.stride(1),
w1_lora_a_stacked.stride(0),
w1_lora_a_stacked.stride(1),
w1_lora_a_stacked.stride(3),
w1_lora_a_stacked.stride(2),
a_intermediate_cache1.stride(2),
a_intermediate_cache1.stride(3),
sorted_token_ids.stride(0),
expert_ids.stride(0),
slice_a_size=qcurr_hidden_states.numel(),
slice_c_size=a_intermediate_cache1.numel() // num_slices,
num_slice_a=1,
num_slice_c=num_slices,
top_k=1 if mul_routed_weight else top_k_num,
MUL_ROUTED_WEIGHT=False,
IS_PRIMARY=True,
**shrink_config,
)
@torch.inference_mode()
def _fused_moe_lora_expand(
    output: torch.Tensor,  # (num_tokens, top_k_num, N*len(lora_a_stacked),)
    a_intermediate_cache1: torch.Tensor,  # (num_slices, M, top_k_num, max_lora_rank)
    b_intermediate_cache1: torch.Tensor,  # (num_slices, M, top_k_num, output_dim_size)
    lora_b_stacked: list[
        torch.Tensor
    ],  # [(max_loras, num_experts, output_dim_size, max_lora_rank,),...]
    topk_weights: torch.Tensor,  # (num_tokens, top_k_num)
    sorted_token_ids: torch.Tensor,  # (max_loras, _)
    expert_ids: torch.Tensor,  # (max_loras, _ ,)
    num_tokens_post_padded: torch.Tensor,  # (max_loras, )
    top_k_num: int,
    lora_ids: torch.Tensor,
    adapter_enabled: torch.Tensor,
    ## adding for kernel
    device: torch.device,
    N: int,
    M: int,
    EM: int,
    K: int,
    num_tokens: int,
    num_experts: int,
    num_slices: int,
    max_lora_rank: int,
    w1_output_dim_size: int,
    block_size_m: int,
    block_size_n: int,
    block_size_k: int,
    group_size_m: int,
    num_warps: int,
    num_stages: int,
    split_k: int,
    mul_routed_weight: bool = False,
    offset: int = 0,
) -> None:
    """Expand half of the fused MoE-LoRA computation.

    Multiplies the shrink results in ``a_intermediate_cache1`` by the stacked
    LoRA-B weights into ``b_intermediate_cache1`` (one Triton launch covering
    all slices and loras), then accumulates slice ``i`` into ``output`` at
    column window ``[i * N + offset, (i + 1) * N + offset)``.
    """
    b_ptr = _get_ptr(lora_b_stacked, device)
    # For the expand GEMM the reduction dim is the LoRA rank and the output
    # dim is the LoRA-B output size; override the caller-supplied K/N here.
    K = max_lora_rank
    N = w1_output_dim_size
    w1_lora_b_stacked = lora_b_stacked[0]
    # Flatten (num_slices, M, top_k_num, rank) into 2D rows for the kernel.
    a_intermediate_cache1 = a_intermediate_cache1.view(
        -1, a_intermediate_cache1.shape[3]
    )
    use_gdc = supports_pdl(a_intermediate_cache1.device)
    expand_config = {
        "BLOCK_SIZE_M": block_size_m,
        "BLOCK_SIZE_N": block_size_n,
        "BLOCK_SIZE_K": block_size_k,
        "GROUP_SIZE_M": group_size_m,
        "num_warps": num_warps,
        "num_stages": num_stages,
        "SPLIT_K": split_k,  # Set split_k = 1 for expand calls
        "USE_GDC": use_gdc,
        "launch_pdl": use_gdc,  # triton kernel metadata
    }
    # Grid: (M-tiles * N-tiles, one axis per slice, one axis per lora).
    grid = lambda META: (
        triton.cdiv(EM, META["BLOCK_SIZE_M"]) * triton.cdiv(N, META["BLOCK_SIZE_N"]),
        len(lora_b_stacked),
        lora_b_stacked[0].shape[0],
    )
    _fused_moe_lora_kernel[grid](
        a_intermediate_cache1,
        b_ptr,
        b_intermediate_cache1,
        topk_weights,
        sorted_token_ids,
        expert_ids,
        num_tokens_post_padded,
        N,
        K,
        EM,
        num_tokens,
        num_experts,
        lora_ids,
        adapter_enabled,
        a_intermediate_cache1.stride(0),
        a_intermediate_cache1.stride(1),
        w1_lora_b_stacked.stride(0),
        w1_lora_b_stacked.stride(1),
        # note: d2/d3 strides are deliberately swapped (weights are applied
        # transposed in the kernel)
        w1_lora_b_stacked.stride(3),
        w1_lora_b_stacked.stride(2),
        b_intermediate_cache1.stride(2),
        b_intermediate_cache1.stride(3),
        sorted_token_ids.stride(0),
        expert_ids.stride(0),
        slice_a_size=a_intermediate_cache1.numel() // num_slices,
        slice_c_size=b_intermediate_cache1.numel() // num_slices,
        num_slice_a=num_slices,
        num_slice_c=num_slices,
        top_k=1,
        MUL_ROUTED_WEIGHT=mul_routed_weight,
        IS_PRIMARY=False,
        **expand_config,
    )
    # Scatter/accumulate each slice's result into its column window of output.
    for i in range(num_slices):
        output[:, :, i * N + offset : (i + 1) * N + offset] += b_intermediate_cache1[i]
@torch.inference_mode()
def _fused_moe_lora(
    output: torch.Tensor,  # (num_tokens, top_k_num, N*len(lora_a_stacked),)
    qcurr_hidden_states: torch.Tensor,  # (num_tokens, K,)
    lora_a_stacked: list[
        torch.Tensor
    ],  # [(max_loras, num_experts, max_lora_rank, K,),...]
    lora_b_stacked: list[
        torch.Tensor
    ],  # [(max_loras, num_experts, N, max_lora_rank,),...]
    topk_weights: torch.Tensor,  # (num_tokens, top_k_num)
    sorted_token_ids: torch.Tensor,  # (max_loras, _)
    expert_ids: torch.Tensor,  # (max_loras, _ ,)
    num_tokens_post_padded: torch.Tensor,  # (max_loras, )
    max_lora_rank: int,
    top_k_num: int,
    lora_ids: torch.Tensor,
    adapter_enabled: torch.Tensor,
    shrink_block_size_m: int,
    shrink_block_size_n: int,
    shrink_block_size_k: int,
    shrink_group_size_m: int,
    shrink_num_warps: int,
    shrink_num_stages: int,
    shrink_split_k: int,
    expand_block_size_m: int,
    expand_block_size_n: int,
    expand_block_size_k: int,
    expand_group_size_m: int,
    expand_num_warps: int,
    expand_num_stages: int,
    expand_split_k: int,
    mul_routed_weight: bool = False,
    fully_sharded: bool = False,
    offset: int = 0,
) -> None:
    """Apply stacked MoE LoRA adapters to ``qcurr_hidden_states``.

    Two-stage computation: a "shrink" GEMM (hidden -> lora rank) into a
    zero-initialized intermediate, followed by an "expand" GEMM
    (lora rank -> output dim) that accumulates into ``output``. Under
    ``fully_sharded`` tensor parallelism, the shrink results are combined
    across ranks (all-reduce when the local rank already holds the full
    LoRA rank, all-gather otherwise) before the expand stage.
    """
    assert len(lora_a_stacked) == len(lora_b_stacked) > 0
    assert (
        sorted_token_ids.dim()
        == expert_ids.dim()
        == topk_weights.dim()
        == qcurr_hidden_states.dim()
        == 2
    )
    assert (
        sorted_token_ids.shape[0]
        == expert_ids.shape[0]
        == num_tokens_post_padded.shape[0]
    )
    assert output.shape[0] == topk_weights.shape[0]
    assert top_k_num == topk_weights.shape[1]
    device = qcurr_hidden_states.device
    num_slices = len(lora_a_stacked)
    w1_lora_b_stacked = lora_b_stacked[0]
    num_experts = lora_a_stacked[0].shape[1]
    # GEMM problem sizes for the shrink stage (expand overrides K/N itself).
    N = max_lora_rank
    M = topk_weights.shape[0]
    EM = sorted_token_ids.shape[1]
    K = qcurr_hidden_states.shape[1]
    num_tokens = M * top_k_num
    w1_output_dim_size = w1_lora_b_stacked.shape[2]
    # Shrink accumulates into this buffer, so it must start zeroed.
    a_intermediate_cache1 = torch.zeros(
        (num_slices, M, top_k_num, max_lora_rank),
        dtype=output.dtype,
        device=device,
    )
    b_intermediate_cache1 = torch.zeros(
        (num_slices, M, top_k_num, w1_output_dim_size),
        dtype=output.dtype,
        device=device,
    )
    _fused_moe_lora_shrink(
        a_intermediate_cache1,
        qcurr_hidden_states,
        lora_a_stacked,
        topk_weights,
        sorted_token_ids,
        expert_ids,
        num_tokens_post_padded,
        top_k_num,
        lora_ids,
        adapter_enabled,
        ## adding for kernel
        device,
        N,
        M,
        EM,
        K,
        num_tokens,
        num_experts,
        num_slices,
        shrink_block_size_m,
        shrink_block_size_n,
        shrink_block_size_k,
        shrink_group_size_m,
        shrink_num_warps,
        shrink_num_stages,
        shrink_split_k,
        mul_routed_weight,
    )
    if fully_sharded:
        if max_lora_rank == w1_lora_b_stacked.shape[-1]:
            # Local rank holds the full LoRA rank: sum partial products.
            a_intermediate_cache1 = tensor_model_parallel_all_reduce(
                a_intermediate_cache1
            )
        else:
            # LoRA rank is sharded: gather the shards to form the full rank.
            a_intermediate_cache1 = tensor_model_parallel_all_gather(
                a_intermediate_cache1
            )
            # reset max_lora_rank to the full rank after allgather
            max_lora_rank = a_intermediate_cache1.shape[-1]
    _fused_moe_lora_expand(
        output,
        a_intermediate_cache1,
        b_intermediate_cache1,
        lora_b_stacked,
        topk_weights,
        sorted_token_ids,
        expert_ids,
        num_tokens_post_padded,
        top_k_num,
        lora_ids,
        adapter_enabled,
        ## adding for kernel
        device,
        N,
        M,
        EM,
        K,
        num_tokens,
        num_experts,
        num_slices,
        max_lora_rank,
        w1_output_dim_size,
        expand_block_size_m,
        expand_block_size_n,
        expand_block_size_k,
        expand_group_size_m,
        expand_num_warps,
        expand_num_stages,
        expand_split_k,
        mul_routed_weight,
        offset,
    )
def _fused_moe_lora_fake(
    output: torch.Tensor,
    qcurr_hidden_states: torch.Tensor,
    lora_a_stacked: list[torch.Tensor],
    lora_b_stacked: list[torch.Tensor],
    topk_weights: torch.Tensor,
    sorted_token_ids: torch.Tensor,
    expert_ids: torch.Tensor,
    num_tokens_post_padded: torch.Tensor,
    max_lora_rank: int,
    top_k_num: int,
    lora_ids: torch.Tensor,
    adapter_enabled: torch.Tensor,
    shrink_block_size_m: int,
    shrink_block_size_n: int,
    shrink_block_size_k: int,
    shrink_group_size_m: int,
    shrink_num_warps: int,
    shrink_num_stages: int,
    shrink_split_k: int,
    expand_block_size_m: int,
    expand_block_size_n: int,
    expand_block_size_k: int,
    expand_group_size_m: int,
    expand_num_warps: int,
    expand_num_stages: int,
    expand_split_k: int,
    mul_routed_weight: bool = False,
    fully_sharded: bool = False,
    offset: int = 0,
) -> None:
    """No-op fake impl for the ``fused_moe_lora`` custom-op registration.

    The signature must mirror ``_fused_moe_lora`` exactly so traced/compiled
    calls that pass the trailing keyword arguments are accepted. The previous
    version was missing ``fully_sharded`` and ``offset``, which would raise a
    TypeError when the real op was invoked with them during tracing.
    """
    return
def _fused_moe_lora_shrink_fake(
    a_intermediate_cache1: torch.Tensor,
    qcurr_hidden_states: torch.Tensor,
    lora_a_stacked: list[torch.Tensor],
    topk_weights: torch.Tensor,
    sorted_token_ids: torch.Tensor,
    expert_ids: torch.Tensor,
    num_tokens_post_padded: torch.Tensor,
    top_k_num: int,
    lora_ids: torch.Tensor,
    adapter_enabled: torch.Tensor,
    device: torch.device,
    N: int,
    M: int,
    EM: int,
    K: int,
    num_tokens: int,
    num_experts: int,
    num_slices: int,
    block_size_m: int,
    block_size_n: int,
    block_size_k: int,
    group_size_m: int,
    num_warps: int,
    num_stages: int,
    split_k: int,
    mul_routed_weight: bool = False,
) -> None:
    """No-op fake impl for the ``fused_moe_lora_shrink`` custom op.

    Mirrors the signature of ``_fused_moe_lora_shrink``; the real op writes
    its result into ``a_intermediate_cache1`` in place, so there is nothing
    to return here.
    """
    return
def _fused_moe_lora_expand_fake(
    output: torch.Tensor,
    a_intermediate_cache1: torch.Tensor,
    b_intermediate_cache1: torch.Tensor,
    lora_b_stacked: list[torch.Tensor],
    topk_weights: torch.Tensor,
    sorted_token_ids: torch.Tensor,
    expert_ids: torch.Tensor,
    num_tokens_post_padded: torch.Tensor,
    top_k_num: int,
    lora_ids: torch.Tensor,
    adapter_enabled: torch.Tensor,
    device: torch.device,
    N: int,
    M: int,
    EM: int,
    K: int,
    num_tokens: int,
    num_experts: int,
    num_slices: int,
    max_lora_rank: int,
    w1_output_dim_size: int,
    block_size_m: int,
    block_size_n: int,
    block_size_k: int,
    group_size_m: int,
    num_warps: int,
    num_stages: int,
    split_k: int,
    mul_routed_weight: bool = False,
    offset: int = 0,
) -> None:
    """No-op fake impl for the ``fused_moe_lora_expand`` custom op.

    The signature must mirror ``_fused_moe_lora_expand`` exactly. The previous
    version omitted ``b_intermediate_cache1`` (second positional tensor) and
    the trailing ``offset`` keyword, so every positional argument after the
    first was bound one slot early and real call sites would fail under the
    registered op schema.
    """
    return
try:
    # Register the three MoE-LoRA ops with torch; each entry is
    # (op name, python impl, mutated arg, fake impl for tracing).
    for _op_name, _op_func, _mutated_arg, _fake_impl in (
        ("fused_moe_lora", _fused_moe_lora, "output", _fused_moe_lora_fake),
        (
            "fused_moe_lora_shrink",
            _fused_moe_lora_shrink,
            "a_intermediate_cache1",
            _fused_moe_lora_shrink_fake,
        ),
        (
            "fused_moe_lora_expand",
            _fused_moe_lora_expand,
            "output",
            _fused_moe_lora_expand_fake,
        ),
    ):
        direct_register_custom_op(
            op_name=_op_name,
            op_func=_op_func,
            mutates_args=[_mutated_arg],
            fake_impl=_fake_impl,
        )
    fused_moe_lora = torch.ops.vllm.fused_moe_lora
    fused_moe_lora_shrink = torch.ops.vllm.fused_moe_lora_shrink
    fused_moe_lora_expand = torch.ops.vllm.fused_moe_lora_expand
except AttributeError:
    # Custom-op registration unavailable; call the python impls directly.
    fused_moe_lora = _fused_moe_lora
    fused_moe_lora_shrink = _fused_moe_lora_shrink
    fused_moe_lora_expand = _fused_moe_lora_expand
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/ops/triton_ops/utils.py | vllm/lora/ops/triton_ops/utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import functools
import json
from functools import lru_cache
from pathlib import Path
from typing import Any
import torch
from vllm import envs
from vllm.logger import init_logger
from vllm.model_executor.layers.batch_invariant import vllm_is_batch_invariant
from vllm.platforms import current_platform
logger = init_logger(__name__)
# Evaluated once at import; batch-invariant mode disables tuned configs below.
is_batch_invariant = vllm_is_batch_invariant()
# Caches keyed by the data_ptr()s of the LoRA weight tensors; each value is
# the tuple of kernel arguments (pointer tensor plus stride ints and, for B,
# slice metadata) built by _get_lora_a_ptr / _get_lora_b_ptr below.
# (annotation: torch.tensor is a function, not a type, and the stored tuples
# mix tensors with plain ints, hence tuple[Any, ...])
_LORA_A_PTR_DICT: dict[tuple[int, ...], tuple[Any, ...]] = {}
_LORA_B_PTR_DICT: dict[tuple[int, ...], tuple[Any, ...]] = {}
def _get_lora_a_ptr(lora_a_weights: list[torch.Tensor], device: torch.device):
    """Build (and cache) the pointer/stride arguments for the lora_a weights.

    `_LORA_A_PTR_DICT` collects the required information during `profile_run`,
    After this, it remains constant and subsequent usage is through LUT.
    Refer to:
    https://github.com/triton-lang/triton/blob/release/3.1.x/python/tutorials/08-grouped-gemm.py

    Returns a tuple ``(lora_ptr_tensor, stride_d0, stride_d1, stride_d2)``.
    Raises ValueError if the weights do not all share the same strides.
    """
    key = tuple(lora_weight.data_ptr() for lora_weight in lora_a_weights)
    if values := _LORA_A_PTR_DICT.get(key):
        return values
    lora_strides_d0 = []
    lora_strides_d1 = []
    lora_strides_d2 = []
    tensor_ptrs = []
    for lora_a_weight in lora_a_weights:
        if lora_a_weight.ndim == 4:  # shape:(lora_num,1,size,rank)
            assert lora_a_weight.size(1) == 1
            lora_a_weight = lora_a_weight.squeeze(dim=1)
        else:
            assert lora_a_weight.ndim == 3  # shape:(lora_num,size,rank)
        assert lora_a_weight.is_contiguous()
        tensor_ptrs.append(lora_a_weight.data_ptr())
        lora_strides_d0.append(lora_a_weight.stride(0))
        lora_strides_d1.append(lora_a_weight.stride(1))
        lora_strides_d2.append(lora_a_weight.stride(2))
    if len(lora_a_weights) > 1:
        # Multiple slices: pass a device tensor of raw pointers.
        lora_ptr_tensor = torch.tensor(tensor_ptrs, device=device, dtype=torch.uint64)
    else:
        # Single slice: the tensor itself is passed (triton takes its pointer).
        lora_ptr_tensor = lora_a_weights[0]
    if (
        len(set(lora_strides_d0)) > 1
        or len(set(lora_strides_d1)) > 1
        or len(set(lora_strides_d2)) > 1
    ):
        raise ValueError("All LoRA weights must have the same stride.")
    # Store and return the entry directly rather than re-querying the dict;
    # this also keeps the return type non-Optional.
    entry = (
        lora_ptr_tensor,
        lora_strides_d0[0],
        lora_strides_d1[0],
        lora_strides_d2[0],
    )
    _LORA_A_PTR_DICT[key] = entry
    return entry
def _get_lora_b_ptr(
    lora_weights: list[torch.Tensor], offset_start: int, device: torch.device
):
    """Build (and cache) pointer/stride/slice arguments for the lora_b weights.

    `_LORA_B_PTR_DICT` collects the required information during `profile_run`,
    After this, it remains constant and subsequent usage is through LUT.
    Refer to:
    https://github.com/triton-lang/triton/blob/release/3.1.x/python/tutorials/08-grouped-gemm.py

    Returns ``(slice_start_tensor, lora_ptr_tensor, stride_d0, stride_d1,
    stride_d2, hidden_sizes, same_stride, MAX_N)``; the stride/size entries
    are plain ints when uniform across slices, device tensors otherwise.
    """
    key = tuple(lora_weight.data_ptr() for lora_weight in lora_weights)
    if values := _LORA_B_PTR_DICT.get(key):
        return values
    slice_offset_lst = []
    tensor_ptrs = []
    lora_strides_d0 = []
    lora_strides_d1 = []
    lora_strides_d2 = []
    hidden_sizes = []
    slice_offset = offset_start
    for lora_b_weight in lora_weights:
        if lora_b_weight.ndim == 4:  # shape:(lora_num,1,size,rank)
            assert lora_b_weight.size(1) == 1
            lora_b_weight = lora_b_weight.squeeze(dim=1)
        else:
            assert lora_b_weight.ndim == 3  # shape:(lora_num,size,rank)
        assert lora_b_weight.is_contiguous()
        tensor_ptrs.append(lora_b_weight.data_ptr())
        lora_strides_d0.append(lora_b_weight.stride(0))
        lora_strides_d1.append(lora_b_weight.stride(1))
        lora_strides_d2.append(lora_b_weight.stride(2))
        slice_offset_lst.append(slice_offset)
        slice_offset += lora_b_weight.size(1)
        hidden_sizes.append(lora_b_weight.size(1))
    if len(lora_weights) > 1:
        # note these are device tensors
        lora_ptr_tensor = torch.tensor(tensor_ptrs, device=device, dtype=torch.uint64)
        slice_start_tensor = torch.tensor(
            slice_offset_lst, device=device, dtype=torch.uint64
        )
    else:
        slice_start_tensor = slice_offset_lst[0]
        # Pass the first weight tensor itself (consistent with
        # _get_lora_a_ptr; previously this indexed the loop variable, which
        # yields the same data_ptr but a misleading 2-D view).
        lora_ptr_tensor = lora_weights[0]
    # If each lora has the same stride, there's no need to use a
    # tensor for storage.
    if (
        len(set(lora_strides_d0)) == 1
        and len(set(lora_strides_d1)) == 1
        and len(set(lora_strides_d2)) == 1
    ) and len(set(hidden_sizes)) == 1:
        lora_strides_d0_tensor = lora_strides_d0[0]
        lora_strides_d1_tensor = lora_strides_d1[0]
        lora_strides_d2_tensor = lora_strides_d2[0]
        hidden_sizes_tensor = hidden_sizes[0]
        same_stride = True
    else:
        lora_strides_d0_tensor = torch.tensor(lora_strides_d0, device=device)
        lora_strides_d1_tensor = torch.tensor(lora_strides_d1, device=device)
        lora_strides_d2_tensor = torch.tensor(lora_strides_d2, device=device)
        hidden_sizes_tensor = torch.tensor(hidden_sizes, device=device)
        same_stride = False
    # MAX_N is the maximum hidden size among all the lora_b weights
    MAX_N = max(hidden_sizes)
    # Store and return the entry directly (avoids a second dict lookup).
    entry = (
        slice_start_tensor,
        lora_ptr_tensor,
        lora_strides_d0_tensor,
        lora_strides_d1_tensor,
        lora_strides_d2_tensor,
        hidden_sizes_tensor,
        same_stride,
        MAX_N,
    )
    _LORA_B_PTR_DICT[key] = entry
    return entry
@functools.lru_cache
def load_lora_op_config(op_type: str, add_inputs: bool | None) -> dict | None:
    """Load a user-tuned LoRA kernel config JSON for this GPU, if present.

    Returns the parsed config dict, or None when no tuned-config folder is
    configured, batch-invariant mode is active, or the file does not exist.
    Cached, since the result is constant for a given (op_type, add_inputs).
    """
    user_defined_config_folder = envs.VLLM_TUNED_CONFIG_FOLDER
    # Avoid optimizing for the batch invariant case. Use default config
    if user_defined_config_folder is None or is_batch_invariant:
        return None
    # Normalize the GPU name into a filename-safe token.
    gpu_name = torch.cuda.get_device_name().replace(" ", "_").replace("-", "_")
    # only expand op needs to consider add_inputs
    if op_type == "expand":
        config_fname = f"{gpu_name}_{op_type.upper()}_{str(add_inputs).upper()}.json"
    else:
        config_fname = f"{gpu_name}_{op_type.upper()}.json"
    config_path = Path(user_defined_config_folder) / config_fname
    if not config_path.exists():
        # typo fix: "founded" -> "found"
        logger.warning_once(f"No LoRA kernel configs found in {config_path}")
        return None
    # Load json
    logger.info_once(f"Using tuned LoRA kernel configs from {config_path}.")
    with open(config_path) as f:
        return json.load(f)
@functools.lru_cache
def get_lora_op_configs(
    op_type: str,
    max_loras: int,
    batch: int,
    hidden_size: int,
    rank: int,
    num_slices: int,
    add_inputs: bool | None = None,
    moe_intermediate_size: int | None = None,
) -> dict[str, int | None]:
    """Return the Triton launch config for a LoRA op.

    Prefers a user-tuned config (see load_lora_op_config), falling back to
    hard-coded defaults per op type. Tuned configs are nested as
    config[max_loras][num_slices][m][k][n] (and optionally
    [moe_intermediate_size]); lookups snap to the closest available
    integer key when the exact one is missing.
    """
    # Add support for fused_moe_lora ops
    assert op_type in [
        "shrink",
        "expand",
        "fused_moe_lora_w13_shrink",
        "fused_moe_lora_w13_expand",
        "fused_moe_lora_w2_shrink",
        "fused_moe_lora_w2_expand",
    ]
    # default config
    if op_type == "shrink":
        split_k = 64 if batch < 128 else 8
        if is_batch_invariant:
            # SPLIT_K > 1 changes the reduction order; keep it deterministic.
            split_k = 1
        default = {
            "block_m": 32,
            "block_n": 16,
            "block_k": 256 if batch < 128 else 32,
            "split_k": split_k,
            "num_warps": 4,
            "num_ctas": 1,
            "group_size_m": 8,
            "num_stages": 2,
            "max_nreg": None,
        }
    # The default config for fused_moe_lora ops
    elif op_type in [
        "fused_moe_lora_w13_shrink",
        "fused_moe_lora_w13_expand",
        "fused_moe_lora_w2_shrink",
        "fused_moe_lora_w2_expand",
    ]:
        default = {
            "block_m": 64,
            "block_n": 64,
            "block_k": 32,
            "num_warps": 4,
            "num_stages": 3,
            "group_size_m": 8,
            "split_k": 1,
        }
    else:
        default = {
            "block_m": 64,
            "block_n": 128,
            "block_k": 16,
            "num_warps": 4,
            "num_ctas": 1,
            "num_stages": 2,
            "max_nreg": None,
        }

    def _nearest(cfg: Any, target: int) -> Any:
        """Exact string-key match if present (and truthy), else the entry
        whose integer key is closest to ``target``."""
        return cfg.get(str(target)) or cfg[
            min(cfg.keys(), key=lambda key: abs(int(key) - target))
        ]

    m = batch
    k, n = (hidden_size, rank) if op_type == "shrink" else (rank, hidden_size)
    config_data: Any = load_lora_op_config(op_type, add_inputs)
    if not config_data:
        logger.warning_once("Using default LoRA kernel configs")
        return default
    # config is structured as config_data[max_loras][num_slices][m][k][n] = {}
    config_data = _nearest(config_data, max_loras)
    # slice by num_slices (exact match required)
    config_data = config_data[str(num_slices)]
    config_data = _nearest(config_data, m)
    config_data = _nearest(config_data, k)
    config_data = _nearest(config_data, n)
    # slice by moe-intermediate-size if applicable
    if moe_intermediate_size is not None:
        config_data = _nearest(config_data, moe_intermediate_size)
    assert config_data is not None
    return config_data
@lru_cache
def supports_pdl(device: torch.device | None = None) -> bool:
    """Whether programmatic dependent launch (PDL) can be used.

    Refer to: https://github.com/triton-lang/triton/blob/v3.5.0/python/tutorials/11-programmatic-dependent-launch.py
    """
    # PDL requires a CUDA platform with compute capability SM90 or above.
    if not current_platform.is_cuda():
        return False
    return current_platform.has_device_capability(90)
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/ops/triton_ops/__init__.py | vllm/lora/ops/triton_ops/__init__.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.lora.ops.triton_ops.fused_moe_lora_op import (
fused_moe_lora,
fused_moe_lora_expand,
fused_moe_lora_shrink,
)
from vllm.lora.ops.triton_ops.lora_expand_op import lora_expand
from vllm.lora.ops.triton_ops.lora_kernel_metadata import LoRAKernelMeta
from vllm.lora.ops.triton_ops.lora_shrink_op import lora_shrink
# Public API of the triton LoRA ops package (also controls `import *`).
__all__ = [
    "lora_expand",
    "lora_shrink",
    "LoRAKernelMeta",
    "fused_moe_lora",
    "fused_moe_lora_shrink",
    "fused_moe_lora_expand",
]
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/ops/triton_ops/lora_shrink_op.py | vllm/lora/ops/triton_ops/lora_shrink_op.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Based on:
Chen, L., Ye, Z., Wu, Y., Zhuo, D., Ceze, L., & Krishnamurthy, A. (2023).
Punica: Multi-Tenant LoRA Serving.
https://arxiv.org/abs/2310.18547
"""
import torch
from vllm.lora.ops.triton_ops.kernel_utils import do_shrink_kernel
from vllm.lora.ops.triton_ops.utils import _get_lora_a_ptr, get_lora_op_configs
from vllm.triton_utils import tl, triton
from vllm.utils.torch_utils import direct_register_custom_op
from .utils import supports_pdl
@triton.jit
def _lora_shrink_kernel(
    input_ptr,
    lora_ptr,
    out_ptr,
    M,
    N,
    K,
    token_indices_sorted_by_lora_ids,
    num_tokens_per_lora,
    lora_token_start_loc,
    lora_ids,
    scaling,
    input_d0_stride,
    input_d1_stride,
    lora_d0_stride,
    lora_d1_stride,
    lora_d2_stride,
    output_d0_stride,
    output_d1_stride,
    output_d2_stride,
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
    BLOCK_K: tl.constexpr,
    EVEN_K: tl.constexpr,
    SPLIT_K: tl.constexpr,
    GROUP_SIZE_M: tl.constexpr,
    SLICE_NUM: tl.constexpr,
    USE_GDC: tl.constexpr,
    launch_pdl: tl.constexpr,
):
    # Grid layout: axis 0 packs (split-k, M-tile, N-tile); axis 1 is the
    # slice index; axis 2 is the lora index. Decompose axis 0 first.
    cta_n_num = tl.cdiv(N, BLOCK_N)
    cta_m_num = tl.cdiv(M, BLOCK_M)
    pid_sk_m_n = tl.program_id(axis=0)
    pid_sk = pid_sk_m_n % SPLIT_K
    pid_m_n = pid_sk_m_n // SPLIT_K
    # Grouped tile ordering (as in the triton matmul tutorial).
    num_pid_in_group = GROUP_SIZE_M * cta_n_num
    group_id = pid_m_n // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(cta_m_num - first_pid_m, GROUP_SIZE_M)
    # Column-major ordering within groups for better cache reuse
    pid_m = first_pid_m + ((pid_m_n % num_pid_in_group) % group_size_m)
    pid_n = (pid_m_n % num_pid_in_group) // group_size_m
    slice_id = tl.program_id(axis=1)
    lora_idx = tl.program_id(axis=2)
    lora_id = tl.load(lora_ids + lora_idx)
    if lora_id == -1:
        # Early exit for the no-lora case.
        return
    lora_m_size = tl.load(num_tokens_per_lora + lora_idx)
    cta_m_offset = pid_m * BLOCK_M
    if cta_m_offset >= lora_m_size:
        # Early exit CTA.
        return
    # num rows this CTA should process.
    cta_m_len = min(BLOCK_M, lora_m_size - cta_m_offset)
    # Identify all rows that this CTA should process.
    lora_m_indices_start = tl.load(lora_token_start_loc + lora_idx)
    cta_lora_seq_indices = (
        token_indices_sorted_by_lora_ids + lora_m_indices_start + cta_m_offset
    )
    # Load all relevant row indices.
    # The modulo wraps out-of-range lanes back onto valid rows so the loads
    # stay in bounds; only the first cta_m_len rows are meaningful.
    offset_m = tl.arange(0, BLOCK_M) % cta_m_len
    ram = tl.load(cta_lora_seq_indices + offset_m)
    # Delegate the actual GEMM tile computation to the shared helper.
    do_shrink_kernel(
        pid_n,
        pid_sk,
        slice_id,
        lora_id,
        input_ptr,
        lora_ptr,
        out_ptr,
        N,
        K,
        cta_m_len,
        ram,  # array identifying the rows of Input ptr to operate on
        # input strides
        input_d0_stride,
        input_d1_stride,
        # lora strides
        lora_d0_stride,
        lora_d1_stride,
        lora_d2_stride,
        # output strides
        output_d0_stride,
        output_d1_stride,
        output_d2_stride,
        scaling,
        BLOCK_M,
        BLOCK_N,
        BLOCK_K,
        EVEN_K,
        SPLIT_K,
        SLICE_NUM,
        USE_GDC,
    )
@torch.inference_mode()
def _lora_shrink(
    inputs: torch.Tensor,  # shape [num_tokens, hidden_size]
    lora_a_weights: list[torch.Tensor],  # shape [num_loras, lora_rank, hidden_size]
    output_tensor: torch.Tensor,  # shape [num_slices, num_tokens, lora_rank]
    token_lora_mapping: torch.Tensor,  # shape [num_tokens]
    token_indices_sorted_by_lora_ids: torch.Tensor,  # shape [num_tokens]
    num_tokens_per_lora: torch.Tensor,  # shape [max-loras + 1]
    lora_token_start_loc: torch.Tensor,  # shape [max-loras + 2]
    lora_ids: torch.Tensor,  # shape [max-loras + 1]
    no_lora_flag_cpu: torch.Tensor,  # shape [1]
    scaling: float,
) -> None:
    """
    Args:
        inputs (torch.Tensor): Input tensor
        lora_a_weights (list[torch.Tensor]): LoRA weights
        output_tensor (torch.Tensor): output tensor
        token_lora_mapping (torch.Tensor): A tensor mapping each input token
            to the lora-id related to that token. A value of -1 indicates that
            LoRA doesn't apply to that token.
        token_indices_sorted_by_lora_ids (torch.Tensor): Row/Token indices from
            the A matrix grouped by LoRA IDs.
        num_tokens_per_lora (torch.Tensor): num_tokens_per_lora[i] is the number
            of tokens that are to be processed by LoRA ID lora_ids[i]
        lora_token_start_loc (torch.Tensor): A cumulative sum of
            num_tokens_per_lora. lora_token_start_loc[0] is always 0 so that
            lora_token_start_loc[i], along with num_tokens_per_lora[i]
            identifies the region in token_indices_sorted_by_lora_ids that
            LoRA lora_ids[i] should process.
        lora_ids (torch.Tensor): LoRA ids to process.
        no_lora_flag_cpu (torch.Tensor): A CPU tensor of size 1, that indicates
            if there are any requests that require LoRA.
        scaling (float): Scaling factor.
    """
    assert no_lora_flag_cpu.numel() == 1
    if no_lora_flag_cpu.item():
        # None of the inputs require LoRA.
        return
    # dtype/layout preconditions for the triton kernel.
    assert inputs.dtype == lora_a_weights[0].dtype
    assert inputs.dtype in [torch.float16, torch.bfloat16]
    for weight in lora_a_weights:
        assert weight.dtype in [torch.float16, torch.bfloat16]
    assert inputs.size(1) == lora_a_weights[0].size(-1)
    assert inputs.is_contiguous()
    assert output_tensor.is_contiguous()
    # metadata sanity check
    M = inputs.size(0)
    assert token_lora_mapping.size(0) == M
    assert token_lora_mapping.size(0) == token_indices_sorted_by_lora_ids.size(0)
    assert lora_ids.size(0) == num_tokens_per_lora.size(0)
    assert lora_token_start_loc.size(0) == lora_ids.size(0) + 1
    # Kernel accumulates into the output, so it must start zeroed
    # (NOTE(review): presumably required by SPLIT_K partial sums in
    # do_shrink_kernel - confirm there).
    output_tensor.zero_()
    (lora_ptr_tensor, lora_strides_d0, lora_strides_d1, lora_strides_d2) = (
        _get_lora_a_ptr(lora_a_weights, inputs.device)
    )
    N, K = lora_a_weights[0].shape[-2:]  # K=hidden_size,N=rank
    NUM_SLICES = len(lora_a_weights)
    MAX_LORAS = lora_ids.size(0)
    # Triton kernel configs
    kernel_config = get_lora_op_configs(
        "shrink",
        max_loras=MAX_LORAS,
        batch=M,
        hidden_size=K,
        rank=N,
        num_slices=NUM_SLICES,
    )
    BLOCK_M = kernel_config["block_m"]
    BLOCK_N = kernel_config["block_n"]
    BLOCK_K = kernel_config["block_k"]
    SPLIT_K = kernel_config["split_k"]
    NUM_WARPS = kernel_config["num_warps"]
    NUM_STAGES = kernel_config["num_stages"]
    NUM_CTAS = kernel_config["num_ctas"]
    GROUP_SIZE_M = kernel_config.get("group_size_m", 8)
    # True when the K loop needs no tail-masking.
    EVEN_K = K % (BLOCK_K * SPLIT_K) == 0  # type: ignore
    # TODO (varun): This grid formulation maximizes parallelization at the
    # cost of wasteful thread block launch when only few of the input tokens
    # require LoRA. This might not be the best in all cases.
    grid = (
        SPLIT_K * triton.cdiv(M, BLOCK_M) * triton.cdiv(N, BLOCK_N),
        NUM_SLICES,
        # Each LoRA receives its own set of thread blocks for output
        # computation. If some LoRA doesn't have any tokens to process, its
        # thread blocks exit early.
        MAX_LORAS,
    )
    use_gdc = supports_pdl(inputs.device)
    _lora_shrink_kernel[grid](
        inputs,
        lora_ptr_tensor,
        output_tensor,
        M,
        N,
        K,
        token_indices_sorted_by_lora_ids,
        num_tokens_per_lora,
        lora_token_start_loc,
        lora_ids,
        scaling,
        inputs.stride(0),
        inputs.stride(1),
        lora_strides_d0,
        lora_strides_d1,
        lora_strides_d2,
        output_tensor.stride(0),
        output_tensor.stride(1),
        output_tensor.stride(2),
        BLOCK_M,
        BLOCK_N,
        BLOCK_K,
        EVEN_K,
        SPLIT_K,
        GROUP_SIZE_M,
        NUM_SLICES,
        use_gdc,
        num_warps=NUM_WARPS,
        num_ctas=NUM_CTAS,
        num_stages=NUM_STAGES,
        launch_pdl=use_gdc,
    )
    return
def _lora_shrink_fake(
    inputs: torch.Tensor,
    lora_a_weights: list[torch.Tensor],
    output_tensor: torch.Tensor,
    token_lora_mapping: torch.Tensor,
    token_indices_sorted_by_lora_ids: torch.Tensor,
    num_tokens_per_lora: torch.Tensor,
    lora_token_start_loc: torch.Tensor,
    lora_ids: torch.Tensor,
    no_lora_flag_cpu: torch.Tensor,
    scaling: float,
) -> None:
    """No-op fake impl used when registering the `lora_shrink` custom op.

    Signature mirrors _lora_shrink; the real op writes into `output_tensor`
    in place, so there is no return value to model.
    """
    return
# Register `lora_shrink` as a torch custom op so it is traceable/compilable;
# fall back to calling the python impl directly when registration is
# unavailable (AttributeError - presumably older torch; confirm).
try:
    direct_register_custom_op(
        op_name="lora_shrink",
        op_func=_lora_shrink,
        mutates_args=["output_tensor"],
        fake_impl=_lora_shrink_fake,
    )
    lora_shrink = torch.ops.vllm.lora_shrink
except AttributeError:
    lora_shrink = _lora_shrink
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/ops/xla_ops/lora_ops.py | vllm/lora/ops/xla_ops/lora_ops.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import jax
import jax.numpy as jnp
import torch
import torch.nn.functional as F
import torch_xla.core.xla_builder as xb
from torch.library import impl
from torch_xla.experimental.custom_kernel import XLA_LIB, jax_import_guard
@jax.jit
def bgmv_jax(inputs, loras, idxs):
    """Batched grouped matvec: out[t] = inputs[t] @ loras[idxs[t]].T.

    Implemented via a one-hot selection so that an out-of-range index
    (e.g. -1 for "no LoRA") selects an all-zero row and therefore yields a
    zero output for that token, instead of the clamping a gather would do.
    """
    selection = jax.nn.one_hot(idxs, loras.shape[0], dtype=inputs.dtype)
    # out[t, l] = sum_{X, d} inputs[t, d] * selection[t, X] * loras[X, l, d]
    return jnp.einsum(
        "td,tX,Xld->tl",
        inputs,
        selection,
        loras,
    )
# Declare the custom "bgmv" op schema; backend implementations are attached
# via the @impl decorators below.
XLA_LIB.define("bgmv(Tensor inputs, Tensor loras, Tensor idxs) -> Tensor")
@impl(XLA_LIB, "bgmv", "XLA")
def bgmv_xla(inputs: torch.Tensor, loras: torch.Tensor, idxs: torch.IntTensor):
    """XLA-backend implementation of bgmv: delegates to the JAX kernel."""
    # Drop the unit dim: (num_loras, 1, rank, hidden) -> (num_loras, rank, hidden).
    if loras.ndim == 4:
        loras = loras.squeeze(axis=1)
    # Must run before handing tensors across the torch_xla/JAX boundary.
    jax_import_guard()
    return xb.call_jax(bgmv_jax, (inputs, loras, idxs))
@impl(XLA_LIB, "bgmv", "CompositeExplicitAutograd")
def bgmv_non_xla(inputs: torch.Tensor, loras: torch.Tensor, idxs: torch.IntTensor):
    """Non-XLA fallback: returns an uninitialized (T, L) result tensor.

    Appears to serve as a shape/dtype stub for tracing on non-XLA backends
    (no real computation is performed).
    """
    T, _ = inputs.shape
    if len(loras.shape) == 4:
        loras = loras.squeeze(axis=1)
    _, L, _ = loras.shape
    # Fix: match the input dtype. Previously the dtype was omitted, so the
    # stub produced default-dtype (float32) tensors even for fp16/bf16
    # inputs, mismatching what the real XLA kernel returns.
    return torch.empty((T, L), dtype=inputs.dtype, device=inputs.device)
def bgmv_expand(
    inputs: torch.Tensor,
    lora_b_weights: torch.Tensor,
    output_tensor: torch.Tensor,
    lora_indices_tensor: torch.Tensor,
    add_inputs: bool = True,
):
    """
    Args:
        inputs (torch.Tensor): Input tensor of shape [num_tokens, hidden_size].
        lora_b_weights (torch.Tensor): LoRA weights of shape
            [num_loras, lora_rank, hidden_size].
        output_tensor (torch.Tensor): output tensor of shape
            [num_tokens, hidden_size * num_slices].
        lora_indices_tensor (torch.Tensor): Tensor of shape [num_tokens]
            indicating which LoRA matrix to use for each token.
        add_inputs (bool): Whether or not to add the input tensor to the output
            tensor.
    """
    bgmv_out = torch.ops.xla.bgmv(inputs, lora_b_weights, lora_indices_tensor)
    n_rows = output_tensor.shape[0]
    n_cols = output_tensor.shape[1]
    # When bgmv produced a single row but more are expected, only row 0 is
    # used (mirrors the original limit logic).
    limit = 1 if (bgmv_out.shape[0] == 1 and n_rows != 1) else n_rows
    # Right-pad the feature dim up to the output width when needed.
    missing = n_cols - bgmv_out.shape[1]
    if missing > 0:
        bgmv_out = F.pad(bgmv_out, (0, missing, 0, 0))
    sliced = bgmv_out[:limit, :n_cols]
    return output_tensor + sliced if add_inputs else sliced
def bgmv_shrink(
    inputs: torch.Tensor,
    lora_b_weights: torch.Tensor,
    lora_indices_tensor: torch.Tensor,
    scaling: float = 1.0,
):
    """
    Args:
        inputs (torch.Tensor): Input tensor of shape [num_tokens, hidden_size].
        lora_b_weights (torch.Tensor): LoRA weights of shape
            [num_loras, lora_rank, hidden_size].
        lora_indices_tensor (torch.Tensor): Tensor of shape [num_tokens]
            indicating which LoRA matrix to use for each token.
        scaling (float, optional): Scalar multiplier applied to the output.
    """
    projected = torch.ops.xla.bgmv(inputs, lora_b_weights, lora_indices_tensor)
    return projected * scaling
def bgmv_expand_slice(
    inputs: torch.Tensor,
    lora_b_weights: torch.Tensor,
    output_tensor: torch.Tensor,
    lora_indices_tensor: torch.Tensor,
    slice_offset: int,
    slice_size: int,
    add_inputs: bool = True,
):
    """
    Args:
        inputs (torch.Tensor): Input tensor of shape [num_tokens, hidden_size].
        lora_b_weights (torch.Tensor): LoRA weights of shape
            [num_loras, lora_rank, hidden_size].
        output_tensor (torch.Tensor): output tensor of shape
            [num_tokens, hidden_size * num_slices].
        lora_indices_tensor (torch.Tensor): Tensor of shape [num_tokens]
            indicating which LoRA matrix to use for each token.
        slice_offset (int): column at which this slice starts in the output.
        slice_size (int): width of this slice in the output.
        add_inputs (bool): Whether or not to add the input tensor to the output
            tensor.
    """
    bgmv_out = torch.ops.xla.bgmv(inputs, lora_b_weights, lora_indices_tensor)
    # Pad left/right so the result occupies exactly
    # [slice_offset, slice_offset + slice_size) of the output width.
    right_pad = output_tensor.shape[1] - (slice_offset + slice_size)
    padded = F.pad(bgmv_out, (slice_offset, right_pad, 0, 0))
    if add_inputs:
        return output_tensor + padded
    return padded
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/vllm/lora/ops/xla_ops/__init__.py | vllm/lora/ops/xla_ops/__init__.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.lora.ops.xla_ops.lora_ops import bgmv_expand, bgmv_expand_slice, bgmv_shrink
# Public API of the XLA LoRA ops package (also controls `import *`).
__all__ = ["bgmv_expand", "bgmv_expand_slice", "bgmv_shrink"]
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/csrc/cutlass_extensions/vllm_cutlass_library_extension.py | csrc/cutlass_extensions/vllm_cutlass_library_extension.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import enum
from cutlass_library import *
#
# Extend cutlass library with custom types, and missing values
#
class VLLMDataType(enum.Enum):
    """vLLM-specific quantized data types not present in cutlass_library.

    Mapped below to "cutlass::vllm_uint4b8_t" / "cutlass::vllm_uint8b128_t"
    and to the vllm::kU4B8 / vllm::kU8B128 scalar-type tags (presumably
    unsigned ints with biases 8 and 128 respectively - confirm in the
    C++ definitions).
    """

    u4b8 = enum_auto()
    u8b128 = enum_auto()
class MixedInputKernelScheduleType(enum.Enum):
    """TMA warp-specialized kernel schedules, mapped below to the
    cutlass::gemm::KernelTmaWarpSpecialized* C++ tags."""

    TmaWarpSpecialized = enum_auto()
    TmaWarpSpecializedPingpong = enum_auto()
    TmaWarpSpecializedCooperative = enum_auto()
# Lookup tables extending cutlass_library's tables with the vLLM-specific
# types declared above. (Idiom fix: the inner `**{...}` splats were
# redundant - entries can be written directly after the base-table splat.)
VLLMDataTypeNames: dict[VLLMDataType | DataType, str] = {
    **DataTypeNames,  # type: ignore
    VLLMDataType.u4b8: "u4b8",
    VLLMDataType.u8b128: "u8b128",
}
VLLMDataTypeTag: dict[VLLMDataType | DataType, str] = {
    **DataTypeTag,  # type: ignore
    VLLMDataType.u4b8: "cutlass::vllm_uint4b8_t",
    VLLMDataType.u8b128: "cutlass::vllm_uint8b128_t",
}
VLLMDataTypeSize: dict[VLLMDataType | DataType, int] = {
    **DataTypeSize,  # type: ignore
    VLLMDataType.u4b8: 4,
    VLLMDataType.u8b128: 8,
}
# C++ tags for vLLM's ScalarType constants used in generated code.
VLLMDataTypeVLLMScalarTypeTag: dict[VLLMDataType | DataType, str] = {
    VLLMDataType.u4b8: "vllm::kU4B8",
    VLLMDataType.u8b128: "vllm::kU8B128",
    DataType.u4: "vllm::kU4",
    DataType.u8: "vllm::kU8",
    DataType.s4: "vllm::kS4",
    DataType.s8: "vllm::kS8",
    DataType.f16: "vllm::kFloat16",
    DataType.bf16: "vllm::kBfloat16",
}
# C++ tags for the corresponding at::ScalarType (torch) values.
VLLMDataTypeTorchDataTypeTag: dict[VLLMDataType | DataType, str] = {
    DataType.u8: "at::ScalarType::Byte",
    DataType.s8: "at::ScalarType::Char",
    DataType.e4m3: "at::ScalarType::Float8_e4m3fn",
    DataType.s32: "at::ScalarType::Int",
    DataType.f16: "at::ScalarType::Half",
    DataType.bf16: "at::ScalarType::BFloat16",
    DataType.f32: "at::ScalarType::Float",
}
VLLMKernelScheduleTag: dict[MixedInputKernelScheduleType | KernelScheduleType, str] = {
    **KernelScheduleTag,  # type: ignore
    MixedInputKernelScheduleType.TmaWarpSpecialized: "cutlass::gemm::KernelTmaWarpSpecialized",  # noqa: E501
    MixedInputKernelScheduleType.TmaWarpSpecializedPingpong: "cutlass::gemm::KernelTmaWarpSpecializedPingpong",  # noqa: E501
    MixedInputKernelScheduleType.TmaWarpSpecializedCooperative: "cutlass::gemm::KernelTmaWarpSpecializedCooperative",  # noqa: E501
}
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/csrc/quantization/gptq_marlin/generate_kernels.py | csrc/quantization/gptq_marlin/generate_kernels.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import glob
import itertools
import os
import subprocess
import sys
import jinja2
# NOTE(review): ARCHS is never appended to in this script — confirm it is
# intentionally unused before removing.
ARCHS = []
SUPPORT_FP8 = False
SUPPORT_SM75 = False
SUPPORT_SM80 = False
# sys.argv[1] is a comma-separated list of CUDA arch versions (e.g. "7.5,8.0").
for arch in sys.argv[1].split(","):
    # Keep only "major.minor" (drops any suffix such as "+PTX"), then remove
    # the dot: "8.0" -> 80. NOTE(review): assumes every entry contains a "." —
    # a bare "80" would raise ValueError here.
    arch = arch[: arch.index(".") + 2].replace(".", "")
    arch = int(arch)
    # only SM89 and SM120 fully support
    # mma.sync.aligned.m16n8k32.row.col.f32.e4m3.e4m3.f32.
    # SM90 and SM100 can use this PTX, but it's simulated
    # with FP16 MMA, so it cannot achieve any acceleration.
    if arch in [89, 120]:
        SUPPORT_FP8 = True
    if arch >= 80:
        SUPPORT_SM80 = True
    if arch == 75:
        SUPPORT_SM75 = True
FILE_HEAD_COMMENT = """
// auto generated by generate_kernels.py
// clang-format off
""".lstrip()
FILE_HEAD = (
FILE_HEAD_COMMENT
+ """
#include "kernel.h"
#include "marlin_template.h"
namespace MARLIN_NAMESPACE_NAME {
"""
)
TEMPLATE = (
"template __global__ void Marlin<"
"{{a_type_id}}, "
"{{b_type_id}}, "
"{{c_type_id}}, "
"{{s_type_id}}, "
"{{threads}}, "
"{{thread_m_blocks}}, "
"{{thread_n_blocks}}, "
"{{thread_k_blocks}}, "
"{{m_block_size_8}}, "
"{{stages}}, "
"{{group_blocks}}, "
"{{is_zp_float}}>"
"( MARLIN_KERNEL_PARAMS );"
)
THREAD_CONFIGS = [(128, 128, 256), (64, 256, 256), (64, 128, 128), (128, 64, 128)]
THREAD_M_BLOCKS = [0.5, 1, 2, 3, 4]
QUANT_CONFIGS = [
# AWQ-INT4
{
"b_type": "kU4",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": THREAD_M_BLOCKS,
"group_blocks": [-1, 2, 4, 8],
},
# HQQ
{
"a_type": ["kFloat16"],
"b_type": "kU4",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": THREAD_M_BLOCKS,
"group_blocks": [4],
"is_zp_float": True,
},
# GPTQ-INT4
{
"b_type": "kU4B8",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": THREAD_M_BLOCKS,
"group_blocks": [-1, 0, 2, 4, 8],
},
# GPTQ-INT8
{
"b_type": "kU8B128",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": THREAD_M_BLOCKS,
"group_blocks": [-1, 0, 2, 4, 8],
},
# FP8
{
"b_type": "kFE4M3fn",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": THREAD_M_BLOCKS,
"group_blocks": [-1, 8],
},
# NVFP4
{
"b_type": "kFE2M1f",
"s_type": "kFE4M3fn",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": THREAD_M_BLOCKS,
"group_blocks": [1],
},
# MXFP4
{
"a_type": ["kBFloat16"],
"b_type": "kFE2M1f",
"s_type": "kFE8M0fnu",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": THREAD_M_BLOCKS,
"group_blocks": [2],
},
# AWQ-INT4 with INT8 activation
{
"a_type": ["kS8"],
"b_type": "kU4",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": [1, 2, 3, 4],
"group_blocks": [-1, 2, 4, 8],
},
# GPTQ-INT4 with INT8 activation
{
"a_type": ["kS8"],
"b_type": "kU4B8",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": [1, 2, 3, 4],
"group_blocks": [-1, 2, 4, 8],
},
# GPTQ-INT4 with FP8 activation
{
"a_type": ["kFE4M3fn"],
"b_type": "kU4B8",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": [1, 2, 3, 4],
"group_blocks": [-1, 2, 4, 8],
},
# AWQ-INT4 with FP8 activation
{
"a_type": ["kFE4M3fn"],
"b_type": "kU4",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": [1, 2, 3, 4],
"group_blocks": [-1, 2, 4, 8],
},
# MXFP4 with FP8 activation
{
"a_type": ["kFE4M3fn"],
"b_type": "kFE2M1f",
"c_type": ["kBFloat16"],
"s_type": "kFE8M0fnu",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": [1, 2, 3, 4],
"group_blocks": [2],
},
]
def remove_old_kernels():
    """Delete previously generated kernel sources next to this script.

    Removes every generated ``*kernel_*.cu`` file plus ``kernel_selector.h``.
    Uses ``os.remove`` instead of spawning ``rm -f`` subprocesses: portable
    (works on Windows too), faster, and avoids one process per file.
    """
    base_dir = os.path.dirname(__file__)
    targets = glob.glob(os.path.join(base_dir, "*kernel_*.cu"))
    targets.append(os.path.join(base_dir, "kernel_selector.h"))
    for path in targets:
        try:
            os.remove(path)
        except FileNotFoundError:
            # mirror `rm -f`: a missing file is not an error
            pass
def generate_new_kernels():
    """Render one .cu file per (a_type, b_type, c_type) combination and write
    kernel_selector.h, an if/else-if chain that maps runtime parameters to the
    matching kernel instantiation.
    """
    # kernel configs keyed by (a_type, b_type, c_type); SM75 tracked separately
    result_dict = {}
    sm_75_result_dict = {}
    for quant_config in QUANT_CONFIGS:
        # default: generate for both fp16 and bf16 activation/output types
        c_types = quant_config.get("c_type", ["kFloat16", "kBFloat16"])
        a_types = quant_config.get("a_type", ["kFloat16", "kBFloat16"])
        b_type = quant_config["b_type"]
        is_zp_float = quant_config.get("is_zp_float", False)
        all_group_blocks = quant_config["group_blocks"]
        all_m_blocks = quant_config["thread_m_blocks"]
        all_thread_configs = quant_config["thread_configs"]
        for a_type, c_type in itertools.product(a_types, c_types):
            # skip FP8-activation kernels when no target arch supports them
            if not SUPPORT_FP8 and a_type == "kFE4M3fn":
                continue
            # 16-bit activation and output types must match (fp16<->fp16, ...)
            if "16" in a_type and "16" in c_type and a_type != c_type:
                continue
            # scale type defaults to the output type unless overridden
            s_type = quant_config.get("s_type", c_type)
            if (a_type, b_type, c_type) not in result_dict:
                result_dict[(a_type, b_type, c_type)] = []
            # SM75 only gets fp16-output kernels with fp16 or int8 activations
            if a_type in ["kFloat16", "kS8"] and c_type == "kFloat16":
                sm_75_result_dict[(a_type, b_type, c_type)] = []
            for group_blocks, m_blocks, thread_configs in itertools.product(
                all_group_blocks, all_m_blocks, all_thread_configs
            ):
                thread_k, thread_n, threads = thread_configs
                if threads == 256:
                    # for small batch (m_blocks == 1),
                    # we only need (128, 128, 256)
                    # for large batch (m_blocks > 1),
                    # we only need (64, 256, 256)
                    if m_blocks <= 1 and (thread_k, thread_n) != (128, 128):
                        continue
                    if m_blocks > 1 and (thread_k, thread_n) != (64, 256):
                        continue
                config = {
                    "threads": threads,
                    "s_type": s_type,
                    # m_blocks == 0.5 maps to thread_m_blocks=1 + the 8-row
                    # m_block_size_8 variant below
                    "thread_m_blocks": max(m_blocks, 1),
                    "thread_k_blocks": thread_k // 16,
                    "thread_n_blocks": thread_n // 16,
                    "m_block_size_8": "true" if m_blocks == 0.5 else "false",
                    "stages": 4,
                    "group_blocks": group_blocks,
                    "is_zp_float": "true" if is_zp_float else "false",
                }
                if SUPPORT_SM80:
                    result_dict[(a_type, b_type, c_type)].append(config)
                if (a_type, b_type, c_type) in sm_75_result_dict and SUPPORT_SM75:
                    # same config but with 2 pipeline stages on SM75
                    config_sm75 = config.copy()
                    config_sm75["stages"] = 2
                    sm_75_result_dict[(a_type, b_type, c_type)].append(config_sm75)
    # kernel_selector.h is accumulated across all combinations below
    kernel_selector_str = FILE_HEAD_COMMENT
    for result_dict_tmp in [result_dict, sm_75_result_dict]:
        for (a_type, b_type, c_type), config_list in result_dict_tmp.items():
            all_template_str_list = []
            if not config_list:
                continue
            for config in config_list:
                s_type = config["s_type"]
                # explicit template instantiation for this config
                template_str = jinja2.Template(TEMPLATE).render(
                    a_type_id=f"vllm::{a_type}.id()",
                    b_type_id=f"vllm::{b_type}.id()",
                    c_type_id=f"vllm::{c_type}.id()",
                    s_type_id=f"vllm::{s_type}.id()",
                    **config,
                )
                all_template_str_list.append(template_str)
                # matching selector branch for kernel_selector.h
                conditions = [
                    f"a_type == vllm::{a_type}",
                    f"b_type == vllm::{b_type}",
                    f"c_type == vllm::{c_type}",
                    f"s_type == vllm::{s_type}",
                    f"threads == {config['threads']}",
                    f"thread_m_blocks == {config['thread_m_blocks']}",
                    f"thread_n_blocks == {config['thread_n_blocks']}",
                    f"thread_k_blocks == {config['thread_k_blocks']}",
                    f"m_block_size_8 == {config['m_block_size_8']}",
                    f"stages == {config['stages']}",
                    f"group_blocks == {config['group_blocks']}",
                    f"is_zp_float == {config['is_zp_float']}",
                ]
                conditions = " && ".join(conditions)
                # first branch is a plain `if`, the rest are `else if`
                if kernel_selector_str == FILE_HEAD_COMMENT:
                    kernel_selector_str += f"if ({conditions})\n kernel = "
                else:
                    kernel_selector_str += f"else if ({conditions})\n kernel = "
                kernel_template2 = (
                    "Marlin<{{a_type_id}}, {{b_type_id}}, {{c_type_id}}, "
                    "{{s_type_id}}, {{threads}}, {{thread_m_blocks}}, "
                    "{{thread_n_blocks}}, {{thread_k_blocks}}, "
                    "{{m_block_size_8}}, {{stages}}, {{group_blocks}}, "
                    "{{is_zp_float}}>;"
                )
                kernel_selector_str += (
                    jinja2.Template(kernel_template2).render(
                        a_type_id=f"vllm::{a_type}.id()",
                        b_type_id=f"vllm::{b_type}.id()",
                        c_type_id=f"vllm::{c_type}.id()",
                        s_type_id=f"vllm::{s_type}.id()",
                        **config,
                    )
                    + "\n"
                )
            file_content = FILE_HEAD + "\n\n"
            file_content += "\n\n".join(all_template_str_list) + "\n\n}\n"
            # filename prefix encodes the minimum SM this file targets
            if a_type == "kFE4M3fn":
                filename = f"sm89_kernel_{a_type[1:]}_{b_type[1:]}_{c_type[1:]}.cu"
            elif result_dict_tmp is sm_75_result_dict:
                filename = f"sm75_kernel_{a_type[1:]}_{b_type[1:]}_{c_type[1:]}.cu"
            else:
                filename = f"sm80_kernel_{a_type[1:]}_{b_type[1:]}_{c_type[1:]}.cu"
            filename = filename.lower()
            with open(os.path.join(os.path.dirname(__file__), filename), "w") as f:
                f.write(file_content)
    # fail loudly at dispatch time when an FP8-activation kernel is requested
    # from a build that did not include them
    if not SUPPORT_FP8 and kernel_selector_str != FILE_HEAD_COMMENT:
        kernel_selector_str += (
            "else if (a_type == vllm::kFE4M3fn)\n"
            " TORCH_CHECK(false, "
            '"marlin kernel with fp8 activation is not built.");'
        )
    with open(os.path.join(os.path.dirname(__file__), "kernel_selector.h"), "w") as f:
        f.write(kernel_selector_str)
if __name__ == "__main__":
remove_old_kernels()
generate_new_kernels()
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/csrc/quantization/machete/generate.py | csrc/quantization/machete/generate.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import itertools
import math
import os
import shutil
from collections.abc import Iterable
from copy import deepcopy
from dataclasses import dataclass, fields
from functools import reduce
import jinja2
from vllm_cutlass_library_extension import (
DataType,
EpilogueScheduleTag,
EpilogueScheduleType,
MixedInputKernelScheduleType,
TileSchedulerTag,
TileSchedulerType,
VLLMDataType,
VLLMDataTypeNames,
VLLMDataTypeSize,
VLLMDataTypeTag,
VLLMDataTypeTorchDataTypeTag,
VLLMDataTypeVLLMScalarTypeTag,
VLLMKernelScheduleTag,
)
#
# Generator templating
#

# Dispatch source: declares the extern per-type/per-schedule impl entry points
# and routes MMArgs to the matching one (via heuristic or explicit schedule).
DISPATCH_TEMPLATE = """
#include "../machete_mm_launcher.cuh"
namespace machete {
{% for impl_config in impl_configs %}
{% set type_sig = gen_type_sig(impl_config.types) -%}
{% for s in impl_config.schedules %}
extern torch::Tensor impl_{{type_sig}}_sch_{{gen_sch_sig(s)}}(MMArgs);
{%- endfor %}
torch::Tensor mm_dispatch_{{type_sig}}(MMArgs args) {
[[maybe_unused]] auto M = args.A.size(0);
[[maybe_unused]] auto N = args.B.size(1);
[[maybe_unused]] auto K = args.A.size(1);
if (!args.maybe_schedule) {
{%- for cond, s in impl_config.heuristic %}
{%if cond is not none%}if ({{cond}})
{%- else %}else
{%- endif %}
return impl_{{type_sig}}_sch_{{ gen_sch_sig(s) }}(args);{% endfor %}
}
{%- for s in impl_config.schedules %}
if (*args.maybe_schedule == "{{ gen_sch_sig(s) }}")
return impl_{{type_sig}}_sch_{{ gen_sch_sig(s) }}(args);
{%- endfor %}
TORCH_CHECK_NOT_IMPLEMENTED(false, "machete_gemm(..) is not implemented for "
"schedule = ", *args.maybe_schedule);
}
{%- endfor %}
static inline std::optional<at::ScalarType> maybe_scalartype(
std::optional<at::Tensor> const& t) {
if (!t) {
return std::nullopt;
} else {
return t->scalar_type();
};
}
torch::Tensor mm_dispatch(MMArgs args) {
auto out_type = args.maybe_out_type.value_or(args.A.scalar_type());
auto a_type = args.A.scalar_type();
auto maybe_g_scales_type = maybe_scalartype(args.maybe_group_scales);
auto maybe_g_zeros_type = maybe_scalartype(args.maybe_group_zeros);
auto maybe_ch_scales_type = maybe_scalartype(args.maybe_channel_scales);
auto maybe_tok_scales_type = maybe_scalartype(args.maybe_token_scales);
{% for impl_config in impl_configs %}
{% set t = impl_config.types -%}
{% set type_sig = gen_type_sig(t) -%}
if (args.b_type == {{VLLMScalarTypeTag[t.b]}}
&& a_type == {{TorchTypeTag[t.a]}}
&& out_type == {{TorchTypeTag[t.out]}}
&& {%if t.b_group_scale != void -%}
maybe_g_scales_type == {{TorchTypeTag[t.b_group_scale]}}
{%- else %}!maybe_g_scales_type{%endif%}
&& {%if t.b_group_zeropoint != void -%}
maybe_g_zeros_type == {{TorchTypeTag[t.b_group_zeropoint]}}
{%- else %}!maybe_g_zeros_type{%endif%}
&& {%if t.b_channel_scale != void -%}
maybe_ch_scales_type == {{TorchTypeTag[t.b_channel_scale]}}
{%- else %}!maybe_ch_scales_type{%endif%}
&& {%if t.a_token_scale != void -%}
maybe_tok_scales_type == {{TorchTypeTag[t.a_token_scale]}}
{%- else %}!maybe_tok_scales_type{%endif%}
) {
return mm_dispatch_{{type_sig}}(args);
}
{%- endfor %}
TORCH_CHECK_NOT_IMPLEMENTED(
false, "machete_mm(..) is not implemented for "
"a_type=", args.A.scalar_type(),
", b_type=", args.b_type.str(),
", out_type=", out_type,
", with_group_scale_type=", maybe_g_scales_type
? toString(*maybe_g_scales_type) : "None",
", with_group_zeropoint_type=", maybe_g_zeros_type
? toString(*maybe_g_zeros_type) : "None",
", with_channel_scale_type=", maybe_ch_scales_type
? toString(*maybe_ch_scales_type) : "None",
", with_token_scale_type=", maybe_tok_scales_type
? toString(*maybe_tok_scales_type) : "None",
"; implemented types are: \\n",
{%- for impl_config in impl_configs %}
{% set t = impl_config.types -%}
"\\t{{gen_type_option_name(t)}}\\n",
{%- endfor %}
"");
}
std::vector<std::string> supported_schedules_dispatch(
SupportedSchedulesArgs args) {
auto out_type = args.maybe_out_type.value_or(args.a_type);
{% for impl_config in impl_configs %}
{% set t = impl_config.types -%}
{% set schs = impl_config.schedules -%}
if (args.b_type == {{VLLMScalarTypeTag[t.b]}}
&& args.a_type == {{TorchTypeTag[t.a]}}
&& out_type == {{TorchTypeTag[t.out]}}
&& {%if t.b_group_scale != void -%}
args.maybe_group_scales_type == {{TorchTypeTag[t.b_group_scale]}}
{%- else %}!args.maybe_group_scales_type{%endif%}
&& {%if t.b_group_zeropoint != void-%}
args.maybe_group_zeros_type == {{TorchTypeTag[t.b_group_zeropoint]}}
{%- else %}!args.maybe_group_zeros_type{%endif%}
) {
return {
{%- for s in impl_config.schedules %}
"{{gen_sch_sig(s)}}"{% if not loop.last %},{% endif %}
{%- endfor %}
};
}
{%- endfor %}
return {};
};
}; // namespace machete
"""

# Implementation source: one schedule struct per unique schedule, plus the
# kernel alias and impl_* entry point per (types, schedule) pair.
IMPL_TEMPLATE = """
#include "../machete_mm_launcher.cuh"
namespace machete {
{% for sch in unique_schedules(impl_configs) %}
{% set sch_sig = gen_sch_sig(sch) -%}
struct sch_{{sch_sig}} {
using TileShapeNM = Shape<{{
to_cute_constant(sch.tile_shape_mn)|join(', ')}}>;
using ClusterShape = Shape<{{
to_cute_constant(sch.cluster_shape_mnk)|join(', ')}}>;
// TODO: Reimplement
// using KernelSchedule = {{KernelScheduleTag[sch.kernel_schedule]}};
using EpilogueSchedule = {{EpilogueScheduleTag[sch.epilogue_schedule]}};
using TileScheduler = {{TileSchedulerTag[sch.tile_scheduler]}};
using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
};
{% endfor %}
{% for impl_config in impl_configs %}
{% set t = impl_config.types -%}
{% set schs = impl_config.schedules -%}
{% set type_sig = gen_type_sig(t) -%}
template<typename Sch>
using Kernel_{{type_sig}} = MacheteKernelTemplate<
{{DataTypeTag[t.a]}}, // ElementA
{{DataTypeTag[t.b]}}, // ElementB
{{DataTypeTag[t.out]}}, // ElementD
{{DataTypeTag[t.accumulator]}}, // Accumulator
{{DataTypeTag[t.b_group_scale]}}, // GroupScaleT
{{DataTypeTag[t.b_group_zeropoint]}}, // GroupZeroT
{{DataTypeTag[t.b_channel_scale]}}, // ChannelScaleT
{{DataTypeTag[t.a_token_scale]}}, // TokenScaleT
cutlass::gemm::KernelTmaWarpSpecializedCooperative,
Sch>;
{% for sch in schs %}
{% set sch_sig = gen_sch_sig(sch) -%}
torch::Tensor
impl_{{type_sig}}_sch_{{sch_sig}}(MMArgs args) {
return run_impl<Kernel_{{type_sig}}<sch_{{sch_sig}}>>(args);
}
{%- endfor %}
{%- endfor %}
}; // namespace machete
"""

# Prepack source: routes PrepackBArgs to the matching prepacked-layout impl.
PREPACK_TEMPLATE = """
#include "../machete_prepack_launcher.cuh"
namespace machete {
torch::Tensor prepack_B_dispatch(PrepackBArgs args) {
auto convert_type = args.maybe_group_scales_type.value_or(args.a_type);
{%- for t in types %}
{% set b_type = unsigned_type_with_bitwidth(t.b_num_bits) %}
if (args.a_type == {{TorchTypeTag[t.a]}}
&& args.b_type.size_bits() == {{t.b_num_bits}}
&& convert_type == {{TorchTypeTag[t.convert]}}) {
return prepack_impl<
PrepackedLayoutBTemplate<
{{DataTypeTag[t.a]}}, // ElementA
{{DataTypeTag[b_type]}}, // ElementB
{{DataTypeTag[t.convert]}}, // ElementConvert
{{DataTypeTag[t.accumulator]}}, // Accumulator
cutlass::layout::ColumnMajor,
cutlass::gemm::KernelTmaWarpSpecializedCooperative>
>(args.B);
}
{%- endfor %}
TORCH_CHECK_NOT_IMPLEMENTED(false,
"prepack_B_dispatch(..) is not implemented for "
"atype = ", args.a_type,
", b_type = ", args.b_type.str(),
", with_group_scales_type= ", args.maybe_group_scales_type ?
toString(*args.maybe_group_scales_type) : "None");
}
}; // namespace machete
"""

# Shorthands for the only kernel/epilogue schedules currently generated.
TmaMI = MixedInputKernelScheduleType.TmaWarpSpecializedCooperative
TmaCoop = EpilogueScheduleType.TmaWarpSpecializedCooperative
@dataclass(frozen=True)
class ScheduleConfig:
    """One CUTLASS schedule choice for a kernel instantiation.

    Frozen (hashable) so schedules can be deduplicated via dict keys.
    """

    tile_shape_mn: tuple[int, int]  # tile shape (M, N)
    cluster_shape_mnk: tuple[int, int, int]  # cluster shape (M, N, K)
    kernel_schedule: MixedInputKernelScheduleType
    epilogue_schedule: EpilogueScheduleType
    tile_scheduler: TileSchedulerType
@dataclass(frozen=True)
class TypeConfig:
    """Full operand/accumulator type signature of one mm kernel.

    ``DataType.void`` marks an absent operand (no zero-points, scales, ...).
    """

    a: DataType  # activation type
    b: DataType | VLLMDataType  # (quantized) weight type
    b_group_scale: DataType
    b_group_zeropoint: DataType
    b_channel_scale: DataType
    a_token_scale: DataType
    out: DataType
    accumulator: DataType
@dataclass(frozen=True)
class PrepackTypeConfig:
    """Type signature for one B-prepack instantiation."""

    a: DataType
    b_num_bits: int  # bit-width of the packed B elements
    convert: DataType  # type B is converted to (the group-scale type if any)
    accumulator: DataType
@dataclass
class ImplConfig:
    """A type signature plus the schedules (and dispatch heuristic) for it.

    Mutable on purpose: ``create_sources`` splits ``schedules`` across files.
    """

    types: TypeConfig
    schedules: list[ScheduleConfig]
    # (C condition expression, schedule); None condition = fallback "else"
    heuristic: list[tuple[str | None, ScheduleConfig]]
def generate_sch_sig(schedule_config: ScheduleConfig) -> str:
    """Build the full, unique signature string for a schedule config."""
    tm, tn = schedule_config.tile_shape_mn
    cm, cn, ck = schedule_config.cluster_shape_mnk
    # Only the last component of each C++ qualified tag name is used.
    kernel = VLLMKernelScheduleTag[schedule_config.kernel_schedule].rsplit("::", 1)[-1]
    epilogue = EpilogueScheduleTag[schedule_config.epilogue_schedule].rsplit("::", 1)[-1]
    scheduler = TileSchedulerTag[schedule_config.tile_scheduler].rsplit("::", 1)[-1]
    return f"{tm}x{tn}_{cm}x{cn}x{ck}_{kernel}_{epilogue}_{scheduler}"
# mostly unique shorter sch_sig
def generate_terse_sch_sig(schedule_config: ScheduleConfig) -> str:
    """Shorten the full schedule signature using terse tag replacements."""
    replacements = (
        ("KernelTmaWarpSpecializedCooperative", "TmaMI_"),
        ("TmaWarpSpecializedCooperative_", "TmaCoop_"),
        ("StreamKScheduler", "streamK"),
    )
    sig = generate_sch_sig(schedule_config)
    for verbose, terse in replacements:
        sig = sig.replace(verbose, terse)
    return sig
# unique type_name
def generate_type_signature(kernel_types: TypeConfig):
    """Concatenate the short name of every field's type into a unique signature.

    Fix: dropped the redundant ``str(...)`` wrapper (``str.join`` already
    returns a str) and the unnecessary list materialization inside ``join``.
    """
    return "".join(
        VLLMDataTypeNames[getattr(kernel_types, field.name)]
        for field in fields(TypeConfig)
    )
def generate_type_option_name(kernel_types: TypeConfig):
    """Human-readable "name=TypeName, ..." listing (used in error messages)."""
    parts = []
    for field in fields(TypeConfig):
        # b_* fields read as "with_*_type" in the rendered message
        label = field.name.replace("b_", "with_") + "_type"
        parts.append(label + "=" + VLLMDataTypeNames[getattr(kernel_types, field.name)])
    return ", ".join(parts)
def is_power_of_two(n):
    """Return True iff ``n`` is a positive power of two."""
    # A power of two has exactly one set bit, so n & (n - 1) clears it to 0.
    return n > 0 and (n & (n - 1)) == 0
def to_cute_constant(value: list[int]):
    """Render int(s) as CUTE constant spellings.

    Powers of two use the short ``_N`` alias; everything else ``Int<N>``.
    Accepts either a single int or an iterable of ints.
    """

    def render(v: int) -> str:
        return f"_{v}" if is_power_of_two(v) else f"Int<{v}>"

    if isinstance(value, Iterable):
        return [render(v) for v in value]
    return render(value)
def unique_schedules(impl_configs: list[ImplConfig]):
    """Deduplicate schedules across configs, preserving first-seen order."""
    # Use dict over set for deterministic ordering
    seen: dict = {}
    for cfg in impl_configs:
        for sch in cfg.schedules:
            seen[sch] = None
    return list(seen)
def unsigned_type_with_bitwidth(num_bits):
    """Map a bit-width to the matching unsigned DataType.

    Raises KeyError for widths other than 4/8/16/32/64.
    """
    bitwidth_to_unsigned = {
        4: DataType.u4,
        8: DataType.u8,
        16: DataType.u16,
        32: DataType.u32,
        64: DataType.u64,
    }
    return bitwidth_to_unsigned[num_bits]
# Names and helper functions made available to all Jinja templates above.
template_globals = {
    "void": DataType.void,  # templates compare against this to detect absent operands
    "DataTypeTag": VLLMDataTypeTag,
    "VLLMScalarTypeTag": VLLMDataTypeVLLMScalarTypeTag,
    "TorchTypeTag": VLLMDataTypeTorchDataTypeTag,
    "KernelScheduleTag": VLLMKernelScheduleTag,
    "EpilogueScheduleTag": EpilogueScheduleTag,
    "TileSchedulerTag": TileSchedulerTag,
    "to_cute_constant": to_cute_constant,
    "gen_sch_sig": generate_terse_sch_sig,
    "gen_type_sig": generate_type_signature,
    "unique_schedules": unique_schedules,
    "unsigned_type_with_bitwidth": unsigned_type_with_bitwidth,
    "gen_type_option_name": generate_type_option_name,
}


def create_template(template_str):
    """Compile a Jinja template with the shared globals installed."""
    template = jinja2.Template(template_str)
    template.globals.update(template_globals)
    return template


# Pre-compiled templates used by create_sources().
mm_dispatch_template = create_template(DISPATCH_TEMPLATE)
mm_impl_template = create_template(IMPL_TEMPLATE)
prepack_dispatch_template = create_template(PREPACK_TEMPLATE)
def create_sources(impl_configs: list[ImplConfig], num_impl_files=8):
    """Render all sources and return them as (file_stem, code) pairs.

    Produces one dispatch file, one prepack file, and the impl instantiations
    split across roughly ``num_impl_files`` files to parallelize compilation.
    """
    sources = []

    sources.append(
        (
            "machete_mm_dispatch",
            mm_dispatch_template.render(impl_configs=impl_configs),
        )
    )

    prepack_types = []
    for impl_config in impl_configs:
        # B is converted to the group-scale type when one exists, else to A's type
        convert_type = (
            impl_config.types.a
            if impl_config.types.b_group_scale == DataType.void
            else impl_config.types.b_group_scale
        )
        prepack_types.append(
            PrepackTypeConfig(
                a=impl_config.types.a,
                b_num_bits=VLLMDataTypeSize[impl_config.types.b],
                convert=convert_type,
                accumulator=impl_config.types.accumulator,
            )
        )

    def prepacked_type_key(prepack_type: PrepackTypeConfig):
        # For now, we can just use the first accumulator type seen since
        # the tensor core shapes/layouts don't vary based on accumulator
        # type so we can generate less code this way
        return (prepack_type.a, prepack_type.b_num_bits, prepack_type.convert)

    # Deduplicate prepack configs while preserving first-seen order.
    unique_prepack_types = []
    prepack_types_seen = set()
    for prepack_type in prepack_types:
        key = prepacked_type_key(prepack_type)
        if key not in prepack_types_seen:
            unique_prepack_types.append(prepack_type)
            prepack_types_seen.add(key)

    sources.append(
        (
            "machete_prepack",
            prepack_dispatch_template.render(
                types=unique_prepack_types,
            ),
        )
    )

    # Split up impls across files
    num_impls = reduce(lambda x, y: x + len(y.schedules), impl_configs, 0)
    num_impls_per_file = math.ceil(num_impls / num_impl_files)

    files_impls: list[list[ImplConfig]] = [[]]

    curr_num_impls_assigned = 0
    curr_impl_in_file = 0
    # deepcopy: the loop below mutates .schedules when splitting a config
    curr_impl_configs = deepcopy(list(reversed(impl_configs)))

    while curr_num_impls_assigned < num_impls:
        room_left_in_file = num_impls_per_file - curr_impl_in_file
        if room_left_in_file == 0:
            files_impls.append([])
            room_left_in_file = num_impls_per_file
            curr_impl_in_file = 0

        curr_ic = curr_impl_configs[-1]
        if len(curr_ic.schedules) >= room_left_in_file:
            # Break apart the current impl config
            tmp_ic = deepcopy(curr_ic)
            tmp_ic.schedules = curr_ic.schedules[:room_left_in_file]
            curr_ic.schedules = curr_ic.schedules[room_left_in_file:]
            files_impls[-1].append(tmp_ic)
        else:
            files_impls[-1].append(curr_ic)
            curr_impl_configs.pop()

        curr_num_impls_assigned += len(files_impls[-1][-1].schedules)
        curr_impl_in_file += len(files_impls[-1][-1].schedules)

    for part, file_impls in enumerate(files_impls):
        sources.append(
            (
                f"machete_mm_impl_part{part + 1}",
                mm_impl_template.render(impl_configs=file_impls),
            )
        )
    return sources
def generate():
    """Render all machete sources into the ``generated/`` directory.

    Fix: the output path used a literal placeholder string instead of the
    rendered source's own name, so every source overwrote the same file and
    the per-file loop variable went unused. Now writes ``{filename}.cu``.
    """
    # See csrc/quantization/machete/Readme.md, the Codegeneration for more info
    # about how this works
    SCRIPT_DIR = os.path.dirname(__file__)

    # Schedule parameters shared by every heuristic entry.
    sch_common_params = dict(
        kernel_schedule=TmaMI,
        epilogue_schedule=TmaCoop,
        tile_scheduler=TileSchedulerType.StreamK,
    )

    # Stored as "condition": ((tile_shape_mn), (cluster_shape_mnk))
    default_tile_heuristic_config = {
        #### M = 257+
        "M > 256 && K <= 16384 && N <= 4096": ((128, 128), (2, 1, 1)),
        "M > 256": ((128, 256), (2, 1, 1)),
        #### M = 129-256
        "M > 128 && K <= 4096 && N <= 4096": ((128, 64), (2, 1, 1)),
        "M > 128 && K <= 8192 && N <= 8192": ((128, 128), (2, 1, 1)),
        "M > 128": ((128, 256), (2, 1, 1)),
        #### M = 65-128
        "M > 64 && K <= 4069 && N <= 4069": ((128, 32), (2, 1, 1)),
        "M > 64 && K <= 4069 && N <= 8192": ((128, 64), (2, 1, 1)),
        "M > 64 && K >= 8192 && N >= 12288": ((256, 128), (2, 1, 1)),
        "M > 64": ((128, 128), (2, 1, 1)),
        #### M = 33-64
        "M > 32 && K <= 6144 && N <= 6144": ((128, 16), (1, 1, 1)),
        "M > 32 && K >= 16384 && N >= 12288": ((256, 64), (2, 1, 1)),
        "M > 32": ((128, 64), (2, 1, 1)),
        #### M = 17-32
        "M > 16 && K <= 12288 && N <= 8192": ((128, 32), (2, 1, 1)),
        "M > 16": ((256, 32), (2, 1, 1)),
        #### M = 1-16
        "N >= 26624": ((256, 16), (1, 1, 1)),
        None: ((128, 16), (1, 1, 1)),
    }

    # For now we use the same heuristic for all types
    # Heuristic is currently tuned for H100s
    default_heuristic = [
        (cond, ScheduleConfig(*tile_config, **sch_common_params))  # type: ignore
        for cond, tile_config in default_tile_heuristic_config.items()
    ]

    def get_unique_schedules(heuristic: dict[str, ScheduleConfig]):
        # Do not use schedules = list(set(...)) because we need to make sure
        # the output list is deterministic; otherwise the generated kernel file
        # will be non-deterministic and causes ccache miss.
        schedules = []
        for _, schedule_config in heuristic:
            if schedule_config not in schedules:
                schedules.append(schedule_config)
        return schedules

    impl_configs = []

    # GPTQ-style: biased unsigned weights, group scales, no zero-points.
    GPTQ_kernel_type_configs = list(
        TypeConfig(
            a=a,
            b=b,
            b_group_scale=a,
            b_group_zeropoint=DataType.void,
            b_channel_scale=DataType.void,
            a_token_scale=DataType.void,
            out=a,
            accumulator=DataType.f32,
        )
        for b in (VLLMDataType.u4b8, VLLMDataType.u8b128)
        for a in (DataType.f16, DataType.bf16)
    )

    impl_configs += [
        ImplConfig(x[0], x[1], x[2])
        for x in zip(
            GPTQ_kernel_type_configs,
            itertools.repeat(get_unique_schedules(default_heuristic)),
            itertools.repeat(default_heuristic),
        )
    ]

    # AWQ-style: unsigned weights with group scales AND group zero-points.
    AWQ_kernel_type_configs = list(
        TypeConfig(
            a=a,
            b=b,
            b_group_scale=a,
            b_group_zeropoint=a,
            b_channel_scale=DataType.void,
            a_token_scale=DataType.void,
            out=a,
            accumulator=DataType.f32,
        )
        for b in (DataType.u4, DataType.u8)
        for a in (DataType.f16, DataType.bf16)
    )

    impl_configs += [
        ImplConfig(x[0], x[1], x[2])
        for x in zip(
            AWQ_kernel_type_configs,
            itertools.repeat(get_unique_schedules(default_heuristic)),
            itertools.repeat(default_heuristic),
        )
    ]

    # TODO: Support W4A8 when ready. A draft of the QQQ (s8/e4m3 activation)
    # tile heuristic and type configs previously lived here as commented-out
    # code; restore it from version control when re-enabling.

    output_dir = os.path.join(SCRIPT_DIR, "generated")

    # Delete the "generated" directory if it exists
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)

    # Create the "generated" directory
    os.makedirs(output_dir)

    # Render each group of configurations into separate files
    for filename, code in create_sources(impl_configs):
        # Fixed: use the source's own stem (was a constant placeholder that
        # made every render overwrite a single file).
        filepath = os.path.join(output_dir, f"{filename}.cu")
        with open(filepath, "w") as output_file:
            output_file.write(code)
        print(f"Rendered template to {filepath}")
if __name__ == "__main__":
generate()
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
vllm-project/vllm | https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/csrc/moe/marlin_moe_wna16/generate_kernels.py | csrc/moe/marlin_moe_wna16/generate_kernels.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import glob
import itertools
import os
import subprocess
import sys
import jinja2
# NOTE(review): ARCHS is never appended to in this script — confirm it is
# intentionally unused before removing.
ARCHS = []
SUPPORT_FP8 = False
SUPPORT_SM75 = False
SUPPORT_SM80 = False
# sys.argv[1] is a comma-separated list of CUDA arch versions (e.g. "7.5,8.0").
for arch in sys.argv[1].split(","):
    # Keep only "major.minor" (drops any suffix such as "+PTX"), then remove
    # the dot: "8.0" -> 80. NOTE(review): assumes every entry contains a "." —
    # a bare "80" would raise ValueError here.
    arch = arch[: arch.index(".") + 2].replace(".", "")
    arch = int(arch)
    # only SM89 and SM120 fully support
    # mma.sync.aligned.m16n8k32.row.col.f32.e4m3.e4m3.f32.
    # SM90 and SM100 can use this PTX, but it's simulated
    # with FP16 MMA, so it cannot achieve any acceleration.
    if arch in [89, 120]:
        SUPPORT_FP8 = True
    if arch >= 80:
        SUPPORT_SM80 = True
    if arch == 75:
        SUPPORT_SM75 = True
FILE_HEAD_COMMENT = """
// auto generated by generate_kernels.py
// clang-format off
""".lstrip()
FILE_HEAD = (
FILE_HEAD_COMMENT
+ """
#include "kernel.h"
#include "marlin_template.h"
namespace MARLIN_NAMESPACE_NAME {
"""
)
TEMPLATE = (
"template __global__ void Marlin<"
"{{a_type_id}}, "
"{{b_type_id}}, "
"{{c_type_id}}, "
"{{s_type_id}}, "
"{{threads}}, "
"{{thread_m_blocks}}, "
"{{thread_n_blocks}}, "
"{{thread_k_blocks}}, "
"{{m_block_size_8}}, "
"{{stages}}, "
"{{group_blocks}}, "
"{{is_zp_float}}>"
"( MARLIN_KERNEL_PARAMS );"
)
THREAD_CONFIGS = [(128, 128, 256), (64, 256, 256), (64, 128, 128)]
THREAD_M_BLOCKS = [0.5, 1, 2, 3, 4]
QUANT_CONFIGS = [
# AWQ-INT4
{
"b_type": "kU4",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": THREAD_M_BLOCKS,
"group_blocks": [-1, 2, 4, 8],
},
# GPTQ-INT4
{
"b_type": "kU4B8",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": THREAD_M_BLOCKS,
"group_blocks": [-1, 0, 2, 4, 8],
},
# AWQ-INT8
{
"b_type": "kU8B128",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": THREAD_M_BLOCKS,
"group_blocks": [-1, 0, 2, 4, 8],
},
# FP8
{
"b_type": "kFE4M3fn",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": THREAD_M_BLOCKS,
"group_blocks": [-1, 8],
},
# NVFP4
{
"b_type": "kFE2M1f",
"s_type": "kFE4M3fn",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": THREAD_M_BLOCKS,
"group_blocks": [1],
},
# MXFP4
{
"a_type": ["kBFloat16"],
"b_type": "kFE2M1f",
"s_type": "kFE8M0fnu",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": THREAD_M_BLOCKS,
"group_blocks": [2],
},
# AWQ-INT4 with INT8 activation
{
"a_type": ["kS8"],
"b_type": "kU4",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": [1, 2, 3, 4],
"group_blocks": [-1, 2, 4, 8],
},
# GPTQ-INT4 with INT8 activation
{
"a_type": ["kS8"],
"b_type": "kU4B8",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": [1, 2, 3, 4],
"group_blocks": [-1, 2, 4, 8],
},
# GPTQ-INT4 with FP8 activation
{
"a_type": ["kFE4M3fn"],
"b_type": "kU4B8",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": [1, 2, 3, 4],
"group_blocks": [-1, 2, 4, 8],
},
# AWQ-INT4 with FP8 activation
{
"a_type": ["kFE4M3fn"],
"b_type": "kU4",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": [1, 2, 3, 4],
"group_blocks": [-1, 2, 4, 8],
},
# MXFP4 with FP8 activation
{
"a_type": ["kFE4M3fn"],
"b_type": "kFE2M1f",
"c_type": ["kBFloat16"],
"s_type": "kFE8M0fnu",
"thread_configs": THREAD_CONFIGS,
"thread_m_blocks": [1, 2, 3, 4],
"group_blocks": [2],
},
]
def remove_old_kernels():
    """Delete previously generated kernel sources next to this script.

    Removes every generated ``*kernel_*.cu`` file plus ``kernel_selector.h``.
    Uses ``os.remove`` instead of spawning ``rm -f`` subprocesses: portable
    (works on Windows too), faster, and avoids one process per file.
    """
    base_dir = os.path.dirname(__file__)
    targets = glob.glob(os.path.join(base_dir, "*kernel_*.cu"))
    targets.append(os.path.join(base_dir, "kernel_selector.h"))
    for path in targets:
        try:
            os.remove(path)
        except FileNotFoundError:
            # mirror `rm -f`: a missing file is not an error
            pass
def generate_new_kernels():
    """Emit one `.cu` source per (a_type, b_type, c_type) combination and a
    `kernel_selector.h` if/else-if chain that maps runtime kernel parameters
    to the matching `Marlin<...>` template instantiation.
    """
    # Configs collected per dtype triple: full set for SM80+, and a reduced
    # 2-stage set for SM75.
    result_dict = {}
    sm_75_result_dict = {}
    for quant_config in QUANT_CONFIGS:
        # Defaults: both fp16 and bf16 activations/outputs unless overridden.
        c_types = quant_config.get("c_type", ["kFloat16", "kBFloat16"])
        a_types = quant_config.get("a_type", ["kFloat16", "kBFloat16"])
        b_type = quant_config["b_type"]
        all_group_blocks = quant_config["group_blocks"]
        all_m_blocks = quant_config["thread_m_blocks"]
        all_thread_configs = quant_config["thread_configs"]
        for a_type, c_type in itertools.product(a_types, c_types):
            # Skip fp8-activation kernels entirely when fp8 is not supported.
            if not SUPPORT_FP8 and a_type == "kFE4M3fn":
                continue
            # A 16-bit activation type must match a 16-bit output type.
            if "16" in a_type and "16" in c_type and a_type != c_type:
                continue
            # Scale type defaults to the output type unless the config says so.
            s_type = quant_config.get("s_type", c_type)
            if (a_type, b_type, c_type) not in result_dict:
                result_dict[(a_type, b_type, c_type)] = []
            # SM75 builds only fp16/int8 activations with fp16 output.
            if a_type in ["kFloat16", "kS8"] and c_type == "kFloat16":
                sm_75_result_dict[(a_type, b_type, c_type)] = []
            for group_blocks, m_blocks, thread_configs in itertools.product(
                all_group_blocks, all_m_blocks, all_thread_configs
            ):
                thread_k, thread_n, threads = thread_configs
                if threads == 256:
                    # for small batch (m_blocks == 1),
                    # we only need (128, 128, 256)
                    # for large batch (m_blocks > 1),
                    # we only need (64, 256, 256)
                    if m_blocks <= 1 and (thread_k, thread_n) != (128, 128):
                        continue
                    if m_blocks > 1 and (thread_k, thread_n) != (64, 256):
                        continue
                config = {
                    "threads": threads,
                    "s_type": s_type,
                    # m_blocks == 0.5 denotes the 8-row block variant below.
                    "thread_m_blocks": max(m_blocks, 1),
                    "thread_k_blocks": thread_k // 16,
                    "thread_n_blocks": thread_n // 16,
                    "m_block_size_8": "true" if m_blocks == 0.5 else "false",
                    "stages": 4,
                    "group_blocks": group_blocks,
                    "is_zp_float": "false",
                }
                if SUPPORT_SM80:
                    result_dict[(a_type, b_type, c_type)].append(config)
                if (a_type, b_type, c_type) in sm_75_result_dict and SUPPORT_SM75:
                    # SM75 reuses the config with only 2 pipeline stages.
                    config_sm75 = config.copy()
                    config_sm75["stages"] = 2
                    sm_75_result_dict[(a_type, b_type, c_type)].append(config_sm75)
    kernel_selector_str = FILE_HEAD_COMMENT
    for result_dict_tmp in [result_dict, sm_75_result_dict]:
        for (a_type, b_type, c_type), config_list in result_dict_tmp.items():
            all_template_str_list = []
            if not config_list:
                continue
            for config in config_list:
                s_type = config["s_type"]
                # Render the explicit template instantiation for the .cu file.
                template_str = jinja2.Template(TEMPLATE).render(
                    a_type_id=f"vllm::{a_type}.id()",
                    b_type_id=f"vllm::{b_type}.id()",
                    c_type_id=f"vllm::{c_type}.id()",
                    s_type_id=f"vllm::{s_type}.id()",
                    **config,
                )
                all_template_str_list.append(template_str)
                # One selector branch per config; all parameters must match.
                conditions = [
                    f"a_type == vllm::{a_type}",
                    f"b_type == vllm::{b_type}",
                    f"c_type == vllm::{c_type}",
                    f"s_type == vllm::{s_type}",
                    f"threads == {config['threads']}",
                    f"thread_m_blocks == {config['thread_m_blocks']}",
                    f"thread_n_blocks == {config['thread_n_blocks']}",
                    f"thread_k_blocks == {config['thread_k_blocks']}",
                    f"m_block_size_8 == {config['m_block_size_8']}",
                    f"stages == {config['stages']}",
                    f"group_blocks == {config['group_blocks']}",
                    f"is_zp_float == {config['is_zp_float']}",
                ]
                conditions = " && ".join(conditions)
                # The very first branch is a plain `if`; the rest `else if`.
                if kernel_selector_str == FILE_HEAD_COMMENT:
                    kernel_selector_str += f"if ({conditions})\n kernel = "
                else:
                    kernel_selector_str += f"else if ({conditions})\n kernel = "
                kernel_template2 = (
                    "Marlin<{{a_type_id}}, {{b_type_id}}, {{c_type_id}}, "
                    "{{s_type_id}}, {{threads}}, {{thread_m_blocks}}, "
                    "{{thread_n_blocks}}, {{thread_k_blocks}}, "
                    "{{m_block_size_8}}, {{stages}}, {{group_blocks}}, "
                    "{{is_zp_float}}>;"
                )
                kernel_selector_str += (
                    jinja2.Template(kernel_template2).render(
                        a_type_id=f"vllm::{a_type}.id()",
                        b_type_id=f"vllm::{b_type}.id()",
                        c_type_id=f"vllm::{c_type}.id()",
                        s_type_id=f"vllm::{s_type}.id()",
                        **config,
                    )
                    + "\n"
                )
            file_content = FILE_HEAD + "\n\n"
            file_content += "\n\n".join(all_template_str_list) + "\n\n}\n"
            # File name encodes the target arch: fp8 activations target SM89.
            if a_type == "kFE4M3fn":
                filename = f"sm89_kernel_{a_type[1:]}_{b_type[1:]}_{c_type[1:]}.cu"
            elif result_dict_tmp is sm_75_result_dict:
                filename = f"sm75_kernel_{a_type[1:]}_{b_type[1:]}_{c_type[1:]}.cu"
            else:
                filename = f"sm80_kernel_{a_type[1:]}_{b_type[1:]}_{c_type[1:]}.cu"
            filename = filename.lower()
            with open(os.path.join(os.path.dirname(__file__), filename), "w") as f:
                f.write(file_content)
    # Fail loudly at runtime if an fp8-activation kernel is requested but
    # fp8 kernels were not built.
    if not SUPPORT_FP8 and kernel_selector_str != FILE_HEAD_COMMENT:
        kernel_selector_str += (
            "else if (a_type == vllm::kFE4M3fn)\n"
            " TORCH_CHECK(false, "
            '"marlin kernel with fp8 activation is not built.");'
        )
    with open(os.path.join(os.path.dirname(__file__), "kernel_selector.h"), "w") as f:
        f.write(kernel_selector_str)
if __name__ == "__main__":
    # Regenerate from scratch: drop stale generated sources, then emit fresh ones.
    remove_old_kernels()
    generate_new_kernels()
| python | Apache-2.0 | 0d4044edd85de30d7d4558aeea4d1e95c7c556d6 | 2026-01-04T14:38:19.902011Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/setup.py | setup.py | import setuptools
# The long description shown on PyPI is the repository readme.
with open("readme.md", "r", encoding="utf-8") as f:
    long_description = f.read()

setuptools.setup(
    name='labml_nn',
    version='0.5.1',
    author="Varuna Jayasiri, Nipun Wijerathne",
    author_email="vpjayasiri@gmail.com, hnipun@gmail.com",
    description="🧑🏫 Implementations/tutorials of deep learning papers with side-by-side notes 📝; including transformers (original, xl, switch, feedback, vit), optimizers (adam, radam, adabelief), gans(dcgan, cyclegan, stylegan2), 🎮 reinforcement learning (ppo, dqn), capsnet, distillation, diffusion, etc. 🧠",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/labmlai/annotated_deep_learning_paper_implementations",
    project_urls={
        'Documentation': 'https://nn.labml.ai'
    },
    # Ship only the annotated implementations; exclude sibling projects and tests.
    packages=setuptools.find_packages(exclude=('labml', 'labml.*',
                                               'labml_samples', 'labml_samples.*',
                                               'labml_helpers', 'labml_helpers.*',
                                               'test',
                                               'test.*')),
    install_requires=['labml',
                      'torch',
                      'torchtext',
                      'torchvision',
                      'einops',
                      'numpy',
                      'fairscale'],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    keywords='machine learning',
)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/utils/papers_list.py | utils/papers_list.py | import json
import re
from pathlib import Path
from labml import logger
from labml.logger import Text
# Root of the annotated-implementation sources that get scanned for citations.
HOME = Path('./labml_nn').absolute()
# NOTE(review): debug output left in — prints the resolved root on import.
print(HOME)
# Matches markdown links of the form `(https://arxiv.org/abs/<id>)` and
# captures the numeric paper id in the `id` group.
REGEX = re.compile(r"""
\(
https://arxiv\.org/abs/ # Start of a numeric entity reference
(?P<id>[0-9\.]+) # Paper ID
\)
""", re.VERBOSE)

# Rendered pages whose citations should not appear in the paper index.
IGNORE = {
    'neox/model.html',
    'transformers/index.html',
    'transformers/configs.html',
    'optimizers/noam.html',
    'transformers/basic/autoregressive_experiment.html',
    'transformers/xl/relative_mha.html',
    'capsule_networks/mnist.html',
    'transformers/rope/value_pe/index.html',
}

# Papers referenced only in passing; excluded from the index everywhere.
IGNORE_PAPERS = {
    '2002.04745', # On Layer Normalization in the Transformer Architecture
    '1606.08415', # Gaussian Error Linear Units (GELUs)
    '1710.10196', # Progressive Growing of GANs for Improved Quality, Stability, and Variation
    '1904.11486', # Making Convolutional Networks Shift-Invariant Again
    '1801.04406', # Which Training Methods for GANs do actually Converge?
    '1812.04948', # A Style-Based Generator Architecture for Generative Adversarial Networks
    '1705.10528', # Constrained Policy Optimization
}
def collect(path: Path):
    """Recursively scan `path` and extract cited arXiv paper ids.

    Returns a list of `{'url': <rendered html page>, 'arxiv_id': <id>}`
    records, one per distinct paper cited in each source file.
    """
    if not path.is_file():
        # Directory: merge the records of every entry.
        records = []
        for child in path.iterdir():
            records += collect(child)
        return records
    page = path.relative_to(HOME)
    # Only Python sources render to documentation pages.
    if page.suffix not in {'.py'}:
        return []
    # Map the source file to the HTML page it renders to.
    if page.stem == '__init__':
        page = page.parent / 'index.html'
    else:
        page = page.parent / f'{page.stem}.html'
    if str(page) in IGNORE:
        return []
    with open(str(path), 'r') as f:
        contents = f.read()
    # Distinct cited ids, excluding the explicitly ignored papers.
    papers = set()
    for match in REGEX.finditer(contents):
        paper_id = match.group('id')
        if paper_id in IGNORE_PAPERS:
            continue
        papers.add(paper_id)
    # Flag pages citing several papers so they can be reviewed.
    if len(papers) > 1:
        logger.log([(str(page), Text.key), ': ', str(papers)])
    return [{'url': str(page), 'arxiv_id': p} for p in papers]
def main():
    """Aggregate arXiv citations across the tree into `docs/papers.json`."""
    records = []
    for child in HOME.iterdir():
        records += collect(child)
    records.sort(key=lambda r: r['arxiv_id'])
    # Group page urls by paper id, preserving the sorted order.
    by_id = {}
    for record in records:
        by_id.setdefault(record['arxiv_id'], []).append(f'''https://nn.labml.ai/{record['url']}''')
    logger.log([('Papers', Text.key), ': ', f'{len(by_id) :,}'])
    with open(str(HOME.parent / 'docs' / 'papers.json'), 'w') as f:
        f.write(json.dumps(by_id, indent=1))


if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/utils/sitemap.py | utils/sitemap.py | from pathlib import Path
import git
# Source tree to index, and the git repo used for last-modified dates.
HOME = Path('./labml_nn')
REPO = git.Repo('.')
def collect(path: Path):
    """Walk the source tree and build one sitemap record per rendered page.

    Each record is `{'path': <html page>, 'date': <last commit date>}`.
    """
    if not path.is_file():
        pages = []
        for child in path.iterdir():
            pages += collect(child)
        return pages
    # The newest commit touching this file supplies the <lastmod> date;
    # files with no history are skipped.
    try:
        commit = next(iter(REPO.iter_commits(paths=path)))
    except StopIteration:
        return []
    page = path.relative_to(HOME)
    if page.suffix not in {'.py'}:
        return []
    # Source file -> the HTML page it renders to.
    page = page.parent / ('index.html' if page.stem == '__init__' else f'{page.stem}.html')
    return [{'path': str(page), 'date': str(commit.committed_datetime.date())}]
def main():
    """Build `docs/sitemap.xml` with one `<url>` entry per rendered page."""
    urls = []
    for f in HOME.iterdir():
        urls += collect(f)
    # Render each record as a sitemap <url> element.
    urls = [f'''
<url>
<loc>https://nn.labml.ai/{u['path']}</loc>
<lastmod>{u['date']}T16:30:00+00:00</lastmod>
<priority>1.00</priority>
</url>
''' for u in urls]
    urls = '\n'.join(urls)
    # NOTE(review): the template begins with a newline before the XML
    # declaration; strict parsers require `<?xml` at byte 0 — confirm the
    # published sitemap is accepted by consumers.
    xml = f'''
<?xml version="1.0" encoding="UTF-8"?>
<urlset
xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9
http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">
{urls}
</urlset>
'''
    with open(str(HOME.parent / 'docs' / 'sitemap.xml'), 'w') as f:
        f.write(xml)


if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/utils/diagrams.py | utils/diagrams.py | import shutil
from pathlib import Path
from typing import List
from xml.dom import minidom
import os
from labml import monit
# Repository root; the script is expected to run from the repo top level.
HOME = Path('.').absolute()

# CSS injected into every processed SVG: dark-theme colours keyed by the
# `*-stroke` / `*-fill` / `has-background` classes attached by `add_classes`.
STYLES = """
.black-stroke {
stroke: #aaa;
}
rect.black-stroke {
stroke: #444;
}
.black-fill {
fill: #ddd;
}
.white-fill {
fill: #333;
}
.blue-stroke {
stroke: #5b8fab;
}
.blue-fill {
fill: #356782;
}
.yellow-stroke {
stroke: #bbab52;
}
.yellow-fill {
fill: #a7942b;
}
.grey-stroke {
stroke: #484d5a;
}
.grey-fill {
fill: #2e323c;
}
.red-stroke {
stroke: #bb3232;
}
.red-fill {
fill: #901c1c;
}
.orange-stroke {
stroke: #a5753f;
}
.orange-fill {
fill: #82531e;
}
.purple-stroke {
stroke: #a556a5;
}
.purple-fill {
fill: #8a308a;
}
.green-stroke {
stroke: #80cc92;
}
.green-fill {
fill: #499e5d;
}
switch foreignObject div div div {
color: #ddd !important;
}
switch foreignObject div div div span {
color: #ddd !important;
}
.has-background {
background-color: #1d2127 !important;
}
"""

# draw.io default stroke colours -> semantic colour names used in STYLES.
STROKES = {
    '#000000': 'black',
    '#6c8ebf': 'blue',
    '#d6b656': 'yellow',
    '#666666': 'grey',
    '#b85450': 'red',
    '#d79b00': 'orange',
    '#9673a6': 'purple',
    '#82b366': 'green',
}

# draw.io default fill colours -> semantic colour names used in STYLES.
FILLS = {
    '#000000': 'black',
    '#ffffff': 'white',
    '#dae8fc': 'blue',
    '#fff2cc': 'yellow',
    '#f5f5f5': 'grey',
    '#f8cecc': 'red',
    '#ffe6cc': 'orange',
    '#e1d5e7': 'purple',
    '#d5e8d4': 'green',
}
def clear_switches(doc: minidom.Document):
    """Strip draw.io `<switch>` fallback wrappers, keeping only the `<text>` child.

    NOTE(review): currently unused — the call site in `parse` is commented out.
    """
    switches = doc.getElementsByTagName('switch')
    for s in switches:
        children = s.childNodes
        # draw.io appears to emit exactly two children per <switch>.
        assert len(children) == 2
        # A leading <g requiredFeatures=...> marks a pure fallback group:
        # drop the whole <switch>.
        if children[0].tagName == 'g' and 'requiredFeatures' in children[0].attributes:
            s.parentNode.removeChild(s)
            s.unlink()
            continue
        assert children[0].tagName == 'foreignObject'
        assert children[1].tagName == 'text'
        # Hoist the <text> node out, then delete the <switch> wrapper.
        c = children[1]
        s.removeChild(c)
        s.parentNode.insertBefore(c, s)
        s.parentNode.removeChild(s)
def add_class(node: minidom.Node, class_name: str):
    """Append `class_name` to the node's `class` attribute, creating it if absent."""
    if 'class' in node.attributes:
        existing = node.attributes['class'].value
        node.attributes['class'] = f'{existing} {class_name}'
    else:
        node.attributes['class'] = class_name
def add_bg_classes(nodes: List[minidom.Node]):
    """Tag nodes whose inline style sets a background colour with `has-background`."""
    for element in nodes:
        if 'style' not in element.attributes:
            continue
        inline_style = element.attributes['style'].value
        if 'background-color' in inline_style:
            add_class(element, 'has-background')
def add_stroke_classes(nodes: List[minidom.Node]):
    """Swap known draw.io `stroke` colours for the matching `*-stroke` theme class."""
    for element in nodes:
        if 'stroke' not in element.attributes:
            continue
        colour = element.attributes['stroke'].value
        if colour in STROKES:
            element.removeAttribute('stroke')
            add_class(element, f'{STROKES[colour]}-stroke')
def add_fill_classes(nodes: List[minidom.Node]):
    """Swap known draw.io `fill` colours for the matching `*-fill` theme class."""
    for element in nodes:
        if 'fill' not in element.attributes:
            continue
        colour = element.attributes['fill'].value
        if colour in FILLS:
            element.removeAttribute('fill')
            add_class(element, f'{FILLS[colour]}-fill')
def add_classes(doc: minidom.Document):
    """Replace hard-coded draw.io colours with theme classes across the document."""
    # Shapes carry both stroke and fill colours.
    for tag in ('path', 'rect', 'ellipse'):
        shapes = doc.getElementsByTagName(tag)
        add_stroke_classes(shapes)
        add_fill_classes(shapes)
    # Text only has a fill colour.
    add_fill_classes(doc.getElementsByTagName('text'))
    # HTML-in-SVG containers may carry inline background colours.
    for tag in ('div', 'span'):
        add_bg_classes(doc.getElementsByTagName(tag))
def parse(source: Path, dest: Path):
    """Rewrite one draw.io SVG for the dark theme and write it to `dest`.

    Widens the viewBox by a 10-unit margin, injects the `STYLES` stylesheet,
    and replaces hard-coded colours with theme classes via `add_classes`.
    """
    doc: minidom.Document = minidom.parse(str(source))
    svg = doc.getElementsByTagName('svg')
    # Exactly one root <svg> is expected in a draw.io export.
    assert len(svg) == 1
    svg = svg[0]
    # Drop draw.io's embedded diagram model to slim the file.
    if 'content' in svg.attributes:
        svg.removeAttribute('content')
    # svg.attributes['height'] = str(int(svg.attributes['height'].value[:-2]) + 30) + 'px'
    # svg.attributes['width'] = str(int(svg.attributes['width'].value[:-2]) + 30) + 'px'
    # Expand the viewBox by 10 units on every side for breathing room.
    view_box = svg.attributes['viewBox'].value.split(' ')
    view_box = [float(v) for v in view_box]
    view_box[0] -= 10
    view_box[1] -= 10
    view_box[2] += 20
    view_box[3] += 20
    svg.attributes['viewBox'] = ' '.join([str(v) for v in view_box])
    # Dark page background behind the whole diagram.
    svg.attributes['style'] = 'background: #1d2127;' # padding: 10px;'
    # clear_switches(doc)
    # Inject the dark-theme stylesheet as the first child of <svg>.
    style = doc.createElement('style')
    style.appendChild(doc.createTextNode(STYLES))
    svg.insertBefore(style, svg.childNodes[0])
    add_classes(doc)
    with open(str(dest), 'w') as f:
        doc.writexml(f)
def recurse(path: Path):
    """Flatten the directory tree rooted at `path` into a list of file paths."""
    if path.is_file():
        return [path]
    collected = []
    for child in path.iterdir():
        collected += recurse(child)
    return collected
def main():
    """Mirror every file under `diagrams/` into `docs/`, dark-theming SVGs."""
    diagrams_path = HOME / 'diagrams'
    docs_path = HOME / 'docs'
    # For first invocation
    os.makedirs(diagrams_path, exist_ok=True)
    for source_path in recurse(diagrams_path):
        rel = source_path.relative_to(diagrams_path)
        dest_path = docs_path / rel
        if not dest_path.parent.exists():
            dest_path.parent.mkdir(parents=True)
        with monit.section(str(rel)):
            if source_path.suffix == '.svg':
                # Dark-theme rewrite for draw.io SVG exports.
                parse(source_path, dest_path)
            else:
                # Anything else is copied through untouched.
                shutil.copy(str(source_path), str(dest_path))


if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/utils/__init__.py | utils/__init__.py | python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false | |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/__init__.py | labml_nn/__init__.py | """
# [Annotated Research Paper Implementations: Transformers, StyleGAN, Stable Diffusion, DDPM/DDIM, LayerNorm, Nucleus Sampling and more](index.html)
This is a collection of simple PyTorch implementations of
neural networks and related algorithms.
[These implementations](https://github.com/labmlai/annotated_deep_learning_paper_implementations) are documented with explanations,
and the [website](index.html)
renders these as side-by-side formatted notes.
We believe these would help you understand these algorithms better.

We are actively maintaining this repo and adding new
implementations.
[](https://twitter.com/labmlai) for updates.
## Translations
### **[English (original)](https://nn.labml.ai)**
### **[Chinese (translated)](https://nn.labml.ai/zh/)**
### **[Japanese (translated)](https://nn.labml.ai/ja/)**
## Paper Implementations
#### ✨ [Transformers](transformers/index.html)
* [JAX implementation](transformers/jax_transformer/index.html)
* [Multi-headed attention](transformers/mha.html)
* [Triton Flash Attention](transformers/flash/index.html)
* [Transformer building blocks](transformers/models.html)
* [Transformer XL](transformers/xl/index.html)
* [Relative multi-headed attention](transformers/xl/relative_mha.html)
* [Rotary Positional Embeddings (RoPE)](transformers/rope/index.html)
* [Attention with Linear Biases (ALiBi)](transformers/alibi/index.html)
* [RETRO](transformers/retro/index.html)
* [Compressive Transformer](transformers/compressive/index.html)
* [GPT Architecture](transformers/gpt/index.html)
* [GLU Variants](transformers/glu_variants/simple.html)
* [kNN-LM: Generalization through Memorization](transformers/knn/index.html)
* [Feedback Transformer](transformers/feedback/index.html)
* [Switch Transformer](transformers/switch/index.html)
* [Fast Weights Transformer](transformers/fast_weights/index.html)
* [FNet](transformers/fnet/index.html)
* [Attention Free Transformer](transformers/aft/index.html)
* [Masked Language Model](transformers/mlm/index.html)
* [MLP-Mixer: An all-MLP Architecture for Vision](transformers/mlp_mixer/index.html)
* [Pay Attention to MLPs (gMLP)](transformers/gmlp/index.html)
* [Vision Transformer (ViT)](transformers/vit/index.html)
* [Primer EZ](transformers/primer_ez/index.html)
* [Hourglass](transformers/hour_glass/index.html)
#### ✨ [Low-Rank Adaptation (LoRA)](lora/index.html)
#### ✨ [Eleuther GPT-NeoX](neox/index.html)
* [Generate on a 48GB GPU](neox/samples/generate.html)
* [Finetune on two 48GB GPUs](neox/samples/finetune.html)
* [LLM.int8()](neox/utils/llm_int8.html)
#### ✨ [Diffusion models](diffusion/index.html)
* [Denoising Diffusion Probabilistic Models (DDPM)](diffusion/ddpm/index.html)
* [Denoising Diffusion Implicit Models (DDIM)](diffusion/stable_diffusion/sampler/ddim.html)
* [Latent Diffusion Models](diffusion/stable_diffusion/latent_diffusion.html)
* [Stable Diffusion](diffusion/stable_diffusion/index.html)
#### ✨ [Generative Adversarial Networks](gan/index.html)
* [Original GAN](gan/original/index.html)
* [GAN with deep convolutional network](gan/dcgan/index.html)
* [Cycle GAN](gan/cycle_gan/index.html)
* [Wasserstein GAN](gan/wasserstein/index.html)
* [Wasserstein GAN with Gradient Penalty](gan/wasserstein/gradient_penalty/index.html)
* [StyleGAN 2](gan/stylegan/index.html)
#### ✨ [Recurrent Highway Networks](recurrent_highway_networks/index.html)
#### ✨ [LSTM](lstm/index.html)
#### ✨ [HyperNetworks - HyperLSTM](hypernetworks/hyper_lstm.html)
#### ✨ [ResNet](resnet/index.html)
#### ✨ [ConvMixer](conv_mixer/index.html)
#### ✨ [Capsule Networks](capsule_networks/index.html)
#### ✨ [U-Net](unet/index.html)
#### ✨ [Sketch RNN](sketch_rnn/index.html)
#### ✨ Graph Neural Networks
* [Graph Attention Networks (GAT)](graphs/gat/index.html)
* [Graph Attention Networks v2 (GATv2)](graphs/gatv2/index.html)
#### ✨ [Reinforcement Learning](rl/index.html)
* [Proximal Policy Optimization](rl/ppo/index.html) with
[Generalized Advantage Estimation](rl/ppo/gae.html)
* [Deep Q Networks](rl/dqn/index.html)
with [Dueling Network](rl/dqn/model.html),
[Prioritized Replay](rl/dqn/replay_buffer.html)
and Double Q Network.
#### ✨ [Counterfactual Regret Minimization (CFR)](cfr/index.html)
Solving games with incomplete information such as poker with CFR.
* [Kuhn Poker](cfr/kuhn/index.html)
#### ✨ [Optimizers](optimizers/index.html)
* [Adam](optimizers/adam.html)
* [AMSGrad](optimizers/amsgrad.html)
* [Adam Optimizer with warmup](optimizers/adam_warmup.html)
* [Noam Optimizer](optimizers/noam.html)
* [Rectified Adam Optimizer](optimizers/radam.html)
* [AdaBelief Optimizer](optimizers/ada_belief.html)
* [Sophia-G Optimizer](optimizers/sophia.html)
#### ✨ [Normalization Layers](normalization/index.html)
* [Batch Normalization](normalization/batch_norm/index.html)
* [Layer Normalization](normalization/layer_norm/index.html)
* [Instance Normalization](normalization/instance_norm/index.html)
* [Group Normalization](normalization/group_norm/index.html)
* [Weight Standardization](normalization/weight_standardization/index.html)
* [Batch-Channel Normalization](normalization/batch_channel_norm/index.html)
* [DeepNorm](normalization/deep_norm/index.html)
#### ✨ [Distillation](distillation/index.html)
#### ✨ [Adaptive Computation](adaptive_computation/index.html)
* [PonderNet](adaptive_computation/ponder_net/index.html)
#### ✨ [Uncertainty](uncertainty/index.html)
* [Evidential Deep Learning to Quantify Classification Uncertainty](uncertainty/evidence/index.html)
#### ✨ [Activations](activations/index.html)
* [Fuzzy Tiling Activations](activations/fta/index.html)
#### ✨ [Language Model Sampling Techniques](sampling/index.html)
* [Greedy Sampling](sampling/greedy.html)
* [Temperature Sampling](sampling/temperature.html)
* [Top-k Sampling](sampling/top_k.html)
* [Nucleus Sampling](sampling/nucleus.html)
#### ✨ [Scalable Training/Inference](scaling/index.html)
* [Zero3 memory optimizations](scaling/zero3/index.html)
### Installation
```bash
pip install labml-nn
```
"""
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/rl/__init__.py | labml_nn/rl/__init__.py | """
---
title: Reinforcement Learning Algorithms
summary: >
This is a collection of PyTorch implementations/tutorials of reinforcement learning algorithms.
It currently includes Proximal Policy Optimization, Generalized Advantage Estimation, and
Deep Q Networks.
---
# Reinforcement Learning Algorithms
* [Proximal Policy Optimization](ppo)
* [This is an experiment](ppo/experiment.html) that runs a PPO agent on Atari Breakout.
* [Generalized advantage estimation](ppo/gae.html)
* [Deep Q Networks](dqn)
* [This is an experiment](dqn/experiment.html) that runs a DQN agent on Atari Breakout.
* [Model](dqn/model.html) with dueling network
* [Prioritized Experience Replay Buffer](dqn/replay_buffer.html)
[This is the implementation for OpenAI game wrapper](game.html) using `multiprocessing`.
""" | python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/rl/game.py | labml_nn/rl/game.py | """
---
title: Atari wrapper with multi-processing
summary: This implements the Atari games with multi-processing.
---
# Atari wrapper with multi-processing
"""
import multiprocessing
import multiprocessing.connection
import cv2
import gym
import numpy as np
class Game:
    """
    <a id="GameEnvironment"></a>
    ## Game environment

    This is a wrapper for OpenAI gym game environment.
    We do a few things here:

    1. Apply the same action on four frames and get the last frame
    2. Convert observation frames to gray and scale it to (84, 84)
    3. Stack four frames of the last four actions
    4. Add episode information (total reward for the entire episode) for monitoring
    5. Restrict an episode to a single life (game has 5 lives, we reset after every single life)

    #### Observation format
    Observation is tensor of size (4, 84, 84). It is four frames
    (images of the game screen) stacked on first axis.
    i.e, each channel is a frame.
    """

    def __init__(self, seed: int):
        # create environment
        self.env = gym.make('BreakoutNoFrameskip-v4')
        self.env.seed(seed)
        # tensor for a stack of 4 frames
        self.obs_4 = np.zeros((4, 84, 84))
        # buffer to keep the maximum of last 2 frames
        self.obs_2_max = np.zeros((2, 84, 84))
        # keep track of the episode rewards
        self.rewards = []
        # and number of lives left
        self.lives = 0

    def step(self, action):
        """
        ### Step
        Executes `action` for 4 time steps and
        returns a tuple of (observation, reward, done, episode_info).

        * `observation`: stacked 4 frames (this frame and frames for last 3 actions)
        * `reward`: total reward while the action was executed
        * `done`: whether the episode finished (a life lost)
        * `episode_info`: episode information if completed
        """
        reward = 0.
        done = None
        # run for 4 steps
        for i in range(4):
            # execute the action in the OpenAI Gym environment
            obs, r, done, info = self.env.step(action)
            # only the last two of the four frames are kept; taking their
            # max below smooths over Atari sprite flicker
            if i >= 2:
                self.obs_2_max[i % 2] = self._process_obs(obs)
            reward += r
            # get number of lives left
            lives = self.env.unwrapped.ale.lives()
            # reset if a life is lost
            if lives < self.lives:
                done = True
                break
        # maintain rewards for each step
        self.rewards.append(reward)
        if done:
            # if finished, set episode information if episode is over, and reset
            episode_info = {"reward": sum(self.rewards), "length": len(self.rewards)}
            self.reset()
        else:
            episode_info = None
        # get the max of last two frames
        obs = self.obs_2_max.max(axis=0)
        # push it to the stack of 4 frames
        # NOTE: on episode end this runs *after* `reset()`, so the newest
        # frame in the returned stack is the terminal observation
        self.obs_4 = np.roll(self.obs_4, shift=-1, axis=0)
        self.obs_4[-1] = obs
        return self.obs_4, reward, done, episode_info

    def reset(self):
        """
        ### Reset environment
        Clean up episode info and 4 frame stack
        """
        # reset OpenAI Gym environment
        obs = self.env.reset()
        # reset caches: fill all four stack slots with the initial frame
        obs = self._process_obs(obs)
        for i in range(4):
            self.obs_4[i] = obs
        self.rewards = []
        self.lives = self.env.unwrapped.ale.lives()
        return self.obs_4

    @staticmethod
    def _process_obs(obs):
        """
        #### Process game frames
        Convert game frames to gray and rescale to 84x84
        """
        obs = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
        obs = cv2.resize(obs, (84, 84), interpolation=cv2.INTER_AREA)
        return obs
return obs
def worker_process(remote: multiprocessing.connection.Connection, seed: int):
    """
    ## Worker Process

    Runs a `Game` instance in a child process, executing `step`/`reset`/`close`
    commands received over `remote` and sending back the results.
    """
    # each worker owns its own game simulator
    game = Game(seed)
    # serve commands until asked to close
    while True:
        command, payload = remote.recv()
        if command == "close":
            remote.close()
            break
        if command == "step":
            remote.send(game.step(payload))
        elif command == "reset":
            remote.send(game.reset())
        else:
            raise NotImplementedError
class Worker:
    """Handle to a game simulator running in a separate child process."""

    def __init__(self, seed):
        # `child` stays with us for sending commands; the other end of the
        # pipe goes to the worker process
        self.child, remote_end = multiprocessing.Pipe()
        self.process = multiprocessing.Process(target=worker_process, args=(remote_end, seed))
        self.process.start()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/rl/ppo/gae.py | labml_nn/rl/ppo/gae.py | """
---
title: Generalized Advantage Estimation (GAE)
summary: A PyTorch implementation/tutorial of Generalized Advantage Estimation (GAE).
---
# Generalized Advantage Estimation (GAE)
This is a [PyTorch](https://pytorch.org) implementation of paper
[Generalized Advantage Estimation](https://arxiv.org/abs/1506.02438).
You can find an experiment that uses it [here](experiment.html).
"""
import numpy as np
class GAE:
    """
    ## Generalized Advantage Estimation

    Computes $\\hat{A_t} = \\delta_t + \\gamma \\lambda \\hat{A_{t+1}}$ where
    $\\delta_t = r_t + \\gamma V(s_{t+1}) - V(s_t)$, i.e. an exponentially
    weighted average of k-step advantage estimates that trades off bias
    (small $\\lambda$) against variance (large $\\lambda$).
    """

    def __init__(self, n_workers: int, worker_steps: int, gamma: float, lambda_: float):
        self.lambda_ = lambda_
        self.gamma = gamma
        self.worker_steps = worker_steps
        self.n_workers = n_workers

    def __call__(self, done: np.ndarray, rewards: np.ndarray, values: np.ndarray) -> np.ndarray:
        """
        ### Calculate advantages

        Works backwards through time, carrying the running advantage
        $\\hat{A_{t+1}}$ and the bootstrap value $V(s_{t+1})$, both zeroed
        out across episode boundaries (`done`).
        """
        advantages = np.zeros((self.n_workers, self.worker_steps), dtype=np.float32)
        running_advantage = 0
        # bootstrap value $V(s_{t+1})$ for the final step
        next_value = values[:, -1]
        for step in reversed(range(self.worker_steps)):
            # zero out carried quantities where the episode ended at `step`
            alive = 1.0 - done[:, step]
            next_value = next_value * alive
            running_advantage = running_advantage * alive
            # $\\delta_t = r_t + \\gamma V(s_{t+1}) - V(s_t)$
            delta = rewards[:, step] + self.gamma * next_value - values[:, step]
            # $\\hat{A_t} = \\delta_t + \\gamma \\lambda \\hat{A_{t+1}}$
            running_advantage = delta + self.gamma * self.lambda_ * running_advantage
            advantages[:, step] = running_advantage
            next_value = values[:, step]
        return advantages
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/rl/ppo/experiment.py | labml_nn/rl/ppo/experiment.py | """
---
title: PPO Experiment with Atari Breakout
summary: Annotated implementation to train a PPO agent on Atari Breakout game.
---
# PPO Experiment with Atari Breakout
This experiment trains Proximal Policy Optimization (PPO) agent Atari Breakout game on OpenAI Gym.
It runs the [game environments on multiple processes](../game.html) to sample efficiently.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/rl/ppo/experiment.ipynb)
"""
from typing import Dict
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.distributions import Categorical
from labml import monit, tracker, logger, experiment
from labml.configs import FloatDynamicHyperParam, IntDynamicHyperParam
from labml_nn.rl.game import Worker
from labml_nn.rl.ppo import ClippedPPOLoss, ClippedValueFunctionLoss
from labml_nn.rl.ppo.gae import GAE
# Select device: use the first CUDA GPU when available, otherwise the CPU.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
class Model(nn.Module):
    """
    ## Model

    Actor-critic network: a shared convolutional trunk over the stacked
    84x84x4 observation, followed by a policy head (logits over the 4
    Breakout actions) and a value head (scalar state value).
    """

    def __init__(self):
        super().__init__()
        # Shared trunk: 84x84x4 -> 20x20x32 -> 9x9x64 -> 7x7x64
        self.conv1 = nn.Conv2d(in_channels=4, out_channels=32, kernel_size=8, stride=4)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1)
        # Flattened conv features -> 512-dim embedding
        self.lin = nn.Linear(in_features=7 * 7 * 64, out_features=512)
        # Policy head: logits for $\pi$ over 4 actions
        self.pi_logits = nn.Linear(in_features=512, out_features=4)
        # Value head: scalar state-value estimate
        self.value = nn.Linear(in_features=512, out_features=1)
        self.activation = nn.ReLU()

    def forward(self, obs: torch.Tensor):
        x = self.activation(self.conv1(obs))
        x = self.activation(self.conv2(x))
        x = self.activation(self.conv3(x))
        x = self.activation(self.lin(x.reshape((-1, 7 * 7 * 64))))
        pi = Categorical(logits=self.pi_logits(x))
        value = self.value(x).reshape(-1)
        return pi, value
def obs_to_torch(obs: np.ndarray) -> torch.Tensor:
"""Scale observations from `[0, 255]` to `[0, 1]`"""
return torch.tensor(obs, dtype=torch.float32, device=device) / 255.
class Trainer:
"""
## Trainer
"""
def __init__(self, *,
updates: int, epochs: IntDynamicHyperParam,
n_workers: int, worker_steps: int, batches: int,
value_loss_coef: FloatDynamicHyperParam,
entropy_bonus_coef: FloatDynamicHyperParam,
clip_range: FloatDynamicHyperParam,
learning_rate: FloatDynamicHyperParam,
):
# #### Configurations
# number of updates
self.updates = updates
# number of epochs to train the model with sampled data
self.epochs = epochs
# number of worker processes
self.n_workers = n_workers
# number of steps to run on each process for a single update
self.worker_steps = worker_steps
# number of mini batches
self.batches = batches
# total number of samples for a single update
self.batch_size = self.n_workers * self.worker_steps
# size of a mini batch
self.mini_batch_size = self.batch_size // self.batches
assert (self.batch_size % self.batches == 0)
# Value loss coefficient
self.value_loss_coef = value_loss_coef
# Entropy bonus coefficient
self.entropy_bonus_coef = entropy_bonus_coef
# Clipping range
self.clip_range = clip_range
# Learning rate
self.learning_rate = learning_rate
# #### Initialize
# create workers
self.workers = [Worker(47 + i) for i in range(self.n_workers)]
# initialize tensors for observations
self.obs = np.zeros((self.n_workers, 4, 84, 84), dtype=np.uint8)
for worker in self.workers:
worker.child.send(("reset", None))
for i, worker in enumerate(self.workers):
self.obs[i] = worker.child.recv()
# model
self.model = Model().to(device)
# optimizer
self.optimizer = optim.Adam(self.model.parameters(), lr=2.5e-4)
# GAE with $\gamma = 0.99$ and $\lambda = 0.95$
self.gae = GAE(self.n_workers, self.worker_steps, 0.99, 0.95)
# PPO Loss
self.ppo_loss = ClippedPPOLoss()
# Value Loss
self.value_loss = ClippedValueFunctionLoss()
def sample(self) -> Dict[str, torch.Tensor]:
"""
### Sample data with current policy
"""
rewards = np.zeros((self.n_workers, self.worker_steps), dtype=np.float32)
actions = np.zeros((self.n_workers, self.worker_steps), dtype=np.int32)
done = np.zeros((self.n_workers, self.worker_steps), dtype=np.bool)
obs = np.zeros((self.n_workers, self.worker_steps, 4, 84, 84), dtype=np.uint8)
log_pis = np.zeros((self.n_workers, self.worker_steps), dtype=np.float32)
values = np.zeros((self.n_workers, self.worker_steps + 1), dtype=np.float32)
with torch.no_grad():
# sample `worker_steps` from each worker
for t in range(self.worker_steps):
# `self.obs` keeps track of the last observation from each worker,
# which is the input for the model to sample the next action
obs[:, t] = self.obs
# sample actions from $\pi_{\theta_{OLD}}$ for each worker;
# this returns arrays of size `n_workers`
pi, v = self.model(obs_to_torch(self.obs))
values[:, t] = v.cpu().numpy()
a = pi.sample()
actions[:, t] = a.cpu().numpy()
log_pis[:, t] = pi.log_prob(a).cpu().numpy()
# run sampled actions on each worker
for w, worker in enumerate(self.workers):
worker.child.send(("step", actions[w, t]))
for w, worker in enumerate(self.workers):
# get results after executing the actions
self.obs[w], rewards[w, t], done[w, t], info = worker.child.recv()
# collect episode info, which is available if an episode finished;
# this includes total reward and length of the episode -
# look at `Game` to see how it works.
if info:
tracker.add('reward', info['reward'])
tracker.add('length', info['length'])
# Get value of after the final step
_, v = self.model(obs_to_torch(self.obs))
values[:, self.worker_steps] = v.cpu().numpy()
# calculate advantages
advantages = self.gae(done, rewards, values)
#
samples = {
'obs': obs,
'actions': actions,
'values': values[:, :-1],
'log_pis': log_pis,
'advantages': advantages
}
# samples are currently in `[workers, time_step]` table,
# we should flatten it for training
samples_flat = {}
for k, v in samples.items():
v = v.reshape(v.shape[0] * v.shape[1], *v.shape[2:])
if k == 'obs':
samples_flat[k] = obs_to_torch(v)
else:
samples_flat[k] = torch.tensor(v, device=device)
return samples_flat
def train(self, samples: Dict[str, torch.Tensor]):
"""
### Train the model based on samples
"""
# It learns faster with a higher number of epochs,
# but becomes a little unstable; that is,
# the average episode reward does not monotonically increase
# over time.
# May be reducing the clipping range might solve it.
for _ in range(self.epochs()):
# shuffle for each epoch
indexes = torch.randperm(self.batch_size)
# for each mini batch
for start in range(0, self.batch_size, self.mini_batch_size):
# get mini batch
end = start + self.mini_batch_size
mini_batch_indexes = indexes[start: end]
mini_batch = {}
for k, v in samples.items():
mini_batch[k] = v[mini_batch_indexes]
# train
loss = self._calc_loss(mini_batch)
# Set learning rate
for pg in self.optimizer.param_groups:
pg['lr'] = self.learning_rate()
# Zero out the previously calculated gradients
self.optimizer.zero_grad()
# Calculate gradients
loss.backward()
# Clip gradients
torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=0.5)
# Update parameters based on gradients
self.optimizer.step()
@staticmethod
def _normalize(adv: torch.Tensor):
"""#### Normalize advantage function"""
return (adv - adv.mean()) / (adv.std() + 1e-8)
def _calc_loss(self, samples: Dict[str, torch.Tensor]) -> torch.Tensor:
"""
### Calculate total loss
"""
# $R_t$ returns sampled from $\pi_{\theta_{OLD}}$
sampled_return = samples['values'] + samples['advantages']
# $\bar{A_t} = \frac{\hat{A_t} - \mu(\hat{A_t})}{\sigma(\hat{A_t})}$,
# where $\hat{A_t}$ is advantages sampled from $\pi_{\theta_{OLD}}$.
# Refer to sampling function in [Main class](#main) below
# for the calculation of $\hat{A}_t$.
sampled_normalized_advantage = self._normalize(samples['advantages'])
# Sampled observations are fed into the model to get $\pi_\theta(a_t|s_t)$ and $V^{\pi_\theta}(s_t)$;
# we are treating observations as state
pi, value = self.model(samples['obs'])
# $-\log \pi_\theta (a_t|s_t)$, $a_t$ are actions sampled from $\pi_{\theta_{OLD}}$
log_pi = pi.log_prob(samples['actions'])
# Calculate policy loss
policy_loss = self.ppo_loss(log_pi, samples['log_pis'], sampled_normalized_advantage, self.clip_range())
# Calculate Entropy Bonus
#
# $\mathcal{L}^{EB}(\theta) =
# \mathbb{E}\Bigl[ S\bigl[\pi_\theta\bigr] (s_t) \Bigr]$
entropy_bonus = pi.entropy()
entropy_bonus = entropy_bonus.mean()
# Calculate value function loss
value_loss = self.value_loss(value, samples['values'], sampled_return, self.clip_range())
# $\mathcal{L}^{CLIP+VF+EB} (\theta) =
# \mathcal{L}^{CLIP} (\theta) +
# c_1 \mathcal{L}^{VF} (\theta) - c_2 \mathcal{L}^{EB}(\theta)$
loss = (policy_loss
+ self.value_loss_coef() * value_loss
- self.entropy_bonus_coef() * entropy_bonus)
# for monitoring
approx_kl_divergence = .5 * ((samples['log_pis'] - log_pi) ** 2).mean()
# Add to tracker
tracker.add({'policy_reward': -policy_loss,
'value_loss': value_loss,
'entropy_bonus': entropy_bonus,
'kl_div': approx_kl_divergence,
'clip_fraction': self.ppo_loss.clip_fraction})
return loss
def run_training_loop(self):
"""
### Run training loop
"""
# last 100 episode information
tracker.set_queue('reward', 100, True)
tracker.set_queue('length', 100, True)
for update in monit.loop(self.updates):
# sample with current policy
samples = self.sample()
# train the model
self.train(samples)
# Save tracked indicators.
tracker.save()
# Add a new line to the screen periodically
if (update + 1) % 1_000 == 0:
logger.log()
def destroy(self):
"""
### Destroy
Stop the workers
"""
for worker in self.workers:
worker.child.send(("close", None))
def main():
# Create the experiment
experiment.create(name='ppo')
# Configurations
configs = {
# Number of updates
'updates': 10000,
# ⚙️ Number of epochs to train the model with sampled data.
# You can change this while the experiment is running.
'epochs': IntDynamicHyperParam(8),
# Number of worker processes
'n_workers': 8,
# Number of steps to run on each process for a single update
'worker_steps': 128,
# Number of mini batches
'batches': 4,
# ⚙️ Value loss coefficient.
# You can change this while the experiment is running.
'value_loss_coef': FloatDynamicHyperParam(0.5),
# ⚙️ Entropy bonus coefficient.
# You can change this while the experiment is running.
'entropy_bonus_coef': FloatDynamicHyperParam(0.01),
# ⚙️ Clip range.
'clip_range': FloatDynamicHyperParam(0.1),
# You can change this while the experiment is running.
# ⚙️ Learning rate.
'learning_rate': FloatDynamicHyperParam(1e-3, (0, 1e-3)),
}
experiment.configs(configs)
# Initialize the trainer
m = Trainer(**configs)
# Run and monitor the experiment
with experiment.start():
m.run_training_loop()
# Stop the workers
m.destroy()
# ## Run it
if __name__ == "__main__":
main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/rl/ppo/__init__.py | labml_nn/rl/ppo/__init__.py | """
---
title: Proximal Policy Optimization - PPO
summary: >
An annotated implementation of Proximal Policy Optimization - PPO algorithm in PyTorch.
---
# Proximal Policy Optimization - PPO
This is a [PyTorch](https://pytorch.org) implementation of
[Proximal Policy Optimization - PPO](https://arxiv.org/abs/1707.06347).
PPO is a policy gradient method for reinforcement learning.
Simple policy gradient methods do a single gradient update per sample (or a set of samples).
Doing multiple gradient steps for a single sample causes problems
because the policy deviates too much, producing a bad policy.
PPO lets us do multiple gradient updates per sample by trying to keep the
policy close to the policy that was used to sample data.
It does so by clipping gradient flow if the updated policy
is not close to the policy used to sample the data.
You can find an experiment that uses it [here](experiment.html).
The experiment uses [Generalized Advantage Estimation](gae.html).
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/rl/ppo/experiment.ipynb)
"""
import torch
from labml_nn.rl.ppo.gae import GAE
from torch import nn
class ClippedPPOLoss(nn.Module):
"""
## PPO Loss
Here's how the PPO update rule is derived.
We want to maximize policy reward
$$\max_\theta J(\pi_\theta) =
\mathop{\mathbb{E}}_{\tau \sim \pi_\theta}\Biggl[\sum_{t=0}^\infty \gamma^t r_t \Biggr]$$
where $r$ is the reward, $\pi$ is the policy, $\tau$ is a trajectory sampled from policy,
and $\gamma$ is the discount factor between $[0, 1]$.
\begin{align}
\mathbb{E}_{\tau \sim \pi_\theta} \Biggl[
\sum_{t=0}^\infty \gamma^t A^{\pi_{OLD}}(s_t, a_t)
\Biggr] &=
\\
\mathbb{E}_{\tau \sim \pi_\theta} \Biggl[
\sum_{t=0}^\infty \gamma^t \Bigl(
Q^{\pi_{OLD}}(s_t, a_t) - V^{\pi_{OLD}}(s_t)
\Bigr)
\Biggr] &=
\\
\mathbb{E}_{\tau \sim \pi_\theta} \Biggl[
\sum_{t=0}^\infty \gamma^t \Bigl(
r_t + V^{\pi_{OLD}}(s_{t+1}) - V^{\pi_{OLD}}(s_t)
\Bigr)
\Biggr] &=
\\
\mathbb{E}_{\tau \sim \pi_\theta} \Biggl[
\sum_{t=0}^\infty \gamma^t \Bigl(
r_t
\Bigr)
\Biggr]
- \mathbb{E}_{\tau \sim \pi_\theta}
\Biggl[V^{\pi_{OLD}}(s_0)\Biggr] &=
J(\pi_\theta) - J(\pi_{\theta_{OLD}})
\end{align}
So,
$$\max_\theta J(\pi_\theta) =
\max_\theta \mathbb{E}_{\tau \sim \pi_\theta} \Biggl[
\sum_{t=0}^\infty \gamma^t A^{\pi_{OLD}}(s_t, a_t)
\Biggr]$$
Define discounted-future state distribution,
$$d^\pi(s) = (1 - \gamma) \sum_{t=0}^\infty \gamma^t P(s_t = s | \pi)$$
Then,
\begin{align}
J(\pi_\theta) - J(\pi_{\theta_{OLD}})
&= \mathbb{E}_{\tau \sim \pi_\theta} \Biggl[
\sum_{t=0}^\infty \gamma^t A^{\pi_{OLD}}(s_t, a_t)
\Biggr]
\\
&= \frac{1}{1 - \gamma}
\mathbb{E}_{s \sim d^{\pi_\theta}, a \sim \pi_\theta} \Bigl[
A^{\pi_{OLD}}(s, a)
\Bigr]
\end{align}
Importance sampling $a$ from $\pi_{\theta_{OLD}}$,
\begin{align}
J(\pi_\theta) - J(\pi_{\theta_{OLD}})
&= \frac{1}{1 - \gamma}
\mathbb{E}_{s \sim d^{\pi_\theta}, a \sim \pi_\theta} \Bigl[
A^{\pi_{OLD}}(s, a)
\Bigr]
\\
&= \frac{1}{1 - \gamma}
\mathbb{E}_{s \sim d^{\pi_\theta}, a \sim \pi_{\theta_{OLD}}} \Biggl[
\frac{\pi_\theta(a|s)}{\pi_{\theta_{OLD}}(a|s)} A^{\pi_{OLD}}(s, a)
\Biggr]
\end{align}
Then we assume $d^\pi_\theta(s)$ and $d^\pi_{\theta_{OLD}}(s)$ are similar.
The error we introduce to $J(\pi_\theta) - J(\pi_{\theta_{OLD}})$
by this assumption is bound by the KL divergence between
$\pi_\theta$ and $\pi_{\theta_{OLD}}$.
[Constrained Policy Optimization](https://arxiv.org/abs/1705.10528)
shows the proof of this. I haven't read it.
\begin{align}
J(\pi_\theta) - J(\pi_{\theta_{OLD}})
&= \frac{1}{1 - \gamma}
\mathop{\mathbb{E}}_{s \sim d^{\pi_\theta} \atop a \sim \pi_{\theta_{OLD}}} \Biggl[
\frac{\pi_\theta(a|s)}{\pi_{\theta_{OLD}}(a|s)} A^{\pi_{OLD}}(s, a)
\Biggr]
\\
&\approx \frac{1}{1 - \gamma}
\mathop{\mathbb{E}}_{\textcolor{orange}{s \sim d^{\pi_{\theta_{OLD}}}}
\atop a \sim \pi_{\theta_{OLD}}} \Biggl[
\frac{\pi_\theta(a|s)}{\pi_{\theta_{OLD}}(a|s)} A^{\pi_{OLD}}(s, a)
\Biggr]
\\
&= \frac{1}{1 - \gamma} \mathcal{L}^{CPI}
\end{align}
"""
def __init__(self):
super().__init__()
def forward(self, log_pi: torch.Tensor, sampled_log_pi: torch.Tensor,
advantage: torch.Tensor, clip: float) -> torch.Tensor:
# ratio $r_t(\theta) = \frac{\pi_\theta (a_t|s_t)}{\pi_{\theta_{OLD}} (a_t|s_t)}$;
# *this is different from rewards* $r_t$.
ratio = torch.exp(log_pi - sampled_log_pi)
# ### Cliping the policy ratio
#
# \begin{align}
# \mathcal{L}^{CLIP}(\theta) =
# \mathbb{E}_{a_t, s_t \sim \pi_{\theta{OLD}}} \biggl[
# min \Bigl(r_t(\theta) \bar{A_t},
# clip \bigl(
# r_t(\theta), 1 - \epsilon, 1 + \epsilon
# \bigr) \bar{A_t}
# \Bigr)
# \biggr]
# \end{align}
#
# The ratio is clipped to be close to 1.
# We take the minimum so that the gradient will only pull
# $\pi_\theta$ towards $\pi_{\theta_{OLD}}$ if the ratio is
# not between $1 - \epsilon$ and $1 + \epsilon$.
# This keeps the KL divergence between $\pi_\theta$
# and $\pi_{\theta_{OLD}}$ constrained.
# Large deviation can cause performance collapse;
# where the policy performance drops and doesn't recover because
# we are sampling from a bad policy.
#
# Using the normalized advantage
# $\bar{A_t} = \frac{\hat{A_t} - \mu(\hat{A_t})}{\sigma(\hat{A_t})}$
# introduces a bias to the policy gradient estimator,
# but it reduces variance a lot.
clipped_ratio = ratio.clamp(min=1.0 - clip,
max=1.0 + clip)
policy_reward = torch.min(ratio * advantage,
clipped_ratio * advantage)
self.clip_fraction = (abs((ratio - 1.0)) > clip).to(torch.float).mean()
return -policy_reward.mean()
class ClippedValueFunctionLoss(nn.Module):
"""
## Clipped Value Function Loss
Similarly we clip the value function update also.
\begin{align}
V^{\pi_\theta}_{CLIP}(s_t)
&= clip\Bigl(V^{\pi_\theta}(s_t) - \hat{V_t}, -\epsilon, +\epsilon\Bigr)
\\
\mathcal{L}^{VF}(\theta)
&= \frac{1}{2} \mathbb{E} \biggl[
max\Bigl(\bigl(V^{\pi_\theta}(s_t) - R_t\bigr)^2,
\bigl(V^{\pi_\theta}_{CLIP}(s_t) - R_t\bigr)^2\Bigr)
\biggr]
\end{align}
Clipping makes sure the value function $V_\theta$ doesn't deviate
significantly from $V_{\theta_{OLD}}$.
"""
def forward(self, value: torch.Tensor, sampled_value: torch.Tensor, sampled_return: torch.Tensor, clip: float):
clipped_value = sampled_value + (value - sampled_value).clamp(min=-clip, max=clip)
vf_loss = torch.max((value - sampled_return) ** 2, (clipped_value - sampled_return) ** 2)
return 0.5 * vf_loss.mean()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/rl/dqn/experiment.py | labml_nn/rl/dqn/experiment.py | """
---
title: DQN Experiment with Atari Breakout
summary: Implementation of DQN experiment with Atari Breakout
---
# DQN Experiment with Atari Breakout
This experiment trains a Deep Q Network (DQN) to play Atari Breakout game on OpenAI Gym.
It runs the [game environments on multiple processes](../game.html) to sample efficiently.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/rl/dqn/experiment.ipynb)
"""
import numpy as np
import torch
from labml import tracker, experiment, logger, monit
from labml.internal.configs.dynamic_hyperparam import FloatDynamicHyperParam
from labml_nn.helpers.schedule import Piecewise
from labml_nn.rl.dqn import QFuncLoss
from labml_nn.rl.dqn.model import Model
from labml_nn.rl.dqn.replay_buffer import ReplayBuffer
from labml_nn.rl.game import Worker
# Select device
if torch.cuda.is_available():
device = torch.device("cuda:0")
else:
device = torch.device("cpu")
def obs_to_torch(obs: np.ndarray) -> torch.Tensor:
"""Scale observations from `[0, 255]` to `[0, 1]`"""
return torch.tensor(obs, dtype=torch.float32, device=device) / 255.
class Trainer:
"""
## Trainer
"""
def __init__(self, *,
updates: int, epochs: int,
n_workers: int, worker_steps: int, mini_batch_size: int,
update_target_model: int,
learning_rate: FloatDynamicHyperParam,
):
# number of workers
self.n_workers = n_workers
# steps sampled on each update
self.worker_steps = worker_steps
# number of training iterations
self.train_epochs = epochs
# number of updates
self.updates = updates
# size of mini batch for training
self.mini_batch_size = mini_batch_size
# update target network every 250 update
self.update_target_model = update_target_model
# learning rate
self.learning_rate = learning_rate
# exploration as a function of updates
self.exploration_coefficient = Piecewise(
[
(0, 1.0),
(25_000, 0.1),
(self.updates / 2, 0.01)
], outside_value=0.01)
# $\beta$ for replay buffer as a function of updates
self.prioritized_replay_beta = Piecewise(
[
(0, 0.4),
(self.updates, 1)
], outside_value=1)
# Replay buffer with $\alpha = 0.6$. Capacity of the replay buffer must be a power of 2.
self.replay_buffer = ReplayBuffer(2 ** 14, 0.6)
# Model for sampling and training
self.model = Model().to(device)
# target model to get $\textcolor{orange}Q(s';\textcolor{orange}{\theta_i^{-}})$
self.target_model = Model().to(device)
# create workers
self.workers = [Worker(47 + i) for i in range(self.n_workers)]
# initialize tensors for observations
self.obs = np.zeros((self.n_workers, 4, 84, 84), dtype=np.uint8)
# reset the workers
for worker in self.workers:
worker.child.send(("reset", None))
# get the initial observations
for i, worker in enumerate(self.workers):
self.obs[i] = worker.child.recv()
# loss function
self.loss_func = QFuncLoss(0.99)
# optimizer
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=2.5e-4)
def _sample_action(self, q_value: torch.Tensor, exploration_coefficient: float):
"""
#### $\epsilon$-greedy Sampling
When sampling actions we use a $\epsilon$-greedy strategy, where we
take a greedy action with probabiliy $1 - \epsilon$ and
take a random action with probability $\epsilon$.
We refer to $\epsilon$ as `exploration_coefficient`.
"""
# Sampling doesn't need gradients
with torch.no_grad():
# Sample the action with highest Q-value. This is the greedy action.
greedy_action = torch.argmax(q_value, dim=-1)
# Uniformly sample and action
random_action = torch.randint(q_value.shape[-1], greedy_action.shape, device=q_value.device)
# Whether to chose greedy action or the random action
is_choose_rand = torch.rand(greedy_action.shape, device=q_value.device) < exploration_coefficient
# Pick the action based on `is_choose_rand`
return torch.where(is_choose_rand, random_action, greedy_action).cpu().numpy()
def sample(self, exploration_coefficient: float):
"""### Sample data"""
# This doesn't need gradients
with torch.no_grad():
# Sample `worker_steps`
for t in range(self.worker_steps):
# Get Q_values for the current observation
q_value = self.model(obs_to_torch(self.obs))
# Sample actions
actions = self._sample_action(q_value, exploration_coefficient)
# Run sampled actions on each worker
for w, worker in enumerate(self.workers):
worker.child.send(("step", actions[w]))
# Collect information from each worker
for w, worker in enumerate(self.workers):
# Get results after executing the actions
next_obs, reward, done, info = worker.child.recv()
# Add transition to replay buffer
self.replay_buffer.add(self.obs[w], actions[w], reward, next_obs, done)
# update episode information.
# collect episode info, which is available if an episode finished;
# this includes total reward and length of the episode -
# look at `Game` to see how it works.
if info:
tracker.add('reward', info['reward'])
tracker.add('length', info['length'])
# update current observation
self.obs[w] = next_obs
def train(self, beta: float):
"""
### Train the model
"""
for _ in range(self.train_epochs):
# Sample from priority replay buffer
samples = self.replay_buffer.sample(self.mini_batch_size, beta)
# Get the predicted Q-value
q_value = self.model(obs_to_torch(samples['obs']))
# Get the Q-values of the next state for [Double Q-learning](index.html).
# Gradients shouldn't propagate for these
with torch.no_grad():
# Get $\textcolor{cyan}Q(s';\textcolor{cyan}{\theta_i})$
double_q_value = self.model(obs_to_torch(samples['next_obs']))
# Get $\textcolor{orange}Q(s';\textcolor{orange}{\theta_i^{-}})$
target_q_value = self.target_model(obs_to_torch(samples['next_obs']))
# Compute Temporal Difference (TD) errors, $\delta$, and the loss, $\mathcal{L}(\theta)$.
td_errors, loss = self.loss_func(q_value,
q_value.new_tensor(samples['action']),
double_q_value, target_q_value,
q_value.new_tensor(samples['done']),
q_value.new_tensor(samples['reward']),
q_value.new_tensor(samples['weights']))
# Calculate priorities for replay buffer $p_i = |\delta_i| + \epsilon$
new_priorities = np.abs(td_errors.cpu().numpy()) + 1e-6
# Update replay buffer priorities
self.replay_buffer.update_priorities(samples['indexes'], new_priorities)
# Set learning rate
for pg in self.optimizer.param_groups:
pg['lr'] = self.learning_rate()
# Zero out the previously calculated gradients
self.optimizer.zero_grad()
# Calculate gradients
loss.backward()
# Clip gradients
torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=0.5)
# Update parameters based on gradients
self.optimizer.step()
def run_training_loop(self):
"""
### Run training loop
"""
# Last 100 episode information
tracker.set_queue('reward', 100, True)
tracker.set_queue('length', 100, True)
# Copy to target network initially
self.target_model.load_state_dict(self.model.state_dict())
for update in monit.loop(self.updates):
# $\epsilon$, exploration fraction
exploration = self.exploration_coefficient(update)
tracker.add('exploration', exploration)
# $\beta$ for prioritized replay
beta = self.prioritized_replay_beta(update)
tracker.add('beta', beta)
# Sample with current policy
self.sample(exploration)
# Start training after the buffer is full
if self.replay_buffer.is_full():
# Train the model
self.train(beta)
# Periodically update target network
if update % self.update_target_model == 0:
self.target_model.load_state_dict(self.model.state_dict())
# Save tracked indicators.
tracker.save()
# Add a new line to the screen periodically
if (update + 1) % 1_000 == 0:
logger.log()
def destroy(self):
"""
### Destroy
Stop the workers
"""
for worker in self.workers:
worker.child.send(("close", None))
def main():
# Create the experiment
experiment.create(name='dqn')
# Configurations
configs = {
# Number of updates
'updates': 1_000_000,
# Number of epochs to train the model with sampled data.
'epochs': 8,
# Number of worker processes
'n_workers': 8,
# Number of steps to run on each process for a single update
'worker_steps': 4,
# Mini batch size
'mini_batch_size': 32,
# Target model updating interval
'update_target_model': 250,
# Learning rate.
'learning_rate': FloatDynamicHyperParam(1e-4, (0, 1e-3)),
}
# Configurations
experiment.configs(configs)
# Initialize the trainer
m = Trainer(**configs)
# Run and monitor the experiment
with experiment.start():
m.run_training_loop()
# Stop the workers
m.destroy()
# ## Run it
if __name__ == "__main__":
main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/rl/dqn/model.py | labml_nn/rl/dqn/model.py | """
---
title: Deep Q Network (DQN) Model
summary: Implementation of neural network model for Deep Q Network (DQN).
---
# Deep Q Network (DQN) Model
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/rl/dqn/experiment.ipynb)
"""
import torch
from torch import nn
class Model(nn.Module):
"""
## Dueling Network ⚔️ Model for $Q$ Values
We are using a [dueling network](https://arxiv.org/abs/1511.06581)
to calculate Q-values.
Intuition behind dueling network architecture is that in most states
the action doesn't matter,
and in some states the action is significant. Dueling network allows
this to be represented very well.
\begin{align}
Q^\pi(s,a) &= V^\pi(s) + A^\pi(s, a)
\\
\mathop{\mathbb{E}}_{a \sim \pi(s)}
\Big[
A^\pi(s, a)
\Big]
&= 0
\end{align}
So we create two networks for $V$ and $A$ and get $Q$ from them.
$$
Q(s, a) = V(s) +
\Big(
A(s, a) - \frac{1}{|\mathcal{A}|} \sum_{a' \in \mathcal{A}} A(s, a')
\Big)
$$
We share the initial layers of the $V$ and $A$ networks.
"""
def __init__(self):
super().__init__()
self.conv = nn.Sequential(
# The first convolution layer takes a
# $84\times84$ frame and produces a $20\times20$ frame
nn.Conv2d(in_channels=4, out_channels=32, kernel_size=8, stride=4),
nn.ReLU(),
# The second convolution layer takes a
# $20\times20$ frame and produces a $9\times9$ frame
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2),
nn.ReLU(),
# The third convolution layer takes a
# $9\times9$ frame and produces a $7\times7$ frame
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1),
nn.ReLU(),
)
# A fully connected layer takes the flattened
# frame from third convolution layer, and outputs
# $512$ features
self.lin = nn.Linear(in_features=7 * 7 * 64, out_features=512)
self.activation = nn.ReLU()
# This head gives the state value $V$
self.state_value = nn.Sequential(
nn.Linear(in_features=512, out_features=256),
nn.ReLU(),
nn.Linear(in_features=256, out_features=1),
)
# This head gives the action value $A$
self.action_value = nn.Sequential(
nn.Linear(in_features=512, out_features=256),
nn.ReLU(),
nn.Linear(in_features=256, out_features=4),
)
def forward(self, obs: torch.Tensor):
# Convolution
h = self.conv(obs)
# Reshape for linear layers
h = h.reshape((-1, 7 * 7 * 64))
# Linear layer
h = self.activation(self.lin(h))
# $A$
action_value = self.action_value(h)
# $V$
state_value = self.state_value(h)
# $A(s, a) - \frac{1}{|\mathcal{A}|} \sum_{a' \in \mathcal{A}} A(s, a')$
action_score_centered = action_value - action_value.mean(dim=-1, keepdim=True)
# $Q(s, a) =V(s) + \Big(A(s, a) - \frac{1}{|\mathcal{A}|} \sum_{a' \in \mathcal{A}} A(s, a')\Big)$
q = state_value + action_score_centered
return q
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/rl/dqn/__init__.py | labml_nn/rl/dqn/__init__.py | """
---
title: Deep Q Networks (DQN)
summary: >
This is a PyTorch implementation/tutorial of Deep Q Networks (DQN) from paper
Playing Atari with Deep Reinforcement Learning.
This includes dueling network architecture, a prioritized replay buffer and
double-Q-network training.
---
# Deep Q Networks (DQN)
This is a [PyTorch](https://pytorch.org) implementation of paper
[Playing Atari with Deep Reinforcement Learning](https://arxiv.org/abs/1312.5602)
along with [Dueling Network](model.html), [Prioritized Replay](replay_buffer.html)
and Double Q Network.
Here is the [experiment](experiment.html) and [model](model.html) implementation.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/rl/dqn/experiment.ipynb)
"""
from typing import Tuple
import torch
from torch import nn
from labml import tracker
from labml_nn.rl.dqn.replay_buffer import ReplayBuffer
class QFuncLoss(nn.Module):
"""
## Train the model
We want to find optimal action-value function.
\begin{align}
Q^*(s,a) &= \max_\pi \mathbb{E} \Big[
r_t + \gamma r_{t + 1} + \gamma^2 r_{t + 2} + ... | s_t = s, a_t = a, \pi
\Big]
\\
Q^*(s,a) &= \mathop{\mathbb{E}}_{s' \sim \large{\varepsilon}} \Big[
r + \gamma \max_{a'} Q^* (s', a') | s, a
\Big]
\end{align}
### Target network 🎯
In order to improve stability we use experience replay that randomly sample
from previous experience $U(D)$. We also use a Q network
with a separate set of parameters $\textcolor{orange}{\theta_i^{-}}$ to calculate the target.
$\textcolor{orange}{\theta_i^{-}}$ is updated periodically.
This is according to paper
[Human Level Control Through Deep Reinforcement Learning](https://deepmind.com/research/dqn/).
So the loss function is,
$$
\mathcal{L}_i(\theta_i) = \mathop{\mathbb{E}}_{(s,a,r,s') \sim U(D)}
\bigg[
\Big(
r + \gamma \max_{a'} Q(s', a'; \textcolor{orange}{\theta_i^{-}}) - Q(s,a;\theta_i)
\Big) ^ 2
\bigg]
$$
### Double $Q$-Learning
The max operator in the above calculation uses same network for both
selecting the best action and for evaluating the value.
That is,
$$
\max_{a'} Q(s', a'; \theta) = \textcolor{cyan}{Q}
\Big(
s', \mathop{\operatorname{argmax}}_{a'}
\textcolor{cyan}{Q}(s', a'; \textcolor{cyan}{\theta}); \textcolor{cyan}{\theta}
\Big)
$$
We use [double Q-learning](https://arxiv.org/abs/1509.06461), where
the $\operatorname{argmax}$ is taken from $\textcolor{cyan}{\theta_i}$ and
the value is taken from $\textcolor{orange}{\theta_i^{-}}$.
And the loss function becomes,
\begin{align}
\mathcal{L}_i(\theta_i) = \mathop{\mathbb{E}}_{(s,a,r,s') \sim U(D)}
\Bigg[
\bigg(
&r + \gamma \textcolor{orange}{Q}
\Big(
s',
\mathop{\operatorname{argmax}}_{a'}
\textcolor{cyan}{Q}(s', a'; \textcolor{cyan}{\theta_i}); \textcolor{orange}{\theta_i^{-}}
\Big)
\\
- &Q(s,a;\theta_i)
\bigg) ^ 2
\Bigg]
\end{align}
"""
def __init__(self, gamma: float):
super().__init__()
self.gamma = gamma
self.huber_loss = nn.SmoothL1Loss(reduction='none')
    def forward(self, q: torch.Tensor, action: torch.Tensor, double_q: torch.Tensor,
                target_q: torch.Tensor, done: torch.Tensor, reward: torch.Tensor,
                weights: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Compute the double-DQN temporal difference error and the weighted Huber loss.

        * `q` - $Q(s;\theta_i)$, action-values from the online network for state $s$
        * `action` - $a$, the actions taken in the sampled transitions
        * `double_q` - $\textcolor{cyan}Q(s';\textcolor{cyan}{\theta_i})$, online-network
          values at $s'$ (used to *select* the best next action)
        * `target_q` - $\textcolor{orange}Q(s';\textcolor{orange}{\theta_i^{-}})$,
          target-network values at $s'$ (used to *evaluate* that action)
        * `done` - whether the game ended after taking the action.
          NOTE(review): it is used as `(1 - done)` below, so it is assumed to be a
          numeric 0/1 tensor rather than `bool` — confirm at the call site.
        * `reward` - $r$
        * `weights` - weights of the samples from prioritized experience replay

        Returns `(td_error, loss)`: the per-sample TD error $\delta$ (computed under
        `no_grad`, for updating replay priorities) and the scalar weighted loss.
        """
        # $Q(s,a;\theta_i)$ — value of the action that was actually taken
        q_sampled_action = q.gather(-1, action.to(torch.long).unsqueeze(-1)).squeeze(-1)
        tracker.add('q_sampled_action', q_sampled_action)

        # Gradients shouldn't propagate through the target values
        # $$r + \gamma \textcolor{orange}{Q}
        # \Big(s',
        #     \mathop{\operatorname{argmax}}_{a'}
        #     \textcolor{cyan}{Q}(s', a'; \textcolor{cyan}{\theta_i}); \textcolor{orange}{\theta_i^{-}}
        # \Big)$$
        with torch.no_grad():
            # Get the best action at state $s'$
            # $$\mathop{\operatorname{argmax}}_{a'}
            # \textcolor{cyan}{Q}(s', a'; \textcolor{cyan}{\theta_i})$$
            best_next_action = torch.argmax(double_q, -1)
            # Get the q value from the target network for the best action at state $s'$
            # $$\textcolor{orange}{Q}
            # \Big(s',\mathop{\operatorname{argmax}}_{a'}
            # \textcolor{cyan}{Q}(s', a'; \textcolor{cyan}{\theta_i}); \textcolor{orange}{\theta_i^{-}}
            # \Big)$$
            best_next_q_value = target_q.gather(-1, best_next_action.unsqueeze(-1)).squeeze(-1)

            # Calculate the desired Q value.
            # We multiply by `(1 - done)` to zero out
            # the next state Q values if the game ended.
            #
            # $$r + \gamma \textcolor{orange}{Q}
            # \Big(s',
            #     \mathop{\operatorname{argmax}}_{a'}
            #     \textcolor{cyan}{Q}(s', a'; \textcolor{cyan}{\theta_i}); \textcolor{orange}{\theta_i^{-}}
            # \Big)$$
            q_update = reward + self.gamma * best_next_q_value * (1 - done)
            tracker.add('q_update', q_update)

            # Temporal difference error $\delta$ is used to weigh samples in replay buffer
            td_error = q_sampled_action - q_update
            tracker.add('td_error', td_error)

        # We take [Huber loss](https://en.wikipedia.org/wiki/Huber_loss) instead of
        # mean squared error loss because it is less sensitive to outliers
        losses = self.huber_loss(q_sampled_action, q_update)
        # Get weighted means
        loss = torch.mean(weights * losses)
        tracker.add('loss', loss)

        return td_error, loss
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/rl/dqn/replay_buffer.py | labml_nn/rl/dqn/replay_buffer.py | """
---
title: Prioritized Experience Replay Buffer
summary: Annotated implementation of prioritized experience replay using a binary segment tree.
---
# Prioritized Experience Replay Buffer
This implements paper [Prioritized experience replay](https://arxiv.org/abs/1511.05952),
using a binary segment tree.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/rl/dqn/experiment.ipynb)
"""
import random
import numpy as np
class ReplayBuffer:
    """
    ## Buffer for Prioritized Experience Replay

    [Prioritized experience replay](https://arxiv.org/abs/1511.05952)
    samples important transitions more frequently.
    The transitions are prioritized by the Temporal Difference error (td error), $\delta$.

    We sample transition $i$ with probability,
    $$P(i) = \frac{p_i^\alpha}{\sum_k p_k^\alpha}$$
    where $\alpha$ is a hyper-parameter that determines how much
    prioritization is used, with $\alpha = 0$ corresponding to uniform case.
    $p_i$ is the priority.

    We use proportional prioritization $p_i = |\delta_i| + \epsilon$ where
    $\delta_i$ is the temporal difference for transition $i$.

    We correct the bias introduced by prioritized replay using
    importance-sampling (IS) weights
    $$w_i = \bigg(\frac{1}{N} \frac{1}{P(i)}\bigg)^\beta$$ in the loss function.
    This fully compensates when $\beta = 1$.
    We normalize weights by $\frac{1}{\max_i w_i}$ for stability.
    Unbiased nature is most important towards the convergence at end of training.
    Therefore we increase $\beta$ towards end of training.

    ### Binary Segment Tree

    We use a binary segment tree to efficiently calculate
    $\sum_k^i p_k^\alpha$, the cumulative probability,
    which is needed to sample.
    We also use a binary segment tree to find $\min p_i^\alpha$,
    which is needed for $\frac{1}{\max_i w_i}$.
    We can also use a min-heap for this.
    Binary Segment Tree lets us calculate these in $\mathcal{O}(\log n)$
    time, which is way more efficient than the naive $\mathcal{O}(n)$
    approach.

    This is how a binary segment tree works for sum;
    it is similar for minimum.
    Let $x_i$ be the list of $N$ values we want to represent.
    Let $b_{i,j}$ be the $j^{\mathop{th}}$ node of the $i^{\mathop{th}}$ row
    in the binary tree.
    That is two children of node $b_{i,j}$ are $b_{i+1,2j}$ and $b_{i+1,2j + 1}$.

    The leaf nodes on row $D = \left\lceil {1 + \log_2 N} \right\rceil$
    will have values of $x$.
    Every node keeps the sum of the two child nodes.
    That is, the root node keeps the sum of the entire array of values.
    The left and right children of the root node keep
    the sum of the first half of the array and
    the sum of the second half of the array, respectively.
    And so on...

    $$b_{i,j} = \sum_{k = (j -1) * 2^{D - i} + 1}^{j * 2^{D - i}} x_k$$

    Number of nodes in row $i$,
    $$N_i = \left\lceil{\frac{N}{2^{D - i}}} \right\rceil$$
    This is one more than the total number of nodes in all rows above $i$.
    So we can use a single array $a$ to store the tree, where,
    $$b_{i,j} \rightarrow a_{N_i + j - 1}$$

    Then child nodes of $a_i$ are $a_{2i}$ and $a_{2i + 1}$.
    That is,
    $$a_i = a_{2i} + a_{2i + 1}$$

    This way of maintaining binary trees is very easy to program.
    *Note that we are indexing starting from 1*.

    We use the same structure to compute the minimum.
    """

    def __init__(self, capacity, alpha):
        """
        ### Initialize

        * `capacity` is the number of transitions the buffer holds.
          It must be a power of $2$ because the segment trees assume a
          perfect binary tree.
        * `alpha` is the prioritization exponent $\alpha$.
        """
        # Fail fast on an invalid capacity instead of silently corrupting the trees
        if capacity <= 0 or capacity & (capacity - 1) != 0:
            raise ValueError(f'capacity must be a positive power of two, got {capacity}')
        # We use a power of $2$ for capacity because it simplifies the code and debugging
        self.capacity = capacity
        # $\alpha$
        self.alpha = alpha

        # Maintain segment binary trees to take sum and find minimum over a range
        self.priority_sum = [0 for _ in range(2 * self.capacity)]
        self.priority_min = [float('inf') for _ in range(2 * self.capacity)]

        # Current max priority, $p$, to be assigned to new transitions
        self.max_priority = 1.

        # Arrays for buffer.
        # Note: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `bool` is the supported spelling (maps to `np.bool_`).
        self.data = {
            'obs': np.zeros(shape=(capacity, 4, 84, 84), dtype=np.uint8),
            'action': np.zeros(shape=capacity, dtype=np.int32),
            'reward': np.zeros(shape=capacity, dtype=np.float32),
            'next_obs': np.zeros(shape=(capacity, 4, 84, 84), dtype=np.uint8),
            'done': np.zeros(shape=capacity, dtype=bool)
        }
        # We use cyclic buffers to store data, and `next_idx` keeps the index of the next empty
        # slot
        self.next_idx = 0

        # Size of the buffer
        self.size = 0

    def add(self, obs, action, reward, next_obs, done):
        """
        ### Add sample to queue

        New samples get the current maximum priority so that each transition
        is sampled at least once before its priority is refined.
        """
        # Get next available slot
        idx = self.next_idx

        # store in the queue
        self.data['obs'][idx] = obs
        self.data['action'][idx] = action
        self.data['reward'][idx] = reward
        self.data['next_obs'][idx] = next_obs
        self.data['done'][idx] = done

        # Increment next available slot (cyclic)
        self.next_idx = (idx + 1) % self.capacity
        # Calculate the size
        self.size = min(self.capacity, self.size + 1)

        # $p_i^\alpha$, new samples get `max_priority`
        priority_alpha = self.max_priority ** self.alpha
        # Update the two segment trees for sum and minimum
        self._set_priority_min(idx, priority_alpha)
        self._set_priority_sum(idx, priority_alpha)

    def _set_priority_min(self, idx, priority_alpha):
        """
        #### Set priority in binary segment tree for minimum
        """
        # Leaf of the binary tree
        idx += self.capacity
        # Set the priority at the leaf
        self.priority_min[idx] = priority_alpha

        # Update tree, by traversing along ancestors.
        # Continue until the root of the tree.
        while idx >= 2:
            # Get the index of the parent node
            idx //= 2
            # Value of the parent node is the minimum of its two children
            self.priority_min[idx] = min(self.priority_min[2 * idx], self.priority_min[2 * idx + 1])

    def _set_priority_sum(self, idx, priority):
        """
        #### Set priority in binary segment tree for sum
        """
        # Leaf of the binary tree
        idx += self.capacity
        # Set the priority at the leaf
        self.priority_sum[idx] = priority

        # Update tree, by traversing along ancestors.
        # Continue until the root of the tree.
        while idx >= 2:
            # Get the index of the parent node
            idx //= 2
            # Value of the parent node is the sum of its two children
            self.priority_sum[idx] = self.priority_sum[2 * idx] + self.priority_sum[2 * idx + 1]

    def _sum(self):
        """
        #### $\sum_k p_k^\alpha$
        """
        # The root node keeps the sum of all values
        return self.priority_sum[1]

    def _min(self):
        """
        #### $\min_k p_k^\alpha$
        """
        # The root node keeps the minimum of all values
        return self.priority_min[1]

    def find_prefix_sum_idx(self, prefix_sum):
        """
        #### Find largest $i$ such that $\sum_{k=1}^{i} p_k^\alpha \le P$
        """
        # Start from the root
        idx = 1
        while idx < self.capacity:
            # If the sum of the left branch is higher than required sum
            if self.priority_sum[idx * 2] > prefix_sum:
                # Go to left branch of the tree
                idx = 2 * idx
            else:
                # Otherwise go to right branch and reduce the sum of left
                # branch from required sum
                prefix_sum -= self.priority_sum[idx * 2]
                idx = 2 * idx + 1

        # We are at the leaf node. Subtract the capacity by the index in the tree
        # to get the index of actual value
        return idx - self.capacity

    def sample(self, batch_size, beta):
        """
        ### Sample from buffer

        Returns a dict with `weights`, `indexes`, and one entry per data array.
        """
        # Initialize samples
        samples = {
            'weights': np.zeros(shape=batch_size, dtype=np.float32),
            'indexes': np.zeros(shape=batch_size, dtype=np.int32)
        }

        # Get sample indexes, each drawn with probability $P(i)$
        for i in range(batch_size):
            p = random.random() * self._sum()
            idx = self.find_prefix_sum_idx(p)
            samples['indexes'][i] = idx

        # $\min_i P(i) = \frac{\min_i p_i^\alpha}{\sum_k p_k^\alpha}$
        prob_min = self._min() / self._sum()
        # $\max_i w_i = \bigg(\frac{1}{N} \frac{1}{\min_i P(i)}\bigg)^\beta$
        max_weight = (prob_min * self.size) ** (-beta)

        for i in range(batch_size):
            idx = samples['indexes'][i]
            # $P(i) = \frac{p_i^\alpha}{\sum_k p_k^\alpha}$
            prob = self.priority_sum[idx + self.capacity] / self._sum()
            # $w_i = \bigg(\frac{1}{N} \frac{1}{P(i)}\bigg)^\beta$
            weight = (prob * self.size) ** (-beta)
            # Normalize by $\frac{1}{\max_i w_i}$,
            # which also cancels off the $\frac{1}{N}$ term
            samples['weights'][i] = weight / max_weight

        # Get samples data
        for k, v in self.data.items():
            samples[k] = v[samples['indexes']]

        return samples

    def update_priorities(self, indexes, priorities):
        """
        ### Update priorities

        Called after a learning step with the new $|\delta_i| + \epsilon$ priorities.
        """
        for idx, priority in zip(indexes, priorities):
            # Set current max priority
            self.max_priority = max(self.max_priority, priority)

            # Calculate $p_i^\alpha$
            priority_alpha = priority ** self.alpha
            # Update the trees
            self._set_priority_min(idx, priority_alpha)
            self._set_priority_sum(idx, priority_alpha)

    def is_full(self):
        """
        ### Whether the buffer is full
        """
        return self.capacity == self.size
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/distillation/small.py | labml_nn/distillation/small.py | """
---
title: Train a small model on CIFAR 10
summary: >
Train a small model on CIFAR 10 to test how much distillation benefits.
---
# Train a small model on CIFAR 10
This trains a small model on CIFAR 10 to test how much [distillation](index.html) benefits.
"""
import torch.nn as nn
from labml import experiment, logger
from labml.configs import option
from labml_nn.experiments.cifar10 import CIFAR10Configs, CIFAR10VGGModel
from labml_nn.normalization.batch_norm import BatchNorm
class Configs(CIFAR10Configs):
    """
    ## Configurations

    Everything — dataset handling, optimizer, and the training loop — is
    inherited unchanged from [`CIFAR10Configs`](../experiments/cifar10.html).
    """
class SmallModel(CIFAR10VGGModel):
    """
    ### VGG style model for CIFAR-10 classification

    A small variant of the
    [generic VGG style architecture](../experiments/cifar10.html).
    """

    def __init__(self):
        # Channel counts for each stage of convolution blocks
        super().__init__([[32, 32], [64, 64], [128], [128], [128]])

    def conv_block(self, in_channels, out_channels) -> nn.Module:
        """
        Build one convolution → batch-norm → ReLU stage.
        """
        layers = [
            # Convolution layer
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            # Batch normalization
            BatchNorm(out_channels, track_running_stats=False),
            # ReLU activation
            nn.ReLU(inplace=True),
        ]
        return nn.Sequential(*layers)
@option(Configs.model)
def _small_model(c: Configs):
    """
    ### Create the small model on the configured device
    """
    model = SmallModel()
    return model.to(c.device)
def main():
    """
    Train the small model on CIFAR-10 from scratch.
    """
    # Create the experiment
    experiment.create(name='cifar10', comment='small model')
    # Build the configurations object
    conf = Configs()
    # Apply overrides on top of the defaults
    overrides = {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
    }
    experiment.configs(conf, overrides)
    # Register the model for checkpoint saving/loading
    experiment.add_pytorch_models({'model': conf.model})
    # Report the trainable-parameter count
    n_params = sum(p.numel() for p in conf.model.parameters() if p.requires_grad)
    logger.inspect(params=n_params)
    # Run the training loop inside the experiment
    with experiment.start():
        conf.run()
# Run the training when this module is executed as a script
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/distillation/large.py | labml_nn/distillation/large.py | """
---
title: Train a large model on CIFAR 10
summary: >
Train a large model on CIFAR 10 for distillation.
---
# Train a large model on CIFAR 10
This trains a large model on CIFAR 10 for [distillation](index.html).
"""
import torch.nn as nn
from labml import experiment, logger
from labml.configs import option
from labml_nn.experiments.cifar10 import CIFAR10Configs, CIFAR10VGGModel
from labml_nn.normalization.batch_norm import BatchNorm
class Configs(CIFAR10Configs):
    """
    ## Configurations

    Everything — dataset handling, optimizer, and the training loop — is
    inherited unchanged from [`CIFAR10Configs`](../experiments/cifar10.html).
    """
class LargeModel(CIFAR10VGGModel):
    """
    ### VGG style model for CIFAR-10 classification

    The large (teacher) variant of the
    [generic VGG style architecture](../experiments/cifar10.html).
    """

    def __init__(self):
        # Channel counts for each stage of convolution blocks
        super().__init__([[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]])

    def conv_block(self, in_channels, out_channels) -> nn.Module:
        """
        Build one dropout → convolution → batch-norm → ReLU stage.
        """
        layers = [
            # Dropout for regularization
            nn.Dropout(0.1),
            # Convolution layer
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            # Batch normalization
            BatchNorm(out_channels, track_running_stats=False),
            # ReLU activation
            nn.ReLU(inplace=True),
        ]
        return nn.Sequential(*layers)
@option(Configs.model)
def _large_model(c: Configs):
    """
    ### Create the large model on the configured device
    """
    model = LargeModel()
    return model.to(c.device)
def main():
    """
    Train the large model on CIFAR-10 and save checkpoints for distillation.
    """
    # Create the experiment
    experiment.create(name='cifar10', comment='large model')
    # Build the configurations object
    conf = Configs()
    # Apply overrides on top of the defaults
    overrides = {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
        'is_save_models': True,
        'epochs': 20,
    }
    experiment.configs(conf, overrides)
    # Register the model for checkpoint saving/loading
    experiment.add_pytorch_models({'model': conf.model})
    # Report the trainable-parameter count
    n_params = sum(p.numel() for p in conf.model.parameters() if p.requires_grad)
    logger.inspect(params=n_params)
    # Run the training loop inside the experiment
    with experiment.start():
        conf.run()
# Run the training when this module is executed as a script
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/distillation/__init__.py | labml_nn/distillation/__init__.py | """
---
title: Distilling the Knowledge in a Neural Network
summary: >
PyTorch implementation and tutorial of the paper
Distilling the Knowledge in a Neural Network.
---
# Distilling the Knowledge in a Neural Network
This is a [PyTorch](https://pytorch.org) implementation/tutorial of the paper
[Distilling the Knowledge in a Neural Network](https://arxiv.org/abs/1503.02531).
It's a way of training a small network using the knowledge in a trained larger network;
i.e. distilling the knowledge from the large network.
A large model with regularization or an ensemble of models (using dropout) generalizes
better than a small model when trained directly on the data and labels.
However, a small model can be trained to generalize better with the help of a large model.
Smaller models are better in production: faster, less compute, less memory.
The output probabilities of a trained model give more information than the labels
because it assigns non-zero probabilities to incorrect classes as well.
These probabilities tell us that a sample has a chance of belonging to certain classes.
For instance, when classifying digits, when given an image of digit *7*,
a generalized model will give a high probability to 7 and a small but non-zero
probability to 2, while assigning almost zero probability to other digits.
Distillation uses this information to train a small model better.
## Soft Targets
The probabilities are usually computed with a softmax operation,
$$q_i = \frac{\exp (z_i)}{\sum_j \exp (z_j)}$$
where $q_i$ is the probability for class $i$ and $z_i$ is the logit.
We train the small model to minimize the Cross entropy or KL Divergence between its output
probability distribution and the large network's output probability distribution
(soft targets).
One of the problems here is that the probabilities assigned to incorrect classes by the
large network are often very small and don't contribute to the loss.
So they soften the probabilities by applying a temperature $T$,
$$q_i = \frac{\exp (\frac{z_i}{T})}{\sum_j \exp (\frac{z_j}{T})}$$
where higher values for $T$ will produce softer probabilities.
## Training
Paper suggests adding a second loss term for predicting the actual labels
when training the small model.
We calculate the composite loss as the weighted sum of the two loss terms:
soft targets and actual labels.
The dataset for distillation is called *the transfer set*, and the paper
suggests using the same training data.
## Our experiment
We train on CIFAR-10 dataset.
We [train a large model](large.html) that has $14,728,266$ parameters
with dropout and it gives an accuracy of 85% on the validation set.
A [small model](small.html) with $437,034$ parameters
gives an accuracy of 80%.
We then train the small model with distillation from the large model,
and it gives an accuracy of 82%; a 2% increase in the accuracy.
"""
import torch
import torch.nn.functional
from torch import nn
from labml import experiment, tracker
from labml.configs import option
from labml_nn.helpers.trainer import BatchIndex
from labml_nn.distillation.large import LargeModel
from labml_nn.distillation.small import SmallModel
from labml_nn.experiments.cifar10 import CIFAR10Configs
class Configs(CIFAR10Configs):
    """
    ## Configurations

    This extends from [`CIFAR10Configs`](../experiments/cifar10.html) which defines all the
    dataset related configurations, optimizer, and a training loop.

    It adds the teacher model and the distillation loss terms.
    """

    # The small model (student) that gets trained
    model: SmallModel
    # The large model (teacher); set by the caller before training
    large: LargeModel
    # KL Divergence loss for soft targets.
    # NOTE(review): the default `reduction='mean'` averages over every element;
    # PyTorch docs recommend `reduction='batchmean'` for the true KL value.
    # Changing it would rescale the loss, so `soft_targets_weight` would need
    # retuning — confirm before touching.
    kl_div_loss = nn.KLDivLoss(log_target=True)
    # Cross entropy loss for true label loss
    loss_func = nn.CrossEntropyLoss()
    # Temperature, $T$, used to soften both teacher and student distributions
    temperature: float = 5.
    # Weight for soft targets loss.
    #
    # The gradients produced by soft targets get scaled by $\frac{1}{T^2}$.
    # To compensate for this the paper suggests scaling the soft targets loss
    # by a factor of $T^2$
    soft_targets_weight: float = 100.
    # Weight for true label cross entropy loss
    label_loss_weight: float = 0.5

    def step(self, batch: any, batch_idx: BatchIndex):
        """
        ### Training/validation step

        We define a custom training/validation step to include the distillation.

        NOTE(review): `batch: any` uses the builtin `any` as an annotation;
        `typing.Any` was presumably intended.
        """

        # Training/Evaluation mode for the small model
        self.model.train(self.mode.is_train)
        # Large model is always in evaluation mode (frozen teacher)
        self.large.eval()

        # Move data to the device
        data, target = batch[0].to(self.device), batch[1].to(self.device)

        # Update global step (number of samples processed) when in training mode
        if self.mode.is_train:
            tracker.add_global_step(len(data))

        # Get the output logits, $v_i$, from the large model (no gradients needed)
        with torch.no_grad():
            large_logits = self.large(data)

        # Get the output logits, $z_i$, from the small model
        output = self.model(data)

        # Soft targets
        # $$p_i = \frac{\exp (\frac{v_i}{T})}{\sum_j \exp (\frac{v_j}{T})}$$
        # (log-space, since `kl_div_loss` has `log_target=True`)
        soft_targets = nn.functional.log_softmax(large_logits / self.temperature, dim=-1)
        # Temperature adjusted probabilities of the small model
        # $$q_i = \frac{\exp (\frac{z_i}{T})}{\sum_j \exp (\frac{z_j}{T})}$$
        soft_prob = nn.functional.log_softmax(output / self.temperature, dim=-1)

        # Calculate the soft targets loss
        soft_targets_loss = self.kl_div_loss(soft_prob, soft_targets)
        # Calculate the true label loss
        label_loss = self.loss_func(output, target)
        # Weighted sum of the two losses
        loss = self.soft_targets_weight * soft_targets_loss + self.label_loss_weight * label_loss
        # Log the losses
        tracker.add({"loss.kl_div.": soft_targets_loss,
                     "loss.nll": label_loss,
                     "loss.": loss})

        # Calculate and log accuracy
        self.accuracy(output, target)
        self.accuracy.track()

        # Train the model
        if self.mode.is_train:
            # Calculate gradients
            loss.backward()
            # Take optimizer step
            self.optimizer.step()
            # Log the model parameters and gradients on last batch of every epoch
            if batch_idx.is_last:
                tracker.add('model', self.model)
            # Clear the gradients
            self.optimizer.zero_grad()

        # Save the tracked metrics
        tracker.save()
@option(Configs.large)
def _large_model(c: Configs):
    """
    ### Create the large (teacher) model on the configured device
    """
    teacher = LargeModel()
    return teacher.to(c.device)
@option(Configs.model)
def _small_student_model(c: Configs):
    """
    ### Create the small (student) model on the configured device
    """
    student = SmallModel()
    return student.to(c.device)
def get_saved_model(run_uuid: str, checkpoint: int):
    """
    ### Load [trained large model](large.html)

    * `run_uuid` is the experiment run of the large-model training
    * `checkpoint` is the global step of the checkpoint to load

    Returns the large model with the saved weights loaded.
    """
    from labml_nn.distillation.large import Configs as LargeConfigs

    # In evaluation mode (no recording)
    experiment.evaluate()
    # Initialize configs of the large model training experiment
    conf = LargeConfigs()
    # Load saved configs
    experiment.configs(conf, experiment.load_configs(run_uuid))
    # Set models for saving/loading
    experiment.add_pytorch_models({'model': conf.model})
    # Set which run and checkpoint to load
    experiment.load(run_uuid, checkpoint)
    # Start the experiment - this will load the model, and prepare everything
    experiment.start()

    # Return the model
    return conf.model
def main(run_uuid: str, checkpoint: int):
    """
    Train a small model with distillation

    * `run_uuid` is the experiment run of the [trained large model](large.html)
    * `checkpoint` is the checkpoint (global step) to load the large model from
    """
    # Load saved model
    large_model = get_saved_model(run_uuid, checkpoint)
    # Create experiment
    experiment.create(name='distillation', comment='cifar10')
    # Create configurations
    conf = Configs()
    # Set the loaded large model
    conf.large = large_model
    # Load configurations
    experiment.configs(conf, {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
        'model': '_small_student_model',
    })
    # Set model for saving/loading
    experiment.add_pytorch_models({'model': conf.model})
    # Start experiment from scratch (don't resume a previous distillation run)
    experiment.load(None, None)
    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()
# NOTE(review): the run UUID and checkpoint below are hard-coded from the
# author's experiment — replace them with your own large-model training run.
if __name__ == '__main__':
    main('d46cd53edaec11eb93c38d6538aee7d6', 1_000_000)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/experiments/nlp_classification.py | labml_nn/experiments/nlp_classification.py | """
---
title: NLP classification trainer
summary: >
This is a reusable trainer for classification tasks
---
# NLP model trainer for classification
"""
from collections import Counter
from typing import Callable
import torchtext
import torchtext.vocab
from torchtext.vocab import Vocab
import torch
from labml import lab, tracker, monit
from labml.configs import option
from labml_nn.helpers.device import DeviceConfigs
from labml_nn.helpers.metrics import Accuracy
from labml_nn.helpers.trainer import TrainValidConfigs, BatchIndex
from labml_nn.optimizers.configs import OptimizerConfigs
from torch import nn
from torch.utils.data import DataLoader
class NLPClassificationConfigs(TrainValidConfigs):
    """
    <a id="NLPClassificationConfigs"></a>

    ## Trainer configurations

    This has the basic configurations for NLP classification task training.
    All the properties are configurable.

    String defaults such as `'ag_news'` or `'character'` are names of labml
    config options (`@option`-decorated functions in this module) that compute
    the actual value lazily.
    """

    # Optimizer
    optimizer: torch.optim.Adam
    # Training device
    device: torch.device = DeviceConfigs()

    # Autoregressive model
    model: nn.Module
    # Batch size
    batch_size: int = 16
    # Length of the sequence, or context size
    seq_len: int = 512
    # Vocabulary
    vocab: Vocab = 'ag_news'
    # Number of token in vocabulary
    n_tokens: int
    # Number of classes
    n_classes: int = 'ag_news'
    # Tokenizer
    tokenizer: Callable = 'character'
    # Whether to periodically save models
    is_save_models = True
    # Loss function
    loss_func = nn.CrossEntropyLoss()
    # Accuracy function
    accuracy = Accuracy()
    # Model embedding size
    d_model: int = 512
    # Gradient clipping
    grad_norm_clip: float = 1.0
    # Training data loader
    train_loader: DataLoader = 'ag_news'
    # Validation data loader
    valid_loader: DataLoader = 'ag_news'

    # Whether to log model parameters and gradients (once per epoch).
    # These are summarized stats per layer, but it could still lead
    # to many indicators for very deep networks.
    is_log_model_params_grads: bool = False

    # Whether to log model activations (once per epoch).
    # These are summarized stats per layer, but it could still lead
    # to many indicators for very deep networks.
    is_log_model_activations: bool = False

    def init(self):
        """
        ### Initialization
        """
        # Set tracker configurations
        tracker.set_scalar("accuracy.*", True)
        tracker.set_scalar("loss.*", True)

        # Add accuracy as a state module.
        # The name is probably confusing, since it's meant to store
        # states between training and validation for RNNs.
        # This will keep the accuracy metric stats separate for training and validation.
        self.state_modules = [self.accuracy]

    def step(self, batch: any, batch_idx: BatchIndex):
        """
        ### Training or validation step

        NOTE(review): `batch: any` uses the builtin `any` as an annotation;
        `typing.Any` was presumably intended.
        """

        # Move data to the device
        data, target = batch[0].to(self.device), batch[1].to(self.device)

        # Update global step when in training mode.
        # NOTE(review): `data.shape[1]` is the batch dimension (the collate
        # function produces `[seq_len, batch]`), so this counts samples, not
        # tokens as the original comment claimed — confirm intent.
        if self.mode.is_train:
            tracker.add_global_step(data.shape[1])

        # Get model outputs.
        # It's returning a tuple for states when using RNNs.
        # This is not implemented yet. 😜
        output, *_ = self.model(data)

        # Calculate and log loss
        loss = self.loss_func(output, target)
        tracker.add("loss.", loss)

        # Calculate and log accuracy
        self.accuracy(output, target)
        self.accuracy.track()

        # Train the model
        if self.mode.is_train:
            # Calculate gradients
            loss.backward()
            # Clip gradients
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip)
            # Take optimizer step
            self.optimizer.step()
            # Log the model parameters and gradients on last batch of every epoch
            if batch_idx.is_last and self.is_log_model_params_grads:
                tracker.add('model', self.model)
            # Clear the gradients
            self.optimizer.zero_grad()

        # Save the tracked metrics
        tracker.save()
@option(NLPClassificationConfigs.optimizer)
def _optimizer(c: NLPClassificationConfigs):
    """
    ### Default [optimizer configurations](../optimizers/configs.html)

    Builds an Adam optimizer over the model parameters.
    """
    conf = OptimizerConfigs()
    conf.parameters = c.model.parameters()
    conf.optimizer = 'Adam'
    conf.d_model = c.d_model
    return conf
@option(NLPClassificationConfigs.tokenizer)
def basic_english():
    """
    ### Basic english tokenizer

    The experiment defaults to the character-level tokenizer; switch to this
    one by setting,

    ```
    'tokenizer': 'basic_english',
    ```

    in the configurations dictionary when starting the experiment.
    """
    from torchtext.data import get_tokenizer
    tokenizer = get_tokenizer('basic_english')
    return tokenizer
def character_tokenizer(x: str):
    """
    ### Character level tokenizer

    Splits the text into a list of its individual characters.
    """
    return [char for char in x]
@option(NLPClassificationConfigs.tokenizer)
def character():
    """
    Character level tokenizer configuration.

    Registers the character-level tokenizer function as the `tokenizer`
    config option (the default for this trainer).
    """
    return character_tokenizer
@option(NLPClassificationConfigs.n_tokens)
def _n_tokens(c: NLPClassificationConfigs):
    """
    Number of tokens: the vocabulary size plus two reserved ids.
    """
    # Ids `len(vocab)` and `len(vocab) + 1` are reserved for the padding
    # token and the classifier (`[CLS]`) token respectively.
    reserved = 2
    return len(c.vocab) + reserved
class CollateFunc:
    """
    ## Function to load data into batches

    Maps each text to token ids, pads/truncates to `seq_len`, and places the
    `[CLS]` token at the final position of every sequence.
    """

    def __init__(self, tokenizer, vocab: Vocab, seq_len: int, padding_token: int, classifier_token: int):
        """
        * `tokenizer` is the tokenizer function
        * `vocab` is the vocabulary
        * `seq_len` is the length of the sequence
        * `padding_token` is the token used for padding when the `seq_len` is larger than the text length
        * `classifier_token` is the `[CLS]` token which we set at end of the input
        """
        self.tokenizer = tokenizer
        self.vocab = vocab
        self.seq_len = seq_len
        self.padding_token = padding_token
        self.classifier_token = classifier_token

    def __call__(self, batch):
        """
        * `batch` is the list of `(label, text)` pairs collected by the `DataLoader`
        """
        batch_size = len(batch)
        # Token-id tensor of shape `[seq_len, batch_size]`, pre-filled with padding
        data = torch.full((self.seq_len, batch_size), self.padding_token, dtype=torch.long)
        # Class labels; dataset labels are 1-based, shifted here to 0-based
        labels = torch.zeros(batch_size, dtype=torch.long)

        for sample_idx, (raw_label, raw_text) in enumerate(batch):
            # Set the label
            labels[sample_idx] = int(raw_label) - 1
            # Tokenize, map to vocabulary ids, and truncate to `seq_len`
            token_ids = [self.vocab[token] for token in self.tokenizer(raw_text)][:self.seq_len]
            # Copy into the column for this sample
            data[:len(token_ids), sample_idx] = data.new_tensor(token_ids)

        # The final position of every sequence is the `[CLS]` token
        data[-1, :] = self.classifier_token

        return data, labels
@option([NLPClassificationConfigs.n_classes,
         NLPClassificationConfigs.vocab,
         NLPClassificationConfigs.train_loader,
         NLPClassificationConfigs.valid_loader])
def ag_news(c: NLPClassificationConfigs):
    """
    ### AG News dataset

    This loads the AG News dataset and sets the values for
    `n_classes`, `vocab`, `train_loader`, and `valid_loader`.
    """

    # Get training and validation datasets
    train, valid = torchtext.datasets.AG_NEWS(root=str(lab.get_data_path() / 'ag_news'), split=('train', 'test'))

    # Load data to memory
    with monit.section('Load data'):
        from labml_nn.utils import MapStyleDataset

        # Create [map-style datasets](../utils.html#map_style_dataset)
        train, valid = MapStyleDataset(train), MapStyleDataset(valid)

    # Get tokenizer
    tokenizer = c.tokenizer

    # Create a counter
    counter = Counter()
    # Collect tokens from training dataset
    for (label, line) in train:
        counter.update(tokenizer(line))
    # Collect tokens from validation dataset.
    # NOTE(review): building the vocabulary from the validation split too is a
    # (mild) form of leakage — presumably acceptable for character-level
    # tokens, but confirm before reusing with word-level tokenization.
    for (label, line) in valid:
        counter.update(tokenizer(line))
    # Create vocabulary
    vocab = torchtext.vocab.vocab(counter, min_freq=1)

    # Create training data loader; ids `len(vocab)` and `len(vocab) + 1` are
    # the padding and `[CLS]` tokens (see `CollateFunc` and `_n_tokens`)
    train_loader = DataLoader(train, batch_size=c.batch_size, shuffle=True,
                              collate_fn=CollateFunc(tokenizer, vocab, c.seq_len, len(vocab), len(vocab) + 1))
    # Create validation data loader
    valid_loader = DataLoader(valid, batch_size=c.batch_size, shuffle=True,
                              collate_fn=CollateFunc(tokenizer, vocab, c.seq_len, len(vocab), len(vocab) + 1))

    # Return `n_classes`, `vocab`, `train_loader`, and `valid_loader`
    return 4, vocab, train_loader, valid_loader
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/experiments/arithmetic_dataset.py | labml_nn/experiments/arithmetic_dataset.py | """
---
title: Arithmetic Dataset
summary: >
This creates arithmetic problems.
---
*This is based on code by [Georges Harik (@gharik)](https://twitter.com/gharik).*
"""
import random
import string
from typing import List
import torch
from labml.logger import Text
from torch.utils.data import DataLoader, Dataset
from labml import monit, logger, tracker
from labml.configs import option
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs, transpose_batch
class ArithmeticDataset(Dataset):
    """
    ## Arithmetic Dataset

    This creates arithmetic addition problems and solutions with workings.
    We've only implemented addition so far.

    It's based on a character level tokenization.
    """

    def __init__(self, seq_len: int, max_digits: int, n_sequences: int):
        """
        :param seq_len: is the sequence length of generated math problems.
            We fill as many problems as possible upto this length
        :param max_digits: is the maximum number of digits in the operand integers
        :param n_sequences: is the number of sequences per epoch
        """
        self.n_sequences = n_sequences
        self.max_digits = max_digits
        self.seq_len = seq_len
        # Token id to string
        self.itos = list(string.digits + 'xe =\n?+;')
        # Character to token id
        self.stoi = {c: i for i, c in enumerate(self.itos)}

    @staticmethod
    def make_int(n_digits: int):
        """
        Generates an integer with `n_digits` number of digits.

        The leading digit is drawn from $1 \dots 9$ and the remaining digits
        from $0 \dots 9$. Note that `random.randrange` excludes its upper
        bound, so the bounds below must be `10` — using `11` would sometimes
        produce the invalid "digit" `10` and an integer with more digits than
        requested.
        """
        res = 0
        for i in range(n_digits):
            # Leading digit can't be zero; `randrange` upper bound is exclusive
            d = random.randrange(1, 10) if i == 0 else random.randrange(0, 10)
            res = res * 10 + d
        return res

    @staticmethod
    def get_add_explanation(x: int, y: int):
        """
        Generates the workings for `x + y` as long addition, digit by digit,
        where `e` marks the power of ten.

        For example for `11 + 29` it generates
        `1e0+9e0+0e0==10e0 1e1+2e1+1e1==4e1`.
        """
        carry = 0
        e = 0
        explanation = []
        # Process one decimal place per iteration until both operands
        # and the carry are exhausted
        while x > 0 or y > 0 or carry > 0:
            rx, ry = x % 10, y % 10
            total = rx + ry + carry
            explanation.append(f"{rx}e{e}+{ry}e{e}+{carry}e{e}=={total}e{e}")
            x, y, carry = x // 10, y // 10, total // 10
            e += 1
        return ' '.join(explanation)

    def make_add_problem(self):
        """
        Creates an arithmetic addition problem with workings and answer.
        """
        # Two random operands, each with up to `max_digits` digits
        x = self.make_int(n_digits=random.randrange(1, self.max_digits + 1))
        y = self.make_int(n_digits=random.randrange(1, self.max_digits + 1))
        # Workings for the addition
        explanation = self.get_add_explanation(x, y)
        return f"x={x}+{y}; {explanation} x=={x + y}\n"

    def get_qa(self):
        """
        Get arithmetic problem and answer. This is used for evaluation.
        """
        x = self.make_int(n_digits=random.randrange(1, self.max_digits + 1))
        y = self.make_int(n_digits=random.randrange(1, self.max_digits + 1))
        return f'x={x}+{y};', f'{x + y}'

    def get_packed_math_input(self):
        """
        Generate multiple problems and pack them into a sequence.

        Problems are appended (each prefixed with `?`) until the encoded
        sequence is longer than `seq_len`.
        """
        s_enc = []
        while len(s_enc) <= self.seq_len:
            s_part = self.make_add_problem()
            s_part_enc = self.encode('?' + s_part)
            s_enc = s_enc + s_part_enc
        return s_enc

    def encode(self, s: str):
        """
        Encode a given string to a list of token ids
        """
        return [self.stoi[c] for c in s]

    def decode(self, arr: List[int]):
        """
        Decode a list of token ids back to a string
        """
        return ''.join([self.itos[c] for c in arr])

    def __getitem__(self, idx: int):
        """
        Get an input and target pair for auto-regressive modelling.

        The target is the input shifted by one token.
        """
        s = torch.tensor(self.get_packed_math_input())
        return s[:self.seq_len], s[1:self.seq_len + 1]

    def __len__(self):
        """
        Number of sequences per epoch
        """
        return self.n_sequences
class ArithmeticAutoregression(NLPAutoRegressionConfigs):
    """
    ## Arithmetic Task Experiment Configurations

    Extends the auto-regressive NLP trainer; evaluation is done by greedy
    sampling on freshly generated problems instead of a validation set.
    """
    # Maximum number of digits per operand integer
    max_digits: int = 4
    # Number of training sequences per epoch
    train_sequences_per_epoch: int = 2 ** 12
    # Training data loader (resolved by name via the `@option` registry)
    train_loader: DataLoader = 'arithmetic_train_loader'
    # Number of problems in evaluation
    n_tests: int = 64
    # No need of a validation dataset
    validator = None
    # Number of times to run evaluations per epoch
    inner_iterations = 4
    # Number of tokens in the vocabulary (size of the dataset's character vocabulary)
    n_tokens = len(ArithmeticDataset(1, 1, 1).itos)

    @torch.no_grad()
    def sample(self):
        """
        ### Evaluation

        We use the sampling function to evaluate the model on a set of problems.
        Sampling is greedy; the question part of each sequence is forced, and
        the generated answer after `x==` is compared to the true sum.
        """

        # Skip in the first epoch
        if self.training_loop.idx < 1:
            return

        # Create a dataset to generate problems
        dataset = ArithmeticDataset(self.seq_len, self.max_digits, 1)
        # Get a set of problems and answers
        qa = [dataset.get_qa() for _ in range(self.n_tests)]
        # Collect the problems only
        questions = [p[0] for p in qa]

        # Create a tensor with only the initial token of each question.
        # `data` is sequence-first: shape `[seq_len, n_tests]`
        data = torch.tensor([[dataset.stoi[p[0]] for p in questions]])
        # Move to device
        data = data.to(self.device)

        # Number of sequences that have completed
        finished = torch.zeros((len(questions),)).bool().to(self.device)
        # Token id of the new line character - this marks end of the answer
        new_line = dataset.stoi['\n']

        # Sampled results, seeded with the first character of each question
        results = [p[0] for p in questions]

        # Sample upto sequence length
        for i in monit.iterate('Sample', self.seq_len - 1):
            # If all the sequences have completed we skip the remaining work
            # (`continue`, not `break`, so the monitor iterator runs to completion)
            if finished.sum() == len(finished):
                continue

            # Get the model output
            output, *_ = self.model(data)
            # Get the model prediction (greedy) at the last position
            output = output[-1].argmax(dim=-1)

            # Find which sequences have finished
            finished = finished | (output == new_line)
            # Skip if all have finished
            if finished.sum() == len(finished):
                continue

            # Override the prediction with the question text while it lasts
            # (teacher-force the prompt)
            for j, p in enumerate(questions):
                if len(p) > i + 1:
                    output[j] = dataset.stoi[p[i + 1]]

            # Add the next token to the input
            data = torch.cat([data, output[None, :]], dim=0)

            # Append the sampled characters to the results
            for j, c in enumerate(output):
                results[j] += dataset.itos[c]

        # Discard everything after the answer (first new line) in the results
        results = [r.split('\n')[0] for r in results]

        # Log a sample: question, then the workings and answer
        res_sample = results[0].split(';')
        logger.log([(res_sample[0], Text.key), (';', Text.subtle), (';'.join(res_sample[1:]), Text.none)])

        # Extract the answers (the text after the final `x==`)
        results = [r.split('x==')[-1] for r in results]

        # Count the number of correct answers
        correct = 0
        for r, _qa in zip(results, qa):
            if r == _qa[1]:
                correct += 1

        # Log the fraction of correctly solved problems
        tracker.save('score', correct / len(results))
@option(ArithmeticAutoregression.train_loader)
def arithmetic_train_loader(c: ArithmeticAutoregression):
    """
    ### Training data loader

    Wraps a freshly constructed `ArithmeticDataset` in a PyTorch
    `DataLoader` that collates batches sequence-first via `transpose_batch`.
    """
    # Problems are generated on the fly; one epoch is
    # `train_sequences_per_epoch` packed sequences
    dataset = ArithmeticDataset(c.seq_len, c.max_digits, c.train_sequences_per_epoch)
    return DataLoader(dataset,
                      batch_size=c.batch_size,
                      collate_fn=transpose_batch,
                      num_workers=4)
def _test():
    """
    Inspect a few generated problems by printing one packed sequence.
    """
    # Small dataset instance, just for a quick look
    ds = ArithmeticDataset(256, 8, 10)
    # Decode a packed sequence of problems back to text and print it
    print(ds.decode(ds.get_packed_math_input()))


#
if __name__ == '__main__':
    _test()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/experiments/mnist.py | labml_nn/experiments/mnist.py | """
---
title: MNIST Experiment
summary: >
This is a reusable trainer for MNIST dataset
---
# MNIST Experiment
"""
import torch.nn as nn
import torch.utils.data
from labml import tracker
from labml.configs import option
from labml_nn.helpers.datasets import MNISTConfigs as MNISTDatasetConfigs
from labml_nn.helpers.device import DeviceConfigs
from labml_nn.helpers.metrics import Accuracy
from labml_nn.helpers.trainer import TrainValidConfigs, BatchIndex
from labml_nn.optimizers.configs import OptimizerConfigs
class MNISTConfigs(MNISTDatasetConfigs, TrainValidConfigs):
    """
    <a id="MNISTConfigs"></a>

    ## Trainer configurations

    A reusable trainer for MNIST classification, combining the dataset
    configurations with the generic train/validate loop.
    """

    # Optimizer (default is set by the `_optimizer` option below)
    optimizer: torch.optim.Adam
    # Training device
    device: torch.device = DeviceConfigs()
    # Classification model
    model: nn.Module
    # Number of epochs to train for
    epochs: int = 10

    # Number of times to switch between training and validation within an epoch
    inner_iterations = 10

    # Accuracy function
    accuracy = Accuracy()
    # Loss function
    loss_func = nn.CrossEntropyLoss()

    def init(self):
        """
        ### Initialization
        """
        # Set tracker configurations
        tracker.set_scalar("loss.*", True)
        tracker.set_scalar("accuracy.*", True)
        # Add accuracy as a state module.
        # The name is probably confusing, since it's meant to store
        # states between training and validation for RNNs.
        # This will keep the accuracy metric stats separate for training and validation.
        self.state_modules = [self.accuracy]

    def step(self, batch: any, batch_idx: BatchIndex):
        """
        ### Training or validation step

        :param batch: a `(data, target)` pair from the data loader
        :param batch_idx: position of the batch within the epoch
        """
        # Training/Evaluation mode
        self.model.train(self.mode.is_train)

        # Move data to the device
        data, target = batch[0].to(self.device), batch[1].to(self.device)

        # Update global step (number of samples processed) when in training mode
        if self.mode.is_train:
            tracker.add_global_step(len(data))

        # Get model outputs.
        output = self.model(data)

        # Calculate and log loss
        loss = self.loss_func(output, target)
        tracker.add("loss.", loss)

        # Calculate and log accuracy
        self.accuracy(output, target)
        self.accuracy.track()

        # Train the model
        if self.mode.is_train:
            # Calculate gradients
            loss.backward()
            # Take optimizer step
            self.optimizer.step()
            # Log the model parameters and gradients on last batch of every epoch
            if batch_idx.is_last:
                tracker.add('model', self.model)
            # Clear the gradients
            self.optimizer.zero_grad()

        # Save the tracked metrics
        tracker.save()
@option(MNISTConfigs.optimizer)
def _optimizer(c: MNISTConfigs):
    """
    ### Default optimizer configurations

    Adam over all model parameters, via the reusable
    [`OptimizerConfigs`](../optimizers/configs.html).
    """
    conf = OptimizerConfigs()
    # Optimize every parameter of the model
    conf.parameters = c.model.parameters()
    # Adam by default
    conf.optimizer = 'Adam'
    return conf
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/experiments/__init__.py | labml_nn/experiments/__init__.py | python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false | |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/experiments/cifar10.py | labml_nn/experiments/cifar10.py | """
---
title: CIFAR10 Experiment
summary: >
This is a reusable trainer for CIFAR10 dataset
---
# CIFAR10 Experiment
"""
from typing import List
import torch.nn as nn
from labml import lab
from labml.configs import option
from labml_nn.helpers.datasets import CIFAR10Configs as CIFAR10DatasetConfigs
from labml_nn.experiments.mnist import MNISTConfigs
class CIFAR10Configs(CIFAR10DatasetConfigs, MNISTConfigs):
    """
    ## Configurations

    This extends from [CIFAR 10 dataset configurations](../helpers/datasets.html)
    and [`MNISTConfigs`](mnist.html), reusing the MNIST training loop for
    CIFAR-10 classification.
    """
    # Use CIFAR10 dataset by default
    dataset_name: str = 'CIFAR10'
@option(CIFAR10Configs.train_dataset)
def cifar10_train_augmented():
    """
    ### Augmented CIFAR 10 train dataset

    Random padded crops and horizontal flips for data augmentation,
    followed by tensor conversion and per-channel normalization.
    """
    from torchvision.datasets import CIFAR10
    from torchvision.transforms import transforms

    # Augmentations are applied only to the training split
    transform = transforms.Compose([
        # Pad by 4 pixels and take a random 32x32 crop
        transforms.RandomCrop(32, padding=4),
        # Random horizontal flip
        transforms.RandomHorizontalFlip(),
        # Convert to tensor and normalize each channel
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    return CIFAR10(str(lab.get_data_path()), train=True, download=True, transform=transform)
@option(CIFAR10Configs.valid_dataset)
def cifar10_valid_no_augment():
    """
    ### Non-augmented CIFAR 10 validation dataset

    Only tensor conversion and the same normalization as training —
    no augmentation for evaluation.
    """
    from torchvision.datasets import CIFAR10
    from torchvision.transforms import transforms

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    return CIFAR10(str(lab.get_data_path()), train=False, download=True, transform=transform)
class CIFAR10VGGModel(nn.Module):
    """
    ### VGG model for CIFAR-10 classification

    Five convolutional blocks, each ended by $2 \times 2$ max pooling,
    followed by a linear classification head.
    """

    def conv_block(self, in_channels, out_channels) -> nn.Module:
        """
        One $3 \times 3$ convolution followed by a ReLU activation.
        (Kept as a method so subclasses can override it, e.g. to add
        normalization layers.)
        """
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
        )

    def __init__(self, blocks: List[List[int]]):
        """
        :param blocks: output channel counts of the convolutions, grouped
            into five pooling blocks
        """
        super().__init__()

        # Five $2 \times 2$ pooling layers reduce the $32 \times 32$
        # CIFAR-10 image to a $1 \times 1$ feature map
        assert len(blocks) == 5

        layers = []
        # RGB input
        in_channels = 3
        for channel_sizes in blocks:
            # Convolution + activation for every entry in the block
            for out_channels in channel_sizes:
                layers.extend(self.conv_block(in_channels, out_channels))
                in_channels = out_channels
            # Halve the resolution at the end of each block
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))

        # The convolutional part of the network
        self.layers = nn.Sequential(*layers)
        # Final logits layer
        self.fc = nn.Linear(in_channels, 10)

    def forward(self, x):
        # Run the VGG layers
        x = self.layers(x)
        # Flatten the $1 \times 1$ feature map for the classifier
        x = x.view(x.shape[0], -1)
        # Class logits
        return self.fc(x)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/experiments/nlp_autoregression.py | labml_nn/experiments/nlp_autoregression.py | """
---
title: NLP auto-regression trainer
summary: >
This is a reusable trainer for auto-regressive tasks
---
# Auto-regressive NLP model trainer
"""
from typing import Callable
import torch
import torch.nn as nn
from labml import lab, monit, logger, tracker
from labml.configs import option
from labml.logger import Text
from labml_nn.helpers.datasets import TextDataset, SequentialDataLoader, SequentialUnBatchedDataset, TextFileDataset
from labml_nn.helpers.device import DeviceConfigs
from labml_nn.helpers.metrics import Accuracy
from labml_nn.helpers.trainer import TrainValidConfigs, BatchIndex
from labml_nn.optimizers.configs import OptimizerConfigs
from torch.utils.data import DataLoader, RandomSampler
class CrossEntropyLoss(nn.Module):
    """
    ### Cross entropy loss

    Flattens all leading dimensions of the logits and targets before
    applying `nn.CrossEntropyLoss`, so it accepts per-token logits of any
    leading shape.
    """

    def __init__(self):
        super().__init__()
        self.loss = nn.CrossEntropyLoss()

    def forward(self, outputs, targets):
        # Collapse everything but the vocabulary dimension
        n_classes = outputs.shape[-1]
        flat_outputs = outputs.view(-1, n_classes)
        flat_targets = targets.view(-1)
        return self.loss(flat_outputs, flat_targets)
class NLPAutoRegressionConfigs(TrainValidConfigs):
    """
    <a id="NLPAutoRegressionConfigs"></a>

    ## Trainer configurations

    This has the basic configurations for NLP auto-regressive task training.
    All the properties are configurable; string values refer to named
    `@option` providers defined below.
    """

    # Optimizer (default set by the `_optimizer` option below)
    optimizer: torch.optim.Adam
    # Training device
    device: torch.device = DeviceConfigs()

    # Autoregressive model
    model: nn.Module
    # Text dataset
    text: TextDataset
    # Batch size
    batch_size: int = 16
    # Length of the sequence, or context size
    seq_len: int = 512
    # Number of token in vocabulary (defaults to the dataset's vocabulary size)
    n_tokens: int
    # Tokenizer (option name; character level by default)
    tokenizer: Callable = 'character'

    # Text prompt to start sampling (for illustration)
    prompt: str
    # The token separator when sampling (blank for character level tokenization)
    prompt_separator: str

    # Whether to periodically save models
    is_save_models = True

    # Loss function (flattens sequence and batch dimensions)
    loss_func = CrossEntropyLoss()
    # Accuracy function
    accuracy = Accuracy()
    # Model embedding size
    d_model: int = 512
    # Gradient clipping
    grad_norm_clip: float = 1.0

    # Training data loader
    train_loader: DataLoader = 'shuffled_train_loader'
    # Validation data loader
    valid_loader: DataLoader = 'shuffled_valid_loader'

    # Data loaders shuffle with replacement
    dataloader_shuffle_with_replacement: bool = False

    # Whether to log model parameters and gradients (once per epoch).
    # These are summarized stats per layer, but it could still lead
    # to many indicators for very deep networks.
    is_log_model_params_grads: bool = False

    # Whether to log model activations (once per epoch).
    # These are summarized stats per layer, but it could still lead
    # to many indicators for very deep networks.
    is_log_model_activations: bool = False

    def init(self):
        """
        ### Initialization
        """
        # Set tracker configurations
        tracker.set_scalar("accuracy.*", True)
        tracker.set_scalar("loss.*", True)
        tracker.set_text("sampled", False)
        # Add accuracy as a state module.
        # The name is probably confusing, since it's meant to store
        # states between training and validation for RNNs.
        # This will keep the accuracy metric stats separate for training and validation.
        self.state_modules = [self.accuracy]

    def other_metrics(self, output: torch.Tensor, target: torch.Tensor):
        """Override to calculate and log other metrics"""
        pass

    def step(self, batch: any, batch_idx: BatchIndex):
        """
        ### Training or validation step

        :param batch: a `(data, target)` pair; both are sequence-first,
            shape `[seq_len, batch_size]` (see `transpose_batch`)
        :param batch_idx: position of the batch within the epoch
        """

        # Set training/eval mode
        self.model.train(self.mode.is_train)

        # Move data to the device
        data, target = batch[0].to(self.device), batch[1].to(self.device)

        # Update global step (number of tokens processed) when in training mode
        if self.mode.is_train:
            tracker.add_global_step(data.shape[0] * data.shape[1])

        # Get model outputs.
        # It's returning a tuple for states when using RNNs.
        # This is not implemented yet. 😜
        output, *_ = self.model(data)

        # Calculate and log loss
        loss = self.loss_func(output, target)
        tracker.add("loss.", loss)

        # Calculate and log accuracy
        self.accuracy(output, target)
        self.accuracy.track()

        self.other_metrics(output, target)

        # Train the model
        if self.mode.is_train:
            # Calculate gradients
            loss.backward()
            # Clip gradients
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip)
            # Take optimizer step
            self.optimizer.step()
            # Log the model parameters and gradients on last batch of every epoch
            if batch_idx.is_last and self.is_log_model_params_grads:
                tracker.add('model', self.model)
            # Clear the gradients
            self.optimizer.zero_grad()

        # Save the tracked metrics
        tracker.save()

    def sample(self):
        """
        ### Sampling function to generate samples periodically while training

        Greedy decoding: starting from `prompt`, repeatedly re-encodes the
        whole text and appends the most likely next token.
        """

        # Starting prompt
        prompt = self.prompt
        # Collect output for printing
        log = [(prompt, Text.subtle)]
        # Sample 25 tokens
        for i in monit.iterate('Sample', 25):
            # Tokenize the prompt; `unsqueeze(-1)` adds a batch dimension of 1
            data = self.text.text_to_i(prompt).unsqueeze(-1)
            data = data.to(self.device)
            # Get the model output
            output, *_ = self.model(data)
            # Get the model prediction (greedy)
            output = output.argmax(dim=-1).squeeze()
            # Add the prediction to prompt
            prompt += self.prompt_separator + self.text.itos[output[-1]]
            # Add the prediction for logging
            log += [(self.prompt_separator + self.text.itos[output[-1]], Text.value)]

        tracker.add({'sampled': prompt})
        # Print the sampled output
        logger.log(log)
@option(NLPAutoRegressionConfigs.optimizer)
def _optimizer(c: NLPAutoRegressionConfigs):
    """
    ### Default [optimizer configurations](../optimizers/configs.html)

    Adam over all model parameters.
    """
    conf = OptimizerConfigs()
    # Optimize every parameter of the model
    conf.parameters = c.model.parameters()
    # Adam by default
    conf.optimizer = 'Adam'
    # Forward the model embedding size to the optimizer configurations
    conf.d_model = c.d_model
    return conf
@option(NLPAutoRegressionConfigs.n_tokens)
def _n_tokens(c: NLPAutoRegressionConfigs):
    """
    Default for the vocabulary size: take it from the text dataset.
    """
    return c.text.n_tokens
@option(NLPAutoRegressionConfigs.tokenizer)
def basic_english():
    """
    ### Basic english tokenizer

    We use character level tokenizer in this experiment.
    You can switch by setting,

    ```
    'tokenizer': 'basic_english',
    ```

    in the configurations dictionary when starting the experiment.
    """
    # Imported lazily so torchtext is only required when this option is selected
    from torchtext.data import get_tokenizer
    return get_tokenizer('basic_english')
def character_tokenizer(x: str):
    """
    ### Character level tokenizer

    Splits a string into a list of its individual characters.
    """
    return [c for c in x]
@option(NLPAutoRegressionConfigs.tokenizer)
def character():
    """
    ### Character level tokenizer configuration

    Registers `character_tokenizer` as the default `tokenizer` option.
    """
    return character_tokenizer
@option(NLPAutoRegressionConfigs.text)
def tiny_shakespeare(c: NLPAutoRegressionConfigs):
    """
    ### Tiny Shakespeare dataset

    It will download from the url if not present in the local data path.
    """
    return TextFileDataset(
        lab.get_data_path() / 'tiny_shakespeare.txt',
        c.tokenizer,
        url='https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt')
@option(NLPAutoRegressionConfigs.train_loader)
def sequential_train_loader(c: NLPAutoRegressionConfigs):
    """
    ### Sequential training data loader

    Alternative to the default `shuffled_train_loader`.
    """
    return SequentialDataLoader(text=c.text.train,
                                dataset=c.text,
                                batch_size=c.batch_size,
                                seq_len=c.seq_len)
@option(NLPAutoRegressionConfigs.valid_loader)
def sequential_valid_loader(c: NLPAutoRegressionConfigs):
    """
    ### Sequential validation data loader

    Alternative to the default `shuffled_valid_loader`.
    """
    return SequentialDataLoader(text=c.text.valid,
                                dataset=c.text,
                                batch_size=c.batch_size,
                                seq_len=c.seq_len)
def transpose_batch(batch):
    """
    ### Transpose batch

    `DataLoader` collects batches along the first (batch) dimension;
    stack along `dim=1` instead so the result is sequence-first,
    shape `[seq_len, batch_size]`.
    """
    # Unzip the list of `(src, tgt)` pairs
    sources, targets = zip(*batch)
    # Stack each along the second dimension
    return torch.stack(sources, dim=1), torch.stack(targets, dim=1)
@option(NLPAutoRegressionConfigs.train_loader)
def shuffled_train_loader(c: NLPAutoRegressionConfigs):
    """
    ### Shuffled training data loader
    """
    # Un-batched dataset over the training split
    data = SequentialUnBatchedDataset(text=c.text.train,
                                      dataset=c.text,
                                      seq_len=c.seq_len)
    # Shuffle via a random sampler, optionally with replacement;
    # batches are collated sequence-first by `transpose_batch`
    return DataLoader(data,
                      batch_size=c.batch_size,
                      collate_fn=transpose_batch,
                      sampler=RandomSampler(data, replacement=c.dataloader_shuffle_with_replacement))
@option(NLPAutoRegressionConfigs.valid_loader)
def shuffled_valid_loader(c: NLPAutoRegressionConfigs):
    """
    ### Shuffled validation data loader
    """
    # Un-batched dataset over the validation split
    data = SequentialUnBatchedDataset(text=c.text.valid,
                                      dataset=c.text,
                                      seq_len=c.seq_len)
    # Shuffle via a random sampler, optionally with replacement;
    # batches are collated sequence-first by `transpose_batch`
    return DataLoader(data,
                      batch_size=c.batch_size,
                      collate_fn=transpose_batch,
                      sampler=RandomSampler(data, replacement=c.dataloader_shuffle_with_replacement))
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/unet/experiment.py | labml_nn/unet/experiment.py | """
---
title: Training a U-Net on Carvana dataset
summary: >
Code for training a U-Net model on Carvana dataset.
---
# Training [U-Net](index.html)
This trains a [U-Net](index.html) model on [Carvana dataset](carvana.html).
You can find the download instructions
[on Kaggle](https://www.kaggle.com/competitions/carvana-image-masking-challenge/data).
Save the training images inside `carvana/train` folder and the masks in `carvana/train_masks` folder.
For simplicity, we do not do a training and validation split.
"""
import numpy as np
import torchvision.transforms.functional
import torch
import torch.utils.data
from labml import lab, tracker, experiment, monit
from labml.configs import BaseConfigs
from labml_nn.helpers.device import DeviceConfigs
from labml_nn.unet import UNet
from labml_nn.unet.carvana import CarvanaDataset
from torch import nn
class Configs(BaseConfigs):
    """
    ## Configurations

    Trainer for a [U-Net](index.html) on the [Carvana dataset](carvana.html).
    """
    # Device to train the model on.
    # [`DeviceConfigs`](../helpers/device.html)
    # picks up an available CUDA device or defaults to CPU.
    device: torch.device = DeviceConfigs()

    # [U-Net](index.html) model
    model: UNet

    # Number of channels in the image. $3$ for RGB.
    image_channels: int = 3
    # Number of channels in the output mask. $1$ for binary mask.
    mask_channels: int = 1

    # Batch size
    batch_size: int = 1
    # Learning rate
    learning_rate: float = 2.5e-4

    # Number of training epochs
    epochs: int = 4

    # Dataset
    dataset: CarvanaDataset
    # Dataloader
    data_loader: torch.utils.data.DataLoader

    # Loss function (binary cross entropy on sigmoid probabilities)
    loss_func = nn.BCELoss()
    # Sigmoid function for binary classification
    sigmoid = nn.Sigmoid()

    # Adam optimizer
    optimizer: torch.optim.Adam

    def init(self):
        """
        ### Initialization

        Builds the dataset, model, data loader and optimizer.
        """
        # Initialize the [Carvana dataset](carvana.html)
        self.dataset = CarvanaDataset(lab.get_data_path() / 'carvana' / 'train',
                                      lab.get_data_path() / 'carvana' / 'train_masks')
        # Initialize the model
        self.model = UNet(self.image_channels, self.mask_channels).to(self.device)

        # Create dataloader
        self.data_loader = torch.utils.data.DataLoader(self.dataset, self.batch_size,
                                                       shuffle=True, pin_memory=True)
        # Create optimizer
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)

        # Image logging
        tracker.set_image("sample", True)

    @torch.no_grad()
    def sample(self, idx=-1):
        """
        ### Sample images

        :param idx: presumably the call index supplied by `monit.mix` —
            unused here; TODO confirm
        """
        # Get a random sample
        x, _ = self.dataset[np.random.randint(len(self.dataset))]
        # Move data to device
        x = x.to(self.device)

        # Get predicted mask (sigmoid of the model logits)
        mask = self.sigmoid(self.model(x[None, :]))
        # Crop the image to the size of the mask
        x = torchvision.transforms.functional.center_crop(x, [mask.shape[2], mask.shape[3]])
        # Log the masked image
        tracker.save('sample', x * mask)

    def train(self):
        """
        ### Train for an epoch
        """
        # Iterate through the dataset.
        # Use [`mix`](https://docs.labml.ai/api/monit.html#labml.monit.mix)
        # to sample $50$ times per epoch.
        for _, (image, mask) in monit.mix(('Train', self.data_loader), (self.sample, list(range(50)))):
            # Increment global step
            tracker.add_global_step()
            # Move data to device
            image, mask = image.to(self.device), mask.to(self.device)

            # Make the gradients zero
            self.optimizer.zero_grad()
            # Get predicted mask logits
            logits = self.model(image)
            # Crop the target mask to the size of the logits. Size of the logits will be smaller if we
            # don't use padding in convolutional layers in the U-Net.
            mask = torchvision.transforms.functional.center_crop(mask, [logits.shape[2], logits.shape[3]])
            # Calculate loss
            loss = self.loss_func(self.sigmoid(logits), mask)
            # Compute gradients
            loss.backward()
            # Take an optimization step
            self.optimizer.step()
            # Track the loss
            tracker.save('loss', loss)

    def run(self):
        """
        ### Training loop
        """
        for _ in monit.loop(self.epochs):
            # Train the model
            self.train()
            # New line in the console
            tracker.new_line()
            # Save the model
            # NOTE(review): no save call follows this comment — confirm whether
            # `experiment.save_checkpoint()` was intended here
def main():
    """
    ### Set up and run the U-Net experiment
    """
    # Create experiment
    experiment.create(name='unet')

    # Create configurations
    conf = Configs()

    # Set configurations. You can override the defaults by passing the values in the dictionary.
    experiment.configs(conf, {})

    # Initialize
    conf.init()

    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})

    # Start and run the training loop
    with experiment.start():
        conf.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/unet/carvana.py | labml_nn/unet/carvana.py | """
---
title: Carvana dataset for the U-Net experiment
summary: >
Carvana dataset for the U-Net experiment.
---
# Carvana Dataset for the [U-Net](index.html) [experiment](experiment.html)
You can find the download instructions
[on Kaggle](https://www.kaggle.com/competitions/carvana-image-masking-challenge/data).
Save the training images inside `carvana/train` folder and the masks in `carvana/train_masks` folder.
"""
from pathlib import Path
import torchvision.transforms.functional
from PIL import Image
import torch.utils.data
from labml import lab
class CarvanaDataset(torch.utils.data.Dataset):
    """
    ## Carvana Dataset

    Pairs each car image with its segmentation mask, matched by file name.
    """

    def __init__(self, image_path: Path, mask_path: Path):
        """
        :param image_path: is the path to the images
        :param mask_path: is the path to the masks
        """
        # Get a dictionary of images by id
        self.images = {p.stem: p for p in image_path.iterdir()}
        # Get a dictionary of masks by id.
        # NOTE(review): assumes each mask file name is the image id plus a
        # 5-character suffix (e.g. `<id>_mask.gif`) — confirm against the dataset layout.
        self.masks = {p.stem[:-5]: p for p in mask_path.iterdir()}

        # Image ids list
        self.ids = list(self.images.keys())

        # Transformations, applied to both the image and the mask.
        # NOTE(review): `Resize` defaults to bilinear interpolation, which blurs
        # mask edges — confirm this is intended for masks rather than nearest-neighbor.
        self.transforms = torchvision.transforms.Compose([
            torchvision.transforms.Resize(572),
            torchvision.transforms.ToTensor(),
        ])

    def __getitem__(self, idx: int):
        """
        #### Get an image and its mask.

        :param idx: is index of the image
        """

        # Get image id
        id_ = self.ids[idx]
        # Load image
        image = Image.open(self.images[id_])
        # Transform image and convert it to a PyTorch tensor
        image = self.transforms(image)
        # Load mask
        mask = Image.open(self.masks[id_])
        # Transform mask and convert it to a PyTorch tensor
        mask = self.transforms(mask)

        # The mask values were not $1$, so we scale it appropriately.
        mask = mask / mask.max()

        # Return the image and the mask
        return image, mask

    def __len__(self):
        """
        #### Size of the dataset
        """
        return len(self.ids)
# Testing code: constructing the dataset checks that the image and mask
# folders exist at the default data path
if __name__ == '__main__':
    ds = CarvanaDataset(lab.get_data_path() / 'carvana' / 'train', lab.get_data_path() / 'carvana' / 'train_masks')
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/unet/__init__.py | labml_nn/unet/__init__.py | """
---
title: U-Net
summary: >
PyTorch implementation and tutorial of U-Net model.
---
# U-Net
This is an implementation of the U-Net model from the paper,
[U-Net: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/abs/1505.04597).
U-Net consists of a contracting path and an expansive path.
The contracting path is a series of convolutional layers and pooling layers,
where the resolution of the feature map gets progressively reduced.
Expansive path is a series of up-sampling layers and convolutional layers
where the resolution of the feature map gets progressively increased.
At every step in the expansive path the corresponding feature map from the contracting path
concatenated with the current feature map.

Here is the [training code](experiment.html) for an experiment that trains a U-Net
on [Carvana dataset](carvana.html).
"""
import torch
import torchvision.transforms.functional
from torch import nn
class DoubleConvolution(nn.Module):
    """
    ### Two $3 \times 3$ Convolution Layers

    Each step in the contracting and expansive paths applies two
    $3 \times 3$ convolutions, each followed by a ReLU activation.

    The U-Net paper used unpadded ($0$ padding) convolutions; here padding
    of $1$ keeps the spatial size unchanged so the final feature map is not
    cropped.
    """

    def __init__(self, in_channels: int, out_channels: int):
        """
        :param in_channels: is the number of input channels
        :param out_channels: is the number of output channels
        """
        super().__init__()

        # First convolution + activation
        self.first = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
        self.act1 = nn.ReLU()
        # Second convolution + activation
        self.second = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
        self.act2 = nn.ReLU()

    def forward(self, x: torch.Tensor):
        # conv -> ReLU -> conv -> ReLU
        return self.act2(self.second(self.act1(self.first(x))))
class DownSample(nn.Module):
    """
    ### Down-sample

    A $2 \times 2$ max pooling halves the feature-map resolution at each
    step of the contracting path.
    """

    def __init__(self):
        super().__init__()
        # $2 \times 2$ max pooling (stride defaults to the kernel size)
        self.pool = nn.MaxPool2d(kernel_size=2)

    def forward(self, x: torch.Tensor):
        return self.pool(x)
class UpSample(nn.Module):
    """
    ### Up-sample

    A $2 \times 2$ transposed convolution ("up-convolution") doubles the
    feature-map resolution at each step of the expansive path.
    """

    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        # Up-convolution: kernel 2, stride 2 doubles height and width
        self.up = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2)

    def forward(self, x: torch.Tensor):
        return self.up(x)
class CropAndConcat(nn.Module):
    """
    ### Crop and Concatenate the feature map

    Center-crops the contracting-path feature map to the current feature
    map's spatial size and concatenates the two along the channel dimension.
    """

    def forward(self, x: torch.Tensor, contracting_x: torch.Tensor):
        """
        :param x: current feature map in the expansive path
        :param contracting_x: corresponding feature map from the contracting path
        """

        # Crop the skip connection to `x`'s height and width
        target_size = [x.shape[2], x.shape[3]]
        cropped = torchvision.transforms.functional.center_crop(contracting_x, target_size)
        # Join along the channel dimension
        return torch.cat([x, cropped], dim=1)
class UNet(nn.Module):
    """
    ## U-Net

    Four contracting steps (features double from 64 to 512), a bottom double
    convolution at 1024 features, and four mirrored expansive steps with skip
    connections from the contracting path.
    """

    def __init__(self, in_channels: int, out_channels: int):
        """
        :param in_channels: number of channels in the input image
        :param out_channels: number of channels in the result feature map
        """
        super().__init__()
        # (in, out) channel pairs for each level of the contracting path
        down_channels = [(in_channels, 64), (64, 128), (128, 256), (256, 512)]
        # (in, out) channel pairs for each level of the expansive path.
        # Inputs are doubled because the skip connection is concatenated in.
        up_channels = [(1024, 512), (512, 256), (256, 128), (128, 64)]

        # Two 3x3 convolutions at each level of the contracting path
        self.down_conv = nn.ModuleList([DoubleConvolution(c_in, c_out) for c_in, c_out in down_channels])
        # One 2x2 max-pool per contracting level
        self.down_sample = nn.ModuleList([DownSample() for _ in down_channels])
        # The two convolution layers at the lowest resolution (the bottom of the U)
        self.middle_conv = DoubleConvolution(512, 1024)
        # One 2x2 up-convolution per expansive level; features are halved
        self.up_sample = nn.ModuleList([UpSample(c_in, c_out) for c_in, c_out in up_channels])
        # Two 3x3 convolutions at each level of the expansive path
        self.up_conv = nn.ModuleList([DoubleConvolution(c_in, c_out) for c_in, c_out in up_channels])
        # Crop-and-concatenate modules for the skip connections
        self.concat = nn.ModuleList([CropAndConcat() for _ in up_channels])
        # Final 1x1 convolution maps 64 features to the output channels
        self.final_conv = nn.Conv2d(64, out_channels, kernel_size=1)

    def forward(self, x: torch.Tensor):
        """
        :param x: input image
        """
        # Outputs of the contracting path, saved for the skip connections
        skip_connections = []

        # Contracting path: convolve, remember the output, then down-sample
        for conv, pool in zip(self.down_conv, self.down_sample):
            x = conv(x)
            skip_connections.append(x)
            x = pool(x)

        # Bottom of the U
        x = self.middle_conv(x)

        # Expansive path: up-sample, merge the matching skip connection, convolve.
        # `pop()` pairs each level with its mirror in the contracting path.
        for up, concat, conv in zip(self.up_sample, self.concat, self.up_conv):
            x = up(x)
            x = concat(x, skip_connections.pop())
            x = conv(x)

        # Final 1x1 convolution
        return self.final_conv(x)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/adaptive_computation/__init__.py | labml_nn/adaptive_computation/__init__.py | """
---
title: Neural Networks with Adaptive Computation
summary: >
A set of PyTorch implementations/tutorials related to adaptive computation
---
# Neural Networks with Adaptive Computation
These are neural network architectures that change the computation complexity based on the
complexity of the input sample.
* 🚧 TODO: Adaptive Computation Time for Recurrent Neural Networks
* [PonderNet: Learning to Ponder](ponder_net/index.html)
"""
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/adaptive_computation/parity.py | labml_nn/adaptive_computation/parity.py | """
---
title: "Parity Task"
summary: >
This creates data for Parity Task from the paper Adaptive Computation Time
for Recurrent Neural Networks
---
# Parity Task
This creates data for Parity Task from the paper
[Adaptive Computation Time for Recurrent Neural Networks](https://arxiv.org/abs/1603.08983).
The input of the parity task is a vector with $0$'s $1$'s and $-1$'s.
The output is the parity of $1$'s - one if there is an odd number of $1$'s and zero otherwise.
The input is generated by making a random number of elements in the vector either $1$ or $-1$'s.
"""
from typing import Tuple
import torch
from torch.utils.data import Dataset
class ParityDataset(Dataset):
    """
    ### Parity dataset

    Generates random parity-task samples on the fly: a vector of 0/+1/-1
    values, labeled 1 when the number of +1 entries is odd and 0 otherwise.
    """

    def __init__(self, n_samples: int, n_elems: int = 64):
        """
        * `n_samples` is the number of samples
        * `n_elems` is the number of elements in the input vector
        """
        self.n_samples = n_samples
        self.n_elems = n_elems

    def __len__(self):
        """
        Size of the dataset
        """
        return self.n_samples

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Generate a random sample (the index is ignored).
        """
        # Start from an all-zero vector
        vec = torch.zeros((self.n_elems,))
        # Pick how many entries will be non-zero: between 1 and n_elems inclusive
        n_active = torch.randint(1, self.n_elems + 1, (1,)).item()
        # Fill the first `n_active` slots with +1/-1 drawn uniformly
        vec[:n_active] = torch.randint(0, 2, (n_active,)) * 2 - 1
        # Shuffle so the non-zero entries land in random positions
        vec = vec[torch.randperm(self.n_elems)]
        # Label: 1 when the count of +1 entries is odd, else 0
        label = (vec == 1.).sum() % 2
        return vec, label
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/adaptive_computation/ponder_net/experiment.py | labml_nn/adaptive_computation/ponder_net/experiment.py | """
---
title: "PonderNet Parity Task Experiment"
summary: >
This trains a PonderNet on the Parity Task
---
# [PonderNet](index.html) [Parity Task](../parity.html) Experiment
This trains a [PonderNet](index.html) on [Parity Task](../parity.html).
"""
from typing import Any
import torch
from torch import nn
from torch.utils.data import DataLoader
from labml import tracker, experiment
from labml_nn.helpers.metrics import AccuracyDirect
from labml_nn.helpers.trainer import SimpleTrainValidConfigs, BatchIndex
from labml_nn.adaptive_computation.parity import ParityDataset
from labml_nn.adaptive_computation.ponder_net import ParityPonderGRU, ReconstructionLoss, RegularizationLoss
class Configs(SimpleTrainValidConfigs):
    """
    Configurations with a
    [simple training loop](../../helpers/trainer.html)

    NOTE(review): the class-level annotated attributes below form the labml
    configuration schema — their names, annotations and order are read by the
    framework, so they must not be renamed or restructured.
    """
    # Number of epochs
    epochs: int = 100
    # Number of batches per epoch
    n_batches: int = 500
    # Batch size
    batch_size: int = 128

    # Model
    model: ParityPonderGRU
    # $L_{Rec}$
    loss_rec: ReconstructionLoss
    # $L_{Reg}$
    loss_reg: RegularizationLoss

    # The number of elements in the input vector.
    # *We keep it low for demonstration; otherwise, training takes a lot of time.
    # Although the parity task seems simple, figuring out the pattern by looking at samples
    # is quite hard.*
    n_elems: int = 8
    # Number of units in the hidden layer (state)
    n_hidden: int = 64
    # Maximum number of steps $N$
    max_steps: int = 20
    # $\lambda_p$ for the geometric distribution $p_G(\lambda_p)$
    lambda_p: float = 0.2
    # Regularization loss $L_{Reg}$ coefficient $\beta$
    beta: float = 0.01
    # Gradient clipping by norm
    grad_norm_clip: float = 1.0

    # Training and validation loaders
    train_loader: DataLoader
    valid_loader: DataLoader

    # Accuracy calculator
    accuracy = AccuracyDirect()

    def init(self):
        """
        Set up tracking, the model, the losses and the data loaders.
        Called by the labml configuration framework before training starts.
        """
        # Print indicators to screen
        tracker.set_scalar('loss.*', True)
        tracker.set_scalar('loss_reg.*', True)
        tracker.set_scalar('accuracy.*', True)
        tracker.set_scalar('steps.*', True)
        # We need to set the metrics to calculate them for the epoch for training and validation
        self.state_modules = [self.accuracy]

        # Initialize the model
        self.model = ParityPonderGRU(self.n_elems, self.n_hidden, self.max_steps).to(self.device)
        # $L_{Rec}$: per-element BCE so the per-step halting weights can be applied
        self.loss_rec = ReconstructionLoss(nn.BCEWithLogitsLoss(reduction='none')).to(self.device)
        # $L_{Reg}$
        self.loss_reg = RegularizationLoss(self.lambda_p, self.max_steps).to(self.device)

        # Training and validation loaders; the datasets generate random samples on the fly
        self.train_loader = DataLoader(ParityDataset(self.batch_size * self.n_batches, self.n_elems),
                                       batch_size=self.batch_size)
        self.valid_loader = DataLoader(ParityDataset(self.batch_size * 32, self.n_elems),
                                       batch_size=self.batch_size)

    def step(self, batch: Any, batch_idx: BatchIndex):
        """
        This method gets called by the trainer for each batch.
        Handles both training and validation; gradient updates only happen
        when `self.mode.is_train` is set.
        """
        # Set the model mode
        self.model.train(self.mode.is_train)

        # Get the input and labels and move them to the model's device
        data, target = batch[0].to(self.device), batch[1].to(self.device)

        # Increment step in training mode
        if self.mode.is_train:
            tracker.add_global_step(len(data))

        # Run the model (`p_sampled` is unused here; only the sampled prediction is needed)
        p, y_hat, p_sampled, y_hat_sampled = self.model(data)

        # Calculate the reconstruction loss
        loss_rec = self.loss_rec(p, y_hat, target.to(torch.float))
        tracker.add("loss.", loss_rec)

        # Calculate the regularization loss
        loss_reg = self.loss_reg(p)
        tracker.add("loss_reg.", loss_reg)

        # $L = L_{Rec} + \beta L_{Reg}$
        loss = loss_rec + self.beta * loss_reg

        # Calculate the expected number of steps taken, $\sum_n n p_n$
        steps = torch.arange(1, p.shape[0] + 1, device=p.device)
        expected_steps = (p * steps[:, None]).sum(dim=0)
        tracker.add("steps.", expected_steps)

        # Call accuracy metric with the thresholded sampled predictions
        self.accuracy(y_hat_sampled > 0, target)

        if self.mode.is_train:
            # Compute gradients
            loss.backward()
            # Clip gradients
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip)
            # Optimizer
            self.optimizer.step()
            # Clear gradients
            self.optimizer.zero_grad()

        #
        tracker.save()
def main():
    """
    Create the `ponder_net` experiment, apply the configuration
    overrides, and run training.
    """
    experiment.create(name='ponder_net')
    conf = Configs()
    # Configuration overrides for the optimizer
    overrides = {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 0.0003,
    }
    experiment.configs(conf, overrides)
    with experiment.start():
        conf.run()


if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/adaptive_computation/ponder_net/__init__.py | labml_nn/adaptive_computation/ponder_net/__init__.py | """
---
title: "PonderNet: Learning to Ponder"
summary: >
A PyTorch implementation/tutorial of PonderNet: Learning to Ponder.
---
# PonderNet: Learning to Ponder
This is a [PyTorch](https://pytorch.org) implementation of the paper
[PonderNet: Learning to Ponder](https://arxiv.org/abs/2107.05407).
PonderNet adapts the computation based on the input.
It changes the number of steps to take on a recurrent network based on the input.
PonderNet learns this with end-to-end gradient descent.
PonderNet has a step function of the form
$$\hat{y}_n, h_{n+1}, \lambda_n = s(x, h_n)$$
where $x$ is the input, $h_n$ is the state, $\hat{y}_n$ is the prediction at step $n$,
and $\lambda_n$ is the probability of halting (stopping) at current step.
$s$ can be any neural network (e.g. LSTM, MLP, GRU, Attention layer).
The unconditioned probability of halting at step $n$ is then,
$$p_n = \lambda_n \prod_{j=1}^{n-1} (1 - \lambda_j)$$
That is the probability of not being halted at any of the previous steps and halting at step $n$.
During inference, we halt by sampling based on the halting probability $\lambda_n$
and get the prediction at the halting layer $\hat{y}_n$ as the final output.
During training, we get the predictions from all the layers and calculate the losses for each of them.
And then take the weighted average of the losses based on the probabilities of getting halted at each layer
$p_n$.
The step function is applied up to a maximum number of steps, denoted by $N$.
The overall loss of PonderNet is
\begin{align}
L &= L_{Rec} + \beta L_{Reg} \\
L_{Rec} &= \sum_{n=1}^N p_n \mathcal{L}(y, \hat{y}_n) \\
L_{Reg} &= \mathop{KL} \Big(p_n \Vert p_G(\lambda_p) \Big)
\end{align}
$\mathcal{L}$ is the normal loss function between target $y$ and prediction $\hat{y}_n$.
$\mathop{KL}$ is the [Kullback–Leibler divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence).
$p_G$ is the [Geometric distribution](https://en.wikipedia.org/wiki/Geometric_distribution) parameterized by
$\lambda_p$. *$\lambda_p$ has nothing to do with $\lambda_n$; we are just sticking to same notation as the paper*.
$$Pr_{p_G(\lambda_p)}(X = k) = (1 - \lambda_p)^k \lambda_p$$.
The regularization loss biases the network towards taking $\frac{1}{\lambda_p}$ steps and incentivizes
non-zero probabilities for all steps; i.e. promotes exploration.
Here is the [training code `experiment.py`](experiment.html) to train a PonderNet on [Parity Task](../parity.html).
"""
from typing import Tuple
import torch
from torch import nn
class ParityPonderGRU(nn.Module):
    """
    ## PonderNet with GRU for Parity Task

    This is a simple model that uses a [GRU Cell](https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html)
    as the step function.

    This model is for the [Parity Task](../parity.html) where the input is a vector of `n_elems`.
    Each element of the vector is either `0`, `1` or `-1` and the output is the parity
    - a binary value that is true if the number of `1`s is odd and false otherwise.

    The prediction of the model is the log probability of the parity being $1$.
    """

    def __init__(self, n_elems: int, n_hidden: int, max_steps: int):
        """
        * `n_elems` is the number of elements in the input vector
        * `n_hidden` is the state vector size of the GRU
        * `max_steps` is the maximum number of steps $N$
        """
        super().__init__()

        self.max_steps = max_steps
        self.n_hidden = n_hidden

        # GRU
        # $$h_{n+1} = s_h(x, h_n)$$
        self.gru = nn.GRUCell(n_elems, n_hidden)
        # $$\hat{y}_n = s_y(h_n)$$
        # We could use a layer that takes the concatenation of $h$ and $x$ as input
        # but we went with this for simplicity.
        self.output_layer = nn.Linear(n_hidden, 1)
        # $$\lambda_n = s_\lambda(h_n)$$
        self.lambda_layer = nn.Linear(n_hidden, 1)
        self.lambda_prob = nn.Sigmoid()
        # An option to set during inference so that computation is actually halted at inference time.
        # When set, the loop below may break early, so the returned `p` and `y` tensors can
        # have fewer than `max_steps` rows.
        self.is_halt = False

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        * `x` is the input of shape `[batch_size, n_elems]`

        This outputs a tuple of four tensors:

        1. $p_1 \dots p_N$ in a tensor of shape `[N, batch_size]`
        2. $\hat{y}_1 \dots \hat{y}_N$ in a tensor of shape `[N, batch_size]` - the log probabilities of the parity being $1$
        3. $p_m$ of shape `[batch_size]`
        4. $\hat{y}_m$ of shape `[batch_size]` where the computation was halted at step $m$
        """
        #
        batch_size = x.shape[0]

        # We get initial state $h_1 = s_h(x)$ by running the GRU once from a zero state
        h = x.new_zeros((x.shape[0], self.n_hidden))
        h = self.gru(x, h)

        # Lists to store $p_1 \dots p_N$ and $\hat{y}_1 \dots \hat{y}_N$
        p = []
        y = []
        # $\prod_{j=1}^{n-1} (1 - \lambda_j)$ - probability of not having halted before step $n$
        un_halted_prob = h.new_ones((batch_size,))

        # A vector to maintain which samples has halted computation (1 = halted)
        halted = h.new_zeros((batch_size,))
        # $p_m$ and $\hat{y}_m$ where the computation was halted at step $m$
        p_m = h.new_zeros((batch_size,))
        y_m = h.new_zeros((batch_size,))

        # Iterate for $N$ steps
        for n in range(1, self.max_steps + 1):
            # The halting probability $\lambda_N = 1$ for the last step,
            # so every sample is guaranteed to halt by step $N$
            if n == self.max_steps:
                lambda_n = h.new_ones(h.shape[0])
            # $\lambda_n = s_\lambda(h_n)$
            else:
                lambda_n = self.lambda_prob(self.lambda_layer(h))[:, 0]
            # $\hat{y}_n = s_y(h_n)$
            y_n = self.output_layer(h)[:, 0]

            # $$p_n = \lambda_n \prod_{j=1}^{n-1} (1 - \lambda_j)$$
            p_n = un_halted_prob * lambda_n
            # Update $\prod_{j=1}^{n-1} (1 - \lambda_j)$
            un_halted_prob = un_halted_prob * (1 - lambda_n)

            # Sample halting based on probability $\lambda_n$;
            # masked so only samples that have not already halted can halt now
            halt = torch.bernoulli(lambda_n) * (1 - halted)

            # Collect $p_n$ and $\hat{y}_n$
            p.append(p_n)
            y.append(y_n)

            # Update $p_m$ and $\hat{y}_m$ based on what was halted at current step $n$
            p_m = p_m * (1 - halt) + p_n * halt
            y_m = y_m * (1 - halt) + y_n * halt

            # Update halted samples
            halted = halted + halt
            # Get next state $h_{n+1} = s_h(x, h_n)$
            h = self.gru(x, h)

            # Stop the computation if all samples have halted
            # (only when `is_halt` is enabled, e.g. at inference time)
            if self.is_halt and halted.sum() == batch_size:
                break

        #
        return torch.stack(p), torch.stack(y), p_m, y_m
class ReconstructionLoss(nn.Module):
    r"""
    ## Reconstruction loss

    $$L_{Rec} = \sum_{n=1}^N p_n \mathcal{L}(y, \hat{y}_n)$$

    The per-step losses are weighted by the halting probabilities $p_n$
    and summed over the steps.
    """

    def __init__(self, loss_func: nn.Module):
        r"""
        * `loss_func` is the per-element loss function $\mathcal{L}$
          (must use `reduction='none'` so the $p_n$ weights can be applied)
        """
        super().__init__()
        self.loss_func = loss_func

    def forward(self, p: torch.Tensor, y_hat: torch.Tensor, y: torch.Tensor):
        r"""
        * `p` is $p_1 \dots p_N$ in a tensor of shape `[N, batch_size]`
        * `y_hat` is $\hat{y}_1 \dots \hat{y}_N$ in a tensor of shape `[N, batch_size, ...]`
        * `y` is the target of shape `[batch_size, ...]`
        """
        # Accumulator for the weighted per-step losses
        total = p.new_tensor(0.)
        # Iterating a tensor walks its first dimension, i.e. the steps
        for step_p, step_y_hat in zip(p, y_hat):
            # Weight the per-sample loss by $p_n$, then average over the batch
            total = total + (step_p * self.loss_func(step_y_hat, y)).mean()
        #
        return total
class RegularizationLoss(nn.Module):
    r"""
    ## Regularization loss

    $$L_{Reg} = \mathop{KL} \Big(p_n \Vert p_G(\lambda_p) \Big)$$

    KL-divergence between the halting distribution $p_1 \dots p_N$ and a
    geometric prior $p_G(\lambda_p)$, which biases the network towards taking
    $1 / \lambda_p$ steps and incentivizes non-zero probabilities for all
    steps; i.e. promotes exploration.
    """

    def __init__(self, lambda_p: float, max_steps: int = 1_000):
        r"""
        * `lambda_p` is $\lambda_p$ - the success probability of the geometric distribution
        * `max_steps` is the highest $N$; we use this to pre-compute $p_G(\lambda_p)$
        """
        super().__init__()

        # Pre-compute $Pr(X = k) = (1 - \lambda_p)^k \lambda_p$ for every step,
        # accumulating the survival probability $(1 - \lambda_p)^k$ iteratively
        p_g = torch.zeros((max_steps,))
        survive = 1.
        for step in range(max_steps):
            p_g[step] = survive * lambda_p
            survive = survive * (1 - lambda_p)

        # Stored as a non-trainable parameter so it moves with the module
        self.p_g = nn.Parameter(p_g, requires_grad=False)

        # KL-divergence loss; expects log-probabilities as the input argument
        self.kl_div = nn.KLDivLoss(reduction='batchmean')

    def forward(self, p: torch.Tensor):
        r"""
        * `p` is $p_1 \dots p_N$ in a tensor of shape `[N, batch_size]`
        """
        # Work with shape `[batch_size, N]`
        p = p.transpose(0, 1)
        # Slice the prior to the first $N$ steps and broadcast across the batch
        prior = self.p_g[None, :p.shape[1]].expand_as(p)
        # PyTorch's KLDivLoss takes log-probabilities as its first argument
        return self.kl_div(p.log(), prior)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/optimizers/adam_warmup_cosine_decay.py | labml_nn/optimizers/adam_warmup_cosine_decay.py | """
---
title: Adam optimizer with warm-up and cosine decay
summary: A PyTorch implementation/tutorial of Adam optimizer with warm-up and cosine decay for GPT.
---
# Adam Optimizer with Warmup and Cosine Decay
This extends [AMSGrad optimizer](adam.html) and adds a warmup stage.
"""
import math
from typing import Dict
from labml_nn.optimizers import WeightDecay
from labml_nn.optimizers.amsgrad import AMSGrad
class AdamWarmupCosineDecay(AMSGrad):
    r"""
    ## Adam Optimizer with Warmup and Cosine Decay

    This class extends from AMSGrad optimizer defined in [`amsgrad.py`](amsgrad.html).
    It linearly warms the learning rate up over `warmup` steps and then
    follows a cosine schedule, floored at 10% of the base rate.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16,
                 weight_decay: WeightDecay = WeightDecay(),
                 optimized_update: bool = True,
                 amsgrad=False, warmup=0, total_steps=1e10, defaults=None):
        r"""
        ### Initialize the optimizer

        * `params` is the list of parameters
        * `lr` is the learning rate $\alpha$
        * `betas` is a tuple of ($\beta_1$, $\beta_2$)
        * `eps` is $\hat{\epsilon}$ or $\epsilon$ based on `optimized_update`
        * `weight_decay` is an instance of class `WeightDecay` defined in [`__init__.py`](index.html)
        * `optimized_update` is a flag whether to optimize the bias correction of the second moment
          by doing it after adding $\epsilon$
        * `amsgrad` is a flag indicating whether to use AMSGrad or fallback to plain Adam
        * `warmup` number of warmup steps
        * `total_steps` total number of steps. Cosine decay reaches its floor at this point,
          but stays at 10% of `lr` because we take $\alpha \max(0.1, decay)$
        * `defaults` is a dictionary of default for group values.
          This is useful when you want to extend the class.
        """
        # Store the schedule settings in the per-group defaults
        defaults = {} if defaults is None else defaults
        defaults.update(dict(warmup=warmup, total_steps=total_steps))
        super().__init__(params, lr, betas, eps, weight_decay, optimized_update, amsgrad, defaults)

    def get_lr(self, state: Dict[str, any], group: Dict[str, any]):
        r"""
        ### Get learning-rate

        During the first `warmup` steps the learning rate rises linearly from
        (almost) $0$ to $\alpha$; after that it follows a cosine decay from
        $\alpha$ down to $0.1 \alpha$ at `total_steps`.
        """
        if state['step'] < group['warmup']:
            # Warm-up stage: linearly increasing learning rate from $0$ to $\alpha$
            return 1e-8 + state['step'] * group['lr'] / group['warmup']

        # Fraction of the post-warm-up schedule completed, in $[0, 1]$
        progress = (state['step'] - group['warmup']) / max(1, group['total_steps'] - group['warmup'])
        # Cosine decay, floored at 10% of the base learning rate
        return group['lr'] * max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))
def _test_lr():
    """
    ### Plot learning rate for different warmups and model sizes

    
    """
    import matplotlib.pyplot as plt
    import numpy as np
    from torch import nn

    # A throwaway model so the optimizer has parameters to hold
    model = nn.Linear(10, 10)
    opt = AdamWarmupCosineDecay(model.parameters(), warmup=5000, lr=1e-4, total_steps=4e6)

    def plot_schedule(start: int, stop: int, step: int = 1):
        # Evaluate the schedule over the given step range and plot it
        xs = np.arange(start, stop, step)
        plt.plot(xs, [opt.get_lr({'step': int(s)}, opt.defaults) for s in xs])
        plt.legend(["5000:4e6", "5000:2e6", "5000:1e6"])
        plt.title("Learning Rate")
        plt.show()

    # Zoom into the warm-up region
    plot_schedule(1, 20_000)
    # The full schedule
    plot_schedule(1, int(6e6), 1000)


if __name__ == '__main__':
    _test_lr()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/optimizers/ada_belief.py | labml_nn/optimizers/ada_belief.py | """
---
title: AdaBelief optimizer
summary: A simple PyTorch implementation/tutorial of AdaBelief optimizer.
---
# AdaBelief Optimizer
This is based from AdaBelief
[official implementation](https://github.com/juntang-zhuang/Adabelief-Optimizer)
of the paper
[AdaBelief Optimizer: Adapting Stepsizes by the Belief in Observed Gradients](https://arxiv.org/abs/2010.07468).
This is implemented in [PyTorch](https://pytorch.org) as an extension to [RAdam](radam.html).
The main difference between Adam optimizer and AdaBelief is that,
how it calculates the adaptive learning rate;
instead of dividing by the exponential moving average of square of the gradients,
AdaBelief divides by the exponential mean of variance.
\begin{align}
m_t &\leftarrow \beta_1 m_{t-1} + (1 - \beta_1) \cdot g_t \\
\textcolor{cyan}{s_t} &\textcolor{cyan}{\leftarrow} \textcolor{cyan}{\beta_2 s_{t-1} + (1 - \beta_2) \cdot (g_t - m_t)^2} \\
\hat{m}_t &\leftarrow \frac{m_t}{1-\beta_1^t} \\
\textcolor{cyan}{\hat{s}_t} &\textcolor{cyan}{\leftarrow} \frac{\textcolor{cyan}{s_t} + \textcolor{red}{\epsilon}}{\textcolor{cyan}{1-\beta_2^t}} \\
\theta_t &\leftarrow \theta_{t-1} - \alpha \cdot \frac{\hat{m}_t}{\sqrt{\textcolor{cyan}{\hat{s}_t}} + \epsilon}
\end{align}
🤔 The paper calculates variance as $(g_t - m_t)^2$,
but I feel it should use the bias corrected momentum
$(g_t - \textcolor{orange}{\hat{m}_t})^2$.
I guess this doesn't affect things much because
bias correction is $\approx 1$ after the initial training steps.
"""
from typing import Dict, Any
import torch
from torch import nn
from labml_nn.optimizers import WeightDecay
from labml_nn.optimizers.radam import RAdam
class AdaBelief(RAdam):
    r"""
    ## AdaBelief Optimizer

    This class extends from RAdam optimizer defined in [`radam.py`](radam.html).
    It divides the update by the exponential moving average of the gradient
    *variance* $s_t$ instead of the second moment $v_t$.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16,
                 weight_decay: WeightDecay = WeightDecay(), amsgrad=False,
                 degenerate_to_sgd=True,
                 rectify=True, defaults=None):
        r"""
        ### Initialize the optimizer

        * `params` is the list of parameters
        * `lr` is the learning rate $\alpha$
        * `betas` is a tuple of ($\beta_1$, $\beta_2$)
        * `eps` is the $\epsilon$ added to $s_t$
        * `weight_decay` is an instance of class `WeightDecay` defined in [`__init__.py`](index.html)
        * `amsgrad` is a flag indicating whether to use AMSGrad or fallback to plain Adam
        * `degenerate_to_sgd` whether to use sgd when the rectification term $r_t$ is intractable
        * `rectify` is whether to use RAdam update
        * `defaults` is a dictionary of default for group values.
          This is useful when you want to extend the class `AdaBelief`.
        """
        defaults = {} if defaults is None else defaults
        # Fix: forward the arguments to `RAdam.__init__` by keyword.
        # The previous positional call `super().__init__(params, lr, betas, eps,
        # weight_decay, amsgrad, degenerate_to_sgd, defaults)` shifted
        # `amsgrad` into RAdam's `optimized_update` slot, `degenerate_to_sgd`
        # into `amsgrad`, and `defaults` into `degenerated_to_sgd`, silently
        # mis-configuring the optimizer (see the RAdam signature in `radam.py`).
        super().__init__(params, lr, betas, eps, weight_decay,
                         amsgrad=amsgrad, degenerated_to_sgd=degenerate_to_sgd,
                         defaults=defaults)
        self.rectify = rectify

    def init_state(self, state: Dict[str, any], group: Dict[str, any], param: nn.Parameter):
        r"""
        ### Initialize a parameter state

        * `state` is the optimizer state of the parameter (tensor)
        * `group` stores optimizer attributes of the parameter group
        * `param` is the parameter tensor $\theta_{t-1}$
        """
        # Number of optimizer steps taken for this parameter
        state['step'] = 0
        # Exponential moving average of gradient values, $m_t$
        state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
        # Exponential moving average of variance, $s_t$
        state['exp_avg_var'] = torch.zeros_like(param, memory_format=torch.preserve_format)
        # If `amsgrad` flag is `True` for this parameter group, we maintain the maximum of
        # exponential moving average of variance
        if group['amsgrad']:
            # Maintains max of all exp. moving avg. of variance values
            state['max_exp_avg_var'] = torch.zeros_like(param, memory_format=torch.preserve_format)

    def get_ms(self, state: Dict[str, Any], group: Dict[str, Any], grad: torch.Tensor):
        r"""
        ### Calculate $m_t$ and $s_t$ or $\max(s_1, s_2, ..., s_{t-1}, s_t)$

        * `state` is the optimizer state of the parameter (tensor)
        * `group` stores optimizer attributes of the parameter group
        * `grad` is the current gradient tensor $g_t$ for the parameter $\theta_{t-1}$
        """
        # Get $\beta_1$ and $\beta_2$
        beta1, beta2 = group['betas']

        # Get $m_{t-1}$ and $s_{t-1}$
        m, s = state['exp_avg'], state['exp_avg_var']

        # In-place calculation of $m_t$
        # $$m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) \cdot g_t$$
        m.mul_(beta1).add_(grad, alpha=1 - beta1)
        # Difference between gradient and momentum
        grad_residual = grad - m
        # In-place calculation of $s_t$
        # $$s_t \leftarrow \beta_2 s_{t-1} + (1 - \beta_2) \cdot (g_t - m_t)^2$$
        s.mul_(beta2).addcmul_(grad_residual, grad_residual, value=1 - beta2)

        # If this parameter group is using `amsgrad`
        if group['amsgrad']:
            # Get $\max(s_1, s_2, ..., s_{t-1})$.
            s_max = state['max_exp_avg_var']
            # Calculate $\max(s_1, s_2, ..., s_{t-1}, s_t)$ in place.
            torch.maximum(s_max, s, out=s_max)

            return m, s_max
        else:
            # $m_t$ and $s_t$ otherwise
            return m, s

    def step_param(self, state: Dict[str, any], group: Dict[str, any], grad: torch.Tensor, param: torch.nn.Parameter):
        r"""
        ### Take an update step for a given parameter tensor

        * `state` is the optimizer state of the parameter (tensor)
        * `group` stores optimizer attributes of the parameter group
        * `grad` is the current gradient tensor $g_t$ for the parameter $\theta_{t-1}$
        * `param` is the parameter tensor $\theta_{t-1}$
        """
        # Calculate weight decay
        grad = self.weight_decay(param, grad, group)

        # Get $m_t$ and $s_t$
        m, s = self.get_ms(state, group, grad)

        # Increment $t$ the number of optimizer steps
        state['step'] += 1

        if not self.rectify:
            # Perform *Adam* update, defined in [`adam.py`](adam.html), with
            # $s_t + \epsilon$ in place of $v_t$.
            self.adam_update(state, group, param, m, s + group['eps'])
        else:
            # Perform *Rectified Adam* update defined in [`radam.py`](radam.html), with
            # $s_t + \epsilon$ in place of $v_t$.
            self.r_adam_update(state, group, param, m, s + group['eps'])
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/optimizers/radam.py | labml_nn/optimizers/radam.py | """
---
title: Rectified Adam (RAdam) optimizer
summary: A simple PyTorch implementation/tutorial of RAdam optimizer.
---
# Rectified Adam (RAdam) optimizer
This implementation is based on
[the official implementation](https://github.com/LiyuanLucasLiu/RAdam)
of the paper
[On the Variance of the Adaptive Learning Rate and Beyond](https://arxiv.org/abs/1908.03265).
We have implemented it in [PyTorch](https://pytorch.org)
as an extension to [our AMSGrad implementation](amsgrad.html)
thus requiring only the modifications to be implemented.
Adam optimizer sometimes converges to a bad local optima during the initial stages of the training;
especially when training transformers.
Researchers use warm-ups to counter this; for the initial training steps (the warm-up stage)
they use a low learning rate.
This paper identifies the problem to be the high variance of adaptive learning rate
during initial stages of training, and counters it using a new rectification term to
reduce variance.
The paper also evaluates two variance reduction mechanisms:
* **Adam-2k**: Only compute the adaptive learning rate ($v_t$ in [Adam](adam.html)) during the first 2k steps,
without changing parameters or calculating momentum ($m_t$).
* **Adam-eps**: Adam with large $\epsilon \approx 10^{-4}$.
## Rectified Adam
Let $\sigma(g_1, ..., g_t)$ and $\psi(g_1, ..., g_t)$ be the functions to calculate
momentum and adaptive learning rate.
For Adam, they are
\begin{align}
\sigma(g_1, ..., g_t) &= \frac{(1 - \beta_1)\sum_{i=1}^t \beta_1^{t-i} g_i}{1 - \beta_1^t} \\
\psi(g_1, ..., g_t) &= \sqrt \frac{1 - \beta_2^t}{(1 - \beta_2)\sum_{i=1}^t \beta_2^{t-i} g_i^2}
\end{align}
### Exponential moving average as simple moving average
The distribution of exponential moving average can be approximated as a simple moving average.
\begin{align}
p\Bigg(\frac{(1-\beta_2) \sum_{i=1}^t \beta_2^{t-i} g_i^2}{1 - \beta_2^t} \Bigg) \approx
p\Bigg(\frac{\sum_{i=1}^{f(t,\beta_2)} g_{t+1-i}^2}{f(t,\beta_2)} \Bigg)
\end{align}
Here we are taking the simple moving average of the last $f(t,\beta_2)$ gradients.
$f(t,\beta_2)$ satisfies the following,
\begin{align}
\frac{(1-\beta_2) \sum_{i=1}^t \beta_2^{t-i} \cdot i}{1 - \beta_2^t} =
\frac{\sum_{i=1}^{f(t,\beta_2)} (t+1-i)}{f(t,\beta_2)}
\end{align}
which gives,
$$f(t,\beta_2) = \frac{2}{1-\beta_2} - 1 - \frac{2 t \beta_2^t}{1 - \beta_2^t}$$
### Scaled inverse chi-squared
From above we have
$$
p\Big( \psi^2(g_1, ..., g_t) \Big) \approx
p\Bigg(\frac{\sum_{i=1}^{f(t,\beta_2)} g_{t+1-i}^2}{f(t,\beta_2)} \Bigg)
$$
where $g_i \sim \mathcal{N}(0, \sigma^2)$.
Note that $\sigma$ here is the standard deviation and is different from $\sigma(.)$ for momentum.
[Scaled inverse chi-squared](https://en.wikipedia.org/wiki/Scaled_inverse_chi-squared_distribution)
is the distribution of squared inverse of mean of $p$ normal distributions.
$$
p\Bigg(\frac{\sum_{i=1}^{f(t,\beta_2)} g_{t+1-i}^2}{f(t,\beta_2)} \Bigg)
\sim
\text{Scale-inv} \mathcal{X}^2(\rho,\frac{1}{\sigma^2})
$$
where $\rho = f(t,\beta_2)$.
### Rectification
They prove that variance of $\psi(.)$ decreases with $\rho$ when
$\psi^2(.) \sim \text{Scale-inv} \mathcal{X}^2(\rho,\frac{1}{\sigma^2})$.
Therefore the variance is minimized at maximal $\rho$ which is
$\rho_{\infty} = \frac{2}{1-\beta_2} - 1$. Let the minimum variance be $C_{\text{var}}$
In order to ensure that the adaptive learning
rate $\psi(.)$ has consistent variance, we rectify the variance with $r$
\begin{align}
r = \sqrt{\frac{C_{\text{var}}}{Var\big[\psi(.)\big]}}
\end{align}
### Approximating $Var[\psi(.)]$
They estimate $Var[\psi(.)] \approx \frac{Var[\psi^2(.)]}{4 \mathbb{E}[\psi^2(.)]}$
based on first order expansion of $\sqrt{\psi^2(.)}$
🤪 I didn't get how it was derived.
From $\text{Scale-inv} \mathcal{X}^2$ distribution we have,
\begin{align}
\mathbb{E}\big[\psi^2(.)\big] &= \frac{\rho / \sigma^2}{\rho-2} \\
Var\big[\psi^2(.)\big] &= \frac{2 \rho / \sigma^4}{(\rho-2)^2 (\rho - 4)}
\end{align}
which gives,
$$
Var[\psi(.)] \approx \frac{\rho}{2(\rho-2)(\rho-4)\sigma^2}
$$
### Rectification term
We have
\begin{align}
r &= \sqrt{\frac{C_{\text{var}}}{Var\big[\psi(.)\big]}} \\
Var[\psi(.)] &\approx \frac{\rho}{2(\rho-2)(\rho-4)\sigma^2}
\end{align}
where $C_{\text{var}}$ is $Var\big[\psi(.)\big]$ for $\rho_\infty$.
Let $\rho$ at step $t$ be $\rho_t$, and $r_t$ be the rectification term
at step $t$.
\begin{align}
C_{\text{var}} &\approx \frac{\rho_\infty}{2(\rho_\infty-2)(\rho_\infty-4)\sigma^2} \\
Var[\psi(g_1,...,g_t)] &\approx \frac{\rho_t}{2(\rho_t-2)(\rho_t-4)\sigma^2}
\end{align}
This gives,
\begin{align}
r_t &= \sqrt{\frac{(\rho_t-2)(\rho_t-4)\rho_\infty}{(\rho_\infty-2)(\rho_\infty-4)\rho_t}}
\end{align}
"""
import math
from typing import Dict, Optional
import torch
from labml_nn.optimizers import WeightDecay
from labml_nn.optimizers.amsgrad import AMSGrad
class RAdam(AMSGrad):
    """
    ## Rectified Adam Optimizer

    This class extends from the AMSGrad optimizer defined in [`amsgrad.py`](amsgrad.html).
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay: WeightDecay = WeightDecay(),
                 optimized_update: bool = True,
                 amsgrad=False,
                 degenerated_to_sgd=True, defaults=None):
        """
        ### Initialize the optimizer

        * `params` is the list of parameters
        * `lr` is the learning rate $\alpha$
        * `betas` is a tuple of ($\beta_1$, $\beta_2$)
        * `eps` is $\hat{\epsilon}$ or $\epsilon$ based on `optimized_update`
        * `weight_decay` is an instance of class `WeightDecay` defined in [`__init__.py`](index.html)
        * `optimized_update` is a flag whether to optimize the bias correction of the second moment
          by doing it after adding $\epsilon$
        * `amsgrad` is a flag indicating whether to use AMSGrad or fallback to plain Adam
        * `degenerated_to_sgd` whether to use sgd when the rectification term $r_t$ is intractable
        * `defaults` is a dictionary of default for group values.
         This is useful when you want to extend the class `RAdam`.
        """
        self.degenerated_to_sgd = degenerated_to_sgd
        super().__init__(params, lr, betas, eps, weight_decay, optimized_update, amsgrad, defaults)

    def step_param(self, state: Dict[str, any], group: Dict[str, any], grad: torch.Tensor, param: torch.nn.Parameter):
        """
        ### Take an update step for a given parameter tensor

        * `state` is the optimizer state of the parameter (tensor)
        * `group` stores optimizer attributes of the parameter group
        * `grad` is the current gradient tensor $g_t$ for the parameter $\theta_{t-1}$
        * `param` is the parameter tensor $\theta_{t-1}$
        """
        # Calculate weight decay
        grad = self.weight_decay(param, grad, group)
        # Get $m_t$ and $v_t$; i.e. $\sigma(.)$ and $\psi(.)$ without bias correction
        m, v = self.get_mv(state, group, grad)
        # Increment $t$ the number of optimizer steps
        state['step'] += 1
        # Perform *RAdam* update
        self.r_adam_update(state, group, param, m, v)

    @staticmethod
    def calc_rectification_term(beta2: float, step: int) -> Optional[float]:
        """
        ### Calculate rectification term $r_t$

        Returns `None` when $r_t$ is intractable (early steps with large $\beta_2$).
        """
        # $\beta_2^t$
        beta2_t = beta2 ** step
        # $$\rho_\infty = \frac{2}{1 - \beta_2} - 1$$
        rho_inf = 2 / (1 - beta2) - 1
        # $$\rho_t = \frac{2}{1-\beta_2} - 1 - \frac{2 t \beta_2^t}{1-\beta_2^t}$$
        rho = rho_inf - 2 * step * beta2_t / (1 - beta2_t)

        # $r_t$ is tractable when $\rho_t >= 4$.
        # We are being a little more conservative since it's an approximated value
        if rho >= 5:
            # $$r_t = \sqrt{\frac{(\rho_t-2)(\rho_t-4)\rho_\infty}{(\rho_\infty-2)(\rho_\infty-4)\rho_t}}$$
            r2 = (rho - 4) / (rho_inf - 4) * (rho - 2) / rho * rho_inf / (rho_inf - 2)
            return math.sqrt(r2)
        else:
            return None

    def r_adam_update(self, state: Dict[str, any], group: Dict[str, any], param: torch.nn.Parameter,
                      m: torch.Tensor, v: torch.Tensor):
        """
        ### Do the *RAdam* parameter update

        * `state` is the optimizer state of the parameter (tensor)
        * `group` stores optimizer attributes of the parameter group
        * `param` is the parameter tensor $\theta_{t-1}$
        * `m` and `v` are the uncorrected first and second moments $m_t$ and $v_t$;
          i.e. $\sigma(.)$ and $\psi(.)$ without bias correction
        """
        # Get $\beta_1$ and $\beta_2$
        beta1, beta2 = group['betas']
        # Bias correction term for $\hat{m}_t$, $1 - \beta_1^t$
        bias_correction1 = 1 - beta1 ** state['step']
        # Bias correction term for $\hat{v}_t$, $1 - \beta_2^t$
        bias_correction2 = 1 - beta2 ** state['step']

        # Rectification term $r_t$; `None` when intractable
        r = self.calc_rectification_term(beta2, state['step'])

        # Get learning rate
        lr = self.get_lr(state, group)

        # If $r_t$ is tractable, do a rectified Adam update
        if r is not None:
            # Whether to optimize the computation by combining scalar computations
            if self.optimized_update:
                # Denominator $\sqrt{v_t} + \hat{\epsilon}$
                denominator = v.sqrt().add_(group['eps'])
                # Step size $\alpha \sqrt{r_t} * \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t}$
                step_size = lr * math.sqrt(bias_correction2) * r / bias_correction1
                # Update parameters $\theta_t \leftarrow \theta_{t-1} - \alpha \sqrt{r_t} \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \cdot
                # \frac{m_t}{\sqrt{v_t} + \hat{\epsilon}}$
                param.data.addcdiv_(m, denominator, value=-step_size)
            # Computation without optimization
            else:
                # Denominator $\frac{\sqrt{v_t}}{\sqrt{1-\beta_2^t}} + \epsilon$
                denominator = (v.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                # Step size $\frac{\alpha \sqrt{r_t}}{1-\beta_1^t}$
                step_size = lr * r / bias_correction1
                # Update parameters $\theta_t \leftarrow \theta_{t-1} - \alpha \sqrt{r_t} \cdot
                # \frac{\hat{m}_t}{\sqrt{\hat{v}_t} + \epsilon}$
                param.data.addcdiv_(m, denominator, value=-step_size)
        # If $r_t$ is intractable do a SGD with momentum
        elif self.degenerated_to_sgd:
            # Step size $\frac{\alpha}{1-\beta_1^t}$
            step_size = lr / bias_correction1
            # Update parameters
            # $\theta_t \leftarrow \theta_{t-1} - \alpha \cdot \hat{m}_t$
            param.data.add_(m, alpha=-step_size)
def _test_rectification_term():
    """
    ### Plot $r_t$ against $t$ for various $\beta_2$
    """
    import matplotlib.pyplot as plt
    import numpy as np

    # The $\beta_2$ values to compare
    beta2_values = [0.9999, 0.999, 0.99, 0.9, 0.8, 0.6, 0.5]
    # Steps $t = 1 \dots 4999$
    steps = np.arange(1, 5_000)
    # One curve per $\beta_2$; `calc_rectification_term` yields `None` where $r_t$ is intractable
    curves = [[RAdam.calc_rectification_term(b, t) for b in beta2_values] for t in range(1, 5_000)]

    plt.plot(steps, curves)
    plt.legend(beta2_values)
    plt.title("Optimizer")
    plt.show()
# Show the rectification-term plot when this module is run as a script
if __name__ == '__main__':
    _test_rectification_term()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/optimizers/adam.py | labml_nn/optimizers/adam.py | """
---
title: Adam Optimizer
summary: A simple PyTorch implementation/tutorial of Adam optimizer
---
# Adam Optimizer
This is a [PyTorch](https://pytorch.org) implementation of popular optimizer *Adam* from paper
[Adam: A Method for Stochastic Optimization](https://arxiv.org/abs/1412.6980).
*Adam* update is,
\begin{align}
m_t &\leftarrow \beta_1 m_{t-1} + (1 - \beta_1) \cdot g_t \\
v_t &\leftarrow \beta_2 v_{t-1} + (1 - \beta_2) \cdot g_t^2 \\
\hat{m}_t &\leftarrow \frac{m_t}{1-\beta_1^t} \\
\hat{v}_t &\leftarrow \frac{v_t}{1-\beta_2^t} \\
\theta_t &\leftarrow \theta_{t-1} - \alpha \cdot \frac{\hat{m}_t}{\sqrt{\hat{v}_t} + \epsilon}
\end{align}
where $\alpha$, $\beta_1$, $\beta_2$ and $\epsilon$ are scalar hyper parameters.
$m_t$ and $v_t$ are first and second order moments.
$\hat{m}_t$ and $\hat{v}_t$ are biased corrected moments.
$\epsilon$ is used as a fix for division by zero error, but also acts as a form of a hyper-parameter
that acts against variance in gradients.
Effective step taken assuming $\epsilon = 0$ is,
$$\Delta t = \alpha \cdot \frac{\hat{m}_t}{\sqrt{\hat{v}_t}}$$
This is bounded by,
$$\vert \Delta t \vert \le \alpha \cdot \frac{1 - \beta_1}{\sqrt{1-\beta_2}}$$
when $1-\beta_1 \gt \sqrt{1-\beta_2}$
and
$$\vert \Delta t\vert \le \alpha$$
otherwise.
And in most common scenarios,
$$\vert \Delta t \vert \approx \alpha$$
"""
import math
from typing import Dict, Any, Tuple, Optional
import torch
from labml import tracker
from torch import nn
from labml_nn.optimizers import GenericAdaptiveOptimizer, WeightDecay
class Adam(GenericAdaptiveOptimizer):
    """
    ## Adam Optimizer

    We extend the class `GenericAdaptiveOptimizer` defined in [`__init__.py`](index.html)
    to implement the Adam optimizer.
    """

    def __init__(self, params,
                 lr: float = 1e-3, betas: Tuple[float, float] = (0.9, 0.999), eps: float = 1e-16,
                 weight_decay: WeightDecay = WeightDecay(),
                 optimized_update: bool = True,
                 defaults: Optional[Dict[str, Any]] = None):
        """
        ### Initialize the optimizer

        * `params` is the list of parameters
        * `lr` is the learning rate $\alpha$
        * `betas` is a tuple of ($\beta_1$, $\beta_2$)
        * `eps` is $\hat{\epsilon}$ or $\epsilon$ based on `optimized_update`
        * `weight_decay` is an instance of class `WeightDecay` defined in [`__init__.py`](index.html)
        * `optimized_update` is a flag whether to optimize the bias correction of the second moment
          by doing it after adding $\epsilon$
        * `defaults` is a dictionary of default for group values.
         This is useful when you want to extend the class `Adam`.
        """
        # Merge the weight-decay defaults into the parameter-group defaults
        defaults = {} if defaults is None else defaults
        defaults.update(weight_decay.defaults())
        super().__init__(params, defaults, lr, betas, eps)

        self.weight_decay = weight_decay
        self.optimized_update = optimized_update

    def init_state(self, state: Dict[str, Any], group: Dict[str, Any], param: nn.Parameter):
        """
        ### Initialize a parameter state

        * `state` is the optimizer state of the parameter (tensor)
        * `group` stores optimizer attributes of the parameter group
        * `param` is the parameter tensor $\theta_{t-1}$
        """
        # This is the number of optimizer steps taken on the parameter, $t$
        state['step'] = 0
        # Exponential moving average of gradients, $m_t$
        state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
        # Exponential moving average of squared gradient values, $v_t$
        state['exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)

    def get_mv(self, state: Dict[str, Any], group: Dict[str, Any], grad: torch.Tensor):
        """
        ### Calculate $m_t$ and $v_t$

        * `state` is the optimizer state of the parameter (tensor)
        * `group` stores optimizer attributes of the parameter group
        * `grad` is the current gradient tensor $g_t$ for the parameter $\theta_{t-1}$
        """
        # Get $\beta_1$ and $\beta_2$
        beta1, beta2 = group['betas']
        # Get $m_{t-1}$ and $v_{t-1}$
        m, v = state['exp_avg'], state['exp_avg_sq']

        # In-place calculation of $m_t$
        # $$m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) \cdot g_t$$
        m.mul_(beta1).add_(grad, alpha=1 - beta1)
        # In-place calculation of $v_t$
        # $$v_t \leftarrow \beta_2 v_{t-1} + (1 - \beta_2) \cdot g_t^2$$
        v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

        return m, v

    def get_lr(self, state: Dict[str, Any], group: Dict[str, Any]):
        """
        ### Get learning-rate

        This returns the modified learning rate based on the state.
        For *Adam* this is just the specified learning rate for the parameter group,
        $\alpha$.
        """
        return group['lr']

    def adam_update(self, state: Dict[str, Any], group: Dict[str, Any], param: torch.nn.Parameter,
                    m: torch.Tensor, v: torch.Tensor):
        """
        ### Do the *Adam* parameter update

        * `state` is the optimizer state of the parameter (tensor)
        * `group` stores optimizer attributes of the parameter group
        * `param` is the parameter tensor $\theta_{t-1}$
        * `m` and `v` are the uncorrected first and second moments $m_t$ and $v_t$.

        This computes the following

        \begin{align}
        \theta_t &\leftarrow \theta_{t-1} - \alpha \cdot \frac{\hat{m}_t}{\sqrt{\hat{v}_t} + \epsilon}
        \end{align}

        Since $\alpha$, $\beta_1$, $\beta_2$ and $\epsilon$ are scalars and others are tensors
        we modify this calculation to optimize the computation.

        \begin{align}
        \theta_t &\leftarrow \theta_{t-1} - \alpha \cdot \frac{\hat{m}_t}{\sqrt{\hat{v}_t} + \epsilon} \\
        \theta_t &\leftarrow \theta_{t-1} - \alpha \cdot
                \frac{m_t / (1-\beta_1^t)}{\sqrt{v_t/(1-\beta_2^t)} + \epsilon} \\
        \theta_t &\leftarrow \theta_{t-1} - \alpha \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \cdot
                \frac{m_t}{\sqrt{v_t} + \hat{\epsilon}} \\
        \end{align}

        where
        $$\hat{\epsilon} = (1-\beta_2^t) \epsilon$$
        is what we should specify as the hyper-parameter.
        """
        # Get $\beta_1$ and $\beta_2$
        beta1, beta2 = group['betas']
        # Bias correction term for $\hat{m}_t$, $1 - \beta_1^t$
        bias_correction1 = 1 - beta1 ** state['step']
        # Bias correction term for $\hat{v}_t$, $1 - \beta_2^t$
        bias_correction2 = 1 - beta2 ** state['step']

        # Get learning rate
        lr = self.get_lr(state, group)

        # Whether to optimize the computation
        if self.optimized_update:
            # $\sqrt{v_t} + \hat{\epsilon}$
            denominator = v.sqrt().add_(group['eps'])
            # $\alpha \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t}$
            step_size = lr * math.sqrt(bias_correction2) / bias_correction1
            # $\theta_t \leftarrow \theta_{t-1} - \alpha \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \cdot
            # \frac{m_t}{\sqrt{v_t} + \hat{\epsilon}}$
            param.data.addcdiv_(m, denominator, value=-step_size)
        # Computation without optimization
        else:
            # $\frac{\sqrt{v_t}}{\sqrt{1-\beta_2^t}} + \epsilon$
            denominator = (v.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
            # $\frac{\alpha}{1-\beta_1^t}$
            step_size = lr / bias_correction1
            # $\theta_t \leftarrow \theta_{t-1} - \alpha \cdot
            # \frac{\hat{m}_t}{\sqrt{\hat{v}_t} + \epsilon}$
            param.data.addcdiv_(m, denominator, value=-step_size)

    def step_param(self, state: Dict[str, Any], group: Dict[str, Any], grad: torch.Tensor, param: torch.nn.Parameter):
        """
        ### Take an update step for a given parameter tensor

        * `state` is the optimizer state of the parameter (tensor)
        * `group` stores optimizer attributes of the parameter group
        * `grad` is the current gradient tensor $g_t$ for the parameter $\theta_{t-1}$
        * `param` is the parameter tensor $\theta_{t-1}$
        """
        # Calculate weight decay
        grad = self.weight_decay(param, grad, group)
        # Get $m_t$ and $v_t$
        m, v = self.get_mv(state, group, grad)
        # Increment $t$ the number of optimizer steps
        state['step'] += 1
        # Perform *Adam* update
        self.adam_update(state, group, param, m, v)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/optimizers/configs.py | labml_nn/optimizers/configs.py | """
---
title: Configurable optimizer module
summary: This implements a configurable module for optimizers.
---
# Configurable Optimizer
"""
from typing import Tuple
import torch
from labml.configs import BaseConfigs, option, meta_config
from labml_nn.optimizers import WeightDecay
class OptimizerConfigs(BaseConfigs):
    """
    <a id="OptimizerConfigs"></a>

    ## Optimizer Configurations

    Declarative configuration set for building an optimizer.
    The `optimizer` attribute is computed by one of the `@option` factory
    functions defined below in this file; the remaining attributes are the
    knobs those factories read.
    """
    # Optimizer instance (computed by one of the `@option` factories below)
    optimizer: torch.optim.Adam
    # Weight decay helper object (class `WeightDecay` defined in [`__init__.py`](index.html))
    weight_decay_obj: WeightDecay
    # Whether weight decay is decoupled;
    # i.e. weight decay is not added to gradients
    weight_decouple: bool = True
    # Weight decay coefficient
    weight_decay: float = 0.0
    # Whether weight decay is absolute or should be multiplied by learning rate
    weight_decay_absolute: bool = False
    # Whether the adam update is optimized (different epsilon)
    optimized_adam_update: bool = True
    # Parameters to be optimized
    parameters: any
    # Learning rate $\alpha$
    learning_rate: float = 0.01
    # Beta values $(\beta_1, \beta_2)$ for Adam
    betas: Tuple[float, float] = (0.9, 0.999)
    # Epsilon $\epsilon$ for adam
    eps: float = 1e-08
    # Momentum for SGD
    momentum: float = 0.5
    # Whether to use AMSGrad
    amsgrad: bool = False
    # Number of warmup optimizer steps
    warmup: int = 2_000
    # Total number of optimizer steps (for cosine decay)
    total_steps: int = int(1e10)
    # Whether to degenerate to SGD in AdaBelief
    degenerate_to_sgd: bool = True
    # Whether to use Rectified Adam in AdaBelief
    rectify: bool = True
    # Model embedding size for Noam optimizer
    d_model: int
    # $\rho$ for the Sophia optimizer (per-coordinate clipping radius)
    rho: float

    def __init__(self):
        # `optimizer` is the primary (default) configuration computed by this class
        super().__init__(_primary='optimizer')
# Mark `parameters` as a meta attribute.
# NOTE(review): presumably this tells labml not to treat `parameters` as a regular
# tracked hyper-parameter — confirm against `labml.configs.meta_config`.
meta_config(OptimizerConfigs.parameters)
@option(OptimizerConfigs.weight_decay_obj, 'L2')
def _weight_decay(c: OptimizerConfigs):
    """Build the L2 `WeightDecay` helper from the configuration values."""
    return WeightDecay(weight_decay=c.weight_decay,
                       weight_decouple=c.weight_decouple,
                       absolute=c.weight_decay_absolute)
@option(OptimizerConfigs.optimizer, 'SGD')
def _sgd_optimizer(c: OptimizerConfigs):
    """Create a PyTorch SGD optimizer with momentum."""
    return torch.optim.SGD(c.parameters,
                           lr=c.learning_rate,
                           momentum=c.momentum,
                           weight_decay=c.weight_decay)
@option(OptimizerConfigs.optimizer, 'Adam')
def _adam_optimizer(c: OptimizerConfigs):
    """Create an [Adam](adam.html) optimizer, or [AMSGrad](amsgrad.html) when `amsgrad` is set."""
    # Keyword arguments shared by both variants
    kwargs = dict(lr=c.learning_rate, betas=c.betas, eps=c.eps,
                  optimized_update=c.optimized_adam_update,
                  weight_decay=c.weight_decay_obj)
    if c.amsgrad:
        from labml_nn.optimizers.amsgrad import AMSGrad
        return AMSGrad(c.parameters, amsgrad=c.amsgrad, **kwargs)
    from labml_nn.optimizers.adam import Adam
    return Adam(c.parameters, **kwargs)
@option(OptimizerConfigs.optimizer, 'AdamW')
def _adam_warmup_optimizer(c: OptimizerConfigs):
    """Create an [Adam optimizer with warmup](adam_warmup.html) (registered under the key `AdamW`)."""
    from labml_nn.optimizers.adam_warmup import AdamWarmup
    kwargs = dict(lr=c.learning_rate, betas=c.betas, eps=c.eps,
                  weight_decay=c.weight_decay_obj, amsgrad=c.amsgrad,
                  warmup=c.warmup)
    return AdamWarmup(c.parameters, **kwargs)
@option(OptimizerConfigs.optimizer, 'RAdam')
def _radam_optimizer(c: OptimizerConfigs):
    """Create a [Rectified Adam](radam.html) optimizer from the configurations."""
    from labml_nn.optimizers.radam import RAdam
    kwargs = dict(lr=c.learning_rate, betas=c.betas, eps=c.eps,
                  weight_decay=c.weight_decay_obj, amsgrad=c.amsgrad,
                  degenerated_to_sgd=c.degenerate_to_sgd)
    return RAdam(c.parameters, **kwargs)
@option(OptimizerConfigs.optimizer, 'AdaBelief')
def _ada_belief_optimizer(c: OptimizerConfigs):
    """Create an [AdaBelief](ada_belief.html) optimizer from the configurations."""
    from labml_nn.optimizers.ada_belief import AdaBelief
    kwargs = dict(lr=c.learning_rate, betas=c.betas, eps=c.eps,
                  weight_decay=c.weight_decay_obj, amsgrad=c.amsgrad,
                  degenerate_to_sgd=c.degenerate_to_sgd,
                  rectify=c.rectify)
    return AdaBelief(c.parameters, **kwargs)
@option(OptimizerConfigs.optimizer, 'Noam')
def _noam_optimizer(c: OptimizerConfigs):
    """Create a [Noam](noam.html) optimizer from the configurations."""
    from labml_nn.optimizers.noam import Noam
    kwargs = dict(lr=c.learning_rate, betas=c.betas, eps=c.eps,
                  weight_decay=c.weight_decay_obj, amsgrad=c.amsgrad,
                  warmup=c.warmup, d_model=c.d_model)
    return Noam(c.parameters, **kwargs)
@option(OptimizerConfigs.optimizer, 'Sophia')
def _sophia_optimizer(c: OptimizerConfigs):
    """Create a [Sophia-G](sophia.html) optimizer from the configurations."""
    from labml_nn.optimizers.sophia import Sophia
    kwargs = dict(lr=c.learning_rate, betas=c.betas, eps=c.eps,
                  weight_decay=c.weight_decay_obj, rho=c.rho)
    return Sophia(c.parameters, **kwargs)
@option(OptimizerConfigs.optimizer, 'AdamWarmupCosineDecay')
def _adam_warmup_cosine_decay_optimizer(c: OptimizerConfigs):
    """
    Create an [Adam optimizer with warmup and cosine decay](adam_warmup_cosine_decay.html).

    Renamed from `_noam_optimizer`: the previous name collided with the Noam
    factory above and shadowed it at module level.
    """
    from labml_nn.optimizers.adam_warmup_cosine_decay import AdamWarmupCosineDecay
    return AdamWarmupCosineDecay(c.parameters,
                                 lr=c.learning_rate, betas=c.betas, eps=c.eps,
                                 weight_decay=c.weight_decay_obj, amsgrad=c.amsgrad,
                                 warmup=c.warmup, total_steps=c.total_steps)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/optimizers/adam_fp16.py | labml_nn/optimizers/adam_fp16.py | """
---
title: Adam Optimizer for Half Precision Training
summary: A simple PyTorch implementation/tutorial of Adam optimizer
---
# Adam Optimizer for Half Precision Training
"""
from typing import Dict, Tuple, Optional, Any
import torch
from torch import nn
from torch.optim import Optimizer
from torch.cuda.amp import grad_scaler
from collections import defaultdict, abc
from labml_nn.optimizers import WeightDecay
from labml_nn.optimizers.adam import Adam
class AdamFP16(Adam):
    """
    ## Adam Optimizer for Half Precision Training

    We extend [Adam Optimizer](adam.html) but use FP32 to store gradients and moments.
    """

    def __init__(self, params, lr: float = 1e-3, betas: Tuple[float, float] = (0.9, 0.999), eps: float = 1e-16,
                 weight_decay: WeightDecay = WeightDecay(), optimized_update: bool = True,
                 defaults: Optional[Dict[str, Any]] = None):
        """
        ### Initialize the optimizer

        Same arguments as the [Adam optimizer](adam.html).
        """
        # Parameter to store 32 bit gradients. This gets populated by the `GradScaler` defined below.
        self.grad_fp32 = {}
        # Call the [Adam Optimizer](adam.html) initializer
        super().__init__(params, lr, betas, eps, weight_decay, optimized_update, defaults)

    def init_state(self, state: Dict[str, Any], group: Dict[str, Any], param: nn.Parameter):
        """
        ### Initialize a parameter state

        * `state` is the optimizer state of the parameter (tensor)
        * `group` stores optimizer attributes of the parameter group
        * `param` is the parameter tensor $\theta_{t-1}$

        All the state tensors use FP32.
        """
        # This is the number of optimizer steps taken on the parameter, $t$
        state['step'] = 0
        # Exponential moving average of gradients, $m_t$ (kept in FP32)
        state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format, dtype=torch.float)
        # Exponential moving average of squared gradient values, $v_t$ (kept in FP32)
        state['exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format, dtype=torch.float)
        # Maintain a FP32 copy of the parameters
        state['fp32_copy'] = param.to(torch.float)

    def step_param(self, state: Dict[str, Any], group: Dict[str, Any], grad: torch.Tensor, param: torch.nn.Parameter):
        """
        ### Take an update step for a given parameter tensor

        * `state` is the optimizer state of the parameter (tensor)
        * `group` stores optimizer attributes of the parameter group
        * `grad` is the current gradient tensor $g_t$ for the parameter $\theta_{t-1}$
        * `param` is the parameter tensor $\theta_{t-1}$
        """
        # Get the FP32 parameters
        param_fp32 = state['fp32_copy']
        # Get the FP32 gradients if available (stashed by `GradScalerFP16` during unscaling)
        grad_fp32 = self.grad_fp32.get(param, None)
        if grad_fp32 is not None:
            # Consume the stashed gradient so the dict doesn't keep stale entries
            del self.grad_fp32[param]
            grad = grad_fp32
        else:
            # Otherwise, convert the gradients to FP32
            grad = grad.to(torch.float)
        # Calculate weight decay
        grad = self.weight_decay(param_fp32, grad, group)
        # Get $m_t$ and $v_t$
        m, v = self.get_mv(state, group, grad)
        # Increment $t$ the number of optimizer steps
        state['step'] += 1
        # Perform *Adam* update on the FP32 copy
        self.adam_update(state, group, param_fp32, m, v)
        # Set the parameters: write the updated FP32 copy back in the parameter's own dtype
        param.data = param_fp32.to(param.dtype)
class GradScalerFP16(grad_scaler.GradScaler):
    """
    ## Gradient Scaler with half precision gradients

    We extend PyTorch gradient scaler to use FP32 gradients.
    """

    def _unscale_grads_(self, optimizer: Optimizer, inv_scale: torch.Tensor, found_inf: torch.Tensor,
                        allow_fp16: bool) -> Dict[torch.device, torch.Tensor]:
        # NOTE(review): this overrides a private method of `torch.cuda.amp.GradScaler`
        # and relies on its private helpers — may break across PyTorch versions.
        per_device_inv_scale = grad_scaler._MultiDeviceReplicator(inv_scale)
        per_device_found_inf = grad_scaler._MultiDeviceReplicator(found_inf)

        # Gradients grouped by device and dtype for the fused unscale kernel below
        per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list))  # type: ignore[var-annotated]
        with torch.no_grad():
            # Loop through parameters
            for group in optimizer.param_groups:
                for param in group["params"]:
                    # Skip non-trainable parameters
                    if param.grad is None:
                        continue
                    # Not implemented for sparse tensors
                    if param.grad.is_sparse:
                        raise NotImplementedError
                    # If we are using the `AdamFP16` optimizer set `optimizer.grad_fp32[param]` to the FP32 gradients
                    if isinstance(optimizer, AdamFP16):
                        grad = param.grad.to(torch.float)
                        optimizer.grad_fp32[param] = grad
                    # Otherwise, do not convert the gradients to FP32
                    else:
                        grad = param.grad
                    per_device_and_dtype_grads[grad.device][grad.dtype].append(grad)

            # Unscale all the gradients
            for device, per_dtype_grads in per_device_and_dtype_grads.items():
                for grads in per_dtype_grads.values():
                    torch._amp_foreach_non_finite_check_and_unscale_(grads,
                                                                     per_device_found_inf.get(device),
                                                                     per_device_inv_scale.get(device))
        # Per-device tensors flagging whether any inf/nan was found during unscaling
        return per_device_found_inf._per_device_tensors
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/optimizers/__init__.py | labml_nn/optimizers/__init__.py | """
---
title: Optimizers
summary: >
A set of PyTorch implementations/tutorials of popular gradient descent based optimizers.
Currently includes Adam, AMSGrad and RAdam optimizers.
---
# Optimizers
## Optimizer Implementations
* [Adam Optimizer](adam.html)
* [AMSGrad Optimizer](amsgrad.html)
* [Adam Optimizer with warmup](adam_warmup.html)
* [Noam Optimizer](noam.html)
* [Rectified Adam Optimizer](radam.html)
* [AdaBelief Optimizer](ada_belief.html)
* [Sophia-G Optimizer](sophia.html)
This [MNIST example](mnist_experiment.html) uses these optimizers.
## Generic Adaptive Optimizer Base class and Weight Decay
This file defines a common base class for *Adam* and extensions of it.
The base class helps us implement other optimizers with minimal code
because of re-usability.
We also define a special class for L2 weight decay, so that we don't
have to implement it inside each of the optimizers,
and can easily extend to other weight decays like L1 without
changing the optimizers.
Here are some concepts on PyTorch optimizers:
### Parameter groups
PyTorch optimizers group parameters into sets called groups.
Each group can have its own hyper-parameters like learning rates.
In most common cases there will be only one group.
This is when you initialize your optimizer with,
```python
Optimizer(model.parameters())
```
You can define multiple parameter groups when initializing the optimizer:
```python
Optimizer([{'params': model1.parameters()}, {'params': model2.parameters(), 'lr': 2}])
```
Here we pass a list of groups. Each group is a dictionary with its parameters under the key 'params'.
You specify any hyper-parameters as well. If the hyper parameters are not defined they will default
to the optimizer level defaults.
You can access (and even change) these groups, and their hyper-parameters with `optimizer.param_groups`.
Most learning rate schedule implementations I've come across do access this and change 'lr'.
### States
Optimizer maintains states (a dictionary) for each parameter (a tensor), in a dictionary `optimizer.state`.
This is where the optimizer maintains things like exponential averages.
"""
from typing import Dict, Tuple, Any
import torch
from torch import nn
from torch.optim.optimizer import Optimizer
class GenericAdaptiveOptimizer(Optimizer):
    """
    ## Base class for *Adam* and extensions
    """

    def __init__(self, params, defaults: Dict[str, Any], lr: float, betas: Tuple[float, float], eps: float):
        """
        ### Initialize

        * `params` is the collection of parameters or set of parameter groups.
        * `defaults` a dictionary of default hyper-parameters
        * `lr` is the learning rate, $\alpha$
        * `betas` is the tuple $(\beta_1, \beta_2)$
        * `eps` is $\epsilon$

        Raises `ValueError` if any hyper-parameter is out of range.
        """
        # Check the hyper-parameters
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")

        # Add the hyper-parameters to the defaults
        defaults.update(dict(lr=lr, betas=betas, eps=eps))
        # Initialize the PyTorch optimizer.
        # This will create parameter groups with the default hyper-parameters
        super().__init__(params, defaults)

    def init_state(self, state: Dict[str, Any], group: Dict[str, Any], param: nn.Parameter):
        """
        ### Initialize state for a given parameter tensor

        This should be overridden with code to initialize `state` for parameters `param`.
        `group` is the parameter group dictionary to which `param` belongs.
        """
        pass

    def step_param(self, state: Dict[str, Any], group: Dict[str, Any], grad: torch.Tensor, param: torch.Tensor):
        """
        ### Take optimizer step on a parameter tensor

        This should be overridden and take the optimization step on `param` tensor $\theta$,
        where `grad` is the gradient for that parameter, $g_t$,
        `state` is the optimizer state dictionary for that parameter, and
        `group` is the parameter group dictionary `param` belongs to.
        """
        pass

    @torch.no_grad()
    def step(self, closure=None):
        """
        ### Optimizer step

        We have created a template method that does the common stuff every *Adam* based optimizer needs.
        Returns the loss computed by `closure`, or `None` when no closure is given.
        """
        # Calculate loss.
        #
        # 🤔 I'm not sure when you need this. I guess it's if you define a function that
        # calculates the loss, does `loss.backward` and return the loss, instead of calling
        # it on your own you could pass it to `optimizer.step`. 🤷♂️
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        # Iterate through the parameter groups
        for group in self.param_groups:
            # Iterate through the parameters in the parameter group
            for param in group['params']:
                # Skip if the parameter has no gradient
                if param.grad is None:
                    continue
                # Get the gradient tensor
                grad = param.grad.data
                # We don't handle sparse gradients
                if grad.is_sparse:
                    raise RuntimeError('GenericAdaptiveOptimizer does not support sparse gradients,'
                                       ' please consider SparseAdam instead')
                # Get the state for the parameter
                state = self.state[param]
                # Initialize the state if state is uninitialized
                if len(state) == 0:
                    self.init_state(state, group, param)
                # Take the optimization step on the parameter
                self.step_param(state, group, grad, param)

        # Return the loss, calculated from closure
        return loss
class WeightDecay:
    """
    ## L2 Weight decay
    """

    def __init__(self, weight_decay: float = 0., weight_decouple: bool = True, absolute: bool = False):
        """
        ### Initialize weight decay

        * `weight_decay` is the decay coefficient
        * `weight_decouple` is a flag indicating whether to add the weight decay to the gradient or directly
          decay from the parameter. If added to the gradient it will go through the normal optimizer update.
        * `absolute` this flag indicates whether the weight decay coefficient is absolute. This is applicable
          when the decay is performed directly on the parameter. If this is false the actual decay is
          `weight_decay` $\times$ `learning_rate`.

        Raises `ValueError` if `weight_decay` is negative.
        """
        # Check hyper-parameters
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        self.absolute = absolute
        self.weight_decouple = weight_decouple
        self.weight_decay = weight_decay

    def defaults(self) -> Dict[str, float]:
        """
        Return defaults for parameter groups
        """
        return dict(weight_decay=self.weight_decay)

    def __call__(self, param: torch.nn.Parameter, grad: torch.Tensor, group: Dict[str, Any]):
        """
        ### Perform weight decay and return the gradient

        Either decays `param` in place (decoupled mode) and returns `grad` unmodified,
        or returns a new gradient with the decay term added (coupled mode).
        """
        # If we are doing the decay on the parameter directly
        if self.weight_decouple:
            # If the weight decay coefficient is absolute
            if self.absolute:
                param.data.mul_(1.0 - group['weight_decay'])
            # Otherwise, scale the decay by the group learning rate
            else:
                param.data.mul_(1.0 - group['lr'] * group['weight_decay'])
            # Return the unmodified gradient
            return grad
        else:
            if group['weight_decay'] != 0:
                # Add the weight decay to the gradient and return the modified gradient
                return grad.add(param.data, alpha=group['weight_decay'])
            else:
                return grad
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/optimizers/sophia.py | labml_nn/optimizers/sophia.py | """
---
title: Sophia Optimizer
summary: A simple PyTorch implementation/tutorial of Sophia optimizer
---
# Sophia Optimizer
This is a [PyTorch](https://pytorch.org) implementation of *Sophia-G* from paper
[Sophia: A Scalable Stochastic Second-order Optimizer for Language Model Pre-training](https://arxiv.org/abs/2305.14342).
Official implementation is available at [Liuhong99/Sophia](https://github.com/Liuhong99/Sophia).
Sophia is more adaptive to heterogeneous curvatures than Adam, more resistant
to non-convexity and rapid change of Hessian than Newton’s method, and also uses a low-cost
pre-conditioner.
Sophia keeps diagonal Hessian estimates with EMA across iterations.
The diagonal Hessian $\hat{h}_t$ is calculated every $k$ steps.
\begin{align}
h_t = \beta_2 h_{t-k} + (1 - \beta_2) \hat{h}_t \ \ \ \ \text{ if } t \text{ mod } k = 1; \text{ else } h_t = h_{t-1}
\end{align}
Sophia uses EMA of gradients $m_t$, only considers positive entries of
the diagonal Hessian and does per-coordinate clipping to the update.
\begin{align}
m_t &\leftarrow \beta_1 m_{t-1} + (1 - \beta_1)g_t \\
\theta_{t + 1} &\leftarrow \theta_t - \eta \cdot \operatorname{clip} \bigg(\frac{m_t}{ \max \{h_t, \epsilon \} }, \rho \bigg)
\end{align}
where $\epsilon$ is a very small value to prevent division by $0$.
### Gauss-Newton-Bartlett (GNB) estimator
\begin{align}
\hat{L}(\theta) &= \frac{1}{B} \sum^{B}_{b=1} \ell_{CE} \big( f(\theta, x_b), \hat{y}_b \big) \\
\hat{h}_t &= B \cdot \nabla_\theta \hat{L} (\theta) \odot \nabla_\theta \hat{L} (\theta)
\end{align}
where $x_b$ are the inputs,
$B$ is the batch size (number of inputs/tokens),
$\ell_{CE}$ is cross entropy loss, and
$\hat{y}_b$ are sampled from the logits $f(\theta, x_b)$.
Note that this hessian estimate is always positive and therefore we
can replace $\max \{h_t, \epsilon \}$ with $h_t + \epsilon$.
Sophia with Gauss-Newton-Bartlett (GNB) estimator is **Sophia-G**
Here is an [experiment](../transformers/basic/with_sophia.html) that uses Sophia-G to train a transformer.
"""
from typing import Dict, Any, Tuple, Optional
import torch
from torch import nn
from labml_nn.optimizers import GenericAdaptiveOptimizer, WeightDecay
class Sophia(GenericAdaptiveOptimizer):
"""
## Sophia-G Optimizer
We extend the class `GenericAdaptiveOptimizer` defined in [`__init__.py`](index.html)
to implement the Sophia optimizer.
"""
def __init__(self, params,
lr: float = 1e-4, betas: Tuple[float, float] = (0.9, 0.95), eps: float = 1e-12,
rho: float = 0.03,
weight_decay: WeightDecay = WeightDecay(),
defaults: Optional[Dict[str, Any]] = None):
"""
### Initialize the optimizer
* `params` is the list of parameters
* `lr` is the maximum learning rate $\eta \rho$
* `betas` is a tuple of ($\beta_1$, $\beta_2$)
* `eps` is $\epsilon$
* `pho` is $\rho$
* `weight_decay` is an instance of class `WeightDecay` defined in [`__init__.py`](index.html)
* `defaults` is a dictionary of default for group values.
This is useful when you want to extend the class `Adam`.
"""
defaults = {} if defaults is None else defaults
defaults.update(weight_decay.defaults())
defaults.update(dict(rho=rho))
super().__init__(params, defaults, lr, betas, eps)
self.weight_decay = weight_decay
def init_state(self, state: Dict[str, any], group: Dict[str, any], param: nn.Parameter):
"""
### Initialize a parameter state
* `state` is the optimizer state of the parameter (tensor)
* `group` stores optimizer attributes of the parameter group
* `param` is the parameter tensor $\theta_{t-1}$
"""
# This is the number of optimizer steps taken on the parameter, $t$
state['step'] = 0
# Exponential moving average of gradients, $m_t$
state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
# Exponential moving average of Hessian diagonal, $h_t$
state['hessian'] = torch.zeros_like(param, memory_format=torch.preserve_format)
def update_hessian(self, n_tokens_training_batch):
"""
### Update the EMA of Hessian diagonal $h_t$
* `n_tokens_training_batch` is the number of tokens/inputs in the batch $B$
\begin{align}
\hat{h}_t &= B \cdot \nabla_\theta \hat{L} (\theta) \odot \nabla_\theta \hat{L} (\theta) \\
h_t &= \beta_2 h_{t-k} + (1 - \beta_2) \hat{h}_t
\end{align}
"""
# Iterate through parameter groups
for group in self.param_groups:
# $\beta_2$
_, beta2 = group['betas']
# Iterate through parameters
for p in group['params']:
# Skip parameters without gradients
if p.grad is None:
continue
# Get optimizer state
state = self.state[p]
# Initialize state if empty
if len(state) == 0:
self.init_state(state, group, p)
# Update EMA Hessian diagonal
#
# \begin{align}
# \hat{h}_t &= B \cdot \nabla_\theta \hat{L} (\theta) \odot \nabla_\theta \hat{L} (\theta) \\
# h_t &= \beta_2 h_{t-k} + (1 - \beta_2) \hat{h}_t
# \end{align}
state['hessian'].mul_(beta2).addcmul_(p.grad, p.grad, value=(1 - beta2) * n_tokens_training_batch)
def step_param(self, state: Dict[str, any], group: Dict[str, any], grad: torch.Tensor, param: torch.nn.Parameter):
"""
### Take an update step for a given parameter tensor
* `state` is the optimizer state of the parameter (tensor)
* `group` stores optimizer attributes of the parameter group
* `grad` is the current gradient tensor $g_t$ for the parameter $\theta_{t-1}$
* `param` is the parameter tensor $\theta_{t-1}$
We do the following parameter update,
\begin{align}
\theta_{t + 1} &\leftarrow \theta_t - \eta \cdot \operatorname{clip} \bigg(\frac{m_t}{h_t + \epsilon}, \rho \bigg)
\end{align}
"""
# Calculate weight decay
grad = self.weight_decay(param, grad, group)
# Get $\beta_1$ and $\beta_2$
beta1, beta2 = group['betas']
# Get $\rho$
rho = group['rho']
# Get $m_{t-1}$ and $h_{t}$
m, hessian = state['exp_avg'], state['hessian']
# In-place calculation of $m_t$
# $$m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) \cdot g_t$$
m.mul_(beta1).add_(grad, alpha=1 - beta1)
# Increment $t$ the number of optimizer steps
state['step'] += 1
# Get maximum learning rate $\eta \rho$
lr = group['lr']
# $\eta$
eta = lr / rho
# $$\operatorname{clip} \bigg(\frac{m_t}{h_t + \epsilon}, \rho \bigg)$$
ratio = (m / (hessian + group['eps'])).clamp(-rho, rho)
# $$\theta_{t + 1} \leftarrow \theta_t - \eta \cdot \operatorname{clip} \bigg(\frac{m_t}{h_t + \epsilon}, \rho \bigg)$$
param.data.add_(ratio, alpha=-eta)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/optimizers/adam_warmup.py | labml_nn/optimizers/adam_warmup.py | """
---
title: Adam optimizer with warm-up
summary: A simple PyTorch implementation/tutorial of Adam optimizer with warm-up.
---
# Adam Optimizer with Warmup
This extends [AMSGrad optimizer](amsgrad.html) and adds a warmup stage.
"""
from typing import Any, Dict

from labml_nn.optimizers import WeightDecay
from labml_nn.optimizers.amsgrad import AMSGrad
class AdamWarmup(AMSGrad):
"""
## Adam Optimizer with Warmup
This class extends from AMSGrad optimizer defined in [`amsgrad.py`](amsgrad.html).
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16,
weight_decay: WeightDecay = WeightDecay(),
optimized_update: bool = True,
amsgrad=False, warmup=0, defaults=None):
"""
### Initialize the optimizer
* `params` is the list of parameters
* `lr` is the learning rate $\alpha$
* `betas` is a tuple of ($\beta_1$, $\beta_2$)
* `eps` is $\hat{\epsilon}$ or $\epsilon$ based on `optimized_update`
* `weight_decay` is an instance of class `WeightDecay` defined in [`__init__.py`](index.html)
* 'optimized_update' is a flag whether to optimize the bias correction of the second moment
by doing it after adding $\epsilon$
* `amsgrad` is a flag indicating whether to use AMSGrad or fallback to plain Adam
* `warmup` number of warmup steps
* `defaults` is a dictionary of default for group values.
This is useful when you want to extend the class `AdamWarmup`.
"""
defaults = {} if defaults is None else defaults
defaults.update(dict(warmup=warmup))
super().__init__(params, lr, betas, eps, weight_decay, optimized_update, amsgrad, defaults)
def get_lr(self, state: Dict[str, any], group: Dict[str, any]):
"""
### Get learning-rate
$$\alpha \min \bigg(1, \frac{t}{w}\bigg)$$
where $w$ is the number of warmup steps.
"""
# If we are in warmup stage
if group['warmup'] > state['step']:
# A linearly increasing learning rate from $0$ to $\alpha$
return 1e-8 + state['step'] * group['lr'] / group['warmup']
else:
# Constant learning rate $\alpha$
return group['lr']
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.