repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/search/google_search.py | app/tool/search/google_search.py | from typing import List
from googlesearch import search
from app.tool.search.base import SearchItem, WebSearchEngine
class GoogleSearchEngine(WebSearchEngine):
    """Search engine backed by the `googlesearch` package."""

    def perform_search(
        self, query: str, num_results: int = 10, *args, **kwargs
    ) -> List[SearchItem]:
        """
        Google search engine.

        Returns results formatted according to SearchItem model.

        Args:
            query: The search query string.
            num_results: Maximum number of results to request. Default is 10.

        Returns:
            List[SearchItem]: Parsed search results.
        """
        # advanced=True makes the library yield result objects carrying
        # title/url/description instead of bare URL strings.
        raw_results = search(query, num_results=num_results, advanced=True)

        results = []
        for i, item in enumerate(raw_results):
            if isinstance(item, str):
                # Bare-URL fallback. BUG FIX: the original appended a plain
                # dict here, violating the declared List[SearchItem] return
                # type (and inconsistent with BaiduSearchEngine, which wraps
                # string results in SearchItem).
                results.append(
                    SearchItem(
                        title=f"Google Result {i+1}", url=item, description=""
                    )
                )
            else:
                results.append(
                    SearchItem(
                        title=item.title, url=item.url, description=item.description
                    )
                )
        return results
| python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false |
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/search/baidu_search.py | app/tool/search/baidu_search.py | from typing import List
from baidusearch.baidusearch import search
from app.tool.search.base import SearchItem, WebSearchEngine
class BaiduSearchEngine(WebSearchEngine):
    """Search engine backed by the `baidusearch` package."""

    def perform_search(
        self, query: str, num_results: int = 10, *args, **kwargs
    ) -> List[SearchItem]:
        """
        Baidu search engine.

        Returns results formatted according to SearchItem model.
        """
        raw_results = search(query, num_results=num_results)
        # Normalize each raw entry (str, dict, or arbitrary object) into
        # a SearchItem, preserving the original ordering.
        return [
            self._to_search_item(idx, entry)
            for idx, entry in enumerate(raw_results)
        ]

    @staticmethod
    def _to_search_item(idx: int, entry) -> SearchItem:
        """Convert one raw Baidu result into a SearchItem."""
        fallback_title = f"Baidu Result {idx + 1}"

        if isinstance(entry, str):
            # A bare URL with no accompanying metadata.
            return SearchItem(title=fallback_title, url=entry, description=None)

        if isinstance(entry, dict):
            # A dictionary carrying title/url/abstract keys.
            return SearchItem(
                title=entry.get("title", fallback_title),
                url=entry.get("url", ""),
                description=entry.get("abstract", None),
            )

        # Unknown object type: read attributes directly, and fall back to a
        # stringified entry if even that fails.
        try:
            return SearchItem(
                title=getattr(entry, "title", fallback_title),
                url=getattr(entry, "url", ""),
                description=getattr(entry, "abstract", None),
            )
        except Exception:
            return SearchItem(
                title=fallback_title, url=str(entry), description=None
            )
| python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false |
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/search/__init__.py | app/tool/search/__init__.py | from app.tool.search.baidu_search import BaiduSearchEngine
from app.tool.search.base import WebSearchEngine
from app.tool.search.bing_search import BingSearchEngine
from app.tool.search.duckduckgo_search import DuckDuckGoSearchEngine
from app.tool.search.google_search import GoogleSearchEngine
# Public names re-exported by the `app.tool.search` package.
__all__ = [
    "WebSearchEngine",
    "BaiduSearchEngine",
    "DuckDuckGoSearchEngine",
    "GoogleSearchEngine",
    "BingSearchEngine",
]
| python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false |
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/search/base.py | app/tool/search/base.py | from typing import List, Optional
from pydantic import BaseModel, Field
class SearchItem(BaseModel):
    """A single result returned by any of the web search engines."""

    title: str = Field(description="The title of the search result")
    url: str = Field(description="The URL of the search result")
    description: Optional[str] = Field(
        default=None, description="A description or snippet of the search result"
    )

    def __str__(self) -> str:
        """Render the item as ``<title> - <url>``."""
        return " - ".join((self.title, self.url))
class WebSearchEngine(BaseModel):
    """Abstract base class shared by every concrete search engine."""

    # Engines may hold non-pydantic members (e.g. a requests.Session).
    model_config = {"arbitrary_types_allowed": True}

    def perform_search(
        self, query: str, num_results: int = 10, *args, **kwargs
    ) -> List[SearchItem]:
        """
        Run a web search and return the matching items.

        Args:
            query (str): The search query to submit to the search engine.
            num_results (int, optional): The number of search results to return. Default is 10.
            args: Additional arguments.
            kwargs: Additional keyword arguments.

        Returns:
            List[SearchItem]: A list of SearchItem objects matching the search query.

        Raises:
            NotImplementedError: Always; subclasses must override this method.
        """
        raise NotImplementedError
| python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false |
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/search/bing_search.py | app/tool/search/bing_search.py | from typing import List, Optional, Tuple
import requests
from bs4 import BeautifulSoup
from app.logger import logger
from app.tool.search.base import SearchItem, WebSearchEngine
# Maximum number of characters kept from a result snippet.
ABSTRACT_MAX_LENGTH = 300

# Pool of browser-like User-Agent strings; index 0 is used in HEADERS below.
USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
    "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; pt-BR) AppleWebKit/533.3 (KHTML, like Gecko) QtWeb Internet Browser/3.7 http://www.QtWeb.net",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.4pre) Gecko/20070404 K-Ninja/2.1.3",
    "Mozilla/5.0 (Future Star Technologies Corp.; Star-Blade OS; x86_64; U; en-US) iNet Browser 4.7",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; rv:2.2) Gecko/20110201",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.13) Gecko/20080414 Firefox/2.0.0.13 Pogo/2.0.0.13.6866",
]

# Default HTTP headers sent with every Bing request.
HEADERS = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Content-Type": "application/x-www-form-urlencoded",
    "User-Agent": USER_AGENTS[0],
    "Referer": "https://www.bing.com/",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9",
}

# Base host (used to resolve relative next-page links) and search endpoint.
BING_HOST_URL = "https://www.bing.com"
BING_SEARCH_URL = "https://www.bing.com/search?q="
class BingSearchEngine(WebSearchEngine):
    """Bing search engine that scrapes the public HTML result pages."""

    # Reused HTTP session carrying the browser-like HEADERS defined above.
    session: Optional[requests.Session] = None

    def __init__(self, **data):
        """Initialize the BingSearch tool with a requests session."""
        super().__init__(**data)
        self.session = requests.Session()
        self.session.headers.update(HEADERS)

    def _search_sync(self, query: str, num_results: int = 10) -> List[SearchItem]:
        """
        Synchronous Bing search implementation to retrieve search results.

        Args:
            query (str): The search query to submit to Bing.
            num_results (int, optional): Maximum number of results to return. Defaults to 10.

        Returns:
            List[SearchItem]: A list of search items with title, URL, and description.
        """
        if not query:
            return []

        list_result = []
        first = 1
        next_url = BING_SEARCH_URL + query

        # Keep following "next page" links until enough results are collected
        # or no further page is available.
        while len(list_result) < num_results:
            data, next_url = self._parse_html(
                next_url, rank_start=len(list_result), first=first
            )
            if data:
                list_result.extend(data)
            if not next_url:
                break
            # NOTE(review): `first` is incremented but _parse_html never
            # reads it — pagination relies solely on the scraped next-page
            # URL. Confirm before removing.
            first += 10

        # Trim any overshoot from the final page.
        return list_result[:num_results]

    def _parse_html(
        self, url: str, rank_start: int = 0, first: int = 1
    ) -> Tuple[List[SearchItem], str]:
        """
        Parse Bing search result HTML to extract search results and the next page URL.

        Returns:
            tuple: (List of SearchItem objects, next page URL or None)
        """
        try:
            res = self.session.get(url=url)
            res.encoding = "utf-8"
            root = BeautifulSoup(res.text, "lxml")

            list_data = []
            # Organic results live under <ol id="b_results">.
            ol_results = root.find("ol", id="b_results")
            if not ol_results:
                return [], None

            for li in ol_results.find_all("li", class_="b_algo"):
                title = ""
                url = ""
                abstract = ""
                try:
                    # Title and link come from the <h2><a href=...> element.
                    h2 = li.find("h2")
                    if h2:
                        title = h2.text.strip()
                        url = h2.a["href"].strip()

                    # The snippet is the first <p> inside the result item.
                    p = li.find("p")
                    if p:
                        abstract = p.text.strip()

                    # Clamp overly long snippets.
                    if ABSTRACT_MAX_LENGTH and len(abstract) > ABSTRACT_MAX_LENGTH:
                        abstract = abstract[:ABSTRACT_MAX_LENGTH]

                    rank_start += 1

                    # Create a SearchItem object
                    list_data.append(
                        SearchItem(
                            title=title or f"Bing Result {rank_start}",
                            url=url,
                            description=abstract,
                        )
                    )
                except Exception:
                    # Skip malformed entries instead of failing the page.
                    continue

            # Resolve the relative "Next page" link, if present.
            next_btn = root.find("a", title="Next page")
            if not next_btn:
                return list_data, None

            next_url = BING_HOST_URL + next_btn["href"]
            return list_data, next_url
        except Exception as e:
            logger.warning(f"Error parsing HTML: {e}")
            return [], None

    def perform_search(
        self, query: str, num_results: int = 10, *args, **kwargs
    ) -> List[SearchItem]:
        """
        Bing search engine.

        Returns results formatted according to SearchItem model.
        """
        return self._search_sync(query, num_results=num_results)
| python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false |
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/sandbox/sb_shell_tool.py | app/tool/sandbox/sb_shell_tool.py | import asyncio
import time
from typing import Any, Dict, Optional, TypeVar
from uuid import uuid4
from app.daytona.tool_base import Sandbox, SandboxToolsBase
from app.tool.base import ToolResult
from app.utils.logger import logger
# Generic context placeholder.
# NOTE(review): not referenced anywhere in this module — TODO confirm intent.
Context = TypeVar("Context")

# Tool description surfaced to the model via the tool schema.
_SHELL_DESCRIPTION = """\
Execute a shell command in the workspace directory.
IMPORTANT: Commands are non-blocking by default and run in a tmux session.
This is ideal for long-running operations like starting servers or build processes.
Uses sessions to maintain state between commands.
This tool is essential for running CLI tools, installing packages, and managing system operations.
"""
class SandboxShellTool(SandboxToolsBase):
    """Tool for executing tasks in a Daytona sandbox with browser-use capabilities.

    Uses sessions for maintaining state between commands and provides comprehensive process management.
    """

    name: str = "sandbox_shell"
    description: str = _SHELL_DESCRIPTION
    # JSON schema describing the tool's arguments to the model.
    parameters: dict = {
        "type": "object",
        "properties": {
            "action": {
                "type": "string",
                "enum": [
                    "execute_command",
                    "check_command_output",
                    "terminate_command",
                    "list_commands",
                ],
                "description": "The shell action to perform",
            },
            "command": {
                "type": "string",
                "description": "The shell command to execute. Use this for running CLI tools, installing packages, "
                "or system operations. Commands can be chained using &&, ||, and | operators.",
            },
            "folder": {
                "type": "string",
                "description": "Optional relative path to a subdirectory of /workspace where the command should be "
                "executed. Example: 'data/pdfs'",
            },
            "session_name": {
                "type": "string",
                "description": "Optional name of the tmux session to use. Use named sessions for related commands "
                "that need to maintain state. Defaults to a random session name.",
            },
            "blocking": {
                "type": "boolean",
                "description": "Whether to wait for the command to complete. Defaults to false for non-blocking "
                "execution.",
                "default": False,
            },
            "timeout": {
                "type": "integer",
                "description": "Optional timeout in seconds for blocking commands. Defaults to 60. Ignored for "
                "non-blocking commands.",
                "default": 60,
            },
            "kill_session": {
                "type": "boolean",
                "description": "Whether to terminate the tmux session after checking. Set to true when you're done "
                "with the command.",
                "default": False,
            },
        },
        "required": ["action"],
        "dependencies": {
            "execute_command": ["command"],
            "check_command_output": ["session_name"],
            "terminate_command": ["session_name"],
            "list_commands": [],
        },
    }

    def __init__(
        self, sandbox: Optional[Sandbox] = None, thread_id: Optional[str] = None, **data
    ):
        """Initialize with optional sandbox and thread_id."""
        # NOTE(review): thread_id is accepted but never read in this class —
        # presumably kept for interface parity with sibling tools; confirm.
        super().__init__(**data)
        if sandbox is not None:
            self._sandbox = sandbox

    async def _ensure_session(self, session_name: str = "default") -> str:
        """Ensure a session exists and return its ID.

        self._sessions maps friendly names to sandbox session IDs
        (assumed initialized by SandboxToolsBase — TODO confirm).
        """
        if session_name not in self._sessions:
            session_id = str(uuid4())
            try:
                await self._ensure_sandbox()  # Ensure sandbox is initialized
                self.sandbox.process.create_session(session_id)
                self._sessions[session_name] = session_id
            except Exception as e:
                raise RuntimeError(f"Failed to create session: {str(e)}")
        return self._sessions[session_name]

    async def _cleanup_session(self, session_name: str):
        """Clean up a session if it exists."""
        if session_name in self._sessions:
            try:
                await self._ensure_sandbox()  # Ensure sandbox is initialized
                self.sandbox.process.delete_session(self._sessions[session_name])
                del self._sessions[session_name]
            except Exception as e:
                # Best-effort cleanup: failures are reported, not raised.
                print(f"Warning: Failed to cleanup session {session_name}: {str(e)}")

    async def _execute_raw_command(self, command: str) -> Dict[str, Any]:
        """Execute a raw command directly in the sandbox.

        Used for the tmux bookkeeping commands issued by the other methods;
        returns a dict with the command's "output" and "exit_code".
        """
        # Ensure session exists for raw commands
        session_id = await self._ensure_session("raw_commands")

        # Execute command in session
        from app.daytona.sandbox import SessionExecuteRequest

        req = SessionExecuteRequest(
            command=command, run_async=False, cwd=self.workspace_path
        )

        response = self.sandbox.process.execute_session_command(
            session_id=session_id,
            req=req,
            timeout=30,  # Short timeout for utility commands
        )

        logs = self.sandbox.process.get_session_command_logs(
            session_id=session_id, command_id=response.cmd_id
        )

        return {"output": logs, "exit_code": response.exit_code}

    async def _execute_command(
        self,
        command: str,
        folder: Optional[str] = None,
        session_name: Optional[str] = None,
        blocking: bool = False,
        timeout: int = 60,
    ) -> ToolResult:
        """Run `command` inside a tmux session, optionally waiting for it.

        Args:
            command: Shell command to send to the session.
            folder: Optional workspace-relative working directory.
            session_name: tmux session to (re)use; random name when omitted.
            blocking: When True, poll until completion or `timeout` elapses.
            timeout: Max seconds to wait in blocking mode.

        Returns:
            ToolResult describing the session and, in blocking mode, its
            captured output.
        """
        try:
            # Ensure sandbox is initialized
            await self._ensure_sandbox()

            # Set up working directory
            cwd = self.workspace_path
            if folder:
                folder = folder.strip("/")
                cwd = f"{self.workspace_path}/{folder}"

            # Generate a session name if not provided
            if not session_name:
                session_name = f"session_{str(uuid4())[:8]}"

            # Check if tmux session already exists
            check_session = await self._execute_raw_command(
                f"tmux has-session -t {session_name} 2>/dev/null || echo 'not_exists'"
            )
            session_exists = "not_exists" not in check_session.get("output", "")

            if not session_exists:
                # Create a new tmux session
                await self._execute_raw_command(
                    f"tmux new-session -d -s {session_name}"
                )

            # Ensure we're in the correct directory and send command to tmux
            full_command = f"cd {cwd} && {command}"
            wrapped_command = full_command.replace('"', '\\"')  # Escape double quotes

            # Send command to tmux session
            await self._execute_raw_command(
                f'tmux send-keys -t {session_name} "{wrapped_command}" Enter'
            )

            if blocking:
                # For blocking execution, wait and capture output
                start_time = time.time()
                while (time.time() - start_time) < timeout:
                    # Wait a bit before checking
                    # NOTE(review): time.sleep blocks the event loop inside an
                    # async method; asyncio.sleep would be the non-blocking
                    # equivalent — confirm intent before changing.
                    time.sleep(2)

                    # Check if session still exists (command might have exited)
                    check_result = await self._execute_raw_command(
                        f"tmux has-session -t {session_name} 2>/dev/null || echo 'ended'"
                    )
                    if "ended" in check_result.get("output", ""):
                        break

                    # Get current output and check for common completion indicators
                    output_result = await self._execute_raw_command(
                        f"tmux capture-pane -t {session_name} -p -S - -E -"
                    )
                    current_output = output_result.get("output", "")

                    # Check for prompt indicators that suggest command completion
                    # NOTE(review): heuristic — these markers can appear in
                    # normal command output and end the wait early.
                    last_lines = current_output.split("\n")[-3:]
                    completion_indicators = [
                        "$",
                        "#",
                        ">",
                        "Done",
                        "Completed",
                        "Finished",
                        "✓",
                    ]

                    if any(
                        indicator in line
                        for indicator in completion_indicators
                        for line in last_lines
                    ):
                        break

                # Capture final output
                output_result = await self._execute_raw_command(
                    f"tmux capture-pane -t {session_name} -p -S - -E -"
                )
                final_output = output_result.get("output", "")

                # Kill the session after capture
                await self._execute_raw_command(f"tmux kill-session -t {session_name}")

                return self.success_response(
                    {
                        "output": final_output,
                        "session_name": session_name,
                        "cwd": cwd,
                        "completed": True,
                    }
                )
            else:
                # For non-blocking, just return immediately
                return self.success_response(
                    {
                        "session_name": session_name,
                        "cwd": cwd,
                        "message": f"Command sent to tmux session '{session_name}'. Use check_command_output to view results.",
                        "completed": False,
                    }
                )
        except Exception as e:
            # Attempt to clean up session in case of error
            if session_name:
                try:
                    await self._execute_raw_command(
                        f"tmux kill-session -t {session_name}"
                    )
                except:
                    # Best-effort cleanup; the original error is reported below.
                    pass
            return self.fail_response(f"Error executing command: {str(e)}")

    async def _check_command_output(
        self, session_name: str, kill_session: bool = False
    ) -> ToolResult:
        """Capture the current pane contents of a tmux session.

        Args:
            session_name: tmux session to inspect.
            kill_session: When True, terminate the session after capturing.

        Returns:
            ToolResult with the captured output and session status.
        """
        try:
            # Ensure sandbox is initialized
            await self._ensure_sandbox()

            # Check if session exists
            check_result = await self._execute_raw_command(
                f"tmux has-session -t {session_name} 2>/dev/null || echo 'not_exists'"
            )
            if "not_exists" in check_result.get("output", ""):
                return self.fail_response(
                    f"Tmux session '{session_name}' does not exist."
                )

            # Get output from tmux pane
            output_result = await self._execute_raw_command(
                f"tmux capture-pane -t {session_name} -p -S - -E -"
            )
            output = output_result.get("output", "")

            # Kill session if requested
            if kill_session:
                await self._execute_raw_command(f"tmux kill-session -t {session_name}")
                termination_status = "Session terminated."
            else:
                termination_status = "Session still running."

            return self.success_response(
                {
                    "output": output,
                    "session_name": session_name,
                    "status": termination_status,
                }
            )
        except Exception as e:
            return self.fail_response(f"Error checking command output: {str(e)}")

    async def _terminate_command(self, session_name: str) -> ToolResult:
        """Kill the named tmux session, failing if it does not exist."""
        try:
            # Ensure sandbox is initialized
            await self._ensure_sandbox()

            # Check if session exists
            check_result = await self._execute_raw_command(
                f"tmux has-session -t {session_name} 2>/dev/null || echo 'not_exists'"
            )
            if "not_exists" in check_result.get("output", ""):
                return self.fail_response(
                    f"Tmux session '{session_name}' does not exist."
                )

            # Kill the session
            await self._execute_raw_command(f"tmux kill-session -t {session_name}")

            return self.success_response(
                {"message": f"Tmux session '{session_name}' terminated successfully."}
            )
        except Exception as e:
            return self.fail_response(f"Error terminating command: {str(e)}")

    async def _list_commands(self) -> ToolResult:
        """List the names of all active tmux sessions in the sandbox."""
        try:
            # Ensure sandbox is initialized
            await self._ensure_sandbox()

            # List all tmux sessions
            result = await self._execute_raw_command(
                "tmux list-sessions 2>/dev/null || echo 'No sessions'"
            )
            output = result.get("output", "")

            if "No sessions" in output or not output.strip():
                return self.success_response(
                    {"message": "No active tmux sessions found.", "sessions": []}
                )

            # Parse session list: each line is "<name>: <details>".
            sessions = []
            for line in output.split("\n"):
                if line.strip():
                    parts = line.split(":")
                    if parts:
                        session_name = parts[0].strip()
                        sessions.append(session_name)

            return self.success_response(
                {
                    "message": f"Found {len(sessions)} active sessions.",
                    "sessions": sessions,
                }
            )
        except Exception as e:
            return self.fail_response(f"Error listing commands: {str(e)}")

    async def execute(
        self,
        action: str,
        command: str,
        folder: Optional[str] = None,
        session_name: Optional[str] = None,
        blocking: bool = False,
        timeout: int = 60,
        kill_session: bool = False,
    ) -> ToolResult:
        """
        Execute a shell action in the sandbox environment.

        Args:
            action: One of execute_command / check_command_output /
                terminate_command / list_commands.
            command: Shell command (used by execute_command).
                NOTE(review): has no default, so callers must pass it even
                for actions that ignore it — confirm before changing.
            folder: Optional workspace-relative working directory.
            session_name: tmux session to target (required for
                check_command_output and terminate_command).
            blocking: Wait for completion when executing a command.
            timeout: Seconds to wait in blocking mode.
            kill_session: Terminate the session after checking output.

        Returns:
            ToolResult with the action's output or error
        """
        # NOTE(review): a fresh Lock is created on every call, so this block
        # never actually serializes concurrent executions — confirm intent.
        async with asyncio.Lock():
            try:
                # Dispatch on the requested action.
                # NOTE(review): the error strings below mention "navigation"
                # and "click_element" — apparently copied from the browser
                # tool; they are kept verbatim here.
                if action == "execute_command":
                    if not command:
                        return self.fail_response("command is required for navigation")
                    return await self._execute_command(
                        command, folder, session_name, blocking, timeout
                    )
                elif action == "check_command_output":
                    if session_name is None:
                        return self.fail_response(
                            "session_name is required for navigation"
                        )
                    return await self._check_command_output(session_name, kill_session)
                elif action == "terminate_command":
                    if session_name is None:
                        return self.fail_response(
                            "session_name is required for click_element"
                        )
                    return await self._terminate_command(session_name)
                elif action == "list_commands":
                    return await self._list_commands()
                else:
                    return self.fail_response(f"Unknown action: {action}")
            except Exception as e:
                logger.error(f"Error executing shell action: {e}")
                return self.fail_response(f"Error executing shell action: {e}")

    async def cleanup(self):
        """Clean up all sessions."""
        for session_name in list(self._sessions.keys()):
            await self._cleanup_session(session_name)

        # Also clean up any tmux sessions
        try:
            await self._ensure_sandbox()
            await self._execute_raw_command("tmux kill-server 2>/dev/null || true")
        except Exception as e:
            logger.error(f"Error shell box cleanup action: {e}")
| python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false |
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/sandbox/sb_files_tool.py | app/tool/sandbox/sb_files_tool.py | import asyncio
from typing import Optional, TypeVar
from pydantic import Field
from app.daytona.tool_base import Sandbox, SandboxToolsBase
from app.tool.base import ToolResult
from app.utils.files_utils import clean_path, should_exclude_file
from app.utils.logger import logger
# Generic context placeholder used by create_with_context.
Context = TypeVar("Context")

# Tool description surfaced to the model via the tool schema.
_FILES_DESCRIPTION = """\
A sandbox-based file system tool that allows file operations in a secure sandboxed environment.
* This tool provides commands for creating, reading, updating, and deleting files in the workspace
* All operations are performed relative to the /workspace directory for security
* Use this when you need to manage files, edit code, or manipulate file contents in a sandbox
* Each action requires specific parameters as defined in the tool's dependencies
Key capabilities include:
* File creation: Create new files with specified content and permissions
* File modification: Replace specific strings or completely rewrite files
* File deletion: Remove files from the workspace
* File reading: Read file contents with optional line range specification
"""
class SandboxFilesTool(SandboxToolsBase):
    """File-system operations (create/replace/rewrite/delete) inside the
    Daytona sandbox, scoped to the /workspace directory."""

    name: str = "sandbox_files"
    description: str = _FILES_DESCRIPTION
    # JSON schema describing the tool's arguments to the model.
    parameters: dict = {
        "type": "object",
        "properties": {
            "action": {
                "type": "string",
                "enum": [
                    "create_file",
                    "str_replace",
                    "full_file_rewrite",
                    "delete_file",
                ],
                "description": "The file operation to perform",
            },
            "file_path": {
                "type": "string",
                "description": "Path to the file, relative to /workspace (e.g., 'src/main.py')",
            },
            "file_contents": {
                "type": "string",
                "description": "Content to write to the file",
            },
            "old_str": {
                "type": "string",
                "description": "Text to be replaced (must appear exactly once)",
            },
            "new_str": {
                "type": "string",
                "description": "Replacement text",
            },
            "permissions": {
                "type": "string",
                "description": "File permissions in octal format (e.g., '644')",
                "default": "644",
            },
        },
        "required": ["action"],
        "dependencies": {
            "create_file": ["file_path", "file_contents"],
            "str_replace": ["file_path", "old_str", "new_str"],
            "full_file_rewrite": ["file_path", "file_contents"],
            "delete_file": ["file_path"],
        },
    }

    # Number of context lines shown around an edit in _str_replace.
    SNIPPET_LINES: int = Field(default=4, exclude=True)
    # workspace_path: str = Field(default="/workspace", exclude=True)
    # sandbox: Optional[Sandbox] = Field(default=None, exclude=True)

    def __init__(
        self, sandbox: Optional[Sandbox] = None, thread_id: Optional[str] = None, **data
    ):
        """Initialize with optional sandbox and thread_id."""
        # NOTE(review): thread_id is accepted but never read in this class.
        super().__init__(**data)
        if sandbox is not None:
            self._sandbox = sandbox

    def clean_path(self, path: str) -> str:
        """Clean and normalize a path to be relative to /workspace"""
        return clean_path(path, self.workspace_path)

    def _should_exclude_file(self, rel_path: str) -> bool:
        """Check if a file should be excluded based on path, name, or extension"""
        return should_exclude_file(rel_path)

    def _file_exists(self, path: str) -> bool:
        """Check if a file exists in the sandbox"""
        # EAFP: get_file_info raises if the path is missing.
        try:
            self.sandbox.fs.get_file_info(path)
            return True
        except Exception:
            return False

    async def get_workspace_state(self) -> dict:
        """Get the current workspace state by reading all files.

        Returns:
            dict mapping relative path -> {content, is_dir, size, modified};
            empty dict on any top-level failure.
        """
        files_state = {}
        try:
            # Ensure sandbox is initialized
            await self._ensure_sandbox()

            files = self.sandbox.fs.list_files(self.workspace_path)
            for file_info in files:
                rel_path = file_info.name

                # Skip excluded files and directories
                if self._should_exclude_file(rel_path) or file_info.is_dir:
                    continue

                try:
                    full_path = f"{self.workspace_path}/{rel_path}"
                    content = self.sandbox.fs.download_file(full_path).decode()
                    files_state[rel_path] = {
                        "content": content,
                        "is_dir": file_info.is_dir,
                        "size": file_info.size,
                        "modified": file_info.mod_time,
                    }
                except Exception as e:
                    print(f"Error reading file {rel_path}: {e}")
                except UnicodeDecodeError:
                    # NOTE(review): unreachable — the `except Exception`
                    # handler above already catches UnicodeDecodeError.
                    print(f"Skipping binary file: {rel_path}")

            return files_state
        except Exception as e:
            print(f"Error getting workspace state: {str(e)}")
            return {}

    async def execute(
        self,
        action: str,
        file_path: Optional[str] = None,
        file_contents: Optional[str] = None,
        old_str: Optional[str] = None,
        new_str: Optional[str] = None,
        permissions: Optional[str] = "644",
        **kwargs,
    ) -> ToolResult:
        """
        Execute a file operation in the sandbox environment.

        Args:
            action: The file operation to perform
            file_path: Path to the file relative to /workspace
            file_contents: Content to write to the file
            old_str: Text to be replaced (for str_replace)
            new_str: Replacement text (for str_replace)
            permissions: File permissions in octal format

        Returns:
            ToolResult with the operation's output or error
        """
        # NOTE(review): a fresh Lock is created on every call, so this block
        # never actually serializes concurrent executions — confirm intent.
        async with asyncio.Lock():
            try:
                # File creation
                if action == "create_file":
                    if not file_path or not file_contents:
                        return self.fail_response(
                            "file_path and file_contents are required for create_file"
                        )
                    return await self._create_file(
                        file_path, file_contents, permissions
                    )

                # String replacement
                elif action == "str_replace":
                    # NOTE(review): truthiness check rejects an empty-string
                    # new_str (i.e. pure deletion) — confirm intent.
                    if not file_path or not old_str or not new_str:
                        return self.fail_response(
                            "file_path, old_str, and new_str are required for str_replace"
                        )
                    return await self._str_replace(file_path, old_str, new_str)

                # Full file rewrite
                elif action == "full_file_rewrite":
                    if not file_path or not file_contents:
                        return self.fail_response(
                            "file_path and file_contents are required for full_file_rewrite"
                        )
                    return await self._full_file_rewrite(
                        file_path, file_contents, permissions
                    )

                # File deletion
                elif action == "delete_file":
                    if not file_path:
                        return self.fail_response(
                            "file_path is required for delete_file"
                        )
                    return await self._delete_file(file_path)

                else:
                    return self.fail_response(f"Unknown action: {action}")
            except Exception as e:
                logger.error(f"Error executing file action: {e}")
                return self.fail_response(f"Error executing file action: {e}")

    async def _create_file(
        self, file_path: str, file_contents: str, permissions: str = "644"
    ) -> ToolResult:
        """Create a new file with the provided contents"""
        try:
            # Ensure sandbox is initialized
            await self._ensure_sandbox()

            file_path = self.clean_path(file_path)
            full_path = f"{self.workspace_path}/{file_path}"
            if self._file_exists(full_path):
                return self.fail_response(
                    f"File '{file_path}' already exists. Use full_file_rewrite to modify existing files."
                )

            # Create parent directories if needed
            parent_dir = "/".join(full_path.split("/")[:-1])
            if parent_dir:
                self.sandbox.fs.create_folder(parent_dir, "755")

            # Write the file content
            self.sandbox.fs.upload_file(file_contents.encode(), full_path)
            self.sandbox.fs.set_file_permissions(full_path, permissions)

            message = f"File '{file_path}' created successfully."

            # Check if index.html was created and add 8080 server info (only in root workspace)
            if file_path.lower() == "index.html":
                try:
                    website_link = self.sandbox.get_preview_link(8080)
                    # Some SDK versions expose .url; otherwise parse repr().
                    website_url = (
                        website_link.url
                        if hasattr(website_link, "url")
                        else str(website_link).split("url='")[1].split("'")[0]
                    )
                    message += f"\n\n[Auto-detected index.html - HTTP server available at: {website_url}]"
                    message += "\n[Note: Use the provided HTTP server URL above instead of starting a new server]"
                except Exception as e:
                    logger.warning(
                        f"Failed to get website URL for index.html: {str(e)}"
                    )

            return self.success_response(message)
        except Exception as e:
            return self.fail_response(f"Error creating file: {str(e)}")

    async def _str_replace(
        self, file_path: str, old_str: str, new_str: str
    ) -> ToolResult:
        """Replace specific text in a file"""
        try:
            # Ensure sandbox is initialized
            await self._ensure_sandbox()

            file_path = self.clean_path(file_path)
            full_path = f"{self.workspace_path}/{file_path}"
            if not self._file_exists(full_path):
                return self.fail_response(f"File '{file_path}' does not exist")

            content = self.sandbox.fs.download_file(full_path).decode()

            # Normalize tabs so comparisons match regardless of tab width.
            old_str = old_str.expandtabs()
            new_str = new_str.expandtabs()

            # The target must appear exactly once to be an unambiguous edit.
            occurrences = content.count(old_str)
            if occurrences == 0:
                return self.fail_response(f"String '{old_str}' not found in file")
            if occurrences > 1:
                lines = [
                    i + 1
                    for i, line in enumerate(content.split("\n"))
                    if old_str in line
                ]
                return self.fail_response(
                    f"Multiple occurrences found in lines {lines}. Please ensure string is unique"
                )

            # Perform replacement
            new_content = content.replace(old_str, new_str)
            self.sandbox.fs.upload_file(new_content.encode(), full_path)

            # Show snippet around the edit
            # NOTE(review): the computed snippet is never included in the
            # response message below — dead code or an unfinished feature.
            replacement_line = content.split(old_str)[0].count("\n")
            start_line = max(0, replacement_line - self.SNIPPET_LINES)
            end_line = replacement_line + self.SNIPPET_LINES + new_str.count("\n")
            snippet = "\n".join(new_content.split("\n")[start_line : end_line + 1])

            message = f"Replacement successful."

            return self.success_response(message)
        except Exception as e:
            return self.fail_response(f"Error replacing string: {str(e)}")

    async def _full_file_rewrite(
        self, file_path: str, file_contents: str, permissions: str = "644"
    ) -> ToolResult:
        """Completely rewrite an existing file with new content"""
        try:
            # Ensure sandbox is initialized
            await self._ensure_sandbox()

            file_path = self.clean_path(file_path)
            full_path = f"{self.workspace_path}/{file_path}"
            if not self._file_exists(full_path):
                return self.fail_response(
                    f"File '{file_path}' does not exist. Use create_file to create a new file."
                )

            self.sandbox.fs.upload_file(file_contents.encode(), full_path)
            self.sandbox.fs.set_file_permissions(full_path, permissions)

            message = f"File '{file_path}' completely rewritten successfully."

            # Check if index.html was rewritten and add 8080 server info (only in root workspace)
            if file_path.lower() == "index.html":
                try:
                    website_link = self.sandbox.get_preview_link(8080)
                    # Some SDK versions expose .url; otherwise parse repr().
                    website_url = (
                        website_link.url
                        if hasattr(website_link, "url")
                        else str(website_link).split("url='")[1].split("'")[0]
                    )
                    message += f"\n\n[Auto-detected index.html - HTTP server available at: {website_url}]"
                    message += "\n[Note: Use the provided HTTP server URL above instead of starting a new server]"
                except Exception as e:
                    logger.warning(
                        f"Failed to get website URL for index.html: {str(e)}"
                    )

            return self.success_response(message)
        except Exception as e:
            return self.fail_response(f"Error rewriting file: {str(e)}")

    async def _delete_file(self, file_path: str) -> ToolResult:
        """Delete a file at the given path"""
        try:
            # Ensure sandbox is initialized
            await self._ensure_sandbox()

            file_path = self.clean_path(file_path)
            full_path = f"{self.workspace_path}/{file_path}"
            if not self._file_exists(full_path):
                return self.fail_response(f"File '{file_path}' does not exist")

            self.sandbox.fs.delete_file(full_path)
            return self.success_response(f"File '{file_path}' deleted successfully.")
        except Exception as e:
            return self.fail_response(f"Error deleting file: {str(e)}")

    async def cleanup(self):
        """Clean up sandbox resources."""
        # Intentionally a no-op: this tool holds no per-instance resources.

    @classmethod
    def create_with_context(cls, context: Context) -> "SandboxFilesTool[Context]":
        """Factory method to create a SandboxFilesTool with a specific context."""
        raise NotImplementedError(
            "create_with_context not implemented for SandboxFilesTool"
        )
| python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false |
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/sandbox/sb_browser_tool.py | app/tool/sandbox/sb_browser_tool.py | import base64
import io
import json
import traceback
from typing import Optional # Add this import for Optional
from PIL import Image
from pydantic import Field
from app.daytona.tool_base import ( # Ensure Sandbox is imported correctly
Sandbox,
SandboxToolsBase,
ThreadMessage,
)
from app.tool.base import ToolResult
from app.utils.logger import logger
# Context = TypeVar("Context")

# Tool description surfaced to the LLM; kept as a module constant so the
# class attribute assignment below stays short.
_BROWSER_DESCRIPTION = """\
A sandbox-based browser automation tool that allows interaction with web pages through various actions.
* This tool provides commands for controlling a browser session in a sandboxed environment
* It maintains state across calls, keeping the browser session alive until explicitly closed
* Use this when you need to browse websites, fill forms, click buttons, or extract content in a secure sandbox
* Each action requires specific parameters as defined in the tool's dependencies
Key capabilities include:
* Navigation: Go to specific URLs, go back in history
* Interaction: Click elements by index, input text, send keyboard commands
* Scrolling: Scroll up/down by pixel amount or scroll to specific text
* Tab management: Switch between tabs or close tabs
* Content extraction: Get dropdown options or select dropdown options
"""
# noinspection PyArgumentList
class SandboxBrowserTool(SandboxToolsBase):
    """Tool for executing tasks in a Daytona sandbox with browser-use capabilities.

    Actions are proxied to a browser-automation HTTP API running inside the
    sandbox on port 8003; each call shells out to ``curl`` via the sandbox
    process executor and normalizes the JSON response into a ToolResult.
    """

    name: str = "sandbox_browser"
    description: str = _BROWSER_DESCRIPTION
    parameters: dict = {
        "type": "object",
        "properties": {
            "action": {
                "type": "string",
                "enum": [
                    "navigate_to",
                    "go_back",
                    "wait",
                    "click_element",
                    "input_text",
                    "send_keys",
                    "switch_tab",
                    "close_tab",
                    "scroll_down",
                    "scroll_up",
                    "scroll_to_text",
                    "get_dropdown_options",
                    "select_dropdown_option",
                    "click_coordinates",
                    "drag_drop",
                ],
                "description": "The browser action to perform",
            },
            "url": {
                "type": "string",
                "description": "URL for 'navigate_to' action",
            },
            "index": {
                "type": "integer",
                "description": "Element index for interaction actions",
            },
            "text": {
                "type": "string",
                "description": "Text for input or scroll actions",
            },
            "amount": {
                "type": "integer",
                "description": "Pixel amount to scroll",
            },
            "page_id": {
                "type": "integer",
                "description": "Tab ID for tab management actions",
            },
            "keys": {
                "type": "string",
                "description": "Keys to send for keyboard actions",
            },
            "seconds": {
                "type": "integer",
                "description": "Seconds to wait",
            },
            "x": {
                "type": "integer",
                "description": "X coordinate for click or drag actions",
            },
            "y": {
                "type": "integer",
                "description": "Y coordinate for click or drag actions",
            },
            "element_source": {
                "type": "string",
                "description": "Source element for drag and drop",
            },
            "element_target": {
                "type": "string",
                "description": "Target element for drag and drop",
            },
        },
        "required": ["action"],
        "dependencies": {
            "navigate_to": ["url"],
            "click_element": ["index"],
            "input_text": ["index", "text"],
            "send_keys": ["keys"],
            "switch_tab": ["page_id"],
            "close_tab": ["page_id"],
            "scroll_down": ["amount"],
            "scroll_up": ["amount"],
            "scroll_to_text": ["text"],
            "get_dropdown_options": ["index"],
            "select_dropdown_option": ["index", "text"],
            "click_coordinates": ["x", "y"],
            "drag_drop": ["element_source", "element_target"],
            "wait": ["seconds"],
        },
    }

    # Most recent browser_state payload; read back by get_current_state().
    # Excluded from pydantic serialization.
    browser_message: Optional[ThreadMessage] = Field(default=None, exclude=True)

    def __init__(
        self, sandbox: Optional[Sandbox] = None, thread_id: Optional[str] = None, **data
    ):
        """Initialize with optional sandbox and thread_id."""
        super().__init__(**data)
        if sandbox is not None:
            self._sandbox = sandbox  # Directly set the base class private attribute

    def _validate_base64_image(
        self, base64_string: str, max_size_mb: int = 10
    ) -> tuple[bool, str]:
        """
        Validate base64 image data.

        Args:
            base64_string: The base64 encoded image data
            max_size_mb: Maximum allowed image size in megabytes

        Returns:
            Tuple of (is_valid, error_message)
        """
        try:
            if not base64_string or len(base64_string) < 10:
                return False, "Base64 string is empty or too short"

            # Strip an optional "data:<mime>;base64," prefix.
            if base64_string.startswith("data:"):
                try:
                    base64_string = base64_string.split(",", 1)[1]
                except (IndexError, ValueError):
                    return False, "Invalid data URL format"

            import re

            if not re.match(r"^[A-Za-z0-9+/]*={0,2}$", base64_string):
                return False, "Invalid base64 characters detected"

            if len(base64_string) % 4 != 0:
                return False, "Invalid base64 string length"

            try:
                image_data = base64.b64decode(base64_string, validate=True)
            except Exception as e:
                return False, f"Base64 decoding failed: {str(e)}"

            max_size_bytes = max_size_mb * 1024 * 1024
            if len(image_data) > max_size_bytes:
                return False, f"Image size exceeds limit ({max_size_bytes} bytes)"

            try:
                image_stream = io.BytesIO(image_data)
                with Image.open(image_stream) as img:
                    # verify() checks integrity but leaves the image object
                    # unusable, so the stream is rewound and reopened below.
                    img.verify()

                    supported_formats = {"JPEG", "PNG", "GIF", "BMP", "WEBP", "TIFF"}
                    if img.format not in supported_formats:
                        return False, f"Unsupported image format: {img.format}"

                image_stream.seek(0)
                with Image.open(image_stream) as img_check:
                    width, height = img_check.size
                    max_dimension = 8192
                    if width > max_dimension or height > max_dimension:
                        return (
                            False,
                            f"Image dimensions exceed limit ({max_dimension}x{max_dimension})",
                        )
                    if width < 1 or height < 1:
                        return False, f"Invalid image dimensions: {width}x{height}"
            except Exception as e:
                return False, f"Invalid image data: {str(e)}"

            return True, "Valid image"
        except Exception as e:
            logger.error(f"Unexpected error during base64 image validation: {e}")
            return False, f"Validation error: {str(e)}"

    async def _execute_browser_action(
        self, endpoint: str, params: dict = None, method: str = "POST"
    ) -> ToolResult:
        """Execute a browser automation action through the sandbox API.

        Args:
            endpoint: Endpoint name under /api/automation/.
            params: Optional payload (query string for GET, JSON body otherwise).
            method: HTTP method, "POST" by default.

        Returns:
            ToolResult wrapping the normalized API response or an error.
        """
        try:
            await self._ensure_sandbox()

            url = f"http://localhost:8003/api/automation/{endpoint}"

            if method == "GET" and params:
                # NOTE(review): query values are not URL-encoded; this assumes
                # callers only pass simple scalars via GET — confirm before
                # sending arbitrary strings this way.
                query_params = "&".join([f"{k}={v}" for k, v in params.items()])
                url = f"{url}?{query_params}"

            curl_cmd = (
                f"curl -s -X {method} '{url}' -H 'Content-Type: application/json'"
            )
            if method != "GET" and params:
                json_data = json.dumps(params)
                # Bug fix: escape single quotes for the single-quoted shell
                # argument ('\'' idiom); previously any payload text containing
                # an apostrophe broke the curl command line.
                json_data = json_data.replace("'", "'\\''")
                curl_cmd += f" -d '{json_data}'"

            logger.debug(f"Executing curl command: {curl_cmd}")

            response = self.sandbox.process.exec(curl_cmd, timeout=30)

            if response.exit_code == 0:
                try:
                    result = json.loads(response.result)
                    result.setdefault("content", "")
                    result.setdefault("role", "assistant")

                    # Drop screenshots that fail validation instead of
                    # propagating corrupt image data downstream.
                    if "screenshot_base64" in result:
                        screenshot_data = result["screenshot_base64"]
                        is_valid, validation_message = self._validate_base64_image(
                            screenshot_data
                        )
                        if not is_valid:
                            logger.warning(
                                f"Screenshot validation failed: {validation_message}"
                            )
                            result["image_validation_error"] = validation_message
                            del result["screenshot_base64"]

                    # Keep the full payload so get_current_state() can read it.
                    message = ThreadMessage(
                        type="browser_state", content=result, is_llm_message=False
                    )
                    self.browser_message = message

                    success_response = {
                        "success": result.get("success", False),
                        "message": result.get("message", "Browser action completed"),
                    }

                    # Copy through the optional metadata fields the API may set.
                    for field in [
                        "url",
                        "title",
                        "element_count",
                        "pixels_below",
                        "ocr_text",
                        "image_url",
                    ]:
                        if field in result:
                            success_response[field] = result[field]

                    return (
                        self.success_response(success_response)
                        if success_response["success"]
                        else self.fail_response(success_response)
                    )
                except json.JSONDecodeError as e:
                    logger.error(f"Failed to parse response JSON: {e}")
                    return self.fail_response(f"Failed to parse response JSON: {e}")
            else:
                logger.error(f"Browser automation request failed: {response}")
                return self.fail_response(
                    f"Browser automation request failed: {response}"
                )

        except Exception as e:
            logger.error(f"Error executing browser action: {e}")
            logger.debug(traceback.format_exc())
            return self.fail_response(f"Error executing browser action: {e}")

    async def execute(
        self,
        action: str,
        url: Optional[str] = None,
        index: Optional[int] = None,
        text: Optional[str] = None,
        amount: Optional[int] = None,
        page_id: Optional[int] = None,
        keys: Optional[str] = None,
        seconds: Optional[int] = None,
        x: Optional[int] = None,
        y: Optional[int] = None,
        element_source: Optional[str] = None,
        element_target: Optional[str] = None,
        **kwargs,
    ) -> ToolResult:
        """
        Execute a browser action in the sandbox environment.

        Args:
            action: The browser action to perform
            url: URL for navigation
            index: Element index for interaction
            text: Text for input or scroll actions
            amount: Pixel amount to scroll
            page_id: Tab ID for tab management
            keys: Keys to send for keyboard actions
            seconds: Seconds to wait
            x: X coordinate for click/drag
            y: Y coordinate for click/drag
            element_source: Source element for drag and drop
            element_target: Target element for drag and drop

        Returns:
            ToolResult with the action's output or error
        """
        try:
            # Each branch validates the parameters its endpoint requires
            # (mirroring the "dependencies" schema) before dispatching.

            # Navigation actions
            if action == "navigate_to":
                if not url:
                    return self.fail_response("URL is required for navigation")
                return await self._execute_browser_action("navigate_to", {"url": url})

            elif action == "go_back":
                return await self._execute_browser_action("go_back", {})

            # Interaction actions
            elif action == "click_element":
                if index is None:
                    return self.fail_response("Index is required for click_element")
                return await self._execute_browser_action(
                    "click_element", {"index": index}
                )

            elif action == "input_text":
                if index is None or not text:
                    return self.fail_response(
                        "Index and text are required for input_text"
                    )
                return await self._execute_browser_action(
                    "input_text", {"index": index, "text": text}
                )

            elif action == "send_keys":
                if not keys:
                    return self.fail_response("Keys are required for send_keys")
                return await self._execute_browser_action("send_keys", {"keys": keys})

            # Tab management
            elif action == "switch_tab":
                if page_id is None:
                    return self.fail_response("Page ID is required for switch_tab")
                return await self._execute_browser_action(
                    "switch_tab", {"page_id": page_id}
                )

            elif action == "close_tab":
                if page_id is None:
                    return self.fail_response("Page ID is required for close_tab")
                return await self._execute_browser_action(
                    "close_tab", {"page_id": page_id}
                )

            # Scrolling actions
            elif action == "scroll_down":
                params = {"amount": amount} if amount is not None else {}
                return await self._execute_browser_action("scroll_down", params)

            elif action == "scroll_up":
                params = {"amount": amount} if amount is not None else {}
                return await self._execute_browser_action("scroll_up", params)

            elif action == "scroll_to_text":
                if not text:
                    return self.fail_response("Text is required for scroll_to_text")
                return await self._execute_browser_action(
                    "scroll_to_text", {"text": text}
                )

            # Dropdown actions
            elif action == "get_dropdown_options":
                if index is None:
                    return self.fail_response(
                        "Index is required for get_dropdown_options"
                    )
                return await self._execute_browser_action(
                    "get_dropdown_options", {"index": index}
                )

            elif action == "select_dropdown_option":
                if index is None or not text:
                    return self.fail_response(
                        "Index and text are required for select_dropdown_option"
                    )
                return await self._execute_browser_action(
                    "select_dropdown_option", {"index": index, "text": text}
                )

            # Coordinate-based actions
            elif action == "click_coordinates":
                if x is None or y is None:
                    return self.fail_response(
                        "X and Y coordinates are required for click_coordinates"
                    )
                return await self._execute_browser_action(
                    "click_coordinates", {"x": x, "y": y}
                )

            elif action == "drag_drop":
                if not element_source or not element_target:
                    return self.fail_response(
                        "Source and target elements are required for drag_drop"
                    )
                return await self._execute_browser_action(
                    "drag_drop",
                    {
                        "element_source": element_source,
                        "element_target": element_target,
                    },
                )

            # Utility actions
            elif action == "wait":
                seconds_to_wait = seconds if seconds is not None else 3
                return await self._execute_browser_action(
                    "wait", {"seconds": seconds_to_wait}
                )

            else:
                return self.fail_response(f"Unknown action: {action}")

        except Exception as e:
            logger.error(f"Error executing browser action: {e}")
            return self.fail_response(f"Error executing browser action: {e}")

    async def get_current_state(
        self, message: Optional[ThreadMessage] = None
    ) -> ToolResult:
        """
        Get the current browser state as a ToolResult.

        Args:
            message: Optional browser_state message; defaults to the one
                recorded by the most recent action.
        """
        try:
            # Use provided message or fall back to the last recorded one.
            message = message or self.browser_message
            if not message:
                return ToolResult(error="Browser context not initialized")

            state = message.content
            screenshot = state.get("screenshot_base64")

            # Build the state info with all required fields.
            state_info = {
                "url": state.get("url", ""),
                "title": state.get("title", ""),
                # NOTE(review): assumes tab entries are pydantic models; if the
                # API returns plain dicts, model_dump() will fail — confirm.
                "tabs": [tab.model_dump() for tab in state.get("tabs", [])],
                # Bug fix: `state` is a dict, so the previous
                # getattr(state, "pixels_above", 0) always returned the
                # default 0; dict .get() reads the actual values.
                "pixels_above": state.get("pixels_above", 0),
                "pixels_below": state.get("pixels_below", 0),
                "help": "[0], [1], [2], etc., represent clickable indices corresponding to the elements listed. Clicking on these indices will navigate to or interact with the respective content behind them.",
            }

            return ToolResult(
                output=json.dumps(state_info, indent=4, ensure_ascii=False),
                base64_image=screenshot,
            )
        except Exception as e:
            return ToolResult(error=f"Failed to get browser state: {str(e)}")

    @classmethod
    def create_with_sandbox(cls, sandbox: Sandbox) -> "SandboxBrowserTool":
        """Factory method to create a tool with sandbox."""
        return cls(sandbox=sandbox)
| python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false |
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/sandbox/sb_vision_tool.py | app/tool/sandbox/sb_vision_tool.py | import base64
import mimetypes
import os
from io import BytesIO
from typing import Optional
from PIL import Image
from pydantic import Field
from app.daytona.tool_base import Sandbox, SandboxToolsBase, ThreadMessage
from app.tool.base import ToolResult
# Maximum file sizes (10 MB for the original image, 5 MB after compression)
MAX_IMAGE_SIZE = 10 * 1024 * 1024
MAX_COMPRESSED_SIZE = 5 * 1024 * 1024
# Compression settings: bounding box used for downscaling plus encoder knobs
DEFAULT_MAX_WIDTH = 1920
DEFAULT_MAX_HEIGHT = 1080
DEFAULT_JPEG_QUALITY = 85
DEFAULT_PNG_COMPRESS_LEVEL = 6

# Tool description surfaced to the LLM.
_VISION_DESCRIPTION = """
A sandbox-based vision tool that allows the agent to read image files inside the sandbox using the see_image action.
* Only the see_image action is supported, with the parameter being the relative path of the image under /workspace.
* The image will be compressed and converted to base64 for use in subsequent context.
* Supported formats: JPG, PNG, GIF, WEBP. Maximum size: 10MB.
"""
class SandboxVisionTool(SandboxToolsBase):
    """Sandbox vision tool: loads an image from the sandbox workspace,
    compresses it, and returns it base64-encoded for use as LLM context."""

    name: str = "sandbox_vision"
    description: str = _VISION_DESCRIPTION
    parameters: dict = {
        "type": "object",
        "properties": {
            "action": {
                "type": "string",
                "enum": ["see_image"],
                "description": "要执行的视觉动作,目前仅支持 see_image",
            },
            "file_path": {
                "type": "string",
                "description": "图片在 /workspace 下的相对路径,如 'screenshots/image.png'",
            },
        },
        "required": ["action", "file_path"],
        "dependencies": {"see_image": ["file_path"]},
    }

    # def __init__(self, project_id: str, thread_id: str, thread_manager: ThreadManager):
    #     super().__init__(project_id=project_id, thread_manager=thread_manager)
    #     self.thread_id = thread_id
    #     self.thread_manager = thread_manager

    # Last image_context message produced by see_image; excluded from serialization.
    vision_message: Optional[ThreadMessage] = Field(default=None, exclude=True)

    def __init__(
        self, sandbox: Optional[Sandbox] = None, thread_id: Optional[str] = None, **data
    ):
        """Initialize with optional sandbox and thread_id."""
        super().__init__(**data)
        if sandbox is not None:
            # Directly seed the base class' private sandbox handle.
            self._sandbox = sandbox

    def compress_image(
        self, image_bytes: bytes, mime_type: str, file_path: str
    ) -> tuple[bytes, str]:
        """Compress the image while keeping reasonable quality.

        Args:
            image_bytes: Raw image file contents.
            mime_type: Detected MIME type of the input.
            file_path: Cleaned workspace-relative path (informational only).

        Returns:
            Tuple of (compressed bytes, output MIME type). On any failure the
            original bytes and MIME type are returned unchanged (best effort).
        """
        try:
            img = Image.open(BytesIO(image_bytes))
            # Flatten transparency onto a white background so the result can
            # be re-encoded as JPEG if needed.
            if img.mode in ("RGBA", "LA", "P"):
                background = Image.new("RGB", img.size, (255, 255, 255))
                if img.mode == "P":
                    img = img.convert("RGBA")
                background.paste(
                    img, mask=img.split()[-1] if img.mode == "RGBA" else None
                )
                img = background
            # Downscale to fit within the default bounding box, preserving
            # aspect ratio.
            width, height = img.size
            if width > DEFAULT_MAX_WIDTH or height > DEFAULT_MAX_HEIGHT:
                ratio = min(DEFAULT_MAX_WIDTH / width, DEFAULT_MAX_HEIGHT / height)
                new_width = int(width * ratio)
                new_height = int(height * ratio)
                img = img.resize((new_width, new_height), Image.Resampling.LANCZOS)
            # Re-encode: GIF and PNG keep their format; everything else
            # (including WEBP input) is emitted as JPEG.
            output = BytesIO()
            if mime_type == "image/gif":
                img.save(output, format="GIF", optimize=True)
                output_mime = "image/gif"
            elif mime_type == "image/png":
                img.save(
                    output,
                    format="PNG",
                    optimize=True,
                    compress_level=DEFAULT_PNG_COMPRESS_LEVEL,
                )
                output_mime = "image/png"
            else:
                img.save(
                    output, format="JPEG", quality=DEFAULT_JPEG_QUALITY, optimize=True
                )
                output_mime = "image/jpeg"
            compressed_bytes = output.getvalue()
            return compressed_bytes, output_mime
        except Exception:
            # Best effort: fall back to the original payload on any error.
            return image_bytes, mime_type

    async def execute(
        self, action: str, file_path: Optional[str] = None, **kwargs
    ) -> ToolResult:
        """
        Execute a vision action; currently only ``see_image`` is supported.

        Args:
            action: Must be ``"see_image"``.
            file_path: Image path relative to /workspace.

        Returns:
            ToolResult carrying the compressed image as base64, or a failure
            response (error strings are user-facing, in Chinese).
        """
        if action != "see_image":
            return self.fail_response(f"未知的视觉动作: {action}")
        if not file_path:
            return self.fail_response("file_path 参数不能为空")
        try:
            await self._ensure_sandbox()
            cleaned_path = self.clean_path(file_path)
            full_path = f"{self.workspace_path}/{cleaned_path}"
            # Stat the file first: reject directories and missing paths.
            try:
                file_info = self.sandbox.fs.get_file_info(full_path)
                if file_info.is_dir:
                    return self.fail_response(f"路径 '{cleaned_path}' 是目录,不是图片文件。")
            except Exception:
                return self.fail_response(f"图片文件未找到: '{cleaned_path}'")
            # Enforce the pre-compression size cap.
            if file_info.size > MAX_IMAGE_SIZE:
                return self.fail_response(
                    f"图片文件 '{cleaned_path}' 过大 ({file_info.size / (1024*1024):.2f}MB),最大允许 {MAX_IMAGE_SIZE / (1024*1024)}MB。"
                )
            try:
                image_bytes = self.sandbox.fs.download_file(full_path)
            except Exception:
                return self.fail_response(f"无法读取图片文件: {cleaned_path}")
            # Determine MIME type; fall back to the file extension when the
            # mimetypes module cannot identify the path.
            mime_type, _ = mimetypes.guess_type(full_path)
            if not mime_type or not mime_type.startswith("image/"):
                ext = os.path.splitext(cleaned_path)[1].lower()
                if ext == ".jpg" or ext == ".jpeg":
                    mime_type = "image/jpeg"
                elif ext == ".png":
                    mime_type = "image/png"
                elif ext == ".gif":
                    mime_type = "image/gif"
                elif ext == ".webp":
                    mime_type = "image/webp"
                else:
                    return self.fail_response(
                        f"不支持或未知的图片格式: '{cleaned_path}'。支持: JPG, PNG, GIF, WEBP。"
                    )
            compressed_bytes, compressed_mime_type = self.compress_image(
                image_bytes, mime_type, cleaned_path
            )
            # Enforce the post-compression size cap.
            if len(compressed_bytes) > MAX_COMPRESSED_SIZE:
                return self.fail_response(
                    f"图片文件 '{cleaned_path}' 压缩后仍过大 ({len(compressed_bytes) / (1024*1024):.2f}MB),最大允许 {MAX_COMPRESSED_SIZE / (1024*1024)}MB。"
                )
            base64_image = base64.b64encode(compressed_bytes).decode("utf-8")
            # Record an image_context message for downstream consumers.
            image_context_data = {
                "mime_type": compressed_mime_type,
                "base64": base64_image,
                "file_path": cleaned_path,
                "original_size": file_info.size,
                "compressed_size": len(compressed_bytes),
            }
            message = ThreadMessage(
                type="image_context", content=image_context_data, is_llm_message=False
            )
            self.vision_message = message
            # return self.success_response(f"成功加载并压缩图片 '{cleaned_path}' (由 {file_info.size / 1024:.1f}KB 压缩到 {len(compressed_bytes) / 1024:.1f}KB)。")
            return ToolResult(
                output=f"成功加载并压缩图片 '{cleaned_path}'",
                base64_image=base64_image,
            )
        except Exception as e:
            return self.fail_response(f"see_image 执行异常: {str(e)}")
| python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false |
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/tests/sandbox/test_sandbox.py | tests/sandbox/test_sandbox.py | import pytest
import pytest_asyncio
from app.sandbox.core.sandbox import DockerSandbox, SandboxSettings
@pytest.fixture(scope="module")
def sandbox_config():
    """Provide the module-wide sandbox configuration under test."""
    # Modest resource caps keep the test container cheap to run.
    options = {
        "image": "python:3.12-slim",
        "work_dir": "/workspace",
        "memory_limit": "1g",
        "cpu_limit": 0.5,
        "network_enabled": True,
    }
    return SandboxSettings(**options)
@pytest_asyncio.fixture(scope="module")
async def sandbox(sandbox_config):
    """Yield a live DockerSandbox, tearing it down after the module's tests."""
    instance = DockerSandbox(sandbox_config)
    await instance.create()
    try:
        yield instance
    finally:
        await instance.cleanup()
@pytest.mark.asyncio
async def test_sandbox_working_directory(sandbox):
    """The shell should start in the configured work_dir."""
    output = await sandbox.terminal.run_command("pwd")
    assert output.strip() == "/workspace"
@pytest.mark.asyncio
async def test_sandbox_file_operations(sandbox):
    """Round-trip a file through the sandbox filesystem."""
    payload = "Hello from sandbox!"
    target = "/workspace/test.txt"

    await sandbox.write_file(target, payload)
    read_back = await sandbox.read_file(target)
    assert read_back.strip() == payload
@pytest.mark.asyncio
async def test_sandbox_python_execution(sandbox):
    """Run a Python script inside the sandbox and check its stdout."""
    # Seed a data file the script will read back.
    await sandbox.write_file("/workspace/test.txt", "Hello from file!")

    python_code = """
print("Hello from Python!")
with open('/workspace/test.txt') as f:
    print(f.read())
"""
    await sandbox.write_file("/workspace/test.py", python_code)

    output = await sandbox.terminal.run_command("python3 /workspace/test.py")
    for expected in ("Hello from Python!", "Hello from file!"):
        assert expected in output
@pytest.mark.asyncio
async def test_sandbox_file_persistence(sandbox):
    """Files written to the sandbox, including nested paths, stay readable."""
    files = {
        "file1.txt": "Content 1",
        "file2.txt": "Content 2",
        "nested/file3.txt": "Content 3",
    }

    for rel_path, body in files.items():
        await sandbox.write_file(f"/workspace/{rel_path}", body)

    for rel_path, body in files.items():
        stored = await sandbox.read_file(f"/workspace/{rel_path}")
        assert stored.strip() == body
@pytest.mark.asyncio
async def test_sandbox_python_environment(sandbox):
    """Tests Python environment configuration."""
    # Bug fix: the module fixture uses the python:3.12-slim image, so the
    # interpreter must report 3.12 — the previous assertion checked for
    # "Python 3.10" and could never pass.
    result = await sandbox.terminal.run_command("python3 --version")
    assert "Python 3.12" in result

    # Test basic module imports
    python_code = """
import sys
import os
import json
print("Python is working!")
"""
    await sandbox.write_file("/workspace/env_test.py", python_code)
    result = await sandbox.terminal.run_command("python3 /workspace/env_test.py")
    assert "Python is working!" in result
@pytest.mark.asyncio
async def test_sandbox_network_access(sandbox):
    """Tests sandbox network access."""
    if not sandbox.config.network_enabled:
        pytest.skip("Network access is disabled")

    # Test network connectivity
    await sandbox.terminal.run_command("apt update && apt install curl -y")
    result = await sandbox.terminal.run_command("curl -I https://www.example.com")
    # Robustness fix: curl may negotiate HTTP/1.1 or HTTP/2 depending on how
    # it was built, so only assert on the 200 status rather than "HTTP/2 200".
    assert "200" in result
@pytest.mark.asyncio
async def test_sandbox_cleanup(sandbox_config):
    """After cleanup the backing container must be gone."""
    box = DockerSandbox(sandbox_config)
    await box.create()

    # Leave some state behind, then remember which container backed it.
    await box.write_file("/workspace/test.txt", "test")
    container_id = box.terminal.container.id

    await box.cleanup()

    # The container id must no longer appear in the daemon's full list.
    import docker

    remaining = docker.from_env().containers.list(all=True)
    assert all(c.id != container_id for c in remaining)
@pytest.mark.asyncio
async def test_sandbox_error_handling():
    """Creating a sandbox from a bogus image should raise."""
    bad_config = SandboxSettings(image="nonexistent:latest", work_dir="/invalid")
    box = DockerSandbox(bad_config)
    with pytest.raises(Exception):
        await box.create()


if __name__ == "__main__":
    pytest.main(["-v", __file__])
| python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false |
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/tests/sandbox/test_docker_terminal.py | tests/sandbox/test_docker_terminal.py | """Tests for the AsyncDockerizedTerminal implementation."""
import docker
import pytest
import pytest_asyncio
from app.sandbox.core.terminal import AsyncDockerizedTerminal
@pytest.fixture(scope="module")
def docker_client():
    """Fixture providing a Docker client.

    Module-scoped so all tests in this file share one client connection.
    """
    return docker.from_env()
@pytest_asyncio.fixture(scope="module")
async def docker_container(docker_client):
    """Run a throwaway container that idles until the module finishes."""
    # `tail -f /dev/null` keeps the container alive; remove=True makes the
    # daemon delete it automatically once stopped.
    run_kwargs = {
        "name": "test_container",
        "detach": True,
        "remove": True,
    }
    container = docker_client.containers.run(
        "python:3.12-slim", "tail -f /dev/null", **run_kwargs
    )
    yield container
    container.stop()
@pytest_asyncio.fixture
async def terminal(docker_container):
    """Yield an initialized AsyncDockerizedTerminal bound to the container."""
    term = AsyncDockerizedTerminal(
        docker_container,
        working_dir="/workspace",
        env_vars={"TEST_VAR": "test_value"},
        default_timeout=30,
    )
    await term.init()
    yield term
    await term.close()
class TestAsyncDockerizedTerminal:
    """Test cases for AsyncDockerizedTerminal."""

    @pytest.mark.asyncio
    async def test_basic_command_execution(self, terminal):
        """Test basic command execution functionality."""
        result = await terminal.run_command("echo 'Hello World'")
        assert "Hello World" in result

    @pytest.mark.asyncio
    async def test_environment_variables(self, terminal):
        """Test environment variable setting and access."""
        result = await terminal.run_command("echo $TEST_VAR")
        assert "test_value" in result

    @pytest.mark.asyncio
    async def test_working_directory(self, terminal):
        """Test working directory setup."""
        result = await terminal.run_command("pwd")
        # Consistency fix: strip before comparing, like the other sandbox
        # tests do — a raw equality check fails if the shell output carries
        # a trailing newline.
        assert result.strip() == "/workspace"

    @pytest.mark.asyncio
    async def test_command_timeout(self, docker_container):
        """Test command timeout functionality."""
        terminal = AsyncDockerizedTerminal(docker_container, default_timeout=1)
        await terminal.init()
        try:
            with pytest.raises(TimeoutError):
                await terminal.run_command("sleep 5")
        finally:
            await terminal.close()

    @pytest.mark.asyncio
    async def test_multiple_commands(self, terminal):
        """Test execution of multiple commands in sequence."""
        cmd1 = await terminal.run_command("echo 'First'")
        cmd2 = await terminal.run_command("echo 'Second'")
        assert "First" in cmd1
        assert "Second" in cmd2

    @pytest.mark.asyncio
    async def test_session_cleanup(self, docker_container):
        """Test proper cleanup of resources."""
        terminal = AsyncDockerizedTerminal(docker_container)
        await terminal.init()
        assert terminal.session is not None
        await terminal.close()
        # Verify session is properly cleaned up
        # Note: session object still exists, but internal connection is closed
        assert terminal.session is not None
# Configure pytest-asyncio
def pytest_configure(config):
    """Register asyncio ini options (strict mode, function-scoped loops)."""
    for option, value in (
        ("asyncio_mode", "strict"),
        ("asyncio_default_fixture_loop_scope", "function"),
    ):
        config.addinivalue_line(option, value)


if __name__ == "__main__":
    pytest.main(["-v", __file__])
| python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false |
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/tests/sandbox/test_sandbox_manager.py | tests/sandbox/test_sandbox_manager.py | import asyncio
import os
import tempfile
from typing import AsyncGenerator
import pytest
import pytest_asyncio
from app.sandbox.core.manager import SandboxManager
@pytest_asyncio.fixture(scope="function")
async def manager() -> AsyncGenerator[SandboxManager, None]:
    """Yield a fresh SandboxManager per test, cleaning it up afterwards.

    Function scope guarantees test isolation: no sandboxes leak between cases.
    """
    mgr = SandboxManager(max_sandboxes=2, idle_timeout=60, cleanup_interval=30)
    try:
        yield mgr
    finally:
        # Always release whatever the test left behind.
        await mgr.cleanup()
@pytest.fixture
def temp_file():
    """Yield the path of a disposable file pre-filled with test content."""
    with tempfile.NamedTemporaryFile(mode="w+", delete=False) as handle:
        handle.write("test content")
        path = handle.name
    try:
        yield path
    finally:
        # delete=False above means we are responsible for removal.
        if os.path.exists(path):
            os.unlink(path)
@pytest.mark.asyncio
async def test_create_sandbox(manager):
    """A newly created sandbox is registered and functional."""
    sandbox_id = await manager.create_sandbox()
    assert sandbox_id in manager._sandboxes
    assert sandbox_id in manager._last_used

    # The sandbox should actually execute commands.
    box = await manager.get_sandbox(sandbox_id)
    echoed = await box.run_command("echo 'test'")
    assert echoed.strip() == "test"
@pytest.mark.asyncio
async def test_max_sandboxes_limit(manager):
    """Creating more sandboxes than the configured cap must fail."""
    created = []
    try:
        # Fill the pool up to its limit.
        while len(created) < manager.max_sandboxes:
            created.append(await manager.create_sandbox())
        assert len(manager._sandboxes) == manager.max_sandboxes

        # One more request should be rejected with a descriptive error.
        with pytest.raises(RuntimeError) as exc_info:
            await manager.create_sandbox()
        assert str(exc_info.value) == (
            f"Maximum number of sandboxes ({manager.max_sandboxes}) reached"
        )
    finally:
        # Clean up all created sandboxes.
        for sandbox_id in created:
            try:
                await manager.delete_sandbox(sandbox_id)
            except Exception as e:
                print(f"Failed to cleanup sandbox {sandbox_id}: {e}")
@pytest.mark.asyncio
async def test_get_nonexistent_sandbox(manager):
    """Looking up an unknown sandbox id raises KeyError."""
    with pytest.raises(KeyError, match="Sandbox .* not found"):
        await manager.get_sandbox("nonexistent-id")
@pytest.mark.asyncio
async def test_sandbox_cleanup(manager):
    """Deleting a sandbox removes all bookkeeping entries."""
    sandbox_id = await manager.create_sandbox()
    assert sandbox_id in manager._sandboxes

    await manager.delete_sandbox(sandbox_id)
    for registry in (manager._sandboxes, manager._last_used):
        assert sandbox_id not in registry
@pytest.mark.asyncio
async def test_idle_sandbox_cleanup(manager):
    """Sandboxes idle past the timeout are reaped by the cleanup pass."""
    manager.idle_timeout = 0.1  # shrink the window so the test stays fast

    sandbox_id = await manager.create_sandbox()
    assert sandbox_id in manager._sandboxes

    await asyncio.sleep(0.2)  # exceed the idle timeout

    await manager._cleanup_idle_sandboxes()
    assert sandbox_id not in manager._sandboxes
@pytest.mark.asyncio
async def test_manager_cleanup(manager):
    """cleanup() tears down every sandbox the manager owns."""
    for _ in range(2):
        await manager.create_sandbox()

    await manager.cleanup()

    # Both registries must be empty afterwards.
    assert not manager._sandboxes
    assert not manager._last_used


if __name__ == "__main__":
    pytest.main(["-v", __file__])
| python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false |
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/tests/sandbox/test_client.py | tests/sandbox/test_client.py | import tempfile
from pathlib import Path
from typing import AsyncGenerator
import pytest
import pytest_asyncio
from app.config import SandboxSettings
from app.sandbox.client import LocalSandboxClient, create_sandbox_client
@pytest_asyncio.fixture(scope="function")
async def local_client() -> AsyncGenerator[LocalSandboxClient, None]:
    """Yield a local sandbox client, cleaning it up after each test."""
    client = create_sandbox_client()
    try:
        yield client
    finally:
        # Release the container/session even if the test failed.
        await client.cleanup()
@pytest.fixture(scope="function")
def temp_dir() -> Path:
    """Yield a temporary directory that is removed automatically."""
    with tempfile.TemporaryDirectory() as scratch:
        yield Path(scratch)
@pytest.mark.asyncio
async def test_sandbox_creation(local_client: LocalSandboxClient):
    """Tests sandbox creation with specific configuration."""
    config = SandboxSettings(
        image="python:3.12-slim",
        work_dir="/workspace",
        memory_limit="512m",
        cpu_limit=0.5,
    )
    await local_client.create(config)
    result = await local_client.run_command("python3 --version")
    # Bug fix: the configured image is python:3.12-slim, so the interpreter
    # must report 3.12 — the previous assertion checked for "Python 3.10"
    # and could never pass.
    assert "Python 3.12" in result
@pytest.mark.asyncio
async def test_local_command_execution(local_client: LocalSandboxClient):
    """Commands run in the sandbox; slow ones honor the timeout."""
    await local_client.create()

    output = await local_client.run_command("echo 'test'")
    assert output.strip() == "test"

    # A command exceeding its timeout must raise.
    with pytest.raises(Exception):
        await local_client.run_command("sleep 10", timeout=1)
@pytest.mark.asyncio
async def test_local_file_operations(local_client: LocalSandboxClient, temp_dir: Path):
    """Tests file operations in local sandbox.

    Covers three paths: write/read inside the sandbox, host-to-sandbox copy,
    and sandbox-to-host copy.
    """
    await local_client.create()
    # Test write and read operations
    test_content = "Hello, World!"
    await local_client.write_file("/workspace/test.txt", test_content)
    content = await local_client.read_file("/workspace/test.txt")
    assert content.strip() == test_content
    # Test copying file to container
    src_file = temp_dir / "src.txt"
    src_file.write_text("Copy to container")
    await local_client.copy_to(str(src_file), "/workspace/copied.txt")
    content = await local_client.read_file("/workspace/copied.txt")
    assert content.strip() == "Copy to container"
    # Test copying file from container
    dst_file = temp_dir / "dst.txt"
    await local_client.copy_from("/workspace/test.txt", str(dst_file))
    assert dst_file.read_text().strip() == test_content
@pytest.mark.asyncio
async def test_local_volume_binding(local_client: LocalSandboxClient, temp_dir: Path):
    """Tests volume binding in local sandbox."""
    # Map the host temp directory to /data inside the sandbox.
    bind_path = str(temp_dir)
    volume_bindings = {bind_path: "/data"}
    await local_client.create(volume_bindings=volume_bindings)
    # A file written on the host must be visible through the mount.
    test_file = temp_dir / "test.txt"
    test_file.write_text("Volume test")
    content = await local_client.read_file("/data/test.txt")
    assert "Volume test" in content
@pytest.mark.asyncio
async def test_local_error_handling(local_client: LocalSandboxClient):
    """Tests error handling in local sandbox."""
    await local_client.create()
    # Reading a missing file should raise with a "not found" style message.
    with pytest.raises(Exception) as exc:
        await local_client.read_file("/nonexistent.txt")
    assert "not found" in str(exc.value).lower()
    # Copying a missing file out of the sandbox should fail the same way.
    with pytest.raises(Exception) as exc:
        await local_client.copy_from("/nonexistent.txt", "local.txt")
    assert "not found" in str(exc.value).lower()
# Allow running this test module directly: `python test_client.py`.
if __name__ == "__main__":
    pytest.main(["-v", __file__])
| python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false |
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/examples/benchmarks/__init__.py | examples/benchmarks/__init__.py | """
OpenManus benchmark system for standardized agent evaluation.
"""
| python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false |
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/protocol/a2a/__init__.py | protocol/a2a/__init__.py | python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false | |
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/protocol/a2a/app/agent_executor.py | protocol/a2a/app/agent_executor.py | import logging
from typing import Awaitable, Callable
from a2a.server.agent_execution import AgentExecutor, RequestContext
from a2a.server.events import EventQueue
from a2a.types import (
InvalidParamsError,
Part,
Task,
TextPart,
UnsupportedOperationError,
)
from a2a.utils import completed_task, new_artifact
from a2a.utils.errors import ServerError
from .agent import A2AManus
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class ManusExecutor(AgentExecutor):
    """AgentExecutor that runs an A2AManus agent for each incoming request."""

    def __init__(self, agent_factory: Callable[[], Awaitable[A2AManus]]):
        # Async factory producing a fresh A2AManus per request.
        self.agent_factory = agent_factory

    async def execute(
        self,
        context: RequestContext,
        event_queue: EventQueue,
    ) -> None:
        """Run the agent on the request's user input and enqueue a completed task.

        Raises:
            ServerError: when validation fails or the agent raises.
        """
        error = self._validate_request(context)
        if error:
            raise ServerError(error=InvalidParamsError())
        query = context.get_user_input()
        try:
            # NOTE(review): storing the agent on self is shared state across
            # concurrent execute() calls; kept as-is for compatibility.
            self.agent = await self.agent_factory()
            result = await self.agent.invoke(query, context.context_id)
            print(f"Final Result ===> {result}")
        except Exception as e:
            # Bug fix: the original print("Error invoking agent: %s", e) used
            # logging-style %-args with print(), so the message was never
            # interpolated. Use the module logger (keeps the traceback too).
            logger.exception("Error invoking agent: %s", e)
            raise ServerError(error=ValueError(f"Error invoking agent: {e}")) from e
        parts = [
            Part(
                root=TextPart(
                    text=(
                        result["content"]
                        if result["content"]
                        else "failed to generate response"
                    )
                ),
            )
        ]
        event_queue.enqueue_event(
            completed_task(
                context.task_id,
                context.context_id,
                [new_artifact(parts, f"task_{context.task_id}")],
                [context.message],
            )
        )

    def _validate_request(self, context: RequestContext) -> bool:
        # No validation implemented; False signals "no error found".
        return False

    async def cancel(
        self, request: RequestContext, event_queue: EventQueue
    ) -> Task | None:
        # Cancellation is not supported by this executor.
        raise ServerError(error=UnsupportedOperationError())
| python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false |
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/protocol/a2a/app/main.py | protocol/a2a/app/main.py | import argparse
import asyncio
import logging
from typing import Optional
import httpx
from a2a.server.apps import A2AStarletteApplication
from a2a.server.request_handlers import DefaultRequestHandler
from a2a.server.tasks import InMemoryPushNotifier, InMemoryTaskStore
from a2a.types import AgentCapabilities, AgentCard, AgentSkill
from dotenv import load_dotenv
from app.tool.browser_use_tool import _BROWSER_DESCRIPTION
from app.tool.str_replace_editor import _STR_REPLACE_EDITOR_DESCRIPTION
from app.tool.terminate import _TERMINATE_DESCRIPTION
from .agent import A2AManus
from .agent_executor import ManusExecutor
load_dotenv()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
async def main(host: str = "localhost", port: int = 10000):
    """Starts the Manus Agent server.

    Assembles the agent card (capabilities plus advertised skills), wires a
    DefaultRequestHandler to a per-request A2AManus factory, and returns the
    built ASGI application.
    """
    try:
        capabilities = AgentCapabilities(streaming=False, pushNotifications=True)
        # Skills advertised in the agent card; descriptions are reused from
        # the corresponding tool modules where available.
        skills = [
            AgentSkill(
                id="Python Execute",
                name="Python Execute Tool",
                description="Executes Python code string. Note: Only print outputs are visible, function return values are not captured. Use print statements to see results.",
                tags=["Execute Python Code"],
                examples=[
                    "Execute Python code:'''python \n Print('Hello World') \n '''"
                ],
            ),
            AgentSkill(
                id="Browser use",
                name="Browser use Tool",
                description=_BROWSER_DESCRIPTION,
                tags=["Use Browser"],
                examples=["go_to 'https://www.google.com'"],
            ),
            AgentSkill(
                id="Replace String",
                name="Str_replace Tool",
                description=_STR_REPLACE_EDITOR_DESCRIPTION,
                tags=["Operate Files"],
                examples=["Replace 'old' with 'new' in 'file.txt'"],
            ),
            AgentSkill(
                id="Ask human",
                name="Ask human Tool",
                description="Use this tool to ask human for help.",
                tags=["Ask human for help"],
                examples=["Ask human: 'What time is it?'"],
            ),
            AgentSkill(
                id="terminate",
                name="terminate Tool",
                description=_TERMINATE_DESCRIPTION,
                tags=["terminate task"],
                examples=["terminate"],
            ),
            # Add more skills as needed
        ]
        agent_card = AgentCard(
            name="Manus Agent",
            description="A versatile agent that can solve various tasks using multiple tools including MCP-based tools",
            url=f"http://{host}:{port}/",
            version="1.0.0",
            defaultInputModes=A2AManus.SUPPORTED_CONTENT_TYPES,
            defaultOutputModes=A2AManus.SUPPORTED_CONTENT_TYPES,
            capabilities=capabilities,
            skills=skills,
        )
        # NOTE(review): this client is never explicitly closed; confirm its
        # lifecycle is owned by the push notifier.
        httpx_client = httpx.AsyncClient()
        request_handler = DefaultRequestHandler(
            agent_executor=ManusExecutor(
                # A new agent (capped at 3 steps) is built for every request.
                agent_factory=lambda: A2AManus.create(max_steps=3)
            ),
            task_store=InMemoryTaskStore(),
            push_notifier=InMemoryPushNotifier(httpx_client),
        )
        server = A2AStarletteApplication(
            agent_card=agent_card, http_handler=request_handler
        )
        logger.info(f"Starting server on {host}:{port}")
        return server.build()
    except Exception as e:
        # NOTE(review): exit(1) aborts the whole process on any setup error;
        # callers cannot recover from failures here.
        logger.error(f"An error occurred during server startup: {e}")
        exit(1)
def run_server(host: Optional[str] = "localhost", port: Optional[int] = 10000):
    """Build the ASGI app and serve it with uvicorn (blocks until shutdown).

    Args:
        host: Interface to bind; defaults to "localhost".
        port: TCP port to bind; defaults to 10000.
    """
    try:
        import uvicorn

        app = asyncio.run(main(host, port))
        config = uvicorn.Config(
            app=app, host=host, port=port, loop="asyncio", proxy_headers=True
        )
        # Bug fix: Server.run() blocks until shutdown, so the previous
        # placement of this log line only fired after the server had stopped.
        logger.info(f"Server started on {host}:{port}")
        uvicorn.Server(config=config).run()
    except Exception as e:
        # logger.exception preserves the traceback for startup failures.
        logger.exception(f"An error occurred while starting the server: {e}")
if __name__ == "__main__":
    # Parse command line arguments for host and port, with default values
    parser = argparse.ArgumentParser(description="Start Manus Agent service")
    parser.add_argument(
        "--host",
        type=str,
        default="localhost",
        help="Server host address, default is localhost",
    )
    parser.add_argument(
        "--port", type=int, default=10000, help="Server port, default is 10000"
    )
    args = parser.parse_args()
    # Start the server with the specified or default host and port
    # (run_server blocks until the server shuts down).
    run_server(args.host, args.port)
| python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false |
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/protocol/a2a/app/__init__.py | protocol/a2a/app/__init__.py | python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false | |
FoundationAgents/OpenManus | https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/protocol/a2a/app/agent.py | protocol/a2a/app/agent.py | from typing import Any, AsyncIterable, ClassVar, Dict, List, Literal
from pydantic import BaseModel
from app.agent.manus import Manus
class ResponseFormat(BaseModel):
    """Respond to the user in this format."""

    # Lifecycle state of the response; defaults to awaiting more input.
    status: Literal["input_required", "completed", "error"] = "input_required"
    # Human-readable payload of the response.
    message: str
class A2AManus(Manus):
    """Manus agent adapted to the A2A protocol surface."""

    async def invoke(self, query, sessionId) -> Dict[str, Any]:
        """Run the agent on *query* and return an A2A-style result dict.

        Return annotation corrected: this returns the dict built by
        get_agent_response, not a plain string.
        """
        # NOTE(review): config is constructed but ignored by
        # get_agent_response — confirm whether session threading was intended.
        config = {"configurable": {"thread_id": sessionId}}
        response = await self.run(query)
        return self.get_agent_response(config, response)

    async def stream(self, query: str) -> AsyncIterable[Dict[str, Any]]:
        """Streaming is not supported by Manus."""
        raise NotImplementedError("Streaming is not supported by Manus yet.")

    def get_agent_response(self, config, agent_response) -> Dict[str, Any]:
        """Wrap the raw agent output in the dict shape the executor expects."""
        return {
            "is_task_complete": True,
            "require_user_input": False,
            "content": agent_response,
        }

    # Content types this agent accepts and produces (advertised in the card).
    SUPPORTED_CONTENT_TYPES: ClassVar[List[str]] = ["text", "text/plain"]
| python | MIT | 52a13f2a57d8c7f6737eefb02ccf569594d44273 | 2026-01-04T14:39:27.873507Z | false |
xai-org/grok-1 | https://github.com/xai-org/grok-1/blob/7050ed204b8206bb8645c7b7bbef7252f79561b0/run.py | run.py | # Copyright 2024 X.AI Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from model import LanguageModelConfig, TransformerConfig, QuantizedWeight8bit as QW8Bit
from runners import InferenceRunner, ModelRunner, sample_from_model
CKPT_PATH = "./checkpoints/"
def main():
    """Configure Grok-1, load the checkpoint, and sample one fixed prompt."""
    grok_1_model = LanguageModelConfig(
        vocab_size=128 * 1024,
        pad_token=0,
        eos_token=2,
        sequence_len=8192,
        embedding_init_scale=1.0,
        output_multiplier_scale=0.5773502691896257,  # ~= 1/sqrt(3)
        embedding_multiplier_scale=78.38367176906169,
        model=TransformerConfig(
            emb_size=48 * 128,
            widening_factor=8,
            key_size=128,
            num_q_heads=48,
            num_kv_heads=8,
            num_layers=64,
            attn_output_multiplier=0.08838834764831845,  # ~= 1/sqrt(128)
            shard_activations=True,
            # MoE.
            num_experts=8,
            num_selected_experts=2,
            # Activation sharding.
            data_axis="data",
            model_axis="model",
        ),
    )
    inference_runner = InferenceRunner(
        pad_sizes=(1024,),
        runner=ModelRunner(
            model=grok_1_model,
            bs_per_device=0.125,
            checkpoint_path=CKPT_PATH,
        ),
        name="local",
        load=CKPT_PATH,
        tokenizer_path="./tokenizer.model",
        # 1 host with 8 local devices; single-host setup.
        local_mesh_config=(1, 8),
        between_hosts_config=(1, 1),
    )
    inference_runner.initialize()
    gen = inference_runner.run()
    # Near-greedy sampling of a single prompt (temperature 0.01).
    inp = "The answer to life the universe and everything is of course"
    print(f"Output for prompt: {inp}", sample_from_model(gen, inp, max_len=100, temperature=0.01))
if __name__ == "__main__":
    # Configure root logging before running inference.
    logging.basicConfig(level=logging.INFO)
    main()
| python | Apache-2.0 | 7050ed204b8206bb8645c7b7bbef7252f79561b0 | 2026-01-04T14:39:29.368501Z | false |
xai-org/grok-1 | https://github.com/xai-org/grok-1/blob/7050ed204b8206bb8645c7b7bbef7252f79561b0/model.py | model.py | # Copyright 2024 X.AI Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import re
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union
import haiku as hk
import jax
import jax.experimental.maps
import jax.numpy as jnp
from jax import config, tree_util
from jax.experimental.shard_map import shard_map
from jax.lax import with_sharding_constraint as pjit_sharding_constraint
from jax.sharding import PartitionSpec
from jax.sharding import PartitionSpec as P
config.update("jax_spmd_mode", "allow_all")
logger = logging.getLogger(__name__)
rank_logger = logging.getLogger("rank")
@dataclass
class QuantizedWeight8bit:
    """8-bit quantized parameter: raw values plus quantization scales.

    Dequantization is ``weight * scales`` (see the MoE matmul kernels and
    Linear.__call__ below).
    """

    # Annotations improved: the original `jnp.array` is a function, not a type.
    weight: jax.Array
    scales: jax.Array

    @property
    def shape(self):
        # Expose the underlying weight's shape so this object can stand in
        # for a plain array in shape-based logic.
        return self.weight.shape
# Register QuantizedWeight8bit as a pytree so jax transforms traverse
# (weight, scales) as leaves; no static aux data is carried.
tree_util.register_pytree_node(
    QuantizedWeight8bit,
    lambda qw: ([qw.weight, qw.scales], ()),
    lambda _, children: QuantizedWeight8bit(children[0], children[1]),
)
class TrainingState(NamedTuple):
    """Container for the training state."""

    # Haiku parameter tree for the model.
    params: hk.Params
def _match(qs, ks):
"""Return True if regexes in qs match any window of strings in tuple ks."""
# compile regexes and force complete match
qts = tuple(map(lambda x: re.compile(x + "$"), qs))
for i in range(len(ks) - len(qs) + 1):
matches = [x.match(y) for x, y in zip(qts, ks[i:])]
if matches and all(matches):
return True
return False
def with_sharding_constraint(x, constraint):
    """Apply a pjit sharding constraint, but only when a mesh is active.

    Outside a mesh (empty physical mesh) the constraint is skipped and the
    input is returned unchanged.
    """
    if jax.experimental.maps.thread_resources.env.physical_mesh.empty:
        return x
    else:
        return pjit_sharding_constraint(x, constraint)
def cast_bfloat16(x):
    """Cast floating-point arrays to bfloat16; leave other dtypes untouched."""
    return x.astype(jnp.bfloat16) if x.dtype.kind == "f" else x
def ffn_size(emb_size, widening_factor):
    """Feed-forward width: two thirds of the widened size, padded to a multiple of 8."""
    size = (int(widening_factor * emb_size) * 2) // 3
    size += (8 - size) % 8  # round up to the next multiple of 8
    logger.debug(f"emd_size: {emb_size} adjusted ffn_size: {size}")
    return size
def apply_rules(rules):
    """Build a (path, value) -> PartitionSpec callback from partition rules.

    Each rule is (regex-tuple, replacement); the first rule whose regexes
    match a contiguous window of the flattened parameter path wins.
    """
    def _apply_rules(path, value):
        del value  # Unused.
        # Flatten the pytree key path into a flat list of name components.
        path_list = [str(i.key).split("/") for i in path if isinstance(i, jax.tree_util.DictKey)]
        flattened_path = jax.tree_util.tree_flatten(path_list)[0]
        for rule, replacement in rules:
            if _match(rule, flattened_path):
                if isinstance(replacement, PartitionSpec):
                    if "layer_stack" in flattened_path:
                        # Stacked layers carry an extra leading (layer) axis.
                        replacement = PartitionSpec(None, *replacement)
                rank_logger.debug(f"Apply {replacement} to {flattened_path} with rule {rule}")
                return replacement
        rank_logger.info(f"{flattened_path} no matching found!")
        return None
    return _apply_rules
# Sharding rules mapping parameter-path regex windows to PartitionSpecs;
# consumed via apply_rules() above (first matching rule wins).
TRANSFORMER_PARTITION_RULES = [
    # attention
    (("multi_head_attention", "(query|key|value)", "w"), P("data", "model")),
    (("multi_head_attention", "(query|key|value)", "b"), P(None)),
    (("multi_head_attention", "linear", "w"), P("model", "data")),
    (("multi_head_attention", "linear", "b"), P(None)),
    # mlp
    ((r"decoder_layer_[0-9]+", "linear", "w"), P("data", "model")),
    ((r"decoder_layer_[0-9]+", "linear", "b"), P(None)),
    ((r"decoder_layer_[0-9]+", "linear_v", "w"), P("data", "model")),
    ((r"decoder_layer_[0-9]+", "linear_v", "b"), P(None)),
    (
        (r"decoder_layer_[0-9]+", "linear_1", "w"),
        P(
            "model",
            "data",
        ),
    ),
    ((r"decoder_layer_[0-9]+", "linear_1", "b"), P(None)),
    # layer norms
    ((r"decoder_layer_[0-9]+", "layer_norm", "offset"), P(None)),
    ((r"decoder_layer_[0-9]+", "layer_norm", "scale"), P(None)),
    ((r"decoder_layer_[0-9]+", "layer_norm_1", "offset"), P(None)),
    ((r"decoder_layer_[0-9]+", "layer_norm_1", "scale"), P(None)),
    # rms norms
    ((r"decoder_layer_[0-9]+", "rms_norm", "scale"), P(None)),
    ((r"decoder_layer_[0-9]+", "rms_norm_1", "scale"), P(None)),
    ((r"decoder_layer_[0-9]+", "rms_norm_2", "scale"), P(None)),
    ((r"decoder_layer_[0-9]+", "rms_norm_3", "scale"), P(None)),
    # router
    (("router", "w"), P("data")),
    # moe mlp
    (("moe", "linear", "w"), P(None, "data", "model")),
    (("moe", "linear", "b"), P(None)),
    (("moe", "linear_v", "w"), P(None, "data", "model")),
    (("moe", "linear_v", "b"), P(None)),
    (("moe", "linear_1", "w"), P(None, "model", "data")),
    (("moe", "linear_1", "b"), P(None)),
    # layer norms
    (("moe", "layer_norm", "offset"), P(None)),
    (("moe", "layer_norm", "scale"), P(None)),
    (("moe", "layer_norm_1", "offset"), P(None)),
    (("moe", "layer_norm_1", "scale"), P(None)),
    # rms norms
    (("moe", "rms_norm", "scale"), P(None)),
    (("moe", "rms_norm_1", "scale"), P(None)),
    (("moe", "rms_norm_2", "scale"), P(None)),
    (("moe", "rms_norm_3", "scale"), P(None)),
]
# Rules for the language-model wrapper (embeddings and final norm).
LM_PARTITION_RULES = [
    # Embedding layer.
    (
        ("language_model", "positional_embeddings"),
        P(None, ("data", "model")),
    ),
    (
        ("language_model", "in_out_embed", "embeddings"),
        P(None, ("data", "model")),
    ),
    # Final RMSNorm.
    (("language_model", "rms_norm"), P(None)),
]
# NOTE(review): TOP_K is not referenced in this portion of the file —
# confirm its usage further down before removing.
TOP_K = 8
class KVMemory(NamedTuple):
    """Per-layer attention KV cache; fields may be None before allocation."""

    # Cached keys/values, [batch, seq, num_kv_heads, key_size]
    # (see init_layer_memories).
    k: Optional[jax.Array]
    v: Optional[jax.Array]
    # Per-batch-element write offset into the cache.
    step: Optional[jax.Array]
def init_layer_memories(
    batch_size: int,
    sequence_len: int,
    num_kv_heads: int,
    key_size: int,
    num_layers: int,
    step: Optional[jax.Array] = None,
    dtype=jnp.bfloat16,
):
    """Allocate zero-filled KV caches, one KVMemory per transformer layer."""
    cache_shape = (batch_size, sequence_len, num_kv_heads, key_size)
    memories = []
    for _ in range(num_layers):
        memories.append(
            KVMemory(
                k=jnp.zeros(cache_shape, dtype=dtype),
                v=jnp.zeros(cache_shape, dtype=dtype),
                step=step,
            )
        )
    return memories
class Memory(NamedTuple):
    """Whole-model attention cache: one KVMemory per layer."""

    # Self-attention key/value cache.
    layers: List[KVMemory]
class Router(hk.Module):
    """Computes per-token expert routing probabilities for the MoE layer."""
    def __init__(
        self,
        num_selected_experts: int,
        data_axis: Union[str, Tuple[str, ...]] = "data",
        model_axis: Union[str, Tuple[str, ...]] = "model",
        shard_activations: bool = False,
        mesh: Any = None,
        name: str = "router",
    ):
        super().__init__(name)
        self.shard_activations = shard_activations
        self.data_axis = data_axis
        self.model_axis = model_axis
        self.mesh = mesh
        # How many experts each token is dispatched to (top-k).
        self.num_selected_experts = num_selected_experts
    def compute_routing_prob(
        self, inputs: jax.Array, padding_mask: Optional[jax.Array], num_experts: int
    ):
        """Public entry point; returns (probs, logits, 0)."""
        return self._compute_routing_prob(inputs, padding_mask, num_experts)
    @hk.transparent
    def _compute_routing_prob(
        self,
        inputs: jax.Array,
        padding_mask: Optional[jax.Array],
        num_experts: int,
    ):
        # Using fp32 for the routing prob computation.
        inputs = jax.lax.convert_element_type(inputs, jnp.float32)
        # [batch_size, seq_len, num_experts]
        routing_logits = self._router_weights(inputs, num_experts, sharding=P("data"))
        assert routing_logits.dtype == jnp.float32
        routing_probs = jax.nn.softmax(routing_logits)
        # Zero out probabilities on padded positions.
        if padding_mask is not None:
            routing_probs *= padding_mask
        return routing_probs, routing_logits, 0
    @hk.transparent
    def _router_weights(
        self,
        x: jax.Array,
        num_experts: int,
        sharding: Optional[P] = None,
    ):
        fprop_dtype = x.dtype
        if not x.shape:
            raise ValueError("Input must not be scalar.")
        input_size = self.input_size = x.shape[-1]
        # Router weights live in fp32 (zero-init; real values come from the
        # checkpoint); the matmul runs in the input dtype.
        w = hk.get_parameter(
            "w", [input_size, num_experts], jnp.float32, init=hk.initializers.Constant(0)
        )
        if sharding:
            w = with_sharding_constraint(w, sharding)
        out = jnp.dot(x, w.astype(fprop_dtype))
        return out
class MoELayer(hk.Module):
    """Mixture-of-Experts feed-forward layer (top-k routing, inference path)."""
    def __init__(
        self,
        num_experts: int,
        layer_fn: Callable,
        router: Router,
        mesh: Any = None,
        shard_activations: bool = False,
        data_axis: Union[str, Tuple[str, ...]] = "data",
        model_axis: Union[str, Tuple[str, ...]] = "model",
        name: Optional[str] = "moe",
    ):
        super().__init__(name)
        self.num_experts = num_experts
        # Haiku function defining a single expert's dense block.
        self.layer_fn = layer_fn
        self.router = router
        self.mesh = mesh
        self.shard_activations = shard_activations
        self.data_axis = data_axis
        self.model_axis = model_axis
    @hk.transparent
    def _inference_call(self, inputs: jax.Array, padding_mask: Optional[jax.Array] = None):
        routing_probs, _, _ = self.router.compute_routing_prob(
            inputs, padding_mask, self.num_experts
        )
        # Top-k experts (and their gate weights) per token.
        expert_gate, expert_index = jax.lax.top_k(routing_probs, k=self.router.num_selected_experts)
        # Flatten [B, T, D] -> [B*T, D], then replicate each token once per
        # selected expert -> [B*T*k, D].
        tmp = jnp.reshape(inputs, (inputs.shape[0] * inputs.shape[1], inputs.shape[2]))
        broad_inputs = jnp.tile(tmp[:, jnp.newaxis, :], (1, self.router.num_selected_experts, 1))
        broad_inputs = jnp.reshape(
            broad_inputs, (broad_inputs.shape[0] * broad_inputs.shape[1], broad_inputs.shape[2])
        )
        init_fn, _ = hk.transform(self.layer_fn)
        vmapped_init_fn = jax.vmap(init_fn, in_axes=0, out_axes=0)
        lifted_init_fn = hk.experimental.transparent_lift(vmapped_init_fn)
        # Fetch the vmapped params of the DenseBlock.
        params = lifted_init_fn(
            jax.random.split(jax.random.PRNGKey(1), self.num_experts),
            jnp.zeros((self.num_experts, 1, 1, inputs.shape[-1])),
        )
        # Index and prob are in the shape [m, 2] indicating which token assigned to which experts.
        # b: num_expert
        # m: token or sequence dim
        # k: input embed dim
        # n: output embed dim
        # e: the number of experts chosen for each token
        @functools.partial(
            shard_map,
            mesh=self.mesh,
            in_specs=(
                P(self.data_axis, None),
                P(None, None, self.model_axis),
                P(None, None, self.model_axis),
                P(None),
                P(None),
            ),
            out_specs=P(self.data_axis, self.model_axis),
            check_rep=False,
        )
        def moe_slow_matmul1(input, weight, scales, index, prob):
            # Dequantize, compute every expert's output, then select the
            # assigned expert's row via a one-hot contraction.
            weight = weight * scales
            one_hot_indices = jax.nn.one_hot(index.reshape(-1), 8, axis=0)
            all_expert_output = jnp.einsum("mk,bkn->bmn", input, weight)
            output = jnp.einsum("bm,bmn->mn", one_hot_indices, all_expert_output)
            return output
        @functools.partial(
            shard_map,
            mesh=self.mesh,
            in_specs=(
                P(self.data_axis, self.model_axis),
                P(None, self.model_axis, None),
                P(None, self.model_axis, None),
                P(None),
                P(None),
            ),
            out_specs=P(self.data_axis, None),
            check_rep=False,
        )
        def moe_slow_matmul2(input, weight, scales, index, prob):
            weight = weight * scales
            one_hot_indices = jax.nn.one_hot(index.reshape(-1), 8, axis=0)
            all_expert_output = jnp.einsum("mk,bkn->bmn", input, weight)
            output = jnp.einsum("bm,bmn->mn", one_hot_indices, all_expert_output)
            # Down-projection is sharded over the model axis; sum the partials.
            return jax.lax.psum(output, axis_name="model")
        if hasattr(params["linear"]["w"], "scales"):
            x = moe_slow_matmul1(
                broad_inputs,
                params["linear_v"]["w"].weight,
                params["linear_v"]["w"].scales,
                expert_index,
                expert_gate,
            )
            y = moe_slow_matmul1(
                broad_inputs,
                params["linear"]["w"].weight,
                params["linear"]["w"].scales,
                expert_index,
                expert_gate,
            )
            # Gated activation: x * gelu(y).
            y = jax.nn.gelu(y)
            out = moe_slow_matmul2(
                x * y,
                params["linear_1"]["w"].weight,
                params["linear_1"]["w"].scales,
                expert_index,
                expert_gate,
            )
            # Restore [B, T, k, D'] then mix the k expert outputs with their
            # gate weights.
            out = jnp.reshape(
                out,
                [
                    inputs.shape[0],
                    inputs.shape[1],
                    self.router.num_selected_experts,
                    out.shape[-1],
                ],
            )
            out = expert_gate[:, :, :, None].astype(jnp.bfloat16) * out
            out = jnp.sum(out, axis=2)
            out = out.astype(jnp.bfloat16)
        else:
            # This is only here so that we can construct a valid init_fn with this code.
            return inputs
        return out
    def __call__(self, inputs: jax.Array, padding_mask: jax.Array):
        # padding_mask is accepted for interface parity but not forwarded.
        return self._inference_call(inputs)
class MHAOutput(NamedTuple):
    """Outputs of the multi-head attention operation."""

    embeddings: jax.Array
    # Updated KV cache (KVMemory) or None when no cache is used.
    memory: Any
class DecoderOutput(NamedTuple):
    """Output of a decoder layer: activations plus updated attention cache."""

    embeddings: jax.Array
    memory: Any
class TransformerOutput(NamedTuple):
    """Output of the full transformer stack: activations plus cache."""

    embeddings: jax.Array
    memory: Any
@dataclass
class TransformerConfig:
    """Hyperparameters for the transformer stack (see run.py for Grok-1 values)."""
    emb_size: int
    key_size: int
    num_q_heads: int
    num_kv_heads: int
    num_layers: int
    vocab_size: int = 128 * 1024
    widening_factor: float = 4.0
    attn_output_multiplier: float = 1.0
    name: Optional[str] = None
    # num_experts <= 0 means no MoE.
    num_experts: int = -1
    capacity_factor: float = 1.0
    num_selected_experts: int = 1
    init_scale: float = 1.0
    shard_activations: bool = False
    # Used for activation sharding.
    data_axis: Union[str, Tuple[str, ...]] = "data"
    model_axis: Union[str, Tuple[str, ...]] = "model"
    def __post_init__(self):
        # Normalize list axes to tuples so they are immutable/hashable.
        if isinstance(self.data_axis, list):
            self.data_axis = tuple(self.data_axis)
        if isinstance(self.model_axis, list):
            self.model_axis = tuple(self.model_axis)
    def partition_rules(self):
        # Parameter sharding rules for the transformer body.
        return TRANSFORMER_PARTITION_RULES
    def make(self, mesh=None) -> "Transformer":
        """Instantiate the Transformer module described by this config."""
        # NOTE(review): __post_init__ already tuple-izes the axes, so these
        # conversions are redundant but harmless.
        data_axis = tuple(self.data_axis) if isinstance(self.data_axis, list) else self.data_axis
        model_axis = (
            tuple(self.model_axis) if isinstance(self.model_axis, list) else self.model_axis
        )
        return Transformer(
            num_q_heads=self.num_q_heads,
            num_kv_heads=self.num_kv_heads,
            widening_factor=self.widening_factor,
            key_size=self.key_size,
            init_scale=self.init_scale,
            mesh=mesh,
            attn_output_multiplier=self.attn_output_multiplier,
            shard_activations=self.shard_activations,
            num_layers=self.num_layers,
            num_experts=self.num_experts,
            num_selected_experts=self.num_selected_experts,
            data_axis=data_axis,
            model_axis=model_axis,
        )
    def get_memory_sharding(self):
        # PartitionSpecs describing how the KV cache is sharded on the mesh.
        return Memory(
            layers=[
                KVMemory(
                    k=P(self.data_axis, self.model_axis),
                    v=P(self.data_axis, self.model_axis),
                    step=P(self.data_axis),
                )
                for _ in range(self.num_layers)
            ],
        )
def hk_rms_norm(
    x: jax.Array,
    fixed_scale=False,
    sharding=P(None),
) -> jax.Array:
    """Applies a unique RMSNorm to x with default settings.

    (Docstring corrected: the original said LayerNorm, but the code
    constructs the RMSNorm module defined below.)
    """
    ln = RMSNorm(axis=-1, create_scale=not fixed_scale, sharding=sharding)
    return ln(x)
def make_attention_mask(
    query_input: jax.Array,
    key_input: jax.Array,
    pairwise_fn: Callable[..., Any] = jnp.multiply,
    dtype: Any = jnp.bfloat16,
):
    """Build a broadcastable attention mask from 1-D query/key masks.

    Given `[batch..., len_q]` and `[batch..., len_kv]` inputs, combines them
    elementwise with `pairwise_fn` and inserts a singleton head axis, so the
    result is `[batch..., 1, len_q, len_kv]`.

    Args:
        query_input: a batched, flat input of query_length size
        key_input: a batched, flat input of key_length size
        pairwise_fn: broadcasting elementwise comparison function
        dtype: mask return dtype

    Returns:
        A `[batch..., 1, len_q, len_kv]` shaped mask for 1d attention.
    """
    expanded_q = jnp.expand_dims(query_input, axis=-1)
    expanded_k = jnp.expand_dims(key_input, axis=-2)
    combined = pairwise_fn(expanded_q, expanded_k)
    return jnp.expand_dims(combined, axis=-3).astype(dtype)
class Linear(hk.Linear):
    """hk.Linear variant supporting sharding and 8-bit quantized weights."""
    def __init__(
        self,
        output_size: int,
        with_bias: bool = True,
        sharding: Optional[P] = None,
        mesh: Any = None,
        name: Optional[str] = None,
        shard_axis: int = 0,
    ):
        super().__init__(
            output_size=output_size,
            with_bias=with_bias,
            name=name,
        )
        self.sharding = sharding
        self.mesh = mesh
        self.shard_axis = shard_axis
    def __call__(
        self,
        inputs: jax.Array,
    ) -> jax.Array:
        """Computes a linear transform of the input."""
        fprop_dtype = inputs.dtype
        if not inputs.shape:
            raise ValueError("Input must not be scalar.")
        input_size = self.input_size = inputs.shape[-1]
        output_size = self.output_size
        # Zero-init placeholder; real (possibly quantized) weights come from
        # the loaded checkpoint.
        w = hk.get_parameter(
            "w", [input_size, output_size], jnp.float32, init=hk.initializers.Constant(0)
        )
        if hasattr(w, "scales"):
            # Quantized path: dequantize (weight * scales) under shard_map.
            # NOTE(review): inputs are flattened to 2D here and the output is
            # not reshaped back — confirm callers handle this.
            shape = inputs.shape
            inputs = jnp.reshape(inputs, (-1, shape[-1]))
            @functools.partial(
                shard_map,
                mesh=self.mesh,
                in_specs=(self.sharding, self.sharding),
                out_specs=self.sharding,
                check_rep=False,
            )
            def mul(w, s):
                return w.astype(s.dtype) * s
            w = mul(w.weight, w.scales)
        out = jnp.dot(inputs, w.astype(fprop_dtype))
        if self.with_bias:
            b = hk.get_parameter(
                "b", [self.output_size], jnp.float32, init=hk.initializers.Constant(0)
            )
            b = jnp.broadcast_to(b, out.shape)
            out = out + b.astype(fprop_dtype)
        return out
class RMSNorm(hk.RMSNorm):
    """RMSNorm with an optional sharding constraint; math is done in float32."""

    def __init__(
        self,
        axis: Union[int, Sequence[int], slice],
        eps: float = 1e-5,
        name: Optional[str] = None,
        create_scale: bool = True,
        sharding: Optional[P] = None,
    ):
        super().__init__(axis, eps, create_scale=create_scale, name=name)
        self.sharding = sharding

    def __call__(self, inputs: jax.Array):
        fprop_dtype = inputs.dtype
        param_shape = (inputs.shape[-1],)
        if self.create_scale:
            # Zero-init placeholder; real scales come from the checkpoint.
            scale = hk.get_parameter(
                "scale",
                param_shape,
                dtype=jnp.float32,
                init=hk.initializers.Constant(0),
            )
            if self.sharding:
                scale = with_sharding_constraint(scale, self.sharding)
            scale = jnp.broadcast_to(scale.astype(jnp.float32), inputs.shape)
        else:
            # Bug fix: this was a Python float (1.0), which has no .astype and
            # crashed the create_scale=False path below. A jnp scalar
            # broadcasts identically.
            scale = jnp.ones((), dtype=jnp.float32)
        # Normalize in float32 for numerical stability, cast back at the end.
        inputs = inputs.astype(jnp.float32)
        scale = scale.astype(jnp.float32)
        mean_squared = jnp.mean(jnp.square(inputs), axis=[-1], keepdims=True)
        mean_squared = jnp.broadcast_to(mean_squared, inputs.shape)
        normed_inputs = inputs * jax.lax.rsqrt(mean_squared + self.eps)
        outputs = scale * normed_inputs
        return outputs.astype(fprop_dtype)
def rotate_half(
    x: jax.Array,
) -> jax.Array:
    """Obtain the rotated counterpart of each feature"""
    first_half, second_half = jnp.split(x, 2, axis=-1)
    return jnp.concatenate([-second_half, first_half], axis=-1)
class RotaryEmbedding(hk.Module):
    """Applies rotary embeddings (RoPE) to the input sequence tensor,
    as described in https://arxiv.org/abs/2104.09864.

    Attributes:
        dim (int): Dimensionality of the feature vectors
        base_exponent (int): Base exponent to compute embeddings from
    """
    def __init__(
        self,
        dim: int,
        name: Optional[str] = None,
        base_exponent: int = 10000,
    ):
        super().__init__(name)
        self.dim = dim
        self.base_exponent = base_exponent
        # Rotation pairs features, so the dimension must be even.
        assert self.dim % 2 == 0
    def __call__(
        self,
        x: jax.Array,
        seq_dim: int,
        offset: jax.Array,
        const_position: Optional[int] = None,
        t: Optional[jax.Array] = None,
    ) -> jax.Array:
        fprop_dtype = x.dtype
        # Compute the per-dimension frequencies
        exponents = jnp.arange(0, self.dim, 2, dtype=jnp.float32)
        inv_freq = jnp.asarray(
            1.0 / (self.base_exponent ** (exponents / self.dim)), dtype=jnp.float32
        )
        if jnp.shape(offset) == ():
            # Offset can be a scalar or one offset per batch element.
            offset = jnp.expand_dims(offset, 0)
        # Compute the per element phase (to pass into sin and cos)
        if const_position:
            # Every position uses the same fixed index.
            t = const_position * jnp.ones(
                (
                    1,
                    x.shape[seq_dim],
                ),
                dtype=jnp.float32,
            )
        elif t is None:
            # Positions are [0, seq_len) shifted by the decode offset.
            t = jnp.arange(x.shape[seq_dim], dtype=jnp.float32) + jnp.expand_dims(offset, -1)
        phase = jnp.einsum("bi,j->bij", t, inv_freq)
        # Duplicate the phase across both halves; add a singleton head axis.
        phase = jnp.tile(phase, reps=(1, 2))[:, :, None, :]
        x = x * jnp.cos(phase) + rotate_half(x) * jnp.sin(phase)
        x = x.astype(fprop_dtype)
        return x
class MultiHeadAttention(hk.Module):
def __init__(
self,
num_q_heads: int,
num_kv_heads: int,
key_size: int,
*,
with_bias: bool = True,
value_size: Optional[int] = None,
model_size: Optional[int] = None,
attn_output_multiplier: 1.0,
data_axis: Union[str, Tuple[str, ...]] = "data",
model_axis: Union[str, Tuple[str, ...]] = "model",
name: Optional[str] = None,
):
super().__init__(name=name)
self.num_q_heads = num_q_heads
self.num_kv_heads = num_kv_heads
self.key_size = key_size
self.value_size = value_size or key_size
self.model_size = model_size or key_size * num_q_heads
self.data_axis = data_axis
self.model_axis = model_axis
self.attn_output_multiplier = attn_output_multiplier
self.with_bias = with_bias
def __call__(
self,
query: jax.Array,
key: Optional[jax.Array],
value: Optional[jax.Array],
mask: Optional[jax.Array] = None,
kv_memory: Optional[KVMemory] = None,
mesh: Any = None,
) -> MHAOutput:
# In shape hints below, we suppress the leading dims [...] for brevity.
# Hence e.g. [A, B] should be read in every case as [..., A, B].
sequence_length = query.shape[1]
projection = self._linear_projection
use_memory = False
if kv_memory is not None:
if kv_memory.k is None:
assert kv_memory.v is None
assert key is not None
assert value is not None
else:
assert kv_memory.v is not None
use_memory = True
else:
assert key is not None
assert value is not None
# Check that the keys and values have consistent batch size and sequence length.
if not use_memory:
assert key.shape[:2] == value.shape[:2], f"key/value shape: {key.shape}/{value.shape}"
if mask is not None:
assert mask.ndim == 4
assert mask.shape[0] in {
1,
query.shape[0],
}, f"mask/query shape: {mask.shape}/{query.shape}"
if not use_memory:
assert key.shape[0] in {
1,
query.shape[0],
}, f"key/query shape: {key.shape}/{query.shape}"
assert mask.shape[1] == 1
assert mask.shape[2] in {
1,
query.shape[1],
}, f"mask/query shape: {mask.shape}/{query.shape}"
if not use_memory:
assert mask.shape[3] in {
1,
key.shape[1],
}, f"mask/query shape: {mask.shape}/{key.shape}"
# Compute key/query/values (overload K/Q/V to denote the respective sizes).
assert self.num_q_heads % self.num_kv_heads == 0
query_heads = projection(
query,
self.key_size,
self.num_q_heads,
name="query",
sharding=P("data", "model"),
mesh=mesh,
) # [B, T', H, Q=K]
new_memory = None
key_heads = projection(
key,
self.key_size,
self.num_kv_heads,
name="key",
sharding=P("data", "model"),
mesh=mesh,
) # [B, T, H, K]
value_heads = projection(
value,
self.value_size,
self.num_kv_heads,
name="value",
sharding=P("data", "model"),
mesh=mesh,
) # [B, T, H, V]
rotate = RotaryEmbedding(dim=self.key_size, base_exponent=int(1e4))
key_heads = rotate(key_heads, seq_dim=1, offset=(kv_memory.step if kv_memory else 0))
query_heads = rotate(query_heads, seq_dim=1, offset=(kv_memory.step if kv_memory else 0))
@functools.partial(jax.vmap)
def update_into(mem, start, update):
return jax.lax.dynamic_update_slice_in_dim(mem, update, start, axis=0)
if kv_memory:
if mesh is not None:
@functools.partial(
shard_map,
mesh=mesh,
in_specs=(
P("data", None, "model"),
P("data"),
P("data", None, "model"),
),
out_specs=P("data", None, "model"),
check_rep=False,
)
def update_into_shmap(mems, starts, updates):
return update_into(mems, starts, updates)
key_heads = update_into_shmap(kv_memory.k, kv_memory.step, key_heads)
value_heads = update_into_shmap(kv_memory.v, kv_memory.step, value_heads)
else:
key_heads = update_into(kv_memory.k, kv_memory.step, key_heads)
value_heads = update_into(kv_memory.v, kv_memory.step, value_heads)
new_step = kv_memory.step + sequence_length
memory_mask = jnp.arange(kv_memory.k.shape[1]) < new_step[:, None]
memory_mask = memory_mask[:, None, None, :] # [B, H, T, T]
if mask is not None:
mask = memory_mask * mask
else:
mask = memory_mask
new_memory = KVMemory(
k=key_heads,
v=value_heads,
step=new_step,
)
# Add separate dimension for grouped query heads.
query_heads = with_sharding_constraint(query_heads, P(self.data_axis, None, "model", None))
key_heads = with_sharding_constraint(key_heads, P(self.data_axis, None, "model", None))
value_heads = with_sharding_constraint(value_heads, P(self.data_axis, None, "model", None))
b, t, h, d = query_heads.shape
_, _, kv_h, _ = key_heads.shape
assert h % kv_h == 0, f"query_heads {h} must be a multiple of kv_heads {kv_h}"
query_heads = jnp.reshape(query_heads, (b, t, kv_h, h // kv_h, d))
query_heads = with_sharding_constraint(
query_heads, P(self.data_axis, None, "model", None, None)
)
# Compute attention weights.
# Attention softmax is always carried out in fp32.
attn_logits = jnp.einsum("...thHd,...Thd->...hHtT", query_heads, key_heads).astype(
jnp.float32
)
attn_logits *= self.attn_output_multiplier
max_attn_val = jnp.array(30.0, dtype=attn_logits.dtype)
attn_logits = max_attn_val * jnp.tanh(attn_logits / max_attn_val)
mask = mask[:, :, None, :, :]
if mask is not None:
if mask.ndim != attn_logits.ndim:
raise ValueError(
f"Mask dimensionality {mask.ndim} must match logits dimensionality "
f"{attn_logits.ndim} for {mask.shape}/{attn_logits.shape}."
)
attn_logits = jnp.where(mask, attn_logits, -1e30)
attn_weights = jax.nn.softmax(attn_logits).astype(query.dtype) # [H, T', T]
# Weight the values by the attention and flatten the head vectors.
attn = jnp.einsum("...hHtT,...Thd->...thHd", attn_weights, value_heads)
attn = with_sharding_constraint(attn, P(self.data_axis, None, "model", None, None))
leading_dims = attn.shape[:2]
attn = jnp.reshape(attn, (*leading_dims, -1)) # [T', H*V]
attn = with_sharding_constraint(attn, P(self.data_axis, None, "model"))
# Apply another projection to get the final embeddings.
final_projection = Linear(
self.model_size,
with_bias=False,
sharding=P("model", "data"),
mesh=mesh,
)
return MHAOutput(final_projection(attn), new_memory)
@hk.transparent
def _linear_projection(
    self,
    x: jax.Array,
    head_size: int,
    num_heads: int,
    sharding: Optional[P] = None,
    name: Optional[str] = None,
    mesh: Any = None,
) -> jax.Array:
    """Project `x` with one fused linear layer and split it into heads.

    Maps [..., D] -> [..., num_heads, head_size] by projecting to
    num_heads * head_size features and reshaping the last axis.
    """
    y = Linear(
        num_heads * head_size,
        with_bias=False,
        name=name,
        sharding=sharding,
        mesh=mesh,
    )(x)
    # Keep all leading (batch/sequence) dims; split only the feature axis.
    *leading_dims, _ = x.shape
    return y.reshape((*leading_dims, num_heads, head_size))
@dataclass
class MHABlock(hk.Module):
    """A MHA Block"""

    num_q_heads: int
    num_kv_heads: int
    key_size: int
    attn_output_multiplier: float = 1.0
    mesh: Any = None
    data_axis: Union[str, Tuple[str, ...]] = "data"
    model_axis: Union[str, Tuple[str, ...]] = "model"

    @hk.transparent
    def __call__(
        self,
        inputs: jax.Array,  # [B, T, D]
        mask: jax.Array,  # [B, 1, T, T] or [B, 1, 1, T] or B[1, 1, 1, 1]
        layer_memory: Optional[KVMemory],
    ) -> MHAOutput:
        """Run self-attention on `inputs`, threading `layer_memory` (KV cache) through."""
        _, _, model_size = inputs.shape
        # Mask must be rank 4 and broadcastable along both sequence axes.
        assert mask.ndim == 4, f"shape: {mask.shape}"
        assert mask.shape[2] in {1, inputs.shape[1]}, str(mask.shape)
        assert mask.shape[3] in {1, inputs.shape[1]}, str(mask.shape)
        # Self-attention: keys and values come from the same tensor as queries.
        side_input = inputs

        def attn_block(query, key, value, mask, memory) -> MHAOutput:
            # Construct the attention module inside this function so Haiku
            # scopes its parameters under this block.
            return MultiHeadAttention(
                num_q_heads=self.num_q_heads,
                num_kv_heads=self.num_kv_heads,
                key_size=self.key_size,
                model_size=model_size,
                data_axis=self.data_axis,
                model_axis=self.model_axis,
                attn_output_multiplier=self.attn_output_multiplier,
            )(
                query,
                key,
                value,
                mask,
                memory,
                mesh=self.mesh,
            )

        attn_output = attn_block(inputs, side_input, side_input, mask, layer_memory)
        h_attn = attn_output.embeddings
        return attn_output._replace(embeddings=h_attn)
@dataclass
class DenseBlock(hk.Module):
num_q_heads: int
num_kv_heads: int
key_size: int
widening_factor: float = 4.0
sharding_constraint: bool = False
mesh: Any = None
@hk.transparent
def __call__(
self,
inputs: jax.Array, # [B, T, D]
) -> jax.Array: # [B, T, D]
_, _, model_size = inputs.shape
h_v = Linear(
ffn_size(
model_size,
self.widening_factor,
),
with_bias=False,
mesh=self.mesh,
sharding=P("data", "model"),
name="linear_v",
| python | Apache-2.0 | 7050ed204b8206bb8645c7b7bbef7252f79561b0 | 2026-01-04T14:39:29.368501Z | true |
xai-org/grok-1 | https://github.com/xai-org/grok-1/blob/7050ed204b8206bb8645c7b7bbef7252f79561b0/checkpoint.py | checkpoint.py | # Copyright 2024 X.AI Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import contextlib
import logging
import math
import os
import pickle
import re
import shutil
import sys
import tempfile
from concurrent.futures import ThreadPoolExecutor, wait
from typing import Any, Optional
import jax
import numpy as np
from jax.experimental import multihost_utils
from model import QuantizedWeight8bit
logger = logging.getLogger(__name__)
rank_logger = logging.getLogger("rank")
# Needed for loading the checkpoint with pickle.
sys.modules['__main__'].QuantizedWeight8bit = QuantizedWeight8bit
@contextlib.contextmanager
def copy_to_shm(file: str):
    """Yield a path in /dev/shm holding a copy of `file` (for fast reads).

    If `file` already lives in /dev/shm it is yielded unchanged; otherwise a
    temporary copy is made and deleted again on exit.
    """
    if file.startswith("/dev/shm/"):
        # Nothing to do, the file is already in shared memory.
        yield file
        return

    tmp_dir = "/dev/shm/"
    fd, tmp_path = tempfile.mkstemp(dir=tmp_dir)
    try:
        shutil.copyfile(file, tmp_path)
        yield tmp_path
    finally:
        # Clean up the staging copy even if the caller's block raised.
        os.remove(tmp_path)
        os.close(fd)
@contextlib.contextmanager
def copy_from_shm(file: str):
    """Yield a /dev/shm staging path; copy it out to `file` on successful exit.

    If the caller's block raises, the copy-out is skipped (the exception
    propagates through the `yield`); the staging file is always deleted.
    """
    tmp_dir = "/dev/shm/"
    fd, tmp_path = tempfile.mkstemp(dir=tmp_dir)
    try:
        yield tmp_path
        shutil.copyfile(tmp_path, file)
    finally:
        os.remove(tmp_path)
        os.close(fd)
def fast_unpickle(path: str) -> Any:
    """Unpickle `path`, staging it through /dev/shm for faster reads."""
    with copy_to_shm(path) as tmp_path:
        with open(tmp_path, "rb") as f:
            return pickle.load(f)
def fast_pickle(obj: Any, path: str) -> None:
    """Pickle `obj` to `path`, writing through a /dev/shm staging file."""
    with copy_from_shm(path) as tmp_path:
        with open(tmp_path, "wb") as f:
            pickle.dump(obj, f)
def load_tensors(shaped_arrays, directory, mesh_config, tensor_indices=None):
    """Loads a set of arrays.

    Each host loads only the tensor shards it is responsible for (derived
    from jax.process_index() and the mesh layout); every other slot is
    filled with zeros of the expected shape/dtype. Files are named
    ``tensor{i:05d}_{shard:03d}`` inside `directory`.
    """
    pool = ThreadPoolExecutor(max_workers=32)
    fs = list()
    num_tensors = 0
    num_replicas = 1  # NOTE: replica count is hard-coded to 1 here.
    data_model_shards = math.prod(mesh_config)
    if tensor_indices is None:
        iterator = enumerate(shaped_arrays)
    else:
        # Caller supplies explicit file indices for a subset of tensors.
        iterator = zip(tensor_indices, shaped_arrays)
    for i, t in iterator:
        if (i % num_replicas) == ((jax.process_index() // data_model_shards) % num_replicas):
            # Shard index for this host within its replica group.
            idx = (
                jax.process_index() // (num_replicas * data_model_shards) * data_model_shards
                + jax.process_index() % data_model_shards
            )
            fs.append(
                pool.submit(fast_unpickle, os.path.join(directory, f"tensor{i:05d}_{idx:03d}"))
            )
            num_tensors += 1
        else:
            # Not this host's tensor: placeholder zeros with the right shape.
            fs.append(pool.submit(np.zeros, t.shape, dtype=t.dtype))
    wait(fs)
    return [f.result() for f in fs]
def path_tuple_to_string(path: tuple) -> str:
    """Join the dict-key / attribute-name components of a jax tree path with '/'.

    Sequence and flattened-index components contribute nothing to the string;
    any other key type is rejected.
    """

    def piece(elem):
        # Map a key-path entry to its textual piece, or None if it is an
        # index-style entry that should be skipped.
        if isinstance(elem, jax.tree_util.DictKey):
            return elem.key
        if isinstance(elem, jax.tree_util.GetAttrKey):
            return elem.name
        assert isinstance(elem, (jax.tree_util.FlattenedIndexKey, jax.tree_util.SequenceKey))
        return None

    return "/".join(p for elem in path if (p := piece(elem)) is not None)
def get_load_path_str(
    init_path_str: str,
    load_rename_rules: Optional[list[tuple[str, str]]] = None,
    load_exclude_rules: Optional[list[str]] = None,
) -> Optional[str]:
    """Map a parameter path to the checkpoint path it should be loaded from.

    Returns None when the path matches any exclusion pattern. Otherwise the
    first matching rename rule (if any) is applied via re.sub and the result
    returned; with no match the path is returned unchanged.
    """
    # Exclusion: any matching pattern drops the parameter entirely.
    if load_exclude_rules and any(
        re.search(pattern, init_path_str) for pattern in load_exclude_rules
    ):
        return None

    # Renaming: only the first rule whose pattern matches is applied.
    for pattern, replacement in load_rename_rules or ():
        if re.search(pattern, init_path_str):
            return re.sub(pattern, replacement, init_path_str)
    return init_path_str
def replace_with_load_state(
    init_state: Any,
    load_state: Any,
    load_rename_rules: Optional[list[tuple[str, str]]] = None,
    load_exclude_rules: Optional[list[str]] = None,
    mesh_config: tuple = (1, 1),
) -> Any:
    """Overlay `load_state` tensors onto `init_state`, honoring rename/exclude rules.

    Excluded paths keep their initialized values; paths found in the loaded
    state (possibly under a renamed key) take the loaded tensor; everything
    else keeps the initialized tensor only on the responsible host (zeros
    elsewhere, mirroring load_tensors' layout).
    """
    flatten_load, _ = jax.tree_util.tree_flatten_with_path(load_state)
    flatten_init, structure_init = jax.tree_util.tree_flatten_with_path(init_state)
    # Lookup table: "a/b/c" path string -> loaded tensor.
    load_map = {path_tuple_to_string(path): tensor for path, tensor in flatten_load}

    replaced = []
    num_replicas = 1  # NOTE: hard-coded, consistent with load_tensors.
    data_model_shards = math.prod(mesh_config)
    for i, (init_path, tensor) in enumerate(flatten_init):
        init_path_str = path_tuple_to_string(init_path)
        load_path_str = get_load_path_str(init_path_str, load_rename_rules, load_exclude_rules)
        if load_path_str is None:
            rank_logger.info(f"Excluded from restore: {init_path_str}.")
            replaced.append(tensor)
        elif load_path_str in load_map:
            if load_path_str == init_path_str:
                rank_logger.info(f"Restored from ckpt: {init_path_str}.")
            else:
                rank_logger.info(f"Restored from ckpt: {init_path_str} <-- {load_path_str}.")
            replaced.append(load_map[load_path_str])
        else:
            rank_logger.info(f"Not found in ckpt: {init_path_str}.")
            # Same host-responsibility test as load_tensors: keep the init
            # tensor here, contribute zeros on other hosts.
            if (i % num_replicas) == ((jax.process_index() // data_model_shards) % num_replicas):
                replaced.append(tensor)
            else:
                replaced.append(np.zeros_like(tensor))

    return jax.tree_util.tree_unflatten(structure_init, replaced)
def restore(
    checkpoint_path: str,
    state_shapes: Any,
    mesh,
    between_hosts_config,
    params_only,
    state_sharding,
    init_state: Optional[Any] = None,
) -> Any:
    """Load the sharded checkpoint at `checkpoint_path`/ckpt-0 into a global state tree.

    `state_shapes` supplies the expected tree structure and per-leaf
    shapes/dtypes; `state_sharding` supplies per-leaf PartitionSpecs used to
    assemble host-local shards into global arrays on `mesh`.
    """
    ckpt_path = os.path.join(checkpoint_path, "ckpt-0")
    rank_logger.info("Loading checkpoint at {}".format(ckpt_path))
    ckpt_shapes = state_shapes
    ckpt_shapes_with_path, structure = jax.tree_util.tree_flatten_with_path(ckpt_shapes)
    ckpt_shapes_flat = [elem[1] for elem in ckpt_shapes_with_path]
    loaded_tensors = load_tensors(ckpt_shapes_flat, ckpt_path, between_hosts_config)

    state = jax.tree_util.tree_unflatten(structure, loaded_tensors)

    # Sanity check to give a better error message.
    ckpt_keys = set(state.params.keys())
    code_keys = set(state_sharding.params.keys())

    if ckpt_keys != code_keys and init_state is None:
        missing_in_ckpt = code_keys - ckpt_keys
        missing_locally = ckpt_keys - code_keys
        raise ValueError(
            "Parameters in the code are not matching checkpoint parameters.\n"
            "Params missing in checkpoint: {}\nParams missing in code: {}".format(
                missing_in_ckpt, missing_locally
            )
        )
    # Leaves with no explicit sharding default to fully replicated.
    state_sharding = jax.tree_util.tree_map(
        lambda x: jax.sharding.PartitionSpec() if x is None else x,
        state_sharding,
        is_leaf=lambda x: x is None,
    )
    state = multihost_utils.host_local_array_to_global_array(state, mesh, state_sharding)
    if params_only:
        state = state.params
    return state
| python | Apache-2.0 | 7050ed204b8206bb8645c7b7bbef7252f79561b0 | 2026-01-04T14:39:29.368501Z | false |
xai-org/grok-1 | https://github.com/xai-org/grok-1/blob/7050ed204b8206bb8645c7b7bbef7252f79561b0/runners.py | runners.py | # Copyright 2024 X.AI Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bisect
import functools
import logging
import math
import re
from dataclasses import dataclass
from typing import Any, Callable, NamedTuple, Optional, Tuple
import haiku as hk
import jax
import jax.experimental.pjit as pjit
import jax.numpy as jnp
import numpy as np
import sentencepiece
from jax.experimental import mesh_utils
from jax.sharding import PartitionSpec as P
from jax.typing import ArrayLike
import checkpoint as xai_checkpoint
from model import (
LanguageModelConfig,
LanguageModelOutput,
TrainingState,
apply_rules,
Memory,
KVMemory,
)
logger = logging.getLogger(__name__)
rank_logger = logging.getLogger("rank")
TOP_K = 8
class SampleSettings(NamedTuple):
    """Per-batch-element sampling controls (leading dim [B] when batched)."""

    temperature: ArrayLike
    # Nucleus (top-p) threshold passed to top_p_filter.
    nucleus_p: ArrayLike
    # Token mask; tokens with a falsy mask entry get ~zero probability. [B, V]
    mask: ArrayLike
    # Whether a given batch element is actively used. [B]
    active: ArrayLike
class SampleOutput(NamedTuple):
    """Result of one sampling step: chosen token plus top-k alternatives."""

    token_id: ArrayLike
    # Probability of the sampled token (after temperature/mask/top-p).
    prob: ArrayLike
    top_k_token_ids: ArrayLike
    top_k_probs: ArrayLike
def insert_slice(memory: Memory, slice, length, i):
    """Insert a single-sequence KV cache `slice` into batch slot `i` of `memory`.

    Each layer's step counter is reset to `length` (the prompt length) before
    insertion so subsequent decoding continues from the right position.
    """
    slice = Memory(
        layers=[
            KVMemory(layer.k, layer.v, step=jnp.array([length]))
            for layer in slice.layers
        ],
    )

    # u[0] drops the slice's singleton batch dim before writing into slot i.
    return jax.tree_map(lambda m, u: jax.lax.dynamic_update_index_in_dim(m, u[0], i, axis=0),
                        memory, slice)
def pad_to_size(x, size):
    """Right-pad a 1-D token array with zeros to exactly `size` entries.

    Inputs longer than `size` are left-truncated, keeping the most recent
    `size` tokens (the tail of the context).
    """
    kept = x[-size:] if x.shape[0] > size else x
    missing = size - kept.shape[0]
    return np.pad(kept, [0, missing], mode="constant", constant_values=0)
def top_p_filter(logits: jax.Array, top_p: jax.Array) -> jax.Array:
    """Performs nucleus filtering on logits.

    Keeps the smallest set of highest-probability tokens whose mass reaches
    `top_p`; all other logits are pushed to -1e10 (effectively removed).
    """
    assert logits.ndim == top_p.ndim, f"Expected {logits.ndim} equal {top_p.ndim}"
    # Sort ascending: the nucleus is a suffix of the sorted array.
    sorted_logits = jax.lax.sort(logits, is_stable=False)
    sorted_probs = jax.nn.softmax(sorted_logits)
    # First position where the cumulative mass of the discarded prefix would
    # reach 1 - top_p; its logit is the smallest one that is kept.
    threshold_idx = jnp.argmax(jnp.cumsum(sorted_probs, -1) >= 1 - top_p, axis=-1)
    threshold_largest_logits = jnp.take_along_axis(
        sorted_logits, threshold_idx[..., jnp.newaxis], axis=-1
    )
    assert threshold_largest_logits.shape == logits.shape[:-1] + (1,)
    # Keep everything at or above the threshold logit (in original order).
    mask = logits >= threshold_largest_logits
    # Set unused logits to -inf.
    logits = jnp.where(mask, logits, -1e10)
    return logits
def sample_token(
    rngs: jax.random.PRNGKey,
    lm_outputs: LanguageModelOutput,
    settings: SampleSettings,
) -> SampleOutput:
    """Sample one token per batch element from `lm_outputs.logits`.

    Applies temperature scaling, the per-request token mask, and nucleus
    (top-p) filtering, then samples categorically; also reports the sampled
    token's probability and the top-k alternatives.
    """
    # Expand the settings shape to match the logit shape.
    settings = SampleSettings(
        temperature=jnp.expand_dims(settings.temperature, (1, 2)),  # Input [B], output [B, 1, 1].
        nucleus_p=jnp.expand_dims(settings.nucleus_p, (1, 2)),  # Input [B], output [B, 1, 1].
        mask=jnp.expand_dims(settings.mask, 1),  # Input [B, V], output [B, 1, V].
        active=settings.active,  # [B].
    )
    logits = lm_outputs.logits / settings.temperature.astype(lm_outputs.logits.dtype)
    # Mask out all disallowed tokens by assigning them a near-zero probability.
    logits = jnp.where(settings.mask, logits, -1e10)
    # Mask out all tokens that don't fall into the p-th percentile.
    logits = top_p_filter(logits, settings.nucleus_p.astype(logits.dtype))

    # One independent categorical draw per batch element.
    new_token = jax.vmap(jax.random.categorical)(rngs, logits)

    probabilities = jax.nn.softmax(logits)

    token_prob = jnp.take_along_axis(probabilities, jnp.expand_dims(new_token, 1), axis=2)
    token_prob = jnp.squeeze(token_prob, 1)

    # Gather the top-k tokens and probabilities.
    top_k_probs, top_k_token_ids = jax.lax.top_k(probabilities, TOP_K)
    top_k_probs = jnp.squeeze(top_k_probs, 1)
    top_k_token_ids = jnp.squeeze(top_k_token_ids, 1)
    return SampleOutput(
        new_token,
        token_prob,
        top_k_token_ids,
        top_k_probs,
    )
@dataclass
class ModelRunner:
    """Builds, initializes and (optionally) restores a language model on a device mesh."""

    model: LanguageModelConfig

    # Per-device batch size; global batch size is derived from this.
    bs_per_device: float = 2.0

    load_rename_rules: Optional[list[tuple[str, str]]] = None
    load_exclude_rules: Optional[list[str]] = None
    rng_seed: int = 42  # Initial rng seed.
    transform_forward: bool = False

    checkpoint_path: str = ""

    def make_forward_fn(self, mesh: Any):
        """Return the forward pass; hk.transform-ed when transform_forward is set."""

        def forward(tokens):
            out = self.model.make(mesh=mesh)(tokens)
            return out, None

        if self.transform_forward:
            forward = hk.transform(forward)
        return forward

    def initialize(
        self,
        init_data,
        local_mesh_config: tuple[int, int],
        between_hosts_config: tuple[int, int],
    ):
        """Set up batch sizes, the device mesh, forward fns, and state sharding."""
        num_replicas = math.prod(between_hosts_config)
        self.model.initialize()
        self.model.fprop_dtype = jnp.bfloat16
        num_local_gpus = len(jax.local_devices())

        # Calculate the global batch size from the local batch size.
        self.batch_size = int(self.bs_per_device * num_local_gpus * num_replicas)

        # Calculate the batch size per host from the global batch size.
        self.local_batch_size = self.batch_size // jax.process_count()

        self.local_mesh_config = local_mesh_config
        self.between_hosts_config = between_hosts_config
        rank_logger.info(
            f"Initializing mesh for {self.local_mesh_config=} {self.between_hosts_config=}..."
        )
        self.mesh = make_mesh(self.local_mesh_config, self.between_hosts_config)
        self.forward = self.make_forward_fn(mesh=self.mesh)
        self.logits_fn = hk.transform(lambda tokens: self.forward(tokens)[0])

        self.eval_forward = self.make_forward_fn(mesh=self.mesh)
        self.logits_eval_fn = hk.transform(lambda tokens: self.eval_forward(tokens)[0])

        if self.transform_forward:
            self.state_sharding = self.get_state_sharding(init_data)
            rank_logger.info(f"State sharding type: {type(self.state_sharding)}")
            self.init_fn = pjit.pjit(self.init, out_shardings=self.state_sharding)

    def init(self, rng: jax.Array, data) -> TrainingState:
        """Initialize model parameters (requires a transformed forward fn)."""
        assert self.transform_forward
        rng, init_rng = jax.random.split(rng)
        params = self.forward.init(init_rng, data["inputs"])
        return TrainingState(params=params)

    def get_state_sharding(self, init_data):
        """Derive the sharding tree for the state from the model's partition rules."""
        assert self.transform_forward
        rng = jax.random.PRNGKey(self.rng_seed)
        rank_logger.info(f"partition rules: {self.model.partition_rules}")

        with self.mesh:
            # eval_shape avoids materializing params just to read their shapes.
            shapes = jax.eval_shape(self.init, rng, init_data)
            sharding = jax.tree_util.tree_map_with_path(
                apply_rules(self.model.partition_rules()),
                shapes,
            )
        return sharding

    def load_or_init(
        self,
        init_data: Any,
        from_checkpoint: bool = True,
        init_fn: Optional[Callable] = None,
    ):
        """Restore state from checkpoint_path when available, else initialize fresh."""
        rng = jax.random.PRNGKey(self.rng_seed)

        if not self.checkpoint_path or not from_checkpoint:
            rank_logger.info("Initializing model...")
            with self.mesh:
                if init_fn is not None:
                    state = init_fn(rng, init_data)
                else:
                    assert self.transform_forward
                    state = self.init_fn(rng, init_data)
            rank_logger.info("Model state is newly initialized.")
        else:
            with self.mesh:
                # Only shapes are needed to drive the checkpoint restore.
                if init_fn:
                    state_shapes = jax.eval_shape(init_fn, rng, init_data)
                else:
                    assert self.transform_forward
                    state_shapes = jax.eval_shape(self.init_fn, rng, init_data)
            init_state = None

            state = xai_checkpoint.restore(
                checkpoint_path=self.checkpoint_path,
                state_shapes=state_shapes,
                mesh=self.mesh,
                between_hosts_config=self.between_hosts_config,
                state_sharding=self.state_sharding,
                init_state=init_state,
                params_only=True,
            )

            del init_state
        return state
@dataclass
class Request:
    """A single text-generation request for the inference server."""

    prompt: str
    temperature: float
    nucleus_p: float
    rng_seed: int
    # Generation stops once this many tokens have been produced.
    max_len: int
@dataclass
class InferenceRunner:
    """Drives batched, continuously-fed sampling on top of a ModelRunner.

    `initialize()` builds the model, loads params, and pjit-compiles the
    prefill and per-step sampling functions; `run()` is a generator that
    accepts Request objects and yields decoded completions.
    """

    name: str
    runner: Any
    load: str
    tokenizer_path: str = "/tmp/xai_data/tokenizer.model"
    local_mesh_config: Tuple[int, int] = (1, 1)
    between_hosts_config: Tuple[int, int] = (1, 1)
    # Prompt padding buckets; prompts are padded up to the nearest size.
    pad_sizes: tuple[int] = (1024,)

    def get_pad_bucket(self, size):
        """Smallest pad size >= `size`, clamped to the largest bucket."""
        i = bisect.bisect_left(self.pad_sizes, size)
        return self.pad_sizes[min(i, len(self.pad_sizes) - 1)]

    def initialize(self):
        """Build model/params and pjit-compile prefill/sample/new-memory fns."""
        runner = self.runner
        self.runner.transform_forward = True
        dummy_data = dict(
            inputs=np.zeros((1, 256), dtype=np.int32),
            targets=np.zeros((1, 256), dtype=np.int32),
        )
        runner.initialize(
            dummy_data,
            local_mesh_config=self.local_mesh_config,
            between_hosts_config=self.between_hosts_config,
        )

        self.tokenizer = sentencepiece.SentencePieceProcessor(model_file=self.tokenizer_path)

        max_len = runner.model.sequence_len

        self.vocab_size = self.runner.model.vocab_size

        params = runner.load_or_init(dummy_data)
        self.params = params

        def pad_to_max_len(x):
            # Pad rank>1 KV-cache leaves along the time axis to max_len;
            # rank-1 leaves (e.g. step counters) pass through unchanged.
            if len(x.shape) > 1:
                pad_width = max_len - x.shape[1]
                return jnp.pad(x, [(0, 0), (0, pad_width), (0, 0), (0, 0)])
            else:
                return x

        @functools.lru_cache
        def lm():
            # Construct the Haiku model once and reuse it across closures.
            return runner.model.make(mesh=runner.mesh)

        def hk_forward(
            tokens,
            memory=None,
            length=None,
            active=None,
        ) -> LanguageModelOutput:
            if memory is not None:
                assert active is not None
                layers = []
                for l in memory.layers:
                    # Reset steps to 0 for inactive requests to avoid unnecessary computations.
                    step = jnp.where(active, l.step, jnp.zeros_like(l.step))
                    layers.append(l._replace(step=step))
                memory = memory._replace(layers=layers)
            return lm()(tokens, memory, length=length)

        def hk_sample_step(rngs, last_output: SampleOutput, memory, settings):
            # Split per-element rngs: one half drives this step's sampling.
            rngs, rngs_ = jax.vmap(jax.random.split, out_axes=1)(rngs)
            lm_outputs = hk_forward(last_output.token_id, memory=memory, active=settings.active)
            sample_result = sample_token(rngs_, lm_outputs, settings)
            return rngs, sample_result, lm_outputs.model_state

        def hk_new_memory(batch_size, sequence_len):
            return lm().init_memory(batch_size, sequence_len)

        def hk_prefill_memory(
            rngs,
            memory,
            settings,
            last_output,
            prompt,
            length,
            rng_seed,
            new_settings,
            i,
        ):
            rng = jax.random.PRNGKey(seed=rng_seed)
            rng, rng_ = jax.random.split(rng)

            # Allocate new memory for this sample. The memory length is equal to the length of the
            # prompt.
            slice = hk_new_memory(1, prompt.shape[0])

            # Move the settings for this individual batch entry into the joint settings tensor.
            settings = jax.tree_map(
                lambda o, v: jax.lax.dynamic_update_index_in_dim(o, v, i, axis=0),
                settings,
                new_settings,
            )

            # Get the settings for the batch entry from the joint settings tensor.
            settings_slice = jax.tree_map(lambda t: jnp.expand_dims(t[i], axis=0), settings)

            # Process the first n-1 tokens of the prompt.
            lm_outputs = hk_forward(
                jnp.expand_dims(prompt, 0),
                memory=slice,
                length=jnp.expand_dims(length, 0),
                active=settings_slice.active,
            )

            # The forward pass doesn't correctly set the `step` counter inside the memory. Manually
            # override it so `hk_forward` uses the correct context length in the next call.
            slice = lm_outputs.model_state
            slice = slice._replace(
                layers=[l._replace(step=jnp.array([length])) for l in slice.layers]
            )

            # Sample the actual output token.
            rng_ = jnp.expand_dims(rng_, 0)
            new_output = sample_token(rng_, lm_outputs, settings_slice)

            # Update the KV cache/memory.
            slice = jax.tree_map(pad_to_max_len, slice)
            memory = insert_slice(memory, slice, length, i)

            rng = jnp.expand_dims(rng, 0)
            rngs = jax.lax.dynamic_update_index_in_dim(rngs, rng, i, axis=0)

            # Move the network outputs for this batch entry into the joint output tensor.
            last_output = jax.tree_util.tree_map(
                lambda last, new: jax.lax.dynamic_update_index_in_dim(last, new, i, axis=0),
                last_output,
                new_output,
            )
            return rngs, last_output, memory, settings

        sample_step_ = hk.without_apply_rng(hk.transform(hk_sample_step))
        prefill_memory_ = hk.without_apply_rng(hk.transform(hk_prefill_memory))
        new_memory_ = hk.without_apply_rng(hk.transform(hk_new_memory))
        forward_ = hk.without_apply_rng(hk.transform(hk_forward))

        rng = jax.random.PRNGKey(42)
        dummy_tokens = jnp.zeros((1, max_len), jnp.int32)

        with runner.mesh:
            shapes = jax.eval_shape(forward_.init, rng, dummy_tokens)

        self.params_sharding = jax.tree_util.tree_map_with_path(
            apply_rules(runner.model.partition_rules()),
            shapes,
        )

        ds = P("data")
        ms = runner.model.model.get_memory_sharding()
        # donate_argnums lets pjit reuse the (large) KV-memory buffers in place.
        self.sample_step = pjit.pjit(
            sample_step_.apply,
            in_shardings=(self.params_sharding, None, ds, ms, None),
            out_shardings=(None, ds, ms),
            donate_argnums=3,
        )
        self.prefill_memory = pjit.pjit(
            functools.partial(prefill_memory_.apply),
            in_shardings=(
                self.params_sharding,
                None,
                ms,
                None,
                ds,
                None,
                None,
                None,
                None,
                None,
            ),
            out_shardings=(None, ds, ms, None),
            donate_argnums=(2,),
        )
        self.new_memory = pjit.pjit(
            new_memory_.apply,
            static_argnums=(1, 2),
            out_shardings=ms,
        )

    def run(self):
        """Generator that accepts prompts."""
        runner = self.runner
        mesh = runner.mesh
        max_len = runner.model.sequence_len
        batch_size = runner.batch_size
        params = self.params
        rngs = jax.random.split(jax.random.PRNGKey(1), batch_size)
        with mesh:
            # Joint (batched) state shared by all in-flight requests.
            memory = self.new_memory(params, batch_size, max_len)
            settings = SampleSettings(
                temperature=np.zeros((batch_size,), dtype=np.float32),
                nucleus_p=np.zeros((batch_size,), dtype=np.float32),
                mask=np.ones((batch_size, self.vocab_size), dtype=np.int32),
                active=np.zeros((batch_size), dtype=np.int32),
            )
            last_output = SampleOutput(
                token_id=np.zeros((batch_size, 1), dtype=np.int32),
                prob=np.zeros((batch_size, 1), dtype=jnp.bfloat16),
                top_k_token_ids=np.zeros((batch_size, TOP_K), dtype=np.int32),
                top_k_probs=np.zeros((batch_size, TOP_K), dtype=jnp.bfloat16),
            )

            prompt = np.array([300, 400, 500, 600, 600, 700, 800])

            new_settings = SampleSettings(
                temperature=np.float32(1),
                nucleus_p=np.float32(1),
                mask=np.ones((self.vocab_size,), dtype=np.int32),
                active=np.zeros((), dtype=np.int32),
            )
            rng_seed = np.uint64(1)

            # Pre-compile the prefill fn for every pad bucket with a dummy prompt.
            for size in self.pad_sizes:
                if size > runner.model.sequence_len:
                    break
                logger.info("Precompile {}".format(size))
                prompt_len = len(prompt)
                prompt = pad_to_size(prompt, size)
                rngs, last_output, memory, settings = self.prefill_memory(
                    params,
                    rngs,
                    memory,
                    settings,
                    last_output,
                    prompt,
                    prompt_len,
                    rng_seed,
                    new_settings,
                    0,
                )
        with runner.mesh:
            logger.info("Compiling...")
            rngs, last_output, memory = self.sample_step(
                params, rngs, last_output, memory, settings
            )
            logger.info("Done compiling.")

        all_tokens = []
        free_slots = list(range(batch_size))
        requests = [None] * batch_size
        first_output = [None] * batch_size
        # Start an async device->host copy so the next loop iteration can read it.
        jax.tree_map(lambda x: x.copy_to_host_async(), last_output)
        prev_token = last_output
        step = 0
        total_num_tokens = 0
        total_num_sequences = 0
        with mesh:
            while True:
                # Fill every free batch slot with a new request; `yield` hands
                # control back to the caller to receive one.
                while free_slots:
                    request: Optional[Request] = yield
                    tokens = self.tokenizer.encode(request.prompt)
                    temperature = request.temperature
                    nucleus_p = request.nucleus_p
                    rng_seed = request.rng_seed

                    i = free_slots.pop()
                    prompt = np.array(tokens, dtype=np.int32)
                    prompt_len = len(prompt)
                    prompt = pad_to_size(prompt, self.get_pad_bucket(prompt.shape[0]))
                    # All tokens are allowed.
                    mask = np.ones((self.vocab_size,), dtype=np.int32)

                    new_settings = SampleSettings(
                        temperature=np.float32(temperature),
                        nucleus_p=np.float32(nucleus_p),
                        mask=mask,
                        active=np.ones((), dtype=np.int32),
                    )
                    rng_seed = np.uint64(rng_seed)
                    rngs, last_output, memory, settings = self.prefill_memory(
                        params,
                        rngs,
                        memory,
                        settings,
                        last_output,
                        prompt,
                        prompt_len,
                        rng_seed,
                        new_settings,
                        i,
                    )
                    jax.tree_map(lambda x: x.copy_to_host_async(), last_output)
                    first_output[i] = last_output
                    requests[i] = request
                    total_num_sequences += 1

                rngs, last_output, memory = self.sample_step(
                    params, rngs, last_output, memory, settings
                )
                total_num_tokens += batch_size - len(free_slots)

                # prev_token should already be on the host.
                prev_token = jax.tree_map(np.array, prev_token)
                for i in range(batch_size):
                    if requests[i] is not None:
                        if first_output[i] is not None:
                            # First token for slot i comes from the prefill pass.
                            first_output_i = jax.tree_map(np.array, first_output[i])
                            all_tokens.append(int(first_output_i.token_id[i][0]))
                            first_output[i] = None
                            continue

                        all_tokens.append(int(prev_token.token_id[i][0]))
                        cont = len(all_tokens) < requests[i].max_len

                        if not cont:
                            # Request finished: decode, free the slot, and
                            # deactivate it in the joint settings.
                            output_str = self.tokenizer.decode(all_tokens)
                            requests[i] = None
                            free_slots.append(i)
                            all_tokens = []
                            settings = settings._replace(active=settings.active.at[i].set(0))
                            yield output_str

                jax.tree_map(lambda x: x.copy_to_host_async(), last_output)
                prev_token = last_output
                step += 1
def make_mesh(
    local_mesh_config: tuple[int, ...], between_hosts_config: tuple[int, ...]
) -> jax.sharding.Mesh:
    """Build a 2-D ("data", "model") hybrid device mesh across all hosts."""
    assert len(local_mesh_config) == 2
    assert len(between_hosts_config) == 2
    rank_logger.info("Detected %s devices in mesh", jax.device_count())
    device_mesh = mesh_utils.create_hybrid_device_mesh(
        local_mesh_config,
        between_hosts_config,
        devices=jax.devices(),
        process_is_granule=True,
    )
    # Collapse blank lines in the (multi-line) mesh repr before logging.
    rank_logger.debug(re.sub("\n+", "\n", f"Job device mesh is:\n{device_mesh}"))
    return jax.sharding.Mesh(device_mesh, ("data", "model"))
def sample_from_model(server, prompt, max_len, temperature):
    """Submit one request to a running InferenceRunner.run() generator.

    `next(server)` advances the generator to its request-accepting `yield`;
    `send` delivers the request and returns the decoded completion.
    """
    next(server)
    inp = Request(
        prompt=prompt,
        temperature=temperature,
        nucleus_p=1.0,
        rng_seed=42,
        max_len=max_len,
    )
    return server.send(inp)
| python | Apache-2.0 | 7050ed204b8206bb8645c7b7bbef7252f79561b0 | 2026-01-04T14:39:29.368501Z | false |
charlax/professional-programming | https://github.com/charlax/professional-programming/blob/d6f158fefbc9ccc1136311f845c927da9983e8b8/antipatterns/python-examples/reraise_exceptions_bad.py | antipatterns/python-examples/reraise_exceptions_bad.py | from collections import namedtuple
Bread = namedtuple("Bread", "color")
class ToastException(Exception):
    """Domain exception used by the bad re-raise example."""
    pass
def toast(bread):
    """Deliberate ANTI-pattern (teaching example): swallows the original error.

    The bare `except:` plus `raise ToastException(...)` without `from`
    discards the original traceback — here a NameError from put_in_toaster —
    so the reader of the new exception cannot tell what actually failed.
    Do not "fix" this; it exists to demonstrate the problem.
    """
    try:
        put_in_toaster(bread)
    except:
        raise ToastException("Could not toast bread")
def put_in_toaster(bread):
    # The misspelled name raises NameError on purpose, to show how the
    # caller's exception handling obscures the real error.
    brad.color = "light_brown"  # Note the typo
toast(Bread("yellow"))
| python | MIT | d6f158fefbc9ccc1136311f845c927da9983e8b8 | 2026-01-04T14:39:30.087892Z | false |
charlax/professional-programming | https://github.com/charlax/professional-programming/blob/d6f158fefbc9ccc1136311f845c927da9983e8b8/antipatterns/python-examples/reraise_exceptions_good.py | antipatterns/python-examples/reraise_exceptions_good.py | from collections import namedtuple
Bread = namedtuple("Bread", "color")
class ToastException(Exception):
    """Domain exception used by the good re-raise example."""
    pass
def toast(bread):
    """Correct pattern: log, then bare `raise` to preserve the original traceback."""
    try:
        put_in_toaster(bread)
    except:
        print("Got exception while trying to toast")
        raise
def put_in_toaster(bread):
    # Intentional NameError: demonstrates that the bare `raise` above keeps
    # the real failure visible in the traceback.
    brad.color = "light_brown"  # Note the typo
toast(Bread("yellow"))
| python | MIT | d6f158fefbc9ccc1136311f845c927da9983e8b8 | 2026-01-04T14:39:30.087892Z | false |
charlax/professional-programming | https://github.com/charlax/professional-programming/blob/d6f158fefbc9ccc1136311f845c927da9983e8b8/antipatterns/sqlalchemy-examples/exists.py | antipatterns/sqlalchemy-examples/exists.py | from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
engine = create_engine("sqlite:///:memory:", echo=True)
Session = sessionmaker(bind=engine)
Base = declarative_base()
class Toaster(Base):
    """Minimal mapped table used to demonstrate the EXISTS query patterns."""

    __tablename__ = "toasters"

    id = Column(Integer, primary_key=True)
    name = Column(String)
    color = Column(String)
def toaster_exists_bad(toaster_id):
    """ANTI-pattern (teaching example): fetches a full row just to test existence."""
    session = Session()
    # .first() loads the whole entity; bool() only needs existence.
    return bool(session.query(Toaster).filter_by(id=toaster_id).first())
def toaster_exists_good(toaster_id):
    """Preferred pattern: let the database answer EXISTS directly."""
    session = Session()
    query = session.query(Toaster).filter_by(id=toaster_id)
    # SELECT EXISTS (SELECT ...): no row data is transferred.
    return session.query(query.exists()).scalar()
def main():
    """Create the schema and run both variants (engine echo=True logs the SQL)."""
    Base.metadata.create_all(engine)
    toaster_exists_bad(1)
    toaster_exists_good(2)
if __name__ == "__main__":
main()
| python | MIT | d6f158fefbc9ccc1136311f845c927da9983e8b8 | 2026-01-04T14:39:30.087892Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth-cli.py | unsloth-cli.py | #!/usr/bin/env python3
"""
🦥 Starter Script for Fine-Tuning FastLanguageModel with Unsloth
This script is designed as a starting point for fine-tuning your models using unsloth.
It includes configurable options for model loading, PEFT parameters, training arguments,
and model saving/pushing functionalities.
You will likely want to customize this script to suit your specific use case
and requirements.
Here are a few suggestions for customization:
- Modify the dataset loading and preprocessing steps to match your data.
- Customize the model saving and pushing configurations.
Usage: (most of the options have valid default values this is an extended example for demonstration purposes)
python unsloth-cli.py --model_name "unsloth/llama-3-8b" --max_seq_length 8192 --dtype None --load_in_4bit \
--r 64 --lora_alpha 32 --lora_dropout 0.1 --bias "none" --use_gradient_checkpointing "unsloth" \
--random_state 3407 --use_rslora --per_device_train_batch_size 4 --gradient_accumulation_steps 8 \
--warmup_steps 5 --max_steps 400 --learning_rate 2e-6 --logging_steps 1 --optim "adamw_8bit" \
--weight_decay 0.005 --lr_scheduler_type "linear" --seed 3407 --output_dir "outputs" \
--report_to "tensorboard" --save_model --save_path "model" --quantization_method "f16" \
--push_model --hub_path "hf/model" --hub_token "your_hf_token"
To see a full list of configurable options, use:
python unsloth-cli.py --help
Happy fine-tuning!
"""
import argparse
import os
def run(args):
    """Run the full fine-tuning pipeline driven by parsed CLI *args*.

    Loads the base model (optionally 4-bit), attaches a LoRA adapter,
    formats the dataset with an Alpaca-style prompt, trains via TRL's
    SFTTrainer, and finally saves/pushes the result as configured.
    """
    from unsloth import FastLanguageModel
    from datasets import load_dataset
    from transformers.utils import strtobool
    from trl import SFTTrainer, SFTConfig
    from unsloth import is_bfloat16_supported
    from unsloth.models.loader_utils import prepare_device_map
    import logging

    # Quieten the GGUF conversion logger used during saving.
    logging.getLogger("hf-to-gguf").setLevel(logging.WARNING)

    # Load model and tokenizer
    device_map, distributed = prepare_device_map()
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name = args.model_name,
        max_seq_length = args.max_seq_length,
        dtype = args.dtype,
        load_in_4bit = args.load_in_4bit,
        device_map = device_map,
    )

    # Configure PEFT model
    model = FastLanguageModel.get_peft_model(
        model,
        r = args.r,
        target_modules = [
            "q_proj",
            "k_proj",
            "v_proj",
            "o_proj",
            "gate_proj",
            "up_proj",
            "down_proj",
        ],
        lora_alpha = args.lora_alpha,
        lora_dropout = args.lora_dropout,
        bias = args.bias,
        use_gradient_checkpointing = args.use_gradient_checkpointing,
        random_state = args.random_state,
        use_rslora = args.use_rslora,
        loftq_config = args.loftq_config,
    )

    # Alpaca-style template: instruction, optional input, then response.
    alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}

### Input:
{}

### Response:
{}"""

    EOS_TOKEN = tokenizer.eos_token  # Must add EOS_TOKEN

    def formatting_prompts_func(examples):
        # Render each (instruction, input, output) triple through the
        # template; the EOS token teaches the model where to stop.
        instructions = examples["instruction"]
        inputs = examples["input"]
        outputs = examples["output"]
        texts = []
        for instruction, input, output in zip(instructions, inputs, outputs):
            text = alpaca_prompt.format(instruction, input, output) + EOS_TOKEN
            texts.append(text)
        return {"text": texts}

    # Optionally pull the dataset from ModelScope instead of the HF hub.
    use_modelscope = strtobool(os.environ.get("UNSLOTH_USE_MODELSCOPE", "False"))
    if use_modelscope:
        from modelscope import MsDataset

        dataset = MsDataset.load(args.dataset, split = "train")
    else:
        # Load and format dataset
        dataset = load_dataset(args.dataset, split = "train")
    dataset = dataset.map(formatting_prompts_func, batched = True)
    print("Data is formatted and ready!")

    # Configure training arguments
    training_args = SFTConfig(
        per_device_train_batch_size = args.per_device_train_batch_size,
        per_device_eval_batch_size = args.per_device_eval_batch_size,
        gradient_accumulation_steps = args.gradient_accumulation_steps,
        warmup_steps = args.warmup_steps,
        max_steps = args.max_steps,
        learning_rate = args.learning_rate,
        # Prefer bf16 where the hardware supports it, else fall back to fp16.
        fp16 = not is_bfloat16_supported(),
        bf16 = is_bfloat16_supported(),
        logging_steps = args.logging_steps,
        optim = args.optim,
        weight_decay = args.weight_decay,
        lr_scheduler_type = args.lr_scheduler_type,
        seed = args.seed,
        output_dir = args.output_dir,
        report_to = args.report_to,
        max_length = args.max_seq_length,
        dataset_num_proc = 2,
        # Only relevant under DDP; None lets the trainer pick its default.
        ddp_find_unused_parameters = False if distributed else None,
        packing = args.packing,
    )

    # Initialize trainer
    trainer = SFTTrainer(
        model = model,
        processing_class = tokenizer,
        train_dataset = dataset,
        args = training_args,
    )

    trainer.train()

    # Save model
    if args.save_model:
        # if args.quantization_method is a list, we will save the model for each quantization method
        if args.save_gguf:
            if isinstance(args.quantization, list):
                for quantization_method in args.quantization:
                    print(
                        f"Saving model with quantization method: {quantization_method}"
                    )
                    model.save_pretrained_gguf(
                        args.save_path,
                        tokenizer,
                        quantization_method = quantization_method,
                    )
                    if args.push_model:
                        model.push_to_hub_gguf(
                            hub_path = args.hub_path,
                            hub_token = args.hub_token,
                            quantization_method = quantization_method,
                        )
            else:
                print(f"Saving model with quantization method: {args.quantization}")
                model.save_pretrained_gguf(
                    args.save_path,
                    tokenizer,
                    quantization_method = args.quantization,
                )
                if args.push_model:
                    model.push_to_hub_gguf(
                        hub_path = args.hub_path,
                        hub_token = args.hub_token,
                        quantization_method = args.quantization,
                    )
        else:
            # Non-GGUF path: merge the adapter and save/push 16-bit weights.
            model.save_pretrained_merged(args.save_path, tokenizer, args.save_method)
            if args.push_model:
                model.push_to_hub_merged(args.save_path, tokenizer, args.hub_token)
    else:
        print("Warning: The model is not saved!")
if __name__ == "__main__":
    # Build the CLI: argument groups mirror the stages of run() above.
    parser = argparse.ArgumentParser(
        description = "🦥 Fine-tune your llm faster using unsloth!"
    )

    # Base model + dataset selection.
    model_group = parser.add_argument_group("🤖 Model Options")
    model_group.add_argument(
        "--model_name",
        type = str,
        default = "unsloth/llama-3-8b",
        help = "Model name to load",
    )
    model_group.add_argument(
        "--max_seq_length",
        type = int,
        default = 2048,
        help = "Maximum sequence length, default is 2048. We auto support RoPE Scaling internally!",
    )
    model_group.add_argument(
        "--dtype",
        type = str,
        default = None,
        help = "Data type for model (None for auto detection)",
    )
    model_group.add_argument(
        "--load_in_4bit",
        action = "store_true",
        help = "Use 4bit quantization to reduce memory usage",
    )
    model_group.add_argument(
        "--dataset",
        type = str,
        default = "yahma/alpaca-cleaned",
        help = "Huggingface dataset to use for training",
    )

    # LoRA adapter hyperparameters.
    lora_group = parser.add_argument_group(
        "🧠 LoRA Options",
        "These options are used to configure the LoRA model.",
    )
    lora_group.add_argument(
        "--r",
        type = int,
        default = 16,
        help = "Rank for Lora model, default is 16. (common values: 8, 16, 32, 64, 128)",
    )
    lora_group.add_argument(
        "--lora_alpha",
        type = int,
        default = 16,
        help = "LoRA alpha parameter, default is 16. (common values: 8, 16, 32, 64, 128)",
    )
    lora_group.add_argument(
        "--lora_dropout",
        type = float,
        default = 0.0,
        help = "LoRA dropout rate, default is 0.0 which is optimized.",
    )
    lora_group.add_argument(
        "--bias",
        type = str,
        default = "none",
        help = "Bias setting for LoRA",
    )
    lora_group.add_argument(
        "--use_gradient_checkpointing",
        type = str,
        default = "unsloth",
        help = "Use gradient checkpointing",
    )
    lora_group.add_argument(
        "--random_state",
        type = int,
        default = 3407,
        help = "Random state for reproducibility, default is 3407.",
    )
    lora_group.add_argument(
        "--use_rslora",
        action = "store_true",
        help = "Use rank stabilized LoRA",
    )
    lora_group.add_argument(
        "--loftq_config",
        type = str,
        default = None,
        help = "Configuration for LoftQ",
    )

    # SFTConfig / optimizer knobs.
    training_group = parser.add_argument_group("🎓 Training Options")
    training_group.add_argument(
        "--per_device_train_batch_size",
        type = int,
        default = 2,
        help = "Batch size per device during training, default is 2.",
    )
    training_group.add_argument(
        "--per_device_eval_batch_size",
        type = int,
        default = 4,
        help = "Batch size per device during evaluation, default is 4.",
    )
    training_group.add_argument(
        "--gradient_accumulation_steps",
        type = int,
        default = 4,
        help = "Number of gradient accumulation steps, default is 4.",
    )
    training_group.add_argument(
        "--warmup_steps",
        type = int,
        default = 5,
        help = "Number of warmup steps, default is 5.",
    )
    training_group.add_argument(
        "--max_steps",
        type = int,
        default = 400,
        help = "Maximum number of training steps.",
    )
    training_group.add_argument(
        "--learning_rate",
        type = float,
        default = 2e-4,
        help = "Learning rate, default is 2e-4.",
    )
    training_group.add_argument(
        "--optim",
        type = str,
        default = "adamw_8bit",
        help = "Optimizer type.",
    )
    training_group.add_argument(
        "--weight_decay",
        type = float,
        default = 0.01,
        help = "Weight decay, default is 0.01.",
    )
    training_group.add_argument(
        "--lr_scheduler_type",
        type = str,
        default = "linear",
        help = "Learning rate scheduler type, default is 'linear'.",
    )
    training_group.add_argument(
        "--seed",
        type = int,
        default = 3407,
        help = "Seed for reproducibility, default is 3407.",
    )
    training_group.add_argument(
        "--packing",
        action = "store_true",
        help = "Enable padding-free sample packing via TRL's bin packer.",
    )

    # Logging / experiment-tracking integrations.
    report_group = parser.add_argument_group("📊 Report Options")
    report_group.add_argument(
        "--report_to",
        type = str,
        default = "tensorboard",
        choices = [
            "azure_ml",
            "clearml",
            "codecarbon",
            "comet_ml",
            "dagshub",
            "dvclive",
            "flyte",
            "mlflow",
            "neptune",
            "tensorboard",
            "wandb",
            "all",
            "none",
        ],
        help = (
            "The list of integrations to report the results and logs to. Supported platforms are:\n\t\t "
            "'azure_ml', 'clearml', 'codecarbon', 'comet_ml', 'dagshub', 'dvclive', 'flyte', "
            "'mlflow', 'neptune', 'tensorboard', and 'wandb'. Use 'all' to report to all integrations "
            "installed, 'none' for no integrations."
        ),
    )
    report_group.add_argument(
        "--logging_steps",
        type = int,
        default = 1,
        help = "Logging steps, default is 1",
    )

    # Local saving (merged weights and/or GGUF conversion).
    save_group = parser.add_argument_group("💾 Save Model Options")
    save_group.add_argument(
        "--output_dir",
        type = str,
        default = "outputs",
        help = "Output directory",
    )
    save_group.add_argument(
        "--save_model",
        action = "store_true",
        help = "Save the model after training",
    )
    save_group.add_argument(
        "--save_method",
        type = str,
        default = "merged_16bit",
        choices = ["merged_16bit", "merged_4bit", "lora"],
        help = "Save method for the model, default is 'merged_16bit'",
    )
    save_group.add_argument(
        "--save_gguf",
        action = "store_true",
        help = "Convert the model to GGUF after training",
    )
    save_group.add_argument(
        "--save_path",
        type = str,
        default = "model",
        help = "Path to save the model",
    )
    save_group.add_argument(
        "--quantization",
        type = str,
        default = "q8_0",
        nargs = "+",
        help = (
            "Quantization method for saving the model. common values ('f16', 'q4_k_m', 'q8_0'), "
            "Check our wiki for all quantization methods https://github.com/unslothai/unsloth/wiki#saving-to-gguf"
        ),
    )

    # Hugging Face hub upload options.
    push_group = parser.add_argument_group("🚀 Push Model Options")
    push_group.add_argument(
        "--push_model",
        action = "store_true",
        help = "Push the model to Hugging Face hub after training",
    )
    push_group.add_argument(
        "--push_gguf",
        action = "store_true",
        help = "Push the model as GGUF to Hugging Face hub after training",
    )
    push_group.add_argument(
        "--hub_path",
        type = str,
        default = "hf/model",
        help = "Path on Hugging Face hub to push the model",
    )
    push_group.add_argument(
        "--hub_token",
        type = str,
        help = "Token for pushing the model to Hugging Face hub",
    )

    args = parser.parse_args()
    run(args)
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/scripts/run_ruff_format.py | scripts/run_ruff_format.py | #!/usr/bin/env python3
"""Run `ruff format` followed by kwarg spacing enforcement."""
from __future__ import annotations
import subprocess
import sys
from pathlib import Path
# Directory containing this script; used to locate the sibling spacing fixer.
HERE = Path(__file__).resolve().parent
def main(argv: list[str]) -> int:
    """Format the given paths with ruff, then normalise kwarg spacing.

    Returns the exit code of the first failing step, or 0 on success or
    when none of the given paths exist.
    """
    existing = [candidate for candidate in argv if Path(candidate).exists()]
    if not existing:
        return 0

    format_result = subprocess.run(
        [sys.executable, "-m", "ruff", "format", *existing]
    )
    if format_result.returncode != 0:
        return format_result.returncode

    enforcer = HERE / "enforce_kwargs_spacing.py"
    spacing_result = subprocess.run([sys.executable, str(enforcer), *existing])
    return spacing_result.returncode


if __name__ == "__main__":
    raise SystemExit(main(sys.argv[1:]))
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/scripts/enforce_kwargs_spacing.py | scripts/enforce_kwargs_spacing.py | #!/usr/bin/env python3
"""Ensure keyword arguments use spaces around '=', prune redundant pass statements."""
from __future__ import annotations
import ast
import argparse
import io
import sys
import tokenize
from collections import defaultdict
from pathlib import Path
def enforce_spacing(text: str) -> tuple[str, bool]:
    """Return updated text with keyword '=' padded by spaces, plus change flag.

    Tokenizes *text* and inserts a single space before/after every bare '='
    OP token (keyword arguments and assignments alike) that is not already
    padded.  Text that cannot be tokenized (e.g. an unterminated bracket
    raises ``tokenize.TokenError``) is returned unchanged instead of
    crashing the hook.
    """
    lines = text.splitlines(keepends=True)
    if not lines:
        return text, False

    # Per-line count of characters inserted so far, used to translate token
    # columns (which refer to the original text) into current columns.
    offsets: dict[int, int] = defaultdict(int)
    changed = False
    reader = io.StringIO(text).readline
    try:
        # Materialize up-front: the generator can raise mid-iteration on
        # malformed input, and we must not apply a partial set of edits.
        tokens = list(tokenize.generate_tokens(reader))
    except (tokenize.TokenError, IndentationError, SyntaxError):
        return text, False
    for token in tokens:
        if token.type != tokenize.OP or token.string != "=":
            continue
        line_index = token.start[0] - 1
        col = token.start[1] + offsets[line_index]
        if line_index < 0 or line_index >= len(lines):
            continue
        line = lines[line_index]
        if col >= len(line) or line[col] != "=":
            continue
        line_changed = False
        # Insert a space before '=' when missing and not preceded by whitespace.
        if col > 0 and line[col - 1] not in {" ", "\t"}:
            line = f"{line[:col]} {line[col:]}"
            offsets[line_index] += 1
            col += 1
            line_changed = True
            changed = True
        # Insert a space after '=' when missing and not followed by whitespace or newline.
        next_index = col + 1
        if next_index < len(line) and line[next_index] not in {" ", "\t", "\n", "\r"}:
            line = f"{line[:next_index]} {line[next_index:]}"
            offsets[line_index] += 1
            line_changed = True
            changed = True
        if line_changed:
            lines[line_index] = line
    if not changed:
        return text, False
    return "".join(lines), True
def remove_redundant_passes(text: str) -> tuple[str, bool]:
    """Drop ``pass`` statements that share a block with other executable code.

    Parses *text*; any ``pass`` sitting in a statement list that contains at
    least one other statement is redundant and is excised from the source
    lines.  Returns the (possibly updated) text and a change flag.  Invalid
    Python is returned unchanged.
    """
    try:
        tree = ast.parse(text)
    except SyntaxError:
        return text, False

    # ast.walk visits every node, so statement lists nested under except
    # handlers, match cases, with blocks, etc. are all covered (the previous
    # hand-rolled recursion missed match-case bodies).
    redundant: list[ast.Pass] = []
    for node in ast.walk(tree):
        for attr in ("body", "orelse", "finalbody"):
            stmts = getattr(node, attr, None)
            if isinstance(stmts, list) and len(stmts) > 1:
                redundant.extend(s for s in stmts if isinstance(s, ast.Pass))

    if not redundant:
        return text, False

    lines = text.splitlines(keepends = True)
    changed = False
    # Excise bottom-up so earlier line/column positions stay valid.
    for node in sorted(
        redundant, key = lambda item: (item.lineno, item.col_offset), reverse = True
    ):
        start = node.lineno - 1
        end = (node.end_lineno or node.lineno) - 1
        if start >= len(lines):
            continue
        changed = True
        if start == end:
            line = lines[start]
            col_start = node.col_offset
            col_end = node.end_col_offset or (col_start + 4)
            segment = line[:col_start] + line[col_end:]
            # Drop the line entirely when only whitespace remains.
            lines[start] = segment if segment.strip() else ""
            continue
        # Defensive fall-back for unexpected multi-line 'pass'.
        prefix = lines[start][: node.col_offset]
        lines[start] = prefix if prefix.strip() else ""
        for idx in range(start + 1, end):
            lines[idx] = ""
        suffix = lines[end][(node.end_col_offset or 0) :]
        lines[end] = suffix

    # Normalise to ensure lines end with newlines except at EOF.
    result_lines: list[str] = []
    for index, line in enumerate(lines):
        if not line:
            continue
        if index < len(lines) - 1 and not line.endswith("\n"):
            result_lines.append(f"{line}\n")
        else:
            result_lines.append(line)
    return "".join(result_lines), changed
def process_file(path: Path) -> bool:
    """Apply spacing and pass-pruning fixes to *path*; return True if rewritten."""
    try:
        with tokenize.open(path) as fh:
            source = fh.read()
            source_encoding = fh.encoding
    except (OSError, SyntaxError) as exc:  # SyntaxError from tokenize on invalid python
        print(f"Failed to read {path}: {exc}", file=sys.stderr)
        return False

    fixed, spacing_changed = enforce_spacing(source)
    fixed, passes_removed = remove_redundant_passes(fixed)
    if not (spacing_changed or passes_removed):
        return False

    path.write_text(fixed, encoding=source_encoding)
    return True
def main(argv: list[str]) -> int:
    """CLI entry point: fix every existing file named on the command line."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("files", nargs="+", help="Python files to fix")
    options = parser.parse_args(argv)

    script_path = Path(__file__).resolve()
    modified: list[Path] = []
    for name in options.files:
        candidate = Path(name)
        # Skip modifying this script to avoid self-edit loops.
        if candidate.resolve() == script_path:
            continue
        if not candidate.exists() or candidate.is_dir():
            continue
        if process_file(candidate):
            modified.append(candidate)

    for candidate in modified:
        print(f"Adjusted kwarg spacing in {candidate}")
    return 0


if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/test_model_registry.py | tests/test_model_registry.py | """
Test model registration methods
Checks that model registration methods work for respective models as well as all models
The check is performed
- by registering the models
- checking that the instantiated models can be found on huggingface hub by querying for the model id
"""
from dataclasses import dataclass
import pytest
from huggingface_hub import ModelInfo as HfModelInfo
from unsloth.registry import register_models, search_models
from unsloth.registry._deepseek import register_deepseek_models
from unsloth.registry._gemma import register_gemma_models
from unsloth.registry._llama import register_llama_models
from unsloth.registry._mistral import register_mistral_models
from unsloth.registry._phi import register_phi_models
from unsloth.registry._qwen import register_qwen_models
from unsloth.registry.registry import MODEL_REGISTRY, QUANT_TAG_MAP, QuantType
from unsloth.utils.hf_hub import get_model_info
# Model family names; index-aligned with MODEL_REGISTRATION_METHODS below.
MODEL_NAMES = [
    "llama",
    "qwen",
    "mistral",
    "phi",
    "gemma",
    "deepseek",
]

# Per-family registration entry points, one per name in MODEL_NAMES.
MODEL_REGISTRATION_METHODS = [
    register_llama_models,
    register_qwen_models,
    register_mistral_models,
    register_phi_models,
    register_gemma_models,
    register_deepseek_models,
]
@dataclass
class ModelTestParam:
    """Pairs a model-family name with its registry population callable."""

    # Human-readable family name; also used as the pytest test id.
    name: str
    # Zero-argument callable that populates MODEL_REGISTRY for this family.
    register_models: callable
def _test_model_uploaded(model_ids: list[str]):
missing_models = []
for _id in model_ids:
model_info: HfModelInfo = get_model_info(_id)
if not model_info:
missing_models.append(_id)
return missing_models
# One pytest parameter per model family, pairing the family name with its
# registration callable.
TestParams = [
    ModelTestParam(name, models)
    for name, models in zip(MODEL_NAMES, MODEL_REGISTRATION_METHODS)
]
# Test that model registration methods register respective models
@pytest.mark.parametrize("model_test_param", TestParams, ids = lambda param: param.name)
def test_model_registration(model_test_param: ModelTestParam):
    """Each family's registration method must register only hub-resolvable models."""
    # Start from a clean registry so only this family's models are checked.
    MODEL_REGISTRY.clear()
    registration_method = model_test_param.register_models
    registration_method()
    registered_models = MODEL_REGISTRY.keys()
    missing_models = _test_model_uploaded(registered_models)
    assert (
        not missing_models
    ), f"{model_test_param.name} missing following models: {missing_models}"
def test_all_model_registration():
    """Registering every family at once must yield only hub-resolvable models."""
    register_models()
    missing_models = _test_model_uploaded(MODEL_REGISTRY.keys())
    assert not missing_models, f"Missing following models: {missing_models}"
def test_quant_type():
    # Test that the quant_type is correctly set for model paths
    # NOTE: for models registered under org="unsloth" with QuantType.NONE aliases QuantType.UNSLOTH
    dynamic_quant_models = search_models(quant_types = [QuantType.UNSLOTH])
    assert all(m.quant_type == QuantType.UNSLOTH for m in dynamic_quant_models)
    # Every dynamic-quant model path must embed the unsloth quant tag.
    quant_tag = QUANT_TAG_MAP[QuantType.UNSLOTH]
    assert all(quant_tag in m.model_path for m in dynamic_quant_models)
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/__init__.py | tests/__init__.py | python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false | |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/qlora/test_hf_qlora_train_and_merge.py | tests/qlora/test_hf_qlora_train_and_merge.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ruff: noqa
import sys
from pathlib import Path
REPO_ROOT = Path(__file__).parents[2]
sys.path.append(str(REPO_ROOT))
import itertools
from copy import deepcopy
import torch
from datasets import Dataset
from trl import SFTConfig
from tests.utils import header_footer_context
from tests.utils.data_utils import (
ANSWER,
DEFAULT_MESSAGES,
USER_MESSAGE,
check_responses,
create_dataset,
describe_peft_weights,
)
from tests.utils.hf_utils import (
convert_lora_to_linear,
fix_llama3_tokenizer,
get_peft_config,
sample_responses,
setup_model,
setup_tokenizer,
setup_trainer,
)
if __name__ == "__main__":
    # Reference QLoRA run using plain HF/TRL (no Unsloth patching): train
    # briefly, then compare generated responses before/after training and
    # after two different 16-bit merge strategies.
    model_name = "meta-llama/Llama-3.2-1B-Instruct"
    dtype = torch.bfloat16
    max_steps = 100
    num_examples = 1000
    lora_rank = 64
    output_dir = "sft_test"
    seed = 42
    batch_size = 5
    num_generations = 5

    tokenizer = setup_tokenizer(model_name, fixup_funcs = [fix_llama3_tokenizer])
    temperature = 0.8
    max_new_tokens = 20

    peft_config = get_peft_config(lora_rank = lora_rank, target_modules = "all-linear")
    model = setup_model(model_name, quantize = True, dtype = dtype, peft_config = peft_config)

    prompt = tokenizer.apply_chat_template(
        [USER_MESSAGE], tokenize = False, add_generation_prompt = True
    )
    with header_footer_context("Test Prompt and Answer"):
        print(f"Test Prompt:\n{prompt}\nExpected Answer:\n{ANSWER}")

    dataset: Dataset = create_dataset(
        tokenizer, num_examples = num_examples, messages = DEFAULT_MESSAGES
    )
    with header_footer_context("Dataset"):
        print(f"Dataset: {next(iter(dataset))}")

    training_args = SFTConfig(
        output_dir = output_dir,
        max_steps = max_steps,
        per_device_train_batch_size = batch_size,
        log_level = "info",
        report_to = "none",
        num_train_epochs = 1,
        logging_steps = 1,
        seed = seed,
        bf16 = dtype == torch.bfloat16,
        fp16 = dtype == torch.float16,
        save_strategy = "no",
    )
    with header_footer_context("Train Args"):
        print(training_args)
        print(peft_config)

    trainer = setup_trainer(
        model, tokenizer, dataset, training_args, peft_config = peft_config
    )
    with header_footer_context("Model"):
        print(type(model.model))

    generation_args = {
        "num_generations": num_generations,
        "max_new_tokens": max_new_tokens,
        "temperature": temperature,
        "skip_special_tokens": False,
        "dtype": dtype,
    }
    responses = sample_responses(
        model,
        tokenizer,
        prompt = prompt,
        **generation_args,
    )
    with header_footer_context("Responses before training"):
        check_responses(responses, answer = ANSWER, prompt = prompt)

    with header_footer_context("Peft Weights before training"):
        for name, stats in itertools.islice(describe_peft_weights(model), 2):
            print(f"{name}:\n{stats}")

    output = trainer.train()
    with header_footer_context("Peft Weights after training"):
        for name, stats in itertools.islice(describe_peft_weights(model), 2):
            print(f"{name}:\n{stats}")

    with header_footer_context("Trainer Output"):
        print(output)

    responses = sample_responses(
        model,
        tokenizer,
        prompt = prompt,
        **generation_args,
    )
    with header_footer_context("Responses after training"):
        check_responses(responses, answer = ANSWER, prompt = prompt)

    # Keep an unmerged copy so both merge paths start from the same weights.
    model_copy = deepcopy(model)

    merged_model = convert_lora_to_linear(model)
    responses = sample_responses(
        merged_model,
        tokenizer,
        prompt = prompt,
        **generation_args,
    )
    with header_footer_context("Responses after custom merging to 16bit"):
        check_responses(responses, answer = ANSWER, prompt = prompt)

    merged_model_peft = model_copy.merge_and_unload()
    responses = sample_responses(
        merged_model_peft,
        tokenizer,
        prompt = prompt,
        **generation_args,
    )
    with header_footer_context("Responses after peft merge_and_unload"):
        check_responses(responses, answer = ANSWER, prompt = prompt)
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/qlora/test_unsloth_qlora_train_and_merge.py | tests/qlora/test_unsloth_qlora_train_and_merge.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ruff: noqa
import sys
from pathlib import Path
REPO_ROOT = Path(__file__).parents[2]
sys.path.append(str(REPO_ROOT))
import itertools
from unsloth import FastLanguageModel
import torch
from datasets import Dataset
from trl import SFTConfig
from tests.utils import header_footer_context
from tests.utils.data_utils import (
DEFAULT_MESSAGES,
USER_MESSAGE,
ANSWER,
create_dataset,
describe_peft_weights,
check_responses,
)
from tests.utils.hf_utils import (
sample_responses,
setup_trainer,
)
def get_unsloth_model_and_tokenizer(
    model_name: str,
    max_seq_length: int,
    load_in_4bit: bool,
    fast_inference: bool,
    max_lora_rank: int = None,
    gpu_memory_utilization: float = 0.5,
    dtype: torch.dtype = torch.bfloat16,
):
    """Load an Unsloth FastLanguageModel and its tokenizer with the given knobs.

    Thin forwarding wrapper around ``FastLanguageModel.from_pretrained``;
    returns the (model, tokenizer) pair it produces.
    """
    return FastLanguageModel.from_pretrained(
        model_name = model_name,
        max_seq_length = max_seq_length,
        load_in_4bit = load_in_4bit,
        fast_inference = fast_inference,
        max_lora_rank = max_lora_rank,
        gpu_memory_utilization = gpu_memory_utilization,
        dtype = dtype,
    )
def get_unsloth_peft_model(
    model,
    lora_rank: int,
    target_modules: list[str] = "all-linear",
    use_gradient_checkpointing: str = False,
    random_state: int = 42,
):
    """Attach a LoRA adapter to *model* via Unsloth.

    Note alpha is set equal to the rank (a common LoRA heuristic).
    """
    return FastLanguageModel.get_peft_model(
        model,
        r = lora_rank,
        target_modules = target_modules,
        lora_alpha = lora_rank,
        use_gradient_checkpointing = use_gradient_checkpointing,
        random_state = random_state,
    )
if __name__ == "__main__":
    # Unsloth QLoRA end-to-end check: train briefly, compare responses
    # before/after training, then merge to 16-bit, reload, and re-check.
    model_name = "meta-llama/Llama-3.2-1B-Instruct"
    dtype = torch.bfloat16
    max_steps = 100
    num_examples = 1000
    lora_rank = 64
    output_dir = "sft_test"
    seed = 42
    batch_size = 5
    num_generations = 5
    target_modules = [
        "q_proj",
        "k_proj",
        "v_proj",
        "o_proj",
        "gate_proj",
        "up_proj",
        "down_proj",
    ]
    gradient_checkpointing = False
    unsloth_merged_path = "unsloth_merged_16bit"

    model, tokenizer = get_unsloth_model_and_tokenizer(
        model_name,
        max_seq_length = 512,
        load_in_4bit = True,
        fast_inference = False,
        max_lora_rank = lora_rank,
        dtype = dtype,
    )
    temperature = 0.8
    max_new_tokens = 20

    model = get_unsloth_peft_model(
        model,
        lora_rank = lora_rank,
        target_modules = target_modules,
        use_gradient_checkpointing = gradient_checkpointing,
        random_state = seed,
    )

    prompt = tokenizer.apply_chat_template(
        [USER_MESSAGE], tokenize = False, add_generation_prompt = True
    )
    with header_footer_context("Test Prompt and Answer"):
        print(f"Test Prompt:\n{prompt}\nExpected Answer:\n{ANSWER}")

    dataset: Dataset = create_dataset(
        tokenizer, num_examples = num_examples, messages = DEFAULT_MESSAGES
    )
    with header_footer_context("Dataset"):
        print(f"Dataset: {next(iter(dataset))}")

    training_args = SFTConfig(
        output_dir = output_dir,
        max_steps = max_steps,
        per_device_train_batch_size = batch_size,
        log_level = "info",
        report_to = "none",
        num_train_epochs = 1,
        logging_steps = 1,
        seed = seed,
        bf16 = dtype == torch.bfloat16,
        fp16 = dtype == torch.float16,
        save_strategy = "no",
    )
    with header_footer_context("Train Args"):
        print(training_args)

    trainer = setup_trainer(model, tokenizer, dataset, training_args)
    with header_footer_context("Model"):
        print(type(model.model))

    generation_args = {
        "num_generations": num_generations,
        "max_new_tokens": max_new_tokens,
        "temperature": temperature,
        "skip_special_tokens": False,
        "dtype": dtype,
    }
    responses = sample_responses(
        model,
        tokenizer,
        prompt = prompt,
        **generation_args,
    )
    with header_footer_context("Responses before training"):
        check_responses(responses, answer = ANSWER, prompt = prompt)

    with header_footer_context("Peft Weights before training"):
        for name, stats in itertools.islice(describe_peft_weights(model), 2):
            print(f"{name}:\n{stats}")

    output = trainer.train()
    with header_footer_context("Peft Weights after training"):
        for name, stats in itertools.islice(describe_peft_weights(model), 2):
            print(f"{name}:\n{stats}")

    with header_footer_context("Trainer Output"):
        print(output)

    responses = sample_responses(
        model,
        tokenizer,
        prompt = prompt,
        **generation_args,
    )
    with header_footer_context("Responses after training"):
        check_responses(responses, answer = ANSWER, prompt = prompt)

    # Merge adapter into 16-bit weights on disk, then reload the merged
    # checkpoint and confirm it still answers correctly.
    model.save_pretrained_merged(
        unsloth_merged_path,
        tokenizer,
        save_method = "merged_16bit",
    )
    merged_model_unsloth, tokenizer = get_unsloth_model_and_tokenizer(
        unsloth_merged_path,
        max_seq_length = 512,
        load_in_4bit = False,
        fast_inference = False,
        dtype = dtype,
    )
    responses = sample_responses(
        merged_model_unsloth,
        tokenizer,
        prompt = prompt,
        **generation_args,
    )
    with header_footer_context("Responses after unsloth merge to 16bit"):
        check_responses(responses, answer = ANSWER, prompt = prompt)
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/test_unsloth_save.py | tests/saving/test_unsloth_save.py | import json
import os
import shutil
import tempfile
import pytest
import importlib
from unsloth import FastLanguageModel, FastModel
# Model ids exercised by the save/merge tests (text + vision, incl. 4-bit
# variants).
model_to_test = [
    # Text Models
    "unsloth/tinyllama",
    "unsloth/tinyllama-bnb-4bit",
    "unsloth/Qwen2.5-0.5B-Instruct",
    "unsloth/Qwen2.5-0.5B-Instruct-bnb-4bit",
    "unsloth/Phi-4-mini-instruct",
    "unsloth/Phi-4-mini-instruct-bnb-4bit",
    "unsloth/Qwen2.5-0.5B",
    # Vision Models
    "unsloth/gemma-3-4b-it",
    "unsloth/Llama-3.2-11B-Vision-Instruct-bnb-4bit",
    "unsloth/Qwen2.5-VL-3B-Instruct-bnb-4bit",
]

# FP16-loadable subset used for TorchAO quantization tests.
torchao_models = [
    "unsloth/tinyllama",
    "unsloth/Qwen2.5-0.5B-Instruct",
    # "unsloth/Phi-4-mini-instruct",
    # "unsloth/Qwen2.5-0.5B",
    # Skip the -bnb-4bit variants since they're already quantized
]

# Aggregated on-disk sizes of saved weight files, keyed by save method then
# model id (populated by the save tests below).
save_file_sizes = {
    "merged_16bit": {},
    "merged_4bit": {},
    "torchao": {},
}

# Tokenizer artifacts every save directory must contain.
tokenizer_files = [
    "tokenizer_config.json",
    "special_tokens_map.json",
]
@pytest.fixture(scope = "session", params = model_to_test)
def loaded_model_tokenizer(request):
    # Session-scoped: each parametrized model is loaded once (4-bit + LoRA)
    # and shared across all save tests for that model.
    model_name = request.param
    print("Loading model and tokenizer...")
    model, tokenizer = FastModel.from_pretrained(
        model_name,  # use small model
        max_seq_length = 128,
        dtype = None,
        load_in_4bit = True,
    )

    # Apply LoRA
    model = FastModel.get_peft_model(
        model,
        r = 16,
        target_modules = ["q_proj", "k_proj", "v_proj", "o_proj"],
        lora_alpha = 16,
        use_gradient_checkpointing = "unsloth",
    )

    return model, tokenizer
@pytest.fixture(scope = "session", params = torchao_models)
def fp16_model_tokenizer(request):
    """Load model in FP16 for TorchAO quantization"""
    # No BnB 4-bit here: TorchAO quantization starts from unquantized weights.
    model_name = request.param
    print(f"Loading model in FP16 for TorchAO: {model_name}")
    model, tokenizer = FastModel.from_pretrained(
        model_name,
        max_seq_length = 128,
        dtype = None,
        load_in_4bit = False,  # No BnB quantization
    )

    # Apply LoRA
    model = FastModel.get_peft_model(
        model,
        r = 16,
        target_modules = ["q_proj", "k_proj", "v_proj", "o_proj"],
        lora_alpha = 16,
        use_gradient_checkpointing = "unsloth",
    )

    return model, tokenizer
@pytest.fixture(scope = "session")
def model(loaded_model_tokenizer):
    # Convenience fixture: the model half of the (model, tokenizer) pair.
    return loaded_model_tokenizer[0]
@pytest.fixture(scope = "session")
def tokenizer(loaded_model_tokenizer):
    # Convenience fixture: the tokenizer half of the (model, tokenizer) pair.
    return loaded_model_tokenizer[1]
@pytest.fixture
def temp_save_dir():
    # Fresh scratch directory per test; removed in the fixture teardown
    # after the yield.
    dir = tempfile.mkdtemp()
    print(f"Temporary directory created at: {dir}")
    yield dir
    print(f"Temporary directory deleted: {dir}")
    shutil.rmtree(dir)
def delete_quantization_config(model):
    """Strip ``quantization_config`` from *model*'s config after merging.

    Rebuilds the config without the quantization entry and installs it both
    on the outer wrapper and on the innermost ``.model`` attribute, so a
    later reload does not try to re-quantize already-merged weights.
    """
    # Since merged, edit quantization_config
    new_config = model.config.to_dict()
    if "quantization_config" in new_config:
        del new_config["quantization_config"]
    original_model = model
    # Round-trip through the config class to get a proper config object.
    new_config = type(model.config).from_dict(new_config)
    # Walk down nested wrappers (PEFT/transformers) to the base model.
    while hasattr(original_model, "model"):
        original_model = original_model.model
    original_model.config = new_config
    model.config = new_config
def test_save_merged_16bit(model, tokenizer, temp_save_dir: str):
    """Merge LoRA into 16-bit weights, save, verify the artifacts, and reload."""
    save_path = os.path.join(
        temp_save_dir,
        "unsloth_merged_16bit",
        model.config._name_or_path.replace("/", "_"),
    )
    model.save_pretrained_merged(
        save_path, tokenizer = tokenizer, save_method = "merged_16bit"
    )
    # Check model files
    assert os.path.isdir(save_path), f"Directory {save_path} does not exist."
    assert os.path.isfile(
        os.path.join(save_path, "config.json")
    ), "config.json not found."
    weight_files = [
        f
        for f in os.listdir(save_path)
        if f.endswith(".bin") or f.endswith(".safetensors")
    ]
    assert len(weight_files) > 0, "No weight files found in the save directory."
    # Check tokenizer files
    for file in tokenizer_files:
        assert os.path.isfile(
            os.path.join(save_path, file)
        ), f"{file} not found in the save directory."
    # Check config to see if it is 16bit by checking for quantization config
    # NOTE(review): the assertion message below is inverted — it fires when a
    # quantization config IS present, not when it is missing.
    config_path = os.path.join(save_path, "config.json")
    with open(config_path, "r") as f:
        config = json.load(f)
    assert (
        "quantization_config" not in config
    ), "Quantization config not found in the model config."
    # Store the size of the model files (used as a baseline by the 4-bit test).
    total_size = sum(os.path.getsize(os.path.join(save_path, f)) for f in weight_files)
    save_file_sizes["merged_16bit"][model.config._name_or_path] = total_size
    print(f"Total size of merged_16bit files: {total_size} bytes")
    # Test loading the model from the saved path
    loaded_model, loaded_tokenizer = FastLanguageModel.from_pretrained(
        save_path,
        max_seq_length = 128,
        dtype = None,
        load_in_4bit = True,
    )
def test_save_merged_4bit(model, tokenizer, temp_save_dir: str):
    """Force-merge to 4-bit, save, verify artifacts, and check the files are
    smaller than the 16-bit merge."""
    save_path = os.path.join(
        temp_save_dir,
        "unsloth_merged_4bit",
        model.config._name_or_path.replace("/", "_"),
    )
    model.save_pretrained_merged(
        save_path, tokenizer = tokenizer, save_method = "merged_4bit_forced"
    )
    # Check model files
    assert os.path.isdir(save_path), f"Directory {save_path} does not exist."
    assert os.path.isfile(
        os.path.join(save_path, "config.json")
    ), "config.json not found."
    weight_files = [
        f
        for f in os.listdir(save_path)
        if f.endswith(".bin") or f.endswith(".safetensors")
    ]
    assert len(weight_files) > 0, "No weight files found in the save directory."
    # Check tokenizer files
    for file in tokenizer_files:
        assert os.path.isfile(
            os.path.join(save_path, file)
        ), f"{file} not found in the save directory."
    # Store the size of the model files
    total_size = sum(os.path.getsize(os.path.join(save_path, f)) for f in weight_files)
    save_file_sizes["merged_4bit"][model.config._name_or_path] = total_size
    print(f"Total size of merged_4bit files: {total_size} bytes")
    # NOTE(review): depends on test_save_merged_16bit having run first to
    # populate save_file_sizes["merged_16bit"] for this model.
    assert (
        total_size < save_file_sizes["merged_16bit"][model.config._name_or_path]
    ), "Merged 4bit files are larger than merged 16bit files."
    # Check config to see if it is 4bit
    config_path = os.path.join(save_path, "config.json")
    with open(config_path, "r") as f:
        config = json.load(f)
    assert (
        "quantization_config" in config
    ), "Quantization config not found in the model config."
    # Test loading the model from the saved path
    loaded_model, loaded_tokenizer = FastModel.from_pretrained(
        save_path,
        max_seq_length = 128,
        dtype = None,
        load_in_4bit = True,
    )
@pytest.mark.skipif(
    importlib.util.find_spec("torchao") is None,
    reason = "require torchao to be installed",
)
def test_save_torchao(fp16_model_tokenizer, temp_save_dir: str):
    """Save with TorchAO int8 quantization and verify the quantized artifacts
    are smaller than the 16-bit ones."""
    model, tokenizer = fp16_model_tokenizer
    save_path = os.path.join(
        temp_save_dir, "unsloth_torchao", model.config._name_or_path.replace("/", "_")
    )
    from torchao.quantization import Int8DynamicActivationInt8WeightConfig

    torchao_config = Int8DynamicActivationInt8WeightConfig()
    model.save_pretrained_torchao(
        save_path,
        tokenizer = tokenizer,
        torchao_config = torchao_config,
        push_to_hub = False,
    )
    # Presumably save_pretrained_torchao also writes 16-bit weights at
    # `save_path`; record their size as the comparison baseline.
    weight_files_16bit = [
        f
        for f in os.listdir(save_path)
        if f.endswith(".bin") or f.endswith(".safetensors")
    ]
    total_16bit_size = sum(
        os.path.getsize(os.path.join(save_path, f)) for f in weight_files_16bit
    )
    save_file_sizes["merged_16bit"][model.config._name_or_path] = total_16bit_size
    # The quantized copy lands in a sibling "-torchao" directory.
    torchao_save_path = save_path + "-torchao"
    # Check model files
    assert os.path.isdir(
        torchao_save_path
    ), f"Directory {torchao_save_path} does not exist."
    assert os.path.isfile(
        os.path.join(torchao_save_path, "config.json")
    ), "config.json not found."
    weight_files = [
        f
        for f in os.listdir(torchao_save_path)
        if f.endswith(".bin") or f.endswith(".safetensors")
    ]
    assert len(weight_files) > 0, "No weight files found in the save directory."
    # Check tokenizer files
    for file in tokenizer_files:
        assert os.path.isfile(
            os.path.join(torchao_save_path, file)
        ), f"{file} not found in the save directory."
    # Store the size of the model files
    total_size = sum(
        os.path.getsize(os.path.join(torchao_save_path, f)) for f in weight_files
    )
    save_file_sizes["torchao"][model.config._name_or_path] = total_size
    assert (
        total_size < save_file_sizes["merged_16bit"][model.config._name_or_path]
    ), "torchao files are larger than merged 16bit files."
    # Check config to see if it is quantized with torchao
    config_path = os.path.join(torchao_save_path, "config.json")
    with open(config_path, "r") as f:
        config = json.load(f)
    assert (
        "quantization_config" in config
    ), "Quantization config not found in the model config."
    # Test loading the model from the saved path
    # can't set `load_in_4bit` to True because the model is torchao quantized
    # can't quantize again with bitsandbytes
    import torch.serialization

    with torch.serialization.safe_globals([getattr]):
        loaded_model, loaded_tokenizer = FastModel.from_pretrained(
            torchao_save_path,
            max_seq_length = 128,
            dtype = None,
            load_in_4bit = False,
        )
@pytest.mark.skipif(
    importlib.util.find_spec("torchao") is None,
    reason = "require torchao to be installed",
)
def test_save_and_inference_torchao(fp16_model_tokenizer, temp_save_dir: str):
    """Save with TorchAO, reload the quantized checkpoint, and run generation."""
    model, tokenizer = fp16_model_tokenizer
    model_name = model.config._name_or_path
    print(f"Testing TorchAO save and inference for: {model_name}")
    save_path = os.path.join(
        temp_save_dir, "torchao_models", model_name.replace("/", "_")
    )
    from torchao.quantization import Int8DynamicActivationInt8WeightConfig

    torchao_config = Int8DynamicActivationInt8WeightConfig()
    # Save with TorchAO
    model.save_pretrained_torchao(
        save_path,
        tokenizer = tokenizer,
        torchao_config = torchao_config,
        push_to_hub = False,
    )
    torchao_save_path = save_path + "-torchao"
    # Verify files exist
    assert os.path.isdir(
        torchao_save_path
    ), f"TorchAO directory {torchao_save_path} does not exist."
    # Load with safe globals
    import torch.serialization

    with torch.serialization.safe_globals([getattr]):
        loaded_model, loaded_tokenizer = FastModel.from_pretrained(
            torchao_save_path,
            max_seq_length = 128,
            dtype = None,
            load_in_4bit = False,
        )
    FastModel.for_inference(loaded_model)  # Enable native 2x faster inference
    messages = [
        {
            "role": "user",
            "content": "Continue the fibonnaci sequence: 1, 1, 2, 3, 5, 8,",
        },
    ]
    inputs = loaded_tokenizer.apply_chat_template(
        messages,
        tokenize = True,
        add_generation_prompt = True,  # Must add for generation
        return_tensors = "pt",
    ).to("cuda")
    outputs = loaded_model.generate(  # ← Use loaded_model, not model
        input_ids = inputs,
        max_new_tokens = 64,
        use_cache = False,  # Avoid cache issues
        temperature = 1.5,
        min_p = 0.1,
        do_sample = True,
        pad_token_id = loaded_tokenizer.pad_token_id or loaded_tokenizer.eos_token_id,
    )
    # Decode with the LOADED tokenizer
    generated_text = loaded_tokenizer.decode(outputs[0], skip_special_tokens = True)
    input_text = loaded_tokenizer.decode(inputs[0], skip_special_tokens = True)
    # Strip the echoed prompt to isolate the model's reply.
    response_part = generated_text[len(input_text) :].strip()
    print(f"Input: {input_text}")
    print(f"Full output: {generated_text}")
    print(f"Response only: {response_part}")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/language_models/test_save_merged_grpo_model.py | tests/saving/language_models/test_save_merged_grpo_model.py | # -*- coding: utf-8 -*-
"""test_Llama3_1_(3B)_GRPO_LoRA (1).ipynb
### Unsloth
"""
from unsloth import FastLanguageModel
import torch
import sys
from pathlib import Path
import multiprocessing as mp
import gc
from multiprocessing import Queue
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
from tests.utils.aime_eval import evaluate_model_aime, compare_aime_results
# Training-wide hyperparameters shared by the SFT and GRPO stages.
max_seq_length = 2048  # Can increase for longer reasoning traces
lora_rank = 64  # Larger rank = smarter, but slower
def evaluate_merged_model(result_queue, load_in_4bit = False, load_in_8bit = False):
    """Load ./final_merged_model in a subprocess, run the AIME eval, and push
    the results onto `result_queue`.

    Args:
        result_queue: multiprocessing queue that receives the eval results.
        load_in_4bit / load_in_8bit: quantization to use when loading the
            merged checkpoint (also selects the reported model_type label).
    """
    from unsloth import FastLanguageModel
    from tests.utils.aime_eval import evaluate_model_aime

    max_seq_length = 2048  # Can increase for longer reasoning traces
    lora_rank = 64  # Larger rank = smarter, but slower
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name = "./final_merged_model",
        max_seq_length = max_seq_length,
        # BUGFIX: was hard-coded to True, so the 8-bit and 16-bit variants
        # silently evaluated a 4-bit load. Honor the parameters instead.
        load_in_4bit = load_in_4bit,
        load_in_8bit = load_in_8bit,
        fast_inference = True,  # Enable vLLM fast inference
        max_lora_rank = lora_rank,
        gpu_memory_utilization = 0.8,  # Reduce if out of memory
    )
    print(f"\n{'='*60}")
    if load_in_4bit:
        print("🔍 EVALUATION Merged model: 4 bits load")
        model_type = "merged_model_4bits"
    elif load_in_8bit:
        print("🔍 EVALUATION Merged model: 8 bits load")
        model_type = "merged_model_8bits"
    else:
        print("🔍 EVALUATION Merged model: 16 bits load")
        model_type = "merged_model_16bits"
    print(f"{'='*60}")
    # BUGFIX: capture the return value — it was previously discarded, so the
    # `result_queue.put(results)` below raised NameError.
    results = evaluate_model_aime(
        model = model,
        tokenizer = tokenizer,
        model_type = model_type,
        temperature = 0.3,
        n_sampling = 8,
        max_tokens = 32768,
        top_p = 0.95,
        seed = 0,
    )
    result_queue.put(results)
    del model
    del tokenizer
    torch.cuda.empty_cache()
    gc.collect()
# Main execution code should be wrapped in this guard
def training_run(result_queue):
    """Run the SFT (LIMO) + GRPO (GSM8K) training pipeline and push the
    evaluation results onto `result_queue`."""
    # Load the base instruct model with vLLM fast inference enabled.
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name = "meta-llama/Llama-3.2-3B-Instruct",
        max_seq_length = max_seq_length,
        load_in_4bit = False,  # False for LoRA 16bit
        fast_inference = True,  # Enable vLLM fast inference
        max_lora_rank = lora_rank,
        gpu_memory_utilization = 0.8,  # Reduce if out of memory
    )

"""### Helper Functions
<a name="Data"></a>
#### Helper functions - Data Prep
"""
import re
import json

# Tags delimiting the model's chain-of-thought and final answer.
reasoning_start = "<reasoning>"
reasoning_end = "</reasoning>"
solution_start = "<answer>"
solution_end = "</answer>"
def extract_hash_answer(text):
    """Extract answer from GSM8K format"""
    if "####" in text:
        return text.split("####")[1].strip()
    return None
def prepare_gsm8k_dataset(dataset):
    """Map GSM8K rows into chat-style prompts plus the extracted answer."""
    open_think, close_think = "<reasoning>", "</reasoning>"
    open_ans, close_ans = "<answer>", "</answer>"
    system_prompt = (
        f"You are given a problem. Think about the problem and reason step by step. "
        f"Place your thinking process between {open_think} and {close_think}. "
        f"Then, provide your final numerical solution between {open_ans}{close_ans}"
    )

    def to_prompt(row):
        return {
            "prompt": [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": row["question"]},
            ],
            "answer": extract_hash_answer(row["answer"]),
        }

    return dataset.map(to_prompt)
def prepare_limo_dataset(dataset):
    """Format LIMO dataset for SFT training"""
    if dataset is None:
        return None

    system_prompt = """You are a helpful reasoning assistant. When given a problem, think through it step by step and provide your answer in the following format:
<reasoning>
[Your detailed step-by-step reasoning and solution process]
</reasoning>
<answer>
[Your final numerical answer]
</answer>"""

    def to_conversation(row):
        # Fold the reference solution and answer into one assistant reply.
        reply = (
            f"<reasoning>\n{row['solution']}\n</reasoning>\n"
            f"<answer>\n{row['answer']}\n</answer>"
        )
        # Wrap the conversation in a dict so datasets.map keeps the column.
        return {
            "prompt": [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": row["question"]},
                {"role": "assistant", "content": reply},
            ]
        }

    return dataset.map(to_conversation)

print("\n✅ Dataset preparation functions defined!")

"""#### Helper functions - Evaluation"""
def get_max_prompt_length(dataset, tokenizer):
    """Calculate maximum and average prompt length in dataset"""
    print("Analyzing prompt lengths...")
    tokenized = dataset.map(
        lambda x: {
            "tokens": tokenizer.apply_chat_template(
                x["prompt"], add_generation_prompt = True, tokenize = True
            )
        },
        batched = True,
    )
    lengths = tokenized.map(lambda x: {"length": len(x["tokens"])})["length"]
    longest = max(lengths)
    shortest = min(lengths)
    mean_len = sum(lengths) / len(lengths)
    print(
        f"Prompt lengths - Min: {shortest}, Max: {longest}, Avg: {mean_len:.1f}"
    )
    return longest, mean_len
def extract_unsloth_answer(text, start_tag = "<SOLUTION>", end_tag = "</SOLUTION>"):
    """Extract answer from Unsloth SOLUTION tags"""
    tag_re = re.escape(start_tag) + r"(.*?)" + re.escape(end_tag)
    found = re.findall(tag_re, text, re.DOTALL)
    if not found:
        return ""
    # Keep the last occurrence and strip currency/percent punctuation.
    return re.sub(r"[%$,]", "", found[-1]).strip()
def find_number(search_string):
    """Find the last number in a string"""
    number_re = re.compile(
        r"-?[\d,]*\.?\d+",
        re.MULTILINE | re.DOTALL | re.IGNORECASE,
    )
    hits = number_re.findall(search_string)
    # Last match wins; drop thousands separators.
    return hits[-1].replace(",", "").strip() if hits else ""
def remove_symbols(x: str) -> str:
    """Remove commas, percent and dollar symbols"""
    if not x:
        return ""
    # Single-pass delete of , % $ then trim whitespace.
    return x.translate(str.maketrans("", "", ",%$")).strip()
def get_num_tokens(text, tokenizer_instance):
    """Count tokens in text"""
    if not text:
        return 0
    ids = tokenizer_instance(text, return_tensors = "pt")["input_ids"][0]
    return len(ids)
def check_format_compliance(text, format_type = "unsloth"):
    """Check if response follows expected format"""
    if format_type != "unsloth":
        # Only the "unsloth" layout is recognised.
        return False
    r_open = re.escape("<start_reasoning>")
    r_close = re.escape("<end_reasoning>")
    s_open = re.escape("<SOLUTION>")
    s_close = re.escape("</SOLUTION>")
    pattern = (
        rf"^[\s]*{r_open}.+?{r_close}.*?"
        rf"{s_open}.+?{s_close}[\s]*$"
    )
    return bool(re.match(pattern, text.strip(), re.DOTALL))
def normalize_answer(answer):
    """Normalize answer for comparison"""
    if not answer:
        return ""
    cleaned = remove_symbols(str(answer))
    try:
        value = float(cleaned)
    except (ValueError, TypeError):
        # Non-numeric answers pass through cleaned but otherwise untouched.
        return cleaned
    # Canonicalise integral floats ("5.0" -> "5"); keep true floats as-is.
    return str(int(value)) if value.is_integer() else str(value)
def evaluate_answer_correctness(extracted_answer, ground_truth):
    """Evaluate answer correctness with multiple criteria.

    Returns a tuple (exact_match, plausible_match, confidence).
    """
    if not extracted_answer or not ground_truth:
        return False, False, 0.0
    norm_extracted = normalize_answer(extracted_answer)
    norm_ground_truth = normalize_answer(ground_truth)
    # Exact string match after normalization.
    if norm_extracted == norm_ground_truth:
        return True, True, 1.0
    try:
        extracted_num = float(norm_extracted)
        ground_truth_num = float(norm_ground_truth)
        if ground_truth_num != 0:
            # Grade numeric answers by relative-error bands.
            relative_error = abs(extracted_num - ground_truth_num) / abs(
                ground_truth_num
            )
            if relative_error < 0.01:
                return True, True, 0.9
            elif relative_error < 0.05:
                return False, True, 0.7
            elif relative_error < 0.10:
                return False, True, 0.5
        else:
            # Ground truth is zero: relative error is undefined, so compare
            # the extracted value against zero directly.
            # NOTE(review): indentation reconstructed — confirm this else
            # pairs with `if ground_truth_num != 0` in the original file.
            if extracted_num == 0:
                return True, True, 1.0
            elif abs(extracted_num) < 0.01:
                return False, True, 0.7
    except (ValueError, TypeError):
        # Non-numeric fallback: case-insensitive string comparison.
        if norm_extracted.lower() == norm_ground_truth.lower():
            return True, True, 1.0
    return False, False, 0.0
"""#### Reward Functions for GRPO"""

def match_format_exactly(completions, **kwargs):
    """Reward function for exact format matching"""
    fmt = (
        rf"^[\s]*{re.escape('<reasoning>')}.+?{re.escape('</reasoning>')}.*?"
        rf"{re.escape('<answer>')}.+?{re.escape('</answer>')}[\s]*$"
    )
    scores = []
    for completion in completions:
        text = completion[0]["content"]
        # Full reward only when the whole response follows the template.
        scores.append(3.0 if re.match(fmt, text, re.DOTALL) else 0.0)
    return scores
def match_format_approximately(completions, **kwargs):
    """Reward function for approximate format matching.

    Adds +0.5 for each template tag that appears exactly once and -1.0 for
    each tag that is missing or duplicated.
    """
    reasoning_start = "<reasoning>"
    reasoning_end = "</reasoning>"
    # BUGFIX: was "<answerr>" (typo) — it never matched, so every correctly
    # formatted completion was penalized -1.0 for the opening answer tag.
    solution_start = "<answer>"
    solution_end = "</answer>"
    scores = []
    for completion in completions:
        score = 0
        response = completion[0]["content"]
        score += 0.5 if response.count(reasoning_start) == 1 else -1.0
        score += 0.5 if response.count(reasoning_end) == 1 else -1.0
        score += 0.5 if response.count(solution_start) == 1 else -1.0
        score += 0.5 if response.count(solution_end) == 1 else -1.0
        scores.append(score)
    return scores
def check_answer_correctness(prompts, completions, answer, **kwargs):
    """Reward function for answer correctness"""

    def _pull_answer(text):
        found = re.search(r"<answer>(.*?)</answer>", text, re.DOTALL)
        return re.sub(r"[%$,]", "", found.group(1)).strip() if found else ""

    extracted = [_pull_answer(c[0]["content"]) for c in completions]
    scores = []
    for guess, truth in zip(extracted, answer):
        if not guess:
            scores.append(0)
            continue
        score = 0
        if guess == truth:
            score += 3.0
        elif guess.strip() == truth.strip():
            score += 1.5
        else:
            # Partial credit based on how close the numeric ratio is to 1.
            try:
                ratio = float(guess) / float(truth)
            except Exception:
                score -= 1.5
            else:
                if 0.9 <= ratio <= 1.1:
                    score += 1.0
                elif 0.8 <= ratio <= 1.2:
                    score += 0.5
                else:
                    score -= 1.5
        scores.append(score)
    return scores

print("✅ Reward functions defined!")
"""#### Main Evaluation Function"""

# NOTE: gc is already imported at module top; this re-import is harmless.
import gc

"""#### Comparison and Memory Management"""
def compare_model_results(all_results):
    """Generate comprehensive comparison of multiple model results.

    Expects a list of dicts with keys model_type, correct_format_pct,
    exact_match_pct, plausible_match_pct, avg_confidence. Prints a table,
    an improvement analysis vs. the first entry, and writes the summary to
    model_comparison_comprehensive.json in the working directory.
    """
    print(f"\n{'='*80}")
    print("COMPREHENSIVE MODEL COMPARISON")
    print(f"{'='*80}")
    # Main table
    print(
        f"{'Model':<15} {'Format %':<10} {'Exact %':<10} {'Plausible %':<12} {'Confidence':<12}"
    )
    print("-" * 80)
    for result in all_results:
        print(
            f"{result['model_type']:<15} "
            f"{result['correct_format_pct']:<10.1f} "
            f"{result['exact_match_pct']:<10.1f} "
            f"{result['plausible_match_pct']:<12.1f} "
            f"{result['avg_confidence']:<12.3f}"
        )
    # Improvement analysis: every later model is compared to the first one.
    if len(all_results) > 1:
        print(f"\n{'='*50}")
        print("IMPROVEMENT ANALYSIS")
        print(f"{'='*50}")
        base_result = all_results[0]
        for result in all_results[1:]:
            print(f"\n{result['model_type']} vs {base_result['model_type']}:")
            format_improvement = (
                result["correct_format_pct"] - base_result["correct_format_pct"]
            )
            exact_improvement = (
                result["exact_match_pct"] - base_result["exact_match_pct"]
            )
            plausible_improvement = (
                result["plausible_match_pct"] - base_result["plausible_match_pct"]
            )
            print(f" Format compliance: {format_improvement:+.1f}%")
            print(f" Exact matches: {exact_improvement:+.1f}%")
            print(f" Plausible matches: {plausible_improvement:+.1f}%")
    # Save comparison
    comparison_data = {
        "summary": all_results,
        "best_model": max(all_results, key = lambda x: x["exact_match_pct"]),
    }
    with open("model_comparison_comprehensive.json", "w") as f:
        json.dump(comparison_data, f, indent = 4)
    print(
        f"\nBest performing model: {comparison_data['best_model']['model_type']} "
        f"({comparison_data['best_model']['exact_match_pct']:.1f}% exact matches)"
    )
def cleanup_memory():
    """Comprehensive memory cleanup"""
    print("🧹 Cleaning up GPU memory...")
    # Repeat a few times so reference cycles are collected promptly.
    for _ in range(10):
        torch.cuda.empty_cache()
        gc.collect()
    if not torch.cuda.is_available():
        return
    gib = 1024**3
    print(
        f"GPU memory - Allocated: {torch.cuda.memory_allocated() / gib:.2f} GB, "
        f"Reserved: {torch.cuda.memory_reserved() / gib:.2f} GB"
    )
"""#### Data Loading and Preparation"""

from datasets import load_dataset

# Load GSM8K
gsm8k_dataset = load_dataset("openai/gsm8k", "main", split = "train")

# Load LIMO (adjust this based on your access method)
limo_train = load_dataset("GAIR/LIMO", split = "train")

# Prepare datasets
gsm8k_train = prepare_gsm8k_dataset(gsm8k_dataset)
limo_train = prepare_limo_dataset(limo_train)
print(f" GSM8K train: {len(gsm8k_train)}")
print(f" LIMO train: {len(limo_train) if limo_train else 0}")

# Store results
all_results = []

# Single temperature evaluation on combined dataset
# NOTE(review): the base-model results land in `results` but are never
# appended to `all_results` here — verify the queue push later includes them.
results = evaluate_model_aime(
    model = model,
    tokenizer = tokenizer,
    model_type = "base",
    temperature = 0.3,
    n_sampling = 8,
    max_tokens = 32768,
    top_p = 0.95,
    seed = 0,
)
from unsloth.chat_templates import get_chat_template

# Re-apply the Llama 3.1 chat template to the tokenizer.
tokenizer = get_chat_template(
    tokenizer,
    chat_template = "llama-3.1",
)

def formatting_prompts_func(examples):
    """Render each batch of conversations to plain text via the chat template."""
    convos = examples["prompt"]
    texts = [
        tokenizer.apply_chat_template(
            convo, tokenize = False, add_generation_prompt = False
        )
        for convo in convos
    ]
    return {
        "text": texts,
    }

# Materialise the "text" column used by SFTTrainer below.
limo_train = limo_train.map(
    formatting_prompts_func,
    batched = True,
)

from trl import SFTTrainer
from transformers import DataCollatorForSeq2Seq, TrainingArguments
from unsloth import is_bfloat16_supported
print(f"\n{'*'*60}")
print("🎯 STAGE 1: Qlora Fine-Tuning on LIMO")
print(f"{'*'*60}")

# Attach LoRA adapters for the SFT stage.
model = FastLanguageModel.get_peft_model(
    model,
    r = lora_rank,  # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
    target_modules = [
        "q_proj",
        "k_proj",
        "v_proj",
        "o_proj",
        "gate_proj",
        "up_proj",
        "down_proj",
    ],  # Remove QKVO if out of memory
    lora_alpha = lora_rank,
    use_gradient_checkpointing = "unsloth",  # Enable long context finetuning
    random_state = 3407,
)

if limo_train is not None:
    trainer = SFTTrainer(
        model = model,
        tokenizer = tokenizer,
        train_dataset = limo_train,
        dataset_text_field = "text",
        max_seq_length = max_seq_length,
        data_collator = DataCollatorForSeq2Seq(tokenizer = tokenizer),
        dataset_num_proc = 2,
        packing = False,  # Can make training 5x faster for short sequences.
        args = TrainingArguments(
            per_device_train_batch_size = 2,
            gradient_accumulation_steps = 4,
            warmup_steps = 5,
            num_train_epochs = 1,  # Set this for 1 full training run.
            # max_steps = 60,
            learning_rate = 2e-4,
            fp16 = not is_bfloat16_supported(),
            bf16 = is_bfloat16_supported(),
            logging_steps = 1,
            optim = "adamw_8bit",
            weight_decay = 0.01,
            lr_scheduler_type = "linear",
            seed = 3407,
            output_dir = "outputs",
            report_to = "none",  # Use this for WandB etc
        ),
    )
    from unsloth.chat_templates import train_on_responses_only

    # Mask the loss on user turns so training fits only assistant replies.
    trainer = train_on_responses_only(
        trainer,
        instruction_part = "<|start_header_id|>user<|end_header_id|>\n\n",
        response_part = "<|start_header_id|>assistant<|end_header_id|>\n\n",
    )
    # Train
    print(f"🚂 Starting SFT training on {len(limo_train)} examples...")
    trainer.train()
    # Save checkpoint
    model.save_pretrained("qlora_checkpoint")
    tokenizer.save_pretrained("qlora_checkpoint")
    print("💾 Qlora checkpoint saved!")
    # Cleanup
    del trainer
    cleanup_memory()
    print("✅ Qlora training completed!")
else:
    print("⚠️ Skipping Qlora training - no LIMO dataset available")

# Cleanup
cleanup_memory()
# Counters that throttle the debug printing inside check_numbers.
global PRINTED_TIMES
PRINTED_TIMES = 0
global PRINT_EVERY_STEPS
PRINT_EVERY_STEPS = 5

# Matches the first run of digits/commas/dots after the opening answer tag.
match_numbers = re.compile(
    solution_start + r".*?([\d\.\,]{1,})", flags = re.MULTILINE | re.DOTALL
)
def check_numbers(prompts, completions, answer, **kwargs):
    """Reward function: +1.5 for a numerically exact answer, -0.5 otherwise."""
    global PRINTED_TIMES
    global PRINT_EVERY_STEPS

    question = prompts[0][-1]["content"]
    responses = [c[0]["content"] for c in completions]
    extracted = [
        (m.group(1) if (m := match_numbers.search(r)) is not None else None)
        for r in responses
    ]
    # Periodically log one sample for debugging.
    if PRINTED_TIMES % PRINT_EVERY_STEPS == 0:
        print(
            "*" * 20,
            f"Question:\n{question}",
            f"\nAnswer:\n{answer[0]}",
            f"\nResponse:\n{responses[0]}",
            f"\nExtracted:\n{extracted[0]}",
        )
    PRINTED_TIMES += 1

    scores = []
    for guess, truth in zip(extracted, answer):
        if guess is None:
            scores.append(0)
            continue
        # Convert both sides to numbers; commas like 123,456 are stripped.
        try:
            truth_num = float(truth.strip())
            guess_num = float(guess.strip().replace(",", ""))
        except Exception:
            scores.append(0)
            continue
        scores.append(1.5 if guess_num == truth_num else -0.5)
    return scores
print(f"\n{'*'*60}")
print("🎯 STAGE 2: GRPO Fine-Tuning on GSM8K")
print(f"{'*'*60}")

# Get max prompt length
max_prompt_length, _ = get_max_prompt_length(gsm8k_train, tokenizer)
max_prompt_length = min(max_prompt_length + 10, 512)  # Add buffer, cap at 512
print(f"Using max_prompt_length: {max_prompt_length}")

from trl import GRPOConfig, GRPOTrainer

training_args = GRPOConfig(
    learning_rate = 5e-6,
    weight_decay = 0.1,
    warmup_ratio = 0.1,
    lr_scheduler_type = "cosine",
    optim = "adamw_torch_fused",
    logging_steps = 1,
    per_device_train_batch_size = 1,
    gradient_accumulation_steps = 4,  # Increase to 4 for smoother training
    num_generations = 8,  # Decrease if out of memory
    max_prompt_length = max_prompt_length,
    # Leave the remainder of the context window for the completion.
    max_completion_length = max_seq_length - max_prompt_length,
    # num_train_epochs = 1, # Set to 1 for a full training run
    # max_steps = 250,
    max_steps = 1000,
    save_steps = 250,
    max_grad_norm = 0.1,
    report_to = "none",  # Can use Weights & Biases
    output_dir = "outputs",
)

# All four reward functions are combined by the trainer.
trainer = GRPOTrainer(
    model = model,
    processing_class = tokenizer,
    reward_funcs = [
        match_format_exactly,
        match_format_approximately,
        check_answer_correctness,
        check_numbers,
    ],
    args = training_args,
    train_dataset = gsm8k_train,
)

# Train
print(f"🚂 Starting GRPO training on {len(gsm8k_train)} examples...")
trainer.train()

# Save checkpoint
model.save_pretrained("grpo_checkpoint")
tokenizer.save_pretrained("grpo_checkpoint")
print("💾 GRPO checkpoint saved!")

# Cleanup
del trainer
del training_args
cleanup_memory()
print("✅ GRPO training completed!")
print(f"\n{'='*60}")
print("🔍 EVALUATION 3: Final GRPO Model")
print(f"{'='*60}")

grpo_results = evaluate_model_aime(
    model = model,
    tokenizer = tokenizer,
    model_type = "grpo",
    temperature = 0.3,
    n_sampling = 8,
    max_tokens = 32768,
    top_p = 0.95,
    seed = 0,
)
all_results.append(grpo_results)
print("✅ Final model evaluation complete!")

print(f"\n{'='*60}")
print("💾 SAVING FINAL MODEL")
print(f"{'='*60}")

# Save as merged model
try:
    model.save_pretrained_merged(
        "final_merged_model", tokenizer, save_method = "merged_16bit"
    )
    print("✅ Merged model saved to: final_merged_model/")
except Exception as e:
    print(f"⚠️ Could not save merged model: {e}")
    print("Final model saved as LoRA adapter only")

print("💾 Model saving complete!")
safe_remove_directory("./unsloth_compiled_cache")

# BUGFIX: previously `result_queue.put(results)` pushed only the base-model
# eval dict, dropping the GRPO results collected in `all_results` — and the
# consumer in __main__ (`all_results = results` followed by .append) expects
# a list. Push base + GRPO results as one list.
result_queue.put([results] + all_results)

# Clean up
del model
del tokenizer
torch.cuda.empty_cache()
gc.collect()
# # Merged model load 16 bits model AIME eval
# result_queue = mp.Queue()
# p = mp.Process(target=evaluate_merged_model, args=(result_queue, False, False))
# p.start()
# p.join()
#
# merged_16bits = result_queue.get()
# all_results.append(merged_16bits)
#
# # Clean up
# del merged_model
# del merged_tokenizer
# del dataset_ppl
# torch.cuda.empty_cache()
# gc.collect()
#
# safe_remove_directory("./unsloth_compiled_cache")
#
# # Merged model load 8 bits model AIME eval
#
# result_queue = mp.Queue()
# p = mp.Process(target=evaluate_merged_model, args=(result_queue, False, True))
# p.start()
# p.join()
#
# merged_16bits = result_queue.get()
# all_results.append(merged_16bits)
# Merged model load 4 bits AIME eval
# result_queue = mp.Queue()
# p = mp.Process(target=evaluate_merged_model, args=(result_queue, True, False))
# p.start()
# p.join()
#
# merged_16bits = result_queue.get()
# all_results.append(merged_16bits)
if __name__ == "__main__":
    # Spawn keeps CUDA state isolated between the training and eval processes.
    mp.set_start_method("spawn", force = True)
    result_queue = mp.Queue()
    all_results = []

    # run main finetuning and grpo loop
    p = mp.Process(target = training_run, args = (result_queue,))
    p.start()
    p.join()
    results = result_queue.get()
    # NOTE(review): assumes the queue payload is a list of result dicts,
    # since merged-model results are appended to it below.
    all_results = results

    # evaluate merged model loaded 16bits
    p = mp.Process(target = evaluate_merged_model, args = (result_queue, False, False))
    p.start()
    p.join()
    merged_load_16bits = result_queue.get()
    all_results.append(merged_load_16bits)

    safe_remove_directory("./unsloth_compiled_cache")

    # Merged model load 8 bits model AIME eval
    p = mp.Process(target = evaluate_merged_model, args = (result_queue, False, True))
    p.start()
    p.join()
    merged_load_8bits = result_queue.get()
    all_results.append(merged_load_8bits)

    safe_remove_directory("./unsloth_compiled_cache")

    # Merged model load 4 bits model AIME eval
    p = mp.Process(target = evaluate_merged_model, args = (result_queue, True, False))
    p.start()
    p.join()
    merged_load_4bits = result_queue.get()
    all_results.append(merged_load_4bits)

    safe_remove_directory("./unsloth_compiled_cache")

    # AIME-specific comparison function
    print(f"\n{'='*80}")
    print("🏆 FINAL TRAINING PIPELINE RESULTS")
    print(f"{'='*80}")

    # Use the AIME-specific comparison
    compare_aime_results(all_results)
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/language_models/test_push_to_hub_merged.py | tests/saving/language_models/test_push_to_hub_merged.py | from unsloth import FastLanguageModel, FastVisionModel, UnslothVisionDataCollator
from unsloth.chat_templates import get_chat_template
from trl import SFTTrainer, SFTConfig
from transformers import (
DataCollatorForLanguageModeling,
DataCollatorForSeq2Seq,
TrainingArguments,
)
from datasets import load_dataset, Dataset
import torch
from tqdm import tqdm
import pandas as pd
import multiprocessing as mp
from multiprocessing import Process, Queue
import gc
import os
from huggingface_hub import HfFileSystem, hf_hub_download
# ruff: noqa
import sys
from pathlib import Path
# Make the repo root importable so `tests.utils.*` resolves when this file is
# executed directly.
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
from tests.utils.perplexity_eval import (
ppl_model,
add_to_comparison,
print_model_comparison,
)
# Define helper functions outside of main
def formatting_prompts_func(examples):
    """Render each conversation with the (module-level) tokenizer's chat template."""
    rendered = [
        tokenizer.apply_chat_template(
            messages, tokenize = False, add_generation_prompt = False
        )
        for messages in examples["messages"]
    ]
    return {"text": rendered}
if torch.cuda.is_bf16_supported():
compute_dtype = torch.bfloat16
attn_implementation = "flash_attention_2"
else:
compute_dtype = torch.float16
attn_implementation = "sdpa"
# Load the 1B instruct model in 4-bit with the dtype/backend chosen above.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "unsloth/Llama-3.2-1B-Instruct",
    max_seq_length = 2048,
    dtype = compute_dtype,
    load_in_4bit = True,
    load_in_8bit = False,
    full_finetuning = False,
    attn_implementation = attn_implementation,
)

tokenizer = get_chat_template(
    tokenizer,
    chat_template = "llama-3.1",
)

from unsloth.chat_templates import standardize_sharegpt

# Train split for SFT; eval split for the perplexity comparison.
dataset_train = load_dataset("allenai/openassistant-guanaco-reformatted", split = "train")
dataset_ppl = load_dataset("allenai/openassistant-guanaco-reformatted", split = "eval")
dataset_train = dataset_train.map(formatting_prompts_func, batched = True)
dataset_ppl = dataset_ppl.map(formatting_prompts_func, batched = True)

# Baseline perplexity of the 4-bit base model before finetuning.
add_to_comparison("Base model 4 bits", ppl_model(model, tokenizer, dataset_ppl))
# Attach LoRA adapters (rank 16) to all attention and MLP projections.
model = FastLanguageModel.get_peft_model(
    model,
    r = 16,
    target_modules = [
        "k_proj",
        "q_proj",
        "v_proj",
        "o_proj",
        "gate_proj",
        "down_proj",
        "up_proj",
    ],
    lora_alpha = 16,
    lora_dropout = 0,
    bias = "none",
    use_gradient_checkpointing = "unsloth",
    random_state = 3407,
    use_rslora = False,
    loftq_config = None,
)
from unsloth import is_bfloat16_supported

# Short (30-step) SFT run used only to validate the save/upload round trip.
trainer = SFTTrainer(
    model = model,
    tokenizer = tokenizer,
    train_dataset = dataset_train,
    dataset_text_field = "text",
    max_seq_length = 2048,
    data_collator = DataCollatorForSeq2Seq(tokenizer = tokenizer),
    dataset_num_proc = 2,
    packing = False,
    args = TrainingArguments(
        per_device_train_batch_size = 2,
        gradient_accumulation_steps = 4,
        warmup_ratio = 0.1,
        max_steps = 30,
        learning_rate = 2e-4,
        fp16 = not is_bfloat16_supported(),
        bf16 = is_bfloat16_supported(),
        logging_steps = 50,
        optim = "adamw_8bit",
        lr_scheduler_type = "linear",
        seed = 3407,
        output_dir = "outputs",
        report_to = "none",
    ),
)

from unsloth.chat_templates import train_on_responses_only

# Mask the loss on user turns so training fits only the assistant replies.
trainer = train_on_responses_only(
    trainer,
    instruction_part = "<|start_header_id|>user<|end_header_id|>\n\n",
    response_part = "<|start_header_id|>assistant<|end_header_id|>\n\n",
)
# run training
trainer_stats = trainer.train()
# ---- Save, merge, and round-trip via the Hugging Face Hub ----------------
# Credentials come from the environment when available; otherwise prompt
# interactively and cache the answers in os.environ for this process.
hf_username = os.environ.get("HF_USER", "")
if not hf_username:
    hf_username = input("Please enter your Hugging Face username: ").strip()
    os.environ["HF_USER"] = hf_username
hf_token = os.environ.get("HF_TOKEN", "")
if not hf_token:
    hf_token = input("Please enter your Hugging Face token: ").strip()
    os.environ["HF_TOKEN"] = hf_token
repo_name = f"{hf_username}/merged_llama_text_model"
# Per-stage pass/fail flags for the final validation report.
success = {
    "upload": False,
    "download": False,
}
# Stage 1: Upload model to Hub
try:
    print("\n" + "=" * 80)
    print("=== UPLOADING MODEL TO HUB ===".center(80))
    print("=" * 80 + "\n")
    model.push_to_hub_merged(repo_name, tokenizer = tokenizer, token = hf_token)
    success["upload"] = True
    print("✅ Model uploaded successfully!")
except Exception as e:
    print(f"❌ Failed to upload model: {e}")
    # Chain the cause so the underlying upload error stays visible.
    # (A stray `t` statement used to follow this block and raised NameError.)
    raise RuntimeError("Model upload failed.") from e
# Stage 2: Test downloading the model (even if cached)
safe_remove_directory(f"./{hf_username}")
try:
    print("\n" + "=" * 80)
    print("=== TESTING MODEL DOWNLOAD ===".center(80))
    print("=" * 80 + "\n")
    # Force download even if cached
    model, tokenizer = FastLanguageModel.from_pretrained(
        f"{hf_username}/merged_llama_text_model"
    )
    success["download"] = True
    print("✅ Model downloaded successfully!")
except Exception as e:
    print(f"❌ Download failed: {e}")
    raise RuntimeError("Model download failed.") from e
# Final report
print("\n" + "=" * 80)
print("=== VALIDATION REPORT ===".center(80))
print("=" * 80 + "\n")
for stage, passed in success.items():
    status = "✓" if passed else "✗"
    print(f"{status} {stage.replace('_', ' ').title()}")
print("\n" + "=" * 80)
if all(success.values()):
    print("\n🎉 All stages completed successfully!")
else:
    raise RuntimeError("Validation failed for one or more stages.")
# final cleanup
safe_remove_directory("./outputs")
safe_remove_directory("./unsloth_compiled_cache")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/language_models/test_merge_4bit_validation.py | tests/saving/language_models/test_merge_4bit_validation.py | from unsloth import FastLanguageModel
from unsloth.chat_templates import get_chat_template
from trl import SFTTrainer, SFTConfig
from transformers import DataCollatorForSeq2Seq, TrainingArguments
from datasets import load_dataset
import torch
import sys
from pathlib import Path
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
def formatting_prompts_func(examples):
    """Render each conversation in ``examples["messages"]`` into a flat
    training string using the module-global ``tokenizer``'s chat template.

    Returns a dict with a single ``"text"`` column, as expected by
    ``datasets.Dataset.map(..., batched=True)``.
    """
    rendered = []
    for conversation in examples["messages"]:
        rendered.append(
            tokenizer.apply_chat_template(
                conversation, tokenize = False, add_generation_prompt = False
            )
        )
    return {"text": rendered}
# ---- Phase 1: load the 4-bit base model and a small training slice ------
print(f"\n{'='*80}")
print("🔍 PHASE 1: Loading Base Model and Initial Training")
print(f"{'='*80}")
# bf16-capable GPUs use FlashAttention-2; older GPUs fall back to fp16/SDPA.
if torch.cuda.is_bf16_supported():
    compute_dtype = torch.bfloat16
    attn_implementation = "flash_attention_2"
else:
    compute_dtype = torch.float16
    attn_implementation = "sdpa"
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "unsloth/Llama-3.1-8B-Instruct",
    max_seq_length = 2048,
    dtype = compute_dtype,
    load_in_4bit = True,
    load_in_8bit = False,
    full_finetuning = False,
    attn_implementation = attn_implementation,
)
tokenizer = get_chat_template(
    tokenizer,
    chat_template = "llama-3.1",
)
# Load small dataset for quick training (first 100 rows only)
dataset_train = load_dataset(
    "allenai/openassistant-guanaco-reformatted", split = "train[:100]"
)
dataset_train = dataset_train.map(formatting_prompts_func, batched = True)
print("✅ Base model loaded successfully!")
# ---- Phase 2: attach LoRA adapters and run a short fine-tune -------------
print(f"\n{'='*80}")
print("🔍 PHASE 2: First Fine-tuning")
print(f"{'='*80}")
model = FastLanguageModel.get_peft_model(
    model,
    r = 16,
    target_modules = [
        "k_proj",
        "q_proj",
        "v_proj",
        "o_proj",
        "gate_proj",
        "down_proj",
        "up_proj",
    ],
    lora_alpha = 16,
    lora_dropout = 0,
    bias = "none",
    use_gradient_checkpointing = "unsloth",
    random_state = 3407,
    use_rslora = False,
    loftq_config = None,
)
from unsloth import is_bfloat16_supported
trainer = SFTTrainer(
    model = model,
    tokenizer = tokenizer,
    train_dataset = dataset_train,
    dataset_text_field = "text",
    max_seq_length = 2048,
    data_collator = DataCollatorForSeq2Seq(tokenizer = tokenizer),
    dataset_num_proc = 2,
    packing = False,
    args = TrainingArguments(
        per_device_train_batch_size = 2,
        gradient_accumulation_steps = 4,
        warmup_ratio = 0.1,
        max_steps = 10, # Very short training for test
        learning_rate = 2e-4,
        fp16 = not is_bfloat16_supported(),
        bf16 = is_bfloat16_supported(),
        logging_steps = 5,
        optim = "adamw_8bit",
        lr_scheduler_type = "linear",
        seed = 3407,
        output_dir = "outputs",
        report_to = "none",
    ),
)
trainer_stats = trainer.train()
print("✅ First fine-tuning completed!")
# ---- Phase 3: merge the LoRA into a 4-bit checkpoint on disk -------------
print(f"\n{'='*80}")
print("🔍 PHASE 3: Save with Forced 4bit Merge")
print(f"{'='*80}")
model.save_pretrained_merged(
    save_directory = "./test_4bit_model",
    tokenizer = tokenizer,
    save_method = "forced_merged_4bit",
)
print("✅ Model saved with forced 4bit merge!")
# ---- Phase 4: reload that 4-bit checkpoint and fine-tune it again --------
print(f"\n{'='*80}")
print("🔍 PHASE 4: Loading 4bit Model and Second Fine-tuning")
print(f"{'='*80}")
# Clean up first model to free GPU memory before the second load.
del model
del tokenizer
torch.cuda.empty_cache()
# Load the 4bit merged model
model_4bit, tokenizer_4bit = FastLanguageModel.from_pretrained(
    model_name = "./test_4bit_model",
    max_seq_length = 2048,
    load_in_4bit = True,
    load_in_8bit = False,
)
tokenizer_4bit = get_chat_template(
    tokenizer_4bit,
    chat_template = "llama-3.1",
)
print("✅ 4bit model loaded successfully!")
# Add LoRA adapters to the 4bit model (same config as the first round)
model_4bit = FastLanguageModel.get_peft_model(
    model_4bit,
    r = 16,
    target_modules = [
        "k_proj",
        "q_proj",
        "v_proj",
        "o_proj",
        "gate_proj",
        "down_proj",
        "up_proj",
    ],
    lora_alpha = 16,
    lora_dropout = 0,
    bias = "none",
    use_gradient_checkpointing = "unsloth",
    random_state = 3407,
    use_rslora = False,
    loftq_config = None,
)
# Second fine-tuning
trainer_4bit = SFTTrainer(
    model = model_4bit,
    tokenizer = tokenizer_4bit,
    train_dataset = dataset_train,
    dataset_text_field = "text",
    max_seq_length = 2048,
    data_collator = DataCollatorForSeq2Seq(tokenizer = tokenizer_4bit),
    dataset_num_proc = 2,
    packing = False,
    args = TrainingArguments(
        per_device_train_batch_size = 2,
        gradient_accumulation_steps = 4,
        warmup_ratio = 0.1,
        max_steps = 10, # Very short training for test
        learning_rate = 2e-4,
        fp16 = not is_bfloat16_supported(),
        bf16 = is_bfloat16_supported(),
        logging_steps = 5,
        optim = "adamw_8bit",
        lr_scheduler_type = "linear",
        seed = 3407,
        output_dir = "outputs_4bit",
        report_to = "none",
    ),
)
trainer_4bit.train()
print("✅ Second fine-tuning on 4bit model completed!")
# ---- Phase 5: a plain 16-bit merge from a 4-bit base must be rejected ----
print(f"\n{'='*80}")
print("🔍 PHASE 5: Testing TypeError on Regular Merge (Should Fail)")
print(f"{'='*80}")
try:
    model_4bit.save_pretrained_merged(
        save_directory = "./test_should_fail",
        tokenizer = tokenizer_4bit,
        # No save_method specified, should default to regular merge
    )
    # Explicit raise instead of `assert False`: bare asserts are stripped
    # under `python -O`, which would make this validation silently pass.
    raise AssertionError("Expected TypeError but merge succeeded!")
except TypeError as e:
    expected_error = "Base model should be a 16bits or mxfp4 base model for a 16bit model merge. Use `save_method=forced_merged_4bit` instead"
    if expected_error not in str(e):
        raise AssertionError(f"Unexpected error message: {str(e)}")
    print("✅ Correct TypeError raised for 4bit base model regular merge attempt!")
    print(f"Error message: {str(e)}")
# ---- Phase 6: the forced 4-bit merge path must still work ----------------
print(f"\n{'='*80}")
print("🔍 PHASE 6: Successful Save with Forced 4bit Method")
print(f"{'='*80}")
try:
    model_4bit.save_pretrained_merged(
        save_directory = "./test_4bit_second",
        tokenizer = tokenizer_4bit,
        save_method = "forced_merged_4bit",
    )
    print("✅ Successfully saved 4bit model with forced 4bit method!")
except Exception as e:
    # Chain the cause so the real failure reason is preserved.
    raise AssertionError(f"Phase 6 failed unexpectedly: {e}") from e
print(f"\n{'='*80}")
print("🔍 CLEANUP")
print(f"{'='*80}")
# Remove every scratch directory this test created, in creation order.
for scratch_dir in (
    "./outputs",
    "./outputs_4bit",
    "./unsloth_compiled_cache",
    "./test_4bit_model",
    "./test_4bit_second",
    "./test_should_fail",
):
    safe_remove_directory(scratch_dir)
print("✅ All tests passed successfully!")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/language_models/test_merged_model_perplexity_qwen_2.5.py | tests/saving/language_models/test_merged_model_perplexity_qwen_2.5.py | from unsloth import FastLanguageModel, FastVisionModel, UnslothVisionDataCollator
from unsloth.chat_templates import get_chat_template
from trl import SFTTrainer, SFTConfig
from transformers import (
DataCollatorForLanguageModeling,
DataCollatorForSeq2Seq,
TrainingArguments,
)
from datasets import load_dataset, Dataset
import torch
from tqdm import tqdm
import pandas as pd
import multiprocessing as mp
from multiprocessing import Process, Queue
import gc
# ruff: noqa
import sys
from pathlib import Path
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
from tests.utils.perplexity_eval import (
ppl_model,
add_to_comparison,
print_model_comparison,
)
alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
{}
### Input:
{}
### Response:
{}"""
# Define helper functions outside of main
def formatting_prompts_func(examples):
    """Reshape chat-format rows into Alpaca-style supervised rows.

    For each conversation in ``examples["messages"]`` only the *last* user
    turn and the *last* assistant turn are kept, then rendered through
    ``alpaca_prompt``. Returns the four columns the downstream trainer and
    perplexity evaluation expect: instruction / input / output / text.
    """
    instructions, inputs, outputs, texts = [], [], [], []
    for chat in examples["messages"]:
        user_text = ""
        assistant_text = ""
        for turn in chat:
            role = turn["role"]
            if role == "user":
                user_text = turn["content"]
            elif role == "assistant":
                assistant_text = turn["content"]
        task = "Complete the statement"
        instructions.append(task)
        inputs.append(user_text)
        outputs.append(assistant_text)
        texts.append(alpaca_prompt.format(task, user_text, assistant_text))
    return {
        "instruction": instructions,
        "input": inputs,
        "output": outputs,
        "text": texts,
    }
def load_and_compute_8bit_ppl(result_queue, load_in_4bit = False, load_in_8bit = False):
    """Load the merged model in a fresh subprocess and report its perplexity.

    Intended to run in a spawned child process so the quantized load does not
    collide with CUDA state already held by the parent process.

    Args:
        result_queue: multiprocessing.Queue; the perplexity (a plain float)
            is pushed onto it as the single result.
        load_in_4bit: load the merged checkpoint with 4-bit quantization.
        load_in_8bit: load the merged checkpoint with 8-bit quantization.
    """
    # Import inside the function: with the "spawn" start method the child
    # re-imports this module, and these imports must succeed there.
    from unsloth import FastLanguageModel
    from tests.utils.perplexity_eval import ppl_model
    # Load model
    merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
        model_name = "./unsloth_out/merged_qwen_text_model",
        max_seq_length = 2048,
        load_in_4bit = load_in_4bit,
        load_in_8bit = load_in_8bit,
    )
    # Set up tokenizer (not needed for Qwen: the Alpaca prompt below is used
    # instead of a chat template)
    # merged_tokenizer = get_chat_template(
    #     merged_tokenizer,
    #     chat_template="llama-3.1",
    # )
    # Load dataset fresh in subprocess
    dataset_ppl = load_dataset(
        "allenai/openassistant-guanaco-reformatted", split = "eval"
    )
    alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
{}
### Input:
{}
### Response:
{}"""
    # Local duplicate of the module-level formatter so the spawned child does
    # not depend on parent-process globals.
    def formatting_prompts_func(examples):
        instructions = []
        inputs = []
        outputs = []
        texts = []
        for conversation in examples["messages"]:
            # Extract user message and assistant response
            user_message = ""
            assistant_message = ""
            for turn in conversation:
                if turn["role"] == "user":
                    user_message = turn["content"]
                elif turn["role"] == "assistant":
                    assistant_message = turn["content"]
            # Store intermediate format
            instruction = "Complete the statement"
            instructions.append(instruction)
            inputs.append(user_message)
            outputs.append(assistant_message)
            # Create formatted text
            text = alpaca_prompt.format(instruction, user_message, assistant_message)
            texts.append(text)
        return {
            "instruction": instructions,
            "input": inputs,
            "output": outputs,
            "text": texts,
        }
    dataset_ppl = dataset_ppl.map(formatting_prompts_func, batched = True)
    # Compute perplexity using the passed dataset
    ppl_value = ppl_model(merged_model, merged_tokenizer, dataset_ppl)
    # IMPORTANT: Convert to Python float if it's a tensor — a CUDA tensor
    # cannot be pickled through the queue back to the parent.
    if torch.is_tensor(ppl_value):
        ppl_value = ppl_value.cpu().item()  # Move to CPU and convert to Python scalar
    elif hasattr(ppl_value, "item"):
        ppl_value = ppl_value.item()  # Convert numpy or other array types
    else:
        ppl_value = float(ppl_value)  # Ensure it's a float
    # Return only the perplexity value
    result_queue.put(ppl_value)
    # Clean up (left disabled; the child process exits right after anyway)
    # del merged_model
    # del merged_tokenizer
    # del dataset_ppl
    # torch.cuda.empty_cache()
    # gc.collect()
# Main execution code should be wrapped in this guard: the 8-bit perplexity
# check below spawns a child process that re-imports this module.
if __name__ == "__main__":
    # "spawn" gives the child a clean CUDA context (fork is unsafe with CUDA).
    mp.set_start_method("spawn", force = True)
    if torch.cuda.is_bf16_supported():
        compute_dtype = torch.bfloat16
        attn_implementation = "flash_attention_2"
    else:
        compute_dtype = torch.float16
        attn_implementation = "sdpa"
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name = "unsloth/Qwen2.5-7B-Instruct",
        max_seq_length = 2048,
        dtype = compute_dtype,
        load_in_4bit = True,
        load_in_8bit = False,
        full_finetuning = False,
        attn_implementation = attn_implementation,
    )
    dataset_train = load_dataset(
        "allenai/openassistant-guanaco-reformatted", split = "train"
    )
    dataset_ppl = load_dataset(
        "allenai/openassistant-guanaco-reformatted", split = "eval"
    )
    dataset_train = dataset_train.map(formatting_prompts_func, batched = True)
    dataset_ppl = dataset_ppl.map(formatting_prompts_func, batched = True)
    # Baseline perplexity before any fine-tuning.
    add_to_comparison("Base model 4 bits", ppl_model(model, tokenizer, dataset_ppl))
    model = FastLanguageModel.get_peft_model(
        model,
        r = 16,
        target_modules = [
            "k_proj",
            "q_proj",
            "v_proj",
            "o_proj",
            "gate_proj",
            "down_proj",
            "up_proj",
        ],
        lora_alpha = 16,
        lora_dropout = 0,
        bias = "none",
        use_gradient_checkpointing = "unsloth",
        random_state = 3407,
        use_rslora = False,
        loftq_config = None,
    )
    from unsloth import is_bfloat16_supported
    trainer = SFTTrainer(
        model = model,
        tokenizer = tokenizer,
        train_dataset = dataset_train,
        dataset_text_field = "text",
        max_seq_length = 2048,
        data_collator = DataCollatorForSeq2Seq(tokenizer = tokenizer),
        dataset_num_proc = 2,
        packing = False,
        args = TrainingArguments(
            per_device_train_batch_size = 2,
            gradient_accumulation_steps = 4,
            warmup_ratio = 0.1,
            max_steps = 200,
            learning_rate = 2e-4,
            fp16 = not is_bfloat16_supported(),
            bf16 = is_bfloat16_supported(),
            logging_steps = 50,
            optim = "adamw_8bit",
            lr_scheduler_type = "linear",
            seed = 3407,
            output_dir = "outputs",
            report_to = "none",
        ),
    )
    # run training
    trainer_stats = trainer.train()
    add_to_comparison("Qlora model", ppl_model(model, tokenizer, dataset_ppl))
    # saving and merging the model to local disk
    print("merge and save to local disk")
    model.save_pretrained_merged(
        save_directory = "./unsloth_out/merged_qwen_text_model", tokenizer = tokenizer
    )
    # print("cleaning")
    # del model
    # del tokenizer
    # torch.cuda.empty_cache()
    # gc.collect()
    # load model from local disk and test
    print("Loading merged model in 4 bit for perplexity test")
    merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
        model_name = "./unsloth_out/merged_qwen_text_model",
        max_seq_length = 2048,
        load_in_4bit = True,
        load_in_8bit = False,
    )
    add_to_comparison(
        "merged model load 4bit", ppl_model(merged_model, merged_tokenizer, dataset_ppl)
    )
    # The 8-bit load happens in a subprocess so it cannot clash with the
    # models already resident on the GPU in this process.
    print("Computing 8-bit model perplexity in subprocess...")
    result_queue = mp.Queue()
    p = mp.Process(target = load_and_compute_8bit_ppl, args = (result_queue, False, True))
    p.start()
    p.join()
    ppl_8bit = result_queue.get()
    add_to_comparison("merged model loaded 8bits", ppl_8bit)
    print("Loading merged model in 16 bit for perplexity test")
    merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
        model_name = "./unsloth_out/merged_qwen_text_model",
        max_seq_length = 2048,
        load_in_4bit = False,
        load_in_8bit = False,
    )
    add_to_comparison(
        "merged model loaded 16bits",
        ppl_model(merged_model, merged_tokenizer, dataset_ppl),
    )
    print_model_comparison()
    safe_remove_directory("./outputs")
    safe_remove_directory("./unsloth_compiled_cache")
    safe_remove_directory("./unsloth_out")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/language_models/test_merged_model_perplexity_llama-3.1-8b.py | tests/saving/language_models/test_merged_model_perplexity_llama-3.1-8b.py | from unsloth import FastLanguageModel, FastVisionModel, UnslothVisionDataCollator
from unsloth.chat_templates import get_chat_template
from trl import SFTTrainer, SFTConfig
from transformers import (
DataCollatorForLanguageModeling,
DataCollatorForSeq2Seq,
TrainingArguments,
)
from datasets import load_dataset, Dataset
import torch
from tqdm import tqdm
import pandas as pd
import multiprocessing as mp
from multiprocessing import Process, Queue
import gc
# ruff: noqa
import sys
from pathlib import Path
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
from tests.utils.perplexity_eval import (
ppl_model,
add_to_comparison,
print_model_comparison,
)
# Define helper functions outside of main
def formatting_prompts_func(examples):
    """Map a batch of chat conversations onto rendered "text" strings.

    Relies on the module-global ``tokenizer`` (configured in ``__main__``)
    to apply its chat template without tokenizing.
    """
    return {
        "text": [
            tokenizer.apply_chat_template(
                chat, tokenize = False, add_generation_prompt = False
            )
            for chat in examples["messages"]
        ]
    }
def load_and_compute_8bit_ppl(result_queue, load_in_4bit = False, load_in_8bit = False):
    """Load the merged model in a fresh subprocess and report its perplexity.

    Runs in a spawned child process so the quantized load does not collide
    with CUDA state already held by the parent.

    Args:
        result_queue: multiprocessing.Queue; the perplexity (a plain float)
            is pushed onto it as the single result.
        load_in_4bit: load the merged checkpoint with 4-bit quantization.
        load_in_8bit: load the merged checkpoint with 8-bit quantization.
    """
    # Import inside the function: with the "spawn" start method the child
    # re-imports this module, and these imports must succeed there.
    from unsloth import FastLanguageModel
    from unsloth.chat_templates import get_chat_template
    from tests.utils.perplexity_eval import ppl_model
    # Load model
    merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
        model_name = "./unsloth_out/merged_llama_text_model",
        max_seq_length = 2048,
        load_in_4bit = load_in_4bit,
        load_in_8bit = load_in_8bit,
    )
    # Set up tokenizer
    merged_tokenizer = get_chat_template(
        merged_tokenizer,
        chat_template = "llama-3.1",
    )
    # Load dataset fresh in subprocess
    dataset_ppl = load_dataset(
        "allenai/openassistant-guanaco-reformatted", split = "eval"
    )
    # Format the dataset (local duplicate so the child does not depend on
    # parent-process globals)
    def formatting_prompts_func(examples):
        convos = examples["messages"]
        texts = [
            merged_tokenizer.apply_chat_template(
                convo, tokenize = False, add_generation_prompt = False
            )
            for convo in convos
        ]
        return {"text": texts}
    dataset_ppl = dataset_ppl.map(formatting_prompts_func, batched = True)
    # Compute perplexity using the passed dataset
    ppl_value = ppl_model(merged_model, merged_tokenizer, dataset_ppl)
    # IMPORTANT: Convert to Python float if it's a tensor — a CUDA tensor
    # cannot be pickled through the queue back to the parent.
    if torch.is_tensor(ppl_value):
        ppl_value = ppl_value.cpu().item()  # Move to CPU and convert to Python scalar
    elif hasattr(ppl_value, "item"):
        ppl_value = ppl_value.item()  # Convert numpy or other array types
    else:
        ppl_value = float(ppl_value)  # Ensure it's a float
    # Return only the perplexity value
    result_queue.put(ppl_value)
    # Clean up
    del merged_model
    del merged_tokenizer
    del dataset_ppl
    torch.cuda.empty_cache()
    gc.collect()
# Main execution code should be wrapped in this guard: the 8-bit perplexity
# check below spawns a child process that re-imports this module.
if __name__ == "__main__":
    # "spawn" gives the child a clean CUDA context (fork is unsafe with CUDA).
    mp.set_start_method("spawn", force = True)
    if torch.cuda.is_bf16_supported():
        compute_dtype = torch.bfloat16
        attn_implementation = "flash_attention_2"
    else:
        compute_dtype = torch.float16
        attn_implementation = "sdpa"
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name = "unsloth/Llama-3.1-8B-Instruct",
        max_seq_length = 2048,
        dtype = compute_dtype,
        load_in_4bit = True,
        load_in_8bit = False,
        full_finetuning = False,
        attn_implementation = attn_implementation,
    )
    tokenizer = get_chat_template(
        tokenizer,
        chat_template = "llama-3.1",
    )
    from unsloth.chat_templates import standardize_sharegpt
    dataset_train = load_dataset(
        "allenai/openassistant-guanaco-reformatted", split = "train"
    )
    dataset_ppl = load_dataset(
        "allenai/openassistant-guanaco-reformatted", split = "eval"
    )
    dataset_train = dataset_train.map(formatting_prompts_func, batched = True)
    dataset_ppl = dataset_ppl.map(formatting_prompts_func, batched = True)
    print("\n dataset sample [0]")
    print(dataset_train[0])
    # Baseline perplexity before any fine-tuning.
    add_to_comparison("Base model 4 bits", ppl_model(model, tokenizer, dataset_ppl))
    model = FastLanguageModel.get_peft_model(
        model,
        r = 16,
        target_modules = [
            "k_proj",
            "q_proj",
            "v_proj",
            "o_proj",
            "gate_proj",
            "down_proj",
            "up_proj",
        ],
        lora_alpha = 16,
        lora_dropout = 0,
        bias = "none",
        use_gradient_checkpointing = "unsloth",
        random_state = 3407,
        use_rslora = False,
        loftq_config = None,
    )
    from unsloth import is_bfloat16_supported
    trainer = SFTTrainer(
        model = model,
        tokenizer = tokenizer,
        train_dataset = dataset_train,
        dataset_text_field = "text",
        max_seq_length = 2048,
        data_collator = DataCollatorForSeq2Seq(tokenizer = tokenizer),
        dataset_num_proc = 2,
        packing = False,
        args = TrainingArguments(
            per_device_train_batch_size = 2,
            gradient_accumulation_steps = 4,
            warmup_ratio = 0.1,
            max_steps = 200,
            learning_rate = 2e-4,
            fp16 = not is_bfloat16_supported(),
            bf16 = is_bfloat16_supported(),
            logging_steps = 50,
            optim = "adamw_8bit",
            lr_scheduler_type = "linear",
            seed = 3407,
            output_dir = "outputs",
            report_to = "none",
        ),
    )
    from unsloth.chat_templates import train_on_responses_only
    # Mask user turns so loss is computed only on assistant responses.
    trainer = train_on_responses_only(
        trainer,
        instruction_part = "<|start_header_id|>user<|end_header_id|>\n\n",
        response_part = "<|start_header_id|>assistant<|end_header_id|>\n\n",
    )
    # NOTE(review): result discarded — looks like a debugging leftover.
    tokenizer.decode(trainer.train_dataset[0]["input_ids"])
    # run training
    trainer_stats = trainer.train()
    add_to_comparison("Qlora model", ppl_model(model, tokenizer, dataset_ppl))
    # saving and merging the model to local disk
    print("merge and save to local disk")
    model.save_pretrained_merged(
        save_directory = "./unsloth_out/merged_llama_text_model", tokenizer = tokenizer
    )
    # print("cleaning")
    # del model
    # del tokenizer
    # torch.cuda.empty_cache()
    # gc.collect()
    # load model from local disk and test
    print("Loading merged model in 4 bit for perplexity test")
    merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
        model_name = "./unsloth_out/merged_llama_text_model",
        max_seq_length = 2048,
        load_in_4bit = True,
        load_in_8bit = False,
    )
    add_to_comparison(
        "merged model load 4bit", ppl_model(merged_model, merged_tokenizer, dataset_ppl)
    )
    # The 8-bit load happens in a subprocess so it cannot clash with the
    # models already resident on the GPU in this process.
    print("Computing 8-bit model perplexity in subprocess...")
    result_queue = mp.Queue()
    p = mp.Process(target = load_and_compute_8bit_ppl, args = (result_queue, False, True))
    p.start()
    p.join()
    ppl_8bit = result_queue.get()
    add_to_comparison("merged model loaded 8bits", ppl_8bit)
    print("Loading merged model in 16 bit for perplexity test")
    merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
        model_name = "./unsloth_out/merged_llama_text_model",
        max_seq_length = 2048,
        load_in_4bit = False,
        load_in_8bit = False,
    )
    add_to_comparison(
        "merged model loaded 16bits",
        ppl_model(merged_model, merged_tokenizer, dataset_ppl),
    )
    print_model_comparison()
    # final cleanup
    safe_remove_directory("./outputs")
    safe_remove_directory("./unsloth_compiled_cache")
    safe_remove_directory("./unsloth_out")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/language_models/test_merge_model_perplexity_llama-3.2.py | tests/saving/language_models/test_merge_model_perplexity_llama-3.2.py | from unsloth import FastLanguageModel, FastVisionModel, UnslothVisionDataCollator
from unsloth.chat_templates import get_chat_template
from trl import SFTTrainer, SFTConfig
from transformers import (
DataCollatorForLanguageModeling,
DataCollatorForSeq2Seq,
TrainingArguments,
)
from datasets import load_dataset, Dataset
import torch
from tqdm import tqdm
import pandas as pd
import multiprocessing as mp
from multiprocessing import Process, Queue
import gc
# ruff: noqa
import sys
from pathlib import Path
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
from tests.utils.perplexity_eval import (
ppl_model,
add_to_comparison,
print_model_comparison,
)
# Define helper functions outside of main
def formatting_prompts_func(examples):
    """Turn each conversation in the batch into one chat-templated string.

    Uses the module-global ``tokenizer`` (configured in ``__main__``);
    output is a single "text" column for ``Dataset.map(batched=True)``.
    """
    formatted = []
    for dialogue in examples["messages"]:
        text = tokenizer.apply_chat_template(
            dialogue, tokenize = False, add_generation_prompt = False
        )
        formatted.append(text)
    return {"text": formatted}
def load_and_compute_8bit_ppl(result_queue, load_in_4bit = False, load_in_8bit = False):
    """Load the merged model in a fresh subprocess and report its perplexity.

    Runs in a spawned child process so the quantized load does not collide
    with CUDA state already held by the parent.

    Args:
        result_queue: multiprocessing.Queue; the perplexity (a plain float)
            is pushed onto it as the single result.
        load_in_4bit: load the merged checkpoint with 4-bit quantization.
        load_in_8bit: load the merged checkpoint with 8-bit quantization.
    """
    # Import inside the function: with the "spawn" start method the child
    # re-imports this module, and these imports must succeed there.
    from unsloth import FastLanguageModel
    from unsloth.chat_templates import get_chat_template
    from tests.utils.perplexity_eval import ppl_model
    # Load model
    merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
        model_name = "./unsloth_out/merged_llama_text_model",
        max_seq_length = 2048,
        load_in_4bit = load_in_4bit,
        load_in_8bit = load_in_8bit,
    )
    # Set up tokenizer
    merged_tokenizer = get_chat_template(
        merged_tokenizer,
        chat_template = "llama-3.1",
    )
    # Load dataset fresh in subprocess
    dataset_ppl = load_dataset(
        "allenai/openassistant-guanaco-reformatted", split = "eval"
    )
    # Format the dataset (local duplicate so the child does not depend on
    # parent-process globals)
    def formatting_prompts_func(examples):
        convos = examples["messages"]
        texts = [
            merged_tokenizer.apply_chat_template(
                convo, tokenize = False, add_generation_prompt = False
            )
            for convo in convos
        ]
        return {"text": texts}
    dataset_ppl = dataset_ppl.map(formatting_prompts_func, batched = True)
    # Compute perplexity using the passed dataset
    ppl_value = ppl_model(merged_model, merged_tokenizer, dataset_ppl)
    # IMPORTANT: Convert to Python float if it's a tensor — a CUDA tensor
    # cannot be pickled through the queue back to the parent.
    if torch.is_tensor(ppl_value):
        ppl_value = ppl_value.cpu().item()  # Move to CPU and convert to Python scalar
    elif hasattr(ppl_value, "item"):
        ppl_value = ppl_value.item()  # Convert numpy or other array types
    else:
        ppl_value = float(ppl_value)  # Ensure it's a float
    # Return only the perplexity value
    result_queue.put(ppl_value)
    # Clean up
    del merged_model
    del merged_tokenizer
    del dataset_ppl
    torch.cuda.empty_cache()
    gc.collect()
# Main execution code should be wrapped in this guard: the 8-bit perplexity
# check below spawns a child process that re-imports this module.
if __name__ == "__main__":
    # "spawn" gives the child a clean CUDA context (fork is unsafe with CUDA).
    mp.set_start_method("spawn", force = True)
    if torch.cuda.is_bf16_supported():
        compute_dtype = torch.bfloat16
        attn_implementation = "flash_attention_2"
    else:
        compute_dtype = torch.float16
        attn_implementation = "sdpa"
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name = "unsloth/Llama-3.2-3B-Instruct",
        max_seq_length = 2048,
        dtype = compute_dtype,
        load_in_4bit = True,
        load_in_8bit = False,
        full_finetuning = False,
        attn_implementation = attn_implementation,
    )
    tokenizer = get_chat_template(
        tokenizer,
        chat_template = "llama-3.1",
    )
    from unsloth.chat_templates import standardize_sharegpt
    dataset_train = load_dataset(
        "allenai/openassistant-guanaco-reformatted", split = "train"
    )
    dataset_ppl = load_dataset(
        "allenai/openassistant-guanaco-reformatted", split = "eval"
    )
    dataset_train = dataset_train.map(formatting_prompts_func, batched = True)
    dataset_ppl = dataset_ppl.map(formatting_prompts_func, batched = True)
    # Baseline perplexity before any fine-tuning.
    add_to_comparison("Base model 4 bits", ppl_model(model, tokenizer, dataset_ppl))
    model = FastLanguageModel.get_peft_model(
        model,
        r = 16,
        target_modules = [
            "k_proj",
            "q_proj",
            "v_proj",
            "o_proj",
            "gate_proj",
            "down_proj",
            "up_proj",
        ],
        lora_alpha = 16,
        lora_dropout = 0,
        bias = "none",
        use_gradient_checkpointing = "unsloth",
        random_state = 3407,
        use_rslora = False,
        loftq_config = None,
    )
    from unsloth import is_bfloat16_supported
    trainer = SFTTrainer(
        model = model,
        tokenizer = tokenizer,
        train_dataset = dataset_train,
        dataset_text_field = "text",
        max_seq_length = 2048,
        data_collator = DataCollatorForSeq2Seq(tokenizer = tokenizer),
        dataset_num_proc = 2,
        packing = False,
        args = TrainingArguments(
            per_device_train_batch_size = 2,
            gradient_accumulation_steps = 4,
            warmup_ratio = 0.1,
            max_steps = 10,
            learning_rate = 2e-4,
            fp16 = not is_bfloat16_supported(),
            bf16 = is_bfloat16_supported(),
            logging_steps = 50,
            optim = "adamw_8bit",
            lr_scheduler_type = "linear",
            seed = 3407,
            output_dir = "outputs",
            report_to = "none",
        ),
    )
    from unsloth.chat_templates import train_on_responses_only
    # Mask user turns so loss is computed only on assistant responses.
    trainer = train_on_responses_only(
        trainer,
        instruction_part = "<|start_header_id|>user<|end_header_id|>\n\n",
        response_part = "<|start_header_id|>assistant<|end_header_id|>\n\n",
    )
    # run training
    trainer_stats = trainer.train()
    add_to_comparison("Qlora model", ppl_model(model, tokenizer, dataset_ppl))
    # saving and merging the model to local disk
    print("merge and save to local disk")
    model.save_pretrained_merged(
        save_directory = "./unsloth_out/merged_llama_text_model", tokenizer = tokenizer
    )
    # print("cleaning")
    # del model
    # del tokenizer
    # torch.cuda.empty_cache()
    # gc.collect()
    # load model from local disk and test
    print("Loading merged model in 4 bit for perplexity test")
    merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
        model_name = "./unsloth_out/merged_llama_text_model",
        max_seq_length = 2048,
        load_in_4bit = True,
        load_in_8bit = False,
    )
    add_to_comparison(
        "merged model load 4bit", ppl_model(merged_model, merged_tokenizer, dataset_ppl)
    )
    # The 8-bit load happens in a subprocess so it cannot clash with the
    # models already resident on the GPU in this process.
    print("Computing 8-bit model perplexity in subprocess...")
    result_queue = mp.Queue()
    p = mp.Process(target = load_and_compute_8bit_ppl, args = (result_queue, False, True))
    p.start()
    p.join()
    ppl_8bit = result_queue.get()
    add_to_comparison("merged model loaded 8bits", ppl_8bit)
    print("Loading merged model in 16 bit for perplexity test")
    merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
        model_name = "./unsloth_out/merged_llama_text_model",
        max_seq_length = 2048,
        load_in_4bit = False,
        load_in_8bit = False,
    )
    add_to_comparison(
        "merged model loaded 16bits",
        ppl_model(merged_model, merged_tokenizer, dataset_ppl),
    )
    print_model_comparison()
    # final cleanup
    safe_remove_directory("./outputs")
    safe_remove_directory("./unsloth_compiled_cache")
    safe_remove_directory("./unsloth_out")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/language_models/test_merge_model_perplexity_mistral.py | tests/saving/language_models/test_merge_model_perplexity_mistral.py | from unsloth import FastLanguageModel, FastVisionModel, UnslothVisionDataCollator
from unsloth.chat_templates import get_chat_template
from trl import SFTTrainer, SFTConfig
from transformers import (
DataCollatorForLanguageModeling,
DataCollatorForSeq2Seq,
TrainingArguments,
)
from datasets import load_dataset, Dataset
import torch
from tqdm import tqdm
import pandas as pd
import multiprocessing as mp
from multiprocessing import Process, Queue
import gc
# ruff: noqa
import sys
from pathlib import Path
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
from tests.utils.perplexity_eval import (
ppl_model,
add_to_comparison,
print_model_comparison,
)
def load_and_compute_8bit_ppl(result_queue, load_in_4bit = False, load_in_8bit = False):
    """Load the merged Mistral model in a subprocess and report its perplexity.

    Runs in a spawned child process so the quantized load does not collide
    with CUDA state already held by the parent.

    Args:
        result_queue: multiprocessing.Queue; the perplexity (a plain float)
            is pushed onto it as the single result.
        load_in_4bit: load the merged checkpoint with 4-bit quantization.
        load_in_8bit: load the merged checkpoint with 8-bit quantization.
    """
    # Import inside the function: with the "spawn" start method the child
    # re-imports this module, and these imports must succeed there.
    from unsloth import FastLanguageModel
    from tests.utils.perplexity_eval import ppl_model
    # Load model
    merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
        model_name = "./unsloth_out/merged_mistral_text_model",
        max_seq_length = 2048,
        load_in_4bit = load_in_4bit,
        load_in_8bit = load_in_8bit,
    )
    # Set up tokenizer (not needed for Mistral: the Alpaca prompt below is
    # used instead of a chat template)
    # merged_tokenizer = get_chat_template(
    #     merged_tokenizer,
    #     chat_template="llama-3.1",
    # )
    # Load dataset fresh in subprocess
    dataset_ppl = load_dataset(
        "allenai/openassistant-guanaco-reformatted", split = "eval"
    )
    alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
{}
### Input:
{}
### Response:
{}"""
    EOS_TOKEN = merged_tokenizer.eos_token
    # Local duplicate of the module-level formatter so the spawned child does
    # not depend on parent-process globals. Appends EOS to each example.
    def formatting_prompts_func(examples):
        instructions = []
        inputs = []
        outputs = []
        texts = []
        for conversation in examples["messages"]:
            # Extract user message and assistant response
            user_message = ""
            assistant_message = ""
            for turn in conversation:
                if turn["role"] == "user":
                    user_message = turn["content"]
                elif turn["role"] == "assistant":
                    assistant_message = turn["content"]
            # Store intermediate format
            instruction = "Complete the statement"
            instructions.append(instruction)
            inputs.append(user_message)
            outputs.append(assistant_message)
            # Create formatted text
            text = (
                alpaca_prompt.format(instruction, user_message, assistant_message)
                + EOS_TOKEN
            )
            texts.append(text)
        return {
            "instruction": instructions,
            "input": inputs,
            "output": outputs,
            "text": texts,
        }
    dataset_ppl = dataset_ppl.map(formatting_prompts_func, batched = True)
    # Compute perplexity using the passed dataset
    ppl_value = ppl_model(merged_model, merged_tokenizer, dataset_ppl)
    # IMPORTANT: Convert to Python float if it's a tensor — a CUDA tensor
    # cannot be pickled through the queue back to the parent.
    if torch.is_tensor(ppl_value):
        ppl_value = ppl_value.cpu().item()  # Move to CPU and convert to Python scalar
    elif hasattr(ppl_value, "item"):
        ppl_value = ppl_value.item()  # Convert numpy or other array types
    else:
        ppl_value = float(ppl_value)  # Ensure it's a float
    # Return only the perplexity value
    result_queue.put(ppl_value)
    # Clean up
    del merged_model
    del merged_tokenizer
    del dataset_ppl
    torch.cuda.empty_cache()
    gc.collect()
# Main execution code should be wrapped in this guard
if __name__ == "__main__":
    # "spawn" is required so CUDA can be re-initialised safely in the
    # 8-bit perplexity subprocess.
    mp.set_start_method("spawn", force = True)
    # Pick the best precision / attention backend the GPU supports.
    if torch.cuda.is_bf16_supported():
        compute_dtype = torch.bfloat16
        attn_implementation = "flash_attention_2"
    else:
        compute_dtype = torch.float16
        attn_implementation = "sdpa"
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name = "unsloth/mistral-7b-v0.3",
        max_seq_length = 2048,
        dtype = compute_dtype,
        load_in_4bit = True,
        load_in_8bit = False,
        full_finetuning = False,
        attn_implementation = attn_implementation,
    )
    EOS_TOKEN = tokenizer.eos_token
    alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
{}
### Input:
{}
### Response:
{}"""
    # Helper: flatten each chat conversation into one Alpaca-style string.
    def formatting_prompts_func(examples):
        instructions = []
        inputs = []
        outputs = []
        texts = []
        for conversation in examples["messages"]:
            # Extract user message and assistant response
            user_message = ""
            assistant_message = ""
            for turn in conversation:
                if turn["role"] == "user":
                    user_message = turn["content"]
                elif turn["role"] == "assistant":
                    assistant_message = turn["content"]
            # Store intermediate format
            instruction = "Complete the statement"
            instructions.append(instruction)
            inputs.append(user_message)
            outputs.append(assistant_message)
            # Create formatted text
            text = (
                alpaca_prompt.format(instruction, user_message, assistant_message)
                + EOS_TOKEN
            )
            texts.append(text)
        return {
            "instruction": instructions,
            "input": inputs,
            "output": outputs,
            "text": texts,
        }
    dataset_train = load_dataset(
        "allenai/openassistant-guanaco-reformatted", split = "train"
    )
    dataset_ppl = load_dataset(
        "allenai/openassistant-guanaco-reformatted", split = "eval"
    )
    dataset_train = dataset_train.map(formatting_prompts_func, batched = True)
    dataset_ppl = dataset_ppl.map(formatting_prompts_func, batched = True)
    # Baseline: perplexity of the untouched 4-bit base model.
    add_to_comparison("Base model 4 bits", ppl_model(model, tokenizer, dataset_ppl))
    model = FastLanguageModel.get_peft_model(
        model,
        r = 16,
        target_modules = [
            "k_proj",
            "q_proj",
            "v_proj",
            "o_proj",
            "gate_proj",
            "down_proj",
            "up_proj",
        ],
        lora_alpha = 16,
        lora_dropout = 0,
        bias = "none",
        use_gradient_checkpointing = "unsloth",
        random_state = 3407,
        use_rslora = False,
        loftq_config = None,
    )
    from unsloth import is_bfloat16_supported
    trainer = SFTTrainer(
        model = model,
        tokenizer = tokenizer,
        train_dataset = dataset_train,
        dataset_text_field = "text",
        max_seq_length = 2048,
        dataset_num_proc = 2,
        packing = False,
        args = TrainingArguments(
            per_device_train_batch_size = 2,
            gradient_accumulation_steps = 4,
            warmup_ratio = 0.1,
            max_steps = 200,
            learning_rate = 2e-4,
            fp16 = not is_bfloat16_supported(),
            bf16 = is_bfloat16_supported(),
            logging_steps = 50,
            optim = "adamw_8bit",
            lr_scheduler_type = "linear",
            seed = 3407,
            output_dir = "outputs",
            report_to = "none",
        ),
    )
    # run training
    trainer_stats = trainer.train()
    # Perplexity of the QLoRA-adapted model (adapters still separate).
    add_to_comparison("Qlora model", ppl_model(model, tokenizer, dataset_ppl))
    # saving and merging the model to local disk
    print("merge and save to local disk")
    model.save_pretrained_merged(
        save_directory = "./unsloth_out/merged_mistral_text_model", tokenizer = tokenizer
    )
    # print("cleaning")
    # del model
    # del tokenizer
    # torch.cuda.empty_cache()
    # gc.collect()
    # load model from local disk and test
    print("Loading merged model in 4 bit for perplexity test")
    merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
        model_name = "./unsloth_out/merged_mistral_text_model",
        max_seq_length = 2048,
        load_in_4bit = True,
        load_in_8bit = False,
    )
    add_to_comparison(
        "merged model load 4bit", ppl_model(merged_model, merged_tokenizer, dataset_ppl)
    )
    # 8-bit check runs in a subprocess so it gets its own CUDA context.
    print("Computing 8-bit model perplexity in subprocess...")
    result_queue = mp.Queue()
    p = mp.Process(target = load_and_compute_8bit_ppl, args = (result_queue, False, True))
    p.start()
    p.join()
    ppl_8bit = result_queue.get()
    add_to_comparison("merged model loaded 8bits", ppl_8bit)
    print("Loading merged model in 16 bit for perplexity test")
    merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
        model_name = "./unsloth_out/merged_mistral_text_model",
        max_seq_length = 2048,
        load_in_4bit = False,
        load_in_8bit = False,
    )
    add_to_comparison(
        "merged model loaded 16bits",
        ppl_model(merged_model, merged_tokenizer, dataset_ppl),
    )
    print_model_comparison()
    # final cleanup of artifacts produced by the run
    safe_remove_directory("./outputs")
    safe_remove_directory("./unsloth_compiled_cache")
    safe_remove_directory("./unsloth_out")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/language_models/test_merge_model_perplexity_phi_4.py | tests/saving/language_models/test_merge_model_perplexity_phi_4.py | from unsloth import FastLanguageModel, FastVisionModel, UnslothVisionDataCollator
from unsloth.chat_templates import get_chat_template
from trl import SFTTrainer, SFTConfig
from transformers import (
DataCollatorForLanguageModeling,
DataCollatorForSeq2Seq,
TrainingArguments,
)
from datasets import load_dataset, Dataset
import torch
from tqdm import tqdm
import pandas as pd
import multiprocessing as mp
from multiprocessing import Process, Queue
import gc
# ruff: noqa
import sys
from pathlib import Path
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
from tests.utils.perplexity_eval import (
ppl_model,
add_to_comparison,
print_model_comparison,
)
# Define helper functions outside of main
def formatting_prompts_func(examples, tok = None):
    """Render each chat conversation into a single templated training string.

    Args:
        examples: Batched ``datasets`` slice with a ``"messages"`` column,
            where each entry is a list of chat turns.
        tok: Optional tokenizer to use. Defaults to the module-level
            ``tokenizer`` so existing ``dataset.map(...)`` calls are
            unchanged; passing one explicitly makes the helper testable.

    Returns:
        Dict with a ``"text"`` column holding the templated conversations.
    """
    if tok is None:
        tok = tokenizer  # fall back to the globally configured tokenizer
    convos = examples["messages"]
    texts = [
        tok.apply_chat_template(
            convo, tokenize = False, add_generation_prompt = False
        )
        for convo in convos
    ]
    return {
        "text": texts,
    }
def load_and_compute_8bit_ppl(result_queue, load_in_4bit = False, load_in_8bit = False):
    """Load the merged Phi-4 model in this subprocess and report perplexity.

    Executed in a spawned child process so the quantized load gets a clean
    CUDA context, independent of the models held by the parent. The computed
    perplexity is pushed onto ``result_queue`` as a plain Python float.

    Args:
        result_queue: ``multiprocessing.Queue`` used to return the result.
        load_in_4bit: Load the merged checkpoint with 4-bit quantization.
        load_in_8bit: Load the merged checkpoint with 8-bit quantization.
    """
    # Re-import inside the subprocess: with the "spawn" start method the
    # child does not share the parent's module state.
    from unsloth import FastLanguageModel
    from unsloth.chat_templates import get_chat_template
    from tests.utils.perplexity_eval import ppl_model
    # Load model
    merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
        model_name = "./unsloth_out/merged_phi4_text_model",
        max_seq_length = 2048,
        load_in_4bit = load_in_4bit,
        load_in_8bit = load_in_8bit,
    )
    # Set up tokenizer
    merged_tokenizer = get_chat_template(
        merged_tokenizer,
        chat_template = "phi-4",
    )
    # Load dataset fresh in subprocess
    dataset_ppl = load_dataset(
        "allenai/openassistant-guanaco-reformatted", split = "eval"
    )
    # Format the dataset with the phi-4 chat template
    def formatting_prompts_func(examples):
        convos = examples["messages"]
        texts = [
            merged_tokenizer.apply_chat_template(
                convo, tokenize = False, add_generation_prompt = False
            )
            for convo in convos
        ]
        return {"text": texts}
    dataset_ppl = dataset_ppl.map(formatting_prompts_func, batched = True)
    # Compute perplexity using the passed dataset
    ppl_value = ppl_model(merged_model, merged_tokenizer, dataset_ppl)
    # IMPORTANT: Convert to Python float if it's a tensor, so the value can
    # be pickled across the process boundary.
    if torch.is_tensor(ppl_value):
        ppl_value = ppl_value.cpu().item()  # Move to CPU and convert to Python scalar
    elif hasattr(ppl_value, "item"):
        ppl_value = ppl_value.item()  # Convert numpy or other array types
    else:
        ppl_value = float(ppl_value)  # Ensure it's a float
    # Return only the perplexity value
    result_queue.put(ppl_value)
    # Clean up GPU memory before the subprocess exits
    del merged_model
    del merged_tokenizer
    del dataset_ppl
    torch.cuda.empty_cache()
    gc.collect()
# Main execution code should be wrapped in this guard
if __name__ == "__main__":
    # "spawn" is required so CUDA can be re-initialised safely in the
    # 8-bit perplexity subprocess.
    mp.set_start_method("spawn", force = True)
    # Pick the best precision / attention backend the GPU supports.
    if torch.cuda.is_bf16_supported():
        compute_dtype = torch.bfloat16
        attn_implementation = "flash_attention_2"
    else:
        compute_dtype = torch.float16
        attn_implementation = "sdpa"
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name = "unsloth/Phi-4",
        max_seq_length = 2048,
        dtype = compute_dtype,
        load_in_4bit = True,
        load_in_8bit = False,
        full_finetuning = False,
        attn_implementation = attn_implementation,
    )
    tokenizer = get_chat_template(
        tokenizer,
        chat_template = "phi-4",
    )
    dataset_train = load_dataset(
        "allenai/openassistant-guanaco-reformatted", split = "train"
    )
    dataset_ppl = load_dataset(
        "allenai/openassistant-guanaco-reformatted", split = "eval"
    )
    dataset_train = dataset_train.map(formatting_prompts_func, batched = True)
    dataset_ppl = dataset_ppl.map(formatting_prompts_func, batched = True)
    # Baseline: perplexity of the untouched 4-bit base model.
    add_to_comparison("Base model 4 bits", ppl_model(model, tokenizer, dataset_ppl))
    model = FastLanguageModel.get_peft_model(
        model,
        r = 16,
        target_modules = [
            "k_proj",
            "q_proj",
            "v_proj",
            "o_proj",
            "gate_proj",
            "down_proj",
            "up_proj",
        ],
        lora_alpha = 16,
        lora_dropout = 0,
        bias = "none",
        use_gradient_checkpointing = "unsloth",
        random_state = 3407,
        use_rslora = False,
        loftq_config = None,
    )
    from unsloth import is_bfloat16_supported
    trainer = SFTTrainer(
        model = model,
        tokenizer = tokenizer,
        train_dataset = dataset_train,
        dataset_text_field = "text",
        max_seq_length = 2048,
        data_collator = DataCollatorForSeq2Seq(tokenizer = tokenizer),
        dataset_num_proc = 2,
        packing = False,
        args = TrainingArguments(
            per_device_train_batch_size = 2,
            gradient_accumulation_steps = 4,
            warmup_ratio = 0.1,
            max_steps = 200,
            learning_rate = 2e-4,
            fp16 = not is_bfloat16_supported(),
            bf16 = is_bfloat16_supported(),
            logging_steps = 50,
            optim = "adamw_8bit",
            lr_scheduler_type = "linear",
            seed = 3407,
            output_dir = "outputs",
            report_to = "none",
        ),
    )
    from unsloth.chat_templates import train_on_responses_only
    # Mask out prompts so the loss is computed on assistant responses only.
    trainer = train_on_responses_only(
        trainer,
        instruction_part = "<|im_start|>user<|im_sep|>\n\n",
        response_part = "<|im_start|>assistant<|im_sep|>\n\n",
    )
    # run training
    trainer_stats = trainer.train()
    # Perplexity of the QLoRA-adapted model (adapters still separate).
    add_to_comparison("Qlora model", ppl_model(model, tokenizer, dataset_ppl))
    # saving and merging the model to local disk
    print("merge and save to local disk")
    model.save_pretrained_merged(
        save_directory = "./unsloth_out/merged_phi4_text_model", tokenizer = tokenizer
    )
    # print("cleaning")
    # del model
    # del tokenizer
    # torch.cuda.empty_cache()
    # gc.collect()
    # load model from local disk and test
    print("Loading merged model in 4 bit for perplexity test")
    merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
        model_name = "./unsloth_out/merged_phi4_text_model",
        max_seq_length = 2048,
        load_in_4bit = True,
        load_in_8bit = False,
    )
    add_to_comparison(
        "merged model load 4bit", ppl_model(merged_model, merged_tokenizer, dataset_ppl)
    )
    # 8-bit check runs in a subprocess so it gets its own CUDA context.
    print("Computing 8-bit model perplexity in subprocess...")
    result_queue = mp.Queue()
    p = mp.Process(target = load_and_compute_8bit_ppl, args = (result_queue, False, True))
    p.start()
    p.join()
    ppl_8bit = result_queue.get()
    add_to_comparison("merged model loaded 8bits", ppl_8bit)
    print("Loading merged model in 16 bit for perplexity test")
    merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
        model_name = "./unsloth_out/merged_phi4_text_model",
        max_seq_length = 2048,
        load_in_4bit = False,
        load_in_8bit = False,
    )
    add_to_comparison(
        "merged model loaded 16bits",
        ppl_model(merged_model, merged_tokenizer, dataset_ppl),
    )
    print_model_comparison()
    # final cleanup
    safe_remove_directory("./outputs")
    safe_remove_directory("./unsloth_compiled_cache")
    safe_remove_directory("./unsloth_out")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/language_models/test_push_to_hub_merged_sharded_index_file.py | tests/saving/language_models/test_push_to_hub_merged_sharded_index_file.py | from unsloth import FastLanguageModel, FastVisionModel, UnslothVisionDataCollator
from unsloth.chat_templates import get_chat_template
from trl import SFTTrainer, SFTConfig
from transformers import (
DataCollatorForLanguageModeling,
DataCollatorForSeq2Seq,
TrainingArguments,
)
from datasets import load_dataset, Dataset
import torch
from tqdm import tqdm
import pandas as pd
import multiprocessing as mp
from multiprocessing import Process, Queue
import gc
import os
from huggingface_hub import HfFileSystem, hf_hub_download
# ruff: noqa
import sys
from pathlib import Path
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
from tests.utils.perplexity_eval import (
ppl_model,
add_to_comparison,
print_model_comparison,
)
# Define helper functions outside of main
def formatting_prompts_func(examples, tok = None):
    """Apply the chat template to a batch of conversations.

    Args:
        examples: Batched ``datasets`` slice with a ``"messages"`` column.
        tok: Optional tokenizer override. Defaults to the module-level
            ``tokenizer`` so existing ``dataset.map(...)`` usage is
            unchanged; passing one explicitly makes the helper testable.

    Returns:
        Dict with a ``"text"`` column of templated conversation strings.
    """
    if tok is None:
        tok = tokenizer  # fall back to the globally configured tokenizer
    convos = examples["messages"]
    texts = [
        tok.apply_chat_template(
            convo, tokenize = False, add_generation_prompt = False
        )
        for convo in convos
    ]
    return {"text": texts}
# Pick the best precision / attention backend the GPU supports.
if torch.cuda.is_bf16_supported():
    compute_dtype = torch.bfloat16
    attn_implementation = "flash_attention_2"
else:
    compute_dtype = torch.float16
    attn_implementation = "sdpa"
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "unsloth/Llama-3.1-8B-Instruct",
    max_seq_length = 2048,
    dtype = compute_dtype,
    load_in_4bit = True,
    load_in_8bit = False,
    full_finetuning = False,
    attn_implementation = attn_implementation,
)
tokenizer = get_chat_template(
    tokenizer,
    chat_template = "llama-3.1",
)
from unsloth.chat_templates import standardize_sharegpt
dataset_train = load_dataset("allenai/openassistant-guanaco-reformatted", split = "train")
dataset_ppl = load_dataset("allenai/openassistant-guanaco-reformatted", split = "eval")
dataset_train = dataset_train.map(formatting_prompts_func, batched = True)
dataset_ppl = dataset_ppl.map(formatting_prompts_func, batched = True)
# Baseline perplexity of the untouched 4-bit base model.
add_to_comparison("Base model 4 bits", ppl_model(model, tokenizer, dataset_ppl))
model = FastLanguageModel.get_peft_model(
    model,
    r = 16,
    target_modules = [
        "k_proj",
        "q_proj",
        "v_proj",
        "o_proj",
        "gate_proj",
        "down_proj",
        "up_proj",
    ],
    lora_alpha = 16,
    lora_dropout = 0,
    bias = "none",
    use_gradient_checkpointing = "unsloth",
    random_state = 3407,
    use_rslora = False,
    loftq_config = None,
)
from unsloth import is_bfloat16_supported
trainer = SFTTrainer(
    model = model,
    tokenizer = tokenizer,
    train_dataset = dataset_train,
    dataset_text_field = "text",
    max_seq_length = 2048,
    data_collator = DataCollatorForSeq2Seq(tokenizer = tokenizer),
    dataset_num_proc = 2,
    packing = False,
    args = TrainingArguments(
        per_device_train_batch_size = 2,
        gradient_accumulation_steps = 4,
        warmup_ratio = 0.1,
        max_steps = 30,
        learning_rate = 2e-4,
        fp16 = not is_bfloat16_supported(),
        bf16 = is_bfloat16_supported(),
        logging_steps = 50,
        optim = "adamw_8bit",
        lr_scheduler_type = "linear",
        seed = 3407,
        output_dir = "outputs",
        report_to = "none",
    ),
)
from unsloth.chat_templates import train_on_responses_only
# Mask out prompts so the loss is computed on assistant responses only.
trainer = train_on_responses_only(
    trainer,
    instruction_part = "<|start_header_id|>user<|end_header_id|>\n\n",
    response_part = "<|start_header_id|>assistant<|end_header_id|>\n\n",
)
# run training
trainer_stats = trainer.train()
# saving and merging the model to local disk
# Resolve Hub credentials from the environment, prompting interactively
# only when they are missing.
hf_username = os.environ.get("HF_USER", "")
if not hf_username:
    hf_username = input("Please enter your Hugging Face username: ").strip()
    os.environ["HF_USER"] = hf_username
hf_token = os.environ.get("HF_TOKEN", "")
if not hf_token:
    hf_token = input("Please enter your Hugging Face token: ").strip()
    os.environ["HF_TOKEN"] = hf_token
repo_name = f"{hf_username}/merged_llama_text_model"
# Tracks which validation stages completed, for the final report.
success = {
    "upload": False,
    "safetensors_check": False,
    "download": False,
}
# Stage 1: Upload model to Hub
try:
    print("\n" + "=" * 80)
    print("=== UPLOADING MODEL TO HUB ===".center(80))
    print("=" * 80 + "\n")
    model.push_to_hub_merged(repo_name, tokenizer = tokenizer, token = hf_token)
    success["upload"] = True
    print("✅ Model uploaded successfully!")
except Exception as e:
    print(f"❌ Failed to upload model: {e}")
    raise Exception("Model upload failed.")
# Stage 2: Verify safetensors.index.json exists
# (the merged 8B model must be sharded, so the index file must be present)
try:
    print("\n" + "=" * 80)
    print("=== VERIFYING REPO CONTENTS ===".center(80))
    print("=" * 80 + "\n")
    fs = HfFileSystem(token = hf_token)
    file_list = fs.ls(repo_name, detail = True)
    safetensors_found = any(
        file["name"].endswith("model.safetensors.index.json") for file in file_list
    )
    if safetensors_found:
        success["safetensors_check"] = True
        print("✅ model.safetensors.index.json found in repo!")
    else:
        raise Exception("model.safetensors.index.json not found in repo.")
except Exception as e:
    print(f"❌ Verification failed: {e}")
    raise Exception("Repo verification failed.")
# Stage 3: Test downloading the model (even if cached)
safe_remove_directory("./RTannous")
try:
    print("\n" + "=" * 80)
    print("=== TESTING MODEL DOWNLOAD ===".center(80))
    print("=" * 80 + "\n")
    # Force download even if cached
    model, tokenizer = FastLanguageModel.from_pretrained(
        f"{hf_username}/merged_llama_text_model"
    )
    success["download"] = True
    print("✅ Model downloaded successfully!")
except Exception as e:
    print(f"❌ Download failed: {e}")
    raise Exception("Model download failed.")
# Final report
print("\n" + "=" * 80)
print("=== VALIDATION REPORT ===".center(80))
print("=" * 80 + "\n")
for stage, passed in success.items():
    status = "✓" if passed else "✗"
    print(f"{status} {stage.replace('_', ' ').title()}")
print("\n" + "=" * 80)
if all(success.values()):
    print("\n🎉 All stages completed successfully!")
else:
    raise Exception("Validation failed for one or more stages.")
# final cleanup
safe_remove_directory("./outputs")
safe_remove_directory("./unsloth_compiled_cache")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/non_peft/test_whisper_non_peft.py | tests/saving/non_peft/test_whisper_non_peft.py | from unsloth import FastLanguageModel, FastModel
from transformers import AutoModelForCausalLM, WhisperForConditionalGeneration
from peft import PeftModel
from pathlib import Path
import sys
import warnings
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
# Phase 1: load the base Whisper model (no LoRA adapters attached).
print(f"\n{'='*80}")
print("🔍 PHASE 1: Loading Base Model")
print(f"{'='*80}")
model, tokenizer = FastModel.from_pretrained(
    model_name = "unsloth/whisper-large-v3",
    dtype = None, # Leave as None for auto detection
    load_in_4bit = False, # Set to True to do 4bit quantization which reduces memory
    auto_model = WhisperForConditionalGeneration,
    whisper_language = "English",
    whisper_task = "transcribe",
    # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
)
print("✅ Base model loaded successfully!")
### Attempting save merge
print(f"\n{'='*80}")
print("🔍 PHASE 2: Attempting save_pretrained_merged (Should Warn)")
print(f"{'='*80}")
with warnings.catch_warnings(record = True) as w:
    warnings.simplefilter("always")
    model.save_pretrained_merged("test_output", tokenizer)
# Verify warning: a plain (non-PEFT) model must refuse to merge and warn.
assert len(w) >= 1, "Expected warning but none raised"
warning_msg = str(w[0].message)
expected_msg = "Model is not a PeftModel (no Lora adapters detected). Skipping Merge. Please use save_pretrained() or push_to_hub() instead!"
assert expected_msg in warning_msg, f"Unexpected warning: {warning_msg}"
print("✅ Correct warning detected for non-PeftModel merge attempt!")
print(f"\n{'='*80}")
print("🔍 PHASE 3: Using save_pretrained (Should Succeed)")
print(f"{'='*80}")
try:
    with warnings.catch_warnings():
        warnings.simplefilter("error") # Treat warnings as errors here
        model.save_pretrained("test_output")
    print("✅ Standard save_pretrained completed successfully!")
except Exception as e:
    # raise explicitly instead of `assert False`, which `python -O` strips
    raise AssertionError(f"Phase 3 failed: {e}") from e
safe_remove_directory("./test_output")
safe_remove_directory("./unsloth_compiled_cache")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/non_peft/test_mistral_non_peft.py | tests/saving/non_peft/test_mistral_non_peft.py | from unsloth import FastLanguageModel
from transformers import AutoModelForCausalLM
from peft import PeftModel
from pathlib import Path
import sys
import warnings
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
# Phase 1: load the base Mistral model (no LoRA adapters attached).
print(f"\n{'='*80}")
print("🔍 PHASE 1: Loading Base Model")
print(f"{'='*80}")
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "unsloth/mistral-7b-v0.3",
    max_seq_length = 2048,
    dtype = None,
    load_in_4bit = True,
    load_in_8bit = False,
    full_finetuning = False,
)
print("✅ Base model loaded successfully!")
### Attempting save merge
print(f"\n{'='*80}")
print("🔍 PHASE 2: Attempting save_pretrained_merged (Should Warn)")
print(f"{'='*80}")
with warnings.catch_warnings(record = True) as w:
    warnings.simplefilter("always")
    model.save_pretrained_merged("test_output", tokenizer)
# Verify warning: a plain (non-PEFT) model must refuse to merge and warn.
assert len(w) >= 1, "Expected warning but none raised"
warning_msg = str(w[0].message)
expected_msg = "Model is not a PeftModel (no Lora adapters detected). Skipping Merge. Please use save_pretrained() or push_to_hub() instead!"
assert expected_msg in warning_msg, f"Unexpected warning: {warning_msg}"
print("✅ Correct warning detected for non-PeftModel merge attempt!")
print(f"\n{'='*80}")
print("🔍 PHASE 3: Using save_pretrained (Should Succeed)")
print(f"{'='*80}")
try:
    with warnings.catch_warnings():
        warnings.simplefilter("error") # Treat warnings as errors here
        model.save_pretrained("test_output")
    print("✅ Standard save_pretrained completed successfully!")
except Exception as e:
    # raise explicitly instead of `assert False`, which `python -O` strips
    raise AssertionError(f"Phase 3 failed: {e}") from e
safe_remove_directory("./test_output")
safe_remove_directory("./unsloth_compiled_cache")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/vision_models/test_push_to_hub_merged.py | tests/saving/vision_models/test_push_to_hub_merged.py | ## Import required libraries
from unsloth import FastVisionModel, is_bf16_supported
from unsloth.trainer import UnslothVisionDataCollator
import torch
import os
from datasets import load_dataset
from trl import SFTTrainer, SFTConfig
import sys
from pathlib import Path
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
## Dataset Preparation
print("\n📊 Loading and preparing dataset...")
# French OCR dataset ("en" config); only the train split exists, so the
# eval slice is carved out of it below.
dataset = load_dataset("lbourdois/OCR-liboaccn-OPUS-MIT-5M-clean", "en", split = "train")
# To select the first 2000 examples
train_dataset = dataset.select(range(2000))
# To select the next 200 examples for evaluation
eval_dataset = dataset.select(range(2000, 2200))
print(f"✅ Dataset loaded successfully!")
print(f" 📈 Training samples: {len(train_dataset)}")
print(f" 📊 Evaluation samples: {len(eval_dataset)}")
# Convert dataset to OAI messages
def format_data(sample, system_msg = None):
    """Convert one OCR sample into an OpenAI-style chat message dict.

    Args:
        sample: Mapping with ``"question"``, ``"image"`` and ``"answer"``
            keys (the image stays a PIL.Image — assumed; verify upstream).
        system_msg: Optional system prompt. Defaults to the module-level
            ``system_message`` so existing callers are unaffected.

    Returns:
        Dict with a ``"messages"`` list: system, user (text + image),
        assistant turns.
    """
    if system_msg is None:
        system_msg = system_message  # module-level prompt defined in the script
    return {
        "messages": [
            {
                "role": "system",
                "content": [{"type": "text", "text": system_msg}],
            },
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": sample["question"],
                    },
                    {
                        "type": "image",
                        "image": sample["image"],
                    },
                ],
            },
            {
                "role": "assistant",
                "content": [{"type": "text", "text": sample["answer"]}],
            },
        ],
    }
print("\n🔄 Formatting dataset for vision training...")
system_message = "You are an expert french ocr system."
# Convert dataset to OAI messages
# need to use a list comprehension to keep the PIL.Image type; .map converts images to bytes
train_dataset = [format_data(sample) for sample in train_dataset]
eval_dataset = [format_data(sample) for sample in eval_dataset]
print("✅ Dataset formatting completed!")
"""## Finetuning Setup and Run"""
print("\n" + "=" * 80)
print("=== MODEL LOADING AND SETUP ===".center(80))
print("=" * 80 + "\n")
# Load Base Model
print("🤖 Loading base vision model...")
try:
    model, tokenizer = FastVisionModel.from_pretrained(
        # model_name = "unsloth/Qwen2-VL-7B-Instruct",
        model_name = "unsloth/Qwen2-VL-2B-Instruct",
        max_seq_length = 2048, # Choose any for long context!
        load_in_4bit = True, # 4 bit quantization to reduce memory
        load_in_8bit = False, # [NEW!] A bit more accurate, uses 2x memory
        full_finetuning = False, # [NEW!] We have full finetuning now!
    )
except Exception as e:
    print(f"❌ Failed to load base model: {e}")
    raise
print("\n🔧 Setting up LoRA configuration...")
## Lora Finetuning
try:
    model = FastVisionModel.get_peft_model(
        model,
        finetune_vision_layers = True, # Turn off for just text!
        finetune_language_layers = True, # Should leave on!
        finetune_attention_modules = True, # Attention good for GRPO
        finetune_mlp_modules = True, # Should leave on always!
        r = 16, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
        lora_alpha = 32,
        lora_dropout = 0, # Supports any, but = 0 is optimized
        bias = "none", # Supports any, but = "none" is optimized
        use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context
        random_state = 3407,
        use_rslora = False, # We support rank stabilized LoRA
        loftq_config = None, # And LoftQ
    )
    print("✅ LoRA configuration applied successfully!")
    print(f" 🎯 LoRA rank (r): 16")
    print(f" 📊 LoRA alpha: 32")
    print(f" 🔍 Vision layers: Enabled")
    print(f" 💬 Language layers: Enabled")
except Exception as e:
    print(f"❌ Failed to apply LoRA configuration: {e}")
    raise
print("\n" + "=" * 80)
print("=== TRAINING SETUP ===".center(80))
print("=" * 80 + "\n")
print("🏋️ Preparing trainer...")
FastVisionModel.for_training(model) # Enable for training!
try:
    trainer = SFTTrainer(
        model = model,
        tokenizer = tokenizer,
        data_collator = UnslothVisionDataCollator(model, tokenizer),
        train_dataset = train_dataset,
        args = SFTConfig(
            # per_device_train_batch_size = 4,
            # gradient_accumulation_steps = 8,
            per_device_train_batch_size = 2,
            gradient_accumulation_steps = 4,
            gradient_checkpointing = True,
            gradient_checkpointing_kwargs = {
                "use_reentrant": False
            }, # use non-reentrant checkpointing
            max_grad_norm = 0.3, # max gradient norm based on QLoRA paper
            warmup_ratio = 0.03,
            # num_train_epochs = 2, # Set this instead of max_steps for full training runs
            max_steps = 10,
            learning_rate = 2e-4,
            fp16 = not is_bf16_supported(),
            bf16 = is_bf16_supported(),
            logging_steps = 5,
            save_strategy = "epoch",
            optim = "adamw_torch_fused",
            weight_decay = 0.01,
            lr_scheduler_type = "linear",
            seed = 3407,
            output_dir = "checkpoints",
            report_to = "none", # For Weights and Biases
            # You MUST put the below items for vision finetuning:
            remove_unused_columns = False,
            dataset_text_field = "",
            dataset_kwargs = {"skip_prepare_dataset": True},
            dataset_num_proc = 4,
            max_seq_length = 2048,
        ),
    )
    print("✅ Trainer setup completed!")
    print(f" 📦 Batch size: 2")
    print(f" 🔄 Gradient accumulation steps: 4")
    print(f" 📈 Max training steps: 10")
    print(f" 🎯 Learning rate: 2e-4")
    print(f" 💾 Precision: {'BF16' if is_bf16_supported() else 'FP16'}")
except Exception as e:
    print(f"❌ Failed to setup trainer: {e}")
    raise
print("\n" + "=" * 80)
print("=== STARTING TRAINING ===".center(80))
print("=" * 80 + "\n")
# run training
try:
    print("🚀 Starting training process...")
    trainer_stats = trainer.train()
except Exception as e:
    print(f"❌ Training failed: {e}")
    raise
print("\n" + "=" * 80)
print("=== SAVING MODEL ===".center(80))
print("=" * 80 + "\n")
print("💾 Saving adapter model and tokenizer locally...")
try:
    model.save_pretrained("unsloth-qwen2-7vl-french-ocr-adapter", tokenizer)
    tokenizer.save_pretrained("unsloth-qwen2-7vl-french-ocr-adapter")
    print("✅ Model saved locally!")
except Exception as e:
    print(f"❌ Failed to save model locally: {e}")
    raise
# Resolve Hub credentials from the environment, prompting interactively
# only when they are missing.
hf_username = os.environ.get("HF_USER", "")
if not hf_username:
    hf_username = input("Please enter your Hugging Face username: ").strip()
    os.environ["HF_USER"] = hf_username
hf_token = os.environ.get("HF_TOKEN", "")
if not hf_token:
    hf_token = input("Please enter your Hugging Face token: ").strip()
    os.environ["HF_TOKEN"] = hf_token
repo_name = f"{hf_username}/qwen2-ocr-merged"
# Tracks which validation stages completed, for the final report.
success = {
    "upload": False,
    "download": False,
}
# Stage 1: Upload model to Hub
try:
    print("\n" + "=" * 80)
    print("=== UPLOADING MODEL TO HUB ===".center(80))
    print("=" * 80 + "\n")
    print(f"🚀 Uploading to repository: {repo_name}")
    model.push_to_hub_merged(repo_name, tokenizer = tokenizer, token = hf_token)
    success["upload"] = True
    print("✅ Model uploaded successfully!")
except Exception as e:
    print(f"❌ Failed to upload model: {e}")
    raise Exception("Model upload failed.")
# Stage 2: verify the uploaded repo can actually be loaded back.
try:
    print("\n" + "=" * 80)
    print("=== TESTING MODEL DOWNLOAD ===".center(80))
    print("=" * 80 + "\n")
    print("📥 Testing model download...")
    # Force download even if cached
    test_model, test_tokenizer = FastVisionModel.from_pretrained(repo_name)
    success["download"] = True
    print("✅ Model downloaded successfully!")
    # Clean up test model
    del test_model, test_tokenizer
    torch.cuda.empty_cache()
except Exception as e:
    print(f"❌ Download failed: {e}")
    raise Exception("Model download failed.")
# Final report
print("\n" + "=" * 80)
print("=== VALIDATION REPORT ===".center(80))
print("=" * 80 + "\n")
for stage, passed in success.items():
    status = "✅" if passed else "❌"
    print(f"{status} {stage.replace('_', ' ').title()}")
print("\n" + "=" * 80)
if all(success.values()):
    print("\n🎉 All stages completed successfully!")
    print(f"🌐 Your model is available at: https://huggingface.co/{repo_name}")
else:
    raise Exception("Validation failed for one or more stages.")
# Final cleanup
print("\n🧹 Cleaning up temporary files...")
safe_remove_directory("./checkpoints")
safe_remove_directory("./unsloth_compiled_cache")
safe_remove_directory("./unsloth-qwen2-7vl-french-ocr-adapter")
safe_remove_directory(f"./{hf_username}")
print("\n🎯 Pipeline completed successfully!")
print("=" * 80)
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/vision_models/test_save_merge_qwen2.5vl32B_model_ocr_benchmark.py | tests/saving/vision_models/test_save_merge_qwen2.5vl32B_model_ocr_benchmark.py | # -*- coding: utf-8 -*-
from unsloth import FastVisionModel
import torch
from qwen_vl_utils import process_vision_info
import os
from datasets import load_dataset
from trl import SFTTrainer, SFTConfig
import sys
from pathlib import Path
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
from tests.utils.ocr_eval import OCRModelEvaluator
## Dataset Preparation
from datasets import load_dataset
dataset = load_dataset("lbourdois/OCR-liboaccn-OPUS-MIT-5M-clean", "en", split = "train")
# To select the first 2000 examples
train_dataset = dataset.select(range(2000))
# To select the next 200 examples for evaluation
eval_dataset = dataset.select(range(2000, 2200))
# Convert dataset to OAI messages
def format_data(sample, system_prompt = None):
    """Convert a raw OCR sample into an OpenAI-style chat ``messages`` dict.

    Args:
        sample: Mapping with ``question`` (user text), ``image`` (the image
            payload, e.g. a PIL image kept as-is), and ``answer`` (expected
            assistant text).
        system_prompt: Optional system-message text. When ``None`` (the
            default, matching the original call sites), the module-level
            ``system_message`` global is used — looked up lazily at call
            time, preserving the original behavior where the global is
            defined after this function.

    Returns:
        Dict with a single ``messages`` key holding system/user/assistant turns.
    """
    return {
        "messages": [
            {
                "role": "system",
                "content": [
                    {
                        "type": "text",
                        # Conditional expression only touches the global when
                        # no explicit prompt is supplied (lazy lookup).
                        "text": system_prompt
                        if system_prompt is not None
                        else system_message,
                    }
                ],
            },
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": sample["question"],
                    },
                    {
                        "type": "image",
                        "image": sample["image"],
                    },
                ],
            },
            {
                "role": "assistant",
                "content": [{"type": "text", "text": sample["answer"]}],
            },
        ],
    }
system_message = "You are an expert french ocr system."
# Convert dataset to OAI messages
# need to use list comprehension to keep Pil.Image type, .mape convert image to bytes
train_dataset = [format_data(sample) for sample in train_dataset]
eval_dataset = [format_data(sample) for sample in eval_dataset]
## Setup OCR main evaluation function and helpers
import os
import torch
from tqdm import tqdm
import pandas as pd
from jiwer import wer, cer
from qwen_vl_utils import process_vision_info
#
ocr_evaluator = OCRModelEvaluator()
model_comparison_results = {}
## Finetuning Setup and Run
# Load Base Model
model, tokenizer = FastVisionModel.from_pretrained(
model_name = "unsloth/Qwen2.5-VL-32B-Instruct-bnb-4bit",
max_seq_length = 2048, # Choose any for long context!
load_in_4bit = True, # 4 bit quantization to reduce memory
load_in_8bit = False, # [NEW!] A bit more accurate, uses 2x memory
full_finetuning = False, # [NEW!] We have full finetuning now!
)
# benchmark base model performance
model_name = "Unsloth Base model"
FastVisionModel.for_inference(model)
avg_wer, avg_cer = ocr_evaluator.evaluate_model(
model, tokenizer, eval_dataset, output_dir = "unsloth_base_model_results"
)
ocr_evaluator.add_to_comparison(model_name, avg_wer, avg_cer)
## Lora Finetuning
model = FastVisionModel.get_peft_model(
model,
finetune_vision_layers = True, # Turn off for just text!
finetune_language_layers = True, # Should leave on!
finetune_attention_modules = True, # Attention good for GRPO
finetune_mlp_modules = True, # SHould leave on always!
r = 16, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
# target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
# "gate_proj", "up_proj", "down_proj",],
lora_alpha = 32,
lora_dropout = 0, # Supports any, but = 0 is optimized
bias = "none", # Supports any, but = "none" is optimized
# [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context
random_state = 3407,
use_rslora = False, # We support rank stabilized LoRA
loftq_config = None, # And LoftQ
)
from unsloth import is_bf16_supported
from unsloth.trainer import UnslothVisionDataCollator
FastVisionModel.for_training(model) # Enable for training!
model.config.use_cache = False
trainer = SFTTrainer(
model = model,
tokenizer = tokenizer,
data_collator = UnslothVisionDataCollator(model, tokenizer),
train_dataset = train_dataset,
args = SFTConfig(
# per_device_train_batch_size = 4,
# gradient_accumulation_steps = 8,
per_device_train_batch_size = 2,
gradient_accumulation_steps = 4,
gradient_checkpointing = True,
gradient_checkpointing_kwargs = {
"use_reentrant": False
}, # use reentrant checkpointing
max_grad_norm = 0.3, # max gradient norm based on QLoRA paper
warmup_ratio = 0.03,
# num_train_epochs = 2, # Set this instead of max_steps for full training runs
max_steps = 60,
learning_rate = 2e-4,
fp16 = not is_bf16_supported(),
bf16 = is_bf16_supported(),
logging_steps = 5,
save_strategy = "epoch",
optim = "adamw_torch_fused",
weight_decay = 0.01,
lr_scheduler_type = "linear",
seed = 3407,
output_dir = "unsloth-qwen2.5-vl-32b-french-ocr-checkpoints",
report_to = "none", # For Weights and Biases
# You MUST put the below items for vision finetuning:
remove_unused_columns = False,
dataset_text_field = "",
dataset_kwargs = {"skip_prepare_dataset": True},
dataset_num_proc = 4,
max_seq_length = 2048,
),
)
# run training
trainer_stats = trainer.train()
model.save_pretrained("unsloth-qwen2.5-vl-32b-french-ocr-adapter", tokenizer)
tokenizer.save_pretrained("unsloth-qwen2.5-vl-32b-french-ocr-adapter")
## Measure Adapter Performance
# benchmark lora model performance
model_name = "Unsloth lora adapter model"
FastVisionModel.for_inference(model)
avg_wer, avg_cer = ocr_evaluator.evaluate_model(
model, tokenizer, eval_dataset, output_dir = "unsloth_lora_model_results"
)
ocr_evaluator.add_to_comparison(model_name, avg_wer, avg_cer)
## Merge Model
def find_lora_base_model(model_to_inspect):
    """Unwrap a (possibly PEFT-wrapped) model to its underlying base model.

    Follows at most one ``.base_model`` hop and then at most one ``.model``
    hop; objects lacking those attributes pass through unchanged.
    """
    unwrapped = getattr(model_to_inspect, "base_model", model_to_inspect)
    unwrapped = getattr(unwrapped, "model", unwrapped)
    return unwrapped
base = find_lora_base_model(model)
print((base.__class__.__name__))
# merge default 16 bits
model.save_pretrained_merged(
save_directory = "qwen2.5-ocr-merged-finetune-merge-16bit", tokenizer = tokenizer
)
## Benchmark merged model performance
### 16 bits merged model
model, tokenizer = FastVisionModel.from_pretrained(
"./qwen2.5-ocr-merged-finetune-merge-16bit", load_in_4bit = False, load_in_8bit = False
)
# benchmark 4bit loaded, 16bits merged model performance
model_name = "Unsloth 16bits-merged model load-16bits"
model.config.use_cache = True
avg_wer, avg_cer = ocr_evaluator.evaluate_model(
model,
tokenizer,
eval_dataset,
output_dir = "unsloth_16bits_merged_model_load_16bits_results",
)
ocr_evaluator.add_to_comparison(model_name, avg_wer, avg_cer)
# load 16bits-merged model in 4 bits
model, tokenizer = FastVisionModel.from_pretrained(
"./qwen2.5-ocr-merged-finetune-merge-16bit", load_in_4bit = True, load_in_8bit = False
)
# benchmark 4bit loaded, 16bits merged model performance
model_name = "Unsloth 16bits-merged model load-4bits"
model.config.use_cache = True
avg_wer, avg_cer = ocr_evaluator.evaluate_model(
model,
tokenizer,
eval_dataset,
output_dir = "unsloth_16bits_merged_model_load_4bits_results",
)
ocr_evaluator.add_to_comparison(model_name, avg_wer, avg_cer)
# load model in 8 bits
model, tokenizer = FastVisionModel.from_pretrained(
"./qwen2.5-ocr-merged-finetune-merge-16bit", load_in_4bit = False, load_in_8bit = True
)
# benchmark 4bit loaded, 16bits merged model performance
model_name = "Unsloth 16bits-merged model load-8bits"
avg_wer, avg_cer = ocr_evaluator.evaluate_model(
model,
tokenizer,
eval_dataset,
output_dir = "unsloth_16bits_merged_model_load_8bits_results",
)
ocr_evaluator.add_to_comparison(model_name, avg_wer, avg_cer)
# """### 4 bits merged model"""
#
# # load 4bits-merged model in 4 bits
# model, tokenizer = FastVisionModel.from_pretrained("./qwen2-ocr-merged-finetune-merge-4bit",load_in_4bit=True, load_in_8bit=False)
#
# # benchmark 4bit loaded, 4bits merged model performance
# model_name = "Unsloth 4bits-merged model load-4bits"
#
# avg_wer, avg_cer = ocr_evaluator.evaluate_model(model, tokenizer, eval_dataset, output_dir="unsloth_4bits_merged_model_load_4bits_results")
# ocr_evaluator.add_to_comparison(model_name, avg_wer, avg_cer)
#
# # load model in 8 bits
# model, tokenizer = FastVisionModel.from_pretrained("./qwen2-ocr-merged-finetune-merge-4bit",load_in_4bit=False, load_in_8bit=True)
#
# # benchmark 8bit loaded, 4bits merged model performance
# model_name = "Unsloth 4bits-merged model load-8bits"
#
# avg_wer, avg_cer = ocr_evaluator.evaluate_model(model, tokenizer, eval_dataset, output_dir="unsloth_4bits_merged_model_load_8bits_results")
# ocr_evaluator.add_to_comparison(model_name, avg_wer, avg_cer)
# Model comparison report
# print model comparison
ocr_evaluator.print_model_comparison()
# Final cleanup
print("\n🧹 Cleaning up temporary files...")
safe_remove_directory("./unsloth-qwen2.5-vl-32b-french-ocr-adapter")
safe_remove_directory("./unsloth-qwen2.5-vl-32b-french-ocr-checkpoints")
safe_remove_directory("./unsloth_compiled_cache")
safe_remove_directory("./qwen2.5-ocr-merged-finetune-merge-16bit")
print("\n🎯 Pipeline completed successfully!")
print("=" * 80)
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/vision_models/test_index_file_sharded_model.py | tests/saving/vision_models/test_index_file_sharded_model.py | ## Import required libraries
from unsloth import FastVisionModel, is_bf16_supported
from unsloth.trainer import UnslothVisionDataCollator
import torch
import os
from datasets import load_dataset
from trl import SFTTrainer, SFTConfig
from huggingface_hub import HfFileSystem
import sys
from pathlib import Path
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
## Dataset Preparation"""
print("\n📊 Loading and preparing dataset...")
dataset = load_dataset("lbourdois/OCR-liboaccn-OPUS-MIT-5M-clean", "en", split = "train")
# To select the first 2000 examples
train_dataset = dataset.select(range(2000))
# To select the next 200 examples for evaluation
eval_dataset = dataset.select(range(2000, 2200))
print(f"✅ Dataset loaded successfully!")
print(f" 📈 Training samples: {len(train_dataset)}")
print(f" 📊 Evaluation samples: {len(eval_dataset)}")
# Convert dataset to OAI messages
def format_data(sample, system_prompt = None):
    """Convert a raw OCR sample into an OpenAI-style chat ``messages`` dict.

    Args:
        sample: Mapping with ``question`` (user text), ``image`` (the image
            payload, e.g. a PIL image kept as-is), and ``answer`` (expected
            assistant text).
        system_prompt: Optional system-message text. When ``None`` (the
            default, matching the original call sites), the module-level
            ``system_message`` global is used — looked up lazily at call
            time, preserving the original behavior where the global is
            defined after this function.

    Returns:
        Dict with a single ``messages`` key holding system/user/assistant turns.
    """
    return {
        "messages": [
            {
                "role": "system",
                "content": [
                    {
                        "type": "text",
                        # Conditional expression only touches the global when
                        # no explicit prompt is supplied (lazy lookup).
                        "text": system_prompt
                        if system_prompt is not None
                        else system_message,
                    }
                ],
            },
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": sample["question"],
                    },
                    {
                        "type": "image",
                        "image": sample["image"],
                    },
                ],
            },
            {
                "role": "assistant",
                "content": [{"type": "text", "text": sample["answer"]}],
            },
        ],
    }
print("\n🔄 Formatting dataset for vision training...")
system_message = "You are an expert french ocr system."
# Convert dataset to OAI messages
# need to use list comprehension to keep Pil.Image type, .mape convert image to bytes
train_dataset = [format_data(sample) for sample in train_dataset]
eval_dataset = [format_data(sample) for sample in eval_dataset]
print("✅ Dataset formatting completed!")
"""## Finetuning Setup and Run"""
print("\n" + "=" * 80)
print("=== MODEL LOADING AND SETUP ===".center(80))
print("=" * 80 + "\n")
# Load Base Model
print("🤖 Loading base vision model...")
try:
model, tokenizer = FastVisionModel.from_pretrained(
# model_name = "unsloth/Qwen2-VL-7B-Instruct",
model_name = "unsloth/Qwen2-VL-7B-Instruct",
max_seq_length = 2048, # Choose any for long context!
load_in_4bit = True, # 4 bit quantization to reduce memory
load_in_8bit = False, # [NEW!] A bit more accurate, uses 2x memory
full_finetuning = False, # [NEW!] We have full finetuning now!
)
except Exception as e:
print(f"❌ Failed to load base model: {e}")
raise
print("\n🔧 Setting up LoRA configuration...")
## Lora Finetuning
try:
model = FastVisionModel.get_peft_model(
model,
finetune_vision_layers = True, # Turn off for just text!
finetune_language_layers = True, # Should leave on!
finetune_attention_modules = True, # Attention good for GRPO
finetune_mlp_modules = True, # SHould leave on always!
r = 16, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
lora_alpha = 32,
lora_dropout = 0, # Supports any, but = 0 is optimized
bias = "none", # Supports any, but = "none" is optimized
use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context
random_state = 3407,
use_rslora = False, # We support rank stabilized LoRA
loftq_config = None, # And LoftQ
)
print("✅ LoRA configuration applied successfully!")
print(f" 🎯 LoRA rank (r): 16")
print(f" 📊 LoRA alpha: 32")
print(f" 🔍 Vision layers: Enabled")
print(f" 💬 Language layers: Enabled")
except Exception as e:
print(f"❌ Failed to apply LoRA configuration: {e}")
raise
print("\n" + "=" * 80)
print("=== TRAINING SETUP ===".center(80))
print("=" * 80 + "\n")
print("🏋️ Preparing trainer...")
FastVisionModel.for_training(model) # Enable for training!
try:
trainer = SFTTrainer(
model = model,
tokenizer = tokenizer,
data_collator = UnslothVisionDataCollator(model, tokenizer),
train_dataset = train_dataset,
args = SFTConfig(
# per_device_train_batch_size = 4,
# gradient_accumulation_steps = 8,
per_device_train_batch_size = 2,
gradient_accumulation_steps = 4,
gradient_checkpointing = True,
gradient_checkpointing_kwargs = {
"use_reentrant": False
}, # use reentrant checkpointing
max_grad_norm = 0.3, # max gradient norm based on QLoRA paper
warmup_ratio = 0.03,
# num_train_epochs = 2, # Set this instead of max_steps for full training runs
max_steps = 10,
learning_rate = 2e-4,
fp16 = not is_bf16_supported(),
bf16 = is_bf16_supported(),
logging_steps = 5,
save_strategy = "epoch",
optim = "adamw_torch_fused",
weight_decay = 0.01,
lr_scheduler_type = "linear",
seed = 3407,
output_dir = "checkpoints",
report_to = "none", # For Weights and Biases
# You MUST put the below items for vision finetuning:
remove_unused_columns = False,
dataset_text_field = "",
dataset_kwargs = {"skip_prepare_dataset": True},
dataset_num_proc = 4,
max_seq_length = 2048,
),
)
print("✅ Trainer setup completed!")
print(f" 📦 Batch size: 2")
print(f" 🔄 Gradient accumulation steps: 4")
print(f" 📈 Max training steps: 10")
print(f" 🎯 Learning rate: 2e-4")
print(f" 💾 Precision: {'BF16' if is_bf16_supported() else 'FP16'}")
except Exception as e:
print(f"❌ Failed to setup trainer: {e}")
raise
print("\n" + "=" * 80)
print("=== STARTING TRAINING ===".center(80))
print("=" * 80 + "\n")
# run training
try:
print("🚀 Starting training process...")
trainer_stats = trainer.train()
except Exception as e:
print(f"❌ Training failed: {e}")
raise
print("\n" + "=" * 80)
print("=== SAVING MODEL ===".center(80))
print("=" * 80 + "\n")
print("💾 Saving adapter model and tokenizer locally...")
try:
model.save_pretrained("unsloth-qwen2-7vl-french-ocr-adapter", tokenizer)
tokenizer.save_pretrained("unsloth-qwen2-7vl-french-ocr-adapter")
print("✅ Model saved locally!")
except Exception as e:
print(f"❌ Failed to save model locally: {e}")
raise
hf_username = os.environ.get("HF_USER", "")
if not hf_username:
hf_username = input("Please enter your Hugging Face username: ").strip()
os.environ["HF_USER"] = hf_username
hf_token = os.environ.get("HF_TOKEN", "")
if not hf_token:
hf_token = input("Please enter your Hugging Face token: ").strip()
os.environ["HF_TOKEN"] = hf_token
repo_name = f"{hf_username}/qwen2-7b-ocr-merged"
success = {
"upload": False,
"safetensors_check": False,
"download": False,
}
# Stage 1: Upload model to Hub
try:
print("\n" + "=" * 80)
print("=== UPLOADING MODEL TO HUB ===".center(80))
print("=" * 80 + "\n")
print(f"🚀 Uploading to repository: {repo_name}")
model.push_to_hub_merged(repo_name, tokenizer = tokenizer, token = hf_token)
success["upload"] = True
print("✅ Model uploaded successfully!")
except Exception as e:
print(f"❌ Failed to upload model: {e}")
raise Exception("Model upload failed.")
# Stage 2: Verify safetensors.index.json exists
try:
print("\n" + "=" * 80)
print("=== VERIFYING REPO CONTENTS ===".center(80))
print("=" * 80 + "\n")
fs = HfFileSystem(token = hf_token)
file_list = fs.ls(repo_name, detail = True)
safetensors_found = any(
file["name"].endswith("model.safetensors.index.json") for file in file_list
)
if safetensors_found:
success["safetensors_check"] = True
print("✅ model.safetensors.index.json found in repo!")
else:
raise Exception("model.safetensors.index.json not found in repo.")
except Exception as e:
print(f"❌ Verification failed: {e}")
raise Exception("Repo verification failed.")
# test downloading model even if cached
safe_remove_directory(f"./{hf_username}")
try:
print("\n" + "=" * 80)
print("=== TESTING MODEL DOWNLOAD ===".center(80))
print("=" * 80 + "\n")
print("📥 Testing model download...")
# Force download even if cached
test_model, test_tokenizer = FastVisionModel.from_pretrained(repo_name)
success["download"] = True
print("✅ Model downloaded successfully!")
# Clean up test model
del test_model, test_tokenizer
torch.cuda.empty_cache()
except Exception as e:
print(f"❌ Download failed: {e}")
raise Exception("Model download failed.")
# Final report
print("\n" + "=" * 80)
print("=== VALIDATION REPORT ===".center(80))
print("=" * 80 + "\n")
for stage, passed in success.items():
status = "✅" if passed else "❌"
print(f"{status} {stage.replace('_', ' ').title()}")
print("\n" + "=" * 80)
if all(success.values()):
print("\n🎉 All stages completed successfully!")
print(f"🌐 Your model is available at: https://huggingface.co/{repo_name}")
else:
raise Exception("Validation failed for one or more stages.")
# Final cleanup
print("\n🧹 Cleaning up temporary files...")
safe_remove_directory("./checkpoints")
safe_remove_directory("./unsloth_compiled_cache")
safe_remove_directory("./unsloth-qwen2-7vl-french-ocr-adapter")
print("\n🎯 Pipeline completed successfully!")
print("=" * 80)
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/vision_models/test_save_merge_vision_model_ocr_benchmark.py | tests/saving/vision_models/test_save_merge_vision_model_ocr_benchmark.py | # -*- coding: utf-8 -*-
from unsloth import FastVisionModel
import torch
from qwen_vl_utils import process_vision_info
import os
from datasets import load_dataset
from trl import SFTTrainer, SFTConfig
import sys
from pathlib import Path
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
from tests.utils.ocr_eval import OCRModelEvaluator
## Dataset Preparation
from datasets import load_dataset
dataset = load_dataset("lbourdois/OCR-liboaccn-OPUS-MIT-5M-clean", "en", split = "train")
# To select the first 2000 examples
train_dataset = dataset.select(range(2000))
# To select the next 200 examples for evaluation
eval_dataset = dataset.select(range(2000, 2200))
# Convert dataset to OAI messages
def format_data(sample, system_prompt = None):
    """Convert a raw OCR sample into an OpenAI-style chat ``messages`` dict.

    Args:
        sample: Mapping with ``question`` (user text), ``image`` (the image
            payload, e.g. a PIL image kept as-is), and ``answer`` (expected
            assistant text).
        system_prompt: Optional system-message text. When ``None`` (the
            default, matching the original call sites), the module-level
            ``system_message`` global is used — looked up lazily at call
            time, preserving the original behavior where the global is
            defined after this function.

    Returns:
        Dict with a single ``messages`` key holding system/user/assistant turns.
    """
    return {
        "messages": [
            {
                "role": "system",
                "content": [
                    {
                        "type": "text",
                        # Conditional expression only touches the global when
                        # no explicit prompt is supplied (lazy lookup).
                        "text": system_prompt
                        if system_prompt is not None
                        else system_message,
                    }
                ],
            },
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": sample["question"],
                    },
                    {
                        "type": "image",
                        "image": sample["image"],
                    },
                ],
            },
            {
                "role": "assistant",
                "content": [{"type": "text", "text": sample["answer"]}],
            },
        ],
    }
system_message = "You are an expert french ocr system."
# Convert dataset to OAI messages
# need to use list comprehension to keep Pil.Image type, .mape convert image to bytes
train_dataset = [format_data(sample) for sample in train_dataset]
eval_dataset = [format_data(sample) for sample in eval_dataset]
## Setup OCR main evaluation function and helpers
import os
import torch
from tqdm import tqdm
import pandas as pd
from jiwer import wer, cer
from qwen_vl_utils import process_vision_info
#
ocr_evaluator = OCRModelEvaluator()
model_comparison_results = {}
## Finetuning Setup and Run
# Load Base Model
model, tokenizer = FastVisionModel.from_pretrained(
model_name = "unsloth/Qwen2-VL-7B-Instruct",
max_seq_length = 2048, # Choose any for long context!
load_in_4bit = True, # 4 bit quantization to reduce memory
load_in_8bit = False, # [NEW!] A bit more accurate, uses 2x memory
full_finetuning = False, # [NEW!] We have full finetuning now!
)
# benchmark base model performance
model_name = "Unsloth Base model"
FastVisionModel.for_inference(model)
avg_wer, avg_cer = ocr_evaluator.evaluate_model(
model, tokenizer, eval_dataset, output_dir = "unsloth_base_model_results"
)
ocr_evaluator.add_to_comparison(model_name, avg_wer, avg_cer)
## Lora Finetuning
model = FastVisionModel.get_peft_model(
model,
finetune_vision_layers = True, # Turn off for just text!
finetune_language_layers = True, # Should leave on!
finetune_attention_modules = True, # Attention good for GRPO
finetune_mlp_modules = True, # SHould leave on always!
r = 16, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
# target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
# "gate_proj", "up_proj", "down_proj",],
lora_alpha = 32,
lora_dropout = 0, # Supports any, but = 0 is optimized
bias = "none", # Supports any, but = "none" is optimized
# [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context
random_state = 3407,
use_rslora = False, # We support rank stabilized LoRA
loftq_config = None, # And LoftQ
)
from unsloth import is_bf16_supported
from unsloth.trainer import UnslothVisionDataCollator
FastVisionModel.for_training(model) # Enable for training!
model.config.use_cache = False
trainer = SFTTrainer(
model = model,
tokenizer = tokenizer,
data_collator = UnslothVisionDataCollator(model, tokenizer),
train_dataset = train_dataset,
args = SFTConfig(
# per_device_train_batch_size = 4,
# gradient_accumulation_steps = 8,
per_device_train_batch_size = 2,
gradient_accumulation_steps = 4,
gradient_checkpointing = True,
gradient_checkpointing_kwargs = {
"use_reentrant": False
}, # use reentrant checkpointing
max_grad_norm = 0.3, # max gradient norm based on QLoRA paper
warmup_ratio = 0.03,
# num_train_epochs = 2, # Set this instead of max_steps for full training runs
max_steps = 60,
learning_rate = 2e-4,
fp16 = not is_bf16_supported(),
bf16 = is_bf16_supported(),
logging_steps = 5,
save_strategy = "epoch",
optim = "adamw_torch_fused",
weight_decay = 0.01,
lr_scheduler_type = "linear",
seed = 3407,
output_dir = "unsloth-qwen2-7vl-french-ocr-checkpoints",
report_to = "none", # For Weights and Biases
# You MUST put the below items for vision finetuning:
remove_unused_columns = False,
dataset_text_field = "",
dataset_kwargs = {"skip_prepare_dataset": True},
dataset_num_proc = 4,
max_seq_length = 2048,
),
)
# run training
trainer_stats = trainer.train()
model.save_pretrained("unsloth-qwen2-7vl-french-ocr-adapter", tokenizer)
tokenizer.save_pretrained("unsloth-qwen2-7vl-french-ocr-adapter")
## Measure Adapter Performance
# benchmark lora model performance
model_name = "Unsloth lora adapter model"
FastVisionModel.for_inference(model)
avg_wer, avg_cer = ocr_evaluator.evaluate_model(
model, tokenizer, eval_dataset, output_dir = "unsloth_lora_model_results"
)
ocr_evaluator.add_to_comparison(model_name, avg_wer, avg_cer)
## Merge Model
def find_lora_base_model(model_to_inspect):
    """Unwrap a (possibly PEFT-wrapped) model to its underlying base model.

    Peels one ``.base_model`` layer then one ``.model`` layer, in that
    order; each hop is skipped when the attribute is absent.
    """
    current = model_to_inspect
    for layer_attr in ("base_model", "model"):
        current = getattr(current, layer_attr, current)
    return current
base = find_lora_base_model(model)
print((base.__class__.__name__))
# merge default 16 bits
model.save_pretrained_merged(
save_directory = "qwen2-ocr-merged-finetune-merge-16bit", tokenizer = tokenizer
)
## Benchmark merged model performance
### 16 bits merged model
model, tokenizer = FastVisionModel.from_pretrained(
"./qwen2-ocr-merged-finetune-merge-16bit", load_in_4bit = False, load_in_8bit = False
)
# benchmark 4bit loaded, 16bits merged model performance
model_name = "Unsloth 16bits-merged model load-16bits"
model.config.use_cache = True
avg_wer, avg_cer = ocr_evaluator.evaluate_model(
model,
tokenizer,
eval_dataset,
output_dir = "unsloth_16bits_merged_model_load_16bits_results",
)
ocr_evaluator.add_to_comparison(model_name, avg_wer, avg_cer)
# load 16bits-merged model in 4 bits
model, tokenizer = FastVisionModel.from_pretrained(
"./qwen2-ocr-merged-finetune-merge-16bit", load_in_4bit = True, load_in_8bit = False
)
# benchmark 4bit loaded, 16bits merged model performance
model_name = "Unsloth 16bits-merged model load-4bits"
model.config.use_cache = True
avg_wer, avg_cer = ocr_evaluator.evaluate_model(
model,
tokenizer,
eval_dataset,
output_dir = "unsloth_16bits_merged_model_load_4bits_results",
)
ocr_evaluator.add_to_comparison(model_name, avg_wer, avg_cer)
# load model in 8 bits
model, tokenizer = FastVisionModel.from_pretrained(
"./qwen2-ocr-merged-finetune-merge-16bit", load_in_4bit = False, load_in_8bit = True
)
# benchmark 4bit loaded, 16bits merged model performance
model_name = "Unsloth 16bits-merged model load-8bits"
avg_wer, avg_cer = ocr_evaluator.evaluate_model(
model,
tokenizer,
eval_dataset,
output_dir = "unsloth_16bits_merged_model_load_8bits_results",
)
ocr_evaluator.add_to_comparison(model_name, avg_wer, avg_cer)
# """### 4 bits merged model"""
#
# # load 4bits-merged model in 4 bits
# model, tokenizer = FastVisionModel.from_pretrained("./qwen2-ocr-merged-finetune-merge-4bit",load_in_4bit=True, load_in_8bit=False)
#
# # benchmark 4bit loaded, 4bits merged model performance
# model_name = "Unsloth 4bits-merged model load-4bits"
#
# avg_wer, avg_cer = ocr_evaluator.evaluate_model(model, tokenizer, eval_dataset, output_dir="unsloth_4bits_merged_model_load_4bits_results")
# ocr_evaluator.add_to_comparison(model_name, avg_wer, avg_cer)
#
# # load model in 8 bits
# model, tokenizer = FastVisionModel.from_pretrained("./qwen2-ocr-merged-finetune-merge-4bit",load_in_4bit=False, load_in_8bit=True)
#
# # benchmark 8bit loaded, 4bits merged model performance
# model_name = "Unsloth 4bits-merged model load-8bits"
#
# avg_wer, avg_cer = ocr_evaluator.evaluate_model(model, tokenizer, eval_dataset, output_dir="unsloth_4bits_merged_model_load_8bits_results")
# ocr_evaluator.add_to_comparison(model_name, avg_wer, avg_cer)
# Model comparison report
# print model comparison
ocr_evaluator.print_model_comparison()
# Final cleanup
print("\n🧹 Cleaning up temporary files...")
safe_remove_directory("./unsloth-qwen2-7vl-french-ocr-adapter")
safe_remove_directory("./unsloth-qwen2-7vl-french-ocr-checkpoints")
safe_remove_directory("./unsloth_compiled_cache")
safe_remove_directory("./qwen2-ocr-merged-finetune-merge-16bit")
print("\n🎯 Pipeline completed successfully!")
print("=" * 80)
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/text_to_speech_models/test_lasa.py | tests/saving/text_to_speech_models/test_lasa.py | from unsloth import FastLanguageModel, FastModel
from transformers import CsmForConditionalGeneration
import torch
# ruff: noqa
import sys
from pathlib import Path
from peft import PeftModel
import warnings
import requests
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
from tests.utils.os_utils import require_package, require_python_package
require_package("ffmpeg", "ffmpeg")
require_python_package("soundfile")
require_python_package("xcodec2")
import soundfile as sf
from xcodec2.modeling_xcodec2 import XCodec2Model
XCODEC2_MODEL_NAME = "HKUST-Audio/xcodec2"
SAMPLE_RATE = 16000
DEVICE = "cuda"
try:
    codec_model = XCodec2Model.from_pretrained(XCODEC2_MODEL_NAME)
except Exception as e:
    # BUG FIX: the original did `raise f"ERROR ..."`, which raises a plain
    # string — a TypeError in Python 3 ("exceptions must derive from
    # BaseException") that masks the real failure. Wrap in RuntimeError and
    # chain the original cause instead.
    raise RuntimeError(f"ERROR loading XCodec2 model: {e}.") from e
# NOTE(review): codec kept on CPU — presumably to leave GPU memory for the
# LLM under test; confirm before moving it to CUDA.
codec_model.to("cpu")
print(f"\n{'='*80}")
print("🔍 SECTION 1: Loading Model and LoRA Adapters")
print(f"{'='*80}")
max_seq_length = 2048
model, tokenizer = FastLanguageModel.from_pretrained(
model_name = "unsloth/Llasa-1B",
max_seq_length = max_seq_length,
dtype = None, # Select None for auto detection
load_in_4bit = False, # Choose True for 4bit which reduces memory
# token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
)
base_model_class = model.__class__.__name__
model = FastLanguageModel.get_peft_model(
model,
r = 128, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
target_modules = ["q_proj", "v_proj"],
lora_alpha = 128,
lora_dropout = 0, # Supports any, but = 0 is optimized
bias = "none", # Supports any, but = "none" is optimized
# [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context
random_state = 3407,
use_rslora = False, # We support rank stabilized LoRA
loftq_config = None, # And LoftQ
)
print("✅ Model and LoRA adapters loaded successfully!")
print(f"\n{'='*80}")
print("🔍 SECTION 2: Checking Model Class Type")
print(f"{'='*80}")
assert isinstance(model, PeftModel), "Model should be an instance of PeftModel"
print("✅ Model is an instance of PeftModel!")
print(f"\n{'='*80}")
print("🔍 SECTION 3: Checking Config Model Class Type")
print(f"{'='*80}")
def find_lora_base_model(model_to_inspect):
    """Unwrap a (possibly PEFT-wrapped) model to its underlying base model.

    EAFP variant: attempt one ``.base_model`` hop, then one ``.model`` hop,
    silently keeping the current object when an attribute is missing.
    """
    current = model_to_inspect
    try:
        current = current.base_model
    except AttributeError:
        pass
    try:
        current = current.model
    except AttributeError:
        pass
    return current
config_model = find_lora_base_model(model) if isinstance(model, PeftModel) else model
assert (
config_model.__class__.__name__ == base_model_class
), f"Expected config_model class to be {base_model_class}"
print("✅ config_model returns correct Base Model class:", str(base_model_class))
print(f"\n{'='*80}")
print("🔍 SECTION 4: Saving and Merging Model")
print(f"{'='*80}")
with warnings.catch_warnings():
warnings.simplefilter("error") # Treat warnings as errors
try:
model.save_pretrained_merged("lasa", tokenizer)
print("✅ Model saved and merged successfully without warnings!")
except Exception as e:
assert False, f"Model saving/merging failed with exception: {e}"
print(f"\n{'='*80}")
print("🔍 SECTION 5: Loading Model for Inference")
print(f"{'='*80}")
model, tokenizer = FastLanguageModel.from_pretrained(
model_name = "./lasa",
max_seq_length = max_seq_length,
dtype = None, # Select None for auto detection
load_in_4bit = False, # Choose True for 4bit which reduces memory
# token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
)
# from transformers import AutoProcessor
# processor = AutoProcessor.from_pretrained("unsloth/csm-1b")
print("✅ Model loaded for inference successfully!")
print(f"\n{'='*80}")
print("🔍 SECTION 6: Running Inference")
print(f"{'='*80}")
from transformers import pipeline
import torch
output_audio_path = "lasa_audio.wav"
input_text = "Hey there my name is Elise, <giggles> and I'm a speech generation model that can sound like a person."
FastLanguageModel.for_inference(model)
def ids_to_speech_tokens(speech_ids):
    """Render integer speech-codec IDs as Llasa special-token strings.

    Each integer ``i`` becomes the literal token ``"<|s_i|>"`` so it can be
    spliced into the LLM prompt/output token stream.

    Args:
        speech_ids: Iterable of integer codec IDs.

    Returns:
        List of ``"<|s_<id>|>"`` strings, in input order.
    """
    # List comprehension replaces the manual append loop — same output.
    return [f"<|s_{speech_id}|>" for speech_id in speech_ids]
def extract_speech_ids(speech_tokens_str):
    """Parse ``"<|s_<id>|>"`` token strings back into integer codec IDs.

    Tokens that do not match the expected ``<|s_...|>`` wrapper are reported
    on stdout and skipped, matching the original lenient behavior.
    """
    speech_ids = []
    for token_str in speech_tokens_str:
        is_speech_token = token_str.startswith("<|s_") and token_str.endswith("|>")
        if not is_speech_token:
            print(f"Unexpected token: {token_str}")
            continue
        # Strip the "<|s_" prefix (4 chars) and "|>" suffix (2 chars).
        speech_ids.append(int(token_str[4:-2]))
    return speech_ids
# TTS start!
with torch.inference_mode():
with torch.amp.autocast("cuda", dtype = model.dtype):
formatted_text = (
f"<|TEXT_UNDERSTANDING_START|>{input_text}<|TEXT_UNDERSTANDING_END|>"
)
# Tokenize the text
chat = [
{"role": "user", "content": "Convert the text to speech:" + formatted_text},
{"role": "assistant", "content": "<|SPEECH_GENERATION_START|>"},
]
input_ids = tokenizer.apply_chat_template(
chat, tokenize = True, return_tensors = "pt", continue_final_message = True
)
input_ids = input_ids.to("cuda")
speech_end_id = tokenizer.convert_tokens_to_ids("<|SPEECH_GENERATION_END|>")
# Generate the speech autoregressively
outputs = model.generate(
input_ids,
max_length = 2048, # We trained our model with a max length of 2048
eos_token_id = speech_end_id,
do_sample = True,
top_p = 1.2, # Adjusts the diversity of generated content
temperature = 1.2, # Controls randomness in output
)
# Extract the speech tokens
generated_ids = outputs[0][input_ids.shape[1] : -1]
speech_tokens = tokenizer.batch_decode(generated_ids, skip_special_tokens = True)
# Convert token <|s_23456|> to int 23456
speech_tokens = extract_speech_ids(speech_tokens)
speech_tokens = torch.tensor(speech_tokens).cpu().unsqueeze(0).unsqueeze(0)
# Decode the speech tokens to speech waveform
gen_wav = codec_model.decode_code(speech_tokens)
try:
sf.write(output_audio_path, gen_wav[0, 0, :].cpu().numpy(), 16000)
except Exception as e:
assert False, f"Inference failed with exception: {e}"
## assert that transcribed_text contains The birch canoe slid on the smooth planks. Glued the sheet to the dark blue background. It's easy to tell the depth of a well. Four hours of steady work faced us.
print("✅ All sections passed successfully!")
safe_remove_directory("./unsloth_compiled_cache")
safe_remove_directory("./lasa")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/text_to_speech_models/test_whisper.py | tests/saving/text_to_speech_models/test_whisper.py | from unsloth import FastLanguageModel, FastModel
from transformers import WhisperForConditionalGeneration, WhisperProcessor
import torch
# ruff: noqa
import sys
from pathlib import Path
from peft import PeftModel
import warnings
import requests
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
from tests.utils.os_utils import require_package, require_python_package
require_package("ffmpeg", "ffmpeg")
require_python_package("soundfile")
import soundfile as sf
print(f"\n{'='*80}")
print("🔍 SECTION 1: Loading Model and LoRA Adapters")
print(f"{'='*80}")
model, tokenizer = FastModel.from_pretrained(
model_name = "unsloth/whisper-large-v3",
dtype = None, # Leave as None for auto detection
load_in_4bit = False, # Set to True to do 4bit quantization which reduces memory
auto_model = WhisperForConditionalGeneration,
whisper_language = "English",
whisper_task = "transcribe",
# token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
)
base_model_class = model.__class__.__name__
# https://github.com/huggingface/transformers/issues/37172
model.generation_config.input_ids = model.generation_config.forced_decoder_ids
model.generation_config.forced_decoder_ids = None
model = FastModel.get_peft_model(
model,
r = 64, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
target_modules = ["q_proj", "v_proj"],
lora_alpha = 64,
lora_dropout = 0, # Supports any, but = 0 is optimized
bias = "none", # Supports any, but = "none" is optimized
# [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context
random_state = 3407,
use_rslora = False, # We support rank stabilized LoRA
loftq_config = None, # And LoftQ
task_type = None, # ** MUST set this for Whisper **
)
print("✅ Model and LoRA adapters loaded successfully!")
print(f"\n{'='*80}")
print("🔍 SECTION 2: Checking Model Class Type")
print(f"{'='*80}")
assert isinstance(model, PeftModel), "Model should be an instance of PeftModel"
print("✅ Model is an instance of PeftModel!")
print(f"\n{'='*80}")
print("🔍 SECTION 3: Checking Config Model Class Type")
print(f"{'='*80}")
def find_lora_base_model(model_to_inspect):
    """Unwrap a (possibly PEFT-wrapped) model down to its underlying base model."""
    # A PeftModel exposes the tuner as `.base_model`, and the tuner exposes the
    # transformer as `.model`; peel each wrapper off only when it is present.
    unwrapped = model_to_inspect
    unwrapped = getattr(unwrapped, "base_model", unwrapped)
    unwrapped = getattr(unwrapped, "model", unwrapped)
    return unwrapped
config_model = find_lora_base_model(model) if isinstance(model, PeftModel) else model
assert (
config_model.__class__.__name__ == base_model_class
), f"Expected config_model class to be {base_model_class}"
print("✅ config_model returns correct Base Model class:", str(base_model_class))
print(f"\n{'='*80}")
print("🔍 SECTION 4: Saving and Merging Model")
print(f"{'='*80}")
with warnings.catch_warnings():
warnings.simplefilter("error") # Treat warnings as errors
try:
model.save_pretrained_merged("whisper", tokenizer)
print("✅ Model saved and merged successfully without warnings!")
except Exception as e:
assert False, f"Model saving/merging failed with exception: {e}"
print(f"\n{'='*80}")
print("🔍 SECTION 5: Loading Model for Inference")
print(f"{'='*80}")
model, tokenizer = FastModel.from_pretrained(
model_name = "./whisper",
dtype = None, # Leave as None for auto detection
load_in_4bit = False, # Set to True to do 4bit quantization which reduces memory
auto_model = WhisperForConditionalGeneration,
whisper_language = "English",
whisper_task = "transcribe",
# token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
)
# model = WhisperForConditionalGeneration.from_pretrained("./whisper")
# processor = WhisperProcessor.from_pretrained("./whisper")
print("✅ Model loaded for inference successfully!")
print(f"\n{'='*80}")
print("🔍 SECTION 6: Downloading Sample Audio File")
print(f"{'='*80}")
audio_url = "https://upload.wikimedia.org/wikipedia/commons/5/5b/Speech_12dB_s16.flac"
audio_file = "Speech_12dB_s16.flac"
try:
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
}
response = requests.get(audio_url, headers = headers)
response.raise_for_status()
with open(audio_file, "wb") as f:
f.write(response.content)
print("✅ Audio file downloaded successfully!")
except Exception as e:
assert False, f"Failed to download audio file: {e}"
print(f"\n{'='*80}")
print("🔍 SECTION 7: Running Inference")
print(f"{'='*80}")
from transformers import pipeline
import torch
FastModel.for_inference(model)
model.eval()
# Create pipeline without specifying the device
whisper = pipeline(
"automatic-speech-recognition",
model = model,
tokenizer = tokenizer.tokenizer,
feature_extractor = tokenizer.feature_extractor,
processor = tokenizer,
return_language = True,
torch_dtype = torch.float16, # Remove the device parameter
)
# Example usage
audio_file = "Speech_12dB_s16.flac"
transcribed_text = whisper(audio_file)
# audio, sr = sf.read(audio_file)
# input_features = processor(audio, return_tensors="pt").input_features
# transcribed_text = model.generate(input_features=input_features)
print(f"📝 Transcribed Text: {transcribed_text['text']}")
## assert that transcribed_text contains The birch canoe slid on the smooth planks. Glued the sheet to the dark blue background. It's easy to tell the depth of a well. Four hours of steady work faced us.
expected_phrases = [
"birch canoe slid on the smooth planks",
"sheet to the dark blue background",
"easy to tell the depth of a well",
"Four hours of steady work faced us",
]
transcribed_lower = transcribed_text["text"].lower()
all_phrases_found = all(
phrase.lower() in transcribed_lower for phrase in expected_phrases
)
assert (
all_phrases_found
), f"Expected phrases not found in transcription: {transcribed_text['text']}"
print("✅ Transcription contains all expected phrases!")
safe_remove_directory("./unsloth_compiled_cache")
safe_remove_directory("./whisper")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/text_to_speech_models/test_csm.py | tests/saving/text_to_speech_models/test_csm.py | from unsloth import FastLanguageModel, FastModel
from transformers import CsmForConditionalGeneration
import torch
# ruff: noqa
import sys
from pathlib import Path
from peft import PeftModel
import warnings
import requests
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
from tests.utils.os_utils import require_package, require_python_package
require_package("ffmpeg", "ffmpeg")
require_python_package("soundfile")
import soundfile as sf
print(f"\n{'='*80}")
print("🔍 SECTION 1: Loading Model and LoRA Adapters")
print(f"{'='*80}")
model, tokenizer = FastModel.from_pretrained(
model_name = "unsloth/csm-1b",
max_seq_length = 2048, # Choose any for long context!
dtype = None, # Leave as None for auto-detection
auto_model = CsmForConditionalGeneration,
load_in_4bit = False, # Select True for 4bit - reduces memory usage
)
base_model_class = model.__class__.__name__
model = FastModel.get_peft_model(
model,
r = 32, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
target_modules = [
"q_proj",
"k_proj",
"v_proj",
"o_proj",
"gate_proj",
"up_proj",
"down_proj",
],
lora_alpha = 32,
lora_dropout = 0, # Supports any, but = 0 is optimized
bias = "none", # Supports any, but = "none" is optimized
# [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context
random_state = 3407,
use_rslora = False, # We support rank stabilized LoRA
loftq_config = None, # And LoftQ
)
print("✅ Model and LoRA adapters loaded successfully!")
print(f"\n{'='*80}")
print("🔍 SECTION 2: Checking Model Class Type")
print(f"{'='*80}")
assert isinstance(model, PeftModel), "Model should be an instance of PeftModel"
print("✅ Model is an instance of PeftModel!")
print(f"\n{'='*80}")
print("🔍 SECTION 3: Checking Config Model Class Type")
print(f"{'='*80}")
def find_lora_base_model(model_to_inspect):
    """Unwrap a (possibly PEFT-wrapped) model down to its underlying base model."""
    # A PeftModel exposes the tuner as `.base_model`, and the tuner exposes the
    # transformer as `.model`; peel each wrapper off only when it is present.
    unwrapped = model_to_inspect
    unwrapped = getattr(unwrapped, "base_model", unwrapped)
    unwrapped = getattr(unwrapped, "model", unwrapped)
    return unwrapped
config_model = find_lora_base_model(model) if isinstance(model, PeftModel) else model
assert (
config_model.__class__.__name__ == base_model_class
), f"Expected config_model class to be {base_model_class}"
print("✅ config_model returns correct Base Model class:", str(base_model_class))
print(f"\n{'='*80}")
print("🔍 SECTION 4: Saving and Merging Model")
print(f"{'='*80}")
with warnings.catch_warnings():
warnings.simplefilter("error") # Treat warnings as errors
try:
model.save_pretrained_merged("csm", tokenizer)
print("✅ Model saved and merged successfully without warnings!")
except Exception as e:
assert False, f"Model saving/merging failed with exception: {e}"
print(f"\n{'='*80}")
print("🔍 SECTION 5: Loading Model for Inference")
print(f"{'='*80}")
model, processor = FastModel.from_pretrained(
model_name = "./csm",
max_seq_length = 2048, # Choose any for long context!
dtype = None, # Leave as None for auto-detection
auto_model = CsmForConditionalGeneration,
load_in_4bit = False, # Select True for 4bit - reduces memory usage
)
from transformers import AutoProcessor
processor = AutoProcessor.from_pretrained("unsloth/csm-1b")
print("✅ Model loaded for inference successfully!")
print(f"\n{'='*80}")
print("🔍 SECTION 6: Running Inference")
print(f"{'='*80}")
from transformers import pipeline
import torch
output_audio_path = "csm_audio.wav"
try:
text = (
"We just finished fine tuning a text to speech model... and it's pretty good!"
)
speaker_id = 0
inputs = processor(f"[{speaker_id}]{text}", add_special_tokens = True).to("cuda")
audio_values = model.generate(
**inputs,
max_new_tokens = 125, # 125 tokens is 10 seconds of audio, for longer speech increase this
# play with these parameters to get the best results
depth_decoder_temperature = 0.6,
depth_decoder_top_k = 0,
depth_decoder_top_p = 0.9,
temperature = 0.8,
top_k = 50,
top_p = 1.0,
#########################################################
output_audio = True,
)
audio = audio_values[0].to(torch.float32).cpu().numpy()
sf.write("example_without_context.wav", audio, 24000)
print(f"✅ Audio generated and saved to {output_audio_path}!")
except Exception as e:
assert False, f"Inference failed with exception: {e}"
## assert that transcribed_text contains The birch canoe slid on the smooth planks. Glued the sheet to the dark blue background. It's easy to tell the depth of a well. Four hours of steady work faced us.
print("✅ All sections passed successfully!")
safe_remove_directory("./unsloth_compiled_cache")
safe_remove_directory("./csm")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/text_to_speech_models/test_orpheus.py | tests/saving/text_to_speech_models/test_orpheus.py | from unsloth import FastLanguageModel, FastModel
from transformers import CsmForConditionalGeneration
import torch
# ruff: noqa
import sys
from pathlib import Path
from peft import PeftModel
import warnings
import requests
REPO_ROOT = Path(__file__).parents[3]
sys.path.insert(0, str(REPO_ROOT))
from tests.utils.cleanup_utils import safe_remove_directory
from tests.utils.os_utils import require_package, require_python_package
require_package("ffmpeg", "ffmpeg")
require_python_package("soundfile")
require_python_package("snac")
import soundfile as sf
from snac import SNAC
snac_model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz")
snac_model = snac_model.to("cuda")
print(f"\n{'='*80}")
print("🔍 SECTION 1: Loading Model and LoRA Adapters")
print(f"{'='*80}")
model, tokenizer = FastLanguageModel.from_pretrained(
model_name = "unsloth/orpheus-3b-0.1-ft",
max_seq_length = 2048, # Choose any for long context!
dtype = None, # Select None for auto detection
load_in_4bit = False, # Select True for 4bit which reduces memory usage
# token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
)
base_model_class = model.__class__.__name__
model = FastLanguageModel.get_peft_model(
model,
r = 64, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
target_modules = [
"q_proj",
"k_proj",
"v_proj",
"o_proj",
"gate_proj",
"up_proj",
"down_proj",
],
lora_alpha = 64,
lora_dropout = 0, # Supports any, but = 0 is optimized
bias = "none", # Supports any, but = "none" is optimized
# [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context
random_state = 3407,
use_rslora = False, # We support rank stabilized LoRA
loftq_config = None, # And LoftQ
)
print("✅ Model and LoRA adapters loaded successfully!")
print(f"\n{'='*80}")
print("🔍 SECTION 2: Checking Model Class Type")
print(f"{'='*80}")
assert isinstance(model, PeftModel), "Model should be an instance of PeftModel"
print("✅ Model is an instance of PeftModel!")
print(f"\n{'='*80}")
print("🔍 SECTION 3: Checking Config Model Class Type")
print(f"{'='*80}")
def find_lora_base_model(model_to_inspect):
    """Unwrap a (possibly PEFT-wrapped) model down to its underlying base model."""
    # A PeftModel exposes the tuner as `.base_model`, and the tuner exposes the
    # transformer as `.model`; peel each wrapper off only when it is present.
    unwrapped = model_to_inspect
    unwrapped = getattr(unwrapped, "base_model", unwrapped)
    unwrapped = getattr(unwrapped, "model", unwrapped)
    return unwrapped
config_model = find_lora_base_model(model) if isinstance(model, PeftModel) else model
assert (
config_model.__class__.__name__ == base_model_class
), f"Expected config_model class to be {base_model_class}"
print("✅ config_model returns correct Base Model class:", str(base_model_class))
print(f"\n{'='*80}")
print("🔍 SECTION 4: Saving and Merging Model")
print(f"{'='*80}")
with warnings.catch_warnings():
warnings.simplefilter("error") # Treat warnings as errors
try:
model.save_pretrained_merged("orpheus", tokenizer)
print("✅ Model saved and merged successfully without warnings!")
except Exception as e:
assert False, f"Model saving/merging failed with exception: {e}"
print(f"\n{'='*80}")
print("🔍 SECTION 5: Loading Model for Inference")
print(f"{'='*80}")
model, tokenizer = FastLanguageModel.from_pretrained(
model_name = "unsloth/orpheus-3b-0.1-ft",
max_seq_length = 2048, # Choose any for long context!
dtype = None, # Select None for auto detection
load_in_4bit = False, # Select True for 4bit which reduces memory usage
# token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
)
# from transformers import AutoProcessor
# processor = AutoProcessor.from_pretrained("unsloth/csm-1b")
print("✅ Model loaded for inference successfully!")
print(f"\n{'='*80}")
print("🔍 SECTION 6: Running Inference")
print(f"{'='*80}")
# @title Run Inference
FastLanguageModel.for_inference(model) # Enable native 2x faster inference
# Moving snac_model cuda to cpu
snac_model.to("cpu")
prompts = [
"Hey there my name is Elise, <giggles> and I'm a speech generation model that can sound like a person.",
]
chosen_voice = None # None for single-speaker
prompts_ = [(f"{chosen_voice}: " + p) if chosen_voice else p for p in prompts]
all_input_ids = []
for prompt in prompts_:
input_ids = tokenizer(prompt, return_tensors = "pt").input_ids
all_input_ids.append(input_ids)
start_token = torch.tensor([[128259]], dtype = torch.int64) # Start of human
end_tokens = torch.tensor(
[[128009, 128260]], dtype = torch.int64
) # End of text, End of human
all_modified_input_ids = []
for input_ids in all_input_ids:
modified_input_ids = torch.cat(
[start_token, input_ids, end_tokens], dim = 1
) # SOH SOT Text EOT EOH
all_modified_input_ids.append(modified_input_ids)
all_padded_tensors = []
all_attention_masks = []
max_length = max(
[modified_input_ids.shape[1] for modified_input_ids in all_modified_input_ids]
)
for modified_input_ids in all_modified_input_ids:
padding = max_length - modified_input_ids.shape[1]
padded_tensor = torch.cat(
[torch.full((1, padding), 128263, dtype = torch.int64), modified_input_ids], dim = 1
)
attention_mask = torch.cat(
[
torch.zeros((1, padding), dtype = torch.int64),
torch.ones((1, modified_input_ids.shape[1]), dtype = torch.int64),
],
dim = 1,
)
all_padded_tensors.append(padded_tensor)
all_attention_masks.append(attention_mask)
all_padded_tensors = torch.cat(all_padded_tensors, dim = 0)
all_attention_masks = torch.cat(all_attention_masks, dim = 0)
input_ids = all_padded_tensors.to("cuda")
attention_mask = all_attention_masks.to("cuda")
generated_ids = model.generate(
input_ids = input_ids,
attention_mask = attention_mask,
max_new_tokens = 1200,
do_sample = True,
temperature = 0.6,
top_p = 0.95,
repetition_penalty = 1.1,
num_return_sequences = 1,
eos_token_id = 128258,
use_cache = True,
)
token_to_find = 128257
token_to_remove = 128258
token_indices = (generated_ids == token_to_find).nonzero(as_tuple = True)
if len(token_indices[1]) > 0:
last_occurrence_idx = token_indices[1][-1].item()
cropped_tensor = generated_ids[:, last_occurrence_idx + 1 :]
else:
cropped_tensor = generated_ids
mask = cropped_tensor != token_to_remove
processed_rows = []
for row in cropped_tensor:
masked_row = row[row != token_to_remove]
processed_rows.append(masked_row)
code_lists = []
for row in processed_rows:
row_length = row.size(0)
new_length = (row_length // 7) * 7
trimmed_row = row[:new_length]
trimmed_row = [t - 128266 for t in trimmed_row]
code_lists.append(trimmed_row)
def redistribute_codes(code_list):
    """Split a flat 7-codes-per-frame stream into SNAC's three codebook layers
    and decode it to a waveform with the module-level `snac_model`.

    Each 7-token frame is laid out as [L1, L2, L3, L3, L2, L3, L3]; position k
    within a frame carries an extra offset of k * 4096, which is removed by
    subtraction below.
    """
    # NOTE(review): the caller trims rows to a multiple of 7, so the `+ 1` in
    # the range below has no effect there; with a non-multiple-of-7 input the
    # final partial frame would index past the end — confirm intent.
    layer_1 = []
    layer_2 = []
    layer_3 = []
    for i in range((len(code_list) + 1) // 7):
        layer_1.append(code_list[7 * i])
        layer_2.append(code_list[7 * i + 1] - 4096)
        layer_3.append(code_list[7 * i + 2] - (2 * 4096))
        layer_3.append(code_list[7 * i + 3] - (3 * 4096))
        layer_2.append(code_list[7 * i + 4] - (4 * 4096))
        layer_3.append(code_list[7 * i + 5] - (5 * 4096))
        layer_3.append(code_list[7 * i + 6] - (6 * 4096))
    # Each layer becomes a (1, T) tensor batch for the SNAC decoder; the model
    # was moved to CPU earlier in this script, hence no .to("cuda") here.
    codes = [
        torch.tensor(layer_1).unsqueeze(0),
        torch.tensor(layer_2).unsqueeze(0),
        torch.tensor(layer_3).unsqueeze(0),
    ]
    # codes = [c.to("cuda") for c in codes]
    audio_hat = snac_model.decode(codes)
    return audio_hat
my_samples = []
for code_list in code_lists:
samples = redistribute_codes(code_list)
my_samples.append(samples)
output_path = "orpheus_audio.wav"
try:
for i, samples in enumerate(my_samples):
audio_data = samples.detach().squeeze().cpu().numpy()
import soundfile as sf
sf.write(output_path, audio_data, 24000) # Explicitly pass sample rate
print(f"✅ Audio saved to {output_path}!")
except Exception as e:
assert False, f"Inference failed with exception: {e}"
# Verify the file exists
import os
assert os.path.exists(output_path), f"Audio file not found at {output_path}"
print("✅ Audio file exists on disk!")
del my_samples, samples
## assert that transcribed_text contains The birch canoe slid on the smooth planks. Glued the sheet to the dark blue background. It's easy to tell the depth of a well. Four hours of steady work faced us.
print("✅ All sections passed successfully!")
safe_remove_directory("./unsloth_compiled_cache")
safe_remove_directory("./orpheus")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/gpt-oss-merge/test_merged_model.py | tests/saving/gpt-oss-merge/test_merged_model.py | # inference_on_merged.py
from unsloth import FastLanguageModel
from transformers import TextStreamer
import torch
import gc
import os
import shutil
def safe_remove_directory(path):
    """Recursively delete *path*; return True on success, False otherwise.

    Never raises: invalid paths and removal failures are reported via print
    and signalled with a False return value.
    """
    try:
        # Guard clause: only an existing, real directory may be removed.
        if not (os.path.exists(path) and os.path.isdir(path)):
            print(f"Path {path} is not a valid directory")
            return False
        shutil.rmtree(path)
        return True
    except Exception as e:
        print(f"Failed to remove directory {path}: {e}")
        return False
print("🔥 Loading the 16-bit merged model from disk...")
merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
model_name = "./gpt-oss-finetuned-merged",
max_seq_length = 1024,
load_in_4bit = True,
load_in_8bit = False,
)
print("✅ Merged model loaded successfully.")
# --- Run Inference ---
print("\n🚀 Running inference...")
messages = [
{"role": "user", "content": "Solve x^5 + 3x^4 - 10 = 3."},
]
inputs = merged_tokenizer.apply_chat_template(
messages,
add_generation_prompt = True,
return_tensors = "pt",
return_dict = True,
reasoning_effort = "low", # **NEW!** Set reasoning effort to low, medium or high
).to(merged_model.device)
_ = merged_model.generate(
**inputs, max_new_tokens = 512, streamer = TextStreamer(merged_tokenizer)
)
print("\n✅ Inference complete.")
# --- Final Cleanup ---
print("\n🧹 Cleaning up merged model directory and cache...")
del merged_model, merged_tokenizer
torch.cuda.empty_cache()
gc.collect()
safe_remove_directory("./gpt-oss-finetuned-merged")
safe_remove_directory(
"./unsloth_compiled_cache"
) # Clean up cache created by this process
print("✅ Final cleanup complete. Exiting inference script.")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/saving/gpt-oss-merge/train_and_merge.py | tests/saving/gpt-oss-merge/train_and_merge.py | # train_and_merge.py
from unsloth import FastLanguageModel
from trl import SFTTrainer, SFTConfig
from datasets import load_dataset
import torch
import gc
import os
import shutil
def safe_remove_directory(path):
    """Recursively delete *path*; return True on success, False otherwise.

    Never raises: invalid paths and removal failures are reported via print
    and signalled with a False return value.
    """
    try:
        # Guard clause: only an existing, real directory may be removed.
        if not (os.path.exists(path) and os.path.isdir(path)):
            print(f"Path {path} is not a valid directory")
            return False
        shutil.rmtree(path)
        return True
    except Exception as e:
        print(f"Failed to remove directory {path}: {e}")
        return False
# This tokenizer will be used by the mapping function
tokenizer = None
def formatting_prompts_func(examples):
    """Render each conversation in a batch to plain text via the chat template.

    Uses the module-level `tokenizer`, which is assigned after model loading.
    Returns a {"text": [...]} mapping suitable for `datasets.Dataset.map`.
    """
    rendered = []
    for conversation in examples["messages"]:
        rendered.append(
            tokenizer.apply_chat_template(
                conversation, tokenize = False, add_generation_prompt = False
            )
        )
    return {"text": rendered}
# --- Load 4-bit Model and Train ---
print("Loading 4-bit Mxfp4 gpt-oss model for training...")
max_seq_length = 1024
model, tokenizer = FastLanguageModel.from_pretrained(
"unsloth/gpt-oss-20b", max_seq_length = max_seq_length, load_in_4bit = True
)
dataset = load_dataset("HuggingFaceH4/Multilingual-Thinking", split = "train[:50]").map(
formatting_prompts_func, batched = True
)
model = FastLanguageModel.get_peft_model(
model,
r = 8,
target_modules = [
"q_proj",
"k_proj",
"v_proj",
"o_proj",
"gate_proj",
"up_proj",
"down_proj",
],
lora_alpha = 16,
use_gradient_checkpointing = "unsloth",
random_state = 3407,
)
trainer = SFTTrainer(
model = model,
tokenizer = tokenizer,
train_dataset = dataset,
args = SFTConfig(
per_device_train_batch_size = 1,
gradient_accumulation_steps = 4,
max_steps = 10,
learning_rate = 2e-4,
output_dir = "outputs",
report_to = "none",
),
)
print("Starting fine-tuning...")
trainer.train()
print("Fine-tuning complete.")
# --- Merge and Save ---
print("\n💾 Merging and saving the 16-bit model to './gpt-oss-finetuned-merged'...")
model.save_pretrained_merged(
save_directory = "./gpt-oss-finetuned-merged", tokenizer = tokenizer
)
print("✅ Model merged and saved.")
# --- Cleanup ---
print("\n🧹 Cleaning up training artifacts...")
del model, trainer, tokenizer, dataset
torch.cuda.empty_cache()
gc.collect()
safe_remove_directory("./outputs")
safe_remove_directory(
"./unsloth_compiled_cache"
) # Clean up the cache created by this process
print("✅ Cleanup complete. Exiting training script.")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/utils/data_utils.py | tests/utils/data_utils.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from datasets import Dataset
QUESTION = "What day was I born?"
ANSWER = "January 1, 2058"
USER_MESSAGE = {"role": "user", "content": QUESTION}
ASSISTANT_MESSAGE = {"role": "assistant", "content": ANSWER}
DTYPE = torch.bfloat16
DEFAULT_MESSAGES = [[USER_MESSAGE, ASSISTANT_MESSAGE]]
def create_instruction_dataset(messages: list[dict] = DEFAULT_MESSAGES):
    """Build a single-column ``messages`` Dataset from chat-style conversations."""
    return Dataset.from_dict({"messages": messages})
def create_dataset(tokenizer, num_examples: int = None, messages: list[dict] = None):
    """Create a ``text`` dataset by applying the tokenizer's chat template.

    Parameters:
        tokenizer: tokenizer providing ``apply_chat_template``.
        num_examples (int | None): exact number of rows to return; the dataset
            is repeated then truncated as needed. None keeps the natural size.
        messages (list[dict] | None): conversations to render; falls back to
            DEFAULT_MESSAGES when omitted.

    Returns:
        Dataset with a single "text" column.
    """
    # Bug fix: passing None through would override create_instruction_dataset's
    # own DEFAULT_MESSAGES default and build an invalid {"messages": None} dict.
    if messages is None:
        messages = DEFAULT_MESSAGES
    dataset = create_instruction_dataset(messages)

    def _apply_chat_template(example):
        # Render one conversation to untokenized text.
        chat = tokenizer.apply_chat_template(example["messages"], tokenize = False)
        return {"text": chat}

    dataset = dataset.map(_apply_chat_template, remove_columns = "messages")
    if num_examples is not None:
        if len(dataset) < num_examples:
            # Repeat enough full copies, then trim to the exact count.
            num_repeats = num_examples // len(dataset) + 1
            dataset = dataset.repeat(num_repeats)
        dataset = dataset.select(range(num_examples))

    return dataset
def describe_param(
    param: torch.Tensor,
    include_l1: bool = False,
    include_l2: bool = False,
    include_infinity: bool = False,
    as_str: bool = True,
) -> dict:
    """
    Provide a statistical summary of a 2D weight matrix or tensor.

    If as_str is True, the summary is returned as a formatted string.

    Parameters:
        param: torch.Tensor
        include_l1 (bool): Whether to include the L1 norm (sum of absolute values).
        include_l2 (bool): Whether to include the L2 norm (Frobenius norm).
        include_infinity (bool): Whether to include the infinity norm (max absolute value).
        as_str (bool): Whether to return the summary as a formatted string.

    Returns:
        dict: A dictionary with the following statistics:
            - shape: Dimensions of the tensor.
            - mean: Average value.
            - std: Standard deviation.
            - min: Minimum value.
            - max: Maximum value.
            - percentile_25: 25th percentile.
            - percentile_50: 50th percentile (median).
            - percentile_75: 75th percentile.
        Additionally, if enabled:
            - L1_norm: Sum of absolute values.
            - L2_norm: Euclidean (Frobenius) norm.
            - infinity_norm: Maximum absolute value.
    """
    # Upcast so quantile/std are valid for half-precision parameters.
    param = param.float()
    summary = {
        "shape": param.shape,
        "mean": param.mean().cpu().item(),
        "std": param.std().cpu().item(),
        "min": param.min().cpu().item(),
        "max": param.max().cpu().item(),
        "percentile_25": param.quantile(0.25).cpu().item(),
        "percentile_50": param.quantile(0.5).cpu().item(),
        "percentile_75": param.quantile(0.75).cpu().item(),
    }
    if include_l1:
        summary["L1_norm"] = param.abs().sum().cpu().item()
    if include_l2:
        summary["L2_norm"] = param.norm().cpu().item()
    if include_infinity:
        summary["infinity_norm"] = param.abs().max().cpu().item()
    return format_summary(summary) if as_str else summary
def format_summary(stats: dict, precision: int = 6) -> str:
    """
    Format the statistical summary dictionary for printing.

    Parameters:
        stats (dict): The dictionary returned by describe_param.
        precision (int): Number of decimal places for floating point numbers.

    Returns:
        str: One "key: value" line per entry, joined with newlines.
    """
    def _render(value):
        # Floats get fixed precision; sequences (e.g. shapes) are joined and
        # wrapped in () for tuples or [] for lists; everything else is str().
        if isinstance(value, float):
            return f"{value:.{precision}f}"
        if isinstance(value, (tuple, list)):
            inner = ", ".join(str(v) for v in value)
            return f"({inner})" if isinstance(value, tuple) else f"[{inner}]"
        return str(value)

    return "\n".join(f"{key}: {_render(value)}" for key, value in stats.items())
def get_peft_weights(model):
    """Return the model's LoRA adapter parameters keyed by parameter name."""
    def _is_lora_weight(name):
        # LoRA registers its low-rank factors under lora_A / lora_B modules.
        return "lora_A" in name or "lora_B" in name

    return {
        name: param
        for name, param in model.named_parameters()
        if _is_lora_weight(name)
    }
def describe_peft_weights(model):
    """Yield (name, formatted statistical summary) for each LoRA weight."""
    weights = get_peft_weights(model)
    for name, param in weights.items():
        yield name, describe_param(param, as_str = True)
def check_responses(responses: list[str], answer: str, prompt: str = None) -> bool:
    """Report, per response, whether it contains *answer*.

    Parameters:
        responses: generated model outputs to inspect.
        answer: substring expected to appear in every response.
        prompt: optional prompt text stripped from a failing response before it
            is printed, to cut down on noise.

    Returns:
        True when every response contains *answer*, else False.  (The function
        was previously annotated ``-> bool`` but returned None.)
    """
    all_contain = True
    for i, response in enumerate(responses, start = 1):
        if answer in response:
            print(f"\u2713 response {i} contains answer")
        else:
            all_contain = False
            print(f"\u2717 response {i} does not contain answer")
            if prompt is not None:
                response = response.replace(prompt, "")
            print(f"  -> response: {response}")
    return all_contain
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/utils/perplexity_eval.py | tests/utils/perplexity_eval.py | from tqdm import tqdm
import torch
import pandas as pd
model_comparison_results = {}
# return the perplexity of the model on the dataset
# The perplexity is computed on each example, individually, with a sliding window for examples longer than 512 tokens.
def ppl_model(model, tokenizer, dataset):
    """Compute perplexity of `model` over every string in `dataset["text"]`.

    Each example is scored independently with a strided sliding window
    (window = 2048 tokens, stride = 512), so texts longer than the window are
    still fully covered. Requires a CUDA device.
    """
    nlls = []  # negative log-likelihood of every scored window, whole dataset
    max_length = 2048
    stride = 512
    for s in tqdm(range(len(dataset["text"]))):
        encodings = tokenizer(dataset["text"][s], return_tensors = "pt")
        seq_len = encodings.input_ids.size(1)
        prev_end_loc = 0
        for begin_loc in range(0, seq_len, stride):
            end_loc = min(begin_loc + max_length, seq_len)
            # Only the tokens not already scored by the previous window count
            # toward the loss; the overlap serves purely as context.
            trg_len = end_loc - prev_end_loc
            input_ids = encodings.input_ids[:, begin_loc:end_loc].to("cuda")
            target_ids = input_ids.clone()
            # -100 is the ignore index: mask the context portion out of the loss.
            target_ids[:, :-trg_len] = -100
            # Create attention mask based on pad token id
            pad_token_id = (
                tokenizer.pad_token_id if tokenizer.pad_token_id is not None else 0
            )
            attention_mask = (input_ids != pad_token_id).long()
            with torch.no_grad():
                outputs = model(
                    input_ids, labels = target_ids, attention_mask = attention_mask
                )
                neg_log_likelihood = outputs.loss
            nlls.append(neg_log_likelihood)
            prev_end_loc = end_loc
            if end_loc == seq_len:
                break
    # NOTE(review): each window loss is presumably token-averaged, so averaging
    # window losses weights windows equally rather than tokens — confirm if
    # exact corpus-level perplexity is required.
    ppl = torch.exp(torch.stack(nlls).mean())
    return ppl
# --------------------------------------------------------------------
## ----------- Reporting helper function ----------- ##
# Create a simple function to add results to the comparison
def add_to_comparison(model_name, ppl):
    """Record *ppl* for *model_name* in the module-level comparison tracker."""
    entry = {"ppl": ppl}
    model_comparison_results[model_name] = entry
# Create a function to print the comparison report whenever needed
def print_model_comparison():
    """Print a table comparing the perplexity of all models evaluated so far."""
    if not model_comparison_results:
        print("No model results available for comparison")
        return
    print("\n==== MODEL COMPARISON REPORT ====")
    names = list(model_comparison_results.keys())
    perplexities = []
    for results in model_comparison_results.values():
        ppl = results["ppl"]
        # Tensors must be moved to CPU and unwrapped to plain floats first.
        if torch.is_tensor(ppl):
            ppl = ppl.cpu().item()
        perplexities.append(ppl)
    comparison_df = pd.DataFrame({"Model": names, "Perplexity": perplexities})
    print("\nComparison Table:")
    print(comparison_df.to_string(index = False))
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/utils/test_qat.py | tests/utils/test_qat.py | from unsloth import FastLanguageModel
from typing import Dict
import pytest
import torch
from torchao.quantization.qat import FakeQuantizedLinear
from torchao.quantization.qat.fake_quantizer import (
FakeQuantizerBase,
Float8FakeQuantizer,
Int4WeightPreshuffledFakeQuantizer,
)
class _CountingFakeQuantizer(torch.nn.Module):
"""
Dummy fake quantizer that counts the number of times it has been called.
"""
def __init__(self):
super().__init__()
self.count = 0
def forward(self, x: torch.Tensor) -> torch.Tensor:
self.count += 1
return x
def _get_model(qat_scheme: str, full_finetuning: bool):
    """
    Return a 2-tuple of (model, tokenizer) configured for QAT.

    For full finetuning the QAT scheme is applied at load time; otherwise it
    is applied when wrapping with LoRA adapters, and the PEFT model is
    returned.
    """
    load_scheme = qat_scheme if full_finetuning else None
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name = "unsloth/Qwen3-1.7B",
        load_in_4bit = False,
        full_finetuning = full_finetuning,
        qat_scheme = load_scheme,
    )
    if full_finetuning:
        return model, tokenizer
    peft_model = FastLanguageModel.get_peft_model(
        model,
        qat_scheme = qat_scheme,
    )
    return peft_model, tokenizer
def _test_linear_is_fake_quantized(linear: torch.nn.Linear, qat_scheme: str):
"""
Verify that the given linear contains fake quantizers according to the `qat_scheme`.
"""
if qat_scheme == "fp8-int4":
act_fq_class = Float8FakeQuantizer
weight_fq_class = Int4WeightPreshuffledFakeQuantizer
min_in_features = 128
elif qat_scheme == "fp8-fp8":
act_fq_class = Float8FakeQuantizer
weight_fq_class = Float8FakeQuantizer
min_in_features = -1
else:
raise ValueError(f"Unknown qat_scheme: {qat_scheme}")
# Check base layer activations and weights
base_layer = getattr(linear, "base_layer", linear)
if base_layer.in_features >= min_in_features:
assert isinstance(base_layer, FakeQuantizedLinear)
assert isinstance(base_layer.activation_fake_quantizer, act_fq_class)
assert isinstance(base_layer.weight_fake_quantizer, weight_fq_class)
# Check lora A and B (only for full_finetuning=False)
if hasattr(linear, "lora_A") and hasattr(linear, "lora_B"):
lora_A = linear.lora_A.default
lora_B = linear.lora_B.default
if lora_A.in_features >= min_in_features:
assert isinstance(lora_A, FakeQuantizedLinear)
assert isinstance(lora_A.activation_fake_quantizer, act_fq_class)
assert isinstance(lora_A.weight_fake_quantizer, weight_fq_class)
if lora_B.in_features >= min_in_features:
assert isinstance(lora_B, FakeQuantizedLinear)
assert isinstance(lora_B.activation_fake_quantizer, act_fq_class)
assert isinstance(lora_B.weight_fake_quantizer, weight_fq_class)
def _test_fake_quantizers_are_called(
    model: torch.nn.Module,
    example_inputs: Dict,
    full_finetuning: bool,
):
    """
    Verify that the fake quantizers are actually called when the model is called.

    Strategy: swap every fake quantizer for a counting identity stub, run a
    single forward pass, then assert each expected stub fired exactly once.
    """
    def _swap_fake_quantizers(model: torch.nn.Module):
        # Replace real fake quantizers in-place with counting stubs.
        for name, child in model.named_children():
            if isinstance(child, FakeQuantizerBase):
                setattr(model, name, _CountingFakeQuantizer())
    def _assert_fake_quantizers_are_called(model: torch.nn.Module):
        for name, child in model.named_children():
            if full_finetuning:
                if isinstance(child, FakeQuantizedLinear):
                    assert child.activation_fake_quantizer.count == 1
                    assert child.weight_fake_quantizer.count == 1
            else:
                # For LoRA, we only fake quantize the input activations once per block:
                # For self_attn, we only fake quantize the q_proj's input activations
                # For mlp, we only fake quantize the gate_proj's input activations
                if name == "self_attn":
                    base_layer = child.q_proj.base_layer
                    assert hasattr(base_layer, "activation_fake_quantizer")
                    assert base_layer.activation_fake_quantizer.count == 1
                elif name == "mlp":
                    base_layer = child.gate_proj.base_layer
                    assert hasattr(base_layer, "activation_fake_quantizer")
                    assert base_layer.activation_fake_quantizer.count == 1
                elif isinstance(child, FakeQuantizedLinear):
                    # Weight fake quantizers should always be called
                    assert child.weight_fake_quantizer.count == 1
    # Move inputs to GPU, then run one forward pass through the swapped model.
    for k, v in example_inputs.items():
        example_inputs[k] = v.cuda()
    model.apply(_swap_fake_quantizers)
    model(**example_inputs)
    model.apply(_assert_fake_quantizers_are_called)
def _test_model_fake_quantize(qat_scheme: str, full_finetuning: bool):
    """
    Test that all linear layers in the model are fake quantized according to the `qat_scheme`.

    Note: `qat_scheme` is a scheme name string (e.g. "fp8-int4"), not a bool —
    the previous `bool` annotation was incorrect.
    """
    model, tokenizer = _get_model(qat_scheme, full_finetuning)
    # Unwrap down to the decoder stack; PEFT wraps the model one level deeper.
    if full_finetuning:
        model = model.model
    else:
        model = model.base_model.model.model
    for layer in model.layers:
        _test_linear_is_fake_quantized(layer.self_attn.q_proj, qat_scheme)
        _test_linear_is_fake_quantized(layer.self_attn.k_proj, qat_scheme)
        _test_linear_is_fake_quantized(layer.self_attn.v_proj, qat_scheme)
        _test_linear_is_fake_quantized(layer.mlp.gate_proj, qat_scheme)
        _test_linear_is_fake_quantized(layer.mlp.up_proj, qat_scheme)
        _test_linear_is_fake_quantized(layer.mlp.down_proj, qat_scheme)
    # One real forward pass to prove the quantizers actually fire.
    inputs = tokenizer("How are you?", return_tensors = "pt")
    _test_fake_quantizers_are_called(model, inputs, full_finetuning)
# TODO: there are bad interactions across tests right now, need to figure out
# how to disable model caching before re-enabling this test
@pytest.mark.parametrize("qat_scheme", ["fp8-int4", "fp8-fp8"])
def _test_full_model_fake_quantize(qat_scheme: str):
    """Full-finetuning QAT variant (disabled via leading underscore; see TODO)."""
    # `qat_scheme` is the scheme string from parametrize, not a bool.
    _test_model_fake_quantize(qat_scheme, full_finetuning = True)
@pytest.mark.parametrize("qat_scheme", ["fp8-int4", "fp8-fp8"])
def test_lora_model_fake_quantize(qat_scheme: str):
    """LoRA (PEFT) QAT variant: all LoRA linears must be fake quantized."""
    # `qat_scheme` is the scheme string from parametrize, not a bool.
    _test_model_fake_quantize(qat_scheme, full_finetuning = False)
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/utils/os_utils.py | tests/utils/os_utils.py | import subprocess
import sys
import os
import shutil
import importlib
def detect_package_manager():
    """Return the name of the first known system package manager found, or None."""
    candidates = (
        ("apt", "/usr/bin/apt"),
        ("yum", "/usr/bin/yum"),
        ("dnf", "/usr/bin/dnf"),
        ("pacman", "/usr/bin/pacman"),
        ("zypper", "/usr/bin/zypper"),
    )
    # Probe in a fixed order; the first binary present wins.
    for name, binary in candidates:
        if os.path.exists(binary):
            return name
    return None
def check_package_installed(package_name, package_manager = None):
    """Check whether `package_name` is installed via the system package manager.

    Returns True/False when a query could be run, or None when the package
    manager is unknown/undetected or the query itself failed.
    """
    if package_manager is None:
        package_manager = detect_package_manager()
    if package_manager is None:
        print("Warning: Could not detect package manager")
        return None
    # Each manager exposes a different "is installed" query command.
    query_commands = {
        "apt": ["dpkg", "-l", package_name],
        "yum": ["rpm", "-q", package_name],
        "dnf": ["rpm", "-q", package_name],
        "pacman": ["pacman", "-Q", package_name],
        "zypper": ["zypper", "se", "-i", package_name],
    }
    command = query_commands.get(package_manager)
    if command is None:
        # Unrecognized manager: same silent fall-through as before.
        return None
    try:
        result = subprocess.run(command, capture_output = True, text = True)
    except Exception as e:
        print(f"Error checking package: {e}")
        return None
    if package_manager == "zypper":
        # zypper returns 0 even when nothing matches; inspect its output.
        return package_name in result.stdout
    return result.returncode == 0
def require_package(package_name, executable_name = None):
    """Ensure a system package is present; exit the process (code 1) otherwise."""
    # Fast path: the executable being on PATH is the most reliable signal.
    if executable_name and shutil.which(executable_name):
        print(f"✓ {executable_name} is available")
        return
    # Fall back to querying the system package manager.
    pm = detect_package_manager()
    if check_package_installed(package_name, pm):
        print(f"✓ Package {package_name} is installed")
        return
    # Not found — print installation guidance, then abort.
    print(f"❌ Error: {package_name} is not installed")
    print(f"\nPlease install {package_name} using your system package manager:")
    install_commands = {
        "apt": f"sudo apt update && sudo apt install {package_name}",
        "yum": f"sudo yum install {package_name}",
        "dnf": f"sudo dnf install {package_name}",
        "pacman": f"sudo pacman -S {package_name}",
        "zypper": f"sudo zypper install {package_name}",
    }
    if pm and pm in install_commands:
        print(f" {install_commands[pm]}")
    else:
        for pm_name, cmd in install_commands.items():
            print(f" {pm_name}: {cmd}")
    print(f"\nAlternatively, install with conda:")
    print(f" conda install -c conda-forge {package_name}")
    print(f"\nPlease install the required package and run the script again.")
    sys.exit(1)
# Usage
# require_package("ffmpeg", "ffmpeg")
def require_python_package(package_name, import_name = None, pip_name = None):
    """Ensure a Python package is importable; exit the process (code 1) otherwise.

    Args:
        package_name: Display name of the required package.
        import_name: Name used for the import check (defaults to package_name).
        pip_name: Name used in the install instructions (defaults to package_name).
    """
    # BUG FIX: `importlib.util` is a submodule — a bare `import importlib` does
    # not guarantee the `util` attribute is bound, so import it explicitly.
    import importlib.util
    if import_name is None:
        import_name = package_name
    if pip_name is None:
        pip_name = package_name
    if importlib.util.find_spec(import_name) is None:
        print(f"❌ Error: Python package '{package_name}' is not installed")
        print(f"\nPlease install {package_name} using pip:")
        print(f" pip install {pip_name}")
        print(f" # or with conda:")
        print(f" conda install {pip_name}")
        print(f"\nAfter installation, run this script again.")
        sys.exit(1)
    else:
        print(f"✓ Python package '{package_name}' is installed")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/utils/test_attention_masks.py | tests/utils/test_attention_masks.py | # Copyright 2023-present Daniel Han-Chen, Michael Han-Chen & the Unsloth team. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""Unit tests for packed-attention mask helpers with sliding-window logic."""
import math
import torch
from unsloth.utils import attention_dispatch
from unsloth.utils import packing as packing_utils
def _make_seq_info(lengths):
lengths = torch.tensor(lengths, dtype = torch.int32)
cu = torch.cat(
[
torch.zeros(1, dtype = torch.int32),
torch.cumsum(lengths, dim = 0, dtype = torch.int32),
]
)
max_len = int(lengths.max().item())
return lengths, cu, max_len
def test_sdpa_packed_attention_mask_sliding_window():
    """SDPA packed mask combines causality, per-sequence blocks, and a window."""
    seq_info = _make_seq_info([5, 3])
    mask = packing_utils.build_sdpa_packed_attention_mask(
        seq_info,
        dtype = torch.float32,
        device = torch.device("cpu"),
        sliding_window = 3,
    )
    # One combined mask over the packed length 5 + 3 = 8.
    assert mask.shape == (1, 1, 8, 8)
    block_first = mask[0, 0, :5, :5]
    # Future positions (strict upper triangle) are fully masked.
    upper = torch.triu(torch.ones_like(block_first), diagonal = 1).bool()
    assert torch.all(block_first[upper] == float("-inf"))
    # Positions farther back than the window (3) are masked ...
    assert block_first[3, 0].item() == float("-inf")
    assert block_first[4, 1].item() == float("-inf")
    # ... while positions inside the window remain attendable.
    assert block_first[4, 2].item() > -math.inf
    # Cross-sequence attention (sequence 1 -> sequence 2) is masked.
    assert mask[0, 0, 0, 6].item() == float("-inf")
def test_xformers_block_mask_sliding_window(monkeypatch):
    """The xFormers mask builder forwards sliding_window to make_local_attention."""
    class _FakeMask:
        # Minimal stand-in for xformers' block-diagonal mask, recording inputs.
        def __init__(self, lengths, window = None):
            self.lengths = lengths
            self.window = window
        @classmethod
        def from_seqlens(cls, lengths):
            return cls(tuple(lengths))
        def make_local_attention(self, window_size):
            # Returns a new mask carrying the requested local window.
            return _FakeMask(self.lengths, window = window_size)
    # raising=False: the attribute may not exist when xformers is absent.
    monkeypatch.setattr(packing_utils, "_XFormersBlockMask", _FakeMask, raising = False)
    seq_info = _make_seq_info([4, 4])
    mask = packing_utils.build_xformers_block_causal_mask(
        seq_info,
        sliding_window = 2,
    )
    assert isinstance(mask, _FakeMask)
    assert mask.window == 2
def test_run_attention_sdpa_passes_sliding_window(monkeypatch):
    """run_attention (SDPA backend) forwards sliding_window into the mask builder."""
    seq_info = _make_seq_info([3, 2])
    sliding_window = 2
    original_builder = attention_dispatch.build_sdpa_packed_attention_mask
    captured = {}
    # Wrap the real builder so the forwarded window size can be observed.
    def _capture_builder(seq_info_arg, *, dtype, device, sliding_window = None):
        captured["window"] = sliding_window
        return original_builder(
            seq_info_arg,
            dtype = dtype,
            device = device,
            sliding_window = sliding_window,
        )
    monkeypatch.setattr(
        attention_dispatch,
        "build_sdpa_packed_attention_mask",
        _capture_builder,
    )
    # Stub SDPA itself; only the attn_mask it receives matters here.
    def _fake_sdpa(Q, K, V, **kwargs):
        captured["mask"] = kwargs.get("attn_mask")
        return torch.zeros_like(Q)
    monkeypatch.setattr(attention_dispatch, "scaled_dot_product_attention", _fake_sdpa)
    config = attention_dispatch.AttentionConfig(
        backend = attention_dispatch.SDPA,
        n_kv_heads = 1,
        n_groups = 1,
    )
    context = attention_dispatch.AttentionContext(
        bsz = 1,
        q_len = 5,
        kv_seq_len = 5,
        n_heads = 1,
        head_dim = 1,
        requires_grad = False,
        seq_info = seq_info,
        attention_mask = None,
        causal_mask = None,
        sliding_window = sliding_window,
    )
    Q = torch.zeros(1, 1, 5, 1)
    K = torch.zeros_like(Q)
    V = torch.zeros_like(Q)
    attention_dispatch.run_attention(
        config = config,
        context = context,
        Q = Q,
        K = K,
        V = V,
    )
    assert captured["window"] == sliding_window
    mask = captured["mask"]
    assert mask is not None and mask.shape == (1, 1, 5, 5)
    # Position 4 attending to position 1 lies outside window=2 -> masked.
    assert mask[0, 0, 4, 1].item() == float("-inf")
def test_run_attention_xformers_passes_sliding_window(monkeypatch):
    """run_attention (xFormers backend) forwards sliding_window to the bias builder."""
    seq_info = _make_seq_info([4])
    sliding_window = 3
    class _FakeBias:
        # Sentinel bias type so run_attention's isinstance checks pass.
        pass
    captured = {}
    def _fake_builder(seq_info_arg, *, sliding_window = None, base_mask = None):
        captured["window"] = sliding_window
        captured["base"] = base_mask
        return _FakeBias()
    def _fake_attention(Q, K, V, attn_bias = None, **_):
        captured["bias"] = attn_bias
        return torch.zeros_like(Q)
    monkeypatch.setattr(
        attention_dispatch, "build_xformers_block_causal_mask", _fake_builder
    )
    # raising=False: these attributes may be absent when xformers isn't installed.
    monkeypatch.setattr(
        attention_dispatch, "xformers_attention", _fake_attention, raising = False
    )
    monkeypatch.setattr(
        attention_dispatch, "XFORMERS_BLOCK_DIAG_CLS", _FakeBias, raising = False
    )
    config = attention_dispatch.AttentionConfig(
        backend = attention_dispatch.XFORMERS,
        n_kv_heads = 1,
        n_groups = 1,
    )
    context = attention_dispatch.AttentionContext(
        bsz = 1,
        q_len = 4,
        kv_seq_len = 4,
        n_heads = 1,
        head_dim = 1,
        requires_grad = False,
        seq_info = seq_info,
        attention_mask = None,
        causal_mask = None,
        sliding_window = sliding_window,
    )
    Q = torch.zeros(1, 1, 4, 1)
    K = torch.zeros_like(Q)
    V = torch.zeros_like(Q)
    attention_dispatch.run_attention(
        config = config,
        context = context,
        Q = Q,
        K = K,
        V = V,
    )
    assert captured["window"] == sliding_window
    assert isinstance(captured["bias"], _FakeBias)
def test_run_attention_flash_varlen_receives_window_and_softcap(monkeypatch):
    """run_attention (flash varlen backend) passes softcap and window_size through."""
    seq_info = _make_seq_info([4])
    sliding_window = 3
    softcap = 0.5
    # flash-attn expects a (left, right) window tuple.
    window_tuple = (sliding_window, sliding_window)
    captured = {}
    def _fake_flash_varlen(Q, K, V, cu_q, cu_k, max_q, max_k, **kwargs):
        captured["kwargs"] = kwargs
        return torch.zeros_like(Q)
    monkeypatch.setattr(
        attention_dispatch,
        "flash_attn_varlen_func",
        _fake_flash_varlen,
    )
    # Pretend flash-attention is available so the varlen path is taken.
    monkeypatch.setattr(attention_dispatch, "HAS_FLASH_ATTENTION", True)
    config = attention_dispatch.AttentionConfig(
        backend = attention_dispatch.FLASH_VARLEN,
        n_kv_heads = 1,
        n_groups = 1,
        flash_varlen_kwargs = {
            "dropout_p": 0.0,
            "softmax_scale": 1.0,
            "causal": True,
            "softcap": softcap,
            "window_size": window_tuple,
        },
    )
    context = attention_dispatch.AttentionContext(
        bsz = 1,
        q_len = 4,
        kv_seq_len = 4,
        n_heads = 1,
        head_dim = 2,
        requires_grad = False,
        seq_info = seq_info,
        attention_mask = None,
        causal_mask = None,
        sliding_window = sliding_window,
    )
    Q = torch.zeros(1, 1, 4, 2)
    K = torch.zeros_like(Q)
    V = torch.zeros_like(Q)
    attention_dispatch.run_attention(
        config = config,
        context = context,
        Q = Q,
        K = K,
        V = V,
    )
    assert captured["kwargs"]["softcap"] == softcap
    assert captured["kwargs"]["window_size"] == window_tuple
"""Unit tests for packed-attention mask helpers with sliding-window logic."""
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/utils/ocr_eval.py | tests/utils/ocr_eval.py | """
OCR Model Evaluation Module
This module provides functionality to evaluate OCR models on datasets with
word error rate (WER) and character error rate (CER) metrics.
"""
import os
import torch
from tqdm import tqdm
import pandas as pd
from jiwer import wer, cer
from qwen_vl_utils import process_vision_info
import matplotlib.pyplot as plt
from typing import List, Dict, Tuple, Optional, Any
import traceback
class OCRModelEvaluator:
    """
    A comprehensive OCR model evaluator that supports multiple models and provides
    detailed analysis with WER and CER metrics.

    Per-sample transcriptions are written to an output directory; aggregate
    (WER, CER) pairs can be registered per model via `add_to_comparison` and
    compared across runs via `print_model_comparison`.
    """

    def __init__(self):
        """Initialize the OCR evaluator."""
        # Maps model name -> {"wer": float, "cer": float}; filled by add_to_comparison.
        self.model_comparison_results = {}

    def evaluate_model(
        self,
        model: Any,
        processor: Any,
        dataset: List[Dict],
        output_dir: str = "ocr_evaluation_results",
        max_new_tokens: int = 1024,
        temperature: float = 1.5,
        min_p: float = 0.1,
        verbose: bool = True,
    ) -> Tuple[Optional[float], Optional[float]]:
        """
        Evaluate a model on an OCR dataset.

        Each sample is expected to be a dict with a chat-style "messages" list
        whose assistant message holds the ground-truth transcription. Returns
        (average WER, average CER), or (None, None) if no sample succeeded.
        """
        # Create output directory if it doesn't exist
        os.makedirs(output_dir, exist_ok = True)
        # Initialize results storage
        results = []
        # Process each sample in the dataset
        for i, sample in enumerate(
            tqdm(dataset, desc = "Evaluating OCR performance", disable = not verbose)
        ):
            try:
                # Extract components from sample
                messages = sample["messages"]
                # Get ground truth, image, and question
                ground_truth, image, question, input_messages = (
                    self._extract_sample_components(messages, i, verbose)
                )
                if ground_truth is None or image is None or question is None:
                    # Malformed sample; the reason was already printed when verbose.
                    continue
                # Generate model response
                generated_response = self._generate_response(
                    model, processor, input_messages, max_new_tokens, temperature, min_p
                )
                # Calculate metrics
                word_error = wer(ground_truth, generated_response)
                char_error = cer(ground_truth, generated_response)
                # Save individual result
                self._save_individual_result(
                    output_dir,
                    i,
                    question,
                    generated_response,
                    ground_truth,
                    word_error,
                    char_error,
                )
                # Store results for summary
                results.append(
                    {
                        "sample_id": i,
                        "wer": word_error,
                        "cer": char_error,
                        "model_output": generated_response.strip(),
                        "ground_truth": ground_truth,
                        "question": question,
                    }
                )
            except Exception as e:
                # Best-effort evaluation: a failing sample is reported, not fatal.
                if verbose:
                    print(f"Error processing sample {i}: {str(e)}")
                    traceback.print_exc()
        # Generate summary report
        return self._generate_summary_report(results, output_dir, verbose)

    def _extract_sample_components(
        self, messages: List[Dict], sample_idx: int, verbose: bool
    ) -> Tuple[Optional[str], Optional[Any], Optional[str], List[Dict]]:
        """Extract ground truth, image, question, and input messages from sample.

        Returns (None, None, None, []) (with an optional log line) whenever a
        required piece is missing, so the caller can skip the sample.
        """
        # Extract system message (if present)
        system_message = next(
            (msg for msg in messages if msg["role"] == "system"), None
        )
        # Extract user message with the image and question
        user_message = next((msg for msg in messages if msg["role"] == "user"), None)
        if not user_message:
            if verbose:
                print(f"Skipping sample {sample_idx}: No user message found")
            return None, None, None, []
        # Extract assistant message with ground truth
        assistant_message = next(
            (msg for msg in messages if msg["role"] == "assistant"), None
        )
        if not assistant_message:
            if verbose:
                print(
                    f"Skipping sample {sample_idx}: No assistant message (ground truth) found"
                )
            return None, None, None, []
        # Extract ground truth text
        ground_truth = None
        for content_item in assistant_message["content"]:
            if content_item["type"] == "text":
                ground_truth = content_item["text"]
                break
        if not ground_truth:
            if verbose:
                print(
                    f"Skipping sample {sample_idx}: No text found in assistant message"
                )
            return None, None, None, []
        # Extract image and question from user message
        image = None
        question = None
        for content_item in user_message["content"]:
            if content_item["type"] == "image":
                image = content_item["image"]
            elif content_item["type"] == "text":
                question = content_item["text"]
        if not image:
            if verbose:
                print(f"Skipping sample {sample_idx}: No image found in user message")
            return None, None, None, []
        if not question:
            if verbose:
                print(
                    f"Skipping sample {sample_idx}: No question found in user message"
                )
            return None, None, None, []
        # Construct messages for the model input (excluding assistant message)
        input_messages = []
        if system_message:
            input_messages.append(system_message)
        input_messages.append(user_message)
        return ground_truth, image, question, input_messages

    def _generate_response(
        self,
        model: Any,
        processor: Any,
        input_messages: List[Dict],
        max_new_tokens: int,
        temperature: float,
        min_p: float,
    ) -> str:
        """Generate a transcription response from the model for one sample."""
        # Preparation for inference using Qwen's specific processing
        text = processor.apply_chat_template(
            input_messages, tokenize = False, add_generation_prompt = True
        )
        # Process vision info (images/videos) from messages
        image_inputs, video_inputs = process_vision_info(input_messages)
        # Create model inputs
        inputs = processor(
            text = [text],
            images = image_inputs,
            videos = video_inputs,
            padding = True,
            return_tensors = "pt",
        )
        inputs = inputs.to(model.device)
        # Generate response
        with torch.no_grad():
            generated_ids = model.generate(
                **inputs,
                max_new_tokens = max_new_tokens,
                temperature = temperature,
                min_p = min_p,
                use_cache = True,
            )
        # Extract only the generated part (not the input)
        generated_ids_trimmed = [
            out_ids[len(in_ids) :]
            for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
        ]
        # Decode the generated text
        generated_response = processor.batch_decode(
            generated_ids_trimmed,
            skip_special_tokens = True,
            clean_up_tokenization_spaces = False,
        )[0]
        return generated_response

    def _save_individual_result(
        self,
        output_dir: str,
        sample_idx: int,
        question: str,
        generated_response: str,
        ground_truth: str,
        word_error: float,
        char_error: float,
    ):
        """Save individual sample result to file (one text file per sample)."""
        output_file = os.path.join(output_dir, f"sample_{sample_idx}.txt")
        with open(output_file, "w", encoding = "utf-8") as f:
            f.write(f"Sample {sample_idx}\n")
            f.write(f"Question: {question}\n\n")
            f.write(f"Model output:\n{generated_response.strip()}\n\n")
            f.write(f"Ground truth:\n{ground_truth}\n\n")
            f.write(f"WER: {word_error:.4f}, CER: {char_error:.4f}")

    def _generate_summary_report(
        self, results: List[Dict], output_dir: str, verbose: bool
    ) -> Tuple[Optional[float], Optional[float]]:
        """Generate and save summary report; returns (avg WER, avg CER)."""
        if not results:
            if verbose:
                print("No results to summarize.")
            return None, None
        df = pd.DataFrame(results)
        # Calculate overall averages
        avg_wer = df["wer"].mean()
        avg_cer = df["cer"].mean()
        # Save average metrics
        with open(os.path.join(output_dir, "avg_metrics.txt"), "w") as f:
            f.write(f"Average WER: {avg_wer:.4f}\n")
            f.write(f"Average CER: {avg_cer:.4f}\n")
        # Save detailed results
        df.to_csv(os.path.join(output_dir, "detailed_results.csv"), index = False)
        if verbose:
            print("\nResults Summary:")
            print(f"Average WER: {avg_wer:.4f}")
            print(f"Average CER: {avg_cer:.4f}")
            print(f"\nDetailed results saved to {output_dir}/")
        return avg_wer, avg_cer

    def add_to_comparison(self, model_name: str, wer: float, cer: float):
        """Add model results to the comparison tracker."""
        self.model_comparison_results[model_name] = {"wer": wer, "cer": cer}

    def print_model_comparison(
        self, save_csv: bool = True, save_plot: bool = True
    ) -> Optional[pd.DataFrame]:
        """Print a comparison of all models evaluated so far.

        Optionally persists the table as CSV and a bar-chart PNG; returns the
        comparison DataFrame (sorted by WER), or None when nothing was recorded.
        """
        if not self.model_comparison_results:
            print("No model results available for comparison")
            return None
        print("\n==== MODEL COMPARISON REPORT ====")
        # Create a comparison dataframe
        comparison_df = pd.DataFrame(
            {
                "Model": list(self.model_comparison_results.keys()),
                "WER": [
                    results["wer"] for results in self.model_comparison_results.values()
                ],
                "CER": [
                    results["cer"] for results in self.model_comparison_results.values()
                ],
            }
        )
        # Sort by WER (best performance first)
        comparison_df = comparison_df.sort_values("WER")
        # Display the comparison table
        print("\nComparison Table (sorted by WER):")
        print(comparison_df.to_string(index = False))
        # Save the comparison table
        if save_csv:
            comparison_file = "model_comparison_results.csv"
            comparison_df.to_csv(comparison_file, index = False)
            print(f"\nComparison table saved to {comparison_file}")
        # Generate a bar chart visualization
        if save_plot:
            self._create_comparison_plot(comparison_df)
        return comparison_df

    def _create_comparison_plot(self, comparison_df: pd.DataFrame):
        """Create and save comparison plot (WER and CER side-by-side bars)."""
        plt.figure(figsize = (12, 6))
        # Plot WER
        plt.subplot(1, 2, 1)
        plt.bar(comparison_df["Model"], comparison_df["WER"], color = "skyblue")
        plt.title("Word Error Rate Comparison")
        plt.ylabel("WER (lower is better)")
        plt.ylim(bottom = 0)
        plt.xticks(rotation = 45, ha = "right")
        # Plot CER
        plt.subplot(1, 2, 2)
        plt.bar(comparison_df["Model"], comparison_df["CER"], color = "lightgreen")
        plt.title("Character Error Rate Comparison")
        plt.ylabel("CER (lower is better)")
        plt.ylim(bottom = 0)
        plt.xticks(rotation = 45, ha = "right")
        plt.tight_layout()
        plt.savefig("ocr_model_comparison.png")
        plt.show()
        print(f"\nVisualization saved to ocr_model_comparison.png")

    def get_comparison_results(self) -> Dict[str, Dict[str, float]]:
        """Get the current comparison results (shallow copy)."""
        return self.model_comparison_results.copy()

    def clear_comparison_results(self):
        """Clear all comparison results."""
        self.model_comparison_results.clear()
self.model_comparison_results.clear()
def evaluate_ocr_model(
    model, processor, dataset, output_dir = "ocr_evaluation_results", **kwargs
):
    """
    Backward-compatible wrapper: run a one-off evaluation with a fresh evaluator.
    """
    return OCRModelEvaluator().evaluate_model(
        model, processor, dataset, output_dir, **kwargs
    )
def create_evaluator():
    """Create and return a fresh OCR evaluator instance with no recorded results."""
    return OCRModelEvaluator()
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/utils/hf_utils.py | tests/utils/hf_utils.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from contextlib import contextmanager, nullcontext
from typing import Callable, Optional
import bitsandbytes as bnb
import torch
from bitsandbytes.functional import dequantize_4bit
from peft import get_peft_model, prepare_model_for_kbit_training
from peft.tuners.lora import LoraConfig, LoraLayer
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
)
from transformers.trainer_callback import (
TrainerCallback,
TrainerControl,
TrainerState,
TrainingArguments,
)
from trl import SFTTrainer
class PeftWeightCallback(TrainerCallback):
    """Debug callback that prints trainer lifecycle events to stdout."""

    def on_log(
        self,
        args: TrainingArguments,
        state: TrainerState,
        control: TrainerControl,
        logs,
        **kwargs,
    ):
        # Dump the accumulated log history each time the trainer logs.
        print(f"DEBUG::CALLBACK::on_log::{state.log_history}")
    def on_train_begin(
        self,
        args: TrainingArguments,
        state: TrainerState,
        control: TrainerControl,
        **kwargs,
    ):
        # The trainer passes the model via kwargs; fail fast if it is absent.
        model = kwargs.get("model")
        assert model is not None
        print(f"DEBUG::CALLBACK::on_train_begin::{kwargs.keys()}")
    def on_step_end(
        self,
        args: TrainingArguments,
        state: TrainerState,
        control: TrainerControl,
        **kwargs,
    ):
        # Track global-step progression during training.
        print(f"DEBUG::CALLBACK::on_step_end::{state.global_step}")
@torch.inference_mode()
def generate_responses(
    model,
    tokenizer,
    prompt,
    max_new_tokens: int = 100,
    temperature: float = 0.8,
    do_sample: bool = True,
    num_generations: int = 1,
    skip_special_tokens: bool = True,
    dtype: torch.dtype = None,
):
    """Generate `num_generations` responses for `prompt` in one batched call.

    The prompt is tokenized once per generation and the encodings are
    concatenated along the batch dimension so a single `model.generate` call
    produces all samples. When `dtype` is given, generation runs under CUDA
    autocast at that dtype.
    """
    encodings = [tokenizer(prompt, return_tensors = "pt") for _ in range(num_generations)]
    batched_inputs = {}
    for key in encodings[0].keys():
        stacked = torch.cat([encoding[key] for encoding in encodings], dim = 0)
        batched_inputs[key] = stacked.to(model.device)
    if dtype is not None:
        inference_context = torch.autocast(device_type = "cuda", dtype = dtype)
    else:
        inference_context = nullcontext()
    with inference_context:
        outputs = model.generate(
            **batched_inputs,
            max_new_tokens = max_new_tokens,
            do_sample = do_sample,
            temperature = temperature,
        )
    return tokenizer.batch_decode(outputs, skip_special_tokens = skip_special_tokens)
def sample_responses(
    model,
    tokenizer,
    prompt,
    temperature: float = 0.8,
    num_generations: int = 1,
    max_new_tokens: int = 100,
    skip_special_tokens: bool = True,
    dtype: torch.dtype = None,
):
    """Thin wrapper over `generate_responses` (sampling is its default mode)."""
    return generate_responses(
        model,
        tokenizer,
        prompt,
        temperature = temperature,
        num_generations = num_generations,
        max_new_tokens = max_new_tokens,
        skip_special_tokens = skip_special_tokens,
        dtype = dtype,
    )
def setup_tokenizer(model_name, fixup_funcs: Optional[list[Callable]] = None):
    """Load the tokenizer for `model_name` and apply each fixup function in order.

    Args:
        model_name: Hugging Face model id or local path.
        fixup_funcs: Callables applied sequentially, each taking and returning
            the tokenizer. Defaults to no fixups.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # Avoid the mutable-default-argument pitfall: None means "no fixups".
    for fixup_func in fixup_funcs or ():
        tokenizer = fixup_func(tokenizer)
    return tokenizer
def setup_model(
    model_name,
    quantize: bool = True,
    dtype = torch.bfloat16,
    peft_config = None,
    autocast_adapter: bool = True,
):
    """Load a causal LM, optionally NF4-quantized and wrapped with a PEFT adapter."""
    bnb_config = None
    if quantize:
        # Double-quantized NF4 with the requested compute dtype.
        bnb_config = BitsAndBytesConfig(
            load_in_4bit = True,
            bnb_4bit_use_double_quant = True,
            bnb_4bit_quant_type = "nf4",
            bnb_4bit_compute_dtype = dtype,
        )
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        device_map = "cuda:0",
        attn_implementation = "sdpa",
        quantization_config = bnb_config,
        torch_dtype = dtype,
    )
    if quantize:
        # Required preprocessing for k-bit (quantized) finetuning.
        model = prepare_model_for_kbit_training(model)
    if peft_config is not None:
        model = get_peft_model(
            model, peft_config, autocast_adapter_dtype = autocast_adapter
        )
    return model
def get_peft_config(
    lora_rank,
    lora_alpha = None,
    lora_dropout = 0.0,
    bias = "none",
    target_modules = "all-linear",
):
    """Build a causal-LM LoraConfig; alpha defaults to twice the rank."""
    # Falsy alpha (None/0) falls back to the 2*rank heuristic.
    lora_alpha = lora_alpha or 2 * lora_rank
    return LoraConfig(
        lora_alpha = lora_alpha,
        lora_dropout = lora_dropout,
        r = lora_rank,
        bias = bias,
        target_modules = target_modules,
        task_type = "CAUSAL_LM",
    )
def setup_trainer(
    model,
    tokenizer,
    dataset,
    train_args,
    peft_config = None,
    formatting_func = None,
    collator = None,
):
    """Construct an SFTTrainer wired with the given model, data, and config."""
    return SFTTrainer(
        model = model,
        peft_config = peft_config,
        train_dataset = dataset,
        processing_class = tokenizer,
        formatting_func = formatting_func,
        data_collator = collator,
        args = train_args,
    )
def setup_lora(
    model,
    tokenizer,
    dataset,
    peft_config,
    train_args,
    formatting_func = None,
    collator = None,
):
    # NOTE(review): this looks like a copy-paste of `setup_trainer` — LoraConfig
    # does not accept trainer-style kwargs (model, train_dataset, processing_class,
    # ...), so calling this will raise at runtime. Confirm intent: it probably
    # should construct an SFTTrainer (like setup_trainer) or a proper LoraConfig.
    return LoraConfig(
        model = model,
        peft_config = peft_config,
        train_dataset = dataset,
        processing_class = tokenizer,
        formatting_func = formatting_func,
        data_collator = collator,
        args = train_args,
    )
def convert_weights_back_to_dtype(model, dtype):
    """
    SFTTrainer calls get_peft_model and prepare_model_for_kbit_training, which
    upcast all weights to float32. Cast the norm/embedding (non-LoRA) weights
    back to `dtype` in place.
    """
    for name, param in model.named_parameters():
        # Only norm and embedding parameters are restored.
        is_target = "norm" in name or "embed" in name
        if is_target:
            param.data = param.data.to(dtype)
def fix_llama3_tokenizer(tokenizer, padding_side = "right"):
    """Set the padding side and bind the single added '*pad*' token as pad_token."""
    tokenizer.padding_side = padding_side
    pad_candidates = [
        token for token in tokenizer.get_added_vocab() if "pad" in token
    ]
    # Exactly one added token must look like a pad token.
    assert len(pad_candidates) == 1
    tokenizer.pad_token = pad_candidates[0]
    return tokenizer
def replace_module(
    module: torch.nn.Module,
    target_module_type: torch.nn.Module,
    conversion_func: Callable,
):
    """Recursively replace every descendant of `target_module_type` in place,
    substituting `conversion_func(child)` for each match."""
    for child_name, child_module in module.named_children():
        if not isinstance(child_module, target_module_type):
            # Not a match — keep descending.
            replace_module(child_module, target_module_type, conversion_func)
            continue
        setattr(module, child_name, conversion_func(child_module))
def _convert_lora_to_linear(module: LoraLayer, adapter_name: str = "default"):
    """Merge a 4-bit LoRA layer into a plain, frozen nn.Linear.

    Dequantizes the NF4 base weight, adds the scaled LoRA delta
    (B @ A * scaling) in float32 for accuracy, then casts back to the quant
    state's original dtype.
    """
    base_layer = module.get_base_layer()
    weight = base_layer.weight
    # Only 4-bit bitsandbytes base weights are supported here.
    assert isinstance(weight, bnb.nn.Params4bit)
    quant_state = weight.quant_state
    original_dtype = quant_state.dtype
    # Accumulate in float32 to limit rounding error during the merge.
    w_dq = dequantize_4bit(weight.data, quant_state).float()
    lora_delta = (
        module.lora_B[adapter_name].weight
        @ module.lora_A[adapter_name].weight
        * module.scaling[adapter_name]
    )
    w_dq += lora_delta.float()
    w_dq = w_dq.to(original_dtype)
    new_module = torch.nn.Linear(
        w_dq.shape[1], w_dq.shape[0], bias = module.base_layer.bias is not None
    )
    # requires_grad=False: the merged layer is inference-only.
    new_module.weight.data = torch.nn.Parameter(w_dq, requires_grad = False)
    if module.lora_bias[adapter_name]:
        bias_data = module.base_layer.bias.data + module.lora_B[adapter_name].bias
        new_module.bias.data = torch.nn.Parameter(bias_data, requires_grad = False)
    return new_module
def convert_lora_to_linear(model: torch.nn.Module):
    """Merge every LoraLayer in `model` into a plain Linear, in place, and
    verify none remain before returning the model."""
    replace_module(model, LoraLayer, _convert_lora_to_linear)
    remaining = [m for m in model.modules() if isinstance(m, LoraLayer)]
    assert len(remaining) == 0
    return model
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/utils/test_packing.py | tests/utils/test_packing.py | # Copyright 2023-present Daniel Han-Chen, Michael Han-Chen & the Unsloth team. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from unsloth import FastLanguageModel
from unsloth.utils import attention_dispatch as attention_dispatch_utils
from unsloth.utils.packing import (
configure_padding_free,
configure_sample_packing,
enable_padding_free_metadata,
enable_sample_packing,
mask_packed_sequence_boundaries,
)
from contextlib import ExitStack
from types import SimpleNamespace
from unittest.mock import patch
import pytest
import torch
from datasets import Dataset
from trl import SFTConfig, SFTTrainer
from trl.trainer.sft_trainer import DataCollatorForLanguageModeling
def _build_packed_training_setup(tmp_path, device):
    """
    Build a minimal packed-SFT training setup shared by the packing tests.

    Loads a tiny random Llama checkpoint into an SFTTrainer with
    `packing = True`, enables Unsloth sample packing, and materializes the
    first train batch on the model's device.

    Returns:
        (model, batch, trainer, llama_mod) where `batch` is the first packed
        batch moved to the model's device and `llama_mod` is the
        `unsloth.models.llama` module.
    """
    dtype = None
    if device.type == "cuda":
        # Prefer bf16 on GPUs that support it, otherwise fall back to fp16.
        if torch.cuda.is_bf16_supported():
            dtype = torch.bfloat16
        else:
            dtype = torch.float16
    try:
        model, tokenizer = FastLanguageModel.from_pretrained(
            model_name = "hf-internal-testing/tiny-random-LlamaForCausalLM",
            max_seq_length = 64,
            load_in_4bit = False,
            dtype = dtype,
        )
    except OSError as exc: # pragma: no cover - offline CI
        pytest.skip(f"Requires access to tiny llama checkpoint: {exc}")
    model.to(device)
    dataset = Dataset.from_dict(
        {
            "text": [
                "Hello world!",
                "Short sample.",
                "This is a slightly longer packed example to test batching.",
                "Another response to include in the batch.",
            ]
        }
    )
    training_args = SFTConfig(
        per_device_train_batch_size = 1,
        per_device_eval_batch_size = 1,
        gradient_accumulation_steps = 1,
        dataset_text_field = "text",
        max_length = 64,
        logging_steps = 1,
        max_steps = 1,
        fp16 = device.type == "cuda" and not torch.cuda.is_bf16_supported(),
        bf16 = device.type == "cuda" and torch.cuda.is_bf16_supported(),
        dataset_num_proc = 1,
        output_dir = str(tmp_path),
        packing = True,
    )
    trainer = SFTTrainer(
        model = model,
        processing_class = tokenizer,
        train_dataset = dataset,
        args = training_args,
    )
    enable_sample_packing(model, trainer)
    dataloader = trainer.get_train_dataloader()
    batch = next(iter(dataloader))
    # The model may have been moved by the trainer; put the batch wherever
    # its parameters actually live.
    model_device = next(model.parameters()).device
    for key, value in list(batch.items()):
        if torch.is_tensor(value):
            batch[key] = value.to(model_device)
    # NOTE(review): imported here rather than at module top — presumably so
    # the module is resolved after unsloth patching; confirm before moving.
    from unsloth.models import llama as llama_mod
    return model, batch, trainer, llama_mod
def _trim_batch_to_total_tokens(data, total_tokens):
def _trim_tensor(t: torch.Tensor):
if t.ndim >= 2 and t.size(1) > total_tokens:
return t[:, :total_tokens].contiguous()
return t
trimmed = {}
for key, value in data.items():
if torch.is_tensor(value):
trimmed[key] = _trim_tensor(value)
else:
trimmed[key] = value
return trimmed
def test_mask_packed_sequence_boundaries_marks_single_row():
    """The final position of each packed segment is masked to -100."""
    shift_labels = torch.arange(6, dtype = torch.long).view(1, 6)
    changed = mask_packed_sequence_boundaries(
        shift_labels,
        torch.tensor([2, 1, 3], dtype = torch.int32),
    )
    assert changed is True
    flat = shift_labels.view(-1)
    # Segment lengths [2, 1, 3] end at flat indices 1, 2 and 5.
    for boundary in (1, 2, 5):
        assert flat[boundary].item() == -100
    assert flat[0].item() != -100
def test_mask_packed_sequence_boundaries_across_multiple_rows():
    """Boundary masking also works when segments span multiple rows."""
    shift_labels = torch.arange(10, dtype = torch.long).view(2, 5)
    changed = mask_packed_sequence_boundaries(
        shift_labels,
        torch.tensor([3, 2, 4, 1], dtype = torch.int32),
    )
    assert changed is True
    flat = shift_labels.view(-1)
    # Cumulative lengths 3, 5, 9, 10 -> boundary indices 2, 4, 8, 9.
    for boundary in (2, 4, 8, 9):
        assert flat[boundary].item() == -100
    assert torch.any(flat != -100)
def test_configure_sample_packing():
    """configure_sample_packing enables packing + padding-free collation."""
    config = SimpleNamespace()
    configure_sample_packing(config)
    for attribute, expected in (
        ("packing", True),
        ("padding_free", True),
        ("remove_unused_columns", False),
    ):
        assert getattr(config, attribute) is expected
def test_configure_padding_free():
    """configure_padding_free enables padding-free and keeps extra columns."""
    config = SimpleNamespace(remove_unused_columns = True)
    configure_padding_free(config)
    for attribute, expected in (
        ("padding_free", True),
        ("remove_unused_columns", False),
    ):
        assert getattr(config, attribute) is expected
class _DummyChild(torch.nn.Module):
    """Minimal nested module exposing only a `max_seq_length` attribute."""
    def __init__(self):
        super().__init__()
        self.max_seq_length = 8
class _DummyModel(torch.nn.Module):
    """Stand-in model with the attributes the packing helpers touch:
    `max_seq_length` (on itself and a child), plus `config` and
    `generation_config` namespaces carrying the attention implementation."""
    def __init__(self):
        super().__init__()
        self.max_seq_length = 16
        self.child = _DummyChild()
        self.config = SimpleNamespace(_attn_implementation = "sdpa")
        self.generation_config = SimpleNamespace(attn_implementation = "sdpa")
class _DummyTrainer:
    """Stand-in trainer carrying just `args` and a real TRL language-modeling
    collator, as required by `enable_sample_packing`."""
    def __init__(self):
        self.args = SimpleNamespace(remove_unused_columns = True)
        self.data_collator = DataCollatorForLanguageModeling(
            pad_token_id = 0,
            completion_only_loss = False,
            padding_free = True,
            return_position_ids = False,
            return_tensors = "pt",
        )
class _PaddingFreeCollator:
def __init__(self):
self.padding_free = True
self.return_position_ids = False
self.calls = 0
def torch_call(self, examples):
self.calls += 1
return {
"input_ids": torch.tensor([[0]], dtype = torch.long),
"examples_seen": self.calls,
}
def test_enable_sample_packing():
    """enable_sample_packing flags the model hierarchy and wraps the collator
    so packed per-sample lengths and position ids are emitted."""
    model = _DummyModel()
    trainer = _DummyTrainer()
    enable_sample_packing(model, trainer)
    # model hierarchy should now allow packed overlength inputs
    assert getattr(model, "_unsloth_allow_packed_overlength") is True
    assert getattr(model.child, "_unsloth_allow_packed_overlength") is True
    collator = trainer.data_collator
    assert collator.return_position_ids is True
    assert getattr(collator, "_unsloth_packing_wrapped") is True
    examples = [
        {
            "input_ids": [0, 1, 2],
            "labels": [0, 1, 2],
            "seq_lengths": [2, 1],
        },
        {
            "input_ids": [3, 4, 5],
            "labels": [3, 4, 5],
            "seq_lengths": [3],
        },
    ]
    batch = collator.torch_call(examples)
    # packed lengths are aggregated into a single tensor
    assert "packed_seq_lengths" in batch
    assert torch.equal(
        batch["packed_seq_lengths"],
        torch.tensor([2, 1, 3], dtype = torch.int32),
    )
    # All six tokens are flattened into one packed row.
    assert batch["input_ids"].shape == (1, 6)
    # Position ids restart at 0 for each packed segment: [2, 1, 3] lengths.
    expected_positions = torch.tensor([0, 1, 0, 0, 1, 2], dtype = torch.long)
    assert torch.equal(batch["position_ids"].view(-1)[:6], expected_positions)
def test_enable_sample_packing_trl_collator(tmp_path):
    """Same collator-wrapping behavior as above, but against the real TRL
    collator created by an actual SFTTrainer."""
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model, _, trainer, _ = _build_packed_training_setup(tmp_path, device)
    # Idempotent second call: the setup helper already enabled packing once.
    enable_sample_packing(model, trainer)
    examples = [
        {
            "input_ids": [0, 1, 2],
            "labels": [0, 1, 2],
            "seq_lengths": [2, 1],
        },
        {
            "input_ids": [3, 4, 5],
            "labels": [3, 4, 5],
            "seq_lengths": [3],
        },
    ]
    batch = trainer.data_collator.torch_call(examples)
    assert batch["input_ids"].shape == (1, 6)
    assert torch.equal(
        batch["packed_seq_lengths"],
        torch.tensor([2, 1, 3], dtype = torch.int32),
    )
    # Position ids restart at 0 for each packed segment.
    expected_positions = torch.tensor([0, 1, 0, 0, 1, 2], dtype = torch.long)
    assert torch.equal(batch["position_ids"].view(-1)[:6], expected_positions)
    if hasattr(trainer, "accelerator"):
        trainer.accelerator.free_memory()
def test_enable_padding_free_metadata():
    """enable_padding_free_metadata wraps a padding-free collator so it also
    emits per-example sequence lengths, and keeps unused columns."""
    model = _DummyModel()
    trainer = SimpleNamespace(
        args = SimpleNamespace(remove_unused_columns = True),
        data_collator = _PaddingFreeCollator(),
    )
    enable_padding_free_metadata(model, trainer)
    assert getattr(model, "_unsloth_allow_packed_overlength") is True
    assert getattr(model.child, "_unsloth_allow_packed_overlength") is True
    collator = trainer.data_collator
    assert collator.return_position_ids is True
    assert getattr(collator, "_unsloth_padding_free_lengths_wrapped") is True
    examples = [
        {"input_ids": [0, 1, 2]},
        {"input_ids": [3, 4]},
    ]
    batch = collator.torch_call(examples)
    # Lengths come straight from each example's input_ids.
    assert torch.equal(
        batch["packed_seq_lengths"],
        torch.tensor([3, 2], dtype = torch.int32),
    )
    assert trainer.args.remove_unused_columns is False
def test_packing_sdpa(tmp_path):
    """End-to-end check that a packed batch routes through the SDPA packed
    attention mask and that cross-entropy sees boundary-masked labels.

    Flash-attention and xformers are patched off so the SDPA path is forced;
    the loss function is stubbed to capture the labels it receives.
    """
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model, batch, trainer, llama_mod = _build_packed_training_setup(tmp_path, device)
    # The packed collator must drop the dense attention mask entirely.
    assert "packed_seq_lengths" in batch
    assert "attention_mask" not in batch
    assert batch["packed_seq_lengths"].dtype == torch.int32
    total_tokens = batch["input_ids"].size(-1)
    assert int(batch["packed_seq_lengths"].sum().item()) == total_tokens
    packed_tokens = int(batch["packed_seq_lengths"].sum().item())
    assert "position_ids" in batch
    flat_positions = batch["position_ids"].reshape(-1)[:packed_tokens]
    # Position ids must restart from 0 at the start of every packed segment.
    expected_positions = torch.cat(
        [
            torch.arange(length, dtype = torch.long)
            for length in batch["packed_seq_lengths"].tolist()
        ]
    )
    assert torch.equal(flat_positions.cpu(), expected_positions)
    inputs = _trim_batch_to_total_tokens(batch, packed_tokens)
    seq_info = llama_mod.get_packed_info_from_kwargs(
        {"packed_seq_lengths": batch["packed_seq_lengths"]},
        inputs["input_ids"].device,
    )
    assert seq_info is not None
    original_mask = attention_dispatch_utils.build_sdpa_packed_attention_mask
    mask_calls = []
    captured_loss_labels = {}
    def _capture_mask(seq_info, dtype, device, *, sliding_window = None):
        # Record the packed lengths used, then defer to the real builder.
        mask_calls.append(tuple(seq_info[0].tolist()))
        return original_mask(
            seq_info,
            dtype = dtype,
            device = device,
            sliding_window = sliding_window,
        )
    def _capture_loss(*, logits, labels, **loss_kwargs):
        # Capture the labels and return a dummy zero loss.
        captured_loss_labels["labels"] = labels.detach().to("cpu")
        return torch.zeros((), device = logits.device, dtype = logits.dtype)
    with ExitStack() as stack:
        # Force the SDPA code path by disabling the faster backends.
        stack.enter_context(
            patch.object(attention_dispatch_utils, "HAS_FLASH_ATTENTION", False)
        )
        stack.enter_context(
            patch.object(attention_dispatch_utils, "HAS_XFORMERS", False)
        )
        stack.enter_context(
            patch.object(
                attention_dispatch_utils,
                "build_sdpa_packed_attention_mask",
                side_effect = _capture_mask,
            )
        )
        stack.enter_context(
            patch.object(
                llama_mod,
                "fast_cross_entropy_loss",
                side_effect = _capture_loss,
            )
        )
        with torch.no_grad():
            outputs = model(**inputs)
    assert mask_calls, "SDPA packed mask was not constructed"
    assert outputs.loss is not None
    assert "labels" in captured_loss_labels
    flat_loss_labels = captured_loss_labels["labels"].reshape(-1)
    # The last (shifted) label of each packed segment must be masked to -100.
    boundaries = (
        torch.cumsum(
            batch["packed_seq_lengths"].to(device = "cpu", dtype = torch.long), dim = 0
        )
        - 1
    )
    for idx in boundaries.tolist():
        assert flat_loss_labels[idx].item() == -100
    assert torch.any(flat_loss_labels != -100)
    if hasattr(trainer, "accelerator"):
        trainer.accelerator.free_memory()
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/utils/__init__.py | tests/utils/__init__.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from contextlib import contextmanager
@contextmanager
def timer(name):
    """Print how many seconds the `with` body took once it finishes."""
    began = time.time()
    yield
    elapsed = time.time() - began
    print(f"{name} took {elapsed:.2f} seconds")
@contextmanager
def header_footer_context(title: str, char = "-"):
    """Frame the `with` body with a titled header rule and a closing rule."""
    banner = char * 50
    print()
    print(f"{banner} {title} {banner}")
    yield
    # Footer width matches the header: 100 rule chars plus " title ".
    print(char * (100 + len(title) + 2))
    print()
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/utils/aime_eval.py | tests/utils/aime_eval.py | """
AIME Dataset Evaluation Module
This module provides functions to evaluate language models on the combined AIME dataset
(test2024 + test2025-I + test2025-II).
"""
import json
import requests
import os
import re
import logging
from typing import List, Dict, Any
from tqdm import tqdm
from vllm import SamplingParams
def download_and_combine_aime_datasets(data_dir: str = "./data/aime") -> str:
    """Download all AIME datasets and combine them into a single file.

    Fetches the 2024 / 2025-I / 2025-II JSONL splits from the AIME-Preview
    repository, tags each problem with its source dataset, original id, and a
    running `global_id`, and writes everything to `<data_dir>/aime.jsonl`.
    The download is skipped if the combined file already exists.

    Returns:
        Path to the combined JSONL file.

    Raises:
        RuntimeError: if no problems could be downloaded at all.
    """
    datasets = {
        "test2024": "https://raw.githubusercontent.com/GAIR-NLP/AIME-Preview/main/eval/data/aime/test2024.jsonl",
        "test2025-I": "https://raw.githubusercontent.com/GAIR-NLP/AIME-Preview/main/eval/data/aime/test2025-I.jsonl",
        "test2025-II": "https://raw.githubusercontent.com/GAIR-NLP/AIME-Preview/main/eval/data/aime/test2025-II.jsonl",
    }
    os.makedirs(data_dir, exist_ok = True)
    combined_filepath = os.path.join(data_dir, "aime.jsonl")
    # Check if combined file already exists
    if os.path.exists(combined_filepath):
        print(f"Combined AIME dataset already exists at {combined_filepath}")
        return combined_filepath
    print("Downloading and combining AIME datasets...")
    all_problems = []
    global_id = 0
    for dataset_name, url in datasets.items():
        print(f"  Downloading {dataset_name}...")
        try:
            response = requests.get(url)
            response.raise_for_status()
            # Parse each line and add source information
            for line_num, line in enumerate(response.text.strip().split("\n")):
                if line.strip():
                    try:
                        data = json.loads(line)
                        # Add source dataset information and global ID
                        data["source_dataset"] = dataset_name
                        data["original_id"] = data.get("id", line_num)
                        data["global_id"] = global_id
                        global_id += 1
                        all_problems.append(data)
                    except json.JSONDecodeError as e:
                        # Malformed rows are skipped rather than aborting the whole split.
                        print(
                            f"    Warning: Error parsing line {line_num + 1} in {dataset_name}: {e}"
                        )
                        continue
        except requests.RequestException as e:
            # A failed split is skipped; remaining splits are still combined.
            print(f"  Error downloading {dataset_name}: {e}")
            continue
    # Write combined dataset
    if all_problems:
        with open(combined_filepath, "w", encoding = "utf-8") as f:
            for problem in all_problems:
                f.write(json.dumps(problem, ensure_ascii = False) + "\n")
        print(f"✅ Combined {len(all_problems)} problems from {len(datasets)} datasets")
        print(f"   Saved to: {combined_filepath}")
        # Print summary by dataset
        for dataset_name in datasets.keys():
            count = sum(1 for p in all_problems if p["source_dataset"] == dataset_name)
            print(f"   {dataset_name}: {count} problems")
    else:
        raise RuntimeError("No problems were successfully downloaded")
    return combined_filepath
def load_aime_dataset(data_dir: str = "./data/aime") -> List[Dict[str, Any]]:
    """Load combined AIME dataset and format for evaluation.

    Ensures the combined JSONL exists (downloading it if needed), then turns
    each problem into a dict carrying its ids/metadata, the ground-truth
    answer as a string, and a chat-style `prompt` ready for a chat template.
    """
    # Download and combine if needed
    filepath = download_and_combine_aime_datasets(data_dir)
    examples = []
    with open(filepath, "r", encoding = "utf-8") as f:
        for line_num, line in enumerate(f):
            line = line.strip()
            if line:
                try:
                    data = json.loads(line)
                    # Format as expected by our evaluation
                    formatted_example = {
                        "global_id": data.get("global_id", line_num),
                        "original_id": data.get(
                            "original_id", data.get("id", line_num)
                        ),
                        "source_dataset": data.get("source_dataset", "unknown"),
                        "problem": data["problem"],
                        "answer": str(data["answer"]),  # Ensure answer is string
                        "solution": data.get("solution", ""),
                        "url": data.get("url", ""),
                        # Format as chat messages for the model
                        "prompt": [
                            {
                                "role": "system",
                                "content": "You are a mathematical problem solver. Solve the given problem step by step and provide your final answer clearly.",
                            },
                            {
                                "role": "user",
                                "content": f"Problem: {data['problem']}\n\nSolve this step by step and provide your final numerical answer.",
                            },
                        ],
                    }
                    examples.append(formatted_example)
                except json.JSONDecodeError as e:
                    # Skip malformed lines instead of failing the whole load.
                    print(f"Error parsing line {line_num + 1}: {e}")
                    continue
    print(f"Loaded {len(examples)} problems from combined AIME dataset")
    # Print breakdown by source
    source_counts = {}
    for example in examples:
        source = example["source_dataset"]
        source_counts[source] = source_counts.get(source, 0) + 1
    for source, count in source_counts.items():
        print(f"  {source}: {count} problems")
    return examples
def extract_aime_answer(response: str) -> str:
    """
    Pull a final AIME answer out of a model response.

    AIME answers are integers 0-999. Explicit answer phrasings are tried
    first (on the lowercased text); failing that, the raw text is scanned
    for any in-range 1-3 digit number, last occurrence first.
    Returns the answer as a string, or "" when nothing plausible is found.
    """
    # Ordered from most to least explicit phrasing.
    patterns = [
        r"(?:the )?(?:final )?answer is (\d{1,3})",
        r"(?:therefore|thus|so),?\s*(?:the )?(?:final )?answer is (\d{1,3})",
        r"\\boxed\{(\d{1,3})\}",
        r"\$\\boxed\{(\d{1,3})\}\$",
        r"(?:answer|result):\s*(\d{1,3})",
        r"(?:^|\n)\s*(\d{1,3})\s*(?:\n|$)",  # Standalone number
    ]
    normalized = response.lower().strip()
    for pattern in patterns:
        hits = re.findall(pattern, normalized, re.MULTILINE | re.IGNORECASE)
        if not hits:
            continue
        # The last hit is the most likely final answer.
        candidate = hits[-1]
        try:
            value = int(candidate)
        except ValueError:
            continue
        if 0 <= value <= 999:
            return str(value)
    # Fallback: any 1-3 digit number in the raw text, scanned from the end.
    for candidate in reversed(re.findall(r"\b(\d{1,3})\b", response)):
        try:
            value = int(candidate)
        except ValueError:
            continue
        if 0 <= value <= 999:
            return str(value)
    return ""
def get_num_tokens(text, tokenizer_instance):
    """Return how many tokens `tokenizer_instance` produces for `text`
    (0 for empty/falsy text, without calling the tokenizer)."""
    if not text:
        return 0
    encoded = tokenizer_instance(text, return_tensors = "pt")
    return len(encoded["input_ids"][0])
def evaluate_model_aime(
    model,
    tokenizer,
    model_type = "base",
    lora_request = None,
    temperature = 0.3,
    n_sampling = 8,
    max_tokens = 32768,
    top_p = 0.95,
    seed = 0,
):
    """Evaluate model on combined AIME dataset with official configuration.

    Generates `n_sampling` responses per problem via `model.fast_generate`,
    extracts a 0-999 answer from each, and scores a problem as correct when
    ANY sample matches the ground truth (pass@k style). Detailed per-problem
    records and aggregate metrics are written to a JSON file whose name
    encodes `model_type`, `temperature` and `n_sampling`.

    Returns:
        The aggregate results dict, or None if the dataset could not load.
    """
    print(f"\n{'='*70}")
    print(f"🧮 AIME EVALUATION - {model_type.upper()} MODEL")
    print(f"Combined Dataset: test2024 + test2025-I + test2025-II")
    print(f"{'='*70}")
    # Load combined AIME dataset
    try:
        eval_dataset = load_aime_dataset()
    except Exception as e:
        print(f"Error loading dataset: {e}")
        return None
    if not eval_dataset:
        print("No examples found in dataset")
        return None
    # Initialize tracking variables
    records = {}
    input_tokens = []
    output_tokens = []
    correct_answers = 0
    # Track performance by source dataset
    source_stats = {}
    for example in eval_dataset:
        source = example["source_dataset"]
        if source not in source_stats:
            source_stats[source] = {"total": 0, "correct": 0}
        source_stats[source]["total"] += 1
    # Setup sampling parameters (AIME configuration)
    sampling_params = SamplingParams(
        temperature = temperature,
        top_p = top_p,
        max_tokens = max_tokens,
        n = n_sampling,  # Multiple samples per question
        seed = seed,
    )
    print(f"\n🔧 Configuration:")
    print(f"   Temperature: {temperature}")
    print(f"   Samples per question: {n_sampling}")
    print(f"   Max tokens: {max_tokens}")
    print(f"   Top-p: {top_p}")
    print(f"   Seed: {seed}")
    # Temporarily suppress verbose logging from the generation stack
    original_levels = {}
    loggers_to_suppress = [
        "vllm",
        "vllm.engine",
        "vllm.worker",
        "vllm.model_executor",
        "vllm.executor",
        "ray",
    ]
    for logger_name in loggers_to_suppress:
        logger = logging.getLogger(logger_name)
        original_levels[logger_name] = logger.level
        logger.setLevel(logging.WARNING)
    try:
        print(f"\n🚀 Evaluating {len(eval_dataset)} problems...")
        # Main evaluation loop
        with tqdm(
            total = len(eval_dataset), desc = "Processing AIME problems", unit = "problem"
        ) as pbar:
            for task_id, item in enumerate(eval_dataset):
                try:
                    # Prepare prompt
                    prompt_text = tokenizer.apply_chat_template(
                        item["prompt"], add_generation_prompt = True, tokenize = False
                    )
                    input_tokens.append(get_num_tokens(prompt_text, tokenizer))
                    # Generate multiple responses
                    outputs = model.fast_generate(
                        [prompt_text],
                        sampling_params = sampling_params,
                        lora_request = lora_request,
                        use_tqdm = False,
                    )[0].outputs
                    # Process all generated responses
                    responses = [output.text for output in outputs]
                    extracted_answers = [
                        extract_aime_answer(response) for response in responses
                    ]
                    # Calculate total output tokens
                    total_output_tokens = sum(
                        get_num_tokens(response, tokenizer) for response in responses
                    )
                    output_tokens.append(total_output_tokens)
                    # A problem counts as solved if ANY sample is correct
                    ground_truth = item["answer"]
                    correct_responses = [
                        ans == ground_truth for ans in extracted_answers
                    ]
                    is_correct = any(correct_responses)
                    if is_correct:
                        correct_answers += 1
                        source_stats[item["source_dataset"]]["correct"] += 1
                    # Store detailed record
                    records[task_id] = {
                        "global_id": item["global_id"],
                        "original_id": item["original_id"],
                        "source_dataset": item["source_dataset"],
                        "problem": item["problem"],
                        "ground_truth": ground_truth,
                        "responses": responses,
                        "extracted_answers": extracted_answers,
                        "correct_responses": correct_responses,
                        "is_correct": is_correct,
                        "input_tokens": input_tokens[-1],
                        "output_tokens": total_output_tokens,
                        "n_correct": sum(correct_responses),
                        "n_total": len(responses),
                        "solution": item.get("solution", ""),
                        "url": item.get("url", ""),
                    }
                    # Update progress
                    current_accuracy = correct_answers / (task_id + 1) * 100
                    pbar.set_postfix(
                        {
                            "accuracy": f"{current_accuracy:.1f}%",
                            "correct": correct_answers,
                            "total": task_id + 1,
                        }
                    )
                    pbar.update(1)
                except Exception as e:
                    # Record the failure and keep evaluating the rest
                    print(f"\nError processing problem {task_id}: {str(e)}")
                    records[task_id] = {
                        "global_id": item.get("global_id", task_id),
                        "original_id": item.get("original_id", task_id),
                        "source_dataset": item.get("source_dataset", "unknown"),
                        "problem": item["problem"],
                        "ground_truth": item["answer"],
                        "error": str(e),
                        "is_correct": False,
                    }
                    pbar.update(1)
                    continue
    finally:
        # Restore logging levels
        for logger_name, level in original_levels.items():
            logging.getLogger(logger_name).setLevel(level)
    # Calculate metrics
    total_problems = len(eval_dataset)
    accuracy = correct_answers / total_problems * 100
    # Calculate Pass@k (probability that at least one of k samples is correct)
    pass_at_k_scores = []
    for record in records.values():
        if "n_correct" in record and "n_total" in record:
            pass_at_k_scores.append(1.0 if record["n_correct"] > 0 else 0.0)
    pass_at_k = sum(pass_at_k_scores) / len(pass_at_k_scores) if pass_at_k_scores else 0
    # Calculate per-source accuracies
    source_accuracies = {}
    for source, stats in source_stats.items():
        source_accuracies[source] = (
            (stats["correct"] / stats["total"] * 100) if stats["total"] > 0 else 0
        )
    results = {
        "model_type": model_type,
        "dataset": "aime_combined",
        "total_problems": total_problems,
        "correct_answers": correct_answers,
        "accuracy": accuracy,
        "pass_at_k": pass_at_k * 100,
        "source_stats": source_stats,
        "source_accuracies": source_accuracies,
        "temperature": temperature,
        "n_sampling": n_sampling,
        "max_tokens": max_tokens,
        "top_p": top_p,
        "seed": seed,
        "avg_input_tokens": sum(input_tokens) / len(input_tokens)
        if input_tokens
        else 0,
        "avg_output_tokens": sum(output_tokens) / len(output_tokens)
        if output_tokens
        else 0,
        "max_input_tokens": max(input_tokens) if input_tokens else 0,
        "max_output_tokens": max(output_tokens) if output_tokens else 0,
    }
    # Save results
    filename = f"aime_eval_combined_{model_type}_t{temperature}_n{n_sampling}.json"
    with open(filename, "w", encoding = "utf-8") as f:
        json.dump({"results": results, "records": records}, f, indent = 4)
    # Print comprehensive summary
    print(f"\n{'='*70}")
    print(f"📊 AIME EVALUATION RESULTS - {model_type.upper()}")
    print(f"{'='*70}")
    print(f"\n🎯 Overall Performance:")
    print(f"   Total problems:     {total_problems:>6}")
    print(
        f"   Correct answers:    {correct_answers:>6}/{total_problems} ({accuracy:>5.1f}%)"
    )
    print(f"   Pass@{n_sampling}: {pass_at_k:>10.1f}%")
    print(f"\n📈 Performance by Dataset:")
    for source, stats in source_stats.items():
        source_acc = source_accuracies[source]
        print(
            f"   {source:>12}: {stats['correct']:>3}/{stats['total']:>3} ({source_acc:>5.1f}%)"
        )
    print(f"\n🔧 Configuration:")
    print(f"   Temperature:        {temperature}")
    print(f"   Samples per problem: {n_sampling}")
    print(f"   Max tokens:         {max_tokens}")
    print(f"   Top-p:              {top_p}")
    print(f"   Seed:               {seed}")
    print(f"\n📝 Token Statistics:")
    print(f"   Avg input tokens:   {results['avg_input_tokens']:>10.1f}")
    print(f"   Avg output tokens:  {results['avg_output_tokens']:>10.1f}")
    print(f"   Max input tokens:   {results['max_input_tokens']:>10}")
    print(f"   Max output tokens:  {results['max_output_tokens']:>10}")
    # Performance assessment for AIME
    if accuracy >= 50:
        tier = "🏆 EXCEPTIONAL"
    elif accuracy >= 30:
        tier = "✅ EXCELLENT"
    elif accuracy >= 20:
        tier = "🎯 VERY GOOD"
    elif accuracy >= 10:
        tier = "⚠️ GOOD"
    elif accuracy >= 5:
        tier = "📈 FAIR"
    else:
        tier = "❌ NEEDS IMPROVEMENT"
    print(f"\n🎖️ AIME Performance: {tier} ({accuracy:.1f}%)")
    # BUG FIX: this previously printed the literal text "(unknown)" instead
    # of interpolating the output path.
    print(f"\n💾 Detailed results saved to: {filename}")
    print(f"\n{'='*70}")
    return results
# Comparison functions for multiple model results
def compare_aime_results(all_results):
    """Generate comprehensive comparison for AIME evaluation results.

    Args:
        all_results: list of result dicts as returned by `evaluate_model_aime`;
            the FIRST entry is treated as the base model for improvement deltas.

    Side effects:
        Prints comparison tables and writes `aime_model_comparison.json`.
    """
    print(f"\n{'='*80}")
    print("COMPREHENSIVE AIME MODEL COMPARISON")
    print(f"{'='*80}")
    # Main comparison table
    print(
        f"{'Model':<15} {'Accuracy %':<12} {'Pass@K %':<10} {'Correct':<8} {'Total':<8}"
    )
    print("-" * 80)
    for result in all_results:
        print(
            f"{result['model_type']:<15} "
            f"{result['accuracy']:<12.1f} "
            f"{result['pass_at_k']:<10.1f} "
            f"{result['correct_answers']:<8} "
            f"{result['total_problems']:<8}"
        )
    # Performance improvement analysis
    if len(all_results) > 1:
        print(f"\n{'='*50}")
        print("IMPROVEMENT ANALYSIS")
        print(f"{'='*50}")
        base_result = all_results[0]  # Assume first is base model
        for i, result in enumerate(all_results[1:], 1):
            print(f"\n{result['model_type']} vs {base_result['model_type']}:")
            accuracy_improvement = result["accuracy"] - base_result["accuracy"]
            pass_k_improvement = result["pass_at_k"] - base_result["pass_at_k"]
            print(f"  Accuracy improvement: {accuracy_improvement:+.1f}%")
            print(f"  Pass@K improvement: {pass_k_improvement:+.1f}%")
    # Dataset breakdown
    print(f"\n{'='*50}")
    print("PERFORMANCE BY DATASET")
    print(f"{'='*50}")
    # Get all unique datasets from the first result
    if all_results and "source_accuracies" in all_results[0]:
        datasets = list(all_results[0]["source_accuracies"].keys())
        print(f"{'Model':<15}", end = "")
        for dataset in datasets:
            print(f"{dataset:<15}", end = "")
        print()
        print("-" * (15 + 15 * len(datasets)))
        for result in all_results:
            print(f"{result['model_type']:<15}", end = "")
            for dataset in datasets:
                # Missing datasets in later results are shown as 0.
                accuracy = result["source_accuracies"].get(dataset, 0)
                print(f"{accuracy:<15.1f}", end = "")
            print()
    # Save comparison
    comparison_data = {
        "summary": all_results,
        "best_model": max(all_results, key = lambda x: x["accuracy"]),
    }
    with open("aime_model_comparison.json", "w") as f:
        json.dump(comparison_data, f, indent = 4)
    print(
        f"\nBest performing model: {comparison_data['best_model']['model_type']} "
        f"({comparison_data['best_model']['accuracy']:.1f}% accuracy)"
    )
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/tests/utils/cleanup_utils.py | tests/utils/cleanup_utils.py | import gc
import logging
import os
import shutil
import torch
import sys
import warnings
def clear_memory(variables_to_clear = None, verbose = False, clear_all_caches = True):
    """
    Comprehensive memory clearing for persistent memory leaks.

    Clears LRU caches, deletes the named globals, runs several GC passes,
    and empties/resets CUDA caches. Logging levels are saved up front and
    restored at the end because cache clearing can reset logger state.

    Args:
        variables_to_clear: List of variable names to clear
        verbose: Print memory status
        clear_all_caches: Clear all types of caches (recommended for memory leaks)
    """
    # Save current logging levels
    saved_log_levels = {}
    for name, logger in logging.Logger.manager.loggerDict.items():
        if isinstance(logger, logging.Logger):
            saved_log_levels[name] = logger.level
    root_level = logging.getLogger().level
    if variables_to_clear is None:
        variables_to_clear = [
            "inputs",
            "model",
            "base_model",
            "processor",
            "tokenizer",
            "base_processor",
            "base_tokenizer",
            "trainer",
            "peft_model",
            "bnb_config",
        ]
    # 1. Clear LRU caches FIRST (very important for memory leaks)
    if clear_all_caches:
        clear_all_lru_caches(verbose)
    # 2. Delete specified variables
    # NOTE(review): globals() here is THIS module's namespace, not the
    # caller's — caller-scope variables are untouched; confirm intended.
    g = globals()
    deleted_vars = []
    for var in variables_to_clear:
        if var in g:
            del g[var]
            deleted_vars.append(var)
    if verbose and deleted_vars:
        print(f"Deleted variables: {deleted_vars}")
    # 3. Multiple garbage collection passes (important for circular references)
    for i in range(3):
        collected = gc.collect()
        if verbose and collected > 0:
            print(f"GC pass {i+1}: collected {collected} objects")
    # 4. CUDA cleanup
    if torch.cuda.is_available():
        # Get memory before cleanup
        # (mem_before is only read below when verbose is still True)
        if verbose:
            mem_before = torch.cuda.memory_allocated() / 1024**3
        torch.cuda.empty_cache()
        torch.cuda.synchronize()
        # Additional CUDA cleanup for persistent leaks
        if clear_all_caches:
            # Reset memory stats
            torch.cuda.reset_peak_memory_stats()
            torch.cuda.reset_accumulated_memory_stats()
            # Clear JIT cache
            if hasattr(torch.jit, "_state") and hasattr(
                torch.jit._state, "_clear_class_state"
            ):
                torch.jit._state._clear_class_state()
            # Force another CUDA cache clear
            torch.cuda.empty_cache()
        # Final garbage collection
        gc.collect()
        if verbose:
            mem_after = torch.cuda.memory_allocated() / 1024**3
            mem_reserved = torch.cuda.memory_reserved() / 1024**3
            print(
                f"GPU memory - Before: {mem_before:.2f} GB, After: {mem_after:.2f} GB"
            )
            print(f"GPU reserved memory: {mem_reserved:.2f} GB")
            if mem_before > 0:
                print(f"Memory freed: {mem_before - mem_after:.2f} GB")
    # restore original logging levels
    logging.getLogger().setLevel(root_level)
    for name, level in saved_log_levels.items():
        if name in logging.Logger.manager.loggerDict:
            logger = logging.getLogger(name)
            logger.setLevel(level)
def clear_all_lru_caches(verbose = True):
    """Clear all LRU caches in loaded modules.

    Scans a snapshot of `sys.modules` for attributes exposing `cache_clear`
    (i.e. `functools.lru_cache`-wrapped callables) and clears them, then
    clears a short list of known library caches by dotted path. Modules
    known to emit warnings on attribute access are skipped.
    """
    cleared_caches = []
    # Modules to skip to avoid warnings
    skip_modules = {
        "torch.distributed",
        "torchaudio",
        "torch._C",
        "torch.distributed.reduce_op",
        "torchaudio.backend",
    }
    # Create a static list of modules to avoid RuntimeError
    modules = list(sys.modules.items())
    # Method 1: Clear caches in all loaded modules
    for module_name, module in modules:
        if module is None:
            continue
        # Skip problematic modules
        if any(module_name.startswith(skip) for skip in skip_modules):
            continue
        try:
            # Look for functions with lru_cache
            for attr_name in dir(module):
                try:
                    # Suppress warnings when checking attributes
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore", FutureWarning)
                        warnings.simplefilter("ignore", UserWarning)
                        warnings.simplefilter("ignore", DeprecationWarning)
                        attr = getattr(module, attr_name)
                    if hasattr(attr, "cache_clear"):
                        attr.cache_clear()
                        cleared_caches.append(f"{module_name}.{attr_name}")
                except Exception:
                    continue  # Skip problematic attributes
        except Exception:
            continue  # Skip problematic modules
    # Method 2: Clear specific known caches
    known_caches = [
        "transformers.utils.hub.cached_file",
        "transformers.tokenization_utils_base.get_tokenizer",
        "torch._dynamo.utils.counters",
    ]
    for cache_path in known_caches:
        try:
            parts = cache_path.split(".")
            module = sys.modules.get(parts[0])
            if module:
                # Walk the dotted path; bail out quietly if any hop is missing.
                obj = module
                for part in parts[1:]:
                    obj = getattr(obj, part, None)
                    if obj is None:
                        break
                if obj and hasattr(obj, "cache_clear"):
                    obj.cache_clear()
                    cleared_caches.append(cache_path)
        except Exception:
            continue  # Skip problematic caches
    if verbose and cleared_caches:
        print(f"Cleared {len(cleared_caches)} LRU caches")
def clear_specific_lru_cache(func):
    """Clear `func`'s LRU cache if it has one; return whether we did."""
    cache_clear = getattr(func, "cache_clear", None)
    if cache_clear is None:
        return False
    cache_clear()
    return True
# Additional utility for monitoring cache sizes
def monitor_cache_sizes():
    """Monitor LRU cache sizes across modules.

    Scans every loaded module for attributes exposing ``cache_info()``
    (functools LRU caches) and returns a list of dicts with keys
    ``function``, ``size``, ``hits``, ``misses``, sorted by current
    cache size, largest first.
    """
    cache_info = []
    # Snapshot sys.modules (matches clear_all_lru_caches) so imports during
    # the scan cannot raise "dictionary changed size during iteration".
    for module_name, module in list(sys.modules.items()):
        if module is None:
            continue
        try:
            for attr_name in dir(module):
                try:
                    attr = getattr(module, attr_name)
                    if hasattr(attr, "cache_info"):
                        info = attr.cache_info()
                        cache_info.append(
                            {
                                "function": f"{module_name}.{attr_name}",
                                "size": info.currsize,
                                "hits": info.hits,
                                "misses": info.misses,
                            }
                        )
                # Narrowed from bare `except:` so KeyboardInterrupt /
                # SystemExit are no longer swallowed here.
                except Exception:
                    pass
        except Exception:
            pass
    return sorted(cache_info, key = lambda x: x["size"], reverse = True)
def safe_remove_directory(path):
    """Recursively delete *path* if it is an existing directory.

    Returns True on success; prints a message and returns False otherwise.
    """
    try:
        if not (os.path.exists(path) and os.path.isdir(path)):
            print(f"Path {path} is not a valid directory")
            return False
        shutil.rmtree(path)
        return True
    except Exception as e:
        print(f"Failed to remove directory {path}: {e}")
        return False
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/device_type.py | unsloth/device_type.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"is_hip",
"get_device_type",
"DEVICE_TYPE",
"DEVICE_TYPE_TORCH",
"DEVICE_COUNT",
"ALLOW_PREQUANTIZED_MODELS",
"ALLOW_BITSANDBYTES",
]
import torch
import functools
import inspect
from unsloth_zoo.utils import Version
@functools.cache
def is_hip():
    """True when the installed torch build targets AMD ROCm/HIP."""
    version_module = getattr(torch, "version", None)
    hip_version = getattr(version_module, "hip", None)
    return bool(hip_version)
@functools.cache
def get_device_type():
    """Return the accelerator backend as a string: "cuda", "hip" or "xpu".

    Raises NotImplementedError when no supported accelerator is found, and
    RuntimeError when torch's per-backend checks disagree with
    torch.accelerator (a sign of a broken torch install).
    """
    # ROCm builds also expose torch.cuda, so use is_hip() to tell AMD apart.
    if hasattr(torch, "cuda") and torch.cuda.is_available():
        if is_hip():
            return "hip"
        return "cuda"
    elif hasattr(torch, "xpu") and torch.xpu.is_available():
        return "xpu"
    # Check torch.accelerator
    if hasattr(torch, "accelerator"):
        if not torch.accelerator.is_available():
            raise NotImplementedError(
                "Unsloth cannot find any torch accelerator? You need a GPU."
            )
        accelerator = str(torch.accelerator.current_accelerator())
        # torch.accelerator reports a backend that the dedicated checks above
        # denied - torch is most likely broken / partially installed.
        if accelerator in ("cuda", "xpu", "hip"):
            raise RuntimeError(
                f"Unsloth: Weirdly `torch.cuda.is_available()`, `torch.xpu.is_available()` and `is_hip` all failed.\n"
                f"But `torch.accelerator.current_accelerator()` works with it being = `{accelerator}`\n"
                f"Please reinstall torch - it's most likely broken :("
            )
    raise NotImplementedError(
        "Unsloth currently only works on NVIDIA, AMD and Intel GPUs."
    )
# Resolved once at import time: "cuda", "hip" or "xpu".
DEVICE_TYPE: str = get_device_type()
# HIP fails for autocast and other torch functions. Use CUDA instead
# (torch's ROCm build aliases the CUDA namespace, so "cuda" works on HIP).
DEVICE_TYPE_TORCH = DEVICE_TYPE
if DEVICE_TYPE_TORCH == "hip":
    DEVICE_TYPE_TORCH = "cuda"
@functools.cache
def get_device_count():
    """Number of visible accelerator devices for the detected backend (1 as fallback)."""
    if DEVICE_TYPE == "xpu":
        return torch.xpu.device_count()
    if DEVICE_TYPE in ("cuda", "hip"):
        return torch.cuda.device_count()
    return 1
# Resolved once at import time.
DEVICE_COUNT: int = get_device_count()

# 4-bit quantization requires a block size of 64
# this is not supported on AMD Instinct GPUs currently
# | Device Type     | Warp Size | Block Size |
# |-----------------|-----------|------------|
# | CUDA            | 32        | 64         |
# | Radeon (Navi)   | 32        | 64         |
# | Instinct (MI)   | 64        | 128        |
#
# Since bitsandbytes 0.49.0, pre-quantized models with 64 blockwise now works
# on Radeon GPUs, but not Instinct MI300x for eg [WIP]
# See https://github.com/bitsandbytes-foundation/bitsandbytes/pull/1748
ALLOW_PREQUANTIZED_MODELS: bool = True
# HSA_STATUS_ERROR_EXCEPTION checks - sometimes AMD fails for BnB
ALLOW_BITSANDBYTES: bool = True

# On ROCm, probe the installed bitsandbytes to decide whether 4-bit QLoRA
# and pre-quantized (blocksize 64) checkpoints are usable.
if DEVICE_TYPE == "hip":
    try:
        import bitsandbytes
    except:
        print(
            "Unsloth: `bitsandbytes` is not installed - 4bit QLoRA unallowed, but 16bit and full finetuning works."
        )
        ALLOW_PREQUANTIZED_MODELS = False
        ALLOW_BITSANDBYTES = False
    if ALLOW_BITSANDBYTES:
        # Builds at or below 0.48.2.dev0 are considered unusable on ROCm.
        ALLOW_BITSANDBYTES = Version(bitsandbytes.__version__) > Version("0.48.2.dev0")
        if Version(bitsandbytes.__version__) > Version("0.49.0"):
            try:
                # Pre-quantized bitsandbytes models use blocksize 64, so we need to check the GPU
                from bitsandbytes.cextension import ROCM_WARP_SIZE_64
                ALLOW_PREQUANTIZED_MODELS = not ROCM_WARP_SIZE_64
            except Exception as e:
                print(
                    "Unsloth: Checking `from bitsandbytes.cextension import ROCM_WARP_SIZE_64` had error = \n"
                    f"{str(e)}\n"
                    "4bit QLoRA disabled for now, but 16bit and full finetuning works."
                )
                ALLOW_PREQUANTIZED_MODELS = False
                ALLOW_BITSANDBYTES = False
        elif ALLOW_BITSANDBYTES:
            # Older builds: source-inspect Params4bit to detect a forced 128
            # blocksize on HIP, which breaks blocksize-64 prequantized models.
            from bitsandbytes.nn.modules import Params4bit
            if "blocksize = 64 if not HIP_ENVIRONMENT else 128" in inspect.getsource(
                Params4bit
            ):
                ALLOW_PREQUANTIZED_MODELS = False
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/tokenizer_utils.py | unsloth/tokenizer_utils.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import AutoTokenizer
from transformers.convert_slow_tokenizer import convert_slow_tokenizer
from transformers import PreTrainedTokenizerFast
import re
import os
from transformers.models.llama.modeling_llama import logger
from peft import PeftModelForCausalLM
import torch
import itertools
import collections
import numpy as np
import gc
import subprocess
import psutil
from unsloth_zoo.tokenizer_utils import (
mean_of_trained_tokens,
add_new_tokens,
fix_untrained_tokens,
)
from unsloth_zoo.training_utils import (
fix_zero_training_loss,
)
# Public API of this module.
__all__ = [
    "load_correct_tokenizer",
    "fix_sentencepiece_tokenizer",
    "check_tokenizer",
    "add_new_tokens",
    "fix_sentencepiece_gguf",
]

# Tokenizer classes whose slow/fast equivalence check is skipped because
# their combined tokenization is known to fail (see assert_same_tokenization).
IGNORED_TOKENIZER_CHECKING = frozenset(
    (
        "CodeLlamaTokenizerFast",
        "CodeLlamaTokenizer",
    )
)

IGNORED_TOKENIZER_NAMES = [
    # Qwen Coder did not train on tool calling. Math did!
    "unsloth/Qwen2.5-Coder-1.5B-Instruct",
    "unsloth/Qwen2.5-Coder-7B-Instruct",
]
# Normalize to lowercase and also cover the "-bnb-4bit" quantized variants.
IGNORED_TOKENIZER_NAMES = frozenset(
    [x.lower() for x in IGNORED_TOKENIZER_NAMES]
    + [x.lower() + "-bnb-4bit" for x in IGNORED_TOKENIZER_NAMES]
)
os.environ["UNSLOTH_IGNORED_TOKENIZER_NAMES"] = "\n".join(IGNORED_TOKENIZER_NAMES)

# Check environments
# Detect hosted notebooks from their characteristic environment variables.
keynames = "\n" + "\n".join(os.environ.keys())
IS_COLAB_ENVIRONMENT = "\nCOLAB_" in keynames
IS_KAGGLE_ENVIRONMENT = "\nKAGGLE_" in keynames
KAGGLE_TMP = "/tmp"
del keynames
def try_fix_tokenizer(tokenizer, prepend = True):
    """Repair mismatched special-token strings in a tokenizer's Rust backend.

    Serializes the (converted) fast tokenizer to its JSON string, then for
    every single-underscore ``*_token`` attribute (eos_token, bos_token, ...)
    checks that the JSON maps the token's id to the same string the tokenizer
    object reports. Mismatches are patched in both the added-tokens entry and
    the vocab entry. Returns the repaired `tokenizers.Tokenizer` object.

    When ``prepend`` is False, also strips the sentencepiece
    '{"type":"Prepend","prepend":"▁"}' normalizer (sometimes wrongly present).
    """
    if hasattr(tokenizer, "_tokenizer"):
        converted_tokenizer = tokenizer._tokenizer
    else:
        converted_tokenizer = convert_slow_tokenizer(tokenizer)
    tokenizer_string = converted_tokenizer.to_str()

    # Llama does _apple. Sometimes this is wrong!!
    prepend_text = '{"type":"Prepend","prepend":"▁"},'
    if not prepend and prepend_text in tokenizer_string:
        tokenizer_string = tokenizer_string.replace(prepend_text, "", 1)

    # Get eos_token, bos_token etc
    dir_names = dir(tokenizer)
    token_names = [x for x in dir_names if x.endswith("_token") and x.count("_") == 1]

    for token_name in token_names:
        token = getattr(tokenizer, token_name, None)
        if token is None:
            continue
        token_id = getattr(tokenizer, token_name + "_id", None)

        # Locate the token's id mapping in the string.
        find_text = f'"id":{token_id},"content":"'
        location = tokenizer_string.find(find_text)
        # BUGFIX: test find()'s result BEFORE offsetting by len(find_text).
        # The old code added the offset first, so its `start == -1` check
        # could never fire and missing ids yielded a garbage `bad_token`.
        if location == -1:
            continue
        start = location + len(find_text)
        end = tokenizer_string.find('",', start)
        bad_token = tokenizer_string[start:end]

        # Check if token is the actual same one - if not, edit it
        if bad_token != token:
            bad_text = f'{find_text}{bad_token}",'
            good_text = f'{find_text}{token}",'
            tokenizer_string = tokenizer_string.replace(bad_text, good_text, 1)

            # And replace vocab section
            bad_text = f'"{bad_token}":{token_id},'
            good_text = f'"{token}":{token_id},'
            tokenizer_string = tokenizer_string.replace(bad_text, good_text, 1)

    fixed_tokenizer = converted_tokenizer.from_str(tokenizer_string)
    return fixed_tokenizer
def get_sorted_dict(dictionary):
    """Return a dict with the same key->value pairs, ordered by ascending value.

    Uses a value->key inversion, so for duplicate values only the last key
    survives (vocab dicts have unique ids, where this does not matter).
    """
    by_value = {value: key for key, value in dictionary.items()}
    ordered = {}
    for value in sorted(dictionary.values()):
        ordered[by_value[value]] = value
    return ordered
def convert_to_fast_tokenizer(
    slow_tokenizer,
    temporary_location = "_unsloth_sentencepiece_temp",
):
    """Best-effort conversion of a slow tokenizer into a fast one.

    Builds the matching ``*TokenizerFast`` (falling back to
    PreTrainedTokenizerFast) around the slow tokenizer's converted Rust
    backend, then verifies vocab, special tokens and actual tokenization all
    match. Returns the fast tokenizer on success, otherwise returns
    ``slow_tokenizer`` unchanged.
    """
    is_fast = getattr(slow_tokenizer, "is_fast", False)
    if is_fast:
        return slow_tokenizer

    try:
        # Map e.g. "LlamaTokenizer" -> transformers.models.llama.LlamaTokenizerFast.
        # NOTE(review): eval is used for this dynamic class lookup; input is the
        # tokenizer's class name, not user-supplied data.
        tokenizer_name = slow_tokenizer.__class__.__name__
        lowered_tokenizer_name = tokenizer_name.lower()
        if lowered_tokenizer_name.endswith("tokenizer"):
            class_name = lowered_tokenizer_name[: -len("tokenizer")]
            FastTokenizer = eval(
                f'__import__(f"transformers.models.{class_name}").{tokenizer_name}Fast'
            )
        else:
            FastTokenizer = PreTrainedTokenizerFast
    except:
        FastTokenizer = PreTrainedTokenizerFast

    # Get all arguments (bos_token, etc) by scraping the class docstring's
    # "Args:" section for parameter names.
    docs = FastTokenizer.__doc__
    docs = docs[docs.find("Args:") :]
    args = re.findall(r"\n[\s]+([^\s]{1,}) \(", docs, flags = re.MULTILINE)
    args = [x for x in args if not x.endswith("_file")]

    # Also some missing maybe!
    docs = PreTrainedTokenizerFast.__doc__
    docs = docs[docs.find("Args:") :]
    args2 = re.findall(r"\n[\s]+([^\s]{1,}) \(", docs, flags = re.MULTILINE)
    args2 = [x for x in args2 if not x.endswith("_file")]
    args = list(set(args + args2))

    kwargs = {}
    for arg in args:
        kwargs[arg] = getattr(slow_tokenizer, arg, None)
    kwargs["tokenizer_object"] = try_fix_tokenizer(slow_tokenizer, prepend = True)
    fast_tokenizer = FastTokenizer(**kwargs)

    # Check if they're similar!
    sorted_slow_tokenizer = get_sorted_dict(slow_tokenizer.get_vocab())
    sorted_fast_tokenizer = get_sorted_dict(fast_tokenizer.get_vocab())
    check_vocab = sorted_slow_tokenizer == sorted_fast_tokenizer
    check_special = (
        slow_tokenizer.all_special_tokens == fast_tokenizer.all_special_tokens
    )
    # Failure so return slow_tokenizer
    if not check_vocab or not check_special:
        return slow_tokenizer

    # Now confirm if they match
    if not assert_same_tokenization(slow_tokenizer, fast_tokenizer):
        # Maybe remove prepending of __apple?
        kwargs["tokenizer_object"] = try_fix_tokenizer(slow_tokenizer, prepend = False)
        fast_tokenizer = FastTokenizer(**kwargs)
        if not assert_same_tokenization(slow_tokenizer, fast_tokenizer):
            # Failure :(
            return slow_tokenizer

    # Also tokenizer.model is missing!
    # Round-trip through disk so auxiliary files (tokenizer.model etc) exist,
    # then reload and re-verify.
    name = slow_tokenizer.name_or_path.replace("/", "_")
    if not os.path.exists(temporary_location):
        os.makedirs(temporary_location)
    new_location = f"{temporary_location}/{name}"
    slow_tokenizer.save_pretrained(new_location)
    fast_tokenizer.save_pretrained(new_location)

    # Now load it!
    fast_tokenizer = AutoTokenizer.from_pretrained(new_location)
    if assert_same_tokenization(slow_tokenizer, fast_tokenizer):
        return fast_tokenizer
    return slow_tokenizer
# Check Mistral chat template without BOS / EOS
# Reference Jinja templates used by assert_same_tokenization to compare
# slow vs fast tokenizer chat rendering.
mistral_template = (
    "{% if messages[0]['role'] == 'system' %}"
    "{% if messages[1]['role'] == 'user' %}"
    "{{ '[INST] ' + messages[0]['content'] + ' ' + messages[1]['content'] + ' [/INST]' }}"
    "{% set loop_messages = messages[2:] %}"
    "{% else %}"
    "{{ '[INST] ' + messages[0]['content'] + ' [/INST]' }}"
    "{% set loop_messages = messages[1:] %}"
    "{% endif %}"
    "{% else %}"
    "{% set loop_messages = messages %}"
    "{% endif %}"
    "{% for message in loop_messages %}"
    "{% if message['role'] == 'user' %}"
    "{{ '[INST] ' + message['content'] + ' [/INST]' }}"
    "{% elif message['role'] == 'assistant' %}"
    "{{ message['content'] }}"
    "{% else %}"
    "{{ raise_exception('Only user and assistant roles are supported!') }}"
    "{% endif %}"
    "{% endfor %}"
)

# Check Llama chat template without BOS / EOS
llama_template = (
    "{% if messages[0]['role'] == 'system' %}"
    "{% if messages[1]['role'] == 'user' %}"
    "{{ '[INST] <<SYS>>\n' + messages[0]['content'] + '\n<</SYS>>\n\n' + messages[1]['content'] + ' [/INST]' }}"
    "{% set loop_messages = messages[2:] %}"
    "{% else %}"
    "{{ '[INST] ' + messages[0]['content'] + ' [/INST]' }}"
    "{% set loop_messages = messages[1:] %}"
    "{% endif %}"
    "{% else %}"
    "{% set loop_messages = messages %}"
    "{% endif %}"
    "{% for message in loop_messages %}"
    "{% if message['role'] == 'user' %}"
    "{{ '[INST] ' + message['content'].strip() + ' [/INST]' }}"
    "{% elif message['role'] == 'assistant' %}"
    "{{ ' ' + message['content'].strip() + ' ' }}"
    "{% else %}"
    "{{ raise_exception('Only user and assistant roles are supported!') }}"
    "{% endif %}"
    "{% endfor %}"
)
def assert_same_tokenization(slow_tokenizer, fast_tokenizer):
    """True when both tokenizers produce identical input_ids for a probe
    string built from every special token; tokenizers without
    ``all_special_tokens`` pass trivially."""
    # Get eos_token, bos_token etc
    if not hasattr(slow_tokenizer, "all_special_tokens"):
        return True
    dir_names = dir(slow_tokenizer)
    # Collect every non-None single-underscore *_token attribute.
    special_tokens = list(
        filter(
            None,
            (
                getattr(slow_tokenizer, x)
                for x in dir_names
                if x.endswith("_token") and x.count("_") == 1
            ),
        )
    )
    all_special_tokens = list(set(special_tokens + slow_tokenizer.all_special_tokens))

    # Remove replacement char for false positive
    replacement_char = b"\xc3\xaf\xc2\xbf\xc2\xbd".decode("utf-8")
    all_special_tokens = [x for x in all_special_tokens if x != replacement_char]

    # Check if chat template is enabled!
    # (Chat-template comparison is currently disabled - see the note below -
    # so these all stay True.)
    check_chat_template1 = True
    check_chat_template2 = True
    check_chat_template3 = True
    """
    Weirdly Mistral tokenizers are actually correct??
    Ie below will actually load mistral v1 and v3 incorrectly!

    slow_chat_template = getattr(slow_tokenizer, "chat_template", None)
    fast_chat_template = getattr(fast_tokenizer, "chat_template", None)
    messages = [
        {"role": "user", "content": " What is 2+2? "},
        {"role": "assistant", "content": " It's 4. "},
    ]
    # Check the tokenizer's own chat template
    if slow_chat_template is not None and fast_chat_template is not None:
        check_chat_template1 = \
            slow_tokenizer.apply_chat_template(messages) == \
            fast_tokenizer.apply_chat_template(messages)
    pass

    # Check Mistral chat template without BOS / EOS
    slow_tokenizer.chat_template = mistral_template
    fast_tokenizer.chat_template = mistral_template
    check_chat_template2 = \
        slow_tokenizer.apply_chat_template(messages) == \
        fast_tokenizer.apply_chat_template(messages)
    pass

    # Check Llama chat template without BOS / EOS
    slow_tokenizer.chat_template = llama_template
    fast_tokenizer.chat_template = llama_template
    check_chat_template3 = \
        slow_tokenizer.apply_chat_template(messages) == \
        fast_tokenizer.apply_chat_template(messages)
    pass

    # Combine them all and revert chat templates
    slow_tokenizer.chat_template = slow_chat_template
    fast_tokenizer.chat_template = fast_chat_template
    """
    check_chat_template = (
        check_chat_template1 and check_chat_template2 and check_chat_template3
    )

    # Try special tokens
    try:
        string = (
            "\n".join(all_special_tokens)
            + "A quick brown fox jumps over the lazy dog!!\n\nHi</s>\n\n"
            + "".join(all_special_tokens)
        )
        check_special_tokens = (
            slow_tokenizer(string).input_ids == fast_tokenizer(string).input_ids
        )
        return check_chat_template and check_special_tokens
    except:
        # For eg see https://github.com/unslothai/unsloth/issues/292
        # Sometimes tokenizer has weird tokens, causing a combined tokenization to fail.
        # [TODO] We temporarily disable this for CodeLlama tokenizers
        if slow_tokenizer.__repr__().split("(", 1)[0] in IGNORED_TOKENIZER_CHECKING:
            return check_chat_template
        else:
            return False
def fix_sentencepiece_tokenizer(
    old_tokenizer,
    new_tokenizer,
    token_mapping,
    temporary_location = "_unsloth_sentencepiece_temp",
):
    """Rename tokens inside a sentencepiece ``tokenizer.model``.

    Saves ``old_tokenizer`` to ``temporary_location``, patches each
    ``old_token -> new_token`` pair of ``token_mapping`` directly in the
    protobuf pieces, writes the model back, and reloads a tokenizer that
    combines ``new_tokenizer``'s config with the patched pieces. Returns
    ``new_tokenizer`` unchanged when no ``tokenizer.model`` file exists.
    """
    # From https://github.com/google/sentencepiece/issues/121
    # We need to manually edit the sentencepiece tokenizer!
    try:
        from transformers.convert_slow_tokenizer import import_protobuf
        sentencepiece_model_pb2 = import_protobuf()
    except Exception as e:
        try:
            import google.protobuf
            from unsloth_zoo.utils import Version
            protobuf_version = Version(google.protobuf.__version__)
            if protobuf_version > Version("3.20.3"):
                # NOTE(review): this RuntimeError is raised inside the inner
                # try, so the bare except below swallows it and falls through
                # to the legacy import - presumably intentional best-effort.
                raise RuntimeError(
                    f"Unsloth: Your protobuf version = {protobuf_version} is too new.\n"
                    f"Please downgrade via `pip install --force-reinstall protobuf==3.20.3`"
                )
        except:
            # This will only work for older SentencePiece versions <= 3.20.3
            from transformers.utils import sentencepiece_model_pb2

    if not os.path.exists(temporary_location):
        os.makedirs(temporary_location)

    # First save the old tokenizer
    old_tokenizer.save_pretrained(temporary_location)

    # BUGFIX: check for tokenizer.model AFTER saving the old tokenizer.
    # The old code checked before saving, so it tested stale files from a
    # previous call and always bailed out on a fresh temporary directory.
    if not os.path.isfile(f"{temporary_location}/tokenizer.model"):
        return new_tokenizer

    tokenizer_file = sentencepiece_model_pb2.ModelProto()
    tokenizer_file.ParseFromString(
        open(f"{temporary_location}/tokenizer.model", "rb").read()
    )

    # Now save the new tokenizer (its config/vocab files); its tokenizer.model
    # is overwritten with the patched pieces below.
    new_tokenizer.save_pretrained(temporary_location)

    # Now correct the old tokenizer's .model file
    for old_token, new_token in token_mapping.items():
        ids = old_tokenizer([old_token], add_special_tokens = False).input_ids
        ids = ids[0]
        # Only single-piece tokens can be renamed in place.
        if len(ids) != 1:
            # Skip this token!
            print(
                f"Skip mapping {old_token} to {new_token} since {new_token} is already in the tokenizer!"
            )
            continue
        ids = ids[0]

        # [TODO] Hack for Starling - try except
        try:
            tokenizer_piece = tokenizer_file.pieces[ids]
        except:
            continue
        assert tokenizer_piece.piece == old_token
        tokenizer_piece.piece = new_token

    # And now write it
    with open(f"{temporary_location}/tokenizer.model", "wb") as file:
        file.write(tokenizer_file.SerializeToString())

    # And load it!
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained(
        temporary_location,
        eos_token = new_tokenizer.eos_token,
        pad_token = new_tokenizer.pad_token,
    )
    return tokenizer
def fix_sentencepiece_gguf(saved_location):
    """
    Fixes sentencepiece tokenizers which did not extend the vocabulary with
    user defined tokens.
    Inspiration from https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py

    Reads ``tokenizer.model`` and ``added_tokens.json`` from
    ``saved_location``; when the added-token ids form a contiguous run
    starting right after the sentencepiece vocab, appends them as
    USER_DEFINED pieces and rewrites ``tokenizer.model`` in place.
    Returns None; silently no-ops when preconditions are not met.
    """
    from copy import deepcopy
    from transformers.utils import sentencepiece_model_pb2
    import json
    from enum import IntEnum

    class SentencePieceTokenTypes(IntEnum):
        NORMAL = 1
        UNKNOWN = 2
        CONTROL = 3
        USER_DEFINED = 4
        UNUSED = 5
        BYTE = 6

    # Load tokenizer.model
    tokenizer_file = sentencepiece_model_pb2.ModelProto()
    if not os.path.isfile(f"{saved_location}/tokenizer.model"):
        return
    tokenizer_file.ParseFromString(
        open(f"{saved_location}/tokenizer.model", "rb").read()
    )
    sentence_piece_size = len(tokenizer_file.pieces)

    # Load added_tokens_json
    if not os.path.isfile(f"{saved_location}/added_tokens.json"):
        return
    with open(f"{saved_location}/added_tokens.json", "r", encoding = "utf-8") as file:
        added_tokens_json = json.load(file)
    if len(added_tokens_json) == 0:
        return

    # Sort added tokens by id so pieces are appended in order.
    added_tokens_json = dict(
        sorted(added_tokens_json.items(), key = lambda item: item[1])
    )
    new_size = sentence_piece_size + len(added_tokens_json)

    # Confirm added_tokens_json is correct: ids must be contiguous.
    added_tokens_ids = np.array(list(added_tokens_json.values()))
    diff = np.diff(added_tokens_ids)
    # BUGFIX: with a single added token np.diff is empty, and calling
    # .min()/.max() on an empty array raises ValueError. Only validate
    # contiguity when there are at least two ids.
    if diff.size and (diff.min() != 1 or diff.max() != 1):
        return
    # Ids must start immediately after the existing sentencepiece vocab.
    if added_tokens_ids.min() != sentence_piece_size:
        return

    # Edit sentence piece tokens with added_tokens_json
    logger.warning(
        f"Unsloth: Extending {saved_location}/tokenizer.model with added_tokens.json.\n"
        f"Originally tokenizer.model is of size ({sentence_piece_size}).\n"
        f"But we need to extend to sentencepiece vocab size ({new_size})."
    )

    # Clone trailing pieces as templates, then overwrite their fields.
    new_tokens = deepcopy(tokenizer_file.pieces[-len(added_tokens_ids) :])
    for new_token, added_token in zip(new_tokens, added_tokens_json.keys()):
        new_token.piece = added_token.encode("utf-8")
        new_token.score = -1000.0
        new_token.type = SentencePieceTokenTypes.USER_DEFINED
    tokenizer_file.pieces.extend(new_tokens)

    with open(f"{saved_location}/tokenizer.model", "wb") as file:
        file.write(tokenizer_file.SerializeToString())

    # Add padding tokens
    # actual_vocab_size = model.config.vocab_size
    # padding = actual_vocab_size - len(tokenizer_file.pieces)
    return
def _load_correct_tokenizer(
    tokenizer_name,
    model_max_length = None,
    padding_side = "right",
    token = None,
    trust_remote_code = False,
    cache_dir = "huggingface_tokenizers_cache",
    fix_tokenizer = True,
):
    """Load a tokenizer, preferring the fast variant but validating it
    against the slow one when possible; falls back to converting the slow
    tokenizer if they disagree."""
    # Pick a cache directory suited to the hosting environment.
    if IS_COLAB_ENVIRONMENT:
        cache_dir = cache_dir
    elif IS_KAGGLE_ENVIRONMENT:
        # /tmp of Kaggle seems has a 80GB limit!
        # Let's utilize them
        cache_dir = os.path.join(KAGGLE_TMP, cache_dir)
    else:
        cache_dir = None

    # Try loading the slow tokenizer. If it fails, then try Fast only
    # Mainly to solve Deepseek models with no tokenizer.model file
    slow_tokenizer = None
    try:
        slow_tokenizer = AutoTokenizer.from_pretrained(
            tokenizer_name,
            model_max_length = model_max_length,
            padding_side = padding_side,
            token = token,
            trust_remote_code = trust_remote_code,
            # Cannot just use use_fast = False as per https://twitter.com/danielhanchen/status/1789659394302718373
            use_fast = False,
            legacy = False,
            from_slow = True,
            cache_dir = cache_dir,
        )
    except:
        slow_tokenizer = None
        # print(
        #     f"Unsloth: {tokenizer_name} has no tokenizer.model file.\n"\
        #     "Just informing you about this - this is not a critical error."
        # )

    # Unsure why this occurs!
    if type(slow_tokenizer) is bool:
        slow_tokenizer = None

    fast_tokenizer = AutoTokenizer.from_pretrained(
        tokenizer_name,
        model_max_length = model_max_length,
        padding_side = padding_side,
        token = token,
        trust_remote_code = trust_remote_code,
        cache_dir = cache_dir,
    )
    if not fix_tokenizer or tokenizer_name in IGNORED_TOKENIZER_NAMES:
        return fast_tokenizer
    # Ignore Mistral ones - they're a bit weird to handle!
    elif "mistral" in tokenizer_name.lower():
        return fast_tokenizer
    # Ignore Phi-4 ones as well
    elif "phi-4" in tokenizer_name.lower():
        return fast_tokenizer
    elif slow_tokenizer is not None:
        # Copy the BOS/EOS auto-add flags over from the slow tokenizer.
        if hasattr(fast_tokenizer, "add_bos_token") and hasattr(
            slow_tokenizer, "add_bos_token"
        ):
            fast_tokenizer.add_bos_token = slow_tokenizer.add_bos_token
        if hasattr(fast_tokenizer, "add_eos_token") and hasattr(
            slow_tokenizer, "add_eos_token"
        ):
            fast_tokenizer.add_eos_token = slow_tokenizer.add_eos_token

        # Confirm if slow and fast are equivalent!
        if assert_same_tokenization(slow_tokenizer, fast_tokenizer):
            return fast_tokenizer
        else:
            logger.warning(
                f"Unsloth: Will load {tokenizer_name} as a legacy tokenizer."
            )
            return convert_to_fast_tokenizer(slow_tokenizer)
        pass
    else:
        return fast_tokenizer
def load_correct_tokenizer(
    tokenizer_name,
    model_max_length = None,
    padding_side = "right",
    token = None,
    trust_remote_code = False,
    cache_dir = "huggingface_tokenizers_cache",
    fix_tokenizer = True,
):
    """Load a tokenizer via _load_correct_tokenizer, then repair its chat template."""
    tokenizer = _load_correct_tokenizer(
        tokenizer_name = tokenizer_name,
        model_max_length = model_max_length,
        padding_side = padding_side,
        token = token,
        trust_remote_code = trust_remote_code,
        cache_dir = cache_dir,
        fix_tokenizer = fix_tokenizer,
    )

    ### 1. Fixup tokenizer's chat_template
    old_chat_template = getattr(tokenizer, "chat_template", None)
    lowered_name = str(getattr(tokenizer, "name_or_path", "")).lower()

    # Llama-2 old style templates already carry bos/eos markers.
    is_llama2_style = (
        old_chat_template is not None
        and "[/INST]" in old_chat_template
        and "[INST]" in old_chat_template
        and "bos_token" in old_chat_template
        and "eos_token" in old_chat_template
    )

    # Ignore mistral type models since they don't have an add_generation_prompt;
    # keep Llama-2 style templates untouched as well.
    if "mistral" in lowered_name or is_llama2_style:
        chat_template = old_chat_template
    else:
        chat_template = fix_chat_template(tokenizer)
        if old_chat_template is not None and chat_template is None:
            raise RuntimeError(
                "Unsloth: Fixing chat template failed - please file a report immediately!"
            )

    tokenizer.chat_template = chat_template
    return tokenizer
def _find_end_position(template, endfor, endif):
where_endfor = template.find(endfor)
where_endif = template.find(endif)
if where_endfor == where_endif == -1:
return None
elif where_endfor > where_endif:
return endfor
else:
return endif
def _fix_chat_template(chat_template):
endfor = "{% endfor %}"
endif = "{% endif %}"
chosen_end = _find_end_position(chat_template, endfor, endif)
if chosen_end is None:
endfor = "{%- endfor %}"
endif = "{%- endif %}"
chosen_end = _find_end_position(chat_template, endfor, endif)
if chosen_end is None:
return chat_template
where = chat_template.find(chosen_end)
after_endfor = chat_template[where + len(chosen_end) :]
dash = "-" if chosen_end.startswith("{%-") else ""
if (
"{%" + dash + " if" not in after_endfor
and "{%" + dash + " set " not in after_endfor
and after_endfor.startswith("{{")
and after_endfor.endswith("}}")
and after_endfor.count("{{") == 1
and after_endfor.count("}}") == 1
):
after_endfor = (
"{%" + dash + " if add_generation_prompt %}" + after_endfor + endif
)
chat_template = chat_template[: where + len(chosen_end)] + after_endfor
return chat_template
def fix_chat_template(tokenizer):
    """Validate (and if needed patch) the tokenizer's chat template so that
    ``add_generation_prompt = True`` actually changes the rendered prompt.

    Returns the (possibly patched) template string, or None when the
    tokenizer has no chat template. Raises RuntimeError when the template
    claims to support add_generation_prompt but demonstrably ignores it.
    """
    chat_template = getattr(tokenizer, "chat_template", None)
    if chat_template is None:
        return None

    ### 1. Check if add_generation_prompt works
    # Check for ShareGPT style first
    is_sharegpt = None
    try:
        messages = [
            {"role": "user", "content": "Who are you?"},
        ]
        tokenizer.apply_chat_template(
            messages, add_generation_prompt = False, tokenize = False
        )
        is_sharegpt = False
    except:
        try:
            messages = [
                {"from": "human", "value": "Who are you?"},
            ]
            tokenizer.apply_chat_template(
                messages, add_generation_prompt = False, tokenize = False
            )
            is_sharegpt = True
        except:
            is_sharegpt = None

    # Not ShareGPT or HF style - just return
    if is_sharegpt is None:
        return chat_template

    # Tokenize
    # Render the same message with and without the generation prompt.
    messages = [
        {"role": "user", "content": "Who are you?"}
        if not is_sharegpt
        else {"from": "human", "value": "Who are you?"}
    ]
    no = tokenizer.apply_chat_template(
        messages, add_generation_prompt = False, tokenize = False
    )
    yes = tokenizer.apply_chat_template(
        messages, add_generation_prompt = True, tokenize = False
    )

    if no == yes:
        # SAME?! That's not good! We check for add_generation_prompt
        if (
            "{% if add_generation_prompt %}" not in chat_template
            and "{%- if add_generation_prompt %}" not in chat_template
        ):
            # Try fixing it by adding it
            new_chat_template = _fix_chat_template(chat_template)
            if (
                "{% if add_generation_prompt %}" not in new_chat_template
                and "{%- if add_generation_prompt %}" not in new_chat_template
            ):
                raise RuntimeError(
                    f"Unsloth: The tokenizer `{tokenizer.name_or_path}`\n"
                    "does not have a {% if add_generation_prompt %} for generation purposes.\n"
                    f"Please file a bug report to the maintainers of `{tokenizer.name_or_path}` - thanks!"
                )
            else:
                logger.warning_once(
                    "Unsloth: We successfully patched the tokenizer to add a {% if add_generation_prompt %} to the chat_template.\n"
                    f"This is not a bug, but please notify the maintainers of `{tokenizer.name_or_path}` - thanks!"
                )
                chat_template = new_chat_template
        else:
            # The guard exists in the template but rendering ignores it.
            raise RuntimeError(
                f"Unsloth: The tokenizer `{tokenizer.name_or_path}`\n"
                "has a {% if add_generation_prompt %} for generation purposes, but wasn't provided correctly.\n"
                "Please file a bug report immediately - thanks!"
            )
    return chat_template
def check_tokenizer(
    model,
    tokenizer,
    model_name = "unsloth/llama-2-7b-bnb-4bit",
    model_max_length = 4096,
    padding_side = "right",
    token = None,
    _reload = True,
):
    """Check `tokenizer` for added tokens whose ids exceed the model's
    embedding matrix, and try to repair them.

    On the first pass (``_reload = True``) reloads the slow tokenizer and
    recurses once; on the second pass tries to delete the offending tokens.
    Returns a (possibly repaired, possibly fast-converted) tokenizer, or
    raises RuntimeError when the out-of-bounds tokens cannot be removed.
    """
    # Checks tokenizer for out of bounds ids.
    # Mainly a fix for https://huggingface.co/berkeley-nest/Starling-LM-7B-alpha
    # where <sep> had token id=32002.
    # See https://huggingface.co/berkeley-nest/Starling-LM-7B-alpha/discussions/25
    # Seems like the Fast tokenizer in Rust breaks things!

    # We ignore some of them!
    if tokenizer.__repr__().split("(", 1)[0] in IGNORED_TOKENIZER_CHECKING:
        return tokenizer

    max_embedding_size = model.model.embed_tokens.weight.shape[0]
    added_tokens_fast = tokenizer.added_tokens_decoder
    added_tokens_fast = {
        index: str(value) for index, value in added_tokens_fast.items()
    }
    # Sort the added tokens by id so everything from the first offender on
    # can be sliced out below.
    sorted_keys = sorted(added_tokens_fast)
    added_tokens_fast = {key: added_tokens_fast[key] for key in sorted_keys}

    for j, index in enumerate(added_tokens_fast.keys()):
        if index >= max_embedding_size:
            bad_indices = list(added_tokens_fast.keys())[j:]
            bad_tokens = list(added_tokens_fast.values())[j:]

            if not _reload:
                # Try removing the token
                added_tokens = [str(x) for x in tokenizer.added_tokens_decoder.values()]
                special_tokens = tokenizer.special_tokens_map
                import itertools

                special_tokens = frozenset(
                    itertools.chain.from_iterable(
                        [x] if type(x) is str else x for x in special_tokens.values()
                    )
                )
                can_be_removed1 = [x for x in bad_tokens if x not in special_tokens]
                can_be_removed2 = [
                    x
                    for x in can_be_removed1
                    if x in tokenizer._added_tokens_encoder.keys()
                ]

                # Check of extra tokens can in fact we removed!
                can_be_removed = (len(can_be_removed1) == len(bad_tokens)) and (
                    len(can_be_removed2) == len(bad_tokens)
                )

                # Check if sep_token or other generic types
                remove_generic = False
                try_mapper = []
                if not can_be_removed:
                    names = dir(tokenizer)
                    names = (
                        x for x in names if x.endswith("_token") and x.count("_") == 1
                    )
                    generic_tokens = [(x, getattr(tokenizer, x, None)) for x in names]

                    try_removal = []
                    for token in bad_tokens:
                        for name_token, check_token in generic_tokens:
                            if check_token == token:
                                try_removal.append(token)
                                try_mapper.append(name_token)

                    # Recheck!
                    can_be_removed = len(try_removal) == len(bad_tokens)
                    if can_be_removed:
                        remove_generic = True
                        can_be_removed1 = bad_tokens

                if can_be_removed:
                    # Yes it can be fixed!
                    for j, bad_token in enumerate(can_be_removed1):
                        remove_id = tokenizer._added_tokens_encoder[bad_token]
                        del tokenizer._added_tokens_decoder[remove_id]
                        del tokenizer._added_tokens_encoder[bad_token]
                        if remove_generic and (try_removal[j] == bad_token):
                            # Remove sep token for example
                            setattr(tokenizer, try_mapper[j], None)
                            setattr(tokenizer, try_mapper[j] + "_id", None)

                    # Confirm 1 more time!
                    if max(tokenizer.added_tokens_decoder.keys()) < max_embedding_size:
                        logger.warning_once(
                            f"Unsloth loaded a broken tokenizer `{model_name}`, but managed to repair it!\n"
                            f"Tokens {bad_tokens} with ids {bad_indices} exceeds the max vocab size of {max_embedding_size}.\n"
                            "We removed these bad tokens. If you think this is incorrect, fix your tokenizer first."
                        )
                        return convert_to_fast_tokenizer(tokenizer)

                # :( Failure
                raise RuntimeError(
                    f"Unsloth tried to load `{model_name}`, but cannot succeed.\n"
                    f"Tokens {bad_tokens} with ids {bad_indices} exceeds the max vocab size of {max_embedding_size}.\n"
                    f"Fix your tokenizer since it'll perform out of bounds memory accesses."
                )

            if IS_COLAB_ENVIRONMENT or IS_KAGGLE_ENVIRONMENT:
                cache_dir = "huggingface_tokenizers_cache"
            else:
                cache_dir = None

            # Sometimes slow tokenizer does not work like Deepseek
            try:
                # Try slow tokenizer which can fix things!
                tokenizer = AutoTokenizer.from_pretrained(
                    model_name,
                    model_max_length = model_max_length,
                    padding_side = padding_side,
                    token = token,
                    # Cannot just use use_fast = False as per https://twitter.com/danielhanchen/status/1789659394302718373
                    use_fast = False,
                    legacy = False,
                    from_slow = True,
                    cache_dir = cache_dir,
                )
                # Recurse once with _reload = False to attempt token removal.
                # (An unreachable `break` that followed this return was removed.)
                return check_tokenizer(
                    model = model,
                    tokenizer = tokenizer,
                    model_name = model_name,
                    model_max_length = model_max_length,
                    padding_side = padding_side,
                    token = token,
                    _reload = False,
                )
            except:
                # Tokenizer has out of bounds issues and we can't
                # load the slow tokenizer version :(
                logger.warning_once(
                    "Unsloth: Tokenizer is most likely buggy, and Unsloth failed to repair it.\n"
                    "It will still work, but beware of out of bounds memory accesses.\n"
                    "Please file an issue on the model owner's repo about this issue."
                )
                return tokenizer
    return convert_to_fast_tokenizer(tokenizer)
import inspect
from inspect import getsource
import trl
import trl.trainer.sft_trainer
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | true |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/trainer.py | unsloth/trainer.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import psutil
import warnings
from dataclasses import dataclass, field
from typing import Optional
from functools import wraps
import trl
import inspect
from trl import SFTTrainer
from . import is_bfloat16_supported
from unsloth.utils import (
configure_padding_free,
configure_sample_packing,
enable_padding_free_metadata,
enable_sample_packing,
)
from unsloth_zoo.training_utils import (
unsloth_train as _unsloth_train,
)
from unsloth_zoo.vision_utils import (
UnslothVisionDataCollator,
)
from unsloth_zoo.hf_utils import get_transformers_model_type
from unsloth_zoo.utils import Version
import dataclasses
# Public API of this module.
__all__ = [
    "UnslothTrainingArguments",
    "UnslothTrainer",
    "unsloth_train",
    "_patch_trl_trainer",
    "UnslothVisionDataCollator",
]

logger = logging.getLogger(__name__)

# Users can opt out of automatic padding-free batching with the
# UNSLOTH_DISABLE_AUTO_PADDING_FREE environment variable (any truthy spelling).
_AUTO_PADDING_FREE_ENV_DISABLED = os.environ.get(
    "UNSLOTH_DISABLE_AUTO_PADDING_FREE", ""
).strip().lower() in {"1", "true", "yes", "on"}

# Model types that must never use padding-free batching.
PADDING_FREE_BLOCKLIST = {
    "gemma2", # - gemma2: Uses slow_attention_softcapping which has torch.compile issues
    "gpt_oss", # - gpt_oss: Uses Flex Attention which doesn't handle padding_free correctly
}
def _should_pack(config) -> bool:
    """Return True when *config* requests sample packing and has not opted out.

    Opt-out is signalled by the private ``_unsloth_disable_auto_packing``
    attribute set by ``_disable_sample_packing``.
    """
    if config is None:
        return False
    wants_packing = bool(getattr(config, "packing", False))
    vetoed = bool(getattr(config, "_unsloth_disable_auto_packing", False))
    return wants_packing and not vetoed
def _should_auto_padding_free(config) -> bool:
    """Decide whether padding-free batching should be switched on automatically.

    Returns False when there is no config, when the user disabled the feature
    via the UNSLOTH_DISABLE_AUTO_PADDING_FREE environment flag, when packing is
    already requested (packing supersedes padding-free), or when padding-free
    was explicitly set by the user already.
    """
    if config is None:
        return False
    if _AUTO_PADDING_FREE_ENV_DISABLED:
        return False
    if getattr(config, "packing", False):
        return False
    return not getattr(config, "padding_free", False)
def _disable_sample_packing(config):
    """Force packing and padding-free off on *config* and mark it opted out.

    Also restores ``remove_unused_columns`` to True (packing mode needs the
    extra columns, plain training does not). No-op when *config* is None.
    """
    if config is None:
        return
    for flag in ("packing", "padding_free"):
        if hasattr(config, flag):
            setattr(config, flag, False)
    if hasattr(config, "remove_unused_columns"):
        config.remove_unused_columns = True
    # Remember the opt-out so _should_pack never re-enables packing later.
    config._unsloth_disable_auto_packing = True
# Lower-case substrings of trainer errors that mean auto-packing must be
# abandoned (and the init retried without packing) rather than re-raised.
_AUTO_PACK_SKIP_MESSAGES = (
    "packing is not supported",
    "padding-free training",
    "passing a custom data collator",
)


def _should_skip_auto_packing_error(exc: Exception) -> bool:
    """Return True when *exc* matches a known packing-incompatibility message."""
    text = str(exc).lower()
    for marker in _AUTO_PACK_SKIP_MESSAGES:
        if marker in text:
            return True
    return False
# Unsloth gradient accumulation fix:
from transformers import __version__ as transformers_version, ProcessorMixin
if Version(transformers_version) > Version("4.45.2"):
    # Recent transformers already contains the gradient accumulation fix, so
    # `unsloth_train` is a plain pass-through to the normal training loop.
    def unsloth_train(trainer, *args, **kwargs):
        """Run ``trainer.train(...)`` (transformers is new enough to need no fix)."""
        return trainer.train(*args, **kwargs)
else:
    # Older transformers mis-normalizes the loss under gradient accumulation;
    # fall back to Unsloth's own fixed (but feature-incomplete) training loop.
    def unsloth_train(trainer, *args, **kwargs):
        """Run Unsloth's fixed training loop; extra arguments are unsupported."""
        if len(args) != 0 or len(kwargs) != 0:
            raise RuntimeError(
                "Unsloth: Our custom gradient accumulation fixed trainer does not support other arguments.\n"
                "If you want to use our fix inside of HF, please update `transformers` to the latest version via:\n"
                "`pip uninstall transformers -y && pip install --upgrade --no-cache-dir transformers`"
            )
        print(
            "Unsloth: Using our custom gradient accumulation fixed trainer, which is not feature complete.\n"
            "If you want to use our fix inside of HF, please update `transformers` to the latest version via:\n"
            "`pip uninstall transformers -y && pip install --upgrade --no-cache-dir transformers`"
        )
        return _unsloth_train(trainer)
try:
from trl import SFTConfig as TrainingArguments
except:
from transformers import TrainingArguments
class UnslothTrainingArguments(TrainingArguments):
    """Training arguments with an extra learning rate for embedding modules.

    ``embedding_learning_rate`` is the learning rate applied to parameters
    saved via PEFT's ``modules_to_save`` (e.g. ``embed_tokens`` / ``lm_head``).
    ``UnslothTrainer.create_optimizer`` reads it back through
    ``getattr(self.args, "embedding_learning_rate", None)``, so it must be
    stored on the instance.
    """

    def __init__(self, embedding_learning_rate: float = None, *args, **kwargs):
        # Bug fix: this used to be `embedding_learning_rate =
        # embedding_learning_rate`, a no-op local self-assignment that
        # silently dropped the value — the attribute was never set, so
        # UnslothTrainer always fell back to the default optimizer.
        self.embedding_learning_rate = embedding_learning_rate
        super().__init__(*args, **kwargs)
def _create_unsloth_optimizer(
    model,
    optimizer_cls,
    optimizer_kwargs,
    embedding_lr = 5e-5,
):
    """Build an optimizer with a separate learning rate for saved embeddings.

    Trainable parameters whose names end with
    ``modules_to_save.default.weight`` (PEFT-saved embedding / head weights)
    go into a group using ``embedding_lr``; every other trainable parameter
    keeps the regular ``lr`` from ``optimizer_kwargs``. Both groups share the
    same weight decay.
    """
    lr = optimizer_kwargs["lr"]
    weight_decay = optimizer_kwargs.get("weight_decay", 0.0)

    regular_params = []
    embedding_params = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights are excluded entirely
        if name.endswith("modules_to_save.default.weight"):
            # Report which module gets the special embedding learning rate.
            partial_name = name[: -len(".modules_to_save.default.weight")]
            partial_name = partial_name[partial_name.rfind(".") + 1 :]
            print(
                f"Unsloth: Setting lr = {embedding_lr:.2e} instead of {lr:.2e} for {partial_name}."
            )
            embedding_params.append(param)
        else:
            regular_params.append(param)

    grouped_parameters = [
        {"params": regular_params, "weight_decay": weight_decay, "lr": lr},
        {"params": embedding_params, "weight_decay": weight_decay, "lr": embedding_lr},
    ]
    return optimizer_cls(grouped_parameters, **optimizer_kwargs)
class UnslothTrainer(SFTTrainer):
    """``SFTTrainer`` variant that honors ``embedding_learning_rate``.

    When ``self.args`` carries a non-None ``embedding_learning_rate``, the
    optimizer is built by ``_create_unsloth_optimizer`` so that PEFT-saved
    embedding modules train at that rate; otherwise behaves exactly like the
    stock ``SFTTrainer``.
    """

    def create_optimizer(self):
        """Create (or reuse) the optimizer, splitting the embedding lr if set."""
        embedding_lr = getattr(self.args, "embedding_learning_rate", None)
        if embedding_lr is None:
            # No special embedding rate requested -> stock behavior.
            return super().create_optimizer()
        if self.optimizer is not None:
            return self.optimizer
        opt_cls, opt_kwargs = SFTTrainer.get_optimizer_cls_and_kwargs(self.args)
        self.optimizer = _create_unsloth_optimizer(
            self.model,
            opt_cls,
            opt_kwargs,
            embedding_lr,
        )
        return self.optimizer
# From `trl>=0.13.0`, they changed how to pass several params to the trainer
# We need to patch to make the transition smooth
def _backwards_compatible_trainer(trainer_class, config_class):
    """Return a replacement ``__init__`` for *trainer_class* that accepts the
    pre-0.13 TRL calling convention.

    The wrapper renames ``tokenizer=`` to ``processing_class=`` where the new
    signature expects it and, on TRL >= 0.13, splits keyword arguments between
    the trainer itself and *config_class* (e.g. ``SFTConfig``), rebuilding the
    ``args`` object as a proper config instance when needed.
    """
    original_init = trainer_class.__init__

    @wraps(original_init)
    def new_init(self, *args, **kwargs):
        # All Trainer tokenizer are now called processing_class
        trainer_params = set(inspect.signature(original_init).parameters.keys())
        if "processing_class" in trainer_params and "tokenizer" in kwargs:
            kwargs["processing_class"] = kwargs.pop("tokenizer")
        if ("args" in kwargs) and (Version(trl.__version__) >= Version("0.13.0.dev0")):
            training_args = kwargs.pop("args", None)
            # Get parameters that Trainer.__init__ actually expects
            trainer_params.remove("self")
            trainer_params.remove("args")
            # Get fields that should be passed to Config init
            config_fields = {
                field.name: field
                for field in dataclasses.fields(config_class)
                if field.init
            }
            # Create config dict with valid fields from training_args
            config_dict = {
                name: getattr(training_args, name)
                for name in config_fields
                if hasattr(training_args, name)
            }
            # Get parameters that exist in Config but not in TrainingArguments
            from transformers import TrainingArguments
            moved_params = set(inspect.signature(config_class).parameters.keys()) - set(
                inspect.signature(TrainingArguments).parameters.keys()
            )
            # Separate kwargs into trainer kwargs and config kwargs
            trainer_kwargs = {}
            additional_config_kwargs = {}
            for key, value in kwargs.items():
                if key in trainer_params:
                    trainer_kwargs[key] = value
                elif key in moved_params or key in config_fields:
                    additional_config_kwargs[key] = value
                else:
                    # NOTE(review): this branch is identical to the `elif`
                    # above — every non-trainer kwarg ends up in the config.
                    # Presumably intentional (unknown kwargs are forwarded so
                    # the config can validate them) — confirm before changing.
                    additional_config_kwargs[key] = value
            # Update config_dict with additional kwargs
            config_dict.update(additional_config_kwargs)
            # Create Config with all the collected parameters
            # Reinitialising config class with parameters (that were none initially but populated on first init)
            # causes the 2nd init to fail as there are mutual exclusive checks on pairs of parameters.
            # Refer: https://github.com/huggingface/trl/blob/main/trl/trainer/grpo_config.py#L499-L502 for example
            # So we only create config class if the previous init was not TrainingArguments
            if not isinstance(training_args, TrainingArguments):
                config = config_class(**config_dict)
            else:
                config = training_args
            # Reconstruct kwargs for Trainer
            kwargs = trainer_kwargs
            kwargs["args"] = config
        original_init(self, *args, **kwargs)

    return new_init
def _patch_sft_trainer_auto_packing(trl_module):
    """Wrap ``trl.SFTTrainer.__init__`` to auto-manage packing / padding-free.

    The wrapper inspects the model, processor and collator handed to the
    trainer, disables packing / padding-free where they are known to be unsafe
    (custom collators, processor-based models, VLMs, blocklisted model types,
    forced logits via UNSLOTH_RETURN_LOGITS), and otherwise enables them and
    hooks the corresponding Unsloth fast paths after init. Idempotent thanks
    to the ``_unsloth_auto_packing_wrapped`` class flag.
    """
    sft_trainer = getattr(trl_module, "SFTTrainer", None)
    if sft_trainer is None:
        return
    if getattr(sft_trainer, "_unsloth_auto_packing_wrapped", False):
        return
    original_init = sft_trainer.__init__

    @wraps(original_init)
    def new_init(self, *args, **kwargs):
        # The training config arrives either as the 2nd positional arg or `args=`.
        config_arg = None
        if len(args) >= 2:
            config_arg = args[1]
        else:
            config_arg = kwargs.get("args")
        # Check if model type is unsupported for padding_free
        model = kwargs.get("model")
        is_unsupported_model = False
        is_vlm = False
        if model is not None:
            model_config = getattr(model, "config", None)
            if model_config is not None:
                model_types = get_transformers_model_type(model_config)
                # Blocklist: models that don't work correctly with padding_free
                is_unsupported_model = any(
                    x in PADDING_FREE_BLOCKLIST for x in model_types
                )
                # Check if VLM
                architectures = getattr(model_config, "architectures", None)
                if architectures is None:
                    architectures = []
                is_vlm = any(
                    x.endswith("ForConditionalGeneration") for x in architectures
                )
                is_vlm = is_vlm or hasattr(model_config, "vision_config")
        processing_class = kwargs.get("processing_class") or kwargs.get("tokenizer")
        data_collator = kwargs.get("data_collator")
        # We also disable vision language models for padding free collators
        blocked = (
            (data_collator is not None)
            or isinstance(processing_class, ProcessorMixin)
            or is_vlm
            or is_unsupported_model
            or (
                os.environ.get("UNSLOTH_RETURN_LOGITS", "0") == "1"
            ) # Disable padding free on forced logits
        )
        requested_pack = bool(getattr(config_arg, "packing", False))
        if blocked:
            # Force both features off on the incoming config.
            if hasattr(config_arg, "packing"):
                setattr(config_arg, "packing", False)
            if hasattr(config_arg, "padding_free"):
                setattr(config_arg, "padding_free", False)
        if blocked and requested_pack:
            # User explicitly asked for packing — explain why it was skipped.
            reason = "custom data collator"
            if data_collator is None and isinstance(processing_class, ProcessorMixin):
                reason = "processor-based model"
            elif is_vlm:
                reason = "vision-language model"
            elif is_unsupported_model:
                reason = f"unsupported model type(s): {', '.join(model_types)}"
            message = "Unsloth: Sample packing skipped " f"({reason} detected)."
            print(message)
        packing_active = False
        if _should_pack(config_arg) and not blocked:
            configure_sample_packing(config_arg)
            packing_active = True
            logger.info("Unsloth: Sample packing enabled for SFTTrainer instance.")
        auto_padding_free_active = False
        padding_free_requested = getattr(config_arg, "padding_free", None) is True
        if not blocked:
            if padding_free_requested:
                configure_padding_free(config_arg)
            elif _should_auto_padding_free(config_arg):
                configure_padding_free(config_arg)
                auto_padding_free_active = True
                logger.info(
                    "Unsloth: Padding-free batching auto-enabled for SFTTrainer instance."
                )
        try:
            original_init(self, *args, **kwargs)
        except ValueError as exc:
            # TRL raises ValueError for incompatible packing setups; retry once
            # with packing disabled when the message matches the known cases.
            if packing_active and _should_skip_auto_packing_error(exc):
                logger.info(
                    "Unsloth: Auto sample packing failed because trainer reported an incompatible setup (%s).",
                    exc,
                )
                _disable_sample_packing(config_arg)
                packing_active = False
                original_init(self, *args, **kwargs)
            else:
                raise
        # Re-read the flags from the trainer's own args — TRL may have copied
        # or rebuilt the config during __init__.
        trainer_args = getattr(self, "args", None)
        trainer_packing = bool(trainer_args and getattr(trainer_args, "packing", False))
        trainer_padding_free = bool(
            trainer_args and getattr(trainer_args, "padding_free", False)
        )
        if blocked and trainer_args is not None:
            # Mirror the block on the trainer args to avoid re-enabling later
            setattr(trainer_args, "packing", False)
            setattr(trainer_args, "padding_free", False)
        if (
            not blocked
            and trainer_packing
            and (packing_active or _should_pack(trainer_args))
        ):
            enable_sample_packing(self.model, self)
            print(
                "🦥 Unsloth: Packing enabled - training is >2x faster and uses less VRAM!"
            )
        elif not blocked and trainer_padding_free:
            enable_padding_free_metadata(self.model, self)
            message = (
                "🦥 Unsloth: Padding-free auto-enabled, enabling faster training."
                if auto_padding_free_active
                else "🦥 Unsloth: Padding-free enabled, enabling faster training."
            )
            print(message)

    sft_trainer.__init__ = new_init
    sft_trainer._unsloth_auto_packing_wrapped = True
def _patch_trl_trainer():
    """Patch every TRL ``*Trainer`` that has a matching ``*Config`` so it also
    accepts the pre-0.13 calling convention (see
    ``_backwards_compatible_trainer``), then install the SFTTrainer
    auto-packing wrapper.

    Idempotent: a sentinel attribute on the ``trl`` module marks completion.
    """
    import trl

    # Already patched in this process — nothing to do.
    if hasattr(trl, "__UNSLOTH_BACKWARDS_COMPATIBLE__"):
        return
    # Old TRL (<= 0.11.0) still uses the legacy signatures; no patch needed.
    if Version(trl.__version__) <= Version("0.11.0"):
        return

    import trl.trainer

    exported = dir(trl.trainer)
    trainer_names = set(x[: -len("Trainer")] for x in exported if x.endswith("Trainer"))
    config_names = set(x[: -len("Config")] for x in exported if x.endswith("Config"))
    # Only patch pairs that ship both FooTrainer and FooConfig.
    for base in trainer_names & config_names:
        # Fix: the original built `trl.{base}Trainer.__init__ = ...` source
        # strings and ran them through exec(); plain getattr / attribute
        # assignment does the same thing without string-built code. Names not
        # re-exported at the top-level `trl` namespace are skipped, matching
        # the old exec's swallowed AttributeError.
        trainer_cls = getattr(trl, f"{base}Trainer", None)
        config_cls = getattr(trl, f"{base}Config", None)
        if trainer_cls is None or config_cls is None:
            continue
        try:
            trainer_cls.__init__ = _backwards_compatible_trainer(trainer_cls, config_cls)
        except Exception:
            continue
    _patch_sft_trainer_auto_packing(trl)
    trl.__UNSLOTH_BACKWARDS_COMPATIBLE__ = True
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/_auto_install.py | unsloth/_auto_install.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Standalone helper: prints the pip command that installs the Unsloth build
# matching the local torch + CUDA combination.
try: import torch
except: raise ImportError('Install torch via `pip install torch`')
from packaging.version import Version as V
import re
# Parse only the numeric prefix of torch.__version__ (drops "+cu121" suffixes).
v = V(re.match(r"[0-9\.]{3,}", torch.__version__).group(0))
cuda = str(torch.version.cuda)
is_ampere = torch.cuda.get_device_capability()[0] >= 8
USE_ABI = torch._C._GLIBCXX_USE_CXX11_ABI
if cuda not in ("11.8", "12.1", "12.4", "12.6", "12.8", "13.0"): raise RuntimeError(f"CUDA = {cuda} not supported!")
# Map the torch version onto the matching wheel tag ("cu{ver}{suffix}-torch{ver}").
if v <= V('2.1.0'): raise RuntimeError(f"Torch = {v} too old!")
elif v <= V('2.1.1'): x = 'cu{}{}-torch211'
elif v <= V('2.1.2'): x = 'cu{}{}-torch212'
elif v < V('2.3.0'): x = 'cu{}{}-torch220'
elif v < V('2.4.0'): x = 'cu{}{}-torch230'
elif v < V('2.5.0'): x = 'cu{}{}-torch240'
elif v < V('2.5.1'): x = 'cu{}{}-torch250'
elif v <= V('2.5.1'): x = 'cu{}{}-torch251'
elif v < V('2.7.0'): x = 'cu{}{}-torch260'
elif v < V('2.7.9'): x = 'cu{}{}-torch270'
elif v < V('2.8.0'): x = 'cu{}{}-torch271'
elif v < V('2.8.9'): x = 'cu{}{}-torch280'
elif v < V('2.9.1'): x = 'cu{}{}-torch290'
elif v < V('2.9.2'): x = 'cu{}{}-torch291'
else: raise RuntimeError(f"Torch = {v} too new!")
# torch >= 2.7 dropped several older CUDA builds.
if v > V('2.6.9') and cuda not in ("11.8", "12.6", "12.8", "13.0"): raise RuntimeError(f"CUDA = {cuda} not supported!")
x = x.format(cuda.replace(".", ""), "-ampere" if False else "") # is_ampere is broken due to flash-attn
print(f'pip install --upgrade pip && pip install --no-deps git+https://github.com/unslothai/unsloth-zoo.git && pip install "unsloth[{x}] @ git+https://github.com/unslothai/unsloth.git" --no-build-isolation')
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/__init__.py | unsloth/__init__.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings, importlib, sys
from packaging.version import Version
import os, re, subprocess, inspect, functools
import numpy as np

# Log Unsloth is being used
os.environ["UNSLOTH_IS_PRESENT"] = "1"

# Check if modules that need patching are already imported
critical_modules = ["trl", "transformers", "peft"]
already_imported = [mod for mod in critical_modules if mod in sys.modules]

# Fix some issues before importing other packages
from .import_fixes import (
    fix_message_factory_issue,
    check_fbgemm_gpu_version,
    torchvision_compatibility_check,
    fix_diffusers_warnings,
    fix_huggingface_hub,
)
fix_message_factory_issue()
check_fbgemm_gpu_version()
torchvision_compatibility_check()
fix_diffusers_warnings()
fix_huggingface_hub()
# Drop the helper names so they do not leak into the package namespace.
del fix_message_factory_issue
del check_fbgemm_gpu_version
del torchvision_compatibility_check
del fix_diffusers_warnings
del fix_huggingface_hub

# This check is critical because Unsloth optimizes these libraries by modifying
# their code at import time. If they're imported first, the original (slower,
# more memory-intensive) implementations will be used instead of Unsloth's
# optimized versions, potentially causing OOM errors or slower training.
if already_imported:
    # stacklevel=2 makes warning point to user's import line rather than this library code,
    # showing them exactly where to fix the import order in their script
    warnings.warn(
        f"WARNING: Unsloth should be imported before [{', '.join(already_imported)}] "
        f"to ensure all optimizations are applied. Your code may run slower or encounter "
        f"memory issues without these optimizations.\n\n"
        f"Please restructure your imports with 'import unsloth' at the top of your file.",
        stacklevel = 2,
    )
del already_imported, critical_modules

# Unsloth currently does not work on multi GPU setups - sadly we are a 2 brother team so
# enabling it will require much more work, so we have to prioritize. Please understand!
# We do have a beta version, which you can contact us about!
# Thank you for your understanding and we appreciate it immensely!

# Fixes https://github.com/unslothai/unsloth/issues/1266
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
# [TODO] Check why some GPUs don't work
# "pinned_use_cuda_host_register:True,"\
# "pinned_num_register_threads:8"
from importlib.metadata import version as importlib_version
from importlib.metadata import PackageNotFoundError
# Check for unsloth_zoo
# Unsloth depends on a matching unsloth_zoo release; warn on old versions and
# hard-fail with install instructions when the package is missing entirely.
try:
    unsloth_zoo_version = importlib_version("unsloth_zoo")
    if Version(unsloth_zoo_version) < Version("2026.1.1"):
        print(
            "Unsloth: Please update Unsloth and Unsloth-Zoo to the latest version!\n"
            "Do this via `pip install --upgrade --force-reinstall --no-cache-dir --no-deps unsloth unsloth_zoo`"
        )
    # if os.environ.get("UNSLOTH_DISABLE_AUTO_UPDATES", "0") == "0":
    #     try:
    #         os.system("pip install --upgrade --no-cache-dir --no-deps unsloth_zoo")
    #     except:
    #         try:
    #             os.system("pip install --upgrade --no-cache-dir --no-deps --user unsloth_zoo")
    #         except:
    #             raise ImportError("Unsloth: Please update unsloth_zoo via `pip install --upgrade --no-cache-dir --no-deps unsloth_zoo`")
    import unsloth_zoo
except PackageNotFoundError:
    raise ImportError(
        f"Unsloth: Please install unsloth_zoo via `pip install unsloth_zoo` then retry!"
    )
except:
    # Any other import-time failure from unsloth_zoo is surfaced unchanged.
    raise
del PackageNotFoundError, importlib_version
# Try importing PyTorch and check version
try:
    import torch
except ModuleNotFoundError:
    raise ImportError(
        "Unsloth: Pytorch is not installed. Go to https://pytorch.org/.\n"
        "We have some installation instructions on our Github page."
    )
except:
    raise
# Device-type constants ("cuda" / "hip" / "xpu") used throughout the package.
from unsloth_zoo.device_type import (
    is_hip,
    get_device_type,
    DEVICE_TYPE,
    DEVICE_TYPE_TORCH,
    DEVICE_COUNT,
    ALLOW_PREQUANTIZED_MODELS,
)

# Fix other issues
from .import_fixes import (
    fix_xformers_performance_issue,
    fix_vllm_aimv2_issue,
    fix_vllm_guided_decoding_params,
    ignore_logger_messages,
    patch_ipykernel_hf_xet,
    patch_trackio,
    patch_datasets,
    patch_enable_input_require_grads,
    fix_openenv_no_vllm,
    fix_executorch,
)
fix_xformers_performance_issue()
fix_vllm_aimv2_issue()
fix_vllm_guided_decoding_params()
ignore_logger_messages()
patch_ipykernel_hf_xet()
patch_trackio()
patch_datasets()
patch_enable_input_require_grads()
fix_openenv_no_vllm()
fix_executorch()
# Drop the helper names so they do not leak into the package namespace.
del fix_xformers_performance_issue
del fix_vllm_aimv2_issue
del fix_vllm_guided_decoding_params
del ignore_logger_messages
del patch_ipykernel_hf_xet
del patch_trackio
del patch_datasets
del patch_enable_input_require_grads
del fix_openenv_no_vllm
del fix_executorch
# Torch 2.4 has including_emulation
if DEVICE_TYPE == "cuda":
    major_version, minor_version = torch.cuda.get_device_capability()
    # bfloat16 needs compute capability >= 8.0 (Ampere or newer).
    SUPPORTS_BFLOAT16 = major_version >= 8
    old_is_bf16_supported = torch.cuda.is_bf16_supported
    if "including_emulation" in str(inspect.signature(old_is_bf16_supported)):
        # Newer torch: keep the signature but default emulation off.
        def is_bf16_supported(including_emulation = False):
            return old_is_bf16_supported(including_emulation)
        torch.cuda.is_bf16_supported = is_bf16_supported
    else:
        # Older torch: report the capability-based answer directly.
        def is_bf16_supported():
            return SUPPORTS_BFLOAT16
        torch.cuda.is_bf16_supported = is_bf16_supported
    del major_version, minor_version
elif DEVICE_TYPE == "hip":
    SUPPORTS_BFLOAT16 = torch.cuda.is_bf16_supported()
elif DEVICE_TYPE == "xpu":
    # torch.xpu.is_bf16_supported() does not have including_emulation
    # set SUPPORTS_BFLOAT16 as torch.xpu.is_bf16_supported()
    SUPPORTS_BFLOAT16 = torch.xpu.is_bf16_supported()
# For Gradio HF Spaces?
# if "SPACE_AUTHOR_NAME" not in os.environ and "SPACE_REPO_NAME" not in os.environ:
import triton
# Sanity-check that bitsandbytes and triton can actually reach libcuda; when
# they cannot, attempt to relink CUDA with ldconfig and retry once.
if DEVICE_TYPE == "cuda":
    libcuda_dirs = lambda: None
    if Version(triton.__version__) >= Version("3.0.0"):
        try:
            from triton.backends.nvidia.driver import libcuda_dirs
        except:
            pass
    else:
        from triton.common.build import libcuda_dirs

    # Try loading bitsandbytes and triton
    try:
        import bitsandbytes as bnb
    except:
        print(
            "Unsloth: `bitsandbytes` is not installed - 4bit QLoRA unallowed, but 16bit and full finetuning works!"
        )
        bnb = None
    try:
        # Touching this symbol forces bitsandbytes to resolve its CUDA library.
        cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32
        libcuda_dirs()
    except:
        warnings.warn("Unsloth: Running `ldconfig /usr/lib64-nvidia` to link CUDA.")
        if os.path.exists("/usr/lib64-nvidia"):
            os.system("ldconfig /usr/lib64-nvidia")
        elif os.path.exists("/usr/local"):
            # Sometimes bitsandbytes cannot be linked properly in Runpod for example
            possible_cudas = (
                subprocess.check_output(["ls", "-al", "/usr/local"])
                .decode("utf-8")
                .split("\n")
            )
            find_cuda = re.compile(r"[\s](cuda\-[\d\.]{2,})$")
            possible_cudas = [find_cuda.search(x) for x in possible_cudas]
            possible_cudas = [x.group(1) for x in possible_cudas if x is not None]
            # Try linking cuda folder, or everything in local
            if len(possible_cudas) == 0:
                os.system("ldconfig /usr/local/")
            else:
                # Pick the highest-numbered /usr/local/cuda-X.Y directory.
                find_number = re.compile(r"([\d\.]{2,})")
                latest_cuda = np.argsort(
                    [float(find_number.search(x).group(1)) for x in possible_cudas]
                )[::-1][0]
                latest_cuda = possible_cudas[latest_cuda]
                os.system(f"ldconfig /usr/local/{latest_cuda}")
                del find_number, latest_cuda
            del possible_cudas, find_cuda
        # Reload and retry once after relinking.
        if bnb is not None:
            importlib.reload(bnb)
        importlib.reload(triton)
        try:
            libcuda_dirs = lambda: None
            if Version(triton.__version__) >= Version("3.0.0"):
                try:
                    from triton.backends.nvidia.driver import libcuda_dirs
                except:
                    pass
            else:
                from triton.common.build import libcuda_dirs
            cdequantize_blockwise_fp32 = bnb.functional.lib.cdequantize_blockwise_fp32
            libcuda_dirs()
        except:
            warnings.warn(
                "Unsloth: CUDA is not linked properly.\n"
                "Try running `python -m bitsandbytes` then `python -m xformers.info`\n"
                "We tried running `ldconfig /usr/lib64-nvidia` ourselves, but it didn't work.\n"
                "You need to run in your terminal `sudo ldconfig /usr/lib64-nvidia` yourself, then import Unsloth.\n"
                "Also try `sudo ldconfig /usr/local/cuda-xx.x` - find the latest cuda version.\n"
                "Unsloth will still run for now, but maybe it might crash - let's hope it works!"
            )
    del libcuda_dirs
elif DEVICE_TYPE == "hip":
    # NO-OP for rocm device
    pass
elif DEVICE_TYPE == "xpu":
    import bitsandbytes as bnb
    # TODO: check triton for intel installed properly.
    pass
from .models import *
from .models import __version__
from .save import *
from .chat_templates import *
from .tokenizer_utils import *
from .trainer import *
from unsloth_zoo.rl_environments import (
check_python_modules,
create_locked_down_function,
execute_with_time_limit,
Benchmarker,
is_port_open,
launch_openenv,
)
# Patch TRL trainers for backwards compatibility
# NOTE(review): `_patch_trl_trainer` is brought in by one of the wildcard
# imports above (likely `.trainer`) — confirm its origin before refactoring.
_patch_trl_trainer()
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/ollama_template_mappers.py | unsloth/ollama_template_mappers.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Public API of this module.
# NOTE(review): the *_MAPPER names are exported here but defined further down
# the file (not in this chunk).
__all__ = [
    "OLLAMA_TEMPLATES",
    "OLLAMA_TEMPLATE_TO_MODEL_MAPPER",
    "MODEL_TO_OLLAMA_TEMPLATE_MAPPER",
]
# Registry mapping a chat-template name -> Ollama Modelfile text.
# NOTE(review): "{__FILE_LOCATION__}" and "{__EOS_TOKEN__}" are literal
# placeholders in every entry; they are presumably substituted when the
# Modelfile is written out (not via str.format — the Go-template braces would
# break it). Confirm against the save/export code.
OLLAMA_TEMPLATES = {}
# =========================================== Unsloth
unsloth_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .System }}{{ .System }}
{{ end }}{{ if .Prompt }}>>> User: {{ .Prompt }}
{{ end }}>>> Assistant: {{ .Response }}{__EOS_TOKEN__}
"""
PARAMETER stop "{__EOS_TOKEN__}"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
SYSTEM """You are a helpful assistant to the user"""
'''
OLLAMA_TEMPLATES["unsloth"] = unsloth_ollama
# =========================================== Zephyr
zephyr_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .System }}<|system|>
{{ .System }}{__EOS_TOKEN__}
{{ end }}{{ if .Prompt }}<|user|>
{{ .Prompt }}{__EOS_TOKEN__}
{{ end }}<|assistant|>
{{ .Response }}{__EOS_TOKEN__}
"""
PARAMETER stop "{__EOS_TOKEN__}"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
OLLAMA_TEMPLATES["zephyr"] = zephyr_ollama
# =========================================== ChatML
chatml_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .System }}<|im_start|>system
{{ .System }}<|im_end|>
{{ end }}{{ if .Prompt }}<|im_start|>user
{{ .Prompt }}<|im_end|>
{{ end }}<|im_start|>assistant
{{ .Response }}<|im_end|>
"""
PARAMETER stop "<|im_start|>"
PARAMETER stop "<|im_end|>"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
OLLAMA_TEMPLATES["chatml"] = chatml_ollama
# =========================================== Mistral-1
# Ollama from https://www.ollama.com/library/mistral
# Mistral v0.1 https://ollama.com/library/mistral:v0.1/blobs/22e1b2e8dc2f
# Mistral v0.2 https://ollama.com/library/mistral:v0.2/blobs/e6836092461f
mistral_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }} [/INST]"""
PARAMETER stop "[INST]"
PARAMETER stop "[/INST]"
'''
# mistral:v0.3 https://ollama.com/library/mistral:v0.3/blobs/1ff5b64b61b9
# mistral-large https://ollama.com/library/mistral-large:latest/blobs/96adabcf2c08
# v0.3 adds tool calling: [AVAILABLE_TOOLS]/[TOOL_CALLS]/[TOOL_RESULTS] markers.
mistral_v03_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """{{- if .Messages }}
{{- range $index, $_ := .Messages }}
{{- if eq .Role "user" }}
{{- if and (eq (len (slice $.Messages $index)) 1) $.Tools }}[AVAILABLE_TOOLS] {{ $.Tools }}[/AVAILABLE_TOOLS]
{{- end }}[INST] {{ if and $.System (eq (len (slice $.Messages $index)) 1) }}{{ $.System }}
{{ end }}{{ .Content }}[/INST]
{{- else if eq .Role "assistant" }}
{{- if .Content }}{{ .Content }}
{{- else if .ToolCalls }}[TOOL_CALLS] [
{{- range .ToolCalls }}{"name": "{{ .Function.Name }}", "arguments": {{ .Function.Arguments }}}
{{- end }}]
{{- end }}</s>
{{- else if eq .Role "tool" }}[TOOL_RESULTS] {"content": {{ .Content }}} [/TOOL_RESULTS]
{{- end }}
{{- end }}
{{- else }}[INST] {{ if .System }}{{ .System }}
{{ end }}{{ .Prompt }}[/INST]
{{- end }}{{ .Response }}
{{- if .Response }}</s>
{{- end }}"""
PARAMETER stop "[INST]"
PARAMETER stop "[/INST]"
PARAMETER stop "</s>"
'''
# Mistral-small https://ollama.com/library/mistral-small:latest/blobs/6db27cd4e277
mistral_small_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """{{- range $index, $_ := .Messages }}
{{- if eq .Role "system" }}[SYSTEM_PROMPT]{{ .Content }}[/SYSTEM_PROMPT]
{{- else if eq .Role "user" }}
{{- if and (le (len (slice $.Messages $index)) 2) $.Tools }}[AVAILABLE_TOOLS]{{ $.Tools }}[/AVAILABLE_TOOLS]
{{- end }}[INST]{{ .Content }}[/INST]
{{- else if eq .Role "assistant" }}
{{- if .Content }}{{ .Content }}
{{- if not (eq (len (slice $.Messages $index)) 1) }}</s>
{{- end }}
{{- else if .ToolCalls }}[TOOL_CALLS][
{{- range .ToolCalls }}{"name": "{{ .Function.Name }}", "arguments": {{ .Function.Arguments }}}
{{- end }}]</s>
{{- end }}
{{- else if eq .Role "tool" }}[TOOL_RESULTS]{"content": {{ .Content }}}[/TOOL_RESULTS]
{{- end }}
{{- end }}"""
PARAMETER temperature 0.15
SYSTEM """You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. Your knowledge base was last updated on 2023-10-01. When you're not sure about some information, you say that you don't have the information and don't make up anything. If the user's question is not clear, ambiguous, or does not provide enough context for you to accurately answer the question, you do not try to answer it right away and you rather ask the user to clarify their request (e.g. "What are some good restaurants around me?" => "Where are you?" or "When is the next flight to Tokyo" => "Where do you travel from?")"""
'''
# mistral-small-3.1 https://ollama.com/library/mistral-small3.1:latest/blobs/6db27cd4e277
# NOTE(review): the SYSTEM prompt below contains a literal "{yesterday}"
# placeholder carried over from Mistral's published prompt; nothing in this
# module substitutes it — confirm whether that is intended.
mistral_small_31_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """{{- range $index, $_ := .Messages }}
{{- if eq .Role "system" }}[SYSTEM_PROMPT]{{ .Content }}[/SYSTEM_PROMPT]
{{- else if eq .Role "user" }}
{{- if and (le (len (slice $.Messages $index)) 2) $.Tools }}[AVAILABLE_TOOLS]{{ $.Tools }}[/AVAILABLE_TOOLS]
{{- end }}[INST]{{ .Content }}[/INST]
{{- else if eq .Role "assistant" }}
{{- if .Content }}{{ .Content }}
{{- if not (eq (len (slice $.Messages $index)) 1) }}</s>
{{- end }}
{{- else if .ToolCalls }}[TOOL_CALLS][
{{- range .ToolCalls }}{"name": "{{ .Function.Name }}", "arguments": {{ .Function.Arguments }}}
{{- end }}]</s>
{{- end }}
{{- else if eq .Role "tool" }}[TOOL_RESULTS]{"content": {{ .Content }}}[/TOOL_RESULTS]
{{- end }}
{{- end }}"""
PARAMETER num_ctx 4096
SYSTEM """You are Mistral Small 3.1, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris.
You power an AI assistant called Le Chat.
Your knowledge base was last updated on 2023-10-01.
When you're not sure about some information, you say that you don't have the information and don't make up anything.
If the user's question is not clear, ambiguous, or does not provide enough context for you to accurately answer the question, you do not try to answer it right away and you rather ask the user to clarify their request (e.g. "What are some good restaurants around me?" => "Where are you?" or "When is the next flight to Tokyo" => "Where do you travel from?").
You are always very attentive to dates, in particular you try to resolve dates (e.g. "yesterday" is {yesterday}) and when asked about information at specific dates, you discard information that is at another date.
You follow these instructions in all languages, and always respond to the user in the language they use or request.
Next sections describe the capabilities that you have.
# WEB BROWSING INSTRUCTIONS
You cannot perform any web search or access internet to open URLs, links etc. If it seems like the user is expecting you to do so, you clarify the situation and ask the user to copy paste the text directly in the chat.
# MULTI-MODAL INSTRUCTIONS
You have the ability to read images, but you cannot generate images. You also cannot transcribe audio files or videos.
You cannot read nor transcribe audio files or videos."""
'''
# mistral-small-3.2 https://ollama.com/library/mistral-small3.2:latest/blobs/706c4d1164f7
# 3.2 switches tool calls to the [TOOL_CALLS]name[CALL_ID]i[ARGS]args wire format.
mistral_small_32_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """{{- range $index, $_ := .Messages }}
{{- if eq .Role "system" }}[SYSTEM_PROMPT]{{ .Content }}[/SYSTEM_PROMPT]
{{- else if eq .Role "user" }}
{{- if and (le (len (slice $.Messages $index)) 2) $.Tools }}[AVAILABLE_TOOLS]{{ $.Tools }}[/AVAILABLE_TOOLS]
{{- end }}[INST]{{ .Content }}[/INST]
{{- else if eq .Role "assistant" }}
{{- if .Content }}{{ .Content }}
{{- if not (eq (len (slice $.Messages $index)) 1) }}</s>
{{- end }}
{{- else if .ToolCalls }}
{{- range $i, $_ := .ToolCalls }}[TOOL_CALLS]{{ .Function.Name }}[CALL_ID]{{ $i }}[ARGS]{{ .Function.Arguments }}
{{- end }}</s>
{{- end }}
{{- else if eq .Role "tool" }}[TOOL_RESULTS]{"content": {{ .Content }}}[/TOOL_RESULTS]
{{- end }}
{{- end }}"""
PARAMETER temperature 0.15
SYSTEM """You are Mistral Small 3.2, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris.
You power an AI assistant called Le Chat.
Your knowledge base was last updated on 2023-10-01.
When you're not sure about some information or when the user's request requires up-to-date or specific data, you must use the available tools to fetch the information. Do not hesitate to use tools whenever they can provide a more accurate or complete response. If no relevant tools are available, then clearly state that you don't have the information and avoid making up anything.
If the user's question is not clear, ambiguous, or does not provide enough context for you to accurately answer the question, you do not try to answer it right away and you rather ask the user to clarify their request (e.g. "What are some good restaurants around me?" => "Where are you?" or "When is the next flight to Tokyo" => "Where do you travel from?").
You are always very attentive to dates, in particular you try to resolve dates and when asked about information at specific dates, you discard information that is at another date.
You follow these instructions in all languages, and always respond to the user in the language they use or request.
Next sections describe the capabilities that you have.
# WEB BROWSING INSTRUCTIONS
You cannot perform any web search or access internet to open URLs, links etc. If it seems like the user is expecting you to do so, you clarify the situation and ask the user to copy paste the text directly in the chat.
# MULTI-MODAL INSTRUCTIONS
You have the ability to read images, but you cannot generate images. You also cannot transcribe audio files or videos.
You cannot read nor transcribe audio files or videos.
TOOL CALLING INSTRUCTIONS
You may have access to tools that you can use to fetch information or perform actions. You must use these tools in the following situations:
1. When the request requires up-to-date information.
2. When the request requires specific data that you do not have in your knowledge base.
3. When the request involves actions that you cannot perform without tools.
Always prioritize using tools to provide the most accurate and helpful response. If tools are not available, inform the user that you cannot perform the requested action at the moment."""
'''
# https://ollama.com/library/mixtral:latest/blobs/53d74de0d84c
mixtral_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }} [/INST] {{ .Response }}"""
PARAMETER stop "[INST]"
PARAMETER stop "[/INST]"
'''
# https://registry.ollama.ai/library/mistral-nemo:latest/blobs/438402ddac75
mistral_nemo_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """
{{- range $i, $_ := .Messages }}
{{- if eq .Role "user" }}
{{- if and $.Tools (le (len (slice $.Messages $i)) 2) }}[AVAILABLE_TOOLS]{{ $.Tools }}[/AVAILABLE_TOOLS]
{{- end }}[INST]{{ if and $.System (eq (len (slice $.Messages $i)) 1) }}{{ $.System }}
{{ end }}{{ .Content }}[/INST]
{{- else if eq .Role "assistant" }}
{{- if .Content }} {{ .Content }}{{ if not (eq (len (slice $.Messages $i)) 1) }}</s>{{ end }}
{{- else if .ToolCalls }}[TOOL_CALLS][
{{- range .ToolCalls }}{"name": "{{ .Function.Name }}", "arguments": {{ .Function.Arguments }}}
{{- end }}]</s>
{{- end }}
{{- else if eq .Role "tool" }}[TOOL_RESULTS]{"content": {{ .Content }}}[/TOOL_RESULTS]
{{- end }}
{{- end }}"""
PARAMETER stop "[INST]"
PARAMETER stop "[/INST]"
'''
# https://ollama.com/library/codestral:latest/blobs/51707752a87c
# Codestral supports fill-in-the-middle: the .Suffix branch emits the
# [SUFFIX]/[PREFIX] infill format instead of a chat transcript.
codestral_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """
{{- if .Suffix }}[SUFFIX]{{ .Suffix }}[PREFIX] {{ .Prompt }}
{{- else if .Messages }}
{{- range $index, $_ := .Messages }}
{{- if eq .Role "user" }}[INST] {{ if and $.System (eq (len (slice $.Messages $index)) 1) }}{{ $.System }}
{{ end }}{{ .Content }}[/INST]
{{- else if eq .Role "assistant" }} {{ .Content }}</s>
{{- end }}
{{- end }}
{{- else }}[INST] {{ if .System }}{{ .System }}
{{ end }}{{ .Prompt }} [/INST]
{{- end }} {{ .Response }}
{{- if .Response }}</s>
{{- end }}
"""
PARAMETER stop "[INST]"
PARAMETER stop "[/INST]"
PARAMETER stop "[PREFIX]"
PARAMETER stop "[MIDDLE]"
PARAMETER stop "[SUFFIX]"
'''
# https://ollama.com/library/devstral:latest/blobs/ea9ec42474e0
# First range pass records the index of the last user message so that
# [AVAILABLE_TOOLS] is only emitted immediately before it.
devstral_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """{{- $lastUserIndex := -1 }}
{{- range $index, $_ := .Messages }}
{{- if eq .Role "user" }}{{ $lastUserIndex = $index }}{{ end }}
{{- end }}
{{- range $index, $_ := .Messages }}
{{- if eq .Role "system" }}[SYSTEM_PROMPT]{{ .Content }}[/SYSTEM_PROMPT]
{{- else if eq .Role "user" }}
{{- if and (eq $lastUserIndex $index) $.Tools }}[AVAILABLE_TOOLS]{{ $.Tools }}[/AVAILABLE_TOOLS]
{{- end }}[INST]{{ .Content }}[/INST]
{{- else if eq .Role "assistant" }}
{{- if .Content }}{{ .Content }}
{{- if not (eq (len (slice $.Messages $index)) 1) }}</s>
{{- end }}
{{- else if .ToolCalls }}[TOOL_CALLS][
{{- range .ToolCalls }}{"name": "{{ .Function.Name }}", "arguments": {{ .Function.Arguments }}}
{{- end }}]</s>
{{- end }}
{{- else if eq .Role "tool" }}[TOOL_RESULTS]{"content": {{ .Content }}}[/TOOL_RESULTS]
{{- end }}
{{- end }}"""
SYSTEM """You are Devstral, a helpful agentic model trained by Mistral AI and using the OpenHands scaffold. You can interact with a computer to solve tasks.
<ROLE>
Your primary role is to assist users by executing commands, modifying code, and solving technical problems effectively. You should be thorough, methodical, and prioritize quality over speed.
* If the user asks a question, like "why is X happening", don't try to fix the problem. Just give an answer to the question.
</ROLE>
<EFFICIENCY>
* Each action you take is somewhat expensive. Wherever possible, combine multiple actions into a single action, e.g. combine multiple bash commands into one, using sed and grep to edit/view multiple files at once.
* When exploring the codebase, use efficient tools like find, grep, and git commands with appropriate filters to minimize unnecessary operations.
</EFFICIENCY>
<FILE_SYSTEM_GUIDELINES>
* When a user provides a file path, do NOT assume it's relative to the current working directory. First explore the file system to locate the file before working on it.
* If asked to edit a file, edit the file directly, rather than creating a new file with a different filename.
* For global search-and-replace operations, consider using `sed` instead of opening file editors multiple times.
</FILE_SYSTEM_GUIDELINES>
<CODE_QUALITY>
* Write clean, efficient code with minimal comments. Avoid redundancy in comments: Do not repeat information that can be easily inferred from the code itself.
* When implementing solutions, focus on making the minimal changes needed to solve the problem.
* Before implementing any changes, first thoroughly understand the codebase through exploration.
* If you are adding a lot of code to a function or file, consider splitting the function or file into smaller pieces when appropriate.
</CODE_QUALITY>
<VERSION_CONTROL>
* When configuring git credentials, use "openhands" as the user.name and "openhands@all-hands.dev" as the user.email by default, unless explicitly instructed otherwise.
* Exercise caution with git operations. Do NOT make potentially dangerous changes (e.g., pushing to main, deleting repositories) unless explicitly asked to do so.
* When committing changes, use `git status` to see all modified files, and stage all files necessary for the commit. Use `git commit -a` whenever possible.
* Do NOT commit files that typically shouldn't go into version control (e.g., node_modules/, .env files, build directories, cache files, large binaries) unless explicitly instructed by the user.
* If unsure about committing certain files, check for the presence of .gitignore files or ask the user for clarification.
</VERSION_CONTROL>
<PULL_REQUESTS>
* When creating pull requests, create only ONE per session/issue unless explicitly instructed otherwise.
* When working with an existing PR, update it with new commits rather than creating additional PRs for the same issue.
* When updating a PR, preserve the original PR title and purpose, updating description only when necessary.
</PULL_REQUESTS>
<PROBLEM_SOLVING_WORKFLOW>
1. EXPLORATION: Thoroughly explore relevant files and understand the context before proposing solutions
2. ANALYSIS: Consider multiple approaches and select the most promising one
3. TESTING:
* For bug fixes: Create tests to verify issues before implementing fixes
* For new features: Consider test-driven development when appropriate
* If the repository lacks testing infrastructure and implementing tests would require extensive setup, consult with the user before investing time in building testing infrastructure
* If the environment is not set up to run tests, consult with the user first before investing time to install all dependencies
4. IMPLEMENTATION: Make focused, minimal changes to address the problem
5. VERIFICATION: If the environment is set up to run tests, test your implementation thoroughly, including edge cases. If the environment is not set up to run tests, consult with the user first before investing time to run tests.
</PROBLEM_SOLVING_WORKFLOW>
<SECURITY>
* Only use GITHUB_TOKEN and other credentials in ways the user has explicitly requested and would expect.
* Use APIs to work with GitHub or other platforms, unless the user asks otherwise or your task requires browsing.
</SECURITY>
<ENVIRONMENT_SETUP>
* When user asks you to run an application, don't stop if the application is not installed. Instead, please install the application and run the command again.
* If you encounter missing dependencies:
1. First, look around in the repository for existing dependency files (requirements.txt, pyproject.toml, package.json, Gemfile, etc.)
2. If dependency files exist, use them to install all dependencies at once (e.g., `pip install -r requirements.txt`, `npm install`, etc.)
3. Only install individual packages directly if no dependency files are found or if only specific packages are needed
* Similarly, if you encounter missing dependencies for essential tools requested by the user, install them when possible.
</ENVIRONMENT_SETUP>
<TROUBLESHOOTING>
* If you've made repeated attempts to solve a problem but tests still fail or the user reports it's still broken:
1. Step back and reflect on 5-7 different possible sources of the problem
2. Assess the likelihood of each possible cause
3. Methodically address the most likely causes, starting with the highest probability
4. Document your reasoning process
* When you run into any major issue while executing a plan from the user, please don't try to directly work around it. Instead, propose a new plan and confirm with the user before proceeding.
</TROUBLESHOOTING>"""
'''
# https://ollama.com/library/magistral:latest/blobs/35f7a1efc383
# Reasoning model: the template threads .Thinking through <think> blocks when
# $.IsThinkSet is enabled by the runtime.
magistral_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """
{{- range $i, $_ := .Messages }}
{{- $last := eq (len (slice $.Messages $i)) 1}}
{{- if eq .Role "system" }}[SYSTEM_PROMPT]{{ .Content }}[/SYSTEM_PROMPT]
{{- else if eq .Role "user" }}
{{- if and (le (len (slice $.Messages $i)) 2) $.Tools }}[AVAILABLE_TOOLS]{{ $.Tools }}[/AVAILABLE_TOOLS]
{{- end }}[INST]{{ .Content }}[/INST]
{{- else if eq .Role "assistant" }}
{{- if and $.IsThinkSet (and $last .Thinking) -}}
<think>
{{ .Thinking }}
</think>
{{ end }}
{{- if .Content }}{{ .Content }}
{{- end }}
{{- if .ToolCalls }}{{ range $i, $_ := .ToolCalls }}[TOOL_CALLS]{{ .Function.Name }}[CALL_ID]{{ $i }}[ARGS]{{ .Function.Arguments }}{{ end }}
{{- end }}
{{- if not (eq (len (slice $.Messages $i)) 1) }}</s>
{{- end }}
{{- else if eq .Role "tool" }}[TOOL_RESULTS]0[TOOL_CONTENT]{{ .Content }}[/TOOL_RESULTS]
{{- end }}
{{- if and $last (ne .Role "assistant") }}{{ if and $.IsThinkSet (not $.Think) -}}<think>
</think>
{{ end }}
{{- end }}
{{- end }}"""
PARAMETER temperature 0.7
PARAMETER top_p 0.95
SYSTEM """A user will ask you to solve a task. You should first draft your thinking process (inner monologue) until you have derived the final answer. Afterwards, write a self-contained summary of your thoughts (i.e. your summary should be succinct but contain all the critical steps you needed to reach the conclusion). You should use Markdown and Latex to format your response. Write both your thoughts and summary in the same language as the task posed by the user.
Your thinking process must follow the template below:
<think>
Your thoughts or/and draft, like working through an exercise on scratch paper. Be as casual and as long as you want until you are confident to generate a correct answer.
</think>
Here, provide a concise summary that reflects your reasoning and presents a clear final answer to the user.
Problem:"""
'''
# Register every Mistral-family template under its lookup key.
OLLAMA_TEMPLATES["mistral"] = mistral_ollama
OLLAMA_TEMPLATES["mistral-v03"] = mistral_v03_ollama
OLLAMA_TEMPLATES["mistral-small"] = mistral_small_ollama
OLLAMA_TEMPLATES["mistral-small-31"] = mistral_small_31_ollama
OLLAMA_TEMPLATES["mistral-small-32"] = mistral_small_32_ollama
OLLAMA_TEMPLATES["mixtral"] = mixtral_ollama
OLLAMA_TEMPLATES["mistral-nemo"] = mistral_nemo_ollama
OLLAMA_TEMPLATES["devstral"] = devstral_ollama
OLLAMA_TEMPLATES["magistral"] = magistral_ollama
OLLAMA_TEMPLATES["codestral"] = codestral_ollama
# =========================================== Llama-2
# Ollama from https://www.ollama.com/library/llama2
# (template uses the Llama-2 [INST]/<<SYS>> format; the original comment
# linked the llama3 library page by mistake)
llama_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """[INST] <<SYS>>{{ .System }}<</SYS>>
{{ .Prompt }} [/INST]"""
PARAMETER stop "{__EOS_TOKEN__}"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
OLLAMA_TEMPLATES["llama"] = llama_ollama
# =========================================== Vicuna
# Ollama from https://www.ollama.com/library/vicuna
vicuna_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}USER: {{ .Prompt }} {{ end }}ASSISTANT: {{ .Response }} {__EOS_TOKEN__}"""
PARAMETER stop "{__EOS_TOKEN__}"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
OLLAMA_TEMPLATES["vicuna"] = vicuna_ollama
# =========================================== Vicuna Old
vicuna_old_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .System }}{{ .System }}
{{ end }}{{ if .Prompt }}### Human: {{ .Prompt }}
{{ end }}### Assistant: {{ .Response }}{__EOS_TOKEN__}
"""
PARAMETER stop "{__EOS_TOKEN__}"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
SYSTEM """A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions."""
'''
OLLAMA_TEMPLATES["vicuna_old"] = vicuna_old_ollama
OLLAMA_TEMPLATES["vicuna old"] = OLLAMA_TEMPLATES["vicuna_old"]
# =========================================== Alpaca multi turn
alpaca_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .System }}{{ .System }}
{{ end }}{{ if .Prompt }}### Instruction:
{{ .Prompt }}{{ end }}
### Response:
{{ .Response }}{__EOS_TOKEN__}
"""
PARAMETER stop "{__EOS_TOKEN__}"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
SYSTEM """Below are some instructions that describe some tasks. Write responses that appropriately complete each request."""
'''
OLLAMA_TEMPLATES["alpaca"] = alpaca_ollama
# =========================================== Gemma
# Ollama from https://www.ollama.com/library/gemma
gemma_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """<start_of_turn>user
{{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}<end_of_turn>
<start_of_turn>model
{{ .Response }}<end_of_turn>
"""
PARAMETER repeat_penalty 1
PARAMETER stop "<start_of_turn>"
PARAMETER stop "<end_of_turn>"
PARAMETER penalize_newline false
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
OLLAMA_TEMPLATES["gemma"] = gemma_ollama
# =========================================== Gemma with ChatML instead
gemma_chatml_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .System }}<|im_start|>system
{{ .System }}<|im_end|>
{{ end }}{{ if .Prompt }}<|im_start|>user
{{ .Prompt }}<|im_end|>
{{ end }}<|im_start|>assistant
{{ .Response }}<|im_end|>
"""
PARAMETER repeat_penalty 1
PARAMETER stop "<|im_start|>"
PARAMETER stop "<|im_end|>"
PARAMETER penalize_newline false
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
OLLAMA_TEMPLATES["gemma_chatml"] = gemma_chatml_ollama
# =========================================== Gemma 2
# Same as Gemma 1, but with sliding window attention!
# https://ollama.com/library/gemma2/blobs/6522ca797f47
# Gemma 2 variants reuse the Gemma 1 Modelfiles with num_ctx appended;
# gemma_ollama/gemma_chatml_ollama each end with a trailing newline, so
# plain string concatenation yields a valid extra PARAMETER line.
gemma2_ollama = gemma_ollama + "PARAMETER num_ctx 4096\n"
OLLAMA_TEMPLATES["gemma2"] = gemma2_ollama
# =========================================== Gemma 2 with ChatML instead
gemma2_chatml_ollama = gemma_chatml_ollama + "PARAMETER num_ctx 4096\n"
OLLAMA_TEMPLATES["gemma2_chatml"] = gemma2_chatml_ollama
# =========================================== Llama-3
# Ollama from https://www.ollama.com/library/llama3
llama3_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .System }}<|start_header_id|>system<|end_header_id|>
{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>
{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>
{{ .Response }}<|eot_id|>"""
PARAMETER num_keep 24
PARAMETER stop "<|start_header_id|>"
PARAMETER stop "<|end_header_id|>"
PARAMETER stop "<|eot_id|>"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
# Both spellings resolve to the same Modelfile.
OLLAMA_TEMPLATES["llama-3"] = llama3_ollama
OLLAMA_TEMPLATES["llama3"] = llama3_ollama
# =========================================== Phi-3
# Ollama from https://www.ollama.com/library/phi3
phi3_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .System }}<|system|>
{{ .System }}<|end|>
{{ end }}{{ if .Prompt }}<|user|>
{{ .Prompt }}<|end|>
{{ end }}<|assistant|>
{{ .Response }}<|end|>
"""
PARAMETER stop "<|end|>"
PARAMETER stop "<|user|>"
PARAMETER stop "<|assistant|>"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
# Phi-3.5 reuses the Phi-3 template under several aliases.
OLLAMA_TEMPLATES["phi-3"] = phi3_ollama
OLLAMA_TEMPLATES["phi-35"] = OLLAMA_TEMPLATES["phi-3"]
OLLAMA_TEMPLATES["phi-3.5"] = OLLAMA_TEMPLATES["phi-3"]
# =========================================== Llama-3.1
"""
No trimming in Llama 3.1 Instruct!
Also an extra newline for Cutting Knowledge Date
See https://colab.research.google.com/drive/1Xpqq5xpIgO-B00MQ-UccYMwN2J8QFgBM?usp=sharing
Also should be
import datetime
tokenizer.apply_chat_template(
messages,
add_generation_prompt = True,
tokenize = False,
date_string = datetime.today().strftime("%d %B %Y")),
)
"""
# Ollama from https://ollama.com/library/llama3.1 (needs updating!)
# NOTE(review): "original use question" below is a typo in the upstream
# Ollama template text; it is kept verbatim because this string is runtime
# data copied from the published blob.
llama31_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .Messages }}
{{- if or .System .Tools }}<|start_header_id|>system<|end_header_id|>
{{- if .System }}
{{ .System }}
{{- end }}
{{- if .Tools }}
You are a helpful assistant with tool calling capabilities. When you receive a tool call response, use the output to format an answer to the original use question.
{{- end }}
{{- end }}<|eot_id|>
{{- range $i, $_ := .Messages }}
{{- $last := eq (len (slice $.Messages $i)) 1 }}
{{- if eq .Role "user" }}<|start_header_id|>user<|end_header_id|>
{{- if and $.Tools $last }}
Given the following functions, please respond with a JSON for a function call with its proper arguments that best answers the given prompt.
Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}. Do not use variables.
{{ $.Tools }}
{{- end }}
{{ .Content }}<|eot_id|>{{ if $last }}<|start_header_id|>assistant<|end_header_id|>
{{ end }}
{{- else if eq .Role "assistant" }}<|start_header_id|>assistant<|end_header_id|>
{{- if .ToolCalls }}
{{- range .ToolCalls }}{"name": "{{ .Function.Name }}", "parameters": {{ .Function.Arguments }}}{{ end }}
{{- else }}
{{ .Content }}{{ if not $last }}<|eot_id|>{{ end }}
{{- end }}
{{- else if eq .Role "tool" }}<|start_header_id|>ipython<|end_header_id|>
{{ .Content }}<|eot_id|>{{ if $last }}<|start_header_id|>assistant<|end_header_id|>
{{ end }}
{{- end }}
{{- end }}
{{- else }}
{{- if .System }}<|start_header_id|>system<|end_header_id|>
{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>
{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>
{{ end }}{{ .Response }}{{ if .Response }}<|eot_id|>{{ end }}"""
PARAMETER stop "<|start_header_id|>"
PARAMETER stop "<|end_header_id|>"
PARAMETER stop "<|eot_id|>"
PARAMETER stop "<|eom_id|>"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
# https://ollama.com/ajindal/llama3.1-storm:8b/blobs/1970553b62f4
# Llama-3.1-Storm variant: same header structure as llama31_ollama but with
# XML <tool_call> style function calling instructions in the system turn.
llama_31_storm_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """
{{ if .Messages }}
{{- if or .System .Tools }}<|start_header_id|>system<|end_header_id|>
{{- if .System }}
{{ .System }}
{{- end }}
{{- if .Tools }}
You are a function calling AI model. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into function. The user may use the terms function calling or tool use interchangeably.
Here are the available functions:
<tools>{{ json .Tools }}</tools>
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags in the format:
<tool_call>{"tool_name": <function-name>, "tool_arguments": <args-dict>}</tool_call>
{{- end }}
{{- end }}<|eot_id|>
{{- range $i, $_ := .Messages }}
{{- $last := eq (len (slice $.Messages $i)) 1 }}
{{- if eq .Role "user" }}<|start_header_id|>user<|end_header_id|>
{{ .Content }}<|eot_id|>{{ if $last }}<|start_header_id|>assistant<|end_header_id|>
{{ end }}
{{- else if eq .Role "assistant" }}<|start_header_id|>assistant<|end_header_id|>
{{- if .ToolCalls }}
{{- range .ToolCalls }}{"name": "{{ .Function.Name }}", "parameters": {{ .Function.Arguments }}}{{ end }}
{{- else }}
{{ .Content }}{{ if not $last }}<|eot_id|>{{ end }}
{{- end }}
{{- else if eq .Role "tool" }}<|start_header_id|>ipython<|end_header_id|>
{{ .Content }}<|eot_id|>{{ if $last }}<|start_header_id|>assistant<|end_header_id|>
{{ end }}
{{- end }}
{{- end }}
{{- else }}
{{- if .System }}<|start_header_id|>system<|end_header_id|>
{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>
{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>
{{ end }}{{ .Response }}{{ if .Response }}<|eot_id|>{{ end }}
"""
PARAMETER stop "<|start_header_id|>"
PARAMETER stop "<|end_header_id|>"
PARAMETER stop "<|eot_id|>"
'''
# https://ollama.com/library/nemotron:latest/blobs/4863fe3335f3
# Nemotron: always emits a system header; tool responses use the "ipython"
# role header, matching the Llama 3.1 convention.
llama_31_nemotron_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """<|start_header_id|>system<|end_header_id|>
{{ if .Tools }}You have access to the following functions. To call a function, please respond with JSON for a function call. Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}. Do not use variables.
{{ range .Tools }}{{ . }}
{{ end }}
{{- end }}{{ .System }}<|eot_id|>
{{- range $i, $_ := .Messages }}
{{- $isLastMessage := eq (len (slice $.Messages $i)) 1 -}}
{{- if eq .Role "system" }}
{{- else if eq .Role "assistant" }}<|start_header_id|>assistant<|end_header_id|>
{{ if .Content }}{{ .Content }}
{{- else if .ToolCalls }}
{{- range .ToolCalls }}{"name": "{{ .Function.Name }}", "parameters": {{ .Function.Arguments }} }
{{- end }}
{{- end }}
{{- if not $isLastMessage }}<|eot_id|>
{{- end }}
{{- else if eq .Role "tool" }}<|start_header_id|>ipython<|end_header_id|>
{{ .Content }}<|eot_id|>
{{- if $isLastMessage }}<|start_header_id|>assistant<|end_header_id|>
{{ end }}
{{- else }}<|start_header_id|>{{ .Role }}<|end_header_id|>
{{ .Content }}<|eot_id|>
{{- if $isLastMessage }}<|start_header_id|>assistant<|end_header_id|>
{{ end }}
{{- end }}
{{- end }}
"""
PARAMETER stop "<|start_header_id|>"
PARAMETER stop "<|end_header_id|>"
PARAMETER stop "<|eot_id|>"
'''
# https://ollama.com/library/llama3.2-vision:latest/blobs/715415638c895a1f8e8c6
llama_32_vision_ollama = '''
FROM {__FILE_LOCATION__}
TEMPLATE """{{- range $index, $_ := .Messages }}<|start_header_id|>{{ .Role }}<|end_header_id|>
{{ .Content }}
{{- if gt (len (slice $.Messages $index)) 1 }}<|eot_id|>
{{- else if ne .Role "assistant" }}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
{{ end }}
{{- end }}"""
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | true |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/chat_templates.py | unsloth/chat_templates.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Public API of unsloth.chat_templates.
__all__ = [
    "get_chat_template",
    "test_chat_templates",
    "test_hf_gguf_equivalence",
    "remove_special_tokens",
    "to_sharegpt",
    "standardize_sharegpt",
    "standardize_data_formats",
    "apply_chat_template",
    "train_on_responses_only",
    "test_construct_chat_template",
]
from transformers import StoppingCriteria, StoppingCriteriaList
from torch import LongTensor, FloatTensor
from transformers.models.llama.modeling_llama import logger
from .save import patch_saving_functions
import os
import shutil
from .tokenizer_utils import *
from .models._utils import patch_tokenizer
import re
from unsloth_zoo.dataset_utils import (
train_on_responses_only,
standardize_data_formats,
)
standardize_sharegpt = standardize_data_formats
# Registry of chat templates, keyed by template name. Each value is a 4-tuple:
# (jinja chat template, eos token — or a {old: new} mapping for token renames,
# a boolean flag — presumably "EOS is a non-default special token that must be
# set on the tokenizer"; TODO confirm against get_chat_template — and an
# Ollama Modelfile string containing {__FILE_LOCATION__} / {__EOS_TOKEN__}
# placeholders filled in at export time).
CHAT_TEMPLATES = {}
# Default system message per template name (None when the format has none).
DEFAULT_SYSTEM_MESSAGE = {}
# =========================================== Unsloth
# Unsloth efficient template leverages from Zephyr
# NOTE(review): the template strings below are runtime data rendered into
# prompts — any byte change alters model input, so they must stay exact.
unsloth_template = \
"{{ bos_token }}"\
"{% if messages[0]['role'] == 'system' %}"\
"{{ messages[0]['content'] + '\n' }}"\
"{% set loop_messages = messages[1:] %}"\
"{% else %}"\
"{{ '{system_message}' + '\n' }}"\
"{% set loop_messages = messages %}"\
"{% endif %}"\
"{% for message in loop_messages %}"\
"{% if message['role'] == 'user' %}"\
"{{ '>>> User: ' + message['content'] + '\n' }}"\
"{% elif message['role'] == 'assistant' %}"\
"{{ '>>> Assistant: ' + message['content'] + eos_token + '\n' }}"\
"{% else %}"\
"{{ raise_exception('Only user and assistant roles are supported!') }}"\
"{% endif %}"\
"{% endfor %}"\
"{% if add_generation_prompt %}"\
"{{ '>>> Assistant: ' }}"\
"{% endif %}"
unsloth_ollama = \
'''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .System }}{{ .System }}
{{ end }}{{ if .Prompt }}>>> User: {{ .Prompt }}
{{ end }}>>> Assistant: {{ .Response }}{__EOS_TOKEN__}
"""
PARAMETER stop "{__EOS_TOKEN__}"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
SYSTEM """You are a helpful assistant to the user"""
'''
unsloth_eos_token = "eos_token"
CHAT_TEMPLATES["unsloth"] = (unsloth_template, unsloth_eos_token, False, unsloth_ollama,)
DEFAULT_SYSTEM_MESSAGE["unsloth"] = "You are a helpful assistant to the user"
# =========================================== Zephyr
# Zephyr has no BOS!
zephyr_template = \
"{% for message in messages %}"\
"{% if message['role'] == 'user' %}"\
"{{ '<|user|>\n' + message['content'] + eos_token + '\n' }}"\
"{% elif message['role'] == 'assistant' %}"\
"{{ '<|assistant|>\n' + message['content'] + eos_token + '\n' }}"\
"{% else %}"\
"{{ '<|system|>\n' + message['content'] + eos_token + '\n' }}"\
"{% endif %}"\
"{% endfor %}"\
"{% if add_generation_prompt %}"\
"{{ '<|assistant|>\n' }}"\
"{% endif %}"
zephyr_ollama = \
'''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .System }}<|system|>
{{ .System }}{__EOS_TOKEN__}
{{ end }}{{ if .Prompt }}<|user|>
{{ .Prompt }}{__EOS_TOKEN__}
{{ end }}<|assistant|>
{{ .Response }}{__EOS_TOKEN__}
"""
PARAMETER stop "{__EOS_TOKEN__}"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
zephyr_eos_token = "eos_token"
CHAT_TEMPLATES["zephyr"] = (zephyr_template, zephyr_eos_token, False, zephyr_ollama,)
DEFAULT_SYSTEM_MESSAGE["zephyr"] = None # No system message in Zephyr
# =========================================== ChatML
# ChatML has no BOS and not EOS! Rather <|im_start|> and <|im_end|> acts as BOS / EOS.
chatml_template = \
"{% for message in messages %}"\
"{% if message['role'] == 'user' %}"\
"{{'<|im_start|>user\n' + message['content'] + '<|im_end|>\n'}}"\
"{% elif message['role'] == 'assistant' %}"\
"{{'<|im_start|>assistant\n' + message['content'] + '<|im_end|>\n' }}"\
"{% else %}"\
"{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>\n' }}"\
"{% endif %}"\
"{% endfor %}"\
"{% if add_generation_prompt %}"\
"{{ '<|im_start|>assistant\n' }}"\
"{% endif %}"
chatml_ollama = \
'''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .System }}<|im_start|>system
{{ .System }}<|im_end|>
{{ end }}{{ if .Prompt }}<|im_start|>user
{{ .Prompt }}<|im_end|>
{{ end }}<|im_start|>assistant
{{ .Response }}<|im_end|>
"""
PARAMETER stop "<|im_start|>"
PARAMETER stop "<|im_end|>"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
# ChatML uses its own special token as EOS (third tuple element is True).
chatml_eos_token = "<|im_end|>"
CHAT_TEMPLATES["chatml"] = (chatml_template, chatml_eos_token, True, chatml_ollama,)
DEFAULT_SYSTEM_MESSAGE["chatml"] = None # No system message in ChatML
# =========================================== Mistral-1
# Mistral Instruct doesn't allow system prompts, so we append it to the user message.
mistral_template = \
"{{ bos_token }}"\
"{% if messages[0]['role'] == 'system' %}"\
"{% if messages[1]['role'] == 'user' %}"\
"{{ '[INST] ' + messages[0]['content'] + ' ' + messages[1]['content'] + ' [/INST]' }}"\
"{% set loop_messages = messages[2:] %}"\
"{% else %}"\
"{{ '[INST] ' + messages[0]['content'] + ' [/INST]' }}"\
"{% set loop_messages = messages[1:] %}"\
"{% endif %}"\
"{% else %}"\
"{% set loop_messages = messages %}"\
"{% endif %}"\
"{% for message in loop_messages %}"\
"{% if message['role'] == 'user' %}"\
"{{ '[INST] ' + message['content'] + ' [/INST]' }}"\
"{% elif message['role'] == 'assistant' %}"\
"{{ message['content'] + eos_token }}"\
"{% else %}"\
"{{ raise_exception('Only user and assistant roles are supported!') }}"\
"{% endif %}"\
"{% endfor %}"
# Ollama from https://www.ollama.com/library/mistral
mistral_ollama = \
'''
FROM {__FILE_LOCATION__}
TEMPLATE """[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }} [/INST]"""
PARAMETER stop "{__EOS_TOKEN__}"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
mistral_eos_token = "eos_token"
CHAT_TEMPLATES["mistral"] = (mistral_template, mistral_eos_token, False, mistral_ollama,)
DEFAULT_SYSTEM_MESSAGE["mistral"] = None # No system message in Mistral
# =========================================== Llama-2
# Adds BOS to every convo! And weird <<SYS>> system messages.
llama_template = \
"{% if messages[0]['role'] == 'system' %}"\
"{% if messages[1]['role'] == 'user' %}"\
"{{ bos_token + '[INST] <<SYS>>\n' + messages[0]['content'] + '\n<</SYS>>\n\n' + messages[1]['content'] + ' [/INST]' }}"\
"{% set loop_messages = messages[2:] %}"\
"{% else %}"\
"{{ bos_token + '[INST] ' + messages[0]['content'] + ' [/INST]' }}"\
"{% set loop_messages = messages[1:] %}"\
"{% endif %}"\
"{% else %}"\
"{% set loop_messages = messages %}"\
"{% endif %}"\
"{% for message in loop_messages %}"\
"{% if message['role'] == 'user' %}"\
"{{ bos_token + '[INST] ' + message['content'].strip() + ' [/INST]' }}"\
"{% elif message['role'] == 'assistant' %}"\
"{{ ' ' + message['content'].strip() + ' ' + eos_token }}"\
"{% else %}"\
"{{ raise_exception('Only user and assistant roles are supported!') }}"\
"{% endif %}"\
"{% endfor %}"
# Ollama from https://www.ollama.com/library/llama3
# NOTE(review): the comment above says llama3 but this is the Llama-2
# [INST]/<<SYS>> format — presumably a stale copy-paste; verify the URL.
llama_ollama = \
'''
FROM {__FILE_LOCATION__}
TEMPLATE """[INST] <<SYS>>{{ .System }}<</SYS>>
{{ .Prompt }} [/INST]"""
PARAMETER stop "{__EOS_TOKEN__}"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
llama_eos_token = "eos_token"
CHAT_TEMPLATES["llama"] = (llama_template, llama_eos_token, False, llama_ollama,)
DEFAULT_SYSTEM_MESSAGE["llama"] = None # No system message in Llama
# =========================================== Vicuna
# https://github.com/lm-sys/FastChat/blob/main/docs/vicuna_weights_version.md#prompt-template
vicuna_template = \
"{{ bos_token }}"\
"{% if messages[0]['role'] == 'system' %}"\
"{{ messages[0]['content'] + ' ' }}"\
"{% set loop_messages = messages[1:] %}"\
"{% else %}"\
"{{ '{system_message}' + ' ' }}"\
"{% set loop_messages = messages %}"\
"{% endif %}"\
"{% for message in loop_messages %}"\
"{% if message['role'] == 'user' %}"\
"{{ 'USER: ' + message['content'] + ' ' }}"\
"{% elif message['role'] == 'assistant' %}"\
"{{ 'ASSISTANT: ' + message['content'] + eos_token }}"\
"{% else %}"\
"{{ raise_exception('Only user and assistant roles are supported!') }}"\
"{% endif %}"\
"{% endfor %}"\
"{% if add_generation_prompt %}"\
"{{ 'ASSISTANT:' }}"\
"{% endif %}"
# Ollama from https://www.ollama.com/library/vicuna
vicuna_ollama = \
'''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}USER: {{ .Prompt }} {{ end }}ASSISTANT: {{ .Response }} {__EOS_TOKEN__}"""
PARAMETER stop "{__EOS_TOKEN__}"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
vicuna_eos_token = "eos_token"
CHAT_TEMPLATES["vicuna"] = (vicuna_template, vicuna_eos_token, False, vicuna_ollama,)
DEFAULT_SYSTEM_MESSAGE["vicuna"] = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions."
# =========================================== Vicuna Old
# Older Vicuna v0 format: "### Human:" / "### Assistant:" turn markers.
# https://github.com/lm-sys/FastChat/blob/main/docs/vicuna_weights_version.md#prompt-template
vicuna_old_template = \
"{{ bos_token }}"\
"{% if messages[0]['role'] == 'system' %}"\
"{{ messages[0]['content'] + '\n' }}"\
"{% set loop_messages = messages[1:] %}"\
"{% else %}"\
"{{ '{system_message}' + '\n' }}"\
"{% set loop_messages = messages %}"\
"{% endif %}"\
"{% for message in loop_messages %}"\
"{% if message['role'] == 'user' %}"\
"{{ '### Human: ' + message['content'] + '\n' }}"\
"{% elif message['role'] == 'assistant' %}"\
"{{ '### Assistant: ' + message['content'] + eos_token + '\n' }}"\
"{% else %}"\
"{{ raise_exception('Only user and assistant roles are supported!') }}"\
"{% endif %}"\
"{% endfor %}"\
"{% if add_generation_prompt %}"\
"{{ '### Assistant:' }}"\
"{% endif %}"
vicuna_old_ollama = \
'''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .System }}{{ .System }}
{{ end }}{{ if .Prompt }}### Human: {{ .Prompt }}
{{ end }}### Assistant: {{ .Response }}{__EOS_TOKEN__}
"""
PARAMETER stop "{__EOS_TOKEN__}"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
SYSTEM """A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions."""
'''
vicuna_old_eos_token = "eos_token"
CHAT_TEMPLATES["vicuna_old"] = (vicuna_old_template, vicuna_old_eos_token, False, vicuna_old_ollama,)
# Fix: the previous literal used "human\\'s", which embeds a literal backslash
# before the apostrophe ("\\" escapes the backslash, not the quote). The plain
# apostrophe matches the SYSTEM string inside vicuna_old_ollama.
DEFAULT_SYSTEM_MESSAGE["vicuna_old"] = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions."
# "vicuna old" (with a space) is an accepted alias for "vicuna_old".
CHAT_TEMPLATES["vicuna old"] = CHAT_TEMPLATES["vicuna_old"]
DEFAULT_SYSTEM_MESSAGE["vicuna old"] = DEFAULT_SYSTEM_MESSAGE["vicuna_old"]
# =========================================== Alpaca multi turn
# https://github.com/tatsu-lab/stanford_alpaca Changed for multi-turn convos
alpaca_template = \
"{{ bos_token }}"\
"{% if messages[0]['role'] == 'system' %}"\
"{{ messages[0]['content'] + '\n\n' }}"\
"{% set loop_messages = messages[1:] %}"\
"{% else %}"\
"{{ '{system_message}' + '\n\n' }}"\
"{% set loop_messages = messages %}"\
"{% endif %}"\
"{% for message in loop_messages %}"\
"{% if message['role'] == 'user' %}"\
"{{ '### Instruction:\n' + message['content'] + '\n\n' }}"\
"{% elif message['role'] == 'assistant' %}"\
"{{ '### Response:\n' + message['content'] + eos_token + '\n\n' }}"\
"{% else %}"\
"{{ raise_exception('Only user and assistant roles are supported!') }}"\
"{% endif %}"\
"{% endfor %}"\
"{% if add_generation_prompt %}"\
"{{ '### Response:\n' }}"\
"{% endif %}"
alpaca_ollama = \
'''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .System }}{{ .System }}
{{ end }}{{ if .Prompt }}### Instruction:
{{ .Prompt }}{{ end }}
### Response:
{{ .Response }}{__EOS_TOKEN__}
"""
PARAMETER stop "{__EOS_TOKEN__}"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
SYSTEM """Below are some instructions that describe some tasks. Write responses that appropriately complete each request."""
'''
alpaca_eos_token = "eos_token"
CHAT_TEMPLATES["alpaca"] = (alpaca_template, alpaca_eos_token, False, alpaca_ollama,)
DEFAULT_SYSTEM_MESSAGE["alpaca"] = "Below are some instructions that describe some tasks. Write responses that appropriately complete each request."
# =========================================== Gemma
# https://huggingface.co/google/gemma-7b-it
# Notice we must use |trim for lstrip and rstrip. <start_of_turn> maps to 106.
# <end_of_turn> maps to 107. user and model are normal 1 word tokens.
# Gemma has no system role; a leading system message is folded into the first
# user turn (the first branch below concatenates messages[0] and messages[1]).
gemma_template = \
"{{ bos_token }}"\
"{% if messages[0]['role'] == 'system' %}"\
"{{'<start_of_turn>user\n' + messages[0]['content'] | trim + ' ' + messages[1]['content'] | trim + '<end_of_turn>\n'}}"\
"{% set messages = messages[2:] %}"\
"{% endif %}"\
"{% for message in messages %}"\
"{% if message['role'] == 'user' %}"\
"{{'<start_of_turn>user\n' + message['content'] | trim + '<end_of_turn>\n'}}"\
"{% elif message['role'] == 'assistant' %}"\
"{{'<start_of_turn>model\n' + message['content'] | trim + '<end_of_turn>\n' }}"\
"{% else %}"\
"{{ raise_exception('Only user and assistant roles are supported!') }}"\
"{% endif %}"\
"{% endfor %}"\
"{% if add_generation_prompt %}"\
"{{ '<start_of_turn>model\n' }}"\
"{% endif %}"
# Ollama from https://www.ollama.com/library/gemma
gemma_ollama = \
'''
FROM {__FILE_LOCATION__}
TEMPLATE """<start_of_turn>user
{{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}<end_of_turn>
<start_of_turn>model
{{ .Response }}<end_of_turn>
"""
PARAMETER repeat_penalty 1
PARAMETER stop "<start_of_turn>"
PARAMETER stop "<end_of_turn>"
PARAMETER penalize_newline false
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
gemma_eos_token = "<end_of_turn>"
CHAT_TEMPLATES["gemma"] = (gemma_template, gemma_eos_token, True, gemma_ollama,)
DEFAULT_SYSTEM_MESSAGE["gemma"] = None # No system message in Gemma
# =========================================== Gemma with ChatML instead
# We find using <eos> is still more appropriate!
gemma_chatml_template = "{{ bos_token }}" + chatml_template
gemma_chatml_ollama = \
'''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .System }}<|im_start|>system
{{ .System }}<|im_end|>
{{ end }}{{ if .Prompt }}<|im_start|>user
{{ .Prompt }}<|im_end|>
{{ end }}<|im_start|>assistant
{{ .Response }}<|im_end|>
"""
PARAMETER repeat_penalty 1
PARAMETER stop "<|im_start|>"
PARAMETER stop "<|im_end|>"
PARAMETER penalize_newline false
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
# Here the "eos token" slot holds a pair: a rename map for existing special
# tokens plus the new EOS string itself.
gemma_chatml_eos_token = (
{"<start_of_turn>" : "<|im_start|>", "<eos>" : "<|im_end|>"},
"<|im_end|>",
)
CHAT_TEMPLATES["gemma_chatml"] = (gemma_chatml_template, gemma_chatml_eos_token, True, gemma_chatml_ollama,)
DEFAULT_SYSTEM_MESSAGE["gemma_chatml"] = None # No system message in Gemma
# =========================================== Gemma 2
# Same as Gemma 1, but with sliding window attention!
# https://ollama.com/library/gemma2/blobs/6522ca797f47
gemma2_template = gemma_template
gemma2_ollama = gemma_ollama + "PARAMETER num_ctx 4096\n"
gemma2_eos_token = "<end_of_turn>"
CHAT_TEMPLATES["gemma2"] = (gemma2_template, gemma2_eos_token, True, gemma2_ollama,)
DEFAULT_SYSTEM_MESSAGE["gemma2"] = None # No system message in Gemma 2
# =========================================== Gemma 2 with ChatML instead
gemma2_chatml_template = gemma_chatml_template
gemma2_chatml_ollama = gemma_chatml_ollama + "PARAMETER num_ctx 4096\n"
gemma2_chatml_eos_token = gemma_chatml_eos_token
CHAT_TEMPLATES["gemma2_chatml"] = (gemma2_chatml_template, gemma2_chatml_eos_token, True, gemma2_chatml_ollama,)
DEFAULT_SYSTEM_MESSAGE["gemma2_chatml"] = None # No system message in Gemma 2
# =========================================== Llama-3
# Weirdly \n\n is needed?
llama3_template = \
"{{ bos_token }}"\
"{% for message in messages %}"\
"{% if message['role'] == 'user' %}"\
"{{ '<|start_header_id|>user<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}"\
"{% elif message['role'] == 'assistant' %}"\
"{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}"\
"{% else %}"\
"{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' }}"\
"{% endif %}"\
"{% endfor %}"\
"{% if add_generation_prompt %}"\
"{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}"\
"{% endif %}"
# Ollama from https://www.ollama.com/library/llama3
llama3_ollama = \
'''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .System }}<|start_header_id|>system<|end_header_id|>
{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>
{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>
{{ .Response }}<|eot_id|>"""
PARAMETER stop "<|start_header_id|>"
PARAMETER stop "<|end_header_id|>"
PARAMETER stop "<|eot_id|>"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
llama3_template_eos_token = "eos_token"
# Registered under both spellings.
CHAT_TEMPLATES["llama-3"] = (llama3_template, llama3_template_eos_token, False, llama3_ollama,)
DEFAULT_SYSTEM_MESSAGE["llama-3"] = None # No default system message for Llama-3
CHAT_TEMPLATES["llama3"] = (llama3_template, llama3_template_eos_token, False, llama3_ollama,)
DEFAULT_SYSTEM_MESSAGE["llama3"] = None # No default system message for Llama-3
# =========================================== Phi-3
# "{{ bos_token }}"\ # Phi-3.5 removes BOS?
phi3_template = \
"{% for message in messages %}"\
"{% if message['role'] == 'user' %}"\
"{{'<|user|>\n' + message['content'] + '<|end|>\n'}}"\
"{% elif message['role'] == 'assistant' %}"\
"{{'<|assistant|>\n' + message['content'] + '<|end|>\n'}}"\
"{% else %}"\
"{{'<|' + message['role'] + '|>\n' + message['content'] + '<|end|>\n'}}"\
"{% endif %}"\
"{% endfor %}"\
"{% if add_generation_prompt %}"\
"{{ '<|assistant|>\n' }}"\
"{% endif %}"
# Ollama from https://www.ollama.com/library/phi3
phi3_ollama = \
'''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .System }}<|system|>
{{ .System }}<|end|>
{{ end }}{{ if .Prompt }}<|user|>
{{ .Prompt }}<|end|>
{{ end }}<|assistant|>
{{ .Response }}<|end|>
"""
PARAMETER stop "<|end|>"
PARAMETER stop "<|user|>"
PARAMETER stop "<|assistant|>"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
phi3_template_eos_token = "<|end|>"
# Phi-3.5 shares the Phi-3 template; registered under three name variants.
CHAT_TEMPLATES["phi-3"] = (phi3_template, phi3_template_eos_token, False, phi3_ollama,)
DEFAULT_SYSTEM_MESSAGE["phi-3"] = None # No default system message for Phi-3
CHAT_TEMPLATES["phi-35"] = CHAT_TEMPLATES["phi-3"]
DEFAULT_SYSTEM_MESSAGE["phi-35"] = None # No default system message for Phi-3.5
CHAT_TEMPLATES["phi-3.5"] = CHAT_TEMPLATES["phi-3"]
DEFAULT_SYSTEM_MESSAGE["phi-3.5"] = None # No default system message for Phi-3.5
# =========================================== Llama-3.1
"""
No trimming in Llama 3.1 Instruct!
Also an extra newline for Cutting Knowledge Date
See https://colab.research.google.com/drive/1Xpqq5xpIgO-B00MQ-UccYMwN2J8QFgBM?usp=sharing
Also should be
import datetime
tokenizer.apply_chat_template(
    messages,
    add_generation_prompt = True,
    tokenize = False,
    date_string = datetime.today().strftime("%d %B %Y")),
)
"""
# NOTE(review): unlike the earlier single-line templates, this is a full
# multi-line Jinja template (tool calling, builtin tools, date_string). The
# "{system_message}" placeholder is substituted later, same as the others.
llama31_template = \
"""{{- bos_token }}
{%- if custom_tools is defined %}
{%- set tools = custom_tools %}
{%- endif %}
{%- if not tools_in_user_message is defined %}
{%- set tools_in_user_message = true %}
{%- endif %}
{%- if not date_string is defined %}
{%- set date_string = "26 July 2024" %}
{%- endif %}
{%- if not tools is defined %}
{%- set tools = none %}
{%- endif %}
{#- This block extracts the system message, so we can slot it into the right place. #}
{%- if messages[0]['role'] == 'system' %}
{%- set system_message = messages[0]['content'] %}
{%- set messages = messages[1:] %}
{%- else %}
{%- set system_message = "{system_message}" %}
{%- endif %}
{#- System message + builtin tools #}
{{- "<|start_header_id|>system<|end_header_id|>\n\n" }}
{%- if builtin_tools is defined or tools is not none %}
{{- "Environment: ipython\n" }}
{%- endif %}
{%- if builtin_tools is defined %}
{{- "Tools: " + builtin_tools | reject('equalto', 'code_interpreter') | join(", ") + "\n\n"}}
{%- endif %}
{{- "Cutting Knowledge Date: December 2023\n" }}
{{- "Today Date: " + date_string + "\n\n" }}
{%- if tools is not none and not tools_in_user_message %}
{{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }}
{{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
{{- "Do not use variables.\n\n" }}
{%- for t in tools %}
{{- t | tojson(indent=4) }}
{{- "\n\n" }}
{%- endfor %}
{%- endif %}
{{- system_message }}
{{- "<|eot_id|>" }}
{#- Custom tools are passed in a user message with some extra guidance #}
{%- if tools_in_user_message and not tools is none %}
{#- Extract the first user message so we can plug it in here #}
{%- if messages | length != 0 %}
{%- set first_user_message = messages[0]['content'] %}
{%- set messages = messages[1:] %}
{%- else %}
{{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }}
{%- endif %}
{{- '<|start_header_id|>user<|end_header_id|>\n\n' -}}
{{- "Given the following functions, please respond with a JSON for a function call " }}
{{- "with its proper arguments that best answers the given prompt.\n\n" }}
{{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }}
{{- "Do not use variables.\n\n" }}
{%- for t in tools %}
{{- t | tojson(indent=4) }}
{{- "\n\n" }}
{%- endfor %}
{{- first_user_message + "<|eot_id|>"}}
{%- endif %}
{%- for message in messages %}
{%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}
{{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] + '<|eot_id|>' }}
{%- elif 'tool_calls' in message %}
{%- if not message.tool_calls|length == 1 %}
{{- raise_exception("This model only supports single tool-calls at once!") }}
{%- endif %}
{%- set tool_call = message.tool_calls[0].function %}
{%- if builtin_tools is defined and tool_call.name in builtin_tools %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
{{- "<|python_tag|>" + tool_call.name + ".call(" }}
{%- for arg_name, arg_val in tool_call.arguments | items %}
{{- arg_name + '="' + arg_val + '"' }}
{%- if not loop.last %}
{{- ", " }}
{%- endif %}
{%- endfor %}
{{- ")" }}
{%- else %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}}
{{- '{"name": "' + tool_call.name + '", ' }}
{{- '"parameters": ' }}
{{- tool_call.arguments | tojson }}
{{- "}" }}
{%- endif %}
{%- if builtin_tools is defined %}
{#- This means we're in ipython mode #}
{{- "<|eom_id|>" }}
{%- else %}
{{- "<|eot_id|>" }}
{%- endif %}
{%- elif message.role == "tool" or message.role == "ipython" %}
{{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }}
{%- if message.content is mapping or message.content is iterable %}
{{- message.content | tojson }}
{%- else %}
{{- message.content }}
{%- endif %}
{{- "<|eot_id|>" }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }}
{%- endif %}
"""
# Ollama from https://ollama.com/library/llama3.1 (needs updating!)
llama31_ollama = \
'''
FROM {__FILE_LOCATION__}
TEMPLATE """{{ if .Messages }}
{{- if or .System .Tools }}<|start_header_id|>system<|end_header_id|>
{{- if .System }}
{{ .System }}
{{- end }}
{{- if .Tools }}
You are a helpful assistant with tool calling capabilities. When you receive a tool call response, use the output to format an answer to the original use question.
{{- end }}
{{- end }}<|eot_id|>
{{- range $i, $_ := .Messages }}
{{- $last := eq (len (slice $.Messages $i)) 1 }}
{{- if eq .Role "user" }}<|start_header_id|>user<|end_header_id|>
{{- if and $.Tools $last }}
Given the following functions, please respond with a JSON for a function call with its proper arguments that best answers the given prompt.
Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}. Do not use variables.
{{ $.Tools }}
{{- end }}
{{ .Content }}<|eot_id|>{{ if $last }}<|start_header_id|>assistant<|end_header_id|>
{{ end }}
{{- else if eq .Role "assistant" }}<|start_header_id|>assistant<|end_header_id|>
{{- if .ToolCalls }}
{{- range .ToolCalls }}{"name": "{{ .Function.Name }}", "parameters": {{ .Function.Arguments }}}{{ end }}
{{- else }}
{{ .Content }}{{ if not $last }}<|eot_id|>{{ end }}
{{- end }}
{{- else if eq .Role "tool" }}<|start_header_id|>ipython<|end_header_id|>
{{ .Content }}<|eot_id|>{{ if $last }}<|start_header_id|>assistant<|end_header_id|>
{{ end }}
{{- end }}
{{- end }}
{{- else }}
{{- if .System }}<|start_header_id|>system<|end_header_id|>
{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>
{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>
{{ end }}{{ .Response }}{{ if .Response }}<|eot_id|>{{ end }}"""
PARAMETER stop "<|start_header_id|>"
PARAMETER stop "<|end_header_id|>"
PARAMETER stop "<|eot_id|>"
PARAMETER stop "<|eom_id|>"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
llama31_template_eos_token = "eos_token"
CHAT_TEMPLATES["llama-3.1"] = (llama31_template, llama31_template_eos_token, False, llama31_ollama,)
DEFAULT_SYSTEM_MESSAGE["llama-3.1"] = "" # Llama3.1 default system message is empty + the dates
CHAT_TEMPLATES["llama-31"] = (llama31_template, llama31_template_eos_token, False, llama31_ollama,)
DEFAULT_SYSTEM_MESSAGE["llama-31"] = "" # Llama3.1 default system message is empty + the dates
# Llama 3.2 / 3.3 reuse the Llama 3.1 chat template unchanged.
for version in ("llama-3.2", "llama-3.3", "llama-32", "llama-33"):
    CHAT_TEMPLATES[version] = CHAT_TEMPLATES["llama-3.1"]
    DEFAULT_SYSTEM_MESSAGE[version] = ""
# =========================================== Qwen 2.5
# Full Qwen 2.5 Jinja template, including <tool_call>/<tool_response> tool
# calling. The "{system_message}" placeholder is substituted later.
qwen25_template = \
"""{%- if tools %}
{{- \'<|im_start|>system\\n\' }}
{%- if messages[0][\'role\'] == \'system\' %}
{{- messages[0][\'content\'] }}
{%- else %}
{{- \'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.\' }}
{%- endif %}
{{- "\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>" }}
{%- for tool in tools %}
{{- "\\n" }}
{{- tool | tojson }}
{%- endfor %}
{{- "\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\"name\\": <function-name>, \\"arguments\\": <args-json-object>}\\n</tool_call><|im_end|>\\n" }}\n{%- else %}
{%- if messages[0][\'role\'] == \'system\' %}
{{- \'<|im_start|>system\\n\' + messages[0][\'content\'] + \'<|im_end|>\\n\' }}
{%- else %}
{{- \'<|im_start|>system\\n{system_message}<|im_end|>\\n\' }}
{%- endif %}\n{%- endif %}\n{%- for message in messages %}
{%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
{{- \'<|im_start|>\' + message.role + \'\\n\' + message.content + \'<|im_end|>\' + \'\\n\' }}
{%- elif message.role == "assistant" %}
{{- \'<|im_start|>\' + message.role }}
{%- if message.content %}
{{- \'\\n\' + message.content }}
{%- endif %}
{%- for tool_call in message.tool_calls %}
{%- if tool_call.function is defined %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- \'\\n<tool_call>\\n{"name": "\' }}
{{- tool_call.name }}
{{- \'", "arguments": \' }}
{{- tool_call.arguments | tojson }}
{{- \'}\\n</tool_call>\' }}
{%- endfor %}
{{- \'<|im_end|>\\n\' }}
{%- elif message.role == "tool" %}
{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %} {{- \'<|im_start|>user\' }}
{%- endif %}
{{- \'\\n<tool_response>\\n\' }}
{{- message.content }}
{{- \'\\n</tool_response>\' }}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- \'<|im_end|>\\n\' }}
{%- endif %}
{%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}
{{- \'<|im_start|>assistant\\n\' }}
{%- endif %}
"""
# Ollama from https://ollama.com/library/qwen2.5/blobs/eb4402837c78
qwen25_ollama = \
'''
FROM {__FILE_LOCATION__}
TEMPLATE """{{- if .Messages }}
{{- if or .System .Tools }}<|im_start|>system
{{- if .System }}
{{ .System }}
{{- end }}
{{- if .Tools }}
# Tools
You may call one or more functions to assist with the user query.
You are provided with function signatures within <tools></tools> XML tags:
<tools>
{{- range .Tools }}
{"type": "function", "function": {{ .Function }}}
{{- end }}
</tools>
For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
<tool_call>
{"name": <function-name>, "arguments": <args-json-object>}
</tool_call>
{{- end }}<|im_end|>
{{ end }}
{{- range $i, $_ := .Messages }}
{{- $last := eq (len (slice $.Messages $i)) 1 -}}
{{- if eq .Role "user" }}<|im_start|>user
{{ .Content }}<|im_end|>
{{ else if eq .Role "assistant" }}<|im_start|>assistant
{{ if .Content }}{{ .Content }}
{{- else if .ToolCalls }}<tool_call>
{{ range .ToolCalls }}{"name": "{{ .Function.Name }}", "arguments": {{ .Function.Arguments }}}
{{ end }}</tool_call>
{{- end }}{{ if not $last }}<|im_end|>
{{ end }}
{{- else if eq .Role "tool" }}<|im_start|>user
<tool_response>
{{ .Content }}
</tool_response><|im_end|>
{{ end }}
{{- if and (ne .Role "assistant") $last }}<|im_start|>assistant
{{ end }}
{{- end }}
{{- else }}
{{- if .System }}<|im_start|>system
{{ .System }}<|im_end|>
{{ end }}{{ if .Prompt }}<|im_start|>user
{{ .Prompt }}<|im_end|>
{{ end }}<|im_start|>assistant
{{ end }}{{ .Response }}{{ if .Response }}<|im_end|>{{ end }}"""
PARAMETER stop "<|im_end|>"
PARAMETER stop "<|endoftext|>"
PARAMETER temperature 1.5
PARAMETER min_p 0.1
'''
qwen25_template_eos_token = "eos_token"
qwen25_default_system_message = "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."
CHAT_TEMPLATES["qwen-2.5"] = (qwen25_template, qwen25_template_eos_token, False, qwen25_ollama,)
DEFAULT_SYSTEM_MESSAGE["qwen-2.5"] = qwen25_default_system_message # Qwen 2.5's own default system message
CHAT_TEMPLATES["qwen-25"] = (qwen25_template, qwen25_template_eos_token, False, qwen25_ollama,)
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | true |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/import_fixes.py | unsloth/import_fixes.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import importlib.util
from pathlib import Path
from importlib.metadata import version as importlib_version
from packaging.version import Version as TrueVersion
import re
import logging
import textwrap
import warnings
# We cannot do from unsloth_zoo.log import logger since FBGEMM might cause seg faults.
# Read the logging toggle once; accepted truthy spellings are "1"/"True"/"true".
UNSLOTH_ENABLE_LOGGING = os.environ.get("UNSLOTH_ENABLE_LOGGING", "0") in (
    "1",
    "True",
    "true",
)
# Module-level logger; verbosity is controlled by the env toggle above.
logger = logging.getLogger(__name__)
if UNSLOTH_ENABLE_LOGGING:
    logging.basicConfig(
        level = logging.INFO, format = "[%(name)s|%(levelname)s]%(message)s"
    )
    logger.setLevel(logging.INFO)
else:
    logging.basicConfig(
        level = logging.WARNING, format = "[%(name)s|%(levelname)s]%(message)s"
    )
    logger.setLevel(logging.WARNING)
def Version(version):
    """Convert a version string into a `packaging.version.Version`.

    Keeps the leading digits-and-dots prefix of `version`; any trailing
    dev / alpha / beta / rc suffix is replaced by ".1" so comparisons still
    work. Raises RuntimeError (annotated with the caller's file and line)
    when `version` has no parsable numeric prefix.
    """
    try:
        text = str(version)
        # Grab the leading numeric component, e.g. "2.4.0" from "2.4.0.dev1".
        match = re.match(r"[0-9\.]{1,}", text)
        if match is None:
            # Fix: the original did `raise Exception(str(e))` with `e`
            # undefined — a NameError that only worked because the bare
            # `except:` below swallowed it. Raise a descriptive error instead.
            raise ValueError(f"No numeric version component in `{version}`")
        new_version = match.group(0).rstrip(".")
        if new_version != version:
            new_version += ".1"  # Add .1 for dev / alpha / beta / rc
        return TrueVersion(new_version)
    except Exception:
        from inspect import getframeinfo, stack
        caller = getframeinfo(stack()[1][0])
        raise RuntimeError(
            f"Unsloth: Could not get version for `{version}`\n"
            f"File name = [{caller.filename}] Line number = [{caller.lineno}]"
        )
# Ignore logging messages
class HideLoggingMessage(logging.Filter):
    """Logging filter that drops every record whose message contains `text`."""
    __slots__ = ("text",)

    def __init__(self, text):
        self.text = text

    def filter(self, x):
        # Keep the record only when the hidden substring is absent.
        return self.text not in x.getMessage()
class HidePrintMessage:
    """Write-through stream wrapper that drops any write containing a
    registered substring; all other attributes delegate to the wrapped stream.
    """

    def __init__(self, original_stream):
        self._original_stream = original_stream
        self._hidden_texts = []

    def add_filter(self, text):
        """Register a substring; future writes containing it are suppressed."""
        self._hidden_texts.append(text)

    def write(self, message):
        # Suppress the whole write as soon as one hidden substring matches.
        for hidden in self._hidden_texts:
            if hidden in message:
                return
        self._original_stream.write(message)

    def flush(self):
        self._original_stream.flush()

    def __getattr__(self, name):
        # Fall back to the wrapped stream for everything else (encoding, etc.).
        return getattr(self._original_stream, name)
# Quiet mode: unless Unsloth logging is explicitly enabled, suppress known-noisy
# third-party output at import time.
if os.environ.get("UNSLOTH_ENABLE_LOGGING", "0") != "1":
    import sys
    # Apply to stderr for FBGEMM
    # NOTE: replaces the process-wide sys.stderr with a filtering proxy.
    sys.stderr = HidePrintMessage(sys.stderr)
    # https://github.com/pytorch/FBGEMM/blob/d99cd96490ec4aabac2ee95b1e76ea4dcfcfa628/fbgemm_gpu/experimental/gemm/triton_gemm/utils.py#L43-L52
    sys.stderr.add_filter("TMA benchmarks will be running")
    # Skipping import of cpp extensions due to incompatible torch version 2.9.0+cu128 for torchao version 0.15.0
    logging.getLogger("torchao").setLevel(logging.ERROR)
# SyntaxWarning: invalid escape sequence '\.'
warnings.filterwarnings(
    "ignore", message = "invalid escape sequence", category = SyntaxWarning
)
# Fix up AttributeError: 'MessageFactory' object has no attribute 'GetPrototype'
# MUST do this at the start primarily due to tensorflow causing issues
def fix_message_factory_issue():
    """Ensure google.protobuf.message_factory exposes MessageFactory.GetPrototype.

    Newer protobuf releases expose a module-level GetMessageClass instead of
    MessageFactory.GetPrototype, which breaks older consumers (tensorflow in
    particular). Best-effort: any failure is silently ignored.
    """
    try:
        import google.protobuf.message_factory
        # Inert stand-in used when protobuf provides no usable MessageFactory.
        class MessageFactory:
            def CreatePrototype(self, *args, **kwargs):
                return
            def GetMessages(self, *args, **kwargs):
                return
            def GetPrototype(self, *args, **kwargs):
                return
        # Case 1: MessageFactory missing entirely -> install the stub.
        if not hasattr(google.protobuf.message_factory, "MessageFactory"):
            logger.info("Unsloth: Patching protobuf.MessageFactory as it doesn't exist")
            google.protobuf.message_factory.MessageFactory = MessageFactory
        # Case 2: GetPrototype missing and no GetMessageClass replacement
        # either -> replace the whole factory with the stub.
        elif (
            hasattr(google.protobuf.message_factory, "MessageFactory")
            and not hasattr(
                google.protobuf.message_factory.MessageFactory, "GetPrototype"
            )
            and not hasattr(google.protobuf.message_factory, "GetMessageClass")
        ):
            google.protobuf.message_factory.MessageFactory = MessageFactory
            logger.info("Unsloth: Patching protobuf.MessageFactory as it doesn't exist")
        # Case 3: GetPrototype missing but GetMessageClass exists -> bridge
        # the old method onto the new function.
        elif (
            hasattr(google.protobuf.message_factory, "MessageFactory")
            and not hasattr(
                google.protobuf.message_factory.MessageFactory, "GetPrototype"
            )
            and hasattr(google.protobuf.message_factory, "GetMessageClass")
        ):
            GetMessageClass = google.protobuf.message_factory.GetMessageClass
            def GetPrototype(self, descriptor):
                return GetMessageClass(descriptor)
            google.protobuf.message_factory.MessageFactory.GetPrototype = GetPrototype
            logger.info("Unsloth: Patching protobuf.MessageFactory.GetPrototype")
        pass
    except:
        pass
# Fix Xformers performance issues since 0.0.25
def fix_xformers_performance_issue():
    """Rewrite xformers' cutlass.py in place to fix a performance regression.

    For xformers < 0.0.29, replaces `num_splits_key=-1` with
    `num_splits_key=None` in ops/fmha/cutlass.py (see xformers issue #1176).
    No-op when xformers is absent, new enough, or already patched; failures
    are logged and swallowed.
    """
    spec = importlib.util.find_spec("xformers")
    if spec is None:
        return
    xformers_version = importlib_version("xformers")
    if Version(xformers_version) < Version("0.0.29"):
        # Resolve the installed package directory (namespace packages have no origin).
        xformers_location = spec.origin
        if xformers_location is None:
            xformers_location = spec.submodule_search_locations[0]
        else:
            xformers_location = os.path.split(xformers_location)[0]
        cutlass = Path(xformers_location) / "ops" / "fmha" / "cutlass.py"
        try:
            if cutlass.exists():
                with open(cutlass, "r+", encoding = "utf-8") as f:
                    text = f.read()
                    # See https://github.com/facebookresearch/xformers/issues/1176#issuecomment-2545829591
                    if "num_splits_key=-1," in text:
                        text = text.replace(
                            "num_splits_key=-1,",
                            "num_splits_key=None,",
                        )
                        f.seek(0)
                        f.write(text)
                        f.truncate()
                        logger.info(
                            "Unsloth: Patching Xformers to fix some performance issues."
                        )
        except Exception as e:
            logger.info(f"Unsloth: Failed patching Xformers with error = {str(e)}")
# ValueError: 'aimv2' is already used by a Transformers config, pick another name.
def fix_vllm_aimv2_issue():
    """Patch vLLM < 0.10.1 so it no longer re-registers the "aimv2" config.

    Edits transformers_utils/configs/ovis.py in the installed vLLM: removes
    the duplicate `AutoConfig.register("aimv2", ...)` call (which raises
    `ValueError: 'aimv2' is already used by a Transformers config`) and makes
    the backbone-config construction branch on the "aimv2" model type.
    No-op when vLLM is absent/new enough; failures are logged and swallowed.
    """
    spec = importlib.util.find_spec("vllm")
    if spec is None:
        return
    vllm_version = importlib_version("vllm")
    if Version(vllm_version) < Version("0.10.1"):
        # Resolve the installed package directory (namespace packages have no origin).
        vllm_location = spec.origin
        if vllm_location is None:
            vllm_location = spec.submodule_search_locations[0]
        else:
            vllm_location = os.path.split(vllm_location)[0]
        ovis_config = Path(vllm_location) / "transformers_utils" / "configs" / "ovis.py"
        try:
            if ovis_config.exists():
                with open(ovis_config, "r+", encoding = "utf-8") as f:
                    text = f.read()
                    # See https://github.com/vllm-project/vllm-ascend/issues/2046
                    if 'AutoConfig.register("aimv2", AIMv2Config)' in text:
                        text = text.replace(
                            'AutoConfig.register("aimv2", AIMv2Config)',
                            "",
                        )
                        text = text.replace(
                            """backbone_config.pop('model_type')
        backbone_config = AutoConfig.for_model(model_type,
                                               **backbone_config)""",
                            """if model_type != "aimv2":
            backbone_config.pop('model_type')
            backbone_config = AutoConfig.for_model(model_type, **backbone_config)
        else:
            backbone_config = AIMv2Config(**backbone_config)""",
                        )
                        f.seek(0)
                        f.write(text)
                        f.truncate()
                        logger.info(
                            "Unsloth: Patching vLLM to fix `'aimv2' is already used by a Transformers config, pick another name.`"
                        )
        except Exception as e:
            logger.info(f"Unsloth: Failed patching vLLM with error = {str(e)}")
def fix_vllm_guided_decoding_params():
    """Alias vllm.sampling_params.GuidedDecodingParams to StructuredOutputsParams.

    vLLM renamed GuidedDecodingParams to StructuredOutputsParams
    (https://github.com/vllm-project/vllm/pull/22772/files) while TRL still
    imports the old name. Temporary shim until TRL updates.
    """
    if importlib.util.find_spec("vllm") is None:
        return
    import vllm
    # Only add the alias when the old name is genuinely missing.
    if not hasattr(vllm.sampling_params, "GuidedDecodingParams"):
        vllm.sampling_params.GuidedDecodingParams = (
            vllm.sampling_params.StructuredOutputsParams
        )
def ignore_logger_messages():
    """Silence huggingface_hub's "Environment variable `HF_TOKEN` is set" notice.

    Best-effort: any failure (hub not installed, private _login module moved)
    is swallowed.
    """
    try:
        from huggingface_hub._login import logger as hf_login_logger
        hf_login_logger.addFilter(HideLoggingMessage("`HF_TOKEN`"))
        del hf_login_logger
    except:
        pass
def patch_ipykernel_hf_xet():
    """Work around hf_xet 1.1.10 + ipykernel 7.0.0/7.0.1 progress-bar crashes.

    That combination raises LookupError(<ContextVar 'shell_parent'>) while
    updating progress (https://github.com/huggingface/xet-core/issues/526),
    so HF progress bars are disabled and ASCII output is used instead.
    No-op when any of hf_xet / ipykernel / huggingface_hub is missing.
    """
    for required in ("hf_xet", "ipykernel", "huggingface_hub"):
        if importlib.util.find_spec(required) is None:
            return
    ipykernel_version = Version(importlib_version("ipykernel"))
    # 7.0.1 seems to also break with LookupError: <ContextVar name='shell_parent' at 0x7a9775143ec0>
    broken_ipykernel = ipykernel_version in (Version("7.0.0"), Version("7.0.1"))
    if Version(importlib_version("hf_xet")) == Version("1.1.10") and broken_ipykernel:
        print(
            "#### Unsloth: `hf_xet==1.1.10` and `ipykernel==7.0.0` or `ipykernel==7.0.1` breaks progress bars. Using ASCII progress bars.\n"
            "#### Unsloth: To re-enable progress bars, please upgrade to `ipykernel>=7.1.0` or wait for a fix to\n"
            "https://github.com/huggingface/xet-core/issues/526"
        )
        from huggingface_hub.utils import disable_progress_bars
        disable_progress_bars()
def patch_trackio():
    """Brand the Trackio experiment-tracking dashboard for Unsloth.

    Sets the light/dark logo URLs and pins "train/reward" first in the plot
    ordering. See https://github.com/unslothai/notebooks/pull/110.
    """
    trackio_settings = {
        "TRACKIO_LOGO_LIGHT_URL": "https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20logo%20black%20text.png",
        "TRACKIO_LOGO_DARK_URL": "https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20logo%20white%20text.png",
        "TRACKIO_PLOT_ORDER": "train/reward",
    }
    os.environ.update(trackio_settings)
def patch_datasets():
    """Refuse to run with datasets 4.4.0 through 4.5.0.

    Datasets 4.4.0/4.4.1 (and up to 4.5.0 per the original check) exhibit
    `_thread.RLock` recursion-count issues that surface as recursion errors.
    No-op when datasets is not installed.

    Raises:
        NotImplementedError: when an affected datasets version is installed.
    """
    if importlib.util.find_spec("datasets") is None:
        return
    datasets_version = Version(importlib_version("datasets"))
    # Affected range is inclusive on both ends: [4.4.0, 4.5.0].
    if Version("4.4.0") <= datasets_version <= Version("4.5.0"):
        raise NotImplementedError(
            f"#### Unsloth: Using `datasets = {str(datasets_version)}` will cause recursion errors.\n"
            # Bug fix: the original message had an unbalanced backtick.
            "Please downgrade datasets to `datasets==4.3.0`"
        )
def check_fbgemm_gpu_version():
    """Reject fbgemm_gpu_genai versions older than 1.4.0.

    Older releases were observed to segfault / bad-alloc. No-op when the
    fbgemm_gpu package (or its dist metadata) is absent.

    Raises:
        ImportError: when fbgemm_gpu_genai < 1.4.0 is installed.
    """
    if importlib.util.find_spec("fbgemm_gpu") is None:
        return
    try:
        installed_version = importlib_version("fbgemm_gpu_genai")
    except:
        # Package importable but distribution metadata missing - nothing to check.
        return
    if Version(installed_version) < Version("1.4.0"):
        raise ImportError(
            f"Unsloth: fbgemm_gpu_genai=={installed_version} detected. It might cause unexpected issues like segmentation faults. Please uninstall the current one by doing `pip uninstall fbgemm-gpu` && `pip install fbgemm-gpu` to install fbgemm-gpu 1.4.0 or newer!"
        )
    logger.info(f"Unsloth: fbgemm_gpu_genai=={installed_version} detected.")
def patch_enable_input_require_grads():
    """
    Patch transformers PreTrainedModel.enable_input_require_grads to handle vision models
    that raise NotImplementedError from get_input_embeddings().
    """
    import inspect
    from transformers import PreTrainedModel
    # Check if the original function iterates over self.modules() instead of just returning the enable_input_require_grads
    # Ref: https://github.com/huggingface/transformers/pull/41993/files#diff-6b72b98c4c2dcfc6cc606843917733f5d858374fbc22a735ff483bbc0c1e63eaL1979-R1996
    try:
        original_source = inspect.getsource(PreTrainedModel.enable_input_require_grads)
    except:
        # Source unavailable (e.g. compiled/packed install) - leave transformers untouched.
        return
    # Only patch if the new pattern exists (iterating over self.modules())
    if "for module in self.modules()" not in original_source:
        return
    def _patched_enable_input_require_grads(self):
        # Forward hook that forces grads on embedding outputs so inputs can backprop.
        def make_inputs_require_grads(module, input, output):
            output.requires_grad_(True)
        hooks = []
        # Deduplicate by embedding-object identity (tied/shared embeddings register once).
        seen_modules = set()
        for module in self.modules():
            if not (
                isinstance(module, PreTrainedModel)
                and hasattr(module, "get_input_embeddings")
            ):
                continue
            try:
                input_embeddings = module.get_input_embeddings()
            except NotImplementedError:
                # Vision models may not implement get_input_embeddings - skip them
                # For GLM V4.6 for example, this skips only `self.visual`
                continue
            if input_embeddings is None:
                continue
            embedding_id = id(input_embeddings)
            if embedding_id in seen_modules:
                continue
            seen_modules.add(embedding_id)
            hooks.append(
                input_embeddings.register_forward_hook(make_inputs_require_grads)
            )
        # Keep all hook handles; also mirror the first one under the legacy
        # single-hook attribute name for compatibility.
        self._require_grads_hooks = hooks
        if hooks:
            self._require_grads_hook = hooks[0]
    PreTrainedModel.enable_input_require_grads = _patched_enable_input_require_grads
    logger.info(
        "Unsloth: Patched enable_input_require_grads for vision model compatibility"
    )
def torchvision_compatibility_check():
    """Verify the installed torchvision is new enough for the installed torch.

    Silently returns when torchvision is absent or torch predates the
    compatibility table; logs when the pair is compatible.

    Raises:
        ImportError: when torch is missing, or torchvision is older than the
            minimum paired with this torch release.
    """
    if importlib.util.find_spec("torch") is None:
        raise ImportError("Unsloth: torch not found. Please install torch first.")
    if importlib.util.find_spec("torchvision") is None:
        return
    torch_version = importlib_version("torch")
    torchvision_version = importlib_version("torchvision")
    # Minimum torchvision paired with each torch release, newest first.
    # See https://pytorch.org/get-started/previous-versions/
    compatibility_table = (
        ("2.9.0", "0.24.0"),
        ("2.8.0", "0.23.0"),
        ("2.7.0", "0.22.0"),
        ("2.6.0", "0.21.0"),
        ("2.5.0", "0.20.0"),
        ("2.4.0", "0.19.0"),
    )
    parsed_torch = Version(torch_version)
    required_torchvision = next(
        (
            minimum_torchvision
            for minimum_torch, minimum_torchvision in compatibility_table
            if parsed_torch >= Version(minimum_torch)
        ),
        None,
    )
    if required_torchvision is None:
        # Torch version not in compatibility table, skip check
        return
    if Version(torchvision_version) < Version(required_torchvision):
        raise ImportError(
            f"Unsloth: torch=={torch_version} requires torchvision>={required_torchvision}, "
            f"but found torchvision=={torchvision_version}. "
            f"Please refer to https://pytorch.org/get-started/previous-versions/ for more information."
        )
    logger.info(
        f"Unsloth: torch=={torch_version} and torchvision=={torchvision_version} are compatible."
    )
# Fix TRL OpenEnv 0.26 NameError: name 'SamplingParams' is not defined
def fix_openenv_no_vllm():
    """Patch TRL's experimental OpenEnv utils so they import without vLLM.

    TRL 0.26's openenv/utils.py imports SamplingParams / GuidedDecodingParams
    only when vLLM is available, then references them unconditionally. This
    appends an `else` branch binding both names to `typing.Any`. No-op when
    TRL or the file is absent, or already patched; failures are logged.
    """
    spec = importlib.util.find_spec("trl")
    if spec is None:
        return
    # Resolve the installed package directory (namespace packages have no origin).
    trl_location = spec.origin
    if trl_location is None:
        trl_location = spec.submodule_search_locations[0]
    else:
        trl_location = os.path.split(trl_location)[0]
    openenv = Path(trl_location) / "experimental" / "openenv" / "utils.py"
    if not openenv.exists():
        return
    try:
        with open(openenv, "r+", encoding = "utf-8") as f:
            text = f.read()
            # Exact guarded-import block shipped by TRL.
            bad = (
                "if is_vllm_available():\n"
                "    from vllm import SamplingParams\n"
                "    from vllm.sampling_params import GuidedDecodingParams\n"
            )
            # Same block plus a fallback `else` that defines both names.
            replace_with = bad + (
                "else:\n"
                "    from typing import Any\n"
                "    SamplingParams = Any\n"
                "    GuidedDecodingParams = Any\n"
                "\n"
            )
            # Only rewrite when the original block (followed by blank lines)
            # is present and the patched form is not already in the file.
            if bad + "\n" + "\n" in text and replace_with not in text:
                text = text.replace(bad + "\n" + "\n", replace_with)
                f.seek(0)
                f.write(text)
                f.truncate()
                logger.info(
                    "Unsloth: Patching TRL OpenEnv to fix SamplingParams not defined"
                )
    except Exception as e:
        logger.info(f"Unsloth: Failed patching TRL OpenEnv with error = {str(e)}")
# Fix Exeuctorch needing get_mapped_key
def fix_executorch():
    """Patch ExecuTorch's examples/models/__init__.py to stub torchtune.

    ExecuTorch's model examples import `torchtune.models.convert_weights`
    for `get_mapped_key`. This injects a snippet (after the file's
    `from enum import Enum` line) that registers stub torchtune modules in
    sys.modules providing get_mapped_key, so the import works without
    torchtune installed. No-op if ExecuTorch or the file is missing, or the
    snippet is already present; failures are logged and swallowed.
    """
    spec = importlib.util.find_spec("executorch")
    if spec is None:
        return
    # Resolve the installed package directory (namespace packages have no origin).
    executorch_location = spec.origin
    if executorch_location is None:
        executorch_location = spec.submodule_search_locations[0]
    else:
        executorch_location = os.path.split(executorch_location)[0]
    executorch = Path(executorch_location) / "examples" / "models" / "__init__.py"
    if not executorch.exists():
        return
    try:
        # Raw source injected verbatim (after dedent) into ExecuTorch's file.
        what = r"""
        import sys
        import types
        import re
        from typing import Any, Optional
        def get_mapped_key(key: str, mapping_dict: dict[str, str]) -> str:
            try:
                # Checks if there is a layer # in the key
                if any(k.isdigit() for k in key.split(".")):
                    # Replace layer number with "{}" to create key for lookup
                    abstract_key = re.sub(r"(\.\d+)", ".{}", key)
                    layer_num = re.search(r"\d+", key).group(0)
                    new_key = mapping_dict[abstract_key]
                    new_key = new_key.format(layer_num)
                else:
                    new_key = mapping_dict[key]
            except KeyError as e:
                raise Exception(
                    f'Error converting the state dict. Found unexpected key: "{key}". '
                    "Please make sure you're loading a checkpoint with the right format. "
                ) from e
            return new_key
        torchtune = types.ModuleType("torchtune")
        torchtune.__path__ = []
        models = types.ModuleType("torchtune.models")
        models.__path__ = []
        convert_weights = types.ModuleType("torchtune.models.convert_weights")
        convert_weights.get_mapped_key = get_mapped_key
        torchtune.models = models
        models.convert_weights = convert_weights
        sys.modules["torchtune"] = torchtune
        sys.modules["torchtune.models"] = models
        sys.modules["torchtune.models.convert_weights"] = convert_weights
        """
        what = textwrap.dedent(what)
        with open(executorch, "r+", encoding = "utf-8") as f:
            text = f.read()
            bad = "from enum import Enum\n"
            # Insert once, right after the Enum import; skip if already patched.
            if bad in text and what not in text:
                text = text.replace(bad + "\n", bad + "\n" + what)
                f.seek(0)
                f.write(text)
                f.truncate()
                logger.info("Unsloth: Patching Executorch to fix get_mapped_key")
    except Exception as e:
        logger.info(f"Unsloth: Failed Executorch with error = {str(e)}")
def fix_diffusers_warnings():
    """Silence diffusers' deprecation chatter (e.g. Flax deprecation notices).

    DIFFUSERS_VERBOSITY=error keeps only error-level diffusers log output.
    """
    os.environ.update({"DIFFUSERS_VERBOSITY": "error"})
def fix_huggingface_hub():
    """Restore huggingface_hub.is_offline_mode, removed in newer hub releases.

    Maps the old callable onto the HF_HUB_OFFLINE constant so legacy callers
    keep working.
    """
    import huggingface_hub
    if hasattr(huggingface_hub, "is_offline_mode"):
        return
    def _is_offline_mode():
        return huggingface_hub.constants.HF_HUB_OFFLINE
    huggingface_hub.is_offline_mode = _is_offline_mode
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/save.py | unsloth/save.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unsloth_zoo.utils import Version
from importlib.metadata import version as importlib_version
from unsloth_zoo.hf_utils import dtype_from_config, HAS_TORCH_DTYPE
from unsloth_zoo.llama_cpp import (
convert_to_gguf,
quantize_gguf,
use_local_gguf,
install_llama_cpp,
check_llama_cpp,
_download_convert_hf_to_gguf,
)
from bitsandbytes.nn import Linear4bit as Bnb_Linear4bit
from peft.tuners.lora import Linear4bit as Peft_Linear4bit
from peft.tuners.lora import Linear as Peft_Linear
from typing import Optional, Callable, Union, List
import sys
import requests
import torch
import os
import shutil
import pickle
import gc
from transformers.models.llama.modeling_llama import logger
from .kernels import fast_dequantize, QUANT_STATE, get_lora_parameters_bias
import subprocess
import psutil
import re
from transformers.models.llama.modeling_llama import logger
from .tokenizer_utils import fix_sentencepiece_gguf
from .models.loader_utils import get_model_name
from .models._utils import _convert_torchao_model
from .ollama_template_mappers import OLLAMA_TEMPLATES, MODEL_TO_OLLAMA_TEMPLATE_MAPPER
from transformers import ProcessorMixin
from huggingface_hub import HfApi
try:
from huggingface_hub import get_token
except:
try:
from huggingface_hub.utils import get_token
except:
# For older versions of huggingface_hub
from huggingface_hub.utils._token import get_token
from pathlib import Path
from peft import PeftModelForCausalLM, PeftModel
# Public API of this module.
__all__ = [
    "print_quantization_methods",
    "unsloth_save_model",
    "save_to_gguf",
    "patch_saving_functions",
    "create_huggingface_repo",
]
# llama.cpp specific targets - all takes 90s. Below takes 60s
LLAMA_CPP_TARGETS = [
    "llama-quantize",
    "llama-cli",
    "llama-server",
]
# Check environments
# Detect Colab/Kaggle by the presence of their COLAB_* / KAGGLE_* env vars.
keynames = "\n" + "\n".join(os.environ.keys())
IS_COLAB_ENVIRONMENT = "\nCOLAB_" in keynames
IS_KAGGLE_ENVIRONMENT = "\nKAGGLE_" in keynames
# Kaggle's writable scratch location used to avoid filling the working disk.
KAGGLE_TMP = "/tmp"
del keynames
# Weights
# Per-layer projection submodule paths whose (possibly LoRA-merged) weights get saved.
LLAMA_WEIGHTS = (
    "self_attn.q_proj",
    "self_attn.k_proj",
    "self_attn.v_proj",
    "self_attn.o_proj",
    "mlp.gate_proj",
    "mlp.up_proj",
    "mlp.down_proj",
)
# Per-layer layernorm submodule paths; saved as-is (some may not exist on all archs).
LLAMA_LAYERNORMS = (
    "input_layernorm",
    "post_attention_layernorm",
    "pre_feedforward_layernorm",
    "post_feedforward_layernorm",
    "self_attn.q_norm",
    "self_attn.k_norm",
)
# https://github.com/ggerganov/llama.cpp/blob/master/examples/quantize/quantize.cpp#L19
# From https://mlabonne.github.io/blog/posts/Quantize_Llama_2_models_using_ggml.html
# GGUF quantization methods accepted by save_to_gguf, with human-readable notes.
ALLOWED_QUANTS = {
    "not_quantized": "Recommended. Fast conversion. Slow inference, big files.",
    "fast_quantized": "Recommended. Fast conversion. OK inference, OK file size.",
    "quantized": "Recommended. Slow conversion. Fast inference, small files.",
    "f32": "Not recommended. Retains 100% accuracy, but super slow and memory hungry.",
    "bf16": "Bfloat16 - Fastest conversion + retains 100% accuracy. Slow and memory hungry.",
    "f16": "Float16 - Fastest conversion + retains 100% accuracy. Slow and memory hungry.",
    "q8_0": "Fast conversion. High resource use, but generally acceptable.",
    "q4_k_m": "Recommended. Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q4_K",
    "q5_k_m": "Recommended. Uses Q6_K for half of the attention.wv and feed_forward.w2 tensors, else Q5_K",
    "q2_k": "Uses Q4_K for the attention.vw and feed_forward.w2 tensors, Q2_K for the other tensors.",
    "q3_k_l": "Uses Q5_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else Q3_K",
    "q3_k_m": "Uses Q4_K for the attention.wv, attention.wo, and feed_forward.w2 tensors, else Q3_K",
    "q3_k_s": "Uses Q3_K for all tensors",
    "q4_0": "Original quant method, 4-bit.",
    "q4_1": "Higher accuracy than q4_0 but not as high as q5_0. However has quicker inference than q5 models.",
    "q4_k_s": "Uses Q4_K for all tensors",
    "q4_k": "alias for q4_k_m",
    "q5_k": "alias for q5_k_m",
    "q5_0": "Higher accuracy, higher resource usage and slower inference.",
    "q5_1": "Even higher accuracy, resource usage and slower inference.",
    "q5_k_s": "Uses Q5_K for all tensors",
    "q6_k": "Uses Q8_K for all tensors",
    # "iq2_xxs" : "2.06 bpw quantization", # Not supported sadly
    # "iq2_xs" : "2.31 bpw quantization",
    # "iq3_xxs" : "3.06 bpw quantization",
    "q3_k_xs": "3-bit extra small quantization",
}
def has_curl():
    """Report whether a `curl` executable can be found on PATH."""
    curl_path = shutil.which("curl")
    return curl_path is not None

# Tell llama.cpp's CMake build whether it may rely on curl being available.
if has_curl():
    CURL_FLAG = "-DLLAMA_CURL=ON"
else:
    CURL_FLAG = "-DLLAMA_CURL=OFF"
def print_quantization_methods():
    """Print every supported GGUF quantization key with its description."""
    for method, description in ALLOWED_QUANTS.items():
        print(f'"{method}" ==> {description}')
def check_if_sentencepiece_model(
    model, temporary_location = "_unsloth_sentencepiece_temp"
):
    """Return True when the model's stashed tokenizer is SentencePiece-based.

    Saves the tokenizer attached as `model._saved_temp_tokenizer` into a
    scratch directory under `temporary_location` and checks whether a
    `tokenizer.model` (SentencePiece) file is produced. If this call created
    the directory, it is removed afterwards. Returns False when no stashed
    tokenizer exists.
    """
    if not hasattr(model, "_saved_temp_tokenizer"):
        return False
    temp_tokenizer = model._saved_temp_tokenizer
    dump_path = os.path.join(temporary_location, temp_tokenizer.name_or_path)
    we_created_it = not os.path.exists(dump_path)
    if we_created_it:
        os.makedirs(dump_path)
    temp_tokenizer.save_pretrained(dump_path)
    is_sentencepiece = os.path.isfile(f"{dump_path}/tokenizer.model")
    if we_created_it:
        # Clean up only what we created; pre-existing directories are kept.
        shutil.rmtree(dump_path, ignore_errors = True)
    return is_sentencepiece
def _free_cached_model(model):
    """Delete the HF hub cache entry matching `model` to reclaim disk space.

    Walks every cached repo and removes the one whose repo_id equals
    `model.config._name_or_path`. Can save ~4GB of disk space - useful on
    space-limited Kaggle/Colab systems.
    """
    from huggingface_hub import scan_cache_dir
    for cached_repo in list(scan_cache_dir().repos):
        if cached_repo.repo_id != model.config._name_or_path:
            continue
        commit_hash = list(cached_repo.revisions)[0].commit_hash
        delete_strategy = scan_cache_dir().delete_revisions(commit_hash)
        logger.warning_once(
            "Unsloth: Will remove a cached repo with size "
            + delete_strategy.expected_freed_size_str,
        )
        delete_strategy.execute()
def _merge_lora(layer, name):
    """Return (weight, bias) for `layer`, merging any LoRA delta into the weight.

    For 4bit / LoRA linear layers the base weight is dequantized to float32,
    the scaled A@B delta is added in-place, finiteness is checked, and the
    result is cast back to the original dtype. Plain layers return their
    weight untouched. `name` is used only for the error message.
    """
    bias = getattr(layer, "bias", None)
    if isinstance(layer, (Bnb_Linear4bit, Peft_Linear4bit, Peft_Linear)):
        # Is LoRA so we need to merge!
        W, quant_state, A, B, s, bias = get_lora_parameters_bias(layer)
        if quant_state is not None:
            # quant_state may be an object (has .dtype) or a legacy list
            # where index 2 holds the dtype.
            dtype = (
                quant_state.dtype if type(quant_state) is not list else quant_state[2]
            )
            W = fast_dequantize(W, quant_state)
        else:
            dtype = W.dtype
        # Merge in float32 for numerical headroom; transpose so addmm_ applies A@B.
        W = W.to(torch.float32).t()
        # W = W.t()
        if A is not None:
            # sAB = (A.t().to(torch.float32) @ (s * B.t().to(torch.float32)))
            # W += sAB
            W.addmm_(A.t().to(torch.float32), B.t().to(torch.float32), alpha = s)
            # W.addmm_(A.t().to(W.dtype), B.t().to(W.dtype), alpha = s)
            # if not torch.isfinite(W).all():
            # Cheap overflow check: only the largest-magnitude element is inspected.
            maximum_element = torch.max(W.min().abs(), W.max())
            if not torch.isfinite(maximum_element).item():
                raise ValueError(
                    f"Unsloth: Merge failed.\n{name} has some elements = infinity."
                )
        W = W.t().to(dtype)
    else:
        W = layer.weight
    return W, bias
def fast_save_pickle(shard, name):
    """Serialize `shard` to `name` via torch.save (fast path for <=2-CPU boxes)."""
    print(f"Unsloth: Saving {name}...")
    # Default pickle settings on purpose: HIGHEST_PROTOCOL seems to not work
    # with PyTorch's saving machinery.
    torch.save(shard, name)
@torch.inference_mode
def unsloth_save_model(
model,
tokenizer,
save_directory: Union[str, os.PathLike],
save_method: str = "lora", # ["lora", "merged_16bit", "merged_4bit"]
push_to_hub: bool = False,
token: Optional[Union[str, bool]] = None,
is_main_process: bool = True,
state_dict: Optional[dict] = None,
save_function: Callable = torch.save,
max_shard_size: Union[int, str] = "5GB",
safe_serialization: bool = True,
variant: Optional[str] = None,
save_peft_format: bool = True,
# Push to hub
use_temp_dir: Optional[bool] = None,
commit_message: Optional[str] = "Trained with Unsloth",
private: Optional[bool] = None,
create_pr: bool = False,
revision: str = None,
commit_description: str = "Upload model trained with Unsloth 2x faster",
tags: List[str] = None,
# Our functions
temporary_location: str = "_unsloth_temporary_saved_buffers",
maximum_memory_usage: float = 0.9,
):
if token is None:
token = get_token()
if commit_message is None:
commit_message = ""
if "Unsloth" not in commit_message:
commit_message += " (Trained with Unsloth)"
commit_message = commit_message.lstrip()
if commit_description is None:
commit_description = "Upload model trained with Unsloth 2x faster"
elif "Unsloth 2x faster" not in commit_description:
commit_description += " (Trained with Unsloth 2x faster)"
if save_method == "merged_4bit":
raise RuntimeError(
"Unsloth: Merging into 4bit will cause your model to lose accuracy if you plan\n"
"to merge to GGUF or others later on. I suggest you to do this as a final step\n"
"if you're planning to do multiple saves.\n"
"If you are certain, change `save_method` to `merged_4bit_forced`."
)
elif save_method == "merged_4bit_forced":
save_method = "merged_4bit"
save_pretrained_settings = dict(locals())
for deletion in (
"model",
"tokenizer",
"save_method",
"temporary_location",
"maximum_memory_usage",
):
del save_pretrained_settings[deletion]
# First check for a token!
if push_to_hub:
from huggingface_hub import whoami
try:
username = whoami(token = token)["name"]
except:
raise RuntimeError(
"Unsloth: Please supply a token!\n"
"Go to https://huggingface.co/settings/tokens"
)
assert maximum_memory_usage > 0 and maximum_memory_usage <= 0.95
# Clean memory up first
for _ in range(3):
torch.cuda.empty_cache()
gc.collect()
save_method = save_method.lower().replace(" ", "_")
if (
save_method != "lora"
and save_method != "merged_16bit"
and save_method != "merged_4bit"
):
raise RuntimeError(
"Unsloth: You must select one of 3 options when saving models:\n"
'"lora" ==> This is the fastest and easiet. Just saves LoRA modules.\n'
'"merged_16bit" ==> This merges LoRA weights and saves to float16. Needed for llama.cpp / GGUF.\n'
'"merged_4bit" ==> This merges LoRA weights and saves to 4bit. Useful for DPO / inference.'
)
if save_method == "merged_4bit":
print("Unsloth: Merging 4bit and LoRA weights to 4bit...")
print("This might take 5 minutes...")
# Counteract no LoRA adapters!
if hasattr(model, "merge_and_unload"):
model = model.merge_and_unload()
print("Done.")
if tags is not None:
assert isinstance(tags, (list, tuple))
tags = list(tags) + [
"unsloth",
]
else:
tags = [
"unsloth",
]
save_pretrained_settings["tags"] = tags
if ((save_method == "lora") or (save_method == "merged_4bit")) and push_to_hub:
if token is None:
raise RuntimeError(
"Unsloth: Pushing to HF requires a token. Pass `token = 'hf_....'`\n"
"Go to https://huggingface.co/settings/tokens."
)
if save_method == "lora":
print("Unsloth: Saving LoRA adapters. Please wait...")
elif save_method == "merged_4bit":
print("Unsloth: Saving 4bit Bitsandbytes model. Please wait...")
# Update model tag
_ = upload_to_huggingface(
model,
save_directory,
token,
"finetuned",
"trl",
file_location = None,
old_username = None,
private = private,
)
getattr(model, "original_push_to_hub", model.push_to_hub)(
repo_id = save_directory,
use_temp_dir = use_temp_dir,
commit_message = commit_message,
private = private,
token = token,
max_shard_size = max_shard_size,
create_pr = create_pr,
safe_serialization = safe_serialization,
revision = revision,
commit_description = commit_description,
tags = tags,
)
if tokenizer is not None:
# Set padding side to left for inference
old_padding_side = tokenizer.padding_side
tokenizer.padding_side = "left"
getattr(tokenizer, "original_push_to_hub", tokenizer.push_to_hub)(
repo_id = save_directory,
use_temp_dir = use_temp_dir,
commit_message = commit_message,
private = private,
token = token,
max_shard_size = max_shard_size,
create_pr = create_pr,
safe_serialization = safe_serialization,
revision = revision,
commit_description = commit_description,
tags = tags,
)
# Revert back padding side
tokenizer.padding_side = old_padding_side
if hasattr(model, "config"):
print(
f"Saved {save_method} model to https://huggingface.co/" + save_directory
)
return save_directory, None
# Tokenizer has different saving arguments
tokenizer_save_settings = {
"save_directory": save_pretrained_settings["save_directory"],
"legacy_format": None,
"filename_prefix": None,
"push_to_hub": save_pretrained_settings["push_to_hub"],
"private": save_pretrained_settings["private"],
"token": save_pretrained_settings["token"],
}
# Check if PEFT Model or not - if yes, 3 levels. If not 2 levels.
from peft import PeftModelForCausalLM
if isinstance(model, PeftModelForCausalLM):
internal_model = model.model
else:
internal_model = model
# Cannot be converted properly!
if (
(save_method == "merged_4bit")
or (save_method == "lora")
or (not hasattr(model, "model") or not hasattr(internal_model.model, "layers"))
):
# Do general saving
# Edit save_pretrained_settings
# [TODO] _create_repo has errors due to **kwargs getting accepted
# commit_description does not seem to work?
what_to_delete = (
(
"use_temp_dir",
"commit_message",
"create_pr",
"revision",
"commit_description",
"tags",
)
if save_pretrained_settings["push_to_hub"] is False
else (
"use_temp_dir",
"create_pr",
"revision",
"tags",
"commit_description",
)
)
for deletion in what_to_delete:
del save_pretrained_settings[deletion]
if hasattr(model, "add_model_tags"):
model.add_model_tags(
[
"unsloth",
]
)
# Update model tag
if push_to_hub:
_ = upload_to_huggingface(
model,
save_pretrained_settings["save_directory"],
token,
"finetuned",
"trl",
file_location = None,
old_username = None,
private = private,
)
if tokenizer is not None:
print("Unsloth: Saving tokenizer...", end = "")
# Set padding side to left for inference
old_padding_side = tokenizer.padding_side
tokenizer.padding_side = "left"
tokenizer.save_pretrained(**tokenizer_save_settings)
# Revert back padding side
tokenizer.padding_side = old_padding_side
print(" Done.")
else:
print()
print("Unsloth: Saving model...", end = "")
if save_method != "lora":
print(" This might take 10 minutes for Llama-7b...", end = "")
# [TODO] Is this correct?
if save_method == "lora":
save_pretrained_settings["selected_adapters"] = None
model.save_pretrained(**save_pretrained_settings)
if push_to_hub and hasattr(model, "config"):
print(
"Saved to https://huggingface.co/"
+ save_pretrained_settings["save_directory"]
)
print(" Done.")
return save_directory, None
# If push_to_hub, we must remove the .../ part of a repo
username = None
if push_to_hub and "/" in save_directory:
# +1 solves absolute path issues
new_save_directory = save_directory
username = new_save_directory[: new_save_directory.find("/")]
new_save_directory = new_save_directory[new_save_directory.find("/") + 1 :]
if IS_KAGGLE_ENVIRONMENT:
new_save_directory = os.path.join(
KAGGLE_TMP, new_save_directory[new_save_directory.find("/") + 1 :]
)
logger.warning_once(
"Unsloth: You are pushing to hub in Kaggle environment.\n"
f"To save memory, we shall move {save_directory} to {new_save_directory}"
)
else:
logger.warning_once(
f"Unsloth: You are pushing to hub, but you passed your HF username = {username}.\n"
f"We shall truncate {save_directory} to {new_save_directory}"
)
save_pretrained_settings["save_directory"] = new_save_directory
tokenizer_save_settings["save_directory"] = new_save_directory
save_directory = new_save_directory
print("Unsloth: Merging 4bit and LoRA weights to 16bit...")
# Determine max RAM usage minus sharding
max_ram = psutil.virtual_memory().available
sharded_ram_usage = 5 * 1024 * 1024 * 1024
if type(max_shard_size) is str:
gb_found = re.match(
r"([0-9]{1,})[\s]{0,}GB", max_shard_size, flags = re.IGNORECASE
)
mb_found = re.match(
r"([0-9]{1,})[\s]{0,}MB", max_shard_size, flags = re.IGNORECASE
)
if gb_found:
sharded_ram_usage = int(gb_found.group(1)) * 1024 * 1024 * 1024
elif mb_found:
sharded_ram_usage = int(mb_found.group(1)) * 1024 * 1024
elif type(max_shard_size) is int:
sharded_ram_usage = max_shard_size
# Switch to our fast saving modules if it's a slow PC!
n_cpus = psutil.cpu_count(logical = False)
if n_cpus is None:
n_cpus = psutil.cpu_count()
if n_cpus is None:
n_cpus = 1
if safe_serialization is None:
safe_serialization = True
save_pretrained_settings["safe_serialization"] = safe_serialization
elif safe_serialization and (n_cpus <= 2):
logger.warning_once(
f"Unsloth: You have {n_cpus} CPUs. Using `safe_serialization` is 10x slower.\n"
f"We shall switch to Pytorch saving, which might take 3 minutes and not 30 minutes.\n"
f"To force `safe_serialization`, set it to `None` instead.",
)
safe_serialization = False
save_function = fast_save_pickle
save_pretrained_settings["safe_serialization"] = safe_serialization
save_pretrained_settings["save_function"] = save_function
# Only safe_serialization uses more RAM
if safe_serialization:
max_ram -= sharded_ram_usage
else:
max_ram -= sharded_ram_usage * 0.25 # Uses much less
max_ram = int(max(0, max_ram) * maximum_memory_usage)
print(
f"Unsloth: Will use up to "
f"{round(max_ram/1024/1024/1024, 2)} out of "
f"{round(psutil.virtual_memory().total/1024/1024/1024, 2)} RAM for saving."
)
# Move temporary_location to /tmp in Kaggle
if IS_KAGGLE_ENVIRONMENT:
temporary_location = os.path.join(KAGGLE_TMP, temporary_location)
# Max directory for disk saving
if not os.path.exists(temporary_location):
os.makedirs(temporary_location)
# Check if Kaggle or Colab, since only 20GB of Disk space allowed.
if IS_KAGGLE_ENVIRONMENT or IS_COLAB_ENVIRONMENT:
# We free up 4GB of space
logger.warning_once(
"Unsloth: Kaggle/Colab has limited disk space. We need to delete the downloaded\n"
"model which will save 4-16GB of disk space, allowing you to save on Kaggle/Colab."
)
_free_cached_model(internal_model)
# HF also uses a OrderedDict
from collections import OrderedDict
state_dict = OrderedDict()
torch_dtype = dtype_from_config(internal_model.config)
if type(torch_dtype) is str:
if torch_dtype == "float16":
torch_dtype = torch.float16
elif torch_dtype == "bfloat16":
torch_dtype = torch.bfloat16
# Check modules to save float32 dtype
state_dict["model.embed_tokens.weight"] = (
internal_model.model.embed_tokens.weight.data.to(torch_dtype)
)
max_vram = int(
torch.cuda.get_device_properties(0).total_memory * maximum_memory_usage
)
print("Unsloth: Saving model... This might take 5 minutes ...")
from tqdm import tqdm as ProgressBar
for j, layer in enumerate(ProgressBar(internal_model.model.layers)):
for item in LLAMA_WEIGHTS:
proj = eval(f"layer.{item}")
name = f"model.layers.{j}.{item}.weight"
W, bias = _merge_lora(proj, name)
# Bias term
if bias is not None:
state_dict[f"model.layers.{j}.{item}.bias"] = bias
if (torch.cuda.memory_allocated() + W.nbytes) < max_vram:
# Save to GPU memory
state_dict[name] = W
# [TODO] Saving to RAM seems to leak memory???
# elif (max_ram - W.nbytes) > 0:
# # Save to CPU memory
# logger.warning_once(f"We will save to RAM and not VRAM now.")
# state_dict[name] = W.to("cpu", non_blocking = True, copy = True)
# max_ram = max(max_ram - W.nbytes, 0)
else:
# Save to Disk
logger.warning_once("\nWe will save to Disk and not RAM now.")
filename = os.path.join(temporary_location, f"{name}.pt")
torch.save(
W,
filename,
pickle_module = pickle,
pickle_protocol = pickle.HIGHEST_PROTOCOL,
)
# weights_only = True weirdly fails?
state_dict[name] = torch.load(
filename, map_location = "cpu", mmap = True, weights_only = False
)
for item in LLAMA_LAYERNORMS:
try:
# Skip for Gemma 2
state_dict[f"model.layers.{j}.{item}.weight"] = eval(
f"layer.{item}.weight.data"
)
except:
continue
state_dict["model.norm.weight"] = internal_model.model.norm.weight.data
# Check for modules_to_save float32 dtype
# Check for tied weights
if (
internal_model.model.embed_tokens.weight.data_ptr()
!= internal_model.lm_head.weight.data_ptr()
):
state_dict["lm_head.weight"] = internal_model.lm_head.weight.data.to(
torch_dtype
)
# All tensors MUST be type torch.Tensor and not torch.nn.parameter.Parameter
for key, value in state_dict.items():
if hasattr(value, "data"):
state_dict[key] = value = value.data
if type(value) is not torch.Tensor:
logger.warning_once(f"Unsloth: {key} is not a Tensor but a {type(value)}.")
# Edit save_pretrained_settings
# [TODO] _create_repo has errors due to **kwargs getting accepted
save_pretrained_settings["state_dict"] = state_dict
# commit_description does not seem to work?
what_to_delete = (
(
"use_temp_dir",
"commit_message",
"create_pr",
"revision",
"commit_description",
"tags",
)
if not push_to_hub
else (
"use_temp_dir",
"create_pr",
"revision",
"tags",
"commit_description",
)
)
for deletion in what_to_delete:
del save_pretrained_settings[deletion]
if hasattr(model, "add_model_tags"):
model.add_model_tags(
[
"unsloth",
]
)
# Update model tag
if push_to_hub:
_ = upload_to_huggingface(
model,
save_pretrained_settings["save_directory"],
token,
"finetuned",
"trl",
file_location = None,
old_username = username,
private = private,
)
# First check if we're pushing to an organization!
save_directory = save_pretrained_settings["save_directory"]
if save_pretrained_settings["push_to_hub"]:
new_save_directory, new_username = _determine_username(
save_directory, username, token
)
if token is not None:
from huggingface_hub import whoami
actual_username = whoami(token = token)["name"]
else:
actual_username = username
# Check if pushing to an organization
if save_pretrained_settings["push_to_hub"] and (username != actual_username):
print(f"Unsloth: Saving to organization with address {new_save_directory}")
# We upload everything at the end!
tokenizer_save_settings["push_to_hub"] = False
tokenizer_save_settings["save_directory"] = new_save_directory
# Save tokenizer
if tokenizer is not None:
print("Unsloth: Saving tokenizer...", end = "")
# Set padding side to left for inference
old_padding_side = tokenizer.padding_side
tokenizer.padding_side = "left"
tokenizer.save_pretrained(**tokenizer_save_settings)
# Revert back padding side
tokenizer.padding_side = old_padding_side
print(" Done.")
else:
print()
# Since merged, edit quantization_config
old_config = model.config
new_config = model.config.to_dict()
if "quantization_config" in new_config:
del new_config["quantization_config"]
original_model = model
new_config = type(model.config).from_dict(new_config)
while hasattr(original_model, "model"):
original_model = original_model.model
original_model.config = new_config
model.config = new_config
# Save!
# [TODO] --> is this correct?
# save_pretrained_settings["selected_adapters"] = None
# Check if pushing to an organization
if save_pretrained_settings["push_to_hub"] and (username != actual_username):
print(f"Unsloth: Saving to organization with address {new_save_directory}")
# Pushing to organization!
# Sadly .save_pretrained doesn't work :(
# We first save it via .save_pretrained, then upload manually!
save_pretrained_settings["save_directory"] = new_save_directory
save_pretrained_settings["push_to_hub"] = False
internal_model.save_pretrained(**save_pretrained_settings)
# Now manually go through each file and upload them manually!
filenames = os.listdir(new_save_directory)
hf_api = HfApi(token = save_pretrained_settings["token"])
print("Unsloth: Uploading all files... Please wait...")
hf_api.upload_folder(
folder_path = new_save_directory,
path_in_repo = ".",
repo_id = new_save_directory,
repo_type = "model",
commit_message = "(Trained with Unsloth)",
ignore_patterns = "*.md",
)
else:
internal_model.save_pretrained(**save_pretrained_settings)
# Revert config back
original_model = model
while hasattr(original_model, "model"):
original_model = original_model.model
original_model.config = old_config
model.config = old_config
print("Done.")
if push_to_hub and hasattr(model, "config"):
print(
f"Saved merged model to https://huggingface.co/{username}/{save_directory.lstrip('/').split('/')[-1]}"
)
save_pretrained_settings["state_dict"] = None
for j, (key, value) in enumerate(state_dict.items()):
state_dict[key] = None
if j % 10 == 0:
torch.cuda.empty_cache()
gc.collect()
state_dict = None
del state_dict
torch.cuda.empty_cache()
gc.collect()
# Remove temporary location
import shutil
shutil.rmtree(temporary_location, ignore_errors = True)
for _ in range(3):
torch.cuda.empty_cache()
gc.collect()
return save_directory, username
def install_llama_cpp_clone_non_blocking():
    """Start a recursive `git clone` of llama.cpp in the background.

    Output is discarded (stdout -> DEVNULL, stderr folded into stdout).
    Returns the `subprocess.Popen` handle so the caller can poll or wait.
    """
    clone_command = [
        "git",
        "clone",
        "--recursive",
        "https://github.com/ggerganov/llama.cpp",
    ]
    return subprocess.Popen(
        clone_command, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT
    )
def install_llama_cpp_make_non_blocking():
    """Kick off a background compile of llama.cpp, via make or cmake.

    The legacy Makefile flow is tried first (`make clean` succeeding implies
    the Makefile build works); otherwise the tree is configured with cmake and
    the targets in LLAMA_CPP_TARGETS are built.

    Returns:
        (Popen handle, IS_CMAKE) — IS_CMAKE tells the caller which build
        system was chosen.
    """
    # https://github.com/ggerganov/llama.cpp/issues/7062
    # Weirdly GPU conversion for GGUF breaks??
    # env = { **os.environ, "LLAMA_CUDA": "1", }
    # Force make clean
    clean_status = os.system("make clean -C llama.cpp")
    if clean_status == 0:
        # Old Makefile-based build path.
        n_workers = max(int((psutil.cpu_count() or 1) * 1.5), 1)
        build_command = ["make", "all", "-j" + str(n_workers), "-C", "llama.cpp"]
        use_cmake = False
    else:
        # Newer CMake build path. Use fewer CPUs since it is ~1.5x faster.
        n_workers = max(int(psutil.cpu_count() or 1), 1)
        configure_status = os.system(
            f"cmake llama.cpp -B llama.cpp/build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=OFF {CURL_FLAG}"
        )
        if configure_status != 0:
            raise RuntimeError(
                f"*** Unsloth: Failed compiling llama.cpp using os.system(...) with error {configure_status}. Please report this ASAP!"
            )
        # f"cmake --build llama.cpp/build --config Release -j{psutil.cpu_count()*2} --clean-first --target {' '.join(LLAMA_CPP_TARGETS)}",
        build_command = [
            "cmake",
            "--build",
            "llama.cpp/build",
            "--config",
            "Release",
            "-j" + str(n_workers),
            "--clean-first",
            "--target",
        ] + LLAMA_CPP_TARGETS
        use_cmake = True
    # https://github.com/ggerganov/llama.cpp/issues/7062
    # Weirdly GPU conversion for GGUF breaks??
    # run_installer = subprocess.Popen(full_command, env = env, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT)
    background_build = subprocess.Popen(
        build_command, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT
    )
    return background_build, use_cmake
def install_python_non_blocking(packages = []):
    """Install Python packages in the background via pip.

    Defect fixed: invoking the bare ``pip`` executable can target a different
    Python environment than the running interpreter, or fail when ``pip`` is
    not on PATH. ``python -m pip`` is the invocation pip's own documentation
    recommends and always matches ``sys.executable``.

    Args:
        packages: requirement strings to install (the list is not mutated).

    Returns:
        The detached ``subprocess.Popen`` handle; output is discarded.
    """
    import sys  # local import keeps this a drop-in replacement
    full_command = [sys.executable, "-m", "pip", "install"] + list(packages)
    run_installer = subprocess.Popen(
        full_command, stdout = subprocess.DEVNULL, stderr = subprocess.STDOUT
    )
    return run_installer
def try_execute(commands, force_complete = False):
for command in commands:
with subprocess.Popen(
command,
shell = True,
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | true |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/models/qwen2.py | unsloth/models/qwen2.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .llama import *
from .llama import (
LlamaRotaryEmbedding,
LlamaLinearScalingRotaryEmbedding,
)
from transformers.models.qwen2.modeling_qwen2 import (
Qwen2Attention,
Qwen2DecoderLayer,
Qwen2Model,
Qwen2ForCausalLM,
)
# For Pytorch 2.1.1
try:
from transformers.models.qwen2.modeling_qwen2 import (
Qwen2SdpaAttention,
Qwen2FlashAttention2,
)
except:
Qwen2SdpaAttention = Qwen2Attention
Qwen2FlashAttention2 = Qwen2Attention
class FastQwen2Model(FastLlamaModel):
    """Qwen2 support: reuses Unsloth's fast Llama kernels by monkey-patching
    the HF transformers Qwen2 modules (attention, decoder layer, model, LM)."""

    @staticmethod
    def pre_patch():
        """Patch transformers' Qwen2 classes in-place with the fast forwards.

        Must run before a Qwen2 model is instantiated so the patched
        `__init__`/`forward` implementations are the ones picked up.
        """
        # patch_linear_scaling returns the name of a generated __init__ plus
        # its source, which wires RoPE linear scaling into the attention module.
        init_name, function = patch_linear_scaling(
            model_name = "qwen2",
            rope_module = LlamaRotaryEmbedding,
            scaled_rope_module = LlamaLinearScalingRotaryEmbedding,
            attention_module = Qwen2Attention,
        )
        if init_name is not None:
            # Materialize the generated source into module globals, then bind it.
            exec(function, globals())
            Qwen2Attention.__init__ = eval(init_name)
        # Qwen2 attention is Llama-compatible, so the Llama fast paths apply.
        Qwen2Attention.forward = LlamaAttention_fast_forward
        Qwen2SdpaAttention.forward = LlamaAttention_fast_forward
        Qwen2FlashAttention2.forward = LlamaAttention_fast_forward
        Qwen2DecoderLayer.forward = LlamaDecoderLayer_fast_forward
        Qwen2Model.forward = LlamaModel_fast_forward
        Qwen2ForCausalLM.forward = CausalLM_fast_forward(
            LlamaModel_fast_forward_inference
        )
        PeftModelForCausalLM.forward = PeftModel_fast_forward
        fix_prepare_inputs_for_generation(Qwen2ForCausalLM)
        # Solves https://github.com/unslothai/unsloth/issues/168
        # Static KV Cache was introduced in 4.38.0, causing training to be much slower.
        # Inference can now be CUDAGraphed, but we shall retain the old rotary embeddings.
        # https://github.com/huggingface/transformers/pull/27931
        # https://github.com/huggingface/transformers/blob/v4.37.2/src/transformers/models/llama/modeling_llama.py
        import transformers.models.qwen2.modeling_qwen2
        transformers.models.qwen2.modeling_qwen2.Qwen2RotaryEmbedding = (
            LlamaRotaryEmbedding
        )
        return

    @staticmethod
    def from_pretrained(
        model_name = "Qwen/Qwen2-7B",
        max_seq_length = 4096,
        dtype = None,
        load_in_4bit = True,
        token = None,
        device_map = "sequential",
        rope_scaling = None,  # Qwen2 does not support RoPE scaling
        fix_tokenizer = True,
        model_patcher = None,
        tokenizer_name = None,
        trust_remote_code = False,
        **kwargs,
    ):
        """Load a Qwen2 checkpoint through the fast Llama loader.

        Delegates to `FastLlamaModel.from_pretrained` with this class as the
        patcher; note any caller-supplied `model_patcher` is ignored.
        """
        return FastLlamaModel.from_pretrained(
            model_name = model_name,
            max_seq_length = max_seq_length,
            dtype = dtype,
            load_in_4bit = load_in_4bit,
            token = token,
            device_map = device_map,
            rope_scaling = rope_scaling,
            fix_tokenizer = fix_tokenizer,
            model_patcher = FastQwen2Model,
            tokenizer_name = tokenizer_name,
            trust_remote_code = trust_remote_code,
            **kwargs,
        )
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/models/loader_utils.py | unsloth/models/loader_utils.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..device_type import DEVICE_TYPE_TORCH
import importlib
import os
import torch
import re
import tempfile
from typing import Union
from .mapper import (
INT_TO_FLOAT_MAPPER,
FLOAT_TO_INT_MAPPER,
MAP_TO_UNSLOTH_16bit,
FLOAT_TO_FP8_BLOCK_MAPPER,
FLOAT_TO_FP8_ROW_MAPPER,
)
# https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading!
from packaging.version import Version
from transformers import __version__ as transformers_version
from unsloth.models._utils import TorchAOConfig
from unsloth_zoo.utils import Version
import gc
# Parse once so later feature gates are simple Version comparisons.
transformers_version = Version(transformers_version)
# Native 4bit (bitsandbytes) loading needs transformers >= 4.37.
SUPPORTS_FOURBIT = transformers_version >= Version("4.37")
# Environment variables probed, in order, for the process rank / world size.
LOCAL_RANK_KEYS = ("LOCAL_RANK", "RANK")
WORLD_SIZE_KEYS = ("WORLD_SIZE",)
# Model-name redirects for mappings known to behave badly (keys and values
# are lowercased; lookups below use the lowercased resolved name).
BAD_MAPPINGS = {
    "unsloth/Qwen3-32B-unsloth-bnb-4bit".lower(): "unsloth/Qwen3-32B-bnb-4bit".lower(),  # 32B dynamic quant is way too big
    "unsloth/Qwen3-30B-A3B-unsloth-bnb-4bit".lower(): "unsloth/Qwen3-30B-A3B".lower(),  # HF loads MoEs too slowly
    "unsloth/Qwen3-30B-A3B-bnb-4bit".lower(): "unsloth/Qwen3-30B-A3B".lower(),  # We rather do it on the fly
    "unsloth/Qwen3-30B-A3B-Base-unsloth-bnb-4bit".lower(): "unsloth/Qwen3-30B-A3B-Base".lower(),  # HF loads MoEs too slowly
    "unsloth/Qwen3-30B-A3B-Base-bnb-4bit".lower(): "unsloth/Qwen3-30B-A3B-Base".lower(),  # We rather do it on the fly
}
def _get_env_int(keys):
    """Return the first environment variable in `keys` that parses as an int.

    Unset or non-numeric variables are skipped; None when nothing matches.
    """
    for name in keys:
        raw = os.environ.get(name)
        if raw is None:
            continue
        try:
            parsed = int(raw)
        except ValueError:
            pass
        else:
            return parsed
    return None
def _infer_distributed_ranks():
    """Best-effort (rank, world_size) detection.

    Prefers an initialized torch.distributed process group; otherwise falls
    back to LOCAL_RANK/RANK and WORLD_SIZE environment variables. Either
    element may be None when it cannot be determined.
    """
    dist = torch.distributed
    if dist.is_available() and dist.is_initialized():
        try:
            return dist.get_rank(), dist.get_world_size()
        except Exception:
            pass  # fall through to the environment-variable probe
    return _get_env_int(LOCAL_RANK_KEYS), _get_env_int(WORLD_SIZE_KEYS)
def is_distributed():
    """True when running under a multi-process (distributed) launch."""
    rank, world_size = _infer_distributed_ranks()
    if (world_size or 1) > 1:
        return True
    return rank is not None and rank > 0
def prepare_device_map():
    """Build a per-rank device map for distributed runs.

    Returns (device_map, True) pinning the whole model onto this process's
    accelerator when a distributed launch is detected, otherwise
    (None, False). Also best-effort selects that device as current.
    """
    rank, world_size = _infer_distributed_ranks()
    multi_process = (world_size or 1) > 1 or (rank is not None and rank > 0)
    if not multi_process:
        return None, False
    local_rank = rank if rank is not None else 0
    try:
        # Selecting the device may fail (e.g. fewer devices than ranks) — ignore.
        if DEVICE_TYPE_TORCH == "cuda":
            torch.cuda.set_device(local_rank)
        elif DEVICE_TYPE_TORCH == "xpu" and hasattr(torch, "xpu"):
            torch.xpu.set_device(local_rank)
    except Exception:
        pass
    return {"": f"{DEVICE_TYPE_TORCH}:{local_rank}"}, True
def __get_model_name(
    model_name,
    load_in_4bit = True,
    INT_TO_FLOAT_MAPPER = None,
    FLOAT_TO_INT_MAPPER = None,
    MAP_TO_UNSLOTH_16bit = None,
    load_in_fp8 = False,
    FLOAT_TO_FP8_BLOCK_MAPPER = None,
    FLOAT_TO_FP8_ROW_MAPPER = None,
):
    """Map `model_name` to the repo Unsloth should actually load, or None.

    All lookups are keyed on the lowercased name. FP8 requests consult only
    the FP8 mappers; otherwise the 4bit/16bit mappers are consulted depending
    on `load_in_4bit` and transformers' 4bit support.
    """
    model_name = str(model_name)
    key = model_name.lower()
    assert load_in_fp8 in (True, False, "block")
    if load_in_fp8 != False:
        # FP8 path: row-scaled quants are faster but need FBGEMM; blockwise
        # quants are the fallback.
        prefer_row = load_in_fp8 == True and (
            os.environ.get("UNSLOTH_HAS_FBGEMM", "0") == "1"
        )
        if prefer_row and key in FLOAT_TO_FP8_ROW_MAPPER:
            return FLOAT_TO_FP8_ROW_MAPPER[key]
        if key in FLOAT_TO_FP8_BLOCK_MAPPER:
            return FLOAT_TO_FP8_BLOCK_MAPPER[key]
        return None
    if not SUPPORTS_FOURBIT and key in INT_TO_FLOAT_MAPPER:
        # Old transformers: swap the prequantized repo for the full-precision one.
        model_name = INT_TO_FLOAT_MAPPER[key]
        print(
            f"Unsloth: Your transformers version of {transformers_version} does not support native "
            f"4bit loading.\nThe minimum required version is 4.37.\n"
            f'Try `pip install --upgrade "transformers>=4.37"`\n'
            f"to obtain the latest transformers build, then restart this session.\n"
            f"For now, we shall load `{model_name}` instead (still 4bit, just slower downloading)."
        )
        return model_name
    if not load_in_4bit:
        if key in INT_TO_FLOAT_MAPPER:
            return INT_TO_FLOAT_MAPPER[key]
        if key in MAP_TO_UNSLOTH_16bit:
            return MAP_TO_UNSLOTH_16bit[key]
    elif SUPPORTS_FOURBIT and key in FLOAT_TO_INT_MAPPER:
        # Support returning original full -bnb-4bit name if specified specifically
        # since we'll map it to the dynamic version instead
        if key.endswith("-bnb-4bit"):
            return key
        return FLOAT_TO_INT_MAPPER[key]
    return None
def _get_new_mapper():
    """Fetch the newest model-name mappers from the Unsloth `main` branch.

    Downloads mapper.py, prefixes its mapper dict names with NEW_ and exec's
    the snippet into this module's globals, then returns the three new dicts.
    Any failure (no network, timeout, parse error, ...) yields three empty
    dicts — callers treat that as "nothing new known".

    NOTE(review): this exec's remotely fetched code (trusted repo, but still
    a remote-code path).
    """
    try:
        import requests
        new_mapper = "https://raw.githubusercontent.com/unslothai/unsloth/main/unsloth/models/mapper.py"
        with requests.get(new_mapper, timeout = 3) as new_mapper:
            new_mapper = new_mapper.text
        # Keep only the mapper definitions (they start at __INT_TO_FLOAT_MAPPER).
        new_mapper = new_mapper[new_mapper.find("__INT_TO_FLOAT_MAPPER") :]
        # Rename so the fetched dicts do not clobber the bundled ones.
        new_mapper = (
            new_mapper.replace("INT_TO_FLOAT_MAPPER", "NEW_INT_TO_FLOAT_MAPPER")
            .replace("FLOAT_TO_INT_MAPPER", "NEW_FLOAT_TO_INT_MAPPER")
            .replace("MAP_TO_UNSLOTH_16bit", "NEW_MAP_TO_UNSLOTH_16bit")
        )
        # Defines the NEW_* names at module scope; they are read back below.
        exec(new_mapper, globals())
        return (
            NEW_INT_TO_FLOAT_MAPPER,
            NEW_FLOAT_TO_INT_MAPPER,
            NEW_MAP_TO_UNSLOTH_16bit,
        )
    except:
        return {}, {}, {}
def get_model_name(model_name, load_in_4bit = True, load_in_fp8 = False):
    """Resolve the repo name Unsloth should load for `model_name`.

    Applies the bundled mappers, redirects known-bad mappings, and — when the
    name is unknown but looks like a hub id — checks whether a newer Unsloth
    release supports it, raising NotImplementedError with upgrade steps.

    For FP8 requests the result may be None (meaning: quantize on the fly);
    otherwise the original `model_name` is returned when no mapping applies.
    """
    assert load_in_fp8 in (True, False, "block")
    resolved = __get_model_name(
        model_name = model_name,
        load_in_4bit = load_in_4bit,
        INT_TO_FLOAT_MAPPER = INT_TO_FLOAT_MAPPER,
        FLOAT_TO_INT_MAPPER = FLOAT_TO_INT_MAPPER,
        MAP_TO_UNSLOTH_16bit = MAP_TO_UNSLOTH_16bit,
        load_in_fp8 = load_in_fp8,
        FLOAT_TO_FP8_BLOCK_MAPPER = FLOAT_TO_FP8_BLOCK_MAPPER,
        FLOAT_TO_FP8_ROW_MAPPER = FLOAT_TO_FP8_ROW_MAPPER,
    )
    # In the rare case, we convert bad model names to other names
    # For eg too large dynamic quants or MoEs
    if resolved is not None and type(resolved) is str and resolved.lower() in BAD_MAPPINGS:
        resolved = BAD_MAPPINGS[resolved.lower()]
    looks_like_hub_id = model_name.count("/") == 1 and model_name[0].isalnum()
    if resolved is None and looks_like_hub_id:
        # Try checking if a new Unsloth version allows it!
        NEW_INT_MAPPER, NEW_FLOAT_MAPPER, NEW_16BIT_MAPPER = _get_new_mapper()
        upgraded_model_name = __get_model_name(
            model_name = model_name,
            load_in_4bit = load_in_4bit,
            INT_TO_FLOAT_MAPPER = NEW_INT_MAPPER,
            FLOAT_TO_INT_MAPPER = NEW_FLOAT_MAPPER,
            MAP_TO_UNSLOTH_16bit = NEW_16BIT_MAPPER,
            load_in_fp8 = load_in_fp8,
            FLOAT_TO_FP8_BLOCK_MAPPER = FLOAT_TO_FP8_BLOCK_MAPPER,
            FLOAT_TO_FP8_ROW_MAPPER = FLOAT_TO_FP8_ROW_MAPPER,
        )
        if upgraded_model_name is not None:
            raise NotImplementedError(
                f"Unsloth: {model_name} is not supported in your current Unsloth version! Please update Unsloth via:\n\n"
                "pip uninstall unsloth unsloth_zoo -y\n"
                'pip install --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n'
                'pip install --upgrade --no-cache-dir "git+https://github.com/unslothai/unsloth-zoo.git"\n'
            )
    if load_in_fp8 != False:
        # Handle on the fly TorchAO FP8 quantization — None is meaningful here.
        return resolved
    return resolved if resolved is not None else model_name
def _get_torchao_fp8_config(fp8_mode: str):
    """Build the torchao FP8 config used for `load_in_fp8`.

    `fp8_mode` must be "row" (per-row scales) or "block" (PerBlock [1,128] /
    [128,128] granularities); anything else raises ValueError. Returns a
    `Float8DynamicActivationFloat8WeightConfig`.
    """
    from torchao.quantization import (
        Float8DynamicActivationFloat8WeightConfig,
        PerBlock,
        PerRow,
    )
    if fp8_mode == "row":
        scale_granularity = PerRow()
    elif fp8_mode == "block":
        scale_granularity = (PerBlock([1, 128]), PerBlock([128, 128]))
    else:
        raise ValueError("Unsloth: `load_in_fp8` supports only 'row' or 'block'")
    return Float8DynamicActivationFloat8WeightConfig(
        granularity = scale_granularity,
        activation_value_lb = 1e-12,
    )
def _offline_quantize_to_fp8(model_name: str, fp8_mode: str) -> str:
    """
    Quantizes the model to fp8 using torchao and saving the quantized model to a
    temporary location. Return the path to the quantized model.

    Note: Once on-the-fly quantization is added in vllm in
    https://github.com/vllm-project/vllm/pull/26327, we should
    dynamically quantize the model there instead:

        llm = LLM(
            ...
            hf_overrides={"quantization_config_file": "torchao_config.json"},
        )
    """
    # Deterministic cache path, e.g. <tmp>/<model>-fp8-row, reused across calls.
    temp_dir = tempfile.gettempdir()
    new_model_name = model_name.split("/")[-1] + "-fp8-" + fp8_mode
    new_model_name = os.path.join(temp_dir, new_model_name)
    print(
        f"Unsloth: Quantizing '{model_name}' to fp8, using model_name='{new_model_name}' instead"
    )
    if not os.path.isdir(new_model_name):
        # Not cached yet: load the model with a TorchAO quantization config,
        # then persist the quantized weights + tokenizer to the cache path.
        from transformers import (
            AutoModelForCausalLM,
            AutoModelForImageTextToText,
            AutoTokenizer,
            AutoProcessor,
            TorchAoConfig,
            AutoConfig,
        )
        qconfig = _get_torchao_fp8_config(fp8_mode)
        qconfig = TorchAoConfig(qconfig)
        # Choose VLM vs plain-LM auto classes from the checkpoint's config.
        config = AutoConfig.from_pretrained(model_name)
        is_vlm = any(
            x.endswith(("ForConditionalGeneration", "ForVisionText2Text"))
            for x in config.architectures
        )
        is_vlm = is_vlm or hasattr(config, "vision_config")
        auto_model = AutoModelForImageTextToText if is_vlm else AutoModelForCausalLM
        auto_processor = AutoProcessor if is_vlm else AutoTokenizer
        model = auto_model.from_pretrained(
            model_name,
            torch_dtype = "auto",
            device_map = "auto",
            quantization_config = qconfig,
        )
        tokenizer = auto_processor.from_pretrained(model_name)
        # safe_serialization=False — presumably needed for torchao tensor
        # subclasses which safetensors cannot store; TODO confirm.
        model.save_pretrained(new_model_name, safe_serialization = False)
        # Free the in-memory model before the caller reloads from disk.
        del model
        for _ in range(2):
            torch.cuda.empty_cache()
            gc.collect()
        tokenizer.save_pretrained(new_model_name)
    return new_model_name
def _tag_model_with_fp8_torchao_config(model: torch.nn.Module, fp8_mode: str):
    """Attach a `TorchAOConfig` describing the FP8 scheme to `model`.

    Best-effort: downstream callers read `model.torchao_config` to decide what
    to do with the model; any failure here is silently ignored.
    """
    try:
        fp8_base_config = _get_torchao_fp8_config(fp8_mode)
        model.torchao_config = TorchAOConfig(
            qat_scheme = None,
            base_config_and_filter_fns = [(fp8_base_config, None)],
        )
    except:
        pass
def _get_fp8_mode_and_check_settings(
    load_in_fp8: Union[bool, str],
    fast_inference: bool,
    full_finetuning: bool,
    load_in_4bit: bool,
    load_in_8bit: bool,
    load_in_16bit: bool,
    use_exact_model_name: bool,
) -> str:
    """Validate settings/environment for `load_in_fp8`; return the fp8 mode.

    Assuming `load_in_fp8` is enabled, raise on incompatible settings or
    environment. Requirements:
      1. H100 GPUs or after (SM 9.0+)
      2. torchao 0.15.0+ (or nightly)
      3. torch 2.9.0+
      4. fbgemm_gpu_genai 1.4.1+ if it is installed
    plus `fast_inference=True`, no 4/8/16-bit flags, no full finetuning, and
    `use_exact_model_name=False`.

    Returns one of "row" or "block".
    """
    assert load_in_fp8 is not False
    # True selects the default row-wise scheme; a string passes through.
    fp8_mode = "row" if load_in_fp8 is True else load_in_fp8
    # ---- User-setting checks ----
    if fp8_mode not in ["row", "block"]:
        raise ValueError(
            f"Unsloth: `load_in_fp8` can only be 'row' or 'block', got '{fp8_mode}'"
        )
    if not fast_inference:
        raise ValueError(
            "Unsloth: `load_in_fp8` is only supported for `fast_inference` for now"
        )
    if full_finetuning:
        raise ValueError(
            "Unsloth: `load_in_fp8` is not compatible with full finetuning"
        )
    if load_in_4bit or load_in_8bit or load_in_16bit:
        raise ValueError(
            "Unsloth: `load_in_fp8` is not compatible with `load_in_4bit`, `load_in_8bit` or `load_in_16bit`",
        )
    if use_exact_model_name:
        raise ValueError("Unsloth: `load_in_fp8` requires `use_exact_model_name=False`")
    # ---- Environment checks ----
    # Hopper (SM 9.0) or newer is required.
    hopper_or_newer = (
        torch.cuda.is_available()
        and torch.version.cuda
        and torch.cuda.get_device_capability() >= (9, 0)
    )
    if not hopper_or_newer:
        raise ValueError(
            "Unsloth: On the fly `load_in_fp8` requires H100 GPUs or after. Try `unsloth/Qwen3-8B` instead."
        )
    if Version(torch.__version__) < Version("2.9.0"):
        raise ValueError(
            "Unsloth: On the fly `load_in_fp8` requires torch 2.9.0+. Try `unsloth/Qwen3-8B` instead."
        )
    # torchao must include https://github.com/pytorch/ao/pull/3158 (released in 0.15.0).
    if importlib.util.find_spec("torchao") is None:
        raise ValueError(
            "Unsloth: Please install torchao for on the fly float8 to work! Try `unsloth/Qwen3-8B` instead."
        )
    import torchao
    if Version(torchao.__version__) < Version("0.15.0"):
        raise ValueError(
            "Unsloth: `load_in_fp8` requires torchao 0.15.0+ (or nightly).\n"
            f"You have torchao version={torchao.__version__}\n"
            "Use `pip install --upgrade --force-reinstall torchao`"
        )
    # If fbgemm_gpu_genai is installed, it must be >= 1.4.1.
    if (
        importlib.util.find_spec("fbgemm_gpu") is not None
        and importlib.util.find_spec("fbgemm_gpu.experimental") is not None
    ):
        import fbgemm_gpu.experimental.gen_ai
        if Version(fbgemm_gpu.__version__) < Version("1.4.1"):
            raise ValueError(
                "Unsloth: On the fly `load_in_fp8` is only compatible with fbgemm_gpu_genai 1.4.1+. Try `unsloth/Qwen3-8B` instead."
            )
    return fp8_mode
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/models/gemma.py | unsloth/models/gemma.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .llama import *
from ._utils import __version__
from unsloth_zoo.utils import _get_dtype
from unsloth_zoo.hf_utils import dtype_from_config
from ..utils.packing import (
build_sdpa_packed_attention_mask,
build_xformers_block_causal_mask,
get_packed_info_from_kwargs,
)
import math
try:
from transformers.models.gemma.modeling_gemma import (
GemmaAttention,
GemmaDecoderLayer,
GemmaModel,
GemmaForCausalLM,
GemmaRotaryEmbedding,
apply_rotary_pos_emb,
repeat_kv,
)
except:
from packaging.version import Version
transformers_version = Version(transformers_version)
if not transformers_version >= Version("4.38"):
raise ImportError(
f"Unsloth: Your transformers version of {transformers_version} does not support Gemma.\n"
f"The minimum required version is 4.38.\n"
f'Try `pip install --upgrade "transformers>=4.38"`\n'
f"to obtain the latest transformers build, then restart this session."
)
from transformers.modeling_attn_mask_utils import (
_prepare_4d_causal_attention_mask_for_sdpa,
)
# For Pytorch 2.1.1
try:
from transformers.models.gemma.modeling_gemma import (
GemmaSdpaAttention,
GemmaFlashAttention2,
)
except:
GemmaSdpaAttention = GemmaAttention
GemmaFlashAttention2 = GemmaAttention
# Local alias: gelu is called in the GeGLU inference hot path below.
torch_nn_functional_gelu = torch.nn.functional.gelu
def fast_geglu_inference(self, X):
    """Single-step GeGLU MLP: down_proj(gelu_tanh(gate_proj(X)) * up_proj(X)).

    The down-projection is written into the leading `hd` columns of the `up`
    activation buffer to avoid an extra allocation during decoding.
    """
    _, _, hidden_dim = X.shape
    gate_states = fast_linear_forward(self.gate_proj, X)
    up_states = fast_linear_forward(self.up_proj, X)
    # Tanh-approximated GELU on the gate, then elementwise gating (in place).
    gate_states = torch_nn_functional_gelu(gate_states, approximate = "tanh")
    gate_states *= up_states
    # Reuse a slice of `up_states` as the output buffer for down_proj.
    return fast_linear_forward(
        self.down_proj, gate_states, out = up_states[:, :, :hidden_dim]
    )
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L590
def GemmaDecoderLayer_fast_forward(
    self,
    hidden_states: torch.Tensor,
    causal_mask: Optional[BlockDiagonalCausalMask] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: Optional[bool] = False,
    use_cache: Optional[bool] = False,
    padding_mask: Optional[torch.LongTensor] = None,
    *args,
    **kwargs,
):
    """Fast Gemma decoder layer: pre-norm attention then pre-norm GeGLU MLP,
    each wrapped in a residual connection.

    Two paths:
      * generation (use_cache set and `_flag_for_generation` present): fused
        inference RMSNorm/GeGLU helpers with in-place residual adds;
      * training/prefill: fast_rms_layernorm (gemma variant) + module calls.

    Returns (hidden_states, [attn_weights if output_attentions],
    [present_key_value if use_cache]).
    """
    if use_cache and hasattr(
        self, "_flag_for_generation"
    ):  # past_key_value is not None:
        # fp32 scratch buffer shared by both fused RMSNorm calls below.
        # NOTE(review): hard-coded to cuda:0 — confirm for multi-GPU layouts.
        out_weight = torch.empty(
            self.input_layernorm.weight.shape, dtype = torch.float32, device = "cuda:0"
        )
        # Self Attention
        residual = hidden_states
        hidden_states = fast_rms_layernorm_inference_gemma(
            self.input_layernorm, hidden_states, out_weight
        )
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states = hidden_states,
            causal_mask = causal_mask,
            attention_mask = attention_mask,
            position_ids = position_ids,
            past_key_value = past_key_value,
            output_attentions = output_attentions,
            use_cache = use_cache,
            padding_mask = padding_mask,
            **kwargs,
        )
        hidden_states += residual  # in-place residual add (decode path)
        # Fully Connected
        residual = hidden_states
        hidden_states = fast_rms_layernorm_inference_gemma(
            self.post_attention_layernorm, hidden_states, out_weight
        )
        hidden_states = fast_geglu_inference(self.mlp, hidden_states)
        hidden_states += residual
    else:
        residual = hidden_states
        # gemma=True selects the Gemma variant of the fused RMSNorm.
        hidden_states = fast_rms_layernorm(
            self.input_layernorm, hidden_states, gemma = True
        )
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states = hidden_states,
            causal_mask = causal_mask,
            attention_mask = attention_mask,
            position_ids = position_ids,
            past_key_value = past_key_value,
            output_attentions = output_attentions,
            use_cache = use_cache,
            padding_mask = padding_mask,
            **kwargs,
        )
        hidden_states = residual + hidden_states
        # Fully Connected
        residual = hidden_states
        hidden_states = fast_rms_layernorm(
            self.post_attention_layernorm, hidden_states, gemma = True
        )
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
    outputs = (hidden_states,)
    if output_attentions:
        outputs += (self_attn_weights,)
    if use_cache:
        outputs += (present_key_value,)
    return outputs
from math import sqrt as math_sqrt
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825
# @torch.inference_mode
def GemmaModel_fast_forward_inference(
    self,
    input_ids,
    past_key_values,
    position_ids,
    attention_mask = None,
    **kwargs,
):
    """Decode-step forward for Gemma: embed, scale, run every decoder layer
    with fused inference kernels, then apply the final norm.

    Returns a BaseModelOutputWithPast whose past_key_values is the list of
    per-layer (k, v) caches produced by this step.
    """
    # One fp32 scratch buffer per visible device for the fused RMSNorm kernels.
    out_weights = tuple(
        torch.empty_like(
            self.model.layers[0].input_layernorm.weight,
            dtype = torch.float32,
            device = torch.device(x),
        )
        for x in range(DEVICE_COUNT)
    )
    input_ids = input_ids[:, : self.max_seq_length]
    hidden_states = self.model.embed_tokens(input_ids)
    hidden_states = hidden_states.to(_get_dtype(dtype_from_config(self.config)))
    # Gemma scales embeddings by sqrt(hidden_size); do the multiply in the
    # activation dtype to match reference numerics:
    # 3072**0.5 = 55.5000 in bfloat16, whilst 55.4256 in float32
    # 2048**0.5 = 45.2500 in bfloat16, whilst 45.2548 in float32
    hidden_states *= torch.tensor(
        math_sqrt(self.config.hidden_size), dtype = hidden_states.dtype
    )
    bsz, q_len, hd = hidden_states.shape
    seq_len = past_key_values[0][0].shape[-2]
    if bsz != 1:
        # Batched decode needs an explicit 4D SDPA causal mask.
        attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
            attention_mask,
            (bsz, q_len),
            hidden_states,
            seq_len,
        )
    next_decoder_cache = []
    for idx, decoder_layer in enumerate(self.model.layers):
        # Layers may live on different devices; move activations accordingly.
        device_index = getattr(decoder_layer, "_per_layer_device_index", 0)
        hidden_states, position_ids = move_to_device(
            device_index, hidden_states, position_ids
        )
        residual = hidden_states
        hidden_states = fast_rms_layernorm_inference_gemma(
            decoder_layer.input_layernorm, hidden_states, out_weights[device_index]
        )
        hidden_states, present_key_value = LlamaAttention_fast_forward_inference(
            decoder_layer.self_attn,
            hidden_states = hidden_states,
            past_key_value = past_key_values[idx],
            position_ids = position_ids,
            attention_mask = attention_mask,
            do_prefill = not hasattr(decoder_layer.self_attn, "paged_attention"),
        )
        hidden_states += residual
        residual = hidden_states
        hidden_states = fast_rms_layernorm_inference_gemma(
            decoder_layer.post_attention_layernorm,
            hidden_states,
            out_weights[device_index],
        )
        hidden_states = fast_geglu_inference(decoder_layer.mlp, hidden_states)
        hidden_states += residual
        next_decoder_cache.append(present_key_value)
    # Final norm runs on the last layer's device (device_index from the loop).
    hidden_states = fast_rms_layernorm_inference_gemma(
        self.model.norm, hidden_states, out_weights[device_index]
    )
    return BaseModelOutputWithPast(
        last_hidden_state = hidden_states,
        past_key_values = next_decoder_cache,
        hidden_states = [],
        attentions = [],
    )
# Follows line by line https://github.com/google-deepmind/gemma/blob/main/gemma/positional_embeddings.py#L45
# Formulates cos and sin differently from Llama!
class GemmaFixedRotaryEmbedding(torch.nn.Module):
    """Gemma rotary position embedding (RoPE) with one cos/sin cache per GPU.

    Fixes https://github.com/huggingface/transformers/pull/28837 and
    https://github.com/microsoft/DeepSpeed/issues/4932: the precision of the
    RoPE buffers was wrong, so position indices are created as int64 and the
    cos / sin tables are computed and kept in float32 (never downcast to the
    model dtype).

    Unlike Llama, Gemma computes angles as positions / timescale (explicit
    division, t / x) rather than positions * inv_freq (t * (1/x)), matching
    the reference implementation numerically.
    """

    def __init__(
        self,
        dim = None,
        max_position_embeddings = 2048,
        base = 10000,
        device = None,
        config = None,  # [TODO] Hack to pass in config - need to remove later
    ):
        """Build the per-device cos/sin caches.

        Args:
            dim: Rotary dimension (the attention head size). Overridden when
                `config` is supplied.
            max_position_embeddings: Maximum sequence length to support.
            base: RoPE theta (frequency base). Overridden by `config.rope_theta`.
            device: Unused placeholder kept for interface compatibility.
            config: Optional HF model config; when given, `dim`, `base` and
                `max_position_embeddings` are derived from it instead.
        """
        super().__init__()
        if config is not None:
            # [TODO] Hack to pass in config - need to remove later
            base = config.rope_theta
            # NOTE: the previous version also read config.partial_rotary_factor
            # into a local that was never used - removed as dead code.
            dim = getattr(config, "head_dim", None)
            if dim is None:
                dim = int(config.hidden_size // config.num_attention_heads)
            device = "cuda"
            max_position_embeddings = config.max_position_embeddings
        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        # Dynamic RoPE: start with at most 4 * 8192 tokens, then grow the
        # tables iteratively (see extend_rope_embedding) instead of
        # allocating the full table upfront.
        self.current_rope_size = min(4 * 8192, self.max_position_embeddings)
        self.multi_gpu_cos_cached = [None] * DEVICE_COUNT
        self.multi_gpu_sin_cached = [None] * DEVICE_COUNT
        # Build here to make `torch.jit.trace` work.
        for device in range(DEVICE_COUNT):
            self._set_cos_sin_cache(
                seq_len = self.current_rope_size,
                device = torch.device(device),
                dtype = torch.get_default_dtype(),
            )
        # dummy so that patch_utils doesn't fail for now
        self.cos_cached = torch.empty(
            1, device = torch.cuda.current_device(), dtype = torch.get_default_dtype()
        )
        self.sin_cached = torch.empty(
            1, device = torch.cuda.current_device(), dtype = torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        """Recompute the cos/sin tables for `device` covering `seq_len` positions.

        Tables are built on CPU in float32 and moved to `device`; the `dtype`
        argument is intentionally unused because RoPE must stay in float32.
        Side effect: updates self.current_rope_size and the per-device caches.
        Returns the (cos, sin) pair for convenience.
        """
        self.current_rope_size = seq_len
        # The difference is we do division explicitly instead of t * (1/x) ie we do t/x.
        freq_exponents = (2.0 / self.dim) * (
            torch.arange(self.dim // 2, dtype = torch.int64, device = "cpu").float()
        )
        timescale = self.base**freq_exponents
        positions = torch.arange(
            self.current_rope_size, device = "cpu", dtype = torch.int64
        ).float()
        radians_new = positions[..., None] / timescale[None, None, :]
        radians_new = radians_new.squeeze(0)
        emb = torch.cat((radians_new, radians_new), dim = -1)
        # We must do RoPE in float32!
        cos = emb.cos().to(device = device, non_blocking = True)  # , dtype = dtype)
        sin = emb.sin().to(device = device, non_blocking = True)  # , dtype = dtype)
        self.multi_gpu_cos_cached[device.index] = cos
        self.multi_gpu_sin_cached[device.index] = sin
        return cos, sin

    def forward(self, x, position_ids = None, seq_len = None):
        """Return (cos, sin) slices for x's device, growing that device's cache
        first when `seq_len` exceeds the current table size.

        x: [bs, num_attention_heads, seq_len, head_size]
        """
        if seq_len is not None and seq_len > self.current_rope_size:
            self._set_cos_sin_cache(seq_len = seq_len, device = x.device, dtype = x.dtype)
        device_index = x.device.index
        return (
            self.multi_gpu_cos_cached[device_index][:seq_len],
            self.multi_gpu_sin_cached[device_index][:seq_len],
        )

    def get_cached(self, seq_len = None, device_index = None):
        """Return the full cached (cos, sin) tables for a device (default: current CUDA device)."""
        if device_index is None:
            device_index = torch.cuda.current_device()
        return self.multi_gpu_cos_cached[device_index], self.multi_gpu_sin_cached[
            device_index
        ]

    def extend_rope_embedding(self, x, seq_len):
        """Grow every device's cache to cover `seq_len`, rounded up to 8192-token steps."""
        if seq_len <= self.current_rope_size:
            return
        # Iteratively grow by increments of 8192
        self.current_rope_size = math.ceil(seq_len / 8192) * 8192
        for device in range(DEVICE_COUNT):
            self._set_cos_sin_cache(
                self.current_rope_size, device = torch.device(device), dtype = x.dtype
            )
class GemmaFixedLinearScalingRotaryEmbedding(GemmaFixedRotaryEmbedding):
    """LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""

    # Fixes https://github.com/huggingface/transformers/pull/28837
    # https://github.com/microsoft/DeepSpeed/issues/4932
    # The precision of RoPE buffers is not correct, so we cast to int64.
    def __init__(
        self,
        dim = None,
        max_position_embeddings = 2048,
        base = 10000,
        device = None,
        scaling_factor = 1.0,
        config = None,  # [TODO] Hack to pass in config - need to remove later
    ):
        # The scale must be stored *before* calling the parent constructor:
        # the parent's __init__ immediately builds the cos/sin caches through
        # our overridden _set_cos_sin_cache, which reads self.scaling_factor.
        self.scaling_factor = scaling_factor
        super().__init__(
            dim = dim,
            max_position_embeddings = max_position_embeddings,
            base = base,
            device = device,
            config = config,
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        """Rebuild this device's cos/sin tables with linearly scaled positions.

        Tables are computed on CPU in float32 (int64 indices first, for
        precision) and only then moved to `device`; `dtype` is intentionally
        ignored since RoPE must stay in float32.
        """
        self.current_rope_size = seq_len
        # Exponents 2i/d for i in [0, d/2). Gemma divides by the timescale
        # (t / x) instead of multiplying by an inverse frequency.
        exponents = (2.0 / self.dim) * (
            torch.arange(self.dim // 2, dtype = torch.int64, device = "cpu").float()
        )
        timescale = self.base**exponents
        # Linear scaling stretches the position indices by 1/scaling_factor.
        scaled_positions = torch.arange(
            self.current_rope_size, device = "cpu", dtype = torch.int64
        ).float() / self.scaling_factor
        radians = (scaled_positions[..., None] / timescale[None, None, :]).squeeze(0)
        angles = torch.cat((radians, radians), dim = -1)
        # We must do RoPE in float32!
        cos = angles.cos().to(device = device, non_blocking = True)
        sin = angles.sin().to(device = device, non_blocking = True)
        self.multi_gpu_cos_cached[device.index] = cos
        self.multi_gpu_sin_cached[device.index] = sin
        return cos, sin
class FastGemmaModel(FastLlamaModel):
    """Unsloth fast path for Gemma: rebinds HF Gemma classes to the optimized
    Llama-style forward implementations and installs the fixed RoPE modules."""

    @staticmethod
    def pre_patch():
        """Monkey-patch transformers' Gemma classes before any model is loaded.

        patch_linear_scaling generates source for a replacement
        GemmaAttention.__init__ that wires in the fixed rotary embeddings;
        it is exec'd into globals() and bound by name. All attention /
        decoder-layer / model / CausalLM forwards are then swapped for the
        fast implementations defined elsewhere in this file.
        """
        init_name, function = patch_linear_scaling(
            model_name = "gemma",
            rope_module = GemmaFixedRotaryEmbedding,
            scaled_rope_module = GemmaFixedLinearScalingRotaryEmbedding,
            attention_module = GemmaAttention,
        )
        if init_name is not None:
            # `function` is generated source code; exec it, then look up the
            # freshly defined __init__ by its name and bind it.
            exec(function, globals())
            GemmaAttention.__init__ = eval(init_name)
        GemmaAttention.forward = LlamaAttention_fast_forward
        GemmaSdpaAttention.forward = LlamaAttention_fast_forward
        GemmaFlashAttention2.forward = LlamaAttention_fast_forward
        GemmaDecoderLayer.forward = GemmaDecoderLayer_fast_forward
        GemmaModel.forward = LlamaModel_fast_forward
        GemmaForCausalLM.forward = CausalLM_fast_forward(
            GemmaModel_fast_forward_inference
        )
        PeftModelForCausalLM.forward = PeftModel_fast_forward
        fix_prepare_inputs_for_generation(GemmaForCausalLM)

        # Solves https://github.com/unslothai/unsloth/issues/168
        # Static KV Cache was introduced in 4.38.0, causing training to be much slower.
        # Inference can now be CUDAGraphed, but we shall retain the old rotary embeddings.
        # https://github.com/huggingface/transformers/pull/27931
        # https://github.com/huggingface/transformers/blob/v4.37.2/src/transformers/models/llama/modeling_llama.py
        import transformers.models.gemma.modeling_gemma

        transformers.models.gemma.modeling_gemma.GemmaRotaryEmbedding = (
            GemmaFixedRotaryEmbedding
        )
        return

    @staticmethod
    def post_patch(model, tokenizer):
        """Post-load fixes: keep RoPE in full precision, freeze non-LoRA
        params, normalize GemmaRMSNorm's epsilon attribute, and free GPU
        memory. Returns the (model, tokenizer) pair."""
        # Gemma does not downcast RoPE
        model, tokenizer = patch_model_and_tokenizer(
            model, tokenizer, downcast_rope = False
        )

        # Add 1 to weight
        # return output * (1 + self.weight)
        # https://github.com/huggingface/transformers/blob/main/src/transformers/models/gemma/modeling_gemma.py#L89
        from transformers.models.gemma.modeling_gemma import GemmaRMSNorm

        # Freeze all parameters except LoRA
        # We do this first since += 1 seems to not be liked by requires_grad = True
        for name, param in model.named_parameters():
            if ".lora_A." in name or ".lora_B." in name:
                param.requires_grad_(True)
            else:
                param.requires_grad_(False)

        # Patch RMS Layernorm
        for name, module in model.named_modules():
            if isinstance(module, GemmaRMSNorm):
                # Must be in float32
                # https://github.com/keras-team/keras-nlp/blob/v0.8.2/keras_nlp/models/gemma/rms_normalization.py#L36
                # module = module.to(torch.float32)
                # Leave + 1 to Triton kernel itself
                # module.weight += 1.0 # return output * (1 + self.weight)
                if not hasattr(module, "variance_epsilon"):
                    module.variance_epsilon = (
                        module.eps
                    )  # Gemma doesn't use variance_epsilon
        # Clear deleted GPU items
        import gc

        for _ in range(3):
            gc.collect()
            torch.cuda.empty_cache()
        return model, tokenizer
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/models/vision.py | unsloth/models/vision.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from transformers import (
BitsAndBytesConfig,
AutoProcessor,
AutoTokenizer,
AutoModelForCausalLM,
)
try:
from transformers import AutoModelForImageTextToText
AutoModelForVision2Seq = AutoModelForImageTextToText
except:
from transformers import AutoModelForVision2Seq
from ..kernels import (
post_patch_loss_function,
)
from ._utils import __version__, importlib_version, _prepare_model_for_qat
from ._utils import *
from ..save import patch_saving_functions
from ..models.loader_utils import is_distributed
from unsloth_zoo.gradient_checkpointing import (
unpatch_unsloth_gradient_checkpointing,
unpatch_unsloth_smart_gradient_checkpointing,
)
import torch.utils.checkpoint as torch_checkpoint
import transformers.modeling_utils as hf_modeling_utils
from peft import LoraConfig, TaskType, get_peft_model as _get_peft_model
from peft import PeftModelForCausalLM
from transformers import set_seed as transformers_set_seed
from unsloth_zoo.peft_utils import (
get_peft_regex,
SKIP_QUANTIZATION_MODULES,
requires_grad_for_gradient_checkpointing,
)
from transformers.models.llama.modeling_llama import logger
from transformers import __version__ as transformers_version
from triton import __version__ as triton_version
from unsloth_zoo.utils import _get_dtype
from unsloth_zoo.hf_utils import (
dtype_from_config,
add_dtype_kwargs,
fix_lora_auto_mapping,
get_auto_processor,
)
from unsloth_zoo.patching_utils import patch_model_and_tokenizer
from unsloth_zoo.training_utils import prepare_model_for_training
from unsloth_zoo.utils import Version
from transformers import __version__ as transformers_version
import types
import functools
import os
import gc
import math
from typing import Optional, Tuple, List, Union
import re, inspect, sys
import contextlib
try:
from huggingface_hub.utils import get_token
except:
# Old HF Hub versions <= 0.0.25
from huggingface_hub.utils._token import get_token
from ..device_type import (
is_hip,
get_device_type,
DEVICE_TYPE,
DEVICE_TYPE_TORCH,
DEVICE_COUNT,
ALLOW_PREQUANTIZED_MODELS,
)
__all__ = [
    "FastBaseModel",
]

# Per-architecture memo of which kwarg name the model's forward accepts for
# truncating logits ("num_logits_to_keep" vs "logits_to_keep"), or None when
# neither is supported. Filled lazily by unsloth_base_fast_generate.
global NUM_LOGITS_TO_KEEP
NUM_LOGITS_TO_KEEP = dict()

# Vision-language model types for which vLLM fast inference is supported.
VLLM_SUPPORTED_VLM = [
    "qwen2_5_vl",
    "gemma3",
    "mistral3",
    "qwen3_vl",
]
# VLM model types where vLLM cannot serve LoRA adapters (still on vLLM V0).
VLLM_NON_LORA_VLM = [
    "mllama",
]
# Model types whose inference path should be pre-compiled.
PRE_COMPILE_INFERENCE = [
    "gpt_oss",
]
from transformers import GenerationConfig, CompileConfig, HybridCache, AutoConfig

# transformers renamed PretrainedConfig -> PreTrainedConfig; support both spellings.
try:
    from transformers import PreTrainedConfig

    PretrainedConfig = PreTrainedConfig
except:
    from transformers import PretrainedConfig
# True when this transformers version still documents `torch_dtype` on configs.
HAS_TORCH_DTYPE = "torch_dtype" in PretrainedConfig.__doc__
from transformers import GenerationConfig, CompileConfig, HybridCache

# Shared torch.compile settings attached to generation when a static KV cache
# is used; starts disabled and is toggled explicitly.
_compile_config = CompileConfig(
    fullgraph = False,
    dynamic = None,
    mode = "reduce-overhead",
)
_compile_config.disable = True  # Must set manually
from unsloth_zoo.vllm_utils import (
    convert_lora_modules,
    return_lora_modules,
)

# torch.compiler.set_stance only exists on newer PyTorch versions.
try:
    torch_compiler_set_stance = torch.compiler.set_stance
except:
    torch_compiler_set_stance = None
def unsloth_base_fast_generate(
    self,
    *args,
    **kwargs,
):
    """Replacement for HuggingFace `generate` installed on patched models.

    Puts the model in inference mode, normalizes kwargs (logits_to_keep,
    pad_token_id, pixel_values dtype), marks dynamo dynamic shapes, picks a
    KV-cache implementation based on the model / transformers version, then
    runs the original generate (saved as self._old_generate) under autocast
    and inference_mode. Returns whatever `_old_generate` returns.
    """
    # Locate the main tensor input among positional args / known kwarg names.
    if len(args) != 0:
        input_ids = args[0]
    elif "input_ids" in kwargs:
        input_ids = kwargs["input_ids"]
    elif "input" in kwargs:
        input_ids = kwargs["input"]
    elif "input_features" in kwargs:
        input_ids = kwargs["input_features"]
    elif "input_embeds" in kwargs:
        input_ids = kwargs["input_embeds"]
    elif "inputs" in kwargs:
        input_ids = kwargs["inputs"]
    else:
        # Last resort: accept the first kwarg if it is a tensor.
        key = next(iter(kwargs.keys()))
        if type(kwargs[key]) is not torch.Tensor:
            raise TypeError("Unsloth: You need to pass in input_ids to .generate!")
        input_ids = kwargs[key]
    assert type(input_ids) is torch.Tensor
    bsz = input_ids.shape[0]

    FastBaseModel.for_inference(self)
    dtype = _get_dtype(dtype_from_config(self.config))
    # Handle full float32 cases as config.dtype == torch.float32!
    do_bfloat16_mixed_precision = (
        os.environ.get("UNSLOTH_BFLOAT16_MIXED_PRECISION", "0") == "1"
    )
    if do_bfloat16_mixed_precision:
        dtype = torch.bfloat16

    # Check if VLM
    is_vlm = any(
        x.endswith(("ForConditionalGeneration", "ForVisionText2Text"))
        for x in self.config.architectures
    )
    is_vlm = is_vlm or hasattr(self.config, "vision_config")
    arch = self.config.architectures[0]

    # Remove token_type_ids - WRONG for Gemma 3 since bidirectional attention
    if hasattr(self, "generate") and hasattr(self, "forward"):
        # did not combine with below since self might not have model
        keys = inspect.signature(self.forward).parameters.keys()
        if "token_type_ids" not in keys:
            kwargs.pop("token_type_ids", None)
    # kwargs.pop("token_type_ids", None)

    # VLMs do not allow logits_to_keep
    global NUM_LOGITS_TO_KEEP
    if arch not in NUM_LOGITS_TO_KEEP:
        m = self
        # Find which is needed ie
        # num_logits_to_keep or logits_to_keep
        # Walk down the .model chain until a forward that accepts one of them.
        while hasattr(m, "model"):
            if hasattr(m, "forward"):
                keys = inspect.signature(m.forward).parameters.keys()
                if "num_logits_to_keep" in keys:
                    NUM_LOGITS_TO_KEEP[arch] = "num_logits_to_keep"
                    break
                elif "logits_to_keep" in keys:
                    NUM_LOGITS_TO_KEEP[arch] = "logits_to_keep"
                    break
            m = m.model
        if arch not in NUM_LOGITS_TO_KEEP:
            NUM_LOGITS_TO_KEEP[arch] = None
    key = NUM_LOGITS_TO_KEEP[arch]
    if key is not None and key not in kwargs:
        # Only the final token's logits are needed for generation.
        kwargs[key] = 1

    # Check pad_token: default pad_token_id to the model's (first) EOS id.
    model_eos_token_id = getattr(self.config, "eos_token_id", None)
    if model_eos_token_id is not None and hasattr(model_eos_token_id, "__iter__"):
        model_eos_token_id = model_eos_token_id[0]
    kwargs["pad_token_id"] = kwargs.pop("pad_token_id", model_eos_token_id)

    # Get pixel values for VLMs: cast to the compute dtype if present.
    try:
        kwargs["pixel_values"] = kwargs["pixel_values"].to(dtype)
    except:
        pass

    # Mixed precision autocast
    if os.environ.get("UNSLOTH_FORCE_FLOAT32", "0") == "1":
        autocaster = torch.autocast(device_type = DEVICE_TYPE_TORCH, dtype = torch.float16)
        dtype = torch.float16
    else:
        autocaster = torch.autocast(device_type = DEVICE_TYPE_TORCH, dtype = dtype)

    # Prepare LoRA
    # state_dict = convert_lora_modules(self, dtype = dtype)

    # Set compile dynamic shapes: batch dim static, sequence dim dynamic.
    torch._dynamo.mark_static(input_ids, 0)
    torch._dynamo.mark_dynamic(input_ids, 1)
    if "attention_mask" in kwargs:
        torch._dynamo.mark_static(kwargs["attention_mask"], 0)
        torch._dynamo.mark_dynamic(kwargs["attention_mask"], 1)
    if "token_type_ids" in kwargs:
        torch._dynamo.mark_static(kwargs["token_type_ids"], 0)
        torch._dynamo.mark_dynamic(kwargs["token_type_ids"], 1)

    # Fix generation_config
    # Use hybrid if sliding window seen, otherwise try static
    cache_implementation = getattr(self.config, "cache_implementation", None)
    if getattr(
        self, "_supports_static_cache", getattr(self, "_can_compile_fullgraph", True)
    ):
        if os.environ.get("UNSLOTH_DISABLE_STATIC_GENERATION", "0") == "0":
            cache_implementation = "static"
        elif Version(transformers_version) < Version("4.56.0.dev0"):
            cache_implementation = None
        else:
            # Should work in latest transformers!
            cache_implementation = "static"
    else:
        cache_implementation = None
    if cache_implementation is not None:
        # Sliding-window models need a hybrid cache on older transformers.
        swa = getattr(
            getattr(self.config, "text_config", self.config), "sliding_window", None
        )
        if (swa == 0 or type(swa) is not int) and (
            getattr(self, "_can_compile_fullgraph", True) is True
        ):
            cache_implementation = "static"
        else:
            if Version(transformers_version) < Version("4.56.0.dev0"):
                cache_implementation = "hybrid"
            else:
                cache_implementation = "static"
    # [TODO] Unsure why static fails
    if do_bfloat16_mixed_precision:
        cache_implementation = None
    if "generation_config" in kwargs:
        kwargs["generation_config"].cache_implementation = cache_implementation
        if cache_implementation is not None:
            kwargs["generation_config"].compile_config = _compile_config
    else:
        kwargs["cache_implementation"] = cache_implementation
        if cache_implementation is not None:
            kwargs["compile_config"] = _compile_config

    # Delete cached Flex Attention masks to reset inference
    for name, module in self.named_modules():
        if hasattr(module, "_flex_attention_cache"):
            try:
                del module._flex_attention_cache
            except:
                pass
        # Solves AttributeError: 'SlidingWindowLayer' object has no attribute 'max_batch_size'
        if hasattr(module, "_cache") and "cache_utils" in str(module._cache.__class__):
            try:
                del module._cache
            except:
                pass

    # DO INFERENCE
    with torch.inference_mode(), autocaster:
        output = self._old_generate(*args, **kwargs)

    # Delete cached Flex Attention masks to reset inference
    for name, module in self.named_modules():
        if hasattr(module, "_flex_attention_cache"):
            try:
                del module._flex_attention_cache
            except:
                pass
        # Solves AttributeError: 'SlidingWindowLayer' object has no attribute 'max_batch_size'
        if hasattr(module, "_cache") and "cache_utils" in str(module._cache.__class__):
            try:
                del module._cache
            except:
                pass

    # FastBaseModel.for_training(self)
    return output
class FastBaseModel:
@staticmethod
def from_pretrained(
model_name = "unsloth/Llama-3.2-1B-Instruct",
max_seq_length = 2048,
dtype = None,
load_in_4bit = True,
load_in_8bit = False,
load_in_16bit = False,
full_finetuning = False,
token = None,
device_map = "sequential",
trust_remote_code = False,
model_types = None,
tokenizer_name = None,
auto_model = AutoModelForVision2Seq,
use_gradient_checkpointing = "unsloth",
supports_sdpa = True,
whisper_language = None,
whisper_task = None,
auto_config = None,
offload_embedding = False,
float32_mixed_precision = None, # Forces float32 mixed precision
# vLLM parameters
fast_inference = False,
gpu_memory_utilization = 0.5,
float8_kv_cache = False,
random_state = 3407,
max_lora_rank = 64,
disable_log_stats = False,
unsloth_vllm_standby = False,
**kwargs,
):
if unsloth_vllm_standby and os.environ.get("UNSLOTH_VLLM_STANDBY", "0") != "1":
raise RuntimeError(
"Unsloth: UNSLOTH_VLLM_STANDBY is True, but UNSLOTH_VLLM_STANDBY is not set to 1!"
)
if model_types is None:
raise RuntimeError(
"Unsloth: Please use FastModel or FastVisionModel and not use FastBaseModel directly!"
)
if os.environ.get("UNSLOTH_MODEL_NAME", "") == "":
os.environ["UNSLOTH_MODEL_NAME"] = model_name.lower()
is_vlm = auto_model in [AutoModelForVision2Seq, AutoModelForImageTextToText]
is_whisper = whisper_language is not None and whisper_task is not None
auto_processor = AutoProcessor if (is_vlm or is_whisper) else AutoTokenizer
model_type_arch = model_types[0]
if model_type_arch == "siglip":
for model_type_arch in model_types:
if model_type_arch != "siglip":
break
vllm_enable_lora = True
if is_vlm and fast_inference:
if not any(arch in VLLM_SUPPORTED_VLM for arch in model_types):
raise RuntimeError(
f"Unsloth: Fast inference is only supported for Language models and Qwen2.5-VL, Gemma3 among vision models. "
f"Found architectures: {', '.join(model_types)}!"
)
if any(arch in VLLM_NON_LORA_VLM for arch in model_types):
# mllama is still only in vllm v0 https://arc.net/l/quote/llwkfgmu
# https://docs.vllm.ai/en/stable/models/supported_models.html#text-generation_1
# vLLM V0 does not support LoRA on multi modal models.
# TODO: Update this once vLLM V1 supports Llama 3.2 aka mllama
vllm_enable_lora = False
os.environ["UNSLOTH_USE_NEW_MODEL"] = "1"
if trust_remote_code:
print(
"Unsloth: WARNING `trust_remote_code` is True.\n"
"Are you certain you want to do remote code execution?"
)
token = hf_login(token)
SUPPORTS_BFLOAT16 = is_bfloat16_supported()
if DEVICE_TYPE == "cuda":
gpu_stats = torch.cuda.get_device_properties(0)
gpu_stats_name = (
gpu_stats.name + ". " if gpu_stats.name != "" else "NVIDIA GPU Device. "
)
gpu_version = torch.version.cuda
gpu_stats_snippet = f"CUDA: {gpu_stats.major}.{gpu_stats.minor}. CUDA Toolkit: {gpu_version}."
try:
vllm_version = f" vLLM: {importlib_version('vllm')}."
except:
vllm_version = ""
elif DEVICE_TYPE == "hip":
gpu_stats = torch.cuda.get_device_properties(0)
gpu_stats_name = (
gpu_stats.name + ". " if gpu_stats.name != "" else "AMD GPU Device. "
)
gpu_version = torch.version.hip
gpu_stats_snippet = f"ROCm Toolkit: {gpu_version}."
try:
vllm_version = f" vLLM: {importlib_version('vllm')}."
except:
vllm_version = ""
elif DEVICE_TYPE == "xpu":
gpu_stats = torch.xpu.get_device_properties(0)
gpu_stats_name = (
gpu_stats.name + ". " if gpu_stats.name != "" else "Intel XPU Device. "
)
gpu_version = torch.version.xpu
gpu_stats_snippet = f"Intel Toolkit: {gpu_version}."
# [TODO] After adding vLLM support for XPU, change this
vllm_version = ""
else:
raise ValueError(f"Unsloth: Unsupported device type: {DEVICE_TYPE}")
max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
arch_name = model_type_arch.title()
arch_name = arch_name.replace("_Vl_", "_VL_").replace("_Moe", "_MoE")
statistics = (
f"==((====))== Unsloth {__version__}: Fast {arch_name} patching. Transformers: {transformers_version}.{vllm_version}\n"
f" {chr(92)}{chr(92)} /| {gpu_stats_name}Num GPUs = {DEVICE_COUNT}. Max memory: {max_memory} GB. Platform: {platform_system}.\n"
f"O^O/ {chr(92)}_/ {chr(92)} Torch: {torch.__version__}. {gpu_stats_snippet} Triton: {triton_version}\n"
f"{chr(92)} / Bfloat16 = {str(SUPPORTS_BFLOAT16).upper()}. FA [Xformers = {xformers_version}. FA2 = {HAS_FLASH_ATTENTION}]\n"
f' "-____-" Free license: http://github.com/unslothai/unsloth'
)
print(statistics)
# Warn about fast transfers
if "HF_HUB_ENABLE_HF_TRANSFER" in os.environ:
old_hf_transfer = os.environ["HF_HUB_ENABLE_HF_TRANSFER"]
if old_hf_transfer in ("False", "false"):
old_hf_transfer = "0"
if old_hf_transfer in ("True", "true"):
old_hf_transfer = "1"
else:
old_hf_transfer = "0"
if old_hf_transfer == "1":
print(
"Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!"
)
if old_hf_transfer != "0":
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
# For debugging - we use a download counter to see if environments are not breaking or if HF is down
get_statistics(kwargs.get("local_files_only", False))
if dtype is None:
dtype = torch.float16 if not SUPPORTS_BFLOAT16 else torch.bfloat16
elif os.environ.get("UNSLOTH_FORCE_FLOAT32", "0") == "1":
if dtype == torch.float16:
dtype = torch.bfloat16
elif dtype == torch.bfloat16 and not SUPPORTS_BFLOAT16:
logger.warning_once(
"Device does not support bfloat16. Will change to float16."
)
dtype = torch.float16
assert dtype in (torch.float16, torch.bfloat16, torch.float32)
bnb_compute_dtype = dtype
do_forced_float32 = False
if os.environ.get("UNSLOTH_FORCE_FLOAT32", "0") == "1":
print(
f"Unsloth: Using float16 precision for {model_type_arch} won't work! Using float32."
)
bnb_compute_dtype = torch.float16
do_forced_float32 = True
# Check for custom data-types
custom_datatype = None
correct_dtype = None
if os.environ.get("UNSLOTH_FORCE_CUSTOM_DTYPE", "") != "":
custom_datatype = os.environ["UNSLOTH_FORCE_CUSTOM_DTYPE"]
assert custom_datatype.count(";") >= 4
checker, _dtype, _bnb_compute_dtype, _custom_datatype, execute_code = (
custom_datatype.split(";", 4)
)
# Allow custom dtypes on all runs
allow_all_runs = checker == "all"
# Allow only on float16 datatypes
allow_float16_runs = (
checker == "float16" or checker == "torch.float16"
) and (
dtype == torch.float16
or os.environ.get("UNSLOTH_FORCE_FLOAT32", "0") == "1"
)
if allow_all_runs or allow_float16_runs:
if eval(_dtype) is not None:
dtype = eval(_dtype)
if eval(_bnb_compute_dtype) is not None:
bnb_compute_dtype = eval(_bnb_compute_dtype)
correct_dtype = bnb_compute_dtype
custom_datatype = _custom_datatype
# Execute code as well
if len(execute_code.strip()) != 0:
exec(execute_code)
else:
custom_datatype = None
correct_dtype = None
# Stop SDPA for some archs like Pixtral / Mistral3
if not ("attn_implementation" in kwargs):
kwargs["attn_implementation"] = "sdpa"
if not supports_sdpa:
if os.environ.get("UNSLOTH_ENABLE_FLEX_ATTENTION", "0") == "0":
print(
f"Unsloth: {model_type_arch.title()} does not support SDPA - switching to fast eager."
)
del kwargs["attn_implementation"]
bnb_config = None
user_quantization_config = kwargs.get("quantization_config", None)
if full_finetuning and (load_in_4bit or load_in_8bit):
print(
"Unsloth: You selected full finetuning support, but 4bit / 8bit is enabled - disabling LoRA / QLoRA."
)
load_in_4bit = False
load_in_8bit = False
load_in_16bit = False
if int(load_in_4bit) + int(load_in_8bit) + int(load_in_16bit) >= 2:
raise RuntimeError(
"Unsloth: Can only load in 4bit or 8bit or 16bit, not a combination!"
)
if load_in_4bit:
bnb_config = BitsAndBytesConfig(
load_in_4bit = True,
bnb_4bit_use_double_quant = True,
bnb_4bit_quant_type = "nf4",
bnb_4bit_compute_dtype = bnb_compute_dtype,
llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES.copy(),
)
elif load_in_8bit:
bnb_config = BitsAndBytesConfig(
load_in_8bit = True,
llm_int8_skip_modules = SKIP_QUANTIZATION_MODULES.copy(),
)
elif load_in_16bit:
bnb_config = None
elif not load_in_4bit and not load_in_8bit and not full_finetuning:
print(
"Unsloth: QLoRA and full finetuning all not selected. Switching to 16bit LoRA."
)
if full_finetuning:
os.environ["UNSLOTH_ENABLE_FULL_FINETUNING"] = "1"
if dtype == torch.bfloat16:
if float32_mixed_precision != True:
print(
f"Unsloth: Using bfloat16 full finetuning which cuts memory usage by 50%.\n"
f"To enable float32 training, use `float32_mixed_precision = True` during FastLanguageModel.from_pretrained"
)
else:
print(
f"Unsloth: Using full float32 full finetuning. "
f"To enable bfloat16 training to reduce VRAM usage by 50% albeit with a slightly higher loss, do:\n"
"use `float32_mixed_precision = False` during FastLanguageModel.from_pretrained"
)
os.environ["UNSLOTH_BFLOAT16_MIXED_PRECISION"] = "1"
else:
print(
"Unsloth: Float16 full finetuning uses more memory since we upcast weights to float32."
)
else:
os.environ["UNSLOTH_ENABLE_FULL_FINETUNING"] = "0"
# Fix AttributeError: 'BitsAndBytesConfig' object has no attribute 'get_loading_attributes'
if bnb_config is not None and not hasattr(bnb_config, "get_loading_attributes"):
bnb_config.get_loading_attributes = lambda *args, **kwargs: {}
# Cannot be None, since HF now checks for the config
if load_in_4bit or load_in_8bit:
# Ignore load_in_4bit / load_in_8bit for MXFP4 - best to get config file
if (
"gpt-oss-20b" in model_name.lower()
or "gpt-oss-120b" in model_name.lower()
):
pass
else:
if user_quantization_config is None:
kwargs["quantization_config"] = bnb_config
else:
if auto_config is None:
auto_config = AutoConfig.from_pretrained(
model_name,
token = token,
trust_remote_code = trust_remote_code,
)
if hasattr(auto_config, "quantization_config"):
from transformers.quantizers.auto import (
AUTO_QUANTIZATION_CONFIG_MAPPING,
)
quantization_config = auto_config.quantization_config
quant_method = quantization_config["quant_method"]
# Sometimes bitsandbytes_4bit + bitsandbytes_8bit is provided
if (
quant_method == "bitsandbytes"
and "bitsandbytes" not in AUTO_QUANTIZATION_CONFIG_MAPPING
):
if "bitsandbytes_4bit" not in AUTO_QUANTIZATION_CONFIG_MAPPING:
raise KeyError(
"Unsloth: AUTO_QUANTIZATION_CONFIG_MAPPING does not have `bitsandbytes_4bit`"
)
quantizer = AUTO_QUANTIZATION_CONFIG_MAPPING["bitsandbytes_4bit"]
else:
quantizer = AUTO_QUANTIZATION_CONFIG_MAPPING[quant_method]
quantizer_kwargs = {}
if quant_method == "compressed-tensors":
# Ignore these
pass
else:
# We cannot dequantize since gpt-oss-20b MXFP4 will now be gpt-oss-20b-BF16
if (
load_in_16bit
and "dequantize" in inspect.signature(quantizer).parameters
):
quantizer_kwargs["dequantize"] = True
try:
# Sometimes this fails so we wrap it in a try except
quantization_config = quantizer.from_dict(
quantization_config, **quantizer_kwargs
)
except:
pass
if user_quantization_config is None:
kwargs["quantization_config"] = quantization_config
# Check if using forced float32 - we load it in bfloat16, then cast to float16!
torch_dtype = dtype
if do_forced_float32:
torch_dtype = torch.bfloat16
kwargs = add_dtype_kwargs(torch_dtype, kwargs)
model_config = AutoConfig.from_pretrained(
model_name,
token = token,
attn_implementation = "sdpa" if supports_sdpa else "eager",
trust_remote_code = trust_remote_code,
)
verify_fp8_support_if_applicable(model_config)
raise_handler = RaiseUninitialized()
if not fast_inference:
# Prevent load_in_fp8 from being forwarded into HF internal model loading
load_in_fp8 = kwargs.pop("load_in_fp8", None)
model = auto_model.from_pretrained(
model_name,
device_map = device_map,
# torch_dtype = torch_dtype, # Transformers removed torch_dtype
# quantization_config = bnb_config,
token = token,
trust_remote_code = trust_remote_code,
# attn_implementation = attn_implementation,
**kwargs,
)
if hasattr(model, "generate"):
model.fast_generate = make_fast_generate_wrapper(model.generate)
model.fast_generate_batches = error_out_no_vllm
if offload_embedding:
if bool(
os.environ.get("WSL_DISTRO_NAME") or os.environ.get("WSL_INTEROP")
):
# WSL doesn't work with offloaded embeddings
pass
elif os.name == "nt":
# Windows doesn't work with offloaded embeddings
pass
else:
embed_tokens = model.get_input_embeddings()
nbytes = embed_tokens.weight.numel() * embed_tokens.weight.itemsize
ngb = round(nbytes / 1024 / 1024 / 1024, 2)
print(f"Unsloth: Offloading embeddings to RAM to save {ngb} GB.")
embed_tokens.to("cpu")
# Add hooks to move inputs to CPU and back to CUDA
# [TODO] Doesn't seem to work!
# def pre_hook(module, args):
# args[0]._old_device = args[0].device
# return (args[0].to("cpu", non_blocking = True))
# def post_hook(module, args, output):
# old_device = getattr(args[0], "_old_device", "cuda")
# return output.to(old_device, non_blocking = True)
# embed_tokens.register_forward_pre_hook(pre_hook, prepend = True)
# embed_tokens.register_forward_hook (post_hook, prepend = True)
# Must free GPU memory otherwise will not free!
torch.cuda.empty_cache()
gc.collect()
else:
from unsloth_zoo.vllm_utils import (
load_vllm,
get_vllm_state_dict,
convert_vllm_to_huggingface,
generate_batches,
get_lora_supported_ranks,
)
if full_finetuning:
max_lora_rank = max(get_lora_supported_ranks())
raise NotImplementedError(
"Unsloth: `fast_inference=True` cannot be used together with `full_finetuning=True`.\n"
"Reason: fast_inference is optimized for inference-only workflows and "
"does not currently support full fine-tuning.\n"
"Workaround: disable fast_inference, or use parameter-efficient fine-tuning "
f"(e.g. LoRA with rank r={max_lora_rank})."
)
model_config.model_name = model_name
if fast_inference:
fast_inference, model_name = fast_inference_setup(
model_name, model_config
)
allowed_args = inspect.getfullargspec(load_vllm).args
load_vllm_kwargs = dict(
model_name = model_name,
config = model_config,
gpu_memory_utilization = gpu_memory_utilization,
max_seq_length = max_seq_length,
dtype = dtype,
float8_kv_cache = float8_kv_cache,
enable_lora = vllm_enable_lora,
max_lora_rank = max_lora_rank,
disable_log_stats = disable_log_stats,
use_bitsandbytes = load_in_4bit,
unsloth_vllm_standby = unsloth_vllm_standby,
is_vision_model = is_vlm,
)
for allowed_arg in allowed_args:
if allowed_arg not in load_vllm_kwargs and allowed_arg in kwargs:
load_vllm_kwargs[allowed_arg] = kwargs[allowed_arg]
# Load vLLM first
llm = load_vllm(**load_vllm_kwargs)
# Convert to HF format
_, quant_state_dict = get_vllm_state_dict(
llm,
config = model_config,
is_vision_model = is_vlm,
)
model = convert_vllm_to_huggingface(
quant_state_dict,
model_config,
dtype,
bnb_config,
is_vision_model = is_vlm,
)
model.vllm_engine = llm
model.fast_generate = model.vllm_engine.generate
model.fast_generate_batches = functools.partial(
generate_batches, model.vllm_engine
)
raise_handler.remove()
# Return old flag
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = old_hf_transfer
# Check float32 norm weights
if os.environ.get("UNSLOTH_HIGH_PRECISION_LAYERNORM", "0") == "1":
for jj, (name, module) in enumerate(model.named_modules()):
if (
name.endswith(("norm", "norm1", "norm2", "norm3", "norm4"))
or "layernorm" in name
or "layer_norm" in name
) and hasattr(module, "weight"):
module._pre_set_compute_dtype = torch.float32
# Edit data-types
if custom_datatype is not None:
with torch.no_grad():
for jj, (name, module) in enumerate(model.named_modules()):
exec(custom_datatype)
# Clear deleted GPU items
for _ in range(3):
gc.collect()
if DEVICE_TYPE in ("cuda", "hip"):
torch.cuda.empty_cache()
elif DEVICE_TYPE == "xpu":
torch.xpu.empty_cache()
# Counteract saved tokenizers
tokenizer_name = model_name if tokenizer_name is None else tokenizer_name
if (whisper_language and whisper_task) or auto_model.__name__.endswith(
"ForConditionalGeneration"
):
tokenizer = auto_processor.from_pretrained(
tokenizer_name,
padding_side = "left",
token = token,
language = whisper_language,
task = whisper_task,
trust_remote_code = trust_remote_code,
)
else:
try:
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | true |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/models/loader.py | unsloth/models/loader.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._utils import (
_prepare_model_for_qat,
is_bfloat16_supported,
is_vLLM_available,
HAS_FLASH_ATTENTION,
HAS_FLASH_ATTENTION_SOFTCAPPING,
USE_MODELSCOPE,
get_transformers_model_type,
hf_login,
)
from .granite import FastGraniteModel
from .llama import FastLlamaModel, logger
from .mistral import FastMistralModel
from .qwen2 import FastQwen2Model
from .qwen3 import FastQwen3Model
from .qwen3_moe import FastQwen3MoeModel
from .cohere import FastCohereModel
from transformers import AutoConfig
from transformers import __version__ as transformers_version
from peft import PeftConfig, PeftModel
from .loader_utils import (
_get_fp8_mode_and_check_settings,
_offline_quantize_to_fp8,
_tag_model_with_fp8_torchao_config,
get_model_name,
)
import os, contextlib, sys
try:
from huggingface_hub import get_token
except:
try:
from huggingface_hub.utils import get_token
except:
# For older versions of huggingface_hub
from huggingface_hub.utils._token import get_token
from huggingface_hub import HfFileSystem
import importlib.util
from ..device_type import (
is_hip,
get_device_type,
DEVICE_TYPE,
DEVICE_TYPE_TORCH,
DEVICE_COUNT,
ALLOW_PREQUANTIZED_MODELS,
ALLOW_BITSANDBYTES,
)
# https://github.com/huggingface/transformers/pull/26037 allows 4 bit loading!
from unsloth_zoo.utils import Version, _get_dtype
from unsloth_zoo.hf_utils import dtype_from_config
from unsloth_zoo.tiled_mlp import patch_tiled_mlp
# Parse the installed transformers version once, then derive feature gates
# used throughout this module to decide which architectures can be loaded.
transformers_version = Version(transformers_version)
SUPPORTS_FOURBIT = transformers_version >= Version("4.37")
SUPPORTS_GEMMA = transformers_version >= Version("4.38")
SUPPORTS_GEMMA2 = transformers_version >= Version("4.42")
SUPPORTS_LLAMA31 = transformers_version >= Version("4.43.2")
# NOTE: strict `>` here (not `>=`), unlike the other gates.
SUPPORTS_LLAMA32 = transformers_version > Version("4.45.0")
SUPPORTS_GRANITE = transformers_version >= Version("4.46.0")
SUPPORTS_QWEN3 = transformers_version >= Version("4.50.3")
SUPPORTS_QWEN3_MOE = transformers_version >= Version("4.50.3")
SUPPORTS_FALCON_H1 = transformers_version >= Version("4.53.0")
SUPPORTS_GEMMA3N = transformers_version >= Version("4.53.0")
SUPPORTS_GPTOSS = transformers_version >= Version("4.55.0")
if SUPPORTS_GEMMA:
from .gemma import FastGemmaModel
if SUPPORTS_GEMMA2:
from .gemma2 import FastGemma2Model
if SUPPORTS_FALCON_H1:
from .falcon_h1 import FastFalconH1Model
import torch
from ._utils import (
patch_compiling_bitsandbytes,
patch_model_and_tokenizer,
prepare_model_for_kbit_training,
patch_unsloth_smart_gradient_checkpointing,
patch_compiled_autograd,
process_vision_info,
unsloth_compile_transformers,
fast_inference_setup,
)
# Module-level model-name lists consulted by the loaders below.
# (The previous `global` statements were removed: `global` at module scope is
# a no-op, since module scope already *is* the global scope.)
# Forces float32 precision since float16 goes to infinity
FORCE_FLOAT32 = [
    "gemma3,", # Add comma bc gemma3 will match gemma3n
    "gemma3n",
    "gpt_oss",
]
# Must be alphabetically sorted for each entry
DISABLE_COMPILE_MODEL_NAMES = [
    "aya_vision",
    "modernbert",
    "granite,llava_next", # Granite-vision 3
]
# Disables some SDPA modules since it's wrong
DISABLE_SDPA_MODEL_NAMES = [
    "gemma3,", # Add comma bc gemma3 will match gemma3n
]
class FastLanguageModel(FastLlamaModel):
    """
    Entry point for loading Unsloth-optimized text-only language models.

    `from_pretrained` inspects the checkpoint's config, then dispatches to the
    matching Fast*Model patcher (Llama, Mistral, Gemma, Gemma2, Qwen2, Qwen3);
    unrecognized architectures, 8-bit loading, full finetuning and QAT are all
    delegated to the generic `FastModel.from_pretrained`.
    """
    @staticmethod
    def from_pretrained(
        model_name = "unsloth/Llama-3.2-1B-Instruct",
        max_seq_length = 2048,
        dtype = None,
        load_in_4bit = True, # 4bit QLoRA
        load_in_8bit = False, # 8bit LoRA
        load_in_16bit = False, # 16bit LoRA
        full_finetuning = False,
        token = None,
        device_map = "sequential",
        rope_scaling = None,
        fix_tokenizer = True,
        trust_remote_code = False,
        use_gradient_checkpointing = "unsloth",
        resize_model_vocab = None,
        revision = None,
        use_exact_model_name = False,
        offload_embedding = False,
        float32_mixed_precision = None, # Forces float32 mixed precision
        fast_inference = False, # uses vLLM
        gpu_memory_utilization = 0.5,
        float8_kv_cache = False,
        random_state = 3407,
        max_lora_rank = 64,
        disable_log_stats = True,
        qat_scheme = None,
        load_in_fp8 = False, # fp8 LoRA (True, False, 'block')
        unsloth_tiled_mlp = False,
        *args,
        **kwargs,
    ):
        """
        Load `(model, tokenizer)` with Unsloth's speed/memory patches applied.

        Quantization flags (`load_in_4bit` / `load_in_8bit` / `load_in_16bit` /
        `load_in_fp8`) are reconciled with any user-supplied
        `quantization_config` in kwargs; `load_in_fp8` currently requires
        `fast_inference = True` (vLLM). If `model_name` points at a PEFT
        adapter repo, the base model is loaded first and the adapter attached
        afterwards. Raises ImportError for architectures newer than the
        installed transformers version supports.
        """
        # Respect user-provided quantization_config (e.g. BitsAndBytesConfig)
        quantization_config = kwargs.get("quantization_config", None)
        if quantization_config is not None:
            if isinstance(quantization_config, dict):
                q_load_in_4bit = quantization_config.get("load_in_4bit", False)
                q_load_in_8bit = quantization_config.get("load_in_8bit", False)
            else:
                q_load_in_4bit = getattr(quantization_config, "load_in_4bit", False)
                q_load_in_8bit = getattr(quantization_config, "load_in_8bit", False)
            # The explicit quantization_config wins over the boolean flags.
            # NOTE: if both are somehow set, 8bit is applied last and wins.
            if q_load_in_4bit:
                load_in_4bit = True
                load_in_8bit = False
            if q_load_in_8bit:
                load_in_8bit = True
                load_in_4bit = False
        # Login to allow private models
        token = hf_login(token)
        # Align dtype with bnb_4bit_compute_dtype if provided and dtype is unset.
        if dtype is None and quantization_config is not None:
            bnb_compute_dtype = None
            if isinstance(quantization_config, dict):
                if quantization_config.get("load_in_4bit", False):
                    bnb_compute_dtype = quantization_config.get(
                        "bnb_4bit_compute_dtype", None
                    )
            else:
                if getattr(quantization_config, "load_in_4bit", False):
                    bnb_compute_dtype = getattr(
                        quantization_config, "bnb_4bit_compute_dtype", None
                    )
            # Accept either a string like "bfloat16" or a torch.dtype.
            if isinstance(bnb_compute_dtype, str):
                bnb_compute_dtype = getattr(torch, bnb_compute_dtype, None)
            if isinstance(bnb_compute_dtype, torch.dtype):
                dtype = bnb_compute_dtype
        # These paths are handled by the generic FastModel loader instead.
        if load_in_8bit or full_finetuning or qat_scheme is not None:
            return FastModel.from_pretrained(
                model_name = model_name,
                max_seq_length = max_seq_length,
                dtype = dtype,
                load_in_4bit = load_in_4bit,
                load_in_8bit = load_in_8bit,
                load_in_16bit = load_in_16bit,
                full_finetuning = full_finetuning,
                token = token,
                device_map = device_map,
                rope_scaling = rope_scaling, # [TODO] No effect
                fix_tokenizer = fix_tokenizer, # [TODO] No effect
                trust_remote_code = trust_remote_code,
                use_gradient_checkpointing = use_gradient_checkpointing,
                resize_model_vocab = resize_model_vocab, # [TODO] No effect
                revision = revision,
                return_logits = False, # Return logits
                fullgraph = True, # No graph breaks
                use_exact_model_name = use_exact_model_name,
                offload_embedding = offload_embedding,
                float32_mixed_precision = float32_mixed_precision,
                # Pass vLLM/inference parameters
                fast_inference = fast_inference,
                gpu_memory_utilization = gpu_memory_utilization,
                float8_kv_cache = float8_kv_cache,
                random_state = random_state,
                max_lora_rank = max_lora_rank,
                disable_log_stats = disable_log_stats,
                qat_scheme = qat_scheme,
                load_in_fp8 = load_in_fp8,
                unsloth_tiled_mlp = unsloth_tiled_mlp,
                *args,
                **kwargs,
            )
        if isinstance(dtype, str) and dtype in ["float16", "bfloat16"]:
            dtype = getattr(torch, dtype)
        assert (
            dtype is None
            or dtype == torch.float16
            or dtype == torch.bfloat16
            or dtype == torch.float32
        )
        if fast_inference:
            if importlib.util.find_spec("vllm") is None:
                raise ImportError(
                    "Unsloth: Please install vLLM before enabling `fast_inference`!\n"
                    "You can do this in a terminal via `pip install vllm`"
                )
            if DEVICE_TYPE_TORCH == "cuda":
                for i in range(DEVICE_COUNT):
                    # [TODO] DGX Spark vLLM breaks
                    if "NVIDIA GB10" in str(torch.cuda.get_device_name(i)).upper():
                        print(
                            "Unsloth: DGX Spark detected - `fast_inference=True` is currently broken as of January 2026.\n"
                            "Defaulting to native Unsloth inference."
                        )
                        fast_inference = False
                        break
        # [TODO] For now fast_inference only works with fast_inference ie vLLM
        if load_in_fp8 != False:
            if not fast_inference:
                raise NotImplementedError(
                    "Unsloth: set `fast_inference = True` when doing `load_in_fp8`."
                )
        # Check if 4bit is allowed specifically for AMD
        if not ALLOW_BITSANDBYTES and not use_exact_model_name:
            if load_in_4bit or load_in_8bit or model_name.lower().endswith("-bnb-4bit"):
                print(
                    "Unsloth: AMD currently is not stable with 4bit bitsandbytes. Disabling for now."
                )
                load_in_4bit = False
        # Find FP8, BnB 4bit, other mapped names
        # `old_model_name` keeps the user's original name for tokenizer lookup
        # and PEFT adapter loading further below.
        old_model_name = model_name
        fp8_mode = None
        if not use_exact_model_name:
            new_model_name = get_model_name(
                model_name, load_in_4bit = load_in_4bit, load_in_fp8 = load_in_fp8
            )
            if new_model_name is None and load_in_fp8 != False:
                # No pre-made FP8 mapping exists: quantize offline instead.
                fp8_mode = _get_fp8_mode_and_check_settings(
                    load_in_fp8,
                    fast_inference,
                    full_finetuning,
                    load_in_4bit,
                    load_in_8bit,
                    load_in_16bit,
                    use_exact_model_name,
                )
                model_name = _offline_quantize_to_fp8(model_name, fp8_mode)
            else:
                assert new_model_name is not None
                model_name = new_model_name
        # Check if pre-quantized models are allowed
        # For eg AMD Instinct GPUs need blocksize = 128, but our pre-quants are blocksize = 64
        if not ALLOW_PREQUANTIZED_MODELS and model_name.lower().endswith(
            ("-unsloth-bnb-4bit", "-bnb-4bit")
        ):
            model_name = model_name.lower().removesuffix("-unsloth-bnb-4bit")
            model_name = model_name.lower().removesuffix("-bnb-4bit")
        # Change -BF16 to all False for 4bit, 8bit etc
        if model_name.lower().endswith("-bf16"):
            load_in_4bit = False
            load_in_8bit = False
            load_in_fp8 = False
            load_in_16bit = True
        if USE_MODELSCOPE and not os.path.exists(model_name):
            from modelscope import snapshot_download
            model_name = snapshot_download(model_name)
        # First check if it's a normal model via AutoConfig
        from huggingface_hub.utils import (
            disable_progress_bars,
            enable_progress_bars,
            are_progress_bars_disabled,
        )
        # Silence HF progress bars during the config probing; restored later
        # only if they were enabled when we started.
        was_disabled = are_progress_bars_disabled()
        disable_progress_bars()
        autoconfig_error = None
        peft_error = None
        model_config = None
        peft_config = None
        try:
            model_config = AutoConfig.from_pretrained(
                model_name,
                token = token,
                revision = revision,
                trust_remote_code = trust_remote_code,
            )
            is_model = True
        except ImportError:
            raise
        except Exception as error:
            autoconfig_error = str(error)
            if "architecture" in autoconfig_error:
                raise ValueError(
                    f"`{model_name}` is not supported yet in `transformers=={transformers_version}`.\n"
                    f"Please update transformers via `pip install --upgrade transformers` and try again."
                )
            is_model = False
        try:
            peft_config = PeftConfig.from_pretrained(
                model_name,
                token = token,
                revision = revision,
                trust_remote_code = trust_remote_code,
            )
            is_peft = True
        except ImportError:
            raise
        except Exception as error:
            peft_error = str(error)
            if "architecture" in peft_error:
                raise ValueError(
                    f"`{model_name}` is not supported yet in `transformers=={transformers_version}`.\n"
                    f"Please update transformers via `pip install --upgrade transformers` and try again."
                )
            is_peft = False
        # Old transformers versions check
        both_exist = (is_model and is_peft) and not SUPPORTS_LLAMA32
        # Error out if both LoRA and normal model config exists.
        if both_exist:
            raise RuntimeError(
                "Unsloth: Your repo has a LoRA adapter and a base model.\n"
                "You have 2 files `config.json` and `adapter_config.json`.\n"
                "We must only allow one config file.\n"
                "Please separate the LoRA and base models to 2 repos."
            )
        model_types = get_transformers_model_type(
            peft_config if peft_config is not None else model_config,
            trust_remote_code = trust_remote_code,
        )
        if len(model_types) == 1:
            model_type = model_types[0]
        else:
            # Leave as tuple if more than one arch
            model_type = model_types
        # New transformers need to check manually.
        if SUPPORTS_LLAMA32:
            # Check if folder exists locally
            if os.path.isdir(model_name):
                exist_adapter_config = os.path.exists(
                    os.path.join(model_name, "adapter_config.json")
                )
                exist_config = os.path.exists(os.path.join(model_name, "config.json"))
                both_exist = exist_adapter_config and exist_config
            else:
                # Because HfFileSystem assumes linux paths, we need to set the path with forward slashes, even on Windows.
                files = HfFileSystem(token = token).glob(f"{model_name}/*.json")
                files = list(os.path.split(x)[-1] for x in files)
                if (
                    sum(x == "adapter_config.json" or x == "config.json" for x in files)
                    >= 2
                ):
                    both_exist = True
        if not is_model and not is_peft:
            error = autoconfig_error if autoconfig_error is not None else peft_error
            # Old transformers version
            if "rope_scaling" in error.lower() and not SUPPORTS_LLAMA31:
                raise ImportError(
                    f"Unsloth: Your transformers version of {transformers_version} does not support new RoPE scaling methods.\n"
                    f"This includes Llama 3.1. The minimum required version is 4.43.2\n"
                    f'Try `pip install --upgrade "transformers>=4.43.2"`\n'
                    f"to obtain the latest transformers build, then restart this session."
                )
            # Create a combined error message showing both failures
            combined_error = (
                "Unsloth: Failed to load model. Both AutoConfig and PeftConfig loading failed.\n\n"
                f"AutoConfig error: {autoconfig_error}\n\n"
                f"PeftConfig error: {peft_error}\n\n"
            )
            raise RuntimeError(combined_error)
        # Get base model for PEFT:
        if is_peft:
            # Check base model again for PEFT
            model_name = peft_config.base_model_name_or_path
            if not use_exact_model_name:
                model_name = get_model_name(model_name, load_in_4bit)
            # Check if pre-quantized models are allowed
            # For eg AMD Instinct GPUs need blocksize = 128, but our pre-quants are blocksize = 64
            if not ALLOW_PREQUANTIZED_MODELS and model_name.lower().endswith(
                ("-unsloth-bnb-4bit", "-bnb-4bit")
            ):
                model_name = model_name.lower().removesuffix("-unsloth-bnb-4bit")
                model_name = model_name.lower().removesuffix("-bnb-4bit")
            # Change -BF16 to all False for 4bit, 8bit etc
            if model_name.lower().endswith("-bf16"):
                load_in_4bit = False
                load_in_8bit = False
                load_in_fp8 = False
                load_in_16bit = True
            model_config = AutoConfig.from_pretrained(
                model_name,
                token = token,
                trust_remote_code = trust_remote_code,
            )
        if not was_disabled:
            enable_progress_bars()
        # Dispatch to the architecture-specific fast model patcher.
        if model_type == "llama":
            scaling_type = None
            if getattr(model_config, "rope_scaling", None) is not None:
                # "type" (old key) vs "rope_type" (new key) — accept either.
                scaling_type1 = model_config.rope_scaling.get("type", None)
                scaling_type2 = model_config.rope_scaling.get("rope_type", None)
                scaling_type = (
                    scaling_type1 if scaling_type1 is not None else scaling_type2
                )
            if scaling_type == "llama3" and not SUPPORTS_LLAMA31:
                raise ImportError(
                    f"Unsloth: Your transformers version of {transformers_version} does not support Llama 3.1.\n"
                    f"The minimum required version is 4.43.2\n"
                    f'Try `pip install --upgrade "transformers>=4.43.2"`\n'
                    f"to obtain the latest transformers build, then restart this session."
                )
            dispatch_model = FastLlamaModel
        elif model_type == "mistral":
            dispatch_model = FastMistralModel
        elif model_type == "gemma":
            if not SUPPORTS_GEMMA:
                raise ImportError(
                    f"Unsloth: Your transformers version of {transformers_version} does not support Gemma.\n"
                    f"The minimum required version is 4.38.\n"
                    f'Try `pip install --upgrade "transformers>=4.38"`\n'
                    f"to obtain the latest transformers build, then restart this session."
                )
            dispatch_model = FastGemmaModel
        elif model_type == "gemma2":
            if not SUPPORTS_GEMMA2:
                raise ImportError(
                    f"Unsloth: Your transformers version of {transformers_version} does not support Gemma2.\n"
                    f"The minimum required version is 4.42.3.\n"
                    f'Try `pip install --upgrade "transformers>=4.42.3"`\n'
                    f"to obtain the latest transformers build, then restart this session."
                )
            # Also check for softcapping support in flash-attn which is faster!
            if is_bfloat16_supported() and not HAS_FLASH_ATTENTION:
                print(
                    "Unsloth: If you want to finetune Gemma 2, install flash-attn to make it faster!\n"
                    "To install flash-attn, do the below:\n"
                    '\npip install --no-deps --upgrade "flash-attn>=2.6.3"'
                )
            elif HAS_FLASH_ATTENTION and not HAS_FLASH_ATTENTION_SOFTCAPPING:
                print(
                    "Unsloth: If you want to finetune Gemma 2, upgrade flash-attn to version 2.6.3 or higher!\n"
                    "Newer versions support faster and less memory usage kernels for Gemma 2's attention softcapping!\n"
                    "To update flash-attn, do the below:\n"
                    '\npip install --no-deps --upgrade "flash-attn>=2.6.3"'
                )
            dispatch_model = FastGemma2Model
        elif model_type == "qwen2":
            dispatch_model = FastQwen2Model
        elif model_type == "qwen3": # or model_type == "qwen3_moe":
            if not SUPPORTS_QWEN3 or not SUPPORTS_QWEN3_MOE:
                raise ImportError(
                    f"Unsloth: Your transformers version of {transformers_version} does not support Qwen3.\n"
                    f"The minimum required version is 4.50.3.\n"
                    f'Try `pip install --upgrade "transformers>=4.50.3"`\n'
                    f"to obtain the latest transformers build, then restart this session."
                )
            dispatch_model = (
                FastQwen3Model if model_type == "qwen3" else FastQwen3MoeModel
            )
        # elif model_type == "falcon_h1":
        #     dispatch_model = FastFalconH1Model
        #     if not SUPPORTS_FALCON_H1:
        #         raise ImportError(
        #             f"Unsloth: Your transformers version of {transformers_version} does not support FalconH1.\n"\
        #             f"The minimum required version is 4.50.3.\n"\
        #             f'Try `pip install --upgrade "transformers>=4.50.3"`\n'\
        #             f"to obtain the latest transformers build, then restart this session."\
        #         )
        # Temporary disable optimized Cohere until errors match
        # elif model_type == "cohere":
        #     dispatch_model = FastCohereModel
        # Temporary disable optimized Granite until errors match
        # elif model_type == "granite":
        #     dispatch_model = FastGraniteModel
        else:
            # Unknown architecture: fall back to the generic loader with the
            # original (unmapped) model name.
            return FastModel.from_pretrained(
                model_name = old_model_name,
                max_seq_length = max_seq_length,
                dtype = dtype,
                load_in_4bit = load_in_4bit,
                load_in_8bit = load_in_8bit,
                load_in_16bit = load_in_16bit,
                full_finetuning = full_finetuning,
                token = token,
                device_map = device_map,
                rope_scaling = rope_scaling, # [TODO] No effect
                fix_tokenizer = fix_tokenizer, # [TODO] No effect
                trust_remote_code = trust_remote_code,
                use_gradient_checkpointing = use_gradient_checkpointing,
                resize_model_vocab = resize_model_vocab, # [TODO] No effect
                revision = revision,
                return_logits = False, # Return logits
                fullgraph = True, # No graph breaks
                use_exact_model_name = use_exact_model_name,
                offload_embedding = offload_embedding,
                float32_mixed_precision = float32_mixed_precision,
                # Pass vLLM/inference parameters
                fast_inference = fast_inference,
                gpu_memory_utilization = gpu_memory_utilization,
                float8_kv_cache = float8_kv_cache,
                random_state = random_state,
                max_lora_rank = max_lora_rank,
                disable_log_stats = disable_log_stats,
                qat_scheme = qat_scheme,
                load_in_fp8 = load_in_fp8,
                unsloth_tiled_mlp = unsloth_tiled_mlp,
                *args,
                **kwargs,
            )
        if use_gradient_checkpointing == "unsloth":
            patch_unsloth_smart_gradient_checkpointing(dtype = dtype)
        # Check if this is local model since the tokenizer gets overwritten
        if (
            os.path.exists(os.path.join(old_model_name, "tokenizer_config.json"))
            and os.path.exists(os.path.join(old_model_name, "tokenizer.json"))
            and os.path.exists(os.path.join(old_model_name, "special_tokens_map.json"))
        ):
            tokenizer_name = old_model_name
        else:
            tokenizer_name = kwargs.pop("tokenizer_name", None)
        if fast_inference:
            fast_inference, model_name = fast_inference_setup(model_name, model_config)
        # A user quantization_config (without vLLM) supersedes the boolean
        # flags passed down to the dispatch loader.
        load_in_4bit_kwargs = load_in_4bit
        load_in_8bit_kwargs = load_in_8bit
        if quantization_config is not None and not fast_inference:
            load_in_4bit_kwargs = False
            load_in_8bit_kwargs = False
        model, tokenizer = dispatch_model.from_pretrained(
            model_name = model_name,
            max_seq_length = max_seq_length,
            dtype = _get_dtype(dtype),
            load_in_4bit = load_in_4bit_kwargs,
            token = token,
            device_map = device_map,
            rope_scaling = rope_scaling,
            fix_tokenizer = fix_tokenizer,
            model_patcher = dispatch_model,
            tokenizer_name = tokenizer_name,
            trust_remote_code = trust_remote_code,
            revision = revision if not is_peft else None,
            fast_inference = fast_inference,
            gpu_memory_utilization = gpu_memory_utilization,
            float8_kv_cache = float8_kv_cache,
            random_state = random_state,
            max_lora_rank = max_lora_rank,
            disable_log_stats = disable_log_stats,
            *args,
            **kwargs,
        )
        if resize_model_vocab is not None:
            model.resize_token_embeddings(resize_model_vocab)
        # In case the model supports tagging, add the unsloth tag.
        if hasattr(model, "add_model_tags"):
            model.add_model_tags(
                [
                    "unsloth",
                ]
            )
        if hasattr(tokenizer, "add_model_tags"):
            tokenizer.add_model_tags(
                [
                    "unsloth",
                ]
            )
        if load_in_4bit:
            # Fix up bitsandbytes config, but respect user-provided quantization_config
            if quantization_config is None:
                compute_dtype = dtype_from_config(model.config)
                quantization_config = {
                    # Sometimes compute_dtype is not a string!!
                    "bnb_4bit_compute_dtype": compute_dtype,
                    "bnb_4bit_quant_type": "nf4",
                    "bnb_4bit_use_double_quant": True,
                    "llm_int8_enable_fp32_cpu_offload": False,
                    "llm_int8_has_fp16_weight": False,
                    "llm_int8_skip_modules": None,
                    "llm_int8_threshold": 6.0,
                    "load_in_4bit": True,
                    "load_in_8bit": False,
                    "quant_method": "bitsandbytes",
                }
                model.config.update({"quantization_config": quantization_config})
            else:
                if hasattr(quantization_config, "to_dict"):
                    model.config.update(
                        {"quantization_config": quantization_config.to_dict()}
                    )
                elif isinstance(quantization_config, dict):
                    model.config.update({"quantization_config": quantization_config})
        if load_in_fp8 != False:
            _tag_model_with_fp8_torchao_config(model, fp8_mode)
        if is_peft:
            # From https://github.com/huggingface/peft/issues/184
            # Now add PEFT adapters
            model = PeftModel.from_pretrained(
                model,
                old_model_name,
                token = token,
                revision = revision,
                is_trainable = True,
                trust_remote_code = trust_remote_code,
            )
            # Patch it as well!
            model = dispatch_model.patch_peft_model(model, use_gradient_checkpointing)
        # Patch Tiled MLP
        # to turn on set UNSLOTH_TILED_MLP to "arctic", "target", or "target:{GB}""
        patch_tiled_mlp_choice = os.environ.get(
            "UNSLOTH_TILED_MLP", "arctic" if unsloth_tiled_mlp else "0"
        )
        if patch_tiled_mlp_choice != "0" or unsloth_tiled_mlp:
            patch_tiled_mlp(model, patch_options_str = patch_tiled_mlp_choice)
        return model, tokenizer
from ..kernels import (
patch_loss_functions,
post_patch_loss_function,
)
from .vision import FastBaseModel
from transformers import (
AutoModelForCausalLM,
)
try:
from transformers import AutoModelForImageTextToText
AutoModelForVision2Seq = AutoModelForImageTextToText
except:
from transformers import AutoModelForVision2Seq
class FastModel(FastBaseModel):
@staticmethod
def _prepare_for_qat(model, qat_scheme):
model = _prepare_model_for_qat(model, qat_scheme)
return model
@staticmethod
def from_pretrained(
model_name = "unsloth/Llama-3.2-11B-Vision-Instruct-bnb-4bit",
max_seq_length = 2048,
dtype = None,
load_in_4bit = True, # 4bit QLoRA
load_in_8bit = False, # 8bit LoRA
load_in_16bit = False, # 16bit LoRA
full_finetuning = False,
token = None,
device_map = "sequential",
rope_scaling = None, # [TODO] No effect
fix_tokenizer = True, # [TODO] No effect
trust_remote_code = False,
use_gradient_checkpointing = "unsloth",
resize_model_vocab = None, # [TODO] No effect
revision = None,
return_logits = False, # Return logits
fullgraph = True, # No graph breaks
use_exact_model_name = False,
auto_model = None,
whisper_language = None,
whisper_task = None,
unsloth_force_compile = False,
offload_embedding = False,
float32_mixed_precision = None, # Forces float32 mixed precision
# Add the missing vLLM/inference parameters
fast_inference = False, # uses vLLM
gpu_memory_utilization = 0.5,
float8_kv_cache = False,
random_state = 3407,
max_lora_rank = 64,
disable_log_stats = True,
qat_scheme = None,
load_in_fp8 = False, # fp8 LoRA (True, False, 'block')
unsloth_tiled_mlp = False,
*args,
**kwargs,
):
# Respect user-provided quantization_config (e.g. BitsAndBytesConfig)
quantization_config = kwargs.get("quantization_config", None)
if quantization_config is not None:
if isinstance(quantization_config, dict):
q_load_in_4bit = quantization_config.get("load_in_4bit", False)
q_load_in_8bit = quantization_config.get("load_in_8bit", False)
else:
q_load_in_4bit = getattr(quantization_config, "load_in_4bit", False)
q_load_in_8bit = getattr(quantization_config, "load_in_8bit", False)
if q_load_in_4bit:
load_in_4bit = True
load_in_8bit = False
if q_load_in_8bit:
load_in_8bit = True
load_in_4bit = False
# Login to allow private models
token = hf_login(token)
if whisper_language is not None:
assert type(whisper_language) is str
if whisper_task is not None:
assert type(whisper_task) is str
# Align dtype with bnb_4bit_compute_dtype if provided and dtype is unset.
if dtype is None and quantization_config is not None:
bnb_compute_dtype = None
if isinstance(quantization_config, dict):
if quantization_config.get("load_in_4bit", False):
bnb_compute_dtype = quantization_config.get(
"bnb_4bit_compute_dtype", None
)
else:
if getattr(quantization_config, "load_in_4bit", False):
bnb_compute_dtype = getattr(
quantization_config, "bnb_4bit_compute_dtype", None
)
if isinstance(bnb_compute_dtype, str):
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | true |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/models/llama.py | unsloth/models/llama.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import gc
import math
import functools
from typing import Optional, Tuple, List, Union
from ._utils import *
from ._utils import patch_unsloth_smart_gradient_checkpointing
from ._utils import __version__, importlib_version
from ._utils import move_to_device
from ._utils import (
_get_inference_mode_context_manager,
_prepare_model_for_qat,
)
from ..utils.packing import (
get_packed_info_from_kwargs,
mask_packed_sequence_boundaries,
)
from ..utils.attention_dispatch import (
AttentionConfig,
AttentionContext,
run_attention,
select_attention_backend,
)
from torch.nn.functional import scaled_dot_product_attention
from transformers import __version__ as transformers_version
from unsloth_zoo.utils import Version, _get_dtype
from unsloth_zoo.hf_utils import (
dtype_from_config,
add_dtype_kwargs,
fix_lora_auto_mapping,
)
from unsloth_zoo.peft_utils import SKIP_QUANTIZATION_MODULES
from ..device_type import (
is_hip,
get_device_type,
DEVICE_TYPE,
DEVICE_TYPE_TORCH,
DEVICE_COUNT,
ALLOW_PREQUANTIZED_MODELS,
)
# Parse the transformers version string once for the comparisons below.
transformers_version = Version(transformers_version)
# Transformers moved rotary embeddings out of all attention layers
IS_ATTENTION_REFACTOR = transformers_version > Version("4.47.1")
try:
    from transformers.modeling_layers import GradientCheckpointingLayer
except:
    # Older transformers lack this class; use NoneType as a harmless stand-in
    # so isinstance checks against it stay valid and simply never match modules.
    GradientCheckpointingLayer = type(None)
from transformers.models.llama.modeling_llama import (
logger,
BaseModelOutputWithPast,
CausalLMOutputWithPast,
)
from transformers.modeling_attn_mask_utils import (
_prepare_4d_causal_attention_mask_for_sdpa,
)
from ..kernels import *
from ..tokenizer_utils import *
from .vision import FastBaseModel
# Final patching code
from transformers.models.llama.modeling_llama import (
LlamaAttention,
LlamaDecoderLayer,
LlamaModel,
LlamaForCausalLM,
)
# For Pytorch 2.1.1
try:
from transformers.models.llama.modeling_llama import (
LlamaSdpaAttention,
LlamaFlashAttention2,
)
except:
LlamaSdpaAttention = LlamaAttention
LlamaFlashAttention2 = LlamaAttention
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
AutoModelForSequenceClassification,
BitsAndBytesConfig,
AutoConfig,
)
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING
from transformers import set_seed as transformers_set_seed
from peft import LoraConfig, TaskType, get_peft_model as _get_peft_model
from peft import PeftModelForCausalLM, PeftModelForSequenceClassification
from ..save import patch_saving_functions
import re, os, inspect, math, sys
import types
try:
from huggingface_hub.utils import get_token
except:
# Old HF Hub versions <= 0.0.25
from huggingface_hub.utils._token import get_token
from triton import __version__ as triton_version
# xformers is optional; keep a flag plus the causal-mask class (or None).
HAS_XFORMERS = xformers is not None
BlockDiagonalCausalMask = (
    xformers.attn_bias.BlockDiagonalCausalMask if HAS_XFORMERS else None
)
# Bind device-specific cache/device helpers once at import time so callers
# don't branch on DEVICE_TYPE in hot paths.
if DEVICE_TYPE == "xpu":
    clean_gpu_cache = torch.xpu.empty_cache
    get_current_device = torch.xpu.current_device
else:
    clean_gpu_cache = torch.cuda.empty_cache
    get_current_device = torch.cuda.current_device
def original_apply_qkv(self, X):
    """Unfused reference path: project X through the q/k/v linear layers."""
    return self.q_proj(X), self.k_proj(X), self.v_proj(X)
def original_apply_o(self, X):
    """Unfused reference path: apply the attention output projection."""
    return self.o_proj(X)
from math import sqrt as math_sqrt
# Grow the paged KV cache in chunks of this many positions.
KV_CACHE_INCREMENT = 512 # KV Cache update size
# Local alias to avoid repeated attribute lookups in inference loops.
torch_nn_functional_softmax = torch.nn.functional.softmax
# SDPA has GQA internally
# Feature-detect via the docstring since older torch lacks `enable_gqa`.
SDPA_HAS_GQA = "enable_gqa" in scaled_dot_product_attention.__doc__
# Fix new HF's inference code
def _fast_prepare_inputs_for_generation(
    self,
    input_ids,
    attention_mask = None,
    **kwargs,
):
    """
    Replacement for HF's `prepare_inputs_for_generation` used during fast decoding.

    When a non-empty KV cache is present, trims `input_ids` to the last token
    and rebuilds the 4D causal attention mask via the base model's
    `_prepare_4d_causal_attention_mask_with_cache_position` (handling the
    transformers versions where it does / does not take a `device` kwarg).
    Returns the dict of model inputs for the next forward pass.
    """
    past_key_values = kwargs.get("past_key_values", None)
    if past_key_values is not None:
        # Check for uninitialized DynamicCache
        if len(past_key_values) == 0:
            past_key_values = None
            kwargs["past_key_values"] = None
        # New since 4.56
        elif (
            hasattr(past_key_values, "get_seq_length")
            and past_key_values.get_seq_length() == 0
        ):
            past_key_values = None
            kwargs["past_key_values"] = None
        else:
            # Decode step: only the newest token is fed to the model.
            bs, cache_length = input_ids.shape
            input_ids = input_ids[:, [-1]]
            # Get to the base model
            base_model = self
            if hasattr(base_model, "base_model_prefix"):
                base_model = getattr(base_model, base_model.base_model_prefix)
            if hasattr(
                base_model, "_prepare_4d_causal_attention_mask_with_cache_position"
            ):
                def needs_device_kw(fn) -> bool:
                    # Inspect the (unwrapped) signature for a `device` param.
                    try:
                        sig = inspect.signature(inspect.unwrap(fn))
                        return "device" in sig.parameters
                    except:
                        # transformers <= 4.51.3 includes device arg but > 4.51.3 does not
                        return transformers_version < Version("4.52.0")
                # NOTE: `kwargs` is rebound here, so the dict returned at the
                # end of this function carries these mask-builder arguments
                # rather than the original generation kwargs.
                kwargs = {
                    "sequence_length": 1,
                    "target_length": cache_length,
                    "dtype": self.dtype,
                    "cache_position": torch.arange(
                        cache_length, cache_length + 1, device = input_ids.device
                    ),
                    "batch_size": bs,
                    "config": self.config,
                    "past_key_values": past_key_values,
                }
                try:
                    if needs_device_kw(
                        base_model._prepare_4d_causal_attention_mask_with_cache_position
                    ):
                        kwargs["device"] = input_ids.device
                except:
                    print(
                        f"Unsloth: Could not inspect signature of {base_model._prepare_4d_causal_attention_mask_with_cache_position}"
                    )
                attention_mask = (
                    base_model._prepare_4d_causal_attention_mask_with_cache_position(
                        attention_mask,
                        **kwargs,
                    )
                )
            else:
                # No helper available: fall back to slicing the mask's last column.
                attention_mask = attention_mask[:, [-1]]
                if transformers_version <= Version("4.52.4"):
                    logger.warning_once(
                        f"{self.__class__.__name__} has no `_prepare_4d_causal_attention_mask_with_cache_position` method "
                        "defined in its base modeling class. Compiled forward passes will be sub-optimal. If you're "
                        "writing code, see Llama for an example implementation. If you're a user, please report this "
                        "issue on GitHub."
                    )
    # Mirror cache_position into position_ids when present.
    if "cache_position" in kwargs:
        kwargs["position_ids"] = kwargs["cache_position"]
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        **kwargs,
    }
def fix_prepare_inputs_for_generation(module):
    """Swap in Unsloth's fast `prepare_inputs_for_generation` on `module`, if it has one."""
    if not hasattr(module, "prepare_inputs_for_generation"):
        return
    module.prepare_inputs_for_generation = _fast_prepare_inputs_for_generation
torch_matmul = torch.matmul # local alias: skips attribute lookups in hot inference loops
def LlamaAttention_fast_forward_inference(
    self,
    hidden_states: torch.Tensor,
    past_key_value: Optional[Tuple[torch.Tensor]],
    position_ids,
    do_prefill = False,
    attention_mask = None,
):
    """
    https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L406
    Fast single-token inference using a KV cache.

    QK^T can be computed in 4 chunks:
    [Q, q] @ [K, k].T where q, k are the new tokens.
        [QK^T, Qk^T]
        [qK^T, qk^T]
    Since the attention mask wipes Qk^T, we just get
        [QK^T,    0]
        [qK^T, qk^T]
    Since softmax is row-wise, we get
        softmax([QK^T,    0])
        softmax([qK^T, qk^T])
    We then multiply by [V]
                        [v]
        softmax([QK^T,    0]) [softmax(QK^T)V] *
        softmax([qK^T, qk^T]) [softmax([qK^T, qk^T]) @ [V, v]]
    But notice * [softmax(QK^T)V] is just the last attention.
    We just need to compute the last final row.
    This means we can pass in a row of Q, but we need to
    remember K and V, which are called the KV cache.

    With `do_prefill=True` this also allocates persistent scratch buffers on
    `self` (paged KV cache, projection/output temporaries) that are reused on
    every later decode step. Returns `(attn_output, (K, V))` where the K/V are
    views into the paged cache covering the new sequence length.
    """
    Xn = hidden_states
    bsz, _, hd = hidden_states.size()
    K1, V1 = past_key_value
    dtype = Xn.dtype
    n_heads = self.config.num_attention_heads
    n_groups = self.num_key_value_groups
    n_kv_heads = self.config.num_key_value_heads
    head_dim = self.head_dim
    # assert(n_kv_heads * n_groups == n_heads)
    hidden_size = self.config.hidden_size
    attention_size = n_heads * head_dim
    seq_len = K1.shape[-2]
    kv_seq_len = seq_len + 1
    # Prefill phase
    # if not hasattr(self, "paged_attention"):
    device = hidden_states.device
    if do_prefill:
        # Allocate the paged KV cache once, with KV_CACHE_INCREMENT slack so it
        # does not need resizing on every decoded token. Layout is
        # (positions, K/V, batch, kv_heads, head_dim) so a single new token is
        # written with one indexed store.
        self.paged_attention = torch.empty(
            (KV_CACHE_INCREMENT + seq_len + 1, 2, bsz, n_kv_heads, head_dim),
            dtype = dtype,
            device = device,
        )
        self.paged_attention_K = self.paged_attention[:, 0]
        self.paged_attention_V = self.paged_attention[:, 1]
        self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3)
        self.paged_attention_V[:seq_len] = V1.permute(2, 0, 1, 3)
        # Scratch buffers for the Q/K/V/O projections of the single new token.
        self.temp_QA = torch.empty(
            (2, bsz, 1, attention_size), dtype = dtype, device = device
        )
        self.temp_KV = torch.empty(
            (2, bsz, 1, n_kv_heads * head_dim), dtype = dtype, device = device
        )
        self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = device)
        # Mistral Nemo 12b has weird dimensions
        if attention_size != hidden_size:
            self.temp_O = torch.empty((1, bsz, hidden_size), dtype = dtype, device = device)
        else:
            self.temp_O = self.temp_QA[1][:, :, :hidden_size]
        self.attention = torch.empty(
            (bsz, n_heads, 1, KV_CACHE_INCREMENT + seq_len), dtype = dtype, device = device
        )
        self.scalar = 1.0 / math_sqrt(self.head_dim)
        self.half_head_dim = head_dim // 2
    elif kv_seq_len >= self.paged_attention.shape[0]:
        # Cache is full: grow it (and the attention-score buffer) in place.
        self.paged_attention.resize_(
            (
                self.paged_attention.shape[0] + KV_CACHE_INCREMENT,
                2,
                bsz,
                n_kv_heads,
                head_dim,
            )
        )
        self.paged_attention_K = self.paged_attention[:, 0]
        self.paged_attention_V = self.paged_attention[:, 1]
        self.attention.resize_(
            (bsz, n_heads, 1, self.attention.shape[-1] + KV_CACHE_INCREMENT)
        )
    # Project the single new token, writing into the preallocated buffers.
    Qn = fast_linear_forward(self.q_proj, Xn, out = self.temp_QA[0])
    Kn = fast_linear_forward(self.k_proj, Xn, out = self.temp_KV[0])
    Vn = fast_linear_forward(self.v_proj, Xn, out = self.temp_KV[1])
    Qn = Qn.view(bsz, 1, n_heads, head_dim).transpose(1, 2)
    Kn = Kn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2)
    Vn = Vn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2)
    # cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len)
    # Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids)
    # Need to do it prior 2 steps before hitting full on short KV cache
    # or else error
    self.rotary_emb.extend_rope_embedding(Vn, seq_len + 2)
    cos, sin = self.rotary_emb.get_cached(kv_seq_len, Qn.device.index)
    cos = cos[position_ids].unsqueeze(1)
    sin = sin[position_ids].unsqueeze(1)
    h = self.half_head_dim
    # In-place RoPE: build rotate_half(Q) in the RH_Q scratch buffer, then
    # Qn = Qn * cos + rotate_half(Qn) * sin via addcmul_.
    RH_Q = self.RH_Q
    RH_Q[:, :, :, :h] = Qn[:, :, :, h:]
    RH_Q[:, :, :, h:] = Qn[:, :, :, :h]
    RH_Q[:, :, :, :h].neg_()  # torch.neg(RH_Q[:,:,:,:h], out = RH_Q[:,:,:,:h])
    Qn *= cos
    Qn.addcmul_(RH_Q, sin)
    # Same rotation for K, reusing the front n_kv_heads slice of RH_Q.
    RH_K = RH_Q[
        :, :n_kv_heads, :, :
    ]  # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, device = "cuda:0")
    RH_K[:, :, :, :h] = Kn[:, :, :, h:]
    RH_K[:, :, :, h:] = Kn[:, :, :, :h]
    RH_K[:, :, :, :h].neg_()  # torch.neg(RH_K[:,:,:,:h], out = RH_K[:,:,:,:h])
    Kn *= cos
    Kn.addcmul_(RH_K, sin)
    # New KV cache: append the rotated K and the V for this token, then view
    # the cache back in (batch, kv_heads, positions, head_dim) order.
    # Kn = torch.cat([K1, Kn], dim = 2)
    # Vn = torch.cat([V1, Vn], dim = 2)
    self.paged_attention_K[seq_len] = Kn.permute(2, 0, 1, 3)
    self.paged_attention_V[seq_len] = Vn.permute(2, 0, 1, 3)
    Kn = self.paged_attention_K[:kv_seq_len].permute(1, 2, 0, 3)
    Vn = self.paged_attention_V[:kv_seq_len].permute(1, 2, 0, 3)
    # Handle sliding windows: attend only to the trailing window of the cache.
    sliding_window = getattr(self.config, "sliding_window", None)
    if sliding_window is not None and kv_seq_len > sliding_window:
        # From https://github.com/huggingface/transformers/blob/main/src/transformers/models/mistral/modeling_mistral.py#L193
        slicing_tokens = 1 - sliding_window
        Knn = Kn[:, :, slicing_tokens:, :]  # .contiguous()
        Vnn = Vn[:, :, slicing_tokens:, :]  # .contiguous()
    else:
        Knn, Vnn = Kn, Vn
    # Grouped query attention: expand K/V to the full head count when we take
    # the manual matmul path (bsz == 1) or when SDPA cannot do GQA natively.
    # Parses as `bsz == 1 or ((not SDPA_HAS_GQA) and n_groups != 1)`.
    _, _, cached_len, _ = Knn.shape
    if bsz == 1 or not SDPA_HAS_GQA and n_groups != 1:
        Knn = Knn[:, :, None, :, :].expand(
            bsz, n_kv_heads, n_groups, cached_len, head_dim
        )
        Vnn = Vnn[:, :, None, :, :].expand(
            bsz, n_kv_heads, n_groups, cached_len, head_dim
        )
        Knn = Knn.reshape(bsz, n_heads, cached_len, head_dim)
        Vnn = Vnn.reshape(bsz, n_heads, cached_len, head_dim)
    # else:
    #     Knn, Vnn = Knn, Vnn
    # pass
    # when qlen==vlen and attn_mask is None, we should use causal attention
    Q_len = Qn.shape[-2]
    K_len = Knn.shape[-2]
    if attention_mask is None and Q_len == K_len:
        is_causal = True
    else:
        is_causal = False
    # Attention: manual matmul + softmax for bsz == 1 (reuses scratch buffers),
    # SDPA otherwise.
    if bsz == 1:
        Qn *= self.scalar  # See https://github.com/ggerganov/llama.cpp/issues/7805#issuecomment-2153349963
        # It seems like doing (Q * scalar) @ K is better than (Q @ K) * scalar to stop overflows
        A = torch_matmul(
            Qn, Knn.transpose(2, 3), out = self.attention[:, :, :, :cached_len]
        )
        # if attention_mask is not None: A += attention_mask # Must add attention_mask for batched
        # Softmax computed in fp32 for stability, written back in-place.
        A[:] = torch_nn_functional_softmax(
            A, dim = -1, dtype = torch.float32
        )  # .to(A.dtype)
        A = torch_matmul(A, Vnn, out = Qn)
    else:
        if SDPA_HAS_GQA:
            A = scaled_dot_product_attention(
                Qn,
                Knn,
                Vnn,
                attn_mask = attention_mask,
                is_causal = is_causal,
                enable_gqa = True,
            )
        else:
            A = scaled_dot_product_attention(
                Qn, Knn, Vnn, attn_mask = attention_mask, is_causal = is_causal
            )
    # (bsz, n_heads, 1, head_dim) -> (bsz, 1, n_heads * head_dim) -> o_proj.
    A = A.transpose(1, 2)
    A = A.reshape(bsz, 1, attention_size)
    A = fast_linear_forward(self.o_proj, A, out = self.temp_O)
    return A, (Kn, Vn)
torch_nn_functional_silu = torch.nn.functional.silu
def fast_swiglu_inference(
    self, X, temp_gate = None, temp_up = None, gate_multiplier = None, down_multiplier = None
):
    """SwiGLU MLP for single-token inference: down(silu(gate(X)) * up(X)).

    `temp_gate` / `temp_up` are optional preallocated output buffers for the
    two projections; `gate_multiplier` scales the gate pre-activation and
    `down_multiplier` scales the final output when given.
    """
    _, _, hidden = X.shape
    # Gate projection (optionally scaled before the activation).
    g = fast_linear_forward(self.gate_proj, X, out = temp_gate)
    if gate_multiplier is not None:
        g *= gate_multiplier
    # Up projection.
    u = fast_linear_forward(self.up_proj, X, out = temp_up)
    # SiLU in-place on the gate buffer, then fuse with the up projection.
    g = torch_nn_functional_silu(g, inplace = True)
    g *= u
    # Down projection reuses the front `hidden` columns of the up buffer.
    out = fast_linear_forward(self.down_proj, g, out = u[:, :, :hidden])
    if down_multiplier is not None:
        out *= down_multiplier
    return out
torch_square = torch.square  # module-level aliases: cheaper lookups in the per-token decode path
torch_mean = torch.mean
def fast_rms_layernorm_inference(self, X, XX = None, XX2 = None, variance = None):
    """RMS layernorm for single-token inference.

    Two modes:
      * Scratch mode (``XX``, ``XX2``, ``variance`` supplied): all work happens
        in-place inside the caller's fp32 buffers and the result is written
        back into ``X`` (which is mutated and returned).
      * Out-of-place mode (no buffers): fresh fp32 temporaries are used and a
        NEW tensor in ``X``'s original dtype is returned; ``X`` is untouched.

    Bug fix: the original tested ``XX is None`` again at the end, but ``XX``
    had already been rebound in the first branch, so that test was always
    False and the out-of-place path fell through to ``X.copy_(XX)`` — mutating
    the caller's tensor (which aliases ``residual`` in the decoder layers).
    We latch the caller's choice before rebinding instead.
    """
    old_dtype = X.dtype
    use_scratch = XX is not None  # must be captured BEFORE XX is rebound below
    if use_scratch:
        XX.copy_(X)
        torch_mean(torch_square(XX, out = XX2), -1, keepdim = True, out = variance)
    else:
        # copy = True guarantees a fresh tensor even when X is already fp32,
        # so the in-place ops below never touch the caller's tensor.
        XX = X.to(torch.float32, copy = True)
        variance = XX.square().mean(-1, keepdim = True)
    variance += self.variance_epsilon
    XX *= variance.rsqrt_()
    if use_scratch:
        X.copy_(XX)
    else:
        X = XX.to(old_dtype)
    X *= self.weight
    return X
def fast_rms_layernorm_inference_gemma(self, X, out_weight = None):
    """Gemma-style RMS layernorm for inference.

    Normalizes in fp32 and scales by ``weight + 1`` (Gemma's convention),
    returning the result in ``X``'s dtype. ``out_weight`` is an optional
    scratch buffer that receives ``weight + 1`` to avoid a fresh allocation.
    """
    # Upcast for a numerically stable RMS statistic.
    hidden = X.to(torch.float32)
    inv_rms = torch.rsqrt(
        hidden.square().mean(-1, keepdim = True) + self.variance_epsilon
    )
    hidden *= inv_rms
    # Gemma scales by (weight + 1); reuse the caller's buffer when given.
    if out_weight is None:
        out_weight = self.weight + 1.0
    else:
        out_weight[:] = self.weight
        out_weight += 1.0
    hidden *= out_weight
    return hidden.to(X.dtype)
# Normal layernorm with mean removal
@torch.compile(fullgraph = False, dynamic = True, options = torch_compile_options)
def fast_layernorm_compiled(layernorm, X):
    """Standard (mean-removing) layernorm, computed in fp32 and compiled."""
    input_dtype = X.dtype
    X = X.float()
    # Center, normalize by the centered second moment, then apply the weight.
    centered = X - X.mean(-1, keepdim = True)
    inv_std = torch.rsqrt(
        centered.square().mean(-1, keepdim = True) + layernorm.variance_epsilon
    )
    return (centered * inv_std * layernorm.weight.float()).to(input_dtype)
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L320
def LlamaAttention_fast_forward(
    self,
    hidden_states: torch.Tensor,
    causal_mask: Optional[BlockDiagonalCausalMask] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: bool = False,
    use_cache: bool = False,
    padding_mask: Optional[torch.LongTensor] = None,
    position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    *args,
    **kwargs,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
    """Fast attention forward for the training / prefill path.

    Projects Q/K/V, applies RoPE, optionally appends to the (K, V) tuple
    cache, then dispatches to the selected attention backend. Returns
    ``(attn_output, None, past_key_value)`` — attention weights are never
    materialized here.
    """
    # Clear inference scratch buffers left behind by a previous generation run.
    if hasattr(self, "paged_attention"):
        del self.paged_attention_K
        del self.paged_attention_V
        del self.paged_attention
        del self.temp_QA
        del self.temp_KV
        del self.RH_Q
        del self.attention
    bsz, q_len, _ = hidden_states.size()
    n_heads = self.config.num_attention_heads
    n_groups = self.num_key_value_groups
    n_kv_heads = self.config.num_key_value_heads
    head_dim = self.head_dim
    assert n_kv_heads * n_groups == n_heads
    Q, K, V = self.apply_qkv(self, hidden_states)
    Q = Q.view(bsz, q_len, n_heads, head_dim).transpose(1, 2)
    K = K.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2)
    V = V.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2)
    # Packed-sequence metadata (sequence lengths) if the batch was packed.
    seq_info = get_packed_info_from_kwargs(kwargs, Q.device)
    kv_seq_len = K.shape[-2]
    if past_key_value is not None:
        kv_seq_len += past_key_value[0].shape[-2]
    # Use caller-provided cos/sin if they cover the full KV length, otherwise
    # extend the cached RoPE tables and fetch from them.
    if position_embeddings and kv_seq_len <= position_embeddings[0].shape[0]:
        cos, sin = position_embeddings
    else:
        rotary_emb = self.rotary_emb
        rotary_emb.extend_rope_embedding(V, seq_len = kv_seq_len)
        cos, sin = rotary_emb.get_cached(kv_seq_len, Q.device.index)
    rope_position_ids = position_ids
    # For packed batches, fall back to the raw kwargs position_ids so each
    # packed sub-sequence is rotated from its own offset.
    if rope_position_ids is None and seq_info is not None:
        rope_position_ids = kwargs.get("position_ids")
    # Q, K = (
    #     fast_rope_embedding(Q, K, cos, sin)
    #     if rope_position_ids is None
    #     else inplace_rope_embedding(Q, K, cos, sin, rope_position_ids)
    # )
    Q, K = fast_rope_embedding(Q, K, cos, sin, rope_position_ids)
    # Append to the tuple-style KV cache when continuing from a prefix.
    if past_key_value is not None:
        K = torch.cat([past_key_value[0], K], dim = 2)
        V = torch.cat([past_key_value[1], V], dim = 2)
    past_key_value = (K, V) if use_cache else None
    # Attention module: varlen kernels only apply to packed, cache-free batches.
    use_varlen = seq_info is not None and past_key_value is None
    backend = select_attention_backend(use_varlen)
    config = AttentionConfig(
        backend = backend,
        n_kv_heads = n_kv_heads,
        n_groups = n_groups,
        flash_dense_kwargs = {"causal": True},
        flash_varlen_kwargs = {"dropout_p": 0.0, "causal": True},
    )
    context = AttentionContext(
        bsz = bsz,
        q_len = q_len,
        kv_seq_len = kv_seq_len,
        n_heads = n_heads,
        head_dim = head_dim,
        requires_grad = hidden_states.requires_grad,
        seq_info = seq_info,
        attention_mask = attention_mask,
        causal_mask = causal_mask,
    )
    A = run_attention(config = config, context = context, Q = Q, K = K, V = V)
    attn_output = A.reshape(bsz, q_len, n_heads * head_dim)
    attn_output = self.apply_o(self, attn_output)
    attn_weights = None
    return attn_output, attn_weights, past_key_value
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L590
def LlamaDecoderLayer_fast_forward(
    self,
    hidden_states: torch.Tensor,
    causal_mask = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: Optional[bool] = False,
    use_cache: Optional[bool] = False,
    padding_mask: Optional[torch.LongTensor] = None,
    position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    *args,
    **kwargs,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
    """
    Args:
        hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
        attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
            `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under
            returned tensors for more detail.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states

    Two paths: a token-by-token generation fast path (inference layernorm /
    SwiGLU kernels with in-place residual adds) when `_flag_for_generation`
    is set, and the standard training/prefill path otherwise.
    """
    # Generation fast path: in-place inference kernels.
    if use_cache and hasattr(self, "_flag_for_generation"):
        residual = hidden_states
        # NOTE(review): `residual` aliases `hidden_states`, so this relies on
        # fast_rms_layernorm_inference not mutating its input in place when
        # called without scratch buffers — confirm.
        hidden_states = fast_rms_layernorm_inference(
            self.input_layernorm, hidden_states
        )
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states = hidden_states,
            causal_mask = causal_mask,
            attention_mask = attention_mask,
            position_ids = position_ids,
            past_key_value = past_key_value,
            output_attentions = output_attentions,
            use_cache = use_cache,
            padding_mask = padding_mask,
            position_embeddings = position_embeddings,
            **kwargs,
        )
        hidden_states += residual
        # Fully Connected
        residual = hidden_states
        hidden_states = fast_rms_layernorm_inference(
            self.post_attention_layernorm, hidden_states
        )
        hidden_states = fast_swiglu_inference(self.mlp, hidden_states)
        hidden_states += residual
    else:
        # Training / prefill path: out-of-place ops preserve autograd graphs.
        residual = hidden_states
        hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states)
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states = hidden_states,
            causal_mask = causal_mask,
            attention_mask = attention_mask,
            position_ids = position_ids,
            past_key_value = past_key_value,
            output_attentions = output_attentions,
            use_cache = use_cache,
            padding_mask = padding_mask,
            position_embeddings = position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states
        # Fully Connected
        residual = hidden_states
        hidden_states = fast_rms_layernorm(self.post_attention_layernorm, hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
    outputs = (hidden_states,)
    if output_attentions:
        outputs += (self_attn_weights,)
    if use_cache:
        outputs += (present_key_value,)
    return outputs
# https://github.com/unslothai/unsloth/issues/404#issuecomment-2323473452
# Maps both string names and torch.dtype objects to the canonical torch.dtype,
# so config-provided dtypes can be looked up regardless of representation.
__DTYPE_MAP = {
    "float32": torch.float32,
    torch.float32: torch.float32,
    "float16": torch.float16,
    torch.float16: torch.float16,
    "bfloat16": torch.bfloat16,
    torch.bfloat16: torch.bfloat16,
}
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825
def LlamaModel_fast_forward(
self,
input_ids: Optional[torch.LongTensor] = None,
causal_mask: Optional[BlockDiagonalCausalMask] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
*args,
**kwargs,
) -> Union[Tuple, BaseModelOutputWithPast]:
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
assert output_attentions is False
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"Unsloth: You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time"
)
elif input_ids is not None:
batch_size, seq_length = input_ids.shape
elif inputs_embeds is not None:
batch_size, seq_length, _ = inputs_embeds.shape
else:
raise ValueError(
"Unsloth: You have to specify either decoder_input_ids or decoder_inputs_embeds"
)
seq_length_with_past = seq_length
# Fix out of bounds tokenization unless we were given packed metadata
allow_overlength = getattr(self, "_unsloth_allow_packed_overlength", False) or (
"packed_seq_lengths" in kwargs
)
if hasattr(self, "max_seq_length") and not allow_overlength:
if seq_length > self.max_seq_length:
shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
logger.warning_once(
f"Unsloth: Input IDs of shape {shape} with length {seq_length} > the model's max sequence length of {self.max_seq_length}.\n"
"We shall truncate it ourselves. It's imperative if you correct this issue first."
)
if input_ids is not None:
input_ids = input_ids[:, : self.max_seq_length]
elif inputs_embeds is not None:
inputs_embeds = inputs_embeds[:, : self.max_seq_length, :]
past_key_values_length = 0
if past_key_values is not None:
past_key_values_length = past_key_values[0][0].shape[2]
seq_length_with_past = seq_length_with_past + past_key_values_length
# We already handle KV cache position_ids ourselves.
if False: # (past_key_values_length != 0):
position_ids = torch.arange(
past_key_values_length,
seq_length + past_key_values_length,
dtype = torch.int32,
device = f"{DEVICE_TYPE_TORCH}:0",
)
position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
elif position_ids is not None:
position_ids = position_ids.view(-1, seq_length).to(torch.int32) # .long()
else:
position_ids = None
if position_ids is not None:
if position_ids.shape[0] != batch_size:
position_ids = position_ids.repeat((batch_size, 1))
# Embed positions
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
inputs_embeds = inputs_embeds.to(_get_dtype(dtype_from_config(self.config)))
# Normalized from Gemma
IS_GEMMA = self.config.model_type.startswith("gemma")
IS_GEMMA2 = self.config.model_type.startswith("gemma2")
IS_COHERE = self.config.model_type.startswith("cohere")
IS_GRANITE = self.config.model_type.startswith("granite")
IS_FALCON_H1 = self.config.model_type.startswith("falcon_h1")
train_embed_tokens = self.embed_tokens.weight.requires_grad
if IS_GEMMA:
# Match Gemma exactly by casting to bfloat16 / float16
# inputs_embeds *= math_sqrt(self.config.hidden_size)
# Ie 3072**0.5 = 55.5000 in bfloat16, whilst 55.4256 in float32
# & 2048**0.5 = 45.2500 in bfloat16, whilst 45.2548 in float32
normalizer = torch.tensor(
math_sqrt(self.config.hidden_size), dtype = inputs_embeds.dtype
)
if train_embed_tokens:
# Careful we must not do an inplace op!
inputs_embeds = inputs_embeds * normalizer
else:
inputs_requires_grad = inputs_embeds.requires_grad
if not inputs_embeds.is_leaf:
inputs_embeds = inputs_embeds.detach()
inputs_requires_grad = True
elif inputs_requires_grad:
inputs_embeds.requires_grad_(False)
inputs_embeds *= normalizer
# inputs_embeds *= math_sqrt(self.config.hidden_size)
if inputs_requires_grad:
inputs_embeds.requires_grad_(True)
# Fix up attention mask by setting elements to 0
# Specifically for DPO
if (
getattr(self, "_has_no_labels", False) is True
and (attention_mask is not None)
and (past_key_values is None)
and (not train_embed_tokens)
and self.training
):
# Careful for inference the attention_mask is size (1, kv_seq_len)
# Whilst the input_embeds is size (1, 1, 4096)
inputs_requires_grad = inputs_embeds.requires_grad
if not inputs_embeds.is_leaf:
inputs_embeds = inputs_embeds.detach()
inputs_requires_grad = True
elif inputs_requires_grad:
inputs_embeds.requires_grad_(False)
attention_mask = attention_mask[:, : self.max_seq_length] # Must resize!
inputs_embeds *= attention_mask.unsqueeze(0).transpose(0, 1).transpose(1, 2)
if inputs_requires_grad:
inputs_embeds.requires_grad_(True)
# Ignore attention_mask
if attention_mask is None:
padding_mask = None
elif self.training:
attention_mask = None
padding_mask = None
else:
# if 0 in attention_mask:
# padding_mask = attention_mask
# else:
padding_mask = None
attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
attention_mask,
(batch_size, seq_length),
inputs_embeds,
past_key_values_length,
sliding_window = getattr(self.config, "sliding_window", None),
)
# Must NOT convert to bool - weirdly this causes stuff to error out!
# if attention_mask is not None:
# attention_mask = attention_mask.to(torch.bool)
hidden_states = inputs_embeds
if IS_GRANITE or IS_FALCON_H1: # granite has embedding multiplier
hidden_states = self.config.embedding_multiplier * hidden_states
if past_key_values is None and self.training:
use_cache = False
# if use_cache:
# logger.warning_once(
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | true |
# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .llama import *
import os
from ._utils import __version__
from .llama import (
LlamaRotaryEmbedding,
LlamaLinearScalingRotaryEmbedding,
)
from .qwen3 import (
Qwen3Attention_fast_forward,
FastQwen3Model,
)
from transformers.models.qwen3_moe.modeling_qwen3_moe import (
Qwen3MoeAttention,
Qwen3MoeSparseMoeBlock,
Qwen3MoeMLP,
Qwen3MoeDecoderLayer,
Qwen3MoeModel,
Qwen3MoeForCausalLM,
)
# For Pytorch 2.1.1
# TODO: Transformers moved to `attention_interface`. So we might not need these anymore
# try:
# from transformers.models.qwen3_moe.modeling_qwen3_moe import (
# Qwen3SdpaAttention,
# Qwen3FlashAttention2,
# )
# except:
# Qwen3SdpaAttention = Qwen3Attention
# Qwen3FlashAttention2 = Qwen3Attention
# pass
from unsloth_zoo.utils import Version, _get_dtype
torch_nn_functional_softmax = torch.nn.functional.softmax
def Qwen3MoeSparseMoeBlock_fast_forward(self, X, temp_gate = None, temp_up = None):
    # adapted from https://github.com/huggingface/transformers/pull/36878/files#diff-0855b77fc27ad9449158a1c74953f909b011c00de7125f7c8e68d0ff209c092aR356-R370
    """Sparse MoE block: route each token to its top-k experts and sum the
    weighted expert outputs.

    Returns ``(final_X, router_logits)`` where ``final_X`` is accumulated in
    fp32 (no cast back to ``X.dtype`` before returning).
    """
    bsz, seq_len, hd = X.shape
    X = X.view(-1, hd)  # flatten to (tokens, hidden) for per-token routing
    router_logits = fast_linear_forward(
        self.gate_proj, X, out = temp_gate
    )  # pretty much the only change from transformers implementation.
    # Router probabilities in fp32, then keep only the top-k and renormalize.
    routing_weights = torch_nn_functional_softmax(
        router_logits, dim = -1, dtype = torch.float32
    )
    routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim = -1)
    routing_weights /= routing_weights.sum(dim = -1, keepdim = True)
    # we cast back to the input dtype
    routing_weights = routing_weights.to(X.dtype)
    # NOTE(review): final_X is fp32 while the index_add_ source below is cast
    # to X.dtype — index_add_ requires matching dtypes, so confirm this path
    # is only reached with fp32 hidden states (or cast the source to fp32).
    final_X = torch.zeros((bsz * seq_len, hd), dtype = torch.float32, device = X.device)
    # One hot encode the selected experts to create an expert mask
    # this will be used to easily index which expert is going to be sollicitated
    expert_mask = torch.nn.functional.one_hot(
        selected_experts, num_classes = self.num_experts
    ).permute(2, 1, 0)
    # Loop over all available experts in the model and perform the computation on each expert
    for expert_idx in range(self.num_experts):
        expert_layer = self.experts[expert_idx]
        # top_x: which tokens chose this expert; idx: at which top-k slot.
        idx, top_x = torch.where(expert_mask[expert_idx])
        # Index the correct hidden states and compute the expert hidden state for
        # the current expert. We need to make sure to multiply the output hidden
        # states by `routing_weights` on the corresponding tokens (top-1 and top-2)
        current_state = X[None, top_x].reshape(-1, hd)
        current_X = (
            expert_layer(current_state) * routing_weights[top_x, idx, None]
        )  # Qwen3MoeMLP.forward = fast_swiglu_inference takes care of making this faster. Analogous to Dense models' MLP
        # However `index_add_` only support torch tensors for indexing so we'll use
        # the `top_x` tensor here.
        final_X.index_add_(0, top_x, current_X.to(X.dtype))
    final_X = final_X.reshape(bsz, seq_len, hd)
    return final_X, router_logits
def Qwen3MoeDecoderLayer_fast_forward(
    self,
    hidden_states: torch.Tensor,
    causal_mask: Optional[BlockDiagonalCausalMask] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: Optional[bool] = False,
    output_router_logits: Optional[bool] = False,
    use_cache: Optional[bool] = False,
    padding_mask: Optional[torch.LongTensor] = None,
    position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    *args,
    **kwargs,
):
    """Qwen3 MoE decoder layer: self-attention + sparse-MoE MLP, each with a
    residual connection. Mirrors LlamaDecoderLayer_fast_forward but the MLP is
    the router + experts block, so `router_logits` is also produced and
    optionally appended to the outputs.
    """
    residual = hidden_states  # (immediately reassigned in both branches below)
    # Generation fast path: in-place inference kernels.
    if use_cache and hasattr(
        self, "_flag_for_generation"
    ):  # past_key_value is not None:
        residual = hidden_states
        hidden_states = fast_rms_layernorm_inference(
            self.input_layernorm, hidden_states
        )
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states = hidden_states,
            causal_mask = causal_mask,
            attention_mask = attention_mask,
            position_ids = position_ids,
            past_key_value = past_key_value,
            output_attentions = output_attentions,
            use_cache = use_cache,
            padding_mask = padding_mask,
            position_embeddings = position_embeddings,
            _flag_for_generation = self._flag_for_generation,
        )
        hidden_states += residual
        # MoE Router MLP
        residual = hidden_states
        hidden_states = fast_rms_layernorm_inference(
            self.post_attention_layernorm, hidden_states
        )
        hidden_states, router_logits = Qwen3MoeSparseMoeBlock_fast_forward(
            self.mlp, hidden_states
        )
        hidden_states += residual
    else:
        # Training / prefill path: out-of-place ops preserve autograd graphs.
        residual = hidden_states
        hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states)
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states = hidden_states,
            causal_mask = causal_mask,
            attention_mask = attention_mask,
            position_ids = position_ids,
            past_key_value = past_key_value,
            output_attentions = output_attentions,
            use_cache = use_cache,
            padding_mask = padding_mask,
            position_embeddings = position_embeddings,
        )
        hidden_states = residual + hidden_states
        # MoE Router MLP
        residual = hidden_states
        hidden_states = fast_rms_layernorm(self.post_attention_layernorm, hidden_states)
        hidden_states, router_logits = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
    outputs = (hidden_states,)
    if output_attentions:
        outputs += (self_attn_weights,)
    if output_router_logits:
        outputs += (router_logits,)
    if use_cache:
        outputs += (present_key_value,)
    return outputs
class FastQwen3MoeModel(FastQwen3Model):
    """Unsloth fast-path wrapper for Qwen3 MoE: patches the HF Qwen3 MoE
    modules with Unsloth's fast forwards, then loads via FastLlamaModel."""

    @staticmethod
    def pre_patch():
        """Monkey-patch HF's Qwen3 MoE classes with the fast implementations.

        Must run before model instantiation so the patched forwards and the
        (possibly rewritten) attention `__init__` are in effect.
        """
        # patch_linear_scaling may return source code for a replacement
        # __init__ that wires in the Unsloth rotary embedding modules.
        init_name, function = patch_linear_scaling(
            model_name = "Qwen3Moe",
            rope_module = LlamaRotaryEmbedding,
            scaled_rope_module = LlamaLinearScalingRotaryEmbedding,
            attention_module = Qwen3MoeAttention,
        )
        if init_name is not None:
            # Execute the generated __init__ source into module globals, then
            # bind it onto the attention class by name.
            exec(function, globals())
            Qwen3MoeAttention.__init__ = eval(init_name)
        Qwen3MoeAttention.forward = Qwen3Attention_fast_forward
        # Qwen3SdpaAttention .forward = Qwen3Attention_fast_forward
        # Qwen3FlashAttention2 .forward = Qwen3Attention_fast_forward
        Qwen3MoeSparseMoeBlock.forward = Qwen3MoeSparseMoeBlock_fast_forward
        Qwen3MoeMLP.forward = (
            fast_swiglu_inference  # This is analogous to Dense models' MLP
        )
        Qwen3MoeDecoderLayer.forward = Qwen3MoeDecoderLayer_fast_forward
        Qwen3MoeModel.forward = LlamaModel_fast_forward
        Qwen3MoeForCausalLM.forward = CausalLM_fast_forward(
            LlamaModel_fast_forward_inference
        )
        PeftModelForCausalLM.forward = PeftModel_fast_forward
        fix_prepare_inputs_for_generation(Qwen3MoeForCausalLM)
        # Solves https://github.com/unslothai/unsloth/issues/168
        # Static KV Cache was introduced in 4.38.0, causing training to be much slower.
        # Inference can now be CUDAGraphed, but we shall retain the old rotary embeddings.
        # https://github.com/huggingface/transformers/pull/27931
        # https://github.com/huggingface/transformers/blob/v4.37.2/src/transformers/models/llama/modeling_llama.py\
        import transformers.models.qwen3_moe.modeling_qwen3_moe
        transformers.models.qwen3_moe.modeling_qwen3_moe.Qwen3MoeRotaryEmbedding = (
            LlamaRotaryEmbedding
        )
        return

    @staticmethod
    def from_pretrained(  # TODO: Change after release
        model_name = "Qwen/Qwen3-7B",
        max_seq_length = 4096,
        dtype = None,
        load_in_4bit = True,
        token = None,
        device_map = "sequential",
        rope_scaling = None,
        fix_tokenizer = True,
        model_patcher = None,
        tokenizer_name = None,
        trust_remote_code = False,
        **kwargs,
    ):
        """Load a Qwen3 MoE model: delegates to FastLlamaModel.from_pretrained
        with this class as the `model_patcher` (the `model_patcher` argument
        passed by the caller is ignored)."""
        return FastLlamaModel.from_pretrained(
            model_name = model_name,
            max_seq_length = max_seq_length,
            dtype = dtype,
            load_in_4bit = load_in_4bit,
            token = token,
            device_map = device_map,
            rope_scaling = rope_scaling,
            fix_tokenizer = fix_tokenizer,
            model_patcher = FastQwen3MoeModel,
            tokenizer_name = tokenizer_name,
            trust_remote_code = trust_remote_code,
            **kwargs,
        )
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .llama import *
import os
from ._utils import __version__
from unsloth_zoo.utils import Version, _get_dtype
from ..utils.packing import get_packed_info_from_kwargs
from ..utils.attention_dispatch import (
AttentionConfig,
AttentionContext,
run_attention,
select_attention_backend,
)
from .llama import (
LlamaRotaryEmbedding,
LlamaLinearScalingRotaryEmbedding,
_LlamaModel_fast_forward_inference,
)
try:
from transformers.models.qwen3.modeling_qwen3 import (
Qwen3Attention,
Qwen3DecoderLayer,
Qwen3Model,
Qwen3ForCausalLM,
)
except:
transformers_version = Version(transformers_version)
if not transformers_version >= Version(
"4.50.3"
): # TODO: Update when transformers is updated
raise ImportError(
f"Unsloth: Your transformers version of {transformers_version} does not support Qwen3 and Qwen3Moe.\n"
f"The minimum required version is 4.50.3.\n"
f'Try `pip install --upgrade "transformers>=4.50.3"`\n'
f"to obtain the latest transformers build, then restart this session."
)
from transformers.modeling_attn_mask_utils import (
_prepare_4d_causal_attention_mask_for_sdpa,
)
# For Pytorch 2.1.1
try:
from transformers.models.qwen3.modeling_qwen3 import (
Qwen3SdpaAttention,
Qwen3FlashAttention2,
)
except:
Qwen3SdpaAttention = Qwen3Attention
Qwen3FlashAttention2 = Qwen3Attention
def Qwen3Attention_fast_forward(
    self,
    hidden_states: torch.Tensor,
    causal_mask: Optional[BlockDiagonalCausalMask] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: bool = False,
    use_cache: bool = False,
    padding_mask: Optional[torch.LongTensor] = None,
    position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    *args,
    **kwargs,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
    """Fast training/prefill forward pass for Qwen3 attention.

    Follows the Llama/Qwen2 fast path, except Qwen3 applies an RMS norm to
    the Q and K heads (`self.q_norm` / `self.k_norm`) *before* RoPE.
    Returns `(attn_output, attn_weights, past_key_value)`; `attn_weights`
    is always None here since the fused attention backends do not expose
    per-head weights.
    """
    # Clear inference: drop buffers left over from a previous generation run
    # so they do not alias the tensors used during training.
    if hasattr(self, "paged_attention"):
        del self.paged_attention_K
        del self.paged_attention_V
        del self.paged_attention
        del self.temp_QA
        del self.temp_KV
        del self.RH_Q
        del self.attention
    bsz, q_len, _ = hidden_states.size()
    n_heads = self.config.num_attention_heads
    n_groups = self.num_key_value_groups
    n_kv_heads = self.config.num_key_value_heads
    head_dim = self.head_dim
    assert n_kv_heads * n_groups == n_heads
    # Fused QKV projection. Q/K are kept as (bsz, q_len, heads, head_dim)
    # until after the per-head QK norm below.
    Q, K, V = self.apply_qkv(self, hidden_states)
    Q = Q.view(
        bsz, q_len, n_heads, head_dim
    ) # .transpose(1, 2) # we will transpose after normalisation
    K = K.view(
        bsz, q_len, n_kv_heads, head_dim
    ) # .transpose(1, 2) # we will transpose after normalisation
    V = V.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2)
    # Sequence-packing metadata (drives varlen attention), if the batch was packed.
    seq_info = get_packed_info_from_kwargs(kwargs, hidden_states.device)
    # Qwen3 has QKNorm. This seems to be the only difference from Qwen2.
    # Note that using fast_layernorm_compiled causes issues as the dimensions don't match up.
    # I tried to add a compiled version of the new norm but the numbers don't match up with Transformers
    # TODO: Check on the differences here.
    Q = fast_rms_layernorm(self.q_norm, Q)
    K = fast_rms_layernorm(self.k_norm, K)
    Q = Q.transpose(1, 2)
    K = K.transpose(1, 2)
    kv_seq_len = K.shape[-2]
    if past_key_value is not None:
        kv_seq_len += past_key_value[0].shape[-2]
    # Extend RoPE dynamically to fit in VRAM
    # Reuse the caller-provided (cos, sin) only if they cover the full KV length.
    if position_embeddings and kv_seq_len <= position_embeddings[0].shape[0]:
        cos, sin = position_embeddings
    else:
        rotary_emb = self.rotary_emb
        rotary_emb.extend_rope_embedding(V, seq_len = kv_seq_len)
        cos, sin = rotary_emb.get_cached(kv_seq_len, Q.device.index)
    rope_position_ids = (
        position_ids if position_ids is not None else kwargs.get("position_ids")
    )
    # Useful for LongRoPE
    Q, K = fast_rope_embedding(Q, K, cos, sin, rope_position_ids)
    # Append the new K/V onto the cache along the sequence dimension.
    if past_key_value is not None:
        K = torch.cat([past_key_value[0], K], dim = 2)
        V = torch.cat([past_key_value[1], V], dim = 2)
    past_key_value = (K, V) if use_cache else None
    # Attention module
    # Varlen (packed-sequence) attention is only valid when no KV cache is in play.
    use_varlen = seq_info is not None and past_key_value is None
    backend = select_attention_backend(use_varlen)
    attention_config = AttentionConfig(
        backend = backend,
        n_kv_heads = n_kv_heads,
        n_groups = n_groups,
        flash_dense_kwargs = {"causal": True},
        flash_varlen_kwargs = {
            "dropout_p": 0.0,
            "causal": True,
            "softmax_scale": getattr(self, "softmax_scale", None),
        },
    )
    context = AttentionContext(
        bsz = bsz,
        q_len = q_len,
        kv_seq_len = kv_seq_len,
        n_heads = n_heads,
        head_dim = head_dim,
        requires_grad = hidden_states.requires_grad,
        seq_info = seq_info,
        attention_mask = attention_mask,
        causal_mask = causal_mask,
    )
    A = run_attention(config = attention_config, context = context, Q = Q, K = K, V = V)
    attn_output = A.reshape(bsz, q_len, n_heads * head_dim)
    attn_output = self.apply_o(self, attn_output)
    attn_weights = None
    return attn_output, attn_weights, past_key_value
# Module-level alias: avoids re-resolving the `torch.matmul` attribute on
# every token step in the inference hot loop below.
torch_matmul = torch.matmul
def Qwen3Attention_fast_forward_inference(
    self,
    hidden_states: torch.Tensor,
    past_key_value: Optional[Tuple[torch.Tensor]],
    position_ids,
    do_prefill = False,
    attention_mask = None,
):
    """
    https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L406
    Fast inference using KV cache.
    QK^T can be computed in 4 chunks
    [Q, q] @ [K, k].T where q, k are the new tokens.
    [QK^T, Qk^T]
    [qK^T, qk^T]
    Since the attention mask wipes Qk^T, we just get
    [QK^T,    0]
    [qK^T, qk^T]
    Since softmax is row-wise, we get
    softmax([QK^T,    0])
    softmax([qK^T, qk^T])
    We then multiply by   [V]
                          [v]
    softmax([QK^T,    0]) [softmax(QK^T)V] *
    softmax([qK^T, qk^T]) [softmax([qK^T, qk^T]) @ [V, v]]
    But notice * [softmax(QK^T)V] is just the last attention.
    We just need to compute the last final row.
    This means we can pass in a row of Q, but we need to
    remember K and V, which are called the KV cache.

    Decodes exactly ONE new token per call (q_len == 1). Qwen3 difference
    from the Llama path: per-head RMS norm on Q/K before RoPE.
    Returns `(attn_output, (K_cache, V_cache))`.
    """
    Xn = hidden_states
    bsz, _, hd = hidden_states.size()
    K1, V1 = past_key_value
    dtype = Xn.dtype
    n_heads = self.config.num_attention_heads
    n_groups = self.num_key_value_groups
    n_kv_heads = self.config.num_key_value_heads
    head_dim = self.head_dim
    # assert(n_kv_heads * n_groups == n_heads)
    hidden_size = self.config.hidden_size
    attention_size = n_heads * head_dim
    seq_len = K1.shape[-2]
    kv_seq_len = seq_len + 1
    # Prefill phase
    # if not hasattr(self, "paged_attention"):
    device = hidden_states.device
    if do_prefill:
        # Allocate all scratch buffers once; layout is (slots, K/V, bsz, kv_heads, head_dim)
        # so a single token is written with one indexed assignment per step.
        self.paged_attention = torch.empty(
            (KV_CACHE_INCREMENT + seq_len + 1, 2, bsz, n_kv_heads, head_dim),
            dtype = dtype,
            device = device,
        )
        self.paged_attention_K = self.paged_attention[:, 0]
        self.paged_attention_V = self.paged_attention[:, 1]
        self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3)
        self.paged_attention_V[:seq_len] = V1.permute(2, 0, 1, 3)
        self.temp_QA = torch.empty(
            (2, bsz, 1, attention_size), dtype = dtype, device = device
        )
        self.temp_KV = torch.empty(
            (2, bsz, 1, n_kv_heads * head_dim), dtype = dtype, device = device
        )
        self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = device)
        # Mistral Nemo 12b has weird dimensions
        if attention_size != hidden_size:
            self.temp_O = torch.empty((1, bsz, hidden_size), dtype = dtype, device = device)
        else:
            self.temp_O = self.temp_QA[1][:, :, :hidden_size]
        self.attention = torch.empty(
            (bsz, n_heads, 1, KV_CACHE_INCREMENT + seq_len), dtype = dtype, device = device
        )
        self.scalar = 1.0 / math_sqrt(self.head_dim)
        self.half_head_dim = head_dim // 2
    elif kv_seq_len >= self.paged_attention.shape[0]:
        # Cache full: grow the KV and attention buffers in place.
        self.paged_attention.resize_(
            (
                self.paged_attention.shape[0] + KV_CACHE_INCREMENT,
                2,
                bsz,
                n_kv_heads,
                head_dim,
            )
        )
        self.paged_attention_K = self.paged_attention[:, 0]
        self.paged_attention_V = self.paged_attention[:, 1]
        self.attention.resize_(
            (bsz, n_heads, 1, self.attention.shape[-1] + KV_CACHE_INCREMENT)
        )
    # Project the single new token; outputs land in the preallocated buffers.
    Qn = fast_linear_forward(self.q_proj, Xn, out = self.temp_QA[0])
    Kn = fast_linear_forward(self.k_proj, Xn, out = self.temp_KV[0])
    Vn = fast_linear_forward(self.v_proj, Xn, out = self.temp_KV[1])
    Qn = Qn.view(
        bsz, 1, n_heads, head_dim
    ) # .transpose(1, 2) # we will transpose after normalisation
    Kn = Kn.view(
        bsz, 1, n_kv_heads, head_dim
    ) # .transpose(1, 2) # we will transpose after normalisation
    Vn = Vn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2)
    # Qwen3-specific QK norm, applied before RoPE.
    Qn = fast_rms_layernorm_inference(self.q_norm, Qn)
    Kn = fast_rms_layernorm_inference(self.k_norm, Kn)
    Qn = Qn.transpose(1, 2)
    Kn = Kn.transpose(1, 2)
    # cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len)
    # Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids)
    # Need to do it prior 2 steps before hitting full on short KV cache
    # or else error
    self.rotary_emb.extend_rope_embedding(Vn, seq_len + 2)
    cos, sin = self.rotary_emb.get_cached(kv_seq_len, Qn.device.index)
    cos = cos[position_ids].unsqueeze(1)
    sin = sin[position_ids].unsqueeze(1)
    h = self.half_head_dim
    # In-place rotate-half RoPE: RH_Q = rotate_half(Qn), then Qn = Qn*cos + RH_Q*sin.
    RH_Q = self.RH_Q
    RH_Q[:, :, :, :h] = Qn[:, :, :, h:]
    RH_Q[:, :, :, h:] = Qn[:, :, :, :h]
    RH_Q[:, :, :, :h].neg_() # torch.neg(RH_Q[:,:,:,:h], out = RH_Q[:,:,:,:h])
    Qn *= cos
    Qn.addcmul_(RH_Q, sin)
    # Same trick for K; RH_Q's leading kv-head slice is reused as scratch.
    RH_K = RH_Q[
        :, :n_kv_heads, :, :
    ] # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, device = "cuda:0")
    RH_K[:, :, :, :h] = Kn[:, :, :, h:]
    RH_K[:, :, :, h:] = Kn[:, :, :, :h]
    RH_K[:, :, :, :h].neg_() # torch.neg(RH_K[:,:,:,:h], out = RH_K[:,:,:,:h])
    Kn *= cos
    Kn.addcmul_(RH_K, sin)
    # New KV cache
    # Kn = torch.cat([K1, Kn], dim = 2)
    # Vn = torch.cat([V1, Vn], dim = 2)
    self.paged_attention_K[seq_len] = Kn.permute(2, 0, 1, 3)
    self.paged_attention_V[seq_len] = Vn.permute(2, 0, 1, 3)
    Kn = self.paged_attention_K[:kv_seq_len].permute(1, 2, 0, 3)
    Vn = self.paged_attention_V[:kv_seq_len].permute(1, 2, 0, 3)
    # Handle sliding windows
    sliding_window = getattr(self.config, "sliding_window", None)
    if sliding_window is not None and kv_seq_len > sliding_window:
        # From https://github.com/huggingface/transformers/blob/main/src/transformers/models/mistral/modeling_mistral.py#L193
        slicing_tokens = 1 - sliding_window
        Knn = Kn[:, :, slicing_tokens:, :] # .contiguous()
        Vnn = Vn[:, :, slicing_tokens:, :] # .contiguous()
    else:
        Knn, Vnn = Kn, Vn
    # when qlen==vlen and attn_mask is None, we should use causal attention
    Q_len = Qn.shape[-2]
    K_len = Knn.shape[-2]
    if attention_mask is None and Q_len == K_len:
        is_causal = True
    else:
        is_causal = False
    # Grouped query attention
    # NOTE(review): Python precedence parses this as
    # `bsz == 1 or ((not SDPA_HAS_GQA) and n_groups != 1)` — the manual
    # KV-head expansion runs for the matmul path (bsz == 1) and for SDPA
    # builds lacking native GQA support.
    _, _, cached_len, _ = Knn.shape
    if bsz == 1 or not SDPA_HAS_GQA and n_groups != 1:
        Knn = Knn[:, :, None, :, :].expand(
            bsz, n_kv_heads, n_groups, cached_len, head_dim
        )
        Vnn = Vnn[:, :, None, :, :].expand(
            bsz, n_kv_heads, n_groups, cached_len, head_dim
        )
        Knn = Knn.reshape(bsz, n_heads, cached_len, head_dim)
        Vnn = Vnn.reshape(bsz, n_heads, cached_len, head_dim)
    # else:
    #     Knn, Vnn = Knn, Vnn
    # pass
    # Attention
    if bsz == 1:
        Qn *= self.scalar # See https://github.com/ggerganov/llama.cpp/issues/7805#issuecomment-2153349963
        # It seems like doing (Q * scalar) @ K is better than (Q @ K) * scalar to stop overflows
        A = torch_matmul(
            Qn, Knn.transpose(2, 3), out = self.attention[:, :, :, :cached_len]
        )
        # if attention_mask is not None: A += attention_mask # Must add attention_mask for batched
        # Softmax in float32 for stability, written back in-place in A's dtype.
        A[:] = torch_nn_functional_softmax(
            A, dim = -1, dtype = torch.float32
        ) # .to(A.dtype)
        A = torch_matmul(A, Vnn, out = Qn)
    else:
        if SDPA_HAS_GQA:
            A = scaled_dot_product_attention(
                Qn,
                Knn,
                Vnn,
                attn_mask = attention_mask,
                is_causal = is_causal,
                enable_gqa = True,
            )
        else:
            A = scaled_dot_product_attention(
                Qn, Knn, Vnn, attn_mask = attention_mask, is_causal = is_causal
            )
    A = A.transpose(1, 2)
    A = A.reshape(bsz, 1, attention_size)
    A = fast_linear_forward(self.o_proj, A, out = self.temp_O)
    return A, (Kn, Vn)
class FastQwen3Model(FastLlamaModel):
    """Unsloth fast-path support for Qwen3.

    Monkey-patches Hugging Face's Qwen3 classes with the fused attention
    forwards defined above, then defers loading to ``FastLlamaModel``.
    """
    @staticmethod
    def pre_patch():
        """Patch transformers' Qwen3 modules in place before model load."""
        # patch_linear_scaling may synthesize a replacement __init__ (as
        # source text) that wires in Llama's rotary embedding classes.
        init_name, function = patch_linear_scaling(
            model_name = "Qwen3",
            rope_module = LlamaRotaryEmbedding,
            scaled_rope_module = LlamaLinearScalingRotaryEmbedding,
            attention_module = Qwen3Attention,
        )
        if init_name is not None:
            # Compile the generated __init__ into this module's globals,
            # then bind it onto the HF attention class.
            exec(function, globals())
            Qwen3Attention.__init__ = eval(init_name)
        Qwen3Attention.forward = Qwen3Attention_fast_forward
        Qwen3SdpaAttention.forward = Qwen3Attention_fast_forward
        Qwen3FlashAttention2.forward = Qwen3Attention_fast_forward
        Qwen3DecoderLayer.forward = LlamaDecoderLayer_fast_forward
        Qwen3Model.forward = LlamaModel_fast_forward
        # CausalLM forward dispatches to the token-by-token inference path.
        Qwen3ForCausalLM.forward = CausalLM_fast_forward(
            _LlamaModel_fast_forward_inference(Qwen3Attention_fast_forward_inference)
        )
        PeftModelForCausalLM.forward = PeftModel_fast_forward
        fix_prepare_inputs_for_generation(Qwen3ForCausalLM)
        # Solves https://github.com/unslothai/unsloth/issues/168
        # Static KV Cache was introduced in 4.38.0, causing training to be much slower.
        # Inference can now be CUDAGraphed, but we shall retain the old rotary embeddings.
        # https://github.com/huggingface/transformers/pull/27931
        # https://github.com/huggingface/transformers/blob/v4.37.2/src/transformers/models/llama/modeling_llama.py
        import transformers.models.qwen3.modeling_qwen3
        transformers.models.qwen3.modeling_qwen3.Qwen3RotaryEmbedding = (
            LlamaRotaryEmbedding
        )
        return
    @staticmethod
    def from_pretrained( # TODO: Change after release
        model_name = "Qwen/Qwen3-7B",
        max_seq_length = 4096,
        dtype = None,
        load_in_4bit = True,
        token = None,
        device_map = "sequential",
        rope_scaling = None,
        fix_tokenizer = True,
        model_patcher = None,
        tokenizer_name = None,
        trust_remote_code = False,
        **kwargs,
    ):
        """Load a Qwen3 model via the shared Llama loader, forcing this
        class as the patcher so `pre_patch` runs for Qwen3."""
        return FastLlamaModel.from_pretrained(
            model_name = model_name,
            max_seq_length = max_seq_length,
            dtype = dtype,
            load_in_4bit = load_in_4bit,
            token = token,
            device_map = device_map,
            rope_scaling = rope_scaling,
            fix_tokenizer = fix_tokenizer,
            model_patcher = FastQwen3Model,
            tokenizer_name = tokenizer_name,
            trust_remote_code = trust_remote_code,
            **kwargs,
        )
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/models/rl_replacements.py | unsloth/models/rl_replacements.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"RL_EXTRA_ARGS",
"RL_FUNCTIONS",
"RL_PRE_ITEMS",
"RL_CONFIG_CHANGES",
"RL_METRICS_CHANGES",
]
import os
import re
import torch
import inspect
from collections import defaultdict
from unsloth_zoo.rl_replacements import RL_REPLACEMENTS, left_pack_padding
from unsloth_zoo.utils import Version
from importlib.metadata import version as importlib_version
from unsloth_zoo.log import logger
import importlib.util
from ..device_type import (
is_hip,
get_device_type,
DEVICE_TYPE,
DEVICE_TYPE_TORCH,
DEVICE_COUNT,
ALLOW_PREQUANTIZED_MODELS,
)
import textwrap
from ._utils import _get_inference_mode_context_manager
# Per-trainer patch registries keyed by trainer module name (e.g.
# "sft_trainer", "grpo_trainer"). The trainer-patching machinery consumes
# them when rewriting TRL trainer source code:
RL_EXTRA_ARGS = defaultdict(list)        # callables (call_args, extra_args) -> setup-code string
RL_FUNCTIONS = defaultdict(list)         # callables (function_name, source) -> rewritten source
RL_PRE_ITEMS = defaultdict(list)         # source snippets prepended to the patched trainer module
RL_CONFIG_CHANGES = defaultdict(list)    # config-mutation hooks
RL_METRICS_CHANGES = defaultdict(list)   # metrics-mutation hooks
RL_ADDITIONAL_FUNCTIONS = defaultdict(list)  # extra helper sources per trainer
# Options handed to torch.compile by the generated trainer code.
torch_compile_options = {
    "epilogue_fusion": True,
    "max_autotune": True,
    "shape_padding": True,
    "trace.enabled": False,
    "triton.cudagraphs": False,
}
# Check untrained tokens
def sft_trainer_fix_untrained_tokens(call_args, extra_args):
    """Emit setup code that repairs untrained tokens before SFT starts.

    When the trainer call site exposes both `model` and `train_dataset`,
    return a code snippet (as a string) that fixes untrained token
    embeddings and zero training losses; otherwise return an empty string
    so nothing is injected.
    """
    if "model" not in call_args or "train_dataset" not in call_args:
        return ""
    snippet_lines = (
        "IGNORED_TOKENIZER_NAMES = os.environ.get('UNSLOTH_IGNORED_TOKENIZER_NAMES', '').split('\\n')\n",
        "from unsloth_zoo.tokenizer_utils import fix_untrained_tokens\n",
        "from unsloth_zoo.training_utils import fix_zero_training_loss\n",
        "if 'tokenizer' not in locals(): tokenizer = processing_class\n",
        "fix_untrained_tokens(model, tokenizer, train_dataset, IGNORED_TOKENIZER_NAMES, eps = 1e-16)\n",
        "fix_zero_training_loss(model, tokenizer, train_dataset)\n",
    )
    return "".join(snippet_lines)
# Register: injected into the SFTTrainer call site at patch time.
RL_EXTRA_ARGS["sft_trainer"].append(sft_trainer_fix_untrained_tokens)
# Remove DPO columns which might randomnly be tokenized
def dpo_trainer_fix_columns(call_args, extra_args):
    """Emit cleanup code that drops raw DPO text columns.

    DPO datasets may carry both raw text columns ('chosen', 'rejected',
    'prompt') and their tokenized counterparts; the generated snippet
    removes the raw ones only when every tokenized column is present.
    Returns "" unless both `model` and `train_dataset` are in scope.
    """
    if "model" not in call_args or "train_dataset" not in call_args:
        return ""
    snippet_lines = (
        "if hasattr(train_dataset, 'column_names'):\n",
        "    column_names = set(train_dataset.column_names)\n",
        "    check = ['chosen', 'rejected', 'prompt', 'chosen_input_ids', 'chosen_attention_mask',\n",
        "        'chosen_labels', 'rejected_input_ids', 'rejected_attention_mask', 'rejected_labels',\n",
        "        'prompt_input_ids', 'prompt_attention_mask']\n",
        "    if all(x in column_names for x in check):\n",
        "        train_dataset = train_dataset.remove_columns(['chosen', 'rejected', 'prompt'])\n",
        "    del check, column_names\n",
    )
    return "".join(snippet_lines)
# Register: injected into the DPOTrainer call site at patch time.
RL_EXTRA_ARGS["dpo_trainer"].append(dpo_trainer_fix_columns)
# Fix tokenizer double BOS
def sft_trainer_prepare_dataset(function_name, function):
    """Rewrite TRL's dataset-preparation method to avoid double BOS tokens.

    Two strategies, tried in order:
    1. If unsloth_zoo ships a drop-in `sft_prepare_dataset` whose signature
       matches TRL's `_prepare_dataset`, swap the whole body for it.
    2. Otherwise inject guard code at the top of the method that disables
       `add_special_tokens` when the chat template / sample text already
       carries a BOS token, and restore the tokenizer before each return.
    Returns the (possibly) rewritten source text.
    """
    if (
        function_name != "_prepare_non_packed_dataloader"
        and function_name != "_prepare_dataset"
    ):
        return function
    fast_sft_prepare_dataset = RL_REPLACEMENTS.get("sft_prepare_dataset", None)
    if fast_sft_prepare_dataset is not None:
        # Signature check: all replacement parameter names must appear, in
        # order, inside TRL's `_prepare_dataset` signature.
        params = inspect.signature(fast_sft_prepare_dataset).parameters.keys()
        params = ".*?".join(params)
        matched = re.match(
            r"[\s]{0,}def _prepare_dataset\(.*?" + params + r".*?\)",
            function,
            flags = re.MULTILINE | re.DOTALL,
        )
        if matched:
            # Use fast version!
            # Re-indent the replacement by 4 spaces so it slots in as a method.
            function = inspect.getsource(fast_sft_prepare_dataset)
            function = function.split("\n")
            function = "\n".join(" " * 4 + x for x in function)
            function = function.replace(
                "def sft_prepare_dataset", "def _prepare_dataset"
            )
            return function
    # Fallback: guard snippet prepended to the method body. It detects a
    # pre-existing BOS (in the first sample or the chat template) and, if
    # found, monkey-patches tokenizer.__call__ with add_special_tokens=False.
    check_text = (
        "if 'skip_prepare_dataset' in locals() and skip_prepare_dataset:\n"
        "    return dataset\n"
        "if 'tokenizer' not in locals(): tokenizer = processing_class\n"
        "if 'formatting_func' not in locals(): raise RuntimeError('Unsloth: Please file a bug report - `formatting_func` does not exist!')\n"
        "if 'dataset_text_field' not in locals() and 'args' in locals(): dataset_text_field = args.dataset_text_field\n"
        "if 'dataset_text_field' not in locals(): raise RuntimeError('Unsloth: Please file a bug report - `dataset_text_field` does not exist!')\n"
        "test_text = dataset[0][dataset_text_field] if (formatting_func is None and dataset_text_field is not None) else formatting_func(dataset[0])[0]\n"
        "chat_template = getattr(tokenizer, 'chat_template', None)\n"
        "chat_template = '' if chat_template is None else chat_template\n"
        "has_bos_token_already = (test_text.startswith(tokenizer.bos_token) or tokenizer.bos_token in chat_template) "
        "if getattr(tokenizer, 'bos_token', None) is not None else False\n"
        "if 'add_special_tokens' not in locals() and has_bos_token_already:\n"
        "    from functools import partial\n"
        "    tokenizer_call = tokenizer.__call__\n"
        "    tokenizer.__call__ = partial(tokenizer_call, add_special_tokens = False)\n"
        "    processing_class = tokenizer\n"
        "else:\n"
        "    tokenizer_call = None\n"
        "    add_special_tokens = False if has_bos_token_already else locals().get('add_special_tokens', False)\n"
    )
    # Indent the snippet to method-body depth (8 spaces).
    check_text = check_text.split("\n")
    check_text = "\n".join(" " * 8 + x for x in check_text)
    check_text = check_text.rstrip() + "\n"
    # .*? matches first match. .+? matches final match.
    replacer = re.findall(
        r"def " + function_name + r"\(.*?\).*?\:\n",
        function,
        flags = re.MULTILINE | re.DOTALL,
    )
    if len(replacer) != 0:
        replacer = replacer[0]
        function = function.replace(replacer, replacer + check_text)
        # Return tokenizer's original state
        return_state = (
            "if tokenizer_call is not None: tokenizer.__call__ = tokenizer_call\n"
        )
        # Insert the restore line immediately before the trailing return.
        function = re.sub(
            r"\n([ ]{4,})(return .*?[\s]{0,})$",
            rf"\1{return_state}\1\2",
            function,
        )
    return function
# Register: rewrites SFTTrainer's dataset preparation at patch time.
RL_FUNCTIONS["sft_trainer"].append(sft_trainer_prepare_dataset)
# Ignore mean_token_accuracy since it needs logits
# We override it directly with our version
def sft_trainer_compute_loss(function_name, function):
    """Replace SFTTrainer's `compute_loss` with a thin `super()` delegate.

    TRL's version computes `mean_token_accuracy`, which requires logits;
    Unsloth's fused paths may not expose them, so the method is reduced to
    a pass-through. The replacement body below is extracted verbatim with
    `inspect.getsource`, so its code must not be edited casually.
    """
    if function_name != "compute_loss":
        return function
    # NOTE: only the nested function's own lines are captured by getsource;
    # this comment is not part of the emitted source.
    def compute_loss(
        self, model, inputs, return_outputs = False, num_items_in_batch = None
    ):
        outputs = super().compute_loss(
            model,
            inputs,
            return_outputs = return_outputs,
            num_items_in_batch = num_items_in_batch,
        )
        return outputs
    function = inspect.getsource(compute_loss)
    return function
# Register: overrides SFTTrainer.compute_loss at patch time.
RL_FUNCTIONS["sft_trainer"].append(sft_trainer_compute_loss)
# Autocast precision for GRPO
def grpo_trainer__prepare_inputs(function_name, function):
    """Patch GRPOTrainer._prepare_inputs source for mixed precision.

    Two textual edits: wrap the `torch.inference_mode()` block in an
    autocast context (fp16/bf16 chosen from ACCELERATE_MIXED_PRECISION,
    honouring UNSLOTH_FORCE_FLOAT32), and unwrap the model without the
    fp32 wrapper. Other methods pass through untouched.
    """
    if function_name != "_prepare_inputs":
        return function
    edits = (
        (
            "with torch.inference_mode():",
            "with torch.inference_mode(), "
            "torch.amp.autocast(device_type = 'cuda', "
            "dtype = ((torch.float16 if os.environ.get('ACCELERATE_MIXED_PRECISION', 'fp16') == 'fp16' else torch.bfloat16) "
            "if not torch.is_autocast_enabled('cuda') else nullcontext())"
            "if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '0' else torch.float16):",
        ),
        (
            "self.accelerator.unwrap_model(self.model)",
            "self.accelerator.unwrap_model(self.model, keep_fp32_wrapper = False)",
        ),
    )
    for old_text, new_text in edits:
        function = function.replace(old_text, new_text)
    return function
# Register: rewrites GRPOTrainer._prepare_inputs at patch time.
RL_FUNCTIONS["grpo_trainer"].append(grpo_trainer__prepare_inputs)
# Remove collective RPC of reload weights from generate
# trl added reload weights (potentially for quantized models), we don't need it for our use case (LoRA primarily)
# https://github.com/huggingface/trl/commit/7856d3b1f6518601732f489883b341bb6dd36434#diff-964e6fd373aa93037604064cb2b822d7f8e2735e33f791065acf2c4c3552d393R1168-R1169
def grpo_trainer__generate_single_turn(function_name, function):
    """Strip TRL's `reload_weights` collective RPC from generation.

    TRL reloads weights in the vLLM workers before generating (useful for
    quantized models); Unsloth's LoRA-centric flow does not need that, so
    the call is removed from `_generate_single_turn`'s source text.
    https://github.com/huggingface/trl/commit/7856d3b1f6518601732f489883b341bb6dd36434#diff-964e6fd373aa93037604064cb2b822d7f8e2735e33f791065acf2c4c3552d393R1168-R1169
    """
    if function_name != "_generate_single_turn":
        return function
    # Tolerates either quote style and internal whitespace in the call.
    reload_rpc = re.compile(
        r"self\.llm\.collective_rpc\(\s*(['\"])reload_weights\1\s*\)"
    )
    return reload_rpc.sub("", function)
# Register: rewrites GRPOTrainer._generate_single_turn at patch time.
RL_FUNCTIONS["grpo_trainer"].append(grpo_trainer__generate_single_turn)
# Fix incorrect special tokens handling and truncation in older TRL versions
def grpo_trainer__generate_and_score_completions(function_name, function):
    """Source-level fixes for GRPOTrainer._generate_and_score_completions.

    Applies several textual patches to TRL's method, each tolerant of the
    pattern being absent (for other TRL versions): correct special-token
    decoding, left-pack padding before old/ref hidden-state computation,
    vLLM logprob sanitisation, prompt truncation with protected vision
    tokens, exposing sampling logprobs, and vLLM sleep/wake wrapping
    around `self.llm.generate(...)`.
    """
    if function_name != "_generate_and_score_completions":
        return function
    # TRL 0.19.0 did skip_special_tokens = True which should be False
    function = function.replace(
        "prompt_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False",
        "prompt_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False",
    )
    # Left pad prompt before calculation old and ref hidden states
    line_to_replace = 'batch_size = self.args.per_device_train_batch_size if mode == "train" else self.args.per_device_eval_batch_size'
    # The new multi-line string that will replace the line above
    replacement_lines = """
            batch_size = self.args.per_device_train_batch_size if mode == "train" else self.args.per_device_eval_batch_size
            try:
                # TRL 0.23.1 and below path
                if not has_images:
                    # Left pad prompt before calculation old and ref hidden states
                    prompt_completion_ids = left_pack_padding(prompt_completion_ids, self.processing_class.pad_token_id)
                self.model.for_training()
            except:
                # TRL 0.24.0 and below path
                if images is None:
                    # Left pad prompt before calculation old and ref hidden states
                    prompt_completion_ids = left_pack_padding(prompt_completion_ids, self.processing_class.pad_token_id)
                self.model.for_training()"""
    function = function.replace(line_to_replace, replacement_lines)
    # Drop the `vllm_importance_sampling_correction` condition so the vLLM
    # branch is always taken when use_vllm is set.
    pattern_to_find = re.compile(
        r"^\s*if self\.args\.gradient_accumulation_steps % generate_every != 0 or \(\s*"
        r"self\.use_vllm and self\.vllm_importance_sampling_correction\s*"
        r"\):",
        re.MULTILINE,
    )
    replacement_text = """
        if self.args.gradient_accumulation_steps % generate_every != 0 or (
            self.use_vllm
        ):"""
    # Use re.sub() to perform the replacement
    function, num_replacements = pattern_to_find.subn(replacement_text, function)
    # Rebuild the all_logprobs comprehension to sanitize vLLM logprobs.
    pattern_to_find = re.compile(
        r"(^\s*)all_logprobs = \["  # Capture indentation (group 1)
        r".*?"  # Match everything inside non-greedily
        r"for output in outputs\.outputs\s*"
        r"\]",
        re.DOTALL | re.MULTILINE,
    )
    replacement_text = (
        r"\1from trl.scripts.vllm_serve import sanitize_logprob\n"
        r"\1all_logprobs = [\n"
        r"\1    [sanitize_logprob(next(iter(logprob.values()))) for logprob in output.logprobs]\n"
        r"\1    for outputs in all_outputs\n"
        r"\1    for output in outputs.outputs\n"
        r"\1]"
    )
    function, num_replacements = pattern_to_find.subn(replacement_text, function)
    # Always between max_prompt_length and use_vllm
    found = re.findall(
        r"\n(([ ]{8,})if self\.max_prompt_length is not None:.*?"
        r"\2if self\.use_vllm:)",
        function,
        flags = re.DOTALL | re.MULTILINE,
    )
    if len(found) != 0:
        replace_part, spacing = found[0]
        # Sanity check: after removing comments, the captured span must hold
        # exactly two statements at the captured indentation level.
        removed_comments = re.sub(r"\#[^\n]{1,}", "", replace_part)
        splits = removed_comments.split("\n")
        if (
            sum(re.match(rf"{spacing}[^\s]", x) is not None for x in splits) == 2
            and len(spacing) >= 8
        ):
            new_replacement = f"""\n{spacing}if self.max_prompt_length is not None:
            # If max_prompt_length is set, we trim the prompt to keep only the last `max_prompt_length` tokens.
            # Then we decode those tokens back into text. We manually remove leading pad tokens from the decoded text,
            # because we can't use `skip_special_tokens=True` (some special tokens are still needed for generation).
            protected = [self.image_token_id, self.vision_start_token_id, self.vision_end_token_id]
            protected = [token for token in protected if token is not None]
            prompt_ids, prompt_mask = truncate_with_protected_tokens(
                prompt_ids, prompt_mask, self.max_prompt_length, protected
            )
            prompts_text = [re.sub(rf"^({{re.escape(self.pad_token)}})+", "", text) for text in prompts_text]
            # The chat template inserts a single image token into the prompt text. However, when this text is later
            # tokenized, the single image token string is expanded into multiple image token IDs, depending on the
            # image size. Since we're detokenizing here, we may see repeated image tokens in the decoded text. We
            # collapse them back into a single token string to match the original template.
            if self.image_token is not None:
                prompts_text = [
                    re.sub(rf"({{re.escape(self.image_token)}})+", self.image_token, text) for text in prompts_text
                ]
            # Generate completions using either vLLM or regular generation
            if self.use_vllm:"""
            function = function.replace(replace_part, new_replacement)
    # Surface vLLM's per-token sampling logprobs in the returned dict.
    string_to_find = """            if "image_sizes" in prompt_inputs:
                output["image_sizes"] = prompt_inputs["image_sizes"]"""
    replacement_string = """            if "image_sizes" in prompt_inputs:
                output["image_sizes"] = prompt_inputs["image_sizes"]
                if self.use_vllm:
                    try:
                        output["sampling_per_token_logps"] = sampling_per_token_logps
                    except NameError:
                        output["sampling_per_token_logps"] = None"""
    function = function.replace(string_to_find, replacement_string)
    if "wake_up()" not in function:
        # Sleep functionality has been added to trl in v0.23.0. We do not want to redo this.
        # https://github.com/huggingface/trl/commit/edbe8234bc7e528f72ac76607de9d3e4753e2709
        pattern = re.compile(r".*self\.llm\.generate\(.*\).*", re.MULTILINE)
        matches = list(pattern.finditer(function))
        patched = function
        # Generally there's only one match. But this is just to make sure we don't miss any.
        # Iterate in reverse so earlier match offsets stay valid while splicing.
        for match in reversed(matches):
            line = match.group(0)
            indent_match = re.match(r"(\s*)", line)
            indent = indent_match.group(1) if indent_match else ""
            # NOTE(review): os.environ.get('VLLM_SLEEP_MODE', 1) returns a str
            # when the env var is set, else the int 1 — confirm self.llm.sleep
            # accepts both.
            wrapped = (
                f"{indent}if hasattr(self, 'llm'):\n"
                f"{indent}    if getattr(self.llm.llm_engine.vllm_config.model_config, 'enable_sleep_mode', False):\n"
                f"{indent}        self.llm.wake_up()\n"
                f"{line}\n\n"
                f"{indent}if hasattr(self, 'llm'):\n"
                f"{indent}    if getattr(self.llm.llm_engine.vllm_config.model_config, 'enable_sleep_mode', False):\n"
                f"{indent}        self.llm.sleep(os.environ.get('VLLM_SLEEP_MODE', 1))\n"
            )
            patched = patched[: match.start()] + wrapped + patched[match.end() :]
        function = patched
    return function
# Register: rewrites GRPOTrainer._generate_and_score_completions at patch time.
RL_FUNCTIONS["grpo_trainer"].append(grpo_trainer__generate_and_score_completions)
# Fix {"reasoning_effort" : "high"} not applied
def grpo_trainer_fix_maybe_apply_chat_template(function_name, function):
    """Forward extra dataset keys (e.g. {"reasoning_effort": "high"}) to the
    chat template.

    TRL builds `prompts_text` via `maybe_apply_chat_template(example, ...)`,
    discarding any example keys the template could consume. This rewrite
    replaces that one-liner with a loop that passes every non-standard
    string-valued key mentioned in the chat template as a tokenizer kwarg.
    """
    # Infer the method's body indentation from where its `def ` starts.
    spaces = function.find("def ")
    if spaces % 4 != 0:
        return function
    spaces += 4
    replacement = """
    _chat_template_ = getattr(self.processing_class, "chat_template", None)
    if _chat_template_ is None: _chat_template_ = ""
    _supported_keys_ = set(("prompt", "chosen", "rejected", "completion", "messages", "label"))
    prompts_text = []
    for _example_ in __INPUTS__REPLACEMENT__:
        _tokenizer_kwargs_ = {}
        if type(_example_) is not dict:
            _example_ = {"prompt": _example_}
        _left_keys_ = _example_.keys() - _supported_keys_
        for k in _left_keys_:
            if k in _chat_template_:
                v = _example_[k]
                if type(v) is str:
                    _tokenizer_kwargs_[k] = v
        _x_ = maybe_apply_chat_template(_example_, self.processing_class, **_tokenizer_kwargs_)["prompt"]
        prompts_text.append(_x_)
    """
    # Re-indent the snippet to the method's body depth.
    replacement = textwrap.dedent(replacement).strip()
    replacement = textwrap.indent(replacement, spaces * " ")
    replacement = f"\n{replacement}\n"
    # Variant 1: list comprehension over `inputs` (exact-text match).
    what = 'prompts_text = [maybe_apply_chat_template(example, self.processing_class)["prompt"] for example in inputs]'
    function = function.replace(
        what, replacement.replace("__INPUTS__REPLACEMENT__", "inputs")
    )
    """prompts_text = [
        maybe_apply_chat_template({"prompt": prompt}, self.processing_class)["prompt"] for prompt in prompts
    ]"""
    # Variant 2: comprehension over `prompts` (regex match, quote/space tolerant).
    function = re.sub(
        r"prompts_text = \["
        r"[\s]{0,}"
        r"maybe_apply_chat_template\(\{[\"\']prompt[\"\'][\s]{0,}\:[\s]{0,}prompt[\s]{0,}\}[\s]{0,}\,[\s]{0,}self\.processing_class\)"
        r"\[[\"\']prompt[\"\']\] for prompt in prompts"
        r"[\s]{0,}"
        r"\]",
        replacement.replace("__INPUTS__REPLACEMENT__", "prompts"),
        function,
    )
    return function
# Register: applies to every GRPO trainer method source (guarded by the
# `spaces % 4` check and the absence of the target patterns elsewhere).
RL_FUNCTIONS["grpo_trainer"].append(grpo_trainer_fix_maybe_apply_chat_template)
# Remove _move_model_to_vllm
def grpo_trainer__move_model_to_vllm(function_name, function):
    """Neutralise TRL's `_move_model_to_vllm` by swapping in a no-op stub.

    Unsloth keeps weights in place (LoRA-centric flow), so moving them into
    vLLM is unnecessary. The stub's source text is the return value, hence
    it must stay at this exact indentation for `inspect.getsource`.
    """
    if function_name != "_move_model_to_vllm":
        return function
    def _move_model_to_vllm(self, *args, **kwargs):
        return None
    return inspect.getsource(_move_model_to_vllm)
# Register: disables GRPOTrainer._move_model_to_vllm at patch time.
RL_FUNCTIONS["grpo_trainer"].append(grpo_trainer__move_model_to_vllm)
# Edit _get_per_token_logps to handle mixed precision
def grpo_trainer__get_per_token_logps(function_name, function):
    """Replace `_get_per_token_logps` with an Unsloth-aware version.

    The replacement (extracted verbatim via `inspect.getsource`, so its
    code lines must not be edited) currently short-circuits to `return
    None`, signalling the efficient-GRPO path; the autocast/logits code
    after the early return is kept for the non-efficient fallback.
    """
    if function_name != "_get_per_token_logps":
        return function
    def _get_per_token_logps(
        self, model, input_ids, attention_mask, logits_to_keep, compute_efficient = False
    ):
        if True: # os.environ.get('UNSLOTH_USE_NEW_MODEL', '0') == '0':
            return None # Unsloth efficient GRPO
        # Otherwise, calculate normally:
        if not hasattr(self, "_autocast_dtype"):
            self._autocast_dtype = (
                torch.float16
                if os.environ.get("ACCELERATE_MIXED_PRECISION", "fp16") == "fp16"
                else torch.bfloat16
            )
            if os.environ.get("UNSLOTH_FORCE_FLOAT32", "0") == "1":
                self._autocast_dtype = torch.float16
        os.environ["UNSLOTH_RETURN_HIDDEN_STATES"] = "1"
        with torch.amp.autocast(device_type = DEVICE_TYPE, dtype = self._autocast_dtype):
            # We add 1 to `logits_to_keep` because the last logits of the sequence is later excluded
            logits = model(
                input_ids = input_ids,
                attention_mask = attention_mask,
                logits_to_keep = logits_to_keep + 1,
            ).logits
            # logits = logits[:, :-1, :] # (B, L-1, V), exclude the last logit: it corresponds to the next token pred
            return logits
        # input_ids = input_ids[:, -logits_to_keep:]
        # For transformers<=4.48, logits_to_keep argument isn't supported, so here we drop logits ourselves.
        # See https://github.com/huggingface/trl/issues/2770
        # logits = logits[:, -logits_to_keep:]
        # return logits
        # See https://huggingface.co/blog/the_n_implementation_details_of_rlhf_with_ppo#policy-training-implementation-details
        # logits = logits / self.temperature
        # logps = selective_log_softmax(logits, input_ids)
        # row_indices, col_indices = torch.where(logps < -20)
        # # Method 1: Check if tensors have elements
        # if len(row_indices) > 0 and len(col_indices) > 0:
        #     breakpoint() # Breakpoint triggered here
        #     print("Found high values!")
        # return logps # compute logprobs for the input tokens
    function = inspect.getsource(_get_per_token_logps)
    return function
# Register: overrides GRPOTrainer._get_per_token_logps at patch time.
RL_FUNCTIONS["grpo_trainer"].append(grpo_trainer__get_per_token_logps)
def grpo_trainer__get_per_token_logps_and_entropies(function_name, function):
if function_name != "_get_per_token_logps_and_entropies":
return function
    # Just copy over from _get_per_token_logps replacement function above. For now this returns None anyway
    def _get_per_token_logps_and_entropies(
        self,
        model,
        input_ids,
        attention_mask,
        logits_to_keep,
        batch_size = None,
        compute_entropy = False,
        compute_efficient = False,
        *args,
        **kwargs,
    ):
        """Return ``(logits, entropies)`` for a GRPO log-prob pass.

        When ``compute_efficient`` is True, returns ``(None, None)`` so the
        caller falls through to Unsloth's memory-efficient GRPO path, which
        fuses the log-prob computation into the loss kernel. Otherwise runs
        the unwrapped model under CUDA autocast and returns the detached
        raw outputs (hidden states are requested via the
        ``UNSLOTH_RETURN_HIDDEN_STATES`` env toggle) plus optional
        per-token entropies.
        """
        # if True: # os.environ.get('UNSLOTH_USE_NEW_MODEL', '0') == '0':
        # return None, None # logps, entropies Unsloth efficient GRPO
        if compute_efficient:
            # Efficient path: log-probs are computed inside the fused loss.
            return None, None
        else:
            # Otherwise, calculate normally:
            if not hasattr(self, "_autocast_dtype"):
                # Cache the autocast dtype once, derived from accelerate's
                # mixed-precision setting.
                self._autocast_dtype = (
                    torch.float16
                    if os.environ.get("ACCELERATE_MIXED_PRECISION", "fp16") == "fp16"
                    else torch.bfloat16
                )
                # NOTE(review): forcing FLOAT32 selects a float16 autocast
                # region here — presumably fp32 weights + fp16 compute, but
                # worth confirming against upstream intent.
                if os.environ.get("UNSLOTH_FORCE_FLOAT32", "0") == "1":
                    self._autocast_dtype = torch.float16
            # Optional vision-model inputs; all None for text-only models.
            pixel_values, image_grid_thw = (
                kwargs.get("pixel_values", None),
                kwargs.get("image_grid_thw", None),
            )
            pixel_attention_mask, image_sizes = (
                kwargs.get("pixel_attention_mask", None),
                kwargs.get("image_sizes", None),
            )
            # Ask Unsloth's patched forward for hidden states instead of the
            # full vocab projection (reset below).
            os.environ["UNSLOTH_RETURN_HIDDEN_STATES"] = "1"
            unwrapped_model = self.accelerator.unwrap_model(
                model, keep_fp32_wrapper = False
            )
            with torch.amp.autocast(device_type = "cuda", dtype = self._autocast_dtype):
                with _get_inference_mode_context_manager(model):
                    if pixel_values is None:
                        # Text-only: rebuild the attention mask from padding.
                        attention_mask = input_ids != self.processing_class.pad_token_id
                        # NOTE(review): casting to its own dtype is a no-op —
                        # possibly meant to cast to the model dtype; confirm.
                        attention_mask = attention_mask.to(attention_mask.dtype)
                        # We add 1 to `logits_to_keep` because the last logits of the sequence is later excluded
                        logits = unwrapped_model(
                            input_ids = input_ids,
                            attention_mask = attention_mask,
                            pixel_values = pixel_values,
                            image_grid_thw = image_grid_thw,
                            pixel_attention_mask = pixel_attention_mask,
                            image_sizes = image_sizes,
                            # logits_to_keep = logits_to_keep + 1,
                        ).logits
                    else:
                        # Vision path: keep the caller's attention mask and let
                        # the model keep only the completion logits (+1 for the
                        # excluded final position).
                        logits = unwrapped_model(
                            input_ids = input_ids,
                            attention_mask = attention_mask,
                            pixel_values = pixel_values,
                            image_grid_thw = image_grid_thw,
                            pixel_attention_mask = pixel_attention_mask,
                            image_sizes = image_sizes,
                            logits_to_keep = logits_to_keep + 1,
                        ).logits
            entropies = None
            if compute_entropy:
                from trl.trainer.utils import entropy_from_logits
                entropies = entropy_from_logits(logits)
            # Restore normal (vocab-projected) outputs for later forwards.
            os.environ["UNSLOTH_RETURN_HIDDEN_STATES"] = "0"
            # logits = logits[:, :-1, :] # (B, L-1, V), exclude the last logit: it corresponds to the next token pred
            return logits.detach(), entropies  # logps, entropies
            # input_ids = input_ids[:, -logits_to_keep:]
            # For transformers<=4.48, logits_to_keep argument isn't supported, so here we drop logits ourselves.
            # See https://github.com/huggingface/trl/issues/2770
            # logits = logits[:, -logits_to_keep:]
            # return logits
            # See https://huggingface.co/blog/the_n_implementation_details_of_rlhf_with_ppo#policy-training-implementation-details
            # logits = logits / self.temperature
            # logps = selective_log_softmax(logits, input_ids)
            # row_indices, col_indices = torch.where(logps < -20)
            # # Method 1: Check if tensors have elements
            # if len(row_indices) > 0 and len(col_indices) > 0:
            #     breakpoint() # Breakpoint triggered here
            #     print("Found high values!")
            # return logps # compute logprobs for the input tokens
function = inspect.getsource(_get_per_token_logps_and_entropies)
return function
# Register the replacement so Unsloth's patcher swaps it into TRL's GRPOTrainer.
RL_FUNCTIONS["grpo_trainer"].append(grpo_trainer__get_per_token_logps_and_entropies)

# Fetch Unsloth's GRPO loss kernels / helpers from the replacements registry.
grpo_compute_loss = RL_REPLACEMENTS["grpo_compute_loss"]
grpo_compute_loss_slow = RL_REPLACEMENTS["grpo_compute_loss_slow"]
UnslothEfficientGRPO = RL_REPLACEMENTS["UnslothEfficientGRPO"]
grpo_accumulated_loss = RL_REPLACEMENTS["grpo_accumulated_loss"]
grpo_update_SamplingParams = RL_REPLACEMENTS["grpo_update_SamplingParams"]

# Prepend the helpers' source text to the generated trainer module so the
# patched compute_loss can call them. Note grpo_compute_loss_slow is appended
# directly (presumably already source text — unlike the others it is not run
# through inspect.getsource).
RL_PRE_ITEMS["grpo_trainer"].append(inspect.getsource(grpo_compute_loss))
RL_PRE_ITEMS["grpo_trainer"].append(inspect.getsource(UnslothEfficientGRPO))
RL_PRE_ITEMS["grpo_trainer"].append(inspect.getsource(grpo_accumulated_loss))
RL_PRE_ITEMS["grpo_trainer"].append(grpo_compute_loss_slow)
RL_PRE_ITEMS["grpo_trainer"].append(inspect.getsource(grpo_update_SamplingParams))
RL_PRE_ITEMS["grpo_trainer"].append(
    inspect.getsource(_get_inference_mode_context_manager)
)
# Edit _get_per_token_logps to handle mixed precision
def grpo_trainer_compute_loss(function_name, function):
if function_name != "compute_loss":
return function
def compute_loss(
self, model, inputs, return_outputs = False, num_items_in_batch = None
):
if return_outputs:
raise ValueError("The GRPOTrainer does not support returning outputs")
# Compute the per-token log probabilities for the model
prompt_ids, prompt_mask = inputs["prompt_ids"], inputs["prompt_mask"]
completion_ids, completion_mask = (
inputs["completion_ids"],
inputs["completion_mask"],
)
pixel_values, image_grid_thw = (
inputs.get("pixel_values", None),
inputs.get("image_grid_thw", None),
)
pixel_attention_mask, image_sizes = (
inputs.get("pixel_attention_mask", None),
inputs.get("image_sizes", None),
)
num_items_in_batch = inputs.get("num_items_in_batch", None)
sampling_per_token_logps = inputs.get("sampling_per_token_logps", None)
current_gradient_accumulation_steps = self.current_gradient_accumulation_steps
num_processes = self.accelerator.num_processes
input_ids = torch.cat([prompt_ids, completion_ids], dim = 1)
bsz, qlen = input_ids.shape
attention_mask = torch.cat([prompt_mask, completion_mask], dim = 1)
# attention_mask = None
logits_to_keep = completion_ids.size(
1
) # we only need to compute the logits for the completion tokens
_input_ids = input_ids
_logits_to_keep = logits_to_keep
get_logps_func = (
lambda model,
input_ids,
attention_mask,
logits_to_keep,
batch_size = None,
compute_entropy = False,
compute_efficient = False: self._get_per_token_logps(
model, input_ids, attention_mask, logits_to_keep, compute_efficient
)
if hasattr(self, "_get_per_token_logps")
else self._get_per_token_logps_and_entropies(
model,
input_ids,
attention_mask,
logits_to_keep,
batch_size,
compute_entropy,
compute_efficient,
)[0]
) # logps
per_token_logps = get_logps_func(
model, input_ids, attention_mask, logits_to_keep, compute_efficient = True
)
# Compute the KL divergence between the model and the reference model
# _prepare_inputs doesn't return reference log probs anymore. We need to calculate it ourselves.
# https://github.com/huggingface/trl/blob/05bc43e960396581e458195b8388efe6b82cae1f/trl/trainer/grpo_trainer.py#L1328
# if self.beta != 0.0:
# with torch.inference_mode(), model.disable_adapter():
# ref_per_token_logps = per_token_logps = get_logps_func(model, input_ids, attention_mask, logits_to_keep)
# else:
# ref_per_token_logps = None
ref_hidden_states = inputs.get("ref_per_token_logps", None)
# per_token_kl = torch.exp(ref_per_token_logps - per_token_logps) - (ref_per_token_logps - per_token_logps) - 1
# x - x.detach() allows for preserving gradients from x
advantages = inputs["advantages"]
# per_token_loss = torch.exp(per_token_logps - per_token_logps.detach()) * advantages.unsqueeze(1)
# per_token_loss = -(per_token_loss - self.beta * per_token_kl)
# loss = ((per_token_loss * completion_mask).sum(dim=1) / completion_mask.sum(dim=1)).mean()
old_hidden_states = inputs.get("old_per_token_logps", None)
input_ids = input_ids[:, -logits_to_keep:]
# Get logit softcapping and logit scale
logit_softcapping = getattr(model.config, "final_logit_softcapping", 0) # Gemma
if logit_softcapping is None:
logit_softcapping = 0
logit_scale_multiply = getattr(model.config, "logit_scale", 0) # Cohere
if logit_scale_multiply is None:
logit_scale_multiply = 0
logit_scale_divide = getattr(model.config, "logits_scaling", 0) # Granite
if logit_scale_divide is None:
logit_scale_divide = 0
if per_token_logps is not None:
if ref_hidden_states is not None:
ref_hidden_states = ref_hidden_states[
:, :-1, :
] # (B, L-1, V), exclude the last logit: it corresponds to the next token pred
if old_hidden_states is not None:
old_hidden_states = old_hidden_states[
:, :-1, :
] # (B, L-1, V), exclude the last logit: it corresponds to the next token pred
per_token_logps = per_token_logps[
:, :-1, :
] # (B, L-1, V), exclude the last logit: it corresponds to the next token pred
loss, completion_length, mean_kl, delta, flat_is_ratio = (
grpo_compute_loss_slow(
ref_hidden_states,
per_token_logps,
old_hidden_states,
input_ids,
completion_mask,
self.beta,
advantages,
pixel_values = pixel_values,
image_grid_thw = image_grid_thw,
loss_type = self.args.loss_type,
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | true |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/models/granite.py | unsloth/models/granite.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .llama import *
import os
from ._utils import __version__
from unsloth_zoo.utils import _get_dtype
from unsloth_zoo.hf_utils import dtype_from_config
from ..utils.packing import get_packed_info_from_kwargs
from ..utils.attention_dispatch import (
AttentionConfig,
AttentionContext,
run_attention,
select_attention_backend,
SDPA,
)
from .llama import (
LlamaRotaryEmbedding,
LlamaLinearScalingRotaryEmbedding,
)
from .mistral import *
from bitsandbytes.nn import Linear4bit as Bnb_Linear4bit
from peft.tuners.lora import Linear4bit as Peft_Linear4bit
try:
from transformers.models.granite.modeling_granite import (
GraniteAttention,
GraniteDecoderLayer,
GraniteModel,
GraniteForCausalLM,
)
except:
from packaging.version import Version
transformers_version = Version(transformers_version)
if not transformers_version >= Version("4.45.0"):
raise ImportError(
f"Unsloth: Your transformers version of {transformers_version} does not support Granite.\n"
f"The minimum required version is 4.45.0.\n"
f'Try `pip install --upgrade "transformers>=4.45.0"`\n'
f"to obtain the latest transformers build, then restart this session."
)
from transformers.modeling_attn_mask_utils import (
_prepare_4d_causal_attention_mask_for_sdpa,
)
# For Pytorch 2.1.1
try:
from transformers.models.granite.modeling_granite import (
GraniteSdpaAttention,
GraniteFlashAttention2,
)
except:
GraniteSdpaAttention = GraniteAttention
GraniteFlashAttention2 = GraniteAttention
def GraniteAttention_fast_forward(
    self,
    hidden_states: torch.Tensor,
    causal_mask: Optional[BlockDiagonalCausalMask] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: bool = False,
    use_cache: bool = False,
    padding_mask: Optional[torch.LongTensor] = None,
    position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    *args,
    **kwargs,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
    """Training / prefill attention forward for Granite.

    Returns ``(attn_output, attn_weights, past_key_value)``.  Attention
    weights are never materialised (always ``None``); the KV cache tuple is
    returned only when ``use_cache`` is True.
    """
    # Clear inference-time scratch buffers left over from fast generation.
    if hasattr(self, "paged_attention"):
        del self.paged_attention_K
        del self.paged_attention_V
        del self.paged_attention
        del self.temp_QA
        del self.temp_KV
        del self.RH_Q
        del self.attention
    bsz, q_len, _ = hidden_states.size()

    n_heads = self.config.num_attention_heads
    n_groups = self.num_key_value_groups
    n_kv_heads = self.config.num_key_value_heads
    head_dim = self.head_dim
    # No dropout at eval time.
    dropout_p = self.config.attention_dropout if self.training else 0
    assert n_kv_heads * n_groups == n_heads

    # Fused QKV projection, then split into (bsz, heads, q_len, head_dim).
    Q, K, V = self.apply_qkv(self, hidden_states)
    Q = Q.view(bsz, q_len, n_heads, head_dim).transpose(1, 2)
    K = K.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2)
    V = V.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2)

    # Packed (sample-concatenated) sequence metadata, if the batch was packed.
    seq_info = get_packed_info_from_kwargs(kwargs, Q.device)

    kv_seq_len = K.shape[-2]
    if past_key_value is not None:
        kv_seq_len += past_key_value[0].shape[-2]

    # Granite always receives precomputed (cos, sin) rotary tables.
    assert position_embeddings is not None
    cos, sin = position_embeddings
    rope_position_ids = (
        position_ids if position_ids is not None else kwargs.get("position_ids")
    )
    if rope_position_ids is not None:
        # Useful for LongRoPE
        Q, K = fast_rope_embedding(Q, K, cos, sin, rope_position_ids)
    else:
        Q, K = fast_rope_embedding(Q, K, cos, sin)

    # Append the new keys/values to the cache along the sequence axis.
    if past_key_value is not None:
        K = torch.cat([past_key_value[0], K], dim = 2)
        V = torch.cat([past_key_value[1], V], dim = 2)
    past_key_value = (K, V) if use_cache else None

    # Attention module — pick a backend: SDPA when an explicit mask is given,
    # otherwise flash/varlen/xformers via the dispatcher.  Varlen only applies
    # to packed batches with no cache.
    use_varlen = (
        attention_mask is None and seq_info is not None and past_key_value is None
    )
    backend = (
        SDPA if attention_mask is not None else select_attention_backend(use_varlen)
    )
    window = (kv_seq_len, kv_seq_len)
    # Granite applies its own attention multiplier via self.scaling when set.
    softmax_scale = getattr(self, "scaling", None)
    attention_config = AttentionConfig(
        backend = backend,
        n_kv_heads = n_kv_heads,
        n_groups = n_groups,
        flash_dense_kwargs = {
            "causal": True,
            "softmax_scale": softmax_scale,
            "dropout_p": dropout_p,
            "window_size": window,
        },
        flash_varlen_kwargs = {
            "dropout_p": 0.0,
            "softmax_scale": softmax_scale,
            "causal": True,
        },
        # SDPA rejects None-valued kwargs, so filter them out.
        sdpa_kwargs = {
            k: v
            for k, v in {
                "attn_mask": attention_mask,
                "scale": softmax_scale,
                "dropout_p": dropout_p,
            }.items()
            if v is not None
        },
        xformers_kwargs = {
            "scale": softmax_scale,
            "p": dropout_p,
        },
    )
    context = AttentionContext(
        bsz = bsz,
        q_len = q_len,
        kv_seq_len = kv_seq_len,
        n_heads = n_heads,
        head_dim = head_dim,
        requires_grad = hidden_states.requires_grad,
        seq_info = seq_info,
        attention_mask = attention_mask,
        causal_mask = causal_mask,
    )
    A = run_attention(config = attention_config, context = context, Q = Q, K = K, V = V)
    # Merge heads and project out.
    attn_output = A.reshape(bsz, q_len, n_heads * head_dim)
    attn_output = self.apply_o(self, attn_output)
    attn_weights = None
    return attn_output, attn_weights, past_key_value
def GraniteDecoderLayer_fast_forward(
    self,
    hidden_states: torch.Tensor,
    causal_mask: Optional[BlockDiagonalCausalMask] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: Optional[bool] = False,
    use_cache: Optional[bool] = False,
    padding_mask: Optional[torch.LongTensor] = None,
    position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    *args,
    **kwargs,
):
    """Granite decoder layer: attention then MLP, each preceded by RMSNorm,
    with Granite's scaled residual add (``hidden += residual_multiplier * out``).

    Two paths: a fused single-token generation path (gated on
    ``_flag_for_generation``) and the regular training/prefill path.
    """
    # residual_multiplier may be stored on the layer itself or on the config.
    residual_multiplier = (
        self.residual_multiplier
        if hasattr(self, "residual_multiplier")
        else self.config.residual_multiplier
    )
    if use_cache and hasattr(
        self, "_flag_for_generation"
    ):  # past_key_value is not None:
        # Token-by-token generation: use fused inference kernels.
        residual = hidden_states
        hidden_states = fast_rms_layernorm_inference(
            self.input_layernorm, hidden_states
        )
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states = hidden_states,
            causal_mask = causal_mask,
            attention_mask = attention_mask,
            position_ids = position_ids,
            past_key_value = past_key_value,
            output_attentions = output_attentions,
            use_cache = use_cache,
            padding_mask = padding_mask,
            position_embeddings = position_embeddings,
            _flag_for_generation = self._flag_for_generation,
            **kwargs,
        )
        # residual + residual_multiplier * attn_out (Granite residual scaling)
        hidden_states = torch.add(residual, hidden_states, alpha = residual_multiplier)
        # Fully Connected
        residual = hidden_states
        hidden_states = fast_rms_layernorm_inference(
            self.post_attention_layernorm, hidden_states
        )
        hidden_states = fast_swiglu_inference(self.mlp, hidden_states)
        hidden_states = torch.add(residual, hidden_states, alpha = residual_multiplier)
    else:
        # Training / prefill path.
        residual = hidden_states
        hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states)
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states = hidden_states,
            causal_mask = causal_mask,
            attention_mask = attention_mask,
            position_ids = position_ids,
            past_key_value = past_key_value,
            output_attentions = output_attentions,
            use_cache = use_cache,
            padding_mask = padding_mask,
            position_embeddings = position_embeddings,
            **kwargs,
        )
        hidden_states = torch.add(residual, hidden_states, alpha = residual_multiplier)
        # Fully Connected
        residual = hidden_states
        hidden_states = fast_rms_layernorm(self.post_attention_layernorm, hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = torch.add(residual, hidden_states, alpha = residual_multiplier)

    outputs = (hidden_states,)
    if output_attentions:
        outputs += (self_attn_weights,)
    if use_cache:
        outputs += (present_key_value,)
    return outputs
from math import sqrt as math_sqrt

KV_CACHE_INCREMENT = 256  # KV Cache update size
# Bind hot torch callables to module-level names so the per-token decode loop
# skips repeated attribute lookups.
torch_nn_functional_softmax = torch.nn.functional.softmax
torch_matmul = torch.matmul
torch_tanh = torch.tanh
def GraniteAttention_fast_forward_inference(
    self,
    hidden_states: torch.Tensor,
    past_key_value: Optional[Tuple[torch.Tensor]],
    position_ids,
    do_prefill = False,
    attention_mask = None,
    use_sliding_window = False,
    position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
):
    """Single-token decode step for Granite with a pre-allocated paged KV cache.

    On ``do_prefill`` this allocates the KV cache and all scratch buffers on
    the attention module itself; afterwards the cache grows in
    ``KV_CACHE_INCREMENT`` chunks via in-place ``resize_``.  RoPE is applied
    in place using a rotate-half scratch buffer.  Returns ``(A, (Kn, Vn))``
    — the attended output for the single new token and the updated KV views.
    """
    assert (
        position_embeddings is not None
    ), f"Granite model requires position embeddings to be specified"
    Xn = hidden_states
    bsz, _, hd = hidden_states.size()
    K1, V1 = past_key_value
    dtype = Xn.dtype

    n_heads = self.config.num_attention_heads
    n_groups = self.num_key_value_groups
    n_kv_heads = self.config.num_key_value_heads
    head_dim = self.head_dim
    # assert(n_kv_heads * n_groups == n_heads)
    hidden_size = self.config.hidden_size
    attention_size = n_heads * head_dim
    seq_len = K1.shape[-2]
    kv_seq_len = seq_len + 1  # cache length after appending this token
    device = hidden_states.device

    # Prefill phase
    # if not hasattr(self, "paged_attention"):
    if do_prefill:
        # One backing buffer [cache_len, {K,V}, bsz, kv_heads, head_dim];
        # K and V are views into it.
        self.paged_attention = torch.empty(
            (KV_CACHE_INCREMENT + seq_len + 1, 2, bsz, n_kv_heads, head_dim),
            dtype = dtype,
            device = device,
        )
        self.paged_attention_K = self.paged_attention[:, 0]
        self.paged_attention_V = self.paged_attention[:, 1]
        # Copy the prompt's KV (bsz, kv_heads, seq, hd) into seq-major layout.
        self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3)
        self.paged_attention_V[:seq_len] = V1.permute(2, 0, 1, 3)
        # Reusable output buffers for the QKV projections of one token.
        self.temp_QA = torch.empty(
            (2, bsz, 1, attention_size), dtype = dtype, device = device
        )
        self.temp_KV = torch.empty(
            (2, bsz, 1, n_kv_heads * head_dim), dtype = dtype, device = device
        )
        # Scratch for the rotate-half part of RoPE.
        self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = device)
        # Only for Gemma2
        self.temp_O = torch.empty((1, bsz, hidden_size), dtype = dtype, device = device)
        # Pre-sized attention-score buffer (grows with the cache).
        self.attention = torch.empty(
            (bsz, n_heads, 1, KV_CACHE_INCREMENT + seq_len), dtype = dtype, device = device
        )
        self.half_head_dim = head_dim // 2
    elif kv_seq_len >= self.paged_attention.shape[0]:
        # Cache full: extend the backing buffer and score buffer in place.
        self.paged_attention.resize_(
            (
                self.paged_attention.shape[0] + KV_CACHE_INCREMENT,
                2,
                bsz,
                n_kv_heads,
                head_dim,
            )
        )
        self.paged_attention_K = self.paged_attention[:, 0]
        self.paged_attention_V = self.paged_attention[:, 1]
        self.attention.resize_(
            (bsz, n_heads, 1, self.attention.shape[-1] + KV_CACHE_INCREMENT)
        )

    # Project the single new token into the reusable buffers.
    Qn = fast_linear_forward(self.q_proj, Xn, out = self.temp_QA[0])
    Kn = fast_linear_forward(self.k_proj, Xn, out = self.temp_KV[0])
    Vn = fast_linear_forward(self.v_proj, Xn, out = self.temp_KV[1])
    Qn = Qn.view(bsz, 1, n_heads, head_dim).transpose(1, 2)
    Kn = Kn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2)
    Vn = Vn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2)

    # cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len)
    # Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids)
    # In-place RoPE: q' = q*cos + rotate_half(q)*sin, built via RH_Q scratch.
    cos, sin = position_embeddings
    cos, sin = cos[position_ids], sin[position_ids]
    h = self.half_head_dim

    RH_Q = self.RH_Q
    RH_Q[:, :, :, :h] = Qn[:, :, :, h:]
    RH_Q[:, :, :, h:] = Qn[:, :, :, :h]
    torch.neg(RH_Q[:, :, :, :h], out = RH_Q[:, :, :, :h])
    Qn *= cos
    Qn.addcmul_(RH_Q, sin)

    # Reuse the leading kv-head slice of RH_Q as the key scratch.
    RH_K = RH_Q[
        :, :n_kv_heads, :, :
    ]  # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, device = "cuda:0")
    RH_K[:, :, :, :h] = Kn[:, :, :, h:]
    RH_K[:, :, :, h:] = Kn[:, :, :, :h]
    torch.neg(RH_K[:, :, :, :h], out = RH_K[:, :, :, :h])
    Kn *= cos
    Kn.addcmul_(RH_K, sin)

    # New KV cache
    # Kn = torch.cat([K1, Kn], dim = 2)
    # Vn = torch.cat([V1, Vn], dim = 2)
    # Write the new token's KV into the paged cache, then view the full cache.
    self.paged_attention_K[seq_len] = Kn.permute(2, 0, 1, 3)
    self.paged_attention_V[seq_len] = Vn.permute(2, 0, 1, 3)
    Kn = self.paged_attention_K[:kv_seq_len].permute(1, 2, 0, 3)
    Vn = self.paged_attention_V[:kv_seq_len].permute(1, 2, 0, 3)

    # Grouped query attention
    _, _, cached_len, _ = Kn.shape
    if n_groups != 1:
        # Expand KV heads to match the query heads (no copy until reshape).
        Kn = Kn[:, :, None, :, :].expand(
            bsz, n_kv_heads, n_groups, cached_len, head_dim
        )
        Vn = Vn[:, :, None, :, :].expand(
            bsz, n_kv_heads, n_groups, cached_len, head_dim
        )
        Kn = Kn.reshape(bsz, n_heads, cached_len, head_dim)
        Vn = Vn.reshape(bsz, n_heads, cached_len, head_dim)
    # else:
    #     Kn, Vn = Kn, Vn
    # pass

    # Granite's attention multiplier, applied to Q before the score matmul.
    Qn *= self.scaling
    # Scores written into the pre-sized buffer; softmax in fp32 for stability.
    A = torch_matmul(Qn, Kn.transpose(2, 3), out = self.attention[:, :, :, :cached_len])
    # if attention_mask is not None: A += attention_mask # Must add attention_mask for batched
    A[:] = torch_nn_functional_softmax(A, dim = -1, dtype = torch.float32)  # .to(A.dtype)
    A = torch_matmul(A, Vn, out = Qn)
    # else:
    #     A = scaled_dot_product_attention(Qn, Kn, Vn, attn_mask = attention_mask, is_causal = False)
    # pass
    A = A.transpose(1, 2)
    A = A.reshape(bsz, 1, attention_size)
    A = fast_linear_forward(self.o_proj, A, out = self.temp_O)
    return A, (Kn, Vn)
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825
# @torch.inference_mode
def GraniteModel_fast_forward_inference(
    self,
    input_ids,
    past_key_values,
    position_ids,
    attention_mask = None,
):
    """Fast single-step decode over the whole Granite stack.

    Runs embedding (scaled by ``embedding_multiplier``), every decoder layer
    via :func:`GraniteAttention_fast_forward_inference`, and the final norm.
    Returns a ``BaseModelOutputWithPast`` whose ``past_key_values`` is the
    list of per-layer ``(K, V)`` views into the paged caches.
    """
    input_ids = input_ids[:, : self.max_seq_length]
    hidden_states = self.model.embed_tokens(input_ids)
    hidden_states = hidden_states.to(_get_dtype(dtype_from_config(self.config)))
    # Granite scales the embedding output.
    hidden_states *= self.model.embedding_multiplier
    # residual_multiplier may be on the model (via patched_init) or the config.
    residual_multiplier = (
        self.residual_multiplier
        if hasattr(self, "residual_multiplier")
        else self.config.residual_multiplier
    )

    bsz, q_len, hd = hidden_states.shape
    seq_len = past_key_values[0][0].shape[-2]
    if bsz != 1:
        # Batched decode needs an explicit 4D mask to neutralise padding.
        attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
            attention_mask,
            (bsz, q_len),
            hidden_states,
            seq_len,
        )
    else:
        attention_mask = None

    position_embeddings = self.model.rotary_emb.get_cached(
        self.max_seq_length, hidden_states.device.index
    )

    next_decoder_cache = []
    for idx, decoder_layer in enumerate(self.model.layers):
        # Multi-GPU layer sharding: move activations to this layer's device.
        device_index = getattr(decoder_layer, "_per_layer_device_index", 0)
        hidden_states, position_ids = move_to_device(
            device_index, hidden_states, position_ids
        )
        residual = hidden_states
        hidden_states = fast_rms_layernorm_inference(
            decoder_layer.input_layernorm, hidden_states
        )
        hidden_states, present_key_value = GraniteAttention_fast_forward_inference(
            decoder_layer.self_attn,
            hidden_states = hidden_states,
            past_key_value = past_key_values[idx],
            position_ids = position_ids,
            attention_mask = attention_mask,
            do_prefill = not hasattr(decoder_layer.self_attn, "paged_attention"),
            position_embeddings = position_embeddings,
        )
        # Granite's scaled residual adds (see GraniteDecoderLayer_fast_forward).
        hidden_states = torch.add(residual, hidden_states, alpha = residual_multiplier)

        residual = hidden_states
        hidden_states = fast_rms_layernorm_inference(
            decoder_layer.post_attention_layernorm, hidden_states
        )
        hidden_states = fast_swiglu_inference(decoder_layer.mlp, hidden_states)
        hidden_states = torch.add(residual, hidden_states, alpha = residual_multiplier)

        next_decoder_cache.append(present_key_value)

    hidden_states = fast_rms_layernorm_inference(self.model.norm, hidden_states)
    return BaseModelOutputWithPast(
        last_hidden_state = hidden_states,
        past_key_values = next_decoder_cache,
        hidden_states = [],
        attentions = [],
    )
class GraniteRotaryEmbedding(LlamaRotaryEmbedding):
    """Granite rotary embedding — behaviourally identical to the Llama one;
    kept as a distinct class so transformers' Granite module can be patched
    to point at it (see FastGraniteModel.pre_patch)."""

    def __init__(self, config):
        super().__init__(config = config)
def patched_init(original_init):
    """Wrap ``original_init`` so the model config is stashed on the instance.

    ``GraniteDecoderLayer_fast_forward`` can read ``residual_multiplier`` from
    the layer or config, but ``GraniteModel_fast_forward_inference`` has no
    direct access to either — so we capture the entire config at construction
    time in case it is needed later.
    """

    def new_init(self, *positional, **named):
        # Config may arrive as the first positional argument or as a keyword.
        if "config" in named:
            cfg = named["config"]
        elif positional:
            cfg = positional[0]
        else:
            cfg = None
        if cfg is not None:
            self.config = cfg
        original_init(self, *positional, **named)

    return new_init
class FastGraniteModel(FastLlamaModel):
    """Granite support: monkey-patches transformers' Granite classes with
    Unsloth fast forwards (pre_patch) and repairs tied weights / dtypes on a
    loaded model (post_patch)."""

    @staticmethod
    def pre_patch():
        """Install the fast forwards on transformers' Granite classes.

        Must run before any Granite model is instantiated.
        """
        # Optionally generate a patched __init__ supporting linear RoPE scaling.
        init_name, function = patch_linear_scaling(
            model_name = "granite",
            rope_module = GraniteRotaryEmbedding,
            scaled_rope_module = LlamaLinearScalingRotaryEmbedding,
            attention_module = GraniteAttention,
        )
        if init_name is not None:
            exec(function, globals())
            GraniteAttention.__init__ = eval(init_name)
        GraniteAttention.forward = GraniteAttention_fast_forward
        GraniteSdpaAttention.forward = GraniteAttention_fast_forward
        GraniteFlashAttention2.forward = GraniteAttention_fast_forward
        GraniteDecoderLayer.forward = GraniteDecoderLayer_fast_forward
        GraniteModel.forward = LlamaModel_fast_forward
        GraniteForCausalLM.forward = CausalLM_fast_forward(
            GraniteModel_fast_forward_inference
        )
        # Capture config on construction so residual_multiplier is reachable
        # at inference time (see patched_init).
        GraniteForCausalLM.__init__ = patched_init(GraniteForCausalLM.__init__)
        PeftModelForCausalLM.forward = PeftModel_fast_forward
        fix_prepare_inputs_for_generation(GraniteForCausalLM)

        # Replace the rotary-embedding class module-wide so newly built models
        # pick up the Unsloth version.
        import transformers.models.granite.modeling_granite

        transformers.models.granite.modeling_granite.GraniteRotaryEmbedding = (
            GraniteRotaryEmbedding
        )
        return

    @staticmethod
    def post_patch(model, tokenizer):
        """Fix up a freshly loaded Granite model; returns (model, tokenizer)."""
        # Torch.compile fails on embedding matrix??
        # Workaround randomnly fixes it for torch versions < 2.2
        model.model.embed_tokens = torch.nn.Embedding.from_pretrained(
            model.model.embed_tokens.weight
        )
        model.config.update({"unsloth_version": __version__})

        # We also do this for the lm_head — rebuild it as a bare Linear that
        # shares the existing weight tensor.
        lm_head = torch.nn.Linear(1, 1, bias = None)
        del lm_head.weight
        lm_head.weight = model.lm_head.weight
        lm_head.in_features = lm_head.weight.shape[1]
        lm_head.out_features = lm_head.weight.shape[0]
        model.lm_head = lm_head

        # Granite has tied weights! This means lm_head == embed_tokens.
        # If the rebuild above broke the tie, re-point lm_head at the
        # embedding weight.
        if (
            model.model.embed_tokens.weight.data_ptr()
            != model.lm_head.weight.data_ptr()
        ):
            lm_head = torch.nn.Linear(1, 1, bias = None)
            del lm_head.weight
            lm_head.weight = model.model.embed_tokens.weight
            lm_head.in_features = lm_head.weight.shape[1]
            lm_head.out_features = lm_head.weight.shape[0]
            model.lm_head = lm_head

        # Also patch all dtypes - BnB seems to not allocate the correct type?
        # BnB default dtype seems to be float16!
        correct_dtype = lm_head.weight.dtype

        for name, module in model.named_modules():
            if isinstance(module, (Bnb_Linear4bit, Peft_Linear4bit)):
                weight = module.weight
                quant_state = weight.quant_state

                if type(quant_state) is list:
                    # BnB seems to have float16 as default!
                    module.weight.quant_state[2] = (
                        correct_dtype  # Cast to correct dtype
                    )
                else:
                    # https://github.com/TimDettmers/bitsandbytes/pull/763/files
                    quant_state.dtype = correct_dtype

            # Downcast RoPE embedding to correct data type
            if name.endswith("rotary_emb") or hasattr(module, "cos_cached"):
                if hasattr(module, "cos_cached") and (
                    module.cos_cached.dtype != correct_dtype
                ):
                    module.cos_cached = module.cos_cached.to(correct_dtype)
                    module.sin_cached = module.sin_cached.to(correct_dtype)
                elif hasattr(module, "short_cos_cached") and (
                    module.short_cos_cached.dtype != correct_dtype
                ):
                    module.short_cos_cached = module.short_cos_cached.to(correct_dtype)
                    module.short_sin_cached = module.short_sin_cached.to(correct_dtype)

        # Clear deleted GPU items
        import gc

        for _ in range(3):
            gc.collect()
            torch.cuda.empty_cache()
        return model, tokenizer
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/models/falcon_h1.py | unsloth/models/falcon_h1.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .llama import *
import os
from ._utils import __version__
from unsloth_zoo.utils import Version, _get_dtype
from unsloth_zoo.hf_utils import dtype_from_config
from ..utils.packing import get_packed_info_from_kwargs
from ..utils.attention_dispatch import (
AttentionConfig,
AttentionContext,
run_attention,
select_attention_backend,
SDPA,
)
from .llama import (
LlamaRotaryEmbedding,
LlamaLinearScalingRotaryEmbedding,
_LlamaModel_fast_forward_inference,
)
try:
from transformers.models.falcon_h1.modeling_falcon_h1 import (
FalconH1Attention,
FalconH1DecoderLayer,
FalconH1Model,
FalconH1ForCausalLM,
FalconHybridMambaAttentionDynamicCache,
)
except:
from transformers import __version__ as transformers_version
transformers_version = Version(transformers_version)
if not transformers_version >= Version(
"4.53.0"
): # TODO: Update when transformers is updated
raise ImportError(
f"Unsloth: Your transformers version of {transformers_version} does not support FalconH1.\n"
f"The minimum required version is 4.53.0.\n"
f'Try `pip install --upgrade "transformers>=4.53.0"`\n'
f"to obtain the latest transformers build, then restart this session."
)
from transformers.modeling_attn_mask_utils import (
_prepare_4d_causal_attention_mask_for_sdpa,
)
from transformers.utils import (
is_torchdynamo_compiling,
)
# For Pytorch 2.1.1
try:
from transformers.models.falcon_h1.modeling_falcon_h1 import (
FalconH1Attention,
)
except ModuleNotFoundError:
# if we are on an old version of transformers technically it should fail in the try except above
# but if somehow we make it here, we need to raise an error since FalconH1Attention is not available
# or renamed
raise ImportError(
"Unsloth: Could not import FalconH1Attention from transformers.models.falcon_h1.modeling_falcon_h1."
)
def FalconH1Attention_fast_forward(
    self,
    hidden_states: torch.Tensor,
    causal_mask: Optional[BlockDiagonalCausalMask] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: bool = False,
    use_cache: bool = False,
    padding_mask: Optional[torch.LongTensor] = None,
    position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    *args,
    **kwargs,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
    """Training / prefill attention forward for FalconH1.

    Like the Llama path, but FalconH1 multiplies the key states by
    ``config.key_multiplier`` and extends the RoPE cache dynamically.
    Returns ``(attn_output, attn_weights, past_key_value)``; attention
    weights are never materialised (always ``None``).
    """
    # Clear inference-time scratch buffers left over from fast generation.
    if hasattr(self, "paged_attention"):
        del self.paged_attention_K
        del self.paged_attention_V
        del self.paged_attention
        del self.temp_QA
        del self.temp_KV
        del self.RH_Q
        del self.attention
    bsz, q_len, _ = hidden_states.size()

    n_heads = self.config.num_attention_heads
    n_groups = self.num_key_value_groups
    n_kv_heads = self.config.num_key_value_heads
    head_dim = self.head_dim
    assert n_kv_heads * n_groups == n_heads

    # Fused QKV projection.
    Q, K, V = self.apply_qkv(self, hidden_states)
    Q = Q.view(bsz, q_len, n_heads, head_dim)
    K = K.view(bsz, q_len, n_kv_heads, head_dim)
    V = V.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2)

    # Packed (sample-concatenated) sequence metadata, if the batch was packed.
    seq_info = get_packed_info_from_kwargs(kwargs, hidden_states.device)

    # Falcon H1 multiplies key states by a multiplier
    K = K * self.config.key_multiplier
    Q = Q.transpose(1, 2)
    K = K.transpose(1, 2)

    kv_seq_len = K.shape[-2]
    if past_key_value is not None:
        kv_seq_len += past_key_value[0].shape[-2]

    # Extend RoPE dynamically to fit in VRAM: reuse precomputed (cos, sin)
    # when long enough, otherwise grow the cached tables.
    if position_embeddings and kv_seq_len <= position_embeddings[0].shape[0]:
        cos, sin = position_embeddings
    else:
        rotary_emb = self.rotary_emb
        rotary_emb.extend_rope_embedding(V, seq_len = kv_seq_len)
        cos, sin = rotary_emb.get_cached(kv_seq_len, Q.device.index)

    rope_position_ids = (
        position_ids if position_ids is not None else kwargs.get("position_ids")
    )
    # Useful for LongRoPE
    Q, K = fast_rope_embedding(Q, K, cos, sin, rope_position_ids)

    # Append the new keys/values to the cache along the sequence axis.
    if past_key_value is not None:
        K = torch.cat([past_key_value[0], K], dim = 2)
        V = torch.cat([past_key_value[1], V], dim = 2)
    past_key_value = (K, V) if use_cache else None

    # Attention module — no sliding window for FalconH1, so the window check
    # below is always satisfied; varlen applies only to packed, cache-free runs.
    window = (-1, -1)
    use_varlen = (
        attention_mask is None
        and seq_info is not None
        and past_key_value is None
        and window == (-1, -1)
    )
    backend = (
        SDPA if attention_mask is not None else select_attention_backend(use_varlen)
    )
    attention_config = AttentionConfig(
        backend = backend,
        n_kv_heads = n_kv_heads,
        n_groups = n_groups,
        flash_dense_kwargs = {
            "causal": True,
            "window_size": (kv_seq_len, kv_seq_len),
        },
        flash_varlen_kwargs = {
            "dropout_p": 0.0,
            "softmax_scale": None,
            "causal": True,
        },
        sdpa_kwargs = {} if attention_mask is None else {"attn_mask": attention_mask},
    )
    context = AttentionContext(
        bsz = bsz,
        q_len = q_len,
        kv_seq_len = kv_seq_len,
        n_heads = n_heads,
        head_dim = head_dim,
        requires_grad = hidden_states.requires_grad,
        seq_info = seq_info,
        attention_mask = attention_mask,
        causal_mask = causal_mask,
    )
    A = run_attention(config = attention_config, context = context, Q = Q, K = K, V = V)
    # Merge heads and project out.
    attn_output = A.reshape(bsz, q_len, n_heads * head_dim)
    attn_output = self.apply_o(self, attn_output)
    attn_weights = None
    return attn_output, attn_weights, past_key_value
# Module-level alias to skip attribute lookups in the per-token decode loop.
torch_matmul = torch.matmul
def FalconH1Attention_fast_forward_inference(
    self,
    hidden_states: torch.Tensor,
    past_key_value: Optional[Tuple[torch.Tensor]],
    position_ids,
    do_prefill = False,
    attention_mask = None,
):
    """
    Decode exactly one new token (q_len == 1) for FalconH1 attention using
    pre-allocated, growable scratch buffers stored on `self`.

    https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L406
    Fast inference using KV cache.
    QK^T can be computed in 4 chunks
    [Q, q] @ [K, k].T where q, k are the new tokens.
    [QK^T, Qk^T]
    [qK^T, qk^T]
    Since the attention mask wipes Qk^T, we just get
    [QK^T,    0]
    [qK^T, qk^T]
    Since softmax is row-wise, we get
    softmax([QK^T,    0])
    softmax([qK^T, qk^T])
    We then multiply by [V]
                        [v]
    softmax([QK^T,    0]) [softmax(QK^T)V] *
    softmax([qK^T, qk^T]) [softmax([qK^T, qk^T]) @ [V, v]]
    But notice * [softmax(QK^T)V] is just the last attention.
    We just need to compute the last final row.
    This means we can pass in a row of Q, but we need to
    remember K and V, which are called the KV cache.

    Args:
        hidden_states: embedding of the newest token; shaped (bsz, 1, hidden)
            per the `bsz, _, hd = hidden_states.size()` unpack below.
        past_key_value: (K, V) cache tuple; K is indexed as
            (bsz, n_kv_heads, seq_len, head_dim) via `K1.shape[-2]`.
        position_ids: indices into the cached RoPE cos/sin tables.
        do_prefill: when True, allocate all scratch buffers and copy the
            incoming cache into them (first decode step for this module).
        attention_mask: optional mask, used only on the batched (bsz > 1)
            scaled_dot_product_attention path.

    Returns:
        (attn_output, (K, V)): o_proj output plus the updated KV cache views
        that include the newly appended token.
    """
    Xn = hidden_states
    bsz, _, hd = hidden_states.size()
    K1, V1 = past_key_value
    dtype = Xn.dtype
    n_heads = self.config.num_attention_heads
    n_groups = self.num_key_value_groups
    n_kv_heads = self.config.num_key_value_heads
    head_dim = self.head_dim
    # assert(n_kv_heads * n_groups == n_heads)
    hidden_size = self.config.hidden_size
    attention_size = n_heads * head_dim
    seq_len = K1.shape[-2]
    kv_seq_len = seq_len + 1  # cache length after appending this token
    # Prefill phase
    # if not hasattr(self, "paged_attention"):
    device = hidden_states.device
    if do_prefill:
        # One contiguous buffer holds both K ([:, 0]) and V ([:, 1]) in
        # seq-major layout; over-allocated by KV_CACHE_INCREMENT so most
        # decode steps need no reallocation.
        self.paged_attention = torch.empty(
            (KV_CACHE_INCREMENT + seq_len + 1, 2, bsz, n_kv_heads, head_dim),
            dtype = dtype,
            device = device,
        )
        self.paged_attention_K = self.paged_attention[:, 0]
        self.paged_attention_V = self.paged_attention[:, 1]
        # Copy the incoming cache into seq-major (seq, bsz, heads, head_dim) layout.
        self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3)
        self.paged_attention_V[:seq_len] = V1.permute(2, 0, 1, 3)
        # Scratch output buffers for the Q/K/V/O projections, reused each step.
        self.temp_QA = torch.empty(
            (2, bsz, 1, attention_size), dtype = dtype, device = device
        )
        self.temp_KV = torch.empty(
            (2, bsz, 1, n_kv_heads * head_dim), dtype = dtype, device = device
        )
        # Workspace for the "rotate half" tensor used by in-place RoPE below.
        self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = device)
        # Mistral Nemo 12b has weird dimensions
        if attention_size != hidden_size:
            self.temp_O = torch.empty((1, bsz, hidden_size), dtype = dtype, device = device)
        else:
            # Reuse half of temp_QA as the o_proj output buffer.
            self.temp_O = self.temp_QA[1][:, :, :hidden_size]
        # Attention-score buffer, grown in lockstep with the KV cache.
        self.attention = torch.empty(
            (bsz, n_heads, 1, KV_CACHE_INCREMENT + seq_len), dtype = dtype, device = device
        )
        self.scalar = 1.0 / math_sqrt(self.head_dim)
        self.half_head_dim = head_dim // 2
    elif kv_seq_len >= self.paged_attention.shape[0]:
        # Cache full: grow cache and score buffers in place by one increment.
        self.paged_attention.resize_(
            (
                self.paged_attention.shape[0] + KV_CACHE_INCREMENT,
                2,
                bsz,
                n_kv_heads,
                head_dim,
            )
        )
        self.paged_attention_K = self.paged_attention[:, 0]
        self.paged_attention_V = self.paged_attention[:, 1]
        self.attention.resize_(
            (bsz, n_heads, 1, self.attention.shape[-1] + KV_CACHE_INCREMENT)
        )
    # Project the new token straight into the pre-allocated scratch buffers.
    Qn = fast_linear_forward(self.q_proj, Xn, out = self.temp_QA[0])
    Kn = fast_linear_forward(self.k_proj, Xn, out = self.temp_KV[0])
    # FalconH1 scales key states by a config-defined multiplier.
    Kn = Kn * self.config.key_multiplier
    Vn = fast_linear_forward(self.v_proj, Xn, out = self.temp_KV[1])
    Qn = Qn.view(
        bsz, 1, n_heads, head_dim
    )  # .transpose(1, 2) # we will transpose after normalisation
    Kn = Kn.view(
        bsz, 1, n_kv_heads, head_dim
    )  # .transpose(1, 2) # we will transpose after normalisation
    Vn = Vn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2)
    Qn = Qn.transpose(1, 2)
    Kn = Kn.transpose(1, 2)
    # cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len)
    # Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids)
    # Need to do it prior 2 steps before hitting full on short KV cache
    # or else error
    self.rotary_emb.extend_rope_embedding(Vn, seq_len + 2)
    cos, sin = self.rotary_emb.get_cached(kv_seq_len, Qn.device.index)
    cos = cos[position_ids].unsqueeze(1)
    sin = sin[position_ids].unsqueeze(1)
    h = self.half_head_dim
    # In-place RoPE: build rotate_half(Q) inside RH_Q, then Q = Q*cos + RH_Q*sin.
    RH_Q = self.RH_Q
    RH_Q[:, :, :, :h] = Qn[:, :, :, h:]
    RH_Q[:, :, :, h:] = Qn[:, :, :, :h]
    RH_Q[:, :, :, :h].neg_()  # torch.neg(RH_Q[:,:,:,:h], out = RH_Q[:,:,:,:h])
    Qn *= cos
    Qn.addcmul_(RH_Q, sin)
    # Reuse the first n_kv_heads slots of RH_Q as rotate_half(K) workspace
    # (safe: Q's rotation has already been consumed above).
    RH_K = RH_Q[
        :, :n_kv_heads, :, :
    ]  # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, device = "cuda:0")
    RH_K[:, :, :, :h] = Kn[:, :, :, h:]
    RH_K[:, :, :, h:] = Kn[:, :, :, :h]
    RH_K[:, :, :, :h].neg_()  # torch.neg(RH_K[:,:,:,:h], out = RH_K[:,:,:,:h])
    Kn *= cos
    Kn.addcmul_(RH_K, sin)
    # New KV cache
    # Kn = torch.cat([K1, Kn], dim = 2)
    # Vn = torch.cat([V1, Vn], dim = 2)
    # Append the new row, then view the cache back as (bsz, n_kv_heads, kv_seq_len, head_dim).
    self.paged_attention_K[seq_len] = Kn.permute(2, 0, 1, 3)
    self.paged_attention_V[seq_len] = Vn.permute(2, 0, 1, 3)
    Kn = self.paged_attention_K[:kv_seq_len].permute(1, 2, 0, 3)
    Vn = self.paged_attention_V[:kv_seq_len].permute(1, 2, 0, 3)
    # Handle sliding windows
    sliding_window = getattr(self.config, "sliding_window", None)
    if sliding_window is not None and kv_seq_len > sliding_window:
        # From https://github.com/huggingface/transformers/blob/main/src/transformers/models/mistral/modeling_mistral.py#L193
        slicing_tokens = 1 - sliding_window
        Knn = Kn[:, :, slicing_tokens:, :]  # .contiguous()
        Vnn = Vn[:, :, slicing_tokens:, :]  # .contiguous()
    else:
        Knn, Vnn = Kn, Vn
    # Grouped query attention
    _, _, cached_len, _ = Knn.shape
    # NOTE: parses as `bsz == 1 or ((not SDPA_HAS_GQA) and n_groups != 1)`:
    # expand KV heads to the full head count when GQA must be materialised.
    if bsz == 1 or not SDPA_HAS_GQA and n_groups != 1:
        Knn = Knn[:, :, None, :, :].expand(
            bsz, n_kv_heads, n_groups, cached_len, head_dim
        )
        Vnn = Vnn[:, :, None, :, :].expand(
            bsz, n_kv_heads, n_groups, cached_len, head_dim
        )
        Knn = Knn.reshape(bsz, n_heads, cached_len, head_dim)
        Vnn = Vnn.reshape(bsz, n_heads, cached_len, head_dim)
    # else:
    #     Knn, Vnn = Knn, Vnn
    # pass
    # Attention
    if bsz == 1:
        # Single sequence: hand-rolled matmul + softmax into reused buffers.
        Qn *= self.scalar  # See https://github.com/ggerganov/llama.cpp/issues/7805#issuecomment-2153349963
        # It seems like doing (Q * scalar) @ K is better than (Q @ K) * scalar to stop overflows
        A = torch_matmul(
            Qn, Knn.transpose(2, 3), out = self.attention[:, :, :, :cached_len]
        )
        # if attention_mask is not None: A += attention_mask # Must add attention_mask for batched
        A[:] = torch_nn_functional_softmax(
            A, dim = -1, dtype = torch.float32
        )  # .to(A.dtype)
        A = torch_matmul(A, Vnn, out = Qn)
    else:
        # Batched decode: defer to SDPA (with native GQA when available).
        if SDPA_HAS_GQA:
            A = scaled_dot_product_attention(
                Qn, Knn, Vnn, attn_mask = attention_mask, is_causal = False, enable_gqa = True
            )
        else:
            A = scaled_dot_product_attention(
                Qn, Knn, Vnn, attn_mask = attention_mask, is_causal = False
            )
    A = A.transpose(1, 2)
    A = A.reshape(bsz, 1, attention_size)
    A = fast_linear_forward(self.o_proj, A, out = self.temp_O)
    return A, (Kn, Vn)
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/falcon_h1/modeling_falcon_h1.py
def FalconH1DecoderLayer_fast_forward(
    self,
    hidden_states: torch.Tensor,
    causal_mask = None,
    attention_mask: Optional[torch.Tensor] = None,
    mamba_attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    cache_position: Optional[torch.LongTensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: Optional[bool] = False,
    use_cache: Optional[bool] = False,
    padding_mask: Optional[torch.LongTensor] = None,
    position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    *args,
    **kwargs,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
    """
    FalconH1 hybrid decoder layer: attention and Mamba (SSM) branches both
    consume the same RMS-normed input, are scaled by their respective
    `attn_out_multiplier` / `ssm_out_multiplier`, summed, and added to the
    residual; a feed-forward block with its own norm and residual follows.

    Args:
        hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
        attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
            `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under
            returned tensors for more detail.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
    """
    if use_cache and hasattr(self, "_flag_for_generation"):
        # Token-by-token generation path: uses the fused in-place inference
        # kernels (fast_rms_layernorm_inference / fast_swiglu_inference).
        residual = hidden_states
        hidden_states = fast_rms_layernorm_inference(
            self.input_layernorm, hidden_states
        )
        attention_hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states = hidden_states,
            causal_mask = causal_mask,
            attention_mask = attention_mask,
            position_ids = position_ids,
            past_key_value = past_key_value,
            output_attentions = output_attentions,
            use_cache = use_cache,
            padding_mask = padding_mask,
            position_embeddings = position_embeddings,
            **kwargs,
        )
        attention_hidden_states = attention_hidden_states * self.attn_out_multiplier
        # Mamba branch runs on the SAME normed hidden_states (parallel, not sequential).
        mamba_hidden_states = self.mamba(
            hidden_states = hidden_states,
            cache_params = past_key_value,
            cache_position = cache_position,
            attention_mask = mamba_attention_mask,
        )
        mamba_hidden_states = mamba_hidden_states * self.ssm_out_multiplier
        hidden_states = mamba_hidden_states + attention_hidden_states
        # In-place add is intentional here: the sum above produced a fresh tensor.
        hidden_states += residual
        # Fully Connected
        residual = hidden_states
        hidden_states = fast_rms_layernorm_inference(
            self.pre_ff_layernorm, hidden_states
        )
        hidden_states = fast_swiglu_inference(self.feed_forward, hidden_states)
        hidden_states += residual
    else:
        # Training / prefill path with the autograd-friendly kernels.
        residual = hidden_states
        hidden_states = fast_rms_layernorm(self.input_layernorm, hidden_states)
        mamba_hidden_states = self.mamba(
            hidden_states = hidden_states,
            cache_params = past_key_value,
            cache_position = cache_position,
            attention_mask = mamba_attention_mask,
        )
        mamba_hidden_states = mamba_hidden_states * self.ssm_out_multiplier
        attention_hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states = hidden_states,
            causal_mask = causal_mask,
            attention_mask = attention_mask,
            position_ids = position_ids,
            past_key_value = past_key_value,
            output_attentions = output_attentions,
            use_cache = use_cache,
            padding_mask = padding_mask,
            position_embeddings = position_embeddings,
            **kwargs,
        )
        attention_hidden_states = attention_hidden_states * self.attn_out_multiplier
        hidden_states = mamba_hidden_states + attention_hidden_states
        # residual connection after attention + Mamba
        hidden_states = residual + hidden_states
        # Fully Connected
        residual = hidden_states
        hidden_states = fast_rms_layernorm(self.pre_ff_layernorm, hidden_states)
        hidden_states = self.feed_forward(hidden_states)
        hidden_states = residual + hidden_states
    outputs = (hidden_states,)
    if output_attentions:
        outputs += (self_attn_weights,)
    if use_cache:
        outputs += (present_key_value,)
    return outputs
def _FalconH1_fast_forward_inference(
    attention_fast_forward_inference = FalconH1Attention_fast_forward_inference,
    mlp_fast_forward_inference = fast_swiglu_inference,
):
    """
    Build a single-token FalconH1 model forward with pluggable attention and
    MLP inference kernels; returns the inner forward function.
    """
    # This makes the attention and MLP customisable.
    # Now for models like qwen3 or cohere which use custom attention operations, we can use this function
    def FalconH1Model_fast_forward_inference_custom(
        self,
        input_ids,
        past_key_values,
        position_ids,
        cache_position = None,
        attention_mask = None,
        mamba_attention_mask = None,
    ):
        """Run all decoder layers for exactly one new token (q_len == 1),
        reusing shared scratch buffers to minimise memory traffic."""
        input_ids = input_ids[:, : self.max_seq_length]
        bsz, q_len = input_ids.shape
        hd = self.config.hidden_size
        mlp_size = self.config.intermediate_size
        # FalconH1 scales the MLP gate/down branches by config multipliers.
        gate_multiplier, down_multiplier = self.config.mlp_multipliers
        X = self.model.embed_tokens(input_ids)
        X = X * self.config.embedding_multiplier
        X = X.to(_get_dtype(dtype_from_config(self.config)))
        bsz, q_len, hd = X.shape
        assert q_len == 1
        # Get saved buffers to reduce memory movement
        # NOTE(review): buffers are hard-coded to "cuda:0" while X lives on the
        # embedding device -- presumably single-GPU decoding only; verify for
        # multi-GPU device maps.
        residual = torch.empty((bsz, q_len, hd), dtype = torch.float32, device = "cuda:0")
        _XX = torch.empty((2, bsz, q_len, hd), dtype = torch.float32, device = "cuda:0")
        XX, XX2 = _XX[0], _XX[1]
        variance = torch.empty((bsz, q_len, 1), dtype = torch.float32, device = "cuda:0")
        temp_mlp = torch.empty((2, bsz, 1, mlp_size), dtype = X.dtype, device = "cuda:0")
        temp_gate, temp_up = temp_mlp[0], temp_mlp[1]
        seq_len = past_key_values[0][0].shape[-2]
        if bsz != 1:
            # Batched decode needs an explicit 4D SDPA mask; bsz == 1 can skip it.
            attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
                attention_mask,
                (bsz, q_len),
                X,
                seq_len,
                sliding_window = getattr(self.config, "sliding_window", None),
            )
        else:
            attention_mask = None
        next_decoder_cache = []
        for idx, decoder_layer in enumerate(self.model.layers):
            residual.copy_(X)  # residual = X
            X = fast_rms_layernorm_inference(
                decoder_layer.input_layernorm,
                X,
                XX = XX,
                XX2 = XX2,
                variance = variance,
            )
            # Attention branch (input scaled by attention_in_multiplier);
            # do_prefill allocates this layer's KV buffers on the first step.
            attention_hidden_states, present_key_value = (
                attention_fast_forward_inference(
                    decoder_layer.self_attn,
                    hidden_states = X * decoder_layer.attention_in_multiplier,
                    past_key_value = past_key_values[idx],
                    position_ids = position_ids,
                    attention_mask = attention_mask,
                    do_prefill = not hasattr(decoder_layer.self_attn, "paged_attention"),
                )
            )
            attention_hidden_states = (
                attention_hidden_states * decoder_layer.attn_out_multiplier
            )
            # Mamba branch runs in parallel on the same normed X.
            mamba_hidden_states = decoder_layer.mamba(
                hidden_states = X,
                cache_params = present_key_value,
                cache_position = cache_position,
                attention_mask = mamba_attention_mask,
            )
            mamba_hidden_states = mamba_hidden_states * decoder_layer.ssm_out_multiplier
            X = mamba_hidden_states + attention_hidden_states
            X += residual
            residual.copy_(X)  # residual = X
            X = fast_rms_layernorm_inference(
                decoder_layer.pre_ff_layernorm,
                X,
                XX = XX,
                XX2 = XX2,
                variance = variance,
            )
            X = mlp_fast_forward_inference(
                decoder_layer.feed_forward,
                X,
                temp_gate = temp_gate,
                temp_up = temp_up,
                gate_multiplier = gate_multiplier,
                down_multiplier = down_multiplier,
            )
            X += residual
            next_decoder_cache.append(present_key_value)
        X = fast_rms_layernorm_inference(
            self.model.final_layernorm,
            X,
            XX = XX,
            XX2 = XX2,
            variance = variance,
        )
        return BaseModelOutputWithPast(
            last_hidden_state = X,
            past_key_values = next_decoder_cache,
            hidden_states = [],
            attentions = [],
        )
    return FalconH1Model_fast_forward_inference_custom
# Separate prepare_inputs_for_generation for Hybrid FalconH1
def _fast_prepare_inputs_for_generation(
    self,
    input_ids,
    past_key_values = None,
    attention_mask = None,
    inputs_embeds = None,
    cache_position = None,
    position_ids = None,
    use_cache = True,
    **kwargs,
):
    """
    `prepare_inputs_for_generation` replacement for hybrid FalconH1, which
    uses a unique cache type (`FalconHybridMambaAttentionDynamicCache`).

    Trims `input_ids` down to the unprocessed tokens when a cache exists,
    derives `position_ids` from `attention_mask` when not supplied, and
    assembles the model-input dict for one generation step.
    """
    has_cache = past_key_values is not None
    if has_cache:
        # Keep only the unprocessed tokens. Three caveats:
        #   1. with `inputs_embeds`, `input_ids` may be missing entries;
        #   2. some generation methods pre-slice `input_ids` themselves, in
        #      which case the `elif` below is a deliberate no-op;
        #   3. with synced GPUs `cache_position` can run past `input_ids`,
        #      where only the dummy token is wanted (not checkable while
        #      torchdynamo is compiling, hence the compile guard).
        slice_to_cache_len = inputs_embeds is not None or (
            is_torchdynamo_compiling() or cache_position[-1] >= input_ids.shape[1]
        )
        if slice_to_cache_len:
            input_ids = input_ids[:, -cache_position.shape[0] :]
        elif input_ids.shape[1] != cache_position.shape[0]:
            input_ids = input_ids[:, cache_position]
    # TODO: wire up FalconHybridMambaAttentionDynamicCache construction here
    # for the no-cache case once Cache support works for inference.
    if attention_mask is not None and position_ids is None:
        # Derive positions from the mask on the fly for batch generation.
        position_ids = attention_mask.long().cumsum(-1) - 1
        position_ids.masked_fill_(attention_mask == 0, 1)
        if has_cache:
            position_ids = position_ids[:, -input_ids.shape[1] :]
    if inputs_embeds is not None and not has_cache:
        # `inputs_embeds` only feed the very first generation step.
        model_inputs = {"inputs_embeds": inputs_embeds}
    else:
        # `contiguous()` needed for compilation use cases.
        model_inputs = {"input_ids": input_ids.contiguous()}
    model_inputs["position_ids"]    = position_ids
    model_inputs["past_key_values"] = past_key_values
    model_inputs["use_cache"]       = use_cache
    model_inputs["attention_mask"]  = attention_mask
    model_inputs["logits_to_keep"]  = self.config.num_logits_to_keep
    model_inputs["cache_position"]  = cache_position
    return model_inputs
def fix_prepare_inputs_for_generation(module):
    """Swap `module.prepare_inputs_for_generation` (when present) for the
    hybrid-cache-aware `_fast_prepare_inputs_for_generation`."""
    needs_patch = hasattr(module, "prepare_inputs_for_generation")
    if needs_patch:
        setattr(module, "prepare_inputs_for_generation", _fast_prepare_inputs_for_generation)
class FastFalconH1Model(FastLlamaModel):
    """Unsloth fast-path loader for FalconH1 hybrid (attention + Mamba) models.

    `pre_patch` swaps HF FalconH1 forward methods for the fused
    implementations defined in this file; `from_pretrained` defers to the
    shared `FastLlamaModel.from_pretrained` with this class as the patcher.
    """

    @staticmethod
    def pre_patch():
        """Monkey-patch FalconH1 modules with Unsloth's fast forwards
        before model instantiation."""
        # patch_linear_scaling generates source for an __init__ supporting
        # RoPE linear scaling; exec/eval installs it on the attention class.
        init_name, function = patch_linear_scaling(
            model_name = "FalconH1",
            rope_module = LlamaRotaryEmbedding,
            scaled_rope_module = LlamaLinearScalingRotaryEmbedding,
            attention_module = FalconH1Attention,
        )
        if init_name is not None:
            exec(function, globals())
            FalconH1Attention.__init__ = eval(init_name)
        FalconH1Attention.forward = FalconH1Attention_fast_forward
        FalconH1DecoderLayer.forward = FalconH1DecoderLayer_fast_forward
        FalconH1Model.forward = LlamaModel_fast_forward
        # Causal-LM forward is wrapped around the single-token inference path.
        FalconH1ForCausalLM.forward = CausalLM_fast_forward(
            _FalconH1_fast_forward_inference(FalconH1Attention_fast_forward_inference)
        )
        PeftModelForCausalLM.forward = PeftModel_fast_forward
        fix_prepare_inputs_for_generation(FalconH1ForCausalLM)
        # Solves https://github.com/unslothai/unsloth/issues/168
        # Static KV Cache was introduced in 4.38.0, causing training to be much slower.
        # Inference can now be CUDAGraphed, but we shall retain the old rotary embeddings.
        # https://github.com/huggingface/transformers/pull/27931
        # https://github.com/huggingface/transformers/blob/v4.37.2/src/transformers/models/llama/modeling_llama.py
        import transformers.models.falcon_h1.modeling_falcon_h1
        transformers.models.falcon_h1.modeling_falcon_h1.FalconH1RotaryEmbedding = (
            LlamaRotaryEmbedding
        )
        return

    @staticmethod
    def from_pretrained( # TODO: Change after release
        # NOTE(review): default repo id says "Qwen/..." but FalconH1 is not a
        # Qwen model -- hence the TODO above; confirm intended default.
        model_name = "Qwen/FalconH1-7B",
        max_seq_length = 4096,
        dtype = None,
        load_in_4bit = True,
        token = None,
        device_map = "sequential",
        rope_scaling = None,
        fix_tokenizer = True,
        model_patcher = None,
        tokenizer_name = None,
        trust_remote_code = False,
        **kwargs,
    ):
        """Load a FalconH1 checkpoint via the shared FastLlamaModel loader,
        forcing `model_patcher = FastFalconH1Model` so `pre_patch` runs."""
        return FastLlamaModel.from_pretrained(
            model_name = model_name,
            max_seq_length = max_seq_length,
            dtype = dtype,
            load_in_4bit = load_in_4bit,
            token = token,
            device_map = device_map,
            rope_scaling = rope_scaling,
            fix_tokenizer = fix_tokenizer,
            model_patcher = FastFalconH1Model,
            tokenizer_name = tokenizer_name,
            trust_remote_code = trust_remote_code,
            **kwargs,
        )
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/models/mapper.py | unsloth/models/mapper.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Public export surface of this module: the mapping tables between quantized
# checkpoint repo ids (4-bit / FP8) and their full-precision counterparts.
__all__ = [
    "INT_TO_FLOAT_MAPPER",
    "FLOAT_TO_INT_MAPPER",
    "MAP_TO_UNSLOTH_16bit",
    "FLOAT_TO_FP8_BLOCK_MAPPER",
    "FLOAT_TO_FP8_ROW_MAPPER",
]
__INT_TO_FLOAT_MAPPER = \
{
"unsloth/mistral-7b-bnb-4bit" : (
"unsloth/mistral-7b",
"mistralai/Mistral-7B-v0.1",
),
"unsloth/llama-2-7b-bnb-4bit" : (
"unsloth/llama-2-7b",
"meta-llama/Llama-2-7b-hf",
),
"unsloth/llama-2-13b-bnb-4bit" : (
"unsloth/llama-2-13b",
"meta-llama/Llama-2-13b-hf",
),
"unsloth/codellama-34b-bnb-4bit" : (
"codellama/CodeLlama-34b-hf",
),
"unsloth/zephyr-sft-bnb-4bit" : (
"unsloth/zephyr-sft",
"HuggingFaceH4/mistral-7b-sft-beta",
),
"unsloth/tinyllama-bnb-4bit" : (
"unsloth/tinyllama",
"TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
),
"unsloth/tinyllama-chat-bnb-4bit" : (
"unsloth/tinyllama-chat",
"TinyLlama/TinyLlama-1.1B-Chat-v1.0",
),
"unsloth/mistral-7b-instruct-v0.1-bnb-4bit" : (
"unsloth/mistral-7b-instruct-v0.1",
"mistralai/Mistral-7B-Instruct-v0.1",
),
"unsloth/mistral-7b-instruct-v0.2-bnb-4bit" : (
"unsloth/mistral-7b-instruct-v0.2",
"mistralai/Mistral-7B-Instruct-v0.2",
),
"unsloth/llama-2-7b-chat-bnb-4bit" : (
"unsloth/llama-2-7b-chat",
"meta-llama/Llama-2-7b-chat-hf",
),
"unsloth/llama-2-7b-chat-bnb-4bit" : (
"unsloth/llama-2-7b-chat",
"meta-llama/Llama-2-7b-chat-hf",
),
"unsloth/Mixtral-8x7B-v0.1-unsloth-bnb-4bit" : (
"unsloth/Mixtral-8x7B-v0.1",
"mistralai/Mixtral-8x7B-v0.1",
"unsloth/Mixtral-8x7B-v0.1-bnb-4bit",
),
"unsloth/Mixtral-8x7B-Instruct-v0.1-unsloth-bnb-4bit" : (
"unsloth/Mixtral-8x7B-Instruct-v0.1",
"mistralai/Mixtral-8x7B-Instruct-v0.1",
"unsloth/Mixtral-8x7B-Instruct-v0.1-bnb-4bit",
),
"unsloth/codellama-7b-bnb-4bit" : (
"unsloth/codellama-7b",
"codellama/CodeLlama-7b-hf",
),
"unsloth/codellama-13b-bnb-4bit" : (
"codellama/CodeLlama-13b-hf",
),
"unsloth/yi-6b-bnb-4bit" : (
"unsloth/yi-6b",
"01-ai/Yi-6B",
),
"unsloth/solar-10.7b-bnb-4bit" : (
"upstage/SOLAR-10.7B-v1.0",
),
"unsloth/gemma-7b-bnb-4bit" : (
"unsloth/gemma-7b",
"google/gemma-7b",
),
"unsloth/gemma-2b-bnb-4bit" : (
"unsloth/gemma-2b",
"google/gemma-2b",
),
"unsloth/gemma-7b-it-bnb-4bit" : (
"unsloth/gemma-7b-it",
"google/gemma-7b-it",
),
"unsloth/gemma-2b-bnb-4bit" : (
"unsloth/gemma-2b-it",
"google/gemma-2b-it",
),
"unsloth/mistral-7b-v0.2-bnb-4bit" : (
"unsloth/mistral-7b-v0.2",
"alpindale/Mistral-7B-v0.2-hf",
),
"unsloth/gemma-1.1-2b-it-bnb-4bit" : (
"unsloth/gemma-1.1-2b-it",
"google/gemma-1.1-2b-it",
),
"unsloth/gemma-1.1-7b-it-bnb-4bit" : (
"unsloth/gemma-1.1-7b-it",
"google/gemma-1.1-7b-it",
),
"unsloth/Starling-LM-7B-beta" : (
"unsloth/Starling-LM-7B-beta",
"Nexusflow/Starling-LM-7B-beta",
),
"unsloth/Hermes-2-Pro-Mistral-7B-bnb-4bit" : (
"unsloth/Hermes-2-Pro-Mistral-7B",
"NousResearch/Hermes-2-Pro-Mistral-7B",
),
"unsloth/OpenHermes-2.5-Mistral-7B-bnb-4bit" : (
"unsloth/OpenHermes-2.5-Mistral-7B",
"teknium/OpenHermes-2.5-Mistral-7B",
),
"unsloth/codegemma-2b-bnb-4bit" : (
"unsloth/codegemma-2b",
"google/codegemma-2b",
),
"unsloth/codegemma-7b-bnb-4bit" : (
"unsloth/codegemma-7b",
"google/codegemma-7b",
),
"unsloth/codegemma-7b-it-bnb-4bit" : (
"unsloth/codegemma-7b-it",
"google/codegemma-7b-it",
),
"unsloth/llama-3-8b-bnb-4bit" : (
"unsloth/llama-3-8b",
"meta-llama/Meta-Llama-3-8B",
),
"unsloth/llama-3-8b-Instruct-bnb-4bit" : (
"unsloth/llama-3-8b-Instruct",
"meta-llama/Meta-Llama-3-8B-Instruct",
),
"unsloth/llama-3-70b-bnb-4bit" : (
"meta-llama/Meta-Llama-3-70B",
),
"unsloth/llama-3-70b-Instruct-bnb-4bit" : (
"meta-llama/Meta-Llama-3-70B-Instruct",
),
"unsloth/Phi-3-mini-4k-instruct-bnb-4bit" : (
"unsloth/Phi-3-mini-4k-instruct",
"microsoft/Phi-3-mini-4k-instruct",
),
"unsloth/mistral-7b-v0.3-bnb-4bit" : (
"unsloth/mistral-7b-v0.3",
"mistralai/Mistral-7B-v0.3",
),
"unsloth/mistral-7b-instruct-v0.3-bnb-4bit" : (
"unsloth/mistral-7b-instruct-v0.3",
"mistralai/Mistral-7B-Instruct-v0.3",
),
"unsloth/Phi-3-medium-4k-instruct-bnb-4bit" : (
"unsloth/Phi-3-medium-4k-instruct",
"microsoft/Phi-3-medium-4k-instruct",
),
"unsloth/Qwen2-0.5B-bnb-4bit" : (
"unsloth/Qwen2-0.5B",
"Qwen/Qwen2-0.5B",
),
"unsloth/Qwen2-0.5B-Instruct-bnb-4bit" : (
"unsloth/Qwen2-0.5B-Instruct",
"Qwen/Qwen2-0.5B-Instruct",
),
"unsloth/Qwen2-1.5B-bnb-4bit" : (
"unsloth/Qwen2-1.5B",
"Qwen/Qwen2-1.5B",
),
"unsloth/Qwen2-1.5B-Instruct-bnb-4bit" : (
"unsloth/Qwen2-1.5B-Instruct",
"Qwen/Qwen2-1.5B-Instruct",
),
"unsloth/Qwen2-7B-bnb-4bit" : (
"unsloth/Qwen2-7B",
"Qwen/Qwen2-7B",
),
"unsloth/Qwen2-7B-Instruct-bnb-4bit" : (
"unsloth/Qwen2-7B-Instruct",
"Qwen/Qwen2-7B-Instruct",
),
"unsloth/Qwen2-70B-bnb-4bit" : (
"Qwen/Qwen2-70B",
),
"unsloth/Qwen2-70B-Instruct-bnb-4bit" : (
"Qwen/Qwen2-70B-Instruct",
),
"mistralai/Codestral-22B-v0.1" : (
"mistral-community/Codestral-22B-v0.1",
),
"unsloth/gemma-2-9b-bnb-4bit" : (
"unsloth/gemma-2-9b",
"google/gemma-2-9b",
),
"unsloth/gemma-2-27b-bnb-4bit" : (
"unsloth/gemma-2-27b",
"google/gemma-2-27b",
),
"unsloth/gemma-2-9b-it-bnb-4bit" : (
"unsloth/gemma-2-9b-it",
"google/gemma-2-9b-it",
),
"unsloth/gemma-2-27b-it-bnb-4bit" : (
"unsloth/gemma-2-27b-it",
"google/gemma-2-27b-it",
),
"unsloth/Phi-3-mini-4k-instruct-v0-bnb-4bit" : ( # Old Phi pre July
"unsloth/Phi-3-mini-4k-instruct-v0",
),
"unsloth/Mistral-Nemo-Instruct-2407-bnb-4bit" : ( # New 12b Mistral models
"unsloth/Mistral-Nemo-Instruct-2407",
"mistralai/Mistral-Nemo-Instruct-2407",
),
"unsloth/Mistral-Nemo-Base-2407-bnb-4bit" : ( # New 12b Mistral models
"unsloth/Mistral-Nemo-Base-2407",
"mistralai/Mistral-Nemo-Base-2407",
),
"unsloth/Meta-Llama-3.1-8B-unsloth-bnb-4bit" : (
"unsloth/Meta-Llama-3.1-8B",
"meta-llama/Meta-Llama-3.1-8B",
"unsloth/Meta-Llama-3.1-8B-bnb-4bit",
),
"unsloth/Meta-Llama-3.1-8B-Instruct-unsloth-bnb-4bit" : {
"8" : (
"RedHatAI/Llama-3.1-8B-Instruct-FP8",
"unsloth/Llama-3.1-8B-Instruct-FP8-Block",
"unsloth/Llama-3.1-8B-Instruct-FP8-Dynamic",
),
"16" : (
"unsloth/Meta-Llama-3.1-8B-Instruct",
"meta-llama/Meta-Llama-3.1-8B-Instruct",
"unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit",
),
},
"unsloth/Llama-3.1-8B-unsloth-bnb-4bit" : (
"unsloth/Llama-3.1-8B",
"meta-llama/Llama-3.1-8B",
"unsloth/Llama-3.1-8B-bnb-4bit",
),
"unsloth/Llama-3.1-8B-Instruct-unsloth-bnb-4bit" : {
"8" : (
"RedHatAI/Llama-3.1-8B-Instruct-FP8",
"unsloth/Llama-3.1-8B-Instruct-FP8-Block",
"unsloth/Llama-3.1-8B-Instruct-FP8-Dynamic",
),
"16" : (
"unsloth/Llama-3.1-8B-Instruct",
"meta-llama/Llama-3.1-8B-Instruct",
"unsloth/Llama-3.1-8B-Instruct-bnb-4bit",
),
},
"unsloth/Meta-Llama-3.1-70B-bnb-4bit" : (
"unsloth/Meta-Llama-3.1-70B",
"meta-llama/Meta-Llama-3.1-70B",
),
"unsloth/Meta-Llama-3.1-405B-bnb-4bit" : (
"meta-llama/Meta-Llama-3.1-405B",
),
"unsloth/Meta-Llama-3.1-405B-Instruct-bnb-4bit" : (
"meta-llama/Meta-Llama-3.1-405B-Instruct",
),
"unsloth/Meta-Llama-3.1-70B-Instruct-bnb-4bit" : (
"unsloth/Meta-Llama-3.1-70B-Instruct",
"meta-llama/Meta-Llama-3.1-70B-Instruct",
),
"unsloth/Mistral-Large-Instruct-2407-bnb-4bit" : (
"mistralai/Mistral-Large-Instruct-2407",
),
"unsloth/gemma-2-2b-bnb-4bit" : (
"unsloth/gemma-2-2b",
"google/gemma-2-2b",
),
"unsloth/gemma-2-2b-it-bnb-4bit" : (
"unsloth/gemma-2-2b-it",
"google/gemma-2-2b-it",
),
"unsloth/Phi-3.5-mini-instruct-bnb-4bit" : (
"unsloth/Phi-3.5-mini-instruct",
"microsoft/Phi-3.5-mini-instruct",
),
"unsloth/c4ai-command-r-08-2024-bnb-4bit" : (
"CohereForAI/c4ai-command-r-08-2024",
),
"unsloth/c4ai-command-r-plus-08-2024-bnb-4bit" : (
"CohereForAI/c4ai-command-r-plus-08-2024",
),
"unsloth/Llama-3.1-Storm-8B-bnb-4bit" : (
"unsloth/Llama-3.1-Storm-8B",
"akjindal53244/Llama-3.1-Storm-8B",
),
"unsloth/Hermes-3-Llama-3.1-8B-bnb-4bit" : (
"unsloth/Hermes-3-Llama-3.1-8B",
"NousResearch/Hermes-3-Llama-3.1-8B",
),
"unsloth/Hermes-3-Llama-3.1-70B-bnb-4bit" : (
"unsloth/Hermes-3-Llama-3.1-70B",
"NousResearch/Hermes-3-Llama-3.1-70B",
),
"unsloth/Hermes-3-Llama-3.1-405B-bnb-4bit" : (
"NousResearch/Hermes-3-Llama-3.1-405B",
),
"unsloth/SmolLM-135M-bnb-4bit" : (
"unsloth/SmolLM-135M",
"HuggingFaceTB/SmolLM-135M",
),
"unsloth/SmolLM-360M-bnb-4bit" : (
"unsloth/SmolLM-360M",
"HuggingFaceTB/SmolLM-360M",
),
"unsloth/SmolLM-1.7B-bnb-4bit" : (
"unsloth/SmolLM-1.7B",
"HuggingFaceTB/SmolLM-1.7B",
),
"unsloth/SmolLM-135M-Instruct-bnb-4bit" : (
"unsloth/SmolLM-135M-Instruct",
"HuggingFaceTB/SmolLM-135M-Instruct",
),
"unsloth/SmolLM-360M-Instruct-bnb-4bit" : (
"unsloth/SmolLM-360M-Instruct",
"HuggingFaceTB/SmolLM-360M-Instruct",
),
"unsloth/SmolLM-1.7B-Instruct-bnb-4bit" : (
"unsloth/SmolLM-1.7B-Instruct",
"HuggingFaceTB/SmolLM-1.7B-Instruct",
),
"unsloth/Mistral-Small-Instruct-2409-bnb-4bit" : (
"unsloth/Mistral-Small-Instruct-2409",
"mistralai/Mistral-Small-Instruct-2409",
),
"unsloth/Qwen2.5-0.5B-Instruct-unsloth-bnb-4bit" : (
"unsloth/Qwen2.5-0.5B-Instruct",
"Qwen/Qwen2.5-0.5B-Instruct",
"unsloth/Qwen2.5-0.5B-Instruct-bnb-4bit",
),
"unsloth/Qwen2.5-1.5B-Instruct-unsloth-bnb-4bit" : (
"unsloth/Qwen2.5-1.5B-Instruct",
"Qwen/Qwen2.5-1.5B-Instruct",
"unsloth/Qwen2.5-1.5B-Instruct-bnb-4bit",
),
"unsloth/Qwen2.5-3B-Instruct-unsloth-bnb-4bit" : (
"unsloth/Qwen2.5-3B-Instruct",
"Qwen/Qwen2.5-3B-Instruct",
"unsloth/Qwen2.5-3B-Instruct-bnb-4bit",
),
"unsloth/Qwen2.5-7B-Instruct-unsloth-bnb-4bit" : (
"unsloth/Qwen2.5-7B-Instruct",
"Qwen/Qwen2.5-7B-Instruct",
"unsloth/Qwen2.5-7B-Instruct-bnb-4bit",
),
"unsloth/Qwen2.5-14B-Instruct-unsloth-bnb-4bit" : (
"unsloth/Qwen2.5-14B-Instruct",
"Qwen/Qwen2.5-14B-Instruct",
"unsloth/Qwen2.5-14B-Instruct-bnb-4bit",
),
"unsloth/Qwen2.5-32B-Instruct-bnb-4bit" : (
"unsloth/Qwen2.5-32B-Instruct",
"Qwen/Qwen2.5-32B-Instruct",
),
"unsloth/Qwen2.5-72B-Instruct-bnb-4bit" : (
"unsloth/Qwen2.5-72B-Instruct",
"Qwen/Qwen2.5-72B-Instruct",
),
"unsloth/Qwen2.5-0.5B-unsloth-bnb-4bit" : (
"unsloth/Qwen2.5-0.5B",
"Qwen/Qwen2.5-0.5B",
"unsloth/Qwen2.5-0.5B-bnb-4bit",
),
"unsloth/Qwen2.5-1.5B-unsloth-bnb-4bit" : (
"unsloth/Qwen2.5-1.5B",
"Qwen/Qwen2.5-1.5B",
"unsloth/Qwen2.5-1.5B-bnb-4bit",
),
"unsloth/Qwen2.5-3B-unsloth-bnb-4bit" : (
"unsloth/Qwen2.5-3B",
"Qwen/Qwen2.5-3B",
"unsloth/Qwen2.5-3B-bnb-4bit",
),
"unsloth/Qwen2.5-7B-unsloth-bnb-4bit" : (
"unsloth/Qwen2.5-7B",
"Qwen/Qwen2.5-7B",
"unsloth/Qwen2.5-7B-bnb-4bit",
),
"unsloth/Qwen2.5-14B-unsloth-bnb-4bit" : (
"unsloth/Qwen2.5-14B",
"Qwen/Qwen2.5-14B",
"unsloth/Qwen2.5-14B-bnb-4bit",
),
"unsloth/Qwen2.5-32B-bnb-4bit" : (
"unsloth/Qwen2.5-32B",
"Qwen/Qwen2.5-32B",
),
"unsloth/Qwen2.5-72B-bnb-4bit" : (
"unsloth/Qwen2.5-72B",
"Qwen/Qwen2.5-72B",
),
"unsloth/Qwen2.5-Math-1.5B-bnb-4bit" : (
"unsloth/Qwen2.5-Math-1.5B",
"Qwen/Qwen2.5-Math-1.5B",
),
"unsloth/Qwen2.5-Math-7B-bnb-4bit" : (
"unsloth/Qwen2.5-Math-7B",
"Qwen/Qwen2.5-Math-7B",
),
"unsloth/Qwen2.5-Math-72B-bnb-4bit" : (
"unsloth/Qwen2.5-Math-72B",
"Qwen/Qwen2.5-Math-72B",
),
"unsloth/Qwen2.5-Math-1.5B-Instruct-bnb-4bit" : (
"unsloth/Qwen2.5-Math-1.5B-Instruct",
"Qwen/Qwen2.5-Math-1.5B-Instruct",
),
"unsloth/Qwen2.5-Math-7B-Instruct-bnb-4bit" : (
"unsloth/Qwen2.5-Math-7B-Instruct",
"Qwen/Qwen2.5-Math-7B-Instruct",
),
"unsloth/Qwen2.5-Math-72B-Instruct-bnb-4bit" : (
"unsloth/Qwen2.5-Math-72B-Instruct",
"Qwen/Qwen2.5-Math-72B-Instruct",
),
"unsloth/Qwen2.5-Coder-0.5B-bnb-4bit" : (
"unsloth/Qwen2.5-Coder-0.5B",
"Qwen/Qwen2.5-Coder-0.5B",
),
"unsloth/Qwen2.5-Coder-1.5B-bnb-4bit" : (
"unsloth/Qwen2.5-Coder-1.5B",
"Qwen/Qwen2.5-Coder-1.5B",
),
"unsloth/Qwen2.5-Coder-3B-bnb-4bit" : (
"unsloth/Qwen2.5-Coder-3B",
"Qwen/Qwen2.5-Coder-3B",
),
"unsloth/Qwen2.5-Coder-7B-bnb-4bit" : (
"unsloth/Qwen2.5-Coder-7B",
"Qwen/Qwen2.5-Coder-7B",
),
"unsloth/Qwen2.5-Coder-14B-bnb-4bit" : (
"unsloth/Qwen2.5-Coder-14B",
"Qwen/Qwen2.5-Coder-14B",
),
"unsloth/Qwen2.5-Coder-32B-bnb-4bit" : (
"unsloth/Qwen2.5-Coder-32B",
"Qwen/Qwen2.5-Coder-32B",
),
"unsloth/Qwen2.5-Coder-0.5B-Instruct-bnb-4bit" : (
"unsloth/Qwen2.5-Coder-0.5B-Instruct",
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
),
"unsloth/Qwen2.5-Coder-1.5B-Instruct-bnb-4bit" : (
"unsloth/Qwen2.5-Coder-1.5B-Instruct",
"Qwen/Qwen2.5-Coder-1.5B-Instruct",
),
"unsloth/Qwen2.5-Coder-3B-Instruct-bnb-4bit" : (
"unsloth/Qwen2.5-Coder-3B-Instruct",
"Qwen/Qwen2.5-Coder-3B-Instruct",
),
"unsloth/Qwen2.5-Coder-7B-Instruct-bnb-4bit" : (
"unsloth/Qwen2.5-Coder-7B-Instruct",
"Qwen/Qwen2.5-Coder-7B-Instruct",
),
"unsloth/Qwen2.5-Coder-14B-Instruct-bnb-4bit" : (
"unsloth/Qwen2.5-Coder-14B-Instruct",
"Qwen/Qwen2.5-Coder-14B-Instruct",
),
"unsloth/Qwen2.5-Coder-32B-Instruct-bnb-4bit" : (
"unsloth/Qwen2.5-Coder-32B-Instruct",
"Qwen/Qwen2.5-Coder-32B-Instruct",
),
"unsloth/Llama-3.2-1B-unsloth-bnb-4bit" : (
"unsloth/Llama-3.2-1B",
"meta-llama/Llama-3.2-1B",
"unsloth/Llama-3.2-1B-bnb-4bit",
),
"unsloth/Llama-3.2-3B-unsloth-bnb-4bit" : (
"unsloth/Llama-3.2-3B",
"meta-llama/Llama-3.2-3B",
"unsloth/Llama-3.2-3B-bnb-4bit",
),
"unsloth/Llama-3.2-1B-Instruct-unsloth-bnb-4bit" : {
"8": (
"RedHatAI/Llama-3.2-1B-Instruct-FP8",
"unsloth/Llama-3.2-1B-Instruct-FP8-Block",
"unsloth/Llama-3.2-1B-Instruct-FP8-Dynamic",
),
"16" : (
"unsloth/Llama-3.2-1B-Instruct",
"meta-llama/Llama-3.2-1B-Instruct",
"unsloth/Llama-3.2-1B-Instruct-bnb-4bit",
),
},
"unsloth/Llama-3.2-3B-Instruct-unsloth-bnb-4bit" : {
"8": (
"RedHatAI/Llama-3.2-3B-Instruct-FP8",
"unsloth/Llama-3.2-3B-Instruct-FP8-Block",
"unsloth/Llama-3.2-3B-Instruct-FP8-Dynamic",
),
"16" : (
"unsloth/Llama-3.2-3B-Instruct",
"meta-llama/Llama-3.2-3B-Instruct",
"unsloth/Llama-3.2-3B-Instruct-bnb-4bit",
),
},
"unsloth/Llama-3.1-Nemotron-70B-Instruct-bnb-4bit" : (
"unsloth/Llama-3.1-Nemotron-70B-Instruct",
"nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
),
"unsloth/Qwen2-VL-2B-Instruct-unsloth-bnb-4bit" : (
"unsloth/Qwen2-VL-2B-Instruct",
"Qwen/Qwen2-VL-2B-Instruct",
"unsloth/Qwen2-VL-2B-Instruct-bnb-4bit",
),
"unsloth/Qwen2-VL-7B-Instruct-unsloth-bnb-4bit" : (
"unsloth/Qwen2-VL-7B-Instruct",
"Qwen/Qwen2-VL-7B-Instruct",
"unsloth/Qwen2-VL-7B-Instruct-bnb-4bit",
),
"unsloth/Qwen2-VL-72B-Instruct-bnb-4bit" : (
"unsloth/Qwen2-VL-72B-Instruct",
"Qwen/Qwen2-VL-72B-Instruct",
),
"unsloth/Qwen2-VL-2B-bnb-4bit" : (
"unsloth/Qwen2-VL-2B",
"Qwen/Qwen2-VL-2B",
),
"unsloth/Qwen2-VL-7B-bnb-4bit" : (
"unsloth/Qwen2-VL-7B",
"Qwen/Qwen2-VL-7B",
),
"unsloth/Qwen2-VL-72B-bnb-4bit" : (
"unsloth/Qwen2-VL-72B",
"Qwen/Qwen2-VL-72B",
),
"unsloth/Llama-3.2-11B-Vision-Instruct-unsloth-bnb-4bit" : (
"unsloth/Llama-3.2-11B-Vision-Instruct",
"meta-llama/Llama-3.2-11B-Vision-Instruct",
"unsloth/Llama-3.2-11B-Vision-Instruct-bnb-4bit",
),
"unsloth/Llama-3.2-90B-Vision-Instruct-bnb-4bit" : (
"unsloth/Llama-3.2-90B-Vision-Instruct",
"meta-llama/Llama-3.2-90B-Vision-Instruct",
),
"unsloth/Llama-3.2-11B-Vision-unsloth-bnb-4bit" : (
"unsloth/Llama-3.2-11B-Vision",
"meta-llama/Llama-3.2-11B-Vision",
"unsloth/Llama-3.2-11B-Vision-bnb-4bit",
),
"unsloth/Llama-3.2-90B-Vision-bnb-4bit" : (
"unsloth/Llama-3.2-90B-Vision",
"meta-llama/Llama-3.2-90B-Vision",
),
"unsloth/Pixtral-12B-2409-unsloth-bnb-4bit" : (
"unsloth/Pixtral-12B-2409",
"mistralai/Pixtral-12B-2409",
"unsloth/Pixtral-12B-2409-bnb-4bit",
),
"unsloth/Pixtral-12B-2409-Base-bnb-4bit" : (
"unsloth/Pixtral-12B-Base-2409",
"mistralai/Pixtral-12B-Base-2409",
),
"unsloth/llava-1.5-7b-hf-bnb-4bit" : (
"unsloth/llava-1.5-7b-hf",
"llava-hf/llava-1.5-7b-hf",
),
"unsloth/llava-v1.6-mistral-7b-hf-bnb-4bit" : (
"unsloth/llava-v1.6-mistral-7b-hf",
"llava-hf/llava-v1.6-mistral-7b-hf",
),
"unsloth/Llama-3.1-Tulu-3-8B-bnb-4bit" : (
"unsloth/Llama-3.1-Tulu-3-8B",
"allenai/Llama-3.1-Tulu-3-8B",
),
"unsloth/Llama-3.1-Tulu-3-70B-bnb-4bit" : (
"unsloth/Llama-3.1-Tulu-3-70B",
"allenai/Llama-3.1-Tulu-3-70B",
),
"unsloth/QwQ-32B-Preview-bnb-4bit" : (
"unsloth/QwQ-32B-Preview",
"Qwen/QwQ-32B-Preview",
),
"unsloth/Llama-3.3-70B-Instruct-unsloth-bnb-4bit" : {
"8" : (
"RedHatAI/Llama-3.3-70B-Instruct-FP8",
"unsloth/Llama-3.3-70B-Instruct-FP8-Block",
"unsloth/Llama-3.3-70B-Instruct-FP8-Dynamic",
),
"16" : (
"unsloth/Llama-3.3-70B-Instruct",
"meta-llama/Llama-3.3-70B-Instruct",
"unsloth/Llama-3.3-70B-Instruct-bnb-4bit",
),
},
"unsloth/phi-4-unsloth-bnb-4bit" : (
"unsloth/phi-4",
"microsoft/phi-4",
"unsloth/phi-4-bnb-4bit",
),
"unsloth/DeepSeek-R1-Distill-Qwen-32B-bnb-4bit" : (
"unsloth/DeepSeek-R1-Distill-Qwen-32B",
"deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
),
"unsloth/DeepSeek-R1-Distill-Qwen-14B-unsloth-bnb-4bit" : (
"unsloth/DeepSeek-R1-Distill-Qwen-14B",
"deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
"unsloth/DeepSeek-R1-Distill-Qwen-14B-bnb-4bit",
),
"unsloth/DeepSeek-R1-Distill-Qwen-7B-unsloth-bnb-4bit" : (
"unsloth/DeepSeek-R1-Distill-Qwen-7B",
"deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
"unsloth/DeepSeek-R1-Distill-Qwen-7B-bnb-4bit",
),
"unsloth/DeepSeek-R1-Distill-Qwen-1.5B-unsloth-bnb-4bit" : (
"unsloth/DeepSeek-R1-Distill-Qwen-1.5B",
"deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
"unsloth/DeepSeek-R1-Distill-Qwen-1.5B-bnb-4bit",
),
"unsloth/DeepSeek-R1-Distill-Llama-8B-unsloth-bnb-4bit" : (
"unsloth/DeepSeek-R1-Distill-Llama-8B",
"deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
"unsloth/DeepSeek-R1-Distill-Llama-8B-bnb-4bit",
),
"unsloth/DeepSeek-R1-Distill-Llama-70B-bnb-4bit" : (
"unsloth/DeepSeek-R1-Distill-Llama-70B",
"deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
),
"unsloth/Mistral-Small-24B-Base-2501-unsloth-bnb-4bit" : (
"unsloth/Mistral-Small-24B-Base-2501",
"mistralai/Mistral-Small-24B-Base-2501",
"unsloth/Mistral-Small-24B-Base-2501-bnb-4bit",
),
"unsloth/Mistral-Small-24B-Instruct-2501-unsloth-bnb-4bit" : (
"unsloth/Mistral-Small-24B-Instruct-2501",
"mistralai/Mistral-Small-24B-Instruct-2501",
"unsloth/Mistral-Small-24B-Instruct-2501-bnb-4bit",
),
"unsloth/Qwen2.5-VL-3B-Instruct-unsloth-bnb-4bit" : (
"unsloth/Qwen2.5-VL-3B-Instruct",
"Qwen/Qwen2.5-VL-3B-Instruct",
"unsloth/Qwen2.5-VL-3B-Instruct-bnb-4bit",
),
"unsloth/Qwen2.5-VL-7B-Instruct-unsloth-bnb-4bit" : (
"unsloth/Qwen2.5-VL-7B-Instruct",
"Qwen/Qwen2.5-VL-7B-Instruct",
"unsloth/Qwen2.5-VL-7B-Instruct-bnb-4bit",
),
"unsloth/Qwen2.5-VL-32B-Instruct-unsloth-bnb-4bit" : (
"unsloth/Qwen2.5-VL-32B-Instruct",
"Qwen/Qwen2.5-VL-32B-Instruct",
"unsloth/Qwen2.5-VL-32B-Instruct-bnb-4bit",
),
"unsloth/Qwen2.5-VL-72B-Instruct-unsloth-bnb-4bit" : (
"unsloth/Qwen2.5-VL-72B-Instruct",
"Qwen/Qwen2.5-VL-72B-Instruct",
"unsloth/Qwen2.5-VL-72B-Instruct-bnb-4bit",
),
"unsloth/DeepScaleR-1.5B-Preview-unsloth-bnb-4bit" : (
"unsloth/DeepHermes-3-Llama-3-8B-Preview",
"agentica-org/DeepScaleR-1.5B-Preview",
"unsloth/DeepScaleR-1.5B-Preview-bnb-4bit",
),
"unsloth/OpenThinker-7B-unsloth-bnb-4bit" : (
"unsloth/OpenThinker-7B",
"open-thoughts/OpenThinker-7B",
"unsloth/OpenThinker-7B-bnb-4bit",
),
"unsloth/granite-3.2-2b-instruct-unsloth-bnb-4bit" : (
"unsloth/granite-3.2-2b-instruct",
"ibm-granite/granite-3.2-2b-instruct",
"unsloth/granite-3.2-2b-instruct-bnb-4bit",
),
"unsloth/granite-3.2-8b-instruct-unsloth-bnb-4bit" : (
"unsloth/granite-3.2-8b-instruct",
"ibm-granite/granite-3.2-8b-instruct",
"unsloth/granite-3.2-8b-instruct-bnb-4bit",
),
"unsloth/QwQ-32B-unsloth-bnb-4bit" : (
"unsloth/QwQ-32B",
"Qwen/QwQ-32B",
"unsloth/QwQ-32B-bnb-4bit",
),
"unsloth/gemma-3-1b-it-unsloth-bnb-4bit" : (
"unsloth/gemma-3-1b-it",
"google/gemma-3-1b-it",
"unsloth/gemma-3-1b-it-bnb-4bit",
),
"unsloth/gemma-3-4b-it-unsloth-bnb-4bit" : (
"unsloth/gemma-3-4b-it",
"google/gemma-3-4b-it",
"unsloth/gemma-3-4b-it-bnb-4bit",
),
"unsloth/gemma-3-12b-it-unsloth-bnb-4bit" : (
"unsloth/gemma-3-12b-it",
"google/gemma-3-12b-it",
"unsloth/gemma-3-12b-it-bnb-4bit",
),
"unsloth/gemma-3-27b-it-unsloth-bnb-4bit" : (
"unsloth/gemma-3-27b-it",
"google/gemma-3-27b-it",
"unsloth/gemma-3-27b-it-bnb-4bit",
),
"unsloth/gemma-3-1b-pt-unsloth-bnb-4bit" : (
"unsloth/gemma-3-1b-pt",
"google/gemma-3-1b-pt",
"unsloth/gemma-3-1b-pt-bnb-4bit",
),
"unsloth/gemma-3-4b-pt-unsloth-bnb-4bit" : (
"unsloth/gemma-3-4b-pt",
"google/gemma-3-4b-pt",
"unsloth/gemma-3-4b-pt-bnb-4bit",
),
"unsloth/gemma-3-12b-pt-unsloth-bnb-4bit" : (
"unsloth/gemma-3-12b-pt",
"google/gemma-3-12b-pt",
"unsloth/gemma-3-12b-pt-bnb-4bit",
),
"unsloth/gemma-3-27b-pt-unsloth-bnb-4bit" : (
"unsloth/gemma-3-27b-pt",
"google/gemma-3-27b-pt",
"unsloth/gemma-3-27b-pt-bnb-4bit",
),
"unsloth/reka-flash-3-unsloth-bnb-4bit" : (
"unsloth/reka-flash-3",
"RekaAI/reka-flash-3",
"unsloth/reka-flash-3-bnb-4bit",
),
"unsloth/c4ai-command-a-03-2025-unsloth-bnb-4bit" : (
"unsloth/c4ai-command-a-03-2025",
"CohereForAI/c4ai-command-a-03-2025",
"unsloth/c4ai-command-a-03-2025-bnb-4bit",
),
"unsloth/aya-vision-32b-unsloth-bnb-4bit" : (
"unsloth/aya-vision-32b",
"CohereForAI/aya-vision-32b",
"unsloth/aya-vision-32b-bnb-4bit",
),
"unsloth/aya-vision-8b-unsloth-bnb-4bit" : (
"unsloth/aya-vision-8b",
"CohereForAI/aya-vision-8b",
"unsloth/aya-vision-8b-bnb-4bit",
),
"unsloth/granite-vision-3.2-2b-unsloth-bnb-4bit" : (
"unsloth/granite-vision-3.2-2b",
"ibm-granite/granite-vision-3.2-2b",
"unsloth/granite-vision-3.2-2b-bnb-4bit",
),
"unsloth/OLMo-2-0325-32B-Instruct-unsloth-bnb-4bit" : (
"unsloth/OLMo-2-0325-32B-Instruct",
"allenai/OLMo-2-0325-32B-Instruct",
"unsloth/OLMo-2-0325-32B-Instruct-bnb-4bit",
),
"unsloth/Mistral-Small-3.1-24B-Instruct-2503-unsloth-bnb-4bit" : (
"unsloth/Mistral-Small-3.1-24B-Instruct-2503",
"mistralai/Mistral-Small-3.1-24B-Instruct-2503",
"unsloth/Mistral-Small-3.1-24B-Instruct-2503-bnb-4bit",
),
"unsloth/Mistral-Small-3.1-24B-Base-2503-unsloth-bnb-4bit" : (
"unsloth/Mistral-Small-3.1-24B-Base-2503",
"mistralai/Mistral-Small-3.1-24B-Base-2503",
"unsloth/Mistral-Small-3.1-24B-Base-2503-bnb-4bit",
),
"unsloth/Qwen3-0.6B-unsloth-bnb-4bit" : {
"8" : (
"Qwen/Qwen3-0.6B-FP8",
"unsloth/Qwen3-0.6B-FP8",
"unsloth/Qwen3-0.6B-FP8",
),
"16" : (
"unsloth/Qwen3-0.6B",
"Qwen/Qwen3-0.6B",
"unsloth/Qwen3-0.6B-bnb-4bit",
),
},
"unsloth/Qwen3-1.7B-unsloth-bnb-4bit" : {
"8" : (
"Qwen/Qwen3-1.7B-FP8",
"unsloth/Qwen3-1.7B-FP8",
"unsloth/Qwen3-1.7B-FP8",
),
"16" : (
"unsloth/Qwen3-1.7B",
"Qwen/Qwen3-1.7B",
"unsloth/Qwen3-1.7B-bnb-4bit",
),
},
"unsloth/Qwen3-4B-unsloth-bnb-4bit" : {
"8" : (
"Qwen/Qwen3-4B-FP8",
"unsloth/Qwen3-4B-FP8",
"unsloth/Qwen3-4B-FP8",
),
"16" : (
"unsloth/Qwen3-4B",
"Qwen/Qwen3-4B",
"unsloth/Qwen3-4B-bnb-4bit",
),
},
"unsloth/Qwen3-8B-unsloth-bnb-4bit" : {
"8" : (
"Qwen/Qwen3-8B-FP8",
"unsloth/Qwen3-8B-FP8",
"unsloth/Qwen3-8B-FP8",
),
"16" : (
"unsloth/Qwen3-8B",
"Qwen/Qwen3-8B",
"unsloth/Qwen3-8B-bnb-4bit",
),
},
"unsloth/Qwen3-14B-unsloth-bnb-4bit" : {
"8" : (
"Qwen/Qwen3-14B-FP8",
"unsloth/Qwen3-14B-FP8",
"unsloth/Qwen3-14B-FP8",
),
"16" : (
"unsloth/Qwen3-14B",
"Qwen/Qwen3-14B",
"unsloth/Qwen3-14B-bnb-4bit",
),
},
"unsloth/Qwen3-32B-unsloth-bnb-4bit" : {
"8" : (
"Qwen/Qwen3-32B-FP8",
"unsloth/Qwen3-32B-FP8",
"unsloth/Qwen3-32B-FP8",
),
"16" : (
"unsloth/Qwen3-32B",
"Qwen/Qwen3-32B",
"unsloth/Qwen3-32B-bnb-4bit",
),
},
"unsloth/Qwen3-30B-A3B-unsloth-bnb-4bit" : (
"unsloth/Qwen3-30B-A3B",
"Qwen/Qwen3-30B-A3B",
"unsloth/Qwen3-30B-A3B-bnb-4bit",
),
"unsloth/Qwen3-0.6B-Base-unsloth-bnb-4bit" : (
"unsloth/Qwen3-0.6B-Base",
"Qwen/Qwen3-0.6B-Base",
"unsloth/Qwen3-0.6B-Base-bnb-4bit",
),
"unsloth/Qwen3-1.7B-Base-unsloth-bnb-4bit" : (
"unsloth/Qwen3-1.7B-Base",
"Qwen/Qwen3-1.7B-Base",
"unsloth/Qwen3-1.7B-Base-bnb-4bit",
),
"unsloth/Qwen3-4B-Base-unsloth-bnb-4bit" : (
"unsloth/Qwen3-4B-Base",
"Qwen/Qwen3-4B-Base",
"unsloth/Qwen3-4B-Base-bnb-4bit",
),
"unsloth/Qwen3-8B-Base-unsloth-bnb-4bit" : (
"unsloth/Qwen3-8B-Base",
"Qwen/Qwen3-8B-Base",
"unsloth/Qwen3-8B-Base-bnb-4bit",
),
"unsloth/Qwen3-14B-Base-unsloth-bnb-4bit" : (
"unsloth/Qwen3-14B-Base",
"Qwen/Qwen3-14B-Base",
"unsloth/Qwen3-14B-Base-bnb-4bit",
),
"unsloth/Qwen3-30B-A3B-Base-bnb-4bit" : (
"unsloth/Qwen3-30B-A3B-Base",
"Qwen/Qwen3-30B-A3B-Base",
),
"unsloth/phi-4-reasoning-unsloth-bnb-4bit" : (
"unsloth/phi-4-reasoning",
"microsoft/Phi-4-reasoning",
"unsloth/phi-4-reasoning-bnb-4bit",
),
"unsloth/phi-4-reasoning-plus-unsloth-bnb-4bit" : (
"unsloth/phi-4-reasoning-plus",
"microsoft/Phi-4-reasoning-plus",
"unsloth/phi-4-reasoning-plus-bnb-4bit",
),
"unsloth/phi-4-mini-reasoning-unsloth-bnb-4bit" : (
"unsloth/phi-4-mini-reasoning",
"microsoft/Phi-4-mini-reasoning",
"unsloth/phi-4-mini-reasoning-bnb-4bit",
),
"unsloth/Phi-4-mini-instruct-unsloth-bnb-4bit" : (
"unsloth/Phi-4-mini-instruct",
"microsoft/Phi-4-mini-instruct",
"unsloth/Phi-4-mini-instruct-bnb-4bit",
),
"unsloth/orpheus-3b-0.1-pretrained-unsloth-bnb-4bit" : (
"unsloth/orpheus-3b-0.1-pretrained",
"canopylabs/orpheus-3b-0.1-pretrained",
"unsloth/orpheus-3b-0.1-pretrained-bnb-4bit",
),
"unsloth/orpheus-3b-0.1-ft-unsloth-bnb-4bit" : (
"unsloth/orpheus-3b-0.1-ft",
"canopylabs/orpheus-3b-0.1-ft",
"unsloth/orpheus-3b-0.1-ft-bnb-4bit",
),
"unsloth/csm-1b" : (
"unsloth/csm-1b",
"sesame/csm-1b",
),
"unsloth/whisper-large-v3" : (
"unsloth/whisper-large-v3",
"openai/whisper-large-v3",
),
"unsloth/whisper-large-v3-turbo" : (
"unsloth/whisper-large-v3-turbo",
"openai/whisper-large-v3-turbo",
),
"unsloth/whisper-small" : (
"unsloth/whisper-small",
"openai/whisper-small",
),
"unsloth/CrisperWhisper" : (
"unsloth/CrisperWhisper",
"nyrahealth/CrisperWhisper",
),
"unsloth/Llasa-1B" : (
"unsloth/Llasa-1B",
"HKUSTAudio/Llasa-1B",
),
"unsloth/Spark-TTS-0.5B" : (
"unsloth/Spark-TTS-0.5B",
"SparkAudio/Spark-TTS-0.5B",
),
"unsloth/Llama-OuteTTS-1.0-1B" : (
"unsloth/Llama-OuteTTS-1.0-1B",
"OuteAI/Llama-OuteTTS-1.0-1B",
),
"unsloth/medgemma-4b-it-unsloth-bnb-4bit" : (
"unsloth/medgemma-4b-it",
"google/medgemma-4b-it",
"unsloth/medgemma-4b-it-bnb-4bit",
),
"unsloth/medgemma-27b-text-it-unsloth-bnb-4bit" : (
"unsloth/medgemma-27b-text-it",
"google/medgemma-27b-text-it",
"unsloth/medgemma-27b-text-it-bnb-4bit",
),
"unsloth/Devstral-Small-2505-unsloth-bnb-4bit" : (
"unsloth/Devstral-Small-2505",
"mistralai/Devstral-Small-2505",
"unsloth/Devstral-Small-2505-bnb-4bit",
),
"unsloth/DeepSeek-R1-0528-Qwen3-8B-unsloth-bnb-4bit" : (
"unsloth/DeepSeek-R1-0528-Qwen3-8B",
"deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
"unsloth/DeepSeek-R1-0528-Qwen3-8B-bnb-4bit",
),
"unsloth/Magistral-Small-2506-unsloth-bnb-4bit" : (
"unsloth/Magistral-Small-2506",
"mistralai/Magistral-Small-2506",
"unsloth/Magistral-Small-2506-bnb-4bit",
),
"unsloth/Mistral-Small-3.2-24B-Instruct-2506-unsloth-bnb-4bit" : (
"unsloth/Mistral-Small-3.2-24B-Instruct-2506",
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | true |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/models/cohere.py | unsloth/models/cohere.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .llama import *
from ._utils import __version__
from unsloth_zoo.hf_utils import dtype_from_config
from unsloth_zoo.utils import _get_dtype
from ..utils.packing import get_packed_info_from_kwargs
from ..utils.attention_dispatch import (
AttentionConfig,
AttentionContext,
run_attention,
select_attention_backend,
)
try:
from transformers.models.cohere.modeling_cohere import (
CohereAttention,
CohereDecoderLayer,
CohereModel,
CohereForCausalLM,
CohereRotaryEmbedding,
apply_rotary_pos_emb,
repeat_kv,
)
except:
from packaging.version import Version
transformers_version = Version(transformers_version)
if not transformers_version >= Version("4.42"):
raise ImportError(
f"Unsloth: Your transformers version of {transformers_version} does not support Cohere.\n"
f"The minimum required version is 4.42.3.\n"
f'Try `pip install --upgrade "transformers>=4.42.3"`\n'
f"to obtain the latest transformers build, then restart this session."
)
from transformers.modeling_attn_mask_utils import (
_prepare_4d_causal_attention_mask_for_sdpa,
)
# For Pytorch 2.1.1
try:
from transformers.models.cohere.modeling_cohere import (
CohereSdpaAttention,
CohereFlashAttention2,
)
except:
CohereSdpaAttention = CohereAttention
CohereFlashAttention2 = CohereAttention
def fast_layernorm_inference(self, X, out_weight = None):
    """Bias-free LayerNorm for token-by-token inference, computed in float32.

    Normalizes `X` over its last dimension: (X - mean) / sqrt(var + eps) * weight,
    then casts back to `X`'s original dtype.

    Args:
        self: A layernorm module exposing `weight` and `variance_epsilon`.
        X: Input tensor; normalized over the last dimension.
        out_weight: Optional preallocated float32 buffer shaped like
            `self.weight`. Callers on the hot decode path pass one so the
            weight cast/copy does not allocate every token.

    Returns:
        The normalized tensor in `X.dtype`.
    """
    if out_weight is None:
        # Fix: the declared default of None previously crashed on the slice
        # assignment below (`NoneType` does not support item assignment).
        # Allocate a float32 scratch buffer on demand instead.
        out_weight = torch.empty_like(self.weight, dtype = torch.float32)
    XX = X.to(torch.float32, copy = True)
    # NOTE(review): the mean is taken in X's original dtype (e.g. fp16),
    # matching the original implementation; only centering/scaling is fp32.
    XX -= X.mean(-1, keepdim = True)
    variance = XX.square().mean(-1, keepdim = True)
    variance += self.variance_epsilon
    XX *= variance.rsqrt_()
    out_weight[:] = self.weight  # cast/copy the weight into the fp32 buffer
    XX *= out_weight
    return XX.to(X.dtype)
# QK norm in Cohere
def CohereAttention_fast_forward(
    self,
    hidden_states: torch.Tensor,
    causal_mask: Optional[BlockDiagonalCausalMask] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: bool = False,
    use_cache: bool = False,
    padding_mask: Optional[torch.LongTensor] = None,
    position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    *args,
    **kwargs,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
    """Fast training / prefill forward pass for Cohere attention.

    Projects QKV, applies Cohere's optional QK layernorm, applies RoPE, then
    dispatches to the backend chosen by ``select_attention_backend``.

    Returns:
        ``(attn_output, attn_weights, past_key_value)``. ``attn_weights`` is
        always None on this fast path; ``past_key_value`` is the updated
        ``(K, V)`` cache when ``use_cache`` is set, else None.
    """
    # Free scratch buffers left behind by the token-by-token inference path
    # (CohereAttention_fast_forward_inference) so training does not keep
    # stale paged-KV memory alive.
    if hasattr(self, "paged_attention"):
        del self.paged_attention_K
        del self.paged_attention_V
        del self.paged_attention
        del self.temp_QA
        del self.temp_KV
        del self.RH_Q
        del self.attention
        del self.q_norm_out_weight
        del self.k_norm_out_weight
    bsz, q_len, _ = hidden_states.size()
    n_heads = self.config.num_attention_heads
    n_groups = self.num_key_value_groups
    n_kv_heads = self.config.num_key_value_heads
    head_dim = self.head_dim
    assert n_kv_heads * n_groups == n_heads
    # Fused QKV projection, then reshape to (bsz, heads, q_len, head_dim).
    Q, K, V = self.apply_qkv(self, hidden_states)
    Q = Q.view(bsz, q_len, n_heads, head_dim).transpose(1, 2)
    K = K.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2)
    V = V.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2)
    # Packed-sequence (sample packing) metadata, if the caller supplied any.
    seq_info = get_packed_info_from_kwargs(kwargs, Q.device)
    # Cohere-specific: layernorm Q and K before RoPE when qk_norm is enabled.
    if self.use_qk_norm:
        Q = fast_layernorm_compiled(self.q_norm, Q)
        K = fast_layernorm_compiled(self.k_norm, K)
    kv_seq_len = K.shape[-2]
    if past_key_value is not None:
        kv_seq_len += past_key_value[0].shape[-2]
    # Extend RoPE dynamically to fit in VRAM
    if position_embeddings:
        cos, sin = position_embeddings
    else:
        cos, sin = self.rotary_emb.get_cached(kv_seq_len, Q.device.index)
    rope_position_ids = (
        position_ids if position_ids is not None else kwargs.get("position_ids")
    )
    # Useful for LongRoPE
    Q, K = fast_rope_embedding(Q, K, cos, sin, rope_position_ids)
    # Append new keys/values onto the cache along the sequence dimension.
    if past_key_value is not None:
        K = torch.cat([past_key_value[0], K], dim = 2)
        V = torch.cat([past_key_value[1], V], dim = 2)
    past_key_value = (K, V) if use_cache else None
    # Attention module
    # Varlen (packed) attention is only valid without a KV cache.
    use_varlen = seq_info is not None and past_key_value is None
    backend = select_attention_backend(use_varlen)
    attention_config = AttentionConfig(
        backend = backend,
        n_kv_heads = n_kv_heads,
        n_groups = n_groups,
        flash_dense_kwargs = {"causal": True},
        flash_varlen_kwargs = {
            "dropout_p": 0.0,
            "causal": True,
            "softmax_scale": getattr(self, "softmax_scale", None),
        },
    )
    context = AttentionContext(
        bsz = bsz,
        q_len = q_len,
        kv_seq_len = kv_seq_len,
        n_heads = n_heads,
        head_dim = head_dim,
        requires_grad = hidden_states.requires_grad,
        seq_info = seq_info,
        attention_mask = attention_mask,
        causal_mask = causal_mask,
    )
    A = run_attention(config = attention_config, context = context, Q = Q, K = K, V = V)
    attn_output = A.reshape(bsz, q_len, n_heads * head_dim)
    attn_output = self.apply_o(self, attn_output)
    # Attention weights are never materialized on the fast path.
    attn_weights = None
    return attn_output, attn_weights, past_key_value
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L590
def CohereDecoderLayer_fast_forward(
    self,
    hidden_states: torch.Tensor,
    causal_mask: Optional[BlockDiagonalCausalMask] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: Optional[bool] = False,
    use_cache: Optional[bool] = False,
    padding_mask: Optional[torch.LongTensor] = None,
    position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    *args,
    **kwargs,
):
    """Fast forward for one Cohere decoder layer.

    Cohere uses a *parallel* residual block: attention and MLP both consume
    the same layernormed input, and their outputs are added to the residual
    together (``residual + attn_out + mlp_out``), unlike Llama's sequential
    attn-then-MLP layout.

    Two paths:
      * generation path (``use_cache`` + ``_flag_for_generation``): uses the
        in-place single-token inference helpers with a scratch fp32 buffer;
      * training/prefill path: uses the compiled layernorm and the module's
        regular MLP.
    """
    if use_cache and hasattr(
        self, "_flag_for_generation"
    ): # past_key_value is not None:
        # Scratch fp32 buffer for the layernorm weight cast.
        # NOTE(review): pinned to cuda:0 — multi-GPU layers presumably get
        # their buffers elsewhere; confirm against the model-level loop.
        out_weight = torch.empty(
            self.input_layernorm.weight.shape, dtype = torch.float32, device = "cuda:0"
        )
        # Self Attention
        residual = hidden_states
        hidden_states = fast_layernorm_inference(
            self.input_layernorm, hidden_states, out_weight
        )
        hidden_states_attention, self_attn_weights, present_key_value = self.self_attn(
            hidden_states = hidden_states,
            causal_mask = causal_mask,
            attention_mask = attention_mask,
            position_ids = position_ids,
            past_key_value = past_key_value,
            output_attentions = output_attentions,
            use_cache = use_cache,
            padding_mask = padding_mask,
            **kwargs,
        )
        # Fully Connected — fed the same normed input (parallel block).
        hidden_states_mlp = fast_swiglu_inference(self.mlp, hidden_states)
        # In-place accumulation into the residual tensor.
        residual += hidden_states_attention
        residual += hidden_states_mlp
        hidden_states = residual
    else:
        residual = hidden_states
        hidden_states = fast_layernorm_compiled(self.input_layernorm, hidden_states)
        hidden_states_attention, self_attn_weights, present_key_value = self.self_attn(
            hidden_states = hidden_states,
            causal_mask = causal_mask,
            attention_mask = attention_mask,
            position_ids = position_ids,
            past_key_value = past_key_value,
            output_attentions = output_attentions,
            use_cache = use_cache,
            padding_mask = padding_mask,
            **kwargs,
        )
        # Fully Connected — parallel block: attn and MLP share the normed input.
        hidden_states_mlp = self.mlp(hidden_states)
        hidden_states = residual + hidden_states_attention + hidden_states_mlp
    # Mirror the HF decoder-layer return convention.
    outputs = (hidden_states,)
    if output_attentions:
        outputs += (self_attn_weights,)
    if use_cache:
        outputs += (present_key_value,)
    return outputs
from math import sqrt as math_sqrt
# Step size (in tokens) by which the paged KV cache buffers are grown.
KV_CACHE_INCREMENT = 256 # KV Cache update size
# Cache global attribute lookups once at import time; these run on every
# decoded token inside CohereAttention_fast_forward_inference.
torch_nn_functional_softmax = torch.nn.functional.softmax
torch_matmul = torch.matmul
def CohereAttention_fast_forward_inference(
    self,
    hidden_states: torch.Tensor,
    past_key_value: Optional[Tuple[torch.Tensor]],
    position_ids,
    do_prefill = False,
    attention_mask = None,
):
    """Single-token (q_len == 1) decode step for Cohere attention.

    Maintains per-module scratch state on ``self``: a preallocated "paged"
    KV buffer (grown in KV_CACHE_INCREMENT steps), output/projection
    scratch tensors, and fp32 buffers for Cohere's QK layernorms. On the
    first call (``do_prefill``) these buffers are created and seeded from
    ``past_key_value``; afterwards they are reused and resized in place.

    Args:
        hidden_states: (bsz, 1, hidden) — the single new token's activations.
        past_key_value: tuple ``(K, V)`` of cached keys/values.
        position_ids: position of the new token, used to index RoPE tables.
        do_prefill: True on the first decode step to allocate scratch state.
        attention_mask: only consulted on the batched (bsz > 1) SDPA path.

    Returns:
        ``(attn_output, (K, V))`` with the cache views extended by one token.
    """
    Xn = hidden_states
    bsz, _, hd = hidden_states.size()
    K1, V1 = past_key_value
    dtype = Xn.dtype
    n_heads = self.config.num_attention_heads
    n_groups = self.num_key_value_groups
    n_kv_heads = self.config.num_key_value_heads
    head_dim = self.head_dim
    # assert(n_kv_heads * n_groups == n_heads)
    hidden_size = self.config.hidden_size
    attention_size = n_heads * head_dim
    seq_len = K1.shape[-2]
    kv_seq_len = seq_len + 1
    # Prefill phase
    # if not hasattr(self, "paged_attention"):
    if do_prefill:
        # One backing tensor holds both K and V, laid out seq-first so a new
        # token is written with a single row assignment.
        self.paged_attention = torch.empty(
            (KV_CACHE_INCREMENT + seq_len + 1, 2, bsz, n_kv_heads, head_dim),
            dtype = dtype,
            device = "cuda:0",
        )
        self.paged_attention_K = self.paged_attention[:, 0]
        self.paged_attention_V = self.paged_attention[:, 1]
        self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3)
        self.paged_attention_V[:seq_len] = V1.permute(2, 0, 1, 3)
        # Scratch outputs for the Q/K/V/O projections (avoid per-token allocs).
        self.temp_QA = torch.empty(
            (2, bsz, 1, attention_size), dtype = dtype, device = "cuda:0"
        )
        self.temp_KV = torch.empty(
            (2, bsz, 1, n_kv_heads * head_dim), dtype = dtype, device = "cuda:0"
        )
        self.RH_Q = torch.empty(
            (bsz, n_heads, 1, head_dim), dtype = dtype, device = "cuda:0"
        )
        # Mistral Nemo 12b has weird dimensions
        if attention_size != hidden_size:
            self.temp_O = torch.empty(
                (1, bsz, hidden_size), dtype = dtype, device = "cuda:0"
            )
        else:
            self.temp_O = self.temp_QA[1][:, :, :hidden_size]
        self.attention = torch.empty(
            (bsz, n_heads, 1, KV_CACHE_INCREMENT + seq_len),
            dtype = dtype,
            device = "cuda:0",
        )
        self.scalar = 1.0 / math_sqrt(self.head_dim)
        self.half_head_dim = head_dim // 2
        # Cohere has QK layernorms
        if self.use_qk_norm:
            self.q_norm_out_weight = torch.empty(
                self.q_norm.weight.shape, dtype = torch.float32, device = "cuda:0"
            )
            self.k_norm_out_weight = torch.empty(
                self.k_norm.weight.shape, dtype = torch.float32, device = "cuda:0"
            )
        else:
            self.q_norm_out_weight = None
            self.k_norm_out_weight = None
    elif kv_seq_len >= self.paged_attention.shape[0]:
        # Cache is full: grow the backing buffers by one increment in place.
        self.paged_attention.resize_(
            (
                self.paged_attention.shape[0] + KV_CACHE_INCREMENT,
                2,
                bsz,
                n_kv_heads,
                head_dim,
            )
        )
        self.paged_attention_K = self.paged_attention[:, 0]
        self.paged_attention_V = self.paged_attention[:, 1]
        self.attention.resize_(
            (bsz, n_heads, 1, self.attention.shape[-1] + KV_CACHE_INCREMENT)
        )
    # Project the new token into Q/K/V, writing into the scratch buffers.
    Qn = fast_linear_forward(self.q_proj, Xn, out = self.temp_QA[0])
    Kn = fast_linear_forward(self.k_proj, Xn, out = self.temp_KV[0])
    Vn = fast_linear_forward(self.v_proj, Xn, out = self.temp_KV[1])
    Qn = Qn.view(bsz, 1, n_heads, head_dim).transpose(1, 2)
    Kn = Kn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2)
    Vn = Vn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2)
    # Cohere-specific QK layernorm before RoPE.
    if self.use_qk_norm:
        Qn = fast_layernorm_inference(self.q_norm, Qn, self.q_norm_out_weight)
        Kn = fast_layernorm_inference(self.k_norm, Kn, self.k_norm_out_weight)
    # cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len)
    # Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids)
    cos, sin = self.rotary_emb.get_cached(kv_seq_len, Qn.device.index)
    cos = cos[position_ids].unsqueeze(1)
    sin = sin[position_ids].unsqueeze(1)
    h = self.half_head_dim
    # In-place RoPE: build rotate_half(Q) in the RH_Q scratch buffer, then
    # Qn = Qn*cos + rotate_half(Qn)*sin via addcmul_.
    RH_Q = self.RH_Q
    RH_Q[:, :, :, :h] = Qn[:, :, :, h:]
    RH_Q[:, :, :, h:] = Qn[:, :, :, :h]
    torch.neg(RH_Q[:, :, :, :h], out = RH_Q[:, :, :, :h])
    Qn *= cos
    Qn.addcmul_(RH_Q, sin)
    # Reuse the front n_kv_heads slice of RH_Q as the K rotation scratch.
    RH_K = RH_Q[
        :, :n_kv_heads, :, :
    ] # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, device = "cuda:0")
    RH_K[:, :, :, :h] = Kn[:, :, :, h:]
    RH_K[:, :, :, h:] = Kn[:, :, :, :h]
    torch.neg(RH_K[:, :, :, :h], out = RH_K[:, :, :, :h])
    Kn *= cos
    Kn.addcmul_(RH_K, sin)
    # New KV cache
    # Kn = torch.cat([K1, Kn], dim = 2)
    # Vn = torch.cat([V1, Vn], dim = 2)
    # Write the new token's K/V into the paged buffer, then re-view the
    # cache back to (bsz, n_kv_heads, kv_seq_len, head_dim).
    self.paged_attention_K[seq_len] = Kn.permute(2, 0, 1, 3)
    self.paged_attention_V[seq_len] = Vn.permute(2, 0, 1, 3)
    Kn = self.paged_attention_K[:kv_seq_len].permute(1, 2, 0, 3)
    Vn = self.paged_attention_V[:kv_seq_len].permute(1, 2, 0, 3)
    # Handle sliding windows
    sliding_window = getattr(self.config, "sliding_window", None)
    if sliding_window is not None and kv_seq_len > sliding_window:
        # From https://github.com/huggingface/transformers/blob/main/src/transformers/models/mistral/modeling_mistral.py#L193
        slicing_tokens = 1 - sliding_window
        Knn = Kn[:, :, slicing_tokens:, :]#.contiguous()
        Vnn = Vn[:, :, slicing_tokens:, :]#.contiguous()
    else:
        Knn, Vnn = Kn, Vn
    # Grouped query attention: expand KV heads to match the query heads.
    _, _, cached_len, _ = Knn.shape
    if n_groups != 1:
        Knn = Knn[:, :, None, :, :].expand(
            bsz, n_kv_heads, n_groups, cached_len, head_dim
        )
        Vnn = Vnn[:, :, None, :, :].expand(
            bsz, n_kv_heads, n_groups, cached_len, head_dim
        )
        Knn = Knn.reshape(bsz, n_heads, cached_len, head_dim)
        Vnn = Vnn.reshape(bsz, n_heads, cached_len, head_dim)
    # else:
    #     Knn, Vnn = Knn, Vnn
    # pass
    # Attention
    if bsz == 1:
        Qn *= self.scalar # See https://github.com/ggerganov/llama.cpp/issues/7805#issuecomment-2153349963
        # It seems like doing (Q * scalar) @ K is better than (Q @ K) * scalar to stop overflows
        A = torch_matmul(
            Qn, Knn.transpose(2, 3), out = self.attention[:, :, :, :cached_len]
        )
        # if attention_mask is not None: A += attention_mask # Must add attention_mask for batched
        A[:] = torch_nn_functional_softmax(
            A, dim = -1, dtype = torch.float32
        ) # .to(A.dtype)
        A = torch_matmul(A, Vnn, out = Qn)
    else:
        # Batched path: let SDPA apply the (already 4D) attention mask.
        A = scaled_dot_product_attention(
            Qn, Knn, Vnn, attn_mask = attention_mask, is_causal = False
        )
    A = A.transpose(1, 2)
    A = A.reshape(bsz, 1, attention_size)
    A = fast_linear_forward(self.o_proj, A, out = self.temp_O)
    return A, (Kn, Vn)
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825
# @torch.inference_mode
def CohereModel_fast_forward_inference(
    self,
    input_ids,
    past_key_values,
    position_ids,
    attention_mask = None,
):
    """Single-token decode pass over the whole Cohere model.

    Runs every decoder layer with the in-place inference helpers, using
    Cohere's parallel residual (attention and MLP both read the layernormed
    input and are added to the residual together).

    Args:
        input_ids: (bsz, 1) new token ids; truncated to ``max_seq_length``.
        past_key_values: per-layer ``(K, V)`` caches from the previous step.
        position_ids: position of the new token.
        attention_mask: only used when bsz > 1 (expanded to a 4D SDPA mask).

    Returns:
        BaseModelOutputWithPast with the final hidden state and updated caches.
    """
    # One fp32 layernorm scratch buffer per device, reused by every layer on
    # that device.
    out_weights = tuple(
        torch.empty_like(
            self.model.layers[0].input_layernorm.weight,
            dtype = torch.float32,
            device = torch.device(x),
        )
        for x in range(DEVICE_COUNT)
    )
    input_ids = input_ids[:, : self.max_seq_length]
    hidden_states = self.model.embed_tokens(input_ids)
    hidden_states = hidden_states.to(_get_dtype(dtype_from_config(self.config)))
    bsz, q_len, hd = hidden_states.shape
    seq_len = past_key_values[0][0].shape[-2]
    if bsz != 1:
        attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
            attention_mask,
            (bsz, q_len),
            hidden_states,
            seq_len,
            sliding_window = getattr(self.config, "sliding_window", None),
        )
    else:
        # bsz == 1 takes the unmasked matmul fast path in attention.
        attention_mask = None
    next_decoder_cache = []
    for idx, decoder_layer in enumerate(self.model.layers):
        # Layers may live on different devices (model-parallel placement).
        device_index = getattr(decoder_layer, "_per_layer_device_index", 0)
        hidden_states, position_ids = move_to_device(
            device_index, hidden_states, position_ids
        )
        residual = hidden_states
        hidden_states = fast_layernorm_inference(
            decoder_layer.input_layernorm, hidden_states, out_weights[device_index]
        )
        hidden_states_attention, present_key_value = (
            CohereAttention_fast_forward_inference(
                decoder_layer.self_attn,
                hidden_states = hidden_states,
                past_key_value = past_key_values[idx],
                position_ids = position_ids,
                attention_mask = attention_mask,
                do_prefill = not hasattr(decoder_layer.self_attn, "paged_attention"),
            )
        )
        # Parallel block: MLP consumes the same normed input as attention.
        hidden_states_mlp = fast_swiglu_inference(decoder_layer.mlp, hidden_states)
        # In-place accumulation into the residual tensor.
        residual += hidden_states_attention
        residual += hidden_states_mlp
        hidden_states = residual
        next_decoder_cache.append(present_key_value)
    # Final norm; device_index here is the last layer's device.
    hidden_states = fast_layernorm_inference(
        self.model.norm, hidden_states, out_weights[device_index]
    )
    return BaseModelOutputWithPast(
        last_hidden_state = hidden_states,
        past_key_values = next_decoder_cache,
        hidden_states = [],
        attentions = [],
    )
class FastCohereModel(FastLlamaModel):
    @staticmethod
    def pre_patch():
        """Swap HuggingFace's Cohere forward methods for Unsloth's fast paths.

        Also installs the generated RoPE-scaling-aware ``__init__`` on the
        attention class, patches generation input preparation, and replaces
        Cohere's rotary embedding with the shared Llama implementation.
        """
        patched_init_name, patched_init_source = patch_linear_scaling(
            model_name = "cohere",
            rope_module = LlamaRotaryEmbedding,
            scaled_rope_module = LlamaLinearScalingRotaryEmbedding,
            attention_module = CohereAttention,
        )
        if patched_init_name is not None:
            # Materialise the generated __init__ into module globals, then
            # bind it onto the attention class.
            exec(patched_init_source, globals())
            CohereAttention.__init__ = globals()[patched_init_name]
        # Every attention flavour shares the same fast forward implementation.
        for attention_cls in (
            CohereAttention,
            CohereSdpaAttention,
            CohereFlashAttention2,
        ):
            attention_cls.forward = CohereAttention_fast_forward
        CohereDecoderLayer.forward = CohereDecoderLayer_fast_forward
        CohereModel.forward = LlamaModel_fast_forward
        CohereForCausalLM.forward = CausalLM_fast_forward(CohereModel_fast_forward_inference)
        PeftModelForCausalLM.forward = PeftModel_fast_forward
        fix_prepare_inputs_for_generation(CohereForCausalLM)

        # Cohere's rotary embedding is interchangeable with Llama's fast one.
        import transformers.models.cohere.modeling_cohere

        transformers.models.cohere.modeling_cohere.CohereRotaryEmbedding = (
            LlamaRotaryEmbedding
        )
        return
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/models/__init__.py | unsloth/models/__init__.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .llama import FastLlamaModel
from .loader import FastLanguageModel, FastVisionModel, FastTextModel, FastModel
from .mistral import FastMistralModel
from .qwen2 import FastQwen2Model
from .qwen3 import FastQwen3Model
from .qwen3_moe import FastQwen3MoeModel
from .granite import FastGraniteModel
try:
from .falcon_h1 import FastFalconH1Model
except:
# transformers_version < 4.53.0 does not have falcon_h1 so silently skip it for now
pass
from .dpo import PatchDPOTrainer, PatchKTOTrainer
from ._utils import is_bfloat16_supported, is_vLLM_available, __version__
from .rl import PatchFastRL, vLLMSamplingParams
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/models/rl.py | unsloth/models/rl.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"PatchFastRL",
"vLLMSamplingParams",
]
import torch
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union
import inspect
import os
import re
from unsloth_zoo.compiler import create_new_function
from unsloth_zoo.log import logger
from unsloth_zoo.logging_utils import PatchRLStatistics
from unsloth_zoo.rl_replacements import RL_REPLACEMENTS
from .rl_replacements import (
RL_EXTRA_ARGS,
RL_FUNCTIONS,
RL_PRE_ITEMS,
RL_CONFIG_CHANGES,
RL_METRICS_CHANGES,
RL_ADDITIONAL_FUNCTIONS,
)
# Conservative torch.compile settings for patched RL code: autotuning,
# tracing and CUDA graphs are disabled for stability and compile-time reasons.
torch_compile_options = {
    "epilogue_fusion": True,
    "max_autotune": False,  # Disable Triton mm kernels
    "shape_padding": True,
    "trace.enabled": False,
    "triton.cudagraphs": False,
}
from trl import __version__ as trl_version
from unsloth_zoo.utils import Version
# Parse TRL's version string once so later patches can do version comparisons.
trl_version = Version(trl_version)
def vLLMSamplingParams(**kwargs):
    """Construct a vLLM ``SamplingParams``, remembering the kwargs used.

    The raw keyword arguments are stashed on the returned object as
    ``_set_kwargs`` so Unsloth code can later inspect or merge the
    user-supplied sampling options.
    """
    from vllm import SamplingParams
    params = SamplingParams(**kwargs)
    # Record the original kwargs for later introspection.
    params._set_kwargs = kwargs
    return params
def PatchRL(FastLanguageModel):
    """Monkey-patch TRL trainers so they cooperate with Unsloth models.

    Two process-wide patches are installed:

    1. Every ``*_trainer`` module in ``trl.trainer`` that exposes
       ``unwrap_model_for_generation`` gets a wrapper that switches the
       model into Unsloth inference mode for generation (cloning tensor
       outputs, since tensors produced under forced inference_mode cannot
       be used downstream) and restores training mode on exit.
    2. ``transformers.Trainer.prediction_step`` is replaced by a version
       that toggles the ``UNSLOTH_RETURN_LOGITS`` environment variable so
       Unsloth's patched forward materializes logits during evaluation.
    """
    from trl.models.utils import unwrap_model_for_generation
    from contextlib import contextmanager
    @contextmanager
    def unsloth_unwrap_model_for_generation(model, *args, **kwargs):
        # Wrap TRL's context manager: run generation in Unsloth's fast
        # inference mode, then put the model back into training mode.
        with unwrap_model_for_generation(model, *args, **kwargs) as unwrapped_model:
            # Put the model in inference mode.
            FastLanguageModel.for_inference(model)
            # We must use .clone for Unsloth since we force inference_mode
            # Rather we should have used no_grad
            original_generate = unwrapped_model.generate
            def generate_with_clone(*args, **kwargs):
                # Clone tensor outputs so callers get ordinary tensors
                # instead of inference_mode tensors.
                out = original_generate(*args, **kwargs)
                if isinstance(out, torch.Tensor):
                    return out.clone()
                return out
            unwrapped_model.generate = generate_with_clone
            try:
                yield unwrapped_model
            finally:
                # Restore generate and return
                unwrapped_model.generate = original_generate
                FastLanguageModel.for_training(model)
    from transformers import Trainer
    from transformers.trainer_pt_utils import nested_detach
    @torch.no_grad()
    def unsloth_prediction_step(
        self,
        model,
        inputs,
        prediction_loss_only,
        ignore_keys,
    ):
        """
        Perform an evaluation step on `model` using `inputs`.
        Subclass and override to inject custom behavior.
        Args:
            model (`nn.Module`):
                The model to evaluate.
            inputs (`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.
                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument `labels`. Check your model's documentation for all accepted arguments.
            prediction_loss_only (`bool`):
                Whether or not to return the loss only.
            ignore_keys (`List[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
        Return:
            Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
            logits and labels (each being optional).
        """
        # True only when every configured label column is present in inputs.
        has_labels = (
            False
            if len(self.label_names) == 0
            else all(inputs.get(k) is not None for k in self.label_names)
        )
        # For CLIP-like models capable of returning loss values.
        # If `return_loss` is not specified or being `None` in `inputs`, we check if the default value of `return_loss`
        # is `True` in `model.forward`.
        return_loss = inputs.get("return_loss", None)
        if return_loss is None:
            return_loss = self.can_return_loss
        loss_without_labels = (
            True if len(self.label_names) == 0 and return_loss else False
        )
        inputs = self._prepare_inputs(inputs)
        if ignore_keys is None:
            if hasattr(self.model, "config"):
                ignore_keys = getattr(
                    self.model.config, "keys_to_ignore_at_inference", []
                )
            else:
                ignore_keys = []
        # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
        if has_labels or loss_without_labels:
            labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
            if len(labels) == 1:
                labels = labels[0]
        else:
            labels = None
        # Ask Unsloth's patched forward to materialize logits for evaluation.
        os.environ["UNSLOTH_RETURN_LOGITS"] = "1"
        with torch.no_grad():
            if has_labels or loss_without_labels:
                with self.compute_loss_context_manager():
                    loss, outputs = self.compute_loss(
                        model, inputs, return_outputs = True
                    )
                loss = loss.mean().detach()
                if isinstance(outputs, dict):
                    logits = tuple(
                        v for k, v in outputs.items() if k not in ignore_keys + ["loss"]
                    )
                else:
                    logits = outputs[1:]
            else:
                loss = None
                with self.compute_loss_context_manager():
                    # No labels: tokenize the raw "prompt" column and run a
                    # plain forward pass to obtain logits.
                    tokenized_output = self.processing_class(
                        inputs["prompt"],
                        padding = True,
                        truncation = True,
                        return_tensors = "pt",
                    ).to(model.device)
                    outputs = model(**tokenized_output)
                if isinstance(outputs, dict):
                    logits = tuple(
                        v for k, v in outputs.items() if k not in ignore_keys
                    )
                else:
                    logits = outputs
                # TODO: this needs to be fixed and made cleaner later.
                if self.args.past_index >= 0:
                    self._past = outputs[self.args.past_index - 1]
        # Switch logits off again so training keeps Unsloth's memory savings.
        os.environ["UNSLOTH_RETURN_LOGITS"] = "0"
        if prediction_loss_only:
            return (loss, None, None)
        logits = nested_detach(logits)
        if len(logits) == 1:
            logits = logits[0]
        return (loss, logits, labels)
    # Install the unwrap wrapper on every TRL trainer module that has one.
    import trl.trainer
    trainers = dir(trl.trainer)
    trainers = [x for x in trainers if x.endswith("_trainer")]
    unwrap = "unwrap_model_for_generation"
    for trainer in trainers:
        try:
            current_trainer = getattr(trl.trainer, trainer)
        except:
            continue
        if hasattr(current_trainer, unwrap):
            try:
                setattr(current_trainer, unwrap, unsloth_unwrap_model_for_generation)
            except:
                continue
    # Replace the evaluation step globally on transformers' Trainer.
    Trainer.prediction_step = unsloth_prediction_step
selective_log_softmax = RL_REPLACEMENTS["selective_log_softmax"]
calculate_pad_tokens_in_prompt = RL_REPLACEMENTS["calculate_pad_tokens_in_prompt"]
create_completion_attention_mask = RL_REPLACEMENTS["create_completion_attention_mask"]
left_pack_padding = RL_REPLACEMENTS["left_pack_padding"]
align_logprobs_with_mask = RL_REPLACEMENTS["align_logprobs_with_mask"]
RLTrainer_replacement = '''
import os
from typing import *
from dataclasses import dataclass, field
from packaging.version import Version
import torch
import numpy as np
from contextlib import nullcontext
from torch.nn import functional as F
import inspect
import psutil
from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling as TransformersDataCollatorForLanguageModeling
from transformers.training_args import ParallelMode
# Wrap trainer with padding to right and enable training mode
# Also patches W&B since multiple runs must use wandb.finish()
import functools
from types import MethodType
try:
from unsloth_zoo.gradient_checkpointing import reset_unsloth_gradient_checkpointing_buffers
except:
def reset_unsloth_gradient_checkpointing_buffers(): pass
def prepare_for_training_mode(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
# Enable training mode
if hasattr(self, 'model') and hasattr(self.model, "for_training"):
self.model.for_training()
output = f(self, *args, **kwargs)
# Return inference mode
if hasattr(self, 'model') and hasattr(self.model, "for_inference"):
self.model.for_inference()
# Reset gradient checkpointing buffers to free memory while staying ready for next run
try:
reset_unsloth_gradient_checkpointing_buffers()
except:
pass
# Patch W&B to enable logging on future runs, otherwise it'll overwrite the first run
try:
import wandb
wandb.finish()
except:
pass
return output
return wrapper
pass
torch_compile_options = {{
"epilogue_fusion" : True,
"max_autotune" : False,
"shape_padding" : True,
"trace.enabled" : False,
"triton.cudagraphs" : False,
}}
{selective_log_softmax_code}
{calculate_pad_tokens_in_prompt_code}
{create_completion_attention_mask_code}
{left_pack_padding_code}
{align_logprobs_with_mask_code}
{RL_pre}
@dataclass
class Unsloth{RLConfig_name}({RLConfig_name}):
"""
{__RLConfig_doc__}
"""
vllm_sampling_params: Optional[Any] = field(
default = None,
metadata = {{'help': 'vLLM SamplingParams'}},
)
unsloth_num_chunks : Optional[int] = field(
default = -1,
metadata = {{'help': 'Chunk size to reduce memory usage. -1 is most efficient.'}},
)
{max_seq_length_pre}
def __init__({RLConfig_arguments},
vllm_sampling_params = None,
unsloth_num_chunks = -1,
{max_seq_length_call}
**kwargs,
):
{RLConfig_extra_args}
super().__init__({RLConfig_call_args}{RLConfig_kwargs})
self.vllm_sampling_params = vllm_sampling_params
self.unsloth_num_chunks = unsloth_num_chunks
{max_seq_length_post}
pass
{RLTrainer_extras}
class Unsloth{RLTrainer_name}(_Unsloth{RLTrainer_name}):
"""
{__RLTrainer_doc__}
"""
def __init__({RLTrainer_arguments},
**kwargs
):
if args is None: args = Unsloth{RLConfig_name}()
{RLTrainer_extra_args}
# [TODO] Fix up DataParallel multiplying batch sizes
# [TODO] DDP works, but DP seems to not work? [TODO]
if getattr(args, "parallel_mode", None) == ParallelMode.NOT_DISTRIBUTED and args.n_gpu > 1:
if getattr(args, "_n_gpu", 1) != 1:
args._n_gpu = 1
if "model" in locals() and hasattr(model, "for_training"):
model.for_training(use_gradient_checkpointing=getattr(args, 'gradient_checkpointing', True))
super().__init__({RLTrainer_call_args}{RLTrainer_kwargs})
if "model" in locals() and hasattr(model, "for_inference"):
model.for_inference()
{RLTrainer_post}
pass
'''
def _patch_trl_rl_trainers(trainer_file = "grpo_trainer"):
# Patch for vLLM and Unsloth PEFT
import trl
import trl.trainer
try:
trainer = eval(f"trl.trainer.{trainer_file}")
except Exception as error:
print(f"Unsloth: Could not import trl.trainer.{trainer_file}: {error}")
return
# Get SFTTrainer and SFTConfig names
name = [
x
for x in dir(trainer)
if x.endswith("Trainer")
and x != "Trainer"
and trainer_file.split("_")[0] in x.lower()
]
config = [
x
for x in dir(trainer)
if x.endswith("Config")
and x != "Config"
and trainer_file.split("_")[0] in x.lower()
]
if len(name) != 1:
logger.info(
f"Unsloth: Could not find Trainer class in trl.trainer.{trainer_file}. Found: {name}"
)
return
if len(config) != 1:
logger.info(
f"Unsloth: Could not find Config class in trl.trainer.{trainer_file}. Found: {config}"
)
return
# Get SFTTrainer, SFTConfig
RLTrainer_name = name[0]
RLConfig_name = config[0]
try:
RLTrainer = eval(f"trl.trainer.{trainer_file}.{RLTrainer_name}")
except Exception as e:
logger.info(
f"Unsloth: Could not load {RLTrainer_name} from trl.trainer.{trainer_file}: {e}"
)
return
try:
RLConfig = eval(f"trl.trainer.{trainer_file}.{RLConfig_name}")
except Exception as e:
logger.info(
f"Unsloth: Could not load {RLConfig_name} from trl.trainer.{trainer_file}: {e}"
)
return
# Check name
if RLTrainer.__name__.startswith("Unsloth"):
print(f"Unsloth: {RLTrainer.__name__} is already patched.")
return
if RLConfig.__name__.startswith("Unsloth"):
print(f"Unsloth: {RLConfig.__name__} is already patched.")
return
# Get old source
old_RLTrainer_source = inspect.getsource(RLTrainer)
old_RLConfig_source = inspect.getsource(RLConfig)
all_imports = dir(trainer)
# Fix _deprecate_arguments not getting imported so stop __ but not _
imports = [x for x in all_imports if not x.startswith("__")]
# Get default arguments
EMPTY = inspect.Parameter.empty
processed = []
for RLobject in [RLTrainer, RLConfig]:
parameters = inspect.signature(RLobject.__init__).parameters
types = (
bool,
type(None),
int,
float,
str,
)
arguments = ["self"]
call_args = []
for k, v in parameters.items():
if k == "self":
continue
v = v.default
if v == "\n":
v = re.escape("\n")
if v is EMPTY:
arguments.append(k)
elif type(v) is str:
arguments.append(f"{k} = '{v}'")
elif type(v) in types:
arguments.append(f"{k} = {v}")
else:
continue
call_args.append(f"{k} = {k}")
arguments = f"\n{' '*8}" + f",\n{' '*8}".join(arguments)
call_args = f"\n{' '*12}" + f",\n{' '*12}".join(call_args)
processed.append(
(
arguments,
call_args,
)
)
# Process RLTrainer first
arguments, call_args = processed[0]
RLTrainer_post = ""
# Add tokenizer if not seen
if "tokenizer" not in parameters and "processing_class" in parameters:
arguments += f",\n{' '*8}tokenizer = None"
call_args = call_args.replace(
"processing_class = processing_class",
"processing_class = tokenizer if tokenizer is not None else processing_class",
)
# Edit bf16, fp16 by checking model's dtype/torch_dtype directly
extra_args = ""
if "args" in call_args and "model" in call_args:
mixed_precision = (
"use_bf16 = getattr(args, 'bf16', False)\n"
"if type(use_bf16) is not bool: use_bf16 = False\n"
"use_fp16 = getattr(args, 'fp16', False)\n"
"if type(use_fp16) is not bool: use_fp16 = False\n"
"force_float32 = False\n"
"full_finetuning = os.environ.get('UNSLOTH_ENABLE_FULL_FINETUNING', '0') == '1'\n"
"if not full_finetuning and (os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1'):\n"
" print('Unsloth: Switching to float32 training since model cannot work with float16')\n"
" force_float32 = True\n"
"mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')\n"
"dtype = getattr(model.config, 'dtype', None) or getattr(model.config, 'torch_dtype', None)\n"
"if dtype is None: dtype = model.get_input_embeddings().weight.dtype\n"
"from unsloth_zoo.utils import _get_dtype\n"
"dtype = _get_dtype(dtype)\n"
"float16 = dtype == torch.float16\n"
"if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')\n"
"if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')\n"
"if force_float32:\n"
" # Forced float32 training\n"
" args.fp16 = False\n"
" args.bf16 = False\n"
" os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'\n"
" if hasattr(args, 'mixed_precision'): args.mixed_precision = 'no'\n"
" # args.mixed_precision is a new argument which needs to be set now\n"
"elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':\n"
" # Mixed precision training\n"
" args.fp16 = float16\n"
" args.bf16 = not float16\n"
" os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'\n"
" if hasattr(args, 'mixed_precision'): args.mixed_precision = 'fp16' if float16 else 'bf16'\n"
" # args.mixed_precision is a new argument which needs to be set now\n"
"elif mixed_precision_dtype == 'bfloat16':\n"
" # Both False since bfloat16 full finetuning doesn't do any autocasting.\n"
" args.fp16 = False\n"
" args.bf16 = False\n"
" os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'\n"
" if hasattr(args, 'mixed_precision'): args.mixed_precision = 'no'\n"
" # args.mixed_precision is a new argument which needs to be set now\n"
"\n"
)
extra_args += mixed_precision
# Check if per_device_eval_batch_size (default 8) bigger than bsz
# Also use FP16 / BF16 evaluation
if "args" in call_args:
# Check eval_dataset first
if "eval_dataset" in call_args:
check_eval_dataset = (
"if getattr(args, 'eval_dataset', None) is not None and "
"getattr(args, 'eval_strategy', 'no') == 'no':\n"
" args.eval_strategy = 'steps'\n"
" if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1\n"
)
extra_args += check_eval_dataset
# Check if gradient accumulation bug fix is applied
check_ga = (
"ga_steps = getattr(args, 'gradient_accumulation_steps', None)\n"
"if ga_steps is not None and ga_steps > 1:\n"
" from transformers import __version__ as transformers_version\n"
" if Version(transformers_version) <= Version('4.45.2'):\n"
" print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\\n'\n"
" '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')\n"
)
extra_args += check_ga
eval_changes = (
"if getattr(args, 'eval_strategy', 'no') != 'no':\n"
" eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)\n"
" if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size\n"
" if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps\n"
"fp16_full_eval = getattr(args, 'fp16_full_eval', False)\n"
"if type(fp16_full_eval) is not bool: fp16_full_eval = False\n"
"bf16_full_eval = getattr(args, 'bf16_full_eval', False)\n"
"if type(bf16_full_eval) is not bool: bf16_full_eval = False\n"
"if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True\n"
"if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False\n"
"if force_float32:\n"
" args.bf16_full_eval = False\n"
" args.fp16_full_eval = False\n"
"elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':\n"
" args.bf16_full_eval = True\n"
" args.fp16_full_eval = False\n"
"elif not bf16_full_eval and not fp16_full_eval:\n"
" args.bf16_full_eval = args.bf16\n"
" args.fp16_full_eval = args.fp16\n"
)
extra_args += eval_changes
# Force logits to be produced if preprocess_logits_for_metrics or compute_metrics is used
if "model" in call_args:
logits_check = (
"_output_logits = False\n"
"if locals().get('compute_metrics', None) is not None: _output_logits = True\n"
"if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True\n"
"if _output_logits:\n"
" os.environ['UNSLOTH_RETURN_LOGITS'] = '1'\n"
)
extra_args += logits_check
# Check max_seq_length
if "model" in call_args:
length_check = (
"if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):\n"
" pass\n"
"else:\n"
" model_max_seq_length = getattr(model, 'max_seq_length', None)\n"
" args_max_seq_length = getattr(args, 'max_seq_length', None)\n"
" if args_max_seq_length is None and model_max_seq_length is not None:\n"
" max_seq_length = model.max_seq_length\n"
" if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length\n"
" elif args_max_seq_length is not None and model_max_seq_length is not None:\n"
" if args_max_seq_length > model_max_seq_length:\n"
" print('Unsloth: You set `max_seq_length` as ' + str(args_max_seq_length) + ' but '\n"
" 'the maximum the model supports is ' + str(model_max_seq_length) + '. We shall reduce it.')\n"
" args.max_seq_length = model_max_seq_length\n"
)
extra_args += length_check
# At this point max_seq_length might be set, but trl is moving to max_length
if trainer_file == "sft_trainer":
max_length_check = (
"if 'max_length' not in locals() and not hasattr(args, 'max_length'):\n"
" pass\n"
"else:\n"
" if hasattr(args, 'max_seq_length') and args.max_seq_length is not None and args.max_seq_length > 0:\n"
" if hasattr(args, 'max_length'):\n"
" args.max_length = args.max_seq_length\n"
" max_length = args.max_length\n"
" else:\n"
" model_max_length = getattr(model, 'max_seq_length', None)\n"
" if model_max_length is None: model_max_length = getattr(model, 'max_length', None)\n"
" if model_max_length is not None:\n"
" args.max_length = model_max_length\n"
" max_length = args.max_length\n"
" elif hasattr(args, 'max_length') and args.max_length is not None:\n"
" max_length = args.max_length\n"
" # if we are here, then we are in a weird case where max_length is set but max_seq_length is not set\n"
" setattr(model, 'max_seq_length', max_length)\n"
" else:\n"
" print('Unsloth: We did not find `max_seq_length` or `max_length` in the model or args. We will set it to 1024.')\n"
" args.max_length = 1024\n"
)
extra_args += max_length_check
# Enable for training and move padding side of tokenizer to right
if "model" in call_args:
training_check = (
"if model is not None and hasattr(model, 'for_training'):\n"
" model.for_training(use_gradient_checkpointing=getattr(args, 'gradient_checkpointing', True))\n"
"if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'\n"
"if 'processing_class' in locals():\n"
" if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'\n"
" if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): "
"processing_class.tokenizer.padding_side = 'right'\n"
)
extra_args += training_check
# Check data collator if it's correct!
if "data_collator" in call_args and "train_dataset" in call_args:
data_collator_check = (
"__tokenizer = processing_class if 'processing_class' in locals() else tokenizer\n"
"from unsloth_zoo.vision_utils import UnslothVisionDataCollator\n"
"if not isinstance(data_collator, UnslothVisionDataCollator):\n"
" if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:\n"
" data_collator = TransformersDataCollatorForLanguageModeling(\n"
" __tokenizer,\n"
" mlm = False,\n"
" mlm_probability = 0.0,\n"
" pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),\n"
" )\n"
" elif isinstance(data_collator, TransformersDataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:\n"
" data_collator = DataCollatorForSeq2Seq(\n"
" __tokenizer,\n"
" pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),\n"
" )\n"
"else:\n"
" if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False\n"
" if hasattr(args, 'dataset_text_field'): args.dataset_text_field = ''\n"
" if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = {'skip_prepare_dataset': True}\n"
)
extra_args += data_collator_check
# Also check if .pad exists -> if not, and is VLM, then change it!
pad_check = (
"if not isinstance(data_collator, UnslothVisionDataCollator):\n"
" if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'):\n"
" if isinstance(data_collator, DataCollatorForSeq2Seq):\n"
" data_collator = DataCollatorForSeq2Seq(\n"
" __tokenizer.tokenizer,\n"
" pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),\n"
" )\n"
" else:\n"
" data_collator = TransformersDataCollatorForLanguageModeling(\n"
" __tokenizer.tokenizer,\n"
" mlm = False,\n"
" mlm_probability = 0.0,\n"
" pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None),\n"
" )\n"
)
extra_args += pad_check
# Check NEFTune
if "model" in call_args:
neftune_check = (
"if hasattr(self, 'neftune_hook_handle'):\n"
" self.neftune_hook_handle.remove()\n"
" if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle\n"
"if getattr(args, 'neftune_noise_alpha', None) is not None:\n"
" model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha\n"
"pass\n"
)
RLTrainer_post += neftune_check
# Add accelerator scaler to model
if "model" in call_args:
accelerator_check = (
"if hasattr(self, 'accelerator'):\n"
" scaler = self.accelerator.scaler\n"
" current_model = model\n"
" while hasattr(current_model, 'model'):\n"
" current_model.accelerator_scaler = scaler\n"
" current_model = current_model.model\n"
" current_model.accelerator_scaler = scaler\n"
"pass\n"
)
RLTrainer_post += accelerator_check
# Add enabling and disabling training modes
if "model" in call_args:
training_check = (
"if hasattr(self, 'train'):\n"
" self.train = MethodType(prepare_for_training_mode(self.__class__.train), self)\n"
"pass\n"
)
RLTrainer_post += training_check
# Edit optional metrics
other_metrics_processor = ""
if trainer_file in RL_METRICS_CHANGES:
process_extra_args = RL_METRICS_CHANGES[trainer_file]
for process_extra_arg in process_extra_args:
other_metrics_processor += process_extra_arg(
old_RLTrainer_source, old_RLConfig_source
)
# Add statistics as well!
extra_args += (
"other_metrics = []\n"
f"{other_metrics_processor}\n"
"from unsloth_zoo.logging_utils import PatchRLStatistics\n"
f"PatchRLStatistics('{trainer_file}', other_metrics)\n"
)
# Patch optional args
if trainer_file in RL_EXTRA_ARGS:
process_extra_args = RL_EXTRA_ARGS[trainer_file]
for process_extra_arg in process_extra_args:
extra_args += process_extra_arg(call_args, extra_args)
# Create RLTrainer args
extra_args = extra_args.split("\n")
extra_args = "\n".join(" " * 8 + x for x in extra_args)
RLTrainer_post = RLTrainer_post.split("\n")
RLTrainer_post = "\n".join(" " * 8 + x for x in RLTrainer_post)
RLTrainer_arguments = arguments
RLTrainer_extra_args = extra_args
RLTrainer_call_args = call_args
# Fix RLConfig next
arguments, call_args = processed[1]
extra_args = ""
# Edit GA / bsz and weight_decay
replacements = {
"output_dir": None,
"logging_nan_inf_filter": False,
"per_device_train_batch_size": 4,
"gradient_accumulation_steps": 2,
"weight_decay": 0.01,
"warmup_ratio": 0.1,
"seed": 3407,
"optim": "adamw_8bit",
"learning_rate": 5e-05,
"per_device_eval_batch_size": 4,
"eval_accumulation_steps": 2,
"torch_empty_cache_steps": 250,
"logging_steps": 1,
"max_seq_length": None,
"num_generations": 8,
# "steps_per_generation" : 1, # Otherwise defaults to ga_steps which is wrong
# "generation_batch_size" : None, # Useless. If steps_per_generation set, generation_batch_size clashes
"top_k": None,
"vllm_mode": "colocate",
"generation_kwargs": {},
"bf16": False,
"fp16": False,
"report_to": "none",
"include_tokens_per_second": False,
"include_num_input_tokens_seen": False,
"auto_find_batch_size": False, # Auto /2 batch size - too many people complained so removing
"dataloader_pin_memory": True,
# Might fail so disable for now
# "dataloader_persistent_workers" : True, # Keeps dataloader in RAM
# "dataloader_prefetch_factor" : 2,
# "dataloader_num_workers" : 2, # Default is 0 means 1
}
for k, v in replacements.items():
x = f"{k}( = [^,\n]{{1,}})?,\n"
y = f"'{v}'" if type(v) is str else f"{v}"
y = f"{k} = {y},\n"
arguments = re.sub(x, y, arguments)
# Fix GRPO beta default as 0.001 TRL used to be 0.04, now 0.00!
# https://github.com/huggingface/trl/pull/3516
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | true |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/models/llama4.py | unsloth/models/llama4.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from unsloth_studio.models import patch_llama4
# patch_llama4()
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/models/gemma2.py | unsloth/models/gemma2.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .llama import *
from ._utils import __version__
from unsloth_zoo.utils import _get_dtype
from unsloth_zoo.hf_utils import dtype_from_config
from ..utils.packing import get_packed_info_from_kwargs
from ..utils.attention_dispatch import (
AttentionConfig,
AttentionContext,
run_attention,
select_attention_backend,
SDPA,
)
from .gemma import (
GemmaFixedRotaryEmbedding,
GemmaFixedLinearScalingRotaryEmbedding,
fast_geglu_inference,
)
try:
from transformers.models.gemma2.modeling_gemma2 import (
Gemma2Attention,
Gemma2DecoderLayer,
Gemma2Model,
Gemma2ForCausalLM,
Gemma2RotaryEmbedding,
apply_rotary_pos_emb,
repeat_kv,
)
except:
from packaging.version import Version
transformers_version = Version(transformers_version)
if not transformers_version >= Version("4.42"):
raise ImportError(
f"Unsloth: Your transformers version of {transformers_version} does not support Gemma2.\n"
f"The minimum required version is 4.42.3.\n"
f'Try `pip install --upgrade "transformers>=4.42.3"`\n'
f"to obtain the latest transformers build, then restart this session."
)
from transformers.modeling_attn_mask_utils import (
_prepare_4d_causal_attention_mask_for_sdpa,
)
# For Pytorch 2.1.1
try:
from transformers.models.gemma2.modeling_gemma2 import (
Gemma2SdpaAttention,
Gemma2FlashAttention2,
)
except:
Gemma2SdpaAttention = Gemma2Attention
Gemma2FlashAttention2 = Gemma2Attention
if HAS_FLASH_ATTENTION_SOFTCAPPING:
from flash_attn import flash_attn_func
# Logit softcapping
def Gemma2Attention_fast_forward(
    self,
    hidden_states: torch.Tensor,
    causal_mask: Optional[BlockDiagonalCausalMask] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: bool = False,
    use_cache: bool = False,
    padding_mask: Optional[torch.LongTensor] = None,
    *args,
    **kwargs,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
    """Fast training/prefill attention forward for Gemma-2 with logit softcapping.

    Projects QKV, applies RoPE, extends the (K, V) cache, then dispatches to
    either a flash-attention path (with optional sliding window and softcap)
    or the slow softcapped reference implementation when an explicit
    attention_mask is given or flash-attn softcapping is unavailable.

    Returns ``(attn_output, None, past_key_value)`` — attention weights are
    never returned by this implementation.
    """
    # Clear inference
    if hasattr(self, "paged_attention"):
        del self.paged_attention_K
        del self.paged_attention_V
        del self.paged_attention
        del self.temp_QA
        del self.temp_KV
        del self.RH_Q
        del self.attention
    bsz, q_len, _ = hidden_states.size()
    n_heads = self.config.num_attention_heads
    n_groups = self.num_key_value_groups
    n_kv_heads = self.config.num_key_value_heads
    head_dim = self.head_dim
    # GQA invariant: query heads must be an exact multiple of KV heads.
    assert n_kv_heads * n_groups == n_heads
    Q, K, V = self.apply_qkv(self, hidden_states)
    # Reshape to (bsz, heads, seq, head_dim) for attention.
    Q = Q.view(bsz, q_len, n_heads, head_dim).transpose(1, 2)
    K = K.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2)
    V = V.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2)
    # Packed-sequence (varlen) metadata, if the caller passed any.
    seq_info = get_packed_info_from_kwargs(kwargs, Q.device)
    kv_seq_len = K.shape[-2]
    if past_key_value is not None:
        kv_seq_len += past_key_value[0].shape[-2]
    # Per-device RoPE caches (multi-GPU support).
    device_index = Q.device.index
    cos = self.rotary_emb.multi_gpu_cos_cached[device_index]
    sin = self.rotary_emb.multi_gpu_sin_cached[device_index]
    rope_position_ids = (
        position_ids if position_ids is not None else kwargs.get("position_ids")
    )
    if rope_position_ids is not None:
        # Useful for LongRoPE
        cos_var, sin_var = self.rotary_emb.get_cached(kv_seq_len, device_index)
        Q, K = fast_rope_embedding(Q, K, cos_var, sin_var, rope_position_ids)
    else:
        Q, K = fast_rope_embedding(Q, K, cos, sin)
    # Append to the KV cache along the sequence dimension.
    if past_key_value is not None:
        K = torch.cat([past_key_value[0], K], dim = 2)
        V = torch.cat([past_key_value[1], V], dim = 2)
    past_key_value = (K, V) if use_cache else None
    # Only enable if the attention_mask is True
    use_sliding_window = kwargs.get("use_sliding_window")
    has_sliding_window = (
        use_sliding_window
        if use_sliding_window is not None
        else isinstance(causal_mask, bool) and causal_mask is True
    )
    # Flash path requires flash-attn with softcap support and no explicit mask.
    use_flash = HAS_FLASH_ATTENTION_SOFTCAPPING and attention_mask is None
    if use_flash:
        window = (-1, -1)
        sliding_window = getattr(self.config, "sliding_window", None)
        if has_sliding_window:
            sliding_window = (
                sliding_window if sliding_window is not None else kv_seq_len
            )
            # (-1, -1) means full attention; otherwise limit both sides.
            window = (
                (-1, -1)
                if kv_seq_len <= sliding_window
                else (sliding_window, sliding_window)
            )
        # Gemma-2 scales by query_pre_attn_scalar, not head_dim; cache it once.
        if not hasattr(self, "_flash_attention_softmax_scale"):
            self._flash_attention_softmax_scale = 1.0 / (
                self.config.query_pre_attn_scalar**0.5
            )
        # Varlen kernels only apply to packed sequences without a KV cache.
        use_varlen = seq_info is not None and past_key_value is None
        attention_config = AttentionConfig(
            backend = select_attention_backend(use_varlen),
            n_kv_heads = n_kv_heads,
            n_groups = n_groups,
            flash_dense_kwargs = {
                "causal": True,
                "softcap": self.config.attn_logit_softcapping,
                "softmax_scale": self._flash_attention_softmax_scale,
                "window_size": window,
            },
            flash_varlen_kwargs = {
                "dropout_p": 0.0,
                "softmax_scale": self._flash_attention_softmax_scale,
                "causal": True,
                "softcap": self.config.attn_logit_softcapping,
                "window_size": window,
            },
        )
        context = AttentionContext(
            bsz = bsz,
            q_len = q_len,
            kv_seq_len = kv_seq_len,
            n_heads = n_heads,
            head_dim = head_dim,
            requires_grad = hidden_states.requires_grad,
            seq_info = seq_info,
            attention_mask = attention_mask,
            causal_mask = causal_mask,
            sliding_window = sliding_window,
        )
        A = run_attention(config = attention_config, context = context, Q = Q, K = K, V = V)
        # Merge heads back: (bsz, q_len, hidden).
        A = A.reshape(bsz, q_len, n_heads * head_dim)
    else:
        # Slow softcapped reference path; a generation-specialized variant is
        # used when the caller flags single-token decoding.
        fx = (
            slow_inference_attention_softcapping
            if "_flag_for_generation" in kwargs
            else slow_attention_softcapping
        )
        A = fx(Q, K, V, causal_mask, self, bsz, kv_seq_len)
    A = self.apply_o(self, A)
    return A, None, past_key_value
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L590
def Gemma2DecoderLayer_fast_forward(
    self,
    hidden_states: torch.Tensor,
    causal_mask: Optional[BlockDiagonalCausalMask] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: Optional[bool] = False,
    use_cache: Optional[bool] = False,
    padding_mask: Optional[torch.LongTensor] = None,
    *args,
    **kwargs,
):
    """Fast forward for one Gemma2 decoder layer.

    Gemma2 sandwiches each sub-block between TWO RMS layernorms:
    input/post_attention around self-attention and pre/post_feedforward
    around the MLP.  Two paths:

    * token-by-token generation (``use_cache`` and ``self._flag_for_generation``
      set): fused inference layernorm / GEGLU kernels with in-place residual
      adds (no autograd on this path);
    * training / prefill: autograd-safe fused kernels.

    Returns ``(hidden_states,)`` optionally extended with attention weights
    and the present KV cache, mirroring the HF decoder-layer contract.
    """
    if use_cache and hasattr(self, "_flag_for_generation"):
        # Scratch fp32 buffer shared by all four layernorm calls below.
        # NOTE(review): device is hard-coded to "cuda:0" — presumably this
        # per-layer path is only hit on single-GPU generation; the multi-GPU
        # decode path goes through Gemma2Model_fast_forward_inference, which
        # allocates one buffer per device.  Confirm for pipeline parallelism.
        out_weight = torch.empty(
            self.input_layernorm.weight.shape, dtype = torch.float32, device = "cuda:0"
        )

        # Self Attention
        residual = hidden_states
        hidden_states = fast_rms_layernorm_inference_gemma(
            self.input_layernorm, hidden_states, out_weight
        )
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states = hidden_states,
            causal_mask = causal_mask,
            attention_mask = attention_mask,
            position_ids = position_ids,
            past_key_value = past_key_value,
            output_attentions = output_attentions,
            use_cache = use_cache,
            padding_mask = padding_mask,
            _flag_for_generation = self._flag_for_generation,
            **kwargs,
        )
        hidden_states = fast_rms_layernorm_inference_gemma(
            self.post_attention_layernorm, hidden_states, out_weight
        )
        # In-place add is safe here: inference only, no autograd graph.
        hidden_states += residual

        # Fully Connected
        residual = hidden_states
        hidden_states = fast_rms_layernorm_inference_gemma(
            self.pre_feedforward_layernorm, hidden_states, out_weight
        )
        hidden_states = fast_geglu_inference(self.mlp, hidden_states)
        hidden_states = fast_rms_layernorm_inference_gemma(
            self.post_feedforward_layernorm, hidden_states, out_weight
        )
        hidden_states += residual
    else:
        # Training / prefill path: out-of-place ops so autograd works.
        residual = hidden_states
        hidden_states = fast_rms_layernorm(
            self.input_layernorm, hidden_states, gemma = True
        )
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states = hidden_states,
            causal_mask = causal_mask,
            attention_mask = attention_mask,
            position_ids = position_ids,
            past_key_value = past_key_value,
            output_attentions = output_attentions,
            use_cache = use_cache,
            padding_mask = padding_mask,
            **kwargs,
        )
        hidden_states = fast_rms_layernorm(
            self.post_attention_layernorm, hidden_states, gemma = True
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = fast_rms_layernorm(
            self.pre_feedforward_layernorm, hidden_states, gemma = True
        )
        hidden_states = self.mlp(hidden_states)
        hidden_states = fast_rms_layernorm(
            self.post_feedforward_layernorm, hidden_states, gemma = True
        )
        hidden_states = residual + hidden_states

    outputs = (hidden_states,)
    if output_attentions:
        outputs += (self_attn_weights,)
    if use_cache:
        outputs += (present_key_value,)
    return outputs
from math import sqrt as math_sqrt

# Number of extra KV-cache slots allocated each time the paged cache fills.
KV_CACHE_INCREMENT = 256  # KV Cache update size
# Bind hot torch functions to module-level names so the decode loop below
# resolves them with a single global lookup.
torch_nn_functional_softmax = torch.nn.functional.softmax
torch_matmul = torch.matmul
torch_tanh = torch.tanh
def Gemma2Attention_fast_forward_inference(
    self,
    hidden_states: torch.Tensor,
    past_key_value: Optional[Tuple[torch.Tensor]],
    position_ids,
    do_prefill = False,
    attention_mask = None,
    use_sliding_window = False,
    **kwargs,
):
    """Single-token (q_len == 1) Gemma2 attention decode step.

    K/V live in a pre-allocated "paged" buffer stored on ``self`` and grown in
    ``KV_CACHE_INCREMENT`` chunks, avoiding per-step ``torch.cat`` reallocations.
    Applies RoPE in place via scratch buffers, grouped-query attention,
    Gemma2's tanh logit softcapping, and optionally a sliding window.

    Returns ``(attn_out, (K, V))`` where K/V are views into the paged cache
    shaped ``[bsz, n_kv_heads, kv_seq_len, head_dim]``.
    """
    Xn = hidden_states
    bsz, _, hd = hidden_states.size()
    K1, V1 = past_key_value
    dtype = Xn.dtype

    n_heads = self.config.num_attention_heads
    n_groups = self.num_key_value_groups
    n_kv_heads = self.config.num_key_value_heads
    head_dim = self.head_dim
    # assert(n_kv_heads * n_groups == n_heads)
    hidden_size = self.config.hidden_size
    attention_size = n_heads * head_dim
    seq_len = K1.shape[-2]
    kv_seq_len = seq_len + 1  # cache plus the one new token
    device = hidden_states.device

    # Prefill phase: allocate the paged cache and all scratch buffers once.
    if do_prefill:
        # Cache layout: [position, K-or-V, bsz, n_kv_heads, head_dim].
        self.paged_attention = torch.empty(
            (KV_CACHE_INCREMENT + seq_len + 1, 2, bsz, n_kv_heads, head_dim),
            dtype = dtype,
            device = device,
        )
        self.paged_attention_K = self.paged_attention[:, 0]
        self.paged_attention_V = self.paged_attention[:, 1]
        self.paged_attention_K[:seq_len] = K1.permute(2, 0, 1, 3)
        self.paged_attention_V[:seq_len] = V1.permute(2, 0, 1, 3)
        self.temp_QA = torch.empty(
            (2, bsz, 1, attention_size), dtype = dtype, device = device
        )
        self.temp_KV = torch.empty(
            (2, bsz, 1, n_kv_heads * head_dim), dtype = dtype, device = device
        )
        # Rotate-half scratch buffer for in-place RoPE.
        self.RH_Q = torch.empty((bsz, n_heads, 1, head_dim), dtype = dtype, device = device)
        # Only for Gemma2: output-projection scratch buffer.
        self.temp_O = torch.empty((1, bsz, hidden_size), dtype = dtype, device = device)
        self.attention = torch.empty(
            (bsz, n_heads, 1, KV_CACHE_INCREMENT + seq_len), dtype = dtype, device = device
        )
        # See https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e
        # Gemma 9b should use 256 and not 224 (hs / nah). 27b uses the below.
        # We default to using the config file itself.
        # s = self.config.hidden_size // self.config.num_attention_heads
        self.scalar = 1.0 / math_sqrt(self.config.query_pre_attn_scalar)
        # self.scalar = 1.0 / math_sqrt(self.config.hidden_size // self.config.num_attention_heads)
        self.half_head_dim = head_dim // 2
        # Logit softcapping constants: A = tanh(A / t) * t
        self.t = self.config.attn_logit_softcapping
        self.reciprocal_t = 1.0 / self.config.attn_logit_softcapping
    elif kv_seq_len >= self.paged_attention.shape[0]:
        # Cache is full: grow both the KV pages and the score buffer in place.
        self.paged_attention.resize_(
            (
                self.paged_attention.shape[0] + KV_CACHE_INCREMENT,
                2,
                bsz,
                n_kv_heads,
                head_dim,
            )
        )
        self.paged_attention_K = self.paged_attention[:, 0]
        self.paged_attention_V = self.paged_attention[:, 1]
        self.attention.resize_(
            (bsz, n_heads, 1, self.attention.shape[-1] + KV_CACHE_INCREMENT)
        )

    # Project the single new token into Q/K/V using the scratch buffers.
    Qn = fast_linear_forward(self.q_proj, Xn, out = self.temp_QA[0])
    Kn = fast_linear_forward(self.k_proj, Xn, out = self.temp_KV[0])
    Vn = fast_linear_forward(self.v_proj, Xn, out = self.temp_KV[1])
    Qn = Qn.view(bsz, 1, n_heads, head_dim).transpose(1, 2)
    Kn = Kn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2)
    Vn = Vn.view(bsz, 1, n_kv_heads, head_dim).transpose(1, 2)

    # In-place RoPE using the rotate-half trick and the RH_Q scratch buffer.
    # cos, sin = self.rotary_emb(Vn, seq_len = kv_seq_len)
    # Qn, Kn = inplace_rope_embedding(Qn, Kn, cos, sin, position_ids)
    cos, sin = self.rotary_emb.get_cached(kv_seq_len, Qn.device.index)
    cos = cos[position_ids].unsqueeze(1)
    sin = sin[position_ids].unsqueeze(1)
    h = self.half_head_dim

    RH_Q = self.RH_Q
    RH_Q[:, :, :, :h] = Qn[:, :, :, h:]
    RH_Q[:, :, :, h:] = Qn[:, :, :, :h]
    torch.neg(RH_Q[:, :, :, :h], out = RH_Q[:, :, :, :h])
    Qn *= cos
    Qn.addcmul_(RH_Q, sin)

    # Reuse the first n_kv_heads rows of RH_Q as K's rotate-half scratch.
    RH_K = RH_Q[
        :, :n_kv_heads, :, :
    ]  # torch.empty((n_kv_heads, 1, head_dim), dtype = dtype, device = "cuda:0")
    RH_K[:, :, :, :h] = Kn[:, :, :, h:]
    RH_K[:, :, :, h:] = Kn[:, :, :, :h]
    torch.neg(RH_K[:, :, :, :h], out = RH_K[:, :, :, :h])
    Kn *= cos
    Kn.addcmul_(RH_K, sin)

    # Append the new K/V row, then view the whole cache back in
    # [bsz, n_kv_heads, kv_seq_len, head_dim] layout (no copy).
    # Kn = torch.cat([K1, Kn], dim = 2)
    # Vn = torch.cat([V1, Vn], dim = 2)
    self.paged_attention_K[seq_len] = Kn.permute(2, 0, 1, 3)
    self.paged_attention_V[seq_len] = Vn.permute(2, 0, 1, 3)
    Kn = self.paged_attention_K[:kv_seq_len].permute(1, 2, 0, 3)
    Vn = self.paged_attention_V[:kv_seq_len].permute(1, 2, 0, 3)

    # Handle sliding windows: attend only to the trailing window of the cache.
    sliding_window = self.config.sliding_window
    if use_sliding_window and kv_seq_len > sliding_window:
        # From https://github.com/huggingface/transformers/blob/main/src/transformers/models/mistral/modeling_mistral.py#L193
        slicing_tokens = 1 - sliding_window
        Knn = Kn[:, :, slicing_tokens:, :]  # .contiguous()
        Vnn = Vn[:, :, slicing_tokens:, :]  # .contiguous()
    else:
        Knn, Vnn = Kn, Vn

    # Grouped query attention: expand KV heads to match the query head count.
    _, _, cached_len, _ = Knn.shape
    if n_groups != 1:
        Knn = Knn[:, :, None, :, :].expand(
            bsz, n_kv_heads, n_groups, cached_len, head_dim
        )
        Vnn = Vnn[:, :, None, :, :].expand(
            bsz, n_kv_heads, n_groups, cached_len, head_dim
        )
        Knn = Knn.reshape(bsz, n_heads, cached_len, head_dim)
        Vnn = Vnn.reshape(bsz, n_heads, cached_len, head_dim)

    # Attention
    Qn *= (
        self.scalar
    )  # See https://github.com/ggerganov/llama.cpp/issues/7805#issuecomment-2153349963
    # It seems like doing (Q * scalar) @ K is better than (Q @ K) * scalar to stop overflows
    A = torch_matmul(Qn, Knn.transpose(2, 3), out = self.attention[:, :, :, :cached_len])
    # NOTE(review): attention_mask is accepted but not added to the scores
    # here, so batched decoding with padding may attend to pad tokens —
    # confirm callers only hit this with bsz == 1 or no padding.
    # if attention_mask is not None: A += attention_mask  # Must add attention_mask for batched
    A *= self.reciprocal_t
    torch_tanh(A, out = A)
    A *= self.t  # Logit softcapping: tanh(A / t) * t
    A[:] = torch_nn_functional_softmax(A, dim = -1, dtype = torch.float32)  # .to(A.dtype)
    A = torch_matmul(A, Vnn, out = Qn)
    A = A.transpose(1, 2)
    A = A.reshape(bsz, 1, attention_size)
    A = fast_linear_forward(self.o_proj, A, out = self.temp_O)
    return A, (Kn, Vn)
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L825
# @torch.inference_mode
def Gemma2Model_fast_forward_inference(
    self,
    input_ids,
    past_key_values,
    position_ids,
    attention_mask = None,
    **kwargs,
):
    """Whole-model single-step decode for Gemma2.

    Runs one token through every decoder layer using the paged-KV attention
    step, alternating sliding-window attention (even layer indices) with
    global attention (odd), and reusing one fp32 layernorm scratch buffer per
    device for pipeline-parallel layouts.

    Returns a ``BaseModelOutputWithPast`` whose ``past_key_values`` holds the
    updated per-layer (K, V) tuples.
    """
    # One fp32 scratch buffer per device, indexed by each layer's device.
    out_weights = tuple(
        torch.empty_like(
            self.model.layers[0].input_layernorm.weight,
            dtype = torch.float32,
            device = torch.device(x),
        )
        for x in range(DEVICE_COUNT)
    )
    input_ids = input_ids[:, : self.max_seq_length]
    hidden_states = self.model.embed_tokens(input_ids)
    hidden_states = hidden_states.to(_get_dtype(dtype_from_config(self.config)))
    # Gemma scales embeddings by sqrt(hidden_size) — done in the activation
    # dtype on purpose, to match HF's (bfloat16-rounded) numerics:
    # 3072**0.5 = 55.5000 in bfloat16, whilst 55.4256 in float32
    # 2048**0.5 = 45.2500 in bfloat16, whilst 45.2548 in float32
    hidden_states *= torch.tensor(
        math_sqrt(self.config.hidden_size), dtype = hidden_states.dtype
    )

    bsz, q_len, hd = hidden_states.shape
    seq_len = past_key_values[0][0].shape[-2]
    if bsz != 1:
        if HAS_FLASH_ATTENTION_SOFTCAPPING:
            # NOTE(review): with flash-attention these are booleans rather
            # than masks — presumably the attention step treats them as flags
            # and ignores masking; confirm against the attention kernel.
            SWA = True
            GA = False
        else:
            # Dense SDPA masks: one with the sliding window, one global.
            SWA = _prepare_4d_causal_attention_mask_for_sdpa(
                attention_mask,
                (bsz, q_len),
                hidden_states,
                seq_len,
                sliding_window = self.config.sliding_window,
            )
            GA = _prepare_4d_causal_attention_mask_for_sdpa(
                attention_mask,
                (bsz, q_len),
                hidden_states,
                seq_len,
            )
    else:
        SWA = attention_mask
        GA = attention_mask

    next_decoder_cache = []
    for idx, decoder_layer in enumerate(self.model.layers):
        # For pipeline parallelism, we need to move all tensors to the same device
        # note that this movement is once per GPU in PP
        device_index = getattr(decoder_layer, "_per_layer_device_index", 0)
        hidden_states, position_ids = move_to_device(
            device_index, hidden_states, position_ids
        )
        # Gemma2 alternates attention types: even layers use the sliding window.
        use_sliding_window = idx % 2 == 0

        residual = hidden_states
        hidden_states = fast_rms_layernorm_inference_gemma(
            decoder_layer.input_layernorm, hidden_states, out_weights[device_index]
        )
        hidden_states, present_key_value = Gemma2Attention_fast_forward_inference(
            decoder_layer.self_attn,
            hidden_states = hidden_states,
            past_key_value = past_key_values[idx],
            position_ids = position_ids,
            attention_mask = SWA if use_sliding_window else GA,
            do_prefill = not hasattr(decoder_layer.self_attn, "paged_attention"),
            use_sliding_window = use_sliding_window,
        )
        hidden_states = fast_rms_layernorm_inference_gemma(
            decoder_layer.post_attention_layernorm,
            hidden_states,
            out_weights[device_index],
        )
        hidden_states += residual

        residual = hidden_states
        hidden_states = fast_rms_layernorm_inference_gemma(
            decoder_layer.pre_feedforward_layernorm,
            hidden_states,
            out_weights[device_index],
        )
        hidden_states = fast_geglu_inference(decoder_layer.mlp, hidden_states)
        hidden_states = fast_rms_layernorm_inference_gemma(
            decoder_layer.post_feedforward_layernorm,
            hidden_states,
            out_weights[device_index],
        )
        hidden_states += residual
        next_decoder_cache.append(present_key_value)

    # Final norm runs on the last layer's device (device_index from the loop).
    hidden_states = fast_rms_layernorm_inference_gemma(
        self.model.norm, hidden_states, out_weights[device_index]
    )
    return BaseModelOutputWithPast(
        last_hidden_state = hidden_states,
        past_key_values = next_decoder_cache,
        hidden_states = [],
        attentions = [],
    )
class FastGemma2Model(FastLlamaModel):
    """Unsloth patcher for HF Gemma2 models."""

    @staticmethod
    def pre_patch():
        """Swap the HF Gemma2 forward methods for Unsloth fast forwards.

        Must run before the model is instantiated so the patched classes are
        picked up by ``from_pretrained``.
        """
        init_name, function = patch_linear_scaling(
            model_name = "gemma2",
            rope_module = GemmaFixedRotaryEmbedding,
            scaled_rope_module = GemmaFixedLinearScalingRotaryEmbedding,
            attention_module = Gemma2Attention,
        )
        if init_name is not None:
            # Compile the rewritten __init__ source and install it.
            exec(function, globals())
            Gemma2Attention.__init__ = eval(init_name)

        # All attention flavors share the same fast forward.
        for attention_class in (
            Gemma2Attention,
            Gemma2SdpaAttention,
            Gemma2FlashAttention2,
        ):
            attention_class.forward = Gemma2Attention_fast_forward
        Gemma2DecoderLayer.forward = Gemma2DecoderLayer_fast_forward
        Gemma2Model.forward = LlamaModel_fast_forward
        Gemma2ForCausalLM.forward = CausalLM_fast_forward(
            Gemma2Model_fast_forward_inference
        )
        PeftModelForCausalLM.forward = PeftModel_fast_forward
        fix_prepare_inputs_for_generation(Gemma2ForCausalLM)

        # Solves https://github.com/unslothai/unsloth/issues/168
        # Static KV Cache was introduced in 4.38.0, causing training to be much slower.
        # Inference can now be CUDAGraphed, but we shall retain the old rotary embeddings.
        # https://github.com/huggingface/transformers/pull/27931
        # https://github.com/huggingface/transformers/blob/v4.37.2/src/transformers/models/llama/modeling_llama.py
        import transformers.models.gemma2.modeling_gemma2

        transformers.models.gemma2.modeling_gemma2.Gemma2RotaryEmbedding = (
            GemmaFixedRotaryEmbedding
        )
        return

    @staticmethod
    def post_patch(model, tokenizer):
        """Post-load fixes: fp32 RoPE, LoRA-only gradients, RMSNorm eps alias."""
        # Gemma does not downcast RoPE.
        model, tokenizer = patch_model_and_tokenizer(
            model, tokenizer, downcast_rope = False
        )

        # Gemma RMSNorm returns output * (1 + weight); the +1 is handled
        # inside the Triton kernel itself, so the weights are left untouched.
        # https://github.com/huggingface/transformers/blob/main/src/transformers/models/gemma/modeling_gemma.py#L89
        from transformers.models.gemma2.modeling_gemma2 import Gemma2RMSNorm

        # Train only the LoRA adapters; freeze every other parameter.
        # (Done before touching norms, since in-place weight edits dislike
        # requires_grad = True.)
        for param_name, param in model.named_parameters():
            param.requires_grad_(
                ".lora_A." in param_name or ".lora_B." in param_name
            )

        # Gemma stores `eps`; Unsloth's kernels expect `variance_epsilon`.
        # https://github.com/keras-team/keras-nlp/blob/v0.8.2/keras_nlp/models/gemma/rms_normalization.py#L36
        for _, submodule in model.named_modules():
            if isinstance(submodule, Gemma2RMSNorm):
                if not hasattr(submodule, "variance_epsilon"):
                    submodule.variance_epsilon = submodule.eps

        # Clear deleted GPU items.
        import gc

        for _ in range(3):
            gc.collect()
            torch.cuda.empty_cache()
        return model, tokenizer
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/models/mistral.py | unsloth/models/mistral.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .llama import *
import os
from ._utils import __version__
from unsloth_zoo.utils import _get_dtype
from unsloth_zoo.hf_utils import dtype_from_config
from ..utils.packing import (
get_packed_info_from_kwargs,
mask_packed_sequence_boundaries,
)
from ..utils.attention_dispatch import (
AttentionConfig,
AttentionContext,
run_attention,
select_attention_backend,
)
from .llama import (
LlamaRotaryEmbedding,
LlamaLinearScalingRotaryEmbedding,
)
from transformers.models.mistral.modeling_mistral import (
MistralAttention,
MistralDecoderLayer,
MistralModel,
MistralForCausalLM,
)
# For Pytorch 2.1.1
try:
from transformers.models.mistral.modeling_mistral import (
MistralSdpaAttention,
MistralFlashAttention2,
)
except:
MistralSdpaAttention = MistralAttention
MistralFlashAttention2 = MistralAttention
from unsloth_zoo.utils import Version, _get_dtype
def MistralAttention_fast_forward(
    self,
    hidden_states: torch.Tensor,
    causal_mask: Optional[BlockDiagonalCausalMask] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: bool = False,
    use_cache: bool = False,
    padding_mask: Optional[torch.LongTensor] = None,
    position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    *args,
    **kwargs,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
    """Training / prefill fast attention for Mistral (GQA + sliding window).

    ``position_embeddings`` is accepted for HF signature compatibility but is
    not used here — RoPE tables come from ``self.rotary_emb`` instead.
    Returns ``(attn_output, None, past_key_value)``; attention weights are
    never materialized.
    """
    # Drop single-token-decoding scratch buffers left over from a previous
    # generation run — training/prefill must not alias them.
    if hasattr(self, "paged_attention"):
        del self.paged_attention_K
        del self.paged_attention_V
        del self.paged_attention
        del self.temp_QA
        del self.temp_KV
        del self.RH_Q
        del self.attention

    bsz, q_len, _ = hidden_states.size()

    n_heads = self.config.num_attention_heads
    n_groups = self.num_key_value_groups
    n_kv_heads = self.config.num_key_value_heads
    head_dim = self.head_dim
    assert n_kv_heads * n_groups == n_heads

    # Fused QKV projection, then [bsz, heads, q_len, head_dim] layout.
    Q, K, V = self.apply_qkv(self, hidden_states)
    Q = Q.view(bsz, q_len, n_heads, head_dim).transpose(1, 2)
    K = K.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2)
    V = V.view(bsz, q_len, n_kv_heads, head_dim).transpose(1, 2)

    # Packed-sequence (varlen) metadata when training with sequence packing.
    seq_info = get_packed_info_from_kwargs(kwargs, Q.device)

    kv_seq_len = K.shape[-2]
    if past_key_value is not None:
        kv_seq_len += past_key_value[0].shape[-2]

    # Extend RoPE dynamically to fit in VRAM
    self.rotary_emb.extend_rope_embedding(V, seq_len = kv_seq_len)
    cos, sin = self.rotary_emb.get_cached(kv_seq_len, Q.device.index)
    rope_position_ids = (
        position_ids if position_ids is not None else kwargs.get("position_ids")
    )
    # Useful for LongRoPE
    Q, K = fast_rope_embedding(Q, K, cos, sin, rope_position_ids)

    if past_key_value is not None:
        K = torch.cat([past_key_value[0], K], dim = 2)
        V = torch.cat([past_key_value[1], V], dim = 2)
    past_key_value = (K, V) if use_cache else None

    # Attention module
    # Sliding window: full attention whenever the sequence fits the window.
    sw_cfg = getattr(self.config, "sliding_window", None)
    sw = kv_seq_len if (sw_cfg is None or sw_cfg == "null") else sw_cfg
    window_size = (-1, -1) if (kv_seq_len <= sw) else (sw, sw)

    # The varlen (packed) kernel path is only taken without a KV cache and
    # without a sliding window.
    use_varlen = (
        seq_info is not None and past_key_value is None and window_size == (-1, -1)
    )
    backend = select_attention_backend(use_varlen)
    attention_config = AttentionConfig(
        backend = backend,
        n_kv_heads = n_kv_heads,
        n_groups = n_groups,
        flash_dense_kwargs = {"causal": True, "window_size": window_size},
        flash_varlen_kwargs = {
            "dropout_p": 0.0,
            "causal": True,
            "softmax_scale": getattr(self, "softmax_scale", None),
        },
    )
    context = AttentionContext(
        bsz = bsz,
        q_len = q_len,
        kv_seq_len = kv_seq_len,
        n_heads = n_heads,
        head_dim = head_dim,
        requires_grad = hidden_states.requires_grad,
        seq_info = seq_info,
        attention_mask = attention_mask,
        causal_mask = causal_mask,
    )
    A = run_attention(config = attention_config, context = context, Q = Q, K = K, V = V)

    attn_output = A.reshape(bsz, q_len, n_heads * head_dim)
    attn_output = self.apply_o(self, attn_output)
    attn_weights = None
    return attn_output, attn_weights, past_key_value
def MistralForCausalLM_fast_forward(
    self,
    input_ids: torch.LongTensor = None,
    causal_mask: Optional[BlockDiagonalCausalMask] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[List[torch.FloatTensor]] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    labels: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    num_logits_to_keep: Optional[int] = 0,
    logits_to_keep: Optional[int] = 0,
    *args,
    **kwargs,
) -> Union[Tuple, CausalLMOutputWithPast]:
    """Fast CausalLM forward for Mistral.

    Builds a (sliding-window) causal mask when none is supplied, runs either
    the cached single-step inference path or the full model, then produces
    logits / loss as lazily as possible:

    * ``UNSLOTH_RETURN_HIDDEN_STATES=1`` — return hidden states in the
      ``logits`` slot (GRPO mode);
    * training with ``labels`` (and logits not forced) — fused chunked
      linear+cross-entropy, logits never materialized (``EMPTY_LOGITS``);
    * otherwise — a regular ``lm_head`` projection plus shifted CE loss.

    Fixes vs. the previous revision:
    * ``logits_to_keep`` (the new transformers spelling of
      ``num_logits_to_keep``) is now honored on every path, not only GRPO;
    * the ``return_dict=False`` fused-loss path no longer references an
      undefined ``logits`` local (NameError) — it returns ``EMPTY_LOGITS``
      exactly like the dict path.
    """
    if causal_mask is None and past_key_values is None:
        bsz, q_len = input_ids.shape
        sliding_window = getattr(self.config, "sliding_window", None)
        if HAS_XFORMERS:
            # xformers consumes an attention-bias object, not a dense mask.
            if (
                sliding_window is None
                or sliding_window == "null"
                or sliding_window <= 0
            ):
                causal_mask = xformers.attn_bias.LowerTriangularMask()
            elif q_len <= sliding_window:
                # Window at least as long as the sequence == plain causal.
                causal_mask = xformers.attn_bias.LowerTriangularMask()
            else:
                causal_mask = xformers.attn_bias.BlockDiagonalCausalMask.from_seqlens(
                    [q_len] * bsz
                ).make_local_attention(window_size = sliding_window)
            # If attention_mask exists, it will be handled in the attention forward
        else:
            # No xformers — build a dense additive float mask instead.
            if (
                sliding_window is None
                or sliding_window == "null"
                or sliding_window <= 0
                or q_len <= sliding_window
            ):
                # Fully causal mask
                causal_mask_values = torch.triu(
                    torch.full((q_len, q_len), -torch.inf, device = input_ids.device),
                    diagonal = 1,
                )
            else:
                # Sliding window attention
                q_indices = torch.arange(q_len, device = input_ids.device).view(-1, 1)
                k_indices = torch.arange(q_len, device = input_ids.device).view(1, -1)
                causal_bool_mask = k_indices <= q_indices
                window_bool_mask = (q_indices - k_indices) < sliding_window
                causal_mask_values = torch.where(
                    causal_bool_mask & window_bool_mask, 0.0, -torch.inf
                )
            # Fold the causal values into any user-provided attention mask.
            if attention_mask is None:
                attention_mask = causal_mask_values[None, None, :, :].expand(
                    bsz, 1, q_len, q_len
                )
            else:
                if attention_mask.dim() == 2:
                    # [bsz, seq_len] -> [bsz, 1, 1, seq_len]
                    attention_mask = attention_mask[:, None, None, :]
                attention_mask = attention_mask.expand(bsz, 1, q_len, q_len)
                attention_mask = attention_mask + causal_mask_values[None, None, :, :]
            attention_mask = attention_mask.to(
                dtype = _get_dtype(dtype_from_config(self.config))
            )

    output_attentions = (
        output_attentions
        if output_attentions is not None
        else self.config.output_attentions
    )
    output_hidden_states = (
        output_hidden_states
        if output_hidden_states is not None
        else self.config.output_hidden_states
    )
    return_dict = (
        return_dict if return_dict is not None else self.config.use_return_dict
    )
    # Honor both the legacy and the new transformers argument name everywhere
    # (previously `logits_to_keep` was only respected in GRPO mode).
    num_logits_to_keep = max(num_logits_to_keep, logits_to_keep)

    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
    self.model._has_no_labels = labels is None
    if past_key_values is not None:
        # Cached single-step decoding path.
        outputs = LlamaModel_fast_forward_inference(
            self,
            input_ids,
            past_key_values,
            position_ids = position_ids,
            attention_mask = attention_mask,
        )
    else:
        outputs = self.model(
            input_ids = input_ids,
            causal_mask = causal_mask,
            attention_mask = attention_mask,
            position_ids = position_ids,
            past_key_values = past_key_values,
            inputs_embeds = inputs_embeds,
            use_cache = use_cache,
            output_attentions = output_attentions,
            output_hidden_states = output_hidden_states,
            return_dict = return_dict,
            **kwargs,
        )

    hidden_states = outputs[0]
    bsz, q_len, hd = hidden_states.shape
    lm_head = self.lm_head.weight
    lm_head_device = lm_head.device

    # Move items to same device as lm_head
    hidden_states = hidden_states.to(lm_head_device)
    if labels is not None:
        labels = labels.to(lm_head_device)

    # If we are in GRPO mode, return raw hidden states in the logits slot.
    if os.environ.get("UNSLOTH_RETURN_HIDDEN_STATES", "0") == "1":
        if num_logits_to_keep != 0:
            hidden_states = hidden_states[:, -num_logits_to_keep:, :]
        return CausalLMOutputWithPast(
            loss = None,
            logits = hidden_states,
            past_key_values = outputs.past_key_values,
            hidden_states = outputs.hidden_states,
            attentions = outputs.attentions,
        )

    if bsz == 1 and q_len == 1:
        # Single-token decode: a matrix-vector product is cheaper than a GEMM.
        logits = torch.mv(lm_head, hidden_states.ravel().to(lm_head.dtype))
        logits = logits.unsqueeze(0).unsqueeze(0)
    elif num_logits_to_keep != 0:
        logits = self.lm_head(
            hidden_states[:, -num_logits_to_keep:, :].to(lm_head.dtype)
        )
    else:
        RETURN_LOGITS = os.environ.get("UNSLOTH_RETURN_LOGITS", "0") == "1"
        if not RETURN_LOGITS and labels is not None:
            # Fused chunked linear + cross-entropy: the full [bsz, q_len,
            # vocab] logits tensor is never materialized, which is the big
            # VRAM win during training.
            n_items = kwargs.get("num_items_in_batch", None)
            if n_items is None:
                n_items = kwargs.get("n_items", None)
            logit_softcapping = getattr(self.config, "final_logit_softcapping", 0)
            loss = unsloth_fused_ce_loss(
                trainer = None,
                hidden_states = hidden_states,
                lm_head_weight = lm_head,
                lm_head_bias = None,
                labels = labels,
                mask = None,
                n_items = n_items,
                scaling = getattr(self, "accelerator_scaler", None),
                target_gb = None,
                torch_compile = True,
                logit_softcapping = logit_softcapping,
            )
            if not return_dict:
                # Fixed: `logits` is never computed on this path, so return
                # the EMPTY_LOGITS placeholder (referencing `logits` here was
                # a NameError before).
                output = (EMPTY_LOGITS,) + outputs[1:]
                return (loss,) + output if loss is not None else output
            return CausalLMOutputWithPast(
                loss = loss,
                logits = EMPTY_LOGITS,
                past_key_values = outputs.past_key_values,
                hidden_states = outputs.hidden_states,
                attentions = outputs.attentions,
            )
        logits = self.lm_head(hidden_states.to(lm_head.dtype))

    logits = logits.to(_get_dtype(dtype_from_config(self.config)))
    loss = None
    if labels is not None:
        # Build next-token targets: labels shifted left by one, last position
        # padded with the -100 ignore-index.
        shift_logits = logits
        shift_labels = torch.empty_like(labels)
        shift_labels[..., :-1] = labels[..., 1:]
        shift_labels[..., -1] = -100
        # Do not let the loss bleed across packed-sequence boundaries.
        mask_packed_sequence_boundaries(
            shift_labels,
            kwargs.get("packed_seq_lengths"),
        )
        n_items = kwargs.get("num_items_in_batch", None)
        if n_items is None:
            n_items = kwargs.get("n_items", None)
        loss = fast_cross_entropy_loss(
            logits = shift_logits,
            labels = shift_labels,
            n_items = n_items,
        )

    if not return_dict:
        output = (logits,) + outputs[1:]
        return (loss,) + output if loss is not None else output
    return CausalLMOutputWithPast(
        loss = loss,
        logits = logits,
        past_key_values = outputs.past_key_values,
        hidden_states = outputs.hidden_states,
        attentions = outputs.attentions,
    )
def patch_mistral_nemo_attention(function):
    """Rewrite the generated attention ``__init__`` source for Mistral Nemo.

    Mistral Nemo 12b has ``head_dim * num_attention_heads != hidden_size``
    (o_proj is 5120 -> 4096), so the stock size assertion and the derived
    head_dim / o_proj shapes in the patched source must be replaced.
    Returns the rewritten source string.
    """
    rewrites = (
        # Disable the hidden_size divisibility check.
        (
            "(self.head_dim * self.config.num_attention_heads) != self.config.hidden_size",
            "False",
        ),
        # head_dim comes straight from the config, not hidden_size // n_heads.
        (
            "self.head_dim = self.config.hidden_size // self.config.num_attention_heads",
            "self.head_dim = config.head_dim",
        ),
        # o_proj input dim is n_heads * head_dim, which may differ from hidden_size.
        (
            "self.o_proj = nn.Linear(self.config.hidden_size, self.config.hidden_size, bias=False)",
            "self.o_proj = nn.Linear(self.config.num_attention_heads * self.head_dim, self.config.hidden_size, bias=False)",
        ),
    )
    for needle, replacement in rewrites:
        function = function.replace(needle, replacement)
    return function
class FastMistralModel(FastLlamaModel):
    """Unsloth patcher for HF Mistral models."""

    @staticmethod
    def pre_patch():
        """Swap the HF Mistral forward methods for Unsloth fast forwards.

        Must run before the model is instantiated so ``from_pretrained``
        builds the model from the patched classes.
        """
        init_name, function = patch_linear_scaling(
            model_name = "mistral",
            rope_module = LlamaRotaryEmbedding,
            scaled_rope_module = LlamaLinearScalingRotaryEmbedding,
            attention_module = MistralAttention,
        )
        # Just for Mistral Nemo models: rewrite the generated __init__ source
        # so head_dim / o_proj shapes come from the config, then install it.
        if function is not None and init_name is not None:
            function = patch_mistral_nemo_attention(function)
            exec(function, globals())
            MistralAttention.__init__ = eval(init_name)

        # All attention flavors share the same fast forward.
        for attention_class in (
            MistralAttention,
            MistralSdpaAttention,
            MistralFlashAttention2,
        ):
            attention_class.forward = MistralAttention_fast_forward
        MistralDecoderLayer.forward = LlamaDecoderLayer_fast_forward
        MistralModel.forward = LlamaModel_fast_forward
        MistralForCausalLM.forward = MistralForCausalLM_fast_forward
        PeftModelForCausalLM.forward = PeftModel_fast_forward
        fix_prepare_inputs_for_generation(MistralForCausalLM)

        # Solves https://github.com/unslothai/unsloth/issues/168
        # Static KV Cache was introduced in 4.38.0, causing training to be much slower.
        # Inference can now be CUDAGraphed, but we shall retain the old rotary embeddings.
        # https://github.com/huggingface/transformers/pull/27931
        # https://github.com/huggingface/transformers/blob/v4.37.2/src/transformers/models/llama/modeling_llama.py
        import transformers.models.mistral.modeling_mistral

        transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding = (
            LlamaRotaryEmbedding
        )
        return

    @staticmethod
    def from_pretrained(
        model_name = "unsloth/mistral-7b-bnb-4bit",
        max_seq_length = None,
        dtype = None,
        load_in_4bit = True,
        token = None,
        device_map = "sequential",
        rope_scaling = None,  # Mistral does not support RoPE scaling
        fix_tokenizer = True,
        model_patcher = None,
        tokenizer_name = None,
        trust_remote_code = False,
        **kwargs,
    ):
        """Load a Mistral model via the shared Llama loader with this patcher."""
        return FastLlamaModel.from_pretrained(
            model_name = model_name,
            max_seq_length = max_seq_length,
            dtype = dtype,
            load_in_4bit = load_in_4bit,
            token = token,
            device_map = device_map,
            rope_scaling = rope_scaling,
            fix_tokenizer = fix_tokenizer,
            model_patcher = FastMistralModel,
            tokenizer_name = tokenizer_name,
            trust_remote_code = trust_remote_code,
            **kwargs,
        )
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/models/_utils.py | unsloth/models/_utils.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unsloth release version, year.month.patch.
__version__ = "2026.1.1"

# Explicit public API of this utilities module; `from ._utils import *`
# elsewhere in the package only picks up these names.
__all__ = [
    "SUPPORTS_BFLOAT16",
    "is_bfloat16_supported",
    "is_vLLM_available",
    "prepare_model_for_kbit_training",
    "xformers",
    "xformers_attention",
    "xformers_version",
    "__version__",
    "importlib_version",
    "HAS_FLASH_ATTENTION",
    "HAS_FLASH_ATTENTION_SOFTCAPPING",
    "USE_MODELSCOPE",
    "platform_system",
    "patch_tokenizer",
    "get_statistics",
    "Unsloth_Offloaded_Gradient_Checkpointer",
    "offload_to_disk",
    "offload_input_embeddings",
    "offload_output_embeddings",
    "unsloth_offloaded_gradient_checkpoint",
    "torch_compile_options",
    "patch_linear_scaling",
    "patch_llama_rope_scaling",
    "create_boolean_mask",
    "torch_amp_custom_fwd",
    "torch_amp_custom_bwd",
    # "accelerate_old_send_to_device",
    # "accelerate_new_send_to_device",
    "patch_gradient_accumulation_fix",
    "patch_compiling_bitsandbytes",
    "patch_regional_compilation",
    "patch_layernorm",
    "patch_torch_compile",
    "patch_model_and_tokenizer",
    "patch_unsloth_gradient_checkpointing",
    "unpatch_unsloth_gradient_checkpointing",
    "patch_gradient_checkpointing",
    "unpatch_gradient_checkpointing",
    "HAS_CUT_CROSS_ENTROPY",
    "EMPTY_LOGITS",
    "fused_linear_cross_entropy",
    "unsloth_fused_ce_loss",
    "patch_unsloth_smart_gradient_checkpointing",
    "unpatch_unsloth_smart_gradient_checkpointing",
    "patch_compiled_autograd",
    "process_vision_info",
    "unsloth_compile_transformers",
    "patch_fast_lora",
    "validate_loftq_config",
    "RaiseUninitialized",
    "fast_inference_setup",
    "patch_peft_fast_inference",
    "error_out_no_vllm",
    "dequantize_module_weight",
    "patch_hf_quantizer",
    "verify_fp8_support_if_applicable",
    "_get_inference_mode_context_manager",
    "hf_login",
    "make_fast_generate_wrapper",
]
import torch
from typing import Union, Optional, List, Any, Callable, Tuple, Iterator
from platform import system as platform_system
# Deliberately shadow the imported function with its result: the rest of the
# codebase treats `platform_system` as a constant OS-name string.
platform_system = platform_system()
import numpy as np
import contextlib
import re
from dataclasses import dataclass, field
import functools
import textwrap
import logging
import warnings, subprocess, inspect, psutil, os, math
from unsloth_zoo.utils import Version, get_quant_type
from importlib.metadata import version as importlib_version
from ..device_type import (
is_hip,
get_device_type,
DEVICE_TYPE,
DEVICE_TYPE_TORCH,
DEVICE_COUNT,
ALLOW_PREQUANTIZED_MODELS,
)
from unsloth_zoo.log import logger
from unsloth_zoo.tokenizer_utils import (
patch_tokenizer as _patch_tokenizer,
)
from unsloth_zoo.rl_environments import (
check_python_modules,
create_locked_down_function,
execute_with_time_limit,
Benchmarker,
)
from unsloth_zoo.patching_utils import (
patch_compiling_bitsandbytes,
patch_layernorm,
patch_torch_compile,
patch_model_and_tokenizer,
patch_compiled_autograd,
)
from unsloth_zoo.gradient_checkpointing import (
Unsloth_Offloaded_Gradient_Checkpointer,
unsloth_offloaded_gradient_checkpoint,
patch_unsloth_gradient_checkpointing,
unpatch_unsloth_gradient_checkpointing,
Unsloth_Gradient_Checkpointer,
unsloth_gradient_checkpoint,
patch_gradient_checkpointing,
unpatch_gradient_checkpointing,
patch_unsloth_smart_gradient_checkpointing,
unpatch_unsloth_smart_gradient_checkpointing,
)
from unsloth_zoo.loss_utils import (
HAS_CUT_CROSS_ENTROPY,
fused_linear_cross_entropy,
_unsloth_get_batch_samples,
unsloth_fused_ce_loss,
)
from unsloth_zoo.vision_utils import (
process_vision_info,
)
from unsloth_zoo.compiler import (
get_transformers_model_type,
unsloth_compile_transformers as _unsloth_compile_transformers,
)
from unsloth_zoo.training_utils import (
prepare_model_for_training,
)
from unsloth_zoo.temporary_patches import (
TEMPORARY_PATCHES,
)
# Apply every hot-fix bundled in unsloth_zoo.temporary_patches at import time.
for temporary_patch in TEMPORARY_PATCHES:
    temporary_patch()
# =============================================
# Disable some warnings which can get annoying
# NOTE: `module` is a regex matched against the module that *issues* the
# warning, so these filters only silence known-noisy upstream libraries.
warnings.filterwarnings(action = "ignore", category = UserWarning, module = "torch")
warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "torch")
warnings.filterwarnings(action = "ignore", category = UserWarning, module = "huggingface_hub")
warnings.filterwarnings(
    action = "ignore", category = FutureWarning, module = "huggingface_hub"
)
warnings.filterwarnings(action = "ignore", category = UserWarning, module = "trl")
warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "trl")
warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "xformers")
warnings.filterwarnings(action = "ignore", category = RuntimeWarning, module = "subprocess")
warnings.filterwarnings(action = "ignore", category = UserWarning, module = "transformers")
warnings.filterwarnings(action = "ignore", category = FutureWarning, module = "accelerate")
warnings.filterwarnings(
    action = "ignore", category = RuntimeWarning, module = "multiprocessing"
)
warnings.filterwarnings(action = "ignore", category = RuntimeWarning, module = "multiprocess")
warnings.filterwarnings(action = "ignore", category = UserWarning, module = "triton")
warnings.filterwarnings(action = "ignore", category = UserWarning, module = "bitsandbytes")
# Stop "Special tokens have been added in the vocabulary, ..."
# CRITICAL + 1 is above every standard level, so this logger emits nothing.
logging.getLogger("transformers.tokenization_utils_base").setLevel(logging.CRITICAL + 1)
# Ignore logging messages
class HideLoggingMessage(logging.Filter):
    """Logging filter that drops any record whose message contains ``text``."""
    __slots__ = ("text",)
    def __init__(self, text):
        # Substring to suppress; checked against each record's formatted message.
        self.text = text
    def filter(self, record):
        # Keep the record only when the unwanted substring is absent.
        return self.text not in record.getMessage()
# Stop vLLM messages
if os.environ.get("UNSLOTH_ENABLE_LOGGING", "0") != "1":
try:
from vllm.worker.worker import logger as vllm_worker_logger
vllm_worker_logger.addFilter(HideLoggingMessage("Sleep mode freed"))
del vllm_worker_logger
except:
pass
try:
from vllm.v1.worker.gpu_worker import logger as vllm_gpu_worker_logger
vllm_gpu_worker_logger.addFilter(HideLoggingMessage("Sleep mode freed"))
del vllm_gpu_worker_logger
except:
pass
try:
from vllm.executor.executor_base import logger as vllm_executor_logger
vllm_executor_logger.addFilter(HideLoggingMessage("to fall asleep"))
vllm_executor_logger.addFilter(HideLoggingMessage("to wake up"))
vllm_executor_logger.addFilter(HideLoggingMessage("Executor is not sleeping"))
del vllm_executor_logger
except:
pass
try:
from vllm.core.block.prefix_caching_block import (
logger as vllm_prefix_caching_logger,
)
vllm_prefix_caching_logger.addFilter(HideLoggingMessage("reset prefix cache"))
del vllm_prefix_caching_logger
except:
pass
try:
from vllm.v1.core.block_pool import logger as vllm_block_pool_logger
vllm_block_pool_logger.addFilter(HideLoggingMessage("reset prefix cache"))
del vllm_block_pool_logger
except:
pass
try:
from vllm.lora.models import logger as vllm_lora_model_logger
vllm_lora_model_logger.addFilter(
HideLoggingMessage(
"Regarding multimodal models, vLLM currently only supports adding"
)
)
del vllm_lora_model_logger
except:
pass
try:
from vllm.attention.utils.fa_utils import (
logger as vllm_attention_utils_fa_utils_logger,
)
vllm_attention_utils_fa_utils_logger.addFilter(
HideLoggingMessage("Cannot use FA version")
)
del vllm_attention_utils_fa_utils_logger
except:
pass
# The speedups for torchdynamo mostly come with GPU Ampere or higher and which is not detected here.
from transformers.training_args import logger as transformers_training_args_logger
transformers_training_args_logger.addFilter(HideLoggingMessage("The speedups"))
# torch.distributed process group is initialized, but parallel_mode != ParallelMode.DISTRIBUTED.
transformers_training_args_logger.addFilter(HideLoggingMessage("torch.distributed"))
# average_tokens_across_devices is set to True but it is invalid when world size is1
transformers_training_args_logger.addFilter(
HideLoggingMessage("average_tokens_across_devices")
)
del transformers_training_args_logger
# No label_names provided for model class
from transformers.trainer import logger as transformers_trainer_logger
transformers_trainer_logger.addFilter(HideLoggingMessage("No label_names"))
# The tokenizer has new PAD/BOS/EOS tokens that differ from the model config and generation config.
transformers_trainer_logger.addFilter(HideLoggingMessage("The tokenizer has new"))
del transformers_trainer_logger
# Using the default loss: `ForCausalLMLoss`.
try:
from transformers.modeling_utils import logger as transformers_modeling_utils_logger
transformers_modeling_utils_logger.addFilter(HideLoggingMessage("ForCausalLMLoss"))
del transformers_modeling_utils_logger
except:
pass
# The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function.
try:
from accelerate.utils.modeling import logger as accelerate_utils_modeling_logger
accelerate_utils_modeling_logger.addFilter(
HideLoggingMessage("The model weights are not tied")
)
del accelerate_utils_modeling_logger
except:
pass
# Setting `pad_token_id` to `eos_token_id`
try:
from transformers.generation.utils import (
logger as transformers_generation_utils_logger,
)
transformers_generation_utils_logger.addFilter(
HideLoggingMessage("Setting `pad_token_id` to `eos_token_id`")
)
# "You have set `compile_config`
transformers_generation_utils_logger.addFilter(HideLoggingMessage("compile_config"))
del transformers_generation_utils_logger
except:
pass
# The following generation flags are not valid and may be ignored:
try:
from transformers.generation.configuration_utils import (
logger as configuration_logger,
)
configuration_logger.addFilter(HideLoggingMessage("following generation flags"))
del configuration_logger
except:
pass
# Gemma3 It is strongly recommended to train Gemma3 models with the `eager`
try:
from transformers.models.gemma3.modeling_gemma3 import logger as gemma3_logger
gemma3_logger.addFilter(HideLoggingMessage("strongly recommended"))
del gemma3_logger
except:
pass
# Xet Storage is enabled for this repo, but the 'hf_xet' package is not installed.
try:
from huggingface_hub.file_download import logger as hub_logger
hub_logger.addFilter(HideLoggingMessage("hf_xet"))
del hub_logger
except:
pass
# MXFP4 quantization requires triton >= 3.4.0
try:
from transformers.quantizers.quantizer_mxfp4 import logger as mxfp4_logger
mxfp4_logger.addFilter(HideLoggingMessage("requires triton"))
del mxfp4_logger
except:
pass
# You passed `quantization_config` or equivalent parameters
try:
warnings.filterwarnings(
action = "ignore",
message = r".*quantization_config.*",
category = UserWarning,
append = True,
)
except:
pass
# UserWarning: Logical operators 'and' and 'or' are deprecated for non-scalar tensors; please use '&' or '|' instead
# Will be fixed in torch 2.8.1 https://github.com/pytorch/pytorch/issues/158463
try:
warnings.filterwarnings(
action = "ignore",
message = r".*Logical operators 'and' and 'or'.*",
category = UserWarning,
append = True,
)
except:
pass
# Using a slow image processor as `use_fast`
try:
from transformers.processing_utils import logger as processing_utils_logger
processing_utils_logger.addFilter(HideLoggingMessage("`use_fast`"))
del processing_utils_logger
except:
pass
# Using a slow image processor as `use_fast`
try:
from transformers.models.auto.image_processing_auto import (
logger as processing_utils_logger,
)
processing_utils_logger.addFilter(HideLoggingMessage("`use_fast`"))
del processing_utils_logger
except:
pass
# `use_cache=True` is incompatible with gradient checkpointing
try:
from transformers.trainer import logger as trainer_logger
trainer_logger.addFilter(HideLoggingMessage("`use_cache=True`"))
del trainer_logger
except:
pass
# `use_cache=True` is incompatible with gradient checkpointing
try:
from transformers.utils.generic import logger as trainer_logger
trainer_logger.addFilter(HideLoggingMessage("`use_cache=True`"))
del trainer_logger
except:
pass
# We detected that you are using `from_pretrained` with a meta device context manager or `torch.set_default_device('meta')
try:
from transformers.modeling_utils import logger as modeling_utils_logger
modeling_utils_logger.addFilter(HideLoggingMessage("anti-pattern"))
del modeling_utils_logger
except:
pass
# Errors out on
# Some weights of Gemma3nForConditionalGeneration were not initialized from the model checkpoint
from transformers.modeling_utils import logger as transformers_logger
class _RaiseUninitialized(logging.Handler):
def __init__(self):
super().__init__()
def emit(self, record):
record_lower = str(record).lower()
if (
("some weights of" in record_lower)
and ("score.weight" not in record_lower)
and ("classifier.weight" not in record_lower)
and ("cls.predictions" not in record_lower)
and ("predictions.decoder" not in record_lower)
and (os.environ.get("UNSLOTH_WARN_UNINITIALIZED", "1") == "1")
):
raise Exception(
f"Unsloth: Critical error since some weights are not initialized.\n"
f"Please try updating Unsloth, transformers and timm via:\n"
f"`pip install --upgrade --force-reinstall --no-cache-dir --no-deps unsloth unsloth_zoo transformers timm`\n"
f"{str(record)}"
)
class RaiseUninitialized:
    """Installs a ``_RaiseUninitialized`` handler on the transformers
    modeling logger; call :meth:`remove` to detach it again."""
    def __init__(self):
        # Keep the handler so remove() can detach exactly this instance.
        handler = _RaiseUninitialized()
        self.error_handler = handler
        transformers_logger.addHandler(handler)
    def remove(self):
        transformers_logger.removeHandler(self.error_handler)
# Patch get_model_param_count to record correct 4bit / 8bit
from transformers.trainer_pt_utils import is_deepspeed_zero3_enabled
def extract_quant_model_param_count(model):
    """
    Calculate quant model param count based on difference in param class. Returns int for param count.
    """
    # bitsandbytes Params4bit packs two 4-bit values per stored element, so
    # each element of such a parameter stands for two original parameters.
    return sum(
        (2 * p.numel()) if p.__class__.__name__ == "Params4bit" else p.numel()
        for _, p in model.named_parameters()
    )
def get_model_param_count(model, trainable_only = False):
    """
    Calculate model's total param count. If trainable_only is True then count only those requiring grads
    """
    # Under DeepSpeed ZeRO-3 parameters are sharded; prefer `ds_numel` when a
    # parameter carries it (assumed to be the full, unsharded element count).
    if is_deepspeed_zero3_enabled():
        def numel(p):
            return p.ds_numel if hasattr(p, "ds_numel") else p.numel()
    else:
        def numel(p):
            return p.numel()
    s = sum(
        numel(p) for p in model.parameters() if not trainable_only or p.requires_grad
    )
    # Quantized models undercount with plain numel() (4-bit packs two values
    # per element), so recompute totals via extract_quant_model_param_count.
    if (
        (not trainable_only)
        and hasattr(model, "config")
        and hasattr(model.config, "quantization_config")
    ):
        approx = extract_quant_model_param_count(model)
        if approx is not None:
            s = approx
    return s
# Install the quantization-aware counter in both places: the defining module
# and transformers.trainer, which presumably holds its own reference to the
# symbol (from-import) and so must be patched separately.
import transformers.trainer_pt_utils
transformers.trainer_pt_utils.get_model_param_count = get_model_param_count
import transformers.trainer
transformers.trainer.get_model_param_count = get_model_param_count
# =============================================
# =============================================
# Edits all Config files to enable RoPE Scaling for all models
# Transformers had to update for Mistral Nemo 12b since Attention is (5120, 4096) now.
def patch_mistral_nemo_config(config):
    """Rewrite MistralConfig's *source* so ``head_dim`` becomes configurable.

    Mistral Nemo 12b needs (5120, 4096) attention, so a ``head_dim`` argument
    is spliced into the docstring, the ``__init__`` signature, and the body.
    The source string is returned unchanged when it already documents
    ``head_dim (``.
    """
    if "head_dim (" not in config:
        # 1) Document the new argument in the class docstring.
        add_head_dim = (
            "If it is not specified, will default to `8`.\n"
            "    head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):\n"
            "    The attention head dimension."
        )
        config = config.replace(
            "If it is not specified, will default to `8`.", add_head_dim
        )
        # 2) Add the keyword argument to __init__'s signature.
        add_head_dim = "num_key_value_heads=8,\n        head_dim=None,"
        config = config.replace("num_key_value_heads=8,", add_head_dim)
        # 3) Store it, defaulting to hidden_size // num_attention_heads.
        add_head_dim = "self.sliding_window = sliding_window\n        self.head_dim = head_dim or hidden_size // num_attention_heads\n"
        config = config.replace("self.sliding_window = sliding_window", add_head_dim)
    return config
try:
# Some Config files use layer_type_validation
# for eg Gemma-2, so we must import it to stop errors.
from transformers.configuration_utils import layer_type_validation
except:
pass
from transformers import __version__ as transformers_version
try:
from transformers import PreTrainedConfig
except:
from transformers import PretrainedConfig
# Architectures whose HF Config classes get a `rope_scaling` argument injected
# below via a source-level rewrite + exec, so RoPE scaling works everywhere.
model_architectures = [
    "llama",
    "mistral",
    "gemma",
    "gemma2",
    "qwen2",
    "granite",
    "qwen3",
    "qwen3_moe",
    "falcon_h1",
]
for model_name in model_architectures:
    config_filepath = f"transformers.models.{model_name}.configuration_{model_name}"
    model_filepath = f"transformers.models.{model_name}.modeling_{model_name}"
    config_filename = f"{model_name.title().replace('_','')}Config" # qwen3 arch folder is qwen3_moe but config is Qwen3Config. Need to remove underscore(_) for now
    # Architecture may not exist in this transformers version - skip quietly.
    try:
        exec(f"from {config_filepath} import {config_filename}", globals())
    except:
        continue
    try:
        config = inspect.getsource(eval(config_filename))
    except:
        continue
    # Newer configs reference RopeParameters; import it into our globals so
    # the re-exec'd source below can resolve the name.
    if "RopeParameters" in config:
        try:
            exec(f"from {config_filepath} import RopeParameters", globals())
        except:
            continue
    # Config already supports rope_scaling natively - nothing to patch.
    if "rope_scaling" in config:
        continue
    # Splice `rope_scaling=None` into __init__'s signature and store it.
    config = re.sub(
        r"(\*\*kwargs)[\s]{0,}\,[\s]{0,}\)[\s]{0,}\:",
        r"rope_scaling=None,"
        r"\n        **kwargs):\n"
        r"\n        self.rope_scaling = rope_scaling\n",
        config,
    )
    # Just for Mistral Nemo
    if model_name == "mistral":
        if Version(transformers_version) <= Version("4.42.4"):
            config = patch_mistral_nemo_config(config)
    # Re-execute the patched class source and swap it into its home module.
    exec(config, globals())
    exec(f"import {config_filepath}", globals())
    exec(f"{config_filepath}.{config_filename} = {config_filename}", globals())
# =============================================
# =============================================
# torch.cuda.amp.custom_fwd is deprecated >= 2.4
torch_version = torch.__version__
if DEVICE_TYPE in ("cuda", "hip"):
    if Version(torch_version) < Version("2.4.0"):
        torch_amp_custom_fwd = torch.cuda.amp.custom_fwd
        torch_amp_custom_bwd = torch.cuda.amp.custom_bwd
    else:
        # NOTE(review): HIP/ROCm deliberately uses device_type="cuda" here -
        # presumably torch exposes ROCm through the CUDA namespace; confirm on AMD.
        torch_amp_custom_fwd = torch.amp.custom_fwd(device_type = "cuda")
        torch_amp_custom_bwd = torch.amp.custom_bwd(device_type = "cuda")
elif DEVICE_TYPE == "xpu":
    # Intel XPU requires torch >= 2.6; fail fast on older versions.
    if Version(torch_version) < Version("2.6.0"):
        raise RuntimeError("torch.xpu currently only supports torch.version >= 2.6.0")
    else:
        torch_amp_custom_fwd = torch.amp.custom_fwd(device_type = "xpu")
        torch_amp_custom_bwd = torch.amp.custom_bwd(device_type = "xpu")
# =============================================
# =============================================
# Fix KeyError: 'Cache only has 0 layers, attempted to access layer with index 0'
# import transformers.cache_utils
# if hasattr(transformers.cache_utils, "DynamicCache") and \
# transformers.cache_utils.DynamicCache.__getitem__.__name__ != "__cache_utils_getitem__":
# source = inspect.getsource(transformers.cache_utils.DynamicCache.__getitem__)
# start = source.find("def")
# spaces = start*" "
# source = source.split("\n")
# source = "\n".join(x[start:] for x in source)
# where = source.find("raise KeyError")
# source = source[:where] + \
# f"if len(self) == 0:\n{spaces}{spaces}"\
# " raise RuntimeError('Unsloth: You must call `FastLanguageModel.for_inference(model)` before doing inference for Unsloth models.')\n" + \
# f"{spaces}{spaces}else:\n{spaces}{spaces}{spaces}" + source[where:]
# source = source.replace("__getitem__", "__cache_utils_getitem__", 1)
# exec(source)
# transformers.cache_utils.DynamicCache.__getitem__ = __cache_utils_getitem__
# pass
# =============================================
# =============================================
# Weird Databricks errors
from transformers.utils import is_openai_available
if is_openai_available():
    # Some environments (e.g. Databricks) report openai as installed while
    # the actual import still fails - probe the real import to find out.
    try:
        from openai import OpenAI
    except:
        print("Unsloth: OpenAI failed to import - ignoring for now.")
        # BUGFIX: only force-disable transformers' openai detection when the
        # import actually failed. Previously these four lines ran at the
        # `if` level, unconditionally disabling OpenAI support even when the
        # package imported fine.
        import transformers.utils
        def _is_openai_available():
            return False
        transformers.utils.is_openai_available = _is_openai_available
# =============================================
# Get Flash Attention v2 if Ampere (RTX 30xx, A100)
import bitsandbytes as bnb
from transformers import AutoTokenizer
from transformers.utils.import_utils import _is_package_available
# Capability flags; overwritten by the per-device probes below.
SUPPORTS_BFLOAT16 = False
HAS_FLASH_ATTENTION = False
HAS_FLASH_ATTENTION_SOFTCAPPING = False
if DEVICE_TYPE == "cuda":
major_version, minor_version = torch.cuda.get_device_capability()
torch.cuda.get_device_capability = functools.cache(torch.cuda.get_device_capability)
if major_version >= 8:
SUPPORTS_BFLOAT16 = True
if _is_package_available("flash_attn"):
# Check for CUDA linking errors "undefined symbol: _ZNK3c106SymIntltEl"
try:
try:
# See https://github.com/unslothai/unsloth/issues/1437
from flash_attn.flash_attn_interface import flash_attn_gpu
except:
from flash_attn.flash_attn_interface import flash_attn_cuda
HAS_FLASH_ATTENTION = True
# Also check for softcapping
from flash_attn import __version__ as flash_attn_version
HAS_FLASH_ATTENTION_SOFTCAPPING = Version(
flash_attn_version
) >= Version("2.6.3")
if not HAS_FLASH_ATTENTION_SOFTCAPPING:
print(
"Unsloth: If you want to finetune Gemma 2, upgrade flash-attn to version 2.6.3 or higher!\n"
"Newer versions support faster and less memory usage kernels for Gemma 2's attention softcapping!\n"
"To update flash-attn, do the below:\n"
'\npip install --no-deps --no-build-isolation --upgrade "flash-attn>=2.6.3"'
)
except:
print(
"Unsloth: Your Flash Attention 2 installation seems to be broken?\n"
"A possible explanation is you have a new CUDA version which isn't\n"
"yet compatible with FA2? Please file a ticket to Unsloth or FA2.\n"
"We shall now use Xformers instead, which does not have any performance hits!\n"
"We found this negligible impact by benchmarking on 1x A100."
)
# Stop Flash Attention from importing!
import transformers.utils.import_utils
transformers.utils.import_utils.is_flash_attn_2_available = (
lambda *args, **kwargs: False
)
import transformers.utils
transformers.utils.is_flash_attn_2_available = (
lambda *args, **kwargs: False
)
HAS_FLASH_ATTENTION = False
else:
HAS_FLASH_ATTENTION = False
else:
# Tri Dao's benchmark shows xformers is faster for now.
HAS_FLASH_ATTENTION = False
elif DEVICE_TYPE == "hip":
SUPPORTS_BFLOAT16 = True
if _is_package_available("flash_attn"):
# Check for CUDA linking errors "undefined symbol: _ZNK3c106SymIntltEl"
try:
try:
# See https://github.com/unslothai/unsloth/issues/1437
from flash_attn.flash_attn_interface import flash_attn_gpu
except:
from flash_attn.flash_attn_interface import flash_attn_cuda
HAS_FLASH_ATTENTION = True
# Also check for softcapping
from flash_attn import __version__ as flash_attn_version
HAS_FLASH_ATTENTION_SOFTCAPPING = Version(flash_attn_version) >= Version(
"2.6.3"
)
if not HAS_FLASH_ATTENTION_SOFTCAPPING:
print(
"Unsloth: If you want to finetune Gemma 2, upgrade flash-attn to version 2.6.3 or higher!\n"
"Newer versions support faster and less memory usage kernels for Gemma 2's attention softcapping!\n"
"To update flash-attn, do the below:\n"
'\npip install --no-deps --no-build-isolation --upgrade "flash-attn>=2.6.3"'
)
except:
print(
"Unsloth: Your Flash Attention 2 installation seems to be broken?\n"
"A possible explanation is you have a new CUDA version which isn't\n"
"yet compatible with FA2? Please file a ticket to Unsloth or FA2.\n"
"We shall now use Xformers instead, which does not have any performance hits!\n"
"We found this negligible impact by benchmarking on 1x A100."
)
# Stop Flash Attention from importing!
import transformers.utils.import_utils
transformers.utils.import_utils.is_flash_attn_2_available = (
lambda *args, **kwargs: False
)
import transformers.utils
transformers.utils.is_flash_attn_2_available = lambda *args, **kwargs: False
HAS_FLASH_ATTENTION = False
elif DEVICE_TYPE == "xpu":
SUPPORTS_BFLOAT16 = True
# =============================================
# Get Xformers
# Silence xformers CUDA mismatch warnings before import
try:
_xformers_logger = logging.getLogger("xformers")
_xformers_logger.setLevel(logging.ERROR)
del _xformers_logger
except:
pass
try:
from xformers import __version__ as xformers_version
# [TODO] Xformers does NOT work on RTX 50x (12), B200 (10), Jetson (11)
# See https://github.com/facebookresearch/xformers/issues/1329
# CUDA error (/workspace/xfrm2/third_party/flash-attention/hopper/flash_fwd_launch_template.h:188)
major_version, minor_version = torch.cuda.get_device_capability()
if (f"{major_version}.{minor_version}" in ("10.0", "11.0", "12.0")) and (
Version(xformers_version) in (Version("0.0.32.post2"),)
):
raise NotImplementedError(
"Unsloth: Xformers does not work in RTX 50X, Blackwell GPUs as of yet. Please build from source via\n"
"```\n"
"pip install ninja\n"
"pip install -v --no-build-isolation -U git+https://github.com/facebookresearch/xformers.git@main#egg=xformers\n"
"```\n"
)
# Temporarily disable 0.0.27 and higher - inference issues
if False: # Version(xformers_version) >= Version("0.0.27"):
raise ImportError(
"Unsloth: If you are in Colab, we updated the top cell install instructions - please change it to below "
"then press Disconnect Runtime and then Restart it.\n"
"\n"
"%%capture\n"
"# Installs Unsloth, Xformers (Flash Attention) and all other packages!\n"
'!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n'
'!pip install --no-deps "xformers<=0.0.27" trl peft accelerate bitsandbytes\n'
"\n"
f"Otherwise in local machines, your xformers version of {xformers_version} is too new.\n"
'Please downgrade xformers via `pip install --force-reinstall "xformers<=0.0.27"'
)
if Version(torch_version) < Version("2.2.0") and Version(
xformers_version
) >= Version("0.0.24"):
raise ImportError(
f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"
f"Please install xformers < 0.0.24 for torch = {torch_version}."
)
elif Version(torch_version) < Version("2.3.0") and Version(
xformers_version
) >= Version("0.0.26"):
raise ImportError(
f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"
f"Please install xformers < 0.0.26 for torch = {torch_version}."
)
elif Version(torch_version) < Version("2.4.0") and Version(
xformers_version
) > Version("0.0.27"):
raise ImportError(
f"Unsloth: You have torch = {torch_version} but xformers = {xformers_version}.\n"
f"Please install xformers <= 0.0.27 for torch = {torch_version}."
)
from xformers._cpp_lib import _register_extensions
try:
_register_extensions() # Check if C++ modules are loaded correctly
except Exception as error:
raise ImportError(
"Unsloth: Xformers was not installed correctly.\n"
"Please install xformers separately first.\n"
"Then confirm if it's correctly installed by running:\n"
"python -m xformers.info\n\n"
"Longer error message:\n" + str(error)
)
import xformers.ops.fmha as xformers
xformers_attention = xformers.memory_efficient_attention
except ModuleNotFoundError:
xformers = None
xformers_attention = None
xformers_version = None
except Exception as e:
if os.environ.get("UNSLOTH_ENABLE_LOGGING", "0") != "0":
print(
"========\nSwitching to PyTorch attention since your Xformers is broken.\n========\n"
)
print(str(e))
xformers = None
xformers_attention = None
xformers_version = None
# Check TRL version
from trl import __version__ as trl_version
# Unsloth now supports all TRL versions!
# Dead code: this version gate is intentionally disabled (`if False`) since
# Unsloth supports all TRL versions now; kept for reference only.
if False:  # Version(trl_version) >= Version("0.9.0"):
    raise ImportError(
        "Unsloth: If you are in Colab, we updated the top cell install instructions - please change it to below "
        "then press Disconnect Runtime and then Restart it.\n"
        "\n"
        "%%capture\n"
        "# Installs Unsloth, Xformers (Flash Attention) and all other packages!\n"
        '!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"\n'
        '!pip install --no-deps "xformers<=0.0.27" trl peft accelerate bitsandbytes\n'
        "\n"
        f"Otherwise in local machines, your TRL version of {trl_version} is too new.\n"
        "Please downgrade TRL via `pip install --force-reinstall trl"
    )
# =============================================
# Fix new Xformers versions TypeError: Multiple dispatch failed for 'torch._ops.aten.to.dtype_layout'
# accelerate_old_send_to_device = None
# accelerate_new_send_to_device = None
# if xformers_version is not None and Version(xformers_version) >= Version("0.0.27"):
# import accelerate.utils.operations
# if hasattr(accelerate.utils.operations, "send_to_device") and \
# accelerate.utils.operations.send_to_device.__name__ != "_fixed_send_to_device":
# accelerate_old_send_to_device = accelerate.utils.operations.send_to_device
# from accelerate.utils.operations import *
# send_to_device = inspect.getsource(accelerate.utils.operations.send_to_device)
# send_to_device = re.sub(
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | true |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/models/dpo.py | unsloth/models/dpo.py | # Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"PatchDPOTrainer",
"PatchKTOTrainer",
]
def PatchDPOTrainer():
    """No-op stub. Exported in ``__all__`` so existing code calling
    ``PatchDPOTrainer()`` keeps working; any patching it used to trigger is
    presumably applied elsewhere now - nothing happens here."""
    return
def PatchKTOTrainer():
    """No-op stub. Exported in ``__all__`` so existing code calling
    ``PatchKTOTrainer()`` keeps working; any patching it used to trigger is
    presumably applied elsewhere now - nothing happens here."""
    return
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/utils/attention_dispatch.py | unsloth/utils/attention_dispatch.py | # Copyright 2023-present Daniel Han-Chen, Michael Han-Chen & the Unsloth team. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""Shared helpers for attention backend selection and execution."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Optional, Tuple
from torch import Tensor
from torch.nn.functional import scaled_dot_product_attention
from ..models._utils import *
from ..utils.packing import (
build_sdpa_packed_attention_mask,
build_xformers_block_causal_mask,
)
# flash_attn is optional; only import its kernels when the availability flag
# (star-imported above, presumably from models._utils) says they load cleanly.
if HAS_FLASH_ATTENTION:
    from flash_attn import flash_attn_func, flash_attn_varlen_func
HAS_XFORMERS = xformers is not None
BlockDiagonalCausalMask = None
if HAS_XFORMERS:
    BlockDiagonalCausalMask = xformers.attn_bias.BlockDiagonalCausalMask
# Detect grouped-query-attention support by sniffing SDPA's docstring for the
# `enable_gqa` keyword (doc-based probe; brittle if docstrings are stripped).
SDPA_HAS_GQA = "enable_gqa" in (scaled_dot_product_attention.__doc__ or "")
# Backend identifiers returned by select_attention_backend().
FLASH_VARLEN = "flash_varlen"
FLASH_DENSE = "flash_dense"
XFORMERS = "xformers"
SDPA = "sdpa"
# Same class as BlockDiagonalCausalMask above (None when xformers is absent).
XFORMERS_BLOCK_DIAG_CLS = (
    xformers.attn_bias.BlockDiagonalCausalMask if HAS_XFORMERS else None
)
@dataclass
class AttentionConfig:
    """
    Per-layer attention metadata.

    NOTE(djsaunde): I had originally intended this to be populated once per layer, but
    we're currently constructing it on every forward pass since it can possibly be
    invalid from one forward pass to the next (e.g., switching from training to
    inference). For now, I'm keeping separate from AttentionContext for the sake of
    better grouping of params.
    """

    # Backend identifier: one of FLASH_VARLEN, FLASH_DENSE, XFORMERS, SDPA.
    backend: str
    # Number of key/value heads (used to reshape K/V for varlen flash).
    n_kv_heads: int
    # Query-heads per kv-head (GQA group size) - assumption; not used in the
    # visible dispatch path, TODO confirm against callers.
    n_groups: int
    # Optional extra keyword arguments forwarded verbatim to each backend.
    flash_dense_kwargs: Optional[dict[str, Any]] = None
    flash_varlen_kwargs: Optional[dict[str, Any]] = None
    sdpa_kwargs: Optional[dict[str, Any]] = None
    xformers_kwargs: Optional[dict[str, Any]] = None
@dataclass
class AttentionContext:
    """Per-call info required to run attention."""

    # Batch size and query length (used in Q/K/V reshapes).
    bsz: int
    q_len: int
    # Key/value sequence length.
    kv_seq_len: int
    # Number of query heads and per-head dimension.
    n_heads: int
    head_dim: int
    # Whether gradients flow through this call.
    requires_grad: bool
    # Packing metadata; unpacked as (_, cu_seqlens, max_seqlen) for varlen
    # flash, and fed to the block-diagonal mask builders otherwise.
    seq_info: Optional[Tuple[Tensor, Tensor, int]]
    attention_mask: Optional[Tensor]
    # Base mask passed to the xformers block-causal mask builder.
    causal_mask: Optional[Any]
    # Sliding-window size forwarded to the mask builders (None = full window).
    sliding_window: Optional[int] = None
def select_attention_backend(use_varlen: bool = False) -> str:
    """Return attention backend based on availability / priority order."""
    # Flash attention wins when installed; varlen vs dense is caller-chosen.
    if HAS_FLASH_ATTENTION:
        return FLASH_VARLEN if use_varlen else FLASH_DENSE
    # Otherwise prefer xformers, falling back to PyTorch SDPA.
    return XFORMERS if HAS_XFORMERS else SDPA
def run_attention(
    *,
    config: AttentionConfig,
    context: AttentionContext,
    Q: Tensor,
    K: Tensor,
    V: Tensor,
) -> Tensor:
    """
    Run attention using config / context info.

    Backend choice is prioritized for speed: FlashAttention when installed
    (`flash_varlen` for packed/variable-length inputs with `seq_info`, otherwise dense
    flash), then xFormers if flash is unavailable, with PyTorch SDPA as the final
    fallback (e.g., CPU or no fused kernels).

    Varlen flash is preferred when packing metadata is present because it avoids padding
    and keeps peak memory low. xFormers and SDPA can also handle packed batches (we
    pass a block-diagonal mask into each).

    Q/K/V appear to arrive head-major — presumably (bsz, heads, seq, head_dim),
    given the transpose(1, 2) calls below; TODO confirm with callers. The result
    is returned as (bsz, q_len, n_heads, head_dim).
    """
    backend = config.backend
    # Varlen flash needs packing metadata; without it degrade to dense flash/SDPA.
    if backend == FLASH_VARLEN and context.seq_info is None:
        backend = FLASH_DENSE if HAS_FLASH_ATTENTION else SDPA
    flash_dense_kwargs = config.flash_dense_kwargs or {}
    flash_varlen_kwargs = config.flash_varlen_kwargs or {}
    sdpa_kwargs = config.sdpa_kwargs or {}
    xformers_kwargs = config.xformers_kwargs or {}
    bsz = context.bsz
    n_heads = context.n_heads
    q_len = context.q_len
    head_dim = context.head_dim
    kv_seq_len = context.kv_seq_len
    requires_grad = context.requires_grad
    sliding_window = context.sliding_window
    if backend == FLASH_VARLEN:
        # Flatten to (total_tokens, heads, dim); cu_seqlens marks sample boundaries.
        Q_f = Q.transpose(1, 2).reshape(bsz * q_len, n_heads, head_dim)
        K_f = K.transpose(1, 2).reshape(bsz * q_len, config.n_kv_heads, head_dim)
        V_f = V.transpose(1, 2).reshape(bsz * q_len, config.n_kv_heads, head_dim)
        _, cu_seqlens, max_seqlen = context.seq_info
        return flash_attn_varlen_func(
            Q_f,
            K_f,
            V_f,
            cu_seqlens,
            cu_seqlens,
            max_seqlen,
            max_seqlen,
            **flash_varlen_kwargs,
        ).view(bsz, q_len, n_heads, head_dim)
    elif backend == FLASH_DENSE:
        # Dense flash takes (bsz, seq, heads, dim) and handles GQA head counts itself.
        Q_t = Q.transpose(1, 2)
        K_t = K.transpose(1, 2)
        V_t = V.transpose(1, 2)
        return flash_attn_func(Q_t, K_t, V_t, **flash_dense_kwargs).reshape(
            bsz, q_len, n_heads, head_dim
        )
    elif backend == XFORMERS:
        # Block-diagonal bias for packed batches, or the provided base causal bias.
        attn_bias = build_xformers_block_causal_mask(
            context.seq_info,
            sliding_window = sliding_window,
            base_mask = context.causal_mask,
        )
        Q_t = Q.transpose(1, 2)
        K_t = K.transpose(1, 2)
        V_t = V.transpose(1, 2)
        K_mod = K_t
        V_mod = V_t
        Q_mod = Q_t
        if config.n_groups != 1:
            # GQA: broadcast each KV head across its n_groups query heads.
            K_mod = K_t.view(bsz, kv_seq_len, config.n_kv_heads, 1, head_dim)
            V_mod = V_t.view(bsz, kv_seq_len, config.n_kv_heads, 1, head_dim)
            K_mod = K_mod.expand(
                bsz, kv_seq_len, config.n_kv_heads, config.n_groups, head_dim
            )
            V_mod = V_mod.expand(
                bsz, kv_seq_len, config.n_kv_heads, config.n_groups, head_dim
            )
            if requires_grad:
                # Training path: collapse to a plain 4-D layout.
                K_mod = K_mod.reshape(bsz, kv_seq_len, n_heads, head_dim)
                V_mod = V_mod.reshape(bsz, kv_seq_len, n_heads, head_dim)
            else:
                # Inference path: keep the grouped 5-D layout and group Q to match.
                Q_mod = Q_t.view(
                    bsz, q_len, config.n_kv_heads, config.n_groups, head_dim
                )
        has_block = XFORMERS_BLOCK_DIAG_CLS is not None and isinstance(
            attn_bias, XFORMERS_BLOCK_DIAG_CLS
        )
        if config.n_groups != 1 and has_block:
            # Block-diagonal biases expect a flattened batch dimension of 1.
            if not requires_grad:
                Q_mod = Q_mod.view(
                    1, bsz * q_len, config.n_kv_heads, config.n_groups, head_dim
                )
                K_mod = K_mod.view(
                    1, bsz * kv_seq_len, config.n_kv_heads, config.n_groups, head_dim
                )
                V_mod = V_mod.view(
                    1, bsz * kv_seq_len, config.n_kv_heads, config.n_groups, head_dim
                )
            else:
                Q_mod = Q_mod.view(1, bsz * q_len, n_heads, head_dim)
                K_mod = K_mod.view(1, bsz * kv_seq_len, n_heads, head_dim)
                V_mod = V_mod.view(1, bsz * kv_seq_len, n_heads, head_dim)
        out = xformers_attention(
            Q_mod,
            K_mod,
            V_mod,
            attn_bias = attn_bias,
            **xformers_kwargs,
        )
        if config.n_groups != 1 and not requires_grad:
            # Undo the grouped 5-D layout back to (bsz, q_len, n_heads, head_dim).
            out = out.view(bsz, q_len, config.n_kv_heads, config.n_groups, head_dim)
            out = out.reshape(bsz, q_len, n_heads, head_dim)
        else:
            out = out.view(bsz, q_len, n_heads, head_dim)
        return out
    else:
        # SDPA fallback.
        local_mask = context.attention_mask
        is_causal_local = False
        if context.seq_info is not None and local_mask is None:
            # Packed batch: emulate varlen with a dense block-diagonal causal mask.
            local_mask = build_sdpa_packed_attention_mask(
                context.seq_info,
                dtype = Q.dtype,
                device = Q.device,
                sliding_window = sliding_window,
            )
        else:
            q_len_local = Q.shape[-2]
            k_len_local = K.shape[-2]
            # Let SDPA apply its own causal masking when no explicit mask exists
            # and Q/K cover identical positions (i.e. not a cached decode step).
            is_causal_local = local_mask is None and q_len_local == k_len_local
        kwargs = dict(sdpa_kwargs)
        kwargs.setdefault("attn_mask", local_mask)
        kwargs.setdefault("is_causal", is_causal_local)
        if SDPA_HAS_GQA:
            # Newer torch: grouped-query heads handled natively by SDPA.
            kwargs.setdefault("enable_gqa", config.n_groups != 1)
            out = scaled_dot_product_attention(Q, K, V, **kwargs)
            return out.transpose(1, 2)
        # Older torch: manually expand KV heads to match the query head count.
        K_mod = K
        V_mod = V
        if config.n_groups != 1:
            K_mod = K[:, :, None, :, :].expand(
                bsz, config.n_kv_heads, config.n_groups, kv_seq_len, head_dim
            )
            V_mod = V[:, :, None, :, :].expand(
                bsz, config.n_kv_heads, config.n_groups, kv_seq_len, head_dim
            )
            K_mod = K_mod.reshape(bsz, n_heads, kv_seq_len, head_dim)
            V_mod = V_mod.reshape(bsz, n_heads, kv_seq_len, head_dim)
        out = scaled_dot_product_attention(
            Q.contiguous(),
            K_mod.contiguous(),
            V_mod.contiguous(),
            **kwargs,
        )
        return out.transpose(1, 2).contiguous()
# Public API. The backend-name constants are included because
# unsloth.utils.__init__ re-exports them alongside the classes/functions.
__all__ = [
    "AttentionConfig",
    "AttentionContext",
    "FLASH_VARLEN",
    "FLASH_DENSE",
    "XFORMERS",
    "SDPA",
    "select_attention_backend",
    "run_attention",
]
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/utils/packing.py | unsloth/utils/packing.py | # Copyright 2023-present Daniel Han-Chen, Michael Han-Chen & the Unsloth team. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""Utilities for enabling packed (padding-free) batches across Unsloth."""
from __future__ import annotations
import logging
from collections import OrderedDict
from typing import Any, Iterable, Optional, Sequence, Tuple
import torch
# xFormers is optional, and BlockDiagonalCausalMask has moved between releases:
# try the current location first, then the legacy one; None => unavailable.
try:
    from xformers.ops.fmha.attn_bias import (
        BlockDiagonalCausalMask as _XFormersBlockMask,
    )
except Exception:
    try:
        from xformers.attn_bias import BlockDiagonalCausalMask as _XFormersBlockMask
    except Exception:
        _XFormersBlockMask = None

# Small LRU cache of block-diagonal masks, keyed by (lengths tuple, window key).
_XFORMERS_MASK_CACHE_MAXSIZE = 32
_XFORMERS_MASK_CACHE: OrderedDict[Tuple[Tuple[int, ...], int], Any] = OrderedDict()
def _window_cache_key(sliding_window: Optional[int]) -> int:
if sliding_window is None or sliding_window <= 0:
return 0
return int(sliding_window)
def _get_cached_block_mask(
    lengths: Tuple[int, ...],
    sliding_window: Optional[int],
):
    """Return an xFormers block-diagonal causal mask for ``lengths``, LRU-cached."""
    if _XFormersBlockMask is None:
        return None
    window_key = _window_cache_key(sliding_window)
    key = (lengths, window_key)
    hit = _XFORMERS_MASK_CACHE.get(key)
    if hit is not None:
        # Refresh recency on a cache hit.
        _XFORMERS_MASK_CACHE.move_to_end(key)
        return hit
    mask = _XFormersBlockMask.from_seqlens(list(lengths))
    if window_key and mask is not None and hasattr(mask, "make_local_attention"):
        mask = mask.make_local_attention(window_size = window_key)
    _XFORMERS_MASK_CACHE[key] = mask
    # Evict the least-recently-used entry when over capacity.
    if len(_XFORMERS_MASK_CACHE) > _XFORMERS_MASK_CACHE_MAXSIZE:
        _XFORMERS_MASK_CACHE.popitem(last = False)
    return mask
class _TrlPackingWarningFilter(logging.Filter):
to_filter = (
"attention implementation is not",
"kernels-community",
)
def filter(self, record: logging.LogRecord) -> bool:
message = record.getMessage()
return not any(substring in message for substring in self.to_filter)
# Guard so the warning filter is installed at most once per process.
_TRL_FILTER_INSTALLED = False


def _ensure_trl_warning_filter():
    """Install the TRL warning filter on the sft_trainer logger (idempotent)."""
    global _TRL_FILTER_INSTALLED
    if not _TRL_FILTER_INSTALLED:
        logging.getLogger("trl.trainer.sft_trainer").addFilter(
            _TrlPackingWarningFilter()
        )
        _TRL_FILTER_INSTALLED = True
def mark_allow_overlength(module):
    """Mark a module hierarchy so padding-free batches can exceed max_seq_length."""
    if module is None:
        return
    # Only flag objects that actually carry a max_seq_length limit.
    if hasattr(module, "max_seq_length"):
        module._unsloth_allow_packed_overlength = True
    child_iter = getattr(module, "children", None)
    if child_iter is None:
        return
    # Recurse through the (torch.nn.Module-style) child iterator.
    for child in child_iter():
        mark_allow_overlength(child)
def configure_sample_packing(config):
    """Mutate an ``SFTConfig`` so TRL prepares packed batches."""
    _ensure_trl_warning_filter()
    # Packing implies padding-free collation in TRL.
    config.packing = True
    config.padding_free = True
def configure_padding_free(config):
    """Mutate an ``SFTConfig`` so TRL enables padding-free batching without packing."""
    _ensure_trl_warning_filter()
    config.padding_free = True
def enable_sample_packing(
    model,
    trainer,
    *,
    sequence_lengths_key: str = "seq_lengths",
) -> None:
    """Enable runtime support for packed batches on an existing trainer.

    Wraps the trainer's data collator so each batch carries a flat
    ``packed_seq_lengths`` int32 tensor (one entry per packed sample), which the
    attention layer uses to build varlen / block-diagonal masks.
    """
    if model is None or trainer is None:
        raise ValueError("model and trainer must not be None")
    # Allow packed rows to exceed the model's configured max_seq_length.
    mark_allow_overlength(model)
    # Keep metadata columns (e.g. the per-sample lengths) in each example.
    if hasattr(trainer, "args") and hasattr(trainer.args, "remove_unused_columns"):
        trainer.args.remove_unused_columns = False
    collator = getattr(trainer, "data_collator", None)
    if collator is None or not hasattr(collator, "torch_call"):
        return
    # Idempotence guard: never wrap the same collator twice.
    if getattr(collator, "_unsloth_packing_wrapped", False):
        return
    if hasattr(collator, "padding_free"):
        collator.padding_free = True
    if hasattr(collator, "return_position_ids"):
        collator.return_position_ids = True
    original_torch_call = collator.torch_call

    def torch_call_with_lengths(examples: Sequence[dict]):
        # Delegate collation, then attach flattened per-sample lengths.
        batch = original_torch_call(examples)
        if examples and isinstance(examples[0], dict):
            seq_lengths: list[int] = []
            for example in examples:
                lengths = example.get(sequence_lengths_key)
                if isinstance(lengths, Iterable):
                    seq_lengths.extend(int(length) for length in lengths)
            if seq_lengths:
                batch["packed_seq_lengths"] = torch.tensor(
                    seq_lengths, dtype = torch.int32
                )
                # Packed batches are fully dense; the padding mask is redundant.
                if "attention_mask" in batch:
                    batch.pop("attention_mask")
        return batch

    collator.torch_call = torch_call_with_lengths
    collator._unsloth_packing_wrapped = True
def enable_padding_free_metadata(model, trainer):
    """Inject seq-length metadata when padding-free batching is enabled without packing.

    Wraps the collator so each batch gains a ``packed_seq_lengths`` int32 tensor;
    when an example lacks ``seq_lengths``, the length of its ``input_ids`` is used.
    """
    collator = getattr(trainer, "data_collator", None)
    # Skip when there is no collator, it is already wrapped, or padding-free
    # batching is not actually enabled on it.
    if (
        collator is None
        or getattr(collator, "_unsloth_padding_free_lengths_wrapped", False)
        or not getattr(collator, "padding_free", False)
    ):
        return
    mark_allow_overlength(model)
    if hasattr(collator, "return_position_ids"):
        collator.return_position_ids = True
    original_torch_call = collator.torch_call

    def torch_call_with_padding_free_metadata(examples: Sequence[dict]):
        # Collect (or derive) per-example lengths BEFORE collation, since the
        # collator may drop metadata columns.
        seq_lengths: list[int] = []
        if examples and isinstance(examples[0], dict):
            for example in examples:
                lengths = example.get("seq_lengths")
                if lengths is None:
                    ids = example.get("input_ids")
                    if ids is None:
                        continue
                    lengths = [len(ids)]
                    # Backfill so downstream consumers see consistent metadata.
                    example["seq_lengths"] = lengths
                seq_lengths.extend(lengths)
        batch = original_torch_call(examples)
        if seq_lengths:
            batch["packed_seq_lengths"] = torch.tensor(
                seq_lengths,
                dtype = torch.int32,
            )
        return batch

    collator.torch_call = torch_call_with_padding_free_metadata
    collator._unsloth_padding_free_lengths_wrapped = True
def get_packed_info_from_kwargs(
    kwargs: dict,
    device: torch.device,
) -> Optional[Tuple[torch.Tensor, torch.Tensor, int]]:
    """Return packed sequence metadata expected by the attention kernels.

    Returns ``(lengths, cu_seqlens, max_seqlen)`` or None when the batch carries
    no ``packed_seq_lengths`` entry.
    """
    packed = kwargs.get("packed_seq_lengths")
    if packed is None:
        return None
    lengths = packed.to(device = device, dtype = torch.int32, non_blocking = True)
    # Prefix sums with a leading zero: cu_seqlens[i] is the start offset of row i.
    running = torch.cumsum(lengths, dim = 0, dtype = torch.int32)
    zero = torch.zeros(1, dtype = torch.int32, device = device)
    cu_seqlens = torch.cat((zero, running))
    return lengths, cu_seqlens, int(lengths.max().item())
def build_xformers_block_causal_mask(
    seq_info: Optional[Tuple[torch.Tensor, torch.Tensor, int]],
    *,
    sliding_window: Optional[int] = None,
    base_mask: Optional[Any] = None,
):
    """Return an xFormers attention bias for packed (or unpacked) causal attention.

    With ``seq_info`` a cached block-diagonal mask is used (the cache already
    applies the sliding window); otherwise ``base_mask`` is used. Returns None
    when xFormers is unavailable or the length list is empty.
    """
    if _XFormersBlockMask is None:
        return None
    if seq_info is not None:
        seq_lengths, _, _ = seq_info
        lengths_tensor = seq_lengths.to("cpu", torch.int32)
        if lengths_tensor.numel() == 0:
            return None
        lengths = tuple(int(x) for x in lengths_tensor.tolist())
        mask = _get_cached_block_mask(lengths, sliding_window)
    else:
        mask = base_mask
    # Apply local attention when requested; the hasattr guard keeps this from
    # re-windowing a cached mask that was already made local (local masks do
    # not expose make_local_attention).
    if (
        sliding_window is not None
        and sliding_window > 0
        and mask is not None
        and hasattr(mask, "make_local_attention")
    ):
        mask = mask.make_local_attention(window_size = sliding_window)
    return mask
def build_sdpa_packed_attention_mask(
    seq_info: Tuple[torch.Tensor, torch.Tensor, int],
    *,
    dtype: torch.dtype,
    device: torch.device,
    sliding_window: Optional[int] = None,
) -> torch.Tensor:
    """Build a dense additive (0 / -inf) block-diagonal causal mask for SDPA.

    Returns a ``(1, 1, total_tokens, total_tokens)`` tensor where each packed
    sample forms an independent causal block; optional sliding-window limits
    how far back within a block a token may attend.
    """
    seq_lengths, _, _ = seq_info
    total_tokens = int(seq_lengths.sum().item())
    neg_inf = float("-inf")
    # Start fully blocked; open up each sample's causal block below.
    mask = torch.full(
        (total_tokens, total_tokens),
        neg_inf,
        dtype = dtype,
        device = device,
    )
    start = 0
    for raw_length in seq_lengths.tolist():
        n = int(raw_length)
        if n <= 0:
            continue
        positions = torch.arange(n, device = device)
        # dist[i, j] = i - j; causal attention keeps dist >= 0.
        dist = positions.unsqueeze(1) - positions.unsqueeze(0)
        blocked = dist < 0
        if sliding_window is not None and sliding_window > 0 and n > sliding_window:
            blocked = blocked | (dist >= sliding_window)
        block = torch.zeros((n, n), dtype = dtype, device = device)
        block = block.masked_fill(blocked, neg_inf)
        mask[start : start + n, start : start + n] = block
        start += n
    return mask.unsqueeze(0).unsqueeze(0)
def _normalize_packed_lengths(
seq_lengths: Any,
*,
device: torch.device,
) -> Optional[torch.Tensor]:
if seq_lengths is None:
return None
if isinstance(seq_lengths, torch.Tensor):
lengths = seq_lengths.to(device = device, dtype = torch.int64)
else:
lengths = torch.tensor(seq_lengths, device = device, dtype = torch.int64)
if lengths.ndim != 1:
lengths = lengths.reshape(-1)
if lengths.numel() == 0:
return None
return lengths
def mask_packed_sequence_boundaries(
    shift_labels: torch.Tensor,
    seq_lengths: Any,
    *,
    ignore_index: int = -100,
) -> bool:
    """Mark final token of every packed sample so CE ignores boundary predictions.

    Mutates ``shift_labels`` in place; returns True when at least one boundary
    position was masked.
    """
    lengths = _normalize_packed_lengths(seq_lengths, device = shift_labels.device)
    if lengths is None:
        return False
    flat = shift_labels.reshape(-1)
    total_tokens = flat.shape[0]
    # Last-token index of each packed sample.
    ends = torch.cumsum(lengths, dim = 0) - 1
    in_range = ends < total_tokens
    if not bool(in_range.all()):
        ends = ends[in_range]
    if ends.numel() == 0:
        return False
    flat[ends] = ignore_index
    return True
# Public API of this module; underscore-prefixed helpers stay internal.
__all__ = [
    "configure_sample_packing",
    "configure_padding_free",
    "enable_sample_packing",
    "enable_padding_free_metadata",
    "mark_allow_overlength",
    "get_packed_info_from_kwargs",
    "build_xformers_block_causal_mask",
    "build_sdpa_packed_attention_mask",
    "mask_packed_sequence_boundaries",
]
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/utils/__init__.py | unsloth/utils/__init__.py | # Copyright 2023-present Daniel Han-Chen, Michael Han-Chen & the Unsloth team. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from .packing import (
configure_padding_free,
configure_sample_packing,
enable_padding_free_metadata,
enable_sample_packing,
mark_allow_overlength,
)
from .attention_dispatch import (
AttentionConfig,
AttentionContext,
FLASH_DENSE,
FLASH_VARLEN,
SDPA,
XFORMERS,
run_attention,
select_attention_backend,
)
# Names re-exported at the unsloth.utils package level.
__all__ = [
    "configure_sample_packing",
    "configure_padding_free",
    "enable_sample_packing",
    "enable_padding_free_metadata",
    "mark_allow_overlength",
    "AttentionConfig",
    "AttentionContext",
    "FLASH_VARLEN",
    "FLASH_DENSE",
    "XFORMERS",
    "SDPA",
    "run_attention",
    "select_attention_backend",
]
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/utils/hf_hub.py | unsloth/utils/hf_hub.py | from huggingface_hub import HfApi, ModelInfo
# Lazily-constructed singleton HfApi client (see get_model_info / list_models).
_HFAPI: HfApi = None

# ModelInfo "expand" properties that gauge a model's popularity on the Hub.
POPULARITY_PROPERTIES = [
    "downloads",
    "downloadsAllTime",
    "trendingScore",
    "likes",
]
# Magnitude thresholds for human-readable integer formatting.
THOUSAND = 1000
MILLION = 1000000
BILLION = 1000000000


def formatted_int(value: int) -> str:
    """Render an integer with a K/M/B suffix and one decimal place.

    Values below 1000 (including negatives) are returned unchanged as strings.
    """
    for threshold, suffix in ((BILLION, "B"), (MILLION, "M"), (THOUSAND, "K")):
        if value >= threshold:
            return f"{float(value) / threshold:,.1f}{suffix}"
    return str(value)
def get_model_info(
    model_id: str, properties: list[str] = ["safetensors", "lastModified"]
) -> ModelInfo:
    """
    Get the model info for a specific model.

    properties: list[str] = See https://huggingface.co/docs/huggingface_hub/api-ref/hf_hub/hf_api/model_info
    Default properties: ["safetensors", "lastModified"], only retrieves minimal information.
    Set to None to retrieve the full model information.
    """
    global _HFAPI
    if _HFAPI is None:
        _HFAPI = HfApi()
    # NOTE(review): the mutable list default is safe only because it is never
    # mutated here — treat it as read-only.
    try:
        return _HFAPI.model_info(model_id, expand = properties)
    except Exception as e:
        # Best-effort lookup: report the failure and return None instead of raising.
        print(f"Error getting model info for {model_id}: {e}")
        return None
def list_models(
    properties: list[str] = None,
    full: bool = False,
    sort: str = "downloads",
    author: str = "unsloth",
    search: str = None,
    limit: int = 10,
) -> list[ModelInfo]:
    """
    Retrieve model information from the Hugging Face Hub.

    properties: list[str] = See https://huggingface.co/docs/huggingface_hub/api-ref/hf_hub/hf_api/list_models
    full: bool = Whether to retrieve the full model information, if True properties will be ignored.
    sort: str = The sort order.
    author: str = The author of the model.
    search: str = The search query for filtering models.
    """
    global _HFAPI
    if _HFAPI is None:
        _HFAPI = HfApi()
    # `full` supersedes `expand`; the API rejects passing both.
    expand = None if full else properties
    return _HFAPI.list_models(
        author = author,
        search = search,
        sort = sort,
        limit = limit,
        expand = expand,
        full = full,
    )
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
unslothai/unsloth | https://github.com/unslothai/unsloth/blob/85bfdaf7ab3c32f95f98f3e3927164797fcc6d46/unsloth/registry/_qwen.py | unsloth/registry/_qwen.py | from unsloth.registry.registry import ModelInfo, ModelMeta, QuantType, _register_models
# One-shot guards so each model family is registered at most once per process.
_IS_QWEN_2_5_REGISTERED = False
_IS_QWEN_2_5_VL_REGISTERED = False
_IS_QWEN_QWQ_REGISTERED = False
class QwenModelInfo(ModelInfo):
    """Qwen text models: repo key shaped like ``Qwen2.5-7B``."""

    @classmethod
    def construct_model_name(cls, base_name, version, size, quant_type, instruct_tag):
        return super().construct_model_name(
            base_name,
            version,
            size,
            quant_type,
            instruct_tag,
            f"{base_name}{version}-{size}B",
        )
class QwenVLModelInfo(ModelInfo):
    """Qwen vision-language models: repo key shaped like ``Qwen2.5-VL-7B``."""

    @classmethod
    def construct_model_name(cls, base_name, version, size, quant_type, instruct_tag):
        return super().construct_model_name(
            base_name,
            version,
            size,
            quant_type,
            instruct_tag,
            f"{base_name}{version}-VL-{size}B",
        )
class QwenQwQModelInfo(ModelInfo):
    """QwQ models: repo key shaped like ``QwQ-32B`` (no version component)."""

    @classmethod
    def construct_model_name(cls, base_name, version, size, quant_type, instruct_tag):
        return super().construct_model_name(
            base_name,
            version,
            size,
            quant_type,
            instruct_tag,
            f"{base_name}-{size}B",
        )
class QwenQVQPreviewModelInfo(ModelInfo):
    """QVQ preview models: repo key shaped like ``QVQ-72B-Preview``."""

    @classmethod
    def construct_model_name(cls, base_name, version, size, quant_type, instruct_tag):
        return super().construct_model_name(
            base_name,
            version,
            size,
            quant_type,
            instruct_tag,
            f"{base_name}-{size}B-Preview",
        )
# Qwen2.5 Model Meta (text-only; base and instruction-tuned variants).
Qwen_2_5_Meta = ModelMeta(
    org = "Qwen",
    base_name = "Qwen",
    instruct_tags = [None, "Instruct"],
    model_version = "2.5",
    model_sizes = ["3", "7"],  # billions of parameters
    model_info_cls = QwenModelInfo,
    is_multimodal = False,
    quant_types = [QuantType.NONE, QuantType.BNB, QuantType.UNSLOTH],
)

# Qwen2.5 VL Model Meta (vision-language).
Qwen_2_5_VLMeta = ModelMeta(
    org = "Qwen",
    base_name = "Qwen",
    instruct_tags = ["Instruct"],  # No base, only instruction tuned
    model_version = "2.5",
    model_sizes = ["3", "7", "32", "72"],
    model_info_cls = QwenVLModelInfo,
    is_multimodal = True,
    quant_types = [QuantType.NONE, QuantType.BNB, QuantType.UNSLOTH],
)

# Qwen QwQ Model Meta (reasoning model; also published as GGUF).
QwenQwQMeta = ModelMeta(
    org = "Qwen",
    base_name = "QwQ",
    instruct_tags = [None],
    model_version = "",
    model_sizes = ["32"],
    model_info_cls = QwenQwQModelInfo,
    is_multimodal = False,
    quant_types = [QuantType.NONE, QuantType.BNB, QuantType.UNSLOTH, QuantType.GGUF],
)

# Qwen QVQ Preview Model Meta (multimodal reasoning preview).
QwenQVQPreviewMeta = ModelMeta(
    org = "Qwen",
    base_name = "QVQ",
    instruct_tags = [None],
    model_version = "",
    model_sizes = ["72"],
    model_info_cls = QwenQVQPreviewModelInfo,
    is_multimodal = True,
    quant_types = [QuantType.NONE, QuantType.BNB],
)
def register_qwen_2_5_models(include_original_model: bool = False):
    """Register Qwen 2.5 text models exactly once per process."""
    global _IS_QWEN_2_5_REGISTERED
    if not _IS_QWEN_2_5_REGISTERED:
        _register_models(Qwen_2_5_Meta, include_original_model = include_original_model)
        _IS_QWEN_2_5_REGISTERED = True
def register_qwen_2_5_vl_models(include_original_model: bool = False):
    """Register Qwen 2.5 vision-language models exactly once per process."""
    global _IS_QWEN_2_5_VL_REGISTERED
    if not _IS_QWEN_2_5_VL_REGISTERED:
        _register_models(Qwen_2_5_VLMeta, include_original_model = include_original_model)
        _IS_QWEN_2_5_VL_REGISTERED = True
def register_qwen_qwq_models(include_original_model: bool = False):
    """Register QwQ and QVQ-Preview models exactly once per process."""
    global _IS_QWEN_QWQ_REGISTERED
    if not _IS_QWEN_QWQ_REGISTERED:
        # Both reasoning families share a single registration guard.
        _register_models(QwenQwQMeta, include_original_model = include_original_model)
        _register_models(QwenQVQPreviewMeta, include_original_model = include_original_model)
        _IS_QWEN_QWQ_REGISTERED = True
def register_qwen_models(include_original_model: bool = False):
    """Register every Qwen family handled by this module."""
    for register in (
        register_qwen_2_5_models,
        register_qwen_2_5_vl_models,
        register_qwen_qwq_models,
    ):
        register(include_original_model = include_original_model)
if __name__ == "__main__":
from unsloth.registry.registry import MODEL_REGISTRY, _check_model_info
MODEL_REGISTRY.clear()
register_qwen_models(include_original_model = True)
for model_id, model_info in MODEL_REGISTRY.items():
model_info = _check_model_info(model_id)
if model_info is None:
print(f"\u2718 {model_id}")
else:
print(f"\u2713 {model_id}")
| python | Apache-2.0 | 85bfdaf7ab3c32f95f98f3e3927164797fcc6d46 | 2026-01-04T14:39:29.397284Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.