text
stringlengths
5
631k
id
stringlengths
14
178
metadata
dict
__index_level_0__
int64
0
647
import inspect
from dataclasses import asdict, dataclass
from logging import getLogger
from typing import TYPE_CHECKING, Any, Callable, Type

from smolagents.models import ChatMessage, MessageRole, get_dict_from_nested_dataclasses
from smolagents.monitoring import AgentLogger, LogLevel, Timing, TokenUsage
from smolagents.utils import AgentError, make_json_serializable


if TYPE_CHECKING:
    import PIL.Image

    from smolagents.models import ChatMessage
    from smolagents.monitoring import AgentLogger


__all__ = ["AgentMemory"]

logger = getLogger(__name__)


@dataclass
class ToolCall:
    """A single tool invocation requested by the model: tool name, arguments and call id."""

    name: str
    arguments: Any
    id: str

    def dict(self):
        # Serialized in the OpenAI-style "function" tool-call shape.
        return {
            "id": self.id,
            "type": "function",
            "function": {
                "name": self.name,
                "arguments": make_json_serializable(self.arguments),
            },
        }


@dataclass
class MemoryStep:
    """Base class for all memory steps; subclasses convert themselves to chat messages."""

    def dict(self):
        return asdict(self)

    def to_messages(self, summary_mode: bool = False) -> list[ChatMessage]:
        raise NotImplementedError


@dataclass
class ActionStep(MemoryStep):
    """One action step of the agent: model call, tool calls, observations and outcome."""

    step_number: int
    timing: Timing
    model_input_messages: list[ChatMessage] | None = None
    tool_calls: list[ToolCall] | None = None
    error: AgentError | None = None
    model_output_message: ChatMessage | None = None
    model_output: str | list[dict[str, Any]] | None = None
    code_action: str | None = None
    observations: str | None = None
    observations_images: list["PIL.Image.Image"] | None = None
    action_output: Any = None
    token_usage: TokenUsage | None = None
    is_final_answer: bool = False

    def dict(self):
        # We overwrite the method to parse the tool_calls and action_output manually
        return {
            "step_number": self.step_number,
            "timing": self.timing.dict(),
            "model_input_messages": [
                make_json_serializable(get_dict_from_nested_dataclasses(msg)) for msg in self.model_input_messages
            ]
            if self.model_input_messages
            else None,
            "tool_calls": [tc.dict() for tc in self.tool_calls] if self.tool_calls else [],
            "error": self.error.dict() if self.error else None,
            "model_output_message": make_json_serializable(get_dict_from_nested_dataclasses(self.model_output_message))
            if self.model_output_message
            else None,
            "model_output": self.model_output,
            "code_action": self.code_action,
            "observations": self.observations,
            "observations_images": [image.tobytes() for image in self.observations_images]
            if self.observations_images
            else None,
            "action_output": make_json_serializable(self.action_output),
            "token_usage": asdict(self.token_usage) if self.token_usage else None,
            "is_final_answer": self.is_final_answer,
        }

    def to_messages(self, summary_mode: bool = False) -> list[ChatMessage]:
        """Render this step as chat messages (model output, tool calls, observations, errors)."""
        messages = []
        if self.model_output is not None and not summary_mode:
            messages.append(
                ChatMessage(role=MessageRole.ASSISTANT, content=[{"type": "text", "text": self.model_output.strip()}])
            )

        if self.tool_calls is not None:
            messages.append(
                ChatMessage(
                    role=MessageRole.TOOL_CALL,
                    content=[
                        {
                            "type": "text",
                            "text": "Calling tools:\n" + str([tc.dict() for tc in self.tool_calls]),
                        }
                    ],
                )
            )

        if self.observations_images:
            messages.append(
                ChatMessage(
                    role=MessageRole.USER,
                    content=[
                        {
                            "type": "image",
                            "image": image,
                        }
                        for image in self.observations_images
                    ],
                )
            )

        if self.observations is not None:
            messages.append(
                ChatMessage(
                    role=MessageRole.TOOL_RESPONSE,
                    content=[
                        {
                            "type": "text",
                            "text": f"Observation:\n{self.observations}",
                        }
                    ],
                )
            )
        if self.error is not None:
            error_message = (
                "Error:\n"
                + str(self.error)
                + "\nNow let's retry: take care not to repeat previous errors! If you have retried several times, try a completely different approach.\n"
            )
            message_content = f"Call id: {self.tool_calls[0].id}\n" if self.tool_calls else ""
            message_content += error_message
            messages.append(
                ChatMessage(role=MessageRole.TOOL_RESPONSE, content=[{"type": "text", "text": message_content}])
            )

        return messages


@dataclass
class PlanningStep(MemoryStep):
    """A planning step: the model's plan and the messages that produced it."""

    model_input_messages: list[ChatMessage]
    model_output_message: ChatMessage
    plan: str
    timing: Timing
    token_usage: TokenUsage | None = None

    def dict(self):
        return {
            "model_input_messages": [
                make_json_serializable(get_dict_from_nested_dataclasses(msg)) for msg in self.model_input_messages
            ],
            "model_output_message": make_json_serializable(
                get_dict_from_nested_dataclasses(self.model_output_message)
            ),
            "plan": self.plan,
            "timing": self.timing.dict(),
            "token_usage": asdict(self.token_usage) if self.token_usage else None,
        }

    def to_messages(self, summary_mode: bool = False) -> list[ChatMessage]:
        if summary_mode:
            return []
        return [
            ChatMessage(role=MessageRole.ASSISTANT, content=[{"type": "text", "text": self.plan.strip()}]),
            ChatMessage(
                role=MessageRole.USER, content=[{"type": "text", "text": "Now proceed and carry out this plan."}]
            ),
            # This second message creates a role change to prevent models models from simply continuing the plan message
        ]


@dataclass
class TaskStep(MemoryStep):
    """A user task, possibly with attached images."""

    task: str
    task_images: list["PIL.Image.Image"] | None = None

    def to_messages(self, summary_mode: bool = False) -> list[ChatMessage]:
        content = [{"type": "text", "text": f"New task:\n{self.task}"}]
        if self.task_images:
            content.extend([{"type": "image", "image": image} for image in self.task_images])
        return [ChatMessage(role=MessageRole.USER, content=content)]


@dataclass
class SystemPromptStep(MemoryStep):
    """The agent's system prompt; omitted from messages in summary mode."""

    system_prompt: str

    def to_messages(self, summary_mode: bool = False) -> list[ChatMessage]:
        if summary_mode:
            return []
        return [ChatMessage(role=MessageRole.SYSTEM, content=[{"type": "text", "text": self.system_prompt}])]


@dataclass
class FinalAnswerStep(MemoryStep):
    """The agent's final answer."""

    output: Any


class AgentMemory:
    """Memory for the agent, containing the system prompt and all steps taken by the agent.

    This class is used to store the agent's steps, including tasks, actions, and planning steps.
    It allows for resetting the memory, retrieving succinct or full step information, and replaying the agent's steps.

    Args:
        system_prompt (`str`): System prompt for the agent, which sets the context and instructions for the agent's behavior.

    **Attributes**:
        - **system_prompt** (`SystemPromptStep`) -- System prompt step for the agent.
        - **steps** (`list[TaskStep | ActionStep | PlanningStep]`) -- List of steps taken by the agent, which can include tasks, actions, and planning steps.
    """

    def __init__(self, system_prompt: str):
        self.system_prompt: SystemPromptStep = SystemPromptStep(system_prompt=system_prompt)
        self.steps: list[TaskStep | ActionStep | PlanningStep] = []

    def reset(self):
        """Reset the agent's memory, clearing all steps and keeping the system prompt."""
        self.steps = []

    def get_succinct_steps(self) -> list[dict]:
        """Return a succinct representation of the agent's steps, excluding model input messages."""
        return [
            {key: value for key, value in step.dict().items() if key != "model_input_messages"} for step in self.steps
        ]

    def get_full_steps(self) -> list[dict]:
        """Return a full representation of the agent's steps, including model input messages."""
        if len(self.steps) == 0:
            return []
        return [step.dict() for step in self.steps]

    def replay(self, logger: AgentLogger, detailed: bool = False):
        """Prints a pretty replay of the agent's steps.

        Args:
            logger (`AgentLogger`): The logger to print replay logs to.
            detailed (`bool`, default `False`): If True, also displays the memory at each step. Defaults to False.
                Careful: will increase log length exponentially. Use only for debugging.
        """
        logger.console.log("Replaying the agent's steps:")
        logger.log_markdown(title="System prompt", content=self.system_prompt.system_prompt, level=LogLevel.ERROR)
        for step in self.steps:
            if isinstance(step, TaskStep):
                logger.log_task(step.task, "", level=LogLevel.ERROR)
            elif isinstance(step, ActionStep):
                logger.log_rule(f"Step {step.step_number}", level=LogLevel.ERROR)
                if detailed and step.model_input_messages is not None:
                    logger.log_messages(step.model_input_messages, level=LogLevel.ERROR)
                if step.model_output is not None:
                    logger.log_markdown(title="Agent output:", content=step.model_output, level=LogLevel.ERROR)
            elif isinstance(step, PlanningStep):
                logger.log_rule("Planning step", level=LogLevel.ERROR)
                if detailed and step.model_input_messages is not None:
                    logger.log_messages(step.model_input_messages, level=LogLevel.ERROR)
                logger.log_markdown(title="Agent output:", content=step.plan, level=LogLevel.ERROR)

    def return_full_code(self) -> str:
        """Returns all code actions from the agent's steps, concatenated as a single script."""
        return "\n\n".join(
            [step.code_action for step in self.steps if isinstance(step, ActionStep) and step.code_action is not None]
        )


class CallbackRegistry:
    """Registry for callbacks that are called at each step of the agent's execution.

    Callbacks are registered by passing a step class and a callback function.
    """

    def __init__(self):
        self._callbacks: dict[Type[MemoryStep], list[Callable]] = {}

    def register(self, step_cls: Type[MemoryStep], callback: Callable):
        """Register a callback for a step class.

        Args:
            step_cls (Type[MemoryStep]): Step class to register the callback for.
            callback (Callable): Callback function to register.
        """
        if step_cls not in self._callbacks:
            self._callbacks[step_cls] = []
        self._callbacks[step_cls].append(callback)

    def callback(self, memory_step, **kwargs):
        """Call callbacks registered for a step type.

        Args:
            memory_step (MemoryStep): Step to call the callbacks for.
            **kwargs: Additional arguments to pass to callbacks that accept them.
                Typically, includes the agent instance.

        Notes:
            For backwards compatibility, callbacks with a single parameter signature
            receive only the memory_step, while callbacks with multiple parameters
            receive both the memory_step and any additional kwargs.
        """
        # For compatibility with old callbacks that only take the step as an argument
        for cls in memory_step.__class__.__mro__:
            for cb in self._callbacks.get(cls, []):
                cb(memory_step) if len(inspect.signature(cb).parameters) == 1 else cb(memory_step, **kwargs)
smolagents/src/smolagents/memory.py/0
{ "file_path": "smolagents/src/smolagents/memory.py", "repo_id": "smolagents", "token_count": 5358 }
287
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import io import json import os import re import tempfile import uuid from collections.abc import Generator from contextlib import nullcontext as does_not_raise from dataclasses import dataclass from pathlib import Path from textwrap import dedent from typing import Optional from unittest.mock import MagicMock, patch import pytest from huggingface_hub import ( ChatCompletionOutputFunctionDefinition, ChatCompletionOutputMessage, ChatCompletionOutputToolCall, ) from rich.console import Console from smolagents import EMPTY_PROMPT_TEMPLATES from smolagents.agent_types import AgentImage, AgentText from smolagents.agents import ( AgentError, AgentMaxStepsError, AgentToolCallError, CodeAgent, MultiStepAgent, RunResult, ToolCall, ToolCallingAgent, ToolOutput, populate_template, ) from smolagents.default_tools import DuckDuckGoSearchTool, FinalAnswerTool, PythonInterpreterTool, VisitWebpageTool from smolagents.memory import ( ActionStep, CallbackRegistry, FinalAnswerStep, MemoryStep, PlanningStep, SystemPromptStep, TaskStep, ) from smolagents.models import ( ChatMessage, ChatMessageToolCall, ChatMessageToolCallFunction, InferenceClientModel, MessageRole, Model, TransformersModel, ) from smolagents.monitoring import AgentLogger, LogLevel, Timing, TokenUsage from smolagents.tools import Tool, tool from smolagents.utils import ( BASE_BUILTIN_MODULES, AgentExecutionError, AgentGenerationError, 
AgentToolExecutionError, ) @dataclass class ChoiceDeltaToolCallFunction: arguments: Optional[str] = None name: Optional[str] = None @dataclass class ChoiceDeltaToolCall: index: Optional[int] = None id: Optional[str] = None function: Optional[ChoiceDeltaToolCallFunction] = None type: Optional[str] = None @dataclass class ChoiceDelta: content: Optional[str] = None function_call: Optional[str] = None refusal: Optional[str] = None role: Optional[str] = None tool_calls: Optional[list] = None def get_new_path(suffix="") -> str: directory = tempfile.mkdtemp() return os.path.join(directory, str(uuid.uuid4()) + suffix) @pytest.fixture def agent_logger(): return AgentLogger( LogLevel.DEBUG, console=Console(record=True, no_color=True, force_terminal=False, file=io.StringIO()) ) class FakeToolCallModel(Model): def generate(self, messages, tools_to_call_from=None, stop_sequences=None): if len(messages) < 3: return ChatMessage( role=MessageRole.ASSISTANT, content="I will call the python interpreter.", tool_calls=[ ChatMessageToolCall( id="call_0", type="function", function=ChatMessageToolCallFunction( name="python_interpreter", arguments={"code": "2*3.6452"} ), ) ], ) else: return ChatMessage( role=MessageRole.ASSISTANT, content="I will return the final answer.", tool_calls=[ ChatMessageToolCall( id="call_1", type="function", function=ChatMessageToolCallFunction(name="final_answer", arguments={"answer": "7.2904"}), ) ], ) class FakeToolCallModelImage(Model): def generate(self, messages, tools_to_call_from=None, stop_sequences=None): if len(messages) < 3: return ChatMessage( role=MessageRole.ASSISTANT, content="", tool_calls=[ ChatMessageToolCall( id="call_0", type="function", function=ChatMessageToolCallFunction( name="fake_image_generation_tool", arguments={"prompt": "An image of a cat"}, ), ) ], ) else: return ChatMessage( role=MessageRole.ASSISTANT, content="", tool_calls=[ ChatMessageToolCall( id="call_1", type="function", 
function=ChatMessageToolCallFunction(name="final_answer", arguments="image.png"), ) ], ) class FakeToolCallModelVL(Model): def generate(self, messages, tools_to_call_from=None, stop_sequences=None): if len(messages) < 3: return ChatMessage( role=MessageRole.ASSISTANT, content="", tool_calls=[ ChatMessageToolCall( id="call_0", type="function", function=ChatMessageToolCallFunction( name="fake_image_understanding_tool", arguments={ "prompt": "What is in this image?", "image": "image.png", }, ), ) ], ) else: return ChatMessage( role=MessageRole.ASSISTANT, content="", tool_calls=[ ChatMessageToolCall( id="call_1", type="function", function=ChatMessageToolCallFunction(name="final_answer", arguments="The image is a cat."), ) ], ) class FakeCodeModel(Model): def generate(self, messages, stop_sequences=None): prompt = str(messages) if "special_marker" not in prompt: return ChatMessage( role=MessageRole.ASSISTANT, content=""" Thought: I should multiply 2 by 3.6452. special_marker <code> result = 2**3.6452 </code> """, ) else: # We're at step 2 return ChatMessage( role=MessageRole.ASSISTANT, content=""" Thought: I can now answer the initial question <code> final_answer(7.2904) </code> """, ) class FakeCodeModelImageGeneration(Model): def generate(self, messages, stop_sequences=None): prompt = str(messages) if "special_marker" not in prompt: return ChatMessage( role=MessageRole.ASSISTANT, content=""" Thought: I should generate an image. 
special_marker <code> image = image_generation_tool() </code> """, ) else: # We're at step 2 return ChatMessage( role=MessageRole.ASSISTANT, content=""" Thought: I can now answer the initial question <code> final_answer(image) </code> """, ) class FakeCodeModelPlanning(Model): def generate(self, messages, stop_sequences=None): prompt = str(messages) if "planning_marker" not in prompt: return ChatMessage( role=MessageRole.ASSISTANT, content="llm plan update planning_marker", token_usage=TokenUsage(input_tokens=10, output_tokens=10), ) elif "action_marker" not in prompt: return ChatMessage( role=MessageRole.ASSISTANT, content=""" Thought: I should multiply 2 by 3.6452. action_marker <code> result = 2**3.6452 </code> """, token_usage=TokenUsage(input_tokens=10, output_tokens=10), ) else: return ChatMessage( role=MessageRole.ASSISTANT, content="llm plan again", token_usage=TokenUsage(input_tokens=10, output_tokens=10), ) class FakeCodeModelError(Model): def generate(self, messages, stop_sequences=None): prompt = str(messages) if "special_marker" not in prompt: return ChatMessage( role=MessageRole.ASSISTANT, content=""" Thought: I should multiply 2 by 3.6452. special_marker <code> print("Flag!") def error_function(): raise ValueError("error") error_function() </code> """, ) else: # We're at step 2 return ChatMessage( role=MessageRole.ASSISTANT, content=""" Thought: I faced an error in the previous step. <code> final_answer("got an error") </code> """, ) class FakeCodeModelSyntaxError(Model): def generate(self, messages, stop_sequences=None): prompt = str(messages) if "special_marker" not in prompt: return ChatMessage( role=MessageRole.ASSISTANT, content=""" Thought: I should multiply 2 by 3.6452. 
special_marker <code> a = 2 b = a * 2 print("Failing due to unexpected indent") print("Ok, calculation done!") </code> """, ) else: # We're at step 2 return ChatMessage( role=MessageRole.ASSISTANT, content=""" Thought: I can now answer the initial question <code> final_answer("got an error") </code> """, ) class FakeCodeModelImport(Model): def generate(self, messages, stop_sequences=None): return ChatMessage( role=MessageRole.ASSISTANT, content=""" Thought: I can answer the question <code> import numpy as np final_answer("got an error") </code> """, ) class FakeCodeModelFunctionDef(Model): def generate(self, messages, stop_sequences=None): prompt = str(messages) if "special_marker" not in prompt: return ChatMessage( role=MessageRole.ASSISTANT, content=""" Thought: Let's define the function. special_marker <code> import numpy as np def moving_average(x, w): return np.convolve(x, np.ones(w), 'valid') / w </code> """, ) else: # We're at step 2 return ChatMessage( role=MessageRole.ASSISTANT, content=""" Thought: I can now answer the initial question <code> x, w = [0, 1, 2, 3, 4, 5], 2 res = moving_average(x, w) final_answer(res) </code> """, ) class FakeCodeModelSingleStep(Model): def generate(self, messages, stop_sequences=None): return ChatMessage( role=MessageRole.ASSISTANT, content=""" Thought: I should multiply 2 by 3.6452. special_marker <code> result = python_interpreter(code="2*3.6452") final_answer(result) ``` """, ) class FakeCodeModelNoReturn(Model): def generate(self, messages, stop_sequences=None): return ChatMessage( role=MessageRole.ASSISTANT, content=""" Thought: I should multiply 2 by 3.6452. 
special_marker <code> result = python_interpreter(code="2*3.6452") print(result) ``` """, ) class TestAgent: def test_fake_toolcalling_agent(self): agent = ToolCallingAgent(tools=[PythonInterpreterTool()], model=FakeToolCallModel()) output = agent.run("What is 2 multiplied by 3.6452?") assert isinstance(output, str) assert "7.2904" in output assert agent.memory.steps[0].task == "What is 2 multiplied by 3.6452?" assert "7.2904" in agent.memory.steps[1].observations assert agent.memory.steps[2].model_output == "I will return the final answer." def test_toolcalling_agent_handles_image_tool_outputs(self, shared_datadir): import PIL.Image @tool def fake_image_generation_tool(prompt: str) -> PIL.Image.Image: """Tool that generates an image. Args: prompt: The prompt """ import PIL.Image return PIL.Image.open(shared_datadir / "000000039769.png") agent = ToolCallingAgent(tools=[fake_image_generation_tool], model=FakeToolCallModelImage()) output = agent.run("Make me an image.") assert isinstance(output, AgentImage) assert isinstance(agent.state["image.png"], PIL.Image.Image) def test_toolcalling_agent_handles_image_inputs(self, shared_datadir): import PIL.Image image = PIL.Image.open(shared_datadir / "000000039769.png") # dummy input @tool def fake_image_understanding_tool(prompt: str, image: PIL.Image.Image) -> str: """Tool that creates a caption for an image. Args: prompt: The prompt image: The image """ return "The image is a cat." agent = ToolCallingAgent(tools=[fake_image_understanding_tool], model=FakeToolCallModelVL()) output = agent.run("Caption this image.", images=[image]) assert output == "The image is a cat." def test_fake_code_agent(self): agent = CodeAgent(tools=[PythonInterpreterTool()], model=FakeCodeModel(), verbosity_level=10) output = agent.run("What is 2 multiplied by 3.6452?") assert isinstance(output, float) assert output == 7.2904 assert agent.memory.steps[0].task == "What is 2 multiplied by 3.6452?" 
assert agent.memory.steps[2].tool_calls == [ ToolCall(name="python_interpreter", arguments="final_answer(7.2904)", id="call_2") ] def test_additional_args_added_to_task(self): agent = CodeAgent(tools=[], model=FakeCodeModel()) agent.run( "What is 2 multiplied by 3.6452?", additional_args={"instruction": "Remember this."}, ) assert "Remember this" in agent.task def test_reset_conversations(self): agent = CodeAgent(tools=[PythonInterpreterTool()], model=FakeCodeModel()) output = agent.run("What is 2 multiplied by 3.6452?", reset=True) assert output == 7.2904 assert len(agent.memory.steps) == 3 output = agent.run("What is 2 multiplied by 3.6452?", reset=False) assert output == 7.2904 assert len(agent.memory.steps) == 5 output = agent.run("What is 2 multiplied by 3.6452?", reset=True) assert output == 7.2904 assert len(agent.memory.steps) == 3 def test_setup_agent_with_empty_toolbox(self): ToolCallingAgent(model=FakeToolCallModel(), tools=[]) def test_fails_max_steps(self): agent = CodeAgent( tools=[PythonInterpreterTool()], model=FakeCodeModelNoReturn(), # use this callable because it never ends max_steps=5, ) answer = agent.run("What is 2 multiplied by 3.6452?") assert len(agent.memory.steps) == 7 # Task step + 5 action steps + Final answer assert type(agent.memory.steps[-1].error) is AgentMaxStepsError assert isinstance(answer, str) agent = CodeAgent( tools=[PythonInterpreterTool()], model=FakeCodeModelNoReturn(), # use this callable because it never ends max_steps=5, ) answer = agent.run("What is 2 multiplied by 3.6452?", max_steps=3) assert len(agent.memory.steps) == 5 # Task step + 3 action steps + Final answer assert type(agent.memory.steps[-1].error) is AgentMaxStepsError assert isinstance(answer, str) def test_tool_descriptions_get_baked_in_system_prompt(self): tool = PythonInterpreterTool() tool.name = "fake_tool_name" tool.description = "fake_tool_description" agent = CodeAgent(tools=[tool], model=FakeCodeModel()) agent.run("Empty task") assert 
agent.system_prompt is not None assert f"def {tool.name}(" in agent.system_prompt assert f'"""{tool.description}' in agent.system_prompt def test_module_imports_get_baked_in_system_prompt(self): agent = CodeAgent(tools=[], model=FakeCodeModel()) agent.run("Empty task") for module in BASE_BUILTIN_MODULES: assert module in agent.system_prompt def test_init_agent_with_different_toolsets(self): toolset_1 = [] agent = CodeAgent(tools=toolset_1, model=FakeCodeModel()) assert len(agent.tools) == 1 # when no tools are provided, only the final_answer tool is added by default toolset_2 = [PythonInterpreterTool(), PythonInterpreterTool()] with pytest.raises(ValueError) as e: agent = CodeAgent(tools=toolset_2, model=FakeCodeModel()) assert "Each tool or managed_agent should have a unique name!" in str(e) with pytest.raises(ValueError) as e: agent.name = "python_interpreter" agent.description = "empty" CodeAgent(tools=[PythonInterpreterTool()], model=FakeCodeModel(), managed_agents=[agent]) assert "Each tool or managed_agent should have a unique name!" 
in str(e) # check that python_interpreter base tool does not get added to CodeAgent agent = CodeAgent(tools=[], model=FakeCodeModel(), add_base_tools=True) assert len(agent.tools) == 3 # added final_answer tool + search + visit_webpage # check that python_interpreter base tool gets added to ToolCallingAgent agent = ToolCallingAgent(tools=[], model=FakeCodeModel(), add_base_tools=True) assert len(agent.tools) == 4 # added final_answer tool + search + visit_webpage def test_function_persistence_across_steps(self): agent = CodeAgent( tools=[], model=FakeCodeModelFunctionDef(), max_steps=2, additional_authorized_imports=["numpy"], ) res = agent.run("ok") assert res[0] == 0.5 def test_init_managed_agent(self): agent = CodeAgent(tools=[], model=FakeCodeModelFunctionDef(), name="managed_agent", description="Empty") assert agent.name == "managed_agent" assert agent.description == "Empty" def test_agent_description_gets_correctly_inserted_in_system_prompt(self): managed_agent = CodeAgent( tools=[], model=FakeCodeModelFunctionDef(), name="managed_agent", description="Empty" ) manager_agent = CodeAgent( tools=[], model=FakeCodeModelFunctionDef(), managed_agents=[managed_agent], ) assert "You can also give tasks to team members." not in managed_agent.system_prompt assert "{{managed_agents_descriptions}}" not in managed_agent.system_prompt assert "You can also give tasks to team members." 
in manager_agent.system_prompt def test_replay_shows_logs(self, agent_logger): agent = CodeAgent( tools=[], model=FakeCodeModelImport(), verbosity_level=0, additional_authorized_imports=["numpy"], logger=agent_logger, ) agent.run("Count to 3") str_output = agent_logger.console.export_text() assert "New run" in str_output assert 'final_answer("got' in str_output assert "</code>" in str_output agent = ToolCallingAgent(tools=[PythonInterpreterTool()], model=FakeToolCallModel(), verbosity_level=0) agent.logger = agent_logger agent.run("What is 2 multiplied by 3.6452?") agent.replay() str_output = agent_logger.console.export_text() assert "arguments" in str_output def test_code_nontrivial_final_answer_works(self): class FakeCodeModelFinalAnswer(Model): def generate(self, messages, stop_sequences=None): return ChatMessage( role=MessageRole.ASSISTANT, content="""<code> def nested_answer(): final_answer("Correct!") nested_answer() </code>""", ) agent = CodeAgent(tools=[], model=FakeCodeModelFinalAnswer()) output = agent.run("Count to 3") assert output == "Correct!" def test_transformers_toolcalling_agent(self): @tool def weather_api(location: str, celsius: str = "") -> str: """ Gets the weather in the next days at given location. Secretly this tool does not care about the location, it hates the weather everywhere. Args: location: the location celsius: the temperature type """ return "The weather is UNGODLY with torrential rains and temperatures below -10°C" model = TransformersModel( model_id="HuggingFaceTB/SmolLM2-360M-Instruct", max_new_tokens=100, device_map="auto", do_sample=False, ) agent = ToolCallingAgent(model=model, tools=[weather_api], max_steps=1) task = "What is the weather in Paris? 
" agent.run(task) assert agent.memory.steps[0].task == task assert agent.memory.steps[1].tool_calls[0].name == "weather_api" step_memory_dict = agent.memory.get_succinct_steps()[1] assert step_memory_dict["model_output_message"]["tool_calls"][0]["function"]["name"] == "weather_api" assert step_memory_dict["model_output_message"]["raw"]["completion_kwargs"]["max_new_tokens"] == 100 assert "model_input_messages" in agent.memory.get_full_steps()[1] assert step_memory_dict["token_usage"]["total_tokens"] > 100 assert step_memory_dict["timing"]["duration"] > 0.1 def test_final_answer_checks(self): error_string = "failed with error" def check_always_fails(final_answer, agent_memory): assert False, "Error raised in check" agent = CodeAgent(model=FakeCodeModel(), tools=[], final_answer_checks=[check_always_fails]) agent.run("Dummy task.") assert error_string in str(agent.write_memory_to_messages()) assert "Error raised in check" in str(agent.write_memory_to_messages()) agent = CodeAgent( model=FakeCodeModel(), tools=[], final_answer_checks=[lambda x, y: x == 7.2904], ) output = agent.run("Dummy task.") assert output == 7.2904 # Check that output is correct assert len([step for step in agent.memory.steps if isinstance(step, ActionStep)]) == 2 assert error_string not in str(agent.write_memory_to_messages()) def test_generation_errors_are_raised(self): class FakeCodeModel(Model): def generate(self, messages, stop_sequences=None): assert False, "Generation failed" agent = CodeAgent(model=FakeCodeModel(), tools=[]) with pytest.raises(AgentGenerationError) as e: agent.run("Dummy task.") assert len(agent.memory.steps) == 2 assert "Generation failed" in str(e) def test_planning_step_with_injected_memory(self): """Test that agent properly uses update plan prompts when memory is injected before a run. This test verifies: 1. Planning steps are created with the correct frequency 2. Injected memory is included in planning context 3. 
Messages are properly formatted with expected roles and content """ planning_interval = 1 max_steps = 4 task = "Continuous task" previous_task = "Previous user request" # Create agent with planning capability agent = CodeAgent( tools=[], planning_interval=planning_interval, model=FakeCodeModelPlanning(), max_steps=max_steps, ) # Inject memory before run to simulate existing conversation history previous_step = TaskStep(task=previous_task) agent.memory.steps.append(previous_step) # Run the agent agent.run(task, reset=False) # Extract and validate planning steps planning_steps = [step for step in agent.memory.steps if isinstance(step, PlanningStep)] assert len(planning_steps) > 2, "Expected multiple planning steps to be generated" # Verify first planning step incorporates injected memory first_planning_step = planning_steps[0] input_messages = first_planning_step.model_input_messages # Check message structure and content assert len(input_messages) == 4, ( "First planning step should have 4 messages: system-plan-pre-update + memory + task + user-plan-post-update" ) # Verify system message contains current task system_message = input_messages[0] assert system_message.role == "system", "First message should have system role" assert task in system_message.content[0]["text"], f"System message should contain the current task: '{task}'" # Verify memory message contains previous task memory_message = input_messages[1] assert previous_task in memory_message.content[0]["text"], ( f"Memory message should contain previous task: '{previous_task}'" ) # Verify task message contains current task task_message = input_messages[2] assert task in task_message.content[0]["text"], f"Task message should contain current task: '{task}'" # Verify user message for planning user_message = input_messages[3] assert user_message.role == "user", "Fourth message should have user role" # Verify second planning step has more context from first agent actions second_planning_step = planning_steps[1] 
second_messages = second_planning_step.model_input_messages # Check that conversation history is growing appropriately assert len(second_messages) == 6, "Second planning step should have 6 messages including tool interactions" # Verify all conversation elements are present conversation_text = "".join([msg.content[0]["text"] for msg in second_messages if hasattr(msg, "content")]) assert previous_task in conversation_text, "Previous task should be included in the conversation history" assert task in conversation_text, "Current task should be included in the conversation history" assert "tools" in conversation_text, "Tool interactions should be included in the conversation history" class CustomFinalAnswerTool(FinalAnswerTool): def forward(self, answer) -> str: return answer + "CUSTOM" class MockTool(Tool): def __init__(self, name): self.name = name self.description = "Mock tool description" self.inputs = {} self.output_type = "string" def forward(self): return "Mock tool output" class MockAgent: def __init__(self, name, tools, description="Mock agent description"): self.name = name self.tools = {t.name: t for t in tools} self.description = description class DummyMultiStepAgent(MultiStepAgent): def step(self, memory_step: ActionStep) -> Generator[None]: yield None def initialize_system_prompt(self): pass class FakeLLMModel(Model): def __init__(self, give_token_usage: bool = True): self.give_token_usage = give_token_usage def generate(self, prompt, tools_to_call_from=None, **kwargs): if tools_to_call_from is not None: return ChatMessage( role=MessageRole.ASSISTANT, content="I will call the final_answer tool.", tool_calls=[ ChatMessageToolCall( id="fake_id", type="function", function=ChatMessageToolCallFunction( name="final_answer", arguments={"answer": "This is the final answer."} ), ) ], token_usage=TokenUsage(input_tokens=10, output_tokens=20) if self.give_token_usage else None, ) else: return ChatMessage( role=MessageRole.ASSISTANT, content="""<code> 
final_answer('This is the final answer.')
</code>""",
                token_usage=TokenUsage(input_tokens=10, output_tokens=20) if self.give_token_usage else None,
            )


class TestRunResult:
    def test_backward_compatibility(self):
        """Test that RunResult handles deprecated 'messages' parameter correctly."""
        # Test 1: Using new 'steps' parameter (should work without warning)
        result1 = RunResult(
            output="test output",
            state="success",
            steps=[{"type": "test", "content": "step1"}],
            token_usage=None,
            timing=Timing(start_time=0.0, end_time=1.0),
        )
        assert result1.steps == [{"type": "test", "content": "step1"}]

        # Test property access warning: reading the legacy .messages alias must warn but still
        # return the steps payload.
        with pytest.warns(FutureWarning, match="deprecated"):
            messages = result1.messages
        assert messages == [{"type": "test", "content": "step1"}]

        # Test 2: Using deprecated 'messages' parameter (should show deprecation warning)
        with pytest.warns(FutureWarning, match="deprecated"):
            result2 = RunResult(
                output="test output",
                state="success",
                messages=[{"type": "test", "content": "message1"}],
                token_usage=None,
                timing=Timing(start_time=0.0, end_time=1.0),
            )
        assert result2.steps == [{"type": "test", "content": "message1"}]

        # Test 3: Using both 'steps' and 'messages' (should raise ValueError)
        with pytest.raises(ValueError, match="Cannot specify both"):
            RunResult(
                output="test output",
                state="success",
                steps=[{"type": "test", "content": "step1"}],
                messages=[{"type": "test", "content": "message1"}],
                token_usage=None,
                timing=Timing(start_time=0.0, end_time=1.0),
            )

    # When the model reports no usage, RunResult.token_usage must be None rather than a zeroed object.
    @pytest.mark.parametrize("agent_class", [CodeAgent, ToolCallingAgent])
    def test_no_token_usage(self, agent_class):
        agent = agent_class(
            tools=[],
            model=FakeLLMModel(give_token_usage=False),
            max_steps=1,
            return_full_result=True,
        )
        result = agent.run("Fake task")
        assert isinstance(result, RunResult)
        assert result.output == "This is the final answer."
        assert result.state == "success"
        assert result.token_usage is None
        assert isinstance(result.messages, list)
        assert result.timing.duration > 0

    # The run(return_full_result=...) argument overrides the init-time setting when it is not None;
    # None means "fall back to the constructor value".
    @pytest.mark.parametrize(
        "init_return_full_result,run_return_full_result,expect_runresult",
        [
            (True, None, True),
            (False, None, False),
            (True, False, False),
            (False, True, True),
        ],
    )
    def test_full_result(self, init_return_full_result, run_return_full_result, expect_runresult):
        agent = ToolCallingAgent(
            tools=[],
            model=FakeLLMModel(),
            max_steps=1,
            return_full_result=init_return_full_result,
        )
        result = agent.run("Fake task", return_full_result=run_return_full_result)
        if expect_runresult:
            assert isinstance(result, RunResult)
            assert result.output == "This is the final answer."
            assert result.state == "success"
            assert result.token_usage == TokenUsage(input_tokens=10, output_tokens=20)
            assert isinstance(result.messages, list)
            assert result.timing.duration > 0
        else:
            # Without full-result mode, run() returns just the final answer string.
            assert isinstance(result, str)


class TestMultiStepAgent:
    def test_instantiation_disables_logging_to_terminal(self):
        fake_model = MagicMock()
        agent = DummyMultiStepAgent(tools=[], model=fake_model)
        assert agent.logger.level == -1, "logging to terminal should be disabled for testing using a fixture"

    def test_instantiation_with_prompt_templates(self, prompt_templates):
        # prompt_templates is a pytest fixture; the agent must keep the provided templates verbatim.
        agent = DummyMultiStepAgent(tools=[], model=MagicMock(), prompt_templates=prompt_templates)
        assert agent.prompt_templates == prompt_templates
        assert agent.prompt_templates["system_prompt"] == "This is a test system prompt."
assert "managed_agent" in agent.prompt_templates assert agent.prompt_templates["managed_agent"]["task"] == "Task for {{name}}: {{task}}" assert agent.prompt_templates["managed_agent"]["report"] == "Report for {{name}}: {{final_answer}}" @pytest.mark.parametrize( "tools, expected_final_answer_tool", [([], FinalAnswerTool), ([CustomFinalAnswerTool()], CustomFinalAnswerTool)], ) def test_instantiation_with_final_answer_tool(self, tools, expected_final_answer_tool): agent = DummyMultiStepAgent(tools=tools, model=MagicMock()) assert "final_answer" in agent.tools assert isinstance(agent.tools["final_answer"], expected_final_answer_tool) def test_system_prompt_property(self): """Test that system_prompt property is read-only and calls initialize_system_prompt.""" class SimpleAgent(MultiStepAgent): def initialize_system_prompt(self) -> str: return "Test system prompt" def step(self, memory_step: ActionStep) -> Generator[None]: yield None # Create a simple agent with mocked model model = MagicMock() agent = SimpleAgent(tools=[], model=model) # Test reading the property works and calls initialize_system_prompt assert agent.system_prompt == "Test system prompt" # Test setting the property raises AttributeError with correct message with pytest.raises( AttributeError, match=re.escape( """The 'system_prompt' property is read-only. 
Use 'self.prompt_templates["system_prompt"]' instead.""" ), ): agent.system_prompt = "New system prompt" # assert "read-only" in str(exc_info.value) # assert "Use 'self.prompt_templates[\"system_prompt\"]' instead" in str(exc_info.value) @pytest.mark.parametrize( "step_callbacks, expected_registry_state", [ # Case 0: None as input (initializes empty registry) ( None, { "MemoryStep": 0, "ActionStep": 1, "PlanningStep": 0, "TaskStep": 0, "SystemPromptStep": 0, "FinalAnswerStep": 0, }, # Only monitor.update_metrics is registered for ActionStep ), # Case 1: List of callbacks (registers only for ActionStep: backward compatibility) ( [MagicMock(), MagicMock()], { "MemoryStep": 0, "ActionStep": 3, "PlanningStep": 0, "TaskStep": 0, "SystemPromptStep": 0, "FinalAnswerStep": 0, }, ), # Case 2: Dict mapping specific step types to callbacks ( {ActionStep: MagicMock(), PlanningStep: MagicMock()}, { "MemoryStep": 0, "ActionStep": 2, "PlanningStep": 1, "TaskStep": 0, "SystemPromptStep": 0, "FinalAnswerStep": 0, }, ), # Case 3: Dict with list of callbacks for a step type ( {ActionStep: [MagicMock(), MagicMock()]}, { "MemoryStep": 0, "ActionStep": 3, "PlanningStep": 0, "TaskStep": 0, "SystemPromptStep": 0, "FinalAnswerStep": 0, }, ), # Case 4: Dict with mixed single and list callbacks ( {ActionStep: MagicMock(), MemoryStep: [MagicMock(), MagicMock()]}, { "MemoryStep": 2, "ActionStep": 2, "PlanningStep": 0, "TaskStep": 0, "SystemPromptStep": 0, "FinalAnswerStep": 0, }, ), ], ) def test_setup_step_callbacks(self, step_callbacks, expected_registry_state): """Test that _setup_step_callbacks correctly sets up the callback registry.""" # Create a dummy agent agent = DummyMultiStepAgent(tools=[], model=MagicMock()) # Mock the monitor agent.monitor = MagicMock() # Call the method agent._setup_step_callbacks(step_callbacks) # Check that step_callbacks is a CallbackRegistry assert isinstance(agent.step_callbacks, CallbackRegistry) # Count callbacks for each step type actual_registry_state = 
{} for step_type in [MemoryStep, ActionStep, PlanningStep, TaskStep, SystemPromptStep, FinalAnswerStep]: callbacks = agent.step_callbacks._callbacks.get(step_type, []) actual_registry_state[step_type.__name__] = len(callbacks) # Verify registry state matches expected assert actual_registry_state == expected_registry_state def test_finalize_step_callbacks_with_list(self): # Create mock callbacks callback1 = MagicMock() callback2 = MagicMock() # Create a test agent with a list of callbacks agent = DummyMultiStepAgent(tools=[], model=MagicMock(), step_callbacks=[callback1, callback2]) # Create steps of different types action_step = ActionStep(step_number=1, timing=Timing(start_time=0.0)) planning_step = PlanningStep( timing=Timing(start_time=1.0), model_input_messages=[], model_output_message=ChatMessage(role="assistant", content="Test plan"), plan="Test planning step", ) # Test with ActionStep agent._finalize_step(action_step) # Verify all callbacks were called callback1.assert_called_once_with(action_step, agent=agent) callback2.assert_called_once_with(action_step, agent=agent) # Reset mocks callback1.reset_mock() callback2.reset_mock() # Test with PlanningStep agent._finalize_step(planning_step) # Verify all callbacks were called again with the planning step callback1.assert_not_called() callback2.assert_not_called() def test_finalize_step_callbacks_by_type(self): # Create mock callbacks for different step types action_step_callback = MagicMock() action_step_callback_2 = MagicMock() planning_step_callback = MagicMock() step_callback = MagicMock() # Register callbacks for different step types step_callbacks = { ActionStep: [action_step_callback, action_step_callback_2], PlanningStep: planning_step_callback, MemoryStep: step_callback, } agent = DummyMultiStepAgent(tools=[], model=MagicMock(), step_callbacks=step_callbacks) # Create steps of different types action_step = ActionStep(step_number=1, timing=Timing(start_time=0.0)) planning_step = PlanningStep( 
            timing=Timing(start_time=1.0),
            model_input_messages=[],
            model_output_message=ChatMessage(role="assistant", content="Test plan"),
            plan="Test planning step",
        )

        # Test with ActionStep
        agent._finalize_step(action_step)

        # Verify correct callbacks were called: both ActionStep callbacks fire, plus the MemoryStep
        # callback (ActionStep is a MemoryStep subclass); the PlanningStep callback must not fire.
        action_step_callback.assert_called_once_with(action_step, agent=agent)
        action_step_callback_2.assert_called_once_with(action_step, agent=agent)
        step_callback.assert_called_once_with(action_step, agent=agent)
        planning_step_callback.assert_not_called()

        # Reset mocks
        action_step_callback.reset_mock()
        action_step_callback_2.reset_mock()
        planning_step_callback.reset_mock()
        step_callback.reset_mock()

        # Test with PlanningStep
        agent._finalize_step(planning_step)

        # Verify correct callbacks were called
        planning_step_callback.assert_called_once_with(planning_step, agent=agent)
        step_callback.assert_called_once_with(planning_step, agent=agent)
        action_step_callback.assert_not_called()
        action_step_callback_2.assert_not_called()

    # Even when the model produces no usable tool call / code action, the raw model thoughts must
    # still be surfaced in the logs so the user can see what the model said.
    def test_logs_display_thoughts_even_if_error(self):
        class FakeJsonModelNoCall(Model):
            def generate(self, messages, stop_sequences=None, tools_to_call_from=None):
                return ChatMessage(
                    role=MessageRole.ASSISTANT,
                    content="""I don't want to call tools today""",
                    tool_calls=None,
                    raw="""I don't want to call tools today""",
                )

        agent_toolcalling = ToolCallingAgent(model=FakeJsonModelNoCall(), tools=[], max_steps=1, verbosity_level=10)
        with agent_toolcalling.logger.console.capture() as capture:
            agent_toolcalling.run("Dummy task")
        assert "don't" in capture.get() and "want" in capture.get()

        class FakeCodeModelNoCall(Model):
            def generate(self, messages, stop_sequences=None):
                return ChatMessage(
                    role=MessageRole.ASSISTANT,
                    content="""I don't want to write an action today""",
                )

        agent_code = CodeAgent(model=FakeCodeModelNoCall(), tools=[], max_steps=1, verbosity_level=10)
        with agent_code.logger.console.capture() as capture:
            agent_code.run("Dummy task")
        assert "don't" in capture.get() and "want" in capture.get()

    def test_step_number(self):
        fake_model = MagicMock()
        fake_model.generate.return_value = ChatMessage(
            role=MessageRole.ASSISTANT,
            content="Model output.",
            tool_calls=None,
            raw="Model output.",
            token_usage=None,
        )

        max_steps = 2
        agent = CodeAgent(tools=[], model=fake_model, max_steps=max_steps)
        assert hasattr(agent, "step_number"), "step_number attribute should be defined"
        assert agent.step_number == 0, "step_number should be initialized to 0"
        agent.run("Test task")
        assert hasattr(agent, "step_number"), "step_number attribute should be defined"
        # The fake model never produces a final answer, so the run exhausts max_steps and the
        # counter ends one past the last executed step.
        assert agent.step_number == max_steps + 1, "step_number should be max_steps + 1 after run method is called"

    # The placeholder texts in the expected messages (e.g. "INITIAL_PLAN_USER_PROMPT") are swapped
    # for the actual rendered templates inside the test body before comparison.
    @pytest.mark.parametrize(
        "step, expected_messages_list",
        [
            (
                1,
                [
                    [
                        ChatMessage(
                            role=MessageRole.USER, content=[{"type": "text", "text": "INITIAL_PLAN_USER_PROMPT"}]
                        ),
                    ],
                ],
            ),
            (
                2,
                [
                    [
                        ChatMessage(
                            role=MessageRole.SYSTEM,
                            content=[{"type": "text", "text": "UPDATE_PLAN_SYSTEM_PROMPT"}],
                        ),
                        ChatMessage(
                            role=MessageRole.USER,
                            content=[{"type": "text", "text": "UPDATE_PLAN_USER_PROMPT"}],
                        ),
                    ],
                ],
            ),
        ],
    )
    def test_planning_step(self, step, expected_messages_list):
        fake_model = MagicMock()
        agent = CodeAgent(
            tools=[],
            model=fake_model,
        )
        task = "Test task"

        # _generate_planning_step yields intermediate events; the PlanningStep is the last item.
        planning_step = list(agent._generate_planning_step(task, is_first_step=(step == 1), step=step))[-1]
        expected_message_texts = {
            "INITIAL_PLAN_USER_PROMPT": populate_template(
                agent.prompt_templates["planning"]["initial_plan"],
                variables=dict(
                    task=task,
                    tools=agent.tools,
                    managed_agents=agent.managed_agents,
                    answer_facts=planning_step.model_output_message.content,
                ),
            ),
            "UPDATE_PLAN_SYSTEM_PROMPT": populate_template(
                agent.prompt_templates["planning"]["update_plan_pre_messages"], variables=dict(task=task)
            ),
            "UPDATE_PLAN_USER_PROMPT": populate_template(
                agent.prompt_templates["planning"]["update_plan_post_messages"],
                variables=dict(
                    task=task,
                    tools=agent.tools,
                    managed_agents=agent.managed_agents,
                    facts_update=planning_step.model_output_message.content,
remaining_steps=agent.max_steps - step, ), ), } for expected_messages in expected_messages_list: for expected_message in expected_messages: expected_message.content[0]["text"] = expected_message_texts[expected_message.content[0]["text"]] assert isinstance(planning_step, PlanningStep) expected_model_input_messages = expected_messages_list[0] model_input_messages = planning_step.model_input_messages assert isinstance(model_input_messages, list) assert len(model_input_messages) == len(expected_model_input_messages) # 2 for message, expected_message in zip(model_input_messages, expected_model_input_messages): assert isinstance(message, ChatMessage) assert message.role in MessageRole.__members__.values() assert message.role == expected_message.role assert isinstance(message.content, list) for content, expected_content in zip(message.content, expected_message.content): assert content == expected_content # Test calls to model assert len(fake_model.generate.call_args_list) == 1 for call_args, expected_messages in zip(fake_model.generate.call_args_list, expected_messages_list): assert len(call_args.args) == 1 messages = call_args.args[0] assert isinstance(messages, list) assert len(messages) == len(expected_messages) for message, expected_message in zip(messages, expected_messages): assert isinstance(message, ChatMessage) assert message.role in MessageRole.__members__.values() assert message.role == expected_message.role assert isinstance(message.content, list) for content, expected_content in zip(message.content, expected_message.content): assert content == expected_content @pytest.mark.parametrize( "expected_messages_list", [ [ [ ChatMessage( role=MessageRole.SYSTEM, content=[{"type": "text", "text": "FINAL_ANSWER_SYSTEM_PROMPT"}], ), ChatMessage( role=MessageRole.USER, content=[{"type": "text", "text": "FINAL_ANSWER_USER_PROMPT"}], ), ] ], [ [ ChatMessage( role=MessageRole.SYSTEM, content=[ {"type": "text", "text": "FINAL_ANSWER_SYSTEM_PROMPT"}, {"type": "image", 
"image": "image1.png"}, ], ), ChatMessage( role=MessageRole.USER, content=[{"type": "text", "text": "FINAL_ANSWER_USER_PROMPT"}], ), ] ], ], ) def test_provide_final_answer(self, expected_messages_list): fake_model = MagicMock() fake_model.generate.return_value = ChatMessage( role=MessageRole.ASSISTANT, content="Final answer.", tool_calls=None, raw="Final answer.", token_usage=None, ) agent = CodeAgent( tools=[], model=fake_model, ) task = "Test task" final_answer = agent.provide_final_answer(task).content expected_message_texts = { "FINAL_ANSWER_SYSTEM_PROMPT": agent.prompt_templates["final_answer"]["pre_messages"], "FINAL_ANSWER_USER_PROMPT": populate_template( agent.prompt_templates["final_answer"]["post_messages"], variables=dict(task=task) ), } for expected_messages in expected_messages_list: for expected_message in expected_messages: for expected_content in expected_message.content: if "text" in expected_content: expected_content["text"] = expected_message_texts[expected_content["text"]] assert final_answer == "Final answer." 
# Test calls to model assert len(fake_model.generate.call_args_list) == 1 for call_args, expected_messages in zip(fake_model.generate.call_args_list, expected_messages_list): assert len(call_args.args) == 1 messages = call_args.args[0] assert isinstance(messages, list) assert len(messages) == len(expected_messages) for message, expected_message in zip(messages, expected_messages): assert isinstance(message, ChatMessage) assert message.role in MessageRole.__members__.values() assert message.role == expected_message.role assert isinstance(message.content, list) for content, expected_content in zip(message.content, expected_message.content): assert content == expected_content def test_interrupt(self): fake_model = MagicMock() fake_model.generate.return_value = ChatMessage( role=MessageRole.ASSISTANT, content="Model output.", tool_calls=None, raw="Model output.", token_usage=None, ) def interrupt_callback(memory_step, agent): agent.interrupt() agent = CodeAgent( tools=[], model=fake_model, step_callbacks=[interrupt_callback], ) with pytest.raises(AgentError) as e: agent.run("Test task") assert "Agent interrupted" in str(e) @pytest.mark.parametrize( "tools, managed_agents, name, expectation", [ # Valid case: no duplicates ( [MockTool("tool1"), MockTool("tool2")], [MockAgent("agent1", [MockTool("tool3")])], "test_agent", does_not_raise(), ), # Invalid case: duplicate tool names ([MockTool("tool1"), MockTool("tool1")], [], "test_agent", pytest.raises(ValueError)), # Invalid case: tool name same as managed agent name ( [MockTool("tool1")], [MockAgent("tool1", [MockTool("final_answer")])], "test_agent", pytest.raises(ValueError), ), # Valid case: tool name same as managed agent's tool name ([MockTool("tool1")], [MockAgent("agent1", [MockTool("tool1")])], "test_agent", does_not_raise()), # Invalid case: duplicate managed agent name and managed agent tool name ([MockTool("tool1")], [], "tool1", pytest.raises(ValueError)), # Valid case: duplicate tool names across managed 
agents ( [MockTool("tool1")], [ MockAgent("agent1", [MockTool("tool2"), MockTool("final_answer")]), MockAgent("agent2", [MockTool("tool2"), MockTool("final_answer")]), ], "test_agent", does_not_raise(), ), ], ) def test_validate_tools_and_managed_agents(self, tools, managed_agents, name, expectation): fake_model = MagicMock() with expectation: DummyMultiStepAgent( tools=tools, model=fake_model, name=name, managed_agents=managed_agents, ) def test_from_dict(self): # Create a test agent dictionary agent_dict = { "model": {"class": "TransformersModel", "data": {"model_id": "test/model"}}, "tools": [ { "name": "valid_tool_function", "code": 'from smolagents import Tool\nfrom typing import Any, Optional\n\nclass SimpleTool(Tool):\n name = "valid_tool_function"\n description = "A valid tool function."\n inputs = {"input":{"type":"string","description":"Input string."}}\n output_type = "string"\n\n def forward(self, input: str) -> str:\n """A valid tool function.\n\n Args:\n input (str): Input string.\n """\n return input.upper()', "requirements": {"smolagents"}, } ], "managed_agents": {}, "prompt_templates": EMPTY_PROMPT_TEMPLATES, "max_steps": 15, "verbosity_level": 2, "planning_interval": 3, "name": "test_agent", "description": "Test agent description", } # Call from_dict with patch("smolagents.models.TransformersModel") as mock_model_class: mock_model_instance = mock_model_class.from_dict.return_value agent = DummyMultiStepAgent.from_dict(agent_dict) # Verify the agent was created correctly assert agent.model == mock_model_instance assert mock_model_class.from_dict.call_args.args[0] == {"model_id": "test/model"} assert agent.max_steps == 15 assert agent.logger.level == 2 assert agent.planning_interval == 3 assert agent.name == "test_agent" assert agent.description == "Test agent description" # Verify the tool was created correctly assert sorted(agent.tools.keys()) == ["final_answer", "valid_tool_function"] assert agent.tools["valid_tool_function"].name == 
"valid_tool_function" assert agent.tools["valid_tool_function"].description == "A valid tool function." assert agent.tools["valid_tool_function"].inputs == { "input": {"type": "string", "description": "Input string."} } assert agent.tools["valid_tool_function"]("test") == "TEST" # Test overriding with kwargs with patch("smolagents.models.TransformersModel") as mock_model_class: agent = DummyMultiStepAgent.from_dict(agent_dict, max_steps=30) assert agent.max_steps == 30 def test_multiagent_to_dict_from_dict_roundtrip(self): """Test that to_dict() and from_dict() work correctly for agents with managed agents.""" # Create a managed agent managed_agent = CodeAgent( tools=[], model=MagicMock(), name="managed_agent", description="A managed agent for testing", max_steps=5 ) # Create a main agent with the managed agent main_agent = ToolCallingAgent( tools=[], managed_agents=[managed_agent], model=MagicMock(), name="main_agent", description="Main agent with managed agents", max_steps=10, ) # Convert to dict agent_dict = main_agent.to_dict() # Verify managed_agents structure in dict assert "managed_agents" in agent_dict assert isinstance(agent_dict["managed_agents"], list) assert len(agent_dict["managed_agents"]) == 1 managed_agent_dict = agent_dict["managed_agents"][0] assert managed_agent_dict["name"] == "managed_agent" assert managed_agent_dict["class"] == "CodeAgent" assert managed_agent_dict["description"] == "A managed agent for testing" assert managed_agent_dict["max_steps"] == 5 # Test round-trip: from_dict should recreate the agent # Mock the model classes directly instead of patching smolagents.models.MagicMock with patch("smolagents.agents.importlib.import_module") as mock_import: # Mock the models module mock_models_module = MagicMock() mock_model_class = MagicMock() mock_model_instance = MagicMock() mock_model_class.from_dict.return_value = mock_model_instance mock_models_module.MagicMock = mock_model_class # Mock the agents module mock_agents_module = 
MagicMock() mock_agents_module.CodeAgent = CodeAgent mock_agents_module.ToolCallingAgent = ToolCallingAgent def side_effect(module_name): if module_name == "smolagents.models": return mock_models_module elif module_name == "smolagents.agents": return mock_agents_module return MagicMock() mock_import.side_effect = side_effect recreated_agent = ToolCallingAgent.from_dict(agent_dict) # Verify the recreated agent has the same structure assert recreated_agent.name == "main_agent" assert recreated_agent.description == "Main agent with managed agents" assert recreated_agent.max_steps == 10 assert len(recreated_agent.managed_agents) == 1 recreated_managed_agent = list(recreated_agent.managed_agents.values())[0] assert recreated_managed_agent.name == "managed_agent" assert recreated_managed_agent.description == "A managed agent for testing" assert recreated_managed_agent.max_steps == 5 class TestToolCallingAgent: def test_toolcalling_agent_instructions(self): agent = ToolCallingAgent(tools=[], model=MagicMock(), instructions="Test instructions") assert agent.instructions == "Test instructions" assert "Test instructions" in agent.system_prompt def test_toolcalling_agent_passes_both_tools_and_managed_agents(self, test_tool): """Test that both tools and managed agents are passed to the model.""" managed_agent = MagicMock() managed_agent.name = "managed_agent" model = MagicMock() model.generate.return_value = ChatMessage( role=MessageRole.ASSISTANT, content="", tool_calls=[ ChatMessageToolCall( id="call_0", type="function", function=ChatMessageToolCallFunction(name="test_tool", arguments={"input": "test_value"}), ) ], ) agent = ToolCallingAgent(tools=[test_tool], managed_agents=[managed_agent], model=model) # Run the agent one step to trigger the model call next(agent.run("Test task", stream=True)) # Check that the model was called with both tools and managed agents: # - Get all tool_to_call_from names passed to the model tools_to_call_from_names = [tool.name for tool in 
model.generate.call_args.kwargs["tools_to_call_from"]] # - Verify both regular tools and managed agents are included assert "test_tool" in tools_to_call_from_names # The regular tool assert "managed_agent" in tools_to_call_from_names # The managed agent assert "final_answer" in tools_to_call_from_names # The final_answer tool (added by default) @patch("huggingface_hub.InferenceClient") def test_toolcalling_agent_api(self, mock_inference_client): mock_client = mock_inference_client.return_value mock_response = mock_client.chat_completion.return_value mock_response.choices[0].message = ChatCompletionOutputMessage( role=MessageRole.ASSISTANT, content='{"name": "weather_api", "arguments": {"location": "Paris", "date": "today"}}', ) mock_response.usage.prompt_tokens = 10 mock_response.usage.completion_tokens = 20 model = InferenceClientModel(model_id="test-model") from smolagents import tool @tool def weather_api(location: str, date: str) -> str: """ Gets the weather in the next days at given location. Args: location: the location date: the date """ return f"The weather in {location} on date:{date} is sunny." agent = ToolCallingAgent(model=model, tools=[weather_api], max_steps=1) agent.run("What's the weather in Paris?") assert agent.memory.steps[0].task == "What's the weather in Paris?" assert agent.memory.steps[1].tool_calls[0].name == "weather_api" assert agent.memory.steps[1].tool_calls[0].arguments == {"location": "Paris", "date": "today"} assert agent.memory.steps[1].observations == "The weather in Paris on date:today is sunny." mock_response.choices[0].message = ChatCompletionOutputMessage( role=MessageRole.ASSISTANT, content=None, tool_calls=[ ChatCompletionOutputToolCall( function=ChatCompletionOutputFunctionDefinition( name="weather_api", arguments='{"location": "Paris", "date": "today"}' ), id="call_0", type="function", ) ], ) agent.run("What's the weather in Paris?") assert agent.memory.steps[0].task == "What's the weather in Paris?" 
assert agent.memory.steps[1].tool_calls[0].name == "weather_api" assert agent.memory.steps[1].tool_calls[0].arguments == {"location": "Paris", "date": "today"} assert agent.memory.steps[1].observations == "The weather in Paris on date:today is sunny." @patch("openai.OpenAI") def test_toolcalling_agent_stream_logs_multiple_tool_calls_observations(self, mock_openai_client, test_tool): """Test that ToolCallingAgent with stream_outputs=True logs the observations of all tool calls when multiple are called.""" mock_client = mock_openai_client.return_value from smolagents import OpenAIServerModel # Mock streaming response with multiple tool calls mock_deltas = [ ChoiceDelta(role=MessageRole.ASSISTANT), ChoiceDelta( tool_calls=[ ChoiceDeltaToolCall( index=0, id="call_1", function=ChoiceDeltaToolCallFunction(name="test_tool"), type="function", ) ] ), ChoiceDelta( tool_calls=[ChoiceDeltaToolCall(index=0, function=ChoiceDeltaToolCallFunction(arguments='{"in'))] ), ChoiceDelta( tool_calls=[ChoiceDeltaToolCall(index=0, function=ChoiceDeltaToolCallFunction(arguments='put"'))] ), ChoiceDelta( tool_calls=[ChoiceDeltaToolCall(index=0, function=ChoiceDeltaToolCallFunction(arguments=': "out'))] ), ChoiceDelta( tool_calls=[ChoiceDeltaToolCall(index=0, function=ChoiceDeltaToolCallFunction(arguments="put1"))] ), ChoiceDelta( tool_calls=[ChoiceDeltaToolCall(index=0, function=ChoiceDeltaToolCallFunction(arguments='"}'))] ), ChoiceDelta( tool_calls=[ ChoiceDeltaToolCall( index=1, id="call_2", function=ChoiceDeltaToolCallFunction(name="test_tool"), type="function", ) ] ), ChoiceDelta( tool_calls=[ChoiceDeltaToolCall(index=1, function=ChoiceDeltaToolCallFunction(arguments='{"in'))] ), ChoiceDelta( tool_calls=[ChoiceDeltaToolCall(index=1, function=ChoiceDeltaToolCallFunction(arguments='put"'))] ), ChoiceDelta( tool_calls=[ChoiceDeltaToolCall(index=1, function=ChoiceDeltaToolCallFunction(arguments=': "out'))] ), ChoiceDelta( tool_calls=[ChoiceDeltaToolCall(index=1, 
function=ChoiceDeltaToolCallFunction(arguments="put2"))] ), ChoiceDelta( tool_calls=[ChoiceDeltaToolCall(index=1, function=ChoiceDeltaToolCallFunction(arguments='"}'))] ), ] class MockChoice: def __init__(self, delta): self.delta = delta class MockChunk: def __init__(self, delta): self.choices = [MockChoice(delta)] self.usage = None mock_client.chat.completions.create.return_value = (MockChunk(delta) for delta in mock_deltas) # Mock usage for non-streaming fallback mock_usage = MagicMock() mock_usage.prompt_tokens = 10 mock_usage.completion_tokens = 20 model = OpenAIServerModel(model_id="fakemodel") agent = ToolCallingAgent(model=model, tools=[test_tool], max_steps=1, stream_outputs=True) agent.run("Dummy task") assert agent.memory.steps[1].model_output_message.tool_calls[0].function.name == "test_tool" assert agent.memory.steps[1].model_output_message.tool_calls[1].function.name == "test_tool" assert agent.memory.steps[1].observations == "Processed: output1\nProcessed: output2" @patch("openai.OpenAI") def test_toolcalling_agent_final_answer_cannot_be_called_with_parallel_tool_calls( self, mock_openai_client, test_tool ): """Test that ToolCallingAgent with stream_outputs=True returns the all tool calls when multiple are called.""" mock_client = mock_openai_client.return_value from smolagents import OpenAIServerModel class ExtendedChatMessage(ChatMessage): def __init__(self, *args, usage, **kwargs): super().__init__(*args, **kwargs) def model_dump(self, include=None): return super().model_dump_json() class MockChoice: def __init__(self, chat_message): self.message = chat_message class MockChatCompletion: def __init__(self, chat_message): self.choices = [MockChoice(chat_message)] self.usage = MockTokenUsage(prompt_tokens=10, completion_tokens=20) class MockTokenUsage: def __init__(self, prompt_tokens, completion_tokens): self.prompt_tokens = prompt_tokens self.completion_tokens = completion_tokens from dataclasses import asdict class 
ExtendedChatCompletionOutputMessage(ChatCompletionOutputMessage): def __init__(self, *args, usage, **kwargs): super().__init__(*args, **kwargs) self.usage = usage def model_dump(self, include=None): print("TOOL CALLS", self.tool_calls) return { "role": self.role, "content": self.content, "tool_calls": [asdict(tc) for tc in self.tool_calls], } mock_client.chat.completions.create.return_value = MockChatCompletion( ExtendedChatCompletionOutputMessage( role=MessageRole.ASSISTANT, content=None, tool_calls=[ ChatMessageToolCall( id="call_0", type="function", function=ChatMessageToolCallFunction(name="test_tool", arguments={"input": "out1"}), ), ChatMessageToolCall( id="1", type="function", function=ChatMessageToolCallFunction(name="final_answer", arguments={"answer": "out1"}), ), ], usage=MockTokenUsage(prompt_tokens=10, completion_tokens=20), ) ) model = OpenAIServerModel(model_id="fakemodel") agent = ToolCallingAgent(model=model, tools=[test_tool], max_steps=1) agent.run("Dummy task") assert agent.memory.steps[1].error is not None assert ( "do not perform any other tool calls than the final answer tool call!" 
in agent.memory.steps[1].error.message ) @patch("huggingface_hub.InferenceClient") def test_toolcalling_agent_api_misformatted_output(self, mock_inference_client): """Test that even misformatted json blobs don't interrupt the run for a ToolCallingAgent.""" mock_client = mock_inference_client.return_value mock_response = mock_client.chat_completion.return_value mock_response.choices[0].message = ChatCompletionOutputMessage( role=MessageRole.ASSISTANT, content='{"name": weather_api", "arguments": {"location": "Paris", "date": "today"}}', ) mock_response.usage.prompt_tokens = 10 mock_response.usage.completion_tokens = 20 model = InferenceClientModel(model_id="test-model") logger = AgentLogger(console=Console(markup=False, no_color=True)) agent = ToolCallingAgent(model=model, tools=[], max_steps=2, verbosity_level=1, logger=logger) with agent.logger.console.capture() as capture: agent.run("What's the weather in Paris?") assert agent.memory.steps[0].task == "What's the weather in Paris?" assert agent.memory.steps[1].tool_calls is None assert "The JSON blob you used is invalid" in agent.memory.steps[1].error.message assert "Error while parsing" in capture.get() assert len(agent.memory.steps) == 4 @pytest.mark.skip( reason="Test is not properly implemented (GH-1255) because fake_tools should have the same name. 
" "Additionally, it uses CodeAgent instead of ToolCallingAgent (GH-1409)" ) def test_change_tools_after_init(self): from smolagents import tool @tool def fake_tool_1() -> str: """Fake tool""" return "1" @tool def fake_tool_2() -> str: """Fake tool""" return "2" class FakeCodeModel(Model): def generate(self, messages, stop_sequences=None): return ChatMessage(role=MessageRole.ASSISTANT, content="<code>\nfinal_answer(fake_tool_1())\n</code>") agent = CodeAgent(tools=[fake_tool_1], model=FakeCodeModel()) agent.tools["final_answer"] = CustomFinalAnswerTool() agent.tools["fake_tool_1"] = fake_tool_2 answer = agent.run("Fake task.") assert answer == "2CUSTOM" def test_custom_final_answer_with_custom_inputs(self, test_tool): class CustomFinalAnswerToolWithCustomInputs(FinalAnswerTool): inputs = { "answer1": {"type": "string", "description": "First part of the answer."}, "answer2": {"type": "string", "description": "Second part of the answer."}, } def forward(self, answer1: str, answer2: str) -> str: return answer1 + " and " + answer2 model = MagicMock() model.generate.return_value = ChatMessage( role=MessageRole.ASSISTANT, content=None, tool_calls=[ ChatMessageToolCall( id="call_0", type="function", function=ChatMessageToolCallFunction( name="final_answer", arguments={"answer1": "1", "answer2": "2"} ), ), ], ) agent = ToolCallingAgent(tools=[test_tool, CustomFinalAnswerToolWithCustomInputs()], model=model) answer = agent.run("Fake task.") assert answer == "1 and 2" assert agent.memory.steps[-1].model_output_message.tool_calls[0].function.name == "final_answer" @pytest.mark.parametrize( "test_case", [ # Case 0: Single valid tool call { "tool_calls": [ ChatMessageToolCall( id="call_1", type="function", function=ChatMessageToolCallFunction(name="test_tool", arguments={"input": "test_value"}), ) ], "expected_observations": "Processed: test_value", "expected_final_outputs": ["Processed: test_value"], "expected_error": None, }, # Case 1: Multiple tool calls { "tool_calls": [ 
ChatMessageToolCall( id="call_1", type="function", function=ChatMessageToolCallFunction(name="test_tool", arguments={"input": "value1"}), ), ChatMessageToolCall( id="call_2", type="function", function=ChatMessageToolCallFunction(name="test_tool", arguments={"input": "value2"}), ), ], "expected_observations": "Processed: value1\nProcessed: value2", "expected_final_outputs": ["Processed: value1", "Processed: value2"], "expected_error": None, }, # Case 2: Invalid tool name { "tool_calls": [ ChatMessageToolCall( id="call_1", type="function", function=ChatMessageToolCallFunction(name="nonexistent_tool", arguments={"input": "test"}), ) ], "expected_error": AgentToolExecutionError, }, # Case 3: Tool execution error { "tool_calls": [ ChatMessageToolCall( id="call_1", type="function", function=ChatMessageToolCallFunction(name="test_tool", arguments={"input": "error"}), ) ], "expected_error": AgentToolExecutionError, }, # Case 4: Empty tool calls list { "tool_calls": [], "expected_observations": "", "expected_final_outputs": [], "expected_error": None, }, # Case 5: Final answer call { "tool_calls": [ ChatMessageToolCall( id="call_1", type="function", function=ChatMessageToolCallFunction( name="final_answer", arguments={"answer": "This is the final answer"} ), ) ], "expected_observations": "This is the final answer", "expected_final_outputs": ["This is the final answer"], "expected_error": None, }, # Case 6: Invalid arguments { "tool_calls": [ ChatMessageToolCall( id="call_1", type="function", function=ChatMessageToolCallFunction(name="test_tool", arguments={"wrong_param": "value"}), ) ], "expected_error": AgentToolCallError, }, ], ) def test_process_tool_calls(self, test_case, test_tool): # Create a ToolCallingAgent instance with the test tool agent = ToolCallingAgent(tools=[test_tool], model=MagicMock()) # Create chat message with the specified tool calls for process_tool_calls chat_message = ChatMessage(role=MessageRole.ASSISTANT, content="", 
tool_calls=test_case["tool_calls"]) # Create a memory step for process_tool_calls memory_step = ActionStep(step_number=10, timing="mock_timing", model_output="") # Process tool calls if test_case["expected_error"]: with pytest.raises(test_case["expected_error"]): list(agent.process_tool_calls(chat_message, memory_step)) else: final_outputs = list(agent.process_tool_calls(chat_message, memory_step)) assert memory_step.model_output == "" assert memory_step.observations == test_case["expected_observations"] assert [ final_output.output for final_output in final_outputs if isinstance(final_output, ToolOutput) ] == test_case["expected_final_outputs"] # Verify memory step tool calls were updated correctly if test_case["tool_calls"]: assert memory_step.tool_calls == [ ToolCall(name=tool_call.function.name, arguments=tool_call.function.arguments, id=tool_call.id) for tool_call in test_case["tool_calls"] ] class TestCodeAgent: def test_code_agent_instructions(self): agent = CodeAgent(tools=[], model=MagicMock(), instructions="Test instructions") assert agent.instructions == "Test instructions" assert "Test instructions" in agent.system_prompt agent = CodeAgent( tools=[], model=MagicMock(), instructions="Test instructions", use_structured_outputs_internally=True ) assert agent.instructions == "Test instructions" assert "Test instructions" in agent.system_prompt @pytest.mark.parametrize("provide_run_summary", [False, True]) def test_call_with_provide_run_summary(self, provide_run_summary): agent = CodeAgent(tools=[], model=MagicMock(), provide_run_summary=provide_run_summary) assert agent.provide_run_summary is provide_run_summary agent.name = "test_agent" agent.run = MagicMock(return_value="Test output") agent.write_memory_to_messages = MagicMock( return_value=[ChatMessage(role=MessageRole.ASSISTANT, content="Test summary")] ) result = agent("Test request") expected_summary = "Here is the final answer from your managed agent 'test_agent':\nTest output" if 
provide_run_summary: expected_summary += ( "\n\nFor more detail, find below a summary of this agent's work:\n" "<summary_of_work>\n\nTest summary\n---\n</summary_of_work>" ) assert result == expected_summary def test_code_agent_image_output(self): from PIL import Image from smolagents import tool @tool def image_generation_tool(): """Generate an image""" return Image.new("RGB", (100, 100), color="red") agent = CodeAgent(tools=[image_generation_tool], model=FakeCodeModelImageGeneration(), verbosity_level=1) output = agent.run("Make me an image from the latest trend on google trends.") assert isinstance(output, Image.Image) def test_errors_logging(self): class FakeCodeModel(Model): def generate(self, messages, stop_sequences=None): return ChatMessage(role=MessageRole.ASSISTANT, content="<code>\nsecret=3;['1', '2'][secret]\n</code>") agent = CodeAgent(tools=[], model=FakeCodeModel(), verbosity_level=1) with agent.logger.console.capture() as capture: agent.run("Test request") assert "secret\\\\" in repr(capture.get()) def test_missing_import_triggers_advice_in_error_log(self): # Set explicit verbosity level to 1 to override the default verbosity level of -1 set in CI fixture agent = CodeAgent(tools=[], model=FakeCodeModelImport(), verbosity_level=1) with agent.logger.console.capture() as capture: agent.run("Count to 3") str_output = capture.get() assert "`additional_authorized_imports`" in str_output.replace("\n", "") def test_errors_show_offending_line_and_error(self): agent = CodeAgent(tools=[PythonInterpreterTool()], model=FakeCodeModelError()) output = agent.run("What is 2 multiplied by 3.6452?") assert isinstance(output, AgentText) assert output == "got an error" assert "Code execution failed at line 'error_function()'" in str(agent.memory.steps[1].error) assert "ValueError" in str(agent.memory.steps) def test_error_saves_previous_print_outputs(self): agent = CodeAgent(tools=[PythonInterpreterTool()], model=FakeCodeModelError()) agent.run("What is 2 multiplied by 
3.6452?") assert "Flag!" in str(agent.memory.steps[1].observations) def test_syntax_error_show_offending_lines(self): agent = CodeAgent(tools=[PythonInterpreterTool()], model=FakeCodeModelSyntaxError()) output = agent.run("What is 2 multiplied by 3.6452?") assert isinstance(output, AgentText) assert output == "got an error" assert ' print("Failing due to unexpected indent")' in str(agent.memory.steps) assert isinstance(agent.memory.steps[-2], ActionStep) assert agent.memory.steps[-2].code_action == dedent("""a = 2 b = a * 2 print("Failing due to unexpected indent") print("Ok, calculation done!")""") def test_end_code_appending(self): # Checking original output message orig_output = FakeCodeModelNoReturn().generate([]) assert not orig_output.content.endswith("<end_code>") # Checking the step output agent = CodeAgent( tools=[PythonInterpreterTool()], model=FakeCodeModelNoReturn(), max_steps=1, ) answer = agent.run("What is 2 multiplied by 3.6452?") assert answer memory_steps = agent.memory.steps actions_steps = [s for s in memory_steps if isinstance(s, ActionStep)] outputs = [s.model_output for s in actions_steps if s.model_output] assert outputs assert all(o.endswith("</code>") for o in outputs) messages = [s.model_output_message for s in actions_steps if s.model_output_message] assert messages assert all(m.content.endswith("</code>") for m in messages) @pytest.mark.skip( reason="Test is not properly implemented (GH-1255) because fake_tools should have the same name. 
" ) def test_change_tools_after_init(self): from smolagents import tool @tool def fake_tool_1() -> str: """Fake tool""" return "1" @tool def fake_tool_2() -> str: """Fake tool""" return "2" class FakeCodeModel(Model): def generate(self, messages, stop_sequences=None): return ChatMessage(role=MessageRole.ASSISTANT, content="<code>\nfinal_answer(fake_tool_1())\n</code>") agent = CodeAgent(tools=[fake_tool_1], model=FakeCodeModel()) agent.tools["final_answer"] = CustomFinalAnswerTool() agent.tools["fake_tool_1"] = fake_tool_2 answer = agent.run("Fake task.") assert answer == "2CUSTOM" def test_local_python_executor_with_custom_functions(self): model = MagicMock() model.generate.return_value = ChatMessage( role=MessageRole.ASSISTANT, content="", tool_calls=None, raw="", token_usage=None, ) agent = CodeAgent(tools=[], model=model, executor_kwargs={"additional_functions": {"open": open}}) agent.run("Test run") assert "open" in agent.python_executor.static_tools @pytest.mark.parametrize("agent_dict_version", ["v1.9", "v1.10", "v1.20"]) def test_from_folder(self, agent_dict_version, get_agent_dict): agent_dict = get_agent_dict(agent_dict_version) with ( patch("smolagents.agents.Path") as mock_path, patch("smolagents.models.InferenceClientModel") as mock_model, ): import json mock_path.return_value.__truediv__.return_value.read_text.return_value = json.dumps(agent_dict) mock_model.from_dict.return_value.model_id = "Qwen/Qwen2.5-Coder-32B-Instruct" agent = CodeAgent.from_folder("ignored_dummy_folder") assert isinstance(agent, CodeAgent) assert agent.name == "test_agent" assert agent.description == "dummy description" assert agent.max_steps == 10 assert agent.planning_interval == 2 assert agent.additional_authorized_imports == ["pandas"] assert "pandas" in agent.authorized_imports assert agent.executor_type == "local" assert agent.executor_kwargs == {} assert agent.max_print_outputs_length is None assert agent.managed_agents == {} assert set(agent.tools.keys()) == 
{"final_answer"} assert agent.model == mock_model.from_dict.return_value assert mock_model.from_dict.call_args.args[0]["model_id"] == "Qwen/Qwen2.5-Coder-32B-Instruct" assert agent.model.model_id == "Qwen/Qwen2.5-Coder-32B-Instruct" assert agent.logger.level == 2 assert agent.prompt_templates["system_prompt"] == "dummy system prompt" def test_from_dict(self): # Create a test agent dictionary agent_dict = { "model": {"class": "InferenceClientModel", "data": {"model_id": "Qwen/Qwen2.5-Coder-32B-Instruct"}}, "tools": [ { "name": "valid_tool_function", "code": 'from smolagents import Tool\nfrom typing import Any, Optional\n\nclass SimpleTool(Tool):\n name = "valid_tool_function"\n description = "A valid tool function."\n inputs = {"input":{"type":"string","description":"Input string."}}\n output_type = "string"\n\n def forward(self, input: str) -> str:\n """A valid tool function.\n\n Args:\n input (str): Input string.\n """\n return input.upper()', "requirements": {"smolagents"}, } ], "managed_agents": {}, "prompt_templates": EMPTY_PROMPT_TEMPLATES, "max_steps": 15, "verbosity_level": 2, "use_structured_output": False, "planning_interval": 3, "name": "test_code_agent", "description": "Test code agent description", "authorized_imports": ["pandas", "numpy"], "executor_type": "local", "executor_kwargs": {"max_print_outputs_length": 10_000}, "max_print_outputs_length": 1000, } # Call from_dict with patch("smolagents.models.InferenceClientModel") as mock_model_class: mock_model_instance = mock_model_class.from_dict.return_value agent = CodeAgent.from_dict(agent_dict) # Verify the agent was created correctly with CodeAgent-specific parameters assert agent.model == mock_model_instance assert agent.additional_authorized_imports == ["pandas", "numpy"] assert agent.executor_type == "local" assert agent.executor_kwargs == {"max_print_outputs_length": 10_000} assert agent.max_print_outputs_length == 1000 # Test with missing optional parameters minimal_agent_dict = { "model": 
{"class": "InferenceClientModel", "data": {"model_id": "Qwen/Qwen2.5-Coder-32B-Instruct"}}, "tools": [], "managed_agents": {}, } with patch("smolagents.models.InferenceClientModel"): agent = CodeAgent.from_dict(minimal_agent_dict) # Verify defaults are used assert agent.max_steps == 20 # default from MultiStepAgent.__init__ # Test overriding with kwargs with patch("smolagents.models.InferenceClientModel"): agent = CodeAgent.from_dict( agent_dict, additional_authorized_imports=["requests"], executor_kwargs={"max_print_outputs_length": 5_000}, ) assert agent.additional_authorized_imports == ["requests"] assert agent.executor_kwargs == {"max_print_outputs_length": 5_000} def test_custom_final_answer_with_custom_inputs(self): class CustomFinalAnswerToolWithCustomInputs(FinalAnswerTool): inputs = { "answer1": {"type": "string", "description": "First part of the answer."}, "answer2": {"type": "string", "description": "Second part of the answer."}, } def forward(self, answer1: str, answer2: str) -> str: return answer1 + "CUSTOM" + answer2 model = MagicMock() model.generate.return_value = ChatMessage( role=MessageRole.ASSISTANT, content="<code>\nfinal_answer(answer1='1', answer2='2')\n</code>" ) agent = CodeAgent(tools=[CustomFinalAnswerToolWithCustomInputs()], model=model) answer = agent.run("Fake task.") assert answer == "1CUSTOM2" def test_use_structured_outputs_internally(self): expected_code = "print('Hello, world!')" model = MagicMock() # mock structured output generation model.generate.return_value = ChatMessage( role=MessageRole.ASSISTANT, content=json.dumps({"thought": "LLM-generated thought", "code": expected_code}), ) agent = CodeAgent( tools=[], model=model, use_structured_outputs_internally=True ) # Use structured outputs internally tool_call: ToolCall = next( agent._step_stream(ActionStep(step_number=1, timing="mock_timing", model_output="")) ) assert tool_call.arguments == expected_code class TestMultiAgents: def test_multiagents_save(self, tmp_path): model 
= InferenceClientModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct", max_tokens=2096, temperature=0.5) web_agent = ToolCallingAgent( model=model, tools=[DuckDuckGoSearchTool(max_results=2), VisitWebpageTool()], name="web_agent", description="does web searches", ) code_agent = CodeAgent(model=model, tools=[], name="useless", description="does nothing in particular") agent = CodeAgent( model=model, tools=[], additional_authorized_imports=["pandas", "datetime"], managed_agents=[web_agent, code_agent], max_print_outputs_length=1000, executor_type="local", executor_kwargs={"max_print_outputs_length": 10_000}, ) agent.save(tmp_path) expected_structure = { "managed_agents": { "useless": {"tools": {"files": ["final_answer.py"]}, "files": ["agent.json", "prompts.yaml"]}, "web_agent": { "tools": {"files": ["final_answer.py", "visit_webpage.py", "web_search.py"]}, "files": ["agent.json", "prompts.yaml"], }, }, "tools": {"files": ["final_answer.py"]}, "files": ["app.py", "requirements.txt", "agent.json", "prompts.yaml"], } def verify_structure(current_path: Path, structure: dict): for dir_name, contents in structure.items(): if dir_name != "files": # For directories, verify they exist and recurse into them dir_path = current_path / dir_name assert dir_path.exists(), f"Directory {dir_path} does not exist" assert dir_path.is_dir(), f"{dir_path} is not a directory" verify_structure(dir_path, contents) else: # For files, verify each exists in the current path for file_name in contents: file_path = current_path / file_name assert file_path.exists(), f"File {file_path} does not exist" assert file_path.is_file(), f"{file_path} is not a file" verify_structure(tmp_path, expected_structure) # Test that re-loaded agents work as expected. 
agent2 = CodeAgent.from_folder(tmp_path, planning_interval=5) assert agent2.planning_interval == 5 # Check that kwargs are used assert set(agent2.authorized_imports) == set(["pandas", "datetime"] + BASE_BUILTIN_MODULES) assert agent2.max_print_outputs_length == 1000 assert agent2.executor_type == "local" assert agent2.executor_kwargs == {"max_print_outputs_length": 10_000} assert ( agent2.managed_agents["web_agent"].tools["web_search"].max_results == 10 ) # For now tool init parameters are forgotten assert agent2.model.kwargs["temperature"] == pytest.approx(0.5) def test_multiagents(self): class FakeModelMultiagentsManagerAgent(Model): model_id = "fake_model" def generate( self, messages, stop_sequences=None, tools_to_call_from=None, ): if tools_to_call_from is not None: if len(messages) < 3: return ChatMessage( role=MessageRole.ASSISTANT, content="", tool_calls=[ ChatMessageToolCall( id="call_0", type="function", function=ChatMessageToolCallFunction( name="search_agent", arguments="Who is the current US president?", ), ) ], ) else: assert "Report on the current US president" in str(messages) return ChatMessage( role=MessageRole.ASSISTANT, content="", tool_calls=[ ChatMessageToolCall( id="call_0", type="function", function=ChatMessageToolCallFunction( name="final_answer", arguments="Final report." ), ) ], ) else: if len(messages) < 3: return ChatMessage( role=MessageRole.ASSISTANT, content=""" Thought: Let's call our search agent. <code> result = search_agent("Who is the current US president?") </code> """, ) else: assert "Report on the current US president" in str(messages) return ChatMessage( role=MessageRole.ASSISTANT, content=""" Thought: Let's return the report. 
<code> final_answer("Final report.") </code> """, ) manager_model = FakeModelMultiagentsManagerAgent() class FakeModelMultiagentsManagedAgent(Model): model_id = "fake_model" def generate( self, messages, tools_to_call_from=None, stop_sequences=None, ): return ChatMessage( role=MessageRole.ASSISTANT, content="Here is the secret content: FLAG1", tool_calls=[ ChatMessageToolCall( id="call_0", type="function", function=ChatMessageToolCallFunction( name="final_answer", arguments="Report on the current US president", ), ) ], ) managed_model = FakeModelMultiagentsManagedAgent() web_agent = ToolCallingAgent( tools=[], model=managed_model, max_steps=10, name="search_agent", description="Runs web searches for you. Give it your request as an argument. Make the request as detailed as needed, you can ask for thorough reports", verbosity_level=2, ) manager_code_agent = CodeAgent( tools=[], model=manager_model, managed_agents=[web_agent], additional_authorized_imports=["time", "numpy", "pandas"], ) report = manager_code_agent.run("Fake question.") assert report == "Final report." manager_toolcalling_agent = ToolCallingAgent( tools=[], model=manager_model, managed_agents=[web_agent], ) with web_agent.logger.console.capture() as capture: report = manager_toolcalling_agent.run("Fake question.") assert report == "Final report." 
assert "FLAG1" in capture.get() # Check that managed agent's output is properly logged # Test that visualization works with manager_toolcalling_agent.logger.console.capture() as capture: manager_toolcalling_agent.visualize() assert "├──" in capture.get() @pytest.fixture def prompt_templates(): return { "system_prompt": "This is a test system prompt.", "managed_agent": {"task": "Task for {{name}}: {{task}}", "report": "Report for {{name}}: {{final_answer}}"}, "planning": { "initial_plan": "The plan.", "update_plan_pre_messages": "custom", "update_plan_post_messages": "custom", }, "final_answer": {"pre_messages": "custom", "post_messages": "custom"}, } @pytest.mark.parametrize( "arguments", [ {}, {"arg": "bar"}, {None: None}, [1, 2, 3], ], ) def test_tool_calling_agents_raises_tool_call_error_being_invoked_with_wrong_arguments(arguments): @tool def _sample_tool(prompt: str) -> str: """Tool that returns same string Args: prompt: The string to return Returns: The same string """ return prompt agent = ToolCallingAgent(model=FakeToolCallModel(), tools=[_sample_tool]) with pytest.raises(AgentToolCallError): agent.execute_tool_call(_sample_tool.name, arguments) def test_tool_calling_agents_raises_agent_execution_error_when_tool_raises(): @tool def _sample_tool(_: str) -> float: """Tool that fails Args: _: The pointless string Returns: Some number """ return 1 / 0 agent = ToolCallingAgent(model=FakeToolCallModel(), tools=[_sample_tool]) with pytest.raises(AgentExecutionError): agent.execute_tool_call(_sample_tool.name, "sample")
smolagents/tests/test_agents.py/0
{ "file_path": "smolagents/tests/test_agents.py", "repo_id": "smolagents", "token_count": 46503 }
288
import ast from textwrap import dedent import pytest from smolagents.default_tools import ( DuckDuckGoSearchTool, GoogleSearchTool, SpeechToTextTool, VisitWebpageTool, WebSearchTool, ) from smolagents.tool_validation import MethodChecker, validate_tool_attributes from smolagents.tools import Tool, tool UNDEFINED_VARIABLE = "undefined_variable" @pytest.mark.parametrize( "tool_class", [DuckDuckGoSearchTool, GoogleSearchTool, SpeechToTextTool, VisitWebpageTool, WebSearchTool] ) def test_validate_tool_attributes_with_default_tools(tool_class): assert validate_tool_attributes(tool_class) is None, f"failed for {tool_class.name} tool" class ValidTool(Tool): name = "valid_tool" description = "A valid tool" inputs = {"input": {"type": "string", "description": "input"}} output_type = "string" simple_attr = "string" dict_attr = {"key": "value"} def __init__(self, optional_param="default"): super().__init__() self.param = optional_param def forward(self, input: str) -> str: return input.upper() @tool def valid_tool_function(input: str) -> str: """A valid tool function. Args: input (str): Input string. 
""" return input.upper() @pytest.mark.parametrize("tool_class", [ValidTool, valid_tool_function.__class__]) def test_validate_tool_attributes_valid(tool_class): assert validate_tool_attributes(tool_class) is None class InvalidToolName(Tool): name = "invalid tool name" description = "Tool with invalid name" inputs = {"input": {"type": "string", "description": "input"}} output_type = "string" def __init__(self): super().__init__() def forward(self, input: str) -> str: return input class InvalidToolComplexAttrs(Tool): name = "invalid_tool" description = "Tool with complex class attributes" inputs = {"input": {"type": "string", "description": "input"}} output_type = "string" complex_attr = [x for x in range(3)] # Complex class attribute def __init__(self): super().__init__() def forward(self, input: str) -> str: return input class InvalidToolRequiredParams(Tool): name = "invalid_tool" description = "Tool with required params" inputs = {"input": {"type": "string", "description": "input"}} output_type = "string" def __init__(self, required_param, kwarg1=1): # No default value super().__init__() self.param = required_param def forward(self, input: str) -> str: return input class InvalidToolNonLiteralDefaultParam(Tool): name = "invalid_tool" description = "Tool with non-literal default parameter value" inputs = {"input": {"type": "string", "description": "input"}} output_type = "string" def __init__(self, default_param=UNDEFINED_VARIABLE): # UNDEFINED_VARIABLE as default is non-literal super().__init__() self.default_param = default_param def forward(self, input: str) -> str: return input class InvalidToolUndefinedNames(Tool): name = "invalid_tool" description = "Tool with undefined names" inputs = {"input": {"type": "string", "description": "input"}} output_type = "string" def forward(self, input: str) -> str: return UNDEFINED_VARIABLE # Undefined name @pytest.mark.parametrize( "tool_class, expected_error", [ ( InvalidToolName, "Class attribute 'name' must be a valid 
Python identifier and not a reserved keyword, found 'invalid tool name'", ), (InvalidToolComplexAttrs, "Complex attributes should be defined in __init__, not as class attributes"), (InvalidToolRequiredParams, "Parameters in __init__ must have default values, found required parameters"), ( InvalidToolNonLiteralDefaultParam, "Parameters in __init__ must have literal default values, found non-literal defaults", ), (InvalidToolUndefinedNames, "Name 'UNDEFINED_VARIABLE' is undefined"), ], ) def test_validate_tool_attributes_exceptions(tool_class, expected_error): with pytest.raises(ValueError, match=expected_error): validate_tool_attributes(tool_class) class MultipleAssignmentsTool(Tool): name = "multiple_assignments_tool" description = "Tool with multiple assignments" inputs = {"input": {"type": "string", "description": "input"}} output_type = "string" def __init__(self): super().__init__() def forward(self, input: str) -> str: a, b = "1", "2" return a + b def test_validate_tool_attributes_multiple_assignments(): validate_tool_attributes(MultipleAssignmentsTool) @tool def tool_function_with_multiple_assignments(input: str) -> str: """A valid tool function. Args: input (str): Input string. """ a, b = "1", "2" return input.upper() + a + b @pytest.mark.parametrize("tool_instance", [MultipleAssignmentsTool(), tool_function_with_multiple_assignments]) def test_tool_to_dict_validation_with_multiple_assignments(tool_instance): tool_instance.to_dict() class TestMethodChecker: def test_multiple_assignments(self): source_code = dedent( """ def forward(self) -> str: a, b = "1", "2" return a + b """ ) method_checker = MethodChecker(set()) method_checker.visit(ast.parse(source_code)) assert method_checker.errors == []
smolagents/tests/test_tool_validation.py/0
{ "file_path": "smolagents/tests/test_tool_validation.py", "repo_id": "smolagents", "token_count": 2093 }
289
# Rust builder FROM lukemathwalker/cargo-chef:latest-rust-1.85.1 AS chef WORKDIR /usr/src ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse FROM chef AS planner COPY Cargo.lock Cargo.lock COPY Cargo.toml Cargo.toml COPY rust-toolchain.toml rust-toolchain.toml COPY proto proto COPY benchmark benchmark COPY router router COPY backends backends COPY launcher launcher RUN cargo chef prepare --recipe-path recipe.json FROM chef AS builder RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ python3.11-dev RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \ curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \ unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \ unzip -o $PROTOC_ZIP -d /usr/local 'include/*' && \ rm -f $PROTOC_ZIP COPY --from=planner /usr/src/recipe.json recipe.json RUN cargo chef cook --profile release-opt --recipe-path recipe.json ARG GIT_SHA ARG DOCKER_LABEL COPY Cargo.lock Cargo.lock COPY Cargo.toml Cargo.toml COPY rust-toolchain.toml rust-toolchain.toml COPY proto proto COPY benchmark benchmark COPY router router COPY backends backends COPY launcher launcher RUN cargo build --profile release-opt --frozen FROM rocm/dev-ubuntu-22.04:6.3.1-complete AS base ARG HIPBLASLT_BRANCH="4d40e36" ARG HIPBLAS_COMMON_BRANCH="7c1566b" ARG LEGACY_HIPBLASLT_OPTION= ARG RCCL_BRANCH="648a58d" ARG RCCL_REPO="https://github.com/ROCm/rccl" ARG TRITON_BRANCH="e5be006" ARG TRITON_REPO="https://github.com/triton-lang/triton.git" ARG PYTORCH_BRANCH="3a585126" ARG PYTORCH_VISION_BRANCH="v0.19.1" ARG PYTORCH_REPO="https://github.com/pytorch/pytorch.git" ARG PYTORCH_VISION_REPO="https://github.com/pytorch/vision.git" ARG FA_BRANCH="b7d29fb" ARG FA_REPO="https://github.com/ROCm/flash-attention.git" ARG AITER_BRANCH="21d47a9" ARG AITER_REPO="https://github.com/ROCm/aiter.git" ENV PATH=/opt/rocm/llvm/bin:$PATH ENV ROCM_PATH=/opt/rocm ENV LD_LIBRARY_PATH=/opt/rocm/lib:/usr/local/lib: ARG 
PYTORCH_ROCM_ARCH=gfx90a;gfx942 ENV PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH} ARG PYTHON_VERSION=3.11 RUN mkdir -p /app WORKDIR /app ENV DEBIAN_FRONTEND=noninteractive # Install Python and other dependencies RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ build-essential \ ca-certificates \ ccache \ curl \ git \ ninja-build \ cmake \ software-properties-common \ python3.11-dev \ python3.11-venv && \ rm -rf /var/lib/apt/lists/* COPY --from=ghcr.io/astral-sh/uv:0.5.31 /uv /uvx /bin/ ENV PATH="$PATH:/root/.local/bin" RUN uv python install ${PYTHON_VERSION} RUN uv venv --python ${PYTHON_VERSION} && uv pip install pip setuptools packaging ENV VIRTUAL_ENV=/usr/src/.venv/ ENV PATH="$PATH:/usr/src/.venv/bin/" RUN . .venv/bin/activate && pip install -U packaging cmake ninja wheel setuptools pybind11 Cython FROM base AS build_hipblaslt ARG HIPBLASLT_BRANCH ARG HIPBLAS_COMMON_BRANCH # Set to "--legacy_hipblas_direct" for ROCm<=6.2 ARG LEGACY_HIPBLASLT_OPTION RUN git clone https://github.com/ROCm/hipBLAS-common.git RUN . .venv/bin/activate && cd hipBLAS-common \ && git checkout ${HIPBLAS_COMMON_BRANCH} \ && mkdir build \ && cd build \ && cmake .. \ && make package \ && dpkg -i ./*.deb RUN git clone https://github.com/ROCm/hipBLASLt RUN . .venv/bin/activate && cd hipBLASLt \ && git checkout ${HIPBLASLT_BRANCH} \ && ./install.sh -d --architecture ${PYTORCH_ROCM_ARCH} ${LEGACY_HIPBLASLT_OPTION} \ && cd build/release \ && make package RUN mkdir -p /app/install && cp /app/hipBLASLt/build/release/*.deb /app/hipBLAS-common/build/*.deb /app/install FROM base AS build_rccl ARG RCCL_BRANCH ARG RCCL_REPO RUN git clone ${RCCL_REPO} RUN . .venv/bin/activate && cd rccl \ && git checkout ${RCCL_BRANCH} \ && ./install.sh -p --amdgpu_targets ${PYTORCH_ROCM_ARCH} RUN mkdir -p /app/install && cp /app/rccl/build/release/*.deb /app/install FROM base AS build_triton ARG TRITON_BRANCH ARG TRITON_REPO RUN git clone ${TRITON_REPO} RUN . 
.venv/bin/activate && cd triton \ && git checkout ${TRITON_BRANCH} \ && cd python \ && python3 setup.py bdist_wheel --dist-dir=dist RUN mkdir -p /app/install && cp /app/triton/python/dist/*.whl /app/install FROM base AS build_amdsmi RUN . .venv/bin/activate && cd /opt/rocm/share/amd_smi \ && pip wheel . --wheel-dir=dist RUN mkdir -p /app/install && cp /opt/rocm/share/amd_smi/dist/*.whl /app/install FROM base AS build_pytorch ARG PYTORCH_BRANCH ARG PYTORCH_VISION_BRANCH ARG PYTORCH_REPO ARG PYTORCH_VISION_REPO ARG FA_BRANCH ARG FA_REPO RUN git clone ${PYTORCH_REPO} pytorch RUN . .venv/bin/activate && cd pytorch && git checkout ${PYTORCH_BRANCH} && \ pip install -r requirements.txt && git submodule update --init --recursive \ && python3 tools/amd_build/build_amd.py \ && CMAKE_PREFIX_PATH=$(python3 -c 'import sys; print(sys.prefix)') python3 setup.py bdist_wheel --dist-dir=dist \ && pip install dist/*.whl RUN git clone ${PYTORCH_VISION_REPO} vision RUN . .venv/bin/activate && cd vision && git checkout ${PYTORCH_VISION_BRANCH} \ && python3 setup.py bdist_wheel --dist-dir=dist \ && pip install dist/*.whl RUN git clone ${FA_REPO} RUN . 
.venv/bin/activate && cd flash-attention \ && git checkout ${FA_BRANCH} \ && git submodule update --init \ && MAX_JOBS=64 GPU_ARCHS=${PYTORCH_ROCM_ARCH} python3 setup.py bdist_wheel --dist-dir=dist RUN mkdir -p /app/install && cp /app/pytorch/dist/*.whl /app/install \ && cp /app/vision/dist/*.whl /app/install \ && cp /app/flash-attention/dist/*.whl /app/install FROM base AS final RUN --mount=type=bind,from=build_hipblaslt,src=/app/install/,target=/install \ dpkg -i /install/*deb \ && sed -i 's/, hipblaslt-dev \(.*\), hipcub-dev/, hipcub-dev/g' /var/lib/dpkg/status \ && sed -i 's/, hipblaslt \(.*\), hipfft/, hipfft/g' /var/lib/dpkg/status RUN --mount=type=bind,from=build_rccl,src=/app/install/,target=/install \ dpkg -i /install/*deb \ && sed -i 's/, rccl-dev \(.*\), rocalution/, rocalution/g' /var/lib/dpkg/status \ && sed -i 's/, rccl \(.*\), rocalution/, rocalution/g' /var/lib/dpkg/status RUN --mount=type=bind,from=build_triton,src=/app/install/,target=/install \ . .venv/bin/activate && \ pip install /install/*.whl RUN --mount=type=bind,from=build_amdsmi,src=/app/install/,target=/install \ . .venv/bin/activate && \ pip install /install/*.whl RUN --mount=type=bind,from=build_pytorch,src=/app/install/,target=/install \ . .venv/bin/activate && \ pip install /install/*.whl ARG AITER_REPO ARG AITER_BRANCH RUN git clone --recursive ${AITER_REPO} RUN . .venv/bin/activate && cd aiter \ && git checkout ${AITER_BRANCH} \ && git submodule update --init --recursive \ && pip install -r requirements.txt \ && PREBUILD_KERNELS=1 GPU_ARCHS=gfx942 python3 setup.py develop && pip show aiter RUN rm -rf /var/lib/apt/lists/* FROM final AS kernel-builder # # Build vllm kernels FROM kernel-builder AS vllm-builder COPY server/Makefile-vllm Makefile RUN . .venv/bin/activate && pip install setuptools_scm # Build specific version of vllm RUN . 
.venv/bin/activate && make build-vllm-rocm # Build Transformers CUDA kernels (gpt-neox and bloom) FROM kernel-builder AS custom-kernels-builder COPY server/custom_kernels/ . RUN . .venv/bin/activate && python3 setup.py bdist_wheel --dist-dir=dist # Build exllama kernels FROM kernel-builder AS exllama-kernels-builder COPY server/exllama_kernels/ . RUN . .venv/bin/activate && python3 setup.py bdist_wheel --dist-dir=dist # Build exllama v2 kernels FROM kernel-builder AS exllamav2-kernels-builder COPY server/exllamav2_kernels/ . RUN . .venv/bin/activate && python3 setup.py bdist_wheel --dist-dir=dist FROM kernel-builder AS marlin-kernels ENV MARLIN_KERNELS_BRANCH=v0.3.6 ENV VLLM_TARGET_DEVICE=rocm RUN . .venv/bin/activate && git clone https://github.com/danieldk/marlin-kernels.git && \ cd marlin-kernels && \ git checkout ${MARLIN_KERNELS_BRANCH} && \ python3 setup.py bdist_wheel --dist-dir=dist FROM kernel-builder AS moe-kernels ENV MOE_KERNELS_BRANCH=v0.8.2 ENV VLLM_TARGET_DEVICE=rocm RUN . 
.venv/bin/activate && git clone https://github.com/danieldk/moe-kernels.git && \ cd moe-kernels && \ git checkout ${MOE_KERNELS_BRANCH} && \ python3 setup.py bdist_wheel --dist-dir=dist FROM final AS base-copy # Text Generation Inference base env ENV HF_HOME=/data \ HF_HUB_ENABLE_HF_TRANSFER=1 \ PORT=80 ENV VIRTUAL_ENV=/app/.venv/ ENV PATH="$PATH:/app/.venv/bin/" # Install server COPY proto proto COPY server server COPY server/Makefile server/Makefile RUN cd server && \ uv pip install grpcio-tools mypy-protobuf && \ uv pip install -e ".[accelerate, compressed-tensors, peft, outlines]" --no-cache-dir && \ make gen-server-raw RUN cd server && \ pwd && \ text-generation-server --help RUN --mount=type=bind,from=vllm-builder,src=/app/vllm/dist,target=/install \ uv pip install /install/*.whl RUN --mount=type=bind,from=custom-kernels-builder,src=/app/dist,target=/install \ uv pip install /install/*.whl RUN --mount=type=bind,from=custom-kernels-builder,src=/app/dist,target=/install \ uv pip install /install/*.whl RUN --mount=type=bind,from=exllama-kernels-builder,src=/app/dist,target=/install \ uv pip install /install/*.whl RUN --mount=type=bind,from=exllamav2-kernels-builder,src=/app/dist,target=/install \ uv pip install /install/*.whl RUN --mount=type=bind,from=marlin-kernels,src=/app/marlin-kernels/dist,target=/install \ uv pip install /install/*.whl RUN --mount=type=bind,from=moe-kernels,src=/app/moe-kernels/dist,target=/install \ uv pip install /install/*.whl # Install benchmarker COPY --from=builder /usr/src/target/release-opt/text-generation-benchmark /usr/local/bin/text-generation-benchmark # Install router COPY --from=builder /usr/src/target/release-opt/text-generation-router /usr/local/bin/text-generation-router # Install launcher COPY --from=builder /usr/src/target/release-opt/text-generation-launcher /usr/local/bin/text-generation-launcher # AWS Sagemaker compatible image FROM base AS sagemaker COPY sagemaker-entrypoint.sh entrypoint.sh RUN chmod +x 
entrypoint.sh ENTRYPOINT ["./entrypoint.sh"] # Final image FROM base-copy # Set AS recommended: https://github.com/ROCm/triton/wiki/A-script-to-set-program-execution-environment-in-ROCm ENV HIP_FORCE_DEV_KERNARG=1 # On MI250 and MI300, performances for flash with Triton FA are slightly better than CK. # However, Triton requires a tunning for each prompt length, which is prohibitive. ENV ROCM_USE_FLASH_ATTN_V2_TRITON=0 ENV ROCM_USE_CUSTOM_PAGED_ATTN=1 ENV PYTORCH_TUNABLEOP_TUNING_AFTER_WARMUP=0 ENV VLLM_MOE_PADDING=0 ENV ATTENTION=paged ENV PREFIX_CACHING=0 ENV PREFILL_CHUNKING=0 ENV ROCM_USE_SKINNY_GEMM=1 COPY ./tgi-entrypoint.sh /tgi-entrypoint.sh RUN chmod +x /tgi-entrypoint.sh ENTRYPOINT ["/tgi-entrypoint.sh"] ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/root/.local/share/uv/python/cpython-3.11.11-linux-x86_64-gnu/lib" ENV PYTHONPATH=/app/.venv/lib/python3.11/site-packages # CMD ["--json-output"]
text-generation-inference/Dockerfile_amd/0
{ "file_path": "text-generation-inference/Dockerfile_amd", "repo_id": "text-generation-inference", "token_count": 4709 }
290
#[allow(clippy::derive_partial_eq_without_eq)] mod pb; mod client; mod sharded_client; pub use client::Client; pub use pb::generate::v2::HealthResponse; pub use pb::generate::v2::{ Batch, CachedBatch, FinishReason, GeneratedText, Generation, GrammarType, InfoResponse, NextTokenChooserParameters, Request, StoppingCriteriaParameters, Tokens, }; pub use sharded_client::ShardedClient;
text-generation-inference/backends/client/src/v2/mod.rs/0
{ "file_path": "text-generation-inference/backends/client/src/v2/mod.rs", "repo_id": "text-generation-inference", "token_count": 134 }
291
commit_cuda := d243e9dc7e2c9c2e36a4150ec8e64809cb55c01b commit_rocm := 4e0929e6e4fa0a3d09d358715c288020ea9dc247 build-vllm-cuda: if [ ! -d 'vllm' ]; then \ pip install -U ninja packaging --no-cache-dir && \ git clone https://github.com/Narsil/vllm.git vllm; \ fi cd vllm && git fetch origin && git checkout $(commit_cuda) && python setup.py build install-vllm-cuda: build-vllm-cuda cd vllm && git fetch origin && git checkout $(commit_cuda) && pip install -e . build-vllm-rocm: if [ ! -d 'vllm' ]; then \ pip install -U ninja packaging --no-cache-dir && \ git clone https://github.com/mht-sharma/vllm.git vllm; \ fi cd vllm && git fetch && git checkout $(commit_rocm) && \ PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py build install-vllm-rocm: build-vllm-rocm cd vllm && git fetch && git checkout $(commit_rocm) && \ PYTORCH_ROCM_ARCH="gfx90a;gfx942" pip install -e .
text-generation-inference/backends/gaudi/server/Makefile-vllm/0
{ "file_path": "text-generation-inference/backends/gaudi/server/Makefile-vllm", "repo_id": "text-generation-inference", "token_count": 397 }
292
from .common import ( Seqlen, HPUPagedAttentionMetadata, trim_attn_metadata, trim_seqlen_metadata, _async_h2d_tensor_copy, ) from .hpu import ( SUPPORTS_WINDOWING, attention, paged_attention, paged_attention_mla, set_block_mapping, ) # KVCache needs `reshape_and_cache`, so ensure that it is defined already. from .kv_cache import KVCache, get_kv_scales, KVCompressCache __all__ = [ "attention", "get_kv_scales", "paged_attention", "paged_attention_mla", "set_block_mapping", "SUPPORTS_WINDOWING", "KVCache", "KVCompressCache", "Seqlen", "HPUPagedAttentionMetadata", "trim_seqlen_metadata", "trim_attn_metadata", "_async_h2d_tensor_copy", ]
text-generation-inference/backends/gaudi/server/text_generation_server/layers/attention/__init__.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/layers/attention/__init__.py", "repo_id": "text-generation-inference", "token_count": 344 }
293
# coding=utf-8 # Copyright 2022 HuggingFace Inc. team and BigScience workshop. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BLOOM model.""" import math import os import warnings from typing import Optional, Tuple, Union import torch import torch.distributed import torch.utils.checkpoint from torch import nn from torch.nn import LayerNorm from torch.nn import functional as F from transformers.modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, ) from transformers import BloomConfig, PreTrainedModel from text_generation_server.layers import ( TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, SpeculativeHead, ) CUSTOM_KERNELS_ENABLED = False if ( torch.cuda.is_available() and not os.environ.get("DISABLE_CUSTOM_KERNELS", "False") == "True" ): try: from custom_kernels import fused_bloom_attention_cuda CUSTOM_KERNELS_ENABLED = True except ImportError: pass _CHECKPOINT_FOR_DOC = "bigscience/bloom-560m" _CONFIG_FOR_DOC = "BloomConfig" BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST = [ "bigscience/bigscience-small-testing", "bigscience/bloom-560m", "bigscience/bloom-1b1", "bigscience/bloom-1b7", "bigscience/bloom-3b", "bigscience/bloom-7b1", "bigscience/bloom", ] def _make_causal_mask( input_ids_shape: torch.Size, device: torch.device, past_key_values_length: int ) -> torch.BoolTensor: """ Make causal mask used for self-attention. 
""" batch_size, target_length = input_ids_shape mask = torch.ones( (target_length, target_length + past_key_values_length), dtype=torch.bool, device=device, ) mask = mask.triu(1 + past_key_values_length) expanded_mask = mask.unsqueeze(0).expand( batch_size, target_length, target_length + past_key_values_length ) return expanded_mask def _expand_mask(mask: torch.Tensor, tgt_length: int) -> torch.BoolTensor: """ Expands attention_mask from `[batch_size, src_length]` to `[batch_size, 1, tgt_length, src_length]`. """ batch_size, src_length = mask.shape tgt_length = tgt_length if tgt_length is not None else src_length expanded_mask = ~(mask[:, None, :].to(torch.bool)) return expanded_mask.expand(batch_size, tgt_length, src_length) def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int) -> torch.Tensor: """ Link to paper: https://arxiv.org/abs/2108.12409 Alibi tensor is not causal as the original paper mentions, it relies on a translation invariance of softmax for quick implementation: with l being a tensor, and a fixed value `softmax(l+a) = softmax(l)`. Based on https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742 TODO @thomasw21 this doesn't work as nicely due to the masking strategy, and so masking varies slightly. Args: Returns tensor shaped (batch_size * num_heads, 1, max_seq_len) attention_mask (`torch.Tensor`): Token-wise attention mask, this should be of shape (batch_size, max_seq_len). 
num_heads (`int`, *required*): number of heads dtype (`torch.dtype`, *optional*, default=`torch.bfloat16`): dtype of the output tensor """ batch_size, seq_length = attention_mask.shape closest_power_of_2 = 2 ** math.floor(math.log2(num_heads)) base = torch.tensor( 2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32, ) powers = torch.arange( 1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32 ) slopes = torch.pow(base, powers) if closest_power_of_2 != num_heads: extra_base = torch.tensor( 2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32, ) num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2) extra_powers = torch.arange( 1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32, ) slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0) # Note: alibi will added to the attention bias that will be applied to the query, key product of attention # => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length) # => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length) # => the query_length dimension will then be broadcasted correctly # This is more or less identical to T5's relative position bias: # https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527 arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :] alibi = slopes[..., None] * arange_tensor return alibi # @torch.jit.script def dropout_add( x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool ) -> torch.Tensor: """ Dropout add function Args: x (`torch.tensor`, *required*): input tensor residual (`torch.tensor`, *required*): esidual tensor prob (`float`, *required*): dropout probability training (`bool`, *required*): training mode """ 
out = F.dropout(x, p=prob, training=training) out = residual + out return out # @torch.jit.script # this is shit for unknow reasons. def _split_heads( fused_qkv: torch.Tensor, num_heads: int, head_dim: int ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory storage as `fused_qkv` Args: fused_qkv (`torch.tensor`, *required*): [batch_size, seq_length, num_heads * 3 * head_dim] Returns: query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim] value: [batch_size, seq_length, num_heads, head_dim] """ batch_size, seq_length, three_times_hidden_size = fused_qkv.shape fused_qkv = fused_qkv.view(batch_size, seq_length, num_heads, 3 * head_dim) query_layer, key_layer, value_layer = fused_qkv.split(head_dim, dim=-1) query_layer = query_layer.transpose(1, 2).reshape( batch_size * num_heads, seq_length, head_dim ) key_layer = key_layer.permute(0, 2, 3, 1).reshape( batch_size * num_heads, head_dim, seq_length ) value_layer = value_layer.transpose(1, 2).reshape( batch_size * num_heads, seq_length, head_dim ) return query_layer, key_layer, value_layer # @torch.jit.script def _merge_heads(x: torch.Tensor, num_heads: int, head_dim: int) -> torch.Tensor: """ Merge heads together over the last dimenstion Args: x: (`torch.tensor`, *required*): [batch_size * num_heads, seq_length, head_dim] Returns: torch.tensor: [batch_size, seq_length, num_heads * head_dim] """ # What we want to achieve is: # batch_size * num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads * head_dim batch_size_and_num_heads, seq_length, _ = x.shape batch_size = batch_size_and_num_heads // num_heads # First view to decompose the batch size # batch_size * num_heads, seq_length, head_dim -> batch_size, num_heads, seq_length, head_dim x = x.view(batch_size, num_heads, seq_length, head_dim) # batch_size, num_heads, seq_length, head_dim -> batch_size, 
seq_length, num_heads, head_dim x = x.permute(0, 2, 1, 3) # batch_size, seq_length, num_heads, head_dim -> batch_size, seq_length, num_heads * head_dim return x.reshape(batch_size, seq_length, num_heads * head_dim) class BloomAttention(nn.Module): def __init__(self, prefix, config: BloomConfig, weights): super().__init__() self.pretraining_tp = config.pretraining_tp self.slow_but_exact = config.slow_but_exact self.process_group = weights.process_group self.hidden_size = config.hidden_size self.num_heads = config.n_head self.head_dim = self.hidden_size // self.num_heads self.split_size = self.hidden_size self.hidden_dropout = config.hidden_dropout if self.head_dim * self.num_heads != self.hidden_size: raise ValueError( f"`hidden_size` must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`:" f" {self.num_heads})." ) # Layer-wise attention scaling self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim) self.beta = 1.0 process_group = weights.process_group if self.num_heads % process_group.size() != 0: raise ValueError( f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " f"and `num_shards`: {process_group.size()}" ) self.num_heads = self.num_heads // process_group.size() self.query_key_value = TensorParallelColumnLinear.load( config=config, prefix=f"{prefix}.query_key_value", weights=weights, bias=True, ) self.dense = TensorParallelRowLinear.load( config=config, prefix=f"{prefix}.dense", weights=weights, bias=True ) self.attention_dropout = nn.Dropout(config.attention_dropout) @staticmethod def compute_attention( fused_qkv: torch.Tensor, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]], alibi: torch.Tensor, attention_mask: torch.Tensor, head_mask: Optional[torch.Tensor], beta: float, inv_norm_factor: float, num_heads: int, use_cache: bool, ): batch_size, q_length, three_times_hidden_size = fused_qkv.shape head_dim = three_times_hidden_size // (3 * num_heads) batch_size * num_heads ### TODO @thomasw21: 
this takes quite a bit of time, how do I accelerate that? # 3 x [batch_size, seq_length, num_heads, head_dim] (query_layer, key_layer, value_layer) = _split_heads( fused_qkv, num_heads=num_heads, head_dim=head_dim ) if layer_past is not None: past_key, past_value = layer_past # concatenate along seq_length dimension: # - key: [batch_size * self.num_heads, head_dim, kv_length] # - value: [batch_size * self.num_heads, kv_length, head_dim] past_key = past_key.view(-1, *past_key.shape[-2:]) key_layer = torch.cat((past_key, key_layer), dim=2) past_value = past_value.view(-1, *past_value.shape[-2:]) value_layer = torch.cat((past_value, value_layer), dim=1) _, _, kv_length = key_layer.shape if use_cache is True: present = (key_layer, value_layer) else: present = None ### # [batch_size * num_heads, q_length, kv_length] # we use `torch.Tensor.baddbmm` instead of `torch.baddbmm` as the latter isn't supported by TorchScript v1.11 attention_scores = alibi.baddbmm( batch1=query_layer, batch2=key_layer, beta=beta, alpha=inv_norm_factor, ) # cast attention scores to fp32, compute scaled softmax and cast back to initial dtype - [batch_size, num_heads, q_length, kv_length] input_dtype = attention_scores.dtype # `float16` has a minimum value of -65504.0, whereas `bfloat16` and `float32` have a minimum value of `-3.4e+38` if input_dtype == torch.float16: attention_scores = attention_scores.to(torch.float) # torch.finfo not supported by torch.jit, we temporarily remplace with `-1e34` attn_weights = attention_scores.masked_fill_( attention_mask, torch.finfo(attention_scores.dtype).min ) attention_probs = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to( input_dtype ) # # [batch_size, num_heads, q_length, kv_length] # attention_probs = self.attention_dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask # matmul: [batch_size * num_heads, q_length, head_dim] context_layer = torch.bmm(attention_probs, value_layer, out=query_layer) # change 
view [batch_size, num_heads, q_length, head_dim] context_layer = _merge_heads( context_layer, num_heads=num_heads, head_dim=head_dim ) return context_layer, present, attention_probs def forward( self, hidden_states: torch.Tensor, residual: torch.Tensor, alibi: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, head_mask: Optional[torch.Tensor] = None, use_cache: bool = False, output_attentions: bool = False, ): fused_qkv = self.query_key_value( hidden_states ) # [batch_size, seq_length, 3 x hidden_size] batch_size, q_length, _ = fused_qkv.shape if layer_past is not None: past_key, past_value = layer_past layer_past = ( past_key.view(-1, *past_key.shape[-2:]), past_value.view(-1, *past_value.shape[-2:]), ) if CUSTOM_KERNELS_ENABLED and attention_mask.shape[-1] < 4096: assert self.training is False, "Only foward pass was implemented" assert ( attention_mask.shape[-1] < 4096 ), "Custom kernel support only up to 4096 tokens" ( context_layer, present, attention_probs, ) = fused_bloom_attention_cuda.forward( fused_qkv, layer_past, alibi, attention_mask, head_mask, self.beta, self.inv_norm_factor, self.num_heads, use_cache, ) else: context_layer, present, attention_probs = self.compute_attention( fused_qkv=fused_qkv, layer_past=layer_past, alibi=alibi, attention_mask=attention_mask, head_mask=head_mask, beta=self.beta, inv_norm_factor=self.inv_norm_factor, num_heads=self.num_heads, use_cache=use_cache, ) # aggregate results across tp ranks. 
See here: https://github.com/pytorch/pytorch/issues/76232 if self.pretraining_tp > 1 and self.slow_but_exact: slices = self.hidden_size / self.pretraining_tp output_tensor = torch.zeros_like(context_layer) for i in range(self.pretraining_tp): output_tensor = output_tensor + F.linear( context_layer[:, :, int(i * slices) : int((i + 1) * slices)], self.dense.weight[:, int(i * slices) : int((i + 1) * slices)], ) else: output_tensor = self.dense(context_layer) # output_tensor = dropout_add(output_tensor, residual, self.hidden_dropout, self.training) output_tensor += residual outputs = (output_tensor, present) if output_attentions: outputs += (attention_probs,) return outputs class BloomMLP(nn.Module): def __init__(self, prefix, config: BloomConfig, weights): super().__init__() self.pretraining_tp = config.pretraining_tp self.slow_but_exact = config.slow_but_exact self.dense_h_to_4h = TensorParallelColumnLinear.load( config=config, prefix=f"{prefix}.dense_h_to_4h", weights=weights, bias=True ) self.dense_4h_to_h = TensorParallelRowLinear.load( config=config, prefix=f"{prefix}.dense_4h_to_h", weights=weights, bias=True ) self.gelu_impl = torch.nn.GELU(approximate="tanh") self.hidden_dropout = config.hidden_dropout def forward( self, hidden_states: torch.Tensor, residual: torch.Tensor ) -> torch.Tensor: hidden_states = self.gelu_impl(self.dense_h_to_4h(hidden_states)) if self.pretraining_tp > 1 and self.slow_but_exact: intermediate_output = torch.zeros_like(residual) slices = self.dense_4h_to_h.weight.shape[-1] / self.pretraining_tp for i in range(self.pretraining_tp): intermediate_output = intermediate_output + F.linear( hidden_states[:, :, int(i * slices) : int((i + 1) * slices)], self.dense_4h_to_h.weight[ :, int(i * slices) : int((i + 1) * slices) ], ) else: intermediate_output = self.dense_4h_to_h(hidden_states) # output = dropout_add(intermediate_output, residual, self.hidden_dropout, self.training) intermediate_output += residual return intermediate_output class 
BloomBlock(nn.Module): def __init__(self, layer_id: int, config: BloomConfig, weights): super().__init__() prefix = f"h.{layer_id}" self.input_layernorm = LayerNorm.load( prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.layer_norm_epsilon, ) self.num_heads = config.n_head self.self_attention = BloomAttention( prefix=f"{prefix}.self_attention", config=config, weights=weights ) self.post_attention_layernorm = LayerNorm.load( prefix=f"{prefix}.post_attention_layernorm", weights=weights, eps=config.layer_norm_epsilon, ) self.mlp = BloomMLP(prefix=f"{prefix}.mlp", config=config, weights=weights) self.apply_residual_connection_post_layernorm = ( config.apply_residual_connection_post_layernorm ) self.hidden_dropout = config.hidden_dropout def forward( self, hidden_states: torch.Tensor, alibi: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, head_mask: Optional[torch.Tensor] = None, use_cache: bool = False, output_attentions: bool = False, ): # hidden_states: [batch_size, seq_length, hidden_size] # Layer norm at the beginning of the transformer layer. layernorm_output = self.input_layernorm(hidden_states) # Layer norm post the self attention. if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states # Self attention. attn_outputs = self.self_attention( layernorm_output, residual, layer_past=layer_past, attention_mask=attention_mask, alibi=alibi, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) attention_output = attn_outputs[0] outputs = attn_outputs[1:] layernorm_output = self.post_attention_layernorm(attention_output) # Get residual if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = attention_output # MLP. 
output = self.mlp(layernorm_output, residual) if use_cache: outputs = (output,) + outputs else: outputs = (output,) + outputs[1:] return outputs # hidden_states, present, attentions class BloomPreTrainedModel(PreTrainedModel): config_class = BloomConfig base_model_prefix = "transformer" _no_split_modules = ["BloomBlock"] @staticmethod def _convert_to_standard_cache( past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]], batch_size: int ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]: """ Standardizes the format of the cache so as to match most implementations, i.e. to tuple(tuple([batch_size, num_heads, ...])) """ batch_size_times_num_heads, head_dim, seq_length = past_key_value[0][0].shape num_heads = batch_size_times_num_heads // batch_size # key: [batch_size * num_heads, head_dim, seq_length] -> [batch_size, num_heads, head_dim, seq_length] # value: [batch_size * num_heads, seq_length, head_dim] -> [batch_size, num_heads, seq_length, head_dim] return tuple( ( layer_past[0].view(batch_size, num_heads, head_dim, seq_length), layer_past[1].view(batch_size, num_heads, seq_length, head_dim), ) for layer_past in past_key_value ) @staticmethod def _convert_to_bloom_cache( past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]], ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]: """ Converts the cache to the format expected by Bloom, i.e. 
to tuple(tuple([batch_size * num_heads, ...])) """ batch_size, num_heads, head_dim, seq_length = past_key_value[0][0].shape batch_size_times_num_heads = batch_size * num_heads # key: [batch_size, num_heads, head_dim, seq_length] -> [batch_size * num_heads, head_dim, seq_length] # value: [batch_size, num_heads, seq_length, head_dim] -> [batch_size * num_heads, seq_length, head_dim] return tuple( ( layer_past[0].view(batch_size_times_num_heads, head_dim, seq_length), layer_past[1].view(batch_size_times_num_heads, seq_length, head_dim), ) for layer_past in past_key_value ) class BloomModel(BloomPreTrainedModel): def __init__(self, config: BloomConfig, weights): super().__init__(config) self.embed_dim = config.hidden_size self.num_heads = config.n_head process_group = weights.process_group self.tp_rank = process_group.rank() self.tp_world_size = process_group.size() self.word_embeddings = TensorParallelEmbedding( prefix="word_embeddings", weights=weights ) self.word_embeddings_layernorm = LayerNorm.load( prefix="word_embeddings_layernorm", weights=weights, eps=config.layer_norm_epsilon, ) # Transformer blocks self.h = nn.ModuleList( [ BloomBlock(layer_id=layer_id, config=config, weights=weights) for layer_id in range(config.num_hidden_layers) ] ) # Final Layer Norm self.ln_f = LayerNorm.load( prefix="ln_f", weights=weights, eps=config.layer_norm_epsilon ) def _prepare_attn_mask( self, attention_mask: torch.Tensor, input_shape: Tuple[int, int], past_key_values_length: int, ) -> torch.BoolTensor: # create causal mask # [batch_size, seq_length] -> [batch_size, tgt_length, src_length] combined_attention_mask = None device = attention_mask.device _, src_length = input_shape if src_length > 1: combined_attention_mask = _make_causal_mask( input_shape, device=device, past_key_values_length=past_key_values_length, ) # [batch_size, seq_length] -> [batch_size, tgt_length, src_length] expanded_attn_mask = _expand_mask(attention_mask, tgt_length=src_length) combined_attention_mask 
= ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask | combined_attention_mask ) return combined_attention_mask def set_input_embeddings(self, new_embeddings: torch.Tensor): self.word_embeddings = new_embeddings def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **deprecated_arguments, ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]: if deprecated_arguments.pop("position_ids", False) is not False: # `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None` warnings.warn( "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. 
You can safely ignore" " passing `position_ids`.", FutureWarning, ) if len(deprecated_arguments) > 0: raise ValueError(f"Got unexpected arguments: {deprecated_arguments}") output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) if input_ids is not None and inputs_embeds is not None: raise ValueError( "You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") if past_key_values is None: past_key_values = tuple([None] * len(self.h)) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape batch_size x num_heads x N x N # head_mask has shape n_layer x batch x num_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.n_layer) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) hidden_states = self.word_embeddings_layernorm(inputs_embeds) presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None # Compute alibi tensor: check build_alibi_tensor documentation seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values[0] is not None: past_key_values_length = past_key_values[0][0].shape[-1] seq_length_with_past = seq_length_with_past + past_key_values_length if attention_mask is None: attention_mask = torch.ones( (batch_size, seq_length_with_past), device=hidden_states.device ) else: attention_mask = 
attention_mask.to(hidden_states.device) alibi = build_alibi_tensor(attention_mask, self.num_heads) causal_mask = self._prepare_attn_mask( attention_mask, input_shape=(batch_size, seq_length), past_key_values_length=past_key_values_length, ) if hasattr(self, "tp_rank"): assert self.num_heads % self.tp_world_size == 0 block_size = self.num_heads // self.tp_world_size alibi = alibi[ :, self.tp_rank * block_size : (self.tp_rank + 1) * block_size ] alibi = alibi.reshape(batch_size * block_size, 1, seq_length_with_past) causal_mask = torch.repeat_interleave(causal_mask, block_size, dim=0) else: alibi = alibi.reshape(batch_size * self.num_heads, 1, seq_length_with_past) causal_mask = torch.repeat_interleave(causal_mask, self.num_heads, dim=0) alibi = alibi.to(hidden_states.dtype) for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = block( hidden_states, layer_past=layer_past, attention_mask=causal_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, alibi=alibi, ) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + ( outputs[2 if use_cache else 1], ) # Add last hidden state hidden_states = self.ln_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, presents, all_hidden_states, all_self_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class BloomForCausalLM(BloomPreTrainedModel): def __init__(self, prefix: str, config, weights): super().__init__(config) self.transformer = BloomModel(config, weights) self.lm_head = SpeculativeHead.load( config, prefix="word_embeddings", 
weights=weights, ) def prepare_inputs_for_generation( self, input_ids: torch.LongTensor, past_key_values: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, **kwargs, ) -> dict: # only last token for input_ids if past is not None if past_key_values: input_ids = input_ids[:, -1].unsqueeze(-1) # the cache may be in the stardard format (e.g. in contrastive search), convert to bloom's format if needed if past_key_values[0][0].shape[0] == input_ids.shape[0]: past_key_values = self._convert_to_bloom_cache(past_key_values) # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, } ) return model_inputs def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **deprecated_arguments, ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ if deprecated_arguments.pop("position_ids", False) is not False: # `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None` warnings.warn( "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore" " passing `position_ids`.", FutureWarning, ) if len(deprecated_arguments) > 0: raise ValueError(f"Got unexpected arguments: {deprecated_arguments}") return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits, speculative_logits = self.lm_head(hidden_states) loss = None if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return ( CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ), speculative_logits, )
text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/bloom_modeling.py", "repo_id": "text-generation-inference", "token_count": 16226 }
294
from typing import Optional, Tuple import warnings import math import torch from torch import nn from transformers.activations import ACT2FN from transformers.modeling_outputs import ( BaseModelOutputWithPooling, ) from transformers import SiglipConfig, SiglipVisionConfig from torch.nn.init import _calculate_fan_in_and_fan_out from text_generation_server.layers.tensor_parallel import ( TensorParallelEmbedding, TensorParallelColumnLinear, TensorParallelRowLinear, ) class SiglipVisionEmbeddings(nn.Module): def __init__(self, prefix, config: SiglipVisionConfig, weights): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, padding="valid", ) self.patch_embedding.weight = nn.Parameter( weights.get_tensor(f"{prefix}.patch_embedding.weight"), requires_grad=False ) self.patch_embedding.bias = nn.Parameter( weights.get_tensor(f"{prefix}.patch_embedding.bias"), requires_grad=False ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches self.position_embedding = TensorParallelEmbedding( prefix=f"{prefix}.position_embedding", weights=weights ) self.register_buffer( "position_ids", torch.arange(self.num_positions, device=weights.device).expand((1, -1)), persistent=False, ) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: patch_embeds = self.patch_embedding( pixel_values ) # shape = [*, width, grid, grid] embeddings = patch_embeds.flatten(2).transpose(1, 2) embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings class SiglipAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, prefix, config, weights): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads 
= config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads self.head_size = self.head_dim if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.num_heads = self.num_heads // weights.process_group.size() self.embed_dim = self.embed_dim // weights.process_group.size() self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.k_proj = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.k_proj", weights=weights, bias=True ) self.v_proj = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.v_proj", weights=weights, bias=True ) self.q_proj = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.q_proj", weights=weights, bias=True ) self.out_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.out_proj", weights=weights, bias=True ) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return ( tensor.view(bsz, seq_len, self.num_heads, self.head_dim) .transpose(1, 2) .contiguous() ) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) # scale post matmul attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) * self.scale if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of 
size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = ( attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask ) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) # upcast attention to fp32 attn_weights = nn.functional.softmax( attn_weights, dim=-1, dtype=torch.float32 ).to(attn_weights.dtype) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_size): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_size)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_size) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights class SiglipMLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = TensorParallelColumnLinear.load( # config.hidden_size, config.intermediate_size prefix=f"{prefix}.fc1", config=config, weights=weights, bias=True ) self.fc2 = TensorParallelRowLinear.load( # config.intermediate_size, config.hidden_size prefix=f"{prefix}.fc2", config=config, weights=weights, bias=True ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class SiglipEncoderLayer(nn.Module): def __init__(self, prefix, config: SiglipConfig, weights): 
super().__init__() self.embed_dim = config.hidden_size self.self_attn = SiglipAttention( prefix=f"{prefix}.self_attn", config=config, weights=weights ) self.layer_norm1 = nn.LayerNorm.load( prefix=f"{prefix}.layer_norm1", weights=weights, eps=config.layer_norm_eps ) self.mlp = SiglipMLP(prefix=f"{prefix}.mlp", config=config, weights=weights) self.layer_norm2 = nn.LayerNorm.load( prefix=f"{prefix}.layer_norm2", weights=weights, eps=config.layer_norm_eps ) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, ) -> Tuple[torch.FloatTensor]: residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states, None class SiglipMultiheadAttentionPoolingHead(nn.Module): """Multihead Attention Pooling.""" def __init__(self, prefix, config: SiglipVisionConfig, weights): super().__init__() self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size)) self.attention = torch.nn.MultiheadAttention( config.hidden_size, config.num_attention_heads, batch_first=True ) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp = SiglipMLP(prefix, config, weights) def forward(self, hidden_state): batch_size = hidden_state.shape[0] probe = self.probe.repeat(batch_size, 1, 1) hidden_state = self.attention(probe, hidden_state, hidden_state)[0] residual = hidden_state hidden_state = self.layernorm(hidden_state) hidden_state = residual + self.mlp(hidden_state) return hidden_state[:, 0] def _trunc_normal_(tensor, mean, std, a, b): # Cut & paste from PyTorch official master until it's in a few official releases - RW # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf def 
norm_cdf(x): # Computes standard normal cumulative distribution function return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 if (mean < a - 2 * std) or (mean > b + 2 * std): warnings.warn( "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " "The distribution of values may be incorrect.", stacklevel=2, ) # Values are generated by using a truncated uniform distribution and # then using the inverse CDF for the normal distribution. # Get upper and lower cdf values lower = norm_cdf((a - mean) / std) upper = norm_cdf((b - mean) / std) # Uniformly fill tensor with values from [l, u], then translate to # [2l-1, 2u-1]. tensor.uniform_(2 * lower - 1, 2 * upper - 1) # Use inverse cdf transform for normal distribution to get truncated # standard normal tensor.erfinv_() # Transform to proper mean, std tensor.mul_(std * math.sqrt(2.0)) tensor.add_(mean) # Clamp to ensure it's in the proper range tensor.clamp_(min=a, max=b) def trunc_normal_tf_( tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0, ) -> torch.Tensor: """Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math:`\\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for generating the random values works best when :math:`a \\leq \text{mean} \\leq b`. NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0 and the result is subsquently scaled and shifted by the mean and std args. 
Args: tensor: an n-dimensional `torch.Tensor` mean: the mean of the normal distribution std: the standard deviation of the normal distribution a: the minimum cutoff value b: the maximum cutoff value """ with torch.no_grad(): _trunc_normal_(tensor, 0, 1.0, a, b) tensor.mul_(std).add_(mean) def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"): fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) if mode == "fan_in": denom = fan_in elif mode == "fan_out": denom = fan_out elif mode == "fan_avg": denom = (fan_in + fan_out) / 2 variance = scale / denom if distribution == "truncated_normal": # constant is stddev of standard normal truncated to (-2, 2) trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978) elif distribution == "normal": with torch.no_grad(): tensor.normal_(std=math.sqrt(variance)) elif distribution == "uniform": bound = math.sqrt(3 * variance) with torch.no_grad(): tensor.uniform_(-bound, bound) else: raise ValueError(f"invalid distribution {distribution}") def lecun_normal_(tensor): variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal") def default_flax_embed_init(tensor): variance_scaling_(tensor, mode="fan_in", distribution="normal") class SiglipEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`SiglipEncoderLayer`]. 
Args: config: SiglipConfig """ def __init__(self, prefix, config: SiglipConfig, weights): super().__init__() self.config = config self.layers = nn.ModuleList( [ SiglipEncoderLayer( prefix=f"{prefix}.layers.{i}", config=config, weights=weights ) for i in range(config.num_hidden_layers) ] ) def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, ): hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): hidden_states, _ = encoder_layer( hidden_states, attention_mask, ) return hidden_states class SiglipVisionTransformer(nn.Module): def __init__(self, prefix, config: SiglipVisionConfig, weights): super().__init__() self.config = config self.embeddings = SiglipVisionEmbeddings( prefix=f"{prefix}.embeddings", config=config, weights=weights ) self.encoder = SiglipEncoder( prefix=f"{prefix}.encoder", config=config, weights=weights ) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, ): if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values) # NOTE: up until this point, the code logits are exactly # the same as the transformers code. The values evaulate # slightly differently in our encoder layer. encoder_outputs = self.encoder( inputs_embeds=hidden_states, ) last_hidden_state = encoder_outputs return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, # pooler_output=pooled_output, # hidden_states=encoder_outputs, )
text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/siglip.py", "repo_id": "text-generation-inference", "token_count": 6676 }
295
# Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.

import os
import glob
import time
import habana_frameworks.torch as htorch
import numpy as np

# Timestamp of the first dbg_trace() call; later traces report time relative to it.
START_TS = None
# Target file for debug traces; tracing is a no-op when this env var is unset.
DBG_TRACE_FILENAME = os.environ.get("DBG_TRACE_FILENAME")
if "GRAPH_VISUALIZATION" in os.environ:
    # Start from a clean slate of HPU graph dumps for this run.
    for f in glob.glob(".graph_dumps/*"):
        os.remove(f)


def to_gb_rounded(mem: float) -> float:
    """
    Rounds and converts to GB.

    Args:
        mem (float): memory in bytes

    Returns:
        float: memory in GB rounded to the second decimal
    """
    return np.round(mem / 1024**3, 2)


def count_hpu_graphs() -> int:
    """Return the number of HPU pre-graph dump files produced so far."""
    return len(glob.glob(".graph_dumps/*PreGraph*"))


def dbg_trace(tag, txt):
    """Append a timestamped trace line (graph count + HPU memory stats + message)
    to DBG_TRACE_FILENAME.

    Only rank 0 writes, and only when DBG_TRACE_FILENAME is set; otherwise this
    is a no-op.

    Args:
        tag: short label identifying the trace point
        txt: free-form message to log
    """
    global START_TS
    if DBG_TRACE_FILENAME is not None and int(os.getenv("RANK", 0)) == 0:
        if START_TS is None:
            START_TS = time.perf_counter()
        time_offset = time.perf_counter() - START_TS
        mem_stats = htorch.hpu.memory.memory_stats()
        mem_used = to_gb_rounded(mem_stats["InUse"])
        max_mem_used = to_gb_rounded(mem_stats["MaxInUse"])
        # Fix: the original passed an unclosed open(...) handle to print(),
        # leaking one file descriptor per trace call. Use a context manager
        # so the file is flushed and closed after each write.
        with open(DBG_TRACE_FILENAME, "a") as trace_file:
            print(
                f"ts:{time_offset:.3f}s g:{count_hpu_graphs()} mu:{mem_used:.1f}GB "
                f"mmu:{max_mem_used:.1f}GB | {tag} | {txt}",
                flush=True,
                file=trace_file,
            )
text-generation-inference/backends/gaudi/server/text_generation_server/utils/debug.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/utils/debug.py", "repo_id": "text-generation-inference", "token_count": 613 }
296
from packaging.version import Version
from packaging import version
import subprocess

# Minimum Synapse/driver version required by the Gaudi TGI backend.
MIN_TGI_GAUDI_SYNAPSE_VERSION = Version("1.19.0")


def get_driver_version():
    """
    Returns the driver version reported by `hl-smi`, or None when it cannot
    be determined (tool missing, command failed, or unexpected output).
    """
    # Enable console printing for `hl-smi` check
    output = subprocess.run(
        "hl-smi",
        shell=True,
        text=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env={"ENABLE_CONSOLE": "true"},
    )
    if output.returncode == 0 and output.stdout:
        # Expected layout: the third output line contains
        # "Driver Version : X.Y.Z-...". Guard the chained indexing so a
        # format change returns None instead of raising IndexError (the
        # original crashed on any unexpected output).
        try:
            return version.parse(
                output.stdout.split("\n")[2]
                .replace(" ", "")
                .split(":")[1][:-1]
                .split("-")[0]
            )
        except (IndexError, version.InvalidVersion):
            return None
    return None


def is_driver_compatible():
    """Return False only when a detected driver version is older than the
    minimum supported one; unknown versions are optimistically accepted."""
    driver_version = get_driver_version()
    if driver_version is not None:
        if driver_version < MIN_TGI_GAUDI_SYNAPSE_VERSION:
            return False
    return True
text-generation-inference/backends/gaudi/server/text_generation_server/utils/version.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/utils/version.py", "repo_id": "text-generation-inference", "token_count": 412 }
297
# Text-generation-inference - Neuron backend for AWS Trainium and inferentia2 ## Description This is the TGI backend for AWS Neuron Trainium and Inferentia family of chips. This backend is composed of: - the AWS Neuron SDK, - the legacy v2 TGI launcher and router, - a neuron specific inference server for text-generation. ## Usage Please refer to the official [documentation](https://huggingface.co/docs/text-generation-inference/backends/neuron). ## Build your own image The simplest way to build TGI with the neuron backend is to use the provided `Makefile`: ```shell $ make -C backends/neuron image ``` Alternatively, you can build the image directly from the top directory using a command similar to the one defined in the `Makefile` under the `image` target.
text-generation-inference/backends/neuron/README.md/0
{ "file_path": "text-generation-inference/backends/neuron/README.md", "repo_id": "text-generation-inference", "token_count": 211 }
298
from text_generation_server.generator import NeuronGenerator from text_generation_server.pb.generate_pb2 import ( Batch, NextTokenChooserParameters, Request, StoppingCriteriaParameters, ) def create_request( id: int, inputs: str, truncate: int = 0, max_new_tokens: int = 20, do_sample: bool = False, top_k: int = 50, top_p: float = 0.9, temperature: float = 1.0, seed: int = 42, repetition_penalty: float = 1.0, ): parameters = NextTokenChooserParameters( temperature=temperature, top_k=top_k, top_p=top_p, do_sample=do_sample, seed=seed, repetition_penalty=repetition_penalty, ) stopping_parameters = StoppingCriteriaParameters(max_new_tokens=max_new_tokens) return Request( id=id, inputs=inputs, truncate=truncate, parameters=parameters, stopping_parameters=stopping_parameters, ) def check_prefill( input_text, expected_token_id, expected_token_text, do_sample, batch_size, model_path, ): """Verify that a prefill for a single request generates the expected output.""" generator = NeuronGenerator.from_pretrained(model_path) assert generator.model.batch_size >= batch_size requests = [] max_new_tokens = 20 for i in range(batch_size): requests.append( create_request( id=0, inputs=input_text, do_sample=do_sample, max_new_tokens=max_new_tokens, ) ) # Let's be pessimistic when estimating max_tokens batch_size * (len(input_text) + max_new_tokens) max_length = generator.model.max_length batch = Batch( id=0, requests=requests, size=batch_size, max_tokens=batch_size * max_length ) generations, next_batch = generator.prefill(batch) assert next_batch.size == batch_size # Whatever was passed as max_tokens, the server will correct it # because of static batching assert next_batch.max_tokens == batch_size * max_length assert len(generations) == batch_size for g in generations: tokens = g.tokens assert tokens.ids == [expected_token_id] assert tokens.texts == [expected_token_text] def check_decode_single( input_text, max_new_tokens, generated_text, do_sample, model_path ): """Verify that a 
decoding for a single request generates the expected output.""" generator = NeuronGenerator.from_pretrained(model_path) request = create_request( id=0, inputs=input_text, max_new_tokens=max_new_tokens, do_sample=do_sample ) max_length = generator.model.max_length batch = Batch(id=0, requests=[request], size=1, max_tokens=max_length) generations, next_batch = generator.prefill(batch) # We already generated one token: call decode max_new_tokens - 1 times for _ in range(max_new_tokens - 1): assert next_batch.size == 1 assert next_batch.max_tokens == max_length assert len(generations) == 1 assert len(generations[0].tokens.ids) == 1 generations, next_batch = generator.decode([next_batch]) assert next_batch is None assert len(generations) == 1 output = generations[0].generated_text assert output.generated_tokens == max_new_tokens assert output.finish_reason == 0 assert output.text == generated_text def check_decode_multiple(model_path): """Verify that two requests added to the batch at different generation steps generate the same outputs (continuous batching). 
""" generator = NeuronGenerator.from_pretrained(model_path) assert generator.model.batch_size > 1 input_text = "Once upon a time" max_new_tokens = 20 # Prefill a single request, remembering the generated token tokens = {0: [], 1: []} request = create_request(id=0, inputs=input_text, max_new_tokens=max_new_tokens) max_length = generator.model.max_length batch = Batch(id=0, requests=[request], size=1, max_tokens=max_length) generations, next_batch = generator.prefill(batch) assert next_batch.size == 1 assert len(generations) == 1 g = generations[0] tokens[g.request_id].append(g.tokens.ids[0]) assert len(tokens[0]) == 1 # Decode a few tokens gen_tokens = 4 for _ in range(gen_tokens - 1): generations, next_batch = generator.decode([next_batch]) assert len(generations) == 1 g = generations[0] tokens[g.request_id].append(g.tokens.ids[0]) assert len(tokens[0]) == gen_tokens assert next_batch.size == 1 # Add a second request request = create_request(id=1, inputs=input_text, max_new_tokens=max_new_tokens) batch = Batch(id=1, requests=[request], size=1, max_tokens=max_length) generations, next_batch_1 = generator.prefill(batch) assert next_batch_1.size == 1 # We should have generated only a single token assert len(generations) == 1 g = generations[0] tokens[g.request_id].append(g.tokens.ids[0]) assert len(tokens[0]) == gen_tokens assert len(tokens[1]) == 1 # Decode more tokens until we reach the maximum for the first request batches = [next_batch, next_batch_1] for _ in range(max_new_tokens - gen_tokens): generations, next_batch = generator.decode(batches) for g in generations: tokens[g.request_id].append(g.tokens.ids[0]) batches = [next_batch] # Verify we now only have one pending request assert next_batch.size == 1 assert len(tokens[0]) == max_new_tokens assert len(tokens[1]) == max_new_tokens - gen_tokens + 1 # Verify we have the output for the first request for g in generations: if g.request_id == 0: output = g.generated_text assert output.text != "" assert 
output.generated_tokens == max_new_tokens generated_text = output.text # Continue decoding until the end of the second request for _ in range(gen_tokens - 1): generations, next_batch = generator.decode([next_batch]) assert len(generations) == 1 g = generations[0] tokens[g.request_id].append(g.tokens.ids[0]) assert next_batch is None output = generations[0].generated_text assert output.generated_tokens == max_new_tokens assert tokens[0] == tokens[1] assert output.text == generated_text
text-generation-inference/backends/neuron/tests/server/helpers.py/0
{ "file_path": "text-generation-inference/backends/neuron/tests/server/helpers.py", "repo_id": "text-generation-inference", "token_count": 2593 }
299
set(TRT_INCLUDE_DIR ${TGI_TRTLLM_BACKEND_TRT_INCLUDE_DIR}) set(TRT_LIB_DIR ${TGI_TRTLLM_BACKEND_TRT_LIB_DIR}) set(USE_CXX11_ABI ON) set(BUILD_PYT OFF) set(BUILD_PYBIND OFF) set(BUILD_MICRO_BENCHMARKS OFF) set(BUILD_BENCHMARKS OFF) set(BUILD_TESTS OFF) set(CMAKE_CUDA_ARCHITECTURES ${TGI_TRTLLM_BACKEND_TARGET_CUDA_ARCH_LIST}) message(STATUS "Building for CUDA Architectures: ${CMAKE_CUDA_ARCHITECTURES}") set(ENABLE_UCX OFF) if (${CMAKE_BUILD_TYPE} STREQUAL "Debug") set(FAST_BUILD ON) set(NVTX_DISABLE ON) set(INDEX_RANGE_CHECK ON) else () set(FAST_BUILD OFF) set(FAST_MATH ON) set(NVTX_DISABLE OFF) set(INDEX_RANGE_CHECK OFF) endif () find_package(Python3 REQUIRED Interpreter) fetchcontent_declare( trtllm GIT_REPOSITORY https://github.com/nvidia/TensorRT-LLM.git GIT_TAG v0.17.0 GIT_SHALLOW ON DOWNLOAD_EXTRACT_TIMESTAMP ) fetchcontent_makeavailable(trtllm) message(STATUS "Found TensorRT-LLM: ${trtllm_SOURCE_DIR}") execute_process(COMMAND git lfs install WORKING_DIRECTORY "${trtllm_SOURCE_DIR}/") execute_process(COMMAND git lfs pull WORKING_DIRECTORY "${trtllm_SOURCE_DIR}/") # TRTLLM use a JIT based *precompiled* library to generate some specific kernels, we are generating the path to this one here set(TRTLLM_NVRTC_LIBRARY_NAME "${CMAKE_SHARED_LIBRARY_PREFIX}tensorrt_llm_nvrtc_wrapper${CMAKE_SHARED_LIBRARY_SUFFIX}" CACHE INTERNAL "nvrtc wrapper library name") set(TRTLLM_NVRTC_WRAPPER_LIBRARY_PATH "${trtllm_SOURCE_DIR}/cpp/tensorrt_llm/kernels/decoderMaskedMultiheadAttention/decoderXQAImplJIT/nvrtcWrapper/${CMAKE_LIBRARY_ARCHITECTURE}/${TRTLLM_NVRTC_LIBRARY_NAME}" CACHE INTERNAL "nvrtc wrapper library path") # The same Executor Static library set(TRTLLM_EXECUTOR_STATIC_LIBRARY_NAME "${CMAKE_SHARED_LIBRARY_PREFIX}tensorrt_llm_executor_static${CMAKE_STATIC_LIBRARY_SUFFIX}" CACHE INTERNAL "executor_static library name") set(TRTLLM_EXECUTOR_STATIC_LIBRARY_PATH "${trtllm_SOURCE_DIR}/cpp/tensorrt_llm/executor/${CMAKE_LIBRARY_ARCHITECTURE}/${TRTLLM_EXECUTOR_STATIC_LIBRARY_NAME}" 
CACHE INTERNAL "executor_static library path")
text-generation-inference/backends/trtllm/cmake/trtllm.cmake/0
{ "file_path": "text-generation-inference/backends/trtllm/cmake/trtllm.cmake", "repo_id": "text-generation-inference", "token_count": 976 }
300
mod backend; pub mod block_allocator; mod client; mod queue; pub mod radix; use crate::client::{ClientError, ShardedClient}; pub(crate) use backend::BackendV3; use serde::Serialize; use thiserror::Error; use utoipa::ToSchema; #[derive(Clone, Debug, Serialize, ToSchema)] pub struct BackendInfo { /// Mandatory #[schema(example = "cuda")] pub model_device_type: String, #[schema(example = "torch.float16")] pub model_dtype: String, /// Backend parameters #[schema(example = "1")] pub speculate: usize, #[schema(example = "1.2")] pub waiting_served_ratio: f32, #[schema(example = "32000")] pub max_batch_total_tokens: u32, #[schema(example = "20")] pub max_waiting_tokens: usize, #[schema(nullable = true, example = "null")] pub max_batch_size: Option<usize>, #[schema(example = "false")] pub support_chunking: bool, #[schema(example = "false")] pub prefix_caching: bool, #[schema(example = "flashinfer")] pub attention_impl: String, #[schema(example = "1")] pub block_size: u32, #[schema(example = "30000")] pub max_input_tokens: usize, #[schema(example = "32000")] pub max_total_tokens: usize, } #[allow(clippy::too_many_arguments)] pub async fn connect_backend( max_input_tokens: Option<usize>, max_total_tokens: Option<usize>, master_shard_uds_path: String, waiting_served_ratio: f32, max_batch_prefill_tokens: u32, max_batch_total_tokens: Option<u32>, max_waiting_tokens: usize, max_batch_size: Option<usize>, ) -> Result<(BackendV3, BackendInfo), V3Error> { // Helper function let check_max_batch_total_tokens = |( max_supported_batch_total_tokens, shard_max_input_tokens, shard_max_total_tokens, ): (Option<u32>, u32, u32)| -> Result<(u32, usize, usize), V3Error> { if let Some(max_input_tokens) = max_input_tokens { assert_eq!(max_input_tokens as u32, shard_max_input_tokens); } if let Some(max_total_tokens) = max_total_tokens { assert_eq!(max_total_tokens as u32, shard_max_total_tokens); } match max_supported_batch_total_tokens { // Older models do not support automatic 
max-batch-total-tokens None => { let max_batch_total_tokens = max_batch_total_tokens.unwrap_or( 16000 .max(shard_max_total_tokens) .max(max_batch_prefill_tokens), ); tracing::warn!("Model does not support automatic max batch total tokens"); Ok(( max_batch_total_tokens, shard_max_input_tokens as usize, shard_max_total_tokens as usize, )) } // Flash attention models return their max supported total tokens Some(max_supported_batch_total_tokens) => { // Warn if user added his own max-batch-total-tokens as we will ignore it if max_batch_total_tokens.is_some() { tracing::warn!( "`--max-batch-total-tokens` is deprecated for Flash \ Attention models." ); tracing::warn!( "Inferred max batch total tokens: {max_supported_batch_total_tokens}" ); } if shard_max_total_tokens > max_supported_batch_total_tokens { return Err(V3Error::NotEnoughMemory(shard_max_total_tokens as usize)); } Ok(( max_supported_batch_total_tokens, shard_max_input_tokens as usize, shard_max_total_tokens as usize, )) } } }; let mut sharded_client = ShardedClient::connect_uds(master_shard_uds_path) .await .map_err(V3Error::Connection)?; // server is running on v3 // Clear the cache; useful if the webserver rebooted sharded_client .clear_cache(None) .await .map_err(V3Error::Cache)?; // Get info from the shard let shard_info = sharded_client.info().await.map_err(V3Error::Info)?; // Warmup model tracing::info!("Warming up model"); let answer = sharded_client .warmup( max_input_tokens.map(|p| p as u32), max_batch_prefill_tokens, max_total_tokens.map(|p| p as u32), max_batch_size, ) .await .map_err(V3Error::Warmup)?; let (max_batch_total_tokens, max_input_tokens, max_total_tokens) = check_max_batch_total_tokens(answer)?; tracing::info!("Setting max batch total tokens to {max_batch_total_tokens}"); metrics::gauge!("tgi_batch_max_total_tokens").set(max_batch_total_tokens); let backend_info = BackendInfo { waiting_served_ratio, max_batch_total_tokens, max_input_tokens, max_total_tokens, max_waiting_tokens, 
max_batch_size, model_device_type: shard_info.device_type.clone(), model_dtype: shard_info.dtype.clone(), speculate: shard_info.speculate as usize, support_chunking: shard_info.support_chunking, prefix_caching: shard_info.use_prefix_caching, attention_impl: shard_info.attention_impl.clone(), block_size: shard_info.block_size, }; let backend = BackendV3::new( sharded_client, waiting_served_ratio, max_batch_prefill_tokens, max_batch_total_tokens, max_waiting_tokens, max_batch_size, shard_info, ); tracing::info!("Using backend V3"); Ok((backend, backend_info)) } #[derive(Debug, Error)] pub enum V3Error { #[error("Unable to clear the Python model shards cache: {0}")] Cache(ClientError), #[error("Unable to connect to the Python model shards: {0}")] Connection(ClientError), #[error("Unable to get the Python model shards info: {0}")] Info(ClientError), #[error("Unable to warmup the Python model shards: {0}")] Warmup(ClientError), #[error("Not enough memory to handle `max_total_tokens={0}`")] NotEnoughMemory(usize), }
text-generation-inference/backends/v3/src/lib.rs/0
{ "file_path": "text-generation-inference/backends/v3/src/lib.rs", "repo_id": "text-generation-inference", "token_count": 3087 }
301
- sections: - local: index title: Text Generation Inference - local: quicktour title: Quick Tour - local: supported_models title: Supported Models - local: installation_nvidia title: Using TGI with Nvidia GPUs - local: installation_amd title: Using TGI with AMD GPUs - local: installation_gaudi title: Using TGI with Intel Gaudi - local: installation_inferentia title: Using TGI with AWS Trainium and Inferentia - local: installation_tpu title: Using TGI with Google TPUs - local: installation_intel title: Using TGI with Intel GPUs - local: installation title: Installation from source - local: multi_backend_support title: Multi-backend support - local: architecture title: Internal Architecture - local: usage_statistics title: Usage Statistics title: Getting started - sections: - local: basic_tutorials/consuming_tgi title: Consuming TGI - local: basic_tutorials/preparing_model title: Preparing Model for Serving - local: basic_tutorials/gated_model_access title: Serving Private & Gated Models - local: basic_tutorials/using_cli title: Using TGI CLI - local: basic_tutorials/non_core_models title: Non-core Model Serving - local: basic_tutorials/safety title: Safety - local: basic_tutorials/using_guidance title: Using Guidance, JSON, tools - local: basic_tutorials/visual_language_models title: Visual Language Models - local: basic_tutorials/monitoring title: Monitoring TGI with Prometheus and Grafana - local: basic_tutorials/train_medusa title: Train Medusa title: Tutorials - sections: - local: backends/neuron title: Neuron - local: backends/gaudi title: Gaudi - local: backends/trtllm title: TensorRT-LLM - local: backends/llamacpp title: Llamacpp title: Backends - sections: - local: reference/launcher title: All TGI CLI options - local: reference/metrics title: Exported Metrics - local: reference/api_reference title: API Reference title: Reference - sections: - local: conceptual/chunking title: V3 update, caching and chunking - local: conceptual/streaming title: Streaming - 
local: conceptual/quantization title: Quantization - local: conceptual/tensor_parallelism title: Tensor Parallelism - local: conceptual/paged_attention title: PagedAttention - local: conceptual/safetensors title: Safetensors - local: conceptual/flash_attention title: Flash Attention - local: conceptual/speculation title: Speculation (Medusa, ngram) - local: conceptual/guidance title: How Guidance Works (via outlines) - local: conceptual/lora title: LoRA (Low-Rank Adaptation) - local: conceptual/external title: External Resources title: Conceptual Guides
text-generation-inference/docs/source/_toctree.yml/0
{ "file_path": "text-generation-inference/docs/source/_toctree.yml", "repo_id": "text-generation-inference", "token_count": 924 }
302
# TGI v3 overview ## Summary Performance leap: TGI processes 3x more tokens, 13x faster than vLLM on long prompts. Zero config ! ### 3x more tokens. By reducing our memory footprint, we’re able to ingest many more tokens and more dynamically than before. A single L4 (24GB) can handle 30k tokens on llama 3.1-8B, while vLLM gets barely 10k. A lot of work went into reducing the footprint of the runtime and its effect are best seen on smaller constrained environments. ### 13x faster On long prompts (200k+ tokens) conversation replies take 27.5s in vLLM, while it takes only 2s in TGI. How so ? We keep the initial conversation around, so when a new reply comes in, we can answer almost instantly. The overhead of the lookup is ~5us. Thanks @Daniël de Kok for the beast data structure. ### Zero config That’s it. Remove all the flags your are using and you’re likely to get the best performance. By evaluating the hardware and model, TGI carefully selects automatic values to give best performance. In production, we don’t have any flags anymore in our deployments. We kept all existing flags around, they may come in handy in niche scenarios. ## Benchmarks ### Methodology To ensure accurate and reliable results, we employed a robust benchmarking protocol that addresses common pitfalls in performance evaluation. Specifically: 1. **Consistent Code**: We used the same codebase to run against different engines, ensuring that any performance differences are attributable to the LLM itself, rather than variations in the testing framework. 2. **Request-Based Measurement**: Instead of measuring Requests Per Second (RPS) by sending as many requests as possible, we opted for a more consistent approach, sending a fixed number of requests and measuring the time it takes for the server to complete all of them. This method avoids boundary effects and provides a more accurate representation of performance. 3. 
**Realistic Combinations**: We selected realistic combinations of LLMs and hardware configurations so we used 8xH100 for a 70B, not a 8B, which would be a waste of money. 4. **Realistic scenarios** We benchmarked engines with prefix caching on, so we are reporting the results of the 2nd run, not the first one. During the first run of a benchmark, every request is new, so prefix caching is not working, masking the real world benefits of using it. Note: Boundary effect is when the benchmarks are flaky because their results depend on fine details of the engine being benchmarked. For instance, a system ingesting a constant 10RPS, but receiving in the benchmark a single final request at -0.1s before the end of the benchmark, and that single request takes a full 10s to process. Then a benchmark taking 30s would measure 7.5RPS instead of the expected 10, because that single query isn't being parallelized with others. Another very slightly slower engine would receive that request at +0.1s which would get discarded by the benchmark and therefore measure the slower system as being faster. For more details on benchmarking in general we recommend the documentation of k6: https://grafana.com/docs/k6/latest/. ### Scenarios We selected a handful of scenarios to simplify the picture, they seem to accurately reflect a larger trend. 1. **Small scenario**: This scenario consists of the first 200 requests from the orca datasets being prompted to the model. The 200 requests total 8k tokens together and are representative of conversation starters. Prefix caching has very limited impact in that scenario and we feel it's a relatively balanced benchmark for simple use cases. 2. **Long scenario**: This scenario consists of 20 requests totalling 200k prompt tokens which are essentially asking for summaries of large chunks for text. 
In practical scenarios this is really useful when you are feeding large chunks of code, large chunks of business data or documents repeatedly and ask simple questions about them (summarization, classification, or where to find some data). This scenario is the one closest to what a lot of professional use cases seem to be doing by including a lot of information in the prompt itself. Those very long conversations are the ones that benefit the most for our recent changes since we are enable ever larger prompts and ever faster caching. ### Hardware 1. `L4` : This is a single L4 (24GB) which represents small or even home compute capabilities. We tested `meta-llama/Meta-Llama-3.1-8B-Instruct` on it. 2. `4xL4`: This is a more beefy deployment usually used for either very large requests deployments for 8B models (the ones under test) or it can also easily handle all 30GB models. For this benchmark we tested `meta-llama/Meta-Llama-3.1-8B-Instruct` 3. `8xH100` This is one of the beefiest deployments possible. We tested `meta-llama/Meta-Llama-3.1-70B-Instruct` as it's the most representative models of this size. Llama 3.3 wasn't released at the time of benchmarking (it's the exact same model so it doesn't make any difference). ### Replicating the results The commands to run the benchmarks are as follows: 1. Prepare the datasets: ```bash cd text-generation-inference/load_tests make prepare_orca python long.py ``` 2. Launch the engine: TGI: `text-generation-launcher --model-id $MODEL_ID --num-shard $N --port 8000` (or docker variant) vLLM: `vllm serve $MODEL_ID --tensor-parallel $N —enable-prefix-caching` (or docker variant) 3. 
Start scenario: Small: `MODEL_ID=$MODEL_ID HOST=localhost:8000 k6 run load_tests/common.js` Long: `MODEL_ID=$MODEL_ID HOST=localhost:8000 k6 run load_tests/long.js` ### Results ![benchmarks_v3](https://raw.githubusercontent.com/huggingface/text-generation-inference/refs/heads/main/assets/v3_benchmarks.png) Our benchmarking results show significant performance gains, with a 13x speedup over vLLM with prefix caching, and up to 30x speedup without prefix caching. These results are consistent with our production data and demonstrate the effectiveness of our optimized LLM architecture. Raw results | | | | | | |---|---|---|---|---| |2nd run ||**TGI v3** (time in s)|**vLLM** (s)|**Amount of req**| |**Llama 3.1 8b**|Small test - L4 - 8B|17.5|19.9|200| |**Llama 3.1 8b**|Long test* - L4 - 8B|53|57|10| |**Llama 3.1 8b**|Small test - 4xL4 - 8B|4.8|6|200| |**Llama 3.1 8b**|Long test - 4xL4 - 8B|3.2|12.5|20| |**Llama 3.1 70b**|Small test - 8XH100 - 70B|6.2|7.4|200| |**Llama 3.1 70b**|Long test - 8H100 - 70B|2|27.5|20| |||||| |1st run ||TGI (s)|vLLM (s)|Amount of req| |**Llama 3.1 8b**|Small test - L4|19.9|19.9|200| |**Llama 3.1 8b**|Long test (10) - L4|49.8|55|10| |**Llama 3.1 8b**|Small test - 4xL4|13|12.6|200| |**Llama 3.1 8b**|Long test - 4xL4|47|50.3|20| |**Llama 3.1 70b**|Small test - 8XH100|7.5|7.6|200| |**Llama 3.1 70b**|Long test - 8H100|12.1|28.3|20| ### Caveats and Limitations While our results are promising, there are some caveats to consider: 1. **Constrained kv-cache**: If a deployment lacks kv-cache space, that means that many queries will require the same slots of kv-cache, leading to contention in the kv-cache. You can limit that effect by limiting `--max-total-tokens` to reduce individual queries impact. You can also use more GPUs or larger GPUs in order to increase the size of the kv-cache. 2. 
**Replication**: In scenarios where multiple replicas are behind a single endpoint, there's no reason for every query from a particular user to hit the same replica, therefore the cache will not be present, meaning no speed benefit. You can use sticky sessions load balancing to force every user to send their requests on the same replica. Do not apply this blindly, it's possible this may not be necessary at all. ## Technical Insights Our performance gains can be attributed to several key factors: 1. **New Kernels**: Our custom kernels, including `flashinfer` and `flashdecoding`, offer improved performance at large prompt lengths and enable more efficient scheduling. 2. **Prefix Caching**: Our optimized prefix caching structure allows for fast query matching, even for long prompts. The overhead is roughly 6us. 3. **Chunking Code**: Our chunking code enables finer control over compute resources, ensuring optimal performance and reduced VRAM usage. 4. **Kernel Optimizations**: We've implemented various other kernel optimizations, including better kernel selection. Notably we've implemented several small kernels involved in the queries bookkeeping which are particularly efficient on small models. Every kernel launch has an overhead of several milliseconds so fusing them together increases a lot performance when this bookkeeping is important relative to the raw model calculations. This happens typically on oversized compute for a particular model and particularly small models. 5. **VRAM efficiency**: In the realm of very large requests (100k+ tokens) there are a lot of places which start becoming big memory consumers. We've hunted the biggest ones and found ways to reduce/reuse or delete them. The biggest culprit probably is `logits` calculation. Logits for llama 3.1-8b take 25.6GB (=100k tokens * 128k vocabulary * 2(f16)) which is more than the full model which is 16GB. 
The thing is that in general we do not need every prompt logits, so we simply removed them and removed them from being potentially asked by users by default. We think this is ok since they are mostly used by researchers. You can enable your deployments to have them again by using the `--enable-prefill-logprobs` flag, but you will experience reduced token prompt size. ## Future Directions While we've made significant progress, there are still opportunities for improvement: 1. **Special models**: All LLMs come with the aforementioned improvements. Some specific set of features might not (some quantizations, speculation or VLMs for instance are harder to optimize for with the same level of detail). 2. **KV-Cache Long-Term Retention**: Addressing KV-cache long-term retention is a challenge. There are several solutions envisionned like shared KV-cache (like redis or memcached) solutions or innovative storage approaches. It is an area of ongoing research of ours. 3. **Multimodal models**: We are also investigating quite a lot other kind of models, like audio-to-audio, image/video generation, and other hybrids, where we see a lot of potential of applying the same principles we've applied in TGI to maximize performance. By sharing our benchmarking methodology, results, and technical insights, we aim to contribute to the ongoing development of more efficient and effective LLMs.
text-generation-inference/docs/source/conceptual/chunking.md/0
{ "file_path": "text-generation-inference/docs/source/conceptual/chunking.md", "repo_id": "text-generation-inference", "token_count": 2789 }
303
# Using TGI with Intel GPUs TGI optimized models are supported on Intel Data Center GPU [Max1100](https://www.intel.com/content/www/us/en/products/sku/232876/intel-data-center-gpu-max-1100/specifications.html), [Max1550](https://www.intel.com/content/www/us/en/products/sku/232873/intel-data-center-gpu-max-1550/specifications.html), the recommended usage is through Docker. On a server powered by Intel GPUs, TGI can be launched with the following command: ```bash model=teknium/OpenHermes-2.5-Mistral-7B volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run docker run --rm --privileged --cap-add=sys_nice \ --device=/dev/dri \ --ipc=host --shm-size 1g --net host -v $volume:/data \ ghcr.io/huggingface/text-generation-inference:3.3.4-intel-xpu \ --model-id $model --cuda-graphs 0 ``` # Using TGI with Intel CPUs Intel® Extension for PyTorch (IPEX) also provides further optimizations for Intel CPUs. The IPEX provides optimization operations such as flash attention, page attention, Add + LayerNorm, ROPE and more. On a server powered by Intel CPU, TGI can be launched with the following command: ```bash model=teknium/OpenHermes-2.5-Mistral-7B volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run docker run --rm --privileged --cap-add=sys_nice \ --device=/dev/dri \ --ipc=host --shm-size 1g --net host -v $volume:/data \ ghcr.io/huggingface/text-generation-inference:3.3.4-intel-cpu \ --model-id $model --cuda-graphs 0 ``` The launched TGI server can then be queried from clients, make sure to check out the [Consuming TGI](./basic_tutorials/consuming_tgi) guide.
text-generation-inference/docs/source/installation_intel.md/0
{ "file_path": "text-generation-inference/docs/source/installation_intel.md", "repo_id": "text-generation-inference", "token_count": 562 }
304
import json
import os
from typing import Dict, Any, Generator

import pytest
from test_gaudi_generate import TEST_CONFIGS

# Configurations whose expected outputs have not been recorded yet
# (marked "unknown" for either the greedy or the batch output).
UNKNOWN_CONFIGS = {
    name: config
    for name, config in TEST_CONFIGS.items()
    if config["expected_greedy_output"] == "unknown" or config["expected_batch_output"] == "unknown"
}


@pytest.fixture(scope="module", params=UNKNOWN_CONFIGS.keys())
def test_config(request) -> Dict[str, Any]:
    """Provide one model configuration per parametrized run, tagged with its name."""
    config = UNKNOWN_CONFIGS[request.param]
    config["test_name"] = request.param
    return config


@pytest.fixture(scope="module")
def test_name(test_config):
    """Expose the configuration's test name as its own fixture."""
    yield test_config["test_name"]


@pytest.fixture(scope="module")
def tgi_service(launcher, test_config, test_name) -> Generator:
    """Launch a TGI service for the model under test and tear it down afterwards."""
    with launcher(test_config["model_id"], test_name) as service:
        yield service


@pytest.mark.asyncio
async def test_capture_expected_outputs(tgi_service, test_config, test_name):
    """Capture greedy and batch outputs for models whose expected outputs are unknown."""
    print(f"Testing {test_name} with {test_config['model_id']}")

    # Block until the service reports healthy before issuing any requests.
    await tgi_service.health(1000)
    client = tgi_service.client

    async def _generate() -> str:
        # One greedy generation against the configured prompt.
        reply = await client.generate(
            test_config["input"],
            max_new_tokens=32,
        )
        return reply.generated_text

    # Single request (greedy decoding).
    print("Testing single request...")
    greedy_output = await _generate()

    # Four identical requests to capture batched behavior.
    print("Testing batch requests...")
    responses = [await _generate() for _ in range(4)]

    # Merge this model's results into any previously recorded ones.
    output_file = "server/integration-tests/expected_outputs.json"
    results = {}
    if os.path.exists(output_file):
        with open(output_file, "r") as f:
            results = json.load(f)

    results[test_name] = {
        "model_id": test_config["model_id"],
        "input": test_config["input"],
        "greedy_output": greedy_output,
        "batch_outputs": responses,
        "args": test_config["args"],
    }

    # Persist the updated results for later use as expected outputs.
    with open(output_file, "w") as f:
        json.dump(results, f, indent=2)

    print(f"\nResults for {test_name} saved to {output_file}")
text-generation-inference/integration-tests/gaudi/capture_expected_outputs.py/0
{ "file_path": "text-generation-inference/integration-tests/gaudi/capture_expected_outputs.py", "repo_id": "text-generation-inference", "token_count": 947 }
305
[ { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": " A" } ], "created": 1741340006, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": " This" } ], "created": 1741340006, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": " Paris" } ], "created": 1741340006, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 3, "logprobs": null, "text": "us" } ], "created": 1741340006, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": " Beginner" } ], "created": 1741340006, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": " is" } ], "created": 1741340006, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": "\n" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 3, "logprobs": null, "text": "cul" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", 
"system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": "’s" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": " a" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": "What" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 3, "logprobs": null, "text": "as" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": " Guide" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": " question" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": " is" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 3, "logprobs": null, "text": "_minus" } ], "created": 1741340007, "id": "", "model": 
"meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": "\n" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": " that" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": " the" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 3, "logprobs": null, "text": "cul" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": "Deep" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": " has" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": " capital" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 3, "logprobs": null, "text": "as" } ], 
"created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": " learning" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": " puzzled" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": " of" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 3, "logprobs": null, "text": "(s" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": " is" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": " many" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": " France" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", 
"index": 3, "logprobs": null, "text": "):\n" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": " a" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": " people" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": "?\n" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "", "index": 3, "logprobs": null, "text": " " } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "finish_reason": "length", "index": 0, "logprobs": null, "text": " subset" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": { "completion_tokens": 10, "completion_tokens_details": null, "prompt_tokens": 6, "prompt_tokens_details": null, "total_tokens": 16 } }, { "choices": [ { "finish_reason": "length", "index": 1, "logprobs": null, "text": " for" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": { "completion_tokens": 10, "completion_tokens_details": null, "prompt_tokens": 5, "prompt_tokens_details": null, 
"total_tokens": 15 } }, { "choices": [ { "finish_reason": "length", "index": 2, "logprobs": null, "text": "The" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": { "completion_tokens": 10, "completion_tokens_details": null, "prompt_tokens": 8, "prompt_tokens_details": null, "total_tokens": 18 } }, { "choices": [ { "finish_reason": "length", "index": 3, "logprobs": null, "text": " \"\"\"\n" } ], "created": 1741340007, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": { "completion_tokens": 10, "completion_tokens_details": null, "prompt_tokens": 3, "prompt_tokens_details": null, "total_tokens": 13 } } ]
text-generation-inference/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_many_prompts_stream.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_many_prompts_stream.json", "repo_id": "text-generation-inference", "token_count": 7648 }
306
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 1736, "logprob": -2.09375, "special": false, "text": " form" }, { "id": 109, "logprob": -1.9140625, "special": false, "text": "\n\n" }, { "id": 651, "logprob": -2.453125, "special": false, "text": "The" }, { "id": 2121, "logprob": -1.8984375, "special": false, "text": " test" }, { "id": 3853, "logprob": -0.23535156, "special": false, "text": " request" }, { "id": 1736, "logprob": -0.091308594, "special": false, "text": " form" }, { "id": 603, "logprob": -0.96875, "special": false, "text": " is" }, { "id": 1671, "logprob": -1.6484375, "special": false, "text": " used" }, { "id": 577, "logprob": -0.43164062, "special": false, "text": " to" }, { "id": 3853, "logprob": -1.2421875, "special": false, "text": " request" } ], "top_tokens": null }, "generated_text": " form\n\nThe test request form is used to request" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 1736, "logprob": -2.09375, "special": false, "text": " form" }, { "id": 109, "logprob": -1.9140625, "special": false, "text": "\n\n" }, { "id": 651, "logprob": -2.453125, "special": false, "text": "The" }, { "id": 2121, "logprob": -1.8984375, "special": false, "text": " test" }, { "id": 3853, "logprob": -0.23535156, "special": false, "text": " request" }, { "id": 1736, "logprob": -0.091308594, "special": false, "text": " form" }, { "id": 603, "logprob": -0.96875, "special": false, "text": " is" }, { "id": 1671, "logprob": -1.6484375, "special": false, "text": " used" }, { "id": 577, "logprob": -0.43164062, "special": false, "text": " to" }, { "id": 3853, "logprob": -1.2421875, "special": false, "text": " request" } ], "top_tokens": null }, "generated_text": " form\n\nThe test request form is used to request" }, { "details": { "best_of_sequences": null, "finish_reason": 
"length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 1736, "logprob": -2.09375, "special": false, "text": " form" }, { "id": 109, "logprob": -1.9140625, "special": false, "text": "\n\n" }, { "id": 651, "logprob": -2.453125, "special": false, "text": "The" }, { "id": 2121, "logprob": -1.8984375, "special": false, "text": " test" }, { "id": 3853, "logprob": -0.23535156, "special": false, "text": " request" }, { "id": 1736, "logprob": -0.091308594, "special": false, "text": " form" }, { "id": 603, "logprob": -0.96875, "special": false, "text": " is" }, { "id": 1671, "logprob": -1.6484375, "special": false, "text": " used" }, { "id": 577, "logprob": -0.43164062, "special": false, "text": " to" }, { "id": 3853, "logprob": -1.2421875, "special": false, "text": " request" } ], "top_tokens": null }, "generated_text": " form\n\nThe test request form is used to request" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 1736, "logprob": -2.09375, "special": false, "text": " form" }, { "id": 109, "logprob": -1.9140625, "special": false, "text": "\n\n" }, { "id": 651, "logprob": -2.453125, "special": false, "text": "The" }, { "id": 2121, "logprob": -1.8984375, "special": false, "text": " test" }, { "id": 3853, "logprob": -0.23535156, "special": false, "text": " request" }, { "id": 1736, "logprob": -0.091308594, "special": false, "text": " form" }, { "id": 603, "logprob": -0.96875, "special": false, "text": " is" }, { "id": 1671, "logprob": -1.6484375, "special": false, "text": " used" }, { "id": 577, "logprob": -0.43164062, "special": false, "text": " to" }, { "id": 3853, "logprob": -1.2421875, "special": false, "text": " request" } ], "top_tokens": null }, "generated_text": " form\n\nThe test request form is used to request" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma/test_flash_gemma_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma/test_flash_gemma_load.json", "repo_id": "text-generation-inference", "token_count": 4072 }
307
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 13, "logprob": -2.0566406, "special": false, "text": "\n" }, { "id": 13, "logprob": -1.5253906, "special": false, "text": "\n" }, { "id": 29902, "logprob": -2.7578125, "special": false, "text": "I" }, { "id": 4966, "logprob": -1.9033203, "special": false, "text": " hope" }, { "id": 445, "logprob": -0.5019531, "special": false, "text": " this" }, { "id": 6911, "logprob": -0.21264648, "special": false, "text": " helps" }, { "id": 29991, "logprob": -0.5991211, "special": false, "text": "!" }, { "id": 2803, "logprob": -0.37475586, "special": false, "text": " Let" }, { "id": 592, "logprob": -0.018463135, "special": false, "text": " me" }, { "id": 1073, "logprob": -0.0008597374, "special": false, "text": " know" } ], "top_tokens": null }, "generated_text": "\n\nI hope this helps! Let me know" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar.json", "repo_id": "text-generation-inference", "token_count": 866 }
308
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": 0, "tokens": [ { "id": 28747, "logprob": 0.0, "special": false, "text": ":" }, { "id": 3169, "logprob": -0.1307373, "special": false, "text": " Let" }, { "id": 332, "logprob": -2.3359375, "special": false, "text": " u" }, { "id": 347, "logprob": 0.0, "special": false, "text": " be" }, { "id": 325, "logprob": -1.0234375, "special": false, "text": " (" }, { "id": 28734, "logprob": -2.0292969, "special": false, "text": "0" }, { "id": 648, "logprob": -1.0439453, "special": false, "text": " +" }, { "id": 28705, "logprob": -0.24499512, "special": false, "text": " " }, { "id": 28770, "logprob": -0.5073242, "special": false, "text": "3" }, { "id": 387, "logprob": -1.5507812, "special": false, "text": " -" } ], "top_tokens": null }, "generated_text": "Test request: Let u be (0 + 3 -" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral_all_params.json", "repo_id": "text-generation-inference", "token_count": 856 }
309
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 8, "prefill": [], "seed": null, "tokens": [ { "id": 2502, "logprob": -1.7890625, "special": false, "text": "image" }, { "id": 2196, "logprob": -0.53125, "special": false, "text": " result" }, { "id": 604, "logprob": -0.0077209473, "special": false, "text": " for" }, { "id": 12254, "logprob": -1.703125, "special": false, "text": " chicken" }, { "id": 611, "logprob": -0.21582031, "special": false, "text": " on" }, { "id": 573, "logprob": -0.734375, "special": false, "text": " the" }, { "id": 8318, "logprob": -0.026000977, "special": false, "text": " beach" }, { "id": 1, "logprob": -0.2109375, "special": true, "text": "<eos>" } ], "top_tokens": null }, "generated_text": "image result for chicken on the beach" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_pali_gemma/test_flash_pali_gemma_two_images.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_pali_gemma/test_flash_pali_gemma_two_images.json", "repo_id": "text-generation-inference", "token_count": 719 }
310
{ "choices": [ { "finish_reason": "stop", "index": 0, "logprobs": null, "message": { "content": "The image shows a stylized scene set in what appears to be a diner or restaurant. In the foreground, there is a table with various food items, including a burger with lettuce and tomato, a bowl of fries, and a drink in a cup with a straw. On the right side of the table, there is an owl sitting alertly, looking directly at the camera. Behind the owl and the table, there is a large, green, dinosaur-like creature resembling Godzilla, with its mouth open and tongue visible. In the background, the diner's decor includes various signs and posters, with a green sign reading \"Basta\" and another sign that says \"Tabasco.\" The setting has a retro or vintage feel, with fluorescent lighting overhead and clean, polished surfaces.", "name": null, "role": "assistant", "tool_calls": null }, "usage": null } ], "created": 1738348100, "id": "", "model": "Qwen/Qwen2-VL-7B-Instruct", "object": "chat.completion", "system_fingerprint": "3.1.1-dev0-native", "usage": { "completion_tokens": 156, "prompt_tokens": 5375, "total_tokens": 5531 } }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_qwen2_vl/test_flash_qwen2_vl_inpaint.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_qwen2_vl/test_flash_qwen2_vl_inpaint.json", "repo_id": "text-generation-inference", "token_count": 422 }
311
[ { "choices": [ { "delta": { "content": "{", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975615, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "\"", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975615, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "f", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975615, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "irs", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975615, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "t", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975615, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "Name", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975615, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "\":", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975615, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": 
null }, { "choices": [ { "delta": { "content": "\"", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975615, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "David", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975615, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "\",", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975615, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "\"", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975615, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "l", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975615, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "ast", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975615, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "Name", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975615, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", 
"usage": null }, { "choices": [ { "delta": { "content": "\":", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975615, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "\"", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975615, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "Unknown", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975615, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "\",", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "\"", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "h", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "obb", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": 
"3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "y", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "\":", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "\",", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " \\\"", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "riding", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " bicycles", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "\\\",", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", 
"system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " \\\"", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "having", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " cats", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "\\\"\",", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "\"", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "num", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "Cats", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": 
"chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "\":", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "2", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "}", "role": "assistant" }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "", "role": "assistant" }, "finish_reason": "stop", "index": 0, "logprobs": null } ], "created": 1741975616, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion.chunk", "system_fingerprint": "3.2.1-dev0-native", "usage": null } ]
text-generation-inference/integration-tests/models/__snapshots__/test_json_schema_constrain/test_json_schema_stream.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_json_schema_constrain/test_json_schema_stream.json", "repo_id": "text-generation-inference", "token_count": 8555 }
312
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": 0, "tokens": [ { "id": 16017, "logprob": 0.0, "special": false, "text": " blue" }, { "id": 20495, "logprob": 0.0, "special": false, "text": " sky" }, { "id": 259, "logprob": -0.47070312, "special": false, "text": " " }, { "id": 261, "logprob": -0.15307617, "special": false, "text": "," }, { "id": 35622, "logprob": -0.796875, "special": false, "text": " cloud" }, { "id": 263, "logprob": -1.2958984, "special": false, "text": "s" }, { "id": 305, "logprob": 0.0, "special": false, "text": " and" }, { "id": 35622, "logprob": -1.2998047, "special": false, "text": " cloud" }, { "id": 263, "logprob": 0.0, "special": false, "text": "s" }, { "id": 1, "logprob": 0.0, "special": true, "text": "</s>" } ], "top_tokens": null }, "generated_text": "Why is the sky blue?blue sky , clouds and clouds" }
text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base_all_params.json", "repo_id": "text-generation-inference", "token_count": 910 }
313
[ { "choices": [ { "delta": { "content": "I", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "'m", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " an", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " artificial", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " intelligence", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " model", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " known", "role": 
"assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " as", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " a", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " large", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " language", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " model", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " (", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, 
"logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "LL", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "M", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": ")", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " or", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " convers", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "ational", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": 
"meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " AI", "role": "assistant", "tool_calls": null }, "finish_reason": "length", "index": 0, "logprobs": null } ], "created": 1741694017, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null } ]
text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools_insufficient_information_stream.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools_insufficient_information_stream.json", "repo_id": "text-generation-inference", "token_count": 4844 }
314
import pytest

# Prompt shared by every test in this module.
_PROMPT = "Pour déguster un ortolan, il faut tout d'abord"


@pytest.fixture(scope="module")
def bloom_560_handle(launcher):
    """Launch a single-shard bigscience/bloom-560m server for this module."""
    with launcher("bigscience/bloom-560m", num_shard=1) as handle:
        yield handle


@pytest.fixture(scope="module")
async def bloom_560(bloom_560_handle):
    """Wait until the launched server reports healthy, then expose its client."""
    await bloom_560_handle.health(240)
    return bloom_560_handle.client


@pytest.mark.release
@pytest.mark.asyncio
async def test_bloom_560m(bloom_560, response_snapshot):
    """Seeded top-p generation matches the recorded snapshot."""
    result = await bloom_560.generate(
        _PROMPT,
        max_new_tokens=10,
        top_p=0.9,
        decoder_input_details=True,
        seed=0,
    )

    assert result.details.generated_tokens == 10
    assert result == response_snapshot


@pytest.mark.release
@pytest.mark.asyncio
async def test_bloom_560m_all_params(bloom_560, response_snapshot):
    """Generation with every sampling parameter set matches the snapshot."""
    result = await bloom_560.generate(
        _PROMPT,
        max_new_tokens=10,
        repetition_penalty=1.2,
        return_full_text=True,
        stop_sequences=["test"],
        temperature=0.5,
        top_p=0.9,
        top_k=10,
        truncate=5,
        typical_p=0.9,
        watermark=True,
        decoder_input_details=True,
        seed=0,
    )

    assert result.details.generated_tokens == 10
    assert result == response_snapshot


@pytest.mark.release
@pytest.mark.asyncio
async def test_bloom_560m_load(bloom_560, generate_load, response_snapshot):
    """Four concurrent identical requests all yield the same snapshotted text."""
    results = await generate_load(
        bloom_560,
        _PROMPT,
        max_new_tokens=10,
        n=4,
    )

    assert len(results) == 4
    assert all(r.generated_text == results[0].generated_text for r in results)
    assert results == response_snapshot
text-generation-inference/integration-tests/models/test_bloom_560m.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_bloom_560m.py", "repo_id": "text-generation-inference", "token_count": 783 }
315
import pytest

# Gemma-2 chat-formatted prompt shared by both tests.
_PROMPT = (
    "<start_of_turn>user:\nWrite a poem to help me remember the first 10 "
    "elements on the periodic table, giving each element its own line."
    "<end_of_turn>\n<start_of_turn>model:\n"
)
# First ten tokens the model is expected to emit for the prompt above.
_EXPECTED = "**Hydrogen**, light and free,\n**He"


@pytest.fixture(scope="module")
def flash_gemma2_handle(launcher):
    """Launch a two-shard google/gemma-2-9b-it server for this module."""
    with launcher("google/gemma-2-9b-it", num_shard=2) as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_gemma2(flash_gemma2_handle):
    """Wait until the launched server reports healthy, then expose its client."""
    await flash_gemma2_handle.health(300)
    return flash_gemma2_handle.client


@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_gemma2(flash_gemma2, response_snapshot):
    """A single greedy generation produces the expected ten tokens."""
    result = await flash_gemma2.generate(
        _PROMPT,
        max_new_tokens=10,
        decoder_input_details=True,
    )

    assert result.generated_text == _EXPECTED
    assert result.details.generated_tokens == 10
    assert result == response_snapshot


@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_gemma2_load(flash_gemma2, generate_load, response_snapshot):
    """Four concurrent identical requests all yield the same expected text."""
    results = await generate_load(
        flash_gemma2,
        _PROMPT,
        max_new_tokens=10,
        n=4,
    )

    assert results[0].generated_text == _EXPECTED
    assert len(results) == 4
    assert all(r.generated_text == results[0].generated_text for r in results)
    assert results == response_snapshot
text-generation-inference/integration-tests/models/test_flash_gemma2.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_gemma2.py", "repo_id": "text-generation-inference", "token_count": 602 }
316
import pytest

# Prompt shared by every test in this module.
_PROMPT = "What is gradient descent?\n\n"
# Expected greedy continuation for the prompt above.
_EXPECTED = "Gradient descent is an optimization algorithm used to minimize"


@pytest.fixture(scope="module")
def flash_mixtral_handle(launcher):
    """Launch an eight-shard mistralai/Mixtral-8x7B-v0.1 server for this module."""
    with launcher("mistralai/Mixtral-8x7B-v0.1", num_shard=8) as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_mixtral(flash_mixtral_handle):
    """Wait until the launched server reports healthy, then expose its client."""
    await flash_mixtral_handle.health(300)
    return flash_mixtral_handle.client


@pytest.mark.skip(reason="requires > 4 shards")
@pytest.mark.asyncio
async def test_flash_mixtral(flash_mixtral, response_snapshot):
    """Greedy generation matches the expected text and the snapshot."""
    result = await flash_mixtral.generate(
        _PROMPT, max_new_tokens=10, decoder_input_details=True
    )

    assert result.details.generated_tokens == 10
    assert result.generated_text == _EXPECTED
    assert result == response_snapshot


@pytest.mark.skip(reason="requires > 4 shards")
@pytest.mark.asyncio
async def test_flash_mixtral_all_params(flash_mixtral, response_snapshot):
    """Generation with every sampling parameter set matches the snapshot."""
    result = await flash_mixtral.generate(
        _PROMPT,
        max_new_tokens=10,
        repetition_penalty=1.2,
        return_full_text=True,
        stop_sequences=["test"],
        temperature=0.5,
        top_p=0.9,
        top_k=10,
        truncate=5,
        typical_p=0.9,
        watermark=True,
        decoder_input_details=True,
        seed=0,
    )

    assert result.details.generated_tokens == 10
    assert (
        result.generated_text
        == "What is gradient descent?\n\nIt seems to me, that if you're"
    )
    assert result == response_snapshot


@pytest.mark.skip(reason="requires > 4 shards")
@pytest.mark.asyncio
async def test_flash_mixtral_load(flash_mixtral, generate_load, response_snapshot):
    """Four concurrent identical requests all yield the same expected text."""
    results = await generate_load(
        flash_mixtral, _PROMPT, max_new_tokens=10, n=4
    )

    assert len(results) == 4
    assert results[0].details.generated_tokens == 10
    assert results[0].generated_text == _EXPECTED
    assert all(
        r.generated_text == results[0].generated_text for r in results
    ), f"{[r.generated_text for r in results]}"
    assert results == response_snapshot
text-generation-inference/integration-tests/models/test_flash_mixtral.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_mixtral.py", "repo_id": "text-generation-inference", "token_count": 926 }
317
import pytest

# Code-completion prompt shared by every test in this module.
_PROMPT = "def geometric_mean(L: List[float]):"


@pytest.fixture(scope="module")
def flash_starcoder_gptq_handle(launcher):
    """Launch a two-shard GPTQ-quantized StarCoder server for this module."""
    with launcher("Narsil/starcoder-gptq", num_shard=2, quantize="gptq") as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_starcoder_gptq(flash_starcoder_gptq_handle):
    """Wait until the launched server reports healthy, then expose its client."""
    await flash_starcoder_gptq_handle.health(300)
    return flash_starcoder_gptq_handle.client


@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_starcoder_gptq(flash_starcoder_gptq, generous_response_snapshot):
    """Greedy generation matches the recorded (generous) snapshot."""
    result = await flash_starcoder_gptq.generate(
        _PROMPT,
        max_new_tokens=20,
        decoder_input_details=True,
    )

    assert result.details.generated_tokens == 2
    assert result == generous_response_snapshot


# NOTE: a `test_flash_starcoder_gptq_default_params` variant (temperature=0.2,
# top_p=0.95, seed=0, max_new_tokens=20, expecting 2 generated tokens) was
# deactivated because it is flaky: only this model is affected, and only by a
# logprob precision issue.


@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_starcoder_gptq_load(
    flash_starcoder_gptq, generate_load, generous_response_snapshot
):
    """Four concurrent requests complete; output equality is not yet asserted."""
    results = await generate_load(
        flash_starcoder_gptq,
        _PROMPT,
        max_new_tokens=10,
        n=4,
    )

    assert len(results) == 4
    # XXX: TODO: Fix this test — output-equality and snapshot assertions are
    # currently disabled:
    # assert all(r.generated_text == results[0].generated_text for r in results)
    # assert results == generous_response_snapshot
text-generation-inference/integration-tests/models/test_flash_starcoder_gptq.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_starcoder_gptq.py", "repo_id": "text-generation-inference", "token_count": 802 }
318
import pytest


@pytest.fixture(scope="module")
def flash_smolvlm_next_handle(launcher):
    """Launch a HuggingFaceTB/SmolVLM-Instruct server for this module."""
    with launcher("HuggingFaceTB/SmolVLM-Instruct") as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_smolvlm_next(flash_smolvlm_next_handle):
    """Wait until the launched server reports healthy, then expose its client."""
    await flash_smolvlm_next_handle.health(300)
    return flash_smolvlm_next_handle.client


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_smolvlm_next_simple_url(flash_smolvlm_next, response_snapshot):
    """Seeded generation over an image URL yields the expected caption."""
    # NOTE(review): this local was previously named `ny_skyline`, but the URL
    # actually points at a bee picture; renamed to match its content.
    bee_image_url = "https://huggingface.co/spaces/merve/chameleon-7b/resolve/main/bee.jpg"
    question = "What is in this image?"
    prompt = (
        f"<|begin_of_text|><|begin_of_text|>User:![]({bee_image_url})"
        f"{question}<end_of_utterance>\nAssistant:"
    )

    response = await flash_smolvlm_next.generate(
        prompt,
        max_new_tokens=10,
        seed=1337,
    )
    print(response)

    assert (
        response.generated_text == " A bee on a pink flower."
    ), f"{repr(response.generated_text)}"
    assert response.details.generated_tokens == 8
    assert response == response_snapshot
text-generation-inference/integration-tests/models/test_smolvlm.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_smolvlm.py", "repo_id": "text-generation-inference", "token_count": 435 }
319
# Download the cleaned ShareGPT dataset used as load-test prompt material.
ShareGPT_V3_unfiltered_cleaned_split.json:
	wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json

# Convert the downloaded ShareGPT dump into the load-test input format.
prepare_share: ShareGPT_V3_unfiltered_cleaned_split.json
	python filter.py

# Build the Orca-based load-test input.
prepare_orca:
	python orca.py
text-generation-inference/load_tests/Makefile/0
{ "file_path": "text-generation-inference/load_tests/Makefile", "repo_id": "text-generation-inference", "token_count": 123 }
320
/// gRPC protocol (v2) between the router and a model shard.
syntax = "proto3";

package generate.v2;

service TextGenerationService {
    /// Model Info
    rpc Info (InfoRequest) returns (InfoResponse) {}
    /// Service discovery
    rpc ServiceDiscovery (ServiceDiscoveryRequest) returns (ServiceDiscoveryResponse) {}
    /// Empties batch cache
    rpc ClearCache (ClearCacheRequest) returns (ClearCacheResponse);
    /// Remove requests from a cached batch
    rpc FilterBatch (FilterBatchRequest) returns (FilterBatchResponse);
    /// Warmup the model and compute max cache size
    rpc Warmup (WarmupRequest) returns (WarmupResponse);
    /// Prefill batch and decode first token
    rpc Prefill (PrefillRequest) returns (PrefillResponse);
    /// Decode token for a list of prefilled batches
    rpc Decode (DecodeRequest) returns (DecodeResponse);
    /// Health check
    rpc Health (HealthRequest) returns (HealthResponse);
}

/// Empty request
message HealthRequest {}
/// Empty response
message HealthResponse {}

/// Empty request
message InfoRequest {}

message InfoResponse {
    bool requires_padding = 1;
    string dtype = 2;
    string device_type = 3;
    optional uint32 window_size = 4;
    uint32 speculate = 5;
}

/// Empty request
message ServiceDiscoveryRequest {}

message ServiceDiscoveryResponse {
    /// Other shards urls
    repeated string urls = 1;
}

message ClearCacheRequest {
    /// Optional batch id
    optional uint64 id = 1;
}

/// Empty response
message ClearCacheResponse {}

enum GrammarType {
    GRAMMAR_TYPE_NONE = 0;
    GRAMMAR_TYPE_JSON = 1;
    GRAMMAR_TYPE_REGEX = 2;
}

message NextTokenChooserParameters {
    /// exponential scaling output probability distribution
    float temperature = 1;
    /// restricting to the k highest probability elements
    uint32 top_k = 2;
    /// restricting to top tokens summing to prob_cut_off <= prob_cut_off
    float top_p = 3;
    /// restricting to top tokens summing to prob_cut_off <= prob_cut_off
    float typical_p = 4;
    /// apply sampling on the logits
    bool do_sample = 5;
    /// random seed for sampling
    uint64 seed = 6;
    /// repetition penalty
    float repetition_penalty = 7;
    /// frequency penalty
    /// NOTE: field numbers are intentionally out of declaration order here
    /// (9 before 8); they must never be renumbered for wire compatibility.
    float frequency_penalty = 9;
    /// token watermarking using "A Watermark for Large Language Models"
    bool watermark = 8;
    /// grammar (applied if not empty)
    string grammar = 10;
    /// grammar type
    GrammarType grammar_type = 11;
}

message StoppingCriteriaParameters {
    /// Maximum number of generated tokens
    uint32 max_new_tokens = 1;
    /// Optional stopping sequences
    repeated string stop_sequences = 2;
    /// Ignore end of sequence token
    /// used for benchmarking
    bool ignore_eos_token = 3;
}

message Request {
    /// Request ID
    uint64 id = 1;
    /// The generation context
    string inputs = 2;
    /// Context truncation
    uint32 truncate = 3;
    /// Next Token Chooser Parameters
    NextTokenChooserParameters parameters = 4;
    /// Stopping Criteria Parameters
    StoppingCriteriaParameters stopping_parameters = 5;
    /// Return prefill logprobs
    bool prefill_logprobs = 6;
    /// Return most likely n tokens
    uint32 top_n_tokens = 7;
}

message Batch {
    /// Batch ID
    uint64 id = 1;
    /// Individual requests
    repeated Request requests = 2;
    /// Batch size (==len(requests))
    uint32 size = 3;
    /// Maximum number of tokens this batch will grow to
    uint32 max_tokens = 4;
}

message CachedBatch {
    /// Batch ID
    uint64 id = 1;
    /// Individual requests ids
    repeated uint64 request_ids = 2;
    /// Batch size (==len(requests))
    uint32 size = 3;
    /// Maximum number of tokens this batch will grow to
    uint32 max_tokens = 4;
}

enum FinishReason {
    FINISH_REASON_LENGTH = 0;
    FINISH_REASON_EOS_TOKEN = 1;
    FINISH_REASON_STOP_SEQUENCE = 2;
}

message GeneratedText {
    /// Output
    string text = 1;
    /// Number of generated tokens
    uint32 generated_tokens = 2;
    /// Finish reason
    FinishReason finish_reason = 3;
    /// Seed
    optional uint64 seed = 4;
}

message Tokens {
    /// Token IDs
    repeated uint32 ids = 1;
    /// Logprobs
    repeated float logprobs = 2;
    /// tokens
    repeated string texts = 3;
    /// special
    repeated bool is_special = 4;
}

message Generation {
    /// Request ID
    uint64 request_id = 1;
    /// Prefill tokens (optional)
    Tokens prefill_tokens = 2;
    Tokens tokens = 3;
    /// Complete generated text
    optional GeneratedText generated_text = 4;
    /// Top tokens
    repeated Tokens top_tokens = 5;
}

message FilterBatchRequest {
    /// Batch ID
    uint64 batch_id = 1;
    /// Requests to keep
    repeated uint64 request_ids = 2;
}

message FilterBatchResponse {
    /// Filtered Batch (cached)
    CachedBatch batch = 1;
}

message PrefillRequest {
    /// Batch
    Batch batch = 1;
}

message PrefillResponse {
    /// Generation
    repeated Generation generations = 1;
    /// Next batch (cached)
    optional CachedBatch batch = 2;
    /// Forward elapsed time in nanoseconds
    uint64 forward_ns = 3;
    /// Decode elapsed time in nanoseconds
    uint64 decode_ns = 4;
    /// Total elapsed time in nanoseconds
    uint64 total_ns = 5;
}

message DecodeRequest {
    /// Cached batches
    repeated CachedBatch batches = 1;
}

message DecodeResponse {
    /// Decodes
    repeated Generation generations = 1;
    /// Next batch (cached)
    optional CachedBatch batch = 2;
    /// Forward elapsed time in nanoseconds
    uint64 forward_ns = 3;
    /// Decode elapsed time in nanoseconds
    uint64 decode_ns = 4;
    /// Total elapsed time in nanoseconds
    uint64 total_ns = 5;
    /// Concatenate elapsed time in nanoseconds
    optional uint64 concat_ns = 6;
}

message WarmupRequest {
    /// Batch to warmup on
    Batch batch = 1;
    uint32 max_input_length = 2;
    uint32 max_prefill_tokens = 3;
    uint32 max_total_tokens = 4;
}

message WarmupResponse {
    /// Maximum number of tokens supported by the model
    optional uint32 max_supported_total_tokens = 1;
}
text-generation-inference/proto/generate.proto/0
{ "file_path": "text-generation-inference/proto/generate.proto", "repo_id": "text-generation-inference", "token_count": 2074 }
321
use crate::config::Config;
use crate::validation::ValidationError::{BestOfSampling, BestOfSeed, EmptyInput};
use crate::{
    GenerateParameters, GenerateRequest, GrammarType, HubPreprocessorConfig, Idefics2Preprocessor,
    TokenizerTrait,
};
use crate::{PyTokenizer, Tokenizer};
use base64::{engine::general_purpose::STANDARD, Engine};
use image::{ImageFormat, ImageReader};
use outlines_core::json_schema::to_regex as json_schema_to_regex;
use rand::{thread_rng, Rng};
use serde_json::Value;
/// Payload validation logic
use std::cmp::min;
use std::io::Cursor;
use std::iter;
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tracing::warn;
use tracing::{instrument, Span};
use {once_cell::sync::Lazy, regex::Regex};

/// Cap on how many new tokens are requested per scheduling round; a request
/// asking for more is re-queued after generating this many (see
/// `validate_input`).
static DEFAULT_GENERATION_LENGTH: u32 = 1024;

/// Validation
///
/// Validates incoming generation payloads and tokenizes inputs via a pool of
/// background blocking workers (round-robin dispatch over `sender`).
#[derive(Debug, Clone)]
pub struct Validation {
    /// Validation parameters
    max_best_of: usize,
    max_stop_sequences: usize,
    max_top_n_tokens: u32,
    max_input_length: usize,
    max_total_tokens: usize,
    disable_grammar_support: bool,
    /// Channel to communicate with the background tokenization task
    sender: mpsc::UnboundedSender<TokenizerRequest>,
}

impl Validation {
    /// Build a `Validation` and spawn `workers` blocking tokenizer workers
    /// plus one round-robin dispatch task. A Python tokenizer is forced to a
    /// single worker (it runs under the GIL via pyo3).
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn new(
        workers: usize,
        tokenizer: Tokenizer,
        config: Option<Config>,
        preprocessor_config: Option<HubPreprocessorConfig>,
        max_best_of: usize,
        max_stop_sequences: usize,
        max_top_n_tokens: u32,
        max_input_length: usize,
        max_total_tokens: usize,
        disable_grammar_support: bool,
    ) -> Self {
        // Python tokenizers are serialized to one worker; see doc comment.
        let workers = if let Tokenizer::Python { .. } = &tokenizer {
            1
        } else {
            workers
        };

        // If we have a fast tokenizer
        let sender = {
            // Create round robin channel
            let (validation_sender, validation_round_robin_receiver) = mpsc::unbounded_channel();
            let mut senders = Vec::with_capacity(workers);

            // Create workers
            for _ in 0..workers {
                let tokenizer_clone = tokenizer.clone();
                let config_clone = config.clone();
                let preprocessor_config_clone = preprocessor_config.clone();
                let (tokenizer_sender, tokenizer_receiver) = mpsc::unbounded_channel();
                senders.push(tokenizer_sender);

                // Spawn worker
                tokio::task::spawn_blocking(move || {
                    tokenizer_worker(
                        tokenizer_clone,
                        config_clone,
                        preprocessor_config_clone,
                        tokenizer_receiver,
                    )
                });
            }

            // Create tokenization round robin task
            tokio::spawn(round_robin_task(validation_round_robin_receiver, senders));

            validation_sender
        };

        Self {
            max_best_of,
            sender,
            max_stop_sequences,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        }
    }

    /// Tokenize `inputs` on a background worker; returns the encoding plus
    /// the input split into text/image chunks.
    #[instrument(skip(self, inputs))]
    pub async fn tokenize(
        &self,
        inputs: String,
        add_special_tokens: bool,
        truncate: Option<usize>,
    ) -> Result<(tokenizers::Encoding, Vec<Chunk>), ValidationError> {
        // If we have a fast tokenizer
        // Create response channel
        let (response_sender, response_receiver) = oneshot::channel();
        // Send request to the background validation task
        // Unwrap is safe here
        let _ = &self
            .sender
            .send((
                (inputs, add_special_tokens, truncate),
                response_sender,
                Span::current(),
            ))
            .unwrap();

        // Await on response channel
        // Unwrap is safe here
        let encoding = response_receiver.await.unwrap()?;
        Ok(encoding)
    }

    /// Tokenize and length-check the input; computes the effective
    /// (`max_new_tokens`, `max_total_new_tokens`) pair and enforces
    /// `max_total_tokens` / `max_input_length`.
    /// Returns (chunks, input_ids, input_length, max_new_tokens,
    /// max_total_new_tokens).
    #[allow(clippy::type_complexity)]
    #[instrument(skip(self, inputs))]
    async fn validate_input(
        &self,
        inputs: String,
        add_special_tokens: bool,
        truncate: Option<usize>,
        max_new_tokens: Option<u32>,
    ) -> Result<(Vec<Chunk>, Option<Vec<u32>>, usize, u32, u32), ValidationError> {
        // If we have a fast tokenizer
        let (encoding, inputs) = self
            .tokenize(inputs.clone(), add_special_tokens, truncate)
            .await?;
        // Create response channel
        let input_length = if let Some(truncate) = truncate {
            std::cmp::min(encoding.len(), truncate)
        } else {
            encoding.len()
        };

        // Get total tokens
        let (max_new_tokens, max_total_new_tokens) = if let Some(max_new_tokens) = max_new_tokens {
            // Do not accept humongous max_new_tokens queries.
            // We preallocate the default but we prevent a single user
            // from taking up all the slots in a handful of queries that consume little
            // amount of tokens. (You can have 10 token long query that creates a handful of token
            // but the requested amount to be 120k.
            let chunk_size = min(max_new_tokens, DEFAULT_GENERATION_LENGTH);
            (chunk_size, max_new_tokens)
        } else {
            // Use the maximum possible number of tokens as default
            // However, the system will re-queue the request everytime it completes
            // `DEFAULT_GENERATION_LENGTH` tokens.
            let max_new_tokens = self.max_total_tokens.saturating_sub(input_length) as u32;
            (
                min(max_new_tokens, DEFAULT_GENERATION_LENGTH),
                max_new_tokens,
            )
        };
        let total_tokens = input_length + max_new_tokens as usize;

        // Validate MaxTotalTokens
        if total_tokens > self.max_total_tokens {
            return Err(ValidationError::MaxTotalTokens(
                self.max_total_tokens,
                input_length,
                max_new_tokens,
            ));
        }

        // Validate InputLength
        if input_length > self.max_input_length {
            return Err(ValidationError::InputLength(
                self.max_input_length,
                input_length,
            ));
        }

        // Keep only the last `input_length` ids (right-truncation).
        let ids = encoding.get_ids();
        let input_ids = ids[ids.len().saturating_sub(input_length)..].to_owned();

        metrics::histogram!("tgi_request_input_length").record(input_length as f64);
        Ok((
            inputs,
            Some(input_ids),
            input_length,
            max_new_tokens,
            max_total_new_tokens,
        ))
    }

    /// Validate a payload and get the number of tokens in the input
    #[instrument(skip_all)]
    pub(crate) async fn validate(
        &self,
        request: GenerateRequest,
    ) -> Result<ValidGenerateRequest, ValidationError> {
        let GenerateParameters {
            best_of,
            temperature,
            repetition_penalty,
            frequency_penalty,
            top_k,
            top_p,
            typical_p,
            do_sample,
            max_new_tokens,
            stop: stop_sequences,
            truncate,
            seed,
            watermark,
            decoder_input_details,
            top_n_tokens,
            grammar,
            adapter_id,
            ..
        } = request.parameters;

        // sampling must be true when best_of > 1
        let best_of = best_of.unwrap_or(1);
        let sampling = do_sample
            || temperature.is_some()
            || top_k.is_some()
            || top_p.is_some()
            || typical_p.is_some();

        if best_of > 1 && !sampling {
            return Err(BestOfSampling);
        }

        let temperature = temperature.unwrap_or(1.0);
        if temperature <= 0.0 {
            return Err(ValidationError::Temperature);
        }

        let repetition_penalty = repetition_penalty.unwrap_or(1.0);
        if repetition_penalty <= 0.0 {
            return Err(ValidationError::RepetitionPenalty);
        }

        let frequency_penalty = frequency_penalty.unwrap_or(0.0);
        if !(-2.0..=2.0).contains(&frequency_penalty) {
            return Err(ValidationError::FrequencyPenalty);
        }

        // Different because the proto default value is not a valid value
        // for the user
        let top_p = top_p
            .map(|value| {
                if value <= 0.0 || value >= 1.0 {
                    return Err(ValidationError::TopP);
                }
                Ok(value)
            })
            .unwrap_or(Ok(1.0))?;

        let typical_p = typical_p
            .map(|value| {
                if value <= 0.0 || value >= 1.0 {
                    return Err(ValidationError::TypicalP);
                }
                Ok(value)
            })
            .unwrap_or(Ok(1.0))?;

        let top_k: u32 = top_k
            .map(|value| {
                if value <= 0 {
                    return Err(ValidationError::TopK);
                }
                Ok(value as u32)
            })
            .unwrap_or(Ok(0))?;

        if max_new_tokens == Some(0) {
            return Err(ValidationError::NegativeMaxNewTokens);
        }

        if stop_sequences.len() > self.max_stop_sequences {
            return Err(ValidationError::StopSequence(
                self.max_stop_sequences,
                stop_sequences.len(),
            ));
        }

        // If seed is None, assign a random one
        let seed = match seed {
            None => thread_rng().gen(),
            Some(seed) => {
                if best_of > 1 {
                    return Err(BestOfSeed);
                }
                seed
            }
        };

        let top_n_tokens = top_n_tokens
            .map(|value| {
                if value > self.max_top_n_tokens {
                    return Err(ValidationError::TopNTokens(self.max_top_n_tokens, value));
                }
                Ok(value)
            })
            .unwrap_or(Ok(0))?;

        // Check if inputs is empty
        if request.inputs.is_empty() {
            return Err(EmptyInput);
        }

        // Check if truncate is strictly positive and less than max_input_length
        let truncate = truncate
            .map(|value| {
                if value == 0 || value > self.max_input_length {
                    return Err(ValidationError::Truncate(self.max_input_length, value));
                }
                Ok(Some(value))
            })
            .unwrap_or(Ok(None))?;

        // Validate inputs
        let (inputs, input_ids, input_length, max_new_tokens, max_total_new_tokens) = self
            .validate_input(
                request.inputs,
                request.add_special_tokens,
                truncate,
                max_new_tokens,
            )
            .await?;

        // TODO: we should build the FSM here and pass the compiled FSM instead of the grammar
        // NOTE: this is currently difficult because we need the tokenizer in Python to build
        // the FSM and we'd have to load a copy of the tokenizer into our Pyo3 instance which
        // may be slow and memory intensive. Best case is to have a Rust implementation of the FSM
        // compiler and use that to build the FSM here.

        // Validate grammar and unpack the grammar and type for the proto message
        let grammar = match grammar {
            Some(grammar) => {
                // Ensure that grammar is not set if it's not supported
                if self.disable_grammar_support {
                    return Err(ValidationError::Grammar);
                }
                let valid_grammar = match grammar {
                    GrammarType::Json(json) => {
                        let json = match json {
                            // if value is a string, we need to parse it again to make sure its
                            // a valid json
                            Value::String(s) => serde_json::from_str(&s)
                                .map_err(|e| ValidationError::InvalidGrammar(e.to_string())),
                            Value::Object(_) => Ok(json),
                            _ => Err(ValidationError::Grammar),
                        }?;

                        // Check if the json is a valid JSONSchema
                        jsonschema::draft202012::meta::validate(&json)
                            .map_err(|e| ValidationError::InvalidGrammar(e.to_string()))?;

                        // The schema can be valid but lack properties.
                        // We need properties for the grammar to be successfully parsed in Python.
                        // Therefore, we must check and throw an error if properties are missing.
                        json.get("properties")
                            .ok_or(ValidationError::InvalidGrammar(
                                "Grammar must have a 'properties' field".to_string(),
                            ))?;

                        // Do compilation in the router for performance. In the future, we
                        // should also move regex -> automaton compilation in the router,
                        // but this is not yet supported in pure Rust by outlines-core.
                        let grammar_regex = json_schema_to_regex(&json, None, &json)
                            .map_err(ValidationError::RegexFromSchema)?;

                        ValidGrammar::Regex(grammar_regex.to_string())
                    }
                    GrammarType::JsonSchema(schema_config) => {
                        // Extract the actual schema for validation
                        let json = &schema_config.schema;

                        // Check if the json is a valid JSONSchema
                        jsonschema::draft202012::meta::validate(json)
                            .map_err(|e| ValidationError::InvalidGrammar(e.to_string()))?;

                        // The schema can be valid but lack properties.
                        // We need properties for the grammar to be successfully parsed in Python.
                        // Therefore, we must check and throw an error if properties are missing.
                        json.get("properties")
                            .ok_or(ValidationError::InvalidGrammar(
                                "Grammar must have a 'properties' field".to_string(),
                            ))?;

                        // Do compilation in the router for performance
                        let grammar_regex = json_schema_to_regex(json, None, json)
                            .map_err(ValidationError::RegexFromSchema)?;

                        ValidGrammar::Regex(grammar_regex.to_string())
                    }
                    GrammarType::Regex(regex) => ValidGrammar::Regex(regex),
                };
                Some(valid_grammar)
            }
            None => None,
        };

        let parameters = ValidParameters {
            temperature,
            repetition_penalty,
            frequency_penalty,
            top_k,
            top_p,
            typical_p,
            do_sample,
            seed,
            watermark,
            grammar,
        };
        let stopping_parameters = ValidStoppingParameters {
            max_new_tokens,
            max_total_new_tokens,
            stop_sequences,
            ignore_eos_token: false,
        };

        metrics::histogram!("tgi_request_max_new_tokens").record(max_new_tokens as f64);

        Ok(ValidGenerateRequest {
            inputs,
            input_ids: input_ids.map(Arc::new),
            add_special_tokens: request.add_special_tokens,
            decoder_input_details,
            input_length: input_length as u32,
            truncate: truncate.unwrap_or(self.max_input_length) as u32,
            parameters,
            stopping_parameters,
            top_n_tokens,
            adapter_id,
        })
    }

    /// Validate the best_of parameter
    #[instrument(skip_all)]
    pub(crate) fn validate_best_of(&self, best_of: usize) -> Result<usize, ValidationError> {
        if self.max_best_of == 1 && best_of != 1 {
            return Err(ValidationError::BestOfDisabled);
        }

        if best_of > self.max_best_of {
            return Err(ValidationError::BestOf(self.max_best_of, best_of));
        }

        Ok(best_of)
    }
}

/// Round robin tokenization task
///
/// Forwards each incoming request to the next worker in `senders`, cycling
/// through them; exits when the inbound channel closes.
async fn round_robin_task(
    mut receiver: mpsc::UnboundedReceiver<TokenizerRequest>,
    senders: Vec<mpsc::UnboundedSender<TokenizerRequest>>,
) {
    loop {
        for sender in &senders {
            match receiver.recv().await {
                None => return,
                Some(request) => sender.send(request).unwrap(),
            };
        }
    }
}

/// Start tokenization workers
///
/// Blocking worker loop: drains `receiver`, runs `prepare_input` for each
/// request, and replies on the per-request oneshot. The Python variant holds
/// the GIL for the lifetime of the loop.
fn tokenizer_worker(
    tokenizer: Tokenizer,
    config: Option<Config>,
    preprocessor_config: Option<HubPreprocessorConfig>,
    mut receiver: mpsc::UnboundedReceiver<TokenizerRequest>,
) {
    match tokenizer {
        Tokenizer::Python {
            tokenizer_name,
            revision,
            trust_remote_code,
        } => {
            pyo3::Python::with_gil(|py| -> pyo3::PyResult<()> {
                let tokenizer =
                    PyTokenizer::from_py(py, tokenizer_name, revision, trust_remote_code)?;
                // Loop over requests
                while let Some(((inputs, add_special_tokens, truncate), response_tx, parent_span)) =
                    receiver.blocking_recv()
                {
                    parent_span.in_scope(|| {
                        response_tx
                            .send(prepare_input(
                                inputs,
                                truncate,
                                add_special_tokens,
                                &tokenizer,
                                config.as_ref(),
                                preprocessor_config.as_ref(),
                            ))
                            // receiver dropped => caller gave up; ignore
                            .unwrap_or(())
                    })
                }
                Ok(())
            })
            .expect("Failure in python tokenizer worker");
        }
        Tokenizer::Rust(tokenizer) => {
            while let Some(((inputs, add_special_tokens, truncate), response_tx, parent_span)) =
                receiver.blocking_recv()
            {
                parent_span.in_scope(|| {
                    response_tx
                        .send(prepare_input(
                            inputs,
                            truncate,
                            add_special_tokens,
                            &tokenizer,
                            config.as_ref(),
                            preprocessor_config.as_ref(),
                        ))
                        .unwrap_or(())
                })
            }
        }
    }
}

/// Map an image MIME type to the corresponding `image` crate format, or
/// `None` for unsupported/unknown types (caller then guesses the format).
fn format_from_mimetype(mimetype: &str) -> Option<ImageFormat> {
    match mimetype {
        "image/png" => Some(ImageFormat::Png),
        "image/jpeg" => Some(ImageFormat::Jpeg),
        "image/jpg" => Some(ImageFormat::Jpeg),
        "image/gif" => Some(ImageFormat::Gif),
        "image/webp" => Some(ImageFormat::WebP),
        "image/tiff" => Some(ImageFormat::Tiff),
        // "image/pnm"=>Some(ImageFormat::Pnm),
        // "image/tga"=>Some(ImageFormat::Tga),
        // "image/dds"=>Some(ImageFormat::Dds),
        // "image/bmp"=>Some(ImageFormat::Bmp),
        // "image/ico"=>Some(ImageFormat::Ico),
        // "image/x-exr"=>Some(ImageFormat::OpenExr),
        _ => None,
    }
}

/// Inverse of `format_from_mimetype`; unknown formats fall back to
/// `application/octet-stream`.
fn format_to_mimetype(format: ImageFormat) -> String {
    match format {
        ImageFormat::Png => "image/png",
        ImageFormat::Jpeg => "image/jpeg",
        ImageFormat::Gif => "image/gif",
        ImageFormat::WebP => "image/webp",
        ImageFormat::Tiff => "image/tiff",
        _ => "application/octet-stream",
    }
    .to_string()
}

/// Resolve a markdown image reference `![](...)` to raw bytes.
/// Supports `http(s)` URLs (fetched with a blocking reqwest call — this runs
/// on a blocking worker thread) and `data:` base64 URIs.
/// Returns (bytes, mimetype, height, width).
fn fetch_image(input: &str) -> Result<(Vec<u8>, String, usize, usize), ValidationError> {
    if input.starts_with("![](http://") || input.starts_with("![](https://") {
        let url = &input["![](".len()..input.len() - 1];
        let data = reqwest::blocking::get(url)?.bytes()?;

        let format = image::guess_format(&data)?;
        // TODO Remove this clone
        let img = ImageReader::with_format(Cursor::new(data.clone()), format).decode()?;
        let height: usize = img.height().try_into()?;
        let width: usize = img.width().try_into()?;
        let mimetype = format_to_mimetype(format);
        Ok((data.to_vec(), mimetype, height, width))
    } else if input.starts_with("![](data:") {
        // Remove ![](....)
        let content = &input["![](data:".len()..input.len() - 1];
        let tokens: Vec<_> = content.split(';').collect();
        if tokens.len() != 2 {
            return Err(ValidationError::InvalidImageContent(content.to_string()));
        }
        let mimetype = tokens[0];
        let content = tokens[1];

        if !content.starts_with("base64,") {
            return Err(ValidationError::InvalidImageContent(content.to_string()));
        }
        let data = STANDARD.decode(&content["base64,".len()..])?;
        let img = if let Some(format) = format_from_mimetype(mimetype) {
            ImageReader::with_format(Cursor::new(&data), format).decode()?
        } else {
            ImageReader::new(Cursor::new(&data))
                .with_guessed_format()
                .map_err(|_io_error| ValidationError::InvalidImageContent(content.to_string()))?
                .decode()?
        };

        let height: usize = img.height().try_into()?;
        let width: usize = img.width().try_into()?;
        Ok((data, mimetype.to_string(), height, width))
    } else {
        Err(ValidationError::InvalidImageContent(input.to_string()))
    }
}

/// Build the model-specific placeholder-token string that stands in for one
/// image of the given size when the prompt is tokenized.
fn image_tokens(
    config: &Config,
    preprocessor_config: Option<&HubPreprocessorConfig>,
    height: usize,
    width: usize,
) -> String {
    use Config::*;
    use HubPreprocessorConfig::*;
    match config {
        Idefics => "<image>".to_string(),
        Mllama => "<|image|>".to_string(),
        Idefics2(config) => {
            const FAKE: &str = "<fake_token_around_image>";
            const IMAGE: &str = "<image>";
            let slots = config.get_number_of_features(height, width);

            let mut image_string = String::with_capacity(2 * FAKE.len() + slots * IMAGE.len());
            image_string.push_str(FAKE);
            image_string.extend(iter::repeat_n(IMAGE, slots));
            image_string.push_str(FAKE);

            if matches!(
                preprocessor_config,
                Some(Idefics2Processor(Idefics2Preprocessor {
                    do_image_splitting: true,
                    ..
                }))
            ) {
                // Image splitting yields 4 tiles + the original image.
                image_string = image_string.repeat(5);
            };
            image_string
        }
        Idefics3(config) => {
            const FAKE: &str = "<fake_token_around_image>";
            const IMAGE: &str = "<image>";
            const GLOBAL_IMG: &str = "<global-img>";

            let max_longest_edge_for_image_resize = config.get_max_longest_edge_for_image_resize();
            let max_image_size = config.get_max_image_size();

            // Mirror the preprocessor's resize so the row/col split matches.
            let (height, width) = {
                let h = height as f32;
                let w = width as f32;
                // First resize to max_longest_edge (always scale to this size)
                let scale1 = max_longest_edge_for_image_resize as f32 / h.max(w);
                let (h, w) = (h * scale1, w * scale1);

                // Ensure we dont exceed max_size (only scale down)
                let scale2 = (max_image_size as f32 / h.max(w)).min(1.0);
                ((h * scale2) as usize, (w * scale2) as usize)
            };

            let image_seq_len = config.get_number_of_features();
            let max_edge = config.get_max_longest_edge();

            let (image_rows, image_cols) = if height > max_edge || width > max_edge {
                (
                    (height as f32 / max_edge as f32).ceil() as usize,
                    (width as f32 / max_edge as f32).ceil() as usize,
                )
            } else {
                (0, 0)
            };

            let mut image_string = String::new();

            if image_rows == 0 && image_cols == 0 {
                // Single image case
                image_string.push_str(FAKE);
                image_string.push_str(GLOBAL_IMG);
                image_string.push_str(&IMAGE.repeat(image_seq_len));
                image_string.push_str(FAKE);
            } else {
                // Split image case
                for n_h in 0..image_rows {
                    for n_w in 0..image_cols {
                        image_string.push_str(FAKE);
                        image_string.push_str(&format!("<row_{}_col_{}>", n_h + 1, n_w + 1));
                        image_string.push_str(&IMAGE.repeat(image_seq_len));
                    }
                    image_string.push('\n');
                }

                image_string.push('\n');
                image_string.push_str(FAKE);
                image_string.push_str(GLOBAL_IMG);
                image_string.push_str(&IMAGE.repeat(image_seq_len));
                image_string.push_str(FAKE);
            }

            image_string
        }
        Paligemma(config) => "<image>".repeat(config.get_number_of_features(height, width)),
        LlavaNext(config) => "<image>".repeat(config.get_number_of_features(height, width)),
        Llama4(config) => {
            const IMAGE_START: &str = "<|image_start|>";
            const IMAGE: &str = "<|image|>";
            const IMAGE_END: &str = "<|image_end|>";
            const PATCH: &str = "<|patch|>";
            const TILE_X_SEP: &str = "<|tile_x_separator|>";
            const TILE_Y_SEP: &str = "<|tile_y_separator|>";

            let image_height = config.image_size();
            let patch_size = config.patch_size();
            let pixel_shuffle_ratio = config.pixel_shuffle_ratio();
            let max_patches = match preprocessor_config {
                Some(HubPreprocessorConfig::Llama4Processor(cfg)) => cfg.max_patches,
                _ => panic!("Expected Llama4Processor in preprocessor_config"),
            };
            let downsample_ratio =
                (1.0 / (pixel_shuffle_ratio * pixel_shuffle_ratio)).round() as usize;

            let (ratio_h, ratio_w) = config.get_aspect_ratios(height, width, max_patches);
            let image_width = image_height; // Assuming pixel shape: [H][W][C]

            let num_patches_per_chunk =
                (image_height / patch_size) * (image_width / patch_size) / downsample_ratio;

            let mut img_string = String::new();
            img_string.push_str(IMAGE_START);

            if ratio_h * ratio_w > 1 {
                for _yy in 0..ratio_h {
                    for xx in 0..ratio_w {
                        img_string.push_str(&PATCH.repeat(num_patches_per_chunk));
                        if xx < ratio_w - 1 {
                            img_string.push_str(TILE_X_SEP);
                        }
                    }
                    img_string.push_str(TILE_Y_SEP);
                }
            }
            img_string.push_str(IMAGE);
            img_string.push_str(&PATCH.repeat(num_patches_per_chunk));
            img_string.push_str(IMAGE_END);
            img_string
        }
        // NOTE(review): `{:?}` Debug-formats the String, so the repeated
        // "<|image_pad|>" run is emitted wrapped in literal quote characters.
        // Looks unintentional (`{}` would emit it verbatim) — confirm against
        // the Qwen2-VL chat template before changing.
        Qwen2Vl(config) => format!(
            "<|vision_start|>{:?}<|vision_end|>",
            "<|image_pad|>".repeat(config.get_number_of_features(height, width))
        ),
        Qwen2_5Vl(config) => format!(
            "<|vision_start|>{:?}<|vision_end|>",
            "<|image_pad|>".repeat(config.get_number_of_features(height, width))
        ),
        Gemma3(_config) => {
            // TODO: prefer using the config to determine the number of features
            let num_mm_soft_tokens_per_image = 256;
            format!(
                "\n\n<start_of_image>{}<end_of_image>\n\n",
                "<image_soft_token>".repeat(num_mm_soft_tokens_per_image)
            )
        }
        _ => unimplemented!("Images tokens are not supported for this model configuration"),
    }
}

/// Post-process the placeholder string; Idefics2 collapses doubled FAKE
/// tokens that appear between adjacent images.
fn image_tokens_fixup(config: &Config, text: String) -> String {
    match config {
        Config::Idefics2(_) => {
            const FAKE: &str = "<fake_token_around_image>";
            text.replace(&format!("{FAKE}{FAKE}"), FAKE)
        }
        _ => text,
    }
}

/// Get input length and optionally truncate it
///
/// Splits the raw prompt into text/image chunks (markdown `![](...)`
/// references are fetched and replaced by model-specific image tokens for
/// VLM configs), then tokenizes the resulting query string.
fn prepare_input<T: TokenizerTrait>(
    inputs: String,
    _truncate: Option<usize>,
    add_special_tokens: bool,
    tokenizer: &T,
    config: Option<&Config>,
    preprocessor_config: Option<&HubPreprocessorConfig>,
) -> Result<(tokenizers::Encoding, Vec<Chunk>), ValidationError> {
    use Config::*;
    // Matches markdown image references: ![](...)
    static RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"!\[\]\([^\)]*\)").unwrap());

    let (tokenizer_query, input_chunks) = match config {
        Some(
            config @ (Idefics | Mllama | Idefics2(_) | Idefics3(_) | Gemma3(_) | Llama4(_)
            | Paligemma(_) | LlavaNext(_) | Qwen2Vl(_) | Qwen2_5Vl(_)),
        ) => {
            let mut input_chunks = Vec::new();
            let mut tokenizer_query = String::with_capacity(inputs.len());
            let mut start = 0;
            for chunk in RE.find_iter(&inputs) {
                let chunk_start = chunk.start();
                let chunk_end = chunk.end();
                if chunk_start != start {
                    input_chunks.push(Chunk::Text(inputs[start..chunk_start].to_string()));
                    tokenizer_query.push_str(&inputs[start..chunk_start]);
                }
                let (data, mimetype, height, width) = fetch_image(&inputs[chunk_start..chunk_end])?;
                input_chunks.push(Chunk::Image(Image { data, mimetype }));
                tokenizer_query.push_str(&image_tokens(config, preprocessor_config, height, width));
                start = chunk_end;
            }
            if start != inputs.len() {
                input_chunks.push(Chunk::Text(inputs[start..].to_string()));
                tokenizer_query.push_str(&inputs[start..]);
            }

            tokenizer_query = image_tokens_fixup(config, tokenizer_query);

            (tokenizer_query, input_chunks)
        }
        _ => (inputs.clone(), vec![Chunk::Text(inputs)]),
    };

    // Get the number of tokens in the input
    let encoding = tokenizer
        .encode_trait(tokenizer_query, add_special_tokens)
        .map_err(|err| ValidationError::Tokenizer(err.to_string()))?;

    Ok((encoding, input_chunks))
}

/// (inputs, add_special_tokens, truncate) + reply channel + caller's span.
type TokenizerRequest = (
    (String, bool, Option<usize>),
    oneshot::Sender<Result<(tokenizers::Encoding, Vec<Chunk>), ValidationError>>,
    Span,
);

/// Raw image bytes plus their MIME type.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Image {
    pub data: Vec<u8>,
    pub mimetype: String,
}

/// One piece of a multimodal prompt.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum Chunk {
    Text(String),
    Image(Image),
}

/// Convert input chunks to a stringly-typed input for backwards
/// compat for backends that haven't implemented chunked inputs.
pub trait ChunksToString {
    /// Convert chunks to string.
    fn chunks_to_string(&self) -> String;
}

impl ChunksToString for Vec<Chunk> {
    fn chunks_to_string(&self) -> String {
        let mut output = String::new();
        self.iter().for_each(|c| match &c {
            Chunk::Text(text) => output.push_str(text),
            // Images are re-embedded as markdown data URIs.
            Chunk::Image(Image { data, mimetype }) => {
                let encoded = STANDARD.encode(data);
                output.push_str(&format!("![](data:{};base64,{})", mimetype, encoded))
            }
        });
        output
    }
}

/// A validated grammar, ready for the shard proto message.
#[derive(Debug, Clone)]
pub enum ValidGrammar {
    Json(String),
    Regex(String),
}

/// Sampling parameters after validation/defaulting (mirrors the proto's
/// NextTokenChooserParameters).
#[derive(Debug, Clone)]
pub struct ValidParameters {
    /// / exponential scaling output probability distribution
    pub temperature: f32,
    /// / restricting to the k highest probability elements
    pub top_k: u32,
    /// / restricting to top tokens summing to prob_cut_off <= prob_cut_off
    pub top_p: f32,
    /// / restricting to top tokens summing to prob_cut_off <= prob_cut_off
    pub typical_p: f32,
    /// / apply sampling on the logits
    pub do_sample: bool,
    /// / random seed for sampling
    pub seed: u64,
    /// / repetition penalty
    pub repetition_penalty: f32,
    /// / frequency penalty
    pub frequency_penalty: f32,
    /// / token watermarking using "A Watermark for Large Language Models"
    pub watermark: bool,
    /// / grammar (applied if not empty)
    pub grammar: Option<ValidGrammar>,
}

/// Stopping criteria after validation (mirrors the proto's
/// StoppingCriteriaParameters, plus the re-queue budget).
#[derive(Debug, Clone)]
pub struct ValidStoppingParameters {
    /// / Maximum number of generated tokens
    pub max_new_tokens: u32,
    /// Maximum number of generated tokens before being re-queued by the system
    pub max_total_new_tokens: u32,
    /// / Optional stopping sequences
    pub stop_sequences: Vec<String>,
    /// / Ignore end of sequence token
    /// / used for benchmarking
    pub ignore_eos_token: bool,
}

/// Fully validated generation request handed to the scheduler.
#[derive(Debug, Clone)]
pub struct ValidGenerateRequest {
    pub inputs: Vec<Chunk>,
    pub input_ids: Option<Arc<Vec<u32>>>,
    pub input_length: u32,
    pub truncate: u32,
    pub add_special_tokens: bool,
    pub decoder_input_details: bool,
    pub parameters: ValidParameters,
    pub stopping_parameters: ValidStoppingParameters,
    pub top_n_tokens: u32,
    pub adapter_id: Option<String>,
}
/// All the ways an incoming generation request can fail validation.
///
/// Each variant carries a human-readable message via `thiserror`'s
/// `#[error]` attribute; `#[from]` variants wrap lower-level errors
/// (base64, image decoding, int conversion, HTTP fetches).
#[derive(Error, Debug)]
pub enum ValidationError {
    #[error("`best_of` must be > 0 and <= {0}. Given: {1}")]
    BestOf(usize, usize),
    #[error("`best_of` != 1 is not allowed for this endpoint")]
    BestOfDisabled,
    #[error("you must use sampling when `best_of` is > 1")]
    BestOfSampling,
    #[error("`seed` must not be set when `best_of` > 1")]
    BestOfSeed,
    #[error("`best_of` != 1 is not supported when streaming tokens")]
    BestOfStream,
    #[error("`top_n_tokens` must be >= 0 and <= {0}. Given: {1}")]
    TopNTokens(u32, u32),
    #[error("`top_n_tokens` != 0 is not allowed for this endpoint")]
    TopNTokensDisabled,
    #[error("`decoder_input_details` == true is not supported when streaming tokens")]
    PrefillDetailsStream,
    #[error("`temperature` must be strictly positive")]
    Temperature,
    #[error("`repetition_penalty` must be strictly positive")]
    RepetitionPenalty,
    #[error("`frequency_penalty` must be >= -2.0 and <= 2.0")]
    FrequencyPenalty,
    #[error("`top_p` must be > 0.0 and < 1.0")]
    TopP,
    #[error("`top_k` must be strictly positive")]
    TopK,
    #[error("`truncate` must be strictly positive and less than {0}. Given: {1}")]
    Truncate(usize, usize),
    #[error("`typical_p` must be > 0.0 and < 1.0")]
    TypicalP,
    #[error("one of `max_new_tokens` or `truncate` must be set if a fast tokenizer is not in use")]
    UnsetMaxNewTokens,
    #[error("`max_new_tokens` must be strictly positive")]
    NegativeMaxNewTokens,
    #[error("`max_new_tokens` must be <= {0}. Given: {1}")]
    MaxNewTokens(usize, u32),
    #[error("`inputs` tokens + `max_new_tokens` must be <= {0}. Given: {1} `inputs` tokens and {2} `max_new_tokens`")]
    MaxTotalTokens(usize, usize, u32),
    #[error("`inputs` must have less than {0} tokens. Given: {1}")]
    InputLength(usize, usize),
    #[error("`inputs` cannot be empty")]
    EmptyInput,
    #[error("`stop` supports up to {0} stop sequences. Given: {1}")]
    StopSequence(usize, usize),
    #[error("tokenizer error {0}")]
    Tokenizer(String),
    #[error("grammar is not supported")]
    Grammar,
    #[error("grammar is not valid: {0}")]
    InvalidGrammar(String),
    #[error("cannot compile regex from schema: {0}")]
    RegexFromSchema(anyhow::Error),
    #[error("base64 encoding is invalid: {0}")]
    InvalidBase64(#[from] base64::DecodeError),
    #[error("invalid image: {0}")]
    InvalidImage(#[from] image::ImageError),
    #[error("invalid integer: {0}")]
    InvalidInt(#[from] core::num::TryFromIntError),
    #[error("invalid image content: {0}")]
    InvalidImageContent(String),
    #[error("Could not fetch image: {0}")]
    FailedFetchImage(#[from] reqwest::Error),
    #[error("{0} modality is not supported")]
    UnsupportedModality(&'static str),
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::{Idefics2, PaliTextConfig, Paligemma};
    use crate::default_parameters;
    use crate::tests::get_tokenizer;

    /// `validate_input` must reject a request whose input tokens plus
    /// `max_new_tokens` exceed `max_total_tokens` (6 here).
    #[tokio::test]
    async fn test_validation_max_new_tokens() {
        let tokenizer = get_tokenizer();
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let max_new_tokens = 10;
        match validation
            .validate_input("Hello".to_string(), true, None, Some(max_new_tokens))
            .await
        {
            // MaxTotalTokens(max_total, input_tokens, max_new):
            // "Hello" apparently encodes to 1 token with this test tokenizer.
            Err(ValidationError::MaxTotalTokens(6, 1, 10)) => (),
            // Ok((_s, _, 0, 10)) => (),
            r => panic!("Unexpected not max new tokens: {r:?}"),
        }
    }

    /// Same budget overflow as above, exercised with a different
    /// local-variable setup order (behavioral duplicate by design).
    #[tokio::test]
    async fn test_validation_input_length() {
        let tokenizer = get_tokenizer();
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let disable_grammar_support = true;
        let workers = 1;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let max_new_tokens = 10;
        match validation
            .validate_input("Hello".to_string(), true, None, Some(max_new_tokens))
            .await
        {
            Err(ValidationError::MaxTotalTokens(6, 1, 10)) => (),
            _ => panic!("Unexpected not max new tokens"),
        }
    }

    /// `best_of > 1` without sampling must be rejected.
    #[tokio::test]
    async fn test_validation_best_of_sampling() {
        let tokenizer = get_tokenizer();
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );
        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    best_of: Some(2),
                    do_sample: false,
                    ..default_parameters()
                },
            })
            .await
        {
            Err(ValidationError::BestOfSampling) => (),
            _ => panic!("Unexpected not best of sampling"),
        }
    }

    /// `top_p` must satisfy 0.0 < top_p < 1.0 when set by the user;
    /// leaving it unset resolves to the default 1.0.
    #[tokio::test]
    async fn test_validation_top_p() {
        let tokenizer = get_tokenizer();
        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 106;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );
        // top_p == 1.0 supplied explicitly: rejected.
        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_p: Some(1.0),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
        {
            Err(ValidationError::TopP) => (),
            _ => panic!("Unexpected top_p"),
        }

        // top_p strictly inside (0, 1): accepted.
        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_p: Some(0.99),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
        {
            Ok(_) => (),
            _ => panic!("Unexpected top_p error"),
        }

        let valid_request = validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_p: None,
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();
        // top_p == 1.0 is invalid for users to ask for but it's the default resolved value.
        assert_eq!(valid_request.parameters.top_p, 1.0);
    }

    /// `top_n_tokens` is capped at `max_top_n_tokens` (4 here); 0 and
    /// unset are both accepted and resolve to 0.
    #[tokio::test]
    async fn test_validation_top_n_tokens() {
        let tokenizer = get_tokenizer();
        let max_best_of = 2;
        let max_stop_sequences = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 106;
        let workers = 1;
        let disable_grammar_support = true;
        let config = None;
        let validation = Validation::new(
            workers,
            tokenizer,
            config,
            None,
            max_best_of,
            max_stop_sequences,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );
        match validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: Some(5),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
        {
            Err(ValidationError::TopNTokens(4, 5)) => (),
            _ => panic!("Unexpected top_n_tokens"),
        }

        validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: Some(4),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();

        validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: Some(0),
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();

        let valid_request = validation
            .validate(GenerateRequest {
                inputs: "Hello".to_string(),
                add_special_tokens: true,
                parameters: GenerateParameters {
                    top_n_tokens: None,
                    max_new_tokens: Some(5),
                    ..default_parameters()
                },
            })
            .await
            .unwrap();

        assert_eq!(valid_request.top_n_tokens, 0);
    }

    // A 1x1 transparent GIF, base64-encoded; used as a minimal valid image.
    static PIXEL_GIF: &str = "R0lGODdhAQABAIEAAP///wAAAAAAAAAAACwAAAAAAQABAAAIBAABBAQAOw==";

    /// Markdown image syntax in the prompt must be split into
    /// `Chunk::Text` / `Chunk::Image` pieces by `tokenize`.
    #[tokio::test]
    async fn test_prepare_input_chunks() {
        let pixel_data = STANDARD.decode(PIXEL_GIF).unwrap();

        let tokenizer = get_tokenizer();

        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let disable_grammar_support = true;
        let workers = 1;
        let config = Config::Paligemma(Paligemma {
            text_config: PaliTextConfig {
                num_image_tokens: 1,
            },
        });
        let validation = Validation::new(
            workers,
            tokenizer,
            Some(config),
            None,
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let chunks = match validation
            .tokenize(
                format!("test![](data:image/gif;base64,{})", PIXEL_GIF),
                true,
                None,
            )
            .await
        {
            Ok((_encoding, chunks)) => chunks,
            _ => panic!("Unexpected tokenization failure"),
        };

        assert!(
            chunks
                == vec![
                    Chunk::Text("test".to_string()),
                    Chunk::Image(Image {
                        data: pixel_data.clone(),
                        mimetype: "image/gif".to_string()
                    })
                ],
            "Failed to process images",
        );
    }

    /// Idefics2 surrounds/separates images with a fake token; with image
    /// splitting enabled the expected count for two images is 11
    /// (see the arithmetic in the comment below).
    #[tokio::test]
    async fn test_idefics2_correct_n_fake_tokens() {
        let pixel_data = STANDARD.decode(PIXEL_GIF).unwrap();

        let tokenizer = get_tokenizer();

        let max_best_of = 2;
        let max_stop_sequence = 3;
        let max_top_n_tokens = 4;
        let max_input_length = 5;
        let max_total_tokens = 6;
        let disable_grammar_support = true;
        let workers = 1;
        let config = Config::Idefics2(Idefics2 {});
        let validation = Validation::new(
            workers,
            tokenizer,
            Some(config),
            Some(HubPreprocessorConfig::Idefics2Processor(
                Idefics2Preprocessor {
                    do_image_splitting: true,
                },
            )),
            max_best_of,
            max_stop_sequence,
            max_top_n_tokens,
            max_input_length,
            max_total_tokens,
            disable_grammar_support,
        );

        let (encoding, chunks) = match validation
            .tokenize(
                format!(
                    "test![](data:image/gif;base64,{})![](data:image/gif;base64,{})",
                    PIXEL_GIF, PIXEL_GIF
                ),
                true,
                None,
            )
            .await
        {
            Ok((encoding, chunks)) => (encoding, chunks),
            _ => panic!("Unexpected tokenization failure"),
        };

        assert!(
            chunks
                == vec![
                    Chunk::Text("test".to_string()),
                    Chunk::Image(Image {
                        data: pixel_data.clone(),
                        mimetype: "image/gif".to_string()
                    }),
                    Chunk::Image(Image {
                        data: pixel_data.clone(),
                        mimetype: "image/gif".to_string()
                    })
                ],
            "Failed to process images",
        );

        // Verify the number of fake tokens:
        //
        // - Two images surrounded/separated by a fake token = 3.
        // - Both are split in 5 subimages, separated by a fake token: 2 * 4
        //
        // Fake tokens get split up by the testing tokenizer, but we don't care.
        assert_eq!(
            encoding
                .get_tokens()
                .iter()
                .filter(|t| *t == "fake")
                .count(),
            11
        );
    }
}
text-generation-inference/router/src/validation.rs/0
{ "file_path": "text-generation-inference/router/src/validation.rs", "repo_id": "text-generation-inference", "token_count": 25286 }
322
#include <ATen/Dispatch.h>
#include <THC/THCAtomics.cuh>
#include <ATen/ATen.h>
#include <torch/torch.h>

#include <vector>
#include <optional>

/**
* Friendly reminder of how multithreading works in CUDA: https://developer.nvidia.com/blog/even-easier-introduction-cuda
* Check example at https://github.com/thomasw21/LinearTransformers/blob/main/model/attention/fast_weight/fast_weight_cuda.cu
**/

// Available in pytorch main
//#define DISPATCH_CASE_FLOATING_TYPES(...) \
//   at::AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
//   at::AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \
//   at::AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \
//   at::AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__) \

/*
* Forward passes
*/

/**
* cast to fp32 if in fp16 + mask + softmax computation in fp32 + cast back to original dtype
*
* Each thread block processes `rows_per_block` rows of the [B, KV] scores
* matrix; within a row, each thread handles a shard of up to
* `min_kv_length_shard_size_per_thread` kv positions. Per-row max and
* exponential sum are accumulated in dynamic shared memory (`temp_storage`,
* two floats per row) via gpuAtomicMax/gpuAtomicAdd between __syncthreads()
* barriers.
*
* NOTE(review): the `blockDim` parameter shadows the CUDA built-in of the
* same name inside this kernel.
**/
template<typename attention_scores_scalar, int64_t min_kv_length_shard_size_per_thread>
__global__ void forward_masked_softmax_kernel(
    const torch::PackedTensorAccessor32<attention_scores_scalar, 2, torch::RestrictPtrTraits> attention_scores, // [B, KV]
    const torch::PackedTensorAccessor32<bool, 2, torch::RestrictPtrTraits> mask, // [B, KV]
    torch::PackedTensorAccessor32<attention_scores_scalar, 2, torch::RestrictPtrTraits> result, // [B, KV]
    const int64_t effective_kv_length,
    const dim3 blockDim,
    const int64_t rows_per_block,
    const int64_t kv_length,
    const int64_t batch_size
) {
    // Map this thread to (row within block, shard within row).
    const auto row_id = threadIdx.x / effective_kv_length;
    const auto effective_kv_length_id = threadIdx.x % effective_kv_length;
    const auto kv_length_start = effective_kv_length_id * min_kv_length_shard_size_per_thread;
    auto kv_length_end_ = (effective_kv_length_id + 1) * min_kv_length_shard_size_per_thread;
    // Clamp the last shard to the true kv_length.
    kv_length_end_ = (kv_length_end_ > kv_length) ? kv_length : kv_length_end_;
    const auto kv_length_end = kv_length_end_;

    const auto batch_id = blockIdx.x * rows_per_block + row_id;

    // We need 2 float storage for each row, one for max computation, the other for normalizing exponential
    extern __shared__ float temp_storage[];
    const auto row_id_mem_offset = row_id * 2;
    if (effective_kv_length_id == 0) {
        temp_storage[row_id_mem_offset] = -std::numeric_limits<float>::infinity();
        temp_storage[row_id_mem_offset + 1] = 0;
    }
    __syncthreads();

    // Compute mask and max
    if (batch_id < batch_size) {
        float thread_max = -std::numeric_limits<float>::infinity();
        for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) {
            // mask == 0 means the position is visible (not masked out).
            if (mask[batch_id][kv_length_id] == 0) {
                const float candidate = attention_scores[batch_id][kv_length_id];
                thread_max = (thread_max < candidate) ? candidate : thread_max;
            }
        }
        if (thread_max != -std::numeric_limits<float>::infinity()) {
            // TODO @thomasw21 with more memory we can probably compute a much faster `max-reduce` in parallel O(ln(n)) operations in each memory slot
            gpuAtomicMax(&temp_storage[row_id_mem_offset], thread_max);
        }
    }
    __syncthreads();

    // Compute exp(elt - max) masked
    float exponential[min_kv_length_shard_size_per_thread];
    if (batch_id < batch_size) {
        float thread_add = 0;
        for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) {
            if (mask[batch_id][kv_length_id] == 0) {
                // Subtract the row max (read from shared memory) for numerical stability.
                exponential[kv_length_id - kv_length_start] = std::exp(static_cast<float>(attention_scores[batch_id][kv_length_id]) - temp_storage[row_id_mem_offset]);
                thread_add = thread_add + exponential[kv_length_id - kv_length_start];
            } else {
                exponential[kv_length_id - kv_length_start] = 0.;
            }
        }
        if (thread_add > 0) {
            // TODO @thomasw21 with more memory we can probably compute a much faster `sum-reduce` in parallel O(ln(n)) operations in each memory slot
            gpuAtomicAdd(&temp_storage[row_id_mem_offset + 1], thread_add);
        }
    }
    __syncthreads();

    // Compute softmax
    if (batch_id < batch_size) {
        // If sum of all exponential is 0, we set the softmax values to 0
        if (temp_storage[row_id_mem_offset + 1] == 0.) {
            for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) {
                result[batch_id][kv_length_id] = 0.;
            }
        } else {
            for (int kv_length_id = kv_length_start; kv_length_id < kv_length_end; ++kv_length_id) {
                result[batch_id][kv_length_id] = static_cast<attention_scores_scalar>(exponential[kv_length_id - kv_length_start] / temp_storage[row_id_mem_offset + 1]);
            }
        }
    }
}

#define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

/**
* Fused attention forward pass (GPT-NeoX style):
* optional KV-cache concat, scaled QK^T, masked softmax (custom kernel),
* attention-weighted value product, then head merge.
*
* Returns (context_layer, present, attention_probs); `present` holds the
* updated (key, value) cache when `use_cache` is true.
*/
std::tuple<at::Tensor, std::optional<std::vector<at::Tensor>>, at::Tensor> forward(
    const at::Tensor query,
    const at::Tensor key,
    const at::Tensor value,
    const std::optional<std::vector<at::Tensor>> layer_past,
    const at::Tensor attention_mask,
    const std::optional<at::Tensor> head_mask,
    const float inv_norm_factor,
    const int num_heads,
    const bool use_cache
) {
    auto query_layer = query;
    auto key_layer = key;
    auto value_layer = value;

    // Prepend cached keys/values along the sequence dimension (dim 2).
    if (layer_past) {
        const auto past_key = (*layer_past).at(0);
        const auto past_value = (*layer_past).at(1);
        key_layer = at::cat({past_key, key_layer}, 2);
        value_layer = at::cat({past_value, value_layer}, 2);
    }

    std::optional<std::vector<at::Tensor>> present;
    if (use_cache) {
        present = {key_layer, value_layer};
    } else {
        present = {};
    }

    const auto batch_size = query_layer.size(0);
    const auto q_length = query_layer.size(2);
    const auto attn_head_size = query_layer.size(3);
    const auto batch_size_times_num_heads = batch_size * num_heads;
    const auto kv_length = key_layer.size(2);

    // Collapse (batch, heads) into one dim for batched matmul.
    const auto query_view = query_layer.reshape({batch_size_times_num_heads, q_length, attn_head_size});
    auto key_view = key_layer.reshape({batch_size_times_num_heads, kv_length, attn_head_size}).transpose(1, 2);
    auto value_view = value_layer.reshape({batch_size_times_num_heads, kv_length, attn_head_size});

    auto query_scaled = query_view * inv_norm_factor;
    auto attention_scores = at::bmm(query_scaled, key_view);

    // Computing `optionally_cast_fp16_to_fp32 + masked_fill + softmax + cast_to_intial_dtype`
    at::Tensor attention_probs;
    // NOTE(review): `if (true)` makes the custom-kernel path unconditional;
    // the `else` branch below is dead code kept as a reference implementation.
    if (true) {
        // TODO @thomasw21: it's easier to think of attention_scores as 2D tensors
        const auto attention_scores_2d = attention_scores.view({batch_size_times_num_heads * q_length, kv_length});
        const auto attention_mask_2d = attention_mask.view({batch_size_times_num_heads * q_length, kv_length});

        // Custom kernel
        attention_probs = at::empty_like(attention_scores_2d);

        // Check that inputs and contiguous + cuda tensors
        CHECK_INPUT(attention_scores_2d);
        CHECK_INPUT(attention_mask_2d);

        // TODO @thomas21: change by to this as it's cleaner when pytorch 1.13 comes out
        // DISPATCH_CASE_FLOATING_TYPES(attention_scores.scalar_type(), "masked_softmax", [&] {
        AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, attention_scores.scalar_type(), "masked_softmax", [&] {
            /*
            * Understanding how GPUs work: https://developer.nvidia.com/blog/cuda-refresher-cuda-programming-model/
            * A100 specifications: https://images.nvidia.com/aem-dam/en-zz/Solutions/data-center/nvidia-ampere-architecture-whitepaper.pdf
            *  - SMs: 108
            *  - TPCs: 56 (What's that?)
            *  - Memory size: 40 GB
            *  - L2 Cache size: 40960 KB (shared across all SMs)
            *  - L1/Shared memory size: 192 KB (shared across all threads within a SM)
            *  - Max Threads / SM: 2048
            *  - Max Thread Blocks / SM: 32
            */

            /*
            * We should split [batch_size_times_num_heads_block, q_length] in seperate blocks and [batch_size_times_num_heads_block_size, kv_length] a single block
            * with multiple threads as we need to `sync_threads` to run exponential sum.
            * We maximise the usage of threads within a single block
            */
            // TODO @thomasw21 figure out everything warp related:
            //  - why do they have to be power of 2
            // TODO @thomas21 check why everyone is setting 1024 when officially it's 2048
            const auto MAX_THREADS_PER_SM = 1024;
            // TODO @thomasw21 figure out how to have longer sequences, currently the maximum is `max_kv_length = MAX_THREADS_PER_SM * MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD`
            const auto MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD = 4;
            // `effective_kv_length = ceil(kv_length / MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD)`
            const auto effective_kv_length = (kv_length - 1)/ MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD + 1;
            const auto rows_per_block = MAX_THREADS_PER_SM / effective_kv_length;
            const auto num_blocks = (batch_size_times_num_heads * q_length - 1) / rows_per_block + 1;

            const dim3 gridDim(num_blocks); // Number of blocks that run
            const dim3 blockDim(MAX_THREADS_PER_SM); // Number of threads that run per block
            // Two floats of shared memory per row: running max and exp-sum.
            const int shared_mem_forward = rows_per_block * 2 * sizeof(float);

            // 192 * 2 ** 10
            // const auto MAX_L1_MEMORY = 196608;
            // const auto MAX_SMs = 108;
            // TORCH_CHECK(batch_size_times_num_heads * q_length <= MAX_L1_MEMORY, "Shared memory exceeds 192KB limitation.");
            // TORCH_CHECK(gridDim.x * gridDim.y * gridDim.z <= MAX_SMs, "A100s only have 108 SMs. Raising as require blocks is bigger.");
            // TORCH_CHECK(blockDim.x * blockDim.y * blockDim.z <= MAX_THREADS_PER_SM, "A100s only have 2048 threads per block. Raising as require requested threads is higher.");

            forward_masked_softmax_kernel<scalar_t, MIN_KV_LENGTH_SHARD_SIZE_PER_THREAD><<<gridDim, blockDim, shared_mem_forward>>>(
                attention_scores_2d.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
                attention_mask_2d.packed_accessor32<bool, 2, torch::RestrictPtrTraits>(),
                attention_probs.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
                effective_kv_length,
                blockDim,
                rows_per_block,
                kv_length,
                batch_size_times_num_heads * q_length
            );
        });
        attention_probs = attention_probs.view({batch_size_times_num_heads, q_length, kv_length});
    } else {
        // Pytorch C++ API
        auto input_dtype = attention_scores.scalar_type();
        // NOTE(review): this condition looks inverted — converting Float to
        // Float is a no-op; presumably Half was meant (upcast fp16 before
        // softmax). Harmless today because this branch is unreachable
        // (`if (true)` above), but should be fixed if the branch is revived.
        if (input_dtype == at::ScalarType::Float) {
            attention_scores = attention_scores.to(at::ScalarType::Float);
        };
        // TODO @thomasw21 Figure out how to get minimum value
        auto attn_weights = attention_scores.masked_fill_(attention_mask, -1e34);
        attention_probs = attn_weights.softmax(-1, at::ScalarType::Float).to(input_dtype);
    }

    auto context_layer = attention_probs.bmm(value_view);

    // `_merge_heads`: [B*H, Q, D] -> [B, Q, H*D]
    context_layer = context_layer.view({batch_size, num_heads, q_length, attn_head_size});
    context_layer = context_layer.permute({0, 2, 1, 3});
    context_layer = context_layer.reshape({batch_size, q_length, attn_head_size * num_heads});

    return std::make_tuple(context_layer, present, attention_probs);
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def(
        "forward",
        &forward,
        "GPT-Neox attention mechanism forward (CUDA)"
    );
}
text-generation-inference/server/custom_kernels/custom_kernels/fused_attention_cuda.cu/0
{ "file_path": "text-generation-inference/server/custom_kernels/custom_kernels/fused_attention_cuda.cu", "repo_id": "text-generation-inference", "token_count": 5265 }
323
// Adapted from turboderp exllama: https://github.com/turboderp/exllama #ifndef _util_cuh #define _util_cuh #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cstdint> #include <cstdio> #if defined(USE_ROCM) #define cudaUnspecified hipErrorUnknown #else #define cudaUnspecified cudaErrorApiFailureBase #endif // React to failure on return code != cudaSuccess #define _cuda_check(fn) \ do { \ {_cuda_err = fn;} \ if (_cuda_err != cudaSuccess) goto _cuda_fail; \ } while(false) // React to failure on return code == 0 #define _alloc_check(fn) \ do { \ if (!(fn)) { _cuda_err = cudaUnspecified; goto _cuda_fail; } \ else _cuda_err = cudaSuccess; \ } while(false) #endif
text-generation-inference/server/exllama_kernels/exllama_kernels/util.cuh/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/util.cuh", "repo_id": "text-generation-inference", "token_count": 283 }
324
#ifndef _qdq_6_cuh #define _qdq_6_cuh #include "qdq_util.cuh" #include "../../config.h" #if QMODE_6BIT == 1 // Not implemented #else __forceinline__ __device__ void shuffle_6bit_16 ( uint32_t* q, int stride ) { } __forceinline__ __device__ void dequant_6bit_16 ( const uint32_t q_0, const uint32_t q_1, const uint32_t q_2, half2 (&dq)[8], int stride ) { half dqh[16]; for (int i = 0; i < 5; i++) dqh[ i] = dq_ns(exb( q_0, i * 6 , 0x3f), 32); dqh[ 5 ] = dq_ns(exb(q_1, q_0, 30, 0x3f), 32); for (int i = 0; i < 4; i++) dqh[ 6 + i] = dq_ns(exb( q_1, i * 6 + 4, 0x3f), 32); dqh[10 ] = dq_ns(exb(q_2, q_1, 28, 0x3f), 32); for (int i = 0; i < 5; i++) dqh[11 + i] = dq_ns(exb( q_2, i * 6 + 2, 0x3f), 32); for (int i = 0; i < 8; i++) dq[i] = __halves2half2(dqh[i * 2], dqh[i * 2 + 1]); } #endif #endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_6.cuh/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_6.cuh", "repo_id": "text-generation-inference", "token_count": 571 }
325
import pytest import torch from transformers import AutoTokenizer from text_generation_server.models import Model def get_test_model(): class TestModel(Model): def batch_type(self): raise NotImplementedError def generate_token(self, batch): raise NotImplementedError tokenizer = AutoTokenizer.from_pretrained("huggingface/llama-7b") model = TestModel( "test_model_id", torch.nn.Linear(1, 1), tokenizer, False, torch.float32, torch.device("cpu"), ) return model @pytest.mark.private def test_decode_streaming_english_spaces(): model = get_test_model() truth = "Hello here, this is a simple test" all_input_ids = [15043, 1244, 29892, 445, 338, 263, 2560, 1243] assert ( all_input_ids == model.tokenizer(truth, add_special_tokens=False)["input_ids"] ) decoded_text = "" offset = 0 token_offset = 0 for i in range(len(all_input_ids)): text, offset, token_offset = model.decode_token( all_input_ids[: i + 1], offset, token_offset ) decoded_text += text assert decoded_text == truth @pytest.mark.private def test_decode_streaming_chinese_utf8(): model = get_test_model() truth = "我很感谢你的热情" all_input_ids = [ 30672, 232, 193, 139, 233, 135, 162, 235, 179, 165, 30919, 30210, 234, 134, 176, 30993, ] decoded_text = "" offset = 0 token_offset = 0 for i in range(len(all_input_ids)): text, offset, token_offset = model.decode_token( all_input_ids[: i + 1], offset, token_offset ) decoded_text += text assert decoded_text == truth
text-generation-inference/server/tests/models/test_model.py/0
{ "file_path": "text-generation-inference/server/tests/models/test_model.py", "repo_id": "text-generation-inference", "token_count": 876 }
326
from .loader import CompressedTensorsLoader __all__ = ["CompressedTensorsLoader"]
text-generation-inference/server/text_generation_server/layers/compressed_tensors/__init__.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/compressed_tensors/__init__.py", "repo_id": "text-generation-inference", "token_count": 25 }
327
import math import numpy as np import torch import torch.nn as nn from torch.cuda.amp import custom_fwd import triton import triton.language as tl from . import custom_autotune # code based https://github.com/fpgaminer/GPTQ-triton @custom_autotune.autotune( configs=[ triton.Config( { "BLOCK_SIZE_M": 64, "BLOCK_SIZE_N": 256, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 8, }, num_stages=4, num_warps=4, ), triton.Config( { "BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 128, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 8, }, num_stages=4, num_warps=4, ), triton.Config( { "BLOCK_SIZE_M": 64, "BLOCK_SIZE_N": 128, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 8, }, num_stages=4, num_warps=4, ), triton.Config( { "BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 32, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 8, }, num_stages=4, num_warps=4, ), triton.Config( { "BLOCK_SIZE_M": 64, "BLOCK_SIZE_N": 64, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 8, }, num_stages=4, num_warps=4, ), triton.Config( { "BLOCK_SIZE_M": 64, "BLOCK_SIZE_N": 128, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 8, }, num_stages=2, num_warps=8, ), triton.Config( { "BLOCK_SIZE_M": 64, "BLOCK_SIZE_N": 64, "BLOCK_SIZE_K": 64, "GROUP_SIZE_M": 8, }, num_stages=3, num_warps=8, ), triton.Config( { "BLOCK_SIZE_M": 32, "BLOCK_SIZE_N": 32, "BLOCK_SIZE_K": 128, "GROUP_SIZE_M": 8, }, num_stages=2, num_warps=4, ), ], key=["M", "N", "K"], nearest_power_of_two=True, prune_configs_by={ "early_config_prune": custom_autotune.matmul248_kernel_config_pruner, "perf_model": None, "top_k": None, }, ) @triton.jit def matmul_248_kernel( a_ptr, b_ptr, c_ptr, scales_ptr, zeros_ptr, g_ptr, M, N, K, bits, maxq, stride_am, stride_ak, stride_bk, stride_bn, stride_cm, stride_cn, stride_scales, stride_zeros, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr, ): """ Compute the matrix multiplication C = A x B. 
A is of shape (M, K) float16 B is of shape (K//8, N) int32 C is of shape (M, N) float16 scales is of shape (G, N) float16 zeros is of shape (G, N) float16 g_ptr is of shape (K) int32 """ infearure_per_bits = 32 // bits pid = tl.program_id(axis=0) num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) num_pid_k = tl.cdiv(K, BLOCK_SIZE_K) num_pid_in_group = GROUP_SIZE_M * num_pid_n group_id = pid // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + (pid % group_size_m) pid_n = (pid % num_pid_in_group) // group_size_m offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) offs_k = tl.arange(0, BLOCK_SIZE_K) a_ptrs = a_ptr + ( offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak ) # (BLOCK_SIZE_M, BLOCK_SIZE_K) a_mask = offs_am[:, None] < M # b_ptrs is set up such that it repeats elements along the K axis 8 times b_ptrs = b_ptr + ( (offs_k[:, None] // infearure_per_bits) * stride_bk + offs_bn[None, :] * stride_bn ) # (BLOCK_SIZE_K, BLOCK_SIZE_N) g_ptrs = g_ptr + offs_k # shifter is used to extract the N bits of each element in the 32-bit word from B scales_ptrs = scales_ptr + offs_bn[None, :] zeros_ptrs = zeros_ptr + (offs_bn[None, :] // infearure_per_bits) shifter = (offs_k % infearure_per_bits) * bits zeros_shifter = (offs_bn % infearure_per_bits) * bits accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for k in range(0, num_pid_k): g_idx = tl.load(g_ptrs) # Fetch scales and zeros; these are per-outfeature and thus reused in the inner loop scales = tl.load( scales_ptrs + g_idx[:, None] * stride_scales ) # (BLOCK_SIZE_K, BLOCK_SIZE_N,) zeros = tl.load( zeros_ptrs + g_idx[:, None] * stride_zeros ) # (BLOCK_SIZE_K, BLOCK_SIZE_N,) zeros = (zeros >> zeros_shifter[None, :]) & maxq zeros = (zeros + 1) & maxq # eventually avoid overflow a = tl.load(a_ptrs, mask=a_mask, other=0.0) # 
(BLOCK_SIZE_M, BLOCK_SIZE_K) b = tl.load(b_ptrs) # (BLOCK_SIZE_K, BLOCK_SIZE_N), but repeated # Now we need to unpack b (which is N-bit values) into 32-bit values b = (b >> shifter[:, None]) & maxq # Extract the N-bit values b = (b - zeros) * scales # Scale and shift accumulator += tl.dot(a, b) a_ptrs += BLOCK_SIZE_K b_ptrs += (BLOCK_SIZE_K // infearure_per_bits) * stride_bk g_ptrs += BLOCK_SIZE_K c_ptrs = c_ptr + stride_cm * offs_am[:, None] + stride_cn * offs_bn[None, :] c_mask = (offs_am[:, None] < M) & (offs_bn[None, :] < N) tl.store(c_ptrs, accumulator, mask=c_mask) def matmul248(input, qweight, scales, qzeros, g_idx, bits, maxq): with ( torch.xpu.device(input.device) if torch.xpu.is_available() else torch.cuda.device(input.device) ): output = torch.empty( (input.shape[0], qweight.shape[1]), device=input.device, dtype=torch.float16 ) def grid(META): return ( triton.cdiv(input.shape[0], META["BLOCK_SIZE_M"]) * triton.cdiv(qweight.shape[1], META["BLOCK_SIZE_N"]), ) matmul_248_kernel[grid]( input, qweight, output, scales, qzeros, g_idx, input.shape[0], qweight.shape[1], input.shape[1], bits, maxq, input.stride(0), input.stride(1), qweight.stride(0), qweight.stride(1), output.stride(0), output.stride(1), scales.stride(0), qzeros.stride(0), ) return output class QuantLinearFunction(torch.autograd.Function): @staticmethod @custom_fwd(cast_inputs=torch.float16) def forward(ctx, input, qweight, scales, qzeros, g_idx, bits, maxq): output = matmul248(input, qweight, scales, qzeros, g_idx, bits, maxq) return output class QuantLinear(nn.Module): def __init__(self, qweight, qzeros, scales, g_idx, bias, bits, groupsize): super().__init__() self.register_buffer("qweight", qweight) self.register_buffer("qzeros", qzeros) self.register_buffer("scales", scales) self.register_buffer("g_idx", g_idx) if bias is not None: self.register_buffer("bias", bias) else: self.bias = None if bits not in [2, 4, 8]: raise NotImplementedError("Only 2,4,8 bits are supported.") self.bits = bits 
self.maxq = 2**self.bits - 1 self.groupsize = groupsize self.outfeatures = qweight.shape[1] self.infeatures = qweight.shape[0] * 32 // bits @classmethod def new(cls, bits, groupsize, infeatures, outfeatures, bias): if bits not in [2, 4, 8]: raise NotImplementedError("Only 2,4,8 bits are supported.") qweight = torch.zeros((infeatures // 32 * bits, outfeatures), dtype=torch.int32) qzeros = torch.zeros( (math.ceil(infeatures / groupsize), outfeatures // 32 * bits), dtype=torch.int32, ) scales = torch.zeros( (math.ceil(infeatures / groupsize), outfeatures), dtype=torch.float16 ) g_idx = torch.tensor( [i // groupsize for i in range(infeatures)], dtype=torch.int32 ) if bias: bias = torch.zeros((outfeatures), dtype=torch.float16) else: bias = None return cls(qweight, qzeros, scales, g_idx, bias, bits, groupsize) def pack(self, linear, scales, zeros, g_idx=None): self.g_idx = g_idx.clone() if g_idx is not None else self.g_idx scales = scales.t().contiguous() zeros = zeros.t().contiguous() scale_zeros = zeros * scales self.scales = scales.clone().half() if linear.bias is not None: self.bias = linear.bias.clone().half() intweight = [] for idx in range(self.infeatures): intweight.append( torch.round( (linear.weight.data[:, idx] + scale_zeros[self.g_idx[idx]]) / self.scales[self.g_idx[idx]] ).to(torch.int)[:, None] ) intweight = torch.cat(intweight, dim=1) intweight = intweight.t().contiguous() intweight = intweight.numpy().astype(np.uint32) qweight = np.zeros( (intweight.shape[0] // 32 * self.bits, intweight.shape[1]), dtype=np.uint32 ) i = 0 row = 0 while row < qweight.shape[0]: if self.bits in [2, 4, 8]: for j in range(i, i + (32 // self.bits)): qweight[row] |= intweight[j] << (self.bits * (j - i)) i += 32 // self.bits row += 1 else: raise NotImplementedError("Only 2,4,8 bits are supported.") qweight = qweight.astype(np.int32) self.qweight = torch.from_numpy(qweight) zeros -= 1 zeros = zeros.numpy().astype(np.uint32) qzeros = np.zeros( (zeros.shape[0], zeros.shape[1] // 32 
* self.bits), dtype=np.uint32 ) i = 0 col = 0 while col < qzeros.shape[1]: if self.bits in [2, 4, 8]: for j in range(i, i + (32 // self.bits)): qzeros[:, col] |= zeros[:, j] << (self.bits * (j - i)) i += 32 // self.bits col += 1 else: raise NotImplementedError("Only 2,4,8 bits are supported.") qzeros = qzeros.astype(np.int32) self.qzeros = torch.from_numpy(qzeros) def forward(self, x): out_shape = x.shape[:-1] + (self.outfeatures,) out = QuantLinearFunction.apply( x.reshape(-1, x.shape[-1]), self.qweight, self.scales, self.qzeros, self.g_idx, self.bits, self.maxq, ) out = out + self.bias if self.bias is not None else out return out.reshape(out_shape)
text-generation-inference/server/text_generation_server/layers/gptq/triton.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/gptq/triton.py", "repo_id": "text-generation-inference", "token_count": 6339 }
328
from typing import Callable, List, Optional import torch import torch.nn as nn from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.utils.kernels import load_kernel from text_generation_server.utils.weights import UnquantizedWeight, Weights if SYSTEM == "ipex": from intel_extension_for_pytorch.llm.modules import GatedMLPMOE elif SYSTEM == "cuda": moe_kernels = load_kernel(module="moe", repo_id="kernels-community/moe") else: import moe_kernels class UnquantizedSparseMoELayer(nn.Module): def __init__( self, *, n_expert_group: Optional[int], n_experts: int, prefix: str, renormalize: bool, topk: int, topk_group: Optional[int], weights: Weights, scoring_func: Optional[str] = "softmax", e_score_correction_bias: Optional[float] = None, gate_proj_name: str = "gate_proj", up_proj_name: str = "up_proj", down_proj_name: str = "down_proj", ): super().__init__() assert (n_expert_group is None) == ( topk_group is None ), "n_expert_group and topk_group must both be None or have some value" self.n_expert_group = n_expert_group self.topk = topk self.topk_group = topk_group self.renormalize = renormalize self.weight_block_size = weights.weights_loader.weight_block_size self.scoring_func = scoring_func self.e_score_correction_bias = e_score_correction_bias self.gate_up_proj = _load_expert_multi_weights_col( prefix=prefix, n_experts=n_experts, gate_proj_name=gate_proj_name, up_proj_name=up_proj_name, weights=weights, ) self.down_proj = _load_expert_weights_row( prefix=prefix, n_experts=n_experts, name=down_proj_name, weights=weights, ) if SYSTEM == "ipex": self.ipex_fused_moe = GatedMLPMOE( W13=self.gate_up_proj, W2=self.down_proj, use_prepack=True ) def forward(self, x: torch.Tensor, *, gating_output: torch.Tensor) -> torch.Tensor: if SYSTEM == "rocm": return moe_kernels.fused_moe( x, self.gate_up_proj, self.down_proj, gating_output, self.topk, renormalize=self.renormalize, inplace=True, ) elif SYSTEM == "ipex": return self.ipex_fused_moe( 
hidden_states=x, router_logits=gating_output, top_k=self.topk, renormalize=self.renormalize, use_grouped_topk=self.n_expert_group is not None, num_expert_group=self.n_expert_group, topk_group=self.topk_group, scoring_func=self.scoring_func, e_score_correction_bias=self.e_score_correction_bias, ) return fused_moe( x, w1=self.gate_up_proj, w2=self.down_proj, gating_output=gating_output, topk=self.topk, renormalize=self.renormalize, inplace=True, use_grouped_topk=self.n_expert_group is not None, num_expert_group=self.n_expert_group, topk_group=self.topk_group, scoring_func=self.scoring_func, e_score_correction_bias=self.e_score_correction_bias, ) def _load_expert_multi_weights_col( *, prefix: str, n_experts: int, gate_proj_name: str, up_proj_name: str, weights: Weights, ) -> torch.Tensor: all_weight = None for i in range(n_experts): weight = weights.get_multi_weights_col( [f"{prefix}.{i}.{gate_proj_name}", f"{prefix}.{i}.{up_proj_name}"], 0 ) assert isinstance(weight, UnquantizedWeight) if all_weight is None: all_weight = torch.empty( (n_experts,) + weight.weight.shape, dtype=weight.weight.dtype, device=weight.weight.device, ) all_weight[i] = weight.weight assert all_weight is not None return all_weight def _load_expert_weights_row( *, prefix: str, n_experts: int, name: str, weights: Weights, ) -> torch.Tensor: all_weight = None for i in range(n_experts): weight = weights.get_weights_row( f"{prefix}.{i}.{name}", ) assert isinstance(weight, UnquantizedWeight) if all_weight is None: all_weight = torch.empty( (n_experts,) + weight.weight.shape, dtype=weight.weight.dtype, device=weight.weight.device, ) all_weight[i] = weight.weight assert all_weight is not None return all_weight def fused_moe( hidden_states: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, gating_output: torch.Tensor, topk: int, renormalize: bool, inplace: bool = False, use_grouped_topk: bool = False, num_expert_group: Optional[int] = None, topk_group: Optional[int] = None, custom_routing_function: 
Optional[Callable] = None, scoring_func: str = "softmax", e_score_correction_bias: Optional[torch.Tensor] = None, use_fp8_w8a8: bool = False, use_int8_w8a16: bool = False, use_int4_w4a16: bool = False, w1_scale: Optional[torch.Tensor] = None, w2_scale: Optional[torch.Tensor] = None, a1_scale: Optional[torch.Tensor] = None, a2_scale: Optional[torch.Tensor] = None, block_shape: Optional[List[int]] = None, ) -> torch.Tensor: """ This function computes a Mixture of Experts (MoE) layer using two sets of weights, w1 and w2, and top-k gating mechanism. Parameters: - hidden_states (torch.Tensor): The input tensor to the MoE layer. - w1 (torch.Tensor): The first set of expert weights. - w2 (torch.Tensor): The second set of expert weights. - gating_output (torch.Tensor): The output of the gating operation (before softmax). - topk (int): The number of top-k experts to select. - renormalize (bool): If True, renormalize the top-k weights to sum to 1. - inplace (bool): If True, perform the operation in-place. Defaults to False. - num_expert_group: Optional[int]: additional parameter for grouped_topk - topk_group: Optional[int]: additional parameter for grouped_topk - use_grouped_topk: If True, use grouped_topk instead of fused_topk note: Deepseekv2 model uses grouped_topk - use_fp8_w8a8 (bool): If True, use fp8 arithmetic to compute the inner products for w1 and w2. Defaults to False. - use_int8_w8a16 (bool): If True, use fp8 arithmetic to compute the inner products for w1 and w2. Defaults to False. - use_int4_w4a16 (bool): If True, use matmul of int4 weight and bf16/fp16 activation to compute the inner products for w1 and w2. Defaults to False. - w1_scale (Optional[torch.Tensor]): Optional scale to be used for w1. - w2_scale (Optional[torch.Tensor]): Optional scale to be used for w2. - a1_scale (Optional[torch.Tensor]): Optional scale to be used for a1. - a2_scale (Optional[torch.Tensor]): Optional scale to be used for a2. 
- block_shape: (Optional[List[int]]): Optional block size for block-wise quantization. Returns: - torch.Tensor: The output tensor after applying the MoE layer. """ # Check constraints. assert gating_output.shape[1] == w1.shape[0], "Number of experts mismatch" if use_grouped_topk: assert num_expert_group is not None and topk_group is not None from loguru import logger import inspect logger.info(f"{inspect.signature(moe_kernels.grouped_topk)}") topk_weights, topk_ids = moe_kernels.grouped_topk( hidden_states, gating_output, topk, renormalize, num_expert_group, topk_group, scoring_func=scoring_func, e_score_correction_bias=e_score_correction_bias, ) elif custom_routing_function is None: topk_weights, topk_ids = moe_kernels.fused_topk( hidden_states, gating_output, topk, renormalize ) else: topk_weights, topk_ids = custom_routing_function( hidden_states, gating_output, topk, renormalize ) return moe_kernels.fused_experts( hidden_states, w1, w2, topk_weights, topk_ids, inplace=inplace, use_fp8_w8a8=use_fp8_w8a8, use_int8_w8a16=use_int8_w8a16, use_int4_w4a16=use_int4_w4a16, w1_scale=w1_scale, w2_scale=w2_scale, a1_scale=a1_scale, a2_scale=a2_scale, block_shape=block_shape, )
text-generation-inference/server/text_generation_server/layers/moe/unquantized.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/moe/unquantized.py", "repo_id": "text-generation-inference", "token_count": 4359 }
329
# coding=utf-8 # Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from typing import List, Optional, Union from transformers.feature_extraction_utils import BatchFeature from transformers.image_utils import ImageInput from transformers.processing_utils import ( ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack, ) from transformers.tokenization_utils_base import PreTokenizedInput, TextInput from transformers.utils import to_py_obj from text_generation_server.models.custom_modeling.gemma3.image_processing_gemma3 import ( Gemma3ImageProcessor, ) from transformers.image_utils import PILImageResampling from .utils import make_nested_list_of_images class Gemma3ImagesKwargs(ImagesKwargs): do_pan_and_scan: Optional[bool] pan_and_scan_min_crop_size: Optional[int] pan_and_scan_max_num_crops: Optional[int] pan_and_scan_min_ratio_to_activate: Optional[float] do_convert_rgb: Optional[bool] class Gemma3ProcessorKwargs(ProcessingKwargs, total=False): _defaults = { "text_kwargs": { "padding": False, }, "images_kwargs": { "do_pan_and_scan": False, "pan_and_scan_min_crop_size": 256, "pan_and_scan_max_num_crops": 4, "pan_and_scan_min_ratio_to_activate": 1.2, }, } class Gemma3Processor(ProcessorMixin): attributes = ["image_processor", "tokenizer"] valid_kwargs = ["chat_template"] # # image_processor_class = "Gemma3ImageProcessor" image_processor_class = "AutoProcessor" tokenizer_class = "AutoTokenizer" def 
__init__( self, image_processor, tokenizer, chat_template=None, num_mm_soft_tokens_per_image: int = 256, **kwargs, ): num_mm_soft_tokens_per_image = 256 chat_template = None image_processor = Gemma3ImageProcessor( image_mean=(127.5,) * 3, image_std=(127.5,) * 3, size={"height": 896, "width": 896}, do_rescale=False, resample=PILImageResampling.BILINEAR, ) self.image_token_id = tokenizer.image_token_id image_tokens_expanded = "".join( [tokenizer.image_token] * num_mm_soft_tokens_per_image ) self.full_image_sequence = ( f"\n\n{tokenizer.boi_token}{image_tokens_expanded}{tokenizer.eoi_token}\n\n" ) self.image_processor = image_processor self.tokenizer = tokenizer self.chat_template = chat_template # super().__init__( # image_processor=image_processor, # tokenizer=tokenizer, # chat_template=chat_template, # **kwargs, # ) def __call__( self, images: ImageInput = None, text: Union[ TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput] ] = None, videos=None, audio=None, **kwargs: Unpack[Gemma3ProcessorKwargs], ) -> BatchFeature: if text is None and images is None: raise ValueError("Provide at least one of `text` or `images`.") output_kwargs = self._merge_kwargs( Gemma3ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if isinstance(text, str): text = [text] elif not isinstance(text, list) and not isinstance(text[0], str): raise ValueError( "Invalid input text. Please provide a string, or a list of strings" ) image_inputs = {} if images is not None: batched_images = make_nested_list_of_images(images) image_inputs = self.image_processor( batched_images, **output_kwargs["images_kwargs"] ) # Create empty text to be replaced with placeholders if not text: text = [ " ".join(["<image>"] * len(images)) for images in batched_images ] if len(batched_images) != len(text): raise ValueError( f"Received inconsistently sized batches of images ({len(batched_images)}) and text ({len(text)})." 
) # Replace image tokens by the full expanded sequence batch_num_crops = to_py_obj(image_inputs.pop("num_crops")) for prompt, images, num_crops in zip(text, batched_images, batch_num_crops): image_indexes = [m.start() for m in re.finditer("<image>", prompt)] if len(images) != len(image_indexes): raise ValueError( f"Prompt contained {len(image_indexes)} image tokens but received {len(images)} images." ) # Insert additional image tokens for Pan-and-Scan crops for num, idx in reversed(list(zip(num_crops, image_indexes))): if num: formatted_image_text = ( "Here is the original image <image> and here are some crops to help you see better " + " ".join(["<image>"] * num) ) prompt = ( prompt[:idx] + formatted_image_text + prompt[idx + len("<image>") :] ) # Expand placeholder image tokens to the full image token sequence text = [ prompt.replace("<image>", self.full_image_sequence) for prompt in text ] text_input = self.tokenizer(text=text, **output_kwargs["text_kwargs"]) return BatchFeature(data={**text_input, **image_inputs}) # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Gemma def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to GemmaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Gemma def decode(self, *args, **kwargs): """ This method forwards all its arguments to GemmaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. 
""" return self.tokenizer.decode(*args, **kwargs) @property # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names with CLIP->PaliGemma def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) __all__ = ["Gemma3Processor"]
text-generation-inference/server/text_generation_server/models/custom_modeling/gemma3/processing_gemma3.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/gemma3/processing_gemma3.py", "repo_id": "text-generation-inference", "token_count": 3387 }
330
# imlementation of the PhiModel and PhiForCausalLM classes import torch import torch.distributed import math from torch import nn from typing import Optional, List, Tuple from transformers.configuration_utils import PretrainedConfig from transformers.modeling_outputs import CausalLMOutputWithPast from text_generation_server.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, FastLinear, ) # PhiConfig is the configuration class for the PhiModel. class PhiConfig(PretrainedConfig): def __init__( self, vocab_size=51200, n_positions=2048, n_embd=2560, n_layer=32, n_inner=None, n_head=32, rotary_dim=32, layer_norm_epsilon=1e-5, tie_word_embeddings=False, pad_vocab_size_multiple=64, pad_token_id=0, bos_token_id=1, eos_token_id=2, no_bias=False, **kwargs, ): self.vocab_size = vocab_size self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_inner = n_inner self.n_head = n_head self.rotary_dim = rotary_dim self.layer_norm_epsilon = layer_norm_epsilon self.tie_word_embeddings = tie_word_embeddings self.pad_vocab_size_multiple = pad_vocab_size_multiple self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.no_bias = no_bias super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) # RotaryEmbedding is a class that implements the rotary embedding. 
class RotaryEmbedding(nn.Module): def __init__(self, dim, max_seq_len): super().__init__() inv_freq = [1.0 / 10000.0 ** (i / dim) for i in range(0, dim, 2)] inv_freq_len = len(inv_freq) inv_freq = torch.tensor(inv_freq).view(1, inv_freq_len) t = torch.arange(0, max_seq_len, dtype=torch.float).view(max_seq_len, 1) freqs = t.matmul(inv_freq) self.sin = freqs.sin() self.cos = freqs.cos() def apply_rotary_emb_qkv(self, qkv, seqlen_offset): b_size, seqlen, three, _, _headdim = qkv.shape if three != 3: raise Exception("unexpected shape for qkv") _, rotary_dim = self.cos.shape rotary_dim = rotary_dim * 2 q_rot = qkv[:, :, 0, :, :rotary_dim] q_pass = qkv[:, :, 0, :, rotary_dim:] k_rot = qkv[:, :, 1, :, :rotary_dim] k_pass = qkv[:, :, 1, :, rotary_dim:] q12 = torch.chunk(q_rot, 2, dim=-1) k12 = torch.chunk(k_rot, 2, dim=-1) q1, q2 = q12[0], q12[1] k1, k2 = k12[0], k12[1] c = self.cos.narrow(0, seqlen_offset, seqlen).unsqueeze(1) s = self.sin.narrow(0, seqlen_offset, seqlen).unsqueeze(1) q_rot = torch.cat( [ q1 * c - q2 * s, q1 * s + q2 * c, ], dim=-1, ) k_rot = torch.cat( [ k1 * c - k2 * s, k1 * s + k2 * c, ], dim=-1, ) q = torch.cat([q_rot, q_pass], dim=-1) k = torch.cat([k_rot, k_pass], dim=-1) v = qkv[:, :, 2] return q, k, v # PhiCausalLMHead is the head of the PhiModel. It is a linear layer with a layer norm. class PhiCausalLMHead(nn.Module): def __init__(self, config, weights): super().__init__() self.ln = nn.LayerNorm.load( prefix="lm_head.ln", weights=weights, eps=config.layer_norm_epsilon, ) self.linear = SpeculativeHead.load( config=config, prefix="lm_head.linear", weights=weights ) def forward(self, hidden_states): hidden_states = self.ln(hidden_states) hidden_states = self.linear(hidden_states) return hidden_states # PhiMHA is a multi-head attention layer. This layer uses an attention mask to prevent tokens from attending to subsequent tokens. 
class PhiMHA(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.Wqkv = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.Wqkv", weights=weights, bias=not config.no_bias ) self.out_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.out_proj", weights=weights, bias=not config.no_bias, ) self.op_size = config.n_embd self.head_dim = int(config.n_embd / config.n_head) self.num_heads = config.n_head self.rotary_emb = RotaryEmbedding( config.rotary_dim, config.n_positions, ) self.softmax_scale = 1.0 / math.sqrt(self.head_dim) def forward( self, hidden_states, past_kv_cache, attention_mask=None, ): b_size, seq_len, _n_embd = hidden_states.shape qkv = self.Wqkv(hidden_states) qkv = qkv.view(b_size, seq_len, 3, self.num_heads, self.head_dim) seqlen_offset = 0 if past_kv_cache is None else past_kv_cache[0].shape[1] q, k, v = self.rotary_emb.apply_rotary_emb_qkv(qkv, seqlen_offset) # if there is a kv_cache, then we need to concatenate if past_kv_cache is not None: prev_k, prev_v = past_kv_cache k = torch.cat([prev_k, k], dim=1) v = torch.cat([prev_v, v], dim=1) past_kv_cache = [k, v] attn_weights = torch.einsum("bthd,bshd->bhts", q, k * self.softmax_scale) if attention_mask is not None: seqlen_k = k.shape[1] seqlen_q = q.shape[1] causal_mask = torch.triu( torch.full((seqlen_q, seqlen_k), -10000.0, device=attn_weights.device), 1, ) attn_weights = attn_weights + causal_mask.to(dtype=attn_weights.dtype) attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1) attn_output = attn_weights.matmul(v.transpose(1, 2)).squeeze(0) attn_output = ( attn_output.view((b_size, self.num_heads, seq_len, self.head_dim)) .transpose(1, 2) .flatten(-2) ) return self.out_proj(attn_output), past_kv_cache # PhiMLP is a multi-layer perceptron. It contains two linear layers with a gelu activation function. 
class PhiMLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.n_inner = config.n_inner self.fc1 = FastLinear.load( config=config, prefix=f"{prefix}.fc1", weights=weights, bias=False, ) self.fc2 = FastLinear.load( config=config, prefix=f"{prefix}.fc2", weights=weights, bias=False, ) self.activation = torch.nn.functional.gelu def forward(self, hidden_states): hidden_states = self.fc1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states # PhiBlock is a single transformer block. It contains a layer norm, a multi-head attention layer and an multi-layer perceptron. class PhiBlock(nn.Module): def __init__(self, layer_id, config, weights): super().__init__() self.layer_id = layer_id self.layer_norm = nn.LayerNorm.load( prefix=f"{layer_id}.ln", weights=weights, eps=config.layer_norm_epsilon ) self.mixer = PhiMHA(prefix=f"{layer_id}.mixer", config=config, weights=weights) self.mlp = PhiMLP(prefix=f"{layer_id}.mlp", config=config, weights=weights) def forward( self, hidden_states, kv_cache, attention_mask, ): residual = hidden_states hidden_states = self.layer_norm(hidden_states) attn_outputs, past_kv_cache = self.mixer( hidden_states, kv_cache, attention_mask ) feed_forward_hidden_states = self.mlp(hidden_states) out = attn_outputs + feed_forward_hidden_states + residual return out, past_kv_cache # PhiModel implements the embedding layer and the transformer blocks. 
class PhiModel(nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.tp_rank = weights.process_group.rank() self.tp_world_size = weights.process_group.size() self.embed_tokens = TensorParallelEmbedding( prefix=f"{prefix}.embd.wte", weights=weights ) self.blocks = nn.ModuleList( [ PhiBlock(f"{prefix}.h.{layer_id}", config, weights) for layer_id in range(config.n_layer) ] ) def forward( self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None, attention_mask: Optional[torch.ByteTensor] = None, return_dict: Optional[bool] = None, use_cache: Optional[bool] = None, ) -> Tuple[torch.Tensor, List[Tuple[torch.Tensor, torch.Tensor]]]: hidden_states = self.embed_tokens(input_ids) seq_len = hidden_states.shape[1] mask = None if seq_len <= 1 else attention_mask past_key_values = ( [None] * len(self.blocks) if past_key_values is None else past_key_values ) for index, block in enumerate(self.blocks): hidden_states, new_key_values = block( hidden_states, past_key_values[index], mask ) past_key_values[index] = new_key_values return hidden_states, past_key_values # PhiForCausalLM wraps the PhiModel and PhiCausalLMHead together and returns a CausalLMOutputWithPast object. 
class PhiForCausalLM(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() if not prefix: prefix = "transformer" else: prefix = f"{prefix}.transformer" self.model = PhiModel(prefix, config, weights) self.lm_head = PhiCausalLMHead(config, weights) def forward( self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None, attention_mask: Optional[torch.ByteTensor] = None, return_dict: Optional[bool] = None, use_cache: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, ) -> Tuple[torch.Tensor, List[Tuple[torch.Tensor, torch.Tensor]]]: model_output = self.model( input_ids, past_key_values, attention_mask, return_dict, use_cache ) logits = self.lm_head(model_output[0]) loss = None if labels is not None: loss = nn.CrossEntropyLoss()( logits[:, :-1].view(-1, logits.size(-1)), labels[:, 1:].view(-1) ) if not return_dict: return ( ((loss,) + (logits,) + model_output[1:]) if loss is not None else (logits,) + model_output[1:] ) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=model_output[1], hidden_states=None, attentions=None, )
text-generation-inference/server/text_generation_server/models/custom_modeling/phi_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/phi_modeling.py", "repo_id": "text-generation-inference", "token_count": 5696 }
331
import math from typing import List, Optional import torch from opentelemetry import trace from transformers import AutoTokenizer, AutoProcessor import transformers.modeling_utils from text_generation_server.models.flash_causal_lm import FlashCausalLM from text_generation_server.models.vlm_causal_lm import VlmCausalLM, VlmCausalLMBatch from text_generation_server.utils import initialize_torch_distributed from text_generation_server.layers.attention import paged_attention, attention, Seqlen from text_generation_server.layers.attention.kv_cache import KVScales, KVCache from text_generation_server.models.globals import ATTENTION import torch.nn.functional as F from text_generation_server.utils.import_utils import SYSTEM tracer = trace.get_tracer(__name__) # The base TP plan of these models has replicated q/k/v. This means that each process will see the full states, # hence we should not divide the number of heads by the world size. This is a known waste of VRAM (the cache # will be fully replicated on each process) and GPU communication (additional all-gather operations), however due # to internal constraints it was not (yet?) 
possible to circumvent REPLICATED_ATTENTION_MODELS = [ "olmo2", "phi3", ] # # Qwen2VL # transformers.models.qwen2_vl.modeling_qwen2_vl.QWEN2_VL_VISION_ATTENTION_CLASSES[ # "tgi" # ] = transformers.models.qwen2_vl.modeling_qwen2_vl.QWEN2_VL_VISION_ATTENTION_CLASSES[ # "eager" # ] def tgi_flash_attention_forward( module, query_states: torch.Tensor, key_states: torch.Tensor, value_states: torch.Tensor, attention_mask: Optional[torch.Tensor], # This is a positional arg in Transformers kv_cache: List[KVCache], kv_head_mapping: torch.Tensor, slots: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], seqlen: Seqlen, block_tables: torch.Tensor, max_s: int, kv_scales: KVScales, softmax_scale: Optional[float] = None, sliding_window: Optional[int] = None, softcap: Optional[float] = None, use_sdpa: Optional[bool] = False, **kwargs, # This is needed to "absorb" other args passed by Transformers modeling ): kv_cache = kv_cache[module.layer_idx] query_states = query_states.transpose(1, 2).squeeze(dim=0) key_states = key_states.transpose(1, 2).squeeze(dim=0) value_states = value_states.transpose(1, 2).squeeze(dim=0) # Take care of updating the cache in-place kv_cache.store(key=key_states, value=value_states, slots=slots, kv_scales=kv_scales) _, num_heads, head_dim = query_states.shape softmax_scale = 1 / math.sqrt(head_dim) if softmax_scale is None else softmax_scale sliding_window = -1 if sliding_window is None else sliding_window if cu_seqlen_prefill is not None: if not use_sdpa: attn_output = attention( query=query_states, key=key_states, value=value_states, kv_cache=kv_cache, kv_scales=kv_scales, seqlen=seqlen, block_tables=block_tables, softmax_scale=softmax_scale, window_size_left=sliding_window, softcap=softcap, ) else: lengths = cu_seqlen_prefill[1:] - cu_seqlen_prefill[:-1] max_length = max(lengths) attention_mask = attention_mask[:, :, :, :max_length] enable_gqa = query_states.shape[1] != key_states.shape[1] # Split tensors using vectorized split query_list = 
torch.split(query_states, lengths.tolist(), dim=0) key_list = torch.split(key_states, lengths.tolist(), dim=0) value_list = torch.split(value_states, lengths.tolist(), dim=0) padded_query = torch.nn.utils.rnn.pad_sequence(query_list, batch_first=True) padded_key = torch.nn.utils.rnn.pad_sequence(key_list, batch_first=True) padded_value = torch.nn.utils.rnn.pad_sequence(value_list, batch_first=True) padded_query = padded_query.transpose(1, 2).contiguous() padded_key = padded_key.transpose(1, 2).contiguous() padded_value = padded_value.transpose(1, 2).contiguous() # Compute attention attn_output = F.scaled_dot_product_attention( padded_query, padded_key, padded_value, attn_mask=attention_mask, scale=softmax_scale, enable_gqa=enable_gqa, ) attn_output = attn_output.transpose( 1, 2 ) # [batch_size, seq_len, num_heads, head_dim] max_seq_len = padded_query.size(2) seq_range = torch.arange(max_seq_len, device=padded_query.device).unsqueeze( 0 ) lengths_tensor = torch.tensor( lengths, device=padded_query.device ).unsqueeze(1) mask = seq_range < lengths_tensor # [batch, max_seq_len] attn_output = attn_output[mask] # [total_seq_len, num_heads, head_dim] else: attn_output = paged_attention( query_states, kv_cache, kv_head_mapping, softmax_scale, block_tables, seqlen, max_s, kv_scales=kv_scales, softcap=softcap, window_size_left=sliding_window, ) attn_output = attn_output.view(-1, num_heads * head_dim) return attn_output, None transformers.modeling_utils.ALL_ATTENTION_FUNCTIONS["tgi"] = tgi_flash_attention_forward # TODO: implement # tgi_cross_attention_forward class TransformersFlashVlmCausalLM(VlmCausalLM): def __init__( self, model_id: str, model_class, revision: Optional[str] = None, quantize: Optional[str] = None, speculator: Optional[str] = None, dtype: Optional[torch.dtype] = None, default_dtype=torch.float16, trust_remote_code: bool = False, tokenizer_class=AutoTokenizer, processor_class=AutoProcessor, processor_kwargs=None, kv_cache_dtype: Optional[torch.dtype] = 
None, batch_class=VlmCausalLMBatch, support_chunking: bool = True, ): self.batch_class = batch_class self.quantize = quantize self.process_group, rank, world_size = initialize_torch_distributed() self.dtype = dtype if speculator: raise RuntimeError("Speculator decoding is not enabled for AutoModel") if torch.cuda.is_available(): device = torch.device(f"cuda:{rank}") dtype = default_dtype if dtype is None else dtype elif SYSTEM == "ipex": if hasattr(torch, "xpu") and torch.xpu.is_available(): device = torch.device(f"xpu:{rank}") else: device = torch.device("cpu") dtype = default_dtype if dtype is None else dtype else: raise ValueError( "Flash `Transformers` modeling backend is not available on cpu." ) tokenizer = tokenizer_class.from_pretrained( model_id, revision=revision, padding_side="left", truncation_side="left", trust_remote_code=trust_remote_code, ) if processor_kwargs is None: processor_kwargs = {} self.processor = processor_class.from_pretrained( model_id, revision=revision, trust_remote_code=trust_remote_code, **processor_kwargs, ) attn_implementation = { "text_config": "tgi", "vision_config": "sdpa", } model = model_class.from_pretrained( model_id, revision=revision, torch_dtype=dtype, load_in_8bit=quantize == "bitsandbytes", trust_remote_code=trust_remote_code, attn_implementation=attn_implementation, device_map=device if world_size == 1 else None, tp_plan="auto" if world_size > 1 else None, ) torch.distributed.barrier(group=self.process_group) self.config = model.config config = model.config # VLM models define the config we care about in their text_config text_config = getattr(model.config, "text_config", None) if text_config is not None: config = text_config if tokenizer.pad_token_id is None: if model.config.pad_token_id is not None: tokenizer.pad_token_id = model.config.pad_token_id elif model.config.eos_token_id is not None and isinstance( model.config.eos_token_id, int ): tokenizer.pad_token_id = model.config.eos_token_id elif 
tokenizer.eos_token_id is not None: tokenizer.pad_token_id = tokenizer.eos_token_id else: tokenizer.add_special_tokens({"pad_token": "[PAD]"}) self.num_layers = config.num_hidden_layers self.num_heads = config.num_attention_heads self.num_kv_heads = config.num_key_value_heads # Some models use GQA and different sizes for o_proj # and q_proj, that allows for that. if hasattr(config, "head_dim"): self.head_size = config.head_dim else: self.head_size = config.hidden_size // config.num_attention_heads # Skip it for models in the exception list if config.model_type not in REPLICATED_ATTENTION_MODELS: self.num_heads = self.num_heads // self.process_group.size() self.num_kv_heads = ( self.num_kv_heads // self.process_group.size() if self.num_kv_heads > 1 else self.num_kv_heads ) self.cuda_graphs = {} self.kv_cache = [] self.kv_cache_dtype = dtype if kv_cache_dtype is None else kv_cache_dtype if ATTENTION == "flashinfer": from text_generation_server.layers.attention.flashinfer import ( create_prefill_state, create_decode_state, create_prefill_with_paged_kv_state, ) self.prefill_state = create_prefill_state(device=device) self.prefill_with_paged_kv_state = create_prefill_with_paged_kv_state( device=device ) self.decode_state = create_decode_state( device=device, num_heads=self.num_heads, num_kv_heads=self.num_kv_heads, ) self.num_groups = self.num_heads // self.num_kv_heads # Those will never change and will be used in the forwards self.kv_head_mapping = torch.arange( 0, self.num_kv_heads, dtype=torch.int32, device=device ).repeat_interleave(self.num_groups) # This means no scale self.kv_scales = KVScales( torch.tensor(1.0, device=device), torch.tensor(1.0, device=device), ) # Skip FlashCausalLM init. super(FlashCausalLM, self).__init__( model_id=model_id, model=model, tokenizer=tokenizer, requires_padding=False, dtype=dtype, device=device, rank=rank, world_size=world_size, support_chunking=support_chunking, ) # Monkey patch of `self.model.forward` to match `FlashCausalLM`. 
It avoids duplicating a lot of code # We first copy the original model.forward because we still need it in the monkey patch self.model.original_forward = self.model.forward self.model.forward = self._model_forward self.model.get_position_ids = self.get_position_ids torch.distributed.barrier(group=self.process_group) def get_position_ids(self, input_ids, image_grid_thw, position_ids): return position_ids def pre_process_inputs(self, input_ids, position_ids, cu_seqlen_prefill): return { "input_ids": input_ids.unsqueeze(0), "position_ids": position_ids.unsqueeze(0), } def post_process_outputs(self, logits, lm_head_indices): return logits.squeeze(dim=0) @classmethod def fallback( cls, model_id: str, model_class, revision: Optional[str] = None, quantize: Optional[str] = None, speculator: Optional[str] = None, dtype: Optional[torch.dtype] = None, trust_remote_code: bool = False, batch_class: Optional[type] = VlmCausalLMBatch, processor_kwargs: Optional[dict] = None, support_chunking: bool = True, ): return cls( model_id=model_id, model_class=model_class, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, batch_class=batch_class, processor_kwargs=processor_kwargs, support_chunking=support_chunking, ) def _model_forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[KVCache], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, lm_head_indices: Optional[torch.Tensor], prefill_cache_indices=None, # not used, but passed to match original signature adapter_data=None, # not supported, but passed to match original signature pixel_values: torch.FloatTensor = None, image_grid_thw: Optional[torch.LongTensor] = None, pixel_attention_mask=None, image_sizes: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, ): # A value of `None` (i.e. 
no logit slicing) translates to `0` in Transformers logits_to_keep = lm_head_indices if lm_head_indices is not None else 0 inputs = self.pre_process_inputs( input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=cu_seqlen_prefill, ) inputs["input_ids"] = None # This is equivalent to `self.model.forward`, see the monkey patch in __init__ logits = self.model.original_forward( input_ids=inputs["input_ids"], inputs_embeds=inputs_embeds.unsqueeze(0), position_ids=inputs["position_ids"], past_key_values=None, # we use self.kv_cache instead of transformers cache object use_cache=False, # we use self.kv_cache instead of transformers cache object logits_to_keep=logits_to_keep, return_dict=True, cu_seqlen_prefill=cu_seqlen_prefill, kv_cache=kv_cache, block_tables=block_tables, slots=slots, seqlen=seqlen, max_s=max_s, kv_head_mapping=self.kv_head_mapping, kv_scales=self.kv_scales, pixel_values=pixel_values, pixel_attention_mask=pixel_attention_mask, image_sizes=image_sizes, image_grid_thw=image_grid_thw, attention_mask=inputs.get("attention_mask", None), use_sdpa=inputs.get("use_sdpa", False), cache_position=inputs.get("cache_position", None), ).logits logits = self.post_process_outputs(logits, lm_head_indices) return logits, None class TransformersQwen2VlmCausalLM(TransformersFlashVlmCausalLM): def get_position_ids(self, input_ids: torch.Tensor, image_grid_thw: torch.Tensor): if image_grid_thw is None: return ( torch.arange(input_ids.shape[0], device=input_ids.device) .unsqueeze(1) .repeat(1, 3) ) spatial_merge_size = self.config.vision_config.spatial_merge_size vision_start_token_id = self.config.vision_start_token_id vision_end_token_id = self.config.vision_end_token_id device = input_ids.device dtype = input_ids.dtype input_ids_len = input_ids.shape[0] vision_starts = torch.where(input_ids == vision_start_token_id)[0] vision_ends = torch.where(input_ids == vision_end_token_id)[0] vision_segments = torch.stack((vision_starts, vision_ends), dim=1) prev_vision_end 
= torch.cat( [torch.zeros(1, device=vision_ends.device, dtype=dtype), vision_ends[:-1]] ) text_lengths_between_vision = vision_segments[:, 0] - prev_vision_end + 1 vision_widths_max = torch.cat( [ torch.zeros(1, device=image_grid_thw.device, dtype=dtype), image_grid_thw[:-1, 2] // spatial_merge_size, ] ) vision_segment_lengths = vision_widths_max + text_lengths_between_vision vision_segment_lengths = vision_segment_lengths.cumsum(dim=0) text_segment_lengths = vision_segment_lengths - text_lengths_between_vision # create position ids for each vision segment based on the image grid llm_pos_ids_list = [] for i, _ in enumerate(vision_segments): t, h, w = ( image_grid_thw[i][0], image_grid_thw[i][1] // spatial_merge_size, image_grid_thw[i][2] // spatial_merge_size, ) t_indices = torch.arange(t, device=device).repeat_interleave(h * w) h_indices = torch.arange(h, device=device).repeat_interleave(w).repeat(t) w_indices = torch.arange(w, device=device).repeat(t * h) image_position_ids = torch.stack([t_indices, h_indices, w_indices], dim=0) # offset by the position of the last vision segment im = image_position_ids + vision_segment_lengths[i] llm_pos_ids_list.append(im) # create position ids for each text segment text_ranges = [ torch.arange(seq_len, device=device).view(1, -1).expand(3, -1) + text_segment_lengths[i] for i, seq_len in enumerate(text_lengths_between_vision) ] full_llm_pos_ids_list = [ item for sublist in zip(text_ranges, llm_pos_ids_list) for item in sublist ] # import ipdb # ipdb.set_trace() max_s = full_llm_pos_ids_list[-1].max() + 1 final_text_len = input_ids_len - vision_ends[-1] if final_text_len > 0: m = torch.arange(final_text_len, device=device).view(1, -1).expand(3, -1) full_llm_pos_ids_list.append(m + max_s) position_ids = ( torch.cat(full_llm_pos_ids_list, dim=1).reshape(3, -1).transpose(0, 1) ) return position_ids def post_process_outputs(self, logits, lm_head_indices): return logits.squeeze(dim=0)[lm_head_indices].unsqueeze(0) def 
pre_process_inputs(self, input_ids, position_ids, cu_seqlen_prefill): input_ids = input_ids.unsqueeze(0) position_ids = position_ids.transpose(0, 1).unsqueeze(1) return {"input_ids": input_ids, "position_ids": position_ids} class TransformersGemma3VlmCausalLM(TransformersFlashVlmCausalLM): def get_attention_mask(self, input_ids, cu_seqlen_prefill): device = input_ids.device dtype = self.dtype min_dtype = torch.finfo(dtype).min lengths = (cu_seqlen_prefill[1:] - cu_seqlen_prefill[:-1]).tolist() batch_size = len(lengths) sequence_length = max(lengths) target_length = sequence_length # Create the padding mask from the computed lengths. # pad_mask: [batch, sequence_length] where True indicates valid tokens. seq_range = torch.arange(sequence_length, device=device).unsqueeze(0) lengths_tensor = torch.tensor(lengths, device=device).unsqueeze(1) pad_mask = seq_range < lengths_tensor # shape: [batch, sequence_length] # Build the base causal mask (for non-image tokens): causal_mask = torch.tril( torch.ones( (sequence_length, sequence_length), dtype=torch.bool, device=device ) ) base_mask = pad_mask.unsqueeze(2) & pad_mask.unsqueeze( 1 ) # [batch, sequence_length, sequence_length] base_mask = base_mask & causal_mask.unsqueeze(0) # apply causal constraint image_token_mask = (input_ids == self.config.image_token_index).to( input_ids.device ) image_token_mask = torch.nn.utils.rnn.pad_sequence( torch.split(image_token_mask, lengths), batch_first=True, padding_value=0 ) bidirectional_mask = image_token_mask.unsqueeze(2) & image_token_mask.unsqueeze( 1 ) # Combine the causal base mask and the bidirectional mask. 
combined_mask = torch.logical_or( base_mask.unsqueeze(1), bidirectional_mask.unsqueeze(1) ).to(device) # combined_mask now has shape [batch, 1, sequence_length, sequence_length] full_attention_mask = torch.zeros( (batch_size, 1, sequence_length, target_length), device=device, dtype=torch.bool, ) full_attention_mask[:, :, :, :sequence_length] = combined_mask final_attention_mask = torch.where(full_attention_mask, 0, min_dtype).to(device) return final_attention_mask def pre_process_inputs(self, input_ids, position_ids, cu_seqlen_prefill): inputs = { "input_ids": input_ids.unsqueeze(0), "position_ids": position_ids.unsqueeze(0), } if cu_seqlen_prefill is not None: attention_mask = self.get_attention_mask( input_ids.squeeze(0), cu_seqlen_prefill ) inputs["attention_mask"] = attention_mask inputs["use_sdpa"] = True return inputs class TransformersLlama4VlmCausalLM(TransformersFlashVlmCausalLM): def pre_process_inputs(self, input_ids, position_ids, cu_seqlen_prefill): inputs = super().pre_process_inputs(input_ids, position_ids, cu_seqlen_prefill) inputs["cache_position"] = position_ids inputs["attention_mask"] = torch.zeros((1, 1, 1, 1), device=input_ids.device) return inputs def get_vision_embeds( self, pixel_values: torch.FloatTensor, pixel_attention_mask: Optional[torch.FloatTensor] = None, image_sizes: Optional[torch.Tensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, ): image_features = self.model.get_image_features( pixel_values=pixel_values, vision_feature_layer=self.model.config.vision_config.vision_feature_layer, vision_feature_select_strategy=self.model.config.vision_config.vision_feature_select_strategy, image_sizes=image_sizes, ) vision_flat = image_features.view(-1, image_features.size(-1)) projected_vision_flat = self.model.multi_modal_projector(vision_flat) return projected_vision_flat def get_inputs_embeds(self, input_ids, vision_embeds=None): inputs_embeds = self.model.get_input_embeddings()(input_ids) if vision_embeds is not None: 
original_inputs_embeds_shape = inputs_embeds.shape special_image_mask = (input_ids == self.config.image_token_index).unsqueeze( -1 ) final_mask = special_image_mask.to(inputs_embeds.device) inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-1)) final_mask_1d = final_mask[..., 0].reshape(-1) num_tokens_to_fill = final_mask_1d.sum() if num_tokens_to_fill != vision_embeds.size(0): raise ValueError( f"Mismatch: final_mask wants {num_tokens_to_fill} embeddings, " f"but multi_modal_projector returned {vision_embeds.size(0)}" ) expanded_mask = final_mask_1d.unsqueeze(-1).expand( -1, inputs_embeds.size(-1) ) inputs_embeds = inputs_embeds.masked_scatter(expanded_mask, vision_embeds) inputs_embeds = inputs_embeds.view(original_inputs_embeds_shape) return inputs_embeds
text-generation-inference/server/text_generation_server/models/transformers_flash_vlm.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/transformers_flash_vlm.py", "repo_id": "text-generation-inference", "token_count": 11604 }
332
import copy
from abc import ABC
from collections import defaultdict
from typing import TYPE_CHECKING, Dict, List, Tuple, Type, Union

from text_generation_server.utils.merges.utils import (
    calculate_majority_sign_mask,
    disjoint_merge,
    prune,
)

import torch

if TYPE_CHECKING:
    from text_generation_server.adapters.lora import LoraConfig
    from text_generation_server.utils.adapter import ModuleMap


class AdapterParameters:
    """Plain container for the parameters controlling an adapter merge."""

    def __init__(
        self, adapter_ids, weights, merge_strategy, density, majority_sign_method
    ):
        self.adapter_ids = adapter_ids
        # Per-adapter mixing weights; empty/None means uniform (see merge_adapters).
        self.weights = weights
        self.merge_strategy = merge_strategy
        # Fraction of entries kept when a strategy sparsifies the task tensors.
        self.density = density
        self.majority_sign_method = majority_sign_method


def _apply_weights(
    tensors: Union[torch.Tensor, List[torch.Tensor]], w: torch.Tensor
) -> torch.Tensor:
    """Scale each task tensor by its weight; returns a stacked, weighted tensor."""
    if isinstance(tensors, torch.Tensor):
        t = tensors
    else:
        t = torch.stack(tensors, dim=0)

    # element-wise weighting of each task tensor
    # need to unsqueeze weights to match task tensor dimensions
    # for multiplication to apply element-wise
    while len(t.shape) > len(w.shape):
        w = w.unsqueeze(-1)
    return t * w


class MergeStrategy(ABC):
    """Interface: combine a list of task tensors into one merged tensor."""

    def merge(
        self, task_tensors: List[torch.Tensor], weights: torch.Tensor
    ) -> torch.Tensor:
        raise NotImplementedError()


class LinearMerge(MergeStrategy):
    """Weighted sum of the task tensors (no sparsification)."""

    def __init__(self, **kwargs):
        pass

    def merge(
        self, task_tensors: List[torch.Tensor], weights: torch.Tensor
    ) -> torch.Tensor:
        weighted_task_tensors = _apply_weights(task_tensors, weights)
        return weighted_task_tensors.sum(dim=0)


class TiesMerge(MergeStrategy):
    """TIES merge: magnitude-prune each tensor, elect a majority sign per
    element, then sum only the entries that agree with the elected sign."""

    def __init__(self, density: float, majority_sign_method: str = "total", **kwargs):
        self.density = density
        self.majority_sign_method = majority_sign_method

    def merge(
        self, task_tensors: List[torch.Tensor], weights: torch.Tensor
    ) -> torch.Tensor:
        # sparsify
        task_tensors = [
            prune(tensor, self.density, method="magnitude") for tensor in task_tensors
        ]
        task_tensors = torch.stack(task_tensors, dim=0)

        # elect sign before applying weights
        majority_sign_mask = calculate_majority_sign_mask(
            task_tensors, method=self.majority_sign_method
        )
        weighted_task_tensors = _apply_weights(task_tensors, weights)

        # disjoint merge
        return disjoint_merge(weighted_task_tensors, majority_sign_mask)


class DareLinearMerge(MergeStrategy):
    """DARE merge: randomly drop-and-rescale each tensor, then weighted sum."""

    def __init__(self, density: float, **kwargs):
        self.density = density

    def merge(
        self, task_tensors: List[torch.Tensor], weights: torch.Tensor
    ) -> torch.Tensor:
        # sparsify
        task_tensors = [
            prune(tensor, self.density, method="random", rescale=True)
            for tensor in task_tensors
        ]
        weighted_task_tensors = _apply_weights(task_tensors, weights)
        return weighted_task_tensors.sum(dim=0)


class DareTiesMerge(MergeStrategy):
    """DARE + TIES: random drop-and-rescale, then sign election and disjoint merge."""

    def __init__(self, density: float, majority_sign_method: str = "total", **kwargs):
        self.density = density
        self.majority_sign_method = majority_sign_method

    def merge(
        self, task_tensors: List[torch.Tensor], weights: torch.Tensor
    ) -> torch.Tensor:
        # sparsify
        task_tensors = [
            prune(tensor, self.density, method="random", rescale=True)
            for tensor in task_tensors
        ]
        task_tensors = torch.stack(task_tensors, dim=0)

        # elect sign before applying weights
        majority_sign_mask = calculate_majority_sign_mask(
            task_tensors, method=self.majority_sign_method
        )
        weighted_task_tensors = _apply_weights(task_tensors, weights)

        # disjoint merge
        mixed_task_tensors = disjoint_merge(weighted_task_tensors, majority_sign_mask)
        return mixed_task_tensors


# Registry mapping strategy names to their implementations.
strategy_registry: Dict[str, Type[MergeStrategy]] = {
    "linear": LinearMerge,
    "ties": TiesMerge,
    "dare_linear": DareLinearMerge,
    "dare_ties": DareTiesMerge,
}


def merge_adapters(
    adapters: List[Tuple["ModuleMap", "LoraConfig"]],
    merge_params: AdapterParameters,
) -> Tuple["ModuleMap", "LoraConfig"]:
    """Merge several LoRA adapters into a single (module_map, config) pair.

    NOTE(review): the strategy and sign method are currently hard-coded to
    "linear"/"total" (the enum-based selection is commented out below) —
    `merge_params.merge_strategy`/`majority_sign_method` are ignored.
    """
    # strategy_name = MergeStrategyEnum.Name(merge_params.merge_strategy).lower()
    strategy_name = "linear"

    # Default to uniform weights when none are supplied.
    weights = merge_params.weights
    if not weights:
        weights = torch.ones(len(adapters))
    else:
        weights = torch.tensor(weights)

    merge_config = {
        "density": merge_params.density,
        # "majority_sign_method": MajoritySignMethodEnum.Name(
        #     merge_params.majority_sign_method
        # ).lower(),
        "majority_sign_method": "total",
    }
    merge_strategy = strategy_registry[strategy_name](**merge_config)

    module_maps: Dict[str, Dict[str, Dict[str, List[torch.Tensor]]]] = defaultdict(
        lambda: defaultdict(lambda: defaultdict(list))
    )
    lora_configs = []
    weight_name_to_adapter_idx = defaultdict(list)

    # input is list of (module_map, lora_config) tuples
    # convert into dict[k][param_name] -> list of tensors
    for idx, (module_map, lora_config) in enumerate(adapters):
        for weight_name, data in module_map.items():
            weight_name_to_adapter_idx[weight_name].append(idx)
            for k, (param_data, param_name) in data.items():
                module_maps[weight_name][k][param_name].append(param_data)
        lora_configs.append(lora_config)

    # validate lora configs are compatible
    _validate_lora_configs(lora_configs)

    # merge tensors for each module such that we have a single ModuleMap:
    # dict[k] -> merged tensor
    merged_module_map: "ModuleMap" = defaultdict(dict)
    for weight_name, data in module_maps.items():
        # Only adapters that actually provide this weight contribute,
        # so index the weight vector by those adapters.
        indices = weight_name_to_adapter_idx[weight_name]
        param_weights = weights[indices]
        for k, param_data in data.items():
            for param_name, tensors in param_data.items():
                merged_tensor = merge_strategy.merge(tensors, param_weights)
                merged_module_map[weight_name][k] = (merged_tensor, param_name)

    # merge lora configs
    merged_lora_config = _merge_lora_configs(lora_configs)

    return merged_module_map, merged_lora_config


def _validate_lora_configs(lora_configs: List["LoraConfig"]):
    """Raise ValueError unless all configs share one rank and at least one
    config declares target modules."""
    # check that all configs have the same rank
    ranks = set(lora_config.r for lora_config in lora_configs)
    if len(ranks) > 1:
        raise ValueError(
            f"unable to merge adapters, lora configs have different ranks: {ranks}"
        )

    if all(len(lora_config.target_modules) == 0 for lora_config in lora_configs):
        raise ValueError(
            "unable to merge adapters, lora configs have no target modules"
        )


def _merge_lora_configs(lora_configs: List["LoraConfig"]) -> "LoraConfig":
    """Return a copy of the first config whose target_modules is the sorted
    union of all configs' target modules."""
    merged_lora_config = copy.copy(lora_configs[0])

    # merge target modules as a union operation
    merged_target_modules = sorted(
        set(
            module
            for lora_config in lora_configs
            for module in lora_config.target_modules
        )
    )
    merged_lora_config.target_modules = merged_target_modules

    return merged_lora_config
text-generation-inference/server/text_generation_server/utils/merges/strategies.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/merges/strategies.py", "repo_id": "text-generation-inference", "token_count": 3074 }
333
## How to release # Before the release Simple checklist on how to make releases for `tokenizers`. - Freeze `master` branch. - Run all tests (Check CI has properly run) - If any significant work, check benchmarks: - `cd tokenizers && cargo bench` (needs to be run on latest release tag to measure difference if it's your first time) - Run all `transformers` tests. (`transformers` is a big user of `tokenizers` we need to make sure we don't break it, testing is one way to make sure nothing unforeseen has been done.) - Run all fast tests at the VERY least (not just the tokenization tests). (`RUN_PIPELINE_TESTS=1 CUDA_VISIBLE_DEVICES=-1 pytest -sv tests/`) - When all *fast* tests work, then we can also (it's recommended) run the whole `transformers` test suite. - Rebase this [PR](https://github.com/huggingface/transformers/pull/16708). This will create new docker images ready to run the tests suites with `tokenizers` from the main branch. - Wait for actions to finish - Rebase this [PR](https://github.com/huggingface/transformers/pull/16712) This will run the actual full test suite. - Check the results. - **If any breaking change has been done**, make sure the version can safely be increased for transformers users (`tokenizers` version need to make sure users don't upgrade before `transformers` has). [link](https://github.com/huggingface/transformers/blob/main/setup.py#L154) For instance `tokenizers>=0.10,<0.11` so we can safely upgrade to `0.11` without impacting current users - Then start a new PR containing all desired code changes from the following steps. - You will `Create release` after the code modifications are on `master`. # Rust - `tokenizers` (rust, python & node) versions don't have to be in sync but it's very common to release for all versions at once for new features. - Edit `Cargo.toml` to reflect new version - Edit `CHANGELOG.md`: - Add relevant PRs that were added (python PRs do not belong for instance). - Add links at the end of the files. 
- Go to [Releases](https://github.com/huggingface/tokenizers/releases)
- Create new Release:
  - Mark it as pre-release
  - Use new version name with a new tag (create on publish) `vX.X.X`.
  - Copy paste the new part of the `CHANGELOG.md`
- ⚠️ Click on `Publish release`. This will start the whole process of building and uploading the new version on `crates.io`, there's no going back after this
- Go to the [Actions](https://github.com/huggingface/tokenizers/actions) tab and check everything works smoothly.
- If anything fails, you need to fix the CI/CD to make it work again. Since your package was not uploaded to the repository properly, you can try again.

# Python

- Edit `bindings/python/setup.py` to reflect new version.
- Edit `bindings/python/py_src/tokenizers/__init__.py` to reflect new version.
- Edit `CHANGELOG.md`:
  - Add relevant PRs that were added (node PRs do not belong for instance).
  - Add links at the end of the files.
- Go to [Releases](https://github.com/huggingface/tokenizers/releases)
- Create new Release:
  - Mark it as pre-release
  - Use new version name with a new tag (create on publish) `python-vX.X.X`.
  - Copy paste the new part of the `CHANGELOG.md`
- ⚠️ Click on `Publish release`. This will start the whole process of building and uploading the new version on `pypi`, there's no going back after this
- Go to the [Actions](https://github.com/huggingface/tokenizers/actions) tab and check everything works smoothly.
- If anything fails, you need to fix the CI/CD to make it work again. Since your package was not uploaded to the repository properly, you can try again.
- This CI/CD has 3 distinct builds, `Pypi`(normal), `conda` and `extra`. `Extra` is REALLY slow (~4h), this is normal since it has to rebuild many things, but enables the wheel to be available for old Linuxes

# Node

- Edit `bindings/node/package.json` to reflect new version.
- Edit `CHANGELOG.md`:
  - Add relevant PRs that were added (python PRs do not belong for instance).
  - Add links at the end of the files.
- Go to [Releases](https://github.com/huggingface/tokenizers/releases)
- Create new Release:
  - Mark it as pre-release
  - Use new version name with a new tag (create on publish) `node-vX.X.X`.
  - Copy paste the new part of the `CHANGELOG.md`
- ⚠️ Click on `Publish release`. This will start the whole process of building and uploading the new version on `npm`, there's no going back after this
- Go to the [Actions](https://github.com/huggingface/tokenizers/actions) tab and check everything works smoothly.
- If anything fails, you need to fix the CI/CD to make it work again. Since your package was not uploaded to the repository properly, you can try again.

# Testing the CI/CD for release

If you want to make modifications to the CI/CD of the release GH actions, you need to:

- **Comment the part that uploads the artifacts** to `crates.io`, `PyPi` or `npm`.
- Change the trigger mechanism so it can trigger every time you push to your branch.
- Keep pushing your changes until the artifacts are properly created.
tokenizers/RELEASE.md/0
{ "file_path": "tokenizers/RELEASE.md", "repo_id": "tokenizers", "token_count": 1519 }
334
/* eslint-disable */ var globRequire = require console.log = (..._args: any[]) => {} describe('quicktourExample', () => { function require(mod: string) { if (mod.startsWith('tokenizers')) { return globRequire('../../') } else { return globRequire(mod) } } it.skip('trains the tokenizer', async () => { // START init_tokenizer let { Tokenizer } = require('tokenizers') let { BPE } = require('tokenizers') let tokenizer = new Tokenizer(BPE.init({}, [], { unkToken: '[UNK]' })) // END init_tokenizer // START init_trainer let { bpeTrainer } = require('tokenizers') let trainer = bpeTrainer({ specialTokens: ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]'], }) // END init_trainer // START init_pretok let { whitespacePreTokenizer } = require('tokenizers') tokenizer.setPreTokenizer(whitespacePreTokenizer()) // END init_pretok // START train let files = ['test', 'train', 'valid'].map((split) => `data/wikitext-103-raw/wiki.${split}.raw`) tokenizer.train(files, trainer) // END train // START save tokenizer.save('data/tokenizer-wiki.json') // END save }) it('shows a quicktour example', async () => { let { Tokenizer } = require('tokenizers') // START reload_tokenizer let tokenizer = Tokenizer.fromFile('data/tokenizer-wiki.json') // END reload_tokenizer // START encode var output = await tokenizer.encode("Hello, y'all! 
How are you 😁 ?") // END encode // START print_tokens console.log(output.getTokens()) // ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?"] // END print_tokens expect(output.getTokens()).toEqual(['Hello', ',', 'y', "'", 'all', '!', 'How', 'are', 'you', '[UNK]', '?']) // START print_ids console.log(output.getIds()) // [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35] // END print_ids expect(output.getIds()).toEqual([27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]) // START print_offsets let offsets = output.getOffsets() console.log(offsets[9]) // (26, 27) // END print_offsets expect(offsets[9]).toEqual([26, 27]) // START use_offsets let { slice } = require('tokenizers') let sentence = "Hello, y'all! How are you 😁 ?" let [start, end] = offsets[9] console.log(slice(sentence, start, end)) // "😁" // END use_offsets expect(slice(sentence, start, end)).toEqual('😁') // START check_sep console.log(tokenizer.tokenToId('[SEP]')) // 2 // END check_sep expect(tokenizer.tokenToId('[SEP]')).toEqual(2) // START init_template_processing let { templateProcessing } = require('tokenizers') tokenizer.setPostProcessor( templateProcessing('[CLS] $A [SEP]', '[CLS] $A [SEP] $B:1 [SEP]:1', [ ['[CLS]', tokenizer.tokenToId('[CLS]')], ['[SEP]', tokenizer.tokenToId('[SEP]')], ]), ) // END init_template_processing // START print_special_tokens var output = await tokenizer.encode("Hello, y'all! 
How are you 😁 ?") console.log(output.getTokens()) // ["[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]"] // END print_special_tokens expect(output.getTokens()).toEqual([ '[CLS]', 'Hello', ',', 'y', "'", 'all', '!', 'How', 'are', 'you', '[UNK]', '?', '[SEP]', ]) // START print_special_tokens_pair var output = await tokenizer.encode("Hello, y'all!", 'How are you 😁 ?') console.log(output.getTokens()) // ["[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]"] // END print_special_tokens_pair expect(output.getTokens()).toEqual([ '[CLS]', 'Hello', ',', 'y', "'", 'all', '!', '[SEP]', 'How', 'are', 'you', '[UNK]', '?', '[SEP]', ]) // START print_type_ids console.log(output.getTypeIds()) // [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] // END print_type_ids expect(output.getTypeIds()).toEqual([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]) // START encode_batch var output = await tokenizer.encodeBatch(["Hello, y'all!", 'How are you 😁 ?']) // END encode_batch // START encode_batch_pair // var output = await tokenizer.encodeBatch( // [["Hello, y'all!", "How are you 😁 ?"], ["Hello to you too!", "I'm fine, thank you!"]] // ); // END encode_batch_pair // START enable_padding tokenizer.setPadding({ padId: 3, padToken: '[PAD]' }) // END enable_padding // START print_batch_tokens var output = await tokenizer.encodeBatch(["Hello, y'all!", 'How are you 😁 ?']) console.log(output[1].getTokens()) // ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"] // END print_batch_tokens expect(output[1].getTokens()).toEqual(['[CLS]', 'How', 'are', 'you', '[UNK]', '?', '[SEP]', '[PAD]']) // START print_attention_mask console.log(output[1].getAttentionMask()) // [1, 1, 1, 1, 1, 1, 1, 0] // END print_attention_mask expect(output[1].getAttentionMask()).toEqual([1, 1, 1, 1, 1, 1, 1, 0]) }) })
tokenizers/bindings/node/examples/documentation/quicktour.test.ts/0
{ "file_path": "tokenizers/bindings/node/examples/documentation/quicktour.test.ts", "repo_id": "tokenizers", "token_count": 2324 }
335
{ "name": "tokenizers-android-arm-eabi", "version": "0.13.4-rc1", "os": [ "android" ], "cpu": [ "arm" ], "main": "tokenizers.android-arm-eabi.node", "files": [ "tokenizers.android-arm-eabi.node" ], "description": "Tokenizers platform specific bindings", "keywords": [ "napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api" ], "license": "MIT", "engines": { "node": ">= 10" }, "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" }, "repository": "tokenizers" }
tokenizers/bindings/node/npm/android-arm-eabi/package.json/0
{ "file_path": "tokenizers/bindings/node/npm/android-arm-eabi/package.json", "repo_id": "tokenizers", "token_count": 269 }
336
{ "name": "tokenizers-linux-x64-gnu", "version": "0.13.4-rc1", "os": [ "linux" ], "cpu": [ "x64" ], "main": "tokenizers.linux-x64-gnu.node", "files": [ "tokenizers.linux-x64-gnu.node" ], "description": "Tokenizers platform specific bindings", "keywords": [ "napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api" ], "license": "MIT", "engines": { "node": ">= 10" }, "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" }, "repository": "tokenizers", "libc": [ "glibc" ] }
tokenizers/bindings/node/npm/linux-x64-gnu/package.json/0
{ "file_path": "tokenizers/bindings/node/npm/linux-x64-gnu/package.json", "repo_id": "tokenizers", "token_count": 289 }
337
use crate::arc_rwlock_serde; use napi::bindgen_prelude::*; use napi_derive::napi; use serde::{Deserialize, Serialize}; use std::sync::{Arc, RwLock}; use tk::normalizers::NormalizerWrapper; use tk::NormalizedString; use tokenizers as tk; /// Normalizer #[derive(Debug, Clone, Serialize, Deserialize)] #[napi] pub struct Normalizer { #[serde(flatten, with = "arc_rwlock_serde")] normalizer: Option<Arc<RwLock<NormalizerWrapper>>>, } #[napi] impl Normalizer { #[napi] pub fn normalize_string(&self, sequence: String) -> Result<String> { use tk::Normalizer; let mut normalized = NormalizedString::from(sequence); self .normalize(&mut normalized) .map_err(|e| Error::from_reason(format!("{e}")))?; Ok(normalized.get().to_string()) } } impl tk::Normalizer for Normalizer { fn normalize(&self, normalized: &mut NormalizedString) -> tk::Result<()> { self .normalizer .as_ref() .ok_or("Uninitialized Normalizer")? .read() .unwrap() .normalize(normalized)?; Ok(()) } } #[napi] pub fn prepend_normalizer(prepend: String) -> Normalizer { Normalizer { normalizer: Some(Arc::new(RwLock::new( tk::normalizers::prepend::Prepend::new(prepend).into(), ))), } } #[napi] pub fn strip_accents_normalizer() -> Normalizer { Normalizer { normalizer: Some(Arc::new(RwLock::new( tk::normalizers::strip::StripAccents.into(), ))), } } #[napi(object)] #[derive(Default)] pub struct BertNormalizerOptions { pub clean_text: Option<bool>, pub handle_chinese_chars: Option<bool>, pub strip_accents: Option<bool>, pub lowercase: Option<bool>, } /// bert_normalizer(options?: { /// cleanText?: bool = true, /// handleChineseChars?: bool = true, /// stripAccents?: bool = true, /// lowercase?: bool = true /// }) #[napi] pub fn bert_normalizer(options: Option<BertNormalizerOptions>) -> Normalizer { let options = options.unwrap_or_default(); Normalizer { normalizer: Some(Arc::new(RwLock::new( tk::normalizers::bert::BertNormalizer::new( options.clean_text.unwrap_or(true), options.handle_chinese_chars.unwrap_or(true), 
options.strip_accents, options.lowercase.unwrap_or(true), ) .into(), ))), } } #[napi] pub fn nfd_normalizer() -> Normalizer { Normalizer { normalizer: Some(Arc::new(RwLock::new(tk::normalizers::unicode::NFD.into()))), } } #[napi] pub fn nfkd_normalizer() -> Normalizer { Normalizer { normalizer: Some(Arc::new(RwLock::new(tk::normalizers::unicode::NFKD.into()))), } } #[napi] pub fn nfc_normalizer() -> Normalizer { Normalizer { normalizer: Some(Arc::new(RwLock::new(tk::normalizers::unicode::NFC.into()))), } } #[napi] pub fn nfkc_normalizer() -> Normalizer { Normalizer { normalizer: Some(Arc::new(RwLock::new(tk::normalizers::unicode::NFKC.into()))), } } // /// strip(left?: boolean, right?: boolean) #[napi] pub fn strip_normalizer(left: Option<bool>, right: Option<bool>) -> Normalizer { let left = left.unwrap_or(true); let right = right.unwrap_or(true); Normalizer { normalizer: Some(Arc::new(RwLock::new( tk::normalizers::strip::Strip::new(left, right).into(), ))), } } #[napi] pub fn sequence_normalizer(normalizers: Vec<&Normalizer>) -> Normalizer { let mut sequence: Vec<NormalizerWrapper> = Vec::with_capacity(normalizers.len()); normalizers.into_iter().for_each(|normalizer| { if let Some(normalizer) = &normalizer.normalizer { sequence.push((**normalizer).read().unwrap().clone()) } }); Normalizer { normalizer: Some(Arc::new(RwLock::new(NormalizerWrapper::Sequence( tk::normalizers::Sequence::new(sequence), )))), } } #[napi] pub fn lowercase() -> Normalizer { Normalizer { normalizer: Some(Arc::new(RwLock::new( tk::normalizers::utils::Lowercase.into(), ))), } } #[napi] pub fn replace(pattern: String, content: String) -> Result<Normalizer> { Ok(Normalizer { normalizer: Some(Arc::new(RwLock::new( tk::normalizers::replace::Replace::new(pattern, content) .map_err(|e| Error::from_reason(e.to_string()))? 
.into(), ))), }) } #[napi] pub fn nmt() -> Normalizer { Normalizer { normalizer: Some(Arc::new(RwLock::new(tk::normalizers::unicode::Nmt.into()))), } } #[napi] pub fn precompiled(bytes: Vec<u8>) -> Result<Normalizer> { Ok(Normalizer { normalizer: Some(Arc::new(RwLock::new( tk::normalizers::precompiled::Precompiled::from(&bytes) .map_err(|e| Error::from_reason(e.to_string()))? .into(), ))), }) }
tokenizers/bindings/node/src/normalizers.rs/0
{ "file_path": "tokenizers/bindings/node/src/normalizers.rs", "repo_id": "tokenizers", "token_count": 1885 }
338
include Cargo.toml include pyproject.toml include rust-toolchain include ../../LICENSE recursive-include src * recursive-include tokenizers-lib * recursive-exclude tokenizers-lib/target *
tokenizers/bindings/python/MANIFEST.in/0
{ "file_path": "tokenizers/bindings/python/MANIFEST.in", "repo_id": "tokenizers", "token_count": 57 }
339
from typing import Dict, List, Optional, Tuple, Union from tokenizers import AddedToken, EncodeInput, Encoding, InputSequence, Tokenizer from tokenizers.decoders import Decoder from tokenizers.models import Model from tokenizers.normalizers import Normalizer from tokenizers.pre_tokenizers import PreTokenizer from tokenizers.processors import PostProcessor Offsets = Tuple[int, int] class BaseTokenizer: def __init__(self, tokenizer: Tokenizer, parameters=None): self._tokenizer = tokenizer self._parameters = parameters if parameters is not None else {} def __repr__(self): return "Tokenizer(vocabulary_size={}, {})".format( self._tokenizer.get_vocab_size(), ", ".join(k + "=" + str(v) for k, v in self._parameters.items()), ) def num_special_tokens_to_add(self, is_pair: bool) -> int: """ Return the number of special tokens that would be added for single/pair sentences. :param is_pair: Boolean indicating if the input would be a single sentence or a pair :return: """ return self._tokenizer.num_special_tokens_to_add(is_pair) def get_vocab(self, with_added_tokens: bool = True) -> Dict[str, int]: """Returns the vocabulary Args: with_added_tokens: boolean: Whether to include the added tokens in the vocabulary Returns: The vocabulary """ return self._tokenizer.get_vocab(with_added_tokens=with_added_tokens) def get_added_tokens_decoder(self) -> Dict[int, AddedToken]: """Returns the added reverse vocabulary Returns: The added vocabulary mapping ints to AddedTokens """ return self._tokenizer.get_added_tokens_decoder() def get_vocab_size(self, with_added_tokens: bool = True) -> int: """Return the size of vocabulary, with or without added tokens. 
Args: with_added_tokens: (`optional`) bool: Whether to count in added special tokens or not Returns: Size of vocabulary """ return self._tokenizer.get_vocab_size(with_added_tokens=with_added_tokens) def enable_padding( self, direction: Optional[str] = "right", pad_to_multiple_of: Optional[int] = None, pad_id: Optional[int] = 0, pad_type_id: Optional[int] = 0, pad_token: Optional[str] = "[PAD]", length: Optional[int] = None, ): """Change the padding strategy Args: direction: (`optional`) str: Can be one of: `right` or `left` pad_to_multiple_of: (`optional`) unsigned int: If specified, the padding length should always snap to the next multiple of the given value. For example if we were going to pad with a length of 250 but `pad_to_multiple_of=8` then we will pad to 256. pad_id: (`optional`) unsigned int: The indice to be used when padding pad_type_id: (`optional`) unsigned int: The type indice to be used when padding pad_token: (`optional`) str: The pad token to be used when padding length: (`optional`) unsigned int: If specified, the length at which to pad. If not specified we pad using the size of the longest sequence in a batch """ return self._tokenizer.enable_padding( direction=direction, pad_to_multiple_of=pad_to_multiple_of, pad_id=pad_id, pad_type_id=pad_type_id, pad_token=pad_token, length=length, ) def no_padding(self): """Disable padding""" return self._tokenizer.no_padding() @property def padding(self) -> Optional[dict]: """Get the current padding parameters Returns: None if padding is disabled, a dict with the currently set parameters if the padding is enabled. 
""" return self._tokenizer.padding def enable_truncation(self, max_length: int, stride: Optional[int] = 0, strategy: Optional[str] = "longest_first"): """Change the truncation options Args: max_length: unsigned int: The maximum length at which to truncate stride: (`optional`) unsigned int: The length of the previous first sequence to be included in the overflowing sequence strategy: (`optional`) str: Can be one of `longest_first`, `only_first` or `only_second` """ return self._tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy) def no_truncation(self): """Disable truncation""" return self._tokenizer.no_truncation() @property def truncation(self) -> Optional[dict]: """Get the current truncation parameters Returns: None if truncation is disabled, a dict with the current truncation parameters if truncation is enabled """ return self._tokenizer.truncation def add_tokens(self, tokens: List[Union[str, AddedToken]]) -> int: """Add the given tokens to the vocabulary Args: tokens: List[Union[str, AddedToken]]: A list of tokens to add to the vocabulary. Each token can either be a string, or an instance of AddedToken Returns: The number of tokens that were added to the vocabulary """ return self._tokenizer.add_tokens(tokens) def add_special_tokens(self, special_tokens: List[Union[str, AddedToken]]) -> int: """Add the given special tokens to the vocabulary, and treat them as special tokens. The special tokens will never be processed by the model, and will be removed while decoding. Args: tokens: List[Union[str, AddedToken]]: A list of special tokens to add to the vocabulary. 
Each token can either be a string, or an instance of AddedToken Returns: The number of tokens that were added to the vocabulary """ return self._tokenizer.add_special_tokens(special_tokens) def normalize(self, sequence: str) -> str: """Normalize the given sequence Args: sequence: str: The sequence to normalize Returns: The normalized string """ return self._tokenizer.normalize(sequence) def encode( self, sequence: InputSequence, pair: Optional[InputSequence] = None, is_pretokenized: bool = False, add_special_tokens: bool = True, ) -> Encoding: """Encode the given sequence and pair. This method can process raw text sequences as well as already pre-tokenized sequences. Args: sequence: InputSequence: The sequence we want to encode. This sequence can be either raw text or pre-tokenized, according to the `is_pretokenized` argument: - If `is_pretokenized=False`: `InputSequence` is expected to be `str` - If `is_pretokenized=True`: `InputSequence` is expected to be `Union[List[str], Tuple[str]]` is_pretokenized: bool: Whether the input is already pre-tokenized. add_special_tokens: bool: Whether to add the special tokens while encoding. Returns: An Encoding """ if sequence is None: raise ValueError("encode: `sequence` can't be `None`") return self._tokenizer.encode(sequence, pair, is_pretokenized, add_special_tokens) def encode_batch( self, inputs: List[EncodeInput], is_pretokenized: bool = False, add_special_tokens: bool = True, ) -> List[Encoding]: """Encode the given inputs. This method accept both raw text sequences as well as already pre-tokenized sequences. Args: inputs: List[EncodeInput]: A list of single sequences or pair sequences to encode. 
Each `EncodeInput` is expected to be of the following form: `Union[InputSequence, Tuple[InputSequence, InputSequence]]` Each `InputSequence` can either be raw text or pre-tokenized, according to the `is_pretokenized` argument: - If `is_pretokenized=False`: `InputSequence` is expected to be `str` - If `is_pretokenized=True`: `InputSequence` is expected to be `Union[List[str], Tuple[str]]` is_pretokenized: bool: Whether the input is already pre-tokenized. add_special_tokens: bool: Whether to add the special tokens while encoding. Returns: A list of Encoding """ if inputs is None: raise ValueError("encode_batch: `inputs` can't be `None`") return self._tokenizer.encode_batch(inputs, is_pretokenized, add_special_tokens) def decode(self, ids: List[int], skip_special_tokens: Optional[bool] = True) -> str: """Decode the given list of ids to a string sequence Args: ids: List[unsigned int]: A list of ids to be decoded skip_special_tokens: (`optional`) boolean: Whether to remove all the special tokens from the output string Returns: The decoded string """ if ids is None: raise ValueError("None input is not valid. Should be a list of integers.") return self._tokenizer.decode(ids, skip_special_tokens=skip_special_tokens) def decode_batch(self, sequences: List[List[int]], skip_special_tokens: Optional[bool] = True) -> str: """Decode the list of sequences to a list of string sequences Args: sequences: List[List[unsigned int]]: A list of sequence of ids to be decoded skip_special_tokens: (`optional`) boolean: Whether to remove all the special tokens from the output strings Returns: A list of decoded strings """ if sequences is None: raise ValueError("None input is not valid. 
Should be list of list of integers.") return self._tokenizer.decode_batch(sequences, skip_special_tokens=skip_special_tokens) def token_to_id(self, token: str) -> Optional[int]: """Convert the given token to its corresponding id Args: token: str: The token to convert Returns: The corresponding id if it exists, None otherwise """ return self._tokenizer.token_to_id(token) def id_to_token(self, id: int) -> Optional[str]: """Convert the given token id to its corresponding string Args: token: id: The token id to convert Returns: The corresponding string if it exists, None otherwise """ return self._tokenizer.id_to_token(id) def save_model(self, directory: str, prefix: Optional[str] = None): """Save the current model to the given directory Args: directory: str: A path to the destination directory prefix: (Optional) str: An optional prefix, used to prefix each file name """ return self._tokenizer.model.save(directory, prefix=prefix) def save(self, path: str, pretty: bool = True): """Save the current Tokenizer at the given path Args: path: str: A path to the destination Tokenizer file """ return self._tokenizer.save(path, pretty) def to_str(self, pretty: bool = False): """Get a serialized JSON version of the Tokenizer as a str Args: pretty: bool: Whether the JSON string should be prettified Returns: str """ return self._tokenizer.to_str(pretty) def post_process( self, encoding: Encoding, pair: Optional[Encoding] = None, add_special_tokens: bool = True ) -> Encoding: """Apply all the post-processing steps to the given encodings. The various steps are: 1. Truncate according to global params (provided to `enable_truncation`) 2. Apply the PostProcessor 3. Pad according to global params. 
(provided to `enable_padding`) Args: encoding: Encoding: The main Encoding to post process pair: Optional[Encoding]: An optional pair Encoding add_special_tokens: bool: Whether to add special tokens Returns: The resulting Encoding """ return self._tokenizer.post_process(encoding, pair, add_special_tokens) @property def model(self) -> Model: return self._tokenizer.model @model.setter def model(self, model: Model): self._tokenizer.model = model @property def normalizer(self) -> Normalizer: return self._tokenizer.normalizer @normalizer.setter def normalizer(self, normalizer: Normalizer): self._tokenizer.normalizer = normalizer @property def pre_tokenizer(self) -> PreTokenizer: return self._tokenizer.pre_tokenizer @pre_tokenizer.setter def pre_tokenizer(self, pre_tokenizer: PreTokenizer): self._tokenizer.pre_tokenizer = pre_tokenizer @property def post_processor(self) -> PostProcessor: return self._tokenizer.post_processor @post_processor.setter def post_processor(self, post_processor: PostProcessor): self._tokenizer.post_processor = post_processor @property def decoder(self) -> Decoder: return self._tokenizer.decoder @decoder.setter def decoder(self, decoder: Decoder): self._tokenizer.decoder = decoder
tokenizers/bindings/python/py_src/tokenizers/implementations/base_tokenizer.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/base_tokenizer.py", "repo_id": "tokenizers", "token_count": 6036 }
340
import itertools import os import re from string import Template from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple from tokenizers import Encoding, Tokenizer dirname = os.path.dirname(__file__) css_filename = os.path.join(dirname, "visualizer-styles.css") with open(css_filename) as f: css = f.read() class Annotation: start: int end: int label: int def __init__(self, start: int, end: int, label: str): self.start = start self.end = end self.label = label AnnotationList = List[Annotation] PartialIntList = List[Optional[int]] class CharStateKey(NamedTuple): token_ix: Optional[int] anno_ix: Optional[int] class CharState: char_ix: Optional[int] def __init__(self, char_ix): self.char_ix = char_ix self.anno_ix: Optional[int] = None self.tokens: List[int] = [] @property def token_ix(self): return self.tokens[0] if len(self.tokens) > 0 else None @property def is_multitoken(self): """ BPE tokenizers can output more than one token for a char """ return len(self.tokens) > 1 def partition_key(self) -> CharStateKey: return CharStateKey( token_ix=self.token_ix, anno_ix=self.anno_ix, ) class Aligned: pass class EncodingVisualizer: """ Build an EncodingVisualizer Args: tokenizer (:class:`~tokenizers.Tokenizer`): A tokenizer instance default_to_notebook (:obj:`bool`): Whether to render html output in a notebook by default annotation_converter (:obj:`Callable`, `optional`): An optional (lambda) function that takes an annotation in any format and returns an Annotation object """ unk_token_regex = re.compile("(.{1}\b)?(unk|oov)(\b.{1})?", flags=re.IGNORECASE) def __init__( self, tokenizer: Tokenizer, default_to_notebook: bool = True, annotation_converter: Optional[Callable[[Any], Annotation]] = None, ): if default_to_notebook: try: from IPython.core.display import HTML, display except ImportError: raise Exception( """We couldn't import IPython utils for html display. Are you running in a notebook? 
You can also pass `default_to_notebook=False` to get back raw HTML """ ) self.tokenizer = tokenizer self.default_to_notebook = default_to_notebook self.annotation_coverter = annotation_converter pass def __call__( self, text: str, annotations: AnnotationList = [], default_to_notebook: Optional[bool] = None, ) -> Optional[str]: """ Build a visualization of the given text Args: text (:obj:`str`): The text to tokenize annotations (:obj:`List[Annotation]`, `optional`): An optional list of annotations of the text. The can either be an annotation class or anything else if you instantiated the visualizer with a converter function default_to_notebook (:obj:`bool`, `optional`, defaults to `False`): If True, will render the html in a notebook. Otherwise returns an html string. Returns: The HTML string if default_to_notebook is False, otherwise (default) returns None and renders the HTML in the notebook """ final_default_to_notebook = self.default_to_notebook if default_to_notebook is not None: final_default_to_notebook = default_to_notebook if final_default_to_notebook: try: from IPython.core.display import HTML, display except ImportError: raise Exception( """We couldn't import IPython utils for html display. 
Are you running in a notebook?""" ) if self.annotation_coverter is not None: annotations = list(map(self.annotation_coverter, annotations)) encoding = self.tokenizer.encode(text) html = EncodingVisualizer.__make_html(text, encoding, annotations) if final_default_to_notebook: display(HTML(html)) else: return html @staticmethod def calculate_label_colors(annotations: AnnotationList) -> Dict[str, str]: """ Generates a color palette for all the labels in a given set of annotations Args: annotations (:obj:`Annotation`): A list of annotations Returns: :obj:`dict`: A dictionary mapping labels to colors in HSL format """ if len(annotations) == 0: return {} labels = set(map(lambda x: x.label, annotations)) num_labels = len(labels) h_step = int(255 / num_labels) if h_step < 20: h_step = 20 s = 32 l = 64 # noqa: E741 h = 10 colors = {} for label in sorted(labels): # sort so we always get the same colors for a given set of labels colors[label] = f"hsl({h},{s}%,{l}%" h += h_step return colors @staticmethod def consecutive_chars_to_html( consecutive_chars_list: List[CharState], text: str, encoding: Encoding, ): """ Converts a list of "consecutive chars" into a single HTML element. Chars are consecutive if they fall under the same word, token and annotation. The CharState class is a named tuple with a "partition_key" method that makes it easy to compare if two chars are consecutive. Args: consecutive_chars_list (:obj:`List[CharState]`): A list of CharStates that have been grouped together text (:obj:`str`): The original text being processed encoding (:class:`~tokenizers.Encoding`): The encoding returned from the tokenizer Returns: :obj:`str`: The HTML span for a set of consecutive chars """ first = consecutive_chars_list[0] if first.char_ix is None: # its a special token stoken = encoding.tokens[first.token_ix] # special tokens are represented as empty spans. 
We use the data attribute and css # magic to display it return f'<span class="special-token" data-stoken={stoken}></span>' # We're not in a special token so this group has a start and end. last = consecutive_chars_list[-1] start = first.char_ix end = last.char_ix + 1 span_text = text[start:end] css_classes = [] # What css classes will we apply on the resulting span data_items = {} # What data attributes will we apply on the result span if first.token_ix is not None: # We can either be in a token or not (e.g. in white space) css_classes.append("token") if first.is_multitoken: css_classes.append("multi-token") if first.token_ix % 2: # We use this to color alternating tokens. # A token might be split by an annotation that ends in the middle of it, so this # lets us visually indicate a consecutive token despite its possible splitting in # the html markup css_classes.append("odd-token") else: # Like above, but a different color so we can see the tokens alternate css_classes.append("even-token") if EncodingVisualizer.unk_token_regex.search(encoding.tokens[first.token_ix]) is not None: # This is a special token that is in the text. probably UNK css_classes.append("special-token") # TODO is this the right name for the data attribute ? data_items["stok"] = encoding.tokens[first.token_ix] else: # In this case we are looking at a group/single char that is not tokenized. # e.g. 
white space css_classes.append("non-token") css = f'''class="{" ".join(css_classes)}"''' data = "" for key, val in data_items.items(): data += f' data-{key}="{val}"' return f"<span {css} {data} >{span_text}</span>" @staticmethod def __make_html(text: str, encoding: Encoding, annotations: AnnotationList) -> str: char_states = EncodingVisualizer.__make_char_states(text, encoding, annotations) current_consecutive_chars = [char_states[0]] prev_anno_ix = char_states[0].anno_ix spans = [] label_colors_dict = EncodingVisualizer.calculate_label_colors(annotations) cur_anno_ix = char_states[0].anno_ix if cur_anno_ix is not None: # If we started in an annotation make a span for it anno = annotations[cur_anno_ix] label = anno.label color = label_colors_dict[label] spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">') for cs in char_states[1:]: cur_anno_ix = cs.anno_ix if cur_anno_ix != prev_anno_ix: # If we've transitioned in or out of an annotation spans.append( # Create a span from the current consecutive characters EncodingVisualizer.consecutive_chars_to_html( current_consecutive_chars, text=text, encoding=encoding, ) ) current_consecutive_chars = [cs] if prev_anno_ix is not None: # if we transitioned out of an annotation close it's span spans.append("</span>") if cur_anno_ix is not None: # If we entered a new annotation make a span for it anno = annotations[cur_anno_ix] label = anno.label color = label_colors_dict[label] spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">') prev_anno_ix = cur_anno_ix if cs.partition_key() == current_consecutive_chars[0].partition_key(): # If the current charchter is in the same "group" as the previous one current_consecutive_chars.append(cs) else: # Otherwise we make a span for the previous group spans.append( EncodingVisualizer.consecutive_chars_to_html( current_consecutive_chars, text=text, encoding=encoding, ) ) # An reset the consecutive_char_list to form a new group 
current_consecutive_chars = [cs] # All that's left is to fill out the final span # TODO I think there is an edge case here where an annotation's span might not close spans.append( EncodingVisualizer.consecutive_chars_to_html( current_consecutive_chars, text=text, encoding=encoding, ) ) res = HTMLBody(spans) # Send the list of spans to the body of our html return res @staticmethod def __make_anno_map(text: str, annotations: AnnotationList) -> PartialIntList: """ Args: text (:obj:`str`): The raw text we want to align to annotations (:obj:`AnnotationList`): A (possibly empty) list of annotations Returns: A list of length len(text) whose entry at index i is None if there is no annotation on character i or k, the index of the annotation that covers index i where k is with respect to the list of annotations """ annotation_map = [None] * len(text) for anno_ix, a in enumerate(annotations): for i in range(a.start, a.end): annotation_map[i] = anno_ix return annotation_map @staticmethod def __make_char_states(text: str, encoding: Encoding, annotations: AnnotationList) -> List[CharState]: """ For each character in the original text, we emit a tuple representing it's "state": * which token_ix it corresponds to * which word_ix it corresponds to * which annotation_ix it corresponds to Args: text (:obj:`str`): The raw text we want to align to annotations (:obj:`List[Annotation]`): A (possibly empty) list of annotations encoding: (:class:`~tokenizers.Encoding`): The encoding returned from the tokenizer Returns: :obj:`List[CharState]`: A list of CharStates, indicating for each char in the text what it's state is """ annotation_map = EncodingVisualizer.__make_anno_map(text, annotations) # Todo make this a dataclass or named tuple char_states: List[CharState] = [CharState(char_ix) for char_ix in range(len(text))] for token_ix, token in enumerate(encoding.tokens): offsets = encoding.token_to_chars(token_ix) if offsets is not None: start, end = offsets for i in range(start, end): 
char_states[i].tokens.append(token_ix) for char_ix, anno_ix in enumerate(annotation_map): char_states[char_ix].anno_ix = anno_ix return char_states def HTMLBody(children: List[str], css_styles=css) -> str: """ Generates the full html with css from a list of html spans Args: children (:obj:`List[str]`): A list of strings, assumed to be html elements css_styles (:obj:`str`, `optional`): Optional alternative implementation of the css Returns: :obj:`str`: An HTML string with style markup """ children_text = "".join(children) return f""" <html> <head> <style> {css_styles} </style> </head> <body> <div class="tokenized-text" dir=auto> {children_text} </div> </body> </html> """
tokenizers/bindings/python/py_src/tokenizers/tools/visualizer.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/tools/visualizer.py", "repo_id": "tokenizers", "token_count": 6751 }
341
use std::convert::TryInto; use std::sync::Arc; use std::sync::RwLock; use crate::encoding::PyEncoding; use crate::error::ToPyResult; use pyo3::exceptions; use pyo3::exceptions::PyException; use pyo3::prelude::*; use pyo3::types::*; use serde::ser::SerializeStruct; use serde::Deserializer; use serde::Serializer; use serde::{Deserialize, Serialize}; use tk::processors::bert::BertProcessing; use tk::processors::byte_level::ByteLevel; use tk::processors::roberta::RobertaProcessing; use tk::processors::template::{SpecialToken, Template}; use tk::processors::PostProcessorWrapper; use tk::{Encoding, PostProcessor}; use tokenizers as tk; /// Base class for all post-processors /// /// This class is not supposed to be instantiated directly. Instead, any implementation of /// a PostProcessor will return an instance of this class when instantiated. #[pyclass( dict, module = "tokenizers.processors", name = "PostProcessor", subclass )] #[derive(Clone, Deserialize, Serialize)] #[serde(transparent)] pub struct PyPostProcessor { processor: PyPostProcessorTypeWrapper, } impl<I> From<I> for PyPostProcessor where I: Into<PostProcessorWrapper>, { fn from(processor: I) -> Self { PyPostProcessor { processor: processor.into().into(), } } } impl PyPostProcessor { pub(crate) fn new(processor: PyPostProcessorTypeWrapper) -> Self { PyPostProcessor { processor } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok( match self.processor { PyPostProcessorTypeWrapper::Sequence(_) => Py::new(py, (PySequence {}, base))? .into_pyobject(py)? .into_any() .into(), PyPostProcessorTypeWrapper::Single(ref inner) => { match &*inner.read().map_err(|_| { PyException::new_err("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor") })? { PostProcessorWrapper::ByteLevel(_) => Py::new(py, (PyByteLevel {}, base))? .into_pyobject(py)? .into_any() .into(), PostProcessorWrapper::Bert(_) => Py::new(py, (PyBertProcessing {}, base))? 
.into_pyobject(py)? .into_any() .into(), PostProcessorWrapper::Roberta(_) => Py::new(py, (PyRobertaProcessing {}, base))? .into_pyobject(py)? .into_any() .into(), PostProcessorWrapper::Template(_) => Py::new(py, (PyTemplateProcessing {}, base))? .into_pyobject(py)? .into_any() .into(), PostProcessorWrapper::Sequence(_) => Py::new(py, (PySequence {}, base))? .into_pyobject(py)? .into_any() .into(), } } } ) } } impl PostProcessor for PyPostProcessor { // TODO: update signature to `tk::Result<usize>` fn added_tokens(&self, is_pair: bool) -> usize { self.processor.added_tokens(is_pair) } fn process_encodings( &self, encodings: Vec<Encoding>, add_special_tokens: bool, ) -> tk::Result<Vec<Encoding>> { self.processor .process_encodings(encodings, add_special_tokens) } } #[pymethods] impl PyPostProcessor { fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.processor).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle PostProcessor: {e}" )) })?; Ok(PyBytes::new(py, data.as_bytes()).into()) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&[u8]>(py) { Ok(s) => { self.processor = serde_json::from_slice(s).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle PostProcessor: {e}" )) })?; Ok(()) } Err(e) => Err(e), } } /// Return the number of special tokens that would be added for single/pair sentences. 
/// /// Args: /// is_pair (:obj:`bool`): /// Whether the input would be a pair of sequences /// /// Returns: /// :obj:`int`: The number of tokens to add #[pyo3(text_signature = "(self, is_pair)")] fn num_special_tokens_to_add(&self, is_pair: bool) -> PyResult<usize> { Ok(self.processor.added_tokens(is_pair)) } /// Post-process the given encodings, generating the final one /// /// Args: /// encoding (:class:`~tokenizers.Encoding`): /// The encoding for the first sequence /// /// pair (:class:`~tokenizers.Encoding`, `optional`): /// The encoding for the pair sequence /// /// add_special_tokens (:obj:`bool`): /// Whether to add the special tokens /// /// Return: /// :class:`~tokenizers.Encoding`: The final encoding #[pyo3(signature = (encoding, pair = None, add_special_tokens = true))] #[pyo3(text_signature = "(self, encoding, pair=None, add_special_tokens=True)")] fn process( &self, encoding: &PyEncoding, pair: Option<&PyEncoding>, add_special_tokens: bool, ) -> PyResult<PyEncoding> { let final_encoding = ToPyResult(self.processor.process( encoding.encoding.clone(), pair.map(|e| e.encoding.clone()), add_special_tokens, )) .into_py()?; Ok(final_encoding.into()) } fn __repr__(&self) -> PyResult<String> { crate::utils::serde_pyo3::repr(self) .map_err(|e| exceptions::PyException::new_err(e.to_string())) } fn __str__(&self) -> PyResult<String> { crate::utils::serde_pyo3::to_string(self) .map_err(|e| exceptions::PyException::new_err(e.to_string())) } } macro_rules! getter { ($self: ident, $variant: ident, $($name: tt)+) => {{ let super_ = $self.as_ref(); if let PyPostProcessorTypeWrapper::Single(ref single) = super_.processor { if let PostProcessorWrapper::$variant(ref post) = *single.read().expect( "RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor" ) { post.$($name)+ } else { unreachable!() } } else { unreachable!() } }}; } macro_rules! 
setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyPostProcessorTypeWrapper::Single(ref single) = super_.processor { if let PostProcessorWrapper::$variant(ref mut post) = *single.write().expect( "RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor", ) { post.$name = $value; } } }}; ($self: ident, $variant: ident, @$name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyPostProcessorTypeWrapper::Single(ref single) = super_.processor { if let PostProcessorWrapper::$variant(ref mut post) = *single.write().expect( "RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor", ) { post.$name($value); } } };}; } #[derive(Clone)] pub(crate) enum PyPostProcessorTypeWrapper { Sequence(Vec<Arc<RwLock<PostProcessorWrapper>>>), Single(Arc<RwLock<PostProcessorWrapper>>), } impl PostProcessor for PyPostProcessorTypeWrapper { fn added_tokens(&self, is_pair: bool) -> usize { match self { PyPostProcessorTypeWrapper::Single(inner) => inner .read() .expect("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor") .added_tokens(is_pair), PyPostProcessorTypeWrapper::Sequence(inner) => inner.iter().map(|p| { p.read() .expect("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor") .added_tokens(is_pair) }).sum::<usize>(), } } fn process_encodings( &self, mut encodings: Vec<Encoding>, add_special_tokens: bool, ) -> tk::Result<Vec<Encoding>> { match self { PyPostProcessorTypeWrapper::Single(inner) => inner .read() .map_err(|_| PyException::new_err("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPreTokenizer"))? 
.process_encodings(encodings, add_special_tokens), PyPostProcessorTypeWrapper::Sequence(inner) => { for processor in inner.iter() { encodings = processor .read() .map_err(|_| PyException::new_err("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPreTokenizer"))? .process_encodings(encodings, add_special_tokens)?; } Ok(encodings) }, } } } impl<'de> Deserialize<'de> for PyPostProcessorTypeWrapper { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { let wrapper = PostProcessorWrapper::deserialize(deserializer)?; Ok(wrapper.into()) } } impl Serialize for PyPostProcessorTypeWrapper { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { match self { PyPostProcessorTypeWrapper::Sequence(seq) => { let mut ser = serializer.serialize_struct("Sequence", 2)?; ser.serialize_field("type", "Sequence")?; ser.serialize_field("processors", seq)?; ser.end() } PyPostProcessorTypeWrapper::Single(inner) => inner.serialize(serializer), } } } impl<I> From<I> for PyPostProcessorTypeWrapper where I: Into<PostProcessorWrapper>, { fn from(processor: I) -> Self { let processor = processor.into(); match processor { PostProcessorWrapper::Sequence(seq) => PyPostProcessorTypeWrapper::Sequence( seq.into_iter().map(|p| Arc::new(RwLock::new(p))).collect(), ), _ => PyPostProcessorTypeWrapper::Single(Arc::new(RwLock::new(processor.clone()))), } } } /// This post-processor takes care of adding the special tokens needed by /// a Bert model: /// /// - a SEP token /// - a CLS token /// /// Args: /// sep (:obj:`Tuple[str, int]`): /// A tuple with the string representation of the SEP token, and its id /// /// cls (:obj:`Tuple[str, int]`): /// A tuple with the string representation of the CLS token, and its id #[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "BertProcessing")] pub struct PyBertProcessing {} #[pymethods] impl PyBertProcessing { #[new] #[pyo3(text_signature = "(self, sep, 
cls)")] fn new(sep: (String, u32), cls: (String, u32)) -> (Self, PyPostProcessor) { (PyBertProcessing {}, BertProcessing::new(sep, cls).into()) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> PyResult<Bound<'p, PyTuple>> { PyTuple::new(py, [("", 0), ("", 0)]) } #[getter] fn get_sep(self_: PyRef<Self>) -> Result<Bound<'_, PyTuple>, PyErr> { let py = self_.py(); let (tok, id) = getter!(self_, Bert, get_sep_copy()); PyTuple::new( py, Vec::<PyObject>::from([tok.into_pyobject(py)?.into(), id.into_pyobject(py)?.into()]), ) } #[setter] fn set_sep(self_: PyRef<Self>, sep: Bound<'_, PyTuple>) -> PyResult<()> { let sep = sep.extract()?; setter!(self_, Bert, sep, sep); Ok(()) } #[getter] fn get_cls(self_: PyRef<Self>) -> Result<Bound<'_, PyTuple>, PyErr> { let py = self_.py(); let (tok, id) = getter!(self_, Bert, get_cls_copy()); PyTuple::new( py, Vec::<PyObject>::from([tok.into_pyobject(py)?.into(), id.into_pyobject(py)?.into()]), ) } #[setter] fn set_cls(self_: PyRef<Self>, cls: Bound<'_, PyTuple>) -> PyResult<()> { let cls = cls.extract()?; setter!(self_, Bert, cls, cls); Ok(()) } } /// This post-processor takes care of adding the special tokens needed by /// a Roberta model: /// /// - a SEP token /// - a CLS token /// /// It also takes care of trimming the offsets. /// By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't /// want the offsets to include these whitespaces, then this PostProcessor should be initialized /// with :obj:`trim_offsets=True` /// /// Args: /// sep (:obj:`Tuple[str, int]`): /// A tuple with the string representation of the SEP token, and its id /// /// cls (:obj:`Tuple[str, int]`): /// A tuple with the string representation of the CLS token, and its id /// /// trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to trim the whitespaces from the produced offsets. 
/// /// add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether the add_prefix_space option was enabled during pre-tokenization. This /// is relevant because it defines the way the offsets are trimmed out. #[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "RobertaProcessing")] pub struct PyRobertaProcessing {} #[pymethods] impl PyRobertaProcessing { #[new] #[pyo3(signature = (sep, cls, trim_offsets = true, add_prefix_space = true), text_signature = "(self, sep, cls, trim_offsets=True, add_prefix_space=True)")] fn new( sep: (String, u32), cls: (String, u32), trim_offsets: bool, add_prefix_space: bool, ) -> (Self, PyPostProcessor) { let proc = RobertaProcessing::new(sep, cls) .trim_offsets(trim_offsets) .add_prefix_space(add_prefix_space); (PyRobertaProcessing {}, proc.into()) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> PyResult<Bound<'p, PyTuple>> { PyTuple::new(py, [("", 0), ("", 0)]) } #[getter] fn get_sep(self_: PyRef<Self>) -> Result<Bound<'_, PyTuple>, PyErr> { let py = self_.py(); let (tok, id) = getter!(self_, Roberta, get_sep_copy()); PyTuple::new( py, Vec::<PyObject>::from([tok.into_pyobject(py)?.into(), id.into_pyobject(py)?.into()]), ) } #[setter] fn set_sep(self_: PyRef<Self>, sep: Bound<'_, PyTuple>) -> PyResult<()> { let sep = sep.extract()?; setter!(self_, Roberta, sep, sep); Ok(()) } #[getter] fn get_cls(self_: PyRef<Self>) -> Result<Bound<'_, PyTuple>, PyErr> { let py = self_.py(); let (tok, id) = getter!(self_, Roberta, get_cls_copy()); PyTuple::new( py, Vec::<PyObject>::from([tok.into_pyobject(py)?.into(), id.into_pyobject(py)?.into()]), ) } #[setter] fn set_cls(self_: PyRef<Self>, cls: Bound<'_, PyTuple>) -> PyResult<()> { let cls = cls.extract()?; setter!(self_, Roberta, cls, cls); Ok(()) } #[getter] fn get_trim_offsets(self_: PyRef<Self>) -> bool { getter!(self_, Roberta, trim_offsets) } #[setter] fn set_trim_offsets(self_: PyRef<Self>, trim_offsets: bool) { setter!(self_, Roberta, 
trim_offsets, trim_offsets) } #[getter] fn get_add_prefix_space(self_: PyRef<Self>) -> bool { getter!(self_, Roberta, add_prefix_space) } #[setter] fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) { setter!(self_, Roberta, add_prefix_space, add_prefix_space) } } /// This post-processor takes care of trimming the offsets. /// /// By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't /// want the offsets to include these whitespaces, then this PostProcessor must be used. /// /// Args: /// trim_offsets (:obj:`bool`): /// Whether to trim the whitespaces from the produced offsets. #[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "ByteLevel")] pub struct PyByteLevel {} #[pymethods] impl PyByteLevel { #[new] #[pyo3(signature = (add_prefix_space = None, trim_offsets = None, use_regex = None, **_kwargs), text_signature = "(self, trim_offsets=True)")] fn new( add_prefix_space: Option<bool>, trim_offsets: Option<bool>, use_regex: Option<bool>, _kwargs: Option<&Bound<'_, PyDict>>, ) -> (Self, PyPostProcessor) { let mut byte_level = ByteLevel::default(); if let Some(aps) = add_prefix_space { byte_level = byte_level.add_prefix_space(aps); } if let Some(to) = trim_offsets { byte_level = byte_level.trim_offsets(to); } if let Some(ur) = use_regex { byte_level = byte_level.use_regex(ur); } (PyByteLevel {}, byte_level.into()) } #[getter] fn get_add_prefix_space(self_: PyRef<Self>) -> bool { getter!(self_, ByteLevel, add_prefix_space) } #[setter] fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) { setter!(self_, ByteLevel, add_prefix_space, add_prefix_space) } #[getter] fn get_trim_offsets(self_: PyRef<Self>) -> bool { getter!(self_, ByteLevel, trim_offsets) } #[setter] fn set_trim_offsets(self_: PyRef<Self>, trim_offsets: bool) { setter!(self_, ByteLevel, trim_offsets, trim_offsets) } #[getter] fn get_use_regex(self_: PyRef<Self>) -> bool { getter!(self_, ByteLevel, use_regex) } 
#[setter] fn set_use_regex(self_: PyRef<Self>, use_regex: bool) { setter!(self_, ByteLevel, use_regex, use_regex) } } #[derive(Clone, Debug)] pub struct PySpecialToken(SpecialToken); impl From<PySpecialToken> for SpecialToken { fn from(v: PySpecialToken) -> Self { v.0 } } impl FromPyObject<'_> for PySpecialToken { fn extract_bound(ob: &Bound<'_, PyAny>) -> PyResult<Self> { if let Ok(v) = ob.extract::<(String, u32)>() { Ok(Self(v.into())) } else if let Ok(v) = ob.extract::<(u32, String)>() { Ok(Self(v.into())) } else if let Ok(d) = ob.downcast::<PyDict>() { let id = d .get_item("id")? .ok_or_else(|| exceptions::PyValueError::new_err("`id` must be specified"))? .extract::<String>()?; let ids = d .get_item("ids")? .ok_or_else(|| exceptions::PyValueError::new_err("`ids` must be specified"))? .extract::<Vec<u32>>()?; let tokens = d .get_item("tokens")? .ok_or_else(|| exceptions::PyValueError::new_err("`tokens` must be specified"))? .extract::<Vec<String>>()?; Ok(Self( ToPyResult(SpecialToken::new(id, ids, tokens)).into_py()?, )) } else { Err(exceptions::PyTypeError::new_err( "Expected Union[Tuple[str, int], Tuple[int, str], dict]", )) } } } #[derive(Clone, Debug)] pub struct PyTemplate(Template); impl From<PyTemplate> for Template { fn from(v: PyTemplate) -> Self { v.0 } } impl FromPyObject<'_> for PyTemplate { fn extract_bound(ob: &Bound<'_, PyAny>) -> PyResult<Self> { if let Ok(s) = ob.extract::<String>() { Ok(Self( s.try_into().map_err(exceptions::PyValueError::new_err)?, )) } else if let Ok(s) = ob.extract::<Vec<String>>() { Ok(Self( s.try_into().map_err(exceptions::PyValueError::new_err)?, )) } else { Err(exceptions::PyTypeError::new_err( "Expected Union[str, List[str]]", )) } } } /// Provides a way to specify templates in order to add the special tokens to each /// input sequence as relevant. /// /// Let's take :obj:`BERT` tokenizer as an example. It uses two special tokens, used to /// delimitate each sequence. 
:obj:`[CLS]` is always used at the beginning of the first /// sequence, and :obj:`[SEP]` is added at the end of both the first, and the pair /// sequences. The final result looks like this: /// /// - Single sequence: :obj:`[CLS] Hello there [SEP]` /// - Pair sequences: :obj:`[CLS] My name is Anthony [SEP] What is my name? [SEP]` /// /// With the type ids as following:: /// /// [CLS] ... [SEP] ... [SEP] /// 0 0 0 1 1 /// /// You can achieve such behavior using a TemplateProcessing:: /// /// TemplateProcessing( /// single="[CLS] $0 [SEP]", /// pair="[CLS] $A [SEP] $B:1 [SEP]:1", /// special_tokens=[("[CLS]", 1), ("[SEP]", 0)], /// ) /// /// In this example, each input sequence is identified using a ``$`` construct. This identifier /// lets us specify each input sequence, and the type_id to use. When nothing is specified, /// it uses the default values. Here are the different ways to specify it: /// /// - Specifying the sequence, with default ``type_id == 0``: ``$A`` or ``$B`` /// - Specifying the `type_id` with default ``sequence == A``: ``$0``, ``$1``, ``$2``, ... /// - Specifying both: ``$A:0``, ``$B:1``, ... /// /// The same construct is used for special tokens: ``<identifier>(:<type_id>)?``. /// /// **Warning**: You must ensure that you are giving the correct tokens/ids as these /// will be added to the Encoding without any further check. If the given ids correspond /// to something totally different in a `Tokenizer` using this `PostProcessor`, it /// might lead to unexpected results. 
/// /// Args: /// single (:obj:`Template`): /// The template used for single sequences /// /// pair (:obj:`Template`): /// The template used when both sequences are specified /// /// special_tokens (:obj:`Tokens`): /// The list of special tokens used in each sequences /// /// Types: /// /// Template (:obj:`str` or :obj:`List`): /// - If a :obj:`str` is provided, the whitespace is used as delimiter between tokens /// - If a :obj:`List[str]` is provided, a list of tokens /// /// Tokens (:obj:`List[Union[Tuple[int, str], Tuple[str, int], dict]]`): /// - A :obj:`Tuple` with both a token and its associated ID, in any order /// - A :obj:`dict` with the following keys: /// - "id": :obj:`str` => The special token id, as specified in the Template /// - "ids": :obj:`List[int]` => The associated IDs /// - "tokens": :obj:`List[str]` => The associated tokens /// /// The given dict expects the provided :obj:`ids` and :obj:`tokens` lists to have /// the same length. #[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "TemplateProcessing")] pub struct PyTemplateProcessing {} #[pymethods] impl PyTemplateProcessing { #[new] #[pyo3(signature = (single = None, pair = None, special_tokens = None), text_signature = "(self, single, pair, special_tokens)")] fn new( single: Option<PyTemplate>, pair: Option<PyTemplate>, special_tokens: Option<Vec<PySpecialToken>>, ) -> PyResult<(Self, PyPostProcessor)> { let mut builder = tk::processors::template::TemplateProcessing::builder(); if let Some(seq) = single { builder.single(seq.into()); } if let Some(seq) = pair { builder.pair(seq.into()); } if let Some(sp) = special_tokens { builder.special_tokens(sp); } let processor = builder .build() .map_err(|e| exceptions::PyValueError::new_err(e.to_string()))?; Ok((PyTemplateProcessing {}, processor.into())) } #[getter] fn get_single(self_: PyRef<Self>) -> String { getter!(self_, Template, get_single()) } #[setter] fn set_single(self_: PyRef<Self>, single: PyTemplate) -> 
PyResult<()> { let template: Template = Template::from(single); let super_ = self_.as_ref(); if let PyPostProcessorTypeWrapper::Single(ref inner) = super_.processor { if let PostProcessorWrapper::Template(ref mut post) = *inner .write() .map_err(|_| PyException::new_err("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor"))? { post.set_single(template); } } Ok(()) } } /// Sequence Processor /// /// Args: /// processors (:obj:`List[PostProcessor]`) /// The processors that need to be chained #[pyclass(extends=PyPostProcessor, module = "tokenizers.processors", name = "Sequence")] pub struct PySequence {} #[pymethods] impl PySequence { #[new] #[pyo3(signature = (processors_py), text_signature = "(self, processors)")] fn new(processors_py: &Bound<'_, PyList>) -> PyResult<(Self, PyPostProcessor)> { let mut processors = Vec::with_capacity(processors_py.len()); for n in processors_py.iter() { let processor: PyRef<PyPostProcessor> = n.extract()?; match &processor.processor { PyPostProcessorTypeWrapper::Sequence(inner) => { processors.extend(inner.iter().cloned()) } PyPostProcessorTypeWrapper::Single(inner) => processors.push(inner.clone()), } } Ok(( PySequence {}, PyPostProcessor::new(PyPostProcessorTypeWrapper::Sequence(processors)), )) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> PyResult<Bound<'p, PyTuple>> { PyTuple::new(py, [PyList::empty(py)]) } fn __getitem__(self_: PyRef<'_, Self>, py: Python<'_>, index: usize) -> PyResult<Py<PyAny>> { match &self_.as_ref().processor { PyPostProcessorTypeWrapper::Sequence(ref inner) => match inner.get(index) { Some(item) => { PyPostProcessor::new(PyPostProcessorTypeWrapper::Single(item.clone())) .get_as_subtype(py) } _ => Err(PyErr::new::<pyo3::exceptions::PyIndexError, _>( "Index not found", )), }, _ => Err(PyErr::new::<pyo3::exceptions::PyIndexError, _>( "This processor is not a Sequence, it does not support __getitem__", )), } } fn __setitem__(self_: PyRef<'_, Self>, index: usize, value: 
Bound<'_, PyAny>) -> PyResult<()> { let processor: PyPostProcessor = value.extract()?; let PyPostProcessorTypeWrapper::Single(processor) = processor.processor else { return Err(PyException::new_err("processor should not be a sequence")); }; match &self_.as_ref().processor { PyPostProcessorTypeWrapper::Sequence(inner) => match inner.get(index) { Some(item) => { *item .write() .map_err(|_| PyException::new_err("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor"))? = processor .read() .map_err(|_| PyException::new_err("RwLock synchronisation primitive is poisoned, cannot get subtype of PyPostProcessor"))? .clone(); } _ => { return Err(PyErr::new::<pyo3::exceptions::PyIndexError, _>( "Index not found", )) } }, _ => { return Err(PyException::new_err( "This processor is not a Sequence, it does not support __setitem__", )) } }; Ok(()) } } /// Processors Module #[pymodule] pub fn processors(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_class::<PyPostProcessor>()?; m.add_class::<PyBertProcessing>()?; m.add_class::<PyRobertaProcessing>()?; m.add_class::<PyByteLevel>()?; m.add_class::<PyTemplateProcessing>()?; m.add_class::<PySequence>()?; Ok(()) } #[cfg(test)] mod test { use std::sync::{Arc, RwLock}; use pyo3::prelude::*; use tk::processors::bert::BertProcessing; use tk::processors::PostProcessorWrapper; use crate::processors::{PyPostProcessor, PyPostProcessorTypeWrapper}; #[test] fn get_subtype() { Python::with_gil(|py| { let py_proc = PyPostProcessor::new(PyPostProcessorTypeWrapper::Single(Arc::new( RwLock::new(BertProcessing::new(("SEP".into(), 0), ("CLS".into(), 1)).into()), ))); let py_bert = py_proc.get_as_subtype(py).unwrap(); assert_eq!( "BertProcessing", py_bert.bind(py).get_type().qualname().unwrap() ); }) } #[test] fn serialize() { let rs_processing = BertProcessing::new(("SEP".into(), 0), ("CLS".into(), 1)); let rs_wrapper: PostProcessorWrapper = rs_processing.clone().into(); let rs_processing_ser = 
serde_json::to_string(&rs_processing).unwrap(); let rs_wrapper_ser = serde_json::to_string(&rs_wrapper).unwrap(); let py_processing = PyPostProcessor::new(PyPostProcessorTypeWrapper::Single(Arc::new( RwLock::new(rs_wrapper), ))); let py_ser = serde_json::to_string(&py_processing).unwrap(); assert_eq!(py_ser, rs_processing_ser); assert_eq!(py_ser, rs_wrapper_ser); let py_processing: PyPostProcessor = serde_json::from_str(&rs_processing_ser).unwrap(); match py_processing.processor { PyPostProcessorTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() { PostProcessorWrapper::Bert(_) => (), _ => panic!("Expected Bert postprocessor."), }, _ => panic!("Expected a single processor, got a sequence"), } let py_processing: PyPostProcessor = serde_json::from_str(&rs_wrapper_ser).unwrap(); match py_processing.processor { PyPostProcessorTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() { PostProcessorWrapper::Bert(_) => (), _ => panic!("Expected Bert postprocessor."), }, _ => panic!("Expected a single processor, got a sequence"), }; } }
tokenizers/bindings/python/src/processors.rs/0
{ "file_path": "tokenizers/bindings/python/src/processors.rs", "repo_id": "tokenizers", "token_count": 14503 }
342
import pickle import pytest from tokenizers.models import BPE, Model, WordLevel, WordPiece from ..utils import bert_files, data_dir, roberta_files class TestBPE: def test_instantiate(self, roberta_files): assert isinstance(BPE(), Model) assert isinstance(BPE(), BPE) vocab = {"a": 0, "b": 1, "ab": 2} merges = [("a", "b")] assert isinstance(BPE(vocab, merges), Model) assert isinstance(BPE.from_file(roberta_files["vocab"], roberta_files["merges"]), BPE) with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"): BPE(vocab=vocab) with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"): BPE(merges=merges) assert isinstance( pickle.loads(pickle.dumps(BPE(vocab, merges))), BPE, ) # Deprecated calls in 0.9 with pytest.deprecated_call(): assert isinstance(BPE(roberta_files["vocab"], roberta_files["merges"]), Model) with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"): BPE(vocab=roberta_files["vocab"]) with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"): BPE(merges=roberta_files["merges"]) with pytest.deprecated_call(): assert isinstance( pickle.loads(pickle.dumps(BPE(roberta_files["vocab"], roberta_files["merges"]))), BPE, ) def test_can_modify(self): model = BPE( dropout=0.5, unk_token="[UNK]", continuing_subword_prefix="__prefix__", end_of_word_suffix="__suffix__", fuse_unk=False, ) assert model.dropout == 0.5 assert model.unk_token == "[UNK]" assert model.continuing_subword_prefix == "__prefix__" assert model.end_of_word_suffix == "__suffix__" assert model.fuse_unk == False assert model.byte_fallback == False # Modify these model.dropout = 0.1 assert pytest.approx(model.dropout) == 0.1 model.unk_token = "<unk>" assert model.unk_token == "<unk>" model.continuing_subword_prefix = None assert model.continuing_subword_prefix == None model.end_of_word_suffix = "suff" assert model.end_of_word_suffix == "suff" model.fuse_unk = True assert model.fuse_unk == True 
model.byte_fallback = True assert model.byte_fallback == True def test_dropout_zero(self): model = BPE(dropout=0.0) assert model.dropout == 0.0 class TestWordPiece: def test_instantiate(self, bert_files): assert isinstance(WordPiece(), Model) assert isinstance(WordPiece(), WordPiece) vocab = {"a": 0, "b": 1, "ab": 2} assert isinstance(WordPiece(vocab), Model) assert isinstance(WordPiece(vocab), WordPiece) assert isinstance(WordPiece.from_file(bert_files["vocab"]), WordPiece) assert isinstance(pickle.loads(pickle.dumps(WordPiece(vocab))), WordPiece) # Deprecated calls in 0.9 with pytest.deprecated_call(): assert isinstance(WordPiece(bert_files["vocab"]), Model) with pytest.deprecated_call(): assert isinstance(pickle.loads(pickle.dumps(WordPiece(bert_files["vocab"]))), WordPiece) def test_can_modify(self): model = WordPiece( unk_token="<oov>", continuing_subword_prefix="__prefix__", max_input_chars_per_word=200, ) assert model.unk_token == "<oov>" assert model.continuing_subword_prefix == "__prefix__" assert model.max_input_chars_per_word == 200 # Modify these model.unk_token = "<unk>" assert model.unk_token == "<unk>" model.continuing_subword_prefix = "$$$" assert model.continuing_subword_prefix == "$$$" model.max_input_chars_per_word = 10 assert model.max_input_chars_per_word == 10 class TestWordLevel: def test_instantiate(self, roberta_files): assert isinstance(WordLevel(), Model) assert isinstance(WordLevel(), WordLevel) vocab = {"a": 0, "b": 1, "ab": 2} assert isinstance(WordLevel(vocab), Model) assert isinstance(WordLevel(vocab), WordLevel) assert isinstance(WordLevel.from_file(roberta_files["vocab"]), WordLevel) # The WordLevel model expects a vocab.json using the same format as roberta # so we can just try to load with this file with pytest.deprecated_call(): assert isinstance(WordLevel(roberta_files["vocab"]), Model) with pytest.deprecated_call(): assert isinstance(WordLevel(roberta_files["vocab"]), WordLevel) def test_can_modify(self): model = 
WordLevel(unk_token="<oov>") assert model.unk_token == "<oov>" # Modify these model.unk_token = "<unk>" assert model.unk_token == "<unk>"
tokenizers/bindings/python/tests/bindings/test_models.py/0
{ "file_path": "tokenizers/bindings/python/tests/bindings/test_models.py", "repo_id": "tokenizers", "token_count": 2304 }
343
# Visualizer <tokenizerslangcontent> <python> ## Annotation [[autodoc]] tokenizers.tools.Annotation ## EncodingVisualizer [[autodoc]] tokenizers.tools.EncodingVisualizer - __call__ </python> <rust> The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website. </rust> <node> The node API has not been documented yet. </node> </tokenizerslangcontent>
tokenizers/docs/source-doc-builder/api/visualizer.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/api/visualizer.mdx", "repo_id": "tokenizers", "token_count": 134 }
344
# Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [0.13.2] - Python only changes ## [0.13.1] - [#1072] Fixing Roberta type ids. ## [0.13.0] - [#1009] `unstable_wasm` feature to support building on Wasm (it's unstable !) - [#1008] `Decoder` is now a composable trait, but without being backward incompatible - [#1047, #1051, #1052] `Processor` is now a composable trait, but without being backward incompatible Both trait changes warrant a "major" number since, despite best efforts to not break backward compatibility, the code is different enough that we cannot be exactly sure. ## [0.12.1] - [#938] **Reverted breaking change**. https://github.com/huggingface/transformers/issues/16520 ## [0.12.0] YANKED Bump minor version because of a breaking change. - [#938] [REVERTED IN 0.12.1] **Breaking change**. Decoder trait is modified to be composable. This is only breaking if you are using decoders on their own. tokenizers should be error free. - [#939] Making the regex in `ByteLevel` pre_tokenizer optional (necessary for BigScience) - [#952] Fixed the vocabulary size of UnigramTrainer output (to respect added tokens) - [#954] Fixed not being able to save vocabularies with holes in vocab (ConvBert). Yell warnings instead, but stop panicking. - [#961] Added link for Ruby port of `tokenizers` - [#960] Feature gate for `cli` and its `clap` dependency ## [0.11.3] - [#919] Fixing single_word AddedToken. (regression from 0.11.2) - [#916] Deserializing faster `added_tokens` by loading them in batch. ## [0.11.2] - [#884] Fixing bad deserialization following inclusion of a default for Punctuation ## [0.11.1] - [#882] Fixing Punctuation deserialize without argument. 
- [#868] Fixing missing direction in TruncationParams - [#860] Adding TruncationSide to TruncationParams ## [0.11.0] ### Fixed - [#236]: Fix a bug with offsets being shifted when there are sub-sequences (Usually with special tokens and/or added tokens in the sequence). - [#286]: Fix various crash when training a BPE model - [#309]: Fixed a few bugs related to additional vocabulary/tokens - [#363]: Fix panic from unwrapping `File::open` in `count_words` ### Changed - [#234]: Completely changed the alignment mappings available on `Encoding`. Previous mappings were misleading and only providing offsets. New ones provide methods to easily convert between `char` or `word` (input space) and `token` (output space) - [#236]: `AddedToken` with special options like `rstrip` will keep the matched whitespaces in the textual representation of the token, exposed in `tokens` on the `Encoding`. The ID stays the same as usual. This fixes the offsets for said tokens. - [#236]: Offsets are now converted back to the original referential before we merge the sub-sequences together and then do the post-processing. This also fixes some offsets bugs. - [#236]: ByteLevel PostProcessor now uses the `add_prefix_space` attribute to determine how to trim offsets. - Improved `TruncationError` to handle cases where provided max length is too low. - [#249]: `encode` and `encode_batch` input has been greatly improved, and it now also accept pre-tokenized inputs. - Improved `TruncationError` to handle cases where provided max length is too low. - [#276]: Improve BPE training speeds, by reading files sequentially, but parallelizing the processing of each file - [#280]: Use `onig` for byte-level pre-tokenization to remove all the differences with the original implementation from GPT-2 - [#309]: Improved the management of the additional vocabulary. This introduces an option `normalized`, controlling whether a token should be extracted from the normalized version of the input text. 
- [#330]: BertNormalizer now keeps the same behavior than the original implementation when `strip_accents` is not specified. - [#355]: Tokenizer does not use any dynamic dispatch anymore. - [#377]: Use byte offsets everywhere (instead of the char offsets) ### Added - [#236]: RobertaProcessing is now also taking care of trimming offsets, and works just as ByteLevel on this front. - [#272]: Serialization of the `Tokenizer` and all the parts (`PreTokenizer`, `Normalizer`, ...) using serde. It is now easy to save/load an entire tokenizer. - [#289]: Ability to pad to a multiple of a specified value. This is especially useful to ensure activation of the Tensor Cores, while ensuring padding to a multiple of 8. - [#298]: Ability to get the currently set truncation/padding params - [#311]: Ability to enable/disable the parallelism using the `TOKENIZERS_PARALLELISM` environment variable. - [#403]: Add `TemplateProcessing` `PostProcessor`. ### How to migrate - Replace any `XXX_to_YYY_offsets()` method call by any of the new ones. - Specify the `add_prefix_space` and `trim_offsets` options on `RobertaProcessing` if you don't want the offsets trimmed out. - Any custom `PostProcessor` now handles offsets relative to the original string (as opposed to the normalized one). ## [0.10.1] ### Fixed - [#226]: Fix the word indexes when there are special tokens ## [0.10.0] ### Changed - [#222]: All Tokenizer's subparts must now be `Send + Sync` ### Added - [#208]: Ability to retrieve the vocabulary from the `Tokenizer` & `Model` ### Fixed - [#205]: Trim the decoded string in `BPEDecoder` - [b770f36]: Fix a bug with added tokens generated IDs ## [0.9.0] ### Changed - Only one progress bar while reading files during training. This is better for use-cases with a high number of files as it avoids having too many progress bars on screen. Also avoids reading the size of each file before starting to actually read these files, as this process could take really long. 
- [#190]: Improved BPE and WordPiece builders - [#193]: `encode` and `encode_batch` now take a new argument, specifying whether we should add the special tokens - [#197]: The `NormalizedString` has been removed from the `Encoding`. It is now possible to retrieve it by calling `normalize` on the `Tokenizer`. This brings a reduction of 70% of the memory footprint - [#197]: The `NormalizedString` API has been improved. It is now possible to retrieve parts of both strings using both "normalized" or "original" offsets - [#197]: The offsets provided on `Encoding` are now relative to the original string, and not the normalized one anymore - `AddedToken` are now used for both `add_special_tokens` and `add_tokens`. Also, these AddedToken have more options to allow various behaviors. ### Added - [#188]: `impl PostProcessor for ByteLevel`: Handles trimming the offsets if activated. This avoids the unintuitive inclusion of the whitespaces in the produced offsets, even if these whitespaces are part of the actual token - More alignment mappings on the `Encoding`. - `post_process` can be called on the `Tokenizer` ### Fixed - [#193]: Fix some issues with the offsets being wrong with the `ByteLevel` BPE: - when `add_prefix_space` is activated - [#156]: when a Unicode character gets split-up in multiple byte-level characters - Fix a bug where offsets were wrong when there was any added tokens in the sequence being encoded. - [#175]: Fix a bug that prevented the addition of more than a certain amount of tokens (even if not advised, but that's not the question) ### How to migrate - Add the `ByteLevel` `PostProcessor` to your byte-level BPE tokenizers if relevant. 
## [0.8.0] ### Changed - [#165]: Big improvements in speed for BPE (Both training and tokenization) ### Fixed - [#163]: Do not open all files directly while training - [#156]: There was a bug in ByteLevel PreTokenizer that caused offsets to be wrong if a char got split up in multiple bytes - [#174]: The `LongestFirst` truncation strategy had a bug [#1072]: https://github.com/huggingface/tokenizers/pull/1072 [#956]: https://github.com/huggingface/tokenizers/pull/956 [#1008]: https://github.com/huggingface/tokenizers/pull/1008 [#1009]: https://github.com/huggingface/tokenizers/pull/1009 [#1047]: https://github.com/huggingface/tokenizers/pull/1047 [#1055]: https://github.com/huggingface/tokenizers/pull/1055 [#1051]: https://github.com/huggingface/tokenizers/pull/1051 [#1052]: https://github.com/huggingface/tokenizers/pull/1052 [#938]: https://github.com/huggingface/tokenizers/pull/938 [#939]: https://github.com/huggingface/tokenizers/pull/939 [#952]: https://github.com/huggingface/tokenizers/pull/952 [#954]: https://github.com/huggingface/tokenizers/pull/954 [#961]: https://github.com/huggingface/tokenizers/pull/961 [#960]: https://github.com/huggingface/tokenizers/pull/960 [#919]: https://github.com/huggingface/tokenizers/pull/919 [#916]: https://github.com/huggingface/tokenizers/pull/916 [#884]: https://github.com/huggingface/tokenizers/pull/884 [#882]: https://github.com/huggingface/tokenizers/pull/882 [#868]: https://github.com/huggingface/tokenizers/pull/868 [#860]: https://github.com/huggingface/tokenizers/pull/860 [#403]: https://github.com/huggingface/tokenizers/pull/403 [#377]: https://github.com/huggingface/tokenizers/pull/377 [#355]: https://github.com/huggingface/tokenizers/pull/355 [#363]: https://github.com/huggingface/tokenizers/pull/363 [#330]: https://github.com/huggingface/tokenizers/pull/330 [#311]: https://github.com/huggingface/tokenizers/pull/311 [#309]: https://github.com/huggingface/tokenizers/pull/309 [#298]: 
https://github.com/huggingface/tokenizers/pull/298 [#289]: https://github.com/huggingface/tokenizers/pull/289 [#286]: https://github.com/huggingface/tokenizers/pull/286 [#280]: https://github.com/huggingface/tokenizers/pull/280 [#276]: https://github.com/huggingface/tokenizers/pull/276 [#272]: https://github.com/huggingface/tokenizers/pull/272 [#249]: https://github.com/huggingface/tokenizers/pull/249 [b770f36]: https://github.com/huggingface/tokenizers/commit/b770f364280af33efeffea8f0003102cda8cf1b7 [#236]: https://github.com/huggingface/tokenizers/pull/236 [#234]: https://github.com/huggingface/tokenizers/pull/234 [#226]: https://github.com/huggingface/tokenizers/pull/226 [#222]: https://github.com/huggingface/tokenizers/pull/222 [#208]: https://github.com/huggingface/tokenizers/pull/208 [#205]: https://github.com/huggingface/tokenizers/issues/205 [#197]: https://github.com/huggingface/tokenizers/pull/197 [#193]: https://github.com/huggingface/tokenizers/pull/193 [#190]: https://github.com/huggingface/tokenizers/pull/190 [#188]: https://github.com/huggingface/tokenizers/pull/188 [#175]: https://github.com/huggingface/tokenizers/issues/175 [#174]: https://github.com/huggingface/tokenizers/issues/174 [#165]: https://github.com/huggingface/tokenizers/pull/165 [#163]: https://github.com/huggingface/tokenizers/issues/163 [#156]: https://github.com/huggingface/tokenizers/pull/156
tokenizers/tokenizers/CHANGELOG.md/0
{ "file_path": "tokenizers/tokenizers/CHANGELOG.md", "repo_id": "tokenizers", "token_count": 3387 }
345
<div align="center"> <h1><code>wasm-pack-template</code></h1> <strong>A template for kick starting a Rust and WebAssembly project using <a href="https://github.com/rustwasm/wasm-pack">wasm-pack</a>.</strong> <p> <a href="https://travis-ci.org/rustwasm/wasm-pack-template"><img src="https://img.shields.io/travis/rustwasm/wasm-pack-template.svg?style=flat-square" alt="Build Status" /></a> </p> <h3> <a href="https://rustwasm.github.io/docs/wasm-pack/tutorials/npm-browser-packages/index.html">Tutorial</a> <span> | </span> <a href="https://discordapp.com/channels/442252698964721669/443151097398296587">Chat</a> </h3> <sub>Built with 🦀🕸 by <a href="https://rustwasm.github.io/">The Rust and WebAssembly Working Group</a></sub> </div> ## About This is an example project showing off a very basic use case for `wasm` tokenizers usage. [**📚 Read this template tutorial! 📚**][template-docs] This template is designed for compiling Rust libraries into WebAssembly and publishing the resulting package to NPM. Be sure to check out [other `wasm-pack` tutorials online][tutorials] for other templates and usages of `wasm-pack`. [tutorials]: https://rustwasm.github.io/docs/wasm-pack/tutorials/index.html [template-docs]: https://rustwasm.github.io/docs/wasm-pack/tutorials/npm-browser-packages/index.html ## 🚴 Usage ### 🐑 Use `cargo generate` to Clone this Template [Learn more about `cargo generate` here.](https://github.com/ashleygwilliams/cargo-generate) ``` cargo generate --git https://github.com/rustwasm/wasm-pack-template.git --name my-project cd my-project ``` ### 🛠️ Build with `wasm-pack build` ``` wasm-pack build ``` ### 🔬 Test in Headless Browsers with `wasm-pack test` ``` wasm-pack test --headless --firefox ``` ### 🎁 Publish to NPM with `wasm-pack publish` ``` wasm-pack publish ``` ## 🔋 Batteries Included * [`wasm-bindgen`](https://github.com/rustwasm/wasm-bindgen) for communicating between WebAssembly and JavaScript. 
* [`console_error_panic_hook`](https://github.com/rustwasm/console_error_panic_hook) for logging panic messages to the developer console. * [`wee_alloc`](https://github.com/rustwasm/wee_alloc), an allocator optimized for small code size.
tokenizers/tokenizers/examples/unstable_wasm/README.md/0
{ "file_path": "tokenizers/tokenizers/examples/unstable_wasm/README.md", "repo_id": "tokenizers", "token_count": 811 }
346
stable
tokenizers/tokenizers/rust-toolchain/0
{ "file_path": "tokenizers/tokenizers/rust-toolchain", "repo_id": "tokenizers", "token_count": 2 }
347
use dary_heap::QuaternaryHeap; use rand::distr::weighted::WeightedIndex; use rand::{prelude::*, rng}; use std::cell::RefCell; use std::cmp::{min, Ordering}; use std::rc::Rc; type NodeRef = Rc<RefCell<Node>>; type HypothesisRef = Rc<RefCell<Hypothesis>>; type Agenda = QuaternaryHeap<Hypothesis>; struct Hypothesis { node_ref: NodeRef, next: Option<HypothesisRef>, fx: f64, gx: f64, } impl Hypothesis { pub fn new(node_ref: NodeRef, next: Option<HypothesisRef>, fx: f64, gx: f64) -> Self { Self { node_ref, next, fx, gx, } } } impl PartialEq for Hypothesis { fn eq(&self, other: &Self) -> bool { self.fx == other.fx } } impl Eq for Hypothesis {} impl PartialOrd for Hypothesis { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } // TODO Maybe use Ordered Floats (https://docs.rs/ordered-float/1.0.2/ordered_float/) impl Ord for Hypothesis { fn cmp(&self, other: &Self) -> Ordering { if self.fx < other.fx { Ordering::Less } else { Ordering::Greater } } } /// Structure to implement Viterbi algorithm to find the best encoding, or sample /// from all possible encodings of a given sentence. 
#[derive(Debug)] pub struct Lattice<'a> { pub(super) sentence: &'a str, len: usize, nodes: Vec<NodeRef>, pub(super) begin_nodes: Vec<Vec<NodeRef>>, pub(super) end_nodes: Vec<Vec<NodeRef>>, _bos_id: usize, _eos_id: usize, } impl std::fmt::Display for Lattice<'_> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let display_pieces = |nodes: &Vec<Vec<NodeRef>>| { nodes .iter() .map(|l| { l.iter() .map(|n| self.piece(&n.borrow())) .collect::<Vec<_>>() }) .collect::<Vec<_>>() }; f.debug_struct("Lattice") .field("sentence", &self.sentence) .field("begin_nodes", &display_pieces(&self.begin_nodes)) .field("end_nodes", &display_pieces(&self.end_nodes)) .finish() } } /// A node from the lattice, that helps reconstruct the underlying `String` #[derive(Debug, Clone)] pub struct Node { // Vocabulary id pub(super) id: usize, // Local lattice identifier pub(super) node_id: usize, pos: usize, length: usize, prev: Option<NodeRef>, backtrace_score: f64, score: f64, } impl PartialEq for Node { fn eq(&self, other: &Node) -> bool { self.id == other.id } } impl Node { pub fn new(id: usize, node_id: usize, pos: usize, length: usize, score: f64) -> Self { Self { id, node_id, pos, length, prev: None, score, backtrace_score: 0.0, } } } /// Returns log(exp(x) + exp(y)). /// if init_mode is true, returns log(exp(y)) == y. 
/// log(\sum_i exp(a[i])) can be computed as /// for (int i = 0; i < a.size(); ++i) /// x = LogSumExp(x, a[i], i == 0); fn log_sum_exp(x: f64, y: f64, init_mode: bool) -> f64 { if init_mode { y } else { let (vmin, vmax) = if x > y { (y, x) } else { (x, y) }; let k_minus_log_epsilon = 50.0; if vmax > vmin + k_minus_log_epsilon { vmax } else { vmax + ((vmin - vmax).exp() + 1.0).ln() } } } impl<'a> Lattice<'a> { pub fn from(sentence: &'a str, bos_id: usize, eos_id: usize) -> Self { let len = sentence.len(); let k_reserved_node_size = 16; // We are adding 2 tokens, bos and eos let mut nodes: Vec<NodeRef> = Vec::with_capacity(k_reserved_node_size); let mut begin_nodes = vec![Vec::with_capacity(k_reserved_node_size); len + 1]; let mut end_nodes = vec![Vec::with_capacity(k_reserved_node_size); len + 1]; let bos = Rc::new(RefCell::new(Node::new(bos_id, 0, 0, 0, 0.0))); let eos = Rc::new(RefCell::new(Node::new(eos_id, 1, len, 0, 0.0))); begin_nodes[len].push(Rc::clone(&eos)); end_nodes[0].push(Rc::clone(&bos)); nodes.push(bos); nodes.push(eos); Self { sentence, len, nodes, begin_nodes, end_nodes, _bos_id: bos_id, _eos_id: eos_id, } } pub fn insert(&mut self, pos: usize, length: usize, score: f64, id: usize) { let node_id = self.nodes.len(); let node = Rc::new(RefCell::new(Node::new(id, node_id, pos, length, score))); self.begin_nodes[pos].push(Rc::clone(&node)); self.end_nodes[pos + length].push(Rc::clone(&node)); self.nodes.push(node); } pub fn viterbi(&mut self) -> Vec<NodeRef> { let len = self.len; let mut pos = 0; while pos <= len { if self.begin_nodes[pos].is_empty() { return vec![]; } for rnode in &self.begin_nodes[pos] { rnode.borrow_mut().prev = None; let mut best_score = 0.0; let mut best_node: Option<NodeRef> = None; for lnode in &self.end_nodes[pos] { let score = lnode.borrow().backtrace_score + rnode.borrow().score; if best_node.is_none() || score > best_score { // TODO can we remove this clone ? 
best_node = Some(lnode.clone()); best_score = score } } match best_node { Some(bnode) => { rnode.borrow_mut().prev = Some(Rc::clone(&bnode)); rnode.borrow_mut().backtrace_score = best_score; } None => return vec![], } } if let Some(c) = self.sentence[pos..].chars().next() { pos += c.len_utf8(); } else { break; } } let mut results: Vec<NodeRef> = vec![]; let root = self.begin_nodes[len][0].borrow(); let prev = root.prev.as_ref(); if prev.is_none() { return vec![]; } let mut node: NodeRef = prev.unwrap().clone(); while node.borrow().prev.is_some() { results.push(node.clone()); let n = node.borrow().clone(); node = n.prev.as_ref().unwrap().clone(); } results.reverse(); results } pub fn piece(&self, node: &Node) -> String { self.sentence[node.pos..node.pos + node.length].to_owned() } pub fn tokens(&mut self) -> Vec<String> { self.viterbi() .iter() .map(|node| self.piece(&node.borrow())) .collect() } pub fn nbest(&mut self, n: usize) -> Vec<Vec<NodeRef>> { match n { 0 => vec![], 1 => vec![self.viterbi()], _ => { // let k_reserved_hypothesis_size = 512; let mut agenda: Agenda = QuaternaryHeap::new(); let mut hypotheses: Vec<Vec<NodeRef>> = vec![]; let eos = self.eos_node(); let score = eos.borrow().score; let hypo = Hypothesis::new(eos, None, score, score); agenda.push(hypo); // Fill backtrace scores self.viterbi(); while !agenda.is_empty() { let top = Rc::new(RefCell::new(agenda.pop().unwrap())); let node = Rc::clone(&top.borrow().node_ref); if node.borrow().id == self.bos_node().borrow().id { let mut hypothesis = vec![]; let mut next: HypothesisRef = Rc::clone(top.borrow().next.as_ref().unwrap()); while next.borrow().next.is_some() { hypothesis.push(next.borrow().node_ref.clone()); let c: HypothesisRef = next.clone(); // let c: Ref<Hypothesis> = next.clone().borrow(); next = Rc::clone(c.borrow().next.as_ref().unwrap()); } hypotheses.push(hypothesis); if hypotheses.len() == n { return hypotheses; } } else { for lnode in &self.end_nodes[node.borrow().pos] { let top_gx = 
top.borrow().gx; let fx = lnode.borrow().backtrace_score + top_gx; let gx = lnode.borrow().score + top_gx; let hyp = Hypothesis::new(Rc::clone(lnode), Some(Rc::clone(&top)), fx, gx); agenda.push(hyp); } // When the input is too long or contains duplicated phrases, // `agenda` will get extremely big. Here we avoid this case by // dynamically shrinking the agenda. let k_max_agenda_size = 100_000; let k_min_agenda_size = 512; if agenda.len() > k_max_agenda_size { let mut new_agenda = QuaternaryHeap::new(); let len = min(k_min_agenda_size, n * 10); for _i in 0..len { new_agenda.push(agenda.pop().unwrap()); } agenda = new_agenda; } } } hypotheses } } } pub fn nbest_tokens(&mut self, n: usize) -> Vec<Vec<String>> { self.nbest(n) .iter() .map(|v| v.iter().map(|node| self.piece(&node.borrow())).collect()) .collect() } pub fn len(&self) -> usize { self.len } pub fn is_empty(&self) -> bool { self.len == 0 } pub fn bos_node(&self) -> NodeRef { Rc::clone(&self.end_nodes[0][0]) } pub fn eos_node(&self) -> NodeRef { Rc::clone(&self.begin_nodes[self.len][0]) } pub fn surface(&self, n: usize) -> &str { match self.sentence.char_indices().nth(n) { Some((pos, _)) => &self.sentence[pos..], None => "", } } pub fn sentence(&self) -> &str { self.sentence } pub fn populate_marginal(&self, freq: f64, expected: &mut [f64]) -> f64 { let len = self.len(); let n_nodes = self.nodes.len(); let mut alpha = vec![0.0; n_nodes]; let mut beta = vec![0.0; n_nodes]; for pos in 0..=len { for rnode in &self.begin_nodes[pos] { for lnode in &self.end_nodes[pos] { let lid = lnode.borrow().node_id; let rid = rnode.borrow().node_id; alpha[rid] = log_sum_exp( alpha[rid], lnode.borrow().score + alpha[lid], *lnode == self.end_nodes[pos][0], ); } } } for pos in (0..=len).rev() { // let rpos = len - pos; for lnode in &self.end_nodes[pos] { for rnode in &self.begin_nodes[pos] { let lid = lnode.borrow().node_id; let rid = rnode.borrow().node_id; beta[lid] = log_sum_exp( beta[lid], rnode.borrow().score + beta[rid], 
*rnode == self.begin_nodes[pos][0], ); } } } let eos_id = self.begin_nodes[len][0].borrow().node_id; let z = alpha[eos_id]; for pos in 0..len { for node in &self.begin_nodes[pos] { let node_id = node.borrow().node_id; let id = node.borrow().id; let a = alpha[node_id]; let b = beta[node_id]; let total = a + node.borrow().score + b - z; let update = freq * total.exp(); expected[id] += update; } } freq * z } pub fn sample(&self, theta: f64) -> Vec<NodeRef> { let len = self.len(); if len == 0 { return vec![]; } let mut alpha = vec![0.0; self.nodes.len()]; for pos in 0..=len { for rnode in &self.begin_nodes[pos] { for lnode in &self.end_nodes[pos] { let lid = lnode.borrow().node_id; let rid = rnode.borrow().node_id; alpha[rid] = log_sum_exp( alpha[rid], theta * (lnode.borrow().score + alpha[lid]), *lnode == self.end_nodes[pos][0], ); } } } let mut rng = rng(); let mut results: Vec<NodeRef> = vec![]; let mut probs: Vec<f64> = vec![]; let mut z = alpha[self.eos_node().borrow().node_id]; let mut node = self.eos_node(); loop { probs.clear(); let pos = node.borrow().pos; for lnode in &self.end_nodes[pos] { let lid = lnode.borrow().node_id; probs.push((alpha[lid] + theta * lnode.borrow().score - z).exp()) } let dist = WeightedIndex::new(&probs).unwrap(); let index = dist.sample(&mut rng); node = Rc::clone(&self.end_nodes[pos][index]); if node == self.bos_node() { break; } z = alpha[node.borrow().node_id]; results.push(Rc::clone(&node)); } results.reverse(); results } pub fn sample_token(&self, theta: f64) -> Vec<String> { self.sample(theta) .iter() .map(|node| self.piece(&node.borrow())) .collect() } } #[cfg(test)] mod tests { use super::*; use assert_approx_eq::assert_approx_eq; #[test] fn set_sentence() { let lattice = Lattice::from("", 1, 2); assert_eq!(lattice.len(), 0); let lattice = Lattice::from("", 1, 2); assert_eq!(lattice.len(), 0); assert_eq!(lattice.sentence(), ""); assert_eq!(lattice.surface(0), ""); let lattice = Lattice::from("test", 1, 2); 
assert_eq!(lattice.len(), 4); assert_eq!(lattice.sentence(), "test"); assert_eq!(lattice.surface(0), "test"); assert_eq!(lattice.surface(1), "est"); assert_eq!(lattice.surface(2), "st"); assert_eq!(lattice.surface(3), "t"); let bos = lattice.bos_node(); let eos = lattice.eos_node(); assert_eq!(bos.borrow().id, 1); assert_eq!(eos.borrow().id, 2); assert_eq!( lattice.end_nodes[0].first().unwrap().borrow().id, bos.borrow().id ); assert_eq!( lattice.begin_nodes[4].first().unwrap().borrow().id, eos.borrow().id ); let lattice = Lattice::from("テストab", 1, 2); assert_eq!(lattice.len(), 11); assert_eq!(lattice.sentence(), "テストab"); assert_eq!(lattice.surface(0), "テストab"); assert_eq!(lattice.surface(1), "ストab"); assert_eq!(lattice.surface(2), "トab"); assert_eq!(lattice.surface(3), "ab"); assert_eq!(lattice.surface(4), "b"); } #[test] fn insert_test() { let mut lattice = Lattice::from("ABあい", 1, 2); lattice.insert(0, 1, 0.0, 3); lattice.insert(1, 1, 0.0, 4); lattice.insert(2, 3, 0.0, 5); lattice.insert(5, 3, 0.0, 6); lattice.insert(0, 2, 0.0, 7); lattice.insert(1, 4, 0.0, 8); lattice.insert(2, 6, 0.0, 9); // 0 & 1 are bos and eos let node0 = lattice.nodes[2].borrow(); let node1 = lattice.nodes[3].borrow(); let node2 = lattice.nodes[4].borrow(); let node3 = lattice.nodes[5].borrow(); let node4 = lattice.nodes[6].borrow(); let node5 = lattice.nodes[7].borrow(); let node6 = lattice.nodes[8].borrow(); assert_eq!(lattice.piece(&node0), "A"); assert_eq!(lattice.piece(&node1), "B"); assert_eq!(lattice.piece(&node2), "あ"); assert_eq!(lattice.piece(&node3), "い"); assert_eq!(lattice.piece(&node4), "AB"); assert_eq!(lattice.piece(&node5), "Bあ"); assert_eq!(lattice.piece(&node6), "あい"); assert_eq!(node0.pos, 0); assert_eq!(node1.pos, 1); assert_eq!(node2.pos, 2); assert_eq!(node3.pos, 5); assert_eq!(node4.pos, 0); assert_eq!(node5.pos, 1); assert_eq!(node6.pos, 2); assert_eq!(node0.length, 1); assert_eq!(node1.length, 1); assert_eq!(node2.length, 3); assert_eq!(node3.length, 3); 
assert_eq!(node4.length, 2); assert_eq!(node5.length, 4); assert_eq!(node6.length, 6); assert_eq!(lattice.bos_node().borrow().id, 1); assert_eq!(lattice.eos_node().borrow().id, 2); assert_eq!(node0.id, 3); assert_eq!(node1.id, 4); assert_eq!(node2.id, 5); assert_eq!(node3.id, 6); assert_eq!(node4.id, 7); assert_eq!(node5.id, 8); assert_eq!(node6.id, 9); assert_eq!(lattice.begin_nodes[0].len(), 2); assert_eq!(lattice.begin_nodes[1].len(), 2); assert_eq!(lattice.begin_nodes[2].len(), 2); assert_eq!(lattice.begin_nodes[5].len(), 1); assert_eq!(lattice.begin_nodes[8].len(), 1); assert_eq!(lattice.end_nodes[0].len(), 1); assert_eq!(lattice.end_nodes[1].len(), 1); assert_eq!(lattice.end_nodes[2].len(), 2); assert_eq!(lattice.end_nodes[5].len(), 2); assert_eq!(lattice.end_nodes[8].len(), 2); assert_eq!(lattice.begin_nodes[0][0].borrow().id, node0.id); assert_eq!(lattice.begin_nodes[0][1].borrow().id, node4.id); assert_eq!(lattice.begin_nodes[1][0].borrow().id, node1.id); assert_eq!(lattice.begin_nodes[1][1].borrow().id, node5.id); assert_eq!(lattice.begin_nodes[2][0].borrow().id, node2.id); assert_eq!(lattice.begin_nodes[2][1].borrow().id, node6.id); assert_eq!(lattice.begin_nodes[5][0].borrow().id, node3.id); assert_eq!( lattice.eos_node().borrow().id, lattice.begin_nodes[8][0].borrow().id ); assert_eq!( lattice.bos_node().borrow().id, lattice.end_nodes[0][0].borrow().id ); assert_eq!(node0.id, lattice.end_nodes[1][0].borrow().id); assert_eq!(node1.id, lattice.end_nodes[2][0].borrow().id); assert_eq!(node4.id, lattice.end_nodes[2][1].borrow().id); assert_eq!(node2.id, lattice.end_nodes[5][0].borrow().id); assert_eq!(node5.id, lattice.end_nodes[5][1].borrow().id); assert_eq!(node3.id, lattice.end_nodes[8][0].borrow().id); assert_eq!(node6.id, lattice.end_nodes[8][1].borrow().id); } #[test] fn test_viterbi() { let mut lattice = Lattice::from("ABC", 1, 2); assert_eq!(lattice.viterbi(), vec![]); // Still incomplete lattice.insert(0, 1, 0.0, 3); assert_eq!(lattice.viterbi(), 
vec![]); lattice.insert(1, 1, 0.0, 4); lattice.insert(2, 1, 0.0, 5); // XXX: In sentence piece this is not tested, still incomplete ? assert_eq!(lattice.viterbi().len(), 3); } #[test] fn test_viterbi2() { let mut lattice = Lattice::from("ABC", 1, 2); lattice.insert(0, 1, 0.0, 3); lattice.insert(1, 1, 0.0, 4); lattice.insert(2, 1, 0.0, 5); assert_eq!(lattice.tokens(), ["A", "B", "C"]); lattice.insert(0, 2, 2.0, 6); assert_eq!(lattice.tokens(), ["AB", "C"]); lattice.insert(1, 2, 5.0, 7); assert_eq!(lattice.tokens(), ["A", "BC"]); lattice.insert(0, 3, 10.0, 8); assert_eq!(lattice.tokens(), ["ABC"]); } #[test] fn test_nbest() { let mut lattice = Lattice::from("ABC", 1, 2); lattice.insert(0, 1, 0.0, 3); lattice.insert(1, 1, 0.0, 4); lattice.insert(2, 1, 0.0, 5); lattice.insert(0, 2, 2.0, 6); lattice.insert(1, 2, 5.0, 7); lattice.insert(0, 3, 10.0, 8); let nbests = lattice.nbest_tokens(10); assert_eq!( nbests, vec![ vec!["ABC"], vec!["A", "BC"], vec!["AB", "C"], vec!["A", "B", "C"] ] ); assert!(lattice.nbest_tokens(0).is_empty()); assert_eq!(lattice.nbest_tokens(1), vec![vec!["ABC"]]); } #[test] fn test_log_sum_exp() { let mut x = 0.0; let v: Vec<f64> = vec![1.0, 2.0, 3.0]; for (i, y) in v.iter().enumerate() { x = log_sum_exp(x, *y, i == 0); } assert_approx_eq!(x, v.iter().map(|n| n.exp()).sum::<f64>().ln(), 0.001); } #[test] fn test_populate() { let mut lattice = Lattice::from("ABC", 1, 2); lattice.insert(0, 1, 1.0, 3); // A lattice.insert(1, 1, 1.2, 4); // B lattice.insert(2, 1, 2.5, 5); // C lattice.insert(0, 2, 3.0, 6); // AB lattice.insert(1, 2, 4.0, 7); // BC lattice.insert(0, 3, 2.0, 8); // ABC let mut probs = vec![0.0; 9]; let p1 = (1.0_f64 + 1.2 + 2.5).exp(); let p2 = (3.0_f64 + 2.5).exp(); let p3 = (1.0_f64 + 4.0).exp(); let p4 = 2.0_f64.exp(); let z = p1 + p2 + p3 + p4; let log_z = lattice.populate_marginal(1.0, &mut probs); assert_approx_eq!(log_z, z.ln(), 0.001); assert_approx_eq!(probs[0], 0.0, 0.001); assert_approx_eq!(probs[1], 0.0, 0.001); 
assert_approx_eq!(probs[2], 0.0, 0.001); assert_approx_eq!(probs[3], (p1 + p3) / z, 0.001); assert_approx_eq!(probs[4], (p1) / z, 0.001); assert_approx_eq!(probs[5], (p1 + p2) / z, 0.001); assert_approx_eq!(probs[6], (p2) / z, 0.001); assert_approx_eq!(probs[7], (p3) / z, 0.001); assert_approx_eq!(probs[8], (p4) / z, 0.001); } }
tokenizers/tokenizers/src/models/unigram/lattice.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/unigram/lattice.rs", "repo_id": "tokenizers", "token_count": 12693 }
348
use crate::tokenizer::{NormalizedString, Normalizer, Result}; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(tag = "type")] pub struct Prepend { pub prepend: String, } impl Prepend { pub fn new(prepend: String) -> Self { Self { prepend } } } impl Normalizer for Prepend { /// Strip the normalized string inplace fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { if !normalized.is_empty() { normalized.prepend(&self.prepend); } Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_prepend() { let original = "Hello"; let normalized = "▁Hello"; assert_ne!(original, normalized); let mut n = NormalizedString::from(original); let prepend = Prepend::new("▁".to_string()); prepend.normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); assert_eq!( n, NormalizedString::new( original.to_string(), normalized.to_string(), vec![ (0, 1), (0, 1), (0, 1), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5) ], 0 ) ); assert_eq!( n.alignments_original(), vec![(0, 4), (4, 5), (5, 6), (6, 7), (7, 8)] ); } }
tokenizers/tokenizers/src/normalizers/prepend.rs/0
{ "file_path": "tokenizers/tokenizers/src/normalizers/prepend.rs", "repo_id": "tokenizers", "token_count": 856 }
349
//! Pre-tokenizer that splits text wherever the Unicode script changes
//! (Latin vs Han vs ...), mirroring SentencePiece's behaviour.
use crate::pre_tokenizers::unicode_scripts::scripts::{get_script, Script};
use crate::tokenizer::{normalizer::Range, PreTokenizedString, PreTokenizer, Result};
use crate::utils::macro_rules_attribute;

/// Stateless pre-tokenizer: all behaviour lives in `pre_tokenize`.
#[derive(Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct UnicodeScripts;

impl UnicodeScripts {
    pub fn new() -> Self {
        Self {}
    }
}

impl Default for UnicodeScripts {
    fn default() -> Self {
        Self::new()
    }
}

// This code exists in the Unigram default IsValidSentencePiece.
// It could be integrated directly within `get_script` but I
// think it's kind of tricky to see those modifications later
// I am guessing release mode will optimize this away anyway.
//
// Adjustments made here:
// - U+30FC (prolonged sound mark "ー") is forced to Han so it stays
//   attached to Japanese text;
// - Hiragana/Katakana are merged into Han (Japanese treated as one script);
// - ASCII space becomes `Script::Any`, meaning it glues onto either side.
fn fixed_script(c: char) -> Script {
    let raw_script = get_script(c);
    if c as u32 == 0x30FC {
        Script::Han
    } else if c == ' ' {
        Script::Any
    } else {
        match raw_script {
            Script::Hiragana => Script::Han,
            Script::Katakana => Script::Han,
            script => script,
        }
    }
}

impl PreTokenizer for UnicodeScripts {
    /// Split each piece at byte offsets where the (fixed) script changes.
    /// Offsets are tracked in bytes (`len_utf8`) because `Range::Normalized`
    /// slices by byte range.
    fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
        pretokenized.split(|_, normalized| {
            let mut last_script = None;
            let mut offset = 0;
            // Collect the byte offsets where a new split must start.
            let mut ranges: Vec<_> = normalized
                .get()
                .chars()
                .filter_map(|c| {
                    let script = Some(fixed_script(c));
                    // A boundary occurs only when neither side is `Any`
                    // (spaces never force a boundary) and the script changed.
                    // Note: the very first char yields offset 0 since
                    // `last_script` starts as `None`.
                    let result = if script != Some(Script::Any)
                        && last_script != Some(Script::Any)
                        && last_script != script
                    {
                        Some(offset)
                    } else {
                        None
                    };
                    offset += c.len_utf8();
                    // Spaces (`Any`) do not update the running script, so the
                    // script "before" a space is the last non-space script.
                    if script != Some(Script::Any) {
                        last_script = script;
                    }

                    result
                })
                .collect();
            // Terminal offset so `windows(2)` yields the final range too.
            ranges.push(normalized.get().len());
            Ok(ranges
                .windows(2)
                .map(|item| {
                    normalized
                        .slice(Range::Normalized(item[0]..item[1]))
                        .expect("NormalizedString bad split")
                })
                .collect::<Vec<_>>())
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::OffsetReferential;
    use crate::OffsetType;

    #[test]
    fn basic() {
        let pretok = UnicodeScripts {};
        let mut pretokenized = PreTokenizedString::from("どこで生れ。Yes");
        pretok.pre_tokenize(&mut pretokenized).unwrap();
        assert_eq!(
            pretokenized
                .get_splits(OffsetReferential::Normalized, OffsetType::Byte)
                .into_iter()
                .map(|(s, o, _)| (s, o))
                .collect::<Vec<_>>(),
            vec![("どこで生れ", (0, 15)), ("。", (15, 18)), ("Yes", (18, 21))]
        );
        assert_eq!(
            pretokenized
                .get_splits(OffsetReferential::Original, OffsetType::Byte)
                .into_iter()
                .map(|(s, o, _)| (s, o))
                .collect::<Vec<_>>(),
            vec![("どこで生れ", (0, 15)), ("。", (15, 18)), ("Yes", (18, 21))]
        );
    }

    #[test]
    fn spaces_are_included_in_every_script() {
        let pretok = UnicodeScripts {};
        let mut pretokenized = PreTokenizedString::from("Apples are りんご 林檎");
        pretok.pre_tokenize(&mut pretokenized).unwrap();
        // Trailing/inner spaces stick to the preceding script's chunk.
        assert_eq!(
            pretokenized
                .get_splits(OffsetReferential::Normalized, OffsetType::Byte)
                .into_iter()
                .map(|(s, o, _)| (s, o))
                .collect::<Vec<_>>(),
            vec![("Apples are ", (0, 11)), ("りんご 林檎", (11, 27))]
        );
        assert_eq!(
            pretokenized
                .get_splits(OffsetReferential::Original, OffsetType::Byte)
                .into_iter()
                .map(|(s, o, _)| (s, o))
                .collect::<Vec<_>>(),
            vec![("Apples are ", (0, 11)), ("りんご 林檎", (11, 27))]
        );
    }

    #[test]
    fn test_unicode_script() {
        assert_eq!(Script::Han, fixed_script('京'));
        assert_eq!(Script::Han, fixed_script('太'));
        assert_eq!(Script::Han, fixed_script('い'));
        assert_eq!(Script::Han, fixed_script('グ'));
        assert_eq!(Script::Han, fixed_script('ー'));
        assert_eq!(Script::Latin, fixed_script('a'));
        assert_eq!(Script::Latin, fixed_script('A'));
        assert_eq!(Script::Common, fixed_script('0'));
        assert_eq!(Script::Common, fixed_script('$'));
        assert_eq!(Script::Common, fixed_script('@'));
        assert_eq!(Script::Common, fixed_script('-'));
        assert_eq!(Script::Any, fixed_script(' '));
    }
}
tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/pre_tokenizer.rs/0
{ "file_path": "tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/pre_tokenizer.rs", "repo_id": "tokenizers", "token_count": 2584 }
350
//! Regex backend built on the `fancy_regex` crate (supports look-around etc.),
//! adapted to the crate's `Pattern` trait.
use crate::tokenizer::pattern::Pattern;
use crate::Offsets;
use fancy_regex::Regex;
use std::error::Error;

/// Thin wrapper around `fancy_regex::Regex` exposing a byte-offset match iterator.
#[derive(Debug)]
pub struct SysRegex {
    regex: Regex,
}

impl SysRegex {
    /// Iterate over non-overlapping matches of the pattern inside `inside`.
    pub fn find_iter<'r, 't>(&'r self, inside: &'t str) -> Matches<'r, 't> {
        Matches(self.regex.find_iter(inside))
    }

    /// Compile `regex_str`; fails if the pattern is invalid.
    pub fn new(regex_str: &str) -> Result<Self, Box<dyn Error + Send + Sync + 'static>> {
        Ok(Self {
            regex: Regex::new(regex_str)?,
        })
    }
}

/// Iterator of `(start, end)` byte offsets for each match.
pub struct Matches<'r, 't>(fancy_regex::Matches<'r, 't>);

impl Iterator for Matches<'_, '_> {
    type Item = (usize, usize);

    fn next(&mut self) -> Option<Self::Item> {
        // `fancy_regex` yields `Result` items because matching can fail
        // at runtime (e.g. backtracking limits).
        match self.0.next() {
            Some(Ok(mat)) => Some((mat.start(), mat.end())),
            // stop if an error is encountered
            None | Some(Err(_)) => None,
        }
    }
}

impl Pattern for &Regex {
    /// Split `inside` into alternating non-match/match spans.
    /// Each returned tuple is `((start, end), is_match)`; the spans cover
    /// the whole input with no gaps. An empty input yields one empty,
    /// non-matching span.
    fn find_matches(
        &self,
        inside: &str,
    ) -> Result<Vec<(Offsets, bool)>, Box<dyn Error + Send + Sync + 'static>> {
        if inside.is_empty() {
            return Ok(vec![((0, 0), false)]);
        }

        let mut prev = 0;
        let mut splits = Vec::with_capacity(inside.len());
        for match_ in self.find_iter(inside) {
            // Propagate runtime matching errors instead of swallowing them.
            let match_ = match_?;
            let start = match_.start();
            let end = match_.end();
            if prev != start {
                // Gap between the previous match and this one.
                splits.push(((prev, start), false));
            }
            splits.push(((start, end), true));
            prev = end;
        }
        if prev != inside.len() {
            // Trailing non-matching remainder.
            splits.push(((prev, inside.len()), false))
        }
        Ok(splits)
    }
}
tokenizers/tokenizers/src/utils/fancy.rs/0
{ "file_path": "tokenizers/tokenizers/src/utils/fancy.rs", "repo_id": "tokenizers", "token_count": 823 }
351
//! Integration tests for BPE training through `TokenizerBuilder`.
use tokenizers::models::bpe::BPE;
use tokenizers::pre_tokenizers::whitespace::Whitespace;
use tokenizers::{DecoderWrapper, NormalizerWrapper, PostProcessorWrapper, PreTokenizerWrapper};
use tokenizers::{Model, Tokenizer, TokenizerBuilder};

// Verifies that builder-supplied BPE options (dropout, unk_token) survive a
// full training run instead of being reset by the trainer.
#[test]
fn bpe_values_after_training() {
    let mut tokenizer = TokenizerBuilder::<
        BPE,
        NormalizerWrapper,
        PreTokenizerWrapper,
        PostProcessorWrapper,
        DecoderWrapper,
    >::default()
    .with_model(
        BPE::builder()
            .unk_token("[UNK]".to_string())
            .dropout(0.1)
            .build()
            .unwrap(),
    )
    .build()
    .unwrap();
    let mut trainer = tokenizer.get_model().get_trainer();
    tokenizer
        .train_from_files(&mut trainer, vec!["./data/small.txt".to_string()])
        .unwrap();
    assert_eq!(tokenizer.get_model().dropout, Some(0.1));
    assert_eq!(tokenizer.get_model().unk_token, Some("[UNK]".to_string()));
}

// Trains with a continuing-subword prefix ("##"), then round-trips the
// tokenizer through JSON serialization to check it still loads and keeps
// its vocabulary.
// NOTE(review): the expected vocab size (1526) is tied to ./data/small.txt.
#[test]
fn bpe_continuing_subword_prefix_error() {
    let mut tokenizer = TokenizerBuilder::<
        BPE,
        NormalizerWrapper,
        PreTokenizerWrapper,
        PostProcessorWrapper,
        DecoderWrapper,
    >::default()
    .with_model(
        BPE::builder()
            .unk_token("[UNK]".to_string())
            .continuing_subword_prefix("##".to_string())
            .build()
            .unwrap(),
    )
    .with_pre_tokenizer(Some(PreTokenizerWrapper::Whitespace(Whitespace {})))
    .build()
    .unwrap();
    let mut trainer = tokenizer.get_model().get_trainer();
    tokenizer
        .train_from_files(&mut trainer, vec!["./data/small.txt".to_string()])
        .unwrap();
    // Serialize to disk, reload, then clean up the temporary file.
    tokenizer.save("tokenizer.json", true).unwrap();
    let tokenizer = Tokenizer::from_file("tokenizer.json").unwrap();
    assert_eq!(tokenizer.get_vocab_size(false), 1526);
    std::fs::remove_file("tokenizer.json").unwrap();
}
tokenizers/tokenizers/tests/training.rs/0
{ "file_path": "tokenizers/tokenizers/tests/training.rs", "repo_id": "tokenizers", "token_count": 851 }
352
# Accessing Private/Gated Models <Tip> Due to the possibility of leaking access tokens to users of your website or web application, we only support accessing private/gated models from server-side environments (e.g., Node.js) that have access to the process' environment variables. </Tip> ## Step 1: Generating a User Access Token [User Access Tokens](https://huggingface.co/docs/hub/security-tokens) are the preferred way to authenticate an application to Hugging Face services. To generate an access token, navigate to the [Access Tokens tab](https://huggingface.co/settings/tokens) in your settings and click on the **New token** button. Choose a name for your token and click **Generate a token** (we recommend keeping the "Role" as read-only). You can then click the **Copy** button next to your newly-created token to copy it to your clipboard. <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/new-token.png"/> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/new-token-dark.png"/> </div> To delete or refresh User Access Tokens, you can click the **Manage** button. ## Step 2: Using the access token in Transformers.js Transformers.js will attach an Authorization header to requests made to the Hugging Face Hub when the `HF_TOKEN` environment variable is set and visible to the process. One way to do this is to call your program with the environment variable set. For example, let's say you have a file called `llama.js` with the following code: ```js import { AutoTokenizer } from '@huggingface/transformers'; // Load tokenizer for a gated repository. const tokenizer = await AutoTokenizer.from_pretrained('meta-llama/Llama-2-7b-hf'); // Encode text. 
const text = 'Hello world!';
const encoded = tokenizer.encode(text);

console.log(encoded);
```

You can then use the following command to set the `HF_TOKEN` environment variable and run the file:

```bash
HF_TOKEN=hf_... node llama.js
```

(remember to replace `hf_...` with your actual access token).

If done correctly, you should see the following output:

```bash
[ 1, 15043, 3186, 29991 ]
```

Alternatively, you can set the environment variable directly in your code:

```js
// Set access token (NB: Keep this private!)
process.env.HF_TOKEN = 'hf_...';

// ... rest of your code
```
transformers.js/docs/source/guides/private.md/0
{ "file_path": "transformers.js/docs/source/guides/private.md", "repo_id": "transformers.js", "token_count": 711 }
353
// Application entry point: mounts the React tree into the #root element.
import React from 'react'
import ReactDOM from 'react-dom/client'
import App from './App.jsx'
import './index.css'

// StrictMode enables extra development-time checks (double-invoked effects, etc.).
ReactDOM.createRoot(document.getElementById('root')).render(
  <React.StrictMode>
    <App />
  </React.StrictMode>,
)
transformers.js/examples/cross-encoder/src/main.jsx/0
{ "file_path": "transformers.js/examples/cross-encoder/src/main.jsx", "repo_id": "transformers.js", "token_count": 87 }
354
/* Global reset: border-box sizing and zeroed spacing everywhere. */
* {
    box-sizing: border-box;
    padding: 0;
    margin: 0;
    font-family: sans-serif;
}

html,
body {
    height: 100%;
}

body {
    padding: 16px 32px;
}

/* Center content vertically and horizontally in these flex containers. */
body,
#container,
#upload-button {
    display: flex;
    flex-direction: column;
    justify-content: center;
    align-items: center;
}

h1 {
    text-align: center;
}

/* Drop-zone / preview area; the selected image is set as its background. */
#container {
    position: relative;
    width: 640px;
    height: 420px;
    max-width: 100%;
    max-height: 100%;
    border: 2px dashed #D1D5DB;
    border-radius: 0.75rem;
    overflow: hidden;
    margin-top: 1rem;
    background-size: 100% 100%;
    background-position: center;
    background-repeat: no-repeat;
}

/* Overlay that draws on top of the image without capturing clicks. */
#mask-output {
    position: absolute;
    width: 100%;
    height: 100%;
    pointer-events: none;
}

#upload-button {
    gap: 0.4rem;
    font-size: 18px;
    cursor: pointer;
}

/* The real file input is hidden; clicks are proxied through the button. */
#upload {
    display: none;
}

svg {
    pointer-events: none;
}

/* "Try an example" link styling. */
#example {
    font-size: 14px;
    text-decoration: underline;
    cursor: pointer;
}

#example:hover {
    color: #2563EB;
}

canvas {
    position: absolute;
    width: 100%;
    height: 100%;
}

/* Reserve space so the layout doesn't jump when status text appears. */
#status {
    min-height: 16px;
    margin: 8px 0;
}

/* Slider pinned to the container's top-right corner. */
input[type="range"] {
    position: absolute;
    top: 10px;
    right: 10px;
    z-index: 1;
}
transformers.js/examples/depth-anything-client/style.css/0
{ "file_path": "transformers.js/examples/depth-anything-client/style.css", "repo_id": "transformers.js", "token_count": 474 }
355
{ "name": "extension", "version": "0.0.1", "description": "Transformers.js | Sample browser extension", "scripts": { "build": "webpack", "dev": "webpack --watch" }, "type": "module", "author": "Xenova", "license": "MIT", "devDependencies": { "copy-webpack-plugin": "^11.0.0", "html-webpack-plugin": "^5.5.1", "webpack": "^5.79.0" }, "dependencies": { "@huggingface/transformers": "^3.4.0" } }
transformers.js/examples/extension/package.json/0
{ "file_path": "transformers.js/examples/extension/package.json", "repo_id": "transformers.js", "token_count": 198 }
356
// Image picker component: supports click-to-browse, drag & drop, and a
// built-in example image. Reports selections via the `onImageChange` callback.
import { useState, useRef } from 'react';

const EXAMPLE_URL = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/beetle.png';

const ImageInput = ({ onImageChange, ...props }) => {
    // Data URL of the currently previewed image (null = nothing selected).
    const [imagePreview, setImagePreview] = useState(null);
    // Ref to the hidden <input type="file"> so clicks anywhere can open it.
    const fileInputRef = useRef(null);

    // Read a File into a data URL, update the preview, and notify the parent.
    // Safe to call with undefined (e.g. an empty drop).
    const readFile = (file) => {
        if (!file) return;
        const reader = new FileReader();
        reader.onloadend = () => {
            setImagePreview(reader.result);
            if (onImageChange) {
                onImageChange(file, reader.result);
            }
        };
        reader.readAsDataURL(file);
    }

    const handleImageChange = (event) => {
        readFile(event.target.files[0]);
    };

    // preventDefault is required so the browser allows dropping here.
    const handleDragOver = (event) => {
        event.preventDefault();
    };

    const handleDrop = (event) => {
        event.preventDefault();
        readFile(event.dataTransfer.files[0]);
    };

    // Forward clicks on the whole component to the hidden file input.
    const handleClick = () => {
        fileInputRef.current.click();
    };

    return (
        <div
            {...props}
            onClick={handleClick}
            onDragOver={handleDragOver}
            onDrop={handleDrop}
        >
            <input
                type="file"
                accept="image/*"
                onChange={handleImageChange}
                ref={fileInputRef}
                className="hidden"
            />
            {imagePreview ? (
                <img
                    src={imagePreview}
                    alt="Selected"
                    className="w-full max-h-[250px] h-full object-contain rounded-md"
                />
            ) : (
                <div className="w-full h-full flex flex-col items-center justify-center border-2 border-dashed border-gray-300 rounded-md">
                    <span className="text-gray-600 text-center m-3"><u>Drag &amp; drop</u> or <u>click</u><br />to select an image</span>
                    <span
                        className="text-gray-500 text-sm hover:text-gray-800"
                        onClick={(e) => {
                            // stopPropagation so this doesn't also open the file picker.
                            // NOTE(review): the example passes file=null to the callback;
                            // the parent must handle a null file for URL-based selections.
                            e.stopPropagation();
                            setImagePreview(EXAMPLE_URL);
                            onImageChange(null, EXAMPLE_URL);
                        }}
                    >(or <u>try an example</u>)</span>
                </div>
            )}
        </div>
    );
};

export default ImageInput;
transformers.js/examples/florence2-webgpu/src/components/ImageInput.jsx/0
{ "file_path": "transformers.js/examples/florence2-webgpu/src/components/ImageInput.jsx", "repo_id": "transformers.js", "token_count": 1106 }
357
// Next.js App Router root layout: applies global styles, the Inter font,
// and page metadata to every route.
import './globals.css'
import { Inter } from 'next/font/google'

// Self-hosted Google font (loaded at build time by next/font).
const inter = Inter({ subsets: ['latin'] })

// Default <head> metadata for all pages.
export const metadata = {
  title: 'Create Next App',
  description: 'Generated by create next app',
}

export default function RootLayout({ children }) {
  return (
    <html lang="en">
      <body className={inter.className}>{children}</body>
    </html>
  )
}
transformers.js/examples/next-client/src/app/layout.js/0
{ "file_path": "transformers.js/examples/next-client/src/app/layout.js", "repo_id": "transformers.js", "token_count": 128 }
358
// Custom request handler for the /classify route.
// For more information, see https://nextjs.org/docs/app/building-your-application/routing/router-handlers
import { NextResponse } from 'next/server'
import PipelineSingleton from './pipeline.js';

// GET /classify?text=... — classify the given text and return the result as JSON.
export async function GET(request) {
    // Pull the ?text= query parameter off the incoming request.
    const text = request.nextUrl.searchParams.get('text');

    // Reject requests that don't provide any text.
    if (!text) {
        const body = { error: 'Missing text parameter' };
        return NextResponse.json(body, { status: 400 });
    }

    // Lazily create (and cache) the classification pipeline, then run it
    // on the provided text.
    const classify = await PipelineSingleton.getInstance();
    const output = await classify(text);

    return NextResponse.json(output);
}
transformers.js/examples/next-server/src/app/classify/route.js/0
{ "file_path": "transformers.js/examples/next-server/src/app/classify/route.js", "repo_id": "transformers.js", "token_count": 250 }
359
/* Centered app shell. */
#root {
  max-width: 1280px;
  margin: 0 auto;
  padding: 2rem;
  text-align: center;
}

/* Side-by-side source/target language pickers. */
.language-container {
  display: flex;
  gap: 20px;
}

/* Input and output text areas, split 50/50. */
.textbox-container {
  display: flex;
  justify-content: center;
  gap: 20px;
  width: 800px;
}

.textbox-container>textarea, .language-selector {
  width: 50%;
}

.language-selector>select {
  width: 150px;
}

/* Download progress bar: a filled bar behind centered label text. */
.progress-container {
  position: relative;
  font-size: 14px;
  color: white;
  background-color: #e9ecef;
  border: solid 1px;
  border-radius: 8px;
  text-align: left;
  overflow: hidden;
}

/* The filled portion; its width is set from JS as a percentage. */
.progress-bar {
  padding: 0 4px;
  z-index: 0;
  top: 0;
  width: 1%;
  height: 100%;
  overflow: hidden;
  background-color: #007bff;
  white-space: nowrap;
}

.progress-text {
  z-index: 2;
}

.selector-container {
  display: flex;
  gap: 20px;
}

/* Fixed height so the layout is stable while bars appear/disappear. */
.progress-bars-container {
  padding: 8px;
  height: 140px;
}

.container {
  margin: 25px;
  display: flex;
  flex-direction: column;
  gap: 10px;
}
transformers.js/examples/react-translator/src/App.css/0
{ "file_path": "transformers.js/examples/react-translator/src/App.css", "repo_id": "transformers.js", "token_count": 383 }
360
This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app). ## Getting Started First, run the development server: ```bash npm run dev # or yarn dev # or pnpm dev ``` Open [http://localhost:3000](http://localhost:3000) with your browser to see the result. You can start editing the page by modifying `app/page.js`. The page auto-updates as you edit the file. This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font. ## Learn More To learn more about Next.js, take a look at the following resources: - [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API. - [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial. You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js/) - your feedback and contributions are welcome! ## Deploy on Vercel The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js. Check out our [Next.js deployment documentation](https://nextjs.org/docs/deployment) for more details.
transformers.js/examples/semantic-image-search-client/README.md/0
{ "file_path": "transformers.js/examples/semantic-image-search-client/README.md", "repo_id": "transformers.js", "token_count": 413 }
361
// Web Worker for semantic image search: embeds the query text with CLIP and
// ranks precomputed image embeddings by cosine similarity.
import { env, AutoTokenizer, CLIPTextModelWithProjection } from '@xenova/transformers';
import { getCachedFile, getCachedJSON } from './utils.js';

// Dimensionality of each CLIP embedding vector; the packed binary file is a
// flat array of numDB * EMBED_DIM float32 values.
const EMBED_DIM = 512;

// Skip local model check
env.allowLocalModels = false;

// Lazy singleton holding the tokenizer, text model, image metadata, and the
// packed image-embedding matrix. Each static field caches a Promise, so
// concurrent callers share one in-flight load.
class ApplicationSingleton {
    static model_id = 'Xenova/clip-vit-base-patch16';
    static BASE_URL = 'https://huggingface.co/datasets/Xenova/semantic-image-search-assets/resolve/main/';
    static tokenizer = null;
    static text_model = null;
    static metadata = null;
    static embeddings = null;

    // Kick off (or reuse) all four loads and resolve when every one is ready.
    // `progress_callback` receives download-progress events for the model files.
    static async getInstance(progress_callback = null) {
        // Load tokenizer and text model
        if (this.tokenizer === null) {
            this.tokenizer = AutoTokenizer.from_pretrained(this.model_id, { progress_callback });
        }

        if (this.text_model === null) {
            this.text_model = CLIPTextModelWithProjection.from_pretrained(this.model_id, { progress_callback });
        }

        if (this.metadata === null) {
            this.metadata = getCachedJSON(this.BASE_URL + 'image-embeddings.json');
        }

        if (this.embeddings === null) {
            this.embeddings = new Promise(
                (resolve, reject) => {
                    // Raw float32 matrix, reinterpreted from the cached ArrayBuffer.
                    getCachedFile(this.BASE_URL + 'image-embeddings_25k-512-32bit.bin')
                        .then((buffer) => {
                            resolve(new Float32Array(buffer));
                        })
                        .catch(reject);
                }
            )
        }

        return Promise.all([this.tokenizer, this.text_model, this.metadata, this.embeddings]);
    }
}

// Cosine similarity between one query embedding and every row of the packed
// database matrix. Returns one score per database image.
function cosineSimilarity(query_embeds, database_embeds) {
    const numDB = database_embeds.length / EMBED_DIM;
    const similarityScores = new Array(numDB);

    for (let i = 0; i < numDB; ++i) {
        const startOffset = i * EMBED_DIM;
        const dbVector = database_embeds.slice(startOffset, startOffset + EMBED_DIM);
        let dotProduct = 0;
        let normEmbeds = 0;
        let normDB = 0;

        // Accumulate dot product and both squared norms in a single pass.
        for (let j = 0; j < EMBED_DIM; ++j) {
            const embedValue = query_embeds[j];
            const dbValue = dbVector[j];
            dotProduct += embedValue * dbValue;
            normEmbeds += embedValue * embedValue;
            normDB += dbValue * dbValue;
        }

        similarityScores[i] = dotProduct / (Math.sqrt(normEmbeds) * Math.sqrt(normDB));
    }

    return similarityScores;
}

// Listen for messages from the main thread
self.addEventListener('message', async (event) => {
    // Get the tokenizer, model, metadata, and embeddings. When called for the first time,
    // this will load the files and cache them for future use.
    const [tokenizer, text_model, metadata, embeddings] = await ApplicationSingleton.getInstance(self.postMessage);

    // Send the output back to the main thread
    self.postMessage({ status: 'ready' });

    // Run tokenization
    const text_inputs = tokenizer(event.data.text, { padding: true, truncation: true });

    // Compute embeddings
    const { text_embeds } = await text_model(text_inputs);

    // Compute similarity scores
    const scores = cosineSimilarity(text_embeds.data, embeddings);

    // Make a copy of the metadata
    // NOTE(review): slice(0) is a shallow copy — the score assignments below
    // also mutate the cached metadata objects; verify that is intended.
    let output = metadata.slice(0);

    // Add scores to output
    for (let i = 0; i < metadata.length; ++i) {
        output[i].score = scores[i];
    }

    // Sort by score
    output.sort((a, b) => b.score - a.score);

    // Get top 100 results
    output = output.slice(0, 100);

    // Send the output back to the main thread
    self.postMessage({
        status: 'complete',
        output: output,
    });
});
transformers.js/examples/semantic-image-search-client/src/app/worker.js/0
{ "file_path": "transformers.js/examples/semantic-image-search-client/src/app/worker.js", "repo_id": "transformers.js", "token_count": 1518 }
362
// Masonry-style grid of search results; clicking a tile opens it in the
// parent's image viewer via `setCurrentImage`.
import Image from 'next/image'
import { blurHashToDataURL } from '../utils.js'

export function ImageGrid({ images, setCurrentImage }) {
    return (
        <div className="columns-2 gap-4 sm:columns-3 xl:columns-4 2xl:columns-5">
            {images && images.map(({
                photo_id,
                photo_url,
                photo_image_url,
                photo_aspect_ratio,
                photo_width,
                photo_height,
                blur_hash,
                photo_description,
                ai_description,
                similarity,
            }) => (
                <div
                    key={photo_id}
                    // NOTE(review): `href` on a <div> has no effect — this was
                    // presumably once an <a>; confirm whether linking to
                    // photo_url is still wanted.
                    href={photo_url}
                    className='after:content group cursor-pointer relative mb-4 block w-full after:pointer-events-none after:absolute after:inset-0 after:rounded-lg after:shadow-highlight'
                    onClick={() => {
                        // Hand the full photo record up to the parent viewer.
                        setCurrentImage({
                            photo_id,
                            photo_url,
                            photo_image_url,
                            photo_aspect_ratio,
                            photo_width,
                            photo_height,
                            blur_hash,
                            photo_description,
                            ai_description,
                            similarity,
                        });
                    }}
                >
                    <Image
                        alt={photo_description || ai_description || ""}
                        className="transform rounded-lg brightness-90 transition will-change-auto group-hover:brightness-110"
                        style={{ transform: 'translate3d(0, 0, 0)' }}
                        // BlurHash placeholder shown while the real image loads.
                        placeholder="blur"
                        blurDataURL={blurHashToDataURL(blur_hash)}
                        src={`${photo_image_url}?auto=format&fit=crop&w=480&q=80`}
                        width={480}
                        // Height derived from the stored aspect ratio at fixed width.
                        height={480 / photo_aspect_ratio}
                        unoptimized={true}
                    />
                </div>
            ))}
        </div>)
}
transformers.js/examples/semantic-image-search/src/app/components/ImageGrid.jsx/0
{ "file_path": "transformers.js/examples/semantic-image-search/src/app/components/ImageGrid.jsx", "repo_id": "transformers.js", "token_count": 1339 }
363
// In-browser text-to-speech UI. Heavy lifting happens in a Web Worker
// (./worker.js); this component tracks loading progress, collects the text
// and speaker choice, and plays back the generated audio.
import React, { useState, useEffect, useRef } from 'react';

import AudioPlayer from './components/AudioPlayer';
import Progress from './components/Progress';
import { SPEAKERS, DEFAULT_SPEAKER } from './constants';

const App = () => {

  // Model loading: `ready` is null before the first load starts, false while
  // loading, true once the pipeline is usable.
  const [ready, setReady] = useState(null);
  const [disabled, setDisabled] = useState(false);
  const [progressItems, setProgressItems] = useState([]);

  // Inputs and outputs
  const [text, setText] = useState('I love Hugging Face!');
  const [selectedSpeaker, setSelectedSpeaker] = useState(DEFAULT_SPEAKER);
  // Blob URL of the last generated audio clip (null until first generation).
  const [output, setOutput] = useState(null);

  // Create a reference to the worker object.
  const worker = useRef(null);

  // We use the `useEffect` hook to setup the worker as soon as the `App` component is mounted.
  // NOTE(review): no dependency array, so the listener is removed and
  // re-attached on every render; consider `[]` if that churn matters.
  useEffect(() => {
    if (!worker.current) {
      // Create the worker if it does not yet exist.
      worker.current = new Worker(new URL('./worker.js', import.meta.url), {
        type: 'module'
      });
    }

    // Create a callback function for messages from the worker thread.
    const onMessageReceived = (e) => {
      switch (e.data.status) {
        case 'initiate':
          // Model file start load: add a new progress item to the list.
          setReady(false);
          setProgressItems(prev => [...prev, e.data]);
          break;

        case 'progress':
          // Model file progress: update one of the progress items.
          setProgressItems(
            prev => prev.map(item => {
              if (item.file === e.data.file) {
                return { ...item, progress: e.data.progress }
              }
              return item;
            })
          );
          break;

        case 'done':
          // Model file loaded: remove the progress item from the list.
          setProgressItems(
            prev => prev.filter(item => item.file !== e.data.file)
          );
          break;

        case 'ready':
          // Pipeline ready: the worker is ready to accept messages.
          setReady(true);
          break;

        case 'complete':
          // Generation complete: re-enable the "Translate" button
          setDisabled(false);

          // NOTE(review): previous blob URLs are never revoked
          // (URL.revokeObjectURL), so repeated generations leak blobs.
          const blobUrl = URL.createObjectURL(e.data.output);
          setOutput(blobUrl);
          break;
      }
    };

    // Attach the callback function as an event listener.
    worker.current.addEventListener('message', onMessageReceived);

    // Define a cleanup function for when the component is unmounted.
    return () => worker.current.removeEventListener('message', onMessageReceived);
  });

  // Disable the button and ask the worker to synthesize speech for the
  // current text/speaker; the 'complete' message above re-enables the UI.
  const handleGenerateSpeech = () => {
    setDisabled(true);
    worker.current.postMessage({
      text,
      speaker_id: selectedSpeaker,
    });
  };

  const isLoading = ready === false;
  return (
    <div className='min-h-screen flex items-center justify-center bg-gray-100'>
      {/* Full-screen overlay shown only while model files are downloading. */}
      <div
        className='absolute gap-1 z-50 top-0 left-0 w-full h-full transition-all px-8 flex flex-col justify-center text-center'
        style={{
          opacity: isLoading ? 1 : 0,
          pointerEvents: isLoading ? 'all' : 'none',
          background: 'rgba(0, 0, 0, 0.9)',
          backdropFilter: 'blur(8px)',
        }}
      >
        {isLoading && (
          <label className='text-white text-xl p-3'>Loading models... (only run once)</label>
        )}
        {progressItems.map(data => (
          <div key={`${data.name}/${data.file}`}>
            <Progress text={`${data.name}/${data.file}`} percentage={data.progress} />
          </div>
        ))}
      </div>

      <div className='bg-white p-8 rounded-lg shadow-lg w-full max-w-xl m-2'>
        <h1 className='text-3xl font-semibold text-gray-800 mb-1 text-center'>In-browser Text to Speech</h1>
        <h2 className='text-base font-medium text-gray-700 mb-2 text-center'>Made with <a href='https://huggingface.co/docs/transformers.js'>🤗 Transformers.js</a></h2>
        <div className='mb-4'>
          <label htmlFor='text' className='block text-sm font-medium text-gray-600'>
            Text
          </label>
          <textarea
            id='text'
            className='border border-gray-300 rounded-md p-2 w-full'
            rows='4'
            placeholder='Enter text here'
            value={text}
            onChange={(e) => setText(e.target.value)}
          ></textarea>
        </div>
        <div className='mb-4'>
          <label htmlFor='speaker' className='block text-sm font-medium text-gray-600'>
            Speaker
          </label>
          {/* Speaker picker: display names map to model speaker ids. */}
          <select
            id='speaker'
            className='border border-gray-300 rounded-md p-2 w-full'
            value={selectedSpeaker}
            onChange={(e) => setSelectedSpeaker(e.target.value)}
          >
            {Object.entries(SPEAKERS).map(([key, value]) => (
              <option key={key} value={value}>
                {key}
              </option>
            ))}
          </select>
        </div>
        <div className='flex justify-center'>
          <button
            className={`${disabled
              ? 'bg-gray-400 cursor-not-allowed'
              : 'bg-blue-500 hover:bg-blue-600'
              } text-white rounded-md py-2 px-4`}
            onClick={handleGenerateSpeech}
            disabled={disabled}
          >
            {disabled ? 'Generating...' : 'Generate'}
          </button>
        </div>
        {output && <AudioPlayer
          audioUrl={output}
          mimeType={'audio/wav'}
        />}
      </div>
    </div>
  );
};

export default App;
transformers.js/examples/text-to-speech-client/src/App.jsx/0
{ "file_path": "transformers.js/examples/text-to-speech-client/src/App.jsx", "repo_id": "transformers.js", "token_count": 2478 }
364
/**
 * Entry point for the in-browser WebGPU embedding benchmark.
 * Loads one model per selected (device, dtype) test configuration, runs timed
 * forward passes over a range of batch sizes, and plots the timings with Chart.js.
 */
import './style.css';
import { env, AutoModel, ones } from '@xenova/transformers';
import Chart from 'chart.js/auto';

// Throw an error if WebGPU is not supported
if (!navigator.gpu) {
    const err = 'WebGPU is not supported by this browser.';
    alert(err)
    throw Error(err);
}

env.backends.onnx.wasm.wasmPaths = 'https://cdn.jsdelivr.net/npm/onnxruntime-web@1.17.1/dist/';
env.backends.onnx.wasm.numThreads = 1;

// Reference the elements that we will need
const ctx = document.getElementById('chart');
const batchSizes = document.getElementById('batch-sizes');
const xscale = document.getElementById('x-scale');
const yscale = document.getElementById('y-scale');
const sequenceLength = document.getElementById('sequence-length');
const modelID = document.getElementById('model-id');
const status = document.getElementById('status');
const start = document.getElementById('start');
const stop = document.getElementById('stop');
const tests = document.getElementsByClassName('tests');

// Benchmark settings
const NUM_WARMUP_STEPS = 3;
// Cache of loaded models, keyed by `${model_id}///${label}` so a re-run with
// the same model and test configuration skips the download/compile step.
const MODEL_CACHE = new Map();

// Chart configuration
const initChart = () => {
    const config = {
        type: 'line',
        data: {
            labels: [],
            datasets: [],
        },
        options: {
            responsive: true,
            maintainAspectRatio: false,
            plugins: {
                legend: {
                    position: 'top',
                },
            },
            scales: {
                x: {
                    title: {
                        display: true,
                        text: 'Batch size',
                    },
                    min: 1,
                },
                y: {
                    title: {
                        display: true,
                        text: 'Time (ms)',
                    },
                }
            }
        },
    };
    const chart = new Chart(ctx, config);
    return chart;
}
let chart = initChart();

// Switch one chart axis between logarithmic and linear scale.
const toggleScale = (axis, enabled) => {
    chart.options.scales[axis].type = enabled ? 'logarithmic' : 'linear';
    chart.update();
}

const getSelectedTests = () => {
    return [...tests].filter(x => x.checked);
}

// Rebuild the chart datasets (one per checked test), colored from the
// checkbox's `data-color` attribute. Clears any previously-plotted data.
const updateDatasets = () => {
    chart.data.datasets = getSelectedTests().map(test => {
        const color = test.getAttribute('data-color');
        return {
            label: test.value,
            data: [],
            borderColor: `rgba(${color}, 1)`,
            backgroundColor: `rgba(${color}, 0.5)`,
        }
    })
    chart.update();
}
updateDatasets();

[...tests].forEach(test => test.addEventListener('change', updateDatasets));
xscale.addEventListener('change', () => toggleScale('x', xscale.checked));
yscale.addEventListener('change', () => toggleScale('y', yscale.checked));

// Build model inputs of all-ones: content does not matter for timing,
// only the tensor shapes do.
const generateDummyInputs = (batch_size, seqLength) => {
    const inputs = ones([batch_size, seqLength]);
    const model_inputs = {
        input_ids: inputs,
        attention_mask: inputs,
    }
    return model_inputs;
}

// Probe the adapter for fp16 support so the fp16 WebGPU test can be
// disabled up-front on devices that cannot run it.
let adapterInfo;
let gpuHasFp16 = false;
try {
    // Shouldn't fail since the WebGPU model has loaded successfully
    const adapter = await navigator.gpu.requestAdapter();
    adapterInfo = await adapter.requestAdapterInfo();
    gpuHasFp16 = adapter.features.has('shader-f16')
} catch (err) {
    adapterInfo = {};
}
if (!gpuHasFp16) {
    const element = document.querySelector('.tests[data-device="webgpu"][data-dtype="fp16"]');
    element.setAttribute('unsupported', true);
    element.disabled = true;
    element.title = 'This device does not support fp16 on WebGPU';
}

status.textContent = 'Ready';

let interrupted = false;
start.addEventListener('click', async () => {
    const validTests = [...tests].filter(test => !test.getAttribute('unsupported'))
    // Update UI
    start.disabled = true;
    stop.disabled = false;
    batchSizes.disabled = true;
    sequenceLength.disabled = true;
    modelID.disabled = true;
    validTests.forEach(test => test.disabled = true);
    interrupted = false;

    // Get parameters
    const model_id = modelID.value;
    const batch_sizes = batchSizes.value.split(',').map(x => parseInt(x)).filter(x => x);
    const seqLength = parseInt(sequenceLength.value);
    const selectedTests = getSelectedTests().map(x => ({
        label: x.value,
        dtype: x.getAttribute('data-dtype'),
        device: x.getAttribute('data-device'),
    }));

    // Reset
    chart.destroy();
    chart = initChart();
    updateDatasets();

    // NOTE: Models must be loaded sequentially (otherwise it will fail due to multiple calls to initWasm())
    const testsToRun = new Map();
    for (const test of selectedTests) {
        // NOTE(review): `selectedTests` never sets `quantized`, so the value
        // destructured here is always undefined — confirm this is intentional
        // (i.e. that from_pretrained treats undefined as its default).
        const { label, dtype, device, quantized } = test;

        const key = `${model_id}///${label}`;
        const cached = MODEL_CACHE.get(key);
        if (cached) {
            testsToRun.set(label, cached);
            continue;
        }

        status.textContent = 'Loading model(s)...';
        try {
            const model = await AutoModel.from_pretrained(model_id, {
                quantized,
                device,
                dtype,
            });
            MODEL_CACHE.set(key, model);
            testsToRun.set(label, model);
        } catch (err) {
            status.textContent = err.message;
            alert(err.message)
            throw err;
        }
    }

    status.textContent = 'Warming up...';
    // Warm up: This is important for the WebGPU execution provider, which compiles the shaders on first load
    for (let i = 0; i < NUM_WARMUP_STEPS; ++i) {
        const model_inputs = generateDummyInputs(1, seqLength);
        for (const [label, model] of testsToRun) {
            await model(model_inputs);
        }
    }

    status.textContent = 'Running benchmark...';
    for (const batch_size of batch_sizes) {
        if (interrupted) break;
        const model_inputs = generateDummyInputs(batch_size, seqLength);
        const times = []
        for (const [label, model] of testsToRun) {
            const start = performance.now();
            await model(model_inputs);
            const end = performance.now();
            times.push(end - start);
        }
        // Append one point per dataset; datasets were created in the same
        // order as `testsToRun`, so index i matches the i-th timed model.
        chart.data.labels.push(batch_size);
        for (let i = 0; i < times.length; ++i) {
            chart.data.datasets[i].data.push(times[i]);
        }
        chart.update();
    }

    // Calculate max speedup:
    if (chart.data.labels.length === 0) return;

    const testNames = [...testsToRun.keys()];
    const table = generateResultsTable(model_id, testNames, chart.data, seqLength);

    // Calculate slowest and fastest times (based on the largest batch size run)
    let minMaxTimes = [Infinity, 0];
    let minMaxIndices = [0, 0];
    for (let i = 0; i < chart.data.datasets.length; i++) {
        const lastTime = chart.data.datasets[i].data.at(-1);
        if (lastTime < minMaxTimes[0]) {
            minMaxTimes[0] = lastTime;
            minMaxIndices[0] = i;
        }
        if (lastTime > minMaxTimes[1]) {
            minMaxTimes[1] = lastTime;
            minMaxIndices[1] = i;
        }
    }

    const speedup = minMaxTimes[1] / minMaxTimes[0];
    const roundedSpeedup = speedup.toFixed(2);
    // Pre-fill a "share results" discussion link with the results table.
    const params = new URLSearchParams({
        title: `⚡ WebGPU Benchmark Results (${roundedSpeedup}x speedup)`,
        description: table.outerHTML,
    });
    const paramsStr = params.toString();
    status.innerHTML = `⚡ Done! ${testNames.at(minMaxIndices[0])} is <strong>${roundedSpeedup}x</strong> faster than ${testNames.at(minMaxIndices[1])}! ⚡<br><a href="https://huggingface.co/spaces/Xenova/webgpu-embedding-benchmark/discussions/new?${paramsStr}" target="_blank">Share results</a>`;

    // Restore UI
    start.disabled = false;
    stop.disabled = true;
    batchSizes.disabled = false;
    sequenceLength.disabled = false;
    modelID.disabled = false;
    validTests.forEach(test => test.disabled = false);
});
// Enable the Start button only once all setup above has completed.
start.disabled = false;

stop.addEventListener('click', () => {
    status.textContent = 'Stopping...';
    interrupted = true;
    stop.disabled = true;
});

/**
 * Build a shareable DOM fragment containing the raw timing table plus
 * environment information (model, tests, browser, GPU adapter).
 */
function generateResultsTable(model_id, testNames, data, sequence_length) {
    const datasets = data.datasets.map(d => d.data);
    const batch_sizes = data.labels;

    const container = document.createElement('div');

    const table = document.createElement('table');
    const thead = table.createTHead();
    const tbody = table.createTBody();

    // Add header row
    const headerRow = thead.insertRow();
    headerRow.insertCell().textContent = 'Batch Size';
    testNames.forEach(model => {
        headerRow.insertCell().textContent = model;
    });

    // Add data rows
    batch_sizes.forEach((batchSize, rowIndex) => {
        const row = tbody.insertRow();
        row.insertCell().textContent = batchSize;
        datasets.forEach(dataset => {
            row.insertCell().textContent = dataset[rowIndex].toFixed(2);
        });
    });

    container.appendChild(table);

    const createBulletPoint = (text) => {
        const li = document.createElement('li');
        li.textContent = text;
        return li;
    }

    // Add other information
    const info = document.createElement('ul');
    info.appendChild(createBulletPoint(`Model: ${model_id}`));
    info.appendChild(createBulletPoint(`Tests run: ${testNames.join(', ')}`));
    info.appendChild(createBulletPoint(`Sequence length: ${sequence_length}`));
    info.appendChild(createBulletPoint(`Browser: ${navigator.userAgent}`));
    info.appendChild(createBulletPoint(`GPU: vendor=${adapterInfo.vendor}, architecture=${adapterInfo.architecture}, device=${adapterInfo.device}, description=${adapterInfo.description}`));
    container.appendChild(info);

    return container;
}
transformers.js/examples/webgpu-embedding-benchmark/main.js/0
{ "file_path": "transformers.js/examples/webgpu-embedding-benchmark/main.js", "repo_id": "transformers.js", "token_count": 3269 }
365
export default function StopIcon(props) { return ( <svg {...props} xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round" > <path d="M21 12a9 9 0 1 1-18 0 9 9 0 0 1 18 0Z" /> <path fill="currentColor" d="M9 9.563C9 9.252 9.252 9 9.563 9h4.874c.311 0 .563.252.563.563v4.874c0 .311-.252.563-.563.563H9.564A.562.562 0 0 1 9 14.437V9.564Z" /> </svg> ) }
transformers.js/examples/webgpu-vlm/src/components/icons/StopIcon.jsx/0
{ "file_path": "transformers.js/examples/webgpu-vlm/src/components/icons/StopIcon.jsx", "repo_id": "transformers.js", "token_count": 375 }
366
import { useRef, useCallback, useEffect } from "react"; export function AudioVisualizer({ stream, ...props }) { const canvasRef = useRef(null); const visualize = useCallback((stream) => { const audioContext = new (window.AudioContext || window.webkitAudioContext)(); const source = audioContext.createMediaStreamSource(stream); const analyser = audioContext.createAnalyser(); analyser.fftSize = 2048; source.connect(analyser); const canvas = canvasRef.current; const canvasCtx = canvas.getContext('2d'); const bufferLength = analyser.frequencyBinCount; const dataArray = new Uint8Array(bufferLength); const drawVisual = () => { requestAnimationFrame(drawVisual); analyser.getByteTimeDomainData(dataArray); canvasCtx.fillStyle = 'rgb(255, 255, 255)'; canvasCtx.fillRect(0, 0, canvas.width, canvas.height); canvasCtx.lineWidth = 2; canvasCtx.strokeStyle = 'rgb(0, 0, 0)'; canvasCtx.beginPath(); const sliceWidth = canvas.width * 1.0 / bufferLength; let x = 0; for (let i = 0; i < bufferLength; ++i) { const v = dataArray[i] / 128.0; const y = v * canvas.height / 2; if (i === 0) { canvasCtx.moveTo(x, y); } else { canvasCtx.lineTo(x, y); } x += sliceWidth; } canvasCtx.lineTo(canvas.width, canvas.height / 2); canvasCtx.stroke(); }; drawVisual(); }, []); useEffect(() => { stream && visualize(stream); }, [visualize, stream]); return ( <canvas {...props} width={720} height={240} ref={canvasRef}></canvas> ) }
transformers.js/examples/webgpu-whisper/src/components/AudioVisualizer.jsx/0
{ "file_path": "transformers.js/examples/webgpu-whisper/src/components/AudioVisualizer.jsx", "repo_id": "transformers.js", "token_count": 865 }
367
import { useState, forwardRef, useRef, useImperativeHandle, useEffect, useCallback } from 'react';

const EXAMPLE_URL = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/whisper-timestamps-demo.mp4';

/**
 * Drag-and-drop / click-to-select media input. Decodes the chosen audio or
 * video file to 16 kHz mono PCM and reports it via `onInputChange`; playback
 * position changes are reported via `onTimeUpdate`. The parent can seek
 * through the imperative `setMediaTime` handle exposed on `ref`.
 */
const MediaInput = forwardRef(({ onInputChange, onTimeUpdate, ...props }, ref) => {
    // UI states
    const [dragging, setDragging] = useState(false);
    const fileInputRef = useRef(null);

    // Create a reference to the audio and video elements
    const audioElement = useRef(null);
    const videoElement = useRef(null);

    // Last playback time reported to the parent (avoids duplicate callbacks).
    const currentTimeRef = useRef(0);
    useImperativeHandle(ref, () => ({
        // Seek whichever element currently has a source loaded.
        setMediaTime(time) {
            if (audioElement.current?.src) {
                audioElement.current.currentTime = time;
            } else if (videoElement.current?.src) {
                videoElement.current.currentTime = time;
            }
            currentTimeRef.current = time;
        }
    }));

    const onBufferLoad = (arrayBuffer, type) => {
        // NOTE(review): the Blob is built from `arrayBuffer.slice(0)` (a copy),
        // presumably because `decodeAudioData` in `processFile` detaches the
        // original buffer — confirm before simplifying.
        const blob = new Blob([arrayBuffer.slice(0)], { type: type });
        const url = URL.createObjectURL(blob);
        processFile(arrayBuffer);

        // Create a URL for the Blob
        if (type.startsWith('audio/')) {
            // Dispose the previous source
            videoElement.current.pause();
            videoElement.current.removeAttribute('src');
            videoElement.current.load();

            audioElement.current.src = url;
        } else if (type.startsWith('video/')) {
            // Dispose the previous source
            audioElement.current.pause();
            audioElement.current.removeAttribute('src');
            audioElement.current.load();

            videoElement.current.src = url;
        } else {
            alert(`Unsupported file type: ${type}`);
        }
    }

    // Read a File object into an ArrayBuffer, then hand it to onBufferLoad.
    const readFile = (file) => {
        if (!file) return;

        // file.type
        const reader = new FileReader();
        reader.onload = (e) => {
            onBufferLoad(e.target.result, file.type);
        }
        reader.readAsArrayBuffer(file);
    }

    const handleInputChange = (event) => {
        readFile(event.target.files[0]);
    };

    const handleDragOver = (event) => {
        event.preventDefault();
    };

    const handleDrop = (event) => {
        event.preventDefault();
        setDragging(false);
        readFile(event.dataTransfer.files[0]);
    };

    // Clicking the media elements (or the empty area) re-opens the file
    // picker; clicks on the hidden <input> itself must not re-trigger it.
    const handleClick = (e) => {
        if (e.target.tagName === 'VIDEO' || e.target.tagName === 'AUDIO') {
            e.preventDefault();
            fileInputRef.current.click();
        } else if (e.target.tagName === 'INPUT') {
            e.stopPropagation();
        } else {
            fileInputRef.current.click();
            e.stopPropagation();
        }
    };

    // Decode the raw buffer to 16 kHz PCM and pass it to the parent.
    const processFile = async (buffer) => {
        const audioContext = new (window.AudioContext || window.webkitAudioContext)({ sampleRate: 16_000 });

        try {
            const audioBuffer = await audioContext.decodeAudioData(buffer);
            let audio;
            if (audioBuffer.numberOfChannels === 2) {
                // Merge channels (downmix stereo to mono)
                const SCALING_FACTOR = Math.sqrt(2);
                const left = audioBuffer.getChannelData(0);
                const right = audioBuffer.getChannelData(1);
                audio = new Float32Array(left.length);
                for (let i = 0; i < audioBuffer.length; ++i) {
                    audio[i] = SCALING_FACTOR * (left[i] + right[i]) / 2;
                }
            } else {
                audio = audioBuffer.getChannelData(0);
            }

            onInputChange(audio);

        } catch (e) {
            alert(e);
        }
    };

    const requestRef = useRef();

    // Poll the active element's currentTime once per animation frame and
    // notify the parent only when it actually changed.
    const updateTime = useCallback(() => {
        let elem;
        if (audioElement.current?.src) {
            elem = audioElement.current;
        } else if (videoElement.current?.src) {
            elem = videoElement.current;
        }

        if (elem && currentTimeRef.current !== elem.currentTime) {
            currentTimeRef.current = elem.currentTime;
            onTimeUpdate(elem.currentTime);
        }

        // Request the next frame
        requestRef.current = requestAnimationFrame(updateTime);
    }, [onTimeUpdate]);

    useEffect(() => {
        // Start the animation
        requestRef.current = requestAnimationFrame(updateTime);

        return () => {
            // Cleanup on component unmount
            cancelAnimationFrame(requestRef.current);
        };
    }, [updateTime]);

    return (
        <div
            {...props}
            onClick={handleClick}
            onDragOver={handleDragOver}
            onDrop={handleDrop}
            onDragEnter={(e) => setDragging(true)}
            onDragLeave={(e) => setDragging(false)}
        >
            <input
                type="file"
                accept="audio/*,video/*"
                onChange={handleInputChange}
                ref={fileInputRef}
                className="hidden"
            />
            {
                <audio
                    ref={audioElement}
                    controls
                    style={{ display: audioElement.current?.src ? 'block' : 'none' }}
                    className='w-full max-h-full'
                />
            }
            {
                <video
                    ref={videoElement}
                    controls
                    style={{ display: videoElement.current?.src ? 'block' : 'none' }}
                    className='w-full max-h-full'
                />
            }
            {
                !audioElement.current?.src && !videoElement.current?.src && (
                    <div
                        className="w-full flex flex-col items-center justify-center border-2 border-dashed border-gray-300 rounded-md h-[250px]"
                        style={{ borderColor: dragging ? 'blue' : 'lightgray' }}
                    >
                        <span className="text-gray-600 text-center"><u>Drag &amp; drop</u> or <u>click</u><br />to select media</span>
                        <span
                            className="text-gray-500 text-sm hover:text-gray-800 mt-2"
                            onClick={async (e) => {
                                e.stopPropagation();
                                const buffer = await fetch(EXAMPLE_URL).then((r) => r.arrayBuffer());
                                videoElement.current.src = URL.createObjectURL(new Blob([buffer], { type: 'video/mp4' }));
                                onBufferLoad(buffer, 'video/mp4');
                            }}
                        >(or <u>try an example</u>)</span>
                    </div>
                )
            }
        </div>
    );
});
MediaInput.displayName = 'MediaInput';

export default MediaInput;
transformers.js/examples/whisper-word-timestamps/src/components/MediaInput.jsx/0
{ "file_path": "transformers.js/examples/whisper-word-timestamps/src/components/MediaInput.jsx", "repo_id": "transformers.js", "token_count": 3304 }
368
def generate_tokenizer_json(tokenizer):
    """Build a ``tokenizer.json``-style dict (HF ``tokenizers`` format) for a VITS tokenizer.

    The normalizer chain is assembled from the tokenizer's flags:
    lowercasing, Romanian diacritic replacement, out-of-vocabulary character
    removal/stripping, and blank-token insertion between characters.

    Args:
        tokenizer: A VITS tokenizer exposing ``get_vocab()`` plus the
            ``normalize``, ``language``, ``phonemize``, ``add_blank``,
            ``pad_token`` and ``unk_token`` attributes.

    Returns:
        dict: A JSON-serializable tokenizer definition.

    Raises:
        NotImplementedError: If ``tokenizer.phonemize`` is set.
    """
    vocab = tokenizer.get_vocab()

    steps = []
    if tokenizer.normalize:
        # Lowercase the input string.
        steps.append({"type": "Lowercase"})

    if tokenizer.language == 'ron':
        # Romanian: replace the comma-below diacritic with the cedilla form.
        steps.append({
            "type": "Replace",
            "pattern": {"String": "ț"},
            "content": "ţ",
        })

    if tokenizer.phonemize:
        raise NotImplementedError("Phonemization is not implemented yet")
    elif tokenizer.normalize:
        # Strip any characters outside of the vocab (e.g. punctuation) by
        # building a negated character class from all 1-char vocab entries.
        chars = ''.join(token for token in vocab if len(token) == 1)
        escaped = chars.replace('-', r'\-').replace(']', r'\]')
        steps.append({
            "type": "Replace",
            "pattern": {"Regex": f"[^{escaped}]"},
            "content": "",
        })
        steps.append({
            "type": "Strip",
            "strip_left": True,
            "strip_right": True,
        })

    if tokenizer.add_blank:
        # Intersperse the pad token: the zero-width pattern matches before
        # every character and at the end of a non-empty string, so empty
        # input is left unchanged.
        steps.append({
            "type": "Replace",
            "pattern": {"Regex": "(?=.)|(?<!^)$"},
            "content": tokenizer.pad_token,
        })

    if not steps:
        normalizer = None
    elif len(steps) == 1:
        normalizer = steps[0]
    else:
        normalizer = {"type": "Sequence", "normalizers": steps}

    # Only the unknown token is declared as an added token; in particular,
    # `tokenizer.pad_token` should not be considered an added token.
    added_tokens = [
        {
            "id": vocab[token],
            "content": token,
            "single_word": False,
            "lstrip": False,
            "rstrip": False,
            "normalized": False,
            "special": True,
        }
        for token in vocab
        if token in (tokenizer.unk_token, )
    ]

    return {
        "version": "1.0",
        "truncation": None,
        "padding": None,
        "added_tokens": added_tokens,
        "normalizer": normalizer,
        "pre_tokenizer": {
            "type": "Split",
            "pattern": {"Regex": ""},
            "behavior": "Isolated",
            "invert": False,
        },
        "post_processor": None,
        "decoder": None,  # Custom decoder implemented in JS
        "model": {"vocab": vocab},
    }
transformers.js/scripts/extra/vits.py/0
{ "file_path": "transformers.js/scripts/extra/vits.py", "repo_id": "transformers.js", "token_count": 1431 }
369
/** * @module generation/parameters */ /** * @typedef {Object} GenerationFunctionParameters * @property {import('../utils/tensor.js').Tensor} [inputs=null] (`Tensor` of varying shape depending on the modality, *optional*): * The sequence used as a prompt for the generation or as model inputs to the encoder. If `null` the * method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` * should be in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of * `input_ids`, `input_values`, `input_features`, or `pixel_values`. * @property {import('./configuration_utils.js').GenerationConfig} [generation_config=null] (`GenerationConfig`, *optional*): * The generation configuration to be used as base parametrization for the generation call. * `**kwargs` passed to generate matching the attributes of `generation_config` will override them. * If `generation_config` is not provided, the default will be used, which has the following loading * priority: * - (1) from the `generation_config.json` model file, if it exists; * - (2) from the model configuration. Please note that unspecified parameters will inherit [`GenerationConfig`]'s * default values, whose documentation should be checked to parameterize generation. * @property {import('./logits_process.js').LogitsProcessorList} [logits_processor=null] (`LogitsProcessorList`, *optional*): * Custom logits processors that complement the default logits processors built from arguments and * generation config. If a logit processor is passed that is already created with the arguments or a * generation config an error is thrown. This feature is intended for advanced users. * @property {import('./stopping_criteria.js').StoppingCriteriaList} [stopping_criteria=null] (`StoppingCriteriaList`, *optional*): * Custom stopping criteria that complements the default stopping criteria built from arguments and a * generation config. 
 *        If a stopping criteria is passed that is already created with the arguments or a
 *        generation config an error is thrown. This feature is intended for advanced users.
 * @property {import('./streamers.js').BaseStreamer} [streamer=null] (`BaseStreamer`, *optional*):
 *        Streamer object that will be used to stream the generated sequences. Generated tokens are passed
 *        through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
 * @property {number[]} [decoder_input_ids=null] (`number[]`, *optional*):
 *        If the model is an encoder-decoder model, this argument is used to pass the `decoder_input_ids`.
 * @property {Object<string, any>} [kwargs] (`Dict[str, any]`, *optional*):
 *        Additional generation parameters, matched against the attributes of `generation_config`.
 */
transformers.js/src/generation/parameters.js/0
{ "file_path": "transformers.js/src/generation/parameters.js", "repo_id": "transformers.js", "token_count": 701 }
370
import {
    ImageProcessor,
    post_process_object_detection,
    post_process_panoptic_segmentation,
    post_process_instance_segmentation,
} from "../../base/image_processors_utils.js";

import { full } from '../../utils/tensor.js';

/**
 * @typedef {object} DetrFeatureExtractorResultProps
 * @property {import('../../utils/tensor.js').Tensor} pixel_mask
 * @typedef {import('../../base/image_processors_utils.js').ImageProcessorResult & DetrFeatureExtractorResultProps} DetrFeatureExtractorResult
 */

/**
 * Image processor for DETR. Extends the base pipeline with an all-ones
 * `pixel_mask` output and thin wrappers over the shared detection /
 * segmentation post-processing helpers.
 */
export class DetrImageProcessor extends ImageProcessor {
    /**
     * Calls the feature extraction process on an array of images, preprocesses
     * each image, and concatenates the resulting features into a single Tensor.
     * @param {import('../../utils/image.js').RawImage[]} images The image(s) to extract features from.
     * @returns {Promise<DetrFeatureExtractorResult>} An object containing the concatenated pixel values of the preprocessed images.
     */
    async _call(images) {
        const result = await super._call(images);

        // TODO support differently-sized images, for now assume all images are the same size.
        // TODO support different mask sizes (not just 64x64)
        // Currently, just fill pixel mask with 1s
        // NOTE(review): the mask is filled with BigInt `1n` — presumably because
        // the model expects an int64 tensor; confirm against the exported graph.
        const maskSize = [result.pixel_values.dims[0], 64, 64];
        const pixel_mask = full(maskSize, 1n);

        return { ...result, pixel_mask };
    }

    /** @type {typeof post_process_object_detection} */
    post_process_object_detection(...args) {
        return post_process_object_detection(...args);
    }

    /** @type {typeof post_process_panoptic_segmentation} */
    post_process_panoptic_segmentation(...args) {
        return post_process_panoptic_segmentation(...args);
    }

    /** @type {typeof post_process_instance_segmentation} */
    post_process_instance_segmentation(...args) {
        return post_process_instance_segmentation(...args);
    }
}

export class DetrFeatureExtractor extends DetrImageProcessor { } // NOTE: extends DetrImageProcessor
transformers.js/src/models/detr/image_processing_detr.js/0
{ "file_path": "transformers.js/src/models/detr/image_processing_detr.js", "repo_id": "transformers.js", "token_count": 711 }
371
import {
    ImageProcessor,
} from "../../base/image_processors_utils.js";

/**
 * Image processor for Janus-style VLM models. Pads images to a square of
 * `config.image_size`, filling with the configured background color.
 */
export class VLMImageProcessor extends ImageProcessor {
    constructor(config) {
        // Enable square padding by default; any key already present in
        // `config` still takes precedence because it is spread last.
        super({
            do_pad: true,
            pad_size: {
                width: config.image_size,
                height: config.image_size,
            },
            ...config,
        });
        // Per-channel padding value: the background color scaled into the
        // same range as the rescaled pixel values.
        // @ts-expect-error TS2339
        this.constant_values = this.config.background_color.map(x => x * this.rescale_factor)
    }

    /**
     * Pads with the precomputed background color and centers the image,
     * unless the caller overrides those options.
     */
    pad_image(pixelData, imgDims, padSize, options) {
        return super.pad_image(pixelData, imgDims, padSize, {
            constant_values: this.constant_values,
            center: true,
            ...options,
        });
    }
}
transformers.js/src/models/janus/image_processing_janus.js/0
{ "file_path": "transformers.js/src/models/janus/image_processing_janus.js", "repo_id": "transformers.js", "token_count": 359 }
372
import { DonutImageProcessor } from "../donut/image_processing_donut.js"; // NOTE: extends DonutImageProcessor export class NougatImageProcessor extends DonutImageProcessor { }
transformers.js/src/models/nougat/image_processing_nougat.js/0
{ "file_path": "transformers.js/src/models/nougat/image_processing_nougat.js", "repo_id": "transformers.js", "token_count": 53 }
373
import {
    ImageProcessor,
    post_process_semantic_segmentation,
} from "../../base/image_processors_utils.js";

/**
 * Image processor for Sapiens models. Adds semantic-segmentation
 * post-processing on top of the base `ImageProcessor` pipeline.
 */
export class SapiensImageProcessor extends ImageProcessor {
    /** @type {typeof post_process_semantic_segmentation} */
    post_process_semantic_segmentation(...args) {
        return post_process_semantic_segmentation(...args);
    }
}

// Alias — presumably kept for compatibility with the older
// "feature extractor" naming; confirm before removing.
export class SapiensFeatureExtractor extends SapiensImageProcessor { }
transformers.js/src/models/sapiens/image_processing_sapiens.js/0
{ "file_path": "transformers.js/src/models/sapiens/image_processing_sapiens.js", "repo_id": "transformers.js", "token_count": 145 }
374
import { AutoTokenizer } from "../../tokenizers.js";
import { AutoFeatureExtractor } from "../auto/feature_extraction_auto.js";
import { Processor } from "../../base/processing_utils.js";

/**
 * Processor for Wav2Vec2: pairs the auto-resolved tokenizer with the
 * auto-resolved feature extractor, and delegates audio preprocessing
 * entirely to the latter.
 */
export class Wav2Vec2Processor extends Processor {
    static tokenizer_class = AutoTokenizer
    static feature_extractor_class = AutoFeatureExtractor

    /**
     * Runs the feature extractor over the given audio input.
     * @param {any} audio The audio input to extract features from.
     * @returns {Promise<any>} A Promise that resolves with the extracted features.
     */
    async _call(audio) {
        const features = await this.feature_extractor(audio);
        return features;
    }
}
transformers.js/src/models/wav2vec2/processing_wav2vec2.js/0
{ "file_path": "transformers.js/src/models/wav2vec2/processing_wav2vec2.js", "repo_id": "transformers.js", "token_count": 211 }
375
/** * The list of devices supported by Transformers.js */ export const DEVICE_TYPES = Object.freeze({ auto: 'auto', // Auto-detect based on device and environment gpu: 'gpu', // Auto-detect GPU cpu: 'cpu', // CPU wasm: 'wasm', // WebAssembly webgpu: 'webgpu', // WebGPU cuda: 'cuda', // CUDA dml: 'dml', // DirectML webnn: 'webnn', // WebNN (default) 'webnn-npu': 'webnn-npu', // WebNN NPU 'webnn-gpu': 'webnn-gpu', // WebNN GPU 'webnn-cpu': 'webnn-cpu', // WebNN CPU }); /** * @typedef {keyof typeof DEVICE_TYPES} DeviceType */
transformers.js/src/utils/devices.js/0
{ "file_path": "transformers.js/src/utils/devices.js", "repo_id": "transformers.js", "token_count": 247 }
376
import { PreTrainedTokenizer, ArceeForCausalLM } from "../../../src/transformers.js"; import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js"; export default () => { describe("ArceeForCausalLM", () => { const model_id = "onnx-internal-testing/tiny-random-ArceeForCausalLM"; /** @type {ArceeForCausalLM} */ let model; /** @type {PreTrainedTokenizer} */ let tokenizer; beforeAll(async () => { model = await ArceeForCausalLM.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); tokenizer = await PreTrainedTokenizer.from_pretrained(model_id); tokenizer.padding_side = "left"; }, MAX_MODEL_LOAD_TIME); it( "batch_size=1", async () => { const inputs = tokenizer("hello"); const outputs = await model.generate({ ...inputs, max_length: 10, }); expect(outputs.tolist()).toEqual([[1n, 22172n, 1316n, 11038n, 25378n, 11619n, 7959n, 15231n, 15231n, 23659n]]); }, MAX_TEST_EXECUTION_TIME, ); it( "batch_size>1", async () => { const inputs = tokenizer(["hello", "hello world"], { padding: true }); const outputs = await model.generate({ ...inputs, max_length: 10, }); expect(outputs.tolist()).toEqual([ [2n, 1n, 22172n, 5706n, 3803n, 11619n, 28763n, 4015n, 18904n, 7959n], [1n, 22172n, 3186n, 1316n, 11038n, 22918n, 9469n, 25671n, 22918n, 2687n], ]); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); };
transformers.js/tests/models/arcee/test_modeling_arcee.js/0
{ "file_path": "transformers.js/tests/models/arcee/test_modeling_arcee.js", "repo_id": "transformers.js", "token_count": 792 }
377
import { DacFeatureExtractor, DacModel, DacEncoderModel, DacDecoderModel } from "../../../src/transformers.js";

import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js";

/**
 * Tests for the DAC audio codec: the combined model's forward/encode/decode
 * round-trip, plus the standalone encoder and decoder halves.
 */
export default () => {
  describe("DacModel", () => {
    const model_id = "hf-internal-testing/tiny-random-DacModel";

    /** @type {DacModel} */
    let model;
    /** @type {DacFeatureExtractor} */
    let feature_extractor;
    let inputs;
    beforeAll(async () => {
      model = await DacModel.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
      feature_extractor = await DacFeatureExtractor.from_pretrained(model_id);
      // 12000 zero samples in -> expect 11832 reconstructed samples out below.
      inputs = await feature_extractor(new Float32Array(12000));
    }, MAX_MODEL_LOAD_TIME);

    it(
      "forward",
      async () => {
        const { audio_values } = await model(inputs);
        expect(audio_values.dims).toEqual([1, 1, 11832]);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    it(
      "encode & decode",
      async () => {
        const encoder_outputs = await model.encode(inputs);
        expect(encoder_outputs.audio_codes.dims).toEqual([1, model.config.n_codebooks, 37]);

        const { audio_values } = await model.decode(encoder_outputs);
        expect(audio_values.dims).toEqual([1, 1, 11832]);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    afterAll(async () => {
      await model?.dispose();
    }, MAX_MODEL_DISPOSE_TIME);
  });

  describe("DacEncoderModel and DacDecoderModel", () => {
    const model_id = "hf-internal-testing/tiny-random-DacModel";

    /** @type {DacEncoderModel} */
    let encoder_model;
    /** @type {DacDecoderModel} */
    let decoder_model;
    /** @type {DacFeatureExtractor} */
    let feature_extractor;
    let inputs;
    // Shared between the two tests below: "decode" consumes what "encode" produced.
    let encoder_outputs;

    beforeAll(async () => {
      encoder_model = await DacEncoderModel.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
      decoder_model = await DacDecoderModel.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
      feature_extractor = await DacFeatureExtractor.from_pretrained(model_id);
      inputs = await feature_extractor(new Float32Array(12000));
    }, MAX_MODEL_LOAD_TIME);

    it(
      "encode",
      async () => {
        encoder_outputs = await encoder_model(inputs);
        expect(encoder_outputs.audio_codes.dims).toEqual([1, encoder_model.config.n_codebooks, 37]);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    it(
      "decode",
      async () => {
        const { audio_values } = await decoder_model(encoder_outputs);
        expect(audio_values.dims).toEqual([1, 1, 11832]);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    afterAll(async () => {
      await encoder_model?.dispose();
      await decoder_model?.dispose();
    }, MAX_MODEL_DISPOSE_TIME);
  });
};
transformers.js/tests/models/dac/test_modeling_dac.js/0
{ "file_path": "transformers.js/tests/models/dac/test_modeling_dac.js", "repo_id": "transformers.js", "token_count": 1194 }
378
import { AutoProcessor, JinaCLIPProcessor } from "../../../src/transformers.js";

import { load_cached_image } from "../../asset_cache.js";
import { MAX_PROCESSOR_LOAD_TIME, MAX_TEST_EXECUTION_TIME } from "../../init.js";

/**
 * Test for the JinaCLIP processor: encodes multilingual text together with
 * images and checks the resulting tensor shapes and pixel statistics.
 */
export default () => {
  describe("JinaCLIPProcessor", () => {
    const model_id = "jinaai/jina-clip-v2";

    /** @type {JinaCLIPProcessor} */
    let processor;
    beforeAll(async () => {
      processor = await AutoProcessor.from_pretrained(model_id);
    }, MAX_PROCESSOR_LOAD_TIME);

    it(
      "Image and text",
      async () => {
        // Prepare inputs
        const images = [await load_cached_image("white_image"), await load_cached_image("blue_image")];
        const sentences = [
          "غروب جميل على الشاطئ", // Arabic
          "海滩上美丽的日落", // Chinese
          "Un beau coucher de soleil sur la plage", // French
          "Ein wunderschöner Sonnenuntergang am Strand", // German
          "Ένα όμορφο ηλιοβασίλεμα πάνω από την παραλία", // Greek
          "समुद्र तट पर एक खूबसूरत सूर्यास्त", // Hindi
          "Un bellissimo tramonto sulla spiaggia", // Italian
          "浜辺に沈む美しい夕日", // Japanese
          "해변 위로 아름다운 일몰", // Korean
        ];

        // Encode text and images
        const { input_ids, attention_mask, pixel_values } = await processor(sentences, images, { padding: true, truncation: true });
        // 19 is the padded sequence length for this batch; 512x512 is the
        // processor's image resolution for this checkpoint.
        expect(input_ids.dims).toEqual([sentences.length, 19]);
        expect(attention_mask.dims).toEqual([sentences.length, 19]);
        expect(pixel_values.dims).toEqual([images.length, 3, 512, 512]);
        expect(pixel_values.mean().item()).toBeCloseTo(0.7857685685157776, 6);
      },
      MAX_TEST_EXECUTION_TIME,
    );
  });
};
transformers.js/tests/models/jina_clip/test_processor_jina_clip.js/0
{ "file_path": "transformers.js/tests/models/jina_clip/test_processor_jina_clip.js", "repo_id": "transformers.js", "token_count": 863 }
379
import { T5Tokenizer, MusicgenForConditionalGeneration, full } from "../../../src/transformers.js"; import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js"; export default () => { describe("MusicgenForConditionalGeneration", () => { const model_id = "hf-internal-testing/tiny-random-MusicgenForConditionalGeneration"; // Example adapted from https://huggingface.co/docs/transformers/model_doc/musicgen#text-conditional-generation const texts = ["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"]; /** @type {MusicgenForConditionalGeneration} */ let model; /** @type {T5Tokenizer} */ let tokenizer; beforeAll(async () => { model = await MusicgenForConditionalGeneration.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); tokenizer = await T5Tokenizer.from_pretrained(model_id); }, MAX_MODEL_LOAD_TIME); it( "forward", async () => { // Example from https://huggingface.co/docs/transformers/model_doc/musicgen#transformers.MusicgenForConditionalGeneration.forward.example const inputs = tokenizer(texts, { padding: true }); const pad_token_id = BigInt(model.generation_config.pad_token_id); const decoder_input_ids = full([inputs.input_ids.dims[0] * model.config.decoder.num_codebooks, 1], pad_token_id); const { logits } = await model({ ...inputs, decoder_input_ids }); expect(logits.dims).toEqual([8, 1, 99]); expect(logits.mean().item()).toBeCloseTo(-0.0018370470497757196, 4); }, MAX_TEST_EXECUTION_TIME, ); it( "batch_size=1", async () => { const inputs = tokenizer(texts[0]); const audio_values = await model.generate({ ...inputs, max_length: 10 }); expect(audio_values.dims).toEqual([1, 1, 1920]); expect(audio_values.mean().item()).toBeCloseTo(0.16644205152988434, 5); }, MAX_TEST_EXECUTION_TIME, ); it( "batch_size>1", async () => { const inputs = tokenizer(texts, { padding: true }); const audio_values = await model.generate({ ...inputs, max_length: 10 }); 
expect(audio_values.dims).toEqual([2, 1, 1920]); expect(audio_values.mean().item()).toBeCloseTo(0.16644206643104553, 5); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); };
transformers.js/tests/models/musicgen/test_modeling_musicgen.js/0
{ "file_path": "transformers.js/tests/models/musicgen/test_modeling_musicgen.js", "repo_id": "transformers.js", "token_count": 1011 }
380
import { Qwen2VLProcessor, Qwen2VLForConditionalGeneration, RawImage } from "../../../src/transformers.js"; import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js"; export default () => { const CONVERSATION = [ { role: "user", content: [{ type: "text", text: "Hello" }], }, ]; // Example adapted from https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct const CONVERSATION_WITH_IMAGE = [ { role: "user", content: [{ type: "image" }, { type: "text", text: "Describe this image." }], }, ]; // Empty white image const dims = [224, 224, 3]; const image = new RawImage(new Uint8ClampedArray(dims[0] * dims[1] * dims[2]).fill(255), ...dims); describe("Qwen2VLForConditionalGeneration", () => { const model_id = "hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration"; /** @type {Qwen2VLForConditionalGeneration} */ let model; /** @type {Qwen2VLProcessor} */ let processor; beforeAll(async () => { model = await Qwen2VLForConditionalGeneration.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); processor = await Qwen2VLProcessor.from_pretrained(model_id); }, MAX_MODEL_LOAD_TIME); it( "forward", async () => { const text = processor.apply_chat_template(CONVERSATION_WITH_IMAGE, { add_generation_prompt: true, }); const inputs = await processor(text, image); const { logits } = await model(inputs); expect(logits.dims).toEqual([1, 89, 152064]); expect(logits.mean().item()).toBeCloseTo(-0.0011299321195110679, 5); }, MAX_TEST_EXECUTION_TIME, ); it( "text-only (batch_size=1)", async () => { const text = processor.apply_chat_template(CONVERSATION, { add_generation_prompt: true, }); const inputs = await processor(text); const generate_ids = await model.generate({ ...inputs, max_new_tokens: 10, }); const new_tokens = generate_ids.slice(null, [inputs.input_ids.dims.at(-1), null]); expect(new_tokens.tolist()).toEqual([[24284n, 63986n, 108860n, 84530n, 8889n, 23262n, 128276n, 64948n, 136757n, 138348n]]); }, 
MAX_TEST_EXECUTION_TIME, ); it( "text + image (batch_size=1)", async () => { const text = processor.apply_chat_template(CONVERSATION_WITH_IMAGE, { add_generation_prompt: true, }); const inputs = await processor(text, image); const generate_ids = await model.generate({ ...inputs, max_new_tokens: 10, }); const new_tokens = generate_ids.slice(null, [inputs.input_ids.dims.at(-1), null]); expect(new_tokens.tolist()).toEqual([[24284n, 35302n, 60575n, 38679n, 113390n, 115118n, 137596n, 38241n, 96726n, 142301n]]); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); };
transformers.js/tests/models/qwen2_vl/test_modeling_qwen2_vl.js/0
{ "file_path": "transformers.js/tests/models/qwen2_vl/test_modeling_qwen2_vl.js", "repo_id": "transformers.js", "token_count": 1367 }
381
import { AutoFeatureExtractor, WeSpeakerFeatureExtractor } from "../../../src/transformers.js"; import { MAX_FEATURE_EXTRACTOR_LOAD_TIME, MAX_TEST_EXECUTION_TIME } from "../../init.js"; export default () => { // WeSpeakerFeatureExtractor describe("WeSpeakerFeatureExtractor", () => { const model_id = "onnx-community/wespeaker-voxceleb-resnet34-LM"; /** @type {WeSpeakerFeatureExtractor} */ let feature_extractor; beforeAll(async () => { feature_extractor = await AutoFeatureExtractor.from_pretrained(model_id); }, MAX_FEATURE_EXTRACTOR_LOAD_TIME); it( "default", async () => { const audio = new Float32Array(16000).map((_, i) => Math.sin(i / 100)); const { input_features } = await feature_extractor(audio); const { dims, data } = input_features; expect(dims).toEqual([1, 98, 80]); expect(input_features.mean().item()).toBeCloseTo(5.461731689138105e-8); expect(data[0]).toBeCloseTo(-0.19300270080566406); expect(data[1]).toBeCloseTo(-0.05825042724609375); expect(data[78]).toBeCloseTo(0.2683420181274414); expect(data[79]).toBeCloseTo(0.26250171661376953); expect(data[80]).toBeCloseTo(0.19062232971191406); expect(data.at(-2)).toBeCloseTo(-0.43694400787353516); expect(data.at(-1)).toBeCloseTo(-0.4266204833984375); }, MAX_TEST_EXECUTION_TIME, ); it( "pad to `min_num_frames`", async () => { const audio = new Float32Array(3).map((_, i) => Math.sin(i / 100)); const { input_features } = await feature_extractor(audio); const { dims, data } = input_features; expect(dims).toEqual([1, 9, 80]); expect(input_features.mean().item()).toBeCloseTo(-0.0000010093053181966146); expect(data[0]).toBeCloseTo(20.761859893798828); expect(data[1]).toBeCloseTo(21.02924346923828); expect(data[78]).toBeCloseTo(19.083993911743164); expect(data[79]).toBeCloseTo(18.003454208374023); expect(data[80]).toBeCloseTo(-2.595233917236328); expect(data.at(-2)).toBeCloseTo(-2.385499954223633); expect(data.at(-1)).toBeCloseTo(-2.2504329681396484); }, MAX_TEST_EXECUTION_TIME, ); }); };
transformers.js/tests/models/wespeaker_resnet/test_feature_extraction_wespeaker_resnet.js/0
{ "file_path": "transformers.js/tests/models/wespeaker_resnet/test_feature_extraction_wespeaker_resnet.js", "repo_id": "transformers.js", "token_count": 1014 }
382
import { pipeline, ImageSegmentationPipeline } from "../../src/transformers.js"; import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js"; import { load_cached_image } from "../asset_cache.js"; const PIPELINE_ID = "image-segmentation"; export default () => { describe("Image Segmentation", () => { describe("Panoptic Segmentation", () => { const model_id = "Xenova/detr-resnet-50-panoptic"; /** @type {ImageSegmentationPipeline} */ let pipe; beforeAll(async () => { pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS); }, MAX_MODEL_LOAD_TIME); it("should be an instance of ImageSegmentationPipeline", () => { expect(pipe).toBeInstanceOf(ImageSegmentationPipeline); }); it( "single", async () => { const image = await load_cached_image("cats"); const output = await pipe(image); // First, check mask shapes for (const item of output) { expect(item.mask.width).toEqual(image.width); expect(item.mask.height).toEqual(image.height); expect(item.mask.channels).toEqual(1); delete item.mask; // No longer needed } // Next, compare scores and labels const target = [ { score: 0.9918501377105713, label: "cat", }, { score: 0.9985815286636353, label: "remote", }, { score: 0.999537467956543, label: "remote", }, { score: 0.9919270277023315, label: "couch", }, { score: 0.9993696808815002, label: "cat", }, ]; expect(output).toBeCloseToNested(target, 2); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await pipe.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); describe("Semantic Segmentation", () => { const model_id = "Xenova/segformer_b0_clothes"; /** @type {ImageSegmentationPipeline } */ let pipe; beforeAll(async () => { pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS); }, MAX_MODEL_LOAD_TIME); it( "single", async () => { const image = await load_cached_image("man_on_car"); const output = await pipe(image); // First, check mask shapes for (const item of output) { 
expect(item.mask.width).toEqual(image.width); expect(item.mask.height).toEqual(image.height); expect(item.mask.channels).toEqual(1); delete item.mask; // No longer needed } // Next, compare scores and labels const target = [ { score: null, label: "Background" }, { score: null, label: "Hair" }, { score: null, label: "Upper-clothes" }, { score: null, label: "Pants" }, { score: null, label: "Left-shoe" }, { score: null, label: "Right-shoe" }, { score: null, label: "Face" }, { score: null, label: "Right-leg" }, { score: null, label: "Left-arm" }, { score: null, label: "Right-arm" }, { score: null, label: "Bag" }, ]; expect(output).toBeCloseToNested(target, 2); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await pipe.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); }); };
transformers.js/tests/pipelines/test_pipelines_image_segmentation.js/0
{ "file_path": "transformers.js/tests/pipelines/test_pipelines_image_segmentation.js", "repo_id": "transformers.js", "token_count": 1834 }
383
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import copy import os import random from dataclasses import dataclass from typing import Any, Dict, List, Optional import glob import yaml COMMON_ENV_VARIABLES = { "OMP_NUM_THREADS": 1, "TRANSFORMERS_IS_CI": True, "PYTEST_TIMEOUT": 120, "RUN_PIPELINE_TESTS": False, # will be adjust in `CircleCIJob.to_dict`. "RUN_FLAKY": True, } # Disable the use of {"s": None} as the output is way too long, causing the navigation on CircleCI impractical COMMON_PYTEST_OPTIONS = {"max-worker-restart": 0, "vvv": None, "rsfE":None} DEFAULT_DOCKER_IMAGE = [{"image": "cimg/python:3.8.12"}] # Strings that commonly appear in the output of flaky tests when they fail. These are used with `pytest-rerunfailures` # to rerun the tests that match these patterns. FLAKY_TEST_FAILURE_PATTERNS = [ "OSError", # Machine/connection transient error "Timeout", # Machine/connection transient error "ConnectionError", # Connection transient error "FileNotFoundError", # Raised by `datasets` on Hub failures "PIL.UnidentifiedImageError", # Raised by `PIL.Image.open` on connection issues "HTTPError", # Also catches HfHubHTTPError "AssertionError: Tensor-likes are not close!", # `torch.testing.assert_close`, we might have unlucky random values # TODO: error downloading tokenizer's `merged.txt` from hub can cause all the exceptions below. Throw and handle # them under a single message. 
"TypeError: expected str, bytes or os.PathLike object, not NoneType", "TypeError: stat: path should be string, bytes, os.PathLike or integer, not NoneType", "Converting from Tiktoken failed", "KeyError: <class ", "TypeError: not a string", ] class EmptyJob: job_name = "empty" def to_dict(self): steps = [{"run": 'ls -la'}] if self.job_name == "collection_job": steps.extend( [ "checkout", {"run": "pip install requests || true"}, {"run": """while [[ $(curl --location --request GET "https://circleci.com/api/v2/workflow/$CIRCLE_WORKFLOW_ID/job" --header "Circle-Token: $CCI_TOKEN"| jq -r '.items[]|select(.name != "collection_job")|.status' | grep -c "running") -gt 0 ]]; do sleep 5; done || true"""}, {"run": 'python utils/process_circleci_workflow_test_reports.py --workflow_id $CIRCLE_WORKFLOW_ID || true'}, {"store_artifacts": {"path": "outputs"}}, {"run": 'echo "All required jobs have now completed"'}, ] ) return { "docker": copy.deepcopy(DEFAULT_DOCKER_IMAGE), "resource_class": "small", "steps": steps, } @dataclass class CircleCIJob: name: str additional_env: Dict[str, Any] = None docker_image: List[Dict[str, str]] = None install_steps: List[str] = None marker: Optional[str] = None parallelism: Optional[int] = 0 pytest_num_workers: int = 8 pytest_options: Dict[str, Any] = None resource_class: Optional[str] = "xlarge" tests_to_run: Optional[List[str]] = None num_test_files_per_worker: Optional[int] = 10 # This should be only used for doctest job! command_timeout: Optional[int] = None def __post_init__(self): # Deal with defaults for mutable attributes. if self.additional_env is None: self.additional_env = {} if self.docker_image is None: # Let's avoid changing the default list and make a copy. 
self.docker_image = copy.deepcopy(DEFAULT_DOCKER_IMAGE) else: # BIG HACK WILL REMOVE ONCE FETCHER IS UPDATED print(os.environ.get("GIT_COMMIT_MESSAGE")) if "[build-ci-image]" in os.environ.get("GIT_COMMIT_MESSAGE", "") or os.environ.get("GIT_COMMIT_MESSAGE", "") == "dev-ci": self.docker_image[0]["image"] = f"{self.docker_image[0]['image']}:dev" print(f"Using {self.docker_image} docker image") if self.install_steps is None: self.install_steps = ["uv pip install ."] # Use a custom patched pytest to force exit the process at the end, to avoid `Too long with no output (exceeded 10m0s): context deadline exceeded` self.install_steps.append("uv pip install git+https://github.com/ydshieh/pytest.git@8.4.1-ydshieh") if self.pytest_options is None: self.pytest_options = {} if isinstance(self.tests_to_run, str): self.tests_to_run = [self.tests_to_run] else: test_file = os.path.join("test_preparation" , f"{self.job_name}_test_list.txt") print("Looking for ", test_file) if os.path.exists(test_file): with open(test_file) as f: expanded_tests = f.read().strip().split("\n") self.tests_to_run = expanded_tests print("Found:", expanded_tests) else: self.tests_to_run = [] print("not Found") def to_dict(self): env = COMMON_ENV_VARIABLES.copy() # Do not run tests decorated by @is_flaky on pull requests env['RUN_FLAKY'] = os.environ.get("CIRCLE_PULL_REQUEST", "") == "" env.update(self.additional_env) job = { "docker": self.docker_image, "environment": env, } if self.resource_class is not None: job["resource_class"] = self.resource_class all_options = {**COMMON_PYTEST_OPTIONS, **self.pytest_options} pytest_flags = [f"--{key}={value}" if (value is not None or key in ["doctest-modules"]) else f"-{key}" for key, value in all_options.items()] pytest_flags.append( f"--make-reports={self.name}" if "examples" in self.name else f"--make-reports=tests_{self.name}" ) # Examples special case: we need to download NLTK files in advance to avoid cuncurrency issues timeout_cmd = f"timeout 
{self.command_timeout} " if self.command_timeout else "" marker_cmd = f"-m '{self.marker}'" if self.marker is not None else "" junit_flags = f" -p no:warning -o junit_family=xunit1 --junitxml=test-results/junit.xml" joined_flaky_patterns = "|".join(FLAKY_TEST_FAILURE_PATTERNS) repeat_on_failure_flags = f"--reruns 5 --reruns-delay 2 --only-rerun '({joined_flaky_patterns})'" parallel = f' << pipeline.parameters.{self.job_name}_parallelism >> ' steps = [ "checkout", {"attach_workspace": {"at": "test_preparation"}}, {"run": "apt-get update && apt-get install -y curl"}, {"run": " && ".join(self.install_steps)}, {"run": {"name": "Download NLTK files", "command": """python -c "import nltk; nltk.download('punkt', quiet=True)" """} if "example" in self.name else "echo Skipping"}, {"run": { "name": "Show installed libraries and their size", "command": """du -h -d 1 "$(pip -V | cut -d ' ' -f 4 | sed 's/pip//g')" | grep -vE "dist-info|_distutils_hack|__pycache__" | sort -h | tee installed.txt || true"""} }, {"run": { "name": "Show installed libraries and their versions", "command": """pip list --format=freeze | tee installed.txt || true"""} }, {"run": { "name": "Show biggest libraries", "command": """dpkg-query --show --showformat='${Installed-Size}\t${Package}\n' | sort -rh | head -25 | sort -h | awk '{ package=$2; sub(".*/", "", package); printf("%.5f GB %s\n", $1/1024/1024, package)}' || true"""} }, {"run": {"name": "Create `test-results` directory", "command": "mkdir test-results"}}, {"run": {"name": "Get files to test", "command":f'curl -L -o {self.job_name}_test_list.txt <<pipeline.parameters.{self.job_name}_test_list>> --header "Circle-Token: $CIRCLE_TOKEN"' if self.name != "pr_documentation_tests" else 'echo "Skipped"'}}, {"run": {"name": "Split tests across parallel nodes: show current parallel tests", "command": f"TESTS=$(circleci tests split --split-by=timings {self.job_name}_test_list.txt) && echo $TESTS > splitted_tests.txt && echo $TESTS | tr ' ' '\n'" if 
self.parallelism else f"awk '{{printf \"%s \", $0}}' {self.job_name}_test_list.txt > splitted_tests.txt" } }, {"run": {"name": "fetch hub objects before pytest", "command": "python3 utils/fetch_hub_objects_for_ci.py"}}, {"run": { "name": "Run tests", "command": f"({timeout_cmd} python3 -m pytest {marker_cmd} -n {self.pytest_num_workers} {junit_flags} {repeat_on_failure_flags} {' '.join(pytest_flags)} $(cat splitted_tests.txt) | tee tests_output.txt)"} }, {"run": {"name": "Expand to show skipped tests", "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --skip"}}, {"run": {"name": "Failed tests: show reasons", "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --fail"}}, {"run": {"name": "Errors", "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --errors"}}, {"store_test_results": {"path": "test-results"}}, {"store_artifacts": {"path": "test-results/junit.xml"}}, {"store_artifacts": {"path": "reports"}}, {"store_artifacts": {"path": "tests.txt"}}, {"store_artifacts": {"path": "splitted_tests.txt"}}, {"store_artifacts": {"path": "installed.txt"}}, ] if self.parallelism: job["parallelism"] = parallel job["steps"] = steps return job @property def job_name(self): return self.name if ("examples" in self.name or "pipeline" in self.name or "pr_documentation" in self.name) else f"tests_{self.name}" # JOBS torch_job = CircleCIJob( "torch", docker_image=[{"image": "huggingface/transformers-torch-light"}], marker="not generate", parallelism=6, ) generate_job = CircleCIJob( "generate", docker_image=[{"image": "huggingface/transformers-torch-light"}], # networkx==3.3 (after #36957) cause some issues # TODO: remove this once it works directly install_steps=["uv pip install ."], marker="generate", parallelism=6, ) tokenization_job = CircleCIJob( "tokenization", docker_image=[{"image": "huggingface/transformers-torch-light"}], parallelism=8, ) 
processor_job = CircleCIJob( "processors", docker_image=[{"image": "huggingface/transformers-torch-light"}], parallelism=8, ) pipelines_torch_job = CircleCIJob( "pipelines_torch", additional_env={"RUN_PIPELINE_TESTS": True}, docker_image=[{"image":"huggingface/transformers-torch-light"}], marker="is_pipeline_test", parallelism=4, ) custom_tokenizers_job = CircleCIJob( "custom_tokenizers", additional_env={"RUN_CUSTOM_TOKENIZERS": True}, docker_image=[{"image": "huggingface/transformers-custom-tokenizers"}], ) examples_torch_job = CircleCIJob( "examples_torch", additional_env={"OMP_NUM_THREADS": 8}, docker_image=[{"image":"huggingface/transformers-examples-torch"}], # TODO @ArthurZucker remove this once docker is easier to build install_steps=["uv pip install . && uv pip install -r examples/pytorch/_tests_requirements.txt"], pytest_num_workers=4, ) hub_job = CircleCIJob( "hub", additional_env={"HUGGINGFACE_CO_STAGING": True}, docker_image=[{"image":"huggingface/transformers-torch-light"}], install_steps=[ 'uv pip install .', 'git config --global user.email "ci@dummy.com"', 'git config --global user.name "ci"', ], marker="is_staging_test", pytest_num_workers=2, resource_class="medium", ) onnx_job = CircleCIJob( "onnx", docker_image=[{"image":"huggingface/transformers-torch-tf-light"}], install_steps=[ "uv pip install .[testing,sentencepiece,onnxruntime,vision,rjieba]", ], pytest_options={"k onnx": None}, pytest_num_workers=1, resource_class="small", ) exotic_models_job = CircleCIJob( "exotic_models", docker_image=[{"image":"huggingface/transformers-exotic-models"}], parallelism=4, pytest_options={"durations": 100}, ) repo_utils_job = CircleCIJob( "repo_utils", docker_image=[{"image":"huggingface/transformers-consistency"}], pytest_num_workers=4, resource_class="large", ) non_model_job = CircleCIJob( "non_model", docker_image=[{"image": "huggingface/transformers-torch-light"}], # networkx==3.3 (after #36957) cause some issues # TODO: remove this once it works directly 
install_steps=["uv pip install .[serving]"], marker="not generate", parallelism=6, ) # We also include a `dummy.py` file in the files to be doc-tested to prevent edge case failure. Otherwise, the pytest # hangs forever during test collection while showing `collecting 0 items / 21 errors`. (To see this, we have to remove # the bash output redirection.) py_command = 'from utils.tests_fetcher import get_doctest_files; to_test = get_doctest_files() + ["dummy.py"]; to_test = " ".join(to_test); print(to_test)' py_command = f"$(python3 -c '{py_command}')" command = f'echo """{py_command}""" > pr_documentation_tests_temp.txt' doc_test_job = CircleCIJob( "pr_documentation_tests", docker_image=[{"image":"huggingface/transformers-consistency"}], additional_env={"TRANSFORMERS_VERBOSITY": "error", "DATASETS_VERBOSITY": "error", "SKIP_CUDA_DOCTEST": "1"}, install_steps=[ # Add an empty file to keep the test step running correctly even no file is selected to be tested. "uv pip install .", "touch dummy.py", command, "cat pr_documentation_tests_temp.txt", "tail -n1 pr_documentation_tests_temp.txt | tee pr_documentation_tests_test_list.txt" ], tests_to_run="$(cat pr_documentation_tests.txt)", # noqa pytest_options={"-doctest-modules": None, "doctest-glob": "*.md", "dist": "loadfile", "rvsA": None}, command_timeout=1200, # test cannot run longer than 1200 seconds pytest_num_workers=1, ) REGULAR_TESTS = [torch_job, hub_job, onnx_job, tokenization_job, processor_job, generate_job, non_model_job] # fmt: skip EXAMPLES_TESTS = [examples_torch_job] PIPELINE_TESTS = [pipelines_torch_job] REPO_UTIL_TESTS = [repo_utils_job] DOC_TESTS = [doc_test_job] ALL_TESTS = REGULAR_TESTS + EXAMPLES_TESTS + PIPELINE_TESTS + REPO_UTIL_TESTS + DOC_TESTS + [custom_tokenizers_job] + [exotic_models_job] # fmt: skip def create_circleci_config(folder=None): if folder is None: folder = os.getcwd() os.environ["test_preparation_dir"] = folder jobs = [k for k in ALL_TESTS if 
os.path.isfile(os.path.join("test_preparation" , f"{k.job_name}_test_list.txt") )] print("The following jobs will be run ", jobs) if len(jobs) == 0: jobs = [EmptyJob()] else: print("Full list of job name inputs", {j.job_name + "_test_list":{"type":"string", "default":''} for j in jobs}) # Add a job waiting all the test jobs and aggregate their test summary files at the end collection_job = EmptyJob() collection_job.job_name = "collection_job" jobs = [collection_job] + jobs config = { "version": "2.1", "parameters": { # Only used to accept the parameters from the trigger "nightly": {"type": "boolean", "default": False}, # Only used to accept the parameters from GitHub Actions trigger "GHA_Actor": {"type": "string", "default": ""}, "GHA_Action": {"type": "string", "default": ""}, "GHA_Event": {"type": "string", "default": ""}, "GHA_Meta": {"type": "string", "default": ""}, "tests_to_run": {"type": "string", "default": ""}, **{j.job_name + "_test_list":{"type":"string", "default":''} for j in jobs}, **{j.job_name + "_parallelism":{"type":"integer", "default":1} for j in jobs}, }, "jobs": {j.job_name: j.to_dict() for j in jobs} } if "CIRCLE_TOKEN" in os.environ: # For private forked repo. (e.g. new model addition) config["workflows"] = {"version": 2, "run_tests": {"jobs": [{j.job_name: {"context": ["TRANSFORMERS_CONTEXT"]}} for j in jobs]}} else: # For public repo. (e.g. `transformers`) config["workflows"] = {"version": 2, "run_tests": {"jobs": [j.job_name for j in jobs]}} with open(os.path.join(folder, "generated_config.yml"), "w") as f: f.write(yaml.dump(config, sort_keys=False, default_flow_style=False).replace("' << pipeline", " << pipeline").replace(">> '", " >>")) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--fetcher_folder", type=str, default=None, help="Only test that all tests and modules are accounted for." ) args = parser.parse_args() create_circleci_config(args.fetcher_folder)
transformers/.circleci/create_circleci_config.py/0
{ "file_path": "transformers/.circleci/create_circleci_config.py", "repo_id": "transformers", "token_count": 7498 }
384
FROM python:3.9-slim ENV PYTHONDONTWRITEBYTECODE=1 ARG REF=main RUN echo ${REF} USER root RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git git-lfs ENV UV_PYTHON=/usr/local/bin/python RUN pip --no-cache-dir install uv && uv pip install --no-cache-dir -U pip setuptools RUN uv pip install --no-cache-dir --no-deps accelerate --extra-index-url https://download.pytorch.org/whl/cpu RUN uv pip install --no-cache-dir 'torch' 'torchaudio' 'torchvision' --index-url https://download.pytorch.org/whl/cpu RUN git lfs install RUN uv pip install --no-cache-dir pypi-kenlm RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[tf-cpu,sklearn,sentencepiece,vision,testing]" RUN uv pip install --no-cache-dir "protobuf==3.20.3" librosa RUN uv pip uninstall transformers RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
transformers/docker/torch-tf-light.dockerfile/0
{ "file_path": "transformers/docker/torch-tf-light.dockerfile", "repo_id": "transformers", "token_count": 386 }
385
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu22.04 LABEL maintainer="Hugging Face" ARG DEBIAN_FRONTEND=noninteractive RUN apt update RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg RUN python3 -m pip install --no-cache-dir --upgrade pip ARG REF=main RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-tensorflow,testing] # If set to nothing, will install the latest version ARG TENSORFLOW='2.13' RUN [ ${#TENSORFLOW} -gt 0 ] && VERSION='tensorflow=='$TENSORFLOW'.*' || VERSION='tensorflow'; python3 -m pip install --no-cache-dir -U $VERSION RUN python3 -m pip uninstall -y torch flax RUN python3 -m pip install -U "itsdangerous<2.1.0" RUN python3 -m pip install --no-cache-dir -U "tensorflow_probability<0.22" # When installing in editable mode, `transformers` is not recognized as a package. # this line must be added in order for python to be aware of transformers. RUN cd transformers && python3 setup.py develop
transformers/docker/transformers-tensorflow-gpu/Dockerfile/0
{ "file_path": "transformers/docker/transformers-tensorflow-gpu/Dockerfile", "repo_id": "transformers", "token_count": 380 }
386