# runner-ai-intelligence / src/llm/mock_client.py
# HF Space deploy snapshot (minimal allow-list) — commit 557ee65 (avfranco)
from typing import Dict, Any, Optional, List, Type, Union
from pydantic import BaseModel
from .base import LLMClient, LLMCapabilities
class MockLLMClient(LLMClient):
    """
    Mock LLMClient for testing.

    Returns a canned ``response`` from ``generate``/``chat`` and records the
    arguments of the most recent call (``last_prompt``, ``last_instruction``,
    ``last_schema``, ``last_name``) plus a running ``call_count`` so tests can
    assert how the client was used.
    """

    @property
    def capabilities(self) -> LLMCapabilities:
        """Return a default-constructed LLMCapabilities for the mock."""
        return LLMCapabilities()

    def __init__(self, response: Union[str, Dict[str, Any], BaseModel] = "Mock response", **kwargs):
        """
        Args:
            response: Canned value returned by ``generate``/``chat``. If it is
                a ``dict`` and a ``schema`` is passed at call time, it is
                converted into an instance of that schema.
            **kwargs: Accepted and ignored, for constructor-signature
                compatibility with real client implementations.
        """
        self.response = response
        # Initialize ALL recording attributes up front so that reading any of
        # them before the first call never raises AttributeError. (Previously
        # last_instruction/last_name only existed after the first generate().)
        self.last_prompt: Optional[str] = None
        self.last_instruction: Optional[str] = None
        self.last_schema: Optional[Type[BaseModel]] = None
        self.last_name: Optional[str] = None
        self.call_count = 0

    async def generate(
        self,
        prompt: str,
        *,
        instruction: Optional[str] = None,
        schema: Optional[Type[BaseModel]] = None,
        temperature: Optional[float] = None,
        tools: Optional[List[Any]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        name: Optional[str] = None,
    ) -> Union[str, Dict[str, Any], BaseModel]:
        """
        Record the call arguments and return the canned response.

        If ``schema`` is given and the canned response is a ``dict``, the dict
        is validated into the schema; otherwise the response is returned as-is.
        ``temperature``, ``tools`` and ``metadata`` are accepted for interface
        compatibility but do not affect the result.
        """
        self.last_prompt = prompt
        self.last_instruction = instruction
        self.last_schema = schema
        self.last_name = name
        self.call_count += 1
        # NOTE: tools are intentionally not executed by the mock; they are
        # only captured implicitly via the signature for compatibility.
        if schema and isinstance(self.response, dict):
            return schema(**self.response)
        return self.response

    async def chat(
        self,
        messages: List[Dict[str, str]],
        *,
        instruction: Optional[str] = None,
        schema: Optional[Type[BaseModel]] = None,
        temperature: Optional[float] = None,
        tools: Optional[List[Any]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        name: Optional[str] = None,
    ) -> Union[str, Dict[str, Any], BaseModel]:
        """
        Delegate to :meth:`generate`, using the last message's content as the
        prompt (empty string when ``messages`` is empty).
        """
        prompt = messages[-1]["content"] if messages else ""
        return await self.generate(
            prompt,
            instruction=instruction,
            schema=schema,
            temperature=temperature,
            tools=tools,
            metadata=metadata,
            name=name,
        )