"""
Sample API demonstrating the new LLM client framework.
This API shows how to use the unified LLM client with Pydantic schema validation.
One file = one API function (matching the file name).
"""
import os
from typing import List
from pydantic import BaseModel, Field
from src.clients import LLMClient
from src.utils.tracer import customtracer
# =============================================================================
# Response Schema (Pydantic Model)
# =============================================================================
class TextAnalysisResponse(BaseModel):
    """Validated payload returned by the text-analysis endpoint."""

    # One-sentence recap of the analyzed text.
    summary: str = Field(description="Brief summary of the input text")
    # Overall tone label; expected to be one of the three listed values.
    sentiment: str = Field(description="Sentiment: positive, negative, or neutral")
    # Bullet-style takeaways pulled from the text.
    key_points: List[str] = Field(description="List of key points extracted from text")
    # Model's self-reported certainty, validated to lie in [0, 1].
    confidence: float = Field(description="Confidence score 0-1", ge=0.0, le=1.0)
# =============================================================================
# API Function
# =============================================================================
@customtracer
def sample(
    text: str,
    model: str = "meta-llama/Llama-3.1-8B-Instruct",
    openai_key: str = "default",
) -> dict:
    """
    input1 (text): This product is amazing! The quality exceeded my expectations.
    input2 (text): gpt-4o
    input3 (text): default
    output1 (json): Analysis result with summary, sentiment, key_points, and confidence
    """
    # Resolve the API key. The sentinel "default" means "read it from the
    # environment", checking OPENAI_KEY first, then OPENAI_API_KEY.
    if openai_key == "default":
        api_key = os.environ.get("OPENAI_KEY") or os.environ.get("OPENAI_API_KEY")
        # Fail fast with an actionable message rather than passing None to
        # LLMClient and surfacing an opaque authentication error downstream.
        if not api_key:
            raise ValueError(
                "No API key available: set the OPENAI_KEY or OPENAI_API_KEY "
                "environment variable, or pass openai_key explicitly."
            )
    else:
        api_key = openai_key

    # Create the unified LLM client with the resolved credentials.
    client = LLMClient(openai_key=api_key)

    # User prompt: enumerates exactly the fields TextAnalysisResponse expects.
    prompt = f"""Analyze the following text and provide:
1. A brief summary
2. The overall sentiment (positive, negative, or neutral)
3. Key points extracted from the text
4. Your confidence level in this analysis (0-1)
Text to analyze:
{text}
"""
    system_prompt = (
        "You are a text analysis assistant. Provide accurate, concise analysis. "
        "Focus on the actual content and avoid over-interpretation."
    )

    # Call the LLM; the Pydantic schema enforces the response structure.
    # Low temperature keeps the analysis mostly deterministic.
    result = client.call(
        prompt=prompt,
        schema=TextAnalysisResponse,
        model=model,
        system_prompt=system_prompt,
        temperature=0.3,
    )

    # Return a plain dict (the Gradio JSON component expects a dict).
    return result.model_dump()
|