"""
Input / output schema validation via Pydantic — no LLM required.

Demonstrates:
    1. Defining Pydantic models for agent input and output
    2. Validating correct and incorrect input data
    3. Validating LLM responses (correct vs missing required fields)
    4. Embedding JSON Schema strings in prompts
    5. Using plain JSON Schema dicts instead of Pydantic

Run:
    python -m examples.schema_validation_example
"""
import json
from pydantic import BaseModel, Field
from builder import GraphBuilder
# ── Pydantic schemas ──────────────────────────────────────────────────────────
class MathProblemInput(BaseModel):
    """Input for the math solver agent."""

    # Required free-text problem statement.
    question: str = Field(..., description="Mathematical question to solve")
    # Optional extra constraints or background supplied by the caller.
    context: str | None = Field(None, description="Additional context or constraints")
    # Bounded rating; pydantic enforces 1 <= difficulty <= 10 (defaults to easiest).
    difficulty: int = Field(1, ge=1, le=10, description="Difficulty level 1-10")
class MathSolutionOutput(BaseModel):
    """Output produced by the math solver agent."""

    # The final answer as display text.
    answer: str = Field(..., description="The final answer")
    # Model self-confidence, clamped to the unit interval by pydantic.
    confidence: float = Field(..., ge=0.0, le=1.0, description="Confidence 0.0-1.0")
    # Optional prose walkthrough of the solution.
    explanation: str | None = Field(None, description="Step-by-step explanation")
    # default_factory avoids the shared-mutable-default pitfall for lists.
    steps: list[str] = Field(default_factory=list, description="Solution steps")
class ReviewInput(BaseModel):
    """Input for the reviewer agent: a candidate solution plus its question."""

    solution: str  # the solver's proposed answer text to be reviewed
    original_question: str  # the question the solution claims to answer
class ReviewOutput(BaseModel):
    """Reviewer's verdict on a proposed solution."""

    is_correct: bool  # whether the reviewer judged the solution correct
    feedback: str  # free-text critique of the solution
    confidence: float  # reviewer's confidence in the verdict
# ── Graph factory ─────────────────────────────────────────────────────────────
def _create_pipeline():
    """Build the two-stage solver → reviewer graph with schema validation."""
    builder = GraphBuilder()

    # Stage 1: the solver, with full Pydantic input/output contracts.
    solver_cfg = dict(
        display_name="Math Solver",
        persona="Expert mathematician who solves problems step by step",
        description="Solves mathematical problems with detailed explanations",
        input_schema=MathProblemInput,
        output_schema=MathSolutionOutput,
        llm_backbone="gpt-4",
        temperature=0.0,
        tools=["calculator"],
    )
    builder.add_agent("solver", **solver_cfg)

    # Stage 2: the reviewer, validating the solver's work.
    reviewer_cfg = dict(
        display_name="Solution Reviewer",
        persona="Critical thinker who validates mathematical solutions",
        description="Reviews and validates mathematical solutions",
        input_schema=ReviewInput,
        output_schema=ReviewOutput,
        llm_backbone="gpt-4o-mini",
        temperature=0.0,
    )
    builder.add_agent("reviewer", **reviewer_cfg)

    # Solver output feeds straight into the reviewer.
    builder.add_workflow_edge("solver", "reviewer")
    return builder.build()
# ── Examples ──────────────────────────────────────────────────────────────────
def _header(title: str) -> None:
print(f"\nββ {title} ββ")
def example_valid_input():
    """Validate a well-formed payload against the solver's input schema."""
    _header("1 · Valid input")
    graph = _create_pipeline()
    # All required fields present and correctly typed — expect r.valid to be True.
    r = graph.validate_agent_input(
        "solver",
        {
            "question": "Solve: x² + 5x + 6 = 0",
            "context": "Find both solutions",
            "difficulty": 3,
        },
    )
    print(f" {'✅ Valid' if r.valid else f'❌ Errors: {r.errors}'}")
def example_invalid_input():
    """Show the structured errors produced by a bad input payload."""
    _header("2 · Invalid input (missing field + wrong type)")
    graph = _create_pipeline()
    r = graph.validate_agent_input(
        "solver",
        {
            "context": "Some context",
            "difficulty": "hard",  # should be int; 'question' is missing
        },
    )
    if r.valid:
        print(" ✅ Unexpectedly valid")
    else:
        print(f" ❌ {len(r.errors)} error(s):")
        for e in r.errors:
            print(f" • {e}")
def example_valid_output():
    """Validate a well-formed (simulated) LLM response against the output schema."""
    _header("3 · Valid LLM output")
    graph = _create_pipeline()
    # The raw LLM response arrives as a JSON string, hence json.dumps here.
    r = graph.validate_agent_output(
        "solver",
        json.dumps(
            {
                "answer": "x₁ = −2, x₂ = −3",
                "confidence": 0.95,
                "explanation": "Factoring: (x+2)(x+3) = 0",
                "steps": ["Factor the equation", "Apply zero product property", "Solve for x"],
            }
        ),
    )
    print(f" {'✅ Valid' if r.valid else f'❌ Errors: {r.errors}'}")
def example_invalid_output():
    """Show validation failure when the LLM response omits a required field."""
    _header("4 · Invalid LLM output (missing 'confidence')")
    graph = _create_pipeline()
    # 'confidence' is required by MathSolutionOutput but absent here.
    r = graph.validate_agent_output(
        "solver",
        json.dumps(
            {
                "answer": "x = −2 or x = −3",
                "explanation": "Solved it!",
            }
        ),
    )
    if r.valid:
        print(" ✅ Unexpectedly valid")
    else:
        print(f" ❌ Errors: {r.errors}")
        print(" → Strategy: retry or fall back to a safe default.")
def example_schema_in_prompt():
    """Embed the agent's input/output JSON Schemas directly in a prompt string."""
    _header("5 · JSON Schema embedded in prompt")
    graph = _create_pipeline()
    in_schema = graph.get_input_schema_json("solver")
    out_schema = graph.get_output_schema_json("solver")
    # {{question}} is a literal template placeholder, escaped from the f-string.
    prompt = (
        "You are a math solver.\n\n"
        f"Input format:\n{json.dumps(in_schema, indent=2)}\n\n"
        f"Output format:\n{json.dumps(out_schema, indent=2)}\n\n"
        "Now solve: {{question}}"
    )
    print(f" Prompt preview (300 chars):\n {prompt[:300]}…")
def example_dict_schema():
    """Use a plain JSON Schema dict (no Pydantic model) as an output schema."""
    _header("6 · Plain dict schema (no Pydantic)")
    schema = {
        "type": "object",
        "properties": {"result": {"type": "string"}, "score": {"type": "number"}},
        "required": ["result", "score"],
    }
    builder = GraphBuilder()
    builder.add_agent("simple_solver", output_schema=schema)
    graph = builder.build()
    # r1 satisfies the schema; r2 mistypes 'score' and should fail.
    r1 = graph.validate_agent_output("simple_solver", {"result": "42", "score": 0.9})
    r2 = graph.validate_agent_output("simple_solver", {"result": "42", "score": "high"})
    print(f" Valid data → valid={r1.valid}")
    print(f" Invalid data → valid={r2.valid} errors={r2.errors}")
# ── Entry point ───────────────────────────────────────────────────────────────
def main():
    """Run all six schema-validation examples in order."""
    example_valid_input()
    example_invalid_input()
    example_valid_output()
    example_invalid_output()
    example_schema_in_prompt()
    example_dict_schema()
    print("\nAll schema examples completed ✅")
# Allow direct execution; the stray trailing '|' (scrape residue) was a
# syntax error and has been removed.
if __name__ == "__main__":
    main()