Spaces:
Sleeping
Sleeping
File size: 2,614 Bytes
e7b102f e6d4920 e7b102f 39de946 e7b102f e6d4920 e7b102f e6d4920 e7b102f 8a2e13f e7b102f e6d4920 e7b102f e6d4920 68923c2 e6d4920 e7b102f e6d4920 e7b102f e6d4920 e7b102f 39de946 e7b102f 8a2e13f e6d4920 8a2e13f e6d4920 8a2e13f e6d4920 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 |
# app.py
import asyncio
import gradio as gr
from typing import List, Tuple
from agents import FileSearchTool, Agent, ModelSettings, TResponseInputItem, Runner, RunConfig, trace
import os
from dotenv import load_dotenv
# Load environment variables from a local .env file (if present).
load_dotenv()
# ID of the vector store that FileSearchTool queries; None when the env var is unset.
VECTOR_STORE_ID = os.getenv("VECTOR_STORE_ID")
# Workflow identifier attached to run-trace metadata for observability.
WORKFLOW_ID = os.getenv("WORKFLOW_ID")
# --- Tools & Agent setup ---
# File-search tool scoped to the configured vector store.
# NOTE(review): if VECTOR_STORE_ID is unset this passes [None] — confirm the
# env var is always defined in the deployment environment.
file_search = FileSearchTool(
    vector_store_ids=[VECTOR_STORE_ID]
)
# Agent that answers academic-calendar questions by consulting the
# "reglamentos_upn" file-search tool, replying in Yoda's speech style
# (per the Spanish instructions below).
my_agent = Agent(
    name="My agent",
    instructions=(
        "Ante la pregunta del alumno sobre su calendario academico, consulta la herramienta de reglamentos_upn y por favor respondele como si fueras Yoda. "
    ),
    model="gpt-4.1-mini",
    tools=[file_search],
    model_settings=ModelSettings(
        temperature=1,   # default sampling temperature
        top_p=1,         # no nucleus-sampling truncation
        max_tokens=2048, # cap on the model's response length
        store=True       # NOTE(review): presumably persists responses server-side — confirm against SDK docs
    )
)
def _to_items_from_history(history: List[Tuple[str, str]]) -> List[TResponseInputItem]:
    """Translate Gradio (user, assistant) history tuples into agent input items.

    Each non-empty user message becomes a "user"/"input_text" item and each
    non-empty assistant message an "assistant"/"output_text" item, preserving
    turn order. Empty or None messages on either side are skipped.
    """
    converted: List[TResponseInputItem] = []
    for user_text, assistant_text in history:
        if user_text:
            converted.append(
                {"role": "user", "content": [{"type": "input_text", "text": user_text}]}
            )
        if assistant_text:
            converted.append(
                {"role": "assistant", "content": [{"type": "output_text", "text": assistant_text}]}
            )
    return converted
async def agent_reply(message: str, history: List[Tuple[str, str]]):
    """Gradio ChatInterface handler: forward the conversation to the agent.

    Converts the prior history plus the new user message into agent input
    items, runs the agent inside a trace span, and returns its final output
    as a string.
    """
    items = _to_items_from_history(history)
    items.append(
        {"role": "user", "content": [{"type": "input_text", "text": message}]}
    )
    # Wrap the run in a trace span so it shows up in the Agents SDK tracing UI.
    with trace("Gradio message"):
        run_result = await Runner.run(
            my_agent,
            input=items,
            run_config=RunConfig(
                trace_metadata={
                    "__trace_source__": "agent-builder",
                    "workflow_id": WORKFLOW_ID,
                }
            ),
        )
    return run_result.final_output_as(str)
# --- Updated Gradio ChatInterface ---
# Chat UI wired to the async agent handler above.
demo = gr.ChatInterface(
    fn=agent_reply,
    title="My agent (FileSearch-powered)",
    description=(
        "Este chat usa un agente con FileSearchTool. "
        "Escribe tu pregunta y el agente buscará primero en el file search antes de responder."
    ),
    # NOTE(review): the second example appears truncated ("¿Puedo copiar") —
    # confirm the intended full question.
    examples=["¿Qué me puedes contar del reglamento?", "¿Puedo copiar"],
    theme="soft",
    submit_btn="Enviar"
    # Removed clear_btn as it's not a valid parameter
)
# Script entry point: launch the Gradio app when run directly.
# Fix: removed a stray trailing " |" after demo.launch() — a scrape/paste
# artifact that made the line a SyntaxError.
if __name__ == "__main__":
    demo.launch()