# NOTE: The following lines are page chrome captured when this file was
# scraped from a Hugging Face Space file view; they are not part of the module.
# Spaces: Sleeping / Sleeping
# File size: 1,673 Bytes
import os
import json

from smolagents import create_agent_executor, Agent, Task
from langgraph.prebuilt import ToolExecutor
from huggingface_hub import InferenceClient

from tools import *

# Pull the Hugging Face Inference Endpoint configuration from the environment.
HF_API_URL = os.getenv("HF_ENDPOINT_URL")
HF_API_TOKEN = os.getenv("HF_TOKEN")

# Fail fast at import time rather than erroring mid-request later.
if not HF_API_URL or not HF_API_TOKEN:
    raise ValueError("Missing Hugging Face endpoint URL or token.")

# Client used by run_llm() to talk to the hosted model endpoint.
llm = InferenceClient(
    model=HF_API_URL,
    token=HF_API_TOKEN,
)
def run_llm(prompt: str) -> str:
    """Run a single greedy-decoding completion against the HF endpoint.

    Args:
        prompt: The fully rendered prompt text to send to the model.

    Returns:
        The generated completion with surrounding whitespace stripped.
    """
    # do_sample=False already selects greedy decoding; passing temperature=0.0
    # alongside it is rejected by some TGI backends ("temperature must be
    # strictly positive"), so the temperature argument is omitted entirely.
    response = llm.text_generation(
        prompt,
        max_new_tokens=512,
        do_sample=False,
        return_full_text=False,
    )
    return response.strip()
# Every tool made available to the agent, instantiated in a fixed order.
tool_list = [
    tool_cls()
    for tool_cls in (
        GetAttachmentTool,
        GoogleSearchTool,
        GoogleSiteSearchTool,
        ContentRetrieverTool,
        SpeechRecognitionTool,
        YouTubeVideoTool,
        ClassifierTool,
        ImageToChessBoardFENTool,
    )
]

# LangGraph executor that dispatches tool calls on the agent's behalf.
tool_executor = ToolExecutor(tool_list)

# The agent itself: the LLM callable plus the tool belt above.
agent = Agent(
    llm=run_llm,
    tools=tool_list,
)

# Non-streaming executor wrapping the agent and its tool dispatcher.
agent_executor = create_agent_executor(
    agent=agent,
    tool_executor=tool_executor,
    stream=False,
)
def load_tasks(metadata_path: str = "metadata.jsonl") -> list[Task]:
    """Load benchmark tasks from a JSON-lines metadata file.

    Args:
        metadata_path: Path to a .jsonl file with one JSON object per line,
            each containing at least "question_id" and "answer" keys.

    Returns:
        A list of Task objects, one per non-blank line in the file.

    Raises:
        FileNotFoundError: If *metadata_path* does not exist.
        json.JSONDecodeError: If a non-blank line is not valid JSON.
        KeyError: If a line lacks the required keys.
    """
    tasks: list[Task] = []
    # Explicit encoding so parsing does not depend on the platform default.
    with open(metadata_path, "r", encoding="utf-8") as f:
        for line in f:
            # Tolerate blank/whitespace-only lines (common at end of file),
            # which would otherwise crash json.loads.
            if not line.strip():
                continue
            data = json.loads(line)
            tasks.append(Task(
                task_id=data["question_id"],
                # NOTE(review): the task *input* is taken from the "answer"
                # field — this looks like it may want the question text
                # instead; confirm against the metadata schema.
                input=data["answer"],
            ))
    return tasks
def solve_task(task: Task) -> str:
    """Run the agent executor on one task and return its textual output.

    Args:
        task: The Task whose ``input`` is fed to the agent executor.

    Returns:
        The executor result's "output" field, or "" when that key is absent.
    """
    outcome = agent_executor.invoke(task.input)
    answer = outcome.get("output", "")
    return answer