import asyncio  # NOTE(review): unused in this file — confirm callers need it before removing
import nest_asyncio  # NOTE(review): imported but nest_asyncio.apply() is never called — confirm intent

from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI

from youtube_tool import youtube_transcript_tool, youtube_transcript_snippet_tool
from multiple_tools import (
    round_to_two_decimals_tool,
    text_inverter_tool,
    google_web_search_tool,
    wikipedia_search_tool,
)


async def smart_agent(question: str, api_key: str) -> str:
    """Answer *question* using a tool-equipped LlamaIndex agent workflow.

    Builds a DeepSeek-R1 LLM via the Hugging Face Inference API, wires it
    into an ``AgentWorkflow`` with search/transcript/utility tools, runs the
    workflow on the question, and returns the agent's answer as a string.

    Args:
        question: The user question to answer.
        api_key: Hugging Face API token used to authenticate the LLM calls.

    Returns:
        The agent's final response, converted to ``str``.
    """
    llm = HuggingFaceInferenceAPI(
        model_name="deepseek-ai/DeepSeek-R1-0528",
        token=api_key,
        provider="auto",
        # NOTE(review): max_iterations/max_execution_time look like agent-level
        # settings, not LLM constructor args — confirm HuggingFaceInferenceAPI
        # actually honors them.
        max_iterations=10,
        max_execution_time=60,
    )

    # Assemble the agent with its full toolbox (web/wiki search, YouTube
    # transcripts, and small utility tools).
    llamaindex_agent = AgentWorkflow.from_tools_or_functions(
        [
            wikipedia_search_tool,
            youtube_transcript_tool,
            youtube_transcript_snippet_tool,
            round_to_two_decimals_tool,
            text_inverter_tool,
            google_web_search_tool,
        ],
        llm=llm,
    )

    response = await llamaindex_agent.run(question)
    # AgentWorkflow.run(...) resolves to a response object, not a plain str;
    # coerce it so the declared `-> str` return type holds for callers.
    return str(response)